-rw-r--r--.bazelignore1
-rw-r--r--.bazelrc27
-rw-r--r--.buildkite/hooks/post-command65
-rw-r--r--.buildkite/hooks/pre-command34
-rw-r--r--.buildkite/pipeline.yaml229
-rw-r--r--.devcontainer.json9
-rw-r--r--.github/ISSUE_TEMPLATE/bug_report.yml67
-rw-r--r--.github/ISSUE_TEMPLATE/config.yml11
-rw-r--r--.github/ISSUE_TEMPLATE/feature_request.yml24
-rw-r--r--.github/labeler.yml42
-rw-r--r--.github/workflows/go.yml68
-rw-r--r--.github/workflows/issue_reviver.yml18
-rw-r--r--.github/workflows/labeler.yml13
-rw-r--r--.gitignore7
-rw-r--r--.vscode/tasks.json31
-rw-r--r--BUILD145
-rw-r--r--CODE_OF_CONDUCT.md91
-rw-r--r--CONTRIBUTING.md127
-rw-r--r--GOVERNANCE.md113
-rw-r--r--Makefile456
-rw-r--r--README.md133
-rw-r--r--SECURITY.md10
-rw-r--r--WORKSPACE2524
-rw-r--r--debian/BUILD50
-rw-r--r--debian/description1
-rwxr-xr-xdebian/postinst.sh31
-rw-r--r--g3doc/BUILD44
-rw-r--r--g3doc/Layers.png bin 11044 -> 0 bytes
-rw-r--r--g3doc/Layers.svg1
-rw-r--r--g3doc/Machine-Virtualization.png bin 13205 -> 0 bytes
-rw-r--r--g3doc/Machine-Virtualization.svg1
-rw-r--r--g3doc/README.md164
-rw-r--r--g3doc/Rule-Based-Execution.png bin 6780 -> 0 bytes
-rw-r--r--g3doc/Rule-Based-Execution.svg1
-rw-r--r--g3doc/Sentry-Gofer.png bin 9064 -> 0 bytes
-rw-r--r--g3doc/Sentry-Gofer.svg1
-rw-r--r--g3doc/architecture_guide/BUILD50
-rw-r--r--g3doc/architecture_guide/performance.md277
-rw-r--r--g3doc/architecture_guide/platforms.md61
-rw-r--r--g3doc/architecture_guide/platforms.png bin 21384 -> 0 bytes
-rw-r--r--g3doc/architecture_guide/platforms.svg334
-rw-r--r--g3doc/architecture_guide/resources.md144
-rw-r--r--g3doc/architecture_guide/resources.png bin 16621 -> 0 bytes
-rw-r--r--g3doc/architecture_guide/resources.svg208
-rw-r--r--g3doc/architecture_guide/security.md255
-rw-r--r--g3doc/architecture_guide/security.png bin 16932 -> 0 bytes
-rw-r--r--g3doc/architecture_guide/security.svg153
-rw-r--r--g3doc/community.md31
-rw-r--r--g3doc/logo.png bin 27719 -> 0 bytes
-rw-r--r--g3doc/logo.txt1
-rw-r--r--g3doc/proposals/BUILD16
-rw-r--r--g3doc/proposals/gsoc-2021-ideas.md146
-rw-r--r--g3doc/proposals/runtime_dedicate_os_thread.md188
-rw-r--r--g3doc/roadmap.md49
-rw-r--r--g3doc/style.md97
-rw-r--r--g3doc/user_guide/BUILD70
-rw-r--r--g3doc/user_guide/FAQ.md152
-rw-r--r--g3doc/user_guide/checkpoint_restore.md101
-rw-r--r--g3doc/user_guide/compatibility.md94
-rw-r--r--g3doc/user_guide/containerd/BUILD33
-rw-r--r--g3doc/user_guide/containerd/configuration.md102
-rw-r--r--g3doc/user_guide/containerd/containerd_11.md167
-rw-r--r--g3doc/user_guide/containerd/quick_start.md182
-rw-r--r--g3doc/user_guide/debugging.md156
-rw-r--r--g3doc/user_guide/filesystem.md60
-rw-r--r--g3doc/user_guide/install.md181
-rw-r--r--g3doc/user_guide/networking.md84
-rw-r--r--g3doc/user_guide/platforms.md95
-rw-r--r--g3doc/user_guide/quick_start/BUILD33
-rw-r--r--g3doc/user_guide/quick_start/docker.md99
-rw-r--r--g3doc/user_guide/quick_start/kubernetes.md34
-rw-r--r--g3doc/user_guide/quick_start/oci.md43
-rw-r--r--g3doc/user_guide/tutorials/BUILD55
-rw-r--r--g3doc/user_guide/tutorials/add-node-pool.png bin 70208 -> 0 bytes
-rw-r--r--g3doc/user_guide/tutorials/cni.md174
-rw-r--r--g3doc/user_guide/tutorials/docker-compose.md100
-rw-r--r--g3doc/user_guide/tutorials/docker.md70
-rw-r--r--g3doc/user_guide/tutorials/knative.md88
-rw-r--r--g3doc/user_guide/tutorials/kubernetes.md236
-rw-r--r--g3doc/user_guide/tutorials/node-pool-button.png bin 13757 -> 0 bytes
-rw-r--r--images/BUILD1
-rw-r--r--images/README.md70
-rw-r--r--images/agent/Dockerfile12
-rw-r--r--images/agent/README.md7
-rw-r--r--images/arm-qemu/Dockerfile.x86_6412
-rwxr-xr-ximages/arm-qemu/initramfs/init39
-rwxr-xr-ximages/arm-qemu/test.sh28
-rw-r--r--images/basic/alpine/Dockerfile1
-rw-r--r--images/basic/busybox/Dockerfile1
-rw-r--r--images/basic/fsstress/Dockerfile.x86_6417
-rwxr-xr-ximages/basic/fsstress/run.sh17
-rw-r--r--images/basic/httpd/Dockerfile1
-rw-r--r--images/basic/integrationtest/Dockerfile.x86_6413
-rw-r--r--images/basic/integrationtest/copy_up_testfile.txt1
-rw-r--r--images/basic/integrationtest/link_test.c93
-rw-r--r--images/basic/integrationtest/ping4.sh25
-rw-r--r--images/basic/integrationtest/ping6.sh32
-rw-r--r--images/basic/integrationtest/test_copy_up.c88
-rw-r--r--images/basic/integrationtest/test_rewinddir.c78
-rw-r--r--images/basic/integrationtest/test_sticky.c96
-rw-r--r--images/basic/mysql/Dockerfile1
-rw-r--r--images/basic/nginx/Dockerfile1
-rw-r--r--images/basic/python/Dockerfile2
-rw-r--r--images/basic/resolv/Dockerfile1
-rw-r--r--images/basic/ruby/Dockerfile1
-rw-r--r--images/basic/tmpfile/Dockerfile4
-rw-r--r--images/basic/tomcat/Dockerfile1
-rw-r--r--images/basic/tomcat/Dockerfile.aarch641
-rw-r--r--images/basic/ubuntu/Dockerfile1
-rw-r--r--images/benchmarks/ab/Dockerfile7
-rw-r--r--images/benchmarks/absl/Dockerfile.x86_6422
-rw-r--r--images/benchmarks/alpine/Dockerfile1
-rw-r--r--images/benchmarks/ffmpeg/Dockerfile9
-rw-r--r--images/benchmarks/fio/Dockerfile7
-rw-r--r--images/benchmarks/hey/Dockerfile13
-rw-r--r--images/benchmarks/httpd/Dockerfile17
-rw-r--r--images/benchmarks/httpd/apache2-tmpdir.conf5
-rw-r--r--images/benchmarks/iperf/Dockerfile8
-rw-r--r--images/benchmarks/nginx/Dockerfile12
-rw-r--r--images/benchmarks/nginx/nginx.conf19
-rw-r--r--images/benchmarks/nginx/nginx_gofer.conf19
-rw-r--r--images/benchmarks/node/Dockerfile1
-rw-r--r--images/benchmarks/node/index.hbs8
-rw-r--r--images/benchmarks/node/index.js42
-rw-r--r--images/benchmarks/node/package-lock.json486
-rw-r--r--images/benchmarks/node/package.json19
-rw-r--r--images/benchmarks/redis/Dockerfile1
-rwxr-xr-ximages/benchmarks/ruby/Dockerfile27
-rwxr-xr-ximages/benchmarks/ruby/Gemfile5
-rw-r--r--images/benchmarks/ruby/Gemfile.lock26
-rwxr-xr-ximages/benchmarks/ruby/config.ru2
-rwxr-xr-ximages/benchmarks/ruby/index.erb8
-rwxr-xr-ximages/benchmarks/ruby/main.rb27
-rw-r--r--images/benchmarks/runsc/Dockerfile.x86_6425
-rw-r--r--images/benchmarks/sysbench/Dockerfile7
-rw-r--r--images/benchmarks/tensorflow/Dockerfile7
-rw-r--r--images/benchmarks/util/Dockerfile3
-rw-r--r--images/default/Dockerfile29
-rw-r--r--images/defs.bzl34
-rw-r--r--images/iptables/Dockerfile2
-rw-r--r--images/jekyll/Dockerfile.x86_6419
-rwxr-xr-ximages/jekyll/build.sh22
-rw-r--r--images/jekyll/checks.rb36
-rw-r--r--images/packetdrill/Dockerfile8
-rw-r--r--images/packetimpact/Dockerfile18
-rw-r--r--images/runtimes/go1.12/Dockerfile.x86_644
-rw-r--r--images/runtimes/java11/Dockerfile22
-rw-r--r--images/runtimes/nodejs12.4.0/Dockerfile21
-rw-r--r--images/runtimes/php7.3.6/Dockerfile19
-rw-r--r--images/runtimes/python3.7.3/Dockerfile21
-rw-r--r--images/syzkaller/Dockerfile11
-rw-r--r--images/syzkaller/README.md58
-rw-r--r--images/syzkaller/default-gvisor-config.cfg15
-rw-r--r--nogo.yaml176
-rw-r--r--pkg/abi/BUILD13
-rw-r--r--pkg/abi/abi_linux_state_autogen.go5
-rw-r--r--pkg/abi/abi_state_autogen.go3
-rw-r--r--pkg/abi/linux/BUILD94
-rw-r--r--pkg/abi/linux/errno/BUILD9
-rw-r--r--pkg/abi/linux/errno/errno_state_autogen.go3
-rw-r--r--pkg/abi/linux/linux_abi_autogen_unsafe.go14903
-rw-r--r--pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go741
-rw-r--r--pkg/abi/linux/linux_amd64_state_autogen.go120
-rw-r--r--pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go653
-rw-r--r--pkg/abi/linux/linux_arm64_state_autogen.go50
-rw-r--r--pkg/abi/linux/linux_state_autogen.go290
-rw-r--r--pkg/abi/linux/netfilter_test.go48
-rw-r--r--pkg/amutex/BUILD21
-rw-r--r--pkg/amutex/amutex_state_autogen.go3
-rw-r--r--pkg/amutex/amutex_test.go98
-rw-r--r--pkg/atomicbitops/BUILD24
-rw-r--r--pkg/atomicbitops/atomicbitops_32bit_unsafe_state_autogen.go64
-rw-r--r--pkg/atomicbitops/atomicbitops_64bit_state_autogen.go64
-rw-r--r--pkg/atomicbitops/atomicbitops_state_autogen.go6
-rw-r--r--pkg/atomicbitops/atomicbitops_test.go198
-rw-r--r--pkg/binary/BUILD16
-rw-r--r--pkg/binary/binary.go266
-rw-r--r--pkg/binary/binary_test.go266
-rw-r--r--pkg/bits/BUILD55
-rw-r--r--pkg/bits/bits32.go33
-rw-r--r--pkg/bits/bits64.go33
-rw-r--r--pkg/bits/bits_state_autogen.go6
-rw-r--r--pkg/bits/bits_template.go52
-rw-r--r--pkg/bits/uint64_test.go134
-rw-r--r--pkg/bpf/BUILD32
-rw-r--r--pkg/bpf/bpf_state_autogen.go36
-rw-r--r--pkg/bpf/decoder_test.go146
-rw-r--r--pkg/bpf/interpreter_test.go799
-rw-r--r--pkg/bpf/program_builder_test.go185
-rw-r--r--pkg/buffer/BUILD46
-rw-r--r--pkg/buffer/buffer_list.go221
-rw-r--r--pkg/buffer/buffer_state_autogen.go163
-rw-r--r--pkg/buffer/buffer_test.go111
-rw-r--r--pkg/buffer/buffer_unsafe_state_autogen.go3
-rw-r--r--pkg/buffer/pool_test.go51
-rw-r--r--pkg/buffer/view_test.go900
-rw-r--r--pkg/cleanup/BUILD17
-rw-r--r--pkg/cleanup/cleanup_state_autogen.go3
-rw-r--r--pkg/cleanup/cleanup_test.go66
-rw-r--r--pkg/compressio/BUILD17
-rw-r--r--pkg/compressio/compressio_state_autogen.go3
-rw-r--r--pkg/compressio/compressio_test.go290
-rw-r--r--pkg/context/BUILD12
-rw-r--r--pkg/context/context_state_autogen.go3
-rw-r--r--pkg/control/client/BUILD15
-rw-r--r--pkg/control/client/client_state_autogen.go3
-rw-r--r--pkg/control/server/BUILD15
-rw-r--r--pkg/control/server/server_state_autogen.go3
-rw-r--r--pkg/coverage/BUILD14
-rw-r--r--pkg/coverage/coverage_state_autogen.go5
-rw-r--r--pkg/cpuid/BUILD36
-rw-r--r--pkg/cpuid/cpuid_arm64_state_autogen.go53
-rw-r--r--pkg/cpuid/cpuid_arm64_test.go56
-rw-r--r--pkg/cpuid/cpuid_parse_x86_test.go146
-rw-r--r--pkg/cpuid/cpuid_state_autogen.go3
-rw-r--r--pkg/cpuid/cpuid_x86_state_autogen.go115
-rw-r--r--pkg/cpuid/cpuid_x86_test.go244
-rw-r--r--pkg/crypto/BUILD12
-rw-r--r--pkg/crypto/crypto.go16
-rw-r--r--pkg/crypto/crypto_stdlib.go35
-rw-r--r--pkg/errors/BUILD10
-rw-r--r--pkg/errors/errors_state_autogen.go3
-rw-r--r--pkg/errors/linuxerr/BUILD26
-rw-r--r--pkg/errors/linuxerr/linuxerr_state_autogen.go3
-rw-r--r--pkg/errors/linuxerr/linuxerr_test.go306
-rw-r--r--pkg/eventchannel/BUILD40
-rw-r--r--pkg/eventchannel/event.proto27
-rw-r--r--pkg/eventchannel/event_test.go146
-rw-r--r--pkg/eventchannel/eventchannel_go_proto/event.pb.go156
-rw-r--r--pkg/eventchannel/eventchannel_state_autogen.go5
-rw-r--r--pkg/fd/BUILD18
-rw-r--r--pkg/fd/fd_state_autogen.go3
-rw-r--r--pkg/fd/fd_test.go137
-rw-r--r--pkg/fdchannel/BUILD24
-rw-r--r--pkg/fdchannel/fdchannel_test.go133
-rw-r--r--pkg/fdchannel/fdchannel_unsafe_state_autogen.go5
-rw-r--r--pkg/fdnotifier/BUILD17
-rw-r--r--pkg/fdnotifier/fdnotifier_state_autogen.go5
-rw-r--r--pkg/fdnotifier/fdnotifier_unsafe_state_autogen.go5
-rw-r--r--pkg/flipcall/BUILD34
-rw-r--r--pkg/flipcall/flipcall_example_test.go113
-rw-r--r--pkg/flipcall/flipcall_linux_state_autogen.go5
-rw-r--r--pkg/flipcall/flipcall_state_autogen.go5
-rw-r--r--pkg/flipcall/flipcall_test.go405
-rw-r--r--pkg/flipcall/flipcall_unsafe_state_autogen.go3
-rw-r--r--pkg/fspath/BUILD26
-rw-r--r--pkg/fspath/builder_test.go58
-rw-r--r--pkg/fspath/fspath_state_autogen.go3
-rw-r--r--pkg/fspath/fspath_test.go134
-rw-r--r--pkg/gohacks/BUILD20
-rw-r--r--pkg/gohacks/gohacks_test.go97
-rw-r--r--pkg/goid/BUILD23
-rw-r--r--pkg/goid/goid_test.go81
-rw-r--r--pkg/hostarch/BUILD42
-rw-r--r--pkg/hostarch/addr_range.go (renamed from pkg/segment/range.go)39
-rw-r--r--pkg/hostarch/addr_range_seq_test.go197
-rw-r--r--pkg/hostarch/hostarch_arm64_state_autogen.go5
-rw-r--r--pkg/hostarch/hostarch_state_autogen.go80
-rw-r--r--pkg/hostarch/hostarch_unsafe_state_autogen.go3
-rw-r--r--pkg/hostarch/hostarch_x86_state_autogen.go5
-rw-r--r--pkg/ilist/BUILD56
-rw-r--r--pkg/ilist/ilist_state_autogen.go68
-rw-r--r--pkg/ilist/interface_list.go (renamed from pkg/ilist/list.go)15
-rw-r--r--pkg/ilist/list_test.go240
-rw-r--r--pkg/linewriter/BUILD18
-rw-r--r--pkg/linewriter/linewriter_test.go81
-rw-r--r--pkg/lisafs/README.md363
-rw-r--r--pkg/log/BUILD32
-rw-r--r--pkg/log/json_test.go64
-rw-r--r--pkg/log/log_test.go105
-rw-r--r--pkg/marshal/BUILD16
-rw-r--r--pkg/marshal/marshal_state_autogen.go3
-rw-r--r--pkg/marshal/primitive/BUILD18
-rw-r--r--pkg/marshal/primitive/primitive_abi_autogen_unsafe.go1366
-rw-r--r--pkg/marshal/primitive/primitive_state_autogen.go3
-rw-r--r--pkg/memutil/BUILD14
-rw-r--r--pkg/memutil/memutil_linux_unsafe_state_autogen.go5
-rw-r--r--pkg/memutil/memutil_state_autogen.go5
-rw-r--r--pkg/merkletree/BUILD23
-rw-r--r--pkg/merkletree/merkletree_state_autogen.go3
-rw-r--r--pkg/merkletree/merkletree_test.go905
-rw-r--r--pkg/metric/BUILD38
-rw-r--r--pkg/metric/metric.proto101
-rw-r--r--pkg/metric/metric_go_proto/metric.pb.go732
-rw-r--r--pkg/metric/metric_state_autogen.go3
-rw-r--r--pkg/metric/metric_test.go499
-rw-r--r--pkg/p9/BUILD56
-rw-r--r--pkg/p9/buffer_test.go31
-rw-r--r--pkg/p9/client_test.go111
-rw-r--r--pkg/p9/messages_test.go507
-rw-r--r--pkg/p9/p9_state_autogen.go3
-rw-r--r--pkg/p9/p9_test.go188
-rw-r--r--pkg/p9/p9test/BUILD90
-rw-r--r--pkg/p9/p9test/client_test.go2252
-rw-r--r--pkg/p9/p9test/p9test.go329
-rw-r--r--pkg/p9/transport_test.go233
-rw-r--r--pkg/p9/version_test.go145
-rw-r--r--pkg/pool/BUILD25
-rw-r--r--pkg/pool/pool_state_autogen.go3
-rw-r--r--pkg/pool/pool_test.go64
-rw-r--r--pkg/procid/BUILD40
-rw-r--r--pkg/procid/procid_net_test.go21
-rw-r--r--pkg/procid/procid_state_autogen.go5
-rw-r--r--pkg/procid/procid_test.go86
-rw-r--r--pkg/rand/BUILD16
-rw-r--r--pkg/rand/rand_linux_state_autogen.go3
-rw-r--r--pkg/rand/rand_state_autogen.go5
-rw-r--r--pkg/refs/BUILD42
-rw-r--r--pkg/refs/refcounter_test.go179
-rw-r--r--pkg/refs/refs_state_autogen.go156
-rw-r--r--pkg/refs/weak_ref_list.go221
-rw-r--r--pkg/refsvfs2/BUILD39
-rw-r--r--pkg/refsvfs2/README.md66
-rw-r--r--pkg/refsvfs2/refsvfs2_state_autogen.go3
-rw-r--r--pkg/ring0/BUILD86
-rw-r--r--pkg/ring0/aarch64.go123
-rw-r--r--pkg/ring0/defs.go113
-rw-r--r--pkg/ring0/defs_amd64.go162
-rw-r--r--pkg/ring0/defs_arm64.go142
-rw-r--r--pkg/ring0/defs_impl_amd64.go598
-rw-r--r--pkg/ring0/defs_impl_arm64.go (renamed from pkg/ring0/offsets_arm64.go)338
-rw-r--r--pkg/ring0/entry_impl_amd64.s (renamed from pkg/ring0/entry_amd64.s)70
-rw-r--r--pkg/ring0/entry_impl_arm64.s (renamed from pkg/ring0/entry_arm64.s)90
-rw-r--r--pkg/ring0/gen_offsets/BUILD41
-rw-r--r--pkg/ring0/gen_offsets/main.go24
-rw-r--r--pkg/ring0/offsets_amd64.go101
-rw-r--r--pkg/ring0/pagetables/BUILD88
-rw-r--r--pkg/ring0/pagetables/pagetables_aarch64_state_autogen.go6
-rw-r--r--pkg/ring0/pagetables/pagetables_amd64_state_autogen.go5
-rw-r--r--pkg/ring0/pagetables/pagetables_amd64_test.go76
-rw-r--r--pkg/ring0/pagetables/pagetables_arm64_state_autogen.go5
-rw-r--r--pkg/ring0/pagetables/pagetables_arm64_test.go81
-rw-r--r--pkg/ring0/pagetables/pagetables_state_autogen.go3
-rw-r--r--pkg/ring0/pagetables/pagetables_test.go156
-rw-r--r--pkg/ring0/pagetables/pagetables_unsafe_state_autogen.go3
-rw-r--r--pkg/ring0/pagetables/pagetables_x86_state_autogen.go6
-rw-r--r--pkg/ring0/pagetables/walker_empty_amd64.go265
-rw-r--r--pkg/ring0/pagetables/walker_empty_arm64.go275
-rw-r--r--pkg/ring0/pagetables/walker_lookup_amd64.go265
-rw-r--r--pkg/ring0/pagetables/walker_lookup_arm64.go275
-rw-r--r--pkg/ring0/pagetables/walker_map_amd64.go265
-rw-r--r--pkg/ring0/pagetables/walker_map_arm64.go275
-rw-r--r--pkg/ring0/pagetables/walker_unmap_amd64.go265
-rw-r--r--pkg/ring0/pagetables/walker_unmap_arm64.go275
-rw-r--r--pkg/ring0/ring0_amd64_state_autogen.go7
-rw-r--r--pkg/ring0/ring0_arm64_state_autogen.go7
-rw-r--r--pkg/ring0/ring0_impl_amd64_state_autogen.go7
-rw-r--r--pkg/ring0/ring0_impl_arm64_state_autogen.go7
-rw-r--r--pkg/ring0/ring0_state_autogen.go3
-rw-r--r--pkg/ring0/ring0_unsafe_state_autogen.go3
-rw-r--r--pkg/ring0/x86.go297
-rw-r--r--pkg/safecopy/BUILD34
-rw-r--r--pkg/safecopy/LICENSE27
-rw-r--r--pkg/safecopy/safecopy_state_autogen.go3
-rw-r--r--pkg/safecopy/safecopy_test.go568
-rw-r--r--pkg/safecopy/safecopy_unsafe_state_autogen.go3
-rw-r--r--pkg/safemem/BUILD30
-rw-r--r--pkg/safemem/io_test.go199
-rw-r--r--pkg/safemem/safemem_state_autogen.go3
-rw-r--r--pkg/safemem/safemem_unsafe_state_autogen.go3
-rw-r--r--pkg/safemem/seq_test.go217
-rw-r--r--pkg/seccomp/BUILD59
-rw-r--r--pkg/seccomp/seccomp_amd64_state_autogen.go5
-rw-r--r--pkg/seccomp/seccomp_arm64_state_autogen.go5
-rw-r--r--pkg/seccomp/seccomp_state_autogen.go3
-rw-r--r--pkg/seccomp/seccomp_test.go1059
-rw-r--r--pkg/seccomp/seccomp_test_victim.go116
-rw-r--r--pkg/seccomp/seccomp_test_victim_amd64.go33
-rw-r--r--pkg/seccomp/seccomp_test_victim_arm64.go30
-rw-r--r--pkg/seccomp/seccomp_unsafe_state_autogen.go3
-rw-r--r--pkg/secio/BUILD19
-rw-r--r--pkg/secio/secio_state_autogen.go3
-rw-r--r--pkg/secio/secio_test.go126
-rw-r--r--pkg/segment/BUILD33
-rw-r--r--pkg/segment/set_state.go25
-rw-r--r--pkg/segment/test/BUILD68
-rw-r--r--pkg/segment/test/segment_test.go865
-rw-r--r--pkg/segment/test/set_functions.go55
-rw-r--r--pkg/sentry/BUILD14
-rw-r--r--pkg/sentry/arch/BUILD47
-rw-r--r--pkg/sentry/arch/arch_aarch64_abi_autogen_unsafe.go15
-rw-r--r--pkg/sentry/arch/arch_aarch64_state_autogen.go76
-rw-r--r--pkg/sentry/arch/arch_abi_autogen_unsafe.go13
-rw-r--r--pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go412
-rw-r--r--pkg/sentry/arch/arch_amd64_state_autogen.go43
-rw-r--r--pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go588
-rw-r--r--pkg/sentry/arch/arch_arm64_state_autogen.go43
-rw-r--r--pkg/sentry/arch/arch_state_autogen.go80
-rw-r--r--pkg/sentry/arch/arch_unsafe_abi_autogen_unsafe.go13
-rw-r--r--pkg/sentry/arch/arch_unsafe_state_autogen.go3
-rw-r--r--pkg/sentry/arch/arch_x86_abi_autogen_unsafe.go16
-rw-r--r--pkg/sentry/arch/arch_x86_impl_abi_autogen_unsafe.go16
-rw-r--r--pkg/sentry/arch/arch_x86_impl_state_autogen.go44
-rw-r--r--pkg/sentry/arch/arch_x86_state_autogen.go39
-rw-r--r--pkg/sentry/arch/fpu/BUILD21
-rw-r--r--pkg/sentry/arch/fpu/fpu_amd64_state_autogen.go5
-rw-r--r--pkg/sentry/arch/fpu/fpu_arm64_state_autogen.go5
-rw-r--r--pkg/sentry/arch/fpu/fpu_state_autogen.go3
-rw-r--r--pkg/sentry/arch/registers.proto93
-rw-r--r--pkg/sentry/arch/registers_go_proto/registers.pb.go863
-rw-r--r--pkg/sentry/contexttest/BUILD21
-rw-r--r--pkg/sentry/contexttest/contexttest.go168
-rw-r--r--pkg/sentry/control/BUILD51
-rw-r--r--pkg/sentry/control/control_state_autogen.go3
-rw-r--r--pkg/sentry/control/proc_test.go166
-rw-r--r--pkg/sentry/device/BUILD20
-rw-r--r--pkg/sentry/device/device_state_autogen.go97
-rw-r--r--pkg/sentry/device/device_test.go59
-rw-r--r--pkg/sentry/devices/memdev/BUILD29
-rw-r--r--pkg/sentry/devices/memdev/memdev_state_autogen.go241
-rw-r--r--pkg/sentry/devices/quotedev/BUILD16
-rw-r--r--pkg/sentry/devices/quotedev/quotedev.go52
-rw-r--r--pkg/sentry/devices/ttydev/BUILD16
-rw-r--r--pkg/sentry/devices/ttydev/ttydev_state_autogen.go32
-rw-r--r--pkg/sentry/devices/tundev/BUILD24
-rw-r--r--pkg/sentry/devices/tundev/tundev_state_autogen.go70
-rw-r--r--pkg/sentry/fdimport/BUILD21
-rw-r--r--pkg/sentry/fdimport/fdimport_state_autogen.go3
-rw-r--r--pkg/sentry/fs/BUILD139
-rw-r--r--pkg/sentry/fs/README.md229
-rw-r--r--pkg/sentry/fs/anon/BUILD20
-rw-r--r--pkg/sentry/fs/anon/anon_state_autogen.go3
-rw-r--r--pkg/sentry/fs/copy_up_test.go183
-rw-r--r--pkg/sentry/fs/dev/BUILD42
-rw-r--r--pkg/sentry/fs/dev/dev_state_autogen.go324
-rw-r--r--pkg/sentry/fs/dirent_cache_test.go247
-rw-r--r--pkg/sentry/fs/dirent_list.go221
-rw-r--r--pkg/sentry/fs/dirent_refs_test.go418
-rw-r--r--pkg/sentry/fs/event_list.go221
-rw-r--r--pkg/sentry/fs/fdpipe/BUILD54
-rw-r--r--pkg/sentry/fs/fdpipe/fdpipe_state_autogen.go41
-rw-r--r--pkg/sentry/fs/fdpipe/pipe_opener_test.go523
-rw-r--r--pkg/sentry/fs/fdpipe/pipe_test.go514
-rw-r--r--pkg/sentry/fs/file_overlay_test.go192
-rw-r--r--pkg/sentry/fs/filetest/BUILD19
-rw-r--r--pkg/sentry/fs/filetest/filetest.go61
-rw-r--r--pkg/sentry/fs/fs_state_autogen.go1207
-rw-r--r--pkg/sentry/fs/fsutil/BUILD119
-rw-r--r--pkg/sentry/fs/fsutil/README.md207
-rw-r--r--pkg/sentry/fs/fsutil/dirty_set_impl.go1647
-rw-r--r--pkg/sentry/fs/fsutil/dirty_set_test.go38
-rw-r--r--pkg/sentry/fs/fsutil/file_range_set_impl.go1647
-rw-r--r--pkg/sentry/fs/fsutil/frame_ref_set_impl.go1647
-rw-r--r--pkg/sentry/fs/fsutil/fsutil_impl_state_autogen.go328
-rw-r--r--pkg/sentry/fs/fsutil/fsutil_state_autogen.go411
-rw-r--r--pkg/sentry/fs/fsutil/fsutil_unsafe_state_autogen.go3
-rw-r--r--pkg/sentry/fs/fsutil/inode_cached_test.go390
-rw-r--r--pkg/sentry/fs/g3doc/.gitignore1
-rw-r--r--pkg/sentry/fs/g3doc/fuse.md360
-rw-r--r--pkg/sentry/fs/g3doc/inotify.md122
-rw-r--r--pkg/sentry/fs/gofer/BUILD72
-rw-r--r--pkg/sentry/fs/gofer/gofer_state_autogen.go271
-rw-r--r--pkg/sentry/fs/gofer/gofer_test.go310
-rw-r--r--pkg/sentry/fs/host/BUILD87
-rw-r--r--pkg/sentry/fs/host/descriptor_test.go78
-rw-r--r--pkg/sentry/fs/host/host_amd64_unsafe_state_autogen.go5
-rw-r--r--pkg/sentry/fs/host/host_arm64_unsafe_state_autogen.go5
-rw-r--r--pkg/sentry/fs/host/host_state_autogen.go220
-rw-r--r--pkg/sentry/fs/host/host_unsafe_state_autogen.go3
-rw-r--r--pkg/sentry/fs/host/inode_test.go45
-rw-r--r--pkg/sentry/fs/host/socket_test.go252
-rw-r--r--pkg/sentry/fs/host/wait_test.go69
-rw-r--r--pkg/sentry/fs/inode_overlay_test.go470
-rw-r--r--pkg/sentry/fs/lock/BUILD62
-rw-r--r--pkg/sentry/fs/lock/lock_range.go76
-rw-r--r--pkg/sentry/fs/lock/lock_range_test.go137
-rw-r--r--pkg/sentry/fs/lock/lock_set.go1643
-rw-r--r--pkg/sentry/fs/lock/lock_state_autogen.go232
-rw-r--r--pkg/sentry/fs/lock/lock_test.go1060
-rw-r--r--pkg/sentry/fs/mount_test.go273
-rw-r--r--pkg/sentry/fs/mounts_test.go105
-rw-r--r--pkg/sentry/fs/path_test.go289
-rw-r--r--pkg/sentry/fs/proc/BUILD75
-rw-r--r--pkg/sentry/fs/proc/README.md336
-rw-r--r--pkg/sentry/fs/proc/device/BUILD10
-rw-r--r--pkg/sentry/fs/proc/device/device_state_autogen.go3
-rw-r--r--pkg/sentry/fs/proc/net_test.go74
-rw-r--r--pkg/sentry/fs/proc/proc_state_autogen.go1835
-rw-r--r--pkg/sentry/fs/proc/seqfile/BUILD36
-rw-r--r--pkg/sentry/fs/proc/seqfile/seqfile_state_autogen.go106
-rw-r--r--pkg/sentry/fs/proc/seqfile/seqfile_test.go279
-rw-r--r--pkg/sentry/fs/proc/sys_net_test.go198
-rw-r--r--pkg/sentry/fs/ramfs/BUILD39
-rw-r--r--pkg/sentry/fs/ramfs/ramfs_state_autogen.go182
-rw-r--r--pkg/sentry/fs/ramfs/tree_test.go80
-rw-r--r--pkg/sentry/fs/sys/BUILD24
-rw-r--r--pkg/sentry/fs/sys/sys_state_autogen.go61
-rw-r--r--pkg/sentry/fs/timerfd/BUILD21
-rw-r--r--pkg/sentry/fs/timerfd/timerfd_state_autogen.go42
-rw-r--r--pkg/sentry/fs/tmpfs/BUILD52
-rw-r--r--pkg/sentry/fs/tmpfs/file_test.go73
-rw-r--r--pkg/sentry/fs/tmpfs/tmpfs_state_autogen.go211
-rw-r--r--pkg/sentry/fs/tty/BUILD50
-rw-r--r--pkg/sentry/fs/tty/tty_state_autogen.go407
-rw-r--r--pkg/sentry/fs/tty/tty_test.go56
-rw-r--r--pkg/sentry/fs/user/BUILD41
-rw-r--r--pkg/sentry/fs/user/user_state_autogen.go3
-rw-r--r--pkg/sentry/fs/user/user_test.go201
-rw-r--r--pkg/sentry/fsbridge/BUILD24
-rw-r--r--pkg/sentry/fsbridge/fsbridge_state_autogen.go126
-rw-r--r--pkg/sentry/fsimpl/cgroupfs/BUILD49
-rw-r--r--pkg/sentry/fsimpl/cgroupfs/cgroupfs_state_autogen.go687
-rw-r--r--pkg/sentry/fsimpl/cgroupfs/dir_refs.go (renamed from pkg/refsvfs2/refs_template.go)61
-rw-r--r--pkg/sentry/fsimpl/devpts/BUILD65
-rw-r--r--pkg/sentry/fsimpl/devpts/devpts_state_autogen.go502
-rw-r--r--pkg/sentry/fsimpl/devpts/devpts_test.go90
-rw-r--r--pkg/sentry/fsimpl/devpts/root_inode_refs.go140
-rw-r--r--pkg/sentry/fsimpl/devtmpfs/BUILD37
-rw-r--r--pkg/sentry/fsimpl/devtmpfs/devtmpfs_state_autogen.go41
-rw-r--r--pkg/sentry/fsimpl/devtmpfs/devtmpfs_test.go230
-rw-r--r--pkg/sentry/fsimpl/eventfd/BUILD35
-rw-r--r--pkg/sentry/fsimpl/eventfd/eventfd_state_autogen.go57
-rw-r--r--pkg/sentry/fsimpl/eventfd/eventfd_test.go97
-rw-r--r--pkg/sentry/fsimpl/ext/BUILD104
-rw-r--r--pkg/sentry/fsimpl/ext/README.md117
-rw-r--r--pkg/sentry/fsimpl/ext/assets/README.md36
-rw-r--r--pkg/sentry/fsimpl/ext/assets/bigfile.txt41
-rw-r--r--pkg/sentry/fsimpl/ext/assets/file.txt1
l---------pkg/sentry/fsimpl/ext/assets/symlink.txt1
-rw-r--r--pkg/sentry/fsimpl/ext/assets/tiny.ext2 bin 65536 -> 0 bytes
-rw-r--r--pkg/sentry/fsimpl/ext/assets/tiny.ext3 bin 65536 -> 0 bytes
-rw-r--r--pkg/sentry/fsimpl/ext/assets/tiny.ext4 bin 65536 -> 0 bytes
-rw-r--r--pkg/sentry/fsimpl/ext/benchmark/BUILD17
-rw-r--r--pkg/sentry/fsimpl/ext/benchmark/benchmark_test.go211
-rwxr-xr-xpkg/sentry/fsimpl/ext/benchmark/make_deep_ext4.sh72
-rw-r--r--pkg/sentry/fsimpl/ext/block_map_file.go204
-rw-r--r--pkg/sentry/fsimpl/ext/block_map_test.go160
-rw-r--r--pkg/sentry/fsimpl/ext/dentry.go82
-rw-r--r--pkg/sentry/fsimpl/ext/directory.go312
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/BUILD48
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/block_group.go143
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/block_group_32.go74
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/block_group_64.go95
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/block_group_test.go28
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/dirent.go75
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/dirent_new.go63
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/dirent_old.go51
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/dirent_test.go28
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/disklayout.go48
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/extent.go155
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/extent_test.go30
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/inode.go277
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/inode_new.go98
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/inode_old.go119
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/inode_test.go224
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/superblock.go477
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/superblock_32.go78
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/superblock_64.go97
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/superblock_old.go107
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/superblock_test.go30
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/test_utils.go30
-rw-r--r--pkg/sentry/fsimpl/ext/ext.go159
-rw-r--r--pkg/sentry/fsimpl/ext/ext_test.go926
-rw-r--r--pkg/sentry/fsimpl/ext/extent_file.go240
-rw-r--r--pkg/sentry/fsimpl/ext/extent_test.go266
-rw-r--r--pkg/sentry/fsimpl/ext/file_description.go65
-rw-r--r--pkg/sentry/fsimpl/ext/filesystem.go556
-rw-r--r--pkg/sentry/fsimpl/ext/inode.go247
-rw-r--r--pkg/sentry/fsimpl/ext/regular_file.go155
-rw-r--r--pkg/sentry/fsimpl/ext/symlink.go115
-rw-r--r--pkg/sentry/fsimpl/ext/utils.go94
-rw-r--r--pkg/sentry/fsimpl/fuse/BUILD92
-rw-r--r--pkg/sentry/fsimpl/fuse/connection_test.go111
-rw-r--r--pkg/sentry/fsimpl/fuse/dev_test.go320
-rw-r--r--pkg/sentry/fsimpl/fuse/fuse_state_autogen.go560
-rw-r--r--pkg/sentry/fsimpl/fuse/inode_refs.go140
-rw-r--r--pkg/sentry/fsimpl/fuse/request_list.go221
-rw-r--r--pkg/sentry/fsimpl/fuse/utils_test.go127
-rw-r--r--pkg/sentry/fsimpl/gofer/BUILD99
-rw-r--r--pkg/sentry/fsimpl/gofer/dentry_list.go221
-rw-r--r--pkg/sentry/fsimpl/gofer/fstree.go55
-rw-r--r--pkg/sentry/fsimpl/gofer/gofer_state_autogen.go608
-rw-r--r--pkg/sentry/fsimpl/gofer/gofer_test.go68
-rw-r--r--pkg/sentry/fsimpl/host/BUILD80
-rw-r--r--pkg/sentry/fsimpl/host/connected_endpoint_refs.go140
-rw-r--r--pkg/sentry/fsimpl/host/host_state_autogen.go327
-rw-r--r--pkg/sentry/fsimpl/host/host_unsafe_state_autogen.go3
-rw-r--r--pkg/sentry/fsimpl/host/inode_refs.go140
-rw-r--r--pkg/sentry/fsimpl/kernfs/BUILD151
-rw-r--r--pkg/sentry/fsimpl/kernfs/dentry_list.go221
-rw-r--r--pkg/sentry/fsimpl/kernfs/fstree.go55
-rw-r--r--pkg/sentry/fsimpl/kernfs/kernfs_state_autogen.go965
-rw-r--r--pkg/sentry/fsimpl/kernfs/kernfs_test.go348
-rw-r--r--pkg/sentry/fsimpl/kernfs/slot_list.go221
-rw-r--r--pkg/sentry/fsimpl/kernfs/static_directory_refs.go140
-rw-r--r--pkg/sentry/fsimpl/kernfs/synthetic_directory_refs.go140
-rw-r--r--pkg/sentry/fsimpl/overlay/BUILD49
-rw-r--r--pkg/sentry/fsimpl/overlay/fstree.go55
-rw-r--r--pkg/sentry/fsimpl/overlay/overlay_state_autogen.go321
-rw-r--r--pkg/sentry/fsimpl/pipefs/BUILD21
-rw-r--r--pkg/sentry/fsimpl/pipefs/pipefs_state_autogen.go111
-rw-r--r--pkg/sentry/fsimpl/proc/BUILD134
-rw-r--r--pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go140
-rw-r--r--pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go140
-rw-r--r--pkg/sentry/fsimpl/proc/proc_state_autogen.go2366
-rw-r--r--pkg/sentry/fsimpl/proc/subtasks_inode_refs.go140
-rw-r--r--pkg/sentry/fsimpl/proc/task_inode_refs.go140
-rw-r--r--pkg/sentry/fsimpl/proc/tasks_inode_refs.go140
-rw-r--r--pkg/sentry/fsimpl/proc/tasks_sys_test.go149
-rw-r--r--pkg/sentry/fsimpl/proc/tasks_test.go511
-rw-r--r--pkg/sentry/fsimpl/signalfd/BUILD19
-rw-r--r--pkg/sentry/fsimpl/signalfd/signalfd_state_autogen.go51
-rw-r--r--pkg/sentry/fsimpl/sockfs/BUILD18
-rw-r--r--pkg/sentry/fsimpl/sockfs/sockfs_state_autogen.go96
-rw-r--r--pkg/sentry/fsimpl/sys/BUILD56
-rw-r--r--pkg/sentry/fsimpl/sys/dir_refs.go140
-rw-r--r--pkg/sentry/fsimpl/sys/sys_state_autogen.go263
-rw-r--r--pkg/sentry/fsimpl/sys/sys_test.go89
-rw-r--r--pkg/sentry/fsimpl/testutil/BUILD38
-rw-r--r--pkg/sentry/fsimpl/testutil/kernel.go182
-rw-r--r--pkg/sentry/fsimpl/testutil/testutil.go288
-rw-r--r--pkg/sentry/fsimpl/timerfd/BUILD19
-rw-r--r--pkg/sentry/fsimpl/timerfd/timerfd_state_autogen.go54
-rw-r--r--pkg/sentry/fsimpl/tmpfs/BUILD131
-rw-r--r--pkg/sentry/fsimpl/tmpfs/benchmark_test.go488
-rw-r--r--pkg/sentry/fsimpl/tmpfs/dentry_list.go221
-rw-r--r--pkg/sentry/fsimpl/tmpfs/fstree.go55
-rw-r--r--pkg/sentry/fsimpl/tmpfs/inode_refs.go140
-rw-r--r--pkg/sentry/fsimpl/tmpfs/pipe_test.go240
-rw-r--r--pkg/sentry/fsimpl/tmpfs/regular_file_test.go349
-rw-r--r--pkg/sentry/fsimpl/tmpfs/stat_test.go236
-rw-r--r--pkg/sentry/fsimpl/tmpfs/tmpfs_state_autogen.go593
-rw-r--r--pkg/sentry/fsimpl/tmpfs/tmpfs_test.go157
-rw-r--r--pkg/sentry/fsimpl/verity/BUILD55
-rw-r--r--pkg/sentry/fsimpl/verity/verity_state_autogen.go240
-rw-r--r--pkg/sentry/fsimpl/verity/verity_test.go1211
-rw-r--r--pkg/sentry/fsmetric/BUILD10
-rw-r--r--pkg/sentry/fsmetric/fsmetric_state_autogen.go3
-rw-r--r--pkg/sentry/hostcpu/BUILD20
-rw-r--r--pkg/sentry/hostcpu/hostcpu_state_autogen.go3
-rw-r--r--pkg/sentry/hostcpu/hostcpu_test.go52
-rw-r--r--pkg/sentry/hostfd/BUILD19
-rw-r--r--pkg/sentry/hostfd/hostfd_linux_state_autogen.go5
-rw-r--r--pkg/sentry/hostfd/hostfd_state_autogen.go3
-rw-r--r--pkg/sentry/hostfd/hostfd_unsafe_state_autogen.go3
-rw-r--r--pkg/sentry/hostmm/BUILD20
-rw-r--r--pkg/sentry/hostmm/hostmm_state_autogen.go3
-rw-r--r--pkg/sentry/inet/BUILD21
-rw-r--r--pkg/sentry/inet/inet_state_autogen.go70
-rw-r--r--pkg/sentry/kernel/BUILD317
-rw-r--r--pkg/sentry/kernel/README.md108
-rw-r--r--pkg/sentry/kernel/auth/BUILD71
-rw-r--r--pkg/sentry/kernel/auth/atomicptr_credentials_unsafe.go (renamed from pkg/sync/atomicptr/generic_atomicptr_unsafe.go)26
-rw-r--r--pkg/sentry/kernel/auth/auth_abi_autogen_unsafe.go280
-rw-r--r--pkg/sentry/kernel/auth/auth_state_autogen.go280
-rw-r--r--pkg/sentry/kernel/auth/auth_unsafe_abi_autogen_unsafe.go13
-rw-r--r--pkg/sentry/kernel/auth/auth_unsafe_state_autogen.go37
-rw-r--r--pkg/sentry/kernel/auth/id_map_range.go76
-rw-r--r--pkg/sentry/kernel/auth/id_map_set.go1643
-rw-r--r--pkg/sentry/kernel/contexttest/BUILD17
-rw-r--r--pkg/sentry/kernel/contexttest/contexttest.go40
-rw-r--r--pkg/sentry/kernel/epoll/BUILD52
-rw-r--r--pkg/sentry/kernel/epoll/epoll_list.go221
-rw-r--r--pkg/sentry/kernel/epoll/epoll_state_autogen.go192
-rw-r--r--pkg/sentry/kernel/epoll/epoll_test.go55
-rw-r--r--pkg/sentry/kernel/eventfd/BUILD35
-rw-r--r--pkg/sentry/kernel/eventfd/eventfd_state_autogen.go45
-rw-r--r--pkg/sentry/kernel/eventfd/eventfd_test.go78
-rw-r--r--pkg/sentry/kernel/fasync/BUILD19
-rw-r--r--pkg/sentry/kernel/fasync/fasync_state_autogen.go57
-rw-r--r--pkg/sentry/kernel/fd_table_refs.go140
-rw-r--r--pkg/sentry/kernel/fd_table_test.go228
-rw-r--r--pkg/sentry/kernel/fs_context_refs.go140
-rw-r--r--pkg/sentry/kernel/futex/BUILD61
-rw-r--r--pkg/sentry/kernel/futex/atomicptr_bucket_unsafe.go37
-rw-r--r--pkg/sentry/kernel/futex/futex_state_autogen.go122
-rw-r--r--pkg/sentry/kernel/futex/futex_test.go536
-rw-r--r--pkg/sentry/kernel/futex/futex_unsafe_state_autogen.go37
-rw-r--r--pkg/sentry/kernel/futex/waiter_list.go221
-rw-r--r--pkg/sentry/kernel/g3doc/run_states.dot99
-rw-r--r--pkg/sentry/kernel/g3doc/run_states.png bin 234152 -> 0 bytes
-rw-r--r--pkg/sentry/kernel/ipc_namespace_refs.go140
-rw-r--r--pkg/sentry/kernel/kernel_abi_autogen_unsafe.go230
-rw-r--r--pkg/sentry/kernel/kernel_amd64_abi_autogen_unsafe.go15
-rw-r--r--pkg/sentry/kernel/kernel_amd64_state_autogen.go5
-rw-r--r--pkg/sentry/kernel/kernel_arm64_abi_autogen_unsafe.go15
-rw-r--r--pkg/sentry/kernel/kernel_arm64_state_autogen.go5
-rw-r--r--pkg/sentry/kernel/kernel_opts_abi_autogen_unsafe.go15
-rw-r--r--pkg/sentry/kernel/kernel_opts_state_autogen.go34
-rw-r--r--pkg/sentry/kernel/kernel_state_autogen.go2599
-rw-r--r--pkg/sentry/kernel/kernel_unsafe_abi_autogen_unsafe.go13
-rw-r--r--pkg/sentry/kernel/kernel_unsafe_state_autogen.go3
-rw-r--r--pkg/sentry/kernel/memevent/BUILD24
-rw-r--r--pkg/sentry/kernel/memevent/memevent_state_autogen.go3
-rw-r--r--pkg/sentry/kernel/memevent/memory_events.proto29
-rw-r--r--pkg/sentry/kernel/memevent/memory_events_go_proto/memory_events.pb.go158
-rw-r--r--pkg/sentry/kernel/pending_signals_list.go221
-rw-r--r--pkg/sentry/kernel/pipe/BUILD58
-rw-r--r--pkg/sentry/kernel/pipe/node_test.go321
-rw-r--r--pkg/sentry/kernel/pipe/pipe_state_autogen.go227
-rw-r--r--pkg/sentry/kernel/pipe/pipe_test.go140
-rw-r--r--pkg/sentry/kernel/pipe/pipe_unsafe_state_autogen.go3
-rw-r--r--pkg/sentry/kernel/process_group_list.go221
-rw-r--r--pkg/sentry/kernel/process_group_refs.go140
-rw-r--r--pkg/sentry/kernel/sched/BUILD19
-rw-r--r--pkg/sentry/kernel/sched/cpuset_test.go44
-rw-r--r--pkg/sentry/kernel/sched/sched_state_autogen.go3
-rw-r--r--pkg/sentry/kernel/semaphore/BUILD50
-rw-r--r--pkg/sentry/kernel/semaphore/semaphore_state_autogen.go220
-rw-r--r--pkg/sentry/kernel/semaphore/semaphore_test.go172
-rw-r--r--pkg/sentry/kernel/semaphore/waiter_list.go221
-rw-r--r--pkg/sentry/kernel/seqatomic_taskgoroutineschedinfo_unsafe.go38
-rw-r--r--pkg/sentry/kernel/session_list.go221
-rw-r--r--pkg/sentry/kernel/session_refs.go140
-rw-r--r--pkg/sentry/kernel/shm/BUILD47
-rw-r--r--pkg/sentry/kernel/shm/shm_refs.go140
-rw-r--r--pkg/sentry/kernel/shm/shm_state_autogen.go147
-rw-r--r--pkg/sentry/kernel/signalfd/BUILD22
-rw-r--r--pkg/sentry/kernel/signalfd/signalfd_state_autogen.go39
-rw-r--r--pkg/sentry/kernel/socket_list.go221
-rw-r--r--pkg/sentry/kernel/table_test.go110
-rw-r--r--pkg/sentry/kernel/task_list.go221
-rw-r--r--pkg/sentry/kernel/task_test.go69
-rw-r--r--pkg/sentry/kernel/time/BUILD20
-rw-r--r--pkg/sentry/kernel/time/time_state_autogen.go103
-rw-r--r--pkg/sentry/kernel/timekeeper_test.go156
-rw-r--r--pkg/sentry/kernel/uncaught_signal.proto37
-rw-r--r--pkg/sentry/kernel/uncaught_signal_go_proto/uncaught_signal.pb.go193
-rw-r--r--pkg/sentry/limits/BUILD29
-rw-r--r--pkg/sentry/limits/limits_state_autogen.go65
-rw-r--r--pkg/sentry/limits/limits_test.go44
-rw-r--r--pkg/sentry/loader/BUILD43
-rw-r--r--pkg/sentry/loader/loader_abi_autogen_unsafe.go13
-rw-r--r--pkg/sentry/loader/loader_state_autogen.go96
-rw-r--r--pkg/sentry/loader/vdsodata/BUILD38
-rw-r--r--pkg/sentry/loader/vdsodata/vdso_amd64.go7
-rw-r--r--pkg/sentry/loader/vdsodata/vdso_arm64.go7
-rw-r--r--pkg/sentry/memmap/BUILD68
-rw-r--r--pkg/sentry/memmap/file_range.go76
-rw-r--r--pkg/sentry/memmap/mappable_range.go76
-rw-r--r--pkg/sentry/memmap/mapping_set_impl.go1643
-rw-r--r--pkg/sentry/memmap/mapping_set_test.go259
-rw-r--r--pkg/sentry/memmap/memmap_impl_state_autogen.go116
-rw-r--r--pkg/sentry/memmap/memmap_state_autogen.go100
-rw-r--r--pkg/sentry/mm/BUILD170
-rw-r--r--pkg/sentry/mm/README.md280
-rw-r--r--pkg/sentry/mm/aio_mappable_refs.go140
-rw-r--r--pkg/sentry/mm/file_refcount_set.go1647
-rw-r--r--pkg/sentry/mm/io_list.go221
-rw-r--r--pkg/sentry/mm/mm_state_autogen.go808
-rw-r--r--pkg/sentry/mm/mm_test.go275
-rw-r--r--pkg/sentry/mm/pma_set.go1647
-rw-r--r--pkg/sentry/mm/special_mappable_refs.go140
-rw-r--r--pkg/sentry/mm/vma_set.go1647
-rw-r--r--pkg/sentry/pgalloc/BUILD112
-rw-r--r--pkg/sentry/pgalloc/evictable_range.go76
-rw-r--r--pkg/sentry/pgalloc/evictable_range_set.go1643
-rw-r--r--pkg/sentry/pgalloc/pgalloc_state_autogen.go389
-rw-r--r--pkg/sentry/pgalloc/pgalloc_test.go246
-rw-r--r--pkg/sentry/pgalloc/pgalloc_unsafe_state_autogen.go3
-rw-r--r--pkg/sentry/pgalloc/reclaim_set.go (renamed from pkg/segment/set.go)687
-rw-r--r--pkg/sentry/pgalloc/usage_set.go1647
-rw-r--r--pkg/sentry/platform/BUILD23
-rw-r--r--pkg/sentry/platform/interrupt/BUILD19
-rw-r--r--pkg/sentry/platform/interrupt/interrupt_state_autogen.go3
-rw-r--r--pkg/sentry/platform/interrupt/interrupt_test.go99
-rw-r--r--pkg/sentry/platform/kvm/BUILD101
-rw-r--r--pkg/sentry/platform/kvm/bluepill_impl_amd64.s (renamed from pkg/sentry/platform/kvm/bluepill_amd64.s)70
-rw-r--r--pkg/sentry/platform/kvm/kvm_amd64_state_autogen.go7
-rw-r--r--pkg/sentry/platform/kvm/kvm_amd64_test.go90
-rw-r--r--pkg/sentry/platform/kvm/kvm_amd64_test.s21
-rw-r--r--pkg/sentry/platform/kvm/kvm_amd64_unsafe_state_autogen.go7
-rw-r--r--pkg/sentry/platform/kvm/kvm_arm64_state_autogen.go7
-rw-r--r--pkg/sentry/platform/kvm/kvm_arm64_test.go32
-rw-r--r--pkg/sentry/platform/kvm/kvm_arm64_unsafe_state_autogen.go7
-rw-r--r--pkg/sentry/platform/kvm/kvm_state_autogen.go3
-rw-r--r--pkg/sentry/platform/kvm/kvm_test.go529
-rw-r--r--pkg/sentry/platform/kvm/kvm_unsafe_state_autogen.go6
-rw-r--r--pkg/sentry/platform/kvm/testutil/BUILD17
-rw-r--r--pkg/sentry/platform/kvm/testutil/testutil.go90
-rw-r--r--pkg/sentry/platform/kvm/testutil/testutil_amd64.go142
-rw-r--r--pkg/sentry/platform/kvm/testutil/testutil_amd64.s135
-rw-r--r--pkg/sentry/platform/kvm/testutil/testutil_arm64.go70
-rw-r--r--pkg/sentry/platform/kvm/testutil/testutil_arm64.s134
-rw-r--r--pkg/sentry/platform/kvm/virtual_map_test.go93
-rw-r--r--pkg/sentry/platform/platform_state_autogen.go3
-rw-r--r--pkg/sentry/platform/ptrace/BUILD41
-rw-r--r--pkg/sentry/platform/ptrace/ptrace_amd64_state_autogen.go5
-rw-r--r--pkg/sentry/platform/ptrace/ptrace_arm64_state_autogen.go5
-rw-r--r--pkg/sentry/platform/ptrace/ptrace_arm64_unsafe_state_autogen.go5
-rw-r--r--pkg/sentry/platform/ptrace/ptrace_linux_state_autogen.go5
-rw-r--r--pkg/sentry/platform/ptrace/ptrace_linux_unsafe_state_autogen.go6
-rw-r--r--pkg/sentry/platform/ptrace/ptrace_state_autogen.go3
-rw-r--r--pkg/sentry/platform/ptrace/ptrace_unsafe_state_autogen.go5
-rw-r--r--pkg/sentry/sighandling/BUILD16
-rw-r--r--pkg/sentry/sighandling/sighandling_state_autogen.go3
-rw-r--r--pkg/sentry/sighandling/sighandling_unsafe_state_autogen.go3
-rw-r--r--pkg/sentry/socket/BUILD27
-rw-r--r--pkg/sentry/socket/control/BUILD44
-rw-r--r--pkg/sentry/socket/control/control_state_autogen.go60
-rw-r--r--pkg/sentry/socket/control/control_test.go59
-rw-r--r--pkg/sentry/socket/hostinet/BUILD48
-rw-r--r--pkg/sentry/socket/hostinet/hostinet_impl_state_autogen.go5
-rw-r--r--pkg/sentry/socket/hostinet/hostinet_state_autogen.go89
-rw-r--r--pkg/sentry/socket/hostinet/hostinet_unsafe_state_autogen.go3
-rw-r--r--pkg/sentry/socket/netfilter/BUILD34
-rw-r--r--pkg/sentry/socket/netfilter/netfilter_abi_autogen_unsafe.go156
-rw-r--r--pkg/sentry/socket/netfilter/netfilter_state_autogen.go3
-rw-r--r--pkg/sentry/socket/netlink/BUILD58
-rw-r--r--pkg/sentry/socket/netlink/message_test.go330
-rw-r--r--pkg/sentry/socket/netlink/netlink_state_autogen.go149
-rw-r--r--pkg/sentry/socket/netlink/port/BUILD16
-rw-r--r--pkg/sentry/socket/netlink/port/port_state_autogen.go36
-rw-r--r--pkg/sentry/socket/netlink/port/port_test.go82
-rw-r--r--pkg/sentry/socket/netlink/route/BUILD22
-rw-r--r--pkg/sentry/socket/netlink/route/route_state_autogen.go32
-rw-r--r--pkg/sentry/socket/netlink/uevent/BUILD16
-rw-r--r--pkg/sentry/socket/netlink/uevent/uevent_state_autogen.go32
-rw-r--r--pkg/sentry/socket/netstack/BUILD58
-rw-r--r--pkg/sentry/socket/netstack/netstack_state_autogen.go148
-rw-r--r--pkg/sentry/socket/socket_state_autogen.go98
-rw-r--r--pkg/sentry/socket/unix/BUILD71
-rw-r--r--pkg/sentry/socket/unix/socket_refs.go140
-rw-r--r--pkg/sentry/socket/unix/socket_vfs2_refs.go140
-rw-r--r--pkg/sentry/socket/unix/transport/BUILD56
-rw-r--r--pkg/sentry/socket/unix/transport/queue_refs.go140
-rw-r--r--pkg/sentry/socket/unix/transport/transport_message_list.go221
-rw-r--r--pkg/sentry/socket/unix/transport/transport_state_autogen.go398
-rw-r--r--pkg/sentry/socket/unix/unix_state_autogen.go168
-rw-r--r--pkg/sentry/state/BUILD26
-rw-r--r--pkg/sentry/state/state_state_autogen.go5
-rw-r--r--pkg/sentry/state/state_unsafe_state_autogen.go3
-rw-r--r--pkg/sentry/strace/BUILD46
-rw-r--r--pkg/sentry/strace/strace.proto49
-rw-r--r--pkg/sentry/strace/strace_amd64_state_autogen.go5
-rw-r--r--pkg/sentry/strace/strace_arm64_state_autogen.go5
-rw-r--r--pkg/sentry/strace/strace_go_proto/strace.pb.go362
-rw-r--r--pkg/sentry/strace/strace_state_autogen.go3
-rw-r--r--pkg/sentry/syscalls/BUILD22
-rw-r--r--pkg/sentry/syscalls/linux/BUILD109
-rw-r--r--pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go718
-rw-r--r--pkg/sentry/syscalls/linux/linux_amd64_abi_autogen_unsafe.go17
-rw-r--r--pkg/sentry/syscalls/linux/linux_amd64_state_autogen.go7
-rw-r--r--pkg/sentry/syscalls/linux/linux_arm64_abi_autogen_unsafe.go17
-rw-r--r--pkg/sentry/syscalls/linux/linux_arm64_state_autogen.go7
-rw-r--r--pkg/sentry/syscalls/linux/linux_state_autogen.go112
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/BUILD80
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go372
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/vfs2_amd64_abi_autogen_unsafe.go15
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/vfs2_amd64_state_autogen.go5
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/vfs2_arm64_abi_autogen_unsafe.go15
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/vfs2_arm64_state_autogen.go5
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/vfs2_state_autogen.go42
-rw-r--r--pkg/sentry/syscalls/syscalls_state_autogen.go3
-rw-r--r--pkg/sentry/time/BUILD55
-rw-r--r--pkg/sentry/time/LICENSE27
-rw-r--r--pkg/sentry/time/calibrated_clock_test.go187
-rw-r--r--pkg/sentry/time/parameters_test.go501
-rw-r--r--pkg/sentry/time/sampler_test.go183
-rw-r--r--pkg/sentry/time/seqatomic_parameters_unsafe.go38
-rw-r--r--pkg/sentry/time/time_amd64_state_autogen.go5
-rw-r--r--pkg/sentry/time/time_arm64_state_autogen.go5
-rw-r--r--pkg/sentry/time/time_state_autogen.go3
-rw-r--r--pkg/sentry/time/time_unsafe_state_autogen.go3
-rw-r--r--pkg/sentry/unimpl/BUILD20
-rw-r--r--pkg/sentry/unimpl/unimpl_state_autogen.go3
-rw-r--r--pkg/sentry/unimpl/unimplemented_syscall.proto27
-rw-r--r--pkg/sentry/unimpl/unimplemented_syscall_go_proto/unimplemented_syscall.pb.go164
-rw-r--r--pkg/sentry/uniqueid/BUILD13
-rw-r--r--pkg/sentry/uniqueid/uniqueid_state_autogen.go3
-rw-r--r--pkg/sentry/usage/BUILD23
-rw-r--r--pkg/sentry/usage/usage_state_autogen.go86
-rw-r--r--pkg/sentry/usage/usage_unsafe_state_autogen.go3
-rw-r--r--pkg/sentry/vfs/BUILD143
-rw-r--r--pkg/sentry/vfs/README.md186
-rw-r--r--pkg/sentry/vfs/epoll_interest_list.go221
-rw-r--r--pkg/sentry/vfs/event_list.go221
-rw-r--r--pkg/sentry/vfs/file_description_impl_util_test.go225
-rw-r--r--pkg/sentry/vfs/file_description_refs.go140
-rw-r--r--pkg/sentry/vfs/filesystem_refs.go140
-rw-r--r--pkg/sentry/vfs/g3doc/inotify.md210
-rw-r--r--pkg/sentry/vfs/genericfstree/BUILD16
-rw-r--r--pkg/sentry/vfs/genericfstree/genericfstree.go92
-rw-r--r--pkg/sentry/vfs/memxattr/BUILD16
-rw-r--r--pkg/sentry/vfs/memxattr/memxattr_state_autogen.go36
-rw-r--r--pkg/sentry/vfs/mount_namespace_refs.go140
-rw-r--r--pkg/sentry/vfs/mount_test.go467
-rw-r--r--pkg/sentry/vfs/vfs_state_autogen.go2052
-rw-r--r--pkg/sentry/vfs/vfs_unsafe_state_autogen.go3
-rw-r--r--pkg/sentry/watchdog/BUILD17
-rw-r--r--pkg/sentry/watchdog/watchdog_state_autogen.go3
-rw-r--r--pkg/shim/BUILD60
-rw-r--r--pkg/shim/proc/BUILD38
-rw-r--r--pkg/shim/proc/proc_state_autogen.go3
-rw-r--r--pkg/shim/runsc/BUILD18
-rw-r--r--pkg/shim/runsc/runsc_state_autogen.go3
-rw-r--r--pkg/shim/runtimeoptions/BUILD32
-rw-r--r--pkg/shim/runtimeoptions/runtimeoptions.proto25
-rw-r--r--pkg/shim/runtimeoptions/runtimeoptions_state_autogen.go5
-rw-r--r--pkg/shim/runtimeoptions/runtimeoptions_test.go57
-rw-r--r--pkg/shim/service_test.go121
-rw-r--r--pkg/shim/shim_linux_state_autogen.go5
-rw-r--r--pkg/shim/shim_state_autogen.go5
-rw-r--r--pkg/shim/utils/BUILD37
-rw-r--r--pkg/shim/utils/errors_test.go50
-rw-r--r--pkg/shim/utils/utils_state_autogen.go3
-rw-r--r--pkg/shim/utils/volumes_test.go330
-rw-r--r--pkg/sleep/BUILD21
-rw-r--r--pkg/sleep/sleep_test.go567
-rw-r--r--pkg/sleep/sleep_unsafe_state_autogen.go3
-rw-r--r--pkg/state/BUILD86
-rw-r--r--pkg/state/README.md158
-rw-r--r--pkg/state/addr_range.go76
-rw-r--r--pkg/state/addr_set.go1643
-rw-r--r--pkg/state/complete_list.go221
-rw-r--r--pkg/state/deferred_list.go206
-rw-r--r--pkg/state/pretty/BUILD13
-rw-r--r--pkg/state/pretty/pretty_state_autogen.go3
-rw-r--r--pkg/state/statefile/BUILD21
-rw-r--r--pkg/state/statefile/statefile_state_autogen.go3
-rw-r--r--pkg/state/statefile/statefile_test.go290
-rw-r--r--pkg/state/tests/BUILD43
-rw-r--r--pkg/state/tests/array.go35
-rw-r--r--pkg/state/tests/array_test.go134
-rw-r--r--pkg/state/tests/bench.go24
-rw-r--r--pkg/state/tests/bench_test.go153
-rw-r--r--pkg/state/tests/bool_test.go31
-rw-r--r--pkg/state/tests/float_test.go118
-rw-r--r--pkg/state/tests/integer.go163
-rw-r--r--pkg/state/tests/integer_test.go94
-rw-r--r--pkg/state/tests/load.go61
-rw-r--r--pkg/state/tests/load_test.go78
-rw-r--r--pkg/state/tests/map.go28
-rw-r--r--pkg/state/tests/map_test.go90
-rw-r--r--pkg/state/tests/register.go21
-rw-r--r--pkg/state/tests/register_test.go179
-rw-r--r--pkg/state/tests/string_test.go34
-rw-r--r--pkg/state/tests/struct.go100
-rw-r--r--pkg/state/tests/struct_test.go100
-rw-r--r--pkg/state/tests/tests.go215
-rw-r--r--pkg/state/wire/BUILD12
-rw-r--r--pkg/sync/BUILD49
-rw-r--r--pkg/sync/LICENSE27
-rw-r--r--pkg/sync/README.md5
-rw-r--r--pkg/sync/atomicptr/BUILD36
-rw-r--r--pkg/sync/atomicptr/atomicptr_test.go31
-rw-r--r--pkg/sync/atomicptrmap/BUILD76
-rw-r--r--pkg/sync/atomicptrmap/atomicptrmap.go20
-rw-r--r--pkg/sync/atomicptrmap/atomicptrmap_test.go635
-rw-r--r--pkg/sync/atomicptrmap/generic_atomicptrmap_unsafe.go500
-rw-r--r--pkg/sync/gate_test.go129
-rw-r--r--pkg/sync/mutex_test.go71
-rw-r--r--pkg/sync/rwmutex_test.go205
-rw-r--r--pkg/sync/seqatomic/BUILD45
-rw-r--r--pkg/sync/seqatomic/generic_seqatomic_unsafe.go50
-rw-r--r--pkg/sync/seqatomic/seqatomic_test.go132
-rw-r--r--pkg/sync/seqcount_test.go100
-rw-r--r--pkg/syncevent/BUILD35
-rw-r--r--pkg/syncevent/broadcaster.go220
-rw-r--r--pkg/syncevent/broadcaster_test.go376
-rw-r--r--pkg/syncevent/receiver.go103
-rw-r--r--pkg/syncevent/source.go61
-rw-r--r--pkg/syncevent/syncevent.go32
-rw-r--r--pkg/syncevent/syncevent_example_test.go108
-rw-r--r--pkg/syncevent/waiter_test.go414
-rw-r--r--pkg/syncevent/waiter_unsafe.go197
-rw-r--r--pkg/syserr/BUILD21
-rw-r--r--pkg/syserr/syserr_linux_state_autogen.go5
-rw-r--r--pkg/syserr/syserr_state_autogen.go3
-rw-r--r--pkg/syserror/BUILD10
-rw-r--r--pkg/syserror/syserror_state_autogen.go3
-rw-r--r--pkg/tcpip/BUILD99
-rw-r--r--pkg/tcpip/adapters/gonet/BUILD37
-rw-r--r--pkg/tcpip/adapters/gonet/gonet_state_autogen.go3
-rw-r--r--pkg/tcpip/adapters/gonet/gonet_test.go725
-rw-r--r--pkg/tcpip/buffer/BUILD25
-rw-r--r--pkg/tcpip/buffer/buffer_state_autogen.go39
-rw-r--r--pkg/tcpip/buffer/buffer_unsafe_state_autogen.go3
-rw-r--r--pkg/tcpip/buffer/view_test.go629
-rw-r--r--pkg/tcpip/checker/BUILD17
-rw-r--r--pkg/tcpip/checker/checker.go1625
-rw-r--r--pkg/tcpip/faketime/BUILD21
-rw-r--r--pkg/tcpip/faketime/faketime_state_autogen.go3
-rw-r--r--pkg/tcpip/faketime/faketime_test.go95
-rw-r--r--pkg/tcpip/hash/jenkins/BUILD18
-rw-r--r--pkg/tcpip/hash/jenkins/jenkins_state_autogen.go3
-rw-r--r--pkg/tcpip/hash/jenkins/jenkins_test.go176
-rw-r--r--pkg/tcpip/header/BUILD76
-rw-r--r--pkg/tcpip/header/checksum_test.go461
-rw-r--r--pkg/tcpip/header/eth_test.go150
-rw-r--r--pkg/tcpip/header/header_state_autogen.go74
-rw-r--r--pkg/tcpip/header/igmp_test.go110
-rw-r--r--pkg/tcpip/header/ipv4_test.go254
-rw-r--r--pkg/tcpip/header/ipv6_extension_headers_test.go1346
-rw-r--r--pkg/tcpip/header/ipv6_test.go457
-rw-r--r--pkg/tcpip/header/ipversion_test.go67
-rw-r--r--pkg/tcpip/header/mld_test.go61
-rw-r--r--pkg/tcpip/header/ndp_test.go1748
-rw-r--r--pkg/tcpip/header/parse/BUILD15
-rw-r--r--pkg/tcpip/header/parse/parse_state_autogen.go3
-rw-r--r--pkg/tcpip/header/tcp_test.go168
-rw-r--r--pkg/tcpip/link/channel/BUILD15
-rw-r--r--pkg/tcpip/link/channel/channel_state_autogen.go36
-rw-r--r--pkg/tcpip/link/ethernet/BUILD29
-rw-r--r--pkg/tcpip/link/ethernet/ethernet_state_autogen.go3
-rw-r--r--pkg/tcpip/link/ethernet/ethernet_test.go71
-rw-r--r--pkg/tcpip/link/fdbased/BUILD40
-rw-r--r--pkg/tcpip/link/fdbased/endpoint_test.go624
-rw-r--r--pkg/tcpip/link/fdbased/fdbased_state_autogen.go8
-rw-r--r--pkg/tcpip/link/fdbased/fdbased_unsafe_state_autogen.go6
-rw-r--r--pkg/tcpip/link/loopback/BUILD15
-rw-r--r--pkg/tcpip/link/loopback/loopback_state_autogen.go3
-rw-r--r--pkg/tcpip/link/muxed/BUILD29
-rw-r--r--pkg/tcpip/link/muxed/injectable_test.go101
-rw-r--r--pkg/tcpip/link/muxed/muxed_state_autogen.go3
-rw-r--r--pkg/tcpip/link/nested/BUILD31
-rw-r--r--pkg/tcpip/link/nested/nested_state_autogen.go3
-rw-r--r--pkg/tcpip/link/nested/nested_test.go109
-rw-r--r--pkg/tcpip/link/packetsocket/BUILD14
-rw-r--r--pkg/tcpip/link/packetsocket/packetsocket_state_autogen.go3
-rw-r--r--pkg/tcpip/link/pipe/BUILD15
-rw-r--r--pkg/tcpip/link/pipe/pipe_state_autogen.go3
-rw-r--r--pkg/tcpip/link/qdisc/fifo/BUILD19
-rw-r--r--pkg/tcpip/link/qdisc/fifo/fifo_state_autogen.go3
-rw-r--r--pkg/tcpip/link/rawfile/BUILD33
-rw-r--r--pkg/tcpip/link/rawfile/errors_test.go55
-rw-r--r--pkg/tcpip/link/rawfile/rawfile_state_autogen.go5
-rw-r--r--pkg/tcpip/link/rawfile/rawfile_unsafe_state_autogen.go9
-rw-r--r--pkg/tcpip/link/sharedmem/BUILD43
-rw-r--r--pkg/tcpip/link/sharedmem/pipe/BUILD23
-rw-r--r--pkg/tcpip/link/sharedmem/pipe/pipe_state_autogen.go3
-rw-r--r--pkg/tcpip/link/sharedmem/pipe/pipe_test.go512
-rw-r--r--pkg/tcpip/link/sharedmem/pipe/pipe_unsafe_state_autogen.go3
-rw-r--r--pkg/tcpip/link/sharedmem/queue/BUILD27
-rw-r--r--pkg/tcpip/link/sharedmem/queue/queue_state_autogen.go3
-rw-r--r--pkg/tcpip/link/sharedmem/queue/queue_test.go517
-rw-r--r--pkg/tcpip/link/sharedmem/sharedmem_state_autogen.go6
-rw-r--r--pkg/tcpip/link/sharedmem/sharedmem_test.go815
-rw-r--r--pkg/tcpip/link/sharedmem/sharedmem_unsafe_state_autogen.go3
-rw-r--r--pkg/tcpip/link/sniffer/BUILD21
-rw-r--r--pkg/tcpip/link/sniffer/sniffer_state_autogen.go3
-rw-r--r--pkg/tcpip/link/tun/BUILD43
-rw-r--r--pkg/tcpip/link/tun/tun_endpoint_refs.go140
-rw-r--r--pkg/tcpip/link/tun/tun_state_autogen.go68
-rw-r--r--pkg/tcpip/link/tun/tun_unsafe_state_autogen.go5
-rw-r--r--pkg/tcpip/link/waitable/BUILD30
-rw-r--r--pkg/tcpip/link/waitable/waitable_state_autogen.go3
-rw-r--r--pkg/tcpip/link/waitable/waitable_test.go182
-rw-r--r--pkg/tcpip/network/BUILD30
-rw-r--r--pkg/tcpip/network/arp/BUILD53
-rw-r--r--pkg/tcpip/network/arp/arp_state_autogen.go3
-rw-r--r--pkg/tcpip/network/arp/arp_test.go680
-rw-r--r--pkg/tcpip/network/arp/stats_test.go51
-rw-r--r--pkg/tcpip/network/hash/BUILD13
-rw-r--r--pkg/tcpip/network/hash/hash_state_autogen.go3
-rw-r--r--pkg/tcpip/network/internal/fragmentation/BUILD54
-rw-r--r--pkg/tcpip/network/internal/fragmentation/fragmentation_state_autogen.go68
-rw-r--r--pkg/tcpip/network/internal/fragmentation/fragmentation_test.go648
-rw-r--r--pkg/tcpip/network/internal/fragmentation/reassembler_list.go221
-rw-r--r--pkg/tcpip/network/internal/fragmentation/reassembler_test.go233
-rw-r--r--pkg/tcpip/network/internal/ip/BUILD40
-rw-r--r--pkg/tcpip/network/internal/ip/duplicate_address_detection_test.go381
-rw-r--r--pkg/tcpip/network/internal/ip/generic_multicast_protocol_test.go808
-rw-r--r--pkg/tcpip/network/internal/ip/ip_state_autogen.go32
-rw-r--r--pkg/tcpip/network/internal/testutil/BUILD21
-rw-r--r--pkg/tcpip/network/internal/testutil/testutil.go129
-rw-r--r--pkg/tcpip/network/ip_test.go2034
-rw-r--r--pkg/tcpip/network/ipv4/BUILD68
-rw-r--r--pkg/tcpip/network/ipv4/igmp_test.go387
-rw-r--r--pkg/tcpip/network/ipv4/ipv4_state_autogen.go113
-rw-r--r--pkg/tcpip/network/ipv4/ipv4_test.go3352
-rw-r--r--pkg/tcpip/network/ipv4/stats_test.go99
-rw-r--r--pkg/tcpip/network/ipv6/BUILD72
-rw-r--r--pkg/tcpip/network/ipv6/icmp_test.go1730
-rw-r--r--pkg/tcpip/network/ipv6/ipv6_state_autogen.go136
-rw-r--r--pkg/tcpip/network/ipv6/ipv6_test.go3491
-rw-r--r--pkg/tcpip/network/ipv6/mld_test.go603
-rw-r--r--pkg/tcpip/network/ipv6/ndp_test.go1341
-rw-r--r--pkg/tcpip/network/multicast_group_test.go1278
-rw-r--r--pkg/tcpip/ports/BUILD28
-rw-r--r--pkg/tcpip/ports/ports_state_autogen.go42
-rw-r--r--pkg/tcpip/ports/ports_test.go525
-rw-r--r--pkg/tcpip/sample/tun_tcp_connect/BUILD21
-rw-r--r--pkg/tcpip/sample/tun_tcp_connect/main.go220
-rw-r--r--pkg/tcpip/sample/tun_tcp_echo/BUILD21
-rw-r--r--pkg/tcpip/sample/tun_tcp_echo/main.go231
-rw-r--r--pkg/tcpip/seqnum/BUILD9
-rw-r--r--pkg/tcpip/seqnum/seqnum_state_autogen.go3
-rw-r--r--pkg/tcpip/sock_err_list.go221
-rw-r--r--pkg/tcpip/stack/BUILD152
-rw-r--r--pkg/tcpip/stack/addressable_endpoint_state_test.go61
-rw-r--r--pkg/tcpip/stack/forwarding_test.go790
-rw-r--r--pkg/tcpip/stack/ndp_test.go5569
-rw-r--r--pkg/tcpip/stack/neighbor_cache_test.go1584
-rw-r--r--pkg/tcpip/stack/neighbor_entry_list.go221
-rw-r--r--pkg/tcpip/stack/neighbor_entry_test.go2269
-rw-r--r--pkg/tcpip/stack/nic_test.go224
-rw-r--r--pkg/tcpip/stack/nud_test.go816
-rw-r--r--pkg/tcpip/stack/packet_buffer_list.go221
-rw-r--r--pkg/tcpip/stack/packet_buffer_test.go649
-rw-r--r--pkg/tcpip/stack/stack_state_autogen.go1284
-rw-r--r--pkg/tcpip/stack/stack_test.go4543
-rw-r--r--pkg/tcpip/stack/stack_unsafe_state_autogen.go3
-rw-r--r--pkg/tcpip/stack/transport_demuxer_test.go446
-rw-r--r--pkg/tcpip/stack/transport_test.go555
-rw-r--r--pkg/tcpip/stack/tuple_list.go221
-rw-r--r--pkg/tcpip/tcpip_state_autogen.go1323
-rw-r--r--pkg/tcpip/tcpip_test.go325
-rw-r--r--pkg/tcpip/tests/integration/BUILD141
-rw-r--r--pkg/tcpip/tests/integration/forward_test.go690
-rw-r--r--pkg/tcpip/tests/integration/iptables_test.go1134
-rw-r--r--pkg/tcpip/tests/integration/link_resolution_test.go1640
-rw-r--r--pkg/tcpip/tests/integration/loopback_test.go763
-rw-r--r--pkg/tcpip/tests/integration/multicast_broadcast_test.go723
-rw-r--r--pkg/tcpip/tests/integration/route_test.go433
-rw-r--r--pkg/tcpip/tests/utils/BUILD22
-rw-r--r--pkg/tcpip/tests/utils/utils.go390
-rw-r--r--pkg/tcpip/testutil/BUILD21
-rw-r--r--pkg/tcpip/testutil/testutil.go123
-rw-r--r--pkg/tcpip/testutil/testutil_test.go103
-rw-r--r--pkg/tcpip/testutil/testutil_unsafe.go26
-rw-r--r--pkg/tcpip/timer_test.go353
-rw-r--r--pkg/tcpip/transport/icmp/BUILD59
-rw-r--r--pkg/tcpip/transport/icmp/icmp_packet_list.go221
-rw-r--r--pkg/tcpip/transport/icmp/icmp_state_autogen.go168
-rw-r--r--pkg/tcpip/transport/icmp/icmp_test.go235
-rw-r--r--pkg/tcpip/transport/packet/BUILD37
-rw-r--r--pkg/tcpip/transport/packet/packet_list.go221
-rw-r--r--pkg/tcpip/transport/packet/packet_state_autogen.go171
-rw-r--r--pkg/tcpip/transport/raw/BUILD39
-rw-r--r--pkg/tcpip/transport/raw/raw_packet_list.go221
-rw-r--r--pkg/tcpip/transport/raw/raw_state_autogen.go165
-rw-r--r--pkg/tcpip/transport/tcp/BUILD139
-rw-r--r--pkg/tcpip/transport/tcp/dual_stack_test.go650
-rw-r--r--pkg/tcpip/transport/tcp/rcv_test.go74
-rw-r--r--pkg/tcpip/transport/tcp/sack_scoreboard_test.go249
-rw-r--r--pkg/tcpip/transport/tcp/segment_test.go69
-rw-r--r--pkg/tcpip/transport/tcp/tcp_endpoint_list.go221
-rw-r--r--pkg/tcpip/transport/tcp/tcp_noracedetector_test.go559
-rw-r--r--pkg/tcpip/transport/tcp/tcp_rack_test.go1102
-rw-r--r--pkg/tcpip/transport/tcp/tcp_sack_test.go704
-rw-r--r--pkg/tcpip/transport/tcp/tcp_segment_list.go221
-rw-r--r--pkg/tcpip/transport/tcp/tcp_state_autogen.go897
-rw-r--r--pkg/tcpip/transport/tcp/tcp_test.go7966
-rw-r--r--pkg/tcpip/transport/tcp/tcp_timestamp_test.go311
-rw-r--r--pkg/tcpip/transport/tcp/tcp_unsafe_state_autogen.go3
-rw-r--r--pkg/tcpip/transport/tcp/testing/context/BUILD26
-rw-r--r--pkg/tcpip/transport/tcp/testing/context/context.go1233
-rw-r--r--pkg/tcpip/transport/tcp/timer_test.go50
-rw-r--r--pkg/tcpip/transport/tcpconntrack/BUILD23
-rw-r--r--pkg/tcpip/transport/tcpconntrack/tcp_conntrack_test.go511
-rw-r--r--pkg/tcpip/transport/tcpconntrack/tcpconntrack_state_autogen.go3
-rw-r--r--pkg/tcpip/transport/udp/BUILD65
-rw-r--r--pkg/tcpip/transport/udp/udp_packet_list.go221
-rw-r--r--pkg/tcpip/transport/udp/udp_state_autogen.go239
-rw-r--r--pkg/tcpip/transport/udp/udp_test.go2559
-rw-r--r--pkg/test/criutil/BUILD14
-rw-r--r--pkg/test/criutil/criutil.go372
-rw-r--r--pkg/test/dockerutil/BUILD43
-rw-r--r--pkg/test/dockerutil/README.md86
-rw-r--r--pkg/test/dockerutil/container.go548
-rw-r--r--pkg/test/dockerutil/dockerutil.go172
-rw-r--r--pkg/test/dockerutil/exec.go188
-rw-r--r--pkg/test/dockerutil/network.go110
-rw-r--r--pkg/test/dockerutil/profile.go150
-rw-r--r--pkg/test/dockerutil/profile_test.go125
-rw-r--r--pkg/test/testutil/BUILD24
-rw-r--r--pkg/test/testutil/sh.go515
-rw-r--r--pkg/test/testutil/testutil.go599
-rw-r--r--pkg/test/testutil/testutil_runfiles.go78
-rw-r--r--pkg/unet/BUILD29
-rw-r--r--pkg/unet/unet_state_autogen.go3
-rw-r--r--pkg/unet/unet_test.go739
-rw-r--r--pkg/unet/unet_unsafe_state_autogen.go3
-rw-r--r--pkg/urpc/BUILD23
-rw-r--r--pkg/urpc/urpc_state_autogen.go3
-rw-r--r--pkg/urpc/urpc_test.go210
-rw-r--r--pkg/usermem/BUILD37
-rw-r--r--pkg/usermem/README.md31
-rw-r--r--pkg/usermem/usermem_state_autogen.go3
-rw-r--r--pkg/usermem/usermem_test.go407
-rw-r--r--pkg/usermem/usermem_unsafe_state_autogen.go3
-rw-r--r--pkg/waiter/BUILD35
-rw-r--r--pkg/waiter/waiter_list.go221
-rw-r--r--pkg/waiter/waiter_state_autogen.go126
-rw-r--r--pkg/waiter/waiter_test.go198
-rw-r--r--runsc/BUILD53
-rw-r--r--runsc/boot/BUILD150
-rw-r--r--runsc/boot/boot_amd64_state_autogen.go3
-rw-r--r--runsc/boot/boot_arm64_state_autogen.go3
-rw-r--r--runsc/boot/boot_state_autogen.go39
-rw-r--r--runsc/boot/compat_test.go90
-rw-r--r--runsc/boot/filter/BUILD28
-rw-r--r--runsc/boot/filter/filter_amd64_state_autogen.go5
-rw-r--r--runsc/boot/filter/filter_arm64_state_autogen.go5
-rw-r--r--runsc/boot/filter/filter_race_state_autogen.go5
-rw-r--r--runsc/boot/filter/filter_state_autogen.go7
-rw-r--r--runsc/boot/fs_test.go252
-rw-r--r--runsc/boot/loader_test.go731
-rw-r--r--runsc/boot/platforms/BUILD15
-rw-r--r--runsc/boot/platforms/platforms_state_autogen.go3
-rw-r--r--runsc/boot/pprof/BUILD11
-rw-r--r--runsc/boot/pprof/pprof_state_autogen.go5
-rw-r--r--runsc/cgroup/BUILD28
-rw-r--r--runsc/cgroup/cgroup_state_autogen.go3
-rw-r--r--runsc/cgroup/cgroup_test.go865
-rw-r--r--runsc/cli/BUILD25
-rw-r--r--runsc/cli/cli_state_autogen.go3
-rw-r--r--runsc/cmd/BUILD105
-rw-r--r--runsc/cmd/capability_test.go130
-rw-r--r--runsc/cmd/cmd_state_autogen.go5
-rw-r--r--runsc/cmd/delete_test.go41
-rw-r--r--runsc/cmd/exec_test.go154
-rw-r--r--runsc/cmd/gofer_test.go163
-rw-r--r--runsc/cmd/mitigate_test.go161
-rw-r--r--runsc/config/BUILD28
-rw-r--r--runsc/config/config_state_autogen.go3
-rw-r--r--runsc/config/config_test.go288
-rw-r--r--runsc/console/BUILD17
-rw-r--r--runsc/console/console_state_autogen.go3
-rw-r--r--runsc/container/BUILD76
-rw-r--r--runsc/container/console_test.go667
-rw-r--r--runsc/container/container_norace_test.go21
-rw-r--r--runsc/container/container_race_test.go21
-rw-r--r--runsc/container/container_state_autogen.go3
-rw-r--r--runsc/container/container_test.go2599
-rw-r--r--runsc/container/multi_container_test.go2089
-rw-r--r--runsc/container/shared_volume_test.go265
-rw-r--r--runsc/flag/BUILD9
-rw-r--r--runsc/flag/flag_state_autogen.go5
-rw-r--r--runsc/fsgofer/BUILD39
-rw-r--r--runsc/fsgofer/filter/BUILD26
-rw-r--r--runsc/fsgofer/filter/filter_amd64_state_autogen.go5
-rw-r--r--runsc/fsgofer/filter/filter_arm64_state_autogen.go5
-rw-r--r--runsc/fsgofer/filter/filter_race_state_autogen.go5
-rw-r--r--runsc/fsgofer/filter/filter_state_autogen.go6
-rw-r--r--runsc/fsgofer/fsgofer_amd64_unsafe_state_autogen.go5
-rw-r--r--runsc/fsgofer/fsgofer_arm64_unsafe_state_autogen.go5
-rw-r--r--runsc/fsgofer/fsgofer_state_autogen.go3
-rw-r--r--runsc/fsgofer/fsgofer_test.go1139
-rw-r--r--runsc/fsgofer/fsgofer_unsafe_state_autogen.go3
-rw-r--r--runsc/mitigate/BUILD23
-rw-r--r--runsc/mitigate/mitigate_state_autogen.go3
-rw-r--r--runsc/mitigate/mitigate_test.go501
-rw-r--r--runsc/mitigate/mock/BUILD11
-rw-r--r--runsc/mitigate/mock/mock.go154
-rw-r--r--runsc/sandbox/BUILD39
-rw-r--r--runsc/sandbox/sandbox_state_autogen.go3
-rw-r--r--runsc/sandbox/sandbox_unsafe_state_autogen.go3
-rw-r--r--runsc/specutils/BUILD34
-rw-r--r--runsc/specutils/safemount_test/BUILD23
-rw-r--r--runsc/specutils/safemount_test/safemount_runner.go117
-rw-r--r--runsc/specutils/safemount_test/safemount_test.go53
-rw-r--r--runsc/specutils/seccomp/BUILD38
-rw-r--r--runsc/specutils/seccomp/seccomp_amd64_state_autogen.go5
-rw-r--r--runsc/specutils/seccomp/seccomp_arm64_state_autogen.go5
-rw-r--r--runsc/specutils/seccomp/seccomp_state_autogen.go3
-rw-r--r--runsc/specutils/seccomp/seccomp_test.go409
-rw-r--r--runsc/specutils/specutils_state_autogen.go3
-rw-r--r--runsc/specutils/specutils_test.go271
-rwxr-xr-xrunsc/version_test.sh36
-rw-r--r--shim/BUILD26
-rw-r--r--shim/README.md8
-rw-r--r--shim/cli/BUILD16
-rw-r--r--shim/cli/cli_state_autogen.go3
-rw-r--r--shim/runsc.toml6
-rw-r--r--test/BUILD1
-rw-r--r--test/README.md40
-rw-r--r--test/benchmarks/BUILD11
-rw-r--r--test/benchmarks/README.md129
-rw-r--r--test/benchmarks/base/BUILD50
-rw-r--r--test/benchmarks/base/base.go98
-rw-r--r--test/benchmarks/base/size_test.go181
-rw-r--r--test/benchmarks/base/startup_test.go144
-rw-r--r--test/benchmarks/base/sysbench_test.go92
-rw-r--r--test/benchmarks/database/BUILD22
-rw-r--r--test/benchmarks/database/database.go16
-rw-r--r--test/benchmarks/database/redis_test.go139
-rw-r--r--test/benchmarks/defs.bzl14
-rw-r--r--test/benchmarks/fs/BUILD29
-rw-r--r--test/benchmarks/fs/bazel_test.go160
-rw-r--r--test/benchmarks/fs/fio_test.go163
-rw-r--r--test/benchmarks/harness/BUILD20
-rw-r--r--test/benchmarks/harness/harness.go57
-rw-r--r--test/benchmarks/harness/machine.go87
-rw-r--r--test/benchmarks/harness/util.go113
-rw-r--r--test/benchmarks/media/BUILD21
-rw-r--r--test/benchmarks/media/ffmpeg_test.go61
-rw-r--r--test/benchmarks/media/media.go16
-rw-r--r--test/benchmarks/ml/BUILD22
-rw-r--r--test/benchmarks/ml/ml.go16
-rw-r--r--test/benchmarks/ml/tensorflow_test.go87
-rw-r--r--test/benchmarks/network/BUILD89
-rw-r--r--test/benchmarks/network/httpd_test.go135
-rw-r--r--test/benchmarks/network/iperf_test.go121
-rw-r--r--test/benchmarks/network/network.go80
-rw-r--r--test/benchmarks/network/nginx_test.go147
-rw-r--r--test/benchmarks/network/node_test.go137
-rw-r--r--test/benchmarks/network/ruby_test.go144
-rw-r--r--test/benchmarks/tcp/BUILD41
-rw-r--r--test/benchmarks/tcp/README.md87
-rw-r--r--test/benchmarks/tcp/nsjoin.c47
-rwxr-xr-xtest/benchmarks/tcp/tcp_benchmark.sh396
-rw-r--r--test/benchmarks/tcp/tcp_proxy.go462
-rw-r--r--test/benchmarks/tools/BUILD35
-rw-r--r--test/benchmarks/tools/ab.go97
-rw-r--r--test/benchmarks/tools/ab_test.go90
-rw-r--r--test/benchmarks/tools/fio.go116
-rw-r--r--test/benchmarks/tools/fio_test.go122
-rw-r--r--test/benchmarks/tools/hey.go82
-rw-r--r--test/benchmarks/tools/hey_test.go81
-rw-r--r--test/benchmarks/tools/iperf.go65
-rw-r--r--test/benchmarks/tools/iperf_test.go34
-rw-r--r--test/benchmarks/tools/meminfo.go60
-rw-r--r--test/benchmarks/tools/meminfo_test.go84
-rw-r--r--test/benchmarks/tools/parser_util.go101
-rw-r--r--test/benchmarks/tools/redis.go74
-rw-r--r--test/benchmarks/tools/redis_test.go87
-rw-r--r--test/benchmarks/tools/sysbench.go236
-rw-r--r--test/benchmarks/tools/sysbench_test.go169
-rw-r--r--test/benchmarks/tools/tools.go17
-rw-r--r--test/cmd/test_app/BUILD21
-rw-r--r--test/cmd/test_app/fds.go186
-rw-r--r--test/cmd/test_app/main.go400
-rw-r--r--test/e2e/BUILD32
-rw-r--r--test/e2e/exec_test.go268
-rw-r--r--test/e2e/integration.go16
-rw-r--r--test/e2e/integration_test.go744
-rw-r--r--test/fsstress/BUILD27
-rw-r--r--test/fsstress/fsstress.go16
-rw-r--r--test/fsstress/fsstress_test.go124
-rw-r--r--test/fuse/BUILD80
-rw-r--r--test/fuse/README.md188
-rw-r--r--test/fuse/linux/BUILD243
-rw-r--r--test/fuse/linux/create_test.cc128
-rw-r--r--test/fuse/linux/fuse_base.cc447
-rw-r--r--test/fuse/linux/fuse_base.h251
-rw-r--r--test/fuse/linux/fuse_fd_util.cc61
-rw-r--r--test/fuse/linux/fuse_fd_util.h48
-rw-r--r--test/fuse/linux/mkdir_test.cc88
-rw-r--r--test/fuse/linux/mknod_test.cc107
-rw-r--r--test/fuse/linux/mount_test.cc86
-rw-r--r--test/fuse/linux/open_test.cc128
-rw-r--r--test/fuse/linux/read_test.cc390
-rw-r--r--test/fuse/linux/readdir_test.cc193
-rw-r--r--test/fuse/linux/readlink_test.cc85
-rw-r--r--test/fuse/linux/release_test.cc74
-rw-r--r--test/fuse/linux/rmdir_test.cc100
-rw-r--r--test/fuse/linux/setstat_test.cc338
-rw-r--r--test/fuse/linux/stat_test.cc229
-rw-r--r--test/fuse/linux/symlink_test.cc88
-rw-r--r--test/fuse/linux/unlink_test.cc107
-rw-r--r--test/fuse/linux/write_test.cc303
-rw-r--r--test/image/BUILD33
-rw-r--r--test/image/image.go16
-rw-r--r--test/image/image_test.go326
-rw-r--r--test/image/latin10k.txt33
-rw-r--r--test/image/mysql.sql23
-rw-r--r--test/image/ruby.rb23
-rwxr-xr-xtest/image/ruby.sh20
-rw-r--r--test/iptables/BUILD41
-rw-r--r--test/iptables/README.md84
-rw-r--r--test/iptables/filter_input.go990
-rw-r--r--test/iptables/filter_output.go714
-rw-r--r--test/iptables/iptables.go115
-rw-r--r--test/iptables/iptables_test.go466
-rw-r--r--test/iptables/iptables_unsafe.go64
-rw-r--r--test/iptables/iptables_util.go327
-rw-r--r--test/iptables/nat.go1037
-rw-r--r--test/iptables/runner/BUILD12
-rw-r--r--test/iptables/runner/main.go79
-rw-r--r--test/kubernetes/gvisor-injection-admission-webhook.yaml89
-rw-r--r--test/packetdrill/BUILD55
-rw-r--r--test/packetdrill/accept_ack_drop.pkt27
-rw-r--r--test/packetdrill/defs.bzl89
-rw-r--r--test/packetdrill/fin_wait2_timeout.pkt23
-rw-r--r--test/packetdrill/listen_close_before_handshake_complete.pkt31
-rw-r--r--test/packetdrill/no_rst_to_rst.pkt36
-rwxr-xr-xtest/packetdrill/packetdrill_setup.sh26
-rwxr-xr-xtest/packetdrill/packetdrill_test.sh231
-rw-r--r--test/packetdrill/reset_for_ack_when_no_syn_cookies_in_use.pkt9
-rw-r--r--test/packetdrill/sanity_test.pkt7
-rw-r--r--test/packetdrill/tcp_defer_accept.pkt48
-rw-r--r--test/packetdrill/tcp_defer_accept_timeout.pkt48
-rw-r--r--test/packetimpact/README.md709
-rw-r--r--test/packetimpact/dut/BUILD30
-rw-r--r--test/packetimpact/dut/posix_server.cc464
-rw-r--r--test/packetimpact/netdevs/BUILD23
-rw-r--r--test/packetimpact/netdevs/netdevs.go122
-rw-r--r--test/packetimpact/netdevs/netdevs_test.go227
-rw-r--r--test/packetimpact/proto/BUILD12
-rw-r--r--test/packetimpact/proto/posix_server.proto268
-rw-r--r--test/packetimpact/runner/BUILD38
-rw-r--r--test/packetimpact/runner/defs.bzl315
-rw-r--r--test/packetimpact/runner/dut.go657
-rw-r--r--test/packetimpact/runner/packetimpact_test.go32
-rw-r--r--test/packetimpact/testbench/BUILD47
-rw-r--r--test/packetimpact/testbench/connections.go1263
-rw-r--r--test/packetimpact/testbench/dut.go789
-rw-r--r--test/packetimpact/testbench/dut_client.go31
-rw-r--r--test/packetimpact/testbench/layers.go1602
-rw-r--r--test/packetimpact/testbench/layers_test.go728
-rw-r--r--test/packetimpact/testbench/rawsockets.go207
-rw-r--r--test/packetimpact/testbench/testbench.go180
-rw-r--r--test/packetimpact/tests/BUILD431
-rw-r--r--test/packetimpact/tests/fin_wait2_timeout_test.go74
-rw-r--r--test/packetimpact/tests/generic_dgram_socket_send_recv_test.go781
-rw-r--r--test/packetimpact/tests/icmpv6_param_problem_test.go74
-rw-r--r--test/packetimpact/tests/ipv4_fragment_reassembly_test.go175
-rw-r--r--test/packetimpact/tests/ipv4_id_uniqueness_test.go121
-rw-r--r--test/packetimpact/tests/ipv6_fragment_icmp_error_test.go356
-rw-r--r--test/packetimpact/tests/ipv6_fragment_reassembly_test.go182
-rw-r--r--test/packetimpact/tests/ipv6_unknown_options_action_test.go183
-rw-r--r--test/packetimpact/tests/tcp_connect_icmp_error_test.go106
-rw-r--r--test/packetimpact/tests/tcp_cork_mss_test.go83
-rw-r--r--test/packetimpact/tests/tcp_fin_retransmission_test.go87
-rw-r--r--test/packetimpact/tests/tcp_handshake_window_size_test.go65
-rw-r--r--test/packetimpact/tests/tcp_info_test.go102
-rw-r--r--test/packetimpact/tests/tcp_linger_test.go265
-rw-r--r--test/packetimpact/tests/tcp_listen_backlog_test.go94
-rw-r--r--test/packetimpact/tests/tcp_network_unreachable_test.go144
-rw-r--r--test/packetimpact/tests/tcp_noaccept_close_rst_test.go46
-rw-r--r--test/packetimpact/tests/tcp_outside_the_window_test.go182
-rw-r--r--test/packetimpact/tests/tcp_paws_mechanism_test.go108
-rw-r--r--test/packetimpact/tests/tcp_queue_send_recv_in_syn_sent_test.go286
-rw-r--r--test/packetimpact/tests/tcp_rack_test.go404
-rw-r--r--test/packetimpact/tests/tcp_rcv_buf_space_test.go78
-rw-r--r--test/packetimpact/tests/tcp_retransmits_test.go94
-rw-r--r--test/packetimpact/tests/tcp_send_window_sizes_piggyback_test.go103
-rw-r--r--test/packetimpact/tests/tcp_syncookie_test.go151
-rw-r--r--test/packetimpact/tests/tcp_synrcvd_reset_test.go60
-rw-r--r--test/packetimpact/tests/tcp_synsent_reset_test.go94
-rw-r--r--test/packetimpact/tests/tcp_timewait_reset_test.go67
-rw-r--r--test/packetimpact/tests/tcp_unacc_seq_ack_test.go274
-rw-r--r--test/packetimpact/tests/tcp_user_timeout_test.go99
-rw-r--r--test/packetimpact/tests/tcp_window_shrink_test.go72
-rw-r--r--test/packetimpact/tests/tcp_zero_receive_window_test.go202
-rw-r--r--test/packetimpact/tests/tcp_zero_window_probe_retransmit_test.go115
-rw-r--r--test/packetimpact/tests/tcp_zero_window_probe_test.go111
-rw-r--r--test/packetimpact/tests/tcp_zero_window_probe_usertimeout_test.go97
-rw-r--r--test/packetimpact/tests/udp_any_addr_recv_unicast_test.go50
-rw-r--r--test/packetimpact/tests/udp_discard_mcast_source_addr_test.go93
-rw-r--r--test/packetimpact/tests/udp_icmp_error_propagation_test.go352
-rw-r--r--test/perf/BUILD141
-rw-r--r--test/perf/linux/BUILD372
-rw-r--r--test/perf/linux/clock_getres_benchmark.cc39
-rw-r--r--test/perf/linux/clock_gettime_benchmark.cc60
-rw-r--r--test/perf/linux/death_benchmark.cc36
-rw-r--r--test/perf/linux/epoll_benchmark.cc99
-rw-r--r--test/perf/linux/fork_benchmark.cc350
-rw-r--r--test/perf/linux/futex_benchmark.cc198
-rw-r--r--test/perf/linux/getdents_benchmark.cc149
-rw-r--r--test/perf/linux/getpid_benchmark.cc55
-rw-r--r--test/perf/linux/gettid_benchmark.cc38
-rw-r--r--test/perf/linux/mapping_benchmark.cc163
-rw-r--r--test/perf/linux/open_benchmark.cc56
-rw-r--r--test/perf/linux/open_read_close_benchmark.cc61
-rw-r--r--test/perf/linux/pipe_benchmark.cc66
-rw-r--r--test/perf/linux/randread_benchmark.cc100
-rw-r--r--test/perf/linux/read_benchmark.cc53
-rw-r--r--test/perf/linux/sched_yield_benchmark.cc37
-rw-r--r--test/perf/linux/send_recv_benchmark.cc372
-rw-r--r--test/perf/linux/seqwrite_benchmark.cc66
-rw-r--r--test/perf/linux/signal_benchmark.cc61
-rw-r--r--test/perf/linux/sleep_benchmark.cc60
-rw-r--r--test/perf/linux/stat_benchmark.cc62
-rw-r--r--test/perf/linux/unlink_benchmark.cc66
-rw-r--r--test/perf/linux/write_benchmark.cc64
-rw-r--r--test/root/BUILD43
-rw-r--r--test/root/cgroup_test.go362
-rw-r--r--test/root/chroot_test.go151
-rw-r--r--test/root/crictl_test.go476
-rw-r--r--test/root/main_test.go49
-rw-r--r--test/root/oom_score_adj_test.go358
-rw-r--r--test/root/root.go21
-rw-r--r--test/root/runsc_test.go151
-rw-r--r--test/runner/BUILD30
-rw-r--r--test/runner/defs.bzl238
-rw-r--r--test/runner/gtest/BUILD9
-rw-r--r--test/runner/gtest/gtest.go170
-rw-r--r--test/runner/main.go542
-rw-r--r--test/runner/setup_container/BUILD19
-rw-r--r--test/runner/setup_container/setup_container.cc79
-rw-r--r--test/runtimes/BUILD46
-rw-r--r--test/runtimes/README.md62
-rw-r--r--test/runtimes/defs.bzl89
-rw-r--r--test/runtimes/exclude/go1.12.csv13
-rw-r--r--test/runtimes/exclude/java11.csv220
-rw-r--r--test/runtimes/exclude/nodejs12.4.0.csv45
-rw-r--r--test/runtimes/exclude/php7.3.6.csv48
-rw-r--r--test/runtimes/exclude/python3.7.3.csv19
-rw-r--r--test/runtimes/proctor/BUILD14
-rw-r--r--test/runtimes/proctor/lib/BUILD25
-rw-r--r--test/runtimes/proctor/lib/go.go99
-rw-r--r--test/runtimes/proctor/lib/java.go76
-rw-r--r--test/runtimes/proctor/lib/lib.go108
-rw-r--r--test/runtimes/proctor/lib/lib_test.go127
-rw-r--r--test/runtimes/proctor/lib/nodejs.go46
-rw-r--r--test/runtimes/proctor/lib/php.go43
-rw-r--r--test/runtimes/proctor/lib/python.go49
-rw-r--r--test/runtimes/proctor/main.go113
-rw-r--r--test/runtimes/runner/BUILD11
-rw-r--r--test/runtimes/runner/lib/BUILD22
-rw-r--r--test/runtimes/runner/lib/exclude_test.go39
-rw-r--r--test/runtimes/runner/lib/lib.go199
-rw-r--r--test/runtimes/runner/main.go42
-rw-r--r--test/syscalls/BUILD1033
-rw-r--r--test/syscalls/README.md107
-rw-r--r--test/syscalls/linux/32bit.cc248
-rw-r--r--test/syscalls/linux/BUILD4353
-rw-r--r--test/syscalls/linux/accept_bind.cc666
-rw-r--r--test/syscalls/linux/accept_bind_stream.cc92
-rw-r--r--test/syscalls/linux/access.cc170
-rw-r--r--test/syscalls/linux/affinity.cc242
-rw-r--r--test/syscalls/linux/aio.cc430
-rw-r--r--test/syscalls/linux/alarm.cc192
-rw-r--r--test/syscalls/linux/arch_prctl.cc48
-rw-r--r--test/syscalls/linux/bad.cc45
-rw-r--r--test/syscalls/linux/base_poll_test.cc65
-rw-r--r--test/syscalls/linux/base_poll_test.h101
-rw-r--r--test/syscalls/linux/bind.cc145
-rw-r--r--test/syscalls/linux/brk.cc31
-rw-r--r--test/syscalls/linux/cgroup.cc591
-rw-r--r--test/syscalls/linux/chdir.cc64
-rw-r--r--test/syscalls/linux/chmod.cc300
-rw-r--r--test/syscalls/linux/chown.cc251
-rw-r--r--test/syscalls/linux/chroot.cc441
-rw-r--r--test/syscalls/linux/clock_getres.cc37
-rw-r--r--test/syscalls/linux/clock_gettime.cc163
-rw-r--r--test/syscalls/linux/clock_nanosleep.cc179
-rw-r--r--test/syscalls/linux/concurrency.cc130
-rw-r--r--test/syscalls/linux/connect_external.cc163
-rw-r--r--test/syscalls/linux/creat.cc68
-rw-r--r--test/syscalls/linux/dev.cc180
-rw-r--r--test/syscalls/linux/dup.cc188
-rw-r--r--test/syscalls/linux/epoll.cc502
-rw-r--r--test/syscalls/linux/eventfd.cc222
-rw-r--r--test/syscalls/linux/exceptions.cc377
-rw-r--r--test/syscalls/linux/exec.cc904
-rw-r--r--test/syscalls/linux/exec.h34
-rw-r--r--test/syscalls/linux/exec_assert_closed_workload.cc45
-rw-r--r--test/syscalls/linux/exec_basic_workload.cc31
-rw-r--r--test/syscalls/linux/exec_binary.cc1681
-rw-r--r--test/syscalls/linux/exec_proc_exe_workload.cc42
-rw-r--r--test/syscalls/linux/exec_state_workload.cc206
-rw-r--r--test/syscalls/linux/exit.cc78
-rwxr-xr-xtest/syscalls/linux/exit_script.sh22
-rw-r--r--test/syscalls/linux/fadvise64.cc83
-rw-r--r--test/syscalls/linux/fallocate.cc199
-rw-r--r--test/syscalls/linux/fault.cc81
-rw-r--r--test/syscalls/linux/fchdir.cc89
-rw-r--r--test/syscalls/linux/fcntl.cc1997
-rw-r--r--test/syscalls/linux/file_base.h100
-rw-r--r--test/syscalls/linux/flock.cc714
-rw-r--r--test/syscalls/linux/fork.cc464
-rw-r--r--test/syscalls/linux/fpsig_fork.cc188
-rw-r--r--test/syscalls/linux/fpsig_nested.cc167
-rw-r--r--test/syscalls/linux/fsync.cc58
-rw-r--r--test/syscalls/linux/futex.cc834
-rw-r--r--test/syscalls/linux/getcpu.cc40
-rw-r--r--test/syscalls/linux/getdents.cc567
-rw-r--r--test/syscalls/linux/getrandom.cc63
-rw-r--r--test/syscalls/linux/getrusage.cc185
-rw-r--r--test/syscalls/linux/inotify.cc2532
-rw-r--r--test/syscalls/linux/ioctl.cc419
-rw-r--r--test/syscalls/linux/ip6tables.cc233
-rw-r--r--test/syscalls/linux/ip_socket_test_util.cc263
-rw-r--r--test/syscalls/linux/ip_socket_test_util.h143
-rw-r--r--test/syscalls/linux/iptables.cc274
-rw-r--r--test/syscalls/linux/iptables.h302
-rw-r--r--test/syscalls/linux/itimer.cc366
-rw-r--r--test/syscalls/linux/kcov.cc184
-rw-r--r--test/syscalls/linux/kill.cc389
-rw-r--r--test/syscalls/linux/link.cc360
-rw-r--r--test/syscalls/linux/lseek.cc202
-rw-r--r--test/syscalls/linux/madvise.cc251
-rw-r--r--test/syscalls/linux/membarrier.cc268
-rw-r--r--test/syscalls/linux/memfd.cc542
-rw-r--r--test/syscalls/linux/memory_accounting.cc99
-rw-r--r--test/syscalls/linux/mempolicy.cc289
-rw-r--r--test/syscalls/linux/mincore.cc96
-rw-r--r--test/syscalls/linux/mkdir.cc128
-rw-r--r--test/syscalls/linux/mknod.cc234
-rw-r--r--test/syscalls/linux/mlock.cc320
-rw-r--r--test/syscalls/linux/mmap.cc1697
-rw-r--r--test/syscalls/linux/mount.cc385
-rw-r--r--test/syscalls/linux/mremap.cc492
-rw-r--r--test/syscalls/linux/msync.cc151
-rw-r--r--test/syscalls/linux/munmap.cc53
-rw-r--r--test/syscalls/linux/network_namespace.cc52
-rw-r--r--test/syscalls/linux/open.cc534
-rw-r--r--test/syscalls/linux/open_create.cc236
-rw-r--r--test/syscalls/linux/packet_socket.cc558
-rw-r--r--test/syscalls/linux/packet_socket_raw.cc728
-rw-r--r--test/syscalls/linux/partial_bad_buffer.cc408
-rw-r--r--test/syscalls/linux/pause.cc88
-rw-r--r--test/syscalls/linux/ping_socket.cc291
-rw-r--r--test/syscalls/linux/pipe.cc740
-rw-r--r--test/syscalls/linux/poll.cc355
-rw-r--r--test/syscalls/linux/ppoll.cc155
-rw-r--r--test/syscalls/linux/prctl.cc231
-rw-r--r--test/syscalls/linux/prctl_setuid.cc268
-rw-r--r--test/syscalls/linux/pread64.cc177
-rw-r--r--test/syscalls/linux/preadv.cc109
-rw-r--r--test/syscalls/linux/preadv2.cc298
-rw-r--r--test/syscalls/linux/priority.cc223
-rw-r--r--test/syscalls/linux/priority_execve.cc42
-rw-r--r--test/syscalls/linux/proc.cc2738
-rw-r--r--test/syscalls/linux/proc_net.cc596
-rw-r--r--test/syscalls/linux/proc_net_tcp.cc496
-rw-r--r--test/syscalls/linux/proc_net_udp.cc309
-rw-r--r--test/syscalls/linux/proc_net_unix.cc383
-rw-r--r--test/syscalls/linux/proc_pid_oomscore.cc72
-rw-r--r--test/syscalls/linux/proc_pid_smaps.cc468
-rw-r--r--test/syscalls/linux/proc_pid_uid_gid_map.cc316
-rw-r--r--test/syscalls/linux/processes.cc90
-rw-r--r--test/syscalls/linux/pselect.cc190
-rw-r--r--test/syscalls/linux/ptrace.cc2302
-rw-r--r--test/syscalls/linux/pty.cc1759
-rw-r--r--test/syscalls/linux/pty_root.cc78
-rw-r--r--test/syscalls/linux/pwrite64.cc94
-rw-r--r--test/syscalls/linux/pwritev2.cc324
-rw-r--r--test/syscalls/linux/raw_socket.cc957
-rw-r--r--test/syscalls/linux/raw_socket_hdrincl.cc407
-rw-r--r--test/syscalls/linux/raw_socket_icmp.cc549
-rw-r--r--test/syscalls/linux/read.cc168
-rw-r--r--test/syscalls/linux/readahead.cc102
-rw-r--r--test/syscalls/linux/readv.cc308
-rw-r--r--test/syscalls/linux/readv_common.cc220
-rw-r--r--test/syscalls/linux/readv_common.h61
-rw-r--r--test/syscalls/linux/readv_socket.cc212
-rw-r--r--test/syscalls/linux/rename.cc500
-rw-r--r--test/syscalls/linux/rlimits.cc73
-rw-r--r--test/syscalls/linux/rseq.cc202
-rw-r--r--test/syscalls/linux/rseq/BUILD61
-rw-r--r--test/syscalls/linux/rseq/critical.h39
-rw-r--r--test/syscalls/linux/rseq/critical_amd64.S66
-rw-r--r--test/syscalls/linux/rseq/critical_arm64.S66
-rw-r--r--test/syscalls/linux/rseq/rseq.cc377
-rw-r--r--test/syscalls/linux/rseq/start_amd64.S45
-rw-r--r--test/syscalls/linux/rseq/start_arm64.S45
-rw-r--r--test/syscalls/linux/rseq/syscalls.h69
-rw-r--r--test/syscalls/linux/rseq/test.h41
-rw-r--r--test/syscalls/linux/rseq/types.h31
-rw-r--r--test/syscalls/linux/rseq/uapi.h51
-rw-r--r--test/syscalls/linux/rtsignal.cc171
-rw-r--r--test/syscalls/linux/sched.cc71
-rw-r--r--test/syscalls/linux/sched_yield.cc33
-rw-r--r--test/syscalls/linux/seccomp.cc425
-rw-r--r--test/syscalls/linux/select.cc168
-rw-r--r--test/syscalls/linux/semaphore.cc1024
-rw-r--r--test/syscalls/linux/sendfile.cc729
-rw-r--r--test/syscalls/linux/sendfile_socket.cc231
-rw-r--r--test/syscalls/linux/setgid.cc413
-rw-r--r--test/syscalls/linux/shm.cc505
-rw-r--r--test/syscalls/linux/sigaction.cc79
-rw-r--r--test/syscalls/linux/sigaltstack.cc268
-rw-r--r--test/syscalls/linux/sigaltstack_check.cc33
-rw-r--r--test/syscalls/linux/signalfd.cc373
-rw-r--r--test/syscalls/linux/sigprocmask.cc269
-rw-r--r--test/syscalls/linux/sigreturn_amd64.cc136
-rw-r--r--test/syscalls/linux/sigreturn_arm64.cc97
-rw-r--r--test/syscalls/linux/sigstop.cc200
-rw-r--r--test/syscalls/linux/sigtimedwait.cc323
-rw-r--r--test/syscalls/linux/socket.cc210
-rw-r--r--test/syscalls/linux/socket_abstract.cc49
-rw-r--r--test/syscalls/linux/socket_bind_to_device.cc313
-rw-r--r--test/syscalls/linux/socket_bind_to_device_distribution.cc353
-rw-r--r--test/syscalls/linux/socket_bind_to_device_sequence.cc513
-rw-r--r--test/syscalls/linux/socket_bind_to_device_util.cc78
-rw-r--r--test/syscalls/linux/socket_bind_to_device_util.h67
-rw-r--r--test/syscalls/linux/socket_blocking.cc60
-rw-r--r--test/syscalls/linux/socket_blocking.h29
-rw-r--r--test/syscalls/linux/socket_capability.cc61
-rw-r--r--test/syscalls/linux/socket_filesystem.cc49
-rw-r--r--test/syscalls/linux/socket_generic.h30
-rw-r--r--test/syscalls/linux/socket_generic_stress.cc261
-rw-r--r--test/syscalls/linux/socket_generic_test_cases.cc947
-rw-r--r--test/syscalls/linux/socket_inet_loopback.cc2534
-rw-r--r--test/syscalls/linux/socket_inet_loopback_isolated.cc489
-rw-r--r--test/syscalls/linux/socket_inet_loopback_nogotsan.cc212
-rw-r--r--test/syscalls/linux/socket_inet_loopback_test_params.h86
-rw-r--r--test/syscalls/linux/socket_ip_loopback_blocking.cc49
-rw-r--r--test/syscalls/linux/socket_ip_tcp_generic.cc1308
-rw-r--r--test/syscalls/linux/socket_ip_tcp_generic.h29
-rw-r--r--test/syscalls/linux/socket_ip_tcp_generic_loopback.cc45
-rw-r--r--test/syscalls/linux/socket_ip_tcp_loopback.cc40
-rw-r--r--test/syscalls/linux/socket_ip_tcp_loopback_blocking.cc45
-rw-r--r--test/syscalls/linux/socket_ip_tcp_loopback_nonblock.cc44
-rw-r--r--test/syscalls/linux/socket_ip_tcp_udp_generic.cc77
-rw-r--r--test/syscalls/linux/socket_ip_udp_generic.cc545
-rw-r--r--test/syscalls/linux/socket_ip_udp_generic.h29
-rw-r--r--test/syscalls/linux/socket_ip_udp_loopback.cc50
-rw-r--r--test/syscalls/linux/socket_ip_udp_loopback_blocking.cc39
-rw-r--r--test/syscalls/linux/socket_ip_udp_loopback_nonblock.cc39
-rw-r--r--test/syscalls/linux/socket_ip_udp_unbound_external_networking.cc59
-rw-r--r--test/syscalls/linux/socket_ip_udp_unbound_external_networking.h46
-rw-r--r--test/syscalls/linux/socket_ip_unbound.cc468
-rw-r--r--test/syscalls/linux/socket_ip_unbound_netlink.cc102
-rw-r--r--test/syscalls/linux/socket_ipv4_udp_unbound.cc2512
-rw-r--r--test/syscalls/linux/socket_ipv4_udp_unbound.h29
-rw-r--r--test/syscalls/linux/socket_ipv4_udp_unbound_external_networking.cc994
-rw-r--r--test/syscalls/linux/socket_ipv4_udp_unbound_external_networking.h31
-rw-r--r--test/syscalls/linux/socket_ipv4_udp_unbound_external_networking_test.cc39
-rw-r--r--test/syscalls/linux/socket_ipv4_udp_unbound_loopback.cc32
-rw-r--r--test/syscalls/linux/socket_ipv4_udp_unbound_loopback_netlink.cc32
-rw-r--r--test/syscalls/linux/socket_ipv4_udp_unbound_loopback_nogotsan.cc89
-rw-r--r--test/syscalls/linux/socket_ipv4_udp_unbound_netlink.cc202
-rw-r--r--test/syscalls/linux/socket_ipv4_udp_unbound_netlink.h29
-rw-r--r--test/syscalls/linux/socket_ipv6_udp_unbound.cc127
-rw-r--r--test/syscalls/linux/socket_ipv6_udp_unbound.h29
-rw-r--r--test/syscalls/linux/socket_ipv6_udp_unbound_external_networking.cc87
-rw-r--r--test/syscalls/linux/socket_ipv6_udp_unbound_external_networking.h31
-rw-r--r--test/syscalls/linux/socket_ipv6_udp_unbound_external_networking_test.cc39
-rw-r--r--test/syscalls/linux/socket_ipv6_udp_unbound_loopback.cc32
-rw-r--r--test/syscalls/linux/socket_ipv6_udp_unbound_loopback_netlink.cc32
-rw-r--r--test/syscalls/linux/socket_ipv6_udp_unbound_netlink.cc53
-rw-r--r--test/syscalls/linux/socket_ipv6_udp_unbound_netlink.h29
-rw-r--r--test/syscalls/linux/socket_netdevice.cc212
-rw-r--r--test/syscalls/linux/socket_netlink.cc153
-rw-r--r--test/syscalls/linux/socket_netlink_route.cc971
-rw-r--r--test/syscalls/linux/socket_netlink_route_util.cc223
-rw-r--r--test/syscalls/linux/socket_netlink_route_util.h68
-rw-r--r--test/syscalls/linux/socket_netlink_uevent.cc83
-rw-r--r--test/syscalls/linux/socket_netlink_util.cc198
-rw-r--r--test/syscalls/linux/socket_netlink_util.h70
-rw-r--r--test/syscalls/linux/socket_non_blocking.cc62
-rw-r--r--test/syscalls/linux/socket_non_blocking.h29
-rw-r--r--test/syscalls/linux/socket_non_stream.cc337
-rw-r--r--test/syscalls/linux/socket_non_stream.h29
-rw-r--r--test/syscalls/linux/socket_non_stream_blocking.cc85
-rw-r--r--test/syscalls/linux/socket_non_stream_blocking.h30
-rw-r--r--test/syscalls/linux/socket_stream.cc178
-rw-r--r--test/syscalls/linux/socket_stream.h30
-rw-r--r--test/syscalls/linux/socket_stream_blocking.cc163
-rw-r--r--test/syscalls/linux/socket_stream_blocking.h30
-rw-r--r--test/syscalls/linux/socket_stream_nonblock.cc49
-rw-r--r--test/syscalls/linux/socket_stream_nonblock.h30
-rw-r--r--test/syscalls/linux/socket_test_util.cc1117
-rw-r--r--test/syscalls/linux/socket_test_util.h591
-rw-r--r--test/syscalls/linux/socket_test_util_impl.cc28
-rw-r--r--test/syscalls/linux/socket_unix.cc274
-rw-r--r--test/syscalls/linux/socket_unix.h29
-rw-r--r--test/syscalls/linux/socket_unix_abstract_nonblock.cc39
-rw-r--r--test/syscalls/linux/socket_unix_blocking_local.cc45
-rw-r--r--test/syscalls/linux/socket_unix_cmsg.cc1501
-rw-r--r--test/syscalls/linux/socket_unix_cmsg.h30
-rw-r--r--test/syscalls/linux/socket_unix_dgram.cc80
-rw-r--r--test/syscalls/linux/socket_unix_dgram.h29
-rw-r--r--test/syscalls/linux/socket_unix_dgram_local.cc58
-rw-r--r--test/syscalls/linux/socket_unix_dgram_non_blocking.cc57
-rw-r--r--test/syscalls/linux/socket_unix_domain.cc39
-rw-r--r--test/syscalls/linux/socket_unix_filesystem_nonblock.cc39
-rw-r--r--test/syscalls/linux/socket_unix_non_stream.cc256
-rw-r--r--test/syscalls/linux/socket_unix_non_stream.h30
-rw-r--r--test/syscalls/linux/socket_unix_non_stream_blocking_local.cc42
-rw-r--r--test/syscalls/linux/socket_unix_pair.cc44
-rw-r--r--test/syscalls/linux/socket_unix_pair_nonblock.cc39
-rw-r--r--test/syscalls/linux/socket_unix_seqpacket.cc115
-rw-r--r--test/syscalls/linux/socket_unix_seqpacket.h30
-rw-r--r--test/syscalls/linux/socket_unix_seqpacket_local.cc58
-rw-r--r--test/syscalls/linux/socket_unix_stream.cc236
-rw-r--r--test/syscalls/linux/socket_unix_stream_blocking_local.cc40
-rw-r--r--test/syscalls/linux/socket_unix_stream_local.cc48
-rw-r--r--test/syscalls/linux/socket_unix_stream_nonblock_local.cc39
-rw-r--r--test/syscalls/linux/socket_unix_unbound_abstract.cc162
-rw-r--r--test/syscalls/linux/socket_unix_unbound_dgram.cc183
-rw-r--r--test/syscalls/linux/socket_unix_unbound_filesystem.cc100
-rw-r--r--test/syscalls/linux/socket_unix_unbound_seqpacket.cc89
-rw-r--r--test/syscalls/linux/socket_unix_unbound_stream.cc733
-rw-r--r--test/syscalls/linux/splice.cc939
-rw-r--r--test/syscalls/linux/stat.cc799
-rw-r--r--test/syscalls/linux/stat_times.cc303
-rw-r--r--test/syscalls/linux/statfs.cc89
-rw-r--r--test/syscalls/linux/sticky.cc157
-rw-r--r--test/syscalls/linux/symlink.cc472
-rw-r--r--test/syscalls/linux/sync.cc69
-rw-r--r--test/syscalls/linux/sync_file_range.cc112
-rw-r--r--test/syscalls/linux/sysinfo.cc86
-rw-r--r--test/syscalls/linux/syslog.cc51
-rw-r--r--test/syscalls/linux/sysret.cc142
-rw-r--r--test/syscalls/linux/tcp_socket.cc2097
-rw-r--r--test/syscalls/linux/tgkill.cc48
-rw-r--r--test/syscalls/linux/time.cc107
-rw-r--r--test/syscalls/linux/timerfd.cc273
-rw-r--r--test/syscalls/linux/timers.cc572
-rw-r--r--test/syscalls/linux/tkill.cc75
-rw-r--r--test/syscalls/linux/truncate.cc248
-rw-r--r--test/syscalls/linux/tuntap.cc538
-rw-r--r--test/syscalls/linux/tuntap_hostinet.cc38
-rw-r--r--test/syscalls/linux/udp_bind.cc311
-rw-r--r--test/syscalls/linux/udp_socket.cc2100
-rw-r--r--test/syscalls/linux/uidgid.cc303
-rw-r--r--test/syscalls/linux/uname.cc110
-rw-r--r--test/syscalls/linux/unix_domain_socket_test_util.cc351
-rw-r--r--test/syscalls/linux/unix_domain_socket_test_util.h162
-rw-r--r--test/syscalls/linux/unlink.cc228
-rw-r--r--test/syscalls/linux/unshare.cc50
-rw-r--r--test/syscalls/linux/utimes.cc320
-rw-r--r--test/syscalls/linux/vdso.cc48
-rw-r--r--test/syscalls/linux/vdso_clock_gettime.cc83
-rw-r--r--test/syscalls/linux/verity_getdents.cc95
-rw-r--r--test/syscalls/linux/verity_ioctl.cc244
-rw-r--r--test/syscalls/linux/verity_mmap.cc158
-rw-r--r--test/syscalls/linux/verity_mount.cc57
-rw-r--r--test/syscalls/linux/verity_symlink.cc117
-rw-r--r--test/syscalls/linux/vfork.cc195
-rw-r--r--test/syscalls/linux/vsyscall.cc46
-rw-r--r--test/syscalls/linux/wait.cc913
-rw-r--r--test/syscalls/linux/write.cc340
-rw-r--r--test/syscalls/linux/xattr.cc709
-rw-r--r--test/uds/BUILD17
-rw-r--r--test/uds/uds.go228
-rw-r--r--test/util/BUILD416
-rw-r--r--test/util/capability_util.cc85
-rw-r--r--test/util/capability_util.h128
-rw-r--r--test/util/cgroup_util.cc250
-rw-r--r--test/util/cgroup_util.h123
-rw-r--r--test/util/cleanup.h61
-rw-r--r--test/util/epoll_util.cc52
-rw-r--r--test/util/epoll_util.h36
-rw-r--r--test/util/eventfd_util.h43
-rw-r--r--test/util/file_descriptor.h134
-rw-r--r--test/util/fs_util.cc727
-rw-r--r--test/util/fs_util.h237
-rw-r--r--test/util/fs_util_test.cc105
-rw-r--r--test/util/fuse_util.cc63
-rw-r--r--test/util/fuse_util.h75
-rw-r--r--test/util/logging.cc95
-rw-r--r--test/util/logging.h117
-rw-r--r--test/util/memory_util.h147
-rw-r--r--test/util/mount_util.cc176
-rw-r--r--test/util/mount_util.h99
-rw-r--r--test/util/mount_util_test.cc47
-rw-r--r--test/util/multiprocess_util.cc176
-rw-r--r--test/util/multiprocess_util.h133
-rw-r--r--test/util/platform_util.cc48
-rw-r--r--test/util/platform_util.h56
-rw-r--r--test/util/posix_error.cc98
-rw-r--r--test/util/posix_error.h459
-rw-r--r--test/util/posix_error_test.cc46
-rw-r--r--test/util/proc_util.cc107
-rw-r--r--test/util/proc_util.h150
-rw-r--r--test/util/proc_util_test.cc81
-rw-r--r--test/util/pty_util.cc58
-rw-r--r--test/util/pty_util.h39
-rw-r--r--test/util/rlimit_util.cc45
-rw-r--r--test/util/rlimit_util.h32
-rw-r--r--test/util/save_util.cc65
-rw-r--r--test/util/save_util.h60
-rw-r--r--test/util/save_util_linux.cc49
-rw-r--r--test/util/save_util_other.cc31
-rw-r--r--test/util/signal_util.cc104
-rw-r--r--test/util/signal_util.h107
-rw-r--r--test/util/temp_path.cc164
-rw-r--r--test/util/temp_path.h135
-rw-r--r--test/util/temp_umask.h39
-rw-r--r--test/util/test_main.cc20
-rw-r--r--test/util/test_util.cc239
-rw-r--r--test/util/test_util.h816
-rw-r--r--test/util/test_util_impl.cc59
-rw-r--r--test/util/test_util_runfiles.cc46
-rw-r--r--test/util/test_util_test.cc251
-rw-r--r--test/util/thread_util.h93
-rw-r--r--test/util/time_util.cc41
-rw-r--r--test/util/time_util.h29
-rw-r--r--test/util/timer_util.cc45
-rw-r--r--test/util/timer_util.h169
-rw-r--r--test/util/uid_util.cc44
-rw-r--r--test/util/uid_util.h29
-rw-r--r--test/util/verity_util.cc102
-rw-r--r--test/util/verity_util.h85
-rw-r--r--tools/BUILD19
-rw-r--r--tools/bazel.mk219
-rw-r--r--tools/bazel_gazelle_generate.patch15
-rw-r--r--tools/bazeldefs/BUILD61
-rw-r--r--tools/bazeldefs/cc.bzl51
-rw-r--r--tools/bazeldefs/defs.bzl81
-rw-r--r--tools/bazeldefs/go.bzl159
-rw-r--r--tools/bazeldefs/pkg.bzl7
-rw-r--r--tools/bazeldefs/platforms.bzl9
-rw-r--r--tools/bazeldefs/tags.bzl60
-rw-r--r--tools/bigquery/BUILD18
-rw-r--r--tools/bigquery/bigquery.go160
-rw-r--r--tools/checkescape/BUILD16
-rw-r--r--tools/checkescape/checkescape.go881
-rw-r--r--tools/checkescape/test1/BUILD9
-rw-r--r--tools/checkescape/test1/test1.go192
-rw-r--r--tools/checkescape/test2/BUILD9
-rw-r--r--tools/checkescape/test2/test2.go89
-rw-r--r--tools/checklocks/BUILD21
-rw-r--r--tools/checklocks/README.md157
-rw-r--r--tools/checklocks/analysis.go628
-rw-r--r--tools/checklocks/annotations.go129
-rw-r--r--tools/checklocks/checklocks.go154
-rw-r--r--tools/checklocks/facts.go614
-rw-r--r--tools/checklocks/state.go315
-rw-r--r--tools/checklocks/test/BUILD20
-rw-r--r--tools/checklocks/test/alignment.go51
-rw-r--r--tools/checklocks/test/atomics.go91
-rw-r--r--tools/checklocks/test/basics.go145
-rw-r--r--tools/checklocks/test/branches.go56
-rw-r--r--tools/checklocks/test/closures.go100
-rw-r--r--tools/checklocks/test/defer.go38
-rw-r--r--tools/checklocks/test/incompat.go54
-rw-r--r--tools/checklocks/test/methods.go117
-rw-r--r--tools/checklocks/test/parameters.go48
-rw-r--r--tools/checklocks/test/return.go61
-rw-r--r--tools/checklocks/test/test.go64
-rw-r--r--tools/checkunsafe/BUILD13
-rw-r--r--tools/checkunsafe/check_unsafe.go56
-rw-r--r--tools/defs.bzl349
-rw-r--r--tools/deps.bzl119
-rw-r--r--tools/github/BUILD14
-rw-r--r--tools/github/main.go161
-rw-r--r--tools/github/reviver/BUILD27
-rw-r--r--tools/github/reviver/github.go178
-rw-r--r--tools/github/reviver/github_test.go55
-rw-r--r--tools/github/reviver/reviver.go192
-rw-r--r--tools/github/reviver/reviver_test.go97
-rwxr-xr-xtools/go_branch.sh171
-rw-r--r--tools/go_generics/BUILD20
-rw-r--r--tools/go_generics/defs.bzl127
-rw-r--r--tools/go_generics/globals/BUILD13
-rw-r--r--tools/go_generics/globals/globals_visitor.go597
-rw-r--r--tools/go_generics/globals/scope.go84
-rw-r--r--tools/go_generics/go_merge/BUILD12
-rw-r--r--tools/go_generics/go_merge/main.go150
-rw-r--r--tools/go_generics/imports.go158
-rw-r--r--tools/go_generics/main.go286
-rw-r--r--tools/go_generics/remove.go105
-rw-r--r--tools/go_generics/rules_tests/BUILD43
-rw-r--r--tools/go_generics/rules_tests/template.go42
-rw-r--r--tools/go_generics/rules_tests/template_test.go48
-rw-r--r--tools/go_generics/tests/BUILD7
-rw-r--r--tools/go_generics/tests/all_stmts/BUILD16
-rw-r--r--tools/go_generics/tests/all_stmts/input.go292
-rw-r--r--tools/go_generics/tests/all_stmts/output.go290
-rw-r--r--tools/go_generics/tests/all_types/BUILD16
-rw-r--r--tools/go_generics/tests/all_types/input.go45
-rw-r--r--tools/go_generics/tests/all_types/lib/lib.go18
-rw-r--r--tools/go_generics/tests/all_types/output.go43
-rw-r--r--tools/go_generics/tests/anon/BUILD18
-rw-r--r--tools/go_generics/tests/anon/input.go46
-rw-r--r--tools/go_generics/tests/anon/output.go42
-rw-r--r--tools/go_generics/tests/consts/BUILD23
-rw-r--r--tools/go_generics/tests/consts/input.go26
-rw-r--r--tools/go_generics/tests/consts/output.go26
-rw-r--r--tools/go_generics/tests/defs.bzl67
-rw-r--r--tools/go_generics/tests/imports/BUILD24
-rw-r--r--tools/go_generics/tests/imports/input.go24
-rw-r--r--tools/go_generics/tests/imports/output.go27
-rw-r--r--tools/go_generics/tests/remove_typedef/BUILD16
-rw-r--r--tools/go_generics/tests/remove_typedef/input.go37
-rw-r--r--tools/go_generics/tests/remove_typedef/output.go29
-rw-r--r--tools/go_generics/tests/simple/BUILD17
-rw-r--r--tools/go_generics/tests/simple/input.go45
-rw-r--r--tools/go_generics/tests/simple/output.go43
-rw-r--r--tools/go_marshal/BUILD24
-rw-r--r--tools/go_marshal/README.md145
-rw-r--r--tools/go_marshal/analysis/BUILD12
-rw-r--r--tools/go_marshal/analysis/analysis_unsafe.go179
-rw-r--r--tools/go_marshal/defs.bzl67
-rw-r--r--tools/go_marshal/gomarshal/BUILD22
-rw-r--r--tools/go_marshal/gomarshal/generator.go584
-rw-r--r--tools/go_marshal/gomarshal/generator_interfaces.go276
-rw-r--r--tools/go_marshal/gomarshal/generator_interfaces_array_newtype.go146
-rw-r--r--tools/go_marshal/gomarshal/generator_interfaces_dynamic.go96
-rw-r--r--tools/go_marshal/gomarshal/generator_interfaces_primitive_newtype.go285
-rw-r--r--tools/go_marshal/gomarshal/generator_interfaces_struct.go616
-rw-r--r--tools/go_marshal/gomarshal/generator_tests.go233
-rw-r--r--tools/go_marshal/gomarshal/util.go503
-rw-r--r--tools/go_marshal/main.go73
-rw-r--r--tools/go_marshal/test/BUILD52
-rw-r--r--tools/go_marshal/test/benchmark_test.go220
-rw-r--r--tools/go_marshal/test/dynamic.go83
-rw-r--r--tools/go_marshal/test/escape/BUILD14
-rw-r--r--tools/go_marshal/test/escape/escape.go100
-rw-r--r--tools/go_marshal/test/external/BUILD11
-rw-r--r--tools/go_marshal/test/external/external.go31
-rw-r--r--tools/go_marshal/test/marshal_test.go554
-rw-r--r--tools/go_marshal/test/test.go200
-rw-r--r--tools/go_stateify/BUILD16
-rw-r--r--tools/go_stateify/defs.bzl60
-rw-r--r--tools/go_stateify/main.go472
-rw-r--r--tools/images.mk169
-rw-r--r--tools/installers/BUILD32
-rwxr-xr-xtools/installers/containerd.sh126
-rwxr-xr-xtools/installers/head.sh27
-rwxr-xr-xtools/installers/images.sh24
-rwxr-xr-xtools/installers/master.sh34
-rwxr-xr-xtools/installers/shim.sh32
-rwxr-xr-xtools/make_apt.sh152
-rwxr-xr-xtools/make_release.sh95
-rw-r--r--tools/nogo/BUILD80
-rw-r--r--tools/nogo/README.md31
-rw-r--r--tools/nogo/analyzers.go127
-rw-r--r--tools/nogo/build.go33
-rw-r--r--tools/nogo/check/BUILD14
-rw-r--r--tools/nogo/check/main.go115
-rw-r--r--tools/nogo/config-schema.json97
-rw-r--r--tools/nogo/config.go311
-rw-r--r--tools/nogo/defs.bzl501
-rw-r--r--tools/nogo/filter/BUILD15
-rw-r--r--tools/nogo/filter/main.go190
-rw-r--r--tools/nogo/findings.go127
-rw-r--r--tools/nogo/nogo.go647
-rw-r--r--tools/nogo/objdump/BUILD10
-rw-r--r--tools/nogo/objdump/objdump.go96
-rw-r--r--tools/parsers/BUILD45
-rw-r--r--tools/parsers/go_parser.go150
-rw-r--r--tools/parsers/go_parser_test.go179
-rw-r--r--tools/parsers/parser_main.go147
-rw-r--r--tools/parsers/version.go21
-rw-r--r--tools/rules_go_symbols.patch14
-rw-r--r--tools/rules_go_visibility.patch22
-rwxr-xr-xtools/tag_release.sh84
-rw-r--r--tools/tags/BUILD11
-rw-r--r--tools/tags/tags.go89
-rw-r--r--tools/verity/BUILD15
-rw-r--r--tools/verity/measure_tool.go87
-rw-r--r--tools/verity/measure_tool_unsafe.go39
-rw-r--r--tools/worker/BUILD21
-rw-r--r--tools/worker/worker.go325
-rwxr-xr-xtools/workspace_status.sh18
-rw-r--r--tools/yamltest/BUILD13
-rw-r--r--tools/yamltest/defs.bzl41
-rw-r--r--tools/yamltest/main.go133
-rw-r--r--vdso/BUILD81
-rw-r--r--vdso/barrier.h49
-rw-r--r--vdso/check_vdso.py204
-rw-r--r--vdso/compiler.h29
-rw-r--r--vdso/cycle_clock.h51
-rw-r--r--vdso/seqlock.h39
-rw-r--r--vdso/syscalls.h100
-rw-r--r--vdso/vdso.cc155
-rw-r--r--vdso/vdso_amd64.lds102
-rw-r--r--vdso/vdso_arm64.lds99
-rw-r--r--vdso/vdso_time.cc159
-rw-r--r--vdso/vdso_time.h27
-rw-r--r--webhook/BUILD28
-rw-r--r--webhook/pkg/cli/BUILD17
-rw-r--r--webhook/pkg/cli/cli_state_autogen.go3
-rw-r--r--webhook/pkg/injector/BUILD34
-rw-r--r--webhook/pkg/injector/certs.go97
-rwxr-xr-xwebhook/pkg/injector/gencerts.sh71
-rw-r--r--webhook/pkg/injector/injector_state_autogen.go3
-rw-r--r--website/BUILD197
-rw-r--r--website/_config.yml46
-rw-r--r--website/_includes/byline.html18
-rw-r--r--website/_includes/footer-links.html43
-rw-r--r--website/_includes/footer.html50
-rw-r--r--website/_includes/graph.html205
-rw-r--r--website/_includes/header-links.html19
-rw-r--r--website/_includes/header.html30
-rw-r--r--website/_includes/paginator.html10
-rw-r--r--website/_includes/required_linux.html2
-rw-r--r--website/_layouts/base.html9
-rw-r--r--website/_layouts/blog.html17
-rw-r--r--website/_layouts/default.html14
-rw-r--r--website/_layouts/docs.html62
-rw-r--r--website/_layouts/post.html10
-rw-r--r--website/_plugins/svg_mime_type.rb3
-rw-r--r--website/_sass/footer.scss15
-rw-r--r--website/_sass/front.scss17
-rw-r--r--website/_sass/navbar.scss26
-rw-r--r--website/_sass/sidebar.scss61
-rw-r--r--website/_sass/style.scss154
-rw-r--r--website/archive.key29
-rw-r--r--website/assets/favicons/apple-touch-icon-180x180.pngbin18820 -> 0 bytes
-rw-r--r--website/assets/favicons/favicon-16x16.pngbin926 -> 0 bytes
-rw-r--r--website/assets/favicons/favicon-32x32.pngbin2308 -> 0 bytes
-rw-r--r--website/assets/favicons/favicon.icobin1150 -> 0 bytes
-rw-r--r--website/assets/favicons/pwa-192x192.pngbin20666 -> 0 bytes
-rw-r--r--website/assets/favicons/pwa-512x512.pngbin24397 -> 0 bytes
-rw-r--r--website/assets/favicons/tile150x150.pngbin18440 -> 0 bytes
-rw-r--r--website/assets/favicons/tile310x150.pngbin21486 -> 0 bytes
-rw-r--r--website/assets/favicons/tile310x310.pngbin25629 -> 0 bytes
-rw-r--r--website/assets/favicons/tile70x70.pngbin11148 -> 0 bytes
-rw-r--r--website/assets/images/2019-11-18-security-basics-figure1.pngbin19088 -> 0 bytes
-rw-r--r--website/assets/images/2019-11-18-security-basics-figure2.pngbin17642 -> 0 bytes
-rw-r--r--website/assets/images/2019-11-18-security-basics-figure3.pngbin16471 -> 0 bytes
-rw-r--r--website/assets/images/2020-04-02-networking-security-figure1.pngbin29775 -> 0 bytes
-rw-r--r--website/assets/images/2020-09-18-containing-a-real-vulnerability-figure1.pngbin48602 -> 0 bytes
-rw-r--r--website/assets/images/background.jpgbin1070364 -> 0 bytes
-rw-r--r--website/assets/images/background_1080p.jpgbin344285 -> 0 bytes
-rw-r--r--website/assets/logos/Makefile13
-rw-r--r--website/assets/logos/README.md10
-rw-r--r--website/assets/logos/logo_solo_monochrome.pngbin10483 -> 0 bytes
-rw-r--r--website/assets/logos/logo_solo_monochrome.svg73
-rw-r--r--website/assets/logos/logo_solo_on_dark-1024.pngbin59374 -> 0 bytes
-rw-r--r--website/assets/logos/logo_solo_on_dark-128.pngbin5951 -> 0 bytes
-rw-r--r--website/assets/logos/logo_solo_on_dark-16.pngbin701 -> 0 bytes
-rw-r--r--website/assets/logos/logo_solo_on_dark.pngbin8387 -> 0 bytes
-rw-r--r--website/assets/logos/logo_solo_on_dark.svg73
-rw-r--r--website/assets/logos/logo_solo_on_dark_full-1024.pngbin80121 -> 0 bytes
-rw-r--r--website/assets/logos/logo_solo_on_dark_full-128.pngbin8616 -> 0 bytes
-rw-r--r--website/assets/logos/logo_solo_on_dark_full-16.pngbin900 -> 0 bytes
-rw-r--r--website/assets/logos/logo_solo_on_dark_full.pngbin17055 -> 0 bytes
-rw-r--r--website/assets/logos/logo_solo_on_dark_full.svg79
-rw-r--r--website/assets/logos/logo_solo_on_white.pngbin10572 -> 0 bytes
-rw-r--r--website/assets/logos/logo_solo_on_white.svg73
-rw-r--r--website/assets/logos/logo_solo_on_white_bordered-1024.pngbin95350 -> 0 bytes
-rw-r--r--website/assets/logos/logo_solo_on_white_bordered-128.pngbin10231 -> 0 bytes
-rw-r--r--website/assets/logos/logo_solo_on_white_bordered-16.pngbin960 -> 0 bytes
-rw-r--r--website/assets/logos/logo_solo_on_white_bordered.pngbin15330 -> 0 bytes
-rw-r--r--website/assets/logos/logo_solo_on_white_bordered.svg82
-rw-r--r--website/assets/logos/logo_with_text_monochrome.pngbin22220 -> 0 bytes
-rw-r--r--website/assets/logos/logo_with_text_monochrome.svg116
-rw-r--r--website/assets/logos/logo_with_text_on_dark-1024.pngbin30774 -> 0 bytes
-rw-r--r--website/assets/logos/logo_with_text_on_dark-128.pngbin3129 -> 0 bytes
-rw-r--r--website/assets/logos/logo_with_text_on_dark-16.pngbin315 -> 0 bytes
-rw-r--r--website/assets/logos/logo_with_text_on_dark.pngbin17035 -> 0 bytes
-rw-r--r--website/assets/logos/logo_with_text_on_dark.svg116
-rw-r--r--website/assets/logos/logo_with_text_on_dark_full-1024.pngbin34866 -> 0 bytes
-rw-r--r--website/assets/logos/logo_with_text_on_dark_full-128.pngbin3746 -> 0 bytes
-rw-r--r--website/assets/logos/logo_with_text_on_dark_full-16.pngbin372 -> 0 bytes
-rw-r--r--website/assets/logos/logo_with_text_on_dark_full.pngbin25956 -> 0 bytes
-rw-r--r--website/assets/logos/logo_with_text_on_dark_full.svg120
-rw-r--r--website/assets/logos/logo_with_text_on_white.pngbin22363 -> 0 bytes
-rw-r--r--website/assets/logos/logo_with_text_on_white.svg116
-rw-r--r--website/assets/logos/logo_with_text_on_white_bordered.pngbin27719 -> 0 bytes
-rw-r--r--website/assets/logos/logo_with_text_on_white_bordered.svg122
-rw-r--r--website/assets/logos/powered-gvisor.pngbin5193 -> 0 bytes
-rw-r--r--website/blog/2019-11-18-security-basics.md310
-rw-r--r--website/blog/2020-04-02-networking-security.md183
-rw-r--r--website/blog/2020-09-18-containing-a-real-vulnerability.md226
-rw-r--r--website/blog/2020-10-22-platform-portability.md120
-rw-r--r--website/blog/BUILD58
-rw-r--r--website/blog/README.md62
-rw-r--r--website/blog/index.html27
-rw-r--r--website/cmd/server/BUILD13
-rw-r--r--website/cmd/server/main.go393
-rw-r--r--website/cmd/syscalldocs/BUILD9
-rw-r--r--website/cmd/syscalldocs/main.go237
-rw-r--r--website/css/main.scss10
-rw-r--r--website/defs.bzl186
-rw-r--r--website/index.md50
-rw-r--r--website/performance/README.md10
-rw-r--r--website/performance/applications.csv13
-rw-r--r--website/performance/density.csv9
-rw-r--r--website/performance/ffmpeg.csv3
-rw-r--r--website/performance/fio-tmpfs.csv9
-rw-r--r--website/performance/fio.csv9
-rw-r--r--website/performance/httpd100k.csv17
-rw-r--r--website/performance/httpd10240k.csv17
-rw-r--r--website/performance/iperf.csv5
-rw-r--r--website/performance/redis.csv35
-rw-r--r--website/performance/startup.csv7
-rw-r--r--website/performance/sysbench-cpu.csv3
-rw-r--r--website/performance/sysbench-memory.csv3
-rw-r--r--website/performance/syscall.csv4
-rw-r--r--website/performance/tensorflow.csv3
2143 files changed, 90287 insertions, 322448 deletions
diff --git a/.bazelignore b/.bazelignore
deleted file mode 100644
index 511b10433..000000000
--- a/.bazelignore
+++ /dev/null
@@ -1 +0,0 @@
-bazel-gvisor
diff --git a/.bazelrc b/.bazelrc
deleted file mode 100644
index 413cee3b0..000000000
--- a/.bazelrc
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Ensure a strong hash function.
-startup --host_jvm_args=-Dbazel.DigestFunction=SHA256
-
-# Build with C++17.
-build --cxxopt=-std=c++17
-
-# Display the current git revision in the info block.
-build --stamp --workspace_status_command tools/workspace_status.sh
-
-# Set flags for aarch64.
-build:cross-aarch64 --crosstool_top=@crosstool//:toolchains --compiler=gcc
-build:cross-aarch64 --cpu=aarch64
-build:cross-aarch64 --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64
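For context, the cross-aarch64 config group deleted here is consumed by passing --config to Bazel; a minimal sketch of how it was invoked, either directly or through the Makefile wrapper (the build target is just one example from this tree, and the second command mirrors the release step in the pipeline further down, which this diff also removes):

    # Direct invocation of the removed config group (sketch only; any target works).
    bazel build --config=cross-aarch64 //runsc:runsc
    # The same flags reach Bazel through the Makefile wrapper used by the release step:
    make BAZEL_OPTIONS=--config=cross-aarch64 artifacts/aarch64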
diff --git a/.buildkite/hooks/post-command b/.buildkite/hooks/post-command
deleted file mode 100644
index 5e6db407a..000000000
--- a/.buildkite/hooks/post-command
+++ /dev/null
@@ -1,65 +0,0 @@
-# Upload all relevant test failures.
-make -s testlogs 2>/dev/null | grep // | sort | uniq | (
- declare log_count=0
- while read target log; do
- if test -z "${target}"; then
- continue
- fi
-
- # N.B. If *all* tests fail due to some common cause, then we will
- # end up spending way too much time uploading logs. Instead, we just
- # upload the first 10 and stop. That is hopefully enough to debug.
- #
- # We include this test in the metadata, but note that we cannot
- # upload the actual test logs. The user should rerun locally.
- log_count=$((${log_count}+1))
- if test "${log_count}" -ge 10; then
- echo " * ${target} (no upload)" | \
- buildkite-agent annotate --style error --context failures --append
- else
- buildkite-agent artifact upload "${log}"
- echo " * [${target}](artifact://${log#/}) (${BUILDKITE_LABEL})" | \
- buildkite-agent annotate --style error --context failures --append
- fi
- done
-)
-
-# Upload all profiles, and include in an annotation.
-declare profile_output=$(mktemp --tmpdir)
-for file in $(find /tmp/profile -name \*.pprof -print 2>/dev/null | sort); do
- # Generate a link to the profile parsing function in gvisor.dev, which
- # implicitly uses a prefix of https://storage.googleapis.com. Note that
- # this relies on the specific BuildKite bucket location, and will break if
- # this changes (although the artifacts will still exist and be just fine).
- profile_name="${file#/tmp/profile/}"
- profile_url="https://gvisor.dev/profile/gvisor-buildkite/${BUILDKITE_BUILD_ID}/${BUILDKITE_JOB_ID}/${file#/}/"
- buildkite-agent artifact upload "${file}"
- echo "<li><a href='${profile_url}'>${profile_name}</a></li>" >> "${profile_output}"
-done
-
-# Upload if we had outputs.
-if test -s "${profile_output}"; then
- # Make the list a collapsible section in markdown.
- sed -i "1s|^|<details><summary>${BUILDKITE_LABEL}</summary><ul>\n|" "${profile_output}"
- echo "</ul></details>" >> "${profile_output}"
- cat "${profile_output}" | buildkite-agent annotate --style info --context profiles --append
-fi
-rm -rf "${profile_output}"
-
-# Clean the bazel cache, if there's failure.
-if test "${BUILDKITE_COMMAND_EXIT_STATUS}" -ne "0"; then
- # Attempt to clear the cache and shut down.
- make clean || echo "make clean failed with code $?"
- make bazel-shutdown || echo "make bazel-shutdown failed with code $?"
- # Attempt to clear any Go cache.
- sudo rm -rf "${HOME}/.cache/go-build"
- sudo rm -rf "${HOME}/go"
-fi
-
-# Kill any running containers (clear state), except for "bootstrap".
-for container in $(docker ps -q); do
- maybe_kill="$(docker inspect -f '{{if ne "/bootstrap" .Name}}true{{ end }}' "${container}")"
- if test -n "${maybe_kill}"; then
- docker container kill "${container}"
- fi
-done
\ No newline at end of file
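For reference, the profile link assembled in this hook simply nests the BuildKite build and job IDs and the slash-stripped artifact path under gvisor.dev/profile; a runnable sketch of the same expansion, with invented placeholder values:

    # Sketch only: the file path and both IDs below are hypothetical.
    file=/tmp/profile/ptrace/profile.pprof
    BUILDKITE_BUILD_ID=1234
    BUILDKITE_JOB_ID=abcd
    echo "https://gvisor.dev/profile/gvisor-buildkite/${BUILDKITE_BUILD_ID}/${BUILDKITE_JOB_ID}/${file#/}/"
    # Prints: https://gvisor.dev/profile/gvisor-buildkite/1234/abcd/tmp/profile/ptrace/profile.pprof/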
diff --git a/.buildkite/hooks/pre-command b/.buildkite/hooks/pre-command
deleted file mode 100644
index 9eb6d38f4..000000000
--- a/.buildkite/hooks/pre-command
+++ /dev/null
@@ -1,34 +0,0 @@
-# Install packages we need. Docker must be installed and configured,
-# as should Go itself. We just install some extra bits and pieces.
-function install_pkgs() {
- while true; do
- if sudo apt-get update && sudo apt-get install -y "$@"; then
- break
- fi
- done
-}
-install_pkgs make "linux-headers-$(uname -r)" linux-libc-dev \
- graphviz jq curl binutils gnupg gnupg-agent gcc pkg-config \
- apt-transport-https ca-certificates software-properties-common
-
-# Setup for parallelization with PARTITION and TOTAL_PARTITIONS.
-export PARTITION=${BUILDKITE_PARALLEL_JOB:-0}
-PARTITION=$((${PARTITION}+1)) # 1-indexed, but PARALLEL_JOB is 0-indexed.
-export TOTAL_PARTITIONS=${BUILDKITE_PARALLEL_JOB_COUNT:-1}
-
-# Ensure Docker has experimental enabled.
-EXPERIMENTAL=$(sudo docker version --format='{{.Server.Experimental}}')
-if test "${EXPERIMENTAL}" != "true"; then
- make sudo TARGETS=//runsc:runsc ARGS="install --experimental=true"
- sudo systemctl restart docker
-fi
-
-# Helper for benchmarks, based on the branch.
-if test "${BUILDKITE_BRANCH}" = "master"; then
- export BENCHMARKS_OFFICIAL=true
-else
- export BENCHMARKS_OFFICIAL=false
-fi
-
-# Clear existing profiles.
-sudo rm -rf /tmp/profile
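The PARTITION and TOTAL_PARTITIONS exports above pair with the parallelism settings in the pipeline that follows; the consuming side is not part of this diff, but a hypothetical sharding loop (list_tests and run_test are invented helpers) would look like this:

    # Illustrative only: run every TOTAL_PARTITIONS-th item, selected by the 1-indexed PARTITION.
    i=0
    for t in $(list_tests); do
      i=$((i+1))
      if test $(( (i - 1) % TOTAL_PARTITIONS )) -eq $(( PARTITION - 1 )); then
        run_test "${t}"
      fi
    done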
diff --git a/.buildkite/pipeline.yaml b/.buildkite/pipeline.yaml
deleted file mode 100644
index c1b478dc3..000000000
--- a/.buildkite/pipeline.yaml
+++ /dev/null
@@ -1,229 +0,0 @@
-_templates:
- common: &common
- timeout_in_minutes: 30
- retry:
- automatic:
- - exit_status: -1
- limit: 10
- - exit_status: "*"
- limit: 2
- benchmarks: &benchmarks
- timeout_in_minutes: 120
- retry:
- automatic: false
- soft_fail: true
- if: build.branch == "master"
- env:
- # BENCHMARKS_OFFICIAL is set from hooks/pre-command, based
- # on whether this is executing on the master branch.
- BENCHMARKS_DATASET: buildkite
- BENCHMARKS_PLATFORMS: "ptrace kvm"
- BENCHMARKS_PROJECT: gvisor-benchmarks
- BENCHMARKS_TABLE: benchmarks
- BENCHMARKS_UPLOAD: true
-
-steps:
-  # Run basic smoke tests before proceeding to other tests.
- - <<: *common
- label: ":fire: Smoke tests"
- command: make smoke-tests
- - wait
-
- # Check that the Go branch builds.
- - <<: *common
- label: ":golang: Go branch"
- commands:
- - tools/go_branch.sh
- - git checkout go && git clean -xf .
- - go build ./...
-
- # Release workflow.
- - <<: *common
- label: ":ship: Release tests"
- commands:
- - make artifacts/x86_64
- - make BAZEL_OPTIONS=--config=cross-aarch64 artifacts/aarch64
- - make release
-
- # Images tests.
- - <<: *common
- label: ":docker: Images (x86_64)"
- command: make ARCH=x86_64 load-all-images
- - <<: *common
- label: ":docker: Images (aarch64)"
- command: make ARCH=aarch64 load-all-images
-
- # Basic unit tests.
- - <<: *common
- label: ":golang: Nogo tests"
- command: make nogo-tests
- - <<: *common
- label: ":test_tube: Unit tests"
- command: make unit-tests
- - <<: *common
- label: ":test_tube: runsc tests"
- command: make runsc-tests
-
- # All system call tests.
- - <<: *common
- label: ":toolbox: System call tests"
- command: make syscall-tests
- parallelism: 20
-
- # Integration tests.
- - <<: *common
- label: ":docker: Docker tests"
- command: make docker-tests
- - <<: *common
- label: ":goggles: Overlay tests"
- command: make overlay-tests
- - <<: *common
- label: ":safety_pin: Host network tests"
- command: make hostnet-tests
- - <<: *common
- label: ":satellite: SWGSO tests"
- command: make swgso-tests
- - <<: *common
- label: ":coffee: Do tests"
- command: make do-tests
- - <<: *common
- label: ":person_in_lotus_position: KVM tests"
- command: make kvm-tests
- - <<: *common
- label: ":weight_lifter: Fsstress test"
- command: make fsstress-test
- - <<: *common
- label: ":docker: Containerd 1.3.9 tests"
- command: make containerd-test-1.3.9
- - <<: *common
- label: ":docker: Containerd 1.4.3 tests"
- command: make containerd-test-1.4.3
-
- # Check the website builds.
- - <<: *common
- label: ":earth_americas: Website tests"
- command: make website-build
-
- # Networking tests.
- - <<: *common
- label: ":table_tennis_paddle_and_ball: IPTables tests"
- command: make iptables-tests
- - <<: *common
- label: ":construction_worker: Packetdrill tests"
- command: make packetdrill-tests
- - <<: *common
- label: ":hammer: Packetimpact tests"
- command: make packetimpact-tests
-
- # Runtime tests.
- - <<: *common
- label: ":php: PHP runtime tests"
- command: make php7.3.6-runtime-tests_vfs2
- parallelism: 10
- - <<: *common
- label: ":java: Java runtime tests"
- command: make java11-runtime-tests_vfs2
- parallelism: 40
- - <<: *common
- label: ":golang: Go runtime tests"
- command: make go1.12-runtime-tests_vfs2
- parallelism: 10
- - <<: *common
- label: ":node: NodeJS runtime tests"
- command: make nodejs12.4.0-runtime-tests_vfs2
- parallelism: 10
- - <<: *common
- label: ":python: Python runtime tests"
- command: make python3.7.3-runtime-tests_vfs2
- parallelism: 10
-
- # Runtime tests (VFS1).
- - <<: *common
- label: ":php: PHP runtime tests (VFS1)"
- command: make php7.3.6-runtime-tests
- parallelism: 10
- if: build.message =~ /VFS1/ || build.branch == "master"
- - <<: *common
- label: ":java: Java runtime tests (VFS1)"
- command: make java11-runtime-tests
- parallelism: 40
- if: build.message =~ /VFS1/ || build.branch == "master"
- - <<: *common
- label: ":golang: Go runtime tests (VFS1)"
- command: make go1.12-runtime-tests
- parallelism: 10
- if: build.message =~ /VFS1/ || build.branch == "master"
- - <<: *common
- label: ":node: NodeJS runtime tests (VFS1)"
- command: make nodejs12.4.0-runtime-tests
- parallelism: 10
- if: build.message =~ /VFS1/ || build.branch == "master"
- - <<: *common
- label: ":python: Python runtime tests (VFS1)"
- command: make python3.7.3-runtime-tests
- parallelism: 10
- if: build.message =~ /VFS1/ || build.branch == "master"
-
- # ARM tests.
- - <<: *common
- label: ":mechanical_arm: ARM"
- command: make arm-qemu-smoke-test
-
- # Run basic benchmarks smoke tests (no upload).
- - <<: *common
- label: ":fire: Benchmarks smoke test"
- command: make benchmark-platforms
- # Use the opposite of the benchmarks filter.
- if: build.branch != "master"
-
- # Run all benchmarks.
- - <<: *benchmarks
- label: ":bazel: ABSL build benchmarks"
- command: make benchmark-platforms BENCHMARKS_FILTER="ABSL/page_cache.clean" BENCHMARKS_SUITE=absl BENCHMARKS_TARGETS=test/benchmarks/fs:bazel_test
- - <<: *benchmarks
- label: ":go: runsc build benchmarks"
- command: make benchmark-platforms BENCHMARKS_FILTER="Runsc/page_cache.clean/filesystem.bind" BENCHMARKS_SUITE=runsc BENCHMARKS_TARGETS=test/benchmarks/fs:bazel_test
- - <<: *benchmarks
- label: ":metal: FFMPEG benchmarks"
- command: make benchmark-platforms BENCHMARKS_SUITE=ffmpeg BENCHMARKS_TARGETS=test/benchmarks/media:ffmpeg_test
- # For fio, running with --test.benchtime=Xs scales the written/read
- # bytes to several GB. This is not a problem for root/bind/volume mounts,
- # but for tmpfs mounts, the size can grow to more memory than the machine
- # has available. Fix the runs to 1GB written/read for the benchmark.
- - <<: *benchmarks
- label: ":floppy_disk: FIO benchmarks (read/write)"
- command: make benchmark-platforms BENCHMARKS_SUITE=fio BENCHMARKS_TARGETS=test/benchmarks/fs:fio_test BENCHMARKS_FILTER=Fio/operation\.[rw][er] BENCHMARKS_OPTIONS=--test.benchtime=1000x
- # For rand(read|write) fio benchmarks, running 15s does not overwhelm the system for tmpfs mounts.
- - <<: *benchmarks
- label: ":cd: FIO benchmarks (randread/randwrite)"
- command: make benchmark-platforms BENCHMARKS_SUITE=fio BENCHMARKS_TARGETS=test/benchmarks/fs:fio_test BENCHMARKS_FILTER=Fio/operation\.rand BENCHMARKS_OPTIONS=--test.benchtime=15s
- - <<: *benchmarks
- label: ":globe_with_meridians: HTTPD benchmarks"
- command: make benchmark-platforms BENCHMARKS_FILTER="Continuous" BENCHMARKS_SUITE=httpd BENCHMARKS_TARGETS=test/benchmarks/network:httpd_test
- - <<: *benchmarks
- label: ":piedpiper: iperf benchmarks"
- command: make benchmark-platforms BENCHMARKS_SUITE=iperf BENCHMARKS_TARGETS=test/benchmarks/network:iperf_test
- - <<: *benchmarks
- label: ":nginx: nginx benchmarks"
- command: make benchmark-platforms BENCHMARKS_FILTER="Continuous" BENCHMARKS_SUITE=nginx BENCHMARKS_TARGETS=test/benchmarks/network:nginx_test
- - <<: *benchmarks
- label: ":node: node benchmarks"
- command: make benchmark-platforms BENCHMARKS_SUITE=node BENCHMARKS_TARGETS=test/benchmarks/network:node_test
- - <<: *benchmarks
- label: ":redis: Redis benchmarks"
- command: make benchmark-platforms BENCHMARKS_SUITE=redis BENCHMARKS_TARGETS=test/benchmarks/database:redis_test BENCHMARKS_OPTIONS=-test.benchtime=15s
- - <<: *benchmarks
- label: ":ruby: Ruby benchmarks"
- command: make benchmark-platforms BENCHMARKS_SUITE=ruby BENCHMARKS_TARGETS=test/benchmarks/network:ruby_test
- - <<: *benchmarks
- label: ":weight_lifter: Size benchmarks"
- command: make benchmark-platforms BENCHMARKS_SUITE=size BENCHMARKS_TARGETS=test/benchmarks/base:size_test
- - <<: *benchmarks
- label: ":speedboat: Startup benchmarks"
- command: make benchmark-platforms BENCHMARKS_SUITE=startup BENCHMARKS_TARGETS=test/benchmarks/base:startup_test
- - <<: *benchmarks
- label: ":computer: sysbench benchmarks"
- command: make benchmark-platforms BENCHMARKS_SUITE=sysbench BENCHMARKS_TARGETS=test/benchmarks/base:sysbench_test
- - <<: *benchmarks
- label: ":tensorflow: TensorFlow benchmarks"
- command: make benchmark-platforms BENCHMARKS_SUITE=tensorflow BENCHMARKS_TARGETS=test/benchmarks/ml:tensorflow_test
diff --git a/.devcontainer.json b/.devcontainer.json
deleted file mode 100644
index 6f7fe4bf8..000000000
--- a/.devcontainer.json
+++ /dev/null
@@ -1,9 +0,0 @@
-{
- "dockerFile": "images/default/Dockerfile",
- "overrideCommand": true,
- "mounts": ["source=/var/run/docker.sock,target=/var/run/docker-host.sock,type=bind"],
- "runArgs": ["--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined"],
- "extensions": [
- "bazelbuild.vscode-bazel"
- ]
-}
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
deleted file mode 100644
index f096ad598..000000000
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ /dev/null
@@ -1,67 +0,0 @@
-name: Bug report
-description: Create a bug report to help us improve
-labels:
- - 'type: bug'
-body:
- - type: textarea
- id: description
- attributes:
- label: Description
- description: >
- A clear description of the bug. If possible, explicitly indicate the
- expected behavior vs. the observed behavior.
- placeholder: Describe the problem.
- validations:
- required: true
- - type: textarea
- id: repro
- attributes:
- label: Steps to reproduce
- description: >
- If available, please include detailed reproduction steps.
-
- If the bug requires software that is not publicly available, see if it
- can be reproduced with software that is publicly available.
- placeholder: How can others reproduce the issue?
- - type: markdown
- attributes:
- value: |
- # Environment
-
- Please include the following details of your environment.
- - type: textarea
- id: runscVersion
- attributes:
- label: "runsc version"
- placeholder: "`runsc -version`"
- render: shell
- - type: textarea
- id: docker
- attributes:
- label: "docker version (if using docker)"
- placeholder: "`docker version` or `docker info`"
- render: shell
- - type: input
- id: uname
- attributes:
- label: "uname"
- placeholder: "`uname -a`"
- - type: textarea
- id: kubectl
- attributes:
- label: "kubectl (if using Kubernetes)"
- placeholder: "`kubectl version` and `kubectl get nodes`"
- render: shell
- - type: input
- id: gitDescribe
- attributes:
- label: "repo state (if built from source)"
- placeholder: "`git describe`"
- - type: textarea
- id: runscLogs
- attributes:
- label: "runsc debug logs (if available)"
- description: >
- See the [debug guide](https://gvisor.dev/docs/user_guide/debugging/)
- to learn about logging.
- render: shell
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
deleted file mode 100644
index 772c9a0ac..000000000
--- a/.github/ISSUE_TEMPLATE/config.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-blank_issues_enabled: false
-contact_links:
- - name: gVisor Documentation (FAQ)
- url: https://gvisor.dev/docs/user_guide/faq/
- about: Please see our documentation for common questions and answers.
- - name: gVisor Documentation (Debugging)
- url: https://gvisor.dev/docs/user_guide/debugging/
- about: Please see our documentation for debugging tips.
- - name: gVisor User Forum
- url: https://groups.google.com/g/gvisor-users
- about: Ask and answer general questions here.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
deleted file mode 100644
index 5073aba8b..000000000
--- a/.github/ISSUE_TEMPLATE/feature_request.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-name: Feature request
-description: Suggest an idea or improvement
-labels:
- - 'type: enhancement'
-body:
- - type: textarea
- id: description
- attributes:
- label: Description
- placeholder: A clear description of the feature or enhancement.
- validations:
- required: true
- - type: textarea
- id: related
- attributes:
- label: Is this feature related to a specific bug?
- description: Please include a bug reference if yes.
- - type: textarea
- id: solution
- attributes:
- label: Do you have a specific solution in mind?
- description: >
- Please include any details about a solution that you have in mind,
- including any alternatives considered.
diff --git a/.github/labeler.yml b/.github/labeler.yml
deleted file mode 100644
index b6a17051c..000000000
--- a/.github/labeler.yml
+++ /dev/null
@@ -1,42 +0,0 @@
-"arch: arm":
- - "**/*_arm64.*"
- - "**/*_aarch64.*"
-"arch: x86_64":
- - "**/*_amd64.*"
- - "**/*_x86.*"
-"area: bazel":
- - "**/BUILD"
- - "**/*.bzl"
-"area: docs":
- - "**/g3doc/**"
- - "**/README.md"
-"area: filesystem":
- - "pkg/sentry/fs/**"
- - "pkg/sentry/vfs/**"
- - "pkg/sentry/fsimpl/**"
-"area: hostinet":
- - "pkg/sentry/socket/hostinet/**"
-"area: networking":
- - "pkg/tcpip/**"
- - "pkg/sentry/socket/**"
-"area: kernel":
- - "pkg/sentry/arch/**"
- - "pkg/sentry/kernel/**"
- - "pkg/sentry/syscalls/**"
-"area: mm":
- - "pkg/sentry/mm/**"
-"area: tests":
- - "**/tests/**"
- - "**/*_test.go"
- - "**/test/**"
-"area: tooling":
- - "tools/**"
-"dependencies":
- - "WORKSPACE"
- - "go.mod"
- - "go.sum"
-"platform: kvm":
- - "pkg/sentry/platform/kvm/**"
- - "pkg/sentry/platform/ring0/**"
-"platform: ptrace":
- - "pkg/sentry/platform/ptrace/**"
diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
deleted file mode 100644
index 4c8b8ea5c..000000000
--- a/.github/workflows/go.yml
+++ /dev/null
@@ -1,68 +0,0 @@
-# This workflow generates the Go branch. Note that this does not test the Go
-# branch, as this is rolled into the main continuous integration pipeline. This
-# workflow simply generates and pushes the branch, as long as appropriate
-# permissions are available.
-name: "Go"
-"on":
- push:
- branches:
- - master
- pull_request:
- branches:
- - master
- - "feature/**"
-
-jobs:
- generate:
- runs-on: ubuntu-latest
- steps:
- - name: Cancel previous
- uses: styfle/cancel-workflow-action@0.7.0
- with:
- access_token: ${{ github.token }}
- - id: setup
- run: |
- if ! [[ -z "${{ secrets.GO_TOKEN }}" ]]; then
- echo ::set-output name=has_token::true
- else
- echo ::set-output name=has_token::false
- fi
- - run: |
- jq -nc '{"state": "pending", "context": "go tests"}' | \
- curl -sL -X POST -d @- \
- -H "Content-Type: application/json" \
- -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
- "${{ github.event.pull_request.statuses_url }}"
- if: github.event_name == 'pull_request'
- - uses: actions/checkout@v2
- if: github.event_name == 'push' && steps.setup.outputs.has_token == 'true'
- with:
- fetch-depth: 0
- token: '${{ secrets.GO_TOKEN }}'
- - uses: actions/checkout@v2
- if: github.event_name == 'pull_request' || steps.setup.outputs.has_token != 'true'
- with:
- fetch-depth: 0
- - uses: actions/setup-go@v2
- with:
- go-version: 1.15
- - run: tools/go_branch.sh
- - run: git checkout go && git clean -xf . && go build ./...
- - if: github.event_name == 'push'
- run: |
- git remote add upstream "https://github.com/${{ github.repository }}"
- git push upstream go:go
- - if: ${{ success() && github.event_name == 'pull_request' }}
- run: |
- jq -nc '{"state": "success", "context": "go tests"}' | \
- curl -sL -X POST -d @- \
- -H "Content-Type: application/json" \
- -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
- "${{ github.event.pull_request.statuses_url }}"
- - if: ${{ failure() && github.event_name == 'pull_request' }}
- run: |
- jq -nc '{"state": "failure", "context": "go tests"}' | \
- curl -sL -X POST -d @- \
- -H "Content-Type: application/json" \
- -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
- "${{ github.event.pull_request.statuses_url }}"
diff --git a/.github/workflows/issue_reviver.yml b/.github/workflows/issue_reviver.yml
deleted file mode 100644
index a6a9b1b4f..000000000
--- a/.github/workflows/issue_reviver.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# This workflow revives issues that are still referenced in the code, and may
-# have been accidentally closed.
-name: "Issue reviver"
-"on":
- schedule:
- - cron: '0 0 * * *'
-
-jobs:
- issue_reviver:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v2
- if: github.repository == 'google/gvisor'
- - run: make run TARGETS="//tools/github" ARGS="-path=. revive"
- if: github.repository == 'google/gvisor'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- GITHUB_REPOSITORY: ${{ github.repository }}
diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml
deleted file mode 100644
index 3a19065e1..000000000
--- a/.github/workflows/labeler.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-# Labeler labels incoming pull requests.
-name: "Labeler"
-"on":
-- pull_request
-
-jobs:
- label:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/labeler@v2
- if: github.base_ref == null
- with:
- repo-token: "${{ secrets.GITHUB_TOKEN }}"
diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index a2a3fd508..000000000
--- a/.gitignore
+++ /dev/null
@@ -1,7 +0,0 @@
-# Generated bazel symlinks.
-/bazel-*
-# Generated build event file.
-/.build_events.json
-# Generated repository.
-/repo
-/repo.key \ No newline at end of file
diff --git a/.vscode/tasks.json b/.vscode/tasks.json
deleted file mode 100644
index 42a018434..000000000
--- a/.vscode/tasks.json
+++ /dev/null
@@ -1,31 +0,0 @@
-{
- "version": "2.0.0",
- "tasks": [
- {
- "label": "Build",
- "type": "shell",
- "command": "bazel build //...",
- "group": {
- "kind": "build",
- "isDefault": true
- },
- "presentation": {
- "reveal": "always",
- "panel": "new"
- }
- },
- {
- "label": "Test",
- "type": "shell",
- "command": "bazel test //...",
- "group": {
- "kind": "test",
- "isDefault": true
- },
- "presentation": {
- "reveal": "always",
- "panel": "new"
- }
- }
- ]
-}
diff --git a/BUILD b/BUILD
deleted file mode 100644
index 6cbd2c413..000000000
--- a/BUILD
+++ /dev/null
@@ -1,145 +0,0 @@
-load("//tools:defs.bzl", "build_test", "gazelle", "go_path")
-load("//tools/nogo:defs.bzl", "nogo_config")
-load("//tools/yamltest:defs.bzl", "yaml_test")
-load("//website:defs.bzl", "doc")
-
-package(licenses = ["notice"])
-
-exports_files(["LICENSE"])
-
-nogo_config(
- name = "nogo_config",
- srcs = ["nogo.yaml"],
- visibility = ["//:sandbox"],
-)
-
-doc(
- name = "contributing",
- src = "CONTRIBUTING.md",
- category = "Project",
- permalink = "/contributing/",
- visibility = ["//website:__pkg__"],
- weight = "20",
-)
-
-doc(
- name = "security",
- src = "SECURITY.md",
- category = "Project",
- permalink = "/security/",
- visibility = ["//website:__pkg__"],
- weight = "30",
-)
-
-doc(
- name = "governance",
- src = "GOVERNANCE.md",
- category = "Project",
- permalink = "/community/governance/",
- subcategory = "Community",
- visibility = ["//website:__pkg__"],
- weight = "20",
-)
-
-doc(
- name = "code_of_conduct",
- src = "CODE_OF_CONDUCT.md",
- category = "Project",
- permalink = "/community/code_of_conduct/",
- subcategory = "Community",
- visibility = ["//website:__pkg__"],
- weight = "99",
-)
-
-yaml_test(
- name = "nogo_config_test",
- srcs = glob(["nogo*.yaml"]),
- schema = "//tools/nogo:config-schema.json",
-)
-
-yaml_test(
- name = "github_workflows_test",
- srcs = glob([".github/workflows/*.yml"]),
- schema = "@github_workflow_schema//file",
-)
-
-yaml_test(
- name = "buildkite_pipelines_test",
- srcs = glob([".buildkite/*.yaml"]),
- schema = "@buildkite_pipeline_schema//file",
-)
-
-# The sandbox package group is used for sandbox-internal dependencies.
-package_group(
- name = "sandbox",
- packages = ["//..."],
-)
-
-# For targets that will not normally build internally, we ensure that they are
-# at least built by a static BUILD test.
-build_test(
- name = "build_test",
- targets = [
- "//test/e2e:integration_test",
- "//test/image:image_test",
- "//test/root:root_test",
- "//test/benchmarks/base:startup_test",
- "//test/benchmarks/base:size_test",
- "//test/benchmarks/base:sysbench_test",
- "//test/benchmarks/database:redis_test",
- "//test/benchmarks/fs:bazel_test",
- "//test/benchmarks/fs:fio_test",
- "//test/benchmarks/media:ffmpeg_test",
- "//test/benchmarks/ml:tensorflow_test",
- "//test/benchmarks/network:httpd_test",
- "//test/benchmarks/network:nginx_test",
- "//test/benchmarks/network:node_test",
- "//test/benchmarks/network:ruby_test",
- ],
-)
-
-# gopath defines a directory that is structured in a way that is compatible
-# with standard Go tools. Things like godoc, editors and refactor tools should
-# work as expected.
-#
-# The files in this tree are symlinks to the true sources.
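-#
-# For example (see CONTRIBUTING.md), the tree can be generated with:
-#   bazel build :gopath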
-go_path(
- name = "gopath",
- mode = "link",
- deps = [
- # Main binaries.
- #
- # For reasons related to reproducibility of the generated
- # files, in order to ensure that :gopath produces only
- # a single "pure" version of all files, we can only depend
- # on go_library targets here, and not go_binary. Thus the
- # binaries have been factored into a cli package, which is
- # a good practice in any case.
- "//runsc/cli",
- "//shim/cli",
- "//webhook/pkg/cli",
-
- # Packages that are not dependencies of the above.
- "//pkg/sentry/kernel/memevent",
- "//pkg/tcpip/adapters/gonet",
- "//pkg/tcpip/faketime",
- "//pkg/tcpip/link/channel",
- "//pkg/tcpip/link/ethernet",
- "//pkg/tcpip/link/muxed",
- "//pkg/tcpip/link/pipe",
- "//pkg/tcpip/link/sharedmem",
- "//pkg/tcpip/link/sharedmem/pipe",
- "//pkg/tcpip/link/sharedmem/queue",
- "//pkg/tcpip/link/tun",
- "//pkg/tcpip/link/waitable",
- "//pkg/tcpip/sample/tun_tcp_connect",
- "//pkg/tcpip/sample/tun_tcp_echo",
- "//pkg/tcpip/transport/tcpconntrack",
- ],
-)
-
-# gazelle is a set of build tools.
-#
-# To update the WORKSPACE from go.mod, use:
-# bazel run //:gazelle -- update-repos -from_file=go.mod
-gazelle(name = "gazelle")
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
deleted file mode 100644
index fbf517fe5..000000000
--- a/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,91 +0,0 @@
-# Code of Conduct
-
-## Our Pledge
-
-In the interest of fostering an open and welcoming environment, we as
-contributors and maintainers pledge to making participation in our project and
-our community a harassment-free experience for everyone, regardless of age, body
-size, disability, ethnicity, gender identity and expression, level of
-experience, education, socio-economic status, nationality, personal appearance,
-race, religion, or sexual identity and orientation.
-
-## Our Standards
-
-Examples of behavior that contributes to creating a positive environment
-include:
-
-* Using welcoming and inclusive language
-* Being respectful of differing viewpoints and experiences
-* Gracefully accepting constructive criticism
-* Focusing on what is best for the community
-* Showing empathy towards other community members
-
-Examples of unacceptable behavior by participants include:
-
-* The use of sexualized language or imagery and unwelcome sexual attention or
- advances
-* Trolling, insulting/derogatory comments, and personal or political attacks
-* Public or private harassment
-* Publishing others' private information, such as a physical or electronic
- address, without explicit permission
-* Other conduct which could reasonably be considered inappropriate in a
- professional setting
-
-## Our Responsibilities
-
-Project maintainers are responsible for clarifying the standards of acceptable
-behavior and are expected to take appropriate and fair corrective action in
-response to any instances of unacceptable behavior.
-
-Project maintainers have the right and responsibility to remove, edit, or reject
-comments, commits, code, wiki edits, issues, and other contributions that are
-not aligned to this Code of Conduct, or to ban temporarily or permanently any
-contributor for other behaviors that they deem inappropriate, threatening,
-offensive, or harmful.
-
-## Scope
-
-This Code of Conduct applies both within project spaces and in public spaces
-when an individual is representing the project or its community. Examples of
-representing a project or community include using an official project e-mail
-address, posting via an official social media account, or acting as an appointed
-representative at an online or offline event. Representation of a project may be
-further defined and clarified by project maintainers.
-
-This Code of Conduct also applies outside the project spaces when the Project
-Steward has a reasonable belief that an individual's behavior may have a
-negative impact on the project or its community.
-
-## Conflict Resolution
-
-We do not believe that all conflict is bad; healthy debate and disagreement
-often yield positive results. However, it is never okay to be disrespectful or
-to engage in behavior that violates the project’s code of conduct.
-
-If you see someone violating the code of conduct, you are encouraged to address
-the behavior directly with those involved. Many issues can be resolved quickly
-and easily, and this gives people more control over the outcome of their
-dispute. If you are unable to resolve the matter for any reason, or if the
-behavior is threatening or harassing, report it. We are dedicated to providing
-an environment where participants feel welcome and safe.
-
-Reports should be directed to Jaice Singer DuMars, jaice at google dot com, the
-Project Steward for gVisor. It is the Project Steward’s duty to receive and
-address reported violations of the code of conduct. They will then work with a
-committee consisting of representatives from the Open Source Programs Office and
-the Google Open Source Strategy team. If for any reason you are uncomfortable
-reaching out to the Project Steward, please email opensource@google.com.
-
-We will investigate every complaint, but you may not receive a direct response.
-We will use our discretion in determining when and how to follow up on reported
-incidents, which may range from not taking action to permanent expulsion from
-the project and project-sponsored spaces. We will notify the accused of the
-report and provide them an opportunity to discuss it before any action is taken.
-The identity of the reporter will be omitted from the details of the report
-supplied to the accused. In potentially harmful situations, such as ongoing
-harassment or threats to anyone's safety, we may take action without notice.
-
-## Attribution
-
-This Code of Conduct is adapted from the
-[Contributor Covenant, version 1.4](https://www.contributor-covenant.org/version/1/4/code-of-conduct.html).
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
deleted file mode 100644
index c53df7d25..000000000
--- a/CONTRIBUTING.md
+++ /dev/null
@@ -1,127 +0,0 @@
-# Contributing
-
-Want to contribute? Great! First, read this page.
-
-### Contributor License Agreement
-
-Contributions to this project must be accompanied by a Contributor License
-Agreement. You (or your employer) retain the copyright to your contribution;
-this simply gives us permission to use and redistribute your contributions as
-part of the project. Head over to <https://cla.developers.google.com/> to see
-your current agreements on file or to sign a new one.
-
-You generally only need to submit a CLA once, so if you've already submitted one
-(even if it was for a different project), you probably don't need to do it
-again.
-
-### Using GOPATH
-
-Some editors may require the code to be structured in a `GOPATH` directory tree.
-In this case, you may use the `:gopath` target to generate a directory tree with
-symlinks to the original source files.
-
-```
-bazel build :gopath
-```
-
-You can then set the `GOPATH` in your editor to `bazel-bin/gopath`.
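-
-For example (shell shown for illustration; editor configuration varies), you
-might point standard Go tooling at the generated tree with:
-
-```
-export GOPATH="$(pwd)/bazel-bin/gopath"
-```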
-
-If you use this mechanism, keep in mind that the generated tree is not the
-canonical source. You will still need to build and test with `bazel`. New files
-will need to be added to the appropriate `BUILD` files, and the `:gopath` target
-will need to be re-run to generate appropriate symlinks in the `GOPATH`
-directory tree.
-
-Dependencies can be added by using `go mod get`. In order to keep the
-`WORKSPACE` file in sync, run `tools/go_mod.sh` in place of `go mod`.
-
-### Coding Guidelines
-
-All code should comply with the [style guide](g3doc/style.md). Note that code
-may be automatically formatted per the guidelines when merged.
-
-As a secure runtime, we need to maintain the safety of all of the code included
-in gVisor. The following rules help mitigate issues.
-
-Definitions for the rules below:
-
-`core`:
-
-* `//pkg/sentry/...`
-* Transitive dependencies in `//pkg/...`, etc.
-
-`runsc`:
-
-* `//runsc/...`
-
-Rules:
-
-* No cgo in `core` or `runsc`. The final binary must be a statically-linked
- pure Go binary.
-
-* Any files importing "unsafe" must have a name ending in `_unsafe.go`.
-
-* `core` may only depend on the following packages:
-
- * Itself.
- * Go standard library.
- * Except (transitively) package "net" (this will result in a non-cgo
- binary). Use `//pkg/unet` instead.
- * `@org_golang_x_sys//unix:go_default_library` (Go import
- `golang.org/x/sys/unix`).
- * Generated Go protobuf packages.
- * `@org_golang_google_protobuf//proto:go_default_library` (Go import
- `google.golang.org/protobuf`).
-
-* `runsc` may only depend on the following packages:
-
- * All packages allowed for `core`.
- * `@com_github_google_subcommands//:go_default_library` (Go import
- `github.com/google/subcommands`).
- * `@com_github_opencontainers_runtime_spec//specs_go:go_default_library`
- (Go import `github.com/opencontainers/runtime-spec/specs_go`).
-
-### Code reviews
-
-Before sending code reviews, run `bazel test ...` to ensure tests are passing.
-
-Code changes are accepted via [pull request][github].
-
-When approved, the change will be submitted by a team member and automatically
-merged into the repository.
-
-### Presubmit checks
-
-Accessing check logs may require membership in the
-[gvisor-dev mailing list][gvisor-dev-list], which is public.
-
-### Bug IDs
-
-Some TODOs and NOTEs sprinkled throughout the code have associated IDs of the
-form `b/1234`. These correspond to bugs in our internal bug tracker. Eventually
-these bugs will be moved to GitHub Issues, but until then they can simply be
-ignored.
-
-### Build and test with Docker
-
-Running `make dev` is a convenient way to build and install `runsc` as a Docker
-runtime. The output of this command will show the runtimes installed.
-
-You may use `make refresh` to refresh the binary after any changes. For example:
-
-```bash
-make dev
-docker run --rm --runtime=my-branch hello-world
-make refresh
-```
-
-### The small print
-
-Contributions made by corporations are covered by a different agreement than the
-one above, the
-[Software Grant and Corporate Contributor License Agreement][gccla].
-
-[gcla]: https://cla.developers.google.com/about/google-individual
-[gccla]: https://cla.developers.google.com/about/google-corporate
-[github]: https://github.com/google/gvisor/compare
-[gvisor-dev-list]: https://groups.google.com/forum/#!forum/gvisor-dev
diff --git a/GOVERNANCE.md b/GOVERNANCE.md
deleted file mode 100644
index 40846bc2f..000000000
--- a/GOVERNANCE.md
+++ /dev/null
@@ -1,113 +0,0 @@
-# Governance
-
-## Projects
-
-A *project* is the primary unit of collaboration. Each project may have its own
-repository and contribution process.
-
-All projects are covered by the [Code of Conduct](CODE_OF_CONDUCT.md), and
-should include an up-to-date copy in the project repository or a link here.
-
-## Contributors
-
-Anyone can be a *contributor* to a project, provided they have signed relevant
-Contributor License Agreements (CLAs) and follow the project's contribution
-guidelines. Contributions will be reviewed by a maintainer, and must pass all
-applicable tests.
-
-Reviews check for code quality and style, including documentation, and enforce
-other policies. Contributions may be rejected for reasons unrelated to the code
-in question. For example, a change may be too complex to maintain or duplicate
-existing functionality.
-
-Note that contributions are not limited to code alone. Bugs, documentation,
-experience reports or public advocacy are all valuable ways to contribute to a
-project and build trust in the community.
-
-## Maintainers
-
-Each project has one or more *maintainers*. Maintainers set technical direction,
-facilitate contributions and exercise overall stewardship.
-
-Maintainers have write access to the project repository. Maintainers review and
-approve changes. They can also assign issues and add additional reviewers.
-
-Note that some repositories may not allow direct commit access, which is
-reserved for administrators or automated processes. In this case, maintainers
-have approval rights, and a separate process exists for merging a change.
-
-Maintainers are responsible for upholding the code of conduct in interactions
-via project communication channels. If comments or exchanges are in violation,
-they may remove them at their discretion.
-
-### Repositories requiring synchronization
-
-For some projects initiated by Google, the infrastructure which synchronizes and
-merges internal and external changes requires that merges are performed by a
-Google employee. In such cases, Google will initiate a rotation to merge changes
-once they pass tests and are approved by a maintainer. This does not preclude
-non-Google contributors from becoming maintainers, in which case the maintainer
-holds approval rights and the merge is an automated process. In some cases,
-Google-internal tests may fail and have to be fixed: the Google employee will
-work with the submitter to achieve this.
-
-### Becoming a maintainer
-
-The list of maintainers is defined by the list of people with commit access or
-approval authority on a repository, typically via a Gerrit group or a GitHub
-team.
-
-Existing maintainers may elevate a contributor to maintainer status on evidence
-of previous contributions and established trust. This decision is based on lazy
-consensus from existing maintainers. While contributors may ask maintainers to
-make this decision, existing maintainers will also pro-actively identify
-contributors who have demonstrated a sustained track record of technical
-leadership and direct contributions.
-
-## Special Interest Groups (SIGs)
-
-From time to time, a SIG may be formed in order to solve larger, more complex
-problems across one or more projects. There are many avenues for collaboration
-outside a SIG, but a SIG can provide structure for collaboration on a single
-topic.
-
-Each group will be established by a charter, and governed by the Code of
-Conduct. Some resources may be provided to the group, such as mailing lists or
-meeting space, and archives will be public.
-
-## Security disclosure
-
-Projects may maintain security mailing lists for vulnerability reports and
-internal project audits may occasionally reveal security issues. Access to these
-lists and audits will be limited to project *maintainers*; individual
-maintainers should opt to participate in these lists based on need and
-expertise. Once maintainers become aware of a potential security issue, they
-will assess the scope and potential impact. If reported externally, maintainers
-will determine a reasonable embargo period with the reporter.
-
-During the embargo period, the maintainers will prioritize a fix for the
-security issue. They may choose to disclose the issue to additional trusted
-contributors in order to facilitate a fix, subjecting them to the embargo, or
-notify affected users in order to give them an advance opportunity to mitigate
-the issue. The inclusion of specific users in this disclosure is left to the
-discretion of the maintainers and contributors involved, and depends on the
-scale of known project use and exposure.
-
-Once a fix is widely available or the embargo period ends, the maintainers will
-make technical details about the vulnerability and associated fixes available.
-
-## Mailing lists
-
-There are four key mailing lists that span projects.
-
-* [gvisor-users](mailto:gvisor-users@googlegroups.com): general purpose user
- list.
-* [gvisor-dev](mailto:gvisor-dev@googlegroups.com): general purpose
- development list.
-* [gvisor-security](mailto:gvisor-security@googlegroups.com): private security
- list. Access to this list is restricted to maintainers of the core gVisor
- project, subject to the security disclosure policy described above.
-* [gvisor-syzkaller](mailto:gvisor-syzkaller@googlegroups.com): private
- syzkaller bug tracking list. Access to this list is not limited to
- maintainers, but will be granted to those who can credibly contribute to
- fixes.
diff --git a/Makefile b/Makefile
deleted file mode 100644
index 68fb4c892..000000000
--- a/Makefile
+++ /dev/null
@@ -1,456 +0,0 @@
-#!/usr/bin/make -f
-
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-default: runsc
-.PHONY: default
-
-# Header for debugging (used by other macros).
-header = echo --- $(1) >&2
-
-# Make hacks.
-EMPTY :=
-SPACE := $(EMPTY) $(EMPTY)
-SHELL = /bin/bash
-
-## usage: make <target>
-## or
-## make <build|test|copy|run|sudo> STARTUP_OPTIONS="..." OPTIONS="..." TARGETS="..." ARGS="..."
-##
-## Basic targets.
-##
-## This Makefile wraps basic build and test targets for ease-of-use. Bazel
-## is run inside a canonical Docker container in order to simplify up-front
-## requirements.
-##
-## There are common arguments that may be passed to targets. These are:
-## OPTIONS - Build or test options.
-## TARGETS - The bazel targets.
-## ARGS - Arguments for run or sudo.
-##
-## Additionally, the copy target expects a DESTINATION to be provided.
-##
-## For example, to build runsc using this Makefile, you can run:
-## make build OPTIONS="" TARGETS="//runsc"
-##
-help: ## Shows all targets and help from the Makefile (this message).
- @grep --no-filename -E '^([a-z.A-Z_%-]+:.*?|)##' $(MAKEFILE_LIST) | \
- awk 'BEGIN {FS = "(:.*?|)## ?"}; { \
- if (length($$1) > 0) { \
- printf " \033[36m%-20s\033[0m %s\n", $$1, $$2; \
- } else { \
- printf "%s\n", $$2; \
- } \
- }'
-
-build: ## Builds the given $(TARGETS) with the given $(OPTIONS). E.g. make build TARGETS=runsc
- @$(call build,$(OPTIONS) $(TARGETS))
-.PHONY: build
-
-test: ## Tests the given $(TARGETS) with the given $(OPTIONS). E.g. make test TARGETS=pkg/buffer:buffer_test
- @$(call test,$(OPTIONS) $(TARGETS))
-.PHONY: test
-
-copy: ## Copies the given $(TARGETS) to the given $(DESTINATION). E.g. make copy TARGETS=runsc DESTINATION=/tmp
- @$(call copy,$(TARGETS),$(DESTINATION))
-.PHONY: copy
-
-run: ## Runs the given $(TARGETS), built with $(OPTIONS), using $(ARGS). E.g. make run TARGETS=runsc ARGS=-version
- @$(call run,$(TARGETS),$(ARGS))
-.PHONY: run
-
-sudo: ## Runs the given $(TARGETS) as per run, but using "sudo -E". E.g. make sudo TARGETS=test/root:root_test ARGS=-test.v
- @$(call sudo,$(TARGETS),$(ARGS))
-.PHONY: sudo
-
-# Load image helpers.
-include tools/images.mk
-
-# Load all bazel wrappers.
-#
-# This file should define the basic "build", "test", "run" and "sudo" rules, in
-# addition to the $(BRANCH_NAME) and $(BUILD_ROOTS) variables.
-ifneq (,$(wildcard tools/google.mk))
-include tools/google.mk
-else
-include tools/bazel.mk
-endif
-
-##
-## Development helpers and tooling.
-##
-## These targets facilitate local development by automatically
-## installing and configuring a runtime. Several variables may
-## be used here to tweak the installation:
-## RUNTIME - The name of the installed runtime (default: branch).
-## RUNTIME_DIR - Where the runtime will be installed (default: temporary directory with the $RUNTIME).
-## RUNTIME_BIN - The runtime binary (default: $RUNTIME_DIR/runsc).
-## RUNTIME_LOG_DIR - The logs directory (default: $RUNTIME_DIR/logs).
-## RUNTIME_LOGS - The log pattern (default: $RUNTIME_LOG_DIR/runsc.log.%TEST%.%TIMESTAMP%.%COMMAND%).
-##
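-## For example (runtime names follow the branch name; "my-branch" is
-## illustrative), after "make dev" a container can be run with one of the
-## installed runtimes:
-##   docker run --rm --runtime=my-branch hello-world
-##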
-ifeq (,$(BRANCH_NAME))
-RUNTIME := runsc
-RUNTIME_DIR := $(shell dirname $(shell mktemp -u))/$(RUNTIME)
-else
-RUNTIME := $(BRANCH_NAME)
-RUNTIME_DIR := $(shell dirname $(shell mktemp -u))/$(RUNTIME)
-endif
-RUNTIME_BIN := $(RUNTIME_DIR)/runsc
-RUNTIME_LOG_DIR := $(RUNTIME_DIR)/logs
-RUNTIME_LOGS := $(RUNTIME_LOG_DIR)/runsc.log.%TEST%.%TIMESTAMP%.%COMMAND%
-
-$(RUNTIME_BIN): # See below.
- @mkdir -p "$(RUNTIME_DIR)"
- @$(call copy,//runsc,$(RUNTIME_BIN))
-.PHONY: $(RUNTIME_BIN) # Real file, but force rebuild.
-
-# Configure helpers for below.
-configure_noreload = \
- $(call header,CONFIGURE $(1) → $(RUNTIME_BIN) $(2)); \
- sudo $(RUNTIME_BIN) install --experimental=true --runtime="$(1)" -- --debug-log "$(RUNTIME_LOGS)" $(2) && \
- sudo rm -rf "$(RUNTIME_LOG_DIR)" && mkdir -p "$(RUNTIME_LOG_DIR)"
-reload_docker = \
- sudo systemctl reload docker && \
- if test -f /etc/docker/daemon.json; then \
- sudo chmod 0755 /etc/docker && \
- sudo chmod 0644 /etc/docker/daemon.json; \
- fi
-configure = $(call configure_noreload,$(1),$(2)) && $(reload_docker)
-
-# Helpers for above. Requires $(RUNTIME_BIN) dependency.
-install_runtime = $(call configure,$(1),$(2) --TESTONLY-test-name-env=RUNSC_TEST_NAME)
-# Don't use cached results; otherwise, multiple runs using different runtimes
-# may be skipped if all other inputs are the same.
-test_runtime = $(call test,--test_arg=--runtime=$(1) --nocache_test_results $(PARTITIONS) $(2))
-
-refresh: $(RUNTIME_BIN) ## Updates the runtime binary.
-.PHONY: refresh
-
-dev: $(RUNTIME_BIN) ## Installs a set of local runtimes. Requires sudo.
- @$(call configure_noreload,$(RUNTIME),--net-raw)
- @$(call configure_noreload,$(RUNTIME)-d,--net-raw --debug --strace --log-packets)
- @$(call configure_noreload,$(RUNTIME)-p,--net-raw --profile)
- @$(call configure_noreload,$(RUNTIME)-vfs2-d,--net-raw --debug --strace --log-packets --vfs2)
- @$(call configure_noreload,$(RUNTIME)-vfs2-fuse-d,--net-raw --debug --strace --log-packets --vfs2 --fuse)
- @$(call configure_noreload,$(RUNTIME)-vfs2-cgroup-d,--net-raw --debug --strace --log-packets --vfs2 --cgroupfs)
- @$(call reload_docker)
-.PHONY: dev
-
-##
-## Canonical build and test targets.
-##
-## These targets are used by continuous integration and provide
-## convenient entrypoints for testing changes. If you're adding a
-## new subsystem or workflow, consider adding a new target here.
-##
-## Some targets support the PARTITION (1-indexed) and TOTAL_PARTITIONS
-## environment variables for high-level test sharding. Unlike most
-## other variables, these are sourced from the environment.
-##
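-## For example (values are illustrative), a sharded syscall test run might be
-## invoked as:
-##   PARTITION=1 TOTAL_PARTITIONS=4 make syscall-tests
-##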
-PARTITION ?= 1
-TOTAL_PARTITIONS ?= 1
-PARTITIONS := --test_arg=--partition=$(PARTITION) --test_arg=--total_partitions=$(TOTAL_PARTITIONS)
-
-runsc: ## Builds the runsc binary.
- @$(call build,-c opt //runsc)
-.PHONY: runsc
-
-debian: ## Builds the debian packages.
- @$(call build,-c opt //debian:debian)
-.PHONY: debian
-
-smoke-tests: ## Runs a simple smoke test after building runsc.
- @$(call run,//runsc,--alsologtostderr --network none --debug --TESTONLY-unsafe-nonroot=true --rootless do true)
-.PHONY: smoke-tests
-
-nogo-tests: ## Runs nogo tests (static analysis checks).
- @$(call test,--build_tag_filters=nogo --test_tag_filters=nogo //:all pkg/... tools/...)
-.PHONY: nogo-tests
-
-unit-tests: ## Local package unit tests in pkg/..., tools/.., etc.
- @$(call test,--build_tag_filters=-nogo --test_tag_filters=-nogo //:all pkg/... tools/...)
-.PHONY: unit-tests
-
-runsc-tests: ## Run all tests in runsc/...
- @$(call test,runsc/...)
-.PHONY: runsc-tests
-
-tests: ## Runs all unit tests and syscall tests.
-tests: unit-tests nogo-tests runsc-tests syscall-tests
-.PHONY: tests
-
-integration-tests: ## Run all standard integration tests.
-integration-tests: docker-tests overlay-tests hostnet-tests swgso-tests
-integration-tests: do-tests kvm-tests containerd-test-1.3.9
-.PHONY: integration-tests
-
-network-tests: ## Run all networking integration tests.
-network-tests: iptables-tests packetdrill-tests packetimpact-tests
-.PHONY: network-tests
-
-# The set of system call targets.
-SYSCALL_TARGETS := test/syscalls/... test/fuse/...
-
-syscall-%-tests:
- @$(call test,--test_tag_filters=runsc_$* $(PARTITIONS) test/syscalls/...)
-
-syscall-native-tests:
- @$(call test,--test_tag_filters=native $(PARTITIONS) test/syscalls/...)
-.PHONY: syscall-native-tests
-
-syscall-tests: ## Run all system call tests.
- @$(call test,$(PARTITIONS) $(SYSCALL_TARGETS))
-.PHONY: syscall-tests
-
-%-runtime-tests: load-runtimes_% $(RUNTIME_BIN)
- @$(call install_runtime,$(RUNTIME),) # Ensure flags are cleared.
- @$(call test_runtime,$(RUNTIME),--test_timeout=10800 //test/runtimes:$*)
-
-%-runtime-tests_vfs2: load-runtimes_% $(RUNTIME_BIN)
- @$(call install_runtime,$(RUNTIME),--vfs2)
- @$(call test_runtime,$(RUNTIME),--test_timeout=10800 //test/runtimes:$*)
-
-do-tests:
- @$(call run,//runsc,--rootless do true)
- @$(call run,//runsc,--rootless -network=none do true)
- @$(call sudo,//runsc,do true)
-.PHONY: do-tests
-
-arm-qemu-smoke-test: BAZEL_OPTIONS=--config=cross-aarch64
-arm-qemu-smoke-test: load-arm-qemu
- export T=$$(mktemp -d --tmpdir release.XXXXXX); \
- mkdir -p $$T/bin/arm64/ && \
- $(call copy,//runsc:runsc,$$T/bin/arm64) && \
- docker run --rm -v $$T/bin/arm64/runsc:/workdir/initramfs/runsc gvisor.dev/images/arm-qemu
-.PHONY: arm-qemu-smoke-test
-
-simple-tests: unit-tests # Compatibility target.
-.PHONY: simple-tests
-
-# Standard integration targets.
-INTEGRATION_TARGETS := //test/image:image_test //test/e2e:integration_test
-
-docker-tests: load-basic $(RUNTIME_BIN)
- @$(call install_runtime,$(RUNTIME),) # Clear flags.
- @$(call test_runtime,$(RUNTIME),$(INTEGRATION_TARGETS))
- @$(call install_runtime,$(RUNTIME),--vfs2)
- @$(call test_runtime,$(RUNTIME),$(INTEGRATION_TARGETS))
-.PHONY: docker-tests
-
-overlay-tests: load-basic $(RUNTIME_BIN)
- @$(call install_runtime,$(RUNTIME),--overlay)
- @$(call test_runtime,$(RUNTIME),$(INTEGRATION_TARGETS))
-.PHONY: overlay-tests
-
-swgso-tests: load-basic $(RUNTIME_BIN)
- @$(call install_runtime,$(RUNTIME),--software-gso=true --gso=false)
- @$(call test_runtime,$(RUNTIME),$(INTEGRATION_TARGETS))
-.PHONY: swgso-tests
-
-hostnet-tests: load-basic $(RUNTIME_BIN)
- @$(call install_runtime,$(RUNTIME),--network=host)
- @$(call test_runtime,$(RUNTIME),--test_arg=-checkpoint=false --test_arg=-hostnet=true $(INTEGRATION_TARGETS))
-.PHONY: hostnet-tests
-
-kvm-tests: load-basic $(RUNTIME_BIN)
- @(lsmod | grep -E '^(kvm_intel|kvm_amd)') || sudo modprobe kvm
- @if ! test -w /dev/kvm; then sudo chmod a+rw /dev/kvm; fi
- @$(call test,//pkg/sentry/platform/kvm:kvm_test)
- @$(call install_runtime,$(RUNTIME),--platform=kvm)
- @$(call test_runtime,$(RUNTIME),$(INTEGRATION_TARGETS))
-.PHONY: kvm-tests
-
-iptables-tests: load-iptables $(RUNTIME_BIN)
- @sudo modprobe iptable_filter
- @sudo modprobe ip6table_filter
- @$(call test,--test_arg=-runtime=runc $(PARTITIONS) //test/iptables:iptables_test)
- @$(call install_runtime,$(RUNTIME),--net-raw)
- @$(call test_runtime,$(RUNTIME),//test/iptables:iptables_test)
-.PHONY: iptables-tests
-
-packetdrill-tests: load-packetdrill $(RUNTIME_BIN)
- @$(call install_runtime,$(RUNTIME),) # Clear flags.
- @$(call test_runtime,$(RUNTIME),//test/packetdrill:all_tests)
-.PHONY: packetdrill-tests
-
-packetimpact-tests: load-packetimpact $(RUNTIME_BIN)
- @sudo modprobe iptable_filter
- @sudo modprobe ip6table_filter
- @$(call install_runtime,$(RUNTIME),) # Clear flags.
- @$(call test_runtime,$(RUNTIME),--jobs=HOST_CPUS*3 --local_test_jobs=HOST_CPUS*3 //test/packetimpact/tests:all_tests)
-.PHONY: packetimpact-tests
-
-fsstress-test: load-basic $(RUNTIME_BIN)
- @$(call install_runtime,$(RUNTIME),--vfs2)
- @$(call test_runtime,$(RUNTIME),//test/fsstress:fsstress_test)
-.PHONY: fsstress-test
-
-# Specific containerd version tests.
-containerd-test-%: load-basic_alpine load-basic_python load-basic_busybox load-basic_resolv load-basic_httpd load-basic_ubuntu $(RUNTIME_BIN)
- @$(call install_runtime,$(RUNTIME),) # Clear flags.
- @$(call sudo,tools/installers:containerd,$*)
- @$(call sudo,tools/installers:shim)
- @$(call sudo,test/root:root_test,--runtime=$(RUNTIME) -test.v)
-
-# The shim is built against containerd 1.3.9 and is not backward compatible.
-# Test with 1.3.9 and newer versions.
-containerd-tests: ## Runs all supported containerd version tests.
-containerd-tests: containerd-test-1.3.9
-containerd-tests: containerd-test-1.4.3
-
-##
-## Benchmarks.
-##
-## Targets to run benchmarks. See //test/benchmarks for details.
-##
-## common arguments:
-## BENCHMARKS_PROJECT - BigQuery project to which to send data.
-## BENCHMARKS_DATASET - BigQuery dataset to which to send data.
-## BENCHMARKS_TABLE - BigQuery table to which to send data.
-## BENCHMARKS_SUITE - name of the benchmark suite. See //tools/bigquery/bigquery.go.
-## BENCHMARKS_UPLOAD - if true, upload benchmark data from the run.
-## BENCHMARKS_OFFICIAL - marks the data as official.
-## BENCHMARKS_PLATFORMS - platforms to run benchmarks (e.g. ptrace kvm).
-## BENCHMARKS_FILTER - filter to be applied to the test suite.
-## BENCHMARKS_OPTIONS - options to be passed to the test.
-## BENCHMARKS_PROFILE - profile options to be passed to the test.
-## BENCH_RUNTIME_ARGS - args to configure the runtime which runs the benchmarks.
-##
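-## For example, the continuous pipeline runs the Redis suite roughly as:
-##   make benchmark-platforms BENCHMARKS_SUITE=redis BENCHMARKS_TARGETS=test/benchmarks/database:redis_test BENCHMARKS_OPTIONS=-test.benchtime=15s
-##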
-BENCHMARKS_PROJECT ?= gvisor-benchmarks
-BENCHMARKS_DATASET ?= kokoro
-BENCHMARKS_TABLE ?= benchmarks
-BENCHMARKS_SUITE ?= ffmpeg
-BENCHMARKS_UPLOAD ?= false
-BENCHMARKS_OFFICIAL ?= false
-BENCHMARKS_PLATFORMS ?= ptrace
-BENCHMARKS_TARGETS := //test/benchmarks/media:ffmpeg_test
-BENCHMARKS_FILTER := .
-BENCHMARKS_OPTIONS := -test.benchtime=30s
-BENCHMARKS_ARGS := -test.v -test.bench=$(BENCHMARKS_FILTER) $(BENCHMARKS_OPTIONS)
-BENCHMARKS_PROFILE := -pprof-dir=/tmp/profile -pprof-cpu -pprof-heap -pprof-block -pprof-mutex
-BENCH_VFS := --vfs2
-BENCH_RUNTIME_ARGS ?=
-
-init-benchmark-table: ## Initializes a BigQuery table with the benchmark schema.
- @$(call run,//tools/parsers:parser,init --project=$(BENCHMARKS_PROJECT) --dataset=$(BENCHMARKS_DATASET) --table=$(BENCHMARKS_TABLE))
-.PHONY: init-benchmark-table
-
-# $(1) is the runtime name, $(2) are the arguments.
-run_benchmark = \
- ($(call header,BENCHMARK $(1) $(2)); \
- set -euo pipefail; \
- export T=$$(mktemp --tmpdir logs.$(1).XXXXXX); \
- if test "$(1)" = "runc"; then $(call sudo,$(BENCHMARKS_TARGETS),-runtime=$(1) $(BENCHMARKS_ARGS)) | tee $$T; fi; \
- if test "$(1)" != "runc"; then $(call install_runtime,$(1),--profile $(2)); \
- $(call sudo,$(BENCHMARKS_TARGETS),-runtime=$(1) $(BENCHMARKS_ARGS) $(BENCHMARKS_PROFILE)) | tee $$T; fi; \
- if test "$(BENCHMARKS_UPLOAD)" = "true"; then \
- $(call run,tools/parsers:parser,parse --debug --file=$$T --runtime=$(1) --suite_name=$(BENCHMARKS_SUITE) --project=$(BENCHMARKS_PROJECT) --dataset=$(BENCHMARKS_DATASET) --table=$(BENCHMARKS_TABLE) --official=$(BENCHMARKS_OFFICIAL)); \
- fi; \
- rm -rf $$T)
-
-benchmark-platforms: load-benchmarks $(RUNTIME_BIN) ## Runs benchmarks for runc and all given platforms in BENCHMARK_PLATFORMS.
- @$(foreach PLATFORM,$(BENCHMARKS_PLATFORMS), \
- $(call run_benchmark,$(PLATFORM),--platform=$(PLATFORM) $(BENCH_RUNTIME_ARGS) --vfs2) && \
- $(call run_benchmark,$(PLATFORM)_vfs1,--platform=$(PLATFORM) $(BENCH_RUNTIME_ARGS)) && \
- ) true
- @$(call run_benchmark,runc)
-.PHONY: benchmark-platforms
-
-run-benchmark: load-benchmarks $(RUNTIME_BIN) ## Runs single benchmark and optionally sends data to BigQuery.
- @if test "$(RUNTIME)" = "runc"; then $(call run_benchmark,$(RUNTIME)); fi;
- @if test "$(RUNTIME)" != "runc"; then $(call run_benchmark,$(RUNTIME)$(BENCH_VFS),$(BENCH_RUNTIME_ARGS) $(BENCH_VFS)); fi;
-.PHONY: run-benchmark
-
-##
-## Website & documentation helpers.
-##
-## The website is built from repository documentation and wrappers, using
-## a locally-defined Docker image (see images/jekyll). The following
-## variables may be set when using website-push:
-## WEBSITE_IMAGE - The name of the container image.
-## WEBSITE_SERVICE - The backend service.
-## WEBSITE_PROJECT - The project id to use.
-## WEBSITE_REGION - The region to deploy to.
-##
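-## For example, to build the site image and preview it locally (the local
-## server listens on port 8080):
-##   make website-server
-##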
-WEBSITE_IMAGE := gcr.io/gvisordev/gvisordev
-WEBSITE_SERVICE := gvisordev
-WEBSITE_PROJECT := gvisordev
-WEBSITE_REGION := us-central1
-
-website-build: load-jekyll ## Build the site image locally.
- @$(call run,//website:website,$(WEBSITE_IMAGE))
-.PHONY: website-build
-
-website-server: website-build ## Run a local server for development.
- @docker run -i -p 8080:8080 $(WEBSITE_IMAGE)
-.PHONY: website-server
-
-website-push: website-build ## Push a new image and update the service.
- @docker push $(WEBSITE_IMAGE)
-.PHONY: website-push
-
-website-deploy: website-push ## Deploy a new version of the website.
- @gcloud run deploy $(WEBSITE_SERVICE) --platform=managed --region=$(WEBSITE_REGION) --project=$(WEBSITE_PROJECT) --image=$(WEBSITE_IMAGE) --memory 1Gi
-.PHONY: website-deploy
-
-##
-## Repository builders.
-##
-## This builds a local apt repository. The following variables may be set:
-## RELEASE_ROOT - The repository root (default: "repo" directory).
-## RELEASE_KEY - The repository GPG private key file (default: dummy key is created).
-## RELEASE_ARTIFACTS - The release artifacts directory. May contain multiple.
-## RELEASE_NIGHTLY - Set to true if a nightly release (default: false).
-## RELEASE_COMMIT - The commit or Change-Id for the release (needed for tag).
-## RELEASE_NAME - The name of the release in the proper format (needed for tag).
-## RELEASE_NOTES - The file containing release notes (needed for tag).
-##
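-## For example, the continuous pipeline stages artifacts and then builds the
-## repository roughly as:
-##   make artifacts/x86_64
-##   make release
-##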
-RELEASE_ROOT := repo
-RELEASE_KEY := repo.key
-RELEASE_ARTIFACTS := artifacts
-RELEASE_NIGHTLY := false
-RELEASE_COMMIT :=
-RELEASE_NAME :=
-RELEASE_NOTES :=
-GPG_TEST_OPTIONS := $(shell if gpg --pinentry-mode loopback --version >/dev/null 2>&1; then echo --pinentry-mode loopback; fi)
-
-$(RELEASE_KEY):
- @echo "WARNING: Generating a key for testing ($@); don't use this."
- @T=$$(mktemp --tmpdir keyring.XXXXXX); \
- C=$$(mktemp --tmpdir config.XXXXXX); \
- echo Key-Type: DSA >> $$C && \
- echo Key-Length: 1024 >> $$C && \
- echo Name-Real: Test >> $$C && \
- echo Name-Email: test@example.com >> $$C && \
- echo Expire-Date: 0 >> $$C && \
- echo %commit >> $$C && \
- gpg --batch $(GPG_TEST_OPTIONS) --passphrase '' --no-default-keyring --secret-keyring $$T --no-tty --gen-key $$C && \
- gpg --batch $(GPG_TEST_OPTIONS) --export-secret-keys --no-default-keyring --secret-keyring $$T > $@; \
- rc=$$?; rm -f $$T $$C; exit $$rc
-
-$(RELEASE_ARTIFACTS)/%:
- @mkdir -p $@
- @$(call copy,//runsc:runsc,$@)
- @$(call copy,//shim:containerd-shim-runsc-v1,$@)
- @$(call copy,//debian:debian,$@)
-
-release: $(RELEASE_KEY) $(RELEASE_ARTIFACTS)/$(ARCH)
- @mkdir -p $(RELEASE_ROOT)
- @NIGHTLY=$(RELEASE_NIGHTLY) tools/make_release.sh $(RELEASE_KEY) $(RELEASE_ROOT) $$(find $(RELEASE_ARTIFACTS) -type f)
-.PHONY: release
-
-tag: ## Creates and pushes a release tag.
- @tools/tag_release.sh "$(RELEASE_COMMIT)" "$(RELEASE_NAME)" "$(RELEASE_NOTES)"
-.PHONY: tag
diff --git a/README.md b/README.md
index aa8b8c068..ab5073fe5 100644
--- a/README.md
+++ b/README.md
@@ -1,130 +1,5 @@
-![gVisor](g3doc/logo.png)
+# gVisor
-[![Build status](https://badge.buildkite.com/3b159f20b9830461a71112566c4171c0bdfd2f980a8e4c0ae6.svg?branch=master)](https://buildkite.com/gvisor/pipeline)
-[![Issue reviver](https://github.com/google/gvisor/actions/workflows/issue_reviver.yml/badge.svg)](https://github.com/google/gvisor/actions/workflows/issue_reviver.yml)
-[![gVisor chat](https://badges.gitter.im/gvisor/community.png)](https://gitter.im/gvisor/community)
-[![code search](https://img.shields.io/badge/code-search-blue)](https://cs.opensource.google/gvisor/gvisor)
-
-## What is gVisor?
-
-**gVisor** is an application kernel, written in Go, that implements a
-substantial portion of the Linux system surface. It includes an
-[Open Container Initiative (OCI)][oci] runtime called `runsc` that provides an
-isolation boundary between the application and the host kernel. The `runsc`
-runtime integrates with Docker and Kubernetes, making it simple to run sandboxed
-containers.
-
-## Why does gVisor exist?
-
-Containers are not a [**sandbox**][sandbox]. While containers have
-revolutionized how we develop, package, and deploy applications, using them to
-run untrusted or potentially malicious code without additional isolation is not
-a good idea. While using a single, shared kernel allows for efficiency and
-performance gains, it also means that container escape is possible with a single
-vulnerability.
-
-gVisor is an application kernel for containers. It limits the host kernel
-surface accessible to the application while still giving the application access
-to all the features it expects. Unlike most kernels, gVisor does not assume or
-require a fixed set of physical resources; instead, it leverages existing host
-kernel functionality and runs as a normal process. In other words, gVisor
-implements Linux by way of Linux.
-
-gVisor should not be confused with technologies and tools to harden containers
-against external threats, provide additional integrity checks, or limit the
-scope of access for a service. One should always be careful about what data is
-made available to a container.
-
-## Documentation
-
-User documentation and technical architecture, including quick start guides, can
-be found at [gvisor.dev][gvisor-dev].
-
-## Installing from source
-
-gVisor builds on x86_64 and ARM64. Other architectures may become available in
-the future.
-
-For the purposes of these instructions, [bazel][bazel] and other build
-dependencies are wrapped in a build container. It is possible to use
-[bazel][bazel] directly, or type `make help` for standard targets.
-
-### Requirements
-
-Make sure the following dependencies are installed:
-
-* Linux 4.14.77+ ([older linux][old-linux])
-* [Docker version 17.09.0 or greater][docker]
-
-### Building
-
-Build and install the `runsc` binary:
-
-```sh
-make runsc
-sudo cp ./bazel-bin/runsc/linux_amd64_pure_stripped/runsc /usr/local/bin
-```
-
-### Testing
-
-To run standard test suites, you can use:
-
-```sh
-make unit-tests
-make tests
-```
-
-To run specific tests, you can specify the target:
-
-```sh
-make test TARGETS="//runsc:version_test"
-```
-
-### Using `go get`
-
-This project uses [bazel][bazel] to build and manage dependencies. A synthetic
-`go` branch is maintained that is compatible with standard `go` tooling for
-convenience.
-
-For example, to build and install `runsc` directly from this branch:
-
-```sh
-echo "module runsc" > go.mod
-GO111MODULE=on go get gvisor.dev/gvisor/runsc@go
-CGO_ENABLED=0 GO111MODULE=on sudo -E go build -o /usr/local/bin/runsc gvisor.dev/gvisor/runsc
-```
-
-Subsequently, you can build and install the shim binary for `containerd`:
-
-```sh
-GO111MODULE=on sudo -E go build -o /usr/local/bin/containerd-shim-runsc-v1 gvisor.dev/gvisor/shim
-```
-
-Note that this branch is supported in a best-effort capacity, and direct
-development on this branch is not supported. Development should occur on the
-`master` branch, which is then reflected into the `go` branch.
-
-## Community & Governance
-
-See [GOVERNANCE.md](GOVERNANCE.md) for project governance information.
-
-The [gvisor-users mailing list][gvisor-users-list] and
-[gvisor-dev mailing list][gvisor-dev-list] are good starting points for
-questions and discussion.
-
-## Security Policy
-
-See [SECURITY.md](SECURITY.md).
-
-## Contributing
-
-See [CONTRIBUTING.md](CONTRIBUTING.md).
-
-[bazel]: https://bazel.build
-[docker]: https://www.docker.com
-[gvisor-users-list]: https://groups.google.com/forum/#!forum/gvisor-users
-[gvisor-dev]: https://gvisor.dev
-[gvisor-dev-list]: https://groups.google.com/forum/#!forum/gvisor-dev
-[oci]: https://www.opencontainers.org
-[old-linux]: https://gvisor.dev/docs/user_guide/networking/#gso
-[sandbox]: https://en.wikipedia.org/wiki/Sandbox_(computer_security)
+This branch is a synthetic branch, containing only Go sources, that is
+compatible with standard Go tools. See the master branch for authoritative
+sources and tests.
diff --git a/SECURITY.md b/SECURITY.md
deleted file mode 100644
index a96843895..000000000
--- a/SECURITY.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# Security and Vulnerability Reporting
-
-Sensitive security-related questions, comments, and reports should be sent to
-the [gvisor-security mailing list][gvisor-security-list]. You should receive a
-prompt response, typically within 48 hours.
-
-Policies for security list access, vulnerability embargo, and vulnerability
-disclosure are outlined in the [governance policy](GOVERNANCE.md).
-
-[gvisor-security-list]: https://groups.google.com/forum/#!forum/gvisor-security
diff --git a/WORKSPACE b/WORKSPACE
deleted file mode 100644
index a27f9afeb..000000000
--- a/WORKSPACE
+++ /dev/null
@@ -1,2524 +0,0 @@
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file")
-load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
-
-# Root certificates.
-#
-# Note that the sha256 hash is omitted here intentionally. This should not be
-# used in any part of the build other than as certificates present in images.
-http_file(
- name = "google_root_pem",
- urls = [
- "https://pki.goog/roots.pem",
- ],
-)
-
-# Bazel/starlark utilities.
-http_archive(
- name = "bazel_skylib",
- sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c",
- urls = [
- "https://github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz",
- "https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz",
- ],
-)
-
-load("@bazel_skylib//:workspace.bzl", "bazel_skylib_workspace")
-
-bazel_skylib_workspace()
-
-# Load go bazel rules and gazelle.
-#
-# Note that this repository actually patches some other Go repositories as it
-# loads them, in order to limit visibility. We hack this process by patching
-# the patch used by the Go rules, turning the trick against itself.
-
-http_archive(
- name = "io_bazel_rules_go",
- patch_args = ["-p1"],
- patches = [
- # Ensure we don't destroy the facts visibility.
- "//tools:rules_go_visibility.patch",
- # Newer versions of the rules_go rules will automatically strip test
- # binaries of symbols, which we don't want.
- "//tools:rules_go_symbols.patch",
- ],
- sha256 = "69de5c704a05ff37862f7e0f5534d4f479418afc21806c887db544a316f3cb6b",
- urls = [
- "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.27.0/rules_go-v0.27.0.tar.gz",
- "https://github.com/bazelbuild/rules_go/releases/download/v0.27.0/rules_go-v0.27.0.tar.gz",
- ],
-)
-
-http_archive(
- name = "bazel_gazelle",
- patch_args = ["-p1"],
- patches = [
- # Fix permissions for facts for go_library, not just tool library.
- # This is actually a no-op with the hacky patch above, but should
- # slightly future proof this mechanism.
- "//tools:bazel_gazelle_generate.patch",
- ],
- sha256 = "62ca106be173579c0a167deb23358fdfe71ffa1e4cfdddf5582af26520f1c66f",
- urls = [
- "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz",
- "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz",
- ],
-)
-
-load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
-
-go_rules_dependencies()
-
-go_register_toolchains(go_version = "1.16.2")
-
-load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository")
-
-gazelle_dependencies()
-
-# Some repositories declared below have a transitive dependency on this
-# repository. This declaration must precede any later declaration that
-# transitively depends on an older version, since only the first declaration
-# is considered.
-go_repository(
- name = "org_golang_x_sys",
- importpath = "golang.org/x/sys",
- sum = "h1:+39ahH47SWi1PhMRAHfIrm8f69HRZ5K2koXH6dmO8TQ=",
- version = "v0.0.0-20210314195730-07df6a141424",
-)
-
-# Load C++ rules.
-http_archive(
- name = "rules_cc",
- sha256 = "67412176974bfce3f4cf8bdaff39784a72ed709fc58def599d1f68710b58d68b",
- strip_prefix = "rules_cc-b7fe9697c0c76ab2fd431a891dbb9a6a32ed7c3e",
- urls = [
- "https://mirror.bazel.build/github.com/bazelbuild/rules_cc/archive/b7fe9697c0c76ab2fd431a891dbb9a6a32ed7c3e.zip",
- "https://github.com/bazelbuild/rules_cc/archive/b7fe9697c0c76ab2fd431a891dbb9a6a32ed7c3e.zip",
- ],
-)
-
-# Load C++ cross-compilation toolchains.
-http_archive(
- name = "coral_crosstool",
- sha256 = "088ef98b19a45d7224be13636487e3af57b1564880b67df7be8b3b7eee4a1bfc",
- strip_prefix = "crosstool-142e930ac6bf1295ff3ba7ba2b5b6324dfb42839",
- urls = [
- "https://github.com/google-coral/crosstool/archive/142e930ac6bf1295ff3ba7ba2b5b6324dfb42839.tar.gz",
- ],
-)
-
-load("@coral_crosstool//:configure.bzl", "cc_crosstool")
-
-cc_crosstool(name = "crosstool")
-
-# Load protobuf dependencies.
-http_archive(
- name = "rules_proto",
- sha256 = "2a20fd8af3cad3fbab9fd3aec4a137621e0c31f858af213a7ae0f997723fc4a9",
- strip_prefix = "rules_proto-a0761ed101b939e19d83b2da5f59034bffc19c12",
- urls = [
- "https://mirror.bazel.build/github.com/bazelbuild/rules_proto/archive/a0761ed101b939e19d83b2da5f59034bffc19c12.tar.gz",
- "https://github.com/bazelbuild/rules_proto/archive/a0761ed101b939e19d83b2da5f59034bffc19c12.tar.gz",
- ],
-)
-
-load("@rules_proto//proto:repositories.bzl", "rules_proto_dependencies", "rules_proto_toolchains")
-
-go_repository(
- name = "com_github_go_gl_glfw",
- importpath = "github.com/go-gl/glfw",
- sum = "h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0=",
- version = "v0.0.0-20190409004039-e6da0acd62b1",
-)
-
-go_repository(
- name = "com_github_google_go_github_v35",
- importpath = "github.com/google/go-github/v35",
- sum = "h1:s/soW8jauhjUC3rh8JI0FePuocj0DEI9DNBg/bVplE8=",
- version = "v35.2.0",
-)
-
-go_repository(
- name = "com_github_google_martian_v3",
- importpath = "github.com/google/martian/v3",
- sum = "h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60=",
- version = "v3.1.0",
-)
-
-go_repository(
- name = "io_rsc_quote_v3",
- importpath = "rsc.io/quote/v3",
- sum = "h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY=",
- version = "v3.1.0",
-)
-
-go_repository(
- name = "io_rsc_sampler",
- importpath = "rsc.io/sampler",
- sum = "h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4=",
- version = "v1.3.0",
-)
-
-go_repository(
- name = "org_golang_x_term",
- importpath = "golang.org/x/term",
- sum = "h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=",
- version = "v0.0.0-20201126162022-7de9c90e9dd1",
-)
-
-go_repository(
- name = "com_github_hashicorp_errwrap",
- importpath = "github.com/hashicorp/errwrap",
- sum = "h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=",
- version = "v1.0.0",
-)
-
-go_repository(
- name = "com_github_hashicorp_go_multierror",
- importpath = "github.com/hashicorp/go-multierror",
- sum = "h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=",
- version = "v1.1.0",
-)
-
-go_repository(
- name = "com_github_alecthomas_template",
- importpath = "github.com/alecthomas/template",
- sum = "h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=",
- version = "v0.0.0-20190718012654-fb15b899a751",
-)
-
-go_repository(
- name = "com_github_alecthomas_units",
- importpath = "github.com/alecthomas/units",
- sum = "h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E=",
- version = "v0.0.0-20190717042225-c3de453c63f4",
-)
-
-go_repository(
- name = "com_github_alexflint_go_filemutex",
- importpath = "github.com/alexflint/go-filemutex",
- sum = "h1:AMzIhMUqU3jMrZiTuW0zkYeKlKDAFD+DG20IoO421/Y=",
- version = "v0.0.0-20171022225611-72bdc8eae2ae",
-)
-
-go_repository(
- name = "com_github_antihax_optional",
- importpath = "github.com/antihax/optional",
- sum = "h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg=",
- version = "v1.0.0",
-)
-
-go_repository(
- name = "com_github_armon_consul_api",
- importpath = "github.com/armon/consul-api",
- sum = "h1:G1bPvciwNyF7IUmKXNt9Ak3m6u9DE1rF+RmtIkBpVdA=",
- version = "v0.0.0-20180202201655-eb2c6b5be1b6",
-)
-
-go_repository(
- name = "com_github_asaskevich_govalidator",
- importpath = "github.com/asaskevich/govalidator",
- sum = "h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=",
- version = "v0.0.0-20190424111038-f61b66f89f4a",
-)
-
-go_repository(
- name = "com_github_aws_aws_sdk_go",
- importpath = "github.com/aws/aws-sdk-go",
- sum = "h1:m45+Ru/wA+73cOZXiEGLDH2d9uLN3iHqMc0/z4noDXE=",
- version = "v1.15.11",
-)
-
-go_repository(
- name = "com_github_azure_azure_sdk_for_go",
- importpath = "github.com/Azure/azure-sdk-for-go",
- sum = "h1:KnPIugL51v3N3WwvaSmZbxukD1WuWXOiE9fRdu32f2I=",
- version = "v16.2.1+incompatible",
-)
-
-go_repository(
- name = "com_github_azure_go_ansiterm",
- importpath = "github.com/Azure/go-ansiterm",
- sum = "h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=",
- version = "v0.0.0-20170929234023-d6e3b3328b78",
-)
-
-go_repository(
- name = "com_github_azure_go_autorest",
- importpath = "github.com/Azure/go-autorest",
- sum = "h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=",
- version = "v14.2.0+incompatible",
-)
-
-go_repository(
- name = "com_github_beorn7_perks",
- importpath = "github.com/beorn7/perks",
- sum = "h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=",
- version = "v1.0.1",
-)
-
-go_repository(
- name = "com_github_bgentry_speakeasy",
- importpath = "github.com/bgentry/speakeasy",
- sum = "h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=",
- version = "v0.1.0",
-)
-
-go_repository(
- name = "com_github_bitly_go_simplejson",
- importpath = "github.com/bitly/go-simplejson",
- sum = "h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y=",
- version = "v0.5.0",
-)
-
-go_repository(
- name = "com_github_blang_semver",
- importpath = "github.com/blang/semver",
- sum = "h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=",
- version = "v3.5.1+incompatible",
-)
-
-go_repository(
- name = "com_github_bmizerany_assert",
- importpath = "github.com/bmizerany/assert",
- sum = "h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=",
- version = "v0.0.0-20160611221934-b7ed37b82869",
-)
-
-go_repository(
- name = "com_github_bshuster_repo_logrus_logstash_hook",
- importpath = "github.com/bshuster-repo/logrus-logstash-hook",
- sum = "h1:pgAtgj+A31JBVtEHu2uHuEx0n+2ukqUJnS2vVe5pQNA=",
- version = "v0.4.1",
-)
-
-go_repository(
- name = "com_github_buger_jsonparser",
- importpath = "github.com/buger/jsonparser",
- sum = "h1:y853v6rXx+zefEcjET3JuKAqvhj+FKflQijjeaSv2iA=",
- version = "v0.0.0-20180808090653-f4dd9f5a6b44",
-)
-
-go_repository(
- name = "com_github_bugsnag_bugsnag_go",
- importpath = "github.com/bugsnag/bugsnag-go",
- sum = "h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng=",
- version = "v0.0.0-20141110184014-b1d153021fcd",
-)
-
-go_repository(
- name = "com_github_bugsnag_osext",
- importpath = "github.com/bugsnag/osext",
- sum = "h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ=",
- version = "v0.0.0-20130617224835-0dd3f918b21b",
-)
-
-go_repository(
- name = "com_github_bugsnag_panicwrap",
- importpath = "github.com/bugsnag/panicwrap",
- sum = "h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o=",
- version = "v0.0.0-20151223152923-e2c28503fcd0",
-)
-
-go_repository(
- name = "com_github_cespare_xxhash",
- importpath = "github.com/cespare/xxhash",
- sum = "h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=",
- version = "v1.1.0",
-)
-
-go_repository(
- name = "com_github_cespare_xxhash_v2",
- importpath = "github.com/cespare/xxhash/v2",
- sum = "h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=",
- version = "v2.1.1",
-)
-
-go_repository(
- name = "com_github_checkpoint_restore_go_criu_v4",
- importpath = "github.com/checkpoint-restore/go-criu/v4",
- sum = "h1:WW2B2uxx9KWF6bGlHqhm8Okiafwwx7Y2kcpn8lCpjgo=",
- version = "v4.1.0",
-)
-
-go_repository(
- name = "com_github_cncf_xds_go",
- importpath = "github.com/cncf/xds/go",
- sum = "h1:OZmjad4L3H8ncOIR8rnb5MREYqG8ixi5+WbeUsquF0c=",
- version = "v0.0.0-20210312221358-fbca930ec8ed",
-)
-
-go_repository(
- name = "com_github_cockroachdb_datadriven",
- importpath = "github.com/cockroachdb/datadriven",
- sum = "h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y=",
- version = "v0.0.0-20190809214429-80d97fb3cbaa",
-)
-
-go_repository(
- name = "com_github_containerd_btrfs",
- importpath = "github.com/containerd/btrfs",
- sum = "h1:osn1exbzdub9L5SouXO5swW4ea/xVdJZ3wokxN5GrnA=",
- version = "v1.0.0",
-)
-
-go_repository(
- name = "com_github_containerd_go_cni",
- importpath = "github.com/containerd/go-cni",
- sum = "h1:YbJAhpTevL2v6u8JC1NhCYRwf+3Vzxcc5vGnYoJ7VeE=",
- version = "v1.0.2",
-)
-
-go_repository(
- name = "com_github_containerd_imgcrypt",
- importpath = "github.com/containerd/imgcrypt",
- sum = "h1:31GdXSmR2bZRPuZ/5lgkXivzNSejJQPsVOq82qqzP/M=",
- version = "v1.0.3",
-)
-
-go_repository(
- name = "com_github_containernetworking_cni",
- importpath = "github.com/containernetworking/cni",
- sum = "h1:7zpDnQ3T3s4ucOuJ/ZCLrYBxzkg0AELFfII3Epo9TmI=",
- version = "v0.8.1",
-)
-
-go_repository(
- name = "com_github_containernetworking_plugins",
- importpath = "github.com/containernetworking/plugins",
- sum = "h1:bU7QieuAp+sACI2vCzESJ3FoT860urYP+lThyZkb/2M=",
- version = "v0.8.7",
-)
-
-go_repository(
- name = "com_github_containers_ocicrypt",
- importpath = "github.com/containers/ocicrypt",
- sum = "h1:vYgl+RZ9Q3DPMuTfxmN+qp0X2Bj52uuY2vnt6GzVe1c=",
- version = "v1.0.3",
-)
-
-go_repository(
- name = "com_github_coreos_bbolt",
- importpath = "github.com/coreos/bbolt",
- sum = "h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s=",
- version = "v1.3.2",
-)
-
-go_repository(
- name = "com_github_coreos_etcd",
- importpath = "github.com/coreos/etcd",
- sum = "h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04=",
- version = "v3.3.10+incompatible",
-)
-
-go_repository(
- name = "com_github_coreos_go_iptables",
- importpath = "github.com/coreos/go-iptables",
- sum = "h1:mw6SAibtHKZcNzAsOxjoHIG0gy5YFHhypWSSNc6EjbQ=",
- version = "v0.5.0",
-)
-
-go_repository(
- name = "com_github_coreos_go_oidc",
- importpath = "github.com/coreos/go-oidc",
- sum = "h1:sdJrfw8akMnCuUlaZU3tE/uYXFgfqom8DBE9so9EBsM=",
- version = "v2.1.0+incompatible",
-)
-
-go_repository(
- name = "com_github_coreos_go_semver",
- importpath = "github.com/coreos/go-semver",
- sum = "h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=",
- version = "v0.3.0",
-)
-
-go_repository(
- name = "com_github_coreos_go_systemd",
- importpath = "github.com/coreos/go-systemd",
- sum = "h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=",
- version = "v0.0.0-20190321100706-95778dfbb74e",
-)
-
-go_repository(
- name = "com_github_coreos_pkg",
- importpath = "github.com/coreos/pkg",
- sum = "h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=",
- version = "v0.0.0-20180928190104-399ea9e2e55f",
-)
-
-go_repository(
- name = "com_github_creack_pty",
- importpath = "github.com/creack/pty",
- sum = "h1:6pwm8kMQKCmgUg0ZHTm5+/YvRK0s3THD/28+T6/kk4A=",
- version = "v1.1.7",
-)
-
-go_repository(
- name = "com_github_cyphar_filepath_securejoin",
- importpath = "github.com/cyphar/filepath-securejoin",
- sum = "h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=",
- version = "v0.2.2",
-)
-
-go_repository(
- name = "com_github_d2g_dhcp4",
- importpath = "github.com/d2g/dhcp4",
- sum = "h1:Xo2rK1pzOm0jO6abTPIQwbAmqBIOj132otexc1mmzFc=",
- version = "v0.0.0-20170904100407-a1d1b6c41b1c",
-)
-
-go_repository(
- name = "com_github_d2g_dhcp4client",
- importpath = "github.com/d2g/dhcp4client",
- sum = "h1:suYBsYZIkSlUMEz4TAYCczKf62IA2UWC+O8+KtdOhCo=",
- version = "v1.0.0",
-)
-
-go_repository(
- name = "com_github_d2g_dhcp4server",
- importpath = "github.com/d2g/dhcp4server",
- sum = "h1:+CpLbZIeUn94m02LdEKPcgErLJ347NUwxPKs5u8ieiY=",
- version = "v0.0.0-20181031114812-7d4a0a7f59a5",
-)
-
-go_repository(
- name = "com_github_d2g_hardwareaddr",
- importpath = "github.com/d2g/hardwareaddr",
- sum = "h1:itqmmf1PFpC4n5JW+j4BU7X4MTfVurhYRTjODoPb2Y8=",
- version = "v0.0.0-20190221164911-e7d9fbe030e4",
-)
-
-go_repository(
- name = "com_github_denverdino_aliyungo",
- importpath = "github.com/denverdino/aliyungo",
- sum = "h1:p6poVbjHDkKa+wtC8frBMwQtT3BmqGYBjzMwJ63tuR4=",
- version = "v0.0.0-20190125010748-a747050bb1ba",
-)
-
-go_repository(
- name = "com_github_dgryski_go_sip13",
- importpath = "github.com/dgryski/go-sip13",
- sum = "h1:RMLoZVzv4GliuWafOuPuQDKSm1SJph7uCRnnS61JAn4=",
- version = "v0.0.0-20181026042036-e10d5fee7954",
-)
-
-go_repository(
- name = "com_github_dnaeon_go_vcr",
- importpath = "github.com/dnaeon/go-vcr",
- sum = "h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=",
- version = "v1.0.1",
-)
-
-go_repository(
- name = "com_github_docker_go_metrics",
- importpath = "github.com/docker/go-metrics",
- sum = "h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=",
- version = "v0.0.1",
-)
-
-go_repository(
- name = "com_github_docker_libtrust",
- importpath = "github.com/docker/libtrust",
- sum = "h1:ZClxb8laGDf5arXfYcAtECDFgAgHklGI8CxgjHnXKJ4=",
- version = "v0.0.0-20150114040149-fa567046d9b1",
-)
-
-go_repository(
- name = "com_github_docopt_docopt_go",
- importpath = "github.com/docopt/docopt-go",
- sum = "h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ=",
- version = "v0.0.0-20180111231733-ee0de3bc6815",
-)
-
-go_repository(
- name = "com_github_fatih_color",
- importpath = "github.com/fatih/color",
- sum = "h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=",
- version = "v1.7.0",
-)
-
-go_repository(
- name = "com_github_form3tech_oss_jwt_go",
- importpath = "github.com/form3tech-oss/jwt-go",
- sum = "h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=",
- version = "v3.2.2+incompatible",
-)
-
-go_repository(
- name = "com_github_frankban_quicktest",
- importpath = "github.com/frankban/quicktest",
- sum = "h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY=",
- version = "v1.11.3",
-)
-
-go_repository(
- name = "com_github_fullsailor_pkcs7",
- importpath = "github.com/fullsailor/pkcs7",
- sum = "h1:RDBNVkRviHZtvDvId8XSGPu3rmpmSe+wKRcEWNgsfWU=",
- version = "v0.0.0-20190404230743-d7302db945fa",
-)
-
-go_repository(
- name = "com_github_garyburd_redigo",
- importpath = "github.com/garyburd/redigo",
- sum = "h1:LofdAjjjqCSXMwLGgOgnE+rdPuvX9DxCqaHwKy7i/ko=",
- version = "v0.0.0-20150301180006-535138d7bcd7",
-)
-
-go_repository(
- name = "com_github_go_ini_ini",
- importpath = "github.com/go-ini/ini",
- sum = "h1:Mujh4R/dH6YL8bxuISne3xX2+qcQ9p0IxKAP6ExWoUo=",
- version = "v1.25.4",
-)
-
-go_repository(
- name = "com_github_go_kit_kit",
- importpath = "github.com/go-kit/kit",
- sum = "h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=",
- version = "v0.9.0",
-)
-
-go_repository(
- name = "com_github_go_logfmt_logfmt",
- importpath = "github.com/go-logfmt/logfmt",
- sum = "h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=",
- version = "v0.4.0",
-)
-
-go_repository(
- name = "com_github_go_stack_stack",
- importpath = "github.com/go-stack/stack",
- sum = "h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=",
- version = "v1.8.0",
-)
-
-go_repository(
- name = "com_github_godbus_dbus",
- importpath = "github.com/godbus/dbus",
- sum = "h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8=",
- version = "v0.0.0-20190422162347-ade71ed3457e",
-)
-
-go_repository(
- name = "com_github_gopherjs_gopherjs",
- importpath = "github.com/gopherjs/gopherjs",
- sum = "h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=",
- version = "v0.0.0-20181017120253-0766667cb4d1",
-)
-
-go_repository(
- name = "com_github_gorilla_handlers",
- importpath = "github.com/gorilla/handlers",
- sum = "h1:893HsJqtxp9z1SF76gg6hY70hRY1wVlTSnC/h1yUDCo=",
- version = "v0.0.0-20150720190736-60c7bfde3e33",
-)
-
-go_repository(
- name = "com_github_gorilla_mux",
- importpath = "github.com/gorilla/mux",
- sum = "h1:zoNxOV7WjqXptQOVngLmcSQgXmgk4NMz1HibBchjl/I=",
- version = "v1.7.2",
-)
-
-go_repository(
- name = "com_github_gorilla_websocket",
- importpath = "github.com/gorilla/websocket",
- sum = "h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=",
- version = "v1.4.2",
-)
-
-go_repository(
- name = "com_github_grpc_ecosystem_go_grpc_middleware",
- importpath = "github.com/grpc-ecosystem/go-grpc-middleware",
- sum = "h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg=",
- version = "v1.0.1-0.20190118093823-f849b5445de4",
-)
-
-go_repository(
- name = "com_github_grpc_ecosystem_go_grpc_prometheus",
- importpath = "github.com/grpc-ecosystem/go-grpc-prometheus",
- sum = "h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=",
- version = "v1.2.0",
-)
-
-go_repository(
- name = "com_github_grpc_ecosystem_grpc_gateway",
- importpath = "github.com/grpc-ecosystem/grpc-gateway",
- sum = "h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=",
- version = "v1.16.0",
-)
-
-go_repository(
- name = "com_github_hashicorp_hcl",
- importpath = "github.com/hashicorp/hcl",
- sum = "h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=",
- version = "v1.0.0",
-)
-
-go_repository(
- name = "com_github_j_keck_arping",
- importpath = "github.com/j-keck/arping",
- sum = "h1:742eGXur0715JMq73aD95/FU0XpVKXqNuTnEfXsLOYQ=",
- version = "v0.0.0-20160618110441-2cf9dc699c56",
-)
-
-go_repository(
- name = "com_github_jmespath_go_jmespath",
- importpath = "github.com/jmespath/go-jmespath",
- sum = "h1:SMvOWPJCES2GdFracYbBQh93GXac8fq7HeN6JnpduB8=",
- version = "v0.0.0-20160803190731-bd40a432e4c7",
-)
-
-go_repository(
- name = "com_github_jonboulle_clockwork",
- importpath = "github.com/jonboulle/clockwork",
- sum = "h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=",
- version = "v0.1.0",
-)
-
-go_repository(
- name = "com_github_jtolds_gls",
- importpath = "github.com/jtolds/gls",
- sum = "h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=",
- version = "v4.20.0+incompatible",
-)
-
-go_repository(
- name = "com_github_julienschmidt_httprouter",
- importpath = "github.com/julienschmidt/httprouter",
- sum = "h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+9HbQbYf7g=",
- version = "v1.2.0",
-)
-
-go_repository(
- name = "com_github_klauspost_compress",
- importpath = "github.com/klauspost/compress",
- sum = "h1:eSvu8Tmq6j2psUJqJrLcWH6K3w5Dwc+qipbaA6eVEN4=",
- version = "v1.11.13",
-)
-
-go_repository(
- name = "com_github_kr_logfmt",
- importpath = "github.com/kr/logfmt",
- sum = "h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=",
- version = "v0.0.0-20140226030751-b84e30acd515",
-)
-
-go_repository(
- name = "com_github_magiconair_properties",
- importpath = "github.com/magiconair/properties",
- sum = "h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=",
- version = "v1.8.0",
-)
-
-go_repository(
- name = "com_github_marstr_guid",
- importpath = "github.com/marstr/guid",
- sum = "h1:/M4H/1G4avsieL6BbUwCOBzulmoeKVP5ux/3mQNnbyI=",
- version = "v1.1.0",
-)
-
-go_repository(
- name = "com_github_mattn_go_colorable",
- importpath = "github.com/mattn/go-colorable",
- sum = "h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=",
- version = "v0.0.9",
-)
-
-go_repository(
- name = "com_github_mattn_go_isatty",
- importpath = "github.com/mattn/go-isatty",
- sum = "h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs=",
- version = "v0.0.4",
-)
-
-go_repository(
- name = "com_github_mattn_go_runewidth",
- importpath = "github.com/mattn/go-runewidth",
- sum = "h1:UnlwIPBGaTZfPQ6T1IGzPI0EkYAQmT9fAEJ/poFC63o=",
- version = "v0.0.2",
-)
-
-go_repository(
- name = "com_github_mattn_go_shellwords",
- importpath = "github.com/mattn/go-shellwords",
- sum = "h1:K/VxK7SZ+cvuPgFSLKi5QPI9Vr/ipOf4C1gN+ntueUk=",
- version = "v1.0.3",
-)
-
-go_repository(
- name = "com_github_matttproud_golang_protobuf_extensions",
- importpath = "github.com/matttproud/golang_protobuf_extensions",
- sum = "h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=",
- version = "v1.0.2-0.20181231171920-c182affec369",
-)
-
-go_repository(
- name = "com_github_miekg_pkcs11",
- importpath = "github.com/miekg/pkcs11",
- sum = "h1:iMwmD7I5225wv84WxIG/bmxz9AXjWvTWIbM/TYHvWtw=",
- version = "v1.0.3",
-)
-
-go_repository(
- name = "com_github_mistifyio_go_zfs",
- importpath = "github.com/mistifyio/go-zfs",
- sum = "h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk=",
- version = "v2.1.2-0.20190413222219-f784269be439+incompatible",
-)
-
-go_repository(
- name = "com_github_mitchellh_go_homedir",
- importpath = "github.com/mitchellh/go-homedir",
- sum = "h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=",
- version = "v1.1.0",
-)
-
-go_repository(
- name = "com_github_mitchellh_mapstructure",
- importpath = "github.com/mitchellh/mapstructure",
- sum = "h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=",
- version = "v1.1.2",
-)
-
-go_repository(
- name = "com_github_mitchellh_osext",
- importpath = "github.com/mitchellh/osext",
- sum = "h1:2+myh5ml7lgEU/51gbeLHfKGNfgEQQIWrlbdaOsidbQ=",
- version = "v0.0.0-20151018003038-5e2d6d41470f",
-)
-
-go_repository(
- name = "com_github_moby_locker",
- importpath = "github.com/moby/locker",
- sum = "h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=",
- version = "v1.0.1",
-)
-
-go_repository(
- name = "com_github_moby_sys_mountinfo",
- importpath = "github.com/moby/sys/mountinfo",
- sum = "h1:1O+1cHA1aujwEwwVMa2Xm2l+gIpUHyd3+D+d7LZh1kM=",
- version = "v0.4.1",
-)
-
-go_repository(
- name = "com_github_moby_sys_symlink",
- importpath = "github.com/moby/sys/symlink",
- sum = "h1:MTFZ74KtNI6qQQpuBxU+uKCim4WtOMokr03hCfJcazE=",
- version = "v0.1.0",
-)
-
-go_repository(
- name = "com_github_moby_term",
- importpath = "github.com/moby/term",
- sum = "h1:aY7OQNf2XqY/JQ6qREWamhI/81os/agb2BAGpcx5yWI=",
- version = "v0.0.0-20200312100748-672ec06f55cd",
-)
-
-go_repository(
- name = "com_github_mrunalp_fileutils",
- importpath = "github.com/mrunalp/fileutils",
- sum = "h1:NKzVxiH7eSk+OQ4M+ZYW1K6h27RUV3MI6NUTsHhU6Z4=",
- version = "v0.5.0",
-)
-
-go_repository(
- name = "com_github_mwitkow_go_conntrack",
- importpath = "github.com/mwitkow/go-conntrack",
- sum = "h1:F9x/1yl3T2AeKLr2AMdilSD8+f9bvMnNN8VS5iDtovc=",
- version = "v0.0.0-20161129095857-cc309e4a2223",
-)
-
-go_repository(
- name = "com_github_ncw_swift",
- importpath = "github.com/ncw/swift",
- sum = "h1:4DQRPj35Y41WogBxyhOXlrI37nzGlyEcsforeudyYPQ=",
- version = "v1.0.47",
-)
-
-go_repository(
- name = "com_github_nxadm_tail",
- importpath = "github.com/nxadm/tail",
- sum = "h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=",
- version = "v1.4.4",
-)
-
-go_repository(
- name = "com_github_oklog_ulid",
- importpath = "github.com/oklog/ulid",
- sum = "h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=",
- version = "v1.3.1",
-)
-
-go_repository(
- name = "com_github_olekukonko_tablewriter",
- importpath = "github.com/olekukonko/tablewriter",
- sum = "h1:58+kh9C6jJVXYjt8IE48G2eWl6BjwU5Gj0gqY84fy78=",
- version = "v0.0.0-20170122224234-a0225b3f23b5",
-)
-
-go_repository(
- name = "com_github_oneofone_xxhash",
- importpath = "github.com/OneOfOne/xxhash",
- sum = "h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=",
- version = "v1.2.2",
-)
-
-go_repository(
- name = "com_github_opencontainers_runtime_tools",
- importpath = "github.com/opencontainers/runtime-tools",
- sum = "h1:H7DMc6FAjgwZZi8BRqjrAAHWoqEr5e5L6pS4V0ezet4=",
- version = "v0.0.0-20181011054405-1d69bd0f9c39",
-)
-
-go_repository(
- name = "com_github_opencontainers_selinux",
- importpath = "github.com/opencontainers/selinux",
- sum = "h1:+77ba4ar4jsCbL1GLbFL8fFM57w6suPfSS9PDLDY7KM=",
- version = "v1.8.0",
-)
-
-go_repository(
- name = "com_github_pelletier_go_toml",
- importpath = "github.com/pelletier/go-toml",
- sum = "h1:1Nf83orprkJyknT6h7zbuEGUEjcyVlCxSUGTENmNCRM=",
- version = "v1.8.1",
-)
-
-go_repository(
- name = "com_github_pquerna_cachecontrol",
- importpath = "github.com/pquerna/cachecontrol",
- sum = "h1:0XM1XL/OFFJjXsYXlG30spTkV/E9+gmd5GD1w2HE8xM=",
- version = "v0.0.0-20171018203845-0dec1b30a021",
-)
-
-go_repository(
- name = "com_github_prometheus_client_golang",
- importpath = "github.com/prometheus/client_golang",
- sum = "h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA=",
- version = "v1.7.1",
-)
-
-go_repository(
- name = "com_github_prometheus_common",
- importpath = "github.com/prometheus/common",
- sum = "h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc=",
- version = "v0.10.0",
-)
-
-go_repository(
- name = "com_github_prometheus_tsdb",
- importpath = "github.com/prometheus/tsdb",
- sum = "h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=",
- version = "v0.7.1",
-)
-
-go_repository(
- name = "com_github_rogpeppe_fastuuid",
- importpath = "github.com/rogpeppe/fastuuid",
- sum = "h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s=",
- version = "v1.2.0",
-)
-
-go_repository(
- name = "com_github_safchain_ethtool",
- importpath = "github.com/safchain/ethtool",
- sum = "h1:2c1EFnZHIPCW8qKWgHMH/fX2PkSabFc5mrVzfUNdg5U=",
- version = "v0.0.0-20190326074333-42ed695e3de8",
-)
-
-go_repository(
- name = "com_github_satori_go_uuid",
- importpath = "github.com/satori/go.uuid",
- sum = "h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=",
- version = "v1.2.0",
-)
-
-go_repository(
- name = "com_github_seccomp_libseccomp_golang",
- importpath = "github.com/seccomp/libseccomp-golang",
- sum = "h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo=",
- version = "v0.9.1",
-)
-
-go_repository(
- name = "com_github_shopify_logrus_bugsnag",
- importpath = "github.com/Shopify/logrus-bugsnag",
- sum = "h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=",
- version = "v0.0.0-20171204204709-577dee27f20d",
-)
-
-go_repository(
- name = "com_github_smartystreets_assertions",
- importpath = "github.com/smartystreets/assertions",
- sum = "h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=",
- version = "v0.0.0-20180927180507-b2de0cb4f26d",
-)
-
-go_repository(
- name = "com_github_smartystreets_goconvey",
- importpath = "github.com/smartystreets/goconvey",
- sum = "h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs=",
- version = "v0.0.0-20190330032615-68dc04aab96a",
-)
-
-go_repository(
- name = "com_github_soheilhy_cmux",
- importpath = "github.com/soheilhy/cmux",
- sum = "h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=",
- version = "v0.1.4",
-)
-
-go_repository(
- name = "com_github_spaolacci_murmur3",
- importpath = "github.com/spaolacci/murmur3",
- sum = "h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=",
- version = "v0.0.0-20180118202830-f09979ecbc72",
-)
-
-go_repository(
- name = "com_github_spf13_cast",
- importpath = "github.com/spf13/cast",
- sum = "h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=",
- version = "v1.3.0",
-)
-
-go_repository(
- name = "com_github_spf13_jwalterweatherman",
- importpath = "github.com/spf13/jwalterweatherman",
- sum = "h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=",
- version = "v1.0.0",
-)
-
-go_repository(
- name = "com_github_spf13_viper",
- importpath = "github.com/spf13/viper",
- sum = "h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU=",
- version = "v1.4.0",
-)
-
-go_repository(
- name = "com_github_stefanberger_go_pkcs11uri",
- importpath = "github.com/stefanberger/go-pkcs11uri",
- sum = "h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I=",
- version = "v0.0.0-20201008174630-78d3cae3a980",
-)
-
-go_repository(
- name = "com_github_tchap_go_patricia",
- importpath = "github.com/tchap/go-patricia",
- sum = "h1:JvoDL7JSoIP2HDE8AbDH3zC8QBPxmzYe32HHy5yQ+Ck=",
- version = "v2.2.6+incompatible",
-)
-
-go_repository(
- name = "com_github_tmc_grpc_websocket_proxy",
- importpath = "github.com/tmc/grpc-websocket-proxy",
- sum = "h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=",
- version = "v0.0.0-20190109142713-0ad062ec5ee5",
-)
-
-go_repository(
- name = "com_github_ugorji_go",
- importpath = "github.com/ugorji/go",
- sum = "h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw=",
- version = "v1.1.4",
-)
-
-go_repository(
- name = "com_github_willf_bitset",
- importpath = "github.com/willf/bitset",
- sum = "h1:N7Z7E9UvjW+sGsEl7k/SJrvY2reP1A07MrGuCjIOjRE=",
- version = "v1.1.11",
-)
-
-go_repository(
- name = "com_github_xiang90_probing",
- importpath = "github.com/xiang90/probing",
- sum = "h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=",
- version = "v0.0.0-20190116061207-43a291ad63a2",
-)
-
-go_repository(
- name = "com_github_xordataexchange_crypt",
- importpath = "github.com/xordataexchange/crypt",
- sum = "h1:ESFSdwYZvkeru3RtdrYueztKhOBCSAAzS4Gf+k0tEow=",
- version = "v0.0.3-0.20170626215501-b2862e3d0a77",
-)
-
-go_repository(
- name = "com_github_yvasiyarov_go_metrics",
- importpath = "github.com/yvasiyarov/go-metrics",
- sum = "h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI=",
- version = "v0.0.0-20140926110328-57bccd1ccd43",
-)
-
-go_repository(
- name = "com_github_yvasiyarov_gorelic",
- importpath = "github.com/yvasiyarov/gorelic",
- sum = "h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE=",
- version = "v0.0.0-20141212073537-a9bba5b9ab50",
-)
-
-go_repository(
- name = "com_github_yvasiyarov_newrelic_platform_go",
- importpath = "github.com/yvasiyarov/newrelic_platform_go",
- sum = "h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY=",
- version = "v0.0.0-20140908184405-b21fdbd4370f",
-)
-
-go_repository(
- name = "in_gopkg_airbrake_gobrake_v2",
- importpath = "gopkg.in/airbrake/gobrake.v2",
- sum = "h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo=",
- version = "v2.0.9",
-)
-
-go_repository(
- name = "in_gopkg_alecthomas_kingpin_v2",
- importpath = "gopkg.in/alecthomas/kingpin.v2",
- sum = "h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=",
- version = "v2.2.6",
-)
-
-go_repository(
- name = "in_gopkg_cheggaaa_pb_v1",
- importpath = "gopkg.in/cheggaaa/pb.v1",
- sum = "h1:Ev7yu1/f6+d+b3pi5vPdRPc6nNtP1umSfcWiEfRqv6I=",
- version = "v1.0.25",
-)
-
-go_repository(
- name = "in_gopkg_gemnasium_logrus_airbrake_hook_v2",
- importpath = "gopkg.in/gemnasium/logrus-airbrake-hook.v2",
- sum = "h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0=",
- version = "v2.1.2",
-)
-
-go_repository(
- name = "in_gopkg_natefinch_lumberjack_v2",
- importpath = "gopkg.in/natefinch/lumberjack.v2",
- sum = "h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=",
- version = "v2.0.0",
-)
-
-go_repository(
- name = "in_gopkg_resty_v1",
- importpath = "gopkg.in/resty.v1",
- sum = "h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI=",
- version = "v1.12.0",
-)
-
-go_repository(
- name = "in_gopkg_square_go_jose_v2",
- importpath = "gopkg.in/square/go-jose.v2",
- sum = "h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w=",
- version = "v2.5.1",
-)
-
-go_repository(
- name = "in_gopkg_yaml_v3",
- importpath = "gopkg.in/yaml.v3",
- sum = "h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=",
- version = "v3.0.0-20200313102051-9f266ea9e77c",
-)
-
-go_repository(
- name = "io_etcd_go_bbolt",
- importpath = "go.etcd.io/bbolt",
- sum = "h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=",
- version = "v1.3.5",
-)
-
-go_repository(
- name = "io_etcd_go_etcd",
- importpath = "go.etcd.io/etcd",
- sum = "h1:1JFLBqwIgdyHN1ZtgjTBwO+blA6gVOmZurpiMEsETKo=",
- version = "v0.5.0-alpha.5.0.20200910180754-dd1b699fc489",
-)
-
-go_repository(
- name = "io_k8s_component_base",
- importpath = "k8s.io/component-base",
- sum = "h1:wHmQb3b/QCTODA/MGih9x7XZQmhdDlA8AxJnAID6T2A=",
- version = "v0.16.13",
-)
-
-go_repository(
- name = "io_k8s_cri_api",
- importpath = "k8s.io/cri-api",
- sum = "h1:iXX0K2pRrbR8yXbZtDK/bSnmg/uSqIFiVJK1x4LUOMc=",
- version = "v0.20.6",
-)
-
-go_repository(
- name = "io_k8s_klog_v2",
- importpath = "k8s.io/klog/v2",
- sum = "h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ=",
- version = "v2.4.0",
-)
-
-go_repository(
- name = "io_k8s_kubernetes",
- importpath = "k8s.io/kubernetes",
- sum = "h1:qTfB+u5M92k2fCCCVP2iuhgwwSOv1EkAkvQY1tQODD8=",
- version = "v1.13.0",
-)
-
-go_repository(
- name = "io_k8s_sigs_apiserver_network_proxy_konnectivity_client",
- importpath = "sigs.k8s.io/apiserver-network-proxy/konnectivity-client",
- sum = "h1:4uqm9Mv+w2MmBYD+F4qf/v6tDFUdPOk29C095RbU5mY=",
- version = "v0.0.15",
-)
-
-go_repository(
- name = "io_k8s_sigs_structured_merge_diff_v4",
- importpath = "sigs.k8s.io/structured-merge-diff/v4",
- sum = "h1:4oyYo8NREp49LBBhKxEqCulFjg26rawYKrnCmg+Sr6c=",
- version = "v4.0.3",
-)
-
-go_repository(
- name = "io_opentelemetry_go_proto_otlp",
- importpath = "go.opentelemetry.io/proto/otlp",
- sum = "h1:rwOQPCuKAKmwGKq2aVNnYIibI6wnV7EvzgfTCzcdGg8=",
- version = "v0.7.0",
-)
-
-go_repository(
- name = "org_golang_google_cloud",
- importpath = "google.golang.org/cloud",
- sum = "h1:Cpp2P6TPjujNoC5M2KHY6g7wfyLYfIWRZaSdIKfDasA=",
- version = "v0.0.0-20151119220103-975617b05ea8",
-)
-
-go_repository(
- name = "org_mozilla_go_pkcs7",
- importpath = "go.mozilla.org/pkcs7",
- sum = "h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M=",
- version = "v0.0.0-20200128120323-432b2356ecb1",
-)
-
-go_repository(
- name = "org_uber_go_zap",
- importpath = "go.uber.org/zap",
- sum = "h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=",
- version = "v1.10.0",
-)
-
-go_repository(
- name = "tools_gotest_v3",
- importpath = "gotest.tools/v3",
- sum = "h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=",
- version = "v3.0.3",
-)
-
-go_repository(
- name = "com_github_bazelbuild_rules_go",
- importpath = "github.com/bazelbuild/rules_go",
- sum = "h1:KViqR7qKXwz+LrNdIauCDU21kneCk+4DnYjpvlJwH50=",
- version = "v0.27.0",
-)
-
-rules_proto_dependencies()
-
-rules_proto_toolchains()
-
-# Load bazel_toolchain to support Remote Build Execution.
-# See releases at https://releases.bazel.build/bazel-toolchains.html
-http_archive(
- name = "bazel_toolchains",
- sha256 = "1adf5db506a7e3c465a26988514cfc3971af6d5b3c2218925cd6e71ee443fc3f",
- strip_prefix = "bazel-toolchains-4.0.0",
- urls = [
- "https://github.com/bazelbuild/bazel-toolchains/releases/download/4.0.0/bazel-toolchains-4.0.0.tar.gz",
- "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/releases/download/4.0.0/bazel-toolchains-4.0.0.tar.gz",
- ],
-)
-
-# Creates a default toolchain config for RBE.
-load("@bazel_toolchains//rules:rbe_repo.bzl", "rbe_autoconfig")
-
-rbe_autoconfig(name = "rbe_default")
-
-http_archive(
- name = "rules_pkg",
- sha256 = "6b5969a7acd7b60c02f816773b06fcf32fbe8ba0c7919ccdc2df4f8fb923804a",
- url = "https://github.com/bazelbuild/rules_pkg/releases/download/0.3.0/rules_pkg-0.3.0.tar.gz",
-)
-
-load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
-
-rules_pkg_dependencies()
-
-# System Call test dependencies.
-# grpc also has a dependency on abseil, but since this declaration comes before
-# the grpc dependency declaration, it takes precedence over grpc's.
-# Version LTS 20210324.2
-http_archive(
- name = "com_google_absl",
- sha256 = "1764491a199eb9325b177126547f03d244f86b4ff28f16f206c7b3e7e4f777ec",
- strip_prefix = "abseil-cpp-278e0a071885a22dcd2fd1b5576cc44757299343",
- urls = [
- "https://mirror.bazel.build/github.com/abseil/abseil-cpp/archive/278e0a071885a22dcd2fd1b5576cc44757299343.tar.gz",
- "https://github.com/abseil/abseil-cpp/archive/278e0a071885a22dcd2fd1b5576cc44757299343.tar.gz"
- ],
-)
-
-# Load C++ grpc rules.
-http_archive(
- name = "com_github_grpc_grpc",
- sha256 = "2fcb7f1ab160d6fd3aaade64520be3e5446fc4c6fa7ba6581afdc4e26094bd81",
- strip_prefix = "grpc-1.26.0",
- urls = [
- "https://github.com/grpc/grpc/archive/v1.26.0.tar.gz",
- ],
-)
-
-load("@com_github_grpc_grpc//bazel:grpc_deps.bzl", "grpc_deps")
-
-grpc_deps()
-
-load("@com_github_grpc_grpc//bazel:grpc_extra_deps.bzl", "grpc_extra_deps")
-
-grpc_extra_deps()
-
-http_archive(
- name = "com_google_googletest",
- sha256 = "0a10bea96d8670e5eef948d79d824162b1577bb7889539e49ec786bfc3e48912",
- strip_prefix = "googletest-565f1b848215b77c3732bca345fe76a0431d8b34",
- urls = [
- "https://mirror.bazel.build/github.com/google/googletest/archive/565f1b848215b77c3732bca345fe76a0431d8b34.tar.gz",
- "https://github.com/google/googletest/archive/565f1b848215b77c3732bca345fe76a0431d8b34.tar.gz",
- ],
-)
-
-http_archive(
- name = "com_google_benchmark",
- sha256 = "3c6a165b6ecc948967a1ead710d4a181d7b0fbcaa183ef7ea84604994966221a",
- strip_prefix = "benchmark-1.5.0",
- urls = [
- "https://mirror.bazel.build/github.com/google/benchmark/archive/v1.5.0.tar.gz",
- "https://github.com/google/benchmark/archive/v1.5.0.tar.gz",
- ],
-)
-
-http_archive(
- name = "com_google_protobuf",
- sha256 = "528927e398f4e290001886894dac17c5c6a2e5548f3fb68004cfb01af901b53a",
- strip_prefix = "protobuf-3.17.3",
- urls = ["https://github.com/protocolbuffers/protobuf/archive/v3.17.3.zip"],
-)
-load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")
-protobuf_deps()
-
-# Schemas for testing.
-http_file(
- name = "buildkite_pipeline_schema",
- sha256 = "3369c58038b4d55c08928affafb653716eb1e7b3cabb4a391aef979dd921f4e1",
- urls = ["https://raw.githubusercontent.com/buildkite/pipeline-schema/f7a0894074d194bcf19eec5411fec0528f7f4180/schema.json"],
-)
-
-http_file(
- name = "github_workflow_schema",
- sha256 = "60603d1095b11d136e04a8b95be83a23ad8044169e46f82f925c320c1cf47a49",
- urls = ["https://raw.githubusercontent.com/SchemaStore/schemastore/27612065234778feaac216ce14dd47846fe0a2dd/src/schemas/json/github-workflow.json"],
-)
-
-# External Go repositories.
-#
-# Unfortunately, gazelle will automatically parse go modules in the
-# repositories and generate new go_repository stanzas. These may not respect
-# pins that we have in go.mod or below. So order actually matters here.
-
-go_repository(
- name = "com_github_sirupsen_logrus",
- importpath = "github.com/sirupsen/logrus",
- sum = "h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=",
- version = "v1.7.0",
-)
-
-go_repository(
- name = "com_github_containerd_containerd",
- build_file_proto_mode = "disable",
- importpath = "github.com/containerd/containerd",
- sum = "h1:K2U/F4jGAMBqeUssfgJRbFuomLcS2Fxo1vR3UM/Mbh8=",
- version = "v1.3.9",
-)
-
-go_repository(
- name = "com_github_cenkalti_backoff",
- importpath = "github.com/cenkalti/backoff",
- sum = "h1:8eZxmY1yvxGHzdzTEhI09npjMVGzNAdrqzruTX6jcK4=",
- version = "v1.1.1-0.20190506075156-2146c9339422",
-)
-
-go_repository(
- name = "com_github_gofrs_flock",
- importpath = "github.com/gofrs/flock",
- sum = "h1:MSdYClljsF3PbENUUEx85nkWfJSGfzYI9yEBZOJz6CY=",
- version = "v0.8.0",
-)
-
-go_repository(
- name = "com_github_golang_mock",
- importpath = "github.com/golang/mock",
- sum = "h1:jlYHihg//f7RRwuPfptm04yp4s7O6Kw8EZiVYIGcH0g=",
- version = "v1.5.0",
-)
-
-go_repository(
- name = "com_github_google_subcommands",
- importpath = "github.com/google/subcommands",
- sum = "h1:8nlgEAjIalk6uj/CGKCdOO8CQqTeysvcW4RFZ6HbkGM=",
- version = "v1.0.2-0.20190508160503-636abe8753b8",
-)
-
-go_repository(
- name = "com_github_google_uuid",
- importpath = "github.com/google/uuid",
- sum = "h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=",
- version = "v1.2.0",
-)
-
-go_repository(
- name = "com_github_kr_pretty",
- importpath = "github.com/kr/pretty",
- sum = "h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=",
- version = "v0.2.1",
-)
-
-go_repository(
- name = "com_github_kr_pty",
- importpath = "github.com/kr/pty",
- sum = "h1:zc0R6cOw98cMengLA0fvU55mqbnN7sd/tBMLzSejp+M=",
- version = "v1.1.4-0.20190131011033-7dc38fb350b1",
-)
-
-go_repository(
- name = "com_github_kr_text",
- importpath = "github.com/kr/text",
- sum = "h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=",
- version = "v0.1.0",
-)
-
-go_repository(
- name = "com_github_mohae_deepcopy",
- importpath = "github.com/mohae/deepcopy",
- sum = "h1:Sha2bQdoWE5YQPTlJOL31rmce94/tYi113SlFo1xQ2c=",
- version = "v0.0.0-20170308212314-bb9b5e7adda9",
-)
-
-go_repository(
- name = "com_github_syndtr_gocapability",
- importpath = "github.com/syndtr/gocapability",
- sum = "h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=",
- version = "v0.0.0-20180916011248-d98352740cb2",
-)
-
-go_repository(
- name = "com_github_vishvananda_netlink",
- importpath = "github.com/vishvananda/netlink",
- sum = "h1:7SWt9pGCMaw+N1ZhRsaLKaYNviFhxambdoaoYlDqz1w=",
- version = "v1.0.1-0.20190930145447-2ec5bdc52b86",
-)
-
-go_repository(
- name = "org_golang_google_grpc",
- build_file_proto_mode = "disable",
- importpath = "google.golang.org/grpc",
- sum = "h1:c+E5hkHV2oYLLcjZ0Uulu4thvOFKB0a9TWvowIWqgu4=",
- version = "v1.39.0-dev.0.20210518002758-2713b77e8526",
-)
-
-go_repository(
- name = "in_gopkg_check_v1",
- importpath = "gopkg.in/check.v1",
- sum = "h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=",
- version = "v1.0.0-20190902080502-41f04d3bba15",
-)
-
-go_repository(
- name = "org_golang_x_crypto",
- importpath = "golang.org/x/crypto",
- sum = "h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g=",
- version = "v0.0.0-20210220033148-5ea612d1eb83",
-)
-
-go_repository(
- name = "org_golang_x_mod",
- importpath = "golang.org/x/mod",
- sum = "h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY=",
- version = "v0.4.1",
-)
-
-go_repository(
- name = "org_golang_x_net",
- importpath = "golang.org/x/net",
- sum = "h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw=",
- version = "v0.0.0-20210226172049-e18ecbb05110",
-)
-
-go_repository(
- name = "org_golang_x_sync",
- importpath = "golang.org/x/sync",
- sum = "h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=",
- version = "v0.0.0-20210220032951-036812b2e83c",
-)
-
-go_repository(
- name = "org_golang_x_text",
- importpath = "golang.org/x/text",
- sum = "h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ=",
- version = "v0.3.5",
-)
-
-go_repository(
- name = "org_golang_x_time",
- importpath = "golang.org/x/time",
- sum = "h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=",
- version = "v0.0.0-20191024005414-555d28b269f0",
-)
-
-go_repository(
- name = "org_golang_x_tools",
- importpath = "golang.org/x/tools",
- sum = "h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY=",
- version = "v0.1.0",
-)
-
-go_repository(
- name = "org_golang_x_xerrors",
- importpath = "golang.org/x/xerrors",
- sum = "h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=",
- version = "v0.0.0-20200804184101-5ec99f83aff1",
-)
-
-go_repository(
- name = "com_github_google_btree",
- importpath = "github.com/google/btree",
- sum = "h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=",
- version = "v1.0.1",
-)
-
-go_repository(
- name = "com_github_golang_protobuf",
- importpath = "github.com/golang/protobuf",
- sum = "h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4=",
- version = "v1.5.0",
-)
-
-go_repository(
- name = "org_golang_x_oauth2",
- importpath = "golang.org/x/oauth2",
- sum = "h1:0Ja1LBD+yisY6RWM/BH7TJVXWsSjs2VwBSmvSX4HdBc=",
- version = "v0.0.0-20210402161424-2e8d93401602",
-)
-
-go_repository(
- name = "com_github_docker_docker",
- importpath = "github.com/docker/docker",
- sum = "h1:5AkIsnQpeL7eaqsM+Vl4Xbj5eIZFpPZZzXtNyfzzK/w=",
- version = "v1.4.2-0.20191028175130-9e7d5ac5ea55",
-)
-
-go_repository(
- name = "com_github_docker_go_connections",
- importpath = "github.com/docker/go-connections",
- sum = "h1:3lOnM9cSzgGwx8VfK/NGOW5fLQ0GjIlCkaktF+n1M6o=",
- version = "v0.3.0",
-)
-
-go_repository(
- name = "com_github_pkg_errors",
- importpath = "github.com/pkg/errors",
- sum = "h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=",
- version = "v0.9.1",
-)
-
-go_repository(
- name = "com_github_docker_go_units",
- importpath = "github.com/docker/go-units",
- sum = "h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=",
- version = "v0.4.0",
-)
-
-go_repository(
- name = "com_github_opencontainers_go_digest",
- importpath = "github.com/opencontainers/go-digest",
- sum = "h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=",
- version = "v1.0.0",
-)
-
-go_repository(
- name = "com_github_docker_distribution",
- importpath = "github.com/docker/distribution",
- sum = "h1:dvc1KSkIYTVjZgHf/CTC2diTYC8PzhaA5sFISRfNVrE=",
- version = "v2.7.1-0.20190205005809-0d3efadf0154+incompatible",
-)
-
-go_repository(
- name = "com_github_davecgh_go_spew",
- importpath = "github.com/davecgh/go-spew",
- sum = "h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=",
- version = "v1.1.1",
-)
-
-go_repository(
- name = "com_github_konsorten_go_windows_terminal_sequences",
- importpath = "github.com/konsorten/go-windows-terminal-sequences",
- sum = "h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=",
- version = "v1.0.3",
-)
-
-go_repository(
- name = "com_github_pmezard_go_difflib",
- importpath = "github.com/pmezard/go-difflib",
- sum = "h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=",
- version = "v1.0.0",
-)
-
-go_repository(
- name = "com_github_stretchr_testify",
- importpath = "github.com/stretchr/testify",
- sum = "h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=",
- version = "v1.6.1",
-)
-
-go_repository(
- name = "com_github_opencontainers_image_spec",
- importpath = "github.com/opencontainers/image-spec",
- sum = "h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=",
- version = "v1.0.1",
-)
-
-go_repository(
- name = "com_github_microsoft_go_winio",
- importpath = "github.com/Microsoft/go-winio",
- sum = "h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU=",
- version = "v0.5.0",
-)
-
-go_repository(
- name = "com_github_stretchr_objx",
- importpath = "github.com/stretchr/objx",
- sum = "h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=",
- version = "v0.2.0",
-)
-
-go_repository(
- name = "org_uber_go_atomic",
- importpath = "go.uber.org/atomic",
- sum = "h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=",
- version = "v1.7.0",
-)
-
-go_repository(
- name = "org_uber_go_multierr",
- importpath = "go.uber.org/multierr",
- sum = "h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=",
- version = "v1.6.0",
-)
-
-go_repository(
- name = "com_google_cloud_go",
- importpath = "cloud.google.com/go",
- sum = "h1:oqqswrt4x6b9OGBnNqdssxBl1xf0rSUNjU2BR4BZar0=",
- version = "v0.79.0",
-)
-
-go_repository(
- name = "io_opencensus_go",
- importpath = "go.opencensus.io",
- sum = "h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=",
- version = "v0.23.0",
-)
-
-go_repository(
- name = "co_honnef_go_tools",
- importpath = "honnef.co/go/tools",
- sum = "h1:EVDuO03OCZwpV2t/tLLxPmPiomagMoBOgfPt0FM+4IY=",
- version = "v0.1.1",
-)
-
-go_repository(
- name = "com_github_burntsushi_toml",
- importpath = "github.com/BurntSushi/toml",
- sum = "h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=",
- version = "v0.3.1",
-)
-
-go_repository(
- name = "com_github_census_instrumentation_opencensus_proto",
- importpath = "github.com/census-instrumentation/opencensus-proto",
- sum = "h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=",
- version = "v0.2.1",
-)
-
-go_repository(
- name = "com_github_client9_misspell",
- importpath = "github.com/client9/misspell",
- sum = "h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=",
- version = "v0.3.4",
-)
-
-go_repository(
- name = "com_github_cncf_udpa_go",
- importpath = "github.com/cncf/udpa/go",
- sum = "h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M=",
- version = "v0.0.0-20201120205902-5459f2c99403",
-)
-
-go_repository(
- name = "com_github_containerd_cgroups",
- build_file_proto_mode = "disable",
- importpath = "github.com/containerd/cgroups",
- sum = "h1:7grrpcfCtbZLsjtB0DgMuzs1umsJmpzaHMZ6cO6iAWw=",
- version = "v0.0.0-20201119153540-4cbc285b3327",
-)
-
-go_repository(
- name = "com_github_containerd_console",
- importpath = "github.com/containerd/console",
- sum = "h1:u7SFAJyRqWcG6ogaMAx3KjSTy1e3hT9QxqX7Jco7dRc=",
- version = "v1.0.1",
-)
-
-go_repository(
- name = "com_github_containerd_continuity",
- importpath = "github.com/containerd/continuity",
- sum = "h1:UFRRY5JemiAhPZrr/uE0n8fMTLcZsUvySPr1+D7pgr8=",
- version = "v0.1.0",
-)
-
-go_repository(
- name = "com_github_containerd_fifo",
- importpath = "github.com/containerd/fifo",
- sum = "h1:lsjC5ENBl+Zgf38+B0ymougXFp0BaubeIVETltYZTQw=",
- version = "v0.0.0-20191213151349-ff969a566b00",
-)
-
-go_repository(
- name = "com_github_containerd_go_runc",
- importpath = "github.com/containerd/go-runc",
- sum = "h1:PRTagVMbJcCezLcHXe8UJvR1oBzp2lG3CEumeFOLOds=",
- version = "v0.0.0-20200220073739-7016d3ce2328",
-)
-
-go_repository(
- name = "com_github_containerd_ttrpc",
- importpath = "github.com/containerd/ttrpc",
- sum = "h1:2/O3oTZN36q2xRolk0a2WWGgh7/Vf/liElg5hFYLX9U=",
- version = "v1.0.2",
-)
-
-go_repository(
- name = "com_github_docker_go_events",
- importpath = "github.com/docker/go-events",
- sum = "h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=",
- version = "v0.0.0-20190806004212-e31b211e4f1c",
-)
-
-go_repository(
- name = "com_github_dustin_go_humanize",
- importpath = "github.com/dustin/go-humanize",
- sum = "h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=",
- version = "v1.0.0",
-)
-
-go_repository(
- name = "com_github_envoyproxy_go_control_plane",
- importpath = "github.com/envoyproxy/go-control-plane",
- sum = "h1:dulLQAYQFYtG5MTplgNGHWuV2D+OBD+Z8lmDBmbLg+s=",
- version = "v0.9.9-0.20210512163311-63b5d3c536b0",
-)
-
-go_repository(
- name = "com_github_envoyproxy_protoc_gen_validate",
- importpath = "github.com/envoyproxy/protoc-gen-validate",
- sum = "h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=",
- version = "v0.1.0",
-)
-
-go_repository(
- name = "com_github_gogo_googleapis",
- importpath = "github.com/gogo/googleapis",
- sum = "h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0=",
- version = "v1.4.1",
-)
-
-go_repository(
- name = "com_github_gogo_protobuf",
- importpath = "github.com/gogo/protobuf",
- sum = "h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=",
- version = "v1.3.2",
-)
-
-go_repository(
- name = "com_github_golang_glog",
- importpath = "github.com/golang/glog",
- sum = "h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=",
- version = "v0.0.0-20160126235308-23def4e6c14b",
-)
-
-go_repository(
- name = "com_github_google_go_cmp",
- importpath = "github.com/google/go-cmp",
- sum = "h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=",
- version = "v0.5.5",
-)
-
-go_repository(
- name = "com_github_google_go_querystring",
- importpath = "github.com/google/go-querystring",
- sum = "h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=",
- version = "v1.0.0",
-)
-
-go_repository(
- name = "com_github_hashicorp_golang_lru",
- importpath = "github.com/hashicorp/golang-lru",
- sum = "h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=",
- version = "v0.5.1",
-)
-
-go_repository(
- name = "com_github_inconshreveable_mousetrap",
- importpath = "github.com/inconshreveable/mousetrap",
- sum = "h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=",
- version = "v1.0.0",
-)
-
-go_repository(
- name = "com_github_kisielk_errcheck",
- importpath = "github.com/kisielk/errcheck",
- sum = "h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY=",
- version = "v1.5.0",
-)
-
-go_repository(
- name = "com_github_kisielk_gotool",
- importpath = "github.com/kisielk/gotool",
- sum = "h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=",
- version = "v1.0.0",
-)
-
-go_repository(
- name = "com_github_microsoft_hcsshim",
- importpath = "github.com/Microsoft/hcsshim",
- sum = "h1:lbPVK25c1cu5xTLITwpUcxoA9vKrKErASPYygvouJns=",
- version = "v0.8.14",
-)
-
-go_repository(
- name = "com_github_opencontainers_runc",
- importpath = "github.com/opencontainers/runc",
- sum = "h1:4+xo8mtWixbHoEm451+WJNUrq12o2/tDsyK9Vgc/NcA=",
- version = "v1.0.0-rc90",
-)
-
-go_repository(
- name = "com_github_opencontainers_runtime_spec",
- importpath = "github.com/opencontainers/runtime-spec",
- sum = "h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0=",
- version = "v1.0.2",
-)
-
-go_repository(
- name = "com_github_pborman_uuid",
- importpath = "github.com/pborman/uuid",
- sum = "h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=",
- version = "v1.2.0",
-)
-
-go_repository(
- name = "com_github_prometheus_client_model",
- importpath = "github.com/prometheus/client_model",
- sum = "h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=",
- version = "v0.2.0",
-)
-
-go_repository(
- name = "com_github_prometheus_procfs",
- importpath = "github.com/prometheus/procfs",
- sum = "h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=",
- version = "v0.6.0",
-)
-
-go_repository(
- name = "com_github_spf13_cobra",
- importpath = "github.com/spf13/cobra",
- sum = "h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=",
- version = "v1.0.0",
-)
-
-go_repository(
- name = "com_github_spf13_pflag",
- importpath = "github.com/spf13/pflag",
- sum = "h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=",
- version = "v1.0.5",
-)
-
-go_repository(
- name = "com_github_urfave_cli",
- importpath = "github.com/urfave/cli",
- sum = "h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo=",
- version = "v1.22.2",
-)
-
-go_repository(
- name = "com_github_yuin_goldmark",
- importpath = "github.com/yuin/goldmark",
- sum = "h1:ruQGxdhGHe7FWOJPT0mKs5+pD2Xs1Bm/kdGlHO04FmM=",
- version = "v1.2.1",
-)
-
-go_repository(
- name = "in_gopkg_yaml_v2",
- importpath = "gopkg.in/yaml.v2",
- sum = "h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=",
- version = "v2.2.8",
-)
-
-go_repository(
- name = "org_bazil_fuse",
- importpath = "bazil.org/fuse",
- sum = "h1:SC+c6A1qTFstO9qmB86mPV2IpYme/2ZoEQ0hrP+wo+Q=",
- version = "v0.0.0-20160811212531-371fbbdaa898",
-)
-
-go_repository(
- name = "org_golang_google_appengine",
- importpath = "google.golang.org/appengine",
- sum = "h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=",
- version = "v1.6.7",
-)
-
-go_repository(
- name = "org_golang_google_genproto",
- importpath = "google.golang.org/genproto",
- sum = "h1:YRBxgxUW6GFi+AKsn8WGA9k1SZohK+gGuEqdeT5aoNQ=",
- version = "v0.0.0-20210312152112-fc591d9ea70f",
-)
-
-go_repository(
- name = "org_golang_google_protobuf",
- importpath = "google.golang.org/protobuf",
- sum = "h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=",
- version = "v1.26.0",
-)
-
-go_repository(
- name = "org_golang_x_exp",
- importpath = "golang.org/x/exp",
- sum = "h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y=",
- version = "v0.0.0-20200224162631-6cc2880d07d6",
-)
-
-go_repository(
- name = "org_golang_x_lint",
- importpath = "golang.org/x/lint",
- sum = "h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI=",
- version = "v0.0.0-20201208152925-83fdc39ff7b5",
-)
-
-go_repository(
- name = "tools_gotest",
- importpath = "gotest.tools",
- sum = "h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=",
- version = "v2.2.0+incompatible",
-)
-
-go_repository(
- name = "com_github_burntsushi_xgb",
- importpath = "github.com/BurntSushi/xgb",
- sum = "h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=",
- version = "v0.0.0-20160522181843-27f122750802",
-)
-
-go_repository(
- name = "com_github_chzyer_logex",
- importpath = "github.com/chzyer/logex",
- sum = "h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=",
- version = "v1.1.10",
-)
-
-go_repository(
- name = "com_github_chzyer_readline",
- importpath = "github.com/chzyer/readline",
- sum = "h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=",
- version = "v0.0.0-20180603132655-2972be24d48e",
-)
-
-go_repository(
- name = "com_github_chzyer_test",
- importpath = "github.com/chzyer/test",
- sum = "h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=",
- version = "v0.0.0-20180213035817-a1ea475d72b1",
-)
-
-go_repository(
- name = "com_github_go_gl_glfw_v3_3_glfw",
- importpath = "github.com/go-gl/glfw/v3.3/glfw",
- sum = "h1:WtGNWLvXpe6ZudgnXrq0barxBImvnnJoMEhXAzcbM0I=",
- version = "v0.0.0-20200222043503-6f7a984d4dc4",
-)
-
-go_repository(
- name = "com_github_golang_groupcache",
- importpath = "github.com/golang/groupcache",
- sum = "h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=",
- version = "v0.0.0-20200121045136-8c9f03a8e57e",
-)
-
-go_repository(
- name = "com_github_google_martian",
- importpath = "github.com/google/martian",
- sum = "h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=",
- version = "v2.1.0+incompatible",
-)
-
-go_repository(
- name = "com_github_google_pprof",
- importpath = "github.com/google/pprof",
- sum = "h1:l2YRhr+YLzmSp7KJMswRVk/lO5SwoFIcCLzJsVj+YPc=",
- version = "v0.0.0-20210423192551-a2663126120b",
-)
-
-go_repository(
- name = "com_github_google_renameio",
- importpath = "github.com/google/renameio",
- sum = "h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=",
- version = "v0.1.0",
-)
-
-go_repository(
- name = "com_github_googleapis_gax_go_v2",
- importpath = "github.com/googleapis/gax-go/v2",
- sum = "h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=",
- version = "v2.0.5",
-)
-
-go_repository(
- name = "com_github_ianlancetaylor_demangle",
- importpath = "github.com/ianlancetaylor/demangle",
- sum = "h1:mV02weKRL81bEnm8A0HT1/CAelMQDBuQIfLw8n+d6xI=",
- version = "v0.0.0-20200824232613-28f6c0f3b639",
-)
-
-go_repository(
- name = "com_github_jstemmer_go_junit_report",
- importpath = "github.com/jstemmer/go-junit-report",
- sum = "h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=",
- version = "v0.9.1",
-)
-
-go_repository(
- name = "com_github_rogpeppe_go_internal",
- importpath = "github.com/rogpeppe/go-internal",
- sum = "h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk=",
- version = "v1.3.0",
-)
-
-go_repository(
- name = "com_shuralyov_dmitri_gpu_mtl",
- importpath = "dmitri.shuralyov.com/gpu/mtl",
- sum = "h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY=",
- version = "v0.0.0-20190408044501-666a987793e9",
-)
-
-go_repository(
- name = "in_gopkg_errgo_v2",
- importpath = "gopkg.in/errgo.v2",
- sum = "h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=",
- version = "v2.1.0",
-)
-
-go_repository(
- name = "io_rsc_binaryregexp",
- importpath = "rsc.io/binaryregexp",
- sum = "h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=",
- version = "v0.2.0",
-)
-
-go_repository(
- name = "org_golang_google_api",
- importpath = "google.golang.org/api",
- sum = "h1:uqATLkpxiBrhrvFoebXUjvyzE9nQf+pVyy0Z0IHE+fc=",
- version = "v0.42.0",
-)
-
-go_repository(
- name = "org_golang_x_image",
- importpath = "golang.org/x/image",
- sum = "h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4=",
- version = "v0.0.0-20190802002840-cff245a6509b",
-)
-
-go_repository(
- name = "org_golang_x_mobile",
- importpath = "golang.org/x/mobile",
- sum = "h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs=",
- version = "v0.0.0-20190719004257-d2bd2a29d028",
-)
-
-go_repository(
- name = "com_github_containerd_typeurl",
- importpath = "github.com/containerd/typeurl",
- sum = "h1:HovfQDS/K3Mr7eyS0QJLxE1CbVUhjZCl6g3OhFJgP1o=",
- version = "v0.0.0-20200205145503-b45ef1f1f737",
-)
-
-go_repository(
- name = "com_github_vishvananda_netns",
- importpath = "github.com/vishvananda/netns",
- sum = "h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA=",
- version = "v0.0.0-20210104183010-2eb08e3e575f",
-)
-
-go_repository(
- name = "com_google_cloud_go_bigquery",
- importpath = "cloud.google.com/go/bigquery",
- sum = "h1:PQcPefKFdaIzjQFbiyOgAqyx8q5djaE7x9Sqe712DPA=",
- version = "v1.8.0",
-)
-
-go_repository(
- name = "com_google_cloud_go_datastore",
- importpath = "cloud.google.com/go/datastore",
- sum = "h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ=",
- version = "v1.1.0",
-)
-
-go_repository(
- name = "com_google_cloud_go_pubsub",
- importpath = "cloud.google.com/go/pubsub",
- sum = "h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU=",
- version = "v1.3.1",
-)
-
-go_repository(
- name = "com_google_cloud_go_storage",
- importpath = "cloud.google.com/go/storage",
- sum = "h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA=",
- version = "v1.10.0",
-)
-
-go_repository(
- name = "com_github_cilium_ebpf",
- importpath = "github.com/cilium/ebpf",
- sum = "h1:QlHdikaxALkqWasW8hAC1mfR0jdmvbfaBdBPFmRSglA=",
- version = "v0.4.0",
-)
-
-go_repository(
- name = "com_github_coreos_go_systemd_v22",
- importpath = "github.com/coreos/go-systemd/v22",
- sum = "h1:kq/SbG2BCKLkDKkjQf5OWwKWUKj1lgs3lFI4PxnR5lg=",
- version = "v22.1.0",
-)
-
-go_repository(
- name = "com_github_cpuguy83_go_md2man_v2",
- importpath = "github.com/cpuguy83/go-md2man/v2",
- sum = "h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=",
- version = "v2.0.0",
-)
-
-go_repository(
- name = "com_github_godbus_dbus_v5",
- importpath = "github.com/godbus/dbus/v5",
- sum = "h1:ZqHaoEF7TBzh4jzPmqVhE/5A1z9of6orkAe5uHoAeME=",
- version = "v5.0.3",
-)
-
-go_repository(
- name = "com_github_russross_blackfriday_v2",
- importpath = "github.com/russross/blackfriday/v2",
- sum = "h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=",
- version = "v2.0.1",
-)
-
-go_repository(
- name = "com_github_shurcool_sanitized_anchor_name",
- importpath = "github.com/shurcooL/sanitized_anchor_name",
- sum = "h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=",
- version = "v1.0.0",
-)
-
-go_repository(
- name = "com_github_azure_go_autorest_autorest",
- importpath = "github.com/Azure/go-autorest/autorest",
- sum = "h1:eVvIXUKiTgv++6YnWb42DUA1YL7qDugnKP0HljexdnQ=",
- version = "v0.11.1",
-)
-
-go_repository(
- name = "com_github_azure_go_autorest_autorest_adal",
- importpath = "github.com/Azure/go-autorest/autorest/adal",
- sum = "h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0=",
- version = "v0.9.5",
-)
-
-go_repository(
- name = "com_github_azure_go_autorest_autorest_date",
- importpath = "github.com/Azure/go-autorest/autorest/date",
- sum = "h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=",
- version = "v0.3.0",
-)
-
-go_repository(
- name = "com_github_azure_go_autorest_autorest_mocks",
- importpath = "github.com/Azure/go-autorest/autorest/mocks",
- sum = "h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=",
- version = "v0.4.1",
-)
-
-go_repository(
- name = "com_github_azure_go_autorest_logger",
- importpath = "github.com/Azure/go-autorest/logger",
- sum = "h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE=",
- version = "v0.2.0",
-)
-
-go_repository(
- name = "com_github_azure_go_autorest_tracing",
- importpath = "github.com/Azure/go-autorest/tracing",
- sum = "h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=",
- version = "v0.6.0",
-)
-
-go_repository(
- name = "com_github_dgrijalva_jwt_go",
- importpath = "github.com/dgrijalva/jwt-go",
- sum = "h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=",
- version = "v3.2.0+incompatible",
-)
-
-go_repository(
- name = "com_github_docker_spdystream",
- importpath = "github.com/docker/spdystream",
- sum = "h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg=",
- version = "v0.0.0-20160310174837-449fdfce4d96",
-)
-
-go_repository(
- name = "com_github_elazarl_goproxy",
- importpath = "github.com/elazarl/goproxy",
- sum = "h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=",
- version = "v0.0.0-20180725130230-947c36da3153",
-)
-
-go_repository(
- name = "com_github_emicklei_go_restful",
- importpath = "github.com/emicklei/go-restful",
- sum = "h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=",
- version = "v2.9.5+incompatible",
-)
-
-go_repository(
- name = "com_github_evanphx_json_patch",
- importpath = "github.com/evanphx/json-patch",
- sum = "h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses=",
- version = "v4.9.0+incompatible",
-)
-
-go_repository(
- name = "com_github_fsnotify_fsnotify",
- importpath = "github.com/fsnotify/fsnotify",
- sum = "h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=",
- version = "v1.4.9",
-)
-
-go_repository(
- name = "com_github_ghodss_yaml",
- importpath = "github.com/ghodss/yaml",
- sum = "h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=",
- version = "v1.0.0",
-)
-
-go_repository(
- name = "com_github_go_logr_logr",
- importpath = "github.com/go-logr/logr",
- sum = "h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=",
- version = "v0.2.0",
-)
-
-go_repository(
- name = "com_github_go_openapi_jsonpointer",
- importpath = "github.com/go-openapi/jsonpointer",
- sum = "h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=",
- version = "v0.19.3",
-)
-
-go_repository(
- name = "com_github_go_openapi_jsonreference",
- importpath = "github.com/go-openapi/jsonreference",
- sum = "h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o=",
- version = "v0.19.3",
-)
-
-go_repository(
- name = "com_github_go_openapi_spec",
- importpath = "github.com/go-openapi/spec",
- sum = "h1:A4SZ6IWh3lnjH0rG0Z5lkxazMGBECtrZcbyYQi+64k4=",
- version = "v0.19.0",
-)
-
-go_repository(
- name = "com_github_go_openapi_swag",
- importpath = "github.com/go-openapi/swag",
- sum = "h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=",
- version = "v0.19.5",
-)
-
-go_repository(
- name = "com_github_google_gofuzz",
- importpath = "github.com/google/gofuzz",
- sum = "h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=",
- version = "v1.1.0",
-)
-
-go_repository(
- name = "com_github_googleapis_gnostic",
- build_file_proto_mode = "disable_global",
- importpath = "github.com/googleapis/gnostic",
- sum = "h1:BXDUo8p/DaxC+4FJY/SSx3gvnx9C1VdHNgaUkiEL5mk=",
- version = "v0.4.0",
-)
-
-go_repository(
- name = "com_github_gophercloud_gophercloud",
- importpath = "github.com/gophercloud/gophercloud",
- sum = "h1:P/nh25+rzXouhytV2pUHBb65fnds26Ghl8/391+sT5o=",
- version = "v0.1.0",
-)
-
-go_repository(
- name = "com_github_gregjones_httpcache",
- importpath = "github.com/gregjones/httpcache",
- sum = "h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM=",
- version = "v0.0.0-20180305231024-9cad4c3443a7",
-)
-
-go_repository(
- name = "com_github_hpcloud_tail",
- importpath = "github.com/hpcloud/tail",
- sum = "h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=",
- version = "v1.0.0",
-)
-
-go_repository(
- name = "com_github_imdario_mergo",
- importpath = "github.com/imdario/mergo",
- sum = "h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg=",
- version = "v0.3.9",
-)
-
-go_repository(
- name = "com_github_json_iterator_go",
- importpath = "github.com/json-iterator/go",
- sum = "h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=",
- version = "v1.1.10",
-)
-
-go_repository(
- name = "com_github_mailru_easyjson",
- importpath = "github.com/mailru/easyjson",
- sum = "h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM=",
- version = "v0.7.0",
-)
-
-go_repository(
- name = "com_github_mattbaird_jsonpatch",
- importpath = "github.com/mattbaird/jsonpatch",
- sum = "h1:+J2gw7Bw77w/fbK7wnNJJDKmw1IbWft2Ul5BzrG1Qm8=",
- version = "v0.0.0-20171005235357-81af80346b1a",
-)
-
-go_repository(
- name = "com_github_modern_go_concurrent",
- importpath = "github.com/modern-go/concurrent",
- sum = "h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=",
- version = "v0.0.0-20180306012644-bacd9c7ef1dd",
-)
-
-go_repository(
- name = "com_github_modern_go_reflect2",
- importpath = "github.com/modern-go/reflect2",
- sum = "h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=",
- version = "v1.0.1",
-)
-
-go_repository(
- name = "com_github_munnerz_goautoneg",
- importpath = "github.com/munnerz/goautoneg",
- sum = "h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=",
- version = "v0.0.0-20191010083416-a7dc8b61c822",
-)
-
-go_repository(
- name = "com_github_mxk_go_flowrate",
- importpath = "github.com/mxk/go-flowrate",
- sum = "h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=",
- version = "v0.0.0-20140419014527-cca7078d478f",
-)
-
-go_repository(
- name = "com_github_nytimes_gziphandler",
- importpath = "github.com/NYTimes/gziphandler",
- sum = "h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0=",
- version = "v0.0.0-20170623195520-56545f4a5d46",
-)
-
-go_repository(
- name = "com_github_onsi_ginkgo",
- importpath = "github.com/onsi/ginkgo",
- sum = "h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ=",
- version = "v1.12.1",
-)
-
-go_repository(
- name = "com_github_onsi_gomega",
- importpath = "github.com/onsi/gomega",
- sum = "h1:Gwkk+PTu/nfOwNMtUB/mRUv0X7ewW5dO4AERT1ThVKo=",
- version = "v1.10.0",
-)
-
-go_repository(
- name = "com_github_peterbourgon_diskv",
- importpath = "github.com/peterbourgon/diskv",
- sum = "h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=",
- version = "v2.0.1+incompatible",
-)
-
-go_repository(
- name = "com_github_puerkitobio_purell",
- importpath = "github.com/PuerkitoBio/purell",
- sum = "h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=",
- version = "v1.1.1",
-)
-
-go_repository(
- name = "com_github_puerkitobio_urlesc",
- importpath = "github.com/PuerkitoBio/urlesc",
- sum = "h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=",
- version = "v0.0.0-20170810143723-de5bf2ad4578",
-)
-
-go_repository(
- name = "com_github_spf13_afero",
- importpath = "github.com/spf13/afero",
- sum = "h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=",
- version = "v1.2.2",
-)
-
-go_repository(
- name = "in_gopkg_fsnotify_v1",
- importpath = "gopkg.in/fsnotify.v1",
- sum = "h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=",
- version = "v1.4.7",
-)
-
-go_repository(
- name = "in_gopkg_inf_v0",
- importpath = "gopkg.in/inf.v0",
- sum = "h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=",
- version = "v0.9.1",
-)
-
-go_repository(
- name = "in_gopkg_tomb_v1",
- importpath = "gopkg.in/tomb.v1",
- sum = "h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=",
- version = "v1.0.0-20141024135613-dd632973f1e7",
-)
-
-go_repository(
- name = "io_k8s_api",
- build_file_proto_mode = "disable_global",
- importpath = "k8s.io/api",
- sum = "h1:/RE6SNxrws72vzEJsCil3WSR2T9gUlYYoRxnJyZiexs=",
- version = "v0.16.13",
-)
-
-go_repository(
- name = "io_k8s_apimachinery",
- build_file_proto_mode = "disable_global",
- importpath = "k8s.io/apimachinery",
- sum = "h1:eUHWTe8VT+VOZVKGfSCcFZDrr9RZ8djLYGjIanaZnXc=",
- version = "v0.16.14-rc.0",
-)
-
-go_repository(
- name = "io_k8s_client_go",
- importpath = "k8s.io/client-go",
- sum = "h1:jp76b20+4h8qZBxferSAVZ6MjBEpw3F309zLmPhngag=",
- version = "v0.16.13",
-)
-
-go_repository(
- name = "io_k8s_gengo",
- importpath = "k8s.io/gengo",
- sum = "h1:sAvhNk5RRuc6FNYGqe7Ygz3PSo/2wGWbulskmzRX8Vs=",
- version = "v0.0.0-20200413195148-3a45101e95ac",
-)
-
-go_repository(
- name = "io_k8s_klog",
- importpath = "k8s.io/klog",
- sum = "h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=",
- version = "v1.0.0",
-)
-
-go_repository(
- name = "io_k8s_kube_openapi",
- importpath = "k8s.io/kube-openapi",
- sum = "h1:PsbYeEz2x7ll6JYUzBEG+DT78910DDTlvn5Ma10F5/E=",
- version = "v0.0.0-20200410163147-594e756bea31",
-)
-
-go_repository(
- name = "io_k8s_sigs_structured_merge_diff",
- importpath = "sigs.k8s.io/structured-merge-diff",
- sum = "h1:4Z09Hglb792X0kfOBBJUPFEyvVfQWrYT/l8h5EKA6JQ=",
- version = "v0.0.0-20190525122527-15d366b2352e",
-)
-
-go_repository(
- name = "io_k8s_sigs_yaml",
- importpath = "sigs.k8s.io/yaml",
- sum = "h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=",
- version = "v1.2.0",
-)
-
-go_repository(
- name = "io_k8s_utils",
- importpath = "k8s.io/utils",
- sum = "h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw=",
- version = "v0.0.0-20201110183641-67b214c5f920",
-)
-
-go_repository(
- name = "com_github_xeipuuv_gojsonpointer",
- importpath = "github.com/xeipuuv/gojsonpointer",
- sum = "h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=",
- version = "v0.0.0-20180127040702-4e3ac2762d5f",
-)
-
-go_repository(
- name = "com_github_xeipuuv_gojsonreference",
- importpath = "github.com/xeipuuv/gojsonreference",
- sum = "h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=",
- version = "v0.0.0-20180127040603-bd5ef7bd5415",
-)
-
-go_repository(
- name = "com_github_xeipuuv_gojsonschema",
- importpath = "github.com/xeipuuv/gojsonschema",
- sum = "h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=",
- version = "v1.2.0",
-)
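The go_repository entries removed above pin each external Go module to a fixed version and checksum for Bazel. As a rough sketch (not part of this change), blocks like these are typically regenerated from go.mod with gazelle's update-repos command rather than maintained by hand; the //:gazelle target below is an assumption about how gazelle is wired into the workspace:

    # Sketch only: regenerate go_repository pins from go.mod via gazelle.
    # The //:gazelle target is assumed to be declared with gazelle's standard rule.
    bazel run //:gazelle -- update-repos -from_file=go.mod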
diff --git a/debian/BUILD b/debian/BUILD
deleted file mode 100644
index 32cc209bf..000000000
--- a/debian/BUILD
+++ /dev/null
@@ -1,50 +0,0 @@
-load("//tools:defs.bzl", "pkg_deb", "pkg_tar", "select_arch", "version")
-
-package(licenses = ["notice"])
-
-pkg_tar(
- name = "debian-bin",
- srcs = [
- "//runsc",
- "//shim:containerd-shim-runsc-v1",
- ],
- mode = "0755",
- package_dir = "/usr/bin",
-)
-
-pkg_tar(
- name = "debian-data",
- extension = "tar.gz",
- deps = [
- ":debian-bin",
- "//shim:config",
- ],
-)
-
-pkg_deb(
- name = "debian",
- out = "runsc-latest.deb",
- architecture = select_arch(
- amd64 = "amd64",
- arm64 = "arm64",
- ),
- changes = "runsc.changes",
- conffiles = [
- "/etc/containerd/runsc.toml",
- ],
- data = ":debian-data",
- deb = "runsc.deb",
- # Note that the description_file will be flattened (all newlines removed),
- # and therefore it is kept to a simple one-line description. The expected
- # format for debian packages is "short summary\nLonger explanation of
- # tool." and this is impossible with the flattening.
- description_file = "description",
- homepage = "https://gvisor.dev/",
- maintainer = "The gVisor Authors <gvisor-dev@googlegroups.com>",
- package = "runsc",
- postinst = "postinst.sh",
- version_file = version,
- visibility = [
- "//visibility:public",
- ],
-)
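For context, the pkg_deb rule deleted above names runsc-latest.deb as its output, so before this change the package could be built directly with Bazel. A minimal sketch, assuming the default Bazel output layout:

    # Sketch only: build the Debian package declared by the pkg_deb rule above.
    bazel build //debian:debian
    # The resulting artifact lands under the standard output tree.
    ls bazel-bin/debian/runsc-latest.deb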
diff --git a/debian/description b/debian/description
deleted file mode 100644
index 9e8e08805..000000000
--- a/debian/description
+++ /dev/null
@@ -1 +0,0 @@
-gVisor container sandbox runtime
diff --git a/debian/postinst.sh b/debian/postinst.sh
deleted file mode 100755
index 6a326f823..000000000
--- a/debian/postinst.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/sh -e
-
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-if [ "$1" != configure ]; then
- exit 0
-fi
-
-# Update docker configuration.
-if [ -f /etc/docker/daemon.json ]; then
- runsc install
- if systemctl is-active -q docker; then
- systemctl restart docker || echo "unable to restart docker; you must do so manually." >&2
- fi
-fi
-
-# For containerd-based installers, we don't automatically update the
-# configuration. If containerd uses a v2 shim, then it will find the package
-# binaries automatically when provided the appropriate annotation.
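The removed post-install hook only registers runsc with Docker when /etc/docker/daemon.json already exists, then restarts Docker if it is active. A minimal sketch of verifying the result on a host where the package was installed (standard Docker and gVisor commands, not part of this change):

    # Sketch only: confirm the runsc runtime is registered and usable.
    docker info | grep -i runtimes       # should list runsc alongside runc
    docker run --rm --runtime=runsc hello-world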
diff --git a/g3doc/BUILD b/g3doc/BUILD
deleted file mode 100644
index f91a77b6f..000000000
--- a/g3doc/BUILD
+++ /dev/null
@@ -1,44 +0,0 @@
-load("//website:defs.bzl", "doc")
-
-package(
- default_visibility = ["//website:__pkg__"],
- licenses = ["notice"],
-)
-
-doc(
- name = "index",
- src = "README.md",
- category = "Project",
- data = glob([
- "*.png",
- "*.svg",
- ]),
- permalink = "/docs/",
- weight = "0",
-)
-
-doc(
- name = "roadmap",
- src = "roadmap.md",
- category = "Project",
- permalink = "/roadmap/",
- weight = "10",
-)
-
-doc(
- name = "community",
- src = "community.md",
- category = "Project",
- permalink = "/community/",
- subcategory = "Community",
- weight = "10",
-)
-
-doc(
- name = "style",
- src = "style.md",
- category = "Project",
- permalink = "/community/style_guide/",
- subcategory = "Community",
- weight = "99",
-)
diff --git a/g3doc/Layers.png b/g3doc/Layers.png
deleted file mode 100644
index 308c6c451..000000000
--- a/g3doc/Layers.png
+++ /dev/null
Binary files differ
diff --git a/g3doc/Layers.svg b/g3doc/Layers.svg
deleted file mode 100644
index 0a366f841..000000000
--- a/g3doc/Layers.svg
+++ /dev/null
@@ -1 +0,0 @@
-<svg version="1.1" viewBox="0.0 0.0 371.8346456692913 255.01574803149606" fill="none" stroke="none" stroke-linecap="square" stroke-miterlimit="10" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns="http://www.w3.org/2000/svg"><clipPath id="p.0"><path d="m0 0l371.83466 0l0 255.01575l-371.83466 0l0 -255.01575z" clip-rule="nonzero"/></clipPath><g clip-path="url(#p.0)"><path fill="#000000" fill-opacity="0.0" d="m0 0l371.83466 0l0 255.01575l-371.83466 0z" fill-rule="evenodd"/><path fill="#f4cccc" d="m36.454067 6.6430445l174.83464 0l0 48.850395l-174.83464 0z" fill-rule="evenodd"/><path stroke="#cc4125" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m36.454067 6.6430445l174.83464 0l0 48.850395l-174.83464 0z" fill-rule="evenodd"/><path fill="#000000" d="m78.206116 37.98824l5.125 -13.359373l1.90625 0l5.46875 13.359373l-2.015625 0l-1.546875 -4.046875l-5.59375 0l-1.46875 4.046875l-1.875 0zm3.859375 -5.484375l4.53125 0l-1.40625 -3.703123q-0.625 -1.6875 -0.9375 -2.765625q-0.265625 1.28125 -0.71875 2.546875l-1.46875 3.921873zm9.849823 9.1875l0 -13.374998l1.484375 0l0 1.25q0.53125 -0.734375 1.1875 -1.09375q0.671875 -0.375 1.625 -0.375q1.234375 0 2.171875 0.640625q0.953125 0.625 1.4375 1.796875q0.484375 1.15625 0.484375 2.546873q0 1.484375 -0.53125 2.671875q-0.53125 1.1875 -1.546875 1.828125q-1.015625 0.625 -2.140625 0.625q-0.8125 0 -1.46875 -0.34375q-0.65625 -0.34375 -1.0625 -0.875l0 4.703125l-1.640625 0zm1.484375 -8.484375q0 1.859375 0.75 2.765625q0.765625 0.890625 1.828125 0.890625q1.09375 0 1.875 -0.921875q0.78125 -0.9375 0.78125 -2.875q0 -1.8437481 -0.765625 -2.765623q-0.75 -0.921875 -1.8125 -0.921875q-1.046875 0 -1.859375 0.984375q-0.796875 0.96875 -0.796875 2.843748zm8.891342 8.484375l0 -13.374998l1.484375 0l0 1.25q0.53125 -0.734375 1.1875 -1.09375q0.671875 -0.375 1.625 -0.375q1.234375 0 2.171875 0.640625q0.953125 0.625 1.4375 1.796875q0.484375 1.15625 0.484375 2.546873q0 1.484375 -0.53125 2.671875q-0.53125 1.1875 -1.546875 1.828125q-1.015625 0.625 -2.140625 0.625q-0.8125 0 -1.46875 -0.34375q-0.65625 -0.34375 -1.0625 -0.875l0 4.703125l-1.640625 0zm1.484375 -8.484375q0 1.859375 0.75 2.765625q0.765625 0.890625 1.828125 0.890625q1.09375 0 1.875 -0.921875q0.78125 -0.9375 0.78125 -2.875q0 -1.8437481 -0.765625 -2.765623q-0.75 -0.921875 -1.8125 -0.921875q-1.046875 0 -1.859375 0.984375q-0.796875 0.96875 -0.796875 2.843748zm8.844467 4.78125l0 -13.359373l1.640625 0l0 13.359373l-1.640625 0zm4.191696 -11.468748l0 -1.890625l1.640625 0l0 1.890625l-1.640625 0zm0 11.468748l0 -9.671873l1.640625 0l0 9.671873l-1.640625 0zm10.457321 -3.546875l1.609375 0.21875q-0.265625 1.65625 -1.359375 2.609375q-1.078125 0.9375 -2.671875 0.9375q-1.984375 0 -3.1875 -1.296875q-1.203125 -1.296875 -1.203125 -3.71875q0 -1.5781231 0.515625 -2.749998q0.515625 -1.171875 1.578125 -1.75q1.0625 -0.59375 2.3125 -0.59375q1.578125 0 2.578125 0.796875q1.0 0.796875 1.28125 2.265625l-1.59375 0.234375q-0.234375 -0.96875 -0.8125 -1.453125q-0.578125 -0.5 -1.390625 -0.5q-1.234375 0 -2.015625 0.890625q-0.78125 0.890625 -0.78125 2.812498q0 1.953125 0.75 2.84375q0.75 0.875 1.953125 0.875q0.96875 0 1.609375 -0.59375q0.65625 -0.59375 0.828125 -1.828125zm9.328125 2.359375q-0.921875 0.765625 -1.765625 1.09375q-0.828125 0.3125 -1.796875 0.3125q-1.59375 0 -2.453125 -0.78125q-0.859375 -0.78125 -0.859375 -1.984375q0 -0.71875 0.328125 -1.296875q0.328125 -0.59375 0.84375 -0.9375q0.53125 -0.359375 1.1875 -0.546875q0.46875 -0.125 1.453125 -0.25q1.984375 -0.234375 2.921875 -0.5624981q0.015625 -0.34375 0.015625 -0.421875q0 -1.0 -0.46875 
-1.421875q-0.625 -0.546875 -1.875 -0.546875q-1.15625 0 -1.703125 0.40625q-0.546875 0.40625 -0.8125 1.421875l-1.609375 -0.21875q0.21875 -1.015625 0.71875 -1.640625q0.5 -0.640625 1.453125 -0.984375q0.953125 -0.34375 2.1875 -0.34375q1.25 0 2.015625 0.296875q0.78125 0.28125 1.140625 0.734375q0.375 0.4375 0.515625 1.109375q0.078125 0.421875 0.078125 1.515625l0 2.187498q0 2.28125 0.109375 2.890625q0.109375 0.59375 0.40625 1.15625l-1.703125 0q-0.265625 -0.515625 -0.328125 -1.1875zm-0.140625 -3.671875q-0.890625 0.375 -2.671875 0.625q-1.015625 0.140625 -1.4375 0.328125q-0.421875 0.1875 -0.65625 0.53125q-0.21875 0.34375 -0.21875 0.78125q0 0.65625 0.5 1.09375q0.5 0.4375 1.453125 0.4375q0.9375 0 1.671875 -0.40625q0.75 -0.421875 1.09375 -1.140625q0.265625 -0.5625 0.265625 -1.640625l0 -0.609375zm7.781967 3.390625l0.234375 1.453125q-0.6875 0.140625 -1.234375 0.140625q-0.890625 0 -1.390625 -0.28125q-0.484375 -0.28125 -0.6875 -0.734375q-0.203125 -0.46875 -0.203125 -1.9375l0 -5.578123l-1.203125 0l0 -1.265625l1.203125 0l0 -2.390625l1.625 -0.984375l0 3.375l1.65625 0l0 1.265625l-1.65625 0l0 5.671873q0 0.6875 0.078125 0.890625q0.09375 0.203125 0.28125 0.328125q0.203125 0.109375 0.578125 0.109375q0.265625 0 0.71875 -0.0625zm1.6051788 -9.999998l0 -1.890625l1.640625 0l0 1.890625l-1.640625 0zm0 11.468748l0 -9.671873l1.640625 0l0 9.671873l-1.640625 0zm3.5354462 -4.84375q0 -2.687498 1.484375 -3.968748q1.25 -1.078125 3.046875 -1.078125q2.0 0 3.265625 1.3125q1.265625 1.296875 1.265625 3.609373q0 1.859375 -0.5625 2.9375q-0.5625 1.0625 -1.640625 1.65625q-1.0625 0.59375 -2.328125 0.59375q-2.03125 0 -3.28125 -1.296875q-1.25 -1.3125 -1.25 -3.765625zm1.6875 0q0 1.859375 0.796875 2.796875q0.8125 0.921875 2.046875 0.921875q1.21875 0 2.03125 -0.921875q0.8125 -0.9375 0.8125 -2.84375q0 -1.7968731 -0.8125 -2.718748q-0.8125 -0.921875 -2.03125 -0.921875q-1.234375 0 -2.046875 0.921875q-0.796875 0.90625 -0.796875 2.765623zm9.297592 4.84375l0 -9.671873l1.46875 0l0 1.375q1.0625 -1.59375 3.078125 -1.59375q0.875 0 1.609375 0.3125q0.734375 0.3125 1.09375 0.828125q0.375 0.5 0.515625 1.203125q0.09375 0.453125 0.09375 1.5937481l0 5.953125l-1.640625 0l0 -5.890625q0 -0.9999981 -0.203125 -1.4843731q-0.1875 -0.5 -0.671875 -0.796875q-0.484375 -0.296875 -1.140625 -0.296875q-1.046875 0 -1.8125 0.671875q-0.75 0.65625 -0.75 2.515623l0 5.28125l-1.640625 0z" fill-rule="nonzero"/><path fill="#000000" fill-opacity="0.0" d="m4.454068 73.068245l74.04724 0" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m4.454068 73.068245l74.04724 0" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m172.45407 73.068245l74.04724 0" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m172.45407 73.068245l74.04724 0" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m73.43044 56.702377l100.88189 0l0 32.06299l-100.88189 0z" fill-rule="evenodd"/><path fill="#000000" d="m87.06437 74.471375l1.203125 -0.109375q0.078125 0.71875 0.390625 1.1875q0.3125 0.453125 0.953125 0.734375q0.65625 0.28125 1.46875 0.28125q0.71875 0 1.265625 -0.21875q0.5625 -0.21875 0.828125 -0.578125q0.265625 -0.375 0.265625 -0.828125q0 -0.453125 -0.265625 -0.78125q-0.25 -0.328125 -0.84375 -0.5625q-0.390625 -0.15625 -1.703125 -0.46875q-1.3125 -0.3125 -1.84375 -0.59375q-0.671875 -0.359375 -1.015625 -0.890625q-0.328125 -0.53125 -0.328125 -1.1875q0 -0.71875 0.40625 -1.34375q0.40625 -0.625 1.1875 -0.953125q0.796875 -0.328125 1.765625 -0.328125q1.046875 0 1.859375 
0.34375q0.8125 0.34375 1.25 1.015625q0.4375 0.65625 0.46875 1.484375l-1.203125 0.09375q-0.109375 -0.90625 -0.671875 -1.359375q-0.5625 -0.46875 -1.65625 -0.46875q-1.140625 0 -1.671875 0.421875q-0.515625 0.421875 -0.515625 1.015625q0 0.515625 0.359375 0.84375q0.375 0.328125 1.90625 0.6875q1.546875 0.34375 2.109375 0.59375q0.84375 0.390625 1.234375 0.984375q0.390625 0.578125 0.390625 1.359375q0 0.75 -0.4375 1.4375q-0.421875 0.671875 -1.25 1.046875q-0.8125 0.359375 -1.828125 0.359375q-1.296875 0 -2.171875 -0.375q-0.875 -0.375 -1.375 -1.125q-0.5 -0.765625 -0.53125 -1.71875zm9.12413 5.71875l-0.125 -1.09375q0.375 0.109375 0.65625 0.109375q0.390625 0 0.625 -0.140625q0.234375 -0.125 0.390625 -0.359375q0.109375 -0.171875 0.359375 -0.875q0.03125 -0.09375 0.109375 -0.28125l-2.625 -6.921875l1.265625 0l1.4375 4.0q0.28125 0.765625 0.5 1.59375q0.203125 -0.796875 0.46875 -1.578125l1.484375 -4.015625l1.171875 0l-2.625 7.015625q-0.421875 1.140625 -0.65625 1.578125q-0.3125 0.578125 -0.71875 0.84375q-0.40625 0.28125 -0.96875 0.28125q-0.328125 0 -0.75 -0.15625zm6.2421875 -4.71875l1.15625 -0.1875q0.109375 0.703125 0.546875 1.078125q0.453125 0.359375 1.25 0.359375q0.8125 0 1.203125 -0.328125q0.390625 -0.328125 0.390625 -0.765625q0 -0.390625 -0.359375 -0.625q-0.234375 -0.15625 -1.1875 -0.390625q-1.296875 -0.328125 -1.796875 -0.5625q-0.484375 -0.25 -0.75 -0.65625q-0.25 -0.421875 -0.25 -0.9375q0 -0.453125 0.203125 -0.84375q0.21875 -0.40625 0.578125 -0.671875q0.28125 -0.1875 0.75 -0.328125q0.46875 -0.140625 1.015625 -0.140625q0.8125 0 1.421875 0.234375q0.609375 0.234375 0.90625 0.640625q0.296875 0.390625 0.40625 1.0625l-1.140625 0.15625q-0.078125 -0.53125 -0.453125 -0.828125q-0.375 -0.3125 -1.0625 -0.3125q-0.8125 0 -1.15625 0.265625q-0.34375 0.265625 -0.34375 0.625q0 0.234375 0.140625 0.421875q0.15625 0.1875 0.453125 0.3125q0.171875 0.0625 1.03125 0.296875q1.25 0.328125 1.734375 0.546875q0.5 0.203125 0.78125 0.609375q0.28125 0.40625 0.28125 1.0q0 0.59375 -0.34375 1.109375q-0.34375 0.515625 -1.0 0.796875q-0.640625 0.28125 -1.453125 0.28125q-1.34375 0 -2.046875 -0.5625q-0.703125 -0.5625 -0.90625 -1.65625zm9.6953125 1.015625l0.171875 1.03125q-0.5 0.109375 -0.890625 0.109375q-0.640625 0 -1.0 -0.203125q-0.34375 -0.203125 -0.484375 -0.53125q-0.140625 -0.328125 -0.140625 -1.390625l0 -3.96875l-0.859375 0l0 -0.90625l0.859375 0l0 -1.71875l1.171875 -0.703125l0 2.421875l1.171875 0l0 0.90625l-1.171875 0l0 4.046875q0 0.5 0.046875 0.640625q0.0625 0.140625 0.203125 0.234375q0.140625 0.078125 0.40625 0.078125q0.203125 0 0.515625 -0.046875zm5.8748627 -1.171875l1.203125 0.140625q-0.28125 1.0625 -1.0625 1.65625q-0.765625 0.578125 -1.96875 0.578125q-1.515625 0 -2.40625 -0.9375q-0.890625 -0.9375 -0.890625 -2.609375q0 -1.75 0.890625 -2.703125q0.90625 -0.96875 2.34375 -0.96875q1.390625 0 2.265625 0.9375q0.875 0.9375 0.875 2.65625q0 0.109375 0 0.3125l-5.15625 0q0.0625 1.140625 0.640625 1.75q0.578125 0.59375 1.4375 0.59375q0.65625 0 1.109375 -0.328125q0.453125 -0.34375 0.71875 -1.078125zm-3.84375 -1.90625l3.859375 0q-0.078125 -0.859375 -0.4375 -1.296875q-0.5625 -0.6875 -1.453125 -0.6875q-0.8125 0 -1.359375 0.546875q-0.546875 0.53125 -0.609375 1.4375zm6.5218506 4.125l0 -6.90625l1.046875 0l0 0.96875q0.328125 -0.515625 0.859375 -0.8125q0.546875 -0.3125 1.234375 -0.3125q0.78125 0 1.265625 0.3125q0.484375 0.3125 0.6875 0.890625q0.828125 -1.203125 2.140625 -1.203125q1.0312424 0 1.5781174 0.578125q0.5625 0.5625 0.5625 1.734375l0 4.75l-1.171875 0l0 -4.359375q0 -0.703125 -0.125 -1.0q-0.109375 -0.3125 -0.40625 -0.5q-0.296875 -0.1875 -0.7031174 
-0.1875q-0.71875 0 -1.203125 0.484375q-0.484375 0.484375 -0.484375 1.546875l0 4.015625l-1.171875 0l0 -4.484375q0 -0.78125 -0.296875 -1.171875q-0.28125 -0.390625 -0.921875 -0.390625q-0.5 0 -0.921875 0.265625q-0.421875 0.25 -0.609375 0.75q-0.1875 0.5 -0.1875 1.453125l0 3.578125l-1.171875 0zm19.321053 -2.53125l1.15625 0.15625q-0.1875 1.1875 -0.96875 1.859375q-0.78125 0.671875 -1.921875 0.671875q-1.4062653 0 -2.2812653 -0.921875q-0.859375 -0.9375 -0.859375 -2.65625q0 -1.125 0.375 -1.96875q0.375 -0.84375 1.125 -1.25q0.765625 -0.421875 1.6562653 -0.421875q1.125 0 1.84375 0.578125q0.71875 0.5625 0.921875 1.609375l-1.140625 0.171875q-0.171875 -0.703125 -0.59375 -1.046875q-0.40625 -0.359375 -0.984375 -0.359375q-0.890625 0 -1.4531403 0.640625q-0.546875 0.640625 -0.546875 2.0q0 1.40625 0.53125 2.03125q0.546875 0.625 1.4062653 0.625q0.6875 0 1.140625 -0.421875q0.46875 -0.421875 0.59375 -1.296875zm6.6640625 1.671875q-0.65625 0.5625 -1.265625 0.796875q-0.59375 0.21875 -1.28125 0.21875q-1.140625 0 -1.75 -0.546875q-0.609375 -0.5625 -0.609375 -1.4375q0 -0.5 0.21875 -0.921875q0.234375 -0.421875 0.609375 -0.671875q0.375 -0.25 0.84375 -0.390625q0.34375 -0.078125 1.046875 -0.171875q1.421875 -0.171875 2.09375 -0.40625q0 -0.234375 0 -0.296875q0 -0.71875 -0.328125 -1.015625q-0.453125 -0.390625 -1.34375 -0.390625q-0.8125 0 -1.21875 0.296875q-0.390625 0.28125 -0.578125 1.015625l-1.140625 -0.15625q0.15625 -0.734375 0.515625 -1.1875q0.359375 -0.453125 1.03125 -0.6875q0.671875 -0.25 1.5625 -0.25q0.890625 0 1.4375 0.203125q0.5625 0.203125 0.8125 0.53125q0.265625 0.3125 0.375 0.796875q0.046875 0.296875 0.046875 1.078125l0 1.5625q0 1.625 0.078125 2.0625q0.078125 0.4375 0.296875 0.828125l-1.21875 0q-0.1875 -0.359375 -0.234375 -0.859375zm-0.09375 -2.609375q-0.640625 0.265625 -1.921875 0.4375q-0.71875 0.109375 -1.015625 0.25q-0.296875 0.125 -0.46875 0.375q-0.15625 0.25 -0.15625 0.546875q0 0.46875 0.34375 0.78125q0.359375 0.3125 1.046875 0.3125q0.671875 0 1.203125 -0.296875q0.53125 -0.296875 0.78125 -0.8125q0.1875 -0.390625 0.1875 -1.171875l0 -0.421875zm2.9749756 3.46875l0 -9.546875l1.171875 0l0 9.546875l-1.171875 0zm2.96109 0l0 -9.546875l1.171875 0l0 9.546875l-1.171875 0zm2.507965 -2.0625l1.15625 -0.1875q0.109375 0.703125 0.546875 1.078125q0.453125 0.359375 1.25 0.359375q0.8125 0 1.203125 -0.328125q0.390625 -0.328125 0.390625 -0.765625q0 -0.390625 -0.359375 -0.625q-0.234375 -0.15625 -1.1875 -0.390625q-1.296875 -0.328125 -1.796875 -0.5625q-0.484375 -0.25 -0.75 -0.65625q-0.25 -0.421875 -0.25 -0.9375q0 -0.453125 0.203125 -0.84375q0.21875 -0.40625 0.578125 -0.671875q0.28125 -0.1875 0.75 -0.328125q0.46875 -0.140625 1.015625 -0.140625q0.8125 0 1.421875 0.234375q0.609375 0.234375 0.90625 0.640625q0.296875 0.390625 0.40625 1.0625l-1.140625 0.15625q-0.078125 -0.53125 -0.453125 -0.828125q-0.375 -0.3125 -1.0625 -0.3125q-0.8125 0 -1.15625 0.265625q-0.34375 0.265625 -0.34375 0.625q0 0.234375 0.140625 0.421875q0.15625 0.1875 0.453125 0.3125q0.171875 0.0625 1.03125 0.296875q1.25 0.328125 1.734375 0.546875q0.5 0.203125 0.78125 0.609375q0.28125 0.40625 0.28125 1.0q0 0.59375 -0.34375 1.109375q-0.34375 0.515625 -1.0 0.796875q-0.640625 0.28125 -1.453125 0.28125q-1.34375 0 -2.046875 -0.5625q-0.703125 -0.5625 -0.90625 -1.65625z" fill-rule="nonzero"/><path fill="#d9d2e9" d="m36.454067 87.40656l174.83464 0l0 48.850395l-174.83464 0z" fill-rule="evenodd"/><path stroke="#8e7cc3" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m36.454067 87.40656l174.83464 0l0 48.850395l-174.83464 0z" fill-rule="evenodd"/><path fill="#000000" 
d="m98.35295 119.54864l1.59375 0.234375q0.109375 0.75 0.5625 1.078125q0.609375 0.453125 1.671875 0.453125q1.140625 0 1.75 -0.453125q0.625 -0.453125 0.84375 -1.265625q0.125 -0.5 0.109375 -2.109375q-1.0625 1.265625 -2.671875 1.265625q-2.0 0 -3.09375 -1.4375q-1.09375 -1.4375 -1.09375 -3.453125q0 -1.390625 0.5 -2.5625q0.515625 -1.171875 1.453125 -1.796875q0.953125 -0.640625 2.25 -0.640625q1.703125 0 2.8125 1.375l0 -1.15625l1.515625 0l0 8.359375q0 2.265625 -0.46875 3.203125q-0.453125 0.9375 -1.453125 1.484375q-0.984375 0.546875 -2.453125 0.546875q-1.71875 0 -2.796875 -0.78125q-1.0625 -0.765625 -1.03125 -2.34375zm1.359375 -5.8125q0 1.90625 0.75 2.78125q0.765625 0.875 1.90625 0.875q1.125 0 1.890625 -0.859375q0.765625 -0.875 0.765625 -2.734375q0 -1.78125 -0.796875 -2.671875q-0.78125 -0.90625 -1.890625 -0.90625q-1.09375 0 -1.859375 0.890625q-0.765625 0.875 -0.765625 2.625zm13.344467 5.015625l-5.171875 -13.359375l1.921875 0l3.46875 9.703125q0.421875 1.171875 0.703125 2.1875q0.3125 -1.09375 0.71875 -2.1875l3.609375 -9.703125l1.796875 0l-5.234375 13.359375l-1.8125 0zm8.427948 -11.46875l0 -1.890625l1.640625 0l0 1.890625l-1.640625 0zm0 11.46875l0 -9.671875l1.640625 0l0 9.671875l-1.640625 0zm3.4885712 -2.890625l1.625 -0.25q0.125 0.96875 0.75 1.5q0.625 0.515625 1.75 0.515625q1.125 0 1.671875 -0.453125q0.546875 -0.46875 0.546875 -1.09375q0 -0.546875 -0.484375 -0.875q-0.328125 -0.21875 -1.671875 -0.546875q-1.8125 -0.46875 -2.515625 -0.796875q-0.6875 -0.328125 -1.046875 -0.90625q-0.359375 -0.59375 -0.359375 -1.3125q0 -0.640625 0.296875 -1.1875q0.296875 -0.5625 0.8125 -0.921875q0.375 -0.28125 1.03125 -0.46875q0.671875 -0.203125 1.421875 -0.203125q1.140625 0 2.0 0.328125q0.859375 0.328125 1.265625 0.890625q0.421875 0.5625 0.578125 1.5l-1.609375 0.21875q-0.109375 -0.75 -0.640625 -1.171875q-0.515625 -0.421875 -1.46875 -0.421875q-1.140625 0 -1.625 0.375q-0.46875 0.375 -0.46875 0.875q0 0.3125 0.1875 0.578125q0.203125 0.265625 0.640625 0.4375q0.234375 0.09375 1.4375 0.421875q1.75 0.453125 2.4375 0.75q0.6875 0.296875 1.078125 0.859375q0.390625 0.5625 0.390625 1.40625q0 0.828125 -0.484375 1.546875q-0.46875 0.71875 -1.375 1.125q-0.90625 0.390625 -2.046875 0.390625q-1.875 0 -2.875 -0.78125q-0.984375 -0.78125 -1.25 -2.328125zm9.375 -1.953125q0 -2.6875 1.484375 -3.96875q1.25 -1.078125 3.046875 -1.078125q2.0 0 3.265625 1.3125q1.265625 1.296875 1.265625 3.609375q0 1.859375 -0.5625 2.9375q-0.5625 1.0625 -1.640625 1.65625q-1.0625 0.59375 -2.328125 0.59375q-2.03125 0 -3.28125 -1.296875q-1.25 -1.3125 -1.25 -3.765625zm1.6875 0q0 1.859375 0.796875 2.796875q0.8125 0.921875 2.046875 0.921875q1.21875 0 2.03125 -0.921875q0.8125 -0.9375 0.8125 -2.84375q0 -1.796875 -0.8125 -2.71875q-0.8125 -0.921875 -2.03125 -0.921875q-1.234375 0 -2.046875 0.921875q-0.796875 0.90625 -0.796875 2.765625zm9.281967 4.84375l0 -9.671875l1.46875 0l0 1.46875q0.5625 -1.03125 1.03125 -1.359375q0.484375 -0.328125 1.0625 -0.328125q0.828125 0 1.6875 0.53125l-0.5625 1.515625q-0.609375 -0.359375 -1.203125 -0.359375q-0.546875 0 -0.96875 0.328125q-0.421875 0.328125 -0.609375 0.890625q-0.28125 0.875 -0.28125 1.921875l0 5.0625l-1.625 0z" fill-rule="nonzero"/><path fill="#000000" fill-opacity="0.0" d="m3.6351707 152.91733l48.850395 0" fill-rule="evenodd"/><path stroke="#ff0000" stroke-width="2.0" stroke-linejoin="round" stroke-linecap="butt" d="m3.6351707 152.91733l48.850395 0" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m195.25722 152.91733l47.338577 0" fill-rule="evenodd"/><path stroke="#ff0000" stroke-width="2.0" stroke-linejoin="round" 
stroke-linecap="butt" d="m195.25722 152.91733l47.338577 0" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m52.485565 136.88583l142.77165 0l0 32.06299l-142.77165 0z" fill-rule="evenodd"/><path fill="#000000" d="m65.21821 157.71732l0 -9.546875l1.265625 0l0 8.421875l4.703125 0l0 1.125l-5.96875 0zm7.3343506 -8.1875l0 -1.359375l1.171875 0l0 1.359375l-1.171875 0zm0 8.1875l0 -6.90625l1.171875 0l0 6.90625l-1.171875 0zm2.945465 0l0 -6.90625l1.046875 0l0 0.96875q0.328125 -0.515625 0.859375 -0.8125q0.546875 -0.3125 1.234375 -0.3125q0.78125 0 1.265625 0.3125q0.484375 0.3125 0.6875 0.890625q0.828125 -1.203125 2.140625 -1.203125q1.03125 0 1.578125 0.578125q0.5625 0.5625 0.5625 1.734375l0 4.75l-1.171875 0l0 -4.359375q0 -0.703125 -0.125 -1.0q-0.109375 -0.3125 -0.40625 -0.5q-0.296875 -0.1875 -0.703125 -0.1875q-0.71875 0 -1.203125 0.484375q-0.484375 0.484375 -0.484375 1.546875l0 4.015625l-1.171875 0l0 -4.484375q0 -0.78125 -0.296875 -1.171875q-0.28125 -0.390625 -0.921875 -0.390625q-0.5 0 -0.921875 0.265625q-0.421875 0.25 -0.609375 0.75q-0.1875 0.5 -0.1875 1.453125l0 3.578125l-1.171875 0zm11.118057 -8.1875l0 -1.359375l1.171875 0l0 1.359375l-1.171875 0zm0 8.1875l0 -6.90625l1.171875 0l0 6.90625l-1.171875 0zm5.507965 -1.046875l0.171875 1.03125q-0.5 0.109375 -0.890625 0.109375q-0.640625 0 -1.0 -0.203125q-0.34375 -0.203125 -0.484375 -0.53125q-0.140625 -0.328125 -0.140625 -1.390625l0 -3.96875l-0.859375 0l0 -0.90625l0.859375 0l0 -1.71875l1.171875 -0.703125l0 2.421875l1.171875 0l0 0.90625l-1.171875 0l0 4.046875q0 0.5 0.046875 0.640625q0.0625 0.140625 0.203125 0.234375q0.140625 0.078125 0.40625 0.078125q0.203125 0 0.515625 -0.046875zm5.8748627 -1.171875l1.203125 0.140625q-0.28125 1.0625 -1.0625 1.65625q-0.765625 0.578125 -1.96875 0.578125q-1.515625 0 -2.40625 -0.9375q-0.890625 -0.9375 -0.890625 -2.609375q0 -1.75 0.890625 -2.703125q0.90625 -0.96875 2.34375 -0.96875q1.390625 0 2.265625 0.9375q0.875 0.9375 0.875 2.65625q0 0.109375 0 0.3125l-5.15625 0q0.0625 1.140625 0.640625 1.75q0.578125 0.59375 1.4375 0.59375q0.65625 0 1.109375 -0.328125q0.453125 -0.34375 0.71875 -1.078125zm-3.84375 -1.90625l3.859375 0q-0.078125 -0.859375 -0.4375 -1.296875q-0.5625 -0.6875 -1.453125 -0.6875q-0.8125 0 -1.359375 0.546875q-0.546875 0.53125 -0.609375 1.4375zm11.006226 4.125l0 -0.875q-0.65625 1.03125 -1.9375 1.03125q-0.8125 0 -1.515625 -0.453125q-0.6875 -0.453125 -1.078125 -1.265625q-0.375 -0.828125 -0.375 -1.890625q0 -1.03125 0.34375 -1.875q0.34375 -0.84375 1.03125 -1.28125q0.703125 -0.453125 1.546875 -0.453125q0.625 0 1.109375 0.265625q0.5 0.25 0.796875 0.671875l0 -3.421875l1.171875 0l0 9.546875l-1.09375 0zm-3.703125 -3.453125q0 1.328125 0.5625 1.984375q0.5625 0.65625 1.328125 0.65625q0.765625 0 1.296875 -0.625q0.53125 -0.625 0.53125 -1.90625q0 -1.421875 -0.546875 -2.078125q-0.546875 -0.671875 -1.34375 -0.671875q-0.78125 0 -1.3125 0.640625q-0.515625 0.625 -0.515625 2.0zm9.865463 1.390625l1.15625 -0.1875q0.109375 0.703125 0.546875 1.078125q0.453125 0.359375 1.25 0.359375q0.8125 0 1.203125 -0.328125q0.390625 -0.328125 0.390625 -0.765625q0 -0.390625 -0.359375 -0.625q-0.234375 -0.15625 -1.1875 -0.390625q-1.296875 -0.328125 -1.796875 -0.5625q-0.484375 -0.25 -0.75 -0.65625q-0.25 -0.421875 -0.25 -0.9375q0 -0.453125 0.203125 -0.84375q0.21875 -0.40625 0.578125 -0.671875q0.28125 -0.1875 0.75 -0.328125q0.46875 -0.140625 1.015625 -0.140625q0.8125 0 1.421875 0.234375q0.609375 0.234375 0.90625 0.640625q0.296875 0.390625 0.40625 1.0625l-1.140625 0.15625q-0.078125 -0.53125 -0.453125 -0.828125q-0.375 -0.3125 -1.0625 -0.3125q-0.8125 0 
-1.15625 0.265625q-0.34375 0.265625 -0.34375 0.625q0 0.234375 0.140625 0.421875q0.15625 0.1875 0.453125 0.3125q0.171875 0.0625 1.03125 0.296875q1.25 0.328125 1.734375 0.546875q0.5 0.203125 0.78125 0.609375q0.28125 0.40625 0.28125 1.0q0 0.59375 -0.34375 1.109375q-0.34375 0.515625 -1.0 0.796875q-0.640625 0.28125 -1.453125 0.28125q-1.34375 0 -2.046875 -0.5625q-0.703125 -0.5625 -0.90625 -1.65625zm7.0859375 4.71875l-0.125 -1.09375q0.375 0.109375 0.65625 0.109375q0.390625 0 0.625 -0.140625q0.234375 -0.125 0.390625 -0.359375q0.109375 -0.171875 0.359375 -0.875q0.03125 -0.09375 0.109375 -0.28125l-2.625 -6.921875l1.265625 0l1.4375 4.0q0.28125 0.765625 0.5 1.59375q0.203125 -0.796875 0.46875 -1.578125l1.484375 -4.015625l1.171875 0l-2.625 7.015625q-0.421875 1.140625 -0.65625 1.578125q-0.3125 0.578125 -0.71875 0.84375q-0.40625 0.28125 -0.96875 0.28125q-0.328125 0 -0.75 -0.15625zm6.2421875 -4.71875l1.15625 -0.1875q0.109375 0.703125 0.546875 1.078125q0.453125 0.359375 1.25 0.359375q0.8124924 0 1.2031174 -0.328125q0.390625 -0.328125 0.390625 -0.765625q0 -0.390625 -0.359375 -0.625q-0.234375 -0.15625 -1.1874924 -0.390625q-1.296875 -0.328125 -1.796875 -0.5625q-0.484375 -0.25 -0.75 -0.65625q-0.25 -0.421875 -0.25 -0.9375q0 -0.453125 0.203125 -0.84375q0.21875 -0.40625 0.578125 -0.671875q0.28125 -0.1875 0.75 -0.328125q0.46875 -0.140625 1.015625 -0.140625q0.8124924 0 1.4218674 0.234375q0.609375 0.234375 0.90625 0.640625q0.296875 0.390625 0.40625 1.0625l-1.140625 0.15625q-0.078125 -0.53125 -0.453125 -0.828125q-0.375 -0.3125 -1.0624924 -0.3125q-0.8125 0 -1.15625 0.265625q-0.34375 0.265625 -0.34375 0.625q0 0.234375 0.140625 0.421875q0.15625 0.1875 0.453125 0.3125q0.171875 0.0625 1.03125 0.296875q1.2499924 0.328125 1.7343674 0.546875q0.5 0.203125 0.78125 0.609375q0.28125 0.40625 0.28125 1.0q0 0.59375 -0.34375 1.109375q-0.34375 0.515625 -1.0 0.796875q-0.640625 0.28125 -1.4531174 0.28125q-1.34375 0 -2.046875 -0.5625q-0.703125 -0.5625 -0.90625 -1.65625zm9.695305 1.015625l0.171875 1.03125q-0.5 0.109375 -0.890625 0.109375q-0.640625 0 -1.0 -0.203125q-0.34375 -0.203125 -0.484375 -0.53125q-0.140625 -0.328125 -0.140625 -1.390625l0 -3.96875l-0.859375 0l0 -0.90625l0.859375 0l0 -1.71875l1.171875 -0.703125l0 2.421875l1.171875 0l0 0.90625l-1.171875 0l0 4.046875q0 0.5 0.046875 0.640625q0.0625 0.140625 0.203125 0.234375q0.140625 0.078125 0.40625 0.078125q0.203125 0 0.515625 -0.046875zm5.8748627 -1.171875l1.203125 0.140625q-0.28125 1.0625 -1.0625 1.65625q-0.765625 0.578125 -1.96875 0.578125q-1.515625 0 -2.40625 -0.9375q-0.890625 -0.9375 -0.890625 -2.609375q0 -1.75 0.890625 -2.703125q0.90625 -0.96875 2.34375 -0.96875q1.390625 0 2.265625 0.9375q0.875 0.9375 0.875 2.65625q0 0.109375 0 0.3125l-5.15625 0q0.0625 1.140625 0.640625 1.75q0.578125 0.59375 1.4375 0.59375q0.65625 0 1.109375 -0.328125q0.453125 -0.34375 0.71875 -1.078125zm-3.84375 -1.90625l3.859375 0q-0.078125 -0.859375 -0.4375 -1.296875q-0.5625 -0.6875 -1.453125 -0.6875q-0.8125 0 -1.359375 0.546875q-0.546875 0.53125 -0.609375 1.4375zm6.5218506 4.125l0 -6.90625l1.046875 0l0 0.96875q0.328125 -0.515625 0.859375 -0.8125q0.546875 -0.3125 1.234375 -0.3125q0.78125 0 1.265625 0.3125q0.484375 0.3125 0.6875 0.890625q0.828125 -1.203125 2.140625 -1.203125q1.03125 0 1.578125 0.578125q0.5625 0.5625 0.5625 1.734375l0 4.75l-1.171875 0l0 -4.359375q0 -0.703125 -0.125 -1.0q-0.109375 -0.3125 -0.40625 -0.5q-0.296875 -0.1875 -0.703125 -0.1875q-0.71875 0 -1.203125 0.484375q-0.484375 0.484375 -0.484375 1.546875l0 4.015625l-1.171875 0l0 -4.484375q0 -0.78125 -0.296875 -1.171875q-0.28125 -0.390625 -0.921875 
-0.390625q-0.5 0 -0.921875 0.265625q-0.421875 0.25 -0.609375 0.75q-0.1875 0.5 -0.1875 1.453125l0 3.578125l-1.171875 0zm19.321045 -2.53125l1.15625 0.15625q-0.1875 1.1875 -0.96875 1.859375q-0.78125 0.671875 -1.921875 0.671875q-1.40625 0 -2.28125 -0.921875q-0.859375 -0.9375 -0.859375 -2.65625q0 -1.125 0.375 -1.96875q0.375 -0.84375 1.125 -1.25q0.765625 -0.421875 1.65625 -0.421875q1.125 0 1.84375 0.578125q0.71875 0.5625 0.921875 1.609375l-1.140625 0.171875q-0.171875 -0.703125 -0.59375 -1.046875q-0.40625 -0.359375 -0.984375 -0.359375q-0.890625 0 -1.453125 0.640625q-0.546875 0.640625 -0.546875 2.0q0 1.40625 0.53125 2.03125q0.546875 0.625 1.40625 0.625q0.6875 0 1.140625 -0.421875q0.46875 -0.421875 0.59375 -1.296875zm6.6640625 1.671875q-0.65625 0.5625 -1.265625 0.796875q-0.59375 0.21875 -1.28125 0.21875q-1.140625 0 -1.75 -0.546875q-0.609375 -0.5625 -0.609375 -1.4375q0 -0.5 0.21875 -0.921875q0.234375 -0.421875 0.609375 -0.671875q0.375 -0.25 0.84375 -0.390625q0.34375 -0.078125 1.046875 -0.171875q1.421875 -0.171875 2.09375 -0.40625q0 -0.234375 0 -0.296875q0 -0.71875 -0.328125 -1.015625q-0.453125 -0.390625 -1.34375 -0.390625q-0.8125 0 -1.21875 0.296875q-0.390625 0.28125 -0.578125 1.015625l-1.140625 -0.15625q0.15625 -0.734375 0.515625 -1.1875q0.359375 -0.453125 1.03125 -0.6875q0.671875 -0.25 1.5625 -0.25q0.890625 0 1.4375 0.203125q0.5625 0.203125 0.8125 0.53125q0.265625 0.3125 0.375 0.796875q0.046875 0.296875 0.046875 1.078125l0 1.5625q0 1.625 0.078125 2.0625q0.078125 0.4375 0.296875 0.828125l-1.21875 0q-0.1875 -0.359375 -0.234375 -0.859375zm-0.09375 -2.609375q-0.640625 0.265625 -1.921875 0.4375q-0.71875 0.109375 -1.015625 0.25q-0.296875 0.125 -0.46875 0.375q-0.15625 0.25 -0.15625 0.546875q0 0.46875 0.34375 0.78125q0.359375 0.3125 1.046875 0.3125q0.671875 0 1.203125 -0.296875q0.53125 -0.296875 0.78125 -0.8125q0.1875 -0.390625 0.1875 -1.171875l0 -0.421875zm2.9749756 3.46875l0 -9.546875l1.171875 0l0 9.546875l-1.171875 0zm2.96109 0l0 -9.546875l1.171875 0l0 9.546875l-1.171875 0zm2.507965 -2.0625l1.15625 -0.1875q0.109375 0.703125 0.546875 1.078125q0.453125 0.359375 1.25 0.359375q0.8125 0 1.203125 -0.328125q0.390625 -0.328125 0.390625 -0.765625q0 -0.390625 -0.359375 -0.625q-0.234375 -0.15625 -1.1875 -0.390625q-1.296875 -0.328125 -1.796875 -0.5625q-0.484375 -0.25 -0.75 -0.65625q-0.25 -0.421875 -0.25 -0.9375q0 -0.453125 0.203125 -0.84375q0.21875 -0.40625 0.578125 -0.671875q0.28125 -0.1875 0.75 -0.328125q0.46875 -0.140625 1.015625 -0.140625q0.8125 0 1.421875 0.234375q0.609375 0.234375 0.90625 0.640625q0.296875 0.390625 0.40625 1.0625l-1.140625 0.15625q-0.078125 -0.53125 -0.453125 -0.828125q-0.375 -0.3125 -1.0625 -0.3125q-0.8125 0 -1.15625 0.265625q-0.34375 0.265625 -0.34375 0.625q0 0.234375 0.140625 0.421875q0.15625 0.1875 0.453125 0.3125q0.171875 0.0625 1.03125 0.296875q1.25 0.328125 1.734375 0.546875q0.5 0.203125 0.78125 0.609375q0.28125 0.40625 0.28125 1.0q0 0.59375 -0.34375 1.109375q-0.34375 0.515625 -1.0 0.796875q-0.640625 0.28125 -1.453125 0.28125q-1.34375 0 -2.046875 -0.5625q-0.703125 -0.5625 -0.90625 -1.65625z" fill-rule="nonzero"/><path fill="#cfe2f3" d="m36.454067 167.00784l174.83464 0l0 48.850388l-174.83464 0z" fill-rule="evenodd"/><path stroke="#6d9eeb" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m36.454067 167.00784l174.83464 0l0 48.850388l-174.83464 0z" fill-rule="evenodd"/><path fill="#000000" d="m76.63558 198.35303l0 -13.359375l1.765625 0l0 5.484375l6.9375 0l0 -5.484375l1.765625 0l0 13.359375l-1.765625 0l0 -6.296875l-6.9375 0l0 6.296875l-1.765625 0zm12.597946 -4.84375q0 
-2.6875 1.484375 -3.96875q1.25 -1.078125 3.046875 -1.078125q2.0 0 3.265625 1.3125q1.265625 1.296875 1.265625 3.609375q0 1.859375 -0.5625 2.9375q-0.5625 1.0625 -1.640625 1.65625q-1.0625 0.59375 -2.328125 0.59375q-2.03125 0 -3.28125 -1.296875q-1.25 -1.3125 -1.25 -3.765625zm1.6875 0q0 1.859375 0.796875 2.796875q0.8125 0.921875 2.046875 0.921875q1.21875 0 2.03125 -0.921875q0.8125 -0.9375 0.8125 -2.84375q0 -1.796875 -0.8125 -2.71875q-0.8125 -0.921875 -2.03125 -0.921875q-1.234375 0 -2.046875 0.921875q-0.796875 0.90625 -0.796875 2.765625zm8.641342 1.953125l1.625 -0.25q0.125 0.96875 0.75 1.5q0.625 0.515625 1.75 0.515625q1.125 0 1.671875 -0.453125q0.546875 -0.46875 0.546875 -1.09375q0 -0.546875 -0.484375 -0.875q-0.328125 -0.21875 -1.671875 -0.546875q-1.8125 -0.46875 -2.515625 -0.796875q-0.6875 -0.328125 -1.046875 -0.90625q-0.359375 -0.59375 -0.359375 -1.3125q0 -0.640625 0.296875 -1.1875q0.296875 -0.5625 0.8125 -0.921875q0.375 -0.28125 1.03125 -0.46875q0.671875 -0.203125 1.421875 -0.203125q1.140625 0 2.0 0.328125q0.859375 0.328125 1.265625 0.890625q0.421875 0.5625 0.578125 1.5l-1.609375 0.21875q-0.109375 -0.75 -0.640625 -1.171875q-0.515625 -0.421875 -1.46875 -0.421875q-1.140625 0 -1.625 0.375q-0.46875 0.375 -0.46875 0.875q0 0.3125 0.1875 0.578125q0.203125 0.265625 0.640625 0.4375q0.234375 0.09375 1.4375 0.421875q1.75 0.453125 2.4375 0.75q0.6875 0.296875 1.078125 0.859375q0.390625 0.5625 0.390625 1.40625q0 0.828125 -0.484375 1.546875q-0.46875 0.71875 -1.375 1.125q-0.90625 0.390625 -2.046875 0.390625q-1.875 0 -2.875 -0.78125q-0.984375 -0.78125 -1.25 -2.328125zm13.5625 1.421875l0.234375 1.453125q-0.6875 0.140625 -1.234375 0.140625q-0.890625 0 -1.390625 -0.28125q-0.484375 -0.28125 -0.6875 -0.734375q-0.203125 -0.46875 -0.203125 -1.9375l0 -5.578125l-1.203125 0l0 -1.265625l1.203125 0l0 -2.390625l1.625 -0.984375l0 3.375l1.65625 0l0 1.265625l-1.65625 0l0 5.671875q0 0.6875 0.078125 0.890625q0.09375 0.203125 0.28125 0.328125q0.203125 0.109375 0.578125 0.109375q0.265625 0 0.71875 -0.0625zm6.9134827 1.46875l0 -13.359375l1.78125 0l0 6.625l6.6249924 -6.625l2.390625 0l-5.5937424 5.421875l5.8437424 7.9375l-2.328125 0l-4.7656174 -6.765625l-2.171875 2.140625l0 4.625l-1.78125 0zm18.943565 -3.109375l1.6875 0.203125q-0.40625 1.484375 -1.484375 2.3125q-1.078125 0.8125 -2.765625 0.8125q-2.125 0 -3.375 -1.296875q-1.234375 -1.3125 -1.234375 -3.671875q0 -2.453125 1.25 -3.796875q1.265625 -1.34375 3.265625 -1.34375q1.9375 0 3.15625 1.328125q1.234375 1.3125 1.234375 3.703125q0 0.15625 0 0.4375l-7.21875 0q0.09375 1.59375 0.90625 2.453125q0.8125 0.84375 2.015625 0.84375q0.90625 0 1.546875 -0.46875q0.640625 -0.484375 1.015625 -1.515625zm-5.390625 -2.65625l5.40625 0q-0.109375 -1.21875 -0.625 -1.828125q-0.78125 -0.953125 -2.03125 -0.953125q-1.125 0 -1.90625 0.765625q-0.765625 0.75 -0.84375 2.015625zm9.125717 5.765625l0 -9.671875l1.46875 0l0 1.46875q0.5625 -1.03125 1.03125 -1.359375q0.484375 -0.328125 1.0625 -0.328125q0.828125 0 1.6875 0.53125l-0.5625 1.515625q-0.609375 -0.359375 -1.203125 -0.359375q-0.546875 0 -0.96875 0.328125q-0.421875 0.328125 -0.609375 0.890625q-0.28125 0.875 -0.28125 1.921875l0 5.0625l-1.625 0zm6.228302 0l0 -9.671875l1.46875 0l0 1.375q1.0625 -1.59375 3.078125 -1.59375q0.875 0 1.609375 0.3125q0.734375 0.3125 1.09375 0.828125q0.375 0.5 0.515625 1.203125q0.09375 0.453125 0.09375 1.59375l0 5.953125l-1.640625 0l0 -5.890625q0 -1.0 -0.203125 -1.484375q-0.1875 -0.5 -0.671875 -0.796875q-0.484375 -0.296875 -1.140625 -0.296875q-1.046875 0 -1.8125 0.671875q-0.75 0.65625 -0.75 2.515625l0 5.28125l-1.640625 0zm17.000732 
-3.109375l1.6875 0.203125q-0.40625 1.484375 -1.484375 2.3125q-1.078125 0.8125 -2.7656403 0.8125q-2.125 0 -3.375 -1.296875q-1.234375 -1.3125 -1.234375 -3.671875q0 -2.453125 1.25 -3.796875q1.265625 -1.34375 3.265625 -1.34375q1.9375153 0 3.1562653 1.328125q1.234375 1.3125 1.234375 3.703125q0 0.15625 0 0.4375l-7.2187653 0q0.09375 1.59375 0.90625 2.453125q0.8125 0.84375 2.015625 0.84375q0.90625 0 1.5468903 -0.46875q0.640625 -0.484375 1.015625 -1.515625zm-5.3906403 -2.65625l5.4062653 0q-0.109375 -1.21875 -0.625 -1.828125q-0.78125 -0.953125 -2.0312653 -0.953125q-1.125 0 -1.90625 0.765625q-0.765625 0.75 -0.84375 2.015625zm9.094467 5.765625l0 -13.359375l1.640625 0l0 13.359375l-1.640625 0z" fill-rule="nonzero"/><path fill="#000000" fill-opacity="0.0" d="m4.454068 233.43303l74.04724 0" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m4.454068 233.43303l74.04724 0" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m172.45407 233.43303l74.04724 0" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m172.45407 233.43303l74.04724 0" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m73.43044 217.06717l100.88189 0l0 32.06299l-100.88189 0z" fill-rule="evenodd"/><path fill="#000000" d="m96.04542 237.89867l0 -9.546875l1.265625 0l0 3.921875l4.953125 0l0 -3.921875l1.265625 0l0 9.546875l-1.265625 0l0 -4.5l-4.953125 0l0 4.5l-1.265625 0zm13.953278 -0.859375q-0.65625 0.5625 -1.265625 0.796875q-0.59375 0.21875 -1.28125 0.21875q-1.140625 0 -1.75 -0.546875q-0.609375 -0.5625 -0.609375 -1.4375q0 -0.5 0.21875 -0.921875q0.234375 -0.421875 0.609375 -0.671875q0.375 -0.25 0.84375 -0.390625q0.34375 -0.078125 1.046875 -0.171875q1.421875 -0.171875 2.09375 -0.40625q0 -0.234375 0 -0.296875q0 -0.71875 -0.328125 -1.015625q-0.453125 -0.390625 -1.34375 -0.390625q-0.8125 0 -1.21875 0.296875q-0.390625 0.28125 -0.578125 1.015625l-1.140625 -0.15625q0.15625 -0.734375 0.515625 -1.1875q0.359375 -0.453125 1.03125 -0.6875q0.671875 -0.25 1.5625 -0.25q0.890625 0 1.4375 0.203125q0.5625 0.203125 0.8125 0.53125q0.265625 0.3125 0.375 0.796875q0.046875 0.296875 0.046875 1.078125l0 1.5625q0 1.625 0.078125 2.0625q0.078125 0.4375 0.296875 0.828125l-1.21875 0q-0.1875 -0.359375 -0.234375 -0.859375zm-0.09375 -2.609375q-0.640625 0.265625 -1.921875 0.4375q-0.71875 0.109375 -1.015625 0.25q-0.296875 0.125 -0.46875 0.375q-0.15625 0.25 -0.15625 0.546875q0 0.46875 0.34375 0.78125q0.359375 0.3125 1.046875 0.3125q0.671875 0 1.203125 -0.296875q0.53125 -0.296875 0.78125 -0.8125q0.1875 -0.390625 0.1875 -1.171875l0 -0.421875zm2.9749756 3.46875l0 -6.90625l1.0625 0l0 1.046875q0.40625 -0.734375 0.734375 -0.96875q0.34375 -0.234375 0.765625 -0.234375q0.59375 0 1.203125 0.375l-0.40625 1.078125q-0.4375 -0.25 -0.859375 -0.25q-0.390625 0 -0.703125 0.234375q-0.296875 0.234375 -0.421875 0.640625q-0.203125 0.625 -0.203125 1.359375l0 3.625l-1.171875 0zm8.93837 0l0 -0.875q-0.65625 1.03125 -1.9375 1.03125q-0.8125 0 -1.515625 -0.453125q-0.6875 -0.453125 -1.078125 -1.265625q-0.375 -0.828125 -0.375 -1.890625q0 -1.03125 0.34375 -1.875q0.34375 -0.84375 1.03125 -1.28125q0.703125 -0.453125 1.546875 -0.453125q0.625 0 1.109375 0.265625q0.5 0.25 0.796875 0.671875l0 -3.421875l1.171875 0l0 9.546875l-1.09375 0zm-3.703125 -3.453125q0 1.328125 0.5625 1.984375q0.5625 0.65625 1.328125 0.65625q0.765625 0 1.296875 -0.625q0.53125 -0.625 0.53125 -1.90625q0 -1.421875 -0.546875 -2.078125q-0.546875 -0.671875 -1.34375 -0.671875q-0.78125 0 -1.3125 
0.640625q-0.515625 0.625 -0.515625 2.0zm7.9124756 3.453125l-2.125 -6.90625l1.21875 0l1.09375 3.984375l0.421875 1.484375q0.015625 -0.109375 0.359375 -1.421875l1.0937424 -4.046875l1.203125 0l1.03125 4.0l0.34375 1.328125l0.40625 -1.34375l1.171875 -3.984375l1.140625 0l-2.15625 6.90625l-1.21875 0l-1.09375 -4.140625l-0.265625 -1.171875l-1.4062424 5.3125l-1.21875 0zm12.859535 -0.859375q-0.65625 0.5625 -1.265625 0.796875q-0.59376526 0.21875 -1.2812653 0.21875q-1.140625 0 -1.75 -0.546875q-0.609375 -0.5625 -0.609375 -1.4375q0 -0.5 0.21875 -0.921875q0.234375 -0.421875 0.609375 -0.671875q0.375 -0.25 0.84375 -0.390625q0.34375 -0.078125 1.046875 -0.171875q1.4218903 -0.171875 2.0937653 -0.40625q0 -0.234375 0 -0.296875q0 -0.71875 -0.328125 -1.015625q-0.453125 -0.390625 -1.3437653 -0.390625q-0.8125 0 -1.21875 0.296875q-0.390625 0.28125 -0.578125 1.015625l-1.140625 -0.15625q0.15625 -0.734375 0.515625 -1.1875q0.359375 -0.453125 1.03125 -0.6875q0.671875 -0.25 1.5625 -0.25q0.89064026 0 1.4375153 0.203125q0.5625 0.203125 0.8125 0.53125q0.265625 0.3125 0.375 0.796875q0.046875 0.296875 0.046875 1.078125l0 1.5625q0 1.625 0.078125 2.0625q0.078125 0.4375 0.296875 0.828125l-1.21875 0q-0.1875 -0.359375 -0.234375 -0.859375zm-0.09375 -2.609375q-0.640625 0.265625 -1.9218903 0.4375q-0.71875 0.109375 -1.015625 0.25q-0.296875 0.125 -0.46875 0.375q-0.15625 0.25 -0.15625 0.546875q0 0.46875 0.34375 0.78125q0.359375 0.3125 1.046875 0.3125q0.671875 0 1.2031403 -0.296875q0.53125 -0.296875 0.78125 -0.8125q0.1875 -0.390625 0.1875 -1.171875l0 -0.421875zm2.9749756 3.46875l0 -6.90625l1.0625 0l0 1.046875q0.40625 -0.734375 0.734375 -0.96875q0.34375 -0.234375 0.765625 -0.234375q0.59375 0 1.203125 0.375l-0.40625 1.078125q-0.4375 -0.25 -0.859375 -0.25q-0.390625 0 -0.703125 0.234375q-0.296875 0.234375 -0.421875 0.640625q-0.203125 0.625 -0.203125 1.359375l0 3.625l-1.171875 0zm9.18837 -2.21875l1.203125 0.140625q-0.28125 1.0625 -1.0625 1.65625q-0.765625 0.578125 -1.96875 0.578125q-1.515625 0 -2.40625 -0.9375q-0.890625 -0.9375 -0.890625 -2.609375q0 -1.75 0.890625 -2.703125q0.90625 -0.96875 2.34375 -0.96875q1.390625 0 2.265625 0.9375q0.875 0.9375 0.875 2.65625q0 0.109375 0 0.3125l-5.15625 0q0.0625 1.140625 0.640625 1.75q0.578125 0.59375 1.4375 0.59375q0.65625 0 1.109375 -0.328125q0.453125 -0.34375 0.71875 -1.078125zm-3.84375 -1.90625l3.859375 0q-0.078125 -0.859375 -0.4375 -1.296875q-0.5625 -0.6875 -1.453125 -0.6875q-0.8125 0 -1.359375 0.546875q-0.546875 0.53125 -0.609375 1.4375z" fill-rule="nonzero"/><defs><linearGradient id="p.1" gradientUnits="userSpaceOnUse" gradientTransform="matrix(4.53514884533539 0.0 0.0 4.53514884533539 0.0 0.0)" spreadMethod="pad" x1="8.21347768339151" y1="37.02644733653771" x2="8.213461293294644" y2="41.56159618184348"><stop offset="0.0" stop-color="#ff0000"/><stop offset="0.51" stop-color="#dab7a6"/><stop offset="0.99999994" stop-color="#dab7a6" stop-opacity="0.0"/><stop offset="1.0" stop-color="#ffffff" stop-opacity="0.0"/></linearGradient></defs><path fill="url(#p.1)" d="m37.249344 167.92108l173.29134 0l0 20.566925l-173.29134 0z" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m272.4455 182.06865l129.5748 -74.83464l20.629913 35.74803l-129.5748 74.83464z" fill-rule="evenodd"/><path fill="#000000" d="m287.51392 188.73558l1.1823425 -0.82717896q0.51071167 0.6974335 1.1166077 0.9970703q0.5980835 0.28611755 1.4464111 0.1931305q0.84054565 -0.10652161 1.6794434 -0.5910187q0.75772095 -0.4376068 1.2010193 -0.9823456q0.44906616 -0.5660858 0.50097656 -1.1013031q0.057678223 -0.55656433 -0.20785522 
-1.0166931q-0.27334595 -0.47366333 -0.7392273 -0.6557007q-0.47366333 -0.1955719 -1.2366333 -0.079711914q-0.478302 0.07775879 -2.032318 0.54222107q-1.5618286 0.45092773 -2.2805786 0.48712158q-0.9222717 0.027420044 -1.5864563 -0.31072998q-0.6719971 -0.35168457 -1.0703125 -1.0418701q-0.4295349 -0.74432373 -0.38497925 -1.6361694q0.05029297 -0.9131775 0.6668701 -1.7203827q0.63012695 -0.81500244 1.6313782 -1.3932648q1.1095276 -0.64079285 2.1592712 -0.7598877q1.0419617 -0.1326294 1.8867493 0.29968262q0.8583679 0.4244995 1.3987732 1.2671814l-1.2036743 0.82147217q-0.64712524 -0.87127686 -1.5022583 -1.0089111q-0.8629761 -0.15116882 -2.013092 0.51304626q-1.1906738 0.68766785 -1.4819641 1.4333191q-0.2913208 0.745636 0.06793213 1.3681641q0.30459595 0.52778625 0.8865051 0.6608429q0.5740967 0.119522095 2.3815613 -0.43717957q1.8210144 -0.5645294 2.5725403 -0.6376953q1.0924377 -0.107666016 1.857605 0.28042603q0.77090454 0.366745 1.2316895 1.1652069q0.4529724 0.78492737 0.39904785 1.7543335q-0.040405273 0.9616089 -0.6663208 1.8463745q-0.62594604 0.8847656 -1.6813354 1.4942932q-1.3530579 0.78144836 -2.486084 0.9125519q-1.1408386 0.11756897 -2.1214905 -0.3625946q-0.96713257 -0.48797607 -1.5721436 -1.4738007zm13.40155 -4.9431458l0.8006897 0.98106384q-0.45169067 0.4052124 -0.857605 0.6396332q-0.6629944 0.3829193 -1.1454773 0.39089966q-0.4902649 -0.0055389404 -0.8343506 -0.25790405q-0.3518982 -0.26589966 -0.9844971 -1.3620911l-2.382019 -4.1276093l-0.8930054 0.5157471l-0.5466919 -0.9473114l0.8930054 -0.5157623l-1.0308838 -1.786377l0.79599 -1.434082l1.4526367 2.5171661l1.2177734 -0.7033081l0.5466919 0.94732666l-1.2177734 0.70329285l2.4210815 4.195282q0.30456543 0.5278015 0.4446106 0.645401q0.15356445 0.109802246 0.35705566 0.11857605q0.19570923 -0.004760742 0.4663086 -0.16105652q0.20297241 -0.11721802 0.49645996 -0.35888672zm1.8165283 0.41241455l-4.147064 -7.1861115l1.0959778 -0.6329651l0.6247864 1.0826569q-0.0178833 -1.0001068 0.19332886 -1.4468842q0.21121216 -0.44676208 0.64419556 -0.6968231q0.6088562 -0.35165405 1.471283 -0.3264618l0.22875977 1.3654938q-0.59487915 7.4768066E-4 -1.0413818 0.25862122q-0.39239502 0.22662354 -0.5765381 0.6577606q-0.17843628 0.40979004 -0.06384277 0.92100525q0.17193604 0.7667999 0.61709595 1.5381927l2.1711426 3.7622223l-1.2177429 0.70329285zm2.0899658 -6.006668q-1.1480408 -1.9893646 -0.5930481 -3.5910034q0.47280884 -1.3376465 1.7988281 -2.1034698q1.4883423 -0.8595886 2.984253 -0.4243927q1.501648 0.41384888 2.4934998 2.132553q0.79660034 1.3803864 0.83795166 2.4210815q0.04135132 1.0407104 -0.49923706 1.948349q-0.5348511 0.88630676 -1.4819946 1.4333191q-1.5018921 0.8674011 -2.9899902 0.44573975q-1.4959106 -0.43519592 -2.5502625 -2.2621765zm1.2583313 -0.72673035q0.79663086 1.3803711 1.7902527 1.7267303q1.0072021 0.33854675 1.9137268 -0.18502808q0.9065552 -0.5235596 1.1036072 -1.5575867q0.21057129 -1.0418396 -0.60946655 -2.4628143q-0.76538086 -1.3262482 -1.7725525 -1.6647949q-0.99365234 -0.34635925 -1.9002075 0.17720032q-0.90652466 0.5235748 -1.117096 1.5654144q-0.2048645 1.0204926 0.59173584 2.400879zm8.984772 -0.38945007l-4.1470337 -7.1861115l1.0959778 -0.6329651l0.5857239 1.0149841q0.10531616 -1.6306152 1.6072083 -2.4980164q0.6494751 -0.37509155 1.3234558 -0.45761108q0.6739807 -0.08250427 1.163269 0.14013672q0.48928833 0.22264099 0.9021301 0.687912q0.26287842 0.29925537 0.74710083 1.1383057l2.553833 4.425354l-1.2177429 0.70329285l-2.522583 -4.371216q-0.42956543 -0.74432373 -0.7892456 -1.0237579q-0.3540039 -0.30078125 -0.8442688 -0.3063202q-0.47677612 -0.01335144 -0.9638672 
0.2679596q-0.7847595 0.45324707 -1.0640869 1.2821655q-0.2735901 0.80758667 0.52301025 2.187973l2.264862 3.9246216l-1.2177429 0.70329285zm7.819275 -3.7220154l1.2922058 -0.511734q0.3878479 0.51579285 0.8666992 0.5640259q0.65527344 0.072631836 1.4400635 -0.38059998q0.8388977 -0.48449707 1.1036682 -1.0885162q0.26480103 -0.60401917 0.07571411 -1.306778q-0.1161499 -0.42010498 -0.81121826 -1.6245575q-0.25161743 1.408371 -1.4423218 2.096054q-1.4883423 0.85957336 -2.9171448 0.25932312q-1.428833 -0.60025024 -2.2957153 -2.1024323q-0.5935669 -1.0285187 -0.72805786 -2.1056366q-0.12097168 -1.0849152 0.30926514 -1.9649353q0.4437561 -0.8878174 1.3908997 -1.4348297q1.2718811 -0.7345581 2.690796 -0.182724l-0.4998474 -0.8661194l1.1230469 -0.6485901l3.5847168 6.2117157q0.9684448 1.6781158 1.0362854 2.5771942q0.067840576 0.8990936 -0.4420166 1.7348785q-0.5098877 0.8357849 -1.5923462 1.4609375q-1.2854004 0.7423706 -2.4195251 0.62150574q-1.1206055 -0.12869263 -1.7651672 -1.3081818zm-1.4765625 -4.9031525q0.81222534 1.4074402 1.7418518 1.7366486q0.94314575 0.32138062 1.7820435 -0.16311646q0.8388977 -0.4844818 1.0401001 -1.4487457q0.19342041 -0.97779846 -0.60317993 -2.3581848q-0.76538086 -1.3262482 -1.7298889 -1.6533508q-0.97229004 -0.3406372 -1.7976685 0.1360321q-0.8118286 0.46887207 -0.9974365 1.4602051q-0.1855774 0.991333 0.56417847 2.290512z" fill-rule="nonzero"/><path fill="#000000" d="m294.23132 199.68793l-0.80441284 -1.3939209l1.2177429 -0.70329285l0.80441284 1.3939056l-1.2177429 0.7033081zm4.920227 8.525894l-4.147064 -7.1861115l1.2177734 -0.7033081l4.1470337 7.1861115l-1.2177429 0.7033081zm1.3493347 -3.6482391l1.0948792 -0.88494873q0.51641846 0.6760864 1.2029724 0.80285645q0.6922302 0.10542297 1.5176086 -0.3712616q0.8388672 -0.4844818 1.0495605 -1.057251q0.20285034 -0.58628845 -0.062683105 -1.0464172q-0.23431396 -0.4059906 -0.7266846 -0.4464264q-0.35079956 -0.013916016 -1.4790955 0.3129425q-1.5347595 0.43530273 -2.2030334 0.49645996q-0.66256714 0.03982544 -1.183075 -0.23695374q-0.5069885 -0.28459167 -0.8115845 -0.8123779q-0.28115845 -0.48719788 -0.2989807 -1.018219q-0.017791748 -0.5310211 0.2048645 -1.0204926q0.15917969 -0.3806305 0.56817627 -0.797287q0.4147339 -0.43800354 0.9694824 -0.75839233q0.852417 -0.49230957 1.6289368 -0.61598206q0.7765198 -0.123687744 1.3162842 0.123931885q0.5455017 0.22625732 1.0733948 0.85964966l-1.0969849 0.85006714q-0.4013672 -0.5079651 -0.9733887 -0.59262085q-0.5720215 -0.0846405 -1.2756042 0.3217163q-0.8388977 0.4844818 -1.0402222 0.9796753q-0.19558716 0.47383118 0.015289307 0.8392334q0.14056396 0.24359131 0.39874268 0.34710693q0.2581787 0.103500366 0.64749146 0.05909729q0.22845459 -0.041732788 1.2620544 -0.31388855q1.4806519 -0.40403748 2.127594 -0.470932q0.6390991 -0.08041382 1.1653442 0.17501831q0.5397949 0.24760437 0.89904785 0.87013245q0.35144043 0.60899353 0.29852295 1.3613129q-0.047210693 0.73095703 -0.5519409 1.4194183q-0.49118042 0.68063354 -1.3300476 1.1651306q-1.407196 0.81269836 -2.4736633 0.6527405q-1.0664673 -0.15994263 -1.933258 -1.193039zm6.1190186 -5.4646606q-1.1480408 -1.9893799 -0.5930481 -3.5910187q0.47283936 -1.3376465 1.7988281 -2.1034698q1.4883423 -0.85957336 2.984253 -0.4243927q1.501648 0.41384888 2.4934998 2.132553q0.79663086 1.3803864 0.83795166 2.4210968q0.04135132 1.0406952 -0.49923706 1.948349q-0.5348511 0.8862915 -1.4819946 1.4333038q-1.5018921 0.8674011 -2.9899902 0.44573975q-1.4959106 -0.43519592 -2.5502625 -2.2621613zm1.2583313 -0.7267456q0.79663086 1.3803864 1.7902832 1.7267303q1.0071716 0.33854675 1.9136963 -0.18501282q0.9065552 -0.5235748 
1.1036072 -1.5576019q0.21057129 -1.0418396 -0.60946655 -2.4628143q-0.76538086 -1.3262482 -1.7725525 -1.6647949q-0.99365234 -0.34635925 -1.9002075 0.17721558q-0.90652466 0.5235596 -1.117096 1.5653992q-0.2048645 1.0204926 0.59173584 2.400879zm8.957733 -0.3738098l-5.7246704 -9.919815l1.2177734 -0.70329285l5.72464 9.9198l-1.2177429 0.7033081zm7.2713623 -5.390396q-0.34069824 0.972641 -0.8225403 1.5756989q-0.48962402 0.5895386 -1.2067566 1.0036926q-1.1906738 0.6876831 -2.162445 0.47302246q-0.9717407 -0.21464539 -1.4872131 -1.1078339q-0.30456543 -0.52778625 -0.3109436 -1.1015167q-0.014190674 -0.58724976 0.21627808 -1.0631866q0.23620605 -0.49728394 0.64520264 -0.9139252q0.31063843 -0.3057251 0.980896 -0.8010864q1.3733215 -1.02771 1.9363098 -1.6776581q-0.14837646 -0.25712585 -0.18740845 -0.32478333q-0.42956543 -0.74432373 -0.93963623 -0.84669495q-0.7156677 -0.14602661 -1.6357422 0.38536072q-0.8524475 0.49230957 -1.0922546 1.0458069q-0.23410034 0.5321655 0.013824463 1.3994293l-1.2843933 0.52526855q-0.2749939 -0.85162354 -0.18301392 -1.5362854q0.10549927 -0.6924591 0.66851807 -1.3424072q0.5552063 -0.66348267 1.4752808 -1.1948547q0.92007446 -0.5313873 1.6133118 -0.6430664q0.7067566 -0.11949158 1.1648254 0.04902649q0.45803833 0.16850281 0.8552551 0.60672q0.24728394 0.27218628 0.71588135 1.0841675l0.9371643 1.6239929q0.9840393 1.7051697 1.3094177 2.1126862q0.33892822 0.39971924 0.81103516 0.68640137l-1.2718506 0.7345581q-0.40811157 -0.26953125 -0.7590027 -0.75253296zm-1.6645203 -2.6654663q-0.5067749 0.65356445 -1.7234497 1.6088562q-0.69522095 0.5458679 -0.9283142 0.8609314q-0.23312378 0.31506348 -0.25280762 0.6873169q-0.013977051 0.3508911 0.16564941 0.66215515q0.28115845 0.48719788 0.83392334 0.6010132q0.5662842 0.10598755 1.269867 -0.30036926q0.70358276 -0.40634155 1.072998 -1.0166473q0.37512207 -0.63165283 0.3197937 -1.3214569q-0.031341553 -0.5232086 -0.49990845 -1.3352051l-0.25775146 -0.44659424zm7.223419 -0.8156891l0.8006897 0.98106384q-0.45169067 0.4052124 -0.857605 0.63964844q-0.6629944 0.38290405 -1.1454468 0.39089966q-0.4902954 -0.0055389404 -0.8343811 -0.25790405q-0.3518982 -0.26591492 -0.9844971 -1.3620911l-2.382019 -4.1276093l-0.8930054 0.5157471l-0.5466919 -0.94732666l0.8930054 -0.5157471l-1.0308838 -1.786377l0.7960205 -1.434082l1.4526367 2.5171661l1.2177429 -0.7033081l0.5466919 0.94732666l-1.2177429 0.70329285l2.421051 4.195282q0.30459595 0.52778625 0.4446106 0.645401q0.15356445 0.10978699 0.35708618 0.11856079q0.19567871 -0.004760742 0.46627808 -0.16104126q0.20297241 -0.11721802 0.49645996 -0.35890198zm-3.0901794 -8.121277l-0.80441284 -1.3939209l1.2177429 -0.70329285l0.80444336 1.3939209l-1.2177734 0.70329285zm4.920227 8.525894l-4.1470337 -7.1861115l1.2177429 -0.70329285l4.147064 7.186096l-1.2177734 0.7033081zm0.54074097 -5.111908q-1.1480408 -1.9893799 -0.5930481 -3.5910187q0.47280884 -1.3376465 1.7988281 -2.1034698q1.4883423 -0.8595886 2.984253 -0.4243927q1.501648 0.41384888 2.4934998 2.132553q0.79660034 1.3803864 0.83795166 2.4210968q0.0413208 1.0406952 -0.49923706 1.948349q-0.5348511 0.8862915 -1.4819946 1.4333038q-1.5018921 0.8674011 -2.9899902 0.44573975q-1.4959412 -0.43519592 -2.5502625 -2.2621613zm1.2583313 -0.7267456q0.79663086 1.3803864 1.7902527 1.7267303q1.0072021 0.33854675 1.9137268 -0.18501282q0.9065552 -0.5235748 1.1036072 -1.5576019q0.21057129 -1.0418396 -0.60946655 -2.4628143q-0.76538086 -1.3262482 -1.7725525 -1.6647949q-0.99365234 -0.34635925 -1.9002075 0.17721558q-0.9065552 0.5235596 -1.117096 1.5653992q-0.2048645 1.0204926 0.59173584 2.400879zm8.984772 
-0.38945007l-4.1470337 -7.1861115l1.0959473 -0.6329651l0.5857544 1.0149841q0.10531616 -1.6306152 1.6072083 -2.4980164q0.6494446 -0.37509155 1.3234558 -0.45761108q0.6739807 -0.08250427 1.163269 0.14013672q0.48928833 0.22264099 0.9021301 0.687912q0.26287842 0.29925537 0.74710083 1.1383057l2.553833 4.425354l-1.2177429 0.70329285l-2.5226135 -4.371216q-0.4295349 -0.74432373 -0.7892456 -1.0237579q-0.3539734 -0.30078125 -0.8442383 -0.3063202q-0.47677612 -0.01335144 -0.9638672 0.2679596q-0.7847595 0.45324707 -1.0640869 1.2821808q-0.2735901 0.8075714 0.52301025 2.1879578l2.264862 3.9246216l-1.2177429 0.70329285z" fill-rule="nonzero"/><path fill="#000000" fill-opacity="0.0" d="m282.76047 199.94267c-17.003845 0 -26.795105 -5.566925 -34.007706 -11.133865c-7.2126007 -5.566925 -11.846542 -11.13385 -23.6931 -11.13385" fill-rule="evenodd"/><path stroke="#000000" stroke-width="2.0" stroke-linejoin="round" stroke-linecap="butt" d="m282.76047 199.94267c-17.003876 0 -26.795105 -5.5669403 -34.007706 -11.133865c-3.6062927 -2.7834625 -6.567932 -5.566925 -10.10881 -7.6545258c-0.4426117 -0.2609558 -0.8942871 -0.5110321 -1.3573761 -0.74887085c-0.11578369 -0.0594635 -0.23228455 -0.1181488 -0.34950256 -0.17605591l-0.13806152 -0.066833496" fill-rule="evenodd"/><path fill="#000000" stroke="#000000" stroke-width="2.0" stroke-linecap="butt" d="m237.48381 176.93082l-9.563843 1.350235l8.194244 5.1131744z" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m272.4455 118.06866l129.5748 -74.83465l20.629913 35.74803l-129.5748 74.83464z" fill-rule="evenodd"/><path fill="#000000" d="m290.03357 127.53869l-5.72464 -9.9198l1.3124695 -0.75800323l5.72464 9.919807l-1.3124695 0.7579956zm3.470581 -2.0044022l-4.1470337 -7.186104l1.0959778 -0.6329651l0.5857239 1.0149841q0.10531616 -1.6306229 1.6072083 -2.498024q0.6494751 -0.37509155 1.3234558 -0.45760345q0.6739807 -0.0825119 1.163269 0.14012146q0.48928833 0.22264099 0.9021301 0.6879196q0.26290894 0.29925537 0.74710083 1.1383133l2.553833 4.4253464l-1.2177429 0.70329285l-2.522583 -4.371208q-0.4295349 -0.74432373 -0.7892456 -1.0237579q-0.3540039 -0.30078888 -0.8442688 -0.30632782q-0.4767456 -0.01335144 -0.9638672 0.26796722q-0.7847595 0.4532318 -1.0640869 1.2821655q-0.2735901 0.80758667 0.52301025 2.187973l2.2648926 3.924614l-1.2177734 0.70329285zm12.360168 -7.1384735l-0.5232849 -0.906723q-0.059539795 1.4598389 -1.3855286 2.2256546q-0.8659668 0.5001221 -1.8564148 0.44062805q-0.9904785 -0.05949402 -1.8883972 -0.67765045q-0.88442993 -0.62597656 -1.5170288 -1.7221603q-0.6247864 -1.0826492 -0.77282715 -2.151947q-0.14230347 -1.0906448 0.30926514 -1.9649353q0.44378662 -0.887825 1.336792 -1.4035797q0.6494751 -0.37509155 1.3063049 -0.39356232q0.6703491 -0.026283264 1.2392883 0.2405777l-2.054016 -3.5592194l1.2177429 -0.7033005l5.7246704 9.919807l-1.1365662 0.6564102zm-5.9122925 -1.3669891q0.79660034 1.3803787 1.7767029 1.7345505q0.97232056 0.3406372 1.7570801 -0.112602234q0.7983093 -0.4610443 0.97817993 -1.4310303q0.18560791 -0.991333 -0.58758545 -2.3311157q-0.8512573 -1.4751129 -1.8178406 -1.8370972q-0.96658325 -0.36198425 -1.805481 0.12251282q-0.8118286 0.46886444 -0.97036743 1.4445648q-0.15853882 0.9757004 0.6693115 2.4102173zm12.53952 -5.5459747l1.3442383 -0.57787323q0.34274292 1.2816315 -0.11764526 2.3594894q-0.4468689 1.0700455 -1.6916809 1.788971q-1.5830688 0.9142914 -3.0654602 0.47128296q-1.4823914 -0.44300842 -2.4898376 -2.1887817q-1.0465393 -1.813446 -0.69085693 -3.3540955q0.35568237 -1.5406494 1.8440247 -2.4002304q1.4342346 -0.828331 2.9109192 -0.36397552q1.4823608 0.4430008 
2.5054626 2.2158508q0.0625 0.10826111 0.18743896 0.32479095l-5.3580933 3.094513q0.75494385 1.1518478 1.7173462 1.4440689q0.96810913 0.27088165 1.861145 -0.24487305q0.6765137 -0.39072418 0.9470215 -1.0160904q0.2705078 -0.6253662 0.09597778 -1.5530472zm-5.131775 0.32941437l4.005066 -2.3130646q-0.60446167 -0.8598404 -1.2410278 -1.0876312q-0.98794556 -0.36769867 -1.90802 0.16368103q-0.8388977 0.48449707 -1.0926819 1.3889084q-0.2480774 0.88306427 0.23666382 1.8481064zm10.854523 3.1318436l-5.740265 -9.946869l1.1094971 -0.6407852l0.5388794 0.9337845q0.07220459 -0.78147125 0.4031067 -1.333458q0.3444214 -0.5597992 1.0480042 -0.9661484q0.92007446 -0.5313797 1.8970032 -0.46406555q0.9769592 0.06730652 1.8285828 0.7302551q0.8573303 0.64160156 1.4508972 1.6701202q0.6404114 1.1097183 0.74212646 2.2238083q0.11526489 1.1062698 -0.3691101 2.01754q-0.4708252 0.90345 -1.3097229 1.3879471q-0.6088867 0.35164642 -1.2443542 0.37583923q-0.62197876 0.01637268 -1.159668 -0.19635773l2.022766 3.5050888l-1.2177429 0.7033005zm-2.551239 -6.952957q0.80441284 1.3939209 1.7418518 1.7366486q0.95095825 0.3349228 1.7492676 -0.12612915q0.8118286 -0.46886444 0.9953308 -1.495079q0.18353271 -1.026207 -0.6443176 -2.4607239q-0.79663086 -1.3803787 -1.7554016 -1.728836q-0.96658325 -0.36198425 -1.7513428 0.09125519q-0.77124023 0.4454193 -0.958374 1.5278625q-0.1736145 1.0746231 0.62298584 2.4550018zm12.239746 -5.408928l1.3442688 -0.57788086q0.34274292 1.2816391 -0.11764526 2.359497q-0.4468689 1.0700455 -1.6916809 1.788971q-1.5830688 0.91428375 -3.0654602 0.47128296q-1.4823914 -0.44300842 -2.4898682 -2.1887894q-1.0465088 -1.8134384 -0.6908264 -3.3540878q0.35565186 -1.5406494 1.8440247 -2.400238q1.4342346 -0.828331 2.9108887 -0.36397552q1.4823914 0.44300842 2.5054932 2.2158508q0.062469482 0.10826874 0.18743896 0.32479858l-5.3580933 3.094513q0.75491333 1.1518402 1.7173462 1.4440689q0.96810913 0.27087402 1.861145 -0.24488068q0.6765137 -0.39071655 0.9470215 -1.0160828q0.2705078 -0.62537384 0.095947266 -1.5530472zm-5.1317444 0.32941437l4.0050354 -2.3130722q-0.60443115 -0.85983276 -1.2410278 -1.0876236q-0.98791504 -0.3677063 -1.9079895 0.1636734q-0.8388977 0.48449707 -1.0926819 1.3889084q-0.2480774 0.88306427 0.23666382 1.848114zm9.2612915 0.3710785l-4.1470337 -7.1861115l1.0959778 -0.6329651l0.5857239 1.0149841q0.10531616 -1.6306229 1.6072083 -2.498024q0.6494751 -0.37509155 1.3234558 -0.45760345q0.6739807 -0.0825119 1.163269 0.14012909q0.48928833 0.22263336 0.9021301 0.687912q0.26290894 0.29925537 0.74710083 1.1383133l2.553833 4.4253464l-1.2177429 0.70329285l-2.522583 -4.371208q-0.42956543 -0.74432373 -0.7892456 -1.0237579q-0.3540039 -0.30078888 -0.8442688 -0.30632782q-0.4767456 -0.01335144 -0.9638672 0.26796722q-0.7847595 0.4532318 -1.0640869 1.2821655q-0.2735901 0.80758667 0.52301025 2.187973l2.2648926 3.924614l-1.2177734 0.7033005zm12.360168 -7.138481l-0.5232849 -0.906723q-0.059539795 1.4598389 -1.3855286 2.2256546q-0.8659668 0.5001221 -1.8564148 0.44062805q-0.9904785 -0.05949402 -1.8883972 -0.67765045q-0.88442993 -0.62596893 -1.5170288 -1.7221603q-0.6247864 -1.0826492 -0.77282715 -2.151947q-0.14230347 -1.0906448 0.30926514 -1.9649353q0.44378662 -0.887825 1.336792 -1.4035797q0.6494751 -0.37509155 1.3063049 -0.39356232q0.6703491 -0.026283264 1.2392883 0.2405777l-2.054016 -3.5592194l1.2177429 -0.7033005l5.7246704 9.919807l-1.1365662 0.6564102zm-5.9122925 -1.3669891q0.79660034 1.3803787 1.7767029 1.7345505q0.97232056 0.3406372 1.7570801 -0.112602234q0.7983093 -0.4610443 0.97817993 -1.4310303q0.18560791 -0.991333 -0.58758545 
-2.3311157q-0.8512573 -1.4751129 -1.8178406 -1.8370972q-0.96658325 -0.36198425 -1.805481 0.12251282q-0.8118286 0.46886444 -0.97036743 1.4445648q-0.15853882 0.9757004 0.6693115 2.4102173zm12.53952 -5.5459747l1.3442383 -0.57787323q0.34274292 1.2816391 -0.11764526 2.3594894q-0.4468689 1.0700455 -1.6916809 1.788971q-1.5830688 0.9142914 -3.0654602 0.47128296q-1.4823914 -0.4430008 -2.4898682 -2.1887817q-1.0465088 -1.813446 -0.6908264 -3.3540955q0.35568237 -1.5406494 1.8440247 -2.4002304q1.4342346 -0.828331 2.9109192 -0.36397552q1.4823608 0.4430008 2.5054626 2.2158508q0.0625 0.10826111 0.18743896 0.32479095l-5.3580933 3.094513q0.75494385 1.1518478 1.7173462 1.4440689q0.96810913 0.27088165 1.861145 -0.24487305q0.6765137 -0.39072418 0.9470215 -1.0160904q0.2705078 -0.6253662 0.09597778 -1.5530472zm-5.131775 0.32941437l4.005066 -2.3130646q-0.60446167 -0.8598404 -1.2410278 -1.0876312q-0.98794556 -0.36769867 -1.90802 0.16368103q-0.8388977 0.48449707 -1.0926819 1.3889084q-0.2480774 0.88306427 0.23666382 1.8481064zm9.261322 0.3710785l-4.147064 -7.186104l1.0959778 -0.6329727l0.5857544 1.0149918q0.105285645 -1.6306229 1.6071777 -2.498024q0.6494751 -0.37509155 1.3234558 -0.45761108q0.67401123 -0.0825119 1.163269 0.14012909q0.48928833 0.22264099 0.9021301 0.6879196q0.26290894 0.29925537 0.74710083 1.1383133l2.553833 4.4253387l-1.2177429 0.7033005l-2.522583 -4.371208q-0.4295349 -0.74432373 -0.7892456 -1.0237656q-0.3539734 -0.30078125 -0.8442688 -0.3063202q-0.4767456 -0.01335907 -0.96383667 0.2679596q-0.78479004 0.45323944 -1.0640869 1.2821732q-0.2736206 0.80758667 0.52301025 2.1879654l2.264862 3.924614l-1.2177429 0.7033005zm9.725006 -7.078125l0.8006897 0.98106384q-0.45169067 0.4052124 -0.857605 0.6396408q-0.6629944 0.38291168 -1.1454468 0.39089966q-0.4902954 -0.0055389404 -0.8343811 -0.25791168q-0.3518982 -0.26589966 -0.9844971 -1.3620834l-2.382019 -4.127617l-0.8930054 0.5157547l-0.5466919 -0.94731903l0.8930054 -0.5157547l-1.0308838 -1.786377l0.79599 -1.4340897l1.4526672 2.5171661l1.2177429 -0.7033005l0.5466919 0.94732666l-1.2177429 0.70329285l2.421051 4.195282q0.30459595 0.5277939 0.4446106 0.645401q0.15356445 0.10979462 0.35708618 0.11856842q0.19567871 -0.004760742 0.46627808 -0.16104889q0.20297241 -0.11721802 0.49645996 -0.35889435z" fill-rule="nonzero"/><path fill="#000000" d="m299.15155 144.21382l-5.72464 -9.919815l1.2177429 -0.70329285l3.2645264 5.6568604l1.1950684 -4.587631l1.5830688 -0.9142914l-1.2081604 4.252365l5.625824 2.7774506l-1.5018921 0.8674011l-4.4921265 -2.3134918l-0.38955688 1.3256378l1.6478882 2.8554993l-1.2177429 0.7033081zm10.503723 -9.151794l1.3442383 -0.57788086q0.34274292 1.2816315 -0.11764526 2.359497q-0.44683838 1.0700378 -1.6916504 1.788971q-1.5830688 0.9142761 -3.0654602 0.47128296q-1.4823914 -0.44300842 -2.4898682 -2.188797q-1.0465393 -1.8134308 -0.69085693 -3.3540802q0.35568237 -1.5406494 1.8440247 -2.400238q1.4342346 -0.8283386 2.9109192 -0.36398315q1.4823914 0.44300842 2.5054932 2.2158508q0.062469482 0.10827637 0.18743896 0.32479858l-5.3580933 3.094513q0.75491333 1.1518402 1.7173157 1.4440765q0.96813965 0.27087402 1.861145 -0.2448883q0.6765137 -0.39071655 0.947052 -1.0160828q0.2705078 -0.6253662 0.095947266 -1.5530396zm-5.1317444 0.32940674l4.0050354 -2.3130646q-0.60446167 -0.85983276 -1.2410278 -1.0876312q-0.98791504 -0.3677063 -1.90802 0.16368103q-0.8388672 0.48449707 -1.0926819 1.3889008q-0.2480774 0.8830719 0.23669434 1.848114zm9.247772 0.378891l-4.147064 -7.1861115l1.0959778 -0.6329651l0.6247864 1.0826569q-0.017913818 -1.0001068 0.19329834 -1.4468765q0.21124268 -0.4467697 
0.64419556 -0.69683075q0.6088867 -0.35165405 1.4713135 -0.32646942l0.22875977 1.3655014q-0.59487915 7.4768066E-4 -1.0413818 0.25862122q-0.39239502 0.22662354 -0.5765686 0.6577606q-0.17840576 0.40979004 -0.063812256 0.92100525q0.17190552 0.7667999 0.61709595 1.5381927l2.1711426 3.7622223l-1.2177429 0.70329285zm4.6274414 -2.6725311l-4.147064 -7.186104l1.0959778 -0.6329727l0.5857544 1.0149841q0.105285645 -1.6306152 1.6071777 -2.4980164q0.6494751 -0.37509918 1.3234558 -0.45761108q0.67401123 -0.0825119 1.1632996 0.14012909q0.4892578 0.22264099 0.9020996 0.6879196q0.26290894 0.29925537 0.74710083 1.1383133l2.553833 4.4253464l-1.2177429 0.70329285l-2.522583 -4.371208q-0.4295349 -0.74432373 -0.7892456 -1.0237656q-0.3539734 -0.30078125 -0.8442688 -0.3063202q-0.4767456 -0.01335907 -0.96383667 0.2679596q-0.78479004 0.45323944 -1.0640869 1.2821732q-0.2736206 0.80758667 0.52301025 2.1879654l2.264862 3.9246216l-1.2177429 0.70329285zm11.281708 -9.60112l1.3442688 -0.57788086q0.34274292 1.2816391 -0.11764526 2.359497q-0.4468689 1.0700378 -1.6916809 1.788971q-1.5830688 0.91428375 -3.0654602 0.47127533q-1.4823914 -0.4430008 -2.4898682 -2.1887817q-1.0465088 -1.8134384 -0.6908264 -3.3540878q0.35565186 -1.5406494 1.8440247 -2.400238q1.4342346 -0.828331 2.9108887 -0.36397552q1.4823914 0.44300842 2.5054932 2.2158508q0.062469482 0.10826111 0.18743896 0.32479095l-5.3580933 3.094513q0.75491333 1.1518478 1.7173462 1.4440689q0.96810913 0.27088165 1.861145 -0.24487305q0.6765137 -0.39072418 0.9470215 -1.0160904q0.2705078 -0.6253662 0.095947266 -1.5530396zm-5.1317444 0.32941437l4.0050354 -2.3130722q-0.60443115 -0.85983276 -1.2410278 -1.0876236q-0.98791504 -0.3677063 -1.9079895 0.1636734q-0.8388977 0.48449707 -1.0926819 1.3889084q-0.2480774 0.88306427 0.23666382 1.848114zm9.234253 0.3867035l-5.7246704 -9.919807l1.2177734 -0.7033005l5.72464 9.919807l-1.2177429 0.7033005z" fill-rule="nonzero"/><path fill="#000000" fill-opacity="0.0" d="m282.76047 135.94267c-17.003845 0 -26.795105 -5.566925 -34.007706 -11.133858c-7.2126007 -5.5669327 -11.846542 -11.133858 -23.6931 -11.133858" fill-rule="evenodd"/><path stroke="#000000" stroke-width="2.0" stroke-linejoin="round" stroke-linecap="butt" d="m282.76047 135.94267c-17.003876 0 -26.795105 -5.5669403 -34.007706 -11.133865c-3.6062927 -2.7834625 -6.567932 -5.566925 -10.10881 -7.6545258c-0.4426117 -0.26094818 -0.8942871 -0.5110321 -1.3573761 -0.74887085c-0.11578369 -0.0594635 -0.23228455 -0.11816406 -0.34950256 -0.17607117l-0.13806152 -0.06682587" fill-rule="evenodd"/><path fill="#000000" stroke="#000000" stroke-width="2.0" stroke-linecap="butt" d="m237.48381 112.93081l-9.563843 1.350235l8.194244 5.113182z" fill-rule="evenodd"/></g></svg> \ No newline at end of file
diff --git a/g3doc/Machine-Virtualization.png b/g3doc/Machine-Virtualization.png
deleted file mode 100644
index 1ba2ed6b2..000000000
--- a/g3doc/Machine-Virtualization.png
+++ /dev/null
Binary files differ
diff --git a/g3doc/Machine-Virtualization.svg b/g3doc/Machine-Virtualization.svg
deleted file mode 100644
index 5352da07b..000000000
--- a/g3doc/Machine-Virtualization.svg
+++ /dev/null
@@ -1 +0,0 @@
-<svg version="1.1" viewBox="0.0 0.0 387.7034120734908 336.4225721784777" fill="none" stroke="none" stroke-linecap="square" stroke-miterlimit="10" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns="http://www.w3.org/2000/svg"><clipPath id="p.0"><path d="m0 0l387.7034 0l0 336.42258l-387.7034 0l0 -336.42258z" clip-rule="nonzero"/></clipPath><g clip-path="url(#p.0)"><path fill="#000000" fill-opacity="0.0" d="m0 0l387.7034 0l0 336.42258l-387.7034 0z" fill-rule="evenodd"/><path fill="#f4cccc" d="m44.454067 14.643044l174.83464 0l0 48.850395l-174.83464 0z" fill-rule="evenodd"/><path stroke="#cc4125" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m44.454067 14.643044l174.83464 0l0 48.850395l-174.83464 0z" fill-rule="evenodd"/><path fill="#000000" d="m86.206116 45.98824l5.125 -13.359375l1.90625 0l5.46875 13.359375l-2.015625 0l-1.546875 -4.046875l-5.59375 0l-1.46875 4.046875l-1.875 0zm3.859375 -5.484375l4.53125 0l-1.40625 -3.703125q-0.625 -1.6875 -0.9375 -2.765625q-0.265625 1.28125 -0.71875 2.546875l-1.46875 3.921875zm9.849823 9.1875l0 -13.375l1.484375 0l0 1.25q0.53125 -0.734375 1.1875 -1.09375q0.671875 -0.375 1.625 -0.375q1.234375 0 2.171875 0.640625q0.953125 0.625 1.4375 1.796875q0.484375 1.15625 0.484375 2.546875q0 1.484375 -0.53125 2.671875q-0.53125 1.1875 -1.546875 1.828125q-1.015625 0.625 -2.140625 0.625q-0.8125 0 -1.46875 -0.34375q-0.65625 -0.34375 -1.0625 -0.875l0 4.703125l-1.640625 0zm1.484375 -8.484375q0 1.859375 0.75 2.765625q0.765625 0.890625 1.828125 0.890625q1.09375 0 1.875 -0.921875q0.78125 -0.9375 0.78125 -2.875q0 -1.84375 -0.765625 -2.765625q-0.75 -0.921875 -1.8125 -0.921875q-1.046875 0 -1.859375 0.984375q-0.796875 0.96875 -0.796875 2.84375zm8.891342 8.484375l0 -13.375l1.484375 0l0 1.25q0.53125 -0.734375 1.1875 -1.09375q0.671875 -0.375 1.625 -0.375q1.234375 0 2.171875 0.640625q0.953125 0.625 1.4375 1.796875q0.484375 1.15625 0.484375 2.546875q0 1.484375 -0.53125 2.671875q-0.53125 1.1875 -1.546875 1.828125q-1.015625 0.625 -2.140625 0.625q-0.8125 0 -1.46875 -0.34375q-0.65625 -0.34375 -1.0625 -0.875l0 4.703125l-1.640625 0zm1.484375 -8.484375q0 1.859375 0.75 2.765625q0.765625 0.890625 1.828125 0.890625q1.09375 0 1.875 -0.921875q0.78125 -0.9375 0.78125 -2.875q0 -1.84375 -0.765625 -2.765625q-0.75 -0.921875 -1.8125 -0.921875q-1.046875 0 -1.859375 0.984375q-0.796875 0.96875 -0.796875 2.84375zm8.844467 4.78125l0 -13.359375l1.640625 0l0 13.359375l-1.640625 0zm4.191696 -11.46875l0 -1.890625l1.640625 0l0 1.890625l-1.640625 0zm0 11.46875l0 -9.671875l1.640625 0l0 9.671875l-1.640625 0zm10.457321 -3.546875l1.609375 0.21875q-0.265625 1.65625 -1.359375 2.609375q-1.078125 0.9375 -2.671875 0.9375q-1.984375 0 -3.1875 -1.296875q-1.203125 -1.296875 -1.203125 -3.71875q0 -1.578125 0.515625 -2.75q0.515625 -1.171875 1.578125 -1.75q1.0625 -0.59375 2.3125 -0.59375q1.578125 0 2.578125 0.796875q1.0 0.796875 1.28125 2.265625l-1.59375 0.234375q-0.234375 -0.96875 -0.8125 -1.453125q-0.578125 -0.5 -1.390625 -0.5q-1.234375 0 -2.015625 0.890625q-0.78125 0.890625 -0.78125 2.8125q0 1.953125 0.75 2.84375q0.75 0.875 1.953125 0.875q0.96875 0 1.609375 -0.59375q0.65625 -0.59375 0.828125 -1.828125zm9.328125 2.359375q-0.921875 0.765625 -1.765625 1.09375q-0.828125 0.3125 -1.796875 0.3125q-1.59375 0 -2.453125 -0.78125q-0.859375 -0.78125 -0.859375 -1.984375q0 -0.71875 0.328125 -1.296875q0.328125 -0.59375 0.84375 -0.9375q0.53125 -0.359375 1.1875 -0.546875q0.46875 -0.125 1.453125 -0.25q1.984375 -0.234375 2.921875 -0.5625q0.015625 -0.34375 0.015625 -0.421875q0 -1.0 -0.46875 -1.421875q-0.625 -0.546875 -1.875 
-0.546875q-1.15625 0 -1.703125 0.40625q-0.546875 0.40625 -0.8125 1.421875l-1.609375 -0.21875q0.21875 -1.015625 0.71875 -1.640625q0.5 -0.640625 1.453125 -0.984375q0.953125 -0.34375 2.1875 -0.34375q1.25 0 2.015625 0.296875q0.78125 0.28125 1.140625 0.734375q0.375 0.4375 0.515625 1.109375q0.078125 0.421875 0.078125 1.515625l0 2.1875q0 2.28125 0.109375 2.890625q0.109375 0.59375 0.40625 1.15625l-1.703125 0q-0.265625 -0.515625 -0.328125 -1.1875zm-0.140625 -3.671875q-0.890625 0.375 -2.671875 0.625q-1.015625 0.140625 -1.4375 0.328125q-0.421875 0.1875 -0.65625 0.53125q-0.21875 0.34375 -0.21875 0.78125q0 0.65625 0.5 1.09375q0.5 0.4375 1.453125 0.4375q0.9375 0 1.671875 -0.40625q0.75 -0.421875 1.09375 -1.140625q0.265625 -0.5625 0.265625 -1.640625l0 -0.609375zm7.781967 3.390625l0.234375 1.453125q-0.6875 0.140625 -1.234375 0.140625q-0.890625 0 -1.390625 -0.28125q-0.484375 -0.28125 -0.6875 -0.734375q-0.203125 -0.46875 -0.203125 -1.9375l0 -5.578125l-1.203125 0l0 -1.265625l1.203125 0l0 -2.390625l1.625 -0.984375l0 3.375l1.65625 0l0 1.265625l-1.65625 0l0 5.671875q0 0.6875 0.078125 0.890625q0.09375 0.203125 0.28125 0.328125q0.203125 0.109375 0.578125 0.109375q0.265625 0 0.71875 -0.0625zm1.6051788 -10.0l0 -1.890625l1.640625 0l0 1.890625l-1.640625 0zm0 11.46875l0 -9.671875l1.640625 0l0 9.671875l-1.640625 0zm3.5354462 -4.84375q0 -2.6875 1.484375 -3.96875q1.25 -1.078125 3.046875 -1.078125q2.0 0 3.265625 1.3125q1.265625 1.296875 1.265625 3.609375q0 1.859375 -0.5625 2.9375q-0.5625 1.0625 -1.640625 1.65625q-1.0625 0.59375 -2.328125 0.59375q-2.03125 0 -3.28125 -1.296875q-1.25 -1.3125 -1.25 -3.765625zm1.6875 0q0 1.859375 0.796875 2.796875q0.8125 0.921875 2.046875 0.921875q1.21875 0 2.03125 -0.921875q0.8125 -0.9375 0.8125 -2.84375q0 -1.796875 -0.8125 -2.71875q-0.8125 -0.921875 -2.03125 -0.921875q-1.234375 0 -2.046875 0.921875q-0.796875 0.90625 -0.796875 2.765625zm9.297592 4.84375l0 -9.671875l1.46875 0l0 1.375q1.0625 -1.59375 3.078125 -1.59375q0.875 0 1.609375 0.3125q0.734375 0.3125 1.09375 0.828125q0.375 0.5 0.515625 1.203125q0.09375 0.453125 0.09375 1.59375l0 5.953125l-1.640625 0l0 -5.890625q0 -1.0 -0.203125 -1.484375q-0.1875 -0.5 -0.671875 -0.796875q-0.484375 -0.296875 -1.140625 -0.296875q-1.046875 0 -1.8125 0.671875q-0.75 0.65625 -0.75 2.515625l0 5.28125l-1.640625 0z" fill-rule="nonzero"/><path fill="#000000" fill-opacity="0.0" d="m12.454068 81.068245l74.04724 0" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m12.454068 81.068245l74.04724 0" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m180.45407 81.068245l74.04724 0" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m180.45407 81.068245l74.04724 0" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m81.43044 64.70238l100.88189 0l0 32.06299l-100.88189 0z" fill-rule="evenodd"/><path fill="#000000" d="m95.06437 82.471375l1.203125 -0.109375q0.078125 0.71875 0.390625 1.1875q0.3125 0.453125 0.953125 0.734375q0.65625 0.28125 1.46875 0.28125q0.71875 0 1.265625 -0.21875q0.5625 -0.21875 0.828125 -0.578125q0.265625 -0.375 0.265625 -0.828125q0 -0.453125 -0.265625 -0.78125q-0.25 -0.328125 -0.84375 -0.5625q-0.390625 -0.15625 -1.703125 -0.46875q-1.3125 -0.3125 -1.84375 -0.59375q-0.671875 -0.359375 -1.015625 -0.890625q-0.328125 -0.53125 -0.328125 -1.1875q0 -0.71875 0.40625 -1.34375q0.40625 -0.625 1.1875 -0.953125q0.796875 -0.328125 1.765625 -0.328125q1.046875 0 1.859375 0.34375q0.8125 0.34375 1.25 1.015625q0.4375 0.65625 
0.46875 1.484375l-1.203125 0.09375q-0.109375 -0.90625 -0.671875 -1.359375q-0.5625 -0.46875 -1.65625 -0.46875q-1.140625 0 -1.671875 0.421875q-0.515625 0.421875 -0.515625 1.015625q0 0.515625 0.359375 0.84375q0.375 0.328125 1.90625 0.6875q1.546875 0.34375 2.109375 0.59375q0.84375 0.390625 1.234375 0.984375q0.390625 0.578125 0.390625 1.359375q0 0.75 -0.4375 1.4375q-0.421875 0.671875 -1.25 1.046875q-0.8125 0.359375 -1.828125 0.359375q-1.296875 0 -2.171875 -0.375q-0.875 -0.375 -1.375 -1.125q-0.5 -0.765625 -0.53125 -1.71875zm9.12413 5.71875l-0.125 -1.09375q0.375 0.109375 0.65625 0.109375q0.390625 0 0.625 -0.140625q0.234375 -0.125 0.390625 -0.359375q0.109375 -0.171875 0.359375 -0.875q0.03125 -0.09375 0.109375 -0.28125l-2.625 -6.921875l1.265625 0l1.4375 4.0q0.28125 0.765625 0.5 1.59375q0.203125 -0.796875 0.46875 -1.578125l1.484375 -4.015625l1.171875 0l-2.625 7.015625q-0.421875 1.140625 -0.65625 1.578125q-0.3125 0.578125 -0.71875 0.84375q-0.40625 0.28125 -0.96875 0.28125q-0.328125 0 -0.75 -0.15625zm6.2421875 -4.71875l1.15625 -0.1875q0.109375 0.703125 0.546875 1.078125q0.453125 0.359375 1.25 0.359375q0.8125 0 1.203125 -0.328125q0.390625 -0.328125 0.390625 -0.765625q0 -0.390625 -0.359375 -0.625q-0.234375 -0.15625 -1.1875 -0.390625q-1.296875 -0.328125 -1.796875 -0.5625q-0.484375 -0.25 -0.75 -0.65625q-0.25 -0.421875 -0.25 -0.9375q0 -0.453125 0.203125 -0.84375q0.21875 -0.40625 0.578125 -0.671875q0.28125 -0.1875 0.75 -0.328125q0.46875 -0.140625 1.015625 -0.140625q0.8125 0 1.421875 0.234375q0.609375 0.234375 0.90625 0.640625q0.296875 0.390625 0.40625 1.0625l-1.140625 0.15625q-0.078125 -0.53125 -0.453125 -0.828125q-0.375 -0.3125 -1.0625 -0.3125q-0.8125 0 -1.15625 0.265625q-0.34375 0.265625 -0.34375 0.625q0 0.234375 0.140625 0.421875q0.15625 0.1875 0.453125 0.3125q0.171875 0.0625 1.03125 0.296875q1.25 0.328125 1.734375 0.546875q0.5 0.203125 0.78125 0.609375q0.28125 0.40625 0.28125 1.0q0 0.59375 -0.34375 1.109375q-0.34375 0.515625 -1.0 0.796875q-0.640625 0.28125 -1.453125 0.28125q-1.34375 0 -2.046875 -0.5625q-0.703125 -0.5625 -0.90625 -1.65625zm9.6953125 1.015625l0.171875 1.03125q-0.5 0.109375 -0.890625 0.109375q-0.640625 0 -1.0 -0.203125q-0.34375 -0.203125 -0.484375 -0.53125q-0.140625 -0.328125 -0.140625 -1.390625l0 -3.96875l-0.859375 0l0 -0.90625l0.859375 0l0 -1.71875l1.171875 -0.703125l0 2.421875l1.171875 0l0 0.90625l-1.171875 0l0 4.046875q0 0.5 0.046875 0.640625q0.0625 0.140625 0.203125 0.234375q0.140625 0.078125 0.40625 0.078125q0.203125 0 0.515625 -0.046875zm5.8748627 -1.171875l1.203125 0.140625q-0.28125 1.0625 -1.0625 1.65625q-0.765625 0.578125 -1.96875 0.578125q-1.515625 0 -2.40625 -0.9375q-0.890625 -0.9375 -0.890625 -2.609375q0 -1.75 0.890625 -2.703125q0.90625 -0.96875 2.34375 -0.96875q1.390625 0 2.265625 0.9375q0.875 0.9375 0.875 2.65625q0 0.109375 0 0.3125l-5.15625 0q0.0625 1.140625 0.640625 1.75q0.578125 0.59375 1.4375 0.59375q0.65625 0 1.109375 -0.328125q0.453125 -0.34375 0.71875 -1.078125zm-3.84375 -1.90625l3.859375 0q-0.078125 -0.859375 -0.4375 -1.296875q-0.5625 -0.6875 -1.453125 -0.6875q-0.8125 0 -1.359375 0.546875q-0.546875 0.53125 -0.609375 1.4375zm6.521843 4.125l0 -6.90625l1.046875 0l0 0.96875q0.328125 -0.515625 0.859375 -0.8125q0.546875 -0.3125 1.234375 -0.3125q0.78125 0 1.265625 0.3125q0.484375 0.3125 0.6875 0.890625q0.828125 -1.203125 2.140625 -1.203125q1.03125 0 1.578125 0.578125q0.5625 0.5625 0.5625 1.734375l0 4.75l-1.171875 0l0 -4.359375q0 -0.703125 -0.125 -1.0q-0.109375 -0.3125 -0.40625 -0.5q-0.296875 -0.1875 -0.703125 -0.1875q-0.71875 0 -1.203125 0.484375q-0.484375 0.484375 
-0.484375 1.546875l0 4.015625l-1.171875 0l0 -4.484375q0 -0.78125 -0.296875 -1.171875q-0.28125 -0.390625 -0.921875 -0.390625q-0.5 0 -0.921875 0.265625q-0.421875 0.25 -0.609375 0.75q-0.1875 0.5 -0.1875 1.453125l0 3.578125l-1.171875 0zm19.32106 -2.53125l1.15625 0.15625q-0.1875 1.1875 -0.96875 1.859375q-0.78125 0.671875 -1.921875 0.671875q-1.4062653 0 -2.2812653 -0.921875q-0.859375 -0.9375 -0.859375 -2.65625q0 -1.125 0.375 -1.96875q0.375 -0.84375 1.125 -1.25q0.765625 -0.421875 1.6562653 -0.421875q1.125 0 1.84375 0.578125q0.71875 0.5625 0.921875 1.609375l-1.140625 0.171875q-0.171875 -0.703125 -0.59375 -1.046875q-0.40625 -0.359375 -0.984375 -0.359375q-0.890625 0 -1.4531403 0.640625q-0.546875 0.640625 -0.546875 2.0q0 1.40625 0.53125 2.03125q0.546875 0.625 1.4062653 0.625q0.6875 0 1.140625 -0.421875q0.46875 -0.421875 0.59375 -1.296875zm6.6640625 1.671875q-0.65625 0.5625 -1.265625 0.796875q-0.59375 0.21875 -1.28125 0.21875q-1.140625 0 -1.75 -0.546875q-0.609375 -0.5625 -0.609375 -1.4375q0 -0.5 0.21875 -0.921875q0.234375 -0.421875 0.609375 -0.671875q0.375 -0.25 0.84375 -0.390625q0.34375 -0.078125 1.046875 -0.171875q1.421875 -0.171875 2.09375 -0.40625q0 -0.234375 0 -0.296875q0 -0.71875 -0.328125 -1.015625q-0.453125 -0.390625 -1.34375 -0.390625q-0.8125 0 -1.21875 0.296875q-0.390625 0.28125 -0.578125 1.015625l-1.140625 -0.15625q0.15625 -0.734375 0.515625 -1.1875q0.359375 -0.453125 1.03125 -0.6875q0.671875 -0.25 1.5625 -0.25q0.890625 0 1.4375 0.203125q0.5625 0.203125 0.8125 0.53125q0.265625 0.3125 0.375 0.796875q0.046875 0.296875 0.046875 1.078125l0 1.5625q0 1.625 0.078125 2.0625q0.078125 0.4375 0.296875 0.828125l-1.21875 0q-0.1875 -0.359375 -0.234375 -0.859375zm-0.09375 -2.609375q-0.640625 0.265625 -1.921875 0.4375q-0.71875 0.109375 -1.015625 0.25q-0.296875 0.125 -0.46875 0.375q-0.15625 0.25 -0.15625 0.546875q0 0.46875 0.34375 0.78125q0.359375 0.3125 1.046875 0.3125q0.671875 0 1.203125 -0.296875q0.53125 -0.296875 0.78125 -0.8125q0.1875 -0.390625 0.1875 -1.171875l0 -0.421875zm2.9749756 3.46875l0 -9.546875l1.171875 0l0 9.546875l-1.171875 0zm2.96109 0l0 -9.546875l1.171875 0l0 9.546875l-1.171875 0zm2.507965 -2.0625l1.15625 -0.1875q0.109375 0.703125 0.546875 1.078125q0.453125 0.359375 1.25 0.359375q0.8125 0 1.203125 -0.328125q0.390625 -0.328125 0.390625 -0.765625q0 -0.390625 -0.359375 -0.625q-0.234375 -0.15625 -1.1875 -0.390625q-1.296875 -0.328125 -1.796875 -0.5625q-0.484375 -0.25 -0.75 -0.65625q-0.25 -0.421875 -0.25 -0.9375q0 -0.453125 0.203125 -0.84375q0.21875 -0.40625 0.578125 -0.671875q0.28125 -0.1875 0.75 -0.328125q0.46875 -0.140625 1.015625 -0.140625q0.8125 0 1.421875 0.234375q0.609375 0.234375 0.90625 0.640625q0.296875 0.390625 0.40625 1.0625l-1.140625 0.15625q-0.078125 -0.53125 -0.453125 -0.828125q-0.375 -0.3125 -1.0625 -0.3125q-0.8125 0 -1.15625 0.265625q-0.34375 0.265625 -0.34375 0.625q0 0.234375 0.140625 0.421875q0.15625 0.1875 0.453125 0.3125q0.171875 0.0625 1.03125 0.296875q1.25 0.328125 1.734375 0.546875q0.5 0.203125 0.78125 0.609375q0.28125 0.40625 0.28125 1.0q0 0.59375 -0.34375 1.109375q-0.34375 0.515625 -1.0 0.796875q-0.640625 0.28125 -1.453125 0.28125q-1.34375 0 -2.046875 -0.5625q-0.703125 -0.5625 -0.90625 -1.65625z" fill-rule="nonzero"/><path fill="#fff2cc" d="m44.454067 95.40656l174.83464 0l0 48.850395l-174.83464 0z" fill-rule="evenodd"/><path stroke="#f1c232" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m44.454067 95.40656l174.83464 0l0 48.850395l-174.83464 0z" fill-rule="evenodd"/><path fill="#000000" d="m85.11598 121.51739l0 -1.578125l5.65625 0l0 
4.953125q-1.296875 1.046875 -2.6875 1.578125q-1.375 0.515625 -2.84375 0.515625q-1.96875 0 -3.578125 -0.84375q-1.609375 -0.84375 -2.421875 -2.4375q-0.8125 -1.59375 -0.8125 -3.5625q0 -1.953125 0.8125 -3.640625q0.8125 -1.6875 2.34375 -2.5q1.53125 -0.828125 3.515625 -0.828125q1.453125 0 2.625 0.46875q1.171875 0.46875 1.828125 1.3125q0.671875 0.828125 1.015625 2.171875l-1.59375 0.4375q-0.296875 -1.015625 -0.75 -1.59375q-0.4375 -0.59375 -1.265625 -0.9375q-0.828125 -0.34375 -1.84375 -0.34375q-1.203125 0 -2.09375 0.375q-0.890625 0.359375 -1.4375 0.96875q-0.53125 0.59375 -0.828125 1.3125q-0.515625 1.234375 -0.515625 2.6875q0 1.78125 0.609375 2.984375q0.625 1.203125 1.796875 1.796875q1.171875 0.578125 2.5 0.578125q1.140625 0 2.234375 -0.4375q1.09375 -0.453125 1.65625 -0.953125l0 -2.484375l-3.921875 0zm14.386429 5.234375l0 -1.421875q-1.125 1.640625 -3.0625 1.640625q-0.859375 0 -1.609375 -0.328125q-0.734375 -0.328125 -1.09375 -0.828125q-0.359375 -0.5 -0.5 -1.21875q-0.109375 -0.46875 -0.109375 -1.53125l0 -5.984375l1.640625 0l0 5.359375q0 1.28125 0.109375 1.734375q0.15625 0.640625 0.65625 1.015625q0.5 0.375 1.234375 0.375q0.734375 0 1.375 -0.375q0.65625 -0.390625 0.921875 -1.03125q0.265625 -0.65625 0.265625 -1.890625l0 -5.1875l1.640625 0l0 9.671875l-1.46875 0zm10.672592 -3.109375l1.6875 0.203125q-0.40625 1.484375 -1.484375 2.3125q-1.078125 0.8125 -2.765625 0.8125q-2.125 0 -3.375 -1.296875q-1.234375 -1.3125 -1.234375 -3.671875q0 -2.453125 1.25 -3.796875q1.265625 -1.34375 3.265625 -1.34375q1.9375 0 3.15625 1.328125q1.234375 1.3125 1.234375 3.703125q0 0.15625 0 0.4375l-7.21875 0q0.09375 1.59375 0.90625 2.453125q0.8125 0.84375 2.015625 0.84375q0.90625 0 1.546875 -0.46875q0.640625 -0.484375 1.015625 -1.515625zm-5.390625 -2.65625l5.40625 0q-0.109375 -1.21875 -0.625 -1.828125q-0.78125 -0.953125 -2.03125 -0.953125q-1.125 0 -1.90625 0.765625q-0.765625 0.75 -0.84375 2.015625zm8.485092 2.875l1.625 -0.25q0.125 0.96875 0.75 1.5q0.625 0.515625 1.75 0.515625q1.125 0 1.671875 -0.453125q0.546875 -0.46875 0.546875 -1.09375q0 -0.546875 -0.484375 -0.875q-0.328125 -0.21875 -1.671875 -0.546875q-1.8125 -0.46875 -2.515625 -0.796875q-0.6875 -0.328125 -1.046875 -0.90625q-0.359375 -0.59375 -0.359375 -1.3125q0 -0.640625 0.296875 -1.1875q0.296875 -0.5625 0.8125 -0.921875q0.375 -0.28125 1.03125 -0.46875q0.671875 -0.203125 1.421875 -0.203125q1.140625 0 2.0 0.328125q0.859375 0.328125 1.265625 0.890625q0.421875 0.5625 0.578125 1.5l-1.609375 0.21875q-0.109375 -0.75 -0.640625 -1.171875q-0.515625 -0.421875 -1.46875 -0.421875q-1.140625 0 -1.625 0.375q-0.46875 0.375 -0.46875 0.875q0 0.3125 0.1875 0.578125q0.203125 0.265625 0.640625 0.4375q0.234375 0.09375 1.4375 0.421875q1.75 0.453125 2.4375 0.75q0.6875 0.296875 1.078125 0.859375q0.390625 0.5625 0.390625 1.40625q0 0.828125 -0.484375 1.546875q-0.46875 0.71875 -1.375 1.125q-0.90625 0.390625 -2.046875 0.390625q-1.875 0 -2.875 -0.78125q-0.984375 -0.78125 -1.25 -2.328125zm13.5625 1.421875l0.234375 1.453125q-0.6875 0.140625 -1.234375 0.140625q-0.890625 0 -1.390625 -0.28125q-0.484375 -0.28125 -0.6875 -0.734375q-0.203125 -0.46875 -0.203125 -1.9375l0 -5.578125l-1.203125 0l0 -1.265625l1.203125 0l0 -2.390625l1.625 -0.984375l0 3.375l1.65625 0l0 1.265625l-1.65625 0l0 5.671875q0 0.6875 0.078125 0.890625q0.09375 0.203125 0.28125 0.328125q0.203125 0.109375 0.578125 0.109375q0.265625 0 0.71875 -0.0625zm6.9134827 1.46875l0 -13.359375l1.78125 0l0 6.625l6.625 -6.625l2.390625 0l-5.59375 5.421875l5.84375 7.9375l-2.328125 0l-4.765625 -6.765625l-2.171875 2.140625l0 4.625l-1.78125 0zm18.943573 -3.109375l1.6875 
0.203125q-0.40625 1.484375 -1.484375 2.3125q-1.078125 0.8125 -2.765625 0.8125q-2.125 0 -3.375 -1.296875q-1.234375 -1.3125 -1.234375 -3.671875q0 -2.453125 1.25 -3.796875q1.265625 -1.34375 3.265625 -1.34375q1.9375 0 3.15625 1.328125q1.234375 1.3125 1.234375 3.703125q0 0.15625 0 0.4375l-7.21875 0q0.09375 1.59375 0.90625 2.453125q0.8125 0.84375 2.015625 0.84375q0.90625 0 1.546875 -0.46875q0.640625 -0.484375 1.015625 -1.515625zm-5.390625 -2.65625l5.40625 0q-0.109375 -1.21875 -0.625 -1.828125q-0.78125 -0.953125 -2.03125 -0.953125q-1.125 0 -1.90625 0.765625q-0.765625 0.75 -0.84375 2.015625zm9.125717 5.765625l0 -9.671875l1.46875 0l0 1.46875q0.5625 -1.03125 1.03125 -1.359375q0.484375 -0.328125 1.0625 -0.328125q0.828125 0 1.6875 0.53125l-0.5625 1.515625q-0.609375 -0.359375 -1.203125 -0.359375q-0.546875 0 -0.96875 0.328125q-0.421875 0.328125 -0.609375 0.890625q-0.28125 0.875 -0.28125 1.921875l0 5.0625l-1.625 0zm6.228302 0l0 -9.671875l1.46875 0l0 1.375q1.0625 -1.59375 3.078125 -1.59375q0.875 0 1.609375 0.3125q0.734375 0.3125 1.09375 0.828125q0.375 0.5 0.515625 1.203125q0.09375 0.453125 0.09375 1.59375l0 5.953125l-1.640625 0l0 -5.890625q0 -1.0 -0.203125 -1.484375q-0.1875 -0.5 -0.671875 -0.796875q-0.484375 -0.296875 -1.140625 -0.296875q-1.046875 0 -1.8125 0.671875q-0.75 0.65625 -0.75 2.515625l0 5.28125l-1.640625 0zm17.000717 -3.109375l1.6875 0.203125q-0.40625 1.484375 -1.484375 2.3125q-1.078125 0.8125 -2.765625 0.8125q-2.125 0 -3.375 -1.296875q-1.234375 -1.3125 -1.234375 -3.671875q0 -2.453125 1.25 -3.796875q1.265625 -1.34375 3.265625 -1.34375q1.9375 0 3.15625 1.328125q1.234375 1.3125 1.234375 3.703125q0 0.15625 0 0.4375l-7.21875 0q0.09375 1.59375 0.90625 2.453125q0.8125 0.84375 2.015625 0.84375q0.90625 0 1.546875 -0.46875q0.640625 -0.484375 1.015625 -1.515625zm-5.390625 -2.65625l5.40625 0q-0.109375 -1.21875 -0.625 -1.828125q-0.78125 -0.953125 -2.03125 -0.953125q-1.125 0 -1.90625 0.765625q-0.765625 0.75 -0.84375 2.015625zm9.094467 5.765625l0 -13.359375l1.640625 0l0 13.359375l-1.640625 0z" fill-rule="nonzero"/><path fill="#000000" fill-opacity="0.0" d="m12.454068 161.83176l57.574802 0" fill-rule="evenodd"/><path stroke="#ff0000" stroke-width="2.0" stroke-linejoin="round" stroke-linecap="butt" d="m12.454068 161.83176l57.574802 0" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m193.71391 161.83176l60.7874 0" fill-rule="evenodd"/><path stroke="#ff0000" stroke-width="2.0" stroke-linejoin="round" stroke-linecap="butt" d="m193.71391 161.83176l60.7874 0" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m70.02887 145.80026l123.68504 0l0 32.06299l-123.68504 0z" fill-rule="evenodd"/><path fill="#000000" d="m87.09864 166.63176l-3.6875 -9.546875l1.359375 0l2.484375 6.9375q0.296875 0.828125 0.5 1.5625q0.21875 -0.78125 0.515625 -1.5625l2.578125 -6.9375l1.28125 0l-3.734375 9.546875l-1.296875 0zm6.0303802 -8.1875l0 -1.359375l1.171875 0l0 1.359375l-1.171875 0zm0 8.1875l0 -6.90625l1.171875 0l0 6.90625l-1.171875 0zm2.92984 0l0 -6.90625l1.0625 0l0 1.046875q0.40625 -0.734375 0.734375 -0.96875q0.34375 -0.234375 0.765625 -0.234375q0.59375 0 1.203125 0.375l-0.40625 1.078125q-0.4375 -0.25 -0.859375 -0.25q-0.390625 0 -0.703125 0.234375q-0.296875 0.234375 -0.421875 0.640625q-0.203125 0.625 -0.203125 1.359375l0 3.625l-1.171875 0zm7.0164948 -1.046875l0.171875 1.03125q-0.5 0.109375 -0.890625 0.109375q-0.640625 0 -1.0 -0.203125q-0.34375 -0.203125 -0.484375 -0.53125q-0.140625 -0.328125 -0.140625 -1.390625l0 -3.96875l-0.859375 0l0 -0.90625l0.859375 0l0 -1.71875l1.171875 -0.703125l0 2.421875l1.171875 0l0 
0.90625l-1.171875 0l0 4.046875q0 0.5 0.046875 0.640625q0.0625 0.140625 0.203125 0.234375q0.140625 0.078125 0.40625 0.078125q0.203125 0 0.515625 -0.046875zm5.6717377 1.046875l0 -1.015625q-0.8125 1.171875 -2.1875 1.171875q-0.609375 0 -1.140625 -0.234375q-0.53125 -0.234375 -0.796875 -0.578125q-0.25 -0.359375 -0.359375 -0.875q-0.0625 -0.34375 -0.0625 -1.09375l0 -4.28125l1.171875 0l0 3.828125q0 0.921875 0.0625 1.234375q0.109375 0.46875 0.46875 0.734375q0.359375 0.25 0.890625 0.25q0.515625 0 0.984375 -0.265625q0.46875 -0.265625 0.65625 -0.734375q0.1875 -0.46875 0.1875 -1.34375l0 -3.703125l1.171875 0l0 6.90625l-1.046875 0zm7.3968506 -0.859375q-0.65625 0.5625 -1.265625 0.796875q-0.59375 0.21875 -1.28125 0.21875q-1.140625 0 -1.75 -0.546875q-0.609375 -0.5625 -0.609375 -1.4375q0 -0.5 0.21875 -0.921875q0.234375 -0.421875 0.609375 -0.671875q0.375 -0.25 0.84375 -0.390625q0.34375 -0.078125 1.046875 -0.171875q1.421875 -0.171875 2.09375 -0.40625q0 -0.234375 0 -0.296875q0 -0.71875 -0.328125 -1.015625q-0.453125 -0.390625 -1.34375 -0.390625q-0.8125 0 -1.21875 0.296875q-0.390625 0.28125 -0.578125 1.015625l-1.140625 -0.15625q0.15625 -0.734375 0.515625 -1.1875q0.359375 -0.453125 1.03125 -0.6875q0.671875 -0.25 1.5625 -0.25q0.890625 0 1.4375 0.203125q0.5625 0.203125 0.8125 0.53125q0.265625 0.3125 0.375 0.796875q0.046875 0.296875 0.046875 1.078125l0 1.5625q0 1.625 0.078125 2.0625q0.078125 0.4375 0.296875 0.828125l-1.21875 0q-0.1875 -0.359375 -0.234375 -0.859375zm-0.09375 -2.609375q-0.640625 0.265625 -1.921875 0.4375q-0.71875 0.109375 -1.015625 0.25q-0.296875 0.125 -0.46875 0.375q-0.15625 0.25 -0.15625 0.546875q0 0.46875 0.34375 0.78125q0.359375 0.3125 1.046875 0.3125q0.671875 0 1.203125 -0.296875q0.53125 -0.296875 0.78125 -0.8125q0.1875 -0.390625 0.1875 -1.171875l0 -0.421875zm2.9749756 3.46875l0 -9.546875l1.171875 0l0 9.546875l-1.171875 0zm6.6797028 0l0 -9.546875l1.171875 0l0 3.421875q0.828125 -0.9375 2.0781174 -0.9375q0.765625 0 1.328125 0.296875q0.5625 0.296875 0.8125 0.84375q0.25 0.53125 0.25 1.546875l0 4.375l-1.171875 0l0 -4.375q0 -0.890625 -0.390625 -1.28125q-0.375 -0.40625 -1.078125 -0.40625q-0.515625 0 -0.9843674 0.28125q-0.453125 0.265625 -0.65625 0.734375q-0.1875 0.453125 -0.1875 1.265625l0 3.78125l-1.171875 0zm11.928093 -0.859375q-0.65625 0.5625 -1.265625 0.796875q-0.59375 0.21875 -1.28125 0.21875q-1.140625 0 -1.75 -0.546875q-0.609375 -0.5625 -0.609375 -1.4375q0 -0.5 0.21875 -0.921875q0.234375 -0.421875 0.609375 -0.671875q0.375 -0.25 0.84375 -0.390625q0.34375 -0.078125 1.046875 -0.171875q1.421875 -0.171875 2.09375 -0.40625q0 -0.234375 0 -0.296875q0 -0.71875 -0.328125 -1.015625q-0.453125 -0.390625 -1.34375 -0.390625q-0.8125 0 -1.21875 0.296875q-0.390625 0.28125 -0.578125 1.015625l-1.140625 -0.15625q0.15625 -0.734375 0.515625 -1.1875q0.359375 -0.453125 1.03125 -0.6875q0.671875 -0.25 1.5625 -0.25q0.890625 0 1.4375 0.203125q0.5625 0.203125 0.8125 0.53125q0.265625 0.3125 0.375 0.796875q0.046875 0.296875 0.046875 1.078125l0 1.5625q0 1.625 0.078125 2.0625q0.078125 0.4375 0.296875 0.828125l-1.21875 0q-0.1875 -0.359375 -0.234375 -0.859375zm-0.09375 -2.609375q-0.640625 0.265625 -1.921875 0.4375q-0.71875 0.109375 -1.015625 0.25q-0.296875 0.125 -0.46875 0.375q-0.15625 0.25 -0.15625 0.546875q0 0.46875 0.34375 0.78125q0.359375 0.3125 1.046875 0.3125q0.671875 0 1.203125 -0.296875q0.53125 -0.296875 0.78125 -0.8125q0.1875 -0.390625 0.1875 -1.171875l0 -0.421875zm2.9749756 3.46875l0 -6.90625l1.0625 0l0 1.046875q0.40625 -0.734375 0.734375 -0.96875q0.34375 -0.234375 0.765625 -0.234375q0.59375 0 1.203125 0.375l-0.40625 
1.078125q-0.4375 -0.25 -0.859375 -0.25q-0.390625 0 -0.703125 0.234375q-0.296875 0.234375 -0.421875 0.640625q-0.203125 0.625 -0.203125 1.359375l0 3.625l-1.171875 0zm8.93837 0l0 -0.875q-0.65625 1.03125 -1.9375 1.03125q-0.8125 0 -1.515625 -0.453125q-0.6875 -0.453125 -1.078125 -1.265625q-0.375 -0.828125 -0.375 -1.890625q0 -1.03125 0.34375 -1.875q0.34375 -0.84375 1.03125 -1.28125q0.703125 -0.453125 1.546875 -0.453125q0.625 0 1.109375 0.265625q0.5 0.25 0.796875 0.671875l0 -3.421875l1.171875 0l0 9.546875l-1.09375 0zm-3.703125 -3.453125q0 1.328125 0.5625 1.984375q0.5625 0.65625 1.328125 0.65625q0.765625 0 1.296875 -0.625q0.53125 -0.625 0.53125 -1.90625q0 -1.421875 -0.546875 -2.078125q-0.546875 -0.671875 -1.34375 -0.671875q-0.78125 0 -1.3125 0.640625q-0.515625 0.625 -0.515625 2.0zm7.9124756 3.453125l-2.125 -6.90625l1.21875 0l1.09375 3.984375l0.421875 1.484375q0.015625 -0.109375 0.359375 -1.421875l1.09375 -4.046875l1.203125 0l1.03125 4.0l0.34375 1.328125l0.40625 -1.34375l1.171875 -3.984375l1.140625 0l-2.15625 6.90625l-1.21875 0l-1.09375 -4.140625l-0.265625 -1.171875l-1.40625 5.3125l-1.21875 0zm12.859528 -0.859375q-0.65625 0.5625 -1.265625 0.796875q-0.59375 0.21875 -1.28125 0.21875q-1.140625 0 -1.75 -0.546875q-0.609375 -0.5625 -0.609375 -1.4375q0 -0.5 0.21875 -0.921875q0.234375 -0.421875 0.609375 -0.671875q0.375 -0.25 0.84375 -0.390625q0.34375 -0.078125 1.046875 -0.171875q1.421875 -0.171875 2.09375 -0.40625q0 -0.234375 0 -0.296875q0 -0.71875 -0.328125 -1.015625q-0.453125 -0.390625 -1.34375 -0.390625q-0.8125 0 -1.21875 0.296875q-0.390625 0.28125 -0.578125 1.015625l-1.140625 -0.15625q0.15625 -0.734375 0.515625 -1.1875q0.359375 -0.453125 1.03125 -0.6875q0.671875 -0.25 1.5625 -0.25q0.890625 0 1.4375 0.203125q0.5625 0.203125 0.8125 0.53125q0.265625 0.3125 0.375 0.796875q0.046875 0.296875 0.046875 1.078125l0 1.5625q0 1.625 0.078125 2.0625q0.078125 0.4375 0.296875 0.828125l-1.21875 0q-0.1875 -0.359375 -0.234375 -0.859375zm-0.09375 -2.609375q-0.640625 0.265625 -1.921875 0.4375q-0.71875 0.109375 -1.015625 0.25q-0.296875 0.125 -0.46875 0.375q-0.15625 0.25 -0.15625 0.546875q0 0.46875 0.34375 0.78125q0.359375 0.3125 1.046875 0.3125q0.671875 0 1.203125 -0.296875q0.53125 -0.296875 0.78125 -0.8125q0.1875 -0.390625 0.1875 -1.171875l0 -0.421875zm2.9749756 3.46875l0 -6.90625l1.0625 0l0 1.046875q0.40625 -0.734375 0.734375 -0.96875q0.34375 -0.234375 0.765625 -0.234375q0.59375 0 1.203125 0.375l-0.40625 1.078125q-0.4375 -0.25 -0.859375 -0.25q-0.390625 0 -0.703125 0.234375q-0.296875 0.234375 -0.421875 0.640625q-0.203125 0.625 -0.203125 1.359375l0 3.625l-1.171875 0zm9.18837 -2.21875l1.203125 0.140625q-0.28125 1.0625 -1.0625 1.65625q-0.765625 0.578125 -1.96875 0.578125q-1.515625 0 -2.40625 -0.9375q-0.890625 -0.9375 -0.890625 -2.609375q0 -1.75 0.890625 -2.703125q0.90625 -0.96875 2.34375 -0.96875q1.390625 0 2.265625 0.9375q0.875 0.9375 0.875 2.65625q0 0.109375 0 0.3125l-5.15625 0q0.0625 1.140625 0.640625 1.75q0.578125 0.59375 1.4375 0.59375q0.65625 0 1.109375 -0.328125q0.453125 -0.34375 0.71875 -1.078125zm-3.84375 -1.90625l3.859375 0q-0.078125 -0.859375 -0.4375 -1.296875q-0.5625 -0.6875 -1.453125 -0.6875q-0.8125 0 -1.359375 0.546875q-0.546875 0.53125 -0.609375 1.4375z" fill-rule="nonzero"/><path fill="#d9ead3" d="m44.454067 175.40657l174.83464 0l0 48.850388l-174.83464 0z" fill-rule="evenodd"/><path stroke="#93c47d" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m44.454067 175.40657l174.83464 0l0 48.850388l-174.83464 0z" fill-rule="evenodd"/><path fill="#000000" d="m115.3588 206.75175l-5.171875 
-13.359375l1.921875 0l3.46875 9.703125q0.421875 1.171875 0.703125 2.1875q0.3125 -1.09375 0.71875 -2.1875l3.609375 -9.703125l1.796875 0l-5.234375 13.359375l-1.8125 0zm8.584198 0l0 -13.359375l2.65625 0l3.1562424 9.453125q0.4375 1.328125 0.640625 1.984375q0.234375 -0.734375 0.703125 -2.140625l3.203125 -9.296875l2.375 0l0 13.359375l-1.703125 0l0 -11.171875l-3.875 11.171875l-1.59375 0l-3.8593674 -11.375l0 11.375l-1.703125 0zm15.540794 0l0 -13.359375l2.65625 0l3.15625 9.453125q0.4375 1.328125 0.640625 1.984375q0.234375 -0.734375 0.703125 -2.140625l3.203125 -9.296875l2.375 0l0 13.359375l-1.703125 0l0 -11.171875l-3.875 11.171875l-1.59375 0l-3.859375 -11.375l0 11.375l-1.703125 0z" fill-rule="nonzero"/><path fill="#000000" fill-opacity="0.0" d="m12.454068 239.1764l74.04724 0" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m12.454068 239.1764l74.04724 0" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m180.45407 239.1764l74.04724 0" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m180.45407 239.1764l74.04724 0" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m81.43044 222.81055l100.88189 0l0 32.06299l-100.88189 0z" fill-rule="evenodd"/><path fill="#000000" d="m95.06437 240.57954l1.203125 -0.109375q0.078125 0.71875 0.390625 1.1875q0.3125 0.453125 0.953125 0.734375q0.65625 0.28125 1.46875 0.28125q0.71875 0 1.265625 -0.21875q0.5625 -0.21875 0.828125 -0.578125q0.265625 -0.375 0.265625 -0.828125q0 -0.453125 -0.265625 -0.78125q-0.25 -0.328125 -0.84375 -0.5625q-0.390625 -0.15625 -1.703125 -0.46875q-1.3125 -0.3125 -1.84375 -0.59375q-0.671875 -0.359375 -1.015625 -0.890625q-0.328125 -0.53125 -0.328125 -1.1875q0 -0.71875 0.40625 -1.34375q0.40625 -0.625 1.1875 -0.953125q0.796875 -0.328125 1.765625 -0.328125q1.046875 0 1.859375 0.34375q0.8125 0.34375 1.25 1.015625q0.4375 0.65625 0.46875 1.484375l-1.203125 0.09375q-0.109375 -0.90625 -0.671875 -1.359375q-0.5625 -0.46875 -1.65625 -0.46875q-1.140625 0 -1.671875 0.421875q-0.515625 0.421875 -0.515625 1.015625q0 0.515625 0.359375 0.84375q0.375 0.328125 1.90625 0.6875q1.546875 0.34375 2.109375 0.59375q0.84375 0.390625 1.234375 0.984375q0.390625 0.578125 0.390625 1.359375q0 0.75 -0.4375 1.4375q-0.421875 0.671875 -1.25 1.046875q-0.8125 0.359375 -1.828125 0.359375q-1.296875 0 -2.171875 -0.375q-0.875 -0.375 -1.375 -1.125q-0.5 -0.765625 -0.53125 -1.71875zm9.12413 5.71875l-0.125 -1.09375q0.375 0.109375 0.65625 0.109375q0.390625 0 0.625 -0.140625q0.234375 -0.125 0.390625 -0.359375q0.109375 -0.171875 0.359375 -0.875q0.03125 -0.09375 0.109375 -0.28125l-2.625 -6.921875l1.265625 0l1.4375 4.0q0.28125 0.765625 0.5 1.59375q0.203125 -0.796875 0.46875 -1.578125l1.484375 -4.015625l1.171875 0l-2.625 7.015625q-0.421875 1.140625 -0.65625 1.578125q-0.3125 0.578125 -0.71875 0.84375q-0.40625 0.28125 -0.96875 0.28125q-0.328125 0 -0.75 -0.15625zm6.2421875 -4.71875l1.15625 -0.1875q0.109375 0.703125 0.546875 1.078125q0.453125 0.359375 1.25 0.359375q0.8125 0 1.203125 -0.328125q0.390625 -0.328125 0.390625 -0.765625q0 -0.390625 -0.359375 -0.625q-0.234375 -0.15625 -1.1875 -0.390625q-1.296875 -0.328125 -1.796875 -0.5625q-0.484375 -0.25 -0.75 -0.65625q-0.25 -0.421875 -0.25 -0.9375q0 -0.453125 0.203125 -0.84375q0.21875 -0.40625 0.578125 -0.671875q0.28125 -0.1875 0.75 -0.328125q0.46875 -0.140625 1.015625 -0.140625q0.8125 0 1.421875 0.234375q0.609375 0.234375 0.90625 0.640625q0.296875 0.390625 0.40625 1.0625l-1.140625 0.15625q-0.078125 -0.53125 
-0.453125 -0.828125q-0.375 -0.3125 -1.0625 -0.3125q-0.8125 0 -1.15625 0.265625q-0.34375 0.265625 -0.34375 0.625q0 0.234375 0.140625 0.421875q0.15625 0.1875 0.453125 0.3125q0.171875 0.0625 1.03125 0.296875q1.25 0.328125 1.734375 0.546875q0.5 0.203125 0.78125 0.609375q0.28125 0.40625 0.28125 1.0q0 0.59375 -0.34375 1.109375q-0.34375 0.515625 -1.0 0.796875q-0.640625 0.28125 -1.453125 0.28125q-1.34375 0 -2.046875 -0.5625q-0.703125 -0.5625 -0.90625 -1.65625zm9.6953125 1.015625l0.171875 1.03125q-0.5 0.109375 -0.890625 0.109375q-0.640625 0 -1.0 -0.203125q-0.34375 -0.203125 -0.484375 -0.53125q-0.140625 -0.328125 -0.140625 -1.390625l0 -3.96875l-0.859375 0l0 -0.90625l0.859375 0l0 -1.71875l1.171875 -0.703125l0 2.421875l1.171875 0l0 0.90625l-1.171875 0l0 4.046875q0 0.5 0.046875 0.640625q0.0625 0.140625 0.203125 0.234375q0.140625 0.078125 0.40625 0.078125q0.203125 0 0.515625 -0.046875zm5.8748627 -1.171875l1.203125 0.140625q-0.28125 1.0625 -1.0625 1.65625q-0.765625 0.578125 -1.96875 0.578125q-1.515625 0 -2.40625 -0.9375q-0.890625 -0.9375 -0.890625 -2.609375q0 -1.75 0.890625 -2.703125q0.90625 -0.96875 2.34375 -0.96875q1.390625 0 2.265625 0.9375q0.875 0.9375 0.875 2.65625q0 0.109375 0 0.3125l-5.15625 0q0.0625 1.140625 0.640625 1.75q0.578125 0.59375 1.4375 0.59375q0.65625 0 1.109375 -0.328125q0.453125 -0.34375 0.71875 -1.078125zm-3.84375 -1.90625l3.859375 0q-0.078125 -0.859375 -0.4375 -1.296875q-0.5625 -0.6875 -1.453125 -0.6875q-0.8125 0 -1.359375 0.546875q-0.546875 0.53125 -0.609375 1.4375zm6.521843 4.125l0 -6.90625l1.046875 0l0 0.96875q0.328125 -0.515625 0.859375 -0.8125q0.546875 -0.3125 1.234375 -0.3125q0.78125 0 1.265625 0.3125q0.484375 0.3125 0.6875 0.890625q0.828125 -1.203125 2.140625 -1.203125q1.03125 0 1.578125 0.578125q0.5625 0.5625 0.5625 1.734375l0 4.75l-1.171875 0l0 -4.359375q0 -0.703125 -0.125 -1.0q-0.109375 -0.3125 -0.40625 -0.5q-0.296875 -0.1875 -0.703125 -0.1875q-0.71875 0 -1.203125 0.484375q-0.484375 0.484375 -0.484375 1.546875l0 4.015625l-1.171875 0l0 -4.484375q0 -0.78125 -0.296875 -1.171875q-0.28125 -0.390625 -0.921875 -0.390625q-0.5 0 -0.921875 0.265625q-0.421875 0.25 -0.609375 0.75q-0.1875 0.5 -0.1875 1.453125l0 3.578125l-1.171875 0zm19.32106 -2.53125l1.15625 0.15625q-0.1875 1.1875 -0.96875 1.859375q-0.78125 0.671875 -1.921875 0.671875q-1.4062653 0 -2.2812653 -0.921875q-0.859375 -0.9375 -0.859375 -2.65625q0 -1.125 0.375 -1.96875q0.375 -0.84375 1.125 -1.25q0.765625 -0.421875 1.6562653 -0.421875q1.125 0 1.84375 0.578125q0.71875 0.5625 0.921875 1.609375l-1.140625 0.171875q-0.171875 -0.703125 -0.59375 -1.046875q-0.40625 -0.359375 -0.984375 -0.359375q-0.890625 0 -1.4531403 0.640625q-0.546875 0.640625 -0.546875 2.0q0 1.40625 0.53125 2.03125q0.546875 0.625 1.4062653 0.625q0.6875 0 1.140625 -0.421875q0.46875 -0.421875 0.59375 -1.296875zm6.6640625 1.671875q-0.65625 0.5625 -1.265625 0.796875q-0.59375 0.21875 -1.28125 0.21875q-1.140625 0 -1.75 -0.546875q-0.609375 -0.5625 -0.609375 -1.4375q0 -0.5 0.21875 -0.921875q0.234375 -0.421875 0.609375 -0.671875q0.375 -0.25 0.84375 -0.390625q0.34375 -0.078125 1.046875 -0.171875q1.421875 -0.171875 2.09375 -0.40625q0 -0.234375 0 -0.296875q0 -0.71875 -0.328125 -1.015625q-0.453125 -0.390625 -1.34375 -0.390625q-0.8125 0 -1.21875 0.296875q-0.390625 0.28125 -0.578125 1.015625l-1.140625 -0.15625q0.15625 -0.734375 0.515625 -1.1875q0.359375 -0.453125 1.03125 -0.6875q0.671875 -0.25 1.5625 -0.25q0.890625 0 1.4375 0.203125q0.5625 0.203125 0.8125 0.53125q0.265625 0.3125 0.375 0.796875q0.046875 0.296875 0.046875 1.078125l0 1.5625q0 1.625 0.078125 2.0625q0.078125 0.4375 
0.296875 0.828125l-1.21875 0q-0.1875 -0.359375 -0.234375 -0.859375zm-0.09375 -2.609375q-0.640625 0.265625 -1.921875 0.4375q-0.71875 0.109375 -1.015625 0.25q-0.296875 0.125 -0.46875 0.375q-0.15625 0.25 -0.15625 0.546875q0 0.46875 0.34375 0.78125q0.359375 0.3125 1.046875 0.3125q0.671875 0 1.203125 -0.296875q0.53125 -0.296875 0.78125 -0.8125q0.1875 -0.390625 0.1875 -1.171875l0 -0.421875zm2.9749756 3.46875l0 -9.546875l1.171875 0l0 9.546875l-1.171875 0zm2.96109 0l0 -9.546875l1.171875 0l0 9.546875l-1.171875 0zm2.507965 -2.0625l1.15625 -0.1875q0.109375 0.703125 0.546875 1.078125q0.453125 0.359375 1.25 0.359375q0.8125 0 1.203125 -0.328125q0.390625 -0.328125 0.390625 -0.765625q0 -0.390625 -0.359375 -0.625q-0.234375 -0.15625 -1.1875 -0.390625q-1.296875 -0.328125 -1.796875 -0.5625q-0.484375 -0.25 -0.75 -0.65625q-0.25 -0.421875 -0.25 -0.9375q0 -0.453125 0.203125 -0.84375q0.21875 -0.40625 0.578125 -0.671875q0.28125 -0.1875 0.75 -0.328125q0.46875 -0.140625 1.015625 -0.140625q0.8125 0 1.421875 0.234375q0.609375 0.234375 0.90625 0.640625q0.296875 0.390625 0.40625 1.0625l-1.140625 0.15625q-0.078125 -0.53125 -0.453125 -0.828125q-0.375 -0.3125 -1.0625 -0.3125q-0.8125 0 -1.15625 0.265625q-0.34375 0.265625 -0.34375 0.625q0 0.234375 0.140625 0.421875q0.15625 0.1875 0.453125 0.3125q0.171875 0.0625 1.03125 0.296875q1.25 0.328125 1.734375 0.546875q0.5 0.203125 0.78125 0.609375q0.28125 0.40625 0.28125 1.0q0 0.59375 -0.34375 1.109375q-0.34375 0.515625 -1.0 0.796875q-0.640625 0.28125 -1.453125 0.28125q-1.34375 0 -2.046875 -0.5625q-0.703125 -0.5625 -0.90625 -1.65625z" fill-rule="nonzero"/><path fill="#cfe2f3" d="m44.454067 252.7512l174.83464 0l0 48.850388l-174.83464 0z" fill-rule="evenodd"/><path stroke="#6d9eeb" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m44.454067 252.7512l174.83464 0l0 48.850388l-174.83464 0z" fill-rule="evenodd"/><path fill="#000000" d="m84.63558 284.0964l0 -13.359375l1.765625 0l0 5.484375l6.9375 0l0 -5.484375l1.765625 0l0 13.359375l-1.765625 0l0 -6.296875l-6.9375 0l0 6.296875l-1.765625 0zm12.597946 -4.84375q0 -2.6875 1.484375 -3.96875q1.25 -1.078125 3.046875 -1.078125q2.0 0 3.265625 1.3125q1.265625 1.296875 1.265625 3.609375q0 1.859375 -0.5625 2.9375q-0.5625 1.0625 -1.640625 1.65625q-1.0625 0.59375 -2.328125 0.59375q-2.03125 0 -3.28125 -1.296875q-1.25 -1.3125 -1.25 -3.765625zm1.6875 0q0 1.859375 0.796875 2.796875q0.8125 0.921875 2.046875 0.921875q1.21875 0 2.03125 -0.921875q0.8125 -0.9375 0.8125 -2.84375q0 -1.796875 -0.8125 -2.71875q-0.8125 -0.921875 -2.03125 -0.921875q-1.234375 0 -2.046875 0.921875q-0.796875 0.90625 -0.796875 2.765625zm8.641342 1.953125l1.625 -0.25q0.125 0.96875 0.75 1.5q0.625 0.515625 1.75 0.515625q1.125 0 1.671875 -0.453125q0.546875 -0.46875 0.546875 -1.09375q0 -0.546875 -0.484375 -0.875q-0.328125 -0.21875 -1.671875 -0.546875q-1.8125 -0.46875 -2.515625 -0.796875q-0.6875 -0.328125 -1.046875 -0.90625q-0.359375 -0.59375 -0.359375 -1.3125q0 -0.640625 0.296875 -1.1875q0.296875 -0.5625 0.8125 -0.921875q0.375 -0.28125 1.03125 -0.46875q0.671875 -0.203125 1.421875 -0.203125q1.140625 0 2.0 0.328125q0.859375 0.328125 1.265625 0.890625q0.421875 0.5625 0.578125 1.5l-1.609375 0.21875q-0.109375 -0.75 -0.640625 -1.171875q-0.515625 -0.421875 -1.46875 -0.421875q-1.140625 0 -1.625 0.375q-0.46875 0.375 -0.46875 0.875q0 0.3125 0.1875 0.578125q0.203125 0.265625 0.640625 0.4375q0.234375 0.09375 1.4375 0.421875q1.75 0.453125 2.4375 0.75q0.6875 0.296875 1.078125 0.859375q0.390625 0.5625 0.390625 1.40625q0 0.828125 -0.484375 1.546875q-0.46875 0.71875 -1.375 1.125q-0.90625 
0.390625 -2.046875 0.390625q-1.875 0 -2.875 -0.78125q-0.984375 -0.78125 -1.25 -2.328125zm13.5625 1.421875l0.234375 1.453125q-0.6875 0.140625 -1.234375 0.140625q-0.890625 0 -1.390625 -0.28125q-0.484375 -0.28125 -0.6875 -0.734375q-0.203125 -0.46875 -0.203125 -1.9375l0 -5.578125l-1.203125 0l0 -1.265625l1.203125 0l0 -2.390625l1.625 -0.984375l0 3.375l1.65625 0l0 1.265625l-1.65625 0l0 5.671875q0 0.6875 0.078125 0.890625q0.09375 0.203125 0.28125 0.328125q0.203125 0.109375 0.578125 0.109375q0.265625 0 0.71875 -0.0625zm6.913475 1.46875l0 -13.359375l1.78125 0l0 6.625l6.625 -6.625l2.390625 0l-5.59375 5.421875l5.84375 7.9375l-2.328125 0l-4.765625 -6.765625l-2.171875 2.140625l0 4.625l-1.78125 0zm18.943573 -3.109375l1.6875 0.203125q-0.40625 1.484375 -1.484375 2.3125q-1.078125 0.8125 -2.765625 0.8125q-2.125 0 -3.375 -1.296875q-1.234375 -1.3125 -1.234375 -3.671875q0 -2.453125 1.25 -3.796875q1.265625 -1.34375 3.265625 -1.34375q1.9375 0 3.15625 1.328125q1.234375 1.3125 1.234375 3.703125q0 0.15625 0 0.4375l-7.21875 0q0.09375 1.59375 0.90625 2.453125q0.8125 0.84375 2.015625 0.84375q0.90625 0 1.546875 -0.46875q0.640625 -0.484375 1.015625 -1.515625zm-5.390625 -2.65625l5.40625 0q-0.109375 -1.21875 -0.625 -1.828125q-0.78125 -0.953125 -2.03125 -0.953125q-1.125 0 -1.90625 0.765625q-0.765625 0.75 -0.84375 2.015625zm9.125717 5.765625l0 -9.671875l1.46875 0l0 1.46875q0.5625 -1.03125 1.03125 -1.359375q0.484375 -0.328125 1.0625 -0.328125q0.828125 0 1.6875 0.53125l-0.5625 1.515625q-0.609375 -0.359375 -1.203125 -0.359375q-0.546875 0 -0.96875 0.328125q-0.421875 0.328125 -0.609375 0.890625q-0.28125 0.875 -0.28125 1.921875l0 5.0625l-1.625 0zm6.228302 0l0 -9.671875l1.46875 0l0 1.375q1.0625 -1.59375 3.078125 -1.59375q0.875 0 1.609375 0.3125q0.734375 0.3125 1.09375 0.828125q0.375 0.5 0.515625 1.203125q0.09375 0.453125 0.09375 1.59375l0 5.953125l-1.640625 0l0 -5.890625q0 -1.0 -0.203125 -1.484375q-0.1875 -0.5 -0.671875 -0.796875q-0.484375 -0.296875 -1.140625 -0.296875q-1.046875 0 -1.8125 0.671875q-0.75 0.65625 -0.75 2.515625l0 5.28125l-1.640625 0zm17.000732 -3.109375l1.6875 0.203125q-0.40625 1.484375 -1.484375 2.3125q-1.078125 0.8125 -2.7656403 0.8125q-2.125 0 -3.375 -1.296875q-1.234375 -1.3125 -1.234375 -3.671875q0 -2.453125 1.25 -3.796875q1.265625 -1.34375 3.265625 -1.34375q1.9375153 0 3.1562653 1.328125q1.234375 1.3125 1.234375 3.703125q0 0.15625 0 0.4375l-7.2187653 0q0.09375 1.59375 0.90625 2.453125q0.8125 0.84375 2.015625 0.84375q0.90625 0 1.5468903 -0.46875q0.640625 -0.484375 1.015625 -1.515625zm-5.3906403 -2.65625l5.4062653 0q-0.109375 -1.21875 -0.625 -1.828125q-0.78125 -0.953125 -2.0312653 -0.953125q-1.125 0 -1.90625 0.765625q-0.765625 0.75 -0.84375 2.015625zm9.094467 5.765625l0 -13.359375l1.640625 0l0 13.359375l-1.640625 0z" fill-rule="nonzero"/><path fill="#000000" fill-opacity="0.0" d="m12.454068 319.17642l74.04724 0" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m12.454068 319.17642l74.04724 0" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m180.45407 319.17642l74.04724 0" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m180.45407 319.17642l74.04724 0" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m81.43044 302.81055l100.88189 0l0 32.06299l-100.88189 0z" fill-rule="evenodd"/><path fill="#000000" d="m104.04542 323.64203l0 -9.546875l1.265625 0l0 3.921875l4.953125 0l0 -3.921875l1.265625 0l0 9.546875l-1.265625 0l0 -4.5l-4.953125 0l0 4.5l-1.265625 0zm13.953278 
-0.859375q-0.65625 0.5625 -1.265625 0.796875q-0.59375 0.21875 -1.28125 0.21875q-1.140625 0 -1.75 -0.546875q-0.609375 -0.5625 -0.609375 -1.4375q0 -0.5 0.21875 -0.921875q0.234375 -0.421875 0.609375 -0.671875q0.375 -0.25 0.84375 -0.390625q0.34375 -0.078125 1.046875 -0.171875q1.421875 -0.171875 2.09375 -0.40625q0 -0.234375 0 -0.296875q0 -0.71875 -0.328125 -1.015625q-0.453125 -0.390625 -1.34375 -0.390625q-0.8125 0 -1.21875 0.296875q-0.390625 0.28125 -0.578125 1.015625l-1.140625 -0.15625q0.15625 -0.734375 0.515625 -1.1875q0.359375 -0.453125 1.03125 -0.6875q0.671875 -0.25 1.5625 -0.25q0.890625 0 1.4375 0.203125q0.5625 0.203125 0.8125 0.53125q0.265625 0.3125 0.375 0.796875q0.046875 0.296875 0.046875 1.078125l0 1.5625q0 1.625 0.078125 2.0625q0.078125 0.4375 0.296875 0.828125l-1.21875 0q-0.1875 -0.359375 -0.234375 -0.859375zm-0.09375 -2.609375q-0.640625 0.265625 -1.921875 0.4375q-0.71875 0.109375 -1.015625 0.25q-0.296875 0.125 -0.46875 0.375q-0.15625 0.25 -0.15625 0.546875q0 0.46875 0.34375 0.78125q0.359375 0.3125 1.046875 0.3125q0.671875 0 1.203125 -0.296875q0.53125 -0.296875 0.78125 -0.8125q0.1875 -0.390625 0.1875 -1.171875l0 -0.421875zm2.9749756 3.46875l0 -6.90625l1.0625 0l0 1.046875q0.40625 -0.734375 0.734375 -0.96875q0.34375 -0.234375 0.765625 -0.234375q0.59375 0 1.203125 0.375l-0.40625 1.078125q-0.4375 -0.25 -0.859375 -0.25q-0.390625 0 -0.703125 0.234375q-0.296875 0.234375 -0.421875 0.640625q-0.203125 0.625 -0.203125 1.359375l0 3.625l-1.171875 0zm8.938362 0l0 -0.875q-0.65625 1.03125 -1.9374924 1.03125q-0.8125 0 -1.515625 -0.453125q-0.6875 -0.453125 -1.078125 -1.265625q-0.375 -0.828125 -0.375 -1.890625q0 -1.03125 0.34375 -1.875q0.34375 -0.84375 1.03125 -1.28125q0.703125 -0.453125 1.546875 -0.453125q0.6249924 0 1.1093674 0.265625q0.5 0.25 0.796875 0.671875l0 -3.421875l1.171875 0l0 9.546875l-1.09375 0zm-3.7031174 -3.453125q0 1.328125 0.5625 1.984375q0.5625 0.65625 1.3281174 0.65625q0.765625 0 1.296875 -0.625q0.53125 -0.625 0.53125 -1.90625q0 -1.421875 -0.546875 -2.078125q-0.546875 -0.671875 -1.3437424 -0.671875q-0.78125 0 -1.3125 0.640625q-0.515625 0.625 -0.515625 2.0zm7.912468 3.453125l-2.125 -6.90625l1.21875 0l1.09375 3.984375l0.421875 1.484375q0.015625 -0.109375 0.359375 -1.421875l1.09375 -4.046875l1.203125 0l1.03125 4.0l0.34375 1.328125l0.40625 -1.34375l1.171875 -3.984375l1.140625 0l-2.15625 6.90625l-1.21875 0l-1.09375 -4.140625l-0.265625 -1.171875l-1.40625 5.3125l-1.21875 0zm12.859543 -0.859375q-0.65625 0.5625 -1.265625 0.796875q-0.59376526 0.21875 -1.2812653 0.21875q-1.140625 0 -1.75 -0.546875q-0.609375 -0.5625 -0.609375 -1.4375q0 -0.5 0.21875 -0.921875q0.234375 -0.421875 0.609375 -0.671875q0.375 -0.25 0.84375 -0.390625q0.34375 -0.078125 1.046875 -0.171875q1.4218903 -0.171875 2.0937653 -0.40625q0 -0.234375 0 -0.296875q0 -0.71875 -0.328125 -1.015625q-0.453125 -0.390625 -1.3437653 -0.390625q-0.8125 0 -1.21875 0.296875q-0.390625 0.28125 -0.578125 1.015625l-1.140625 -0.15625q0.15625 -0.734375 0.515625 -1.1875q0.359375 -0.453125 1.03125 -0.6875q0.671875 -0.25 1.5625 -0.25q0.89064026 0 1.4375153 0.203125q0.5625 0.203125 0.8125 0.53125q0.265625 0.3125 0.375 0.796875q0.046875 0.296875 0.046875 1.078125l0 1.5625q0 1.625 0.078125 2.0625q0.078125 0.4375 0.296875 0.828125l-1.21875 0q-0.1875 -0.359375 -0.234375 -0.859375zm-0.09375 -2.609375q-0.640625 0.265625 -1.9218903 0.4375q-0.71875 0.109375 -1.015625 0.25q-0.296875 0.125 -0.46875 0.375q-0.15625 0.25 -0.15625 0.546875q0 0.46875 0.34375 0.78125q0.359375 0.3125 1.046875 0.3125q0.671875 0 1.2031403 -0.296875q0.53125 -0.296875 0.78125 -0.8125q0.1875 
-0.390625 0.1875 -1.171875l0 -0.421875zm2.9749756 3.46875l0 -6.90625l1.0625 0l0 1.046875q0.40625 -0.734375 0.734375 -0.96875q0.34375 -0.234375 0.765625 -0.234375q0.59375 0 1.203125 0.375l-0.40625 1.078125q-0.4375 -0.25 -0.859375 -0.25q-0.390625 0 -0.703125 0.234375q-0.296875 0.234375 -0.421875 0.640625q-0.203125 0.625 -0.203125 1.359375l0 3.625l-1.171875 0zm9.18837 -2.21875l1.203125 0.140625q-0.28125 1.0625 -1.0625 1.65625q-0.765625 0.578125 -1.96875 0.578125q-1.515625 0 -2.40625 -0.9375q-0.890625 -0.9375 -0.890625 -2.609375q0 -1.75 0.890625 -2.703125q0.90625 -0.96875 2.34375 -0.96875q1.390625 0 2.265625 0.9375q0.875 0.9375 0.875 2.65625q0 0.109375 0 0.3125l-5.15625 0q0.0625 1.140625 0.640625 1.75q0.578125 0.59375 1.4375 0.59375q0.65625 0 1.109375 -0.328125q0.453125 -0.34375 0.71875 -1.078125zm-3.84375 -1.90625l3.859375 0q-0.078125 -0.859375 -0.4375 -1.296875q-0.5625 -0.6875 -1.453125 -0.6875q-0.8125 0 -1.359375 0.546875q-0.546875 0.53125 -0.609375 1.4375z" fill-rule="nonzero"/><defs><linearGradient id="p.1" gradientUnits="userSpaceOnUse" gradientTransform="matrix(4.545553100086654 0.0 0.0 4.545553100086654 0.0 0.0)" spreadMethod="pad" x1="9.954639806354566" y1="38.70166210013951" x2="9.95462288989064" y2="43.24721520019468"><stop offset="0.0" stop-color="#ff0000"/><stop offset="0.51" stop-color="#dab7a6"/><stop offset="0.99999994" stop-color="#dab7a6" stop-opacity="0.0"/><stop offset="1.0" stop-color="#ffffff" stop-opacity="0.0"/></linearGradient></defs><path fill="url(#p.1)" d="m45.249344 175.92108l173.29134 0l0 20.661423l-173.29134 0z" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m280.4455 190.06865l129.5748 -74.83464l20.629913 35.74803l-129.5748 74.83464z" fill-rule="evenodd"/><path fill="#000000" d="m295.51392 196.73558l1.1823425 -0.82717896q0.51071167 0.6974335 1.1166077 0.9970703q0.5980835 0.28611755 1.4464111 0.1931305q0.84054565 -0.10652161 1.6794434 -0.5910187q0.75772095 -0.4376068 1.2010193 -0.9823456q0.44906616 -0.5660858 0.50097656 -1.1013031q0.057678223 -0.55656433 -0.20785522 -1.0166931q-0.27334595 -0.47366333 -0.7392273 -0.6557007q-0.47366333 -0.1955719 -1.2366333 -0.079711914q-0.478302 0.07775879 -2.032318 0.54222107q-1.5618286 0.45092773 -2.2805786 0.48712158q-0.9222717 0.027420044 -1.5864563 -0.31072998q-0.6719971 -0.35168457 -1.0703125 -1.0418701q-0.4295349 -0.74432373 -0.38497925 -1.6361694q0.05029297 -0.9131775 0.6668701 -1.7203827q0.63012695 -0.81500244 1.6313782 -1.3932648q1.1095276 -0.64079285 2.1592712 -0.7598877q1.0419617 -0.1326294 1.8867493 0.29968262q0.8583679 0.4244995 1.3987732 1.2671814l-1.2036743 0.82147217q-0.64712524 -0.87127686 -1.5022583 -1.0089111q-0.8629761 -0.15116882 -2.013092 0.51304626q-1.1906738 0.68766785 -1.4819641 1.4333191q-0.2913208 0.745636 0.06793213 1.3681641q0.30459595 0.52778625 0.8865051 0.6608429q0.5740967 0.119522095 2.3815613 -0.43717957q1.8210144 -0.5645294 2.5725403 -0.6376953q1.0924377 -0.107666016 1.857605 0.28042603q0.77090454 0.366745 1.2316895 1.1652069q0.4529724 0.78492737 0.39904785 1.7543335q-0.040405273 0.9616089 -0.6663208 1.8463745q-0.62594604 0.8847656 -1.6813354 1.4942932q-1.3530579 0.78144836 -2.486084 0.9125519q-1.1408386 0.11756897 -2.1214905 -0.3625946q-0.96713257 -0.48797607 -1.5721436 -1.4738007zm13.40155 -4.9431458l0.8006897 0.98106384q-0.45169067 0.4052124 -0.857605 0.6396332q-0.6629944 0.3829193 -1.1454773 0.39089966q-0.4902649 -0.0055389404 -0.8343506 -0.25790405q-0.3518982 -0.26589966 -0.9844971 -1.3620911l-2.382019 -4.1276093l-0.8930054 0.5157471l-0.5466919 -0.9473114l0.8930054 
-0.5157623l-1.0308838 -1.786377l0.79599 -1.434082l1.4526367 2.5171661l1.2177734 -0.7033081l0.5466919 0.94732666l-1.2177734 0.70329285l2.4210815 4.195282q0.30456543 0.5278015 0.4446106 0.645401q0.15356445 0.109802246 0.35705566 0.11857605q0.19570923 -0.004760742 0.4663086 -0.16105652q0.20297241 -0.11721802 0.49645996 -0.35888672zm1.8165283 0.41241455l-4.147064 -7.1861115l1.0959778 -0.6329651l0.6247864 1.0826569q-0.0178833 -1.0001068 0.19332886 -1.4468842q0.21121216 -0.44676208 0.64419556 -0.6968231q0.6088562 -0.35165405 1.471283 -0.3264618l0.22875977 1.3654938q-0.59487915 7.4768066E-4 -1.0413818 0.25862122q-0.39239502 0.22662354 -0.5765381 0.6577606q-0.17843628 0.40979004 -0.06384277 0.92100525q0.17193604 0.7667999 0.61709595 1.5381927l2.1711426 3.7622223l-1.2177429 0.70329285zm2.0899658 -6.006668q-1.1480408 -1.9893646 -0.5930481 -3.5910034q0.47280884 -1.3376465 1.7988281 -2.1034698q1.4883423 -0.8595886 2.984253 -0.4243927q1.501648 0.41384888 2.4934998 2.132553q0.79660034 1.3803864 0.83795166 2.4210815q0.04135132 1.0407104 -0.49923706 1.948349q-0.5348511 0.88630676 -1.4819946 1.4333191q-1.5018921 0.8674011 -2.9899902 0.44573975q-1.4959106 -0.43519592 -2.5502625 -2.2621765zm1.2583313 -0.72673035q0.79663086 1.3803711 1.7902527 1.7267303q1.0072021 0.33854675 1.9137268 -0.18502808q0.9065552 -0.5235596 1.1036072 -1.5575867q0.21057129 -1.0418396 -0.60946655 -2.4628143q-0.76538086 -1.3262482 -1.7725525 -1.6647949q-0.99365234 -0.34635925 -1.9002075 0.17720032q-0.90652466 0.5235748 -1.117096 1.5654144q-0.2048645 1.0204926 0.59173584 2.400879zm8.984772 -0.38945007l-4.1470337 -7.1861115l1.0959778 -0.6329651l0.5857239 1.0149841q0.10531616 -1.6306152 1.6072083 -2.4980164q0.6494751 -0.37509155 1.3234558 -0.45761108q0.6739807 -0.08250427 1.163269 0.14013672q0.48928833 0.22264099 0.9021301 0.687912q0.26287842 0.29925537 0.74710083 1.1383057l2.553833 4.425354l-1.2177429 0.70329285l-2.522583 -4.371216q-0.42956543 -0.74432373 -0.7892456 -1.0237579q-0.3540039 -0.30078125 -0.8442688 -0.3063202q-0.47677612 -0.01335144 -0.9638672 0.2679596q-0.7847595 0.45324707 -1.0640869 1.2821655q-0.2735901 0.80758667 0.52301025 2.187973l2.264862 3.9246216l-1.2177429 0.70329285zm7.819275 -3.7220154l1.2922058 -0.511734q0.3878479 0.51579285 0.8666992 0.5640259q0.65527344 0.072631836 1.4400635 -0.38059998q0.8388977 -0.48449707 1.1036682 -1.0885162q0.26480103 -0.60401917 0.07571411 -1.306778q-0.1161499 -0.42010498 -0.81121826 -1.6245575q-0.25161743 1.408371 -1.4423218 2.096054q-1.4883423 0.85957336 -2.9171448 0.25932312q-1.428833 -0.60025024 -2.2957153 -2.1024323q-0.5935669 -1.0285187 -0.72805786 -2.1056366q-0.12097168 -1.0849152 0.30926514 -1.9649353q0.4437561 -0.8878174 1.3908997 -1.4348297q1.2718811 -0.7345581 2.690796 -0.182724l-0.4998474 -0.8661194l1.1230469 -0.6485901l3.5847168 6.2117157q0.9684448 1.6781158 1.0362854 2.5771942q0.067840576 0.8990936 -0.4420166 1.7348785q-0.5098877 0.8357849 -1.5923462 1.4609375q-1.2854004 0.7423706 -2.4195251 0.62150574q-1.1206055 -0.12869263 -1.7651672 -1.3081818zm-1.4765625 -4.9031525q0.81222534 1.4074402 1.7418518 1.7366486q0.94314575 0.32138062 1.7820435 -0.16311646q0.8388977 -0.4844818 1.0401001 -1.4487457q0.19342041 -0.97779846 -0.60317993 -2.3581848q-0.76538086 -1.3262482 -1.7298889 -1.6533508q-0.97229004 -0.3406372 -1.7976685 0.1360321q-0.8118286 0.46887207 -0.9974365 1.4602051q-0.1855774 0.991333 0.56417847 2.290512z" fill-rule="nonzero"/><path fill="#000000" d="m302.23132 207.68793l-0.80441284 -1.3939209l1.2177429 -0.70329285l0.80441284 1.3939056l-1.2177429 0.7033081zm4.920227 
8.525894l-4.147064 -7.1861115l1.2177734 -0.7033081l4.1470337 7.1861115l-1.2177429 0.7033081zm1.3493347 -3.6482391l1.0948792 -0.88494873q0.51641846 0.6760864 1.2029724 0.80285645q0.6922302 0.10542297 1.5176086 -0.3712616q0.8388672 -0.4844818 1.0495605 -1.057251q0.20285034 -0.58628845 -0.062683105 -1.0464172q-0.23431396 -0.4059906 -0.7266846 -0.4464264q-0.35079956 -0.013916016 -1.4790955 0.3129425q-1.5347595 0.43530273 -2.2030334 0.49645996q-0.66256714 0.03982544 -1.183075 -0.23695374q-0.5069885 -0.28459167 -0.8115845 -0.8123779q-0.28115845 -0.48719788 -0.2989807 -1.018219q-0.017791748 -0.5310211 0.2048645 -1.0204926q0.15917969 -0.3806305 0.56817627 -0.797287q0.4147339 -0.43800354 0.9694824 -0.75839233q0.852417 -0.49230957 1.6289368 -0.61598206q0.7765198 -0.123687744 1.3162842 0.123931885q0.5455017 0.22625732 1.0733948 0.85964966l-1.0969849 0.85006714q-0.4013672 -0.5079651 -0.9733887 -0.59262085q-0.5720215 -0.0846405 -1.2756042 0.3217163q-0.8388977 0.4844818 -1.0402222 0.9796753q-0.19558716 0.47383118 0.015289307 0.8392334q0.14056396 0.24359131 0.39874268 0.34710693q0.2581787 0.103500366 0.64749146 0.05909729q0.22845459 -0.041732788 1.2620544 -0.31388855q1.4806519 -0.40403748 2.127594 -0.470932q0.6390991 -0.08041382 1.1653442 0.17501831q0.5397949 0.24760437 0.89904785 0.87013245q0.35144043 0.60899353 0.29852295 1.3613129q-0.047210693 0.73095703 -0.5519409 1.4194183q-0.49118042 0.68063354 -1.3300476 1.1651306q-1.407196 0.81269836 -2.4736633 0.6527405q-1.0664673 -0.15994263 -1.933258 -1.193039zm6.1190186 -5.4646606q-1.1480408 -1.9893799 -0.5930481 -3.5910187q0.47283936 -1.3376465 1.7988281 -2.1034698q1.4883423 -0.85957336 2.984253 -0.4243927q1.501648 0.41384888 2.4934998 2.132553q0.79663086 1.3803864 0.83795166 2.4210968q0.04135132 1.0406952 -0.49923706 1.948349q-0.5348511 0.8862915 -1.4819946 1.4333038q-1.5018921 0.8674011 -2.9899902 0.44573975q-1.4959106 -0.43519592 -2.5502625 -2.2621613zm1.2583313 -0.7267456q0.79663086 1.3803864 1.7902832 1.7267303q1.0071716 0.33854675 1.9136963 -0.18501282q0.9065552 -0.5235748 1.1036072 -1.5576019q0.21057129 -1.0418396 -0.60946655 -2.4628143q-0.76538086 -1.3262482 -1.7725525 -1.6647949q-0.99365234 -0.34635925 -1.9002075 0.17721558q-0.90652466 0.5235596 -1.117096 1.5653992q-0.2048645 1.0204926 0.59173584 2.400879zm8.957733 -0.3738098l-5.7246704 -9.919815l1.2177734 -0.70329285l5.72464 9.9198l-1.2177429 0.7033081zm7.2713623 -5.390396q-0.34069824 0.972641 -0.8225403 1.5756989q-0.48962402 0.5895386 -1.2067566 1.0036926q-1.1906738 0.6876831 -2.162445 0.47302246q-0.9717407 -0.21464539 -1.4872131 -1.1078339q-0.30456543 -0.52778625 -0.3109436 -1.1015167q-0.014190674 -0.58724976 0.21627808 -1.0631866q0.23620605 -0.49728394 0.64520264 -0.9139252q0.31063843 -0.3057251 0.980896 -0.8010864q1.3733215 -1.02771 1.9363098 -1.6776581q-0.14837646 -0.25712585 -0.18740845 -0.32478333q-0.42956543 -0.74432373 -0.93963623 -0.84669495q-0.7156677 -0.14602661 -1.6357422 0.38536072q-0.8524475 0.49230957 -1.0922546 1.0458069q-0.23410034 0.5321655 0.013824463 1.3994293l-1.2843933 0.52526855q-0.2749939 -0.85162354 -0.18301392 -1.5362854q0.10549927 -0.6924591 0.66851807 -1.3424072q0.5552063 -0.66348267 1.4752808 -1.1948547q0.92007446 -0.5313873 1.6133118 -0.6430664q0.7067566 -0.11949158 1.1648254 0.04902649q0.45803833 0.16850281 0.8552551 0.60672q0.24728394 0.27218628 0.71588135 1.0841675l0.9371643 1.6239929q0.9840393 1.7051697 1.3094177 2.1126862q0.33892822 0.39971924 0.81103516 0.68640137l-1.2718506 0.7345581q-0.40811157 -0.26953125 -0.7590027 -0.75253296zm-1.6645203 
-2.6654663q-0.5067749 0.65356445 -1.7234497 1.6088562q-0.69522095 0.5458679 -0.9283142 0.8609314q-0.23312378 0.31506348 -0.25280762 0.6873169q-0.013977051 0.3508911 0.16564941 0.66215515q0.28115845 0.48719788 0.83392334 0.6010132q0.5662842 0.10598755 1.269867 -0.30036926q0.70358276 -0.40634155 1.072998 -1.0166473q0.37512207 -0.63165283 0.3197937 -1.3214569q-0.031341553 -0.5232086 -0.49990845 -1.3352051l-0.25775146 -0.44659424zm7.223419 -0.8156891l0.8006897 0.98106384q-0.45169067 0.4052124 -0.857605 0.63964844q-0.6629944 0.38290405 -1.1454468 0.39089966q-0.4902954 -0.0055389404 -0.8343811 -0.25790405q-0.3518982 -0.26591492 -0.9844971 -1.3620911l-2.382019 -4.1276093l-0.8930054 0.5157471l-0.5466919 -0.94732666l0.8930054 -0.5157471l-1.0308838 -1.786377l0.7960205 -1.434082l1.4526367 2.5171661l1.2177429 -0.7033081l0.5466919 0.94732666l-1.2177429 0.70329285l2.421051 4.195282q0.30459595 0.52778625 0.4446106 0.645401q0.15356445 0.10978699 0.35708618 0.11856079q0.19567871 -0.004760742 0.46627808 -0.16104126q0.20297241 -0.11721802 0.49645996 -0.35890198zm-3.0901794 -8.121277l-0.80441284 -1.3939209l1.2177429 -0.70329285l0.80444336 1.3939209l-1.2177734 0.70329285zm4.920227 8.525894l-4.1470337 -7.1861115l1.2177429 -0.70329285l4.147064 7.186096l-1.2177734 0.7033081zm0.54074097 -5.111908q-1.1480408 -1.9893799 -0.5930481 -3.5910187q0.47280884 -1.3376465 1.7988281 -2.1034698q1.4883423 -0.8595886 2.984253 -0.4243927q1.501648 0.41384888 2.4934998 2.132553q0.79660034 1.3803864 0.83795166 2.4210968q0.0413208 1.0406952 -0.49923706 1.948349q-0.5348511 0.8862915 -1.4819946 1.4333038q-1.5018921 0.8674011 -2.9899902 0.44573975q-1.4959412 -0.43519592 -2.5502625 -2.2621613zm1.2583313 -0.7267456q0.79663086 1.3803864 1.7902527 1.7267303q1.0072021 0.33854675 1.9137268 -0.18501282q0.9065552 -0.5235748 1.1036072 -1.5576019q0.21057129 -1.0418396 -0.60946655 -2.4628143q-0.76538086 -1.3262482 -1.7725525 -1.6647949q-0.99365234 -0.34635925 -1.9002075 0.17721558q-0.9065552 0.5235596 -1.117096 1.5653992q-0.2048645 1.0204926 0.59173584 2.400879zm8.984772 -0.38945007l-4.1470337 -7.1861115l1.0959473 -0.6329651l0.5857544 1.0149841q0.10531616 -1.6306152 1.6072083 -2.4980164q0.6494446 -0.37509155 1.3234558 -0.45761108q0.6739807 -0.08250427 1.163269 0.14013672q0.48928833 0.22264099 0.9021301 0.687912q0.26287842 0.29925537 0.74710083 1.1383057l2.553833 4.425354l-1.2177429 0.70329285l-2.5226135 -4.371216q-0.4295349 -0.74432373 -0.7892456 -1.0237579q-0.3539734 -0.30078125 -0.8442383 -0.3063202q-0.47677612 -0.01335144 -0.9638672 0.2679596q-0.7847595 0.45324707 -1.0640869 1.2821808q-0.2735901 0.8075714 0.52301025 2.1879578l2.264862 3.9246216l-1.2177429 0.70329285z" fill-rule="nonzero"/><path fill="#000000" fill-opacity="0.0" d="m290.76047 207.94267c-17.003845 0 -26.795105 -5.566925 -34.00769 -11.133865c-7.212616 -5.566925 -11.846558 -11.13385 -23.693115 -11.13385" fill-rule="evenodd"/><path stroke="#000000" stroke-width="2.0" stroke-linejoin="round" stroke-linecap="butt" d="m290.76047 207.94267c-17.003876 0 -26.795105 -5.5669403 -34.00769 -11.133865c-3.606308 -2.7834625 -6.5679474 -5.566925 -10.108826 -7.6545258c-0.4426117 -0.2609558 -0.8942871 -0.5110321 -1.3573761 -0.74887085c-0.11578369 -0.0594635 -0.23228455 -0.1181488 -0.34950256 -0.17605591l-0.13806152 -0.066833496" fill-rule="evenodd"/><path fill="#000000" stroke="#000000" stroke-width="2.0" stroke-linecap="butt" d="m245.48381 184.93082l-9.563843 1.350235l8.194244 5.1131744z" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m280.4455 126.06866l129.5748 
-74.83465l20.629913 35.74803l-129.5748 74.83464z" fill-rule="evenodd"/><path fill="#000000" d="m298.03357 135.5387l-5.72464 -9.919807l1.3124695 -0.75800323l5.72464 9.9198l-1.3124695 0.75801086zm3.470581 -2.0044098l-4.1470337 -7.186104l1.0959778 -0.6329651l0.5857239 1.0149841q0.10531616 -1.6306229 1.6072083 -2.498024q0.6494751 -0.37509155 1.3234558 -0.45760345q0.6739807 -0.0825119 1.163269 0.14012146q0.48928833 0.22264099 0.9021301 0.6879196q0.26290894 0.29925537 0.74710083 1.1383133l2.553833 4.4253464l-1.2177429 0.70329285l-2.522583 -4.371208q-0.4295349 -0.74432373 -0.7892456 -1.0237579q-0.3540039 -0.30078888 -0.8442688 -0.30632782q-0.4767456 -0.01335144 -0.9638672 0.26796722q-0.7847595 0.4532318 -1.0640869 1.2821655q-0.2735901 0.80758667 0.52301025 2.1879654l2.2648926 3.9246216l-1.2177734 0.70329285zm12.360168 -7.1384735l-0.5232849 -0.906723q-0.059539795 1.4598389 -1.3855286 2.2256546q-0.8659668 0.5001297 -1.8564148 0.44062042q-0.9904785 -0.05949402 -1.8883972 -0.6776428q-0.88442993 -0.62597656 -1.5170288 -1.7221603q-0.6247864 -1.0826492 -0.77282715 -2.151947q-0.14230347 -1.0906448 0.30926514 -1.9649353q0.44378662 -0.887825 1.336792 -1.4035797q0.6494751 -0.37509155 1.3063049 -0.39356232q0.6703491 -0.026283264 1.2392883 0.2405777l-2.054016 -3.5592194l1.2177429 -0.7033005l5.7246704 9.919807l-1.1365662 0.6564102zm-5.9122925 -1.3669891q0.79660034 1.3803787 1.7767029 1.7345505q0.97232056 0.3406372 1.7570801 -0.112602234q0.7983093 -0.4610443 0.97817993 -1.4310303q0.18560791 -0.991333 -0.58758545 -2.3311157q-0.8512573 -1.4751129 -1.8178406 -1.8370972q-0.96658325 -0.36198425 -1.805481 0.12251282q-0.8118286 0.46886444 -0.97036743 1.4445648q-0.15853882 0.9757004 0.6693115 2.4102173zm12.53952 -5.5459747l1.3442383 -0.57787323q0.34274292 1.2816315 -0.11764526 2.3594894q-0.4468689 1.0700455 -1.6916809 1.788971q-1.5830688 0.9142914 -3.0654602 0.47128296q-1.4823914 -0.44300842 -2.4898376 -2.1887817q-1.0465393 -1.813446 -0.69085693 -3.3540955q0.35568237 -1.5406494 1.8440247 -2.4002304q1.4342346 -0.828331 2.9109192 -0.36397552q1.4823608 0.4430008 2.5054626 2.2158508q0.0625 0.10826111 0.18743896 0.32479095l-5.3580933 3.094513q0.75494385 1.1518478 1.7173462 1.4440689q0.96810913 0.27088165 1.861145 -0.24487305q0.6765137 -0.39072418 0.9470215 -1.0160904q0.2705078 -0.6253662 0.09597778 -1.5530472zm-5.131775 0.32941437l4.005066 -2.3130646q-0.60446167 -0.8598404 -1.2410278 -1.0876312q-0.98794556 -0.36769867 -1.90802 0.16368103q-0.8388977 0.48449707 -1.0926819 1.3889084q-0.2480774 0.88306427 0.23666382 1.8481064zm10.854523 3.1318436l-5.740265 -9.946869l1.1094971 -0.6407852l0.5388794 0.9337845q0.07220459 -0.78147125 0.4031067 -1.333458q0.3444214 -0.5597992 1.0480042 -0.9661484q0.92007446 -0.5313797 1.8970032 -0.46406555q0.9769592 0.06730652 1.8285828 0.7302551q0.8573303 0.64160156 1.4508972 1.6701202q0.6404114 1.1097183 0.74212646 2.2238083q0.11526489 1.1062698 -0.3691101 2.01754q-0.4708252 0.90345 -1.3097229 1.3879471q-0.6088867 0.35164642 -1.2443542 0.37583923q-0.62197876 0.01637268 -1.159668 -0.19635773l2.022766 3.5050888l-1.2177429 0.7033005zm-2.551239 -6.952957q0.80441284 1.3939209 1.7418518 1.7366486q0.95095825 0.3349228 1.7492676 -0.12612915q0.8118286 -0.46886444 0.9953308 -1.495079q0.18353271 -1.026207 -0.6443176 -2.4607239q-0.79663086 -1.3803787 -1.7554016 -1.728836q-0.96658325 -0.36198425 -1.7513428 0.09125519q-0.77124023 0.4454193 -0.958374 1.5278625q-0.1736145 1.0746231 0.62298584 2.4550018zm12.239746 -5.408928l1.3442688 -0.57788086q0.34274292 1.2816391 -0.11764526 2.359497q-0.4468689 1.0700455 
-1.6916809 1.788971q-1.5830688 0.91428375 -3.0654602 0.47128296q-1.4823914 -0.44300842 -2.4898682 -2.1887894q-1.0465088 -1.8134384 -0.6908264 -3.3540878q0.35565186 -1.5406494 1.8440247 -2.400238q1.4342346 -0.828331 2.9108887 -0.36397552q1.4823914 0.44300842 2.5054932 2.2158508q0.062469482 0.10826874 0.18743896 0.32479858l-5.3580933 3.094513q0.75491333 1.1518402 1.7173462 1.4440689q0.96810913 0.27087402 1.861145 -0.24488068q0.6765137 -0.39071655 0.9470215 -1.0160828q0.2705078 -0.62537384 0.095947266 -1.5530472zm-5.1317444 0.32941437l4.0050354 -2.3130722q-0.60443115 -0.85983276 -1.2410278 -1.0876236q-0.98791504 -0.3677063 -1.9079895 0.1636734q-0.8388977 0.48449707 -1.0926819 1.3889084q-0.2480774 0.88306427 0.23666382 1.848114zm9.2612915 0.3710785l-4.1470337 -7.1861115l1.0959778 -0.6329651l0.5857239 1.0149841q0.10531616 -1.6306229 1.6072083 -2.498024q0.6494751 -0.37509155 1.3234558 -0.45760345q0.6739807 -0.0825119 1.163269 0.14012909q0.48928833 0.22263336 0.9021301 0.687912q0.26290894 0.29925537 0.74710083 1.1383133l2.553833 4.4253464l-1.2177429 0.70329285l-2.522583 -4.371208q-0.42956543 -0.74432373 -0.7892456 -1.0237579q-0.3540039 -0.30078888 -0.8442688 -0.30632782q-0.4767456 -0.01335144 -0.9638672 0.26796722q-0.7847595 0.4532318 -1.0640869 1.2821655q-0.2735901 0.80758667 0.52301025 2.187973l2.2648926 3.924614l-1.2177734 0.7033005zm12.360168 -7.138481l-0.5232849 -0.906723q-0.059539795 1.4598389 -1.3855286 2.2256546q-0.8659668 0.5001221 -1.8564148 0.44062805q-0.9904785 -0.05949402 -1.8883972 -0.67765045q-0.88442993 -0.62596893 -1.5170288 -1.7221603q-0.6247864 -1.0826492 -0.77282715 -2.151947q-0.14230347 -1.0906448 0.30926514 -1.9649353q0.44378662 -0.887825 1.336792 -1.4035797q0.6494751 -0.37509155 1.3063049 -0.39356232q0.6703491 -0.026283264 1.2392883 0.2405777l-2.054016 -3.5592194l1.2177429 -0.7033005l5.7246704 9.919807l-1.1365662 0.6564102zm-5.9122925 -1.3669891q0.79660034 1.3803787 1.7767029 1.7345505q0.97232056 0.3406372 1.7570801 -0.112602234q0.7983093 -0.4610443 0.97817993 -1.4310303q0.18560791 -0.991333 -0.58758545 -2.3311157q-0.8512573 -1.4751129 -1.8178406 -1.8370972q-0.96658325 -0.36198425 -1.805481 0.12251282q-0.8118286 0.46886444 -0.97036743 1.4445648q-0.15853882 0.9757004 0.6693115 2.4102173zm12.53952 -5.5459747l1.3442383 -0.57787323q0.34274292 1.2816391 -0.11764526 2.3594894q-0.4468689 1.0700455 -1.6916809 1.788971q-1.5830688 0.9142914 -3.0654602 0.47128296q-1.4823914 -0.4430008 -2.4898682 -2.1887817q-1.0465088 -1.813446 -0.6908264 -3.3540955q0.35568237 -1.5406494 1.8440247 -2.4002304q1.4342346 -0.828331 2.9109192 -0.36397552q1.4823608 0.4430008 2.5054626 2.2158508q0.0625 0.10826111 0.18743896 0.32479095l-5.3580933 3.094513q0.75494385 1.1518478 1.7173462 1.4440689q0.96810913 0.27088165 1.861145 -0.24487305q0.6765137 -0.39072418 0.9470215 -1.0160904q0.2705078 -0.6253662 0.09597778 -1.5530472zm-5.131775 0.32941437l4.005066 -2.3130646q-0.60446167 -0.8598404 -1.2410278 -1.0876312q-0.98794556 -0.36769867 -1.90802 0.16368103q-0.8388977 0.48449707 -1.0926819 1.3889084q-0.2480774 0.88306427 0.23666382 1.8481064zm9.261322 0.3710785l-4.147064 -7.186104l1.0959778 -0.6329727l0.5857544 1.0149918q0.105285645 -1.6306229 1.6071777 -2.498024q0.6494751 -0.37509155 1.3234558 -0.45761108q0.67401123 -0.0825119 1.163269 0.14012909q0.48928833 0.22264099 0.9021301 0.6879196q0.26290894 0.29925537 0.74710083 1.1383133l2.553833 4.4253387l-1.2177429 0.7033005l-2.522583 -4.371208q-0.4295349 -0.74432373 -0.7892456 -1.0237656q-0.3539734 -0.30078125 -0.8442688 -0.3063202q-0.4767456 -0.01335907 -0.96383667 
0.2679596q-0.78479004 0.45323944 -1.0640869 1.2821732q-0.2736206 0.80758667 0.52301025 2.1879654l2.264862 3.924614l-1.2177429 0.7033005zm9.725006 -7.078125l0.8006897 0.98106384q-0.45169067 0.4052124 -0.857605 0.6396408q-0.6629944 0.38291168 -1.1454468 0.39089966q-0.4902954 -0.0055389404 -0.8343811 -0.25791168q-0.3518982 -0.26589966 -0.9844971 -1.3620834l-2.382019 -4.127617l-0.8930054 0.5157547l-0.5466919 -0.94731903l0.8930054 -0.5157547l-1.0308838 -1.786377l0.79599 -1.4340897l1.4526672 2.5171661l1.2177429 -0.7033005l0.5466919 0.94732666l-1.2177429 0.70329285l2.421051 4.195282q0.30459595 0.5277939 0.4446106 0.645401q0.15356445 0.10979462 0.35708618 0.11856842q0.19567871 -0.004760742 0.46627808 -0.16104889q0.20297241 -0.11721802 0.49645996 -0.35889435z" fill-rule="nonzero"/><path fill="#000000" d="m307.15155 152.21382l-5.72464 -9.919815l1.2177429 -0.70329285l3.2645264 5.6568604l1.1950684 -4.587631l1.5830688 -0.9142914l-1.2081604 4.252365l5.625824 2.7774506l-1.5018921 0.8674011l-4.4921265 -2.3134918l-0.38955688 1.3256378l1.6478882 2.8554993l-1.2177429 0.7033081zm10.503723 -9.151794l1.3442383 -0.57788086q0.34274292 1.2816315 -0.11764526 2.359497q-0.44683838 1.0700378 -1.6916504 1.788971q-1.5830688 0.9142761 -3.0654602 0.47128296q-1.4823914 -0.44300842 -2.4898682 -2.188797q-1.0465393 -1.8134308 -0.69085693 -3.3540802q0.35568237 -1.5406494 1.8440247 -2.400238q1.4342346 -0.8283386 2.9109192 -0.36398315q1.4823914 0.44300842 2.5054932 2.2158508q0.062469482 0.10827637 0.18743896 0.32479858l-5.3580933 3.094513q0.75491333 1.1518402 1.7173157 1.4440765q0.96813965 0.27087402 1.861145 -0.2448883q0.6765137 -0.39071655 0.947052 -1.0160828q0.2705078 -0.6253662 0.095947266 -1.5530396zm-5.1317444 0.32940674l4.0050354 -2.3130646q-0.60446167 -0.85983276 -1.2410278 -1.0876312q-0.98791504 -0.3677063 -1.90802 0.16368103q-0.8388672 0.48449707 -1.0926819 1.3889008q-0.2480774 0.8830719 0.23669434 1.848114zm9.247772 0.378891l-4.147064 -7.1861115l1.0959778 -0.6329651l0.6247864 1.0826569q-0.017913818 -1.0001068 0.19329834 -1.4468842q0.21124268 -0.44676208 0.64419556 -0.6968231q0.6088867 -0.35165405 1.4713135 -0.3264618l0.22875977 1.3654938q-0.59487915 7.4768066E-4 -1.0413818 0.25862122q-0.39239502 0.22662354 -0.5765686 0.6577606q-0.17840576 0.40979004 -0.063812256 0.92100525q0.17190552 0.7667999 0.61709595 1.5381927l2.1711426 3.7622223l-1.2177429 0.70329285zm4.6274414 -2.6725311l-4.147064 -7.1861115l1.0959778 -0.6329651l0.5857544 1.0149841q0.105285645 -1.6306152 1.6071777 -2.4980164q0.6494751 -0.37509155 1.3234558 -0.45761108q0.67401123 -0.08250427 1.1632996 0.14012146q0.4892578 0.22264099 0.9020996 0.68792725q0.26290894 0.29925537 0.74710083 1.1383057l2.553833 4.425354l-1.2177429 0.70329285l-2.522583 -4.371216q-0.4295349 -0.74432373 -0.7892456 -1.0237579q-0.3539734 -0.30078125 -0.8442688 -0.3063202q-0.4767456 -0.01335144 -0.96383667 0.2679596q-0.78479004 0.4532318 -1.0640869 1.2821655q-0.2736206 0.80758667 0.52301025 2.187973l2.264862 3.9246216l-1.2177429 0.70329285zm11.281708 -9.60112l1.3442688 -0.57788086q0.34274292 1.2816315 -0.11764526 2.359497q-0.4468689 1.0700378 -1.6916809 1.788971q-1.5830688 0.9142914 -3.0654602 0.47128296q-1.4823914 -0.44300842 -2.4898682 -2.1887817q-1.0465088 -1.813446 -0.6908264 -3.3540955q0.35565186 -1.5406494 1.8440247 -2.400238q1.4342346 -0.828331 2.9108887 -0.36397552q1.4823914 0.44300842 2.5054932 2.2158432q0.062469482 0.10827637 0.18743896 0.32479858l-5.3580933 3.094513q0.75491333 1.1518555 1.7173462 1.4440765q0.96810913 0.27087402 1.861145 -0.24487305q0.6765137 -0.3907318 0.9470215 
-1.016098q0.2705078 -0.6253662 0.095947266 -1.5530396zm-5.1317444 0.32940674l4.0050354 -2.3130646q-0.60443115 -0.85983276 -1.2410278 -1.0876312q-0.98791504 -0.3677063 -1.9079895 0.16368103q-0.8388977 0.48449707 -1.0926819 1.388916q-0.2480774 0.88305664 0.23666382 1.8480988zm9.234253 0.3867035l-5.7246704 -9.9198l1.2177734 -0.7033005l5.72464 9.919807l-1.2177429 0.70329285z" fill-rule="nonzero"/><path fill="#000000" fill-opacity="0.0" d="m290.76047 143.94267c-17.003845 0 -26.795105 -5.566925 -34.00769 -11.133865c-7.212616 -5.566925 -11.846558 -11.13385 -23.693115 -11.13385" fill-rule="evenodd"/><path stroke="#000000" stroke-width="2.0" stroke-linejoin="round" stroke-linecap="butt" d="m290.76047 143.94267c-17.003876 0 -26.795105 -5.5669403 -34.00769 -11.133865c-3.606308 -2.7834625 -6.5679474 -5.566925 -10.108826 -7.6545258c-0.4426117 -0.26094818 -0.8942871 -0.5110321 -1.3573761 -0.74887085c-0.11578369 -0.0594635 -0.23228455 -0.11816406 -0.34950256 -0.17607117l-0.13806152 -0.06682587" fill-rule="evenodd"/><path fill="#000000" stroke="#000000" stroke-width="2.0" stroke-linecap="butt" d="m245.48381 120.93081l-9.563843 1.350235l8.194244 5.113182z" fill-rule="evenodd"/></g></svg> \ No newline at end of file
diff --git a/g3doc/README.md b/g3doc/README.md
deleted file mode 100644
index 22bfb15f7..000000000
--- a/g3doc/README.md
+++ /dev/null
@@ -1,164 +0,0 @@
-# What is gVisor?
-
-gVisor is an application kernel, written in Go, that implements a substantial
-portion of the [Linux system call interface][linux]. It provides an additional
-layer of isolation between running applications and the host operating system.
-
-gVisor includes an [Open Container Initiative (OCI)][oci] runtime called `runsc`
-that makes it easy to work with existing container tooling. The `runsc` runtime
-integrates with Docker and Kubernetes, making it simple to run sandboxed
-containers.
-
-gVisor can be used with Docker, Kubernetes, or directly using `runsc`. Use the
-links below to see detailed instructions for each of them:
-
-* [Docker](./user_guide/quick_start/docker.md): The quickest and easiest way
- to get started.
-* [Kubernetes](./user_guide/quick_start/kubernetes.md): Isolate Pods in your
- K8s cluster with gVisor.
-* [OCI Quick Start](./user_guide/quick_start/oci.md): Expert mode. Customize
- gVisor for your environment.
-
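-For example, assuming `runsc` has been installed and registered with Docker as
-a runtime named `runsc` (see the [installation guide](./user_guide/install.md)),
-a container can be sandboxed simply by selecting that runtime:
-
-```shell
-# Run a container under gVisor by selecting the runsc runtime.
-docker run --runtime=runsc --rm hello-world
-```
-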
-## What does gVisor do?
-
-gVisor provides a virtualized environment in order to sandbox containers. The
-system interfaces normally implemented by the host kernel are moved into a
-distinct, per-sandbox application kernel in order to minimize the risk of a
-container escape exploit. However, gVisor does not introduce large fixed
-overheads, and it still retains a process-like model with respect to resource
-utilization.
-
-## How is this different?
-
-Two other approaches are commonly taken to provide stronger isolation than
-native containers.
-
-**Machine-level virtualization**, such as [KVM][kvm] and [Xen][xen], exposes
-virtualized hardware to a guest kernel via a Virtual Machine Monitor (VMM). This
-virtualized hardware is generally enlightened (paravirtualized) and additional
-mechanisms can be used to improve the visibility between the guest and host
-(e.g. balloon drivers, paravirtualized spinlocks). Running containers in
-distinct virtual machines can provide great isolation, compatibility and
-performance (though nested virtualization may bring challenges in this area),
-but for containers it often requires additional proxies and agents, and may
-come with a larger resource footprint and slower start-up times.
-
-![Machine-level virtualization](Machine-Virtualization.png "Machine-level virtualization")
-
-**Rule-based execution**, such as [seccomp][seccomp], [SELinux][selinux] and
-[AppArmor][apparmor], allows the specification of a fine-grained security policy
-for an application or container. These schemes typically rely on hooks
-implemented inside the host kernel to enforce the rules. If the surface can be
-made small enough, then this is an excellent way to sandbox applications and
-maintain native performance. However, in practice it can be extremely difficult
-(if not impossible) to reliably define a policy for arbitrary, previously
-unknown applications, making this approach challenging to apply universally.
-
-![Rule-based execution](Rule-Based-Execution.png "Rule-based execution")
-
-Rule-based execution is often combined with additional layers for
-defense-in-depth.
-
-**gVisor** provides a third isolation mechanism, distinct from those above.
-
-gVisor intercepts application system calls and acts as the guest kernel, without
-the need for translation through virtualized hardware. gVisor may be thought of
-as either a merged guest kernel and VMM, or as seccomp on steroids. This
-architecture allows it to provide a flexible resource footprint (i.e. one based
-on threads and memory mappings, not fixed guest physical resources) while also
-lowering the fixed costs of virtualization. However, this comes at the price of
-reduced application compatibility and higher per-system call overhead.
-
-![gVisor](Layers.png "gVisor")
-
-On top of this, gVisor employs rule-based execution to provide defense-in-depth
-(details below).
-
-gVisor's approach is similar to [User Mode Linux (UML)][uml], although UML
-virtualizes hardware internally and thus provides a fixed resource footprint.
-
-Each of the above approaches may excel in distinct scenarios. For example,
-machine-level virtualization will face challenges achieving high density, while
-gVisor may provide poor performance for system-call-heavy workloads.
-
-## Why Go?
-
-gVisor is written in [Go][golang] in order to avoid security pitfalls that can
-plague kernels. With Go, there are strong types, built-in bounds checks, no
-uninitialized variables, no use-after-free, no stack overflow, and a built-in
-race detector. However, the use of Go has its challenges, and the runtime often
-introduces performance overhead.
-
-## What are the different components?
-
-A gVisor sandbox consists of multiple processes. These processes collectively
-comprise an environment in which one or more containers can be run.
-
-Each sandbox has its own isolated instance of:
-
-* The **Sentry**, which is a kernel that runs the containers and intercepts
- and responds to system calls made by the application.
-
-Each container running in the sandbox has its own isolated instance of:
-
-* A **Gofer** which provides file system access to the containers.
-
-![gVisor architecture diagram](Sentry-Gofer.png "gVisor architecture diagram")
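-
-Both the Sentry and the Gofer run as ordinary host processes, so a running
-sandbox can be observed with standard tools. For example (exact process names
-may vary between releases):
-
-```shell
-# List gVisor-related processes on the host; the sandbox (Sentry) and the
-# per-container Gofer appear as separate runsc processes.
-ps aux | grep runsc
-```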
-
-## What is runsc?
-
-The entrypoint to running a sandboxed container is the `runsc` executable.
-`runsc` implements the [Open Container Initiative (OCI)][oci] runtime
-specification, which is used by Docker and Kubernetes. This means that OCI
-compatible _filesystem bundles_ can be run by `runsc`. Filesystem bundles
-consist of a `config.json` file containing the container configuration and a
-root filesystem for the container. Please see the [OCI runtime spec][runtime-spec]
-for more information on filesystem bundles. `runsc` implements multiple commands
-that perform various functions such as starting, stopping, listing, and querying
-the status of containers.
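-
-For example, a minimal bundle can be assembled and run directly with `runsc`,
-here using a root filesystem exported from the Docker `hello-world` image (see
-the OCI quick start linked above for a full walkthrough):
-
-```shell
-mkdir -p bundle/rootfs && cd bundle
-# Populate the root filesystem from an existing image.
-docker export $(docker create hello-world) | tar -xf - -C rootfs
-# Generate a default config.json for this bundle.
-runsc spec
-# Create and start a container named "hello" from the bundle.
-sudo runsc run hello
-```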
-
-### Sentry {#sentry}
-
-The Sentry is the largest component of gVisor. It can be thought of as an
-application kernel. The Sentry implements all the kernel functionality needed by
-the application, including: system calls, signal delivery, memory management and
-page faulting logic, the threading model, and more.
-
-When the application makes a system call, the
-[Platform](./architecture_guide/platforms.md) redirects the call to the Sentry,
-which will do the necessary work to service it. It is important to note that the
-Sentry does not pass system calls through to the host kernel. As a userspace
-application, the Sentry will make some host system calls to support its
-operation, but it does not allow the application to directly control the system
-calls it makes. For example, the Sentry is not able to open files directly; file
-system operations that extend beyond the sandbox (not internal `/proc` files,
-pipes, etc.) are sent to the Gofer, described below.
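-
-One way to see that kernel interfaces are served by the Sentry rather than the
-host is to query kernel state from inside a sandboxed container. For example,
-with the Docker runtime configured as above, `dmesg` reports the Sentry's own
-boot messages rather than the host's:
-
-```shell
-# Kernel messages inside the sandbox come from the Sentry, not the host.
-docker run --runtime=runsc --rm ubuntu dmesg
-```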
-
-### Gofer {#gofer}
-
-The Gofer is a standard host process which is started with each container and
-communicates with the Sentry via the [9P protocol][9p] over a socket or shared
-memory channel. The Sentry process is started in a restricted seccomp container
-without access to file system resources. The Gofer mediates all access to
-these resources, providing an additional level of isolation.
-
-### Application {#application}
-
-The application is a normal Linux binary provided to gVisor in an OCI runtime
-bundle. gVisor aims to provide an environment equivalent to Linux v4.4, so
-applications should be able to run unmodified. However, gVisor does not
-presently implement every system call, `/proc` file, or `/sys` file, so some
-incompatibilities may occur. See [Compatibility](./user_guide/compatibility.md)
-for more information.
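-
-For example, the kernel version reported inside a sandbox comes from the
-Sentry and reflects the emulated Linux version rather than the host kernel
-(typically a 4.4-series version string):
-
-```shell
-# Reported by the Sentry, independent of the host kernel version.
-docker run --runtime=runsc --rm ubuntu uname -r
-```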
-
-[9p]: https://en.wikipedia.org/wiki/9P_(protocol)
-[apparmor]: https://wiki.ubuntu.com/AppArmor
-[golang]: https://golang.org
-[kvm]: https://www.linux-kvm.org
-[linux]: https://en.wikipedia.org/wiki/Linux_kernel_interfaces
-[oci]: https://www.opencontainers.org
-[runtime-spec]: https://github.com/opencontainers/runtime-spec
-[seccomp]: https://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt
-[selinux]: https://selinuxproject.org
-[uml]: http://user-mode-linux.sourceforge.net/
-[xen]: https://www.xenproject.org
diff --git a/g3doc/Rule-Based-Execution.png b/g3doc/Rule-Based-Execution.png
deleted file mode 100644
index b42654a90..000000000
--- a/g3doc/Rule-Based-Execution.png
+++ /dev/null
Binary files differ
diff --git a/g3doc/Rule-Based-Execution.svg b/g3doc/Rule-Based-Execution.svg
deleted file mode 100644
index bd6717043..000000000
--- a/g3doc/Rule-Based-Execution.svg
+++ /dev/null
@@ -1 +0,0 @@
-<svg version="1.1" viewBox="0.0 0.0 355.03674540682414 172.5564304461942" xmlns="http://www.w3.org/2000/svg"> … (vector path data of the deleted Rule-Based-Execution.svg elided)
151.90599l74.04724 0" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m4.454068 151.90599l74.04724 0" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m172.45407 151.90599l74.04724 0" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m172.45407 151.90599l74.04724 0" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m73.43044 135.54013l100.88189 0l0 32.06299l-100.88189 0z" fill-rule="evenodd"/><path fill="#000000" d="m96.04542 156.37163l0 -9.546875l1.265625 0l0 3.921875l4.953125 0l0 -3.921875l1.265625 0l0 9.546875l-1.265625 0l0 -4.5l-4.953125 0l0 4.5l-1.265625 0zm13.953278 -0.859375q-0.65625 0.5625 -1.265625 0.796875q-0.59375 0.21875 -1.28125 0.21875q-1.140625 0 -1.75 -0.546875q-0.609375 -0.5625 -0.609375 -1.4375q0 -0.5 0.21875 -0.921875q0.234375 -0.421875 0.609375 -0.671875q0.375 -0.25 0.84375 -0.390625q0.34375 -0.078125 1.046875 -0.171875q1.421875 -0.171875 2.09375 -0.40625q0 -0.234375 0 -0.296875q0 -0.71875 -0.328125 -1.015625q-0.453125 -0.390625 -1.34375 -0.390625q-0.8125 0 -1.21875 0.296875q-0.390625 0.28125 -0.578125 1.015625l-1.140625 -0.15625q0.15625 -0.734375 0.515625 -1.1875q0.359375 -0.453125 1.03125 -0.6875q0.671875 -0.25 1.5625 -0.25q0.890625 0 1.4375 0.203125q0.5625 0.203125 0.8125 0.53125q0.265625 0.3125 0.375 0.796875q0.046875 0.296875 0.046875 1.078125l0 1.5625q0 1.625 0.078125 2.0625q0.078125 0.4375 0.296875 0.828125l-1.21875 0q-0.1875 -0.359375 -0.234375 -0.859375zm-0.09375 -2.609375q-0.640625 0.265625 -1.921875 0.4375q-0.71875 0.109375 -1.015625 0.25q-0.296875 0.125 -0.46875 0.375q-0.15625 0.25 -0.15625 0.546875q0 0.46875 0.34375 0.78125q0.359375 0.3125 1.046875 0.3125q0.671875 0 1.203125 -0.296875q0.53125 -0.296875 0.78125 -0.8125q0.1875 -0.390625 0.1875 -1.171875l0 -0.421875zm2.9749756 3.46875l0 -6.90625l1.0625 0l0 1.046875q0.40625 -0.734375 0.734375 -0.96875q0.34375 -0.234375 0.765625 -0.234375q0.59375 0 1.203125 0.375l-0.40625 1.078125q-0.4375 -0.25 -0.859375 -0.25q-0.390625 0 -0.703125 0.234375q-0.296875 0.234375 -0.421875 0.640625q-0.203125 0.625 -0.203125 1.359375l0 3.625l-1.171875 0zm8.93837 0l0 -0.875q-0.65625 1.03125 -1.9375 1.03125q-0.8125 0 -1.515625 -0.453125q-0.6875 -0.453125 -1.078125 -1.265625q-0.375 -0.828125 -0.375 -1.890625q0 -1.03125 0.34375 -1.875q0.34375 -0.84375 1.03125 -1.28125q0.703125 -0.453125 1.546875 -0.453125q0.625 0 1.109375 0.265625q0.5 0.25 0.796875 0.671875l0 -3.421875l1.171875 0l0 9.546875l-1.09375 0zm-3.703125 -3.453125q0 1.328125 0.5625 1.984375q0.5625 0.65625 1.328125 0.65625q0.765625 0 1.296875 -0.625q0.53125 -0.625 0.53125 -1.90625q0 -1.421875 -0.546875 -2.078125q-0.546875 -0.671875 -1.34375 -0.671875q-0.78125 0 -1.3125 0.640625q-0.515625 0.625 -0.515625 2.0zm7.9124756 3.453125l-2.125 -6.90625l1.21875 0l1.09375 3.984375l0.421875 1.484375q0.015625 -0.109375 0.359375 -1.421875l1.0937424 -4.046875l1.203125 0l1.03125 4.0l0.34375 1.328125l0.40625 -1.34375l1.171875 -3.984375l1.140625 0l-2.15625 6.90625l-1.21875 0l-1.09375 -4.140625l-0.265625 -1.171875l-1.4062424 5.3125l-1.21875 0zm12.859535 -0.859375q-0.65625 0.5625 -1.265625 0.796875q-0.59376526 0.21875 -1.2812653 0.21875q-1.140625 0 -1.75 -0.546875q-0.609375 -0.5625 -0.609375 -1.4375q0 -0.5 0.21875 -0.921875q0.234375 -0.421875 0.609375 -0.671875q0.375 -0.25 0.84375 -0.390625q0.34375 -0.078125 1.046875 -0.171875q1.4218903 -0.171875 2.0937653 -0.40625q0 -0.234375 0 -0.296875q0 -0.71875 -0.328125 -1.015625q-0.453125 -0.390625 -1.3437653 
-0.390625q-0.8125 0 -1.21875 0.296875q-0.390625 0.28125 -0.578125 1.015625l-1.140625 -0.15625q0.15625 -0.734375 0.515625 -1.1875q0.359375 -0.453125 1.03125 -0.6875q0.671875 -0.25 1.5625 -0.25q0.89064026 0 1.4375153 0.203125q0.5625 0.203125 0.8125 0.53125q0.265625 0.3125 0.375 0.796875q0.046875 0.296875 0.046875 1.078125l0 1.5625q0 1.625 0.078125 2.0625q0.078125 0.4375 0.296875 0.828125l-1.21875 0q-0.1875 -0.359375 -0.234375 -0.859375zm-0.09375 -2.609375q-0.640625 0.265625 -1.9218903 0.4375q-0.71875 0.109375 -1.015625 0.25q-0.296875 0.125 -0.46875 0.375q-0.15625 0.25 -0.15625 0.546875q0 0.46875 0.34375 0.78125q0.359375 0.3125 1.046875 0.3125q0.671875 0 1.2031403 -0.296875q0.53125 -0.296875 0.78125 -0.8125q0.1875 -0.390625 0.1875 -1.171875l0 -0.421875zm2.9749756 3.46875l0 -6.90625l1.0625 0l0 1.046875q0.40625 -0.734375 0.734375 -0.96875q0.34375 -0.234375 0.765625 -0.234375q0.59375 0 1.203125 0.375l-0.40625 1.078125q-0.4375 -0.25 -0.859375 -0.25q-0.390625 0 -0.703125 0.234375q-0.296875 0.234375 -0.421875 0.640625q-0.203125 0.625 -0.203125 1.359375l0 3.625l-1.171875 0zm9.18837 -2.21875l1.203125 0.140625q-0.28125 1.0625 -1.0625 1.65625q-0.765625 0.578125 -1.96875 0.578125q-1.515625 0 -2.40625 -0.9375q-0.890625 -0.9375 -0.890625 -2.609375q0 -1.75 0.890625 -2.703125q0.90625 -0.96875 2.34375 -0.96875q1.390625 0 2.265625 0.9375q0.875 0.9375 0.875 2.65625q0 0.109375 0 0.3125l-5.15625 0q0.0625 1.140625 0.640625 1.75q0.578125 0.59375 1.4375 0.59375q0.65625 0 1.109375 -0.328125q0.453125 -0.34375 0.71875 -1.078125zm-3.84375 -1.90625l3.859375 0q-0.078125 -0.859375 -0.4375 -1.296875q-0.5625 -0.6875 -1.453125 -0.6875q-0.8125 0 -1.359375 0.546875q-0.546875 0.53125 -0.609375 1.4375z" fill-rule="nonzero"/><defs><linearGradient id="p.1" gradientUnits="userSpaceOnUse" gradientTransform="matrix(4.54555197232122 0.0 0.0 4.54555197232122 0.0 0.0)" spreadMethod="pad" x1="8.189483259998303" y1="18.80511284496466" x2="8.189466907412452" y2="23.35066481725647"><stop offset="0.0" stop-color="#ff0000"/><stop offset="0.51" stop-color="#dab7a6"/><stop offset="0.99999994" stop-color="#dab7a6" stop-opacity="0.0"/><stop offset="1.0" stop-color="#ffffff" stop-opacity="0.0"/></linearGradient></defs><path fill="url(#p.1)" d="m37.225723 85.48025l173.29134 0l0 20.661415l-173.29134 0z" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m272.4455 100.54161l129.5748 -74.83464l20.629913 35.74803l-129.5748 74.83464z" fill-rule="evenodd"/><path fill="#000000" d="m287.51392 107.20854l1.1823425 -0.8271866q0.51071167 0.6974335 1.1166077 0.9970856q0.5980835 0.28610992 1.4464111 0.19311523q0.84054565 -0.10652161 1.6794434 -0.5910187q0.75772095 -0.4376068 1.2010193 -0.98233795q0.44906616 -0.5660858 0.50097656 -1.1013031q0.057678223 -0.55656433 -0.20785522 -1.0166931q-0.27334595 -0.47366333 -0.7392273 -0.6557007q-0.47366333 -0.1955719 -1.2366333 -0.079704285q-0.478302 0.07775116 -2.032318 0.54221344q-1.5618286 0.45092773 -2.2805786 0.48712158q-0.9222717 0.027420044 -1.5864563 -0.31072998q-0.6719971 -0.35168457 -1.0703125 -1.0418777q-0.4295349 -0.74432373 -0.38497925 -1.6361618q0.05029297 -0.9131851 0.6668701 -1.7203751q0.63012695 -0.8150101 1.6313782 -1.39328q1.1095276 -0.6407852 2.1592712 -0.75988007q1.0419617 -0.13263702 1.8867493 0.29968262q0.8583679 0.4244995 1.3987732 1.2671738l-1.2036743 0.8214798q-0.64712524 -0.87127686 -1.5022583 -1.0089188q-0.8629761 -0.15116882 -2.013092 0.5130539q-1.1906738 0.6876755 -1.4819641 1.4333115q-0.2913208 0.745636 0.06793213 1.3681641q0.30459595 0.5277939 0.8865051 0.6608505q0.5740967 
0.119522095 2.3815613 -0.43717957q1.8210144 -0.5645218 2.5725403 -0.6376953q1.0924377 -0.107666016 1.857605 0.28042603q0.77090454 0.366745 1.2316895 1.1652069q0.4529724 0.78491974 0.39904785 1.7543335q-0.040405273 0.96160126 -0.6663208 1.8463745q-0.62594604 0.8847656 -1.6813354 1.4942932q-1.3530579 0.78144073 -2.486084 0.9125519q-1.1408386 0.11756897 -2.1214905 -0.3625946q-0.96713257 -0.48797607 -1.5721436 -1.4738007zm13.40155 -4.9431534l0.8006897 0.98106384q-0.45169067 0.4052124 -0.857605 0.63964844q-0.6629944 0.38290405 -1.1454773 0.39089966q-0.4902649 -0.00554657 -0.8343506 -0.25791168q-0.3518982 -0.2659073 -0.9844971 -1.3620911l-2.382019 -4.127617l-0.8930054 0.5157547l-0.5466919 -0.94731903l0.8930054 -0.5157547l-1.0308838 -1.786377l0.79599 -1.4340897l1.4526367 2.5171661l1.2177734 -0.70329285l0.5466919 0.94731903l-1.2177734 0.7033005l2.4210815 4.1952744q0.30456543 0.5277939 0.4446106 0.645401q0.15356445 0.10979462 0.35705566 0.11856842q0.19570923 -0.004760742 0.4663086 -0.16104889q0.20297241 -0.11721802 0.49645996 -0.35889435zm1.8165283 0.41242218l-4.147064 -7.186104l1.0959778 -0.6329727l0.6247864 1.0826492q-0.0178833 -1.0000992 0.19332886 -1.4468689q0.21121216 -0.44677734 0.64419556 -0.6968384q0.6088562 -0.35164642 1.471283 -0.3264618l0.22875977 1.3654938q-0.59487915 7.4768066E-4 -1.0413818 0.25862885q-0.39239502 0.2266159 -0.5765381 0.6577606q-0.17843628 0.40979004 -0.06384277 0.9209976q0.17193604 0.76680756 0.61709595 1.5382004l2.1711426 3.7622147l-1.2177429 0.7033005zm2.0899658 -6.0066605q-1.1480408 -1.9893799 -0.5930481 -3.5910187q0.47280884 -1.3376465 1.7988281 -2.1034622q1.4883423 -0.8595886 2.984253 -0.4243927q1.501648 0.41384125 2.4934998 2.132553q0.79660034 1.3803787 0.83795166 2.4210892q0.04135132 1.0407028 -0.49923706 1.948349q-0.5348511 0.8862915 -1.4819946 1.4333038q-1.5018921 0.8674011 -2.9899902 0.44573975q-1.4959106 -0.4351883 -2.5502625 -2.2621613zm1.2583313 -0.7267456q0.79663086 1.3803787 1.7902527 1.726738q1.0072021 0.33853912 1.9137268 -0.18502045q0.9065552 -0.5235672 1.1036072 -1.5575943q0.21057129 -1.0418396 -0.60946655 -2.462822q-0.76538086 -1.3262482 -1.7725525 -1.6647949q-0.99365234 -0.34635162 -1.9002075 0.17721558q-0.90652466 0.5235672 -1.117096 1.5654068q-0.2048645 1.0204926 0.59173584 2.4008713zm8.984772 -0.38944244l-4.1470337 -7.1861115l1.0959778 -0.6329651l0.5857239 1.0149841q0.10531616 -1.6306229 1.6072083 -2.498024q0.6494751 -0.37509155 1.3234558 -0.45760345q0.6739807 -0.0825119 1.163269 0.14012909q0.48928833 0.22264099 0.9021301 0.6879196q0.26287842 0.29925537 0.74710083 1.1383057l2.553833 4.4253464l-1.2177429 0.7033005l-2.522583 -4.371216q-0.42956543 -0.74432373 -0.7892456 -1.0237579q-0.3540039 -0.30078125 -0.8442688 -0.30632782q-0.47677612 -0.01335144 -0.9638672 0.26796722q-0.7847595 0.45323944 -1.0640869 1.2821732q-0.2735901 0.80757904 0.52301025 2.1879654l2.264862 3.924614l-1.2177429 0.7033005zm7.819275 -3.7220154l1.2922058 -0.511734q0.3878479 0.5157852 0.8666992 0.56401825q0.65527344 0.072639465 1.4400635 -0.38059235q0.8388977 -0.48449707 1.1036682 -1.0885162q0.26480103 -0.60401917 0.07571411 -1.3067856q-0.1161499 -0.42009735 -0.81121826 -1.6245499q-0.25161743 1.408371 -1.4423218 2.0960464q-1.4883423 0.859581 -2.9171448 0.25933075q-1.428833 -0.60025024 -2.2957153 -2.1024323q-0.5935669 -1.0285187 -0.72805786 -2.1056366q-0.12097168 -1.0849228 0.30926514 -1.9649353q0.4437561 -0.887825 1.3908997 -1.4348297q1.2718811 -0.7345581 2.690796 -0.18271637l-0.4998474 -0.866127l1.1230469 -0.6485977l3.5847168 6.2117233q0.9684448 1.6781082 1.0362854 
2.5771942q0.067840576 0.899086 -0.4420166 1.7348709q-0.5098877 0.83579254 -1.5923462 1.4609451q-1.2854004 0.7423706 -2.4195251 0.6214981q-1.1206055 -0.12869263 -1.7651672 -1.3081741zm-1.4765625 -4.90316q0.81222534 1.4074478 1.7418518 1.7366486q0.94314575 0.32138824 1.7820435 -0.16310883q0.8388977 -0.48448944 1.0401001 -1.4487534q0.19342041 -0.97779846 -0.60317993 -2.3581848q-0.76538086 -1.3262482 -1.7298889 -1.6533508q-0.97229004 -0.3406372 -1.7976685 0.13603973q-0.8118286 0.46886444 -0.9974365 1.4601974q-0.1855774 0.991333 0.56417847 2.290512z" fill-rule="nonzero"/><path fill="#000000" d="m294.23132 118.16088l-0.80441284 -1.3939133l1.2177429 -0.7033005l0.80441284 1.3939209l-1.2177429 0.70329285zm4.920227 8.525894l-4.147064 -7.1861115l1.2177734 -0.70329285l4.1470337 7.186104l-1.2177429 0.7033005zm1.3493347 -3.6482391l1.0948792 -0.88494873q0.51641846 0.67609406 1.2029724 0.8028641q0.6922302 0.10542297 1.5176086 -0.37125397q0.8388672 -0.48449707 1.0495605 -1.0572586q0.20285034 -0.5862961 -0.062683105 -1.0464249q-0.23431396 -0.4059906 -0.7266846 -0.44641113q-0.35079956 -0.013923645 -1.4790955 0.31292725q-1.5347595 0.43530273 -2.2030334 0.4964676q-0.66256714 0.03981781 -1.183075 -0.23695374q-0.5069885 -0.28459167 -0.8115845 -0.8123779q-0.28115845 -0.48719788 -0.2989807 -1.0182266q-0.017791748 -0.5310211 0.2048645 -1.0204926q0.15917969 -0.3806305 0.56817627 -0.79727936q0.4147339 -0.43800354 0.9694824 -0.75839233q0.852417 -0.49230957 1.6289368 -0.6159897q0.7765198 -0.123680115 1.3162842 0.123931885q0.5455017 0.22625732 1.0733948 0.8596573l-1.0969849 0.85006714q-0.4013672 -0.5079727 -0.9733887 -0.59262085q-0.5720215 -0.0846405 -1.2756042 0.32170868q-0.8388977 0.48449707 -1.0402222 0.9796829q-0.19558716 0.47383118 0.015289307 0.8392334q0.14056396 0.24359131 0.39874268 0.3470993q0.2581787 0.103507996 0.64749146 0.05910492q0.22845459 -0.041732788 1.2620544 -0.31388855q1.4806519 -0.4040451 2.127594 -0.470932q0.6390991 -0.08041382 1.1653442 0.17501068q0.5397949 0.247612 0.89904785 0.87013245q0.35144043 0.60899353 0.29852295 1.3613129q-0.047210693 0.73096466 -0.5519409 1.4194183q-0.49118042 0.68063354 -1.3300476 1.1651306q-1.407196 0.81269836 -2.4736633 0.65275574q-1.0664673 -0.15995026 -1.933258 -1.1930542zm6.1190186 -5.4646606q-1.1480408 -1.9893723 -0.5930481 -3.591011q0.47283936 -1.3376541 1.7988281 -2.1034698q1.4883423 -0.859581 2.984253 -0.4243927q1.501648 0.41384888 2.4934998 2.1325607q0.79663086 1.3803787 0.83795166 2.4210815q0.04135132 1.0407028 -0.49923706 1.948349q-0.5348511 0.88629913 -1.4819946 1.4333115q-1.5018921 0.8674011 -2.9899902 0.44573975q-1.4959106 -0.43519592 -2.5502625 -2.262169zm1.2583313 -0.7267456q0.79663086 1.3803864 1.7902832 1.7267456q1.0071716 0.33853912 1.9136963 -0.18502808q0.9065552 -0.5235672 1.1036072 -1.5575943q0.21057129 -1.0418396 -0.60946655 -2.462822q-0.76538086 -1.3262482 -1.7725525 -1.6647873q-0.99365234 -0.34635925 -1.9002075 0.17720795q-0.90652466 0.5235672 -1.117096 1.5654068q-0.2048645 1.0204926 0.59173584 2.4008713zm8.957733 -0.3738098l-5.7246704 -9.919807l1.2177734 -0.7033005l5.72464 9.919807l-1.2177429 0.7033005zm7.2713623 -5.390396q-0.34069824 0.9726486 -0.8225403 1.5757141q-0.48962402 0.5895233 -1.2067566 1.003685q-1.1906738 0.6876755 -2.162445 0.47302246q-0.9717407 -0.21464539 -1.4872131 -1.1078339q-0.30456543 -0.5277939 -0.3109436 -1.1015167q-0.014190674 -0.58724976 0.21627808 -1.0631866q0.23620605 -0.4972763 0.64520264 -0.9139328q0.31063843 -0.30571747 0.980896 -0.8010864q1.3733215 -1.0277023 1.9363098 -1.6776505q-0.14837646 -0.25712585 -0.18740845 
-0.32479095q-0.42956543 -0.74432373 -0.93963623 -0.84669495q-0.7156677 -0.14601898 -1.6357422 0.38536072q-0.8524475 0.49230957 -1.0922546 1.0458145q-0.23410034 0.5321655 0.013824463 1.3994217l-1.2843933 0.5252762q-0.2749939 -0.85163116 -0.18301392 -1.5362854q0.10549927 -0.6924591 0.66851807 -1.3424072q0.5552063 -0.66348267 1.4752808 -1.1948624q0.92007446 -0.5313797 1.6133118 -0.6430588q0.7067566 -0.11948395 1.1648254 0.04901886q0.45803833 0.16851044 0.8552551 0.60672q0.24728394 0.27218628 0.71588135 1.0841827l0.9371643 1.6239777q0.9840393 1.7051773 1.3094177 2.1127014q0.33892822 0.39970398 0.81103516 0.6863861l-1.2718506 0.7345581q-0.40811157 -0.26953125 -0.7590027 -0.75253296zm-1.6645203 -2.6654587q-0.5067749 0.65356445 -1.7234497 1.6088486q-0.69522095 0.5458679 -0.9283142 0.8609314q-0.23312378 0.31506348 -0.25280762 0.6873169q-0.013977051 0.35090637 0.16564941 0.6621628q0.28115845 0.48719788 0.83392334 0.60100555q0.5662842 0.10598755 1.269867 -0.30036163q0.70358276 -0.40634918 1.072998 -1.016655q0.37512207 -0.63165283 0.3197937 -1.3214569q-0.031341553 -0.5232086 -0.49990845 -1.3351974l-0.25775146 -0.44659424zm7.223419 -0.8156891l0.8006897 0.98106384q-0.45169067 0.40522003 -0.857605 0.63964844q-0.6629944 0.38290405 -1.1454468 0.39089966q-0.4902954 -0.0055389404 -0.8343811 -0.25791168q-0.3518982 -0.26589966 -0.9844971 -1.3620911l-2.382019 -4.1276093l-0.8930054 0.5157471l-0.5466919 -0.94731903l0.8930054 -0.5157471l-1.0308838 -1.786377l0.7960205 -1.4340897l1.4526367 2.5171661l1.2177429 -0.7033005l0.5466919 0.94731903l-1.2177429 0.7033005l2.421051 4.195282q0.30459595 0.5277939 0.4446106 0.645401q0.15356445 0.10978699 0.35708618 0.11856079q0.19567871 -0.004760742 0.46627808 -0.16104889q0.20297241 -0.11721039 0.49645996 -0.35889435zm-3.0901794 -8.121277l-0.80441284 -1.3939209l1.2177429 -0.70329285l0.80444336 1.3939133l-1.2177734 0.7033005zm4.920227 8.525887l-4.1470337 -7.186104l1.2177429 -0.7033005l4.147064 7.186104l-1.2177734 0.7033005zm0.54074097 -5.111908q-1.1480408 -1.9893799 -0.5930481 -3.591011q0.47280884 -1.3376541 1.7988281 -2.1034698q1.4883423 -0.8595886 2.984253 -0.4243927q1.501648 0.41384125 2.4934998 2.132553q0.79660034 1.3803864 0.83795166 2.4210892q0.0413208 1.0407028 -0.49923706 1.948349q-0.5348511 0.88629913 -1.4819946 1.4333038q-1.5018921 0.8674011 -2.9899902 0.44574738q-1.4959412 -0.43519592 -2.5502625 -2.262169zm1.2583313 -0.7267456q0.79663086 1.3803864 1.7902527 1.726738q1.0072021 0.33854675 1.9137268 -0.18502045q0.9065552 -0.5235672 1.1036072 -1.5575943q0.21057129 -1.0418396 -0.60946655 -2.462822q-0.76538086 -1.3262482 -1.7725525 -1.6647949q-0.99365234 -0.34635162 -1.9002075 0.17721558q-0.9065552 0.5235672 -1.117096 1.5654068q-0.2048645 1.0204926 0.59173584 2.4008713zm8.984772 -0.38944244l-4.1470337 -7.1861115l1.0959473 -0.6329651l0.5857544 1.0149841q0.10531616 -1.6306152 1.6072083 -2.4980164q0.6494446 -0.37509918 1.3234558 -0.45761108q0.6739807 -0.0825119 1.163269 0.14012909q0.48928833 0.22264099 0.9021301 0.6879196q0.26287842 0.29925537 0.74710083 1.1383133l2.553833 4.4253387l-1.2177429 0.7033005l-2.5226135 -4.371208q-0.4295349 -0.74432373 -0.7892456 -1.0237656q-0.3539734 -0.30078125 -0.8442383 -0.3063202q-0.47677612 -0.01335907 -0.9638672 0.2679596q-0.7847595 0.45323944 -1.0640869 1.2821732q-0.2735901 0.80757904 0.52301025 2.1879654l2.264862 3.924614l-1.2177429 0.7033005z" fill-rule="nonzero"/><path fill="#000000" fill-opacity="0.0" d="m282.76047 118.41563c-17.003845 0 -26.795105 -5.566925 -34.007706 -11.133858c-7.2126007 -5.566925 -11.846542 -11.133858 -23.6931 
-11.133858" fill-rule="evenodd"/><path stroke="#000000" stroke-width="2.0" stroke-linejoin="round" stroke-linecap="butt" d="m282.76047 118.415634c-17.003876 0 -26.795105 -5.5669327 -34.007706 -11.133858c-3.6062927 -2.7834702 -6.567932 -5.5669327 -10.10881 -7.6545334c-0.4426117 -0.26094818 -0.8942871 -0.51101685 -1.3573761 -0.74887085c-0.11578369 -0.0594635 -0.23228455 -0.11816406 -0.34950256 -0.17607117l-0.13806152 -0.06682587" fill-rule="evenodd"/><path fill="#000000" stroke="#000000" stroke-width="2.0" stroke-linecap="butt" d="m237.48381 95.40378l-9.563843 1.350235l8.194244 5.1131744z" fill-rule="evenodd"/></g></svg> \ No newline at end of file
diff --git a/g3doc/Sentry-Gofer.png b/g3doc/Sentry-Gofer.png
deleted file mode 100644
index ca2c27ef7..000000000
--- a/g3doc/Sentry-Gofer.png
+++ /dev/null
Binary files differ
diff --git a/g3doc/Sentry-Gofer.svg b/g3doc/Sentry-Gofer.svg
deleted file mode 100644
index 5c10750d2..000000000
--- a/g3doc/Sentry-Gofer.svg
+++ /dev/null
@@ -1 +0,0 @@
-<svg version="1.1" viewBox="0.0 0.0 358.8556430446194 249.67191601049868" fill="none" stroke="none" stroke-linecap="square" stroke-miterlimit="10" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns="http://www.w3.org/2000/svg"><clipPath id="p.0"><path d="m0 0l358.85565 0l0 249.67192l-358.85565 0l0 -249.67192z" clip-rule="nonzero"/></clipPath><g clip-path="url(#p.0)"><path fill="#000000" fill-opacity="0.0" d="m0 0l358.85565 0l0 249.67192l-358.85565 0z" fill-rule="evenodd"/><path fill="#f4cccc" d="m36.454067 6.6430445l114.4252 0l0 48.850395l-114.4252 0z" fill-rule="evenodd"/><path stroke="#cc4125" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m36.454067 6.6430445l114.4252 0l0 48.850395l-114.4252 0z" fill-rule="evenodd"/><path fill="#000000" d="m48.00139 37.98824l5.125 -13.359373l1.90625 0l5.46875 13.359373l-2.015625 0l-1.546875 -4.046875l-5.59375 0l-1.46875 4.046875l-1.875 0zm3.859375 -5.484375l4.53125 0l-1.40625 -3.703123q-0.625 -1.6875 -0.9375 -2.765625q-0.265625 1.28125 -0.71875 2.546875l-1.46875 3.921873zm9.849823 9.1875l0 -13.374998l1.484375 0l0 1.25q0.53125 -0.734375 1.1875 -1.09375q0.671875 -0.375 1.625 -0.375q1.234375 0 2.171875 0.640625q0.953125 0.625 1.4375 1.796875q0.484375 1.15625 0.484375 2.546873q0 1.484375 -0.53125 2.671875q-0.53125 1.1875 -1.546875 1.828125q-1.015625 0.625 -2.140625 0.625q-0.8125 0 -1.46875 -0.34375q-0.65625 -0.34375 -1.0625 -0.875l0 4.703125l-1.640625 0zm1.484375 -8.484375q0 1.859375 0.75 2.765625q0.765625 0.890625 1.828125 0.890625q1.09375 0 1.875 -0.921875q0.78125 -0.9375 0.78125 -2.875q0 -1.8437481 -0.765625 -2.765623q-0.75 -0.921875 -1.8125 -0.921875q-1.046875 0 -1.859375 0.984375q-0.796875 0.96875 -0.796875 2.843748zm8.891342 8.484375l0 -13.374998l1.484375 0l0 1.25q0.53125 -0.734375 1.1875 -1.09375q0.671875 -0.375 1.625 -0.375q1.234375 0 2.171875 0.640625q0.953125 0.625 1.4375 1.796875q0.484375 1.15625 0.484375 2.546873q0 1.484375 -0.53125 2.671875q-0.53125 1.1875 -1.546875 1.828125q-1.015625 0.625 -2.140625 0.625q-0.8125 0 -1.46875 -0.34375q-0.65625 -0.34375 -1.0625 -0.875l0 4.703125l-1.640625 0zm1.484375 -8.484375q0 1.859375 0.75 2.765625q0.765625 0.890625 1.828125 0.890625q1.09375 0 1.875 -0.921875q0.78125 -0.9375 0.78125 -2.875q0 -1.8437481 -0.765625 -2.765623q-0.75 -0.921875 -1.8125 -0.921875q-1.046875 0 -1.859375 0.984375q-0.796875 0.96875 -0.796875 2.843748zm8.844467 4.78125l0 -13.359373l1.640625 0l0 13.359373l-1.640625 0zm4.191696 -11.468748l0 -1.890625l1.640625 0l0 1.890625l-1.640625 0zm0 11.468748l0 -9.671873l1.640625 0l0 9.671873l-1.640625 0zm10.457321 -3.546875l1.609375 0.21875q-0.265625 1.65625 -1.359375 2.609375q-1.078125 0.9375 -2.671875 0.9375q-1.984375 0 -3.1875 -1.296875q-1.203125 -1.296875 -1.203125 -3.71875q0 -1.5781231 0.515625 -2.749998q0.515625 -1.171875 1.578125 -1.75q1.0625 -0.59375 2.3125 -0.59375q1.578125 0 2.578125 0.796875q1.0 0.796875 1.28125 2.265625l-1.59375 0.234375q-0.234375 -0.96875 -0.8125 -1.453125q-0.578125 -0.5 -1.390625 -0.5q-1.234375 0 -2.015625 0.890625q-0.78125 0.890625 -0.78125 2.812498q0 1.953125 0.75 2.84375q0.75 0.875 1.953125 0.875q0.96875 0 1.609375 -0.59375q0.65625 -0.59375 0.828125 -1.828125zm9.328125 2.359375q-0.921875 0.765625 -1.765625 1.09375q-0.828125 0.3125 -1.796875 0.3125q-1.59375 0 -2.453125 -0.78125q-0.859375 -0.78125 -0.859375 -1.984375q0 -0.71875 0.328125 -1.296875q0.328125 -0.59375 0.84375 -0.9375q0.53125 -0.359375 1.1875 -0.546875q0.46875 -0.125 1.453125 -0.25q1.984375 -0.234375 2.921875 -0.5624981q0.015625 -0.34375 0.015625 -0.421875q0 -1.0 -0.46875 -1.421875q-0.625 
-0.546875 -1.875 -0.546875q-1.15625 0 -1.703125 0.40625q-0.546875 0.40625 -0.8125 1.421875l-1.609375 -0.21875q0.21875 -1.015625 0.71875 -1.640625q0.5 -0.640625 1.453125 -0.984375q0.953125 -0.34375 2.1875 -0.34375q1.25 0 2.015625 0.296875q0.78125 0.28125 1.140625 0.734375q0.375 0.4375 0.515625 1.109375q0.078125 0.421875 0.078125 1.515625l0 2.187498q0 2.28125 0.109375 2.890625q0.109375 0.59375 0.40625 1.15625l-1.703125 0q-0.265625 -0.515625 -0.328125 -1.1875zm-0.140625 -3.671875q-0.890625 0.375 -2.671875 0.625q-1.015625 0.140625 -1.4375 0.328125q-0.421875 0.1875 -0.65625 0.53125q-0.21875 0.34375 -0.21875 0.78125q0 0.65625 0.5 1.09375q0.5 0.4375 1.453125 0.4375q0.9375 0 1.671875 -0.40625q0.75 -0.421875 1.09375 -1.140625q0.265625 -0.5625 0.265625 -1.640625l0 -0.609375zm7.781967 3.390625l0.234375 1.453125q-0.6875 0.140625 -1.234375 0.140625q-0.890625 0 -1.390625 -0.28125q-0.484375 -0.28125 -0.6875 -0.734375q-0.203125 -0.46875 -0.203125 -1.9375l0 -5.578123l-1.203125 0l0 -1.265625l1.203125 0l0 -2.390625l1.625 -0.984375l0 3.375l1.65625 0l0 1.265625l-1.65625 0l0 5.671873q0 0.6875 0.078125 0.890625q0.09375 0.203125 0.28125 0.328125q0.203125 0.109375 0.578125 0.109375q0.265625 0 0.71875 -0.0625zm1.6051788 -9.999998l0 -1.890625l1.640625 0l0 1.890625l-1.640625 0zm0 11.468748l0 -9.671873l1.640625 0l0 9.671873l-1.640625 0zm3.5354462 -4.84375q0 -2.687498 1.484375 -3.968748q1.25 -1.078125 3.046875 -1.078125q2.0 0 3.265625 1.3125q1.265625 1.296875 1.265625 3.609373q0 1.859375 -0.5625 2.9375q-0.5625 1.0625 -1.640625 1.65625q-1.0625 0.59375 -2.328125 0.59375q-2.03125 0 -3.28125 -1.296875q-1.25 -1.3125 -1.25 -3.765625zm1.6875 0q0 1.859375 0.796875 2.796875q0.8125 0.921875 2.046875 0.921875q1.21875 0 2.03125 -0.921875q0.8125 -0.9375 0.8125 -2.84375q0 -1.7968731 -0.8125 -2.718748q-0.8125 -0.921875 -2.03125 -0.921875q-1.234375 0 -2.046875 0.921875q-0.796875 0.90625 -0.796875 2.765623zm9.297592 4.84375l0 -9.671873l1.46875 0l0 1.375q1.0625 -1.59375 3.078125 -1.59375q0.875 0 1.609375 0.3125q0.734375 0.3125 1.09375 0.828125q0.375 0.5 0.515625 1.203125q0.09375 0.453125 0.09375 1.5937481l0 5.953125l-1.640625 0l0 -5.890625q0 -0.9999981 -0.203125 -1.4843731q-0.1875 -0.5 -0.671875 -0.796875q-0.484375 -0.296875 -1.140625 -0.296875q-1.046875 0 -1.8125 0.671875q-0.75 0.65625 -0.75 2.515623l0 5.28125l-1.640625 0z" fill-rule="nonzero"/><path fill="#000000" fill-opacity="0.0" d="m4.4540663 73.055115l40.47244 0.25196838" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m4.4540663 73.055115l40.47244 0.25196838" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m144.0 74.0l35.27559 -0.94488525" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m144.0 74.0l35.27559 -0.94488525" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m44.926506 57.27559l97.48032 0l0 32.062996l-97.48032 0z" fill-rule="evenodd"/><path fill="#000000" d="m56.859642 75.044586l1.203125 -0.109375q0.078125 0.71875 0.390625 1.1875q0.3125 0.453125 0.953125 0.734375q0.65625 0.28125 1.46875 0.28125q0.71875 0 1.265625 -0.21875q0.5625 -0.21875 0.828125 -0.578125q0.265625 -0.375 0.265625 -0.828125q0 -0.453125 -0.265625 -0.78125q-0.25 -0.328125 -0.84375 -0.5625q-0.390625 -0.15625 -1.703125 -0.46875q-1.3125 -0.3125 -1.84375 -0.59375q-0.671875 -0.359375 -1.015625 -0.890625q-0.328125 -0.53125 -0.328125 -1.1875q0 -0.71875 0.40625 -1.34375q0.40625 -0.625 1.1875 -0.953125q0.796875 -0.328125 1.765625 -0.328125q1.046875 0 
1.859375 0.34375q0.8125 0.34375 1.25 1.015625q0.4375 0.65625 0.46875 1.484375l-1.203125 0.09375q-0.109375 -0.90625 -0.671875 -1.359375q-0.5625 -0.46875 -1.65625 -0.46875q-1.140625 0 -1.671875 0.421875q-0.515625 0.421875 -0.515625 1.015625q0 0.515625 0.359375 0.84375q0.375 0.328125 1.90625 0.6875q1.546875 0.34375 2.109375 0.59375q0.84375 0.390625 1.234375 0.984375q0.390625 0.578125 0.390625 1.359375q0 0.75 -0.4375 1.4375q-0.421875 0.671875 -1.25 1.046875q-0.8125 0.359375 -1.828125 0.359375q-1.296875 0 -2.171875 -0.375q-0.875 -0.375 -1.375 -1.125q-0.5 -0.765625 -0.53125 -1.71875zm9.12413 5.71875l-0.125 -1.09375q0.375 0.109375 0.65625 0.109375q0.390625 0 0.625 -0.140625q0.234375 -0.125 0.390625 -0.359375q0.109375 -0.171875 0.359375 -0.875q0.03125 -0.09375 0.109375 -0.28125l-2.625 -6.921875l1.265625 0l1.4375 4.0q0.28125 0.765625 0.5 1.59375q0.203125 -0.796875 0.46875 -1.578125l1.484375 -4.015625l1.171875 0l-2.625 7.015625q-0.421875 1.140625 -0.65625 1.578125q-0.3125 0.578125 -0.71875 0.84375q-0.40625 0.28125 -0.96875 0.28125q-0.328125 0 -0.75 -0.15625zm6.2421875 -4.71875l1.15625 -0.1875q0.109375 0.703125 0.546875 1.078125q0.453125 0.359375 1.25 0.359375q0.8125 0 1.203125 -0.328125q0.390625 -0.328125 0.390625 -0.765625q0 -0.390625 -0.359375 -0.625q-0.234375 -0.15625 -1.1875 -0.390625q-1.296875 -0.328125 -1.796875 -0.5625q-0.484375 -0.25 -0.75 -0.65625q-0.25 -0.421875 -0.25 -0.9375q0 -0.453125 0.203125 -0.84375q0.21875 -0.40625 0.578125 -0.671875q0.28125 -0.1875 0.75 -0.328125q0.46875 -0.140625 1.015625 -0.140625q0.8125 0 1.421875 0.234375q0.609375 0.234375 0.90625 0.640625q0.296875 0.390625 0.40625 1.0625l-1.140625 0.15625q-0.078125 -0.53125 -0.453125 -0.828125q-0.375 -0.3125 -1.0625 -0.3125q-0.8125 0 -1.15625 0.265625q-0.34375 0.265625 -0.34375 0.625q0 0.234375 0.140625 0.421875q0.15625 0.1875 0.453125 0.3125q0.171875 0.0625 1.03125 0.296875q1.25 0.328125 1.734375 0.546875q0.5 0.203125 0.78125 0.609375q0.28125 0.40625 0.28125 1.0q0 0.59375 -0.34375 1.109375q-0.34375 0.515625 -1.0 0.796875q-0.640625 0.28125 -1.453125 0.28125q-1.34375 0 -2.046875 -0.5625q-0.703125 -0.5625 -0.90625 -1.65625zm9.6953125 1.015625l0.171875 1.03125q-0.5 0.109375 -0.890625 0.109375q-0.640625 0 -1.0 -0.203125q-0.34375 -0.203125 -0.484375 -0.53125q-0.140625 -0.328125 -0.140625 -1.390625l0 -3.96875l-0.859375 0l0 -0.90625l0.859375 0l0 -1.71875l1.171875 -0.703125l0 2.421875l1.171875 0l0 0.90625l-1.171875 0l0 4.046875q0 0.5 0.046875 0.640625q0.0625 0.140625 0.203125 0.234375q0.140625 0.078125 0.40625 0.078125q0.203125 0 0.515625 -0.046875zm5.8748627 -1.171875l1.203125 0.140625q-0.28125 1.0625 -1.0625 1.65625q-0.765625 0.578125 -1.96875 0.578125q-1.515625 0 -2.40625 -0.9375q-0.890625 -0.9375 -0.890625 -2.609375q0 -1.75 0.890625 -2.703125q0.90625 -0.96875 2.34375 -0.96875q1.390625 0 2.265625 0.9375q0.875 0.9375 0.875 2.65625q0 0.109375 0 0.3125l-5.15625 0q0.0625 1.140625 0.640625 1.75q0.578125 0.59375 1.4375 0.59375q0.65625 0 1.109375 -0.328125q0.453125 -0.34375 0.71875 -1.078125zm-3.84375 -1.90625l3.859375 0q-0.078125 -0.859375 -0.4375 -1.296875q-0.5625 -0.6875 -1.453125 -0.6875q-0.8125 0 -1.359375 0.546875q-0.546875 0.53125 -0.609375 1.4375zm6.5218506 4.125l0 -6.90625l1.046875 0l0 0.96875q0.328125 -0.515625 0.859375 -0.8125q0.546875 -0.3125 1.234375 -0.3125q0.78125 0 1.265625 0.3125q0.484375 0.3125 0.6875 0.890625q0.828125 -1.203125 2.140625 -1.203125q1.03125 0 1.578125 0.578125q0.5625 0.5625 0.5625 1.734375l0 4.75l-1.171875 0l0 -4.359375q0 -0.703125 -0.125 -1.0q-0.109375 -0.3125 -0.40625 -0.5q-0.296875 -0.1875 -0.703125 
-0.1875q-0.71875 0 -1.203125 0.484375q-0.484375 0.484375 -0.484375 1.546875l0 4.015625l-1.171875 0l0 -4.484375q0 -0.78125 -0.296875 -1.171875q-0.28125 -0.390625 -0.921875 -0.390625q-0.5 0 -0.921875 0.265625q-0.421875 0.25 -0.609375 0.75q-0.1875 0.5 -0.1875 1.453125l0 3.578125l-1.171875 0zm19.321045 -2.53125l1.15625 0.15625q-0.1875 1.1875 -0.96875 1.859375q-0.78125 0.671875 -1.921875 0.671875q-1.40625 0 -2.28125 -0.921875q-0.859375 -0.9375 -0.859375 -2.65625q0 -1.125 0.375 -1.96875q0.375 -0.84375 1.125 -1.25q0.765625 -0.421875 1.65625 -0.421875q1.125 0 1.84375 0.578125q0.71875 0.5625 0.921875 1.609375l-1.140625 0.171875q-0.171875 -0.703125 -0.59375 -1.046875q-0.40625 -0.359375 -0.984375 -0.359375q-0.890625 0 -1.453125 0.640625q-0.546875 0.640625 -0.546875 2.0q0 1.40625 0.53125 2.03125q0.546875 0.625 1.40625 0.625q0.6875 0 1.140625 -0.421875q0.46875 -0.421875 0.59375 -1.296875zm6.6640625 1.671875q-0.65625 0.5625 -1.265625 0.796875q-0.59375 0.21875 -1.28125 0.21875q-1.140625 0 -1.75 -0.546875q-0.609375 -0.5625 -0.609375 -1.4375q0 -0.5 0.21875 -0.921875q0.234375 -0.421875 0.609375 -0.671875q0.375 -0.25 0.84375 -0.390625q0.34375 -0.078125 1.046875 -0.171875q1.421875 -0.171875 2.09375 -0.40625q0 -0.234375 0 -0.296875q0 -0.71875 -0.328125 -1.015625q-0.453125 -0.390625 -1.34375 -0.390625q-0.8125 0 -1.21875 0.296875q-0.390625 0.28125 -0.578125 1.015625l-1.140625 -0.15625q0.15625 -0.734375 0.515625 -1.1875q0.359375 -0.453125 1.03125 -0.6875q0.671875 -0.25 1.5625 -0.25q0.890625 0 1.4375 0.203125q0.5625 0.203125 0.8125 0.53125q0.265625 0.3125 0.375 0.796875q0.046875 0.296875 0.046875 1.078125l0 1.5625q0 1.625 0.078125 2.0625q0.078125 0.4375 0.296875 0.828125l-1.21875 0q-0.1875 -0.359375 -0.234375 -0.859375zm-0.09375 -2.609375q-0.640625 0.265625 -1.921875 0.4375q-0.71875 0.109375 -1.015625 0.25q-0.296875 0.125 -0.46875 0.375q-0.15625 0.25 -0.15625 0.546875q0 0.46875 0.34375 0.78125q0.359375 0.3125 1.046875 0.3125q0.671875 0 1.203125 -0.296875q0.53125 -0.296875 0.78125 -0.8125q0.1875 -0.390625 0.1875 -1.171875l0 -0.421875zm2.9749756 3.46875l0 -9.546875l1.171875 0l0 9.546875l-1.171875 0zm2.96109 0l0 -9.546875l1.171875 0l0 9.546875l-1.171875 0zm2.507965 -2.0625l1.15625 -0.1875q0.109375 0.703125 0.546875 1.078125q0.453125 0.359375 1.25 0.359375q0.8125076 0 1.2031326 -0.328125q0.390625 -0.328125 0.390625 -0.765625q0 -0.390625 -0.359375 -0.625q-0.234375 -0.15625 -1.1875076 -0.390625q-1.296875 -0.328125 -1.796875 -0.5625q-0.484375 -0.25 -0.75 -0.65625q-0.25 -0.421875 -0.25 -0.9375q0 -0.453125 0.203125 -0.84375q0.21875 -0.40625 0.578125 -0.671875q0.28125 -0.1875 0.75 -0.328125q0.46875 -0.140625 1.015625 -0.140625q0.8125076 0 1.4218826 0.234375q0.609375 0.234375 0.90625 0.640625q0.296875 0.390625 0.40625 1.0625l-1.140625 0.15625q-0.078125 -0.53125 -0.453125 -0.828125q-0.375 -0.3125 -1.0625076 -0.3125q-0.8125 0 -1.15625 0.265625q-0.34375 0.265625 -0.34375 0.625q0 0.234375 0.140625 0.421875q0.15625 0.1875 0.453125 0.3125q0.171875 0.0625 1.03125 0.296875q1.2500076 0.328125 1.7343826 0.546875q0.5 0.203125 0.78125 0.609375q0.28125 0.40625 0.28125 1.0q0 0.59375 -0.34375 1.109375q-0.34375 0.515625 -1.0 0.796875q-0.640625 0.28125 -1.4531326 0.28125q-1.34375 0 -2.046875 -0.5625q-0.703125 -0.5625 -0.90625 -1.65625z" fill-rule="nonzero"/><path fill="#d9d2e9" d="m36.454067 87.40682l114.4252 0l0 48.850395l-114.4252 0z" fill-rule="evenodd"/><path stroke="#8e7cc3" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m36.454067 87.40682l114.4252 0l0 48.850395l-114.4252 0z" fill-rule="evenodd"/><path 
fill="#000000" d="m67.55086 114.45515l1.65625 -0.140625q0.125 1.0 0.546875 1.640625q0.4375 0.640625 1.34375 1.046875q0.921875 0.390625 2.0625 0.390625q1.0 0 1.78125 -0.296875q0.78125 -0.296875 1.15625 -0.8125q0.375 -0.53125 0.375 -1.15625q0 -0.625 -0.375 -1.09375q-0.359375 -0.46875 -1.1875 -0.796875q-0.546875 -0.203125 -2.390625 -0.640625q-1.828125 -0.453125 -2.5625 -0.84375q-0.96875 -0.5 -1.4375 -1.234375q-0.46875 -0.75 -0.46875 -1.671875q0 -1.0 0.578125 -1.875q0.578125 -0.890625 1.671875 -1.34375q1.109375 -0.453125 2.453125 -0.453125q1.484375 0 2.609375 0.484375q1.140625 0.46875 1.75 1.40625q0.609375 0.921875 0.65625 2.09375l-1.6875 0.125q-0.140625 -1.265625 -0.9375 -1.90625q-0.78125 -0.65625 -2.3125 -0.65625q-1.609375 0 -2.34375 0.59375q-0.734375 0.59375 -0.734375 1.421875q0 0.71875 0.53125 1.171875q0.5 0.46875 2.65625 0.96875q2.15625 0.484375 2.953125 0.84375q1.171875 0.53125 1.71875 1.359375q0.5625 0.828125 0.5625 1.90625q0 1.0625 -0.609375 2.015625q-0.609375 0.9375 -1.75 1.46875q-1.140625 0.515625 -2.578125 0.515625q-1.8125 0 -3.046875 -0.53125q-1.21875 -0.53125 -1.921875 -1.59375q-0.6875 -1.0625 -0.71875 -2.40625zm19.459198 1.1875l1.6875 0.203125q-0.40625 1.484375 -1.484375 2.3125q-1.078125 0.8125 -2.765625 0.8125q-2.125 0 -3.375 -1.296875q-1.234375 -1.3125 -1.234375 -3.671875q0 -2.453125 1.25 -3.796875q1.265625 -1.34375 3.265625 -1.34375q1.9375 0 3.15625 1.328125q1.234375 1.3125 1.234375 3.703125q0 0.15625 0 0.4375l-7.21875 0q0.09375 1.59375 0.90625 2.453125q0.8125 0.84375 2.015625 0.84375q0.90625 0 1.546875 -0.46875q0.640625 -0.484375 1.015625 -1.515625zm-5.390625 -2.65625l5.40625 0q-0.109375 -1.21875 -0.625 -1.828125q-0.78125 -0.953125 -2.03125 -0.953125q-1.125 0 -1.90625 0.765625q-0.765625 0.75 -0.84375 2.015625zm9.141342 5.765625l0 -9.671875l1.46875 0l0 1.375q1.0625 -1.59375 3.078125 -1.59375q0.875 0 1.609375 0.3125q0.734375 0.3125 1.09375 0.828125q0.375 0.5 0.515625 1.203125q0.09375 0.453125 0.09375 1.59375l0 5.953125l-1.640625 0l0 -5.890625q0 -1.0 -0.203125 -1.484375q-0.1875 -0.5 -0.671875 -0.796875q-0.484375 -0.296875 -1.140625 -0.296875q-1.046875 0 -1.8125 0.671875q-0.75 0.65625 -0.75 2.515625l0 5.28125l-1.640625 0zm13.953842 -1.46875l0.234375 1.453125q-0.6875 0.140625 -1.234375 0.140625q-0.890625 0 -1.390625 -0.28125q-0.484375 -0.28125 -0.6875 -0.734375q-0.203125 -0.46875 -0.203125 -1.9375l0 -5.578125l-1.203125 0l0 -1.265625l1.203125 0l0 -2.390625l1.625 -0.984375l0 3.375l1.65625 0l0 1.265625l-1.65625 0l0 5.671875q0 0.6875 0.078125 0.890625q0.09375 0.203125 0.28125 0.328125q0.203125 0.109375 0.578125 0.109375q0.265625 0 0.71875 -0.0625zm1.5895538 1.46875l0 -9.671875l1.46875 0l0 1.46875q0.5625 -1.03125 1.03125 -1.359375q0.484375 -0.328125 1.0625 -0.328125q0.828125 0 1.6875 0.53125l-0.5625 1.515625q-0.609375 -0.359375 -1.203125 -0.359375q-0.546875 0 -0.96875 0.328125q-0.421875 0.328125 -0.609375 0.890625q-0.28125 0.875 -0.28125 1.921875l0 5.0625l-1.625 0zm6.150177 3.71875l-0.1875 -1.53125q0.546875 0.140625 0.9375 0.140625q0.546875 0 0.875 -0.1875q0.328125 -0.171875 0.546875 -0.5q0.15625 -0.25 0.5 -1.21875q0.046875 -0.140625 0.140625 -0.40625l-3.671875 -9.6875l1.765625 0l2.015625 5.59375q0.390625 1.078125 0.703125 2.25q0.28125 -1.125 0.671875 -2.203125l2.078125 -5.640625l1.640625 0l-3.6875 9.828125q-0.59375 1.609375 -0.921875 2.203125q-0.4375 0.8125 -1.0 1.1875q-0.5625 0.375 -1.34375 0.375q-0.484375 0 -1.0625 -0.203125z" fill-rule="nonzero"/><path fill="#000000" fill-opacity="0.0" d="m6.8477693 152.91733l48.85039 0" fill-rule="evenodd"/><path stroke="#ff0000" 
stroke-width="2.0" stroke-linejoin="round" stroke-linecap="butt" d="m6.8477693 152.91733l48.85039 0" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m145.08398 152.91733l33.95276 0" fill-rule="evenodd"/><path stroke="#ff0000" stroke-width="2.0" stroke-linejoin="round" stroke-linecap="butt" d="m145.08398 152.91733l33.95276 0" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m55.698162 136.88583l89.38582 0l0 32.06299l-89.38582 0z" fill-rule="evenodd"/><path fill="#000000" d="m81.92428 150.43732l0 -8.59375l1.140625 0l0 7.578125l4.234375 0l0 1.015625l-5.375 0zm6.595703 -7.375l0 -1.21875l1.0625 0l0 1.21875l-1.0625 0zm0 7.375l0 -6.21875l1.0625 0l0 6.21875l-1.0625 0zm2.6660156 0l0 -6.21875l0.9375 0l0 0.875q0.296875 -0.46875 0.78125 -0.734375q0.484375 -0.28125 1.109375 -0.28125q0.6875 0 1.125 0.28125q0.453125 0.28125 0.625 0.796875q0.75 -1.078125 1.921875 -1.078125q0.9375 0 1.421875 0.515625q0.5 0.5 0.5 1.578125l0 4.265625l-1.046875 0l0 -3.921875q0 -0.625 -0.109375 -0.90625q-0.09375 -0.28125 -0.359375 -0.453125q-0.265625 -0.171875 -0.640625 -0.171875q-0.65625 0 -1.09375 0.4375q-0.421875 0.4375 -0.421875 1.40625l0 3.609375l-1.0625 0l0 -4.046875q0 -0.703125 -0.265625 -1.046875q-0.25 -0.359375 -0.828125 -0.359375q-0.453125 0 -0.828125 0.234375q-0.375 0.234375 -0.546875 0.6875q-0.171875 0.453125 -0.171875 1.296875l0 3.234375l-1.046875 0zm9.996094 -7.375l0 -1.21875l1.0625 0l0 1.21875l-1.0625 0zm0 7.375l0 -6.21875l1.0625 0l0 6.21875l-1.0625 0zm4.9628906 -0.9375l0.15625 0.921875q-0.453125 0.09375 -0.796875 0.09375q-0.578125 0 -0.890625 -0.171875q-0.3125 -0.1875 -0.453125 -0.484375q-0.125 -0.296875 -0.125 -1.25l0 -3.578125l-0.765625 0l0 -0.8125l0.765625 0l0 -1.546875l1.046875 -0.625l0 2.171875l1.0625 0l0 0.8125l-1.0625 0l0 3.640625q0 0.453125 0.046875 0.578125q0.0625 0.125 0.1875 0.203125q0.125 0.078125 0.359375 0.078125q0.1875 0 0.46875 -0.03125zm5.2871094 -1.0625l1.09375 0.125q-0.25 0.953125 -0.953125 1.484375q-0.703125 0.53125 -1.78125 0.53125q-1.359375 0 -2.171875 -0.84375q-0.796875 -0.84375 -0.796875 -2.359375q0 -1.5625 0.8125 -2.421875q0.8125 -0.875 2.09375 -0.875q1.25 0 2.03125 0.84375q0.796875 0.84375 0.796875 2.390625q0 0.09375 0 0.28125l-4.640625 0q0.0625 1.03125 0.578125 1.578125q0.515625 0.53125 1.296875 0.53125q0.578125 0 0.984375 -0.296875q0.421875 -0.3125 0.65625 -0.96875zm-3.453125 -1.703125l3.46875 0q-0.0625 -0.796875 -0.390625 -1.1875q-0.515625 -0.609375 -1.3125 -0.609375q-0.734375 0 -1.234375 0.484375q-0.484375 0.484375 -0.53125 1.3125zm9.908203 3.703125l0 -0.78125q-0.59375 0.921875 -1.734375 0.921875q-0.75 0 -1.375 -0.40625q-0.625 -0.421875 -0.96875 -1.15625q-0.34375 -0.734375 -0.34375 -1.6875q0 -0.921875 0.3125 -1.6875q0.3125 -0.765625 0.9375 -1.15625q0.625 -0.40625 1.390625 -0.40625q0.5625 0 1.0 0.234375q0.4375 0.234375 0.71875 0.609375l0 -3.078125l1.046875 0l0 8.59375l-0.984375 0zm-3.328125 -3.109375q0 1.203125 0.5 1.796875q0.5 0.578125 1.1875 0.578125q0.6875 0 1.171875 -0.5625q0.484375 -0.5625 0.484375 -1.71875q0 -1.28125 -0.5 -1.875q-0.484375 -0.59375 -1.203125 -0.59375q-0.703125 0 -1.171875 0.578125q-0.46875 0.5625 -0.46875 1.796875z" fill-rule="nonzero"/><path fill="#000000" d="m68.0942 162.57794l1.03125 -0.15625q0.09375 0.625 0.484375 0.953125q0.40625 0.328125 1.140625 0.328125q0.71875 0 1.0625 -0.28125q0.359375 -0.296875 0.359375 -0.703125q0 -0.359375 -0.3125 -0.5625q-0.21875 -0.140625 -1.078125 -0.359375q-1.15625 -0.296875 -1.609375 -0.5q-0.4375 -0.21875 -0.671875 -0.59375q-0.234375 -0.375 -0.234375 -0.84375q0 -0.40625 0.1875 -0.765625q0.1875 
-0.359375 0.515625 -0.59375q0.25 -0.171875 0.671875 -0.296875q0.421875 -0.125 0.921875 -0.125q0.71875 0 1.265625 0.21875q0.5625 0.203125 0.828125 0.5625q0.265625 0.359375 0.359375 0.953125l-1.03125 0.140625q-0.0625 -0.46875 -0.40625 -0.734375q-0.328125 -0.28125 -0.953125 -0.28125q-0.71875 0 -1.03125 0.25q-0.3125 0.234375 -0.3125 0.5625q0 0.203125 0.125 0.359375q0.140625 0.171875 0.40625 0.28125q0.15625 0.0625 0.9375 0.265625q1.125 0.3125 1.5625 0.5q0.4375 0.1875 0.6875 0.546875q0.25 0.359375 0.25 0.90625q0 0.53125 -0.3125 1.0q-0.296875 0.453125 -0.875 0.71875q-0.578125 0.25 -1.3125 0.25q-1.21875 0 -1.859375 -0.5q-0.625 -0.515625 -0.796875 -1.5zm6.375 4.25l-0.125 -0.984375q0.34375 0.09375 0.609375 0.09375q0.34375 0 0.546875 -0.125q0.21875 -0.109375 0.359375 -0.3125q0.09375 -0.171875 0.328125 -0.796875q0.015625 -0.078125 0.09375 -0.25l-2.375 -6.234375l1.140625 0l1.296875 3.59375q0.25 0.6875 0.453125 1.453125q0.1875 -0.734375 0.4375 -1.421875l1.328125 -3.625l1.046875 0l-2.359375 6.328125q-0.390625 1.015625 -0.59375 1.40625q-0.28125 0.53125 -0.65625 0.765625q-0.359375 0.25 -0.859375 0.25q-0.296875 0 -0.671875 -0.140625zm5.625 -4.25l1.03125 -0.15625q0.09375 0.625 0.484375 0.953125q0.40625 0.328125 1.140625 0.328125q0.71875 0 1.0625 -0.28125q0.359375 -0.296875 0.359375 -0.703125q0 -0.359375 -0.3125 -0.5625q-0.21875 -0.140625 -1.078125 -0.359375q-1.15625 -0.296875 -1.609375 -0.5q-0.4375 -0.21875 -0.671875 -0.59375q-0.234375 -0.375 -0.234375 -0.84375q0 -0.40625 0.1875 -0.765625q0.1875 -0.359375 0.515625 -0.59375q0.25 -0.171875 0.671875 -0.296875q0.421875 -0.125 0.921875 -0.125q0.71875 0 1.265625 0.21875q0.5625 0.203125 0.828125 0.5625q0.265625 0.359375 0.359375 0.953125l-1.03125 0.140625q-0.0625 -0.46875 -0.40625 -0.734375q-0.328125 -0.28125 -0.953125 -0.28125q-0.71875 0 -1.03125 0.25q-0.3125 0.234375 -0.3125 0.5625q0 0.203125 0.125 0.359375q0.140625 0.171875 0.40625 0.28125q0.15625 0.0625 0.9375 0.265625q1.125 0.3125 1.5625 0.5q0.4375 0.1875 0.6875 0.546875q0.25 0.359375 0.25 0.90625q0 0.53125 -0.3125 1.0q-0.296875 0.453125 -0.875 0.71875q-0.578125 0.25 -1.3125 0.25q-1.21875 0 -1.859375 -0.5q-0.625 -0.515625 -0.796875 -1.5zm8.71875 0.921875l0.15625 0.921875q-0.453125 0.09375 -0.796875 0.09375q-0.578125 0 -0.890625 -0.171875q-0.3125 -0.1875 -0.453125 -0.484375q-0.125 -0.296875 -0.125 -1.25l0 -3.578125l-0.765625 0l0 -0.8125l0.765625 0l0 -1.546875l1.046875 -0.625l0 2.171875l1.0625 0l0 0.8125l-1.0625 0l0 3.640625q0 0.453125 0.046875 0.578125q0.0625 0.125 0.1875 0.203125q0.125 0.078125 0.359375 0.078125q0.1875 0 0.46875 -0.03125zm5.2871094 -1.0625l1.09375 0.125q-0.25 0.953125 -0.953125 1.484375q-0.703125 0.53125 -1.78125 0.53125q-1.359375 0 -2.171875 -0.84375q-0.796875 -0.84375 -0.796875 -2.359375q0 -1.5625 0.8125 -2.421875q0.8125 -0.875 2.09375 -0.875q1.25 0 2.03125 0.84375q0.796875 0.84375 0.796875 2.390625q0 0.09375 0 0.28125l-4.640625 0q0.0625 1.03125 0.578125 1.578125q0.515625 0.53125 1.296875 0.53125q0.578125 0 0.984375 -0.296875q0.421875 -0.3125 0.65625 -0.96875zm-3.453125 -1.703125l3.46875 0q-0.0625 -0.796875 -0.390625 -1.1875q-0.515625 -0.609375 -1.3125 -0.609375q-0.734375 0 -1.234375 0.484375q-0.484375 0.484375 -0.53125 1.3125zm5.876953 3.703125l0 -6.21875l0.9375 0l0 0.875q0.296875 -0.46875 0.78125 -0.734375q0.484375 -0.28125 1.109375 -0.28125q0.6875 0 1.125 0.28125q0.453125 0.28125 0.625 0.796875q0.75 -1.078125 1.921875 -1.078125q0.9375 0 1.421875 0.515625q0.5 0.5 0.5 1.578125l0 4.265625l-1.046875 0l0 -3.921875q0 -0.625 -0.109375 -0.90625q-0.09375 -0.28125 -0.359375 -0.453125q-0.265625 
-0.171875 -0.640625 -0.171875q-0.65625 0 -1.09375 0.4375q-0.421875 0.4375 -0.421875 1.40625l0 3.609375l-1.0625 0l0 -4.046875q0 -0.703125 -0.265625 -1.046875q-0.25 -0.359375 -0.828125 -0.359375q-0.453125 0 -0.828125 0.234375q-0.375 0.234375 -0.546875 0.6875q-0.171875 0.453125 -0.171875 1.296875l0 3.234375l-1.046875 0zm17.392578 -2.28125l1.03125 0.140625q-0.171875 1.0625 -0.875 1.671875q-0.703125 0.609375 -1.71875 0.609375q-1.28125 0 -2.0625 -0.828125q-0.765625 -0.84375 -0.765625 -2.40625q0 -1.0 0.328125 -1.75q0.34375 -0.765625 1.015625 -1.140625q0.6875 -0.375 1.5 -0.375q1.0 0 1.640625 0.515625q0.65625 0.5 0.84375 1.453125l-1.03125 0.15625q-0.140625 -0.625 -0.515625 -0.9375q-0.375 -0.328125 -0.90625 -0.328125q-0.796875 0 -1.296875 0.578125q-0.5 0.5625 -0.5 1.796875q0 1.265625 0.484375 1.828125q0.484375 0.5625 1.25 0.5625q0.625 0 1.03125 -0.375q0.421875 -0.375 0.546875 -1.171875zm6.0000076 1.515625q-0.5937576 0.5 -1.1406326 0.703125q-0.53125 0.203125 -1.15625 0.203125q-1.03125 0 -1.578125 -0.5q-0.546875 -0.5 -0.546875 -1.28125q0 -0.453125 0.203125 -0.828125q0.203125 -0.390625 0.546875 -0.609375q0.34375 -0.234375 0.765625 -0.34375q0.296875 -0.09375 0.9375 -0.171875q1.265625 -0.140625 1.8750076 -0.359375q0 -0.21875 0 -0.265625q0 -0.65625 -0.29688263 -0.921875q-0.40625 -0.34375 -1.203125 -0.34375q-0.734375 0 -1.09375 0.265625q-0.359375 0.25 -0.53125 0.90625l-1.03125 -0.140625q0.140625 -0.65625 0.46875 -1.0625q0.328125 -0.40625 0.9375 -0.625q0.609375 -0.21875 1.40625 -0.21875q0.796875 0 1.2968826 0.1875q0.5 0.1875 0.734375 0.46875q0.234375 0.28125 0.328125 0.71875q0.046875 0.265625 0.046875 0.96875l0 1.40625q0 1.46875 0.0625 1.859375q0.078125 0.390625 0.28125 0.75l-1.109375 0q-0.15625 -0.328125 -0.203125 -0.765625zm-0.09375 -2.359375q-0.5781326 0.234375 -1.7187576 0.40625q-0.65625 0.09375 -0.921875 0.21875q-0.265625 0.109375 -0.421875 0.328125q-0.140625 0.21875 -0.140625 0.5q0 0.421875 0.3125 0.703125q0.328125 0.28125 0.9375 0.28125q0.609375 0 1.078125 -0.265625q0.484375 -0.265625 0.703125 -0.734375q0.17188263 -0.359375 0.17188263 -1.046875l0 -0.390625zm2.6738281 3.125l0 -8.59375l1.0625 0l0 8.59375l-1.0625 0zm2.6660156 0l0 -8.59375l1.0625 0l0 8.59375l-1.0625 0zm2.2753906 -1.859375l1.03125 -0.15625q0.09375 0.625 0.484375 0.953125q0.40625 0.328125 1.140625 0.328125q0.71875 0 1.0625 -0.28125q0.359375 -0.296875 0.359375 -0.703125q0 -0.359375 -0.3125 -0.5625q-0.21875 -0.140625 -1.078125 -0.359375q-1.15625 -0.296875 -1.609375 -0.5q-0.4375 -0.21875 -0.671875 -0.59375q-0.234375 -0.375 -0.234375 -0.84375q0 -0.40625 0.1875 -0.765625q0.1875 -0.359375 0.515625 -0.59375q0.25 -0.171875 0.671875 -0.296875q0.421875 -0.125 0.921875 -0.125q0.71875 0 1.265625 0.21875q0.5625 0.203125 0.828125 0.5625q0.265625 0.359375 0.359375 0.953125l-1.03125 0.140625q-0.0625 -0.46875 -0.40625 -0.734375q-0.328125 -0.28125 -0.953125 -0.28125q-0.71875 0 -1.03125 0.25q-0.3125 0.234375 -0.3125 0.5625q0 0.203125 0.125 0.359375q0.140625 0.171875 0.40625 0.28125q0.15625 0.0625 0.9375 0.265625q1.125 0.3125 1.5625 0.5q0.4375 0.1875 0.6875 0.546875q0.25 0.359375 0.25 0.90625q0 0.53125 -0.3125 1.0q-0.296875 0.453125 -0.875 0.71875q-0.578125 0.25 -1.3125 0.25q-1.21875 0 -1.859375 -0.5q-0.625 -0.515625 -0.796875 -1.5z" fill-rule="nonzero"/><path fill="#cfe2f3" d="m89.902885 171.04015l174.83463 0l0 48.850388l-174.83463 0z" fill-rule="evenodd"/><path stroke="#6d9eeb" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m89.902885 171.04015l174.83463 0l0 48.850388l-174.83463 0z" fill-rule="evenodd"/><path fill="#000000" d="m130.0844 
202.38535l0 -13.359375l1.765625 0l0 5.484375l6.9375 0l0 -5.484375l1.765625 0l0 13.359375l-1.765625 0l0 -6.296875l-6.9375 0l0 6.296875l-1.765625 0zm12.597946 -4.84375q0 -2.6875 1.484375 -3.96875q1.25 -1.078125 3.046875 -1.078125q2.0 0 3.265625 1.3125q1.265625 1.296875 1.265625 3.609375q0 1.859375 -0.5625 2.9375q-0.5625 1.0625 -1.640625 1.65625q-1.0625 0.59375 -2.328125 0.59375q-2.03125 0 -3.28125 -1.296875q-1.25 -1.3125 -1.25 -3.765625zm1.6875 0q0 1.859375 0.796875 2.796875q0.8125 0.921875 2.046875 0.921875q1.21875 0 2.03125 -0.921875q0.8125 -0.9375 0.8125 -2.84375q0 -1.796875 -0.8125 -2.71875q-0.8125 -0.921875 -2.03125 -0.921875q-1.234375 0 -2.046875 0.921875q-0.796875 0.90625 -0.796875 2.765625zm8.641342 1.953125l1.625 -0.25q0.125 0.96875 0.75 1.5q0.625 0.515625 1.75 0.515625q1.125 0 1.671875 -0.453125q0.546875 -0.46875 0.546875 -1.09375q0 -0.546875 -0.484375 -0.875q-0.328125 -0.21875 -1.671875 -0.546875q-1.8125 -0.46875 -2.515625 -0.796875q-0.6875 -0.328125 -1.046875 -0.90625q-0.359375 -0.59375 -0.359375 -1.3125q0 -0.640625 0.296875 -1.1875q0.296875 -0.5625 0.8125 -0.921875q0.375 -0.28125 1.03125 -0.46875q0.671875 -0.203125 1.421875 -0.203125q1.140625 0 2.0 0.328125q0.859375 0.328125 1.265625 0.890625q0.421875 0.5625 0.578125 1.5l-1.609375 0.21875q-0.109375 -0.75 -0.640625 -1.171875q-0.515625 -0.421875 -1.46875 -0.421875q-1.140625 0 -1.625 0.375q-0.46875 0.375 -0.46875 0.875q0 0.3125 0.1875 0.578125q0.203125 0.265625 0.640625 0.4375q0.234375 0.09375 1.4375 0.421875q1.75 0.453125 2.4375 0.75q0.6875 0.296875 1.078125 0.859375q0.390625 0.5625 0.390625 1.40625q0 0.828125 -0.484375 1.546875q-0.46875 0.71875 -1.375 1.125q-0.90625 0.390625 -2.046875 0.390625q-1.875 0 -2.875 -0.78125q-0.984375 -0.78125 -1.25 -2.328125zm13.5625 1.421875l0.234375 1.453125q-0.6875 0.140625 -1.234375 0.140625q-0.890625 0 -1.390625 -0.28125q-0.484375 -0.28125 -0.6875 -0.734375q-0.203125 -0.46875 -0.203125 -1.9375l0 -5.578125l-1.203125 0l0 -1.265625l1.203125 0l0 -2.390625l1.625 -0.984375l0 3.375l1.65625 0l0 1.265625l-1.65625 0l0 5.671875q0 0.6875 0.078125 0.890625q0.09375 0.203125 0.28125 0.328125q0.203125 0.109375 0.578125 0.109375q0.265625 0 0.71875 -0.0625zm6.9134827 1.46875l0 -13.359375l1.78125 0l0 6.625l6.625 -6.625l2.390625 0l-5.59375 5.421875l5.84375 7.9375l-2.328125 0l-4.765625 -6.765625l-2.171875 2.140625l0 4.625l-1.78125 0zm18.943573 -3.109375l1.6875 0.203125q-0.40625 1.484375 -1.484375 2.3125q-1.078125 0.8125 -2.765625 0.8125q-2.125 0 -3.375 -1.296875q-1.234375 -1.3125 -1.234375 -3.671875q0 -2.453125 1.25 -3.796875q1.265625 -1.34375 3.265625 -1.34375q1.9375 0 3.15625 1.328125q1.234375 1.3125 1.234375 3.703125q0 0.15625 0 0.4375l-7.21875 0q0.09375 1.59375 0.90625 2.453125q0.8125 0.84375 2.015625 0.84375q0.90625 0 1.546875 -0.46875q0.640625 -0.484375 1.015625 -1.515625zm-5.390625 -2.65625l5.40625 0q-0.109375 -1.21875 -0.625 -1.828125q-0.78125 -0.953125 -2.03125 -0.953125q-1.125 0 -1.90625 0.765625q-0.765625 0.75 -0.84375 2.015625zm9.125717 5.765625l0 -9.671875l1.46875 0l0 1.46875q0.5625 -1.03125 1.03125 -1.359375q0.484375 -0.328125 1.0625 -0.328125q0.828125 0 1.6875 0.53125l-0.5625 1.515625q-0.609375 -0.359375 -1.203125 -0.359375q-0.546875 0 -0.96875 0.328125q-0.421875 0.328125 -0.609375 0.890625q-0.28125 0.875 -0.28125 1.921875l0 5.0625l-1.625 0zm6.228302 0l0 -9.671875l1.46875 0l0 1.375q1.0625 -1.59375 3.078125 -1.59375q0.875 0 1.609375 0.3125q0.734375 0.3125 1.09375 0.828125q0.375 0.5 0.515625 1.203125q0.09375 0.453125 0.09375 1.59375l0 5.953125l-1.640625 0l0 -5.890625q0 -1.0 -0.203125 -1.484375q-0.1875 
-0.5 -0.671875 -0.796875q-0.484375 -0.296875 -1.140625 -0.296875q-1.046875 0 -1.8125 0.671875q-0.75 0.65625 -0.75 2.515625l0 5.28125l-1.640625 0zm17.000732 -3.109375l1.6875 0.203125q-0.40625 1.484375 -1.484375 2.3125q-1.078125 0.8125 -2.7656403 0.8125q-2.125 0 -3.375 -1.296875q-1.234375 -1.3125 -1.234375 -3.671875q0 -2.453125 1.25 -3.796875q1.265625 -1.34375 3.265625 -1.34375q1.9375153 0 3.1562653 1.328125q1.234375 1.3125 1.234375 3.703125q0 0.15625 0 0.4375l-7.2187653 0q0.09375 1.59375 0.90625 2.453125q0.8125 0.84375 2.015625 0.84375q0.90625 0 1.5468903 -0.46875q0.640625 -0.484375 1.015625 -1.515625zm-5.3906403 -2.65625l5.4062653 0q-0.109375 -1.21875 -0.625 -1.828125q-0.78125 -0.953125 -2.0312653 -0.953125q-1.125 0 -1.90625 0.765625q-0.765625 0.75 -0.84375 2.015625zm9.094467 5.765625l0 -13.359375l1.640625 0l0 13.359375l-1.640625 0z" fill-rule="nonzero"/><path fill="#000000" fill-opacity="0.0" d="m9.1482525 232.87665l117.79527 -0.34646606" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m9.1482525 232.87665l117.79527 -0.34646606" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m233.43172 232.53018l120.34645 0.34646606" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m233.43172 232.53018l120.34645 0.34646606" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m126.94353 216.49869l106.48819 0l0 32.06299l-106.48819 0z" fill-rule="evenodd"/><path fill="#000000" d="m152.36165 237.33018l0 -9.546875l1.265625 0l0 3.921875l4.953125 0l0 -3.921875l1.265625 0l0 9.546875l-1.265625 0l0 -4.5l-4.953125 0l0 4.5l-1.265625 0zm13.953278 -0.859375q-0.65625 0.5625 -1.265625 0.796875q-0.59375 0.21875 -1.28125 0.21875q-1.140625 0 -1.75 -0.546875q-0.609375 -0.5625 -0.609375 -1.4375q0 -0.5 0.21875 -0.921875q0.234375 -0.421875 0.609375 -0.671875q0.375 -0.25 0.84375 -0.390625q0.34375 -0.078125 1.046875 -0.171875q1.421875 -0.171875 2.09375 -0.40625q0 -0.234375 0 -0.296875q0 -0.71875 -0.328125 -1.015625q-0.453125 -0.390625 -1.34375 -0.390625q-0.8125 0 -1.21875 0.296875q-0.390625 0.28125 -0.578125 1.015625l-1.140625 -0.15625q0.15625 -0.734375 0.515625 -1.1875q0.359375 -0.453125 1.03125 -0.6875q0.671875 -0.25 1.5625 -0.25q0.890625 0 1.4375 0.203125q0.5625 0.203125 0.8125 0.53125q0.265625 0.3125 0.375 0.796875q0.046875 0.296875 0.046875 1.078125l0 1.5625q0 1.625 0.078125 2.0625q0.078125 0.4375 0.296875 0.828125l-1.21875 0q-0.1875 -0.359375 -0.234375 -0.859375zm-0.09375 -2.609375q-0.640625 0.265625 -1.921875 0.4375q-0.71875 0.109375 -1.015625 0.25q-0.296875 0.125 -0.46875 0.375q-0.15625 0.25 -0.15625 0.546875q0 0.46875 0.34375 0.78125q0.359375 0.3125 1.046875 0.3125q0.671875 0 1.203125 -0.296875q0.53125 -0.296875 0.78125 -0.8125q0.1875 -0.390625 0.1875 -1.171875l0 -0.421875zm2.9749756 3.46875l0 -6.90625l1.0625 0l0 1.046875q0.40625 -0.734375 0.734375 -0.96875q0.34375 -0.234375 0.765625 -0.234375q0.59375 0 1.203125 0.375l-0.40625 1.078125q-0.4375 -0.25 -0.859375 -0.25q-0.390625 0 -0.703125 0.234375q-0.296875 0.234375 -0.421875 0.640625q-0.203125 0.625 -0.203125 1.359375l0 3.625l-1.171875 0zm8.93837 0l0 -0.875q-0.65625 1.03125 -1.9375 1.03125q-0.8125 0 -1.515625 -0.453125q-0.6875 -0.453125 -1.078125 -1.265625q-0.375 -0.828125 -0.375 -1.890625q0 -1.03125 0.34375 -1.875q0.34375 -0.84375 1.03125 -1.28125q0.703125 -0.453125 1.546875 -0.453125q0.625 0 1.109375 0.265625q0.5 0.25 0.796875 0.671875l0 -3.421875l1.171875 0l0 9.546875l-1.09375 0zm-3.703125 -3.453125q0 1.328125 
0.5625 1.984375q0.5625 0.65625 1.328125 0.65625q0.765625 0 1.296875 -0.625q0.53125 -0.625 0.53125 -1.90625q0 -1.421875 -0.546875 -2.078125q-0.546875 -0.671875 -1.34375 -0.671875q-0.78125 0 -1.3125 0.640625q-0.515625 0.625 -0.515625 2.0zm7.9124756 3.453125l-2.125 -6.90625l1.21875 0l1.09375 3.984375l0.421875 1.484375q0.015625 -0.109375 0.359375 -1.421875l1.09375 -4.046875l1.203125 0l1.03125 4.0l0.34375 1.328125l0.40625 -1.34375l1.171875 -3.984375l1.140625 0l-2.15625 6.90625l-1.21875 0l-1.09375 -4.140625l-0.265625 -1.171875l-1.40625 5.3125l-1.21875 0zm12.859528 -0.859375q-0.65625 0.5625 -1.265625 0.796875q-0.59375 0.21875 -1.28125 0.21875q-1.140625 0 -1.75 -0.546875q-0.609375 -0.5625 -0.609375 -1.4375q0 -0.5 0.21875 -0.921875q0.234375 -0.421875 0.609375 -0.671875q0.375 -0.25 0.84375 -0.390625q0.34375 -0.078125 1.046875 -0.171875q1.421875 -0.171875 2.09375 -0.40625q0 -0.234375 0 -0.296875q0 -0.71875 -0.328125 -1.015625q-0.453125 -0.390625 -1.34375 -0.390625q-0.8125 0 -1.21875 0.296875q-0.390625 0.28125 -0.578125 1.015625l-1.140625 -0.15625q0.15625 -0.734375 0.515625 -1.1875q0.359375 -0.453125 1.03125 -0.6875q0.671875 -0.25 1.5625 -0.25q0.890625 0 1.4375 0.203125q0.5625 0.203125 0.8125 0.53125q0.265625 0.3125 0.375 0.796875q0.046875 0.296875 0.046875 1.078125l0 1.5625q0 1.625 0.078125 2.0625q0.078125 0.4375 0.296875 0.828125l-1.21875 0q-0.1875 -0.359375 -0.234375 -0.859375zm-0.09375 -2.609375q-0.640625 0.265625 -1.921875 0.4375q-0.71875 0.109375 -1.015625 0.25q-0.296875 0.125 -0.46875 0.375q-0.15625 0.25 -0.15625 0.546875q0 0.46875 0.34375 0.78125q0.359375 0.3125 1.046875 0.3125q0.671875 0 1.203125 -0.296875q0.53125 -0.296875 0.78125 -0.8125q0.1875 -0.390625 0.1875 -1.171875l0 -0.421875zm2.9749756 3.46875l0 -6.90625l1.0625 0l0 1.046875q0.40625 -0.734375 0.734375 -0.96875q0.34375 -0.234375 0.765625 -0.234375q0.59375 0 1.203125 0.375l-0.40625 1.078125q-0.4375 -0.25 -0.859375 -0.25q-0.390625 0 -0.703125 0.234375q-0.296875 0.234375 -0.421875 0.640625q-0.203125 0.625 -0.203125 1.359375l0 3.625l-1.171875 0zm9.18837 -2.21875l1.203125 0.140625q-0.28125 1.0625 -1.0625 1.65625q-0.765625 0.578125 -1.96875 0.578125q-1.515625 0 -2.40625 -0.9375q-0.890625 -0.9375 -0.890625 -2.609375q0 -1.75 0.890625 -2.703125q0.90625 -0.96875 2.34375 -0.96875q1.390625 0 2.265625 0.9375q0.875 0.9375 0.875 2.65625q0 0.109375 0 0.3125l-5.15625 0q0.0625 1.140625 0.640625 1.75q0.578125 0.59375 1.4375 0.59375q0.65625 0 1.109375 -0.328125q0.453125 -0.34375 0.71875 -1.078125zm-3.84375 -1.90625l3.859375 0q-0.078125 -0.859375 -0.4375 -1.296875q-0.5625 -0.6875 -1.453125 -0.6875q-0.8125 0 -1.359375 0.546875q-0.546875 0.53125 -0.609375 1.4375z" fill-rule="nonzero"/><defs><linearGradient id="p.1" gradientUnits="userSpaceOnUse" gradientTransform="matrix(4.500288203456436 0.0 0.0 4.500288203456436 0.0 0.0)" spreadMethod="pad" x1="20.153856515233464" y1="38.20913288577608" x2="20.153840567727556" y2="42.70942108920426"><stop offset="0.0" stop-color="#ff0000"/><stop offset="0.51" stop-color="#dab7a6"/><stop offset="0.99999994" stop-color="#dab7a6" stop-opacity="0.0"/><stop offset="1.0" stop-color="#ffffff" stop-opacity="0.0"/></linearGradient></defs><path fill="url(#p.1)" d="m90.698166 171.95273l173.29134 0l0 20.251968l-173.29134 0z" fill-rule="evenodd"/><path fill="#d9d2e9" d="m203.76447 87.804726l114.4252 0l0 48.850395l-114.4252 0z" fill-rule="evenodd"/><path stroke="#8e7cc3" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m203.76447 87.804726l114.4252 0l0 48.850395l-114.4252 0z" fill-rule="evenodd"/><path fill="#000000" 
d="m245.33514 113.91555l0 -1.578125l5.65625 0l0 4.953125q-1.296875 1.046875 -2.6875 1.578125q-1.375 0.515625 -2.84375 0.515625q-1.96875 0 -3.578125 -0.84375q-1.609375 -0.84375 -2.421875 -2.4375q-0.8125 -1.59375 -0.8125 -3.5625q0 -1.953125 0.8125 -3.640625q0.8125 -1.6875 2.34375 -2.5q1.53125 -0.828125 3.515625 -0.828125q1.453125 0 2.625 0.46875q1.171875 0.46875 1.828125 1.3125q0.671875 0.828125 1.015625 2.171875l-1.59375 0.4375q-0.296875 -1.015625 -0.75 -1.59375q-0.4375 -0.59375 -1.265625 -0.9375q-0.828125 -0.34375 -1.84375 -0.34375q-1.203125 0 -2.09375 0.375q-0.890625 0.359375 -1.4375 0.96875q-0.53125 0.59375 -0.828125 1.3125q-0.515625 1.234375 -0.515625 2.6875q0 1.78125 0.609375 2.984375q0.625 1.203125 1.796875 1.796875q1.171875 0.578125 2.5 0.578125q1.140625 0 2.234375 -0.4375q1.09375 -0.453125 1.65625 -0.953125l0 -2.484375l-3.921875 0zm7.448929 0.390625q0 -2.6875 1.484375 -3.96875q1.25 -1.078125 3.0468597 -1.078125q2.0 0 3.265625 1.3125q1.265625 1.296875 1.265625 3.609375q0 1.859375 -0.5625 2.9375q-0.5625 1.0625 -1.640625 1.65625q-1.0625 0.59375 -2.328125 0.59375q-2.0312347 0 -3.2812347 -1.296875q-1.25 -1.3125 -1.25 -3.765625zm1.6875 0q0 1.859375 0.796875 2.796875q0.81248474 0.921875 2.0468597 0.921875q1.21875 0 2.03125 -0.921875q0.8125 -0.9375 0.8125 -2.84375q0 -1.796875 -0.8125 -2.71875q-0.8125 -0.921875 -2.03125 -0.921875q-1.234375 0 -2.0468597 0.921875q-0.796875 0.90625 -0.796875 2.765625zm9.688217 4.84375l0 -8.40625l-1.453125 0l0 -1.265625l1.453125 0l0 -1.03125q0 -0.96875 0.171875 -1.453125q0.234375 -0.640625 0.828125 -1.03125q0.59375 -0.390625 1.671875 -0.390625q0.6875 0 1.53125 0.15625l-0.25 1.4375q-0.5 -0.09375 -0.953125 -0.09375q-0.75 0 -1.0625 0.328125q-0.3125 0.3125 -0.3125 1.1875l0 0.890625l1.890625 0l0 1.265625l-1.890625 0l0 8.40625l-1.625 0zm11.417664 -3.109375l1.6875 0.203125q-0.40625 1.484375 -1.484375 2.3125q-1.078125 0.8125 -2.765625 0.8125q-2.125 0 -3.375 -1.296875q-1.234375 -1.3125 -1.234375 -3.671875q0 -2.453125 1.25 -3.796875q1.265625 -1.34375 3.265625 -1.34375q1.9375 0 3.15625 1.328125q1.234375 1.3125 1.234375 3.703125q0 0.15625 0 0.4375l-7.21875 0q0.09375 1.59375 0.90625 2.453125q0.8125 0.84375 2.015625 0.84375q0.90625 0 1.546875 -0.46875q0.640625 -0.484375 1.015625 -1.515625zm-5.390625 -2.65625l5.40625 0q-0.109375 -1.21875 -0.625 -1.828125q-0.78125 -0.953125 -2.03125 -0.953125q-1.125 0 -1.90625 0.765625q-0.765625 0.75 -0.84375 2.015625zm9.125732 5.765625l0 -9.671875l1.46875 0l0 1.46875q0.5625 -1.03125 1.03125 -1.359375q0.484375 -0.328125 1.0625 -0.328125q0.828125 0 1.6875 0.53125l-0.5625 1.515625q-0.609375 -0.359375 -1.203125 -0.359375q-0.546875 0 -0.96875 0.328125q-0.421875 0.328125 -0.609375 0.890625q-0.28125 0.875 -0.28125 1.921875l0 5.0625l-1.625 0z" fill-rule="nonzero"/><path fill="#000000" fill-opacity="0.0" d="m179.05511 152.91733l37.984253 0" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m179.05511 152.91733l37.984253 0" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m306.4252 152.91733l47.338593 0" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m306.4252 152.91733l47.338593 0" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m217.03937 136.88583l89.38583 0l0 32.06299l-89.38583 0z" fill-rule="evenodd"/><path fill="#000000" d="m226.58624 154.67169l1.0625 -0.09375q0.078125 0.65625 0.359375 1.0625q0.28125 0.40625 0.859375 0.671875q0.59375 0.25 1.328125 0.25q0.640625 0 1.140625 -0.1875q0.5 
-0.203125 0.734375 -0.53125q0.25 -0.34375 0.25 -0.734375q0 -0.40625 -0.234375 -0.703125q-0.234375 -0.3125 -0.765625 -0.515625q-0.359375 -0.140625 -1.546875 -0.421875q-1.171875 -0.28125 -1.640625 -0.53125q-0.625 -0.328125 -0.921875 -0.796875q-0.296875 -0.484375 -0.296875 -1.078125q0 -0.640625 0.359375 -1.203125q0.375 -0.578125 1.078125 -0.859375q0.71875 -0.296875 1.578125 -0.296875q0.953125 0 1.6875 0.3125q0.734375 0.296875 1.125 0.90625q0.390625 0.59375 0.421875 1.34375l-1.09375 0.078125q-0.09375 -0.8125 -0.609375 -1.21875q-0.5 -0.421875 -1.484375 -0.421875q-1.03125 0 -1.5 0.375q-0.46875 0.375 -0.46875 0.90625q0 0.46875 0.328125 0.765625q0.328125 0.296875 1.703125 0.609375q1.390625 0.3125 1.90625 0.546875q0.75 0.359375 1.109375 0.890625q0.359375 0.515625 0.359375 1.21875q0 0.6875 -0.390625 1.296875q-0.390625 0.59375 -1.125 0.9375q-0.734375 0.328125 -1.65625 0.328125q-1.171875 0 -1.96875 -0.328125q-0.78125 -0.34375 -1.234375 -1.03125q-0.4375 -0.6875 -0.453125 -1.546875zm8.207031 5.15625l-0.125 -0.984375q0.34375 0.09375 0.609375 0.09375q0.34375 0 0.546875 -0.125q0.21875 -0.109375 0.359375 -0.3125q0.09375 -0.171875 0.328125 -0.796875q0.015625 -0.078125 0.09375 -0.25l-2.375 -6.234375l1.140625 0l1.296875 3.59375q0.25 0.6875 0.453125 1.453125q0.1875 -0.734375 0.4375 -1.421875l1.328125 -3.625l1.046875 0l-2.359375 6.328125q-0.390625 1.015625 -0.59375 1.40625q-0.28125 0.53125 -0.65625 0.765625q-0.359375 0.25 -0.859375 0.25q-0.296875 0 -0.671875 -0.140625zm5.625 -4.25l1.03125 -0.15625q0.09375 0.625 0.484375 0.953125q0.40625 0.328125 1.140625 0.328125q0.71875 0 1.0625 -0.28125q0.359375 -0.296875 0.359375 -0.703125q0 -0.359375 -0.3125 -0.5625q-0.21875 -0.140625 -1.078125 -0.359375q-1.15625 -0.296875 -1.609375 -0.5q-0.4375 -0.21875 -0.671875 -0.59375q-0.234375 -0.375 -0.234375 -0.84375q0 -0.40625 0.1875 -0.765625q0.1875 -0.359375 0.515625 -0.59375q0.25 -0.171875 0.671875 -0.296875q0.421875 -0.125 0.921875 -0.125q0.71875 0 1.265625 0.21875q0.5625 0.203125 0.828125 0.5625q0.265625 0.359375 0.359375 0.953125l-1.03125 0.140625q-0.0625 -0.46875 -0.40625 -0.734375q-0.328125 -0.28125 -0.953125 -0.28125q-0.71875 0 -1.03125 0.25q-0.3125 0.234375 -0.3125 0.5625q0 0.203125 0.125 0.359375q0.140625 0.171875 0.40625 0.28125q0.15625 0.0625 0.9375 0.265625q1.125 0.3125 1.5625 0.5q0.4375 0.1875 0.6875 0.546875q0.25 0.359375 0.25 0.90625q0 0.53125 -0.3125 1.0q-0.296875 0.453125 -0.875 0.71875q-0.578125 0.25 -1.3125 0.25q-1.21875 0 -1.859375 -0.5q-0.625 -0.515625 -0.796875 -1.5zm8.71875 0.921875l0.15625 0.921875q-0.453125 0.09375 -0.796875 0.09375q-0.578125 0 -0.890625 -0.171875q-0.3125 -0.1875 -0.453125 -0.484375q-0.125 -0.296875 -0.125 -1.25l0 -3.578125l-0.765625 0l0 -0.8125l0.765625 0l0 -1.546875l1.046875 -0.625l0 2.171875l1.0625 0l0 0.8125l-1.0625 0l0 3.640625q0 0.453125 0.046875 0.578125q0.0625 0.125 0.1875 0.203125q0.125 0.078125 0.359375 0.078125q0.1875 0 0.46875 -0.03125zm5.2871094 -1.0625l1.09375 0.125q-0.25 0.953125 -0.953125 1.484375q-0.703125 0.53125 -1.78125 0.53125q-1.359375 0 -2.171875 -0.84375q-0.796875 -0.84375 -0.796875 -2.359375q0 -1.5625 0.8125 -2.421875q0.8125 -0.875 2.09375 -0.875q1.25 0 2.03125 0.84375q0.796875 0.84375 0.796875 2.390625q0 0.09375 0 0.28125l-4.640625 0q0.0625 1.03125 0.578125 1.578125q0.515625 0.53125 1.296875 0.53125q0.578125 0 0.984375 -0.296875q0.421875 -0.3125 0.65625 -0.96875zm-3.453125 -1.703125l3.46875 0q-0.0625 -0.796875 -0.390625 -1.1875q-0.515625 -0.609375 -1.3125 -0.609375q-0.734375 0 -1.234375 0.484375q-0.484375 0.484375 -0.53125 1.3125zm5.876953 3.703125l0 
-6.21875l0.9375 0l0 0.875q0.296875 -0.46875 0.78125 -0.734375q0.484375 -0.28125 1.109375 -0.28125q0.6875 0 1.125 0.28125q0.453125 0.28125 0.625 0.796875q0.75 -1.078125 1.921875 -1.078125q0.9375 0 1.421875 0.515625q0.5 0.5 0.5 1.578125l0 4.265625l-1.046875 0l0 -3.921875q0 -0.625 -0.109375 -0.90625q-0.09375 -0.28125 -0.359375 -0.453125q-0.265625 -0.171875 -0.640625 -0.171875q-0.65625 0 -1.09375 0.4375q-0.421875 0.4375 -0.421875 1.40625l0 3.609375l-1.0625 0l0 -4.046875q0 -0.703125 -0.265625 -1.046875q-0.25 -0.359375 -0.828125 -0.359375q-0.453125 0 -0.828125 0.234375q-0.375 0.234375 -0.546875 0.6875q-0.171875 0.453125 -0.171875 1.296875l0 3.234375l-1.046875 0zm17.392578 -2.28125l1.03125 0.140625q-0.171875 1.0625 -0.875 1.671875q-0.703125 0.609375 -1.71875 0.609375q-1.28125 0 -2.0625 -0.828125q-0.765625 -0.84375 -0.765625 -2.40625q0 -1.0 0.328125 -1.75q0.34375 -0.765625 1.015625 -1.140625q0.6875 -0.375 1.5 -0.375q1.0 0 1.640625 0.515625q0.65625 0.5 0.84375 1.453125l-1.03125 0.15625q-0.140625 -0.625 -0.515625 -0.9375q-0.375 -0.328125 -0.90625 -0.328125q-0.796875 0 -1.296875 0.578125q-0.5 0.5625 -0.5 1.796875q0 1.265625 0.484375 1.828125q0.484375 0.5625 1.25 0.5625q0.625 0 1.03125 -0.375q0.421875 -0.375 0.546875 -1.171875zm6.0 1.515625q-0.59375 0.5 -1.140625 0.703125q-0.53125 0.203125 -1.15625 0.203125q-1.03125 0 -1.578125 -0.5q-0.546875 -0.5 -0.546875 -1.28125q0 -0.453125 0.203125 -0.828125q0.203125 -0.390625 0.546875 -0.609375q0.34375 -0.234375 0.765625 -0.34375q0.296875 -0.09375 0.9375 -0.171875q1.265625 -0.140625 1.875 -0.359375q0 -0.21875 0 -0.265625q0 -0.65625 -0.296875 -0.921875q-0.40625 -0.34375 -1.203125 -0.34375q-0.734375 0 -1.09375 0.265625q-0.359375 0.25 -0.53125 0.90625l-1.03125 -0.140625q0.140625 -0.65625 0.46875 -1.0625q0.328125 -0.40625 0.9375 -0.625q0.609375 -0.21875 1.40625 -0.21875q0.796875 0 1.296875 0.1875q0.5 0.1875 0.734375 0.46875q0.234375 0.28125 0.328125 0.71875q0.046875 0.265625 0.046875 0.96875l0 1.40625q0 1.46875 0.0625 1.859375q0.078125 0.390625 0.28125 0.75l-1.109375 0q-0.15625 -0.328125 -0.203125 -0.765625zm-0.09375 -2.359375q-0.578125 0.234375 -1.71875 0.40625q-0.65625 0.09375 -0.921875 0.21875q-0.265625 0.109375 -0.421875 0.328125q-0.140625 0.21875 -0.140625 0.5q0 0.421875 0.3125 0.703125q0.328125 0.28125 0.9375 0.28125q0.609375 0 1.078125 -0.265625q0.484375 -0.265625 0.703125 -0.734375q0.171875 -0.359375 0.171875 -1.046875l0 -0.390625zm2.6738281 3.125l0 -8.59375l1.0625 0l0 8.59375l-1.0625 0zm2.6660156 0l0 -8.59375l1.0625 0l0 8.59375l-1.0625 0zm2.2753906 -1.859375l1.03125 -0.15625q0.09375 0.625 0.484375 0.953125q0.40625 0.328125 1.140625 0.328125q0.71875 0 1.0625 -0.28125q0.359375 -0.296875 0.359375 -0.703125q0 -0.359375 -0.3125 -0.5625q-0.21875 -0.140625 -1.078125 -0.359375q-1.15625 -0.296875 -1.609375 -0.5q-0.4375 -0.21875 -0.671875 -0.59375q-0.234375 -0.375 -0.234375 -0.84375q0 -0.40625 0.1875 -0.765625q0.1875 -0.359375 0.515625 -0.59375q0.25 -0.171875 0.671875 -0.296875q0.421875 -0.125 0.921875 -0.125q0.71875 0 1.265625 0.21875q0.5625 0.203125 0.828125 0.5625q0.265625 0.359375 0.359375 0.953125l-1.03125 0.140625q-0.0625 -0.46875 -0.40625 -0.734375q-0.328125 -0.28125 -0.953125 -0.28125q-0.71875 0 -1.03125 0.25q-0.3125 0.234375 -0.3125 0.5625q0 0.203125 0.125 0.359375q0.140625 0.171875 0.40625 0.28125q0.15625 0.0625 0.9375 0.265625q1.125 0.3125 1.5625 0.5q0.4375 0.1875 0.6875 0.546875q0.25 0.359375 0.25 0.90625q0 0.53125 -0.3125 1.0q-0.296875 0.453125 -0.875 0.71875q-0.578125 0.25 -1.3125 0.25q-1.21875 0 -1.859375 -0.5q-0.625 -0.515625 -0.796875 -1.5z" 
fill-rule="nonzero"/><path fill="#000000" fill-opacity="0.0" d="m150.87927 111.83202l52.88188 0.40944672" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m154.30624 111.85855l46.027924 0.35638428" fill-rule="evenodd"/><path fill="#000000" stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m154.30624 111.85856l1.133255 -1.1158447l-3.0983734 1.1006241l3.0809631 1.1484756z" fill-rule="evenodd"/><path fill="#000000" stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m200.33417 112.214935l-1.133255 1.1158447l3.0983887 -1.1006317l-3.0809784 -1.148468z" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m159.04854 85.32021l37.417328 0l0 32.06299l-37.417328 0z" fill-rule="evenodd"/><path fill="#000000" d="m168.78291 104.91708l1.125 -0.109375q0.140625 0.796875 0.546875 1.15625q0.40625 0.359375 1.03125 0.359375q0.53125 0 0.9375 -0.25q0.421875 -0.25 0.671875 -0.65625q0.265625 -0.421875 0.4375 -1.125q0.171875 -0.703125 0.171875 -1.421875q0 -0.078125 0 -0.234375q-0.359375 0.546875 -0.96875 0.90625q-0.59375 0.34375 -1.3125 0.34375q-1.1875 0 -2.015625 -0.859375q-0.8125 -0.859375 -0.8125 -2.265625q0 -1.453125 0.859375 -2.328125q0.859375 -0.890625 2.140625 -0.890625q0.9375 0 1.703125 0.5q0.78125 0.5 1.171875 1.4375q0.40625 0.921875 0.40625 2.671875q0 1.828125 -0.40625 2.921875q-0.390625 1.078125 -1.171875 1.640625q-0.78125 0.5625 -1.84375 0.5625q-1.109375 0 -1.828125 -0.609375q-0.703125 -0.625 -0.84375 -1.75zm4.796875 -4.21875q0 -1.0 -0.546875 -1.59375q-0.53125 -0.59375 -1.28125 -0.59375q-0.78125 0 -1.375 0.640625q-0.578125 0.640625 -0.578125 1.65625q0 0.90625 0.546875 1.484375q0.5625 0.5625 1.359375 0.5625q0.828125 0 1.34375 -0.5625q0.53125 -0.578125 0.53125 -1.59375zm2.9124756 6.421875l0 -9.546875l3.59375 0q0.953125 0 1.453125 0.09375q0.703125 0.125 1.171875 0.453125q0.484375 0.328125 0.765625 0.921875q0.296875 0.59375 0.296875 1.296875q0 1.21875 -0.78125 2.0625q-0.765625 0.84375 -2.796875 0.84375l-2.4375 0l0 3.875l-1.265625 0zm1.265625 -5.0l2.453125 0q1.234375 0 1.75 -0.453125q0.515625 -0.46875 0.515625 -1.28125q0 -0.609375 -0.3125 -1.03125q-0.296875 -0.421875 -0.796875 -0.5625q-0.3125 -0.09375 -1.171875 -0.09375l-2.4375 0l0 3.421875z" fill-rule="nonzero"/></g></svg> \ No newline at end of file
diff --git a/g3doc/architecture_guide/BUILD b/g3doc/architecture_guide/BUILD
deleted file mode 100644
index 404f627a4..000000000
--- a/g3doc/architecture_guide/BUILD
+++ /dev/null
@@ -1,50 +0,0 @@
-load("//website:defs.bzl", "doc")
-
-package(
- default_visibility = ["//website:__pkg__"],
- licenses = ["notice"],
-)
-
-doc(
- name = "platforms",
- src = "platforms.md",
- category = "Architecture Guide",
- data = [
- "platforms.png",
- "platforms.svg",
- ],
- permalink = "/docs/architecture_guide/platforms/",
- weight = "40",
-)
-
-doc(
- name = "resources",
- src = "resources.md",
- category = "Architecture Guide",
- data = [
- "resources.png",
- "resources.svg",
- ],
- permalink = "/docs/architecture_guide/resources/",
- weight = "30",
-)
-
-doc(
- name = "security",
- src = "security.md",
- category = "Architecture Guide",
- data = [
- "security.png",
- "security.svg",
- ],
- permalink = "/docs/architecture_guide/security/",
- weight = "10",
-)
-
-doc(
- name = "performance",
- src = "performance.md",
- category = "Architecture Guide",
- permalink = "/docs/architecture_guide/performance/",
- weight = "20",
-)
diff --git a/g3doc/architecture_guide/performance.md b/g3doc/architecture_guide/performance.md
deleted file mode 100644
index b89facfd3..000000000
--- a/g3doc/architecture_guide/performance.md
+++ /dev/null
@@ -1,277 +0,0 @@
-# Performance Guide
-
-[TOC]
-
-gVisor is designed to provide a secure, virtualized environment while preserving
-key benefits of containerization, such as small fixed overheads and a dynamic
-resource footprint. For containerized infrastructure, this can provide a
-turn-key solution for sandboxing untrusted workloads: there are no changes to
-the fundamental resource model.
-
-gVisor imposes runtime costs over native containers. These costs come in two
-forms: additional cycles and memory usage, which may manifest as increased
-latency, reduced throughput or density, or not at all. In general, these costs
-come from two different sources.
-
-First, the existence of the [Sentry](../README.md#sentry) means that additional
-memory will be required, and application system calls must traverse additional
-layers of software. The design emphasizes
-[security](/docs/architecture_guide/security/) and therefore we chose to use a
-language for the Sentry that provides benefits in this domain but may not yet
-offer the raw performance of other choices. Costs imposed by these design
-choices are **structural costs**.
-
-Second, as gVisor is an independent implementation of the system call surface,
-many of the subsystems or specific calls are not as optimized as more mature
-implementations. A good example here is the network stack, which is continuing
-to evolve but does not support all the advanced recovery mechanisms offered by
-other stacks and is less CPU efficient. This is an **implementation cost** and
-is distinct from **structural costs**. Improvements here are ongoing and driven
-by the workloads that matter to gVisor users and contributors.
-
-This page provides a guide for understanding baseline performance, and calls out
-distinct **structural costs** and **implementation costs**, highlighting where
-improvements are and are not possible.
-
-While we include a variety of workloads here, it’s worth emphasizing that gVisor
-may not be an appropriate solution for every workload, for reasons other than
-performance. For example, a sandbox may provide minimal benefit for a trusted
-database, since _user data would already be inside the sandbox_ and there is no
-need for an attacker to break out in the first place.
-
-## Methodology
-
-All data below was generated using the [benchmark tools][benchmark-tools]
-repository, and the machines under test are uniform [Google Compute Engine][gce]
-Virtual Machines (VMs) with the following specifications:
-
- Machine type: n1-standard-4 (broadwell)
- Image: Debian GNU/Linux 9 (stretch) 4.19.0-0
- BootDisk: 2048GB SSD persistent disk
-
-Throughout this document, `runsc` is used to indicate the runtime provided by
-gVisor. When relevant, we use the name `runsc-platform` to describe a specific
-[platform choice](/docs/architecture_guide/platforms/).
-
-**Except where specified, all tests below are conducted with the `ptrace`
-platform. The `ptrace` platform works everywhere and does not require hardware
-virtualization or kernel modifications but suffers from the highest structural
-costs by far. This platform is used to provide a clear understanding of the
-performance model, but in no way represents an ideal scenario. In the future,
-this guide will be extended to bare metal environments and include additional
-platforms.**
-
-## Memory access
-
-gVisor does not introduce any additional costs with respect to raw memory
-accesses. Page faults and other Operating System (OS) mechanisms are translated
-through the Sentry, but once mappings are installed and available to the
-application, there is no additional overhead.
-
-{% include graph.html id="sysbench-memory"
-url="/performance/sysbench-memory.csv" title="perf.py sysbench.memory
---runtime=runc --runtime=runsc" %}
-
-The above figure demonstrates the memory transfer rate as measured by
-`sysbench`.
-
-## Memory usage
-
-The Sentry provides an additional layer of indirection, and it requires memory
-in order to store state associated with the application. This memory generally
-consists of a fixed component, plus an amount that varies with the usage of
-operating system resources (e.g. how many sockets or files are opened).
-
-For many use cases, fixed memory overheads are a primary concern. This may be
-because sandboxed containers handle a low volume of requests, and it is
-therefore important to achieve high densities for efficiency.
-
-{% include graph.html id="density" url="/performance/density.csv" title="perf.py
-density --runtime=runc --runtime=runsc" log="true" y_min="100000" %}
-
-The above figure demonstrates these costs based on three sample applications.
-This test is the result of running many instances of a container (50, or 5 in
-the case of redis) and calculating available memory on the host before and
-afterwards, and dividing the difference by the number of containers. This
-technique is used to measure memory usage rather than relying on the
-`usage_in_bytes` value of the container cgroup, because we found that some
-container runtimes, other than `runc` and `runsc`, do not use an individual
-container cgroup.
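-
-A rough sketch of this measurement technique (illustrative only; the actual
-benchmark tooling may differ in detail) reads `MemAvailable` from
-`/proc/meminfo` before and after starting the containers:
-
-```golang
-package main
-
-import (
-    "bufio"
-    "fmt"
-    "os"
-    "strconv"
-    "strings"
-)
-
-// memAvailableKB parses the MemAvailable field from /proc/meminfo.
-func memAvailableKB() (int64, error) {
-    f, err := os.Open("/proc/meminfo")
-    if err != nil {
-        return 0, err
-    }
-    defer f.Close()
-    s := bufio.NewScanner(f)
-    for s.Scan() {
-        fields := strings.Fields(s.Text())
-        if len(fields) >= 2 && fields[0] == "MemAvailable:" {
-            return strconv.ParseInt(fields[1], 10, 64)
-        }
-    }
-    return 0, fmt.Errorf("MemAvailable not found")
-}
-
-func main() {
-    before, _ := memAvailableKB()
-    // ... start N identical containers with the runtime under test ...
-    after, _ := memAvailableKB()
-    const n = 50 // number of containers started above
-    fmt.Printf("approximate per-container overhead: %d kB\n", (before-after)/n)
-}
-```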
-
-The first application is an instance of `sleep`: a trivial application that does
-nothing. The second application is a synthetic `node` application which imports
-a number of modules and listens for requests. The third application is a similar
-synthetic `ruby` application which does the same. Finally, we include an
-instance of `redis` storing approximately 1GB of data. In all cases, the sandbox
-itself is responsible for a small, mostly fixed amount of memory overhead.
-
-## CPU performance
-
-gVisor does not perform emulation or otherwise interfere with the raw execution
-of CPU instructions by the application. Therefore, there is no runtime cost
-imposed for CPU operations.
-
-{% include graph.html id="sysbench-cpu" url="/performance/sysbench-cpu.csv"
-title="perf.py sysbench.cpu --runtime=runc --runtime=runsc" %}
-
-The above figure demonstrates the `sysbench` measurement of CPU events per
-second. Events per second is based on a CPU-bound loop that calculates all prime
-numbers in a specified range. We note that `runsc` does not impose a performance
-penalty, as the code is executing natively in both cases.
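-
-As an illustration only (this is not the `sysbench` implementation), a
-CPU-bound prime-counting loop of the following shape should report nearly
-identical times under `runc` and `runsc`:
-
-```golang
-package main
-
-import (
-    "fmt"
-    "time"
-)
-
-// isPrime uses simple trial division, similar in spirit to the sysbench test.
-func isPrime(n int) bool {
-    for d := 2; d*d <= n; d++ {
-        if n%d == 0 {
-            return false
-        }
-    }
-    return n >= 2
-}
-
-func main() {
-    start := time.Now()
-    count := 0
-    for n := 2; n <= 200000; n++ {
-        if isPrime(n) {
-            count++
-        }
-    }
-    fmt.Printf("found %d primes in %v\n", count, time.Since(start))
-}
-```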
-
-This has important consequences for classes of workloads that are often
-CPU-bound, such as data processing or machine learning. In these cases, `runsc`
-will similarly impose minimal runtime overhead.
-
-{% include graph.html id="tensorflow" url="/performance/tensorflow.csv"
-title="perf.py tensorflow --runtime=runc --runtime=runsc" %}
-
-For example, the above figure shows a sample TensorFlow workload, the
-[convolutional neural network example][cnn]. The time indicated includes the
-full start-up and run time for the workload, which trains a model.
-
-## System calls
-
-Some **structural costs** of gVisor are heavily influenced by the
-[platform choice](/docs/architecture_guide/platforms/), which implements system
-call interception. Today, gVisor supports a variety of platforms. These
-platforms present distinct performance, compatibility and security trade-offs.
-For example, the KVM platform has low overhead system call interception but runs
-poorly with nested virtualization.
-
-{% include graph.html id="syscall" url="/performance/syscall.csv" title="perf.py
-syscall --runtime=runc --runtime=runsc-ptrace --runtime=runsc-kvm" y_min="100"
-log="true" %}
-
-The above figure demonstrates the time required for a raw system call on various
-platforms. The test is implemented by a custom binary which performs a large
-number of system calls and calculates the average time required.
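-
-That benchmark binary is not reproduced here, but a minimal sketch of the same
-idea, timing a tight loop of cheap system calls, might look like the following
-(the real tool differs in its details):
-
-```golang
-package main
-
-import (
-    "fmt"
-    "syscall"
-    "time"
-)
-
-func main() {
-    const n = 1000000
-    start := time.Now()
-    for i := 0; i < n; i++ {
-        syscall.Getppid() // a cheap system call that always succeeds
-    }
-    fmt.Printf("%v per system call\n", time.Since(start)/n)
-}
-```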
-
-This cost will principally impact applications that are system call bound, which
-tend to be high-performance data stores and static network services. In general,
-the impact of system call interception will be lower the more work an
-application does.
-
-{% include graph.html id="redis" url="/performance/redis.csv" title="perf.py
-redis --runtime=runc --runtime=runsc" %}
-
-For example, `redis` is an application that performs relatively little work in
-userspace: in general it reads from a connected socket, reads or modifies some
-data, and writes a result back to the socket. The above figure shows the results
-of running a [comprehensive set of benchmarks][redis-benchmark]. We can see that
-small operations impose a large overhead, while larger operations, such as
-`LRANGE`, where more work is done in the application, have a smaller relative
-overhead.
-
-Some of these costs above are **structural costs**, and `redis` is likely to
-remain a challenging performance scenario. However, optimizing the
-[platform](/docs/architecture_guide/platforms/) will also have a dramatic
-impact.
-
-## Start-up time
-
-For many use cases, the ability to spin up containers quickly and efficiently is
-important. A sandbox may be short-lived and perform minimal user work (e.g. a
-function invocation).
-
-{% include graph.html id="startup" url="/performance/startup.csv" title="perf.py
-startup --runtime=runc --runtime=runsc" %}
-
-The above figure indicates the total time required to start a container through
-[Docker][docker]. This benchmark uses three different applications. First, an
-alpine Linux container that executes `true`. Second, a `node` application that
-loads a number of modules and binds an HTTP server. The time is measured by a
-successful request to the bound port. Finally, a `ruby` application that
-similarly loads a number of modules and binds an HTTP server.
-
-> Note: most of the time overhead above is associated with Docker itself. This is
-> evident with the empty `runc` benchmark. To avoid these costs with `runsc`,
-> you may also consider using `runsc do` mode or invoking the
-> [OCI runtime](../user_guide/quick_start/oci.md) directly.
-
-## Network
-
-Networking is mostly bound by **implementation costs**, and gVisor's network
-stack is improving quickly.
-
-While raw throughput is typically not an important metric in practice for
-common sandbox use cases, `iperf` is nevertheless a common microbenchmark used
-to measure it.
-
-{% include graph.html id="iperf" url="/performance/iperf.csv" title="perf.py
-iperf --runtime=runc --runtime=runsc" %}
-
-The above figure shows the result of an `iperf` test between two instances. For
-the upload case, the specified runtime is used for the `iperf` client, and in
-the download case, the specified runtime is the server. A native runtime is
-always used for the other endpoint in the test.
-
-{% include graph.html id="applications" metric="requests_per_second"
-url="/performance/applications.csv" title="perf.py http.(node|ruby)
---connections=25 --runtime=runc --runtime=runsc" %}
-
-The above figure shows the result of simple `node` and `ruby` web services that
-render a template upon receiving a request. Because these synthetic benchmarks
-do minimal work per request, much like the `redis` case, they suffer from high
-overheads. In practice, the more work an application does, the smaller the
-impact of **structural costs** becomes.
-
-## File system
-
-Some aspects of file system performance are also reflective of **implementation
-costs**, and are an area where gVisor's implementation is improving quickly.
-
-In terms of raw disk I/O, gVisor does not introduce significant fundamental
-overhead. For general file operations, gVisor introduces a small fixed overhead
-for data that transitions across the sandbox boundary. This manifests as
-**structural costs** in some cases, since these operations must be routed
-through the [Gofer](../README.md#gofer) as a result of our
-[Security Model](/docs/architecture_guide/security/), but in most cases are
-dominated by **implementation costs**, due to an internal
-[Virtual File System][vfs] (VFS) implementation that needs improvement.
-
-{% include graph.html id="fio-bw" url="/performance/fio.csv" title="perf.py fio
---engine=sync --runtime=runc --runtime=runsc" log="true" %}
-
-The above figures demonstrate the results of `fio` for reads and writes to and
-from the disk. In this case, the disk quickly becomes the bottleneck and
-dominates other costs.
-
-{% include graph.html id="fio-tmpfs-bw" url="/performance/fio-tmpfs.csv"
-title="perf.py fio --engine=sync --runtime=runc --tmpfs=True --runtime=runsc"
-log="true" %}
-
-The above figure shows the raw I/O performance of using a `tmpfs` mount which is
-sandbox-internal in the case of `runsc`. Generally these operations are
-similarly bound by the cost of copying data around in memory, and we don't see
-the cost of VFS operations.
-
-{% include graph.html id="httpd100k" metric="transfer_rate"
-url="/performance/httpd100k.csv" title="perf.py http.httpd --connections=1
---connections=5 --connections=10 --connections=25 --runtime=runc
---runtime=runsc" %}
-
-The high costs of VFS operations can manifest in benchmarks that execute many
-such operations in the hot path for serving requests, for example. The above
-figure shows the result of using gVisor to serve small pieces of static content
-with predictably poor results. This workload represents `apache` serving a
-single file sized 100k from the container image to a client running
-[ApacheBench][ab] with varying levels of concurrency. The high overhead comes
-principally from the VFS implementation that needs improvement, with several
-internal serialization points (since all requests are reading the same file).
-Note that some of the network stack performance issues also impact this
-benchmark.
-
-{% include graph.html id="ffmpeg" url="/performance/ffmpeg.csv" title="perf.py
-media.ffmpeg --runtime=runc --runtime=runsc" %}
-
-For benchmarks that are bound by raw disk I/O and a mix of compute, file system
-operations are less of an issue. The above figure shows the total time required
-for an `ffmpeg` container to start, load and transcode a 27MB input video.
-
-[ab]: https://en.wikipedia.org/wiki/ApacheBench
-[benchmark-tools]: https://github.com/google/gvisor/tree/master/test/benchmarks
-[gce]: https://cloud.google.com/compute/
-[cnn]: https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/convolutional_network.py
-[docker]: https://docker.io
-[redis-benchmark]: https://redis.io/topics/benchmarks
-[vfs]: https://en.wikipedia.org/wiki/Virtual_file_system
diff --git a/g3doc/architecture_guide/platforms.md b/g3doc/architecture_guide/platforms.md
deleted file mode 100644
index e19c77236..000000000
--- a/g3doc/architecture_guide/platforms.md
+++ /dev/null
@@ -1,61 +0,0 @@
-# Platform Guide
-
-[TOC]
-
-gVisor requires a platform to implement interception of syscalls, basic context
-switching, and memory mapping functionality. Internally, gVisor uses an
-abstraction sensibly called [Platform][platform]. A simplified version of this
-interface looks like:
-
-```golang
-type Platform interface {
-    NewAddressSpace() (AddressSpace, error)
-    NewContext() Context
-}
-
-type Context interface {
-    Switch(as AddressSpace, ac arch.Context) (..., error)
-}
-
-type AddressSpace interface {
-    MapFile(addr hostarch.Addr, f File, fr FileRange, at hostarch.AccessType, ...) error
-    Unmap(addr hostarch.Addr, length uint64)
-}
-```
-
-There are a number of different ways to implement this interface that come with
-various trade-offs, generally around performance and hardware requirements.
-
-## Implementations
-
-The choice of platform depends on the context in which `runsc` is executing. In
-general, virtualized environments may be limited to platforms that do not
-require hardware virtualization support (since the hardware is already in use):
-
-![Platforms](platforms.png "Platform examples.")
-
-### ptrace
-
-The ptrace platform uses [PTRACE_SYSEMU][ptrace] to execute user code without
-allowing it to execute host system calls. This platform can run anywhere that
-`ptrace` works (even VMs without nested virtualization), which is ubiquitous.
-
-Unfortunately, the ptrace platform has high context switch overhead, so system
-call-heavy applications may pay a [performance penalty](./performance.md).
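-
-To illustrate only the underlying mechanism (this is not the platform
-implementation, and it uses `PTRACE_SYSCALL` rather than `PTRACE_SYSEMU`, so
-the traced system calls still execute), a small Linux/amd64 tracer might look
-like:
-
-```golang
-package main
-
-import (
-    "fmt"
-    "os"
-    "os/exec"
-    "runtime"
-    "syscall"
-
-    "golang.org/x/sys/unix"
-)
-
-func main() {
-    // ptrace requests must come from the thread that owns the tracee.
-    runtime.LockOSThread()
-
-    cmd := exec.Command("/bin/true")
-    cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
-    cmd.SysProcAttr = &syscall.SysProcAttr{Ptrace: true}
-    if err := cmd.Start(); err != nil {
-        panic(err)
-    }
-    pid := cmd.Process.Pid
-
-    var status unix.WaitStatus
-    unix.Wait4(pid, &status, 0, nil) // initial stop after execve
-
-    for {
-        // Resume the child until the next system call entry or exit.
-        if err := unix.PtraceSyscall(pid, 0); err != nil {
-            panic(err)
-        }
-        if _, err := unix.Wait4(pid, &status, 0, nil); err != nil || status.Exited() {
-            return
-        }
-        var regs unix.PtraceRegs
-        if err := unix.PtraceGetRegs(pid, &regs); err == nil {
-            fmt.Printf("trapped syscall %d\n", regs.Orig_rax)
-        }
-    }
-}
-```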
-
-### KVM
-
-The KVM platform uses the kernel's [KVM][kvm] functionality to allow the Sentry
-to act as both guest OS and VMM. The KVM platform can run on bare-metal or in a
-VM with nested virtualization enabled. While there is no virtualized hardware
-layer -- the sandbox retains a process model -- gVisor leverages virtualization
-extensions available on modern processors in order to improve isolation and
-performance of address space switches.
-
-## Changing Platforms
-
-See [Changing Platforms](../user_guide/platforms.md).
-
-[kvm]: https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt
-[platform]: https://cs.opensource.google/gvisor/gvisor/+/release-20190304.1:pkg/sentry/platform/platform.go;l=33
-[ptrace]: http://man7.org/linux/man-pages/man2/ptrace.2.html
diff --git a/g3doc/architecture_guide/platforms.png b/g3doc/architecture_guide/platforms.png
deleted file mode 100644
index 005d56feb..000000000
--- a/g3doc/architecture_guide/platforms.png
+++ /dev/null
Binary files differ
diff --git a/g3doc/architecture_guide/platforms.svg b/g3doc/architecture_guide/platforms.svg
deleted file mode 100644
index b0bac9ba7..000000000
--- a/g3doc/architecture_guide/platforms.svg
+++ /dev/null
@@ -1,334 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- width="142.67763mm"
- height="67.063133mm"
- viewBox="0 0 142.67763 67.063134"
- version="1.1"
- id="svg8"
- inkscape:export-filename="/home/ascannell/resources.png"
- inkscape:export-xdpi="53.50127"
- inkscape:export-ydpi="53.50127"
- inkscape:version="0.92.4 (5da689c313, 2019-01-14)"
- sodipodi:docname="platforms.svg">
- <defs
- id="defs2" />
- <sodipodi:namedview
- id="base"
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1.0"
- inkscape:pageopacity="0.0"
- inkscape:pageshadow="2"
- inkscape:zoom="0.98994949"
- inkscape:cx="86.443612"
- inkscape:cy="102.88104"
- inkscape:document-units="mm"
- inkscape:current-layer="layer1"
- showgrid="false"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0"
- inkscape:window-width="1920"
- inkscape:window-height="1005"
- inkscape:window-x="0"
- inkscape:window-y="0"
- inkscape:window-maximized="1" />
- <metadata
- id="metadata5">
- <rdf:RDF>
- <cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title></dc:title>
- </cc:Work>
- </rdf:RDF>
- </metadata>
- <g
- inkscape:label="Layer 1"
- inkscape:groupmode="layer"
- id="layer1"
- transform="translate(-36.081387,-98.953278)">
- <rect
- id="rect10"
- width="33.408691"
- height="33.408691"
- x="36.081387"
- y="120.06757"
- style="fill:#44aa00;stroke-width:0.26458332" />
- <rect
- style="fill:#b3b3b3;stroke-width:0.23881446"
- id="rect16"
- width="142.45465"
- height="10.423517"
- x="36.08139"
- y="155.5929" />
- <rect
- id="rect10-7"
- width="30.52453"
- height="18.976137"
- x="37.416695"
- y="121.65508"
- style="fill:#ff8080;stroke-width:0.19060372" />
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:3.40292525px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.08507314"
- x="41.03727"
- y="148.58765"
- id="text65"><tspan
- sodipodi:role="line"
- id="tspan63"
- x="41.03727"
- y="148.58765"
- style="stroke-width:0.08507314">gVisor</tspan></text>
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:3.33113885px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.08327847"
- x="45.473087"
- y="132.50232"
- id="text123"><tspan
- sodipodi:role="line"
- id="tspan121"
- x="45.473087"
- y="132.50232"
- style="stroke-width:0.08327847">workload</tspan></text>
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:6.43922186px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.16098055"
- x="97.768547"
- y="163.15665"
- id="text163"><tspan
- sodipodi:role="line"
- id="tspan161"
- x="97.768547"
- y="163.15665"
- style="stroke-width:0.16098055">host</tspan></text>
- <rect
- style="fill:#e9afdd;stroke-width:0.39185274"
- id="rect16-7"
- width="72.9646"
- height="54.79026"
- x="105.79441"
- y="98.953278" />
- <rect
- id="rect10-5"
- width="33.408691"
- height="33.408691"
- x="108.24348"
- y="100.53072"
- style="fill:#44aa00;stroke-width:0.26458332" />
- <rect
- id="rect10-7-6"
- width="30.52453"
- height="20.045216"
- x="109.57877"
- y="102.11823"
- style="fill:#ff8080;stroke-width:0.19589928" />
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:3.40292525px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.08507314"
- x="112.86765"
- y="129.01863"
- id="text65-2"><tspan
- sodipodi:role="line"
- id="tspan63-9"
- x="112.86765"
- y="129.01863"
- style="stroke-width:0.08507314">gVisor</tspan></text>
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:3.33113885px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.08327847"
- x="117.63519"
- y="114.02371"
- id="text123-1"><tspan
- sodipodi:role="line"
- id="tspan121-2"
- x="117.63519"
- y="114.02371"
- style="stroke-width:0.08327847">workload</tspan></text>
- <rect
- id="rect10-7-7"
- width="11.815663"
- height="8.0126781"
- x="54.538059"
- y="143.27702"
- style="fill:#aaccff;stroke-width:0.07705856" />
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:4.35074377px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.10876859"
- x="55.931114"
- y="148.90578"
- id="text144"><tspan
- sodipodi:role="line"
- id="tspan142"
- x="55.931114"
- y="148.90578"
- style="stroke-width:0.10876859">KVM</tspan></text>
- <rect
- id="rect10-6"
- width="33.408691"
- height="33.408691"
- x="71.044685"
- y="119.73112"
- style="fill:#44aa00;stroke-width:0.26458332" />
- <rect
- id="rect10-7-0"
- width="30.52453"
- height="18.976137"
- x="72.37999"
- y="121.31865"
- style="fill:#ff8080;stroke-width:0.19060372" />
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:3.40292525px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.08507314"
- x="76.000565"
- y="148.25128"
- id="text65-6"><tspan
- sodipodi:role="line"
- id="tspan63-2"
- x="76.000565"
- y="148.25128"
- style="stroke-width:0.08507314">gVisor</tspan></text>
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:3.33113885px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.08327847"
- x="80.436386"
- y="132.16595"
- id="text123-6"><tspan
- sodipodi:role="line"
- id="tspan121-1"
- x="80.436386"
- y="132.16595"
- style="stroke-width:0.08327847">workload</tspan></text>
- <rect
- id="rect10-7-7-8"
- width="11.815664"
- height="8.0126781"
- x="89.501358"
- y="142.94067"
- style="fill:#ffeeaa;stroke-width:0.07705856" />
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:3.39456654px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.08486416"
- x="89.92292"
- y="147.89806"
- id="text144-7"><tspan
- sodipodi:role="line"
- id="tspan142-9"
- x="89.92292"
- y="147.89806"
- style="stroke-width:0.08486416">ptrace</tspan></text>
- <rect
- id="rect10-7-7-8-3"
- width="11.815665"
- height="8.0126781"
- x="127.08897"
- y="123.97878"
- style="fill:#ffeeaa;stroke-width:0.07705856" />
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:3.39456654px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.08486416"
- x="127.51052"
- y="128.9362"
- id="text144-7-7"><tspan
- sodipodi:role="line"
- id="tspan142-9-5"
- x="127.51052"
- y="128.9362"
- style="stroke-width:0.08486416">ptrace</tspan></text>
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:5.45061255px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.13626531"
- x="138.49318"
- y="152.11841"
- id="text229"><tspan
- sodipodi:role="line"
- id="tspan227"
- x="138.49318"
- y="152.11841"
- style="stroke-width:0.13626531">VM</tspan></text>
- <rect
- style="fill:#b3b3b3;stroke-width:0.16518368"
- id="rect16-9"
- width="68.15374"
- height="10.423517"
- x="108.24348"
- y="134.99774" />
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:6.17854786px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.15446369"
- x="132.91473"
- y="142.07658"
- id="text248"><tspan
- sodipodi:role="line"
- id="tspan246"
- x="132.91473"
- y="142.07658"
- style="stroke-width:0.15446369">guest</tspan></text>
- <rect
- id="rect10-5-2"
- width="33.408691"
- height="33.408691"
- x="143.32402"
- y="100.35877"
- style="fill:#44aa00;stroke-width:0.26458332" />
- <rect
- id="rect10-7-6-2"
- width="30.52453"
- height="20.045216"
- x="144.65933"
- y="101.94627"
- style="fill:#ff8080;stroke-width:0.19589929" />
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:3.40292525px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.08507314"
- x="147.94815"
- y="128.84665"
- id="text65-2-8"><tspan
- sodipodi:role="line"
- id="tspan63-9-9"
- x="147.94815"
- y="128.84665"
- style="stroke-width:0.08507314">gVisor</tspan></text>
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:3.33113885px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.08327847"
- x="152.71565"
- y="113.85176"
- id="text123-1-7"><tspan
- sodipodi:role="line"
- id="tspan121-2-3"
- x="152.71565"
- y="113.85176"
- style="stroke-width:0.08327847">workload</tspan></text>
- <rect
- id="rect10-7-7-8-3-6"
- width="11.815666"
- height="8.0126781"
- x="162.16933"
- y="123.80682"
- style="fill:#ffeeaa;stroke-width:0.07705856" />
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:3.39456654px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.08486416"
- x="162.59088"
- y="128.76421"
- id="text144-7-7-1"><tspan
- sodipodi:role="line"
- id="tspan142-9-5-2"
- x="162.59088"
- y="128.76421"
- style="stroke-width:0.08486416">ptrace</tspan></text>
- </g>
-</svg>
diff --git a/g3doc/architecture_guide/resources.md b/g3doc/architecture_guide/resources.md
deleted file mode 100644
index fc997d40c..000000000
--- a/g3doc/architecture_guide/resources.md
+++ /dev/null
@@ -1,144 +0,0 @@
-# Resource Model
-
-[TOC]
-
-The resource model for gVisor does not assume a fixed number of threads of
-execution (i.e. vCPUs) or amount of physical memory. Where possible, decisions
-about underlying physical resources are delegated to the host system, where
-optimizations can be made with global information. This delegation allows the
-sandbox to be highly dynamic in terms of resource usage: spanning a large number
-of cores and a large amount of memory when busy, and yielding those resources
-back to the host when not.
-
-In other words, the shape of the sandbox should closely track the shape of the
-sandboxed process:
-
-![Resource model](resources.png "Workloads of different shapes.")
-
-## Processes
-
-Much like a Virtual Machine (VM), a gVisor sandbox appears as an opaque process
-on the system. Processes within the sandbox do not manifest as processes on the
-host system, and process-level interactions within the sandbox require entering
-the sandbox (e.g. via a [Docker exec][exec]).
-
-## Networking
-
-The sandbox attaches a network endpoint to the system, but runs its own network
-stack. All network resources, other than packets in flight on the host, exist
-only inside the sandbox, bound by relevant resource limits.
-
-You can interact with network endpoints exposed by the sandbox, just as you
-would any other container, but network introspection similarly requires entering
-the sandbox.
-
-## Files
-
-Files in the sandbox may be backed by different implementations. For host-native
-files (where a file descriptor is available), the Gofer may return a file
-descriptor to the Sentry via [SCM_RIGHTS][scmrights][^1].
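-
-For readers unfamiliar with descriptor donation, the sketch below shows the
-generic `SCM_RIGHTS` mechanism over a Unix socket pair. It is illustrative only
-and is not the protocol spoken between the Gofer and the Sentry:
-
-```golang
-package main
-
-import (
-    "os"
-
-    "golang.org/x/sys/unix"
-)
-
-// sendFD donates an open file descriptor over a connected Unix socket.
-func sendFD(sock int, file *os.File) error {
-    rights := unix.UnixRights(int(file.Fd()))
-    return unix.Sendmsg(sock, []byte{0}, rights, nil, 0)
-}
-
-// recvFD receives a donated descriptor from the other end of the socket.
-func recvFD(sock int) (int, error) {
-    oob := make([]byte, unix.CmsgSpace(4))
-    _, oobn, _, _, err := unix.Recvmsg(sock, make([]byte, 1), oob, 0)
-    if err != nil {
-        return -1, err
-    }
-    msgs, err := unix.ParseSocketControlMessage(oob[:oobn])
-    if err != nil {
-        return -1, err
-    }
-    fds, err := unix.ParseUnixRights(&msgs[0])
-    if err != nil {
-        return -1, err
-    }
-    return fds[0], nil
-}
-
-func main() {
-    // Demonstrate donation of os.Stdout across a socket pair in one process.
-    pair, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_STREAM, 0)
-    if err != nil {
-        panic(err)
-    }
-    if err := sendFD(pair[0], os.Stdout); err != nil {
-        panic(err)
-    }
-    fd, err := recvFD(pair[1])
-    if err != nil {
-        panic(err)
-    }
-    donated := os.NewFile(uintptr(fd), "donated")
-    donated.WriteString("hello via a donated descriptor\n")
-}
-```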
-
-These files may be read from and written to through standard system calls, and
-also mapped into the associated application's address space. This allows the
-same host memory to be shared across multiple sandboxes, although this mechanism
-does not preclude the use of side-channels (see [Security Model](./security.md)).
-
-Note that some file systems exist only within the context of the sandbox. For
-example, in many cases a `tmpfs` mount will be available at `/tmp` or
-`/dev/shm`, which allocates memory directly from the sandbox memory file (see
-below). Ultimately, these will be accounted against relevant limits in a similar
-way as the host native case.
-
-## Threads
-
-The Sentry models individual task threads with [goroutines][goroutine]. As a
-result, each task thread is a lightweight [green thread][greenthread], and may
-not correspond to an underlying host thread.
-
-However, application execution is modelled as a blocking system call by the
-Sentry. This means that additional host threads may be created, *depending on
-the number of active application threads*. In practice, a busy application will
-converge on the number of active threads, and the host will be able to make
-scheduling decisions about all application threads.
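-
-This behaviour can be observed in any Go program, not just the Sentry. The
-standalone sketch below (illustrative only) blocks a number of goroutines in
-real `read(2)` calls and prints the process's host thread count before and
-after:
-
-```golang
-package main
-
-import (
-    "fmt"
-    "os"
-    "runtime"
-    "strings"
-    "time"
-
-    "golang.org/x/sys/unix"
-)
-
-// osThreads returns the "Threads:" field from /proc/self/status.
-func osThreads() string {
-    data, _ := os.ReadFile("/proc/self/status")
-    for _, line := range strings.Split(string(data), "\n") {
-        if strings.HasPrefix(line, "Threads:") {
-            return strings.TrimSpace(strings.TrimPrefix(line, "Threads:"))
-        }
-    }
-    return "?"
-}
-
-func main() {
-    fmt.Println("host threads before:", osThreads())
-    var keep []*os.File
-    for i := 0; i < 20; i++ {
-        r, w, err := os.Pipe()
-        if err != nil {
-            panic(err)
-        }
-        keep = append(keep, r, w)
-        go func(fd int) {
-            // Block in read(2) forever; the runtime keeps (or creates) a host
-            // thread for each goroutine stuck in a blocking system call.
-            buf := make([]byte, 1)
-            unix.Read(fd, buf)
-        }(int(r.Fd()))
-    }
-    time.Sleep(500 * time.Millisecond)
-    fmt.Println("host threads after: ", osThreads())
-    runtime.KeepAlive(keep)
-}
-```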
-
-## Time
-
-Time in the sandbox is provided by the Sentry, through its own [vDSO][vdso] and
-time-keeping implementation. This is distinct from the host time, and no state
-is shared with the host, although the time will be initialized with the host
-clock.
-
-The Sentry runs timers to note the passage of time, much like a kernel running
-on hardware (though the timers are software timers, in this case). These timers
-provide updates to the vDSO, the time returned through system calls, and the
-time recorded for usage or limit tracking (e.g. [RLIMIT_CPU][rlimit]).
-
-When all application threads are idle, the Sentry disables timers until an event
-occurs that wakes either the Sentry or an application thread, similar to a
-[tickless kernel][tickless]. This allows the Sentry to achieve near zero CPU
-usage for idle applications.
-
-## Memory
-
-The Sentry implements its own memory management, including demand-paging and a
-Sentry internal page cache for files that cannot be used natively. A single
-[memfd][memfd] backs all application memory.
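-
-A memfd is an anonymous, file-like memory object. The hedged sketch below shows
-the host primitive in isolation; it is not how the Sentry manages its memory
-file:
-
-```golang
-package main
-
-import (
-    "fmt"
-
-    "golang.org/x/sys/unix"
-)
-
-func main() {
-    // Create an anonymous, file-like memory object (a "memfd").
-    fd, err := unix.MemfdCreate("demo", unix.MFD_CLOEXEC)
-    if err != nil {
-        panic(err)
-    }
-    defer unix.Close(fd)
-
-    const size = 4096
-    if err := unix.Ftruncate(fd, size); err != nil {
-        panic(err)
-    }
-
-    // Map it like a regular file; multiple mappings (or processes holding the
-    // same descriptor) see the same backing pages.
-    b, err := unix.Mmap(fd, 0, size, unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED)
-    if err != nil {
-        panic(err)
-    }
-    defer unix.Munmap(b)
-
-    copy(b, []byte("backed by a memfd"))
-    fmt.Println(string(b[:17]))
-}
-```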
-
-### Address spaces
-
-The creation of address spaces is platform-specific. For some platforms,
-additional "stub" processes may be created on the host in order to support
-additional address spaces. These stubs are subject to various limits applied at
-the sandbox level (e.g. PID limits).
-
-### Physical memory
-
-The host is able to manage physical memory using regular means (e.g. tracking
-working sets, reclaiming and swapping under pressure). The Sentry lazily
-populates host mappings for applications, and allows the host to demand-page
-those regions, which is critical for the functioning of those mechanisms.
-
-In order to avoid excessive overhead, the Sentry does not demand-page individual
-pages. Instead, it selects appropriate regions based on heuristics. There is a
-trade-off here: the Sentry is unable to trivially determine which pages are
-active and which are not. Even if pages were individually faulted, the host may
-select pages to be reclaimed or swapped without the Sentry's knowledge.
-
-Therefore, memory usage statistics within the sandbox (e.g. via `proc`) are
-approximations. The Sentry maintains an internal breakdown of memory usage, and
-can collect accurate information but only through a relatively expensive API
-call. In any case, it would likely be considered unwise to share precise
-information about how the host is managing memory with the sandbox.
-
-Finally, when an application marks a region of memory as no longer needed, for
-example via a call to [madvise][madvise], the Sentry *releases this memory back
-to the host*. There can be performance penalties for this, since it may be
-cheaper in many cases to retain the memory and use it to satisfy some other
-request. However, releasing it immediately to the host allows the host to more
-effectively multiplex resources and apply an efficient global policy.
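-
-From the application's point of view this is the ordinary `madvise` interface;
-a minimal sketch (nothing gVisor-specific) is:
-
-```golang
-package main
-
-import "golang.org/x/sys/unix"
-
-func main() {
-    const size = 1 << 20
-    // An anonymous private mapping standing in for application memory.
-    b, err := unix.Mmap(-1, 0, size, unix.PROT_READ|unix.PROT_WRITE,
-        unix.MAP_PRIVATE|unix.MAP_ANONYMOUS)
-    if err != nil {
-        panic(err)
-    }
-    for i := range b {
-        b[i] = 0xff // touch the pages so they are actually populated
-    }
-    // Tell the kernel (or, inside a sandbox, the Sentry) that the contents are
-    // no longer needed; the backing pages may be reclaimed.
-    if err := unix.Madvise(b, unix.MADV_DONTNEED); err != nil {
-        panic(err)
-    }
-    unix.Munmap(b)
-}
-```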
-
-## Limits
-
-All Sentry threads and Sentry memory are subject to a container cgroup. However,
-application usage will not appear as anonymous memory usage, and will instead be
-accounted to the `memfd`. All anonymous memory will correspond to Sentry usage,
-and host memory charged to the container will work as standard.
-
-The cgroups can be monitored for standard signals: pressure indicators,
-threshold notifiers, etc. and can also be adjusted dynamically. Note that the
-Sentry itself may listen for pressure signals in its containing cgroup, in order
-to purge internal caches.
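-
-For example, a monitoring process might poll the container's memory cgroup
-directly. The path below is purely illustrative (cgroup v1 under Docker);
-adjust it for your runtime and for cgroup v2:
-
-```golang
-package main
-
-import (
-    "fmt"
-    "os"
-    "strings"
-)
-
-func main() {
-    // Hypothetical cgroup path for a sandbox container; substitute a real ID.
-    const path = "/sys/fs/cgroup/memory/docker/<container-id>/memory.usage_in_bytes"
-    data, err := os.ReadFile(path)
-    if err != nil {
-        fmt.Println("substitute a real container ID:", err)
-        return
-    }
-    fmt.Println("sandbox memory usage (bytes):", strings.TrimSpace(string(data)))
-}
-```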
-
-[goroutine]: https://tour.golang.org/concurrency/1
-[greenthread]: https://en.wikipedia.org/wiki/Green_threads
-[scheduler]: https://morsmachine.dk/go-scheduler
-[vdso]: https://en.wikipedia.org/wiki/VDSO
-[rlimit]: http://man7.org/linux/man-pages/man2/getrlimit.2.html
-[tickless]: https://en.wikipedia.org/wiki/Tickless_kernel
-[memfd]: http://man7.org/linux/man-pages/man2/memfd_create.2.html
-[scmrights]: http://man7.org/linux/man-pages/man7/unix.7.html
-[madvise]: http://man7.org/linux/man-pages/man2/madvise.2.html
-[exec]: https://docs.docker.com/engine/reference/commandline/exec/
-[^1]: Unless host networking is enabled, the Sentry is not able to create or
- open host file descriptors itself; it can only receive them in this way
- from the Gofer.
diff --git a/g3doc/architecture_guide/resources.png b/g3doc/architecture_guide/resources.png
deleted file mode 100644
index f715008ec..000000000
--- a/g3doc/architecture_guide/resources.png
+++ /dev/null
Binary files differ
diff --git a/g3doc/architecture_guide/resources.svg b/g3doc/architecture_guide/resources.svg
deleted file mode 100644
index fd7805d90..000000000
--- a/g3doc/architecture_guide/resources.svg
+++ /dev/null
@@ -1,208 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- width="108.24417mm"
- height="47.513165mm"
- viewBox="0 0 108.24417 47.513165"
- version="1.1"
- id="svg8"
- inkscape:export-filename="/home/ascannell/resources.png"
- inkscape:export-xdpi="53.50127"
- inkscape:export-ydpi="53.50127"
- inkscape:version="0.92.4 (5da689c313, 2019-01-14)"
- sodipodi:docname="resources.svg">
- <defs
- id="defs2" />
- <sodipodi:namedview
- id="base"
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1.0"
- inkscape:pageopacity="0.0"
- inkscape:pageshadow="2"
- inkscape:zoom="0.98994949"
- inkscape:cx="16.897058"
- inkscape:cy="41.261746"
- inkscape:document-units="mm"
- inkscape:current-layer="layer1"
- showgrid="false"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0"
- inkscape:window-width="1920"
- inkscape:window-height="1005"
- inkscape:window-x="0"
- inkscape:window-y="0"
- inkscape:window-maximized="1" />
- <metadata
- id="metadata5">
- <rdf:RDF>
- <cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title></dc:title>
- </cc:Work>
- </rdf:RDF>
- </metadata>
- <g
- inkscape:label="Layer 1"
- inkscape:groupmode="layer"
- id="layer1"
- transform="translate(-36.081387,-118.50325)">
- <rect
- id="rect10"
- width="33.408691"
- height="33.408691"
- x="36.081387"
- y="120.06757"
- style="fill:#44aa00;stroke-width:0.26458332" />
- <circle
- style="fill:#44aa00;stroke-width:0.21849461"
- id="path12"
- cx="87.958534"
- cy="136.63828"
- r="17.105247" />
- <path
- sodipodi:type="star"
- style="fill:#44aa00;stroke-width:0.26458332"
- id="path14"
- sodipodi:sides="3"
- sodipodi:cx="124.13387"
- sodipodi:cy="141.81859"
- sodipodi:r1="23.31534"
- sodipodi:r2="11.65767"
- sodipodi:arg1="0.52359878"
- sodipodi:arg2="1.5707963"
- inkscape:flatsided="false"
- inkscape:rounded="0"
- inkscape:randomized="0"
- d="m 144.32555,153.47626 -20.19168,0 -20.19167,0 10.09583,-17.48651 10.09584,-17.4865 10.09584,17.4865 z"
- inkscape:transform-center-x="1.8384776e-06"
- inkscape:transform-center-y="-5.8288369" />
- <rect
- style="fill:#b3b3b3;stroke-width:0.20817307"
- id="rect16"
- width="108.24416"
- height="10.423517"
- x="36.08139"
- y="155.5929" />
- <path
- sodipodi:type="star"
- style="fill:#ff8080;stroke-width:0.20018946"
- id="path14-3"
- sodipodi:sides="3"
- sodipodi:cx="124.13387"
- sodipodi:cy="139.31911"
- sodipodi:r1="17.640888"
- sodipodi:r2="8.8204451"
- sodipodi:arg1="0.52359878"
- sodipodi:arg2="1.5707963"
- inkscape:flatsided="false"
- inkscape:rounded="0"
- inkscape:randomized="0"
- d="m 139.41133,148.13955 -15.27746,0 -15.27745,0 7.63872,-13.23067 7.63873,-13.23066 7.63873,13.23066 z"
- inkscape:transform-center-x="3.9117172e-06"
- inkscape:transform-center-y="-4.4102243" />
- <circle
- style="fill:#ff8080;stroke-width:0.18094084"
- id="path12-6"
- cx="87.93705"
- cy="134.75125"
- r="14.165282" />
- <rect
- id="rect10-7"
- width="30.52453"
- height="25.657875"
- x="37.416695"
- y="121.65508"
- style="fill:#ff8080;stroke-width:0.22163473" />
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:3.40292525px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.08507314"
- x="47.387276"
- y="151.7626"
- id="text65"><tspan
- sodipodi:role="line"
- id="tspan63"
- x="47.387276"
- y="151.7626"
- style="stroke-width:0.08507314">gVisor</tspan></text>
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:3.40292525px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.08507314"
- x="82.156319"
- y="151.71547"
- id="text65-5"><tspan
- sodipodi:role="line"
- id="tspan63-3"
- x="82.156319"
- y="151.71547"
- style="stroke-width:0.08507314">gVisor</tspan></text>
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:3.40292525px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.08507314"
- x="118.66879"
- y="151.71547"
- id="text65-5-5"><tspan
- sodipodi:role="line"
- id="tspan63-3-6"
- x="118.66879"
- y="151.71547"
- style="stroke-width:0.08507314">gVisor</tspan></text>
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:3.33113885px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.08327847"
- x="45.473087"
- y="136.20644"
- id="text123"><tspan
- sodipodi:role="line"
- id="tspan121"
- x="45.473087"
- y="136.20644"
- style="stroke-width:0.08327847">workload</tspan></text>
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:3.33113885px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.08327847"
- x="80.153076"
- y="136.00925"
- id="text123-1"><tspan
- sodipodi:role="line"
- id="tspan121-2"
- x="80.153076"
- y="136.00925"
- style="stroke-width:0.08327847">workload</tspan></text>
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:3.33113885px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.08327847"
- x="116.50173"
- y="138.68195"
- id="text123-1-7"><tspan
- sodipodi:role="line"
- id="tspan121-2-0"
- x="116.50173"
- y="138.68195"
- style="stroke-width:0.08327847">workload</tspan></text>
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:6.43922186px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.16098055"
- x="81.893562"
- y="163.15665"
- id="text163"><tspan
- sodipodi:role="line"
- id="tspan161"
- x="81.893562"
- y="163.15665"
- style="stroke-width:0.16098055">host</tspan></text>
- </g>
-</svg>
diff --git a/g3doc/architecture_guide/security.md b/g3doc/architecture_guide/security.md
deleted file mode 100644
index 9363d834c..000000000
--- a/g3doc/architecture_guide/security.md
+++ /dev/null
@@ -1,255 +0,0 @@
-# Security Model
-
-[TOC]
-
-gVisor was created in order to provide additional defense against the
-exploitation of kernel bugs by untrusted userspace code. In order to understand
-how gVisor achieves this goal, it is first necessary to understand the basic
-threat model.
-
-## Threats: The Anatomy of an Exploit
-
-An exploit takes advantage of a software or hardware bug in order to escalate
-privileges, gain access to privileged data, or disrupt services. All of the
-possible interactions that a malicious application can have with the rest of the
-system (attack vectors) define the attack surface. We categorize these attack
-vectors into several common classes.
-
-### System API
-
-An operating system or hypervisor exposes an abstract System API in the form of
-system calls and traps. This API may be documented and stable, as with Linux, or
-it may be abstracted behind a library, as with Windows (e.g. win32.dll or
-ntdll.dll). The System API includes all standard interfaces that application
-code uses to interact with the system. This includes high-level abstractions
-that are derived from low-level system calls, such as system files, sockets and
-namespaces.
-
-Although the System API is exposed to applications by design, bugs and race
-conditions within the kernel or hypervisor may occasionally be exploitable via
-the API. This is common in part due to the fact that most kernels and
-hypervisors are written in [C][clang], which is well-suited to interfacing with
-hardware but often prone to security issues. In order to exploit these issues, a
-typical attack might involve some combination of the following:
-
-1. Opening or creating some combination of files, sockets or other descriptors.
-1. Passing crafted, malicious arguments, structures or packets.
-1. Racing with multiple threads in order to hit specific code paths.
-
-For example, for the [Dirty Cow][dirtycow] privilege escalation bug, an
-application would open a specific file in `/proc` or use a specific `ptrace`
-system call, and use multiple threads in order to trigger a race condition when
-touching a fresh page of memory. The attacker then gains control over a page of
-memory belonging to the system. With additional privileges or access to
-privileged data in the kernel, an attacker will often be able to employ
-additional techniques to gain full access to the rest of the system.
-
-While bugs in the implementation of the System API are readily fixed, they are
-also the most common form of exploit. The exposure created by this class of
-exploit is what gVisor aims to minimize and control, described in detail below.
-
-### System ABI
-
-Hardware and software exploits occasionally exist in execution paths that are
-not part of an intended System API. In this case, exploits may be found as part
-of implicit actions the hardware or privileged system code takes in response to
-certain events, such as traps or interrupts. For example, the recent
-[POPSS][popss] flaw required only native code execution (no specific system call
-or file access). In that case, the Xen hypervisor was similarly vulnerable,
-highlighting that hypervisors are not immune to this vector.
-
-### Side Channels
-
-Hardware side channels may be exploitable by any code running on a system:
-native, sandboxed, or virtualized. However, many host-level mitigations against
-hardware side channels are still effective with a sandbox. For example, kernels
-built with retpoline protect against some speculative execution attacks
-(Spectre) and frame poisoning may protect against L1 terminal fault (L1TF)
-attacks. Hypervisors may introduce additional complications in this regard, as
-there is no mitigation against an application in a normally functioning Virtual
-Machine (VM) exploiting the L1TF vulnerability for another VM on the sibling
-hyperthread.
-
-### Other Vectors
-
-The above categories in no way represent an exhaustive list of exploits, as we
-focus only on running untrusted code from within the operating system or
-hypervisor. We do not consider other ways that a more generic adversary may
-interact with a system, such as inserting a portable storage device with a
-malicious filesystem image, using a combination of crafted keyboard or touch
-inputs, or saturating a network device with ill-formed packets.
-
-Furthermore, high-level systems may contain exploitable components. An attacker
-need not escalate privileges within a container if there’s an exploitable
-network-accessible service on the host or some other API path. *A sandbox is not
-a substitute for a secure architecture*.
-
-## Goals: Limiting Exposure
-
-![Threat model](security.png "Threat model.")
-
-gVisor’s primary design goal is to minimize the System API attack vector through
-multiple layers of defense, while still providing a process model. There are two
-primary security principles that inform this design. First, the application’s
-direct interactions with the host System API are intercepted by the Sentry,
-which implements the System API instead. Second, the System API accessible to
-the Sentry itself is minimized to a safer, restricted set. The first principle
-minimizes the possibility of direct exploitation of the host System API by
-applications, and the second principle minimizes indirect exploitability, which
-is the exploitation by an exploited or buggy Sentry (e.g. chaining an exploit).
-
-The first principle is similar to the security basis for a Virtual Machine (VM).
-With a VM, an application’s interactions with the host are replaced by
-interactions with a guest operating system and a set of virtualized hardware
-devices. These hardware devices are then implemented via the host System API by
-a Virtual Machine Monitor (VMM). The Sentry similarly prevents direct
-interactions by providing its own implementation of the System API that the
-application must interact with. Applications are not able to directly craft
-specific arguments or flags for the host System API, or interact directly with
-host primitives.
-
-For both the Sentry and a VMM, it’s worth noting that while direct interactions
-are not possible, indirect interactions are still possible. For example, a read
-on a host-backed file in the Sentry may ultimately result in a host read system
-call (made by the Sentry, not by passing through arguments from the
-application), similar to how a read on a block device in a VM may result in the
-VMM issuing a corresponding host read system call from a backing file.
-
-An important distinction from a VM is that the Sentry implements a System API
-based directly on host System API primitives instead of relying on virtualized
-hardware and a guest operating system. This selects a distinct set of
-trade-offs, largely in the performance, efficiency and compatibility domains.
-Since transitions in and out of the sandbox are relatively expensive, a guest
-operating system will typically take ownership of resources. For example, in the
-above case, the guest operating system may read the block device data in a local
-page cache, to avoid subsequent reads. This may lead to better performance but
-lower efficiency, since memory may be wasted or duplicated. The Sentry opts
-instead to defer to the host for many operations during runtime, for improved
-efficiency but lower performance in some use cases.
-
-### What can a sandbox do?
-
-An application in a gVisor sandbox is permitted to do most things a standard
-container can do: for example, applications can read and write files mapped
-within the container, make network connections, etc. As described above,
-gVisor's primary goal is to limit exposure to bugs and exploits while still
-allowing most applications to run. Even so, gVisor will limit some operations
-that might be permitted with a standard container. Even with appropriate
-capabilities, a user in a gVisor sandbox will only be able to manipulate
-virtualized system resources (e.g. the system time, kernel settings or
-filesystem attributes) and not underlying host system resources.
-
-While the sandbox virtualizes many operations for the application, we limit the
-sandbox's own interactions with the host to the following high-level operations:
-
-1. Communicate with a Gofer process via a connected socket. The sandbox may
- receive new file descriptors from the Gofer process, corresponding to opened
- files. These files can then be read from and written to by the sandbox.
-1. Make a minimal set of host system calls. The calls do not include the
- creation of new sockets (unless host networking mode is enabled) or opening
- files. The calls include duplication and closing of file descriptors,
- synchronization, timers and signal management.
-1. Read and write packets to a virtual ethernet device. This is not required if
- host networking is enabled (or networking is disabled).
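-
-The first operation above uses standard `SCM_RIGHTS` file descriptor passing
-over a Unix domain socket. The following is a minimal, illustrative sketch of
-the receiving side (not gVisor's actual code; the connected socket `sock` and
-all names are assumptions):
-
-```go
-package example
-
-import (
-	"fmt"
-	"os"
-
-	"golang.org/x/sys/unix"
-)
-
-// recvFD receives a single file descriptor over the connected Unix domain
-// socket sock using SCM_RIGHTS.
-func recvFD(sock int) (*os.File, error) {
-	buf := make([]byte, 1)                 // at least one byte of regular data
-	oob := make([]byte, unix.CmsgSpace(4)) // room for one 32-bit descriptor
-	_, oobn, _, _, err := unix.Recvmsg(sock, buf, oob, 0)
-	if err != nil {
-		return nil, err
-	}
-	msgs, err := unix.ParseSocketControlMessage(oob[:oobn])
-	if err != nil {
-		return nil, err
-	}
-	if len(msgs) == 0 {
-		return nil, fmt.Errorf("no control message received")
-	}
-	fds, err := unix.ParseUnixRights(&msgs[0])
-	if err != nil {
-		return nil, err
-	}
-	if len(fds) == 0 {
-		return nil, fmt.Errorf("no file descriptor in control message")
-	}
-	return os.NewFile(uintptr(fds[0]), "gofer-file"), nil
-}
-```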
-
-### System ABI, Side Channels and Other Vectors
-
-gVisor relies on the host operating system and the platform for defense against
-hardware-based attacks. Given the nature of these vulnerabilities, there is
-little defense that gVisor can provide (there’s no guarantee that additional
-hardware measures, such as virtualization, memory encryption, etc. would
-actually decrease the attack surface). Note that this is true even when using
-hardware virtualization for acceleration, as the host kernel or hypervisor is
-ultimately responsible for defending against attacks from within malicious
-guests.
-
-gVisor similarly relies on the host resource mechanisms (cgroups) for defense
-against resource exhaustion and denial of service attacks. Network policy
-controls should be applied at the container level to ensure appropriate network
-policy enforcement. Note that the sandbox itself is not capable of altering or
-configuring these mechanisms, and sandboxing should make it less likely that an
-attacker can exploit or override these controls through other means.
-
-## Principles: Defense-in-Depth
-
-For gVisor development, there are several engineering principles that are
-employed in order to ensure that the system meets its design goals.
-
-1. No system call is passed through directly to the host. Every supported call
-    has an independent implementation in the Sentry that is unlikely to suffer
- from identical vulnerabilities that may appear in the host. This has the
- consequence that all kernel features used by applications require an
- implementation within the Sentry.
-1. Only common, universal functionality is implemented. Some filesystems,
- network devices or modules may expose specialized functionality to user
- space applications via mechanisms such as extended attributes, raw sockets
- or ioctls. Since the Sentry is responsible for implementing the full system
- call surface, we do not implement or pass through these specialized APIs.
-1. The host surface exposed to the Sentry is minimized. While the system call
- surface is not trivial, it is explicitly enumerated and controlled. The
- Sentry is not permitted to open new files, create new sockets or do many
- other interesting things on the host.
-
-Additionally, we have practical restrictions that are imposed on the project to
-minimize the risk of Sentry exploitability. For example:
-
-1. Unsafe code is carefully controlled. All unsafe code is isolated in files
- that end with "unsafe.go", in order to facilitate validation and auditing.
- No file without the unsafe suffix may import the unsafe package.
-1. No CGo is allowed. The Sentry must be a pure Go binary.
-1. External imports are not generally allowed within the core packages. Only
- limited external imports are used within the setup code. The code available
- inside the Sentry is carefully controlled, to ensure that the above rules
- are effective.
-
-Finally, we recognize that security is a process, and that vigilance is
-critical. Beyond our security disclosure process, the Sentry is fuzzed
-continuously to identify potential bugs and races proactively, and production
-crashes are recorded and triaged to similarly identify material issues.
-
-## FAQ
-
-### Is this more or less secure than a Virtual Machine?
-
-The security of a VM depends to a large extent on what is exposed from the host
-kernel and userspace support code. For example, device emulation code in the
-host kernel (e.g. APIC) or optimizations (e.g. vhost) can be more complex than a
-simple system call, and exploits carry the same risks. Similarly, the userspace
-support code is frequently unsandboxed, and exploits, while rare, may allow
-unfettered access to the system.
-
-Some platforms leverage the same virtualization hardware as VMs in order to
-provide better system call interception performance. However, gVisor does not
-implement any device emulation, and instead opts to use a sandboxed host System
-API directly. Both approaches significantly reduce the original attack surface.
-Ultimately, since gVisor is capable of using the same hardware mechanism, one
-should not assume that the mere use of virtualization hardware makes a system
-more or less secure, just as it would be a mistake to make the claim that the
-use of a unibody alone makes a car safe.
-
-### Does this stop hardware side channels?
-
-In general, gVisor does not provide protection against hardware side channels,
-although it may make exploits that rely on direct access to the host System API
-more difficult to use. To minimize exposure, you should follow relevant guidance
-from vendors and keep your host kernel and firmware up-to-date.
-
-### Is this just a ptrace sandbox?
-
-No: the term “ptrace sandbox” generally refers to software that uses the Linux
-ptrace facility to inspect and authorize system calls made by applications,
-enforcing a specific policy. These commonly suffer from two issues. First,
-vulnerable system calls may be authorized by the sandbox, as the application
-still has direct access to some System API. Second, it’s impossible to avoid
-time-of-check, time-of-use race conditions without disabling multi-threading.
-
-In gVisor, the platforms that use ptrace operate differently. The stubs that are
-traced are never allowed to continue execution into the host kernel and complete
-a call directly. Instead, all system calls are interpreted and handled by the
-Sentry itself, which reflects the resulting register state back into the tracee
-before continuing execution in userspace. This is very similar to the mechanism
-used by User-Mode Linux (UML).
-
-[dirtycow]: https://en.wikipedia.org/wiki/Dirty_COW
-[clang]: https://en.wikipedia.org/wiki/C_(programming_language)
-[popss]: https://nvd.nist.gov/vuln/detail/CVE-2018-8897
diff --git a/g3doc/architecture_guide/security.png b/g3doc/architecture_guide/security.png
deleted file mode 100644
index c29befbf6..000000000
--- a/g3doc/architecture_guide/security.png
+++ /dev/null
Binary files differ
diff --git a/g3doc/architecture_guide/security.svg b/g3doc/architecture_guide/security.svg
deleted file mode 100644
index 0575e2dec..000000000
--- a/g3doc/architecture_guide/security.svg
+++ /dev/null
@@ -1,153 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- width="92.963379mm"
- height="107.18885mm"
- viewBox="0 0 92.963379 107.18885"
- version="1.1"
- id="svg8"
- inkscape:version="0.92.4 (5da689c313, 2019-01-14)"
- sodipodi:docname="defense.svg">
- <defs
- id="defs2" />
- <sodipodi:namedview
- id="base"
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1.0"
- inkscape:pageopacity="0.0"
- inkscape:pageshadow="2"
- inkscape:zoom="0.98994949"
- inkscape:cx="-242.99254"
- inkscape:cy="136.90181"
- inkscape:document-units="mm"
- inkscape:current-layer="layer4"
- showgrid="false"
- inkscape:object-nodes="true"
- inkscape:window-width="1920"
- inkscape:window-height="1005"
- inkscape:window-x="0"
- inkscape:window-y="0"
- inkscape:window-maximized="1"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0" />
- <metadata
- id="metadata5">
- <rdf:RDF>
- <cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title></dc:title>
- </cc:Work>
- </rdf:RDF>
- </metadata>
- <g
- inkscape:groupmode="layer"
- id="layer2"
- inkscape:label="Layer 2"
- transform="translate(-61.112559,-78.160466)">
- <g
- id="g4644"
- style="fill:none;fill-opacity:0.34351148;stroke:#00a500;stroke-width:1;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:0.25572576"
- transform="matrix(1,0,0,-1,2.138671,277.94235)">
- <path
- transform="scale(0.26458333)"
- inkscape:connector-curvature="0"
- style="opacity:1;fill:none;fill-opacity:0.34351148;stroke:#00a500;stroke-width:3.77952766;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:0.25572576"
- d="M 398.57227,351.84766 224.7832,452.18359 398.57227,552.51953 572.35938,452.18359 Z"
- id="path4638" />
- <path
- inkscape:connector-curvature="0"
- style="opacity:1;fill:none;fill-opacity:0.34351148;stroke:#00a500;stroke-width:3.77952766;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:0.25572576"
- d="M 572.35938,452.18359 398.57227,552.51953 V 753.19141 L 572.35938,652.85547 Z"
- transform="scale(0.26458333)"
- id="path4640" />
- <path
- id="path4642"
- d="m 59.473888,119.64024 45.981172,26.54722 v 53.09443 L 59.473888,172.73467 Z"
- style="opacity:1;fill:none;fill-opacity:0.34351148;stroke:#00a500;stroke-width:1;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:0.25572576"
- inkscape:connector-curvature="0" />
- </g>
- </g>
- <g
- inkscape:groupmode="layer"
- id="layer3"
- inkscape:label="Layer 3"
- transform="translate(-61.112559,-78.160466)">
- <g
- id="g4554"
- transform="matrix(-0.39771468,0.69855937,-0.69855937,-0.39771468,366.58103,126.65261)">
- <g
- id="g4662"
- transform="translate(59.46839,130.66062)">
- <path
- inkscape:connector-curvature="0"
- id="path4548"
- transform="scale(0.26458333)"
- d="M 398.57227,351.84766 224.7832,452.18359 398.57227,552.51953 572.35938,452.18359 Z"
- style="opacity:1;fill:#0066ff;fill-opacity:0.34509804;stroke:#00a5ff;stroke-width:4.70182848;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
- <path
- inkscape:connector-curvature="0"
- id="path4550"
- transform="scale(0.26458333)"
- d="M 572.35938,452.18359 398.57227,552.51953 V 753.19141 L 572.35938,652.85547 Z"
- style="opacity:1;fill:#0044aa;fill-opacity:0.34509804;stroke:#00a5ff;stroke-width:4.29276943;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
- <path
- inkscape:connector-curvature="0"
- style="opacity:1;fill:#5599ff;fill-opacity:0.34509804;stroke:#00a5ff;stroke-width:1.24402535;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
- d="m 59.473888,119.64024 45.981172,26.54722 v 53.09443 L 59.473888,172.73467 Z"
- id="path4552" />
- </g>
- </g>
- </g>
- <g
- inkscape:groupmode="layer"
- id="layer4"
- inkscape:label="Layer 4"
- transform="translate(-61.112559,-78.160466)">
- <path
- style="fill:#e000ae;fill-opacity:1;stroke-width:0.12476727"
- d="m 84.610811,107.36071 v 2.55773 2.55772 h 2.49535 2.49534 v -2.55772 -2.55773 h -2.49534 z m 40.674129,0 v 2.55773 2.55772 h 2.49535 2.49534 v -2.55772 -2.55773 h -2.49534 z m -35.558669,5.11545 v 2.55773 2.55773 h 2.49535 2.49534 v -2.55773 -2.55773 h -2.49534 z m 4.99069,5.11546 v 2.55773 2.55773 h -2.49534 -2.49535 v 2.49534 2.49535 h -2.55773 -2.55773 v 2.55773 2.55773 h -2.55773 -2.55773 v 10.16853 10.16853 h 2.55773 2.55773 v -7.67562 -7.67587 l 2.52654,0.0339 2.52654,0.0336 0.0327,5.08427 0.0327,5.08426 h 2.49388 2.49388 v 2.55919 2.5592 l 5.08427,-0.0327 5.084269,-0.0326 v -2.49534 -2.49535 l -5.084269,-0.0324 -5.08427,-0.0327 v -2.55626 -2.55651 h 12.726269 12.72626 v 2.55651 2.55626 l -5.05868,0.0327 -5.05893,0.0324 v 2.49535 2.49534 l 5.05893,0.0326 5.05868,0.0327 v -2.55919 -2.55919 h 2.49388 2.49413 l 0.0324,-5.08426 0.0327,-5.08427 2.52653,-0.0336 2.52654,-0.0339 v 7.67586 7.67563 h 2.55773 2.55773 v -10.16854 -10.16853 h -2.55773 -2.55773 v -2.55773 -2.55773 h -2.55773 -2.55773 v -2.49535 -2.49534 h -2.49535 -2.49534 v -2.55773 -2.55773 h -2.55773 -2.55773 v 2.55773 2.55773 h -7.6108 -7.610809 v -2.55773 -2.55773 h -2.55774 z m 25.452519,0 h 2.49535 2.49535 v -2.55773 -2.55773 h -2.49535 -2.49535 v 2.55773 z m -25.452519,10.10615 h 5.11546 5.115459 v 2.55773 2.55773 h -5.115459 -5.11546 v -2.55773 z m 15.221609,0 h 5.11546 5.11545 v 2.55773 2.55773 h -5.11545 -5.11546 v -2.55773 z"
- id="path4732"
- inkscape:connector-curvature="0" />
- </g>
- <g
- inkscape:label="Layer 1"
- inkscape:groupmode="layer"
- id="layer1"
- style="display:inline"
- transform="translate(-61.112559,-78.160466)">
- <g
- transform="translate(-131.49557,42.495842)"
- style="fill:#007200;fill-opacity:0.34351148;stroke:#00a500;stroke-width:1;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
- id="g4628">
- <path
- id="path4529"
- d="m 239.09034,36.164616 -45.98169,26.547215 45.98169,26.547217 45.98117,-26.547217 z"
- style="opacity:1;fill:#4aba19;fill-opacity:0.34509804;stroke:#00a500;stroke-width:1;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
- inkscape:connector-curvature="0" />
- <path
- id="path4531"
- d="m 285.07151,62.711828 -45.98117,26.54722 v 53.094432 l 45.98117,-26.54722 z"
- style="opacity:1;fill:#007900;fill-opacity:0.34351148;stroke:#00a500;stroke-width:1;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
- inkscape:connector-curvature="0" />
- <path
- inkscape:connector-curvature="0"
- style="opacity:1;fill:#003d00;fill-opacity:0.34509804;stroke:#00a500;stroke-width:1;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
- d="m 193.10865,62.711831 45.98117,26.54722 v 53.094429 l -45.98117,-26.54722 z"
- id="path4541" />
- </g>
- </g>
-</svg>
diff --git a/g3doc/community.md b/g3doc/community.md
deleted file mode 100644
index 76f4d87c3..000000000
--- a/g3doc/community.md
+++ /dev/null
@@ -1,31 +0,0 @@
-# Participation
-
-To contribute code, please read the [contributing guide](../CONTRIBUTING.md).
-
-Please note that the [Code of Conduct](../CODE_OF_CONDUCT.md) applies to
-community forums as well as technical participation.
-
-## Communication channels
-
-The project maintains two mailing lists:
-
-*   [gvisor-users][gvisor-users] for announcements and general discussion.
-* [gvisor-dev][gvisor-dev] for development and contribution.
-
-We also have a [chat room hosted on Gitter][gitter-chat].
-
-We'd love to hear from you!
-
-## Community meetings
-
-The community calendar shows upcoming public meetings and opportunities to
-collaborate or discuss the project. Meetings are planned and announced ahead of
-time via the [gvisor-users][gvisor-users] mailing list.
-
-These meetings are public: anyone can join.
-
-<iframe src="https://calendar.google.com/calendar/b/1/embed?showTitle=0&amp;height=600&amp;wkst=1&amp;bgcolor=%23FFFFFF&amp;src=bd6f4k210u3ukmlj9b8vl053fk%40group.calendar.google.com&amp;color=%23AB8B00&amp;ctz=America%2FLos_Angeles" style="border-width:0" width="600" height="400" frameborder="0" scrolling="no"></iframe>
-
-[gitter-chat]: https://gitter.im/gvisor/community
-[gvisor-dev]: https://groups.google.com/forum/#!forum/gvisor-dev
-[gvisor-users]: https://groups.google.com/forum/#!forum/gvisor-users
diff --git a/g3doc/logo.png b/g3doc/logo.png
deleted file mode 100644
index bd1a1e4b7..000000000
--- a/g3doc/logo.png
+++ /dev/null
Binary files differ
diff --git a/g3doc/logo.txt b/g3doc/logo.txt
deleted file mode 100644
index 92f9cad5f..000000000
--- a/g3doc/logo.txt
+++ /dev/null
@@ -1 +0,0 @@
-The gVisor logo files are licensed under CC BY-SA 4.0 (Creative Commons Attribution-ShareAlike 4.0 International).
diff --git a/g3doc/proposals/BUILD b/g3doc/proposals/BUILD
deleted file mode 100644
index 710283142..000000000
--- a/g3doc/proposals/BUILD
+++ /dev/null
@@ -1,16 +0,0 @@
-load("//website:defs.bzl", "doc")
-
-package(
- default_visibility = ["//website:__pkg__"],
- licenses = ["notice"],
-)
-
-doc(
- name = "gsoc_2021",
- src = "gsoc-2021-ideas.md",
- category = "Project",
- include_in_menu = False,
- permalink = "/community/gsoc_2021/",
- subcategory = "Community",
- weight = "99",
-)
diff --git a/g3doc/proposals/gsoc-2021-ideas.md b/g3doc/proposals/gsoc-2021-ideas.md
deleted file mode 100644
index ecaf0dfe1..000000000
--- a/g3doc/proposals/gsoc-2021-ideas.md
+++ /dev/null
@@ -1,146 +0,0 @@
-# Project Ideas for Google Summer of Code 2021
-
-This is a collection of project ideas for
-[Google Summer of Code 2021][gsoc-2021-site]. These projects are intended to be
-relatively self-contained and should be good starting projects for new
-contributors to gVisor. We expect individual contributors to be able to make
-reasonable progress on these projects over the course of several weeks.
-Familiarity with Golang and knowledge about systems programming in Linux will be
-helpful.
-
-If you're interested in contributing to gVisor through Google Summer of Code
-2021, but would like to propose your own idea for a project, please see our
-[roadmap](../roadmap.md) for areas of development, and get in touch through our
-[mailing list][gvisor-mailing-list] or [chat][gvisor-chat]!
-
-## Implement the `setns` syscall
-
-Estimated complexity: *easy*
-
-This project involves implementing the [`setns`][man-setns] syscall. gVisor
-currently supports manipulation of namespaces through the `clone` and `unshare`
-syscalls. These two syscalls essentially implement the requisite logic for
-`setns`, but there is currently no way to obtain a file descriptor referring to
-a namespace in gVisor. As described in the `setns` man page, the two typical
-ways of obtaining such a file descriptor in Linux are by opening a file in
-`/proc/[pid]/ns`, or through the `pidfd_open` syscall.
-
-For gVisor, we recommend implementing the `/proc/[pid]/ns` mechanism first,
-which would involve implementing a trivial namespace file type in procfs.
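-
-For reference, the application-side usage that such an implementation would
-need to support looks roughly like the following sketch (the target PID is a
-placeholder, and joining a namespace generally requires CAP_SYS_ADMIN):
-
-```go
-package main
-
-import (
-	"fmt"
-	"os"
-	"runtime"
-
-	"golang.org/x/sys/unix"
-)
-
-func main() {
-	// Placeholder: the process whose network namespace we want to join.
-	const targetPID = 1234
-
-	// setns affects only the calling thread, so pin this goroutine to it.
-	runtime.LockOSThread()
-	defer runtime.UnlockOSThread()
-
-	// Obtain a namespace file descriptor from procfs...
-	f, err := os.Open(fmt.Sprintf("/proc/%d/ns/net", targetPID))
-	if err != nil {
-		panic(err)
-	}
-	defer f.Close()
-
-	// ...and join that namespace.
-	if err := unix.Setns(int(f.Fd()), unix.CLONE_NEWNET); err != nil {
-		panic(err)
-	}
-}
-```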
-
-## Implement `fanotify`
-
-Estimated complexity: *medium*
-
-Implement [`fanotify`][man-fanotify] in gVisor, which is a filesystem event
-notification mechanism. gVisor currently supports `inotify`, which is a similar
-mechanism with slightly different capabilities, but which should serve as a good
-reference.
-
-The `fanotify` interface adds two new syscalls:
-
-- `fanotify_init` creates a new notification group, which is a collection of
- filesystem objects watched by the kernel. The group is represented by a file
- descriptor returned by this syscall. Events on the watched objects can be
- retrieved by reading from this file descriptor.
-
-- `fanotify_mark` adds a filesystem object to a watch group, or modifies the
- parameters of an existing watch.
-
-Unlike `inotify`, `fanotify` can set watches on filesystems and mount points,
-which will require some additional data tracking on the corresponding filesystem
-objects within the sentry.
-
-A well-designed implementation should reuse the notifications from `inotify` for
-files and directories (this is also how Linux implements these mechanisms), and
-should implement the necessary tracking and notifications for filesystems and
-mount points.
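-
-As a rough usage sketch of the interface to be implemented (relying on the
-wrappers in golang.org/x/sys/unix, an example path, and CAP_SYS_ADMIN):
-
-```go
-package main
-
-import (
-	"fmt"
-
-	"golang.org/x/sys/unix"
-)
-
-func main() {
-	// Create a notification group; events are later read from this fd.
-	fd, err := unix.FanotifyInit(unix.FAN_CLASS_NOTIF, unix.O_RDONLY)
-	if err != nil {
-		panic(err)
-	}
-	defer unix.Close(fd)
-
-	// Watch an entire mount point ("/mnt/data" is an example path) for
-	// open and modify events.
-	err = unix.FanotifyMark(fd, unix.FAN_MARK_ADD|unix.FAN_MARK_MOUNT,
-		unix.FAN_OPEN|unix.FAN_MODIFY, unix.AT_FDCWD, "/mnt/data")
-	if err != nil {
-		panic(err)
-	}
-
-	// Events arrive on fd as fanotify_event_metadata records.
-	fmt.Println("watching /mnt/data")
-}
-```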
-
-## Implement `io_uring`
-
-Estimated complexity: *hard*
-
-`io_uring` is the latest asynchronous I/O API in Linux. This project will
-involve implementing the system interfaces required to support `io_uring` in
-gVisor. A successful implementation should have performance and scalability
-characteristics roughly comparable to those of synchronous I/O syscalls, as in
-Linux.
-
-The core of the `io_uring` interface is deceptively simple, involving only three
-new syscalls:
-
-- `io_uring_setup(2)` creates a new `io_uring` instance represented by a file
- descriptor, including a set of request submission and completion queues
- backed by shared memory ring buffers.
-
-- `io_uring_register(2)` optionally binds kernel resources such as files and
- memory buffers to handles, which can then be passed to `io_uring`
- operations. Pre-registering resources in this way moves the cost of looking
- up and validating these resources to registration time rather than paying
- the cost during the operation.
-
-- `io_uring_enter(2)` is the syscall used to submit queued operations and wait
- for completions. This is the most complex part of the mechanism, requiring
-    the kernel to process queued requests from the submission queue, dispatching
- the appropriate I/O operation based on the request arguments and blocking
- for the requested number of operations to be completed before returning.
-
-An `io_uring` request is effectively an opcode specifying the I/O operation to
-perform, and corresponding arguments. The opcodes and arguments closely relate
-to the corresponding synchronous I/O syscall. In addition, there are some
-`io_uring`-specific arguments that specify things like how to process requests,
-how to interpret the arguments and communicate the status of the ring buffers.
-
-For a detailed description of the `io_uring` interface, see the
-[design doc][io-uring-doc] by the `io_uring` authors.
-
-Due to the complexity of the full `io_uring` mechanism and the numerous
-supported operations, it should be implemented in two stages:
-
-In the first stage, a simplified version of the `io_uring_setup` and
-`io_uring_enter` syscalls should be implemented, which will only support a
-minimal set of arguments and just one or two simple opcodes. This simplified
-implementation can be used to figure out how to integrate `io_uring` with
-gVisor's virtual filesystem and memory management subsystems, as well as
-benchmark the implementation to ensure it has the desired performance
-characteristics. The goal in this stage should be to implement the smallest
-subset of features required to perform a basic operation through `io_uring`.
-
-In the second stage, support can be added for all the I/O operations supported
-by Linux, as well as advanced `io_uring` features such as fixed files and
-buffers (via `io_uring_register`), polled I/O and kernel-side request polling.
-
-A single contributor can expect to make reasonable progress on the first stage
-within the scope of Google Summer of Code. The second stage, while not
-necessarily difficult, is likely to be very time consuming. However it also
-lends itself well to parallel development by multiple contributors.
-
-## Implement message queues
-
-Estimated complexity: *hard*
-
-Linux provides two alternate message queues:
-[System V message queues][man-sysvmq] and [POSIX message queues][man-posixmq].
-gVisor currently doesn't implement either.
-
-Both mechanisms add multiple syscalls for managing and using the message queues,
-see the relevant man pages above for their full description.
-
-The core of both mechanisms is very similar, so it may be possible to back both
-mechanisms with a common implementation in gVisor. Linux, however, has two
-distinct implementations.
-
-An individual contributor can reasonably implement a minimal version of one of
-these two mechanisms within the scope of Google Summer of Code. The System V
-queue may be slightly easier to implement, as gVisor already implements System V
-semaphores and shared memory regions, so the code for managing IPC objects and
-the registry already exists.
-
-[gsoc-2021-site]: https://summerofcode.withgoogle.com
-[gvisor-chat]: https://gitter.im/gvisor/community
-[gvisor-mailing-list]: https://groups.google.com/g/gvisor-dev
-[io-uring-doc]: https://kernel.dk/io_uring.pdf
-[man-fanotify]: https://man7.org/linux/man-pages/man7/fanotify.7.html
-[man-sysvmq]: https://man7.org/linux/man-pages/man7/sysvipc.7.html
-[man-posixmq]: https://man7.org/linux/man-pages//man7/mq_overview.7.html
-[man-setns]: https://man7.org/linux/man-pages/man2/setns.2.html
diff --git a/g3doc/proposals/runtime_dedicate_os_thread.md b/g3doc/proposals/runtime_dedicate_os_thread.md
deleted file mode 100644
index dc70055b0..000000000
--- a/g3doc/proposals/runtime_dedicate_os_thread.md
+++ /dev/null
@@ -1,188 +0,0 @@
-# `runtime.DedicateOSThread`
-
-Status as of 2020-09-18: Deprioritized; initial studies in #2180 suggest that
-this may be difficult to support in the Go runtime due to issues with GC.
-
-## Summary
-
-Allow goroutines to bind to kernel threads in a way that allows their scheduling
-to be kernel-managed rather than runtime-managed.
-
-## Objectives
-
-* Reduce Go runtime overhead in the gVisor sentry (#2184).
-
-* Minimize intrusiveness of changes to the Go runtime.
-
-## Background
-
-In Go, execution contexts are referred to as goroutines, which the runtime calls
-Gs. The Go runtime maintains a variably-sized pool of threads (called Ms by the
-runtime) on which Gs are executed, as well as a pool of "virtual processors"
-(called Ps by the runtime) of size equal to `runtime.GOMAXPROCS()`. Usually,
-each M requires a P in order to execute Gs, limiting the number of concurrently
-executing goroutines to `runtime.GOMAXPROCS()`.
-
-The `runtime.LockOSThread` function temporarily locks the invoking goroutine to
-its current thread. It is primarily useful for interacting with OS or non-Go
-library facilities that are per-thread. It does not reduce interactions with the
-Go runtime scheduler: locked Ms relinquish their P when they become blocked, and
-only continue execution after another M "chooses" their locked G to run and
-donates their P to the locked M instead.
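-
-For context, a typical use of the existing primitive looks like the following
-sketch (illustrative only):
-
-```go
-package main
-
-import (
-	"fmt"
-	"runtime"
-
-	"golang.org/x/sys/unix"
-)
-
-func main() {
-	done := make(chan struct{})
-	go func() {
-		defer close(done)
-
-		// Pin this goroutine to its current OS thread so that
-		// thread-local kernel state (e.g. a namespace joined via setns)
-		// is only ever observed by this goroutine.
-		runtime.LockOSThread()
-		defer runtime.UnlockOSThread()
-
-		// While locked, the goroutine always runs on the same kernel
-		// thread.
-		fmt.Println("locked to tid", unix.Gettid())
-	}()
-	<-done
-}
-```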
-
-## Problems
-
-### Context Switch Overhead
-
-Most goroutines in the gVisor sentry are task goroutines, which back application
-threads. Task goroutines spend large amounts of time blocked on syscalls that
-execute untrusted application code. When invoking said syscall (which varies by
-gVisor platform), the task goroutine may interact with the Go runtime in one of
-three ways:
-
-* It can invoke the syscall without informing the runtime. In this case, the
- task goroutine will continue to hold its P during the syscall, limiting the
- number of application threads that can run concurrently to
- `runtime.GOMAXPROCS()`. This is problematic because the Go runtime scheduler
- is known to scale poorly with `GOMAXPROCS`; see #1942 and
- https://github.com/golang/go/issues/28808. It also means that preemption of
- application threads must be driven by sentry or runtime code, which is
- strictly slower than kernel-driven preemption (since the sentry must invoke
- another syscall to preempt the application thread).
-
-* It can call `runtime.entersyscallblock` before invoking the syscall, and
- `runtime.exitsyscall` after the syscall returns. In this case, the task
- goroutine will release its P while the syscall is executing. This allows the
- number of threads concurrently executing application code to exceed
- `GOMAXPROCS`. However, this incurs additional latency on syscall entry (to
- hand off the released P to another M, often requiring a `futex(FUTEX_WAKE)`
- syscall) and on syscall exit (to acquire a new P). It also drastically
- increases the number of threads that concurrently interact with the runtime
- scheduler, which is also problematic for performance (both in terms of CPU
- utilization and in terms of context switch latency); see #205.
-
-*   It can call `runtime.entersyscall` before invoking the syscall, and
- `runtime.exitsyscall` after the syscall returns. In this case, the task
- goroutine "lazily releases" its P, allowing the runtime's "sysmon" thread to
- steal it on behalf of another M after a 20us delay. This mitigates the
- context switch latency problem when there are few task goroutines and the
- interval between switches to application code (i.e. the interval between
- application syscalls, page faults, or signal delivery) is short. (Cynically,
- this means that it's most effective in microbenchmarks). However, the delay
- before a P is stolen can also be problematic for performance when there are
- both many task goroutines switching to application code (lazily releasing
- their Ps) *and* many task goroutines switching to sentry code (contending
- for Ps), which is likely in larger heterogeneous workloads.
-
-### Blocking Overhead
-
-Task goroutines block on behalf of application syscalls like `futex` and
-`epoll_wait` by receiving from a Go channel. (Future work may convert task
-goroutine blocking to use the `syncevent` package to avoid overhead associated
-with channels and `select`, but this does not change how blocking interacts with
-the Go runtime scheduler.)
-
-If `runtime.LockOSThread()` is not in effect when a task goroutine blocks, then
-when the task goroutine is unblocked (by e.g. an application `FUTEX_WAKE`,
-signal delivery, or a timeout) by sending to the blocked channel,
-`runtime.ready` migrates the unblocked G to the unblocking P. In most cases,
-this implies that every application thread block/unblock cycle results in a
-migration of the thread between Ps, and therefore Ms, and therefore cores,
-resulting in reduced application performance due to loss of CPU caches.
-Furthermore, in most cases, the unblocking P cannot immediately switch to the
-unblocked G (instead resuming execution of its current application thread after
-completing the application's `futex(FUTEX_WAKE)`, `tgkill`, etc. syscall), often
-requiring that another P steal the unblocked G before it can resume execution.
-
-If `runtime.LockOSThread()` is in effect when a task goroutine blocks, then the
-G will remain locked to its M, avoiding the core migration described above;
-however, wakeup latency is significantly increased since, as described in
-"Background", the G still needs to be selected by the scheduler before it can
-run, and the M that selects the G then needs to transfer its P to the locked M,
-incurring an additional `FUTEX_WAKE` syscall and round of kernel scheduling.
-
-## Proposal
-
-We propose to add a function, tentatively called `DedicateOSThread`, to the Go
-`runtime` package, documented as follows:
-
-```go
-// DedicateOSThread wires the calling goroutine to its current operating system
-// thread, and exempts it from counting against GOMAXPROCS. The calling
-// goroutine will always execute in that thread, and no other goroutine will
-// execute in it, until the calling goroutine has made as many calls to
-// UndedicateOSThread as to DedicateOSThread. If the calling goroutine exits
-// without unlocking the thread, the thread will be terminated.
-//
-// DedicateOSThread should only be used by long-lived goroutines that usually
-// block due to blocking system calls, rather than interaction with other
-// goroutines.
-func DedicateOSThread()
-```
-
-Mechanically, `DedicateOSThread` implies `LockOSThread` (i.e. it locks the
-invoking G to a M), but additionally locks the invoking M to a P. Ps locked by
-`DedicateOSThread` are not counted against `GOMAXPROCS`; that is, the actual
-number of Ps in the system (`len(runtime.allp)`) is `GOMAXPROCS` plus the number
-of bound Ps (plus some slack to avoid frequent changes to `runtime.allp`).
-Corollaries:
-
-* If `runtime.ready` observes that a readied G is locked to a M locked to a P,
- it immediately wakes the locked M without migrating the G to the readying P
- or waiting for a future call to `runtime.schedule` to select the readied G
- in `runtime.findrunnable`.
-
-* `runtime.stoplockedm` and `runtime.reentersyscall` skip the release of
- locked Ps; the latter also skips sysmon wakeup. `runtime.stoplockedm` and
- `runtime.exitsyscall` skip re-acquisition of Ps if one is locked.
-
-* sysmon does not attempt to preempt Gs that are locked to Ps, avoiding
- fruitless overhead from `tgkill` syscalls and signal delivery.
-
-* `runtime.findrunnable`'s work stealing skips locked Ps (suggesting that
- unlocked Ps be tracked in a separate array). `runtime.findrunnable` on
- locked Ps skip the global run queue, work stealing, and possibly netpoll.
-
-* New goroutines created by goroutines with locked Ps are enqueued on the
- global run queue rather than the invoking P's local run queue.
-
-While gVisor's use case does not strictly require that the association is
-reversible (with `runtime.UndedicateOSThread`), such a feature is required to
-allow reuse of locked Ms, which is likely to be critical for performance.
-
-## Alternatives Considered
-
-* Make the runtime scale well with `GOMAXPROCS`. While we are also
- concurrently investigating this problem, this would not address the issues
- of increased preemption cost or blocking overhead.
-
-* Make the runtime scale well with number of Ms. It is unclear if this is
- actually feasible, and would not address blocking overhead.
-
-* Make P-locking part of `LockOSThread`'s behavior. This would likely
- introduce performance regressions in existing uses of `LockOSThread` that do
- not fit this usage pattern. In particular, since `DedicateOSThread`
- transitions the invoker's P from "counted against `GOMAXPROCS`" to "not
- counted against `GOMAXPROCS`", it may need to wake another M to run a new P
- (that is counted against `GOMAXPROCS`), and the converse applies to
- `UndedicateOSThread`.
-
-* Rewrite the gVisor sentry in a language that does not force userspace
- scheduling. This is a last resort due to the amount of code involved.
-
-## Related Issues
-
-The proposed functionality is directly analogous to `spawn_blocking` in Rust
-async runtimes
-[`async_std`](https://docs.rs/async-std/1.8.0/async_std/task/fn.spawn_blocking.html)
-and [`tokio`](https://docs.rs/tokio/0.3.5/tokio/task/fn.spawn_blocking.html).
-
-Outside of gVisor:
-
-* https://github.com/golang/go/issues/21827#issuecomment-595152452 describes a
- use case for this feature in go-delve, where the goroutine that would use
- this feature spends much of its time blocked in `ptrace` syscalls.
-
-* This feature may improve performance in the use case described in
- https://github.com/golang/go/issues/18237, given the prominence of
- syscall.Syscall in the profile given in that bug report.
diff --git a/g3doc/roadmap.md b/g3doc/roadmap.md
deleted file mode 100644
index 06ea25a8b..000000000
--- a/g3doc/roadmap.md
+++ /dev/null
@@ -1,49 +0,0 @@
-# Roadmap
-
-gVisor [GitHub Issues][issues] serve as the source-of-truth for most work in
-flight. Specific performance and compatibility issues are generally tracked
-there. [GitHub Milestones][milestones] may be used to track larger features that
-span many issues. However, labels are also used to aggregate cross-cutting
-feature work.
-
-## Core Improvements
-
-Most gVisor work is focused on four areas.
-
-* [Performance][performance]: overall sandbox performance, including platform
- performance, is a critical area for investment. This includes: network
- performance (throughput and latency), file system performance (metadata and
- data I/O), application switch and fault costs, etc. The goal of gVisor is to
- provide sandboxing without a material performance or efficiency impact on
- all but the most performance-sensitive applications.
-
-* [Compatibility][compatibility]: supporting a wide range of applications
- requires supporting a large system API, including special system files (e.g.
- proc, sys, dev, etc.). The goal of gVisor is to support the broad set of
- applications that depend on a generic Linux API, rather than a specific
- kernel version.
-
-* [Infrastructure & tooling][infrastructure]: the above goals require
- aggressive testing and coverage, and well-established processes. This
- includes adding appropriate system call coverage, end-to-end suites and
- runtime tests.
-
-* [Integration][integration]: Container infrastructure is evolving rapidly and
- becoming more complex, and gVisor must continuously implement relevant and
- popular features to ensure that integration points remain robust and
- feature-complete while preserving security guarantees.
-
-## Releases
-
-Releases are available on [GitHub][releases].
-
-As a convenience, binary packages are also published. Instructions for their use
-are available via the [Installation instructions](./user_guide/install.md).
-
-[issues]: https://github.com/google/gvisor/issues
-[milestones]: https://github.com/google/gvisor/milestones
-[releases]: https://github.com/google/gvisor/releases
-[performance]: https://github.com/google/gvisor/issues?q=is%3Aopen+is%3Aissue+label%3A%22area%3A+performance%22
-[integration]: https://github.com/google/gvisor/issues?q=is%3Aopen+is%3Aissue+label%3A%22area%3A+integration%22
-[compatibility]: https://github.com/google/gvisor/issues?q=is%3Aopen+is%3Aissue+label%3A%22area%3A+compatibility%22
-[infrastructure]: https://github.com/google/gvisor/issues?q=is%3Aopen+is%3Aissue+label%3A%22area%3A+tooling%22
diff --git a/g3doc/style.md b/g3doc/style.md
deleted file mode 100644
index 8258b0233..000000000
--- a/g3doc/style.md
+++ /dev/null
@@ -1,97 +0,0 @@
-# Provisional style guide
-
-> These guidelines are new and may change. This note will be removed when
-> consensus is reached.
-
-Not all existing code will comply with this style guide, but new code should.
-Further, it is a goal to eventually update all existing code to be in
-compliance.
-
-## All code
-
-### Early exit
-
-All code, unless it substantially increases the line count or complexity, should
-use early exits from loops and functions where possible.
-
-## Go specific
-
-All Go code should comply with the [Go Code Review Comments][gostyle] and
-[Effective Go][effective_go] guides, as well as the additional guidelines
-described below.
-
-### Mutexes
-
-#### Naming
-
-Mutexes should be named mu or xxxMu. Mutexes as a general rule should not be
-exported. Instead, export methods which use the mutexes to avoid leaky
-abstractions.
-
-#### Location
-
-Mutexes should be sibling fields to the fields that they protect. Mutexes should
-not be declared as global variables, instead use a struct (anonymous ok, but
-naming conventions still apply).
-
-Mutexes should be ordered before the fields that they protect.
-
-#### Comments
-
-Mutexes should have a comment on their declaration explaining any ordering
-requirements (or pointing to where this information can be found), if
-applicable. There is no need for a comment explaining which fields are
-protected.
-
-Each field or variable protected by a mutex should state as such in a comment on
-the field or variable declaration.
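-
-For example, a fragment following these conventions might look like:
-
-```go
-// endpointStats tracks per-endpoint counters.
-type endpointStats struct {
-	// mu protects the fields below. It must not be held when calling back
-	// into the endpoint.
-	mu sync.Mutex
-
-	// packets is the number of packets seen.
-	//
-	// packets is protected by mu.
-	packets uint64
-
-	// bytes is the number of bytes seen.
-	//
-	// bytes is protected by mu.
-	bytes uint64
-}
-
-// Record notes that a packet of the given size was seen.
-func (s *endpointStats) Record(size uint64) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	s.packets++
-	s.bytes += size
-}
-```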
-
-### Function comments
-
-Functions with special entry conditions (e.g., a lock must be held) should state
-these conditions in a `Preconditions:` comment block. One condition per line;
-multiple conditions are specified with a bullet (`*`).
-
-Functions with notable exit conditions (e.g., a `Done` function must eventually
-be called by the caller) can similarly have a `Postconditions:` block.
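-
-For example (an illustrative fragment):
-
-```go
-// removeLocked removes the entry for the given key.
-//
-// Preconditions:
-// * c.mu must be locked for writing.
-// * key must be present in c.entries.
-//
-// Postconditions: the caller must eventually call c.notify().
-func (c *cache) removeLocked(key string) {
-	delete(c.entries, key)
-}
-```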
-
-### Unused returns
-
-Unused returns should be explicitly ignored with underscores. If there is a
-function which is commonly used without using its return(s), a wrapper function
-should be declared which explicitly ignores the returns. That said, in many
-cases, it may make sense for the wrapper to check the returns.
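-
-For example (an illustrative fragment):
-
-```go
-// closeQuietly closes f, explicitly ignoring the returned error. It exists
-// for call sites (e.g. defer statements) that cannot meaningfully handle a
-// close error.
-func closeQuietly(f *os.File) {
-	_ = f.Close()
-}
-```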
-
-### Formatting verbs
-
-Built-in types should use their associated verbs (e.g. %d for integral types),
-but other types should use a %v variant, even if they implement fmt.Stringer.
-The built-in `error` type should use %w when formatted with `fmt.Errorf`, but
-only then.
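-
-For example (an illustrative fragment):
-
-```go
-fmt.Printf("flushed %d entries for %v\n", count, endpoint)
-return fmt.Errorf("reading %q: %w", path, err)
-```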
-
-### Wrapping
-
-Comments should be wrapped at 80 columns with a 2 space tab size.
-
-Code does not need to be wrapped, but if wrapping would make it more readable,
-it should be wrapped with each subcomponent of the thing being wrapped on its
-own line. For example, if a struct is split between lines, each field should be
-on its own line.
-
-#### Example
-
-```go
-_ = exec.Cmd{
- Path: "/foo/bar",
- Args: []string{"-baz"},
-}
-```
-
-## C++ specific
-
-C++ code should conform to the [Google C++ Style Guide][cppstyle] and the
-guidelines described for tests.
-
-[cppstyle]: https://google.github.io/styleguide/cppguide.html
-[gostyle]: https://github.com/golang/go/wiki/CodeReviewComments
-[effective_go]: https://golang.org/doc/effective_go.html
diff --git a/g3doc/user_guide/BUILD b/g3doc/user_guide/BUILD
deleted file mode 100644
index b69aee12c..000000000
--- a/g3doc/user_guide/BUILD
+++ /dev/null
@@ -1,70 +0,0 @@
-load("//website:defs.bzl", "doc")
-
-package(
- default_visibility = ["//website:__pkg__"],
- licenses = ["notice"],
-)
-
-doc(
- name = "compatibility",
- src = "compatibility.md",
- category = "Compatibility",
- permalink = "/docs/user_guide/compatibility/",
- weight = "0",
-)
-
-doc(
- name = "checkpoint_restore",
- src = "checkpoint_restore.md",
- category = "User Guide",
- permalink = "/docs/user_guide/checkpoint_restore/",
- weight = "60",
-)
-
-doc(
- name = "debugging",
- src = "debugging.md",
- category = "User Guide",
- permalink = "/docs/user_guide/debugging/",
- weight = "70",
-)
-
-doc(
- name = "FAQ",
- src = "FAQ.md",
- category = "User Guide",
- permalink = "/docs/user_guide/faq/",
- weight = "90",
-)
-
-doc(
- name = "filesystem",
- src = "filesystem.md",
- category = "User Guide",
- permalink = "/docs/user_guide/filesystem/",
- weight = "40",
-)
-
-doc(
- name = "networking",
- src = "networking.md",
- category = "User Guide",
- permalink = "/docs/user_guide/networking/",
- weight = "50",
-)
-
-doc(
- name = "install",
- src = "install.md",
- category = "User Guide",
- permalink = "/docs/user_guide/install/",
- weight = "10",
-)
-
-doc(
- name = "platforms",
- src = "platforms.md",
- category = "User Guide",
- permalink = "/docs/user_guide/platforms/",
- weight = "30",
-)
diff --git a/g3doc/user_guide/FAQ.md b/g3doc/user_guide/FAQ.md
deleted file mode 100644
index 26c836ddf..000000000
--- a/g3doc/user_guide/FAQ.md
+++ /dev/null
@@ -1,152 +0,0 @@
-# FAQ
-
-[TOC]
-
-### What operating systems are supported? {#supported-os}
-
-Today, gVisor requires Linux.
-
-### What CPU architectures are supported? {#supported-cpus}
-
-gVisor currently supports [x86_64/AMD64](https://en.wikipedia.org/wiki/X86-64)
-compatible processors. Preliminary support is also available for
-[ARM64](https://en.wikipedia.org/wiki/ARM_architecture#AArch64).
-
-### Do I need to modify my Linux application to use gVisor? {#modify-app}
-
-No. gVisor is capable of running unmodified Linux binaries.
-
-### What binary formats does gVisor support? {#supported-binaries}
-
-gVisor supports Linux
-[ELF](https://en.wikipedia.org/wiki/Executable_and_Linkable_Format) binaries.
-
-Binaries run in gVisor should be built for the
-[AMD64](https://en.wikipedia.org/wiki/X86-64) or
-[AArch64](https://en.wikipedia.org/wiki/ARM_architecture#AArch64) CPU
-architectures.
-
-### Can I run Docker images using gVisor? {#docker-images}
-
-Yes. Please see the [Docker Quick Start][docker].
-
-### Can I run Kubernetes pods using gVisor? {#k8s-pods}
-
-Yes. Please see the [Kubernetes Quick Start][k8s].
-
-### What's the security model? {#security-model}
-
-See the [Security Model][security-model].
-
-## Troubleshooting
-
-### My container runs fine with `runc` but fails with `runsc` {#app-compatibility}
-
-If you’re having problems running a container with `runsc`, it’s most likely due
-to a compatibility issue or a missing feature in gVisor. See
-[Debugging][debugging].
-
-### When I run my container, docker fails with: `open /run/containerd/.../<containerid>/log.json: no such file or directory` {#memfd-create}
-
-You are using an older version of Linux which doesn't support `memfd_create`.
-
-This is tracked in [bug #268](https://gvisor.dev/issue/268).
-
-### When I run my container, docker fails with: `flag provided but not defined: -console` {#old-docker}
-
-You're using an old version of Docker. See [Docker Quick Start][docker].
-
-### I can’t see a file copied with: `docker cp` {#fs-cache}
-
-For performance reasons, gVisor caches directory contents, and therefore it may
-not realize a new file was copied to a given directory. To invalidate the cache
-and force a refresh, create a file under the directory in question and list the
-contents again.
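-
-For example, a quick way to force a refresh from inside the sandbox (the
-container name `my-container` and the directory `/data` are only placeholders):
-
-```bash
-docker exec my-container sh -c "touch /data/.refresh && ls /data"
-```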
-
-As a workaround, the shared root filesystem can be enabled. See
-[Filesystem][filesystem].
-
-This bug is tracked in [bug #4](https://gvisor.dev/issue/4).
-
-Note that `kubectl cp` works because it does the copy by exec'ing inside the
-sandbox, and thus gVisor's internal cache is made aware of the new files and
-directories.
-
-### I'm getting an error like: `panic: unable to attach: operation not permitted` or `fork/exec /proc/self/exe: invalid argument: unknown` {#runsc-perms}
-
-Make sure that the permissions are correct on the `runsc` binary.
-
-```bash
-sudo chmod a+rx /usr/local/bin/runsc
-```
-
-### I'm getting an error like `mount submount "/etc/hostname": creating mount with source ".../hostname": input/output error: unknown.` {#memlock}
-
-There is a bug in Linux kernel versions 5.1 to 5.3.15, 5.4.2, and 5.5. Upgrade
-to a newer kernel or add the following to
-`/lib/systemd/system/containerd.service` as a workaround.
-
-```
-LimitMEMLOCK=infinity
-```
-
-And run `systemctl daemon-reload && systemctl restart containerd` to restart
-containerd.
-
-See [issue #1765](https://gvisor.dev/issue/1765) for more details.
-
-### I'm getting an error like `RuntimeHandler "runsc" not supported` {#runtime-handler}
-
-This error indicates that the Kubernetes CRI runtime was not set up to handle
-`runsc` as a runtime handler. Please ensure that containerd configuration has
-been created properly and containerd has been restarted. See the
-[containerd quick start](containerd/quick_start.md) for more details.
-
-If you have ensured that containerd has been set up properly and you used
-kubeadm to create your cluster, please check if Docker is also installed on that
-system. Kubeadm prefers using Docker if both Docker and containerd are
-installed.
-
-Please recreate your cluster and set the `--cri-socket` option on kubeadm
-commands. For example:
-
-```bash
-kubeadm init --cri-socket=/var/run/containerd/containerd.sock ...
-```
-
-To fix an existing cluster, edit the `/var/lib/kubelet/kubeadm-flags.env` file:
-set the `--container-runtime` flag to `remote` and point the
-`--container-runtime-endpoint` flag to the containerd socket, e.g.
-`/var/run/containerd/containerd.sock`.
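-
-For illustration only, the relevant line might end up looking similar to this
-(hypothetical contents; keep any flags that are already present in your file):
-
-```bash
-KUBELET_KUBEADM_ARGS="--container-runtime=remote --container-runtime-endpoint=unix:///var/run/containerd/containerd.sock"
-```
-
-Then restart the kubelet (e.g. `sudo systemctl restart kubelet`) to pick up the
-change.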
-
-### My container cannot resolve another container's name when using Docker user defined bridge {#docker-bridge}
-
-This is normally indicated by errors like `bad address 'container-name'` when
-trying to communicate to another container in the same network.
-
-Docker user defined bridge uses an embedded DNS server bound to the loopback
-interface on address 127.0.0.10. This requires access to the host network in
-order to communicate to the DNS server. runsc network is isolated from the host
-and cannot access the DNS server on the host network without breaking the
-sandbox isolation. There are a few different workarounds you can try:
-
-* Use the default bridge network with `--link` to connect containers. The
-    default bridge doesn't use the embedded DNS (see the example after this
-    list).
-* Use the [`--network=host`][host-net] option in runsc; beware that it uses
-    the host network stack and is less secure.
-* Use IPs instead of container names.
-* Use [Kubernetes][k8s]. Container name lookup works fine in Kubernetes.
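-
-As a minimal sketch of the first workaround, using the default bridge network
-and `--link` (the container and image names here are only illustrative):
-
-```bash
-docker run --runtime=runsc -d --name backend nginx
-docker run --runtime=runsc --rm --link backend:backend busybox \
-    wget -qO- http://backend
-```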
-
-### I'm getting an error like `dial unix /run/containerd/s/09e4...8cff: connect: connection refused: unknown` {#shim-connect}
-
-This error may happen when using `gvisor-containerd-shim` with a `containerd`
-that does not contain the fix for [CVE-2020-15257]. To resolve the issue,
-update containerd to 1.3.9 or 1.4.3 (or newer versions respectively).
-
-[security-model]: /docs/architecture_guide/security/
-[host-net]: /docs/user_guide/networking/#network-passthrough
-[debugging]: /docs/user_guide/debugging/
-[filesystem]: /docs/user_guide/filesystem/
-[docker]: /docs/user_guide/quick_start/docker/
-[k8s]: /docs/user_guide/quick_start/kubernetes/
-[CVE-2020-15257]: https://github.com/containerd/containerd/security/advisories/GHSA-36xw-fx78-c5r4
diff --git a/g3doc/user_guide/checkpoint_restore.md b/g3doc/user_guide/checkpoint_restore.md
deleted file mode 100644
index 0ab0911b0..000000000
--- a/g3doc/user_guide/checkpoint_restore.md
+++ /dev/null
@@ -1,101 +0,0 @@
-# Checkpoint/Restore
-
-[TOC]
-
-gVisor has the ability to checkpoint a process, save its current state in a
-state file, and restore into a new container using the state file.
-
-## How to use checkpoint/restore
-
-Checkpoint/restore functionality is currently available via raw `runsc`
-commands. To use the checkpoint command, first run a container.
-
-```bash
-runsc run <container id>
-```
-
-To checkpoint the container, the `--image-path` flag must be provided. This is
-the directory path within which the checkpoint state-file will be created. The
-file will be called `checkpoint.img` and necessary directories will be created
-if they do not yet exist.
-
-> Note: Two checkpoints cannot be saved to the same directory; every image-path
-> provided must be unique.
-
-```bash
-runsc checkpoint --image-path=<path> <container id>
-```
-
-There is also an optional `--leave-running` flag that allows the container to
-continue to run after the checkpoint has been made. (By default, containers stop
-their processes after committing a checkpoint.)
-
-> Note: All top-level runsc flags needed when calling run must be provided to
-> checkpoint if --leave-running is used.
-
-> Note: --leave-running works by performing an immediate restore, so the
-> container keeps its given container id but may have a different process id.
-
-```bash
-runsc checkpoint --image-path=<path> --leave-running <container id>
-```
-
-To restore, provide the image path to the `checkpoint.img` file created during
-the checkpoint. Because containers stop by default after checkpointing, restore
-needs to happen in a new container (restore is a command which parallels start).
-
-```bash
-runsc create <container id>
-
-runsc restore --image-path=<path> <container id>
-```
-
-## How to use checkpoint/restore in Docker:
-
-Currently checkpoint/restore through `runsc` is not entirely compatible with
-Docker, although progress has been made by both gVisor and Docker to enable
-compatibility. Here, we document the ideal workflow.
-
-Run a container:
-
-```bash
-docker run [options] --runtime=runsc <image>
-```
-
-Checkpoint a container:
-
-```bash
-docker checkpoint create <container> <checkpoint_name>
-```
-
-Create a new container into which to restore:
-
-```bash
-docker create [options] --runtime=runsc <image>
-```
-
-Restore a container:
-
-```bash
-docker start --checkpoint --checkpoint-dir=<directory> <container>
-```
-
-### Issues Preventing Compatibility with Docker
-
-- **[Moby #37360][leave-running]:** Docker version 18.03.0-ce and earlier
- hangs when checkpointing and does not create the checkpoint. To successfully
- use this feature, install a custom version of docker-ce from the moby
- repository. This issue is caused by an improper implementation of the
- `--leave-running` flag. This issue is fixed in newer releases.
-- **Docker does not support restoration into new containers:** Docker
- currently expects the container which created the checkpoint to be the same
- container used to restore which is not possible in runsc. When Docker
- supports container migration and therefore restoration into new containers,
- this will be the flow.
-- **[Moby #37344][checkpoint-dir]:** Docker does not currently support the
- `--checkpoint-dir` flag but this will be required when restoring from a
- checkpoint made in another container.
-
-[leave-running]: https://github.com/moby/moby/pull/37360
-[checkpoint-dir]: https://github.com/moby/moby/issues/37344
diff --git a/g3doc/user_guide/compatibility.md b/g3doc/user_guide/compatibility.md
deleted file mode 100644
index 76e879a01..000000000
--- a/g3doc/user_guide/compatibility.md
+++ /dev/null
@@ -1,94 +0,0 @@
-# Applications
-
-[TOC]
-
-gVisor implements a large portion of the Linux surface and while we strive to
-make it broadly compatible, there are (and always will be) unimplemented
-features and bugs. The only real way to know if it will work is to try. If you
-find a container that doesn’t work and there is no known issue, please
-[file a bug][bug] indicating the full command you used to run the image. You can
-view open issues related to compatibility [here][issues].
-
-If you're able to provide the [debug logs](../debugging/), the problem is likely
-to be fixed much faster.
-
-## What works?
-
-The following applications/images have been tested:
-
-* elasticsearch
-* golang
-* httpd
-* java8
-* jenkins
-* mariadb
-* memcached
-* mongo
-* mysql
-* nginx
-* node
-* php
-* postgres
-* prometheus
-* python
-* redis
-* registry
-* tomcat
-* wordpress
-
-## Utilities
-
-Most common utilities work. Note that:
-
-* Some tools, such as `tcpdump` and old versions of `ping`, require explicitly
-    enabling raw sockets via the unsafe `--net-raw` runsc flag (see the example
-    after this list).
-* Different Docker images can behave differently. For example, Alpine Linux
- and Ubuntu have different `ip` binaries.
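-
-For example, one way to expose raw sockets only to the containers that need
-them is to install a separate runtime entry with the flag enabled (the runtime
-name `runsc-raw` is arbitrary):
-
-```bash
-sudo runsc install --runtime runsc-raw -- --net-raw
-```
-
-Restart the Docker daemon afterwards and select the new runtime with
-`--runtime=runsc-raw` when running the container.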
-
-Specific tools include:
-
-<!-- mdformat off(don't wrap the table) -->
-
-| Tool | Status |
-| :--------: | :-----------------------------------------: |
-| apt-get | Working. |
-| bundle | Working. |
-| cat | Working. |
-| curl | Working. |
-| dd | Working. |
-| df | Working. |
-| dig | Working. |
-| drill | Working. |
-| env | Working. |
-| find | Working. |
-| gcore | Working. |
-| gdb | Working. |
-| gosu | Working. |
-| grep | Working (unless stdin is a pipe and stdout is /dev/null). |
-| ifconfig | Works partially, like ip. Full support [in progress](https://gvisor.dev/issue/578). |
-| ip | Some subcommands work (e.g. addr, route). Full support [in progress](https://gvisor.dev/issue/578). |
-| less | Working. |
-| ls | Working. |
-| lsof | Working. |
-| mount | Works in readonly mode. gVisor doesn't currently support creating new mounts at runtime. |
-| nc | Working. |
-| nmap | Not working. |
-| netstat | [In progress](https://gvisor.dev/issue/2112). |
-| nslookup | Working. |
-| ping | Working. |
-| ps | Working. |
-| route | Working. |
-| ss | [In progress](https://gvisor.dev/issue/2114). |
-| sshd | Partially working. Job control [in progress](https://gvisor.dev/issue/154). |
-| strace | Working. |
-| tar | Working. |
-| tcpdump | Working. [Promiscuous mode in progress](https://gvisor.dev/issue/3333). |
-| top | Working. |
-| uptime | Working. |
-| vim | Working. |
-| wget | Working. |
-
-<!-- mdformat on -->
-
-[bug]: https://github.com/google/gvisor/issues/new?title=Compatibility%20Issue:
-[issues]: https://github.com/google/gvisor/issues?q=is%3Aissue+is%3Aopen+label%3A%22area%3A+compatibility%22
diff --git a/g3doc/user_guide/containerd/BUILD b/g3doc/user_guide/containerd/BUILD
deleted file mode 100644
index 0ede4819c..000000000
--- a/g3doc/user_guide/containerd/BUILD
+++ /dev/null
@@ -1,33 +0,0 @@
-load("//website:defs.bzl", "doc")
-
-package(
- default_visibility = ["//website:__pkg__"],
- licenses = ["notice"],
-)
-
-doc(
- name = "quick_start",
- src = "quick_start.md",
- category = "User Guide",
- permalink = "/docs/user_guide/containerd/quick_start/",
- subcategory = "Containerd",
- weight = "10",
-)
-
-doc(
- name = "configuration",
- src = "configuration.md",
- category = "User Guide",
- permalink = "/docs/user_guide/containerd/configuration/",
- subcategory = "Containerd",
- weight = "90",
-)
-
-doc(
- name = "containerd_11",
- src = "containerd_11.md",
- category = "User Guide",
- include_in_menu = False,
- permalink = "/docs/user_guide/containerd/containerd_11/",
- subcategory = "Containerd",
-)
diff --git a/g3doc/user_guide/containerd/configuration.md b/g3doc/user_guide/containerd/configuration.md
deleted file mode 100644
index a214fb0c7..000000000
--- a/g3doc/user_guide/containerd/configuration.md
+++ /dev/null
@@ -1,102 +0,0 @@
-# Containerd Advanced Configuration
-
-This document describes how to configure runtime options for
-`containerd-shim-runsc-v1`. You can find the installation instructions and
-minimal requirements in [Containerd Quick Start](./quick_start.md).
-
-## Shim Configuration
-
-The shim can be provided with a configuration file containing options to the
-shim itself as well as a set of flags to runsc. Here is a quick example:
-
-```shell
-cat <<EOF | sudo tee /etc/containerd/runsc.toml
-option = "value"
-[runsc_config]
- flag = "value"
-EOF
-```
-
-The set of options that can be configured can be found in
-[options.go](https://cs.opensource.google/gvisor/gvisor/+/master:pkg/shim/options.go).
-Values under `[runsc_config]` can be used to set arbitrary flags to runsc.
-`flag = "value"` is converted to `--flag="value"` when runsc is invoked. Run
-`runsc flags` to see which flags are available.
-
-Next, containerd needs to be configured to send the configuration file to the
-shim.
-
-### Containerd 1.3+
-
-Starting in 1.3, containerd supports a configurable `ConfigPath` in the runtime
-configuration. Here is an example:
-
-```shell
-cat <<EOF | sudo tee /etc/containerd/config.toml
-version = 2
-[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
- runtime_type = "io.containerd.runc.v2"
-[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runsc]
- runtime_type = "io.containerd.runsc.v1"
-[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runsc.options]
- TypeUrl = "io.containerd.runsc.v1.options"
- ConfigPath = "/etc/containerd/runsc.toml"
-EOF
-```
-
-When you are done, restart containerd to pick up the changes.
-
-```shell
-sudo systemctl restart containerd
-```
-
-## Debug
-
-When `shim_debug` is enabled in `/etc/containerd/config.toml`, containerd will
-forward shim logs to its own log. You can additionally set `level = "debug"` to
-enable debug logs. To see the logs, run `sudo journalctl -u containerd`. Here is
-a containerd configuration file that enables both options:
-
-```shell
-cat <<EOF | sudo tee /etc/containerd/config.toml
-version = 2
-[debug]
- level = "debug"
-[plugins."io.containerd.runtime.v1.linux"]
- shim_debug = true
-[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
- runtime_type = "io.containerd.runc.v2"
-[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runsc]
- runtime_type = "io.containerd.runsc.v1"
-[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runsc.option]
- TypeUrl = "io.containerd.runsc.v1.options"
- ConfigPath = "/etc/containerd/runsc.toml"
-EOF
-```
-
-It can be hard to separate containerd's messages from the shim's, though. To
-create a log file dedicated to the shim, you can set the `log_path` and
-`log_level` values in the shim configuration file:
-
-- `log_path` is the directory where the shim logs will be created. `%ID%` in
-    the path is replaced with the container ID.
-- `log_level` sets the log level. It is normally set to "debug" as there is
-    not much interesting happening at other log levels.
-
-### Example: Enable shim and gVisor debug logging
-
-gVisor debug logging can be enabled by setting the `debug` and `debug-log` flags.
-The shim will replace "%ID%" with the container ID, and "%COMMAND%" with the
-runsc command (run, boot, etc.) in the path of the `debug-log` flag.
-
-Find out more about debugging in the [debugging guide](../debugging.md).
-
-```shell
-cat <<EOF | sudo tee /etc/containerd/runsc.toml
-log_path = "/var/log/runsc/%ID%/shim.log"
-log_level = "debug"
-[runsc_config]
- debug = "true"
- debug-log = "/var/log/runsc/%ID%/gvisor.%COMMAND%.log"
-EOF
-```
diff --git a/g3doc/user_guide/containerd/containerd_11.md b/g3doc/user_guide/containerd/containerd_11.md
deleted file mode 100644
index 200d3da76..000000000
--- a/g3doc/user_guide/containerd/containerd_11.md
+++ /dev/null
@@ -1,167 +0,0 @@
-# Older Versions (containerd 1.1)
-
-**WARNING: containerd 1.1 and shim v1 are no longer supported. The instructions
-below are kept for reference only, in case you're dealing with an old version.
-Upgrading to the latest version is highly recommended.**
-
-This document describes how to install and run the `gvisor-containerd-shim`
-using the untrusted workload CRI extension. This requires `containerd` 1.1 or
-later.
-
-*Note: The untrusted workload CRI extension is deprecated by containerd and
-`gvisor-containerd-shim` is maintained on a best-effort basis. If you are using
-containerd 1.2+, please see the
-[containerd 1.2+ documentation](./quick_start.md) and use
-`containerd-shim-runsc-v1`.*
-
-## Requirements
-
-- **runsc** and **gvisor-containerd-shim**: See the
- [installation guide](/docs/user_guide/install/).
-- **containerd**: See the [containerd website](https://containerd.io/) for
- information on how to install containerd.
-
-## Configure containerd
-
-Create the configuration for the gvisor shim in
-`/etc/containerd/gvisor-containerd-shim.toml`:
-
-```shell
-cat <<EOF | sudo tee /etc/containerd/gvisor-containerd-shim.toml
-# This is the path to the default runc containerd-shim.
-runc_shim = "/usr/local/bin/containerd-shim"
-EOF
-```
-
-Update `/etc/containerd/config.toml`. Be sure to update the path to
-`gvisor-containerd-shim` and `runsc` if necessary:
-
-```shell
-cat <<EOF | sudo tee /etc/containerd/config.toml
-disabled_plugins = ["restart"]
-[plugins.linux]
- shim = "/usr/local/bin/gvisor-containerd-shim"
- shim_debug = true
-[plugins.cri.containerd.untrusted_workload_runtime]
- runtime_type = "io.containerd.runtime.v1.linux"
- runtime_engine = "/usr/local/bin/runsc"
- runtime_root = "/run/containerd/runsc"
-EOF
-```
-
-Restart `containerd`:
-
-```shell
-sudo systemctl restart containerd
-```
-
-## Usage
-
-You can run containers in gVisor via containerd's CRI.
-
-### Install crictl
-
-Download and install the `crictl` binary:
-
-```shell
-{
-wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.13.0/crictl-v1.13.0-linux-amd64.tar.gz
-tar xf crictl-v1.13.0-linux-amd64.tar.gz
-sudo mv crictl /usr/local/bin
-}
-```
-
-Write the `crictl` configuration file:
-
-```shell
-cat <<EOF | sudo tee /etc/crictl.yaml
-runtime-endpoint: unix:///run/containerd/containerd.sock
-EOF
-```
-
-### Create the nginx Sandbox in gVisor
-
-Pull the nginx image:
-
-```shell
-sudo crictl pull nginx
-```
-
-Create the sandbox creation request:
-
-```shell
-cat <<EOF | tee sandbox.json
-{
- "metadata": {
- "name": "nginx-sandbox",
- "namespace": "default",
- "attempt": 1,
- "uid": "hdishd83djaidwnduwk28bcsb"
- },
- "annotations": {
- "io.kubernetes.cri.untrusted-workload": "true"
- },
- "linux": {
- },
- "log_directory": "/tmp"
-}
-EOF
-```
-
-Create the pod in gVisor:
-
-```shell
-SANDBOX_ID=$(sudo crictl runp sandbox.json)
-```
-
-### Run the nginx Container in the Sandbox
-
-Create the nginx container creation request:
-
-```shell
-cat <<EOF | tee container.json
-{
- "metadata": {
- "name": "nginx"
- },
- "image":{
- "image": "nginx"
- },
- "log_path":"nginx.0.log",
- "linux": {
- }
-}
-EOF
-```
-
-Create the nginx container:
-
-```shell
-CONTAINER_ID=$(sudo crictl create ${SANDBOX_ID} container.json sandbox.json)
-```
-
-Start the nginx container:
-
-```shell
-sudo crictl start ${CONTAINER_ID}
-```
-
-### Validate the container
-
-Inspect the created pod:
-
-```shell
-sudo crictl inspectp ${SANDBOX_ID}
-```
-
-Inspect the nginx container:
-
-```shell
-sudo crictl inspect ${CONTAINER_ID}
-```
-
-Verify that nginx is running in gVisor:
-
-```shell
-sudo crictl exec ${CONTAINER_ID} dmesg | grep -i gvisor
-```
diff --git a/g3doc/user_guide/containerd/quick_start.md b/g3doc/user_guide/containerd/quick_start.md
deleted file mode 100644
index c742f225c..000000000
--- a/g3doc/user_guide/containerd/quick_start.md
+++ /dev/null
@@ -1,182 +0,0 @@
-# Containerd Quick Start
-
-This document describes how to use `containerd-shim-runsc-v1` with the runtime
-handler support in `containerd`.
-
-> ⚠️ NOTE: If you are using Kubernetes and set up your cluster using kubeadm you
-> may run into issues. See the [FAQ](../FAQ.md#runtime-handler) for details.
-
-## Requirements
-
-- **runsc** and **containerd-shim-runsc-v1**: See the
- [installation guide](/docs/user_guide/install/).
-- **containerd**: See the [containerd website](https://containerd.io/) for
- information on how to install containerd. **Minimal version supported: 1.3.9
- or 1.4.3.**
-
-## Configure containerd
-
-Update `/etc/containerd/config.toml`. Make sure `containerd-shim-runsc-v1` is in
-`${PATH}` or in the same directory as the `containerd` binary.
-
-```shell
-cat <<EOF | sudo tee /etc/containerd/config.toml
-version = 2
-[plugins."io.containerd.runtime.v1.linux"]
- shim_debug = true
-[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
- runtime_type = "io.containerd.runc.v2"
-[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runsc]
- runtime_type = "io.containerd.runsc.v1"
-EOF
-```
-
-Restart `containerd`:
-
-```shell
-sudo systemctl restart containerd
-```
-
-## Usage
-
-You can run containers in gVisor via containerd's CRI.
-
-### Install crictl
-
-Download and install the `crictl` binary:
-
-```shell
-{
-wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.13.0/crictl-v1.13.0-linux-amd64.tar.gz
-tar xf crictl-v1.13.0-linux-amd64.tar.gz
-sudo mv crictl /usr/local/bin
-}
-```
-
-Write the `crictl` configuration file:
-
-```shell
-cat <<EOF | sudo tee /etc/crictl.yaml
-runtime-endpoint: unix:///run/containerd/containerd.sock
-EOF
-```
-
-### Create the nginx sandbox in gVisor
-
-Pull the nginx image:
-
-```shell
-sudo crictl pull nginx
-```
-
-Create the sandbox creation request:
-
-```shell
-cat <<EOF | tee sandbox.json
-{
- "metadata": {
- "name": "nginx-sandbox",
- "namespace": "default",
- "attempt": 1,
- "uid": "hdishd83djaidwnduwk28bcsb"
- },
- "linux": {
- },
- "log_directory": "/tmp"
-}
-EOF
-```
-
-Create the pod in gVisor:
-
-```shell
-SANDBOX_ID=$(sudo crictl runp --runtime runsc sandbox.json)
-```
-
-### Run the nginx container in the sandbox
-
-Create the nginx container creation request:
-
-```shell
-cat <<EOF | tee container.json
-{
- "metadata": {
- "name": "nginx"
- },
- "image":{
- "image": "nginx"
- },
- "log_path":"nginx.0.log",
- "linux": {
- }
-}
-EOF
-```
-
-Create the nginx container:
-
-```shell
-CONTAINER_ID=$(sudo crictl create ${SANDBOX_ID} container.json sandbox.json)
-```
-
-Start the nginx container:
-
-```shell
-sudo crictl start ${CONTAINER_ID}
-```
-
-### Validate the container
-
-Inspect the created pod:
-
-```shell
-sudo crictl inspectp ${SANDBOX_ID}
-```
-
-Inspect the nginx container:
-
-```shell
-sudo crictl inspect ${CONTAINER_ID}
-```
-
-Verify that nginx is running in gVisor:
-
-```shell
-sudo crictl exec ${CONTAINER_ID} dmesg | grep -i gvisor
-```
-
-### Set up the Kubernetes RuntimeClass
-
-Install the RuntimeClass for gVisor:
-
-```shell
-cat <<EOF | kubectl apply -f -
-apiVersion: node.k8s.io/v1beta1
-kind: RuntimeClass
-metadata:
- name: gvisor
-handler: runsc
-EOF
-```
-
-Create a Pod with the gVisor RuntimeClass:
-
-```shell
-cat <<EOF | kubectl apply -f -
-apiVersion: v1
-kind: Pod
-metadata:
- name: nginx-gvisor
-spec:
- runtimeClassName: gvisor
- containers:
- - name: nginx
- image: nginx
-EOF
-```
-
-Verify that the Pod is running:
-
-```shell
-kubectl get pod nginx-gvisor -o wide
-```
diff --git a/g3doc/user_guide/debugging.md b/g3doc/user_guide/debugging.md
deleted file mode 100644
index 2291b5fab..000000000
--- a/g3doc/user_guide/debugging.md
+++ /dev/null
@@ -1,156 +0,0 @@
-# Debugging
-
-[TOC]
-
-To enable debug and system call logging, add the `runtimeArgs` below to your
-[Docker](../quick_start/docker/) configuration (`/etc/docker/daemon.json`):
-
-```json
-{
- "runtimes": {
- "runsc": {
- "path": "/usr/local/bin/runsc",
- "runtimeArgs": [
- "--debug-log=/tmp/runsc/",
- "--debug",
- "--strace"
- ]
- }
- }
-}
-```
-
-> Note: the trailing `/` in `--debug-log` is needed for it to be interpreted as
-> a directory. Each `runsc` command executed will then create a separate log
-> file. Otherwise, log messages from all commands will be appended to the same
-> file.
-
-You may also want to pass `--log-packets` to troubleshoot network problems. Then
-restart the Docker daemon:
-
-```bash
-sudo systemctl restart docker
-```
-
-Run your container again, and inspect the files under `/tmp/runsc`. The log file
-ending with `.boot` will contain the strace logs from your application, which
-can be useful for identifying missing or broken system calls in gVisor. If you
-are having problems starting the container, the log file ending with `.create`
-may have the reason for the failure.
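-
-For example, a quick way to eyeball the collected logs (exact file names will
-differ on your system):
-
-```bash
-ls -l /tmp/runsc/
-tail -n 50 /tmp/runsc/*.boot
-```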
-
-## Stack traces
-
-The command `runsc debug --stacks` collects stack traces while the sandbox is
-running, which can be useful to troubleshoot issues or just to learn more about
-gVisor. It connects to the sandbox process, collects a stack dump, and writes it
-to the console. For example:
-
-```bash
-docker run --runtime=runsc --rm -d alpine sh -c "while true; do echo running; sleep 1; done"
-63254c6ab3a6989623fa1fb53616951eed31ac605a2637bb9ddba5d8d404b35b
-
-sudo runsc --root /var/run/docker/runtime-runsc/moby debug --stacks 63254c6ab3a6989623fa1fb53616951eed31ac605a2637bb9ddba5d8d404b35b
-```
-
-> Note: The `--root` value is provided by docker and is normally set to
-> `/var/run/docker/runtime-[runtime-name]/moby`. If in doubt, the value of
-> `--root` is logged in the `runsc` logs.
-
-## Debugger
-
-You can debug gVisor like any other Golang program. If you're running with
-Docker, you'll need to find the sandbox PID and attach the debugger as root.
-Here is an example:
-
-Install a runsc with debug symbols (you can also use the
-[nightly release](../install/#nightly)):
-
-```bash
-make dev BAZEL_OPTIONS="-c dbg"
-```
-
-Start the container you want to debug using the runsc runtime with debug
-options:
-
-```bash
-docker run --runtime=$(git branch --show-current)-d --rm --name=test -p 8080:80 -d nginx
-```
-
-Find the PID and attach your favorite debugger:
-
-```bash
-sudo dlv attach $(docker inspect test | grep Pid | head -n 1 | grep -oe "[0-9]*")
-```
-
-Set a breakpoint for accept:
-
-```bash
-break gvisor.dev/gvisor/pkg/sentry/socket/netstack.(*SocketOperations).Accept
-continue
-```
-
-In a different window connect to nginx to trigger the breakpoint:
-
-```bash
-curl http://localhost:8080/
-```
-
-## Profiling
-
-`runsc` integrates with Go profiling tools and gives you easy commands to
-profile CPU and heap usage. First you need to enable `--profile` in the command
-line options before starting the container:
-
-```json
-{
- "runtimes": {
- "runsc-prof": {
- "path": "/usr/local/bin/runsc",
- "runtimeArgs": [
- "--profile"
- ]
- }
- }
-}
-```
-
-> Note: Enabling profiling loosens the seccomp protection added to the sandbox,
-> and should not be run in production under normal circumstances.
-
-Then restart docker to refresh the runtime options. While the container is
-running, execute `runsc debug` to collect profile information and save it to a
-file. Here are the options available:
-
-* **--profile-heap:** Generates a heap profile to the specified file.
-* **--profile-cpu:** Enables the CPU profiler, waits for `--duration` seconds
-    and generates a CPU profile to the specified file.
-
-For example:
-
-```bash
-docker run --runtime=runsc-prof --rm -d alpine sh -c "while true; do echo running; sleep 1; done"
-63254c6ab3a6989623fa1fb53616951eed31ac605a2637bb9ddba5d8d404b35b
-
-sudo runsc --root /var/run/docker/runtime-runsc-prof/moby debug --profile-heap=/tmp/heap.prof 63254c6ab3a6989623fa1fb53616951eed31ac605a2637bb9ddba5d8d404b35b
-sudo runsc --root /var/run/docker/runtime-runsc-prof/moby debug --profile-cpu=/tmp/cpu.prof --duration=30s 63254c6ab3a6989623fa1fb53616951eed31ac605a2637bb9ddba5d8d404b35b
-```
-
-The resulting files can be opened using `go tool pprof` or [pprof][]. The
-examples below create an image file (`.svg`) with the heap profile and write the
-top CPU-consuming functions to the console:
-
-```bash
-go tool pprof -svg /usr/local/bin/runsc /tmp/heap.prof
-go tool pprof -top /usr/local/bin/runsc /tmp/cpu.prof
-```
-
-[pprof]: https://github.com/google/pprof/blob/master/doc/README.md
-
-### Docker Proxy
-
-When forwarding a port to the container, Docker will likely route traffic
-through the [docker-proxy][]. This proxy may make profiling noisy, so it can be
-helpful to bypass it. Do so by sending traffic directly to the container IP and
-port. For example, if the `docker0` IP is `192.168.9.1`, the container IP is
-likely a subsequent IP, such as `192.168.9.2`.
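-
-For example, assuming a container named `test` serving on port `80` and attached
-to the default bridge network (both are placeholders), its IP address can be
-queried and used directly:
-
-```bash
-CONTAINER_IP=$(docker inspect -f '{{.NetworkSettings.IPAddress}}' test)
-curl http://${CONTAINER_IP}:80/
-```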
-
-[docker-proxy]: https://windsock.io/the-docker-proxy/
diff --git a/g3doc/user_guide/filesystem.md b/g3doc/user_guide/filesystem.md
deleted file mode 100644
index cd00762dd..000000000
--- a/g3doc/user_guide/filesystem.md
+++ /dev/null
@@ -1,60 +0,0 @@
-# Filesystem
-
-[TOC]
-
-gVisor accesses the filesystem through a file proxy, called the Gofer. The gofer
-runs as a separate process that is isolated from the sandbox. Gofer instances
-communicate with their respective sentry using the 9P protocol. For another
-explanation, see [What is gVisor?](../README.md).
-
-## Sandbox overlay
-
-To isolate the host filesystem from the sandbox, you can set a writable tmpfs
-overlay on top of the entire filesystem. All modifications are made to the
-overlay, keeping the host filesystem unmodified.
-
-> Note: All created and modified files are stored in memory inside the sandbox.
-
-To use the tmpfs overlay, add the following `runtimeArgs` to your Docker
-configuration (`/etc/docker/daemon.json`) and restart the Docker daemon:
-
-```json
-{
- "runtimes": {
- "runsc": {
- "path": "/usr/local/bin/runsc",
- "runtimeArgs": [
- "--overlay"
- ]
- }
- }
-}
-```
-
-## Shared root filesystem
-
-The root filesystem is where the image is extracted and is not generally
-modified from outside the sandbox. This allows for some optimizations, like
-skipping checks to determine if a directory has changed since the last time it
-was cached, thus missing updates that may have happened. If you need to `docker
-cp` files inside the root filesystem, you may want to enable shared mode. Just
-be aware that file system access will be slower due to the extra checks that are
-required.
-
-> Note: External mounts are always shared.
-
-To set the root filesystem as shared, add the following `runtimeArgs` to your
-Docker configuration (`/etc/docker/daemon.json`) and restart the Docker daemon:
-
-```json
-{
- "runtimes": {
- "runsc": {
- "path": "/usr/local/bin/runsc",
- "runtimeArgs": [
- "--file-access=shared"
- ]
- }
- }
-}
-```
diff --git a/g3doc/user_guide/install.md b/g3doc/user_guide/install.md
deleted file mode 100644
index 85ba6a161..000000000
--- a/g3doc/user_guide/install.md
+++ /dev/null
@@ -1,181 +0,0 @@
-# Installation
-
-[TOC]
-
-> Note: gVisor supports only x86\_64 and requires Linux 4.14.77+
-> ([older Linux](./networking.md#gso)).
-
-## Install latest release {#install-latest}
-
-To download and install the latest release manually follow these steps:
-
-```bash
-(
- set -e
- ARCH=$(uname -m)
- URL=https://storage.googleapis.com/gvisor/releases/release/latest/${ARCH}
- wget ${URL}/runsc ${URL}/runsc.sha512 \
- ${URL}/containerd-shim-runsc-v1 ${URL}/containerd-shim-runsc-v1.sha512
- sha512sum -c runsc.sha512 \
- -c containerd-shim-runsc-v1.sha512
- rm -f *.sha512
- chmod a+rx runsc containerd-shim-runsc-v1
- sudo mv runsc containerd-shim-runsc-v1 /usr/local/bin
-)
-```
-
-To install gVisor as a Docker runtime, run the following commands:
-
-```bash
-/usr/local/bin/runsc install
-sudo systemctl reload docker
-docker run --rm --runtime=runsc hello-world
-```
-
-For more details about using gVisor with Docker, see
-[Docker Quick Start](./quick_start/docker.md)
-
-Note: It is important to copy `runsc` to a location that is readable and
-executable to all users, since `runsc` executes itself as user `nobody` to avoid
-unnecessary privileges. The `/usr/local/bin` directory is a good place to put
-the `runsc` binary.
-
-## Install from an `apt` repository
-
-First, appropriate dependencies must be installed to allow `apt` to install
-packages via https:
-
-```bash
-sudo apt-get update && \
-sudo apt-get install -y \
- apt-transport-https \
- ca-certificates \
- curl \
- gnupg-agent \
- software-properties-common
-```
-
-Next, configure the key used to sign archives and the repository.
-
-NOTE: The key was updated on 2021-07-13 to replace the expired key. If you get
-errors about the key being expired, run the `apt-key add` command below again.
-
-```bash
-curl -fsSL https://gvisor.dev/archive.key | sudo apt-key add -
-sudo add-apt-repository "deb [arch=amd64,arm64] https://storage.googleapis.com/gvisor/releases release main"
-```
-
-Now the runsc package can be installed:
-
-```bash
-sudo apt-get update && sudo apt-get install -y runsc
-```
-
-If you have Docker installed, it will be automatically configured.
-
-## Versions
-
-The `runsc` binaries and repositories are available in multiple versions and
-release channels. You should pick the version you'd like to install. For
-experimentation, the nightly release is recommended. For production use, the
-latest release is recommended.
-
-After selecting an appropriate release channel from the options below, proceed
-to the preferred installation mechanism: manual or from an `apt` repository.
-
-> Note: Older releases are still available but may not have an `${ARCH}`
-> component in the URL. These releases were available for `x86_64` only.
-
-### HEAD
-
-Binaries are available for every commit on the `master` branch, and are
-available at the following URL:
-
-`https://storage.googleapis.com/gvisor/releases/master/latest/${ARCH}`
-
-You can use this link with the steps described in
-[Install latest release](#install-latest).
-
-For `apt` installation, use `master` to configure the repository:
-
-```bash
-sudo add-apt-repository "deb [arch=amd64,arm64] https://storage.googleapis.com/gvisor/releases master main"
-```
-
-### Nightly
-
-Nightly releases are built most nights from the master branch, and are available
-at the following URL:
-
-`https://storage.googleapis.com/gvisor/releases/nightly/latest/${ARCH}`
-
-You can use this link with the steps described in
-[Install latest release](#install-latest).
-
-Specific nightly releases can be found at:
-
-`https://storage.googleapis.com/gvisor/releases/nightly/${yyyy-mm-dd}/${ARCH}`
-
-Note that a release may not be available for every day.
-
-For `apt` installation, use `nightly` to configure the repository:
-
-```bash
-sudo add-apt-repository "deb [arch=amd64,arm64] https://storage.googleapis.com/gvisor/releases nightly main"
-```
-
-### Latest release
-
-The latest official release is available at the following URL:
-
-`https://storage.googleapis.com/gvisor/releases/release/latest/${ARCH}`
-
-You can use this link with the steps described in
-[Install latest release](#install-latest).
-
-For `apt` installation, use `release` to configure the repository:
-
-```bash
-sudo add-apt-repository "deb [arch=amd64,arm64] https://storage.googleapis.com/gvisor/releases release main"
-```
-
-### Specific release
-
-Specific releases are the latest [point release](#point-release) for a given
-date. Specific releases should be available for any date that has a point
-release. A given release is available at the following URL:
-
-`https://storage.googleapis.com/gvisor/releases/release/${yyyymmdd}/${ARCH}`
-
-You can use this link with the steps described in
-[Install latest release](#install-latest).
-
-See the [releases](https://github.com/google/gvisor/releases) page for
-information about specific releases.
-
-For `apt` installation of a specific release, which may include point updates,
-use the date of the release for the repository, e.g. `${yyyymmdd}`.
-
-```bash
-sudo add-apt-repository "deb [arch=amd64,arm64] https://storage.googleapis.com/gvisor/releases yyyymmdd main"
-```
-
-> Note: only newer releases may be available as `apt` repositories.
-
-### Point release
-
-Point releases correspond to
-[releases](https://github.com/google/gvisor/releases) tagged in the Github
-repository. A given point release is available at the following URL:
-
-`https://storage.googleapis.com/gvisor/releases/release/${yyyymmdd}.${rc}/${ARCH}`
-
-You can use this link with the steps described in
-[Install latest release](#install-latest).
-
-Note that `apt` installation of a specific point release is not supported.
-
-After installation, try out `runsc` by following the
-[Docker Quick Start](./quick_start/docker.md),
-[Containerd QuickStart](./containerd/quick_start.md), or
-[OCI Quick Start](./quick_start/oci.md).
diff --git a/g3doc/user_guide/networking.md b/g3doc/user_guide/networking.md
deleted file mode 100644
index 75f01aac5..000000000
--- a/g3doc/user_guide/networking.md
+++ /dev/null
@@ -1,84 +0,0 @@
-# Networking
-
-[TOC]
-
-gVisor implements its own network stack called netstack. All aspects of the
-network stack are handled inside the Sentry — including TCP connection state,
-control messages, and packet assembly — keeping it isolated from the host
-network stack. Data link layer packets are written directly to the virtual
-device inside the network namespace set up by Docker or Kubernetes.
-
-The IP address and routes configured for the device are transferred inside the
-sandbox. The loopback device runs exclusively inside the sandbox and does not
-use the host. You can inspect them by running:
-
-```bash
-docker run --rm --runtime=runsc alpine ip addr
-```
-
-## Network passthrough
-
-For high-performance networking applications, you may choose to disable the user
-space network stack and instead use the host network stack, including the
-loopback. Note that this mode reduces isolation from the host.
-
-Add the following `runtimeArgs` to your Docker configuration
-(`/etc/docker/daemon.json`) and restart the Docker daemon:
-
-```json
-{
- "runtimes": {
- "runsc": {
- "path": "/usr/local/bin/runsc",
- "runtimeArgs": [
- "--network=host"
- ]
- }
- }
-}
-```
-
-## Disabling external networking
-
-To completely isolate the host and network from the sandbox, external networking
-can be disabled. The sandbox will still contain a loopback provided by netstack.
-
-Add the following `runtimeArgs` to your Docker configuration
-(`/etc/docker/daemon.json`) and restart the Docker daemon:
-
-```json
-{
- "runtimes": {
- "runsc": {
- "path": "/usr/local/bin/runsc",
- "runtimeArgs": [
- "--network=none"
- ]
- }
- }
-}
-```
-
-### Disable GSO {#gso}
-
-If your Linux kernel is older than 4.14.77 but newer than 3.17, you can disable
-Generic Segmentation Offload (GSO) to run gVisor. Add the
-`--gso=false` flag to your Docker runtime configuration
-(`/etc/docker/daemon.json`) and restart the Docker daemon:
-
-> Note: Network performance, especially for large payloads, will be greatly
-> reduced.
-
-```json
-{
- "runtimes": {
- "runsc": {
- "path": "/usr/local/bin/runsc",
- "runtimeArgs": [
- "--gso=false"
- ]
- }
- }
-}
-```
-
diff --git a/g3doc/user_guide/platforms.md b/g3doc/user_guide/platforms.md
deleted file mode 100644
index 752025881..000000000
--- a/g3doc/user_guide/platforms.md
+++ /dev/null
@@ -1,95 +0,0 @@
-# Changing Platforms
-
-[TOC]
-
-This guide describes how to change the
-[platform](../architecture_guide/platforms.md) used by `runsc`.
-
-## Prerequisites
-
-If you intend to run the KVM platform, you will also need to have KVM installed
-on your system. If you are running a Debian-based system like Debian or Ubuntu,
-you can usually do this by ensuring that the module is loaded and that
-permissions are set appropriately on the `/dev/kvm` device.
-
-If you have an Intel CPU:
-
-```bash
-sudo modprobe kvm-intel && sudo chmod a+rw /dev/kvm
-```
-
-If you have an AMD CPU:
-
-```bash
-sudo modprobe kvm-amd && sudo chmod a+rw /dev/kvm
-```
-
-If you are using a virtual machine you will need to make sure that nested
-virtualization is configured. Here are links to documents on how to set up
-nested virtualization in several popular environments:
-
-* Google Cloud: [Enabling Nested Virtualization for VM Instances][nested-gcp]
-* Microsoft Azure:
- [How to enable nested virtualization in an Azure VM][nested-azure]
-* VirtualBox: [Nested Virtualization][nested-virtualbox]
-* KVM: [Nested Guests][nested-kvm]
-
-***Note: nested virtualization will have poor performance and is historically a
-cause of security issues (e.g.
-[CVE-2018-12904](https://nvd.nist.gov/vuln/detail/CVE-2018-12904)). It is not
-recommended for production.***
-
-## Configuring Docker
-
-The platform is selected by the `--platform` command line flag passed to
-`runsc`. By default, the ptrace platform is selected. For example, to select the
-KVM platform, modify your Docker configuration (`/etc/docker/daemon.json`) to
-pass the `--platform` argument:
-
-```json
-{
- "runtimes": {
- "runsc": {
- "path": "/usr/local/bin/runsc",
- "runtimeArgs": [
- "--platform=kvm"
- ]
- }
- }
-}
-```
-
-You must restart the Docker daemon after making changes to this file; typically
-this is done via `systemd`:
-
-```bash
-sudo systemctl restart docker
-```
-
-Note that you may configure multiple runtimes using different platforms. For
-example, the following configuration defines one runtime for ptrace and one
-for the KVM platform:
-
-```json
-{
- "runtimes": {
- "runsc-ptrace": {
- "path": "/usr/local/bin/runsc",
- "runtimeArgs": [
- "--platform=ptrace"
- ]
- },
- "runsc-kvm": {
- "path": "/usr/local/bin/runsc",
- "runtimeArgs": [
- "--platform=kvm"
- ]
- }
- }
-}
-```
-
-[nested-azure]: https://docs.microsoft.com/en-us/azure/virtual-machines/windows/nested-virtualization
-[nested-gcp]: https://cloud.google.com/compute/docs/instances/enable-nested-virtualization-vm-instances
-[nested-virtualbox]: https://www.virtualbox.org/manual/UserManual.html#nested-virt
-[nested-kvm]: https://www.linux-kvm.org/page/Nested_Guests
diff --git a/g3doc/user_guide/quick_start/BUILD b/g3doc/user_guide/quick_start/BUILD
deleted file mode 100644
index 63f17f9cb..000000000
--- a/g3doc/user_guide/quick_start/BUILD
+++ /dev/null
@@ -1,33 +0,0 @@
-load("//website:defs.bzl", "doc")
-
-package(
- default_visibility = ["//website:__pkg__"],
- licenses = ["notice"],
-)
-
-doc(
- name = "docker",
- src = "docker.md",
- category = "User Guide",
- permalink = "/docs/user_guide/quick_start/docker/",
- subcategory = "Quick Start",
- weight = "11",
-)
-
-doc(
- name = "oci",
- src = "oci.md",
- category = "User Guide",
- permalink = "/docs/user_guide/quick_start/oci/",
- subcategory = "Quick Start",
- weight = "12",
-)
-
-doc(
- name = "kubernetes",
- src = "kubernetes.md",
- category = "User Guide",
- permalink = "/docs/user_guide/quick_start/kubernetes/",
- subcategory = "Quick Start",
- weight = "13",
-)
diff --git a/g3doc/user_guide/quick_start/docker.md b/g3doc/user_guide/quick_start/docker.md
deleted file mode 100644
index ee842e453..000000000
--- a/g3doc/user_guide/quick_start/docker.md
+++ /dev/null
@@ -1,99 +0,0 @@
-# Docker Quick Start
-
-> Note: This guide requires Docker version 17.09.0 or greater. Refer to the
-> [Docker documentation][docker] for how to install it.
-
-This guide will help you quickly get started running Docker containers using
-gVisor.
-
-First, follow the [Installation guide][install].
-
-If you use the `apt` repository or the `automated` install, then you can skip
-the next section and proceed straight to running a container.
-
-## Configuring Docker
-
-First you will need to configure Docker to use `runsc` by adding a runtime entry
-to your Docker configuration (e.g. `/etc/docker/daemon.json`). The easiest way
-to do this is via the `runsc install` command. This will install a Docker
-runtime named "runsc" by default.
-
-```bash
-sudo runsc install
-```
-
-You must restart the Docker daemon after installing the runtime. Typically this
-is done via `systemd`:
-
-```bash
-sudo systemctl restart docker
-```
-
-## Running a container
-
-Now run your container using the `runsc` runtime:
-
-```bash
-docker run --runtime=runsc --rm hello-world
-```
-
-You can also run a terminal to explore the container.
-
-```bash
-docker run --runtime=runsc --rm -it ubuntu /bin/bash
-```
-
-Many Docker options are compatible with gVisor; try them out. Here is an
-example:
-
-```bash
-docker run --runtime=runsc --rm --link backend:database -v ~/bin:/tools:ro -p 8080:80 --cpus=0.5 -it busybox telnet towel.blinkenlights.nl
-```
-
-## Verify the runtime
-
-You can verify that you are running in gVisor using the `dmesg` command.
-
-```text
-$ docker run --runtime=runsc -it ubuntu dmesg
-[ 0.000000] Starting gVisor...
-[ 0.354495] Daemonizing children...
-[ 0.564053] Constructing home...
-[ 0.976710] Preparing for the zombie uprising...
-[ 1.299083] Creating process schedule...
-[ 1.479987] Committing treasure map to memory...
-[ 1.704109] Searching for socket adapter...
-[ 1.748935] Generating random numbers by fair dice roll...
-[ 2.059747] Digging up root...
-[ 2.259327] Checking naughty and nice process list...
-[ 2.610538] Rewriting operating system in Javascript...
-[ 2.613217] Ready!
-```
-
-Note that this is easily replicated by an attacker, so applications should never
-use `dmesg` to verify the runtime in a security-sensitive context.
-
-## Options
-
-You may also wish to install a runtime entry with different options. The `runsc
-install` command can accept flags that will be passed to the runtime when it is
-invoked by Docker. For example, to install a runtime with debugging enabled, run
-the following:
-
-```bash
-sudo runsc install --runtime runsc-debug -- \
- --debug \
- --debug-log=/tmp/runsc-debug.log \
- --strace \
- --log-packets
-```
-
-Next, look at the different options available for gVisor: [platform][platforms],
-[network][networking], [filesystem][filesystem].
-
-[docker]: https://docs.docker.com/install/
-[storage-driver]: https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-storage-driver
-[install]: /docs/user_guide/install/
-[filesystem]: /docs/user_guide/filesystem/
-[networking]: /docs/user_guide/networking/
-[platforms]: /docs/user_guide/platforms/
diff --git a/g3doc/user_guide/quick_start/kubernetes.md b/g3doc/user_guide/quick_start/kubernetes.md
deleted file mode 100644
index 395cd4b71..000000000
--- a/g3doc/user_guide/quick_start/kubernetes.md
+++ /dev/null
@@ -1,34 +0,0 @@
-# Kubernetes Quick Start
-
-gVisor can be used to run Kubernetes pods and has several integration points
-with Kubernetes.
-
-## Using Minikube
-
-gVisor can run sandboxed containers in a Kubernetes cluster with Minikube. After
-the gVisor addon is enabled, pods with the `gvisor` [Runtime Class][runtimeclass]
-will execute with `runsc`. Follow [these instructions][minikube] to enable the
-gVisor addon.
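-
-As a rough sketch (the linked instructions are authoritative and may change),
-enabling the addon typically looks like this:
-
-```bash
-minikube start --container-runtime=containerd
-minikube addons enable gvisor
-```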
-
-## Using Containerd
-
-You can also set up Kubernetes nodes to run pods in gVisor using
-[containerd][containerd] and the gVisor containerd shim. You can find
-instructions in the [Containerd Quick Start][gvisor-containerd].
-
-## Using GKE Sandbox
-
-[GKE Sandbox][gke-sandbox] is available in [Google Kubernetes Engine][gke]. You
-just need to deploy a node pool with gVisor enabled in your cluster, and it will
-run pods that specify `runtimeClassName: gvisor` inside a gVisor sandbox for
-you. [Here][wordpress-quick] is a quick example showing how to deploy a
-WordPress site. You can view the full documentation [here][gke-sandbox-docs].
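-
-For example, a gVisor-enabled node pool can be created with `gcloud` (the
-cluster and pool names are placeholders; see the GKE Sandbox documentation
-linked above for the authoritative steps):
-
-```bash
-gcloud container node-pools create gvisor-pool \
-    --cluster=my-cluster \
-    --sandbox type=gvisor
-```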
-
-[containerd]: https://containerd.io/
-[minikube]: https://github.com/kubernetes/minikube/blob/master/deploy/addons/gvisor/README.md
-[gke]: https://cloud.google.com/kubernetes-engine/
-[gke-sandbox]: https://cloud.google.com/kubernetes-engine/sandbox/
-[gke-sandbox-docs]: https://cloud.google.com/kubernetes-engine/docs/how-to/sandbox-pods
-[gvisor-containerd]: /docs/user_guide/containerd/quick_start/
-[runtimeclass]: https://kubernetes.io/docs/concepts/containers/runtime-class/
-[wordpress-quick]: /docs/tutorials/kubernetes/
diff --git a/g3doc/user_guide/quick_start/oci.md b/g3doc/user_guide/quick_start/oci.md
deleted file mode 100644
index e7768946b..000000000
--- a/g3doc/user_guide/quick_start/oci.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# OCI Quick Start
-
-This guide will quickly get you started running your first gVisor sandbox
-container using the runtime directly with the default platform.
-
-First, follow the [Installation guide][install].
-
-## Run an OCI compatible container
-
-Now we will create an [OCI][oci] container bundle to run our container. First we
-will create a root directory for our bundle.
-
-```bash
-mkdir bundle
-cd bundle
-```
-
-Create a root file system for the container. We will use the Docker
-`hello-world` image as the basis for our container.
-
-```bash
-mkdir rootfs
-docker export $(docker create hello-world) | tar -xf - -C rootfs
-```
-
-Next, create a specification file called `config.json` that contains our
-container specification. We tell the container to run the `/hello` program.
-
-```bash
-runsc spec -- /hello
-```
-
-Finally, run the container.
-
-```bash
-sudo runsc run hello
-```
-
-Next try [using CNI to set up networking](../../../tutorials/cni/) or
-[running gVisor using Docker](../docker/).
-
-[oci]: https://opencontainers.org/
-[install]: /docs/user_guide/install
diff --git a/g3doc/user_guide/tutorials/BUILD b/g3doc/user_guide/tutorials/BUILD
deleted file mode 100644
index a862c76f4..000000000
--- a/g3doc/user_guide/tutorials/BUILD
+++ /dev/null
@@ -1,55 +0,0 @@
-load("//website:defs.bzl", "doc")
-
-package(
- default_visibility = ["//website:__pkg__"],
- licenses = ["notice"],
-)
-
-doc(
- name = "docker",
- src = "docker.md",
- category = "User Guide",
- permalink = "/docs/tutorials/docker/",
- subcategory = "Tutorials",
- weight = "10",
-)
-
-doc(
- name = "docker_compose",
- src = "docker-compose.md",
- category = "User Guide",
- permalink = "/docs/tutorials/docker-compose/",
- subcategory = "Tutorials",
- weight = "20",
-)
-
-doc(
- name = "kubernetes",
- src = "kubernetes.md",
- category = "User Guide",
- data = [
- "add-node-pool.png",
- "node-pool-button.png",
- ],
- permalink = "/docs/tutorials/kubernetes/",
- subcategory = "Tutorials",
- weight = "30",
-)
-
-doc(
- name = "knative",
- src = "knative.md",
- category = "User Guide",
- permalink = "/docs/tutorials/knative/",
- subcategory = "Tutorials",
- weight = "40",
-)
-
-doc(
- name = "cni",
- src = "cni.md",
- category = "User Guide",
- permalink = "/docs/tutorials/cni/",
- subcategory = "Tutorials",
- weight = "50",
-)
diff --git a/g3doc/user_guide/tutorials/add-node-pool.png b/g3doc/user_guide/tutorials/add-node-pool.png
deleted file mode 100644
index e4560359b..000000000
--- a/g3doc/user_guide/tutorials/add-node-pool.png
+++ /dev/null
Binary files differ
diff --git a/g3doc/user_guide/tutorials/cni.md b/g3doc/user_guide/tutorials/cni.md
deleted file mode 100644
index ee8d0ac92..000000000
--- a/g3doc/user_guide/tutorials/cni.md
+++ /dev/null
@@ -1,174 +0,0 @@
-# Using CNI
-
-This tutorial will show you how to set up networking for a gVisor sandbox using
-the
-[Container Networking Interface (CNI)](https://github.com/containernetworking/cni).
-
-## Install CNI Plugins
-
-First you will need to install the CNI plugins. CNI plugins are used to set up a
-network namespace that `runsc` can use with the sandbox.
-
-Start by creating the directories for CNI plugin binaries:
-
-```
-sudo mkdir -p /opt/cni/bin
-```
-
-Download the CNI plugins:
-
-```
-wget https://github.com/containernetworking/plugins/releases/download/v0.8.3/cni-plugins-linux-amd64-v0.8.3.tgz
-```
-
-Next, unpack the plugins into the CNI binary directory:
-
-```
-sudo tar -xvf cni-plugins-linux-amd64-v0.8.3.tgz -C /opt/cni/bin/
-```
-
-## Configure CNI Plugins
-
-This section will show you how to configure CNI plugins. This tutorial will use
-the "bridge" and "loopback" plugins which will create the necessary bridge and
-loopback devices in our network namespace. However, you should be able to use
-any CNI compatible plugin to set up networking for gVisor sandboxes.
-
-The bridge plugin configuration specifies the IP address subnet range for IP
-addresses that will be assigned to sandboxes as well as the network routing
-configuration. This tutorial will assign IP addresses from the `10.22.0.0/16`
-range and allow all outbound traffic; however, you can modify this configuration
-to suit your use case.
-
-Create the bridge and loopback plugin configurations:
-
-```
-sudo mkdir -p /etc/cni/net.d
-
-sudo sh -c 'cat > /etc/cni/net.d/10-bridge.conf << EOF
-{
- "cniVersion": "0.3.1",
- "name": "mynet",
- "type": "bridge",
- "bridge": "cni0",
- "isGateway": true,
- "ipMasq": true,
- "ipam": {
- "type": "host-local",
- "subnet": "10.22.0.0/16",
- "routes": [
- { "dst": "0.0.0.0/0" }
- ]
- }
-}
-EOF'
-
-sudo sh -c 'cat > /etc/cni/net.d/99-loopback.conf << EOF
-{
- "cniVersion": "0.3.1",
- "name": "lo",
- "type": "loopback"
-}
-EOF'
-```
-
-## Create a Network Namespace
-
-For each gVisor sandbox you will create a network namespace and configure it
-using CNI. First, create a random network namespace name and then create the
-namespace.
-
-The network namespace path will then be `/var/run/netns/${CNI_CONTAINERID}`.
-
-```
-export CNI_PATH=/opt/cni/bin
-export CNI_CONTAINERID=$(printf '%x%x%x%x' $RANDOM $RANDOM $RANDOM $RANDOM)
-export CNI_COMMAND=ADD
-export CNI_NETNS=/var/run/netns/${CNI_CONTAINERID}
-
-sudo ip netns add ${CNI_CONTAINERID}
-```
-
-Next, run the bridge and loopback plugins to apply the configuration that was
-created earlier to the namespace. Each plugin outputs some JSON indicating the
-results of executing the plugin. For example, the bridge plugin's response
-includes the IP address assigned to the ethernet device created in the network
-namespace. Take note of the IP address for use later.
-
-```
-export CNI_IFNAME="eth0"
-sudo -E /opt/cni/bin/bridge < /etc/cni/net.d/10-bridge.conf
-export CNI_IFNAME="lo"
-sudo -E /opt/cni/bin/loopback < /etc/cni/net.d/99-loopback.conf
-```
-
-Get the IP address assigned to our sandbox:
-
-```
-POD_IP=$(sudo ip netns exec ${CNI_CONTAINERID} ip -4 addr show eth0 | grep -oP '(?<=inet\s)\d+(\.\d+){3}')
-```
-
-## Create the OCI Bundle
-
-Now that our network namespace is created and configured, we can create the OCI
-bundle for our container. As part of the bundle's `config.json` we will specify
-that the container use the network namespace that we created.
-
-The container will run a simple python webserver that we will be able to connect
-to using the IP address assigned to it by the bridge CNI plugin.
-
-Create the bundle and root filesystem directories:
-
-```
-sudo mkdir -p bundle
-cd bundle
-sudo mkdir rootfs
-sudo docker export $(docker create python) | sudo tar --same-owner -pxf - -C rootfs
-sudo mkdir -p rootfs/var/www/html
-sudo sh -c 'echo "Hello World!" > rootfs/var/www/html/index.html'
-```
-
-Next create the `config.json` specifying the network namespace.
-
-```
-sudo runsc spec \
- --cwd /var/www/html \
- --netns /var/run/netns/${CNI_CONTAINERID} \
- -- python -m http.server
-```
-
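-Optionally, you can confirm that the network namespace path was recorded in the
-generated spec. This is only a quick sanity check and assumes `runsc spec` wrote
-`config.json` into the current `bundle` directory:
-
-```
-grep "${CNI_CONTAINERID}" config.json
-```
-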
-## Run the Container
-
-Now we can run the container and connect to the web server. Run the container
-in gVisor, using the same ID as the network namespace for consistency:
-
-```
-sudo runsc run -detach ${CNI_CONTAINERID}
-```
-
-Connect to the server via the sandbox's IP address:
-
-```
-curl http://${POD_IP}:8000/
-```
-
-You should see the server returning `Hello World!`.
-
-## Cleanup
-
-After you are finished running the container, you can clean up the network
-namespace.
-
-```
-sudo runsc kill ${CNI_CONTAINERID}
-sudo runsc delete ${CNI_CONTAINERID}
-
-export CNI_COMMAND=DEL
-
-export CNI_IFNAME="lo"
-sudo -E /opt/cni/bin/loopback < /etc/cni/net.d/99-loopback.conf
-export CNI_IFNAME="eth0"
-sudo -E /opt/cni/bin/bridge < /etc/cni/net.d/10-bridge.conf
-
-sudo ip netns delete ${CNI_CONTAINERID}
-```
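-
-To confirm that everything was torn down, you can list the remaining network
-namespaces; the namespace created above should no longer appear (an optional
-check):
-
-```
-sudo ip netns list
-```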
diff --git a/g3doc/user_guide/tutorials/docker-compose.md b/g3doc/user_guide/tutorials/docker-compose.md
deleted file mode 100644
index 3284231f8..000000000
--- a/g3doc/user_guide/tutorials/docker-compose.md
+++ /dev/null
@@ -1,100 +0,0 @@
-# Wordpress with Docker Compose
-
-This page shows you how to deploy a sample [WordPress][wordpress] site using
-[Docker Compose][docker-compose].
-
-### Before you begin
-
-[Follow these instructions][docker-install] to install runsc with Docker. This
-document assumes that Docker and Docker Compose are installed and the runtime
-name chosen for gVisor is `runsc`.
-
-### Configuration
-
-We'll start by creating the `docker-compose.yaml` file to specify our services.
-We will specify two services: a `wordpress` service for the WordPress Apache
-server, and a `db` service for MySQL. We will configure WordPress to connect to
-MySQL via the `db` service host name.
-
-> **Note:** Docker Compose uses its own network by default and allows services
-> to communicate using their service name. Docker Compose does this by setting
-> up a DNS server at IP address 127.0.0.11 and configuring containers to use it
-> via [resolv.conf][resolv.conf]. This IP is not addressable inside a gVisor
-> sandbox so it's important that we set the DNS IP address to the alternative
-> `8.8.8.8` and use a network that allows routing to it. See
-> [Networking in Compose][compose-networking] for more details.
-
-> **Note:** The `runtime` field is not supported in the 3.x version of the
-> Compose file format by versions of docker-compose < 1.27.0. You will need to
-> write your `docker-compose.yaml` file using the 2.x format or use
-> docker-compose >= 1.27.0. See this
-> [issue](https://github.com/docker/compose/issues/6239) for more details.
-
-```yaml
-version: '2.3'
-
-services:
- db:
- image: mysql:5.7
- volumes:
- - db_data:/var/lib/mysql
- restart: always
- environment:
- MYSQL_ROOT_PASSWORD: somewordpress
- MYSQL_DATABASE: wordpress
- MYSQL_USER: wordpress
- MYSQL_PASSWORD: wordpress
- # All services must be on the same network to communicate.
- network_mode: "bridge"
-
- wordpress:
- depends_on:
- - db
- # When using the "bridge" network specify links.
- links:
- - db
- image: wordpress:latest
- ports:
- - "8080:80"
- restart: always
- environment:
- WORDPRESS_DB_HOST: db:3306
- WORDPRESS_DB_USER: wordpress
- WORDPRESS_DB_PASSWORD: wordpress
- WORDPRESS_DB_NAME: wordpress
- # Specify the dns address if needed.
- dns:
- - 8.8.8.8
- # All services must be on the same network to communicate.
- network_mode: "bridge"
- # Specify the runtime used by Docker. Must be set up in
- # /etc/docker/daemon.json.
- runtime: "runsc"
-
-volumes:
- db_data: {}
-```
-
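-Before starting anything, you can ask Compose to validate the file and print the
-resolved configuration (an optional check):
-
-```bash
-docker-compose config
-```
-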
-Once you have a `docker-compose.yaml` in the current directory you can start the
-containers:
-
-```bash
-docker-compose up
-```
-
-Once the containers have started, you can access WordPress at
-http://localhost:8080.
-
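-To double-check that the `wordpress` service is actually running under gVisor,
-you can inspect its container's runtime. The container name below is only a
-placeholder; use `docker-compose ps` to find the real name on your system:
-
-```bash
-docker-compose ps
-docker inspect --format '{{.HostConfig.Runtime}}' <wordpress-container-name>
-```
-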
-Congrats! You now have a working WordPress site up and running using Docker
-Compose.
-
-### What's next
-
-Learn how to deploy [WordPress with Kubernetes][wordpress-k8s].
-
-[docker-compose]: https://docs.docker.com/compose/
-[docker-install]: ../quick_start/docker.md
-[wordpress]: https://wordpress.com/
-[resolv.conf]: https://man7.org/linux/man-pages/man5/resolv.conf.5.html
-[wordpress-k8s]: kubernetes.md
-[compose-networking]: https://docs.docker.com/compose/networking/
diff --git a/g3doc/user_guide/tutorials/docker.md b/g3doc/user_guide/tutorials/docker.md
deleted file mode 100644
index 9ca01da2a..000000000
--- a/g3doc/user_guide/tutorials/docker.md
+++ /dev/null
@@ -1,70 +0,0 @@
-# WordPress with Docker
-
-This page shows you how to deploy a sample [WordPress][wordpress] site using
-[Docker][docker].
-
-### Before you begin
-
-[Follow these instructions][docker-install] to install runsc with Docker. This
-document assumes that the runtime name chosen is `runsc`.
-
-### Running WordPress
-
-Now, let's deploy a WordPress site using Docker. A WordPress site requires two
-containers: a web server in the frontend and a MySQL database in the backend.
-
-First, let's define a few environment variables that are shared between both
-containers:
-
-```bash
-export MYSQL_PASSWORD=${YOUR_SECRET_PASSWORD_HERE?}
-export MYSQL_DB=wordpress
-export MYSQL_USER=wordpress
-```
-
-Next, let's start the database container running MySQL and wait until the
-database is initialized:
-
-```bash
-docker run --runtime=runsc --name mysql -d \
- -e MYSQL_RANDOM_ROOT_PASSWORD=1 \
- -e MYSQL_PASSWORD="${MYSQL_PASSWORD}" \
- -e MYSQL_DATABASE="${MYSQL_DB}" \
- -e MYSQL_USER="${MYSQL_USER}" \
- mysql:5.7
-
-# Wait until this message appears in the log.
-docker logs mysql |& grep 'port: 3306 MySQL Community Server (GPL)'
-```
-
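-If you would rather wait automatically instead of re-running the command above,
-a minimal polling sketch that greps for the same log line looks like this:
-
-```bash
-until docker logs mysql |& grep -q 'port: 3306 MySQL Community Server (GPL)'; do
-  sleep 2
-done
-```
-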
-Once the database is running, you can start the WordPress frontend. We use the
-`--link` option to connect the frontend to the database, and expose WordPress
-on port 8080 of the localhost.
-
-```bash
-docker run --runtime=runsc --name wordpress -d \
- --link mysql:mysql \
- -p 8080:80 \
- -e WORDPRESS_DB_HOST=mysql \
- -e WORDPRESS_DB_USER="${MYSQL_USER}" \
- -e WORDPRESS_DB_PASSWORD="${MYSQL_PASSWORD}" \
- -e WORDPRESS_DB_NAME="${MYSQL_DB}" \
- -e WORDPRESS_TABLE_PREFIX=wp_ \
- wordpress
-```
-
-Now, you can access the WordPress website by pointing your favorite browser to
-<http://localhost:8080>.
-
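-You can also verify from the command line that the frontend is answering; a
-fresh WordPress install typically responds with a redirect to its setup page:
-
-```bash
-curl -sI http://localhost:8080 | head -n 1
-```
-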
-Congratulations! You have just deployed a WordPress site using Docker.
-
-### What's next
-
-Learn how to deploy WordPress with [Kubernetes][wordpress-k8s] or
-[Docker Compose][wordpress-compose].
-
-[docker]: https://www.docker.com/
-[docker-install]: ../quick_start/docker.md
-[wordpress]: https://wordpress.com/
-[wordpress-k8s]: kubernetes.md
-[wordpress-compose]: docker-compose.md
diff --git a/g3doc/user_guide/tutorials/knative.md b/g3doc/user_guide/tutorials/knative.md
deleted file mode 100644
index 3f5207fcc..000000000
--- a/g3doc/user_guide/tutorials/knative.md
+++ /dev/null
@@ -1,88 +0,0 @@
-# Knative Services
-
-[Knative](https://knative.dev/) is a platform for running serverless workloads
-on Kubernetes. This guide will show you how to run basic Knative workloads in
-gVisor.
-
-## Prerequisites
-
-This guide assumes you have a cluster that is capable of running gVisor
-workloads. This could be a
-[GKE Sandbox](https://cloud.google.com/kubernetes-engine/sandbox/) enabled
-cluster on Google Cloud Platform or one you have set up yourself using
-[containerd Quick Start](https://gvisor.dev/docs/user_guide/containerd/quick_start/).
-
-This guide also assumes you have Knative installed using
-[Istio](https://istio.io/) as the network layer. You can follow the
-[Knative installation guide](https://knative.dev/docs/install/install-serving-with-yaml/)
-to install Knative.
-
-## Enable the RuntimeClass feature flag
-
-Knative allows the use of various parameters on Pods via
-[feature flags](https://knative.dev/docs/serving/feature-flags/). We will enable
-the
-[runtimeClassName](https://knative.dev/docs/serving/feature-flags/#kubernetes-runtime-class)
-feature flag to allow the use of the Kubernetes
-[Runtime Class](https://kubernetes.io/docs/concepts/containers/runtime-class/).
-
-Edit the feature flags ConfigMap.
-
-```bash
-kubectl edit configmap config-features -n knative-serving
-```
-
-Add `kubernetes.podspec-runtimeclassname: enabled` to the `data` field. Once you
-are finished, the ConfigMap will look something like this (minus all the system
-fields).
-
-```yaml
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: config-features
- namespace: knative-serving
- labels:
- serving.knative.dev/release: v0.22.0
-data:
- kubernetes.podspec-runtimeclassname: enabled
-```
-
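-If you prefer a non-interactive approach, the same key can be merged into the
-ConfigMap with `kubectl patch`; this is just a sketch of the equivalent edit:
-
-```bash
-kubectl patch configmap config-features -n knative-serving \
-  --type merge -p '{"data":{"kubernetes.podspec-runtimeclassname":"enabled"}}'
-```
-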
-## Deploy the Service
-
-After you have set the Runtime Class feature flag, you can create Knative
-services that specify a `runtimeClassName` in the spec.
-
-```bash
-cat <<EOF | kubectl apply -f -
-apiVersion: serving.knative.dev/v1
-kind: Service
-metadata:
- name: helloworld-go
-spec:
- template:
- spec:
- runtimeClassName: gvisor
- containers:
- - image: gcr.io/knative-samples/helloworld-go
- env:
- - name: TARGET
- value: "gVisor User"
-EOF
-```
-
-You can see the pods running and their Runtime Class.
-
-```bash
-kubectl get pods -o=custom-columns='NAME:.metadata.name,RUNTIME CLASS:.spec.runtimeClassName,STATUS:.status.phase'
-```
-
-Output should look something like the following. Note that your service might
-scale to zero. If you access it via its URL you should get a new Pod.
-
-```
-NAME RUNTIME CLASS STATUS
-helloworld-go-00002-deployment-646c87b7f5-5v68s gvisor Running
-```
-
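-To exercise the service, you can read its URL from the Knative Service object
-and send it a request. This assumes your Istio ingress and DNS are configured so
-that the URL is routable from wherever you run `curl`:
-
-```bash
-URL=$(kubectl get ksvc helloworld-go -o jsonpath='{.status.url}')
-curl "${URL}"
-```
-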
-Congrats! Your Knative service is now running in gVisor!
diff --git a/g3doc/user_guide/tutorials/kubernetes.md b/g3doc/user_guide/tutorials/kubernetes.md
deleted file mode 100644
index 1ec6e71e9..000000000
--- a/g3doc/user_guide/tutorials/kubernetes.md
+++ /dev/null
@@ -1,236 +0,0 @@
-# WordPress with Kubernetes
-
-This page shows you how to deploy a sample [WordPress][wordpress] site using
-[GKE Sandbox][gke-sandbox].
-
-### Before you begin
-
-Take the following steps to enable the Kubernetes Engine API:
-
-1. Visit the [Kubernetes Engine page][project-selector] in the Google Cloud
- Platform Console.
-1. Create or select a project.
-
-### Creating a node pool with gVisor enabled
-
-Create a node pool inside your cluster with the option `--sandbox type=gvisor`
-added to the command, as shown below:
-
-```bash
-gcloud beta container node-pools create sandbox-pool --cluster=${CLUSTER_NAME} --image-type=cos_containerd --sandbox type=gvisor
-```
-
-If you prefer to use the console, select your cluster and click the **ADD NODE
-POOL** button:
-
-![+ ADD NODE POOL](node-pool-button.png)
-
-Then set the **Image type** to **Containerd** and select the **Enable sandbox
-with gVisor** option. Configure other options as you like:
-
-![+ NODE POOL](add-node-pool.png)
-
-### Check that gVisor is enabled
-
-The gvisor RuntimeClass is instantiated during node creation. You can check for
-the existence of the gvisor RuntimeClass using the following command:
-
-```bash
-kubectl get runtimeclasses
-```
-
-### WordPress deployment
-
-Now, let's deploy a WordPress site using GKE Sandbox. A WordPress site requires
-two pods: a web server in the frontend and a MySQL database in the backend. Both
-applications use PersistentVolumes to store the site data. In addition, they use
-a Secret to share the MySQL password between them.
-
-First, let's download the deployment configuration files so that we can add the
-runtime class to them:
-
-```bash
-curl -LO https://k8s.io/examples/application/wordpress/wordpress-deployment.yaml
-curl -LO https://k8s.io/examples/application/wordpress/mysql-deployment.yaml
-```
-
-Add a **spec.template.spec.runtimeClassName** set to **gvisor** to both files,
-as shown below:
-
-**wordpress-deployment.yaml:**
-
-```yaml
-apiVersion: v1
-kind: Service
-metadata:
- name: wordpress
- labels:
- app: wordpress
-spec:
- ports:
- - port: 80
- selector:
- app: wordpress
- tier: frontend
- type: LoadBalancer
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
- name: wp-pv-claim
- labels:
- app: wordpress
-spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 20Gi
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: wordpress
- labels:
- app: wordpress
-spec:
- selector:
- matchLabels:
- app: wordpress
- tier: frontend
- strategy:
- type: Recreate
- template:
- metadata:
- labels:
- app: wordpress
- tier: frontend
- spec:
- runtimeClassName: gvisor # ADD THIS LINE
- containers:
- - image: wordpress:4.8-apache
- name: wordpress
- env:
- - name: WORDPRESS_DB_HOST
- value: wordpress-mysql
- - name: WORDPRESS_DB_PASSWORD
- valueFrom:
- secretKeyRef:
- name: mysql-pass
- key: password
- ports:
- - containerPort: 80
- name: wordpress
- volumeMounts:
- - name: wordpress-persistent-storage
- mountPath: /var/www/html
- volumes:
- - name: wordpress-persistent-storage
- persistentVolumeClaim:
- claimName: wp-pv-claim
-```
-
-**mysql-deployment.yaml:**
-
-```yaml
-apiVersion: v1
-kind: Service
-metadata:
- name: wordpress-mysql
- labels:
- app: wordpress
-spec:
- ports:
- - port: 3306
- selector:
- app: wordpress
- tier: mysql
- clusterIP: None
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
- name: mysql-pv-claim
- labels:
- app: wordpress
-spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 20Gi
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: wordpress-mysql
- labels:
- app: wordpress
-spec:
- selector:
- matchLabels:
- app: wordpress
- tier: mysql
- strategy:
- type: Recreate
- template:
- metadata:
- labels:
- app: wordpress
- tier: mysql
- spec:
- runtimeClassName: gvisor # ADD THIS LINE
- containers:
- - image: mysql:5.6
- name: mysql
- env:
- - name: MYSQL_ROOT_PASSWORD
- valueFrom:
- secretKeyRef:
- name: mysql-pass
- key: password
- ports:
- - containerPort: 3306
- name: mysql
- volumeMounts:
- - name: mysql-persistent-storage
- mountPath: /var/lib/mysql
- volumes:
- - name: mysql-persistent-storage
- persistentVolumeClaim:
- claimName: mysql-pv-claim
-```
-
-Note that apart from `runtimeClassName: gvisor`, nothing else about the
-Deployment has changed.
-
-You are now ready to deploy the entire application. Just create a secret to
-store MySQL's password and *apply* both deployments:
-
-```bash
-kubectl create secret generic mysql-pass --from-literal=password=${YOUR_SECRET_PASSWORD_HERE?}
-kubectl apply -f mysql-deployment.yaml
-kubectl apply -f wordpress-deployment.yaml
-```
-
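-You can confirm that both pods picked up the gvisor runtime class with a
-custom-columns query like the following:
-
-```bash
-kubectl get pods -o=custom-columns='NAME:.metadata.name,RUNTIME CLASS:.spec.runtimeClassName,STATUS:.status.phase'
-```
-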
-Wait for the deployments to be ready and an external IP to be assigned to the
-WordPress service:
-
-```bash
-watch kubectl get service wordpress
-```
-
-Now, copy the service `EXTERNAL-IP` from above to your favorite browser to view
-and configure your new WordPress site.
-
-Congratulations! You have just deployed a WordPress site using GKE Sandbox.
-
-### What's next
-
-To learn more about GKE Sandbox and how to run your deployment securely, take a
-look at the [documentation][gke-sandbox-docs].
-
-[gke-sandbox-docs]: https://cloud.google.com/kubernetes-engine/docs/how-to/sandbox-pods
-[gke-sandbox]: https://cloud.google.com/kubernetes-engine/sandbox/
-[project-selector]: https://console.cloud.google.com/projectselector/kubernetes
-[wordpress]: https://wordpress.com/
diff --git a/g3doc/user_guide/tutorials/node-pool-button.png b/g3doc/user_guide/tutorials/node-pool-button.png
deleted file mode 100644
index bee0c11dc..000000000
--- a/g3doc/user_guide/tutorials/node-pool-button.png
+++ /dev/null
Binary files differ
diff --git a/images/BUILD b/images/BUILD
deleted file mode 100644
index 34b950644..000000000
--- a/images/BUILD
+++ /dev/null
@@ -1 +0,0 @@
-package(licenses = ["notice"])
diff --git a/images/README.md b/images/README.md
deleted file mode 100644
index 297c7c3f3..000000000
--- a/images/README.md
+++ /dev/null
@@ -1,70 +0,0 @@
-# Container Images
-
-This directory contains all images used by tests.
-
-Note that all of these images must be pushed to the testing project hosted on
-[Google Container Registry][gcr]. This happens automatically as part of
-continuous integration, and speeds up loading because images will not need to be
-built from scratch for each test run.
-
-Image tooling is accessible via `make`, specifically via `images/Makefile`.
-
-## Why make?
-
-Make is used because it can bootstrap the `default` image, which contains
-`bazel` and all other parts of the toolchain.
-
-## Listing images
-
-To list all images, use `make list-all-images` from the top-level directory.
-
-## Loading and referencing images
-
-To build a specific image, use `make load-<image>` from the top-level directory.
-This will ensure that an image `gvisor.dev/images/<image>:latest` is available.
-
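-For example, the `default` image mentioned above can be loaded as follows,
-assuming its target name follows the same `load-<image>` pattern:
-
-```
-make load-default
-```
-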
-Images should always be referred to via the `gvisor.dev/images` canonical path.
-This tag exists only locally, but serves to decouple tests from the underlying
-image infrastructure.
-
-The continuous integration system can either take fine-grained dependencies on
-single images via individual `load` targets, or pull all images via a single
-`load-all-images` invocation.
-
-## Adding new images
-
-To add a new image, create a new directory under `images` containing a
-Dockerfile and any other files that the image requires. You may choose to add to
-an existing subdirectory if applicable, or create a new one.
-
-All images will be tagged and memoized using a hash of the directory contents.
-As a result, every image should be made completely reproducible if possible.
-This means using fixed tags and fixed versions whenever feasible.
-
-Note that images should also be made architecture-independent if possible. The
-build scripts will handle loading the appropriate architecture onto the machine
-and tagging it with the single canonical tag.
-
-Add a `load-<image>` dependency in the Makefile if the image is required for a
-particular set of tests. This target will pull the tag from the image repository
-if available.
-
-## Building and pushing images
-
-All images can be built manually by running `build-<image>` and pushed using
-`push-<image>`. You can also use `build-all-images` and `push-all-images`. Note
-that pushing will require appropriate permissions in the project.
-
-The continuous integration system can either take fine-grained dependencies on
-individual `push` targets, or ensure all images are up-to-date with a single
-`push-all-images` invocation.
-
-## Multi-Arch images
-
-By default, the image is built for the host architecture. Cross-building can be
-achieved by specifying the `ARCH` variable to `make`. For example:
-
-```
-$ make ARCH=aarch64 rebuild-default
-```
diff --git a/images/agent/Dockerfile b/images/agent/Dockerfile
deleted file mode 100644
index 1d8979390..000000000
--- a/images/agent/Dockerfile
+++ /dev/null
@@ -1,12 +0,0 @@
-FROM golang:1.15 as build-agent
-RUN git clone --depth=1 --branch=v3.25.0 https://github.com/buildkite/agent
-RUN cd agent && go build -i -o /buildkite-agent .
-
-FROM golang:1.15 as build-agent-metrics
-RUN git clone --depth=1 --branch=v5.2.0 https://github.com/buildkite/buildkite-agent-metrics
-RUN cd buildkite-agent-metrics && go build -i -o /buildkite-agent-metrics .
-
-FROM gcr.io/distroless/base-debian10
-COPY --from=build-agent /buildkite-agent /
-COPY --from=build-agent-metrics /buildkite-agent-metrics /
-CMD ["/buildkite-agent"]
diff --git a/images/agent/README.md b/images/agent/README.md
deleted file mode 100644
index acb57bd2f..000000000
--- a/images/agent/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# Build Agent
-
-This is the image used by the build agent. It is built and bundled via a
-separate packaging mechanism in order to provide local caching and to ensure
-better build provenance. Note that the continuous integration system will
-generally deploy new agents from the primary branch, and will only deploy them
-as instances are recycled. Updates to this image should be made carefully.
diff --git a/images/arm-qemu/Dockerfile.x86_64 b/images/arm-qemu/Dockerfile.x86_64
deleted file mode 100644
index 1a2ecaf42..000000000
--- a/images/arm-qemu/Dockerfile.x86_64
+++ /dev/null
@@ -1,12 +0,0 @@
-FROM fedora:33
-
-RUN dnf install -y qemu-system-aarch64 gzip cpio wget
-
-WORKDIR /workdir
-RUN wget -4 http://dl-cdn.alpinelinux.org/alpine/edge/releases/aarch64/netboot/vmlinuz-lts
-RUN wget -4 http://dl-cdn.alpinelinux.org/alpine/edge/releases/aarch64/netboot/initramfs-lts
-
-COPY initramfs /workdir/initramfs
-COPY test.sh /workdir/
-
-CMD ./test.sh
diff --git a/images/arm-qemu/initramfs/init b/images/arm-qemu/initramfs/init
deleted file mode 100755
index b355daadd..000000000
--- a/images/arm-qemu/initramfs/init
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/sh
-
-# Copyright 2020 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This script is started as the init process in a test virtual machine.
-# It performs all required initialization steps and runs a test command inside
-# a gVisor instance.
-
-set -x -e
-
-/bin/busybox mkdir -p /usr/bin /usr/sbin /proc /sys /dev /tmp
-
-/bin/busybox --install -s
-export PATH=/usr/bin:/bin:/usr/sbin:/sbin
-
-mount -t proc -o noexec,nosuid,nodev proc /proc
-mount -t sysfs -o noexec,nosuid,nodev sysfs /sys
-mount -t devtmpfs -o exec,nosuid,mode=0755,size=2M devtmpfs /dev
-
-uname -a
-/runsc --TESTONLY-unsafe-nonroot --rootless --network none --debug --alsologtostderr do uname -a
-echo "runsc exited with code $?"
-
-# Shut down the VM. poweroff and halt don't work for unknown reasons.
-# qemu is started with the -no-reboot flag, so the VM will be terminated.
-reboot -f
-exit 1
diff --git a/images/arm-qemu/test.sh b/images/arm-qemu/test.sh
deleted file mode 100755
index 2c9336015..000000000
--- a/images/arm-qemu/test.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-
-# Copyright 2020 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -xeuo pipefail -m
-
-cd initramfs
-find . | cpio -v -o -c -R root:root | gzip -9 >> ../initramfs-lts
-cd ..
-
-qemu-system-aarch64 -M virt -m 512M -cpu cortex-a57 \
- -kernel vmlinuz-lts -initrd initramfs-lts \
- -append "console=ttyAMA0 panic=-1" -nographic -no-reboot \
- | tee /dev/stderr | grep "runsc exited with code 0"
-
-echo "PASS"
diff --git a/images/basic/alpine/Dockerfile b/images/basic/alpine/Dockerfile
deleted file mode 100644
index 12b26040a..000000000
--- a/images/basic/alpine/Dockerfile
+++ /dev/null
@@ -1 +0,0 @@
-FROM alpine:3.11.5
diff --git a/images/basic/busybox/Dockerfile b/images/basic/busybox/Dockerfile
deleted file mode 100644
index 79b3f683a..000000000
--- a/images/basic/busybox/Dockerfile
+++ /dev/null
@@ -1 +0,0 @@
-FROM busybox:1.31.1
diff --git a/images/basic/fsstress/Dockerfile.x86_64 b/images/basic/fsstress/Dockerfile.x86_64
deleted file mode 100644
index 21b86065a..000000000
--- a/images/basic/fsstress/Dockerfile.x86_64
+++ /dev/null
@@ -1,17 +0,0 @@
-# Usage: docker run --rm fsstress -d /test -n 10000 -p 100 -X -v
-FROM alpine
-
-RUN apk update && apk add git
-RUN git clone https://github.com/linux-test-project/ltp.git --depth 1
-
-WORKDIR /ltp
-RUN ./travis/alpine.sh
-RUN make autotools && ./configure
-RUN make -C testcases/kernel/fs/fsstress
-RUN cp ./testcases/kernel/fs/fsstress/fsstress /usr/bin
-RUN rm -rf /fsstress /tmp
-
-WORKDIR /
-# This is required, otherwise running with -p > 1 exits prematurely.
-COPY run.sh .
-ENTRYPOINT ["/run.sh"]
diff --git a/images/basic/fsstress/run.sh b/images/basic/fsstress/run.sh
deleted file mode 100755
index ebb7a37ad..000000000
--- a/images/basic/fsstress/run.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/sh
-
-# Copyright 2021 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-/usr/bin/fsstress "$@" \ No newline at end of file
diff --git a/images/basic/httpd/Dockerfile b/images/basic/httpd/Dockerfile
deleted file mode 100644
index 83bc0ed88..000000000
--- a/images/basic/httpd/Dockerfile
+++ /dev/null
@@ -1 +0,0 @@
-FROM httpd:2.4.43
diff --git a/images/basic/integrationtest/Dockerfile.x86_64 b/images/basic/integrationtest/Dockerfile.x86_64
deleted file mode 100644
index b9fed05cb..000000000
--- a/images/basic/integrationtest/Dockerfile.x86_64
+++ /dev/null
@@ -1,13 +0,0 @@
-FROM ubuntu:bionic
-
-WORKDIR /root
-COPY . .
-RUN chmod +x *.sh
-
-RUN apt-get update && apt-get install -y gcc iputils-ping iproute2
-
-# Compilation Steps.
-RUN gcc -O2 -o test_copy_up test_copy_up.c
-RUN gcc -O2 -o test_rewinddir test_rewinddir.c
-RUN gcc -O2 -o link_test link_test.c
-RUN gcc -O2 -o test_sticky test_sticky.c
diff --git a/images/basic/integrationtest/copy_up_testfile.txt b/images/basic/integrationtest/copy_up_testfile.txt
deleted file mode 100644
index e4188c841..000000000
--- a/images/basic/integrationtest/copy_up_testfile.txt
+++ /dev/null
@@ -1 +0,0 @@
-old data
diff --git a/images/basic/integrationtest/link_test.c b/images/basic/integrationtest/link_test.c
deleted file mode 100644
index 45ab00abe..000000000
--- a/images/basic/integrationtest/link_test.c
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <err.h>
-#include <fcntl.h>
-#include <string.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-// Basic test for linkat(2). Syscall tests require CAP_DAC_READ_SEARCH and
-// cannot use tricks like userns as root. For this reason, run a basic link test
-// to ensure some coverage.
-int main(int argc, char** argv) {
- const char kOldPath[] = "old.txt";
- int fd = open(kOldPath, O_RDWR | O_CREAT | O_TRUNC, 0600);
- if (fd < 0) {
- errx(1, "open(%s) failed", kOldPath);
- }
- const char kData[] = "some random content";
- if (write(fd, kData, sizeof(kData)) < 0) {
- err(1, "write failed");
- }
- close(fd);
-
- struct stat old_stat;
- if (stat(kOldPath, &old_stat)) {
- errx(1, "stat(%s) failed", kOldPath);
- }
-
- const char kNewPath[] = "new.txt";
- if (link(kOldPath, kNewPath)) {
- errx(1, "link(%s, %s) failed", kOldPath, kNewPath);
- }
-
- struct stat new_stat;
- if (stat(kNewPath, &new_stat)) {
- errx(1, "stat(%s) failed", kNewPath);
- }
-
- // Check that files are the same.
- if (old_stat.st_dev != new_stat.st_dev) {
- errx(1, "files st_dev is different, want: %lu, got: %lu", old_stat.st_dev,
- new_stat.st_dev);
- }
- if (old_stat.st_ino != new_stat.st_ino) {
- errx(1, "files st_ino is different, want: %lu, got: %lu", old_stat.st_ino,
- new_stat.st_ino);
- }
-
- // Check that link count is correct.
- if (new_stat.st_nlink != old_stat.st_nlink + 1) {
- errx(1, "wrong nlink, want: %lu, got: %lu", old_stat.st_nlink + 1,
- new_stat.st_nlink);
- }
-
- // Check that contents are the same.
- fd = open(kNewPath, O_RDONLY);
- if (fd < 0) {
- errx(1, "open(%s) failed", kNewPath);
- }
- char buf[sizeof(kData)] = {};
- if (read(fd, buf, sizeof(buf)) < 0) {
- err(1, "read failed");
- }
- close(fd);
-
- if (strcmp(buf, kData) != 0) {
- errx(1, "file content mismatch: %s", buf);
- }
-
- // Cleanup.
- if (unlink(kNewPath)) {
- errx(1, "unlink(%s) failed", kNewPath);
- }
- if (unlink(kOldPath)) {
- errx(1, "unlink(%s) failed", kOldPath);
- }
-
- // Success!
- return 0;
-}
diff --git a/images/basic/integrationtest/ping4.sh b/images/basic/integrationtest/ping4.sh
deleted file mode 100644
index 2a343712a..000000000
--- a/images/basic/integrationtest/ping4.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-
-# Copyright 2020 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -euo pipefail
-
-# The docker API doesn't provide for starting a container, running a command,
-# and getting the exit status of the command in one go. The most straightforward
-# way to do this is to verify the output of the command, so we output nothing on
-# success and an error message on failure.
-if ! out=$(ping -c 10 127.0.0.1); then
- echo "$out"
-fi
diff --git a/images/basic/integrationtest/ping6.sh b/images/basic/integrationtest/ping6.sh
deleted file mode 100644
index 4268951d0..000000000
--- a/images/basic/integrationtest/ping6.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/bash
-
-# Copyright 2020 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -euo pipefail
-
-# Enable ipv6 on loopback if it's not already enabled. Runsc doesn't enable ipv6
-# loopback unless an ipv6 address was assigned to the container, which docker
-# does not do by default.
-if ! [[ $(ip -6 addr show dev lo) ]]; then
- ip addr add ::1 dev lo
-fi
-
-# The docker API doesn't provide for starting a container, running a command,
-# and getting the exit status of the command in one go. The most straightforward
-# way to do this is to verify the output of the command, so we output nothing on
-# success and an error message on failure.
-if ! out=$(/bin/ping6 -c 10 ::1); then
- echo "$out"
-fi
diff --git a/images/basic/integrationtest/test_copy_up.c b/images/basic/integrationtest/test_copy_up.c
deleted file mode 100644
index 010b261dc..000000000
--- a/images/basic/integrationtest/test_copy_up.c
+++ /dev/null
@@ -1,88 +0,0 @@
-#include <err.h>
-#include <fcntl.h>
-#include <stdio.h>
-#include <string.h>
-#include <sys/mman.h>
-#include <unistd.h>
-
-int main(int argc, char** argv) {
- const char kTestFilePath[] = "copy_up_testfile.txt";
- const char kOldFileData[] = "old data\n";
- const char kNewFileData[] = "new data\n";
- const size_t kPageSize = sysconf(_SC_PAGE_SIZE);
-
- // Open a file that already exists in a host overlayfs lower layer.
- const int fd_rdonly = open(kTestFilePath, O_RDONLY);
- if (fd_rdonly < 0) {
- err(1, "open(%s, O_RDONLY)", kTestFilePath);
- }
-
- // Check that the file's initial contents are what we expect when read via
- // syscall.
- char oldbuf[sizeof(kOldFileData)] = {};
- ssize_t n = pread(fd_rdonly, oldbuf, sizeof(oldbuf), 0);
- if (n < 0) {
- err(1, "initial pread");
- }
- if (n != strlen(kOldFileData)) {
- errx(1, "short initial pread (%ld/%lu bytes)", n, strlen(kOldFileData));
- }
- if (strcmp(oldbuf, kOldFileData) != 0) {
- errx(1, "initial pread returned wrong data: %s", oldbuf);
- }
-
- // Check that the file's initial contents are what we expect when read via
- // memory mapping.
- void* page = mmap(NULL, kPageSize, PROT_READ, MAP_SHARED, fd_rdonly, 0);
- if (page == MAP_FAILED) {
- err(1, "mmap");
- }
- if (strcmp(page, kOldFileData) != 0) {
- errx(1, "mapping contains wrong initial data: %s", (const char*)page);
- }
-
- // Open the same file writably, causing host overlayfs to copy it up, and
- // replace its contents.
- const int fd_rdwr = open(kTestFilePath, O_RDWR);
- if (fd_rdwr < 0) {
- err(1, "open(%s, O_RDWR)", kTestFilePath);
- }
- n = write(fd_rdwr, kNewFileData, strlen(kNewFileData));
- if (n < 0) {
- err(1, "write");
- }
- if (n != strlen(kNewFileData)) {
- errx(1, "short write (%ld/%lu bytes)", n, strlen(kNewFileData));
- }
- if (ftruncate(fd_rdwr, strlen(kNewFileData)) < 0) {
- err(1, "truncate");
- }
-
- int failed = 0;
-
- // Check that syscalls on the old FD return updated contents. (Before Linux
- // 4.18, this requires that runsc use a post-copy-up FD to service the read.)
- char newbuf[sizeof(kNewFileData)] = {};
- n = pread(fd_rdonly, newbuf, sizeof(newbuf), 0);
- if (n < 0) {
- err(1, "final pread");
- }
- if (n != strlen(kNewFileData)) {
- warnx("short final pread (%ld/%lu bytes)", n, strlen(kNewFileData));
- failed = 1;
- } else if (strcmp(newbuf, kNewFileData) != 0) {
- warnx("final pread returned wrong data: %s", newbuf);
- failed = 1;
- }
-
- // Check that the memory mapping of the old FD has been updated. (Linux
- // overlayfs does not do this, so regardless of kernel version this requires
- // that runsc replace existing memory mappings with mappings of a
- // post-copy-up FD.)
- if (strcmp(page, kNewFileData) != 0) {
- warnx("mapping contains wrong final data: %s", (const char*)page);
- failed = 1;
- }
-
- return failed;
-}
diff --git a/images/basic/integrationtest/test_rewinddir.c b/images/basic/integrationtest/test_rewinddir.c
deleted file mode 100644
index f1a4085e1..000000000
--- a/images/basic/integrationtest/test_rewinddir.c
+++ /dev/null
@@ -1,78 +0,0 @@
-#include <dirent.h>
-#include <err.h>
-#include <errno.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-
-int main(int argc, char** argv) {
- const char kDirPath[] = "rewinddir_test_dir";
- const char kFileBasename[] = "rewinddir_test_file";
-
- // Create the test directory.
- if (mkdir(kDirPath, 0755) < 0) {
- err(1, "mkdir(%s)", kDirPath);
- }
-
- // The test directory should initially be empty.
- DIR* dir = opendir(kDirPath);
- if (!dir) {
- err(1, "opendir(%s)", kDirPath);
- }
- int failed = 0;
- while (1) {
- errno = 0;
- struct dirent* d = readdir(dir);
- if (!d) {
- if (errno != 0) {
- err(1, "readdir");
- }
- break;
- }
- if (strcmp(d->d_name, ".") != 0 && strcmp(d->d_name, "..") != 0) {
- warnx("unexpected file %s in new directory", d->d_name);
- failed = 1;
- }
- }
-
- // Create a file in the test directory.
- char* file_path = malloc(strlen(kDirPath) + 1 + strlen(kFileBasename));
- if (!file_path) {
- errx(1, "malloc");
- }
- strcpy(file_path, kDirPath);
- file_path[strlen(kDirPath)] = '/';
- strcpy(file_path + strlen(kDirPath) + 1, kFileBasename);
- if (mknod(file_path, 0644, 0) < 0) {
- err(1, "mknod(%s)", file_path);
- }
-
- // After rewinddir(), re-reading the directory stream should yield the new
- // file.
- rewinddir(dir);
- size_t found_file = 0;
- while (1) {
- errno = 0;
- struct dirent* d = readdir(dir);
- if (!d) {
- if (errno != 0) {
- err(1, "readdir");
- }
- break;
- }
- if (strcmp(d->d_name, kFileBasename) == 0) {
- found_file++;
- } else if (strcmp(d->d_name, ".") != 0 && strcmp(d->d_name, "..") != 0) {
- warnx("unexpected file %s in new directory", d->d_name);
- failed = 1;
- }
- }
- if (found_file != 1) {
- warnx("readdir returned file %s %zu times, wanted 1", kFileBasename,
- found_file);
- failed = 1;
- }
-
- return failed;
-}
diff --git a/images/basic/integrationtest/test_sticky.c b/images/basic/integrationtest/test_sticky.c
deleted file mode 100644
index 58dcf91d3..000000000
--- a/images/basic/integrationtest/test_sticky.c
+++ /dev/null
@@ -1,96 +0,0 @@
-#include <err.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <stdlib.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <unistd.h>
-
-void createFile(const char* path) {
- int fd = open(path, O_WRONLY | O_CREAT, 0777);
- if (fd < 0) {
- err(1, "open(%s)", path);
- exit(1);
- } else {
- close(fd);
- }
-}
-
-void waitAndCheckStatus(pid_t child) {
- int status;
- if (waitpid(child, &status, 0) == -1) {
- err(1, "waitpid() failed");
- exit(1);
- }
-
- if (WIFEXITED(status)) {
- int es = WEXITSTATUS(status);
- if (es) {
- err(1, "child exit status %d", es);
- exit(1);
- }
- } else {
- err(1, "child did not exit normally");
- exit(1);
- }
-}
-
-void deleteFile(uid_t user, const char* path) {
- pid_t child = fork();
- if (child == 0) {
- if (setuid(user)) {
- err(1, "setuid(%d)", user);
- exit(1);
- }
-
- if (unlink(path)) {
- err(1, "unlink(%s)", path);
- exit(1);
- }
- exit(0);
- }
- waitAndCheckStatus(child);
-}
-
-int main(int argc, char** argv) {
- const char kUser1Dir[] = "/user1dir";
- const char kUser2File[] = "/user1dir/user2file";
- const char kUser2File2[] = "/user1dir/user2file2";
-
- const uid_t user1 = 6666;
- const uid_t user2 = 6667;
-
- if (mkdir(kUser1Dir, 0755) != 0) {
- err(1, "mkdir(%s)", kUser1Dir);
- exit(1);
- }
- // Enable sticky bit for user1dir.
- if (chmod(kUser1Dir, 01777) != 0) {
- err(1, "chmod(%s)", kUser1Dir);
- exit(1);
- }
- createFile(kUser2File);
- createFile(kUser2File2);
-
- if (chown(kUser1Dir, user1, getegid())) {
- err(1, "chown(%s)", kUser1Dir);
- exit(1);
- }
- if (chown(kUser2File, user2, getegid())) {
- err(1, "chown(%s)", kUser2File);
- exit(1);
- }
- if (chown(kUser2File2, user2, getegid())) {
- err(1, "chown(%s)", kUser2File2);
- exit(1);
- }
-
- // User1 should be able to delete any file inside user1dir, even files of
- // other users due to the sticky bit.
- deleteFile(user1, kUser2File);
-
- // User2 should naturally be able to delete its own file even if the file is
- // inside a sticky dir owned by someone else.
- deleteFile(user2, kUser2File2);
-}
diff --git a/images/basic/mysql/Dockerfile b/images/basic/mysql/Dockerfile
deleted file mode 100644
index d87bfe55b..000000000
--- a/images/basic/mysql/Dockerfile
+++ /dev/null
@@ -1 +0,0 @@
-FROM mysql/mysql-server:8.0.19
diff --git a/images/basic/nginx/Dockerfile b/images/basic/nginx/Dockerfile
deleted file mode 100644
index af2e62526..000000000
--- a/images/basic/nginx/Dockerfile
+++ /dev/null
@@ -1 +0,0 @@
-FROM nginx:1.17.9
diff --git a/images/basic/python/Dockerfile b/images/basic/python/Dockerfile
deleted file mode 100644
index acf07cca9..000000000
--- a/images/basic/python/Dockerfile
+++ /dev/null
@@ -1,2 +0,0 @@
-FROM python:3
-ENTRYPOINT ["python", "-m", "http.server", "8080"]
diff --git a/images/basic/resolv/Dockerfile b/images/basic/resolv/Dockerfile
deleted file mode 100644
index 13665bdaf..000000000
--- a/images/basic/resolv/Dockerfile
+++ /dev/null
@@ -1 +0,0 @@
-FROM k8s.gcr.io/busybox:latest
diff --git a/images/basic/ruby/Dockerfile b/images/basic/ruby/Dockerfile
deleted file mode 100644
index d290418fb..000000000
--- a/images/basic/ruby/Dockerfile
+++ /dev/null
@@ -1 +0,0 @@
-FROM ruby:2.7.1
diff --git a/images/basic/tmpfile/Dockerfile b/images/basic/tmpfile/Dockerfile
deleted file mode 100644
index e3816c8cb..000000000
--- a/images/basic/tmpfile/Dockerfile
+++ /dev/null
@@ -1,4 +0,0 @@
-# Create file under /tmp to ensure files inside '/tmp' are not overridden.
-FROM alpine:3.11.5
-RUN mkdir -p /tmp/foo \
- && echo 123 > /tmp/foo/file.txt
diff --git a/images/basic/tomcat/Dockerfile b/images/basic/tomcat/Dockerfile
deleted file mode 100644
index c7db39a36..000000000
--- a/images/basic/tomcat/Dockerfile
+++ /dev/null
@@ -1 +0,0 @@
-FROM tomcat:8.0
diff --git a/images/basic/tomcat/Dockerfile.aarch64 b/images/basic/tomcat/Dockerfile.aarch64
deleted file mode 100644
index ed4096de9..000000000
--- a/images/basic/tomcat/Dockerfile.aarch64
+++ /dev/null
@@ -1 +0,0 @@
-FROM arm64v8/tomcat:8.0
diff --git a/images/basic/ubuntu/Dockerfile b/images/basic/ubuntu/Dockerfile
deleted file mode 100644
index 331b71343..000000000
--- a/images/basic/ubuntu/Dockerfile
+++ /dev/null
@@ -1 +0,0 @@
-FROM ubuntu:trusty
diff --git a/images/benchmarks/ab/Dockerfile b/images/benchmarks/ab/Dockerfile
deleted file mode 100644
index 10544639b..000000000
--- a/images/benchmarks/ab/Dockerfile
+++ /dev/null
@@ -1,7 +0,0 @@
-FROM ubuntu:18.04
-
-RUN set -x \
- && apt-get update \
- && apt-get install -y \
- apache2-utils \
- && rm -rf /var/lib/apt/lists/*
diff --git a/images/benchmarks/absl/Dockerfile.x86_64 b/images/benchmarks/absl/Dockerfile.x86_64
deleted file mode 100644
index 810c9ef5e..000000000
--- a/images/benchmarks/absl/Dockerfile.x86_64
+++ /dev/null
@@ -1,22 +0,0 @@
-FROM ubuntu:18.04
-
-RUN set -x \
- && apt-get update \
- && apt-get install -y \
- wget \
- git \
- pkg-config \
- zip \
- g++ \
- zlib1g-dev \
- unzip \
- python3 \
- && rm -rf /var/lib/apt/lists/*
-
-RUN wget https://github.com/bazelbuild/bazel/releases/download/0.27.0/bazel-0.27.0-installer-linux-x86_64.sh
-RUN chmod +x bazel-0.27.0-installer-linux-x86_64.sh
-RUN ./bazel-0.27.0-installer-linux-x86_64.sh
-
-RUN mkdir abseil-cpp && cd abseil-cpp \
- && git init && git remote add origin https://github.com/abseil/abseil-cpp.git \
- && git fetch --depth 1 origin 43ef2148c0936ebf7cb4be6b19927a9d9d145b8f && git checkout FETCH_HEAD
diff --git a/images/benchmarks/alpine/Dockerfile b/images/benchmarks/alpine/Dockerfile
deleted file mode 100644
index b09b037ca..000000000
--- a/images/benchmarks/alpine/Dockerfile
+++ /dev/null
@@ -1 +0,0 @@
-FROM alpine:latest
diff --git a/images/benchmarks/ffmpeg/Dockerfile b/images/benchmarks/ffmpeg/Dockerfile
deleted file mode 100644
index 7108df64f..000000000
--- a/images/benchmarks/ffmpeg/Dockerfile
+++ /dev/null
@@ -1,9 +0,0 @@
-FROM ubuntu:18.04
-
-RUN set -x \
- && apt-get update \
- && apt-get install -y \
- ffmpeg \
- && rm -rf /var/lib/apt/lists/*
-WORKDIR /media
-ADD https://samples.ffmpeg.org/MPEG-4/video.mp4 video.mp4
diff --git a/images/benchmarks/fio/Dockerfile b/images/benchmarks/fio/Dockerfile
deleted file mode 100644
index 9531df7fa..000000000
--- a/images/benchmarks/fio/Dockerfile
+++ /dev/null
@@ -1,7 +0,0 @@
-FROM ubuntu:18.04
-
-RUN set -x \
- && apt-get update \
- && apt-get install -y \
- fio \
- && rm -rf /var/lib/apt/lists/*
diff --git a/images/benchmarks/hey/Dockerfile b/images/benchmarks/hey/Dockerfile
deleted file mode 100644
index 4b6a0f849..000000000
--- a/images/benchmarks/hey/Dockerfile
+++ /dev/null
@@ -1,13 +0,0 @@
-FROM golang:1.15 as build
-RUN go get github.com/rakyll/hey
-WORKDIR /go/src/github.com/rakyll/hey
-RUN go mod download
-RUN CGO_ENABLED=0 go build -o /hey hey.go
-
-FROM ubuntu:18.04
-RUN set -x \
- && apt-get update \
- && apt-get install -y \
- wget \
- && rm -rf /var/lib/apt/lists/*
-COPY --from=build /hey /bin/hey
diff --git a/images/benchmarks/httpd/Dockerfile b/images/benchmarks/httpd/Dockerfile
deleted file mode 100644
index e95538a40..000000000
--- a/images/benchmarks/httpd/Dockerfile
+++ /dev/null
@@ -1,17 +0,0 @@
-FROM ubuntu:18.04
-
-RUN set -x \
- && apt-get update \
- && apt-get install -y \
- apache2 \
- && rm -rf /var/lib/apt/lists/*
-
-# Generate a bunch of relevant files.
-RUN mkdir -p /local && \
- for size in 1 10 100 1024 10240; do \
- dd if=/dev/zero of=/local/latin${size}k.txt count=${size} bs=1024; \
- done
-
-# Rewrite DocumentRoot to point to /tmp/html instead of the default path.
-RUN sed -i 's/DocumentRoot.*\/var\/www\/html$/DocumentRoot \/tmp\/html/' /etc/apache2/sites-enabled/000-default.conf
-COPY ./apache2-tmpdir.conf /etc/apache2/sites-enabled/apache2-tmpdir.conf
diff --git a/images/benchmarks/httpd/apache2-tmpdir.conf b/images/benchmarks/httpd/apache2-tmpdir.conf
deleted file mode 100644
index e33f8d9bb..000000000
--- a/images/benchmarks/httpd/apache2-tmpdir.conf
+++ /dev/null
@@ -1,5 +0,0 @@
-<Directory /tmp/html/>
- Options Indexes FollowSymLinks
- AllowOverride None
- Require all granted
-</Directory> \ No newline at end of file
diff --git a/images/benchmarks/iperf/Dockerfile b/images/benchmarks/iperf/Dockerfile
deleted file mode 100644
index 4cbfd0d70..000000000
--- a/images/benchmarks/iperf/Dockerfile
+++ /dev/null
@@ -1,8 +0,0 @@
-FROM ubuntu:18.04
-
-RUN set -x \
- && apt-get update \
- && apt-get install -y \
- iperf \
- && rm -rf /var/lib/apt/lists/*
-
diff --git a/images/benchmarks/nginx/Dockerfile b/images/benchmarks/nginx/Dockerfile
deleted file mode 100644
index c8e3330d0..000000000
--- a/images/benchmarks/nginx/Dockerfile
+++ /dev/null
@@ -1,12 +0,0 @@
-FROM nginx:1.15.10
-
-# Generate a bunch of relevant files.
-RUN mkdir -p /local && \
- for size in 1 10 100 1024 10240; do \
- dd if=/dev/zero of=/local/latin${size}k.txt count=${size} bs=1024; \
- done
-
-RUN touch /local/index.html
-
-COPY ./nginx.conf /etc/nginx/nginx.conf
-COPY ./nginx_gofer.conf /etc/nginx/nginx_gofer.conf
diff --git a/images/benchmarks/nginx/nginx.conf b/images/benchmarks/nginx/nginx.conf
deleted file mode 100644
index 2c43c0cda..000000000
--- a/images/benchmarks/nginx/nginx.conf
+++ /dev/null
@@ -1,19 +0,0 @@
-user nginx;
-worker_processes 1;
-daemon off;
-
-error_log /var/log/nginx/error.log warn;
-pid /var/run/nginx.pid;
-
-events {
- worker_connections 1024;
-}
-
-
-http {
- server {
- location / {
- root /tmp/html;
- }
- }
-}
diff --git a/images/benchmarks/nginx/nginx_gofer.conf b/images/benchmarks/nginx/nginx_gofer.conf
deleted file mode 100644
index dbba2a575..000000000
--- a/images/benchmarks/nginx/nginx_gofer.conf
+++ /dev/null
@@ -1,19 +0,0 @@
-user nginx;
-worker_processes 1;
-daemon off;
-
-error_log /var/log/nginx/error.log warn;
-pid /var/run/nginx.pid;
-
-events {
- worker_connections 1024;
-}
-
-
-http {
- server {
- location / {
- root /local;
- }
- }
-}
diff --git a/images/benchmarks/node/Dockerfile b/images/benchmarks/node/Dockerfile
deleted file mode 100644
index bf45650a0..000000000
--- a/images/benchmarks/node/Dockerfile
+++ /dev/null
@@ -1 +0,0 @@
-FROM node:onbuild
diff --git a/images/benchmarks/node/index.hbs b/images/benchmarks/node/index.hbs
deleted file mode 100644
index 03feceb75..000000000
--- a/images/benchmarks/node/index.hbs
+++ /dev/null
@@ -1,8 +0,0 @@
-<!DOCTYPE html>
-<html>
-<body>
- {{#each text}}
- <p>{{this}}</p>
- {{/each}}
-</body>
-</html>
diff --git a/images/benchmarks/node/index.js b/images/benchmarks/node/index.js
deleted file mode 100644
index 831015d18..000000000
--- a/images/benchmarks/node/index.js
+++ /dev/null
@@ -1,42 +0,0 @@
-const app = require('express')();
-const path = require('path');
-const redis = require('redis');
-const srs = require('secure-random-string');
-
-// The hostname is the first argument.
-const host_name = process.argv[2];
-
-var client = redis.createClient({host: host_name, detect_buffers: true});
-
-app.set('views', __dirname);
-app.set('view engine', 'hbs');
-
-app.get('/', (req, res) => {
- var tmp = [];
- /* Pull four random keys from the redis server. */
- for (i = 0; i < 4; i++) {
- client.get(Math.floor(Math.random() * (100)), function(err, reply) {
- tmp.push(reply.toString());
- });
- }
- res.render('index', {text: tmp});
-});
-
-/**
- * Securely generate a random string.
- * @param {number} len
- * @return {string}
- */
-function randomBody(len) {
- return srs({alphanumeric: true, length: len});
-}
-
-/** Mutates one hundred keys randomly. */
-function generateText() {
- for (i = 0; i < 100; i++) {
- client.set(i, randomBody(1024));
- }
-}
-
-generateText();
-app.listen(8080);
diff --git a/images/benchmarks/node/package-lock.json b/images/benchmarks/node/package-lock.json
deleted file mode 100644
index 580e68aa5..000000000
--- a/images/benchmarks/node/package-lock.json
+++ /dev/null
@@ -1,486 +0,0 @@
-{
- "name": "nodedum",
- "version": "1.0.0",
- "lockfileVersion": 1,
- "requires": true,
- "dependencies": {
- "accepts": {
- "version": "1.3.5",
- "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.5.tgz",
- "integrity": "sha1-63d99gEXI6OxTopywIBcjoZ0a9I=",
- "requires": {
- "mime-types": "~2.1.18",
- "negotiator": "0.6.1"
- }
- },
- "array-flatten": {
- "version": "1.1.1",
- "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
- "integrity": "sha1-ml9pkFGx5wczKPKgCJaLZOopVdI="
- },
- "async": {
- "version": "2.6.2",
- "resolved": "https://registry.npmjs.org/async/-/async-2.6.2.tgz",
- "integrity": "sha512-H1qVYh1MYhEEFLsP97cVKqCGo7KfCyTt6uEWqsTBr9SO84oK9Uwbyd/yCW+6rKJLHksBNUVWZDAjfS+Ccx0Bbg==",
- "requires": {
- "lodash": "^4.17.11"
- }
- },
- "body-parser": {
- "version": "1.18.3",
- "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.18.3.tgz",
- "integrity": "sha1-WykhmP/dVTs6DyDe0FkrlWlVyLQ=",
- "requires": {
- "bytes": "3.0.0",
- "content-type": "~1.0.4",
- "debug": "2.6.9",
- "depd": "~1.1.2",
- "http-errors": "~1.6.3",
- "iconv-lite": "0.4.23",
- "on-finished": "~2.3.0",
- "qs": "6.5.2",
- "raw-body": "2.3.3",
- "type-is": "~1.6.16"
- }
- },
- "bytes": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz",
- "integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg="
- },
- "commander": {
- "version": "2.20.0",
- "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.0.tgz",
- "integrity": "sha512-7j2y+40w61zy6YC2iRNpUe/NwhNyoXrYpHMrSunaMG64nRnaf96zO/KMQR4OyN/UnE5KLyEBnKHd4aG3rskjpQ==",
- "optional": true
- },
- "content-disposition": {
- "version": "0.5.2",
- "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz",
- "integrity": "sha1-DPaLud318r55YcOoUXjLhdunjLQ="
- },
- "content-type": {
- "version": "1.0.4",
- "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz",
- "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA=="
- },
- "cookie": {
- "version": "0.3.1",
- "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.3.1.tgz",
- "integrity": "sha1-5+Ch+e9DtMi6klxcWpboBtFoc7s="
- },
- "cookie-signature": {
- "version": "1.0.6",
- "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz",
- "integrity": "sha1-4wOogrNCzD7oylE6eZmXNNqzriw="
- },
- "debug": {
- "version": "2.6.9",
- "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
- "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
- "requires": {
- "ms": "2.0.0"
- }
- },
- "depd": {
- "version": "1.1.2",
- "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz",
- "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak="
- },
- "destroy": {
- "version": "1.0.4",
- "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz",
- "integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA="
- },
- "double-ended-queue": {
- "version": "2.1.0-0",
- "resolved": "https://registry.npmjs.org/double-ended-queue/-/double-ended-queue-2.1.0-0.tgz",
- "integrity": "sha1-ED01J/0xUo9AGIEwyEHv3XgmTlw="
- },
- "ee-first": {
- "version": "1.1.1",
- "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
- "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0="
- },
- "encodeurl": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz",
- "integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k="
- },
- "escape-html": {
- "version": "1.0.3",
- "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
- "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg="
- },
- "etag": {
- "version": "1.8.1",
- "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
- "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc="
- },
- "express": {
- "version": "4.16.4",
- "resolved": "https://registry.npmjs.org/express/-/express-4.16.4.tgz",
- "integrity": "sha512-j12Uuyb4FMrd/qQAm6uCHAkPtO8FDTRJZBDd5D2KOL2eLaz1yUNdUB/NOIyq0iU4q4cFarsUCrnFDPBcnksuOg==",
- "requires": {
- "accepts": "~1.3.5",
- "array-flatten": "1.1.1",
- "body-parser": "1.18.3",
- "content-disposition": "0.5.2",
- "content-type": "~1.0.4",
- "cookie": "0.3.1",
- "cookie-signature": "1.0.6",
- "debug": "2.6.9",
- "depd": "~1.1.2",
- "encodeurl": "~1.0.2",
- "escape-html": "~1.0.3",
- "etag": "~1.8.1",
- "finalhandler": "1.1.1",
- "fresh": "0.5.2",
- "merge-descriptors": "1.0.1",
- "methods": "~1.1.2",
- "on-finished": "~2.3.0",
- "parseurl": "~1.3.2",
- "path-to-regexp": "0.1.7",
- "proxy-addr": "~2.0.4",
- "qs": "6.5.2",
- "range-parser": "~1.2.0",
- "safe-buffer": "5.1.2",
- "send": "0.16.2",
- "serve-static": "1.13.2",
- "setprototypeof": "1.1.0",
- "statuses": "~1.4.0",
- "type-is": "~1.6.16",
- "utils-merge": "1.0.1",
- "vary": "~1.1.2"
- }
- },
- "finalhandler": {
- "version": "1.1.1",
- "resolved": "http://registry.npmjs.org/finalhandler/-/finalhandler-1.1.1.tgz",
- "integrity": "sha512-Y1GUDo39ez4aHAw7MysnUD5JzYX+WaIj8I57kO3aEPT1fFRL4sr7mjei97FgnwhAyyzRYmQZaTHb2+9uZ1dPtg==",
- "requires": {
- "debug": "2.6.9",
- "encodeurl": "~1.0.2",
- "escape-html": "~1.0.3",
- "on-finished": "~2.3.0",
- "parseurl": "~1.3.2",
- "statuses": "~1.4.0",
- "unpipe": "~1.0.0"
- }
- },
- "foreachasync": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/foreachasync/-/foreachasync-3.0.0.tgz",
- "integrity": "sha1-VQKYfchxS+M5IJfzLgBxyd7gfPY="
- },
- "forwarded": {
- "version": "0.1.2",
- "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.1.2.tgz",
- "integrity": "sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ="
- },
- "fresh": {
- "version": "0.5.2",
- "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz",
- "integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac="
- },
- "handlebars": {
- "version": "4.0.14",
- "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.0.14.tgz",
- "integrity": "sha512-E7tDoyAA8ilZIV3xDJgl18sX3M8xB9/fMw8+mfW4msLW8jlX97bAnWgT3pmaNXuvzIEgSBMnAHfuXsB2hdzfow==",
- "requires": {
- "async": "^2.5.0",
- "optimist": "^0.6.1",
- "source-map": "^0.6.1",
- "uglify-js": "^3.1.4"
- }
- },
- "hbs": {
- "version": "4.0.4",
- "resolved": "https://registry.npmjs.org/hbs/-/hbs-4.0.4.tgz",
- "integrity": "sha512-esVlyV/V59mKkwFai5YmPRSNIWZzhqL5YMN0++ueMxyK1cCfPa5f6JiHtapPKAIVAhQR6rpGxow0troav9WMEg==",
- "requires": {
- "handlebars": "4.0.14",
- "walk": "2.3.9"
- }
- },
- "http-errors": {
- "version": "1.6.3",
- "resolved": "http://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz",
- "integrity": "sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0=",
- "requires": {
- "depd": "~1.1.2",
- "inherits": "2.0.3",
- "setprototypeof": "1.1.0",
- "statuses": ">= 1.4.0 < 2"
- }
- },
- "iconv-lite": {
- "version": "0.4.23",
- "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.23.tgz",
- "integrity": "sha512-neyTUVFtahjf0mB3dZT77u+8O0QB89jFdnBkd5P1JgYPbPaia3gXXOVL2fq8VyU2gMMD7SaN7QukTB/pmXYvDA==",
- "requires": {
- "safer-buffer": ">= 2.1.2 < 3"
- }
- },
- "inherits": {
- "version": "2.0.3",
- "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
- "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4="
- },
- "ipaddr.js": {
- "version": "1.8.0",
- "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.8.0.tgz",
- "integrity": "sha1-6qM9bd16zo9/b+DJygRA5wZzix4="
- },
- "lodash": {
- "version": "4.17.15",
- "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.15.tgz",
- "integrity": "sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A=="
- },
- "media-typer": {
- "version": "0.3.0",
- "resolved": "http://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
- "integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g="
- },
- "merge-descriptors": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz",
- "integrity": "sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E="
- },
- "methods": {
- "version": "1.1.2",
- "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
- "integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4="
- },
- "mime": {
- "version": "1.4.1",
- "resolved": "https://registry.npmjs.org/mime/-/mime-1.4.1.tgz",
- "integrity": "sha512-KI1+qOZu5DcW6wayYHSzR/tXKCDC5Om4s1z2QJjDULzLcmf3DvzS7oluY4HCTrc+9FiKmWUgeNLg7W3uIQvxtQ=="
- },
- "mime-db": {
- "version": "1.37.0",
- "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.37.0.tgz",
- "integrity": "sha512-R3C4db6bgQhlIhPU48fUtdVmKnflq+hRdad7IyKhtFj06VPNVdk2RhiYL3UjQIlso8L+YxAtFkobT0VK+S/ybg=="
- },
- "mime-types": {
- "version": "2.1.21",
- "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.21.tgz",
- "integrity": "sha512-3iL6DbwpyLzjR3xHSFNFeb9Nz/M8WDkX33t1GFQnFOllWk8pOrh/LSrB5OXlnlW5P9LH73X6loW/eogc+F5lJg==",
- "requires": {
- "mime-db": "~1.37.0"
- }
- },
- "minimist": {
- "version": "0.0.10",
- "resolved": "https://registry.npmjs.org/minimist/-/minimist-0.0.10.tgz",
- "integrity": "sha1-3j+YVD2/lggr5IrRoMfNqDYwHc8="
- },
- "ms": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
- "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
- },
- "negotiator": {
- "version": "0.6.1",
- "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.1.tgz",
- "integrity": "sha1-KzJxhOiZIQEXeyhWP7XnECrNDKk="
- },
- "on-finished": {
- "version": "2.3.0",
- "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz",
- "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=",
- "requires": {
- "ee-first": "1.1.1"
- }
- },
- "optimist": {
- "version": "0.6.1",
- "resolved": "https://registry.npmjs.org/optimist/-/optimist-0.6.1.tgz",
- "integrity": "sha1-2j6nRob6IaGaERwybpDrFaAZZoY=",
- "requires": {
- "minimist": "~0.0.1",
- "wordwrap": "~0.0.2"
- }
- },
- "parseurl": {
- "version": "1.3.2",
- "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.2.tgz",
- "integrity": "sha1-/CidTtiZMRlGDBViUyYs3I3mW/M="
- },
- "path-to-regexp": {
- "version": "0.1.7",
- "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz",
- "integrity": "sha1-32BBeABfUi8V60SQ5yR6G/qmf4w="
- },
- "proxy-addr": {
- "version": "2.0.4",
- "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.4.tgz",
- "integrity": "sha512-5erio2h9jp5CHGwcybmxmVqHmnCBZeewlfJ0pex+UW7Qny7OOZXTtH56TGNyBizkgiOwhJtMKrVzDTeKcySZwA==",
- "requires": {
- "forwarded": "~0.1.2",
- "ipaddr.js": "1.8.0"
- }
- },
- "qs": {
- "version": "6.5.2",
- "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz",
- "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA=="
- },
- "range-parser": {
- "version": "1.2.0",
- "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz",
- "integrity": "sha1-9JvmtIeJTdxA3MlKMi9hEJLgDV4="
- },
- "raw-body": {
- "version": "2.3.3",
- "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.3.3.tgz",
- "integrity": "sha512-9esiElv1BrZoI3rCDuOuKCBRbuApGGaDPQfjSflGxdy4oyzqghxu6klEkkVIvBje+FF0BX9coEv8KqW6X/7njw==",
- "requires": {
- "bytes": "3.0.0",
- "http-errors": "1.6.3",
- "iconv-lite": "0.4.23",
- "unpipe": "1.0.0"
- }
- },
- "redis": {
- "version": "2.8.0",
- "resolved": "https://registry.npmjs.org/redis/-/redis-2.8.0.tgz",
- "integrity": "sha512-M1OkonEQwtRmZv4tEWF2VgpG0JWJ8Fv1PhlgT5+B+uNq2cA3Rt1Yt/ryoR+vQNOQcIEgdCdfH0jr3bDpihAw1A==",
- "requires": {
- "double-ended-queue": "^2.1.0-0",
- "redis-commands": "^1.2.0",
- "redis-parser": "^2.6.0"
- },
- "dependencies": {
- "redis-commands": {
- "version": "1.4.0",
- "resolved": "https://registry.npmjs.org/redis-commands/-/redis-commands-1.4.0.tgz",
- "integrity": "sha512-cu8EF+MtkwI4DLIT0x9P8qNTLFhQD4jLfxLR0cCNkeGzs87FN6879JOJwNQR/1zD7aSYNbU0hgsV9zGY71Itvw=="
- },
- "redis-parser": {
- "version": "2.6.0",
- "resolved": "https://registry.npmjs.org/redis-parser/-/redis-parser-2.6.0.tgz",
- "integrity": "sha1-Uu0J2srBCPGmMcB+m2mUHnoZUEs="
- }
- }
- },
- "redis-commands": {
- "version": "1.5.0",
- "resolved": "https://registry.npmjs.org/redis-commands/-/redis-commands-1.5.0.tgz",
- "integrity": "sha512-6KxamqpZ468MeQC3bkWmCB1fp56XL64D4Kf0zJSwDZbVLLm7KFkoIcHrgRvQ+sk8dnhySs7+yBg94yIkAK7aJg=="
- },
- "redis-parser": {
- "version": "2.6.0",
- "resolved": "https://registry.npmjs.org/redis-parser/-/redis-parser-2.6.0.tgz",
- "integrity": "sha1-Uu0J2srBCPGmMcB+m2mUHnoZUEs="
- },
- "safe-buffer": {
- "version": "5.1.2",
- "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
- "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="
- },
- "safer-buffer": {
- "version": "2.1.2",
- "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
- "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
- },
- "secure-random-string": {
- "version": "1.1.0",
- "resolved": "https://registry.npmjs.org/secure-random-string/-/secure-random-string-1.1.0.tgz",
- "integrity": "sha512-V/h8jqoz58zklNGybVhP++cWrxEPXlLM/6BeJ4e0a8zlb4BsbYRzFs16snrxByPa5LUxCVTD3M6EYIVIHR1fAg=="
- },
- "send": {
- "version": "0.16.2",
- "resolved": "https://registry.npmjs.org/send/-/send-0.16.2.tgz",
- "integrity": "sha512-E64YFPUssFHEFBvpbbjr44NCLtI1AohxQ8ZSiJjQLskAdKuriYEP6VyGEsRDH8ScozGpkaX1BGvhanqCwkcEZw==",
- "requires": {
- "debug": "2.6.9",
- "depd": "~1.1.2",
- "destroy": "~1.0.4",
- "encodeurl": "~1.0.2",
- "escape-html": "~1.0.3",
- "etag": "~1.8.1",
- "fresh": "0.5.2",
- "http-errors": "~1.6.2",
- "mime": "1.4.1",
- "ms": "2.0.0",
- "on-finished": "~2.3.0",
- "range-parser": "~1.2.0",
- "statuses": "~1.4.0"
- }
- },
- "serve-static": {
- "version": "1.13.2",
- "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.13.2.tgz",
- "integrity": "sha512-p/tdJrO4U387R9oMjb1oj7qSMaMfmOyd4j9hOFoxZe2baQszgHcSWjuya/CiT5kgZZKRudHNOA0pYXOl8rQ5nw==",
- "requires": {
- "encodeurl": "~1.0.2",
- "escape-html": "~1.0.3",
- "parseurl": "~1.3.2",
- "send": "0.16.2"
- }
- },
- "setprototypeof": {
- "version": "1.1.0",
- "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz",
- "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ=="
- },
- "source-map": {
- "version": "0.6.1",
- "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
- "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g=="
- },
- "statuses": {
- "version": "1.4.0",
- "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.4.0.tgz",
- "integrity": "sha512-zhSCtt8v2NDrRlPQpCNtw/heZLtfUDqxBM1udqikb/Hbk52LK4nQSwr10u77iopCW5LsyHpuXS0GnEc48mLeew=="
- },
- "type-is": {
- "version": "1.6.16",
- "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.16.tgz",
- "integrity": "sha512-HRkVv/5qY2G6I8iab9cI7v1bOIdhm94dVjQCPFElW9W+3GeDOSHmy2EBYe4VTApuzolPcmgFTN3ftVJRKR2J9Q==",
- "requires": {
- "media-typer": "0.3.0",
- "mime-types": "~2.1.18"
- }
- },
- "uglify-js": {
- "version": "3.5.9",
- "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.5.9.tgz",
- "integrity": "sha512-WpT0RqsDtAWPNJK955DEnb6xjymR8Fn0OlK4TT4pS0ASYsVPqr5ELhgwOwLCP5J5vHeJ4xmMmz3DEgdqC10JeQ==",
- "optional": true,
- "requires": {
- "commander": "~2.20.0",
- "source-map": "~0.6.1"
- }
- },
- "unpipe": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
- "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw="
- },
- "utils-merge": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
- "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM="
- },
- "vary": {
- "version": "1.1.2",
- "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
- "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw="
- },
- "walk": {
- "version": "2.3.9",
- "resolved": "https://registry.npmjs.org/walk/-/walk-2.3.9.tgz",
- "integrity": "sha1-MbTbZnjyrgHDnqn7hyWpAx5Vins=",
- "requires": {
- "foreachasync": "^3.0.0"
- }
- },
- "wordwrap": {
- "version": "0.0.3",
- "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-0.0.3.tgz",
- "integrity": "sha1-o9XabNXAvAAI03I0u68b7WMFkQc="
- }
- }
-}
diff --git a/images/benchmarks/node/package.json b/images/benchmarks/node/package.json
deleted file mode 100644
index 7dcadd523..000000000
--- a/images/benchmarks/node/package.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "name": "nodedum",
- "version": "1.0.0",
- "description": "",
- "main": "index.js",
- "scripts": {
- "test": "echo \"Error: no test specified\" && exit 1"
- },
- "author": "",
- "license": "ISC",
- "dependencies": {
- "express": "^4.16.4",
- "hbs": "^4.0.4",
- "redis": "^2.8.0",
- "redis-commands": "^1.2.0",
- "redis-parser": "^2.6.0",
- "secure-random-string": "^1.1.0"
- }
-}
diff --git a/images/benchmarks/redis/Dockerfile b/images/benchmarks/redis/Dockerfile
deleted file mode 100644
index 0f17249af..000000000
--- a/images/benchmarks/redis/Dockerfile
+++ /dev/null
@@ -1 +0,0 @@
-FROM redis:5.0.4
diff --git a/images/benchmarks/ruby/Dockerfile b/images/benchmarks/ruby/Dockerfile
deleted file mode 100755
index 13c4f6eed..000000000
--- a/images/benchmarks/ruby/Dockerfile
+++ /dev/null
@@ -1,27 +0,0 @@
-# example based on https://github.com/errm/fib
-FROM alpine:3.9 as build
-
-COPY Gemfile Gemfile.lock ./
-
-RUN apk add --no-cache ruby ruby-dev ruby-bundler ruby-json build-base bash \
- && bundle install --frozen -j4 -r3 --no-cache --without development \
- && apk del --no-cache ruby-bundler \
- && rm -rf /usr/lib/ruby/gems/*/cache
-
-FROM alpine:3.9 as prod
-
-COPY --from=build /usr/lib/ruby/gems /usr/lib/ruby/gems
-RUN apk add --no-cache ruby ruby-json ruby-etc redis apache2-utils \
- && ruby -e "Gem::Specification.map.each do |spec| \
- Gem::Installer.for_spec( \
- spec, \
- wrappers: true, \
- force: true, \
- install_dir: spec.base_dir, \
- build_args: spec.build_args, \
- ).generate_bin \
- end"
-
-COPY . /app/.
-
-STOPSIGNAL SIGINT
diff --git a/images/benchmarks/ruby/Gemfile b/images/benchmarks/ruby/Gemfile
deleted file mode 100755
index ac521b32c..000000000
--- a/images/benchmarks/ruby/Gemfile
+++ /dev/null
@@ -1,5 +0,0 @@
-source "https://rubygems.org"
-
-gem "sinatra"
-gem "puma"
-gem "redis" \ No newline at end of file
diff --git a/images/benchmarks/ruby/Gemfile.lock b/images/benchmarks/ruby/Gemfile.lock
deleted file mode 100644
index 041778e02..000000000
--- a/images/benchmarks/ruby/Gemfile.lock
+++ /dev/null
@@ -1,26 +0,0 @@
-GEM
- remote: https://rubygems.org/
- specs:
- mustermann (1.0.3)
- puma (3.4.0)
- rack (2.0.6)
- rack-protection (2.0.5)
- rack
- redis (4.1.0)
- sinatra (2.0.5)
- mustermann (~> 1.0)
- rack (~> 2.0)
- rack-protection (= 2.0.5)
- tilt (~> 2.0)
- tilt (2.0.9)
-
-PLATFORMS
- ruby
-
-DEPENDENCIES
- puma
- redis
- sinatra
-
-BUNDLED WITH
- 1.17.1
\ No newline at end of file
diff --git a/images/benchmarks/ruby/config.ru b/images/benchmarks/ruby/config.ru
deleted file mode 100755
index b2d135cc0..000000000
--- a/images/benchmarks/ruby/config.ru
+++ /dev/null
@@ -1,2 +0,0 @@
-require './main'
-run Sinatra::Application
\ No newline at end of file
diff --git a/images/benchmarks/ruby/index.erb b/images/benchmarks/ruby/index.erb
deleted file mode 100755
index 7f7300e80..000000000
--- a/images/benchmarks/ruby/index.erb
+++ /dev/null
@@ -1,8 +0,0 @@
-<!DOCTYPE html>
-<html>
-<body>
- <% text.each do |t| %>
- <p><%= t %></p>
- <% end %>
-</body>
-</html>
diff --git a/images/benchmarks/ruby/main.rb b/images/benchmarks/ruby/main.rb
deleted file mode 100755
index b998f004e..000000000
--- a/images/benchmarks/ruby/main.rb
+++ /dev/null
@@ -1,27 +0,0 @@
-require "sinatra"
-require "securerandom"
-require "redis"
-
-redis_host = ENV["HOST"]
-$redis = Redis.new(host: redis_host)
-
-def generateText
- for i in 0..99
- $redis.set(i, randomBody(1024))
- end
-end
-
-def randomBody(length)
- return SecureRandom.alphanumeric(length)
-end
-
-generateText
-template = ERB.new(File.read('./index.erb'))
-
-get "/" do
- texts = Array.new
- for i in 0..4
- texts.push($redis.get(rand(0..99)))
- end
- template.result_with_hash(text: texts)
-end
diff --git a/images/benchmarks/runsc/Dockerfile.x86_64 b/images/benchmarks/runsc/Dockerfile.x86_64
deleted file mode 100644
index 28ae64816..000000000
--- a/images/benchmarks/runsc/Dockerfile.x86_64
+++ /dev/null
@@ -1,25 +0,0 @@
-FROM ubuntu:18.04
-
-RUN set -x \
- && apt-get update \
- && apt-get install -y \
- wget \
- git \
- pkg-config \
- zip \
- g++ \
- zlib1g-dev \
- unzip \
- python-minimal \
- python3 \
- python3-pip \
- && rm -rf /var/lib/apt/lists/*
-
-RUN wget https://github.com/bazelbuild/bazel/releases/download/3.4.1/bazel-3.4.1-installer-linux-x86_64.sh
-RUN chmod +x bazel-3.4.1-installer-linux-x86_64.sh
-RUN ./bazel-3.4.1-installer-linux-x86_64.sh
-
-# Download release-20200601.0
-RUN mkdir gvisor && cd gvisor \
- && git init && git remote add origin https://github.com/google/gvisor.git \
- && git fetch --depth 1 origin a9b47390c821942d60784e308f681f213645049c && git checkout FETCH_HEAD
diff --git a/images/benchmarks/sysbench/Dockerfile b/images/benchmarks/sysbench/Dockerfile
deleted file mode 100644
index 55e865f43..000000000
--- a/images/benchmarks/sysbench/Dockerfile
+++ /dev/null
@@ -1,7 +0,0 @@
-FROM ubuntu:18.04
-
-RUN set -x \
- && apt-get update \
- && apt-get install -y \
- sysbench \
- && rm -rf /var/lib/apt/lists/*
diff --git a/images/benchmarks/tensorflow/Dockerfile b/images/benchmarks/tensorflow/Dockerfile
deleted file mode 100644
index 7564a4ee5..000000000
--- a/images/benchmarks/tensorflow/Dockerfile
+++ /dev/null
@@ -1,7 +0,0 @@
-FROM tensorflow/tensorflow:1.13.2
-
-RUN apt-get update \
- && apt-get install -y git
-RUN git clone --depth 1 https://github.com/aymericdamien/TensorFlow-Examples.git
-RUN python -m pip install -U pip setuptools
-RUN python -m pip install matplotlib
diff --git a/images/benchmarks/util/Dockerfile b/images/benchmarks/util/Dockerfile
deleted file mode 100644
index f2799b3e6..000000000
--- a/images/benchmarks/util/Dockerfile
+++ /dev/null
@@ -1,3 +0,0 @@
-FROM ubuntu:bionic
-
-RUN apt-get update && apt-get install -y wget
diff --git a/images/default/Dockerfile b/images/default/Dockerfile
deleted file mode 100644
index 5f652f2c3..000000000
--- a/images/default/Dockerfile
+++ /dev/null
@@ -1,29 +0,0 @@
-FROM ubuntu:focal
-
-ENV DEBIAN_FRONTEND="noninteractive"
-RUN apt-get update && apt-get install -y curl gnupg2 git \
- python python3 python3-distutils python3-pip \
- build-essential crossbuild-essential-arm64 qemu-user-static \
- openjdk-11-jdk-headless zip unzip \
- apt-transport-https ca-certificates gnupg-agent \
- software-properties-common \
- pkg-config libffi-dev patch diffutils libssl-dev
-
-# Install Docker client for the website build.
-RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
-RUN add-apt-repository \
- "deb https://download.docker.com/linux/ubuntu \
- $(lsb_release -cs) \
- stable"
-RUN apt-get install docker-ce-cli
-
-# Install gcloud.
-RUN curl https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-289.0.0-linux-x86_64.tar.gz | \
- tar zxf - google-cloud-sdk && \
- google-cloud-sdk/install.sh --quiet && \
- ln -s /google-cloud-sdk/bin/gcloud /usr/bin/gcloud
-
-# Download the official bazel binary. The APT repository isn't used because there are no packages for arm64.
-RUN sh -c 'curl -o /usr/local/bin/bazel https://releases.bazel.build/4.0.0/release/bazel-4.0.0-linux-$(uname -m | sed s/aarch64/arm64/) && chmod ugo+x /usr/local/bin/bazel'
-WORKDIR /workspace
-ENTRYPOINT ["/usr/local/bin/bazel"]
diff --git a/images/defs.bzl b/images/defs.bzl
deleted file mode 100644
index c1f96e312..000000000
--- a/images/defs.bzl
+++ /dev/null
@@ -1,34 +0,0 @@
-"""Helpers for Docker image generation."""
-
-def _docker_image_impl(ctx):
- importer = ctx.actions.declare_file(ctx.label.name)
-
- importer_content = [
- "#!/bin/bash",
- "set -euo pipefail",
- "source_file='%s'" % ctx.file.data.path,
- "if [[ ! -f \"$source_file\" ]]; then",
- " source_file='%s'" % ctx.file.data.short_path,
- "fi",
- "exec docker import " + " ".join([
- "-c '%s'" % attr
- for attr in ctx.attr.statements
- ]) + " \"$source_file\" $1",
- "",
- ]
-
- ctx.actions.write(importer, "\n".join(importer_content), is_executable = True)
- return [DefaultInfo(
- runfiles = ctx.runfiles([ctx.file.data]),
- executable = importer,
- )]
-
-docker_image = rule(
- implementation = _docker_image_impl,
- doc = "Tool to import a Docker image; takes a single parameter (image name).",
- attrs = {
- "statements": attr.string_list(doc = "Extra Dockerfile directives."),
- "data": attr.label(doc = "Image filesystem tarball", allow_single_file = [".tgz", ".tar.gz"]),
- },
- executable = True,
-)
diff --git a/images/iptables/Dockerfile b/images/iptables/Dockerfile
deleted file mode 100644
index efd91cb80..000000000
--- a/images/iptables/Dockerfile
+++ /dev/null
@@ -1,2 +0,0 @@
-FROM ubuntu
-RUN apt update && apt install -y iptables
diff --git a/images/jekyll/Dockerfile.x86_64 b/images/jekyll/Dockerfile.x86_64
deleted file mode 100644
index ae19f3bfc..000000000
--- a/images/jekyll/Dockerfile.x86_64
+++ /dev/null
@@ -1,19 +0,0 @@
-FROM jekyll/jekyll:4.0.0
-USER root
-
-RUN gem install \
- html-proofer:3.10.2 \
- nokogiri:1.10.1 \
- jekyll-autoprefixer:1.0.2 \
- jekyll-inline-svg:1.1.4 \
- jekyll-paginate:1.1.0 \
- kramdown-parser-gfm:1.1.0 \
- jekyll-relative-links:0.6.1 \
- jekyll-feed:0.13.0 \
- jekyll-sitemap:1.4.0
-
-# checks.rb is used with html-proofer for presubmit checks.
-COPY checks.rb /checks.rb
-
-COPY build.sh /build.sh
-CMD ["/build.sh"]
diff --git a/images/jekyll/build.sh b/images/jekyll/build.sh
deleted file mode 100755
index 010972ea6..000000000
--- a/images/jekyll/build.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-
-# Copyright 2020 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -euxo pipefail
-
-# Generate the syntax highlighting css file.
-/usr/gem/bin/rougify style github >/input/_sass/syntax.css
-# Build website including pages irrespective of date.
-/usr/gem/bin/jekyll build --future -t -s /input -d /output
diff --git a/images/jekyll/checks.rb b/images/jekyll/checks.rb
deleted file mode 100644
index fc7e6b5a8..000000000
--- a/images/jekyll/checks.rb
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/local/bin/ruby
-#
-# HTMLProofer checks for the gVisor website.
-#
-require 'html-proofer'
-
-# NoOpenerCheck checks to make sure links with target=_blank include the
-# rel=noopener attribute.
-class NoOpenerCheck < ::HTMLProofer::Check
- def run
- @html.css('a').each do |node|
- link = create_element(node)
- line = node.line
-
- rel = link.respond_to?(:rel) ? link.rel.split(' ') : []
-
- if link.respond_to?(:target) && link.target == "_blank" && !rel.include?("noopener")
- return add_issue("You should set rel=noopener for links with target=_blank", line: line)
- end
- end
- end
-end
-
-def main()
- options = {
- :check_html => true,
- :check_favicon => true,
- :disable_external => true,
- }
-
- HTMLProofer.check_directories(ARGV, options).run
-end
-
-if __FILE__ == $0
- main
-end
diff --git a/images/packetdrill/Dockerfile b/images/packetdrill/Dockerfile
deleted file mode 100644
index b4cd73006..000000000
--- a/images/packetdrill/Dockerfile
+++ /dev/null
@@ -1,8 +0,0 @@
-FROM ubuntu:bionic
-RUN apt-get update && apt-get install -y net-tools git iptables iputils-ping \
- netcat tcpdump jq tar bison flex make
-# Pick up updated git.
-RUN hash -r
-RUN git clone --depth 1 --branch packetdrill-v2.0 \
- https://github.com/google/packetdrill.git
-RUN cd packetdrill/gtests/net/packetdrill && ./configure && make
diff --git a/images/packetimpact/Dockerfile b/images/packetimpact/Dockerfile
deleted file mode 100644
index 906d5cdd6..000000000
--- a/images/packetimpact/Dockerfile
+++ /dev/null
@@ -1,18 +0,0 @@
-FROM ubuntu:focal
-RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
- # iptables to disable OS native packet processing.
- iptables \
- # nc to check that the posix_server is running.
- netcat \
- # tcpdump to log brief packet sniffing.
- tcpdump \
- # ip link show to display MAC addresses.
- iproute2 \
- # tshark to log verbose packet sniffing.
- tshark \
- # killall for cleanup.
- psmisc \
- # qemu-system-x86 to emulate fuchsia.
- qemu-system-x86 \
- # sha1sum to generate entropy.
- libdigest-sha-perl
diff --git a/images/runtimes/go1.12/Dockerfile.x86_64 b/images/runtimes/go1.12/Dockerfile.x86_64
deleted file mode 100644
index cb2944062..000000000
--- a/images/runtimes/go1.12/Dockerfile.x86_64
+++ /dev/null
@@ -1,4 +0,0 @@
-# Go is easy, since we already have everything we need to compile the proctor
-# binary and run the tests in the golang Docker image.
-FROM golang:1.12
-RUN ["go", "tool", "dist", "test", "-compile-only"]
diff --git a/images/runtimes/java11/Dockerfile b/images/runtimes/java11/Dockerfile
deleted file mode 100644
index 21f5aa3fe..000000000
--- a/images/runtimes/java11/Dockerfile
+++ /dev/null
@@ -1,22 +0,0 @@
-FROM ubuntu:bionic
-RUN apt-get update && apt-get install -y \
- autoconf \
- build-essential \
- curl \
- make \
- openjdk-11-jdk \
- unzip \
- zip
-
-# Download the JDK test library.
-WORKDIR /root
-RUN set -ex \
- && curl -fsSL --retry 10 -o /tmp/jdktests.tar.gz http://hg.openjdk.java.net/jdk-updates/jdk11u/archive/8b3498547395.tar.gz/test \
- && tar -xzf /tmp/jdktests.tar.gz \
- && mv jdk11u-8b3498547395/test test \
- && rm -f /tmp/jdktests.tar.gz
-
-# Install jtreg and add to PATH.
-RUN curl -o jtreg.tar.gz https://ci.adoptopenjdk.net/view/Dependencies/job/jtreg/lastSuccessfulBuild/artifact/jtreg-4.2.0-tip.tar.gz
-RUN tar -xzf jtreg.tar.gz
-ENV PATH="/root/jtreg/bin:$PATH"
diff --git a/images/runtimes/nodejs12.4.0/Dockerfile b/images/runtimes/nodejs12.4.0/Dockerfile
deleted file mode 100644
index d17924b62..000000000
--- a/images/runtimes/nodejs12.4.0/Dockerfile
+++ /dev/null
@@ -1,21 +0,0 @@
-FROM ubuntu:bionic
-RUN apt-get update && apt-get install -y \
- curl \
- dumb-init \
- g++ \
- make \
- python
-
-WORKDIR /root
-ARG VERSION=v12.4.0
-RUN curl -o node-${VERSION}.tar.gz https://nodejs.org/dist/${VERSION}/node-${VERSION}.tar.gz
-RUN tar -zxf node-${VERSION}.tar.gz
-
-WORKDIR /root/node-${VERSION}
-RUN ./configure
-RUN make
-RUN make test-build
-
-# Including dumb-init emulates the Linux "init" process, preventing the failure
-# of tests involving worker processes.
-ENTRYPOINT ["/usr/bin/dumb-init"]
diff --git a/images/runtimes/php7.3.6/Dockerfile b/images/runtimes/php7.3.6/Dockerfile
deleted file mode 100644
index e5f67f79c..000000000
--- a/images/runtimes/php7.3.6/Dockerfile
+++ /dev/null
@@ -1,19 +0,0 @@
-FROM ubuntu:bionic
-RUN apt-get update && apt-get install -y \
- autoconf \
- automake \
- bison \
- build-essential \
- curl \
- libtool \
- libxml2-dev \
- re2c
-
-WORKDIR /root
-ARG VERSION=7.3.6
-RUN curl -o php-${VERSION}.tar.gz https://www.php.net/distributions/php-${VERSION}.tar.gz
-RUN tar -zxf php-${VERSION}.tar.gz
-
-WORKDIR /root/php-${VERSION}
-RUN ./configure
-RUN make
diff --git a/images/runtimes/python3.7.3/Dockerfile b/images/runtimes/python3.7.3/Dockerfile
deleted file mode 100644
index 4d1e1e221..000000000
--- a/images/runtimes/python3.7.3/Dockerfile
+++ /dev/null
@@ -1,21 +0,0 @@
-FROM ubuntu:bionic
-RUN apt-get update && apt-get install -y \
- curl \
- gcc \
- libbz2-dev \
- libffi-dev \
- liblzma-dev \
- libreadline-dev \
- libssl-dev \
- make \
- zlib1g-dev
-
-# Use flags -LJO to follow the HTML redirect and download the .tar.gz.
-WORKDIR /root
-ARG VERSION=3.7.3
-RUN curl -LJO https://github.com/python/cpython/archive/v${VERSION}.tar.gz
-RUN tar -zxf cpython-${VERSION}.tar.gz
-
-WORKDIR /root/cpython-${VERSION}
-RUN ./configure --with-pydebug
-RUN make -s -j2
diff --git a/images/syzkaller/Dockerfile b/images/syzkaller/Dockerfile
deleted file mode 100644
index 9a85ae345..000000000
--- a/images/syzkaller/Dockerfile
+++ /dev/null
@@ -1,11 +0,0 @@
-FROM gcr.io/syzkaller/env
-
-# This image is mostly for investigating syzkaller crashes, so let's install
-# developer tools.
-RUN apt update && apt install -y git vim strace gdb procps
-
-WORKDIR /syzkaller/gopath/src/github.com/google/syzkaller
-
-RUN git init . && git remote add origin https://github.com/google/syzkaller && git fetch origin && git checkout origin/master && make
-
-ENTRYPOINT ./bin/syz-manager --config /tmp/syzkaller/syzkaller.cfg
diff --git a/images/syzkaller/README.md b/images/syzkaller/README.md
deleted file mode 100644
index 7e500cab3..000000000
--- a/images/syzkaller/README.md
+++ /dev/null
@@ -1,58 +0,0 @@
-syzkaller is an unsupervised coverage-guided kernel fuzzer.
-
-* [Github](https://github.com/google/syzkaller)
-* [gVisor dashboard](https://syzkaller.appspot.com/gvisor)
-
-# How to run syzkaller.
-
-First, we need to load a syzkaller docker image:
-
-```bash
-make load-syzkaller
-```
-
-or we can rebuild it to use an up-to-date version of the master branch:
-
-```bash
-make rebuild-syzkaller
-```
-
-Then we need to create a directory with all the artifacts needed to run
-syzkaller. We will bind-mount this directory into the docker container.
-
-We need to build runsc and place it in the artifact directory:
-
-```bash
-make RUNTIME_DIR=/tmp/syzkaller refresh
-```
-
-The next step is to create a syzkaller config. We can copy the default one and
-customize it:
-
-```bash
-cp images/syzkaller/default-gvisor-config.cfg /tmp/syzkaller/syzkaller.cfg
-```
-
-Now we can start syzkaller in a docker container:
-
-```bash
-docker run --privileged -it --rm \
- -v /tmp/syzkaller:/tmp/syzkaller \
- gvisor.dev/images/syzkaller:latest
-```
-
-All logs will be in /tmp/syzkaller/workdir.
-
-# How to run a syz repro.
-
-We need to repeat all preparation steps from the previous section and save a
-syzkaller repro in /tmp/syzkaller/repro.
-
-Now we can run syz-repro to reproduce a crash:
-
-```bash
-docker run --privileged -it --rm -v \
- /tmp/syzkaller:/tmp/syzkaller --entrypoint="" \
- gvisor.dev/images/syzkaller:latest ./bin/syz-repro -config \
- /tmp/syzkaller/syzkaller.cfg /tmp/syzkaller/repro
-```
diff --git a/images/syzkaller/default-gvisor-config.cfg b/images/syzkaller/default-gvisor-config.cfg
deleted file mode 100644
index c69641c21..000000000
--- a/images/syzkaller/default-gvisor-config.cfg
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "name": "gvisor",
- "target": "linux/amd64",
- "http": ":80",
- "workdir": "/tmp/syzkaller/workdir/",
- "image": "/tmp/syzkaller/runsc",
- "syzkaller": "/syzkaller/gopath/src/github.com/google/syzkaller",
- "cover": false,
- "procs": 1,
- "type": "gvisor",
- "vm": {
- "count": 1,
- "runsc_args": "--debug --network none --platform ptrace --vfs2 --fuse -net-raw -watchdog-action=panic"
- }
-}
diff --git a/nogo.yaml b/nogo.yaml
deleted file mode 100644
index 9b7fc5c8f..000000000
--- a/nogo.yaml
+++ /dev/null
@@ -1,176 +0,0 @@
-groups:
- # We define three basic groups: generated (all generated files),
- # external (all files outside the repository), and internal (all
- # files within the local repository). We can't enforce many style
- # checks on generated and external code, so enable those cases
- # selectively for analyzers below.
- - name: generated
- regex: "^(bazel-genfiles|bazel-out|bazel-bin)/"
- default: true
- - name: external
- regex: "^external/"
- default: false
- - name: internal
- regex: ".*"
- default: true
-global:
- generated:
- suppress:
- # Suppress the basic style checks for
- # generated code, but keep the analyses
- # that are required for quality & security.
- - "should not use ALL_CAPS in Go names"
- - "should not use underscores"
- - "comment on exported"
- - "methods on the same type should have the same receiver name"
- - "at least one file in a package"
- - "package comment should be of the form"
- # Generated code may have dead code paths.
- - "identical build constraints"
- - "no value of type"
- - "is never used"
- # go_embed_data rules generate unicode literals.
- - "string literal contains the Unicode format character"
- - "string literal contains the Unicode control character"
- - "string literal contains Unicode control characters"
- - "string literal contains Unicode format and control characters"
- # Some external code will generate protov1
- # implementations. These should be ignored.
- - "proto.* is deprecated"
- - "xxx_messageInfo_.*"
- - "receiver name should be a reflection of its identity"
- # Generated gRPC code is not compliant either.
- - "error strings should not be capitalized"
- - "grpc.Errorf is deprecated"
- # Generated proto code does not always follow capitalization conventions.
- - "(field|method|struct|type) .* should be .*"
- # Generated proto code sometimes duplicates imports with aliases.
- - "duplicate import"
- # These will never be annotated.
- - "unexpected call to atomic function"
- internal:
- suppress:
- # We use ALL_CAPS for system definitions,
- # which are common enough in the code base
- # that we shouldn't annotate exceptions.
- #
- # Same story for underscores.
- - "should not use ALL_CAPS in Go names"
- - "should not use underscores in Go names"
- # These need to be annotated.
- - "unexpected call to atomic function.*"
- - "return with unexpected locks held.*"
- - "incompatible return states.*"
- exclude:
- # Generated: exempt all.
- - pkg/shim/runtimeoptions/runtimeoptions_cri.go
-analyzers:
- asmdecl:
- external: # Enabled.
- assign:
- external:
- exclude:
- - gazelle/walk/walk.go
- atomic:
- external: # Enabled.
- bools:
- external: # Enabled.
- buildtag:
- external: # Enabled.
- cgocall:
- external: # Enabled.
- checklocks:
- internal:
- exclude:
- - "^-$" # b/181776900: analyzer fails on buildkite.
- shadow: # Disable for now.
- generated:
- exclude: [".*"]
- internal:
- exclude: [".*"]
- composites: # Disable for now.
- generated:
- exclude: [".*"]
- internal:
- exclude: [".*"]
- errorsas:
- external: # Enabled.
- httpresponse:
- external: # Enabled.
- loopclosure:
- external: # Enabled.
- nilfunc:
- external: # Enabled.
- nilness:
- internal:
- exclude:
- - pkg/sentry/platform/kvm/kvm_test.go # Intentional.
- - tools/bigquery/bigquery.go # False positive.
- printf:
- external: # Enabled.
- shift:
- generated: # Disabled for generated code; these shifts are well-defined.
- exclude: [".*"]
- external: # Enabled.
- stringintconv:
- external:
- exclude:
- - ".*protobuf/.*.go" # Bad conversions.
- - ".*flate/huffman_bit_writer.go" # Bad conversion.
- # Runtime internal violations.
- - ".*reflect/value.go"
- - ".*encoding/xml/xml.go"
- - ".*runtime/pprof/internal/profile/proto.go"
- - ".*fmt/scan.go"
- - ".*go/types/conversions.go"
- - ".*golang.org/x/net/dns/dnsmessage/message.go"
- tests:
- external: # Enabled.
- unmarshal:
- external: # Enabled.
- unreachable:
- external: # Enabled.
- unsafeptr:
- internal:
- exclude:
- - ".*_test.go" # Exclude tests.
- - "pkg/flipcall/.*_unsafe.go" # Special case.
- - pkg/gohacks/gohacks_unsafe.go # Special case.
- - pkg/ring0/pagetables/allocator_unsafe.go # Special case.
- - pkg/sentry/fs/fsutil/host_file_mapper_unsafe.go # Special case.
- - pkg/sentry/platform/kvm/bluepill_unsafe.go # Special case.
- - pkg/sentry/platform/kvm/machine_unsafe.go # Special case.
- - pkg/sentry/platform/safecopy/safecopy_unsafe.go # Special case.
- - pkg/sentry/usage/memory_unsafe.go # Special case.
- - pkg/sentry/vfs/mount_unsafe.go # Special case.
- - pkg/state/decode_unsafe.go # Special case.
- unusedresult:
- external: # Enabled.
- checkescape:
- external: # Enabled.
- SA4016:
- internal:
- exclude:
- - pkg/gohacks/gohacks_unsafe.go # x ^ 0 always equals x.
- SA2001:
- internal:
- exclude:
- - pkg/sentry/fs/fs.go # Intentional.
- - pkg/sentry/fs/gofer/inode.go # Intentional.
- - pkg/refs/refcounter_test.go # Intentional.
- ST1019:
- generated:
- exclude:
- # package ".../kubeapi/core/v1/v1" is being imported more than once
- - generated.gen.pb.go
- ST1021:
- internal:
- suppress:
- - "comment on exported type Translation" # Intentional.
- - "comment on exported type PinnedRange" # Intentional.
- SA5011:
- internal:
- exclude:
- # https://github.com/dominikh/go-tools/issues/924
- - pkg/sentry/fs/fdpipe/pipe_opener_test.go
- - pkg/tcpip/tests/integration/link_resolution_test.go
diff --git a/pkg/abi/BUILD b/pkg/abi/BUILD
deleted file mode 100644
index 839f822eb..000000000
--- a/pkg/abi/BUILD
+++ /dev/null
@@ -1,13 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "abi",
- srcs = [
- "abi.go",
- "abi_linux.go",
- "flag.go",
- ],
- visibility = ["//:sandbox"],
-)
diff --git a/pkg/abi/abi_linux_state_autogen.go b/pkg/abi/abi_linux_state_autogen.go
new file mode 100644
index 000000000..327ef0e5c
--- /dev/null
+++ b/pkg/abi/abi_linux_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build linux
+
+package abi
diff --git a/pkg/abi/abi_state_autogen.go b/pkg/abi/abi_state_autogen.go
new file mode 100644
index 000000000..d54002c3b
--- /dev/null
+++ b/pkg/abi/abi_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package abi
diff --git a/pkg/abi/linux/BUILD b/pkg/abi/linux/BUILD
deleted file mode 100644
index eb004a7f6..000000000
--- a/pkg/abi/linux/BUILD
+++ /dev/null
@@ -1,94 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-# Package linux contains the constants and types needed to interface with a
-# Linux kernel. It should be used instead of syscall or golang.org/x/sys/unix
-# when the host OS may not be Linux.
-
-package(licenses = ["notice"])
-
-go_library(
- name = "linux",
- srcs = [
- "aio.go",
- "arch_amd64.go",
- "audit.go",
- "bpf.go",
- "capability.go",
- "clone.go",
- "context.go",
- "dev.go",
- "elf.go",
- "epoll.go",
- "epoll_amd64.go",
- "epoll_arm64.go",
- "errqueue.go",
- "eventfd.go",
- "exec.go",
- "fadvise.go",
- "fcntl.go",
- "file.go",
- "file_amd64.go",
- "file_arm64.go",
- "fs.go",
- "fuse.go",
- "futex.go",
- "inotify.go",
- "ioctl.go",
- "ioctl_tun.go",
- "ip.go",
- "ipc.go",
- "limits.go",
- "linux.go",
- "membarrier.go",
- "mm.go",
- "netdevice.go",
- "netfilter.go",
- "netfilter_ipv6.go",
- "netlink.go",
- "netlink_route.go",
- "poll.go",
- "prctl.go",
- "ptrace.go",
- "ptrace_amd64.go",
- "ptrace_arm64.go",
- "rseq.go",
- "rusage.go",
- "sched.go",
- "seccomp.go",
- "sem.go",
- "sem_amd64.go",
- "sem_arm64.go",
- "shm.go",
- "signal.go",
- "signalfd.go",
- "socket.go",
- "splice.go",
- "tcp.go",
- "time.go",
- "timer.go",
- "tty.go",
- "uio.go",
- "utsname.go",
- "wait.go",
- "xattr.go",
- ],
- marshal = True,
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/abi",
- "//pkg/bits",
- "//pkg/context",
- "//pkg/hostarch",
- "//pkg/marshal",
- "//pkg/marshal/primitive",
- ],
-)
-
-go_test(
- name = "linux_test",
- size = "small",
- srcs = [
- "netfilter_test.go",
- ],
- library = ":linux",
-)
diff --git a/pkg/abi/linux/errno/BUILD b/pkg/abi/linux/errno/BUILD
deleted file mode 100644
index d003342d5..000000000
--- a/pkg/abi/linux/errno/BUILD
+++ /dev/null
@@ -1,9 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "errno",
- srcs = ["errno.go"],
- visibility = ["//visibility:public"],
-)
diff --git a/pkg/abi/linux/errno/errno_state_autogen.go b/pkg/abi/linux/errno/errno_state_autogen.go
new file mode 100644
index 000000000..4c4ae64a6
--- /dev/null
+++ b/pkg/abi/linux/errno/errno_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package errno
diff --git a/pkg/abi/linux/linux_abi_autogen_unsafe.go b/pkg/abi/linux/linux_abi_autogen_unsafe.go
new file mode 100644
index 000000000..ed00375de
--- /dev/null
+++ b/pkg/abi/linux/linux_abi_autogen_unsafe.go
@@ -0,0 +1,14903 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// If there are issues with build tag aggregation, see
+// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here
+// come from the input set of files used to generate this file. This input set
+// is filtered based on pre-defined file suffixes related to build tags, see
+// tools/defs.bzl:calculate_sets().
+
+package linux
+
+import (
+ "gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/hostarch"
+ "gvisor.dev/gvisor/pkg/marshal"
+ "io"
+ "reflect"
+ "runtime"
+ "unsafe"
+)
+
+// Marshallable types used by this file.
+var _ marshal.Marshallable = (*BPFInstruction)(nil)
+var _ marshal.Marshallable = (*CapUserData)(nil)
+var _ marshal.Marshallable = (*CapUserHeader)(nil)
+var _ marshal.Marshallable = (*ClockT)(nil)
+var _ marshal.Marshallable = (*ControlMessageCredentials)(nil)
+var _ marshal.Marshallable = (*ControlMessageHeader)(nil)
+var _ marshal.Marshallable = (*ControlMessageIPPacketInfo)(nil)
+var _ marshal.Marshallable = (*DigestMetadata)(nil)
+var _ marshal.Marshallable = (*ElfHeader64)(nil)
+var _ marshal.Marshallable = (*ElfProg64)(nil)
+var _ marshal.Marshallable = (*ElfSection64)(nil)
+var _ marshal.Marshallable = (*ErrorName)(nil)
+var _ marshal.Marshallable = (*ExtensionName)(nil)
+var _ marshal.Marshallable = (*FOwnerEx)(nil)
+var _ marshal.Marshallable = (*FUSEAttr)(nil)
+var _ marshal.Marshallable = (*FUSECreateMeta)(nil)
+var _ marshal.Marshallable = (*FUSEDirentMeta)(nil)
+var _ marshal.Marshallable = (*FUSEEntryOut)(nil)
+var _ marshal.Marshallable = (*FUSEGetAttrIn)(nil)
+var _ marshal.Marshallable = (*FUSEGetAttrOut)(nil)
+var _ marshal.Marshallable = (*FUSEHeaderIn)(nil)
+var _ marshal.Marshallable = (*FUSEHeaderOut)(nil)
+var _ marshal.Marshallable = (*FUSEInitIn)(nil)
+var _ marshal.Marshallable = (*FUSEInitOut)(nil)
+var _ marshal.Marshallable = (*FUSEMkdirMeta)(nil)
+var _ marshal.Marshallable = (*FUSEMknodMeta)(nil)
+var _ marshal.Marshallable = (*FUSEOpID)(nil)
+var _ marshal.Marshallable = (*FUSEOpcode)(nil)
+var _ marshal.Marshallable = (*FUSEOpenIn)(nil)
+var _ marshal.Marshallable = (*FUSEOpenOut)(nil)
+var _ marshal.Marshallable = (*FUSEReadIn)(nil)
+var _ marshal.Marshallable = (*FUSEReleaseIn)(nil)
+var _ marshal.Marshallable = (*FUSESetAttrIn)(nil)
+var _ marshal.Marshallable = (*FUSEWriteIn)(nil)
+var _ marshal.Marshallable = (*FUSEWriteOut)(nil)
+var _ marshal.Marshallable = (*Flock)(nil)
+var _ marshal.Marshallable = (*IFConf)(nil)
+var _ marshal.Marshallable = (*IFReq)(nil)
+var _ marshal.Marshallable = (*IOCallback)(nil)
+var _ marshal.Marshallable = (*IOEvent)(nil)
+var _ marshal.Marshallable = (*IP6TEntry)(nil)
+var _ marshal.Marshallable = (*IP6TIP)(nil)
+var _ marshal.Marshallable = (*IP6TReplace)(nil)
+var _ marshal.Marshallable = (*IPCPerm)(nil)
+var _ marshal.Marshallable = (*IPTEntry)(nil)
+var _ marshal.Marshallable = (*IPTGetEntries)(nil)
+var _ marshal.Marshallable = (*IPTGetinfo)(nil)
+var _ marshal.Marshallable = (*IPTIP)(nil)
+var _ marshal.Marshallable = (*IPTOwnerInfo)(nil)
+var _ marshal.Marshallable = (*IPTReplace)(nil)
+var _ marshal.Marshallable = (*Inet6Addr)(nil)
+var _ marshal.Marshallable = (*Inet6MulticastRequest)(nil)
+var _ marshal.Marshallable = (*InetAddr)(nil)
+var _ marshal.Marshallable = (*InetMulticastRequest)(nil)
+var _ marshal.Marshallable = (*InetMulticastRequestWithNIC)(nil)
+var _ marshal.Marshallable = (*InterfaceAddrMessage)(nil)
+var _ marshal.Marshallable = (*InterfaceInfoMessage)(nil)
+var _ marshal.Marshallable = (*ItimerVal)(nil)
+var _ marshal.Marshallable = (*Itimerspec)(nil)
+var _ marshal.Marshallable = (*KernelIP6TEntry)(nil)
+var _ marshal.Marshallable = (*KernelIP6TGetEntries)(nil)
+var _ marshal.Marshallable = (*KernelIPTEntry)(nil)
+var _ marshal.Marshallable = (*KernelIPTGetEntries)(nil)
+var _ marshal.Marshallable = (*Linger)(nil)
+var _ marshal.Marshallable = (*NFNATRange)(nil)
+var _ marshal.Marshallable = (*NetlinkAttrHeader)(nil)
+var _ marshal.Marshallable = (*NetlinkErrorMessage)(nil)
+var _ marshal.Marshallable = (*NetlinkMessageHeader)(nil)
+var _ marshal.Marshallable = (*NfNATIPV4MultiRangeCompat)(nil)
+var _ marshal.Marshallable = (*NfNATIPV4Range)(nil)
+var _ marshal.Marshallable = (*NumaPolicy)(nil)
+var _ marshal.Marshallable = (*PollFD)(nil)
+var _ marshal.Marshallable = (*RSeqCriticalSection)(nil)
+var _ marshal.Marshallable = (*RobustListHead)(nil)
+var _ marshal.Marshallable = (*RouteMessage)(nil)
+var _ marshal.Marshallable = (*Rusage)(nil)
+var _ marshal.Marshallable = (*SeccompData)(nil)
+var _ marshal.Marshallable = (*SemInfo)(nil)
+var _ marshal.Marshallable = (*Sembuf)(nil)
+var _ marshal.Marshallable = (*ShmInfo)(nil)
+var _ marshal.Marshallable = (*ShmParams)(nil)
+var _ marshal.Marshallable = (*ShmidDS)(nil)
+var _ marshal.Marshallable = (*SigAction)(nil)
+var _ marshal.Marshallable = (*Sigevent)(nil)
+var _ marshal.Marshallable = (*SignalInfo)(nil)
+var _ marshal.Marshallable = (*SignalSet)(nil)
+var _ marshal.Marshallable = (*SignalStack)(nil)
+var _ marshal.Marshallable = (*SignalfdSiginfo)(nil)
+var _ marshal.Marshallable = (*SockAddrInet)(nil)
+var _ marshal.Marshallable = (*SockAddrInet6)(nil)
+var _ marshal.Marshallable = (*SockAddrLink)(nil)
+var _ marshal.Marshallable = (*SockAddrNetlink)(nil)
+var _ marshal.Marshallable = (*SockAddrUnix)(nil)
+var _ marshal.Marshallable = (*SockErrCMsgIPv4)(nil)
+var _ marshal.Marshallable = (*SockErrCMsgIPv6)(nil)
+var _ marshal.Marshallable = (*SockExtendedErr)(nil)
+var _ marshal.Marshallable = (*Statfs)(nil)
+var _ marshal.Marshallable = (*Statx)(nil)
+var _ marshal.Marshallable = (*StatxTimestamp)(nil)
+var _ marshal.Marshallable = (*Sysinfo)(nil)
+var _ marshal.Marshallable = (*TCPInfo)(nil)
+var _ marshal.Marshallable = (*TableName)(nil)
+var _ marshal.Marshallable = (*Termios)(nil)
+var _ marshal.Marshallable = (*TimeT)(nil)
+var _ marshal.Marshallable = (*TimerID)(nil)
+var _ marshal.Marshallable = (*Timespec)(nil)
+var _ marshal.Marshallable = (*Timeval)(nil)
+var _ marshal.Marshallable = (*Tms)(nil)
+var _ marshal.Marshallable = (*Utime)(nil)
+var _ marshal.Marshallable = (*UtsName)(nil)
+var _ marshal.Marshallable = (*WindowSize)(nil)
+var _ marshal.Marshallable = (*Winsize)(nil)
+var _ marshal.Marshallable = (*XTCounters)(nil)
+var _ marshal.Marshallable = (*XTEntryMatch)(nil)
+var _ marshal.Marshallable = (*XTEntryTarget)(nil)
+var _ marshal.Marshallable = (*XTErrorTarget)(nil)
+var _ marshal.Marshallable = (*XTGetRevision)(nil)
+var _ marshal.Marshallable = (*XTRedirectTarget)(nil)
+var _ marshal.Marshallable = (*XTSNATTarget)(nil)
+var _ marshal.Marshallable = (*XTStandardTarget)(nil)
+var _ marshal.Marshallable = (*XTTCP)(nil)
+var _ marshal.Marshallable = (*XTUDP)(nil)
+
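The `var _ marshal.Marshallable = (*T)(nil)` lines above are compile-time assertions: the blank identifier allocates nothing, but the conversion fails to compile if a listed type ever stops implementing `marshal.Marshallable`. A minimal sketch of the same idiom, using a hypothetical `Serializer` interface and `Record` type rather than gVisor's real `marshal` package:

```go
package main

import "fmt"

// Serializer is a stand-in for an interface such as marshal.Marshallable.
type Serializer interface {
	SizeBytes() int
}

// Record is a stand-in for a generated type such as IOCallback.
type Record struct{ ID uint64 }

// SizeBytes reports the fixed wire size of Record.
func (r *Record) SizeBytes() int { return 8 }

// Compile-time assertion: the build breaks if *Record ever stops
// implementing Serializer. The blank var allocates no storage.
var _ Serializer = (*Record)(nil)

func main() {
	fmt.Println((&Record{}).SizeBytes())
}
```

Because each assertion is free at runtime, the pattern scales to the long list of generated types above.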
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *IOCallback) SizeBytes() int {
+ return 64
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *IOCallback) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(i.Data))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Key))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(i.OpCode))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(i.ReqPrio))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.FD))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(i.Buf))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(i.Bytes))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(i.Offset))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(i.Reserved2))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Flags))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.ResFD))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *IOCallback) UnmarshalBytes(src []byte) {
+ i.Data = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ i.Key = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+ i.OpCode = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ i.ReqPrio = int16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ i.FD = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ i.Buf = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ i.Bytes = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ i.Offset = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ i.Reserved2 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ i.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ i.ResFD = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *IOCallback) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *IOCallback) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(i), uintptr(i.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *IOCallback) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *IOCallback) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *IOCallback) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *IOCallback) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *IOCallback) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
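A short sketch of how the generated `IOCallback` marshalling above might be exercised, assuming the `gvisor.dev/gvisor/pkg/abi/linux` package with the method signatures exactly as emitted in this file (the sentry typically drives these through `CopyIn`/`CopyOut` with a task's `marshal.CopyContext`; the field values here are illustrative only):

```go
package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/abi/linux"
)

func main() {
	// Populate a few fields of the 64-byte AIO control block.
	in := linux.IOCallback{
		OpCode: 1, // hypothetical opcode value for illustration
		FD:     3,
		Buf:    0xdeadbeef,
		Bytes:  4096,
	}

	// Marshal into a buffer sized by the generated SizeBytes.
	buf := make([]byte, in.SizeBytes())
	in.MarshalBytes(buf)

	// Unmarshal into a fresh value and confirm the round trip.
	var out linux.IOCallback
	out.UnmarshalBytes(buf)
	fmt.Println(out.FD == in.FD, out.Bytes == in.Bytes)
}
```

The explicit `MarshalBytes`/`UnmarshalBytes` pair pins the byte layout field by field, which is what the fixed offsets in the generated code above encode.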
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *IOEvent) SizeBytes() int {
+ return 32
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *IOEvent) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(i.Data))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(i.Obj))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(i.Result))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(i.Result2))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *IOEvent) UnmarshalBytes(src []byte) {
+ i.Data = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ i.Obj = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ i.Result = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ i.Result2 = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *IOEvent) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *IOEvent) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(i), uintptr(i.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *IOEvent) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *IOEvent) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *IOEvent) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *IOEvent) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *IOEvent) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (b *BPFInstruction) SizeBytes() int {
+ return 8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (b *BPFInstruction) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(b.OpCode))
+ dst = dst[2:]
+ dst[0] = byte(b.JumpIfTrue)
+ dst = dst[1:]
+ dst[0] = byte(b.JumpIfFalse)
+ dst = dst[1:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(b.K))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (b *BPFInstruction) UnmarshalBytes(src []byte) {
+ b.OpCode = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ b.JumpIfTrue = uint8(src[0])
+ src = src[1:]
+ b.JumpIfFalse = uint8(src[0])
+ src = src[1:]
+ b.K = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (b *BPFInstruction) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (b *BPFInstruction) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(b), uintptr(b.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (b *BPFInstruction) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(b), unsafe.Pointer(&src[0]), uintptr(b.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (b *BPFInstruction) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(b)))
+ hdr.Len = b.SizeBytes()
+ hdr.Cap = b.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that b
+ // must live until the use above.
+ runtime.KeepAlive(b) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (b *BPFInstruction) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return b.CopyOutN(cc, addr, b.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (b *BPFInstruction) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(b)))
+ hdr.Len = b.SizeBytes()
+ hdr.Cap = b.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that b
+ // must live until the use above.
+ runtime.KeepAlive(b) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (b *BPFInstruction) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(b)))
+ hdr.Len = b.SizeBytes()
+ hdr.Cap = b.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that b
+ // must live until the use above.
+ runtime.KeepAlive(b) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// CopyBPFInstructionSliceIn copies in a slice of BPFInstruction objects from the task's memory.
+func CopyBPFInstructionSliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []BPFInstruction) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*BPFInstruction)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&dst)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyInBytes(addr, buf)
+ // Since we bypassed the compiler's escape analysis, indicate that dst
+ // must live until the use above.
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyBPFInstructionSliceOut copies a slice of BPFInstruction objects to the task's memory.
+func CopyBPFInstructionSliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []BPFInstruction) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*BPFInstruction)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&src)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyOutBytes(addr, buf)
+ // Since we bypassed the compiler's escape analysis, indicate that src
+ // must live until the use above.
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
+ return length, err
+}
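+
+// A minimal usage sketch for the two slice helpers above (hypothetical
+// caller: `cc` is any marshal.CopyContext, and `fprogLen`/`fprogAddr` stand
+// in for a user-supplied instruction count and pointer; neither name exists
+// in this package):
+//
+//	insns := make([]BPFInstruction, fprogLen)
+//	if _, err := CopyBPFInstructionSliceIn(cc, fprogAddr, insns); err != nil {
+//		return nil, err
+//	}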
+
+// MarshalUnsafeBPFInstructionSlice is like BPFInstruction.MarshalUnsafe, but for a []BPFInstruction.
+func MarshalUnsafeBPFInstructionSlice(src []BPFInstruction, dst []byte) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*BPFInstruction)(nil).SizeBytes()
+
+ dst = dst[:size*count]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(dst)))
+ return size * count, nil
+}
+
+// UnmarshalUnsafeBPFInstructionSlice is like BPFInstruction.UnmarshalUnsafe, but for a []BPFInstruction.
+func UnmarshalUnsafeBPFInstructionSlice(dst []BPFInstruction, src []byte) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*BPFInstruction)(nil).SizeBytes()
+
+ src = src[:(size*count)]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(src)))
+ return size * count, nil
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (c *CapUserData) SizeBytes() int {
+ return 12
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (c *CapUserData) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(c.Effective))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(c.Permitted))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(c.Inheritable))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (c *CapUserData) UnmarshalBytes(src []byte) {
+ c.Effective = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ c.Permitted = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ c.Inheritable = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (c *CapUserData) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (c *CapUserData) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(c), uintptr(c.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (c *CapUserData) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(c), unsafe.Pointer(&src[0]), uintptr(c.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (c *CapUserData) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (c *CapUserData) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return c.CopyOutN(cc, addr, c.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (c *CapUserData) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (c *CapUserData) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// CopyCapUserDataSliceIn copies in a slice of CapUserData objects from the task's memory.
+func CopyCapUserDataSliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []CapUserData) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*CapUserData)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&dst)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyInBytes(addr, buf)
+ // Since we bypassed the compiler's escape analysis, indicate that dst
+ // must live until the use above.
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyCapUserDataSliceOut copies a slice of CapUserData objects to the task's memory.
+func CopyCapUserDataSliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []CapUserData) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*CapUserData)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&src)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by src's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyOutBytes(addr, buf)
+ // Since we bypassed the compiler's escape analysis, indicate that src
+ // must live until the use above.
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// MarshalUnsafeCapUserDataSlice is like CapUserData.MarshalUnsafe, but for a []CapUserData.
+func MarshalUnsafeCapUserDataSlice(src []CapUserData, dst []byte) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*CapUserData)(nil).SizeBytes()
+
+ dst = dst[:size*count]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(dst)))
+ return size * count, nil
+}
+
+// UnmarshalUnsafeCapUserDataSlice is like CapUserData.UnmarshalUnsafe, but for a []CapUserData.
+func UnmarshalUnsafeCapUserDataSlice(dst []CapUserData, src []byte) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*CapUserData)(nil).SizeBytes()
+
+ src = src[:(size*count)]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(src)))
+ return size * count, nil
+}
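+
+// These helpers line up with capget(2)/capset(2), which under
+// _LINUX_CAPABILITY_VERSION_3 exchange two CapUserData records (the low and
+// high 32 bits of each capability set). A hedged sketch of the copy-in side,
+// with `cc` and `dataAddr` standing in for the caller's copy context and the
+// userspace pointer:
+//
+//	var data [2]CapUserData
+//	if _, err := CopyCapUserDataSliceIn(cc, dataAddr, data[:]); err != nil {
+//		return err
+//	}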
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (c *CapUserHeader) SizeBytes() int {
+ return 8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (c *CapUserHeader) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(c.Version))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(c.Pid))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (c *CapUserHeader) UnmarshalBytes(src []byte) {
+ c.Version = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ c.Pid = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (c *CapUserHeader) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (c *CapUserHeader) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(c), uintptr(c.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (c *CapUserHeader) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(c), unsafe.Pointer(&src[0]), uintptr(c.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (c *CapUserHeader) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (c *CapUserHeader) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return c.CopyOutN(cc, addr, c.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (c *CapUserHeader) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (c *CapUserHeader) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
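+
+// CapUserHeader is the fixed 8-byte header (Version, Pid) that precedes the
+// CapUserData payload in capget(2)/capset(2). Linux writes its preferred
+// version back into this header when the requested Version is unsupported,
+// which is why callers typically both copy the header in and copy it back out.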
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (e *ElfHeader64) SizeBytes() int {
+ return 48 +
+ 1*16
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (e *ElfHeader64) MarshalBytes(dst []byte) {
+ for idx := 0; idx < 16; idx++ {
+ dst[0] = byte(e.Ident[idx])
+ dst = dst[1:]
+ }
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(e.Type))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(e.Machine))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(e.Version))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(e.Entry))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(e.Phoff))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(e.Shoff))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(e.Flags))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(e.Ehsize))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(e.Phentsize))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(e.Phnum))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(e.Shentsize))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(e.Shnum))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(e.Shstrndx))
+ dst = dst[2:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (e *ElfHeader64) UnmarshalBytes(src []byte) {
+ for idx := 0; idx < 16; idx++ {
+ e.Ident[idx] = src[0]
+ src = src[1:]
+ }
+ e.Type = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ e.Machine = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ e.Version = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ e.Entry = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ e.Phoff = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ e.Shoff = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ e.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ e.Ehsize = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ e.Phentsize = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ e.Phnum = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ e.Shentsize = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ e.Shnum = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ e.Shstrndx = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (e *ElfHeader64) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (e *ElfHeader64) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(e), uintptr(e.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (e *ElfHeader64) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(e), unsafe.Pointer(&src[0]), uintptr(e.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (e *ElfHeader64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(e)))
+ hdr.Len = e.SizeBytes()
+ hdr.Cap = e.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that e
+ // must live until the use above.
+ runtime.KeepAlive(e) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (e *ElfHeader64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return e.CopyOutN(cc, addr, e.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (e *ElfHeader64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(e)))
+ hdr.Len = e.SizeBytes()
+ hdr.Cap = e.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that e
+ // must live until the use above.
+ runtime.KeepAlive(e) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (e *ElfHeader64) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(e)))
+ hdr.Len = e.SizeBytes()
+ hdr.Cap = e.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that e
+ // must live until the use above.
+ runtime.KeepAlive(e) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
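+
+// ElfHeader64 mirrors the 64-byte Elf64_Ehdr layout (the 16-byte e_ident
+// array followed by 48 bytes of fixed-width fields), so WriteTo can stream a
+// header directly into any io.Writer. A hedged sketch, assuming a populated
+// `ehdr` value:
+//
+//	var out bytes.Buffer
+//	if _, err := ehdr.WriteTo(&out); err != nil {
+//		return err
+//	}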
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (e *ElfProg64) SizeBytes() int {
+ return 56
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (e *ElfProg64) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(e.Type))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(e.Flags))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(e.Off))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(e.Vaddr))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(e.Paddr))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(e.Filesz))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(e.Memsz))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(e.Align))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (e *ElfProg64) UnmarshalBytes(src []byte) {
+ e.Type = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ e.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ e.Off = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ e.Vaddr = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ e.Paddr = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ e.Filesz = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ e.Memsz = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ e.Align = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (e *ElfProg64) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (e *ElfProg64) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(e), uintptr(e.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (e *ElfProg64) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(e), unsafe.Pointer(&src[0]), uintptr(e.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (e *ElfProg64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(e)))
+ hdr.Len = e.SizeBytes()
+ hdr.Cap = e.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that e
+ // must live until the use above.
+ runtime.KeepAlive(e) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (e *ElfProg64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return e.CopyOutN(cc, addr, e.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (e *ElfProg64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(e)))
+ hdr.Len = e.SizeBytes()
+ hdr.Cap = e.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that e
+ // must live until the use above.
+ runtime.KeepAlive(e) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (e *ElfProg64) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(e)))
+ hdr.Len = e.SizeBytes()
+ hdr.Cap = e.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that e
+ // must live until the use above.
+ runtime.KeepAlive(e) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (e *ElfSection64) SizeBytes() int {
+ return 64
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (e *ElfSection64) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(e.Name))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(e.Type))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(e.Flags))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(e.Addr))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(e.Off))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(e.Size))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(e.Link))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(e.Info))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(e.Addralign))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(e.Entsize))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (e *ElfSection64) UnmarshalBytes(src []byte) {
+ e.Name = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ e.Type = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ e.Flags = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ e.Addr = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ e.Off = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ e.Size = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ e.Link = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ e.Info = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ e.Addralign = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ e.Entsize = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (e *ElfSection64) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (e *ElfSection64) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(e), uintptr(e.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (e *ElfSection64) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(e), unsafe.Pointer(&src[0]), uintptr(e.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (e *ElfSection64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(e)))
+ hdr.Len = e.SizeBytes()
+ hdr.Cap = e.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that e
+ // must live until the use above.
+ runtime.KeepAlive(e) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (e *ElfSection64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return e.CopyOutN(cc, addr, e.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (e *ElfSection64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(e)))
+ hdr.Len = e.SizeBytes()
+ hdr.Cap = e.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that e
+ // must live until the use above.
+ runtime.KeepAlive(e) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (e *ElfSection64) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(e)))
+ hdr.Len = e.SizeBytes()
+ hdr.Cap = e.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that e
+ // must live until the use above.
+ runtime.KeepAlive(e) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SockErrCMsgIPv4) SizeBytes() int {
+ return 0 +
+ (*SockExtendedErr)(nil).SizeBytes() +
+ (*SockAddrInet)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SockErrCMsgIPv4) MarshalBytes(dst []byte) {
+ s.SockExtendedErr.MarshalBytes(dst[:s.SockExtendedErr.SizeBytes()])
+ dst = dst[s.SockExtendedErr.SizeBytes():]
+ s.Offender.MarshalBytes(dst[:s.Offender.SizeBytes()])
+ dst = dst[s.Offender.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SockErrCMsgIPv4) UnmarshalBytes(src []byte) {
+ s.SockExtendedErr.UnmarshalBytes(src[:s.SockExtendedErr.SizeBytes()])
+ src = src[s.SockExtendedErr.SizeBytes():]
+ s.Offender.UnmarshalBytes(src[:s.Offender.SizeBytes()])
+ src = src[s.Offender.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SockErrCMsgIPv4) Packed() bool {
+ return s.Offender.Packed() && s.SockExtendedErr.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SockErrCMsgIPv4) MarshalUnsafe(dst []byte) {
+ if s.Offender.Packed() && s.SockExtendedErr.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+ } else {
+ // Type SockErrCMsgIPv4 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ s.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SockErrCMsgIPv4) UnmarshalUnsafe(src []byte) {
+ if s.Offender.Packed() && s.SockExtendedErr.Packed() {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+ } else {
+ // Type SockErrCMsgIPv4 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ s.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SockErrCMsgIPv4) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !s.Offender.Packed() || !s.SockExtendedErr.Packed() {
+ // Type SockErrCMsgIPv4 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SockErrCMsgIPv4) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SockErrCMsgIPv4) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !s.Offender.Packed() || !s.SockExtendedErr.Packed() {
+ // Type SockErrCMsgIPv4 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ s.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SockErrCMsgIPv4) WriteTo(writer io.Writer) (int64, error) {
+ if !s.Offender.Packed() || !s.SockExtendedErr.Packed() {
+ // Type SockErrCMsgIPv4 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
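+
+// For composite types such as SockErrCMsgIPv4 the generated code carries two
+// paths: when every embedded field reports Packed(), the whole struct is
+// moved with a single memmove; otherwise it falls back to the field-by-field
+// MarshalBytes/UnmarshalBytes routines through a scratch buffer. Both paths
+// return the byte count reported by the underlying copy, so callers can
+// detect short copies either way.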
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SockErrCMsgIPv6) SizeBytes() int {
+ return 0 +
+ (*SockExtendedErr)(nil).SizeBytes() +
+ (*SockAddrInet6)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SockErrCMsgIPv6) MarshalBytes(dst []byte) {
+ s.SockExtendedErr.MarshalBytes(dst[:s.SockExtendedErr.SizeBytes()])
+ dst = dst[s.SockExtendedErr.SizeBytes():]
+ s.Offender.MarshalBytes(dst[:s.Offender.SizeBytes()])
+ dst = dst[s.Offender.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SockErrCMsgIPv6) UnmarshalBytes(src []byte) {
+ s.SockExtendedErr.UnmarshalBytes(src[:s.SockExtendedErr.SizeBytes()])
+ src = src[s.SockExtendedErr.SizeBytes():]
+ s.Offender.UnmarshalBytes(src[:s.Offender.SizeBytes()])
+ src = src[s.Offender.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SockErrCMsgIPv6) Packed() bool {
+ return s.Offender.Packed() && s.SockExtendedErr.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SockErrCMsgIPv6) MarshalUnsafe(dst []byte) {
+ if s.Offender.Packed() && s.SockExtendedErr.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+ } else {
+ // Type SockErrCMsgIPv6 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ s.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SockErrCMsgIPv6) UnmarshalUnsafe(src []byte) {
+ if s.Offender.Packed() && s.SockExtendedErr.Packed() {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+ } else {
+ // Type SockErrCMsgIPv6 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ s.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SockErrCMsgIPv6) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !s.Offender.Packed() || !s.SockExtendedErr.Packed() {
+ // Type SockErrCMsgIPv6 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SockErrCMsgIPv6) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SockErrCMsgIPv6) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !s.Offender.Packed() || !s.SockExtendedErr.Packed() {
+ // Type SockErrCMsgIPv6 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ s.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SockErrCMsgIPv6) WriteTo(writer io.Writer) (int64, error) {
+ if !s.Offender.Packed() || !s.SockExtendedErr.Packed() {
+ // Type SockErrCMsgIPv6 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SockExtendedErr) SizeBytes() int {
+ return 16
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SockExtendedErr) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Errno))
+ dst = dst[4:]
+ dst[0] = byte(s.Origin)
+ dst = dst[1:]
+ dst[0] = byte(s.Type)
+ dst = dst[1:]
+ dst[0] = byte(s.Code)
+ dst = dst[1:]
+ dst[0] = byte(s.Pad)
+ dst = dst[1:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Info))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Data))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SockExtendedErr) UnmarshalBytes(src []byte) {
+ s.Errno = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Origin = uint8(src[0])
+ src = src[1:]
+ s.Type = uint8(src[0])
+ src = src[1:]
+ s.Code = uint8(src[0])
+ src = src[1:]
+ s.Pad = uint8(src[0])
+ src = src[1:]
+ s.Info = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Data = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SockExtendedErr) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SockExtendedErr) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SockExtendedErr) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SockExtendedErr) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SockExtendedErr) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SockExtendedErr) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SockExtendedErr) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FOwnerEx) SizeBytes() int {
+ return 8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FOwnerEx) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Type))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.PID))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FOwnerEx) UnmarshalBytes(src []byte) {
+ f.Type = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.PID = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FOwnerEx) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FOwnerEx) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(f), uintptr(f.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FOwnerEx) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(f), unsafe.Pointer(&src[0]), uintptr(f.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FOwnerEx) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FOwnerEx) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FOwnerEx) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FOwnerEx) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *Flock) SizeBytes() int {
+ return 24 +
+ 1*4 +
+ 1*4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *Flock) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(f.Type))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(f.Whence))
+ dst = dst[2:]
+ // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0}
+ dst = dst[1*(4):]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Start))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Len))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.PID))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0}
+ dst = dst[1*(4):]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *Flock) UnmarshalBytes(src []byte) {
+ f.Type = int16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ f.Whence = int16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ // Padding: ~ copy([4]byte(f._), src[:sizeof(byte)*4])
+ src = src[1*(4):]
+ f.Start = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Len = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.PID = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: ~ copy([4]byte(f._), src[:sizeof(byte)*4])
+ src = src[1*(4):]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *Flock) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *Flock) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(f), uintptr(f.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *Flock) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(f), unsafe.Pointer(&src[0]), uintptr(f.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *Flock) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *Flock) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *Flock) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *Flock) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *Statx) SizeBytes() int {
+ return 80 +
+ (*StatxTimestamp)(nil).SizeBytes() +
+ (*StatxTimestamp)(nil).SizeBytes() +
+ (*StatxTimestamp)(nil).SizeBytes() +
+ (*StatxTimestamp)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *Statx) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Mask))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Blksize))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Attributes))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Nlink))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.UID))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.GID))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Mode))
+ dst = dst[2:]
+ // Padding: dst[:sizeof(uint16)] ~= uint16(0)
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Ino))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Size))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Blocks))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.AttributesMask))
+ dst = dst[8:]
+ s.Atime.MarshalBytes(dst[:s.Atime.SizeBytes()])
+ dst = dst[s.Atime.SizeBytes():]
+ s.Btime.MarshalBytes(dst[:s.Btime.SizeBytes()])
+ dst = dst[s.Btime.SizeBytes():]
+ s.Ctime.MarshalBytes(dst[:s.Ctime.SizeBytes()])
+ dst = dst[s.Ctime.SizeBytes():]
+ s.Mtime.MarshalBytes(dst[:s.Mtime.SizeBytes()])
+ dst = dst[s.Mtime.SizeBytes():]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.RdevMajor))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.RdevMinor))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.DevMajor))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.DevMinor))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *Statx) UnmarshalBytes(src []byte) {
+ s.Mask = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Blksize = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Attributes = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Nlink = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.UID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.GID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Mode = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ // Padding: var _ uint16 ~= src[:sizeof(uint16)]
+ src = src[2:]
+ s.Ino = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Size = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Blocks = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.AttributesMask = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Atime.UnmarshalBytes(src[:s.Atime.SizeBytes()])
+ src = src[s.Atime.SizeBytes():]
+ s.Btime.UnmarshalBytes(src[:s.Btime.SizeBytes()])
+ src = src[s.Btime.SizeBytes():]
+ s.Ctime.UnmarshalBytes(src[:s.Ctime.SizeBytes()])
+ src = src[s.Ctime.SizeBytes():]
+ s.Mtime.UnmarshalBytes(src[:s.Mtime.SizeBytes()])
+ src = src[s.Mtime.SizeBytes():]
+ s.RdevMajor = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.RdevMinor = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.DevMajor = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.DevMinor = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *Statx) Packed() bool {
+ return s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *Statx) MarshalUnsafe(dst []byte) {
+ if s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+ } else {
+ // Type Statx doesn't have a packed layout in memory, fall back to MarshalBytes.
+ s.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *Statx) UnmarshalUnsafe(src []byte) {
+ if s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+ } else {
+ // Type Statx doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ s.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *Statx) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !s.Atime.Packed() || !s.Btime.Packed() || !s.Ctime.Packed() || !s.Mtime.Packed() {
+ // Type Statx doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *Statx) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *Statx) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !s.Atime.Packed() || !s.Btime.Packed() || !s.Ctime.Packed() || !s.Mtime.Packed() {
+ // Type Statx doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ s.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *Statx) WriteTo(writer io.Writer) (int64, error) {
+ if !s.Atime.Packed() || !s.Btime.Packed() || !s.Ctime.Packed() || !s.Mtime.Packed() {
+ // Type Statx doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
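+
+// A hedged sketch of the usual copy-out path for statx(2), assuming a
+// populated `statx` value, a marshal.CopyContext `cc`, and a user buffer
+// address `bufAddr` (names chosen for illustration only):
+//
+//	if _, err := statx.CopyOut(cc, bufAddr); err != nil {
+//		return err
+//	}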
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *Statfs) SizeBytes() int {
+ return 80 +
+ 4*2 +
+ 8*4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *Statfs) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Type))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.BlockSize))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Blocks))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.BlocksFree))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.BlocksAvailable))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Files))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.FilesFree))
+ dst = dst[8:]
+ for idx := 0; idx < 2; idx++ {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.FSID[idx]))
+ dst = dst[4:]
+ }
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.NameLength))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.FragmentSize))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Flags))
+ dst = dst[8:]
+ for idx := 0; idx < 4; idx++ {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Spare[idx]))
+ dst = dst[8:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *Statfs) UnmarshalBytes(src []byte) {
+ s.Type = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.BlockSize = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Blocks = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.BlocksFree = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.BlocksAvailable = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Files = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.FilesFree = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ for idx := 0; idx < 2; idx++ {
+ s.FSID[idx] = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ }
+ s.NameLength = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.FragmentSize = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Flags = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ for idx := 0; idx < 4; idx++ {
+ s.Spare[idx] = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *Statfs) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *Statfs) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *Statfs) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *Statfs) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *Statfs) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *Statfs) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *Statfs) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
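+// Illustrative sketch (not generated code): a statfs(2)-style handler in the
+// sentry could populate a Statfs and copy it back to userspace with the
+// generated CopyOut, where "t" is a kernel.Task (which satisfies
+// marshal.CopyContext) and "bufAddr" is the hypothetical user address:
+//
+//	var s linux.Statfs
+//	s.Type = linux.TMPFS_MAGIC
+//	s.BlockSize = hostarch.PageSize
+//	if _, err := s.CopyOut(t, bufAddr); err != nil {
+//		return err
+//	}
+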
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FUSEAttr) SizeBytes() int {
+ return 88
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FUSEAttr) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Ino))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Size))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Blocks))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Atime))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Mtime))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Ctime))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.AtimeNsec))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.MtimeNsec))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.CtimeNsec))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Nlink))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.UID))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.GID))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.BlkSize))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FUSEAttr) UnmarshalBytes(src []byte) {
+ f.Ino = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Size = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Blocks = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Atime = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Mtime = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Ctime = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.AtimeNsec = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.MtimeNsec = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.CtimeNsec = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Mode = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Nlink = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.UID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.GID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Rdev = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.BlkSize = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FUSEAttr) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FUSEAttr) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(f), uintptr(f.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FUSEAttr) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(f), unsafe.Pointer(&src[0]), uintptr(f.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FUSEAttr) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FUSEAttr) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FUSEAttr) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FUSEAttr) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
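+// Illustrative sketch (not generated code): FUSEAttr reports Packed() == true,
+// so the memmove-based fast path and the field-by-field path are
+// interchangeable; a round trip for a hypothetical "attr" looks like:
+//
+//	buf := make([]byte, attr.SizeBytes())
+//	attr.MarshalUnsafe(buf) // single memmove
+//	var decoded linux.FUSEAttr
+//	decoded.UnmarshalBytes(buf) // field-by-field decode; same contents
+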
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FUSECreateMeta) SizeBytes() int {
+ return 16
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FUSECreateMeta) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FUSECreateMeta) UnmarshalBytes(src []byte) {
+ f.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Mode = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Umask = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FUSECreateMeta) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FUSECreateMeta) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(f), uintptr(f.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FUSECreateMeta) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(f), unsafe.Pointer(&src[0]), uintptr(f.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FUSECreateMeta) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FUSECreateMeta) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FUSECreateMeta) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FUSECreateMeta) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FUSEDirentMeta) SizeBytes() int {
+ return 24
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FUSEDirentMeta) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Ino))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Off))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.NameLen))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Type))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FUSEDirentMeta) UnmarshalBytes(src []byte) {
+ f.Ino = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Off = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.NameLen = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Type = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FUSEDirentMeta) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FUSEDirentMeta) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(f), uintptr(f.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FUSEDirentMeta) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(f), unsafe.Pointer(&src[0]), uintptr(f.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FUSEDirentMeta) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FUSEDirentMeta) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FUSEDirentMeta) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FUSEDirentMeta) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
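+// Illustrative sketch (not generated code, layout per the FUSE wire format):
+// in a FUSE_READDIR response each entry is a FUSEDirentMeta immediately
+// followed by NameLen bytes of name, padded to an 8-byte boundary, so a
+// reader might advance through a hypothetical payload "buf" roughly as:
+//
+//	var meta linux.FUSEDirentMeta
+//	meta.UnmarshalBytes(buf[:meta.SizeBytes()])
+//	name := buf[meta.SizeBytes() : meta.SizeBytes()+int(meta.NameLen)]
+//	next := (meta.SizeBytes() + int(meta.NameLen) + 7) &^ 7 // 8-byte align
+//	buf = buf[next:]
+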
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FUSEEntryOut) SizeBytes() int {
+ return 40 +
+ (*FUSEAttr)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FUSEEntryOut) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.NodeID))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Generation))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.EntryValid))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.AttrValid))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.EntryValidNSec))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.AttrValidNSec))
+ dst = dst[4:]
+ f.Attr.MarshalBytes(dst[:f.Attr.SizeBytes()])
+ dst = dst[f.Attr.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FUSEEntryOut) UnmarshalBytes(src []byte) {
+ f.NodeID = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Generation = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.EntryValid = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.AttrValid = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.EntryValidNSec = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.AttrValidNSec = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Attr.UnmarshalBytes(src[:f.Attr.SizeBytes()])
+ src = src[f.Attr.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FUSEEntryOut) Packed() bool {
+ return f.Attr.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FUSEEntryOut) MarshalUnsafe(dst []byte) {
+ if f.Attr.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(f), uintptr(f.SizeBytes()))
+ } else {
+ // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to MarshalBytes.
+ f.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FUSEEntryOut) UnmarshalUnsafe(src []byte) {
+ if f.Attr.Packed() {
+ gohacks.Memmove(unsafe.Pointer(f), unsafe.Pointer(&src[0]), uintptr(f.SizeBytes()))
+ } else {
+ // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ f.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FUSEEntryOut) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !f.Attr.Packed() {
+ // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ f.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FUSEEntryOut) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FUSEEntryOut) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !f.Attr.Packed() {
+ // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ f.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FUSEEntryOut) WriteTo(writer io.Writer) (int64, error) {
+ if !f.Attr.Packed() {
+ // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, f.SizeBytes())
+ f.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
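+// Note: because FUSEEntryOut embeds a FUSEAttr, its Packed(), MarshalUnsafe
+// and Copy* methods above delegate the decision to Attr.Packed(); only when
+// the embedded type is not packed do they fall back to the slower
+// field-by-field MarshalBytes/UnmarshalBytes path through a scratch buffer.
+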
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FUSEGetAttrIn) SizeBytes() int {
+ return 16
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FUSEGetAttrIn) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.GetAttrFlags))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FUSEGetAttrIn) UnmarshalBytes(src []byte) {
+ f.GetAttrFlags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+ f.Fh = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FUSEGetAttrIn) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FUSEGetAttrIn) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(f), uintptr(f.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FUSEGetAttrIn) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(f), unsafe.Pointer(&src[0]), uintptr(f.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FUSEGetAttrIn) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FUSEGetAttrIn) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FUSEGetAttrIn) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FUSEGetAttrIn) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FUSEGetAttrOut) SizeBytes() int {
+ return 16 +
+ (*FUSEAttr)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FUSEGetAttrOut) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.AttrValid))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.AttrValidNsec))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+ f.Attr.MarshalBytes(dst[:f.Attr.SizeBytes()])
+ dst = dst[f.Attr.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FUSEGetAttrOut) UnmarshalBytes(src []byte) {
+ f.AttrValid = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.AttrValidNsec = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+ f.Attr.UnmarshalBytes(src[:f.Attr.SizeBytes()])
+ src = src[f.Attr.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FUSEGetAttrOut) Packed() bool {
+ return f.Attr.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FUSEGetAttrOut) MarshalUnsafe(dst []byte) {
+ if f.Attr.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(f), uintptr(f.SizeBytes()))
+ } else {
+ // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to MarshalBytes.
+ f.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FUSEGetAttrOut) UnmarshalUnsafe(src []byte) {
+ if f.Attr.Packed() {
+ gohacks.Memmove(unsafe.Pointer(f), unsafe.Pointer(&src[0]), uintptr(f.SizeBytes()))
+ } else {
+ // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ f.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FUSEGetAttrOut) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !f.Attr.Packed() {
+ // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ f.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FUSEGetAttrOut) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FUSEGetAttrOut) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !f.Attr.Packed() {
+ // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ f.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FUSEGetAttrOut) WriteTo(writer io.Writer) (int64, error) {
+ if !f.Attr.Packed() {
+ // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, f.SizeBytes())
+ f.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FUSEHeaderIn) SizeBytes() int {
+ return 28 +
+ (*FUSEOpcode)(nil).SizeBytes() +
+ (*FUSEOpID)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FUSEHeaderIn) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Len))
+ dst = dst[4:]
+ f.Opcode.MarshalBytes(dst[:f.Opcode.SizeBytes()])
+ dst = dst[f.Opcode.SizeBytes():]
+ f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()])
+ dst = dst[f.Unique.SizeBytes():]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.NodeID))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.UID))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.GID))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.PID))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FUSEHeaderIn) UnmarshalBytes(src []byte) {
+ f.Len = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Opcode.UnmarshalBytes(src[:f.Opcode.SizeBytes()])
+ src = src[f.Opcode.SizeBytes():]
+ f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()])
+ src = src[f.Unique.SizeBytes():]
+ f.NodeID = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.UID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.GID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.PID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FUSEHeaderIn) Packed() bool {
+ return f.Opcode.Packed() && f.Unique.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FUSEHeaderIn) MarshalUnsafe(dst []byte) {
+ if f.Opcode.Packed() && f.Unique.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(f), uintptr(f.SizeBytes()))
+ } else {
+ // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes.
+ f.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FUSEHeaderIn) UnmarshalUnsafe(src []byte) {
+ if f.Opcode.Packed() && f.Unique.Packed() {
+ gohacks.Memmove(unsafe.Pointer(f), unsafe.Pointer(&src[0]), uintptr(f.SizeBytes()))
+ } else {
+ // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ f.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FUSEHeaderIn) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !f.Opcode.Packed() || !f.Unique.Packed() {
+ // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ f.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FUSEHeaderIn) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FUSEHeaderIn) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !f.Opcode.Packed() || !f.Unique.Packed() {
+ // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ f.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FUSEHeaderIn) WriteTo(writer io.Writer) (int64, error) {
+ if !f.Opcode.Packed() || !f.Unique.Packed() {
+ // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, f.SizeBytes())
+ f.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
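+// Illustrative sketch (not generated code): a FUSE request on the wire is a
+// FUSEHeaderIn followed by the opcode-specific payload, so a connection layer
+// might frame a GETATTR request roughly as follows ("unique", "nodeID" and
+// "payload" are hypothetical):
+//
+//	var hdr linux.FUSEHeaderIn
+//	hdr.Opcode = linux.FUSE_GETATTR
+//	hdr.Unique = unique
+//	hdr.NodeID = nodeID
+//	hdr.Len = uint32(hdr.SizeBytes() + payload.SizeBytes())
+//	buf := make([]byte, hdr.Len)
+//	hdr.MarshalBytes(buf[:hdr.SizeBytes()])
+//	payload.MarshalBytes(buf[hdr.SizeBytes():])
+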
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FUSEHeaderOut) SizeBytes() int {
+ return 8 +
+ (*FUSEOpID)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FUSEHeaderOut) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Len))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Error))
+ dst = dst[4:]
+ f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()])
+ dst = dst[f.Unique.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FUSEHeaderOut) UnmarshalBytes(src []byte) {
+ f.Len = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Error = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()])
+ src = src[f.Unique.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FUSEHeaderOut) Packed() bool {
+ return f.Unique.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FUSEHeaderOut) MarshalUnsafe(dst []byte) {
+ if f.Unique.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(f), uintptr(f.SizeBytes()))
+ } else {
+ // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes.
+ f.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FUSEHeaderOut) UnmarshalUnsafe(src []byte) {
+ if f.Unique.Packed() {
+ gohacks.Memmove(unsafe.Pointer(f), unsafe.Pointer(&src[0]), uintptr(f.SizeBytes()))
+ } else {
+ // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ f.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FUSEHeaderOut) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !f.Unique.Packed() {
+ // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ f.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FUSEHeaderOut) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FUSEHeaderOut) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !f.Unique.Packed() {
+ // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ f.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FUSEHeaderOut) WriteTo(writer io.Writer) (int64, error) {
+ if !f.Unique.Packed() {
+ // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, f.SizeBytes())
+ f.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FUSEInitIn) SizeBytes() int {
+ return 16
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FUSEInitIn) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Major))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Minor))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.MaxReadahead))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FUSEInitIn) UnmarshalBytes(src []byte) {
+ f.Major = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Minor = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.MaxReadahead = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FUSEInitIn) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FUSEInitIn) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(f), uintptr(f.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FUSEInitIn) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(f), unsafe.Pointer(&src[0]), uintptr(f.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FUSEInitIn) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FUSEInitIn) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FUSEInitIn) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FUSEInitIn) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FUSEInitOut) SizeBytes() int {
+ return 32 +
+ 4*8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FUSEInitOut) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Major))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Minor))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.MaxReadahead))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(f.MaxBackground))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(f.CongestionThreshold))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.MaxWrite))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.TimeGran))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(f.MaxPages))
+ dst = dst[2:]
+ // Padding: dst[:sizeof(uint16)] ~= uint16(0)
+ dst = dst[2:]
+ // Padding: dst[:sizeof(uint32)*8] ~= [8]uint32{0}
+ dst = dst[4*(8):]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FUSEInitOut) UnmarshalBytes(src []byte) {
+ f.Major = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Minor = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.MaxReadahead = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.MaxBackground = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ f.CongestionThreshold = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ f.MaxWrite = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.TimeGran = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.MaxPages = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ // Padding: var _ uint16 ~= src[:sizeof(uint16)]
+ src = src[2:]
+ // Padding: ~ copy([8]uint32(f._), src[:sizeof(uint32)*8])
+ src = src[4*(8):]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FUSEInitOut) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FUSEInitOut) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(f), uintptr(f.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FUSEInitOut) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(f), unsafe.Pointer(&src[0]), uintptr(f.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FUSEInitOut) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FUSEInitOut) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FUSEInitOut) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FUSEInitOut) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
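+// Illustrative sketch (not generated code): the reply to FUSE_INIT arrives as
+// a FUSEHeaderOut followed by a FUSEInitOut, so decoding the negotiated
+// parameters from a hypothetical reply buffer "buf" looks roughly like:
+//
+//	var hdr linux.FUSEHeaderOut
+//	var out linux.FUSEInitOut
+//	hdr.UnmarshalBytes(buf[:hdr.SizeBytes()])
+//	if hdr.Error == 0 {
+//		out.UnmarshalBytes(buf[hdr.SizeBytes() : hdr.SizeBytes()+out.SizeBytes()])
+//	}
+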
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FUSEMkdirMeta) SizeBytes() int {
+ return 8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FUSEMkdirMeta) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FUSEMkdirMeta) UnmarshalBytes(src []byte) {
+ f.Mode = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Umask = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FUSEMkdirMeta) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FUSEMkdirMeta) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(f), uintptr(f.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FUSEMkdirMeta) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(f), unsafe.Pointer(&src[0]), uintptr(f.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FUSEMkdirMeta) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FUSEMkdirMeta) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FUSEMkdirMeta) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FUSEMkdirMeta) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FUSEMknodMeta) SizeBytes() int {
+ return 16
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FUSEMknodMeta) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FUSEMknodMeta) UnmarshalBytes(src []byte) {
+ f.Mode = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Rdev = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Umask = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FUSEMknodMeta) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FUSEMknodMeta) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(f), uintptr(f.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FUSEMknodMeta) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(f), unsafe.Pointer(&src[0]), uintptr(f.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FUSEMknodMeta) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FUSEMknodMeta) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FUSEMknodMeta) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FUSEMknodMeta) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (f *FUSEOpID) SizeBytes() int {
+ return 8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FUSEOpID) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(*f))
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FUSEOpID) UnmarshalBytes(src []byte) {
+ *f = FUSEOpID(uint64(hostarch.ByteOrder.Uint64(src[:8])))
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FUSEOpID) Packed() bool {
+ // Scalar newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FUSEOpID) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(f), uintptr(f.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FUSEOpID) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(f), unsafe.Pointer(&src[0]), uintptr(f.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FUSEOpID) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FUSEOpID) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FUSEOpID) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FUSEOpID) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
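+// Illustrative sketch (not generated code): FUSEOpID is a scalar newtype, so
+// marshalling is just a fixed-width host-byte-order encode/decode of the
+// underlying uint64:
+//
+//	var id linux.FUSEOpID = 42
+//	buf := make([]byte, id.SizeBytes())
+//	id.MarshalBytes(buf) // hostarch.ByteOrder encoding of 42
+//	var back linux.FUSEOpID
+//	back.UnmarshalBytes(buf) // back == 42
+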
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (f *FUSEOpcode) SizeBytes() int {
+ return 4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FUSEOpcode) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(*f))
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FUSEOpcode) UnmarshalBytes(src []byte) {
+ *f = FUSEOpcode(uint32(hostarch.ByteOrder.Uint32(src[:4])))
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FUSEOpcode) Packed() bool {
+ // Scalar newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FUSEOpcode) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(f), uintptr(f.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FUSEOpcode) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(f), unsafe.Pointer(&src[0]), uintptr(f.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FUSEOpcode) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FUSEOpcode) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FUSEOpcode) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FUSEOpcode) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
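WriteTo gives every generated type an io.WriterTo implementation backed by the same aliasing trick, so these values can be streamed into any io.Writer. A hedged sketch, again as if in this package and assuming the standard bytes package is imported; the helper name is hypothetical and the opcode value is a placeholder, not a specific FUSE operation:

// encodeOpcode is an illustrative helper: it streams a FUSEOpcode
// through the generated WriteTo and returns the encoded bytes.
func encodeOpcode(op FUSEOpcode) ([]byte, error) {
	var b bytes.Buffer
	if _, err := op.WriteTo(&b); err != nil {
		return nil, err
	}
	// On success b.Len() equals op.SizeBytes(), i.e. 4.
	return b.Bytes(), nil
}
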
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FUSEOpenIn) SizeBytes() int {
+ return 8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FUSEOpenIn) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FUSEOpenIn) UnmarshalBytes(src []byte) {
+ f.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FUSEOpenIn) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FUSEOpenIn) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(f), uintptr(f.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FUSEOpenIn) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(f), unsafe.Pointer(&src[0]), uintptr(f.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FUSEOpenIn) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FUSEOpenIn) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FUSEOpenIn) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FUSEOpenIn) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
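Note that MarshalBytes above advances past the trailing padding word without writing it, so deterministic output depends on the caller providing a zeroed destination (make does that). A hedged sketch in the same package, with a hypothetical helper name and an arbitrary flag value:

// marshalOpenIn is illustrative only: it encodes a FUSEOpenIn and
// relies on the zeroed destination for the padding word.
func marshalOpenIn(flags uint32) []byte {
	in := FUSEOpenIn{Flags: flags}
	buf := make([]byte, in.SizeBytes()) // zeroed; padding stays zero
	in.MarshalBytes(buf)
	// buf[0:4] holds Flags in hostarch.ByteOrder; buf[4:8] is the
	// unnamed padding, which MarshalBytes skips rather than writes.
	return buf
}
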
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FUSEOpenOut) SizeBytes() int {
+ return 16
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FUSEOpenOut) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.OpenFlag))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FUSEOpenOut) UnmarshalBytes(src []byte) {
+ f.Fh = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.OpenFlag = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FUSEOpenOut) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FUSEOpenOut) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(f), uintptr(f.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FUSEOpenOut) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(f), unsafe.Pointer(&src[0]), uintptr(f.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FUSEOpenOut) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FUSEOpenOut) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FUSEOpenOut) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FUSEOpenOut) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
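CopyOut always copies the full SizeBytes(); CopyOutN lets the caller truncate when the destination (or the FUSE payload being assembled) is shorter than the struct. An illustrative sketch in the same package; cc and addr come from the caller, and the helper name and clamping policy are assumptions, not anything this file defines:

// copyOpenOutPrefix is a hypothetical helper that copies at most limit
// bytes of a FUSEOpenOut to the user address addr.
func copyOpenOutPrefix(cc marshal.CopyContext, addr hostarch.Addr, out *FUSEOpenOut, limit int) (int, error) {
	if limit > out.SizeBytes() {
		limit = out.SizeBytes()
	}
	return out.CopyOutN(cc, addr, limit)
}
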
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FUSEReadIn) SizeBytes() int {
+ return 40
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FUSEReadIn) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Offset))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Size))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.ReadFlags))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FUSEReadIn) UnmarshalBytes(src []byte) {
+ f.Fh = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Offset = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Size = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.ReadFlags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.LockOwner = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FUSEReadIn) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FUSEReadIn) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(f), uintptr(f.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FUSEReadIn) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(f), unsafe.Pointer(&src[0]), uintptr(f.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FUSEReadIn) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FUSEReadIn) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FUSEReadIn) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FUSEReadIn) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
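As written above, UnmarshalUnsafe copies SizeBytes() bytes starting at &src[0] without re-checking len(src), so guaranteeing the slice is long enough is the caller's job. A hedged sketch in the same package (helper name is hypothetical; io is already imported by this file):

// parseReadIn is illustrative: it validates the length before handing
// the buffer to the generated UnmarshalUnsafe.
func parseReadIn(src []byte) (FUSEReadIn, error) {
	var in FUSEReadIn
	if len(src) < in.SizeBytes() {
		return in, io.ErrUnexpectedEOF
	}
	in.UnmarshalUnsafe(src)
	return in, nil
}
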
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FUSEReleaseIn) SizeBytes() int {
+ return 24
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FUSEReleaseIn) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.ReleaseFlags))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FUSEReleaseIn) UnmarshalBytes(src []byte) {
+ f.Fh = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.ReleaseFlags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.LockOwner = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FUSEReleaseIn) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FUSEReleaseIn) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(f), uintptr(f.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FUSEReleaseIn) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(f), unsafe.Pointer(&src[0]), uintptr(f.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FUSEReleaseIn) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FUSEReleaseIn) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FUSEReleaseIn) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FUSEReleaseIn) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FUSESetAttrIn) SizeBytes() int {
+ return 88
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FUSESetAttrIn) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Valid))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Size))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Atime))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Mtime))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Ctime))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.AtimeNsec))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.MtimeNsec))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.CtimeNsec))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.UID))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.GID))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FUSESetAttrIn) UnmarshalBytes(src []byte) {
+ f.Valid = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+ f.Fh = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Size = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.LockOwner = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Atime = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Mtime = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Ctime = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.AtimeNsec = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.MtimeNsec = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.CtimeNsec = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Mode = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+ f.UID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.GID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FUSESetAttrIn) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FUSESetAttrIn) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(f), uintptr(f.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FUSESetAttrIn) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(f), unsafe.Pointer(&src[0]), uintptr(f.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FUSESetAttrIn) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FUSESetAttrIn) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FUSESetAttrIn) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FUSESetAttrIn) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
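Because Packed() reports true, the unsafe path above is a plain memmove of the struct, and on gVisor's little-endian targets it should produce the same bytes as the field-by-field MarshalBytes, given zeroed buffers (MarshalBytes skips the padding words rather than writing them). A hedged sketch in the same package, assuming the bytes package is imported; field values are placeholders:

// compareSetAttrEncodings is illustrative: for a Packed() type the
// memmove-based encoder and the explicit one should agree.
func compareSetAttrEncodings() bool {
	in := FUSESetAttrIn{Valid: 1, Size: 4096} // placeholder values
	a := make([]byte, in.SizeBytes())
	b := make([]byte, in.SizeBytes())
	in.MarshalBytes(a)  // explicit hostarch.ByteOrder encoding
	in.MarshalUnsafe(b) // raw copy of the packed struct
	return bytes.Equal(a, b)
}
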
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FUSEWriteIn) SizeBytes() int {
+ return 40
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FUSEWriteIn) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Offset))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Size))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.WriteFlags))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FUSEWriteIn) UnmarshalBytes(src []byte) {
+ f.Fh = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Offset = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Size = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.WriteFlags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.LockOwner = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FUSEWriteIn) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FUSEWriteIn) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(f), uintptr(f.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FUSEWriteIn) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(f), unsafe.Pointer(&src[0]), uintptr(f.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FUSEWriteIn) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FUSEWriteIn) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FUSEWriteIn) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FUSEWriteIn) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
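In the FUSE wire format the fixed FUSEWriteIn header is immediately followed by the write payload, and Size describes that payload; the generated code only covers the fixed 40 bytes. A hedged sketch in the same package splitting a request body on that assumption (helper name is hypothetical):

// splitWrite is a hypothetical helper: it decodes the fixed header and
// returns the trailing payload, checking Size against what is present.
func splitWrite(body []byte) (FUSEWriteIn, []byte, error) {
	var in FUSEWriteIn
	if len(body) < in.SizeBytes() {
		return in, nil, io.ErrUnexpectedEOF
	}
	in.UnmarshalBytes(body[:in.SizeBytes()])
	payload := body[in.SizeBytes():]
	if uint32(len(payload)) < in.Size {
		return in, nil, io.ErrUnexpectedEOF
	}
	return in, payload[:in.Size], nil
}
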
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FUSEWriteOut) SizeBytes() int {
+ return 8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FUSEWriteOut) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Size))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FUSEWriteOut) UnmarshalBytes(src []byte) {
+ f.Size = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FUSEWriteOut) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FUSEWriteOut) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(f), uintptr(f.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FUSEWriteOut) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(f), unsafe.Pointer(&src[0]), uintptr(f.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FUSEWriteOut) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FUSEWriteOut) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FUSEWriteOut) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FUSEWriteOut) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (r *RobustListHead) SizeBytes() int {
+ return 24
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (r *RobustListHead) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.List))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.FutexOffset))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.ListOpPending))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (r *RobustListHead) UnmarshalBytes(src []byte) {
+ r.List = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ r.FutexOffset = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ r.ListOpPending = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (r *RobustListHead) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (r *RobustListHead) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(r), uintptr(r.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (r *RobustListHead) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(r), unsafe.Pointer(&src[0]), uintptr(r.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (r *RobustListHead) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(r)))
+ hdr.Len = r.SizeBytes()
+ hdr.Cap = r.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that r
+ // must live until the use above.
+ runtime.KeepAlive(r) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (r *RobustListHead) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return r.CopyOutN(cc, addr, r.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (r *RobustListHead) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(r)))
+ hdr.Len = r.SizeBytes()
+ hdr.Cap = r.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that r
+ // must live until the use above.
+ runtime.KeepAlive(r) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (r *RobustListHead) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(r)))
+ hdr.Len = r.SizeBytes()
+ hdr.Cap = r.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that r
+ // must live until the use above.
+ runtime.KeepAlive(r) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
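CopyIn is how the sentry pulls one of these structures out of application memory; for RobustListHead that would be the address a task registered via set_robust_list(2). An illustrative sketch in the same package; cc is whatever marshal.CopyContext the caller already has (typically the task), and the helper name is hypothetical:

// readRobustList is illustrative: it copies a RobustListHead in from
// the given user address.
func readRobustList(cc marshal.CopyContext, addr hostarch.Addr) (RobustListHead, error) {
	var head RobustListHead
	if _, err := head.CopyIn(cc, addr); err != nil {
		return RobustListHead{}, err
	}
	return head, nil
}
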
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (d *DigestMetadata) SizeBytes() int {
+ return 4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (d *DigestMetadata) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(d.DigestAlgorithm))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(d.DigestSize))
+ dst = dst[2:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (d *DigestMetadata) UnmarshalBytes(src []byte) {
+ d.DigestAlgorithm = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ d.DigestSize = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (d *DigestMetadata) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (d *DigestMetadata) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(d), uintptr(d.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (d *DigestMetadata) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(d), unsafe.Pointer(&src[0]), uintptr(d.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (d *DigestMetadata) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(d)))
+ hdr.Len = d.SizeBytes()
+ hdr.Cap = d.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that d
+ // must live until the use above.
+ runtime.KeepAlive(d) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (d *DigestMetadata) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return d.CopyOutN(cc, addr, d.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (d *DigestMetadata) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(d)))
+ hdr.Len = d.SizeBytes()
+ hdr.Cap = d.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that d
+ // must live until the use above.
+ runtime.KeepAlive(d) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (d *DigestMetadata) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(d)))
+ hdr.Len = d.SizeBytes()
+ hdr.Cap = d.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that d
+ // must live until the use above.
+ runtime.KeepAlive(d) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *IPCPerm) SizeBytes() int {
+ return 48
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *IPCPerm) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Key))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.UID))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.GID))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.CUID))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.CGID))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(i.Mode))
+ dst = dst[2:]
+ // Padding: dst[:sizeof(uint16)] ~= uint16(0)
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(i.Seq))
+ dst = dst[2:]
+ // Padding: dst[:sizeof(uint16)] ~= uint16(0)
+ dst = dst[2:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(i.unused1))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(i.unused2))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *IPCPerm) UnmarshalBytes(src []byte) {
+ i.Key = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ i.UID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ i.GID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ i.CUID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ i.CGID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ i.Mode = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ // Padding: var _ uint16 ~= src[:sizeof(uint16)]
+ src = src[2:]
+ i.Seq = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ // Padding: var _ uint16 ~= src[:sizeof(uint16)]
+ src = src[2:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+ i.unused1 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ i.unused2 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *IPCPerm) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *IPCPerm) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(i), uintptr(i.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *IPCPerm) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *IPCPerm) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *IPCPerm) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *IPCPerm) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *IPCPerm) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *Sysinfo) SizeBytes() int {
+ return 78 +
+ 8*3 +
+ 1*6
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *Sysinfo) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Uptime))
+ dst = dst[8:]
+ for idx := 0; idx < 3; idx++ {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Loads[idx]))
+ dst = dst[8:]
+ }
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.TotalRAM))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.FreeRAM))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.SharedRAM))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.BufferRAM))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.TotalSwap))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.FreeSwap))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Procs))
+ dst = dst[2:]
+ // Padding: dst[:sizeof(byte)*6] ~= [6]byte{0}
+ dst = dst[1*(6):]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.TotalHigh))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.FreeHigh))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Unit))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *Sysinfo) UnmarshalBytes(src []byte) {
+ s.Uptime = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ for idx := 0; idx < 3; idx++ {
+ s.Loads[idx] = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ }
+ s.TotalRAM = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.FreeRAM = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.SharedRAM = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.BufferRAM = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.TotalSwap = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.FreeSwap = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Procs = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ // Padding: ~ copy([6]byte(s._), src[:sizeof(byte)*6])
+ src = src[1*(6):]
+ s.TotalHigh = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.FreeHigh = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Unit = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *Sysinfo) Packed() bool {
+ return false
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *Sysinfo) MarshalUnsafe(dst []byte) {
+ // Type Sysinfo doesn't have a packed layout in memory, so fall back to MarshalBytes.
+ s.MarshalBytes(dst)
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *Sysinfo) UnmarshalUnsafe(src []byte) {
+ // Type Sysinfo doesn't have a packed layout in memory, so fall back to UnmarshalBytes.
+ s.UnmarshalBytes(src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *Sysinfo) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Type Sysinfo doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *Sysinfo) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *Sysinfo) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Type Sysinfo doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ s.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *Sysinfo) WriteTo(writer io.Writer) (int64, error) {
+ // Type Sysinfo doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+}
+
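Unlike the types above, Sysinfo reports Packed() == false, so the unsafe and copy paths route through the byte-by-byte encoders and a scratch buffer instead of aliasing the struct's memory. Calling MarshalUnsafe is therefore still safe; it just costs a field-by-field encode. A hedged sketch in the same package with a hypothetical helper name and placeholder values:

// marshalSysinfo is illustrative: for a non-packed type, MarshalUnsafe
// above simply defers to MarshalBytes.
func marshalSysinfo() []byte {
	var si Sysinfo
	si.Uptime = 3600 // placeholder values
	si.Procs = 42
	buf := make([]byte, si.SizeBytes())
	si.MarshalUnsafe(buf) // falls back to MarshalBytes internally
	return buf
}
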
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (n *NumaPolicy) SizeBytes() int {
+ return 4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (n *NumaPolicy) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(*n))
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (n *NumaPolicy) UnmarshalBytes(src []byte) {
+ *n = NumaPolicy(int32(hostarch.ByteOrder.Uint32(src[:4])))
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (n *NumaPolicy) Packed() bool {
+ // Scalar newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (n *NumaPolicy) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(n), uintptr(n.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (n *NumaPolicy) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(n), unsafe.Pointer(&src[0]), uintptr(n.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (n *NumaPolicy) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+ hdr.Len = n.SizeBytes()
+ hdr.Cap = n.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that n
+ // must live until the use above.
+ runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (n *NumaPolicy) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return n.CopyOutN(cc, addr, n.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (n *NumaPolicy) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+ hdr.Len = n.SizeBytes()
+ hdr.Cap = n.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that n
+ // must live until the use above.
+ runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (n *NumaPolicy) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+ hdr.Len = n.SizeBytes()
+ hdr.Cap = n.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that n
+ // must live until the use above.
+ runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *IFConf) SizeBytes() int {
+ return 12 +
+ 1*4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *IFConf) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Len))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0}
+ dst = dst[1*(4):]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(i.Ptr))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *IFConf) UnmarshalBytes(src []byte) {
+ i.Len = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: ~ copy([4]byte(i._), src[:sizeof(byte)*4])
+ src = src[1*(4):]
+ i.Ptr = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *IFConf) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *IFConf) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(i), uintptr(i.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *IFConf) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *IFConf) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *IFConf) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *IFConf) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *IFConf) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
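IFConf mirrors struct ifconf as used by SIOCGIFCONF: Len is a byte count describing the user buffer at Ptr, which holds an array of IFReq records. A hedged sketch in the same package deriving the record count under those usual ioctl semantics (helper name is hypothetical; IFReq is defined below in this file):

// ifreqCount is illustrative: the number of whole IFReq records that
// fit in the buffer described by an IFConf.
func ifreqCount(conf *IFConf) int {
	var req IFReq
	if conf.Len <= 0 {
		return 0
	}
	return int(conf.Len) / req.SizeBytes()
}
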
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (ifr *IFReq) SizeBytes() int {
+ return 0 +
+ 1*IFNAMSIZ +
+ 1*24
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (ifr *IFReq) MarshalBytes(dst []byte) {
+ for idx := 0; idx < IFNAMSIZ; idx++ {
+ dst[0] = byte(ifr.IFName[idx])
+ dst = dst[1:]
+ }
+ for idx := 0; idx < 24; idx++ {
+ dst[0] = byte(ifr.Data[idx])
+ dst = dst[1:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (ifr *IFReq) UnmarshalBytes(src []byte) {
+ for idx := 0; idx < IFNAMSIZ; idx++ {
+ ifr.IFName[idx] = src[0]
+ src = src[1:]
+ }
+ for idx := 0; idx < 24; idx++ {
+ ifr.Data[idx] = src[0]
+ src = src[1:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (ifr *IFReq) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (ifr *IFReq) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(ifr), uintptr(ifr.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (ifr *IFReq) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(ifr), unsafe.Pointer(&src[0]), uintptr(ifr.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (ifr *IFReq) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(ifr)))
+ hdr.Len = ifr.SizeBytes()
+ hdr.Cap = ifr.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that ifr
+ // must live until the use above.
+ runtime.KeepAlive(ifr) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (ifr *IFReq) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return ifr.CopyOutN(cc, addr, ifr.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (ifr *IFReq) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(ifr)))
+ hdr.Len = ifr.SizeBytes()
+ hdr.Cap = ifr.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that ifr
+ // must live until the use above.
+ runtime.KeepAlive(ifr) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (ifr *IFReq) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(ifr)))
+ hdr.Len = ifr.SizeBytes()
+ hdr.Cap = ifr.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that ifr
+ // must live until the use above.
+ runtime.KeepAlive(ifr) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
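IFName is a fixed IFNAMSIZ-byte, NUL-padded field, so the marshalled form always occupies the full array regardless of the name's length. A hedged sketch in the same package for building a request for a given interface name; the helper name, length policy, and error text are assumptions (fmt is assumed imported):

// newIFReq is a hypothetical helper: it builds an IFReq whose IFName
// holds name, NUL-padded to IFNAMSIZ bytes.
func newIFReq(name string) (IFReq, error) {
	var ifr IFReq
	if len(name) >= len(ifr.IFName) {
		return ifr, fmt.Errorf("interface name %q too long", name)
	}
	copy(ifr.IFName[:], name) // remaining bytes stay zero
	return ifr, nil
}
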
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (en *ErrorName) SizeBytes() int {
+ return 1 * XT_FUNCTION_MAXNAMELEN
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (en *ErrorName) MarshalBytes(dst []byte) {
+ for idx := 0; idx < XT_FUNCTION_MAXNAMELEN; idx++ {
+ dst[0] = byte(en[idx])
+ dst = dst[1:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (en *ErrorName) UnmarshalBytes(src []byte) {
+ for idx := 0; idx < XT_FUNCTION_MAXNAMELEN; idx++ {
+ en[idx] = src[0]
+ src = src[1:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (en *ErrorName) Packed() bool {
+ // Array newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (en *ErrorName) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&en[0]), uintptr(en.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (en *ErrorName) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(en), unsafe.Pointer(&src[0]), uintptr(en.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (en *ErrorName) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(en)))
+ hdr.Len = en.SizeBytes()
+ hdr.Cap = en.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that en
+ // must live until the use above.
+ runtime.KeepAlive(en) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (en *ErrorName) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return en.CopyOutN(cc, addr, en.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (en *ErrorName) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(en)))
+ hdr.Len = en.SizeBytes()
+ hdr.Cap = en.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that en
+ // must live until the use above.
+ runtime.KeepAlive(en) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (en *ErrorName) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(en)))
+ hdr.Len = en.SizeBytes()
+ hdr.Cap = en.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that en
+ // must live until the use above.
+ runtime.KeepAlive(en) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (en *ExtensionName) SizeBytes() int {
+ return 1 * XT_EXTENSION_MAXNAMELEN
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (en *ExtensionName) MarshalBytes(dst []byte) {
+ for idx := 0; idx < XT_EXTENSION_MAXNAMELEN; idx++ {
+ dst[0] = byte(en[idx])
+ dst = dst[1:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (en *ExtensionName) UnmarshalBytes(src []byte) {
+ for idx := 0; idx < XT_EXTENSION_MAXNAMELEN; idx++ {
+ en[idx] = src[0]
+ src = src[1:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (en *ExtensionName) Packed() bool {
+ // Array newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (en *ExtensionName) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&en[0]), uintptr(en.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (en *ExtensionName) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(en), unsafe.Pointer(&src[0]), uintptr(en.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (en *ExtensionName) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(en)))
+ hdr.Len = en.SizeBytes()
+ hdr.Cap = en.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that en
+ // must live until the use above.
+ runtime.KeepAlive(en) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (en *ExtensionName) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return en.CopyOutN(cc, addr, en.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (en *ExtensionName) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(en)))
+ hdr.Len = en.SizeBytes()
+ hdr.Cap = en.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that en
+ // must live until the use above.
+ runtime.KeepAlive(en) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (en *ExtensionName) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(en)))
+ hdr.Len = en.SizeBytes()
+ hdr.Cap = en.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that en
+ // must live until the use above.
+ runtime.KeepAlive(en) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
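+// The slice-header pattern used throughout this file deserves one isolated,
+// hedged sketch: gohacks.Noescape hides the pointer from escape analysis so
+// the receiver can stay on the stack, and runtime.KeepAlive keeps the
+// receiver live until the last use of the aliasing slice. byteViewOf and its
+// caller below are editorial helpers, not part of the generated code.
+func byteViewOf(ptr unsafe.Pointer, size int) []byte {
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(ptr))
+ hdr.Len = size
+ hdr.Cap = size
+ return buf
+}
+
+func exampleWriteExtensionName(w io.Writer, en *ExtensionName) (int, error) {
+ n, err := w.Write(byteViewOf(unsafe.Pointer(en), en.SizeBytes()))
+ runtime.KeepAlive(en) // en must outlive the slice's use in Write.
+ return n, err
+}
+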
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *IPTEntry) SizeBytes() int {
+ return 12 +
+ (*IPTIP)(nil).SizeBytes() +
+ (*XTCounters)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *IPTEntry) MarshalBytes(dst []byte) {
+ i.IP.MarshalBytes(dst[:i.IP.SizeBytes()])
+ dst = dst[i.IP.SizeBytes():]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.NFCache))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(i.TargetOffset))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(i.NextOffset))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Comeback))
+ dst = dst[4:]
+ i.Counters.MarshalBytes(dst[:i.Counters.SizeBytes()])
+ dst = dst[i.Counters.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *IPTEntry) UnmarshalBytes(src []byte) {
+ i.IP.UnmarshalBytes(src[:i.IP.SizeBytes()])
+ src = src[i.IP.SizeBytes():]
+ i.NFCache = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ i.TargetOffset = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ i.NextOffset = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ i.Comeback = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ i.Counters.UnmarshalBytes(src[:i.Counters.SizeBytes()])
+ src = src[i.Counters.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *IPTEntry) Packed() bool {
+ return i.Counters.Packed() && i.IP.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *IPTEntry) MarshalUnsafe(dst []byte) {
+ if i.Counters.Packed() && i.IP.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(i), uintptr(i.SizeBytes()))
+ } else {
+ // Type IPTEntry doesn't have a packed layout in memory, fallback to MarshalBytes.
+ i.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *IPTEntry) UnmarshalUnsafe(src []byte) {
+ if i.Counters.Packed() && i.IP.Packed() {
+ gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+ } else {
+ // Type IPTEntry doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ i.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *IPTEntry) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !i.Counters.Packed() || !i.IP.Packed() {
+ // Type IPTEntry doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ i.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *IPTEntry) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *IPTEntry) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !i.Counters.Packed() || !i.IP.Packed() {
+ // Type IPTEntry doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ i.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *IPTEntry) WriteTo(writer io.Writer) (int64, error) {
+ if !i.Counters.Packed() || !i.IP.Packed() {
+ // Type IPTEntry doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, i.SizeBytes())
+ i.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
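+// The Packed() checks above decide between a single memmove and a
+// field-by-field fallback. A minimal sketch of how calling code can lean on
+// that split (marshalToBuf is a hypothetical helper, not part of the
+// marshal package):
+func marshalToBuf(m marshal.Marshallable) []byte {
+ buf := make([]byte, m.SizeBytes())
+ // MarshalUnsafe already falls back to MarshalBytes for non-packed types,
+ // so callers may use it unconditionally.
+ m.MarshalUnsafe(buf)
+ return buf
+}
+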
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *IPTGetEntries) SizeBytes() int {
+ return 4 +
+ (*TableName)(nil).SizeBytes() +
+ 1*4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *IPTGetEntries) MarshalBytes(dst []byte) {
+ i.Name.MarshalBytes(dst[:i.Name.SizeBytes()])
+ dst = dst[i.Name.SizeBytes():]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Size))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0}
+ dst = dst[1*(4):]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *IPTGetEntries) UnmarshalBytes(src []byte) {
+ i.Name.UnmarshalBytes(src[:i.Name.SizeBytes()])
+ src = src[i.Name.SizeBytes():]
+ i.Size = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: ~ copy([4]byte(i._), src[:sizeof(byte)*4])
+ src = src[1*(4):]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *IPTGetEntries) Packed() bool {
+ return i.Name.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *IPTGetEntries) MarshalUnsafe(dst []byte) {
+ if i.Name.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(i), uintptr(i.SizeBytes()))
+ } else {
+ // Type IPTGetEntries doesn't have a packed layout in memory, fallback to MarshalBytes.
+ i.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *IPTGetEntries) UnmarshalUnsafe(src []byte) {
+ if i.Name.Packed() {
+ gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+ } else {
+ // Type IPTGetEntries doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ i.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *IPTGetEntries) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !i.Name.Packed() {
+ // Type IPTGetEntries doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ i.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *IPTGetEntries) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *IPTGetEntries) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !i.Name.Packed() {
+ // Type IPTGetEntries doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ i.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *IPTGetEntries) WriteTo(writer io.Writer) (int64, error) {
+ if !i.Name.Packed() {
+ // Type IPTGetEntries doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, i.SizeBytes())
+ i.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *IPTGetinfo) SizeBytes() int {
+ return 12 +
+ (*TableName)(nil).SizeBytes() +
+ 4*NF_INET_NUMHOOKS +
+ 4*NF_INET_NUMHOOKS
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *IPTGetinfo) MarshalBytes(dst []byte) {
+ i.Name.MarshalBytes(dst[:i.Name.SizeBytes()])
+ dst = dst[i.Name.SizeBytes():]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.ValidHooks))
+ dst = dst[4:]
+ for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.HookEntry[idx]))
+ dst = dst[4:]
+ }
+ for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Underflow[idx]))
+ dst = dst[4:]
+ }
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.NumEntries))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Size))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *IPTGetinfo) UnmarshalBytes(src []byte) {
+ i.Name.UnmarshalBytes(src[:i.Name.SizeBytes()])
+ src = src[i.Name.SizeBytes():]
+ i.ValidHooks = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
+ i.HookEntry[idx] = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ }
+ for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
+ i.Underflow[idx] = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ }
+ i.NumEntries = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ i.Size = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *IPTGetinfo) Packed() bool {
+ return i.Name.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *IPTGetinfo) MarshalUnsafe(dst []byte) {
+ if i.Name.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(i), uintptr(i.SizeBytes()))
+ } else {
+ // Type IPTGetinfo doesn't have a packed layout in memory, fallback to MarshalBytes.
+ i.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *IPTGetinfo) UnmarshalUnsafe(src []byte) {
+ if i.Name.Packed() {
+ gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+ } else {
+ // Type IPTGetinfo doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ i.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *IPTGetinfo) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !i.Name.Packed() {
+ // Type IPTGetinfo doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ i.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *IPTGetinfo) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *IPTGetinfo) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !i.Name.Packed() {
+ // Type IPTGetinfo doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ i.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *IPTGetinfo) WriteTo(writer io.Writer) (int64, error) {
+ if !i.Name.Packed() {
+ // Type IPTGetinfo doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, i.SizeBytes())
+ i.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
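+// A hedged sketch of why CopyOutN takes a limit: a getsockopt-style handler
+// can truncate the copy to the caller-supplied buffer length. The handler
+// shape and the outLen parameter are assumptions, not code from this package.
+func exampleCopyOutGetinfo(cc marshal.CopyContext, addr hostarch.Addr, outLen int, info *IPTGetinfo) (int, error) {
+ limit := info.SizeBytes()
+ if outLen < limit {
+  limit = outLen
+ }
+ return info.CopyOutN(cc, addr, limit)
+}
+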
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *IPTIP) SizeBytes() int {
+ return 4 +
+ (*InetAddr)(nil).SizeBytes() +
+ (*InetAddr)(nil).SizeBytes() +
+ (*InetAddr)(nil).SizeBytes() +
+ (*InetAddr)(nil).SizeBytes() +
+ 1*IFNAMSIZ +
+ 1*IFNAMSIZ +
+ 1*IFNAMSIZ +
+ 1*IFNAMSIZ
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *IPTIP) MarshalBytes(dst []byte) {
+ i.Src.MarshalBytes(dst[:i.Src.SizeBytes()])
+ dst = dst[i.Src.SizeBytes():]
+ i.Dst.MarshalBytes(dst[:i.Dst.SizeBytes()])
+ dst = dst[i.Dst.SizeBytes():]
+ i.SrcMask.MarshalBytes(dst[:i.SrcMask.SizeBytes()])
+ dst = dst[i.SrcMask.SizeBytes():]
+ i.DstMask.MarshalBytes(dst[:i.DstMask.SizeBytes()])
+ dst = dst[i.DstMask.SizeBytes():]
+ for idx := 0; idx < IFNAMSIZ; idx++ {
+ dst[0] = byte(i.InputInterface[idx])
+ dst = dst[1:]
+ }
+ for idx := 0; idx < IFNAMSIZ; idx++ {
+ dst[0] = byte(i.OutputInterface[idx])
+ dst = dst[1:]
+ }
+ for idx := 0; idx < IFNAMSIZ; idx++ {
+ dst[0] = byte(i.InputInterfaceMask[idx])
+ dst = dst[1:]
+ }
+ for idx := 0; idx < IFNAMSIZ; idx++ {
+ dst[0] = byte(i.OutputInterfaceMask[idx])
+ dst = dst[1:]
+ }
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(i.Protocol))
+ dst = dst[2:]
+ dst[0] = byte(i.Flags)
+ dst = dst[1:]
+ dst[0] = byte(i.InverseFlags)
+ dst = dst[1:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *IPTIP) UnmarshalBytes(src []byte) {
+ i.Src.UnmarshalBytes(src[:i.Src.SizeBytes()])
+ src = src[i.Src.SizeBytes():]
+ i.Dst.UnmarshalBytes(src[:i.Dst.SizeBytes()])
+ src = src[i.Dst.SizeBytes():]
+ i.SrcMask.UnmarshalBytes(src[:i.SrcMask.SizeBytes()])
+ src = src[i.SrcMask.SizeBytes():]
+ i.DstMask.UnmarshalBytes(src[:i.DstMask.SizeBytes()])
+ src = src[i.DstMask.SizeBytes():]
+ for idx := 0; idx < IFNAMSIZ; idx++ {
+ i.InputInterface[idx] = src[0]
+ src = src[1:]
+ }
+ for idx := 0; idx < IFNAMSIZ; idx++ {
+ i.OutputInterface[idx] = src[0]
+ src = src[1:]
+ }
+ for idx := 0; idx < IFNAMSIZ; idx++ {
+ i.InputInterfaceMask[idx] = src[0]
+ src = src[1:]
+ }
+ for idx := 0; idx < IFNAMSIZ; idx++ {
+ i.OutputInterfaceMask[idx] = src[0]
+ src = src[1:]
+ }
+ i.Protocol = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ i.Flags = uint8(src[0])
+ src = src[1:]
+ i.InverseFlags = uint8(src[0])
+ src = src[1:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *IPTIP) Packed() bool {
+ return i.Dst.Packed() && i.DstMask.Packed() && i.Src.Packed() && i.SrcMask.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *IPTIP) MarshalUnsafe(dst []byte) {
+ if i.Dst.Packed() && i.DstMask.Packed() && i.Src.Packed() && i.SrcMask.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(i), uintptr(i.SizeBytes()))
+ } else {
+ // Type IPTIP doesn't have a packed layout in memory, fallback to MarshalBytes.
+ i.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *IPTIP) UnmarshalUnsafe(src []byte) {
+ if i.Dst.Packed() && i.DstMask.Packed() && i.Src.Packed() && i.SrcMask.Packed() {
+ gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+ } else {
+ // Type IPTIP doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ i.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *IPTIP) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !i.Dst.Packed() || !i.DstMask.Packed() || !i.Src.Packed() || !i.SrcMask.Packed() {
+ // Type IPTIP doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ i.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *IPTIP) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *IPTIP) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !i.Dst.Packed() || !i.DstMask.Packed() || !i.Src.Packed() || !i.SrcMask.Packed() {
+ // Type IPTIP doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ i.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *IPTIP) WriteTo(writer io.Writer) (int64, error) {
+ if !i.Dst.Packed() || !i.DstMask.Packed() || !i.Src.Packed() || !i.SrcMask.Packed() {
+ // Type IPTIP doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, i.SizeBytes())
+ i.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *IPTOwnerInfo) SizeBytes() int {
+ return 18 +
+ 1*16
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *IPTOwnerInfo) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.UID))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.GID))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.PID))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.SID))
+ dst = dst[4:]
+ for idx := 0; idx < 16; idx++ {
+ dst[0] = byte(i.Comm[idx])
+ dst = dst[1:]
+ }
+ dst[0] = byte(i.Match)
+ dst = dst[1:]
+ dst[0] = byte(i.Invert)
+ dst = dst[1:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *IPTOwnerInfo) UnmarshalBytes(src []byte) {
+ i.UID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ i.GID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ i.PID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ i.SID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ for idx := 0; idx < 16; idx++ {
+ i.Comm[idx] = src[0]
+ src = src[1:]
+ }
+ i.Match = uint8(src[0])
+ src = src[1:]
+ i.Invert = uint8(src[0])
+ src = src[1:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *IPTOwnerInfo) Packed() bool {
+ return false
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *IPTOwnerInfo) MarshalUnsafe(dst []byte) {
+ // Type IPTOwnerInfo doesn't have a packed layout in memory, fallback to MarshalBytes.
+ i.MarshalBytes(dst)
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *IPTOwnerInfo) UnmarshalUnsafe(src []byte) {
+ // Type IPTOwnerInfo doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ i.UnmarshalBytes(src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *IPTOwnerInfo) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Type IPTOwnerInfo doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ i.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *IPTOwnerInfo) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *IPTOwnerInfo) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Type IPTOwnerInfo doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ i.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *IPTOwnerInfo) WriteTo(writer io.Writer) (int64, error) {
+ // Type IPTOwnerInfo doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, i.SizeBytes())
+ i.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *IPTReplace) SizeBytes() int {
+ return 24 +
+ (*TableName)(nil).SizeBytes() +
+ 4*NF_INET_NUMHOOKS +
+ 4*NF_INET_NUMHOOKS
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *IPTReplace) MarshalBytes(dst []byte) {
+ i.Name.MarshalBytes(dst[:i.Name.SizeBytes()])
+ dst = dst[i.Name.SizeBytes():]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.ValidHooks))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.NumEntries))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Size))
+ dst = dst[4:]
+ for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.HookEntry[idx]))
+ dst = dst[4:]
+ }
+ for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Underflow[idx]))
+ dst = dst[4:]
+ }
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.NumCounters))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(i.Counters))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *IPTReplace) UnmarshalBytes(src []byte) {
+ i.Name.UnmarshalBytes(src[:i.Name.SizeBytes()])
+ src = src[i.Name.SizeBytes():]
+ i.ValidHooks = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ i.NumEntries = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ i.Size = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
+ i.HookEntry[idx] = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ }
+ for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
+ i.Underflow[idx] = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ }
+ i.NumCounters = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ i.Counters = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *IPTReplace) Packed() bool {
+ return i.Name.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *IPTReplace) MarshalUnsafe(dst []byte) {
+ if i.Name.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(i), uintptr(i.SizeBytes()))
+ } else {
+ // Type IPTReplace doesn't have a packed layout in memory, fallback to MarshalBytes.
+ i.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *IPTReplace) UnmarshalUnsafe(src []byte) {
+ if i.Name.Packed() {
+ gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+ } else {
+ // Type IPTReplace doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ i.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *IPTReplace) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !i.Name.Packed() {
+ // Type IPTReplace doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ i.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *IPTReplace) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *IPTReplace) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !i.Name.Packed() {
+ // Type IPTReplace doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ i.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *IPTReplace) WriteTo(writer io.Writer) (int64, error) {
+ if !i.Name.Packed() {
+ // Type IPTReplace doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, i.SizeBytes())
+ i.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (ke *KernelIPTEntry) Packed() bool {
+ // Type KernelIPTEntry is dynamic so it might have slice/string headers. Hence, it is not packed.
+ return false
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (ke *KernelIPTEntry) MarshalUnsafe(dst []byte) {
+ // Type KernelIPTEntry doesn't have a packed layout in memory, fallback to MarshalBytes.
+ ke.MarshalBytes(dst)
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (ke *KernelIPTEntry) UnmarshalUnsafe(src []byte) {
+ // Type KernelIPTEntry doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ ke.UnmarshalBytes(src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (ke *KernelIPTEntry) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Type KernelIPTEntry doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(ke.SizeBytes()) // escapes: okay.
+ ke.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (ke *KernelIPTEntry) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return ke.CopyOutN(cc, addr, ke.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (ke *KernelIPTEntry) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Type KernelIPTEntry doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(ke.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ ke.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (ke *KernelIPTEntry) WriteTo(writer io.Writer) (int64, error) {
+ // Type KernelIPTEntry doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, ke.SizeBytes())
+ ke.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (ke *KernelIPTGetEntries) Packed() bool {
+ // Type KernelIPTGetEntries is dynamic so it might have slice/string headers. Hence, it is not packed.
+ return false
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (ke *KernelIPTGetEntries) MarshalUnsafe(dst []byte) {
+ // Type KernelIPTGetEntries doesn't have a packed layout in memory, fallback to MarshalBytes.
+ ke.MarshalBytes(dst)
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (ke *KernelIPTGetEntries) UnmarshalUnsafe(src []byte) {
+ // Type KernelIPTGetEntries doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ ke.UnmarshalBytes(src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (ke *KernelIPTGetEntries) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Type KernelIPTGetEntries doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(ke.SizeBytes()) // escapes: okay.
+ ke.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (ke *KernelIPTGetEntries) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return ke.CopyOutN(cc, addr, ke.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (ke *KernelIPTGetEntries) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Type KernelIPTGetEntries doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(ke.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ ke.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (ke *KernelIPTGetEntries) WriteTo(writer io.Writer) (int64, error) {
+ // Type KernelIPTGetEntries doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, ke.SizeBytes())
+ ke.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (n *NfNATIPV4MultiRangeCompat) SizeBytes() int {
+ return 4 +
+ (*NfNATIPV4Range)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (n *NfNATIPV4MultiRangeCompat) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(n.RangeSize))
+ dst = dst[4:]
+ n.RangeIPV4.MarshalBytes(dst[:n.RangeIPV4.SizeBytes()])
+ dst = dst[n.RangeIPV4.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (n *NfNATIPV4MultiRangeCompat) UnmarshalBytes(src []byte) {
+ n.RangeSize = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ n.RangeIPV4.UnmarshalBytes(src[:n.RangeIPV4.SizeBytes()])
+ src = src[n.RangeIPV4.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (n *NfNATIPV4MultiRangeCompat) Packed() bool {
+ return n.RangeIPV4.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (n *NfNATIPV4MultiRangeCompat) MarshalUnsafe(dst []byte) {
+ if n.RangeIPV4.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(n), uintptr(n.SizeBytes()))
+ } else {
+ // Type NfNATIPV4MultiRangeCompat doesn't have a packed layout in memory, fallback to MarshalBytes.
+ n.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (n *NfNATIPV4MultiRangeCompat) UnmarshalUnsafe(src []byte) {
+ if n.RangeIPV4.Packed() {
+ gohacks.Memmove(unsafe.Pointer(n), unsafe.Pointer(&src[0]), uintptr(n.SizeBytes()))
+ } else {
+ // Type NfNATIPV4MultiRangeCompat doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ n.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (n *NfNATIPV4MultiRangeCompat) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !n.RangeIPV4.Packed() {
+ // Type NfNATIPV4MultiRangeCompat doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(n.SizeBytes()) // escapes: okay.
+ n.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+ hdr.Len = n.SizeBytes()
+ hdr.Cap = n.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that n
+ // must live until the use above.
+ runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (n *NfNATIPV4MultiRangeCompat) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return n.CopyOutN(cc, addr, n.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (n *NfNATIPV4MultiRangeCompat) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !n.RangeIPV4.Packed() {
+ // Type NfNATIPV4MultiRangeCompat doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(n.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ n.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+ hdr.Len = n.SizeBytes()
+ hdr.Cap = n.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that n
+ // must live until the use above.
+ runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (n *NfNATIPV4MultiRangeCompat) WriteTo(writer io.Writer) (int64, error) {
+ if !n.RangeIPV4.Packed() {
+ // Type NfNATIPV4MultiRangeCompat doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, n.SizeBytes())
+ n.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+ hdr.Len = n.SizeBytes()
+ hdr.Cap = n.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that n
+ // must live until the use above.
+ runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (n *NfNATIPV4Range) SizeBytes() int {
+ return 8 +
+ 1*4 +
+ 1*4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (n *NfNATIPV4Range) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(n.Flags))
+ dst = dst[4:]
+ for idx := 0; idx < 4; idx++ {
+ dst[0] = byte(n.MinIP[idx])
+ dst = dst[1:]
+ }
+ for idx := 0; idx < 4; idx++ {
+ dst[0] = byte(n.MaxIP[idx])
+ dst = dst[1:]
+ }
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(n.MinPort))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(n.MaxPort))
+ dst = dst[2:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (n *NfNATIPV4Range) UnmarshalBytes(src []byte) {
+ n.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ for idx := 0; idx < 4; idx++ {
+ n.MinIP[idx] = src[0]
+ src = src[1:]
+ }
+ for idx := 0; idx < 4; idx++ {
+ n.MaxIP[idx] = src[0]
+ src = src[1:]
+ }
+ n.MinPort = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ n.MaxPort = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (n *NfNATIPV4Range) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (n *NfNATIPV4Range) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(n), uintptr(n.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (n *NfNATIPV4Range) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(n), unsafe.Pointer(&src[0]), uintptr(n.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (n *NfNATIPV4Range) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+ hdr.Len = n.SizeBytes()
+ hdr.Cap = n.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that n
+ // must live until the use above.
+ runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (n *NfNATIPV4Range) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return n.CopyOutN(cc, addr, n.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (n *NfNATIPV4Range) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+ hdr.Len = n.SizeBytes()
+ hdr.Cap = n.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that n
+ // must live until the use above.
+ runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (n *NfNATIPV4Range) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+ hdr.Len = n.SizeBytes()
+ hdr.Cap = n.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that n
+ // must live until the use above.
+ runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (tn *TableName) SizeBytes() int {
+ return 1 * XT_TABLE_MAXNAMELEN
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (tn *TableName) MarshalBytes(dst []byte) {
+ for idx := 0; idx < XT_TABLE_MAXNAMELEN; idx++ {
+ dst[0] = byte(tn[idx])
+ dst = dst[1:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (tn *TableName) UnmarshalBytes(src []byte) {
+ for idx := 0; idx < XT_TABLE_MAXNAMELEN; idx++ {
+ tn[idx] = src[0]
+ src = src[1:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (tn *TableName) Packed() bool {
+ // Array newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (tn *TableName) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&tn[0]), uintptr(tn.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (tn *TableName) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(tn), unsafe.Pointer(&src[0]), uintptr(tn.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (tn *TableName) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(tn)))
+ hdr.Len = tn.SizeBytes()
+ hdr.Cap = tn.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that tn
+ // must live until the use above.
+ runtime.KeepAlive(tn) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (tn *TableName) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return tn.CopyOutN(cc, addr, tn.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (tn *TableName) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(tn)))
+ hdr.Len = tn.SizeBytes()
+ hdr.Cap = tn.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that tn
+ // must live until the use above.
+ runtime.KeepAlive(tn) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (tn *TableName) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(tn)))
+ hdr.Len = tn.SizeBytes()
+ hdr.Cap = tn.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that tn
+ // must live until the use above.
+ runtime.KeepAlive(tn) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (x *XTCounters) SizeBytes() int {
+ return 16
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (x *XTCounters) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(x.Pcnt))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(x.Bcnt))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (x *XTCounters) UnmarshalBytes(src []byte) {
+ x.Pcnt = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ x.Bcnt = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (x *XTCounters) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (x *XTCounters) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(x), uintptr(x.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (x *XTCounters) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(x), unsafe.Pointer(&src[0]), uintptr(x.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (x *XTCounters) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (x *XTCounters) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return x.CopyOutN(cc, addr, x.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (x *XTCounters) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (x *XTCounters) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
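+// A small round-trip sketch using the XTCounters methods above; the values
+// are arbitrary and the function is illustrative only.
+func exampleXTCountersRoundTrip() XTCounters {
+ src := XTCounters{Pcnt: 42, Bcnt: 4096}
+ buf := make([]byte, src.SizeBytes())
+ src.MarshalUnsafe(buf) // XTCounters is packed, so this is a single memmove.
+ var dst XTCounters
+ dst.UnmarshalUnsafe(buf)
+ return dst // dst now equals src.
+}
+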
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (x *XTEntryMatch) SizeBytes() int {
+ return 3 +
+ (*ExtensionName)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (x *XTEntryMatch) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(x.MatchSize))
+ dst = dst[2:]
+ x.Name.MarshalBytes(dst[:x.Name.SizeBytes()])
+ dst = dst[x.Name.SizeBytes():]
+ dst[0] = byte(x.Revision)
+ dst = dst[1:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (x *XTEntryMatch) UnmarshalBytes(src []byte) {
+ x.MatchSize = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ x.Name.UnmarshalBytes(src[:x.Name.SizeBytes()])
+ src = src[x.Name.SizeBytes():]
+ x.Revision = uint8(src[0])
+ src = src[1:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (x *XTEntryMatch) Packed() bool {
+ return x.Name.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (x *XTEntryMatch) MarshalUnsafe(dst []byte) {
+ if x.Name.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(x), uintptr(x.SizeBytes()))
+ } else {
+ // Type XTEntryMatch doesn't have a packed layout in memory, fall back to MarshalBytes.
+ x.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (x *XTEntryMatch) UnmarshalUnsafe(src []byte) {
+ if x.Name.Packed() {
+ gohacks.Memmove(unsafe.Pointer(x), unsafe.Pointer(&src[0]), uintptr(x.SizeBytes()))
+ } else {
+ // Type XTEntryMatch doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ x.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (x *XTEntryMatch) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !x.Name.Packed() {
+ // Type XTEntryMatch doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay.
+ x.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (x *XTEntryMatch) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return x.CopyOutN(cc, addr, x.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (x *XTEntryMatch) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !x.Name.Packed() {
+ // Type XTEntryMatch doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ x.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (x *XTEntryMatch) WriteTo(writer io.Writer) (int64, error) {
+ if !x.Name.Packed() {
+ // Type XTEntryMatch doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, x.SizeBytes())
+ x.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
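Because XTEntryMatch also satisfies io.WriterTo, it can be streamed straight into any writer. A short editorial sketch follows; the bytes.Buffer target, the field values, and the import path are illustrative assumptions:

package main

import (
	"bytes"
	"fmt"

	"gvisor.dev/gvisor/pkg/abi/linux"
)

func main() {
	// MatchSize conventionally covers the header plus the match-specific data
	// that follows it; 48 is an arbitrary illustrative value here.
	m := linux.XTEntryMatch{MatchSize: 48, Revision: 1}
	var buf bytes.Buffer
	n, err := m.WriteTo(&buf)
	fmt.Println(n == int64(m.SizeBytes()), err) // true <nil>
}
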
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (x *XTEntryTarget) SizeBytes() int {
+ return 3 +
+ (*ExtensionName)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (x *XTEntryTarget) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(x.TargetSize))
+ dst = dst[2:]
+ x.Name.MarshalBytes(dst[:x.Name.SizeBytes()])
+ dst = dst[x.Name.SizeBytes():]
+ dst[0] = byte(x.Revision)
+ dst = dst[1:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (x *XTEntryTarget) UnmarshalBytes(src []byte) {
+ x.TargetSize = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ x.Name.UnmarshalBytes(src[:x.Name.SizeBytes()])
+ src = src[x.Name.SizeBytes():]
+ x.Revision = uint8(src[0])
+ src = src[1:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (x *XTEntryTarget) Packed() bool {
+ return x.Name.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (x *XTEntryTarget) MarshalUnsafe(dst []byte) {
+ if x.Name.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(x), uintptr(x.SizeBytes()))
+ } else {
+ // Type XTEntryTarget doesn't have a packed layout in memory, fall back to MarshalBytes.
+ x.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (x *XTEntryTarget) UnmarshalUnsafe(src []byte) {
+ if x.Name.Packed() {
+ gohacks.Memmove(unsafe.Pointer(x), unsafe.Pointer(&src[0]), uintptr(x.SizeBytes()))
+ } else {
+ // Type XTEntryTarget doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ x.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (x *XTEntryTarget) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !x.Name.Packed() {
+ // Type XTEntryTarget doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay.
+ x.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (x *XTEntryTarget) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return x.CopyOutN(cc, addr, x.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (x *XTEntryTarget) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !x.Name.Packed() {
+ // Type XTEntryTarget doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ x.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (x *XTEntryTarget) WriteTo(writer io.Writer) (int64, error) {
+ if !x.Name.Packed() {
+ // Type XTEntryTarget doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, x.SizeBytes())
+ x.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (x *XTErrorTarget) SizeBytes() int {
+ return 0 +
+ (*XTEntryTarget)(nil).SizeBytes() +
+ (*ErrorName)(nil).SizeBytes() +
+ 1*2
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (x *XTErrorTarget) MarshalBytes(dst []byte) {
+ x.Target.MarshalBytes(dst[:x.Target.SizeBytes()])
+ dst = dst[x.Target.SizeBytes():]
+ x.Name.MarshalBytes(dst[:x.Name.SizeBytes()])
+ dst = dst[x.Name.SizeBytes():]
+ // Padding: dst[:sizeof(byte)*2] ~= [2]byte{0}
+ dst = dst[1*(2):]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (x *XTErrorTarget) UnmarshalBytes(src []byte) {
+ x.Target.UnmarshalBytes(src[:x.Target.SizeBytes()])
+ src = src[x.Target.SizeBytes():]
+ x.Name.UnmarshalBytes(src[:x.Name.SizeBytes()])
+ src = src[x.Name.SizeBytes():]
+ // Padding: ~ copy([2]byte(x._), src[:sizeof(byte)*2])
+ src = src[1*(2):]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (x *XTErrorTarget) Packed() bool {
+ return x.Name.Packed() && x.Target.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (x *XTErrorTarget) MarshalUnsafe(dst []byte) {
+ if x.Name.Packed() && x.Target.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(x), uintptr(x.SizeBytes()))
+ } else {
+ // Type XTErrorTarget doesn't have a packed layout in memory, fall back to MarshalBytes.
+ x.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (x *XTErrorTarget) UnmarshalUnsafe(src []byte) {
+ if x.Name.Packed() && x.Target.Packed() {
+ gohacks.Memmove(unsafe.Pointer(x), unsafe.Pointer(&src[0]), uintptr(x.SizeBytes()))
+ } else {
+ // Type XTErrorTarget doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ x.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (x *XTErrorTarget) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !x.Name.Packed() || !x.Target.Packed() {
+ // Type XTErrorTarget doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay.
+ x.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (x *XTErrorTarget) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return x.CopyOutN(cc, addr, x.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (x *XTErrorTarget) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !x.Name.Packed() || !x.Target.Packed() {
+ // Type XTErrorTarget doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ x.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (x *XTErrorTarget) WriteTo(writer io.Writer) (int64, error) {
+ if !x.Name.Packed() || !x.Target.Packed() {
+ // Type XTErrorTarget doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, x.SizeBytes())
+ x.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (x *XTGetRevision) SizeBytes() int {
+ return 1 +
+ (*ExtensionName)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (x *XTGetRevision) MarshalBytes(dst []byte) {
+ x.Name.MarshalBytes(dst[:x.Name.SizeBytes()])
+ dst = dst[x.Name.SizeBytes():]
+ dst[0] = byte(x.Revision)
+ dst = dst[1:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (x *XTGetRevision) UnmarshalBytes(src []byte) {
+ x.Name.UnmarshalBytes(src[:x.Name.SizeBytes()])
+ src = src[x.Name.SizeBytes():]
+ x.Revision = uint8(src[0])
+ src = src[1:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (x *XTGetRevision) Packed() bool {
+ return x.Name.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (x *XTGetRevision) MarshalUnsafe(dst []byte) {
+ if x.Name.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(x), uintptr(x.SizeBytes()))
+ } else {
+ // Type XTGetRevision doesn't have a packed layout in memory, fall back to MarshalBytes.
+ x.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (x *XTGetRevision) UnmarshalUnsafe(src []byte) {
+ if x.Name.Packed() {
+ gohacks.Memmove(unsafe.Pointer(x), unsafe.Pointer(&src[0]), uintptr(x.SizeBytes()))
+ } else {
+ // Type XTGetRevision doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ x.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (x *XTGetRevision) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !x.Name.Packed() {
+ // Type XTGetRevision doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay.
+ x.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (x *XTGetRevision) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return x.CopyOutN(cc, addr, x.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (x *XTGetRevision) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !x.Name.Packed() {
+ // Type XTGetRevision doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ x.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (x *XTGetRevision) WriteTo(writer io.Writer) (int64, error) {
+ if !x.Name.Packed() {
+ // Type XTGetRevision doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, x.SizeBytes())
+ x.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (x *XTRedirectTarget) SizeBytes() int {
+ return 0 +
+ (*XTEntryTarget)(nil).SizeBytes() +
+ (*NfNATIPV4MultiRangeCompat)(nil).SizeBytes() +
+ 1*4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (x *XTRedirectTarget) MarshalBytes(dst []byte) {
+ x.Target.MarshalBytes(dst[:x.Target.SizeBytes()])
+ dst = dst[x.Target.SizeBytes():]
+ x.NfRange.MarshalBytes(dst[:x.NfRange.SizeBytes()])
+ dst = dst[x.NfRange.SizeBytes():]
+ // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0}
+ dst = dst[1*(4):]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (x *XTRedirectTarget) UnmarshalBytes(src []byte) {
+ x.Target.UnmarshalBytes(src[:x.Target.SizeBytes()])
+ src = src[x.Target.SizeBytes():]
+ x.NfRange.UnmarshalBytes(src[:x.NfRange.SizeBytes()])
+ src = src[x.NfRange.SizeBytes():]
+ // Padding: ~ copy([4]byte(x._), src[:sizeof(byte)*4])
+ src = src[1*(4):]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (x *XTRedirectTarget) Packed() bool {
+ return x.NfRange.Packed() && x.Target.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (x *XTRedirectTarget) MarshalUnsafe(dst []byte) {
+ if x.NfRange.Packed() && x.Target.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(x), uintptr(x.SizeBytes()))
+ } else {
+ // Type XTRedirectTarget doesn't have a packed layout in memory, fall back to MarshalBytes.
+ x.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (x *XTRedirectTarget) UnmarshalUnsafe(src []byte) {
+ if x.NfRange.Packed() && x.Target.Packed() {
+ gohacks.Memmove(unsafe.Pointer(x), unsafe.Pointer(&src[0]), uintptr(x.SizeBytes()))
+ } else {
+ // Type XTRedirectTarget doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ x.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (x *XTRedirectTarget) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !x.NfRange.Packed() || !x.Target.Packed() {
+ // Type XTRedirectTarget doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay.
+ x.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (x *XTRedirectTarget) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return x.CopyOutN(cc, addr, x.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (x *XTRedirectTarget) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !x.NfRange.Packed() || !x.Target.Packed() {
+ // Type XTRedirectTarget doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ x.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (x *XTRedirectTarget) WriteTo(writer io.Writer) (int64, error) {
+ if !x.NfRange.Packed() || !x.Target.Packed() {
+ // Type XTRedirectTarget doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, x.SizeBytes())
+ x.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (x *XTSNATTarget) SizeBytes() int {
+ return 0 +
+ (*XTEntryTarget)(nil).SizeBytes() +
+ (*NfNATIPV4MultiRangeCompat)(nil).SizeBytes() +
+ 1*4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (x *XTSNATTarget) MarshalBytes(dst []byte) {
+ x.Target.MarshalBytes(dst[:x.Target.SizeBytes()])
+ dst = dst[x.Target.SizeBytes():]
+ x.NfRange.MarshalBytes(dst[:x.NfRange.SizeBytes()])
+ dst = dst[x.NfRange.SizeBytes():]
+ // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0}
+ dst = dst[1*(4):]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (x *XTSNATTarget) UnmarshalBytes(src []byte) {
+ x.Target.UnmarshalBytes(src[:x.Target.SizeBytes()])
+ src = src[x.Target.SizeBytes():]
+ x.NfRange.UnmarshalBytes(src[:x.NfRange.SizeBytes()])
+ src = src[x.NfRange.SizeBytes():]
+ // Padding: ~ copy([4]byte(x._), src[:sizeof(byte)*4])
+ src = src[1*(4):]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (x *XTSNATTarget) Packed() bool {
+ return x.NfRange.Packed() && x.Target.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (x *XTSNATTarget) MarshalUnsafe(dst []byte) {
+ if x.NfRange.Packed() && x.Target.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(x), uintptr(x.SizeBytes()))
+ } else {
+ // Type XTSNATTarget doesn't have a packed layout in memory, fall back to MarshalBytes.
+ x.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (x *XTSNATTarget) UnmarshalUnsafe(src []byte) {
+ if x.NfRange.Packed() && x.Target.Packed() {
+ gohacks.Memmove(unsafe.Pointer(x), unsafe.Pointer(&src[0]), uintptr(x.SizeBytes()))
+ } else {
+ // Type XTSNATTarget doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ x.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (x *XTSNATTarget) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !x.NfRange.Packed() || !x.Target.Packed() {
+ // Type XTSNATTarget doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay.
+ x.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (x *XTSNATTarget) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return x.CopyOutN(cc, addr, x.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (x *XTSNATTarget) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !x.NfRange.Packed() || !x.Target.Packed() {
+ // Type XTSNATTarget doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ x.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (x *XTSNATTarget) WriteTo(writer io.Writer) (int64, error) {
+ if !x.NfRange.Packed() || !x.Target.Packed() {
+ // Type XTSNATTarget doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, x.SizeBytes())
+ x.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (x *XTStandardTarget) SizeBytes() int {
+ return 4 +
+ (*XTEntryTarget)(nil).SizeBytes() +
+ 1*4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (x *XTStandardTarget) MarshalBytes(dst []byte) {
+ x.Target.MarshalBytes(dst[:x.Target.SizeBytes()])
+ dst = dst[x.Target.SizeBytes():]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(x.Verdict))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0}
+ dst = dst[1*(4):]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (x *XTStandardTarget) UnmarshalBytes(src []byte) {
+ x.Target.UnmarshalBytes(src[:x.Target.SizeBytes()])
+ src = src[x.Target.SizeBytes():]
+ x.Verdict = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: ~ copy([4]byte(x._), src[:sizeof(byte)*4])
+ src = src[1*(4):]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (x *XTStandardTarget) Packed() bool {
+ return x.Target.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (x *XTStandardTarget) MarshalUnsafe(dst []byte) {
+ if x.Target.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(x), uintptr(x.SizeBytes()))
+ } else {
+ // Type XTStandardTarget doesn't have a packed layout in memory, fall back to MarshalBytes.
+ x.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (x *XTStandardTarget) UnmarshalUnsafe(src []byte) {
+ if x.Target.Packed() {
+ gohacks.Memmove(unsafe.Pointer(x), unsafe.Pointer(&src[0]), uintptr(x.SizeBytes()))
+ } else {
+ // Type XTStandardTarget doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ x.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (x *XTStandardTarget) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !x.Target.Packed() {
+ // Type XTStandardTarget doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay.
+ x.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (x *XTStandardTarget) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return x.CopyOutN(cc, addr, x.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (x *XTStandardTarget) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !x.Target.Packed() {
+ // Type XTStandardTarget doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ x.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (x *XTStandardTarget) WriteTo(writer io.Writer) (int64, error) {
+ if !x.Target.Packed() {
+ // Type XTStandardTarget doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, x.SizeBytes())
+ x.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
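The Verdict field above is signed but travels as a uint32 on the wire. The sketch below (editorial, with an illustrative verdict value and an assumed import path) shows that a negative verdict survives the MarshalBytes/UnmarshalBytes round trip:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/abi/linux"
)

func main() {
	// Negative verdicts are how iptables encodes built-in targets; -2 is just
	// an illustrative value here.
	in := linux.XTStandardTarget{Verdict: -2}
	buf := make([]byte, in.SizeBytes())
	in.MarshalBytes(buf)

	var out linux.XTStandardTarget
	out.UnmarshalBytes(buf)
	fmt.Println(out.Verdict) // -2
}
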
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (x *XTTCP) SizeBytes() int {
+ return 12
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (x *XTTCP) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(x.SourcePortStart))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(x.SourcePortEnd))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(x.DestinationPortStart))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(x.DestinationPortEnd))
+ dst = dst[2:]
+ dst[0] = byte(x.Option)
+ dst = dst[1:]
+ dst[0] = byte(x.FlagMask)
+ dst = dst[1:]
+ dst[0] = byte(x.FlagCompare)
+ dst = dst[1:]
+ dst[0] = byte(x.InverseFlags)
+ dst = dst[1:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (x *XTTCP) UnmarshalBytes(src []byte) {
+ x.SourcePortStart = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ x.SourcePortEnd = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ x.DestinationPortStart = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ x.DestinationPortEnd = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ x.Option = uint8(src[0])
+ src = src[1:]
+ x.FlagMask = uint8(src[0])
+ src = src[1:]
+ x.FlagCompare = uint8(src[0])
+ src = src[1:]
+ x.InverseFlags = uint8(src[0])
+ src = src[1:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (x *XTTCP) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (x *XTTCP) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(x), uintptr(x.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (x *XTTCP) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(x), unsafe.Pointer(&src[0]), uintptr(x.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (x *XTTCP) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (x *XTTCP) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return x.CopyOutN(cc, addr, x.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (x *XTTCP) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (x *XTTCP) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
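To make the fixed 12-byte XTTCP layout above concrete, here is a small editorial sketch (the import path and all field values are assumptions) that encodes a match and reads it back:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/abi/linux"
)

func main() {
	in := linux.XTTCP{
		SourcePortStart:      1024,
		SourcePortEnd:        65535,
		DestinationPortStart: 80,
		DestinationPortEnd:   80,
		FlagMask:             0x3f, // illustrative TCP flag mask.
		FlagCompare:          0x02, // illustrative: match SYN only.
	}
	buf := make([]byte, in.SizeBytes()) // 12 bytes.
	in.MarshalBytes(buf)

	var out linux.XTTCP
	out.UnmarshalBytes(buf)
	fmt.Println(out.DestinationPortStart, out.FlagCompare) // 80 2
}
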
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (x *XTUDP) SizeBytes() int {
+ return 10
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (x *XTUDP) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(x.SourcePortStart))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(x.SourcePortEnd))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(x.DestinationPortStart))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(x.DestinationPortEnd))
+ dst = dst[2:]
+ dst[0] = byte(x.InverseFlags)
+ dst = dst[1:]
+ // Padding: dst[:sizeof(uint8)] ~= uint8(0)
+ dst = dst[1:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (x *XTUDP) UnmarshalBytes(src []byte) {
+ x.SourcePortStart = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ x.SourcePortEnd = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ x.DestinationPortStart = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ x.DestinationPortEnd = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ x.InverseFlags = uint8(src[0])
+ src = src[1:]
+ // Padding: var _ uint8 ~= src[:sizeof(uint8)]
+ src = src[1:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (x *XTUDP) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (x *XTUDP) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(x), uintptr(x.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (x *XTUDP) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(x), unsafe.Pointer(&src[0]), uintptr(x.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (x *XTUDP) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (x *XTUDP) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return x.CopyOutN(cc, addr, x.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (x *XTUDP) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (x *XTUDP) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *IP6TEntry) SizeBytes() int {
+ return 12 +
+ (*IP6TIP)(nil).SizeBytes() +
+ 1*4 +
+ (*XTCounters)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *IP6TEntry) MarshalBytes(dst []byte) {
+ i.IPv6.MarshalBytes(dst[:i.IPv6.SizeBytes()])
+ dst = dst[i.IPv6.SizeBytes():]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.NFCache))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(i.TargetOffset))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(i.NextOffset))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Comeback))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0}
+ dst = dst[1*(4):]
+ i.Counters.MarshalBytes(dst[:i.Counters.SizeBytes()])
+ dst = dst[i.Counters.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *IP6TEntry) UnmarshalBytes(src []byte) {
+ i.IPv6.UnmarshalBytes(src[:i.IPv6.SizeBytes()])
+ src = src[i.IPv6.SizeBytes():]
+ i.NFCache = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ i.TargetOffset = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ i.NextOffset = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ i.Comeback = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: ~ copy([4]byte(i._), src[:sizeof(byte)*4])
+ src = src[1*(4):]
+ i.Counters.UnmarshalBytes(src[:i.Counters.SizeBytes()])
+ src = src[i.Counters.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *IP6TEntry) Packed() bool {
+ return i.Counters.Packed() && i.IPv6.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *IP6TEntry) MarshalUnsafe(dst []byte) {
+ if i.Counters.Packed() && i.IPv6.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(i), uintptr(i.SizeBytes()))
+ } else {
+ // Type IP6TEntry doesn't have a packed layout in memory, fall back to MarshalBytes.
+ i.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *IP6TEntry) UnmarshalUnsafe(src []byte) {
+ if i.Counters.Packed() && i.IPv6.Packed() {
+ gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+ } else {
+ // Type IP6TEntry doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ i.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *IP6TEntry) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !i.Counters.Packed() || !i.IPv6.Packed() {
+ // Type IP6TEntry doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ i.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *IP6TEntry) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *IP6TEntry) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !i.Counters.Packed() || !i.IPv6.Packed() {
+ // Type IP6TEntry doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ i.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *IP6TEntry) WriteTo(writer io.Writer) (int64, error) {
+ if !i.Counters.Packed() || !i.IPv6.Packed() {
+ // Type IP6TEntry doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, i.SizeBytes())
+ i.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
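An IP6TEntry header is normally followed by its matches and target, which the TargetOffset/NextOffset fields locate. A hedged editorial sketch of filling those fields (the 40-byte target size and the import path are made up for illustration):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/abi/linux"
)

func main() {
	var e linux.IP6TEntry
	// With no matches, the target record starts right after the entry header.
	e.TargetOffset = uint16(e.SizeBytes())
	// NextOffset points past the target; 40 is a made-up target size.
	e.NextOffset = e.TargetOffset + 40

	buf := make([]byte, e.SizeBytes())
	e.MarshalUnsafe(buf) // single memmove when IPv6 and Counters report Packed().
	fmt.Println(e.TargetOffset, e.NextOffset, len(buf))
}
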
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *IP6TIP) SizeBytes() int {
+ return 5 +
+ (*Inet6Addr)(nil).SizeBytes() +
+ (*Inet6Addr)(nil).SizeBytes() +
+ (*Inet6Addr)(nil).SizeBytes() +
+ (*Inet6Addr)(nil).SizeBytes() +
+ 1*IFNAMSIZ +
+ 1*IFNAMSIZ +
+ 1*IFNAMSIZ +
+ 1*IFNAMSIZ +
+ 1*3
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *IP6TIP) MarshalBytes(dst []byte) {
+ i.Src.MarshalBytes(dst[:i.Src.SizeBytes()])
+ dst = dst[i.Src.SizeBytes():]
+ i.Dst.MarshalBytes(dst[:i.Dst.SizeBytes()])
+ dst = dst[i.Dst.SizeBytes():]
+ i.SrcMask.MarshalBytes(dst[:i.SrcMask.SizeBytes()])
+ dst = dst[i.SrcMask.SizeBytes():]
+ i.DstMask.MarshalBytes(dst[:i.DstMask.SizeBytes()])
+ dst = dst[i.DstMask.SizeBytes():]
+ for idx := 0; idx < IFNAMSIZ; idx++ {
+ dst[0] = byte(i.InputInterface[idx])
+ dst = dst[1:]
+ }
+ for idx := 0; idx < IFNAMSIZ; idx++ {
+ dst[0] = byte(i.OutputInterface[idx])
+ dst = dst[1:]
+ }
+ for idx := 0; idx < IFNAMSIZ; idx++ {
+ dst[0] = byte(i.InputInterfaceMask[idx])
+ dst = dst[1:]
+ }
+ for idx := 0; idx < IFNAMSIZ; idx++ {
+ dst[0] = byte(i.OutputInterfaceMask[idx])
+ dst = dst[1:]
+ }
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(i.Protocol))
+ dst = dst[2:]
+ dst[0] = byte(i.TOS)
+ dst = dst[1:]
+ dst[0] = byte(i.Flags)
+ dst = dst[1:]
+ dst[0] = byte(i.InverseFlags)
+ dst = dst[1:]
+ // Padding: dst[:sizeof(byte)*3] ~= [3]byte{0}
+ dst = dst[1*(3):]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *IP6TIP) UnmarshalBytes(src []byte) {
+ i.Src.UnmarshalBytes(src[:i.Src.SizeBytes()])
+ src = src[i.Src.SizeBytes():]
+ i.Dst.UnmarshalBytes(src[:i.Dst.SizeBytes()])
+ src = src[i.Dst.SizeBytes():]
+ i.SrcMask.UnmarshalBytes(src[:i.SrcMask.SizeBytes()])
+ src = src[i.SrcMask.SizeBytes():]
+ i.DstMask.UnmarshalBytes(src[:i.DstMask.SizeBytes()])
+ src = src[i.DstMask.SizeBytes():]
+ for idx := 0; idx < IFNAMSIZ; idx++ {
+ i.InputInterface[idx] = src[0]
+ src = src[1:]
+ }
+ for idx := 0; idx < IFNAMSIZ; idx++ {
+ i.OutputInterface[idx] = src[0]
+ src = src[1:]
+ }
+ for idx := 0; idx < IFNAMSIZ; idx++ {
+ i.InputInterfaceMask[idx] = src[0]
+ src = src[1:]
+ }
+ for idx := 0; idx < IFNAMSIZ; idx++ {
+ i.OutputInterfaceMask[idx] = src[0]
+ src = src[1:]
+ }
+ i.Protocol = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ i.TOS = uint8(src[0])
+ src = src[1:]
+ i.Flags = uint8(src[0])
+ src = src[1:]
+ i.InverseFlags = uint8(src[0])
+ src = src[1:]
+ // Padding: ~ copy([3]byte(i._), src[:sizeof(byte)*3])
+ src = src[1*(3):]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *IP6TIP) Packed() bool {
+ return i.Dst.Packed() && i.DstMask.Packed() && i.Src.Packed() && i.SrcMask.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *IP6TIP) MarshalUnsafe(dst []byte) {
+ if i.Dst.Packed() && i.DstMask.Packed() && i.Src.Packed() && i.SrcMask.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(i), uintptr(i.SizeBytes()))
+ } else {
+ // Type IP6TIP doesn't have a packed layout in memory, fall back to MarshalBytes.
+ i.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *IP6TIP) UnmarshalUnsafe(src []byte) {
+ if i.Dst.Packed() && i.DstMask.Packed() && i.Src.Packed() && i.SrcMask.Packed() {
+ gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+ } else {
+ // Type IP6TIP doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ i.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *IP6TIP) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !i.Dst.Packed() || !i.DstMask.Packed() || !i.Src.Packed() || !i.SrcMask.Packed() {
+ // Type IP6TIP doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ i.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *IP6TIP) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *IP6TIP) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !i.Dst.Packed() || !i.DstMask.Packed() || !i.Src.Packed() || !i.SrcMask.Packed() {
+ // Type IP6TIP doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ i.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *IP6TIP) WriteTo(writer io.Writer) (int64, error) {
+ if !i.Dst.Packed() || !i.DstMask.Packed() || !i.Src.Packed() || !i.SrcMask.Packed() {
+ // Type IP6TIP doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, i.SizeBytes())
+ i.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *IP6TReplace) SizeBytes() int {
+ return 24 +
+ (*TableName)(nil).SizeBytes() +
+ 4*NF_INET_NUMHOOKS +
+ 4*NF_INET_NUMHOOKS
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *IP6TReplace) MarshalBytes(dst []byte) {
+ i.Name.MarshalBytes(dst[:i.Name.SizeBytes()])
+ dst = dst[i.Name.SizeBytes():]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.ValidHooks))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.NumEntries))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Size))
+ dst = dst[4:]
+ for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.HookEntry[idx]))
+ dst = dst[4:]
+ }
+ for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Underflow[idx]))
+ dst = dst[4:]
+ }
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.NumCounters))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(i.Counters))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *IP6TReplace) UnmarshalBytes(src []byte) {
+ i.Name.UnmarshalBytes(src[:i.Name.SizeBytes()])
+ src = src[i.Name.SizeBytes():]
+ i.ValidHooks = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ i.NumEntries = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ i.Size = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
+ i.HookEntry[idx] = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ }
+ for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
+ i.Underflow[idx] = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ }
+ i.NumCounters = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ i.Counters = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *IP6TReplace) Packed() bool {
+ return i.Name.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *IP6TReplace) MarshalUnsafe(dst []byte) {
+ if i.Name.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(i), uintptr(i.SizeBytes()))
+ } else {
+ // Type IP6TReplace doesn't have a packed layout in memory, fall back to MarshalBytes.
+ i.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *IP6TReplace) UnmarshalUnsafe(src []byte) {
+ if i.Name.Packed() {
+ gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+ } else {
+ // Type IP6TReplace doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ i.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *IP6TReplace) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !i.Name.Packed() {
+ // Type IP6TReplace doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ i.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *IP6TReplace) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *IP6TReplace) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !i.Name.Packed() {
+ // Type IP6TReplace doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ i.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *IP6TReplace) WriteTo(writer io.Writer) (int64, error) {
+ if !i.Name.Packed() {
+ // Type IP6TReplace doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, i.SizeBytes())
+ i.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
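+
+// exampleIP6TReplaceRoundTrip is an illustrative sketch, not part of the
+// generated code: it shows the MarshalBytes/UnmarshalBytes round trip that the
+// methods above provide. The function name and field values are hypothetical.
+func exampleIP6TReplaceRoundTrip() bool {
+ var in, out IP6TReplace
+ in.ValidHooks = 0x1f // hypothetical hook mask.
+ in.NumEntries = 6    // hypothetical entry count.
+ buf := make([]byte, in.SizeBytes()) // SizeBytes sizes the buffer exactly.
+ in.MarshalBytes(buf)
+ out.UnmarshalBytes(buf)
+ return out.ValidHooks == in.ValidHooks && out.NumEntries == in.NumEntries
+}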
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (ke *KernelIP6TEntry) Packed() bool {
+ // Type KernelIP6TEntry is dynamic so it might have slice/string headers. Hence, it is not packed.
+ return false
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (ke *KernelIP6TEntry) MarshalUnsafe(dst []byte) {
+ // Type KernelIP6TEntry doesn't have a packed layout in memory, fall back to MarshalBytes.
+ ke.MarshalBytes(dst)
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (ke *KernelIP6TEntry) UnmarshalUnsafe(src []byte) {
+ // Type KernelIP6TEntry doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ ke.UnmarshalBytes(src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (ke *KernelIP6TEntry) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Type KernelIP6TEntry doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(ke.SizeBytes()) // escapes: okay.
+ ke.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (ke *KernelIP6TEntry) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return ke.CopyOutN(cc, addr, ke.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (ke *KernelIP6TEntry) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Type KernelIP6TEntry doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(ke.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ ke.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (ke *KernelIP6TEntry) WriteTo(writer io.Writer) (int64, error) {
+ // Type KernelIP6TEntry doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, ke.SizeBytes())
+ ke.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+}
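+
+// writeKernelIP6TEntry is an illustrative sketch, not part of the generated
+// code: because dynamic types such as KernelIP6TEntry are never packed,
+// WriteTo always serializes through MarshalBytes into a scratch buffer of
+// SizeBytes() bytes before handing the result to the writer. The helper name
+// is hypothetical.
+func writeKernelIP6TEntry(w io.Writer, e *KernelIP6TEntry) (int64, error) {
+ return e.WriteTo(w)
+}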
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (ke *KernelIP6TGetEntries) Packed() bool {
+ // Type KernelIP6TGetEntries is dynamic so it might have slice/string headers. Hence, it is not packed.
+ return false
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (ke *KernelIP6TGetEntries) MarshalUnsafe(dst []byte) {
+ // Type KernelIP6TGetEntries doesn't have a packed layout in memory, fall back to MarshalBytes.
+ ke.MarshalBytes(dst)
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (ke *KernelIP6TGetEntries) UnmarshalUnsafe(src []byte) {
+ // Type KernelIP6TGetEntries doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ ke.UnmarshalBytes(src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (ke *KernelIP6TGetEntries) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Type KernelIP6TGetEntries doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(ke.SizeBytes()) // escapes: okay.
+ ke.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (ke *KernelIP6TGetEntries) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return ke.CopyOutN(cc, addr, ke.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (ke *KernelIP6TGetEntries) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Type KernelIP6TGetEntries doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(ke.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ ke.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (ke *KernelIP6TGetEntries) WriteTo(writer io.Writer) (int64, error) {
+ // Type KernelIP6TGetEntries doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, ke.SizeBytes())
+ ke.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (n *NFNATRange) SizeBytes() int {
+ return 8 +
+ (*Inet6Addr)(nil).SizeBytes() +
+ (*Inet6Addr)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (n *NFNATRange) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(n.Flags))
+ dst = dst[4:]
+ n.MinAddr.MarshalBytes(dst[:n.MinAddr.SizeBytes()])
+ dst = dst[n.MinAddr.SizeBytes():]
+ n.MaxAddr.MarshalBytes(dst[:n.MaxAddr.SizeBytes()])
+ dst = dst[n.MaxAddr.SizeBytes():]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(n.MinProto))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(n.MaxProto))
+ dst = dst[2:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (n *NFNATRange) UnmarshalBytes(src []byte) {
+ n.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ n.MinAddr.UnmarshalBytes(src[:n.MinAddr.SizeBytes()])
+ src = src[n.MinAddr.SizeBytes():]
+ n.MaxAddr.UnmarshalBytes(src[:n.MaxAddr.SizeBytes()])
+ src = src[n.MaxAddr.SizeBytes():]
+ n.MinProto = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ n.MaxProto = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (n *NFNATRange) Packed() bool {
+ return n.MaxAddr.Packed() && n.MinAddr.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (n *NFNATRange) MarshalUnsafe(dst []byte) {
+ if n.MaxAddr.Packed() && n.MinAddr.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(n), uintptr(n.SizeBytes()))
+ } else {
+ // Type NFNATRange doesn't have a packed layout in memory, fall back to MarshalBytes.
+ n.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (n *NFNATRange) UnmarshalUnsafe(src []byte) {
+ if n.MaxAddr.Packed() && n.MinAddr.Packed() {
+ gohacks.Memmove(unsafe.Pointer(n), unsafe.Pointer(&src[0]), uintptr(n.SizeBytes()))
+ } else {
+ // Type NFNATRange doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ n.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (n *NFNATRange) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !n.MaxAddr.Packed() || !n.MinAddr.Packed() {
+ // Type NFNATRange doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(n.SizeBytes()) // escapes: okay.
+ n.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+ hdr.Len = n.SizeBytes()
+ hdr.Cap = n.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that n
+ // must live until the use above.
+ runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (n *NFNATRange) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return n.CopyOutN(cc, addr, n.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (n *NFNATRange) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !n.MaxAddr.Packed() || !n.MinAddr.Packed() {
+ // Type NFNATRange doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(n.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ n.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+ hdr.Len = n.SizeBytes()
+ hdr.Cap = n.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that n
+ // must live until the use above.
+ runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (n *NFNATRange) WriteTo(writer io.Writer) (int64, error) {
+ if !n.MaxAddr.Packed() || !n.MinAddr.Packed() {
+ // Type NFNATRange doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, n.SizeBytes())
+ n.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+ hdr.Len = n.SizeBytes()
+ hdr.Cap = n.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that n
+ // must live until the use above.
+ runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
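+
+// exampleNFNATRangeRoundTrip is an illustrative sketch (hypothetical name and
+// values, not part of the generated code): MarshalUnsafe/UnmarshalUnsafe
+// round-trip an NFNATRange whether or not its layout is packed, since both
+// methods fall back to the byte-by-byte path when needed.
+func exampleNFNATRangeRoundTrip() bool {
+ var in, out NFNATRange
+ in.MinProto = 1024 // hypothetical port bounds.
+ in.MaxProto = 2048
+ buf := make([]byte, in.SizeBytes())
+ in.MarshalUnsafe(buf)
+ out.UnmarshalUnsafe(buf)
+ return out.MinProto == in.MinProto && out.MaxProto == in.MaxProto
+}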
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (n *NetlinkAttrHeader) SizeBytes() int {
+ return 4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (n *NetlinkAttrHeader) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(n.Length))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(n.Type))
+ dst = dst[2:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (n *NetlinkAttrHeader) UnmarshalBytes(src []byte) {
+ n.Length = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ n.Type = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (n *NetlinkAttrHeader) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (n *NetlinkAttrHeader) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(n), uintptr(n.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (n *NetlinkAttrHeader) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(n), unsafe.Pointer(&src[0]), uintptr(n.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (n *NetlinkAttrHeader) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+ hdr.Len = n.SizeBytes()
+ hdr.Cap = n.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that n
+ // must live until the use above.
+ runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (n *NetlinkAttrHeader) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return n.CopyOutN(cc, addr, n.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (n *NetlinkAttrHeader) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+ hdr.Len = n.SizeBytes()
+ hdr.Cap = n.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that n
+ // must live until the use above.
+ runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (n *NetlinkAttrHeader) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+ hdr.Len = n.SizeBytes()
+ hdr.Cap = n.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that n
+ // must live until the use above.
+ runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
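+
+// exampleMarshalNetlinkAttr is an illustrative sketch, not part of the
+// generated code: it marshals the 4-byte NetlinkAttrHeader ahead of its
+// payload. The helper name and attribute type are hypothetical, and netlink
+// attribute padding/alignment is deliberately ignored here.
+func exampleMarshalNetlinkAttr(payload []byte) []byte {
+ hdr := NetlinkAttrHeader{Length: uint16(4 + len(payload)), Type: 1}
+ buf := make([]byte, hdr.SizeBytes()+len(payload))
+ hdr.MarshalUnsafe(buf[:hdr.SizeBytes()]) // packed type: a single memmove.
+ copy(buf[hdr.SizeBytes():], payload)
+ return buf
+}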
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (n *NetlinkErrorMessage) SizeBytes() int {
+ return 4 +
+ (*NetlinkMessageHeader)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (n *NetlinkErrorMessage) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(n.Error))
+ dst = dst[4:]
+ n.Header.MarshalBytes(dst[:n.Header.SizeBytes()])
+ dst = dst[n.Header.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (n *NetlinkErrorMessage) UnmarshalBytes(src []byte) {
+ n.Error = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ n.Header.UnmarshalBytes(src[:n.Header.SizeBytes()])
+ src = src[n.Header.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (n *NetlinkErrorMessage) Packed() bool {
+ return n.Header.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (n *NetlinkErrorMessage) MarshalUnsafe(dst []byte) {
+ if n.Header.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(n), uintptr(n.SizeBytes()))
+ } else {
+ // Type NetlinkErrorMessage doesn't have a packed layout in memory, fall back to MarshalBytes.
+ n.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (n *NetlinkErrorMessage) UnmarshalUnsafe(src []byte) {
+ if n.Header.Packed() {
+ gohacks.Memmove(unsafe.Pointer(n), unsafe.Pointer(&src[0]), uintptr(n.SizeBytes()))
+ } else {
+ // Type NetlinkErrorMessage doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ n.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (n *NetlinkErrorMessage) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !n.Header.Packed() {
+ // Type NetlinkErrorMessage doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(n.SizeBytes()) // escapes: okay.
+ n.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+ hdr.Len = n.SizeBytes()
+ hdr.Cap = n.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that n
+ // must live until the use above.
+ runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (n *NetlinkErrorMessage) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return n.CopyOutN(cc, addr, n.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (n *NetlinkErrorMessage) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !n.Header.Packed() {
+ // Type NetlinkErrorMessage doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(n.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ n.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+ hdr.Len = n.SizeBytes()
+ hdr.Cap = n.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that n
+ // must live until the use above.
+ runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (n *NetlinkErrorMessage) WriteTo(writer io.Writer) (int64, error) {
+ if !n.Header.Packed() {
+ // Type NetlinkErrorMessage doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, n.SizeBytes())
+ n.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+ hdr.Len = n.SizeBytes()
+ hdr.Cap = n.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that n
+ // must live until the use above.
+ runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
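+
+// exampleNetlinkErrorMarshal is an illustrative sketch (hypothetical helper,
+// not part of the generated code): a NetlinkErrorMessage carries an
+// errno-style code followed by the header of the request it acknowledges, and
+// MarshalBytes lays both out contiguously.
+func exampleNetlinkErrorMarshal(req NetlinkMessageHeader, errno int32) []byte {
+ msg := NetlinkErrorMessage{Error: errno, Header: req}
+ buf := make([]byte, msg.SizeBytes())
+ msg.MarshalBytes(buf)
+ return buf
+}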
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (n *NetlinkMessageHeader) SizeBytes() int {
+ return 16
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (n *NetlinkMessageHeader) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(n.Length))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(n.Type))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(n.Flags))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(n.Seq))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(n.PortID))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (n *NetlinkMessageHeader) UnmarshalBytes(src []byte) {
+ n.Length = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ n.Type = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ n.Flags = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ n.Seq = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ n.PortID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (n *NetlinkMessageHeader) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (n *NetlinkMessageHeader) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(n), uintptr(n.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (n *NetlinkMessageHeader) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(n), unsafe.Pointer(&src[0]), uintptr(n.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (n *NetlinkMessageHeader) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+ hdr.Len = n.SizeBytes()
+ hdr.Cap = n.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that n
+ // must live until the use above.
+ runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (n *NetlinkMessageHeader) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return n.CopyOutN(cc, addr, n.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (n *NetlinkMessageHeader) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+ hdr.Len = n.SizeBytes()
+ hdr.Cap = n.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that n
+ // must live until the use above.
+ runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (n *NetlinkMessageHeader) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+ hdr.Len = n.SizeBytes()
+ hdr.Cap = n.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that n
+ // must live until the use above.
+ runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
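+
+// exampleNetlinkHeaderRoundTrip is an illustrative sketch with hypothetical
+// values, not part of the generated code: NetlinkMessageHeader is packed, so
+// the unsafe marshal/unmarshal pair reduces to two 16-byte memmoves.
+func exampleNetlinkHeaderRoundTrip() bool {
+ in := NetlinkMessageHeader{Length: 16, Type: 2, Flags: 1, Seq: 42}
+ var out NetlinkMessageHeader
+ buf := make([]byte, in.SizeBytes())
+ in.MarshalUnsafe(buf)
+ out.UnmarshalUnsafe(buf)
+ return out.Seq == in.Seq && out.Type == in.Type
+}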
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SockAddrNetlink) SizeBytes() int {
+ return 12
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SockAddrNetlink) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Family))
+ dst = dst[2:]
+ // Padding: dst[:sizeof(uint16)] ~= uint16(0)
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.PortID))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Groups))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SockAddrNetlink) UnmarshalBytes(src []byte) {
+ s.Family = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ // Padding: var _ uint16 ~= src[:sizeof(uint16)]
+ src = src[2:]
+ s.PortID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Groups = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SockAddrNetlink) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SockAddrNetlink) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SockAddrNetlink) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SockAddrNetlink) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SockAddrNetlink) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SockAddrNetlink) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SockAddrNetlink) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
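+
+// exampleSockAddrNetlinkMarshal is an illustrative sketch, not part of the
+// generated code: the 12 marshalled bytes are the 2-byte family, 2 padding
+// bytes, the 4-byte port ID, and the 4-byte group mask. The helper name is
+// hypothetical; 16 is AF_NETLINK on Linux.
+func exampleSockAddrNetlinkMarshal(portID uint32) []byte {
+ sa := SockAddrNetlink{Family: 16, PortID: portID}
+ buf := make([]byte, sa.SizeBytes())
+ sa.MarshalBytes(buf)
+ return buf
+}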
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *InterfaceAddrMessage) SizeBytes() int {
+ return 8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *InterfaceAddrMessage) MarshalBytes(dst []byte) {
+ dst[0] = byte(i.Family)
+ dst = dst[1:]
+ dst[0] = byte(i.PrefixLen)
+ dst = dst[1:]
+ dst[0] = byte(i.Flags)
+ dst = dst[1:]
+ dst[0] = byte(i.Scope)
+ dst = dst[1:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Index))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *InterfaceAddrMessage) UnmarshalBytes(src []byte) {
+ i.Family = uint8(src[0])
+ src = src[1:]
+ i.PrefixLen = uint8(src[0])
+ src = src[1:]
+ i.Flags = uint8(src[0])
+ src = src[1:]
+ i.Scope = uint8(src[0])
+ src = src[1:]
+ i.Index = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *InterfaceAddrMessage) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *InterfaceAddrMessage) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(i), uintptr(i.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *InterfaceAddrMessage) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *InterfaceAddrMessage) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *InterfaceAddrMessage) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *InterfaceAddrMessage) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *InterfaceAddrMessage) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *InterfaceInfoMessage) SizeBytes() int {
+ return 16
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *InterfaceInfoMessage) MarshalBytes(dst []byte) {
+ dst[0] = byte(i.Family)
+ dst = dst[1:]
+ // Padding: dst[:sizeof(uint8)] ~= uint8(0)
+ dst = dst[1:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(i.Type))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Index))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Flags))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Change))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *InterfaceInfoMessage) UnmarshalBytes(src []byte) {
+ i.Family = uint8(src[0])
+ src = src[1:]
+ // Padding: var _ uint8 ~= src[:sizeof(uint8)]
+ src = src[1:]
+ i.Type = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ i.Index = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ i.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ i.Change = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *InterfaceInfoMessage) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *InterfaceInfoMessage) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(i), uintptr(i.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *InterfaceInfoMessage) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *InterfaceInfoMessage) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *InterfaceInfoMessage) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *InterfaceInfoMessage) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *InterfaceInfoMessage) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (r *RouteMessage) SizeBytes() int {
+ return 12
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (r *RouteMessage) MarshalBytes(dst []byte) {
+ dst[0] = byte(r.Family)
+ dst = dst[1:]
+ dst[0] = byte(r.DstLen)
+ dst = dst[1:]
+ dst[0] = byte(r.SrcLen)
+ dst = dst[1:]
+ dst[0] = byte(r.TOS)
+ dst = dst[1:]
+ dst[0] = byte(r.Table)
+ dst = dst[1:]
+ dst[0] = byte(r.Protocol)
+ dst = dst[1:]
+ dst[0] = byte(r.Scope)
+ dst = dst[1:]
+ dst[0] = byte(r.Type)
+ dst = dst[1:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(r.Flags))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (r *RouteMessage) UnmarshalBytes(src []byte) {
+ r.Family = uint8(src[0])
+ src = src[1:]
+ r.DstLen = uint8(src[0])
+ src = src[1:]
+ r.SrcLen = uint8(src[0])
+ src = src[1:]
+ r.TOS = uint8(src[0])
+ src = src[1:]
+ r.Table = uint8(src[0])
+ src = src[1:]
+ r.Protocol = uint8(src[0])
+ src = src[1:]
+ r.Scope = uint8(src[0])
+ src = src[1:]
+ r.Type = uint8(src[0])
+ src = src[1:]
+ r.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (r *RouteMessage) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (r *RouteMessage) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(r), uintptr(r.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (r *RouteMessage) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(r), unsafe.Pointer(&src[0]), uintptr(r.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (r *RouteMessage) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(r)))
+ hdr.Len = r.SizeBytes()
+ hdr.Cap = r.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that r
+ // must live until the use above.
+ runtime.KeepAlive(r) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (r *RouteMessage) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return r.CopyOutN(cc, addr, r.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (r *RouteMessage) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(r)))
+ hdr.Len = r.SizeBytes()
+ hdr.Cap = r.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that r
+ // must live until the use above.
+ runtime.KeepAlive(r) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (r *RouteMessage) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(r)))
+ hdr.Len = r.SizeBytes()
+ hdr.Cap = r.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that r
+ // must live until the use above.
+ runtime.KeepAlive(r) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (p *PollFD) SizeBytes() int {
+ return 8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (p *PollFD) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(p.FD))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(p.Events))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(p.REvents))
+ dst = dst[2:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (p *PollFD) UnmarshalBytes(src []byte) {
+ p.FD = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ p.Events = int16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ p.REvents = int16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (p *PollFD) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (p *PollFD) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(p), uintptr(p.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (p *PollFD) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(p), unsafe.Pointer(&src[0]), uintptr(p.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (p *PollFD) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(p)))
+ hdr.Len = p.SizeBytes()
+ hdr.Cap = p.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that p
+ // must live until the use above.
+ runtime.KeepAlive(p) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (p *PollFD) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return p.CopyOutN(cc, addr, p.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (p *PollFD) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(p)))
+ hdr.Len = p.SizeBytes()
+ hdr.Cap = p.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that p
+ // must live until the use above.
+ runtime.KeepAlive(p) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (p *PollFD) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(p)))
+ hdr.Len = p.SizeBytes()
+ hdr.Cap = p.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that p
+ // must live until the use above.
+ runtime.KeepAlive(p) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// CopyPollFDSliceIn copies in a slice of PollFD objects from the task's memory.
+func CopyPollFDSliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []PollFD) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*PollFD)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&dst)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyInBytes(addr, buf)
+ // Since we bypassed the compiler's escape analysis, indicate that dst
+ // must live until the use above.
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyPollFDSliceOut copies a slice of PollFD objects to the task's memory.
+func CopyPollFDSliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []PollFD) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*PollFD)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&src)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by src's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyOutBytes(addr, buf)
+ // Since we bypassed the compiler's escape analysis, indicate that src
+ // must live until the use above.
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// MarshalUnsafePollFDSlice is like PollFD.MarshalUnsafe, but for a []PollFD.
+func MarshalUnsafePollFDSlice(src []PollFD, dst []byte) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*PollFD)(nil).SizeBytes()
+
+ dst = dst[:size*count]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(dst)))
+ return size * count, nil
+}
+
+// UnmarshalUnsafePollFDSlice is like PollFD.UnmarshalUnsafe, but for a []PollFD.
+func UnmarshalUnsafePollFDSlice(dst []PollFD, src []byte) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*PollFD)(nil).SizeBytes()
+
+ src = src[:size*count]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(src)))
+ return size * count, nil
+}
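+
+// examplePollFDSliceRoundTrip is an illustrative sketch with hypothetical file
+// descriptors and event masks, not part of the generated code: it exercises
+// the slice helpers above, which serialize len(slice)*8 bytes in a single
+// memmove.
+func examplePollFDSliceRoundTrip() bool {
+ in := []PollFD{{FD: 3, Events: 1}, {FD: 4, Events: 4}}
+ out := make([]PollFD, len(in))
+ buf := make([]byte, len(in)*(*PollFD)(nil).SizeBytes())
+ if _, err := MarshalUnsafePollFDSlice(in, buf); err != nil {
+ return false
+ }
+ if _, err := UnmarshalUnsafePollFDSlice(out, buf); err != nil {
+ return false
+ }
+ return out[0].FD == 3 && out[1].FD == 4
+}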
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (r *RSeqCriticalSection) SizeBytes() int {
+ return 32
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (r *RSeqCriticalSection) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(r.Version))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(r.Flags))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.Start))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.PostCommitOffset))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.Abort))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (r *RSeqCriticalSection) UnmarshalBytes(src []byte) {
+ r.Version = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ r.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ r.Start = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ r.PostCommitOffset = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ r.Abort = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (r *RSeqCriticalSection) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (r *RSeqCriticalSection) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(r), uintptr(r.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (r *RSeqCriticalSection) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(r), unsafe.Pointer(&src[0]), uintptr(r.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (r *RSeqCriticalSection) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(r)))
+ hdr.Len = r.SizeBytes()
+ hdr.Cap = r.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that r
+ // must live until the use above.
+ runtime.KeepAlive(r) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (r *RSeqCriticalSection) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return r.CopyOutN(cc, addr, r.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (r *RSeqCriticalSection) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(r)))
+ hdr.Len = r.SizeBytes()
+ hdr.Cap = r.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that r
+ // must live until the use above.
+ runtime.KeepAlive(r) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (r *RSeqCriticalSection) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(r)))
+ hdr.Len = r.SizeBytes()
+ hdr.Cap = r.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that r
+ // must live until the use above.
+ runtime.KeepAlive(r) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (r *Rusage) SizeBytes() int {
+ return 112 +
+ (*Timeval)(nil).SizeBytes() +
+ (*Timeval)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (r *Rusage) MarshalBytes(dst []byte) {
+ r.UTime.MarshalBytes(dst[:r.UTime.SizeBytes()])
+ dst = dst[r.UTime.SizeBytes():]
+ r.STime.MarshalBytes(dst[:r.STime.SizeBytes()])
+ dst = dst[r.STime.SizeBytes():]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.MaxRSS))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.IXRSS))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.IDRSS))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.ISRSS))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.MinFlt))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.MajFlt))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.NSwap))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.InBlock))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.OuBlock))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.MsgSnd))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.MsgRcv))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.NSignals))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.NVCSw))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.NIvCSw))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (r *Rusage) UnmarshalBytes(src []byte) {
+ r.UTime.UnmarshalBytes(src[:r.UTime.SizeBytes()])
+ src = src[r.UTime.SizeBytes():]
+ r.STime.UnmarshalBytes(src[:r.STime.SizeBytes()])
+ src = src[r.STime.SizeBytes():]
+ r.MaxRSS = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ r.IXRSS = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ r.IDRSS = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ r.ISRSS = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ r.MinFlt = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ r.MajFlt = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ r.NSwap = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ r.InBlock = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ r.OuBlock = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ r.MsgSnd = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ r.MsgRcv = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ r.NSignals = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ r.NVCSw = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ r.NIvCSw = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (r *Rusage) Packed() bool {
+ return r.STime.Packed() && r.UTime.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (r *Rusage) MarshalUnsafe(dst []byte) {
+ if r.STime.Packed() && r.UTime.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(r), uintptr(r.SizeBytes()))
+ } else {
+ // Type Rusage doesn't have a packed layout in memory, fall back to MarshalBytes.
+ r.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (r *Rusage) UnmarshalUnsafe(src []byte) {
+ if r.STime.Packed() && r.UTime.Packed() {
+ gohacks.Memmove(unsafe.Pointer(r), unsafe.Pointer(&src[0]), uintptr(r.SizeBytes()))
+ } else {
+ // Type Rusage doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ r.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (r *Rusage) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !r.STime.Packed() || !r.UTime.Packed() {
+ // Type Rusage doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(r.SizeBytes()) // escapes: okay.
+ r.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(r)))
+ hdr.Len = r.SizeBytes()
+ hdr.Cap = r.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that r
+ // must live until the use above.
+ runtime.KeepAlive(r) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (r *Rusage) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return r.CopyOutN(cc, addr, r.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (r *Rusage) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !r.STime.Packed() || !r.UTime.Packed() {
+ // Type Rusage doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(r.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ r.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(r)))
+ hdr.Len = r.SizeBytes()
+ hdr.Cap = r.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that r
+ // must live until the use above.
+ runtime.KeepAlive(r) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (r *Rusage) WriteTo(writer io.Writer) (int64, error) {
+ if !r.STime.Packed() || !r.UTime.Packed() {
+ // Type Rusage doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, r.SizeBytes())
+ r.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(r)))
+ hdr.Len = r.SizeBytes()
+ hdr.Cap = r.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that r
+ // must live until the use above.
+ runtime.KeepAlive(r) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
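+
+// exampleRusageRoundTrip is an illustrative sketch (hypothetical name and
+// value, not part of the generated code): the marshalled form is the two
+// Timevals followed by the fourteen 64-bit counters, which account for the
+// 112 fixed bytes in SizeBytes.
+func exampleRusageRoundTrip() bool {
+ var in, out Rusage
+ in.MaxRSS = 1 << 20 // hypothetical peak RSS, in kilobytes.
+ buf := make([]byte, in.SizeBytes())
+ in.MarshalBytes(buf)
+ out.UnmarshalBytes(buf)
+ return out.MaxRSS == in.MaxRSS
+}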
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SeccompData) SizeBytes() int {
+ return 16 +
+ 8*6
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SeccompData) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Nr))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Arch))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.InstructionPointer))
+ dst = dst[8:]
+ for idx := 0; idx < 6; idx++ {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Args[idx]))
+ dst = dst[8:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SeccompData) UnmarshalBytes(src []byte) {
+ s.Nr = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Arch = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.InstructionPointer = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ for idx := 0; idx < 6; idx++ {
+ s.Args[idx] = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SeccompData) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SeccompData) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SeccompData) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SeccompData) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SeccompData) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SeccompData) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SeccompData) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SemInfo) SizeBytes() int {
+ return 40
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SemInfo) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.SemMap))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.SemMni))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.SemMns))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.SemMnu))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.SemMsl))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.SemOpm))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.SemUme))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.SemUsz))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.SemVmx))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.SemAem))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SemInfo) UnmarshalBytes(src []byte) {
+ s.SemMap = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.SemMni = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.SemMns = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.SemMnu = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.SemMsl = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.SemOpm = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.SemUme = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.SemUsz = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.SemVmx = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.SemAem = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SemInfo) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SemInfo) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SemInfo) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SemInfo) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SemInfo) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SemInfo) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SemInfo) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *Sembuf) SizeBytes() int {
+ return 6
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *Sembuf) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.SemNum))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.SemOp))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.SemFlg))
+ dst = dst[2:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *Sembuf) UnmarshalBytes(src []byte) {
+ s.SemNum = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ s.SemOp = int16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ s.SemFlg = int16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *Sembuf) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *Sembuf) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *Sembuf) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *Sembuf) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *Sembuf) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *Sembuf) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *Sembuf) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// CopySembufSliceIn copies in a slice of Sembuf objects from the task's memory.
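+// The copy is performed directly into the memory backing dst, with no
+// intermediate buffer; runtime.KeepAlive below keeps dst live across the
+// unsafe access.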
+func CopySembufSliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []Sembuf) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Sembuf)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&dst)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyInBytes(addr, buf)
+ // Since we bypassed the compiler's escape analysis, indicate that dst
+ // must live until the use above.
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
+ return length, err
+}
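+
+// A minimal usage sketch for CopySembufSliceIn, assuming cc is any
+// marshal.CopyContext (for example, the calling task in a syscall handler)
+// and that sopsAddr and nsops are supplied by the caller:
+//
+//	ops := make([]Sembuf, nsops)
+//	if _, err := CopySembufSliceIn(cc, sopsAddr, ops); err != nil {
+//		return err
+//	}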
+
+// CopySembufSliceOut copies a slice of Sembuf objects to the task's memory.
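+// The bytes are written straight from the memory backing src, which
+// runtime.KeepAlive below keeps live across the unsafe access.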
+func CopySembufSliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []Sembuf) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Sembuf)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&src)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by src's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyOutBytes(addr, buf)
+ // Since we bypassed the compiler's escape analysis, indicate that src
+ // must live until the use above.
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// MarshalUnsafeSembufSlice is like Sembuf.MarshalUnsafe, but for a []Sembuf.
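+// Sembuf has a packed layout, so the whole slice is serialized with a single
+// memmove; dst must have capacity for at least len(src) marshalled elements,
+// otherwise the reslice below panics.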
+func MarshalUnsafeSembufSlice(src []Sembuf, dst []byte) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Sembuf)(nil).SizeBytes()
+
+ dst = dst[:size*count]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(dst)))
+ return size * count, nil
+}
+
+// UnmarshalUnsafeSembufSlice is like Sembuf.UnmarshalUnsafe, but for a []Sembuf.
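+// As with MarshalUnsafeSembufSlice, a single memmove covers the entire slice;
+// src must have capacity for at least len(dst) marshalled elements, otherwise
+// the reslice below panics.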
+func UnmarshalUnsafeSembufSlice(dst []Sembuf, src []byte) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Sembuf)(nil).SizeBytes()
+
+ src = src[:size*count]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(src)))
+ return size * count, nil
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *ShmInfo) SizeBytes() int {
+ return 44 +
+ 1*4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *ShmInfo) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.UsedIDs))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0}
+ dst = dst[1*(4):]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.ShmTot))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.ShmRss))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.ShmSwp))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.SwapAttempts))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.SwapSuccesses))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *ShmInfo) UnmarshalBytes(src []byte) {
+ s.UsedIDs = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: ~ copy([4]byte(s._), src[:sizeof(byte)*4])
+ src = src[1*(4):]
+ s.ShmTot = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.ShmRss = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.ShmSwp = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.SwapAttempts = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.SwapSuccesses = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *ShmInfo) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *ShmInfo) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *ShmInfo) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *ShmInfo) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *ShmInfo) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *ShmInfo) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *ShmInfo) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *ShmParams) SizeBytes() int {
+ return 40
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *ShmParams) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.ShmMax))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.ShmMin))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.ShmMni))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.ShmSeg))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.ShmAll))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *ShmParams) UnmarshalBytes(src []byte) {
+ s.ShmMax = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.ShmMin = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.ShmMni = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.ShmSeg = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.ShmAll = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *ShmParams) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *ShmParams) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *ShmParams) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *ShmParams) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *ShmParams) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *ShmParams) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *ShmParams) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *ShmidDS) SizeBytes() int {
+ return 40 +
+ (*IPCPerm)(nil).SizeBytes() +
+ (*TimeT)(nil).SizeBytes() +
+ (*TimeT)(nil).SizeBytes() +
+ (*TimeT)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *ShmidDS) MarshalBytes(dst []byte) {
+ s.ShmPerm.MarshalBytes(dst[:s.ShmPerm.SizeBytes()])
+ dst = dst[s.ShmPerm.SizeBytes():]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.ShmSegsz))
+ dst = dst[8:]
+ s.ShmAtime.MarshalBytes(dst[:s.ShmAtime.SizeBytes()])
+ dst = dst[s.ShmAtime.SizeBytes():]
+ s.ShmDtime.MarshalBytes(dst[:s.ShmDtime.SizeBytes()])
+ dst = dst[s.ShmDtime.SizeBytes():]
+ s.ShmCtime.MarshalBytes(dst[:s.ShmCtime.SizeBytes()])
+ dst = dst[s.ShmCtime.SizeBytes():]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.ShmCpid))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.ShmLpid))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.ShmNattach))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Unused4))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Unused5))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *ShmidDS) UnmarshalBytes(src []byte) {
+ s.ShmPerm.UnmarshalBytes(src[:s.ShmPerm.SizeBytes()])
+ src = src[s.ShmPerm.SizeBytes():]
+ s.ShmSegsz = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.ShmAtime.UnmarshalBytes(src[:s.ShmAtime.SizeBytes()])
+ src = src[s.ShmAtime.SizeBytes():]
+ s.ShmDtime.UnmarshalBytes(src[:s.ShmDtime.SizeBytes()])
+ src = src[s.ShmDtime.SizeBytes():]
+ s.ShmCtime.UnmarshalBytes(src[:s.ShmCtime.SizeBytes()])
+ src = src[s.ShmCtime.SizeBytes():]
+ s.ShmCpid = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.ShmLpid = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.ShmNattach = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Unused4 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Unused5 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *ShmidDS) Packed() bool {
+ return s.ShmAtime.Packed() && s.ShmCtime.Packed() && s.ShmDtime.Packed() && s.ShmPerm.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *ShmidDS) MarshalUnsafe(dst []byte) {
+ if s.ShmAtime.Packed() && s.ShmCtime.Packed() && s.ShmDtime.Packed() && s.ShmPerm.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+ } else {
+ // Type ShmidDS doesn't have a packed layout in memory, fall back to MarshalBytes.
+ s.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *ShmidDS) UnmarshalUnsafe(src []byte) {
+ if s.ShmAtime.Packed() && s.ShmCtime.Packed() && s.ShmDtime.Packed() && s.ShmPerm.Packed() {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+ } else {
+ // Type ShmidDS doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ s.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *ShmidDS) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !s.ShmAtime.Packed() || !s.ShmCtime.Packed() || !s.ShmDtime.Packed() || !s.ShmPerm.Packed() {
+ // Type ShmidDS doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *ShmidDS) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *ShmidDS) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !s.ShmAtime.Packed() || !s.ShmCtime.Packed() || !s.ShmDtime.Packed() || !s.ShmPerm.Packed() {
+ // Type ShmidDS doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ s.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *ShmidDS) WriteTo(writer io.Writer) (int64, error) {
+ if !s.ShmAtime.Packed() || !s.ShmCtime.Packed() || !s.ShmDtime.Packed() || !s.ShmPerm.Packed() {
+ // Type ShmidDS doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SigAction) SizeBytes() int {
+ return 24 +
+ (*SignalSet)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SigAction) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Handler))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Flags))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Restorer))
+ dst = dst[8:]
+ s.Mask.MarshalBytes(dst[:s.Mask.SizeBytes()])
+ dst = dst[s.Mask.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SigAction) UnmarshalBytes(src []byte) {
+ s.Handler = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Flags = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Restorer = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Mask.UnmarshalBytes(src[:s.Mask.SizeBytes()])
+ src = src[s.Mask.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SigAction) Packed() bool {
+ return s.Mask.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SigAction) MarshalUnsafe(dst []byte) {
+ if s.Mask.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+ } else {
+ // Type SigAction doesn't have a packed layout in memory, fall back to MarshalBytes.
+ s.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SigAction) UnmarshalUnsafe(src []byte) {
+ if s.Mask.Packed() {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+ } else {
+ // Type SigAction doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ s.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SigAction) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !s.Mask.Packed() {
+ // Type SigAction doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SigAction) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SigAction) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !s.Mask.Packed() {
+ // Type SigAction doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ s.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SigAction) WriteTo(writer io.Writer) (int64, error) {
+ if !s.Mask.Packed() {
+ // Type SigAction doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *Sigevent) SizeBytes() int {
+ return 20 +
+ 1*44
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *Sigevent) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Value))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Signo))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Notify))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Tid))
+ dst = dst[4:]
+ for idx := 0; idx < 44; idx++ {
+ dst[0] = byte(s.UnRemainder[idx])
+ dst = dst[1:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *Sigevent) UnmarshalBytes(src []byte) {
+ s.Value = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Signo = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Notify = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Tid = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ for idx := 0; idx < 44; idx++ {
+ s.UnRemainder[idx] = src[0]
+ src = src[1:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *Sigevent) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *Sigevent) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *Sigevent) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *Sigevent) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *Sigevent) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *Sigevent) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *Sigevent) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SignalInfo) SizeBytes() int {
+ return 16 +
+ 1*(128-16)
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SignalInfo) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Signo))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Errno))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Code))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+ for idx := 0; idx < (128-16); idx++ {
+ dst[0] = byte(s.Fields[idx])
+ dst = dst[1:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SignalInfo) UnmarshalBytes(src []byte) {
+ s.Signo = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Errno = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Code = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+ for idx := 0; idx < (128-16); idx++ {
+ s.Fields[idx] = src[0]
+ src = src[1:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SignalInfo) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SignalInfo) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SignalInfo) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SignalInfo) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SignalInfo) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SignalInfo) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SignalInfo) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (s *SignalSet) SizeBytes() int {
+ return 8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SignalSet) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(*s))
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SignalSet) UnmarshalBytes(src []byte) {
+ *s = SignalSet(uint64(hostarch.ByteOrder.Uint64(src[:8])))
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SignalSet) Packed() bool {
+ // Scalar newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SignalSet) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SignalSet) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SignalSet) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SignalSet) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SignalSet) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SignalSet) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SignalStack) SizeBytes() int {
+ return 24
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SignalStack) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Addr))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Flags))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Size))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SignalStack) UnmarshalBytes(src []byte) {
+ s.Addr = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+ s.Size = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SignalStack) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SignalStack) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SignalStack) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SignalStack) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SignalStack) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SignalStack) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SignalStack) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SignalfdSiginfo) SizeBytes() int {
+ return 82 +
+ 1*48
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SignalfdSiginfo) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Signo))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Errno))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Code))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.PID))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.UID))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.FD))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.TID))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Band))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Overrun))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.TrapNo))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Status))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Int))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Ptr))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.UTime))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.STime))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Addr))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.AddrLSB))
+ dst = dst[2:]
+ // Padding: dst[:sizeof(uint8)*48] ~= [48]uint8{0}
+ dst = dst[1*(48):]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SignalfdSiginfo) UnmarshalBytes(src []byte) {
+ s.Signo = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Errno = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Code = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.PID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.UID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.FD = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.TID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Band = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Overrun = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.TrapNo = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Status = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Int = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Ptr = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.UTime = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.STime = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Addr = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.AddrLSB = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ // Padding: ~ copy([48]uint8(s._), src[:sizeof(uint8)*48])
+ src = src[1*(48):]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SignalfdSiginfo) Packed() bool {
+ return false
+}
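+
+// Because Packed reports false for SignalfdSiginfo, every marshalling path
+// below goes through the byte-by-byte MarshalBytes/UnmarshalBytes fallback
+// instead of a direct memory copy.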
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SignalfdSiginfo) MarshalUnsafe(dst []byte) {
+ // Type SignalfdSiginfo doesn't have a packed layout in memory, fall back to MarshalBytes.
+ s.MarshalBytes(dst)
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SignalfdSiginfo) UnmarshalUnsafe(src []byte) {
+ // Type SignalfdSiginfo doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ s.UnmarshalBytes(src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SignalfdSiginfo) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Type SignalfdSiginfo doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SignalfdSiginfo) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SignalfdSiginfo) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Type SignalfdSiginfo doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ s.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SignalfdSiginfo) WriteTo(writer io.Writer) (int64, error) {
+ // Type SignalfdSiginfo doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (c *ControlMessageCredentials) SizeBytes() int {
+ return 12
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (c *ControlMessageCredentials) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(c.PID))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(c.UID))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(c.GID))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (c *ControlMessageCredentials) UnmarshalBytes(src []byte) {
+ c.PID = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ c.UID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ c.GID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (c *ControlMessageCredentials) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (c *ControlMessageCredentials) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(c), uintptr(c.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (c *ControlMessageCredentials) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(c), unsafe.Pointer(&src[0]), uintptr(c.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (c *ControlMessageCredentials) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (c *ControlMessageCredentials) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return c.CopyOutN(cc, addr, c.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (c *ControlMessageCredentials) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (c *ControlMessageCredentials) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (c *ControlMessageHeader) SizeBytes() int {
+ return 16
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (c *ControlMessageHeader) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(c.Length))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(c.Level))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(c.Type))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (c *ControlMessageHeader) UnmarshalBytes(src []byte) {
+ c.Length = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ c.Level = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ c.Type = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (c *ControlMessageHeader) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (c *ControlMessageHeader) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(c), uintptr(c.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (c *ControlMessageHeader) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(c), unsafe.Pointer(&src[0]), uintptr(c.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (c *ControlMessageHeader) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (c *ControlMessageHeader) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return c.CopyOutN(cc, addr, c.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (c *ControlMessageHeader) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (c *ControlMessageHeader) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (c *ControlMessageIPPacketInfo) SizeBytes() int {
+ return 4 +
+ (*InetAddr)(nil).SizeBytes() +
+ (*InetAddr)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (c *ControlMessageIPPacketInfo) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(c.NIC))
+ dst = dst[4:]
+ c.LocalAddr.MarshalBytes(dst[:c.LocalAddr.SizeBytes()])
+ dst = dst[c.LocalAddr.SizeBytes():]
+ c.DestinationAddr.MarshalBytes(dst[:c.DestinationAddr.SizeBytes()])
+ dst = dst[c.DestinationAddr.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (c *ControlMessageIPPacketInfo) UnmarshalBytes(src []byte) {
+ c.NIC = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ c.LocalAddr.UnmarshalBytes(src[:c.LocalAddr.SizeBytes()])
+ src = src[c.LocalAddr.SizeBytes():]
+ c.DestinationAddr.UnmarshalBytes(src[:c.DestinationAddr.SizeBytes()])
+ src = src[c.DestinationAddr.SizeBytes():]
+}
+
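A hedged usage sketch: the 12-byte layout written by MarshalBytes above (a 4-byte NIC index followed by two 4-byte IPv4 addresses) matches Linux's struct in_pktinfo. The import path and values are assumptions for illustration.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/abi/linux"
)

func main() {
	info := linux.ControlMessageIPPacketInfo{
		NIC:             2,
		LocalAddr:       linux.InetAddr{10, 0, 0, 1},
		DestinationAddr: linux.InetAddr{10, 0, 0, 2},
	}
	buf := make([]byte, info.SizeBytes()) // 4 + 4 + 4 = 12 bytes.
	info.MarshalBytes(buf)

	var out linux.ControlMessageIPPacketInfo
	out.UnmarshalBytes(buf)
	fmt.Println(out == info) // true
}
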
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (c *ControlMessageIPPacketInfo) Packed() bool {
+ return c.DestinationAddr.Packed() && c.LocalAddr.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (c *ControlMessageIPPacketInfo) MarshalUnsafe(dst []byte) {
+ if c.DestinationAddr.Packed() && c.LocalAddr.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(c), uintptr(c.SizeBytes()))
+ } else {
+ // Type ControlMessageIPPacketInfo doesn't have a packed layout in memory, fall back to MarshalBytes.
+ c.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (c *ControlMessageIPPacketInfo) UnmarshalUnsafe(src []byte) {
+ if c.DestinationAddr.Packed() && c.LocalAddr.Packed() {
+ gohacks.Memmove(unsafe.Pointer(c), unsafe.Pointer(&src[0]), uintptr(c.SizeBytes()))
+ } else {
+ // Type ControlMessageIPPacketInfo doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ c.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (c *ControlMessageIPPacketInfo) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !c.DestinationAddr.Packed() || !c.LocalAddr.Packed() {
+ // Type ControlMessageIPPacketInfo doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(c.SizeBytes()) // escapes: okay.
+ c.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (c *ControlMessageIPPacketInfo) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return c.CopyOutN(cc, addr, c.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (c *ControlMessageIPPacketInfo) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !c.DestinationAddr.Packed() || !c.LocalAddr.Packed() {
+ // Type ControlMessageIPPacketInfo doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(c.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ c.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (c *ControlMessageIPPacketInfo) WriteTo(writer io.Writer) (int64, error) {
+ if !c.DestinationAddr.Packed() || !c.LocalAddr.Packed() {
+ // Type ControlMessageIPPacketInfo doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, c.SizeBytes())
+ c.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (i *Inet6Addr) SizeBytes() int {
+ return 1 * 16
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *Inet6Addr) MarshalBytes(dst []byte) {
+ for idx := 0; idx < 16; idx++ {
+ dst[0] = byte(i[idx])
+ dst = dst[1:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *Inet6Addr) UnmarshalBytes(src []byte) {
+ for idx := 0; idx < 16; idx++ {
+ i[idx] = src[0]
+ src = src[1:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *Inet6Addr) Packed() bool {
+ // Array newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *Inet6Addr) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&i[0]), uintptr(i.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *Inet6Addr) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *Inet6Addr) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *Inet6Addr) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *Inet6Addr) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *Inet6Addr) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
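A usage sketch for the io.WriterTo side: streaming an Inet6Addr (a 16-byte array newtype, per SizeBytes above) into an in-memory buffer. The gvisor import path and the address value are assumptions.

package main

import (
	"bytes"
	"fmt"

	"gvisor.dev/gvisor/pkg/abi/linux"
)

func main() {
	addr := linux.Inet6Addr{0: 0xfe, 1: 0x80, 15: 0x01} // fe80::1, illustrative.

	var buf bytes.Buffer
	n, err := addr.WriteTo(&buf)   // zero-copy view of addr, then one Write.
	fmt.Println(n, err, buf.Len()) // 16 <nil> 16
}
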
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *Inet6MulticastRequest) SizeBytes() int {
+ return 4 +
+ (*Inet6Addr)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *Inet6MulticastRequest) MarshalBytes(dst []byte) {
+ i.MulticastAddr.MarshalBytes(dst[:i.MulticastAddr.SizeBytes()])
+ dst = dst[i.MulticastAddr.SizeBytes():]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.InterfaceIndex))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *Inet6MulticastRequest) UnmarshalBytes(src []byte) {
+ i.MulticastAddr.UnmarshalBytes(src[:i.MulticastAddr.SizeBytes()])
+ src = src[i.MulticastAddr.SizeBytes():]
+ i.InterfaceIndex = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *Inet6MulticastRequest) Packed() bool {
+ return i.MulticastAddr.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *Inet6MulticastRequest) MarshalUnsafe(dst []byte) {
+ if i.MulticastAddr.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(i), uintptr(i.SizeBytes()))
+ } else {
+ // Type Inet6MulticastRequest doesn't have a packed layout in memory, fall back to MarshalBytes.
+ i.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *Inet6MulticastRequest) UnmarshalUnsafe(src []byte) {
+ if i.MulticastAddr.Packed() {
+ gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+ } else {
+ // Type Inet6MulticastRequest doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ i.UnmarshalBytes(src)
+ }
+}
+
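The Packed/MarshalUnsafe/UnmarshalUnsafe trio above encodes one decision: if every field reports a packed layout, a single bulk copy of the struct's memory is valid; otherwise the generated code falls back to the field-by-field MarshalBytes/UnmarshalBytes path. A small self-contained sketch of that decision, using illustrative names rather than gvisor APIs:

package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

// marshallable is the slice of the generated interface the decision needs.
type marshallable interface {
	SizeBytes() int
	Packed() bool
	MarshalBytes(dst []byte)
	MarshalUnsafe(dst []byte)
}

// pair mimics a two-field struct with no padding, like Inet6MulticastRequest.
type pair struct {
	A uint32
	B uint32
}

func (p *pair) SizeBytes() int { return 8 }
func (p *pair) Packed() bool   { return true } // no padding between A and B.

func (p *pair) MarshalBytes(dst []byte) {
	// binary.LittleEndian stands in for hostarch.ByteOrder on a typical host.
	binary.LittleEndian.PutUint32(dst[:4], p.A)
	binary.LittleEndian.PutUint32(dst[4:8], p.B)
}

func (p *pair) MarshalUnsafe(dst []byte) {
	// One bulk copy of p's in-memory representation (host byte order).
	copy(dst, unsafe.Slice((*byte)(unsafe.Pointer(p)), p.SizeBytes()))
}

func marshal(m marshallable, dst []byte) {
	if m.Packed() {
		m.MarshalUnsafe(dst) // fast path: single copy.
		return
	}
	m.MarshalBytes(dst) // fallback: field by field.
}

func main() {
	buf := make([]byte, 8)
	marshal(&pair{A: 1, B: 2}, buf)
	fmt.Printf("%% x\n", buf)
}
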
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *Inet6MulticastRequest) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !i.MulticastAddr.Packed() {
+ // Type Inet6MulticastRequest doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ i.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *Inet6MulticastRequest) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *Inet6MulticastRequest) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !i.MulticastAddr.Packed() {
+ // Type Inet6MulticastRequest doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ i.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *Inet6MulticastRequest) WriteTo(writer io.Writer) (int64, error) {
+ if !i.MulticastAddr.Packed() {
+ // Type Inet6MulticastRequest doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, i.SizeBytes())
+ i.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (i *InetAddr) SizeBytes() int {
+ return 1 * 4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *InetAddr) MarshalBytes(dst []byte) {
+ for idx := 0; idx < 4; idx++ {
+ dst[0] = byte(i[idx])
+ dst = dst[1:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *InetAddr) UnmarshalBytes(src []byte) {
+ for idx := 0; idx < 4; idx++ {
+ i[idx] = src[0]
+ src = src[1:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *InetAddr) Packed() bool {
+ // Array newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *InetAddr) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&i[0]), uintptr(i.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *InetAddr) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *InetAddr) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *InetAddr) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *InetAddr) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *InetAddr) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *InetMulticastRequest) SizeBytes() int {
+ return 0 +
+ (*InetAddr)(nil).SizeBytes() +
+ (*InetAddr)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *InetMulticastRequest) MarshalBytes(dst []byte) {
+ i.MulticastAddr.MarshalBytes(dst[:i.MulticastAddr.SizeBytes()])
+ dst = dst[i.MulticastAddr.SizeBytes():]
+ i.InterfaceAddr.MarshalBytes(dst[:i.InterfaceAddr.SizeBytes()])
+ dst = dst[i.InterfaceAddr.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *InetMulticastRequest) UnmarshalBytes(src []byte) {
+ i.MulticastAddr.UnmarshalBytes(src[:i.MulticastAddr.SizeBytes()])
+ src = src[i.MulticastAddr.SizeBytes():]
+ i.InterfaceAddr.UnmarshalBytes(src[:i.InterfaceAddr.SizeBytes()])
+ src = src[i.InterfaceAddr.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *InetMulticastRequest) Packed() bool {
+ return i.InterfaceAddr.Packed() && i.MulticastAddr.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *InetMulticastRequest) MarshalUnsafe(dst []byte) {
+ if i.InterfaceAddr.Packed() && i.MulticastAddr.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(i), uintptr(i.SizeBytes()))
+ } else {
+ // Type InetMulticastRequest doesn't have a packed layout in memory, fall back to MarshalBytes.
+ i.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *InetMulticastRequest) UnmarshalUnsafe(src []byte) {
+ if i.InterfaceAddr.Packed() && i.MulticastAddr.Packed() {
+ gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+ } else {
+ // Type InetMulticastRequest doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ i.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *InetMulticastRequest) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !i.InterfaceAddr.Packed() || !i.MulticastAddr.Packed() {
+ // Type InetMulticastRequest doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ i.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *InetMulticastRequest) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *InetMulticastRequest) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !i.InterfaceAddr.Packed() || !i.MulticastAddr.Packed() {
+ // Type InetMulticastRequest doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ i.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *InetMulticastRequest) WriteTo(writer io.Writer) (int64, error) {
+ if !i.InterfaceAddr.Packed() || !i.MulticastAddr.Packed() {
+ // Type InetMulticastRequest doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, i.SizeBytes())
+ i.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *InetMulticastRequestWithNIC) SizeBytes() int {
+ return 4 +
+ (*InetMulticastRequest)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *InetMulticastRequestWithNIC) MarshalBytes(dst []byte) {
+ i.InetMulticastRequest.MarshalBytes(dst[:i.InetMulticastRequest.SizeBytes()])
+ dst = dst[i.InetMulticastRequest.SizeBytes():]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.InterfaceIndex))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *InetMulticastRequestWithNIC) UnmarshalBytes(src []byte) {
+ i.InetMulticastRequest.UnmarshalBytes(src[:i.InetMulticastRequest.SizeBytes()])
+ src = src[i.InetMulticastRequest.SizeBytes():]
+ i.InterfaceIndex = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
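A usage sketch for the nested case: InetMulticastRequestWithNIC marshals its inner InetMulticastRequest first and then the interface index, so the buffer is 8 + 4 = 12 bytes. This assumes the gvisor import path and that InetMulticastRequest is an embedded field, as the selectors above suggest; the values are illustrative.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/abi/linux"
)

func main() {
	req := linux.InetMulticastRequestWithNIC{
		InetMulticastRequest: linux.InetMulticastRequest{
			MulticastAddr: linux.InetAddr{224, 0, 0, 251},
			InterfaceAddr: linux.InetAddr{192, 168, 1, 10},
		},
		InterfaceIndex: 3,
	}
	buf := make([]byte, req.SizeBytes()) // 8 (inner request) + 4 (index).
	req.MarshalBytes(buf)
	fmt.Printf("%% x\n", buf)
}
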
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *InetMulticastRequestWithNIC) Packed() bool {
+ return i.InetMulticastRequest.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *InetMulticastRequestWithNIC) MarshalUnsafe(dst []byte) {
+ if i.InetMulticastRequest.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(i), uintptr(i.SizeBytes()))
+ } else {
+ // Type InetMulticastRequestWithNIC doesn't have a packed layout in memory, fall back to MarshalBytes.
+ i.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *InetMulticastRequestWithNIC) UnmarshalUnsafe(src []byte) {
+ if i.InetMulticastRequest.Packed() {
+ gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+ } else {
+ // Type InetMulticastRequestWithNIC doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ i.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *InetMulticastRequestWithNIC) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !i.InetMulticastRequest.Packed() {
+ // Type InetMulticastRequestWithNIC doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ i.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *InetMulticastRequestWithNIC) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *InetMulticastRequestWithNIC) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !i.InetMulticastRequest.Packed() {
+ // Type InetMulticastRequestWithNIC doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ i.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *InetMulticastRequestWithNIC) WriteTo(writer io.Writer) (int64, error) {
+ if !i.InetMulticastRequest.Packed() {
+ // Type InetMulticastRequestWithNIC doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, i.SizeBytes())
+ i.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (l *Linger) SizeBytes() int {
+ return 8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (l *Linger) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(l.OnOff))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(l.Linger))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (l *Linger) UnmarshalBytes(src []byte) {
+ l.OnOff = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ l.Linger = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
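Usage sketch: Linger is two int32s with no padding, so MarshalBytes/UnmarshalBytes round-trip its 8 bytes exactly. The gvisor import path is assumed and the values are illustrative.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/abi/linux"
)

func main() {
	in := linux.Linger{OnOff: 1, Linger: 30}
	buf := make([]byte, in.SizeBytes()) // 8 bytes: two int32s.
	in.MarshalBytes(buf)

	var out linux.Linger
	out.UnmarshalBytes(buf)
	fmt.Println(out == in) // true
}
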
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (l *Linger) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (l *Linger) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(l), uintptr(l.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (l *Linger) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(l), unsafe.Pointer(&src[0]), uintptr(l.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (l *Linger) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(l)))
+ hdr.Len = l.SizeBytes()
+ hdr.Cap = l.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that l
+ // must live until the use above.
+ runtime.KeepAlive(l) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (l *Linger) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return l.CopyOutN(cc, addr, l.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (l *Linger) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(l)))
+ hdr.Len = l.SizeBytes()
+ hdr.Cap = l.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that l
+ // must live until the use above.
+ runtime.KeepAlive(l) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (l *Linger) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(l)))
+ hdr.Len = l.SizeBytes()
+ hdr.Cap = l.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that l
+ // must live until the use above.
+ runtime.KeepAlive(l) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SockAddrInet) SizeBytes() int {
+ return 4 +
+ (*InetAddr)(nil).SizeBytes() +
+ 1*8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SockAddrInet) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Family))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Port))
+ dst = dst[2:]
+ s.Addr.MarshalBytes(dst[:s.Addr.SizeBytes()])
+ dst = dst[s.Addr.SizeBytes():]
+ // Padding: dst[:sizeof(uint8)*8] ~= [8]uint8{0}
+ dst = dst[1*(8):]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SockAddrInet) UnmarshalBytes(src []byte) {
+ s.Family = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ s.Port = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ s.Addr.UnmarshalBytes(src[:s.Addr.SizeBytes()])
+ src = src[s.Addr.SizeBytes():]
+ // Padding: ~ copy([8]uint8(s._), src[:sizeof(uint8)*8])
+ src = src[1*(8):]
+}
+
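Usage sketch: per MarshalBytes above, SockAddrInet serializes as family, port, a 4-byte address and 8 bytes of zero padding, 16 bytes in total like Linux's struct sockaddr_in. The gvisor import path and the field values are assumptions for illustration.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/abi/linux"
)

func main() {
	sa := linux.SockAddrInet{
		Family: linux.AF_INET,
		Port:   8080,
		Addr:   linux.InetAddr{127, 0, 0, 1},
	}
	buf := make([]byte, sa.SizeBytes()) // 2 + 2 + 4 + 8 = 16 bytes.
	sa.MarshalBytes(buf)
	fmt.Printf("%% x\n", buf)
}
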
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SockAddrInet) Packed() bool {
+ return s.Addr.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SockAddrInet) MarshalUnsafe(dst []byte) {
+ if s.Addr.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+ } else {
+ // Type SockAddrInet doesn't have a packed layout in memory, fall back to MarshalBytes.
+ s.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SockAddrInet) UnmarshalUnsafe(src []byte) {
+ if s.Addr.Packed() {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+ } else {
+ // Type SockAddrInet doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ s.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SockAddrInet) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !s.Addr.Packed() {
+ // Type SockAddrInet doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SockAddrInet) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SockAddrInet) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !s.Addr.Packed() {
+ // Type SockAddrInet doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ s.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SockAddrInet) WriteTo(writer io.Writer) (int64, error) {
+ if !s.Addr.Packed() {
+ // Type SockAddrInet doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SockAddrInet6) SizeBytes() int {
+ return 12 +
+ 1*16
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SockAddrInet6) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Family))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Port))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Flowinfo))
+ dst = dst[4:]
+ for idx := 0; idx < 16; idx++ {
+ dst[0] = byte(s.Addr[idx])
+ dst = dst[1:]
+ }
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Scope_id))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SockAddrInet6) UnmarshalBytes(src []byte) {
+ s.Family = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ s.Port = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ s.Flowinfo = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ for idx := 0; idx < 16; idx++ {
+ s.Addr[idx] = src[0]
+ src = src[1:]
+ }
+ s.Scope_id = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
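Usage sketch: SockAddrInet6 serializes as family, port, flow info, the 16-byte address and the scope ID, 28 bytes like struct sockaddr_in6. The import path and values are assumptions for illustration.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/abi/linux"
)

func main() {
	sa := linux.SockAddrInet6{
		Family: linux.AF_INET6,
		Port:   443,
		Addr:   [16]byte{0: 0xfe, 1: 0x80, 15: 0x01}, // fe80::1, illustrative.
	}
	buf := make([]byte, sa.SizeBytes()) // 12 + 16 = 28 bytes.
	sa.MarshalBytes(buf)
	fmt.Printf("%% x\n", buf)
}
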
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SockAddrInet6) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SockAddrInet6) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SockAddrInet6) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SockAddrInet6) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SockAddrInet6) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SockAddrInet6) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SockAddrInet6) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SockAddrLink) SizeBytes() int {
+ return 12 +
+ 1*8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SockAddrLink) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Family))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Protocol))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.InterfaceIndex))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.ARPHardwareType))
+ dst = dst[2:]
+ dst[0] = byte(s.PacketType)
+ dst = dst[1:]
+ dst[0] = byte(s.HardwareAddrLen)
+ dst = dst[1:]
+ for idx := 0; idx < 8; idx++ {
+ dst[0] = byte(s.HardwareAddr[idx])
+ dst = dst[1:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SockAddrLink) UnmarshalBytes(src []byte) {
+ s.Family = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ s.Protocol = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ s.InterfaceIndex = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.ARPHardwareType = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ s.PacketType = src[0]
+ src = src[1:]
+ s.HardwareAddrLen = src[0]
+ src = src[1:]
+ for idx := 0; idx < 8; idx++ {
+ s.HardwareAddr[idx] = src[0]
+ src = src[1:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SockAddrLink) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SockAddrLink) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SockAddrLink) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SockAddrLink) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SockAddrLink) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SockAddrLink) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SockAddrLink) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SockAddrUnix) SizeBytes() int {
+ return 2 +
+ 1*UnixPathMax
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SockAddrUnix) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Family))
+ dst = dst[2:]
+ for idx := 0; idx < UnixPathMax; idx++ {
+ dst[0] = byte(s.Path[idx])
+ dst = dst[1:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SockAddrUnix) UnmarshalBytes(src []byte) {
+ s.Family = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ for idx := 0; idx < UnixPathMax; idx++ {
+ s.Path[idx] = int8(src[0])
+ src = src[1:]
+ }
+}
+
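Usage sketch: SockAddrUnix is a 2-byte family followed by a UnixPathMax-long int8 path buffer, so a Go string is copied in byte by byte. The gvisor import path and the path value are assumptions.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/abi/linux"
)

func main() {
	var sa linux.SockAddrUnix
	sa.Family = linux.AF_UNIX
	for i, b := range []byte("/tmp/example.sock") {
		sa.Path[i] = int8(b) // remainder stays zero, i.e. NUL-terminated.
	}

	buf := make([]byte, sa.SizeBytes()) // 2 + UnixPathMax bytes.
	sa.MarshalBytes(buf)
	fmt.Println(len(buf))
}
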
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SockAddrUnix) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SockAddrUnix) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SockAddrUnix) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SockAddrUnix) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SockAddrUnix) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SockAddrUnix) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SockAddrUnix) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (t *TCPInfo) SizeBytes() int {
+ return 224
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (t *TCPInfo) MarshalBytes(dst []byte) {
+ dst[0] = byte(t.State)
+ dst = dst[1:]
+ dst[0] = byte(t.CaState)
+ dst = dst[1:]
+ dst[0] = byte(t.Retransmits)
+ dst = dst[1:]
+ dst[0] = byte(t.Probes)
+ dst = dst[1:]
+ dst[0] = byte(t.Backoff)
+ dst = dst[1:]
+ dst[0] = byte(t.Options)
+ dst = dst[1:]
+ dst[0] = byte(t.WindowScale)
+ dst = dst[1:]
+ dst[0] = byte(t.DeliveryRateAppLimited)
+ dst = dst[1:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.RTO))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.ATO))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.SndMss))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.RcvMss))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.Unacked))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.Sacked))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.Lost))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.Retrans))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.Fackets))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.LastDataSent))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.LastAckSent))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.LastDataRecv))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.LastAckRecv))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.PMTU))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.RcvSsthresh))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.RTT))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.RTTVar))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.SndSsthresh))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.SndCwnd))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.Advmss))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.Reordering))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.RcvRTT))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.RcvSpace))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.TotalRetrans))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(t.PacingRate))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(t.MaxPacingRate))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(t.BytesAcked))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(t.BytesReceived))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.SegsOut))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.SegsIn))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.NotSentBytes))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.MinRTT))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.DataSegsIn))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.DataSegsOut))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(t.DeliveryRate))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(t.BusyTime))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(t.RwndLimited))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(t.SndBufLimited))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.Delivered))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.DeliveredCE))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(t.BytesSent))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(t.BytesRetrans))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.DSACKDups))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.ReordSeen))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (t *TCPInfo) UnmarshalBytes(src []byte) {
+ t.State = uint8(src[0])
+ src = src[1:]
+ t.CaState = uint8(src[0])
+ src = src[1:]
+ t.Retransmits = uint8(src[0])
+ src = src[1:]
+ t.Probes = uint8(src[0])
+ src = src[1:]
+ t.Backoff = uint8(src[0])
+ src = src[1:]
+ t.Options = uint8(src[0])
+ src = src[1:]
+ t.WindowScale = uint8(src[0])
+ src = src[1:]
+ t.DeliveryRateAppLimited = uint8(src[0])
+ src = src[1:]
+ t.RTO = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.ATO = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.SndMss = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.RcvMss = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.Unacked = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.Sacked = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.Lost = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.Retrans = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.Fackets = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.LastDataSent = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.LastAckSent = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.LastDataRecv = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.LastAckRecv = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.PMTU = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.RcvSsthresh = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.RTT = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.RTTVar = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.SndSsthresh = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.SndCwnd = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.Advmss = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.Reordering = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.RcvRTT = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.RcvSpace = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.TotalRetrans = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.PacingRate = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ t.MaxPacingRate = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ t.BytesAcked = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ t.BytesReceived = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ t.SegsOut = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.SegsIn = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.NotSentBytes = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.MinRTT = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.DataSegsIn = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.DataSegsOut = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.DeliveryRate = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ t.BusyTime = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ t.RwndLimited = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ t.SndBufLimited = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ t.Delivered = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.DeliveredCE = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.BytesSent = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ t.BytesRetrans = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ t.DSACKDups = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.ReordSeen = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
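Usage sketch: TCPInfo is a fixed 224-byte block, so decoding a raw TCP_INFO buffer (for example, one produced for a getsockopt(TCP_INFO) call) is a single UnmarshalBytes call. The import path is assumed and the buffer here is zeroed purely for illustration.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/abi/linux"
)

func main() {
	var info linux.TCPInfo
	raw := make([]byte, info.SizeBytes()) // 224 bytes; zeroed for illustration.

	info.UnmarshalBytes(raw)
	fmt.Println(info.State, info.RTT, info.SndCwnd) // all zero here.
}
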
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (t *TCPInfo) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (t *TCPInfo) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(t), uintptr(t.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (t *TCPInfo) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(t), unsafe.Pointer(&src[0]), uintptr(t.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (t *TCPInfo) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until the use above.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (t *TCPInfo) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return t.CopyOutN(cc, addr, t.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (t *TCPInfo) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until the use above.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (t *TCPInfo) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until the use above.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
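The SliceHeader/KeepAlive pattern above lets a TCPInfo be streamed to any io.Writer with no intermediate allocation. As a minimal caller-side sketch of these generated methods, assuming the types live in the gvisor.dev/gvisor/pkg/abi/linux package as the file paths in this diff indicate (field values are arbitrary):

    package main

    import (
        "bytes"
        "fmt"

        "gvisor.dev/gvisor/pkg/abi/linux"
    )

    func main() {
        in := linux.TCPInfo{SegsOut: 10, SegsIn: 7, MinRTT: 250}

        // WriteTo streams the struct's raw in-memory bytes into any io.Writer.
        var buf bytes.Buffer
        if _, err := in.WriteTo(&buf); err != nil {
            panic(err)
        }

        // UnmarshalUnsafe copies the same byte layout back into a struct.
        var out linux.TCPInfo
        out.UnmarshalUnsafe(buf.Bytes())
        fmt.Println(out.SegsOut, out.SegsIn, out.MinRTT) // 10 7 250
    }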
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (c *ClockT) SizeBytes() int {
+ return 8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (c *ClockT) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(*c))
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (c *ClockT) UnmarshalBytes(src []byte) {
+ *c = ClockT(int64(hostarch.ByteOrder.Uint64(src[:8])))
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (c *ClockT) Packed() bool {
+ // Scalar newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (c *ClockT) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(c), uintptr(c.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (c *ClockT) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(c), unsafe.Pointer(&src[0]), uintptr(c.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (c *ClockT) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (c *ClockT) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return c.CopyOutN(cc, addr, c.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (c *ClockT) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (c *ClockT) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
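Because ClockT is a scalar newtype, its MarshalBytes/UnmarshalBytes pair is just an 8-byte encode and decode through hostarch.ByteOrder. A small round-trip sketch under the same package assumption as the earlier example (the value is arbitrary):

    package main

    import (
        "fmt"

        "gvisor.dev/gvisor/pkg/abi/linux"
    )

    func main() {
        c := linux.ClockT(1234)

        // Scalar newtypes marshal to exactly SizeBytes() bytes (8 for ClockT).
        buf := make([]byte, c.SizeBytes())
        c.MarshalBytes(buf)

        var back linux.ClockT
        back.UnmarshalBytes(buf)
        fmt.Println(back == c) // true
    }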
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *ItimerVal) SizeBytes() int {
+ return 0 +
+ (*Timeval)(nil).SizeBytes() +
+ (*Timeval)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *ItimerVal) MarshalBytes(dst []byte) {
+ i.Interval.MarshalBytes(dst[:i.Interval.SizeBytes()])
+ dst = dst[i.Interval.SizeBytes():]
+ i.Value.MarshalBytes(dst[:i.Value.SizeBytes()])
+ dst = dst[i.Value.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *ItimerVal) UnmarshalBytes(src []byte) {
+ i.Interval.UnmarshalBytes(src[:i.Interval.SizeBytes()])
+ src = src[i.Interval.SizeBytes():]
+ i.Value.UnmarshalBytes(src[:i.Value.SizeBytes()])
+ src = src[i.Value.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *ItimerVal) Packed() bool {
+ return i.Interval.Packed() && i.Value.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *ItimerVal) MarshalUnsafe(dst []byte) {
+ if i.Interval.Packed() && i.Value.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(i), uintptr(i.SizeBytes()))
+ } else {
+ // Type ItimerVal doesn't have a packed layout in memory, fall back to MarshalBytes.
+ i.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *ItimerVal) UnmarshalUnsafe(src []byte) {
+ if i.Interval.Packed() && i.Value.Packed() {
+ gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+ } else {
+ // Type ItimerVal doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ i.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *ItimerVal) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !i.Interval.Packed() && i.Value.Packed() {
+ // Type ItimerVal doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ i.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *ItimerVal) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *ItimerVal) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !i.Interval.Packed() && i.Value.Packed() {
+ // Type ItimerVal doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ i.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *ItimerVal) WriteTo(writer io.Writer) (int64, error) {
+ if !i.Interval.Packed() && i.Value.Packed() {
+ // Type ItimerVal doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, i.SizeBytes())
+ i.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
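For a type whose fields all report Packed() as true, MarshalUnsafe is a single memmove and should produce the same bytes as the field-by-field MarshalBytes path. A sketch of that equivalence, with arbitrary values and the same import assumption as above:

    package main

    import (
        "bytes"
        "fmt"

        "gvisor.dev/gvisor/pkg/abi/linux"
    )

    func main() {
        iv := linux.ItimerVal{
            Interval: linux.Timeval{Sec: 1, Usec: 500000},
            Value:    linux.Timeval{Sec: 2},
        }

        a := make([]byte, iv.SizeBytes())
        b := make([]byte, iv.SizeBytes())
        iv.MarshalBytes(a)  // field-by-field encoding.
        iv.MarshalUnsafe(b) // single memmove; valid because iv.Packed() is true.

        fmt.Println(iv.Packed(), bytes.Equal(a, b)) // true true
    }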
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *Itimerspec) SizeBytes() int {
+ return 0 +
+ (*Timespec)(nil).SizeBytes() +
+ (*Timespec)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *Itimerspec) MarshalBytes(dst []byte) {
+ i.Interval.MarshalBytes(dst[:i.Interval.SizeBytes()])
+ dst = dst[i.Interval.SizeBytes():]
+ i.Value.MarshalBytes(dst[:i.Value.SizeBytes()])
+ dst = dst[i.Value.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *Itimerspec) UnmarshalBytes(src []byte) {
+ i.Interval.UnmarshalBytes(src[:i.Interval.SizeBytes()])
+ src = src[i.Interval.SizeBytes():]
+ i.Value.UnmarshalBytes(src[:i.Value.SizeBytes()])
+ src = src[i.Value.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *Itimerspec) Packed() bool {
+ return i.Interval.Packed() && i.Value.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *Itimerspec) MarshalUnsafe(dst []byte) {
+ if i.Interval.Packed() && i.Value.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(i), uintptr(i.SizeBytes()))
+ } else {
+ // Type Itimerspec doesn't have a packed layout in memory, fall back to MarshalBytes.
+ i.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *Itimerspec) UnmarshalUnsafe(src []byte) {
+ if i.Interval.Packed() && i.Value.Packed() {
+ gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+ } else {
+ // Type Itimerspec doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ i.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *Itimerspec) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !i.Interval.Packed() && i.Value.Packed() {
+ // Type Itimerspec doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ i.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *Itimerspec) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *Itimerspec) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !i.Interval.Packed() && i.Value.Packed() {
+ // Type Itimerspec doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ i.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *Itimerspec) WriteTo(writer io.Writer) (int64, error) {
+ if !i.Interval.Packed() && i.Value.Packed() {
+ // Type Itimerspec doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, i.SizeBytes())
+ i.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (sxts *StatxTimestamp) SizeBytes() int {
+ return 16
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (sxts *StatxTimestamp) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(sxts.Sec))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(sxts.Nsec))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(int32)] ~= int32(0)
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (sxts *StatxTimestamp) UnmarshalBytes(src []byte) {
+ sxts.Sec = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ sxts.Nsec = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ int32 ~= src[:sizeof(int32)]
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (sxts *StatxTimestamp) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (sxts *StatxTimestamp) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(sxts), uintptr(sxts.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (sxts *StatxTimestamp) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(sxts), unsafe.Pointer(&src[0]), uintptr(sxts.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (sxts *StatxTimestamp) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(sxts)))
+ hdr.Len = sxts.SizeBytes()
+ hdr.Cap = sxts.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that sxts
+ // must live until the use above.
+ runtime.KeepAlive(sxts) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (sxts *StatxTimestamp) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return sxts.CopyOutN(cc, addr, sxts.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (sxts *StatxTimestamp) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(sxts)))
+ hdr.Len = sxts.SizeBytes()
+ hdr.Cap = sxts.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that sxts
+ // must live until the use above.
+ runtime.KeepAlive(sxts) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (sxts *StatxTimestamp) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(sxts)))
+ hdr.Len = sxts.SizeBytes()
+ hdr.Cap = sxts.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that sxts
+ // must live until the use above.
+ runtime.KeepAlive(sxts) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
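Note that the explicit padding field is only skipped, not written: MarshalBytes advances past the trailing 4 bytes without touching them. A quick, illustrative-only sketch that makes this visible (values arbitrary):

    package main

    import (
        "fmt"

        "gvisor.dev/gvisor/pkg/abi/linux"
    )

    func main() {
        ts := linux.StatxTimestamp{Sec: 1700000000, Nsec: 42}

        buf := make([]byte, ts.SizeBytes()) // 16 bytes: 8 (Sec) + 4 (Nsec) + 4 (padding).
        for i := range buf {
            buf[i] = 0xAA
        }
        ts.MarshalBytes(buf)

        // Sec and Nsec overwrite the first 12 bytes; the padding keeps its old contents.
        fmt.Println(buf[12:]) // [170 170 170 170]
    }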
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (t *TimeT) SizeBytes() int {
+ return 8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (t *TimeT) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(*t))
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (t *TimeT) UnmarshalBytes(src []byte) {
+ *t = TimeT(int64(hostarch.ByteOrder.Uint64(src[:8])))
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (t *TimeT) Packed() bool {
+ // Scalar newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (t *TimeT) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(t), uintptr(t.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (t *TimeT) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(t), unsafe.Pointer(&src[0]), uintptr(t.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (t *TimeT) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until the use above.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (t *TimeT) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return t.CopyOutN(cc, addr, t.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (t *TimeT) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until the use above.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (t *TimeT) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until the use above.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (t *TimerID) SizeBytes() int {
+ return 4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (t *TimerID) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(*t))
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (t *TimerID) UnmarshalBytes(src []byte) {
+ *t = TimerID(int32(hostarch.ByteOrder.Uint32(src[:4])))
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (t *TimerID) Packed() bool {
+ // Scalar newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (t *TimerID) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(t), uintptr(t.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (t *TimerID) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(t), unsafe.Pointer(&src[0]), uintptr(t.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (t *TimerID) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until the use above.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (t *TimerID) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return t.CopyOutN(cc, addr, t.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (t *TimerID) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until the use above.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (t *TimerID) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until the use above.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (ts *Timespec) SizeBytes() int {
+ return 16
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (ts *Timespec) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(ts.Sec))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(ts.Nsec))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (ts *Timespec) UnmarshalBytes(src []byte) {
+ ts.Sec = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ ts.Nsec = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (ts *Timespec) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (ts *Timespec) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(ts), uintptr(ts.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (ts *Timespec) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(ts), unsafe.Pointer(&src[0]), uintptr(ts.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (ts *Timespec) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(ts)))
+ hdr.Len = ts.SizeBytes()
+ hdr.Cap = ts.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that ts
+ // must live until the use above.
+ runtime.KeepAlive(ts) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (ts *Timespec) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return ts.CopyOutN(cc, addr, ts.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (ts *Timespec) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(ts)))
+ hdr.Len = ts.SizeBytes()
+ hdr.Cap = ts.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that ts
+ // must live until the use above.
+ runtime.KeepAlive(ts) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (ts *Timespec) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(ts)))
+ hdr.Len = ts.SizeBytes()
+ hdr.Cap = ts.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that ts
+ // must live until the use above.
+ runtime.KeepAlive(ts) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// CopyTimespecSliceIn copies in a slice of Timespec objects from the task's memory.
+func CopyTimespecSliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []Timespec) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Timespec)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&dst)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyInBytes(addr, buf)
+ // Since we bypassed the compiler's escape analysis, indicate that dst
+ // must live until the use above.
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyTimespecSliceOut copies a slice of Timespec objects to the task's memory.
+func CopyTimespecSliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []Timespec) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Timespec)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&src)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyOutBytes(addr, buf)
+ // Since we bypassed the compiler's escape analysis, indicate that src
+ // must live until the use above.
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// MarshalUnsafeTimespecSlice is like Timespec.MarshalUnsafe, but for a []Timespec.
+func MarshalUnsafeTimespecSlice(src []Timespec, dst []byte) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Timespec)(nil).SizeBytes()
+
+ dst = dst[:size*count]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(dst)))
+ return size * count, nil
+}
+
+// UnmarshalUnsafeTimespecSlice is like Timespec.UnmarshalUnsafe, but for a []Timespec.
+func UnmarshalUnsafeTimespecSlice(dst []Timespec, src []byte) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Timespec)(nil).SizeBytes()
+
+ src = src[:(size*count)]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(src)))
+ return size * count, nil
+}
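The slice helpers leave buffer sizing to the caller: the byte buffer must hold at least len(slice) times the element's SizeBytes(). A minimal round trip under the same import assumption as the earlier sketches:

    package main

    import (
        "fmt"

        "gvisor.dev/gvisor/pkg/abi/linux"
    )

    func main() {
        src := []linux.Timespec{{Sec: 1, Nsec: 2}, {Sec: 3, Nsec: 4}}

        // The caller sizes the buffer: element size times element count.
        buf := make([]byte, len(src)*(*linux.Timespec)(nil).SizeBytes())
        if _, err := linux.MarshalUnsafeTimespecSlice(src, buf); err != nil {
            panic(err)
        }

        dst := make([]linux.Timespec, len(src))
        if _, err := linux.UnmarshalUnsafeTimespecSlice(dst, buf); err != nil {
            panic(err)
        }
        fmt.Println(dst) // [{1 2} {3 4}]
    }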
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (tv *Timeval) SizeBytes() int {
+ return 16
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (tv *Timeval) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(tv.Sec))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(tv.Usec))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (tv *Timeval) UnmarshalBytes(src []byte) {
+ tv.Sec = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ tv.Usec = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (tv *Timeval) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (tv *Timeval) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(tv), uintptr(tv.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (tv *Timeval) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(tv), unsafe.Pointer(&src[0]), uintptr(tv.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (tv *Timeval) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(tv)))
+ hdr.Len = tv.SizeBytes()
+ hdr.Cap = tv.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that tv
+ // must live until the use above.
+ runtime.KeepAlive(tv) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (tv *Timeval) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return tv.CopyOutN(cc, addr, tv.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (tv *Timeval) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(tv)))
+ hdr.Len = tv.SizeBytes()
+ hdr.Cap = tv.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that tv
+ // must live until the use above.
+ runtime.KeepAlive(tv) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (tv *Timeval) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(tv)))
+ hdr.Len = tv.SizeBytes()
+ hdr.Cap = tv.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that tv
+ // must live until the use above.
+ runtime.KeepAlive(tv) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// CopyTimevalSliceIn copies in a slice of Timeval objects from the task's memory.
+func CopyTimevalSliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []Timeval) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Timeval)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&dst)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyInBytes(addr, buf)
+ // Since we bypassed the compiler's escape analysis, indicate that dst
+ // must live until the use above.
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyTimevalSliceOut copies a slice of Timeval objects to the task's memory.
+func CopyTimevalSliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []Timeval) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Timeval)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&src)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyOutBytes(addr, buf)
+ // Since we bypassed the compiler's escape analysis, indicate that src
+ // must live until the use above.
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// MarshalUnsafeTimevalSlice is like Timeval.MarshalUnsafe, but for a []Timeval.
+func MarshalUnsafeTimevalSlice(src []Timeval, dst []byte) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Timeval)(nil).SizeBytes()
+
+ dst = dst[:size*count]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(dst)))
+ return size * count, nil
+}
+
+// UnmarshalUnsafeTimevalSlice is like Timeval.UnmarshalUnsafe, but for a []Timeval.
+func UnmarshalUnsafeTimevalSlice(dst []Timeval, src []byte) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Timeval)(nil).SizeBytes()
+
+ src = src[:(size*count)]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(src)))
+ return size * count, nil
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (t *Tms) SizeBytes() int {
+ return 0 +
+ (*ClockT)(nil).SizeBytes() +
+ (*ClockT)(nil).SizeBytes() +
+ (*ClockT)(nil).SizeBytes() +
+ (*ClockT)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (t *Tms) MarshalBytes(dst []byte) {
+ t.UTime.MarshalBytes(dst[:t.UTime.SizeBytes()])
+ dst = dst[t.UTime.SizeBytes():]
+ t.STime.MarshalBytes(dst[:t.STime.SizeBytes()])
+ dst = dst[t.STime.SizeBytes():]
+ t.CUTime.MarshalBytes(dst[:t.CUTime.SizeBytes()])
+ dst = dst[t.CUTime.SizeBytes():]
+ t.CSTime.MarshalBytes(dst[:t.CSTime.SizeBytes()])
+ dst = dst[t.CSTime.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (t *Tms) UnmarshalBytes(src []byte) {
+ t.UTime.UnmarshalBytes(src[:t.UTime.SizeBytes()])
+ src = src[t.UTime.SizeBytes():]
+ t.STime.UnmarshalBytes(src[:t.STime.SizeBytes()])
+ src = src[t.STime.SizeBytes():]
+ t.CUTime.UnmarshalBytes(src[:t.CUTime.SizeBytes()])
+ src = src[t.CUTime.SizeBytes():]
+ t.CSTime.UnmarshalBytes(src[:t.CSTime.SizeBytes()])
+ src = src[t.CSTime.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (t *Tms) Packed() bool {
+ return t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (t *Tms) MarshalUnsafe(dst []byte) {
+ if t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(t), uintptr(t.SizeBytes()))
+ } else {
+ // Type Tms doesn't have a packed layout in memory, fall back to MarshalBytes.
+ t.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (t *Tms) UnmarshalUnsafe(src []byte) {
+ if t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() {
+ gohacks.Memmove(unsafe.Pointer(t), unsafe.Pointer(&src[0]), uintptr(t.SizeBytes()))
+ } else {
+ // Type Tms doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ t.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (t *Tms) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() {
+ // Type Tms doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(t.SizeBytes()) // escapes: okay.
+ t.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until the use above.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (t *Tms) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return t.CopyOutN(cc, addr, t.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (t *Tms) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() {
+ // Type Tms doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(t.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ t.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until the use above.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (t *Tms) WriteTo(writer io.Writer) (int64, error) {
+ if !t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() {
+ // Type Tms doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, t.SizeBytes())
+ t.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until the use above.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
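Packed() composes over the fields, so Tms, whose four fields are all ClockT scalar newtypes, always takes the memmove fast path. A short sketch with arbitrary values:

    package main

    import (
        "fmt"

        "gvisor.dev/gvisor/pkg/abi/linux"
    )

    func main() {
        t := linux.Tms{UTime: 10, STime: 20, CUTime: 30, CSTime: 40}
        fmt.Println(t.Packed()) // true: every field is a packed ClockT.

        buf := make([]byte, t.SizeBytes())
        t.MarshalUnsafe(buf) // single memmove, no per-field encoding.

        var back linux.Tms
        back.UnmarshalUnsafe(buf)
        fmt.Println(back == t) // true
    }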
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (u *Utime) SizeBytes() int {
+ return 16
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (u *Utime) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(u.Actime))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(u.Modtime))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (u *Utime) UnmarshalBytes(src []byte) {
+ u.Actime = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ u.Modtime = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (u *Utime) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (u *Utime) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(u), uintptr(u.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (u *Utime) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(u), unsafe.Pointer(&src[0]), uintptr(u.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (u *Utime) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (u *Utime) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return u.CopyOutN(cc, addr, u.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (u *Utime) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (u *Utime) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (t *Termios) SizeBytes() int {
+ return 17 +
+ 1*NumControlCharacters
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (t *Termios) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.InputFlags))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.OutputFlags))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.ControlFlags))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.LocalFlags))
+ dst = dst[4:]
+ dst[0] = byte(t.LineDiscipline)
+ dst = dst[1:]
+ for idx := 0; idx < NumControlCharacters; idx++ {
+ dst[0] = byte(t.ControlCharacters[idx])
+ dst = dst[1:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (t *Termios) UnmarshalBytes(src []byte) {
+ t.InputFlags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.OutputFlags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.ControlFlags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.LocalFlags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.LineDiscipline = uint8(src[0])
+ src = src[1:]
+ for idx := 0; idx < NumControlCharacters; idx++ {
+ t.ControlCharacters[idx] = uint8(src[0])
+ src = src[1:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (t *Termios) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (t *Termios) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(t), uintptr(t.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (t *Termios) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(t), unsafe.Pointer(&src[0]), uintptr(t.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (t *Termios) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until the use above.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (t *Termios) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return t.CopyOutN(cc, addr, t.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (t *Termios) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until the use above.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (t *Termios) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until the use above.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
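The ControlCharacters array is copied byte by byte, so the marshalled size is the 17 fixed bytes plus NumControlCharacters. A round-trip sketch; the flag value is arbitrary and no particular termios constant is implied:

    package main

    import (
        "fmt"

        "gvisor.dev/gvisor/pkg/abi/linux"
    )

    func main() {
        var t linux.Termios
        t.InputFlags = 0x4         // arbitrary raw flag bits for the round trip.
        t.ControlCharacters[0] = 3 // first control-character slot.

        buf := make([]byte, t.SizeBytes()) // 17 + NumControlCharacters bytes.
        t.MarshalBytes(buf)

        var back linux.Termios
        back.UnmarshalBytes(buf)
        fmt.Println(back.InputFlags, back.ControlCharacters[0]) // 4 3
    }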
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (w *WindowSize) SizeBytes() int {
+ return 4 +
+ 1*4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (w *WindowSize) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(w.Rows))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(w.Cols))
+ dst = dst[2:]
+ // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0}
+ dst = dst[1*(4):]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (w *WindowSize) UnmarshalBytes(src []byte) {
+ w.Rows = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ w.Cols = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ // Padding: ~ copy([4]byte(w._), src[:sizeof(byte)*4])
+ src = src[1*(4):]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (w *WindowSize) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (w *WindowSize) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(w), uintptr(w.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (w *WindowSize) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(w), unsafe.Pointer(&src[0]), uintptr(w.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (w *WindowSize) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(w)))
+ hdr.Len = w.SizeBytes()
+ hdr.Cap = w.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that w
+ // must live until the use above.
+ runtime.KeepAlive(w) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (w *WindowSize) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return w.CopyOutN(cc, addr, w.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (w *WindowSize) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(w)))
+ hdr.Len = w.SizeBytes()
+ hdr.Cap = w.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that w
+ // must live until the use above.
+ runtime.KeepAlive(w) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (w *WindowSize) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(w)))
+ hdr.Len = w.SizeBytes()
+ hdr.Cap = w.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that w
+ // must live until the use above.
+ runtime.KeepAlive(w) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (w *Winsize) SizeBytes() int {
+ return 8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (w *Winsize) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(w.Row))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(w.Col))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(w.Xpixel))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(w.Ypixel))
+ dst = dst[2:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (w *Winsize) UnmarshalBytes(src []byte) {
+ w.Row = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ w.Col = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ w.Xpixel = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ w.Ypixel = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (w *Winsize) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (w *Winsize) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(w), uintptr(w.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (w *Winsize) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(w), unsafe.Pointer(&src[0]), uintptr(w.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (w *Winsize) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(w)))
+ hdr.Len = w.SizeBytes()
+ hdr.Cap = w.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that w
+ // must live until the use above.
+ runtime.KeepAlive(w) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (w *Winsize) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return w.CopyOutN(cc, addr, w.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (w *Winsize) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(w)))
+ hdr.Len = w.SizeBytes()
+ hdr.Cap = w.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that w
+ // must live until the use above.
+ runtime.KeepAlive(w) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (w *Winsize) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(w)))
+ hdr.Len = w.SizeBytes()
+ hdr.Cap = w.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that w
+ // must live until the use above.
+ runtime.KeepAlive(w) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
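Since the encoding goes through hostarch.ByteOrder, the marshalled buffer can be inspected with the same byte order. A small sketch decoding one field straight from the raw bytes (values arbitrary; the hostarch import path matches the one used by the generated file below):

    package main

    import (
        "fmt"

        "gvisor.dev/gvisor/pkg/abi/linux"
        "gvisor.dev/gvisor/pkg/hostarch"
    )

    func main() {
        w := linux.Winsize{Row: 24, Col: 80, Xpixel: 640, Ypixel: 480}

        buf := make([]byte, w.SizeBytes()) // four uint16 fields, 8 bytes total.
        w.MarshalBytes(buf)

        // Decode Col directly from its offset using the same byte order.
        fmt.Println(hostarch.ByteOrder.Uint16(buf[2:4])) // 80
    }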
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (u *UtsName) SizeBytes() int {
+ return 0 +
+ 1*(UTSLen+1) +
+ 1*(UTSLen+1) +
+ 1*(UTSLen+1) +
+ 1*(UTSLen+1) +
+ 1*(UTSLen+1) +
+ 1*(UTSLen+1)
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (u *UtsName) MarshalBytes(dst []byte) {
+ for idx := 0; idx < (UTSLen+1); idx++ {
+ dst[0] = byte(u.Sysname[idx])
+ dst = dst[1:]
+ }
+ for idx := 0; idx < (UTSLen+1); idx++ {
+ dst[0] = byte(u.Nodename[idx])
+ dst = dst[1:]
+ }
+ for idx := 0; idx < (UTSLen+1); idx++ {
+ dst[0] = byte(u.Release[idx])
+ dst = dst[1:]
+ }
+ for idx := 0; idx < (UTSLen+1); idx++ {
+ dst[0] = byte(u.Version[idx])
+ dst = dst[1:]
+ }
+ for idx := 0; idx < (UTSLen+1); idx++ {
+ dst[0] = byte(u.Machine[idx])
+ dst = dst[1:]
+ }
+ for idx := 0; idx < (UTSLen+1); idx++ {
+ dst[0] = byte(u.Domainname[idx])
+ dst = dst[1:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (u *UtsName) UnmarshalBytes(src []byte) {
+ for idx := 0; idx < (UTSLen+1); idx++ {
+ u.Sysname[idx] = src[0]
+ src = src[1:]
+ }
+ for idx := 0; idx < (UTSLen+1); idx++ {
+ u.Nodename[idx] = src[0]
+ src = src[1:]
+ }
+ for idx := 0; idx < (UTSLen+1); idx++ {
+ u.Release[idx] = src[0]
+ src = src[1:]
+ }
+ for idx := 0; idx < (UTSLen+1); idx++ {
+ u.Version[idx] = src[0]
+ src = src[1:]
+ }
+ for idx := 0; idx < (UTSLen+1); idx++ {
+ u.Machine[idx] = src[0]
+ src = src[1:]
+ }
+ for idx := 0; idx < (UTSLen+1); idx++ {
+ u.Domainname[idx] = src[0]
+ src = src[1:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (u *UtsName) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (u *UtsName) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(u), uintptr(u.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (u *UtsName) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(u), unsafe.Pointer(&src[0]), uintptr(u.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (u *UtsName) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (u *UtsName) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return u.CopyOutN(cc, addr, u.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (u *UtsName) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (u *UtsName) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
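Every generated CopyIn, CopyOutN, and WriteTo in this file follows the same unsafe aliasing pattern: build a []byte whose header points at the struct's own memory, hide the pointer from escape analysis with gohacks.Noescape, and pin the struct with runtime.KeepAlive until the copy has completed. The following standard-library-only sketch shows the core of that pattern; demoStruct and asBytes are illustrative names, not part of the generated package, and the Noescape hint is omitted since it only affects escape analysis, not correctness.

package main

import (
	"fmt"
	"reflect"
	"runtime"
	"unsafe"
)

// demoStruct stands in for a fixed-layout ABI struct such as Winsize.
type demoStruct struct {
	Row, Col       uint16
	Xpixel, Ypixel uint16
}

// asBytes builds a []byte view over s's memory, mirroring how the generated
// CopyOutN/CopyIn/WriteTo methods construct their buffers.
func asBytes(s *demoStruct) []byte {
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(unsafe.Pointer(s))
	hdr.Len = int(unsafe.Sizeof(*s))
	hdr.Cap = int(unsafe.Sizeof(*s))
	return buf
}

func main() {
	s := &demoStruct{Row: 24, Col: 80}
	buf := asBytes(s)
	fmt.Printf("% x\n", buf) // the struct's raw bytes, in host byte order
	// The byte view does not keep s reachable on its own, so pin it until
	// after the last use, exactly as the generated code does.
	runtime.KeepAlive(s)
}
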
diff --git a/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go b/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go
new file mode 100644
index 000000000..1d479c196
--- /dev/null
+++ b/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go
@@ -0,0 +1,741 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// If there are issues with build tag aggregation, see
+// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here
+// come from the input set of files used to generate this file. This input set
+// is filtered based on pre-defined file suffixes related to build tags, see
+// tools/defs.bzl:calculate_sets().
+
+// +build amd64
+// +build amd64
+// +build amd64
+// +build amd64
+// +build amd64
+
+package linux
+
+import (
+ "gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/hostarch"
+ "gvisor.dev/gvisor/pkg/marshal"
+ "io"
+ "reflect"
+ "runtime"
+ "unsafe"
+)
+
+// Marshallable types used by this file.
+var _ marshal.Marshallable = (*EpollEvent)(nil)
+var _ marshal.Marshallable = (*IPCPerm)(nil)
+var _ marshal.Marshallable = (*PtraceRegs)(nil)
+var _ marshal.Marshallable = (*SemidDS)(nil)
+var _ marshal.Marshallable = (*Stat)(nil)
+var _ marshal.Marshallable = (*TimeT)(nil)
+var _ marshal.Marshallable = (*Timespec)(nil)
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (e *EpollEvent) SizeBytes() int {
+ return 4 +
+ 4*2
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (e *EpollEvent) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(e.Events))
+ dst = dst[4:]
+ for idx := 0; idx < 2; idx++ {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(e.Data[idx]))
+ dst = dst[4:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (e *EpollEvent) UnmarshalBytes(src []byte) {
+ e.Events = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ for idx := 0; idx < 2; idx++ {
+ e.Data[idx] = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (e *EpollEvent) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (e *EpollEvent) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(e), uintptr(e.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (e *EpollEvent) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(e), unsafe.Pointer(&src[0]), uintptr(e.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (e *EpollEvent) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(e)))
+ hdr.Len = e.SizeBytes()
+ hdr.Cap = e.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that e
+ // must live until the use above.
+ runtime.KeepAlive(e) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (e *EpollEvent) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return e.CopyOutN(cc, addr, e.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (e *EpollEvent) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(e)))
+ hdr.Len = e.SizeBytes()
+ hdr.Cap = e.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that e
+ // must live until the use above.
+ runtime.KeepAlive(e) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (e *EpollEvent) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(e)))
+ hdr.Len = e.SizeBytes()
+ hdr.Cap = e.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that e
+ // must live until the use above.
+ runtime.KeepAlive(e) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// CopyEpollEventSliceIn copies in a slice of EpollEvent objects from the task's memory.
+func CopyEpollEventSliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []EpollEvent) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*EpollEvent)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&dst)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyInBytes(addr, buf)
+ // Since we bypassed the compiler's escape analysis, indicate that dst
+ // must live until the use above.
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyEpollEventSliceOut copies a slice of EpollEvent objects to the task's memory.
+func CopyEpollEventSliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []EpollEvent) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*EpollEvent)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&src)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyOutBytes(addr, buf)
+ // Since we bypassed the compiler's escape analysis, indicate that src
+ // must live until the use above.
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// MarshalUnsafeEpollEventSlice is like EpollEvent.MarshalUnsafe, but for a []EpollEvent.
+func MarshalUnsafeEpollEventSlice(src []EpollEvent, dst []byte) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*EpollEvent)(nil).SizeBytes()
+
+ dst = dst[:size*count]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(dst)))
+ return size * count, nil
+}
+
+// UnmarshalUnsafeEpollEventSlice is like EpollEvent.UnmarshalUnsafe, but for a []EpollEvent.
+func UnmarshalUnsafeEpollEventSlice(dst []EpollEvent, src []byte) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*EpollEvent)(nil).SizeBytes()
+
+ src = src[:(size*count)]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(src)))
+ return count*size, nil
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *Stat) SizeBytes() int {
+ return 72 +
+ (*Timespec)(nil).SizeBytes() +
+ (*Timespec)(nil).SizeBytes() +
+ (*Timespec)(nil).SizeBytes() +
+ 8*3
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *Stat) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Dev))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Ino))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Nlink))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Mode))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.UID))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.GID))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(int32)] ~= int32(0)
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rdev))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Size))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Blksize))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Blocks))
+ dst = dst[8:]
+ s.ATime.MarshalBytes(dst[:s.ATime.SizeBytes()])
+ dst = dst[s.ATime.SizeBytes():]
+ s.MTime.MarshalBytes(dst[:s.MTime.SizeBytes()])
+ dst = dst[s.MTime.SizeBytes():]
+ s.CTime.MarshalBytes(dst[:s.CTime.SizeBytes()])
+ dst = dst[s.CTime.SizeBytes():]
+ // Padding: dst[:sizeof(int64)*3] ~= [3]int64{0}
+ dst = dst[8*(3):]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *Stat) UnmarshalBytes(src []byte) {
+ s.Dev = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Ino = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Nlink = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Mode = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.UID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.GID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ int32 ~= src[:sizeof(int32)]
+ src = src[4:]
+ s.Rdev = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Size = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Blksize = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Blocks = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.ATime.UnmarshalBytes(src[:s.ATime.SizeBytes()])
+ src = src[s.ATime.SizeBytes():]
+ s.MTime.UnmarshalBytes(src[:s.MTime.SizeBytes()])
+ src = src[s.MTime.SizeBytes():]
+ s.CTime.UnmarshalBytes(src[:s.CTime.SizeBytes()])
+ src = src[s.CTime.SizeBytes():]
+ // Padding: ~ copy([3]int64(s._), src[:sizeof(int64)*3])
+ src = src[8*(3):]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *Stat) Packed() bool {
+ return s.ATime.Packed() && s.CTime.Packed() && s.MTime.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *Stat) MarshalUnsafe(dst []byte) {
+ if s.ATime.Packed() && s.CTime.Packed() && s.MTime.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+ } else {
+ // Type Stat doesn't have a packed layout in memory, fallback to MarshalBytes.
+ s.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *Stat) UnmarshalUnsafe(src []byte) {
+ if s.ATime.Packed() && s.CTime.Packed() && s.MTime.Packed() {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+ } else {
+ // Type Stat doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ s.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *Stat) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !s.ATime.Packed() && s.CTime.Packed() && s.MTime.Packed() {
+ // Type Stat doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *Stat) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *Stat) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !s.ATime.Packed() && s.CTime.Packed() && s.MTime.Packed() {
+ // Type Stat doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ s.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *Stat) WriteTo(writer io.Writer) (int64, error) {
+ if !s.ATime.Packed() && s.CTime.Packed() && s.MTime.Packed() {
+ // Type Stat doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (p *PtraceRegs) SizeBytes() int {
+ return 216
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (p *PtraceRegs) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.R15))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.R14))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.R13))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.R12))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Rbp))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Rbx))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.R11))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.R10))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.R9))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.R8))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Rax))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Rcx))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Rdx))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Rsi))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Rdi))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Orig_rax))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Rip))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Cs))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Eflags))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Rsp))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Ss))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Fs_base))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Gs_base))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Ds))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Es))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Fs))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Gs))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (p *PtraceRegs) UnmarshalBytes(src []byte) {
+ p.R15 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.R14 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.R13 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.R12 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Rbp = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Rbx = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.R11 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.R10 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.R9 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.R8 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Rax = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Rcx = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Rdx = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Rsi = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Rdi = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Orig_rax = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Rip = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Cs = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Eflags = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Rsp = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Ss = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Fs_base = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Gs_base = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Ds = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Es = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Fs = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Gs = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (p *PtraceRegs) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (p *PtraceRegs) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(p), uintptr(p.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (p *PtraceRegs) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(p), unsafe.Pointer(&src[0]), uintptr(p.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (p *PtraceRegs) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(p)))
+ hdr.Len = p.SizeBytes()
+ hdr.Cap = p.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that p
+ // must live until the use above.
+ runtime.KeepAlive(p) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (p *PtraceRegs) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return p.CopyOutN(cc, addr, p.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (p *PtraceRegs) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(p)))
+ hdr.Len = p.SizeBytes()
+ hdr.Cap = p.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that p
+ // must live until the use above.
+ runtime.KeepAlive(p) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (p *PtraceRegs) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(p)))
+ hdr.Len = p.SizeBytes()
+ hdr.Cap = p.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that p
+ // must live until the use above.
+ runtime.KeepAlive(p) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SemidDS) SizeBytes() int {
+ return 40 +
+ (*IPCPerm)(nil).SizeBytes() +
+ (*TimeT)(nil).SizeBytes() +
+ (*TimeT)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SemidDS) MarshalBytes(dst []byte) {
+ s.SemPerm.MarshalBytes(dst[:s.SemPerm.SizeBytes()])
+ dst = dst[s.SemPerm.SizeBytes():]
+ s.SemOTime.MarshalBytes(dst[:s.SemOTime.SizeBytes()])
+ dst = dst[s.SemOTime.SizeBytes():]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.unused1))
+ dst = dst[8:]
+ s.SemCTime.MarshalBytes(dst[:s.SemCTime.SizeBytes()])
+ dst = dst[s.SemCTime.SizeBytes():]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.unused2))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.SemNSems))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.unused3))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.unused4))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SemidDS) UnmarshalBytes(src []byte) {
+ s.SemPerm.UnmarshalBytes(src[:s.SemPerm.SizeBytes()])
+ src = src[s.SemPerm.SizeBytes():]
+ s.SemOTime.UnmarshalBytes(src[:s.SemOTime.SizeBytes()])
+ src = src[s.SemOTime.SizeBytes():]
+ s.unused1 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.SemCTime.UnmarshalBytes(src[:s.SemCTime.SizeBytes()])
+ src = src[s.SemCTime.SizeBytes():]
+ s.unused2 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.SemNSems = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.unused3 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.unused4 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SemidDS) Packed() bool {
+ return s.SemCTime.Packed() && s.SemOTime.Packed() && s.SemPerm.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SemidDS) MarshalUnsafe(dst []byte) {
+ if s.SemCTime.Packed() && s.SemOTime.Packed() && s.SemPerm.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+ } else {
+ // Type SemidDS doesn't have a packed layout in memory, fallback to MarshalBytes.
+ s.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SemidDS) UnmarshalUnsafe(src []byte) {
+ if s.SemCTime.Packed() && s.SemOTime.Packed() && s.SemPerm.Packed() {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+ } else {
+ // Type SemidDS doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ s.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SemidDS) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !s.SemCTime.Packed() && s.SemOTime.Packed() && s.SemPerm.Packed() {
+ // Type SemidDS doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SemidDS) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SemidDS) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !s.SemCTime.Packed() && s.SemOTime.Packed() && s.SemPerm.Packed() {
+ // Type SemidDS doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ s.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SemidDS) WriteTo(writer io.Writer) (int64, error) {
+ if !s.SemCTime.Packed() && s.SemOTime.Packed() && s.SemPerm.Packed() {
+ // Type SemidDS doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
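For reference, the amd64 EpollEvent marshalled in this file is a 12-byte record: a uint32 Events word followed by two int32 data words, written in host (little-endian) byte order; the arm64 variant later in this diff adds four bytes of padding after Events, for 16 bytes total. A hedged, standard-library sketch of the amd64 layout, with epollEvent, marshal, and unmarshal as stand-ins for the generated type and methods:

package main

import (
	"encoding/binary"
	"fmt"
)

// epollEvent mirrors the amd64 layout above: Events followed by Data[2].
type epollEvent struct {
	Events uint32
	Data   [2]int32
}

// marshal writes e into dst field by field, as the generated MarshalBytes
// does, assuming a little-endian host (hostarch.ByteOrder on amd64).
func marshal(e *epollEvent, dst []byte) {
	binary.LittleEndian.PutUint32(dst[0:4], e.Events)
	binary.LittleEndian.PutUint32(dst[4:8], uint32(e.Data[0]))
	binary.LittleEndian.PutUint32(dst[8:12], uint32(e.Data[1]))
}

// unmarshal is the inverse, as the generated UnmarshalBytes does.
func unmarshal(e *epollEvent, src []byte) {
	e.Events = binary.LittleEndian.Uint32(src[0:4])
	e.Data[0] = int32(binary.LittleEndian.Uint32(src[4:8]))
	e.Data[1] = int32(binary.LittleEndian.Uint32(src[8:12]))
}

func main() {
	in := epollEvent{Events: 0x1, Data: [2]int32{42, -1}}
	buf := make([]byte, 12)
	marshal(&in, buf)

	var out epollEvent
	unmarshal(&out, buf)
	fmt.Printf("% x -> %+v\n", buf, out)
}
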
diff --git a/pkg/abi/linux/linux_amd64_state_autogen.go b/pkg/abi/linux/linux_amd64_state_autogen.go
new file mode 100644
index 000000000..d233a69df
--- /dev/null
+++ b/pkg/abi/linux/linux_amd64_state_autogen.go
@@ -0,0 +1,120 @@
+// automatically generated by stateify.
+
+// +build amd64
+// +build amd64
+// +build amd64
+// +build amd64
+// +build amd64
+
+package linux
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (p *PtraceRegs) StateTypeName() string {
+ return "pkg/abi/linux.PtraceRegs"
+}
+
+func (p *PtraceRegs) StateFields() []string {
+ return []string{
+ "R15",
+ "R14",
+ "R13",
+ "R12",
+ "Rbp",
+ "Rbx",
+ "R11",
+ "R10",
+ "R9",
+ "R8",
+ "Rax",
+ "Rcx",
+ "Rdx",
+ "Rsi",
+ "Rdi",
+ "Orig_rax",
+ "Rip",
+ "Cs",
+ "Eflags",
+ "Rsp",
+ "Ss",
+ "Fs_base",
+ "Gs_base",
+ "Ds",
+ "Es",
+ "Fs",
+ "Gs",
+ }
+}
+
+func (p *PtraceRegs) beforeSave() {}
+
+// +checklocksignore
+func (p *PtraceRegs) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+ stateSinkObject.Save(0, &p.R15)
+ stateSinkObject.Save(1, &p.R14)
+ stateSinkObject.Save(2, &p.R13)
+ stateSinkObject.Save(3, &p.R12)
+ stateSinkObject.Save(4, &p.Rbp)
+ stateSinkObject.Save(5, &p.Rbx)
+ stateSinkObject.Save(6, &p.R11)
+ stateSinkObject.Save(7, &p.R10)
+ stateSinkObject.Save(8, &p.R9)
+ stateSinkObject.Save(9, &p.R8)
+ stateSinkObject.Save(10, &p.Rax)
+ stateSinkObject.Save(11, &p.Rcx)
+ stateSinkObject.Save(12, &p.Rdx)
+ stateSinkObject.Save(13, &p.Rsi)
+ stateSinkObject.Save(14, &p.Rdi)
+ stateSinkObject.Save(15, &p.Orig_rax)
+ stateSinkObject.Save(16, &p.Rip)
+ stateSinkObject.Save(17, &p.Cs)
+ stateSinkObject.Save(18, &p.Eflags)
+ stateSinkObject.Save(19, &p.Rsp)
+ stateSinkObject.Save(20, &p.Ss)
+ stateSinkObject.Save(21, &p.Fs_base)
+ stateSinkObject.Save(22, &p.Gs_base)
+ stateSinkObject.Save(23, &p.Ds)
+ stateSinkObject.Save(24, &p.Es)
+ stateSinkObject.Save(25, &p.Fs)
+ stateSinkObject.Save(26, &p.Gs)
+}
+
+func (p *PtraceRegs) afterLoad() {}
+
+// +checklocksignore
+func (p *PtraceRegs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &p.R15)
+ stateSourceObject.Load(1, &p.R14)
+ stateSourceObject.Load(2, &p.R13)
+ stateSourceObject.Load(3, &p.R12)
+ stateSourceObject.Load(4, &p.Rbp)
+ stateSourceObject.Load(5, &p.Rbx)
+ stateSourceObject.Load(6, &p.R11)
+ stateSourceObject.Load(7, &p.R10)
+ stateSourceObject.Load(8, &p.R9)
+ stateSourceObject.Load(9, &p.R8)
+ stateSourceObject.Load(10, &p.Rax)
+ stateSourceObject.Load(11, &p.Rcx)
+ stateSourceObject.Load(12, &p.Rdx)
+ stateSourceObject.Load(13, &p.Rsi)
+ stateSourceObject.Load(14, &p.Rdi)
+ stateSourceObject.Load(15, &p.Orig_rax)
+ stateSourceObject.Load(16, &p.Rip)
+ stateSourceObject.Load(17, &p.Cs)
+ stateSourceObject.Load(18, &p.Eflags)
+ stateSourceObject.Load(19, &p.Rsp)
+ stateSourceObject.Load(20, &p.Ss)
+ stateSourceObject.Load(21, &p.Fs_base)
+ stateSourceObject.Load(22, &p.Gs_base)
+ stateSourceObject.Load(23, &p.Ds)
+ stateSourceObject.Load(24, &p.Es)
+ stateSourceObject.Load(25, &p.Fs)
+ stateSourceObject.Load(26, &p.Gs)
+}
+
+func init() {
+ state.Register((*PtraceRegs)(nil))
+}
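The stateify output above is symmetric by construction: StateFields fixes a field ordering, and StateSave/StateLoad must use the same index for the same field. The toy sketch below illustrates that contract only; sink, source, and regs are invented for illustration and are not the real pkg/state API.

package main

import "fmt"

// sink and source are toy stand-ins for state.Sink and state.Source: they
// simply map a field index to a stored value.
type sink map[int]interface{}

func (s sink) Save(idx int, v interface{}) { s[idx] = v }

type source map[int]interface{}

func (s source) Load(idx int, v *uint64) { *v = s[idx].(uint64) }

// regs is a cut-down PtraceRegs-like type with two saved fields.
type regs struct {
	Sp, Pc uint64
}

// StateSave and StateLoad must agree on the index assigned to each field,
// just as the generated amd64 and arm64 code does.
func (r *regs) StateSave(s sink)     { s.Save(0, r.Sp); s.Save(1, r.Pc) }
func (r *regs) StateLoad(src source) { src.Load(0, &r.Sp); src.Load(1, &r.Pc) }

func main() {
	in := regs{Sp: 0xffff0000, Pc: 0x401000}
	st := sink{}
	in.StateSave(st)

	var out regs
	out.StateLoad(source(st))
	fmt.Printf("%+v\n", out) // {Sp:4294901760 Pc:4198400}
}
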
diff --git a/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go b/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go
new file mode 100644
index 000000000..db4980c14
--- /dev/null
+++ b/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go
@@ -0,0 +1,653 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// If there are issues with build tag aggregation, see
+// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here
+// come from the input set of files used to generate this file. This input set
+// is filtered based on pre-defined file suffixes related to build tags, see
+// tools/defs.bzl:calculate_sets().
+
+// +build arm64
+// +build arm64
+// +build arm64
+// +build arm64
+
+package linux
+
+import (
+ "gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/hostarch"
+ "gvisor.dev/gvisor/pkg/marshal"
+ "io"
+ "reflect"
+ "runtime"
+ "unsafe"
+)
+
+// Marshallable types used by this file.
+var _ marshal.Marshallable = (*EpollEvent)(nil)
+var _ marshal.Marshallable = (*IPCPerm)(nil)
+var _ marshal.Marshallable = (*PtraceRegs)(nil)
+var _ marshal.Marshallable = (*SemidDS)(nil)
+var _ marshal.Marshallable = (*Stat)(nil)
+var _ marshal.Marshallable = (*TimeT)(nil)
+var _ marshal.Marshallable = (*Timespec)(nil)
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (e *EpollEvent) SizeBytes() int {
+ return 8 +
+ 4*2
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (e *EpollEvent) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(e.Events))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(int32)] ~= int32(0)
+ dst = dst[4:]
+ for idx := 0; idx < 2; idx++ {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(e.Data[idx]))
+ dst = dst[4:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (e *EpollEvent) UnmarshalBytes(src []byte) {
+ e.Events = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ int32 ~= src[:sizeof(int32)]
+ src = src[4:]
+ for idx := 0; idx < 2; idx++ {
+ e.Data[idx] = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (e *EpollEvent) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (e *EpollEvent) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(e), uintptr(e.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (e *EpollEvent) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(e), unsafe.Pointer(&src[0]), uintptr(e.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (e *EpollEvent) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(e)))
+ hdr.Len = e.SizeBytes()
+ hdr.Cap = e.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that e
+ // must live until the use above.
+ runtime.KeepAlive(e) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (e *EpollEvent) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return e.CopyOutN(cc, addr, e.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (e *EpollEvent) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(e)))
+ hdr.Len = e.SizeBytes()
+ hdr.Cap = e.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that e
+ // must live until the use above.
+ runtime.KeepAlive(e) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (e *EpollEvent) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(e)))
+ hdr.Len = e.SizeBytes()
+ hdr.Cap = e.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that e
+ // must live until the use above.
+ runtime.KeepAlive(e) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// CopyEpollEventSliceIn copies in a slice of EpollEvent objects from the task's memory.
+func CopyEpollEventSliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []EpollEvent) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*EpollEvent)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&dst)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyInBytes(addr, buf)
+ // Since we bypassed the compiler's escape analysis, indicate that dst
+ // must live until the use above.
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyEpollEventSliceOut copies a slice of EpollEvent objects to the task's memory.
+func CopyEpollEventSliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []EpollEvent) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*EpollEvent)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&src)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyOutBytes(addr, buf)
+ // Since we bypassed the compiler's escape analysis, indicate that src
+ // must live until the use above.
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// MarshalUnsafeEpollEventSlice is like EpollEvent.MarshalUnsafe, but for a []EpollEvent.
+func MarshalUnsafeEpollEventSlice(src []EpollEvent, dst []byte) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*EpollEvent)(nil).SizeBytes()
+
+ dst = dst[:size*count]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(dst)))
+ return size * count, nil
+}
+
+// UnmarshalUnsafeEpollEventSlice is like EpollEvent.UnmarshalUnsafe, but for a []EpollEvent.
+func UnmarshalUnsafeEpollEventSlice(dst []EpollEvent, src []byte) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*EpollEvent)(nil).SizeBytes()
+
+ src = src[:(size*count)]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(src)))
+ return count*size, nil
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *Stat) SizeBytes() int {
+ return 72 +
+ (*Timespec)(nil).SizeBytes() +
+ (*Timespec)(nil).SizeBytes() +
+ (*Timespec)(nil).SizeBytes() +
+ 4*2
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *Stat) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Dev))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Ino))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Mode))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Nlink))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.UID))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.GID))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rdev))
+ dst = dst[8:]
+ // Padding: dst[:sizeof(uint64)] ~= uint64(0)
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Size))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Blksize))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(int32)] ~= int32(0)
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Blocks))
+ dst = dst[8:]
+ s.ATime.MarshalBytes(dst[:s.ATime.SizeBytes()])
+ dst = dst[s.ATime.SizeBytes():]
+ s.MTime.MarshalBytes(dst[:s.MTime.SizeBytes()])
+ dst = dst[s.MTime.SizeBytes():]
+ s.CTime.MarshalBytes(dst[:s.CTime.SizeBytes()])
+ dst = dst[s.CTime.SizeBytes():]
+ // Padding: dst[:sizeof(int32)*2] ~= [2]int32{0}
+ dst = dst[4*(2):]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *Stat) UnmarshalBytes(src []byte) {
+ s.Dev = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Ino = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Mode = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Nlink = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.UID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.GID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Rdev = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ // Padding: var _ uint64 ~= src[:sizeof(uint64)]
+ src = src[8:]
+ s.Size = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Blksize = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ int32 ~= src[:sizeof(int32)]
+ src = src[4:]
+ s.Blocks = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.ATime.UnmarshalBytes(src[:s.ATime.SizeBytes()])
+ src = src[s.ATime.SizeBytes():]
+ s.MTime.UnmarshalBytes(src[:s.MTime.SizeBytes()])
+ src = src[s.MTime.SizeBytes():]
+ s.CTime.UnmarshalBytes(src[:s.CTime.SizeBytes()])
+ src = src[s.CTime.SizeBytes():]
+ // Padding: ~ copy([2]int32(s._), src[:sizeof(int32)*2])
+ src = src[4*(2):]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *Stat) Packed() bool {
+ return s.ATime.Packed() && s.CTime.Packed() && s.MTime.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *Stat) MarshalUnsafe(dst []byte) {
+ if s.ATime.Packed() && s.CTime.Packed() && s.MTime.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+ } else {
+ // Type Stat doesn't have a packed layout in memory, fallback to MarshalBytes.
+ s.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *Stat) UnmarshalUnsafe(src []byte) {
+ if s.ATime.Packed() && s.CTime.Packed() && s.MTime.Packed() {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+ } else {
+ // Type Stat doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ s.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *Stat) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !s.ATime.Packed() && s.CTime.Packed() && s.MTime.Packed() {
+ // Type Stat doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *Stat) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *Stat) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !s.ATime.Packed() && s.CTime.Packed() && s.MTime.Packed() {
+ // Type Stat doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ s.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *Stat) WriteTo(writer io.Writer) (int64, error) {
+ if !s.ATime.Packed() && s.CTime.Packed() && s.MTime.Packed() {
+ // Type Stat doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (p *PtraceRegs) SizeBytes() int {
+ return 24 +
+ 8*31
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (p *PtraceRegs) MarshalBytes(dst []byte) {
+ for idx := 0; idx < 31; idx++ {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Regs[idx]))
+ dst = dst[8:]
+ }
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Sp))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Pc))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Pstate))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (p *PtraceRegs) UnmarshalBytes(src []byte) {
+ for idx := 0; idx < 31; idx++ {
+ p.Regs[idx] = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ }
+ p.Sp = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Pc = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Pstate = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (p *PtraceRegs) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (p *PtraceRegs) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(p), uintptr(p.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (p *PtraceRegs) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(p), unsafe.Pointer(&src[0]), uintptr(p.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (p *PtraceRegs) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(p)))
+ hdr.Len = p.SizeBytes()
+ hdr.Cap = p.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that p
+ // must live until the use above.
+ runtime.KeepAlive(p) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (p *PtraceRegs) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return p.CopyOutN(cc, addr, p.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (p *PtraceRegs) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(p)))
+ hdr.Len = p.SizeBytes()
+ hdr.Cap = p.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that p
+ // must live until the use above.
+ runtime.KeepAlive(p) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (p *PtraceRegs) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(p)))
+ hdr.Len = p.SizeBytes()
+ hdr.Cap = p.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that p
+ // must live until the use above.
+ runtime.KeepAlive(p) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SemidDS) SizeBytes() int {
+ return 24 +
+ (*IPCPerm)(nil).SizeBytes() +
+ (*TimeT)(nil).SizeBytes() +
+ (*TimeT)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SemidDS) MarshalBytes(dst []byte) {
+ s.SemPerm.MarshalBytes(dst[:s.SemPerm.SizeBytes()])
+ dst = dst[s.SemPerm.SizeBytes():]
+ s.SemOTime.MarshalBytes(dst[:s.SemOTime.SizeBytes()])
+ dst = dst[s.SemOTime.SizeBytes():]
+ s.SemCTime.MarshalBytes(dst[:s.SemCTime.SizeBytes()])
+ dst = dst[s.SemCTime.SizeBytes():]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.SemNSems))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.unused3))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.unused4))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SemidDS) UnmarshalBytes(src []byte) {
+ s.SemPerm.UnmarshalBytes(src[:s.SemPerm.SizeBytes()])
+ src = src[s.SemPerm.SizeBytes():]
+ s.SemOTime.UnmarshalBytes(src[:s.SemOTime.SizeBytes()])
+ src = src[s.SemOTime.SizeBytes():]
+ s.SemCTime.UnmarshalBytes(src[:s.SemCTime.SizeBytes()])
+ src = src[s.SemCTime.SizeBytes():]
+ s.SemNSems = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.unused3 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.unused4 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SemidDS) Packed() bool {
+ return s.SemCTime.Packed() && s.SemOTime.Packed() && s.SemPerm.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SemidDS) MarshalUnsafe(dst []byte) {
+ if s.SemCTime.Packed() && s.SemOTime.Packed() && s.SemPerm.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+ } else {
+ // Type SemidDS doesn't have a packed layout in memory, fallback to MarshalBytes.
+ s.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SemidDS) UnmarshalUnsafe(src []byte) {
+ if s.SemCTime.Packed() && s.SemOTime.Packed() && s.SemPerm.Packed() {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+ } else {
+ // Type SemidDS doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ s.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SemidDS) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !s.SemCTime.Packed() && s.SemOTime.Packed() && s.SemPerm.Packed() {
+ // Type SemidDS doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SemidDS) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SemidDS) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !s.SemCTime.Packed() && s.SemOTime.Packed() && s.SemPerm.Packed() {
+ // Type SemidDS doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ s.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SemidDS) WriteTo(writer io.Writer) (int64, error) {
+ if !s.SemCTime.Packed() && s.SemOTime.Packed() && s.SemPerm.Packed() {
+ // Type SemidDS doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
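As a sanity check on the two Stat.SizeBytes implementations in this diff: assuming the 16-byte Timespec used on both 64-bit targets (its SizeBytes is not shown in this hunk), the amd64 total is 72 + 3*16 + 8*3 = 144 bytes and the arm64 total is 72 + 3*16 + 4*2 = 128 bytes, which match the kernel's struct stat sizes on those architectures. The arithmetic, spelled out:

package main

import "fmt"

func main() {
	const timespecSize = 16 // assumed: two 64-bit fields (Sec, Nsec)

	// amd64: fixed fields + 3 timestamps + 3*int64 trailing padding.
	amd64 := 72 + 3*timespecSize + 8*3
	// arm64: fixed fields + 3 timestamps + 2*int32 trailing padding.
	arm64 := 72 + 3*timespecSize + 4*2

	fmt.Println(amd64, arm64) // 144 128
}
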
diff --git a/pkg/abi/linux/linux_arm64_state_autogen.go b/pkg/abi/linux/linux_arm64_state_autogen.go
new file mode 100644
index 000000000..8b08b2925
--- /dev/null
+++ b/pkg/abi/linux/linux_arm64_state_autogen.go
@@ -0,0 +1,50 @@
+// automatically generated by stateify.
+
+// +build arm64
+// +build arm64
+// +build arm64
+// +build arm64
+
+package linux
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (p *PtraceRegs) StateTypeName() string {
+ return "pkg/abi/linux.PtraceRegs"
+}
+
+func (p *PtraceRegs) StateFields() []string {
+ return []string{
+ "Regs",
+ "Sp",
+ "Pc",
+ "Pstate",
+ }
+}
+
+func (p *PtraceRegs) beforeSave() {}
+
+// +checklocksignore
+func (p *PtraceRegs) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+ stateSinkObject.Save(0, &p.Regs)
+ stateSinkObject.Save(1, &p.Sp)
+ stateSinkObject.Save(2, &p.Pc)
+ stateSinkObject.Save(3, &p.Pstate)
+}
+
+func (p *PtraceRegs) afterLoad() {}
+
+// +checklocksignore
+func (p *PtraceRegs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &p.Regs)
+ stateSourceObject.Load(1, &p.Sp)
+ stateSourceObject.Load(2, &p.Pc)
+ stateSourceObject.Load(3, &p.Pstate)
+}
+
+func init() {
+ state.Register((*PtraceRegs)(nil))
+}
diff --git a/pkg/abi/linux/linux_state_autogen.go b/pkg/abi/linux/linux_state_autogen.go
new file mode 100644
index 000000000..e2043be15
--- /dev/null
+++ b/pkg/abi/linux/linux_state_autogen.go
@@ -0,0 +1,290 @@
+// automatically generated by stateify.
+
+package linux
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (i *IOEvent) StateTypeName() string {
+ return "pkg/abi/linux.IOEvent"
+}
+
+func (i *IOEvent) StateFields() []string {
+ return []string{
+ "Data",
+ "Obj",
+ "Result",
+ "Result2",
+ }
+}
+
+func (i *IOEvent) beforeSave() {}
+
+// +checklocksignore
+func (i *IOEvent) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.Data)
+ stateSinkObject.Save(1, &i.Obj)
+ stateSinkObject.Save(2, &i.Result)
+ stateSinkObject.Save(3, &i.Result2)
+}
+
+func (i *IOEvent) afterLoad() {}
+
+// +checklocksignore
+func (i *IOEvent) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.Data)
+ stateSourceObject.Load(1, &i.Obj)
+ stateSourceObject.Load(2, &i.Result)
+ stateSourceObject.Load(3, &i.Result2)
+}
+
+func (b *BPFInstruction) StateTypeName() string {
+ return "pkg/abi/linux.BPFInstruction"
+}
+
+func (b *BPFInstruction) StateFields() []string {
+ return []string{
+ "OpCode",
+ "JumpIfTrue",
+ "JumpIfFalse",
+ "K",
+ }
+}
+
+func (b *BPFInstruction) beforeSave() {}
+
+// +checklocksignore
+func (b *BPFInstruction) StateSave(stateSinkObject state.Sink) {
+ b.beforeSave()
+ stateSinkObject.Save(0, &b.OpCode)
+ stateSinkObject.Save(1, &b.JumpIfTrue)
+ stateSinkObject.Save(2, &b.JumpIfFalse)
+ stateSinkObject.Save(3, &b.K)
+}
+
+func (b *BPFInstruction) afterLoad() {}
+
+// +checklocksignore
+func (b *BPFInstruction) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &b.OpCode)
+ stateSourceObject.Load(1, &b.JumpIfTrue)
+ stateSourceObject.Load(2, &b.JumpIfFalse)
+ stateSourceObject.Load(3, &b.K)
+}
+
+func (s *SigAction) StateTypeName() string {
+ return "pkg/abi/linux.SigAction"
+}
+
+func (s *SigAction) StateFields() []string {
+ return []string{
+ "Handler",
+ "Flags",
+ "Restorer",
+ "Mask",
+ }
+}
+
+func (s *SigAction) beforeSave() {}
+
+// +checklocksignore
+func (s *SigAction) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.Handler)
+ stateSinkObject.Save(1, &s.Flags)
+ stateSinkObject.Save(2, &s.Restorer)
+ stateSinkObject.Save(3, &s.Mask)
+}
+
+func (s *SigAction) afterLoad() {}
+
+// +checklocksignore
+func (s *SigAction) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.Handler)
+ stateSourceObject.Load(1, &s.Flags)
+ stateSourceObject.Load(2, &s.Restorer)
+ stateSourceObject.Load(3, &s.Mask)
+}
+
+func (s *SignalStack) StateTypeName() string {
+ return "pkg/abi/linux.SignalStack"
+}
+
+func (s *SignalStack) StateFields() []string {
+ return []string{
+ "Addr",
+ "Flags",
+ "Size",
+ }
+}
+
+func (s *SignalStack) beforeSave() {}
+
+// +checklocksignore
+func (s *SignalStack) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.Addr)
+ stateSinkObject.Save(1, &s.Flags)
+ stateSinkObject.Save(2, &s.Size)
+}
+
+func (s *SignalStack) afterLoad() {}
+
+// +checklocksignore
+func (s *SignalStack) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.Addr)
+ stateSourceObject.Load(1, &s.Flags)
+ stateSourceObject.Load(2, &s.Size)
+}
+
+func (s *SignalInfo) StateTypeName() string {
+ return "pkg/abi/linux.SignalInfo"
+}
+
+func (s *SignalInfo) StateFields() []string {
+ return []string{
+ "Signo",
+ "Errno",
+ "Code",
+ "Fields",
+ }
+}
+
+func (s *SignalInfo) beforeSave() {}
+
+// +checklocksignore
+func (s *SignalInfo) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.Signo)
+ stateSinkObject.Save(1, &s.Errno)
+ stateSinkObject.Save(2, &s.Code)
+ stateSinkObject.Save(3, &s.Fields)
+}
+
+func (s *SignalInfo) afterLoad() {}
+
+// +checklocksignore
+func (s *SignalInfo) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.Signo)
+ stateSourceObject.Load(1, &s.Errno)
+ stateSourceObject.Load(2, &s.Code)
+ stateSourceObject.Load(3, &s.Fields)
+}
+
+func (c *ControlMessageIPPacketInfo) StateTypeName() string {
+ return "pkg/abi/linux.ControlMessageIPPacketInfo"
+}
+
+func (c *ControlMessageIPPacketInfo) StateFields() []string {
+ return []string{
+ "NIC",
+ "LocalAddr",
+ "DestinationAddr",
+ }
+}
+
+func (c *ControlMessageIPPacketInfo) beforeSave() {}
+
+// +checklocksignore
+func (c *ControlMessageIPPacketInfo) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.NIC)
+ stateSinkObject.Save(1, &c.LocalAddr)
+ stateSinkObject.Save(2, &c.DestinationAddr)
+}
+
+func (c *ControlMessageIPPacketInfo) afterLoad() {}
+
+// +checklocksignore
+func (c *ControlMessageIPPacketInfo) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.NIC)
+ stateSourceObject.Load(1, &c.LocalAddr)
+ stateSourceObject.Load(2, &c.DestinationAddr)
+}
+
+func (t *KernelTermios) StateTypeName() string {
+ return "pkg/abi/linux.KernelTermios"
+}
+
+func (t *KernelTermios) StateFields() []string {
+ return []string{
+ "InputFlags",
+ "OutputFlags",
+ "ControlFlags",
+ "LocalFlags",
+ "LineDiscipline",
+ "ControlCharacters",
+ "InputSpeed",
+ "OutputSpeed",
+ }
+}
+
+func (t *KernelTermios) beforeSave() {}
+
+// +checklocksignore
+func (t *KernelTermios) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ stateSinkObject.Save(0, &t.InputFlags)
+ stateSinkObject.Save(1, &t.OutputFlags)
+ stateSinkObject.Save(2, &t.ControlFlags)
+ stateSinkObject.Save(3, &t.LocalFlags)
+ stateSinkObject.Save(4, &t.LineDiscipline)
+ stateSinkObject.Save(5, &t.ControlCharacters)
+ stateSinkObject.Save(6, &t.InputSpeed)
+ stateSinkObject.Save(7, &t.OutputSpeed)
+}
+
+func (t *KernelTermios) afterLoad() {}
+
+// +checklocksignore
+func (t *KernelTermios) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.InputFlags)
+ stateSourceObject.Load(1, &t.OutputFlags)
+ stateSourceObject.Load(2, &t.ControlFlags)
+ stateSourceObject.Load(3, &t.LocalFlags)
+ stateSourceObject.Load(4, &t.LineDiscipline)
+ stateSourceObject.Load(5, &t.ControlCharacters)
+ stateSourceObject.Load(6, &t.InputSpeed)
+ stateSourceObject.Load(7, &t.OutputSpeed)
+}
+
+func (w *WindowSize) StateTypeName() string {
+ return "pkg/abi/linux.WindowSize"
+}
+
+func (w *WindowSize) StateFields() []string {
+ return []string{
+ "Rows",
+ "Cols",
+ }
+}
+
+func (w *WindowSize) beforeSave() {}
+
+// +checklocksignore
+func (w *WindowSize) StateSave(stateSinkObject state.Sink) {
+ w.beforeSave()
+ stateSinkObject.Save(0, &w.Rows)
+ stateSinkObject.Save(1, &w.Cols)
+}
+
+func (w *WindowSize) afterLoad() {}
+
+// +checklocksignore
+func (w *WindowSize) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &w.Rows)
+ stateSourceObject.Load(1, &w.Cols)
+}
+
+func init() {
+ state.Register((*IOEvent)(nil))
+ state.Register((*BPFInstruction)(nil))
+ state.Register((*SigAction)(nil))
+ state.Register((*SignalStack)(nil))
+ state.Register((*SignalInfo)(nil))
+ state.Register((*ControlMessageIPPacketInfo)(nil))
+ state.Register((*KernelTermios)(nil))
+ state.Register((*WindowSize)(nil))
+}
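
Every type in the generated file above follows the same stateify contract: StateTypeName and StateFields describe the type, StateSave/StateLoad move each field through the state Sink/Source by index, and an init function registers the type. A hand-written sketch of that shape, for a hypothetical (non-generated) type and assuming only the pkg/state methods visible above:

package linux

import (
	"gvisor.dev/gvisor/pkg/state"
)

// exampleTimeval is a hypothetical type used only to illustrate the method
// set that stateify emits; real types in this package are generated.
type exampleTimeval struct {
	Sec  int64
	Usec int64
}

func (t *exampleTimeval) StateTypeName() string {
	return "pkg/abi/linux.exampleTimeval"
}

func (t *exampleTimeval) StateFields() []string {
	return []string{"Sec", "Usec"}
}

func (t *exampleTimeval) beforeSave() {}

func (t *exampleTimeval) StateSave(stateSinkObject state.Sink) {
	t.beforeSave()
	stateSinkObject.Save(0, &t.Sec)
	stateSinkObject.Save(1, &t.Usec)
}

func (t *exampleTimeval) afterLoad() {}

func (t *exampleTimeval) StateLoad(stateSourceObject state.Source) {
	stateSourceObject.Load(0, &t.Sec)
	stateSourceObject.Load(1, &t.Usec)
}

func init() {
	state.Register((*exampleTimeval)(nil))
}
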
diff --git a/pkg/abi/linux/netfilter_test.go b/pkg/abi/linux/netfilter_test.go
deleted file mode 100644
index 600820a0b..000000000
--- a/pkg/abi/linux/netfilter_test.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package linux
-
-import (
- "encoding/binary"
- "testing"
-)
-
-func TestSizes(t *testing.T) {
- testCases := []struct {
- typ interface{}
- defined uintptr
- }{
- {IPTEntry{}, SizeOfIPTEntry},
- {IPTGetEntries{}, SizeOfIPTGetEntries},
- {IPTGetinfo{}, SizeOfIPTGetinfo},
- {IPTIP{}, SizeOfIPTIP},
- {IPTOwnerInfo{}, SizeOfIPTOwnerInfo},
- {IPTReplace{}, SizeOfIPTReplace},
- {XTCounters{}, SizeOfXTCounters},
- {XTEntryMatch{}, SizeOfXTEntryMatch},
- {XTEntryTarget{}, SizeOfXTEntryTarget},
- {XTErrorTarget{}, SizeOfXTErrorTarget},
- {XTStandardTarget{}, SizeOfXTStandardTarget},
- {IP6TReplace{}, SizeOfIP6TReplace},
- {IP6TEntry{}, SizeOfIP6TEntry},
- {IP6TIP{}, SizeOfIP6TIP},
- }
-
- for _, tc := range testCases {
- if calculated := uintptr(binary.Size(tc.typ)); calculated != tc.defined {
- t.Errorf("%T has a defined size of %d and calculated size of %d", tc.typ, tc.defined, calculated)
- }
- }
-}
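
The deleted TestSizes above guarded the netfilter ABI structs by comparing each declared SizeOf* constant against the size computed by encoding/binary. The same check works for any fixed-layout struct; a small sketch with a hypothetical header type and constant:

package main

import (
	"encoding/binary"
	"fmt"
)

// header and sizeOfHeader are hypothetical stand-ins for the ABI structs and
// their SizeOf* constants checked by the deleted test.
type header struct {
	Type   uint16
	Length uint16
	Serial uint32
}

const sizeOfHeader = 8

func main() {
	if calculated := binary.Size(header{}); calculated != sizeOfHeader {
		fmt.Printf("header has a defined size of %d and calculated size of %d\n", sizeOfHeader, calculated)
	} else {
		fmt.Println("header size matches its defined constant")
	}
}
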
diff --git a/pkg/amutex/BUILD b/pkg/amutex/BUILD
deleted file mode 100644
index bd3a5cce9..000000000
--- a/pkg/amutex/BUILD
+++ /dev/null
@@ -1,21 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "amutex",
- srcs = ["amutex.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/context",
- "//pkg/syserror",
- ],
-)
-
-go_test(
- name = "amutex_test",
- size = "small",
- srcs = ["amutex_test.go"],
- library = ":amutex",
- deps = ["//pkg/sync"],
-)
diff --git a/pkg/amutex/amutex_state_autogen.go b/pkg/amutex/amutex_state_autogen.go
new file mode 100644
index 000000000..5a09c71ed
--- /dev/null
+++ b/pkg/amutex/amutex_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package amutex
diff --git a/pkg/amutex/amutex_test.go b/pkg/amutex/amutex_test.go
deleted file mode 100644
index 8a3952f2a..000000000
--- a/pkg/amutex/amutex_test.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package amutex
-
-import (
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/sync"
-)
-
-type sleeper struct {
- ch chan struct{}
-}
-
-func (s *sleeper) SleepStart() <-chan struct{} {
- return s.ch
-}
-
-func (*sleeper) SleepFinish(bool) {
-}
-
-func (s *sleeper) Interrupted() bool {
- return len(s.ch) != 0
-}
-
-func TestMutualExclusion(t *testing.T) {
- var m AbortableMutex
- m.Init()
-
- // Test mutual exclusion by running "gr" goroutines concurrently, and
- // have each one increment a counter "iters" times within the critical
- // section established by the mutex.
- //
-	// If at the end the counter is not gr * iters, then we know that
- // goroutines ran concurrently within the critical section.
- //
- // If one of the goroutines doesn't complete, it's likely a bug that
- // causes it to wait forever.
- const gr = 1000
- const iters = 100000
- v := 0
- var wg sync.WaitGroup
- for i := 0; i < gr; i++ {
- wg.Add(1)
- go func() {
- for j := 0; j < iters; j++ {
- m.Lock(nil)
- v++
- m.Unlock()
- }
- wg.Done()
- }()
- }
-
- wg.Wait()
-
- if v != gr*iters {
- t.Fatalf("Bad count: got %v, want %v", v, gr*iters)
- }
-}
-
-func TestAbortWait(t *testing.T) {
- var s sleeper
- var m AbortableMutex
- m.Init()
-
- // Lock the mutex.
- m.Lock(&s)
-
- // Lock again, but this time cancel after 500ms.
- s.ch = make(chan struct{}, 1)
- go func() {
- time.Sleep(500 * time.Millisecond)
- s.ch <- struct{}{}
- }()
- if v := m.Lock(&s); v {
- t.Fatalf("Lock succeeded when it should have failed")
- }
-
- // Lock again, but cancel right away.
- s.ch <- struct{}{}
- if v := m.Lock(&s); v {
- t.Fatalf("Lock succeeded when it should have failed")
- }
-}
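
The deleted amutex test above exercises two properties: plain mutual exclusion (gr goroutines each incrementing a shared counter iters times must end at exactly gr * iters) and abortable waiting, where a pending Lock returns false once the caller's sleeper channel fires. A small sketch of the abort path, reusing the sleeper shape from the deleted test and assuming the pkg/amutex API it exercised (AbortableMutex.Init/Lock/Unlock):

package main

import (
	"fmt"
	"time"

	"gvisor.dev/gvisor/pkg/amutex"
)

// timeoutSleeper mirrors the sleeper type from the deleted test: a pending
// Lock aborts once the channel becomes readable.
type timeoutSleeper struct {
	ch chan struct{}
}

func (s *timeoutSleeper) SleepStart() <-chan struct{} { return s.ch }
func (*timeoutSleeper) SleepFinish(bool)              {}
func (s *timeoutSleeper) Interrupted() bool           { return len(s.ch) != 0 }

func main() {
	var m amutex.AbortableMutex
	m.Init()
	m.Lock(nil) // Hold the lock so the next attempt has to wait.

	s := &timeoutSleeper{ch: make(chan struct{}, 1)}
	go func() {
		time.Sleep(100 * time.Millisecond)
		s.ch <- struct{}{} // Abort the pending Lock after 100ms.
	}()
	if ok := m.Lock(s); !ok {
		fmt.Println("lock attempt aborted by the sleeper")
	}
	m.Unlock()
}
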
diff --git a/pkg/atomicbitops/BUILD b/pkg/atomicbitops/BUILD
deleted file mode 100644
index 11072d4de..000000000
--- a/pkg/atomicbitops/BUILD
+++ /dev/null
@@ -1,24 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "atomicbitops",
- srcs = [
- "aligned_32bit_unsafe.go",
- "aligned_64bit.go",
- "atomicbitops.go",
- "atomicbitops_amd64.s",
- "atomicbitops_arm64.s",
- "atomicbitops_noasm.go",
- ],
- visibility = ["//:sandbox"],
-)
-
-go_test(
- name = "atomicbitops_test",
- size = "small",
- srcs = ["atomicbitops_test.go"],
- library = ":atomicbitops",
- deps = ["//pkg/sync"],
-)
diff --git a/pkg/atomicbitops/atomicbitops_32bit_unsafe_state_autogen.go b/pkg/atomicbitops/atomicbitops_32bit_unsafe_state_autogen.go
new file mode 100644
index 000000000..ae32b0c59
--- /dev/null
+++ b/pkg/atomicbitops/atomicbitops_32bit_unsafe_state_autogen.go
@@ -0,0 +1,64 @@
+// automatically generated by stateify.
+
+// +build arm mips 386
+
+package atomicbitops
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (aa *AlignedAtomicInt64) StateTypeName() string {
+ return "pkg/atomicbitops.AlignedAtomicInt64"
+}
+
+func (aa *AlignedAtomicInt64) StateFields() []string {
+ return []string{
+ "value",
+ }
+}
+
+func (aa *AlignedAtomicInt64) beforeSave() {}
+
+// +checklocksignore
+func (aa *AlignedAtomicInt64) StateSave(stateSinkObject state.Sink) {
+ aa.beforeSave()
+ stateSinkObject.Save(0, &aa.value)
+}
+
+func (aa *AlignedAtomicInt64) afterLoad() {}
+
+// +checklocksignore
+func (aa *AlignedAtomicInt64) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &aa.value)
+}
+
+func (aa *AlignedAtomicUint64) StateTypeName() string {
+ return "pkg/atomicbitops.AlignedAtomicUint64"
+}
+
+func (aa *AlignedAtomicUint64) StateFields() []string {
+ return []string{
+ "value",
+ }
+}
+
+func (aa *AlignedAtomicUint64) beforeSave() {}
+
+// +checklocksignore
+func (aa *AlignedAtomicUint64) StateSave(stateSinkObject state.Sink) {
+ aa.beforeSave()
+ stateSinkObject.Save(0, &aa.value)
+}
+
+func (aa *AlignedAtomicUint64) afterLoad() {}
+
+// +checklocksignore
+func (aa *AlignedAtomicUint64) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &aa.value)
+}
+
+func init() {
+ state.Register((*AlignedAtomicInt64)(nil))
+ state.Register((*AlignedAtomicUint64)(nil))
+}
diff --git a/pkg/atomicbitops/atomicbitops_64bit_state_autogen.go b/pkg/atomicbitops/atomicbitops_64bit_state_autogen.go
new file mode 100644
index 000000000..a084f4c57
--- /dev/null
+++ b/pkg/atomicbitops/atomicbitops_64bit_state_autogen.go
@@ -0,0 +1,64 @@
+// automatically generated by stateify.
+
+// +build !arm,!mips,!386
+
+package atomicbitops
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (aa *AlignedAtomicInt64) StateTypeName() string {
+ return "pkg/atomicbitops.AlignedAtomicInt64"
+}
+
+func (aa *AlignedAtomicInt64) StateFields() []string {
+ return []string{
+ "value",
+ }
+}
+
+func (aa *AlignedAtomicInt64) beforeSave() {}
+
+// +checklocksignore
+func (aa *AlignedAtomicInt64) StateSave(stateSinkObject state.Sink) {
+ aa.beforeSave()
+ stateSinkObject.Save(0, &aa.value)
+}
+
+func (aa *AlignedAtomicInt64) afterLoad() {}
+
+// +checklocksignore
+func (aa *AlignedAtomicInt64) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &aa.value)
+}
+
+func (aa *AlignedAtomicUint64) StateTypeName() string {
+ return "pkg/atomicbitops.AlignedAtomicUint64"
+}
+
+func (aa *AlignedAtomicUint64) StateFields() []string {
+ return []string{
+ "value",
+ }
+}
+
+func (aa *AlignedAtomicUint64) beforeSave() {}
+
+// +checklocksignore
+func (aa *AlignedAtomicUint64) StateSave(stateSinkObject state.Sink) {
+ aa.beforeSave()
+ stateSinkObject.Save(0, &aa.value)
+}
+
+func (aa *AlignedAtomicUint64) afterLoad() {}
+
+// +checklocksignore
+func (aa *AlignedAtomicUint64) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &aa.value)
+}
+
+func init() {
+ state.Register((*AlignedAtomicInt64)(nil))
+ state.Register((*AlignedAtomicUint64)(nil))
+}
diff --git a/pkg/atomicbitops/atomicbitops_state_autogen.go b/pkg/atomicbitops/atomicbitops_state_autogen.go
new file mode 100644
index 000000000..06fcf712a
--- /dev/null
+++ b/pkg/atomicbitops/atomicbitops_state_autogen.go
@@ -0,0 +1,6 @@
+// automatically generated by stateify.
+
+// +build amd64 arm64
+// +build !amd64,!arm64
+
+package atomicbitops
diff --git a/pkg/atomicbitops/atomicbitops_test.go b/pkg/atomicbitops/atomicbitops_test.go
deleted file mode 100644
index 73af71bb4..000000000
--- a/pkg/atomicbitops/atomicbitops_test.go
+++ /dev/null
@@ -1,198 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package atomicbitops
-
-import (
- "runtime"
- "testing"
-
- "gvisor.dev/gvisor/pkg/sync"
-)
-
-const iterations = 100
-
-func detectRaces32(val, target uint32, fn func(*uint32, uint32)) bool {
- runtime.GOMAXPROCS(100)
- for n := 0; n < iterations; n++ {
- x := val
- var wg sync.WaitGroup
- for i := uint32(0); i < 32; i++ {
- wg.Add(1)
- go func(a *uint32, i uint32) {
- defer wg.Done()
- fn(a, uint32(1<<i))
- }(&x, i)
- }
- wg.Wait()
- if x != target {
- return true
- }
- }
- return false
-}
-
-func detectRaces64(val, target uint64, fn func(*uint64, uint64)) bool {
- runtime.GOMAXPROCS(100)
- for n := 0; n < iterations; n++ {
- x := val
- var wg sync.WaitGroup
- for i := uint64(0); i < 64; i++ {
- wg.Add(1)
- go func(a *uint64, i uint64) {
- defer wg.Done()
- fn(a, uint64(1<<i))
- }(&x, i)
- }
- wg.Wait()
- if x != target {
- return true
- }
- }
- return false
-}
-
-func TestOrUint32(t *testing.T) {
- if detectRaces32(0x0, 0xffffffff, OrUint32) {
- t.Error("Data race detected!")
- }
-}
-
-func TestAndUint32(t *testing.T) {
- if detectRaces32(0xf0f0f0f0, 0x00000000, AndUint32) {
- t.Error("Data race detected!")
- }
-}
-
-func TestXorUint32(t *testing.T) {
- if detectRaces32(0xf0f0f0f0, 0x0f0f0f0f, XorUint32) {
- t.Error("Data race detected!")
- }
-}
-
-func TestOrUint64(t *testing.T) {
- if detectRaces64(0x0, 0xffffffffffffffff, OrUint64) {
- t.Error("Data race detected!")
- }
-}
-
-func TestAndUint64(t *testing.T) {
- if detectRaces64(0xf0f0f0f0f0f0f0f0, 0x0, AndUint64) {
- t.Error("Data race detected!")
- }
-}
-
-func TestXorUint64(t *testing.T) {
- if detectRaces64(0xf0f0f0f0f0f0f0f0, 0x0f0f0f0f0f0f0f0f, XorUint64) {
- t.Error("Data race detected!")
- }
-}
-
-func TestCompareAndSwapUint32(t *testing.T) {
- tests := []struct {
- name string
- prev uint32
- old uint32
- new uint32
- next uint32
- }{
- {
- name: "Successful compare-and-swap with prev == new",
- prev: 10,
- old: 10,
- new: 10,
- next: 10,
- },
- {
- name: "Successful compare-and-swap with prev != new",
- prev: 20,
- old: 20,
- new: 22,
- next: 22,
- },
- {
- name: "Failed compare-and-swap with prev == new",
- prev: 31,
- old: 30,
- new: 31,
- next: 31,
- },
- {
- name: "Failed compare-and-swap with prev != new",
- prev: 41,
- old: 40,
- new: 42,
- next: 41,
- },
- }
- for _, test := range tests {
- val := test.prev
- prev := CompareAndSwapUint32(&val, test.old, test.new)
- if got, want := prev, test.prev; got != want {
- t.Errorf("%s: incorrect returned previous value: got %d, expected %d", test.name, got, want)
- }
- if got, want := val, test.next; got != want {
- t.Errorf("%s: incorrect value stored in val: got %d, expected %d", test.name, got, want)
- }
- }
-}
-
-func TestCompareAndSwapUint64(t *testing.T) {
- tests := []struct {
- name string
- prev uint64
- old uint64
- new uint64
- next uint64
- }{
- {
- name: "Successful compare-and-swap with prev == new",
- prev: 0x100000000,
- old: 0x100000000,
- new: 0x100000000,
- next: 0x100000000,
- },
- {
- name: "Successful compare-and-swap with prev != new",
- prev: 0x200000000,
- old: 0x200000000,
- new: 0x200000002,
- next: 0x200000002,
- },
- {
- name: "Failed compare-and-swap with prev == new",
- prev: 0x300000001,
- old: 0x300000000,
- new: 0x300000001,
- next: 0x300000001,
- },
- {
- name: "Failed compare-and-swap with prev != new",
- prev: 0x400000001,
- old: 0x400000000,
- new: 0x400000002,
- next: 0x400000001,
- },
- }
- for _, test := range tests {
- val := test.prev
- prev := CompareAndSwapUint64(&val, test.old, test.new)
- if got, want := prev, test.prev; got != want {
- t.Errorf("%s: incorrect returned previous value: got %d, expected %d", test.name, got, want)
- }
- if got, want := val, test.next; got != want {
- t.Errorf("%s: incorrect value stored in val: got %d, expected %d", test.name, got, want)
- }
- }
-}
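
The deleted atomicbitops test relies on a simple counting argument: if 32 goroutines each OR a distinct bit into the same word and the operation really is atomic, the result must be exactly 0xffffffff; any lost update shows up as a missing bit. A standalone sketch of that check, assuming the OrUint32 signature used by the deleted test and the standard library's sync.WaitGroup:

package main

import (
	"fmt"
	"sync"

	"gvisor.dev/gvisor/pkg/atomicbitops"
)

func main() {
	// Each goroutine sets one distinct bit; an atomic OR must preserve all 32.
	var x uint32
	var wg sync.WaitGroup
	for i := uint32(0); i < 32; i++ {
		wg.Add(1)
		go func(bit uint32) {
			defer wg.Done()
			atomicbitops.OrUint32(&x, 1<<bit)
		}(i)
	}
	wg.Wait()
	fmt.Printf("%#x\n", x) // 0xffffffff if no update was lost.
}
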
diff --git a/pkg/binary/BUILD b/pkg/binary/BUILD
deleted file mode 100644
index 7ca2fda90..000000000
--- a/pkg/binary/BUILD
+++ /dev/null
@@ -1,16 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "binary",
- srcs = ["binary.go"],
- visibility = ["//:sandbox"],
-)
-
-go_test(
- name = "binary_test",
- size = "small",
- srcs = ["binary_test.go"],
- library = ":binary",
-)
diff --git a/pkg/binary/binary.go b/pkg/binary/binary.go
deleted file mode 100644
index 25065aef9..000000000
--- a/pkg/binary/binary.go
+++ /dev/null
@@ -1,266 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package binary translates between select fixed-sized types and a binary
-// representation.
-package binary
-
-import (
- "encoding/binary"
- "fmt"
- "io"
- "reflect"
-)
-
-// LittleEndian is the same as encoding/binary.LittleEndian.
-//
-// It is included here as a convenience.
-var LittleEndian = binary.LittleEndian
-
-// BigEndian is the same as encoding/binary.BigEndian.
-//
-// It is included here as a convenience.
-var BigEndian = binary.BigEndian
-
-// AppendUint16 appends the binary representation of a uint16 to buf.
-func AppendUint16(buf []byte, order binary.ByteOrder, num uint16) []byte {
- buf = append(buf, make([]byte, 2)...)
- order.PutUint16(buf[len(buf)-2:], num)
- return buf
-}
-
-// AppendUint32 appends the binary representation of a uint32 to buf.
-func AppendUint32(buf []byte, order binary.ByteOrder, num uint32) []byte {
- buf = append(buf, make([]byte, 4)...)
- order.PutUint32(buf[len(buf)-4:], num)
- return buf
-}
-
-// AppendUint64 appends the binary representation of a uint64 to buf.
-func AppendUint64(buf []byte, order binary.ByteOrder, num uint64) []byte {
- buf = append(buf, make([]byte, 8)...)
- order.PutUint64(buf[len(buf)-8:], num)
- return buf
-}
-
-// Marshal appends a binary representation of data to buf.
-//
-// data must only contain fixed-length signed and unsigned ints, arrays,
-// slices, structs and compositions of said types. data may be a pointer,
-// but cannot contain pointers.
-func Marshal(buf []byte, order binary.ByteOrder, data interface{}) []byte {
- return marshal(buf, order, reflect.Indirect(reflect.ValueOf(data)))
-}
-
-func marshal(buf []byte, order binary.ByteOrder, data reflect.Value) []byte {
- switch data.Kind() {
- case reflect.Int8:
- buf = append(buf, byte(int8(data.Int())))
- case reflect.Int16:
- buf = AppendUint16(buf, order, uint16(int16(data.Int())))
- case reflect.Int32:
- buf = AppendUint32(buf, order, uint32(int32(data.Int())))
- case reflect.Int64:
- buf = AppendUint64(buf, order, uint64(data.Int()))
-
- case reflect.Uint8:
- buf = append(buf, byte(data.Uint()))
- case reflect.Uint16:
- buf = AppendUint16(buf, order, uint16(data.Uint()))
- case reflect.Uint32:
- buf = AppendUint32(buf, order, uint32(data.Uint()))
- case reflect.Uint64:
- buf = AppendUint64(buf, order, data.Uint())
-
- case reflect.Array, reflect.Slice:
- for i, l := 0, data.Len(); i < l; i++ {
- buf = marshal(buf, order, data.Index(i))
- }
-
- case reflect.Struct:
- for i, l := 0, data.NumField(); i < l; i++ {
- buf = marshal(buf, order, data.Field(i))
- }
-
- default:
- panic("invalid type: " + data.Type().String())
- }
- return buf
-}
-
-// Unmarshal unpacks buf into data.
-//
-// data must be a slice or a pointer and buf must have a length of exactly
-// Size(data). data must only contain fixed-length signed and unsigned ints,
-// arrays, slices, structs and compositions of said types.
-func Unmarshal(buf []byte, order binary.ByteOrder, data interface{}) {
- value := reflect.ValueOf(data)
- switch value.Kind() {
- case reflect.Ptr:
- value = value.Elem()
- case reflect.Slice:
- default:
- panic("invalid type: " + value.Type().String())
- }
- buf = unmarshal(buf, order, value)
- if len(buf) != 0 {
- panic(fmt.Sprintf("buffer too long by %d bytes", len(buf)))
- }
-}
-
-func unmarshal(buf []byte, order binary.ByteOrder, data reflect.Value) []byte {
- switch data.Kind() {
- case reflect.Int8:
- data.SetInt(int64(int8(buf[0])))
- buf = buf[1:]
- case reflect.Int16:
- data.SetInt(int64(int16(order.Uint16(buf))))
- buf = buf[2:]
- case reflect.Int32:
- data.SetInt(int64(int32(order.Uint32(buf))))
- buf = buf[4:]
- case reflect.Int64:
- data.SetInt(int64(order.Uint64(buf)))
- buf = buf[8:]
-
- case reflect.Uint8:
- data.SetUint(uint64(buf[0]))
- buf = buf[1:]
- case reflect.Uint16:
- data.SetUint(uint64(order.Uint16(buf)))
- buf = buf[2:]
- case reflect.Uint32:
- data.SetUint(uint64(order.Uint32(buf)))
- buf = buf[4:]
- case reflect.Uint64:
- data.SetUint(order.Uint64(buf))
- buf = buf[8:]
-
- case reflect.Array, reflect.Slice:
- for i, l := 0, data.Len(); i < l; i++ {
- buf = unmarshal(buf, order, data.Index(i))
- }
-
- case reflect.Struct:
- for i, l := 0, data.NumField(); i < l; i++ {
- if field := data.Field(i); field.CanSet() {
- buf = unmarshal(buf, order, field)
- } else {
- buf = buf[sizeof(field):]
- }
- }
-
- default:
- panic("invalid type: " + data.Type().String())
- }
- return buf
-}
-
-// Size calculates the buffer size needed by Marshal or Unmarshal.
-//
-// Size only supports the types supported by Marshal.
-func Size(v interface{}) uintptr {
- return sizeof(reflect.Indirect(reflect.ValueOf(v)))
-}
-
-func sizeof(data reflect.Value) uintptr {
- switch data.Kind() {
- case reflect.Int8, reflect.Uint8:
- return 1
- case reflect.Int16, reflect.Uint16:
- return 2
- case reflect.Int32, reflect.Uint32:
- return 4
- case reflect.Int64, reflect.Uint64:
- return 8
-
- case reflect.Array, reflect.Slice:
- var size uintptr
- for i, l := 0, data.Len(); i < l; i++ {
- size += sizeof(data.Index(i))
- }
- return size
-
- case reflect.Struct:
- var size uintptr
- for i, l := 0, data.NumField(); i < l; i++ {
- size += sizeof(data.Field(i))
- }
- return size
-
- default:
- panic("invalid type: " + data.Type().String())
- }
-}
-
-// ReadUint16 reads a uint16 from r.
-func ReadUint16(r io.Reader, order binary.ByteOrder) (uint16, error) {
- buf := make([]byte, 2)
- if _, err := io.ReadFull(r, buf); err != nil {
- return 0, err
- }
- return order.Uint16(buf), nil
-}
-
-// ReadUint32 reads a uint32 from r.
-func ReadUint32(r io.Reader, order binary.ByteOrder) (uint32, error) {
- buf := make([]byte, 4)
- if _, err := io.ReadFull(r, buf); err != nil {
- return 0, err
- }
- return order.Uint32(buf), nil
-}
-
-// ReadUint64 reads a uint64 from r.
-func ReadUint64(r io.Reader, order binary.ByteOrder) (uint64, error) {
- buf := make([]byte, 8)
- if _, err := io.ReadFull(r, buf); err != nil {
- return 0, err
- }
- return order.Uint64(buf), nil
-}
-
-// WriteUint16 writes a uint16 to w.
-func WriteUint16(w io.Writer, order binary.ByteOrder, num uint16) error {
- buf := make([]byte, 2)
- order.PutUint16(buf, num)
- _, err := w.Write(buf)
- return err
-}
-
-// WriteUint32 writes a uint32 to w.
-func WriteUint32(w io.Writer, order binary.ByteOrder, num uint32) error {
- buf := make([]byte, 4)
- order.PutUint32(buf, num)
- _, err := w.Write(buf)
- return err
-}
-
-// WriteUint64 writes a uint64 to w.
-func WriteUint64(w io.Writer, order binary.ByteOrder, num uint64) error {
- buf := make([]byte, 8)
- order.PutUint64(buf, num)
- _, err := w.Write(buf)
- return err
-}
-
-// AlignUp rounds a length up to an alignment. align must be a power of 2.
-func AlignUp(length int, align uint) int {
- return (length + int(align) - 1) & ^(int(align) - 1)
-}
-
-// AlignDown rounds a length down to an alignment. align must be a power of 2.
-func AlignDown(length int, align uint) int {
- return length & ^(int(align) - 1)
-}
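
For reference, this is how the removed binary package was typically used: Marshal walks a value with reflection and appends its little- or big-endian encoding to a buffer, Unmarshal reverses it, Size reports the encoded length, and AlignUp/AlignDown round lengths to a power-of-two boundary. A short sketch against the API defined in the deleted file, with a hypothetical frame struct:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/binary"
)

// frame is a hypothetical fixed-width struct; the deleted package handled any
// composition of fixed-size ints, arrays, slices, and structs.
type frame struct {
	Kind  uint16
	Flags uint16
	Value uint32
}

func main() {
	in := frame{Kind: 1, Flags: 2, Value: 3}
	buf := binary.Marshal(nil, binary.LittleEndian, &in)
	fmt.Println(len(buf), binary.Size(&in)) // 8 8

	var out frame
	binary.Unmarshal(buf, binary.LittleEndian, &out)
	fmt.Printf("%+v\n", out) // {Kind:1 Flags:2 Value:3}

	// AlignUp(9, 8) == 16, AlignDown(9, 8) == 8.
	fmt.Println(binary.AlignUp(9, 8), binary.AlignDown(9, 8))
}
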
diff --git a/pkg/binary/binary_test.go b/pkg/binary/binary_test.go
deleted file mode 100644
index 4d609a438..000000000
--- a/pkg/binary/binary_test.go
+++ /dev/null
@@ -1,266 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package binary
-
-import (
- "bytes"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "reflect"
- "strings"
- "testing"
-)
-
-func newInt32(i int32) *int32 {
- return &i
-}
-
-func TestSize(t *testing.T) {
- if got, want := Size(uint32(10)), uintptr(4); got != want {
- t.Errorf("Got = %d, want = %d", got, want)
- }
-}
-
-func TestPanic(t *testing.T) {
- tests := []struct {
- name string
- f func([]byte, binary.ByteOrder, interface{})
- data interface{}
- want string
- }{
- {"Unmarshal int", Unmarshal, 5, "invalid type: int"},
- {"Unmarshal []int", Unmarshal, []int{5}, "invalid type: int"},
- {"Marshal int", func(_ []byte, bo binary.ByteOrder, d interface{}) { Marshal(nil, bo, d) }, 5, "invalid type: int"},
- {"Marshal int[]", func(_ []byte, bo binary.ByteOrder, d interface{}) { Marshal(nil, bo, d) }, []int{5}, "invalid type: int"},
- {"Unmarshal short buffer", Unmarshal, newInt32(5), "runtime error: index out of range"},
- {"Unmarshal long buffer", func(_ []byte, bo binary.ByteOrder, d interface{}) { Unmarshal(make([]byte, 50), bo, d) }, newInt32(5), "buffer too long by 46 bytes"},
- {"marshal int", func(_ []byte, bo binary.ByteOrder, d interface{}) { marshal(nil, bo, reflect.ValueOf(d)) }, 5, "invalid type: int"},
- {"Size int", func(_ []byte, _ binary.ByteOrder, d interface{}) { Size(d) }, 5, "invalid type: int"},
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- defer func() {
- r := recover()
- if got := fmt.Sprint(r); !strings.HasPrefix(got, test.want) {
- t.Errorf("Got recover() = %q, want prefix = %q", got, test.want)
- }
- }()
-
- test.f(nil, LittleEndian, test.data)
- })
- }
-}
-
-type inner struct {
- Field int32
-}
-
-type outer struct {
- Int8 int8
- Int16 int16
- Int32 int32
- Int64 int64
- Uint8 uint8
- Uint16 uint16
- Uint32 uint32
- Uint64 uint64
-
- Slice []int32
- Array [5]int32
- Struct inner
-}
-
-func TestMarshalUnmarshal(t *testing.T) {
- want := outer{
- 1, 2, 3, 4, 5, 6, 7, 8,
- []int32{9, 10, 11},
- [5]int32{12, 13, 14, 15, 16},
- inner{17},
- }
- buf := Marshal(nil, LittleEndian, want)
- got := outer{Slice: []int32{0, 0, 0}}
- Unmarshal(buf, LittleEndian, &got)
- if !reflect.DeepEqual(&got, &want) {
- t.Errorf("Got = %#v, want = %#v", got, want)
- }
-}
-
-type outerBenchmark struct {
- Int8 int8
- Int16 int16
- Int32 int32
- Int64 int64
- Uint8 uint8
- Uint16 uint16
- Uint32 uint32
- Uint64 uint64
-
- Array [5]int32
- Struct inner
-}
-
-func BenchmarkMarshalUnmarshal(b *testing.B) {
- b.ReportAllocs()
-
- in := outerBenchmark{
- 1, 2, 3, 4, 5, 6, 7, 8,
- [5]int32{9, 10, 11, 12, 13},
- inner{14},
- }
- buf := make([]byte, Size(&in))
- out := outerBenchmark{}
-
- for i := 0; i < b.N; i++ {
- buf := Marshal(buf[:0], LittleEndian, &in)
- Unmarshal(buf, LittleEndian, &out)
- }
-}
-
-func BenchmarkReadWrite(b *testing.B) {
- b.ReportAllocs()
-
- in := outerBenchmark{
- 1, 2, 3, 4, 5, 6, 7, 8,
- [5]int32{9, 10, 11, 12, 13},
- inner{14},
- }
- buf := bytes.NewBuffer(make([]byte, binary.Size(&in)))
- out := outerBenchmark{}
-
- for i := 0; i < b.N; i++ {
- buf.Reset()
- if err := binary.Write(buf, LittleEndian, &in); err != nil {
- b.Error("Write:", err)
- }
- if err := binary.Read(buf, LittleEndian, &out); err != nil {
- b.Error("Read:", err)
- }
- }
-}
-
-type outerPadding struct {
- _ int8
- _ int16
- _ int32
- _ int64
- _ uint8
- _ uint16
- _ uint32
- _ uint64
-
- _ []int32
- _ [5]int32
- _ inner
-}
-
-func TestMarshalUnmarshalPadding(t *testing.T) {
- var want outerPadding
- buf := Marshal(nil, LittleEndian, want)
- var got outerPadding
- Unmarshal(buf, LittleEndian, &got)
- if !reflect.DeepEqual(&got, &want) {
- t.Errorf("Got = %#v, want = %#v", got, want)
- }
-}
-
-// Numbers with bits in every byte that are distinguishable in big and little endian.
-const (
- want16 = 64<<8 | 128
- want32 = 16<<24 | 32<<16 | want16
- want64 = 1<<56 | 2<<48 | 4<<40 | 8<<32 | want32
-)
-
-func TestReadWriteUint16(t *testing.T) {
- const want = uint16(want16)
- var buf bytes.Buffer
- if err := WriteUint16(&buf, LittleEndian, want); err != nil {
- t.Error("WriteUint16:", err)
- }
- got, err := ReadUint16(&buf, LittleEndian)
- if err != nil {
- t.Error("ReadUint16:", err)
- }
- if got != want {
- t.Errorf("got = %d, want = %d", got, want)
- }
-}
-
-func TestReadWriteUint32(t *testing.T) {
- const want = uint32(want32)
- var buf bytes.Buffer
- if err := WriteUint32(&buf, LittleEndian, want); err != nil {
- t.Error("WriteUint32:", err)
- }
- got, err := ReadUint32(&buf, LittleEndian)
- if err != nil {
- t.Error("ReadUint32:", err)
- }
- if got != want {
- t.Errorf("got = %d, want = %d", got, want)
- }
-}
-
-func TestReadWriteUint64(t *testing.T) {
- const want = uint64(want64)
- var buf bytes.Buffer
- if err := WriteUint64(&buf, LittleEndian, want); err != nil {
- t.Error("WriteUint64:", err)
- }
- got, err := ReadUint64(&buf, LittleEndian)
- if err != nil {
- t.Error("ReadUint64:", err)
- }
- if got != want {
- t.Errorf("got = %d, want = %d", got, want)
- }
-}
-
-type readWriter struct {
- err error
-}
-
-func (rw *readWriter) Write([]byte) (int, error) {
- return 0, rw.err
-}
-
-func (rw *readWriter) Read([]byte) (int, error) {
- return 0, rw.err
-}
-
-func TestReadWriteError(t *testing.T) {
- tests := []struct {
- name string
- f func(rw io.ReadWriter) error
- }{
- {"WriteUint16", func(rw io.ReadWriter) error { return WriteUint16(rw, LittleEndian, 0) }},
- {"ReadUint16", func(rw io.ReadWriter) error { _, err := ReadUint16(rw, LittleEndian); return err }},
- {"WriteUint32", func(rw io.ReadWriter) error { return WriteUint32(rw, LittleEndian, 0) }},
- {"ReadUint32", func(rw io.ReadWriter) error { _, err := ReadUint32(rw, LittleEndian); return err }},
- {"WriteUint64", func(rw io.ReadWriter) error { return WriteUint64(rw, LittleEndian, 0) }},
- {"ReadUint64", func(rw io.ReadWriter) error { _, err := ReadUint64(rw, LittleEndian); return err }},
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- want := errors.New("want")
- if got := test.f(&readWriter{want}); got != want {
- t.Errorf("got = %v, want = %v", got, want)
- }
- })
- }
-}
diff --git a/pkg/bits/BUILD b/pkg/bits/BUILD
deleted file mode 100644
index 63f4670d7..000000000
--- a/pkg/bits/BUILD
+++ /dev/null
@@ -1,55 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "bits",
- srcs = [
- "bits.go",
- "bits32.go",
- "bits64.go",
- "uint64_arch.go",
- "uint64_arch_amd64_asm.s",
- "uint64_arch_arm64_asm.s",
- "uint64_arch_generic.go",
- ],
- visibility = ["//:sandbox"],
-)
-
-go_template(
- name = "bits_template",
- srcs = ["bits_template.go"],
- types = [
- "T",
- ],
-)
-
-go_template_instance(
- name = "bits64",
- out = "bits64.go",
- package = "bits",
- suffix = "64",
- template = ":bits_template",
- types = {
- "T": "uint64",
- },
-)
-
-go_template_instance(
- name = "bits32",
- out = "bits32.go",
- package = "bits",
- suffix = "32",
- template = ":bits_template",
- types = {
- "T": "uint32",
- },
-)
-
-go_test(
- name = "bits_test",
- size = "small",
- srcs = ["uint64_test.go"],
- library = ":bits",
-)
diff --git a/pkg/bits/bits32.go b/pkg/bits/bits32.go
new file mode 100644
index 000000000..28134a9e7
--- /dev/null
+++ b/pkg/bits/bits32.go
@@ -0,0 +1,33 @@
+package bits
+
+// IsOn returns true if *all* bits set in 'bits' are set in 'mask'.
+func IsOn32(mask, bits uint32) bool {
+ return mask&bits == bits
+}
+
+// IsAnyOn returns true if *any* bit set in 'bits' is set in 'mask'.
+func IsAnyOn32(mask, bits uint32) bool {
+ return mask&bits != 0
+}
+
+// Mask returns a T with all of the given bits set.
+func Mask32(is ...int) uint32 {
+ ret := uint32(0)
+ for _, i := range is {
+ ret |= MaskOf32(i)
+ }
+ return ret
+}
+
+// MaskOf is like Mask, but sets only a single bit (more efficiently).
+func MaskOf32(i int) uint32 {
+ return uint32(1) << uint32(i)
+}
+
+// IsPowerOfTwo returns true if v is power of 2.
+func IsPowerOfTwo32(v uint32) bool {
+ if v == 0 {
+ return false
+ }
+ return v&(v-1) == 0
+}
diff --git a/pkg/bits/bits64.go b/pkg/bits/bits64.go
new file mode 100644
index 000000000..73117b19b
--- /dev/null
+++ b/pkg/bits/bits64.go
@@ -0,0 +1,33 @@
+package bits
+
+// IsOn returns true if *all* bits set in 'bits' are set in 'mask'.
+func IsOn64(mask, bits uint64) bool {
+ return mask&bits == bits
+}
+
+// IsAnyOn returns true if *any* bit set in 'bits' is set in 'mask'.
+func IsAnyOn64(mask, bits uint64) bool {
+ return mask&bits != 0
+}
+
+// Mask returns a T with all of the given bits set.
+func Mask64(is ...int) uint64 {
+ ret := uint64(0)
+ for _, i := range is {
+ ret |= MaskOf64(i)
+ }
+ return ret
+}
+
+// MaskOf is like Mask, but sets only a single bit (more efficiently).
+func MaskOf64(i int) uint64 {
+ return uint64(1) << uint64(i)
+}
+
+// IsPowerOfTwo returns true if v is power of 2.
+func IsPowerOfTwo64(v uint64) bool {
+ if v == 0 {
+ return false
+ }
+ return v&(v-1) == 0
+}
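
The generated 32- and 64-bit variants above are instantiations of one template, so they share semantics: MaskOfN sets a single bit, MaskN ORs several of them together, IsOnN requires all of the given bits while IsAnyOnN requires at least one, and IsPowerOfTwoN is the usual v != 0 && v&(v-1) == 0 test. A brief usage sketch of the 64-bit flavour (import path assumed to be gvisor.dev/gvisor/pkg/bits):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/bits"
)

func main() {
	// Mask64(0, 3, 63) sets bits 0, 3 and 63.
	m := bits.Mask64(0, 3, 63)
	fmt.Printf("%#x\n", m) // 0x8000000000000009

	fmt.Println(bits.IsOn64(m, bits.Mask64(0, 3)))   // true: all requested bits are set.
	fmt.Println(bits.IsAnyOn64(m, bits.MaskOf64(1))) // false: bit 1 is clear.

	fmt.Println(bits.IsPowerOfTwo64(64), bits.IsPowerOfTwo64(65)) // true false
}
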
diff --git a/pkg/bits/bits_state_autogen.go b/pkg/bits/bits_state_autogen.go
new file mode 100644
index 000000000..22b8250c6
--- /dev/null
+++ b/pkg/bits/bits_state_autogen.go
@@ -0,0 +1,6 @@
+// automatically generated by stateify.
+
+// +build amd64 arm64
+// +build !amd64,!arm64
+
+package bits
diff --git a/pkg/bits/bits_template.go b/pkg/bits/bits_template.go
deleted file mode 100644
index 998645388..000000000
--- a/pkg/bits/bits_template.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package bits
-
-// Non-atomic bit operations on a template type T.
-
-// T is a required type parameter that must be an integral type.
-type T uint64
-
-// IsOn returns true if *all* bits set in 'bits' are set in 'mask'.
-func IsOn(mask, bits T) bool {
- return mask&bits == bits
-}
-
-// IsAnyOn returns true if *any* bit set in 'bits' is set in 'mask'.
-func IsAnyOn(mask, bits T) bool {
- return mask&bits != 0
-}
-
-// Mask returns a T with all of the given bits set.
-func Mask(is ...int) T {
- ret := T(0)
- for _, i := range is {
- ret |= MaskOf(i)
- }
- return ret
-}
-
-// MaskOf is like Mask, but sets only a single bit (more efficiently).
-func MaskOf(i int) T {
- return T(1) << T(i)
-}
-
-// IsPowerOfTwo returns true if v is power of 2.
-func IsPowerOfTwo(v T) bool {
- if v == 0 {
- return false
- }
- return v&(v-1) == 0
-}
diff --git a/pkg/bits/uint64_test.go b/pkg/bits/uint64_test.go
deleted file mode 100644
index 193d1ebcd..000000000
--- a/pkg/bits/uint64_test.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package bits
-
-import (
- "reflect"
- "testing"
-)
-
-func TestTrailingZeros64(t *testing.T) {
- for i := 0; i <= 64; i++ {
- n := uint64(1) << uint(i)
- if got, want := TrailingZeros64(n), i; got != want {
- t.Errorf("TrailingZeros64(%#x): got %d, wanted %d", n, got, want)
- }
- }
-
- for i := 0; i < 64; i++ {
- n := ^uint64(0) << uint(i)
- if got, want := TrailingZeros64(n), i; got != want {
- t.Errorf("TrailingZeros64(%#x): got %d, wanted %d", n, got, want)
- }
- }
-
- for i := 0; i < 64; i++ {
- n := ^uint64(0) >> uint(i)
- if got, want := TrailingZeros64(n), 0; got != want {
- t.Errorf("TrailingZeros64(%#x): got %d, wanted %d", n, got, want)
- }
- }
-}
-
-func TestMostSignificantOne64(t *testing.T) {
- for i := 0; i <= 64; i++ {
- n := uint64(1) << uint(i)
- if got, want := MostSignificantOne64(n), i; got != want {
- t.Errorf("MostSignificantOne64(%#x): got %d, wanted %d", n, got, want)
- }
- }
-
- for i := 0; i < 64; i++ {
- n := ^uint64(0) >> uint(i)
- if got, want := MostSignificantOne64(n), 63-i; got != want {
- t.Errorf("MostSignificantOne64(%#x): got %d, wanted %d", n, got, want)
- }
- }
-
- for i := 0; i < 64; i++ {
- n := ^uint64(0) << uint(i)
- if got, want := MostSignificantOne64(n), 63; got != want {
- t.Errorf("MostSignificantOne64(%#x): got %d, wanted %d", n, got, want)
- }
- }
-}
-
-func TestForEachSetBit64(t *testing.T) {
- for _, want := range [][]int{
- {},
- {0},
- {1},
- {63},
- {0, 1},
- {1, 3, 5},
- {0, 63},
- } {
- n := Mask64(want...)
- // "Slice values are deeply equal when ... they are both nil or both
- // non-nil ..."
- got := make([]int, 0)
- ForEachSetBit64(n, func(i int) {
- got = append(got, i)
- })
- if !reflect.DeepEqual(got, want) {
- t.Errorf("ForEachSetBit64(%#x): iterated bits %v, wanted %v", n, got, want)
- }
- }
-}
-
-func TestIsOn(t *testing.T) {
- type spec struct {
- mask uint64
- bits uint64
- any bool
- all bool
- }
- for _, s := range []spec{
- {Mask64(0), Mask64(0), true, true},
- {Mask64(63), Mask64(63), true, true},
- {Mask64(0), Mask64(1), false, false},
- {Mask64(0), Mask64(0, 1), true, false},
-
- {Mask64(1, 63), Mask64(1), true, true},
- {Mask64(1, 63), Mask64(1, 63), true, true},
- {Mask64(1, 63), Mask64(0, 1, 63), true, false},
- {Mask64(1, 63), Mask64(0, 62), false, false},
- } {
- if ok := IsAnyOn64(s.mask, s.bits); ok != s.any {
- t.Errorf("IsAnyOn(%#x, %#x) = %v, wanted: %v", s.mask, s.bits, ok, s.any)
- }
- if ok := IsOn64(s.mask, s.bits); ok != s.all {
- t.Errorf("IsOn(%#x, %#x) = %v, wanted: %v", s.mask, s.bits, ok, s.all)
- }
- }
-}
-
-func TestIsPowerOfTwo(t *testing.T) {
- for _, tc := range []struct {
- v uint64
- want bool
- }{
- {v: 0, want: false},
- {v: 1, want: true},
- {v: 2, want: true},
- {v: 3, want: false},
- {v: 4, want: true},
- {v: 5, want: false},
- } {
- if got := IsPowerOfTwo64(tc.v); got != tc.want {
- t.Errorf("IsPowerOfTwo(%d) = %t, want: %t", tc.v, got, tc.want)
- }
- }
-}
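
The deleted tests above also document the uint64 helpers that remain in this package: TrailingZeros64 and MostSignificantOne64 return the index of the lowest and highest set bit (64 for a zero input), and ForEachSetBit64 invokes its callback once per set bit in ascending order. A small sketch of those properties:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/bits"
)

func main() {
	n := bits.Mask64(1, 3, 5)

	fmt.Println(bits.TrailingZeros64(n))      // 1: lowest set bit.
	fmt.Println(bits.MostSignificantOne64(n)) // 5: highest set bit.

	// Visits each set bit index in ascending order: 1 3 5.
	bits.ForEachSetBit64(n, func(i int) {
		fmt.Print(i, " ")
	})
	fmt.Println()
}
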
diff --git a/pkg/bpf/BUILD b/pkg/bpf/BUILD
deleted file mode 100644
index c17390522..000000000
--- a/pkg/bpf/BUILD
+++ /dev/null
@@ -1,32 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "bpf",
- srcs = [
- "bpf.go",
- "decoder.go",
- "input_bytes.go",
- "interpreter.go",
- "program_builder.go",
- ],
- visibility = ["//visibility:public"],
- deps = ["//pkg/abi/linux"],
-)
-
-go_test(
- name = "bpf_test",
- size = "small",
- srcs = [
- "decoder_test.go",
- "interpreter_test.go",
- "program_builder_test.go",
- ],
- library = ":bpf",
- deps = [
- "//pkg/abi/linux",
- "//pkg/hostarch",
- "//pkg/marshal",
- ],
-)
diff --git a/pkg/bpf/bpf_state_autogen.go b/pkg/bpf/bpf_state_autogen.go
new file mode 100644
index 000000000..3a020fe29
--- /dev/null
+++ b/pkg/bpf/bpf_state_autogen.go
@@ -0,0 +1,36 @@
+// automatically generated by stateify.
+
+package bpf
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (p *Program) StateTypeName() string {
+ return "pkg/bpf.Program"
+}
+
+func (p *Program) StateFields() []string {
+ return []string{
+ "instructions",
+ }
+}
+
+func (p *Program) beforeSave() {}
+
+// +checklocksignore
+func (p *Program) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+ stateSinkObject.Save(0, &p.instructions)
+}
+
+func (p *Program) afterLoad() {}
+
+// +checklocksignore
+func (p *Program) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &p.instructions)
+}
+
+func init() {
+ state.Register((*Program)(nil))
+}
diff --git a/pkg/bpf/decoder_test.go b/pkg/bpf/decoder_test.go
deleted file mode 100644
index bb971ce21..000000000
--- a/pkg/bpf/decoder_test.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package bpf
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
-)
-
-func TestDecode(t *testing.T) {
- for _, test := range []struct {
- filter linux.BPFInstruction
- expected string
- fail bool
- }{
- {filter: Stmt(Ld+Imm, 10), expected: "A <- 10"},
- {filter: Stmt(Ld+Abs+W, 10), expected: "A <- P[10:4]"},
- {filter: Stmt(Ld+Ind+H, 10), expected: "A <- P[X+10:2]"},
- {filter: Stmt(Ld+Ind+B, 10), expected: "A <- P[X+10:1]"},
- {filter: Stmt(Ld+Mem, 10), expected: "A <- M[10]"},
- {filter: Stmt(Ld+Len, 0), expected: "A <- len"},
- {filter: Stmt(Ldx+Imm, 10), expected: "X <- 10"},
- {filter: Stmt(Ldx+Mem, 10), expected: "X <- M[10]"},
- {filter: Stmt(Ldx+Len, 0), expected: "X <- len"},
- {filter: Stmt(Ldx+Msh, 10), expected: "X <- 4*(P[10:1]&0xf)"},
- {filter: Stmt(St, 10), expected: "M[10] <- A"},
- {filter: Stmt(Stx, 10), expected: "M[10] <- X"},
- {filter: Stmt(Alu+Add+K, 10), expected: "A <- A + 10"},
- {filter: Stmt(Alu+Sub+K, 10), expected: "A <- A - 10"},
- {filter: Stmt(Alu+Mul+K, 10), expected: "A <- A * 10"},
- {filter: Stmt(Alu+Div+K, 10), expected: "A <- A / 10"},
- {filter: Stmt(Alu+Or+K, 10), expected: "A <- A | 10"},
- {filter: Stmt(Alu+And+K, 10), expected: "A <- A & 10"},
- {filter: Stmt(Alu+Lsh+K, 10), expected: "A <- A << 10"},
- {filter: Stmt(Alu+Rsh+K, 10), expected: "A <- A >> 10"},
- {filter: Stmt(Alu+Mod+K, 10), expected: "A <- A % 10"},
- {filter: Stmt(Alu+Xor+K, 10), expected: "A <- A ^ 10"},
- {filter: Stmt(Alu+Add+X, 0), expected: "A <- A + X"},
- {filter: Stmt(Alu+Sub+X, 0), expected: "A <- A - X"},
- {filter: Stmt(Alu+Mul+X, 0), expected: "A <- A * X"},
- {filter: Stmt(Alu+Div+X, 0), expected: "A <- A / X"},
- {filter: Stmt(Alu+Or+X, 0), expected: "A <- A | X"},
- {filter: Stmt(Alu+And+X, 0), expected: "A <- A & X"},
- {filter: Stmt(Alu+Lsh+X, 0), expected: "A <- A << X"},
- {filter: Stmt(Alu+Rsh+X, 0), expected: "A <- A >> X"},
- {filter: Stmt(Alu+Mod+X, 0), expected: "A <- A % X"},
- {filter: Stmt(Alu+Xor+X, 0), expected: "A <- A ^ X"},
- {filter: Stmt(Alu+Neg, 0), expected: "A <- -A"},
- {filter: Stmt(Jmp+Ja, 10), expected: "pc += 10"},
- {filter: Jump(Jmp+Jeq+K, 10, 2, 5), expected: "pc += (A == 10) ? 2 : 5"},
- {filter: Jump(Jmp+Jgt+K, 10, 2, 5), expected: "pc += (A > 10) ? 2 : 5"},
- {filter: Jump(Jmp+Jge+K, 10, 2, 5), expected: "pc += (A >= 10) ? 2 : 5"},
- {filter: Jump(Jmp+Jset+K, 10, 2, 5), expected: "pc += (A & 10) ? 2 : 5"},
- {filter: Jump(Jmp+Jeq+X, 0, 2, 5), expected: "pc += (A == X) ? 2 : 5"},
- {filter: Jump(Jmp+Jgt+X, 0, 2, 5), expected: "pc += (A > X) ? 2 : 5"},
- {filter: Jump(Jmp+Jge+X, 0, 2, 5), expected: "pc += (A >= X) ? 2 : 5"},
- {filter: Jump(Jmp+Jset+X, 0, 2, 5), expected: "pc += (A & X) ? 2 : 5"},
- {filter: Stmt(Ret+K, 10), expected: "ret 10"},
- {filter: Stmt(Ret+A, 0), expected: "ret A"},
- {filter: Stmt(Misc+Tax, 0), expected: "X <- A"},
- {filter: Stmt(Misc+Txa, 0), expected: "A <- X"},
- {filter: Stmt(Ld+Ind+Msh, 0), fail: true},
- } {
- got, err := Decode(test.filter)
- if test.fail {
- if err == nil {
- t.Errorf("Decode(%v) failed, expected: 'error', got: %q", test.filter, got)
- continue
- }
- } else {
- if err != nil {
- t.Errorf("Decode(%v) failed for test %q, error: %q", test.filter, test.expected, err)
- continue
- }
- if got != test.expected {
- t.Errorf("Decode(%v) failed, expected: %q, got: %q", test.filter, test.expected, got)
- continue
- }
- }
- }
-}
-
-func TestDecodeInstructions(t *testing.T) {
- for _, test := range []struct {
- name string
- program []linux.BPFInstruction
- expected string
- fail bool
- }{
- {name: "basic with jump indexes",
- program: []linux.BPFInstruction{
- Stmt(Ld+Abs+W, 10),
- Stmt(Ldx+Mem, 10),
- Stmt(St, 10),
- Stmt(Stx, 10),
- Stmt(Alu+Add+K, 10),
- Stmt(Jmp+Ja, 10),
- Jump(Jmp+Jeq+K, 10, 2, 5),
- Jump(Jmp+Jset+X, 0, 0, 5),
- Stmt(Misc+Tax, 0),
- },
- expected: "0: A <- P[10:4]\n" +
- "1: X <- M[10]\n" +
- "2: M[10] <- A\n" +
- "3: M[10] <- X\n" +
- "4: A <- A + 10\n" +
- "5: pc += 10 [16]\n" +
- "6: pc += (A == 10) ? 2 [9] : 5 [12]\n" +
- "7: pc += (A & X) ? 0 [8] : 5 [13]\n" +
- "8: X <- A\n",
- },
- {name: "invalid instruction",
- program: []linux.BPFInstruction{Stmt(Ld+Abs+W, 10), Stmt(Ld+Len+Mem, 0)},
- fail: true},
- } {
- got, err := DecodeInstructions(test.program)
- if test.fail {
- if err == nil {
- t.Errorf("%s: Decode(...) failed, expected: 'error', got: %q", test.name, got)
- continue
- }
- } else {
- if err != nil {
- t.Errorf("%s: Decode failed: %v", test.name, err)
- continue
- }
- if got != test.expected {
- t.Errorf("%s: Decode(...) failed, expected: %q, got: %q", test.name, test.expected, got)
- continue
- }
- }
- }
-}
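
The deleted decoder tests above double as documentation for the classic-BPF disassembler: Stmt and Jump build linux.BPFInstruction values, and Decode/DecodeInstructions render them in a readable register-transfer form, with jump targets resolved to absolute indexes. A short sketch using those helpers from outside the package (expected output taken from the deleted test cases):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/abi/linux"
	"gvisor.dev/gvisor/pkg/bpf"
)

func main() {
	// Load the 32-bit word at absolute offset 10, then return it.
	prog := []linux.BPFInstruction{
		bpf.Stmt(bpf.Ld|bpf.Abs|bpf.W, 10),
		bpf.Stmt(bpf.Ret|bpf.A, 0),
	}
	out, err := bpf.DecodeInstructions(prog)
	if err != nil {
		panic(err)
	}
	fmt.Print(out)
	// 0: A <- P[10:4]
	// 1: ret A
}
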
diff --git a/pkg/bpf/interpreter_test.go b/pkg/bpf/interpreter_test.go
deleted file mode 100644
index f64a2dc50..000000000
--- a/pkg/bpf/interpreter_test.go
+++ /dev/null
@@ -1,799 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package bpf
-
-import (
- "encoding/binary"
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/hostarch"
- "gvisor.dev/gvisor/pkg/marshal"
-)
-
-func TestCompilationErrors(t *testing.T) {
- for _, test := range []struct {
- // desc is the test's description.
- desc string
-
- // insns is the BPF instructions to be compiled.
- insns []linux.BPFInstruction
-
- // expectedErr is the expected compilation error.
- expectedErr error
- }{
- {
- desc: "Instructions must not be nil",
- expectedErr: Error{InvalidInstructionCount, 0},
- },
- {
- desc: "Instructions must not be empty",
- insns: []linux.BPFInstruction{},
- expectedErr: Error{InvalidInstructionCount, 0},
- },
- {
- desc: "A program must end with a return",
- insns: make([]linux.BPFInstruction, MaxInstructions),
- expectedErr: Error{InvalidEndOfProgram, MaxInstructions - 1},
- },
- {
- desc: "A program must have MaxInstructions or fewer instructions",
- insns: append(make([]linux.BPFInstruction, MaxInstructions), Stmt(Ret|K, 0)),
- expectedErr: Error{InvalidInstructionCount, MaxInstructions + 1},
- },
- {
- desc: "A load from an invalid M register is a compilation error",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Mem|W, ScratchMemRegisters), // A = M[16]
- Stmt(Ret|K, 0), // return 0
- },
- expectedErr: Error{InvalidRegister, 0},
- },
- {
- desc: "A store to an invalid M register is a compilation error",
- insns: []linux.BPFInstruction{
- Stmt(St, ScratchMemRegisters), // M[16] = A
- Stmt(Ret|K, 0), // return 0
- },
- expectedErr: Error{InvalidRegister, 0},
- },
- {
- desc: "Division by literal zero is a compilation error",
- insns: []linux.BPFInstruction{
- Stmt(Alu|Div|K, 0), // A /= 0
- Stmt(Ret|K, 0), // return 0
- },
- expectedErr: Error{DivisionByZero, 0},
- },
- {
- desc: "An unconditional jump outside of the program is a compilation error",
- insns: []linux.BPFInstruction{
- Jump(Jmp|Ja, 1, 0, 0), // jmp nextpc+1
- Stmt(Ret|K, 0), // return 0
- },
- expectedErr: Error{InvalidJumpTarget, 0},
- },
- {
- desc: "A conditional jump outside of the program in the true case is a compilation error",
- insns: []linux.BPFInstruction{
- Jump(Jmp|Jeq|K, 0, 1, 0), // if (A == K) jmp nextpc+1
- Stmt(Ret|K, 0), // return 0
- },
- expectedErr: Error{InvalidJumpTarget, 0},
- },
- {
- desc: "A conditional jump outside of the program in the false case is a compilation error",
- insns: []linux.BPFInstruction{
- Jump(Jmp|Jeq|K, 0, 0, 1), // if (A != K) jmp nextpc+1
- Stmt(Ret|K, 0), // return 0
- },
- expectedErr: Error{InvalidJumpTarget, 0},
- },
- } {
- _, err := Compile(test.insns)
- if err != test.expectedErr {
- t.Errorf("%s: expected error %q, got error %q", test.desc, test.expectedErr, err)
- }
- }
-}
-
-func TestExecErrors(t *testing.T) {
- for _, test := range []struct {
- // desc is the test's description.
- desc string
-
- // insns is the BPF instructions to be executed.
- insns []linux.BPFInstruction
-
- // expectedErr is the expected execution error.
- expectedErr error
- }{
- {
- desc: "An out-of-bounds load of input data is an execution error",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Abs|B, 0), // A = input[0]
- Stmt(Ret|K, 0), // return 0
- },
- expectedErr: Error{InvalidLoad, 0},
- },
- {
- desc: "Division by zero at runtime is an execution error",
- insns: []linux.BPFInstruction{
- Stmt(Alu|Div|X, 0), // A /= X
- Stmt(Ret|K, 0), // return 0
- },
- expectedErr: Error{DivisionByZero, 0},
- },
- {
- desc: "Modulo zero at runtime is an execution error",
- insns: []linux.BPFInstruction{
- Stmt(Alu|Mod|X, 0), // A %= X
- Stmt(Ret|K, 0), // return 0
- },
- expectedErr: Error{DivisionByZero, 0},
- },
- } {
- p, err := Compile(test.insns)
- if err != nil {
- t.Errorf("%s: unexpected compilation error: %v", test.desc, err)
- continue
- }
- ret, err := Exec(p, InputBytes{nil, binary.BigEndian})
- if err != test.expectedErr {
- t.Errorf("%s: expected execution error %q, got (%d, %v)", test.desc, test.expectedErr, ret, err)
- }
- }
-}
-
-func TestValidInstructions(t *testing.T) {
- for _, test := range []struct {
- // desc is the test's description.
- desc string
-
- // insns is the BPF instructions to be compiled.
- insns []linux.BPFInstruction
-
- // input is the input data. Note that input will be read as big-endian.
- input []byte
-
- // expectedRet is the expected return value of the BPF program.
- expectedRet uint32
- }{
- {
- desc: "Return of immediate",
- insns: []linux.BPFInstruction{
- Stmt(Ret|K, 42), // return 42
- },
- expectedRet: 42,
- },
- {
- desc: "Load of immediate into A",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 42), // A = 42
- Stmt(Ret|A, 0), // return A
- },
- expectedRet: 42,
- },
- {
- desc: "Load of immediate into X and copying of X into A",
- insns: []linux.BPFInstruction{
- Stmt(Ldx|Imm|W, 42), // X = 42
- Stmt(Misc|Tax, 0), // A = X
- Stmt(Ret|A, 0), // return A
- },
- expectedRet: 42,
- },
- {
- desc: "Copying of A into X and back",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 42), // A = 42
- Stmt(Misc|Txa, 0), // X = A
- Stmt(Ld|Imm|W, 0), // A = 0
- Stmt(Misc|Tax, 0), // A = X
- Stmt(Ret|A, 0), // return A
- },
- expectedRet: 42,
- },
- {
- desc: "Load of 32-bit input by absolute offset into A",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Abs|W, 1), // A = input[1..4]
- Stmt(Ret|A, 0), // return A
- },
- input: []byte{0x00, 0x11, 0x22, 0x33, 0x44},
- expectedRet: 0x11223344,
- },
- {
- desc: "Load of 16-bit input by absolute offset into A",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Abs|H, 1), // A = input[1..2]
- Stmt(Ret|A, 0), // return A
- },
- input: []byte{0x00, 0x11, 0x22},
- expectedRet: 0x1122,
- },
- {
- desc: "Load of 8-bit input by absolute offset into A",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Abs|B, 1), // A = input[1]
- Stmt(Ret|A, 0), // return A
- },
- input: []byte{0x00, 0x11},
- expectedRet: 0x11,
- },
- {
- desc: "Load of 32-bit input by relative offset into A",
- insns: []linux.BPFInstruction{
- Stmt(Ldx|Imm|W, 1), // X = 1
- Stmt(Ld|Ind|W, 1), // A = input[X+1..X+4]
- Stmt(Ret|A, 0), // return A
- },
- input: []byte{0x00, 0x11, 0x22, 0x33, 0x44, 0x55},
- expectedRet: 0x22334455,
- },
- {
- desc: "Load of 16-bit input by relative offset into A",
- insns: []linux.BPFInstruction{
- Stmt(Ldx|Imm|W, 1), // X = 1
- Stmt(Ld|Ind|H, 1), // A = input[X+1..X+2]
- Stmt(Ret|A, 0), // return A
- },
- input: []byte{0x00, 0x11, 0x22, 0x33},
- expectedRet: 0x2233,
- },
- {
- desc: "Load of 8-bit input by relative offset into A",
- insns: []linux.BPFInstruction{
- Stmt(Ldx|Imm|W, 1), // X = 1
- Stmt(Ld|Ind|B, 1), // A = input[X+1]
- Stmt(Ret|A, 0), // return A
- },
- input: []byte{0x00, 0x11, 0x22},
- expectedRet: 0x22,
- },
- {
- desc: "Load/store between A and scratch memory",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 42), // A = 42
- Stmt(St, 2), // M[2] = A
- Stmt(Ld|Imm|W, 0), // A = 0
- Stmt(Ld|Mem|W, 2), // A = M[2]
- Stmt(Ret|A, 0), // return A
- },
- expectedRet: 42,
- },
- {
- desc: "Load/store between X and scratch memory",
- insns: []linux.BPFInstruction{
- Stmt(Ldx|Imm|W, 42), // X = 42
- Stmt(Stx, 3), // M[3] = X
- Stmt(Ldx|Imm|W, 0), // X = 0
- Stmt(Ldx|Mem|W, 3), // X = M[3]
- Stmt(Misc|Tax, 0), // A = X
- Stmt(Ret|A, 0), // return A
- },
- expectedRet: 42,
- },
- {
- desc: "Load of input length into A",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Len|W, 0), // A = len(input)
- Stmt(Ret|A, 0), // return A
- },
- input: []byte{1, 2, 3},
- expectedRet: 3,
- },
- {
- desc: "Load of input length into X",
- insns: []linux.BPFInstruction{
- Stmt(Ldx|Len|W, 0), // X = len(input)
- Stmt(Misc|Tax, 0), // A = X
- Stmt(Ret|A, 0), // return A
- },
- input: []byte{1, 2, 3},
- expectedRet: 3,
- },
- {
- desc: "Load of MSH (4*(input[0]&0xf)) into X",
- insns: []linux.BPFInstruction{
- Stmt(Ldx|Msh|B, 0), // X = 4*(input[0]&0xf)
- Stmt(Misc|Tax, 0), // A = X
- Stmt(Ret|A, 0), // return A
- },
- input: []byte{0xf1},
- expectedRet: 4,
- },
- {
- desc: "Addition of immediate",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 10), // A = 10
- Stmt(Alu|Add|K, 20), // A += 20
- Stmt(Ret|A, 0), // return A
- },
- expectedRet: 30,
- },
- {
- desc: "Addition of X",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 10), // A = 10
- Stmt(Ldx|Imm|W, 20), // X = 20
- Stmt(Alu|Add|X, 0), // A += X
- Stmt(Ret|A, 0), // return A
- },
- expectedRet: 30,
- },
- {
- desc: "Subtraction of immediate",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 30), // A = 30
- Stmt(Alu|Sub|K, 20), // A -= 20
- Stmt(Ret|A, 0), // return A
- },
- expectedRet: 10,
- },
- {
- desc: "Subtraction of X",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 30), // A = 30
- Stmt(Ldx|Imm|W, 20), // X = 20
- Stmt(Alu|Sub|X, 0), // A -= X
- Stmt(Ret|A, 0), // return A
- },
- expectedRet: 10,
- },
- {
- desc: "Multiplication of immediate",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 2), // A = 2
- Stmt(Alu|Mul|K, 3), // A *= 3
- Stmt(Ret|A, 0), // return A
- },
- expectedRet: 6,
- },
- {
- desc: "Multiplication of X",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 2), // A = 2
- Stmt(Ldx|Imm|W, 3), // X = 3
- Stmt(Alu|Mul|X, 0), // A *= X
- Stmt(Ret|A, 0), // return A
- },
- expectedRet: 6,
- },
- {
- desc: "Division by immediate",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 6), // A = 6
- Stmt(Alu|Div|K, 3), // A /= 3
- Stmt(Ret|A, 0), // return A
- },
- expectedRet: 2,
- },
- {
- desc: "Division by X",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 6), // A = 6
- Stmt(Ldx|Imm|W, 3), // X = 3
- Stmt(Alu|Div|X, 0), // A /= X
- Stmt(Ret|A, 0), // return A
- },
- expectedRet: 2,
- },
- {
- desc: "Modulo immediate",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 17), // A = 17
- Stmt(Alu|Mod|K, 7), // A %= 7
- Stmt(Ret|A, 0), // return A
- },
- expectedRet: 3,
- },
- {
- desc: "Modulo X",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 17), // A = 17
- Stmt(Ldx|Imm|W, 7), // X = 7
- Stmt(Alu|Mod|X, 0), // A %= X
- Stmt(Ret|A, 0), // return A
- },
- expectedRet: 3,
- },
- {
- desc: "Arithmetic negation",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 1), // A = 1
- Stmt(Alu|Neg, 0), // A = -A
- Stmt(Ret|A, 0), // return A
- },
- expectedRet: 0xffffffff,
- },
- {
- desc: "Bitwise OR with immediate",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 0xff00aa55), // A = 0xff00aa55
- Stmt(Alu|Or|K, 0xff0055aa), // A |= 0xff0055aa
- Stmt(Ret|A, 0), // return A
- },
- expectedRet: 0xff00ffff,
- },
- {
- desc: "Bitwise OR with X",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 0xff00aa55), // A = 0xff00aa55
- Stmt(Ldx|Imm|W, 0xff0055aa), // X = 0xff0055aa
- Stmt(Alu|Or|X, 0), // A |= X
- Stmt(Ret|A, 0), // return A
- },
- expectedRet: 0xff00ffff,
- },
- {
- desc: "Bitwise AND with immediate",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 0xff00aa55), // A = 0xff00aa55
- Stmt(Alu|And|K, 0xff0055aa), // A &= 0xff0055aa
- Stmt(Ret|A, 0), // return A
- },
- expectedRet: 0xff000000,
- },
- {
- desc: "Bitwise AND with X",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 0xff00aa55), // A = 0xff00aa55
- Stmt(Ldx|Imm|W, 0xff0055aa), // X = 0xff0055aa
- Stmt(Alu|And|X, 0), // A &= X
- Stmt(Ret|A, 0), // return A
- },
- expectedRet: 0xff000000,
- },
- {
- desc: "Bitwise XOR with immediate",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 0xff00aa55), // A = 0xff00aa55
- Stmt(Alu|Xor|K, 0xff0055aa), // A ^= 0xff0055aa
- Stmt(Ret|A, 0), // return A
- },
- expectedRet: 0x0000ffff,
- },
- {
- desc: "Bitwise XOR with X",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 0xff00aa55), // A = 0xff00aa55
- Stmt(Ldx|Imm|W, 0xff0055aa), // X = 0xff0055aa
- Stmt(Alu|Xor|X, 0), // A ^= X
- Stmt(Ret|A, 0), // return A
- },
- expectedRet: 0x0000ffff,
- },
- {
- desc: "Left shift by immediate",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 1), // A = 1
- Stmt(Alu|Lsh|K, 5), // A <<= 5
- Stmt(Ret|A, 0), // return A
- },
- expectedRet: 32,
- },
- {
- desc: "Left shift by X",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 1), // A = 1
- Stmt(Ldx|Imm|W, 5), // X = 5
- Stmt(Alu|Lsh|X, 0), // A <<= X
- Stmt(Ret|A, 0), // return A
- },
- expectedRet: 32,
- },
- {
- desc: "Right shift by immediate",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 0xffffffff), // A = 0xffffffff
- Stmt(Alu|Rsh|K, 31), // A >>= 31
- Stmt(Ret|A, 0), // return A
- },
- expectedRet: 1,
- },
- {
- desc: "Right shift by X",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 0xffffffff), // A = 0xffffffff
- Stmt(Ldx|Imm|W, 31), // X = 31
- Stmt(Alu|Rsh|X, 0), // A >>= X
- Stmt(Ret|A, 0), // return A
- },
- expectedRet: 1,
- },
- {
- desc: "Unconditional jump",
- insns: []linux.BPFInstruction{
- Jump(Jmp|Ja, 1, 0, 0), // jmp nextpc+1
- Stmt(Ret|K, 0), // return 0
- Stmt(Ret|K, 1), // return 1
- },
- expectedRet: 1,
- },
- {
- desc: "Jump when A == immediate",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 42), // A = 42
- Jump(Jmp|Jeq|K, 42, 1, 2), // if (A == 42) jmp nextpc+1 else jmp nextpc+2
- Stmt(Ret|K, 0), // return 0
- Stmt(Ret|K, 1), // return 1
- Stmt(Ret|K, 2), // return 2
- },
- expectedRet: 1,
- },
- {
- desc: "Jump when A != immediate",
- insns: []linux.BPFInstruction{
- Jump(Jmp|Jeq|K, 42, 1, 2), // if (A == 42) jmp nextpc+1 else jmp nextpc+2
- Stmt(Ret|K, 0), // return 0
- Stmt(Ret|K, 1), // return 1
- Stmt(Ret|K, 2), // return 2
- },
- expectedRet: 2,
- },
- {
- desc: "Jump when A == X",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 42), // A = 42
- Stmt(Ldx|Imm|W, 42), // X = 42
- Jump(Jmp|Jeq|X, 0, 1, 2), // if (A == X) jmp nextpc+1 else jmp nextpc+2
- Stmt(Ret|K, 0), // return 0
- Stmt(Ret|K, 1), // return 1
- Stmt(Ret|K, 2), // return 2
- },
- expectedRet: 1,
- },
- {
- desc: "Jump when A != X",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 42), // A = 42
- Jump(Jmp|Jeq|X, 0, 1, 2), // if (A == X) jmp nextpc+1 else jmp nextpc+2
- Stmt(Ret|K, 0), // return 0
- Stmt(Ret|K, 1), // return 1
- Stmt(Ret|K, 2), // return 2
- },
- expectedRet: 2,
- },
- {
- desc: "Jump when A > immediate",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 10), // A = 10
- Jump(Jmp|Jgt|K, 9, 1, 2), // if (A > 9) jmp nextpc+1 else jmp nextpc+2
- Stmt(Ret|K, 0), // return 0
- Stmt(Ret|K, 1), // return 1
- Stmt(Ret|K, 2), // return 2
- },
- expectedRet: 1,
- },
- {
- desc: "Jump when A <= immediate",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 10), // A = 10
- Jump(Jmp|Jgt|K, 10, 1, 2), // if (A > 10) jmp nextpc+1 else jmp nextpc+2
- Stmt(Ret|K, 0), // return 0
- Stmt(Ret|K, 1), // return 1
- Stmt(Ret|K, 2), // return 2
- },
- expectedRet: 2,
- },
- {
- desc: "Jump when A > X",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 10), // A = 10
- Stmt(Ldx|Imm|W, 9), // X = 9
- Jump(Jmp|Jgt|X, 0, 1, 2), // if (A > X) jmp nextpc+1 else jmp nextpc+2
- Stmt(Ret|K, 0), // return 0
- Stmt(Ret|K, 1), // return 1
- Stmt(Ret|K, 2), // return 2
- },
- expectedRet: 1,
- },
- {
- desc: "Jump when A <= X",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 10), // A = 10
- Stmt(Ldx|Imm|W, 10), // X = 10
- Jump(Jmp|Jgt|X, 0, 1, 2), // if (A > X) jmp nextpc+1 else jmp nextpc+2
- Stmt(Ret|K, 0), // return 0
- Stmt(Ret|K, 1), // return 1
- Stmt(Ret|K, 2), // return 2
- },
- expectedRet: 2,
- },
- {
- desc: "Jump when A >= immediate",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 10), // A = 10
- Jump(Jmp|Jge|K, 10, 1, 2), // if (A >= 10) jmp nextpc+1 else jmp nextpc+2
- Stmt(Ret|K, 0), // return 0
- Stmt(Ret|K, 1), // return 1
- Stmt(Ret|K, 2), // return 2
- },
- expectedRet: 1,
- },
- {
- desc: "Jump when A < immediate",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 10), // A = 10
- Jump(Jmp|Jge|K, 11, 1, 2), // if (A >= 11) jmp nextpc+1 else jmp nextpc+2
- Stmt(Ret|K, 0), // return 0
- Stmt(Ret|K, 1), // return 1
- Stmt(Ret|K, 2), // return 2
- },
- expectedRet: 2,
- },
- {
- desc: "Jump when A >= X",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 10), // A = 10
- Stmt(Ldx|Imm|W, 10), // X = 10
- Jump(Jmp|Jge|X, 0, 1, 2), // if (A >= X) jmp nextpc+1 else jmp nextpc+2
- Stmt(Ret|K, 0), // return 0
- Stmt(Ret|K, 1), // return 1
- Stmt(Ret|K, 2), // return 2
- },
- expectedRet: 1,
- },
- {
- desc: "Jump when A < X",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 10), // A = 10
- Stmt(Ldx|Imm|W, 11), // X = 11
- Jump(Jmp|Jge|X, 0, 1, 2), // if (A >= X) jmp nextpc+1 else jmp nextpc+2
- Stmt(Ret|K, 0), // return 0
- Stmt(Ret|K, 1), // return 1
- Stmt(Ret|K, 2), // return 2
- },
- expectedRet: 2,
- },
- {
- desc: "Jump when A & immediate != 0",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 0xff), // A = 0xff
- Jump(Jmp|Jset|K, 0x101, 1, 2), // if (A & 0x101) jmp nextpc+1 else jmp nextpc+2
- Stmt(Ret|K, 0), // return 0
- Stmt(Ret|K, 1), // return 1
- Stmt(Ret|K, 2), // return 2
- },
- expectedRet: 1,
- },
- {
- desc: "Jump when A & immediate == 0",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 0xfe), // A = 0xfe
- Jump(Jmp|Jset|K, 0x101, 1, 2), // if (A & 0x101) jmp nextpc+1 else jmp nextpc+2
- Stmt(Ret|K, 0), // return 0
- Stmt(Ret|K, 1), // return 1
- Stmt(Ret|K, 2), // return 2
- },
- expectedRet: 2,
- },
- {
- desc: "Jump when A & X != 0",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 0xff), // A = 0xff
- Stmt(Ldx|Imm|W, 0x101), // X = 0x101
- Jump(Jmp|Jset|X, 0, 1, 2), // if (A & X) jmp nextpc+1 else jmp nextpc+2
- Stmt(Ret|K, 0), // return 0
- Stmt(Ret|K, 1), // return 1
- Stmt(Ret|K, 2), // return 2
- },
- expectedRet: 1,
- },
- {
- desc: "Jump when A & X == 0",
- insns: []linux.BPFInstruction{
- Stmt(Ld|Imm|W, 0xfe), // A = 0xfe
- Stmt(Ldx|Imm|W, 0x101), // X = 0x101
- Jump(Jmp|Jset|X, 0, 1, 2), // if (A & X) jmp nextpc+1 else jmp nextpc+2
- Stmt(Ret|K, 0), // return 0
- Stmt(Ret|K, 1), // return 1
- Stmt(Ret|K, 2), // return 2
- },
- expectedRet: 2,
- },
- } {
- p, err := Compile(test.insns)
- if err != nil {
- t.Errorf("%s: unexpected compilation error: %v", test.desc, err)
- continue
- }
- ret, err := Exec(p, InputBytes{test.input, binary.BigEndian})
- if err != nil {
- t.Errorf("%s: expected return value of %d, got execution error: %v", test.desc, test.expectedRet, err)
- continue
- }
- if ret != test.expectedRet {
- t.Errorf("%s: expected return value of %d, got value %d", test.desc, test.expectedRet, ret)
- }
- }
-}
-
-func TestSimpleFilter(t *testing.T) {
- // Seccomp filter example given in Linux's
- // Documentation/networking/filter.txt, translated to bytecode using the
- // Linux kernel tree's tools/net/bpf_asm.
- filter := []linux.BPFInstruction{
- {0x20, 0, 0, 0x00000004}, // ld [4] /* offsetof(struct seccomp_data, arch) */
- {0x15, 0, 11, 0xc000003e}, // jne #0xc000003e, bad /* AUDIT_ARCH_X86_64 */
- {0x20, 0, 0, 0000000000}, // ld [0] /* offsetof(struct seccomp_data, nr) */
- {0x15, 10, 0, 0x0000000f}, // jeq #15, good /* __NR_rt_sigreturn */
- {0x15, 9, 0, 0x000000e7}, // jeq #231, good /* __NR_exit_group */
- {0x15, 8, 0, 0x0000003c}, // jeq #60, good /* __NR_exit */
- {0x15, 7, 0, 0000000000}, // jeq #0, good /* __NR_read */
- {0x15, 6, 0, 0x00000001}, // jeq #1, good /* __NR_write */
- {0x15, 5, 0, 0x00000005}, // jeq #5, good /* __NR_fstat */
- {0x15, 4, 0, 0x00000009}, // jeq #9, good /* __NR_mmap */
- {0x15, 3, 0, 0x0000000e}, // jeq #14, good /* __NR_rt_sigprocmask */
- {0x15, 2, 0, 0x0000000d}, // jeq #13, good /* __NR_rt_sigaction */
- {0x15, 1, 0, 0x00000023}, // jeq #35, good /* __NR_nanosleep */
- {0x06, 0, 0, 0000000000}, // bad: ret #0 /* SECCOMP_RET_KILL */
- {0x06, 0, 0, 0x7fff0000}, // good: ret #0x7fff0000 /* SECCOMP_RET_ALLOW */
- }
- p, err := Compile(filter)
- if err != nil {
- t.Fatalf("Unexpected compilation error: %v", err)
- }
-
- for _, test := range []struct {
- // desc is the test's description.
- desc string
-
- // data is the input data.
- data linux.SeccompData
-
- // expectedRet is the expected return value of the BPF program.
- expectedRet uint32
- }{
- {
- desc: "Invalid arch is rejected",
- data: linux.SeccompData{Nr: 1 /* x86 exit */, Arch: 0x40000003 /* AUDIT_ARCH_I386 */},
- expectedRet: 0,
- },
- {
- desc: "Disallowed syscall is rejected",
- data: linux.SeccompData{Nr: 105 /* __NR_setuid */, Arch: 0xc000003e},
- expectedRet: 0,
- },
- {
- desc: "Allowed syscall is indeed allowed",
- data: linux.SeccompData{Nr: 231 /* __NR_exit_group */, Arch: 0xc000003e},
- expectedRet: 0x7fff0000,
- },
- } {
- ret, err := Exec(p, dataAsInput(&test.data))
- if err != nil {
- t.Errorf("%s: expected return value of %d, got execution error: %v", test.desc, test.expectedRet, err)
- continue
- }
- if ret != test.expectedRet {
- t.Errorf("%s: expected return value of %d, got value %d", test.desc, test.expectedRet, ret)
- }
- }
-}
-
-// seccompData is equivalent to struct seccomp_data.
-type seccompData struct {
- nr uint32
- arch uint32
- instructionPointer uint64
- args [6]uint64
-}
-
-// dataAsInput converts a linux.SeccompData to a bpf.Input.
-func dataAsInput(data *linux.SeccompData) Input {
- return InputBytes{marshal.Marshal(data), hostarch.ByteOrder}
-}
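The deleted tests above drive the pkg/bpf API end to end: build instructions with Stmt and Jump, Compile them, then Exec the resulting program over an input. As a hedged sketch of that flow, written as if it lived inside package bpf and using only the helpers visible in the diff (exampleMatch42 itself is hypothetical), a four-instruction program that returns 1 when the first input byte equals 42 could look like:

package bpf

import (
	"encoding/binary"
	"fmt"

	"gvisor.dev/gvisor/pkg/abi/linux"
)

// exampleMatch42 compiles a small classic-BPF program and runs it over input.
func exampleMatch42(input []byte) (uint32, error) {
	insns := []linux.BPFInstruction{
		Stmt(Ld|Abs|B, 0),         // A = input[0]
		Jump(Jmp|Jeq|K, 42, 0, 1), // if (A == 42) fall through, else skip one
		Stmt(Ret|K, 1),            // return 1 (matched)
		Stmt(Ret|K, 0),            // return 0 (no match)
	}
	p, err := Compile(insns)
	if err != nil {
		return 0, fmt.Errorf("Compile: %v", err)
	}
	// Input bytes are interpreted big-endian, as in the tests above.
	return Exec(p, InputBytes{input, binary.BigEndian})
}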
diff --git a/pkg/bpf/program_builder_test.go b/pkg/bpf/program_builder_test.go
deleted file mode 100644
index 37f684f25..000000000
--- a/pkg/bpf/program_builder_test.go
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package bpf
-
-import (
- "fmt"
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
-)
-
-func validate(p *ProgramBuilder, expected []linux.BPFInstruction) error {
- instructions, err := p.Instructions()
- if err != nil {
- return fmt.Errorf("Instructions() failed: %v", err)
- }
- got, err := DecodeInstructions(instructions)
- if err != nil {
- return fmt.Errorf("DecodeInstructions('instructions') failed: %v", err)
- }
- expectedDecoded, err := DecodeInstructions(expected)
- if err != nil {
- return fmt.Errorf("DecodeInstructions('expected') failed: %v", err)
- }
- if got != expectedDecoded {
- return fmt.Errorf("DecodeInstructions() failed, expected: %q, got: %q", expectedDecoded, got)
- }
- return nil
-}
-
-func TestProgramBuilderSimple(t *testing.T) {
- p := NewProgramBuilder()
- p.AddStmt(Ld+Abs+W, 10)
- p.AddJump(Jmp+Ja, 10, 0, 0)
-
- expected := []linux.BPFInstruction{
- Stmt(Ld+Abs+W, 10),
- Jump(Jmp+Ja, 10, 0, 0),
- }
-
- if err := validate(p, expected); err != nil {
- t.Errorf("Validate() failed: %v", err)
- }
-}
-
-func TestProgramBuilderLabels(t *testing.T) {
- p := NewProgramBuilder()
- p.AddJumpTrueLabel(Jmp+Jeq+K, 11, "label_1", 0)
- p.AddJumpFalseLabel(Jmp+Jeq+K, 12, 0, "label_2")
- p.AddJumpLabels(Jmp+Jeq+K, 13, "label_3", "label_4")
- if err := p.AddLabel("label_1"); err != nil {
- t.Errorf("AddLabel(label_1) failed: %v", err)
- }
- p.AddStmt(Ld+Abs+W, 1)
- if err := p.AddLabel("label_3"); err != nil {
- t.Errorf("AddLabel(label_3) failed: %v", err)
- }
- p.AddJumpLabels(Jmp+Jeq+K, 14, "label_4", "label_5")
- if err := p.AddLabel("label_2"); err != nil {
- t.Errorf("AddLabel(label_2) failed: %v", err)
- }
- p.AddJumpLabels(Jmp+Jeq+K, 15, "label_4", "label_6")
- if err := p.AddLabel("label_4"); err != nil {
- t.Errorf("AddLabel(label_4) failed: %v", err)
- }
- p.AddStmt(Ld+Abs+W, 4)
- if err := p.AddLabel("label_5"); err != nil {
- t.Errorf("AddLabel(label_5) failed: %v", err)
- }
- if err := p.AddLabel("label_6"); err != nil {
- t.Errorf("AddLabel(label_6) failed: %v", err)
- }
- p.AddStmt(Ld+Abs+W, 5)
-
- expected := []linux.BPFInstruction{
- Jump(Jmp+Jeq+K, 11, 2, 0),
- Jump(Jmp+Jeq+K, 12, 0, 3),
- Jump(Jmp+Jeq+K, 13, 1, 3),
- Stmt(Ld+Abs+W, 1),
- Jump(Jmp+Jeq+K, 14, 1, 2),
- Jump(Jmp+Jeq+K, 15, 0, 1),
- Stmt(Ld+Abs+W, 4),
- Stmt(Ld+Abs+W, 5),
- }
- if err := validate(p, expected); err != nil {
- t.Errorf("Validate() failed: %v", err)
- }
- // Call validate() (and thus p.Instructions()) again to make sure that
- // Instructions can be called multiple times without corrupting the
- // program.
- if err := validate(p, expected); err != nil {
- t.Errorf("Validate() failed: %v", err)
- }
-}
-
-func TestProgramBuilderMissingErrorTarget(t *testing.T) {
- p := NewProgramBuilder()
- p.AddJumpTrueLabel(Jmp+Jeq+K, 10, "label_1", 0)
- if _, err := p.Instructions(); err == nil {
- t.Errorf("Instructions() should have failed")
- }
-}
-
-func TestProgramBuilderLabelWithNoInstruction(t *testing.T) {
- p := NewProgramBuilder()
- p.AddJumpTrueLabel(Jmp+Jeq+K, 10, "label_1", 0)
- if err := p.AddLabel("label_1"); err != nil {
- t.Errorf("AddLabel(label_1) failed: %v", err)
- }
- if _, err := p.Instructions(); err == nil {
- t.Errorf("Instructions() should have failed")
- }
-}
-
-// TestProgramBuilderUnusedLabel tests that adding an unused label doesn't
-// cause program generation to fail.
-func TestProgramBuilderUnusedLabel(t *testing.T) {
- p := NewProgramBuilder()
- p.AddStmt(Ld+Abs+W, 10)
- p.AddJump(Jmp+Ja, 10, 0, 0)
-
- expected := []linux.BPFInstruction{
- Stmt(Ld+Abs+W, 10),
- Jump(Jmp+Ja, 10, 0, 0),
- }
-
- if err := p.AddLabel("unused"); err != nil {
- t.Errorf("AddLabel(unused) should have succeeded")
- }
-
- if err := validate(p, expected); err != nil {
- t.Errorf("Validate() failed: %v", err)
- }
-}
-
-// TestProgramBuilderBackwardsReference tests that including a backwards
-// reference to a label in a program causes a failure.
-func TestProgramBuilderBackwardsReference(t *testing.T) {
- p := NewProgramBuilder()
- if err := p.AddLabel("bw_label"); err != nil {
- t.Errorf("AddLabel(bw_label) failed: %v", err)
- }
- p.AddStmt(Ld+Abs+W, 10)
- p.AddJumpTrueLabel(Jmp+Jeq+K, 10, "bw_label", 0)
- if _, err := p.Instructions(); err == nil {
- t.Errorf("Instructions() should have failed")
- }
-}
-
-func TestProgramBuilderLabelAddedTwice(t *testing.T) {
- p := NewProgramBuilder()
- p.AddJumpTrueLabel(Jmp+Jeq+K, 10, "label_1", 0)
- if err := p.AddLabel("label_1"); err != nil {
- t.Errorf("AddLabel(label_1) failed: %v", err)
- }
- p.AddStmt(Ld+Abs+W, 0)
- if err := p.AddLabel("label_1"); err == nil {
- t.Errorf("AddLabel(label_1) should have failed")
- }
-}
-
-func TestProgramBuilderJumpBackwards(t *testing.T) {
- p := NewProgramBuilder()
- p.AddJumpTrueLabel(Jmp+Jeq+K, 10, "label_1", 0)
- if err := p.AddLabel("label_1"); err != nil {
- t.Errorf("AddLabel(label_1) failed: %v", err)
- }
- p.AddStmt(Ld+Abs+W, 0)
- p.AddJumpTrueLabel(Jmp+Jeq+K, 10, "label_1", 0)
- if _, err := p.Instructions(); err == nil {
- t.Errorf("Instructions() should have failed")
- }
-}
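The deleted program_builder_test.go pins down the ProgramBuilder label rules: a jump may only reference a label that is added later (backwards references and dangling labels make Instructions() fail), and Instructions() can be called repeatedly. A hedged sketch of that workflow, using only the methods the tests call (the allow/deny shape and buildAllow42 are illustrative assumptions):

package bpf

import "gvisor.dev/gvisor/pkg/abi/linux"

// buildAllow42 returns a program that allows inputs whose first 32-bit word
// is 42 and denies everything else.
func buildAllow42() ([]linux.BPFInstruction, error) {
	b := NewProgramBuilder()
	b.AddStmt(Ld+Abs+W, 0)                        // A = input[0..3]
	b.AddJumpTrueLabel(Jmp+Jeq+K, 42, "allow", 0) // if A == 42 goto allow
	b.AddStmt(Ret+K, 0)                           // deny
	if err := b.AddLabel("allow"); err != nil {   // label added after the jump
		return nil, err
	}
	b.AddStmt(Ret+K, 0x7fff0000) // allow (SECCOMP_RET_ALLOW, as in the filter above)
	return b.Instructions()
}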
diff --git a/pkg/buffer/BUILD b/pkg/buffer/BUILD
deleted file mode 100644
index 19cd28a32..000000000
--- a/pkg/buffer/BUILD
+++ /dev/null
@@ -1,46 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "buffer_list",
- out = "buffer_list.go",
- package = "buffer",
- prefix = "buffer",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*buffer",
- "Linker": "*buffer",
- },
-)
-
-go_library(
- name = "buffer",
- srcs = [
- "buffer.go",
- "buffer_list.go",
- "pool.go",
- "view.go",
- "view_unsafe.go",
- ],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/context",
- "//pkg/log",
- ],
-)
-
-go_test(
- name = "buffer_test",
- size = "small",
- srcs = [
- "buffer_test.go",
- "pool_test.go",
- "view_test.go",
- ],
- library = ":buffer",
- deps = [
- "//pkg/state",
- ],
-)
diff --git a/pkg/buffer/buffer_list.go b/pkg/buffer/buffer_list.go
new file mode 100644
index 000000000..6b5bea3fc
--- /dev/null
+++ b/pkg/buffer/buffer_list.go
@@ -0,0 +1,221 @@
+package buffer
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type bufferElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (bufferElementMapper) linkerFor(elem *buffer) *buffer { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type bufferList struct {
+ head *buffer
+ tail *buffer
+}
+
+// Reset resets list l to the empty state.
+func (l *bufferList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *bufferList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *bufferList) Front() *buffer {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *bufferList) Back() *buffer {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *bufferList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (bufferElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *bufferList) PushFront(e *buffer) {
+ linker := bufferElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ bufferElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *bufferList) PushBack(e *buffer) {
+ linker := bufferElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ bufferElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *bufferList) PushBackList(m *bufferList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ bufferElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ bufferElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *bufferList) InsertAfter(b, e *buffer) {
+ bLinker := bufferElementMapper{}.linkerFor(b)
+ eLinker := bufferElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ bufferElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *bufferList) InsertBefore(a, e *buffer) {
+ aLinker := bufferElementMapper{}.linkerFor(a)
+ eLinker := bufferElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ bufferElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *bufferList) Remove(e *buffer) {
+ linker := bufferElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ bufferElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ bufferElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type bufferEntry struct {
+ next *buffer
+ prev *buffer
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *bufferEntry) Next() *buffer {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *bufferEntry) Prev() *buffer {
+ return e.prev
+}
+
+// SetNext assigns 'elem' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *bufferEntry) SetNext(elem *buffer) {
+ e.next = elem
+}
+
+// SetPrev assigns 'elem' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *bufferEntry) SetPrev(elem *buffer) {
+ e.prev = elem
+}
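buffer_list.go above is the go_generics instantiation of //pkg/ilist for *buffer elements: the element embeds bufferEntry, so the Next/Prev links live inside the buffer itself and list operations allocate nothing. A hedged sketch of typical in-package use (listSketch is hypothetical; pool.get() is borrowed from the deleted pool_test.go further down):

package buffer

// listSketch pushes a few pooled buffers and walks them in insertion order.
func listSketch() int {
	var p pool       // zero-value pool hands out default-sized buffers
	var l bufferList // zero-value list is empty and ready to use
	for i := 0; i < 3; i++ {
		l.PushBack(p.get()) // links are stored in the embedded bufferEntry
	}
	n := 0
	for e := l.Front(); e != nil; e = e.Next() {
		n++ // visit each buffer front to back
	}
	return n
}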
diff --git a/pkg/buffer/buffer_state_autogen.go b/pkg/buffer/buffer_state_autogen.go
new file mode 100644
index 000000000..aaa72b723
--- /dev/null
+++ b/pkg/buffer/buffer_state_autogen.go
@@ -0,0 +1,163 @@
+// automatically generated by stateify.
+
+package buffer
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (b *buffer) StateTypeName() string {
+ return "pkg/buffer.buffer"
+}
+
+func (b *buffer) StateFields() []string {
+ return []string{
+ "data",
+ "read",
+ "write",
+ "bufferEntry",
+ }
+}
+
+func (b *buffer) beforeSave() {}
+
+// +checklocksignore
+func (b *buffer) StateSave(stateSinkObject state.Sink) {
+ b.beforeSave()
+ stateSinkObject.Save(0, &b.data)
+ stateSinkObject.Save(1, &b.read)
+ stateSinkObject.Save(2, &b.write)
+ stateSinkObject.Save(3, &b.bufferEntry)
+}
+
+func (b *buffer) afterLoad() {}
+
+// +checklocksignore
+func (b *buffer) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &b.data)
+ stateSourceObject.Load(1, &b.read)
+ stateSourceObject.Load(2, &b.write)
+ stateSourceObject.Load(3, &b.bufferEntry)
+}
+
+func (l *bufferList) StateTypeName() string {
+ return "pkg/buffer.bufferList"
+}
+
+func (l *bufferList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *bufferList) beforeSave() {}
+
+// +checklocksignore
+func (l *bufferList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *bufferList) afterLoad() {}
+
+// +checklocksignore
+func (l *bufferList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *bufferEntry) StateTypeName() string {
+ return "pkg/buffer.bufferEntry"
+}
+
+func (e *bufferEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *bufferEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *bufferEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *bufferEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *bufferEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func (p *pool) StateTypeName() string {
+ return "pkg/buffer.pool"
+}
+
+func (p *pool) StateFields() []string {
+ return []string{
+ "bufferSize",
+ "embeddedStorage",
+ }
+}
+
+func (p *pool) beforeSave() {}
+
+// +checklocksignore
+func (p *pool) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+ stateSinkObject.Save(0, &p.bufferSize)
+ stateSinkObject.Save(1, &p.embeddedStorage)
+}
+
+// +checklocksignore
+func (p *pool) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &p.bufferSize)
+ stateSourceObject.LoadWait(1, &p.embeddedStorage)
+ stateSourceObject.AfterLoad(p.afterLoad)
+}
+
+func (v *View) StateTypeName() string {
+ return "pkg/buffer.View"
+}
+
+func (v *View) StateFields() []string {
+ return []string{
+ "data",
+ "size",
+ "pool",
+ }
+}
+
+func (v *View) beforeSave() {}
+
+// +checklocksignore
+func (v *View) StateSave(stateSinkObject state.Sink) {
+ v.beforeSave()
+ stateSinkObject.Save(0, &v.data)
+ stateSinkObject.Save(1, &v.size)
+ stateSinkObject.Save(2, &v.pool)
+}
+
+func (v *View) afterLoad() {}
+
+// +checklocksignore
+func (v *View) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &v.data)
+ stateSourceObject.Load(1, &v.size)
+ stateSourceObject.Load(2, &v.pool)
+}
+
+func init() {
+ state.Register((*buffer)(nil))
+ state.Register((*bufferList)(nil))
+ state.Register((*bufferEntry)(nil))
+ state.Register((*pool)(nil))
+ state.Register((*View)(nil))
+}
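buffer_state_autogen.go registers buffer, bufferList, bufferEntry, pool and View with pkg/state, which is what makes them checkpointable. A hedged sketch of the round trip this enables, mirroring doSaveAndLoad from the deleted view_test.go further down (saveRestoreSketch itself is hypothetical):

package buffer

import (
	"bytes"
	"context"

	"gvisor.dev/gvisor/pkg/state"
)

// saveRestoreSketch serializes a View with pkg/state and loads it back.
func saveRestoreSketch(src *View) (*View, error) {
	var buf bytes.Buffer
	ctx := context.Background()
	if _, err := state.Save(ctx, &buf, src); err != nil {
		return nil, err
	}
	var dst View
	if _, err := state.Load(ctx, bytes.NewReader(buf.Bytes()), &dst); err != nil {
		return nil, err
	}
	return &dst, nil
}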
diff --git a/pkg/buffer/buffer_test.go b/pkg/buffer/buffer_test.go
deleted file mode 100644
index 32db841e4..000000000
--- a/pkg/buffer/buffer_test.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package buffer
-
-import (
- "bytes"
- "testing"
-)
-
-func TestBufferRemove(t *testing.T) {
- sample := []byte("01234567")
-
- // Success cases
- for _, tc := range []struct {
- desc string
- data []byte
- rng Range
- want []byte
- }{
- {
- desc: "empty slice",
- },
- {
- desc: "empty range",
- data: sample,
- want: sample,
- },
- {
- desc: "empty range with positive begin",
- data: sample,
- rng: Range{begin: 1, end: 1},
- want: sample,
- },
- {
- desc: "range at beginning",
- data: sample,
- rng: Range{begin: 0, end: 1},
- want: sample[1:],
- },
- {
- desc: "range in middle",
- data: sample,
- rng: Range{begin: 2, end: 4},
- want: []byte("014567"),
- },
- {
- desc: "range at end",
- data: sample,
- rng: Range{begin: 7, end: 8},
- want: sample[:7],
- },
- {
- desc: "range all",
- data: sample,
- rng: Range{begin: 0, end: 8},
- },
- } {
- t.Run(tc.desc, func(t *testing.T) {
- var buf buffer
- buf.initWithData(tc.data)
- if ok := buf.Remove(tc.rng); !ok {
- t.Errorf("buf.Remove(%#v) = false, want true", tc.rng)
- } else if got := buf.ReadSlice(); !bytes.Equal(got, tc.want) {
- t.Errorf("buf.ReadSlice() = %q, want %q", got, tc.want)
- }
- })
- }
-
- // Failure cases
- for _, tc := range []struct {
- desc string
- data []byte
- rng Range
- }{
- {
- desc: "begin out-of-range",
- data: sample,
- rng: Range{begin: -1, end: 4},
- },
- {
- desc: "end out-of-range",
- data: sample,
- rng: Range{begin: 4, end: 9},
- },
- {
- desc: "both out-of-range",
- data: sample,
- rng: Range{begin: -100, end: 100},
- },
- } {
- t.Run(tc.desc, func(t *testing.T) {
- var buf buffer
- buf.initWithData(tc.data)
- if ok := buf.Remove(tc.rng); ok {
- t.Errorf("buf.Remove(%#v) = true, want false", tc.rng)
- }
- })
- }
-}
diff --git a/pkg/buffer/buffer_unsafe_state_autogen.go b/pkg/buffer/buffer_unsafe_state_autogen.go
new file mode 100644
index 000000000..5a5c40722
--- /dev/null
+++ b/pkg/buffer/buffer_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package buffer
diff --git a/pkg/buffer/pool_test.go b/pkg/buffer/pool_test.go
deleted file mode 100644
index 8584bac89..000000000
--- a/pkg/buffer/pool_test.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package buffer
-
-import (
- "testing"
-)
-
-func TestGetDefaultBufferSize(t *testing.T) {
- var p pool
- for i := 0; i < embeddedCount*2; i++ {
- buf := p.get()
- if got, want := len(buf.data), defaultBufferSize; got != want {
- t.Errorf("#%d len(buf.data) = %d, want %d", i, got, want)
- }
- }
-}
-
-func TestGetCustomBufferSize(t *testing.T) {
- const size = 100
-
- var p pool
- p.setBufferSize(size)
- for i := 0; i < embeddedCount*2; i++ {
- buf := p.get()
- if got, want := len(buf.data), size; got != want {
- t.Errorf("#%d len(buf.data) = %d, want %d", i, got, want)
- }
- }
-}
-
-func TestPut(t *testing.T) {
- var p pool
- buf := p.get()
- p.put(buf)
- if buf.data != nil {
- t.Errorf("buf.data = %x, want nil", buf.data)
- }
-}
diff --git a/pkg/buffer/view_test.go b/pkg/buffer/view_test.go
deleted file mode 100644
index 796efa240..000000000
--- a/pkg/buffer/view_test.go
+++ /dev/null
@@ -1,900 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package buffer
-
-import (
- "bytes"
- "context"
- "fmt"
- "io"
- "reflect"
- "strings"
- "testing"
-
- "gvisor.dev/gvisor/pkg/state"
-)
-
-const bufferSize = defaultBufferSize
-
-func fillAppend(v *View, data []byte) {
- v.Append(data)
-}
-
-func fillAppendEnd(v *View, data []byte) {
- v.Grow(bufferSize-1, false)
- v.Append(data)
- v.TrimFront(bufferSize - 1)
-}
-
-func fillWriteFromReader(v *View, data []byte) {
- b := bytes.NewBuffer(data)
- v.WriteFromReader(b, int64(len(data)))
-}
-
-func fillWriteFromReaderEnd(v *View, data []byte) {
- v.Grow(bufferSize-1, false)
- b := bytes.NewBuffer(data)
- v.WriteFromReader(b, int64(len(data)))
- v.TrimFront(bufferSize - 1)
-}
-
-var fillFuncs = map[string]func(*View, []byte){
- "append": fillAppend,
- "appendEnd": fillAppendEnd,
- "writeFromReader": fillWriteFromReader,
- "writeFromReaderEnd": fillWriteFromReaderEnd,
-}
-
-func BenchmarkReadAt(b *testing.B) {
- b.ReportAllocs()
- var v View
- v.Append(make([]byte, 100))
-
- buf := make([]byte, 10)
- for i := 0; i < b.N; i++ {
- v.ReadAt(buf, 0)
- }
-}
-
-func BenchmarkWriteRead(b *testing.B) {
- b.ReportAllocs()
- var v View
- sz := 1000
- wbuf := make([]byte, sz)
- rbuf := bytes.NewBuffer(make([]byte, sz))
- for i := 0; i < b.N; i++ {
- v.Append(wbuf)
- rbuf.Reset()
- v.ReadToWriter(rbuf, int64(sz))
- }
-}
-
-func testReadAt(t *testing.T, v *View, offset int64, n int, wantStr string, wantErr error) {
- t.Helper()
- d := make([]byte, n)
- n, err := v.ReadAt(d, offset)
- if n != len(wantStr) {
- t.Errorf("got %d, want %d", n, len(wantStr))
- }
- if err != wantErr {
- t.Errorf("got err %v, want %v", err, wantErr)
- }
- if !bytes.Equal(d[:n], []byte(wantStr)) {
- t.Errorf("got %q, want %q", string(d[:n]), wantStr)
- }
-}
-
-func TestView(t *testing.T) {
- testCases := []struct {
- name string
- input string
- output string
- op func(*testing.T, *View)
- }{
- // Preconditions.
- {
- name: "truncate-check",
- input: "hello",
- output: "hello", // Not touched.
- op: func(t *testing.T, v *View) {
- defer func() {
- if r := recover(); r == nil {
- t.Errorf("Truncate(-1) did not panic")
- }
- }()
- v.Truncate(-1)
- },
- },
- {
- name: "grow-check",
- input: "hello",
- output: "hello", // Not touched.
- op: func(t *testing.T, v *View) {
- defer func() {
- if r := recover(); r == nil {
- t.Errorf("Grow(-1) did not panic")
- }
- }()
- v.Grow(-1, false)
- },
- },
- {
- name: "advance-check",
- input: "hello",
- output: "", // Consumed.
- op: func(t *testing.T, v *View) {
- defer func() {
- if r := recover(); r == nil {
- t.Errorf("advanceRead(Size()+1) did not panic")
- }
- }()
- v.advanceRead(v.Size() + 1)
- },
- },
-
- // Prepend.
- {
- name: "prepend",
- input: "world",
- output: "hello world",
- op: func(t *testing.T, v *View) {
- v.Prepend([]byte("hello "))
- },
- },
- {
- name: "prepend-backfill-full",
- input: "hello world",
- output: "jello world",
- op: func(t *testing.T, v *View) {
- v.TrimFront(1)
- v.Prepend([]byte("j"))
- },
- },
- {
- name: "prepend-backfill-under",
- input: "hello world",
- output: "hola world",
- op: func(t *testing.T, v *View) {
- v.TrimFront(5)
- v.Prepend([]byte("hola"))
- },
- },
- {
- name: "prepend-backfill-over",
- input: "hello world",
- output: "smello world",
- op: func(t *testing.T, v *View) {
- v.TrimFront(1)
- v.Prepend([]byte("sm"))
- },
- },
- {
- name: "prepend-fill",
- input: strings.Repeat("1", bufferSize-1),
- output: "0" + strings.Repeat("1", bufferSize-1),
- op: func(t *testing.T, v *View) {
- v.Prepend([]byte("0"))
- },
- },
- {
- name: "prepend-overflow",
- input: strings.Repeat("1", bufferSize),
- output: "0" + strings.Repeat("1", bufferSize),
- op: func(t *testing.T, v *View) {
- v.Prepend([]byte("0"))
- },
- },
- {
- name: "prepend-multiple-buffers",
- input: strings.Repeat("1", bufferSize-1),
- output: strings.Repeat("0", bufferSize*3) + strings.Repeat("1", bufferSize-1),
- op: func(t *testing.T, v *View) {
- v.Prepend([]byte(strings.Repeat("0", bufferSize*3)))
- },
- },
-
- // Append and write.
- {
- name: "append",
- input: "hello",
- output: "hello world",
- op: func(t *testing.T, v *View) {
- v.Append([]byte(" world"))
- },
- },
- {
- name: "append-fill",
- input: strings.Repeat("1", bufferSize-1),
- output: strings.Repeat("1", bufferSize-1) + "0",
- op: func(t *testing.T, v *View) {
- v.Append([]byte("0"))
- },
- },
- {
- name: "append-overflow",
- input: strings.Repeat("1", bufferSize),
- output: strings.Repeat("1", bufferSize) + "0",
- op: func(t *testing.T, v *View) {
- v.Append([]byte("0"))
- },
- },
- {
- name: "append-multiple-buffers",
- input: strings.Repeat("1", bufferSize-1),
- output: strings.Repeat("1", bufferSize-1) + strings.Repeat("0", bufferSize*3),
- op: func(t *testing.T, v *View) {
- v.Append([]byte(strings.Repeat("0", bufferSize*3)))
- },
- },
-
- // AppendOwned.
- {
- name: "append-owned",
- input: "hello",
- output: "hello world",
- op: func(t *testing.T, v *View) {
- b := []byte("Xworld")
- v.AppendOwned(b)
- b[0] = ' '
- },
- },
-
- // Truncate.
- {
- name: "truncate",
- input: "hello world",
- output: "hello",
- op: func(t *testing.T, v *View) {
- v.Truncate(5)
- },
- },
- {
- name: "truncate-noop",
- input: "hello world",
- output: "hello world",
- op: func(t *testing.T, v *View) {
- v.Truncate(v.Size() + 1)
- },
- },
- {
- name: "truncate-multiple-buffers",
- input: strings.Repeat("1", bufferSize*2),
- output: strings.Repeat("1", bufferSize*2-1),
- op: func(t *testing.T, v *View) {
- v.Truncate(bufferSize*2 - 1)
- },
- },
- {
- name: "truncate-multiple-buffers-to-one",
- input: strings.Repeat("1", bufferSize*2),
- output: "11111",
- op: func(t *testing.T, v *View) {
- v.Truncate(5)
- },
- },
-
- // TrimFront.
- {
- name: "trim",
- input: "hello world",
- output: "world",
- op: func(t *testing.T, v *View) {
- v.TrimFront(6)
- },
- },
- {
- name: "trim-too-large",
- input: "hello world",
- output: "",
- op: func(t *testing.T, v *View) {
- v.TrimFront(v.Size() + 1)
- },
- },
- {
- name: "trim-multiple-buffers",
- input: strings.Repeat("1", bufferSize*2),
- output: strings.Repeat("1", bufferSize*2-1),
- op: func(t *testing.T, v *View) {
- v.TrimFront(1)
- },
- },
- {
- name: "trim-multiple-buffers-to-one-buffer",
- input: strings.Repeat("1", bufferSize*2),
- output: "1",
- op: func(t *testing.T, v *View) {
- v.TrimFront(bufferSize*2 - 1)
- },
- },
-
- // Grow.
- {
- name: "grow",
- input: "hello world",
- output: "hello world",
- op: func(t *testing.T, v *View) {
- v.Grow(1, true)
- },
- },
- {
- name: "grow-from-zero",
- output: strings.Repeat("\x00", 1024),
- op: func(t *testing.T, v *View) {
- v.Grow(1024, true)
- },
- },
- {
- name: "grow-from-non-zero",
- input: strings.Repeat("1", bufferSize),
- output: strings.Repeat("1", bufferSize) + strings.Repeat("\x00", bufferSize),
- op: func(t *testing.T, v *View) {
- v.Grow(bufferSize*2, true)
- },
- },
-
- // Copy.
- {
- name: "copy",
- input: "hello",
- output: "hello",
- op: func(t *testing.T, v *View) {
- other := v.Copy()
- bs := other.Flatten()
- want := []byte("hello")
- if !bytes.Equal(bs, want) {
- t.Errorf("expected %v, got %v", want, bs)
- }
- },
- },
- {
- name: "copy-large",
- input: strings.Repeat("1", bufferSize+1),
- output: strings.Repeat("1", bufferSize+1),
- op: func(t *testing.T, v *View) {
- other := v.Copy()
- bs := other.Flatten()
- want := []byte(strings.Repeat("1", bufferSize+1))
- if !bytes.Equal(bs, want) {
- t.Errorf("expected %v, got %v", want, bs)
- }
- },
- },
-
- // Merge.
- {
- name: "merge",
- input: "hello",
- output: "hello world",
- op: func(t *testing.T, v *View) {
- var other View
- other.Append([]byte(" world"))
- v.Merge(&other)
- if sz := other.Size(); sz != 0 {
- t.Errorf("expected 0, got %d", sz)
- }
- },
- },
- {
- name: "merge-large",
- input: strings.Repeat("1", bufferSize+1),
- output: strings.Repeat("1", bufferSize+1) + strings.Repeat("0", bufferSize+1),
- op: func(t *testing.T, v *View) {
- var other View
- other.Append([]byte(strings.Repeat("0", bufferSize+1)))
- v.Merge(&other)
- if sz := other.Size(); sz != 0 {
- t.Errorf("expected 0, got %d", sz)
- }
- },
- },
-
- // ReadAt.
- {
- name: "readat",
- input: "hello",
- output: "hello",
- op: func(t *testing.T, v *View) { testReadAt(t, v, 0, 6, "hello", io.EOF) },
- },
- {
- name: "readat-long",
- input: "hello",
- output: "hello",
- op: func(t *testing.T, v *View) { testReadAt(t, v, 0, 8, "hello", io.EOF) },
- },
- {
- name: "readat-short",
- input: "hello",
- output: "hello",
- op: func(t *testing.T, v *View) { testReadAt(t, v, 0, 3, "hel", nil) },
- },
- {
- name: "readat-offset",
- input: "hello",
- output: "hello",
- op: func(t *testing.T, v *View) { testReadAt(t, v, 2, 3, "llo", io.EOF) },
- },
- {
- name: "readat-long-offset",
- input: "hello",
- output: "hello",
- op: func(t *testing.T, v *View) { testReadAt(t, v, 2, 8, "llo", io.EOF) },
- },
- {
- name: "readat-short-offset",
- input: "hello",
- output: "hello",
- op: func(t *testing.T, v *View) { testReadAt(t, v, 2, 2, "ll", nil) },
- },
- {
- name: "readat-skip-all",
- input: "hello",
- output: "hello",
- op: func(t *testing.T, v *View) { testReadAt(t, v, bufferSize+1, 1, "", io.EOF) },
- },
- {
- name: "readat-second-buffer",
- input: strings.Repeat("0", bufferSize+1) + "12",
- output: strings.Repeat("0", bufferSize+1) + "12",
- op: func(t *testing.T, v *View) { testReadAt(t, v, bufferSize+1, 1, "1", nil) },
- },
- {
- name: "readat-second-buffer-end",
- input: strings.Repeat("0", bufferSize+1) + "12",
- output: strings.Repeat("0", bufferSize+1) + "12",
- op: func(t *testing.T, v *View) { testReadAt(t, v, bufferSize+1, 2, "12", io.EOF) },
- },
- }
-
- for _, tc := range testCases {
- for fillName, fn := range fillFuncs {
- t.Run(fillName+"/"+tc.name, func(t *testing.T) {
- // Construct & fill the view.
- var view View
- fn(&view, []byte(tc.input))
-
- // Run the operation.
- if tc.op != nil {
- tc.op(t, &view)
- }
-
- // Flatten and validate.
- out := view.Flatten()
- if !bytes.Equal([]byte(tc.output), out) {
- t.Errorf("expected %q, got %q", tc.output, string(out))
- }
-
- // Ensure the size is correct.
- if len(out) != int(view.Size()) {
- t.Errorf("size is wrong: expected %d, got %d", len(out), view.Size())
- }
-
- // Calculate contents via Apply.
- var appliedOut []byte
- view.Apply(func(b []byte) {
- appliedOut = append(appliedOut, b...)
- })
- if len(appliedOut) != len(out) {
- t.Errorf("expected %d, got %d", len(out), len(appliedOut))
- }
- if !bytes.Equal(appliedOut, out) {
- t.Errorf("expected %v, got %v", out, appliedOut)
- }
-
- // Calculate contents via ReadToWriter.
- var b bytes.Buffer
- n, err := view.ReadToWriter(&b, int64(len(out)))
- if n != int64(len(out)) {
- t.Errorf("expected %d, got %d", len(out), n)
- }
- if err != nil {
- t.Errorf("expected nil, got %v", err)
- }
- if !bytes.Equal(b.Bytes(), out) {
- t.Errorf("expected %v, got %v", out, b.Bytes())
- }
- })
- }
- }
-}
-
-func TestViewPullUp(t *testing.T) {
- for _, tc := range []struct {
- desc string
- inputs []string
- offset int
- length int
- output string
- failed bool
- // lengths is the lengths of each buffer node after the pull up.
- lengths []int
- }{
- {
- desc: "whole empty view",
- },
- {
- desc: "zero pull",
- inputs: []string{"hello", " world"},
- lengths: []int{5, 6},
- },
- {
- desc: "whole view",
- inputs: []string{"hello", " world"},
- offset: 0,
- length: 11,
- output: "hello world",
- lengths: []int{11},
- },
- {
- desc: "middle to end aligned",
- inputs: []string{"0123", "45678", "9abcd"},
- offset: 4,
- length: 10,
- output: "456789abcd",
- lengths: []int{4, 10},
- },
- {
- desc: "middle to end unaligned",
- inputs: []string{"0123", "45678", "9abcd"},
- offset: 6,
- length: 8,
- output: "6789abcd",
- lengths: []int{4, 10},
- },
- {
- desc: "middle aligned",
- inputs: []string{"0123", "45678", "9abcd", "efgh"},
- offset: 6,
- length: 5,
- output: "6789a",
- lengths: []int{4, 10, 4},
- },
-
- // Failed cases.
- {
- desc: "empty view - length too long",
- offset: 0,
- length: 1,
- failed: true,
- },
- {
- desc: "empty view - offset too large",
- offset: 1,
- length: 1,
- failed: true,
- },
- {
- desc: "length too long",
- inputs: []string{"0123", "45678", "9abcd"},
- offset: 4,
- length: 100,
- failed: true,
- lengths: []int{4, 5, 5},
- },
- {
- desc: "offset too large",
- inputs: []string{"0123", "45678", "9abcd"},
- offset: 100,
- length: 1,
- failed: true,
- lengths: []int{4, 5, 5},
- },
- } {
- t.Run(tc.desc, func(t *testing.T) {
- var v View
- for _, s := range tc.inputs {
- v.AppendOwned([]byte(s))
- }
-
- got, gotOk := v.PullUp(tc.offset, tc.length)
- want, wantOk := []byte(tc.output), !tc.failed
- if gotOk != wantOk || !bytes.Equal(got, want) {
- t.Errorf("v.PullUp(%d, %d) = %q, %t; %q, %t", tc.offset, tc.length, got, gotOk, want, wantOk)
- }
-
- var gotLengths []int
- for buf := v.data.Front(); buf != nil; buf = buf.Next() {
- gotLengths = append(gotLengths, buf.ReadSize())
- }
- if !reflect.DeepEqual(gotLengths, tc.lengths) {
- t.Errorf("lengths = %v; want %v", gotLengths, tc.lengths)
- }
- })
- }
-}
-
-func TestViewRemove(t *testing.T) {
- // Success cases
- for _, tc := range []struct {
- desc string
- // before is the contents for each buffer node initially.
- before []string
- // after is the contents for each buffer node after removal.
- after []string
- offset int
- length int
- }{
- {
- desc: "empty view",
- },
- {
- desc: "nothing removed",
- before: []string{"hello", " world"},
- after: []string{"hello", " world"},
- },
- {
- desc: "whole view",
- before: []string{"hello", " world"},
- offset: 0,
- length: 11,
- },
- {
- desc: "beginning to middle aligned",
- before: []string{"0123", "45678", "9abcd"},
- after: []string{"9abcd"},
- offset: 0,
- length: 9,
- },
- {
- desc: "beginning to middle unaligned",
- before: []string{"0123", "45678", "9abcd"},
- after: []string{"678", "9abcd"},
- offset: 0,
- length: 6,
- },
- {
- desc: "middle to end aligned",
- before: []string{"0123", "45678", "9abcd"},
- after: []string{"0123"},
- offset: 4,
- length: 10,
- },
- {
- desc: "middle to end unaligned",
- before: []string{"0123", "45678", "9abcd"},
- after: []string{"0123", "45"},
- offset: 6,
- length: 8,
- },
- {
- desc: "middle aligned",
- before: []string{"0123", "45678", "9abcd"},
- after: []string{"0123", "9abcd"},
- offset: 4,
- length: 5,
- },
- {
- desc: "middle unaligned",
- before: []string{"0123", "45678", "9abcd"},
- after: []string{"0123", "4578", "9abcd"},
- offset: 6,
- length: 1,
- },
- } {
- t.Run(tc.desc, func(t *testing.T) {
- var v View
- for _, s := range tc.before {
- v.AppendOwned([]byte(s))
- }
-
- if ok := v.Remove(tc.offset, tc.length); !ok {
- t.Errorf("v.Remove(%d, %d) = false, want true", tc.offset, tc.length)
- }
-
- var got []string
- for buf := v.data.Front(); buf != nil; buf = buf.Next() {
- got = append(got, string(buf.ReadSlice()))
- }
- if !reflect.DeepEqual(got, tc.after) {
- t.Errorf("after = %v; want %v", got, tc.after)
- }
- })
- }
-
- // Failure cases
- for _, tc := range []struct {
- desc string
- // before is the contents for each buffer node initially.
- before []string
- offset int
- length int
- }{
- {
- desc: "offset out-of-range",
- before: []string{"hello", " world"},
- offset: -1,
- length: 3,
- },
- {
- desc: "length too long",
- before: []string{"hello", " world"},
- offset: 0,
- length: 12,
- },
- {
- desc: "length too long with positive offset",
- before: []string{"hello", " world"},
- offset: 3,
- length: 9,
- },
- {
- desc: "length negative",
- before: []string{"hello", " world"},
- offset: 0,
- length: -1,
- },
- } {
- t.Run(tc.desc, func(t *testing.T) {
- var v View
- for _, s := range tc.before {
- v.AppendOwned([]byte(s))
- }
- if ok := v.Remove(tc.offset, tc.length); ok {
- t.Errorf("v.Remove(%d, %d) = true, want false", tc.offset, tc.length)
- }
- })
- }
-}
-
-func TestViewSubApply(t *testing.T) {
- var v View
- v.AppendOwned([]byte("0123"))
- v.AppendOwned([]byte("45678"))
- v.AppendOwned([]byte("9abcd"))
-
- data := []byte("0123456789abcd")
-
- for i := 0; i <= len(data); i++ {
- for j := i; j <= len(data); j++ {
- t.Run(fmt.Sprintf("SubApply(%d,%d)", i, j), func(t *testing.T) {
- var got []byte
- v.SubApply(i, j-i, func(b []byte) {
- got = append(got, b...)
- })
- if want := data[i:j]; !bytes.Equal(got, want) {
- t.Errorf("got = %q; want %q", got, want)
- }
- })
- }
- }
-}
-
-func doSaveAndLoad(t *testing.T, toSave, toLoad *View) {
- t.Helper()
- var buf bytes.Buffer
- ctx := context.Background()
- if _, err := state.Save(ctx, &buf, toSave); err != nil {
- t.Fatal("state.Save:", err)
- }
- if _, err := state.Load(ctx, bytes.NewReader(buf.Bytes()), toLoad); err != nil {
- t.Fatal("state.Load:", err)
- }
-}
-
-func TestSaveRestoreViewEmpty(t *testing.T) {
- var toSave View
- var v View
- doSaveAndLoad(t, &toSave, &v)
-
- if got := v.pool.avail; got != nil {
- t.Errorf("pool is not in zero state: v.pool.avail = %v, want nil", got)
- }
- if got := v.Flatten(); len(got) != 0 {
- t.Errorf("v.Flatten() = %x, want []", got)
- }
-}
-
-func TestSaveRestoreView(t *testing.T) {
- // Create data that fits 2.5 slots.
- data := bytes.Join([][]byte{
- bytes.Repeat([]byte{1, 2}, defaultBufferSize),
- bytes.Repeat([]byte{3}, defaultBufferSize/2),
- }, nil)
-
- var toSave View
- toSave.Append(data)
-
- var v View
- doSaveAndLoad(t, &toSave, &v)
-
- // Next available slot is at index 3; slots 0-2 are used.
- i := 3
- if got, want := &v.pool.avail[0], &v.pool.embeddedStorage[i]; got != want {
- t.Errorf("next available buffer points to %p, want %p (&v.pool.embeddedStorage[%d])", got, want, i)
- }
- if got := v.Flatten(); !bytes.Equal(got, data) {
- t.Errorf("v.Flatten() = %x, want %x", got, data)
- }
-}
-
-func TestRangeIntersect(t *testing.T) {
- for _, tc := range []struct {
- desc string
- x, y, want Range
- }{
- {
- desc: "empty intersects empty",
- },
- {
- desc: "empty intersection",
- x: Range{end: 10},
- y: Range{begin: 10, end: 20},
- },
- {
- desc: "some intersection",
- x: Range{begin: 5, end: 20},
- y: Range{end: 10},
- want: Range{begin: 5, end: 10},
- },
- } {
- t.Run(tc.desc, func(t *testing.T) {
- if got := tc.x.Intersect(tc.y); got != tc.want {
- t.Errorf("(%#v).Intersect(%#v) = %#v; want %#v", tc.x, tc.y, got, tc.want)
- }
- if got := tc.y.Intersect(tc.x); got != tc.want {
- t.Errorf("(%#v).Intersect(%#v) = %#v; want %#v", tc.y, tc.x, got, tc.want)
- }
- })
- }
-}
-
-func TestRangeOffset(t *testing.T) {
- for _, tc := range []struct {
- input Range
- offset int
- output Range
- }{
- {
- input: Range{},
- offset: 0,
- output: Range{},
- },
- {
- input: Range{},
- offset: -1,
- output: Range{begin: -1, end: -1},
- },
- {
- input: Range{begin: 10, end: 20},
- offset: -1,
- output: Range{begin: 9, end: 19},
- },
- {
- input: Range{begin: 10, end: 20},
- offset: 2,
- output: Range{begin: 12, end: 22},
- },
- } {
- if got := tc.input.Offset(tc.offset); got != tc.output {
- t.Errorf("(%#v).Offset(%d) = %#v, want %#v", tc.input, tc.offset, got, tc.output)
- }
- }
-}
-
-func TestRangeLen(t *testing.T) {
- for _, tc := range []struct {
- r Range
- want int
- }{
- {r: Range{}, want: 0},
- {r: Range{begin: 1, end: 1}, want: 0},
- {r: Range{begin: -1, end: -1}, want: 0},
- {r: Range{end: 10}, want: 10},
- {r: Range{begin: 5, end: 10}, want: 5},
- } {
- if got := tc.r.Len(); got != tc.want {
- t.Errorf("(%#v).Len() = %d, want %d", tc.r, got, tc.want)
- }
- }
-}
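The deleted view_test.go exercises the View API broadly; as a hedged orientation sketch, the calls those tests lean on most (Append, ReadAt, TrimFront, Flatten) compose as below (viewSketch is hypothetical):

package buffer

// viewSketch appends data, peeks at its head, trims it, and flattens the rest.
func viewSketch() string {
	var v View
	v.Append([]byte("hello world")) // may span several internal buffers

	head := make([]byte, 5)
	if _, err := v.ReadAt(head, 0); err != nil { // head is now "hello"
		return ""
	}

	v.TrimFront(6)             // drop "hello "
	return string(v.Flatten()) // "world"
}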
diff --git a/pkg/cleanup/BUILD b/pkg/cleanup/BUILD
deleted file mode 100644
index 5c34b9872..000000000
--- a/pkg/cleanup/BUILD
+++ /dev/null
@@ -1,17 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "cleanup",
- srcs = ["cleanup.go"],
- visibility = ["//:sandbox"],
- deps = [
- ],
-)
-
-go_test(
- name = "cleanup_test",
- srcs = ["cleanup_test.go"],
- library = ":cleanup",
-)
diff --git a/pkg/cleanup/cleanup_state_autogen.go b/pkg/cleanup/cleanup_state_autogen.go
new file mode 100644
index 000000000..8373268fa
--- /dev/null
+++ b/pkg/cleanup/cleanup_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package cleanup
diff --git a/pkg/cleanup/cleanup_test.go b/pkg/cleanup/cleanup_test.go
deleted file mode 100644
index ab3d9ed95..000000000
--- a/pkg/cleanup/cleanup_test.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cleanup
-
-import "testing"
-
-func testCleanupHelper(clean, cleanAdd *bool, release bool) func() {
- cu := Make(func() {
- *clean = true
- })
- cu.Add(func() {
- *cleanAdd = true
- })
- defer cu.Clean()
- if release {
- return cu.Release()
- }
- return nil
-}
-
-func TestCleanup(t *testing.T) {
- clean := false
- cleanAdd := false
- testCleanupHelper(&clean, &cleanAdd, false)
- if !clean {
- t.Fatalf("cleanup function was not called.")
- }
- if !cleanAdd {
- t.Fatalf("added cleanup function was not called.")
- }
-}
-
-func TestRelease(t *testing.T) {
- clean := false
- cleanAdd := false
- cleaner := testCleanupHelper(&clean, &cleanAdd, true)
-
- // Check that clean was not called after release.
- if clean {
- t.Fatalf("cleanup function was called.")
- }
- if cleanAdd {
- t.Fatalf("added cleanup function was called.")
- }
-
- // Call the cleaner function and check that both cleanup functions are called.
- cleaner()
- if !clean {
- t.Fatalf("cleanup function was not called.")
- }
- if !cleanAdd {
- t.Fatalf("added cleanup function was not called.")
- }
-}
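
Editor's note: the deleted test above is the best remaining documentation of the cleanup API: Make registers a first cleanup function, Add stacks more, a deferred Clean runs them on error paths, and Release disarms Clean and hands the accumulated work back to the caller. A minimal sketch of that pattern follows; the resource type and its open/close helpers are placeholders, and the import path is inferred from the repository layout.

package example

import "gvisor.dev/gvisor/pkg/cleanup"

// res stands in for any resource that needs teardown on failure.
type res struct{}

func open() (*res, error) { return &res{}, nil } // placeholder acquisition
func (r *res) close()     {}                     // placeholder teardown

func openBoth() (a, b *res, err error) {
	if a, err = open(); err != nil {
		return nil, nil, err
	}
	cu := cleanup.Make(a.close) // first cleanup step
	defer cu.Clean()            // runs all registered steps unless released

	if b, err = open(); err != nil {
		return nil, nil, err // deferred Clean closes a
	}
	cu.Add(b.close) // stack a second step

	cu.Release() // success: disarm Clean so a and b stay open for the caller
	return a, b, nil
}
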
diff --git a/pkg/compressio/BUILD b/pkg/compressio/BUILD
deleted file mode 100644
index 70018cf18..000000000
--- a/pkg/compressio/BUILD
+++ /dev/null
@@ -1,17 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "compressio",
- srcs = ["compressio.go"],
- visibility = ["//:sandbox"],
- deps = ["//pkg/sync"],
-)
-
-go_test(
- name = "compressio_test",
- size = "medium",
- srcs = ["compressio_test.go"],
- library = ":compressio",
-)
diff --git a/pkg/compressio/compressio_state_autogen.go b/pkg/compressio/compressio_state_autogen.go
new file mode 100644
index 000000000..c47e0dd17
--- /dev/null
+++ b/pkg/compressio/compressio_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package compressio
diff --git a/pkg/compressio/compressio_test.go b/pkg/compressio/compressio_test.go
deleted file mode 100644
index 86dc47e44..000000000
--- a/pkg/compressio/compressio_test.go
+++ /dev/null
@@ -1,290 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package compressio
-
-import (
- "bytes"
- "compress/flate"
- "encoding/base64"
- "fmt"
- "io"
- "math/rand"
- "runtime"
- "testing"
- "time"
-)
-
-type harness interface {
- Errorf(format string, v ...interface{})
- Fatalf(format string, v ...interface{})
- Logf(format string, v ...interface{})
-}
-
-func initTest(t harness, size int) []byte {
- // Set number of processes to number of CPUs.
-	// Set GOMAXPROCS to the number of CPUs.
-
- // Construct synthetic data. We do this by encoding random data with
- // base64. This gives a high level of entropy, but still quite a bit of
- // structure, to give reasonable compression ratios (~75%).
- var buf bytes.Buffer
- bufW := base64.NewEncoder(base64.RawStdEncoding, &buf)
- bufR := rand.New(rand.NewSource(0))
- if _, err := io.CopyN(bufW, bufR, int64(size)); err != nil {
- t.Fatalf("unable to seed random data: %v", err)
- }
- return buf.Bytes()
-}
-
-type testOpts struct {
- Name string
- Data []byte
- NewWriter func(*bytes.Buffer) (io.Writer, error)
- NewReader func(*bytes.Buffer) (io.Reader, error)
- PreCompress func()
- PostCompress func()
- PreDecompress func()
- PostDecompress func()
- CompressIters int
- DecompressIters int
- CorruptData bool
-}
-
-func doTest(t harness, opts testOpts) {
- // Compress.
- var compressed bytes.Buffer
- compressionStartTime := time.Now()
- if opts.PreCompress != nil {
- opts.PreCompress()
- }
- if opts.CompressIters <= 0 {
- opts.CompressIters = 1
- }
- for i := 0; i < opts.CompressIters; i++ {
- compressed.Reset()
- w, err := opts.NewWriter(&compressed)
- if err != nil {
- t.Errorf("%s: NewWriter got err %v, expected nil", opts.Name, err)
- }
- if _, err := io.Copy(w, bytes.NewBuffer(opts.Data)); err != nil {
- t.Errorf("%s: compress got err %v, expected nil", opts.Name, err)
- return
- }
- closer, ok := w.(io.Closer)
- if ok {
- if err := closer.Close(); err != nil {
- t.Errorf("%s: got err %v, expected nil", opts.Name, err)
- return
- }
- }
- }
- if opts.PostCompress != nil {
- opts.PostCompress()
- }
- compressionTime := time.Since(compressionStartTime)
- compressionRatio := float32(compressed.Len()) / float32(len(opts.Data))
-
- // Decompress.
- var decompressed bytes.Buffer
- decompressionStartTime := time.Now()
- if opts.PreDecompress != nil {
- opts.PreDecompress()
- }
- if opts.DecompressIters <= 0 {
- opts.DecompressIters = 1
- }
- if opts.CorruptData {
- b := compressed.Bytes()
- b[rand.Intn(len(b))]++
- }
- for i := 0; i < opts.DecompressIters; i++ {
- decompressed.Reset()
- r, err := opts.NewReader(bytes.NewBuffer(compressed.Bytes()))
- if err != nil {
- if opts.CorruptData {
- continue
- }
- t.Errorf("%s: NewReader got err %v, expected nil", opts.Name, err)
- return
- }
- if _, err := io.Copy(&decompressed, r); (err != nil) != opts.CorruptData {
-			t.Errorf("%s: decompress got err %v unexpectedly", opts.Name, err)
- return
- }
- }
- if opts.PostDecompress != nil {
- opts.PostDecompress()
- }
- decompressionTime := time.Since(decompressionStartTime)
-
- if opts.CorruptData {
- return
- }
-
- // Verify.
- if decompressed.Len() != len(opts.Data) {
- t.Errorf("%s: got %d bytes, expected %d", opts.Name, decompressed.Len(), len(opts.Data))
- }
- if !bytes.Equal(opts.Data, decompressed.Bytes()) {
- t.Errorf("%s: got mismatch, expected match", opts.Name)
- if len(opts.Data) < 32 { // Don't flood the logs.
- t.Errorf("got %v, expected %v", decompressed.Bytes(), opts.Data)
- }
- }
-
- t.Logf("%s: compression time %v, ratio %2.2f, decompression time %v",
- opts.Name, compressionTime, compressionRatio, decompressionTime)
-}
-
-var hashKey = []byte("01234567890123456789012345678901")
-
-func TestCompress(t *testing.T) {
- rand.Seed(time.Now().Unix())
-
- var (
- data = initTest(t, 10*1024*1024)
- data0 = data[:0]
- data1 = data[:1]
- data2 = data[:11]
- data3 = data[:16]
- data4 = data[:]
- )
-
- for _, data := range [][]byte{data0, data1, data2, data3, data4} {
- for _, blockSize := range []uint32{1, 4, 1024, 4 * 1024, 16 * 1024} {
- // Skip annoying tests; they just take too long.
- if blockSize <= 16 && len(data) > 16 {
- continue
- }
-
- for _, key := range [][]byte{nil, hashKey} {
- for _, corruptData := range []bool{false, true} {
- if key == nil && corruptData {
- // No need to test corrupt data
- // case when not doing hashing.
- continue
- }
- // Do the compress test.
- doTest(t, testOpts{
- Name: fmt.Sprintf("len(data)=%d, blockSize=%d, key=%s, corruptData=%v", len(data), blockSize, string(key), corruptData),
- Data: data,
- NewWriter: func(b *bytes.Buffer) (io.Writer, error) {
- return NewWriter(b, key, blockSize, flate.BestSpeed)
- },
- NewReader: func(b *bytes.Buffer) (io.Reader, error) {
- return NewReader(b, key)
- },
- CorruptData: corruptData,
- })
- }
- }
- }
-
- // Do the vanilla test.
- doTest(t, testOpts{
- Name: fmt.Sprintf("len(data)=%d, vanilla flate", len(data)),
- Data: data,
- NewWriter: func(b *bytes.Buffer) (io.Writer, error) {
- return flate.NewWriter(b, flate.BestSpeed)
- },
- NewReader: func(b *bytes.Buffer) (io.Reader, error) {
- return flate.NewReader(b), nil
- },
- })
- }
-}
-
-const (
- benchDataSize = 600 * 1024 * 1024
-)
-
-func benchmark(b *testing.B, compress bool, hash bool, blockSize uint32) {
- b.StopTimer()
- b.SetBytes(benchDataSize)
- data := initTest(b, benchDataSize)
- compIters := b.N
- decompIters := b.N
- if compress {
- decompIters = 0
- } else {
- compIters = 0
- }
- key := hashKey
- if !hash {
- key = nil
- }
- doTest(b, testOpts{
- Name: fmt.Sprintf("compress=%t, hash=%t, len(data)=%d, blockSize=%d", compress, hash, len(data), blockSize),
- Data: data,
- PreCompress: b.StartTimer,
- PostCompress: b.StopTimer,
- NewWriter: func(b *bytes.Buffer) (io.Writer, error) {
- return NewWriter(b, key, blockSize, flate.BestSpeed)
- },
- NewReader: func(b *bytes.Buffer) (io.Reader, error) {
- return NewReader(b, key)
- },
- CompressIters: compIters,
- DecompressIters: decompIters,
- })
-}
-
-func BenchmarkCompressNoHash64K(b *testing.B) {
- benchmark(b, true, false, 64*1024)
-}
-
-func BenchmarkCompressHash64K(b *testing.B) {
- benchmark(b, true, true, 64*1024)
-}
-
-func BenchmarkDecompressNoHash64K(b *testing.B) {
- benchmark(b, false, false, 64*1024)
-}
-
-func BenchmarkDecompressHash64K(b *testing.B) {
- benchmark(b, false, true, 64*1024)
-}
-
-func BenchmarkCompressNoHash1M(b *testing.B) {
- benchmark(b, true, false, 1024*1024)
-}
-
-func BenchmarkCompressHash1M(b *testing.B) {
- benchmark(b, true, true, 1024*1024)
-}
-
-func BenchmarkDecompressNoHash1M(b *testing.B) {
- benchmark(b, false, false, 1024*1024)
-}
-
-func BenchmarkDecompressHash1M(b *testing.B) {
- benchmark(b, false, true, 1024*1024)
-}
-
-func BenchmarkCompressNoHash16M(b *testing.B) {
- benchmark(b, true, false, 16*1024*1024)
-}
-
-func BenchmarkCompressHash16M(b *testing.B) {
- benchmark(b, true, true, 16*1024*1024)
-}
-
-func BenchmarkDecompressNoHash16M(b *testing.B) {
- benchmark(b, false, false, 16*1024*1024)
-}
-
-func BenchmarkDecompressHash16M(b *testing.B) {
- benchmark(b, false, true, 16*1024*1024)
-}
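
Editor's note: the deleted tests and benchmarks above are the main record of how the compressio API is driven: NewWriter takes a destination, an optional HMAC key, a block size, and a flate level, while NewReader needs the same key to verify and decompress. The round-trip sketch below uses the same calls as the deleted test; error handling is abbreviated and the import path is inferred from the repository layout.

package main

import (
	"bytes"
	"compress/flate"
	"fmt"
	"io"

	"gvisor.dev/gvisor/pkg/compressio"
)

func main() {
	// A 32-byte key enables per-block integrity hashing; pass nil to disable it.
	key := []byte("01234567890123456789012345678901")
	payload := bytes.Repeat([]byte("gvisor"), 4096)

	var compressed bytes.Buffer
	w, err := compressio.NewWriter(&compressed, key, 64*1024 /* block size */, flate.BestSpeed)
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(w, bytes.NewReader(payload)); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil { // flush the final block
		panic(err)
	}

	r, err := compressio.NewReader(bytes.NewBuffer(compressed.Bytes()), key)
	if err != nil {
		panic(err)
	}
	var decompressed bytes.Buffer
	if _, err := io.Copy(&decompressed, r); err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(decompressed.Bytes(), payload)) // true
}
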
diff --git a/pkg/context/BUILD b/pkg/context/BUILD
deleted file mode 100644
index f33e23bf7..000000000
--- a/pkg/context/BUILD
+++ /dev/null
@@ -1,12 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "context",
- srcs = ["context.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/log",
- ],
-)
diff --git a/pkg/context/context_state_autogen.go b/pkg/context/context_state_autogen.go
new file mode 100644
index 000000000..fdc3c9fbb
--- /dev/null
+++ b/pkg/context/context_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package context
diff --git a/pkg/control/client/BUILD b/pkg/control/client/BUILD
deleted file mode 100644
index 1b9e10ee7..000000000
--- a/pkg/control/client/BUILD
+++ /dev/null
@@ -1,15 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "client",
- srcs = [
- "client.go",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/unet",
- "//pkg/urpc",
- ],
-)
diff --git a/pkg/control/client/client_state_autogen.go b/pkg/control/client/client_state_autogen.go
new file mode 100644
index 000000000..9872f1107
--- /dev/null
+++ b/pkg/control/client/client_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package client
diff --git a/pkg/control/server/BUILD b/pkg/control/server/BUILD
deleted file mode 100644
index 002d2ef44..000000000
--- a/pkg/control/server/BUILD
+++ /dev/null
@@ -1,15 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "server",
- srcs = ["server.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/log",
- "//pkg/sync",
- "//pkg/unet",
- "//pkg/urpc",
- ],
-)
diff --git a/pkg/control/server/server_state_autogen.go b/pkg/control/server/server_state_autogen.go
new file mode 100644
index 000000000..c236b8da5
--- /dev/null
+++ b/pkg/control/server/server_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package server
diff --git a/pkg/coverage/BUILD b/pkg/coverage/BUILD
deleted file mode 100644
index ace5895f8..000000000
--- a/pkg/coverage/BUILD
+++ /dev/null
@@ -1,14 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "coverage",
- srcs = ["coverage.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/hostarch",
- "//pkg/sync",
- "@io_bazel_rules_go//go/tools/coverdata",
- ],
-)
diff --git a/pkg/coverage/coverage_state_autogen.go b/pkg/coverage/coverage_state_autogen.go
new file mode 100644
index 000000000..669eb89b2
--- /dev/null
+++ b/pkg/coverage/coverage_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build go1.1
+
+package coverage
diff --git a/pkg/cpuid/BUILD b/pkg/cpuid/BUILD
deleted file mode 100644
index c0c864902..000000000
--- a/pkg/cpuid/BUILD
+++ /dev/null
@@ -1,36 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "cpuid",
- srcs = [
- "cpu_amd64.s",
- "cpuid.go",
- "cpuid_arm64.go",
- "cpuid_x86.go",
- ],
- visibility = ["//:sandbox"],
- deps = ["//pkg/log"],
-)
-
-go_test(
- name = "cpuid_test",
- size = "small",
- srcs = [
- "cpuid_arm64_test.go",
- "cpuid_x86_test.go",
- ],
- library = ":cpuid",
-)
-
-go_test(
- name = "cpuid_parse_test",
- size = "small",
- srcs = [
- "cpuid_parse_x86_test.go",
- ],
- library = ":cpuid",
- tags = ["manual"],
- deps = ["@org_golang_x_sys//unix:go_default_library"],
-)
diff --git a/pkg/cpuid/cpuid_arm64_state_autogen.go b/pkg/cpuid/cpuid_arm64_state_autogen.go
new file mode 100644
index 000000000..50e317963
--- /dev/null
+++ b/pkg/cpuid/cpuid_arm64_state_autogen.go
@@ -0,0 +1,53 @@
+// automatically generated by stateify.
+
+// +build arm64
+
+package cpuid
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (fs *FeatureSet) StateTypeName() string {
+ return "pkg/cpuid.FeatureSet"
+}
+
+func (fs *FeatureSet) StateFields() []string {
+ return []string{
+ "Set",
+ "CPUImplementer",
+ "CPUArchitecture",
+ "CPUVariant",
+ "CPUPartnum",
+ "CPURevision",
+ }
+}
+
+func (fs *FeatureSet) beforeSave() {}
+
+// +checklocksignore
+func (fs *FeatureSet) StateSave(stateSinkObject state.Sink) {
+ fs.beforeSave()
+ stateSinkObject.Save(0, &fs.Set)
+ stateSinkObject.Save(1, &fs.CPUImplementer)
+ stateSinkObject.Save(2, &fs.CPUArchitecture)
+ stateSinkObject.Save(3, &fs.CPUVariant)
+ stateSinkObject.Save(4, &fs.CPUPartnum)
+ stateSinkObject.Save(5, &fs.CPURevision)
+}
+
+func (fs *FeatureSet) afterLoad() {}
+
+// +checklocksignore
+func (fs *FeatureSet) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fs.Set)
+ stateSourceObject.Load(1, &fs.CPUImplementer)
+ stateSourceObject.Load(2, &fs.CPUArchitecture)
+ stateSourceObject.Load(3, &fs.CPUVariant)
+ stateSourceObject.Load(4, &fs.CPUPartnum)
+ stateSourceObject.Load(5, &fs.CPURevision)
+}
+
+func init() {
+ state.Register((*FeatureSet)(nil))
+}
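
Editor's note: the generated file above shows the full shape of what stateify emits for a saveable type: StateTypeName and StateFields describe the type, StateSave/StateLoad move each field through a state.Sink/state.Source by index, beforeSave/afterLoad are hooks, and an init-time state.Register makes the type known to the serializer. Below is a hand-written sketch of the same contract for a hypothetical Counter type; the type is made up, only the method shapes are taken from the generated code above.

package counter

import "gvisor.dev/gvisor/pkg/state"

// Counter is a hypothetical saveable type used only to illustrate the
// stateify-generated contract.
type Counter struct {
	Hits  uint64
	Label string
}

func (c *Counter) StateTypeName() string { return "example/counter.Counter" }

// StateFields lists fields in the order their indices are used below.
func (c *Counter) StateFields() []string { return []string{"Hits", "Label"} }

func (c *Counter) beforeSave() {}

func (c *Counter) StateSave(stateSinkObject state.Sink) {
	c.beforeSave()
	stateSinkObject.Save(0, &c.Hits)
	stateSinkObject.Save(1, &c.Label)
}

func (c *Counter) afterLoad() {}

func (c *Counter) StateLoad(stateSourceObject state.Source) {
	stateSourceObject.Load(0, &c.Hits)
	stateSourceObject.Load(1, &c.Label)
}

func init() {
	state.Register((*Counter)(nil))
}
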
diff --git a/pkg/cpuid/cpuid_arm64_test.go b/pkg/cpuid/cpuid_arm64_test.go
deleted file mode 100644
index 16b1c064a..000000000
--- a/pkg/cpuid/cpuid_arm64_test.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build arm64
-// +build arm64
-
-package cpuid
-
-import (
- "testing"
-)
-
-var justFP = &FeatureSet{
- Set: map[Feature]bool{
- ARM64FeatureFP: true,
- }}
-
-func TestHostFeatureSet(t *testing.T) {
- hostFeatures := HostFeatureSet()
- if len(hostFeatures.Set) == 0 {
- t.Errorf("Got invalid feature set %v from HostFeatureSet()", hostFeatures)
- }
-}
-
-func TestHasFeature(t *testing.T) {
- if !justFP.HasFeature(ARM64FeatureFP) {
- t.Errorf("HasFeature failed, %v should contain %v", justFP, ARM64FeatureFP)
- }
-
- if justFP.HasFeature(ARM64FeatureSM3) {
- t.Errorf("HasFeature failed, %v should not contain %v", justFP, ARM64FeatureSM3)
- }
-}
-
-func TestFeatureFromString(t *testing.T) {
- f, ok := FeatureFromString("asimd")
- if f != ARM64FeatureASIMD || !ok {
- t.Errorf("got %v want asimd", f)
- }
-
- f, ok = FeatureFromString("bad")
- if ok {
- t.Errorf("got %v want nothing", f)
- }
-}
diff --git a/pkg/cpuid/cpuid_parse_x86_test.go b/pkg/cpuid/cpuid_parse_x86_test.go
deleted file mode 100644
index 36dd20552..000000000
--- a/pkg/cpuid/cpuid_parse_x86_test.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build 386 || amd64
-// +build 386 amd64
-
-package cpuid
-
-import (
- "fmt"
- "io/ioutil"
- "regexp"
- "strconv"
- "strings"
- "testing"
-
- "golang.org/x/sys/unix"
-)
-
-func kernelVersion() (int, int, error) {
- var u unix.Utsname
- if err := unix.Uname(&u); err != nil {
- return 0, 0, err
- }
-
- var sb strings.Builder
- for _, b := range u.Release {
- if b == 0 {
- break
- }
- sb.WriteByte(byte(b))
- }
-
- s := strings.Split(sb.String(), ".")
- if len(s) < 2 {
- return 0, 0, fmt.Errorf("kernel release missing major and minor component: %s", sb.String())
- }
-
- major, err := strconv.Atoi(s[0])
- if err != nil {
- return 0, 0, fmt.Errorf("error parsing major version %q in %q: %w", s[0], sb.String(), err)
- }
-
- minor, err := strconv.Atoi(s[1])
- if err != nil {
- return 0, 0, fmt.Errorf("error parsing minor version %q in %q: %w", s[1], sb.String(), err)
- }
-
- return major, minor, nil
-}
-
-// TestHostFeatureFlags tests that all features detected by HostFeatureSet are
-// on the host.
-//
-// It does *not* verify that all features reported by the host are detected by
-// HostFeatureSet.
-//
-// i.e., test that HostFeatureSet is a subset of the host features.
-func TestHostFeatureFlags(t *testing.T) {
- cpuinfoBytes, _ := ioutil.ReadFile("/proc/cpuinfo")
- cpuinfo := string(cpuinfoBytes)
- t.Logf("Host cpu info:\n%s", cpuinfo)
-
- major, minor, err := kernelVersion()
- if err != nil {
- t.Fatalf("Unable to parse kernel version: %v", err)
- }
-
- re := regexp.MustCompile(`(?m)^flags\s+: (.*)$`)
- m := re.FindStringSubmatch(cpuinfo)
- if len(m) != 2 {
- t.Fatalf("Unable to extract flags from %q", cpuinfo)
- }
-
- cpuinfoFlags := make(map[string]struct{})
- for _, f := range strings.Split(m[1], " ") {
- cpuinfoFlags[f] = struct{}{}
- }
-
- fs := HostFeatureSet()
-
- // All features have a string and appear in host cpuinfo.
- for f := range fs.Set {
- name := f.flagString(false)
- if name == "" {
- t.Errorf("Non-parsable feature: %v", f)
- }
-
- // Special cases not consistently visible. We don't mind if
- // they are exposed in earlier versions.
- switch {
- // Block 0.
- case f == X86FeatureSDBG && (major < 4 || major == 4 && minor < 3):
- // SDBG only exposed in
- // b1c599b8ff80ea79b9f8277a3f9f36a7b0cfedce (4.3).
- continue
- // Block 2.
- case f == X86FeatureRDT && (major < 4 || major == 4 && minor < 10):
- // RDT only exposed in
- // 4ab1586488cb56ed8728e54c4157cc38646874d9 (4.10).
- continue
- // Block 3.
- case f == X86FeatureAVX512VBMI && (major < 4 || major == 4 && minor < 10):
- // AVX512VBMI only exposed in
- // a8d9df5a509a232a959e4ef2e281f7ecd77810d6 (4.10).
- continue
- case f == X86FeatureUMIP && (major < 4 || major == 4 && minor < 15):
- // UMIP only exposed in
- // 3522c2a6a4f341058b8291326a945e2a2d2aaf55 (4.15).
- continue
- case f == X86FeaturePKU && (major < 4 || major == 4 && minor < 9):
- // PKU only exposed in
- // dfb4a70f20c5b3880da56ee4c9484bdb4e8f1e65 (4.9).
- continue
- // Block 4.
- case f == X86FeatureXSAVES && (major < 4 || major == 4 && minor < 8):
- // XSAVES only exposed in
- // b8be15d588060a03569ac85dc4a0247460988f5b (4.8).
- continue
- // Block 5.
- case f == X86FeaturePERFCTR_LLC && (major < 4 || major == 4 && minor < 14):
- // PERFCTR_LLC renamed in
- // 910448bbed066ab1082b510eef1ae61bb792d854 (4.14).
- continue
- }
-
- hidden := f.flagString(true) == ""
- _, ok := cpuinfoFlags[name]
- if hidden && ok {
- t.Errorf("Unexpectedly hidden flag: %v", f)
- } else if !hidden && !ok {
- t.Errorf("Non-native flag: %v", f)
- }
- }
-}
diff --git a/pkg/cpuid/cpuid_state_autogen.go b/pkg/cpuid/cpuid_state_autogen.go
new file mode 100644
index 000000000..86206a6bf
--- /dev/null
+++ b/pkg/cpuid/cpuid_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package cpuid
diff --git a/pkg/cpuid/cpuid_x86_state_autogen.go b/pkg/cpuid/cpuid_x86_state_autogen.go
new file mode 100644
index 000000000..9962a29ae
--- /dev/null
+++ b/pkg/cpuid/cpuid_x86_state_autogen.go
@@ -0,0 +1,115 @@
+// automatically generated by stateify.
+
+// +build 386 amd64
+
+package cpuid
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (c *Cache) StateTypeName() string {
+ return "pkg/cpuid.Cache"
+}
+
+func (c *Cache) StateFields() []string {
+ return []string{
+ "Level",
+ "Type",
+ "FullyAssociative",
+ "Partitions",
+ "Ways",
+ "Sets",
+ "InvalidateHierarchical",
+ "Inclusive",
+ "DirectMapped",
+ }
+}
+
+func (c *Cache) beforeSave() {}
+
+// +checklocksignore
+func (c *Cache) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.Level)
+ stateSinkObject.Save(1, &c.Type)
+ stateSinkObject.Save(2, &c.FullyAssociative)
+ stateSinkObject.Save(3, &c.Partitions)
+ stateSinkObject.Save(4, &c.Ways)
+ stateSinkObject.Save(5, &c.Sets)
+ stateSinkObject.Save(6, &c.InvalidateHierarchical)
+ stateSinkObject.Save(7, &c.Inclusive)
+ stateSinkObject.Save(8, &c.DirectMapped)
+}
+
+func (c *Cache) afterLoad() {}
+
+// +checklocksignore
+func (c *Cache) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.Level)
+ stateSourceObject.Load(1, &c.Type)
+ stateSourceObject.Load(2, &c.FullyAssociative)
+ stateSourceObject.Load(3, &c.Partitions)
+ stateSourceObject.Load(4, &c.Ways)
+ stateSourceObject.Load(5, &c.Sets)
+ stateSourceObject.Load(6, &c.InvalidateHierarchical)
+ stateSourceObject.Load(7, &c.Inclusive)
+ stateSourceObject.Load(8, &c.DirectMapped)
+}
+
+func (fs *FeatureSet) StateTypeName() string {
+ return "pkg/cpuid.FeatureSet"
+}
+
+func (fs *FeatureSet) StateFields() []string {
+ return []string{
+ "Set",
+ "VendorID",
+ "ExtendedFamily",
+ "ExtendedModel",
+ "ProcessorType",
+ "Family",
+ "Model",
+ "SteppingID",
+ "Caches",
+ "CacheLine",
+ }
+}
+
+func (fs *FeatureSet) beforeSave() {}
+
+// +checklocksignore
+func (fs *FeatureSet) StateSave(stateSinkObject state.Sink) {
+ fs.beforeSave()
+ stateSinkObject.Save(0, &fs.Set)
+ stateSinkObject.Save(1, &fs.VendorID)
+ stateSinkObject.Save(2, &fs.ExtendedFamily)
+ stateSinkObject.Save(3, &fs.ExtendedModel)
+ stateSinkObject.Save(4, &fs.ProcessorType)
+ stateSinkObject.Save(5, &fs.Family)
+ stateSinkObject.Save(6, &fs.Model)
+ stateSinkObject.Save(7, &fs.SteppingID)
+ stateSinkObject.Save(8, &fs.Caches)
+ stateSinkObject.Save(9, &fs.CacheLine)
+}
+
+func (fs *FeatureSet) afterLoad() {}
+
+// +checklocksignore
+func (fs *FeatureSet) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fs.Set)
+ stateSourceObject.Load(1, &fs.VendorID)
+ stateSourceObject.Load(2, &fs.ExtendedFamily)
+ stateSourceObject.Load(3, &fs.ExtendedModel)
+ stateSourceObject.Load(4, &fs.ProcessorType)
+ stateSourceObject.Load(5, &fs.Family)
+ stateSourceObject.Load(6, &fs.Model)
+ stateSourceObject.Load(7, &fs.SteppingID)
+ stateSourceObject.Load(8, &fs.Caches)
+ stateSourceObject.Load(9, &fs.CacheLine)
+}
+
+func init() {
+ state.Register((*Cache)(nil))
+ state.Register((*FeatureSet)(nil))
+}
diff --git a/pkg/cpuid/cpuid_x86_test.go b/pkg/cpuid/cpuid_x86_test.go
deleted file mode 100644
index 92a2d9f81..000000000
--- a/pkg/cpuid/cpuid_x86_test.go
+++ /dev/null
@@ -1,244 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build 386 || amd64
-// +build 386 amd64
-
-package cpuid
-
-import (
- "testing"
-)
-
-// These are the default values of various FeatureSet fields.
-const (
- defaultVendorID = "GenuineIntel"
-
- // These processor signature defaults are derived from the values
- // listed in Intel Application Note 485 for i7/Xeon processors.
- defaultExtFamily uint8 = 0
- defaultExtModel uint8 = 1
- defaultType uint8 = 0
- defaultFamily uint8 = 0x06
- defaultModel uint8 = 0x0a
- defaultSteppingID uint8 = 0
-)
-
-// newEmptyFeatureSet creates a new FeatureSet with a sensible default model and no features.
-func newEmptyFeatureSet() *FeatureSet {
- return &FeatureSet{
- Set: make(map[Feature]bool),
- VendorID: defaultVendorID,
- ExtendedFamily: defaultExtFamily,
- ExtendedModel: defaultExtModel,
- ProcessorType: defaultType,
- Family: defaultFamily,
- Model: defaultModel,
- SteppingID: defaultSteppingID,
- }
-}
-
-var justFPU = &FeatureSet{
- Set: map[Feature]bool{
- X86FeatureFPU: true,
- }}
-
-var justFPUandPAE = &FeatureSet{
- Set: map[Feature]bool{
- X86FeatureFPU: true,
- X86FeaturePAE: true,
- }}
-
-func TestSubtract(t *testing.T) {
- if diff := justFPU.Subtract(justFPUandPAE); diff != nil {
-		t.Errorf("Got %v is not a subset of %v, want diff (%v) to be nil", justFPU, justFPUandPAE, diff)
- }
-
- if justFPUandPAE.Subtract(justFPU) == nil {
-		t.Errorf("Got %v is a subset of %v, want diff to be non-nil", justFPUandPAE, justFPU)
- }
-}
-
-// TODO(b/73346484): Run this test on a very old platform, and make sure more
-// bits are enabled than just FPU and PAE. This test currently may not detect
-// if HostFeatureSet gives back junk bits.
-func TestHostFeatureSet(t *testing.T) {
- hostFeatures := HostFeatureSet()
- if justFPUandPAE.Subtract(hostFeatures) != nil {
- t.Errorf("Got invalid feature set %v from HostFeatureSet()", hostFeatures)
- }
-}
-
-func TestHasFeature(t *testing.T) {
- if !justFPU.HasFeature(X86FeatureFPU) {
- t.Errorf("HasFeature failed, %v should contain %v", justFPU, X86FeatureFPU)
- }
-
- if justFPU.HasFeature(X86FeatureAVX) {
- t.Errorf("HasFeature failed, %v should not contain %v", justFPU, X86FeatureAVX)
- }
-}
-
-// Note: these tests are aware of and abuse internal details of FeatureSets.
-// Users of FeatureSets should not depend on this.
-func TestAdd(t *testing.T) {
- // Test a basic insertion into the FeatureSet.
- testFeatures := newEmptyFeatureSet()
- testFeatures.Add(X86FeatureCLFSH)
- if len(testFeatures.Set) != 1 {
- t.Errorf("Got length %v want 1", len(testFeatures.Set))
- }
-
- if !testFeatures.HasFeature(X86FeatureCLFSH) {
- t.Errorf("Add failed, got %v want set with %v", testFeatures, X86FeatureCLFSH)
- }
-
- // Test that duplicates are ignored.
- testFeatures.Add(X86FeatureCLFSH)
- if len(testFeatures.Set) != 1 {
- t.Errorf("Got length %v, want 1", len(testFeatures.Set))
- }
-}
-
-func TestRemove(t *testing.T) {
- // Try removing the last feature.
- testFeatures := newEmptyFeatureSet()
- testFeatures.Add(X86FeatureFPU)
- testFeatures.Add(X86FeaturePAE)
- testFeatures.Remove(X86FeaturePAE)
- if !testFeatures.HasFeature(X86FeatureFPU) || len(testFeatures.Set) != 1 || testFeatures.HasFeature(X86FeaturePAE) {
- t.Errorf("Remove failed, got %v want %v", testFeatures, justFPU)
- }
-
- // Try removing a feature not in the set.
- testFeatures.Remove(X86FeatureRDRAND)
- if !testFeatures.HasFeature(X86FeatureFPU) || len(testFeatures.Set) != 1 {
- t.Errorf("Remove failed, got %v want %v", testFeatures, justFPU)
- }
-}
-
-func TestFeatureFromString(t *testing.T) {
- f, ok := FeatureFromString("avx")
- if f != X86FeatureAVX || !ok {
- t.Errorf("got %v want avx", f)
- }
-
- f, ok = FeatureFromString("bad")
- if ok {
- t.Errorf("got %v want nothing", f)
- }
-}
-
-// This tests function 0 (eax=0), which returns the vendor ID and highest cpuid
-// function reported to be available.
-func TestEmulateIDVendorAndLength(t *testing.T) {
- testFeatures := newEmptyFeatureSet()
-
- ax, bx, cx, dx := testFeatures.EmulateID(0, 0)
- wantEax := uint32(0xd) // Highest supported cpuid function.
-
- // These magical constants are the characters of "GenuineIntel".
- // See Intel AN485 for a reference on why they are laid out like this.
- wantEbx := uint32(0x756e6547)
- wantEcx := uint32(0x6c65746e)
- wantEdx := uint32(0x49656e69)
- if wantEax != ax {
- t.Errorf("highest function failed, got %x want %x", ax, wantEax)
- }
-
- if wantEbx != bx || wantEcx != cx || wantEdx != dx {
- t.Errorf("vendor string emulation failed, bx:cx:dx, got %x:%x:%x want %x:%x:%x", bx, cx, dx, wantEbx, wantEcx, wantEdx)
- }
-}
-
-func TestEmulateIDBasicFeatures(t *testing.T) {
- // Make a minimal test feature set.
- testFeatures := newEmptyFeatureSet()
- testFeatures.Add(X86FeatureCLFSH)
- testFeatures.Add(X86FeatureAVX)
- testFeatures.CacheLine = 64
-
- ax, bx, cx, dx := testFeatures.EmulateID(1, 0)
- ECXAVXBit := uint32(1 << uint(X86FeatureAVX))
- EDXCLFlushBit := uint32(1 << uint(X86FeatureCLFSH-32)) // We adjust by 32 since it's in block 1.
-
- if EDXCLFlushBit&dx == 0 || dx&^EDXCLFlushBit != 0 {
- t.Errorf("EmulateID failed, got feature bits %x want %x", dx, testFeatures.blockMask(1))
- }
-
- if ECXAVXBit&cx == 0 || cx&^ECXAVXBit != 0 {
- t.Errorf("EmulateID failed, got feature bits %x want %x", cx, testFeatures.blockMask(0))
- }
-
- // Default signature bits, based on values for i7/Xeon.
- // See Intel AN485 for information on stepping/model bits.
- defaultSignature := uint32(0x000106a0)
- if defaultSignature != ax {
- t.Errorf("EmulateID stepping emulation failed, got %x want %x", ax, defaultSignature)
- }
-
- clflushSizeInfo := uint32(8 << 8)
- if clflushSizeInfo != bx {
- t.Errorf("EmulateID bx emulation failed, got %x want %x", bx, clflushSizeInfo)
- }
-}
-
-func TestEmulateIDExtendedFeatures(t *testing.T) {
- // Make a minimal test feature set, one bit in each extended feature word.
- testFeatures := newEmptyFeatureSet()
- testFeatures.Add(X86FeatureSMEP)
- testFeatures.Add(X86FeatureAVX512VBMI)
-
- ax, bx, cx, dx := testFeatures.EmulateID(7, 0)
- EBXSMEPBit := uint32(1 << uint(X86FeatureSMEP-2*32)) // Adjust by 2*32 since SMEP is a block 2 feature.
- ECXAVXBit := uint32(1 << uint(X86FeatureAVX512VBMI-3*32)) // We adjust by 3*32 since it's a block 3 feature.
-
- // Test that the desired bit is set and no other bits are set.
- if EBXSMEPBit&bx == 0 || bx&^EBXSMEPBit != 0 {
- t.Errorf("extended feature emulation failed, got feature bits %x want %x", bx, testFeatures.blockMask(2))
- }
-
- if ECXAVXBit&cx == 0 || cx&^ECXAVXBit != 0 {
- t.Errorf("extended feature emulation failed, got feature bits %x want %x", cx, testFeatures.blockMask(3))
- }
-
- if ax != 0 || dx != 0 {
- t.Errorf("extended feature emulation failed, ax:dx, got %x:%x want 0:0", ax, dx)
- }
-
- // Check that no subleaves other than 0 do anything.
- ax, bx, cx, dx = testFeatures.EmulateID(7, 1)
- if ax != 0 || bx != 0 || cx != 0 || dx != 0 {
-		t.Errorf("extended feature emulation failed, got %x:%x:%x:%x want 0:0:0:0", ax, bx, cx, dx)
- }
-
-}
-
-// Checks that the expected extended features are available via cpuid functions
-// 0x80000000 and up.
-func TestEmulateIDExtended(t *testing.T) {
- testFeatures := newEmptyFeatureSet()
- testFeatures.Add(X86FeatureSYSCALL)
- EDXSYSCALLBit := uint32(1 << uint(X86FeatureSYSCALL-6*32)) // Adjust by 6*32 since SYSCALL is a block 6 feature.
-
- ax, bx, cx, dx := testFeatures.EmulateID(0x80000000, 0)
- if ax != 0x80000001 || bx != 0 || cx != 0 || dx != 0 {
- t.Errorf("EmulateID extended emulation failed, ax:bx:cx:dx, got %x:%x:%x:%x want 0x80000001:0:0:0", ax, bx, cx, dx)
- }
-
- _, _, _, dx = testFeatures.EmulateID(0x80000001, 0)
- if EDXSYSCALLBit&dx == 0 || dx&^EDXSYSCALLBit != 0 {
- t.Errorf("extended feature emulation failed, got feature bits %x want %x", dx, testFeatures.blockMask(6))
- }
-}
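
Editor's note: taken together, the deleted x86 tests above document the cpuid surface this snapshot provides: HostFeatureSet snapshots the host, HasFeature and FeatureFromString query individual features, and EmulateID answers CPUID leaves from the stored set. A short sketch of that surface, restricted to x86 like the deleted test files (the import path is inferred from the repository layout):

//go:build 386 || amd64

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/cpuid"
)

func main() {
	fs := cpuid.HostFeatureSet()

	// Look a feature up by its /proc/cpuinfo flag name, then test for it.
	if f, ok := cpuid.FeatureFromString("avx"); ok {
		fmt.Println("host has AVX:", fs.HasFeature(f))
	}

	// Leaf 1 returns the processor signature in ax and feature bits in cx/dx.
	ax, bx, cx, dx := fs.EmulateID(1, 0)
	fmt.Printf("leaf 1: ax=%#x bx=%#x cx=%#x dx=%#x\n", ax, bx, cx, dx)
}
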
diff --git a/pkg/crypto/BUILD b/pkg/crypto/BUILD
deleted file mode 100644
index 08fa772ca..000000000
--- a/pkg/crypto/BUILD
+++ /dev/null
@@ -1,12 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "crypto",
- srcs = [
- "crypto.go",
- "crypto_stdlib.go",
- ],
- visibility = ["//:sandbox"],
-)
diff --git a/pkg/crypto/crypto.go b/pkg/crypto/crypto.go
deleted file mode 100644
index b26b55d37..000000000
--- a/pkg/crypto/crypto.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package crypto wraps crypto primitives.
-package crypto
diff --git a/pkg/crypto/crypto_stdlib.go b/pkg/crypto/crypto_stdlib.go
deleted file mode 100644
index 69e867386..000000000
--- a/pkg/crypto/crypto_stdlib.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.1
-// +build go1.1
-
-package crypto
-
-import (
- "crypto/ecdsa"
- "crypto/sha512"
- "math/big"
-)
-
-// EcdsaVerify verifies the signature in r, s of hash using ECDSA and the
-// public key, pub. Its return value records whether the signature is valid.
-func EcdsaVerify(pub *ecdsa.PublicKey, hash []byte, r, s *big.Int) (bool, error) {
- return ecdsa.Verify(pub, hash, r, s), nil
-}
-
-// SumSha384 returns the SHA384 checksum of the data.
-func SumSha384(data []byte) ([sha512.Size384]byte, error) {
- return sha512.Sum384(data), nil
-}
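
Editor's note: since crypto.go and crypto_stdlib.go appear in full above, their two wrappers are easy to demonstrate: SumSha384 hashes a payload and EcdsaVerify checks an ECDSA signature over such a digest. The key generation and signing below come straight from the standard library and exist only to produce something verifiable; the import path is inferred from the repository layout.

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	gvcrypto "gvisor.dev/gvisor/pkg/crypto"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
	if err != nil {
		panic(err)
	}

	// Hash the payload with the package's SHA-384 wrapper.
	digest, err := gvcrypto.SumSha384([]byte("hello, sentry"))
	if err != nil {
		panic(err)
	}

	// Sign the digest, then verify it through the package's ECDSA wrapper.
	r, s, err := ecdsa.Sign(rand.Reader, priv, digest[:])
	if err != nil {
		panic(err)
	}
	ok, err := gvcrypto.EcdsaVerify(&priv.PublicKey, digest[:], r, s)
	fmt.Println(ok, err) // true <nil>
}
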
diff --git a/pkg/errors/BUILD b/pkg/errors/BUILD
deleted file mode 100644
index 36ea5da0c..000000000
--- a/pkg/errors/BUILD
+++ /dev/null
@@ -1,10 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "errors",
- srcs = ["errors.go"],
- visibility = ["//:sandbox"],
- deps = ["//pkg/abi/linux/errno"],
-)
diff --git a/pkg/errors/errors_state_autogen.go b/pkg/errors/errors_state_autogen.go
new file mode 100644
index 000000000..fd4538438
--- /dev/null
+++ b/pkg/errors/errors_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package errors
diff --git a/pkg/errors/linuxerr/BUILD b/pkg/errors/linuxerr/BUILD
deleted file mode 100644
index 201727780..000000000
--- a/pkg/errors/linuxerr/BUILD
+++ /dev/null
@@ -1,26 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "linuxerr",
- srcs = ["linuxerr.go"],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/abi/linux/errno",
- "//pkg/errors",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "linuxerr_test",
- srcs = ["linuxerr_test.go"],
- deps = [
- ":linuxerr",
- "//pkg/abi/linux/errno",
- "//pkg/errors",
- "//pkg/syserror",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/errors/linuxerr/linuxerr_state_autogen.go b/pkg/errors/linuxerr/linuxerr_state_autogen.go
new file mode 100644
index 000000000..8106a9d9f
--- /dev/null
+++ b/pkg/errors/linuxerr/linuxerr_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package linuxerr
diff --git a/pkg/errors/linuxerr/linuxerr_test.go b/pkg/errors/linuxerr/linuxerr_test.go
deleted file mode 100644
index f09d61b02..000000000
--- a/pkg/errors/linuxerr/linuxerr_test.go
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package syserror_test
-
-import (
- "errors"
- "io"
- "io/fs"
- "syscall"
- "testing"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/abi/linux/errno"
- gErrors "gvisor.dev/gvisor/pkg/errors"
- "gvisor.dev/gvisor/pkg/errors/linuxerr"
- "gvisor.dev/gvisor/pkg/syserror"
-)
-
-var globalError error
-
-func BenchmarkAssignUnix(b *testing.B) {
- for i := b.N; i > 0; i-- {
- globalError = unix.EINVAL
- }
-}
-
-func BenchmarkAssignLinuxerr(b *testing.B) {
- for i := b.N; i > 0; i-- {
- globalError = linuxerr.EINVAL
- }
-}
-
-func BenchmarkAssignSyserror(b *testing.B) {
- for i := b.N; i > 0; i-- {
- globalError = linuxerr.ENOMSG
- }
-}
-
-func BenchmarkCompareUnix(b *testing.B) {
- globalError = unix.EAGAIN
- j := 0
- for i := b.N; i > 0; i-- {
- if globalError == unix.EINVAL {
- j++
- }
- }
-}
-
-func BenchmarkCompareLinuxerr(b *testing.B) {
- globalError = linuxerr.E2BIG
- j := 0
- for i := b.N; i > 0; i-- {
- if globalError == linuxerr.EINVAL {
- j++
- }
- }
-}
-
-func BenchmarkCompareSyserror(b *testing.B) {
- globalError = linuxerr.EAGAIN
- j := 0
- for i := b.N; i > 0; i-- {
- if globalError == linuxerr.EACCES {
- j++
- }
- }
-}
-
-func BenchmarkSwitchUnix(b *testing.B) {
- globalError = unix.EPERM
- j := 0
- for i := b.N; i > 0; i-- {
- switch globalError {
- case unix.EINVAL:
- j++
- case unix.EINTR:
- j += 2
- case unix.EAGAIN:
- j += 3
- }
- }
-}
-
-func BenchmarkSwitchLinuxerr(b *testing.B) {
- globalError = linuxerr.EPERM
- j := 0
- for i := b.N; i > 0; i-- {
- switch globalError {
- case linuxerr.EINVAL:
- j++
- case linuxerr.EINTR:
- j += 2
- case linuxerr.EAGAIN:
- j += 3
- }
- }
-}
-
-func BenchmarkSwitchSyserror(b *testing.B) {
- globalError = linuxerr.EPERM
- j := 0
- for i := b.N; i > 0; i-- {
- switch globalError {
- case linuxerr.EACCES:
- j++
- case syserror.EINTR:
- j += 2
- case linuxerr.EAGAIN:
- j += 3
- }
- }
-}
-
-func BenchmarkReturnUnix(b *testing.B) {
- var localError error
- f := func() error {
- return unix.EINVAL
- }
- for i := b.N; i > 0; i-- {
- localError = f()
- }
- if localError != nil {
- return
- }
-}
-
-func BenchmarkReturnLinuxerr(b *testing.B) {
- var localError error
- f := func() error {
- return linuxerr.EINVAL
- }
- for i := b.N; i > 0; i-- {
- localError = f()
- }
- if localError != nil {
- return
- }
-}
-
-func BenchmarkConvertUnixLinuxerr(b *testing.B) {
- var localError error
- for i := b.N; i > 0; i-- {
- localError = linuxerr.ErrorFromErrno(errno.Errno(unix.EINVAL))
- }
- if localError != nil {
- return
- }
-}
-
-func BenchmarkConvertUnixLinuxerrZero(b *testing.B) {
- var localError error
- for i := b.N; i > 0; i-- {
- localError = linuxerr.ErrorFromErrno(errno.Errno(0))
- }
- if localError != nil {
- return
- }
-}
-
-type translationTestTable struct {
- fn string
- errIn error
- syscallErrorIn unix.Errno
- expectedBool bool
- expectedTranslation unix.Errno
-}
-
-func TestErrorTranslation(t *testing.T) {
- myError := errors.New("My test error")
- myError2 := errors.New("Another test error")
- testTable := []translationTestTable{
- {"TranslateError", myError, 0, false, 0},
- {"TranslateError", myError2, 0, false, 0},
- {"AddErrorTranslation", myError, unix.EAGAIN, true, 0},
- {"AddErrorTranslation", myError, unix.EAGAIN, false, 0},
- {"AddErrorTranslation", myError, unix.EPERM, false, 0},
- {"TranslateError", myError, 0, true, unix.EAGAIN},
- {"TranslateError", myError2, 0, false, 0},
- {"AddErrorTranslation", myError2, unix.EPERM, true, 0},
- {"AddErrorTranslation", myError2, unix.EPERM, false, 0},
- {"AddErrorTranslation", myError2, unix.EAGAIN, false, 0},
- {"TranslateError", myError, 0, true, unix.EAGAIN},
- {"TranslateError", myError2, 0, true, unix.EPERM},
- }
- for _, tt := range testTable {
- switch tt.fn {
- case "TranslateError":
- err, ok := syserror.TranslateError(tt.errIn)
- if ok != tt.expectedBool {
- t.Fatalf("%v(%v) => %v expected %v", tt.fn, tt.errIn, ok, tt.expectedBool)
- } else if err != tt.expectedTranslation {
- t.Fatalf("%v(%v) (error) => %v expected %v", tt.fn, tt.errIn, err, tt.expectedTranslation)
- }
- case "AddErrorTranslation":
- ok := syserror.AddErrorTranslation(tt.errIn, tt.syscallErrorIn)
- if ok != tt.expectedBool {
- t.Fatalf("%v(%v) => %v expected %v", tt.fn, tt.errIn, ok, tt.expectedBool)
- }
- default:
- t.Fatalf("Unknown function %v", tt.fn)
- }
- }
-}
-
-func TestSyscallErrnoToErrors(t *testing.T) {
- for _, tc := range []struct {
- errno syscall.Errno
- err *gErrors.Error
- }{
- {errno: syscall.EACCES, err: linuxerr.EACCES},
- {errno: syscall.EAGAIN, err: linuxerr.EAGAIN},
- {errno: syscall.EBADF, err: linuxerr.EBADF},
- {errno: syscall.EBUSY, err: linuxerr.EBUSY},
- {errno: syscall.EDOM, err: linuxerr.EDOM},
- {errno: syscall.EEXIST, err: linuxerr.EEXIST},
- {errno: syscall.EFAULT, err: linuxerr.EFAULT},
- {errno: syscall.EFBIG, err: linuxerr.EFBIG},
- {errno: syscall.EINTR, err: linuxerr.EINTR},
- {errno: syscall.EINVAL, err: linuxerr.EINVAL},
- {errno: syscall.EIO, err: linuxerr.EIO},
- {errno: syscall.ENOTDIR, err: linuxerr.ENOTDIR},
- {errno: syscall.ENOTTY, err: linuxerr.ENOTTY},
- {errno: syscall.EPERM, err: linuxerr.EPERM},
- {errno: syscall.EPIPE, err: linuxerr.EPIPE},
- {errno: syscall.ESPIPE, err: linuxerr.ESPIPE},
- {errno: syscall.EWOULDBLOCK, err: linuxerr.EAGAIN},
- } {
- t.Run(tc.errno.Error(), func(t *testing.T) {
- e := linuxerr.ErrorFromErrno(errno.Errno(tc.errno))
- if e != tc.err {
- t.Fatalf("Mismatch errors: want: %+v (%d) got: %+v %d", tc.err, tc.err.Errno(), e, e.Errno())
- }
- })
- }
-}
-
-// TestEqualsMethod tests that the Equals method correctly compares syserror,
-// unix.Errno and linuxerr.
-// TODO (b/34162363): Remove this.
-func TestEqualsMethod(t *testing.T) {
- for _, tc := range []struct {
- name string
- linuxErr []*gErrors.Error
- err []error
- equal bool
- }{
- {
- name: "compare nil",
- linuxErr: []*gErrors.Error{nil, linuxerr.NOERROR},
- err: []error{nil, linuxerr.NOERROR, unix.Errno(0)},
- equal: true,
- },
- {
- name: "linuxerr nil error not",
- linuxErr: []*gErrors.Error{nil, linuxerr.NOERROR},
- err: []error{unix.Errno(1), linuxerr.EPERM, linuxerr.EACCES},
- equal: false,
- },
- {
- name: "linuxerr not nil error nil",
- linuxErr: []*gErrors.Error{linuxerr.ENOENT},
- err: []error{nil, unix.Errno(0), linuxerr.NOERROR},
- equal: false,
- },
- {
- name: "equal errors",
- linuxErr: []*gErrors.Error{linuxerr.ESRCH},
- err: []error{linuxerr.ESRCH, linuxerr.ESRCH, unix.Errno(linuxerr.ESRCH.Errno())},
- equal: true,
- },
- {
- name: "unequal errors",
- linuxErr: []*gErrors.Error{linuxerr.ENOENT},
- err: []error{linuxerr.ESRCH, linuxerr.ESRCH, unix.Errno(linuxerr.ESRCH.Errno())},
- equal: false,
- },
- {
- name: "other error",
- linuxErr: []*gErrors.Error{nil, linuxerr.NOERROR, linuxerr.E2BIG, linuxerr.EINVAL},
- err: []error{fs.ErrInvalid, io.EOF},
- equal: false,
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- for _, le := range tc.linuxErr {
- for _, e := range tc.err {
- if linuxerr.Equals(le, e) != tc.equal {
- t.Fatalf("Expected %t from Equals method for linuxerr: %s %T and error: %s %T", tc.equal, le, le, e, e)
- }
- }
- }
- })
- }
-}
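
Editor's note: the deleted benchmarks and tests above compare three ways of handling errno-style errors; the essentials of the linuxerr half are as follows. Canonical errors are package-level singletons, ErrorFromErrno maps a raw errno to its singleton, and Equals bridges comparisons with unix.Errno values. A short sketch using only calls present in the deleted test:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
	"gvisor.dev/gvisor/pkg/abi/linux/errno"
	"gvisor.dev/gvisor/pkg/errors/linuxerr"
)

func main() {
	err := linuxerr.ErrorFromErrno(errno.Errno(unix.EINVAL))

	fmt.Println(err == linuxerr.EINVAL)                        // true: canonical values are singletons
	fmt.Println(linuxerr.Equals(linuxerr.EINVAL, unix.EINVAL)) // true: cross-type comparison
	fmt.Println(err.Errno())                                   // 22 on Linux
}
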
diff --git a/pkg/eventchannel/BUILD b/pkg/eventchannel/BUILD
deleted file mode 100644
index ad15d3672..000000000
--- a/pkg/eventchannel/BUILD
+++ /dev/null
@@ -1,40 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test", "proto_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "eventchannel",
- srcs = [
- "event.go",
- "event_any.go",
- "rate.go",
- ],
- visibility = ["//:sandbox"],
- deps = [
- ":eventchannel_go_proto",
- "//pkg/errors/linuxerr",
- "//pkg/log",
- "//pkg/sync",
- "//pkg/unet",
- "@org_golang_google_protobuf//encoding/prototext:go_default_library",
- "@org_golang_google_protobuf//proto:go_default_library",
- "@org_golang_google_protobuf//types/known/anypb:go_default_library",
- "@org_golang_x_time//rate:go_default_library",
- ],
-)
-
-proto_library(
- name = "eventchannel",
- srcs = ["event.proto"],
- visibility = ["//:sandbox"],
-)
-
-go_test(
- name = "eventchannel_test",
- srcs = ["event_test.go"],
- library = ":eventchannel",
- deps = [
- "//pkg/sync",
- "@org_golang_google_protobuf//proto:go_default_library",
- ],
-)
diff --git a/pkg/eventchannel/event.proto b/pkg/eventchannel/event.proto
deleted file mode 100644
index 4b24ac47c..000000000
--- a/pkg/eventchannel/event.proto
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package gvisor;
-
-// DebugEvent encapsulates any other event protobuf in text format. This is
-// useful because clients reading events emitted this way do not need to link
-// the event protobufs to display them in a human-readable format.
-message DebugEvent {
- // Name of the inner message.
- string name = 1;
- // Text representation of the inner message content.
- string text = 2;
-}
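
Editor's note: as the comment in the deleted proto explains, DebugEvent wraps an arbitrary event as text so readers need not link the inner protobuf. Below is a sketch of producing one with the generated Go type and prototext; the Go import path is inferred from the BUILD target name and may differ.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"

	pb "gvisor.dev/gvisor/pkg/eventchannel/eventchannel_go_proto"
)

func main() {
	ev := &pb.DebugEvent{
		Name: "gvisor.SomeInnerEvent",            // name of the wrapped message (hypothetical)
		Text: `field: "human-readable contents"`, // its prototext rendering
	}
	fmt.Println(prototext.Format(ev))
}
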
diff --git a/pkg/eventchannel/event_test.go b/pkg/eventchannel/event_test.go
deleted file mode 100644
index 0dd408f76..000000000
--- a/pkg/eventchannel/event_test.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package eventchannel
-
-import (
- "fmt"
- "testing"
- "time"
-
- "google.golang.org/protobuf/proto"
- "gvisor.dev/gvisor/pkg/sync"
-)
-
-// testEmitter is an emitter that can be used in tests. It records all events
-// emitted, and whether it has been closed.
-type testEmitter struct {
- // mu protects all fields below.
- mu sync.Mutex
-
- // events contains all emitted events.
- events []proto.Message
-
- // closed records whether Close() was called.
- closed bool
-}
-
-// Emit implements Emitter.Emit.
-func (te *testEmitter) Emit(msg proto.Message) (bool, error) {
- te.mu.Lock()
- defer te.mu.Unlock()
- te.events = append(te.events, msg)
- return false, nil
-}
-
-// Close implements Emitter.Close.
-func (te *testEmitter) Close() error {
- te.mu.Lock()
- defer te.mu.Unlock()
- if te.closed {
-		return fmt.Errorf("Close called twice")
- }
- te.closed = true
- return nil
-}
-
-// testMessage implements proto.Message for testing.
-type testMessage struct {
- proto.Message
-
- // name is the name of the message, used by tests to compare messages.
- name string
-}
-
-func TestMultiEmitter(t *testing.T) {
- // Create three testEmitters, tied together in a multiEmitter.
- me := &multiEmitter{}
- var emitters []*testEmitter
- for i := 0; i < 3; i++ {
- te := &testEmitter{}
- emitters = append(emitters, te)
- me.AddEmitter(te)
- }
-
- // Emit three messages to multiEmitter.
- names := []string{"foo", "bar", "baz"}
- for _, name := range names {
- m := testMessage{name: name}
- if _, err := me.Emit(m); err != nil {
- t.Fatalf("me.Emit(%v) failed: %v", m, err)
- }
- }
-
- // All three emitters should have all three events.
- for _, te := range emitters {
- if got, want := len(te.events), len(names); got != want {
- t.Fatalf("emitter got %d events, want %d", got, want)
- }
- for i, name := range names {
- if got := te.events[i].(testMessage).name; got != name {
- t.Errorf("emitter got message with name %q, want %q", got, name)
- }
- }
- }
-
- // Close multiEmitter.
- if err := me.Close(); err != nil {
- t.Fatalf("me.Close() failed: %v", err)
- }
-
- // All testEmitters should be closed.
- for _, te := range emitters {
- if !te.closed {
- t.Errorf("te.closed got false, want true")
- }
- }
-}
-
-func TestRateLimitedEmitter(t *testing.T) {
-	// Create a RateLimitedEmitter that wraps a testEmitter.
- te := &testEmitter{}
- max := float64(5) // events per second
- burst := 10 // events
- rle := RateLimitedEmitterFrom(te, max, burst)
-
- // Send 50 messages in one shot.
- for i := 0; i < 50; i++ {
- if _, err := rle.Emit(testMessage{}); err != nil {
- t.Fatalf("rle.Emit failed: %v", err)
- }
- }
-
- // We should have received only 10 messages.
- if got, want := len(te.events), 10; got != want {
- t.Errorf("got %d events, want %d", got, want)
- }
-
- // Sleep for a second and then send another 50.
- time.Sleep(1 * time.Second)
- for i := 0; i < 50; i++ {
- if _, err := rle.Emit(testMessage{}); err != nil {
- t.Fatalf("rle.Emit failed: %v", err)
- }
- }
-
-	// We should have at least 5 more messages, plus maybe a few more if the
- // test ran slowly.
- got, wantAtLeast, wantAtMost := len(te.events), 15, 20
- if got < wantAtLeast {
- t.Errorf("got %d events, want at least %d", got, wantAtLeast)
- }
- if got > wantAtMost {
- t.Errorf("got %d events, want at most %d", got, wantAtMost)
- }
-}
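
Editor's note: the deleted test above is also the clearest description of the emitter plumbing: anything implementing Emit and Close is an Emitter, and RateLimitedEmitterFrom wraps one with a token bucket (events per second plus a burst), silently dropping whatever exceeds it. The sketch below mirrors the test's approach; countingEmitter and countMessage are stand-ins of mine, not part of the package.

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"gvisor.dev/gvisor/pkg/eventchannel"
)

// countingEmitter counts the events that get through the rate limiter.
type countingEmitter struct{ n int }

func (c *countingEmitter) Emit(msg proto.Message) (bool, error) { c.n++; return false, nil }
func (c *countingEmitter) Close() error                         { return nil }

// countMessage mirrors the deleted test's trick of embedding proto.Message so
// a bare struct satisfies the interface; real callers emit generated protos.
type countMessage struct{ proto.Message }

func main() {
	ce := &countingEmitter{}
	rle := eventchannel.RateLimitedEmitterFrom(ce, 5 /* events per second */, 10 /* burst */)

	for i := 0; i < 50; i++ {
		if _, err := rle.Emit(countMessage{}); err != nil {
			panic(err)
		}
	}
	fmt.Println(ce.n) // 10: only the burst is delivered immediately
}
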
diff --git a/pkg/eventchannel/eventchannel_go_proto/event.pb.go b/pkg/eventchannel/eventchannel_go_proto/event.pb.go
new file mode 100644
index 000000000..1d0812479
--- /dev/null
+++ b/pkg/eventchannel/eventchannel_go_proto/event.pb.go
@@ -0,0 +1,156 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.13.0
+// source: pkg/eventchannel/event.proto
+
+package gvisor
+
+import (
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+type DebugEvent struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Text string `protobuf:"bytes,2,opt,name=text,proto3" json:"text,omitempty"`
+}
+
+func (x *DebugEvent) Reset() {
+ *x = DebugEvent{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_eventchannel_event_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DebugEvent) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DebugEvent) ProtoMessage() {}
+
+func (x *DebugEvent) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_eventchannel_event_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DebugEvent.ProtoReflect.Descriptor instead.
+func (*DebugEvent) Descriptor() ([]byte, []int) {
+ return file_pkg_eventchannel_event_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *DebugEvent) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *DebugEvent) GetText() string {
+ if x != nil {
+ return x.Text
+ }
+ return ""
+}
+
+var File_pkg_eventchannel_event_proto protoreflect.FileDescriptor
+
+var file_pkg_eventchannel_event_proto_rawDesc = []byte{
+ 0x0a, 0x1c, 0x70, 0x6b, 0x67, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x63, 0x68, 0x61, 0x6e, 0x6e,
+ 0x65, 0x6c, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06,
+ 0x67, 0x76, 0x69, 0x73, 0x6f, 0x72, 0x22, 0x34, 0x0a, 0x0a, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45,
+ 0x76, 0x65, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x78, 0x74,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x65, 0x78, 0x74, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_pkg_eventchannel_event_proto_rawDescOnce sync.Once
+ file_pkg_eventchannel_event_proto_rawDescData = file_pkg_eventchannel_event_proto_rawDesc
+)
+
+func file_pkg_eventchannel_event_proto_rawDescGZIP() []byte {
+ file_pkg_eventchannel_event_proto_rawDescOnce.Do(func() {
+ file_pkg_eventchannel_event_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_eventchannel_event_proto_rawDescData)
+ })
+ return file_pkg_eventchannel_event_proto_rawDescData
+}
+
+var file_pkg_eventchannel_event_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_pkg_eventchannel_event_proto_goTypes = []interface{}{
+ (*DebugEvent)(nil), // 0: gvisor.DebugEvent
+}
+var file_pkg_eventchannel_event_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_pkg_eventchannel_event_proto_init() }
+func file_pkg_eventchannel_event_proto_init() {
+ if File_pkg_eventchannel_event_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_pkg_eventchannel_event_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DebugEvent); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_pkg_eventchannel_event_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_pkg_eventchannel_event_proto_goTypes,
+ DependencyIndexes: file_pkg_eventchannel_event_proto_depIdxs,
+ MessageInfos: file_pkg_eventchannel_event_proto_msgTypes,
+ }.Build()
+ File_pkg_eventchannel_event_proto = out.File
+ file_pkg_eventchannel_event_proto_rawDesc = nil
+ file_pkg_eventchannel_event_proto_goTypes = nil
+ file_pkg_eventchannel_event_proto_depIdxs = nil
+}
diff --git a/pkg/eventchannel/eventchannel_state_autogen.go b/pkg/eventchannel/eventchannel_state_autogen.go
new file mode 100644
index 000000000..ccbae06f5
--- /dev/null
+++ b/pkg/eventchannel/eventchannel_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build go1.1
+
+package eventchannel
diff --git a/pkg/fd/BUILD b/pkg/fd/BUILD
deleted file mode 100644
index c7abd9655..000000000
--- a/pkg/fd/BUILD
+++ /dev/null
@@ -1,18 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "fd",
- srcs = ["fd.go"],
- visibility = ["//visibility:public"],
- deps = ["@org_golang_x_sys//unix:go_default_library"],
-)
-
-go_test(
- name = "fd_test",
- size = "small",
- srcs = ["fd_test.go"],
- library = ":fd",
- deps = ["@org_golang_x_sys//unix:go_default_library"],
-)
diff --git a/pkg/fd/fd_state_autogen.go b/pkg/fd/fd_state_autogen.go
new file mode 100644
index 000000000..5ad412976
--- /dev/null
+++ b/pkg/fd/fd_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package fd
diff --git a/pkg/fd/fd_test.go b/pkg/fd/fd_test.go
deleted file mode 100644
index da71364d4..000000000
--- a/pkg/fd/fd_test.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fd
-
-import (
- "math"
- "os"
- "testing"
-
- "golang.org/x/sys/unix"
-)
-
-func TestSetNegOne(t *testing.T) {
- type entry struct {
- name string
- file *FD
- fn func() error
- }
- var tests []entry
-
- fd, err := unix.Socket(unix.AF_UNIX, unix.SOCK_STREAM|unix.SOCK_CLOEXEC, 0)
- if err != nil {
- t.Fatal("unix.Socket:", err)
- }
- f1 := New(fd)
- tests = append(tests, entry{
- "Release",
- f1,
- func() error {
- return unix.Close(f1.Release())
- },
- })
-
- fd, err = unix.Socket(unix.AF_UNIX, unix.SOCK_STREAM|unix.SOCK_CLOEXEC, 0)
- if err != nil {
- t.Fatal("unix.Socket:", err)
- }
- f2 := New(fd)
- tests = append(tests, entry{
- "Close",
- f2,
- f2.Close,
- })
-
- for _, test := range tests {
- if err := test.fn(); err != nil {
- t.Errorf("%s: %v", test.name, err)
- continue
- }
- if fd := test.file.FD(); fd != -1 {
- t.Errorf("%s: got FD() = %d, want = -1", test.name, fd)
- }
- }
-}
-
-func TestStartsNegOne(t *testing.T) {
- type entry struct {
- name string
- file *FD
- }
-
- tests := []entry{
- {"-1", New(-1)},
- {"-2", New(-2)},
- {"MinInt32", New(math.MinInt32)},
- {"MinInt64", New(math.MinInt64)},
- }
-
- for _, test := range tests {
- if fd := test.file.FD(); fd != -1 {
- t.Errorf("%s: got FD() = %d, want = -1", test.name, fd)
- }
- }
-}
-
-func TestFileDotFile(t *testing.T) {
- fd, err := unix.Socket(unix.AF_UNIX, unix.SOCK_STREAM|unix.SOCK_CLOEXEC, 0)
- if err != nil {
- t.Fatal("unix.Socket:", err)
- }
-
- f := New(fd)
- of, err := f.File()
- if err != nil {
- t.Fatalf("File got err %v want nil", err)
- }
-
- if ofd, nfd := int(of.Fd()), f.FD(); ofd == nfd || ofd == -1 {
- // Try not to double close the FD.
- f.Release()
-
- t.Fatalf("got %#v.File().Fd() = %d, want new FD", f, ofd)
- }
-
- f.Close()
- of.Close()
-}
-
-func TestFileDotFileError(t *testing.T) {
- f := &FD{ReadWriter{-2}}
-
- if of, err := f.File(); err == nil {
- t.Errorf("File %v got nil err want non-nil", of)
- of.Close()
- }
-}
-
-func TestNewFromFile(t *testing.T) {
- f, err := NewFromFile(os.Stdin)
- if err != nil {
- t.Fatalf("NewFromFile got err %v want nil", err)
- }
- if nfd, ofd := f.FD(), int(os.Stdin.Fd()); nfd == -1 || nfd == ofd {
- t.Errorf("got FD() = %d, want = new FD (old FD was %d)", nfd, ofd)
- }
- f.Close()
-}
-
-func TestNewFromFileError(t *testing.T) {
- f, err := NewFromFile(nil)
- if err == nil {
- t.Errorf("NewFromFile got %v with nil err want non-nil", f)
- f.Close()
- }
-}
diff --git a/pkg/fdchannel/BUILD b/pkg/fdchannel/BUILD
deleted file mode 100644
index d240132e5..000000000
--- a/pkg/fdchannel/BUILD
+++ /dev/null
@@ -1,24 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-licenses(["notice"])
-
-go_library(
- name = "fdchannel",
- srcs = ["fdchannel_unsafe.go"],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/gohacks",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "fdchannel_test",
- size = "small",
- srcs = ["fdchannel_test.go"],
- library = ":fdchannel",
- deps = [
- "//pkg/sync",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/fdchannel/fdchannel_test.go b/pkg/fdchannel/fdchannel_test.go
deleted file mode 100644
index 9616e30c5..000000000
--- a/pkg/fdchannel/fdchannel_test.go
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fdchannel
-
-import (
- "io/ioutil"
- "os"
- "syscall"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/sync"
-)
-
-func TestSendRecvFD(t *testing.T) {
- sendFile, err := ioutil.TempFile("", "fdchannel_test_")
- if err != nil {
- t.Fatalf("failed to create temporary file: %v", err)
- }
- defer sendFile.Close()
-
- chanFDs, err := NewConnectedSockets()
- if err != nil {
- t.Fatalf("failed to create fdchannel sockets: %v", err)
- }
- sendEP := NewEndpoint(chanFDs[0])
- defer sendEP.Destroy()
- recvEP := NewEndpoint(chanFDs[1])
- defer recvEP.Destroy()
-
- recvFD, err := recvEP.RecvFDNonblock()
- if err != unix.EAGAIN && err != unix.EWOULDBLOCK {
-	t.Errorf("RecvFDNonblock before SendFD: got (%d, %v), wanted (<unspecified>, EAGAIN or EWOULDBLOCK)", recvFD, err)
- }
-
- if err := sendEP.SendFD(int(sendFile.Fd())); err != nil {
- t.Fatalf("SendFD failed: %v", err)
- }
- recvFD, err = recvEP.RecvFD()
- if err != nil {
- t.Fatalf("RecvFD failed: %v", err)
- }
- recvFile := os.NewFile(uintptr(recvFD), "received file")
- defer recvFile.Close()
-
- sendInfo, err := sendFile.Stat()
- if err != nil {
- t.Fatalf("failed to stat sent file: %v", err)
- }
- sendInfoSys := sendInfo.Sys()
- sendStat, ok := sendInfoSys.(*syscall.Stat_t)
- if !ok {
- t.Fatalf("sent file's FileInfo is backed by unknown type %T", sendInfoSys)
- }
-
- recvInfo, err := recvFile.Stat()
- if err != nil {
- t.Fatalf("failed to stat received file: %v", err)
- }
- recvInfoSys := recvInfo.Sys()
- recvStat, ok := recvInfoSys.(*syscall.Stat_t)
- if !ok {
- t.Fatalf("received file's FileInfo is backed by unknown type %T", recvInfoSys)
- }
-
- if sendStat.Dev != recvStat.Dev || sendStat.Ino != recvStat.Ino {
- t.Errorf("sent file (dev=%d, ino=%d) does not match received file (dev=%d, ino=%d)", sendStat.Dev, sendStat.Ino, recvStat.Dev, recvStat.Ino)
- }
-}
-
-func TestShutdownThenRecvFD(t *testing.T) {
- sendFile, err := ioutil.TempFile("", "fdchannel_test_")
- if err != nil {
- t.Fatalf("failed to create temporary file: %v", err)
- }
- defer sendFile.Close()
-
- chanFDs, err := NewConnectedSockets()
- if err != nil {
- t.Fatalf("failed to create fdchannel sockets: %v", err)
- }
- sendEP := NewEndpoint(chanFDs[0])
- defer sendEP.Destroy()
- recvEP := NewEndpoint(chanFDs[1])
- defer recvEP.Destroy()
-
- recvEP.Shutdown()
- if _, err := recvEP.RecvFD(); err == nil {
- t.Error("RecvFD succeeded unexpectedly")
- }
-}
-
-func TestRecvFDThenShutdown(t *testing.T) {
- sendFile, err := ioutil.TempFile("", "fdchannel_test_")
- if err != nil {
- t.Fatalf("failed to create temporary file: %v", err)
- }
- defer sendFile.Close()
-
- chanFDs, err := NewConnectedSockets()
- if err != nil {
- t.Fatalf("failed to create fdchannel sockets: %v", err)
- }
- sendEP := NewEndpoint(chanFDs[0])
- defer sendEP.Destroy()
- recvEP := NewEndpoint(chanFDs[1])
- defer recvEP.Destroy()
-
- var receiverWG sync.WaitGroup
- receiverWG.Add(1)
- go func() {
- defer receiverWG.Done()
- if _, err := recvEP.RecvFD(); err == nil {
- t.Error("RecvFD succeeded unexpectedly")
- }
- }()
- defer receiverWG.Wait()
- time.Sleep(time.Second) // to ensure recvEP.RecvFD() has blocked
- recvEP.Shutdown()
-}
diff --git a/pkg/fdchannel/fdchannel_unsafe_state_autogen.go b/pkg/fdchannel/fdchannel_unsafe_state_autogen.go
new file mode 100644
index 000000000..61447d773
--- /dev/null
+++ b/pkg/fdchannel/fdchannel_unsafe_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package fdchannel
diff --git a/pkg/fdnotifier/BUILD b/pkg/fdnotifier/BUILD
deleted file mode 100644
index 235dcc490..000000000
--- a/pkg/fdnotifier/BUILD
+++ /dev/null
@@ -1,17 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "fdnotifier",
- srcs = [
- "fdnotifier.go",
- "poll_unsafe.go",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/sync",
- "//pkg/waiter",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/fdnotifier/fdnotifier_state_autogen.go b/pkg/fdnotifier/fdnotifier_state_autogen.go
new file mode 100644
index 000000000..527d44091
--- /dev/null
+++ b/pkg/fdnotifier/fdnotifier_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build linux
+
+package fdnotifier
diff --git a/pkg/fdnotifier/fdnotifier_unsafe_state_autogen.go b/pkg/fdnotifier/fdnotifier_unsafe_state_autogen.go
new file mode 100644
index 000000000..527d44091
--- /dev/null
+++ b/pkg/fdnotifier/fdnotifier_unsafe_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build linux
+
+package fdnotifier
diff --git a/pkg/flipcall/BUILD b/pkg/flipcall/BUILD
deleted file mode 100644
index c810c7946..000000000
--- a/pkg/flipcall/BUILD
+++ /dev/null
@@ -1,34 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-licenses(["notice"])
-
-go_library(
- name = "flipcall",
- srcs = [
- "ctrl_futex.go",
- "flipcall.go",
- "flipcall_unsafe.go",
- "futex_linux.go",
- "io.go",
- "packet_window.go",
- ],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/log",
- "//pkg/memutil",
- "//pkg/sync",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "flipcall_test",
- size = "small",
- srcs = [
- "flipcall_example_test.go",
- "flipcall_test.go",
- ],
- library = ":flipcall",
- deps = ["//pkg/sync"],
-)
diff --git a/pkg/flipcall/flipcall_example_test.go b/pkg/flipcall/flipcall_example_test.go
deleted file mode 100644
index 2e28a149a..000000000
--- a/pkg/flipcall/flipcall_example_test.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package flipcall
-
-import (
- "bytes"
- "fmt"
-
- "gvisor.dev/gvisor/pkg/sync"
-)
-
-func Example() {
- const (
- reqPrefix = "request "
- respPrefix = "response "
- count = 3
- maxMessageLen = len(respPrefix) + 1 // 1 digit
- )
-
- pwa, err := NewPacketWindowAllocator()
- if err != nil {
- panic(err)
- }
- defer pwa.Destroy()
- pwd, err := pwa.Allocate(PacketWindowLengthForDataCap(uint32(maxMessageLen)))
- if err != nil {
- panic(err)
- }
- var clientEP Endpoint
- if err := clientEP.Init(ClientSide, pwd); err != nil {
- panic(err)
- }
- defer clientEP.Destroy()
- var serverEP Endpoint
- if err := serverEP.Init(ServerSide, pwd); err != nil {
- panic(err)
- }
- defer serverEP.Destroy()
-
- var serverRun sync.WaitGroup
- serverRun.Add(1)
- go func() {
- defer serverRun.Done()
- i := 0
- var buf bytes.Buffer
- // wait for first request
- n, err := serverEP.RecvFirst()
- if err != nil {
- return
- }
- for {
- // read request
- buf.Reset()
- buf.Write(serverEP.Data()[:n])
- fmt.Println(buf.String())
- // write response
- buf.Reset()
- fmt.Fprintf(&buf, "%s%d", respPrefix, i)
- copy(serverEP.Data(), buf.Bytes())
- // send response and wait for next request
- n, err = serverEP.SendRecv(uint32(buf.Len()))
- if err != nil {
- return
- }
- i++
- }
- }()
- defer func() {
- serverEP.Shutdown()
- serverRun.Wait()
- }()
-
- // establish connection as client
- if err := clientEP.Connect(); err != nil {
- panic(err)
- }
- var buf bytes.Buffer
- for i := 0; i < count; i++ {
- // write request
- buf.Reset()
- fmt.Fprintf(&buf, "%s%d", reqPrefix, i)
- copy(clientEP.Data(), buf.Bytes())
- // send request and wait for response
- n, err := clientEP.SendRecv(uint32(buf.Len()))
- if err != nil {
- panic(err)
- }
- // read response
- buf.Reset()
- buf.Write(clientEP.Data()[:n])
- fmt.Println(buf.String())
- }
-
- // Output:
- // request 0
- // response 0
- // request 1
- // response 1
- // request 2
- // response 2
-}
diff --git a/pkg/flipcall/flipcall_linux_state_autogen.go b/pkg/flipcall/flipcall_linux_state_autogen.go
new file mode 100644
index 000000000..ce37ac4e1
--- /dev/null
+++ b/pkg/flipcall/flipcall_linux_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build linux
+
+package flipcall
diff --git a/pkg/flipcall/flipcall_state_autogen.go b/pkg/flipcall/flipcall_state_autogen.go
new file mode 100644
index 000000000..0078f96e3
--- /dev/null
+++ b/pkg/flipcall/flipcall_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build go1.1
+
+package flipcall
diff --git a/pkg/flipcall/flipcall_test.go b/pkg/flipcall/flipcall_test.go
deleted file mode 100644
index 33fd55a44..000000000
--- a/pkg/flipcall/flipcall_test.go
+++ /dev/null
@@ -1,405 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package flipcall
-
-import (
- "runtime"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/sync"
-)
-
-var testPacketWindowSize = pageSize
-
-type testConnection struct {
- pwa PacketWindowAllocator
- clientEP Endpoint
- serverEP Endpoint
-}
-
-func newTestConnectionWithOptions(tb testing.TB, clientOpts, serverOpts []EndpointOption) *testConnection {
- c := &testConnection{}
- if err := c.pwa.Init(); err != nil {
- tb.Fatalf("failed to create PacketWindowAllocator: %v", err)
- }
- pwd, err := c.pwa.Allocate(testPacketWindowSize)
- if err != nil {
- c.pwa.Destroy()
- tb.Fatalf("PacketWindowAllocator.Allocate() failed: %v", err)
- }
- if err := c.clientEP.Init(ClientSide, pwd, clientOpts...); err != nil {
- c.pwa.Destroy()
- tb.Fatalf("failed to create client Endpoint: %v", err)
- }
- if err := c.serverEP.Init(ServerSide, pwd, serverOpts...); err != nil {
- c.pwa.Destroy()
- c.clientEP.Destroy()
- tb.Fatalf("failed to create server Endpoint: %v", err)
- }
- return c
-}
-
-func newTestConnection(tb testing.TB) *testConnection {
- return newTestConnectionWithOptions(tb, nil, nil)
-}
-
-func (c *testConnection) destroy() {
- c.pwa.Destroy()
- c.clientEP.Destroy()
- c.serverEP.Destroy()
-}
-
-func testSendRecv(t *testing.T, c *testConnection) {
- // This shared variable is used to confirm that synchronization between
- // flipcall endpoints is visible to the Go race detector.
- state := 0
- var serverRun sync.WaitGroup
- serverRun.Add(1)
- go func() {
- defer serverRun.Done()
- t.Logf("server Endpoint waiting for packet 1")
- if _, err := c.serverEP.RecvFirst(); err != nil {
- t.Errorf("server Endpoint.RecvFirst() failed: %v", err)
- return
- }
- state++
- if state != 2 {
- t.Errorf("shared state counter: got %d, wanted 2", state)
- }
- t.Logf("server Endpoint got packet 1, sending packet 2 and waiting for packet 3")
- if _, err := c.serverEP.SendRecv(0); err != nil {
- t.Errorf("server Endpoint.SendRecv() failed: %v", err)
- return
- }
- state++
- if state != 4 {
- t.Errorf("shared state counter: got %d, wanted 4", state)
- }
- t.Logf("server Endpoint got packet 3")
- }()
- defer func() {
- // Ensure that the server goroutine is cleaned up before
- // c.serverEP.Destroy(), even if the test fails.
- c.serverEP.Shutdown()
- serverRun.Wait()
- }()
-
- t.Logf("client Endpoint establishing connection")
- if err := c.clientEP.Connect(); err != nil {
- t.Fatalf("client Endpoint.Connect() failed: %v", err)
- }
- state++
- if state != 1 {
- t.Errorf("shared state counter: got %d, wanted 1", state)
- }
- t.Logf("client Endpoint sending packet 1 and waiting for packet 2")
- if _, err := c.clientEP.SendRecv(0); err != nil {
- t.Fatalf("client Endpoint.SendRecv() failed: %v", err)
- }
- state++
- if state != 3 {
- t.Errorf("shared state counter: got %d, wanted 3", state)
- }
- t.Logf("client Endpoint got packet 2, sending packet 3")
- if err := c.clientEP.SendLast(0); err != nil {
- t.Fatalf("client Endpoint.SendLast() failed: %v", err)
- }
- t.Logf("waiting for server goroutine to complete")
- serverRun.Wait()
-}
-
-func TestSendRecv(t *testing.T) {
- c := newTestConnection(t)
- defer c.destroy()
- testSendRecv(t, c)
-}
-
-func testShutdownBeforeConnect(t *testing.T, c *testConnection, remoteShutdown bool) {
- if remoteShutdown {
- c.serverEP.Shutdown()
- } else {
- c.clientEP.Shutdown()
- }
- if err := c.clientEP.Connect(); err == nil {
- t.Errorf("client Endpoint.Connect() succeeded unexpectedly")
- }
-}
-
-func TestShutdownBeforeConnectLocal(t *testing.T) {
- c := newTestConnection(t)
- defer c.destroy()
- testShutdownBeforeConnect(t, c, false)
-}
-
-func TestShutdownBeforeConnectRemote(t *testing.T) {
- c := newTestConnection(t)
- defer c.destroy()
- testShutdownBeforeConnect(t, c, true)
-}
-
-func testShutdownDuringConnect(t *testing.T, c *testConnection, remoteShutdown bool) {
- var clientRun sync.WaitGroup
- clientRun.Add(1)
- go func() {
- defer clientRun.Done()
- if err := c.clientEP.Connect(); err == nil {
- t.Errorf("client Endpoint.Connect() succeeded unexpectedly")
- }
- }()
- time.Sleep(time.Second) // to allow c.clientEP.Connect() to block
- if remoteShutdown {
- c.serverEP.Shutdown()
- } else {
- c.clientEP.Shutdown()
- }
- clientRun.Wait()
-}
-
-func TestShutdownDuringConnectLocal(t *testing.T) {
- c := newTestConnection(t)
- defer c.destroy()
- testShutdownDuringConnect(t, c, false)
-}
-
-func TestShutdownDuringConnectRemote(t *testing.T) {
- c := newTestConnection(t)
- defer c.destroy()
- testShutdownDuringConnect(t, c, true)
-}
-
-func testShutdownBeforeRecvFirst(t *testing.T, c *testConnection, remoteShutdown bool) {
- if remoteShutdown {
- c.clientEP.Shutdown()
- } else {
- c.serverEP.Shutdown()
- }
- if _, err := c.serverEP.RecvFirst(); err == nil {
- t.Errorf("server Endpoint.RecvFirst() succeeded unexpectedly")
- }
-}
-
-func TestShutdownBeforeRecvFirstLocal(t *testing.T) {
- c := newTestConnection(t)
- defer c.destroy()
- testShutdownBeforeRecvFirst(t, c, false)
-}
-
-func TestShutdownBeforeRecvFirstRemote(t *testing.T) {
- c := newTestConnection(t)
- defer c.destroy()
- testShutdownBeforeRecvFirst(t, c, true)
-}
-
-func testShutdownDuringRecvFirstBeforeConnect(t *testing.T, c *testConnection, remoteShutdown bool) {
- var serverRun sync.WaitGroup
- serverRun.Add(1)
- go func() {
- defer serverRun.Done()
- if _, err := c.serverEP.RecvFirst(); err == nil {
- t.Errorf("server Endpoint.RecvFirst() succeeded unexpectedly")
- }
- }()
- time.Sleep(time.Second) // to allow c.serverEP.RecvFirst() to block
- if remoteShutdown {
- c.clientEP.Shutdown()
- } else {
- c.serverEP.Shutdown()
- }
- serverRun.Wait()
-}
-
-func TestShutdownDuringRecvFirstBeforeConnectLocal(t *testing.T) {
- c := newTestConnection(t)
- defer c.destroy()
- testShutdownDuringRecvFirstBeforeConnect(t, c, false)
-}
-
-func TestShutdownDuringRecvFirstBeforeConnectRemote(t *testing.T) {
- c := newTestConnection(t)
- defer c.destroy()
- testShutdownDuringRecvFirstBeforeConnect(t, c, true)
-}
-
-func testShutdownDuringRecvFirstAfterConnect(t *testing.T, c *testConnection, remoteShutdown bool) {
- var serverRun sync.WaitGroup
- serverRun.Add(1)
- go func() {
- defer serverRun.Done()
- if _, err := c.serverEP.RecvFirst(); err == nil {
- t.Errorf("server Endpoint.RecvFirst() succeeded unexpectedly")
- }
- }()
- defer func() {
- // Ensure that the server goroutine is cleaned up before
- // c.serverEP.Destroy(), even if the test fails.
- c.serverEP.Shutdown()
- serverRun.Wait()
- }()
- if err := c.clientEP.Connect(); err != nil {
- t.Fatalf("client Endpoint.Connect() failed: %v", err)
- }
- if remoteShutdown {
- c.clientEP.Shutdown()
- } else {
- c.serverEP.Shutdown()
- }
- serverRun.Wait()
-}
-
-func TestShutdownDuringRecvFirstAfterConnectLocal(t *testing.T) {
- c := newTestConnection(t)
- defer c.destroy()
- testShutdownDuringRecvFirstAfterConnect(t, c, false)
-}
-
-func TestShutdownDuringRecvFirstAfterConnectRemote(t *testing.T) {
- c := newTestConnection(t)
- defer c.destroy()
- testShutdownDuringRecvFirstAfterConnect(t, c, true)
-}
-
-func testShutdownDuringClientSendRecv(t *testing.T, c *testConnection, remoteShutdown bool) {
- var serverRun sync.WaitGroup
- serverRun.Add(1)
- go func() {
- defer serverRun.Done()
- if _, err := c.serverEP.RecvFirst(); err != nil {
- t.Errorf("server Endpoint.RecvFirst() failed: %v", err)
- }
- // At this point, the client must be blocked in c.clientEP.SendRecv().
- if remoteShutdown {
- c.serverEP.Shutdown()
- } else {
- c.clientEP.Shutdown()
- }
- }()
- defer func() {
- // Ensure that the server goroutine is cleaned up before
- // c.serverEP.Destroy(), even if the test fails.
- c.serverEP.Shutdown()
- serverRun.Wait()
- }()
- if err := c.clientEP.Connect(); err != nil {
- t.Fatalf("client Endpoint.Connect() failed: %v", err)
- }
- if _, err := c.clientEP.SendRecv(0); err == nil {
- t.Errorf("client Endpoint.SendRecv() succeeded unexpectedly")
- }
-}
-
-func TestShutdownDuringClientSendRecvLocal(t *testing.T) {
- c := newTestConnection(t)
- defer c.destroy()
- testShutdownDuringClientSendRecv(t, c, false)
-}
-
-func TestShutdownDuringClientSendRecvRemote(t *testing.T) {
- c := newTestConnection(t)
- defer c.destroy()
- testShutdownDuringClientSendRecv(t, c, true)
-}
-
-func testShutdownDuringServerSendRecv(t *testing.T, c *testConnection, remoteShutdown bool) {
- var serverRun sync.WaitGroup
- serverRun.Add(1)
- go func() {
- defer serverRun.Done()
- if _, err := c.serverEP.RecvFirst(); err != nil {
- t.Errorf("server Endpoint.RecvFirst() failed: %v", err)
- return
- }
- if _, err := c.serverEP.SendRecv(0); err == nil {
- t.Errorf("server Endpoint.SendRecv() succeeded unexpectedly")
- }
- }()
- defer func() {
- // Ensure that the server goroutine is cleaned up before
- // c.serverEP.Destroy(), even if the test fails.
- c.serverEP.Shutdown()
- serverRun.Wait()
- }()
- if err := c.clientEP.Connect(); err != nil {
- t.Fatalf("client Endpoint.Connect() failed: %v", err)
- }
- if _, err := c.clientEP.SendRecv(0); err != nil {
- t.Fatalf("client Endpoint.SendRecv() failed: %v", err)
- }
- time.Sleep(time.Second) // to allow serverEP.SendRecv() to block
- if remoteShutdown {
- c.clientEP.Shutdown()
- } else {
- c.serverEP.Shutdown()
- }
- serverRun.Wait()
-}
-
-func TestShutdownDuringServerSendRecvLocal(t *testing.T) {
- c := newTestConnection(t)
- defer c.destroy()
- testShutdownDuringServerSendRecv(t, c, false)
-}
-
-func TestShutdownDuringServerSendRecvRemote(t *testing.T) {
- c := newTestConnection(t)
- defer c.destroy()
- testShutdownDuringServerSendRecv(t, c, true)
-}
-
-func benchmarkSendRecv(b *testing.B, c *testConnection) {
- var serverRun sync.WaitGroup
- serverRun.Add(1)
- go func() {
- defer serverRun.Done()
- if b.N == 0 {
- return
- }
- if _, err := c.serverEP.RecvFirst(); err != nil {
- b.Errorf("server Endpoint.RecvFirst() failed: %v", err)
- return
- }
- for i := 1; i < b.N; i++ {
- if _, err := c.serverEP.SendRecv(0); err != nil {
- b.Errorf("server Endpoint.SendRecv() failed: %v", err)
- return
- }
- }
- if err := c.serverEP.SendLast(0); err != nil {
- b.Errorf("server Endpoint.SendLast() failed: %v", err)
- }
- }()
- defer func() {
- c.serverEP.Shutdown()
- serverRun.Wait()
- }()
-
- if err := c.clientEP.Connect(); err != nil {
- b.Fatalf("client Endpoint.Connect() failed: %v", err)
- }
- runtime.GC()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- if _, err := c.clientEP.SendRecv(0); err != nil {
- b.Fatalf("client Endpoint.SendRecv() failed: %v", err)
- }
- }
- b.StopTimer()
-}
-
-func BenchmarkSendRecv(b *testing.B) {
- c := newTestConnection(b)
- defer c.destroy()
- benchmarkSendRecv(b, c)
-}
diff --git a/pkg/flipcall/flipcall_unsafe_state_autogen.go b/pkg/flipcall/flipcall_unsafe_state_autogen.go
new file mode 100644
index 000000000..0e03c2a65
--- /dev/null
+++ b/pkg/flipcall/flipcall_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package flipcall
diff --git a/pkg/fspath/BUILD b/pkg/fspath/BUILD
deleted file mode 100644
index 67dd1e225..000000000
--- a/pkg/fspath/BUILD
+++ /dev/null
@@ -1,26 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(default_visibility = ["//visibility:public"])
-
-licenses(["notice"])
-
-go_library(
- name = "fspath",
- srcs = [
- "builder.go",
- "fspath.go",
- ],
- deps = [
- "//pkg/gohacks",
- ],
-)
-
-go_test(
- name = "fspath_test",
- size = "small",
- srcs = [
- "builder_test.go",
- "fspath_test.go",
- ],
- library = ":fspath",
-)
diff --git a/pkg/fspath/builder_test.go b/pkg/fspath/builder_test.go
deleted file mode 100644
index 22f890273..000000000
--- a/pkg/fspath/builder_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fspath
-
-import (
- "testing"
-)
-
-func TestBuilder(t *testing.T) {
- type testCase struct {
- pcs []string // path components in reverse order
- after string
- want string
- }
- tests := []testCase{
- {
- // Empty case.
- },
- {
- pcs: []string{"foo"},
- want: "foo",
- },
- {
- pcs: []string{"foo", "bar", "baz"},
- want: "baz/bar/foo",
- },
- {
- pcs: []string{"foo", "bar"},
- after: " (deleted)",
- want: "bar/foo (deleted)",
- },
- }
-
- for _, test := range tests {
- t.Run(test.want, func(t *testing.T) {
- var b Builder
- for _, pc := range test.pcs {
- b.PrependComponent(pc)
- }
- b.AppendString(test.after)
- if got := b.String(); got != test.want {
- t.Errorf("got %q, wanted %q", got, test.want)
- }
- })
- }
-}
diff --git a/pkg/fspath/fspath_state_autogen.go b/pkg/fspath/fspath_state_autogen.go
new file mode 100644
index 000000000..6ceea8003
--- /dev/null
+++ b/pkg/fspath/fspath_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package fspath
diff --git a/pkg/fspath/fspath_test.go b/pkg/fspath/fspath_test.go
deleted file mode 100644
index d5e9a549a..000000000
--- a/pkg/fspath/fspath_test.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fspath
-
-import (
- "reflect"
- "strings"
- "testing"
-)
-
-func TestParseIteratorPartialPathnames(t *testing.T) {
- path := Parse("/foo//bar///baz////")
- // Parse strips leading slashes, and records their presence as
- // Path.Absolute.
- if !path.Absolute {
- t.Errorf("Path.Absolute: got false, wanted true")
- }
- // Parse strips trailing slashes, and records their presence as Path.Dir.
- if !path.Dir {
- t.Errorf("Path.Dir: got false, wanted true")
- }
- // The first Iterator.partialPathname is the input pathname, with leading
- // and trailing slashes stripped.
- it := path.Begin
- if want := "foo//bar///baz"; it.partialPathname != want {
- t.Errorf("first Iterator.partialPathname: got %q, wanted %q", it.partialPathname, want)
- }
- // Successive Iterator.partialPathnames remove the leading path component
- // and following slashes, until we run out of path components and get a
- // terminal Iterator.
- it = it.Next()
- if want := "bar///baz"; it.partialPathname != want {
- t.Errorf("second Iterator.partialPathname: got %q, wanted %q", it.partialPathname, want)
- }
- it = it.Next()
- if want := "baz"; it.partialPathname != want {
- t.Errorf("third Iterator.partialPathname: got %q, wanted %q", it.partialPathname, want)
- }
- it = it.Next()
- if want := ""; it.partialPathname != want {
- t.Errorf("fourth Iterator.partialPathname: got %q, wanted %q", it.partialPathname, want)
- }
- if it.Ok() {
- t.Errorf("fourth Iterator.Ok(): got true, wanted false")
- }
-}
-
-func TestParse(t *testing.T) {
- type testCase struct {
- pathname string
- relpath []string
- abs bool
- dir bool
- }
- tests := []testCase{
- {
- pathname: "",
- relpath: []string{},
- abs: false,
- dir: false,
- },
- {
- pathname: "/",
- relpath: []string{},
- abs: true,
- dir: true,
- },
- {
- pathname: "//",
- relpath: []string{},
- abs: true,
- dir: true,
- },
- }
- for _, sep := range []string{"/", "//"} {
- for _, abs := range []bool{false, true} {
- for _, dir := range []bool{false, true} {
- for _, pcs := range [][]string{
- // single path component
- {"foo"},
- // multiple path components, including non-UTF-8
- {".", "foo", "..", "\xe6", "bar"},
- } {
- prefix := ""
- if abs {
- prefix = sep
- }
- suffix := ""
- if dir {
- suffix = sep
- }
- tests = append(tests, testCase{
- pathname: prefix + strings.Join(pcs, sep) + suffix,
- relpath: pcs,
- abs: abs,
- dir: dir,
- })
- }
- }
- }
- }
-
- for _, test := range tests {
- t.Run(test.pathname, func(t *testing.T) {
- p := Parse(test.pathname)
- t.Logf("pathname %q => path %q", test.pathname, p)
- if p.Absolute != test.abs {
- t.Errorf("path absoluteness: got %v, wanted %v", p.Absolute, test.abs)
- }
- if p.Dir != test.dir {
- t.Errorf("path must resolve to a directory: got %v, wanted %v", p.Dir, test.dir)
- }
- pcs := []string{}
- for pit := p.Begin; pit.Ok(); pit = pit.Next() {
- pcs = append(pcs, pit.String())
- }
- if !reflect.DeepEqual(pcs, test.relpath) {
- t.Errorf("relative path: got %v, wanted %v", pcs, test.relpath)
- }
- })
- }
-}
diff --git a/pkg/gohacks/BUILD b/pkg/gohacks/BUILD
deleted file mode 100644
index b4e05f922..000000000
--- a/pkg/gohacks/BUILD
+++ /dev/null
@@ -1,20 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "gohacks",
- srcs = [
- "gohacks_unsafe.go",
- ],
- stateify = False,
- visibility = ["//:sandbox"],
-)
-
-go_test(
- name = "gohacks_test",
- size = "small",
- srcs = ["gohacks_test.go"],
- library = ":gohacks",
- deps = ["@org_golang_x_sys//unix:go_default_library"],
-)
diff --git a/pkg/gohacks/gohacks_test.go b/pkg/gohacks/gohacks_test.go
deleted file mode 100644
index e18c8abc7..000000000
--- a/pkg/gohacks/gohacks_test.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package gohacks
-
-import (
- "io/ioutil"
- "math/rand"
- "os"
- "runtime/debug"
- "testing"
-
- "golang.org/x/sys/unix"
-)
-
-func randBuf(size int) []byte {
- b := make([]byte, size)
- for i := range b {
- b[i] = byte(rand.Intn(256))
- }
- return b
-}
-
-// Size of a page in bytes. Cloned from hostarch.PageSize to avoid a circular
-// dependency.
-const pageSize = 4096
-
-func testCopy(dst, src []byte) (panicked bool) {
- defer func() {
- if r := recover(); r != nil {
- panicked = true
- }
- }()
- debug.SetPanicOnFault(true)
- copy(dst, src)
- return panicked
-}
-
-func TestSegVOnMemmove(t *testing.T) {
- // Test that SIGSEGVs received by runtime.memmove when *not* doing
- // CopyIn or CopyOut work gets propagated to the runtime.
- const bufLen = pageSize
- a, err := unix.Mmap(-1, 0, bufLen, unix.PROT_NONE, unix.MAP_ANON|unix.MAP_PRIVATE)
- if err != nil {
- t.Fatalf("Mmap failed: %v", err)
-
- }
- defer unix.Munmap(a)
- b := randBuf(bufLen)
-
- if !testCopy(b, a) {
- t.Fatalf("testCopy didn't panic when it should have")
- }
-
- if !testCopy(a, b) {
- t.Fatalf("testCopy didn't panic when it should have")
- }
-}
-
-func TestSigbusOnMemmove(t *testing.T) {
- // Test that SIGBUS received by runtime.memmove when *not* doing
- // CopyIn or CopyOut work gets propagated to the runtime.
- const bufLen = pageSize
- f, err := ioutil.TempFile("", "sigbus_test")
- if err != nil {
- t.Fatalf("TempFile failed: %v", err)
- }
- os.Remove(f.Name())
- defer f.Close()
-
- a, err := unix.Mmap(int(f.Fd()), 0, bufLen, unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED)
- if err != nil {
- t.Fatalf("Mmap failed: %v", err)
-
- }
- defer unix.Munmap(a)
- b := randBuf(bufLen)
-
- if !testCopy(b, a) {
- t.Fatalf("testCopy didn't panic when it should have")
- }
-
- if !testCopy(a, b) {
- t.Fatalf("testCopy didn't panic when it should have")
- }
-}
diff --git a/pkg/goid/BUILD b/pkg/goid/BUILD
deleted file mode 100644
index 08832a8ae..000000000
--- a/pkg/goid/BUILD
+++ /dev/null
@@ -1,23 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "goid",
- srcs = [
- "goid.go",
- "goid_amd64.s",
- "goid_arm64.s",
- ],
- stateify = False,
- visibility = ["//visibility:public"],
-)
-
-go_test(
- name = "goid_test",
- size = "small",
- srcs = [
- "goid_test.go",
- ],
- library = ":goid",
-)
diff --git a/pkg/goid/goid_test.go b/pkg/goid/goid_test.go
deleted file mode 100644
index 54be11d63..000000000
--- a/pkg/goid/goid_test.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package goid
-
-import (
- "runtime"
- "sync"
- "testing"
- "time"
-)
-
-func TestUniquenessAndConsistency(t *testing.T) {
- const (
- numGoroutines = 5000
-
- // maxID is not an intrinsic property of goroutine IDs; it is only a
- // property of how the Go runtime currently assigns them. Future
- // changes to the Go runtime may require that maxID be raised, or that
- // assertions regarding it be removed entirely.
- maxID = numGoroutines + 1000
- )
-
- var (
- goidsMu sync.Mutex
- goids = make(map[int64]struct{})
- checkedWG sync.WaitGroup
- exitCh = make(chan struct{})
- )
- for i := 0; i < numGoroutines; i++ {
- checkedWG.Add(1)
- go func() {
- id := Get()
- if id > maxID {
- t.Errorf("observed unexpectedly large goroutine ID %d", id)
- }
- goidsMu.Lock()
- if _, dup := goids[id]; dup {
- t.Errorf("observed duplicate goroutine ID %d", id)
- }
- goids[id] = struct{}{}
- goidsMu.Unlock()
- checkedWG.Done()
- for {
- if curID := Get(); curID != id {
- t.Errorf("goroutine ID changed from %d to %d", id, curID)
- // Don't spam logs by repeating the check; wait quietly for
- // the test to finish.
- <-exitCh
- return
- }
- // Check if the test is over.
- select {
- case <-exitCh:
- return
- default:
- }
- // Yield to other goroutines, and possibly migrate to another P.
- runtime.Gosched()
- }
- }()
- }
- // Wait for all goroutines to perform uniqueness checks.
- checkedWG.Wait()
- // Wait for an additional second to allow goroutines to spin checking for
- // ID consistency.
- time.Sleep(time.Second)
- // Request that all goroutines exit.
- close(exitCh)
-}
diff --git a/pkg/hostarch/BUILD b/pkg/hostarch/BUILD
deleted file mode 100644
index 1e8def4d9..000000000
--- a/pkg/hostarch/BUILD
+++ /dev/null
@@ -1,42 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "addr_range",
- out = "addr_range.go",
- package = "hostarch",
- prefix = "Addr",
- template = "//pkg/segment:generic_range",
- types = {
- "T": "Addr",
- },
-)
-
-go_test(
- name = "hostarch_test",
- size = "small",
- srcs = [
- "addr_range_seq_test.go",
- ],
- library = ":hostarch",
-)
-
-go_library(
- name = "hostarch",
- srcs = [
- "access_type.go",
- "addr.go",
- "addr_range.go",
- "addr_range_seq_unsafe.go",
- "hostarch.go",
- "hostarch_arm64.go",
- "hostarch_x86.go",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/gohacks",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/segment/range.go b/pkg/hostarch/addr_range.go
index b6fa96e81..8a771d6fc 100644
--- a/pkg/segment/range.go
+++ b/pkg/hostarch/addr_range.go
@@ -1,59 +1,42 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package segment
-
-// T is a required type parameter that must be an integral type.
-type T uint64
+package hostarch
// A Range represents a contiguous range of T.
//
// +stateify savable
-type Range struct {
+type AddrRange struct {
// Start is the inclusive start of the range.
- Start T
+ Start Addr
// End is the exclusive end of the range.
- End T
+ End Addr
}
// WellFormed returns true if r.Start <= r.End. All other methods on a Range
// require that the Range is well-formed.
//
//go:nosplit
-func (r Range) WellFormed() bool {
+func (r AddrRange) WellFormed() bool {
return r.Start <= r.End
}
// Length returns the length of the range.
//
//go:nosplit
-func (r Range) Length() T {
+func (r AddrRange) Length() Addr {
return r.End - r.Start
}
// Contains returns true if r contains x.
//
//go:nosplit
-func (r Range) Contains(x T) bool {
+func (r AddrRange) Contains(x Addr) bool {
return r.Start <= x && x < r.End
}
// Overlaps returns true if r and r2 overlap.
//
//go:nosplit
-func (r Range) Overlaps(r2 Range) bool {
+func (r AddrRange) Overlaps(r2 AddrRange) bool {
return r.Start < r2.End && r2.Start < r.End
}
@@ -61,7 +44,7 @@ func (r Range) Overlaps(r2 Range) bool {
// contained within r.
//
//go:nosplit
-func (r Range) IsSupersetOf(r2 Range) bool {
+func (r AddrRange) IsSupersetOf(r2 AddrRange) bool {
return r.Start <= r2.Start && r.End >= r2.End
}
@@ -70,7 +53,7 @@ func (r Range) IsSupersetOf(r2 Range) bool {
// bounds, but for which Length() == 0.
//
//go:nosplit
-func (r Range) Intersect(r2 Range) Range {
+func (r AddrRange) Intersect(r2 AddrRange) AddrRange {
if r.Start < r2.Start {
r.Start = r2.Start
}
@@ -88,6 +71,6 @@ func (r Range) Intersect(r2 Range) Range {
// non-zero length.
//
//go:nosplit
-func (r Range) CanSplitAt(x T) bool {
+func (r AddrRange) CanSplitAt(x Addr) bool {
return r.Contains(x) && r.Start < x
}
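A minimal usage sketch of the AddrRange instantiation above (not part of the diff). The method set is exactly what this hunk defines; the import path assumes the package's canonical gvisor.dev location:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

func main() {
	r := hostarch.AddrRange{Start: 0x1000, End: 0x3000}
	r2 := hostarch.AddrRange{Start: 0x2000, End: 0x4000}

	fmt.Println(r.WellFormed())     // true: Start <= End
	fmt.Println(r.Length())         // 8192 (0x2000)
	fmt.Println(r.Contains(0x2fff)) // true
	fmt.Println(r.Overlaps(r2))     // true: both cover [0x2000, 0x3000)
	fmt.Println(r.Intersect(r2))    // {8192 12288}, i.e. [0x2000, 0x3000)
}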
diff --git a/pkg/hostarch/addr_range_seq_test.go b/pkg/hostarch/addr_range_seq_test.go
deleted file mode 100644
index 5726dfd19..000000000
--- a/pkg/hostarch/addr_range_seq_test.go
+++ /dev/null
@@ -1,197 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package hostarch
-
-import (
- "testing"
-)
-
-var addrRangeSeqTests = []struct {
- desc string
- ranges []AddrRange
-}{
- {
- desc: "Empty sequence",
- },
- {
- desc: "Single empty AddrRange",
- ranges: []AddrRange{
- {0x10, 0x10},
- },
- },
- {
- desc: "Single non-empty AddrRange of length 1",
- ranges: []AddrRange{
- {0x10, 0x11},
- },
- },
- {
- desc: "Single non-empty AddrRange of length 2",
- ranges: []AddrRange{
- {0x10, 0x12},
- },
- },
- {
- desc: "Multiple non-empty AddrRanges",
- ranges: []AddrRange{
- {0x10, 0x11},
- {0x20, 0x22},
- },
- },
- {
- desc: "Multiple AddrRanges including empty AddrRanges",
- ranges: []AddrRange{
- {0x10, 0x10},
- {0x20, 0x20},
- {0x30, 0x33},
- {0x40, 0x44},
- {0x50, 0x50},
- {0x60, 0x60},
- {0x70, 0x77},
- {0x80, 0x88},
- {0x90, 0x90},
- {0xa0, 0xa0},
- },
- },
-}
-
-func testAddrRangeSeqEqualityWithTailIteration(t *testing.T, ars AddrRangeSeq, wantRanges []AddrRange) {
- var wantLen int64
- for _, ar := range wantRanges {
- wantLen += int64(ar.Length())
- }
-
- var i int
- for !ars.IsEmpty() {
- if gotLen := ars.NumBytes(); gotLen != wantLen {
- t.Errorf("Iteration %d: %v.NumBytes(): got %d, wanted %d", i, ars, gotLen, wantLen)
- }
- if gotN, wantN := ars.NumRanges(), len(wantRanges)-i; gotN != wantN {
- t.Errorf("Iteration %d: %v.NumRanges(): got %d, wanted %d", i, ars, gotN, wantN)
- }
- got := ars.Head()
- if i >= len(wantRanges) {
- t.Errorf("Iteration %d: %v.Head(): got %s, wanted <end of sequence>", i, ars, got)
- } else if want := wantRanges[i]; got != want {
- t.Errorf("Iteration %d: %v.Head(): got %s, wanted %s", i, ars, got, want)
- }
- ars = ars.Tail()
- wantLen -= int64(got.Length())
- i++
- }
- if gotLen := ars.NumBytes(); gotLen != 0 || wantLen != 0 {
- t.Errorf("Iteration %d: %v.NumBytes(): got %d, wanted %d (which should be 0)", i, ars, gotLen, wantLen)
- }
- if gotN := ars.NumRanges(); gotN != 0 {
- t.Errorf("Iteration %d: %v.NumRanges(): got %d, wanted 0", i, ars, gotN)
- }
-}
-
-func TestAddrRangeSeqTailIteration(t *testing.T) {
- for _, test := range addrRangeSeqTests {
- t.Run(test.desc, func(t *testing.T) {
- testAddrRangeSeqEqualityWithTailIteration(t, AddrRangeSeqFromSlice(test.ranges), test.ranges)
- })
- }
-}
-
-func TestAddrRangeSeqDropFirstEmpty(t *testing.T) {
- var ars AddrRangeSeq
- if got, want := ars.DropFirst(1), ars; got != want {
- t.Errorf("%v.DropFirst(1): got %v, wanted %v", ars, got, want)
- }
-}
-
-func TestAddrRangeSeqDropSingleByteIteration(t *testing.T) {
- // Tests AddrRangeSeq iteration using Head/DropFirst, simulating
- // I/O-per-AddrRange.
- for _, test := range addrRangeSeqTests {
- t.Run(test.desc, func(t *testing.T) {
- // Figure out what AddrRanges we expect to see.
- var wantLen int64
- var wantRanges []AddrRange
- for _, ar := range test.ranges {
- wantLen += int64(ar.Length())
- wantRanges = append(wantRanges, ar)
- if ar.Length() == 0 {
- // We "do" 0 bytes of I/O and then call DropFirst(0),
- // advancing to the next AddrRange.
- continue
- }
- // Otherwise we "do" 1 byte of I/O and then call DropFirst(1),
- // advancing the AddrRange by 1 byte, or to the next AddrRange
- // if this one is exhausted.
- for ar.Start++; ar.Length() != 0; ar.Start++ {
- wantRanges = append(wantRanges, ar)
- }
- }
- t.Logf("Expected AddrRanges: %s (%d bytes)", wantRanges, wantLen)
-
- ars := AddrRangeSeqFromSlice(test.ranges)
- var i int
- for !ars.IsEmpty() {
- if gotLen := ars.NumBytes(); gotLen != wantLen {
- t.Errorf("Iteration %d: %v.NumBytes(): got %d, wanted %d", i, ars, gotLen, wantLen)
- }
- got := ars.Head()
- if i >= len(wantRanges) {
- t.Errorf("Iteration %d: %v.Head(): got %s, wanted <end of sequence>", i, ars, got)
- } else if want := wantRanges[i]; got != want {
- t.Errorf("Iteration %d: %v.Head(): got %s, wanted %s", i, ars, got, want)
- }
- if got.Length() == 0 {
- ars = ars.DropFirst(0)
- } else {
- ars = ars.DropFirst(1)
- wantLen--
- }
- i++
- }
- if gotLen := ars.NumBytes(); gotLen != 0 || wantLen != 0 {
- t.Errorf("Iteration %d: %v.NumBytes(): got %d, wanted %d (which should be 0)", i, ars, gotLen, wantLen)
- }
- })
- }
-}
-
-func TestAddrRangeSeqTakeFirstEmpty(t *testing.T) {
- var ars AddrRangeSeq
- if got, want := ars.TakeFirst(1), ars; got != want {
- t.Errorf("%v.TakeFirst(1): got %v, wanted %v", ars, got, want)
- }
-}
-
-func TestAddrRangeSeqTakeFirst(t *testing.T) {
- ranges := []AddrRange{
- {0x10, 0x11},
- {0x20, 0x22},
- {0x30, 0x30},
- {0x40, 0x44},
- {0x50, 0x55},
- {0x60, 0x60},
- {0x70, 0x77},
- }
- ars := AddrRangeSeqFromSlice(ranges).TakeFirst(5)
- want := []AddrRange{
- {0x10, 0x11}, // +1 byte (total 1 byte), not truncated
- {0x20, 0x22}, // +2 bytes (total 3 bytes), not truncated
- {0x30, 0x30}, // +0 bytes (total 3 bytes), no change
- {0x40, 0x42}, // +2 bytes (total 5 bytes), partially truncated
- {0x50, 0x50}, // +0 bytes (total 5 bytes), fully truncated
- {0x60, 0x60}, // +0 bytes (total 5 bytes), "fully truncated" (no change)
- {0x70, 0x70}, // +0 bytes (total 5 bytes), fully truncated
- }
- testAddrRangeSeqEqualityWithTailIteration(t, ars, want)
-}
diff --git a/pkg/hostarch/hostarch_arm64_state_autogen.go b/pkg/hostarch/hostarch_arm64_state_autogen.go
new file mode 100644
index 000000000..ca13bb4d4
--- /dev/null
+++ b/pkg/hostarch/hostarch_arm64_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build arm64
+
+package hostarch
diff --git a/pkg/hostarch/hostarch_state_autogen.go b/pkg/hostarch/hostarch_state_autogen.go
new file mode 100644
index 000000000..32939e06b
--- /dev/null
+++ b/pkg/hostarch/hostarch_state_autogen.go
@@ -0,0 +1,80 @@
+// automatically generated by stateify.
+
+package hostarch
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (a *AccessType) StateTypeName() string {
+ return "pkg/hostarch.AccessType"
+}
+
+func (a *AccessType) StateFields() []string {
+ return []string{
+ "Read",
+ "Write",
+ "Execute",
+ }
+}
+
+func (a *AccessType) beforeSave() {}
+
+// +checklocksignore
+func (a *AccessType) StateSave(stateSinkObject state.Sink) {
+ a.beforeSave()
+ stateSinkObject.Save(0, &a.Read)
+ stateSinkObject.Save(1, &a.Write)
+ stateSinkObject.Save(2, &a.Execute)
+}
+
+func (a *AccessType) afterLoad() {}
+
+// +checklocksignore
+func (a *AccessType) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &a.Read)
+ stateSourceObject.Load(1, &a.Write)
+ stateSourceObject.Load(2, &a.Execute)
+}
+
+func (v *Addr) StateTypeName() string {
+ return "pkg/hostarch.Addr"
+}
+
+func (v *Addr) StateFields() []string {
+ return nil
+}
+
+func (r *AddrRange) StateTypeName() string {
+ return "pkg/hostarch.AddrRange"
+}
+
+func (r *AddrRange) StateFields() []string {
+ return []string{
+ "Start",
+ "End",
+ }
+}
+
+func (r *AddrRange) beforeSave() {}
+
+// +checklocksignore
+func (r *AddrRange) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.Start)
+ stateSinkObject.Save(1, &r.End)
+}
+
+func (r *AddrRange) afterLoad() {}
+
+// +checklocksignore
+func (r *AddrRange) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.Start)
+ stateSourceObject.Load(1, &r.End)
+}
+
+func init() {
+ state.Register((*AccessType)(nil))
+ state.Register((*Addr)(nil))
+ state.Register((*AddrRange)(nil))
+}
diff --git a/pkg/hostarch/hostarch_unsafe_state_autogen.go b/pkg/hostarch/hostarch_unsafe_state_autogen.go
new file mode 100644
index 000000000..419f24913
--- /dev/null
+++ b/pkg/hostarch/hostarch_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package hostarch
diff --git a/pkg/hostarch/hostarch_x86_state_autogen.go b/pkg/hostarch/hostarch_x86_state_autogen.go
new file mode 100644
index 000000000..98bb9050d
--- /dev/null
+++ b/pkg/hostarch/hostarch_x86_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build amd64 386
+
+package hostarch
diff --git a/pkg/ilist/BUILD b/pkg/ilist/BUILD
deleted file mode 100644
index 3f6eb07df..000000000
--- a/pkg/ilist/BUILD
+++ /dev/null
@@ -1,56 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "ilist",
- srcs = [
- "interface_list.go",
- ],
- visibility = ["//visibility:public"],
-)
-
-go_template_instance(
- name = "interface_list",
- out = "interface_list.go",
- package = "ilist",
- template = ":generic_list",
- types = {},
-)
-
-# This list is used for benchmarking.
-go_template_instance(
- name = "test_list",
- out = "test_list.go",
- package = "ilist",
- prefix = "direct",
- template = ":generic_list",
- types = {
- "Element": "*direct",
- "Linker": "*direct",
- },
-)
-
-go_test(
- name = "list_test",
- size = "small",
- srcs = [
- "list_test.go",
- "test_list.go",
- ],
- library = ":ilist",
-)
-
-go_template(
- name = "generic_list",
- srcs = [
- "list.go",
- ],
- opt_types = [
- "Element",
- "ElementMapper",
- "Linker",
- ],
- visibility = ["//visibility:public"],
-)
diff --git a/pkg/ilist/ilist_state_autogen.go b/pkg/ilist/ilist_state_autogen.go
new file mode 100644
index 000000000..d329cb299
--- /dev/null
+++ b/pkg/ilist/ilist_state_autogen.go
@@ -0,0 +1,68 @@
+// automatically generated by stateify.
+
+package ilist
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (l *List) StateTypeName() string {
+ return "pkg/ilist.List"
+}
+
+func (l *List) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *List) beforeSave() {}
+
+// +checklocksignore
+func (l *List) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *List) afterLoad() {}
+
+// +checklocksignore
+func (l *List) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *Entry) StateTypeName() string {
+ return "pkg/ilist.Entry"
+}
+
+func (e *Entry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *Entry) beforeSave() {}
+
+// +checklocksignore
+func (e *Entry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *Entry) afterLoad() {}
+
+// +checklocksignore
+func (e *Entry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func init() {
+ state.Register((*List)(nil))
+ state.Register((*Entry)(nil))
+}
diff --git a/pkg/ilist/list.go b/pkg/ilist/interface_list.go
index 557051d18..4d6062d7f 100644
--- a/pkg/ilist/list.go
+++ b/pkg/ilist/interface_list.go
@@ -1,18 +1,3 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package ilist provides the implementation of intrusive linked lists.
package ilist
// Linker is the interface that objects must implement if they want to be added
diff --git a/pkg/ilist/list_test.go b/pkg/ilist/list_test.go
deleted file mode 100644
index 3f9abfb56..000000000
--- a/pkg/ilist/list_test.go
+++ /dev/null
@@ -1,240 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ilist
-
-import (
- "testing"
-)
-
-type testEntry struct {
- Entry
- value int
-}
-
-type direct struct {
- directEntry
- value int
-}
-
-func verifyEquality(t *testing.T, entries []testEntry, l *List) {
- t.Helper()
-
- i := 0
- for it := l.Front(); it != nil; it = it.Next() {
- e := it.(*testEntry)
- if e != &entries[i] {
- t.Errorf("Wrong entry at index %d", i)
- return
- }
- i++
- }
-
- if i != len(entries) {
- t.Errorf("Wrong number of entries; want = %d, got = %d", len(entries), i)
- return
- }
-
- i = 0
- for it := l.Back(); it != nil; it = it.Prev() {
- e := it.(*testEntry)
- if e != &entries[len(entries)-1-i] {
- t.Errorf("Wrong entry at index %d", i)
- return
- }
- i++
- }
-
- if i != len(entries) {
- t.Errorf("Wrong number of entries; want = %d, got = %d", len(entries), i)
- return
- }
-}
-
-func TestZeroEmpty(t *testing.T) {
- var l List
- if l.Front() != nil {
- t.Error("Front is non-nil")
- }
- if l.Back() != nil {
- t.Error("Back is non-nil")
- }
-}
-
-func TestPushBack(t *testing.T) {
- var l List
-
- // Test single entry insertion.
- var entry testEntry
- l.PushBack(&entry)
-
- e := l.Front().(*testEntry)
- if e != &entry {
- t.Error("Wrong entry returned")
- }
-
- // Test inserting 100 entries.
- l.Reset()
- var entries [100]testEntry
- for i := range entries {
- l.PushBack(&entries[i])
- }
-
- verifyEquality(t, entries[:], &l)
-}
-
-func TestPushFront(t *testing.T) {
- var l List
-
- // Test single entry insertion.
- var entry testEntry
- l.PushFront(&entry)
-
- e := l.Front().(*testEntry)
- if e != &entry {
- t.Error("Wrong entry returned")
- }
-
- // Test inserting 100 entries.
- l.Reset()
- var entries [100]testEntry
- for i := range entries {
- l.PushFront(&entries[len(entries)-1-i])
- }
-
- verifyEquality(t, entries[:], &l)
-}
-
-func TestRemove(t *testing.T) {
- // Remove entry from single-element list.
- var l List
- var entry testEntry
- l.PushBack(&entry)
- l.Remove(&entry)
- if l.Front() != nil {
-		t.Error("List is not empty")
- }
-
- var entries [100]testEntry
-
- // Remove single element from lists of lengths 2 to 101.
- for n := 1; n <= len(entries); n++ {
- for extra := 0; extra <= n; extra++ {
- l.Reset()
- for i := 0; i < n; i++ {
- if extra == i {
- l.PushBack(&entry)
- }
- l.PushBack(&entries[i])
- }
- if extra == n {
- l.PushBack(&entry)
- }
-
- l.Remove(&entry)
- verifyEquality(t, entries[:n], &l)
- }
- }
-}
-
-func TestReset(t *testing.T) {
- var l List
-
- // Resetting list of one element.
- l.PushBack(&testEntry{})
- if l.Front() == nil {
- t.Error("List is empty")
- }
-
- l.Reset()
- if l.Front() != nil {
- t.Error("List is not empty")
- }
-
- // Resetting list of 10 elements.
- for i := 0; i < 10; i++ {
- l.PushBack(&testEntry{})
- }
-
- if l.Front() == nil {
- t.Error("List is empty")
- }
-
- l.Reset()
- if l.Front() != nil {
- t.Error("List is not empty")
- }
-
- // Resetting empty list.
- l.Reset()
- if l.Front() != nil {
- t.Error("List is not empty")
- }
-}
-
-func BenchmarkIterateForward(b *testing.B) {
- var l List
- for i := 0; i < 1000000; i++ {
- l.PushBack(&testEntry{value: i})
- }
-
- for i := b.N; i > 0; i-- {
- tmp := 0
- for e := l.Front(); e != nil; e = e.Next() {
- tmp += e.(*testEntry).value
- }
- }
-}
-
-func BenchmarkIterateBackward(b *testing.B) {
- var l List
- for i := 0; i < 1000000; i++ {
- l.PushBack(&testEntry{value: i})
- }
-
- for i := b.N; i > 0; i-- {
- tmp := 0
- for e := l.Back(); e != nil; e = e.Prev() {
- tmp += e.(*testEntry).value
- }
- }
-}
-
-func BenchmarkDirectIterateForward(b *testing.B) {
- var l directList
- for i := 0; i < 1000000; i++ {
- l.PushBack(&direct{value: i})
- }
-
- for i := b.N; i > 0; i-- {
- tmp := 0
- for e := l.Front(); e != nil; e = e.Next() {
- tmp += e.value
- }
- }
-}
-
-func BenchmarkDirectIterateBackward(b *testing.B) {
- var l directList
- for i := 0; i < 1000000; i++ {
- l.PushBack(&direct{value: i})
- }
-
- for i := b.N; i > 0; i-- {
- tmp := 0
- for e := l.Back(); e != nil; e = e.Prev() {
- tmp += e.value
- }
- }
-}
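
For reference, a minimal sketch of how callers use the intrusive list that the deleted tests exercised; it mirrors the `PushBack`/`Front`/`Next` pattern above and assumes the generic (non-specialized) `pkg/ilist` API, with the type assertion pattern taken directly from the tests.

```go
package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/ilist"
)

// node embeds ilist.Entry so it can be linked into an ilist.List without any
// allocation by the list itself; the list stores ilist.Linker values.
type node struct {
	ilist.Entry
	value int
}

func main() {
	var l ilist.List
	for i := 0; i < 3; i++ {
		l.PushBack(&node{value: i})
	}
	// Iteration yields ilist.Linker, so elements are recovered with a type
	// assertion, exactly as in the deleted tests.
	for it := l.Front(); it != nil; it = it.Next() {
		fmt.Println(it.(*node).value)
	}
}
```
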
diff --git a/pkg/linewriter/BUILD b/pkg/linewriter/BUILD
deleted file mode 100644
index f84d03700..000000000
--- a/pkg/linewriter/BUILD
+++ /dev/null
@@ -1,18 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "linewriter",
- srcs = ["linewriter.go"],
- marshal = False,
- stateify = False,
- visibility = ["//visibility:public"],
- deps = ["//pkg/sync"],
-)
-
-go_test(
- name = "linewriter_test",
- srcs = ["linewriter_test.go"],
- library = ":linewriter",
-)
diff --git a/pkg/linewriter/linewriter_test.go b/pkg/linewriter/linewriter_test.go
deleted file mode 100644
index 96dc7e6e0..000000000
--- a/pkg/linewriter/linewriter_test.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package linewriter
-
-import (
- "bytes"
- "testing"
-)
-
-func TestWriter(t *testing.T) {
- testCases := []struct {
- input []string
- want []string
- }{
- {
- input: []string{"1\n", "2\n"},
- want: []string{"1", "2"},
- },
- {
- input: []string{"1\n", "\n", "2\n"},
- want: []string{"1", "", "2"},
- },
- {
- input: []string{"1\n2\n", "3\n"},
- want: []string{"1", "2", "3"},
- },
- {
- input: []string{"1", "2\n"},
- want: []string{"12"},
- },
- {
- // Data with no newline yet is omitted.
- input: []string{"1\n", "2\n", "3"},
- want: []string{"1", "2"},
- },
- }
-
- for _, c := range testCases {
- var lines [][]byte
-
- w := NewWriter(func(p []byte) {
- // We must not retain p, so we must make a copy.
- b := make([]byte, len(p))
- copy(b, p)
-
- lines = append(lines, b)
- })
-
- for _, in := range c.input {
- n, err := w.Write([]byte(in))
- if err != nil {
- t.Errorf("Write(%q) err got %v want nil (case %+v)", in, err, c)
- }
- if n != len(in) {
- t.Errorf("Write(%q) b got %d want %d (case %+v)", in, n, len(in), c)
- }
- }
-
- if len(lines) != len(c.want) {
- t.Errorf("len(lines) got %d want %d (case %+v)", len(lines), len(c.want), c)
- }
-
- for i := range lines {
- if !bytes.Equal(lines[i], []byte(c.want[i])) {
- t.Errorf("item %d got %q want %q (case %+v)", i, lines[i], c.want[i], c)
- }
- }
- }
-}
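
A minimal usage sketch of `pkg/linewriter`, reconstructed from the deleted test above: the callback fires once per completed line (without the trailing newline), and partial lines are buffered until a newline arrives.

```go
package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/linewriter"
)

func main() {
	w := linewriter.NewWriter(func(p []byte) {
		// p is only valid for the duration of the callback; copy if retained.
		fmt.Printf("emitted: %q\n", p)
	})
	w.Write([]byte("partial "))            // Buffered; nothing emitted yet.
	w.Write([]byte("line\nsecond line\n")) // Emits "partial line", then "second line".
}
```
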
diff --git a/pkg/lisafs/README.md b/pkg/lisafs/README.md
deleted file mode 100644
index 51d0d40e5..000000000
--- a/pkg/lisafs/README.md
+++ /dev/null
@@ -1,363 +0,0 @@
-# Replacing 9P
-
-## Background
-
-The Linux filesystem model consists of the following key aspects (modulo mounts,
-which are outside the scope of this discussion):
-
-- A `struct inode` represents a "filesystem object", such as a directory or a
- regular file. "Filesystem object" is most precisely defined by the practical
- properties of an inode, such as an immutable type (regular file, directory,
- symbolic link, etc.) and its independence from the path originally used to
- obtain it.
-
-- A `struct dentry` represents a node in a filesystem tree. Semantically, each
- dentry is immutably associated with an inode representing the filesystem
- object at that position. (Linux implements optimizations involving reuse of
- unreferenced dentries, which allows their associated inodes to change, but
- this is outside the scope of this discussion.)
-
-- A `struct file` represents an open file description (hereafter FD) and is
- needed to perform I/O. Each FD is immutably associated with the dentry
- through which it was opened.
-
-The current gVisor virtual filesystem implementation (hereafter VFS1) closely
-imitates the Linux design:
-
-- `struct inode` => `fs.Inode`
-
-- `struct dentry` => `fs.Dirent`
-
-- `struct file` => `fs.File`
-
-gVisor accesses most external filesystems through a variant of the 9P2000.L
-protocol, including extensions for performance (`walkgetattr`) and for features
-not supported by vanilla 9P2000.L (`flushf`, `lconnect`). The 9P protocol family
-is inode-based; 9P fids represent a file (equivalently "file system object"),
-and the protocol is structured around alternatively obtaining fids to represent
-files (with `walk` and, in gVisor, `walkgetattr`) and performing operations on
-those fids.
-
-In the sections below, a **shared** filesystem is a filesystem that is *mutably*
-accessible by multiple concurrent clients, such that a **non-shared** filesystem
-is a filesystem that is either read-only or accessible by only a single client.
-
-## Problems
-
-### Serialization of Path Component RPCs
-
-Broadly speaking, VFS1 traverses each path component in a pathname, alternating
-between verifying that each traversed dentry represents an inode that represents
-a searchable directory and moving to the next dentry in the path.
-
-In the context of a remote filesystem, the structure of this traversal means
-that - modulo caching - a path involving N components requires at least N-1
-*sequential* RPCs to obtain metadata for intermediate directories, incurring
-significant latency. (In vanilla 9P2000.L, 2(N-1) RPCs are required: N-1 `walk`
-and N-1 `getattr`. We added the `walkgetattr` RPC to reduce this overhead.) On
-non-shared filesystems, this overhead is primarily significant during
-application startup; caching mitigates much of this overhead at steady state. On
-shared filesystems, where correct caching requires revalidation (requiring RPCs
-for each revalidated directory anyway), this overhead is consistently ruinous.
-
-### Inefficient RPCs
-
-9P is not exceptionally economical with RPCs in general. In addition to the
-issue described above:
-
-- Opening an existing file in 9P involves at least 2 RPCs: `walk` to produce
- an unopened fid representing the file, and `lopen` to open the fid.
-
-- Creating a file also involves at least 2 RPCs: `walk` to produce an unopened
- fid representing the parent directory, and `lcreate` to create the file and
- convert the fid to an open fid representing the created file. In practice,
- both the Linux and gVisor 9P clients expect to have an unopened fid for the
- created file (necessitating an additional `walk`), as well as attributes for
- the created file (necessitating an additional `getattr`), for a total of 4
- RPCs. (In a shared filesystem, where whether a file already exists can
- change between RPCs, a correct implementation of `open(O_CREAT)` would have
- to alternate between these two paths (plus `clunk`ing the temporary fid
- between alternations, since the nature of the `fid` differs between the two
- paths). Neither Linux nor gVisor implement the required alternation, so
- `open(O_CREAT)` without `O_EXCL` can spuriously fail with `EEXIST` on both.)
-
-- Closing (`clunk`ing) a fid requires an RPC. VFS1 issues this RPC
- asynchronously in an attempt to reduce critical path latency, but scheduling
- overhead makes this not clearly advantageous in practice.
-
-- `read` and `readdir` can return partial reads without a way to indicate EOF,
- necessitating an additional final read to detect EOF.
-
-- Operations that affect filesystem state do not consistently return updated
- filesystem state. In gVisor, the client implementation attempts to handle
- this by tracking what it thinks updated state "should" be; this is complex,
- and especially brittle for timestamps (which are often not arbitrarily
-    settable). In Linux, the client implementation invalidates cached metadata
- whenever it performs such an operation, and reloads it when a dentry
- corresponding to an inode with no valid cached metadata is revalidated; this
- is simple, but necessitates an additional `getattr`.
-
-### Dentry/Inode Ambiguity
-
-As noted above, 9P's documentation tends to imply that unopened fids represent
-an inode. In practice, most filesystem APIs present very limited interfaces for
-working with inodes at best, such that the interpretation of unopened fids
-varies:
-
-- Linux's 9P client associates unopened fids with (dentry, uid) pairs. When
- caching is enabled, it also associates each inode with the first fid opened
- writably that references that inode, in order to support page cache
- writeback.
-
-- gVisor's 9P client associates unopened fids with inodes, and also caches
- opened fids in inodes in a manner similar to Linux.
-
-- The runsc fsgofer associates unopened fids with both "dentries" (host
- filesystem paths) and "inodes" (host file descriptors); which is used
- depends on the operation invoked on the fid.
-
-For non-shared filesystems, this confusion has resulted in correctness issues
-that are (in gVisor) currently handled by a number of coarse-grained locks that
-serialize renames with all other filesystem operations. For shared filesystems,
-this means inconsistent behavior in the presence of concurrent mutation.
-
-## Design
-
-Almost all Linux filesystem syscalls describe filesystem resources in one of two
-ways:
-
-- Path-based: A filesystem position is described by a combination of a
- starting position and a sequence of path components relative to that
- position, where the starting position is one of:
-
- - The VFS root (defined by mount namespace and chroot), for absolute paths
-
- - The VFS position of an existing FD, for relative paths passed to `*at`
-        syscalls (e.g. `fstatat`)
-
- - The current working directory, for relative paths passed to non-`*at`
- syscalls and `*at` syscalls with `AT_FDCWD`
-
-- File-description-based: A filesystem object is described by an existing FD,
- passed to a `f*` syscall (e.g. `fstat`).
-
-Many of our issues with 9P arise from its (and VFS') interposition of a model
-based on inodes between the filesystem syscall API and filesystem
-implementations. We propose to replace 9P with a protocol that does not feature
-inodes at all, and instead closely follows the filesystem syscall API by
-featuring only path-based and FD-based operations, with minimal deviations as
-necessary to ameliorate deficiencies in the syscall interface (see below). This
-approach addresses the issues described above:
-
-- Even on shared filesystems, most application filesystem syscalls are
- translated to a single RPC (possibly excepting special cases described
- below), which is a logical lower bound.
-
-- The behavior of application syscalls on shared filesystems is
- straightforwardly predictable: path-based syscalls are translated to
- path-based RPCs, which will re-lookup the file at that path, and FD-based
- syscalls are translated to FD-based RPCs, which use an existing open file
- without performing another lookup. (This is at least true on gofers that
- proxy the host local filesystem; other filesystems that lack support for
- e.g. certain operations on FDs may have different behavior, but this
- divergence is at least still predictable and inherent to the underlying
- filesystem implementation.)
-
-Note that this approach is only feasible in gVisor's next-generation virtual
-filesystem (VFS2), which does not assume the existence of inodes and allows the
-remote filesystem client to translate whole path-based syscalls into RPCs. Thus
-one of the unavoidable tradeoffs associated with such a protocol vs. 9P is the
-inability to construct a Linux client that is performance-competitive with
-gVisor.
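
To make the split concrete, here is a purely hypothetical sketch (in Go) of what a path-based and an FD-based request could look like; the names and fields are illustrative assumptions for exposition only, not the actual protocol definition.

```go
// Package sketch illustrates the two request shapes described above; all
// identifiers here are hypothetical.
package sketch

// FSID selects one of the filesystem trees multiplexed over a connection
// (see "Filesystem Multiplexing" below).
type FSID uint32

// FDID names a file already opened on the server, analogous to a host FD.
type FDID uint64

// StatAtReq is a path-based RPC, mirroring fstatat(2): a starting FD plus the
// remaining path components, resolved by the server in a single round trip.
type StatAtReq struct {
	FS    FSID
	Start FDID
	Path  []string
}

// FStatReq is an FD-based RPC, mirroring fstat(2): it reuses an open FD and
// performs no path lookup.
type FStatReq struct {
	FS FSID
	FD FDID
}
```
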
-
-### File Permissions
-
-Many filesystem operations are side-effectual, such that file permissions must
-be checked before such operations take effect. The simplest approach to file
-permission checking is for the sentry to obtain permissions from the remote
-filesystem, then apply permission checks in the sentry before performing the
-application-requested operation. However, this requires an additional RPC per
-application syscall (which can't be mitigated by caching on shared filesystems).
-Alternatively, we may delegate file permission checking to gofers. In general,
-file permission checks depend on the following properties of the accessor:
-
-- Filesystem UID/GID
-
-- Supplementary GIDs
-
-- Effective capabilities in the accessor's user namespace (i.e. the accessor's
- effective capability set)
-
-- All UIDs and GIDs mapped in the accessor's user namespace (which determine
- if the accessor's capabilities apply to accessed files)
-
-We may choose to delay implementation of file permission checking delegation,
-although this is potentially costly since it doubles the number of required RPCs
-for most operations on shared filesystems. We may also consider compromise
-options, such as only delegating file permission checks for accessors in the
-root user namespace.
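
As an illustration of what permission checking involves (whether performed in the sentry or delegated to the gofer), here is a deliberately simplified sketch of a mode-bit check against the accessor's credentials. It is illustrative only: capability handling is reduced to a boolean, and the execute-bit and user-namespace-mapping special cases are elided.

```go
package sketch

// hasAccess is a simplified permission check; wantBits is a 3-bit rwx mask.
func hasAccess(mode uint16, fileUID, fileGID uint32, wantBits uint16,
	uid, gid uint32, groups []uint32, capDACOverride bool) bool {
	if capDACOverride {
		// Simplified: real checks still require an execute bit for exec.
		return true
	}
	switch {
	case uid == fileUID:
		return (mode>>6)&wantBits == wantBits
	case gid == fileGID || inGroups(groups, fileGID):
		return (mode>>3)&wantBits == wantBits
	default:
		return mode&wantBits == wantBits
	}
}

func inGroups(groups []uint32, g uint32) bool {
	for _, x := range groups {
		if x == g {
			return true
		}
	}
	return false
}
```
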
-
-### Symbolic Links
-
-gVisor usually interprets symbolic link targets in its VFS rather than on the
-filesystem containing the symbolic link; thus e.g. a symlink to
-"/proc/self/maps" on a remote filesystem resolves to said file in the sentry's
-procfs rather than the host's. This implies that:
-
-- Remote filesystem servers that proxy filesystems supporting symlinks must
- check if each path component is a symlink during path traversal.
-
-- Absolute symlinks require that the sentry restart the operation at its
- contextual VFS root (which is task-specific and may not be on a remote
- filesystem at all), so if a remote filesystem server encounters an absolute
- symlink during path traversal on behalf of a path-based operation, it must
- terminate path traversal and return the symlink target.
-
-- Relative symlinks begin target resolution in the parent directory of the
- symlink, so in theory most relative symlinks can be handled automatically
- during the path traversal that encounters the symlink, provided that said
- traversal is supplied with the number of remaining symlinks before `ELOOP`.
- However, the new path traversed by the symlink target may cross VFS mount
- boundaries, such that it's only safe for remote filesystem servers to
- speculatively follow relative symlinks for side-effect-free operations such
- as `stat` (where the sentry can simply ignore results that are inapplicable
- due to crossing mount boundaries). We may choose to delay implementation of
- this feature, at the cost of an additional RPC per relative symlink (note
- that even if the symlink target crosses a mount boundary, the sentry will
- need to `stat` the path to the mount boundary to confirm that each traversed
- component is an accessible directory); until it is implemented, relative
- symlinks may be handled like absolute symlinks, by terminating path
- traversal and returning the symlink target.
-
-The possibility of symlinks (and the possibility of a compromised sentry) means
-that the sentry may issue RPCs with paths that, in the absence of symlinks,
-would traverse beyond the root of the remote filesystem. For example, the sentry
-may issue an RPC with a path like "/foo/../..", on the premise that if "/foo" is
-a symlink then the resulting path may be elsewhere on the remote filesystem. To
-handle this, path traversal must also track its current depth below the remote
-filesystem root, and terminate path traversal if it would ascend beyond this
-point.
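
A minimal sketch of the depth bookkeeping described above: traversal tracks how far below the remote filesystem root it currently is and refuses to ascend past it, even for paths like "/foo/../..". Symlink expansion is ignored here for brevity.

```go
package sketch

// escapesRoot reports whether resolving the given components (ignoring
// symlinks) would ascend above the remote filesystem root.
func escapesRoot(components []string) bool {
	depth := 0
	for _, c := range components {
		switch c {
		case "", ".":
			// No effect on depth.
		case "..":
			depth--
			if depth < 0 {
				return true
			}
		default:
			depth++
		}
	}
	return false
}
```
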
-
-### Path Traversal
-
-Since path-based VFS operations will translate to path-based RPCs, filesystem
-servers will need to handle path traversal. From the perspective of a given
-filesystem implementation in the server, there are two basic approaches to path
-traversal:
-
-- Inode-walk: For each path component, obtain a handle to the underlying
- filesystem object (e.g. with `open(O_PATH)`), check if that object is a
- symlink (as described above) and that that object is accessible by the
- caller (e.g. with `fstat()`), then continue to the next path component (e.g.
- with `openat()`). This ensures that the checked filesystem object is the one
- used to obtain the next object in the traversal, which is intuitively
- appealing. However, while this approach works for host local filesystems, it
- requires features that are not widely supported by other filesystems.
-
-- Path-walk: For each path component, use a path-based operation to determine
- if the filesystem object currently referred to by that path component is a
- symlink / is accessible. This is highly portable, but suffers from quadratic
- behavior (at the level of the underlying filesystem implementation, the
- first path component will be traversed a number of times equal to the number
- of path components in the path).
-
-The implementation should support either option by delegating path traversal to
-filesystem implementations within the server (like VFS and the remote filesystem
-protocol itself), as inode-walking is still safe, efficient, amenable to FD
-caching, and implementable on non-shared host local filesystems (a sufficiently
-common case as to be worth considering in the design).
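
A Linux-only sketch of the inode-walk approach, assuming `golang.org/x/sys/unix`; it is not the runsc gofer's actual implementation, and symlink handling and depth limits are reduced to the bare minimum.

```go
package sketch

import "golang.org/x/sys/unix"

// walkInode opens each component relative to the previous one with O_PATH, so
// the object that was checked is also the object used for the next step. The
// caller owns (and must close) the returned FD.
func walkInode(rootFD int, components []string) (int, error) {
	cur, err := unix.Dup(rootFD)
	if err != nil {
		return -1, err
	}
	for _, c := range components {
		next, err := unix.Openat(cur, c, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
		unix.Close(cur)
		if err != nil {
			return -1, err
		}
		var st unix.Stat_t
		if err := unix.Fstat(next, &st); err != nil {
			unix.Close(next)
			return -1, err
		}
		if st.Mode&unix.S_IFMT == unix.S_IFLNK {
			// A symlink: terminate traversal and hand the target back to the
			// sentry, as described in "Symbolic Links" above. Elided here.
			unix.Close(next)
			return -1, unix.ELOOP
		}
		cur = next
	}
	return cur, nil
}
```
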
-
-Both approaches are susceptible to race conditions that may permit sandboxed
-filesystem escapes:
-
-- Under inode-walk, a malicious application may cause a directory to be moved
- (with `rename`) during path traversal, such that the filesystem
- implementation incorrectly determines whether subsequent inodes are located
- in paths that should be visible to sandboxed applications.
-
-- Under path-walk, a malicious application may cause a non-symlink file to be
- replaced with a symlink during path traversal, such that following path
- operations will incorrectly follow the symlink.
-
-Both race conditions can, to some extent, be mitigated in filesystem server
-implementations by synchronizing path traversal with the hazardous operations in
-question. However, shared filesystems are frequently used to share data between
-sandboxed and unsandboxed applications in a controlled way, and in some cases a
-malicious sandboxed application may be able to take advantage of a hazardous
-filesystem operation performed by an unsandboxed application. In some cases,
-filesystem features may be available to ensure safety even in such cases (e.g.
-[the new openat2() syscall](https://man7.org/linux/man-pages/man2/openat2.2.html)),
-but it is not clear how to solve this problem in general. (Note that this issue
-is not specific to our design; rather, it is a fundamental limitation of
-filesystem sandboxing.)
-
-### Filesystem Multiplexing
-
-A given sentry may need to access multiple distinct remote filesystems (e.g.
-different volumes for a given container). In many cases, there is no advantage
-to serving these filesystems from distinct filesystem servers, or accessing them
-through distinct connections (factors such as maximum RPC concurrency should be
-based on available host resources). Therefore, the protocol should support
-multiplexing of distinct filesystem trees within a single session. 9P supports
-this by allowing multiple calls to the `attach` RPC to produce fids representing
-distinct filesystem trees, but this is somewhat clunky; we propose a much
-simpler mechanism wherein each message that conveys a path also conveys a
-numeric filesystem ID that identifies a filesystem tree.
-
-## Alternatives Considered
-
-### Additional Extensions to 9P
-
-There are at least three conceptual aspects to 9P:
-
-- Wire format: messages with a 4-byte little-endian size prefix, strings with
- a 2-byte little-endian size prefix, etc. Whether the wire format is worth
- retaining is unclear; in particular, it's unclear that the 9P wire format
- has a significant advantage over protobufs, which are substantially easier
- to extend. Note that the official Go protobuf implementation is widely known
- to suffer from a significant number of performance deficiencies, so if we
- choose to switch to protobuf, we may need to use an alternative toolchain
- such as `gogo/protobuf` (which is also widely used in the Go ecosystem, e.g.
- by Kubernetes).
-
-- Filesystem model: fids, qids, etc. Discarding this is one of the motivations
- for this proposal.
-
-- RPCs: Twalk, Tlopen, etc. In addition to previously-described
- inefficiencies, most of these are dependent on the filesystem model and
- therefore must be discarded.
-
-### FUSE
-
-The FUSE (Filesystem in Userspace) protocol is frequently used to provide
-arbitrary userspace filesystem implementations to a host Linux kernel.
-Unfortunately, FUSE is also inode-based, and therefore doesn't address any of
-the problems we have with 9P.
-
-### virtio-fs
-
-virtio-fs is an ongoing project aimed at improving Linux VM filesystem
-performance when accessing Linux host filesystems (vs. virtio-9p). In brief, it
-is based on:
-
-- Using a FUSE client in the guest that communicates over virtio with a FUSE
- server in the host.
-
-- Using DAX to map the host page cache into the guest.
-
-- Using a file metadata table in shared memory to avoid VM exits for metadata
- updates.
-
-None of these improvements seem applicable to gVisor:
-
-- As explained above, FUSE is still inode-based, so it is still susceptible to
- most of the problems we have with 9P.
-
-- Our use of host file descriptors already allows us to leverage the host page
- cache for file contents.
-
-- Our need for shared filesystem coherence is usually based on a user
- requirement that an out-of-sandbox filesystem mutation is guaranteed to be
- visible by all subsequent observations from within the sandbox, or vice
- versa; it's not clear that this can be guaranteed without a synchronous
- signaling mechanism like an RPC.
diff --git a/pkg/log/BUILD b/pkg/log/BUILD
deleted file mode 100644
index 3ed6aba5c..000000000
--- a/pkg/log/BUILD
+++ /dev/null
@@ -1,32 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "log",
- srcs = [
- "glog.go",
- "json.go",
- "json_k8s.go",
- "log.go",
- ],
- marshal = False,
- stateify = False,
- visibility = [
- "//visibility:public",
- ],
- deps = [
- "//pkg/linewriter",
- "//pkg/sync",
- ],
-)
-
-go_test(
- name = "log_test",
- size = "small",
- srcs = [
- "json_test.go",
- "log_test.go",
- ],
- library = ":log",
-)
diff --git a/pkg/log/json_test.go b/pkg/log/json_test.go
deleted file mode 100644
index f25224fe1..000000000
--- a/pkg/log/json_test.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package log
-
-import (
- "encoding/json"
- "testing"
-)
-
-// Tests that Level can marshal/unmarshal properly.
-func TestLevelMarshal(t *testing.T) {
- lvs := []Level{Warning, Info, Debug}
- for _, lv := range lvs {
- bs, err := lv.MarshalJSON()
- if err != nil {
- t.Errorf("error marshaling %v: %v", lv, err)
- }
- var lv2 Level
- if err := lv2.UnmarshalJSON(bs); err != nil {
- t.Errorf("error unmarshaling %v: %v", bs, err)
- }
- if lv != lv2 {
- t.Errorf("marshal/unmarshal level got %v wanted %v", lv2, lv)
- }
- }
-}
-
-// Test that integers can be properly unmarshaled.
-func TestUnmarshalFromInt(t *testing.T) {
- tcs := []struct {
- i int
- want Level
- }{
- {0, Warning},
- {1, Info},
- {2, Debug},
- }
-
- for _, tc := range tcs {
- j, err := json.Marshal(tc.i)
- if err != nil {
- t.Errorf("error marshaling %v: %v", tc.i, err)
- }
- var lv Level
- if err := lv.UnmarshalJSON(j); err != nil {
- t.Errorf("error unmarshaling %v: %v", j, err)
- }
- if lv != tc.want {
- t.Errorf("marshal/unmarshal %v got %v want %v", tc.i, lv, tc.want)
- }
- }
-}
diff --git a/pkg/log/log_test.go b/pkg/log/log_test.go
deleted file mode 100644
index 9ff18559b..000000000
--- a/pkg/log/log_test.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package log
-
-import (
- "fmt"
- "strings"
- "testing"
-)
-
-type testWriter struct {
- lines []string
- fail bool
- limit int
-}
-
-func (w *testWriter) Write(bytes []byte) (int, error) {
- if w.fail {
- return 0, fmt.Errorf("simulated failure")
- }
- if w.limit > 0 && len(w.lines) >= w.limit {
- return len(bytes), nil
- }
- w.lines = append(w.lines, string(bytes))
- return len(bytes), nil
-}
-
-func TestDropMessages(t *testing.T) {
- tw := &testWriter{}
- w := Writer{Next: tw}
- if _, err := w.Write([]byte("line 1\n")); err != nil {
- t.Fatalf("Write failed, err: %v", err)
- }
-
- tw.fail = true
- if _, err := w.Write([]byte("error\n")); err == nil {
- t.Fatalf("Write should have failed")
- }
- if _, err := w.Write([]byte("error\n")); err == nil {
- t.Fatalf("Write should have failed")
- }
-
- fmt.Printf("writer: %#v\n", &w)
-
- tw.fail = false
- if _, err := w.Write([]byte("line 2\n")); err != nil {
- t.Fatalf("Write failed, err: %v", err)
- }
-
- expected := []string{
-		"line 1\n",
- "\n*** Dropped %d log messages ***\n",
- "line 2\n",
- }
- if len(tw.lines) != len(expected) {
- t.Fatalf("Writer should have logged %d lines, got: %v, expected: %v", len(expected), tw.lines, expected)
- }
- for i, l := range tw.lines {
-		if l != expected[i] {
- t.Fatalf("line %d doesn't match, got: %v, expected: %v", i, l, expected[i])
- }
- }
-}
-
-func TestCaller(t *testing.T) {
- tw := &testWriter{}
- e := GoogleEmitter{Writer: &Writer{Next: tw}}
- bl := &BasicLogger{
- Emitter: e,
- Level: Debug,
- }
- bl.Debugf("testing...\n") // Just for file + line.
- if len(tw.lines) != 1 {
- t.Errorf("expected 1 line, got %d", len(tw.lines))
- }
- if !strings.Contains(tw.lines[0], "log_test.go") {
- t.Errorf("expected log_test.go, got %q", tw.lines[0])
- }
-}
-
-func BenchmarkGoogleLogging(b *testing.B) {
- tw := &testWriter{
- limit: 1, // Only record one message.
- }
- e := GoogleEmitter{Writer: &Writer{Next: tw}}
- bl := &BasicLogger{
- Emitter: e,
- Level: Debug,
- }
- for i := 0; i < b.N; i++ {
- bl.Debugf("hello %d, %d, %d", 1, 2, 3)
- }
-}
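
A small sketch of the logging setup the deleted tests construct: a `GoogleEmitter` (glog-style prefixes) wrapping a `Writer` that forwards to any `io.Writer`. The use of `os.Stderr` as the sink is an assumption for illustration; the wiring itself mirrors `TestCaller` above.

```go
package main

import (
	"os"

	"gvisor.dev/gvisor/pkg/log"
)

func main() {
	// Mirrors the emitter/logger wiring in the deleted tests.
	e := log.GoogleEmitter{Writer: &log.Writer{Next: os.Stderr}}
	l := &log.BasicLogger{Emitter: e, Level: log.Debug}
	l.Debugf("starting up: pid=%d", os.Getpid())
	l.Infof("ready")
}
```
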
diff --git a/pkg/marshal/BUILD b/pkg/marshal/BUILD
deleted file mode 100644
index 7a5002176..000000000
--- a/pkg/marshal/BUILD
+++ /dev/null
@@ -1,16 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-licenses(["notice"])
-
-go_library(
- name = "marshal",
- srcs = [
- "marshal.go",
- "marshal_impl_util.go",
- "util.go",
- ],
- visibility = [
- "//:sandbox",
- ],
- deps = ["//pkg/hostarch"],
-)
diff --git a/pkg/marshal/marshal_state_autogen.go b/pkg/marshal/marshal_state_autogen.go
new file mode 100644
index 000000000..a0a953158
--- /dev/null
+++ b/pkg/marshal/marshal_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package marshal
diff --git a/pkg/marshal/primitive/BUILD b/pkg/marshal/primitive/BUILD
deleted file mode 100644
index 6e5ce136d..000000000
--- a/pkg/marshal/primitive/BUILD
+++ /dev/null
@@ -1,18 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-licenses(["notice"])
-
-go_library(
- name = "primitive",
- srcs = [
- "primitive.go",
- ],
- marshal = True,
- visibility = [
- "//:sandbox",
- ],
- deps = [
- "//pkg/hostarch",
- "//pkg/marshal",
- ],
-)
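
The generated file added below implements `marshal.Marshallable` for the scalar newtypes declared in `primitive.go`. A minimal sketch of the byte-level round trip it provides follows; the `CopyIn`/`CopyOut` variants additionally require a `marshal.CopyContext` (usually a task) and are omitted here.

```go
package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/marshal/primitive"
)

func main() {
	var in primitive.Int32 = -42
	buf := make([]byte, in.SizeBytes())
	in.MarshalBytes(buf) // Encodes using hostarch.ByteOrder.

	var out primitive.Int32
	out.UnmarshalBytes(buf)
	fmt.Println(out) // -42
}
```
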
diff --git a/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go b/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go
new file mode 100644
index 000000000..4ccc2a8c8
--- /dev/null
+++ b/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go
@@ -0,0 +1,1366 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// If there are issues with build tag aggregation, see
+// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here
+// come from the input set of files used to generate this file. This input set
+// is filtered based on pre-defined file suffixes related to build tags, see
+// tools/defs.bzl:calculate_sets().
+
+package primitive
+
+import (
+ "gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/hostarch"
+ "gvisor.dev/gvisor/pkg/marshal"
+ "io"
+ "reflect"
+ "runtime"
+ "unsafe"
+)
+
+// Marshallable types used by this file.
+var _ marshal.Marshallable = (*Int16)(nil)
+var _ marshal.Marshallable = (*Int32)(nil)
+var _ marshal.Marshallable = (*Int64)(nil)
+var _ marshal.Marshallable = (*Int8)(nil)
+var _ marshal.Marshallable = (*Uint16)(nil)
+var _ marshal.Marshallable = (*Uint32)(nil)
+var _ marshal.Marshallable = (*Uint64)(nil)
+var _ marshal.Marshallable = (*Uint8)(nil)
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (i *Int16) SizeBytes() int {
+ return 2
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *Int16) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(*i))
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *Int16) UnmarshalBytes(src []byte) {
+ *i = Int16(int16(hostarch.ByteOrder.Uint16(src[:2])))
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *Int16) Packed() bool {
+ // Scalar newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *Int16) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(i), uintptr(i.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *Int16) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *Int16) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *Int16) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *Int16) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *Int16) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// CopyInt16SliceIn copies in a slice of int16 objects from the task's memory.
+//go:nosplit
+func CopyInt16SliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []int16) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Int16)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&dst)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that dst
+ // must live until the use above.
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyInt16SliceOut copies a slice of int16 objects to the task's memory.
+//go:nosplit
+func CopyInt16SliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []int16) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Int16)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&src)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyOutBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that src
+ // must live until the use above.
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// MarshalUnsafeInt16Slice is like Int16.MarshalUnsafe, but for a []Int16.
+func MarshalUnsafeInt16Slice(src []Int16, dst []byte) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Int16)(nil).SizeBytes()
+
+ dst = dst[:size*count]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(dst)))
+ return size*count, nil
+}
+
+// UnmarshalUnsafeInt16Slice is like Int16.UnmarshalUnsafe, but for a []Int16.
+func UnmarshalUnsafeInt16Slice(dst []Int16, src []byte) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Int16)(nil).SizeBytes()
+
+ src = src[:(size*count)]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(src)))
+ return size*count, nil
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (i *Int32) SizeBytes() int {
+ return 4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *Int32) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(*i))
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *Int32) UnmarshalBytes(src []byte) {
+ *i = Int32(int32(hostarch.ByteOrder.Uint32(src[:4])))
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *Int32) Packed() bool {
+ // Scalar newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *Int32) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(i), uintptr(i.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *Int32) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *Int32) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *Int32) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *Int32) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *Int32) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// CopyInt32SliceIn copies in a slice of int32 objects from the task's memory.
+//go:nosplit
+func CopyInt32SliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []int32) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Int32)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&dst)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that dst
+ // must live until the use above.
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyInt32SliceOut copies a slice of int32 objects to the task's memory.
+//go:nosplit
+func CopyInt32SliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []int32) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Int32)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&src)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyOutBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that src
+ // must live until the use above.
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// MarshalUnsafeInt32Slice is like Int32.MarshalUnsafe, but for a []Int32.
+func MarshalUnsafeInt32Slice(src []Int32, dst []byte) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Int32)(nil).SizeBytes()
+
+ dst = dst[:size*count]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(dst)))
+ return size*count, nil
+}
+
+// UnmarshalUnsafeInt32Slice is like Int32.UnmarshalUnsafe, but for a []Int32.
+func UnmarshalUnsafeInt32Slice(dst []Int32, src []byte) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Int32)(nil).SizeBytes()
+
+ src = src[:(size*count)]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(src)))
+ return size*count, nil
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (i *Int64) SizeBytes() int {
+ return 8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *Int64) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(*i))
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *Int64) UnmarshalBytes(src []byte) {
+ *i = Int64(int64(hostarch.ByteOrder.Uint64(src[:8])))
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *Int64) Packed() bool {
+ // Scalar newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *Int64) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(i), uintptr(i.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *Int64) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *Int64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *Int64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *Int64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *Int64) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// CopyInt64SliceIn copies in a slice of int64 objects from the task's memory.
+//go:nosplit
+func CopyInt64SliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []int64) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Int64)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&dst)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that dst
+ // must live until the use above.
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyInt64SliceOut copies a slice of int64 objects to the task's memory.
+//go:nosplit
+func CopyInt64SliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []int64) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Int64)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&src)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyOutBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that src
+ // must live until the use above.
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// MarshalUnsafeInt64Slice is like Int64.MarshalUnsafe, but for a []Int64.
+func MarshalUnsafeInt64Slice(src []Int64, dst []byte) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Int64)(nil).SizeBytes()
+
+ dst = dst[:size*count]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(dst)))
+ return size*count, nil
+}
+
+// UnmarshalUnsafeInt64Slice is like Int64.UnmarshalUnsafe, but for a []Int64.
+func UnmarshalUnsafeInt64Slice(dst []Int64, src []byte) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Int64)(nil).SizeBytes()
+
+ src = src[:(size*count)]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(src)))
+ return size*count, nil
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (i *Int8) SizeBytes() int {
+ return 1
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *Int8) MarshalBytes(dst []byte) {
+ dst[0] = byte(*i)
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *Int8) UnmarshalBytes(src []byte) {
+ *i = Int8(int8(src[0]))
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *Int8) Packed() bool {
+ // Scalar newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *Int8) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(i), uintptr(i.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *Int8) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *Int8) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *Int8) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *Int8) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *Int8) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// CopyInt8SliceIn copies in a slice of int8 objects from the task's memory.
+//go:nosplit
+func CopyInt8SliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []int8) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Int8)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&dst)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that dst
+ // must live until the use above.
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyInt8SliceOut copies a slice of int8 objects to the task's memory.
+//go:nosplit
+func CopyInt8SliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []int8) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Int8)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&src)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyOutBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that src
+ // must live until the use above.
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// MarshalUnsafeInt8Slice is like Int8.MarshalUnsafe, but for a []Int8.
+func MarshalUnsafeInt8Slice(src []Int8, dst []byte) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Int8)(nil).SizeBytes()
+
+ dst = dst[:size*count]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(dst)))
+ return size*count, nil
+}
+
+// UnmarshalUnsafeInt8Slice is like Int8.UnmarshalUnsafe, but for a []Int8.
+func UnmarshalUnsafeInt8Slice(dst []Int8, src []byte) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Int8)(nil).SizeBytes()
+
+ src = src[:(size*count)]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(src)))
+ return size*count, nil
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (u *Uint16) SizeBytes() int {
+ return 2
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (u *Uint16) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(*u))
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (u *Uint16) UnmarshalBytes(src []byte) {
+ *u = Uint16(uint16(hostarch.ByteOrder.Uint16(src[:2])))
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (u *Uint16) Packed() bool {
+ // Scalar newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (u *Uint16) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(u), uintptr(u.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (u *Uint16) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(u), unsafe.Pointer(&src[0]), uintptr(u.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (u *Uint16) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (u *Uint16) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return u.CopyOutN(cc, addr, u.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (u *Uint16) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (u *Uint16) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// CopyUint16SliceIn copies in a slice of uint16 objects from the task's memory.
+//go:nosplit
+func CopyUint16SliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []uint16) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Uint16)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&dst)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that dst
+ // must live until the use above.
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyUint16SliceOut copies a slice of uint16 objects to the task's memory.
+//go:nosplit
+func CopyUint16SliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []uint16) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Uint16)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&src)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyOutBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that src
+ // must live until the use above.
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// MarshalUnsafeUint16Slice is like Uint16.MarshalUnsafe, but for a []Uint16.
+func MarshalUnsafeUint16Slice(src []Uint16, dst []byte) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Uint16)(nil).SizeBytes()
+
+ dst = dst[:size*count]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(dst)))
+ return size*count, nil
+}
+
+// UnmarshalUnsafeUint16Slice is like Uint16.UnmarshalUnsafe, but for a []Uint16.
+func UnmarshalUnsafeUint16Slice(dst []Uint16, src []byte) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Uint16)(nil).SizeBytes()
+
+ src = src[:(size*count)]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(src)))
+ return size*count, nil
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (u *Uint32) SizeBytes() int {
+ return 4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (u *Uint32) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(*u))
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (u *Uint32) UnmarshalBytes(src []byte) {
+ *u = Uint32(uint32(hostarch.ByteOrder.Uint32(src[:4])))
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (u *Uint32) Packed() bool {
+ // Scalar newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (u *Uint32) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(u), uintptr(u.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (u *Uint32) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(u), unsafe.Pointer(&src[0]), uintptr(u.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (u *Uint32) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (u *Uint32) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return u.CopyOutN(cc, addr, u.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (u *Uint32) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (u *Uint32) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// CopyUint32SliceIn copies in a slice of uint32 objects from the task's memory.
+//go:nosplit
+func CopyUint32SliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []uint32) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Uint32)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&dst)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that dst
+ // must live until the use above.
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyUint32SliceOut copies a slice of uint32 objects to the task's memory.
+//go:nosplit
+func CopyUint32SliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []uint32) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Uint32)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&src)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyOutBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that src
+ // must live until the use above.
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// MarshalUnsafeUint32Slice is like Uint32.MarshalUnsafe, but for a []Uint32.
+func MarshalUnsafeUint32Slice(src []Uint32, dst []byte) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Uint32)(nil).SizeBytes()
+
+ dst = dst[:size*count]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(dst)))
+ return size*count, nil
+}
+
+// UnmarshalUnsafeUint32Slice is like Uint32.UnmarshalUnsafe, but for a []Uint32.
+func UnmarshalUnsafeUint32Slice(dst []Uint32, src []byte) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Uint32)(nil).SizeBytes()
+
+ src = src[:(size*count)]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(src)))
+ return size*count, nil
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (u *Uint64) SizeBytes() int {
+ return 8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (u *Uint64) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(*u))
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (u *Uint64) UnmarshalBytes(src []byte) {
+ *u = Uint64(uint64(hostarch.ByteOrder.Uint64(src[:8])))
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (u *Uint64) Packed() bool {
+ // Scalar newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (u *Uint64) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(u), uintptr(u.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (u *Uint64) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(u), unsafe.Pointer(&src[0]), uintptr(u.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (u *Uint64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (u *Uint64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return u.CopyOutN(cc, addr, u.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (u *Uint64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (u *Uint64) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// CopyUint64SliceIn copies in a slice of uint64 objects from the task's memory.
+//go:nosplit
+func CopyUint64SliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []uint64) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Uint64)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&dst)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that dst
+ // must live until the use above.
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyUint64SliceOut copies a slice of uint64 objects to the task's memory.
+//go:nosplit
+func CopyUint64SliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []uint64) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Uint64)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&src)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyOutBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that src
+ // must live until the use above.
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// MarshalUnsafeUint64Slice is like Uint64.MarshalUnsafe, but for a []Uint64.
+func MarshalUnsafeUint64Slice(src []Uint64, dst []byte) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Uint64)(nil).SizeBytes()
+
+ dst = dst[:size*count]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(dst)))
+ return size*count, nil
+}
+
+// UnmarshalUnsafeUint64Slice is like Uint64.UnmarshalUnsafe, but for a []Uint64.
+func UnmarshalUnsafeUint64Slice(dst []Uint64, src []byte) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Uint64)(nil).SizeBytes()
+
+ src = src[:(size*count)]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(src)))
+ return size*count, nil
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (u *Uint8) SizeBytes() int {
+ return 1
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (u *Uint8) MarshalBytes(dst []byte) {
+ dst[0] = byte(*u)
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (u *Uint8) UnmarshalBytes(src []byte) {
+ *u = Uint8(uint8(src[0]))
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (u *Uint8) Packed() bool {
+ // Scalar newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (u *Uint8) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(u), uintptr(u.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (u *Uint8) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(u), unsafe.Pointer(&src[0]), uintptr(u.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (u *Uint8) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (u *Uint8) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return u.CopyOutN(cc, addr, u.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (u *Uint8) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (u *Uint8) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// CopyUint8SliceIn copies in a slice of uint8 objects from the task's memory.
+//go:nosplit
+func CopyUint8SliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []uint8) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Uint8)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&dst)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that dst
+ // must live until the use above.
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyUint8SliceOut copies a slice of uint8 objects to the task's memory.
+//go:nosplit
+func CopyUint8SliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []uint8) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Uint8)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&src)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyOutBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that src
+ // must live until the use above.
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// MarshalUnsafeUint8Slice is like Uint8.MarshalUnsafe, but for a []Uint8.
+func MarshalUnsafeUint8Slice(src []Uint8, dst []byte) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Uint8)(nil).SizeBytes()
+
+ dst = dst[:size*count]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(dst)))
+ return size*count, nil
+}
+
+// UnmarshalUnsafeUint8Slice is like Uint8.UnmarshalUnsafe, but for a []Uint8.
+func UnmarshalUnsafeUint8Slice(dst []Uint8, src []byte) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Uint8)(nil).SizeBytes()
+
+ src = src[:(size*count)]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(src)))
+ return size*count, nil
+}
+
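For readers unfamiliar with this generated code: each primitive newtype round-trips through a byte slice in hostarch.ByteOrder, and the Unsafe variants simply memmove the value because scalar newtypes are always packed. A minimal usage sketch (the gvisor.dev import path for pkg/marshal/primitive is assumed from the file path above):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/marshal/primitive"
)

func main() {
	// Marshal a Uint32 into a 4-byte buffer and unmarshal it back.
	u := primitive.Uint32(0xdeadbeef)
	buf := make([]byte, u.SizeBytes()) // 4 bytes.
	u.MarshalBytes(buf)                // Encoded in hostarch.ByteOrder.

	var v primitive.Uint32
	v.UnmarshalBytes(buf)
	fmt.Printf("%#x\n", uint32(v)) // 0xdeadbeef
}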
diff --git a/pkg/marshal/primitive/primitive_state_autogen.go b/pkg/marshal/primitive/primitive_state_autogen.go
new file mode 100644
index 000000000..f9db3a918
--- /dev/null
+++ b/pkg/marshal/primitive/primitive_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package primitive
diff --git a/pkg/memutil/BUILD b/pkg/memutil/BUILD
deleted file mode 100644
index bea595286..000000000
--- a/pkg/memutil/BUILD
+++ /dev/null
@@ -1,14 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "memutil",
- srcs = [
- "memfd_linux_unsafe.go",
- "memutil.go",
- "mmap.go",
- ],
- visibility = ["//visibility:public"],
- deps = ["@org_golang_x_sys//unix:go_default_library"],
-)
diff --git a/pkg/memutil/memutil_linux_unsafe_state_autogen.go b/pkg/memutil/memutil_linux_unsafe_state_autogen.go
new file mode 100644
index 000000000..173297149
--- /dev/null
+++ b/pkg/memutil/memutil_linux_unsafe_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build linux
+
+package memutil
diff --git a/pkg/memutil/memutil_state_autogen.go b/pkg/memutil/memutil_state_autogen.go
new file mode 100644
index 000000000..4aa01530c
--- /dev/null
+++ b/pkg/memutil/memutil_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build go1.1
+
+package memutil
diff --git a/pkg/merkletree/BUILD b/pkg/merkletree/BUILD
deleted file mode 100644
index dcd6c3bf5..000000000
--- a/pkg/merkletree/BUILD
+++ /dev/null
@@ -1,23 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "merkletree",
- srcs = ["merkletree.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/hostarch",
- ],
-)
-
-go_test(
- name = "merkletree_test",
- srcs = ["merkletree_test.go"],
- library = ":merkletree",
- deps = [
- "//pkg/abi/linux",
- "//pkg/hostarch",
- ],
-)
diff --git a/pkg/merkletree/merkletree_state_autogen.go b/pkg/merkletree/merkletree_state_autogen.go
new file mode 100644
index 000000000..e5670cde4
--- /dev/null
+++ b/pkg/merkletree/merkletree_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package merkletree
diff --git a/pkg/merkletree/merkletree_test.go b/pkg/merkletree/merkletree_test.go
deleted file mode 100644
index 1447fd139..000000000
--- a/pkg/merkletree/merkletree_test.go
+++ /dev/null
@@ -1,905 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package merkletree
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "math/rand"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
-
- "gvisor.dev/gvisor/pkg/hostarch"
-)
-
-func TestLayout(t *testing.T) {
- testCases := []struct {
- name string
- dataSize int64
- hashAlgorithms int
- dataAndTreeInSameFile bool
- expectedDigestSize int64
- expectedLevelOffset []int64
- }{
- {
- name: "SmallSizeSHA256SeparateFile",
- dataSize: 100,
- hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA256,
- dataAndTreeInSameFile: false,
- expectedDigestSize: 32,
- expectedLevelOffset: []int64{0},
- },
- {
- name: "SmallSizeSHA512SeparateFile",
- dataSize: 100,
- hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA512,
- dataAndTreeInSameFile: false,
- expectedDigestSize: 64,
- expectedLevelOffset: []int64{0},
- },
- {
- name: "SmallSizeSHA256SameFile",
- dataSize: 100,
- hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA256,
- dataAndTreeInSameFile: true,
- expectedDigestSize: 32,
- expectedLevelOffset: []int64{hostarch.PageSize},
- },
- {
- name: "SmallSizeSHA512SameFile",
- dataSize: 100,
- hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA512,
- dataAndTreeInSameFile: true,
- expectedDigestSize: 64,
- expectedLevelOffset: []int64{hostarch.PageSize},
- },
- {
- name: "MiddleSizeSHA256SeparateFile",
- dataSize: 1000000,
- hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA256,
- dataAndTreeInSameFile: false,
- expectedDigestSize: 32,
- expectedLevelOffset: []int64{0, 2 * hostarch.PageSize, 3 * hostarch.PageSize},
- },
- {
- name: "MiddleSizeSHA512SeparateFile",
- dataSize: 1000000,
- hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA512,
- dataAndTreeInSameFile: false,
- expectedDigestSize: 64,
- expectedLevelOffset: []int64{0, 4 * hostarch.PageSize, 5 * hostarch.PageSize},
- },
- {
- name: "MiddleSizeSHA256SameFile",
- dataSize: 1000000,
- hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA256,
- dataAndTreeInSameFile: true,
- expectedDigestSize: 32,
- expectedLevelOffset: []int64{245 * hostarch.PageSize, 247 * hostarch.PageSize, 248 * hostarch.PageSize},
- },
- {
- name: "MiddleSizeSHA512SameFile",
- dataSize: 1000000,
- hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA512,
- dataAndTreeInSameFile: true,
- expectedDigestSize: 64,
- expectedLevelOffset: []int64{245 * hostarch.PageSize, 249 * hostarch.PageSize, 250 * hostarch.PageSize},
- },
- {
- name: "LargeSizeSHA256SeparateFile",
- dataSize: 4096 * int64(hostarch.PageSize),
- hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA256,
- dataAndTreeInSameFile: false,
- expectedDigestSize: 32,
- expectedLevelOffset: []int64{0, 32 * hostarch.PageSize, 33 * hostarch.PageSize},
- },
- {
- name: "LargeSizeSHA512SeparateFile",
- dataSize: 4096 * int64(hostarch.PageSize),
- hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA512,
- dataAndTreeInSameFile: false,
- expectedDigestSize: 64,
- expectedLevelOffset: []int64{0, 64 * hostarch.PageSize, 65 * hostarch.PageSize},
- },
- {
- name: "LargeSizeSHA256SameFile",
- dataSize: 4096 * int64(hostarch.PageSize),
- hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA256,
- dataAndTreeInSameFile: true,
- expectedDigestSize: 32,
- expectedLevelOffset: []int64{4096 * hostarch.PageSize, 4128 * hostarch.PageSize, 4129 * hostarch.PageSize},
- },
- {
- name: "LargeSizeSHA512SameFile",
- dataSize: 4096 * int64(hostarch.PageSize),
- hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA512,
- dataAndTreeInSameFile: true,
- expectedDigestSize: 64,
- expectedLevelOffset: []int64{4096 * hostarch.PageSize, 4160 * hostarch.PageSize, 4161 * hostarch.PageSize},
- },
- }
-
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- l, err := InitLayout(tc.dataSize, tc.hashAlgorithms, tc.dataAndTreeInSameFile)
- if err != nil {
- t.Fatalf("Failed to InitLayout: %v", err)
- }
- if l.blockSize != int64(hostarch.PageSize) {
- t.Errorf("Got blockSize %d, want %d", l.blockSize, hostarch.PageSize)
- }
- if l.digestSize != tc.expectedDigestSize {
- t.Errorf("Got digestSize %d, want %d", l.digestSize, sha256DigestSize)
- }
- if l.numLevels() != len(tc.expectedLevelOffset) {
- t.Errorf("Got levels %d, want %d", l.numLevels(), len(tc.expectedLevelOffset))
- }
- for i := 0; i < l.numLevels() && i < len(tc.expectedLevelOffset); i++ {
- if l.levelOffset[i] != tc.expectedLevelOffset[i] {
- t.Errorf("Got levelStart[%d] %d, want %d", i, l.levelOffset[i], tc.expectedLevelOffset[i])
- }
- }
- })
- }
-}
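The level offsets above follow from simple arithmetic: each Merkle level stores one digest per block of the level below, padded to whole blocks, and when data and tree share a file the tree starts at the first block boundary after the data. For MiddleSizeSHA256SeparateFile, 1,000,000 bytes span 245 blocks, whose 245 32-byte digests need 2 pages, whose 2 digests need 1 page, giving offsets {0, 2 pages, 3 pages}. A rough re-derivation of that arithmetic (an illustrative sketch, not the actual InitLayout implementation):

package main

import "fmt"

// levelOffsets re-derives the per-level byte offsets checked by TestLayout.
func levelOffsets(dataSize, digestSize, blockSize int64, dataAndTreeInSameFile bool) []int64 {
	digestsPerBlock := blockSize / digestSize
	off := int64(0)
	if dataAndTreeInSameFile {
		// The tree is appended after the data, rounded up to a block boundary.
		off = (dataSize + blockSize - 1) / blockSize * blockSize
	}
	// numBlocks is the number of blocks hashed at the current level.
	numBlocks := (dataSize + blockSize - 1) / blockSize
	var offsets []int64
	for {
		offsets = append(offsets, off)
		if numBlocks <= 1 {
			break // This level is the root.
		}
		// The level above stores one digest per block of this level,
		// padded out to whole blocks.
		levelBlocks := (numBlocks + digestsPerBlock - 1) / digestsPerBlock
		off += levelBlocks * blockSize
		numBlocks = levelBlocks
	}
	return offsets
}

func main() {
	fmt.Println(levelOffsets(1000000, 32, 4096, false)) // [0 8192 12288]
	fmt.Println(levelOffsets(1000000, 32, 4096, true))  // [1003520 1011712 1015808]
}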
-
-const (
- defaultName = "merkle_test"
- defaultMode = 0644
- defaultUID = 0
- defaultGID = 0
- defaultSymlinkPath = "merkle_test_link"
- defaultHashAlgorithm = linux.FS_VERITY_HASH_ALG_SHA256
-)
-
-// bytesReadWriter is used to read from/write to a byte array. Unlike
-// bytes.Buffer, it keeps the whole buffer during read so that it can be reused.
-type bytesReadWriter struct {
- // bytes contains the underlying byte array.
- bytes []byte
-	// readPos is the current location for Read. Write always appends to
- // the end of the array.
- readPos int
-}
-
-func (brw *bytesReadWriter) Write(p []byte) (int, error) {
- brw.bytes = append(brw.bytes, p...)
- return len(p), nil
-}
-
-func (brw *bytesReadWriter) ReadAt(p []byte, off int64) (int, error) {
- bytesRead := copy(p, brw.bytes[off:])
- if bytesRead == 0 {
- return bytesRead, io.EOF
- }
- return bytesRead, nil
-}
-
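The distinction from bytes.Buffer matters because generation and verification re-read the same bytes at multiple offsets; a tiny hypothetical sketch of the difference:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	// bytes.Buffer consumes data as it is read...
	var buf bytes.Buffer
	buf.Write([]byte("merkle"))
	p := make([]byte, 3)
	buf.Read(p)
	fmt.Println(buf.Len()) // 3: the first half is gone.

	// ...whereas a ReaderAt over a plain byte slice (the role bytesReadWriter
	// plays) can re-read any offset repeatedly without consuming anything.
	r := bytes.NewReader([]byte("merkle"))
	r.ReadAt(p, 0) // "mer"
	r.ReadAt(p, 3) // "kle"
	fmt.Println(string(p), r.Len()) // kle 6
}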
-func TestGenerate(t *testing.T) {
-	// Each test case provides the input data directly; expectedHash is the
-	// expected root hash for the given hash algorithm and tree layout.
- testCases := []struct {
- name string
- data []byte
- hashAlgorithms int
- dataAndTreeInSameFile bool
- expectedHash []byte
- }{
- {
- name: "OnePageZeroesSHA256SeparateFile",
- data: bytes.Repeat([]byte{0}, hostarch.PageSize),
- hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA256,
- dataAndTreeInSameFile: false,
- expectedHash: []byte{78, 38, 225, 107, 61, 246, 26, 6, 71, 163, 254, 97, 112, 200, 87, 232, 190, 87, 231, 160, 119, 124, 61, 229, 49, 126, 90, 223, 134, 51, 77, 182},
- },
- {
- name: "OnePageZeroesSHA256SameFile",
- data: bytes.Repeat([]byte{0}, hostarch.PageSize),
- hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA256,
- dataAndTreeInSameFile: true,
- expectedHash: []byte{78, 38, 225, 107, 61, 246, 26, 6, 71, 163, 254, 97, 112, 200, 87, 232, 190, 87, 231, 160, 119, 124, 61, 229, 49, 126, 90, 223, 134, 51, 77, 182},
- },
- {
- name: "OnePageZeroesSHA512SeparateFile",
- data: bytes.Repeat([]byte{0}, hostarch.PageSize),
- hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA512,
- dataAndTreeInSameFile: false,
- expectedHash: []byte{221, 45, 182, 132, 61, 212, 227, 145, 150, 131, 98, 221, 195, 5, 89, 21, 188, 36, 250, 101, 85, 78, 197, 253, 193, 23, 74, 219, 28, 108, 77, 47, 65, 79, 123, 144, 50, 245, 109, 72, 71, 80, 24, 77, 158, 95, 242, 185, 109, 163, 105, 183, 67, 106, 55, 194, 223, 46, 12, 242, 165, 203, 172, 254},
- },
- {
- name: "OnePageZeroesSHA512SameFile",
- data: bytes.Repeat([]byte{0}, hostarch.PageSize),
- hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA512,
- dataAndTreeInSameFile: true,
- expectedHash: []byte{221, 45, 182, 132, 61, 212, 227, 145, 150, 131, 98, 221, 195, 5, 89, 21, 188, 36, 250, 101, 85, 78, 197, 253, 193, 23, 74, 219, 28, 108, 77, 47, 65, 79, 123, 144, 50, 245, 109, 72, 71, 80, 24, 77, 158, 95, 242, 185, 109, 163, 105, 183, 67, 106, 55, 194, 223, 46, 12, 242, 165, 203, 172, 254},
- },
- {
- name: "MultiplePageZeroesSHA256SeparateFile",
- data: bytes.Repeat([]byte{0}, 128*hostarch.PageSize+1),
- hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA256,
- dataAndTreeInSameFile: false,
- expectedHash: []byte{131, 122, 73, 143, 4, 202, 193, 156, 218, 169, 196, 223, 70, 100, 117, 191, 241, 113, 134, 11, 229, 231, 105, 157, 156, 0, 66, 213, 122, 145, 174, 8},
- },
- {
- name: "MultiplePageZeroesSHA256SameFile",
- data: bytes.Repeat([]byte{0}, 128*hostarch.PageSize+1),
- hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA256,
- dataAndTreeInSameFile: true,
- expectedHash: []byte{131, 122, 73, 143, 4, 202, 193, 156, 218, 169, 196, 223, 70, 100, 117, 191, 241, 113, 134, 11, 229, 231, 105, 157, 156, 0, 66, 213, 122, 145, 174, 8},
- },
- {
- name: "MultiplePageZeroesSHA512SeparateFile",
- data: bytes.Repeat([]byte{0}, 128*hostarch.PageSize+1),
- hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA512,
- dataAndTreeInSameFile: false,
- expectedHash: []byte{211, 48, 232, 110, 240, 51, 99, 241, 123, 138, 42, 76, 94, 86, 59, 200, 3, 246, 137, 148, 189, 226, 111, 103, 146, 29, 12, 218, 40, 182, 33, 99, 193, 163, 238, 26, 184, 13, 165, 187, 68, 173, 139, 9, 208, 59, 0, 192, 180, 50, 221, 35, 43, 119, 194, 16, 64, 84, 116, 63, 158, 195, 194, 226},
- },
- {
- name: "MultiplePageZeroesSHA512SameFile",
- data: bytes.Repeat([]byte{0}, 128*hostarch.PageSize+1),
- hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA512,
- dataAndTreeInSameFile: true,
- expectedHash: []byte{211, 48, 232, 110, 240, 51, 99, 241, 123, 138, 42, 76, 94, 86, 59, 200, 3, 246, 137, 148, 189, 226, 111, 103, 146, 29, 12, 218, 40, 182, 33, 99, 193, 163, 238, 26, 184, 13, 165, 187, 68, 173, 139, 9, 208, 59, 0, 192, 180, 50, 221, 35, 43, 119, 194, 16, 64, 84, 116, 63, 158, 195, 194, 226},
- },
- {
- name: "SingleASHA256SeparateFile",
- data: []byte{'a'},
- hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA256,
- dataAndTreeInSameFile: false,
- expectedHash: []byte{26, 47, 238, 138, 235, 244, 140, 231, 129, 240, 155, 252, 219, 44, 46, 72, 57, 249, 139, 88, 132, 238, 86, 108, 181, 115, 96, 72, 99, 210, 134, 47},
- },
- {
- name: "SingleASHA256SameFile",
- data: []byte{'a'},
- hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA256,
- dataAndTreeInSameFile: true,
- expectedHash: []byte{26, 47, 238, 138, 235, 244, 140, 231, 129, 240, 155, 252, 219, 44, 46, 72, 57, 249, 139, 88, 132, 238, 86, 108, 181, 115, 96, 72, 99, 210, 134, 47},
- },
- {
- name: "SingleASHA512SeparateFile",
- data: []byte{'a'},
- hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA512,
- dataAndTreeInSameFile: false,
- expectedHash: []byte{44, 30, 224, 12, 102, 119, 163, 171, 119, 175, 212, 121, 231, 188, 125, 171, 79, 28, 144, 234, 75, 122, 44, 75, 15, 101, 173, 92, 233, 109, 234, 60, 173, 148, 125, 85, 94, 234, 95, 91, 16, 196, 88, 175, 23, 129, 226, 110, 24, 238, 5, 49, 186, 128, 72, 188, 193, 180, 207, 193, 203, 119, 40, 191},
- },
- {
- name: "SingleASHA512SameFile",
- data: []byte{'a'},
- hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA512,
- dataAndTreeInSameFile: true,
- expectedHash: []byte{44, 30, 224, 12, 102, 119, 163, 171, 119, 175, 212, 121, 231, 188, 125, 171, 79, 28, 144, 234, 75, 122, 44, 75, 15, 101, 173, 92, 233, 109, 234, 60, 173, 148, 125, 85, 94, 234, 95, 91, 16, 196, 88, 175, 23, 129, 226, 110, 24, 238, 5, 49, 186, 128, 72, 188, 193, 180, 207, 193, 203, 119, 40, 191},
- },
- {
- name: "OnePageASHA256SeparateFile",
- data: bytes.Repeat([]byte{'a'}, hostarch.PageSize),
- hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA256,
- dataAndTreeInSameFile: false,
- expectedHash: []byte{166, 254, 83, 46, 241, 111, 18, 47, 79, 6, 181, 197, 176, 143, 211, 204, 53, 5, 245, 134, 172, 95, 97, 131, 236, 132, 197, 138, 123, 78, 43, 13},
- },
- {
- name: "OnePageASHA256SameFile",
- data: bytes.Repeat([]byte{'a'}, hostarch.PageSize),
- hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA256,
- dataAndTreeInSameFile: true,
- expectedHash: []byte{166, 254, 83, 46, 241, 111, 18, 47, 79, 6, 181, 197, 176, 143, 211, 204, 53, 5, 245, 134, 172, 95, 97, 131, 236, 132, 197, 138, 123, 78, 43, 13},
- },
- {
- name: "OnePageASHA512SeparateFile",
- data: bytes.Repeat([]byte{'a'}, hostarch.PageSize),
- hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA512,
- dataAndTreeInSameFile: false,
- expectedHash: []byte{23, 69, 6, 79, 39, 232, 90, 246, 62, 55, 4, 229, 47, 36, 230, 24, 233, 47, 55, 36, 26, 139, 196, 78, 242, 12, 194, 77, 109, 81, 151, 188, 63, 201, 127, 235, 81, 214, 91, 200, 19, 232, 240, 14, 197, 1, 99, 224, 18, 213, 203, 242, 44, 102, 25, 62, 90, 189, 106, 107, 129, 61, 115, 39},
- },
- {
- name: "OnePageASHA512SameFile",
- data: bytes.Repeat([]byte{'a'}, hostarch.PageSize),
- hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA512,
- dataAndTreeInSameFile: true,
- expectedHash: []byte{23, 69, 6, 79, 39, 232, 90, 246, 62, 55, 4, 229, 47, 36, 230, 24, 233, 47, 55, 36, 26, 139, 196, 78, 242, 12, 194, 77, 109, 81, 151, 188, 63, 201, 127, 235, 81, 214, 91, 200, 19, 232, 240, 14, 197, 1, 99, 224, 18, 213, 203, 242, 44, 102, 25, 62, 90, 189, 106, 107, 129, 61, 115, 39},
- },
- }
-
- for _, tc := range testCases {
- t.Run(fmt.Sprintf(tc.name), func(t *testing.T) {
- var tree bytesReadWriter
- params := GenerateParams{
- Size: int64(len(tc.data)),
- Name: defaultName,
- Mode: defaultMode,
- UID: defaultUID,
- GID: defaultGID,
- Children: []string{},
- HashAlgorithms: tc.hashAlgorithms,
- TreeReader: &tree,
- TreeWriter: &tree,
- DataAndTreeInSameFile: tc.dataAndTreeInSameFile,
- }
- if tc.dataAndTreeInSameFile {
- tree.Write(tc.data)
- params.File = &tree
- } else {
- params.File = &bytesReadWriter{
- bytes: tc.data,
- }
- }
- hash, err := Generate(&params)
- if err != nil {
- t.Fatalf("Got err: %v, want nil", err)
- }
- if !bytes.Equal(hash, tc.expectedHash) {
- t.Errorf("Got hash: %v, want %v", hash, tc.expectedHash)
- }
- })
- }
-}
-
-// prepareVerify generates test data and corresponding Merkle tree, and returns
-// the prepared VerifyParams.
-// The test data has size dataSize. The data is hashed with hashAlgorithm. The
-// portion to be verified is the range [verifyStart, verifyStart + verifySize).
-func prepareVerify(t *testing.T, dataSize int64, hashAlgorithm int, dataAndTreeInSameFile, isSymlink bool, verifyStart, verifySize int64, out io.Writer) ([]byte, VerifyParams) {
- t.Helper()
- data := make([]byte, dataSize)
- // Generate random bytes in data.
- rand.Read(data)
-
- var tree bytesReadWriter
- genParams := GenerateParams{
- Size: int64(len(data)),
- Name: defaultName,
- Mode: defaultMode,
- UID: defaultUID,
- GID: defaultGID,
- Children: []string{},
- HashAlgorithms: hashAlgorithm,
- TreeReader: &tree,
- TreeWriter: &tree,
- DataAndTreeInSameFile: dataAndTreeInSameFile,
- }
- if dataAndTreeInSameFile {
- tree.Write(data)
- genParams.File = &tree
- } else {
- genParams.File = &bytesReadWriter{
- bytes: data,
- }
- }
-
- if isSymlink {
- genParams.SymlinkTarget = defaultSymlinkPath
- }
- hash, err := Generate(&genParams)
- if err != nil {
- t.Fatalf("could not generate Merkle tree:%v", err)
- }
-
- return data, VerifyParams{
- Out: out,
- File: bytes.NewReader(data),
- Tree: &tree,
- Size: dataSize,
- Name: defaultName,
- Mode: defaultMode,
- UID: defaultUID,
- GID: defaultGID,
- Children: []string{},
- HashAlgorithms: hashAlgorithm,
- ReadOffset: verifyStart,
- ReadSize: verifySize,
- Expected: hash,
- DataAndTreeInSameFile: dataAndTreeInSameFile,
- }
-}
-
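Condensed from the helpers above, a sketch of the full Generate/Verify round trip; the merkletree import path and parameter types are inferred from this deleted test, so treat it as illustrative rather than canonical:

package main

import (
	"bytes"
	"fmt"

	"gvisor.dev/gvisor/pkg/abi/linux"
	"gvisor.dev/gvisor/pkg/merkletree"
)

// treeBuf plays the role of bytesReadWriter: Write appends, ReadAt re-reads.
type treeBuf struct{ b []byte }

func (t *treeBuf) Write(p []byte) (int, error) {
	t.b = append(t.b, p...)
	return len(p), nil
}

func (t *treeBuf) ReadAt(p []byte, off int64) (int, error) {
	return copy(p, t.b[off:]), nil
}

func main() {
	data := bytes.Repeat([]byte{'a'}, 4096)
	var tree treeBuf

	hash, err := merkletree.Generate(&merkletree.GenerateParams{
		File:           &treeBuf{b: data},
		Size:           int64(len(data)),
		Name:           "example",
		Mode:           0644,
		HashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA256,
		TreeReader:     &tree,
		TreeWriter:     &tree,
	})
	if err != nil {
		panic(err)
	}

	var out bytes.Buffer
	n, err := merkletree.Verify(&merkletree.VerifyParams{
		Out:            &out,
		File:           bytes.NewReader(data),
		Tree:           &tree,
		Size:           int64(len(data)),
		Name:           "example",
		Mode:           0644,
		HashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA256,
		ReadOffset:     0,
		ReadSize:       int64(len(data)),
		Expected:       hash, // Any mismatch in data or metadata fails here.
	})
	fmt.Println(n, err) // 4096 <nil> if nothing was tampered with.
}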
-func TestVerifyInvalidRange(t *testing.T) {
- testCases := []struct {
- name string
- verifyStart int64
- verifySize int64
- }{
- // Verify range starts outside data range.
- {
- name: "StartOutsideRange",
- verifyStart: hostarch.PageSize,
- verifySize: 1,
- },
- // Verify range ends outside data range.
- {
- name: "EndOutsideRange",
- verifyStart: 0,
- verifySize: 2 * hostarch.PageSize,
- },
- // Verify range with negative size.
- {
- name: "NegativeSize",
- verifyStart: 1,
- verifySize: -1,
- },
- }
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- var buf bytes.Buffer
- _, params := prepareVerify(t, hostarch.PageSize /* dataSize */, defaultHashAlgorithm, false /* dataAndTreeInSameFile */, false /* isSymlink */, tc.verifyStart, tc.verifySize, &buf)
- if _, err := Verify(&params); errors.Is(err, nil) {
- t.Errorf("Verification succeeded when expected to fail")
- }
- })
- }
-}
-
-func TestVerifyUnmodifiedMetadata(t *testing.T) {
- testCases := []struct {
- name string
- dataAndTreeInSameFile bool
- isSymlink bool
- }{
- {
- name: "SeparateFile",
- dataAndTreeInSameFile: false,
- isSymlink: true,
- },
- {
- name: "SameFile",
- dataAndTreeInSameFile: true,
- isSymlink: false,
- },
- {
- name: "SameFileSymlink",
- dataAndTreeInSameFile: true,
- isSymlink: true,
- },
- }
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- var buf bytes.Buffer
- _, params := prepareVerify(t, hostarch.PageSize /* dataSize */, defaultHashAlgorithm, tc.dataAndTreeInSameFile, tc.isSymlink, 0 /* verifyStart */, 0 /* verifySize */, &buf)
- if tc.isSymlink {
- params.SymlinkTarget = defaultSymlinkPath
- }
- if _, err := Verify(&params); !errors.Is(err, nil) {
- t.Errorf("Verification failed when expected to succeed: %v", err)
- }
- })
- }
-}
-
-func TestVerifyModifiedName(t *testing.T) {
- testCases := []struct {
- name string
- dataAndTreeInSameFile bool
- }{
- {
- name: "SeparateFile",
- dataAndTreeInSameFile: false,
- },
- {
- name: "SameFile",
- dataAndTreeInSameFile: true,
- },
- }
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- var buf bytes.Buffer
- _, params := prepareVerify(t, hostarch.PageSize /* dataSize */, defaultHashAlgorithm, tc.dataAndTreeInSameFile, false /* isSymlink */, 0 /* verifyStart */, 0 /* verifySize */, &buf)
- params.Name += "abc"
- if _, err := Verify(&params); errors.Is(err, nil) {
- t.Errorf("Verification succeeded when expected to fail")
- }
- })
- }
-}
-
-func TestVerifyModifiedSize(t *testing.T) {
- testCases := []struct {
- name string
- dataAndTreeInSameFile bool
- }{
- {
- name: "SeparateFile",
- dataAndTreeInSameFile: false,
- },
- {
- name: "SameFile",
- dataAndTreeInSameFile: true,
- },
- }
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- var buf bytes.Buffer
- _, params := prepareVerify(t, hostarch.PageSize /* dataSize */, defaultHashAlgorithm, tc.dataAndTreeInSameFile, false /* isSymlink */, 0 /* verifyStart */, 0 /* verifySize */, &buf)
- params.Size--
- if _, err := Verify(&params); errors.Is(err, nil) {
- t.Errorf("Verification succeeded when expected to fail")
- }
- })
- }
-}
-
-func TestVerifyModifiedMode(t *testing.T) {
- testCases := []struct {
- name string
- dataAndTreeInSameFile bool
- }{
- {
- name: "SeparateFile",
- dataAndTreeInSameFile: false,
- },
- {
- name: "SameFile",
- dataAndTreeInSameFile: true,
- },
- }
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- var buf bytes.Buffer
- _, params := prepareVerify(t, hostarch.PageSize /* dataSize */, defaultHashAlgorithm, tc.dataAndTreeInSameFile, false /* isSymlink */, 0 /* verifyStart */, 0 /* verifySize */, &buf)
- params.Mode++
- if _, err := Verify(&params); errors.Is(err, nil) {
- t.Errorf("Verification succeeded when expected to fail")
- }
- })
- }
-}
-
-func TestVerifyModifiedUID(t *testing.T) {
- testCases := []struct {
- name string
- dataAndTreeInSameFile bool
- }{
- {
- name: "SeparateFile",
- dataAndTreeInSameFile: false,
- },
- {
- name: "SameFile",
- dataAndTreeInSameFile: true,
- },
- }
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- var buf bytes.Buffer
- _, params := prepareVerify(t, hostarch.PageSize /* dataSize */, defaultHashAlgorithm, tc.dataAndTreeInSameFile, false /* isSymlink */, 0 /* verifyStart */, 0 /* verifySize */, &buf)
- params.UID++
- if _, err := Verify(&params); errors.Is(err, nil) {
- t.Errorf("Verification succeeded when expected to fail")
- }
- })
- }
-}
-
-func TestVerifyModifiedGID(t *testing.T) {
- testCases := []struct {
- name string
- dataAndTreeInSameFile bool
- }{
- {
- name: "SeparateFile",
- dataAndTreeInSameFile: false,
- },
- {
- name: "SameFile",
- dataAndTreeInSameFile: true,
- },
- }
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- var buf bytes.Buffer
- _, params := prepareVerify(t, hostarch.PageSize /* dataSize */, defaultHashAlgorithm, tc.dataAndTreeInSameFile, false /* isSymlink */, 0 /* verifyStart */, 0 /* verifySize */, &buf)
- params.GID++
- if _, err := Verify(&params); errors.Is(err, nil) {
- t.Errorf("Verification succeeded when expected to fail")
- }
- })
- }
-}
-
-func TestVerifyModifiedChildren(t *testing.T) {
- testCases := []struct {
- name string
- dataAndTreeInSameFile bool
- }{
- {
- name: "SeparateFile",
- dataAndTreeInSameFile: false,
- },
- {
- name: "SameFile",
- dataAndTreeInSameFile: true,
- },
- }
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- var buf bytes.Buffer
- _, params := prepareVerify(t, hostarch.PageSize /* dataSize */, defaultHashAlgorithm, tc.dataAndTreeInSameFile, false /* isSymlink */, 0 /* verifyStart */, 0 /* verifySize */, &buf)
- params.Children = append(params.Children, "abc")
- if _, err := Verify(&params); errors.Is(err, nil) {
- t.Errorf("Verification succeeded when expected to fail")
- }
- })
- }
-}
-
-func TestVerifyModifiedSymlink(t *testing.T) {
- var buf bytes.Buffer
- _, params := prepareVerify(t, hostarch.PageSize /* dataSize */, defaultHashAlgorithm, false /* dataAndTreeInSameFile */, true /* isSymlink */, 0 /* verifyStart */, 0 /* verifySize */, &buf)
- params.SymlinkTarget = "merkle_modified_test_link"
- if _, err := Verify(&params); err == nil {
- t.Errorf("Verification succeeded when expected to fail")
- }
-}
-
-func TestModifyOutsideVerifyRange(t *testing.T) {
- testCases := []struct {
- name string
- // The byte with index modifyByte is modified.
- modifyByte int64
- dataAndTreeInSameFile bool
- }{
- {
- name: "BeforeRangeSeparateFile",
- modifyByte: 4*hostarch.PageSize - 1,
- dataAndTreeInSameFile: false,
- },
- {
- name: "BeforeRangeSameFile",
- modifyByte: 4*hostarch.PageSize - 1,
- dataAndTreeInSameFile: true,
- },
- {
- name: "AfterRangeSeparateFile",
- modifyByte: 5 * hostarch.PageSize,
- dataAndTreeInSameFile: false,
- },
- {
- name: "AfterRangeSameFile",
- modifyByte: 5 * hostarch.PageSize,
- dataAndTreeInSameFile: true,
- },
- }
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- dataSize := int64(8 * hostarch.PageSize)
- verifyStart := int64(4 * hostarch.PageSize)
- verifySize := int64(hostarch.PageSize)
- var buf bytes.Buffer
-			// The modified byte is outside the verify range, so Verify should succeed.
- data, params := prepareVerify(t, dataSize, defaultHashAlgorithm, tc.dataAndTreeInSameFile, false /* isSymlink */, verifyStart, verifySize, &buf)
-			// Flip a bit in data and check the Verify results.
- data[tc.modifyByte] ^= 1
- n, err := Verify(&params)
- if !errors.Is(err, nil) {
- t.Errorf("Verification failed when expected to succeed: %v", err)
- }
- if n != verifySize {
- t.Errorf("Got Verify output size %d, want %d", n, verifySize)
- }
- if int64(buf.Len()) != verifySize {
- t.Errorf("Got Verify output buf size %d, want %d,", buf.Len(), verifySize)
- }
- if !bytes.Equal(data[verifyStart:verifyStart+verifySize], buf.Bytes()) {
- t.Errorf("Incorrect output buf from Verify")
- }
- })
- }
-}
-
-func TestModifyInsideVerifyRange(t *testing.T) {
- testCases := []struct {
- name string
- verifyStart int64
- verifySize int64
- // The byte with index modifyByte is modified.
- modifyByte int64
- dataAndTreeInSameFile bool
- }{
- // Test a block-aligned verify range.
- // Modifying a byte in the verified range should cause verify
- // to fail.
- {
- name: "BlockAlignedRangeSeparateFile",
- verifyStart: 4 * hostarch.PageSize,
- verifySize: hostarch.PageSize,
- modifyByte: 4 * hostarch.PageSize,
- dataAndTreeInSameFile: false,
- },
- {
- name: "BlockAlignedRangeSameFile",
- verifyStart: 4 * hostarch.PageSize,
- verifySize: hostarch.PageSize,
- modifyByte: 4 * hostarch.PageSize,
- dataAndTreeInSameFile: true,
- },
- // The tests below use a non-block-aligned verify range.
-		// Modifying a byte at the start of the verify range should cause
- // verify to fail.
- {
- name: "ModifyStartSeparateFile",
- verifyStart: 4*hostarch.PageSize + 123,
- verifySize: 2 * hostarch.PageSize,
- modifyByte: 4*hostarch.PageSize + 123,
- dataAndTreeInSameFile: false,
- },
- {
- name: "ModifyStartSameFile",
- verifyStart: 4*hostarch.PageSize + 123,
- verifySize: 2 * hostarch.PageSize,
- modifyByte: 4*hostarch.PageSize + 123,
- dataAndTreeInSameFile: true,
- },
- // Modifying a byte at the end of verify range should cause
- // verify to fail.
- {
- name: "ModifyEndSeparateFile",
- verifyStart: 4*hostarch.PageSize + 123,
- verifySize: 2 * hostarch.PageSize,
- modifyByte: 6*hostarch.PageSize + 123,
- dataAndTreeInSameFile: false,
- },
- {
- name: "ModifyEndSameFile",
- verifyStart: 4*hostarch.PageSize + 123,
- verifySize: 2 * hostarch.PageSize,
- modifyByte: 6*hostarch.PageSize + 123,
- dataAndTreeInSameFile: true,
- },
-		// Modifying a byte in a middle block of the verified range should cause
- // verify to fail.
- {
- name: "ModifyMiddleSeparateFile",
- verifyStart: 4*hostarch.PageSize + 123,
- verifySize: 2 * hostarch.PageSize,
- modifyByte: 5*hostarch.PageSize + 123,
- dataAndTreeInSameFile: false,
- },
- {
- name: "ModifyMiddleSameFile",
- verifyStart: 4*hostarch.PageSize + 123,
- verifySize: 2 * hostarch.PageSize,
- modifyByte: 5*hostarch.PageSize + 123,
- dataAndTreeInSameFile: true,
- },
- // Modifying a byte in the first block in the verified range
-		// should cause verify to fail, even if the modified byte itself is
-		// outside the verify range.
- {
- name: "ModifyFirstBlockSeparateFile",
- verifyStart: 4*hostarch.PageSize + 123,
- verifySize: 2 * hostarch.PageSize,
- modifyByte: 4*hostarch.PageSize + 122,
- dataAndTreeInSameFile: false,
- },
- {
- name: "ModifyFirstBlockSameFile",
- verifyStart: 4*hostarch.PageSize + 123,
- verifySize: 2 * hostarch.PageSize,
- modifyByte: 4*hostarch.PageSize + 122,
- dataAndTreeInSameFile: true,
- },
- // Modifying a byte in the last block in the verified range
-		// should cause verify to fail, even if the modified byte itself is
-		// outside the verify range.
- {
- name: "ModifyLastBlockSeparateFile",
- verifyStart: 4*hostarch.PageSize + 123,
- verifySize: 2 * hostarch.PageSize,
- modifyByte: 6*hostarch.PageSize + 124,
- dataAndTreeInSameFile: false,
- },
- {
- name: "ModifyLastBlockSameFile",
- verifyStart: 4*hostarch.PageSize + 123,
- verifySize: 2 * hostarch.PageSize,
- modifyByte: 6*hostarch.PageSize + 124,
- dataAndTreeInSameFile: true,
- },
- }
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- dataSize := int64(8 * hostarch.PageSize)
- var buf bytes.Buffer
- data, params := prepareVerify(t, dataSize, defaultHashAlgorithm, tc.dataAndTreeInSameFile, false /* isSymlink */, tc.verifyStart, tc.verifySize, &buf)
-			// Flip a bit in data and check the Verify results.
- data[tc.modifyByte] ^= 1
- if _, err := Verify(&params); errors.Is(err, nil) {
- t.Errorf("Verification succeeded when expected to fail")
- }
- })
- }
-}
-
-func TestVerifyRandom(t *testing.T) {
- testCases := []struct {
- name string
- hashAlgorithm int
- dataAndTreeInSameFile bool
- }{
- {
- name: "SHA256SeparateFile",
- hashAlgorithm: linux.FS_VERITY_HASH_ALG_SHA256,
- dataAndTreeInSameFile: false,
- },
- {
- name: "SHA512SeparateFile",
- hashAlgorithm: linux.FS_VERITY_HASH_ALG_SHA512,
- dataAndTreeInSameFile: false,
- },
- {
- name: "SHA256SameFile",
- hashAlgorithm: linux.FS_VERITY_HASH_ALG_SHA256,
- dataAndTreeInSameFile: true,
- },
- {
- name: "SHA512SameFile",
- hashAlgorithm: linux.FS_VERITY_HASH_ALG_SHA512,
- dataAndTreeInSameFile: true,
- },
- }
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- rand.Seed(time.Now().UnixNano())
- // Use a random dataSize. Minimum size 2 so that we can pick a random
- // portion from it.
- dataSize := rand.Int63n(200*hostarch.PageSize) + 2
-
- // Pick a random portion of data.
- start := rand.Int63n(dataSize - 1)
- size := rand.Int63n(dataSize) + 1
-
- var buf bytes.Buffer
- data, params := prepareVerify(t, dataSize, tc.hashAlgorithm, tc.dataAndTreeInSameFile, false /* isSymlink */, start, size, &buf)
-
-			// Check that the randomly chosen portion of the original data is
-			// verified successfully.
- n, err := Verify(&params)
- if err != nil && err != io.EOF {
- t.Errorf("Verification failed for correct data: %v", err)
- }
- if size > dataSize-start {
- size = dataSize - start
- }
- if n != size {
- t.Errorf("Got Verify output size %d, want %d", n, size)
- }
- if int64(buf.Len()) != size {
- t.Errorf("Got Verify output buf size %d, want %d", buf.Len(), size)
- }
- if !bytes.Equal(data[start:start+size], buf.Bytes()) {
- t.Errorf("Incorrect output buf from Verify")
- }
-
-			// Verify that modified metadata fails verification.
- buf.Reset()
- params.Name = defaultName + "abc"
- if _, err := Verify(&params); errors.Is(err, nil) {
- t.Error("Verify succeeded for modified metadata, expect failure")
- }
-
-			// Flip a random bit in the verified portion, and check that verification fails.
- buf.Reset()
- randBytePos := rand.Int63n(size)
- data[start+randBytePos] ^= 1
- params.File = bytes.NewReader(data)
- params.Name = defaultName
-
- if _, err := Verify(&params); errors.Is(err, nil) {
- t.Error("Verification succeeded for modified data, expect failure")
- }
- })
- }
-}
diff --git a/pkg/metric/BUILD b/pkg/metric/BUILD
deleted file mode 100644
index c08792751..000000000
--- a/pkg/metric/BUILD
+++ /dev/null
@@ -1,38 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test", "proto_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "metric",
- srcs = [
- "metric.go",
- ],
- visibility = ["//:sandbox"],
- deps = [
- ":metric_go_proto",
- "//pkg/eventchannel",
- "//pkg/log",
- "//pkg/sync",
- "@org_golang_google_protobuf//types/known/timestamppb",
- ],
-)
-
-proto_library(
- name = "metric",
- srcs = ["metric.proto"],
- visibility = ["//:sandbox"],
- deps = [
- "@com_google_protobuf//:timestamp_proto",
- ],
-)
-
-go_test(
- name = "metric_test",
- srcs = ["metric_test.go"],
- library = ":metric",
- deps = [
- ":metric_go_proto",
- "//pkg/eventchannel",
- "@org_golang_google_protobuf//proto:go_default_library",
- ],
-)
diff --git a/pkg/metric/metric.proto b/pkg/metric/metric.proto
deleted file mode 100644
index d466b6904..000000000
--- a/pkg/metric/metric.proto
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package gvisor;
-
-import "google/protobuf/timestamp.proto";
-
-// MetricMetadata contains all of the metadata describing a single metric.
-message MetricMetadata {
- // name is the unique name of the metric, usually in a "directory" format
- // (e.g., /foo/count).
- string name = 1;
-
- // description is a human-readable description of the metric.
- string description = 2;
-
- // cumulative indicates that this metric is never decremented.
- bool cumulative = 3;
-
- // sync indicates that values from the final metric event should be
- // synchronized to the backing monitoring system at exit.
- //
- // If sync is false, values are only sent to the monitoring system
- // periodically. There is no guarantee that values will ever be received by
- // the monitoring system.
- bool sync = 4;
-
- enum Type { TYPE_UINT64 = 0; }
-
- // type is the type of the metric value.
- Type type = 5;
-
- enum Units {
- UNITS_NONE = 0;
- UNITS_NANOSECONDS = 1;
- }
-
- // units is the units of the metric value.
- Units units = 6;
-
- message Field {
- string field_name = 1;
- repeated string allowed_values = 2;
- }
-
- // fields contains the metric fields. Currently a metric can have at most
- // one field.
- repeated Field fields = 7;
-}
-
-// MetricRegistration contains the metadata for all metrics that will be in
-// future MetricUpdates.
-message MetricRegistration {
- repeated MetricMetadata metrics = 1;
- repeated string stages = 2;
-}
-
-// MetricValue is the value of a metric at a single point in time.
-message MetricValue {
- // name is the unique name of the metric, as in MetricMetadata.
- string name = 1;
-
- // value is the value of the metric at a single point in time. The field set
- // depends on the type of the metric.
- oneof value {
- uint64 uint64_value = 2;
- }
-
- repeated string field_values = 4;
-}
-
-// StageTiming represents a new stage that's been reached by the Sentry.
-message StageTiming {
- string stage = 1;
- google.protobuf.Timestamp started = 2;
- google.protobuf.Timestamp ended = 3;
-}
-
-// MetricUpdate contains new values for multiple distinct metrics.
-//
-// Metrics whose values have not changed are not included.
-message MetricUpdate {
- repeated MetricValue metrics = 1;
- // Timing information of initialization stages reached since last update.
- // The first MetricUpdate will include multiple entries, since metric
- // initialization happens relatively late in the Sentry startup process.
- repeated StageTiming stage_timing = 2;
-}
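The same schema reappears below as generated Go in pkg/metric/metric_go_proto. As a small, hypothetical sketch of building one of these messages (the gvisor.dev import path for the generated package is assumed):

package main

import (
	"fmt"

	mpb "gvisor.dev/gvisor/pkg/metric/metric_go_proto"
)

func main() {
	// Describe a cumulative, synchronized nanosecond counter.
	md := &mpb.MetricMetadata{
		Name:        "/example/run_time_ns",
		Description: "Hypothetical example metric.",
		Cumulative:  true,
		Sync:        true,
		Type:        mpb.MetricMetadata_TYPE_UINT64,
		Units:       mpb.MetricMetadata_UNITS_NANOSECONDS,
	}
	fmt.Println(md.GetName(), md.GetCumulative())
}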
diff --git a/pkg/metric/metric_go_proto/metric.pb.go b/pkg/metric/metric_go_proto/metric.pb.go
new file mode 100644
index 000000000..7d327e3a0
--- /dev/null
+++ b/pkg/metric/metric_go_proto/metric.pb.go
@@ -0,0 +1,732 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.13.0
+// source: pkg/metric/metric.proto
+
+package gvisor
+
+import (
+ proto "github.com/golang/protobuf/proto"
+ timestamp "github.com/golang/protobuf/ptypes/timestamp"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+type MetricMetadata_Type int32
+
+const (
+ MetricMetadata_TYPE_UINT64 MetricMetadata_Type = 0
+)
+
+// Enum value maps for MetricMetadata_Type.
+var (
+ MetricMetadata_Type_name = map[int32]string{
+ 0: "TYPE_UINT64",
+ }
+ MetricMetadata_Type_value = map[string]int32{
+ "TYPE_UINT64": 0,
+ }
+)
+
+func (x MetricMetadata_Type) Enum() *MetricMetadata_Type {
+ p := new(MetricMetadata_Type)
+ *p = x
+ return p
+}
+
+func (x MetricMetadata_Type) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (MetricMetadata_Type) Descriptor() protoreflect.EnumDescriptor {
+ return file_pkg_metric_metric_proto_enumTypes[0].Descriptor()
+}
+
+func (MetricMetadata_Type) Type() protoreflect.EnumType {
+ return &file_pkg_metric_metric_proto_enumTypes[0]
+}
+
+func (x MetricMetadata_Type) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use MetricMetadata_Type.Descriptor instead.
+func (MetricMetadata_Type) EnumDescriptor() ([]byte, []int) {
+ return file_pkg_metric_metric_proto_rawDescGZIP(), []int{0, 0}
+}
+
+type MetricMetadata_Units int32
+
+const (
+ MetricMetadata_UNITS_NONE MetricMetadata_Units = 0
+ MetricMetadata_UNITS_NANOSECONDS MetricMetadata_Units = 1
+)
+
+// Enum value maps for MetricMetadata_Units.
+var (
+ MetricMetadata_Units_name = map[int32]string{
+ 0: "UNITS_NONE",
+ 1: "UNITS_NANOSECONDS",
+ }
+ MetricMetadata_Units_value = map[string]int32{
+ "UNITS_NONE": 0,
+ "UNITS_NANOSECONDS": 1,
+ }
+)
+
+func (x MetricMetadata_Units) Enum() *MetricMetadata_Units {
+ p := new(MetricMetadata_Units)
+ *p = x
+ return p
+}
+
+func (x MetricMetadata_Units) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (MetricMetadata_Units) Descriptor() protoreflect.EnumDescriptor {
+ return file_pkg_metric_metric_proto_enumTypes[1].Descriptor()
+}
+
+func (MetricMetadata_Units) Type() protoreflect.EnumType {
+ return &file_pkg_metric_metric_proto_enumTypes[1]
+}
+
+func (x MetricMetadata_Units) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use MetricMetadata_Units.Descriptor instead.
+func (MetricMetadata_Units) EnumDescriptor() ([]byte, []int) {
+ return file_pkg_metric_metric_proto_rawDescGZIP(), []int{0, 1}
+}
+
+type MetricMetadata struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ Cumulative bool `protobuf:"varint,3,opt,name=cumulative,proto3" json:"cumulative,omitempty"`
+ Sync bool `protobuf:"varint,4,opt,name=sync,proto3" json:"sync,omitempty"`
+ Type MetricMetadata_Type `protobuf:"varint,5,opt,name=type,proto3,enum=gvisor.MetricMetadata_Type" json:"type,omitempty"`
+ Units MetricMetadata_Units `protobuf:"varint,6,opt,name=units,proto3,enum=gvisor.MetricMetadata_Units" json:"units,omitempty"`
+ Fields []*MetricMetadata_Field `protobuf:"bytes,7,rep,name=fields,proto3" json:"fields,omitempty"`
+}
+
+func (x *MetricMetadata) Reset() {
+ *x = MetricMetadata{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_metric_metric_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MetricMetadata) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MetricMetadata) ProtoMessage() {}
+
+func (x *MetricMetadata) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_metric_metric_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MetricMetadata.ProtoReflect.Descriptor instead.
+func (*MetricMetadata) Descriptor() ([]byte, []int) {
+ return file_pkg_metric_metric_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *MetricMetadata) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *MetricMetadata) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *MetricMetadata) GetCumulative() bool {
+ if x != nil {
+ return x.Cumulative
+ }
+ return false
+}
+
+func (x *MetricMetadata) GetSync() bool {
+ if x != nil {
+ return x.Sync
+ }
+ return false
+}
+
+func (x *MetricMetadata) GetType() MetricMetadata_Type {
+ if x != nil {
+ return x.Type
+ }
+ return MetricMetadata_TYPE_UINT64
+}
+
+func (x *MetricMetadata) GetUnits() MetricMetadata_Units {
+ if x != nil {
+ return x.Units
+ }
+ return MetricMetadata_UNITS_NONE
+}
+
+func (x *MetricMetadata) GetFields() []*MetricMetadata_Field {
+ if x != nil {
+ return x.Fields
+ }
+ return nil
+}
+
+type MetricRegistration struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Metrics []*MetricMetadata `protobuf:"bytes,1,rep,name=metrics,proto3" json:"metrics,omitempty"`
+ Stages []string `protobuf:"bytes,2,rep,name=stages,proto3" json:"stages,omitempty"`
+}
+
+func (x *MetricRegistration) Reset() {
+ *x = MetricRegistration{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_metric_metric_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MetricRegistration) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MetricRegistration) ProtoMessage() {}
+
+func (x *MetricRegistration) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_metric_metric_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MetricRegistration.ProtoReflect.Descriptor instead.
+func (*MetricRegistration) Descriptor() ([]byte, []int) {
+ return file_pkg_metric_metric_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *MetricRegistration) GetMetrics() []*MetricMetadata {
+ if x != nil {
+ return x.Metrics
+ }
+ return nil
+}
+
+func (x *MetricRegistration) GetStages() []string {
+ if x != nil {
+ return x.Stages
+ }
+ return nil
+}
+
+type MetricValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Types that are assignable to Value:
+ // *MetricValue_Uint64Value
+ Value isMetricValue_Value `protobuf_oneof:"value"`
+ FieldValues []string `protobuf:"bytes,4,rep,name=field_values,json=fieldValues,proto3" json:"field_values,omitempty"`
+}
+
+func (x *MetricValue) Reset() {
+ *x = MetricValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_metric_metric_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MetricValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MetricValue) ProtoMessage() {}
+
+func (x *MetricValue) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_metric_metric_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MetricValue.ProtoReflect.Descriptor instead.
+func (*MetricValue) Descriptor() ([]byte, []int) {
+ return file_pkg_metric_metric_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *MetricValue) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (m *MetricValue) GetValue() isMetricValue_Value {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (x *MetricValue) GetUint64Value() uint64 {
+ if x, ok := x.GetValue().(*MetricValue_Uint64Value); ok {
+ return x.Uint64Value
+ }
+ return 0
+}
+
+func (x *MetricValue) GetFieldValues() []string {
+ if x != nil {
+ return x.FieldValues
+ }
+ return nil
+}
+
+type isMetricValue_Value interface {
+ isMetricValue_Value()
+}
+
+type MetricValue_Uint64Value struct {
+ Uint64Value uint64 `protobuf:"varint,2,opt,name=uint64_value,json=uint64Value,proto3,oneof"`
+}
+
+func (*MetricValue_Uint64Value) isMetricValue_Value() {}
+
+type StageTiming struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Stage string `protobuf:"bytes,1,opt,name=stage,proto3" json:"stage,omitempty"`
+ Started *timestamp.Timestamp `protobuf:"bytes,2,opt,name=started,proto3" json:"started,omitempty"`
+ Ended *timestamp.Timestamp `protobuf:"bytes,3,opt,name=ended,proto3" json:"ended,omitempty"`
+}
+
+func (x *StageTiming) Reset() {
+ *x = StageTiming{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_metric_metric_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StageTiming) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StageTiming) ProtoMessage() {}
+
+func (x *StageTiming) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_metric_metric_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StageTiming.ProtoReflect.Descriptor instead.
+func (*StageTiming) Descriptor() ([]byte, []int) {
+ return file_pkg_metric_metric_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *StageTiming) GetStage() string {
+ if x != nil {
+ return x.Stage
+ }
+ return ""
+}
+
+func (x *StageTiming) GetStarted() *timestamp.Timestamp {
+ if x != nil {
+ return x.Started
+ }
+ return nil
+}
+
+func (x *StageTiming) GetEnded() *timestamp.Timestamp {
+ if x != nil {
+ return x.Ended
+ }
+ return nil
+}
+
+type MetricUpdate struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Metrics []*MetricValue `protobuf:"bytes,1,rep,name=metrics,proto3" json:"metrics,omitempty"`
+ StageTiming []*StageTiming `protobuf:"bytes,2,rep,name=stage_timing,json=stageTiming,proto3" json:"stage_timing,omitempty"`
+}
+
+func (x *MetricUpdate) Reset() {
+ *x = MetricUpdate{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_metric_metric_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MetricUpdate) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MetricUpdate) ProtoMessage() {}
+
+func (x *MetricUpdate) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_metric_metric_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MetricUpdate.ProtoReflect.Descriptor instead.
+func (*MetricUpdate) Descriptor() ([]byte, []int) {
+ return file_pkg_metric_metric_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *MetricUpdate) GetMetrics() []*MetricValue {
+ if x != nil {
+ return x.Metrics
+ }
+ return nil
+}
+
+func (x *MetricUpdate) GetStageTiming() []*StageTiming {
+ if x != nil {
+ return x.StageTiming
+ }
+ return nil
+}
+
+type MetricMetadata_Field struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ FieldName string `protobuf:"bytes,1,opt,name=field_name,json=fieldName,proto3" json:"field_name,omitempty"`
+ AllowedValues []string `protobuf:"bytes,2,rep,name=allowed_values,json=allowedValues,proto3" json:"allowed_values,omitempty"`
+}
+
+func (x *MetricMetadata_Field) Reset() {
+ *x = MetricMetadata_Field{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_metric_metric_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MetricMetadata_Field) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MetricMetadata_Field) ProtoMessage() {}
+
+func (x *MetricMetadata_Field) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_metric_metric_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MetricMetadata_Field.ProtoReflect.Descriptor instead.
+func (*MetricMetadata_Field) Descriptor() ([]byte, []int) {
+ return file_pkg_metric_metric_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *MetricMetadata_Field) GetFieldName() string {
+ if x != nil {
+ return x.FieldName
+ }
+ return ""
+}
+
+func (x *MetricMetadata_Field) GetAllowedValues() []string {
+ if x != nil {
+ return x.AllowedValues
+ }
+ return nil
+}
+
+var File_pkg_metric_metric_proto protoreflect.FileDescriptor
+
+var file_pkg_metric_metric_proto_rawDesc = []byte{
+ 0x0a, 0x17, 0x70, 0x6b, 0x67, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2f, 0x6d, 0x65, 0x74,
+ 0x72, 0x69, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x67, 0x76, 0x69, 0x73, 0x6f,
+ 0x72, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x22, 0xad, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4d, 0x65, 0x74,
+ 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+ 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63,
+ 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x0a, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73,
+ 0x79, 0x6e, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x73, 0x79, 0x6e, 0x63, 0x12,
+ 0x2f, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e,
+ 0x67, 0x76, 0x69, 0x73, 0x6f, 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4d, 0x65, 0x74,
+ 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65,
+ 0x12, 0x32, 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32,
+ 0x1c, 0x2e, 0x67, 0x76, 0x69, 0x73, 0x6f, 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4d,
+ 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x05, 0x75,
+ 0x6e, 0x69, 0x74, 0x73, 0x12, 0x34, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x07,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x76, 0x69, 0x73, 0x6f, 0x72, 0x2e, 0x4d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x65,
+ 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x4d, 0x0a, 0x05, 0x46, 0x69,
+ 0x65, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4e, 0x61,
+ 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x6f,
+ 0x77, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x17, 0x0a, 0x04, 0x54, 0x79, 0x70,
+ 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34,
+ 0x10, 0x00, 0x22, 0x2e, 0x0a, 0x05, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x12, 0x0e, 0x0a, 0x0a, 0x55,
+ 0x4e, 0x49, 0x54, 0x53, 0x5f, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x55,
+ 0x4e, 0x49, 0x54, 0x53, 0x5f, 0x4e, 0x41, 0x4e, 0x4f, 0x53, 0x45, 0x43, 0x4f, 0x4e, 0x44, 0x53,
+ 0x10, 0x01, 0x22, 0x5e, 0x0a, 0x12, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x65, 0x67, 0x69,
+ 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72,
+ 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x76, 0x69, 0x73,
+ 0x6f, 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
+ 0x61, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74,
+ 0x61, 0x67, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x67,
+ 0x65, 0x73, 0x22, 0x72, 0x0a, 0x0b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, 0x75,
+ 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69,
+ 0x65, 0x6c, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09,
+ 0x52, 0x0b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x42, 0x07, 0x0a,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x8b, 0x01, 0x0a, 0x0b, 0x53, 0x74, 0x61, 0x67, 0x65,
+ 0x54, 0x69, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x12, 0x34, 0x0a, 0x07,
+ 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x73, 0x74, 0x61, 0x72, 0x74,
+ 0x65, 0x64, 0x12, 0x30, 0x0a, 0x05, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x65,
+ 0x6e, 0x64, 0x65, 0x64, 0x22, 0x75, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x55, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x12, 0x2d, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x67, 0x76, 0x69, 0x73, 0x6f, 0x72, 0x2e, 0x4d,
+ 0x65, 0x74, 0x72, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x72,
+ 0x69, 0x63, 0x73, 0x12, 0x36, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x69, 0x6d,
+ 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x67, 0x76, 0x69, 0x73,
+ 0x6f, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x67, 0x65, 0x54, 0x69, 0x6d, 0x69, 0x6e, 0x67, 0x52, 0x0b,
+ 0x73, 0x74, 0x61, 0x67, 0x65, 0x54, 0x69, 0x6d, 0x69, 0x6e, 0x67, 0x62, 0x06, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_pkg_metric_metric_proto_rawDescOnce sync.Once
+ file_pkg_metric_metric_proto_rawDescData = file_pkg_metric_metric_proto_rawDesc
+)
+
+func file_pkg_metric_metric_proto_rawDescGZIP() []byte {
+ file_pkg_metric_metric_proto_rawDescOnce.Do(func() {
+ file_pkg_metric_metric_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_metric_metric_proto_rawDescData)
+ })
+ return file_pkg_metric_metric_proto_rawDescData
+}
+
+var file_pkg_metric_metric_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_pkg_metric_metric_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
+var file_pkg_metric_metric_proto_goTypes = []interface{}{
+ (MetricMetadata_Type)(0), // 0: gvisor.MetricMetadata.Type
+ (MetricMetadata_Units)(0), // 1: gvisor.MetricMetadata.Units
+ (*MetricMetadata)(nil), // 2: gvisor.MetricMetadata
+ (*MetricRegistration)(nil), // 3: gvisor.MetricRegistration
+ (*MetricValue)(nil), // 4: gvisor.MetricValue
+ (*StageTiming)(nil), // 5: gvisor.StageTiming
+ (*MetricUpdate)(nil), // 6: gvisor.MetricUpdate
+ (*MetricMetadata_Field)(nil), // 7: gvisor.MetricMetadata.Field
+ (*timestamp.Timestamp)(nil), // 8: google.protobuf.Timestamp
+}
+var file_pkg_metric_metric_proto_depIdxs = []int32{
+ 0, // 0: gvisor.MetricMetadata.type:type_name -> gvisor.MetricMetadata.Type
+ 1, // 1: gvisor.MetricMetadata.units:type_name -> gvisor.MetricMetadata.Units
+ 7, // 2: gvisor.MetricMetadata.fields:type_name -> gvisor.MetricMetadata.Field
+ 2, // 3: gvisor.MetricRegistration.metrics:type_name -> gvisor.MetricMetadata
+ 8, // 4: gvisor.StageTiming.started:type_name -> google.protobuf.Timestamp
+ 8, // 5: gvisor.StageTiming.ended:type_name -> google.protobuf.Timestamp
+ 4, // 6: gvisor.MetricUpdate.metrics:type_name -> gvisor.MetricValue
+ 5, // 7: gvisor.MetricUpdate.stage_timing:type_name -> gvisor.StageTiming
+ 8, // [8:8] is the sub-list for method output_type
+ 8, // [8:8] is the sub-list for method input_type
+ 8, // [8:8] is the sub-list for extension type_name
+ 8, // [8:8] is the sub-list for extension extendee
+ 0, // [0:8] is the sub-list for field type_name
+}
+
+func init() { file_pkg_metric_metric_proto_init() }
+func file_pkg_metric_metric_proto_init() {
+ if File_pkg_metric_metric_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_pkg_metric_metric_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MetricMetadata); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_metric_metric_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MetricRegistration); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_metric_metric_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MetricValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_metric_metric_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StageTiming); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_metric_metric_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MetricUpdate); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_metric_metric_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MetricMetadata_Field); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_pkg_metric_metric_proto_msgTypes[2].OneofWrappers = []interface{}{
+ (*MetricValue_Uint64Value)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_pkg_metric_metric_proto_rawDesc,
+ NumEnums: 2,
+ NumMessages: 6,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_pkg_metric_metric_proto_goTypes,
+ DependencyIndexes: file_pkg_metric_metric_proto_depIdxs,
+ EnumInfos: file_pkg_metric_metric_proto_enumTypes,
+ MessageInfos: file_pkg_metric_metric_proto_msgTypes,
+ }.Build()
+ File_pkg_metric_metric_proto = out.File
+ file_pkg_metric_metric_proto_rawDesc = nil
+ file_pkg_metric_metric_proto_goTypes = nil
+ file_pkg_metric_metric_proto_depIdxs = nil
+}
diff --git a/pkg/metric/metric_state_autogen.go b/pkg/metric/metric_state_autogen.go
new file mode 100644
index 000000000..36e5ed81b
--- /dev/null
+++ b/pkg/metric/metric_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package metric
diff --git a/pkg/metric/metric_test.go b/pkg/metric/metric_test.go
deleted file mode 100644
index 0654bdf07..000000000
--- a/pkg/metric/metric_test.go
+++ /dev/null
@@ -1,499 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric
-
-import (
- "testing"
- "time"
-
- "google.golang.org/protobuf/proto"
- "gvisor.dev/gvisor/pkg/eventchannel"
- pb "gvisor.dev/gvisor/pkg/metric/metric_go_proto"
-)
-
-// sliceEmitter implements eventchannel.Emitter by appending all messages to a
-// slice.
-type sliceEmitter []proto.Message
-
-// Emit implements eventchannel.Emitter.Emit.
-func (s *sliceEmitter) Emit(msg proto.Message) (bool, error) {
- *s = append(*s, msg)
- return false, nil
-}
-
-// Close implements eventchannel.Emitter.Close.
-func (s *sliceEmitter) Close() error {
- return nil
-}
-
-// Reset clears all events in s.
-func (s *sliceEmitter) Reset() {
- *s = nil
-}
-
-// emitter is the eventchannel.Emitter used for all tests. Package eventchannel
-// doesn't allow removing Emitters, so we must use one global emitter for all
-// test cases.
-var emitter sliceEmitter
-
-func init() {
- reset()
-
- eventchannel.AddEmitter(&emitter)
-}
-
-// reset clears all global state in the metric package.
-func reset() {
- initialized = false
- allMetrics = makeMetricSet()
- emitter.Reset()
-}
-
-const (
- fooDescription = "Foo!"
- barDescription = "Bar Baz"
- counterDescription = "Counter"
-)
-
-func TestInitialize(t *testing.T) {
- defer reset()
-
- _, err := NewUint64Metric("/foo", false, pb.MetricMetadata_UNITS_NONE, fooDescription)
- if err != nil {
- t.Fatalf("NewUint64Metric got err %v want nil", err)
- }
-
- _, err = NewUint64Metric("/bar", true, pb.MetricMetadata_UNITS_NANOSECONDS, barDescription)
- if err != nil {
- t.Fatalf("NewUint64Metric got err %v want nil", err)
- }
-
- if err := Initialize(); err != nil {
- t.Fatalf("Initialize(): %s", err)
- }
-
- if len(emitter) != 1 {
- t.Fatalf("Initialize emitted %d events want 1", len(emitter))
- }
-
- mr, ok := emitter[0].(*pb.MetricRegistration)
- if !ok {
- t.Fatalf("emitter %v got %T want pb.MetricRegistration", emitter[0], emitter[0])
- }
-
- if len(mr.Metrics) != 2 {
- t.Errorf("MetricRegistration got %d metrics want 2", len(mr.Metrics))
- }
-
- foundFoo := false
- foundBar := false
- for _, m := range mr.Metrics {
- if m.Type != pb.MetricMetadata_TYPE_UINT64 {
- t.Errorf("Metadata %+v Type got %v want pb.MetricMetadata_TYPE_UINT64", m, m.Type)
- }
- if !m.Cumulative {
- t.Errorf("Metadata %+v Cumulative got false want true", m)
- }
-
- switch m.Name {
- case "/foo":
- foundFoo = true
- if m.Description != fooDescription {
- t.Errorf("/foo %+v Description got %q want %q", m, m.Description, fooDescription)
- }
- if m.Sync {
- t.Errorf("/foo %+v Sync got true want false", m)
- }
- if m.Units != pb.MetricMetadata_UNITS_NONE {
- t.Errorf("/foo %+v Units got %v want %v", m, m.Units, pb.MetricMetadata_UNITS_NONE)
- }
- case "/bar":
- foundBar = true
- if m.Description != barDescription {
- t.Errorf("/bar %+v Description got %q want %q", m, m.Description, barDescription)
- }
- if !m.Sync {
-				t.Errorf("/bar %+v Sync got false want true", m)
- }
- if m.Units != pb.MetricMetadata_UNITS_NANOSECONDS {
- t.Errorf("/bar %+v Units got %v want %v", m, m.Units, pb.MetricMetadata_UNITS_NANOSECONDS)
- }
- }
- }
-
- if !foundFoo {
- t.Errorf("/foo not found: %+v", emitter)
- }
- if !foundBar {
- t.Errorf("/bar not found: %+v", emitter)
- }
-}
-
-func TestDisable(t *testing.T) {
- defer reset()
-
- _, err := NewUint64Metric("/foo", false, pb.MetricMetadata_UNITS_NONE, fooDescription)
- if err != nil {
- t.Fatalf("NewUint64Metric got err %v want nil", err)
- }
-
- _, err = NewUint64Metric("/bar", true, pb.MetricMetadata_UNITS_NONE, barDescription)
- if err != nil {
- t.Fatalf("NewUint64Metric got err %v want nil", err)
- }
-
- if err := Disable(); err != nil {
- t.Fatalf("Disable(): %s", err)
- }
-
- if len(emitter) != 1 {
-		t.Fatalf("Disable emitted %d events want 1", len(emitter))
- }
-
- mr, ok := emitter[0].(*pb.MetricRegistration)
- if !ok {
- t.Fatalf("emitter %v got %T want pb.MetricRegistration", emitter[0], emitter[0])
- }
-
- if len(mr.Metrics) != 0 {
- t.Errorf("MetricRegistration got %d metrics want 0", len(mr.Metrics))
- }
-}
-
-func TestEmitMetricUpdate(t *testing.T) {
- defer reset()
-
- foo, err := NewUint64Metric("/foo", false, pb.MetricMetadata_UNITS_NONE, fooDescription)
- if err != nil {
- t.Fatalf("NewUint64Metric got err %v want nil", err)
- }
-
- _, err = NewUint64Metric("/bar", true, pb.MetricMetadata_UNITS_NONE, barDescription)
- if err != nil {
- t.Fatalf("NewUint64Metric got err %v want nil", err)
- }
-
- if err := Initialize(); err != nil {
- t.Fatalf("Initialize(): %s", err)
- }
-
- // Don't care about the registration metrics.
- emitter.Reset()
- EmitMetricUpdate()
-
- if len(emitter) != 1 {
- t.Fatalf("EmitMetricUpdate emitted %d events want 1", len(emitter))
- }
-
- update, ok := emitter[0].(*pb.MetricUpdate)
- if !ok {
- t.Fatalf("emitter %v got %T want pb.MetricUpdate", emitter[0], emitter[0])
- }
-
- if len(update.Metrics) != 2 {
- t.Errorf("MetricUpdate got %d metrics want 2", len(update.Metrics))
- }
-
- // Both are included for their initial values.
- foundFoo := false
- foundBar := false
- for _, m := range update.Metrics {
- switch m.Name {
- case "/foo":
- foundFoo = true
- case "/bar":
- foundBar = true
- }
- uv, ok := m.Value.(*pb.MetricValue_Uint64Value)
- if !ok {
- t.Errorf("%+v: value %v got %T want pb.MetricValue_Uint64Value", m, m.Value, m.Value)
- continue
- }
- if uv.Uint64Value != 0 {
- t.Errorf("%v: Value got %v want 0", m, uv.Uint64Value)
- }
- }
-
- if !foundFoo {
- t.Errorf("/foo not found: %+v", emitter)
- }
- if !foundBar {
- t.Errorf("/bar not found: %+v", emitter)
- }
-
- // Increment foo. Only it is included in the next update.
- foo.Increment()
-
- emitter.Reset()
- EmitMetricUpdate()
-
- if len(emitter) != 1 {
- t.Fatalf("EmitMetricUpdate emitted %d events want 1", len(emitter))
- }
-
- update, ok = emitter[0].(*pb.MetricUpdate)
- if !ok {
- t.Fatalf("emitter %v got %T want pb.MetricUpdate", emitter[0], emitter[0])
- }
-
- if len(update.Metrics) != 1 {
- t.Errorf("MetricUpdate got %d metrics want 1", len(update.Metrics))
- }
-
- m := update.Metrics[0]
-
- if m.Name != "/foo" {
- t.Errorf("Metric %+v name got %q want '/foo'", m, m.Name)
- }
-
- uv, ok := m.Value.(*pb.MetricValue_Uint64Value)
- if !ok {
- t.Errorf("%+v: value %v got %T want pb.MetricValue_Uint64Value", m, m.Value, m.Value)
- }
- if uv.Uint64Value != 1 {
- t.Errorf("%v: Value got %v want 1", m, uv.Uint64Value)
- }
-}
-
-func TestEmitMetricUpdateWithFields(t *testing.T) {
- defer reset()
-
- field := Field{
- name: "weirdness_type",
- allowedValues: []string{"weird1", "weird2"}}
-
- counter, err := NewUint64Metric("/weirdness", false, pb.MetricMetadata_UNITS_NONE, counterDescription, field)
- if err != nil {
- t.Fatalf("NewUint64Metric got err %v want nil", err)
- }
-
- if err := Initialize(); err != nil {
- t.Fatalf("Initialize(): %s", err)
- }
-
- // Don't care about the registration metrics.
- emitter.Reset()
- EmitMetricUpdate()
-
- // For metrics with fields, we do not emit data unless the value is
- // incremented.
- if len(emitter) != 0 {
- t.Fatalf("EmitMetricUpdate emitted %d events want 0", len(emitter))
- }
-
- counter.IncrementBy(4, "weird1")
- counter.Increment("weird2")
-
- emitter.Reset()
- EmitMetricUpdate()
-
- if len(emitter) != 1 {
- t.Fatalf("EmitMetricUpdate emitted %d events want 1", len(emitter))
- }
-
- update, ok := emitter[0].(*pb.MetricUpdate)
- if !ok {
- t.Fatalf("emitter %v got %T want pb.MetricUpdate", emitter[0], emitter[0])
- }
-
- if len(update.Metrics) != 2 {
- t.Errorf("MetricUpdate got %d metrics want 2", len(update.Metrics))
- }
-
- foundWeird1 := false
- foundWeird2 := false
- for i := 0; i < len(update.Metrics); i++ {
- m := update.Metrics[i]
-
- if m.Name != "/weirdness" {
- t.Errorf("Metric %+v name got %q want '/weirdness'", m, m.Name)
- }
- if len(m.FieldValues) != 1 {
- t.Errorf("MetricUpdate got %d fields want 1", len(m.FieldValues))
- }
-
- switch m.FieldValues[0] {
- case "weird1":
- uv, ok := m.Value.(*pb.MetricValue_Uint64Value)
- if !ok {
- t.Errorf("%+v: value %v got %T want pb.MetricValue_Uint64Value", m, m.Value, m.Value)
- }
- if uv.Uint64Value != 4 {
- t.Errorf("%v: Value got %v want 4", m, uv.Uint64Value)
- }
- foundWeird1 = true
- case "weird2":
- uv, ok := m.Value.(*pb.MetricValue_Uint64Value)
- if !ok {
- t.Errorf("%+v: value %v got %T want pb.MetricValue_Uint64Value", m, m.Value, m.Value)
- }
- if uv.Uint64Value != 1 {
- t.Errorf("%v: Value got %v want 1", m, uv.Uint64Value)
- }
- foundWeird2 = true
- }
- }
-
- if !foundWeird1 {
- t.Errorf("Field value weird1 not found: %+v", emitter)
- }
- if !foundWeird2 {
- t.Errorf("Field value weird2 not found: %+v", emitter)
- }
-}
-
-func TestMetricUpdateStageTiming(t *testing.T) {
- defer reset()
-
- expectedTimings := map[InitStage]struct{ min, max time.Duration }{}
- measureStage := func(stage InitStage, body func()) {
- stageStarted := time.Now()
- endStage := StartStage(stage)
- bodyStarted := time.Now()
- body()
- bodyEnded := time.Now()
- endStage()
- stageEnded := time.Now()
-
- expectedTimings[stage] = struct{ min, max time.Duration }{
- min: bodyEnded.Sub(bodyStarted),
- max: stageEnded.Sub(stageStarted),
- }
- }
- checkStage := func(got *pb.StageTiming, want InitStage) {
- if InitStage(got.GetStage()) != want {
- t.Errorf("%v: got stage %q expected %q", got, got.GetStage(), want)
- }
- timingBounds, found := expectedTimings[want]
- if !found {
- t.Fatalf("invalid init stage name %q", want)
- }
- started := got.Started.AsTime()
- ended := got.Ended.AsTime()
- duration := ended.Sub(started)
- if duration < timingBounds.min {
- t.Errorf("stage %v: lasted %v, expected at least %v", want, duration, timingBounds.min)
- } else if duration > timingBounds.max {
- t.Errorf("stage %v: lasted %v, expected no more than %v", want, duration, timingBounds.max)
- }
- }
-
- // Test that it's legit to go through stages before metric registration.
- measureStage("before_first_update_1", func() {
- time.Sleep(100 * time.Millisecond)
- })
- measureStage("before_first_update_2", func() {
- time.Sleep(100 * time.Millisecond)
- })
-
- fooMetric, err := NewUint64Metric("/foo", false, pb.MetricMetadata_UNITS_NONE, fooDescription)
- if err != nil {
- t.Fatalf("Cannot register /foo: %v", err)
- }
- emitter.Reset()
- Initialize()
- EmitMetricUpdate()
-
- // We should have gotten the metric registration and the first MetricUpdate.
- if len(emitter) != 2 {
- t.Fatalf("emitter has %d messages (%v), expected %d", len(emitter), emitter, 2)
- }
-
- if registration, ok := emitter[0].(*pb.MetricRegistration); !ok {
- t.Errorf("first message is not MetricRegistration: %T / %v", emitter[0], emitter[0])
- } else if len(registration.Stages) != len(allStages) {
- t.Errorf("MetricRegistration has %d stages (%v), expected %d (%v)", len(registration.Stages), registration.Stages, len(allStages), allStages)
- } else {
- for i := 0; i < len(allStages); i++ {
- if InitStage(registration.Stages[i]) != allStages[i] {
- t.Errorf("MetricRegistration.Stages[%d]: got %q want %q", i, registration.Stages[i], allStages[i])
- }
- }
- }
-
- if firstUpdate, ok := emitter[1].(*pb.MetricUpdate); !ok {
- t.Errorf("second message is not MetricUpdate: %T / %v", emitter[1], emitter[1])
- } else if len(firstUpdate.StageTiming) != 2 {
- t.Errorf("MetricUpdate has %d stage timings (%v), expected %d", len(firstUpdate.StageTiming), firstUpdate.StageTiming, 2)
- } else {
- checkStage(firstUpdate.StageTiming[0], "before_first_update_1")
- checkStage(firstUpdate.StageTiming[1], "before_first_update_2")
- }
-
- // Ensure re-emitting doesn't cause another event to be sent.
- emitter.Reset()
- EmitMetricUpdate()
- if len(emitter) != 0 {
- t.Fatalf("EmitMetricUpdate emitted %d events want %d", len(emitter), 0)
- }
-
- // Generate monitoring data, we should get an event with no stages.
- fooMetric.Increment()
- emitter.Reset()
- EmitMetricUpdate()
- if len(emitter) != 1 {
- t.Fatalf("EmitMetricUpdate emitted %d events want %d", len(emitter), 1)
- } else if update, ok := emitter[0].(*pb.MetricUpdate); !ok {
-		t.Errorf("message is not MetricUpdate: %T / %v", emitter[0], emitter[0])
- } else if len(update.StageTiming) != 0 {
- t.Errorf("unexpected stage timing information: %v", update.StageTiming)
- }
-
- // Now generate new stages.
- measureStage("foo_stage_1", func() {
- time.Sleep(100 * time.Millisecond)
- })
- measureStage("foo_stage_2", func() {
- time.Sleep(100 * time.Millisecond)
- })
- emitter.Reset()
- EmitMetricUpdate()
- if len(emitter) != 1 {
- t.Fatalf("EmitMetricUpdate emitted %d events want %d", len(emitter), 1)
- } else if update, ok := emitter[0].(*pb.MetricUpdate); !ok {
-		t.Errorf("message is not MetricUpdate: %T / %v", emitter[0], emitter[0])
- } else if len(update.Metrics) != 0 {
- t.Errorf("MetricUpdate has %d metric value changes (%v), expected %d", len(update.Metrics), update.Metrics, 0)
- } else if len(update.StageTiming) != 2 {
- t.Errorf("MetricUpdate has %d stages (%v), expected %d", len(update.StageTiming), update.StageTiming, 2)
- } else {
- checkStage(update.StageTiming[0], "foo_stage_1")
- checkStage(update.StageTiming[1], "foo_stage_2")
- }
-
- // Now try generating data for both metrics and stages.
- fooMetric.Increment()
- measureStage("last_stage_1", func() {
- time.Sleep(100 * time.Millisecond)
- })
- measureStage("last_stage_2", func() {
- time.Sleep(100 * time.Millisecond)
- })
- fooMetric.Increment()
- emitter.Reset()
- EmitMetricUpdate()
- if len(emitter) != 1 {
- t.Fatalf("EmitMetricUpdate emitted %d events want %d", len(emitter), 1)
- } else if update, ok := emitter[0].(*pb.MetricUpdate); !ok {
-		t.Errorf("message is not MetricUpdate: %T / %v", emitter[0], emitter[0])
- } else if len(update.Metrics) != 1 {
- t.Errorf("MetricUpdate has %d metric value changes (%v), expected %d", len(update.Metrics), update.Metrics, 1)
- } else if len(update.StageTiming) != 2 {
- t.Errorf("MetricUpdate has %d stages (%v), expected %d", len(update.StageTiming), update.StageTiming, 2)
- } else {
- checkStage(update.StageTiming[0], "last_stage_1")
- checkStage(update.StageTiming[1], "last_stage_2")
- }
-}
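
The deleted metric_test.go above exercises the full registration and update flow. The following is a condensed sketch of that flow using only the APIs the test relies on (NewUint64Metric, StartStage, Initialize, EmitMetricUpdate); the stage name and description are illustrative.

package main

import (
	"gvisor.dev/gvisor/pkg/metric"
	pb "gvisor.dev/gvisor/pkg/metric/metric_go_proto"
)

func main() {
	// Register a metric before Initialize, as the deleted tests do.
	foo, err := metric.NewUint64Metric("/foo", false, pb.MetricMetadata_UNITS_NONE, "An example counter.")
	if err != nil {
		panic(err)
	}

	// Stages may be timed before registration; they are reported in the
	// first MetricUpdate emitted after Initialize.
	endStage := metric.StartStage("example_stage") // illustrative stage name
	endStage()

	// Emit the registration event, then periodic updates.
	if err := metric.Initialize(); err != nil {
		panic(err)
	}
	foo.Increment()
	metric.EmitMetricUpdate()
}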
diff --git a/pkg/p9/BUILD b/pkg/p9/BUILD
deleted file mode 100644
index 2b22b2203..000000000
--- a/pkg/p9/BUILD
+++ /dev/null
@@ -1,56 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(
- default_visibility = ["//visibility:public"],
- licenses = ["notice"],
-)
-
-go_library(
- name = "p9",
- srcs = [
- "buffer.go",
- "client.go",
- "client_file.go",
- "file.go",
- "handlers.go",
- "messages.go",
- "p9.go",
- "path_tree.go",
- "server.go",
- "transport.go",
- "transport_flipcall.go",
- "version.go",
- ],
- deps = [
- "//pkg/abi/linux/errno",
- "//pkg/errors",
- "//pkg/errors/linuxerr",
- "//pkg/fd",
- "//pkg/fdchannel",
- "//pkg/flipcall",
- "//pkg/log",
- "//pkg/pool",
- "//pkg/sync",
- "//pkg/unet",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "p9_test",
- size = "small",
- srcs = [
- "buffer_test.go",
- "client_test.go",
- "messages_test.go",
- "p9_test.go",
- "transport_test.go",
- "version_test.go",
- ],
- library = ":p9",
- deps = [
- "//pkg/fd",
- "//pkg/unet",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/p9/buffer_test.go b/pkg/p9/buffer_test.go
deleted file mode 100644
index a9c75f86b..000000000
--- a/pkg/p9/buffer_test.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package p9
-
-import (
- "testing"
-)
-
-func TestBufferOverrun(t *testing.T) {
- buf := &buffer{
- // This header indicates that a large string should follow, but
- // it is only two bytes. Reading a string should cause an
- // overrun.
- data: []byte{0x0, 0x16},
- }
- if s := buf.ReadString(); s != "" {
- t.Errorf("overrun read got %s, want empty", s)
- }
-}
diff --git a/pkg/p9/client_test.go b/pkg/p9/client_test.go
deleted file mode 100644
index 24e0dd7e8..000000000
--- a/pkg/p9/client_test.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package p9
-
-import (
- "testing"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/unet"
-)
-
-// TestVersion tests the version negotiation.
-func TestVersion(t *testing.T) {
- // First, create a new server and connection.
- serverSocket, clientSocket, err := unet.SocketPair(false)
- if err != nil {
- t.Fatalf("socketpair got err %v expected nil", err)
- }
- defer clientSocket.Close()
-
- // Create a new server and client.
- s := NewServer(nil)
- go s.Handle(serverSocket)
-
- // NewClient does a Tversion exchange, so this is our test for success.
- c, err := NewClient(clientSocket, DefaultMessageSize, HighestVersionString())
- if err != nil {
- t.Fatalf("got %v, expected nil", err)
- }
-
- // Check a bogus version string.
- if err := c.sendRecv(&Tversion{Version: "notokay", MSize: DefaultMessageSize}, &Rversion{}); err != unix.EINVAL {
- t.Errorf("got %v expected %v", err, unix.EINVAL)
- }
-
- // Check a bogus version number.
- if err := c.sendRecv(&Tversion{Version: "9P1000.L", MSize: DefaultMessageSize}, &Rversion{}); err != unix.EINVAL {
- t.Errorf("got %v expected %v", err, unix.EINVAL)
- }
-
- // Check a too high version number.
- if err := c.sendRecv(&Tversion{Version: versionString(highestSupportedVersion + 1), MSize: DefaultMessageSize}, &Rversion{}); err != unix.EAGAIN {
- t.Errorf("got %v expected %v", err, unix.EAGAIN)
- }
-
- // Check an invalid MSize.
- if err := c.sendRecv(&Tversion{Version: versionString(highestSupportedVersion), MSize: 0}, &Rversion{}); err != unix.EINVAL {
- t.Errorf("got %v expected %v", err, unix.EINVAL)
- }
-}
-
-func benchmarkSendRecv(b *testing.B, fn func(c *Client) func(message, message) error) {
- b.ReportAllocs()
-
- // See above.
- serverSocket, clientSocket, err := unet.SocketPair(false)
- if err != nil {
- b.Fatalf("socketpair got err %v expected nil", err)
- }
- defer clientSocket.Close()
-
- // See above.
- s := NewServer(nil)
- go s.Handle(serverSocket)
-
- // See above.
- c, err := NewClient(clientSocket, DefaultMessageSize, HighestVersionString())
- if err != nil {
- b.Fatalf("got %v, expected nil", err)
- }
-
- // Initialize messages.
- sendRecv := fn(c)
- tversion := &Tversion{
- Version: versionString(highestSupportedVersion),
- MSize: DefaultMessageSize,
- }
- rversion := new(Rversion)
-
- // Run in a loop.
- for i := 0; i < b.N; i++ {
- if err := sendRecv(tversion, rversion); err != nil {
- b.Fatalf("got unexpected err: %v", err)
- }
- }
-}
-
-func BenchmarkSendRecvLegacy(b *testing.B) {
- benchmarkSendRecv(b, func(c *Client) func(message, message) error {
- return func(t message, r message) error {
- _, err := c.sendRecvLegacy(t, r)
- return err
- }
- })
-}
-
-func BenchmarkSendRecvChannel(b *testing.B) {
- benchmarkSendRecv(b, func(c *Client) func(message, message) error { return c.sendRecvChannel })
-}
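
The deleted client_test.go above demonstrates 9P version negotiation over a socket pair. A minimal sketch of that setup, restricted to calls that appear in the test, looks like this:

package main

import (
	"log"

	"gvisor.dev/gvisor/pkg/p9"
	"gvisor.dev/gvisor/pkg/unet"
)

func main() {
	// One end for the server, one for the client, as in the deleted test.
	serverSocket, clientSocket, err := unet.SocketPair(false)
	if err != nil {
		log.Fatalf("SocketPair: %v", err)
	}
	defer clientSocket.Close()

	// The test passes nil here, which is sufficient for version negotiation.
	s := p9.NewServer(nil)
	go s.Handle(serverSocket)

	// NewClient performs the Tversion/Rversion exchange.
	c, err := p9.NewClient(clientSocket, p9.DefaultMessageSize, p9.HighestVersionString())
	if err != nil {
		log.Fatalf("NewClient: %v", err)
	}
	_ = c
}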
diff --git a/pkg/p9/messages_test.go b/pkg/p9/messages_test.go
deleted file mode 100644
index bfeb6c236..000000000
--- a/pkg/p9/messages_test.go
+++ /dev/null
@@ -1,507 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package p9
-
-import (
- "fmt"
- "reflect"
- "testing"
-)
-
-func TestEncodeDecode(t *testing.T) {
- objs := []encoder{
- &QID{
- Type: 1,
- Version: 2,
- Path: 3,
- },
- &FSStat{
- Type: 1,
- BlockSize: 2,
- Blocks: 3,
- BlocksFree: 4,
- BlocksAvailable: 5,
- Files: 6,
- FilesFree: 7,
- FSID: 8,
- NameLength: 9,
- },
- &AttrMask{
- Mode: true,
- NLink: true,
- UID: true,
- GID: true,
- RDev: true,
- ATime: true,
- MTime: true,
- CTime: true,
- INo: true,
- Size: true,
- Blocks: true,
- BTime: true,
- Gen: true,
- DataVersion: true,
- },
- &Attr{
- Mode: Exec,
- UID: 2,
- GID: 3,
- NLink: 4,
- RDev: 5,
- Size: 6,
- BlockSize: 7,
- Blocks: 8,
- ATimeSeconds: 9,
- ATimeNanoSeconds: 10,
- MTimeSeconds: 11,
- MTimeNanoSeconds: 12,
- CTimeSeconds: 13,
- CTimeNanoSeconds: 14,
- BTimeSeconds: 15,
- BTimeNanoSeconds: 16,
- Gen: 17,
- DataVersion: 18,
- },
- &SetAttrMask{
- Permissions: true,
- UID: true,
- GID: true,
- Size: true,
- ATime: true,
- MTime: true,
- CTime: true,
- ATimeNotSystemTime: true,
- MTimeNotSystemTime: true,
- },
- &SetAttr{
- Permissions: 1,
- UID: 2,
- GID: 3,
- Size: 4,
- ATimeSeconds: 5,
- ATimeNanoSeconds: 6,
- MTimeSeconds: 7,
- MTimeNanoSeconds: 8,
- },
- &Dirent{
- QID: QID{Type: 1},
- Offset: 2,
- Type: 3,
- Name: "a",
- },
- &Rlerror{
- Error: 1,
- },
- &Tstatfs{
- FID: 1,
- },
- &Rstatfs{
- FSStat: FSStat{Type: 1},
- },
- &Tlopen{
- FID: 1,
- Flags: WriteOnly,
- },
- &Rlopen{
- QID: QID{Type: 1},
- IoUnit: 2,
- },
- &Tlconnect{
- FID: 1,
- },
- &Rlconnect{},
- &Tlcreate{
- FID: 1,
- Name: "a",
- OpenFlags: 2,
- Permissions: 3,
- GID: 4,
- },
- &Rlcreate{
- Rlopen{QID: QID{Type: 1}},
- },
- &Tsymlink{
- Directory: 1,
- Name: "a",
- Target: "b",
- GID: 2,
- },
- &Rsymlink{
- QID: QID{Type: 1},
- },
- &Tmknod{
- Directory: 1,
- Name: "a",
- Mode: 2,
- Major: 3,
- Minor: 4,
- GID: 5,
- },
- &Rmknod{
- QID: QID{Type: 1},
- },
- &Trename{
- FID: 1,
- Directory: 2,
- Name: "a",
- },
- &Rrename{},
- &Treadlink{
- FID: 1,
- },
- &Rreadlink{
- Target: "a",
- },
- &Tgetattr{
- FID: 1,
- AttrMask: AttrMask{Mode: true},
- },
- &Rgetattr{
- Valid: AttrMask{Mode: true},
- QID: QID{Type: 1},
- Attr: Attr{Mode: Write},
- },
- &Tsetattr{
- FID: 1,
- Valid: SetAttrMask{Permissions: true},
- SetAttr: SetAttr{Permissions: Write},
- },
- &Rsetattr{},
- &Txattrwalk{
- FID: 1,
- NewFID: 2,
- Name: "a",
- },
- &Rxattrwalk{
- Size: 1,
- },
- &Txattrcreate{
- FID: 1,
- Name: "a",
- AttrSize: 2,
- Flags: 3,
- },
- &Rxattrcreate{},
- &Tgetxattr{
- FID: 1,
- Name: "abc",
- Size: 2,
- },
- &Rgetxattr{
- Value: "xyz",
- },
- &Tsetxattr{
- FID: 1,
- Name: "abc",
- Value: "xyz",
- Flags: 2,
- },
- &Rsetxattr{},
- &Treaddir{
- Directory: 1,
- Offset: 2,
- Count: 3,
- },
- &Rreaddir{
- // Count must be sufficient to encode a dirent.
- Count: 0x1a,
- Entries: []Dirent{{QID: QID{Type: 2}}},
- },
- &Tfsync{
- FID: 1,
- },
- &Rfsync{},
- &Tlink{
- Directory: 1,
- Target: 2,
- Name: "a",
- },
- &Rlink{},
- &Tmkdir{
- Directory: 1,
- Name: "a",
- Permissions: 2,
- GID: 3,
- },
- &Rmkdir{
- QID: QID{Type: 1},
- },
- &Trenameat{
- OldDirectory: 1,
- OldName: "a",
- NewDirectory: 2,
- NewName: "b",
- },
- &Rrenameat{},
- &Tunlinkat{
- Directory: 1,
- Name: "a",
- Flags: 2,
- },
- &Runlinkat{},
- &Tversion{
- MSize: 1,
- Version: "a",
- },
- &Rversion{
- MSize: 1,
- Version: "a",
- },
- &Tauth{
- AuthenticationFID: 1,
- UserName: "a",
- AttachName: "b",
- UID: 2,
- },
- &Rauth{
- QID: QID{Type: 1},
- },
- &Tattach{
- FID: 1,
- Auth: Tauth{AuthenticationFID: 2},
- },
- &Rattach{
- QID: QID{Type: 1},
- },
- &Tflush{
- OldTag: 1,
- },
- &Rflush{},
- &Twalk{
- FID: 1,
- NewFID: 2,
- Names: []string{"a"},
- },
- &Rwalk{
- QIDs: []QID{{Type: 1}},
- },
- &Tread{
- FID: 1,
- Offset: 2,
- Count: 3,
- },
- &Rread{
- Data: []byte{'a'},
- },
- &Twrite{
- FID: 1,
- Offset: 2,
- Data: []byte{'a'},
- },
- &Rwrite{
- Count: 1,
- },
- &Tclunk{
- FID: 1,
- },
- &Rclunk{},
- &Tremove{
- FID: 1,
- },
- &Rremove{},
- &Tflushf{
- FID: 1,
- },
- &Rflushf{},
- &Twalkgetattr{
- FID: 1,
- NewFID: 2,
- Names: []string{"a"},
- },
- &Rwalkgetattr{
- QIDs: []QID{{Type: 1}},
- Valid: AttrMask{Mode: true},
- Attr: Attr{Mode: Write},
- },
- &Tucreate{
- Tlcreate: Tlcreate{
- FID: 1,
- Name: "a",
- OpenFlags: 2,
- Permissions: 3,
- GID: 4,
- },
- UID: 5,
- },
- &Rucreate{
- Rlcreate{Rlopen{QID: QID{Type: 1}}},
- },
- &Tumkdir{
- Tmkdir: Tmkdir{
- Directory: 1,
- Name: "a",
- Permissions: 2,
- GID: 3,
- },
- UID: 4,
- },
- &Rumkdir{
- Rmkdir{QID: QID{Type: 1}},
- },
- &Tusymlink{
- Tsymlink: Tsymlink{
- Directory: 1,
- Name: "a",
- Target: "b",
- GID: 2,
- },
- UID: 3,
- },
- &Rusymlink{
- Rsymlink{QID: QID{Type: 1}},
- },
- &Tumknod{
- Tmknod: Tmknod{
- Directory: 1,
- Name: "a",
- Mode: 2,
- Major: 3,
- Minor: 4,
- GID: 5,
- },
- UID: 6,
- },
- &Rumknod{
- Rmknod{QID: QID{Type: 1}},
- },
- &Tsetattrclunk{
- FID: 1,
- Valid: SetAttrMask{
- Permissions: true,
- UID: true,
- GID: true,
- Size: true,
- ATime: true,
- MTime: true,
- CTime: true,
- ATimeNotSystemTime: true,
- MTimeNotSystemTime: true,
- },
- SetAttr: SetAttr{
- Permissions: 1,
- UID: 2,
- GID: 3,
- Size: 4,
- ATimeSeconds: 5,
- ATimeNanoSeconds: 6,
- MTimeSeconds: 7,
- MTimeNanoSeconds: 8,
- },
- },
- }
-
- for _, enc := range objs {
- // Encode the original.
- data := make([]byte, initialBufferLength)
- buf := buffer{data: data[:0]}
- enc.encode(&buf)
-
- // Create a new object, same as the first.
- enc2 := reflect.New(reflect.ValueOf(enc).Elem().Type()).Interface().(encoder)
- buf2 := buffer{data: buf.data}
-
- // To be fair, we need to add any payloads (directly).
- if pl, ok := enc.(payloader); ok {
- enc2.(payloader).SetPayload(pl.Payload())
- }
-
- // And any file payloads (directly).
- if fl, ok := enc.(filer); ok {
- enc2.(filer).SetFilePayload(fl.FilePayload())
- }
-
-		// Make sure it was okay.
- enc2.decode(&buf2)
- if buf2.isOverrun() {
- t.Errorf("object %#v->%#v got overrun on decode", enc, enc2)
- continue
- }
-
- // Check that they are equal.
- if !reflect.DeepEqual(enc, enc2) {
- t.Errorf("object %#v and %#v differ", enc, enc2)
- continue
- }
- }
-}
-
-func TestMessageStrings(t *testing.T) {
- for typ := range msgRegistry.factories {
- entry := &msgRegistry.factories[typ]
- if entry.create != nil {
- name := fmt.Sprintf("%+v", typ)
- t.Run(name, func(t *testing.T) {
- defer func() { // Ensure no panic.
- if r := recover(); r != nil {
- t.Errorf("printing %s failed: %v", name, r)
- }
- }()
- m := entry.create()
- _ = fmt.Sprintf("%v", m)
- err := ErrInvalidMsgType{MsgType(typ)}
- _ = err.Error()
- })
- }
- }
-}
-
-func TestRegisterDuplicate(t *testing.T) {
- defer func() {
- if r := recover(); r == nil {
- // We expect a panic.
- t.FailNow()
- }
- }()
-
- // Register a duplicate.
- msgRegistry.register(MsgRlerror, func() message { return &Rlerror{} })
-}
-
-func TestMsgCache(t *testing.T) {
- // Cache starts empty.
- if got, want := len(msgRegistry.factories[MsgRlerror].cache), 0; got != want {
- t.Errorf("Wrong cache size, got: %d, want: %d", got, want)
- }
-
- // Message can be created with an empty cache.
- msg, err := msgRegistry.get(0, MsgRlerror)
- if err != nil {
- t.Errorf("msgRegistry.get(): %v", err)
- }
- if got, want := len(msgRegistry.factories[MsgRlerror].cache), 0; got != want {
- t.Errorf("Wrong cache size, got: %d, want: %d", got, want)
- }
-
- // Check that message is added to the cache when returned.
- msgRegistry.put(msg)
- if got, want := len(msgRegistry.factories[MsgRlerror].cache), 1; got != want {
- t.Errorf("Wrong cache size, got: %d, want: %d", got, want)
- }
-
- // Check that returned message is reused.
- if got, err := msgRegistry.get(0, MsgRlerror); err != nil {
- t.Errorf("msgRegistry.get(): %v", err)
- } else if msg != got {
-		t.Errorf("Message not reused, got: %v, want: %v", got, msg)
- }
-
- // Check that cache doesn't grow beyond max size.
- for i := 0; i < maxCacheSize+1; i++ {
- msgRegistry.put(&Rlerror{})
- }
- if got, want := len(msgRegistry.factories[MsgRlerror].cache), maxCacheSize; got != want {
- t.Errorf("Wrong cache size, got: %d, want: %d", got, want)
- }
-}
diff --git a/pkg/p9/p9_state_autogen.go b/pkg/p9/p9_state_autogen.go
new file mode 100644
index 000000000..bc9b1bd57
--- /dev/null
+++ b/pkg/p9/p9_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package p9
diff --git a/pkg/p9/p9_test.go b/pkg/p9/p9_test.go
deleted file mode 100644
index 8dda6cc64..000000000
--- a/pkg/p9/p9_test.go
+++ /dev/null
@@ -1,188 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package p9
-
-import (
- "os"
- "testing"
-)
-
-func TestFileModeHelpers(t *testing.T) {
- fns := map[FileMode]struct {
- // name identifies the file mode.
- name string
-
- // function is the function that should return true given the
- // right FileMode.
- function func(m FileMode) bool
- }{
- ModeRegular: {
- name: "regular",
- function: FileMode.IsRegular,
- },
- ModeDirectory: {
- name: "directory",
- function: FileMode.IsDir,
- },
- ModeNamedPipe: {
- name: "named pipe",
- function: FileMode.IsNamedPipe,
- },
- ModeCharacterDevice: {
- name: "character device",
- function: FileMode.IsCharacterDevice,
- },
- ModeBlockDevice: {
- name: "block device",
- function: FileMode.IsBlockDevice,
- },
- ModeSymlink: {
- name: "symlink",
- function: FileMode.IsSymlink,
- },
- ModeSocket: {
- name: "socket",
- function: FileMode.IsSocket,
- },
- }
- for mode, info := range fns {
- // Make sure the mode doesn't identify as anything but itself.
- for testMode, testfns := range fns {
- if mode != testMode && testfns.function(mode) {
- t.Errorf("Mode %s returned true when asked if it was mode %s", info.name, testfns.name)
- }
- }
-
- // Make sure mode identifies as itself.
- if !info.function(mode) {
- t.Errorf("Mode %s returned false when asked if it was itself", info.name)
- }
- }
-}
-
-func TestFileModeToQID(t *testing.T) {
- for _, test := range []struct {
- // name identifies the test.
- name string
-
- // mode is the FileMode we start out with.
- mode FileMode
-
- // want is the corresponding QIDType we expect.
- want QIDType
- }{
- {
- name: "Directories are of type directory",
- mode: ModeDirectory,
- want: TypeDir,
- },
- {
- name: "Sockets are append-only files",
- mode: ModeSocket,
- want: TypeAppendOnly,
- },
- {
- name: "Named pipes are append-only files",
- mode: ModeNamedPipe,
- want: TypeAppendOnly,
- },
- {
- name: "Character devices are append-only files",
- mode: ModeCharacterDevice,
- want: TypeAppendOnly,
- },
- {
- name: "Symlinks are of type symlink",
- mode: ModeSymlink,
- want: TypeSymlink,
- },
- {
- name: "Regular files are of type regular",
- mode: ModeRegular,
- want: TypeRegular,
- },
- {
- name: "Block devices are regular files",
- mode: ModeBlockDevice,
- want: TypeRegular,
- },
- } {
- if qidType := test.mode.QIDType(); qidType != test.want {
- t.Errorf("ModeToQID test %s failed: got %o, wanted %o", test.name, qidType, test.want)
- }
- }
-}
-
-func TestP9ModeConverters(t *testing.T) {
- for _, m := range []FileMode{
- ModeRegular,
- ModeDirectory,
- ModeCharacterDevice,
- ModeBlockDevice,
- ModeSocket,
- ModeSymlink,
- ModeNamedPipe,
- } {
- if mb := ModeFromOS(m.OSMode()); mb != m {
- t.Errorf("Converting %o to OS.FileMode gives %o and is converted back as %o", m, m.OSMode(), mb)
- }
- }
-}
-
-func TestOSModeConverters(t *testing.T) {
- // Modes that can be converted back and forth.
- for _, m := range []os.FileMode{
- 0, // Regular file.
- os.ModeDir,
- os.ModeCharDevice | os.ModeDevice,
- os.ModeDevice,
- os.ModeSocket,
- os.ModeSymlink,
- os.ModeNamedPipe,
- } {
- if mb := ModeFromOS(m).OSMode(); mb != m {
- t.Errorf("Converting %o to p9.FileMode gives %o and is converted back as %o", m, ModeFromOS(m), mb)
- }
- }
-
- // Modes that will be converted to a regular file since p9 cannot
- // express these.
- for _, m := range []os.FileMode{
- os.ModeAppend,
- os.ModeExclusive,
- os.ModeTemporary,
- } {
- if p9Mode := ModeFromOS(m); p9Mode != ModeRegular {
- t.Errorf("Converting %o to p9.FileMode should have given ModeRegular, but yielded %o", m, p9Mode)
- }
- }
-}
-
-func TestAttrMaskContains(t *testing.T) {
- req := AttrMask{Mode: true, Size: true}
- have := AttrMask{}
- if have.Contains(req) {
- t.Fatalf("AttrMask %v should not be a superset of %v", have, req)
- }
- have.Mode = true
- if have.Contains(req) {
- t.Fatalf("AttrMask %v should not be a superset of %v", have, req)
- }
- have.Size = true
- have.MTime = true
- if !have.Contains(req) {
- t.Fatalf("AttrMask %v should be a superset of %v", have, req)
- }
-}
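
TestAttrMaskContains above exercises superset semantics on p9.AttrMask. Below is a minimal, self-contained sketch of that check, assuming only the three boolean fields the test touches; the real p9.AttrMask carries many more fields, so this stand-in is illustrative only.

package main

import "fmt"

// attrMask is an illustrative stand-in for p9.AttrMask, carrying only the
// fields that TestAttrMaskContains touches.
type attrMask struct {
    Mode  bool
    Size  bool
    MTime bool
}

// contains reports whether m covers at least every field requested in req,
// i.e. the superset relation the test asserts.
func (m attrMask) contains(req attrMask) bool {
    if req.Mode && !m.Mode {
        return false
    }
    if req.Size && !m.Size {
        return false
    }
    if req.MTime && !m.MTime {
        return false
    }
    return true
}

func main() {
    req := attrMask{Mode: true, Size: true}
    have := attrMask{Mode: true}
    fmt.Println(have.contains(req)) // false: Size is missing.
    have.Size = true
    fmt.Println(have.contains(req)) // true.
}
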
diff --git a/pkg/p9/p9test/BUILD b/pkg/p9/p9test/BUILD
deleted file mode 100644
index 9c1ada0cb..000000000
--- a/pkg/p9/p9test/BUILD
+++ /dev/null
@@ -1,90 +0,0 @@
-load("//tools:defs.bzl", "go_binary", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-alias(
- name = "mockgen",
- actual = "@com_github_golang_mock//mockgen:mockgen",
-)
-
-MOCK_SRC_PACKAGE = "gvisor.dev/gvisor/pkg/p9"
-
-# mockgen_reflect is a source file that contains mock generation code that
-# imports the p9 package and generates a specification via reflection. The
-# usual generation path must be split into two distinct parts because the full
-# source tree is not available to all build targets. Only declared dependencies
-# are available (and even then, not the Go source files).
-genrule(
- name = "mockgen_reflect",
- testonly = 1,
- outs = ["mockgen_reflect.go"],
- cmd = (
- "$(location :mockgen) " +
- "-package p9test " +
- "-prog_only " + MOCK_SRC_PACKAGE + " " +
- "Attacher,File > $@"
- ),
- tools = [":mockgen"],
-)
-
-# mockgen_exec is the binary that includes the above reflection generator.
-# Running this binary will emit an encoded version of the p9 Attacher and File
-# structures. This is consumed by the mocks genrule, below.
-go_binary(
- name = "mockgen_exec",
- testonly = 1,
- srcs = ["mockgen_reflect.go"],
- deps = [
- "//pkg/p9",
- "@com_github_golang_mock//mockgen/model:go_default_library",
- ],
-)
-
-# mocks consumes the encoded output above, and generates the full source for a
-# set of mocks. These are included directly in the p9test library.
-genrule(
- name = "mocks",
- testonly = 1,
- outs = ["mocks.go"],
- cmd = (
- "$(location :mockgen) " +
- "-package p9test " +
- "-exec_only $(location :mockgen_exec) " + MOCK_SRC_PACKAGE + " File > $@"
- ),
- tools = [
- ":mockgen",
- ":mockgen_exec",
- ],
-)
-
-go_library(
- name = "p9test",
- srcs = [
- "mocks.go",
- "p9test.go",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/fd",
- "//pkg/log",
- "//pkg/p9",
- "//pkg/sync",
- "//pkg/unet",
- "@com_github_golang_mock//gomock:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "client_test",
- size = "medium",
- srcs = ["client_test.go"],
- library = ":p9test",
- deps = [
- "//pkg/fd",
- "//pkg/p9",
- "//pkg/sync",
- "@com_github_golang_mock//gomock:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
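
The rules above drive a two-stage mockgen pipeline: a reflection program is generated and built first, then executed to emit mocks.go, which is compiled into the p9test library. The sketch below shows how such generated mocks are typically consumed; NewMockAttacher is the constructor name mockgen conventionally emits for p9.Attacher and is an assumption here, since the real identifiers live only in the generated mocks.go.

package p9test

import (
    "testing"

    "github.com/golang/mock/gomock"
    "golang.org/x/sys/unix"
)

// demoMockAttacher shows the usual gomock pattern: create a controller,
// program expectations on the generated mock, then exercise it.
func demoMockAttacher(t *testing.T) {
    ctrl := gomock.NewController(t)
    defer ctrl.Finish()

    // NewMockAttacher is assumed to be the mockgen-generated constructor.
    attacher := NewMockAttacher(ctrl)
    attacher.EXPECT().Attach().Return(nil, unix.EINVAL).Times(1)

    if _, err := attacher.Attach(); err != unix.EINVAL {
        t.Fatalf("got attach err %v, want EINVAL", err)
    }
}
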
diff --git a/pkg/p9/p9test/client_test.go b/pkg/p9/p9test/client_test.go
deleted file mode 100644
index bb77e8e5f..000000000
--- a/pkg/p9/p9test/client_test.go
+++ /dev/null
@@ -1,2252 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package p9test
-
-import (
- "bytes"
- "fmt"
- "io"
- "math/rand"
- "os"
- "reflect"
- "strings"
- "testing"
- "time"
-
- "github.com/golang/mock/gomock"
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/fd"
- "gvisor.dev/gvisor/pkg/p9"
- "gvisor.dev/gvisor/pkg/sync"
-)
-
-func TestPanic(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- // Create a new root.
- d := h.NewDirectory(nil)(nil)
- defer d.Close() // Needed manually.
- h.Attacher.EXPECT().Attach().Return(d, nil).Do(func() {
- // Panic here, and ensure that we get back EFAULT.
- panic("handler")
- })
-
- // Attach to the client.
- if _, err := c.Attach("/"); err != unix.EFAULT {
- t.Fatalf("got attach err %v, want EFAULT", err)
- }
-}
-
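// Sketch only: TestPanic above relies on the server converting a panic in a
// backend handler into EFAULT for the client. The real translation lives in
// the p9 server loop; the illustrative helper below shows the
// recover-and-map pattern it depends on, not the actual implementation.
func recoverToEFAULT(handler func() error) (err error) {
    defer func() {
        if r := recover(); r != nil {
            // Report EFAULT instead of tearing down the connection.
            err = unix.EFAULT
        }
    }()
    return handler()
}
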
-func TestAttachNoLeak(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- // Create a new root.
- d := h.NewDirectory(nil)(nil)
- h.Attacher.EXPECT().Attach().Return(d, nil).Times(1)
-
- // Attach to the client.
- f, err := c.Attach("/")
- if err != nil {
- t.Fatalf("got attach err %v, want nil", err)
- }
-
- // Don't close the file. This should be closed automatically when the
- // client disconnects. The mock asserts that everything is closed
- // exactly once. This statement just removes the unused variable error.
- _ = f
-}
-
-func TestBadAttach(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- // Return an error on attach.
- h.Attacher.EXPECT().Attach().Return(nil, unix.EINVAL).Times(1)
-
- // Attach to the client.
- if _, err := c.Attach("/"); err != unix.EINVAL {
- t.Fatalf("got attach err %v, want unix.EINVAL", err)
- }
-}
-
-func TestWalkAttach(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- // Create a new root.
- d := h.NewDirectory(map[string]Generator{
- "a": h.NewDirectory(map[string]Generator{
- "b": h.NewFile(),
- }),
- })(nil)
- h.Attacher.EXPECT().Attach().Return(d, nil).Times(1)
-
- // Attach to the client as a non-root, and ensure that the walk above
- // occurs as expected. We should get back b, and all references should
- // be dropped when the file is closed.
- f, err := c.Attach("/a/b")
- if err != nil {
- t.Fatalf("got attach err %v, want nil", err)
- }
- defer f.Close()
-
-	// Check that it's a regular file.
- if _, _, attr, err := f.GetAttr(p9.AttrMaskAll()); err != nil {
- t.Errorf("got err %v, want nil", err)
- } else if !attr.Mode.IsRegular() {
- t.Errorf("got mode %v, want regular file", err)
- }
-}
-
-// newTypeMap returns a new type map dictionary.
-func newTypeMap(h *Harness) map[string]Generator {
- return map[string]Generator{
- "directory": h.NewDirectory(map[string]Generator{}),
- "file": h.NewFile(),
- "symlink": h.NewSymlink(),
- "block-device": h.NewBlockDevice(),
- "character-device": h.NewCharacterDevice(),
- "named-pipe": h.NewNamedPipe(),
- "socket": h.NewSocket(),
- }
-}
-
-// newRoot returns a new root filesystem.
-//
-// This is set up in a deterministic way for testing most operations.
-//
-// The represented file system looks like:
-// - file
-// - symlink
-// - directory
-// ...
-// + one
-// - file
-// - symlink
-// - directory
-// ...
-// + two
-// - file
-// - symlink
-// - directory
-// ...
-// + three
-// - file
-// - symlink
-// - directory
-// ...
-func newRoot(h *Harness, c *p9.Client) (*Mock, p9.File) {
- root := newTypeMap(h)
- one := newTypeMap(h)
- two := newTypeMap(h)
- three := newTypeMap(h)
- one["two"] = h.NewDirectory(two) // Will be nested in one.
- root["one"] = h.NewDirectory(one) // Top level.
- root["three"] = h.NewDirectory(three) // Alternate top-level.
-
- // Create a new root.
- rootBackend := h.NewDirectory(root)(nil)
- h.Attacher.EXPECT().Attach().Return(rootBackend, nil)
-
- // Attach to the client.
- r, err := c.Attach("/")
- if err != nil {
- h.t.Fatalf("got attach err %v, want nil", err)
- }
-
- return rootBackend, r
-}
-
-func allInvalidNames(from string) []string {
- return []string{
- from + "/other",
- from + "/..",
- from + "/.",
- from + "/",
- "other/" + from,
- "/" + from,
- "./" + from,
- "../" + from,
- ".",
- "..",
- "/",
- "",
- }
-}
-
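// Sketch only: the EINVAL expectations driven by allInvalidNames amount to a
// per-component check roughly like the one below. The real validation lives
// inside the p9 package; this helper is illustrative.
func isValidComponent(name string) bool {
    return name != "" && name != "." && name != ".." && !strings.Contains(name, "/")
}
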
-func TestWalkInvalid(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- _, root := newRoot(h, c)
- defer root.Close()
-
- // Run relevant tests.
- for name := range newTypeMap(h) {
-		// These are all the various ways that one might attempt to
-		// construct compound paths. They should all be rejected: any
-		// component that contains a / is not allowed, and neither are
-		// the singular names '.' and '..'.
- if _, _, err := root.Walk([]string{".", name}); err != unix.EINVAL {
- t.Errorf("Walk through . %s wanted EINVAL, got %v", name, err)
- }
- if _, _, err := root.Walk([]string{"..", name}); err != unix.EINVAL {
- t.Errorf("Walk through . %s wanted EINVAL, got %v", name, err)
- }
- if _, _, err := root.Walk([]string{name, "."}); err != unix.EINVAL {
- t.Errorf("Walk through %s . wanted EINVAL, got %v", name, err)
- }
- if _, _, err := root.Walk([]string{name, ".."}); err != unix.EINVAL {
- t.Errorf("Walk through %s .. wanted EINVAL, got %v", name, err)
- }
- for _, invalidName := range allInvalidNames(name) {
- if _, _, err := root.Walk([]string{invalidName}); err != unix.EINVAL {
- t.Errorf("Walk through %s wanted EINVAL, got %v", invalidName, err)
- }
- }
- wantErr := unix.EINVAL
- if name == "directory" {
- // We can attempt a walk through a directory. However,
- // we should never see a file named "other", so we
- // expect this to return ENOENT.
- wantErr = unix.ENOENT
- }
- if _, _, err := root.Walk([]string{name, "other"}); err != wantErr {
- t.Errorf("Walk through %s/other wanted %v, got %v", name, wantErr, err)
- }
-
- // Do a successful walk.
- _, f, err := root.Walk([]string{name})
- if err != nil {
- t.Errorf("Walk to %s wanted nil, got %v", name, err)
- }
- defer f.Close()
- local := h.Pop(f)
-
- // Check that the file matches.
- _, localMask, localAttr, localErr := local.GetAttr(p9.AttrMaskAll())
- if _, mask, attr, err := f.GetAttr(p9.AttrMaskAll()); mask != localMask || attr != localAttr || err != localErr {
- t.Errorf("GetAttr got (%v, %v, %v), wanted (%v, %v, %v)",
- mask, attr, err, localMask, localAttr, localErr)
- }
-
- // Ensure we can't walk backwards.
- if _, _, err := f.Walk([]string{"."}); err != unix.EINVAL {
- t.Errorf("Walk through %s/. wanted EINVAL, got %v", name, err)
- }
- if _, _, err := f.Walk([]string{".."}); err != unix.EINVAL {
- t.Errorf("Walk through %s/.. wanted EINVAL, got %v", name, err)
- }
- }
-}
-
-// fileGenerator is a function to generate files via walk or create.
-//
-// Examples are:
-// - walkHelper
-// - walkAndOpenHelper
-// - createHelper
-type fileGenerator func(*Harness, string, p9.File) (*Mock, *Mock, p9.File)
-
-// walkHelper walks to the given file.
-//
-// The backends of the parent and walked file are returned, as well as the
-// walked client file.
-func walkHelper(h *Harness, name string, dir p9.File) (parentBackend *Mock, walkedBackend *Mock, walked p9.File) {
- _, parent, err := dir.Walk(nil)
- if err != nil {
- h.t.Fatalf("Walk(nil) got err %v, want nil", err)
- }
- defer parent.Close()
- parentBackend = h.Pop(parent)
-
- _, walked, err = parent.Walk([]string{name})
- if err != nil {
- h.t.Fatalf("Walk(%s) got err %v, want nil", name, err)
- }
- walkedBackend = h.Pop(walked)
-
- return parentBackend, walkedBackend, walked
-}
-
-// walkAndOpenHelper additionally opens the walked file, if possible.
-func walkAndOpenHelper(h *Harness, name string, dir p9.File) (*Mock, *Mock, p9.File) {
- parentBackend, walkedBackend, walked := walkHelper(h, name, dir)
- if p9.CanOpen(walkedBackend.Attr.Mode) {
- // Open for all file types that we can. We stick to a read-only
- // open here because directories may not be opened otherwise.
- walkedBackend.EXPECT().Open(p9.ReadOnly).Times(1)
- if _, _, _, err := walked.Open(p9.ReadOnly); err != nil {
- h.t.Errorf("got open err %v, want nil", err)
- }
- } else {
- // ... or assert an error for others.
- if _, _, _, err := walked.Open(p9.ReadOnly); err != unix.EINVAL {
- h.t.Errorf("got open err %v, want EINVAL", err)
- }
- }
- return parentBackend, walkedBackend, walked
-}
-
-// createHelper creates the given file and returns the parent directory,
-// created file and client file, which must be closed when done.
-func createHelper(h *Harness, name string, dir p9.File) (*Mock, *Mock, p9.File) {
- // Clone the directory first, since Create replaces the existing file.
- // We change the type after calling create.
- _, dirThenFile, err := dir.Walk(nil)
- if err != nil {
- h.t.Fatalf("got walk err %v, want nil", err)
- }
-
-	// Create a new server-side file. On the server side, a new file is
- // returned from a create call. The client will reuse the same file,
- // but we still expect the normal chain of closes. This complicates
- // things a bit because the "parent" will always chain to the cloned
- // dir above.
- dirBackend := h.Pop(dirThenFile) // New backend directory.
- newFile := h.NewFile()(dirBackend) // New file with backend parent.
- dirBackend.EXPECT().Create(name, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, newFile, newFile.QID, uint32(0), nil)
-
- // Create via the client.
- _, dirThenFile, _, _, err = dirThenFile.Create(name, p9.ReadOnly, 0, 0, 0)
- if err != nil {
- h.t.Fatalf("got create err %v, want nil", err)
- }
-
- // Ensure subsequent walks succeed.
- dirBackend.AddChild(name, h.NewFile())
- return dirBackend, newFile, dirThenFile
-}
-
-// deprecatedRemover allows us to access the deprecated Remove operation within
-// the p9.File client object.
-type deprecatedRemover interface {
- Remove() error
-}
-
-// checkDeleted asserts that relevant methods fail for an unlinked file.
-//
-// This function will close the file at the end.
-func checkDeleted(h *Harness, file p9.File) {
- defer file.Close() // See doc.
-
- if _, _, _, err := file.Open(p9.ReadOnly); err != unix.EINVAL {
- h.t.Errorf("open while deleted, got %v, want EINVAL", err)
- }
- if _, _, _, _, err := file.Create("created", p9.ReadOnly, 0, 0, 0); err != unix.EINVAL {
- h.t.Errorf("create while deleted, got %v, want EINVAL", err)
- }
- if _, err := file.Symlink("old", "new", 0, 0); err != unix.EINVAL {
- h.t.Errorf("symlink while deleted, got %v, want EINVAL", err)
- }
- // N.B. This link is technically invalid, but if a call to link is
- // actually made in the backend then the mock will panic.
- if err := file.Link(file, "new"); err != unix.EINVAL {
- h.t.Errorf("link while deleted, got %v, want EINVAL", err)
- }
- if err := file.RenameAt("src", file, "dst"); err != unix.EINVAL {
- h.t.Errorf("renameAt while deleted, got %v, want EINVAL", err)
- }
- if err := file.UnlinkAt("file", 0); err != unix.EINVAL {
- h.t.Errorf("unlinkAt while deleted, got %v, want EINVAL", err)
- }
- if err := file.Rename(file, "dst"); err != unix.EINVAL {
- h.t.Errorf("rename while deleted, got %v, want EINVAL", err)
- }
- if _, err := file.Readlink(); err != unix.EINVAL {
- h.t.Errorf("readlink while deleted, got %v, want EINVAL", err)
- }
- if _, err := file.Mkdir("dir", p9.ModeDirectory, 0, 0); err != unix.EINVAL {
- h.t.Errorf("mkdir while deleted, got %v, want EINVAL", err)
- }
- if _, err := file.Mknod("dir", p9.ModeDirectory, 0, 0, 0, 0); err != unix.EINVAL {
- h.t.Errorf("mknod while deleted, got %v, want EINVAL", err)
- }
- if _, err := file.Readdir(0, 1); err != unix.EINVAL {
- h.t.Errorf("readdir while deleted, got %v, want EINVAL", err)
- }
- if _, err := file.Connect(p9.ConnectFlags(0)); err != unix.EINVAL {
- h.t.Errorf("connect while deleted, got %v, want EINVAL", err)
- }
-
- // The remove method is technically deprecated, but we want to ensure
- // that it still checks for deleted appropriately. We must first clone
- // the file because remove is equivalent to close.
- _, newFile, err := file.Walk(nil)
- if err == unix.EBUSY {
-		// We can't walk from here because this reference is already
-		// open. That's fine: TestUnlink also exercises unopened
-		// references, so just skip the remove operation here.
- return
- } else if err != nil {
- h.t.Fatalf("clone failed, got %v, want nil", err)
- }
- if err := newFile.(deprecatedRemover).Remove(); err != unix.EINVAL {
- h.t.Errorf("remove while deleted, got %v, want EINVAL", err)
- }
-}
-
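// Sketch only: checkDeleted expects the client to gate operations on an
// unlinked file and return EINVAL without ever reaching the backend (which
// is why the mocks need no expectations above). The type and method below
// are illustrative, not the client's real layout.
type deletedGate struct {
    deleted bool
}

func (g *deletedGate) mkdir(name string) error {
    if g.deleted {
        return unix.EINVAL
    }
    // ... otherwise issue the Tmkdir request ...
    return nil
}
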
-// deleter is a function to remove a file.
-type deleter func(parent p9.File, name string) error
-
-// unlinkAt is a deleter.
-func unlinkAt(parent p9.File, name string) error {
- // Call unlink. Note that a filesystem may normally impose additional
-	// constraints on unlinkat success, such as ensuring that a directory is
- // empty, requiring AT_REMOVEDIR in flags to remove a directory, etc.
- // None of that is required internally (entire trees can be marked
- // deleted when this operation succeeds), so the mock will succeed.
- return parent.UnlinkAt(name, 0)
-}
-
-// remove is a deleter.
-func remove(parent p9.File, name string) error {
- // See notes above re: remove.
- _, newFile, err := parent.Walk([]string{name})
- if err != nil {
- // Should not be expected.
- return err
- }
-
- // Do the actual remove.
- if err := newFile.(deprecatedRemover).Remove(); err != nil {
- return err
- }
-
- // Ensure that the remove closed the file.
- if err := newFile.(deprecatedRemover).Remove(); err != unix.EBADF {
- return unix.EBADF // Propagate this code.
- }
-
- return nil
-}
-
-// unlinkHelper unlinks the noted path, and ensures that all relevant
-// operations on that path, acquired from multiple paths, start failing.
-func unlinkHelper(h *Harness, root p9.File, targetNames []string, targetGen fileGenerator, deleteFn deleter) {
- // name is the file to be unlinked.
- name := targetNames[len(targetNames)-1]
-
- // Walk to the directory containing the target.
- _, parent, err := root.Walk(targetNames[:len(targetNames)-1])
- if err != nil {
- h.t.Fatalf("got walk err %v, want nil", err)
- }
- defer parent.Close()
- parentBackend := h.Pop(parent)
-
- // Walk to or generate the target file.
- _, _, target := targetGen(h, name, parent)
- defer checkDeleted(h, target)
-
- // Walk to a second reference.
- _, second, err := parent.Walk([]string{name})
- if err != nil {
- h.t.Fatalf("got walk err %v, want nil", err)
- }
- defer checkDeleted(h, second)
-
- // Walk to a third reference, from the start.
- _, third, err := root.Walk(targetNames)
- if err != nil {
- h.t.Fatalf("got walk err %v, want nil", err)
- }
- defer checkDeleted(h, third)
-
- // This will be translated in the backend to an unlinkat.
- parentBackend.EXPECT().UnlinkAt(name, uint32(0)).Return(nil)
-
- // Actually perform the deletion.
- if err := deleteFn(parent, name); err != nil {
- h.t.Fatalf("got delete err %v, want nil", err)
- }
-}
-
-func unlinkTest(t *testing.T, targetNames []string, targetGen fileGenerator) {
- t.Run(fmt.Sprintf("unlinkAt(%s)", strings.Join(targetNames, "/")), func(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- _, root := newRoot(h, c)
- defer root.Close()
-
- unlinkHelper(h, root, targetNames, targetGen, unlinkAt)
- })
- t.Run(fmt.Sprintf("remove(%s)", strings.Join(targetNames, "/")), func(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- _, root := newRoot(h, c)
- defer root.Close()
-
- unlinkHelper(h, root, targetNames, targetGen, remove)
- })
-}
-
-func TestUnlink(t *testing.T) {
- // Unlink all files.
- for name := range newTypeMap(nil) {
- unlinkTest(t, []string{name}, walkHelper)
- unlinkTest(t, []string{name}, walkAndOpenHelper)
- unlinkTest(t, []string{"one", name}, walkHelper)
- unlinkTest(t, []string{"one", name}, walkAndOpenHelper)
- unlinkTest(t, []string{"one", "two", name}, walkHelper)
- unlinkTest(t, []string{"one", "two", name}, walkAndOpenHelper)
- }
-
- // Unlink a directory.
- unlinkTest(t, []string{"one"}, walkHelper)
- unlinkTest(t, []string{"one"}, walkAndOpenHelper)
- unlinkTest(t, []string{"one", "two"}, walkHelper)
- unlinkTest(t, []string{"one", "two"}, walkAndOpenHelper)
-
- // Unlink created files.
- unlinkTest(t, []string{"created"}, createHelper)
- unlinkTest(t, []string{"one", "created"}, createHelper)
- unlinkTest(t, []string{"one", "two", "created"}, createHelper)
-}
-
-func TestUnlinkAtInvalid(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- _, root := newRoot(h, c)
- defer root.Close()
-
- for name := range newTypeMap(nil) {
- for _, invalidName := range allInvalidNames(name) {
- if err := root.UnlinkAt(invalidName, 0); err != unix.EINVAL {
- t.Errorf("got %v for name %q, want EINVAL", err, invalidName)
- }
- }
- }
-}
-
-// expectRenamed asserts an ordered sequence of Renamed calls: the ancestor of
-// file that is len(elements) levels up is renamed to dstName under dstParent,
-// and file plus each intermediate ancestor then receives Renamed with its
-// unchanged name, parent first.
-func expectRenamed(file *Mock, elements []string, dstParent *Mock, dstName string) *gomock.Call {
- if len(elements) > 0 {
- // Recurse to the parent, if necessary.
- call := expectRenamed(file.parent, elements[:len(elements)-1], dstParent, dstName)
-
- // Recursive case: this element is unchanged, but should have
-		// its hook called after the parent.
- return file.EXPECT().Renamed(file.parent, elements[len(elements)-1]).Do(func(p p9.File, _ string) {
- file.parent = p.(*Mock)
- }).After(call)
- }
-
- // Base case: this is the changed element.
- return file.EXPECT().Renamed(dstParent, dstName).Do(func(p p9.File, name string) {
- file.parent = p.(*Mock)
- })
-}
-
-// renamer is a rename function.
-type renamer func(h *Harness, srcParent, dstParent p9.File, origName, newName string, selfRename bool) error
-
-// renameAt is a renamer.
-func renameAt(_ *Harness, srcParent, dstParent p9.File, srcName, dstName string, selfRename bool) error {
- return srcParent.RenameAt(srcName, dstParent, dstName)
-}
-
-// rename is a renamer.
-func rename(h *Harness, srcParent, dstParent p9.File, srcName, dstName string, selfRename bool) error {
- _, f, err := srcParent.Walk([]string{srcName})
- if err != nil {
- return err
- }
- defer f.Close()
- if !selfRename {
- backend := h.Pop(f)
- backend.EXPECT().Renamed(gomock.Any(), dstName).Do(func(p p9.File, name string) {
- backend.parent = p.(*Mock) // Required for close ordering.
- })
- }
- return f.Rename(dstParent, dstName)
-}
-
-// renameHelper executes a rename, and asserts that all relevant elements
-// receive expected notifications. If overwriting a file, this includes
-// ensuring that the target has been appropriately marked as unlinked.
-func renameHelper(h *Harness, root p9.File, srcNames []string, dstNames []string, target fileGenerator, renameFn renamer) {
- // Walk to the directory containing the target.
- srcQID, targetParent, err := root.Walk(srcNames[:len(srcNames)-1])
- if err != nil {
- h.t.Fatalf("got walk err %v, want nil", err)
- }
- defer targetParent.Close()
- targetParentBackend := h.Pop(targetParent)
-
- // Walk to or generate the target file.
- _, targetBackend, src := target(h, srcNames[len(srcNames)-1], targetParent)
- defer src.Close()
-
- // Walk to a second reference.
- _, second, err := targetParent.Walk([]string{srcNames[len(srcNames)-1]})
- if err != nil {
- h.t.Fatalf("got walk err %v, want nil", err)
- }
- defer second.Close()
- secondBackend := h.Pop(second)
-
- // Walk to a third reference, from the start.
- _, third, err := root.Walk(srcNames)
- if err != nil {
- h.t.Fatalf("got walk err %v, want nil", err)
- }
- defer third.Close()
- thirdBackend := h.Pop(third)
-
- // Find the common suffix to identify the rename parent.
- var (
- renameDestPath []string
- renameSrcPath []string
- selfRename bool
- )
- for i := 1; i <= len(srcNames) && i <= len(dstNames); i++ {
- if srcNames[len(srcNames)-i] != dstNames[len(dstNames)-i] {
- // Take the full prefix of dstNames up until this
- // point, including the first mismatched name. The
- // first mismatch must be the renamed entry.
- renameDestPath = dstNames[:len(dstNames)-i+1]
- renameSrcPath = srcNames[:len(srcNames)-i+1]
-
- // Does the renameDestPath fully contain the
- // renameSrcPath here? If yes, then this is a mismatch.
- // We can't rename the src to some subpath of itself.
- if len(renameDestPath) > len(renameSrcPath) &&
- reflect.DeepEqual(renameDestPath[:len(renameSrcPath)], renameSrcPath) {
- renameDestPath = nil
- renameSrcPath = nil
- continue
- }
- break
- }
- }
- if len(renameSrcPath) == 0 || len(renameDestPath) == 0 {
- // This must be a rename to self, or a tricky look-alike. This
- // happens iff we fail to find a suitable divergence in the two
- // paths. It's a true self move if the path length is the same.
- renameDestPath = dstNames
- renameSrcPath = srcNames
- selfRename = len(srcNames) == len(dstNames)
- }
-
- // Walk to the source parent.
- _, srcParent, err := root.Walk(renameSrcPath[:len(renameSrcPath)-1])
- if err != nil {
- h.t.Fatalf("got walk err %v, want nil", err)
- }
- defer srcParent.Close()
- srcParentBackend := h.Pop(srcParent)
-
- // Walk to the destination parent.
- _, dstParent, err := root.Walk(renameDestPath[:len(renameDestPath)-1])
- if err != nil {
- h.t.Fatalf("got walk err %v, want nil", err)
- }
- defer dstParent.Close()
- dstParentBackend := h.Pop(dstParent)
-
- // expectedErr is the result of the rename operation.
- var expectedErr error
-
- // Walk to the target file, if one exists.
- dstQID, dst, err := root.Walk(renameDestPath)
- if err == nil {
- if !selfRename && srcQID[0].Type == dstQID[0].Type {
-			// If there is a destination file, and it is of the
- // same type as the source file, then we expect the
- // rename to succeed. We expect the destination file to
- // be deleted, so we run a deletion test on it in this
- // case.
- defer checkDeleted(h, dst)
- } else {
-			// If the destination's type differs from the source,
-			// then we expect the rename to fail and EINVAL to be
-			// returned.
-			//
-			// If the file is being renamed to itself, this is
- // technically allowed and a no-op, but all the
- // triggers will fire.
- if !selfRename {
- expectedErr = unix.EINVAL
- }
- dst.Close()
- }
- }
- dstName := renameDestPath[len(renameDestPath)-1] // Renamed element.
- srcName := renameSrcPath[len(renameSrcPath)-1] // Renamed element.
- if expectedErr == nil && !selfRename {
-		// Expect all to be renamed appropriately. Note that if this is
-		// the final file being renamed, then we expect its Renamed hook
-		// to be called with the new parent. If not, then we expect the
-		// Renamed hook to be called, but the parent will remain
-		// unchanged.
- elements := srcNames[len(renameSrcPath):]
- expectRenamed(targetBackend, elements, dstParentBackend, dstName)
- expectRenamed(secondBackend, elements, dstParentBackend, dstName)
- expectRenamed(thirdBackend, elements, dstParentBackend, dstName)
-
- // The target parent has also been opened, and may be moved
- // directly or indirectly.
- if len(elements) > 1 {
- expectRenamed(targetParentBackend, elements[:len(elements)-1], dstParentBackend, dstName)
- }
- }
-
- // Expect the rename if it's not the same file. Note that like unlink,
- // renames are always translated to the at variant in the backend.
- if !selfRename {
- srcParentBackend.EXPECT().RenameAt(srcName, dstParentBackend, dstName).Return(expectedErr)
- }
-
- // Perform the actual rename; everything has been lined up.
- if err := renameFn(h, srcParent, dstParent, srcName, dstName, selfRename); err != expectedErr {
- h.t.Fatalf("got rename err %v, want %v", err, expectedErr)
- }
-}
-
-func renameTest(t *testing.T, srcNames []string, dstNames []string, target fileGenerator) {
- t.Run(fmt.Sprintf("renameAt(%s->%s)", strings.Join(srcNames, "/"), strings.Join(dstNames, "/")), func(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- _, root := newRoot(h, c)
- defer root.Close()
-
- renameHelper(h, root, srcNames, dstNames, target, renameAt)
- })
- t.Run(fmt.Sprintf("rename(%s->%s)", strings.Join(srcNames, "/"), strings.Join(dstNames, "/")), func(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- _, root := newRoot(h, c)
- defer root.Close()
-
- renameHelper(h, root, srcNames, dstNames, target, rename)
- })
-}
-
-func TestRename(t *testing.T) {
- // In-directory rename, simple case.
- for name := range newTypeMap(nil) {
- // Within the root.
- renameTest(t, []string{name}, []string{"renamed"}, walkHelper)
- renameTest(t, []string{name}, []string{"renamed"}, walkAndOpenHelper)
-
- // Within a subdirectory.
- renameTest(t, []string{"one", name}, []string{"one", "renamed"}, walkHelper)
- renameTest(t, []string{"one", name}, []string{"one", "renamed"}, walkAndOpenHelper)
- }
-
- // ... with created files.
- renameTest(t, []string{"created"}, []string{"renamed"}, createHelper)
- renameTest(t, []string{"one", "created"}, []string{"one", "renamed"}, createHelper)
-
- // Across directories.
- for name := range newTypeMap(nil) {
- // Down one level.
- renameTest(t, []string{"one", name}, []string{"one", "two", "renamed"}, walkHelper)
- renameTest(t, []string{"one", name}, []string{"one", "two", "renamed"}, walkAndOpenHelper)
-
- // Up one level.
- renameTest(t, []string{"one", "two", name}, []string{"one", "renamed"}, walkHelper)
- renameTest(t, []string{"one", "two", name}, []string{"one", "renamed"}, walkAndOpenHelper)
-
- // Across at the same level.
- renameTest(t, []string{"one", name}, []string{"three", "renamed"}, walkHelper)
- renameTest(t, []string{"one", name}, []string{"three", "renamed"}, walkAndOpenHelper)
- }
-
- // ... with created files.
- renameTest(t, []string{"one", "created"}, []string{"one", "two", "renamed"}, createHelper)
- renameTest(t, []string{"one", "two", "created"}, []string{"one", "renamed"}, createHelper)
- renameTest(t, []string{"one", "created"}, []string{"three", "renamed"}, createHelper)
-
- // Renaming parents.
- for name := range newTypeMap(nil) {
- // Rename a parent.
- renameTest(t, []string{"one", name}, []string{"renamed", name}, walkHelper)
- renameTest(t, []string{"one", name}, []string{"renamed", name}, walkAndOpenHelper)
-
- // Rename a super parent.
- renameTest(t, []string{"one", "two", name}, []string{"renamed", name}, walkHelper)
- renameTest(t, []string{"one", "two", name}, []string{"renamed", name}, walkAndOpenHelper)
- }
-
- // ... with created files.
- renameTest(t, []string{"one", "created"}, []string{"renamed", "created"}, createHelper)
- renameTest(t, []string{"one", "two", "created"}, []string{"renamed", "created"}, createHelper)
-
- // Over existing files, including itself.
- for name := range newTypeMap(nil) {
- for other := range newTypeMap(nil) {
- // Overwrite the noted file (may be itself).
- renameTest(t, []string{"one", name}, []string{"one", other}, walkHelper)
- renameTest(t, []string{"one", name}, []string{"one", other}, walkAndOpenHelper)
-
- // Overwrite other files in another directory.
- renameTest(t, []string{"one", name}, []string{"one", "two", other}, walkHelper)
- renameTest(t, []string{"one", name}, []string{"one", "two", other}, walkAndOpenHelper)
- }
-
- // Overwrite by moving the parent.
- renameTest(t, []string{"three", name}, []string{"one", name}, walkHelper)
- renameTest(t, []string{"three", name}, []string{"one", name}, walkAndOpenHelper)
-
- // Create over the types.
- renameTest(t, []string{"one", "created"}, []string{"one", name}, createHelper)
- renameTest(t, []string{"one", "created"}, []string{"one", "two", name}, createHelper)
- renameTest(t, []string{"three", "created"}, []string{"one", name}, createHelper)
- }
-}
-
-func TestRenameInvalid(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- _, root := newRoot(h, c)
- defer root.Close()
-
- for name := range newTypeMap(nil) {
- for _, invalidName := range allInvalidNames(name) {
- if err := root.Rename(root, invalidName); err != unix.EINVAL {
- t.Errorf("got %v for name %q, want EINVAL", err, invalidName)
- }
- }
- }
-}
-
-func TestRenameAtInvalid(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- _, root := newRoot(h, c)
- defer root.Close()
-
- for name := range newTypeMap(nil) {
- for _, invalidName := range allInvalidNames(name) {
- if err := root.RenameAt(invalidName, root, "okay"); err != unix.EINVAL {
- t.Errorf("got %v for name %q, want EINVAL", err, invalidName)
- }
- if err := root.RenameAt("okay", root, invalidName); err != unix.EINVAL {
- t.Errorf("got %v for name %q, want EINVAL", err, invalidName)
- }
- }
- }
-}
-
-// TestRenameSecondOrder tests that indirect rename targets continue to receive
-// Renamed calls after a rename of their renamed parent, i.e.:
-//
-// 1. Create /one/file
-// 2. Create /directory
-// 3. Rename /one -> /directory/one
-// 4. Rename /directory -> /three/foo
-// 5. file from (1) should still receive Renamed.
-//
-// This is a regression test for b/135219260.
-func TestRenameSecondOrder(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- rootBackend, root := newRoot(h, c)
- defer root.Close()
-
- // Walk to /one.
- _, oneBackend, oneFile := walkHelper(h, "one", root)
- defer oneFile.Close()
-
- // Walk to and generate /one/file.
- //
- // walkHelper re-walks to oneFile, so we need the second backend,
- // which will also receive Renamed calls.
- oneSecondBackend, fileBackend, fileFile := walkHelper(h, "file", oneFile)
- defer fileFile.Close()
-
- // Walk to and generate /directory.
- _, directoryBackend, directoryFile := walkHelper(h, "directory", root)
- defer directoryFile.Close()
-
- // Rename /one to /directory/one.
- rootBackend.EXPECT().RenameAt("one", directoryBackend, "one").Return(nil)
- expectRenamed(oneBackend, []string{}, directoryBackend, "one")
- expectRenamed(oneSecondBackend, []string{}, directoryBackend, "one")
- expectRenamed(fileBackend, []string{}, oneBackend, "file")
- if err := renameAt(h, root, directoryFile, "one", "one", false); err != nil {
- h.t.Fatalf("got rename err %v, want nil", err)
- }
-
- // Walk to /three.
- _, threeBackend, threeFile := walkHelper(h, "three", root)
- defer threeFile.Close()
-
- // Rename /directory to /three/foo.
- rootBackend.EXPECT().RenameAt("directory", threeBackend, "foo").Return(nil)
- expectRenamed(directoryBackend, []string{}, threeBackend, "foo")
- expectRenamed(oneBackend, []string{}, directoryBackend, "one")
- expectRenamed(oneSecondBackend, []string{}, directoryBackend, "one")
- expectRenamed(fileBackend, []string{}, oneBackend, "file")
- if err := renameAt(h, root, threeFile, "directory", "foo", false); err != nil {
- h.t.Fatalf("got rename err %v, want nil", err)
- }
-}
-
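// Sketch only: the regression above is about recursive Renamed propagation.
// Conceptually, when a directory moves, every tracked descendant refreshes
// its parent link so that later moves of any ancestor still reach it. The
// node type below is illustrative, not the server's real bookkeeping.
type renameNode struct {
    parent   *renameNode
    name     string
    children map[string]*renameNode
}

func (n *renameNode) renamed(newParent *renameNode, newName string) {
    n.parent, n.name = newParent, newName
    for _, child := range n.children {
        // Children keep their own names; only the parent link is
        // refreshed, recursively.
        child.renamed(n, child.name)
    }
}
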
-func TestReadlink(t *testing.T) {
- for name := range newTypeMap(nil) {
- t.Run(name, func(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- _, root := newRoot(h, c)
- defer root.Close()
-
- // Walk to the file normally.
- _, f, err := root.Walk([]string{name})
- if err != nil {
- t.Fatalf("walk failed: got %v, wanted nil", err)
- }
- defer f.Close()
- backend := h.Pop(f)
-
- const symlinkTarget = "symlink-target"
-
- if backend.Attr.Mode.IsSymlink() {
- // This should only go through on symlinks.
- backend.EXPECT().Readlink().Return(symlinkTarget, nil)
- }
-
- // Attempt a Readlink operation.
- target, err := f.Readlink()
- if err != nil && err != unix.EINVAL {
- t.Errorf("readlink got %v, wanted EINVAL", err)
- } else if err == nil && target != symlinkTarget {
- t.Errorf("readlink got %v, wanted %v", target, symlinkTarget)
- }
- })
- }
-}
-
-// fdTest is a wrapper around operations that may send file descriptors. This
-// asserts that the file descriptors are working as intended.
-func fdTest(t *testing.T, sendFn func(*fd.FD) *fd.FD) {
- // Create a pipe that we can read from.
- r, w, err := os.Pipe()
- if err != nil {
- t.Fatalf("unable to create pipe: %v", err)
- }
- defer r.Close()
- defer w.Close()
-
- // Attempt to send the write end.
- wFD, err := fd.NewFromFile(w)
- if err != nil {
- t.Fatalf("unable to convert file: %v", err)
- }
- defer wFD.Close() // This is a copy.
-
- // Send wFD and receive newFD.
- newFD := sendFn(wFD)
- defer newFD.Close()
-
- // Attempt to write.
- const message = "hello"
- if _, err := newFD.Write([]byte(message)); err != nil {
- t.Fatalf("write got %v, wanted nil", err)
- }
-
- // Should see the message on our end.
- buffer := []byte(message)
- if _, err := io.ReadFull(r, buffer); err != nil {
- t.Fatalf("read got %v, wanted nil", err)
- }
- if string(buffer) != message {
- t.Errorf("got message %v, wanted %v", string(buffer), message)
- }
-}
-
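// Sketch only: fdTest indirectly validates SCM_RIGHTS descriptor passing,
// which the p9 transport performs via pkg/unet. The helpers below show the
// underlying mechanism with golang.org/x/sys/unix; they are illustrative and
// assume a connected Unix-domain socket pair.
func sendFD(socket, fd int) error {
    rights := unix.UnixRights(fd)
    return unix.Sendmsg(socket, []byte{0}, rights, nil, 0)
}

func recvFD(socket int) (int, error) {
    buf := make([]byte, 1)
    oob := make([]byte, unix.CmsgSpace(4)) // Space for one descriptor.
    _, oobn, _, _, err := unix.Recvmsg(socket, buf, oob, 0)
    if err != nil {
        return -1, err
    }
    msgs, err := unix.ParseSocketControlMessage(oob[:oobn])
    if err != nil {
        return -1, err
    }
    fds, err := unix.ParseUnixRights(&msgs[0])
    if err != nil {
        return -1, err
    }
    return fds[0], nil
}
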
-func TestConnect(t *testing.T) {
- for name := range newTypeMap(nil) {
- t.Run(name, func(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- _, root := newRoot(h, c)
- defer root.Close()
-
- // Walk to the file normally.
- _, backend, f := walkHelper(h, name, root)
- defer f.Close()
-
- // Catch all the non-socket cases.
- if !backend.Attr.Mode.IsSocket() {
- // This has been set up to fail if Connect is called.
- if _, err := f.Connect(p9.ConnectFlags(0)); err != unix.EINVAL {
- t.Errorf("connect got %v, wanted EINVAL", err)
- }
- return
- }
-
- // Ensure the fd exchange works.
- fdTest(t, func(send *fd.FD) *fd.FD {
- backend.EXPECT().Connect(p9.ConnectFlags(0)).Return(send, nil)
- recv, err := backend.Connect(p9.ConnectFlags(0))
- if err != nil {
- t.Fatalf("connect got %v, wanted nil", err)
- }
- return recv
- })
- })
- }
-}
-
-func TestReaddir(t *testing.T) {
- for name := range newTypeMap(nil) {
- t.Run(name, func(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- _, root := newRoot(h, c)
- defer root.Close()
-
- // Walk to the file normally.
- _, backend, f := walkHelper(h, name, root)
- defer f.Close()
-
- // Catch all the non-directory cases.
- if !backend.Attr.Mode.IsDir() {
- // This has also been set up to fail if Readdir is called.
- if _, err := f.Readdir(0, 1); err != unix.EINVAL {
- t.Errorf("readdir got %v, wanted EINVAL", err)
- }
- return
- }
-
-			// Readdir requires the directory to be opened first, and
-			// directories may only be opened read-only.
-			if _, err := f.Readdir(0, 1); err != unix.EINVAL {
-				t.Errorf("readdir got %v, wanted EINVAL", err)
-			}
-			if _, _, _, err := f.Open(p9.ReadWrite); err != unix.EISDIR {
-				t.Errorf("open got %v, wanted EISDIR", err)
-			}
-			if _, _, _, err := f.Open(p9.WriteOnly); err != unix.EISDIR {
-				t.Errorf("open got %v, wanted EISDIR", err)
-			}
-			backend.EXPECT().Open(p9.ReadOnly).Times(1)
-			if _, _, _, err := f.Open(p9.ReadOnly); err != nil {
-				t.Errorf("open got %v, wanted nil", err)
-			}
- backend.EXPECT().Readdir(uint64(0), uint32(1)).Times(1)
- if _, err := f.Readdir(0, 1); err != nil {
- t.Errorf("readdir got %v, wanted nil", err)
- }
- })
- }
-}
-
-func TestOpen(t *testing.T) {
- type openTest struct {
- name string
- flags p9.OpenFlags
- err error
- match func(p9.FileMode) bool
- }
-
- cases := []openTest{
- {
- name: "not-openable-read-only",
- flags: p9.ReadOnly,
- err: unix.EINVAL,
- match: func(mode p9.FileMode) bool { return !p9.CanOpen(mode) },
- },
- {
- name: "not-openable-write-only",
- flags: p9.WriteOnly,
- err: unix.EINVAL,
- match: func(mode p9.FileMode) bool { return !p9.CanOpen(mode) },
- },
- {
- name: "not-openable-read-write",
- flags: p9.ReadWrite,
- err: unix.EINVAL,
- match: func(mode p9.FileMode) bool { return !p9.CanOpen(mode) },
- },
- {
- name: "directory-read-only",
- flags: p9.ReadOnly,
- err: nil,
- match: func(mode p9.FileMode) bool { return mode.IsDir() },
- },
- {
- name: "directory-read-write",
- flags: p9.ReadWrite,
- err: unix.EISDIR,
- match: func(mode p9.FileMode) bool { return mode.IsDir() },
- },
- {
- name: "directory-write-only",
- flags: p9.WriteOnly,
- err: unix.EISDIR,
- match: func(mode p9.FileMode) bool { return mode.IsDir() },
- },
- {
- name: "read-only",
- flags: p9.ReadOnly,
- err: nil,
- match: func(mode p9.FileMode) bool { return p9.CanOpen(mode) },
- },
- {
- name: "write-only",
- flags: p9.WriteOnly,
- err: nil,
- match: func(mode p9.FileMode) bool { return p9.CanOpen(mode) && !mode.IsDir() },
- },
- {
- name: "read-write",
- flags: p9.ReadWrite,
- err: nil,
- match: func(mode p9.FileMode) bool { return p9.CanOpen(mode) && !mode.IsDir() },
- },
- {
- name: "directory-read-only-truncate",
- flags: p9.ReadOnly | p9.OpenTruncate,
- err: unix.EISDIR,
- match: func(mode p9.FileMode) bool { return mode.IsDir() },
- },
- {
- name: "read-only-truncate",
- flags: p9.ReadOnly | p9.OpenTruncate,
- err: nil,
- match: func(mode p9.FileMode) bool { return p9.CanOpen(mode) && !mode.IsDir() },
- },
- {
- name: "write-only-truncate",
- flags: p9.WriteOnly | p9.OpenTruncate,
- err: nil,
- match: func(mode p9.FileMode) bool { return p9.CanOpen(mode) && !mode.IsDir() },
- },
- {
- name: "read-write-truncate",
- flags: p9.ReadWrite | p9.OpenTruncate,
- err: nil,
- match: func(mode p9.FileMode) bool { return p9.CanOpen(mode) && !mode.IsDir() },
- },
- }
-
- // Open(flags OpenFlags) (*fd.FD, QID, uint32, error)
-	// - only works on Regular, NamedPipe, BlockDevice, CharacterDevice
- // - returning a file works as expected
- for name := range newTypeMap(nil) {
- for _, tc := range cases {
- t.Run(fmt.Sprintf("%s-%s", tc.name, name), func(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- _, root := newRoot(h, c)
- defer root.Close()
-
- // Walk to the file normally.
- _, backend, f := walkHelper(h, name, root)
- defer f.Close()
-
- // Does this match the case?
- if !tc.match(backend.Attr.Mode) {
- t.SkipNow()
- }
-
- // Ensure open-required operations fail.
- if _, err := f.ReadAt([]byte("hello"), 0); err != unix.EINVAL {
- t.Errorf("readAt got %v, wanted EINVAL", err)
- }
- if _, err := f.WriteAt(make([]byte, 6), 0); err != unix.EINVAL {
- t.Errorf("writeAt got %v, wanted EINVAL", err)
- }
- if err := f.FSync(); err != unix.EINVAL {
- t.Errorf("fsync got %v, wanted EINVAL", err)
- }
- if _, err := f.Readdir(0, 1); err != unix.EINVAL {
- t.Errorf("readdir got %v, wanted EINVAL", err)
- }
-
- // Attempt the given open.
- if tc.err != nil {
- // We expect an error, just test and return.
- if _, _, _, err := f.Open(tc.flags); err != tc.err {
- t.Fatalf("open with flags %v got %v, want %v", tc.flags, err, tc.err)
- }
- return
- }
-
- // Run an FD test, since we expect success.
- fdTest(t, func(send *fd.FD) *fd.FD {
- backend.EXPECT().Open(tc.flags).Return(send, p9.QID{}, uint32(0), nil).Times(1)
- recv, _, _, err := f.Open(tc.flags)
- if err != tc.err {
- t.Fatalf("open with flags %v got %v, want %v", tc.flags, err, tc.err)
- }
- return recv
- })
-
- // If the open was successful, attempt another one.
- if _, _, _, err := f.Open(tc.flags); err != unix.EINVAL {
- t.Errorf("second open with flags %v got %v, want EINVAL", tc.flags, err)
- }
-
- // Ensure that all illegal operations fail.
- if _, _, err := f.Walk(nil); err != unix.EINVAL && err != unix.EBUSY {
- t.Errorf("walk got %v, wanted EINVAL or EBUSY", err)
- }
- if _, _, _, _, err := f.WalkGetAttr(nil); err != unix.EINVAL && err != unix.EBUSY {
- t.Errorf("walkgetattr got %v, wanted EINVAL or EBUSY", err)
- }
- })
- }
- }
-}
-
-func TestClose(t *testing.T) {
- type closeTest struct {
- name string
- closeFn func(backend *Mock, f p9.File) error
- }
-
- cases := []closeTest{
- {
- name: "close",
- closeFn: func(_ *Mock, f p9.File) error {
- return f.Close()
- },
- },
- {
- name: "remove",
- closeFn: func(backend *Mock, f p9.File) error {
-				// Allow the unlink call in the parent; remove is translated to unlinkat.
- backend.parent.EXPECT().UnlinkAt(gomock.Any(), gomock.Any()).Times(1)
- return f.(deprecatedRemover).Remove()
- },
- },
- {
- name: "setAttrClose",
- closeFn: func(backend *Mock, f p9.File) error {
- valid := p9.SetAttrMask{ATime: true}
- attr := p9.SetAttr{ATimeSeconds: 1, ATimeNanoSeconds: 2}
- backend.EXPECT().SetAttr(valid, attr).Times(1)
- return f.SetAttrClose(valid, attr)
- },
- },
- }
-
- for name := range newTypeMap(nil) {
- for _, tc := range cases {
- t.Run(fmt.Sprintf("%s(%s)", tc.name, name), func(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- _, root := newRoot(h, c)
- defer root.Close()
-
- // Walk to the file normally.
- _, backend, f := walkHelper(h, name, root)
-
- // Close via the prescribed method.
- if err := tc.closeFn(backend, f); err != nil {
- t.Fatalf("closeFn failed: %v", err)
- }
-
- // Everything should fail with EBADF.
- if _, _, err := f.Walk(nil); err != unix.EBADF {
- t.Errorf("walk got %v, wanted EBADF", err)
- }
- if _, err := f.StatFS(); err != unix.EBADF {
- t.Errorf("statfs got %v, wanted EBADF", err)
- }
- if _, _, _, err := f.GetAttr(p9.AttrMaskAll()); err != unix.EBADF {
- t.Errorf("getattr got %v, wanted EBADF", err)
- }
- if err := f.SetAttr(p9.SetAttrMask{}, p9.SetAttr{}); err != unix.EBADF {
- t.Errorf("setattrk got %v, wanted EBADF", err)
- }
- if err := f.Rename(root, "new-name"); err != unix.EBADF {
- t.Errorf("rename got %v, wanted EBADF", err)
- }
- if err := f.Close(); err != unix.EBADF {
- t.Errorf("close got %v, wanted EBADF", err)
- }
- if _, _, _, err := f.Open(p9.ReadOnly); err != unix.EBADF {
- t.Errorf("open got %v, wanted EBADF", err)
- }
- if _, err := f.ReadAt([]byte("hello"), 0); err != unix.EBADF {
- t.Errorf("readAt got %v, wanted EBADF", err)
- }
- if _, err := f.WriteAt(make([]byte, 6), 0); err != unix.EBADF {
- t.Errorf("writeAt got %v, wanted EBADF", err)
- }
- if err := f.FSync(); err != unix.EBADF {
- t.Errorf("fsync got %v, wanted EBADF", err)
- }
- if _, _, _, _, err := f.Create("new-file", p9.ReadWrite, 0, 0, 0); err != unix.EBADF {
- t.Errorf("create got %v, wanted EBADF", err)
- }
- if _, err := f.Mkdir("new-directory", 0, 0, 0); err != unix.EBADF {
- t.Errorf("mkdir got %v, wanted EBADF", err)
- }
- if _, err := f.Symlink("old-name", "new-name", 0, 0); err != unix.EBADF {
- t.Errorf("symlink got %v, wanted EBADF", err)
- }
- if err := f.Link(root, "new-name"); err != unix.EBADF {
- t.Errorf("link got %v, wanted EBADF", err)
- }
- if _, err := f.Mknod("new-block-device", 0, 0, 0, 0, 0); err != unix.EBADF {
- t.Errorf("mknod got %v, wanted EBADF", err)
- }
- if err := f.RenameAt("old-name", root, "new-name"); err != unix.EBADF {
- t.Errorf("renameAt got %v, wanted EBADF", err)
- }
- if err := f.UnlinkAt("name", 0); err != unix.EBADF {
- t.Errorf("unlinkAt got %v, wanted EBADF", err)
- }
- if _, err := f.Readdir(0, 1); err != unix.EBADF {
- t.Errorf("readdir got %v, wanted EBADF", err)
- }
- if _, err := f.Readlink(); err != unix.EBADF {
- t.Errorf("readlink got %v, wanted EBADF", err)
- }
- if err := f.Flush(); err != unix.EBADF {
- t.Errorf("flush got %v, wanted EBADF", err)
- }
- if _, _, _, _, err := f.WalkGetAttr(nil); err != unix.EBADF {
- t.Errorf("walkgetattr got %v, wanted EBADF", err)
- }
- if _, err := f.Connect(p9.ConnectFlags(0)); err != unix.EBADF {
- t.Errorf("connect got %v, wanted EBADF", err)
- }
- })
- }
- }
-}
-
-// onlyWorksOnOpenThings is a helper test method for operations that should
-// only work on files that have been explicitly opened.
-func onlyWorksOnOpenThings(h *Harness, t *testing.T, name string, root p9.File, mode p9.OpenFlags, expectedErr error, fn func(backend *Mock, f p9.File, shouldSucceed bool) error) {
- // Walk to the file normally.
- _, backend, f := walkHelper(h, name, root)
- defer f.Close()
-
- // Does it work before opening?
- if err := fn(backend, f, false); err != unix.EINVAL {
- t.Errorf("operation got %v, wanted EINVAL", err)
- }
-
- // Is this openable?
- if !p9.CanOpen(backend.Attr.Mode) {
- return // Nothing to do.
- }
-
- // If this is a directory, we can't handle writing.
- if backend.Attr.Mode.IsDir() && (mode == p9.ReadWrite || mode == p9.WriteOnly) {
- return // Skip.
- }
-
- // Open the file.
- backend.EXPECT().Open(mode)
- if _, _, _, err := f.Open(mode); err != nil {
- t.Fatalf("open got %v, wanted nil", err)
- }
-
- // Attempt the operation.
- if err := fn(backend, f, expectedErr == nil); err != expectedErr {
- t.Fatalf("operation got %v, wanted %v", err, expectedErr)
- }
-}
-
-func TestRead(t *testing.T) {
- type readTest struct {
- name string
- mode p9.OpenFlags
- err error
- }
-
- cases := []readTest{
- {
- name: "read-only",
- mode: p9.ReadOnly,
- err: nil,
- },
- {
- name: "read-write",
- mode: p9.ReadWrite,
- err: nil,
- },
- {
- name: "write-only",
- mode: p9.WriteOnly,
- err: unix.EPERM,
- },
- }
-
- for name := range newTypeMap(nil) {
- for _, tc := range cases {
- t.Run(fmt.Sprintf("%s-%s", tc.name, name), func(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- _, root := newRoot(h, c)
- defer root.Close()
-
- const message = "hello"
-
- onlyWorksOnOpenThings(h, t, name, root, tc.mode, tc.err, func(backend *Mock, f p9.File, shouldSucceed bool) error {
- if !shouldSucceed {
- _, err := f.ReadAt([]byte(message), 0)
- return err
- }
-
- // Prepare for the call to readAt in the backend.
- backend.EXPECT().ReadAt(gomock.Any(), uint64(0)).Do(func(p []byte, offset uint64) {
- copy(p, message)
- }).Return(len(message), nil)
-
- // Make the client call.
- p := make([]byte, 2*len(message)) // Double size.
- n, err := f.ReadAt(p, 0)
-
- // Sanity check result.
- if err != nil {
- return err
- }
- if n != len(message) {
- t.Fatalf("message length incorrect, got %d, want %d", n, len(message))
- }
- if !bytes.Equal(p[:n], []byte(message)) {
- t.Fatalf("message incorrect, got %v, want %v", p, []byte(message))
- }
- return nil // Success.
- })
- })
- }
- }
-}
-
-func TestWrite(t *testing.T) {
- type writeTest struct {
- name string
- mode p9.OpenFlags
- err error
- }
-
- cases := []writeTest{
- {
- name: "read-only",
- mode: p9.ReadOnly,
- err: unix.EPERM,
- },
- {
- name: "read-write",
- mode: p9.ReadWrite,
- err: nil,
- },
- {
- name: "write-only",
- mode: p9.WriteOnly,
- err: nil,
- },
- }
-
- for name := range newTypeMap(nil) {
- for _, tc := range cases {
- t.Run(fmt.Sprintf("%s-%s", tc.name, name), func(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- _, root := newRoot(h, c)
- defer root.Close()
-
- const message = "hello"
-
- onlyWorksOnOpenThings(h, t, name, root, tc.mode, tc.err, func(backend *Mock, f p9.File, shouldSucceed bool) error {
- if !shouldSucceed {
- _, err := f.WriteAt([]byte(message), 0)
- return err
- }
-
-					// Prepare for the call to WriteAt in the backend.
- var output []byte // Saved by Do below.
- backend.EXPECT().WriteAt(gomock.Any(), uint64(0)).Do(func(p []byte, offset uint64) {
- output = p
- }).Return(len(message), nil)
-
- // Make the client call.
- n, err := f.WriteAt([]byte(message), 0)
-
- // Sanity check result.
- if err != nil {
- return err
- }
- if n != len(message) {
- t.Fatalf("message length incorrect, got %d, want %d", n, len(message))
- }
- if !bytes.Equal(output, []byte(message)) {
- t.Fatalf("message incorrect, got %v, want %v", output, []byte(message))
- }
- return nil // Success.
- })
- })
- }
- }
-}
-
-func TestFSync(t *testing.T) {
- for name := range newTypeMap(nil) {
- for _, mode := range []p9.OpenFlags{p9.ReadOnly, p9.WriteOnly, p9.ReadWrite} {
- t.Run(fmt.Sprintf("%s-%s", mode, name), func(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- _, root := newRoot(h, c)
- defer root.Close()
-
- onlyWorksOnOpenThings(h, t, name, root, mode, nil, func(backend *Mock, f p9.File, shouldSucceed bool) error {
- if shouldSucceed {
- backend.EXPECT().FSync().Times(1)
- }
- return f.FSync()
- })
- })
- }
- }
-}
-
-func TestFlush(t *testing.T) {
- for name := range newTypeMap(nil) {
- t.Run(name, func(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- _, root := newRoot(h, c)
- defer root.Close()
-
- _, backend, f := walkHelper(h, name, root)
- defer f.Close()
-
- backend.EXPECT().Flush()
- f.Flush()
- })
- }
-}
-
-// onlyWorksOnDirectories is a helper test method for operations that should
-// only work on unopened directories, such as create, mkdir and symlink.
-func onlyWorksOnDirectories(h *Harness, t *testing.T, name string, root p9.File, fn func(backend *Mock, f p9.File, shouldSucceed bool) error) {
- // Walk to the file normally.
- _, backend, f := walkHelper(h, name, root)
- defer f.Close()
-
-	// Only directories support these operations.
- if !backend.Attr.Mode.IsDir() {
- if err := fn(backend, f, false); err != unix.EINVAL {
- t.Errorf("operation got %v, wanted EINVAL", err)
- }
- return // Nothing else to do.
- }
-
- // Should succeed.
- if err := fn(backend, f, true); err != nil {
- t.Fatalf("operation got %v, wanted nil", err)
- }
-
- // Open the directory.
- backend.EXPECT().Open(p9.ReadOnly).Times(1)
- if _, _, _, err := f.Open(p9.ReadOnly); err != nil {
- t.Fatalf("open got %v, wanted nil", err)
- }
-
- // Should not work again.
- if err := fn(backend, f, false); err != unix.EINVAL {
- t.Fatalf("operation got %v, wanted EINVAL", err)
- }
-}
-
-func TestCreate(t *testing.T) {
- for name := range newTypeMap(nil) {
- t.Run(name, func(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- _, root := newRoot(h, c)
- defer root.Close()
-
- onlyWorksOnDirectories(h, t, name, root, func(backend *Mock, f p9.File, shouldSucceed bool) error {
- if !shouldSucceed {
- _, _, _, _, err := f.Create("new-file", p9.ReadWrite, 0, 1, 2)
- return err
- }
-
- // If the create is going to succeed, then we
- // need to create a new backend file, and we
- // clone to ensure that we don't close the
- // original.
- _, newF, err := f.Walk(nil)
- if err != nil {
- t.Fatalf("clone got %v, wanted nil", err)
- }
- defer newF.Close()
- newBackend := h.Pop(newF)
-
- // Run a regular FD test to validate that path.
- fdTest(t, func(send *fd.FD) *fd.FD {
- // Return the send FD on success.
- newFile := h.NewFile()(backend) // New file with the parent backend.
- newBackend.EXPECT().Create("new-file", p9.ReadWrite, p9.FileMode(0), p9.UID(1), p9.GID(2)).Return(send, newFile, p9.QID{}, uint32(0), nil)
-
- // Receive the fd back.
- recv, _, _, _, err := newF.Create("new-file", p9.ReadWrite, 0, 1, 2)
- if err != nil {
- t.Fatalf("create got %v, wanted nil", err)
- }
- return recv
- })
-
-				// The fdTest above reports any failure through the
-				// normal test flow, so reaching this point means it
-				// passed.
- return nil
- })
- })
- }
-}
-
-func TestCreateInvalid(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- _, root := newRoot(h, c)
- defer root.Close()
-
- for name := range newTypeMap(nil) {
- for _, invalidName := range allInvalidNames(name) {
- if _, _, _, _, err := root.Create(invalidName, p9.ReadWrite, 0, 0, 0); err != unix.EINVAL {
- t.Errorf("got %v for name %q, want EINVAL", err, invalidName)
- }
- }
- }
-}
-
-func TestMkdir(t *testing.T) {
- for name := range newTypeMap(nil) {
- t.Run(name, func(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- _, root := newRoot(h, c)
- defer root.Close()
-
- onlyWorksOnDirectories(h, t, name, root, func(backend *Mock, f p9.File, shouldSucceed bool) error {
- if shouldSucceed {
- backend.EXPECT().Mkdir("new-directory", p9.FileMode(0), p9.UID(1), p9.GID(2))
- }
- _, err := f.Mkdir("new-directory", 0, 1, 2)
- return err
- })
- })
- }
-}
-
-func TestMkdirInvalid(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- _, root := newRoot(h, c)
- defer root.Close()
-
- for name := range newTypeMap(nil) {
- for _, invalidName := range allInvalidNames(name) {
- if _, err := root.Mkdir(invalidName, 0, 0, 0); err != unix.EINVAL {
- t.Errorf("got %v for name %q, want EINVAL", err, invalidName)
- }
- }
- }
-}
-
-func TestSymlink(t *testing.T) {
- for name := range newTypeMap(nil) {
- t.Run(name, func(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- _, root := newRoot(h, c)
- defer root.Close()
-
- onlyWorksOnDirectories(h, t, name, root, func(backend *Mock, f p9.File, shouldSucceed bool) error {
- if shouldSucceed {
- backend.EXPECT().Symlink("old-name", "new-name", p9.UID(1), p9.GID(2))
- }
- _, err := f.Symlink("old-name", "new-name", 1, 2)
- return err
- })
- })
- }
-}
-
-func TestSymlinkInvalid(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- _, root := newRoot(h, c)
- defer root.Close()
-
- for name := range newTypeMap(nil) {
- for _, invalidName := range allInvalidNames(name) {
-			// We only need to test invalid values for the new name;
-			// the target can be an arbitrary string and does not
-			// need to be sanity checked.
- if _, err := root.Symlink("old-name", invalidName, 0, 0); err != unix.EINVAL {
- t.Errorf("got %v for name %q, want EINVAL", err, invalidName)
- }
- }
- }
-}
-
-func TestLink(t *testing.T) {
- for name := range newTypeMap(nil) {
- t.Run(name, func(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- _, root := newRoot(h, c)
- defer root.Close()
-
- onlyWorksOnDirectories(h, t, name, root, func(backend *Mock, f p9.File, shouldSucceed bool) error {
- if shouldSucceed {
- backend.EXPECT().Link(gomock.Any(), "new-link")
- }
- return f.Link(f, "new-link")
- })
- })
- }
-}
-
-func TestLinkInvalid(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- _, root := newRoot(h, c)
- defer root.Close()
-
- for name := range newTypeMap(nil) {
- for _, invalidName := range allInvalidNames(name) {
- if err := root.Link(root, invalidName); err != unix.EINVAL {
- t.Errorf("got %v for name %q, want EINVAL", err, invalidName)
- }
- }
- }
-}
-
-func TestMknod(t *testing.T) {
- for name := range newTypeMap(nil) {
- t.Run(name, func(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- _, root := newRoot(h, c)
- defer root.Close()
-
- onlyWorksOnDirectories(h, t, name, root, func(backend *Mock, f p9.File, shouldSucceed bool) error {
- if shouldSucceed {
- backend.EXPECT().Mknod("new-block-device", p9.FileMode(0), uint32(1), uint32(2), p9.UID(3), p9.GID(4)).Times(1)
- }
- _, err := f.Mknod("new-block-device", 0, 1, 2, 3, 4)
- return err
- })
- })
- }
-}
-
-// concurrentFn is a specification of a concurrent operation. This is used to
-// drive the concurrency tests below.
-type concurrentFn struct {
- name string
- match func(p9.FileMode) bool
- op func(h *Harness, backend *Mock, f p9.File, callback func())
-}
-
-func concurrentTest(t *testing.T, name string, fn1, fn2 concurrentFn, sameDir, expectedOkay bool) {
- var (
- names1 []string
- names2 []string
- )
- if sameDir {
- // Use the same file one directory up.
- names1, names2 = []string{"one", name}, []string{"one", name}
- } else {
- // For different directories, just use siblings.
- names1, names2 = []string{"one", name}, []string{"three", name}
- }
-
- t.Run(fmt.Sprintf("%s(%v)+%s(%v)", fn1.name, names1, fn2.name, names2), func(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- _, root := newRoot(h, c)
- defer root.Close()
-
- // Walk to both files as given.
- _, f1, err := root.Walk(names1)
- if err != nil {
- t.Fatalf("error walking, got %v, want nil", err)
- }
- defer f1.Close()
- b1 := h.Pop(f1)
- _, f2, err := root.Walk(names2)
- if err != nil {
- t.Fatalf("error walking, got %v, want nil", err)
- }
- defer f2.Close()
- b2 := h.Pop(f2)
-
- // Are these a good match for the current test case?
- if !fn1.match(b1.Attr.Mode) {
- t.SkipNow()
- }
- if !fn2.match(b2.Attr.Mode) {
- t.SkipNow()
- }
-
- // Construct our "concurrency creator".
- in1 := make(chan struct{}, 1)
- in2 := make(chan struct{}, 1)
- var top sync.WaitGroup
- var fns sync.WaitGroup
- defer top.Wait()
- top.Add(2) // Accounting for below.
- defer fns.Done()
- fns.Add(1) // See line above; released before top.Wait.
- go func() {
- defer top.Done()
- fn1.op(h, b1, f1, func() {
- in1 <- struct{}{}
- fns.Wait()
- })
- }()
- go func() {
- defer top.Done()
- fn2.op(h, b2, f2, func() {
- in2 <- struct{}{}
- fns.Wait()
- })
- }()
-
- // Compute a reasonable timeout. If we expect the operation to hang,
- // give it 10 milliseconds before we assert that it's fine. After all,
- // there will be a lot of these tests. If we don't expect it to hang,
- // give it a full minute, since the machine could be slow.
- timeout := 10 * time.Millisecond
- if expectedOkay {
- timeout = 1 * time.Minute
- }
-
- // Read the first channel.
- var second chan struct{}
- select {
- case <-in1:
- second = in2
- case <-in2:
- second = in1
- }
-
- // Catch concurrency.
- select {
- case <-second:
-			// We finished successfully. Whether this is good
-			// depends on the expected result.
- if !expectedOkay {
- t.Errorf("%q and %q proceeded concurrently!", fn1.name, fn2.name)
- }
- case <-time.After(timeout):
- // Great, things did not proceed concurrently. Is that what we
- // expected?
- if expectedOkay {
-				t.Errorf("%q and %q failed to proceed concurrently!", fn1.name, fn2.name)
- }
- }
- })
-}
-
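The synchronization scheme above generalizes beyond this harness: each operation signals on its own channel from inside a blocking callback, and the test watches the second channel under a timeout to decide whether the two operations were serialized. A minimal sketch of that pattern, with invented names (detectConcurrency, opA, opB) and a closed channel standing in for the WaitGroup release used above:

package concurrencysketch

import "time"

// detectConcurrency runs opA and opB, each of which must invoke its callback
// exactly once and block inside it. It reports whether both callbacks were
// reached before the timeout, i.e. whether the operations ran concurrently.
// It assumes at least one operation reaches its callback.
func detectConcurrency(opA, opB func(callback func()), timeout time.Duration) bool {
	in1 := make(chan struct{}, 1)
	in2 := make(chan struct{}, 1)
	release := make(chan struct{})
	defer close(release) // Unblock both callbacks when we are done.

	go opA(func() { in1 <- struct{}{}; <-release })
	go opB(func() { in2 <- struct{}{}; <-release })

	// Wait for whichever callback fires first, then watch the other.
	var second chan struct{}
	select {
	case <-in1:
		second = in2
	case <-in2:
		second = in1
	}
	select {
	case <-second:
		return true // Both operations proceeded concurrently.
	case <-time.After(timeout):
		return false // The second operation stayed blocked.
	}
}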
-func randomFileName() string {
- return fmt.Sprintf("%x", rand.Int63())
-}
-
-func TestConcurrency(t *testing.T) {
- readExclusive := []concurrentFn{
- {
- // N.B. We can't explicitly check WalkGetAttr behavior,
- // but we rely on the fact that the internal code paths
- // are the same.
- name: "walk",
- match: func(mode p9.FileMode) bool { return mode.IsDir() },
- op: func(h *Harness, backend *Mock, f p9.File, callback func()) {
- // See the documentation of WalkCallback.
- // Because walk is actually implemented by the
- // mock, we need a special place for this
- // callback.
- //
- // Note that a clone actually locks the parent
- // node. So we walk from this node to test
- // concurrent operations appropriately.
- backend.WalkCallback = func() error {
- callback()
- return nil
- }
- f.Walk([]string{randomFileName()}) // Won't exist.
- },
- },
- {
- name: "fsync",
- match: func(mode p9.FileMode) bool { return p9.CanOpen(mode) },
- op: func(h *Harness, backend *Mock, f p9.File, callback func()) {
- backend.EXPECT().Open(gomock.Any())
- backend.EXPECT().FSync().Do(func() {
- callback()
- })
- f.Open(p9.ReadOnly) // Required.
- f.FSync()
- },
- },
- {
- name: "readdir",
- match: func(mode p9.FileMode) bool { return mode.IsDir() },
- op: func(h *Harness, backend *Mock, f p9.File, callback func()) {
- backend.EXPECT().Open(gomock.Any())
- backend.EXPECT().Readdir(gomock.Any(), gomock.Any()).Do(func(uint64, uint32) {
- callback()
- })
- f.Open(p9.ReadOnly) // Required.
- f.Readdir(0, 1)
- },
- },
- {
- name: "readlink",
- match: func(mode p9.FileMode) bool { return mode.IsSymlink() },
- op: func(h *Harness, backend *Mock, f p9.File, callback func()) {
- backend.EXPECT().Readlink().Do(func() {
- callback()
- })
- f.Readlink()
- },
- },
- {
- name: "connect",
- match: func(mode p9.FileMode) bool { return mode.IsSocket() },
- op: func(h *Harness, backend *Mock, f p9.File, callback func()) {
- backend.EXPECT().Connect(gomock.Any()).Do(func(p9.ConnectFlags) {
- callback()
- })
- f.Connect(0)
- },
- },
- {
- name: "open",
- match: func(mode p9.FileMode) bool { return p9.CanOpen(mode) },
- op: func(h *Harness, backend *Mock, f p9.File, callback func()) {
- backend.EXPECT().Open(gomock.Any()).Do(func(p9.OpenFlags) {
- callback()
- })
- f.Open(p9.ReadOnly)
- },
- },
- {
- name: "flush",
- match: func(mode p9.FileMode) bool { return true },
- op: func(h *Harness, backend *Mock, f p9.File, callback func()) {
- backend.EXPECT().Flush().Do(func() {
- callback()
- })
- f.Flush()
- },
- },
- }
- writeExclusive := []concurrentFn{
- {
-			// N.B. We can't really check getattr. But this is an
-			// extremely low-risk function; this check is likely
-			// just paranoia anyway.
- name: "setattr",
- match: func(mode p9.FileMode) bool { return true },
- op: func(h *Harness, backend *Mock, f p9.File, callback func()) {
- backend.EXPECT().SetAttr(gomock.Any(), gomock.Any()).Do(func(p9.SetAttrMask, p9.SetAttr) {
- callback()
- })
- f.SetAttr(p9.SetAttrMask{}, p9.SetAttr{})
- },
- },
- {
- name: "unlinkAt",
- match: func(mode p9.FileMode) bool { return mode.IsDir() },
- op: func(h *Harness, backend *Mock, f p9.File, callback func()) {
- backend.EXPECT().UnlinkAt(gomock.Any(), gomock.Any()).Do(func(string, uint32) {
- callback()
- })
- f.UnlinkAt(randomFileName(), 0)
- },
- },
- {
- name: "mknod",
- match: func(mode p9.FileMode) bool { return mode.IsDir() },
- op: func(h *Harness, backend *Mock, f p9.File, callback func()) {
- backend.EXPECT().Mknod(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do(func(string, p9.FileMode, uint32, uint32, p9.UID, p9.GID) {
- callback()
- })
- f.Mknod(randomFileName(), 0, 0, 0, 0, 0)
- },
- },
- {
- name: "link",
- match: func(mode p9.FileMode) bool { return mode.IsDir() },
- op: func(h *Harness, backend *Mock, f p9.File, callback func()) {
- backend.EXPECT().Link(gomock.Any(), gomock.Any()).Do(func(p9.File, string) {
- callback()
- })
- f.Link(f, randomFileName())
- },
- },
- {
- name: "symlink",
- match: func(mode p9.FileMode) bool { return mode.IsDir() },
- op: func(h *Harness, backend *Mock, f p9.File, callback func()) {
- backend.EXPECT().Symlink(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do(func(string, string, p9.UID, p9.GID) {
- callback()
- })
- f.Symlink(randomFileName(), randomFileName(), 0, 0)
- },
- },
- {
- name: "mkdir",
- match: func(mode p9.FileMode) bool { return mode.IsDir() },
- op: func(h *Harness, backend *Mock, f p9.File, callback func()) {
- backend.EXPECT().Mkdir(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do(func(string, p9.FileMode, p9.UID, p9.GID) {
- callback()
- })
- f.Mkdir(randomFileName(), 0, 0, 0)
- },
- },
- {
- name: "create",
- match: func(mode p9.FileMode) bool { return mode.IsDir() },
- op: func(h *Harness, backend *Mock, f p9.File, callback func()) {
- // Return an error for the creation operation, as this is the simplest.
- backend.EXPECT().Create(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, p9.QID{}, uint32(0), unix.EINVAL).Do(func(string, p9.OpenFlags, p9.FileMode, p9.UID, p9.GID) {
- callback()
- })
- f.Create(randomFileName(), p9.ReadOnly, 0, 0, 0)
- },
- },
- }
- globalExclusive := []concurrentFn{
- {
- name: "remove",
- match: func(mode p9.FileMode) bool { return mode.IsDir() },
- op: func(h *Harness, backend *Mock, f p9.File, callback func()) {
- // Remove operates on a locked parent. So we
- // add a child, walk to it and call remove.
- // Note that because this operation can operate
- // concurrently with itself, we need to
- // generate a random file name.
- randomFile := randomFileName()
- backend.AddChild(randomFile, h.NewFile())
- defer backend.RemoveChild(randomFile)
- _, file, err := f.Walk([]string{randomFile})
- if err != nil {
- h.t.Fatalf("walk got %v, want nil", err)
- }
-
- // Remove is automatically translated to the parent.
- backend.EXPECT().UnlinkAt(gomock.Any(), gomock.Any()).Do(func(string, uint32) {
- callback()
- })
-
- // Remove is also a close.
- file.(deprecatedRemover).Remove()
- },
- },
- {
- name: "rename",
- match: func(mode p9.FileMode) bool { return mode.IsDir() },
- op: func(h *Harness, backend *Mock, f p9.File, callback func()) {
- // Similarly to remove, because we need to
- // operate on a child, we allow a walk.
- randomFile := randomFileName()
- backend.AddChild(randomFile, h.NewFile())
- defer backend.RemoveChild(randomFile)
- _, file, err := f.Walk([]string{randomFile})
- if err != nil {
- h.t.Fatalf("walk got %v, want nil", err)
- }
- defer file.Close()
- fileBackend := h.Pop(file)
-
- // Rename is automatically translated to the parent.
- backend.EXPECT().RenameAt(gomock.Any(), gomock.Any(), gomock.Any()).Do(func(string, p9.File, string) {
- callback()
- })
-
- // Attempt the rename.
- fileBackend.EXPECT().Renamed(gomock.Any(), gomock.Any())
- file.Rename(f, randomFileName())
- },
- },
- {
- name: "renameAt",
- match: func(mode p9.FileMode) bool { return mode.IsDir() },
- op: func(h *Harness, backend *Mock, f p9.File, callback func()) {
- backend.EXPECT().RenameAt(gomock.Any(), gomock.Any(), gomock.Any()).Do(func(string, p9.File, string) {
- callback()
- })
-
- // Attempt the rename. There are no active fids
- // with this name, so we don't need to expect
- // Renamed hooks on anything.
- f.RenameAt(randomFileName(), f, randomFileName())
- },
- },
- }
-
- for _, fn1 := range readExclusive {
- for _, fn2 := range readExclusive {
- for name := range newTypeMap(nil) {
- // Everything should be able to proceed in parallel.
- concurrentTest(t, name, fn1, fn2, true, true)
- concurrentTest(t, name, fn1, fn2, false, true)
- }
- }
- }
-
- for _, fn1 := range append(readExclusive, writeExclusive...) {
- for _, fn2 := range writeExclusive {
- for name := range newTypeMap(nil) {
- // Only cross-directory functions should proceed in parallel.
- concurrentTest(t, name, fn1, fn2, true, false)
- concurrentTest(t, name, fn1, fn2, false, true)
- }
- }
- }
-
- for _, fn1 := range append(append(readExclusive, writeExclusive...), globalExclusive...) {
- for _, fn2 := range globalExclusive {
- for name := range newTypeMap(nil) {
- // Nothing should be able to run in parallel.
- concurrentTest(t, name, fn1, fn2, true, false)
- concurrentTest(t, name, fn1, fn2, false, false)
- }
- }
- }
-}
-
-func TestReadWriteConcurrent(t *testing.T) {
- h, c := NewHarness(t)
- defer h.Finish()
-
- _, root := newRoot(h, c)
- defer root.Close()
-
- const (
- instances = 10
- iterations = 10000
- dataSize = 1024
- )
- var (
- dataSets [instances][dataSize]byte
- backends [instances]*Mock
- files [instances]p9.File
- )
-
- // Walk to the file normally.
- for i := 0; i < instances; i++ {
- _, backends[i], files[i] = walkHelper(h, "file", root)
- defer files[i].Close()
- }
-
- // Open the files.
- for i := 0; i < instances; i++ {
- backends[i].EXPECT().Open(p9.ReadWrite)
- if _, _, _, err := files[i].Open(p9.ReadWrite); err != nil {
- t.Fatalf("open got %v, wanted nil", err)
- }
- }
-
- // Initialize random data for each instance.
- for i := 0; i < instances; i++ {
- if _, err := rand.Read(dataSets[i][:]); err != nil {
- t.Fatalf("error initializing dataSet#%d, got %v", i, err)
- }
- }
-
- // Define our random read/write mechanism.
- randRead := func(h *Harness, backend *Mock, f p9.File, data, test []byte) {
- // Prepare the backend.
- backend.EXPECT().ReadAt(gomock.Any(), uint64(0)).Do(func(p []byte, offset uint64) {
- if n := copy(p, data); n != len(data) {
- // Note that we have to assert the result here, as the Return statement
- // below cannot be dynamic: it will be bound before this call is made.
- h.t.Errorf("wanted length %d, got %d", len(data), n)
- }
- }).Return(len(data), nil)
-
- // Execute the read.
- if n, err := f.ReadAt(test, 0); n != len(test) || err != nil {
- t.Errorf("failed read: wanted (%d, nil), got (%d, %v)", len(test), n, err)
- return // No sense doing check below.
- }
- if !bytes.Equal(test, data) {
- t.Errorf("data integrity failed during read") // Not as expected.
- }
- }
- randWrite := func(h *Harness, backend *Mock, f p9.File, data []byte) {
- // Prepare the backend.
- backend.EXPECT().WriteAt(gomock.Any(), uint64(0)).Do(func(p []byte, offset uint64) {
- if !bytes.Equal(p, data) {
- h.t.Errorf("data integrity failed during write") // Not as expected.
- }
- }).Return(len(data), nil)
-
- // Execute the write.
- if n, err := f.WriteAt(data, 0); n != len(data) || err != nil {
-			t.Errorf("failed write: wanted (%d, nil), got (%d, %v)", len(data), n, err)
- }
- }
- randReadWrite := func(n int, h *Harness, backend *Mock, f p9.File, data []byte) {
- test := make([]byte, len(data))
- for i := 0; i < n; i++ {
- if rand.Intn(2) == 0 {
- randRead(h, backend, f, data, test)
- } else {
- randWrite(h, backend, f, data)
- }
- }
- }
-
- // Start reading and writing.
- var wg sync.WaitGroup
- for i := 0; i < instances; i++ {
- wg.Add(1)
- go func(i int) {
- defer wg.Done()
- randReadWrite(iterations, h, backends[i], files[i], dataSets[i][:])
- }(i)
- }
- wg.Wait()
-}
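One detail of the read/write helpers above is worth spelling out: gomock binds Return values when the expectation is declared, so any work that depends on call-time arguments has to live in the Do callback. A small sketch of that split, assuming a hypothetical helper that receives an already-opened client file and its backend mock:

func expectReadAt(t *testing.T, backend *Mock, f p9.File, data []byte) {
	// Return is bound here, before any call; Do runs later with the
	// real arguments and fills the caller's buffer.
	backend.EXPECT().ReadAt(gomock.Any(), uint64(0)).Do(func(p []byte, offset uint64) {
		copy(p, data)
	}).Return(len(data), nil)

	buf := make([]byte, len(data))
	if n, err := f.ReadAt(buf, 0); n != len(data) || err != nil {
		t.Errorf("ReadAt got (%d, %v), want (%d, nil)", n, err, len(data))
	}
}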
diff --git a/pkg/p9/p9test/p9test.go b/pkg/p9/p9test/p9test.go
deleted file mode 100644
index fd5ac3dbe..000000000
--- a/pkg/p9/p9test/p9test.go
+++ /dev/null
@@ -1,329 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package p9test provides standard mocks for p9.
-package p9test
-
-import (
- "fmt"
- "sync/atomic"
- "testing"
-
- "github.com/golang/mock/gomock"
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/p9"
- "gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/unet"
-)
-
-// Harness is an attacher mock.
-type Harness struct {
- t *testing.T
- mockCtrl *gomock.Controller
- Attacher *MockAttacher
- wg sync.WaitGroup
- clientSocket *unet.Socket
- mu sync.Mutex
- created []*Mock
-}
-
-// globalPath is a QID.Path Generator.
-var globalPath uint64
-
-// MakePath returns a globally unique path.
-func MakePath() uint64 {
- return atomic.AddUint64(&globalPath, 1)
-}
-
-// Generator is a function that generates a new file.
-type Generator func(parent *Mock) *Mock
-
-// Mock is a common mock element.
-type Mock struct {
- p9.DefaultWalkGetAttr
- *MockFile
- parent *Mock
- closed bool
- harness *Harness
- QID p9.QID
- Attr p9.Attr
- children map[string]Generator
-
- // WalkCallback is a special function that will be called from within
- // the walk context. This is needed for the concurrent tests within
- // this package.
- WalkCallback func() error
-}
-
-// globalMu protects the children maps in all mocks. Note that this is not a
-// particularly elegant solution, but because the test has walks from the root
-// through to final nodes, we must share maps below, and it's easiest to simply
-// protect against concurrent access globally.
-var globalMu sync.RWMutex
-
-// AddChild adds a new child to the Mock.
-func (m *Mock) AddChild(name string, generator Generator) {
- globalMu.Lock()
- defer globalMu.Unlock()
- m.children[name] = generator
-}
-
-// RemoveChild removes the child with the given name.
-func (m *Mock) RemoveChild(name string) {
- globalMu.Lock()
- defer globalMu.Unlock()
- delete(m.children, name)
-}
-
-// Matches implements gomock.Matcher.Matches.
-func (m *Mock) Matches(x interface{}) bool {
- if om, ok := x.(*Mock); ok {
- return m.QID.Path == om.QID.Path
- }
- return false
-}
-
-// String implements gomock.Matcher.String.
-func (m *Mock) String() string {
- return fmt.Sprintf("Mock{Mode: 0x%x, QID.Path: %d}", m.Attr.Mode, m.QID.Path)
-}
-
-// GetAttr returns the current attributes.
-func (m *Mock) GetAttr(mask p9.AttrMask) (p9.QID, p9.AttrMask, p9.Attr, error) {
- return m.QID, p9.AttrMaskAll(), m.Attr, nil
-}
-
-// Walk supports clone and walking in directories.
-func (m *Mock) Walk(names []string) ([]p9.QID, p9.File, error) {
- if m.WalkCallback != nil {
- if err := m.WalkCallback(); err != nil {
- return nil, nil, err
- }
- }
- if len(names) == 0 {
- // Clone the file appropriately.
- nm := m.harness.NewMock(m.parent, m.QID.Path, m.Attr)
- nm.children = m.children // Inherit children.
- return []p9.QID{nm.QID}, nm, nil
- } else if len(names) != 1 {
- m.harness.t.Fail() // Should not happen.
- return nil, nil, unix.EINVAL
- }
-
- if m.Attr.Mode.IsDir() {
- globalMu.RLock()
- defer globalMu.RUnlock()
- if fn, ok := m.children[names[0]]; ok {
- // Generate the child.
- nm := fn(m)
- return []p9.QID{nm.QID}, nm, nil
- }
- // No child found.
- return nil, nil, unix.ENOENT
- }
-
- // Call the underlying mock.
- return m.MockFile.Walk(names)
-}
-
-// WalkGetAttr calls the default implementation; this is a client-side optimization.
-func (m *Mock) WalkGetAttr(names []string) ([]p9.QID, p9.File, p9.AttrMask, p9.Attr, error) {
- return m.DefaultWalkGetAttr.WalkGetAttr(names)
-}
-
-// Pop pops off the most recently created Mock and asserts that it
-// represents the same file passed in. If nil is passed in, no check is
-// performed.
-//
-// Precondition: there must be at least one Mock or this will panic.
-func (h *Harness) Pop(clientFile p9.File) *Mock {
- h.mu.Lock()
- defer h.mu.Unlock()
-
- if clientFile == nil {
- // If no clientFile is provided, then we always return the last
- // created file. The caller can safely use this as long as
- // there is no concurrency.
- m := h.created[len(h.created)-1]
- h.created = h.created[:len(h.created)-1]
- return m
- }
-
- qid, _, _, err := clientFile.GetAttr(p9.AttrMaskAll())
- if err != nil {
- // We do not expect this to happen.
- panic(fmt.Sprintf("err during Pop: %v", err))
- }
-
-	// Find the relevant file in our created list. We must scan the list
-	// from back to front to ensure that we favor the most recently
-	// generated file.
- for i := len(h.created) - 1; i >= 0; i-- {
- m := h.created[i]
- if qid.Path == m.QID.Path {
- // Copy and truncate.
- copy(h.created[i:], h.created[i+1:])
- h.created = h.created[:len(h.created)-1]
- return m
- }
- }
-
- // Unable to find relevant file.
- panic(fmt.Sprintf("unable to locate file with QID %+v", qid.Path))
-}
-
-// NewMock returns a new base file.
-func (h *Harness) NewMock(parent *Mock, path uint64, attr p9.Attr) *Mock {
- m := &Mock{
- MockFile: NewMockFile(h.mockCtrl),
- parent: parent,
- harness: h,
- QID: p9.QID{
- Type: p9.QIDType((attr.Mode & p9.FileModeMask) >> 12),
- Path: path,
- },
- Attr: attr,
- }
-
- // Always ensure Close is after the parent's close. Note that this
- // can't be done via a straight-forward After call, because the parent
- // might change after initial creation. We ensure that this is true at
- // close time.
- m.EXPECT().Close().Return(nil).Times(1).Do(func() {
- if m.parent != nil && m.parent.closed {
- h.t.FailNow()
- }
- // Note that this should not be racy, as this operation should
- // be protected by the Times(1) above first.
- m.closed = true
- })
-
- // Remember what was created.
- h.mu.Lock()
- defer h.mu.Unlock()
- h.created = append(h.created, m)
-
- return m
-}
-
-// NewFile returns a new file mock.
-//
-// Note that ReadAt and WriteAt must be mocked separately.
-func (h *Harness) NewFile() Generator {
- return func(parent *Mock) *Mock {
- return h.NewMock(parent, MakePath(), p9.Attr{Mode: p9.ModeRegular})
- }
-}
-
-// NewDirectory returns a new mock directory.
-//
-// Note that Mkdir, Link, Mknod, RenameAt, UnlinkAt and Readdir must be mocked
-// separately. Walk is provided and children may be manipulated via AddChild
-// and RemoveChild. After calling Walk remotely, one can use Pop to find the
-// corresponding backend mock on the server side.
-func (h *Harness) NewDirectory(contents map[string]Generator) Generator {
- return func(parent *Mock) *Mock {
- m := h.NewMock(parent, MakePath(), p9.Attr{Mode: p9.ModeDirectory})
- m.children = contents // Save contents.
- return m
- }
-}
-
-// NewSymlink returns a new mock symlink.
-//
-// Note that Readlink must be mocked separately.
-func (h *Harness) NewSymlink() Generator {
- return func(parent *Mock) *Mock {
- return h.NewMock(parent, MakePath(), p9.Attr{Mode: p9.ModeSymlink})
- }
-}
-
-// NewBlockDevice returns a new mock block device.
-func (h *Harness) NewBlockDevice() Generator {
- return func(parent *Mock) *Mock {
- return h.NewMock(parent, MakePath(), p9.Attr{Mode: p9.ModeBlockDevice})
- }
-}
-
-// NewCharacterDevice returns a new mock character device.
-func (h *Harness) NewCharacterDevice() Generator {
- return func(parent *Mock) *Mock {
- return h.NewMock(parent, MakePath(), p9.Attr{Mode: p9.ModeCharacterDevice})
- }
-}
-
-// NewNamedPipe returns a new mock named pipe.
-func (h *Harness) NewNamedPipe() Generator {
- return func(parent *Mock) *Mock {
- return h.NewMock(parent, MakePath(), p9.Attr{Mode: p9.ModeNamedPipe})
- }
-}
-
-// NewSocket returns a new mock socket.
-func (h *Harness) NewSocket() Generator {
- return func(parent *Mock) *Mock {
- return h.NewMock(parent, MakePath(), p9.Attr{Mode: p9.ModeSocket})
- }
-}
-
-// Finish completes all checks and shuts down the server.
-func (h *Harness) Finish() {
- h.clientSocket.Shutdown()
- h.wg.Wait()
- h.mockCtrl.Finish()
-}
-
-// NewHarness creates and returns a new test server.
-//
-// It should always be used as:
-//
-// h, c := NewHarness(t)
-// defer h.Finish()
-//
-func NewHarness(t *testing.T) (*Harness, *p9.Client) {
- // Create the mock.
- mockCtrl := gomock.NewController(t)
- h := &Harness{
- t: t,
- mockCtrl: mockCtrl,
- Attacher: NewMockAttacher(mockCtrl),
- }
-
- // Make socket pair.
- serverSocket, clientSocket, err := unet.SocketPair(false)
- if err != nil {
- t.Fatalf("socketpair got err %v wanted nil", err)
- }
-
- // Start the server, synchronized on exit.
- server := p9.NewServer(h.Attacher)
- h.wg.Add(1)
- go func() {
- defer h.wg.Done()
- server.Handle(serverSocket)
- }()
-
- // Create the client.
- client, err := p9.NewClient(clientSocket, p9.DefaultMessageSize, p9.HighestVersionString())
- if err != nil {
- serverSocket.Close()
- clientSocket.Close()
- t.Fatalf("new client got %v, expected nil", err)
- return nil, nil // Never hit.
- }
-
- // Capture the client socket.
- h.clientSocket = clientSocket
- return h, client
-}
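Taken together, a test typically drives this harness as sketched below. TestHarnessUsage and the attach name are illustrative and not taken from the deleted files; the attacher expectation and client calls follow the shapes used elsewhere in this package:

func TestHarnessUsage(t *testing.T) {
	h, c := NewHarness(t)
	defer h.Finish()

	// Serve a root directory containing a single regular file.
	root := h.NewDirectory(map[string]Generator{"file": h.NewFile()})(nil)
	h.Attacher.EXPECT().Attach().Return(root, nil)
	rootFile, err := c.Attach("/")
	if err != nil {
		t.Fatalf("attach got %v, want nil", err)
	}
	defer rootFile.Close()

	// Walk to the child, then use Pop to recover the backend mock so
	// further expectations (Open, ReadAt, ...) can be registered on it.
	_, child, err := rootFile.Walk([]string{"file"})
	if err != nil {
		t.Fatalf("walk got %v, want nil", err)
	}
	defer child.Close()
	backend := h.Pop(child)
	backend.EXPECT().Open(p9.ReadOnly)
	if _, _, _, err := child.Open(p9.ReadOnly); err != nil {
		t.Fatalf("open got %v, want nil", err)
	}
}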
diff --git a/pkg/p9/transport_test.go b/pkg/p9/transport_test.go
deleted file mode 100644
index a29f06ddb..000000000
--- a/pkg/p9/transport_test.go
+++ /dev/null
@@ -1,233 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package p9
-
-import (
- "io/ioutil"
- "os"
- "testing"
-
- "gvisor.dev/gvisor/pkg/fd"
- "gvisor.dev/gvisor/pkg/unet"
-)
-
-const (
- MsgTypeBadEncode = iota + 252
- MsgTypeBadDecode
- MsgTypeUnregistered
-)
-
-func TestSendRecv(t *testing.T) {
- server, client, err := unet.SocketPair(false)
- if err != nil {
- t.Fatalf("socketpair got err %v expected nil", err)
- }
- defer server.Close()
- defer client.Close()
-
- if err := send(client, Tag(1), &Tlopen{}); err != nil {
- t.Fatalf("send got err %v expected nil", err)
- }
-
- tag, m, err := recv(server, maximumLength, msgRegistry.get)
- if err != nil {
- t.Fatalf("recv got err %v expected nil", err)
- }
- if tag != Tag(1) {
- t.Fatalf("got tag %v expected 1", tag)
- }
- if _, ok := m.(*Tlopen); !ok {
- t.Fatalf("got message %v expected *Tlopen", m)
- }
-}
-
-// badDecode overruns on decode.
-type badDecode struct{}
-
-func (*badDecode) decode(b *buffer) { b.markOverrun() }
-func (*badDecode) encode(b *buffer) {}
-func (*badDecode) Type() MsgType { return MsgTypeBadDecode }
-func (*badDecode) String() string { return "badDecode{}" }
-
-func TestRecvOverrun(t *testing.T) {
- server, client, err := unet.SocketPair(false)
- if err != nil {
- t.Fatalf("socketpair got err %v expected nil", err)
- }
- defer server.Close()
- defer client.Close()
-
- if err := send(client, Tag(1), &badDecode{}); err != nil {
- t.Fatalf("send got err %v expected nil", err)
- }
-
- if _, _, err := recv(server, maximumLength, msgRegistry.get); err == nil {
- t.Fatalf("recv got err %v expected ErrSocket{ErrNoValidMessage}", err)
- }
-}
-
-// unregistered is not registered on decode.
-type unregistered struct{}
-
-func (*unregistered) decode(b *buffer) {}
-func (*unregistered) encode(b *buffer) {}
-func (*unregistered) Type() MsgType { return MsgTypeUnregistered }
-func (*unregistered) String() string { return "unregistered{}" }
-
-func TestRecvInvalidType(t *testing.T) {
- server, client, err := unet.SocketPair(false)
- if err != nil {
- t.Fatalf("socketpair got err %v expected nil", err)
- }
- defer server.Close()
- defer client.Close()
-
- if err := send(client, Tag(1), &unregistered{}); err != nil {
- t.Fatalf("send got err %v expected nil", err)
- }
-
- _, _, err = recv(server, maximumLength, msgRegistry.get)
- if _, ok := err.(*ErrInvalidMsgType); !ok {
- t.Fatalf("recv got err %v expected ErrInvalidMsgType", err)
- }
-}
-
-func TestSendRecvWithFile(t *testing.T) {
- server, client, err := unet.SocketPair(false)
- if err != nil {
- t.Fatalf("socketpair got err %v expected nil", err)
- }
- defer server.Close()
- defer client.Close()
-
- // Create a tempfile.
- osf, err := ioutil.TempFile("", "p9")
- if err != nil {
- t.Fatalf("tempfile got err %v expected nil", err)
- }
- os.Remove(osf.Name())
- f, err := fd.NewFromFile(osf)
- osf.Close()
- if err != nil {
- t.Fatalf("unable to create file: %v", err)
- }
-
- rlopen := &Rlopen{}
- rlopen.SetFilePayload(f)
- if err := send(client, Tag(1), rlopen); err != nil {
- t.Fatalf("send got err %v expected nil", err)
- }
-
-	// Receive the message, which should carry the file payload.
- tag, m, err := recv(server, maximumLength, msgRegistry.get)
- if err != nil {
- t.Fatalf("recv got err %v expected nil", err)
- }
- if tag != Tag(1) {
- t.Fatalf("got tag %v expected 1", tag)
- }
- rlopen, ok := m.(*Rlopen)
- if !ok {
- t.Fatalf("got m %v expected *Rlopen", m)
- }
- if rlopen.File == nil {
- t.Fatalf("got nil file expected non-nil")
- }
-}
-
-func TestRecvClosed(t *testing.T) {
- server, client, err := unet.SocketPair(false)
- if err != nil {
- t.Fatalf("socketpair got err %v expected nil", err)
- }
- defer server.Close()
- client.Close()
-
- _, _, err = recv(server, maximumLength, msgRegistry.get)
- if err == nil {
- t.Fatalf("got err nil expected non-nil")
- }
- if _, ok := err.(ErrSocket); !ok {
- t.Fatalf("got err %v expected ErrSocket", err)
- }
-}
-
-func TestSendClosed(t *testing.T) {
- server, client, err := unet.SocketPair(false)
- if err != nil {
- t.Fatalf("socketpair got err %v expected nil", err)
- }
- server.Close()
- defer client.Close()
-
- err = send(client, Tag(1), &Tlopen{})
- if err == nil {
- t.Fatalf("send got err nil expected non-nil")
- }
- if _, ok := err.(ErrSocket); !ok {
- t.Fatalf("got err %v expected ErrSocket", err)
- }
-}
-
-func BenchmarkSendRecv(b *testing.B) {
- b.ReportAllocs()
-
- server, client, err := unet.SocketPair(false)
- if err != nil {
- b.Fatalf("socketpair got err %v expected nil", err)
- }
- defer server.Close()
- defer client.Close()
-
- // Exchange Rflush messages since these contain no data and therefore incur
- // no additional marshaling overhead.
- go func() {
- for i := 0; i < b.N; i++ {
- tag, m, err := recv(server, maximumLength, msgRegistry.get)
- if err != nil {
- b.Errorf("recv got err %v expected nil", err)
- }
- if tag != Tag(1) {
- b.Errorf("got tag %v expected 1", tag)
- }
- if _, ok := m.(*Rflush); !ok {
- b.Errorf("got message %T expected *Rflush", m)
- }
- if err := send(server, Tag(2), &Rflush{}); err != nil {
- b.Errorf("send got err %v expected nil", err)
- }
- }
- }()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- if err := send(client, Tag(1), &Rflush{}); err != nil {
- b.Errorf("send got err %v expected nil", err)
- }
- tag, m, err := recv(client, maximumLength, msgRegistry.get)
- if err != nil {
- b.Errorf("recv got err %v expected nil", err)
- }
- if tag != Tag(2) {
- b.Errorf("got tag %v expected 2", tag)
- }
- if _, ok := m.(*Rflush); !ok {
- b.Errorf("got message %v expected *Rflush", m)
- }
- }
-}
-
-func init() {
- msgRegistry.register(MsgTypeBadDecode, func() message { return &badDecode{} })
-}
diff --git a/pkg/p9/version_test.go b/pkg/p9/version_test.go
deleted file mode 100644
index 291e8580e..000000000
--- a/pkg/p9/version_test.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package p9
-
-import (
- "testing"
-)
-
-func TestVersionNumberEquivalent(t *testing.T) {
- for i := uint32(0); i < 1024; i++ {
- str := versionString(i)
- version, ok := parseVersion(str)
- if !ok {
- t.Errorf("#%d: parseVersion(%q) failed, want success", i, str)
- continue
- }
- if i != version {
-			t.Errorf("#%d: got version %d, want %d", i, version, i)
- }
- }
-}
-
-func TestVersionStringEquivalent(t *testing.T) {
- // There is one case where the version is not equivalent on purpose,
- // that is 9P2000.L.Google.0. It is not equivalent because versionString
- // must always return the more generic 9P2000.L for legacy servers that
- // check for it. See net/9p/client.c.
- str := "9P2000.L.Google.0"
- version, ok := parseVersion(str)
- if !ok {
- t.Errorf("parseVersion(%q) failed, want success", str)
- }
- if got := versionString(version); got != "9P2000.L" {
- t.Errorf("versionString(%d) got %q, want %q", version, got, "9P2000.L")
- }
-
- for _, test := range []struct {
- versionString string
- }{
- {
- versionString: "9P2000.L",
- },
- {
- versionString: "9P2000.L.Google.1",
- },
- {
- versionString: "9P2000.L.Google.347823894",
- },
- } {
- version, ok := parseVersion(test.versionString)
- if !ok {
- t.Errorf("parseVersion(%q) failed, want success", test.versionString)
- continue
- }
- if got := versionString(version); got != test.versionString {
- t.Errorf("versionString(%d) got %q, want %q", version, got, test.versionString)
- }
- }
-}
-
-func TestParseVersion(t *testing.T) {
- for _, test := range []struct {
- versionString string
- expectSuccess bool
- expectedVersion uint32
- }{
- {
- versionString: "9P",
- expectSuccess: false,
- },
- {
- versionString: "9P.L",
- expectSuccess: false,
- },
- {
- versionString: "9P200.L",
- expectSuccess: false,
- },
- {
- versionString: "9P2000",
- expectSuccess: false,
- },
- {
- versionString: "9P2000.L.Google.-1",
- expectSuccess: false,
- },
- {
- versionString: "9P2000.L.Google.",
- expectSuccess: false,
- },
- {
- versionString: "9P2000.L.Google.3546343826724305832",
- expectSuccess: false,
- },
- {
- versionString: "9P2001.L",
- expectSuccess: false,
- },
- {
- versionString: "9P2000.L",
- expectSuccess: true,
- expectedVersion: 0,
- },
- {
- versionString: "9P2000.L.Google.0",
- expectSuccess: true,
- expectedVersion: 0,
- },
- {
- versionString: "9P2000.L.Google.1",
- expectSuccess: true,
- expectedVersion: 1,
- },
- } {
- version, ok := parseVersion(test.versionString)
- if ok != test.expectSuccess {
- t.Errorf("parseVersion(%q) got (_, %v), want (_, %v)", test.versionString, ok, test.expectSuccess)
- continue
- }
- if !test.expectSuccess {
- continue
- }
- if version != test.expectedVersion {
- t.Errorf("parseVersion(%q) got (%d, _), want (%d, _)", test.versionString, version, test.expectedVersion)
- }
- }
-}
-
-func BenchmarkParseVersion(b *testing.B) {
- for n := 0; n < b.N; n++ {
- parseVersion("9P2000.L.Google.1")
- }
-}
diff --git a/pkg/pool/BUILD b/pkg/pool/BUILD
deleted file mode 100644
index 7b1c6b75b..000000000
--- a/pkg/pool/BUILD
+++ /dev/null
@@ -1,25 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(
- default_visibility = ["//visibility:public"],
- licenses = ["notice"],
-)
-
-go_library(
- name = "pool",
- srcs = [
- "pool.go",
- ],
- deps = [
- "//pkg/sync",
- ],
-)
-
-go_test(
- name = "pool_test",
- size = "small",
- srcs = [
- "pool_test.go",
- ],
- library = ":pool",
-)
diff --git a/pkg/pool/pool_state_autogen.go b/pkg/pool/pool_state_autogen.go
new file mode 100644
index 000000000..1f4164c00
--- /dev/null
+++ b/pkg/pool/pool_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package pool
diff --git a/pkg/pool/pool_test.go b/pkg/pool/pool_test.go
deleted file mode 100644
index d928439c1..000000000
--- a/pkg/pool/pool_test.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pool
-
-import (
- "testing"
-)
-
-func TestPoolUnique(t *testing.T) {
- p := Pool{Start: 1, Limit: 3}
- got := make(map[uint64]bool)
-
- for {
- n, ok := p.Get()
- if !ok {
- break
- }
-
- // Check unique.
- if _, ok := got[n]; ok {
- t.Errorf("pool spit out %v multiple times", n)
- }
-
- // Record.
- got[n] = true
- }
-}
-
-func TestExhausted(t *testing.T) {
- p := Pool{Start: 1, Limit: 500}
- for i := 0; i < 499; i++ {
- _, ok := p.Get()
- if !ok {
- t.Fatalf("pool exhausted before 499 items")
- }
- }
-
- _, ok := p.Get()
- if ok {
- t.Errorf("pool not exhausted when it should be")
- }
-}
-
-func TestPoolRecycle(t *testing.T) {
- p := Pool{Start: 1, Limit: 500}
- n1, _ := p.Get()
- p.Put(n1)
- n2, _ := p.Get()
- if n1 != n2 {
- t.Errorf("pool not recycling items")
- }
-}
diff --git a/pkg/procid/BUILD b/pkg/procid/BUILD
deleted file mode 100644
index 2838b5aca..000000000
--- a/pkg/procid/BUILD
+++ /dev/null
@@ -1,40 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "procid",
- srcs = [
- "procid.go",
- "procid_amd64.s",
- "procid_arm64.s",
- ],
- visibility = ["//visibility:public"],
-)
-
-go_test(
- name = "procid_test",
- size = "small",
- srcs = [
- "procid_test.go",
- ],
- library = ":procid",
- deps = [
- "//pkg/sync",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "procid_net_test",
- size = "small",
- srcs = [
- "procid_net_test.go",
- "procid_test.go",
- ],
- library = ":procid",
- deps = [
- "//pkg/sync",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/procid/procid_net_test.go b/pkg/procid/procid_net_test.go
deleted file mode 100644
index b628e2285..000000000
--- a/pkg/procid/procid_net_test.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procid
-
-// This file is just to force the inclusion of the "net" package, which will
-// make the test binary a cgo one.
-import (
- _ "net"
-)
diff --git a/pkg/procid/procid_state_autogen.go b/pkg/procid/procid_state_autogen.go
new file mode 100644
index 000000000..59cb991c0
--- /dev/null
+++ b/pkg/procid/procid_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build go1.1
+
+package procid
diff --git a/pkg/procid/procid_test.go b/pkg/procid/procid_test.go
deleted file mode 100644
index a08110b35..000000000
--- a/pkg/procid/procid_test.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procid
-
-import (
- "os"
- "runtime"
- "testing"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/sync"
-)
-
-// runOnMain is used to send functions to run on the main (initial) thread.
-var runOnMain = make(chan func(), 10)
-
-func checkProcid(t *testing.T, start *sync.WaitGroup, done *sync.WaitGroup) {
- defer done.Done()
-
- runtime.LockOSThread()
- defer runtime.UnlockOSThread()
-
- start.Done()
- start.Wait()
-
- procID := Current()
- tid := unix.Gettid()
-
- if procID != uint64(tid) {
- t.Logf("Bad procid: expected %v, got %v", tid, procID)
- t.Fail()
- }
-}
-
-func TestProcidInitialized(t *testing.T) {
- var start sync.WaitGroup
- var done sync.WaitGroup
-
- count := 100
- start.Add(count + 1)
- done.Add(count + 1)
-
- // Run the check on the main thread.
- //
- // When cgo is not included, the only case when procid isn't initialized
- // is in the main (initial) thread, so we have to test this case
- // specifically.
- runOnMain <- func() {
- checkProcid(t, &start, &done)
- }
-
- // Run the check on a number of different threads.
- for i := 0; i < count; i++ {
- go checkProcid(t, &start, &done)
- }
-
- done.Wait()
-}
-
-func TestMain(m *testing.M) {
- // Make sure we remain at the main (initial) thread.
- runtime.LockOSThread()
-
- // Start running tests in a different goroutine.
- go func() {
- os.Exit(m.Run())
- }()
-
- // Execute any functions that have been sent for execution in the main
- // thread.
- for f := range runOnMain {
- f()
- }
-}
diff --git a/pkg/rand/BUILD b/pkg/rand/BUILD
deleted file mode 100644
index 80b8ceb02..000000000
--- a/pkg/rand/BUILD
+++ /dev/null
@@ -1,16 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "rand",
- srcs = [
- "rand.go",
- "rand_linux.go",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/sync",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/rand/rand_linux_state_autogen.go b/pkg/rand/rand_linux_state_autogen.go
new file mode 100644
index 000000000..f727c9314
--- /dev/null
+++ b/pkg/rand/rand_linux_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package rand
diff --git a/pkg/rand/rand_state_autogen.go b/pkg/rand/rand_state_autogen.go
new file mode 100644
index 000000000..e0a5cd184
--- /dev/null
+++ b/pkg/rand/rand_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build !linux
+
+package rand
diff --git a/pkg/refs/BUILD b/pkg/refs/BUILD
deleted file mode 100644
index 9888cce9c..000000000
--- a/pkg/refs/BUILD
+++ /dev/null
@@ -1,42 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "weak_ref_list",
- out = "weak_ref_list.go",
- package = "refs",
- prefix = "weakRef",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*WeakRef",
- "Linker": "*WeakRef",
- },
-)
-
-go_library(
- name = "refs",
- srcs = [
- "refcounter.go",
- "refcounter_state.go",
- "weak_ref_list.go",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/context",
- "//pkg/log",
- "//pkg/sync",
- ],
-)
-
-go_test(
- name = "refs_test",
- size = "small",
- srcs = ["refcounter_test.go"],
- library = ":refs",
- deps = [
- "//pkg/context",
- "//pkg/sync",
- ],
-)
diff --git a/pkg/refs/refcounter_test.go b/pkg/refs/refcounter_test.go
deleted file mode 100644
index 6d0dd1018..000000000
--- a/pkg/refs/refcounter_test.go
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package refs
-
-import (
- "reflect"
- "testing"
-
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/sync"
-)
-
-type testCounter struct {
- AtomicRefCount
-
- // mu protects the boolean below.
- mu sync.Mutex
-
- // destroyed indicates whether this was destroyed.
- destroyed bool
-}
-
-func (t *testCounter) DecRef(ctx context.Context) {
- t.AtomicRefCount.DecRefWithDestructor(ctx, t.destroy)
-}
-
-func (t *testCounter) destroy(context.Context) {
- t.mu.Lock()
- defer t.mu.Unlock()
- t.destroyed = true
-}
-
-func (t *testCounter) IsDestroyed() bool {
- t.mu.Lock()
- defer t.mu.Unlock()
- return t.destroyed
-}
-
-func newTestCounter() *testCounter {
- return &testCounter{destroyed: false}
-}
-
-func TestOneRef(t *testing.T) {
- tc := newTestCounter()
- tc.DecRef(context.Background())
-
- if !tc.IsDestroyed() {
- t.Errorf("object should have been destroyed")
- }
-}
-
-func TestTwoRefs(t *testing.T) {
- tc := newTestCounter()
- tc.IncRef()
- ctx := context.Background()
- tc.DecRef(ctx)
- tc.DecRef(ctx)
-
- if !tc.IsDestroyed() {
- t.Errorf("object should have been destroyed")
- }
-}
-
-func TestMultiRefs(t *testing.T) {
- tc := newTestCounter()
- tc.IncRef()
- ctx := context.Background()
- tc.DecRef(ctx)
-
- tc.IncRef()
- tc.DecRef(ctx)
-
- tc.DecRef(ctx)
-
- if !tc.IsDestroyed() {
- t.Errorf("object should have been destroyed")
- }
-}
-
-func TestWeakRef(t *testing.T) {
- tc := newTestCounter()
- w := NewWeakRef(tc, nil)
- ctx := context.Background()
-
- // Try resolving.
- if x := w.Get(); x == nil {
- t.Errorf("weak reference didn't resolve: expected %v, got nil", tc)
- } else {
- x.DecRef(ctx)
- }
-
- // Try resolving again.
- if x := w.Get(); x == nil {
- t.Errorf("weak reference didn't resolve: expected %v, got nil", tc)
- } else {
- x.DecRef(ctx)
- }
-
- // Shouldn't be destroyed yet. (Can't continue if this fails.)
- if tc.IsDestroyed() {
- t.Fatalf("original object destroyed earlier than expected")
- }
-
- // Drop the original reference.
- tc.DecRef(ctx)
-
- // Assert destroyed.
- if !tc.IsDestroyed() {
- t.Errorf("original object not destroyed as expected")
- }
-
- // Shouldn't be anything.
- if x := w.Get(); x != nil {
- t.Errorf("weak reference resolved: expected nil, got %v", x)
- }
-}
-
-func TestWeakRefDrop(t *testing.T) {
- tc := newTestCounter()
- w := NewWeakRef(tc, nil)
- ctx := context.Background()
- w.Drop(ctx)
-
- // Just assert the list is empty.
- if !tc.weakRefs.Empty() {
- t.Errorf("weak reference not dropped")
- }
-
- // Drop the original reference.
- tc.DecRef(ctx)
-}
-
-type testWeakRefUser struct {
- weakRefGone func()
-}
-
-func (u *testWeakRefUser) WeakRefGone(ctx context.Context) {
- u.weakRefGone()
-}
-
-func TestCallback(t *testing.T) {
- called := false
- tc := newTestCounter()
- var w *WeakRef
- w = NewWeakRef(tc, &testWeakRefUser{func() {
- called = true
-
- // Check that the weak ref has been zapped.
- rc := w.obj.Load().(RefCounter)
- if v := reflect.ValueOf(rc); v != reflect.Zero(v.Type()) {
- t.Fatalf("Callback called with non-nil ptr")
- }
-
- // Check that we're not holding the mutex by acquiring and
- // releasing it.
- tc.mu.Lock()
- tc.mu.Unlock()
- }})
-
- // Drop the original reference, this must trigger the callback.
- ctx := context.Background()
- tc.DecRef(ctx)
-
- if !called {
- t.Fatalf("Callback not called")
- }
-}
diff --git a/pkg/refs/refs_state_autogen.go b/pkg/refs/refs_state_autogen.go
new file mode 100644
index 000000000..53d22d315
--- /dev/null
+++ b/pkg/refs/refs_state_autogen.go
@@ -0,0 +1,156 @@
+// automatically generated by stateify.
+
+package refs
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (w *WeakRef) StateTypeName() string {
+ return "pkg/refs.WeakRef"
+}
+
+func (w *WeakRef) StateFields() []string {
+ return []string{
+ "obj",
+ "user",
+ }
+}
+
+func (w *WeakRef) beforeSave() {}
+
+// +checklocksignore
+func (w *WeakRef) StateSave(stateSinkObject state.Sink) {
+ w.beforeSave()
+ var objValue savedReference = w.saveObj()
+ stateSinkObject.SaveValue(0, objValue)
+ stateSinkObject.Save(1, &w.user)
+}
+
+func (w *WeakRef) afterLoad() {}
+
+// +checklocksignore
+func (w *WeakRef) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(1, &w.user)
+ stateSourceObject.LoadValue(0, new(savedReference), func(y interface{}) { w.loadObj(y.(savedReference)) })
+}
+
+func (r *AtomicRefCount) StateTypeName() string {
+ return "pkg/refs.AtomicRefCount"
+}
+
+func (r *AtomicRefCount) StateFields() []string {
+ return []string{
+ "refCount",
+ "name",
+ "stack",
+ }
+}
+
+func (r *AtomicRefCount) beforeSave() {}
+
+// +checklocksignore
+func (r *AtomicRefCount) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+ stateSinkObject.Save(1, &r.name)
+ stateSinkObject.Save(2, &r.stack)
+}
+
+func (r *AtomicRefCount) afterLoad() {}
+
+// +checklocksignore
+func (r *AtomicRefCount) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.Load(1, &r.name)
+ stateSourceObject.Load(2, &r.stack)
+}
+
+func (s *savedReference) StateTypeName() string {
+ return "pkg/refs.savedReference"
+}
+
+func (s *savedReference) StateFields() []string {
+ return []string{
+ "obj",
+ }
+}
+
+func (s *savedReference) beforeSave() {}
+
+// +checklocksignore
+func (s *savedReference) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.obj)
+}
+
+func (s *savedReference) afterLoad() {}
+
+// +checklocksignore
+func (s *savedReference) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.obj)
+}
+
+func (l *weakRefList) StateTypeName() string {
+ return "pkg/refs.weakRefList"
+}
+
+func (l *weakRefList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *weakRefList) beforeSave() {}
+
+// +checklocksignore
+func (l *weakRefList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *weakRefList) afterLoad() {}
+
+// +checklocksignore
+func (l *weakRefList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *weakRefEntry) StateTypeName() string {
+ return "pkg/refs.weakRefEntry"
+}
+
+func (e *weakRefEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *weakRefEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *weakRefEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *weakRefEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *weakRefEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func init() {
+ state.Register((*WeakRef)(nil))
+ state.Register((*AtomicRefCount)(nil))
+ state.Register((*savedReference)(nil))
+ state.Register((*weakRefList)(nil))
+ state.Register((*weakRefEntry)(nil))
+}
diff --git a/pkg/refs/weak_ref_list.go b/pkg/refs/weak_ref_list.go
new file mode 100644
index 000000000..920e89eb3
--- /dev/null
+++ b/pkg/refs/weak_ref_list.go
@@ -0,0 +1,221 @@
+package refs
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type weakRefElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (weakRefElementMapper) linkerFor(elem *WeakRef) *WeakRef { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type weakRefList struct {
+ head *WeakRef
+ tail *WeakRef
+}
+
+// Reset resets list l to the empty state.
+func (l *weakRefList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *weakRefList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *weakRefList) Front() *WeakRef {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *weakRefList) Back() *WeakRef {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *weakRefList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (weakRefElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *weakRefList) PushFront(e *WeakRef) {
+ linker := weakRefElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ weakRefElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *weakRefList) PushBack(e *WeakRef) {
+ linker := weakRefElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ weakRefElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *weakRefList) PushBackList(m *weakRefList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ weakRefElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ weakRefElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *weakRefList) InsertAfter(b, e *WeakRef) {
+ bLinker := weakRefElementMapper{}.linkerFor(b)
+ eLinker := weakRefElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ weakRefElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *weakRefList) InsertBefore(a, e *WeakRef) {
+ aLinker := weakRefElementMapper{}.linkerFor(a)
+ eLinker := weakRefElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ weakRefElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *weakRefList) Remove(e *WeakRef) {
+ linker := weakRefElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ weakRefElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ weakRefElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type weakRefEntry struct {
+ next *WeakRef
+ prev *WeakRef
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *weakRefEntry) Next() *WeakRef {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *weakRefEntry) Prev() *WeakRef {
+ return e.prev
+}
+
+// SetNext assigns 'entry' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *weakRefEntry) SetNext(elem *WeakRef) {
+ e.next = elem
+}
+
+// SetPrev assigns 'entry' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *weakRefEntry) SetPrev(elem *WeakRef) {
+ e.prev = elem
+}
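WeakRef supplies the Next/Prev/SetNext/SetPrev methods required above (in practice by embedding weakRefEntry), so holders use the generated list like any intrusive list. A short usage sketch; w1, w2 and shouldDrop are hypothetical:

var l weakRefList // The zero value is an empty, ready-to-use list.
l.PushBack(w1)
l.PushBack(w2)

// Iterate while removing entries: capture Next before Remove unlinks
// the current element.
for e := l.Front(); e != nil; {
	next := e.Next()
	if shouldDrop(e) {
		l.Remove(e)
	}
	e = next
}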
diff --git a/pkg/refsvfs2/BUILD b/pkg/refsvfs2/BUILD
deleted file mode 100644
index 7c1a8c792..000000000
--- a/pkg/refsvfs2/BUILD
+++ /dev/null
@@ -1,39 +0,0 @@
-# TODO(gvisor.dev/issue/1624): rename this directory/package to "refs" once VFS1
-# is gone and the current refs package can be deleted.
-load("//tools:defs.bzl", "go_library")
-load("//tools/go_generics:defs.bzl", "go_template")
-
-package(licenses = ["notice"])
-
-go_template(
- name = "refs_template",
- srcs = [
- "refs_template.go",
- ],
- opt_consts = [
- "enableLogging",
- ],
- types = [
- "T",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/log",
- "//pkg/refs",
- ],
-)
-
-go_library(
- name = "refsvfs2",
- srcs = [
- "refs.go",
- "refs_map.go",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/context",
- "//pkg/log",
- "//pkg/refs",
- "//pkg/sync",
- ],
-)
diff --git a/pkg/refsvfs2/README.md b/pkg/refsvfs2/README.md
deleted file mode 100644
index eca53c282..000000000
--- a/pkg/refsvfs2/README.md
+++ /dev/null
@@ -1,66 +0,0 @@
-# Reference Counting
-
-Go does not offer a reliable way to couple custom resource management with
-object lifetime. As a result, we need to manually implement reference counting
-for many objects in gVisor to make sure that resources are acquired and released
-appropriately. For example, the filesystem has many reference-counted objects
-(file descriptions, dentries, inodes, etc.), and it is important that each
-object persists while anything holds a reference on it and is destroyed once all
-references are dropped.
-
-We provide a template in `refs_template.go` that can be applied to most objects
-in need of reference counting. It contains a simple `Refs` struct that can be
-incremented and decremented, and once the reference count reaches zero, a
-destructor can be called. Note that there are some objects (e.g. `gofer.dentry`,
-`overlay.dentry`) that should not immediately be destroyed upon reaching zero
-references; in these cases, this template cannot be applied.
-
-# Reference Checking
-
-Unfortunately, manually keeping track of reference counts is extremely error
-prone, and improper accounting can lead to production bugs that are very
-difficult to root cause.
-
-We have several ways of discovering reference count errors in gVisor. Any
-attempt to increment/decrement a `Refs` struct with a count of zero will trigger
-a sentry panic, since the object should have been destroyed and become
-unreachable. This allows us to identify missing increments or extra decrements,
-which cause the reference count to be lower than it should be: the count will
-reach zero earlier than expected, and the next increment/decrement--which should
-be valid--will result in a panic.
-
-It is trickier to identify extra increments and missing decrements, which cause
-the reference count to be higher than expected (i.e. a “reference leak”).
-Reference leaks prevent resources from being released properly and can translate
-to various issues that are tricky to diagnose, such as memory leaks. The
-following section discusses how we implement leak checking.
-
-## Leak Checking
-
-When leak checking is enabled, reference-counted objects are added to a global
-map when constructed and removed when destroyed. Near the very end of sandbox
-execution, once no reference-counted objects should still be reachable, we
-report everything left in the map as having leaked. Leak-checking objects
-implement the `CheckedObject` interface, which allows us to print informative
-warnings for each of the leaked objects.
-
-Leak checking is provided by `refs_template`, but objects that do not use the
-template will also need to implement `CheckedObject` and be manually
-registered/unregistered from the map in order to be checked.
-
-Note that leak checking affects performance and memory usage, so it should only
-be enabled in testing environments.
-
-## Debugging
-
-Even with the checks described above, it can be difficult to track down the
-exact source of a reference counting error. The error may occur far before it is
-discovered (for instance, a missing `IncRef` may not be discovered until a
-future `DecRef` makes the count negative). To aid in debugging, `refs_template`
-provides the `enableLogging` option to log every `IncRef`, `DecRef`, and leak
-check registration/unregistration, along with the object address and a call
-stack. This allows us to search a log for all of the changes to a particular
-object's reference count, which makes it much easier to identify the absent or
-extraneous operation(s). The reference-counted objects that do not use
-`refs_template` also provide logging, and others defined in the future should do
-so as well.
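As a rough sketch of the kind of counter the text above describes (names and layout are illustrative, not the actual refs_template code): an atomic count that panics on any increment or decrement after reaching zero, and runs a destructor when the last reference is dropped.

package main

import (
	"fmt"
	"sync/atomic"
)

// refs is an illustrative reference counter in the spirit of refs_template:
// the stored value is the number of extra references, so 0 means one live
// reference and -1 means destroyed.
type refs struct {
	count int64
}

func (r *refs) IncRef() {
	if v := atomic.AddInt64(&r.count, 1); v <= 0 {
		panic("IncRef on destroyed object") // a missing IncRef or extra DecRef happened earlier
	}
}

func (r *refs) DecRef(destroy func()) {
	switch v := atomic.AddInt64(&r.count, -1); {
	case v < -1:
		panic("DecRef on destroyed object")
	case v == -1:
		destroy() // last reference dropped
	}
}

func main() {
	var r refs   // constructed holding one reference
	r.IncRef()   // two references
	r.DecRef(func() {})
	r.DecRef(func() { fmt.Println("destroyed") })
}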
diff --git a/pkg/refsvfs2/refsvfs2_state_autogen.go b/pkg/refsvfs2/refsvfs2_state_autogen.go
new file mode 100644
index 000000000..ca5fbb104
--- /dev/null
+++ b/pkg/refsvfs2/refsvfs2_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package refsvfs2
diff --git a/pkg/ring0/BUILD b/pkg/ring0/BUILD
deleted file mode 100644
index fda6ba601..000000000
--- a/pkg/ring0/BUILD
+++ /dev/null
@@ -1,86 +0,0 @@
-load("//tools:defs.bzl", "arch_genrule", "go_library")
-load("//tools/go_generics:defs.bzl", "go_template", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template(
- name = "defs_amd64",
- srcs = [
- "defs.go",
- "defs_amd64.go",
- "offsets_amd64.go",
- "x86.go",
- ],
- visibility = [":__subpackages__"],
-)
-
-go_template(
- name = "defs_arm64",
- srcs = [
- "aarch64.go",
- "defs.go",
- "defs_arm64.go",
- "offsets_arm64.go",
- ],
- visibility = [":__subpackages__"],
-)
-
-go_template_instance(
- name = "defs_impl_amd64",
- out = "defs_impl_amd64.go",
- package = "ring0",
- template = ":defs_amd64",
-)
-
-go_template_instance(
- name = "defs_impl_arm64",
- out = "defs_impl_arm64.go",
- package = "ring0",
- template = ":defs_arm64",
-)
-
-arch_genrule(
- name = "entry_impl_amd64",
- srcs = ["entry_amd64.s"],
- outs = ["entry_impl_amd64.s"],
- cmd = "(echo -e '// build +amd64\\n' && QEMU $(location //pkg/ring0/gen_offsets) && cat $(location entry_amd64.s)) > $@",
- tools = ["//pkg/ring0/gen_offsets"],
-)
-
-arch_genrule(
- name = "entry_impl_arm64",
- srcs = ["entry_arm64.s"],
- outs = ["entry_impl_arm64.s"],
- cmd = "(echo -e '// build +arm64\\n' && QEMU $(location //pkg/ring0/gen_offsets) && cat $(location entry_arm64.s)) > $@",
- tools = ["//pkg/ring0/gen_offsets"],
-)
-
-go_library(
- name = "ring0",
- srcs = [
- "defs_impl_amd64.go",
- "defs_impl_arm64.go",
- "entry_amd64.go",
- "entry_arm64.go",
- "entry_impl_amd64.s",
- "entry_impl_arm64.s",
- "kernel.go",
- "kernel_amd64.go",
- "kernel_arm64.go",
- "kernel_unsafe.go",
- "lib_amd64.go",
- "lib_amd64.s",
- "lib_arm64.go",
- "lib_arm64.s",
- "ring0.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/cpuid",
- "//pkg/hostarch",
- "//pkg/ring0/pagetables",
- "//pkg/safecopy",
- "//pkg/sentry/arch",
- "//pkg/sentry/arch/fpu",
- ],
-)
diff --git a/pkg/ring0/aarch64.go b/pkg/ring0/aarch64.go
deleted file mode 100644
index 96c884844..000000000
--- a/pkg/ring0/aarch64.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build arm64
-// +build arm64
-
-package ring0
-
-// Useful bits.
-const (
- _PGD_PGT_BASE = 0x1000
- _PGD_PGT_SIZE = 0x1000
- _PUD_PGT_BASE = 0x2000
- _PUD_PGT_SIZE = 0x1000
- _PMD_PGT_BASE = 0x3000
- _PMD_PGT_SIZE = 0x4000
- _PTE_PGT_BASE = 0x7000
- _PTE_PGT_SIZE = 0x1000
-)
-
-const (
-	// DAIF bits: debug, sError, IRQ, FIQ.
- _PSR_D_BIT = 0x00000200
- _PSR_A_BIT = 0x00000100
- _PSR_I_BIT = 0x00000080
- _PSR_F_BIT = 0x00000040
- _PSR_DAIF_SHIFT = 6
- _PSR_DAIF_MASK = 0xf << _PSR_DAIF_SHIFT
-
- // PSR bits.
- _PSR_MODE_EL0t = 0x00000000
- _PSR_MODE_EL1t = 0x00000004
- _PSR_MODE_EL1h = 0x00000005
- _PSR_MODE_MASK = 0x0000000f
-
- PsrFlagsClear = _PSR_MODE_MASK | _PSR_DAIF_MASK
- PsrModeMask = _PSR_MODE_MASK
-
- // KernelFlagsSet should always be set in the kernel.
- KernelFlagsSet = _PSR_MODE_EL1h | _PSR_D_BIT | _PSR_A_BIT | _PSR_I_BIT | _PSR_F_BIT
-
- // UserFlagsSet are always set in userspace.
- UserFlagsSet = _PSR_MODE_EL0t
-)
-
-// Vector is an exception vector.
-type Vector uintptr
-
-// Exception vectors.
-const (
- El1InvSync = iota
- El1InvIrq
- El1InvFiq
- El1InvError
-
- El1Sync
- El1Irq
- El1Fiq
- El1Err
-
- El0Sync
- El0Irq
- El0Fiq
- El0Err
-
- El0InvSync
- El0InvIrq
- El0InvFiq
- El0InvErr
-
- El1SyncDa
- El1SyncIa
- El1SyncSpPc
- El1SyncUndef
- El1SyncDbg
- El1SyncInv
-
- El0SyncSVC
- El0SyncDa
- El0SyncIa
- El0SyncFpsimdAcc
- El0SyncSveAcc
- El0SyncFpsimdExc
- El0SyncSys
- El0SyncSpPc
- El0SyncUndef
- El0SyncDbg
- El0SyncWfx
- El0SyncInv
-
- El0ErrNMI
- El0ErrBounce
-
- _NR_INTERRUPTS
-)
-
-// System call vectors.
-const (
- Syscall Vector = El0SyncSVC
- PageFault Vector = El0SyncDa
- VirtualizationException Vector = El0ErrBounce
-)
-
-// VirtualAddressBits returns the number of bits available for virtual addresses.
-func VirtualAddressBits() uint32 {
- return 48
-}
-
-// PhysicalAddressBits returns the number of bits available for physical addresses.
-func PhysicalAddressBits() uint32 {
- return 40
-}
diff --git a/pkg/ring0/defs.go b/pkg/ring0/defs.go
deleted file mode 100644
index b6e2012e8..000000000
--- a/pkg/ring0/defs.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ring0
-
-import (
- "gvisor.dev/gvisor/pkg/ring0/pagetables"
- "gvisor.dev/gvisor/pkg/sentry/arch"
- "gvisor.dev/gvisor/pkg/sentry/arch/fpu"
-)
-
-// Kernel is a global kernel object.
-//
-// This contains global state, shared by multiple CPUs.
-type Kernel struct {
- // PageTables are the kernel pagetables; this must be provided.
- PageTables *pagetables.PageTables
-
- KernelArchState
-}
-
-// Hooks are hooks for kernel functions.
-type Hooks interface {
- // KernelSyscall is called for kernel system calls.
- //
- // Return from this call will restore registers and return to the kernel: the
- // registers must be modified directly.
- //
- // If this function is not provided, a kernel exception results in halt.
- //
- // This must be go:nosplit, as this will be on the interrupt stack.
- // Closures are permitted, as the pointer to the closure frame is not
- // passed on the stack.
- KernelSyscall()
-
- // KernelException handles an exception during kernel execution.
- //
- // Return from this call will restore registers and return to the kernel: the
- // registers must be modified directly.
- //
- // If this function is not provided, a kernel exception results in halt.
- //
- // This must be go:nosplit, as this will be on the interrupt stack.
- // Closures are permitted, as the pointer to the closure frame is not
- // passed on the stack.
- KernelException(Vector)
-}
-
-// CPU is the per-CPU struct.
-type CPU struct {
- // self is a self reference.
- //
- // This is always guaranteed to be at offset zero.
- self *CPU
-
- // kernel is reference to the kernel that this CPU was initialized
- // with. This reference is kept for garbage collection purposes: CPU
- // registers may refer to objects within the Kernel object that cannot
- // be safely freed.
- kernel *Kernel
-
- // CPUArchState is architecture-specific state.
- CPUArchState
-
- // registers is a set of registers; these may be used on kernel system
- // calls and exceptions via the Registers function.
- registers arch.Registers
-
- // hooks are kernel hooks.
- hooks Hooks
-}
-
-// Registers returns a modifiable-copy of the kernel registers.
-//
-// This is explicitly safe to call during KernelException and KernelSyscall.
-//
-//go:nosplit
-func (c *CPU) Registers() *arch.Registers {
- return &c.registers
-}
-
-// SwitchOpts are passed to the Switch function.
-type SwitchOpts struct {
- // Registers are the user register state.
- Registers *arch.Registers
-
- // FloatingPointState is a byte pointer where floating point state is
- // saved and restored.
- FloatingPointState *fpu.State
-
- // PageTables are the application page tables.
- PageTables *pagetables.PageTables
-
- // Flush indicates that a TLB flush should be forced on switch.
- Flush bool
-
- // FullRestore indicates that an iret-based restore should be used.
- FullRestore bool
-
- // SwitchArchOpts are architecture-specific options.
- SwitchArchOpts
-}
diff --git a/pkg/ring0/defs_amd64.go b/pkg/ring0/defs_amd64.go
deleted file mode 100644
index 24f6e4cde..000000000
--- a/pkg/ring0/defs_amd64.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build amd64
-// +build amd64
-
-package ring0
-
-import (
- "gvisor.dev/gvisor/pkg/hostarch"
-)
-
-var (
- // UserspaceSize is the total size of userspace.
- UserspaceSize = uintptr(1) << (VirtualAddressBits() - 1)
-
- // MaximumUserAddress is the largest possible user address.
- MaximumUserAddress = (UserspaceSize - 1) & ^uintptr(hostarch.PageSize-1)
-
- // KernelStartAddress is the starting kernel address.
- KernelStartAddress = ^uintptr(0) - (UserspaceSize - 1)
-)
-
-// Segment indices and Selectors.
-const (
- // Index into GDT array.
- _ = iota // Null descriptor first.
- _ // Reserved (Linux is kernel 32).
- segKcode // Kernel code (64-bit).
- segKdata // Kernel data.
- segUcode32 // User code (32-bit).
- segUdata // User data.
- segUcode64 // User code (64-bit).
- segTss // Task segment descriptor.
- segTssHi // Upper bits for TSS.
- segLast // Last segment (terminal, not included).
-)
-
-// Selectors.
-const (
- Kcode Selector = segKcode << 3
- Kdata Selector = segKdata << 3
- Ucode32 Selector = (segUcode32 << 3) | 3
- Udata Selector = (segUdata << 3) | 3
- Ucode64 Selector = (segUcode64 << 3) | 3
- Tss Selector = segTss << 3
-)
-
-// Standard segments.
-var (
- UserCodeSegment32 SegmentDescriptor
- UserDataSegment SegmentDescriptor
- UserCodeSegment64 SegmentDescriptor
- KernelCodeSegment SegmentDescriptor
- KernelDataSegment SegmentDescriptor
-)
-
-// KernelArchState contains architecture-specific state.
-type KernelArchState struct {
- // cpuEntries is array of kernelEntry for all cpus.
- cpuEntries []kernelEntry
-
- // globalIDT is our set of interrupt gates.
- globalIDT *idt64
-}
-
-// kernelEntry contains minimal CPU-specific arch state
-// that can be mapped at the upper end of the address space.
-// A malicious application might steal information from it via CPU bugs.
-type kernelEntry struct {
- // stack is the stack used for interrupts on this CPU.
- stack [256]byte
-
- // scratch space for temporary usage.
- scratch0 uint64
-
- // stackTop is the top of the stack.
- stackTop uint64
-
- // cpuSelf is back reference to CPU.
- cpuSelf *CPU
-
- // kernelCR3 is the cr3 used for sentry kernel.
- kernelCR3 uintptr
-
- // gdt is the CPU's descriptor table.
- gdt descriptorTable
-
- // tss is the CPU's task state.
- tss TaskState64
-}
-
-// CPUArchState contains CPU-specific arch state.
-type CPUArchState struct {
- // errorCode is the error code from the last exception.
- errorCode uintptr
-
-	// errorType indicates the type of error code here; it is always set
-	// along with the errorCode value above.
-	//
-	// It will either be 1, which indicates a user error, or 0, indicating a
-	// kernel error. If ErrorCode returns user == false (a kernel error),
-	// then the error code cannot provide relevant information about the
-	// last exception.
- errorType uintptr
-
- *kernelEntry
-}
-
-// ErrorCode returns the last error code.
-//
-// The returned boolean indicates whether the error code corresponds to the
-// last user error or not. If it does not, then fault information must be
-// ignored. This is generally the result of a kernel fault while servicing a
-// user fault.
-//
-//go:nosplit
-func (c *CPU) ErrorCode() (value uintptr, user bool) {
- return c.errorCode, c.errorType != 0
-}
-
-// ClearErrorCode resets the error code.
-//
-//go:nosplit
-func (c *CPU) ClearErrorCode() {
- c.errorCode = 0 // No code.
- c.errorType = 1 // User mode.
-}
-
-// SwitchArchOpts are embedded in SwitchOpts.
-type SwitchArchOpts struct {
-	// UserPCID indicates the application PCID to be used on switch,
- // assuming that PCIDs are supported.
- //
- // Per pagetables_x86.go, a zero PCID implies a flush.
- UserPCID uint16
-
-	// KernelPCID indicates the kernel PCID to be used on return,
- // assuming that PCIDs are supported.
- //
- // Per pagetables_x86.go, a zero PCID implies a flush.
- KernelPCID uint16
-}
-
-func init() {
- KernelCodeSegment.setCode64(0, 0, 0)
- KernelDataSegment.setData(0, 0xffffffff, 0)
- UserCodeSegment32.setCode64(0, 0, 3)
- UserDataSegment.setData(0, 0xffffffff, 3)
- UserCodeSegment64.setCode64(0, 0, 3)
-}
diff --git a/pkg/ring0/defs_arm64.go b/pkg/ring0/defs_arm64.go
deleted file mode 100644
index 3e212516f..000000000
--- a/pkg/ring0/defs_arm64.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build arm64
-// +build arm64
-
-package ring0
-
-import (
- "gvisor.dev/gvisor/pkg/hostarch"
-)
-
-var (
- // UserspaceSize is the total size of userspace.
- UserspaceSize = uintptr(1) << (VirtualAddressBits())
-
- // MaximumUserAddress is the largest possible user address.
- MaximumUserAddress = (UserspaceSize - 1) & ^uintptr(hostarch.PageSize-1)
-
- // KernelStartAddress is the starting kernel address.
- KernelStartAddress = ^uintptr(0) - (UserspaceSize - 1)
-)
-
-// KernelArchState contains architecture-specific state.
-type KernelArchState struct {
-}
-
-// CPUArchState contains CPU-specific arch state.
-type CPUArchState struct {
- // stack is the stack used for interrupts on this CPU.
- stack [128]byte
-
- // errorCode is the error code from the last exception.
- errorCode uintptr
-
-	// errorType indicates the type of error code here; it is always set
-	// along with the errorCode value above.
-	//
-	// It will either be 1, which indicates a user error, or 0, indicating a
-	// kernel error. If ErrorCode returns user == false (a kernel error),
-	// then the error code cannot provide relevant information about the
-	// last exception.
- errorType uintptr
-
- // faultAddr is the value of far_el1.
- faultAddr uintptr
-
- // el0Fp is the address of application's fpstate.
- el0Fp uintptr
-
- // ttbr0Kvm is the value of ttbr0_el1 for sentry.
- ttbr0Kvm uintptr
-
-	// ttbr0App is the value of ttbr0_el1 for the application.
- ttbr0App uintptr
-
- // exception vector.
- vecCode Vector
-
- // application context pointer.
- appAddr uintptr
-
- // lazyVFP is the value of cpacr_el1.
- lazyVFP uintptr
-
- // appASID is the asid value of guest application.
- appASID uintptr
-}
-
-// ErrorCode returns the last error code.
-//
-// The returned boolean indicates whether the error code corresponds to the
-// last user error or not. If it does not, then fault information must be
-// ignored. This is generally the result of a kernel fault while servicing a
-// user fault.
-//
-//go:nosplit
-func (c *CPU) ErrorCode() (value uintptr, user bool) {
- return c.errorCode, c.errorType != 0
-}
-
-// ClearErrorCode resets the error code.
-//
-//go:nosplit
-func (c *CPU) ClearErrorCode() {
- c.errorCode = 0 // No code.
- c.errorType = 1 // User mode.
-}
-
-//go:nosplit
-func (c *CPU) GetFaultAddr() (value uintptr) {
- return c.faultAddr
-}
-
-//go:nosplit
-func (c *CPU) SetTtbr0Kvm(value uintptr) {
- c.ttbr0Kvm = value
-}
-
-//go:nosplit
-func (c *CPU) SetTtbr0App(value uintptr) {
- c.ttbr0App = value
-}
-
-//go:nosplit
-func (c *CPU) GetVector() (value Vector) {
- return c.vecCode
-}
-
-//go:nosplit
-func (c *CPU) SetAppAddr(value uintptr) {
- c.appAddr = value
-}
-
-// GetLazyVFP returns the value of cpacr_el1.
-//go:nosplit
-func (c *CPU) GetLazyVFP() (value uintptr) {
- return c.lazyVFP
-}
-
-// SwitchArchOpts are embedded in SwitchOpts.
-type SwitchArchOpts struct {
-	// UserASID indicates the application ASID to be used on switch.
- UserASID uint16
-
-	// KernelASID indicates the kernel ASID to be used on return.
- KernelASID uint16
-}
-
-func init() {
-}
diff --git a/pkg/ring0/defs_impl_amd64.go b/pkg/ring0/defs_impl_amd64.go
new file mode 100644
index 000000000..8005c6d94
--- /dev/null
+++ b/pkg/ring0/defs_impl_amd64.go
@@ -0,0 +1,598 @@
+// +build amd64
+// +build amd64
+// +build 386 amd64
+
+package ring0
+
+import (
+ "fmt"
+ "gvisor.dev/gvisor/pkg/cpuid"
+ "gvisor.dev/gvisor/pkg/hostarch"
+ "gvisor.dev/gvisor/pkg/ring0/pagetables"
+ "gvisor.dev/gvisor/pkg/sentry/arch"
+ "gvisor.dev/gvisor/pkg/sentry/arch/fpu"
+ "io"
+ "reflect"
+)
+
+// Kernel is a global kernel object.
+//
+// This contains global state, shared by multiple CPUs.
+type Kernel struct {
+ // PageTables are the kernel pagetables; this must be provided.
+ PageTables *pagetables.PageTables
+
+ KernelArchState
+}
+
+// Hooks are hooks for kernel functions.
+type Hooks interface {
+ // KernelSyscall is called for kernel system calls.
+ //
+ // Return from this call will restore registers and return to the kernel: the
+ // registers must be modified directly.
+ //
+ // If this function is not provided, a kernel exception results in halt.
+ //
+ // This must be go:nosplit, as this will be on the interrupt stack.
+ // Closures are permitted, as the pointer to the closure frame is not
+ // passed on the stack.
+ KernelSyscall()
+
+ // KernelException handles an exception during kernel execution.
+ //
+ // Return from this call will restore registers and return to the kernel: the
+ // registers must be modified directly.
+ //
+ // If this function is not provided, a kernel exception results in halt.
+ //
+ // This must be go:nosplit, as this will be on the interrupt stack.
+ // Closures are permitted, as the pointer to the closure frame is not
+ // passed on the stack.
+ KernelException(Vector)
+}
+
+// CPU is the per-CPU struct.
+type CPU struct {
+ // self is a self reference.
+ //
+ // This is always guaranteed to be at offset zero.
+ self *CPU
+
+ // kernel is reference to the kernel that this CPU was initialized
+ // with. This reference is kept for garbage collection purposes: CPU
+ // registers may refer to objects within the Kernel object that cannot
+ // be safely freed.
+ kernel *Kernel
+
+ // CPUArchState is architecture-specific state.
+ CPUArchState
+
+ // registers is a set of registers; these may be used on kernel system
+ // calls and exceptions via the Registers function.
+ registers arch.Registers
+
+ // hooks are kernel hooks.
+ hooks Hooks
+}
+
+// Registers returns a modifiable-copy of the kernel registers.
+//
+// This is explicitly safe to call during KernelException and KernelSyscall.
+//
+//go:nosplit
+func (c *CPU) Registers() *arch.Registers {
+ return &c.registers
+}
+
+// SwitchOpts are passed to the Switch function.
+type SwitchOpts struct {
+ // Registers are the user register state.
+ Registers *arch.Registers
+
+ // FloatingPointState is a byte pointer where floating point state is
+ // saved and restored.
+ FloatingPointState *fpu.State
+
+ // PageTables are the application page tables.
+ PageTables *pagetables.PageTables
+
+ // Flush indicates that a TLB flush should be forced on switch.
+ Flush bool
+
+ // FullRestore indicates that an iret-based restore should be used.
+ FullRestore bool
+
+ // SwitchArchOpts are architecture-specific options.
+ SwitchArchOpts
+}
+
+var (
+ // UserspaceSize is the total size of userspace.
+ UserspaceSize = uintptr(1) << (VirtualAddressBits() - 1)
+
+ // MaximumUserAddress is the largest possible user address.
+ MaximumUserAddress = (UserspaceSize - 1) & ^uintptr(hostarch.PageSize-1)
+
+ // KernelStartAddress is the starting kernel address.
+ KernelStartAddress = ^uintptr(0) - (UserspaceSize - 1)
+)
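For concreteness, assuming VirtualAddressBits reports the usual 48 bits and a 4KiB page size, these expressions evaluate as in the following standalone sketch (values in the comments):

package main

import "fmt"

func main() {
	const virtualAddressBits = 48    // typical CPUID-reported value, assumed here
	const pageSize = uintptr(0x1000) // hostarch.PageSize on x86-64

	userspaceSize := uintptr(1) << (virtualAddressBits - 1) // lower canonical half: 0x800000000000
	maximumUserAddress := (userspaceSize - 1) & ^(pageSize - 1)
	kernelStartAddress := ^uintptr(0) - (userspaceSize - 1)

	fmt.Printf("%#x\n", maximumUserAddress) // 0x7ffffffff000
	fmt.Printf("%#x\n", kernelStartAddress) // 0xffff800000000000
}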
+
+// Segment indices and Selectors.
+const (
+ // Index into GDT array.
+ _ = iota // Null descriptor first.
+ _ // Reserved (Linux is kernel 32).
+ segKcode // Kernel code (64-bit).
+ segKdata // Kernel data.
+ segUcode32 // User code (32-bit).
+ segUdata // User data.
+ segUcode64 // User code (64-bit).
+ segTss // Task segment descriptor.
+ segTssHi // Upper bits for TSS.
+ segLast // Last segment (terminal, not included).
+)
+
+// Selectors.
+const (
+ Kcode Selector = segKcode << 3
+ Kdata Selector = segKdata << 3
+ Ucode32 Selector = (segUcode32 << 3) | 3
+ Udata Selector = (segUdata << 3) | 3
+ Ucode64 Selector = (segUcode64 << 3) | 3
+ Tss Selector = segTss << 3
+)
+
+// Standard segments.
+var (
+ UserCodeSegment32 SegmentDescriptor
+ UserDataSegment SegmentDescriptor
+ UserCodeSegment64 SegmentDescriptor
+ KernelCodeSegment SegmentDescriptor
+ KernelDataSegment SegmentDescriptor
+)
+
+// KernelArchState contains architecture-specific state.
+type KernelArchState struct {
+ // cpuEntries is array of kernelEntry for all cpus.
+ cpuEntries []kernelEntry
+
+ // globalIDT is our set of interrupt gates.
+ globalIDT *idt64
+}
+
+// kernelEntry contains minimal CPU-specific arch state
+// that can be mapped at the upper end of the address space.
+// A malicious application might steal information from it via CPU bugs.
+type kernelEntry struct {
+ // stack is the stack used for interrupts on this CPU.
+ stack [256]byte
+
+ // scratch space for temporary usage.
+ scratch0 uint64
+
+ // stackTop is the top of the stack.
+ stackTop uint64
+
+ // cpuSelf is back reference to CPU.
+ cpuSelf *CPU
+
+ // kernelCR3 is the cr3 used for sentry kernel.
+ kernelCR3 uintptr
+
+ // gdt is the CPU's descriptor table.
+ gdt descriptorTable
+
+ // tss is the CPU's task state.
+ tss TaskState64
+}
+
+// CPUArchState contains CPU-specific arch state.
+type CPUArchState struct {
+ // errorCode is the error code from the last exception.
+ errorCode uintptr
+
+	// errorType indicates the type of error code here; it is always set
+	// along with the errorCode value above.
+	//
+	// It will either be 1, which indicates a user error, or 0, indicating a
+	// kernel error. If ErrorCode returns user == false (a kernel error),
+	// then the error code cannot provide relevant information about the
+	// last exception.
+ errorType uintptr
+
+ *kernelEntry
+}
+
+// ErrorCode returns the last error code.
+//
+// The returned boolean indicates whether the error code corresponds to the
+// last user error or not. If it does not, then fault information must be
+// ignored. This is generally the result of a kernel fault while servicing a
+// user fault.
+//
+//go:nosplit
+func (c *CPU) ErrorCode() (value uintptr, user bool) {
+ return c.errorCode, c.errorType != 0
+}
+
+// ClearErrorCode resets the error code.
+//
+//go:nosplit
+func (c *CPU) ClearErrorCode() {
+ c.errorCode = 0
+ c.errorType = 1
+}
+
+// SwitchArchOpts are embedded in SwitchOpts.
+type SwitchArchOpts struct {
+	// UserPCID indicates the application PCID to be used on switch,
+ // assuming that PCIDs are supported.
+ //
+ // Per pagetables_x86.go, a zero PCID implies a flush.
+ UserPCID uint16
+
+	// KernelPCID indicates the kernel PCID to be used on return,
+ // assuming that PCIDs are supported.
+ //
+ // Per pagetables_x86.go, a zero PCID implies a flush.
+ KernelPCID uint16
+}
+
+func init() {
+ KernelCodeSegment.setCode64(0, 0, 0)
+ KernelDataSegment.setData(0, 0xffffffff, 0)
+ UserCodeSegment32.setCode64(0, 0, 3)
+ UserDataSegment.setData(0, 0xffffffff, 3)
+ UserCodeSegment64.setCode64(0, 0, 3)
+}
+
+// Emit prints architecture-specific offsets.
+func Emit(w io.Writer) {
+ fmt.Fprintf(w, "// Automatically generated, do not edit.\n")
+
+ c := &CPU{}
+ fmt.Fprintf(w, "\n// CPU offsets.\n")
+ fmt.Fprintf(w, "#define CPU_REGISTERS 0x%02x\n", reflect.ValueOf(&c.registers).Pointer()-reflect.ValueOf(c).Pointer())
+ fmt.Fprintf(w, "#define CPU_ERROR_CODE 0x%02x\n", reflect.ValueOf(&c.errorCode).Pointer()-reflect.ValueOf(c).Pointer())
+ fmt.Fprintf(w, "#define CPU_ERROR_TYPE 0x%02x\n", reflect.ValueOf(&c.errorType).Pointer()-reflect.ValueOf(c).Pointer())
+ fmt.Fprintf(w, "#define CPU_ENTRY 0x%02x\n", reflect.ValueOf(&c.kernelEntry).Pointer()-reflect.ValueOf(c).Pointer())
+
+ e := &kernelEntry{}
+ fmt.Fprintf(w, "\n// CPU entry offsets.\n")
+ fmt.Fprintf(w, "#define ENTRY_SCRATCH0 0x%02x\n", reflect.ValueOf(&e.scratch0).Pointer()-reflect.ValueOf(e).Pointer())
+ fmt.Fprintf(w, "#define ENTRY_STACK_TOP 0x%02x\n", reflect.ValueOf(&e.stackTop).Pointer()-reflect.ValueOf(e).Pointer())
+ fmt.Fprintf(w, "#define ENTRY_CPU_SELF 0x%02x\n", reflect.ValueOf(&e.cpuSelf).Pointer()-reflect.ValueOf(e).Pointer())
+ fmt.Fprintf(w, "#define ENTRY_KERNEL_CR3 0x%02x\n", reflect.ValueOf(&e.kernelCR3).Pointer()-reflect.ValueOf(e).Pointer())
+
+ fmt.Fprintf(w, "\n// Bits.\n")
+ fmt.Fprintf(w, "#define _RFLAGS_IF 0x%02x\n", _RFLAGS_IF)
+ fmt.Fprintf(w, "#define _RFLAGS_IOPL0 0x%02x\n", _RFLAGS_IOPL0)
+ fmt.Fprintf(w, "#define _KERNEL_FLAGS 0x%02x\n", KernelFlagsSet)
+
+ fmt.Fprintf(w, "\n// Vectors.\n")
+ fmt.Fprintf(w, "#define DivideByZero 0x%02x\n", DivideByZero)
+ fmt.Fprintf(w, "#define Debug 0x%02x\n", Debug)
+ fmt.Fprintf(w, "#define NMI 0x%02x\n", NMI)
+ fmt.Fprintf(w, "#define Breakpoint 0x%02x\n", Breakpoint)
+ fmt.Fprintf(w, "#define Overflow 0x%02x\n", Overflow)
+ fmt.Fprintf(w, "#define BoundRangeExceeded 0x%02x\n", BoundRangeExceeded)
+ fmt.Fprintf(w, "#define InvalidOpcode 0x%02x\n", InvalidOpcode)
+ fmt.Fprintf(w, "#define DeviceNotAvailable 0x%02x\n", DeviceNotAvailable)
+ fmt.Fprintf(w, "#define DoubleFault 0x%02x\n", DoubleFault)
+ fmt.Fprintf(w, "#define CoprocessorSegmentOverrun 0x%02x\n", CoprocessorSegmentOverrun)
+ fmt.Fprintf(w, "#define InvalidTSS 0x%02x\n", InvalidTSS)
+ fmt.Fprintf(w, "#define SegmentNotPresent 0x%02x\n", SegmentNotPresent)
+ fmt.Fprintf(w, "#define StackSegmentFault 0x%02x\n", StackSegmentFault)
+ fmt.Fprintf(w, "#define GeneralProtectionFault 0x%02x\n", GeneralProtectionFault)
+ fmt.Fprintf(w, "#define PageFault 0x%02x\n", PageFault)
+ fmt.Fprintf(w, "#define X87FloatingPointException 0x%02x\n", X87FloatingPointException)
+ fmt.Fprintf(w, "#define AlignmentCheck 0x%02x\n", AlignmentCheck)
+ fmt.Fprintf(w, "#define MachineCheck 0x%02x\n", MachineCheck)
+ fmt.Fprintf(w, "#define SIMDFloatingPointException 0x%02x\n", SIMDFloatingPointException)
+ fmt.Fprintf(w, "#define VirtualizationException 0x%02x\n", VirtualizationException)
+ fmt.Fprintf(w, "#define SecurityException 0x%02x\n", SecurityException)
+ fmt.Fprintf(w, "#define SyscallInt80 0x%02x\n", SyscallInt80)
+ fmt.Fprintf(w, "#define Syscall 0x%02x\n", Syscall)
+
+ p := &arch.Registers{}
+ fmt.Fprintf(w, "\n// Ptrace registers.\n")
+ fmt.Fprintf(w, "#define PTRACE_R15 0x%02x\n", reflect.ValueOf(&p.R15).Pointer()-reflect.ValueOf(p).Pointer())
+ fmt.Fprintf(w, "#define PTRACE_R14 0x%02x\n", reflect.ValueOf(&p.R14).Pointer()-reflect.ValueOf(p).Pointer())
+ fmt.Fprintf(w, "#define PTRACE_R13 0x%02x\n", reflect.ValueOf(&p.R13).Pointer()-reflect.ValueOf(p).Pointer())
+ fmt.Fprintf(w, "#define PTRACE_R12 0x%02x\n", reflect.ValueOf(&p.R12).Pointer()-reflect.ValueOf(p).Pointer())
+ fmt.Fprintf(w, "#define PTRACE_RBP 0x%02x\n", reflect.ValueOf(&p.Rbp).Pointer()-reflect.ValueOf(p).Pointer())
+ fmt.Fprintf(w, "#define PTRACE_RBX 0x%02x\n", reflect.ValueOf(&p.Rbx).Pointer()-reflect.ValueOf(p).Pointer())
+ fmt.Fprintf(w, "#define PTRACE_R11 0x%02x\n", reflect.ValueOf(&p.R11).Pointer()-reflect.ValueOf(p).Pointer())
+ fmt.Fprintf(w, "#define PTRACE_R10 0x%02x\n", reflect.ValueOf(&p.R10).Pointer()-reflect.ValueOf(p).Pointer())
+ fmt.Fprintf(w, "#define PTRACE_R9 0x%02x\n", reflect.ValueOf(&p.R9).Pointer()-reflect.ValueOf(p).Pointer())
+ fmt.Fprintf(w, "#define PTRACE_R8 0x%02x\n", reflect.ValueOf(&p.R8).Pointer()-reflect.ValueOf(p).Pointer())
+ fmt.Fprintf(w, "#define PTRACE_RAX 0x%02x\n", reflect.ValueOf(&p.Rax).Pointer()-reflect.ValueOf(p).Pointer())
+ fmt.Fprintf(w, "#define PTRACE_RCX 0x%02x\n", reflect.ValueOf(&p.Rcx).Pointer()-reflect.ValueOf(p).Pointer())
+ fmt.Fprintf(w, "#define PTRACE_RDX 0x%02x\n", reflect.ValueOf(&p.Rdx).Pointer()-reflect.ValueOf(p).Pointer())
+ fmt.Fprintf(w, "#define PTRACE_RSI 0x%02x\n", reflect.ValueOf(&p.Rsi).Pointer()-reflect.ValueOf(p).Pointer())
+ fmt.Fprintf(w, "#define PTRACE_RDI 0x%02x\n", reflect.ValueOf(&p.Rdi).Pointer()-reflect.ValueOf(p).Pointer())
+ fmt.Fprintf(w, "#define PTRACE_ORIGRAX 0x%02x\n", reflect.ValueOf(&p.Orig_rax).Pointer()-reflect.ValueOf(p).Pointer())
+ fmt.Fprintf(w, "#define PTRACE_RIP 0x%02x\n", reflect.ValueOf(&p.Rip).Pointer()-reflect.ValueOf(p).Pointer())
+ fmt.Fprintf(w, "#define PTRACE_CS 0x%02x\n", reflect.ValueOf(&p.Cs).Pointer()-reflect.ValueOf(p).Pointer())
+ fmt.Fprintf(w, "#define PTRACE_FLAGS 0x%02x\n", reflect.ValueOf(&p.Eflags).Pointer()-reflect.ValueOf(p).Pointer())
+ fmt.Fprintf(w, "#define PTRACE_RSP 0x%02x\n", reflect.ValueOf(&p.Rsp).Pointer()-reflect.ValueOf(p).Pointer())
+ fmt.Fprintf(w, "#define PTRACE_SS 0x%02x\n", reflect.ValueOf(&p.Ss).Pointer()-reflect.ValueOf(p).Pointer())
+ fmt.Fprintf(w, "#define PTRACE_FS_BASE 0x%02x\n", reflect.ValueOf(&p.Fs_base).Pointer()-reflect.ValueOf(p).Pointer())
+ fmt.Fprintf(w, "#define PTRACE_GS_BASE 0x%02x\n", reflect.ValueOf(&p.Gs_base).Pointer()-reflect.ValueOf(p).Pointer())
+}
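The #defines above are produced by subtracting the struct's base address from each field's address via reflect; the generated entry_impl_amd64.s header later in this diff is the resulting output. A standalone sketch of that offset computation on an invented struct, with unsafe.Offsetof shown for comparison:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// cpu is a stand-in struct, not the real ring0.CPU.
type cpu struct {
	self      *cpu
	errorCode uintptr
}

func main() {
	c := &cpu{}
	// The reflect form used by Emit: field address minus struct address.
	off := reflect.ValueOf(&c.errorCode).Pointer() - reflect.ValueOf(c).Pointer()
	fmt.Printf("#define CPU_ERROR_CODE 0x%02x\n", off) // 0x08 on 64-bit
	// Equivalent result without reflect.
	fmt.Printf("0x%02x\n", unsafe.Offsetof(c.errorCode))
}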
+
+// Useful bits.
+const (
+ _CR0_PE = 1 << 0
+ _CR0_ET = 1 << 4
+ _CR0_AM = 1 << 18
+ _CR0_PG = 1 << 31
+
+ _CR4_PSE = 1 << 4
+ _CR4_PAE = 1 << 5
+ _CR4_PGE = 1 << 7
+ _CR4_OSFXSR = 1 << 9
+ _CR4_OSXMMEXCPT = 1 << 10
+ _CR4_FSGSBASE = 1 << 16
+ _CR4_PCIDE = 1 << 17
+ _CR4_OSXSAVE = 1 << 18
+ _CR4_SMEP = 1 << 20
+
+ _RFLAGS_AC = 1 << 18
+ _RFLAGS_NT = 1 << 14
+ _RFLAGS_IOPL0 = 1 << 12
+ _RFLAGS_IOPL1 = 1 << 13
+ _RFLAGS_IOPL = _RFLAGS_IOPL0 | _RFLAGS_IOPL1
+ _RFLAGS_DF = 1 << 10
+ _RFLAGS_IF = 1 << 9
+ _RFLAGS_STEP = 1 << 8
+ _RFLAGS_RESERVED = 1 << 1
+
+ _EFER_SCE = 0x001
+ _EFER_LME = 0x100
+ _EFER_LMA = 0x400
+ _EFER_NX = 0x800
+
+ _MSR_STAR = 0xc0000081
+ _MSR_LSTAR = 0xc0000082
+ _MSR_CSTAR = 0xc0000083
+ _MSR_SYSCALL_MASK = 0xc0000084
+ _MSR_PLATFORM_INFO = 0xce
+ _MSR_MISC_FEATURES = 0x140
+
+ _PLATFORM_INFO_CPUID_FAULT = 1 << 31
+
+ _MISC_FEATURE_CPUID_TRAP = 0x1
+)
+
+const (
+ // KernelFlagsSet should always be set in the kernel.
+ KernelFlagsSet = _RFLAGS_RESERVED
+
+ // UserFlagsSet are always set in userspace.
+ //
+ // _RFLAGS_IOPL is a set of two bits and it shows the I/O privilege
+ // level. The Current Privilege Level (CPL) of the task must be less
+ // than or equal to the IOPL in order for the task or program to access
+ // I/O ports.
+ //
+	// Here, _RFLAGS_IOPL0 is used only to determine whether the task is
+	// running in kernel or user mode. In user mode, the CPL is always 3,
+	// and it doesn't matter what the IOPL is set to as long as it is below
+	// the CPL.
+	//
+	// We need one bit that is always different in user and kernel modes.
+	// And we have to remember that even though we have KernelFlagsClear,
+	// we can still see some of these flags in kernel mode. This can happen
+	// when the Go runtime switches to a goroutine that was saved in host
+	// mode. On restore, the popf instruction is used to restore flags,
+	// which means that all flags the goroutine had in host mode will be
+	// restored in kernel mode.
+	//
+	// _RFLAGS_IOPL0 is never set in host and kernel modes, and we always
+	// set it in user mode. So if this flag is set, the task is running in
+	// user mode, and if it isn't set, the task is running in kernel mode.
+ UserFlagsSet = _RFLAGS_RESERVED | _RFLAGS_IF | _RFLAGS_IOPL0
+
+ // KernelFlagsClear should always be clear in the kernel.
+ KernelFlagsClear = _RFLAGS_STEP | _RFLAGS_IF | _RFLAGS_IOPL | _RFLAGS_AC | _RFLAGS_NT
+
+ // UserFlagsClear are always cleared in userspace.
+ UserFlagsClear = _RFLAGS_NT | _RFLAGS_IOPL1
+)
+
+// IsKernelFlags returns true if rflags corresponds to the kernel mode.
+//
+//go:nosplit
+func IsKernelFlags(rflags uint64) bool {
+ return rflags&_RFLAGS_IOPL0 == 0
+}
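Restating the effect of the comment above in runnable form (bit values copied from the constants, purely illustrative): KernelFlagsSet carries only the reserved bit and is classified as kernel mode, while UserFlagsSet carries _RFLAGS_IOPL0 and is classified as user mode.

package main

import "fmt"

const (
	rflagsReserved = uint64(1) << 1
	rflagsIF       = uint64(1) << 9
	rflagsIOPL0    = uint64(1) << 12

	kernelFlagsSet = rflagsReserved
	userFlagsSet   = rflagsReserved | rflagsIF | rflagsIOPL0
)

// isKernelFlags restates IsKernelFlags: kernel mode never sets IOPL0.
func isKernelFlags(rflags uint64) bool { return rflags&rflagsIOPL0 == 0 }

func main() {
	fmt.Println(isKernelFlags(kernelFlagsSet)) // true
	fmt.Println(isKernelFlags(userFlagsSet))   // false
}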
+
+// Vector is an exception vector.
+type Vector uintptr
+
+// Exception vectors.
+const (
+ DivideByZero Vector = iota
+ Debug
+ NMI
+ Breakpoint
+ Overflow
+ BoundRangeExceeded
+ InvalidOpcode
+ DeviceNotAvailable
+ DoubleFault
+ CoprocessorSegmentOverrun
+ InvalidTSS
+ SegmentNotPresent
+ StackSegmentFault
+ GeneralProtectionFault
+ PageFault
+ _
+ X87FloatingPointException
+ AlignmentCheck
+ MachineCheck
+ SIMDFloatingPointException
+ VirtualizationException
+ SecurityException = 0x1e
+ SyscallInt80 = 0x80
+ _NR_INTERRUPTS = 0x100
+)
+
+// System call vectors.
+const (
+ Syscall Vector = _NR_INTERRUPTS
+)
+
+// VirtualAddressBits returns the number of bits available for virtual addresses.
+//
+// Note that sign-extension semantics apply to the highest order bit.
+//
+// FIXME(b/69382326): This should use the cpuid passed to Init.
+func VirtualAddressBits() uint32 {
+ ax, _, _, _ := cpuid.HostID(0x80000008, 0)
+ return (ax >> 8) & 0xff
+}
+
+// PhysicalAddressBits returns the number of bits available for physical addresses.
+//
+// FIXME(b/69382326): This should use the cpuid passed to Init.
+func PhysicalAddressBits() uint32 {
+ ax, _, _, _ := cpuid.HostID(0x80000008, 0)
+ return ax & 0xff
+}
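Both helpers decode CPUID leaf 0x80000008: EAX bits 7:0 give the physical address width and bits 15:8 the linear (virtual) address width. A small sketch decoding a representative EAX value (0x3028 is an assumed example, corresponding to 48 virtual / 40 physical bits):

package main

import "fmt"

func main() {
	ax := uint32(0x3028) // example EAX from CPUID leaf 0x80000008
	virtualBits := (ax >> 8) & 0xff
	physicalBits := ax & 0xff
	fmt.Println(virtualBits, physicalBits) // 48 40
}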
+
+// Selector is a segment Selector.
+type Selector uint16
+
+// SegmentDescriptor is a segment descriptor.
+type SegmentDescriptor struct {
+ bits [2]uint32
+}
+
+// descriptorTable is a collection of descriptors.
+type descriptorTable [32]SegmentDescriptor
+
+// SegmentDescriptorFlags are typed flags within a descriptor.
+type SegmentDescriptorFlags uint32
+
+// SegmentDescriptorFlag declarations.
+const (
+ SegmentDescriptorAccess SegmentDescriptorFlags = 1 << 8 // Access bit (always set).
+ SegmentDescriptorWrite = 1 << 9 // Write permission.
+ SegmentDescriptorExpandDown = 1 << 10 // Grows down, not used.
+ SegmentDescriptorExecute = 1 << 11 // Execute permission.
+ SegmentDescriptorSystem = 1 << 12 // Zero => system, 1 => user code/data.
+ SegmentDescriptorPresent = 1 << 15 // Present.
+ SegmentDescriptorAVL = 1 << 20 // Available.
+ SegmentDescriptorLong = 1 << 21 // Long mode.
+ SegmentDescriptorDB = 1 << 22 // 16 or 32-bit.
+ SegmentDescriptorG = 1 << 23 // Granularity: page or byte.
+)
+
+// Base returns the descriptor's base linear address.
+func (d *SegmentDescriptor) Base() uint32 {
+ return d.bits[1]&0xFF000000 | (d.bits[1]&0x000000FF)<<16 | d.bits[0]>>16
+}
+
+// Limit returns the descriptor size.
+func (d *SegmentDescriptor) Limit() uint32 {
+ l := d.bits[0]&0xFFFF | d.bits[1]&0xF0000
+ if d.bits[1]&uint32(SegmentDescriptorG) != 0 {
+ l <<= 12
+ l |= 0xFFF
+ }
+ return l
+}
+
+// Flags returns descriptor flags.
+func (d *SegmentDescriptor) Flags() SegmentDescriptorFlags {
+ return SegmentDescriptorFlags(d.bits[1] & 0x00F09F00)
+}
+
+// DPL returns the descriptor privilege level.
+func (d *SegmentDescriptor) DPL() int {
+ return int((d.bits[1] >> 13) & 3)
+}
+
+func (d *SegmentDescriptor) setNull() {
+ d.bits[0] = 0
+ d.bits[1] = 0
+}
+
+func (d *SegmentDescriptor) set(base, limit uint32, dpl int, flags SegmentDescriptorFlags) {
+ flags |= SegmentDescriptorPresent
+ if limit>>12 != 0 {
+ limit >>= 12
+ flags |= SegmentDescriptorG
+ }
+ d.bits[0] = base<<16 | limit&0xFFFF
+ d.bits[1] = base&0xFF000000 | (base>>16)&0xFF | limit&0x000F0000 | uint32(flags) | uint32(dpl)<<13
+}
+
+func (d *SegmentDescriptor) setCode32(base, limit uint32, dpl int) {
+ d.set(base, limit, dpl,
+ SegmentDescriptorDB|
+ SegmentDescriptorExecute|
+ SegmentDescriptorSystem)
+}
+
+func (d *SegmentDescriptor) setCode64(base, limit uint32, dpl int) {
+ d.set(base, limit, dpl,
+ SegmentDescriptorG|
+ SegmentDescriptorLong|
+ SegmentDescriptorExecute|
+ SegmentDescriptorSystem)
+}
+
+func (d *SegmentDescriptor) setData(base, limit uint32, dpl int) {
+ d.set(base, limit, dpl,
+ SegmentDescriptorWrite|
+ SegmentDescriptorSystem)
+}
+
+// setHi is only used for the TSS segment, which is magically 64-bits.
+func (d *SegmentDescriptor) setHi(base uint32) {
+ d.bits[0] = base
+ d.bits[1] = 0
+}
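The limit handling above is the classic GDT granularity trick: a 20-bit limit field, counted in 4KiB pages once the G flag is set, which is how setData(0, 0xffffffff, ...) covers the full 4GiB. A standalone sketch of the Limit decoding for that case (only the limit and G bits are populated; the other flag bits are omitted for brevity):

package main

import "fmt"

func main() {
	const segmentDescriptorG = uint32(1) << 23 // granularity: limit counted in 4KiB pages

	// Descriptor words as set(0, 0xffffffff, ...) would populate them for the
	// limit: low 16 bits in bits[0], high nibble plus G in bits[1].
	low, high := uint32(0x0000ffff), uint32(0x000f0000)|segmentDescriptorG

	l := low&0xffff | high&0xf0000
	if high&segmentDescriptorG != 0 {
		l <<= 12
		l |= 0xfff
	}
	fmt.Printf("%#x\n", l) // 0xffffffff, i.e. the segment spans the full 4GiB
}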
+
+// Gate64 is a 64-bit task, trap, or interrupt gate.
+type Gate64 struct {
+ bits [4]uint32
+}
+
+// idt64 is a 64-bit interrupt descriptor table.
+type idt64 [_NR_INTERRUPTS]Gate64
+
+func (g *Gate64) setInterrupt(cs Selector, rip uint64, dpl int, ist int) {
+ g.bits[0] = uint32(cs)<<16 | uint32(rip)&0xFFFF
+ g.bits[1] = uint32(rip)&0xFFFF0000 | SegmentDescriptorPresent | uint32(dpl)<<13 | 14<<8 | uint32(ist)&0x7
+ g.bits[2] = uint32(rip >> 32)
+}
+
+func (g *Gate64) setTrap(cs Selector, rip uint64, dpl int, ist int) {
+ g.setInterrupt(cs, rip, dpl, ist)
+ g.bits[1] |= 1 << 8
+}
+
+// TaskState64 is a 64-bit task state structure.
+type TaskState64 struct {
+ _ uint32
+ rsp0Lo, rsp0Hi uint32
+ rsp1Lo, rsp1Hi uint32
+ rsp2Lo, rsp2Hi uint32
+ _ [2]uint32
+ ist1Lo, ist1Hi uint32
+ ist2Lo, ist2Hi uint32
+ ist3Lo, ist3Hi uint32
+ ist4Lo, ist4Hi uint32
+ ist5Lo, ist5Hi uint32
+ ist6Lo, ist6Hi uint32
+ ist7Lo, ist7Hi uint32
+ _ [2]uint32
+ _ uint16
+ ioPerm uint16
+}
diff --git a/pkg/ring0/offsets_arm64.go b/pkg/ring0/defs_impl_arm64.go
index 60b2c4074..90f6af9fc 100644
--- a/pkg/ring0/offsets_arm64.go
+++ b/pkg/ring0/defs_impl_arm64.go
@@ -1,30 +1,336 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build arm64
+// +build arm64
+// +build arm64
// +build arm64
package ring0
import (
"fmt"
+ "gvisor.dev/gvisor/pkg/hostarch"
+ "gvisor.dev/gvisor/pkg/ring0/pagetables"
+ "gvisor.dev/gvisor/pkg/sentry/arch"
+ "gvisor.dev/gvisor/pkg/sentry/arch/fpu"
"io"
"reflect"
+)
- "gvisor.dev/gvisor/pkg/sentry/arch"
+// Useful bits.
+const (
+ _PGD_PGT_BASE = 0x1000
+ _PGD_PGT_SIZE = 0x1000
+ _PUD_PGT_BASE = 0x2000
+ _PUD_PGT_SIZE = 0x1000
+ _PMD_PGT_BASE = 0x3000
+ _PMD_PGT_SIZE = 0x4000
+ _PTE_PGT_BASE = 0x7000
+ _PTE_PGT_SIZE = 0x1000
+)
+
+const (
+	// DAIF bits: debug, sError, IRQ, FIQ.
+ _PSR_D_BIT = 0x00000200
+ _PSR_A_BIT = 0x00000100
+ _PSR_I_BIT = 0x00000080
+ _PSR_F_BIT = 0x00000040
+ _PSR_DAIF_SHIFT = 6
+ _PSR_DAIF_MASK = 0xf << _PSR_DAIF_SHIFT
+
+ // PSR bits.
+ _PSR_MODE_EL0t = 0x00000000
+ _PSR_MODE_EL1t = 0x00000004
+ _PSR_MODE_EL1h = 0x00000005
+ _PSR_MODE_MASK = 0x0000000f
+
+ PsrFlagsClear = _PSR_MODE_MASK | _PSR_DAIF_MASK
+ PsrModeMask = _PSR_MODE_MASK
+
+ // KernelFlagsSet should always be set in the kernel.
+ KernelFlagsSet = _PSR_MODE_EL1h | _PSR_D_BIT | _PSR_A_BIT | _PSR_I_BIT | _PSR_F_BIT
+
+ // UserFlagsSet are always set in userspace.
+ UserFlagsSet = _PSR_MODE_EL0t
+)
+
+// Vector is an exception vector.
+type Vector uintptr
+
+// Exception vectors.
+const (
+ El1InvSync = iota
+ El1InvIrq
+ El1InvFiq
+ El1InvError
+
+ El1Sync
+ El1Irq
+ El1Fiq
+ El1Err
+
+ El0Sync
+ El0Irq
+ El0Fiq
+ El0Err
+
+ El0InvSync
+ El0InvIrq
+ El0InvFiq
+ El0InvErr
+
+ El1SyncDa
+ El1SyncIa
+ El1SyncSpPc
+ El1SyncUndef
+ El1SyncDbg
+ El1SyncInv
+
+ El0SyncSVC
+ El0SyncDa
+ El0SyncIa
+ El0SyncFpsimdAcc
+ El0SyncSveAcc
+ El0SyncFpsimdExc
+ El0SyncSys
+ El0SyncSpPc
+ El0SyncUndef
+ El0SyncDbg
+ El0SyncWfx
+ El0SyncInv
+
+ El0ErrNMI
+ El0ErrBounce
+
+ _NR_INTERRUPTS
)
+// System call vectors.
+const (
+ Syscall Vector = El0SyncSVC
+ PageFault Vector = El0SyncDa
+ VirtualizationException Vector = El0ErrBounce
+)
+
+// VirtualAddressBits returns the number of bits available for virtual addresses.
+func VirtualAddressBits() uint32 {
+ return 48
+}
+
+// PhysicalAddressBits returns the number of bits available for physical addresses.
+func PhysicalAddressBits() uint32 {
+ return 40
+}
+
+// Kernel is a global kernel object.
+//
+// This contains global state, shared by multiple CPUs.
+type Kernel struct {
+ // PageTables are the kernel pagetables; this must be provided.
+ PageTables *pagetables.PageTables
+
+ KernelArchState
+}
+
+// Hooks are hooks for kernel functions.
+type Hooks interface {
+ // KernelSyscall is called for kernel system calls.
+ //
+ // Return from this call will restore registers and return to the kernel: the
+ // registers must be modified directly.
+ //
+ // If this function is not provided, a kernel exception results in halt.
+ //
+ // This must be go:nosplit, as this will be on the interrupt stack.
+ // Closures are permitted, as the pointer to the closure frame is not
+ // passed on the stack.
+ KernelSyscall()
+
+ // KernelException handles an exception during kernel execution.
+ //
+ // Return from this call will restore registers and return to the kernel: the
+ // registers must be modified directly.
+ //
+ // If this function is not provided, a kernel exception results in halt.
+ //
+ // This must be go:nosplit, as this will be on the interrupt stack.
+ // Closures are permitted, as the pointer to the closure frame is not
+ // passed on the stack.
+ KernelException(Vector)
+}
+
+// CPU is the per-CPU struct.
+type CPU struct {
+ // self is a self reference.
+ //
+ // This is always guaranteed to be at offset zero.
+ self *CPU
+
+ // kernel is reference to the kernel that this CPU was initialized
+ // with. This reference is kept for garbage collection purposes: CPU
+ // registers may refer to objects within the Kernel object that cannot
+ // be safely freed.
+ kernel *Kernel
+
+ // CPUArchState is architecture-specific state.
+ CPUArchState
+
+ // registers is a set of registers; these may be used on kernel system
+ // calls and exceptions via the Registers function.
+ registers arch.Registers
+
+ // hooks are kernel hooks.
+ hooks Hooks
+}
+
+// Registers returns a modifiable-copy of the kernel registers.
+//
+// This is explicitly safe to call during KernelException and KernelSyscall.
+//
+//go:nosplit
+func (c *CPU) Registers() *arch.Registers {
+ return &c.registers
+}
+
+// SwitchOpts are passed to the Switch function.
+type SwitchOpts struct {
+ // Registers are the user register state.
+ Registers *arch.Registers
+
+ // FloatingPointState is a byte pointer where floating point state is
+ // saved and restored.
+ FloatingPointState *fpu.State
+
+ // PageTables are the application page tables.
+ PageTables *pagetables.PageTables
+
+ // Flush indicates that a TLB flush should be forced on switch.
+ Flush bool
+
+ // FullRestore indicates that an iret-based restore should be used.
+ FullRestore bool
+
+ // SwitchArchOpts are architecture-specific options.
+ SwitchArchOpts
+}
+
+var (
+ // UserspaceSize is the total size of userspace.
+ UserspaceSize = uintptr(1) << (VirtualAddressBits())
+
+ // MaximumUserAddress is the largest possible user address.
+ MaximumUserAddress = (UserspaceSize - 1) & ^uintptr(hostarch.PageSize-1)
+
+ // KernelStartAddress is the starting kernel address.
+ KernelStartAddress = ^uintptr(0) - (UserspaceSize - 1)
+)
+
+// KernelArchState contains architecture-specific state.
+type KernelArchState struct {
+}
+
+// CPUArchState contains CPU-specific arch state.
+type CPUArchState struct {
+ // stack is the stack used for interrupts on this CPU.
+ stack [128]byte
+
+ // errorCode is the error code from the last exception.
+ errorCode uintptr
+
+	// errorType indicates the type of error code here; it is always set
+	// along with the errorCode value above.
+	//
+	// It will either be 1, which indicates a user error, or 0, indicating a
+	// kernel error. If ErrorCode returns user == false (a kernel error),
+	// then the error code cannot provide relevant information about the
+	// last exception.
+ errorType uintptr
+
+ // faultAddr is the value of far_el1.
+ faultAddr uintptr
+
+ // el0Fp is the address of application's fpstate.
+ el0Fp uintptr
+
+ // ttbr0Kvm is the value of ttbr0_el1 for sentry.
+ ttbr0Kvm uintptr
+
+	// ttbr0App is the value of ttbr0_el1 for the application.
+ ttbr0App uintptr
+
+ // exception vector.
+ vecCode Vector
+
+ // application context pointer.
+ appAddr uintptr
+
+ // lazyVFP is the value of cpacr_el1.
+ lazyVFP uintptr
+
+ // appASID is the asid value of guest application.
+ appASID uintptr
+}
+
+// ErrorCode returns the last error code.
+//
+// The returned boolean indicates whether the error code corresponds to the
+// last user error or not. If it does not, then fault information must be
+// ignored. This is generally the result of a kernel fault while servicing a
+// user fault.
+//
+//go:nosplit
+func (c *CPU) ErrorCode() (value uintptr, user bool) {
+ return c.errorCode, c.errorType != 0
+}
+
+// ClearErrorCode resets the error code.
+//
+//go:nosplit
+func (c *CPU) ClearErrorCode() {
+ c.errorCode = 0
+ c.errorType = 1
+}
+
+// GetFaultAddr returns the fault address (far_el1) recorded for the last
+// exception.
+//
+//go:nosplit
+func (c *CPU) GetFaultAddr() (value uintptr) {
+	return c.faultAddr
+}
+
+// SetTtbr0Kvm sets the ttbr0_el1 value used for the sentry.
+//
+//go:nosplit
+func (c *CPU) SetTtbr0Kvm(value uintptr) {
+	c.ttbr0Kvm = value
+}
+
+// SetTtbr0App sets the ttbr0_el1 value used for the application.
+//
+//go:nosplit
+func (c *CPU) SetTtbr0App(value uintptr) {
+	c.ttbr0App = value
+}
+
+// GetVector returns the last exception vector.
+//
+//go:nosplit
+func (c *CPU) GetVector() (value Vector) {
+	return c.vecCode
+}
+
+// SetAppAddr sets the application context pointer.
+//
+//go:nosplit
+func (c *CPU) SetAppAddr(value uintptr) {
+	c.appAddr = value
+}
+
+// GetLazyVFP returns the value of cpacr_el1.
+//go:nosplit
+func (c *CPU) GetLazyVFP() (value uintptr) {
+ return c.lazyVFP
+}
+
+// SwitchArchOpts are embedded in SwitchOpts.
+type SwitchArchOpts struct {
+	// UserASID indicates the application ASID to be used on switch.
+ UserASID uint16
+
+	// KernelASID indicates the kernel ASID to be used on return.
+ KernelASID uint16
+}
+
+func init() {
+}
+
// Emit prints architecture-specific offsets.
func Emit(w io.Writer) {
fmt.Fprintf(w, "// Automatically generated, do not edit.\n")
diff --git a/pkg/ring0/entry_amd64.s b/pkg/ring0/entry_impl_amd64.s
index 520bd9f57..1d0262a18 100644
--- a/pkg/ring0/entry_amd64.s
+++ b/pkg/ring0/entry_impl_amd64.s
@@ -1,3 +1,73 @@
+// build +amd64
+
+// Automatically generated, do not edit.
+
+// CPU offsets.
+#define CPU_REGISTERS 0x28
+#define CPU_ERROR_CODE 0x10
+#define CPU_ERROR_TYPE 0x18
+#define CPU_ENTRY 0x20
+
+// CPU entry offsets.
+#define ENTRY_SCRATCH0 0x100
+#define ENTRY_STACK_TOP 0x108
+#define ENTRY_CPU_SELF 0x110
+#define ENTRY_KERNEL_CR3 0x118
+
+// Bits.
+#define _RFLAGS_IF 0x200
+#define _RFLAGS_IOPL0 0x1000
+#define _KERNEL_FLAGS 0x02
+
+// Vectors.
+#define DivideByZero 0x00
+#define Debug 0x01
+#define NMI 0x02
+#define Breakpoint 0x03
+#define Overflow 0x04
+#define BoundRangeExceeded 0x05
+#define InvalidOpcode 0x06
+#define DeviceNotAvailable 0x07
+#define DoubleFault 0x08
+#define CoprocessorSegmentOverrun 0x09
+#define InvalidTSS 0x0a
+#define SegmentNotPresent 0x0b
+#define StackSegmentFault 0x0c
+#define GeneralProtectionFault 0x0d
+#define PageFault 0x0e
+#define X87FloatingPointException 0x10
+#define AlignmentCheck 0x11
+#define MachineCheck 0x12
+#define SIMDFloatingPointException 0x13
+#define VirtualizationException 0x14
+#define SecurityException 0x1e
+#define SyscallInt80 0x80
+#define Syscall 0x100
+
+// Ptrace registers.
+#define PTRACE_R15 0x00
+#define PTRACE_R14 0x08
+#define PTRACE_R13 0x10
+#define PTRACE_R12 0x18
+#define PTRACE_RBP 0x20
+#define PTRACE_RBX 0x28
+#define PTRACE_R11 0x30
+#define PTRACE_R10 0x38
+#define PTRACE_R9 0x40
+#define PTRACE_R8 0x48
+#define PTRACE_RAX 0x50
+#define PTRACE_RCX 0x58
+#define PTRACE_RDX 0x60
+#define PTRACE_RSI 0x68
+#define PTRACE_RDI 0x70
+#define PTRACE_ORIGRAX 0x78
+#define PTRACE_RIP 0x80
+#define PTRACE_CS 0x88
+#define PTRACE_FLAGS 0x90
+#define PTRACE_RSP 0x98
+#define PTRACE_SS 0xa0
+#define PTRACE_FS_BASE 0xa8
+#define PTRACE_GS_BASE 0xb0
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/pkg/ring0/entry_arm64.s b/pkg/ring0/entry_impl_arm64.s
index f801b8e11..9cc09524a 100644
--- a/pkg/ring0/entry_arm64.s
+++ b/pkg/ring0/entry_impl_arm64.s
@@ -1,3 +1,93 @@
+// build +arm64
+
+// Automatically generated, do not edit.
+
+// CPU offsets.
+#define CPU_SELF 0x00
+#define CPU_REGISTERS 0xe0
+#define CPU_STACK_TOP 0x90
+#define CPU_ERROR_CODE 0x90
+#define CPU_ERROR_TYPE 0x98
+#define CPU_FAULT_ADDR 0xa0
+#define CPU_FPSTATE_EL0 0xa8
+#define CPU_TTBR0_KVM 0xb0
+#define CPU_TTBR0_APP 0xb8
+#define CPU_VECTOR_CODE 0xc0
+#define CPU_APP_ADDR 0xc8
+#define CPU_LAZY_VFP 0xd0
+#define CPU_APP_ASID 0xd8
+
+// Bits.
+#define _KERNEL_FLAGS 0x3c5
+
+// Vectors.
+#define El1Sync 0x04
+#define El1Irq 0x05
+#define El1Fiq 0x06
+#define El1Err 0x07
+#define El0Sync 0x08
+#define El0Irq 0x09
+#define El0Fiq 0x0a
+#define El0Err 0x0b
+#define El1SyncDa 0x10
+#define El1SyncIa 0x11
+#define El1SyncSpPc 0x12
+#define El1SyncUndef 0x13
+#define El1SyncDbg 0x14
+#define El1SyncInv 0x15
+#define El0SyncSVC 0x16
+#define El0SyncDa 0x17
+#define El0SyncIa 0x18
+#define El0SyncFpsimdAcc 0x19
+#define El0SyncSveAcc 0x1a
+#define El0SyncFpsimdExc 0x1b
+#define El0SyncSys 0x1c
+#define El0SyncSpPc 0x1d
+#define El0SyncUndef 0x1e
+#define El0SyncDbg 0x1f
+#define El0SyncWfx 0x20
+#define El0SyncInv 0x21
+#define El0ErrNMI 0x22
+#define PageFault 0x17
+#define Syscall 0x16
+#define VirtualizationException 0x23
+
+// Ptrace registers.
+#define PTRACE_R0 0x00
+#define PTRACE_R1 0x08
+#define PTRACE_R2 0x10
+#define PTRACE_R3 0x18
+#define PTRACE_R4 0x20
+#define PTRACE_R5 0x28
+#define PTRACE_R6 0x30
+#define PTRACE_R7 0x38
+#define PTRACE_R8 0x40
+#define PTRACE_R9 0x48
+#define PTRACE_R10 0x50
+#define PTRACE_R11 0x58
+#define PTRACE_R12 0x60
+#define PTRACE_R13 0x68
+#define PTRACE_R14 0x70
+#define PTRACE_R15 0x78
+#define PTRACE_R16 0x80
+#define PTRACE_R17 0x88
+#define PTRACE_R18 0x90
+#define PTRACE_R19 0x98
+#define PTRACE_R20 0xa0
+#define PTRACE_R21 0xa8
+#define PTRACE_R22 0xb0
+#define PTRACE_R23 0xb8
+#define PTRACE_R24 0xc0
+#define PTRACE_R25 0xc8
+#define PTRACE_R26 0xd0
+#define PTRACE_R27 0xd8
+#define PTRACE_R28 0xe0
+#define PTRACE_R29 0xe8
+#define PTRACE_R30 0xf0
+#define PTRACE_SP 0xf8
+#define PTRACE_PC 0x100
+#define PTRACE_PSTATE 0x108
+#define PTRACE_TLS 0x110
// Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/pkg/ring0/gen_offsets/BUILD b/pkg/ring0/gen_offsets/BUILD
deleted file mode 100644
index 9ea8f9a4f..000000000
--- a/pkg/ring0/gen_offsets/BUILD
+++ /dev/null
@@ -1,41 +0,0 @@
-load("//tools:defs.bzl", "go_binary")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "defs_impl_arm64",
- out = "defs_impl_arm64.go",
- package = "main",
- template = "//pkg/ring0:defs_arm64",
-)
-
-go_template_instance(
- name = "defs_impl_amd64",
- out = "defs_impl_amd64.go",
- package = "main",
- template = "//pkg/ring0:defs_amd64",
-)
-
-go_binary(
- name = "gen_offsets",
- srcs = [
- "defs_impl_amd64.go",
- "defs_impl_arm64.go",
- "main.go",
- ],
- # Use the libc malloc to avoid any extra dependencies. This is required to
- # pass the sentry deps test.
- system_malloc = True,
- visibility = [
- "//pkg/ring0:__pkg__",
- "//pkg/sentry/platform/kvm:__pkg__",
- ],
- deps = [
- "//pkg/cpuid",
- "//pkg/hostarch",
- "//pkg/ring0/pagetables",
- "//pkg/sentry/arch",
- "//pkg/sentry/arch/fpu",
- ],
-)
diff --git a/pkg/ring0/gen_offsets/main.go b/pkg/ring0/gen_offsets/main.go
deleted file mode 100644
index a4927da2f..000000000
--- a/pkg/ring0/gen_offsets/main.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Binary gen_offsets is a helper for generating offset headers.
-package main
-
-import (
- "os"
-)
-
-func main() {
- Emit(os.Stdout)
-}
diff --git a/pkg/ring0/offsets_amd64.go b/pkg/ring0/offsets_amd64.go
deleted file mode 100644
index 75f6218b3..000000000
--- a/pkg/ring0/offsets_amd64.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build amd64
-// +build amd64
-
-package ring0
-
-import (
- "fmt"
- "io"
- "reflect"
-
- "gvisor.dev/gvisor/pkg/sentry/arch"
-)
-
-// Emit prints architecture-specific offsets.
-func Emit(w io.Writer) {
- fmt.Fprintf(w, "// Automatically generated, do not edit.\n")
-
- c := &CPU{}
- fmt.Fprintf(w, "\n// CPU offsets.\n")
- fmt.Fprintf(w, "#define CPU_REGISTERS 0x%02x\n", reflect.ValueOf(&c.registers).Pointer()-reflect.ValueOf(c).Pointer())
- fmt.Fprintf(w, "#define CPU_ERROR_CODE 0x%02x\n", reflect.ValueOf(&c.errorCode).Pointer()-reflect.ValueOf(c).Pointer())
- fmt.Fprintf(w, "#define CPU_ERROR_TYPE 0x%02x\n", reflect.ValueOf(&c.errorType).Pointer()-reflect.ValueOf(c).Pointer())
- fmt.Fprintf(w, "#define CPU_ENTRY 0x%02x\n", reflect.ValueOf(&c.kernelEntry).Pointer()-reflect.ValueOf(c).Pointer())
-
- e := &kernelEntry{}
- fmt.Fprintf(w, "\n// CPU entry offsets.\n")
- fmt.Fprintf(w, "#define ENTRY_SCRATCH0 0x%02x\n", reflect.ValueOf(&e.scratch0).Pointer()-reflect.ValueOf(e).Pointer())
- fmt.Fprintf(w, "#define ENTRY_STACK_TOP 0x%02x\n", reflect.ValueOf(&e.stackTop).Pointer()-reflect.ValueOf(e).Pointer())
- fmt.Fprintf(w, "#define ENTRY_CPU_SELF 0x%02x\n", reflect.ValueOf(&e.cpuSelf).Pointer()-reflect.ValueOf(e).Pointer())
- fmt.Fprintf(w, "#define ENTRY_KERNEL_CR3 0x%02x\n", reflect.ValueOf(&e.kernelCR3).Pointer()-reflect.ValueOf(e).Pointer())
-
- fmt.Fprintf(w, "\n// Bits.\n")
- fmt.Fprintf(w, "#define _RFLAGS_IF 0x%02x\n", _RFLAGS_IF)
- fmt.Fprintf(w, "#define _RFLAGS_IOPL0 0x%02x\n", _RFLAGS_IOPL0)
- fmt.Fprintf(w, "#define _KERNEL_FLAGS 0x%02x\n", KernelFlagsSet)
-
- fmt.Fprintf(w, "\n// Vectors.\n")
- fmt.Fprintf(w, "#define DivideByZero 0x%02x\n", DivideByZero)
- fmt.Fprintf(w, "#define Debug 0x%02x\n", Debug)
- fmt.Fprintf(w, "#define NMI 0x%02x\n", NMI)
- fmt.Fprintf(w, "#define Breakpoint 0x%02x\n", Breakpoint)
- fmt.Fprintf(w, "#define Overflow 0x%02x\n", Overflow)
- fmt.Fprintf(w, "#define BoundRangeExceeded 0x%02x\n", BoundRangeExceeded)
- fmt.Fprintf(w, "#define InvalidOpcode 0x%02x\n", InvalidOpcode)
- fmt.Fprintf(w, "#define DeviceNotAvailable 0x%02x\n", DeviceNotAvailable)
- fmt.Fprintf(w, "#define DoubleFault 0x%02x\n", DoubleFault)
- fmt.Fprintf(w, "#define CoprocessorSegmentOverrun 0x%02x\n", CoprocessorSegmentOverrun)
- fmt.Fprintf(w, "#define InvalidTSS 0x%02x\n", InvalidTSS)
- fmt.Fprintf(w, "#define SegmentNotPresent 0x%02x\n", SegmentNotPresent)
- fmt.Fprintf(w, "#define StackSegmentFault 0x%02x\n", StackSegmentFault)
- fmt.Fprintf(w, "#define GeneralProtectionFault 0x%02x\n", GeneralProtectionFault)
- fmt.Fprintf(w, "#define PageFault 0x%02x\n", PageFault)
- fmt.Fprintf(w, "#define X87FloatingPointException 0x%02x\n", X87FloatingPointException)
- fmt.Fprintf(w, "#define AlignmentCheck 0x%02x\n", AlignmentCheck)
- fmt.Fprintf(w, "#define MachineCheck 0x%02x\n", MachineCheck)
- fmt.Fprintf(w, "#define SIMDFloatingPointException 0x%02x\n", SIMDFloatingPointException)
- fmt.Fprintf(w, "#define VirtualizationException 0x%02x\n", VirtualizationException)
- fmt.Fprintf(w, "#define SecurityException 0x%02x\n", SecurityException)
- fmt.Fprintf(w, "#define SyscallInt80 0x%02x\n", SyscallInt80)
- fmt.Fprintf(w, "#define Syscall 0x%02x\n", Syscall)
-
- p := &arch.Registers{}
- fmt.Fprintf(w, "\n// Ptrace registers.\n")
- fmt.Fprintf(w, "#define PTRACE_R15 0x%02x\n", reflect.ValueOf(&p.R15).Pointer()-reflect.ValueOf(p).Pointer())
- fmt.Fprintf(w, "#define PTRACE_R14 0x%02x\n", reflect.ValueOf(&p.R14).Pointer()-reflect.ValueOf(p).Pointer())
- fmt.Fprintf(w, "#define PTRACE_R13 0x%02x\n", reflect.ValueOf(&p.R13).Pointer()-reflect.ValueOf(p).Pointer())
- fmt.Fprintf(w, "#define PTRACE_R12 0x%02x\n", reflect.ValueOf(&p.R12).Pointer()-reflect.ValueOf(p).Pointer())
- fmt.Fprintf(w, "#define PTRACE_RBP 0x%02x\n", reflect.ValueOf(&p.Rbp).Pointer()-reflect.ValueOf(p).Pointer())
- fmt.Fprintf(w, "#define PTRACE_RBX 0x%02x\n", reflect.ValueOf(&p.Rbx).Pointer()-reflect.ValueOf(p).Pointer())
- fmt.Fprintf(w, "#define PTRACE_R11 0x%02x\n", reflect.ValueOf(&p.R11).Pointer()-reflect.ValueOf(p).Pointer())
- fmt.Fprintf(w, "#define PTRACE_R10 0x%02x\n", reflect.ValueOf(&p.R10).Pointer()-reflect.ValueOf(p).Pointer())
- fmt.Fprintf(w, "#define PTRACE_R9 0x%02x\n", reflect.ValueOf(&p.R9).Pointer()-reflect.ValueOf(p).Pointer())
- fmt.Fprintf(w, "#define PTRACE_R8 0x%02x\n", reflect.ValueOf(&p.R8).Pointer()-reflect.ValueOf(p).Pointer())
- fmt.Fprintf(w, "#define PTRACE_RAX 0x%02x\n", reflect.ValueOf(&p.Rax).Pointer()-reflect.ValueOf(p).Pointer())
- fmt.Fprintf(w, "#define PTRACE_RCX 0x%02x\n", reflect.ValueOf(&p.Rcx).Pointer()-reflect.ValueOf(p).Pointer())
- fmt.Fprintf(w, "#define PTRACE_RDX 0x%02x\n", reflect.ValueOf(&p.Rdx).Pointer()-reflect.ValueOf(p).Pointer())
- fmt.Fprintf(w, "#define PTRACE_RSI 0x%02x\n", reflect.ValueOf(&p.Rsi).Pointer()-reflect.ValueOf(p).Pointer())
- fmt.Fprintf(w, "#define PTRACE_RDI 0x%02x\n", reflect.ValueOf(&p.Rdi).Pointer()-reflect.ValueOf(p).Pointer())
- fmt.Fprintf(w, "#define PTRACE_ORIGRAX 0x%02x\n", reflect.ValueOf(&p.Orig_rax).Pointer()-reflect.ValueOf(p).Pointer())
- fmt.Fprintf(w, "#define PTRACE_RIP 0x%02x\n", reflect.ValueOf(&p.Rip).Pointer()-reflect.ValueOf(p).Pointer())
- fmt.Fprintf(w, "#define PTRACE_CS 0x%02x\n", reflect.ValueOf(&p.Cs).Pointer()-reflect.ValueOf(p).Pointer())
- fmt.Fprintf(w, "#define PTRACE_FLAGS 0x%02x\n", reflect.ValueOf(&p.Eflags).Pointer()-reflect.ValueOf(p).Pointer())
- fmt.Fprintf(w, "#define PTRACE_RSP 0x%02x\n", reflect.ValueOf(&p.Rsp).Pointer()-reflect.ValueOf(p).Pointer())
- fmt.Fprintf(w, "#define PTRACE_SS 0x%02x\n", reflect.ValueOf(&p.Ss).Pointer()-reflect.ValueOf(p).Pointer())
- fmt.Fprintf(w, "#define PTRACE_FS_BASE 0x%02x\n", reflect.ValueOf(&p.Fs_base).Pointer()-reflect.ValueOf(p).Pointer())
- fmt.Fprintf(w, "#define PTRACE_GS_BASE 0x%02x\n", reflect.ValueOf(&p.Gs_base).Pointer()-reflect.ValueOf(p).Pointer())
-}
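
The deleted Emit above derives every offset the same way: take the address of a field, subtract the address of the struct, and print the result as a #define for the assembly to consume. A condensed sketch of that technique on a hypothetical two-field struct (unsafe.Offsetof would give the same result; the struct here is not part of the real code):

package main

import (
	"fmt"
	"reflect"
)

type example struct {
	a uint64
	b uint64
}

func main() {
	e := &example{}
	// Field address minus struct base address, exactly as Emit computes it.
	off := reflect.ValueOf(&e.b).Pointer() - reflect.ValueOf(e).Pointer()
	fmt.Printf("#define EXAMPLE_B 0x%02x\n", off) // prints 0x08
}
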
diff --git a/pkg/ring0/pagetables/BUILD b/pkg/ring0/pagetables/BUILD
deleted file mode 100644
index f855f4d42..000000000
--- a/pkg/ring0/pagetables/BUILD
+++ /dev/null
@@ -1,88 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template", "go_template_instance")
-
-package(licenses = ["notice"])
-
-[
- # These files are tagged with relevant build architectures. We can always
- # build all the input files, which will be included only in the relevant
- # architecture builds.
- go_template(
- name = "generic_walker_%s" % arch,
- srcs = [
- "walker_generic.go",
- "walker_%s.go" % arch,
- ],
- opt_types = [
- "Visitor",
- ],
- visibility = [":__pkg__"],
- )
- for arch in ("amd64", "arm64")
-]
-
-[
- # See above.
- go_template_instance(
- name = "walker_%s_%s" % (op, arch),
- out = "walker_%s_%s.go" % (op, arch),
- package = "pagetables",
- prefix = op,
- template = ":generic_walker_%s" % arch,
- types = {
- "Visitor": "%sVisitor" % op,
- },
- )
- for op in ("map", "unmap", "lookup", "empty", "check")
- for arch in ("amd64", "arm64")
-]
-
-go_library(
- name = "pagetables",
- srcs = [
- "allocator.go",
- "allocator_unsafe.go",
- "pagetables.go",
- "pagetables_aarch64.go",
- "pagetables_amd64.go",
- "pagetables_arm64.go",
- "pagetables_x86.go",
- "pcids.go",
- "pcids_aarch64.go",
- "pcids_aarch64.s",
- "pcids_x86.go",
- "walker_amd64.go",
- "walker_arm64.go",
- "walker_generic.go",
- ":walker_empty_amd64",
- ":walker_empty_arm64",
- ":walker_lookup_amd64",
- ":walker_lookup_arm64",
- ":walker_map_amd64",
- ":walker_map_arm64",
- ":walker_unmap_amd64",
- ":walker_unmap_arm64",
- ],
- visibility = [
- "//pkg/ring0:__subpackages__",
- "//pkg/sentry/platform/kvm:__subpackages__",
- ],
- deps = [
- "//pkg/hostarch",
- "//pkg/sync",
- ],
-)
-
-go_test(
- name = "pagetables_test",
- size = "small",
- srcs = [
- "pagetables_amd64_test.go",
- "pagetables_arm64_test.go",
- "pagetables_test.go",
- ":walker_check_amd64",
- ":walker_check_arm64",
- ],
- library = ":pagetables",
- deps = ["//pkg/hostarch"],
-)
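
This BUILD file instantiated one walker per (operation, architecture) pair from a generic template; the generated walker_*_amd64.go and walker_*_arm64.go files added later in this diff are those instances, with the operation name prefixed onto the identifiers (emptyWalker/emptynext, lookupWalker/lookupnext, mapWalker/mapnext, and so on). A rough sketch of the shape every instantiation shares, assuming the package's PageTables and PTE types; this is illustrative, not the template source:

package pagetables

// walkerShape mirrors the struct each instantiation declares; go_template_instance
// substitutes the concrete visitor type (emptyVisitor, lookupVisitor, ...).
type walkerShape struct {
	pageTables *PageTables
	visitor    interface {
		requiresAlloc() bool
		requiresSplit() bool
		visit(start uintptr, pte *PTE, align uintptr) bool
	}
}

// nextShape matches the per-op "next" helpers (emptynext, lookupnext, ...):
// round start up to the next multiple of size.
func nextShape(start, size uintptr) uintptr {
	start &= ^(size - 1)
	return start + size
}

Because each generated file carries its own renamed copy of the walker struct and the next helper, the same code appears repeatedly below with only the prefix changing.
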
diff --git a/pkg/ring0/pagetables/pagetables_aarch64_state_autogen.go b/pkg/ring0/pagetables/pagetables_aarch64_state_autogen.go
new file mode 100644
index 000000000..a24523f87
--- /dev/null
+++ b/pkg/ring0/pagetables/pagetables_aarch64_state_autogen.go
@@ -0,0 +1,6 @@
+// automatically generated by stateify.
+
+// +build arm64
+// +build arm64
+
+package pagetables
diff --git a/pkg/ring0/pagetables/pagetables_amd64_state_autogen.go b/pkg/ring0/pagetables/pagetables_amd64_state_autogen.go
new file mode 100644
index 000000000..f48a8acd1
--- /dev/null
+++ b/pkg/ring0/pagetables/pagetables_amd64_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build amd64
+
+package pagetables
diff --git a/pkg/ring0/pagetables/pagetables_amd64_test.go b/pkg/ring0/pagetables/pagetables_amd64_test.go
deleted file mode 100644
index c27b3b10a..000000000
--- a/pkg/ring0/pagetables/pagetables_amd64_test.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build amd64
-// +build amd64
-
-package pagetables
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/hostarch"
-)
-
-func Test2MAnd4K(t *testing.T) {
- pt := New(NewRuntimeAllocator())
-
- // Map a small page and a huge page.
- pt.Map(0x400000, pteSize, MapOpts{AccessType: hostarch.ReadWrite}, pteSize*42)
- pt.Map(0x00007f0000000000, pmdSize, MapOpts{AccessType: hostarch.Read}, pmdSize*47)
-
- checkMappings(t, pt, []mapping{
- {0x400000, pteSize, pteSize * 42, MapOpts{AccessType: hostarch.ReadWrite}},
- {0x00007f0000000000, pmdSize, pmdSize * 47, MapOpts{AccessType: hostarch.Read}},
- })
-}
-
-func Test1GAnd4K(t *testing.T) {
- pt := New(NewRuntimeAllocator())
-
- // Map a small page and a super page.
- pt.Map(0x400000, pteSize, MapOpts{AccessType: hostarch.ReadWrite}, pteSize*42)
- pt.Map(0x00007f0000000000, pudSize, MapOpts{AccessType: hostarch.Read}, pudSize*47)
-
- checkMappings(t, pt, []mapping{
- {0x400000, pteSize, pteSize * 42, MapOpts{AccessType: hostarch.ReadWrite}},
- {0x00007f0000000000, pudSize, pudSize * 47, MapOpts{AccessType: hostarch.Read}},
- })
-}
-
-func TestSplit1GPage(t *testing.T) {
- pt := New(NewRuntimeAllocator())
-
- // Map a super page and knock out the middle.
- pt.Map(0x00007f0000000000, pudSize, MapOpts{AccessType: hostarch.Read}, pudSize*42)
- pt.Unmap(hostarch.Addr(0x00007f0000000000+pteSize), pudSize-(2*pteSize))
-
- checkMappings(t, pt, []mapping{
- {0x00007f0000000000, pteSize, pudSize * 42, MapOpts{AccessType: hostarch.Read}},
- {0x00007f0000000000 + pudSize - pteSize, pteSize, pudSize*42 + pudSize - pteSize, MapOpts{AccessType: hostarch.Read}},
- })
-}
-
-func TestSplit2MPage(t *testing.T) {
- pt := New(NewRuntimeAllocator())
-
- // Map a huge page and knock out the middle.
- pt.Map(0x00007f0000000000, pmdSize, MapOpts{AccessType: hostarch.Read}, pmdSize*42)
- pt.Unmap(hostarch.Addr(0x00007f0000000000+pteSize), pmdSize-(2*pteSize))
-
- checkMappings(t, pt, []mapping{
- {0x00007f0000000000, pteSize, pmdSize * 42, MapOpts{AccessType: hostarch.Read}},
- {0x00007f0000000000 + pmdSize - pteSize, pteSize, pmdSize*42 + pmdSize - pteSize, MapOpts{AccessType: hostarch.Read}},
- })
-}
diff --git a/pkg/ring0/pagetables/pagetables_arm64_state_autogen.go b/pkg/ring0/pagetables/pagetables_arm64_state_autogen.go
new file mode 100644
index 000000000..ae9d2b272
--- /dev/null
+++ b/pkg/ring0/pagetables/pagetables_arm64_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build arm64
+
+package pagetables
diff --git a/pkg/ring0/pagetables/pagetables_arm64_test.go b/pkg/ring0/pagetables/pagetables_arm64_test.go
deleted file mode 100644
index 1c919ec7d..000000000
--- a/pkg/ring0/pagetables/pagetables_arm64_test.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build arm64
-// +build arm64
-
-package pagetables
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/hostarch"
-)
-
-func Test2MAnd4K(t *testing.T) {
- pt := New(NewRuntimeAllocator())
-
- // Map a small page and a huge page.
- pt.Map(0x400000, pteSize, MapOpts{AccessType: hostarch.ReadWrite, User: true}, pteSize*42)
- pt.Map(0x0000ff0000000000, pmdSize, MapOpts{AccessType: hostarch.Read, User: true}, pmdSize*47)
-
- pt.Map(0xffff000000400000, pteSize, MapOpts{AccessType: hostarch.ReadWrite, User: false}, pteSize*42)
- pt.Map(0xffffff0000000000, pmdSize, MapOpts{AccessType: hostarch.Read, User: false}, pmdSize*47)
-
- checkMappings(t, pt, []mapping{
- {0x400000, pteSize, pteSize * 42, MapOpts{AccessType: hostarch.ReadWrite, User: true}},
- {0x0000ff0000000000, pmdSize, pmdSize * 47, MapOpts{AccessType: hostarch.Read, User: true}},
- {0xffff000000400000, pteSize, pteSize * 42, MapOpts{AccessType: hostarch.ReadWrite, User: false}},
- {0xffffff0000000000, pmdSize, pmdSize * 47, MapOpts{AccessType: hostarch.Read, User: false}},
- })
-}
-
-func Test1GAnd4K(t *testing.T) {
- pt := New(NewRuntimeAllocator())
-
- // Map a small page and a super page.
- pt.Map(0x400000, pteSize, MapOpts{AccessType: hostarch.ReadWrite, User: true}, pteSize*42)
- pt.Map(0x0000ff0000000000, pudSize, MapOpts{AccessType: hostarch.Read, User: true}, pudSize*47)
-
- checkMappings(t, pt, []mapping{
- {0x400000, pteSize, pteSize * 42, MapOpts{AccessType: hostarch.ReadWrite, User: true}},
- {0x0000ff0000000000, pudSize, pudSize * 47, MapOpts{AccessType: hostarch.Read, User: true}},
- })
-}
-
-func TestSplit1GPage(t *testing.T) {
- pt := New(NewRuntimeAllocator())
-
- // Map a super page and knock out the middle.
- pt.Map(0x0000ff0000000000, pudSize, MapOpts{AccessType: hostarch.Read, User: true}, pudSize*42)
- pt.Unmap(hostarch.Addr(0x0000ff0000000000+pteSize), pudSize-(2*pteSize))
-
- checkMappings(t, pt, []mapping{
- {0x0000ff0000000000, pteSize, pudSize * 42, MapOpts{AccessType: hostarch.Read, User: true}},
- {0x0000ff0000000000 + pudSize - pteSize, pteSize, pudSize*42 + pudSize - pteSize, MapOpts{AccessType: hostarch.Read, User: true}},
- })
-}
-
-func TestSplit2MPage(t *testing.T) {
- pt := New(NewRuntimeAllocator())
-
- // Map a huge page and knock out the middle.
- pt.Map(0x0000ff0000000000, pmdSize, MapOpts{AccessType: hostarch.Read, User: true}, pmdSize*42)
- pt.Unmap(hostarch.Addr(0x0000ff0000000000+pteSize), pmdSize-(2*pteSize))
-
- checkMappings(t, pt, []mapping{
- {0x0000ff0000000000, pteSize, pmdSize * 42, MapOpts{AccessType: hostarch.Read, User: true}},
- {0x0000ff0000000000 + pmdSize - pteSize, pteSize, pmdSize*42 + pmdSize - pteSize, MapOpts{AccessType: hostarch.Read, User: true}},
- })
-}
diff --git a/pkg/ring0/pagetables/pagetables_state_autogen.go b/pkg/ring0/pagetables/pagetables_state_autogen.go
new file mode 100644
index 000000000..4c4540603
--- /dev/null
+++ b/pkg/ring0/pagetables/pagetables_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package pagetables
diff --git a/pkg/ring0/pagetables/pagetables_test.go b/pkg/ring0/pagetables/pagetables_test.go
deleted file mode 100644
index df93dcb6a..000000000
--- a/pkg/ring0/pagetables/pagetables_test.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pagetables
-
-import (
- "gvisor.dev/gvisor/pkg/hostarch"
- "testing"
-)
-
-type mapping struct {
- start uintptr
- length uintptr
- addr uintptr
- opts MapOpts
-}
-
-type checkVisitor struct {
- expected []mapping // Input.
- current int // Temporary.
- found []mapping // Output.
- failed string // Output.
-}
-
-func (v *checkVisitor) visit(start uintptr, pte *PTE, align uintptr) bool {
- v.found = append(v.found, mapping{
- start: start,
- length: align + 1,
- addr: pte.Address(),
- opts: pte.Opts(),
- })
- if v.failed != "" {
- // Don't keep looking for errors.
- return false
- }
-
- if v.current >= len(v.expected) {
- v.failed = "more mappings than expected"
- } else if v.expected[v.current].start != start {
- v.failed = "start didn't match expected"
- } else if v.expected[v.current].length != (align + 1) {
- v.failed = "end didn't match expected"
- } else if v.expected[v.current].addr != pte.Address() {
- v.failed = "address didn't match expected"
- } else if v.expected[v.current].opts != pte.Opts() {
- v.failed = "opts didn't match"
- }
- v.current++
- return true
-}
-
-func (*checkVisitor) requiresAlloc() bool { return false }
-
-func (*checkVisitor) requiresSplit() bool { return false }
-
-func checkMappings(t *testing.T, pt *PageTables, m []mapping) {
- // Iterate over all the mappings.
- w := checkWalker{
- pageTables: pt,
- visitor: checkVisitor{
- expected: m,
- },
- }
- w.iterateRange(0, ^uintptr(0))
-
-	// Were we expecting additional mappings?
- if w.visitor.failed == "" && w.visitor.current != len(w.visitor.expected) {
- w.visitor.failed = "insufficient mappings found"
- }
-
- // Emit a meaningful error message on failure.
- if w.visitor.failed != "" {
- t.Errorf("%s; got %#v, wanted %#v", w.visitor.failed, w.visitor.found, w.visitor.expected)
- }
-}
-
-func TestUnmap(t *testing.T) {
- pt := New(NewRuntimeAllocator())
-
- // Map and unmap one entry.
- pt.Map(0x400000, pteSize, MapOpts{AccessType: hostarch.ReadWrite}, pteSize*42)
- pt.Unmap(0x400000, pteSize)
-
- checkMappings(t, pt, nil)
-}
-
-func TestReadOnly(t *testing.T) {
- pt := New(NewRuntimeAllocator())
-
- // Map one entry.
- pt.Map(0x400000, pteSize, MapOpts{AccessType: hostarch.Read}, pteSize*42)
-
- checkMappings(t, pt, []mapping{
- {0x400000, pteSize, pteSize * 42, MapOpts{AccessType: hostarch.Read}},
- })
-}
-
-func TestReadWrite(t *testing.T) {
- pt := New(NewRuntimeAllocator())
-
- // Map one entry.
- pt.Map(0x400000, pteSize, MapOpts{AccessType: hostarch.ReadWrite}, pteSize*42)
-
- checkMappings(t, pt, []mapping{
- {0x400000, pteSize, pteSize * 42, MapOpts{AccessType: hostarch.ReadWrite}},
- })
-}
-
-func TestSerialEntries(t *testing.T) {
- pt := New(NewRuntimeAllocator())
-
- // Map two sequential entries.
- pt.Map(0x400000, pteSize, MapOpts{AccessType: hostarch.ReadWrite}, pteSize*42)
- pt.Map(0x401000, pteSize, MapOpts{AccessType: hostarch.ReadWrite}, pteSize*47)
-
- checkMappings(t, pt, []mapping{
- {0x400000, pteSize, pteSize * 42, MapOpts{AccessType: hostarch.ReadWrite}},
- {0x401000, pteSize, pteSize * 47, MapOpts{AccessType: hostarch.ReadWrite}},
- })
-}
-
-func TestSpanningEntries(t *testing.T) {
- pt := New(NewRuntimeAllocator())
-
- // Span a pgd with two pages.
- pt.Map(0x00007efffffff000, 2*pteSize, MapOpts{AccessType: hostarch.Read}, pteSize*42)
-
- checkMappings(t, pt, []mapping{
- {0x00007efffffff000, pteSize, pteSize * 42, MapOpts{AccessType: hostarch.Read}},
- {0x00007f0000000000, pteSize, pteSize * 43, MapOpts{AccessType: hostarch.Read}},
- })
-}
-
-func TestSparseEntries(t *testing.T) {
- pt := New(NewRuntimeAllocator())
-
- // Map two entries in different pgds.
- pt.Map(0x400000, pteSize, MapOpts{AccessType: hostarch.ReadWrite}, pteSize*42)
- pt.Map(0x00007f0000000000, pteSize, MapOpts{AccessType: hostarch.Read}, pteSize*47)
-
- checkMappings(t, pt, []mapping{
- {0x400000, pteSize, pteSize * 42, MapOpts{AccessType: hostarch.ReadWrite}},
- {0x00007f0000000000, pteSize, pteSize * 47, MapOpts{AccessType: hostarch.Read}},
- })
-}
diff --git a/pkg/ring0/pagetables/pagetables_unsafe_state_autogen.go b/pkg/ring0/pagetables/pagetables_unsafe_state_autogen.go
new file mode 100644
index 000000000..4c4540603
--- /dev/null
+++ b/pkg/ring0/pagetables/pagetables_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package pagetables
diff --git a/pkg/ring0/pagetables/pagetables_x86_state_autogen.go b/pkg/ring0/pagetables/pagetables_x86_state_autogen.go
new file mode 100644
index 000000000..6fe78c51c
--- /dev/null
+++ b/pkg/ring0/pagetables/pagetables_x86_state_autogen.go
@@ -0,0 +1,6 @@
+// automatically generated by stateify.
+
+// +build 386 amd64
+// +build i386 amd64
+
+package pagetables
diff --git a/pkg/ring0/pagetables/walker_empty_amd64.go b/pkg/ring0/pagetables/walker_empty_amd64.go
new file mode 100644
index 000000000..a3cd7a1a2
--- /dev/null
+++ b/pkg/ring0/pagetables/walker_empty_amd64.go
@@ -0,0 +1,265 @@
+// +build amd64
+
+package pagetables
+
+// iterateRangeCanonical walks a canonical range.
+//
+//go:nosplit
+func (w *emptyWalker) iterateRangeCanonical(start, end uintptr) bool {
+ for pgdIndex := uint16((start & pgdMask) >> pgdShift); start < end && pgdIndex < entriesPerPage; pgdIndex++ {
+ var (
+ pgdEntry = &w.pageTables.root[pgdIndex]
+ pudEntries *PTEs
+ )
+ if !pgdEntry.Valid() {
+ if !w.visitor.requiresAlloc() {
+
+ start = emptynext(start, pgdSize)
+ continue
+ }
+
+ pudEntries = w.pageTables.Allocator.NewPTEs()
+ pgdEntry.setPageTable(w.pageTables, pudEntries)
+ } else {
+ pudEntries = w.pageTables.Allocator.LookupPTEs(pgdEntry.Address())
+ }
+
+ clearPUDEntries := uint16(0)
+
+ for pudIndex := uint16((start & pudMask) >> pudShift); start < end && pudIndex < entriesPerPage; pudIndex++ {
+ var (
+ pudEntry = &pudEntries[pudIndex]
+ pmdEntries *PTEs
+ )
+ if !pudEntry.Valid() {
+ if !w.visitor.requiresAlloc() {
+
+ clearPUDEntries++
+ start = emptynext(start, pudSize)
+ continue
+ }
+
+ if start&(pudSize-1) == 0 && end-start >= pudSize {
+ pudEntry.SetSuper()
+ if !w.visitor.visit(uintptr(start&^(pudSize-1)), pudEntry, pudSize-1) {
+ return false
+ }
+ if pudEntry.Valid() {
+ start = emptynext(start, pudSize)
+ continue
+ }
+ }
+
+ pmdEntries = w.pageTables.Allocator.NewPTEs()
+ pudEntry.setPageTable(w.pageTables, pmdEntries)
+
+ } else if pudEntry.IsSuper() {
+
+ if w.visitor.requiresSplit() && (start&(pudSize-1) != 0 || end < emptynext(start, pudSize)) {
+
+ pmdEntries = w.pageTables.Allocator.NewPTEs()
+ for index := uint16(0); index < entriesPerPage; index++ {
+ pmdEntries[index].SetSuper()
+ pmdEntries[index].Set(
+ pudEntry.Address()+(pmdSize*uintptr(index)),
+ pudEntry.Opts())
+ }
+ pudEntry.setPageTable(w.pageTables, pmdEntries)
+ } else {
+
+ if !w.visitor.visit(uintptr(start&^(pudSize-1)), pudEntry, pudSize-1) {
+ return false
+ }
+
+ if !pudEntry.Valid() {
+ clearPUDEntries++
+ }
+
+ start = emptynext(start, pudSize)
+ continue
+ }
+ } else {
+ pmdEntries = w.pageTables.Allocator.LookupPTEs(pudEntry.Address())
+ }
+
+ clearPMDEntries := uint16(0)
+
+ for pmdIndex := uint16((start & pmdMask) >> pmdShift); start < end && pmdIndex < entriesPerPage; pmdIndex++ {
+ var (
+ pmdEntry = &pmdEntries[pmdIndex]
+ pteEntries *PTEs
+ )
+ if !pmdEntry.Valid() {
+ if !w.visitor.requiresAlloc() {
+
+ clearPMDEntries++
+ start = emptynext(start, pmdSize)
+ continue
+ }
+
+ if start&(pmdSize-1) == 0 && end-start >= pmdSize {
+ pmdEntry.SetSuper()
+ if !w.visitor.visit(uintptr(start&^(pmdSize-1)), pmdEntry, pmdSize-1) {
+ return false
+ }
+ if pmdEntry.Valid() {
+ start = emptynext(start, pmdSize)
+ continue
+ }
+ }
+
+ pteEntries = w.pageTables.Allocator.NewPTEs()
+ pmdEntry.setPageTable(w.pageTables, pteEntries)
+
+ } else if pmdEntry.IsSuper() {
+
+ if w.visitor.requiresSplit() && (start&(pmdSize-1) != 0 || end < emptynext(start, pmdSize)) {
+
+ pteEntries = w.pageTables.Allocator.NewPTEs()
+ for index := uint16(0); index < entriesPerPage; index++ {
+ pteEntries[index].Set(
+ pmdEntry.Address()+(pteSize*uintptr(index)),
+ pmdEntry.Opts())
+ }
+ pmdEntry.setPageTable(w.pageTables, pteEntries)
+ } else {
+
+ if !w.visitor.visit(uintptr(start&^(pmdSize-1)), pmdEntry, pmdSize-1) {
+ return false
+ }
+
+ if !pmdEntry.Valid() {
+ clearPMDEntries++
+ }
+
+ start = emptynext(start, pmdSize)
+ continue
+ }
+ } else {
+ pteEntries = w.pageTables.Allocator.LookupPTEs(pmdEntry.Address())
+ }
+
+ clearPTEEntries := uint16(0)
+
+ for pteIndex := uint16((start & pteMask) >> pteShift); start < end && pteIndex < entriesPerPage; pteIndex++ {
+ var (
+ pteEntry = &pteEntries[pteIndex]
+ )
+ if !pteEntry.Valid() && !w.visitor.requiresAlloc() {
+ clearPTEEntries++
+ start += pteSize
+ continue
+ }
+
+ if !w.visitor.visit(uintptr(start&^(pteSize-1)), pteEntry, pteSize-1) {
+ return false
+ }
+ if !pteEntry.Valid() && !w.visitor.requiresAlloc() {
+ clearPTEEntries++
+ }
+
+ start += pteSize
+ continue
+ }
+
+ if clearPTEEntries == entriesPerPage {
+ pmdEntry.Clear()
+ w.pageTables.Allocator.FreePTEs(pteEntries)
+ clearPMDEntries++
+ }
+ }
+
+ if clearPMDEntries == entriesPerPage {
+ pudEntry.Clear()
+ w.pageTables.Allocator.FreePTEs(pmdEntries)
+ clearPUDEntries++
+ }
+ }
+
+ if clearPUDEntries == entriesPerPage {
+ pgdEntry.Clear()
+ w.pageTables.Allocator.FreePTEs(pudEntries)
+ }
+ }
+ return true
+}
+
+// Walker walks page tables.
+type emptyWalker struct {
+ // pageTables are the tables to walk.
+ pageTables *PageTables
+
+ // Visitor is the set of arguments.
+ visitor emptyVisitor
+}
+
+// iterateRange iterates over all appropriate levels of page tables for the given range.
+//
+// If requiresAlloc is true, then Set _must_ be called on all given PTEs. The
+// exception is super pages. If a valid super page (huge or jumbo) cannot be
+// installed, then the walk will continue to individual entries.
+//
+// This algorithm will attempt to maximize the use of super/sect pages whenever
+// possible. Whether a super page is provided will be clear through the range
+// provided in the callback.
+//
+// Note that if requiresAlloc is true, then no gaps will be present. However,
+// if alloc is not set, then the iteration will likely be full of gaps.
+//
+// Note that this function should generally be avoided in favor of Map, Unmap,
+// etc. when not necessary.
+//
+// Precondition: start must be page-aligned.
+// Precondition: start must be less than end.
+// Precondition: If requiresAlloc is true, then start and end should not span
+// non-canonical ranges. If they do, a panic will result.
+//
+//go:nosplit
+func (w *emptyWalker) iterateRange(start, end uintptr) {
+ if start%pteSize != 0 {
+ panic("unaligned start")
+ }
+ if end < start {
+ panic("start > end")
+ }
+ if start < lowerTop {
+ if end <= lowerTop {
+ w.iterateRangeCanonical(start, end)
+ } else if end > lowerTop && end <= upperBottom {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ w.iterateRangeCanonical(start, lowerTop)
+ } else {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ if !w.iterateRangeCanonical(start, lowerTop) {
+ return
+ }
+ w.iterateRangeCanonical(upperBottom, end)
+ }
+ } else if start < upperBottom {
+ if end <= upperBottom {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ } else {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ w.iterateRangeCanonical(upperBottom, end)
+ }
+ } else {
+ w.iterateRangeCanonical(start, end)
+ }
+}
+
+// next returns the next address quantized by the given size.
+//
+//go:nosplit
+func emptynext(start uintptr, size uintptr) uintptr {
+ start &= ^(size - 1)
+ start += size
+ return start
+}
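
The generated walker calls back into a visitor with the contract shown by the deleted checkVisitor above: requiresAlloc, requiresSplit, and visit. A hypothetical read-only visitor in the same mold, counting the 4 KiB pages covered by valid mappings (countVisitor is an invented name; a matching walker instance would still have to be generated through the BUILD template, as for check and lookup):

package pagetables

// countVisitor is a hypothetical read-only visitor: it neither allocates nor
// splits, so the walker skips holes and leaves the tables untouched.
type countVisitor struct {
	pages int // number of 4 KiB pages covered by valid mappings
}

// visit accumulates the size of each visited mapping; align+1 is the mapping
// size at the level being visited (pteSize, pmdSize or pudSize).
func (v *countVisitor) visit(start uintptr, pte *PTE, align uintptr) bool {
	if pte.Valid() {
		v.pages += int((align + 1) / pteSize)
	}
	return true // keep walking
}

func (*countVisitor) requiresAlloc() bool { return false }
func (*countVisitor) requiresSplit() bool { return false }
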
diff --git a/pkg/ring0/pagetables/walker_empty_arm64.go b/pkg/ring0/pagetables/walker_empty_arm64.go
new file mode 100644
index 000000000..d61b44b65
--- /dev/null
+++ b/pkg/ring0/pagetables/walker_empty_arm64.go
@@ -0,0 +1,275 @@
+// +build arm64
+
+package pagetables
+
+// iterateRangeCanonical walks a canonical range.
+//
+//go:nosplit
+func (w *emptyWalker) iterateRangeCanonical(start, end uintptr) bool {
+ pgdEntryIndex := w.pageTables.root
+ if start >= upperBottom {
+ pgdEntryIndex = w.pageTables.archPageTables.root
+ }
+
+ for pgdIndex := (uint16((start & pgdMask) >> pgdShift)); start < end && pgdIndex < entriesPerPage; pgdIndex++ {
+ var (
+ pgdEntry = &pgdEntryIndex[pgdIndex]
+ pudEntries *PTEs
+ )
+ if !pgdEntry.Valid() {
+ if !w.visitor.requiresAlloc() {
+
+ start = emptynext(start, pgdSize)
+ continue
+ }
+
+ pudEntries = w.pageTables.Allocator.NewPTEs()
+ pgdEntry.setPageTable(w.pageTables, pudEntries)
+ } else {
+ pudEntries = w.pageTables.Allocator.LookupPTEs(pgdEntry.Address())
+ }
+
+ clearPUDEntries := uint16(0)
+
+ for pudIndex := uint16((start & pudMask) >> pudShift); start < end && pudIndex < entriesPerPage; pudIndex++ {
+ var (
+ pudEntry = &pudEntries[pudIndex]
+ pmdEntries *PTEs
+ )
+ if !pudEntry.Valid() {
+ if !w.visitor.requiresAlloc() {
+
+ clearPUDEntries++
+ start = emptynext(start, pudSize)
+ continue
+ }
+
+ if start&(pudSize-1) == 0 && end-start >= pudSize {
+ pudEntry.SetSect()
+ if !w.visitor.visit(uintptr(start), pudEntry, pudSize-1) {
+ return false
+ }
+ if pudEntry.Valid() {
+ start = emptynext(start, pudSize)
+ continue
+ }
+ }
+
+ pmdEntries = w.pageTables.Allocator.NewPTEs()
+ pudEntry.setPageTable(w.pageTables, pmdEntries)
+
+ } else if pudEntry.IsSect() {
+
+ if w.visitor.requiresSplit() && (start&(pudSize-1) != 0 || end < emptynext(start, pudSize)) {
+
+ pmdEntries = w.pageTables.Allocator.NewPTEs()
+ for index := uint16(0); index < entriesPerPage; index++ {
+ pmdEntries[index].SetSect()
+ pmdEntries[index].Set(
+ pudEntry.Address()+(pmdSize*uintptr(index)),
+ pudEntry.Opts())
+ }
+ pudEntry.setPageTable(w.pageTables, pmdEntries)
+ } else {
+
+ if !w.visitor.visit(uintptr(start), pudEntry, pudSize-1) {
+ return false
+ }
+
+ if !pudEntry.Valid() {
+ clearPUDEntries++
+ }
+
+ start = emptynext(start, pudSize)
+ continue
+ }
+
+ } else {
+ pmdEntries = w.pageTables.Allocator.LookupPTEs(pudEntry.Address())
+ }
+
+ clearPMDEntries := uint16(0)
+
+ for pmdIndex := uint16((start & pmdMask) >> pmdShift); start < end && pmdIndex < entriesPerPage; pmdIndex++ {
+ var (
+ pmdEntry = &pmdEntries[pmdIndex]
+ pteEntries *PTEs
+ )
+ if !pmdEntry.Valid() {
+ if !w.visitor.requiresAlloc() {
+
+ clearPMDEntries++
+ start = emptynext(start, pmdSize)
+ continue
+ }
+
+ if start&(pmdSize-1) == 0 && end-start >= pmdSize {
+ pmdEntry.SetSect()
+ if !w.visitor.visit(uintptr(start), pmdEntry, pmdSize-1) {
+ return false
+ }
+ if pmdEntry.Valid() {
+ start = emptynext(start, pmdSize)
+ continue
+ }
+ }
+
+ pteEntries = w.pageTables.Allocator.NewPTEs()
+ pmdEntry.setPageTable(w.pageTables, pteEntries)
+
+ } else if pmdEntry.IsSect() {
+
+ if w.visitor.requiresSplit() && (start&(pmdSize-1) != 0 || end < emptynext(start, pmdSize)) {
+
+ pteEntries = w.pageTables.Allocator.NewPTEs()
+ for index := uint16(0); index < entriesPerPage; index++ {
+ pteEntries[index].Set(
+ pmdEntry.Address()+(pteSize*uintptr(index)),
+ pmdEntry.Opts())
+ }
+ pmdEntry.setPageTable(w.pageTables, pteEntries)
+ } else {
+
+ if !w.visitor.visit(uintptr(start), pmdEntry, pmdSize-1) {
+ return false
+ }
+
+ if !pmdEntry.Valid() {
+ clearPMDEntries++
+ }
+
+ start = emptynext(start, pmdSize)
+ continue
+ }
+
+ } else {
+ pteEntries = w.pageTables.Allocator.LookupPTEs(pmdEntry.Address())
+ }
+
+ clearPTEEntries := uint16(0)
+
+ for pteIndex := uint16((start & pteMask) >> pteShift); start < end && pteIndex < entriesPerPage; pteIndex++ {
+ var (
+ pteEntry = &pteEntries[pteIndex]
+ )
+ if !pteEntry.Valid() && !w.visitor.requiresAlloc() {
+ clearPTEEntries++
+ start += pteSize
+ continue
+ }
+
+ if !w.visitor.visit(uintptr(start), pteEntry, pteSize-1) {
+ return false
+ }
+ if !pteEntry.Valid() {
+ if w.visitor.requiresAlloc() {
+ panic("PTE not set after iteration with requiresAlloc!")
+ }
+ clearPTEEntries++
+ }
+
+ start += pteSize
+ continue
+ }
+
+ if clearPTEEntries == entriesPerPage {
+ pmdEntry.Clear()
+ w.pageTables.Allocator.FreePTEs(pteEntries)
+ clearPMDEntries++
+ }
+ }
+
+ if clearPMDEntries == entriesPerPage {
+ pudEntry.Clear()
+ w.pageTables.Allocator.FreePTEs(pmdEntries)
+ clearPUDEntries++
+ }
+ }
+
+ if clearPUDEntries == entriesPerPage {
+ pgdEntry.Clear()
+ w.pageTables.Allocator.FreePTEs(pudEntries)
+ }
+ }
+ return true
+}
+
+// Walker walks page tables.
+type emptyWalker struct {
+ // pageTables are the tables to walk.
+ pageTables *PageTables
+
+ // Visitor is the set of arguments.
+ visitor emptyVisitor
+}
+
+// iterateRange iterates over all appropriate levels of page tables for the given range.
+//
+// If requiresAlloc is true, then Set _must_ be called on all given PTEs. The
+// exception is super pages. If a valid super page (huge or jumbo) cannot be
+// installed, then the walk will continue to individual entries.
+//
+// This algorithm will attempt to maximize the use of super/sect pages whenever
+// possible. Whether a super page is provided will be clear through the range
+// provided in the callback.
+//
+// Note that if requiresAlloc is true, then no gaps will be present. However,
+// if alloc is not set, then the iteration will likely be full of gaps.
+//
+// Note that this function should generally be avoided in favor of Map, Unmap,
+// etc. when not necessary.
+//
+// Precondition: start must be page-aligned.
+// Precondition: start must be less than end.
+// Precondition: If requiresAlloc is true, then start and end should not span
+// non-canonical ranges. If they do, a panic will result.
+//
+//go:nosplit
+func (w *emptyWalker) iterateRange(start, end uintptr) {
+ if start%pteSize != 0 {
+ panic("unaligned start")
+ }
+ if end < start {
+ panic("start > end")
+ }
+ if start < lowerTop {
+ if end <= lowerTop {
+ w.iterateRangeCanonical(start, end)
+ } else if end > lowerTop && end <= upperBottom {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ w.iterateRangeCanonical(start, lowerTop)
+ } else {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ if !w.iterateRangeCanonical(start, lowerTop) {
+ return
+ }
+ w.iterateRangeCanonical(upperBottom, end)
+ }
+ } else if start < upperBottom {
+ if end <= upperBottom {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ } else {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ w.iterateRangeCanonical(upperBottom, end)
+ }
+ } else {
+ w.iterateRangeCanonical(start, end)
+ }
+}
+
+// next returns the next address quantized by the given size.
+//
+//go:nosplit
+func emptynext(start uintptr, size uintptr) uintptr {
+ start &= ^(size - 1)
+ start += size
+ return start
+}
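
The arm64 variant differs from amd64 in its choice of root: lower-half addresses walk pageTables.root, while upper-half addresses (at or above upperBottom) walk archPageTables.root, mirroring the hardware's split between the two translation-table bases. The selection in isolation, assuming root and archPageTables.root have the *PTEs type that the indexing in the walker implies:

package pagetables

// rootFor is an illustrative helper showing how the arm64 walkers pick the
// page-table root for a given address.
func rootFor(pt *PageTables, addr uintptr) *PTEs {
	if addr >= upperBottom {
		return pt.archPageTables.root // upper (kernel) half
	}
	return pt.root // lower (user) half
}
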
diff --git a/pkg/ring0/pagetables/walker_lookup_amd64.go b/pkg/ring0/pagetables/walker_lookup_amd64.go
new file mode 100644
index 000000000..c92c1cb44
--- /dev/null
+++ b/pkg/ring0/pagetables/walker_lookup_amd64.go
@@ -0,0 +1,265 @@
+// +build amd64
+
+package pagetables
+
+// iterateRangeCanonical walks a canonical range.
+//
+//go:nosplit
+func (w *lookupWalker) iterateRangeCanonical(start, end uintptr) bool {
+ for pgdIndex := uint16((start & pgdMask) >> pgdShift); start < end && pgdIndex < entriesPerPage; pgdIndex++ {
+ var (
+ pgdEntry = &w.pageTables.root[pgdIndex]
+ pudEntries *PTEs
+ )
+ if !pgdEntry.Valid() {
+ if !w.visitor.requiresAlloc() {
+
+ start = lookupnext(start, pgdSize)
+ continue
+ }
+
+ pudEntries = w.pageTables.Allocator.NewPTEs()
+ pgdEntry.setPageTable(w.pageTables, pudEntries)
+ } else {
+ pudEntries = w.pageTables.Allocator.LookupPTEs(pgdEntry.Address())
+ }
+
+ clearPUDEntries := uint16(0)
+
+ for pudIndex := uint16((start & pudMask) >> pudShift); start < end && pudIndex < entriesPerPage; pudIndex++ {
+ var (
+ pudEntry = &pudEntries[pudIndex]
+ pmdEntries *PTEs
+ )
+ if !pudEntry.Valid() {
+ if !w.visitor.requiresAlloc() {
+
+ clearPUDEntries++
+ start = lookupnext(start, pudSize)
+ continue
+ }
+
+ if start&(pudSize-1) == 0 && end-start >= pudSize {
+ pudEntry.SetSuper()
+ if !w.visitor.visit(uintptr(start&^(pudSize-1)), pudEntry, pudSize-1) {
+ return false
+ }
+ if pudEntry.Valid() {
+ start = lookupnext(start, pudSize)
+ continue
+ }
+ }
+
+ pmdEntries = w.pageTables.Allocator.NewPTEs()
+ pudEntry.setPageTable(w.pageTables, pmdEntries)
+
+ } else if pudEntry.IsSuper() {
+
+ if w.visitor.requiresSplit() && (start&(pudSize-1) != 0 || end < lookupnext(start, pudSize)) {
+
+ pmdEntries = w.pageTables.Allocator.NewPTEs()
+ for index := uint16(0); index < entriesPerPage; index++ {
+ pmdEntries[index].SetSuper()
+ pmdEntries[index].Set(
+ pudEntry.Address()+(pmdSize*uintptr(index)),
+ pudEntry.Opts())
+ }
+ pudEntry.setPageTable(w.pageTables, pmdEntries)
+ } else {
+
+ if !w.visitor.visit(uintptr(start&^(pudSize-1)), pudEntry, pudSize-1) {
+ return false
+ }
+
+ if !pudEntry.Valid() {
+ clearPUDEntries++
+ }
+
+ start = lookupnext(start, pudSize)
+ continue
+ }
+ } else {
+ pmdEntries = w.pageTables.Allocator.LookupPTEs(pudEntry.Address())
+ }
+
+ clearPMDEntries := uint16(0)
+
+ for pmdIndex := uint16((start & pmdMask) >> pmdShift); start < end && pmdIndex < entriesPerPage; pmdIndex++ {
+ var (
+ pmdEntry = &pmdEntries[pmdIndex]
+ pteEntries *PTEs
+ )
+ if !pmdEntry.Valid() {
+ if !w.visitor.requiresAlloc() {
+
+ clearPMDEntries++
+ start = lookupnext(start, pmdSize)
+ continue
+ }
+
+ if start&(pmdSize-1) == 0 && end-start >= pmdSize {
+ pmdEntry.SetSuper()
+ if !w.visitor.visit(uintptr(start&^(pmdSize-1)), pmdEntry, pmdSize-1) {
+ return false
+ }
+ if pmdEntry.Valid() {
+ start = lookupnext(start, pmdSize)
+ continue
+ }
+ }
+
+ pteEntries = w.pageTables.Allocator.NewPTEs()
+ pmdEntry.setPageTable(w.pageTables, pteEntries)
+
+ } else if pmdEntry.IsSuper() {
+
+ if w.visitor.requiresSplit() && (start&(pmdSize-1) != 0 || end < lookupnext(start, pmdSize)) {
+
+ pteEntries = w.pageTables.Allocator.NewPTEs()
+ for index := uint16(0); index < entriesPerPage; index++ {
+ pteEntries[index].Set(
+ pmdEntry.Address()+(pteSize*uintptr(index)),
+ pmdEntry.Opts())
+ }
+ pmdEntry.setPageTable(w.pageTables, pteEntries)
+ } else {
+
+ if !w.visitor.visit(uintptr(start&^(pmdSize-1)), pmdEntry, pmdSize-1) {
+ return false
+ }
+
+ if !pmdEntry.Valid() {
+ clearPMDEntries++
+ }
+
+ start = lookupnext(start, pmdSize)
+ continue
+ }
+ } else {
+ pteEntries = w.pageTables.Allocator.LookupPTEs(pmdEntry.Address())
+ }
+
+ clearPTEEntries := uint16(0)
+
+ for pteIndex := uint16((start & pteMask) >> pteShift); start < end && pteIndex < entriesPerPage; pteIndex++ {
+ var (
+ pteEntry = &pteEntries[pteIndex]
+ )
+ if !pteEntry.Valid() && !w.visitor.requiresAlloc() {
+ clearPTEEntries++
+ start += pteSize
+ continue
+ }
+
+ if !w.visitor.visit(uintptr(start&^(pteSize-1)), pteEntry, pteSize-1) {
+ return false
+ }
+ if !pteEntry.Valid() && !w.visitor.requiresAlloc() {
+ clearPTEEntries++
+ }
+
+ start += pteSize
+ continue
+ }
+
+ if clearPTEEntries == entriesPerPage {
+ pmdEntry.Clear()
+ w.pageTables.Allocator.FreePTEs(pteEntries)
+ clearPMDEntries++
+ }
+ }
+
+ if clearPMDEntries == entriesPerPage {
+ pudEntry.Clear()
+ w.pageTables.Allocator.FreePTEs(pmdEntries)
+ clearPUDEntries++
+ }
+ }
+
+ if clearPUDEntries == entriesPerPage {
+ pgdEntry.Clear()
+ w.pageTables.Allocator.FreePTEs(pudEntries)
+ }
+ }
+ return true
+}
+
+// Walker walks page tables.
+type lookupWalker struct {
+ // pageTables are the tables to walk.
+ pageTables *PageTables
+
+ // Visitor is the set of arguments.
+ visitor lookupVisitor
+}
+
+// iterateRange iterates over all appropriate levels of page tables for the given range.
+//
+// If requiresAlloc is true, then Set _must_ be called on all given PTEs. The
+// exception is super pages. If a valid super page (huge or jumbo) cannot be
+// installed, then the walk will continue to individual entries.
+//
+// This algorithm will attempt to maximize the use of super/sect pages whenever
+// possible. Whether a super page is provided will be clear through the range
+// provided in the callback.
+//
+// Note that if requiresAlloc is true, then no gaps will be present. However,
+// if alloc is not set, then the iteration will likely be full of gaps.
+//
+// Note that this function should generally be avoided in favor of Map, Unmap,
+// etc. when not necessary.
+//
+// Precondition: start must be page-aligned.
+// Precondition: start must be less than end.
+// Precondition: If requiresAlloc is true, then start and end should not span
+// non-canonical ranges. If they do, a panic will result.
+//
+//go:nosplit
+func (w *lookupWalker) iterateRange(start, end uintptr) {
+ if start%pteSize != 0 {
+ panic("unaligned start")
+ }
+ if end < start {
+ panic("start > end")
+ }
+ if start < lowerTop {
+ if end <= lowerTop {
+ w.iterateRangeCanonical(start, end)
+ } else if end > lowerTop && end <= upperBottom {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ w.iterateRangeCanonical(start, lowerTop)
+ } else {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ if !w.iterateRangeCanonical(start, lowerTop) {
+ return
+ }
+ w.iterateRangeCanonical(upperBottom, end)
+ }
+ } else if start < upperBottom {
+ if end <= upperBottom {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ } else {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ w.iterateRangeCanonical(upperBottom, end)
+ }
+ } else {
+ w.iterateRangeCanonical(start, end)
+ }
+}
+
+// next returns the next address quantized by the given size.
+//
+//go:nosplit
+func lookupnext(start uintptr, size uintptr) uintptr {
+ start &= ^(size - 1)
+ start += size
+ return start
+}
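
Every per-op next helper (emptynext, lookupnext, mapnext, ...) performs the same quantization: clear the low bits of start and advance by size, yielding the next size-aligned boundary. A standalone check using the conventional 2 MiB amd64 pmd size (the constant is stated here for illustration, not taken from the package):

package main

import "fmt"

// next rounds start up to the next boundary of size, exactly as the
// generated helpers do.
func next(start, size uintptr) uintptr {
	start &= ^(size - 1)
	return start + size
}

func main() {
	const pmdSize = 1 << 21 // 2 MiB
	fmt.Printf("%#x\n", next(0x400123, pmdSize)) // prints 0x600000
}
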
diff --git a/pkg/ring0/pagetables/walker_lookup_arm64.go b/pkg/ring0/pagetables/walker_lookup_arm64.go
new file mode 100644
index 000000000..74062a00a
--- /dev/null
+++ b/pkg/ring0/pagetables/walker_lookup_arm64.go
@@ -0,0 +1,275 @@
+// +build arm64
+
+package pagetables
+
+// iterateRangeCanonical walks a canonical range.
+//
+//go:nosplit
+func (w *lookupWalker) iterateRangeCanonical(start, end uintptr) bool {
+ pgdEntryIndex := w.pageTables.root
+ if start >= upperBottom {
+ pgdEntryIndex = w.pageTables.archPageTables.root
+ }
+
+ for pgdIndex := (uint16((start & pgdMask) >> pgdShift)); start < end && pgdIndex < entriesPerPage; pgdIndex++ {
+ var (
+ pgdEntry = &pgdEntryIndex[pgdIndex]
+ pudEntries *PTEs
+ )
+ if !pgdEntry.Valid() {
+ if !w.visitor.requiresAlloc() {
+
+ start = lookupnext(start, pgdSize)
+ continue
+ }
+
+ pudEntries = w.pageTables.Allocator.NewPTEs()
+ pgdEntry.setPageTable(w.pageTables, pudEntries)
+ } else {
+ pudEntries = w.pageTables.Allocator.LookupPTEs(pgdEntry.Address())
+ }
+
+ clearPUDEntries := uint16(0)
+
+ for pudIndex := uint16((start & pudMask) >> pudShift); start < end && pudIndex < entriesPerPage; pudIndex++ {
+ var (
+ pudEntry = &pudEntries[pudIndex]
+ pmdEntries *PTEs
+ )
+ if !pudEntry.Valid() {
+ if !w.visitor.requiresAlloc() {
+
+ clearPUDEntries++
+ start = lookupnext(start, pudSize)
+ continue
+ }
+
+ if start&(pudSize-1) == 0 && end-start >= pudSize {
+ pudEntry.SetSect()
+ if !w.visitor.visit(uintptr(start), pudEntry, pudSize-1) {
+ return false
+ }
+ if pudEntry.Valid() {
+ start = lookupnext(start, pudSize)
+ continue
+ }
+ }
+
+ pmdEntries = w.pageTables.Allocator.NewPTEs()
+ pudEntry.setPageTable(w.pageTables, pmdEntries)
+
+ } else if pudEntry.IsSect() {
+
+ if w.visitor.requiresSplit() && (start&(pudSize-1) != 0 || end < lookupnext(start, pudSize)) {
+
+ pmdEntries = w.pageTables.Allocator.NewPTEs()
+ for index := uint16(0); index < entriesPerPage; index++ {
+ pmdEntries[index].SetSect()
+ pmdEntries[index].Set(
+ pudEntry.Address()+(pmdSize*uintptr(index)),
+ pudEntry.Opts())
+ }
+ pudEntry.setPageTable(w.pageTables, pmdEntries)
+ } else {
+
+ if !w.visitor.visit(uintptr(start), pudEntry, pudSize-1) {
+ return false
+ }
+
+ if !pudEntry.Valid() {
+ clearPUDEntries++
+ }
+
+ start = lookupnext(start, pudSize)
+ continue
+ }
+
+ } else {
+ pmdEntries = w.pageTables.Allocator.LookupPTEs(pudEntry.Address())
+ }
+
+ clearPMDEntries := uint16(0)
+
+ for pmdIndex := uint16((start & pmdMask) >> pmdShift); start < end && pmdIndex < entriesPerPage; pmdIndex++ {
+ var (
+ pmdEntry = &pmdEntries[pmdIndex]
+ pteEntries *PTEs
+ )
+ if !pmdEntry.Valid() {
+ if !w.visitor.requiresAlloc() {
+
+ clearPMDEntries++
+ start = lookupnext(start, pmdSize)
+ continue
+ }
+
+ if start&(pmdSize-1) == 0 && end-start >= pmdSize {
+ pmdEntry.SetSect()
+ if !w.visitor.visit(uintptr(start), pmdEntry, pmdSize-1) {
+ return false
+ }
+ if pmdEntry.Valid() {
+ start = lookupnext(start, pmdSize)
+ continue
+ }
+ }
+
+ pteEntries = w.pageTables.Allocator.NewPTEs()
+ pmdEntry.setPageTable(w.pageTables, pteEntries)
+
+ } else if pmdEntry.IsSect() {
+
+ if w.visitor.requiresSplit() && (start&(pmdSize-1) != 0 || end < lookupnext(start, pmdSize)) {
+
+ pteEntries = w.pageTables.Allocator.NewPTEs()
+ for index := uint16(0); index < entriesPerPage; index++ {
+ pteEntries[index].Set(
+ pmdEntry.Address()+(pteSize*uintptr(index)),
+ pmdEntry.Opts())
+ }
+ pmdEntry.setPageTable(w.pageTables, pteEntries)
+ } else {
+
+ if !w.visitor.visit(uintptr(start), pmdEntry, pmdSize-1) {
+ return false
+ }
+
+ if !pmdEntry.Valid() {
+ clearPMDEntries++
+ }
+
+ start = lookupnext(start, pmdSize)
+ continue
+ }
+
+ } else {
+ pteEntries = w.pageTables.Allocator.LookupPTEs(pmdEntry.Address())
+ }
+
+ clearPTEEntries := uint16(0)
+
+ for pteIndex := uint16((start & pteMask) >> pteShift); start < end && pteIndex < entriesPerPage; pteIndex++ {
+ var (
+ pteEntry = &pteEntries[pteIndex]
+ )
+ if !pteEntry.Valid() && !w.visitor.requiresAlloc() {
+ clearPTEEntries++
+ start += pteSize
+ continue
+ }
+
+ if !w.visitor.visit(uintptr(start), pteEntry, pteSize-1) {
+ return false
+ }
+ if !pteEntry.Valid() {
+ if w.visitor.requiresAlloc() {
+ panic("PTE not set after iteration with requiresAlloc!")
+ }
+ clearPTEEntries++
+ }
+
+ start += pteSize
+ continue
+ }
+
+ if clearPTEEntries == entriesPerPage {
+ pmdEntry.Clear()
+ w.pageTables.Allocator.FreePTEs(pteEntries)
+ clearPMDEntries++
+ }
+ }
+
+ if clearPMDEntries == entriesPerPage {
+ pudEntry.Clear()
+ w.pageTables.Allocator.FreePTEs(pmdEntries)
+ clearPUDEntries++
+ }
+ }
+
+ if clearPUDEntries == entriesPerPage {
+ pgdEntry.Clear()
+ w.pageTables.Allocator.FreePTEs(pudEntries)
+ }
+ }
+ return true
+}
+
+// Walker walks page tables.
+type lookupWalker struct {
+ // pageTables are the tables to walk.
+ pageTables *PageTables
+
+ // Visitor is the set of arguments.
+ visitor lookupVisitor
+}
+
+// iterateRange iterates over all appropriate levels of page tables for the given range.
+//
+// If requiresAlloc is true, then Set _must_ be called on all given PTEs. The
+// exception is super pages. If a valid super page (huge or jumbo) cannot be
+// installed, then the walk will continue to individual entries.
+//
+// This algorithm will attempt to maximize the use of super/sect pages whenever
+// possible. Whether a super page is provided will be clear through the range
+// provided in the callback.
+//
+// Note that if requiresAlloc is true, then no gaps will be present. However,
+// if alloc is not set, then the iteration will likely be full of gaps.
+//
+// Note that this function should generally be avoided in favor of Map, Unmap,
+// etc. when not necessary.
+//
+// Precondition: start must be page-aligned.
+// Precondition: start must be less than end.
+// Precondition: If requiresAlloc is true, then start and end should not span
+// non-canonical ranges. If they do, a panic will result.
+//
+//go:nosplit
+func (w *lookupWalker) iterateRange(start, end uintptr) {
+ if start%pteSize != 0 {
+ panic("unaligned start")
+ }
+ if end < start {
+ panic("start > end")
+ }
+ if start < lowerTop {
+ if end <= lowerTop {
+ w.iterateRangeCanonical(start, end)
+ } else if end > lowerTop && end <= upperBottom {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ w.iterateRangeCanonical(start, lowerTop)
+ } else {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ if !w.iterateRangeCanonical(start, lowerTop) {
+ return
+ }
+ w.iterateRangeCanonical(upperBottom, end)
+ }
+ } else if start < upperBottom {
+ if end <= upperBottom {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ } else {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ w.iterateRangeCanonical(upperBottom, end)
+ }
+ } else {
+ w.iterateRangeCanonical(start, end)
+ }
+}
+
+// next returns the next address quantized by the given size.
+//
+//go:nosplit
+func lookupnext(start uintptr, size uintptr) uintptr {
+ start &= ^(size - 1)
+ start += size
+ return start
+}
diff --git a/pkg/ring0/pagetables/walker_map_amd64.go b/pkg/ring0/pagetables/walker_map_amd64.go
new file mode 100644
index 000000000..1c6c1a032
--- /dev/null
+++ b/pkg/ring0/pagetables/walker_map_amd64.go
@@ -0,0 +1,265 @@
+// +build amd64
+
+package pagetables
+
+// iterateRangeCanonical walks a canonical range.
+//
+//go:nosplit
+func (w *mapWalker) iterateRangeCanonical(start, end uintptr) bool {
+ for pgdIndex := uint16((start & pgdMask) >> pgdShift); start < end && pgdIndex < entriesPerPage; pgdIndex++ {
+ var (
+ pgdEntry = &w.pageTables.root[pgdIndex]
+ pudEntries *PTEs
+ )
+ if !pgdEntry.Valid() {
+ if !w.visitor.requiresAlloc() {
+
+ start = mapnext(start, pgdSize)
+ continue
+ }
+
+ pudEntries = w.pageTables.Allocator.NewPTEs()
+ pgdEntry.setPageTable(w.pageTables, pudEntries)
+ } else {
+ pudEntries = w.pageTables.Allocator.LookupPTEs(pgdEntry.Address())
+ }
+
+ clearPUDEntries := uint16(0)
+
+ for pudIndex := uint16((start & pudMask) >> pudShift); start < end && pudIndex < entriesPerPage; pudIndex++ {
+ var (
+ pudEntry = &pudEntries[pudIndex]
+ pmdEntries *PTEs
+ )
+ if !pudEntry.Valid() {
+ if !w.visitor.requiresAlloc() {
+
+ clearPUDEntries++
+ start = mapnext(start, pudSize)
+ continue
+ }
+
+ if start&(pudSize-1) == 0 && end-start >= pudSize {
+ pudEntry.SetSuper()
+ if !w.visitor.visit(uintptr(start&^(pudSize-1)), pudEntry, pudSize-1) {
+ return false
+ }
+ if pudEntry.Valid() {
+ start = mapnext(start, pudSize)
+ continue
+ }
+ }
+
+ pmdEntries = w.pageTables.Allocator.NewPTEs()
+ pudEntry.setPageTable(w.pageTables, pmdEntries)
+
+ } else if pudEntry.IsSuper() {
+
+ if w.visitor.requiresSplit() && (start&(pudSize-1) != 0 || end < mapnext(start, pudSize)) {
+
+ pmdEntries = w.pageTables.Allocator.NewPTEs()
+ for index := uint16(0); index < entriesPerPage; index++ {
+ pmdEntries[index].SetSuper()
+ pmdEntries[index].Set(
+ pudEntry.Address()+(pmdSize*uintptr(index)),
+ pudEntry.Opts())
+ }
+ pudEntry.setPageTable(w.pageTables, pmdEntries)
+ } else {
+
+ if !w.visitor.visit(uintptr(start&^(pudSize-1)), pudEntry, pudSize-1) {
+ return false
+ }
+
+ if !pudEntry.Valid() {
+ clearPUDEntries++
+ }
+
+ start = mapnext(start, pudSize)
+ continue
+ }
+ } else {
+ pmdEntries = w.pageTables.Allocator.LookupPTEs(pudEntry.Address())
+ }
+
+ clearPMDEntries := uint16(0)
+
+ for pmdIndex := uint16((start & pmdMask) >> pmdShift); start < end && pmdIndex < entriesPerPage; pmdIndex++ {
+ var (
+ pmdEntry = &pmdEntries[pmdIndex]
+ pteEntries *PTEs
+ )
+ if !pmdEntry.Valid() {
+ if !w.visitor.requiresAlloc() {
+
+ clearPMDEntries++
+ start = mapnext(start, pmdSize)
+ continue
+ }
+
+ if start&(pmdSize-1) == 0 && end-start >= pmdSize {
+ pmdEntry.SetSuper()
+ if !w.visitor.visit(uintptr(start&^(pmdSize-1)), pmdEntry, pmdSize-1) {
+ return false
+ }
+ if pmdEntry.Valid() {
+ start = mapnext(start, pmdSize)
+ continue
+ }
+ }
+
+ pteEntries = w.pageTables.Allocator.NewPTEs()
+ pmdEntry.setPageTable(w.pageTables, pteEntries)
+
+ } else if pmdEntry.IsSuper() {
+
+ if w.visitor.requiresSplit() && (start&(pmdSize-1) != 0 || end < mapnext(start, pmdSize)) {
+
+ pteEntries = w.pageTables.Allocator.NewPTEs()
+ for index := uint16(0); index < entriesPerPage; index++ {
+ pteEntries[index].Set(
+ pmdEntry.Address()+(pteSize*uintptr(index)),
+ pmdEntry.Opts())
+ }
+ pmdEntry.setPageTable(w.pageTables, pteEntries)
+ } else {
+
+ if !w.visitor.visit(uintptr(start&^(pmdSize-1)), pmdEntry, pmdSize-1) {
+ return false
+ }
+
+ if !pmdEntry.Valid() {
+ clearPMDEntries++
+ }
+
+ start = mapnext(start, pmdSize)
+ continue
+ }
+ } else {
+ pteEntries = w.pageTables.Allocator.LookupPTEs(pmdEntry.Address())
+ }
+
+ clearPTEEntries := uint16(0)
+
+ for pteIndex := uint16((start & pteMask) >> pteShift); start < end && pteIndex < entriesPerPage; pteIndex++ {
+ var (
+ pteEntry = &pteEntries[pteIndex]
+ )
+ if !pteEntry.Valid() && !w.visitor.requiresAlloc() {
+ clearPTEEntries++
+ start += pteSize
+ continue
+ }
+
+ if !w.visitor.visit(uintptr(start&^(pteSize-1)), pteEntry, pteSize-1) {
+ return false
+ }
+ if !pteEntry.Valid() && !w.visitor.requiresAlloc() {
+ clearPTEEntries++
+ }
+
+ start += pteSize
+ continue
+ }
+
+ if clearPTEEntries == entriesPerPage {
+ pmdEntry.Clear()
+ w.pageTables.Allocator.FreePTEs(pteEntries)
+ clearPMDEntries++
+ }
+ }
+
+ if clearPMDEntries == entriesPerPage {
+ pudEntry.Clear()
+ w.pageTables.Allocator.FreePTEs(pmdEntries)
+ clearPUDEntries++
+ }
+ }
+
+ if clearPUDEntries == entriesPerPage {
+ pgdEntry.Clear()
+ w.pageTables.Allocator.FreePTEs(pudEntries)
+ }
+ }
+ return true
+}
+
+// Walker walks page tables.
+type mapWalker struct {
+ // pageTables are the tables to walk.
+ pageTables *PageTables
+
+ // Visitor is the set of arguments.
+ visitor mapVisitor
+}
+
+// iterateRange iterates over all appropriate levels of page tables for the given range.
+//
+// If requiresAlloc is true, then Set _must_ be called on all given PTEs. The
+// exception is super pages. If a valid super page (huge or jumbo) cannot be
+// installed, then the walk will continue to individual entries.
+//
+// This algorithm will attempt to maximize the use of super/sect pages whenever
+// possible. Whether a super page is provided will be clear through the range
+// provided in the callback.
+//
+// Note that if requiresAlloc is true, then no gaps will be present. However,
+// if alloc is not set, then the iteration will likely be full of gaps.
+//
+// Note that this function should generally be avoided in favor of Map, Unmap,
+// etc. when not necessary.
+//
+// Precondition: start must be page-aligned.
+// Precondition: start must be less than end.
+// Precondition: If requiresAlloc is true, then start and end should not span
+// non-canonical ranges. If they do, a panic will result.
+//
+//go:nosplit
+func (w *mapWalker) iterateRange(start, end uintptr) {
+ if start%pteSize != 0 {
+ panic("unaligned start")
+ }
+ if end < start {
+ panic("start > end")
+ }
+ if start < lowerTop {
+ if end <= lowerTop {
+ w.iterateRangeCanonical(start, end)
+ } else if end > lowerTop && end <= upperBottom {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ w.iterateRangeCanonical(start, lowerTop)
+ } else {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ if !w.iterateRangeCanonical(start, lowerTop) {
+ return
+ }
+ w.iterateRangeCanonical(upperBottom, end)
+ }
+ } else if start < upperBottom {
+ if end <= upperBottom {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ } else {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ w.iterateRangeCanonical(upperBottom, end)
+ }
+ } else {
+ w.iterateRangeCanonical(start, end)
+ }
+}
+
+// mapnext returns the next address quantized by the given size.
+//
+//go:nosplit
+func mapnext(start uintptr, size uintptr) uintptr {
+ start &= ^(size - 1)
+ start += size
+ return start
+}
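
A quick note on the helper above: mapnext (like the unmapnext variants further down) rounds the address down to the alignment of the current level and then advances by exactly one entry of that size, which is what lets the walkers skip whole pgd/pud/pmd regions at a time. A minimal standalone sketch of the arithmetic, using an assumed 2 MiB entry size purely for illustration:

package main

import "fmt"

// next mirrors mapnext above: round start down to a size-aligned boundary,
// then step forward by exactly one entry of that size.
func next(start, size uintptr) uintptr {
	start &= ^(size - 1)
	start += size
	return start
}

func main() {
	const pmdSize = 1 << 21 // 2 MiB, illustrative only; not the package's constant
	fmt.Printf("%#x\n", next(0x200123, pmdSize)) // 0x400000
	fmt.Printf("%#x\n", next(0x400000, pmdSize)) // 0x600000
}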
diff --git a/pkg/ring0/pagetables/walker_map_arm64.go b/pkg/ring0/pagetables/walker_map_arm64.go
new file mode 100644
index 000000000..8223de306
--- /dev/null
+++ b/pkg/ring0/pagetables/walker_map_arm64.go
@@ -0,0 +1,275 @@
+// +build arm64
+
+package pagetables
+
+// iterateRangeCanonical walks a canonical range.
+//
+//go:nosplit
+func (w *mapWalker) iterateRangeCanonical(start, end uintptr) bool {
+ pgdEntryIndex := w.pageTables.root
+ if start >= upperBottom {
+ pgdEntryIndex = w.pageTables.archPageTables.root
+ }
+
+ for pgdIndex := (uint16((start & pgdMask) >> pgdShift)); start < end && pgdIndex < entriesPerPage; pgdIndex++ {
+ var (
+ pgdEntry = &pgdEntryIndex[pgdIndex]
+ pudEntries *PTEs
+ )
+ if !pgdEntry.Valid() {
+ if !w.visitor.requiresAlloc() {
+
+ start = mapnext(start, pgdSize)
+ continue
+ }
+
+ pudEntries = w.pageTables.Allocator.NewPTEs()
+ pgdEntry.setPageTable(w.pageTables, pudEntries)
+ } else {
+ pudEntries = w.pageTables.Allocator.LookupPTEs(pgdEntry.Address())
+ }
+
+ clearPUDEntries := uint16(0)
+
+ for pudIndex := uint16((start & pudMask) >> pudShift); start < end && pudIndex < entriesPerPage; pudIndex++ {
+ var (
+ pudEntry = &pudEntries[pudIndex]
+ pmdEntries *PTEs
+ )
+ if !pudEntry.Valid() {
+ if !w.visitor.requiresAlloc() {
+
+ clearPUDEntries++
+ start = mapnext(start, pudSize)
+ continue
+ }
+
+ if start&(pudSize-1) == 0 && end-start >= pudSize {
+ pudEntry.SetSect()
+ if !w.visitor.visit(uintptr(start), pudEntry, pudSize-1) {
+ return false
+ }
+ if pudEntry.Valid() {
+ start = mapnext(start, pudSize)
+ continue
+ }
+ }
+
+ pmdEntries = w.pageTables.Allocator.NewPTEs()
+ pudEntry.setPageTable(w.pageTables, pmdEntries)
+
+ } else if pudEntry.IsSect() {
+
+ if w.visitor.requiresSplit() && (start&(pudSize-1) != 0 || end < mapnext(start, pudSize)) {
+
+ pmdEntries = w.pageTables.Allocator.NewPTEs()
+ for index := uint16(0); index < entriesPerPage; index++ {
+ pmdEntries[index].SetSect()
+ pmdEntries[index].Set(
+ pudEntry.Address()+(pmdSize*uintptr(index)),
+ pudEntry.Opts())
+ }
+ pudEntry.setPageTable(w.pageTables, pmdEntries)
+ } else {
+
+ if !w.visitor.visit(uintptr(start), pudEntry, pudSize-1) {
+ return false
+ }
+
+ if !pudEntry.Valid() {
+ clearPUDEntries++
+ }
+
+ start = mapnext(start, pudSize)
+ continue
+ }
+
+ } else {
+ pmdEntries = w.pageTables.Allocator.LookupPTEs(pudEntry.Address())
+ }
+
+ clearPMDEntries := uint16(0)
+
+ for pmdIndex := uint16((start & pmdMask) >> pmdShift); start < end && pmdIndex < entriesPerPage; pmdIndex++ {
+ var (
+ pmdEntry = &pmdEntries[pmdIndex]
+ pteEntries *PTEs
+ )
+ if !pmdEntry.Valid() {
+ if !w.visitor.requiresAlloc() {
+
+ clearPMDEntries++
+ start = mapnext(start, pmdSize)
+ continue
+ }
+
+ if start&(pmdSize-1) == 0 && end-start >= pmdSize {
+ pmdEntry.SetSect()
+ if !w.visitor.visit(uintptr(start), pmdEntry, pmdSize-1) {
+ return false
+ }
+ if pmdEntry.Valid() {
+ start = mapnext(start, pmdSize)
+ continue
+ }
+ }
+
+ pteEntries = w.pageTables.Allocator.NewPTEs()
+ pmdEntry.setPageTable(w.pageTables, pteEntries)
+
+ } else if pmdEntry.IsSect() {
+
+ if w.visitor.requiresSplit() && (start&(pmdSize-1) != 0 || end < mapnext(start, pmdSize)) {
+
+ pteEntries = w.pageTables.Allocator.NewPTEs()
+ for index := uint16(0); index < entriesPerPage; index++ {
+ pteEntries[index].Set(
+ pmdEntry.Address()+(pteSize*uintptr(index)),
+ pmdEntry.Opts())
+ }
+ pmdEntry.setPageTable(w.pageTables, pteEntries)
+ } else {
+
+ if !w.visitor.visit(uintptr(start), pmdEntry, pmdSize-1) {
+ return false
+ }
+
+ if !pmdEntry.Valid() {
+ clearPMDEntries++
+ }
+
+ start = mapnext(start, pmdSize)
+ continue
+ }
+
+ } else {
+ pteEntries = w.pageTables.Allocator.LookupPTEs(pmdEntry.Address())
+ }
+
+ clearPTEEntries := uint16(0)
+
+ for pteIndex := uint16((start & pteMask) >> pteShift); start < end && pteIndex < entriesPerPage; pteIndex++ {
+ var (
+ pteEntry = &pteEntries[pteIndex]
+ )
+ if !pteEntry.Valid() && !w.visitor.requiresAlloc() {
+ clearPTEEntries++
+ start += pteSize
+ continue
+ }
+
+ if !w.visitor.visit(uintptr(start), pteEntry, pteSize-1) {
+ return false
+ }
+ if !pteEntry.Valid() {
+ if w.visitor.requiresAlloc() {
+ panic("PTE not set after iteration with requiresAlloc!")
+ }
+ clearPTEEntries++
+ }
+
+ start += pteSize
+ continue
+ }
+
+ if clearPTEEntries == entriesPerPage {
+ pmdEntry.Clear()
+ w.pageTables.Allocator.FreePTEs(pteEntries)
+ clearPMDEntries++
+ }
+ }
+
+ if clearPMDEntries == entriesPerPage {
+ pudEntry.Clear()
+ w.pageTables.Allocator.FreePTEs(pmdEntries)
+ clearPUDEntries++
+ }
+ }
+
+ if clearPUDEntries == entriesPerPage {
+ pgdEntry.Clear()
+ w.pageTables.Allocator.FreePTEs(pudEntries)
+ }
+ }
+ return true
+}
+
+// mapWalker walks page tables.
+type mapWalker struct {
+ // pageTables are the tables to walk.
+ pageTables *PageTables
+
+	// visitor is the set of arguments.
+ visitor mapVisitor
+}
+
+// iterateRange iterates over all appropriate levels of page tables for the given range.
+//
+// If requiresAlloc is true, then Set _must_ be called on all given PTEs. The
+// exception is super pages. If a valid super page (huge or jumbo) cannot be
+// installed, then the walk will continue to individual entries.
+//
+// This algorithm will attempt to maximize the use of super/sect pages whenever
+// possible. Whether a super page is provided will be clear through the range
+// provided in the callback.
+//
+// Note that if requiresAlloc is true, then no gaps will be present. However,
+// if requiresAlloc is false, then the iteration will likely be full of gaps.
+//
+// Note that this function should generally be avoided in favor of Map, Unmap,
+// etc. when not necessary.
+//
+// Precondition: start must be page-aligned.
+// Precondition: start must be less than end.
+// Precondition: If requiresAlloc is true, then start and end should not span
+// non-canonical ranges. If they do, a panic will result.
+//
+//go:nosplit
+func (w *mapWalker) iterateRange(start, end uintptr) {
+ if start%pteSize != 0 {
+ panic("unaligned start")
+ }
+ if end < start {
+ panic("start > end")
+ }
+ if start < lowerTop {
+ if end <= lowerTop {
+ w.iterateRangeCanonical(start, end)
+ } else if end > lowerTop && end <= upperBottom {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ w.iterateRangeCanonical(start, lowerTop)
+ } else {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ if !w.iterateRangeCanonical(start, lowerTop) {
+ return
+ }
+ w.iterateRangeCanonical(upperBottom, end)
+ }
+ } else if start < upperBottom {
+ if end <= upperBottom {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ } else {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ w.iterateRangeCanonical(upperBottom, end)
+ }
+ } else {
+ w.iterateRangeCanonical(start, end)
+ }
+}
+
+// mapnext returns the next address quantized by the given size.
+//
+//go:nosplit
+func mapnext(start uintptr, size uintptr) uintptr {
+ start &= ^(size - 1)
+ start += size
+ return start
+}
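
For orientation, the generated walkers above only ever call three visitor methods: requiresAlloc, requiresSplit and visit. A hypothetical visitor with that shape, written as if it lived inside the pagetables package (the real mapVisitor and unmapVisitor types are defined elsewhere in the package and differ), could look like this read-only example that just counts valid leaf entries:

// countingVisitor is a hypothetical visitor: it never allocates or splits,
// so the walk simply reports existing entries to it.
type countingVisitor struct {
	valid int
}

func (*countingVisitor) requiresAlloc() bool { return false }
func (*countingVisitor) requiresSplit() bool { return false }

// visit matches the call sites above: the entry's start address, the entry
// itself, and an alignment mask; returning false aborts the walk early.
func (v *countingVisitor) visit(start uintptr, pte *PTE, align uintptr) bool {
	if pte.Valid() {
		v.valid++
	}
	return true
}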
diff --git a/pkg/ring0/pagetables/walker_unmap_amd64.go b/pkg/ring0/pagetables/walker_unmap_amd64.go
new file mode 100644
index 000000000..82b27ab64
--- /dev/null
+++ b/pkg/ring0/pagetables/walker_unmap_amd64.go
@@ -0,0 +1,265 @@
+// +build amd64
+
+package pagetables
+
+// iterateRangeCanonical walks a canonical range.
+//
+//go:nosplit
+func (w *unmapWalker) iterateRangeCanonical(start, end uintptr) bool {
+ for pgdIndex := uint16((start & pgdMask) >> pgdShift); start < end && pgdIndex < entriesPerPage; pgdIndex++ {
+ var (
+ pgdEntry = &w.pageTables.root[pgdIndex]
+ pudEntries *PTEs
+ )
+ if !pgdEntry.Valid() {
+ if !w.visitor.requiresAlloc() {
+
+ start = unmapnext(start, pgdSize)
+ continue
+ }
+
+ pudEntries = w.pageTables.Allocator.NewPTEs()
+ pgdEntry.setPageTable(w.pageTables, pudEntries)
+ } else {
+ pudEntries = w.pageTables.Allocator.LookupPTEs(pgdEntry.Address())
+ }
+
+ clearPUDEntries := uint16(0)
+
+ for pudIndex := uint16((start & pudMask) >> pudShift); start < end && pudIndex < entriesPerPage; pudIndex++ {
+ var (
+ pudEntry = &pudEntries[pudIndex]
+ pmdEntries *PTEs
+ )
+ if !pudEntry.Valid() {
+ if !w.visitor.requiresAlloc() {
+
+ clearPUDEntries++
+ start = unmapnext(start, pudSize)
+ continue
+ }
+
+ if start&(pudSize-1) == 0 && end-start >= pudSize {
+ pudEntry.SetSuper()
+ if !w.visitor.visit(uintptr(start&^(pudSize-1)), pudEntry, pudSize-1) {
+ return false
+ }
+ if pudEntry.Valid() {
+ start = unmapnext(start, pudSize)
+ continue
+ }
+ }
+
+ pmdEntries = w.pageTables.Allocator.NewPTEs()
+ pudEntry.setPageTable(w.pageTables, pmdEntries)
+
+ } else if pudEntry.IsSuper() {
+
+ if w.visitor.requiresSplit() && (start&(pudSize-1) != 0 || end < unmapnext(start, pudSize)) {
+
+ pmdEntries = w.pageTables.Allocator.NewPTEs()
+ for index := uint16(0); index < entriesPerPage; index++ {
+ pmdEntries[index].SetSuper()
+ pmdEntries[index].Set(
+ pudEntry.Address()+(pmdSize*uintptr(index)),
+ pudEntry.Opts())
+ }
+ pudEntry.setPageTable(w.pageTables, pmdEntries)
+ } else {
+
+ if !w.visitor.visit(uintptr(start&^(pudSize-1)), pudEntry, pudSize-1) {
+ return false
+ }
+
+ if !pudEntry.Valid() {
+ clearPUDEntries++
+ }
+
+ start = unmapnext(start, pudSize)
+ continue
+ }
+ } else {
+ pmdEntries = w.pageTables.Allocator.LookupPTEs(pudEntry.Address())
+ }
+
+ clearPMDEntries := uint16(0)
+
+ for pmdIndex := uint16((start & pmdMask) >> pmdShift); start < end && pmdIndex < entriesPerPage; pmdIndex++ {
+ var (
+ pmdEntry = &pmdEntries[pmdIndex]
+ pteEntries *PTEs
+ )
+ if !pmdEntry.Valid() {
+ if !w.visitor.requiresAlloc() {
+
+ clearPMDEntries++
+ start = unmapnext(start, pmdSize)
+ continue
+ }
+
+ if start&(pmdSize-1) == 0 && end-start >= pmdSize {
+ pmdEntry.SetSuper()
+ if !w.visitor.visit(uintptr(start&^(pmdSize-1)), pmdEntry, pmdSize-1) {
+ return false
+ }
+ if pmdEntry.Valid() {
+ start = unmapnext(start, pmdSize)
+ continue
+ }
+ }
+
+ pteEntries = w.pageTables.Allocator.NewPTEs()
+ pmdEntry.setPageTable(w.pageTables, pteEntries)
+
+ } else if pmdEntry.IsSuper() {
+
+ if w.visitor.requiresSplit() && (start&(pmdSize-1) != 0 || end < unmapnext(start, pmdSize)) {
+
+ pteEntries = w.pageTables.Allocator.NewPTEs()
+ for index := uint16(0); index < entriesPerPage; index++ {
+ pteEntries[index].Set(
+ pmdEntry.Address()+(pteSize*uintptr(index)),
+ pmdEntry.Opts())
+ }
+ pmdEntry.setPageTable(w.pageTables, pteEntries)
+ } else {
+
+ if !w.visitor.visit(uintptr(start&^(pmdSize-1)), pmdEntry, pmdSize-1) {
+ return false
+ }
+
+ if !pmdEntry.Valid() {
+ clearPMDEntries++
+ }
+
+ start = unmapnext(start, pmdSize)
+ continue
+ }
+ } else {
+ pteEntries = w.pageTables.Allocator.LookupPTEs(pmdEntry.Address())
+ }
+
+ clearPTEEntries := uint16(0)
+
+ for pteIndex := uint16((start & pteMask) >> pteShift); start < end && pteIndex < entriesPerPage; pteIndex++ {
+ var (
+ pteEntry = &pteEntries[pteIndex]
+ )
+ if !pteEntry.Valid() && !w.visitor.requiresAlloc() {
+ clearPTEEntries++
+ start += pteSize
+ continue
+ }
+
+ if !w.visitor.visit(uintptr(start&^(pteSize-1)), pteEntry, pteSize-1) {
+ return false
+ }
+ if !pteEntry.Valid() && !w.visitor.requiresAlloc() {
+ clearPTEEntries++
+ }
+
+ start += pteSize
+ continue
+ }
+
+ if clearPTEEntries == entriesPerPage {
+ pmdEntry.Clear()
+ w.pageTables.Allocator.FreePTEs(pteEntries)
+ clearPMDEntries++
+ }
+ }
+
+ if clearPMDEntries == entriesPerPage {
+ pudEntry.Clear()
+ w.pageTables.Allocator.FreePTEs(pmdEntries)
+ clearPUDEntries++
+ }
+ }
+
+ if clearPUDEntries == entriesPerPage {
+ pgdEntry.Clear()
+ w.pageTables.Allocator.FreePTEs(pudEntries)
+ }
+ }
+ return true
+}
+
+// unmapWalker walks page tables.
+type unmapWalker struct {
+ // pageTables are the tables to walk.
+ pageTables *PageTables
+
+	// visitor is the set of arguments.
+ visitor unmapVisitor
+}
+
+// iterateRange iterates over all appropriate levels of page tables for the given range.
+//
+// If requiresAlloc is true, then Set _must_ be called on all given PTEs. The
+// exception is super pages. If a valid super page (huge or jumbo) cannot be
+// installed, then the walk will continue to individual entries.
+//
+// This algorithm will attempt to maximize the use of super/sect pages whenever
+// possible. Whether a super page is provided will be clear through the range
+// provided in the callback.
+//
+// Note that if requiresAlloc is true, then no gaps will be present. However,
+// if requiresAlloc is false, then the iteration will likely be full of gaps.
+//
+// Note that this function should generally be avoided in favor of Map, Unmap,
+// etc. when not necessary.
+//
+// Precondition: start must be page-aligned.
+// Precondition: start must be less than end.
+// Precondition: If requiresAlloc is true, then start and end should not span
+// non-canonical ranges. If they do, a panic will result.
+//
+//go:nosplit
+func (w *unmapWalker) iterateRange(start, end uintptr) {
+ if start%pteSize != 0 {
+ panic("unaligned start")
+ }
+ if end < start {
+ panic("start > end")
+ }
+ if start < lowerTop {
+ if end <= lowerTop {
+ w.iterateRangeCanonical(start, end)
+ } else if end > lowerTop && end <= upperBottom {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ w.iterateRangeCanonical(start, lowerTop)
+ } else {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ if !w.iterateRangeCanonical(start, lowerTop) {
+ return
+ }
+ w.iterateRangeCanonical(upperBottom, end)
+ }
+ } else if start < upperBottom {
+ if end <= upperBottom {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ } else {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ w.iterateRangeCanonical(upperBottom, end)
+ }
+ } else {
+ w.iterateRangeCanonical(start, end)
+ }
+}
+
+// unmapnext returns the next address quantized by the given size.
+//
+//go:nosplit
+func unmapnext(start uintptr, size uintptr) uintptr {
+ start &= ^(size - 1)
+ start += size
+ return start
+}
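
One detail worth calling out in the walkers above: a child table is returned to the allocator only once every one of its entriesPerPage entries has become invalid, at which point the parent entry is cleared as well; that is what the clearPTEEntries/clearPMDEntries/clearPUDEntries counters track. An illustrative helper (not part of the package, but written against the same types it uses) capturing that pattern:

// reclaim clears parent and frees entries when the child table is fully
// empty, mirroring the bookkeeping at the end of each loop level above.
func reclaim(p *PageTables, parent *PTE, entries *PTEs) bool {
	cleared := uint16(0)
	for i := uint16(0); i < entriesPerPage; i++ {
		if !entries[i].Valid() {
			cleared++
		}
	}
	if cleared != entriesPerPage {
		return false
	}
	parent.Clear()
	p.Allocator.FreePTEs(entries)
	return true
}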
diff --git a/pkg/ring0/pagetables/walker_unmap_arm64.go b/pkg/ring0/pagetables/walker_unmap_arm64.go
new file mode 100644
index 000000000..1ecccbf27
--- /dev/null
+++ b/pkg/ring0/pagetables/walker_unmap_arm64.go
@@ -0,0 +1,275 @@
+// +build arm64
+
+package pagetables
+
+// iterateRangeCanonical walks a canonical range.
+//
+//go:nosplit
+func (w *unmapWalker) iterateRangeCanonical(start, end uintptr) bool {
+ pgdEntryIndex := w.pageTables.root
+ if start >= upperBottom {
+ pgdEntryIndex = w.pageTables.archPageTables.root
+ }
+
+ for pgdIndex := (uint16((start & pgdMask) >> pgdShift)); start < end && pgdIndex < entriesPerPage; pgdIndex++ {
+ var (
+ pgdEntry = &pgdEntryIndex[pgdIndex]
+ pudEntries *PTEs
+ )
+ if !pgdEntry.Valid() {
+ if !w.visitor.requiresAlloc() {
+
+ start = unmapnext(start, pgdSize)
+ continue
+ }
+
+ pudEntries = w.pageTables.Allocator.NewPTEs()
+ pgdEntry.setPageTable(w.pageTables, pudEntries)
+ } else {
+ pudEntries = w.pageTables.Allocator.LookupPTEs(pgdEntry.Address())
+ }
+
+ clearPUDEntries := uint16(0)
+
+ for pudIndex := uint16((start & pudMask) >> pudShift); start < end && pudIndex < entriesPerPage; pudIndex++ {
+ var (
+ pudEntry = &pudEntries[pudIndex]
+ pmdEntries *PTEs
+ )
+ if !pudEntry.Valid() {
+ if !w.visitor.requiresAlloc() {
+
+ clearPUDEntries++
+ start = unmapnext(start, pudSize)
+ continue
+ }
+
+ if start&(pudSize-1) == 0 && end-start >= pudSize {
+ pudEntry.SetSect()
+ if !w.visitor.visit(uintptr(start), pudEntry, pudSize-1) {
+ return false
+ }
+ if pudEntry.Valid() {
+ start = unmapnext(start, pudSize)
+ continue
+ }
+ }
+
+ pmdEntries = w.pageTables.Allocator.NewPTEs()
+ pudEntry.setPageTable(w.pageTables, pmdEntries)
+
+ } else if pudEntry.IsSect() {
+
+ if w.visitor.requiresSplit() && (start&(pudSize-1) != 0 || end < unmapnext(start, pudSize)) {
+
+ pmdEntries = w.pageTables.Allocator.NewPTEs()
+ for index := uint16(0); index < entriesPerPage; index++ {
+ pmdEntries[index].SetSect()
+ pmdEntries[index].Set(
+ pudEntry.Address()+(pmdSize*uintptr(index)),
+ pudEntry.Opts())
+ }
+ pudEntry.setPageTable(w.pageTables, pmdEntries)
+ } else {
+
+ if !w.visitor.visit(uintptr(start), pudEntry, pudSize-1) {
+ return false
+ }
+
+ if !pudEntry.Valid() {
+ clearPUDEntries++
+ }
+
+ start = unmapnext(start, pudSize)
+ continue
+ }
+
+ } else {
+ pmdEntries = w.pageTables.Allocator.LookupPTEs(pudEntry.Address())
+ }
+
+ clearPMDEntries := uint16(0)
+
+ for pmdIndex := uint16((start & pmdMask) >> pmdShift); start < end && pmdIndex < entriesPerPage; pmdIndex++ {
+ var (
+ pmdEntry = &pmdEntries[pmdIndex]
+ pteEntries *PTEs
+ )
+ if !pmdEntry.Valid() {
+ if !w.visitor.requiresAlloc() {
+
+ clearPMDEntries++
+ start = unmapnext(start, pmdSize)
+ continue
+ }
+
+ if start&(pmdSize-1) == 0 && end-start >= pmdSize {
+ pmdEntry.SetSect()
+ if !w.visitor.visit(uintptr(start), pmdEntry, pmdSize-1) {
+ return false
+ }
+ if pmdEntry.Valid() {
+ start = unmapnext(start, pmdSize)
+ continue
+ }
+ }
+
+ pteEntries = w.pageTables.Allocator.NewPTEs()
+ pmdEntry.setPageTable(w.pageTables, pteEntries)
+
+ } else if pmdEntry.IsSect() {
+
+ if w.visitor.requiresSplit() && (start&(pmdSize-1) != 0 || end < unmapnext(start, pmdSize)) {
+
+ pteEntries = w.pageTables.Allocator.NewPTEs()
+ for index := uint16(0); index < entriesPerPage; index++ {
+ pteEntries[index].Set(
+ pmdEntry.Address()+(pteSize*uintptr(index)),
+ pmdEntry.Opts())
+ }
+ pmdEntry.setPageTable(w.pageTables, pteEntries)
+ } else {
+
+ if !w.visitor.visit(uintptr(start), pmdEntry, pmdSize-1) {
+ return false
+ }
+
+ if !pmdEntry.Valid() {
+ clearPMDEntries++
+ }
+
+ start = unmapnext(start, pmdSize)
+ continue
+ }
+
+ } else {
+ pteEntries = w.pageTables.Allocator.LookupPTEs(pmdEntry.Address())
+ }
+
+ clearPTEEntries := uint16(0)
+
+ for pteIndex := uint16((start & pteMask) >> pteShift); start < end && pteIndex < entriesPerPage; pteIndex++ {
+ var (
+ pteEntry = &pteEntries[pteIndex]
+ )
+ if !pteEntry.Valid() && !w.visitor.requiresAlloc() {
+ clearPTEEntries++
+ start += pteSize
+ continue
+ }
+
+ if !w.visitor.visit(uintptr(start), pteEntry, pteSize-1) {
+ return false
+ }
+ if !pteEntry.Valid() {
+ if w.visitor.requiresAlloc() {
+ panic("PTE not set after iteration with requiresAlloc!")
+ }
+ clearPTEEntries++
+ }
+
+ start += pteSize
+ continue
+ }
+
+ if clearPTEEntries == entriesPerPage {
+ pmdEntry.Clear()
+ w.pageTables.Allocator.FreePTEs(pteEntries)
+ clearPMDEntries++
+ }
+ }
+
+ if clearPMDEntries == entriesPerPage {
+ pudEntry.Clear()
+ w.pageTables.Allocator.FreePTEs(pmdEntries)
+ clearPUDEntries++
+ }
+ }
+
+ if clearPUDEntries == entriesPerPage {
+ pgdEntry.Clear()
+ w.pageTables.Allocator.FreePTEs(pudEntries)
+ }
+ }
+ return true
+}
+
+// unmapWalker walks page tables.
+type unmapWalker struct {
+ // pageTables are the tables to walk.
+ pageTables *PageTables
+
+	// visitor is the set of arguments.
+ visitor unmapVisitor
+}
+
+// iterateRange iterates over all appropriate levels of page tables for the given range.
+//
+// If requiresAlloc is true, then Set _must_ be called on all given PTEs. The
+// exception is super pages. If a valid super page (huge or jumbo) cannot be
+// installed, then the walk will continue to individual entries.
+//
+// This algorithm will attempt to maximize the use of super/sect pages whenever
+// possible. Whether a super page is provided will be clear through the range
+// provided in the callback.
+//
+// Note that if requiresAlloc is true, then no gaps will be present. However,
+// if requiresAlloc is false, then the iteration will likely be full of gaps.
+//
+// Note that this function should generally be avoided in favor of Map, Unmap,
+// etc. when not necessary.
+//
+// Precondition: start must be page-aligned.
+// Precondition: start must be less than end.
+// Precondition: If requiresAlloc is true, then start and end should not span
+// non-canonical ranges. If they do, a panic will result.
+//
+//go:nosplit
+func (w *unmapWalker) iterateRange(start, end uintptr) {
+ if start%pteSize != 0 {
+ panic("unaligned start")
+ }
+ if end < start {
+ panic("start > end")
+ }
+ if start < lowerTop {
+ if end <= lowerTop {
+ w.iterateRangeCanonical(start, end)
+ } else if end > lowerTop && end <= upperBottom {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ w.iterateRangeCanonical(start, lowerTop)
+ } else {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ if !w.iterateRangeCanonical(start, lowerTop) {
+ return
+ }
+ w.iterateRangeCanonical(upperBottom, end)
+ }
+ } else if start < upperBottom {
+ if end <= upperBottom {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ } else {
+ if w.visitor.requiresAlloc() {
+ panic("alloc spans non-canonical range")
+ }
+ w.iterateRangeCanonical(upperBottom, end)
+ }
+ } else {
+ w.iterateRangeCanonical(start, end)
+ }
+}
+
+// unmapnext returns the next address quantized by the given size.
+//
+//go:nosplit
+func unmapnext(start uintptr, size uintptr) uintptr {
+ start &= ^(size - 1)
+ start += size
+ return start
+}
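
Unlike the amd64 variants, the arm64 walkers above first select a root table: addresses at or above upperBottom are walked through pageTables.archPageTables.root rather than pageTables.root, reflecting the split between the lower and upper halves of the arm64 address space (analogous to the TTBR0/TTBR1 split). Condensed into a hypothetical helper for clarity:

// rootFor selects the pgd table the arm64 walkers index into; this is an
// illustrative restatement, not code from the package.
func rootFor(p *PageTables, start uintptr) *PTEs {
	if start >= upperBottom {
		return p.archPageTables.root
	}
	return p.root
}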
diff --git a/pkg/ring0/ring0_amd64_state_autogen.go b/pkg/ring0/ring0_amd64_state_autogen.go
new file mode 100644
index 000000000..96cf5d331
--- /dev/null
+++ b/pkg/ring0/ring0_amd64_state_autogen.go
@@ -0,0 +1,7 @@
+// automatically generated by stateify.
+
+// +build amd64
+// +build amd64
+// +build amd64
+
+package ring0
diff --git a/pkg/ring0/ring0_arm64_state_autogen.go b/pkg/ring0/ring0_arm64_state_autogen.go
new file mode 100644
index 000000000..7f2ab3537
--- /dev/null
+++ b/pkg/ring0/ring0_arm64_state_autogen.go
@@ -0,0 +1,7 @@
+// automatically generated by stateify.
+
+// +build arm64
+// +build arm64
+// +build arm64
+
+package ring0
diff --git a/pkg/ring0/ring0_impl_amd64_state_autogen.go b/pkg/ring0/ring0_impl_amd64_state_autogen.go
new file mode 100644
index 000000000..770a13dfa
--- /dev/null
+++ b/pkg/ring0/ring0_impl_amd64_state_autogen.go
@@ -0,0 +1,7 @@
+// automatically generated by stateify.
+
+// +build amd64
+// +build amd64
+// +build 386 amd64
+
+package ring0
diff --git a/pkg/ring0/ring0_impl_arm64_state_autogen.go b/pkg/ring0/ring0_impl_arm64_state_autogen.go
new file mode 100644
index 000000000..7f2ab3537
--- /dev/null
+++ b/pkg/ring0/ring0_impl_arm64_state_autogen.go
@@ -0,0 +1,7 @@
+// automatically generated by stateify.
+
+// +build arm64
+// +build arm64
+// +build arm64
+
+package ring0
diff --git a/pkg/ring0/ring0_state_autogen.go b/pkg/ring0/ring0_state_autogen.go
new file mode 100644
index 000000000..327aba163
--- /dev/null
+++ b/pkg/ring0/ring0_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package ring0
diff --git a/pkg/ring0/ring0_unsafe_state_autogen.go b/pkg/ring0/ring0_unsafe_state_autogen.go
new file mode 100644
index 000000000..327aba163
--- /dev/null
+++ b/pkg/ring0/ring0_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package ring0
diff --git a/pkg/ring0/x86.go b/pkg/ring0/x86.go
deleted file mode 100644
index 7c96cca6b..000000000
--- a/pkg/ring0/x86.go
+++ /dev/null
@@ -1,297 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build 386 || amd64
-// +build 386 amd64
-
-package ring0
-
-import (
- "gvisor.dev/gvisor/pkg/cpuid"
-)
-
-// Useful bits.
-const (
- _CR0_PE = 1 << 0
- _CR0_ET = 1 << 4
- _CR0_AM = 1 << 18
- _CR0_PG = 1 << 31
-
- _CR4_PSE = 1 << 4
- _CR4_PAE = 1 << 5
- _CR4_PGE = 1 << 7
- _CR4_OSFXSR = 1 << 9
- _CR4_OSXMMEXCPT = 1 << 10
- _CR4_FSGSBASE = 1 << 16
- _CR4_PCIDE = 1 << 17
- _CR4_OSXSAVE = 1 << 18
- _CR4_SMEP = 1 << 20
-
- _RFLAGS_AC = 1 << 18
- _RFLAGS_NT = 1 << 14
- _RFLAGS_IOPL0 = 1 << 12
- _RFLAGS_IOPL1 = 1 << 13
- _RFLAGS_IOPL = _RFLAGS_IOPL0 | _RFLAGS_IOPL1
- _RFLAGS_DF = 1 << 10
- _RFLAGS_IF = 1 << 9
- _RFLAGS_STEP = 1 << 8
- _RFLAGS_RESERVED = 1 << 1
-
- _EFER_SCE = 0x001
- _EFER_LME = 0x100
- _EFER_LMA = 0x400
- _EFER_NX = 0x800
-
- _MSR_STAR = 0xc0000081
- _MSR_LSTAR = 0xc0000082
- _MSR_CSTAR = 0xc0000083
- _MSR_SYSCALL_MASK = 0xc0000084
- _MSR_PLATFORM_INFO = 0xce
- _MSR_MISC_FEATURES = 0x140
-
- _PLATFORM_INFO_CPUID_FAULT = 1 << 31
-
- _MISC_FEATURE_CPUID_TRAP = 0x1
-)
-
-const (
- // KernelFlagsSet should always be set in the kernel.
- KernelFlagsSet = _RFLAGS_RESERVED
-
- // UserFlagsSet are always set in userspace.
- //
- // _RFLAGS_IOPL is a set of two bits and it shows the I/O privilege
- // level. The Current Privilege Level (CPL) of the task must be less
- // than or equal to the IOPL in order for the task or program to access
- // I/O ports.
- //
- // Here, _RFLAGS_IOPL0 is used only to determine whether the task is
- // running in the kernel or userspace mode. In the user mode, the CPL is
-	// always 3 and it doesn't matter what IOPL is set if it is below CPL.
- //
- // We need to have one bit which will be always different in user and
- // kernel modes. And we have to remember that even though we have
- // KernelFlagsClear, we still can see some of these flags in the kernel
- // mode. This can happen when the goruntime switches on a goroutine
- // which has been saved in the host mode. On restore, the popf
- // instruction is used to restore flags and this means that all flags
- // what the goroutine has in the host mode will be restored in the
- // kernel mode.
- //
- // _RFLAGS_IOPL0 is never set in host and kernel modes and we always set
- // it in the user mode. So if this flag is set, the task is running in
- // the user mode and if it isn't set, the task is running in the kernel
- // mode.
- UserFlagsSet = _RFLAGS_RESERVED | _RFLAGS_IF | _RFLAGS_IOPL0
-
- // KernelFlagsClear should always be clear in the kernel.
- KernelFlagsClear = _RFLAGS_STEP | _RFLAGS_IF | _RFLAGS_IOPL | _RFLAGS_AC | _RFLAGS_NT
-
- // UserFlagsClear are always cleared in userspace.
- UserFlagsClear = _RFLAGS_NT | _RFLAGS_IOPL1
-)
-
-// IsKernelFlags returns true if rflags corresponds to the kernel mode.
-//
-// go:nosplit
-func IsKernelFlags(rflags uint64) bool {
- return rflags&_RFLAGS_IOPL0 == 0
-}
-
-// Vector is an exception vector.
-type Vector uintptr
-
-// Exception vectors.
-const (
- DivideByZero Vector = iota
- Debug
- NMI
- Breakpoint
- Overflow
- BoundRangeExceeded
- InvalidOpcode
- DeviceNotAvailable
- DoubleFault
- CoprocessorSegmentOverrun
- InvalidTSS
- SegmentNotPresent
- StackSegmentFault
- GeneralProtectionFault
- PageFault
- _
- X87FloatingPointException
- AlignmentCheck
- MachineCheck
- SIMDFloatingPointException
- VirtualizationException
- SecurityException = 0x1e
- SyscallInt80 = 0x80
- _NR_INTERRUPTS = 0x100
-)
-
-// System call vectors.
-const (
- Syscall Vector = _NR_INTERRUPTS
-)
-
-// VirtualAddressBits returns the number of bits available for virtual addresses.
-//
-// Note that sign-extension semantics apply to the highest order bit.
-//
-// FIXME(b/69382326): This should use the cpuid passed to Init.
-func VirtualAddressBits() uint32 {
- ax, _, _, _ := cpuid.HostID(0x80000008, 0)
- return (ax >> 8) & 0xff
-}
-
-// PhysicalAddressBits returns the number of bits available for physical addresses.
-//
-// FIXME(b/69382326): This should use the cpuid passed to Init.
-func PhysicalAddressBits() uint32 {
- ax, _, _, _ := cpuid.HostID(0x80000008, 0)
- return ax & 0xff
-}
-
-// Selector is a segment Selector.
-type Selector uint16
-
-// SegmentDescriptor is a segment descriptor.
-type SegmentDescriptor struct {
- bits [2]uint32
-}
-
-// descriptorTable is a collection of descriptors.
-type descriptorTable [32]SegmentDescriptor
-
-// SegmentDescriptorFlags are typed flags within a descriptor.
-type SegmentDescriptorFlags uint32
-
-// SegmentDescriptorFlag declarations.
-const (
- SegmentDescriptorAccess SegmentDescriptorFlags = 1 << 8 // Access bit (always set).
- SegmentDescriptorWrite = 1 << 9 // Write permission.
- SegmentDescriptorExpandDown = 1 << 10 // Grows down, not used.
- SegmentDescriptorExecute = 1 << 11 // Execute permission.
- SegmentDescriptorSystem = 1 << 12 // Zero => system, 1 => user code/data.
- SegmentDescriptorPresent = 1 << 15 // Present.
- SegmentDescriptorAVL = 1 << 20 // Available.
- SegmentDescriptorLong = 1 << 21 // Long mode.
- SegmentDescriptorDB = 1 << 22 // 16 or 32-bit.
- SegmentDescriptorG = 1 << 23 // Granularity: page or byte.
-)
-
-// Base returns the descriptor's base linear address.
-func (d *SegmentDescriptor) Base() uint32 {
- return d.bits[1]&0xFF000000 | (d.bits[1]&0x000000FF)<<16 | d.bits[0]>>16
-}
-
-// Limit returns the descriptor size.
-func (d *SegmentDescriptor) Limit() uint32 {
- l := d.bits[0]&0xFFFF | d.bits[1]&0xF0000
- if d.bits[1]&uint32(SegmentDescriptorG) != 0 {
- l <<= 12
- l |= 0xFFF
- }
- return l
-}
-
-// Flags returns descriptor flags.
-func (d *SegmentDescriptor) Flags() SegmentDescriptorFlags {
- return SegmentDescriptorFlags(d.bits[1] & 0x00F09F00)
-}
-
-// DPL returns the descriptor privilege level.
-func (d *SegmentDescriptor) DPL() int {
- return int((d.bits[1] >> 13) & 3)
-}
-
-func (d *SegmentDescriptor) setNull() {
- d.bits[0] = 0
- d.bits[1] = 0
-}
-
-func (d *SegmentDescriptor) set(base, limit uint32, dpl int, flags SegmentDescriptorFlags) {
- flags |= SegmentDescriptorPresent
- if limit>>12 != 0 {
- limit >>= 12
- flags |= SegmentDescriptorG
- }
- d.bits[0] = base<<16 | limit&0xFFFF
- d.bits[1] = base&0xFF000000 | (base>>16)&0xFF | limit&0x000F0000 | uint32(flags) | uint32(dpl)<<13
-}
-
-func (d *SegmentDescriptor) setCode32(base, limit uint32, dpl int) {
- d.set(base, limit, dpl,
- SegmentDescriptorDB|
- SegmentDescriptorExecute|
- SegmentDescriptorSystem)
-}
-
-func (d *SegmentDescriptor) setCode64(base, limit uint32, dpl int) {
- d.set(base, limit, dpl,
- SegmentDescriptorG|
- SegmentDescriptorLong|
- SegmentDescriptorExecute|
- SegmentDescriptorSystem)
-}
-
-func (d *SegmentDescriptor) setData(base, limit uint32, dpl int) {
- d.set(base, limit, dpl,
- SegmentDescriptorWrite|
- SegmentDescriptorSystem)
-}
-
-// setHi is only used for the TSS segment, which is magically 64-bits.
-func (d *SegmentDescriptor) setHi(base uint32) {
- d.bits[0] = base
- d.bits[1] = 0
-}
-
-// Gate64 is a 64-bit task, trap, or interrupt gate.
-type Gate64 struct {
- bits [4]uint32
-}
-
-// idt64 is a 64-bit interrupt descriptor table.
-type idt64 [_NR_INTERRUPTS]Gate64
-
-func (g *Gate64) setInterrupt(cs Selector, rip uint64, dpl int, ist int) {
- g.bits[0] = uint32(cs)<<16 | uint32(rip)&0xFFFF
- g.bits[1] = uint32(rip)&0xFFFF0000 | SegmentDescriptorPresent | uint32(dpl)<<13 | 14<<8 | uint32(ist)&0x7
- g.bits[2] = uint32(rip >> 32)
-}
-
-func (g *Gate64) setTrap(cs Selector, rip uint64, dpl int, ist int) {
- g.setInterrupt(cs, rip, dpl, ist)
- g.bits[1] |= 1 << 8
-}
-
-// TaskState64 is a 64-bit task state structure.
-type TaskState64 struct {
- _ uint32
- rsp0Lo, rsp0Hi uint32
- rsp1Lo, rsp1Hi uint32
- rsp2Lo, rsp2Hi uint32
- _ [2]uint32
- ist1Lo, ist1Hi uint32
- ist2Lo, ist2Hi uint32
- ist3Lo, ist3Hi uint32
- ist4Lo, ist4Hi uint32
- ist5Lo, ist5Hi uint32
- ist6Lo, ist6Hi uint32
- ist7Lo, ist7Hi uint32
- _ [2]uint32
- _ uint16
- ioPerm uint16
-}
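
The removed x86.go relies on a single-bit trick, explained in its UserFlagsSet comment: _RFLAGS_IOPL0 (bit 12) is set only in user-mode flags, so IsKernelFlags can distinguish kernel from user mode with one AND. A self-contained restatement, using constants copied from the deleted file:

// Constants copied from the deleted file; isKernelFlags mirrors the removed
// IsKernelFlags.
const (
	_RFLAGS_RESERVED = 1 << 1  // always set
	_RFLAGS_IF       = 1 << 9  // interrupts enabled, set in user mode
	_RFLAGS_IOPL0    = 1 << 12 // low IOPL bit, set only in user mode
)

func isKernelFlags(rflags uint64) bool { return rflags&_RFLAGS_IOPL0 == 0 }

// isKernelFlags(_RFLAGS_RESERVED) == true                           (KernelFlagsSet)
// isKernelFlags(_RFLAGS_RESERVED|_RFLAGS_IF|_RFLAGS_IOPL0) == false (UserFlagsSet)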
diff --git a/pkg/safecopy/BUILD b/pkg/safecopy/BUILD
deleted file mode 100644
index db5787302..000000000
--- a/pkg/safecopy/BUILD
+++ /dev/null
@@ -1,34 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "safecopy",
- srcs = [
- "atomic_amd64.s",
- "atomic_arm64.s",
- "memclr_amd64.s",
- "memclr_arm64.s",
- "memcpy_amd64.s",
- "memcpy_arm64.s",
- "safecopy.go",
- "safecopy_unsafe.go",
- "sighandler_amd64.s",
- "sighandler_arm64.s",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/syserror",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "safecopy_test",
- srcs = [
- "safecopy_test.go",
- ],
- library = ":safecopy",
- deps = ["@org_golang_x_sys//unix:go_default_library"],
-)
diff --git a/pkg/safecopy/LICENSE b/pkg/safecopy/LICENSE
deleted file mode 100644
index 6a66aea5e..000000000
--- a/pkg/safecopy/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/pkg/safecopy/safecopy_state_autogen.go b/pkg/safecopy/safecopy_state_autogen.go
new file mode 100644
index 000000000..791eef959
--- /dev/null
+++ b/pkg/safecopy/safecopy_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package safecopy
diff --git a/pkg/safecopy/safecopy_test.go b/pkg/safecopy/safecopy_test.go
deleted file mode 100644
index 55743e69c..000000000
--- a/pkg/safecopy/safecopy_test.go
+++ /dev/null
@@ -1,568 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package safecopy
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "math/rand"
- "testing"
- "unsafe"
-
- "golang.org/x/sys/unix"
-)
-
-// Size of a page in bytes. Cloned from hostarch.PageSize to avoid a circular
-// dependency.
-const pageSize = 4096
-
-func initRandom(b []byte) {
- for i := range b {
- b[i] = byte(rand.Intn(256))
- }
-}
-
-func randBuf(size int) []byte {
- b := make([]byte, size)
- initRandom(b)
- return b
-}
-
-func TestCopyInSuccess(t *testing.T) {
- // Test that CopyIn does not return an error when all pages are accessible.
- const bufLen = 8192
- a := randBuf(bufLen)
- b := make([]byte, bufLen)
-
- n, err := CopyIn(b, unsafe.Pointer(&a[0]))
- if n != bufLen {
- t.Errorf("Unexpected copy length, got %v, want %v", n, bufLen)
- }
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
- if !bytes.Equal(a, b) {
- t.Errorf("Buffers are not equal when they should be: %v %v", a, b)
- }
-}
-
-func TestCopyOutSuccess(t *testing.T) {
- // Test that CopyOut does not return an error when all pages are
- // accessible.
- const bufLen = 8192
- a := randBuf(bufLen)
- b := make([]byte, bufLen)
-
- n, err := CopyOut(unsafe.Pointer(&b[0]), a)
- if n != bufLen {
- t.Errorf("Unexpected copy length, got %v, want %v", n, bufLen)
- }
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
- if !bytes.Equal(a, b) {
- t.Errorf("Buffers are not equal when they should be: %v %v", a, b)
- }
-}
-
-func TestCopySuccess(t *testing.T) {
- // Test that Copy does not return an error when all pages are accessible.
- const bufLen = 8192
- a := randBuf(bufLen)
- b := make([]byte, bufLen)
-
- n, err := Copy(unsafe.Pointer(&b[0]), unsafe.Pointer(&a[0]), bufLen)
- if n != bufLen {
- t.Errorf("Unexpected copy length, got %v, want %v", n, bufLen)
- }
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
- if !bytes.Equal(a, b) {
- t.Errorf("Buffers are not equal when they should be: %v %v", a, b)
- }
-}
-
-func TestZeroOutSuccess(t *testing.T) {
- // Test that ZeroOut does not return an error when all pages are
- // accessible.
- const bufLen = 8192
- a := make([]byte, bufLen)
- b := randBuf(bufLen)
-
- n, err := ZeroOut(unsafe.Pointer(&b[0]), bufLen)
- if n != bufLen {
- t.Errorf("Unexpected copy length, got %v, want %v", n, bufLen)
- }
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
- if !bytes.Equal(a, b) {
- t.Errorf("Buffers are not equal when they should be: %v %v", a, b)
- }
-}
-
-func TestSwapUint32Success(t *testing.T) {
- // Test that SwapUint32 does not return an error when the page is
- // accessible.
- before := uint32(rand.Int31())
- after := uint32(rand.Int31())
- val := before
-
- old, err := SwapUint32(unsafe.Pointer(&val), after)
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
- if old != before {
- t.Errorf("Unexpected old value: got %v, want %v", old, before)
- }
- if val != after {
- t.Errorf("Unexpected new value: got %v, want %v", val, after)
- }
-}
-
-func TestSwapUint32AlignmentError(t *testing.T) {
- // Test that SwapUint32 returns an AlignmentError when passed an unaligned
- // address.
- data := make([]byte, 8) // 2 * sizeof(uint32).
- alignedIndex := uintptr(0)
- if offset := uintptr(unsafe.Pointer(&data[0])) % 4; offset != 0 {
- alignedIndex = 4 - offset
- }
- ptr := unsafe.Pointer(&data[alignedIndex+1])
- want := AlignmentError{Addr: uintptr(ptr), Alignment: 4}
- if _, err := SwapUint32(ptr, 1); err != want {
- t.Errorf("Unexpected error: got %v, want %v", err, want)
- }
-}
-
-func TestSwapUint64Success(t *testing.T) {
- // Test that SwapUint64 does not return an error when the page is
- // accessible.
- before := uint64(rand.Int63())
- after := uint64(rand.Int63())
- // "The first word in ... an allocated struct or slice can be relied upon
- // to be 64-bit aligned." - sync/atomic docs
- data := new(struct{ val uint64 })
- data.val = before
-
- old, err := SwapUint64(unsafe.Pointer(&data.val), after)
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
- if old != before {
- t.Errorf("Unexpected old value: got %v, want %v", old, before)
- }
- if data.val != after {
- t.Errorf("Unexpected new value: got %v, want %v", data.val, after)
- }
-}
-
-func TestSwapUint64AlignmentError(t *testing.T) {
- // Test that SwapUint64 returns an AlignmentError when passed an unaligned
- // address.
- data := make([]byte, 16) // 2 * sizeof(uint64).
- alignedIndex := uintptr(0)
- if offset := uintptr(unsafe.Pointer(&data[0])) % 8; offset != 0 {
- alignedIndex = 8 - offset
- }
- ptr := unsafe.Pointer(&data[alignedIndex+1])
- want := AlignmentError{Addr: uintptr(ptr), Alignment: 8}
- if _, err := SwapUint64(ptr, 1); err != want {
- t.Errorf("Unexpected error: got %v, want %v", err, want)
- }
-}
-
-func TestCompareAndSwapUint32Success(t *testing.T) {
- // Test that CompareAndSwapUint32 does not return an error when the page is
- // accessible.
- before := uint32(rand.Int31())
- after := uint32(rand.Int31())
- val := before
-
- old, err := CompareAndSwapUint32(unsafe.Pointer(&val), before, after)
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
- if old != before {
- t.Errorf("Unexpected old value: got %v, want %v", old, before)
- }
- if val != after {
- t.Errorf("Unexpected new value: got %v, want %v", val, after)
- }
-}
-
-func TestCompareAndSwapUint32AlignmentError(t *testing.T) {
- // Test that CompareAndSwapUint32 returns an AlignmentError when passed an
- // unaligned address.
- data := make([]byte, 8) // 2 * sizeof(uint32).
- alignedIndex := uintptr(0)
- if offset := uintptr(unsafe.Pointer(&data[0])) % 4; offset != 0 {
- alignedIndex = 4 - offset
- }
- ptr := unsafe.Pointer(&data[alignedIndex+1])
- want := AlignmentError{Addr: uintptr(ptr), Alignment: 4}
- if _, err := CompareAndSwapUint32(ptr, 0, 1); err != want {
- t.Errorf("Unexpected error: got %v, want %v", err, want)
- }
-}
-
-// withSegvErrorTestMapping calls fn with a two-page mapping. The first page
-// contains random data, and the second page generates SIGSEGV when accessed.
-func withSegvErrorTestMapping(t *testing.T, fn func(m []byte)) {
- mapping, err := unix.Mmap(-1, 0, 2*pageSize, unix.PROT_READ|unix.PROT_WRITE, unix.MAP_ANONYMOUS|unix.MAP_PRIVATE)
- if err != nil {
- t.Fatalf("Mmap failed: %v", err)
- }
- defer unix.Munmap(mapping)
- if err := unix.Mprotect(mapping[pageSize:], unix.PROT_NONE); err != nil {
- t.Fatalf("Mprotect failed: %v", err)
- }
- initRandom(mapping[:pageSize])
-
- fn(mapping)
-}
-
-// withBusErrorTestMapping calls fn with a two-page mapping. The first page
-// contains random data, and the second page generates SIGBUS when accessed.
-func withBusErrorTestMapping(t *testing.T, fn func(m []byte)) {
- f, err := ioutil.TempFile("", "sigbus_test")
- if err != nil {
- t.Fatalf("TempFile failed: %v", err)
- }
- defer f.Close()
- if err := f.Truncate(pageSize); err != nil {
- t.Fatalf("Truncate failed: %v", err)
- }
- mapping, err := unix.Mmap(int(f.Fd()), 0, 2*pageSize, unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED)
- if err != nil {
- t.Fatalf("Mmap failed: %v", err)
- }
- defer unix.Munmap(mapping)
- initRandom(mapping[:pageSize])
-
- fn(mapping)
-}
-
-func TestCopyInSegvError(t *testing.T) {
- // Test that CopyIn returns a SegvError when reaching a page that signals
- // SIGSEGV.
- for bytesBeforeFault := 0; bytesBeforeFault <= 2*maxRegisterSize; bytesBeforeFault++ {
- t.Run(fmt.Sprintf("starting copy %d bytes before SIGSEGV", bytesBeforeFault), func(t *testing.T) {
- withSegvErrorTestMapping(t, func(mapping []byte) {
- secondPage := uintptr(unsafe.Pointer(&mapping[pageSize]))
- src := unsafe.Pointer(&mapping[pageSize-bytesBeforeFault])
- dst := randBuf(pageSize)
- n, err := CopyIn(dst, src)
- if n != bytesBeforeFault {
- t.Errorf("Unexpected copy length: got %v, want %v", n, bytesBeforeFault)
- }
- if want := (SegvError{secondPage}); err != want {
- t.Errorf("Unexpected error: got %v, want %v", err, want)
- }
- if got, want := dst[:bytesBeforeFault], mapping[pageSize-bytesBeforeFault:pageSize]; !bytes.Equal(got, want) {
- t.Errorf("Buffers are not equal when they should be: %v %v", got, want)
- }
- })
- })
- }
-}
-
-func TestCopyInBusError(t *testing.T) {
- // Test that CopyIn returns a BusError when reaching a page that signals
- // SIGBUS.
- for bytesBeforeFault := 0; bytesBeforeFault <= 2*maxRegisterSize; bytesBeforeFault++ {
- t.Run(fmt.Sprintf("starting copy %d bytes before SIGBUS", bytesBeforeFault), func(t *testing.T) {
- withBusErrorTestMapping(t, func(mapping []byte) {
- secondPage := uintptr(unsafe.Pointer(&mapping[pageSize]))
- src := unsafe.Pointer(&mapping[pageSize-bytesBeforeFault])
- dst := randBuf(pageSize)
- n, err := CopyIn(dst, src)
- if n != bytesBeforeFault {
- t.Errorf("Unexpected copy length: got %v, want %v", n, bytesBeforeFault)
- }
- if want := (BusError{secondPage}); err != want {
- t.Errorf("Unexpected error: got %v, want %v", err, want)
- }
- if got, want := dst[:bytesBeforeFault], mapping[pageSize-bytesBeforeFault:pageSize]; !bytes.Equal(got, want) {
- t.Errorf("Buffers are not equal when they should be: %v %v", got, want)
- }
- })
- })
- }
-}
-
-func TestCopyOutSegvError(t *testing.T) {
- // Test that CopyOut returns a SegvError when reaching a page that signals
- // SIGSEGV.
- for bytesBeforeFault := 0; bytesBeforeFault <= 2*maxRegisterSize; bytesBeforeFault++ {
- t.Run(fmt.Sprintf("starting copy %d bytes before SIGSEGV", bytesBeforeFault), func(t *testing.T) {
- withSegvErrorTestMapping(t, func(mapping []byte) {
- secondPage := uintptr(unsafe.Pointer(&mapping[pageSize]))
- dst := unsafe.Pointer(&mapping[pageSize-bytesBeforeFault])
- src := randBuf(pageSize)
- n, err := CopyOut(dst, src)
- if n != bytesBeforeFault {
- t.Errorf("Unexpected copy length: got %v, want %v", n, bytesBeforeFault)
- }
- if want := (SegvError{secondPage}); err != want {
- t.Errorf("Unexpected error: got %v, want %v", err, want)
- }
- if got, want := mapping[pageSize-bytesBeforeFault:pageSize], src[:bytesBeforeFault]; !bytes.Equal(got, want) {
- t.Errorf("Buffers are not equal when they should be: %v %v", got, want)
- }
- })
- })
- }
-}
-
-func TestCopyOutBusError(t *testing.T) {
- // Test that CopyOut returns a BusError when reaching a page that signals
- // SIGBUS.
- for bytesBeforeFault := 0; bytesBeforeFault <= 2*maxRegisterSize; bytesBeforeFault++ {
- t.Run(fmt.Sprintf("starting copy %d bytes before SIGSEGV", bytesBeforeFault), func(t *testing.T) {
- withBusErrorTestMapping(t, func(mapping []byte) {
- secondPage := uintptr(unsafe.Pointer(&mapping[pageSize]))
- dst := unsafe.Pointer(&mapping[pageSize-bytesBeforeFault])
- src := randBuf(pageSize)
- n, err := CopyOut(dst, src)
- if n != bytesBeforeFault {
- t.Errorf("Unexpected copy length: got %v, want %v", n, bytesBeforeFault)
- }
- if want := (BusError{secondPage}); err != want {
- t.Errorf("Unexpected error: got %v, want %v", err, want)
- }
- if got, want := mapping[pageSize-bytesBeforeFault:pageSize], src[:bytesBeforeFault]; !bytes.Equal(got, want) {
- t.Errorf("Buffers are not equal when they should be: %v %v", got, want)
- }
- })
- })
- }
-}
-
-func TestCopySourceSegvError(t *testing.T) {
- // Test that Copy returns a SegvError when copying from a page that signals
- // SIGSEGV.
- for bytesBeforeFault := 0; bytesBeforeFault <= 2*maxRegisterSize; bytesBeforeFault++ {
- t.Run(fmt.Sprintf("starting copy %d bytes before SIGSEGV", bytesBeforeFault), func(t *testing.T) {
- withSegvErrorTestMapping(t, func(mapping []byte) {
- secondPage := uintptr(unsafe.Pointer(&mapping[pageSize]))
- src := unsafe.Pointer(&mapping[pageSize-bytesBeforeFault])
- dst := randBuf(pageSize)
- n, err := Copy(unsafe.Pointer(&dst[0]), src, pageSize)
- if n != uintptr(bytesBeforeFault) {
- t.Errorf("Unexpected copy length: got %v, want %v", n, bytesBeforeFault)
- }
- if want := (SegvError{secondPage}); err != want {
- t.Errorf("Unexpected error: got %v, want %v", err, want)
- }
- if got, want := dst[:bytesBeforeFault], mapping[pageSize-bytesBeforeFault:pageSize]; !bytes.Equal(got, want) {
- t.Errorf("Buffers are not equal when they should be: %v %v", got, want)
- }
- })
- })
- }
-}
-
-func TestCopySourceBusError(t *testing.T) {
- // Test that Copy returns a BusError when copying from a page that signals
- // SIGBUS.
- for bytesBeforeFault := 0; bytesBeforeFault <= 2*maxRegisterSize; bytesBeforeFault++ {
- t.Run(fmt.Sprintf("starting copy %d bytes before SIGBUS", bytesBeforeFault), func(t *testing.T) {
- withBusErrorTestMapping(t, func(mapping []byte) {
- secondPage := uintptr(unsafe.Pointer(&mapping[pageSize]))
- src := unsafe.Pointer(&mapping[pageSize-bytesBeforeFault])
- dst := randBuf(pageSize)
- n, err := Copy(unsafe.Pointer(&dst[0]), src, pageSize)
- if n != uintptr(bytesBeforeFault) {
- t.Errorf("Unexpected copy length: got %v, want %v", n, bytesBeforeFault)
- }
- if want := (BusError{secondPage}); err != want {
- t.Errorf("Unexpected error: got %v, want %v", err, want)
- }
- if got, want := dst[:bytesBeforeFault], mapping[pageSize-bytesBeforeFault:pageSize]; !bytes.Equal(got, want) {
- t.Errorf("Buffers are not equal when they should be: %v %v", got, want)
- }
- })
- })
- }
-}
-
-func TestCopyDestinationSegvError(t *testing.T) {
- // Test that Copy returns a SegvError when copying to a page that signals
- // SIGSEGV.
- for bytesBeforeFault := 0; bytesBeforeFault <= 2*maxRegisterSize; bytesBeforeFault++ {
- t.Run(fmt.Sprintf("starting copy %d bytes before SIGSEGV", bytesBeforeFault), func(t *testing.T) {
- withSegvErrorTestMapping(t, func(mapping []byte) {
- secondPage := uintptr(unsafe.Pointer(&mapping[pageSize]))
- dst := unsafe.Pointer(&mapping[pageSize-bytesBeforeFault])
- src := randBuf(pageSize)
- n, err := Copy(dst, unsafe.Pointer(&src[0]), pageSize)
- if n != uintptr(bytesBeforeFault) {
- t.Errorf("Unexpected copy length: got %v, want %v", n, bytesBeforeFault)
- }
- if want := (SegvError{secondPage}); err != want {
- t.Errorf("Unexpected error: got %v, want %v", err, want)
- }
- if got, want := mapping[pageSize-bytesBeforeFault:pageSize], src[:bytesBeforeFault]; !bytes.Equal(got, want) {
- t.Errorf("Buffers are not equal when they should be: %v %v", got, want)
- }
- })
- })
- }
-}
-
-func TestCopyDestinationBusError(t *testing.T) {
- // Test that Copy returns a BusError when copying to a page that signals
- // SIGBUS.
- for bytesBeforeFault := 0; bytesBeforeFault <= 2*maxRegisterSize; bytesBeforeFault++ {
- t.Run(fmt.Sprintf("starting copy %d bytes before SIGBUS", bytesBeforeFault), func(t *testing.T) {
- withBusErrorTestMapping(t, func(mapping []byte) {
- secondPage := uintptr(unsafe.Pointer(&mapping[pageSize]))
- dst := unsafe.Pointer(&mapping[pageSize-bytesBeforeFault])
- src := randBuf(pageSize)
- n, err := Copy(dst, unsafe.Pointer(&src[0]), pageSize)
- if n != uintptr(bytesBeforeFault) {
- t.Errorf("Unexpected copy length: got %v, want %v", n, bytesBeforeFault)
- }
- if want := (BusError{secondPage}); err != want {
- t.Errorf("Unexpected error: got %v, want %v", err, want)
- }
- if got, want := mapping[pageSize-bytesBeforeFault:pageSize], src[:bytesBeforeFault]; !bytes.Equal(got, want) {
- t.Errorf("Buffers are not equal when they should be: %v %v", got, want)
- }
- })
- })
- }
-}
-
-func TestZeroOutSegvError(t *testing.T) {
- // Test that ZeroOut returns a SegvError when reaching a page that signals
- // SIGSEGV.
- for bytesBeforeFault := 0; bytesBeforeFault <= 2*maxRegisterSize; bytesBeforeFault++ {
- t.Run(fmt.Sprintf("starting write %d bytes before SIGSEGV", bytesBeforeFault), func(t *testing.T) {
- withSegvErrorTestMapping(t, func(mapping []byte) {
- secondPage := uintptr(unsafe.Pointer(&mapping[pageSize]))
- dst := unsafe.Pointer(&mapping[pageSize-bytesBeforeFault])
- n, err := ZeroOut(dst, pageSize)
- if n != uintptr(bytesBeforeFault) {
- t.Errorf("Unexpected write length: got %v, want %v", n, bytesBeforeFault)
- }
- if want := (SegvError{secondPage}); err != want {
- t.Errorf("Unexpected error: got %v, want %v", err, want)
- }
- if got, want := mapping[pageSize-bytesBeforeFault:pageSize], make([]byte, bytesBeforeFault); !bytes.Equal(got, want) {
- t.Errorf("Non-zero bytes in written part of mapping: %v", got)
- }
- })
- })
- }
-}
-
-func TestZeroOutBusError(t *testing.T) {
- // Test that ZeroOut returns a BusError when reaching a page that signals
- // SIGBUS.
- for bytesBeforeFault := 0; bytesBeforeFault <= 2*maxRegisterSize; bytesBeforeFault++ {
- t.Run(fmt.Sprintf("starting write %d bytes before SIGBUS", bytesBeforeFault), func(t *testing.T) {
- withBusErrorTestMapping(t, func(mapping []byte) {
- secondPage := uintptr(unsafe.Pointer(&mapping[pageSize]))
- dst := unsafe.Pointer(&mapping[pageSize-bytesBeforeFault])
- n, err := ZeroOut(dst, pageSize)
- if n != uintptr(bytesBeforeFault) {
- t.Errorf("Unexpected write length: got %v, want %v", n, bytesBeforeFault)
- }
- if want := (BusError{secondPage}); err != want {
- t.Errorf("Unexpected error: got %v, want %v", err, want)
- }
- if got, want := mapping[pageSize-bytesBeforeFault:pageSize], make([]byte, bytesBeforeFault); !bytes.Equal(got, want) {
- t.Errorf("Non-zero bytes in written part of mapping: %v", got)
- }
- })
- })
- }
-}
-
-func TestSwapUint32SegvError(t *testing.T) {
- // Test that SwapUint32 returns a SegvError when reaching a page that
- // signals SIGSEGV.
- withSegvErrorTestMapping(t, func(mapping []byte) {
- secondPage := uintptr(unsafe.Pointer(&mapping[pageSize]))
- _, err := SwapUint32(unsafe.Pointer(secondPage), 1)
- if want := (SegvError{secondPage}); err != want {
- t.Errorf("Unexpected error: got %v, want %v", err, want)
- }
- })
-}
-
-func TestSwapUint32BusError(t *testing.T) {
- // Test that SwapUint32 returns a BusError when reaching a page that
- // signals SIGBUS.
- withBusErrorTestMapping(t, func(mapping []byte) {
- secondPage := uintptr(unsafe.Pointer(&mapping[pageSize]))
- _, err := SwapUint32(unsafe.Pointer(secondPage), 1)
- if want := (BusError{secondPage}); err != want {
- t.Errorf("Unexpected error: got %v, want %v", err, want)
- }
- })
-}
-
-func TestSwapUint64SegvError(t *testing.T) {
- // Test that SwapUint64 returns a SegvError when reaching a page that
- // signals SIGSEGV.
- withSegvErrorTestMapping(t, func(mapping []byte) {
- secondPage := uintptr(unsafe.Pointer(&mapping[pageSize]))
- _, err := SwapUint64(unsafe.Pointer(secondPage), 1)
- if want := (SegvError{secondPage}); err != want {
- t.Errorf("Unexpected error: got %v, want %v", err, want)
- }
- })
-}
-
-func TestSwapUint64BusError(t *testing.T) {
- // Test that SwapUint64 returns a BusError when reaching a page that
- // signals SIGBUS.
- withBusErrorTestMapping(t, func(mapping []byte) {
- secondPage := uintptr(unsafe.Pointer(&mapping[pageSize]))
- _, err := SwapUint64(unsafe.Pointer(secondPage), 1)
- if want := (BusError{secondPage}); err != want {
- t.Errorf("Unexpected error: got %v, want %v", err, want)
- }
- })
-}
-
-func TestCompareAndSwapUint32SegvError(t *testing.T) {
- // Test that CompareAndSwapUint32 returns a SegvError when reaching a page
- // that signals SIGSEGV.
- withSegvErrorTestMapping(t, func(mapping []byte) {
- secondPage := uintptr(unsafe.Pointer(&mapping[pageSize]))
- _, err := CompareAndSwapUint32(unsafe.Pointer(secondPage), 0, 1)
- if want := (SegvError{secondPage}); err != want {
- t.Errorf("Unexpected error: got %v, want %v", err, want)
- }
- })
-}
-
-func TestCompareAndSwapUint32BusError(t *testing.T) {
- // Test that CompareAndSwapUint32 returns a BusError when reaching a page
- // that signals SIGBUS.
- withBusErrorTestMapping(t, func(mapping []byte) {
- secondPage := uintptr(unsafe.Pointer(&mapping[pageSize]))
- _, err := CompareAndSwapUint32(unsafe.Pointer(secondPage), 0, 1)
- if want := (BusError{secondPage}); err != want {
- t.Errorf("Unexpected error: got %v, want %v", err, want)
- }
- })
-}
diff --git a/pkg/safecopy/safecopy_unsafe_state_autogen.go b/pkg/safecopy/safecopy_unsafe_state_autogen.go
new file mode 100644
index 000000000..791eef959
--- /dev/null
+++ b/pkg/safecopy/safecopy_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package safecopy
diff --git a/pkg/safemem/BUILD b/pkg/safemem/BUILD
deleted file mode 100644
index 2c7cc8769..000000000
--- a/pkg/safemem/BUILD
+++ /dev/null
@@ -1,30 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "safemem",
- srcs = [
- "block_unsafe.go",
- "io.go",
- "safemem.go",
- "seq_unsafe.go",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/gohacks",
- "//pkg/safecopy",
- "//pkg/sync",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "safemem_test",
- size = "small",
- srcs = [
- "io_test.go",
- "seq_test.go",
- ],
- library = ":safemem",
-)
diff --git a/pkg/safemem/io_test.go b/pkg/safemem/io_test.go
deleted file mode 100644
index 629741bee..000000000
--- a/pkg/safemem/io_test.go
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package safemem
-
-import (
- "bytes"
- "io"
- "testing"
-)
-
-func makeBlocks(slices ...[]byte) []Block {
- blocks := make([]Block, 0, len(slices))
- for _, s := range slices {
- blocks = append(blocks, BlockFromSafeSlice(s))
- }
- return blocks
-}
-
-func TestFromIOReaderFullRead(t *testing.T) {
- r := FromIOReader{bytes.NewBufferString("foobar")}
- dsts := makeBlocks(make([]byte, 3), make([]byte, 3))
- n, err := r.ReadToBlocks(BlockSeqFromSlice(dsts))
- if wantN := uint64(6); n != wantN || err != nil {
- t.Errorf("ReadToBlocks: got (%v, %v), wanted (%v, nil)", n, err, wantN)
- }
- for i, want := range [][]byte{[]byte("foo"), []byte("bar")} {
- if got := dsts[i].ToSlice(); !bytes.Equal(got, want) {
- t.Errorf("dsts[%d]: got %q, wanted %q", i, got, want)
- }
- }
-}
-
-type eofHidingReader struct {
- Reader io.Reader
-}
-
-func (r eofHidingReader) Read(dst []byte) (int, error) {
- n, err := r.Reader.Read(dst)
- if err == io.EOF {
- return n, nil
- }
- return n, err
-}
-
-func TestFromIOReaderPartialRead(t *testing.T) {
- r := FromIOReader{eofHidingReader{bytes.NewBufferString("foob")}}
- dsts := makeBlocks(make([]byte, 3), make([]byte, 3))
- n, err := r.ReadToBlocks(BlockSeqFromSlice(dsts))
- // FromIOReader should stop after the eofHidingReader returns (1, nil)
- // for a 3-byte read.
- if wantN := uint64(4); n != wantN || err != nil {
- t.Errorf("ReadToBlocks: got (%v, %v), wanted (%v, nil)", n, err, wantN)
- }
- for i, want := range [][]byte{[]byte("foo"), []byte("b\x00\x00")} {
- if got := dsts[i].ToSlice(); !bytes.Equal(got, want) {
- t.Errorf("dsts[%d]: got %q, wanted %q", i, got, want)
- }
- }
-}
-
-type singleByteReader struct {
- Reader io.Reader
-}
-
-func (r singleByteReader) Read(dst []byte) (int, error) {
- if len(dst) == 0 {
- return r.Reader.Read(dst)
- }
- return r.Reader.Read(dst[:1])
-}
-
-func TestSingleByteReader(t *testing.T) {
- r := FromIOReader{singleByteReader{bytes.NewBufferString("foobar")}}
- dsts := makeBlocks(make([]byte, 3), make([]byte, 3))
- n, err := r.ReadToBlocks(BlockSeqFromSlice(dsts))
- // FromIOReader should stop after the singleByteReader returns (1, nil)
- // for a 3-byte read.
- if wantN := uint64(1); n != wantN || err != nil {
- t.Errorf("ReadToBlocks: got (%v, %v), wanted (%v, nil)", n, err, wantN)
- }
- for i, want := range [][]byte{[]byte("f\x00\x00"), []byte("\x00\x00\x00")} {
- if got := dsts[i].ToSlice(); !bytes.Equal(got, want) {
- t.Errorf("dsts[%d]: got %q, wanted %q", i, got, want)
- }
- }
-}
-
-func TestReadFullToBlocks(t *testing.T) {
- r := FromIOReader{singleByteReader{bytes.NewBufferString("foobar")}}
- dsts := makeBlocks(make([]byte, 3), make([]byte, 3))
- n, err := ReadFullToBlocks(r, BlockSeqFromSlice(dsts))
- // ReadFullToBlocks should call into FromIOReader => singleByteReader
- // repeatedly until dsts is exhausted.
- if wantN := uint64(6); n != wantN || err != nil {
- t.Errorf("ReadFullToBlocks: got (%v, %v), wanted (%v, nil)", n, err, wantN)
- }
- for i, want := range [][]byte{[]byte("foo"), []byte("bar")} {
- if got := dsts[i].ToSlice(); !bytes.Equal(got, want) {
- t.Errorf("dsts[%d]: got %q, wanted %q", i, got, want)
- }
- }
-}
-
-func TestFromIOWriterFullWrite(t *testing.T) {
- srcs := makeBlocks([]byte("foo"), []byte("bar"))
- var dst bytes.Buffer
- w := FromIOWriter{&dst}
- n, err := w.WriteFromBlocks(BlockSeqFromSlice(srcs))
- if wantN := uint64(6); n != wantN || err != nil {
- t.Errorf("WriteFromBlocks: got (%v, %v), wanted (%v, nil)", n, err, wantN)
- }
- if got, want := dst.Bytes(), []byte("foobar"); !bytes.Equal(got, want) {
- t.Errorf("dst: got %q, wanted %q", got, want)
- }
-}
-
-type limitedWriter struct {
- Writer io.Writer
- Done int
- Limit int
-}
-
-func (w *limitedWriter) Write(src []byte) (int, error) {
- count := len(src)
- if count > (w.Limit - w.Done) {
- count = w.Limit - w.Done
- }
- n, err := w.Writer.Write(src[:count])
- w.Done += n
- return n, err
-}
-
-func TestFromIOWriterPartialWrite(t *testing.T) {
- srcs := makeBlocks([]byte("foo"), []byte("bar"))
- var dst bytes.Buffer
- w := FromIOWriter{&limitedWriter{&dst, 0, 4}}
- n, err := w.WriteFromBlocks(BlockSeqFromSlice(srcs))
- // FromIOWriter should stop after the limitedWriter returns (1, nil) for a
- // 3-byte write.
- if wantN := uint64(4); n != wantN || err != nil {
- t.Errorf("WriteFromBlocks: got (%v, %v), wanted (%v, nil)", n, err, wantN)
- }
- if got, want := dst.Bytes(), []byte("foob"); !bytes.Equal(got, want) {
- t.Errorf("dst: got %q, wanted %q", got, want)
- }
-}
-
-type singleByteWriter struct {
- Writer io.Writer
-}
-
-func (w singleByteWriter) Write(src []byte) (int, error) {
- if len(src) == 0 {
- return w.Writer.Write(src)
- }
- return w.Writer.Write(src[:1])
-}
-
-func TestSingleByteWriter(t *testing.T) {
- srcs := makeBlocks([]byte("foo"), []byte("bar"))
- var dst bytes.Buffer
- w := FromIOWriter{singleByteWriter{&dst}}
- n, err := w.WriteFromBlocks(BlockSeqFromSlice(srcs))
- // FromIOWriter should stop after the singleByteWriter returns (1, nil)
- // for a 3-byte write.
- if wantN := uint64(1); n != wantN || err != nil {
- t.Errorf("WriteFromBlocks: got (%v, %v), wanted (%v, nil)", n, err, wantN)
- }
- if got, want := dst.Bytes(), []byte("f"); !bytes.Equal(got, want) {
- t.Errorf("dst: got %q, wanted %q", got, want)
- }
-}
-
-func TestWriteFullToBlocks(t *testing.T) {
- srcs := makeBlocks([]byte("foo"), []byte("bar"))
- var dst bytes.Buffer
- w := FromIOWriter{singleByteWriter{&dst}}
- n, err := WriteFullFromBlocks(w, BlockSeqFromSlice(srcs))
-	// WriteFullFromBlocks should call into FromIOWriter => singleByteWriter
- // repeatedly until srcs is exhausted.
- if wantN := uint64(6); n != wantN || err != nil {
- t.Errorf("WriteFullFromBlocks: got (%v, %v), wanted (%v, nil)", n, err, wantN)
- }
- if got, want := dst.Bytes(), []byte("foobar"); !bytes.Equal(got, want) {
- t.Errorf("dst: got %q, wanted %q", got, want)
- }
-}
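
The tests above pin down the short-read and short-write semantics of FromIOReader and FromIOWriter, and the retry loops in ReadFullToBlocks and WriteFullFromBlocks. A rough end-to-end sketch of that pattern, using only identifiers that appear in the deleted test:

package main

import (
	"bytes"
	"fmt"
	"strings"

	"gvisor.dev/gvisor/pkg/safemem"
)

func main() {
	// Scatter a reader's bytes across two fixed-size destination blocks,
	// retrying short reads until the blocks are full.
	dst := [][]byte{make([]byte, 3), make([]byte, 3)}
	blocks := make([]safemem.Block, 0, len(dst))
	for _, s := range dst {
		blocks = append(blocks, safemem.BlockFromSafeSlice(s))
	}
	n, err := safemem.ReadFullToBlocks(
		safemem.FromIOReader{strings.NewReader("foobar")},
		safemem.BlockSeqFromSlice(blocks),
	)
	fmt.Println(n, err, string(dst[0]), string(dst[1])) // 6 <nil> foo bar

	// Gather the same blocks back into an io.Writer.
	var out bytes.Buffer
	if _, err := safemem.WriteFullFromBlocks(
		safemem.FromIOWriter{&out},
		safemem.BlockSeqFromSlice(blocks),
	); err != nil {
		panic(err)
	}
	fmt.Println(out.String()) // foobar
}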
diff --git a/pkg/safemem/safemem_state_autogen.go b/pkg/safemem/safemem_state_autogen.go
new file mode 100644
index 000000000..66d53f22d
--- /dev/null
+++ b/pkg/safemem/safemem_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package safemem
diff --git a/pkg/safemem/safemem_unsafe_state_autogen.go b/pkg/safemem/safemem_unsafe_state_autogen.go
new file mode 100644
index 000000000..66d53f22d
--- /dev/null
+++ b/pkg/safemem/safemem_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package safemem
diff --git a/pkg/safemem/seq_test.go b/pkg/safemem/seq_test.go
deleted file mode 100644
index de34005e9..000000000
--- a/pkg/safemem/seq_test.go
+++ /dev/null
@@ -1,217 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package safemem
-
-import (
- "bytes"
- "reflect"
- "testing"
-)
-
-func TestBlockSeqOfEmptyBlock(t *testing.T) {
- bs := BlockSeqOf(Block{})
- if !bs.IsEmpty() {
- t.Errorf("BlockSeqOf(Block{}).IsEmpty(): got false, wanted true; BlockSeq is %v", bs)
- }
-}
-
-func TestBlockSeqOfNonemptyBlock(t *testing.T) {
- b := BlockFromSafeSlice(make([]byte, 1))
- bs := BlockSeqOf(b)
- if bs.IsEmpty() {
- t.Fatalf("BlockSeqOf(non-empty Block).IsEmpty(): got true, wanted false; BlockSeq is %v", bs)
- }
- if head := bs.Head(); head != b {
- t.Fatalf("BlockSeqOf(non-empty Block).Head(): got %v, wanted %v", head, b)
- }
- if tail := bs.Tail(); !tail.IsEmpty() {
- t.Fatalf("BlockSeqOf(non-empty Block).Tail().IsEmpty(): got false, wanted true: tail is %v", tail)
- }
-}
-
-type blockSeqTest struct {
- desc string
-
- pieces []string
- haveOffset bool
- offset uint64
- haveLimit bool
- limit uint64
-
- want string
-}
-
-func (t blockSeqTest) NonEmptyByteSlices() [][]byte {
- // t is a value, so we can mutate it freely.
- slices := make([][]byte, 0, len(t.pieces))
- for _, str := range t.pieces {
- if t.haveOffset {
- strOff := t.offset
- if strOff > uint64(len(str)) {
- strOff = uint64(len(str))
- }
- str = str[strOff:]
- t.offset -= strOff
- }
- if t.haveLimit {
- strLim := t.limit
- if strLim > uint64(len(str)) {
- strLim = uint64(len(str))
- }
- str = str[:strLim]
- t.limit -= strLim
- }
- if len(str) != 0 {
- slices = append(slices, []byte(str))
- }
- }
- return slices
-}
-
-func (t blockSeqTest) BlockSeq() BlockSeq {
- blocks := make([]Block, 0, len(t.pieces))
- for _, str := range t.pieces {
- blocks = append(blocks, BlockFromSafeSlice([]byte(str)))
- }
- bs := BlockSeqFromSlice(blocks)
- if t.haveOffset {
- bs = bs.DropFirst64(t.offset)
- }
- if t.haveLimit {
- bs = bs.TakeFirst64(t.limit)
- }
- return bs
-}
-
-var blockSeqTests = []blockSeqTest{
- {
- desc: "Empty sequence",
- },
- {
- desc: "Sequence of length 1",
- pieces: []string{"foobar"},
- want: "foobar",
- },
- {
- desc: "Sequence of length 2",
- pieces: []string{"foo", "bar"},
- want: "foobar",
- },
- {
- desc: "Empty Blocks",
- pieces: []string{"", "foo", "", "", "bar", ""},
- want: "foobar",
- },
- {
- desc: "Sequence with non-zero offset",
- pieces: []string{"foo", "bar"},
- haveOffset: true,
- offset: 2,
- want: "obar",
- },
- {
- desc: "Sequence with non-maximal limit",
- pieces: []string{"foo", "bar"},
- haveLimit: true,
- limit: 5,
- want: "fooba",
- },
- {
- desc: "Sequence with offset and limit",
- pieces: []string{"foo", "bar"},
- haveOffset: true,
- offset: 2,
- haveLimit: true,
- limit: 3,
- want: "oba",
- },
-}
-
-func TestBlockSeqNumBytes(t *testing.T) {
- for _, test := range blockSeqTests {
- t.Run(test.desc, func(t *testing.T) {
- if got, want := test.BlockSeq().NumBytes(), uint64(len(test.want)); got != want {
- t.Errorf("NumBytes: got %d, wanted %d", got, want)
- }
- })
- }
-}
-
-func TestBlockSeqIterBlocks(t *testing.T) {
- // Tests BlockSeq iteration using Head/Tail.
- for _, test := range blockSeqTests {
- t.Run(test.desc, func(t *testing.T) {
- srcs := test.BlockSeq()
- // "Note that a non-nil empty slice and a nil slice ... are not
- // deeply equal." - reflect
- slices := make([][]byte, 0, 0)
- for !srcs.IsEmpty() {
- src := srcs.Head()
- slices = append(slices, src.ToSlice())
- nextSrcs := srcs.Tail()
- if got, want := nextSrcs.NumBytes(), srcs.NumBytes()-uint64(src.Len()); got != want {
- t.Fatalf("%v.Tail(): got %v (%d bytes), wanted %d bytes", srcs, nextSrcs, got, want)
- }
- srcs = nextSrcs
- }
- if wantSlices := test.NonEmptyByteSlices(); !reflect.DeepEqual(slices, wantSlices) {
- t.Errorf("Accumulated slices: got %v, wanted %v", slices, wantSlices)
- }
- })
- }
-}
-
-func TestBlockSeqIterBytes(t *testing.T) {
- // Tests BlockSeq iteration using Head/DropFirst.
- for _, test := range blockSeqTests {
- t.Run(test.desc, func(t *testing.T) {
- srcs := test.BlockSeq()
- var dst bytes.Buffer
- for !srcs.IsEmpty() {
- src := srcs.Head()
- var b [1]byte
- n, err := Copy(BlockFromSafeSlice(b[:]), src)
- if n != 1 || err != nil {
- t.Fatalf("Copy: got (%v, %v), wanted (1, nil)", n, err)
- }
- dst.WriteByte(b[0])
- nextSrcs := srcs.DropFirst(1)
- if got, want := nextSrcs.NumBytes(), srcs.NumBytes()-1; got != want {
- t.Fatalf("%v.DropFirst(1): got %v (%d bytes), wanted %d bytes", srcs, nextSrcs, got, want)
- }
- srcs = nextSrcs
- }
- if got := string(dst.Bytes()); got != test.want {
- t.Errorf("Copied string: got %q, wanted %q", got, test.want)
- }
- })
- }
-}
-
-func TestBlockSeqDropBeyondLimit(t *testing.T) {
- blocks := []Block{BlockFromSafeSlice([]byte("123")), BlockFromSafeSlice([]byte("4"))}
- bs := BlockSeqFromSlice(blocks)
- if got, want := bs.NumBytes(), uint64(4); got != want {
- t.Errorf("%v.NumBytes(): got %d, wanted %d", bs, got, want)
- }
- bs = bs.TakeFirst(1)
- if got, want := bs.NumBytes(), uint64(1); got != want {
- t.Errorf("%v.NumBytes(): got %d, wanted %d", bs, got, want)
- }
- bs = bs.DropFirst(2)
- if got, want := bs.NumBytes(), uint64(0); got != want {
- t.Errorf("%v.NumBytes(): got %d, wanted %d", bs, got, want)
- }
-}
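
blockSeqTest.BlockSeq above builds each case by windowing a BlockSeq with DropFirst64 and TakeFirst64, and the iteration tests walk it with Head and Tail. The same pattern in isolation, as a small sketch:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/safemem"
)

func main() {
	blocks := []safemem.Block{
		safemem.BlockFromSafeSlice([]byte("foo")),
		safemem.BlockFromSafeSlice([]byte("bar")),
	}
	// Window the sequence to the middle "oba": drop 2 bytes, keep 3.
	bs := safemem.BlockSeqFromSlice(blocks).DropFirst64(2).TakeFirst64(3)

	// Walk the windowed sequence block by block, as TestBlockSeqIterBlocks does.
	var got []byte
	for !bs.IsEmpty() {
		got = append(got, bs.Head().ToSlice()...)
		bs = bs.Tail()
	}
	fmt.Println(string(got)) // oba
}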
diff --git a/pkg/seccomp/BUILD b/pkg/seccomp/BUILD
deleted file mode 100644
index 169fc0ac3..000000000
--- a/pkg/seccomp/BUILD
+++ /dev/null
@@ -1,59 +0,0 @@
-load("//tools:defs.bzl", "go_binary", "go_embed_data", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_binary(
- name = "victim",
- testonly = 1,
- srcs = [
- "seccomp_test_victim.go",
- "seccomp_test_victim_amd64.go",
- "seccomp_test_victim_arm64.go",
- ],
- nogo = False,
- deps = [
- ":seccomp",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_embed_data(
- name = "victim_data",
- testonly = 1,
- src = "victim",
- package = "seccomp",
- var = "victimData",
-)
-
-go_library(
- name = "seccomp",
- srcs = [
- "seccomp.go",
- "seccomp_amd64.go",
- "seccomp_arm64.go",
- "seccomp_rules.go",
- "seccomp_unsafe.go",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/bpf",
- "//pkg/log",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "seccomp_test",
- size = "small",
- srcs = [
- "seccomp_test.go",
- ":victim_data",
- ],
- library = ":seccomp",
- deps = [
- "//pkg/abi/linux",
- "//pkg/bpf",
- "//pkg/hostarch",
- ],
-)
diff --git a/pkg/seccomp/seccomp_amd64_state_autogen.go b/pkg/seccomp/seccomp_amd64_state_autogen.go
new file mode 100644
index 000000000..27a96018b
--- /dev/null
+++ b/pkg/seccomp/seccomp_amd64_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build amd64
+
+package seccomp
diff --git a/pkg/seccomp/seccomp_arm64_state_autogen.go b/pkg/seccomp/seccomp_arm64_state_autogen.go
new file mode 100644
index 000000000..96c64c23d
--- /dev/null
+++ b/pkg/seccomp/seccomp_arm64_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build arm64
+
+package seccomp
diff --git a/pkg/seccomp/seccomp_state_autogen.go b/pkg/seccomp/seccomp_state_autogen.go
new file mode 100644
index 000000000..e16b5d7c2
--- /dev/null
+++ b/pkg/seccomp/seccomp_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package seccomp
diff --git a/pkg/seccomp/seccomp_test.go b/pkg/seccomp/seccomp_test.go
deleted file mode 100644
index 68feddf31..000000000
--- a/pkg/seccomp/seccomp_test.go
+++ /dev/null
@@ -1,1059 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package seccomp
-
-import (
- "bytes"
- "fmt"
- "io"
- "io/ioutil"
- "math"
- "math/rand"
- "os"
- "os/exec"
- "strings"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/bpf"
- "gvisor.dev/gvisor/pkg/hostarch"
-)
-
-// newVictim makes a victim binary.
-func newVictim() (string, error) {
- f, err := ioutil.TempFile("", "victim")
- if err != nil {
- return "", err
- }
- defer f.Close()
- path := f.Name()
- if _, err := io.Copy(f, bytes.NewBuffer(victimData)); err != nil {
- os.Remove(path)
- return "", err
- }
- if err := os.Chmod(path, 0755); err != nil {
- os.Remove(path)
- return "", err
- }
- return path, nil
-}
-
-// dataAsInput converts a linux.SeccompData to a bpf.Input.
-func dataAsInput(d *linux.SeccompData) bpf.Input {
- buf := make([]byte, d.SizeBytes())
- d.MarshalUnsafe(buf)
- return bpf.InputBytes{
- Data: buf,
- Order: hostarch.ByteOrder,
- }
-}
-
-func TestBasic(t *testing.T) {
- type spec struct {
- // desc is the test's description.
- desc string
-
- // data is the input data.
- data linux.SeccompData
-
- // want is the expected return value of the BPF program.
- want linux.BPFAction
- }
-
- for _, test := range []struct {
- name string
- ruleSets []RuleSet
- defaultAction linux.BPFAction
- badArchAction linux.BPFAction
- specs []spec
- }{
- {
- name: "Single syscall",
- ruleSets: []RuleSet{
- {
- Rules: SyscallRules{1: {}},
- Action: linux.SECCOMP_RET_ALLOW,
- },
- },
- defaultAction: linux.SECCOMP_RET_TRAP,
- badArchAction: linux.SECCOMP_RET_KILL_THREAD,
- specs: []spec{
- {
- desc: "syscall allowed",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH},
- want: linux.SECCOMP_RET_ALLOW,
- },
- {
- desc: "syscall disallowed",
- data: linux.SeccompData{Nr: 2, Arch: LINUX_AUDIT_ARCH},
- want: linux.SECCOMP_RET_TRAP,
- },
- },
- },
- {
- name: "Multiple rulesets",
- ruleSets: []RuleSet{
- {
- Rules: SyscallRules{
- 1: []Rule{
- {
- EqualTo(0x1),
- },
- },
- },
- Action: linux.SECCOMP_RET_ALLOW,
- },
- {
- Rules: SyscallRules{
- 1: {},
- 2: {},
- },
- Action: linux.SECCOMP_RET_TRAP,
- },
- },
- defaultAction: linux.SECCOMP_RET_KILL_THREAD,
- badArchAction: linux.SECCOMP_RET_KILL_THREAD,
- specs: []spec{
- {
- desc: "allowed (1a)",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x1}},
- want: linux.SECCOMP_RET_ALLOW,
- },
- {
-					desc: "trapped (1b: args do not match first ruleset)",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH},
- want: linux.SECCOMP_RET_TRAP,
- },
- {
- desc: "syscall 1 matched 2nd rule",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH},
- want: linux.SECCOMP_RET_TRAP,
- },
- {
- desc: "no match",
- data: linux.SeccompData{Nr: 0, Arch: LINUX_AUDIT_ARCH},
- want: linux.SECCOMP_RET_KILL_THREAD,
- },
- },
- },
- {
- name: "Multiple syscalls",
- ruleSets: []RuleSet{
- {
- Rules: SyscallRules{
- 1: {},
- 3: {},
- 5: {},
- },
- Action: linux.SECCOMP_RET_ALLOW,
- },
- },
- defaultAction: linux.SECCOMP_RET_TRAP,
- badArchAction: linux.SECCOMP_RET_KILL_THREAD,
- specs: []spec{
- {
- desc: "allowed (1)",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH},
- want: linux.SECCOMP_RET_ALLOW,
- },
- {
- desc: "allowed (3)",
- data: linux.SeccompData{Nr: 3, Arch: LINUX_AUDIT_ARCH},
- want: linux.SECCOMP_RET_ALLOW,
- },
- {
- desc: "allowed (5)",
- data: linux.SeccompData{Nr: 5, Arch: LINUX_AUDIT_ARCH},
- want: linux.SECCOMP_RET_ALLOW,
- },
- {
- desc: "disallowed (0)",
- data: linux.SeccompData{Nr: 0, Arch: LINUX_AUDIT_ARCH},
- want: linux.SECCOMP_RET_TRAP,
- },
- {
- desc: "disallowed (2)",
- data: linux.SeccompData{Nr: 2, Arch: LINUX_AUDIT_ARCH},
- want: linux.SECCOMP_RET_TRAP,
- },
- {
- desc: "disallowed (4)",
- data: linux.SeccompData{Nr: 4, Arch: LINUX_AUDIT_ARCH},
- want: linux.SECCOMP_RET_TRAP,
- },
- {
- desc: "disallowed (6)",
- data: linux.SeccompData{Nr: 6, Arch: LINUX_AUDIT_ARCH},
- want: linux.SECCOMP_RET_TRAP,
- },
- {
- desc: "disallowed (100)",
- data: linux.SeccompData{Nr: 100, Arch: LINUX_AUDIT_ARCH},
- want: linux.SECCOMP_RET_TRAP,
- },
- },
- },
- {
- name: "Wrong architecture",
- ruleSets: []RuleSet{
- {
- Rules: SyscallRules{
- 1: {},
- },
- Action: linux.SECCOMP_RET_ALLOW,
- },
- },
- defaultAction: linux.SECCOMP_RET_TRAP,
- badArchAction: linux.SECCOMP_RET_KILL_THREAD,
- specs: []spec{
- {
- desc: "arch (123)",
- data: linux.SeccompData{Nr: 1, Arch: 123},
- want: linux.SECCOMP_RET_KILL_THREAD,
- },
- },
- },
- {
- name: "Syscall disallowed",
- ruleSets: []RuleSet{
- {
- Rules: SyscallRules{
- 1: {},
- },
- Action: linux.SECCOMP_RET_ALLOW,
- },
- },
- defaultAction: linux.SECCOMP_RET_TRAP,
- badArchAction: linux.SECCOMP_RET_KILL_THREAD,
- specs: []spec{
- {
- desc: "action trap",
- data: linux.SeccompData{Nr: 2, Arch: LINUX_AUDIT_ARCH},
- want: linux.SECCOMP_RET_TRAP,
- },
- },
- },
- {
- name: "Syscall arguments",
- ruleSets: []RuleSet{
- {
- Rules: SyscallRules{
- 1: []Rule{
- {
- MatchAny{},
- EqualTo(0xf),
- },
- },
- },
- Action: linux.SECCOMP_RET_ALLOW,
- },
- },
- defaultAction: linux.SECCOMP_RET_TRAP,
- badArchAction: linux.SECCOMP_RET_KILL_THREAD,
- specs: []spec{
- {
- desc: "allowed",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0xf, 0xf}},
- want: linux.SECCOMP_RET_ALLOW,
- },
- {
- desc: "disallowed",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0xf, 0xe}},
- want: linux.SECCOMP_RET_TRAP,
- },
- },
- },
- {
- name: "Multiple arguments",
- ruleSets: []RuleSet{
- {
- Rules: SyscallRules{
- 1: []Rule{
- {
- EqualTo(0xf),
- },
- {
- EqualTo(0xe),
- },
- },
- },
- Action: linux.SECCOMP_RET_ALLOW,
- },
- },
- defaultAction: linux.SECCOMP_RET_TRAP,
- badArchAction: linux.SECCOMP_RET_KILL_THREAD,
- specs: []spec{
- {
- desc: "match first rule",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0xf}},
- want: linux.SECCOMP_RET_ALLOW,
- },
- {
- desc: "match 2nd rule",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0xe}},
- want: linux.SECCOMP_RET_ALLOW,
- },
- },
- },
- {
- name: "EqualTo",
- ruleSets: []RuleSet{
- {
- Rules: SyscallRules{
- 1: []Rule{
- {
- EqualTo(0),
- EqualTo(math.MaxUint64 - 1),
- EqualTo(math.MaxUint32),
- },
- },
- },
- Action: linux.SECCOMP_RET_ALLOW,
- },
- },
- defaultAction: linux.SECCOMP_RET_TRAP,
- badArchAction: linux.SECCOMP_RET_KILL_THREAD,
- specs: []spec{
- {
- desc: "argument allowed (all match)",
- data: linux.SeccompData{
- Nr: 1,
- Arch: LINUX_AUDIT_ARCH,
- Args: [6]uint64{0, math.MaxUint64 - 1, math.MaxUint32},
- },
- want: linux.SECCOMP_RET_ALLOW,
- },
- {
- desc: "argument disallowed (one mismatch)",
- data: linux.SeccompData{
- Nr: 1,
- Arch: LINUX_AUDIT_ARCH,
- Args: [6]uint64{0, math.MaxUint64, math.MaxUint32},
- },
- want: linux.SECCOMP_RET_TRAP,
- },
- {
- desc: "argument disallowed (multiple mismatch)",
- data: linux.SeccompData{
- Nr: 1,
- Arch: LINUX_AUDIT_ARCH,
- Args: [6]uint64{0, math.MaxUint64, math.MaxUint32 - 1},
- },
- want: linux.SECCOMP_RET_TRAP,
- },
- },
- },
- {
- name: "NotEqual",
- ruleSets: []RuleSet{
- {
- Rules: SyscallRules{
- 1: []Rule{
- {
- NotEqual(0x7aabbccdd),
- NotEqual(math.MaxUint64 - 1),
- NotEqual(math.MaxUint32),
- },
- },
- },
- Action: linux.SECCOMP_RET_ALLOW,
- },
- },
- defaultAction: linux.SECCOMP_RET_TRAP,
- badArchAction: linux.SECCOMP_RET_KILL_THREAD,
- specs: []spec{
- {
- desc: "arg allowed",
- data: linux.SeccompData{
- Nr: 1,
- Arch: LINUX_AUDIT_ARCH,
- Args: [6]uint64{0, math.MaxUint64, math.MaxUint32 - 1},
- },
- want: linux.SECCOMP_RET_ALLOW,
- },
- {
- desc: "arg disallowed (one equal)",
- data: linux.SeccompData{
- Nr: 1,
- Arch: LINUX_AUDIT_ARCH,
- Args: [6]uint64{0x7aabbccdd, math.MaxUint64, math.MaxUint32 - 1},
- },
- want: linux.SECCOMP_RET_TRAP,
- },
- {
- desc: "arg disallowed (all equal)",
- data: linux.SeccompData{
- Nr: 1,
- Arch: LINUX_AUDIT_ARCH,
- Args: [6]uint64{0x7aabbccdd, math.MaxUint64 - 1, math.MaxUint32},
- },
- want: linux.SECCOMP_RET_TRAP,
- },
- },
- },
- {
- name: "GreaterThan",
- ruleSets: []RuleSet{
- {
- Rules: SyscallRules{
- 1: []Rule{
- {
-								// 8589934594
- // Both upper 32 bits and lower 32 bits are non-zero.
- // 00000000000000000000000000000010
- // 00000000000000000000000000000010
- GreaterThan(0x00000002_00000002),
- },
- },
- },
- Action: linux.SECCOMP_RET_ALLOW,
- },
- },
- defaultAction: linux.SECCOMP_RET_TRAP,
- badArchAction: linux.SECCOMP_RET_KILL_THREAD,
- specs: []spec{
- {
- desc: "high 32bits greater",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000003_00000002}},
- want: linux.SECCOMP_RET_ALLOW,
- },
- {
- desc: "high 32bits equal, low 32bits greater",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000002_00000003}},
- want: linux.SECCOMP_RET_ALLOW,
- },
- {
- desc: "high 32bits equal, low 32bits equal",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000002_00000002}},
- want: linux.SECCOMP_RET_TRAP,
- },
- {
- desc: "high 32bits equal, low 32bits less",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000002_00000001}},
- want: linux.SECCOMP_RET_TRAP,
- },
- {
- desc: "high 32bits less",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000001_00000003}},
- want: linux.SECCOMP_RET_TRAP,
- },
- },
- },
- {
- name: "GreaterThan (multi)",
- ruleSets: []RuleSet{
- {
- Rules: SyscallRules{
- 1: []Rule{
- {
- GreaterThan(0xf),
- GreaterThan(0xabcd000d),
- },
- },
- },
- Action: linux.SECCOMP_RET_ALLOW,
- },
- },
- defaultAction: linux.SECCOMP_RET_TRAP,
- badArchAction: linux.SECCOMP_RET_KILL_THREAD,
- specs: []spec{
- {
- desc: "arg allowed",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x10, 0xffffffff}},
- want: linux.SECCOMP_RET_ALLOW,
- },
- {
- desc: "arg disallowed (first arg equal)",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0xf, 0xffffffff}},
- want: linux.SECCOMP_RET_TRAP,
- },
- {
- desc: "arg disallowed (first arg smaller)",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x0, 0xffffffff}},
- want: linux.SECCOMP_RET_TRAP,
- },
- {
- desc: "arg disallowed (second arg equal)",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x10, 0xabcd000d}},
- want: linux.SECCOMP_RET_TRAP,
- },
- {
- desc: "arg disallowed (second arg smaller)",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x10, 0xa000ffff}},
- want: linux.SECCOMP_RET_TRAP,
- },
- },
- },
- {
- name: "GreaterThanOrEqual",
- ruleSets: []RuleSet{
- {
- Rules: SyscallRules{
- 1: []Rule{
- {
-								// 8589934594
- // Both upper 32 bits and lower 32 bits are non-zero.
- // 00000000000000000000000000000010
- // 00000000000000000000000000000010
- GreaterThanOrEqual(0x00000002_00000002),
- },
- },
- },
- Action: linux.SECCOMP_RET_ALLOW,
- },
- },
- defaultAction: linux.SECCOMP_RET_TRAP,
- badArchAction: linux.SECCOMP_RET_KILL_THREAD,
- specs: []spec{
- {
- desc: "high 32bits greater",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000003_00000002}},
- want: linux.SECCOMP_RET_ALLOW,
- },
- {
- desc: "high 32bits equal, low 32bits greater",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000002_00000003}},
- want: linux.SECCOMP_RET_ALLOW,
- },
- {
- desc: "high 32bits equal, low 32bits equal",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000002_00000002}},
- want: linux.SECCOMP_RET_ALLOW,
- },
- {
- desc: "high 32bits equal, low 32bits less",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000002_00000001}},
- want: linux.SECCOMP_RET_TRAP,
- },
- {
- desc: "high 32bits less",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000001_00000002}},
- want: linux.SECCOMP_RET_TRAP,
- },
- },
- },
- {
- name: "GreaterThanOrEqual (multi)",
- ruleSets: []RuleSet{
- {
- Rules: SyscallRules{
- 1: []Rule{
- {
- GreaterThanOrEqual(0xf),
- GreaterThanOrEqual(0xabcd000d),
- },
- },
- },
- Action: linux.SECCOMP_RET_ALLOW,
- },
- },
- defaultAction: linux.SECCOMP_RET_TRAP,
- badArchAction: linux.SECCOMP_RET_KILL_THREAD,
- specs: []spec{
- {
- desc: "arg allowed (both greater)",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x10, 0xffffffff}},
- want: linux.SECCOMP_RET_ALLOW,
- },
- {
- desc: "arg allowed (first arg equal)",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0xf, 0xffffffff}},
- want: linux.SECCOMP_RET_ALLOW,
- },
- {
- desc: "arg disallowed (first arg smaller)",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x0, 0xffffffff}},
- want: linux.SECCOMP_RET_TRAP,
- },
- {
- desc: "arg allowed (second arg equal)",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x10, 0xabcd000d}},
- want: linux.SECCOMP_RET_ALLOW,
- },
- {
- desc: "arg disallowed (second arg smaller)",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x10, 0xa000ffff}},
- want: linux.SECCOMP_RET_TRAP,
- },
- {
- desc: "arg disallowed (both arg smaller)",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x0, 0xa000ffff}},
- want: linux.SECCOMP_RET_TRAP,
- },
- },
- },
- {
- name: "LessThan",
- ruleSets: []RuleSet{
- {
- Rules: SyscallRules{
- 1: []Rule{
- {
-								// 8589934594
- // Both upper 32 bits and lower 32 bits are non-zero.
- // 00000000000000000000000000000010
- // 00000000000000000000000000000010
- LessThan(0x00000002_00000002),
- },
- },
- },
- Action: linux.SECCOMP_RET_ALLOW,
- },
- },
- defaultAction: linux.SECCOMP_RET_TRAP,
- badArchAction: linux.SECCOMP_RET_KILL_THREAD,
- specs: []spec{
- {
- desc: "high 32bits greater",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000003_00000002}},
- want: linux.SECCOMP_RET_TRAP,
- },
- {
- desc: "high 32bits equal, low 32bits greater",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000002_00000003}},
- want: linux.SECCOMP_RET_TRAP,
- },
- {
- desc: "high 32bits equal, low 32bits equal",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000002_00000002}},
- want: linux.SECCOMP_RET_TRAP,
- },
- {
- desc: "high 32bits equal, low 32bits less",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000002_00000001}},
- want: linux.SECCOMP_RET_ALLOW,
- },
- {
- desc: "high 32bits less",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000001_00000002}},
- want: linux.SECCOMP_RET_ALLOW,
- },
- },
- },
- {
- name: "LessThan (multi)",
- ruleSets: []RuleSet{
- {
- Rules: SyscallRules{
- 1: []Rule{
- {
- LessThan(0x1),
- LessThan(0xabcd000d),
- },
- },
- },
- Action: linux.SECCOMP_RET_ALLOW,
- },
- },
- defaultAction: linux.SECCOMP_RET_TRAP,
- badArchAction: linux.SECCOMP_RET_KILL_THREAD,
- specs: []spec{
- {
- desc: "arg allowed",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x0, 0x0}},
- want: linux.SECCOMP_RET_ALLOW,
- },
- {
- desc: "arg disallowed (first arg equal)",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x1, 0x0}},
- want: linux.SECCOMP_RET_TRAP,
- },
- {
- desc: "arg disallowed (first arg greater)",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x2, 0x0}},
- want: linux.SECCOMP_RET_TRAP,
- },
- {
- desc: "arg disallowed (second arg equal)",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x0, 0xabcd000d}},
- want: linux.SECCOMP_RET_TRAP,
- },
- {
- desc: "arg disallowed (second arg greater)",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x0, 0xffffffff}},
- want: linux.SECCOMP_RET_TRAP,
- },
- {
- desc: "arg disallowed (both arg greater)",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x2, 0xffffffff}},
- want: linux.SECCOMP_RET_TRAP,
- },
- },
- },
- {
- name: "LessThanOrEqual",
- ruleSets: []RuleSet{
- {
- Rules: SyscallRules{
- 1: []Rule{
- {
-								// 8589934594
- // Both upper 32 bits and lower 32 bits are non-zero.
- // 00000000000000000000000000000010
- // 00000000000000000000000000000010
- LessThanOrEqual(0x00000002_00000002),
- },
- },
- },
- Action: linux.SECCOMP_RET_ALLOW,
- },
- },
- defaultAction: linux.SECCOMP_RET_TRAP,
- badArchAction: linux.SECCOMP_RET_KILL_THREAD,
- specs: []spec{
- {
- desc: "high 32bits greater",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000003_00000002}},
- want: linux.SECCOMP_RET_TRAP,
- },
- {
- desc: "high 32bits equal, low 32bits greater",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000002_00000003}},
- want: linux.SECCOMP_RET_TRAP,
- },
- {
- desc: "high 32bits equal, low 32bits equal",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000002_00000002}},
- want: linux.SECCOMP_RET_ALLOW,
- },
- {
- desc: "high 32bits equal, low 32bits less",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000002_00000001}},
- want: linux.SECCOMP_RET_ALLOW,
- },
- {
- desc: "high 32bits less",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x00000001_00000002}},
- want: linux.SECCOMP_RET_ALLOW,
- },
- },
- },
-
- {
- name: "LessThanOrEqual (multi)",
- ruleSets: []RuleSet{
- {
- Rules: SyscallRules{
- 1: []Rule{
- {
- LessThanOrEqual(0x1),
- LessThanOrEqual(0xabcd000d),
- },
- },
- },
- Action: linux.SECCOMP_RET_ALLOW,
- },
- },
- defaultAction: linux.SECCOMP_RET_TRAP,
- badArchAction: linux.SECCOMP_RET_KILL_THREAD,
- specs: []spec{
- {
- desc: "arg allowed",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x0, 0x0}},
- want: linux.SECCOMP_RET_ALLOW,
- },
- {
- desc: "arg allowed (first arg equal)",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x1, 0x0}},
- want: linux.SECCOMP_RET_ALLOW,
- },
- {
- desc: "arg disallowed (first arg greater)",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x2, 0x0}},
- want: linux.SECCOMP_RET_TRAP,
- },
- {
- desc: "arg allowed (second arg equal)",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x0, 0xabcd000d}},
- want: linux.SECCOMP_RET_ALLOW,
- },
- {
- desc: "arg disallowed (second arg greater)",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x0, 0xffffffff}},
- want: linux.SECCOMP_RET_TRAP,
- },
- {
- desc: "arg disallowed (both arg greater)",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{0x2, 0xffffffff}},
- want: linux.SECCOMP_RET_TRAP,
- },
- },
- },
- {
- name: "MaskedEqual",
- ruleSets: []RuleSet{
- {
- Rules: SyscallRules{
- 1: []Rule{
- {
- // x & 00000001 00000011 (0x103) == 00000000 00000001 (0x1)
- // Input x must have lowest order bit set and
- // must *not* have 8th or second lowest order bit set.
- MaskedEqual(0x103, 0x1),
- },
- },
- },
- Action: linux.SECCOMP_RET_ALLOW,
- },
- },
- defaultAction: linux.SECCOMP_RET_TRAP,
- badArchAction: linux.SECCOMP_RET_KILL_THREAD,
- specs: []spec{
- {
- desc: "arg allowed (low order mandatory bit)",
- data: linux.SeccompData{
- Nr: 1,
- Arch: LINUX_AUDIT_ARCH,
- // 00000000 00000000 00000000 00000001
- Args: [6]uint64{0x1},
- },
- want: linux.SECCOMP_RET_ALLOW,
- },
- {
- desc: "arg allowed (low order optional bit)",
- data: linux.SeccompData{
- Nr: 1,
- Arch: LINUX_AUDIT_ARCH,
- // 00000000 00000000 00000000 00000101
- Args: [6]uint64{0x5},
- },
- want: linux.SECCOMP_RET_ALLOW,
- },
- {
- desc: "arg disallowed (lowest order bit not set)",
- data: linux.SeccompData{
- Nr: 1,
- Arch: LINUX_AUDIT_ARCH,
- // 00000000 00000000 00000000 00000010
- Args: [6]uint64{0x2},
- },
- want: linux.SECCOMP_RET_TRAP,
- },
- {
- desc: "arg disallowed (second lowest order bit set)",
- data: linux.SeccompData{
- Nr: 1,
- Arch: LINUX_AUDIT_ARCH,
- // 00000000 00000000 00000000 00000011
- Args: [6]uint64{0x3},
- },
- want: linux.SECCOMP_RET_TRAP,
- },
- {
- desc: "arg disallowed (8th bit set)",
- data: linux.SeccompData{
- Nr: 1,
- Arch: LINUX_AUDIT_ARCH,
- // 00000000 00000000 00000001 00000000
- Args: [6]uint64{0x100},
- },
- want: linux.SECCOMP_RET_TRAP,
- },
- },
- },
- {
- name: "Instruction Pointer",
- ruleSets: []RuleSet{
- {
- Rules: SyscallRules{
- 1: []Rule{
- {
- RuleIP: EqualTo(0x7aabbccdd),
- },
- },
- },
- Action: linux.SECCOMP_RET_ALLOW,
- },
- },
- defaultAction: linux.SECCOMP_RET_TRAP,
- badArchAction: linux.SECCOMP_RET_KILL_THREAD,
- specs: []spec{
- {
- desc: "allowed",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{}, InstructionPointer: 0x7aabbccdd},
- want: linux.SECCOMP_RET_ALLOW,
- },
- {
- desc: "disallowed",
- data: linux.SeccompData{Nr: 1, Arch: LINUX_AUDIT_ARCH, Args: [6]uint64{}, InstructionPointer: 0x711223344},
- want: linux.SECCOMP_RET_TRAP,
- },
- },
- },
- } {
- t.Run(test.name, func(t *testing.T) {
- instrs, err := BuildProgram(test.ruleSets, test.defaultAction, test.badArchAction)
- if err != nil {
- t.Fatalf("BuildProgram() got error: %v", err)
- }
- p, err := bpf.Compile(instrs)
- if err != nil {
- t.Fatalf("bpf.Compile() got error: %v", err)
- }
- for _, spec := range test.specs {
- got, err := bpf.Exec(p, dataAsInput(&spec.data))
- if err != nil {
- t.Fatalf("%s: bpf.Exec() got error: %v", spec.desc, err)
- }
- if got != uint32(spec.want) {
- // Include a decoded version of the program in output for debugging purposes.
- decoded, _ := bpf.DecodeInstructions(instrs)
- t.Fatalf("%s: got: %d, want: %d\nBPF Program\n%s", spec.desc, got, spec.want, decoded)
- }
- }
- })
- }
-}
-
-// TestRandom tests that randomly generated rules are encoded correctly.
-func TestRandom(t *testing.T) {
- rand.Seed(time.Now().UnixNano())
- size := rand.Intn(50) + 1
- syscallRules := make(map[uintptr][]Rule)
- for len(syscallRules) < size {
- n := uintptr(rand.Intn(200))
- if _, ok := syscallRules[n]; !ok {
- syscallRules[n] = []Rule{}
- }
- }
-
- t.Logf("Testing filters: %v", syscallRules)
- instrs, err := BuildProgram([]RuleSet{
- {
- Rules: syscallRules,
- Action: linux.SECCOMP_RET_ALLOW,
- },
- }, linux.SECCOMP_RET_TRAP, linux.SECCOMP_RET_KILL_THREAD)
- if err != nil {
-		t.Fatalf("BuildProgram() got error: %v", err)
- }
- p, err := bpf.Compile(instrs)
- if err != nil {
- t.Fatalf("bpf.Compile() got error: %v", err)
- }
- for i := uint32(0); i < 200; i++ {
- data := linux.SeccompData{Nr: int32(i), Arch: LINUX_AUDIT_ARCH}
- got, err := bpf.Exec(p, dataAsInput(&data))
- if err != nil {
- t.Errorf("bpf.Exec() got error: %v, for syscall %d", err, i)
- continue
- }
- want := linux.SECCOMP_RET_TRAP
- if _, ok := syscallRules[uintptr(i)]; ok {
- want = linux.SECCOMP_RET_ALLOW
- }
- if got != uint32(want) {
- t.Errorf("bpf.Exec() = %d, want: %d, for syscall %d", got, want, i)
- }
- }
-}
-
-// TestRealDeal checks that a process dies when it trips over the filter and
-// that it doesn't die when the filter is not triggered.
-func TestRealDeal(t *testing.T) {
- for _, test := range []struct {
- die bool
- want string
- }{
- {die: true, want: "bad system call"},
- {die: false, want: "Syscall was allowed!!!"},
- } {
- victim, err := newVictim()
- if err != nil {
- t.Fatalf("unable to get victim: %v", err)
- }
- defer os.Remove(victim)
- dieFlag := fmt.Sprintf("-die=%v", test.die)
- cmd := exec.Command(victim, dieFlag)
-
- out, err := cmd.CombinedOutput()
- if test.die {
- if err == nil {
- t.Errorf("victim was not killed as expected, output: %s", out)
- continue
- }
- // Depending on kernel version, either RET_TRAP or RET_KILL_PROCESS is
- // used. RET_TRAP dumps reason for exit in output, while RET_KILL_PROCESS
- // returns SIGSYS as exit status.
- if !strings.Contains(string(out), test.want) &&
- !strings.Contains(err.Error(), test.want) {
- t.Errorf("Victim error is wrong, got: %v, err: %v, want: %v", string(out), err, test.want)
- continue
- }
- } else {
- if err != nil {
- t.Errorf("victim failed to execute, err: %v", err)
- continue
- }
- if !strings.Contains(string(out), test.want) {
- t.Errorf("Victim output is wrong, got: %v, want: %v", string(out), test.want)
- continue
- }
- }
- }
-}
-
-// TestMerge ensures that empty rules are not erased when rules are merged.
-func TestMerge(t *testing.T) {
- for _, tst := range []struct {
- name string
- main []Rule
- merge []Rule
- want []Rule
- }{
- {
- name: "empty both",
- main: nil,
- merge: nil,
- want: []Rule{{}, {}},
- },
- {
- name: "empty main",
- main: nil,
- merge: []Rule{{}},
- want: []Rule{{}, {}},
- },
- {
- name: "empty merge",
- main: []Rule{{}},
- merge: nil,
- want: []Rule{{}, {}},
- },
- } {
- t.Run(tst.name, func(t *testing.T) {
- mainRules := SyscallRules{1: tst.main}
- mergeRules := SyscallRules{1: tst.merge}
- mainRules.Merge(mergeRules)
- if got, want := len(mainRules[1]), len(tst.want); got != want {
- t.Errorf("wrong length, got: %d, want: %d", got, want)
- }
- for i, r := range mainRules[1] {
- if r != tst.want[i] {
- t.Errorf("result, got: %v, want: %v", r, tst.want[i])
- }
- }
- })
- }
-}
-
-// TestAddRule ensures that empty rules are not erased when rules are added.
-func TestAddRule(t *testing.T) {
- rules := SyscallRules{1: {}}
- rules.AddRule(1, Rule{})
- if got, want := len(rules[1]), 2; got != want {
- t.Errorf("len(rules[1]), got: %d, want: %d", got, want)
- }
-}
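
TestBasic drives the whole pipeline: BuildProgram turns rule sets into BPF instructions, bpf.Compile assembles them, and bpf.Exec evaluates them against marshalled linux.SeccompData. A single-rule sketch of that pipeline, built only from calls the deleted test already makes (LINUX_AUDIT_ARCH is the seccomp package's own constant for the current architecture):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/abi/linux"
	"gvisor.dev/gvisor/pkg/bpf"
	"gvisor.dev/gvisor/pkg/hostarch"
	"gvisor.dev/gvisor/pkg/seccomp"
)

func main() {
	// Allow syscall 1 only when its first argument equals 0xf; trap everything
	// else, and kill the thread on a foreign architecture.
	instrs, err := seccomp.BuildProgram([]seccomp.RuleSet{{
		Rules:  seccomp.SyscallRules{1: []seccomp.Rule{{seccomp.EqualTo(0xf)}}},
		Action: linux.SECCOMP_RET_ALLOW,
	}}, linux.SECCOMP_RET_TRAP, linux.SECCOMP_RET_KILL_THREAD)
	if err != nil {
		panic(err)
	}
	prog, err := bpf.Compile(instrs)
	if err != nil {
		panic(err)
	}

	// Evaluate the program against one syscall invocation, exactly as
	// dataAsInput in the deleted test does.
	data := linux.SeccompData{Nr: 1, Arch: seccomp.LINUX_AUDIT_ARCH, Args: [6]uint64{0xf}}
	buf := make([]byte, data.SizeBytes())
	data.MarshalUnsafe(buf)
	ret, err := bpf.Exec(prog, bpf.InputBytes{Data: buf, Order: hostarch.ByteOrder})
	if err != nil {
		panic(err)
	}
	fmt.Println(ret == uint32(linux.SECCOMP_RET_ALLOW)) // true
}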
diff --git a/pkg/seccomp/seccomp_test_victim.go b/pkg/seccomp/seccomp_test_victim.go
deleted file mode 100644
index a96b1e327..000000000
--- a/pkg/seccomp/seccomp_test_victim.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Test binary used to test that seccomp filters are properly constructed and
-// indeed kill the process on violation.
-package main
-
-import (
- "flag"
- "fmt"
- "os"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/seccomp"
-)
-
-func main() {
- dieFlag := flag.Bool("die", false, "trips over the filter if true")
- flag.Parse()
-
- syscalls := seccomp.SyscallRules{
- unix.SYS_ACCEPT: {},
- unix.SYS_BIND: {},
- unix.SYS_BRK: {},
- unix.SYS_CLOCK_GETTIME: {},
- unix.SYS_CLONE: {},
- unix.SYS_CLOSE: {},
- unix.SYS_DUP: {},
- unix.SYS_DUP3: {},
- unix.SYS_EPOLL_CREATE1: {},
- unix.SYS_EPOLL_CTL: {},
- unix.SYS_EPOLL_PWAIT: {},
- unix.SYS_EXIT: {},
- unix.SYS_EXIT_GROUP: {},
- unix.SYS_FALLOCATE: {},
- unix.SYS_FCHMOD: {},
- unix.SYS_FCNTL: {},
- unix.SYS_FSTAT: {},
- unix.SYS_FSYNC: {},
- unix.SYS_FTRUNCATE: {},
- unix.SYS_FUTEX: {},
- unix.SYS_GETDENTS64: {},
- unix.SYS_GETPEERNAME: {},
- unix.SYS_GETPID: {},
- unix.SYS_GETSOCKNAME: {},
- unix.SYS_GETSOCKOPT: {},
- unix.SYS_GETTID: {},
- unix.SYS_GETTIMEOFDAY: {},
- unix.SYS_LISTEN: {},
- unix.SYS_LSEEK: {},
- unix.SYS_MADVISE: {},
- unix.SYS_MINCORE: {},
- unix.SYS_MMAP: {},
- unix.SYS_MPROTECT: {},
- unix.SYS_MUNLOCK: {},
- unix.SYS_MUNMAP: {},
- unix.SYS_NANOSLEEP: {},
- unix.SYS_PPOLL: {},
- unix.SYS_PREAD64: {},
- unix.SYS_PSELECT6: {},
- unix.SYS_PWRITE64: {},
- unix.SYS_READ: {},
- unix.SYS_READLINKAT: {},
- unix.SYS_READV: {},
- unix.SYS_RECVMSG: {},
- unix.SYS_RENAMEAT: {},
- unix.SYS_RESTART_SYSCALL: {},
- unix.SYS_RT_SIGACTION: {},
- unix.SYS_RT_SIGPROCMASK: {},
- unix.SYS_RT_SIGRETURN: {},
- unix.SYS_SCHED_YIELD: {},
- unix.SYS_SENDMSG: {},
- unix.SYS_SETITIMER: {},
- unix.SYS_SET_ROBUST_LIST: {},
- unix.SYS_SETSOCKOPT: {},
- unix.SYS_SHUTDOWN: {},
- unix.SYS_SIGALTSTACK: {},
- unix.SYS_SOCKET: {},
- unix.SYS_SYNC_FILE_RANGE: {},
- unix.SYS_TGKILL: {},
- unix.SYS_UTIMENSAT: {},
- unix.SYS_WRITE: {},
- unix.SYS_WRITEV: {},
- }
-
- arch_syscalls(syscalls)
-
- die := *dieFlag
- if !die {
- syscalls[unix.SYS_OPENAT] = []seccomp.Rule{
- {
- seccomp.EqualTo(10),
- },
- }
- }
-
- if err := seccomp.Install(syscalls); err != nil {
- fmt.Printf("Failed to install seccomp: %v", err)
- os.Exit(1)
- }
- fmt.Printf("Filters installed\n")
-
- unix.RawSyscall(unix.SYS_OPENAT, 10, 0, 0)
- fmt.Printf("Syscall was allowed!!!\n")
-}
diff --git a/pkg/seccomp/seccomp_test_victim_amd64.go b/pkg/seccomp/seccomp_test_victim_amd64.go
deleted file mode 100644
index 5c1ecc301..000000000
--- a/pkg/seccomp/seccomp_test_victim_amd64.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Test binary used to test that seccomp filters are properly constructed and
-// indeed kill the process on violation.
-
-//go:build amd64
-// +build amd64
-
-package main
-
-import (
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/seccomp"
-)
-
-func arch_syscalls(syscalls seccomp.SyscallRules) {
- syscalls[unix.SYS_ARCH_PRCTL] = []seccomp.Rule{}
- syscalls[unix.SYS_EPOLL_WAIT] = []seccomp.Rule{}
- syscalls[unix.SYS_NEWFSTATAT] = []seccomp.Rule{}
- syscalls[unix.SYS_OPEN] = []seccomp.Rule{}
-}
diff --git a/pkg/seccomp/seccomp_test_victim_arm64.go b/pkg/seccomp/seccomp_test_victim_arm64.go
deleted file mode 100644
index 9647e2758..000000000
--- a/pkg/seccomp/seccomp_test_victim_arm64.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Test binary used to test that seccomp filters are properly constructed and
-// indeed kill the process on violation.
-
-//go:build arm64
-// +build arm64
-
-package main
-
-import (
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/seccomp"
-)
-
-func arch_syscalls(syscalls seccomp.SyscallRules) {
- syscalls[unix.SYS_FSTATAT] = []seccomp.Rule{}
-}
diff --git a/pkg/seccomp/seccomp_unsafe_state_autogen.go b/pkg/seccomp/seccomp_unsafe_state_autogen.go
new file mode 100644
index 000000000..e16b5d7c2
--- /dev/null
+++ b/pkg/seccomp/seccomp_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package seccomp
diff --git a/pkg/secio/BUILD b/pkg/secio/BUILD
deleted file mode 100644
index 60f63c7a6..000000000
--- a/pkg/secio/BUILD
+++ /dev/null
@@ -1,19 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "secio",
- srcs = [
- "full_reader.go",
- "secio.go",
- ],
- visibility = ["//pkg/sentry:internal"],
-)
-
-go_test(
- name = "secio_test",
- size = "small",
- srcs = ["secio_test.go"],
- library = ":secio",
-)
diff --git a/pkg/secio/secio_state_autogen.go b/pkg/secio/secio_state_autogen.go
new file mode 100644
index 000000000..372ac4b92
--- /dev/null
+++ b/pkg/secio/secio_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package secio
diff --git a/pkg/secio/secio_test.go b/pkg/secio/secio_test.go
deleted file mode 100644
index d1d905187..000000000
--- a/pkg/secio/secio_test.go
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package secio
-
-import (
- "bytes"
- "errors"
- "io"
- "io/ioutil"
- "math"
- "testing"
-)
-
-var errEndOfBuffer = errors.New("write beyond end of buffer")
-
-// buffer resembles bytes.Buffer, but implements io.ReaderAt and io.WriterAt.
-// Reads beyond the end of the buffer return io.EOF. Writes beyond the end of
-// the buffer return errEndOfBuffer.
-type buffer struct {
- Bytes []byte
-}
-
-// ReadAt implements io.ReaderAt.ReadAt.
-func (b *buffer) ReadAt(dst []byte, off int64) (int, error) {
- if off >= int64(len(b.Bytes)) {
- return 0, io.EOF
- }
- n := copy(dst, b.Bytes[off:])
- if n < len(dst) {
- return n, io.EOF
- }
- return n, nil
-}
-
-// WriteAt implements io.WriterAt.WriteAt.
-func (b *buffer) WriteAt(src []byte, off int64) (int, error) {
- if off >= int64(len(b.Bytes)) {
- return 0, errEndOfBuffer
- }
- n := copy(b.Bytes[off:], src)
- if n < len(src) {
- return n, errEndOfBuffer
- }
- return n, nil
-}
-
-func newBufferString(s string) *buffer {
- return &buffer{[]byte(s)}
-}
-
-func TestOffsetReader(t *testing.T) {
- buf := newBufferString("foobar")
- r := NewOffsetReader(buf, 3)
- dst, err := ioutil.ReadAll(r)
- if want := []byte("bar"); !bytes.Equal(dst, want) || err != nil {
- t.Errorf("ReadAll: got (%q, %v), wanted (%q, nil)", dst, err, want)
- }
-}
-
-func TestSectionReader(t *testing.T) {
- buf := newBufferString("foobarbaz")
- r := NewSectionReader(buf, 3, 3)
- dst, err := ioutil.ReadAll(r)
- if want, wantErr := []byte("bar"), ErrReachedLimit; !bytes.Equal(dst, want) || err != wantErr {
- t.Errorf("ReadAll: got (%q, %v), wanted (%q, %v)", dst, err, want, wantErr)
- }
-}
-
-func TestSectionReaderLimitOverflow(t *testing.T) {
- // SectionReader behaves like OffsetReader when limit overflows int64.
- buf := newBufferString("foobar")
- r := NewSectionReader(buf, 3, math.MaxInt64)
- dst, err := ioutil.ReadAll(r)
- if want := []byte("bar"); !bytes.Equal(dst, want) || err != nil {
- t.Errorf("ReadAll: got (%q, %v), wanted (%q, nil)", dst, err, want)
- }
-}
-
-func TestOffsetWriter(t *testing.T) {
- buf := newBufferString("ABCDEF")
- w := NewOffsetWriter(buf, 3)
- n, err := w.Write([]byte("foobar"))
- if wantN, wantErr := 3, errEndOfBuffer; n != wantN || err != wantErr {
- t.Errorf("WriteAt: got (%v, %v), wanted (%v, %v)", n, err, wantN, wantErr)
- }
- if got, want := buf.Bytes, []byte("ABCfoo"); !bytes.Equal(got, want) {
- t.Errorf("buf.Bytes: got %q, wanted %q", got, want)
- }
-}
-
-func TestSectionWriter(t *testing.T) {
- buf := newBufferString("ABCDEFGHI")
- w := NewSectionWriter(buf, 3, 3)
- n, err := w.Write([]byte("foobar"))
- if wantN, wantErr := 3, ErrReachedLimit; n != wantN || err != wantErr {
- t.Errorf("WriteAt: got (%v, %v), wanted (%v, %v)", n, err, wantN, wantErr)
- }
- if got, want := buf.Bytes, []byte("ABCfooGHI"); !bytes.Equal(got, want) {
- t.Errorf("buf.Bytes: got %q, wanted %q", got, want)
- }
-}
-
-func TestSectionWriterLimitOverflow(t *testing.T) {
- // SectionWriter behaves like OffsetWriter when limit overflows int64.
- buf := newBufferString("ABCDEF")
- w := NewSectionWriter(buf, 3, math.MaxInt64)
- n, err := w.Write([]byte("foobar"))
- if wantN, wantErr := 3, errEndOfBuffer; n != wantN || err != wantErr {
- t.Errorf("WriteAt: got (%v, %v), wanted (%v, %v)", n, err, wantN, wantErr)
- }
- if got, want := buf.Bytes, []byte("ABCfoo"); !bytes.Equal(got, want) {
- t.Errorf("buf.Bytes: got %q, wanted %q", got, want)
- }
-}
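
The tests above read and write through secio's offset and section wrappers around io.ReaderAt and io.WriterAt. A short sketch of the read side against a standard-library ReaderAt; ErrReachedLimit is the sentinel the deleted tests compare against when a section's limit is reached:

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"

	"gvisor.dev/gvisor/pkg/secio"
)

func main() {
	src := strings.NewReader("foobarbaz") // implements io.ReaderAt

	// Read everything from offset 3 onward.
	rest, err := io.ReadAll(secio.NewOffsetReader(src, 3))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", rest) // "barbaz"

	// Read at most 3 bytes starting at offset 3; exhausting the section is
	// reported as ErrReachedLimit rather than io.EOF.
	section, err := io.ReadAll(secio.NewSectionReader(src, 3, 3))
	fmt.Printf("%q %v\n", section, errors.Is(err, secio.ErrReachedLimit)) // "bar" true
}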
diff --git a/pkg/segment/BUILD b/pkg/segment/BUILD
deleted file mode 100644
index f57ccc170..000000000
--- a/pkg/segment/BUILD
+++ /dev/null
@@ -1,33 +0,0 @@
-load("//tools/go_generics:defs.bzl", "go_template")
-
-package(
- default_visibility = ["//:sandbox"],
- licenses = ["notice"],
-)
-
-go_template(
- name = "generic_range",
- srcs = ["range.go"],
- types = [
- "T",
- ],
-)
-
-go_template(
- name = "generic_set",
- srcs = [
- "set.go",
- "set_state.go",
- ],
- opt_consts = [
- "minDegree",
- # trackGaps must either be 0 or 1.
- "trackGaps",
- ],
- types = [
- "Key",
- "Range",
- "Value",
- "Functions",
- ],
-)
diff --git a/pkg/segment/set_state.go b/pkg/segment/set_state.go
deleted file mode 100644
index 76de92591..000000000
--- a/pkg/segment/set_state.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package segment
-
-func (s *Set) saveRoot() *SegmentDataSlices {
- return s.ExportSortedSlices()
-}
-
-func (s *Set) loadRoot(sds *SegmentDataSlices) {
- if err := s.ImportSortedSlices(sds); err != nil {
- panic(err)
- }
-}
diff --git a/pkg/segment/test/BUILD b/pkg/segment/test/BUILD
deleted file mode 100644
index 131bf09b9..000000000
--- a/pkg/segment/test/BUILD
+++ /dev/null
@@ -1,68 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(
- default_visibility = ["//visibility:private"],
- licenses = ["notice"],
-)
-
-go_template_instance(
- name = "int_range",
- out = "int_range.go",
- package = "segment",
- template = "//pkg/segment:generic_range",
- types = {
- "T": "int",
- },
-)
-
-go_template_instance(
- name = "int_set",
- out = "int_set.go",
- package = "segment",
- template = "//pkg/segment:generic_set",
- types = {
- "Key": "int",
- "Range": "Range",
- "Value": "int",
- "Functions": "setFunctions",
- },
-)
-
-go_template_instance(
- name = "gap_set",
- out = "gap_set.go",
- consts = {
- "trackGaps": "1",
- },
- package = "segment",
- prefix = "gap",
- template = "//pkg/segment:generic_set",
- types = {
- "Key": "int",
- "Range": "Range",
- "Value": "int",
- "Functions": "gapSetFunctions",
- },
-)
-
-go_library(
- name = "segment",
- testonly = 1,
- srcs = [
- "gap_set.go",
- "int_range.go",
- "int_set.go",
- "set_functions.go",
- ],
- deps = [
- "//pkg/state",
- ],
-)
-
-go_test(
- name = "segment_test",
- size = "small",
- srcs = ["segment_test.go"],
- library = ":segment",
-)
diff --git a/pkg/segment/test/segment_test.go b/pkg/segment/test/segment_test.go
deleted file mode 100644
index 85fa19096..000000000
--- a/pkg/segment/test/segment_test.go
+++ /dev/null
@@ -1,865 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package segment
-
-import (
- "fmt"
- "math/rand"
- "reflect"
- "testing"
-)
-
-const (
- // testSize is the baseline number of elements inserted into sets under
- // test, and is chosen to be large enough to ensure interesting amounts of
- // tree rebalancing.
- //
-	// Note that because segmentTestCheck is called between each insertion/removal
- // some tests that use it, tests may be quadratic in testSize.
- testSize = 8000
-
- // valueOffset is the difference between the value and start of test
- // segments.
- valueOffset = 100000
-
- // intervalLength is the interval used by random gap tests.
- intervalLength = 10
-)
-
-func shuffle(xs []int) {
- rand.Shuffle(len(xs), func(i, j int) { xs[i], xs[j] = xs[j], xs[i] })
-}
-
-func randIntervalPermutation(size int) []int {
- p := make([]int, size)
- for i := range p {
- p[i] = intervalLength * i
- }
- shuffle(p)
- return p
-}
-
-// validate can be passed to segmentTestCheck.
-func validate(nr int, r Range, v int) error {
- if got, want := v, r.Start+valueOffset; got != want {
- return fmt.Errorf("segment %d has key %d, value %d (expected %d)", nr, r.Start, got, want)
- }
- return nil
-}
-
-// checkSetMaxGap returns an error if maxGap inside all nodes of s is not well
-// maintained.
-func checkSetMaxGap(s *gapSet) error {
- n := s.root
- return checkNodeMaxGap(&n)
-}
-
-// checkNodeMaxGap returns an error if maxGap inside the subtree rooted by n is
-// not well maintained.
-func checkNodeMaxGap(n *gapnode) error {
- var max int
- if !n.hasChildren {
- max = n.calculateMaxGapLeaf()
- } else {
- for i := 0; i <= n.nrSegments; i++ {
- child := n.children[i]
- if err := checkNodeMaxGap(child); err != nil {
- return err
- }
- if temp := child.maxGap.Get(); i == 0 || temp > max {
- max = temp
- }
- }
- }
- if max != n.maxGap.Get() {
-		return fmt.Errorf("maxGap wrong in node\n%v\nexpected: %d got: %d", n, max, n.maxGap.Get())
- }
- return nil
-}
-
-func TestAddRandom(t *testing.T) {
- var s Set
- order := rand.Perm(testSize)
- var nrInsertions int
- for i, j := range order {
- if !s.AddWithoutMerging(Range{j, j + 1}, j+valueOffset) {
- t.Errorf("Iteration %d: failed to insert segment with key %d", i, j)
- break
- }
- nrInsertions++
- if err := s.segmentTestCheck(nrInsertions, validate); err != nil {
- t.Errorf("Iteration %d: %v", i, err)
- break
- }
- }
- if got, want := s.countSegments(), nrInsertions; got != want {
- t.Errorf("Wrong final number of segments: got %d, wanted %d", got, want)
- }
- if t.Failed() {
- t.Logf("Insertion order: %v", order[:nrInsertions])
- t.Logf("Set contents:\n%v", &s)
- }
-}
-
-func TestRemoveRandom(t *testing.T) {
- var s Set
- for i := 0; i < testSize; i++ {
- if !s.AddWithoutMerging(Range{i, i + 1}, i+valueOffset) {
- t.Fatalf("Failed to insert segment %d", i)
- }
- }
- order := rand.Perm(testSize)
- var nrRemovals int
- for i, j := range order {
- seg := s.FindSegment(j)
- if !seg.Ok() {
- t.Errorf("Iteration %d: failed to find segment with key %d", i, j)
- break
- }
- s.Remove(seg)
- nrRemovals++
- if err := s.segmentTestCheck(testSize-nrRemovals, validate); err != nil {
- t.Errorf("Iteration %d: %v", i, err)
- break
- }
- }
- if got, want := s.countSegments(), testSize-nrRemovals; got != want {
- t.Errorf("Wrong final number of segments: got %d, wanted %d", got, want)
- }
- if t.Failed() {
- t.Logf("Removal order: %v", order[:nrRemovals])
- t.Logf("Set contents:\n%v", &s)
- t.FailNow()
- }
-}
-
-func TestMaxGapAddRandom(t *testing.T) {
- var s gapSet
- order := rand.Perm(testSize)
- var nrInsertions int
- for i, j := range order {
- if !s.AddWithoutMerging(Range{j, j + 1}, j+valueOffset) {
- t.Errorf("Iteration %d: failed to insert segment with key %d", i, j)
- break
- }
- nrInsertions++
- if err := s.segmentTestCheck(nrInsertions, validate); err != nil {
- t.Errorf("Iteration %d: %v", i, err)
- break
- }
- if err := checkSetMaxGap(&s); err != nil {
- t.Errorf("When inserting %d: %v", j, err)
- break
- }
- }
- if got, want := s.countSegments(), nrInsertions; got != want {
- t.Errorf("Wrong final number of segments: got %d, wanted %d", got, want)
- }
- if t.Failed() {
- t.Logf("Insertion order: %v", order[:nrInsertions])
- t.Logf("Set contents:\n%v", &s)
- }
-}
-
-func TestMaxGapAddRandomWithRandomInterval(t *testing.T) {
- var s gapSet
- order := randIntervalPermutation(testSize)
- var nrInsertions int
- for i, j := range order {
- if !s.AddWithoutMerging(Range{j, j + rand.Intn(intervalLength-1) + 1}, j+valueOffset) {
- t.Errorf("Iteration %d: failed to insert segment with key %d", i, j)
- break
- }
- nrInsertions++
- if err := s.segmentTestCheck(nrInsertions, validate); err != nil {
- t.Errorf("Iteration %d: %v", i, err)
- break
- }
- if err := checkSetMaxGap(&s); err != nil {
- t.Errorf("When inserting %d: %v", j, err)
- break
- }
- }
- if got, want := s.countSegments(), nrInsertions; got != want {
- t.Errorf("Wrong final number of segments: got %d, wanted %d", got, want)
- }
- if t.Failed() {
- t.Logf("Insertion order: %v", order[:nrInsertions])
- t.Logf("Set contents:\n%v", &s)
- }
-}
-
-func TestMaxGapAddRandomWithMerge(t *testing.T) {
- var s gapSet
- order := randIntervalPermutation(testSize)
-	nrInsertions := 1 // all adjacent ranges merge, so exactly one segment remains
- for i, j := range order {
- if !s.Add(Range{j, j + intervalLength}, j+valueOffset) {
- t.Errorf("Iteration %d: failed to insert segment with key %d", i, j)
- break
- }
- if err := checkSetMaxGap(&s); err != nil {
- t.Errorf("When inserting %d: %v", j, err)
- break
- }
- }
- if got, want := s.countSegments(), nrInsertions; got != want {
- t.Errorf("Wrong final number of segments: got %d, wanted %d", got, want)
- }
- if t.Failed() {
- t.Logf("Insertion order: %v", order)
- t.Logf("Set contents:\n%v", &s)
- }
-}
-
-func TestMaxGapRemoveRandom(t *testing.T) {
- var s gapSet
- for i := 0; i < testSize; i++ {
- if !s.AddWithoutMerging(Range{i, i + 1}, i+valueOffset) {
- t.Fatalf("Failed to insert segment %d", i)
- }
- }
- order := rand.Perm(testSize)
- var nrRemovals int
- for i, j := range order {
- seg := s.FindSegment(j)
- if !seg.Ok() {
- t.Errorf("Iteration %d: failed to find segment with key %d", i, j)
- break
- }
- temprange := seg.Range()
- s.Remove(seg)
- nrRemovals++
- if err := s.segmentTestCheck(testSize-nrRemovals, validate); err != nil {
- t.Errorf("Iteration %d: %v", i, err)
- break
- }
- if err := checkSetMaxGap(&s); err != nil {
- t.Errorf("When removing %v: %v", temprange, err)
- break
- }
- }
- if got, want := s.countSegments(), testSize-nrRemovals; got != want {
- t.Errorf("Wrong final number of segments: got %d, wanted %d", got, want)
- }
- if t.Failed() {
- t.Logf("Removal order: %v", order[:nrRemovals])
- t.Logf("Set contents:\n%v", &s)
- t.FailNow()
- }
-}
-
-func TestMaxGapRemoveHalfRandom(t *testing.T) {
- var s gapSet
- for i := 0; i < testSize; i++ {
- if !s.AddWithoutMerging(Range{intervalLength * i, intervalLength*i + rand.Intn(intervalLength-1) + 1}, intervalLength*i+valueOffset) {
- t.Fatalf("Failed to insert segment %d", i)
- }
- }
- order := randIntervalPermutation(testSize)
- order = order[:testSize/2]
- var nrRemovals int
- for i, j := range order {
- seg := s.FindSegment(j)
- if !seg.Ok() {
- t.Errorf("Iteration %d: failed to find segment with key %d", i, j)
- break
- }
- temprange := seg.Range()
- s.Remove(seg)
- nrRemovals++
- if err := s.segmentTestCheck(testSize-nrRemovals, validate); err != nil {
- t.Errorf("Iteration %d: %v", i, err)
- break
- }
- if err := checkSetMaxGap(&s); err != nil {
- t.Errorf("When removing %v: %v", temprange, err)
- break
- }
- }
- if got, want := s.countSegments(), testSize-nrRemovals; got != want {
- t.Errorf("Wrong final number of segments: got %d, wanted %d", got, want)
- }
- if t.Failed() {
- t.Logf("Removal order: %v", order[:nrRemovals])
- t.Logf("Set contents:\n%v", &s)
- t.FailNow()
- }
-}
-
-func TestMaxGapAddRandomRemoveRandomHalfWithMerge(t *testing.T) {
- var s gapSet
- order := randIntervalPermutation(testSize * 2)
- order = order[:testSize]
- for i, j := range order {
- if !s.Add(Range{j, j + intervalLength}, j+valueOffset) {
- t.Errorf("Iteration %d: failed to insert segment with key %d", i, j)
- break
- }
- if err := checkSetMaxGap(&s); err != nil {
- t.Errorf("When inserting %d: %v", j, err)
- break
- }
- }
- shuffle(order)
- var nrRemovals int
- for _, j := range order {
- seg := s.FindSegment(j)
- if !seg.Ok() {
- continue
- }
- temprange := seg.Range()
- s.Remove(seg)
- nrRemovals++
- if err := checkSetMaxGap(&s); err != nil {
- t.Errorf("When removing %v: %v", temprange, err)
- break
- }
- }
- if t.Failed() {
- t.Logf("Removal order: %v", order[:nrRemovals])
- t.Logf("Set contents:\n%v", &s)
- t.FailNow()
- }
-}
-
-func TestNextLargeEnoughGap(t *testing.T) {
- var s gapSet
- order := randIntervalPermutation(testSize * 2)
- order = order[:testSize]
- for i, j := range order {
- if !s.Add(Range{j, j + rand.Intn(intervalLength-1) + 1}, j+valueOffset) {
- t.Errorf("Iteration %d: failed to insert segment with key %d", i, j)
- break
- }
- if err := checkSetMaxGap(&s); err != nil {
- t.Errorf("When inserting %d: %v", j, err)
- break
- }
- }
- shuffle(order)
- order = order[:testSize/2]
- for _, j := range order {
- seg := s.FindSegment(j)
- if !seg.Ok() {
- continue
- }
- temprange := seg.Range()
- s.Remove(seg)
- if err := checkSetMaxGap(&s); err != nil {
- t.Errorf("When removing %v: %v", temprange, err)
- break
- }
- }
- minSize := 7
- var gapArr1 []int
- for gap := s.LowerBoundGap(0).NextLargeEnoughGap(minSize); gap.Ok(); gap = gap.NextLargeEnoughGap(minSize) {
- if gap.Range().Length() < minSize {
- t.Errorf("NextLargeEnoughGap wrong, gap %v has length %d, wanted %d", gap.Range(), gap.Range().Length(), minSize)
- } else {
- gapArr1 = append(gapArr1, gap.Range().Start)
- }
- }
- var gapArr2 []int
- for gap := s.LowerBoundGap(0).NextGap(); gap.Ok(); gap = gap.NextGap() {
- if gap.Range().Length() >= minSize {
- gapArr2 = append(gapArr2, gap.Range().Start)
- }
- }
-
- if !reflect.DeepEqual(gapArr2, gapArr1) {
- t.Errorf("Search result not correct, got: %v, wanted: %v", gapArr1, gapArr2)
- }
- if t.Failed() {
- t.Logf("Set contents:\n%v", &s)
- t.FailNow()
- }
-}
-
-func TestPrevLargeEnoughGap(t *testing.T) {
- var s gapSet
- order := randIntervalPermutation(testSize * 2)
- order = order[:testSize]
- for i, j := range order {
- if !s.Add(Range{j, j + rand.Intn(intervalLength-1) + 1}, j+valueOffset) {
- t.Errorf("Iteration %d: failed to insert segment with key %d", i, j)
- break
- }
- if err := checkSetMaxGap(&s); err != nil {
- t.Errorf("When inserting %d: %v", j, err)
- break
- }
- }
- end := s.LastSegment().End()
- shuffle(order)
- order = order[:testSize/2]
- for _, j := range order {
- seg := s.FindSegment(j)
- if !seg.Ok() {
- continue
- }
- temprange := seg.Range()
- s.Remove(seg)
- if err := checkSetMaxGap(&s); err != nil {
- t.Errorf("When removing %v: %v", temprange, err)
- break
- }
- }
- minSize := 7
- var gapArr1 []int
- for gap := s.UpperBoundGap(end + intervalLength).PrevLargeEnoughGap(minSize); gap.Ok(); gap = gap.PrevLargeEnoughGap(minSize) {
- if gap.Range().Length() < minSize {
- t.Errorf("PrevLargeEnoughGap wrong, gap length %d, wanted %d", gap.Range().Length(), minSize)
- } else {
- gapArr1 = append(gapArr1, gap.Range().Start)
- }
- }
- var gapArr2 []int
- for gap := s.UpperBoundGap(end + intervalLength).PrevGap(); gap.Ok(); gap = gap.PrevGap() {
- if gap.Range().Length() >= minSize {
- gapArr2 = append(gapArr2, gap.Range().Start)
- }
- }
- if !reflect.DeepEqual(gapArr2, gapArr1) {
- t.Errorf("Search result not correct, got: %v, wanted: %v", gapArr1, gapArr2)
- }
- if t.Failed() {
- t.Logf("Set contents:\n%v", &s)
- t.FailNow()
- }
-}
-
-func TestAddSequentialAdjacent(t *testing.T) {
- var s Set
- var nrInsertions int
- for i := 0; i < testSize; i++ {
- if !s.AddWithoutMerging(Range{i, i + 1}, i+valueOffset) {
- t.Fatalf("Failed to insert segment %d", i)
- }
- nrInsertions++
- if err := s.segmentTestCheck(nrInsertions, validate); err != nil {
- t.Errorf("Iteration %d: %v", i, err)
- break
- }
- }
- if got, want := s.countSegments(), nrInsertions; got != want {
- t.Errorf("Wrong final number of segments: got %d, wanted %d", got, want)
- }
- if t.Failed() {
- t.Logf("Set contents:\n%v", &s)
- }
-
- first := s.FirstSegment()
- gotSeg, gotGap := first.PrevNonEmpty()
- if wantGap := s.FirstGap(); gotSeg.Ok() || gotGap != wantGap {
- t.Errorf("FirstSegment().PrevNonEmpty(): got (%v, %v), wanted (<terminal iterator>, %v)", gotSeg, gotGap, wantGap)
- }
- gotSeg, gotGap = first.NextNonEmpty()
- if wantSeg := first.NextSegment(); gotSeg != wantSeg || gotGap.Ok() {
- t.Errorf("FirstSegment().NextNonEmpty(): got (%v, %v), wanted (%v, <terminal iterator>)", gotSeg, gotGap, wantSeg)
- }
-
- last := s.LastSegment()
- gotSeg, gotGap = last.PrevNonEmpty()
- if wantSeg := last.PrevSegment(); gotSeg != wantSeg || gotGap.Ok() {
- t.Errorf("LastSegment().PrevNonEmpty(): got (%v, %v), wanted (%v, <terminal iterator>)", gotSeg, gotGap, wantSeg)
- }
- gotSeg, gotGap = last.NextNonEmpty()
- if wantGap := s.LastGap(); gotSeg.Ok() || gotGap != wantGap {
- t.Errorf("LastSegment().NextNonEmpty(): got (%v, %v), wanted (<terminal iterator>, %v)", gotSeg, gotGap, wantGap)
- }
-
- for seg := first.NextSegment(); seg != last; seg = seg.NextSegment() {
- gotSeg, gotGap = seg.PrevNonEmpty()
- if wantSeg := seg.PrevSegment(); gotSeg != wantSeg || gotGap.Ok() {
- t.Errorf("%v.PrevNonEmpty(): got (%v, %v), wanted (%v, <terminal iterator>)", seg, gotSeg, gotGap, wantSeg)
- }
- gotSeg, gotGap = seg.NextNonEmpty()
- if wantSeg := seg.NextSegment(); gotSeg != wantSeg || gotGap.Ok() {
- t.Errorf("%v.NextNonEmpty(): got (%v, %v), wanted (%v, <terminal iterator>)", seg, gotSeg, gotGap, wantSeg)
- }
- }
-}
-
-func TestAddSequentialNonAdjacent(t *testing.T) {
- var s Set
- var nrInsertions int
- for i := 0; i < testSize; i++ {
- // The range here differs from TestAddSequentialAdjacent so that
- // consecutive segments are not adjacent.
- if !s.AddWithoutMerging(Range{2 * i, 2*i + 1}, 2*i+valueOffset) {
- t.Fatalf("Failed to insert segment %d", i)
- }
- nrInsertions++
- if err := s.segmentTestCheck(nrInsertions, validate); err != nil {
- t.Errorf("Iteration %d: %v", i, err)
- break
- }
- }
- if got, want := s.countSegments(), nrInsertions; got != want {
- t.Errorf("Wrong final number of segments: got %d, wanted %d", got, want)
- }
- if t.Failed() {
- t.Logf("Set contents:\n%v", &s)
- }
-
- for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
- gotSeg, gotGap := seg.PrevNonEmpty()
- if wantGap := seg.PrevGap(); gotSeg.Ok() || gotGap != wantGap {
- t.Errorf("%v.PrevNonEmpty(): got (%v, %v), wanted (<terminal iterator>, %v)", seg, gotSeg, gotGap, wantGap)
- }
- gotSeg, gotGap = seg.NextNonEmpty()
- if wantGap := seg.NextGap(); gotSeg.Ok() || gotGap != wantGap {
- t.Errorf("%v.NextNonEmpty(): got (%v, %v), wanted (<terminal iterator>, %v)", seg, gotSeg, gotGap, wantGap)
- }
- }
-}
-
-func TestMergeSplit(t *testing.T) {
- tests := []struct {
- name string
- initial []Range
- split bool
- splitAddr int
- final []Range
- }{
- {
- name: "Add merges after existing segment",
- initial: []Range{{1000, 1100}, {1100, 1200}},
- final: []Range{{1000, 1200}},
- },
- {
- name: "Add merges before existing segment",
- initial: []Range{{1100, 1200}, {1000, 1100}},
- final: []Range{{1000, 1200}},
- },
- {
- name: "Add merges between existing segments",
- initial: []Range{{1000, 1100}, {1200, 1300}, {1100, 1200}},
- final: []Range{{1000, 1300}},
- },
- {
- name: "SplitAt does nothing at a free address",
- initial: []Range{{100, 200}},
- split: true,
- splitAddr: 300,
- final: []Range{{100, 200}},
- },
- {
- name: "SplitAt does nothing at the beginning of a segment",
- initial: []Range{{100, 200}},
- split: true,
- splitAddr: 100,
- final: []Range{{100, 200}},
- },
- {
- name: "SplitAt does nothing at the end of a segment",
- initial: []Range{{100, 200}},
- split: true,
- splitAddr: 200,
- final: []Range{{100, 200}},
- },
- {
- name: "SplitAt splits in the middle of a segment",
- initial: []Range{{100, 200}},
- split: true,
- splitAddr: 150,
- final: []Range{{100, 150}, {150, 200}},
- },
- }
-Tests:
- for _, test := range tests {
- var s Set
- for _, r := range test.initial {
- if !s.Add(r, 0) {
- t.Errorf("%s: Add(%v) failed; set contents:\n%v", test.name, r, &s)
- continue Tests
- }
- }
- if test.split {
- s.SplitAt(test.splitAddr)
- }
- var i int
- for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
-			if i >= len(test.final) {
- t.Errorf("%s: Incorrect number of segments: got %d, wanted %d; set contents:\n%v", test.name, s.countSegments(), len(test.final), &s)
- continue Tests
- }
- if got, want := seg.Range(), test.final[i]; got != want {
- t.Errorf("%s: Segment %d mismatch: got %v, wanted %v; set contents:\n%v", test.name, i, got, want, &s)
- continue Tests
- }
- i++
- }
- if i < len(test.final) {
- t.Errorf("%s: Incorrect number of segments: got %d, wanted %d; set contents:\n%v", test.name, i, len(test.final), &s)
- }
- }
-}
-
-func TestIsolate(t *testing.T) {
- tests := []struct {
- name string
- initial Range
- bounds Range
- final []Range
- }{
- {
- name: "Isolate does not split a segment that falls inside bounds",
- initial: Range{100, 200},
- bounds: Range{100, 200},
- final: []Range{{100, 200}},
- },
- {
- name: "Isolate splits at beginning of segment",
- initial: Range{50, 200},
- bounds: Range{100, 200},
- final: []Range{{50, 100}, {100, 200}},
- },
- {
- name: "Isolate splits at end of segment",
- initial: Range{100, 250},
- bounds: Range{100, 200},
- final: []Range{{100, 200}, {200, 250}},
- },
- {
- name: "Isolate splits at beginning and end of segment",
- initial: Range{50, 250},
- bounds: Range{100, 200},
- final: []Range{{50, 100}, {100, 200}, {200, 250}},
- },
- }
-Tests:
- for _, test := range tests {
- var s Set
- seg := s.Insert(s.FirstGap(), test.initial, 0)
- seg = s.Isolate(seg, test.bounds)
- if !test.bounds.IsSupersetOf(seg.Range()) {
- t.Errorf("%s: Isolated segment %v lies outside bounds %v; set contents:\n%v", test.name, seg.Range(), test.bounds, &s)
- }
- var i int
- for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
-			if i >= len(test.final) {
- t.Errorf("%s: Incorrect number of segments: got %d, wanted %d; set contents:\n%v", test.name, s.countSegments(), len(test.final), &s)
- continue Tests
- }
- if got, want := seg.Range(), test.final[i]; got != want {
- t.Errorf("%s: Segment %d mismatch: got %v, wanted %v; set contents:\n%v", test.name, i, got, want, &s)
- continue Tests
- }
- i++
- }
- if i < len(test.final) {
- t.Errorf("%s: Incorrect number of segments: got %d, wanted %d; set contents:\n%v", test.name, i, len(test.final), &s)
- }
- }
-}
-
-func benchmarkAddSequential(b *testing.B, size int) {
- for n := 0; n < b.N; n++ {
- var s Set
- for i := 0; i < size; i++ {
- if !s.AddWithoutMerging(Range{i, i + 1}, i) {
- b.Fatalf("Failed to insert segment %d", i)
- }
- }
- }
-}
-
-func benchmarkAddRandom(b *testing.B, size int) {
- order := rand.Perm(size)
-
- b.ResetTimer()
- for n := 0; n < b.N; n++ {
- var s Set
- for _, i := range order {
- if !s.AddWithoutMerging(Range{i, i + 1}, i) {
- b.Fatalf("Failed to insert segment %d", i)
- }
- }
- }
-}
-
-func benchmarkFindSequential(b *testing.B, size int) {
- var s Set
- for i := 0; i < size; i++ {
- if !s.AddWithoutMerging(Range{i, i + 1}, i) {
- b.Fatalf("Failed to insert segment %d", i)
- }
- }
-
- b.ResetTimer()
- for n := 0; n < b.N; n++ {
- for i := 0; i < size; i++ {
- if seg := s.FindSegment(i); !seg.Ok() {
- b.Fatalf("Failed to find segment %d", i)
- }
- }
- }
-}
-
-func benchmarkFindRandom(b *testing.B, size int) {
- var s Set
- for i := 0; i < size; i++ {
- if !s.AddWithoutMerging(Range{i, i + 1}, i) {
- b.Fatalf("Failed to insert segment %d", i)
- }
- }
- order := rand.Perm(size)
-
- b.ResetTimer()
- for n := 0; n < b.N; n++ {
- for _, i := range order {
- if si := s.FindSegment(i); !si.Ok() {
- b.Fatalf("Failed to find segment %d", i)
- }
- }
- }
-}
-
-func benchmarkIteration(b *testing.B, size int) {
- var s Set
- for i := 0; i < size; i++ {
- if !s.AddWithoutMerging(Range{i, i + 1}, i) {
- b.Fatalf("Failed to insert segment %d", i)
- }
- }
-
- b.ResetTimer()
- var count uint64
- for n := 0; n < b.N; n++ {
- for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
- count++
- }
- }
- if got, want := count, uint64(size)*uint64(b.N); got != want {
- b.Fatalf("Iterated wrong number of segments: got %d, wanted %d", got, want)
- }
-}
-
-func benchmarkAddFindRemoveSequential(b *testing.B, size int) {
- for n := 0; n < b.N; n++ {
- var s Set
- for i := 0; i < size; i++ {
- if !s.AddWithoutMerging(Range{i, i + 1}, i) {
- b.Fatalf("Failed to insert segment %d", i)
- }
- }
- for i := 0; i < size; i++ {
- seg := s.FindSegment(i)
- if !seg.Ok() {
- b.Fatalf("Failed to find segment %d", i)
- }
- s.Remove(seg)
- }
- if !s.IsEmpty() {
- b.Fatalf("Set not empty after all removals:\n%v", &s)
- }
- }
-}
-
-func benchmarkAddFindRemoveRandom(b *testing.B, size int) {
- order := rand.Perm(size)
-
- b.ResetTimer()
- for n := 0; n < b.N; n++ {
- var s Set
- for _, i := range order {
- if !s.AddWithoutMerging(Range{i, i + 1}, i) {
- b.Fatalf("Failed to insert segment %d", i)
- }
- }
- for _, i := range order {
- seg := s.FindSegment(i)
- if !seg.Ok() {
- b.Fatalf("Failed to find segment %d", i)
- }
- s.Remove(seg)
- }
- if !s.IsEmpty() {
- b.Fatalf("Set not empty after all removals:\n%v", &s)
- }
- }
-}
-
-// Although we don't generally expect our segment sets to get this big, the
-// larger sizes are useful for emulating the effect of cache pressure.
-var testSizes = []struct {
- desc string
- size int
-}{
- {"64", 1 << 6},
- {"256", 1 << 8},
- {"1K", 1 << 10},
- {"4K", 1 << 12},
- {"16K", 1 << 14},
- {"64K", 1 << 16},
-}
-
-func BenchmarkAddSequential(b *testing.B) {
- for _, test := range testSizes {
- b.Run(test.desc, func(b *testing.B) {
- benchmarkAddSequential(b, test.size)
- })
- }
-}
-
-func BenchmarkAddRandom(b *testing.B) {
- for _, test := range testSizes {
- b.Run(test.desc, func(b *testing.B) {
- benchmarkAddRandom(b, test.size)
- })
- }
-}
-
-func BenchmarkFindSequential(b *testing.B) {
- for _, test := range testSizes {
- b.Run(test.desc, func(b *testing.B) {
- benchmarkFindSequential(b, test.size)
- })
- }
-}
-
-func BenchmarkFindRandom(b *testing.B) {
- for _, test := range testSizes {
- b.Run(test.desc, func(b *testing.B) {
- benchmarkFindRandom(b, test.size)
- })
- }
-}
-
-func BenchmarkIteration(b *testing.B) {
- for _, test := range testSizes {
- b.Run(test.desc, func(b *testing.B) {
- benchmarkIteration(b, test.size)
- })
- }
-}
-
-func BenchmarkAddFindRemoveSequential(b *testing.B) {
- for _, test := range testSizes {
- b.Run(test.desc, func(b *testing.B) {
- benchmarkAddFindRemoveSequential(b, test.size)
- })
- }
-}
-
-func BenchmarkAddFindRemoveRandom(b *testing.B) {
- for _, test := range testSizes {
- b.Run(test.desc, func(b *testing.B) {
- benchmarkAddFindRemoveRandom(b, test.size)
- })
- }
-}
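
The gap tests above hinge on one invariant: every node of the generated B-tree caches the largest gap (maxGap) found anywhere in its subtree, which is what allows NextLargeEnoughGap and PrevLargeEnoughGap to skip whole subtrees. checkSetMaxGap recomputes that value bottom-up and compares it against the cache after every mutation. The standalone sketch below states the same property over a plain sorted slice of ranges; it is an illustration of what the cached values must agree with, not the tree implementation.

package main

import "fmt"

// Range is a half-open interval [Start, End).
type Range struct{ Start, End int }

// largestGap returns the widest gap between consecutive ranges in a sorted,
// non-overlapping slice. Gaps before the first and after the last range are
// ignored to keep the sketch short.
func largestGap(rs []Range) int {
	max := 0
	for i := 1; i < len(rs); i++ {
		if g := rs[i].Start - rs[i-1].End; g > max {
			max = g
		}
	}
	return max
}

func main() {
	fmt.Println(largestGap([]Range{{0, 2}, {5, 6}, {20, 30}})) // 14
}

In the real set the same quantity is maintained incrementally on every insert, remove, split, and merge, which is why the tests call checkSetMaxGap after each operation rather than once at the end.
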
diff --git a/pkg/segment/test/set_functions.go b/pkg/segment/test/set_functions.go
deleted file mode 100644
index 652c010da..000000000
--- a/pkg/segment/test/set_functions.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package segment is a test package.
-package segment
-
-type setFunctions struct{}
-
-// MinKey returns the minimum key for the set.
-func (s setFunctions) MinKey() int {
- return -s.MaxKey() - 1
-}
-
-// MaxKey returns the maximum key for the set.
-func (setFunctions) MaxKey() int {
- return int(^uint(0) >> 1)
-}
-
-func (setFunctions) ClearValue(*int) {}
-
-func (setFunctions) Merge(_ Range, val1 int, _ Range, _ int) (int, bool) {
- return val1, true
-}
-
-func (setFunctions) Split(_ Range, val int, _ int) (int, int) {
- return val, val
-}
-
-type gapSetFunctions struct {
- setFunctions
-}
-
-// MinKey is adjusted to make sure no addition overflow can happen in test
-// cases, e.g. a gap with range {MinInt, 2} would overflow in Range().Length().
-//
-// Normally Keys should be unsigned to avoid these issues.
-func (s gapSetFunctions) MinKey() int {
- return s.setFunctions.MinKey() / 2
-}
-
-// MaxKey returns the maximum key for the set.
-func (s gapSetFunctions) MaxKey() int {
- return s.setFunctions.MaxKey() / 2
-}
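
The setFunctions above are the callbacks the templates use to decide how segment values behave across structural changes: Merge keeps the first segment's value and always succeeds, and Split hands the same value to both halves. The following standalone sketch restates those two semantics with an invented seg type, purely for illustration.

package main

import "fmt"

// seg is a toy value-carrying segment; it is not the generated type.
type seg struct {
	start, end, val int
}

// merge joins two adjacent segments and keeps the first value, mirroring
// setFunctions.Merge returning (val1, true).
func merge(a, b seg) seg { return seg{a.start, b.end, a.val} }

// split cuts a segment at addr and gives both halves the same value,
// mirroring setFunctions.Split returning (val, val).
func split(s seg, addr int) (seg, seg) {
	return seg{s.start, addr, s.val}, seg{addr, s.end, s.val}
}

func main() {
	a, b := split(seg{100, 200, 7}, 150)
	fmt.Println(a, b, merge(a, b)) // {100 150 7} {150 200 7} {100 200 7}
}
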
diff --git a/pkg/sentry/BUILD b/pkg/sentry/BUILD
deleted file mode 100644
index e759dc36f..000000000
--- a/pkg/sentry/BUILD
+++ /dev/null
@@ -1,14 +0,0 @@
-package(licenses = ["notice"])
-
-# The "internal" package_group should be used as much as possible by packages
-# that should remain Sentry-internal (i.e. not be exposed directly to command
-# line tooling or APIs).
-package_group(
- name = "internal",
- packages = [
- "//pkg/sentry/...",
- "//runsc/...",
- # Code generated by go_marshal relies on go_marshal libraries.
- "//tools/go_marshal/...",
- ],
-)
diff --git a/pkg/sentry/arch/BUILD b/pkg/sentry/arch/BUILD
deleted file mode 100644
index e0dbc436d..000000000
--- a/pkg/sentry/arch/BUILD
+++ /dev/null
@@ -1,47 +0,0 @@
-load("//tools:defs.bzl", "go_library", "proto_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "arch",
- srcs = [
- "aligned.go",
- "arch.go",
- "arch_aarch64.go",
- "arch_amd64.go",
- "arch_arm64.go",
- "arch_state_x86.go",
- "arch_x86.go",
- "arch_x86_impl.go",
- "auxv.go",
- "signal_amd64.go",
- "signal_arm64.go",
- "stack.go",
- "stack_unsafe.go",
- "syscalls_amd64.go",
- "syscalls_arm64.go",
- ],
- marshal = True,
- visibility = ["//:sandbox"],
- deps = [
- ":registers_go_proto",
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/cpuid",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/marshal",
- "//pkg/marshal/primitive",
- "//pkg/sentry/arch/fpu",
- "//pkg/sentry/limits",
- "//pkg/usermem",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-proto_library(
- name = "registers",
- srcs = ["registers.proto"],
- visibility = ["//visibility:public"],
-)
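
The arch BUILD rule above sets marshal = True, which is what causes go_marshal to emit the *_abi_autogen_unsafe.go files added below. Roughly, the generator scans the library's sources for types carrying a +marshal directive; the struct in this sketch is invented purely to show the shape of such an input and is not one of the arch package's real types.

package arch

// exampleHeader is a hypothetical marshal-annotated type used only to
// illustrate the input side of go_marshal.
//
// +marshal
type exampleHeader struct {
	Magic   uint32
	Version uint32
	Flags   uint64
}
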
diff --git a/pkg/sentry/arch/arch_aarch64_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_aarch64_abi_autogen_unsafe.go
new file mode 100644
index 000000000..7d3405576
--- /dev/null
+++ b/pkg/sentry/arch/arch_aarch64_abi_autogen_unsafe.go
@@ -0,0 +1,15 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// If there are issues with build tag aggregation, see
+// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here
+// come from the input set of files used to generate this file. This input set
+// is filtered based on pre-defined file suffixes related to build tags, see
+// tools/defs.bzl:calculate_sets().
+
+// +build arm64
+
+package arch
+
+import (
+)
+
diff --git a/pkg/sentry/arch/arch_aarch64_state_autogen.go b/pkg/sentry/arch/arch_aarch64_state_autogen.go
new file mode 100644
index 000000000..0463c0125
--- /dev/null
+++ b/pkg/sentry/arch/arch_aarch64_state_autogen.go
@@ -0,0 +1,76 @@
+// automatically generated by stateify.
+
+// +build arm64
+
+package arch
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (r *Registers) StateTypeName() string {
+ return "pkg/sentry/arch.Registers"
+}
+
+func (r *Registers) StateFields() []string {
+ return []string{
+ "PtraceRegs",
+ "TPIDR_EL0",
+ }
+}
+
+func (r *Registers) beforeSave() {}
+
+// +checklocksignore
+func (r *Registers) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.PtraceRegs)
+ stateSinkObject.Save(1, &r.TPIDR_EL0)
+}
+
+func (r *Registers) afterLoad() {}
+
+// +checklocksignore
+func (r *Registers) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.PtraceRegs)
+ stateSourceObject.Load(1, &r.TPIDR_EL0)
+}
+
+func (s *State) StateTypeName() string {
+ return "pkg/sentry/arch.State"
+}
+
+func (s *State) StateFields() []string {
+ return []string{
+ "Regs",
+ "fpState",
+ "FeatureSet",
+ "OrigR0",
+ }
+}
+
+func (s *State) beforeSave() {}
+
+// +checklocksignore
+func (s *State) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.Regs)
+ stateSinkObject.Save(1, &s.fpState)
+ stateSinkObject.Save(2, &s.FeatureSet)
+ stateSinkObject.Save(3, &s.OrigR0)
+}
+
+func (s *State) afterLoad() {}
+
+// +checklocksignore
+func (s *State) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.Regs)
+ stateSourceObject.LoadWait(1, &s.fpState)
+ stateSourceObject.Load(2, &s.FeatureSet)
+ stateSourceObject.Load(3, &s.OrigR0)
+}
+
+func init() {
+ state.Register((*Registers)(nil))
+ state.Register((*State)(nil))
+}
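
The stateify output above saves and restores each field by index through a Sink and a Source. The real Source also supports LoadWait for fields whose referents may not be available yet (fpState above); the sketch below ignores that and uses invented stand-in types, just to make the save-by-index and load-by-index flow visible end to end.

package main

import "fmt"

// sink and source are toy stand-ins for gvisor's state.Sink and state.Source.
type sink struct{ fields []interface{} }

func (s *sink) Save(idx int, v interface{}) {
	for len(s.fields) <= idx {
		s.fields = append(s.fields, nil)
	}
	s.fields[idx] = v
}

type source struct{ fields []interface{} }

func (s *source) Load(idx int, v *uint64) { *v = s.fields[idx].(uint64) }

// regs mimics the shape of a savable type with one indexed field.
type regs struct{ TPIDR_EL0 uint64 }

func (r *regs) save(s *sink)   { s.Save(0, r.TPIDR_EL0) }
func (r *regs) load(s *source) { s.Load(0, &r.TPIDR_EL0) }

func main() {
	var snk sink
	(&regs{TPIDR_EL0: 42}).save(&snk)
	var restored regs
	restored.load(&source{fields: snk.fields})
	fmt.Println(restored.TPIDR_EL0) // 42
}
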
diff --git a/pkg/sentry/arch/arch_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_abi_autogen_unsafe.go
new file mode 100644
index 000000000..2237dde67
--- /dev/null
+++ b/pkg/sentry/arch/arch_abi_autogen_unsafe.go
@@ -0,0 +1,13 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// If there are issues with build tag aggregation, see
+// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here
+// come from the input set of files used to generate this file. This input set
+// is filtered based on pre-defined file suffixes related to build tags, see
+// tools/defs.bzl:calculate_sets().
+
+package arch
+
+import (
+)
+
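
This generated file is empty apart from its header, apparently because no marshal-annotated arch types land in the architecture-independent bucket. For reference, the methods its non-empty siblings implement correspond roughly to the interface below; this is an approximation reconstructed from the generated code in this diff, not the authoritative definition in pkg/marshal, which also includes CopyIn, CopyOut, and CopyOutN against a CopyContext.

package main

import "io"

// Marshallable approximates the interface satisfied by the generated types.
type Marshallable interface {
	SizeBytes() int
	MarshalBytes(dst []byte)
	UnmarshalBytes(src []byte)
	Packed() bool
	MarshalUnsafe(dst []byte)
	UnmarshalUnsafe(src []byte)
	io.WriterTo
}

func main() {}
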
diff --git a/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go
new file mode 100644
index 000000000..dc203ca63
--- /dev/null
+++ b/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go
@@ -0,0 +1,412 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// If there are issues with build tag aggregation, see
+// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here
+// come from the input set of files used to generate this file. This input set
+// is filtered based on pre-defined file suffixes related to build tags, see
+// tools/defs.bzl:calculate_sets().
+
+// +build amd64
+// +build amd64
+// +build amd64
+
+package arch
+
+import (
+ "gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/hostarch"
+ "gvisor.dev/gvisor/pkg/marshal"
+ "io"
+ "reflect"
+ "runtime"
+ "unsafe"
+)
+
+// Marshallable types used by this file.
+var _ marshal.Marshallable = (*SignalContext64)(nil)
+var _ marshal.Marshallable = (*UContext64)(nil)
+var _ marshal.Marshallable = (*linux.SignalSet)(nil)
+var _ marshal.Marshallable = (*linux.SignalStack)(nil)
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SignalContext64) SizeBytes() int {
+ return 184 +
+ (*linux.SignalSet)(nil).SizeBytes() +
+ 8*8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SignalContext64) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.R8))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.R9))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.R10))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.R11))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.R12))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.R13))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.R14))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.R15))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rdi))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rsi))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rbp))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rbx))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rdx))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rax))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rcx))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rsp))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rip))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Eflags))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Cs))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Gs))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Fs))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Ss))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Err))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Trapno))
+ dst = dst[8:]
+ s.Oldmask.MarshalBytes(dst[:s.Oldmask.SizeBytes()])
+ dst = dst[s.Oldmask.SizeBytes():]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Cr2))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Fpstate))
+ dst = dst[8:]
+ for idx := 0; idx < 8; idx++ {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Reserved[idx]))
+ dst = dst[8:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SignalContext64) UnmarshalBytes(src []byte) {
+ s.R8 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.R9 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.R10 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.R11 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.R12 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.R13 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.R14 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.R15 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Rdi = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Rsi = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Rbp = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Rbx = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Rdx = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Rax = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Rcx = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Rsp = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Rip = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Eflags = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Cs = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ s.Gs = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ s.Fs = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ s.Ss = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ s.Err = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Trapno = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Oldmask.UnmarshalBytes(src[:s.Oldmask.SizeBytes()])
+ src = src[s.Oldmask.SizeBytes():]
+ s.Cr2 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Fpstate = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ for idx := 0; idx < 8; idx++ {
+ s.Reserved[idx] = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SignalContext64) Packed() bool {
+ return s.Oldmask.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SignalContext64) MarshalUnsafe(dst []byte) {
+ if s.Oldmask.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+ } else {
+ // Type SignalContext64 doesn't have a packed layout in memory, fallback to MarshalBytes.
+ s.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SignalContext64) UnmarshalUnsafe(src []byte) {
+ if s.Oldmask.Packed() {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+ } else {
+ // Type SignalContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ s.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SignalContext64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !s.Oldmask.Packed() {
+ // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SignalContext64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SignalContext64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !s.Oldmask.Packed() {
+ // Type SignalContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ s.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SignalContext64) WriteTo(writer io.Writer) (int64, error) {
+ if !s.Oldmask.Packed() {
+ // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (u *UContext64) SizeBytes() int {
+ return 16 +
+ (*linux.SignalStack)(nil).SizeBytes() +
+ (*SignalContext64)(nil).SizeBytes() +
+ (*linux.SignalSet)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (u *UContext64) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(u.Flags))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(u.Link))
+ dst = dst[8:]
+ u.Stack.MarshalBytes(dst[:u.Stack.SizeBytes()])
+ dst = dst[u.Stack.SizeBytes():]
+ u.MContext.MarshalBytes(dst[:u.MContext.SizeBytes()])
+ dst = dst[u.MContext.SizeBytes():]
+ u.Sigset.MarshalBytes(dst[:u.Sigset.SizeBytes()])
+ dst = dst[u.Sigset.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (u *UContext64) UnmarshalBytes(src []byte) {
+ u.Flags = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ u.Link = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ u.Stack.UnmarshalBytes(src[:u.Stack.SizeBytes()])
+ src = src[u.Stack.SizeBytes():]
+ u.MContext.UnmarshalBytes(src[:u.MContext.SizeBytes()])
+ src = src[u.MContext.SizeBytes():]
+ u.Sigset.UnmarshalBytes(src[:u.Sigset.SizeBytes()])
+ src = src[u.Sigset.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (u *UContext64) Packed() bool {
+ return u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (u *UContext64) MarshalUnsafe(dst []byte) {
+ if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(u), uintptr(u.SizeBytes()))
+ } else {
+ // Type UContext64 doesn't have a packed layout in memory, fallback to MarshalBytes.
+ u.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (u *UContext64) UnmarshalUnsafe(src []byte) {
+ if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ gohacks.Memmove(unsafe.Pointer(u), unsafe.Pointer(&src[0]), uintptr(u.SizeBytes()))
+ } else {
+ // Type UContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ u.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (u *UContext64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
+ u.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (u *UContext64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return u.CopyOutN(cc, addr, u.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (u *UContext64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ // Type UContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ u.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (u *UContext64) WriteTo(writer io.Writer) (int64, error) {
+ if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, u.SizeBytes())
+ u.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
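
Every generated CopyIn, CopyOut, and WriteTo above has the same two-branch shape: a slow path through MarshalBytes/UnmarshalBytes, and a fast path that, when the type is packed, reinterprets the struct's own memory as a byte slice via a reflect.SliceHeader and pins the value with runtime.KeepAlive. The standalone toy below shows the same reinterpretation using unsafe.Slice, a newer equivalent of the SliceHeader construction, on an invented two-field struct; it illustrates the idea and is not the generated code.

package main

import (
	"fmt"
	"runtime"
	"unsafe"
)

// packedPair has no padding, so its memory layout matches its wire layout.
type packedPair struct {
	A uint64
	B uint64
}

func main() {
	p := packedPair{A: 1, B: 2}
	// View p's memory as a byte slice of exactly unsafe.Sizeof(p) bytes.
	buf := unsafe.Slice((*byte)(unsafe.Pointer(&p)), unsafe.Sizeof(p))
	// Copy the raw bytes out, analogous to cc.CopyOutBytes in the fast path.
	out := make([]byte, len(buf))
	n := copy(out, buf)
	// Keep p alive until after the last use of buf, as the generated code
	// does with runtime.KeepAlive.
	runtime.KeepAlive(&p)
	fmt.Println(n) // 16
}
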
diff --git a/pkg/sentry/arch/arch_amd64_state_autogen.go b/pkg/sentry/arch/arch_amd64_state_autogen.go
new file mode 100644
index 000000000..d74b4267b
--- /dev/null
+++ b/pkg/sentry/arch/arch_amd64_state_autogen.go
@@ -0,0 +1,43 @@
+// automatically generated by stateify.
+
+// +build amd64
+// +build amd64
+// +build amd64
+
+package arch
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (c *context64) StateTypeName() string {
+ return "pkg/sentry/arch.context64"
+}
+
+func (c *context64) StateFields() []string {
+ return []string{
+ "State",
+ "sigFPState",
+ }
+}
+
+func (c *context64) beforeSave() {}
+
+// +checklocksignore
+func (c *context64) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.State)
+ stateSinkObject.Save(1, &c.sigFPState)
+}
+
+func (c *context64) afterLoad() {}
+
+// +checklocksignore
+func (c *context64) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.State)
+ stateSourceObject.Load(1, &c.sigFPState)
+}
+
+func init() {
+ state.Register((*context64)(nil))
+}
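
The file ends the way every stateify output does: an init function registers each savable type under its StateTypeName. The sketch below mimics that registration with an invented map-based registry standing in for gvisor's state package, only to make the lookup-by-name idea concrete.

package main

import "fmt"

// registry is a toy stand-in for the state package's type registry.
var registry = map[string]func() interface{}{}

func register(name string, factory func() interface{}) {
	registry[name] = factory
}

// context64 mirrors the registered type's name only; it is not the real type.
type context64 struct{}

func (*context64) StateTypeName() string { return "pkg/sentry/arch.context64" }

func init() {
	register((*context64)(nil).StateTypeName(), func() interface{} { return new(context64) })
}

func main() {
	fmt.Println(len(registry), registry["pkg/sentry/arch.context64"]() != nil) // 1 true
}
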
diff --git a/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
new file mode 100644
index 000000000..c17ced511
--- /dev/null
+++ b/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
@@ -0,0 +1,588 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// If there are issues with build tag aggregation, see
+// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here
+// come from the input set of files used to generate this file. This input set
+// is filtered based on pre-defined file suffixes related to build tags, see
+// tools/defs.bzl:calculate_sets().
+
+// +build arm64
+// +build arm64
+// +build arm64
+
+package arch
+
+import (
+ "gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/hostarch"
+ "gvisor.dev/gvisor/pkg/marshal"
+ "io"
+ "reflect"
+ "runtime"
+ "unsafe"
+)
+
+// Marshallable types used by this file.
+var _ marshal.Marshallable = (*FpsimdContext)(nil)
+var _ marshal.Marshallable = (*SignalContext64)(nil)
+var _ marshal.Marshallable = (*UContext64)(nil)
+var _ marshal.Marshallable = (*aarch64Ctx)(nil)
+var _ marshal.Marshallable = (*linux.SignalSet)(nil)
+var _ marshal.Marshallable = (*linux.SignalStack)(nil)
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FpsimdContext) SizeBytes() int {
+ return 8 +
+ (*aarch64Ctx)(nil).SizeBytes() +
+ 8*64
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FpsimdContext) MarshalBytes(dst []byte) {
+ f.Head.MarshalBytes(dst[:f.Head.SizeBytes()])
+ dst = dst[f.Head.SizeBytes():]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Fpsr))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Fpcr))
+ dst = dst[4:]
+ for idx := 0; idx < 64; idx++ {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Vregs[idx]))
+ dst = dst[8:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FpsimdContext) UnmarshalBytes(src []byte) {
+ f.Head.UnmarshalBytes(src[:f.Head.SizeBytes()])
+ src = src[f.Head.SizeBytes():]
+ f.Fpsr = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Fpcr = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ for idx := 0; idx < 64; idx++ {
+ f.Vregs[idx] = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FpsimdContext) Packed() bool {
+ return f.Head.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FpsimdContext) MarshalUnsafe(dst []byte) {
+ if f.Head.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(f), uintptr(f.SizeBytes()))
+ } else {
+ // Type FpsimdContext doesn't have a packed layout in memory, fallback to MarshalBytes.
+ f.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FpsimdContext) UnmarshalUnsafe(src []byte) {
+ if f.Head.Packed() {
+ gohacks.Memmove(unsafe.Pointer(f), unsafe.Pointer(&src[0]), uintptr(f.SizeBytes()))
+ } else {
+ // Type FpsimdContext doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ f.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FpsimdContext) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !f.Head.Packed() {
+ // Type FpsimdContext doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ f.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FpsimdContext) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FpsimdContext) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !f.Head.Packed() {
+ // Type FpsimdContext doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ f.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FpsimdContext) WriteTo(writer io.Writer) (int64, error) {
+ if !f.Head.Packed() {
+ // Type FpsimdContext doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, f.SizeBytes())
+ f.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SignalContext64) SizeBytes() int {
+ return 32 +
+ 8*31 +
+ 1*8 +
+ (*FpsimdContext)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SignalContext64) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.FaultAddr))
+ dst = dst[8:]
+ for idx := 0; idx < 31; idx++ {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Regs[idx]))
+ dst = dst[8:]
+ }
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Sp))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Pc))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Pstate))
+ dst = dst[8:]
+ for idx := 0; idx < 8; idx++ {
+ dst[0] = byte(s._pad[idx])
+ dst = dst[1:]
+ }
+ s.Fpsimd64.MarshalBytes(dst[:s.Fpsimd64.SizeBytes()])
+ dst = dst[s.Fpsimd64.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SignalContext64) UnmarshalBytes(src []byte) {
+ s.FaultAddr = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ for idx := 0; idx < 31; idx++ {
+ s.Regs[idx] = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ }
+ s.Sp = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Pc = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Pstate = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ for idx := 0; idx < 8; idx++ {
+ s._pad[idx] = src[0]
+ src = src[1:]
+ }
+ s.Fpsimd64.UnmarshalBytes(src[:s.Fpsimd64.SizeBytes()])
+ src = src[s.Fpsimd64.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SignalContext64) Packed() bool {
+ return s.Fpsimd64.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SignalContext64) MarshalUnsafe(dst []byte) {
+ if s.Fpsimd64.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+ } else {
+ // Type SignalContext64 doesn't have a packed layout in memory, fallback to MarshalBytes.
+ s.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SignalContext64) UnmarshalUnsafe(src []byte) {
+ if s.Fpsimd64.Packed() {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+ } else {
+ // Type SignalContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ s.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SignalContext64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !s.Fpsimd64.Packed() {
+ // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SignalContext64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SignalContext64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !s.Fpsimd64.Packed() {
+ // Type SignalContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ s.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SignalContext64) WriteTo(writer io.Writer) (int64, error) {
+ if !s.Fpsimd64.Packed() {
+ // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (u *UContext64) SizeBytes() int {
+ return 16 +
+ (*linux.SignalStack)(nil).SizeBytes() +
+ (*linux.SignalSet)(nil).SizeBytes() +
+ 1*120 +
+ 1*8 +
+ (*SignalContext64)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (u *UContext64) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(u.Flags))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(u.Link))
+ dst = dst[8:]
+ u.Stack.MarshalBytes(dst[:u.Stack.SizeBytes()])
+ dst = dst[u.Stack.SizeBytes():]
+ u.Sigset.MarshalBytes(dst[:u.Sigset.SizeBytes()])
+ dst = dst[u.Sigset.SizeBytes():]
+ for idx := 0; idx < 120; idx++ {
+ dst[0] = byte(u._pad[idx])
+ dst = dst[1:]
+ }
+ for idx := 0; idx < 8; idx++ {
+ dst[0] = byte(u._pad2[idx])
+ dst = dst[1:]
+ }
+ u.MContext.MarshalBytes(dst[:u.MContext.SizeBytes()])
+ dst = dst[u.MContext.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (u *UContext64) UnmarshalBytes(src []byte) {
+ u.Flags = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ u.Link = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ u.Stack.UnmarshalBytes(src[:u.Stack.SizeBytes()])
+ src = src[u.Stack.SizeBytes():]
+ u.Sigset.UnmarshalBytes(src[:u.Sigset.SizeBytes()])
+ src = src[u.Sigset.SizeBytes():]
+ for idx := 0; idx < 120; idx++ {
+ u._pad[idx] = src[0]
+ src = src[1:]
+ }
+ for idx := 0; idx < 8; idx++ {
+ u._pad2[idx] = src[0]
+ src = src[1:]
+ }
+ u.MContext.UnmarshalBytes(src[:u.MContext.SizeBytes()])
+ src = src[u.MContext.SizeBytes():]
+}
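The MarshalBytes/UnmarshalBytes pair above encodes each field at a fixed offset with hostarch.ByteOrder and advances the buffer as it goes. A minimal hand-written sketch of the same pattern for a hypothetical two-field struct (not part of this change; illustrative only):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

// examplePair is a hypothetical fixed-layout type, used only to illustrate
// the field-at-fixed-offset encoding pattern emitted by go_marshal.
type examplePair struct {
	A uint64
	B uint32
}

// SizeBytes mirrors marshal.Marshallable.SizeBytes: one uint64 plus one uint32.
func (e *examplePair) SizeBytes() int { return 8 + 4 }

// MarshalBytes writes the fields in declaration order, advancing dst.
func (e *examplePair) MarshalBytes(dst []byte) {
	hostarch.ByteOrder.PutUint64(dst[:8], e.A)
	dst = dst[8:]
	hostarch.ByteOrder.PutUint32(dst[:4], e.B)
}

// UnmarshalBytes reads the fields back in the same order.
func (e *examplePair) UnmarshalBytes(src []byte) {
	e.A = hostarch.ByteOrder.Uint64(src[:8])
	src = src[8:]
	e.B = hostarch.ByteOrder.Uint32(src[:4])
}

func main() {
	in := examplePair{A: 42, B: 7}
	buf := make([]byte, in.SizeBytes())
	in.MarshalBytes(buf)

	var out examplePair
	out.UnmarshalBytes(buf)
	fmt.Println(out.A, out.B) // 42 7
}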
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (u *UContext64) Packed() bool {
+ return u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (u *UContext64) MarshalUnsafe(dst []byte) {
+ if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(u), uintptr(u.SizeBytes()))
+ } else {
+ // Type UContext64 doesn't have a packed layout in memory, fallback to MarshalBytes.
+ u.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (u *UContext64) UnmarshalUnsafe(src []byte) {
+ if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ gohacks.Memmove(unsafe.Pointer(u), unsafe.Pointer(&src[0]), uintptr(u.SizeBytes()))
+ } else {
+ // Type UContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ u.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (u *UContext64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
+ u.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+	// Construct a slice backed by u's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (u *UContext64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return u.CopyOutN(cc, addr, u.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (u *UContext64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ // Type UContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ u.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+	// Construct a slice backed by u's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (u *UContext64) WriteTo(writer io.Writer) (int64, error) {
+ if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, u.SizeBytes())
+ u.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+	// Construct a slice backed by u's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (a *aarch64Ctx) SizeBytes() int {
+ return 8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (a *aarch64Ctx) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(a.Magic))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(a.Size))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (a *aarch64Ctx) UnmarshalBytes(src []byte) {
+ a.Magic = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ a.Size = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (a *aarch64Ctx) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (a *aarch64Ctx) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(a), uintptr(a.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (a *aarch64Ctx) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(a), unsafe.Pointer(&src[0]), uintptr(a.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (a *aarch64Ctx) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+	// Construct a slice backed by a's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(a)))
+ hdr.Len = a.SizeBytes()
+ hdr.Cap = a.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that a
+ // must live until the use above.
+ runtime.KeepAlive(a) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (a *aarch64Ctx) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return a.CopyOutN(cc, addr, a.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (a *aarch64Ctx) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+	// Construct a slice backed by a's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(a)))
+ hdr.Len = a.SizeBytes()
+ hdr.Cap = a.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that a
+ // must live until the use above.
+ runtime.KeepAlive(a) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (a *aarch64Ctx) WriteTo(writer io.Writer) (int64, error) {
+	// Construct a slice backed by a's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(a)))
+ hdr.Len = a.SizeBytes()
+ hdr.Cap = a.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that a
+ // must live until the use above.
+ runtime.KeepAlive(a) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
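Every CopyOutN/CopyIn/WriteTo above avoids a scratch buffer on the packed path by aliasing the receiver's memory as a []byte through a reflect.SliceHeader, then pinning the receiver with runtime.KeepAlive, since the raw uintptr in the header is invisible to the garbage collector. A self-contained, stdlib-only sketch of that trick (gohacks.Noescape and marshal.CopyContext are left out; this is an illustration, not gVisor's generated code):

package main

import (
	"bytes"
	"fmt"
	"reflect"
	"runtime"
	"unsafe"
)

// fixedRegs is a hypothetical struct with a packed, fixed layout.
type fixedRegs struct {
	PC uint64
	SP uint64
}

// writeRaw writes the raw bytes of r without copying them into a temporary.
func writeRaw(w *bytes.Buffer, r *fixedRegs) (int, error) {
	// Build a slice header that points directly at r's memory.
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(unsafe.Pointer(r))
	hdr.Len = int(unsafe.Sizeof(*r))
	hdr.Cap = int(unsafe.Sizeof(*r))

	n, err := w.Write(buf)
	// The compiler cannot see that buf aliases r, so keep r live until the
	// write above has finished with its memory.
	runtime.KeepAlive(r)
	return n, err
}

func main() {
	var w bytes.Buffer
	n, err := writeRaw(&w, &fixedRegs{PC: 0x1000, SP: 0x2000})
	fmt.Println(n, err) // prints "16 <nil>"
}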
diff --git a/pkg/sentry/arch/arch_arm64_state_autogen.go b/pkg/sentry/arch/arch_arm64_state_autogen.go
new file mode 100644
index 000000000..4f6fe0744
--- /dev/null
+++ b/pkg/sentry/arch/arch_arm64_state_autogen.go
@@ -0,0 +1,43 @@
+// automatically generated by stateify.
+
+// +build arm64
+// +build arm64
+// +build arm64
+
+package arch
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (c *context64) StateTypeName() string {
+ return "pkg/sentry/arch.context64"
+}
+
+func (c *context64) StateFields() []string {
+ return []string{
+ "State",
+ "sigFPState",
+ }
+}
+
+func (c *context64) beforeSave() {}
+
+// +checklocksignore
+func (c *context64) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.State)
+ stateSinkObject.Save(1, &c.sigFPState)
+}
+
+func (c *context64) afterLoad() {}
+
+// +checklocksignore
+func (c *context64) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.State)
+ stateSourceObject.Load(1, &c.sigFPState)
+}
+
+func init() {
+ state.Register((*context64)(nil))
+}
diff --git a/pkg/sentry/arch/arch_state_autogen.go b/pkg/sentry/arch/arch_state_autogen.go
new file mode 100644
index 000000000..8a151f95c
--- /dev/null
+++ b/pkg/sentry/arch/arch_state_autogen.go
@@ -0,0 +1,80 @@
+// automatically generated by stateify.
+
+package arch
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (m *MmapLayout) StateTypeName() string {
+ return "pkg/sentry/arch.MmapLayout"
+}
+
+func (m *MmapLayout) StateFields() []string {
+ return []string{
+ "MinAddr",
+ "MaxAddr",
+ "BottomUpBase",
+ "TopDownBase",
+ "DefaultDirection",
+ "MaxStackRand",
+ }
+}
+
+func (m *MmapLayout) beforeSave() {}
+
+// +checklocksignore
+func (m *MmapLayout) StateSave(stateSinkObject state.Sink) {
+ m.beforeSave()
+ stateSinkObject.Save(0, &m.MinAddr)
+ stateSinkObject.Save(1, &m.MaxAddr)
+ stateSinkObject.Save(2, &m.BottomUpBase)
+ stateSinkObject.Save(3, &m.TopDownBase)
+ stateSinkObject.Save(4, &m.DefaultDirection)
+ stateSinkObject.Save(5, &m.MaxStackRand)
+}
+
+func (m *MmapLayout) afterLoad() {}
+
+// +checklocksignore
+func (m *MmapLayout) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &m.MinAddr)
+ stateSourceObject.Load(1, &m.MaxAddr)
+ stateSourceObject.Load(2, &m.BottomUpBase)
+ stateSourceObject.Load(3, &m.TopDownBase)
+ stateSourceObject.Load(4, &m.DefaultDirection)
+ stateSourceObject.Load(5, &m.MaxStackRand)
+}
+
+func (a *AuxEntry) StateTypeName() string {
+ return "pkg/sentry/arch.AuxEntry"
+}
+
+func (a *AuxEntry) StateFields() []string {
+ return []string{
+ "Key",
+ "Value",
+ }
+}
+
+func (a *AuxEntry) beforeSave() {}
+
+// +checklocksignore
+func (a *AuxEntry) StateSave(stateSinkObject state.Sink) {
+ a.beforeSave()
+ stateSinkObject.Save(0, &a.Key)
+ stateSinkObject.Save(1, &a.Value)
+}
+
+func (a *AuxEntry) afterLoad() {}
+
+// +checklocksignore
+func (a *AuxEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &a.Key)
+ stateSourceObject.Load(1, &a.Value)
+}
+
+func init() {
+ state.Register((*MmapLayout)(nil))
+ state.Register((*AuxEntry)(nil))
+}
diff --git a/pkg/sentry/arch/arch_unsafe_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_unsafe_abi_autogen_unsafe.go
new file mode 100644
index 000000000..2237dde67
--- /dev/null
+++ b/pkg/sentry/arch/arch_unsafe_abi_autogen_unsafe.go
@@ -0,0 +1,13 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// If there are issues with build tag aggregation, see
+// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here
+// come from the input set of files used to generate this file. This input set
+// is filtered based on pre-defined file suffixes related to build tags, see
+// tools/defs.bzl:calculate_sets().
+
+package arch
+
+import (
+)
+
diff --git a/pkg/sentry/arch/arch_unsafe_state_autogen.go b/pkg/sentry/arch/arch_unsafe_state_autogen.go
new file mode 100644
index 000000000..9a45071b9
--- /dev/null
+++ b/pkg/sentry/arch/arch_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package arch
diff --git a/pkg/sentry/arch/arch_x86_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_x86_abi_autogen_unsafe.go
new file mode 100644
index 000000000..f0ec25e8d
--- /dev/null
+++ b/pkg/sentry/arch/arch_x86_abi_autogen_unsafe.go
@@ -0,0 +1,16 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// If there are issues with build tag aggregation, see
+// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here
+// come from the input set of files used to generate this file. This input set
+// is filtered based on pre-defined file suffixes related to build tags, see
+// tools/defs.bzl:calculate_sets().
+
+// +build amd64 386
+// +build amd64 386
+
+package arch
+
+import (
+)
+
diff --git a/pkg/sentry/arch/arch_x86_impl_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_x86_impl_abi_autogen_unsafe.go
new file mode 100644
index 000000000..f1968e054
--- /dev/null
+++ b/pkg/sentry/arch/arch_x86_impl_abi_autogen_unsafe.go
@@ -0,0 +1,16 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// If there are issues with build tag aggregation, see
+// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here
+// come from the input set of files used to generate this file. This input set
+// is filtered based on pre-defined file suffixes related to build tags, see
+// tools/defs.bzl:calculate_sets().
+
+// +build amd64 386
+// +build go1.1
+
+package arch
+
+import (
+)
+
diff --git a/pkg/sentry/arch/arch_x86_impl_state_autogen.go b/pkg/sentry/arch/arch_x86_impl_state_autogen.go
new file mode 100644
index 000000000..1865a64dd
--- /dev/null
+++ b/pkg/sentry/arch/arch_x86_impl_state_autogen.go
@@ -0,0 +1,44 @@
+// automatically generated by stateify.
+
+// +build amd64 386
+// +build go1.1
+
+package arch
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (s *State) StateTypeName() string {
+ return "pkg/sentry/arch.State"
+}
+
+func (s *State) StateFields() []string {
+ return []string{
+ "Regs",
+ "fpState",
+ "FeatureSet",
+ }
+}
+
+func (s *State) beforeSave() {}
+
+// +checklocksignore
+func (s *State) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.Regs)
+ stateSinkObject.Save(1, &s.fpState)
+ stateSinkObject.Save(2, &s.FeatureSet)
+}
+
+// +checklocksignore
+func (s *State) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.Regs)
+ stateSourceObject.LoadWait(1, &s.fpState)
+ stateSourceObject.Load(2, &s.FeatureSet)
+ stateSourceObject.AfterLoad(s.afterLoad)
+}
+
+func init() {
+ state.Register((*State)(nil))
+}
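The stateify-generated files in this change all follow the same recipe: StateFields fixes an index for each saved field, StateSave/StateLoad move those fields through a state.Sink/state.Source by index, and init registers the type. In the State type above, fpState is loaded with LoadWait and afterLoad is scheduled via AfterLoad, which reads as "fpState must be fully restored before the post-load hook runs" (a hedged reading of the generated code, not a statement of the state package's contract). A hand-written sketch of the same shape for a hypothetical type:

package arch

import "gvisor.dev/gvisor/pkg/state"

// exampleCounter is hypothetical and exists only to illustrate the protocol.
type exampleCounter struct {
	hits  uint64
	label string
	cache string // derived from label; rebuilt in afterLoad, never saved.
}

func (e *exampleCounter) StateTypeName() string { return "pkg/sentry/arch.exampleCounter" }

func (e *exampleCounter) StateFields() []string {
	return []string{
		"hits",
		"label",
	}
}

func (e *exampleCounter) beforeSave() {}

func (e *exampleCounter) StateSave(stateSinkObject state.Sink) {
	e.beforeSave()
	stateSinkObject.Save(0, &e.hits)
	stateSinkObject.Save(1, &e.label)
}

// afterLoad recomputes the derived field once label is available again.
func (e *exampleCounter) afterLoad() { e.cache = "counter:" + e.label }

func (e *exampleCounter) StateLoad(stateSourceObject state.Source) {
	stateSourceObject.Load(0, &e.hits)
	stateSourceObject.LoadWait(1, &e.label)
	stateSourceObject.AfterLoad(e.afterLoad)
}

func init() {
	state.Register((*exampleCounter)(nil))
}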
diff --git a/pkg/sentry/arch/arch_x86_state_autogen.go b/pkg/sentry/arch/arch_x86_state_autogen.go
new file mode 100644
index 000000000..7584e42c5
--- /dev/null
+++ b/pkg/sentry/arch/arch_x86_state_autogen.go
@@ -0,0 +1,39 @@
+// automatically generated by stateify.
+
+// +build amd64 386
+// +build amd64 386
+
+package arch
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (r *Registers) StateTypeName() string {
+ return "pkg/sentry/arch.Registers"
+}
+
+func (r *Registers) StateFields() []string {
+ return []string{
+ "PtraceRegs",
+ }
+}
+
+func (r *Registers) beforeSave() {}
+
+// +checklocksignore
+func (r *Registers) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.PtraceRegs)
+}
+
+func (r *Registers) afterLoad() {}
+
+// +checklocksignore
+func (r *Registers) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.PtraceRegs)
+}
+
+func init() {
+ state.Register((*Registers)(nil))
+}
diff --git a/pkg/sentry/arch/fpu/BUILD b/pkg/sentry/arch/fpu/BUILD
deleted file mode 100644
index 6cdd21b1b..000000000
--- a/pkg/sentry/arch/fpu/BUILD
+++ /dev/null
@@ -1,21 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "fpu",
- srcs = [
- "fpu.go",
- "fpu_amd64.go",
- "fpu_amd64.s",
- "fpu_arm64.go",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/cpuid",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/sync",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/sentry/arch/fpu/fpu_amd64_state_autogen.go b/pkg/sentry/arch/fpu/fpu_amd64_state_autogen.go
new file mode 100644
index 000000000..38db6e6f4
--- /dev/null
+++ b/pkg/sentry/arch/fpu/fpu_amd64_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build amd64 i386
+
+package fpu
diff --git a/pkg/sentry/arch/fpu/fpu_arm64_state_autogen.go b/pkg/sentry/arch/fpu/fpu_arm64_state_autogen.go
new file mode 100644
index 000000000..cb7d1ca8a
--- /dev/null
+++ b/pkg/sentry/arch/fpu/fpu_arm64_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build arm64
+
+package fpu
diff --git a/pkg/sentry/arch/fpu/fpu_state_autogen.go b/pkg/sentry/arch/fpu/fpu_state_autogen.go
new file mode 100644
index 000000000..7cadc6b8f
--- /dev/null
+++ b/pkg/sentry/arch/fpu/fpu_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package fpu
diff --git a/pkg/sentry/arch/registers.proto b/pkg/sentry/arch/registers.proto
deleted file mode 100644
index 2727ba08a..000000000
--- a/pkg/sentry/arch/registers.proto
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package gvisor;
-
-message AMD64Registers {
- uint64 rax = 1;
- uint64 rbx = 2;
- uint64 rcx = 3;
- uint64 rdx = 4;
- uint64 rsi = 5;
- uint64 rdi = 6;
- uint64 rsp = 7;
- uint64 rbp = 8;
-
- uint64 r8 = 9;
- uint64 r9 = 10;
- uint64 r10 = 11;
- uint64 r11 = 12;
- uint64 r12 = 13;
- uint64 r13 = 14;
- uint64 r14 = 15;
- uint64 r15 = 16;
-
- uint64 rip = 17;
- uint64 rflags = 18;
- uint64 orig_rax = 19;
- uint64 cs = 20;
- uint64 ds = 21;
- uint64 es = 22;
- uint64 fs = 23;
- uint64 gs = 24;
- uint64 ss = 25;
- uint64 fs_base = 26;
- uint64 gs_base = 27;
-}
-
-message ARM64Registers {
- uint64 r0 = 1;
- uint64 r1 = 2;
- uint64 r2 = 3;
- uint64 r3 = 4;
- uint64 r4 = 5;
- uint64 r5 = 6;
- uint64 r6 = 7;
- uint64 r7 = 8;
- uint64 r8 = 9;
- uint64 r9 = 10;
- uint64 r10 = 11;
- uint64 r11 = 12;
- uint64 r12 = 13;
- uint64 r13 = 14;
- uint64 r14 = 15;
- uint64 r15 = 16;
- uint64 r16 = 17;
- uint64 r17 = 18;
- uint64 r18 = 19;
- uint64 r19 = 20;
- uint64 r20 = 21;
- uint64 r21 = 22;
- uint64 r22 = 23;
- uint64 r23 = 24;
- uint64 r24 = 25;
- uint64 r25 = 26;
- uint64 r26 = 27;
- uint64 r27 = 28;
- uint64 r28 = 29;
- uint64 r29 = 30;
- uint64 r30 = 31;
- uint64 sp = 32;
- uint64 pc = 33;
- uint64 pstate = 34;
- uint64 tls = 35;
-}
-message Registers {
- oneof arch {
- AMD64Registers amd64 = 1;
- ARM64Registers arm64 = 2;
- }
-}
diff --git a/pkg/sentry/arch/registers_go_proto/registers.pb.go b/pkg/sentry/arch/registers_go_proto/registers.pb.go
new file mode 100644
index 000000000..0e73a10b8
--- /dev/null
+++ b/pkg/sentry/arch/registers_go_proto/registers.pb.go
@@ -0,0 +1,863 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.13.0
+// source: pkg/sentry/arch/registers.proto
+
+package gvisor
+
+import (
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+type AMD64Registers struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Rax uint64 `protobuf:"varint,1,opt,name=rax,proto3" json:"rax,omitempty"`
+ Rbx uint64 `protobuf:"varint,2,opt,name=rbx,proto3" json:"rbx,omitempty"`
+ Rcx uint64 `protobuf:"varint,3,opt,name=rcx,proto3" json:"rcx,omitempty"`
+ Rdx uint64 `protobuf:"varint,4,opt,name=rdx,proto3" json:"rdx,omitempty"`
+ Rsi uint64 `protobuf:"varint,5,opt,name=rsi,proto3" json:"rsi,omitempty"`
+ Rdi uint64 `protobuf:"varint,6,opt,name=rdi,proto3" json:"rdi,omitempty"`
+ Rsp uint64 `protobuf:"varint,7,opt,name=rsp,proto3" json:"rsp,omitempty"`
+ Rbp uint64 `protobuf:"varint,8,opt,name=rbp,proto3" json:"rbp,omitempty"`
+ R8 uint64 `protobuf:"varint,9,opt,name=r8,proto3" json:"r8,omitempty"`
+ R9 uint64 `protobuf:"varint,10,opt,name=r9,proto3" json:"r9,omitempty"`
+ R10 uint64 `protobuf:"varint,11,opt,name=r10,proto3" json:"r10,omitempty"`
+ R11 uint64 `protobuf:"varint,12,opt,name=r11,proto3" json:"r11,omitempty"`
+ R12 uint64 `protobuf:"varint,13,opt,name=r12,proto3" json:"r12,omitempty"`
+ R13 uint64 `protobuf:"varint,14,opt,name=r13,proto3" json:"r13,omitempty"`
+ R14 uint64 `protobuf:"varint,15,opt,name=r14,proto3" json:"r14,omitempty"`
+ R15 uint64 `protobuf:"varint,16,opt,name=r15,proto3" json:"r15,omitempty"`
+ Rip uint64 `protobuf:"varint,17,opt,name=rip,proto3" json:"rip,omitempty"`
+ Rflags uint64 `protobuf:"varint,18,opt,name=rflags,proto3" json:"rflags,omitempty"`
+ OrigRax uint64 `protobuf:"varint,19,opt,name=orig_rax,json=origRax,proto3" json:"orig_rax,omitempty"`
+ Cs uint64 `protobuf:"varint,20,opt,name=cs,proto3" json:"cs,omitempty"`
+ Ds uint64 `protobuf:"varint,21,opt,name=ds,proto3" json:"ds,omitempty"`
+ Es uint64 `protobuf:"varint,22,opt,name=es,proto3" json:"es,omitempty"`
+ Fs uint64 `protobuf:"varint,23,opt,name=fs,proto3" json:"fs,omitempty"`
+ Gs uint64 `protobuf:"varint,24,opt,name=gs,proto3" json:"gs,omitempty"`
+ Ss uint64 `protobuf:"varint,25,opt,name=ss,proto3" json:"ss,omitempty"`
+ FsBase uint64 `protobuf:"varint,26,opt,name=fs_base,json=fsBase,proto3" json:"fs_base,omitempty"`
+ GsBase uint64 `protobuf:"varint,27,opt,name=gs_base,json=gsBase,proto3" json:"gs_base,omitempty"`
+}
+
+func (x *AMD64Registers) Reset() {
+ *x = AMD64Registers{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_sentry_arch_registers_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AMD64Registers) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AMD64Registers) ProtoMessage() {}
+
+func (x *AMD64Registers) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_sentry_arch_registers_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AMD64Registers.ProtoReflect.Descriptor instead.
+func (*AMD64Registers) Descriptor() ([]byte, []int) {
+ return file_pkg_sentry_arch_registers_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *AMD64Registers) GetRax() uint64 {
+ if x != nil {
+ return x.Rax
+ }
+ return 0
+}
+
+func (x *AMD64Registers) GetRbx() uint64 {
+ if x != nil {
+ return x.Rbx
+ }
+ return 0
+}
+
+func (x *AMD64Registers) GetRcx() uint64 {
+ if x != nil {
+ return x.Rcx
+ }
+ return 0
+}
+
+func (x *AMD64Registers) GetRdx() uint64 {
+ if x != nil {
+ return x.Rdx
+ }
+ return 0
+}
+
+func (x *AMD64Registers) GetRsi() uint64 {
+ if x != nil {
+ return x.Rsi
+ }
+ return 0
+}
+
+func (x *AMD64Registers) GetRdi() uint64 {
+ if x != nil {
+ return x.Rdi
+ }
+ return 0
+}
+
+func (x *AMD64Registers) GetRsp() uint64 {
+ if x != nil {
+ return x.Rsp
+ }
+ return 0
+}
+
+func (x *AMD64Registers) GetRbp() uint64 {
+ if x != nil {
+ return x.Rbp
+ }
+ return 0
+}
+
+func (x *AMD64Registers) GetR8() uint64 {
+ if x != nil {
+ return x.R8
+ }
+ return 0
+}
+
+func (x *AMD64Registers) GetR9() uint64 {
+ if x != nil {
+ return x.R9
+ }
+ return 0
+}
+
+func (x *AMD64Registers) GetR10() uint64 {
+ if x != nil {
+ return x.R10
+ }
+ return 0
+}
+
+func (x *AMD64Registers) GetR11() uint64 {
+ if x != nil {
+ return x.R11
+ }
+ return 0
+}
+
+func (x *AMD64Registers) GetR12() uint64 {
+ if x != nil {
+ return x.R12
+ }
+ return 0
+}
+
+func (x *AMD64Registers) GetR13() uint64 {
+ if x != nil {
+ return x.R13
+ }
+ return 0
+}
+
+func (x *AMD64Registers) GetR14() uint64 {
+ if x != nil {
+ return x.R14
+ }
+ return 0
+}
+
+func (x *AMD64Registers) GetR15() uint64 {
+ if x != nil {
+ return x.R15
+ }
+ return 0
+}
+
+func (x *AMD64Registers) GetRip() uint64 {
+ if x != nil {
+ return x.Rip
+ }
+ return 0
+}
+
+func (x *AMD64Registers) GetRflags() uint64 {
+ if x != nil {
+ return x.Rflags
+ }
+ return 0
+}
+
+func (x *AMD64Registers) GetOrigRax() uint64 {
+ if x != nil {
+ return x.OrigRax
+ }
+ return 0
+}
+
+func (x *AMD64Registers) GetCs() uint64 {
+ if x != nil {
+ return x.Cs
+ }
+ return 0
+}
+
+func (x *AMD64Registers) GetDs() uint64 {
+ if x != nil {
+ return x.Ds
+ }
+ return 0
+}
+
+func (x *AMD64Registers) GetEs() uint64 {
+ if x != nil {
+ return x.Es
+ }
+ return 0
+}
+
+func (x *AMD64Registers) GetFs() uint64 {
+ if x != nil {
+ return x.Fs
+ }
+ return 0
+}
+
+func (x *AMD64Registers) GetGs() uint64 {
+ if x != nil {
+ return x.Gs
+ }
+ return 0
+}
+
+func (x *AMD64Registers) GetSs() uint64 {
+ if x != nil {
+ return x.Ss
+ }
+ return 0
+}
+
+func (x *AMD64Registers) GetFsBase() uint64 {
+ if x != nil {
+ return x.FsBase
+ }
+ return 0
+}
+
+func (x *AMD64Registers) GetGsBase() uint64 {
+ if x != nil {
+ return x.GsBase
+ }
+ return 0
+}
+
+type ARM64Registers struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ R0 uint64 `protobuf:"varint,1,opt,name=r0,proto3" json:"r0,omitempty"`
+ R1 uint64 `protobuf:"varint,2,opt,name=r1,proto3" json:"r1,omitempty"`
+ R2 uint64 `protobuf:"varint,3,opt,name=r2,proto3" json:"r2,omitempty"`
+ R3 uint64 `protobuf:"varint,4,opt,name=r3,proto3" json:"r3,omitempty"`
+ R4 uint64 `protobuf:"varint,5,opt,name=r4,proto3" json:"r4,omitempty"`
+ R5 uint64 `protobuf:"varint,6,opt,name=r5,proto3" json:"r5,omitempty"`
+ R6 uint64 `protobuf:"varint,7,opt,name=r6,proto3" json:"r6,omitempty"`
+ R7 uint64 `protobuf:"varint,8,opt,name=r7,proto3" json:"r7,omitempty"`
+ R8 uint64 `protobuf:"varint,9,opt,name=r8,proto3" json:"r8,omitempty"`
+ R9 uint64 `protobuf:"varint,10,opt,name=r9,proto3" json:"r9,omitempty"`
+ R10 uint64 `protobuf:"varint,11,opt,name=r10,proto3" json:"r10,omitempty"`
+ R11 uint64 `protobuf:"varint,12,opt,name=r11,proto3" json:"r11,omitempty"`
+ R12 uint64 `protobuf:"varint,13,opt,name=r12,proto3" json:"r12,omitempty"`
+ R13 uint64 `protobuf:"varint,14,opt,name=r13,proto3" json:"r13,omitempty"`
+ R14 uint64 `protobuf:"varint,15,opt,name=r14,proto3" json:"r14,omitempty"`
+ R15 uint64 `protobuf:"varint,16,opt,name=r15,proto3" json:"r15,omitempty"`
+ R16 uint64 `protobuf:"varint,17,opt,name=r16,proto3" json:"r16,omitempty"`
+ R17 uint64 `protobuf:"varint,18,opt,name=r17,proto3" json:"r17,omitempty"`
+ R18 uint64 `protobuf:"varint,19,opt,name=r18,proto3" json:"r18,omitempty"`
+ R19 uint64 `protobuf:"varint,20,opt,name=r19,proto3" json:"r19,omitempty"`
+ R20 uint64 `protobuf:"varint,21,opt,name=r20,proto3" json:"r20,omitempty"`
+ R21 uint64 `protobuf:"varint,22,opt,name=r21,proto3" json:"r21,omitempty"`
+ R22 uint64 `protobuf:"varint,23,opt,name=r22,proto3" json:"r22,omitempty"`
+ R23 uint64 `protobuf:"varint,24,opt,name=r23,proto3" json:"r23,omitempty"`
+ R24 uint64 `protobuf:"varint,25,opt,name=r24,proto3" json:"r24,omitempty"`
+ R25 uint64 `protobuf:"varint,26,opt,name=r25,proto3" json:"r25,omitempty"`
+ R26 uint64 `protobuf:"varint,27,opt,name=r26,proto3" json:"r26,omitempty"`
+ R27 uint64 `protobuf:"varint,28,opt,name=r27,proto3" json:"r27,omitempty"`
+ R28 uint64 `protobuf:"varint,29,opt,name=r28,proto3" json:"r28,omitempty"`
+ R29 uint64 `protobuf:"varint,30,opt,name=r29,proto3" json:"r29,omitempty"`
+ R30 uint64 `protobuf:"varint,31,opt,name=r30,proto3" json:"r30,omitempty"`
+ Sp uint64 `protobuf:"varint,32,opt,name=sp,proto3" json:"sp,omitempty"`
+ Pc uint64 `protobuf:"varint,33,opt,name=pc,proto3" json:"pc,omitempty"`
+ Pstate uint64 `protobuf:"varint,34,opt,name=pstate,proto3" json:"pstate,omitempty"`
+ Tls uint64 `protobuf:"varint,35,opt,name=tls,proto3" json:"tls,omitempty"`
+}
+
+func (x *ARM64Registers) Reset() {
+ *x = ARM64Registers{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_sentry_arch_registers_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ARM64Registers) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ARM64Registers) ProtoMessage() {}
+
+func (x *ARM64Registers) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_sentry_arch_registers_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ARM64Registers.ProtoReflect.Descriptor instead.
+func (*ARM64Registers) Descriptor() ([]byte, []int) {
+ return file_pkg_sentry_arch_registers_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ARM64Registers) GetR0() uint64 {
+ if x != nil {
+ return x.R0
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetR1() uint64 {
+ if x != nil {
+ return x.R1
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetR2() uint64 {
+ if x != nil {
+ return x.R2
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetR3() uint64 {
+ if x != nil {
+ return x.R3
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetR4() uint64 {
+ if x != nil {
+ return x.R4
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetR5() uint64 {
+ if x != nil {
+ return x.R5
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetR6() uint64 {
+ if x != nil {
+ return x.R6
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetR7() uint64 {
+ if x != nil {
+ return x.R7
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetR8() uint64 {
+ if x != nil {
+ return x.R8
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetR9() uint64 {
+ if x != nil {
+ return x.R9
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetR10() uint64 {
+ if x != nil {
+ return x.R10
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetR11() uint64 {
+ if x != nil {
+ return x.R11
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetR12() uint64 {
+ if x != nil {
+ return x.R12
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetR13() uint64 {
+ if x != nil {
+ return x.R13
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetR14() uint64 {
+ if x != nil {
+ return x.R14
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetR15() uint64 {
+ if x != nil {
+ return x.R15
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetR16() uint64 {
+ if x != nil {
+ return x.R16
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetR17() uint64 {
+ if x != nil {
+ return x.R17
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetR18() uint64 {
+ if x != nil {
+ return x.R18
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetR19() uint64 {
+ if x != nil {
+ return x.R19
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetR20() uint64 {
+ if x != nil {
+ return x.R20
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetR21() uint64 {
+ if x != nil {
+ return x.R21
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetR22() uint64 {
+ if x != nil {
+ return x.R22
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetR23() uint64 {
+ if x != nil {
+ return x.R23
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetR24() uint64 {
+ if x != nil {
+ return x.R24
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetR25() uint64 {
+ if x != nil {
+ return x.R25
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetR26() uint64 {
+ if x != nil {
+ return x.R26
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetR27() uint64 {
+ if x != nil {
+ return x.R27
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetR28() uint64 {
+ if x != nil {
+ return x.R28
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetR29() uint64 {
+ if x != nil {
+ return x.R29
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetR30() uint64 {
+ if x != nil {
+ return x.R30
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetSp() uint64 {
+ if x != nil {
+ return x.Sp
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetPc() uint64 {
+ if x != nil {
+ return x.Pc
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetPstate() uint64 {
+ if x != nil {
+ return x.Pstate
+ }
+ return 0
+}
+
+func (x *ARM64Registers) GetTls() uint64 {
+ if x != nil {
+ return x.Tls
+ }
+ return 0
+}
+
+type Registers struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Arch:
+ // *Registers_Amd64
+ // *Registers_Arm64
+ Arch isRegisters_Arch `protobuf_oneof:"arch"`
+}
+
+func (x *Registers) Reset() {
+ *x = Registers{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_sentry_arch_registers_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Registers) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Registers) ProtoMessage() {}
+
+func (x *Registers) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_sentry_arch_registers_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Registers.ProtoReflect.Descriptor instead.
+func (*Registers) Descriptor() ([]byte, []int) {
+ return file_pkg_sentry_arch_registers_proto_rawDescGZIP(), []int{2}
+}
+
+func (m *Registers) GetArch() isRegisters_Arch {
+ if m != nil {
+ return m.Arch
+ }
+ return nil
+}
+
+func (x *Registers) GetAmd64() *AMD64Registers {
+ if x, ok := x.GetArch().(*Registers_Amd64); ok {
+ return x.Amd64
+ }
+ return nil
+}
+
+func (x *Registers) GetArm64() *ARM64Registers {
+ if x, ok := x.GetArch().(*Registers_Arm64); ok {
+ return x.Arm64
+ }
+ return nil
+}
+
+type isRegisters_Arch interface {
+ isRegisters_Arch()
+}
+
+type Registers_Amd64 struct {
+ Amd64 *AMD64Registers `protobuf:"bytes,1,opt,name=amd64,proto3,oneof"`
+}
+
+type Registers_Arm64 struct {
+ Arm64 *ARM64Registers `protobuf:"bytes,2,opt,name=arm64,proto3,oneof"`
+}
+
+func (*Registers_Amd64) isRegisters_Arch() {}
+
+func (*Registers_Arm64) isRegisters_Arch() {}
+
+var File_pkg_sentry_arch_registers_proto protoreflect.FileDescriptor
+
+var file_pkg_sentry_arch_registers_proto_rawDesc = []byte{
+ 0x0a, 0x1f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2f, 0x61, 0x72, 0x63,
+ 0x68, 0x2f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x06, 0x67, 0x76, 0x69, 0x73, 0x6f, 0x72, 0x22, 0x83, 0x04, 0x0a, 0x0e, 0x41, 0x4d,
+ 0x44, 0x36, 0x34, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x10, 0x0a, 0x03,
+ 0x72, 0x61, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x72, 0x61, 0x78, 0x12, 0x10,
+ 0x0a, 0x03, 0x72, 0x62, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x72, 0x62, 0x78,
+ 0x12, 0x10, 0x0a, 0x03, 0x72, 0x63, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x72,
+ 0x63, 0x78, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x64, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x03, 0x72, 0x64, 0x78, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x73, 0x69, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x03, 0x72, 0x73, 0x69, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x64, 0x69, 0x18, 0x06, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x03, 0x72, 0x64, 0x69, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x73, 0x70, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x72, 0x73, 0x70, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x62,
+ 0x70, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x72, 0x62, 0x70, 0x12, 0x0e, 0x0a, 0x02,
+ 0x72, 0x38, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x72, 0x38, 0x12, 0x0e, 0x0a, 0x02,
+ 0x72, 0x39, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x72, 0x39, 0x12, 0x10, 0x0a, 0x03,
+ 0x72, 0x31, 0x30, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x72, 0x31, 0x30, 0x12, 0x10,
+ 0x0a, 0x03, 0x72, 0x31, 0x31, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x72, 0x31, 0x31,
+ 0x12, 0x10, 0x0a, 0x03, 0x72, 0x31, 0x32, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x72,
+ 0x31, 0x32, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x31, 0x33, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x03, 0x72, 0x31, 0x33, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x31, 0x34, 0x18, 0x0f, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x03, 0x72, 0x31, 0x34, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x31, 0x35, 0x18, 0x10, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x03, 0x72, 0x31, 0x35, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x69, 0x70, 0x18,
+ 0x11, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x72, 0x69, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x66,
+ 0x6c, 0x61, 0x67, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x72, 0x66, 0x6c, 0x61,
+ 0x67, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x72, 0x69, 0x67, 0x5f, 0x72, 0x61, 0x78, 0x18, 0x13,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x6f, 0x72, 0x69, 0x67, 0x52, 0x61, 0x78, 0x12, 0x0e, 0x0a,
+ 0x02, 0x63, 0x73, 0x18, 0x14, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x63, 0x73, 0x12, 0x0e, 0x0a,
+ 0x02, 0x64, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x64, 0x73, 0x12, 0x0e, 0x0a,
+ 0x02, 0x65, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x65, 0x73, 0x12, 0x0e, 0x0a,
+ 0x02, 0x66, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x66, 0x73, 0x12, 0x0e, 0x0a,
+ 0x02, 0x67, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x67, 0x73, 0x12, 0x0e, 0x0a,
+ 0x02, 0x73, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x73, 0x73, 0x12, 0x17, 0x0a,
+ 0x07, 0x66, 0x73, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06,
+ 0x66, 0x73, 0x42, 0x61, 0x73, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x67, 0x73, 0x5f, 0x62, 0x61, 0x73,
+ 0x65, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x67, 0x73, 0x42, 0x61, 0x73, 0x65, 0x22,
+ 0xf4, 0x04, 0x0a, 0x0e, 0x41, 0x52, 0x4d, 0x36, 0x34, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65,
+ 0x72, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x72, 0x30, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02,
+ 0x72, 0x30, 0x12, 0x0e, 0x0a, 0x02, 0x72, 0x31, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02,
+ 0x72, 0x31, 0x12, 0x0e, 0x0a, 0x02, 0x72, 0x32, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02,
+ 0x72, 0x32, 0x12, 0x0e, 0x0a, 0x02, 0x72, 0x33, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02,
+ 0x72, 0x33, 0x12, 0x0e, 0x0a, 0x02, 0x72, 0x34, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02,
+ 0x72, 0x34, 0x12, 0x0e, 0x0a, 0x02, 0x72, 0x35, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02,
+ 0x72, 0x35, 0x12, 0x0e, 0x0a, 0x02, 0x72, 0x36, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02,
+ 0x72, 0x36, 0x12, 0x0e, 0x0a, 0x02, 0x72, 0x37, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02,
+ 0x72, 0x37, 0x12, 0x0e, 0x0a, 0x02, 0x72, 0x38, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02,
+ 0x72, 0x38, 0x12, 0x0e, 0x0a, 0x02, 0x72, 0x39, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02,
+ 0x72, 0x39, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x31, 0x30, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x03, 0x72, 0x31, 0x30, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x31, 0x31, 0x18, 0x0c, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x03, 0x72, 0x31, 0x31, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x31, 0x32, 0x18, 0x0d, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x03, 0x72, 0x31, 0x32, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x31, 0x33, 0x18,
+ 0x0e, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x72, 0x31, 0x33, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x31,
+ 0x34, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x72, 0x31, 0x34, 0x12, 0x10, 0x0a, 0x03,
+ 0x72, 0x31, 0x35, 0x18, 0x10, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x72, 0x31, 0x35, 0x12, 0x10,
+ 0x0a, 0x03, 0x72, 0x31, 0x36, 0x18, 0x11, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x72, 0x31, 0x36,
+ 0x12, 0x10, 0x0a, 0x03, 0x72, 0x31, 0x37, 0x18, 0x12, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x72,
+ 0x31, 0x37, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x31, 0x38, 0x18, 0x13, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x03, 0x72, 0x31, 0x38, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x31, 0x39, 0x18, 0x14, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x03, 0x72, 0x31, 0x39, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x32, 0x30, 0x18, 0x15, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x03, 0x72, 0x32, 0x30, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x32, 0x31, 0x18,
+ 0x16, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x72, 0x32, 0x31, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x32,
+ 0x32, 0x18, 0x17, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x72, 0x32, 0x32, 0x12, 0x10, 0x0a, 0x03,
+ 0x72, 0x32, 0x33, 0x18, 0x18, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x72, 0x32, 0x33, 0x12, 0x10,
+ 0x0a, 0x03, 0x72, 0x32, 0x34, 0x18, 0x19, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x72, 0x32, 0x34,
+ 0x12, 0x10, 0x0a, 0x03, 0x72, 0x32, 0x35, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x72,
+ 0x32, 0x35, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x32, 0x36, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x03, 0x72, 0x32, 0x36, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x32, 0x37, 0x18, 0x1c, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x03, 0x72, 0x32, 0x37, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x32, 0x38, 0x18, 0x1d, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x03, 0x72, 0x32, 0x38, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x32, 0x39, 0x18,
+ 0x1e, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x72, 0x32, 0x39, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x33,
+ 0x30, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x72, 0x33, 0x30, 0x12, 0x0e, 0x0a, 0x02,
+ 0x73, 0x70, 0x18, 0x20, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x73, 0x70, 0x12, 0x0e, 0x0a, 0x02,
+ 0x70, 0x63, 0x18, 0x21, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x70, 0x63, 0x12, 0x16, 0x0a, 0x06,
+ 0x70, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x22, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x70, 0x73,
+ 0x74, 0x61, 0x74, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x6c, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x03, 0x74, 0x6c, 0x73, 0x22, 0x73, 0x0a, 0x09, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74,
+ 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x05, 0x61, 0x6d, 0x64, 0x36, 0x34, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x76, 0x69, 0x73, 0x6f, 0x72, 0x2e, 0x41, 0x4d, 0x44, 0x36,
+ 0x34, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x73, 0x48, 0x00, 0x52, 0x05, 0x61, 0x6d,
+ 0x64, 0x36, 0x34, 0x12, 0x2e, 0x0a, 0x05, 0x61, 0x72, 0x6d, 0x36, 0x34, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x76, 0x69, 0x73, 0x6f, 0x72, 0x2e, 0x41, 0x52, 0x4d, 0x36,
+ 0x34, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x73, 0x48, 0x00, 0x52, 0x05, 0x61, 0x72,
+ 0x6d, 0x36, 0x34, 0x42, 0x06, 0x0a, 0x04, 0x61, 0x72, 0x63, 0x68, 0x62, 0x06, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_pkg_sentry_arch_registers_proto_rawDescOnce sync.Once
+ file_pkg_sentry_arch_registers_proto_rawDescData = file_pkg_sentry_arch_registers_proto_rawDesc
+)
+
+func file_pkg_sentry_arch_registers_proto_rawDescGZIP() []byte {
+ file_pkg_sentry_arch_registers_proto_rawDescOnce.Do(func() {
+ file_pkg_sentry_arch_registers_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_sentry_arch_registers_proto_rawDescData)
+ })
+ return file_pkg_sentry_arch_registers_proto_rawDescData
+}
+
+var file_pkg_sentry_arch_registers_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_pkg_sentry_arch_registers_proto_goTypes = []interface{}{
+ (*AMD64Registers)(nil), // 0: gvisor.AMD64Registers
+ (*ARM64Registers)(nil), // 1: gvisor.ARM64Registers
+ (*Registers)(nil), // 2: gvisor.Registers
+}
+var file_pkg_sentry_arch_registers_proto_depIdxs = []int32{
+ 0, // 0: gvisor.Registers.amd64:type_name -> gvisor.AMD64Registers
+ 1, // 1: gvisor.Registers.arm64:type_name -> gvisor.ARM64Registers
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_pkg_sentry_arch_registers_proto_init() }
+func file_pkg_sentry_arch_registers_proto_init() {
+ if File_pkg_sentry_arch_registers_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_pkg_sentry_arch_registers_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AMD64Registers); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_sentry_arch_registers_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ARM64Registers); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_sentry_arch_registers_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Registers); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_pkg_sentry_arch_registers_proto_msgTypes[2].OneofWrappers = []interface{}{
+ (*Registers_Amd64)(nil),
+ (*Registers_Arm64)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_pkg_sentry_arch_registers_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 3,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_pkg_sentry_arch_registers_proto_goTypes,
+ DependencyIndexes: file_pkg_sentry_arch_registers_proto_depIdxs,
+ MessageInfos: file_pkg_sentry_arch_registers_proto_msgTypes,
+ }.Build()
+ File_pkg_sentry_arch_registers_proto = out.File
+ file_pkg_sentry_arch_registers_proto_rawDesc = nil
+ file_pkg_sentry_arch_registers_proto_goTypes = nil
+ file_pkg_sentry_arch_registers_proto_depIdxs = nil
+}
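In this change the hand-written registers.proto is removed from the mirror while the generated Go bindings above are added in its place (package gvisor under registers_go_proto). A usage sketch for the oneof wrapper, assuming the gvisor.dev/gvisor/pkg/sentry/arch/registers_go_proto import path; the register values are illustrative only:

package main

import (
	"fmt"

	rpb "gvisor.dev/gvisor/pkg/sentry/arch/registers_go_proto"
)

func main() {
	// Wrap an architecture-specific message in the oneof.
	regs := &rpb.Registers{
		Arch: &rpb.Registers_Amd64{
			Amd64: &rpb.AMD64Registers{Rip: 0x401000, Rsp: 0x7fffffffe000},
		},
	}

	// Read it back by switching on the concrete oneof wrapper type.
	switch a := regs.GetArch().(type) {
	case *rpb.Registers_Amd64:
		fmt.Printf("amd64: rip=%#x rsp=%#x\n", a.Amd64.GetRip(), a.Amd64.GetRsp())
	case *rpb.Registers_Arm64:
		fmt.Printf("arm64: pc=%#x sp=%#x\n", a.Arm64.GetPc(), a.Arm64.GetSp())
	default:
		fmt.Println("no architecture set")
	}
}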
diff --git a/pkg/sentry/contexttest/BUILD b/pkg/sentry/contexttest/BUILD
deleted file mode 100644
index 6f4c86684..000000000
--- a/pkg/sentry/contexttest/BUILD
+++ /dev/null
@@ -1,21 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "contexttest",
- testonly = 1,
- srcs = ["contexttest.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/context",
- "//pkg/memutil",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/limits",
- "//pkg/sentry/pgalloc",
- "//pkg/sentry/platform",
- "//pkg/sentry/platform/ptrace",
- "//pkg/sentry/uniqueid",
- ],
-)
diff --git a/pkg/sentry/contexttest/contexttest.go b/pkg/sentry/contexttest/contexttest.go
deleted file mode 100644
index dfd195a23..000000000
--- a/pkg/sentry/contexttest/contexttest.go
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package contexttest builds a test context.Context.
-package contexttest
-
-import (
- "os"
- "sync/atomic"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/memutil"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
- "gvisor.dev/gvisor/pkg/sentry/limits"
- "gvisor.dev/gvisor/pkg/sentry/pgalloc"
- "gvisor.dev/gvisor/pkg/sentry/platform"
- "gvisor.dev/gvisor/pkg/sentry/platform/ptrace"
- "gvisor.dev/gvisor/pkg/sentry/uniqueid"
-)
-
-// Context returns a Context that may be used in tests. Uses ptrace as the
-// platform.Platform.
-//
-// Note that some filesystems may require a minimal kernel for testing, which
-// this test context does not provide. For such tests, see kernel/contexttest.
-func Context(tb testing.TB) context.Context {
- const memfileName = "contexttest-memory"
- memfd, err := memutil.CreateMemFD(memfileName, 0)
- if err != nil {
- tb.Fatalf("error creating application memory file: %v", err)
- }
- memfile := os.NewFile(uintptr(memfd), memfileName)
- mf, err := pgalloc.NewMemoryFile(memfile, pgalloc.MemoryFileOpts{})
- if err != nil {
- memfile.Close()
- tb.Fatalf("error creating pgalloc.MemoryFile: %v", err)
- }
- p, err := ptrace.New()
- if err != nil {
- tb.Fatal(err)
- }
- // Test usage of context.Background is fine.
- return &TestContext{
- Context: context.Background(),
- l: limits.NewLimitSet(),
- mf: mf,
- platform: p,
- creds: auth.NewAnonymousCredentials(),
- otherValues: make(map[interface{}]interface{}),
- }
-}
-
-// TestContext represents a context with minimal functionality suitable for
-// running tests.
-type TestContext struct {
- context.Context
- l *limits.LimitSet
- mf *pgalloc.MemoryFile
- platform platform.Platform
- creds *auth.Credentials
- otherValues map[interface{}]interface{}
-}
-
-// globalUniqueID tracks incremental unique identifiers for tests.
-var globalUniqueID uint64
-
-// globalUniqueIDProvider implements unix.UniqueIDProvider.
-type globalUniqueIDProvider struct{}
-
-// UniqueID implements unix.UniqueIDProvider.UniqueID.
-func (*globalUniqueIDProvider) UniqueID() uint64 {
- return atomic.AddUint64(&globalUniqueID, 1)
-}
-
-// lastInotifyCookie is a monotonically increasing counter for generating unique
-// inotify cookies. Must be accessed using atomic ops.
-var lastInotifyCookie uint32
-
-// hostClock implements ktime.Clock.
-type hostClock struct {
- ktime.WallRateClock
- ktime.NoClockEvents
-}
-
-// Now implements ktime.Clock.Now.
-func (*hostClock) Now() ktime.Time {
- return ktime.FromNanoseconds(time.Now().UnixNano())
-}
-
-// RegisterValue registers additional values with this test context. Useful for
-// providing values from external packages that contexttest can't depend on.
-func (t *TestContext) RegisterValue(key, value interface{}) {
- t.otherValues[key] = value
-}
-
-// Value implements context.Context.
-func (t *TestContext) Value(key interface{}) interface{} {
- switch key {
- case auth.CtxCredentials:
- return t.creds
- case limits.CtxLimits:
- return t.l
- case pgalloc.CtxMemoryFile:
- return t.mf
- case pgalloc.CtxMemoryFileProvider:
- return t
- case platform.CtxPlatform:
- return t.platform
- case uniqueid.CtxGlobalUniqueID:
- return (*globalUniqueIDProvider).UniqueID(nil)
- case uniqueid.CtxGlobalUniqueIDProvider:
- return &globalUniqueIDProvider{}
- case uniqueid.CtxInotifyCookie:
- return atomic.AddUint32(&lastInotifyCookie, 1)
- case ktime.CtxRealtimeClock:
- return &hostClock{}
- default:
- if val, ok := t.otherValues[key]; ok {
- return val
- }
- return t.Context.Value(key)
- }
-}
-
-// MemoryFile implements pgalloc.MemoryFileProvider.MemoryFile.
-func (t *TestContext) MemoryFile() *pgalloc.MemoryFile {
- return t.mf
-}
-
-// RootContext returns a Context that may be used in tests that need root
-// credentials. Uses ptrace as the platform.Platform.
-func RootContext(tb testing.TB) context.Context {
- return auth.ContextWithCredentials(Context(tb), auth.NewRootCredentials(auth.NewRootUserNamespace()))
-}
-
-// WithLimitSet returns a copy of ctx carrying l.
-func WithLimitSet(ctx context.Context, l *limits.LimitSet) context.Context {
- return limitContext{ctx, l}
-}
-
-type limitContext struct {
- context.Context
- l *limits.LimitSet
-}
-
-// Value implements context.Context.
-func (lc limitContext) Value(key interface{}) interface{} {
- switch key {
- case limits.CtxLimits:
- return lc.l
- default:
- return lc.Context.Value(key)
- }
-}
diff --git a/pkg/sentry/control/BUILD b/pkg/sentry/control/BUILD
deleted file mode 100644
index deaf5fa23..000000000
--- a/pkg/sentry/control/BUILD
+++ /dev/null
@@ -1,51 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "control",
- srcs = [
- "control.go",
- "logging.go",
- "pprof.go",
- "proc.go",
- "state.go",
- ],
- visibility = [
- "//:sandbox",
- ],
- deps = [
- "//pkg/abi/linux",
- "//pkg/fd",
- "//pkg/log",
- "//pkg/sentry/fdimport",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/host",
- "//pkg/sentry/fs/user",
- "//pkg/sentry/fsimpl/host",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/limits",
- "//pkg/sentry/state",
- "//pkg/sentry/strace",
- "//pkg/sentry/usage",
- "//pkg/sentry/vfs",
- "//pkg/sentry/watchdog",
- "//pkg/sync",
- "//pkg/tcpip/link/sniffer",
- "//pkg/urpc",
- ],
-)
-
-go_test(
- name = "control_test",
- size = "small",
- srcs = ["proc_test.go"],
- library = ":control",
- deps = [
- "//pkg/log",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/usage",
- ],
-)
diff --git a/pkg/sentry/control/control_state_autogen.go b/pkg/sentry/control/control_state_autogen.go
new file mode 100644
index 000000000..bd5797221
--- /dev/null
+++ b/pkg/sentry/control/control_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package control
diff --git a/pkg/sentry/control/proc_test.go b/pkg/sentry/control/proc_test.go
deleted file mode 100644
index 0a88459b2..000000000
--- a/pkg/sentry/control/proc_test.go
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package control
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/log"
- ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
- "gvisor.dev/gvisor/pkg/sentry/usage"
-)
-
-func init() {
- log.SetLevel(log.Debug)
-}
-
-// Tests that ProcessData.Table() prints with the correct format.
-func TestProcessListTable(t *testing.T) {
- testCases := []struct {
- pl []*Process
- expected string
- }{
- {
- pl: []*Process{},
- expected: "UID PID PPID C TTY STIME TIME CMD",
- },
- {
- pl: []*Process{
- {
- UID: 0,
- PID: 0,
- PPID: 0,
- C: 0,
- TTY: "?",
- STime: "0",
- Time: "0",
- Cmd: "zero",
- },
- {
- UID: 1,
- PID: 1,
- PPID: 1,
- C: 1,
- TTY: "pts/4",
- STime: "1",
- Time: "1",
- Cmd: "one",
- },
- },
- expected: `UID PID PPID C TTY STIME TIME CMD
-0 0 0 0 ? 0 0 zero
-1 1 1 1 pts/4 1 1 one`,
- },
- }
-
- for _, tc := range testCases {
- output := ProcessListToTable(tc.pl)
-
- if tc.expected != output {
- t.Errorf("PrintTable(%v): got:\n%s\nwant:\n%s", tc.pl, output, tc.expected)
- }
- }
-}
-
-func TestProcessListJSON(t *testing.T) {
- testCases := []struct {
- pl []*Process
- expected string
- }{
- {
- pl: []*Process{},
- expected: "[]",
- },
- {
- pl: []*Process{
- {
- UID: 0,
- PID: 0,
- PPID: 0,
- C: 0,
- STime: "0",
- Time: "0",
- Cmd: "zero",
- },
- {
- UID: 1,
- PID: 1,
- PPID: 1,
- C: 1,
- STime: "1",
- Time: "1",
- Cmd: "one",
- },
- },
- expected: "[0,1]",
- },
- }
-
- for _, tc := range testCases {
- output, err := PrintPIDsJSON(tc.pl)
- if err != nil {
- t.Errorf("failed to generate JSON: %v", err)
- }
-
- if tc.expected != output {
- t.Errorf("PrintJSON(%v): got:\n%s\nwant:\n%s", tc.pl, output, tc.expected)
- }
- }
-}
-
-func TestPercentCPU(t *testing.T) {
- testCases := []struct {
- stats usage.CPUStats
- startTime ktime.Time
- now ktime.Time
- expected int32
- }{
- {
- // Verify that 100% use is capped at 99.
- stats: usage.CPUStats{UserTime: 1e9, SysTime: 1e9},
- startTime: ktime.FromNanoseconds(7e9),
- now: ktime.FromNanoseconds(9e9),
- expected: 99,
- },
- {
- // Verify that if usage > lifetime, we get at most 99%
- // usage.
- stats: usage.CPUStats{UserTime: 2e9, SysTime: 2e9},
- startTime: ktime.FromNanoseconds(7e9),
- now: ktime.FromNanoseconds(9e9),
- expected: 99,
- },
- {
- // Verify that 50% usage is reported correctly.
- stats: usage.CPUStats{UserTime: 1e9, SysTime: 1e9},
- startTime: ktime.FromNanoseconds(12e9),
- now: ktime.FromNanoseconds(16e9),
- expected: 50,
- },
- {
- // Verify that 0% usage is reported correctly.
- stats: usage.CPUStats{UserTime: 0, SysTime: 0},
- startTime: ktime.FromNanoseconds(12e9),
- now: ktime.FromNanoseconds(14e9),
- expected: 0,
- },
- }
-
- for _, tc := range testCases {
- if pcpu := percentCPU(tc.stats, tc.startTime, tc.now); pcpu != tc.expected {
- t.Errorf("percentCPU(%v, %v, %v): got %d, want %d", tc.stats, tc.startTime, tc.now, pcpu, tc.expected)
- }
- }
-}
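
The deleted TestPercentCPU cases above imply a simple calculation: total CPU time divided by process lifetime, scaled to a percentage and capped at 99. A minimal standalone sketch of that arithmetic follows; the function and parameter names are hypothetical illustrations, not the sentry's `ktime`/`usage` API.

```go
package main

import "fmt"

// percentCPU mirrors the behavior exercised by the deleted test cases:
// (user + sys) / (now - start), expressed as a percentage and capped at 99.
// All inputs are nanoseconds; the cap avoids ever reporting 100% or more.
func percentCPU(userNS, sysNS, startNS, nowNS int64) int32 {
	lifetime := nowNS - startNS
	if lifetime <= 0 {
		return 0
	}
	pct := int32((userNS + sysNS) * 100 / lifetime)
	if pct > 99 {
		pct = 99
	}
	return pct
}

func main() {
	fmt.Println(percentCPU(1e9, 1e9, 7e9, 9e9))   // 99: 100% usage, capped
	fmt.Println(percentCPU(2e9, 2e9, 7e9, 9e9))   // 99: usage > lifetime, still capped
	fmt.Println(percentCPU(1e9, 1e9, 12e9, 16e9)) // 50
	fmt.Println(percentCPU(0, 0, 12e9, 14e9))     // 0
}
```
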
diff --git a/pkg/sentry/device/BUILD b/pkg/sentry/device/BUILD
deleted file mode 100644
index e403cbd8b..000000000
--- a/pkg/sentry/device/BUILD
+++ /dev/null
@@ -1,20 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "device",
- srcs = ["device.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/sync",
- ],
-)
-
-go_test(
- name = "device_test",
- size = "small",
- srcs = ["device_test.go"],
- library = ":device",
-)
diff --git a/pkg/sentry/device/device_state_autogen.go b/pkg/sentry/device/device_state_autogen.go
new file mode 100644
index 000000000..b68325291
--- /dev/null
+++ b/pkg/sentry/device/device_state_autogen.go
@@ -0,0 +1,97 @@
+// automatically generated by stateify.
+
+package device
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (r *Registry) StateTypeName() string {
+ return "pkg/sentry/device.Registry"
+}
+
+func (r *Registry) StateFields() []string {
+ return []string{
+ "lastAnonDeviceMinor",
+ "devices",
+ }
+}
+
+func (r *Registry) beforeSave() {}
+
+// +checklocksignore
+func (r *Registry) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.lastAnonDeviceMinor)
+ stateSinkObject.Save(1, &r.devices)
+}
+
+func (r *Registry) afterLoad() {}
+
+// +checklocksignore
+func (r *Registry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.lastAnonDeviceMinor)
+ stateSourceObject.Load(1, &r.devices)
+}
+
+func (i *ID) StateTypeName() string {
+ return "pkg/sentry/device.ID"
+}
+
+func (i *ID) StateFields() []string {
+ return []string{
+ "Major",
+ "Minor",
+ }
+}
+
+func (i *ID) beforeSave() {}
+
+// +checklocksignore
+func (i *ID) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.Major)
+ stateSinkObject.Save(1, &i.Minor)
+}
+
+func (i *ID) afterLoad() {}
+
+// +checklocksignore
+func (i *ID) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.Major)
+ stateSourceObject.Load(1, &i.Minor)
+}
+
+func (d *Device) StateTypeName() string {
+ return "pkg/sentry/device.Device"
+}
+
+func (d *Device) StateFields() []string {
+ return []string{
+ "ID",
+ "last",
+ }
+}
+
+func (d *Device) beforeSave() {}
+
+// +checklocksignore
+func (d *Device) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.ID)
+ stateSinkObject.Save(1, &d.last)
+}
+
+func (d *Device) afterLoad() {}
+
+// +checklocksignore
+func (d *Device) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.ID)
+ stateSourceObject.Load(1, &d.last)
+}
+
+func init() {
+ state.Register((*Registry)(nil))
+ state.Register((*ID)(nil))
+ state.Register((*Device)(nil))
+}
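
The generated file above follows the shape that every stateify output in this diff shares: a savable type reports its name and field list, then writes and reads those fields by index. A hand-written sketch of that contract is shown below, using only the calls visible in the generated code; the `counter` type and `example` package are hypothetical and exist purely for illustration.

```go
package example

import "gvisor.dev/gvisor/pkg/state"

// counter is a hypothetical savable type that mirrors the interface stateify
// emits for real types such as Registry, ID, and Device above.
type counter struct {
	hits uint64
}

func (c *counter) StateTypeName() string { return "example.counter" }

func (c *counter) StateFields() []string { return []string{"hits"} }

// beforeSave and afterLoad are hooks; they are empty here, just as in the
// generated files above.
func (c *counter) beforeSave() {}
func (c *counter) afterLoad()  {}

// StateSave writes each field at a stable index so older checkpoints remain
// loadable as long as indices are preserved.
func (c *counter) StateSave(sink state.Sink) {
	c.beforeSave()
	sink.Save(0, &c.hits)
}

// StateLoad reads fields back by the same indices used in StateSave.
func (c *counter) StateLoad(source state.Source) {
	source.Load(0, &c.hits)
}

func init() {
	state.Register((*counter)(nil))
}
```
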
diff --git a/pkg/sentry/device/device_test.go b/pkg/sentry/device/device_test.go
deleted file mode 100644
index e3f51ce4f..000000000
--- a/pkg/sentry/device/device_test.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package device
-
-import (
- "testing"
-)
-
-func TestMultiDevice(t *testing.T) {
- device := &MultiDevice{}
-
- // Check that Load fails to install virtual inodes that are
- // uninitialized.
- if device.Load(MultiDeviceKey{}, 0) {
- t.Fatalf("got load of invalid virtual inode 0, want unsuccessful")
- }
-
- inode := device.Map(MultiDeviceKey{})
-
- // Assert that the same raw device and inode map to
- // a consistent virtual inode.
- if i := device.Map(MultiDeviceKey{}); i != inode {
- t.Fatalf("got inode %d, want %d in %s", i, inode, device)
- }
-
- // Assert that a new inode or new device does not conflict.
- if i := device.Map(MultiDeviceKey{Device: 0, Inode: 1}); i == inode {
- t.Fatalf("got reused inode %d, want new distinct inode in %s", i, device)
- }
- last := device.Map(MultiDeviceKey{Device: 1, Inode: 0})
- if last == inode {
- t.Fatalf("got reused inode %d, want new distinct inode in %s", last, device)
- }
-
- // Virtual is the virtual inode we want to load.
- virtual := last + 1
-
- // Assert that we can load a virtual inode at a new place.
- if !device.Load(MultiDeviceKey{Device: 0, Inode: 2}, virtual) {
- t.Fatalf("got load of virtual inode %d failed, want success in %s", virtual, device)
- }
-
- // Assert that the next inode skips over the loaded one.
- if i := device.Map(MultiDeviceKey{Device: 0, Inode: 3}); i != virtual+1 {
- t.Fatalf("got inode %d, want %d in %s", i, virtual+1, device)
- }
-}
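
The deleted MultiDevice test above exercises two operations: Map, which hands out a fresh virtual inode for an unseen (device, inode) key and returns the same value on repeat lookups, and Load, which pre-installs a specific virtual inode so later allocations skip over it. A standalone sketch of that allocation scheme follows; the `multiDevice` type here is a hypothetical illustration, not the sentry's implementation.

```go
package main

import "fmt"

// key identifies a file on a backing filesystem: (raw device, raw inode).
type key struct {
	device uint64
	inode  uint64
}

// multiDevice hands out stable virtual inode numbers for backing keys.
type multiDevice struct {
	cache map[key]uint64 // key -> virtual inode
	used  map[uint64]key // virtual inode -> key (reverse index)
	last  uint64         // last virtual inode handed out
}

func newMultiDevice() *multiDevice {
	return &multiDevice{cache: map[key]uint64{}, used: map[uint64]key{}}
}

// Map returns the virtual inode for k, allocating a fresh one if needed.
// Allocation skips over virtual inodes already claimed via Load.
func (m *multiDevice) Map(k key) uint64 {
	if v, ok := m.cache[k]; ok {
		return v
	}
	for {
		m.last++
		if _, taken := m.used[m.last]; !taken {
			break
		}
	}
	m.cache[k] = m.last
	m.used[m.last] = k
	return m.last
}

// Load pre-installs virtual inode v for k; it rejects the invalid inode 0.
func (m *multiDevice) Load(k key, v uint64) bool {
	if v == 0 {
		return false
	}
	m.cache[k] = v
	m.used[v] = k
	return true
}

func main() {
	m := newMultiDevice()
	a := m.Map(key{0, 0}) // 1
	m.Load(key{0, 2}, a+1)
	fmt.Println(m.Map(key{0, 3})) // 3: allocation skips the loaded inode 2
}
```
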
diff --git a/pkg/sentry/devices/memdev/BUILD b/pkg/sentry/devices/memdev/BUILD
deleted file mode 100644
index 4c8604d58..000000000
--- a/pkg/sentry/devices/memdev/BUILD
+++ /dev/null
@@ -1,29 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-licenses(["notice"])
-
-go_library(
- name = "memdev",
- srcs = [
- "full.go",
- "memdev.go",
- "null.go",
- "random.go",
- "zero.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/rand",
- "//pkg/safemem",
- "//pkg/sentry/fsimpl/devtmpfs",
- "//pkg/sentry/fsimpl/tmpfs",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/memmap",
- "//pkg/sentry/vfs",
- "//pkg/syserror",
- "//pkg/usermem",
- ],
-)
diff --git a/pkg/sentry/devices/memdev/memdev_state_autogen.go b/pkg/sentry/devices/memdev/memdev_state_autogen.go
new file mode 100644
index 000000000..a8faafbbf
--- /dev/null
+++ b/pkg/sentry/devices/memdev/memdev_state_autogen.go
@@ -0,0 +1,241 @@
+// automatically generated by stateify.
+
+package memdev
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (f *fullDevice) StateTypeName() string {
+ return "pkg/sentry/devices/memdev.fullDevice"
+}
+
+func (f *fullDevice) StateFields() []string {
+ return []string{}
+}
+
+func (f *fullDevice) beforeSave() {}
+
+// +checklocksignore
+func (f *fullDevice) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+}
+
+func (f *fullDevice) afterLoad() {}
+
+// +checklocksignore
+func (f *fullDevice) StateLoad(stateSourceObject state.Source) {
+}
+
+func (fd *fullFD) StateTypeName() string {
+ return "pkg/sentry/devices/memdev.fullFD"
+}
+
+func (fd *fullFD) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "DentryMetadataFileDescriptionImpl",
+ "NoLockFD",
+ }
+}
+
+func (fd *fullFD) beforeSave() {}
+
+// +checklocksignore
+func (fd *fullFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.vfsfd)
+ stateSinkObject.Save(1, &fd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &fd.DentryMetadataFileDescriptionImpl)
+ stateSinkObject.Save(3, &fd.NoLockFD)
+}
+
+func (fd *fullFD) afterLoad() {}
+
+// +checklocksignore
+func (fd *fullFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.vfsfd)
+ stateSourceObject.Load(1, &fd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &fd.DentryMetadataFileDescriptionImpl)
+ stateSourceObject.Load(3, &fd.NoLockFD)
+}
+
+func (n *nullDevice) StateTypeName() string {
+ return "pkg/sentry/devices/memdev.nullDevice"
+}
+
+func (n *nullDevice) StateFields() []string {
+ return []string{}
+}
+
+func (n *nullDevice) beforeSave() {}
+
+// +checklocksignore
+func (n *nullDevice) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+}
+
+func (n *nullDevice) afterLoad() {}
+
+// +checklocksignore
+func (n *nullDevice) StateLoad(stateSourceObject state.Source) {
+}
+
+func (fd *nullFD) StateTypeName() string {
+ return "pkg/sentry/devices/memdev.nullFD"
+}
+
+func (fd *nullFD) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "DentryMetadataFileDescriptionImpl",
+ "NoLockFD",
+ }
+}
+
+func (fd *nullFD) beforeSave() {}
+
+// +checklocksignore
+func (fd *nullFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.vfsfd)
+ stateSinkObject.Save(1, &fd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &fd.DentryMetadataFileDescriptionImpl)
+ stateSinkObject.Save(3, &fd.NoLockFD)
+}
+
+func (fd *nullFD) afterLoad() {}
+
+// +checklocksignore
+func (fd *nullFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.vfsfd)
+ stateSourceObject.Load(1, &fd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &fd.DentryMetadataFileDescriptionImpl)
+ stateSourceObject.Load(3, &fd.NoLockFD)
+}
+
+func (r *randomDevice) StateTypeName() string {
+ return "pkg/sentry/devices/memdev.randomDevice"
+}
+
+func (r *randomDevice) StateFields() []string {
+ return []string{}
+}
+
+func (r *randomDevice) beforeSave() {}
+
+// +checklocksignore
+func (r *randomDevice) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+}
+
+func (r *randomDevice) afterLoad() {}
+
+// +checklocksignore
+func (r *randomDevice) StateLoad(stateSourceObject state.Source) {
+}
+
+func (fd *randomFD) StateTypeName() string {
+ return "pkg/sentry/devices/memdev.randomFD"
+}
+
+func (fd *randomFD) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "DentryMetadataFileDescriptionImpl",
+ "NoLockFD",
+ "off",
+ }
+}
+
+func (fd *randomFD) beforeSave() {}
+
+// +checklocksignore
+func (fd *randomFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.vfsfd)
+ stateSinkObject.Save(1, &fd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &fd.DentryMetadataFileDescriptionImpl)
+ stateSinkObject.Save(3, &fd.NoLockFD)
+ stateSinkObject.Save(4, &fd.off)
+}
+
+func (fd *randomFD) afterLoad() {}
+
+// +checklocksignore
+func (fd *randomFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.vfsfd)
+ stateSourceObject.Load(1, &fd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &fd.DentryMetadataFileDescriptionImpl)
+ stateSourceObject.Load(3, &fd.NoLockFD)
+ stateSourceObject.Load(4, &fd.off)
+}
+
+func (z *zeroDevice) StateTypeName() string {
+ return "pkg/sentry/devices/memdev.zeroDevice"
+}
+
+func (z *zeroDevice) StateFields() []string {
+ return []string{}
+}
+
+func (z *zeroDevice) beforeSave() {}
+
+// +checklocksignore
+func (z *zeroDevice) StateSave(stateSinkObject state.Sink) {
+ z.beforeSave()
+}
+
+func (z *zeroDevice) afterLoad() {}
+
+// +checklocksignore
+func (z *zeroDevice) StateLoad(stateSourceObject state.Source) {
+}
+
+func (fd *zeroFD) StateTypeName() string {
+ return "pkg/sentry/devices/memdev.zeroFD"
+}
+
+func (fd *zeroFD) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "DentryMetadataFileDescriptionImpl",
+ "NoLockFD",
+ }
+}
+
+func (fd *zeroFD) beforeSave() {}
+
+// +checklocksignore
+func (fd *zeroFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.vfsfd)
+ stateSinkObject.Save(1, &fd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &fd.DentryMetadataFileDescriptionImpl)
+ stateSinkObject.Save(3, &fd.NoLockFD)
+}
+
+func (fd *zeroFD) afterLoad() {}
+
+// +checklocksignore
+func (fd *zeroFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.vfsfd)
+ stateSourceObject.Load(1, &fd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &fd.DentryMetadataFileDescriptionImpl)
+ stateSourceObject.Load(3, &fd.NoLockFD)
+}
+
+func init() {
+ state.Register((*fullDevice)(nil))
+ state.Register((*fullFD)(nil))
+ state.Register((*nullDevice)(nil))
+ state.Register((*nullFD)(nil))
+ state.Register((*randomDevice)(nil))
+ state.Register((*randomFD)(nil))
+ state.Register((*zeroDevice)(nil))
+ state.Register((*zeroFD)(nil))
+}
diff --git a/pkg/sentry/devices/quotedev/BUILD b/pkg/sentry/devices/quotedev/BUILD
deleted file mode 100644
index d09214e3e..000000000
--- a/pkg/sentry/devices/quotedev/BUILD
+++ /dev/null
@@ -1,16 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-licenses(["notice"])
-
-go_library(
- name = "quotedev",
- srcs = ["quotedev.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/sentry/fsimpl/devtmpfs",
- "//pkg/sentry/vfs",
- "//pkg/syserror",
- ],
-)
diff --git a/pkg/sentry/devices/quotedev/quotedev.go b/pkg/sentry/devices/quotedev/quotedev.go
deleted file mode 100644
index 6114cb724..000000000
--- a/pkg/sentry/devices/quotedev/quotedev.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package quotedev implements a vfs.Device for /dev/gvisor_quote.
-package quotedev
-
-import (
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/devtmpfs"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
-)
-
-const (
- quoteDevMinor = 0
-)
-
-// quoteDevice implements vfs.Device for /dev/gvisor_quote
-//
-// +stateify savable
-type quoteDevice struct{}
-
-// Open implements vfs.Device.Open.
-// TODO(b/157161182): Add support for attestation ioctls.
-func (quoteDevice) Open(ctx context.Context, mnt *vfs.Mount, vfsd *vfs.Dentry, opts vfs.OpenOptions) (*vfs.FileDescription, error) {
- return nil, syserror.EIO
-}
-
-// Register registers all devices implemented by this package in vfsObj.
-func Register(vfsObj *vfs.VirtualFilesystem) error {
- return vfsObj.RegisterDevice(vfs.CharDevice, linux.UNNAMED_MAJOR, quoteDevMinor, quoteDevice{}, &vfs.RegisterDeviceOptions{
- GroupName: "gvisor_quote",
- })
-}
-
-// CreateDevtmpfsFiles creates device special files in dev representing all
-// devices implemented by this package.
-func CreateDevtmpfsFiles(ctx context.Context, dev *devtmpfs.Accessor) error {
- return dev.CreateDeviceFile(ctx, "gvisor_quote", vfs.CharDevice, linux.UNNAMED_MAJOR, quoteDevMinor, 0666 /* mode */)
-}
diff --git a/pkg/sentry/devices/ttydev/BUILD b/pkg/sentry/devices/ttydev/BUILD
deleted file mode 100644
index b4b6ca38a..000000000
--- a/pkg/sentry/devices/ttydev/BUILD
+++ /dev/null
@@ -1,16 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-licenses(["notice"])
-
-go_library(
- name = "ttydev",
- srcs = ["ttydev.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/sentry/fsimpl/devtmpfs",
- "//pkg/sentry/vfs",
- "//pkg/syserror",
- ],
-)
diff --git a/pkg/sentry/devices/ttydev/ttydev_state_autogen.go b/pkg/sentry/devices/ttydev/ttydev_state_autogen.go
new file mode 100644
index 000000000..88a1cd0a6
--- /dev/null
+++ b/pkg/sentry/devices/ttydev/ttydev_state_autogen.go
@@ -0,0 +1,32 @@
+// automatically generated by stateify.
+
+package ttydev
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (t *ttyDevice) StateTypeName() string {
+ return "pkg/sentry/devices/ttydev.ttyDevice"
+}
+
+func (t *ttyDevice) StateFields() []string {
+ return []string{}
+}
+
+func (t *ttyDevice) beforeSave() {}
+
+// +checklocksignore
+func (t *ttyDevice) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+}
+
+func (t *ttyDevice) afterLoad() {}
+
+// +checklocksignore
+func (t *ttyDevice) StateLoad(stateSourceObject state.Source) {
+}
+
+func init() {
+ state.Register((*ttyDevice)(nil))
+}
diff --git a/pkg/sentry/devices/tundev/BUILD b/pkg/sentry/devices/tundev/BUILD
deleted file mode 100644
index 60c971030..000000000
--- a/pkg/sentry/devices/tundev/BUILD
+++ /dev/null
@@ -1,24 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-licenses(["notice"])
-
-go_library(
- name = "tundev",
- srcs = ["tundev.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/sentry/arch",
- "//pkg/sentry/fsimpl/devtmpfs",
- "//pkg/sentry/inet",
- "//pkg/sentry/kernel",
- "//pkg/sentry/socket/netstack",
- "//pkg/sentry/vfs",
- "//pkg/tcpip/link/tun",
- "//pkg/usermem",
- "//pkg/waiter",
- ],
-)
diff --git a/pkg/sentry/devices/tundev/tundev_state_autogen.go b/pkg/sentry/devices/tundev/tundev_state_autogen.go
new file mode 100644
index 000000000..ad7c7feb4
--- /dev/null
+++ b/pkg/sentry/devices/tundev/tundev_state_autogen.go
@@ -0,0 +1,70 @@
+// automatically generated by stateify.
+
+package tundev
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (t *tunDevice) StateTypeName() string {
+ return "pkg/sentry/devices/tundev.tunDevice"
+}
+
+func (t *tunDevice) StateFields() []string {
+ return []string{}
+}
+
+func (t *tunDevice) beforeSave() {}
+
+// +checklocksignore
+func (t *tunDevice) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+}
+
+func (t *tunDevice) afterLoad() {}
+
+// +checklocksignore
+func (t *tunDevice) StateLoad(stateSourceObject state.Source) {
+}
+
+func (fd *tunFD) StateTypeName() string {
+ return "pkg/sentry/devices/tundev.tunFD"
+}
+
+func (fd *tunFD) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "DentryMetadataFileDescriptionImpl",
+ "NoLockFD",
+ "device",
+ }
+}
+
+func (fd *tunFD) beforeSave() {}
+
+// +checklocksignore
+func (fd *tunFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.vfsfd)
+ stateSinkObject.Save(1, &fd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &fd.DentryMetadataFileDescriptionImpl)
+ stateSinkObject.Save(3, &fd.NoLockFD)
+ stateSinkObject.Save(4, &fd.device)
+}
+
+func (fd *tunFD) afterLoad() {}
+
+// +checklocksignore
+func (fd *tunFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.vfsfd)
+ stateSourceObject.Load(1, &fd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &fd.DentryMetadataFileDescriptionImpl)
+ stateSourceObject.Load(3, &fd.NoLockFD)
+ stateSourceObject.Load(4, &fd.device)
+}
+
+func init() {
+ state.Register((*tunDevice)(nil))
+ state.Register((*tunFD)(nil))
+}
diff --git a/pkg/sentry/fdimport/BUILD b/pkg/sentry/fdimport/BUILD
deleted file mode 100644
index 563e96e0d..000000000
--- a/pkg/sentry/fdimport/BUILD
+++ /dev/null
@@ -1,21 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "fdimport",
- srcs = [
- "fdimport.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/context",
- "//pkg/fd",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/host",
- "//pkg/sentry/fsimpl/host",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- ],
-)
diff --git a/pkg/sentry/fdimport/fdimport_state_autogen.go b/pkg/sentry/fdimport/fdimport_state_autogen.go
new file mode 100644
index 000000000..fbb89b276
--- /dev/null
+++ b/pkg/sentry/fdimport/fdimport_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package fdimport
diff --git a/pkg/sentry/fs/BUILD b/pkg/sentry/fs/BUILD
deleted file mode 100644
index 58fe1e77c..000000000
--- a/pkg/sentry/fs/BUILD
+++ /dev/null
@@ -1,139 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "fs",
- srcs = [
- "attr.go",
- "context.go",
- "copy_up.go",
- "dentry.go",
- "dirent.go",
- "dirent_cache.go",
- "dirent_cache_limiter.go",
- "dirent_list.go",
- "dirent_state.go",
- "event_list.go",
- "file.go",
- "file_operations.go",
- "file_overlay.go",
- "file_state.go",
- "filesystems.go",
- "flags.go",
- "fs.go",
- "inode.go",
- "inode_inotify.go",
- "inode_operations.go",
- "inode_overlay.go",
- "inotify.go",
- "inotify_event.go",
- "inotify_watch.go",
- "mock.go",
- "mount.go",
- "mount_overlay.go",
- "mounts.go",
- "offset.go",
- "overlay.go",
- "path.go",
- "restore.go",
- "save.go",
- "seek.go",
- "splice.go",
- "sync.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/amutex",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/p9",
- "//pkg/refs",
- "//pkg/secio",
- "//pkg/sentry/arch",
- "//pkg/sentry/device",
- "//pkg/sentry/fs/lock",
- "//pkg/sentry/fsmetric",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/limits",
- "//pkg/sentry/memmap",
- "//pkg/sentry/platform",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/uniqueid",
- "//pkg/sentry/usage",
- "//pkg/state",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- "//pkg/waiter",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_template_instance(
- name = "dirent_list",
- out = "dirent_list.go",
- package = "fs",
- prefix = "dirent",
- template = "//pkg/ilist:generic_list",
- types = {
- "Linker": "*Dirent",
- "Element": "*Dirent",
- },
-)
-
-go_template_instance(
- name = "event_list",
- out = "event_list.go",
- package = "fs",
- prefix = "event",
- template = "//pkg/ilist:generic_list",
- types = {
- "Linker": "*Event",
- "Element": "*Event",
- },
-)
-
-go_test(
- name = "fs_x_test",
- size = "small",
- srcs = [
- "copy_up_test.go",
- "file_overlay_test.go",
- "inode_overlay_test.go",
- "mounts_test.go",
- ],
- deps = [
- ":fs",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/sentry/fs/fsutil",
- "//pkg/sentry/fs/ramfs",
- "//pkg/sentry/fs/tmpfs",
- "//pkg/sentry/kernel/contexttest",
- "//pkg/sync",
- "//pkg/usermem",
- ],
-)
-
-go_test(
- name = "fs_test",
- size = "small",
- srcs = [
- "dirent_cache_test.go",
- "dirent_refs_test.go",
- "mount_test.go",
- "path_test.go",
- ],
- library = ":fs",
- deps = [
- "//pkg/context",
- "//pkg/sentry/contexttest",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/sentry/fs/README.md b/pkg/sentry/fs/README.md
deleted file mode 100644
index db4a1b730..000000000
--- a/pkg/sentry/fs/README.md
+++ /dev/null
@@ -1,229 +0,0 @@
-This package provides an implementation of the Linux virtual filesystem.
-
-[TOC]
-
-## Overview
-
-- An `fs.Dirent` caches an `fs.Inode` in memory at a path in the VFS, giving
- the `fs.Inode` a relative position with respect to other `fs.Inode`s.
-
-- If an `fs.Dirent` is referenced by two file descriptors, then those file
- descriptors are coherent with each other: they depend on the same
- `fs.Inode`.
-
-- A mount point is an `fs.Dirent` for which `fs.Dirent.mounted` is true. It
- exposes the root of a mounted filesystem.
-
-- The `fs.Inode` produced by a registered filesystem on mount(2) owns an
- `fs.MountedFilesystem` from which other `fs.Inode`s will be looked up. For a
- remote filesystem, the `fs.MountedFilesystem` owns the connection to that
- remote filesystem.
-
-- In general:
-
-```
-fs.Inode <------------------------------
-|                                       |
-|                                       |
-produced by                             |
-exactly one                             |
-|                    responsible for the
-|                    virtual identity of
-v                                       |
-fs.MountedFilesystem -------------------
-```
-
-Glossary:
-
-- VFS: virtual filesystem.
-
-- inode: a virtual file object holding a cached view of a file on a backing
- filesystem (includes metadata and page caches).
-
-- superblock: the virtual state of a mounted filesystem (e.g. the virtual
- inode number set).
-
-- mount namespace: a view of the mounts under a root (during path traversal,
- the VFS makes visible/follows the mount point that is in the current task's
- mount namespace).
-
-## Save and restore
-
-An application's hard dependencies on filesystem state can be broken down into
-two categories:
-
-- The state necessary to execute a traversal on or view the *virtual*
- filesystem hierarchy, regardless of what files an application has open.
-
-- The state necessary to represent open files.
-
-The first is always necessary to save and restore. An application may never have
-any open file descriptors, but across save and restore it should see a coherent
-view of any mount namespace. NOTE(b/63601033): Currently only one "initial"
-mount namespace is supported.
-
-The second is so that system calls across save and restore are coherent with
-each other (e.g. so that unintended re-reads or overwrites do not occur).
-
-Specifically this state is:
-
-- An `fs.MountManager` containing mount points.
-
-- A `kernel.FDTable` containing pointers to open files.
-
-Anything else managed by the VFS that can be easily loaded into memory from a
-filesystem is synced back to those filesystems and is not saved. Examples are
-pages in page caches used for optimizations (i.e. readahead and writeback), and
-directory entries used to accelerate path lookups.
-
-### Mount points
-
-Saving and restoring a mount point means saving and restoring:
-
-- The root of the mounted filesystem.
-
-- Mount flags, which control how the VFS interacts with the mounted
- filesystem.
-
-- Any relevant metadata about the mounted filesystem.
-
-- All `fs.Inode`s referenced by the application that reside under the mount
- point.
-
-`fs.MountedFilesystem` is metadata about a filesystem that is mounted. It is
-referenced by every `fs.Inode` loaded into memory under the mount point
-including the `fs.Inode` of the mount point itself. The `fs.MountedFilesystem`
-maps file objects on the filesystem to a virtualized `fs.Inode` number and vice
-versa.
-
-To restore all `fs.Inode`s under a given mount point, each `fs.Inode` leverages
-its dependency on an `fs.MountedFilesystem`. Since the `fs.MountedFilesystem`
-knows how an `fs.Inode` maps to a file object on a backing filesystem, this
-mapping can be trivially consulted by each `fs.Inode` when the `fs.Inode` is
-restored.
-
-In detail, a mount point is saved in two steps:
-
-- First, after the kernel is paused but before state.Save, we walk all mount
- namespaces and install a mapping from `fs.Inode` numbers to file paths
- relative to the root of the mounted filesystem in each
- `fs.MountedFilesystem`. This is subsequently called the set of `fs.Inode`
- mappings.
-
-- Second, during state.Save, each `fs.MountedFilesystem` decides whether to
- save the set of `fs.Inode` mappings. In-memory filesystems, like tmpfs, have
- no need to save a set of `fs.Inode` mappings, since the `fs.Inode`s can be
- entirely encoded in the state file. Each `fs.MountedFilesystem` also optionally
- saves the device name from when the filesystem was originally mounted. Each
- `fs.Inode` saves its virtual identifier and a reference to a
- `fs.MountedFilesystem`.
-
-A mount point is restored in two steps:
-
-- First, before state.Load, all mount configurations are stored in a global
- `fs.RestoreEnvironment`. This tells us what mount points the user wants to
- restore and how to re-establish pointers to backing filesystems.
-
-- Second, during state.Load, each `fs.MountedFilesystem` optionally searches
- for a mount in the `fs.RestoreEnvironment` that matches its saved device
- name. The `fs.MountedFilesystem` then reestablishes a pointer to the root of
- the mounted filesystem. For example, the mount specification provides the
- network connection for a mounted remote filesystem client to communicate
- with its remote file server. The `fs.MountedFilesystem` also trivially loads
- its set of `fs.Inode` mappings. When an `fs.Inode` is encountered, the
- `fs.Inode` loads its virtual identifier and its reference a
- `fs.MountedFilesystem`. It uses the `fs.MountedFilesystem` to obtain the
- root of the mounted filesystem and the `fs.Inode` mappings to obtain the
- relative file path to its data. With these, the `fs.Inode` re-establishes a
- pointer to its file object.
-
-A mount point can trivially restore its `fs.Inode`s in parallel since
-`fs.Inode`s have a restore dependency on their `fs.MountedFilesystem` and not on
-each other.
-
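To make the two save steps above concrete, the sketch below records, per mounted filesystem, a map from virtual inode number to path relative to the mount root (step one) and consults it on restore (step two). All names here are hypothetical illustrations of the idea, not the `fs.MountedFilesystem` API.

```go
package main

import "fmt"

// inodeMappings is the hypothetical "set of fs.Inode mappings" described
// above: virtual inode number -> path relative to the mount root.
type inodeMappings map[uint64]string

// mount is a toy stand-in for a mounted filesystem with cached inodes.
type mount struct {
	device   string
	mappings inodeMappings
}

// recordMapping is what the first save step does for every cached inode while
// the kernel is paused: remember where the inode's backing file lives so the
// inode can re-open it after restore.
func (m *mount) recordMapping(inodeNumber uint64, relativePath string) {
	if m.mappings == nil {
		m.mappings = inodeMappings{}
	}
	m.mappings[inodeNumber] = relativePath
}

// lookupPath is what an inode consults during restore to re-establish a
// pointer to its backing file object.
func (m *mount) lookupPath(inodeNumber uint64) (string, bool) {
	p, ok := m.mappings[inodeNumber]
	return p, ok
}

func main() {
	m := &mount{device: "9p:/container-rootfs"}
	m.recordMapping(42, "etc/hostname")
	if p, ok := m.lookupPath(42); ok {
		fmt.Println("inode 42 restores from", p)
	}
}
```
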
-### Open files
-
-An `fs.File` references the following filesystem objects:
-
-```go
-fs.File -> fs.Dirent -> fs.Inode -> fs.MountedFilesystem
-```
-
-The `fs.Inode` is restored using its `fs.MountedFilesystem`. The
-[Mount points](#mount-points) section above describes how this happens in
-detail. The `fs.Dirent` restores its pointer to an `fs.Inode`, pointers to
-parent and children `fs.Dirents`, and the basename of the file.
-
-Otherwise an `fs.File` restores flags, an offset, and a unique identifier (only
-used internally).
-
-It may use the `fs.Inode`, which it indirectly holds a reference on through the
-`fs.Dirent`, to reestablish an open file handle on the backing filesystem (e.g.
-to continue reading and writing).
-
-## Overlay
-
-The overlay implementation in the fs package takes Linux overlayfs as a frame of
-reference but corrects for several POSIX consistency errors.
-
-In Linux overlayfs, the `struct inode` used for reading and writing to the same
-file may be different. This is because the `struct inode` is dissociated with
-the process of copying up the file from the upper to the lower directory. Since
-flock(2) and fcntl(2) locks, inotify(7) watches, page caches, and a file's
-identity are all stored directly or indirectly off the `struct inode`, these
-properties of the `struct inode` may be stale after the first modification. This
-can lead to file locking bugs, missed inotify events, and inconsistent data in
-shared memory mappings of files, to name a few problems.
-
-The fs package maintains a single `fs.Inode` to represent a directory entry in
-an overlay and defines operations on this `fs.Inode` which synchronize with the
-copy up process. This achieves several things:
-
-+ File locks, inotify watches, and the identity of the file need not be copied
- at all.
-
-+ Memory mappings of files coordinate with the copy up process so that if a
- file in the lower directory is memory mapped, all references to it are
- invalidated, forcing the application to re-fault on memory mappings of the
- file under the upper directory.
-
-The `fs.Inode` holds metadata about files in the upper and/or lower directories
-via an `fs.overlayEntry`. The `fs.overlayEntry` implements the `fs.Mappable`
-interface. It multiplexes between upper and lower directory memory mappings and
-stores a copy of memory references so they can be transferred to the upper
-directory `fs.Mappable` when the file is copied up.
-
-The lower filesystem in an overlay may contain another (nested) overlay, but the
-upper filesystem may not contain another overlay. In other words, nested
-overlays form a tree structure that only allows branching in the lower
-filesystem.
-
-Caching decisions in the overlay are delegated to the upper filesystem, meaning
-that the Keep and Revalidate methods on the overlay return the same values as
-the upper filesystem. A small wrinkle is that the lower filesystem is not
-allowed to return `true` from Revalidate, as the overlay cannot reload inodes
-from the lower filesystem. A lower filesystem that does return `true` from
-Revalidate will trigger a panic.
-
-The `fs.Inode` also holds a reference to a `fs.MountedFilesystem` that
-normalizes across the mounted filesystem state of the upper and lower
-directories.
-
-When a file is copied from the lower to the upper directory, attempts to
-interact with the file block until the copy completes. All copying synchronizes
-with rename(2).
-
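The "block until the copy completes" behavior described above can be pictured as a once-guarded copy step that every mutating operation passes through first. A rough standalone sketch of that synchronization pattern follows; the `overlayFile` type is hypothetical, not the fs package's overlay entry.

```go
package main

import (
	"fmt"
	"sync"
)

// overlayFile is a toy model of an overlay inode: reads may be served from
// the lower layer, but the first mutation must copy the file up, and every
// other caller waits for that single copy to finish.
type overlayFile struct {
	copyUpOnce sync.Once
	upperData  []byte
}

// ensureCopiedUp performs the copy exactly once; concurrent callers block in
// Do until the copy returns, mirroring how operations synchronize with the
// copy up process (the real overlay does this while holding copyMu).
func (f *overlayFile) ensureCopiedUp(lower []byte) {
	f.copyUpOnce.Do(func() {
		f.upperData = append([]byte(nil), lower...)
	})
}

func main() {
	lower := []byte("contents that live in the lower layer")
	f := &overlayFile{}

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			f.ensureCopiedUp(lower)
			fmt.Println("sees", len(f.upperData), "bytes in the upper layer")
		}()
	}
	wg.Wait()
}
```
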
-## Future Work
-
-### Overlay
-
-When a file is copied from a lower directory to an upper directory, several
-locks are taken: the global renameMu and the copyMu of the `fs.Inode` being
-copied. This blocks operations on the file, including fault handling of memory
-mappings. Performance could be improved by copying files into a temporary
-directory that resides on the same filesystem as the upper directory and doing
-an atomic rename, holding locks only during the rename operation.
-
-Additionally, files are copied up synchronously. For large files, this causes a
-noticeable latency. Performance could be improved by pipelining copies at
-non-overlapping file offsets.
diff --git a/pkg/sentry/fs/anon/BUILD b/pkg/sentry/fs/anon/BUILD
deleted file mode 100644
index 1ce56d79f..000000000
--- a/pkg/sentry/fs/anon/BUILD
+++ /dev/null
@@ -1,20 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "anon",
- srcs = [
- "anon.go",
- "device.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/hostarch",
- "//pkg/sentry/device",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/fsutil",
- ],
-)
diff --git a/pkg/sentry/fs/anon/anon_state_autogen.go b/pkg/sentry/fs/anon/anon_state_autogen.go
new file mode 100644
index 000000000..b2b1a466e
--- /dev/null
+++ b/pkg/sentry/fs/anon/anon_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package anon
diff --git a/pkg/sentry/fs/copy_up_test.go b/pkg/sentry/fs/copy_up_test.go
deleted file mode 100644
index e04784db2..000000000
--- a/pkg/sentry/fs/copy_up_test.go
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fs_test
-
-import (
- "bytes"
- "crypto/rand"
- "fmt"
- "io"
- "testing"
-
- "gvisor.dev/gvisor/pkg/sentry/fs"
- _ "gvisor.dev/gvisor/pkg/sentry/fs/tmpfs"
- "gvisor.dev/gvisor/pkg/sentry/kernel/contexttest"
- "gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-const (
- // origFileSize is the original file size. This many bytes should be
- // copied up before the test file is modified.
- origFileSize = 4096
-
- // truncateFileSize is the size to which all test files are truncated.
- truncateFileSize = 10
-)
-
-// TestConcurrentCopyUp is a copy up stress test for an overlay.
-//
-// It creates a 64-level deep directory tree in the lower filesystem and
-// populates the last subdirectory with 64 files containing random content:
-//
-// /lower
-// /subdir0/.../subdir63/
-// /file0
-// ...
-// /file63
-//
-// The files are truncated concurrently by 4 goroutines per file.
-// These goroutines contend with copying up all 64 parent subdirectories
-// as well as the final file content.
-//
-// At the end of the test, we assert that the files respect the new truncated
-// size and contain the content we expect.
-func TestConcurrentCopyUp(t *testing.T) {
- ctx := contexttest.Context(t)
- files := makeOverlayTestFiles(t)
-
- var wg sync.WaitGroup
- for _, file := range files {
- for i := 0; i < 4; i++ {
- wg.Add(1)
- go func(o *overlayTestFile) {
- if err := o.File.Dirent.Inode.Truncate(ctx, o.File.Dirent, truncateFileSize); err != nil {
- t.Errorf("failed to copy up: %v", err)
- }
- wg.Done()
- }(file)
- }
- }
- wg.Wait()
-
- for _, file := range files {
- got := make([]byte, origFileSize)
- n, err := file.File.Readv(ctx, usermem.BytesIOSequence(got))
- if int(n) != truncateFileSize {
- t.Fatalf("read %d bytes from file, want %d", n, truncateFileSize)
- }
- if err != nil && err != io.EOF {
- t.Fatalf("read got error %v, want nil", err)
- }
- if !bytes.Equal(got[:n], file.content[:truncateFileSize]) {
- t.Fatalf("file content is %v, want %v", got[:n], file.content[:truncateFileSize])
- }
- }
-}
-
-type overlayTestFile struct {
- File *fs.File
- name string
- content []byte
-}
-
-func makeOverlayTestFiles(t *testing.T) []*overlayTestFile {
- ctx := contexttest.Context(t)
-
- // Create a lower tmpfs mount.
- fsys, _ := fs.FindFilesystem("tmpfs")
- lower, err := fsys.Mount(contexttest.Context(t), "", fs.MountSourceFlags{}, "", nil)
- if err != nil {
- t.Fatalf("failed to mount tmpfs: %v", err)
- }
- lowerRoot := fs.NewDirent(ctx, lower, "")
-
- // Make a deep set of subdirectories that everyone shares.
- next := lowerRoot
- for i := 0; i < 64; i++ {
- name := fmt.Sprintf("subdir%d", i)
- err := next.CreateDirectory(ctx, lowerRoot, name, fs.FilePermsFromMode(0777))
- if err != nil {
- t.Fatalf("failed to create dir %q: %v", name, err)
- }
- next, err = next.Walk(ctx, lowerRoot, name)
- if err != nil {
- t.Fatalf("failed to walk to %q: %v", name, err)
- }
- }
-
- // Make a bunch of files in the last directory.
- var files []*overlayTestFile
- for i := 0; i < 64; i++ {
- name := fmt.Sprintf("file%d", i)
- f, err := next.Create(ctx, next, name, fs.FileFlags{Read: true, Write: true}, fs.FilePermsFromMode(0666))
- if err != nil {
- t.Fatalf("failed to create file %q: %v", name, err)
- }
- defer f.DecRef(ctx)
-
- relname, _ := f.Dirent.FullName(lowerRoot)
-
- o := &overlayTestFile{
- name: relname,
- content: make([]byte, origFileSize),
- }
-
- if _, err := rand.Read(o.content); err != nil {
- t.Fatalf("failed to read from /dev/urandom: %v", err)
- }
-
- if _, err := f.Writev(ctx, usermem.BytesIOSequence(o.content)); err != nil {
- t.Fatalf("failed to write content to file %q: %v", name, err)
- }
-
- files = append(files, o)
- }
-
- // Create an empty upper tmpfs mount which we will copy up into.
- upper, err := fsys.Mount(ctx, "", fs.MountSourceFlags{}, "", nil)
- if err != nil {
- t.Fatalf("failed to mount tmpfs: %v", err)
- }
-
- // Construct an overlay root.
- overlay, err := fs.NewOverlayRoot(ctx, upper, lower, fs.MountSourceFlags{})
- if err != nil {
- t.Fatalf("failed to construct overlay root: %v", err)
- }
-
- // Create a MountNamespace to traverse the file system.
- mns, err := fs.NewMountNamespace(ctx, overlay)
- if err != nil {
- t.Fatalf("failed to construct mount manager: %v", err)
- }
-
- // Walk to all of the files in the overlay, open them readable.
- for _, f := range files {
- maxTraversals := uint(0)
- d, err := mns.FindInode(ctx, mns.Root(), mns.Root(), f.name, &maxTraversals)
- if err != nil {
- t.Fatalf("failed to find %q: %v", f.name, err)
- }
- defer d.DecRef(ctx)
-
- f.File, err = d.Inode.GetFile(ctx, d, fs.FileFlags{Read: true})
- if err != nil {
- t.Fatalf("failed to open file %q readable: %v", f.name, err)
- }
- }
-
- return files
-}
diff --git a/pkg/sentry/fs/dev/BUILD b/pkg/sentry/fs/dev/BUILD
deleted file mode 100644
index e28a8961b..000000000
--- a/pkg/sentry/fs/dev/BUILD
+++ /dev/null
@@ -1,42 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "dev",
- srcs = [
- "dev.go",
- "device.go",
- "fs.go",
- "full.go",
- "net_tun.go",
- "null.go",
- "random.go",
- "tty.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/rand",
- "//pkg/safemem",
- "//pkg/sentry/arch",
- "//pkg/sentry/device",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/fsutil",
- "//pkg/sentry/fs/ramfs",
- "//pkg/sentry/fs/tmpfs",
- "//pkg/sentry/inet",
- "//pkg/sentry/kernel",
- "//pkg/sentry/memmap",
- "//pkg/sentry/mm",
- "//pkg/sentry/pgalloc",
- "//pkg/sentry/socket/netstack",
- "//pkg/syserror",
- "//pkg/tcpip/link/tun",
- "//pkg/usermem",
- "//pkg/waiter",
- ],
-)
diff --git a/pkg/sentry/fs/dev/dev_state_autogen.go b/pkg/sentry/fs/dev/dev_state_autogen.go
new file mode 100644
index 000000000..23945d391
--- /dev/null
+++ b/pkg/sentry/fs/dev/dev_state_autogen.go
@@ -0,0 +1,324 @@
+// automatically generated by stateify.
+
+package dev
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (f *filesystem) StateTypeName() string {
+ return "pkg/sentry/fs/dev.filesystem"
+}
+
+func (f *filesystem) StateFields() []string {
+ return []string{}
+}
+
+func (f *filesystem) beforeSave() {}
+
+// +checklocksignore
+func (f *filesystem) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+}
+
+func (f *filesystem) afterLoad() {}
+
+// +checklocksignore
+func (f *filesystem) StateLoad(stateSourceObject state.Source) {
+}
+
+func (f *fullDevice) StateTypeName() string {
+ return "pkg/sentry/fs/dev.fullDevice"
+}
+
+func (f *fullDevice) StateFields() []string {
+ return []string{
+ "InodeSimpleAttributes",
+ }
+}
+
+func (f *fullDevice) beforeSave() {}
+
+// +checklocksignore
+func (f *fullDevice) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.InodeSimpleAttributes)
+}
+
+func (f *fullDevice) afterLoad() {}
+
+// +checklocksignore
+func (f *fullDevice) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.InodeSimpleAttributes)
+}
+
+func (f *fullFileOperations) StateTypeName() string {
+ return "pkg/sentry/fs/dev.fullFileOperations"
+}
+
+func (f *fullFileOperations) StateFields() []string {
+ return []string{}
+}
+
+func (f *fullFileOperations) beforeSave() {}
+
+// +checklocksignore
+func (f *fullFileOperations) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+}
+
+func (f *fullFileOperations) afterLoad() {}
+
+// +checklocksignore
+func (f *fullFileOperations) StateLoad(stateSourceObject state.Source) {
+}
+
+func (n *netTunInodeOperations) StateTypeName() string {
+ return "pkg/sentry/fs/dev.netTunInodeOperations"
+}
+
+func (n *netTunInodeOperations) StateFields() []string {
+ return []string{
+ "InodeSimpleAttributes",
+ }
+}
+
+func (n *netTunInodeOperations) beforeSave() {}
+
+// +checklocksignore
+func (n *netTunInodeOperations) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.InodeSimpleAttributes)
+}
+
+func (n *netTunInodeOperations) afterLoad() {}
+
+// +checklocksignore
+func (n *netTunInodeOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.InodeSimpleAttributes)
+}
+
+func (n *netTunFileOperations) StateTypeName() string {
+ return "pkg/sentry/fs/dev.netTunFileOperations"
+}
+
+func (n *netTunFileOperations) StateFields() []string {
+ return []string{
+ "device",
+ }
+}
+
+func (n *netTunFileOperations) beforeSave() {}
+
+// +checklocksignore
+func (n *netTunFileOperations) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.device)
+}
+
+func (n *netTunFileOperations) afterLoad() {}
+
+// +checklocksignore
+func (n *netTunFileOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.device)
+}
+
+func (n *nullDevice) StateTypeName() string {
+ return "pkg/sentry/fs/dev.nullDevice"
+}
+
+func (n *nullDevice) StateFields() []string {
+ return []string{
+ "InodeSimpleAttributes",
+ }
+}
+
+func (n *nullDevice) beforeSave() {}
+
+// +checklocksignore
+func (n *nullDevice) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.InodeSimpleAttributes)
+}
+
+func (n *nullDevice) afterLoad() {}
+
+// +checklocksignore
+func (n *nullDevice) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.InodeSimpleAttributes)
+}
+
+func (n *nullFileOperations) StateTypeName() string {
+ return "pkg/sentry/fs/dev.nullFileOperations"
+}
+
+func (n *nullFileOperations) StateFields() []string {
+ return []string{}
+}
+
+func (n *nullFileOperations) beforeSave() {}
+
+// +checklocksignore
+func (n *nullFileOperations) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+}
+
+func (n *nullFileOperations) afterLoad() {}
+
+// +checklocksignore
+func (n *nullFileOperations) StateLoad(stateSourceObject state.Source) {
+}
+
+func (zd *zeroDevice) StateTypeName() string {
+ return "pkg/sentry/fs/dev.zeroDevice"
+}
+
+func (zd *zeroDevice) StateFields() []string {
+ return []string{
+ "nullDevice",
+ }
+}
+
+func (zd *zeroDevice) beforeSave() {}
+
+// +checklocksignore
+func (zd *zeroDevice) StateSave(stateSinkObject state.Sink) {
+ zd.beforeSave()
+ stateSinkObject.Save(0, &zd.nullDevice)
+}
+
+func (zd *zeroDevice) afterLoad() {}
+
+// +checklocksignore
+func (zd *zeroDevice) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &zd.nullDevice)
+}
+
+func (z *zeroFileOperations) StateTypeName() string {
+ return "pkg/sentry/fs/dev.zeroFileOperations"
+}
+
+func (z *zeroFileOperations) StateFields() []string {
+ return []string{}
+}
+
+func (z *zeroFileOperations) beforeSave() {}
+
+// +checklocksignore
+func (z *zeroFileOperations) StateSave(stateSinkObject state.Sink) {
+ z.beforeSave()
+}
+
+func (z *zeroFileOperations) afterLoad() {}
+
+// +checklocksignore
+func (z *zeroFileOperations) StateLoad(stateSourceObject state.Source) {
+}
+
+func (r *randomDevice) StateTypeName() string {
+ return "pkg/sentry/fs/dev.randomDevice"
+}
+
+func (r *randomDevice) StateFields() []string {
+ return []string{
+ "InodeSimpleAttributes",
+ }
+}
+
+func (r *randomDevice) beforeSave() {}
+
+// +checklocksignore
+func (r *randomDevice) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.InodeSimpleAttributes)
+}
+
+func (r *randomDevice) afterLoad() {}
+
+// +checklocksignore
+func (r *randomDevice) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.InodeSimpleAttributes)
+}
+
+func (r *randomFileOperations) StateTypeName() string {
+ return "pkg/sentry/fs/dev.randomFileOperations"
+}
+
+func (r *randomFileOperations) StateFields() []string {
+ return []string{}
+}
+
+func (r *randomFileOperations) beforeSave() {}
+
+// +checklocksignore
+func (r *randomFileOperations) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+}
+
+func (r *randomFileOperations) afterLoad() {}
+
+// +checklocksignore
+func (r *randomFileOperations) StateLoad(stateSourceObject state.Source) {
+}
+
+func (t *ttyInodeOperations) StateTypeName() string {
+ return "pkg/sentry/fs/dev.ttyInodeOperations"
+}
+
+func (t *ttyInodeOperations) StateFields() []string {
+ return []string{
+ "InodeSimpleAttributes",
+ }
+}
+
+func (t *ttyInodeOperations) beforeSave() {}
+
+// +checklocksignore
+func (t *ttyInodeOperations) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ stateSinkObject.Save(0, &t.InodeSimpleAttributes)
+}
+
+func (t *ttyInodeOperations) afterLoad() {}
+
+// +checklocksignore
+func (t *ttyInodeOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.InodeSimpleAttributes)
+}
+
+func (t *ttyFileOperations) StateTypeName() string {
+ return "pkg/sentry/fs/dev.ttyFileOperations"
+}
+
+func (t *ttyFileOperations) StateFields() []string {
+ return []string{}
+}
+
+func (t *ttyFileOperations) beforeSave() {}
+
+// +checklocksignore
+func (t *ttyFileOperations) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+}
+
+func (t *ttyFileOperations) afterLoad() {}
+
+// +checklocksignore
+func (t *ttyFileOperations) StateLoad(stateSourceObject state.Source) {
+}
+
+func init() {
+ state.Register((*filesystem)(nil))
+ state.Register((*fullDevice)(nil))
+ state.Register((*fullFileOperations)(nil))
+ state.Register((*netTunInodeOperations)(nil))
+ state.Register((*netTunFileOperations)(nil))
+ state.Register((*nullDevice)(nil))
+ state.Register((*nullFileOperations)(nil))
+ state.Register((*zeroDevice)(nil))
+ state.Register((*zeroFileOperations)(nil))
+ state.Register((*randomDevice)(nil))
+ state.Register((*randomFileOperations)(nil))
+ state.Register((*ttyInodeOperations)(nil))
+ state.Register((*ttyFileOperations)(nil))
+}
diff --git a/pkg/sentry/fs/dirent_cache_test.go b/pkg/sentry/fs/dirent_cache_test.go
deleted file mode 100644
index 395c879f5..000000000
--- a/pkg/sentry/fs/dirent_cache_test.go
+++ /dev/null
@@ -1,247 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fs
-
-import (
- "testing"
-)
-
-func TestDirentCache(t *testing.T) {
- const maxSize = 5
-
- c := NewDirentCache(maxSize)
-
- // Size starts at 0.
- if got, want := c.Size(), uint64(0); got != want {
- t.Errorf("c.Size() got %v, want %v", got, want)
- }
-
- // Create a Dirent d.
- d := NewNegativeDirent("")
-
- // c does not contain d.
- if got, want := c.contains(d), false; got != want {
- t.Errorf("c.contains(d) got %v want %v", got, want)
- }
-
- // Add d to the cache.
- c.Add(d)
-
- // Size is now 1.
- if got, want := c.Size(), uint64(1); got != want {
- t.Errorf("c.Size() got %v, want %v", got, want)
- }
-
- // c contains d.
- if got, want := c.contains(d), true; got != want {
- t.Errorf("c.contains(d) got %v want %v", got, want)
- }
-
- // Add maxSize-1 more elements. d should be oldest element.
- for i := 0; i < maxSize-1; i++ {
- c.Add(NewNegativeDirent(""))
- }
-
- // Size is maxSize.
- if got, want := c.Size(), uint64(maxSize); got != want {
- t.Errorf("c.Size() got %v, want %v", got, want)
- }
-
- // c contains d.
- if got, want := c.contains(d), true; got != want {
- t.Errorf("c.contains(d) got %v want %v", got, want)
- }
-
- // "Bump" d to the front by re-adding it.
- c.Add(d)
-
- // Size is maxSize.
- if got, want := c.Size(), uint64(maxSize); got != want {
- t.Errorf("c.Size() got %v, want %v", got, want)
- }
-
- // c contains d.
- if got, want := c.contains(d), true; got != want {
- t.Errorf("c.contains(d) got %v want %v", got, want)
- }
-
- // Add maxSize-1 more elements. d should again be oldest element.
- for i := 0; i < maxSize-1; i++ {
- c.Add(NewNegativeDirent(""))
- }
-
- // Size is maxSize.
- if got, want := c.Size(), uint64(maxSize); got != want {
- t.Errorf("c.Size() got %v, want %v", got, want)
- }
-
- // c contains d.
- if got, want := c.contains(d), true; got != want {
- t.Errorf("c.contains(d) got %v want %v", got, want)
- }
-
- // Add one more element, which will bump d from the cache.
- c.Add(NewNegativeDirent(""))
-
- // Size is maxSize.
- if got, want := c.Size(), uint64(maxSize); got != want {
- t.Errorf("c.Size() got %v, want %v", got, want)
- }
-
- // c does not contain d.
- if got, want := c.contains(d), false; got != want {
- t.Errorf("c.contains(d) got %v want %v", got, want)
- }
-
- // Invalidating causes size to be 0 and list to be empty.
- c.Invalidate()
- if got, want := c.Size(), uint64(0); got != want {
- t.Errorf("c.Size() got %v, want %v", got, want)
- }
- if got, want := c.list.Empty(), true; got != want {
- t.Errorf("c.list.Empty() got %v, want %v", got, want)
- }
-
- // Fill cache with maxSize dirents.
- for i := 0; i < maxSize; i++ {
- c.Add(NewNegativeDirent(""))
- }
-}
-
-func TestDirentCacheLimiter(t *testing.T) {
- const (
- globalMaxSize = 5
- maxSize = 3
- )
-
- limit := NewDirentCacheLimiter(globalMaxSize)
- c1 := NewDirentCache(maxSize)
- c1.limit = limit
- c2 := NewDirentCache(maxSize)
- c2.limit = limit
-
- // Create a Dirent d.
- d := NewNegativeDirent("")
-
- // Add d to the cache.
- c1.Add(d)
- if got, want := c1.Size(), uint64(1); got != want {
- t.Errorf("c1.Size() got %v, want %v", got, want)
- }
-
- // Add maxSize-1 more elements. d should be oldest element.
- for i := 0; i < maxSize-1; i++ {
- c1.Add(NewNegativeDirent(""))
- }
- if got, want := c1.Size(), uint64(maxSize); got != want {
- t.Errorf("c1.Size() got %v, want %v", got, want)
- }
-
- // Check that d is still there.
- if got, want := c1.contains(d), true; got != want {
- t.Errorf("c1.contains(d) got %v want %v", got, want)
- }
-
- // Fill up the other cache, it will start dropping old entries from the cache
- // when the global limit is reached.
- for i := 0; i < maxSize; i++ {
- c2.Add(NewNegativeDirent(""))
- }
-
- // Check that c2 holds only what's remaining from the global max.
- if got, want := c2.Size(), globalMaxSize-maxSize; int(got) != want {
- t.Errorf("c2.Size() got %v, want %v", got, want)
- }
-
- // Check that d was not dropped.
- if got, want := c1.contains(d), true; got != want {
- t.Errorf("c1.contains(d) got %v want %v", got, want)
- }
-
- // Add an entry that will eventually be dropped. The check is done later.
- drop := NewNegativeDirent("")
- c1.Add(drop)
-
- // Check that d is bumped to front even when global limit is reached.
- c1.Add(d)
- if got, want := c1.contains(d), true; got != want {
- t.Errorf("c1.contains(d) got %v want %v", got, want)
- }
-
- // Add 2 more elements and check that:
- // - d is still in the list: to verify that d was bumped
- // - d2/d3 are in the list: older entries are dropped when global limit is
- // reached.
- // - drop is not in the list: indeed older elements are dropped.
- d2 := NewNegativeDirent("")
- c1.Add(d2)
- d3 := NewNegativeDirent("")
- c1.Add(d3)
- if got, want := c1.contains(d), true; got != want {
- t.Errorf("c1.contains(d) got %v want %v", got, want)
- }
- if got, want := c1.contains(d2), true; got != want {
- t.Errorf("c1.contains(d2) got %v want %v", got, want)
- }
- if got, want := c1.contains(d3), true; got != want {
- t.Errorf("c1.contains(d3) got %v want %v", got, want)
- }
- if got, want := c1.contains(drop), false; got != want {
- t.Errorf("c1.contains(drop) got %v want %v", got, want)
- }
-
- // Drop all entries from one cache. The other will be allowed to grow.
- c1.Invalidate()
- c2.Add(NewNegativeDirent(""))
- if got, want := c2.Size(), uint64(maxSize); got != want {
- t.Errorf("c2.Size() got %v, want %v", got, want)
- }
-}
-
-// TestNilDirentCache tests that a nil cache supports all cache operations, but
-// treats them as no-ops.
-func TestNilDirentCache(t *testing.T) {
- // Create a nil cache.
- var c *DirentCache
-
- // Size is zero.
- if got, want := c.Size(), uint64(0); got != want {
- t.Errorf("c.Size() got %v, want %v", got, want)
- }
-
- // Call Add.
- c.Add(NewNegativeDirent(""))
-
- // Size is zero.
- if got, want := c.Size(), uint64(0); got != want {
- t.Errorf("c.Size() got %v, want %v", got, want)
- }
-
- // Call Remove.
- c.Remove(NewNegativeDirent(""))
-
- // Size is zero.
- if got, want := c.Size(), uint64(0); got != want {
- t.Errorf("c.Size() got %v, want %v", got, want)
- }
-
- // Call Invalidate.
- c.Invalidate()
-
- // Size is zero.
- if got, want := c.Size(), uint64(0); got != want {
- t.Errorf("c.Size() got %v, want %v", got, want)
- }
-}
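
The deleted cache tests above pin down three behaviors: re-adding an entry bumps it to the front, the oldest entry is evicted once the per-cache limit is exceeded, and a nil cache is a no-op. A compact standalone sketch of the first two behaviors, built on the standard library, is shown below; the `lruCache` type is hypothetical, not DirentCache itself.

```go
package main

import (
	"container/list"
	"fmt"
)

// lruCache keeps at most maxSize entries; re-adding an entry moves it to the
// front, and adding past the limit evicts the least recently added entry.
type lruCache struct {
	maxSize  int
	order    *list.List               // front = most recent
	elements map[string]*list.Element // entry name -> node in order
}

func newLRUCache(maxSize int) *lruCache {
	return &lruCache{
		maxSize:  maxSize,
		order:    list.New(),
		elements: map[string]*list.Element{},
	}
}

// Add inserts name, bumping it to the front if already present and evicting
// the oldest entry if the cache grows past maxSize.
func (c *lruCache) Add(name string) {
	if e, ok := c.elements[name]; ok {
		c.order.MoveToFront(e)
		return
	}
	c.elements[name] = c.order.PushFront(name)
	if c.order.Len() > c.maxSize {
		oldest := c.order.Back()
		c.order.Remove(oldest)
		delete(c.elements, oldest.Value.(string))
	}
}

// Contains reports whether name is still cached.
func (c *lruCache) Contains(name string) bool {
	_, ok := c.elements[name]
	return ok
}

func main() {
	c := newLRUCache(3)
	c.Add("d")
	c.Add("a")
	c.Add("b")
	c.Add("d") // bump d to the front
	c.Add("c") // evicts "a", the oldest entry
	fmt.Println(c.Contains("d"), c.Contains("a")) // true false
}
```
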
diff --git a/pkg/sentry/fs/dirent_list.go b/pkg/sentry/fs/dirent_list.go
new file mode 100644
index 000000000..00dd39146
--- /dev/null
+++ b/pkg/sentry/fs/dirent_list.go
@@ -0,0 +1,221 @@
+package fs
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type direntElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (direntElementMapper) linkerFor(elem *Dirent) *Dirent { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type direntList struct {
+ head *Dirent
+ tail *Dirent
+}
+
+// Reset resets list l to the empty state.
+func (l *direntList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *direntList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *direntList) Front() *Dirent {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *direntList) Back() *Dirent {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *direntList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (direntElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *direntList) PushFront(e *Dirent) {
+ linker := direntElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ direntElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *direntList) PushBack(e *Dirent) {
+ linker := direntElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ direntElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *direntList) PushBackList(m *direntList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ direntElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ direntElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *direntList) InsertAfter(b, e *Dirent) {
+ bLinker := direntElementMapper{}.linkerFor(b)
+ eLinker := direntElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ direntElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *direntList) InsertBefore(a, e *Dirent) {
+ aLinker := direntElementMapper{}.linkerFor(a)
+ eLinker := direntElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ direntElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *direntList) Remove(e *Dirent) {
+ linker := direntElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ direntElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ direntElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type direntEntry struct {
+ next *Dirent
+ prev *Dirent
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *direntEntry) Next() *Dirent {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *direntEntry) Prev() *Dirent {
+ return e.prev
+}
+
+// SetNext assigns 'entry' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *direntEntry) SetNext(elem *Dirent) {
+ e.next = elem
+}
+
+// SetPrev assigns 'entry' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *direntEntry) SetPrev(elem *Dirent) {
+ e.prev = elem
+}
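
The generated file above is only the container half of the intrusive-list pattern; the element half comes from the element type embedding the entry struct (Dirent embeds direntEntry elsewhere in this package). Below is a minimal self-contained sketch of the same pattern using stand-in names (node, nodeEntry, nodeList are illustrative, not gVisor types), reduced to just the operations needed to show the shape.

package main

import "fmt"

// nodeEntry mirrors direntEntry: intrusive prev/next pointers plus the
// linker methods the generated list expects.
type nodeEntry struct {
	next *node
	prev *node
}

func (e *nodeEntry) Next() *node        { return e.next }
func (e *nodeEntry) Prev() *node        { return e.prev }
func (e *nodeEntry) SetNext(elem *node) { e.next = elem }
func (e *nodeEntry) SetPrev(elem *node) { e.prev = elem }

// node is the element type; embedding nodeEntry gives it the linker methods,
// so placing it on a list needs no additional allocation.
type node struct {
	nodeEntry
	value int
}

// nodeList mirrors the generated direntList/eventList shape, reduced to the
// operations used in this sketch.
type nodeList struct {
	head *node
	tail *node
}

func (l *nodeList) Front() *node { return l.head }

func (l *nodeList) PushBack(e *node) {
	e.SetNext(nil)
	e.SetPrev(l.tail)
	if l.tail != nil {
		l.tail.SetNext(e)
	} else {
		l.head = e
	}
	l.tail = e
}

func main() {
	var l nodeList // The zero value is an empty, ready-to-use list.
	for i := 1; i <= 3; i++ {
		l.PushBack(&node{value: i})
	}
	for e := l.Front(); e != nil; e = e.Next() {
		fmt.Println(e.value)
	}
}

Because the links live inside the element itself, adding an element allocates nothing and removal never has to search the list, which is what the O(1) claim in the generated comment refers to.
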
diff --git a/pkg/sentry/fs/dirent_refs_test.go b/pkg/sentry/fs/dirent_refs_test.go
deleted file mode 100644
index e2b66f357..000000000
--- a/pkg/sentry/fs/dirent_refs_test.go
+++ /dev/null
@@ -1,418 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fs
-
-import (
- "testing"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
-)
-
-func newMockDirInode(ctx context.Context, cache *DirentCache) *Inode {
- return NewMockInode(ctx, NewMockMountSource(cache), StableAttr{Type: Directory})
-}
-
-func TestWalkPositive(t *testing.T) {
- // refs == 0 -> one reference.
- // refs == -1 -> has been destroyed.
-
- ctx := contexttest.Context(t)
- root := NewDirent(ctx, newMockDirInode(ctx, nil), "root")
-
- if got := root.ReadRefs(); got != 1 {
- t.Fatalf("root has a ref count of %d, want %d", got, 1)
- }
-
- name := "d"
- d, err := root.walk(ctx, root, name, false)
- if err != nil {
- t.Fatalf("root.walk(root, %q) got %v, want nil", name, err)
- }
-
- if got := root.ReadRefs(); got != 2 {
- t.Fatalf("root has a ref count of %d, want %d", got, 2)
- }
-
- if got := d.ReadRefs(); got != 1 {
- t.Fatalf("child name = %q has a ref count of %d, want %d", d.name, got, 1)
- }
-
- d.DecRef(ctx)
-
- if got := root.ReadRefs(); got != 1 {
- t.Fatalf("root has a ref count of %d, want %d", got, 1)
- }
-
- if got := d.ReadRefs(); got != 0 {
- t.Fatalf("child name = %q has a ref count of %d, want %d", d.name, got, 0)
- }
-
- root.flush(ctx)
-
- if got := len(root.children); got != 0 {
- t.Fatalf("root has %d children, want %d", got, 0)
- }
-}
-
-func TestWalkNegative(t *testing.T) {
- // refs == 0 -> one reference.
- // refs == -1 -> has been destroyed.
-
- ctx := contexttest.Context(t)
- root := NewDirent(ctx, NewEmptyDir(ctx, nil), "root")
- mn := root.Inode.InodeOperations.(*mockInodeOperationsLookupNegative)
-
- if got := root.ReadRefs(); got != 1 {
- t.Fatalf("root has a ref count of %d, want %d", got, 1)
- }
-
- name := "d"
- for i := 0; i < 100; i++ {
- _, err := root.walk(ctx, root, name, false)
- if err != unix.ENOENT {
- t.Fatalf("root.walk(root, %q) got %v, want %v", name, err, unix.ENOENT)
- }
- }
-
- if got := root.ReadRefs(); got != 1 {
- t.Fatalf("root has a ref count of %d, want %d", got, 1)
- }
-
- if got := len(root.children); got != 1 {
- t.Fatalf("root has %d children, want %d", got, 1)
- }
-
- w, ok := root.children[name]
- if !ok {
- t.Fatalf("root wants child at %q", name)
- }
-
- child := w.Get()
- if child == nil {
- t.Fatalf("root wants to resolve weak reference")
- }
-
- if !child.(*Dirent).IsNegative() {
- t.Fatalf("root found positive child at %q, want negative", name)
- }
-
- if got := child.(*Dirent).ReadRefs(); got != 2 {
- t.Fatalf("child has a ref count of %d, want %d", got, 2)
- }
-
- child.DecRef(ctx)
-
- if got := child.(*Dirent).ReadRefs(); got != 1 {
- t.Fatalf("child has a ref count of %d, want %d", got, 1)
- }
-
- if got := len(root.children); got != 1 {
- t.Fatalf("root has %d children, want %d", got, 1)
- }
-
- root.DecRef(ctx)
-
- if got := root.ReadRefs(); got != 0 {
- t.Fatalf("root has a ref count of %d, want %d", got, 0)
- }
-
- AsyncBarrier()
-
- if got := mn.releaseCalled; got != true {
- t.Fatalf("root.Close was called %v, want true", got)
- }
-}
-
-type mockInodeOperationsLookupNegative struct {
- *MockInodeOperations
- releaseCalled bool
-}
-
-func NewEmptyDir(ctx context.Context, cache *DirentCache) *Inode {
- m := NewMockMountSource(cache)
- return NewInode(ctx, &mockInodeOperationsLookupNegative{
- MockInodeOperations: NewMockInodeOperations(ctx),
- }, m, StableAttr{Type: Directory})
-}
-
-func (m *mockInodeOperationsLookupNegative) Lookup(ctx context.Context, dir *Inode, p string) (*Dirent, error) {
- return NewNegativeDirent(p), nil
-}
-
-func (m *mockInodeOperationsLookupNegative) Release(context.Context) {
- m.releaseCalled = true
-}
-
-func TestHashNegativeToPositive(t *testing.T) {
- // refs == 0 -> one reference.
- // refs == -1 -> has been destroyed.
-
- ctx := contexttest.Context(t)
- root := NewDirent(ctx, NewEmptyDir(ctx, nil), "root")
-
- name := "d"
- _, err := root.walk(ctx, root, name, false)
- if err != unix.ENOENT {
- t.Fatalf("root.walk(root, %q) got %v, want %v", name, err, unix.ENOENT)
- }
-
- if got := root.exists(ctx, root, name); got != false {
- t.Fatalf("got %q exists, want does not exist", name)
- }
-
- f, err := root.Create(ctx, root, name, FileFlags{}, FilePermissions{})
- if err != nil {
- t.Fatalf("root.Create(%q, _), got error %v, want nil", name, err)
- }
- d := f.Dirent
-
- if d.IsNegative() {
- t.Fatalf("got negative Dirent, want positive")
- }
-
- if got := d.ReadRefs(); got != 1 {
- t.Fatalf("child %q has a ref count of %d, want %d", name, got, 1)
- }
-
- if got := root.ReadRefs(); got != 2 {
- t.Fatalf("root has a ref count of %d, want %d", got, 2)
- }
-
- if got := len(root.children); got != 1 {
- t.Fatalf("got %d children, want %d", got, 1)
- }
-
- w, ok := root.children[name]
- if !ok {
- t.Fatalf("failed to find weak reference to %q", name)
- }
-
- child := w.Get()
- if child == nil {
- t.Fatalf("want to resolve weak reference")
- }
-
- if child.(*Dirent) != d {
- t.Fatalf("got foreign child")
- }
-}
-
-func TestRevalidate(t *testing.T) {
- // refs == 0 -> one reference.
- // refs == -1 -> has been destroyed.
-
- for _, test := range []struct {
- // desc is the test's description.
- desc string
-
- // Whether to make negative Dirents.
- makeNegative bool
- }{
- {
- desc: "Revalidate negative Dirent",
- makeNegative: true,
- },
- {
- desc: "Revalidate positive Dirent",
- makeNegative: false,
- },
- } {
- t.Run(test.desc, func(t *testing.T) {
- ctx := contexttest.Context(t)
- root := NewDirent(ctx, NewMockInodeRevalidate(ctx, test.makeNegative), "root")
-
- name := "d"
- d1, err := root.walk(ctx, root, name, false)
- if !test.makeNegative && err != nil {
- t.Fatalf("root.walk(root, %q) got %v, want nil", name, err)
- }
- d2, err := root.walk(ctx, root, name, false)
- if !test.makeNegative && err != nil {
- t.Fatalf("root.walk(root, %q) got %v, want nil", name, err)
- }
- if !test.makeNegative && d1 == d2 {
- t.Fatalf("revalidating walk got same *Dirent, want different")
- }
- if got := len(root.children); got != 1 {
- t.Errorf("revalidating walk got %d children, want %d", got, 1)
- }
- })
- }
-}
-
-type MockInodeOperationsRevalidate struct {
- *MockInodeOperations
- makeNegative bool
-}
-
-func NewMockInodeRevalidate(ctx context.Context, makeNegative bool) *Inode {
- mn := NewMockInodeOperations(ctx)
- m := NewMockMountSource(nil)
- m.MountSourceOperations.(*MockMountSourceOps).revalidate = true
- return NewInode(ctx, &MockInodeOperationsRevalidate{MockInodeOperations: mn, makeNegative: makeNegative}, m, StableAttr{Type: Directory})
-}
-
-func (m *MockInodeOperationsRevalidate) Lookup(ctx context.Context, dir *Inode, p string) (*Dirent, error) {
- if !m.makeNegative {
- return m.MockInodeOperations.Lookup(ctx, dir, p)
- }
- return NewNegativeDirent(p), nil
-}
-
-func TestCreateExtraRefs(t *testing.T) {
- // refs == 0 -> one reference.
- // refs == -1 -> has been destroyed.
-
- ctx := contexttest.Context(t)
- for _, test := range []struct {
- // desc is the test's description.
- desc string
-
- // root is the Dirent to create from.
- root *Dirent
-
- // expected references on walked Dirent.
- refs int64
- }{
- {
- desc: "Create caching",
- root: NewDirent(ctx, NewEmptyDir(ctx, NewDirentCache(1)), "root"),
- refs: 2,
- },
- {
- desc: "Create not caching",
- root: NewDirent(ctx, NewEmptyDir(ctx, nil), "root"),
- refs: 1,
- },
- } {
- t.Run(test.desc, func(t *testing.T) {
- name := "d"
- f, err := test.root.Create(ctx, test.root, name, FileFlags{}, FilePermissions{})
- if err != nil {
- t.Fatalf("root.Create(root, %q) failed: %v", name, err)
- }
- d := f.Dirent
-
- if got := d.ReadRefs(); got != test.refs {
- t.Errorf("dirent has a ref count of %d, want %d", got, test.refs)
- }
- })
- }
-}
-
-func TestRemoveExtraRefs(t *testing.T) {
- // refs == 0 -> one reference.
- // refs == -1 -> has been destroyed.
-
- ctx := contexttest.Context(t)
- for _, test := range []struct {
- // desc is the test's description.
- desc string
-
- // root is the Dirent to make and remove from.
- root *Dirent
- }{
- {
- desc: "Remove caching",
- root: NewDirent(ctx, NewEmptyDir(ctx, NewDirentCache(1)), "root"),
- },
- {
- desc: "Remove not caching",
- root: NewDirent(ctx, NewEmptyDir(ctx, nil), "root"),
- },
- } {
- t.Run(test.desc, func(t *testing.T) {
- name := "d"
- f, err := test.root.Create(ctx, test.root, name, FileFlags{}, FilePermissions{})
- if err != nil {
- t.Fatalf("root.Create(%q, _) failed: %v", name, err)
- }
- d := f.Dirent
-
- if err := test.root.Remove(contexttest.Context(t), test.root, name, false /* dirPath */); err != nil {
- t.Fatalf("root.Remove(root, %q) failed: %v", name, err)
- }
-
- if got := d.ReadRefs(); got != 1 {
- t.Fatalf("dirent has a ref count of %d, want %d", got, 1)
- }
-
- d.DecRef(ctx)
-
- test.root.flush(ctx)
-
- if got := len(test.root.children); got != 0 {
- t.Errorf("root has %d children, want %d", got, 0)
- }
- })
- }
-}
-
-func TestRenameExtraRefs(t *testing.T) {
- // refs == 0 -> one reference.
- // refs == -1 -> has been destroyed.
-
- for _, test := range []struct {
- // desc is the test's description.
- desc string
-
- // cache of extra Dirent references, may be nil.
- cache *DirentCache
- }{
- {
- desc: "Rename no caching",
- cache: nil,
- },
- {
- desc: "Rename caching",
- cache: NewDirentCache(5),
- },
- } {
- t.Run(test.desc, func(t *testing.T) {
- ctx := contexttest.Context(t)
-
- dirAttr := StableAttr{Type: Directory}
-
- oldParent := NewDirent(ctx, NewMockInode(ctx, NewMockMountSource(test.cache), dirAttr), "old_parent")
- newParent := NewDirent(ctx, NewMockInode(ctx, NewMockMountSource(test.cache), dirAttr), "new_parent")
-
- renamed, err := oldParent.Walk(ctx, oldParent, "old_child")
- if err != nil {
- t.Fatalf("Walk(oldParent, %q) got error %v, want nil", "old_child", err)
- }
- replaced, err := newParent.Walk(ctx, oldParent, "new_child")
- if err != nil {
- t.Fatalf("Walk(newParent, %q) got error %v, want nil", "new_child", err)
- }
-
-			if err := Rename(contexttest.RootContext(t), oldParent /* root */, oldParent, "old_child", newParent, "new_child"); err != nil {
- t.Fatalf("Rename got error %v, want nil", err)
- }
-
- oldParent.flush(ctx)
- newParent.flush(ctx)
-
- // Expect to have only active references.
- if got := renamed.ReadRefs(); got != 1 {
- t.Errorf("renamed has ref count %d, want only active references %d", got, 1)
- }
- if got := replaced.ReadRefs(); got != 1 {
- t.Errorf("replaced has ref count %d, want only active references %d", got, 1)
- }
- })
- }
-}
diff --git a/pkg/sentry/fs/event_list.go b/pkg/sentry/fs/event_list.go
new file mode 100644
index 000000000..ceb97b703
--- /dev/null
+++ b/pkg/sentry/fs/event_list.go
@@ -0,0 +1,221 @@
+package fs
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type eventElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (eventElementMapper) linkerFor(elem *Event) *Event { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type eventList struct {
+ head *Event
+ tail *Event
+}
+
+// Reset resets list l to the empty state.
+func (l *eventList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *eventList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *eventList) Front() *Event {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *eventList) Back() *Event {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *eventList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (eventElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *eventList) PushFront(e *Event) {
+ linker := eventElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ eventElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *eventList) PushBack(e *Event) {
+ linker := eventElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ eventElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *eventList) PushBackList(m *eventList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ eventElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ eventElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *eventList) InsertAfter(b, e *Event) {
+ bLinker := eventElementMapper{}.linkerFor(b)
+ eLinker := eventElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ eventElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *eventList) InsertBefore(a, e *Event) {
+ aLinker := eventElementMapper{}.linkerFor(a)
+ eLinker := eventElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ eventElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *eventList) Remove(e *Event) {
+ linker := eventElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ eventElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ eventElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type eventEntry struct {
+ next *Event
+ prev *Event
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *eventEntry) Next() *Event {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *eventEntry) Prev() *Event {
+ return e.prev
+}
+
+// SetNext assigns 'entry' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *eventEntry) SetNext(elem *Event) {
+ e.next = elem
+}
+
+// SetPrev assigns 'entry' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *eventEntry) SetPrev(elem *Event) {
+ e.prev = elem
+}
diff --git a/pkg/sentry/fs/fdpipe/BUILD b/pkg/sentry/fs/fdpipe/BUILD
deleted file mode 100644
index 5c889c861..000000000
--- a/pkg/sentry/fs/fdpipe/BUILD
+++ /dev/null
@@ -1,54 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "fdpipe",
- srcs = [
- "pipe.go",
- "pipe_opener.go",
- "pipe_state.go",
- ],
- imports = ["gvisor.dev/gvisor/pkg/sentry/fs"],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/fd",
- "//pkg/fdnotifier",
- "//pkg/log",
- "//pkg/safemem",
- "//pkg/secio",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/fsutil",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- "//pkg/waiter",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "fdpipe_test",
- size = "small",
- srcs = [
- "pipe_opener_test.go",
- "pipe_test.go",
- ],
- library = ":fdpipe",
- deps = [
- "//pkg/context",
- "//pkg/errors",
- "//pkg/errors/linuxerr",
- "//pkg/fd",
- "//pkg/fdnotifier",
- "//pkg/hostarch",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/fs",
- "//pkg/syserror",
- "//pkg/usermem",
- "@com_github_google_uuid//:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/sentry/fs/fdpipe/fdpipe_state_autogen.go b/pkg/sentry/fs/fdpipe/fdpipe_state_autogen.go
new file mode 100644
index 000000000..92dcd2391
--- /dev/null
+++ b/pkg/sentry/fs/fdpipe/fdpipe_state_autogen.go
@@ -0,0 +1,41 @@
+// automatically generated by stateify.
+
+package fdpipe
+
+import (
+ "gvisor.dev/gvisor/pkg/sentry/fs"
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (p *pipeOperations) StateTypeName() string {
+ return "pkg/sentry/fs/fdpipe.pipeOperations"
+}
+
+func (p *pipeOperations) StateFields() []string {
+ return []string{
+ "flags",
+ "opener",
+ "readAheadBuffer",
+ }
+}
+
+// +checklocksignore
+func (p *pipeOperations) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+ var flagsValue fs.FileFlags = p.saveFlags()
+ stateSinkObject.SaveValue(0, flagsValue)
+ stateSinkObject.Save(1, &p.opener)
+ stateSinkObject.Save(2, &p.readAheadBuffer)
+}
+
+// +checklocksignore
+func (p *pipeOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.LoadWait(1, &p.opener)
+ stateSourceObject.Load(2, &p.readAheadBuffer)
+ stateSourceObject.LoadValue(0, new(fs.FileFlags), func(y interface{}) { p.loadFlags(y.(fs.FileFlags)) })
+ stateSourceObject.AfterLoad(p.afterLoad)
+}
+
+func init() {
+ state.Register((*pipeOperations)(nil))
+}
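
The autogenerated file above follows a fixed stateify shape: StateTypeName and StateFields describe the type, StateSave and StateLoad walk the fields by index, value hooks (saveFlags/loadFlags) handle fields that cannot be saved directly, and init registers the type. The hand-written sketch below shows that shape for a hypothetical counter type; it uses only the state.Sink/state.Source calls visible above, and it assumes, without checking the state package further, that these four methods are all Register requires.

package fdpipe

import "gvisor.dev/gvisor/pkg/state"

// counter is a hypothetical type used only to illustrate the generated
// save/restore shape; it is not part of the original change.
type counter struct {
	hits  uint64
	label string
}

func (c *counter) StateTypeName() string {
	return "pkg/sentry/fs/fdpipe.counter"
}

func (c *counter) StateFields() []string {
	return []string{
		"hits",
		"label",
	}
}

func (c *counter) StateSave(stateSinkObject state.Sink) {
	stateSinkObject.Save(0, &c.hits)
	stateSinkObject.Save(1, &c.label)
}

func (c *counter) StateLoad(stateSourceObject state.Source) {
	stateSourceObject.Load(0, &c.hits)
	stateSourceObject.Load(1, &c.label)
}

func init() {
	state.Register((*counter)(nil))
}
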
diff --git a/pkg/sentry/fs/fdpipe/pipe_opener_test.go b/pkg/sentry/fs/fdpipe/pipe_opener_test.go
deleted file mode 100644
index 89d8be741..000000000
--- a/pkg/sentry/fs/fdpipe/pipe_opener_test.go
+++ /dev/null
@@ -1,523 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fdpipe
-
-import (
- "bytes"
- "fmt"
- "io"
- "os"
- "path"
- "testing"
- "time"
-
- "github.com/google/uuid"
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/errors/linuxerr"
- "gvisor.dev/gvisor/pkg/fd"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/fs"
- "gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-type hostOpener struct {
- name string
-}
-
-func (h *hostOpener) NonBlockingOpen(_ context.Context, p fs.PermMask) (*fd.FD, error) {
- var flags int
- switch {
- case p.Read && p.Write:
- flags = unix.O_RDWR
- case p.Write:
- flags = unix.O_WRONLY
- case p.Read:
- flags = unix.O_RDONLY
- default:
- return nil, unix.EINVAL
- }
- f, err := unix.Open(h.name, flags|unix.O_NONBLOCK, 0666)
- if err != nil {
- return nil, err
- }
- return fd.New(f), nil
-}
-
-func pipename() string {
- return fmt.Sprintf(path.Join(os.TempDir(), "test-named-pipe-%s"), uuid.New())
-}
-
-func mkpipe(name string) error {
- return unix.Mknod(name, unix.S_IFIFO|0666, 0)
-}
-
-func TestTryOpen(t *testing.T) {
- for _, test := range []struct {
- // desc is the test's description.
- desc string
-
- // makePipe is true if the test case should create the pipe.
- makePipe bool
-
- // flags are the fs.FileFlags used to open the pipe.
- flags fs.FileFlags
-
- // expectFile is true if a fs.File is expected.
- expectFile bool
-
- // err is the expected error
- err error
- }{
- {
- desc: "FileFlags lacking Read and Write are invalid",
- makePipe: false,
- flags: fs.FileFlags{}, /* bogus */
- expectFile: false,
- err: unix.EINVAL,
- },
- {
- desc: "NonBlocking Read only error returns immediately",
- makePipe: false, /* causes the error */
- flags: fs.FileFlags{Read: true, NonBlocking: true},
- expectFile: false,
- err: unix.ENOENT,
- },
- {
- desc: "NonBlocking Read only success returns immediately",
- makePipe: true,
- flags: fs.FileFlags{Read: true, NonBlocking: true},
- expectFile: true,
- err: nil,
- },
- {
- desc: "NonBlocking Write only error returns immediately",
- makePipe: false, /* causes the error */
- flags: fs.FileFlags{Write: true, NonBlocking: true},
- expectFile: false,
- err: unix.ENOENT,
- },
- {
- desc: "NonBlocking Write only no reader error returns immediately",
- makePipe: true,
- flags: fs.FileFlags{Write: true, NonBlocking: true},
- expectFile: false,
- err: unix.ENXIO,
- },
- {
- desc: "ReadWrite error returns immediately",
- makePipe: false, /* causes the error */
- flags: fs.FileFlags{Read: true, Write: true},
- expectFile: false,
- err: unix.ENOENT,
- },
- {
- desc: "ReadWrite returns immediately",
- makePipe: true,
- flags: fs.FileFlags{Read: true, Write: true},
- expectFile: true,
- err: nil,
- },
- {
- desc: "Blocking Write only returns open error",
- makePipe: false, /* causes the error */
- flags: fs.FileFlags{Write: true},
- expectFile: false,
- err: unix.ENOENT, /* from bogus perms */
- },
- {
- desc: "Blocking Read only returns open error",
- makePipe: false, /* causes the error */
- flags: fs.FileFlags{Read: true},
- expectFile: false,
- err: unix.ENOENT,
- },
- {
- desc: "Blocking Write only returns with syserror.ErrWouldBlock",
- makePipe: true,
- flags: fs.FileFlags{Write: true},
- expectFile: false,
- err: syserror.ErrWouldBlock,
- },
- {
- desc: "Blocking Read only returns with syserror.ErrWouldBlock",
- makePipe: true,
- flags: fs.FileFlags{Read: true},
- expectFile: false,
- err: syserror.ErrWouldBlock,
- },
- } {
- name := pipename()
- if test.makePipe {
- // Create the pipe. We do this per-test case to keep tests independent.
- if err := mkpipe(name); err != nil {
- t.Errorf("%s: failed to make host pipe: %v", test.desc, err)
- continue
- }
- defer unix.Unlink(name)
- }
-
- // Use a host opener to keep things simple.
- opener := &hostOpener{name: name}
-
- pipeOpenState := &pipeOpenState{}
- ctx := contexttest.Context(t)
- pipeOps, err := pipeOpenState.TryOpen(ctx, opener, test.flags)
- if unwrapError(err) != test.err {
- t.Errorf("%s: got error %v, want %v", test.desc, err, test.err)
- if pipeOps != nil {
-				// Clean up the state of the pipe, and remove the fd from the
-				// fdnotifier. Sadly this is needed to maintain the correctness
-				// of other tests because the fdnotifier is global.
- pipeOps.Release(ctx)
- }
- continue
- }
- if (pipeOps != nil) != test.expectFile {
- t.Errorf("%s: got non-nil file %v, want %v", test.desc, pipeOps != nil, test.expectFile)
- }
- if pipeOps != nil {
- // Same as above.
- pipeOps.Release(ctx)
- }
- }
-}
-
-func TestPipeOpenUnblocksEventually(t *testing.T) {
- for _, test := range []struct {
- // desc is the test's description.
- desc string
-
- // partnerIsReader is true if the goroutine opening the same pipe as the test case
- // should open the pipe read only. Otherwise write only. This also means that the
- // test case will open the pipe in the opposite way.
- partnerIsReader bool
-
-		// partnerIsBlocking is true if the goroutine opening the same pipe as the test case
-		// should do so without the O_NONBLOCK flag; otherwise it retries opening the pipe
-		// with O_NONBLOCK until ENXIO is no longer returned.
- partnerIsBlocking bool
- }{
- {
- desc: "Blocking Read with blocking writer partner opens eventually",
- partnerIsReader: false,
- partnerIsBlocking: true,
- },
- {
- desc: "Blocking Write with blocking reader partner opens eventually",
- partnerIsReader: true,
- partnerIsBlocking: true,
- },
- {
- desc: "Blocking Read with non-blocking writer partner opens eventually",
- partnerIsReader: false,
- partnerIsBlocking: false,
- },
- {
- desc: "Blocking Write with non-blocking reader partner opens eventually",
- partnerIsReader: true,
- partnerIsBlocking: false,
- },
- } {
- // Create the pipe. We do this per-test case to keep tests independent.
- name := pipename()
- if err := mkpipe(name); err != nil {
- t.Errorf("%s: failed to make host pipe: %v", test.desc, err)
- continue
- }
- defer unix.Unlink(name)
-
- // Spawn the partner.
- type fderr struct {
- fd int
- err error
- }
- errch := make(chan fderr, 1)
- go func() {
- var flags int
- if test.partnerIsReader {
- flags = unix.O_RDONLY
- } else {
- flags = unix.O_WRONLY
- }
- if test.partnerIsBlocking {
- fd, err := unix.Open(name, flags, 0666)
- errch <- fderr{fd: fd, err: err}
- } else {
- var fd int
- err := error(unix.ENXIO)
- for err == unix.ENXIO {
- fd, err = unix.Open(name, flags|unix.O_NONBLOCK, 0666)
- time.Sleep(1 * time.Second)
- }
- errch <- fderr{fd: fd, err: err}
- }
- }()
-
- // Setup file flags for either a read only or write only open.
- flags := fs.FileFlags{
- Read: !test.partnerIsReader,
- Write: test.partnerIsReader,
- }
-
- // Open the pipe in a blocking way, which should succeed eventually.
- opener := &hostOpener{name: name}
- ctx := contexttest.Context(t)
- pipeOps, err := Open(ctx, opener, flags)
- if pipeOps != nil {
- // Same as TestTryOpen.
- pipeOps.Release(ctx)
- }
-
- // Check that the partner opened the file successfully.
- e := <-errch
- if e.err != nil {
- t.Errorf("%s: partner got error %v, wanted nil", test.desc, e.err)
- continue
- }
- // If so, then close the partner fd to avoid leaking an fd.
- unix.Close(e.fd)
-
- // Check that our blocking open was successful.
- if err != nil {
- t.Errorf("%s: blocking open got error %v, wanted nil", test.desc, err)
- continue
- }
- if pipeOps == nil {
- t.Errorf("%s: blocking open got nil file, wanted non-nil", test.desc)
- continue
- }
- }
-}
-
-func TestCopiedReadAheadBuffer(t *testing.T) {
- // Create the pipe.
- name := pipename()
- if err := mkpipe(name); err != nil {
- t.Fatalf("failed to make host pipe: %v", err)
- }
- defer unix.Unlink(name)
-
- // We're taking advantage of the fact that pipes opened read only always return
- // success, but internally they are not deemed "opened" until we're sure that
- // another writer comes along. This means we can open the same pipe write only
-	// with no problems and write to it, given that opener.Open already tried to open
- // the pipe RDONLY and succeeded, which we know happened if TryOpen returns
-	// syserror.ErrWouldBlock.
- //
- // This simulates the open(RDONLY) <-> open(WRONLY)+write race we care about, but
- // does not cause our test to be racy (which would be terrible).
- opener := &hostOpener{name: name}
- pipeOpenState := &pipeOpenState{}
- ctx := contexttest.Context(t)
- pipeOps, err := pipeOpenState.TryOpen(ctx, opener, fs.FileFlags{Read: true})
- if pipeOps != nil {
- pipeOps.Release(ctx)
- t.Fatalf("open(%s, %o) got file, want nil", name, unix.O_RDONLY)
- }
- if err != syserror.ErrWouldBlock {
- t.Fatalf("open(%s, %o) got error %v, want %v", name, unix.O_RDONLY, err, syserror.ErrWouldBlock)
- }
-
- // Then open the same pipe write only and write some bytes to it. The next
- // time we try to open the pipe read only again via the pipeOpenState, we should
- // succeed and buffer some of the bytes written.
- fd, err := unix.Open(name, unix.O_WRONLY, 0666)
- if err != nil {
- t.Fatalf("open(%s, %o) got error %v, want nil", name, unix.O_WRONLY, err)
- }
- defer unix.Close(fd)
-
- data := []byte("hello")
- if n, err := unix.Write(fd, data); n != len(data) || err != nil {
- t.Fatalf("write(%v) got (%d, %v), want (%d, nil)", data, n, err, len(data))
- }
-
- // Try the read again, knowing that it should succeed this time.
- pipeOps, err = pipeOpenState.TryOpen(ctx, opener, fs.FileFlags{Read: true})
- if pipeOps == nil {
- t.Fatalf("open(%s, %o) got nil file, want not nil", name, unix.O_RDONLY)
- }
- defer pipeOps.Release(ctx)
-
- if err != nil {
- t.Fatalf("open(%s, %o) got error %v, want nil", name, unix.O_RDONLY, err)
- }
-
- inode := fs.NewMockInode(ctx, fs.NewMockMountSource(nil), fs.StableAttr{
- Type: fs.Pipe,
- })
- file := fs.NewFile(ctx, fs.NewDirent(ctx, inode, "pipe"), fs.FileFlags{Read: true}, pipeOps)
-
- // Check that the file we opened points to a pipe with a non-empty read ahead buffer.
- bufsize := len(pipeOps.readAheadBuffer)
- if bufsize != 1 {
- t.Fatalf("read ahead buffer got %d bytes, want %d", bufsize, 1)
- }
-
- // Now for the final test, try to read everything in, expecting to get back all of
- // the bytes that were written at once. Note that in the wild there is no atomic
- // read size so expecting to get all bytes from a single writer when there are
- // multiple readers is a bad expectation.
- buf := make([]byte, len(data))
- ioseq := usermem.BytesIOSequence(buf)
- n, err := pipeOps.Read(ctx, file, ioseq, 0)
- if err != nil {
- t.Fatalf("read request got error %v, want nil", err)
- }
- if n != int64(len(data)) {
- t.Fatalf("read request got %d bytes, want %d", n, len(data))
- }
- if !bytes.Equal(buf, data) {
- t.Errorf("read request got bytes [%v], want [%v]", buf, data)
- }
-}
-
-func TestPipeHangup(t *testing.T) {
- for _, test := range []struct {
- // desc is the test's description.
- desc string
-
- // flags control how we open our end of the pipe and must be read
- // only or write only. They also dicate how a coordinating partner
-		// only or write only. They also dictate how a coordinating partner
- flags fs.FileFlags
-
- // hangupSelf if true causes the test case to close our end of the pipe
- // and causes hangup errors to be asserted on our coordinating partner's
- // fd. If hangupSelf is false, then our partner's fd is closed and the
- // hangup errors are expected on our end of the pipe.
- hangupSelf bool
- }{
- {
- desc: "Read only gets hangup error",
- flags: fs.FileFlags{Read: true},
- },
- {
- desc: "Write only gets hangup error",
- flags: fs.FileFlags{Write: true},
- },
- {
- desc: "Read only generates hangup error",
- flags: fs.FileFlags{Read: true},
- hangupSelf: true,
- },
- {
- desc: "Write only generates hangup error",
- flags: fs.FileFlags{Write: true},
- hangupSelf: true,
- },
- } {
- if test.flags.Read == test.flags.Write {
- t.Errorf("%s: test requires a single reader or writer", test.desc)
- continue
- }
-
- // Create the pipe. We do this per-test case to keep tests independent.
- name := pipename()
- if err := mkpipe(name); err != nil {
- t.Errorf("%s: failed to make host pipe: %v", test.desc, err)
- continue
- }
- defer unix.Unlink(name)
-
- // Fire off a partner routine which tries to open the same pipe blocking,
- // which will synchronize with us. The channel allows us to get back the
- // fd once we expect this partner routine to succeed, so we can manifest
- // hangup events more directly.
- fdchan := make(chan int, 1)
- go func() {
- // Be explicit about the flags to protect the test from
- // misconfiguration.
- var flags int
- if test.flags.Read {
- flags = unix.O_WRONLY
- } else {
- flags = unix.O_RDONLY
- }
- fd, err := unix.Open(name, flags, 0666)
- if err != nil {
- t.Logf("Open(%q, %o, 0666) partner failed: %v", name, flags, err)
- }
- fdchan <- fd
- }()
-
- // Open our end in a blocking way to ensure that we coordinate.
- opener := &hostOpener{name: name}
- ctx := contexttest.Context(t)
- pipeOps, err := Open(ctx, opener, test.flags)
- if err != nil {
- t.Errorf("%s: Open got error %v, want nil", test.desc, err)
- continue
- }
- // Don't defer file.DecRef here because that causes the hangup we're
- // trying to test for.
-
- // Expect the partner routine to have coordinated with us and get back
- // its open fd.
- f := <-fdchan
- if f < 0 {
- t.Errorf("%s: partner routine got fd %d, want > 0", test.desc, f)
- pipeOps.Release(ctx)
- continue
- }
-
- if test.hangupSelf {
- // Hangup self and assert that our partner got the expected hangup
- // error.
- pipeOps.Release(ctx)
-
- if test.flags.Read {
- // Partner is writer.
- assertWriterHungup(t, test.desc, fd.NewReadWriter(f))
- } else {
- // Partner is reader.
- assertReaderHungup(t, test.desc, fd.NewReadWriter(f))
- }
- } else {
- // Hangup our partner and expect us to get the hangup error.
- unix.Close(f)
- defer pipeOps.Release(ctx)
-
- if test.flags.Read {
- assertReaderHungup(t, test.desc, pipeOps.(*pipeOperations).file)
- } else {
- assertWriterHungup(t, test.desc, pipeOps.(*pipeOperations).file)
- }
- }
- }
-}
-
-func assertReaderHungup(t *testing.T, desc string, reader io.Reader) bool {
-	// Drain the pipe completely; it might have stale data in it, but expect EOF eventually.
- var err error
- for err == nil {
- _, err = reader.Read(make([]byte, 10))
- }
- if err != io.EOF {
- t.Errorf("%s: read from self after hangup got error %v, want %v", desc, err, io.EOF)
- return false
- }
- return true
-}
-
-func assertWriterHungup(t *testing.T, desc string, writer io.Writer) bool {
- if _, err := writer.Write([]byte("hello")); !linuxerr.Equals(linuxerr.EPIPE, unwrapError(err)) {
- t.Errorf("%s: write to self after hangup got error %v, want %v", desc, err, linuxerr.EPIPE)
- return false
- }
- return true
-}
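
Several of the deleted cases above (the ENXIO row in TestTryOpen and the race described in TestCopiedReadAheadBuffer) lean on the host FIFO open rules: a non-blocking read-only open of a FIFO succeeds even with no writer, while a non-blocking write-only open fails with ENXIO until a reader exists. A small host-only Go sketch of just that asymmetry is below; the path is illustrative and the program is not part of this change.

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"golang.org/x/sys/unix"
)

func main() {
	// The path is illustrative; any writable location works.
	name := filepath.Join(os.TempDir(), "fifo-open-demo")
	if err := unix.Mknod(name, unix.S_IFIFO|0666, 0); err != nil {
		fmt.Println("mknod:", err)
		return
	}
	defer unix.Unlink(name)

	// With no reader, a non-blocking write-only open fails with ENXIO.
	_, err := unix.Open(name, unix.O_WRONLY|unix.O_NONBLOCK, 0)
	fmt.Println("write-only, no reader:", err) // expect ENXIO

	// A non-blocking read-only open succeeds even with no writer.
	rfd, err := unix.Open(name, unix.O_RDONLY|unix.O_NONBLOCK, 0)
	fmt.Println("read-only, no writer:", err) // expect <nil>

	// Once a reader exists, the non-blocking write-only open succeeds too,
	// which is the window TestCopiedReadAheadBuffer exploited.
	wfd, err := unix.Open(name, unix.O_WRONLY|unix.O_NONBLOCK, 0)
	fmt.Println("write-only, reader present:", err) // expect <nil>

	unix.Close(wfd)
	unix.Close(rfd)
}
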
diff --git a/pkg/sentry/fs/fdpipe/pipe_test.go b/pkg/sentry/fs/fdpipe/pipe_test.go
deleted file mode 100644
index 4c8905a7e..000000000
--- a/pkg/sentry/fs/fdpipe/pipe_test.go
+++ /dev/null
@@ -1,514 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fdpipe
-
-import (
- "bytes"
- "io"
- "os"
- "testing"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/errors"
- "gvisor.dev/gvisor/pkg/errors/linuxerr"
- "gvisor.dev/gvisor/pkg/fd"
- "gvisor.dev/gvisor/pkg/fdnotifier"
- "gvisor.dev/gvisor/pkg/hostarch"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/fs"
- "gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-func singlePipeFD() (int, error) {
- fds := make([]int, 2)
- if err := unix.Pipe(fds); err != nil {
- return -1, err
- }
- unix.Close(fds[1])
- return fds[0], nil
-}
-
-func singleDirFD() (int, error) {
- return unix.Open(os.TempDir(), unix.O_RDONLY, 0666)
-}
-
-func mockPipeDirent(t *testing.T) *fs.Dirent {
- ctx := contexttest.Context(t)
- node := fs.NewMockInodeOperations(ctx)
- node.UAttr = fs.UnstableAttr{
- Perms: fs.FilePermissions{
- User: fs.PermMask{Read: true, Write: true},
- },
- }
- inode := fs.NewInode(ctx, node, fs.NewMockMountSource(nil), fs.StableAttr{
- Type: fs.Pipe,
- BlockSize: hostarch.PageSize,
- })
- return fs.NewDirent(ctx, inode, "")
-}
-
-func TestNewPipe(t *testing.T) {
- for _, test := range []struct {
- // desc is the test's description.
- desc string
-
- // getfd generates the fd to pass to newPipeOperations.
- getfd func() (int, error)
-
- // flags are the fs.FileFlags passed to newPipeOperations.
- flags fs.FileFlags
-
- // readAheadBuffer is the buffer passed to newPipeOperations.
- readAheadBuffer []byte
-
- // err is the expected error.
- err error
- }{
- {
- desc: "Cannot make new pipe from bad fd",
- getfd: func() (int, error) { return -1, nil },
- err: unix.EINVAL,
- },
- {
- desc: "Cannot make new pipe from non-pipe fd",
- getfd: singleDirFD,
- err: unix.EINVAL,
- },
- {
- desc: "Can make new pipe from pipe fd",
- getfd: singlePipeFD,
- flags: fs.FileFlags{Read: true},
- readAheadBuffer: []byte("hello"),
- },
- } {
- gfd, err := test.getfd()
- if err != nil {
- t.Errorf("%s: getfd got (%d, %v), want (fd, nil)", test.desc, gfd, err)
- continue
- }
- f := fd.New(gfd)
-
- ctx := contexttest.Context(t)
- p, err := newPipeOperations(ctx, nil, test.flags, f, test.readAheadBuffer)
- if p != nil {
- // This is necessary to remove the fd from the global fd notifier.
- defer p.Release(ctx)
- } else {
- // If there is no p to DecRef on, because newPipeOperations failed, then the
- // file still needs to be closed.
- defer f.Close()
- }
-
- if err != test.err {
- t.Errorf("%s: got error %v, want %v", test.desc, err, test.err)
- continue
- }
- // Check the state of the pipe given that it was successfully opened.
- if err == nil {
- if p == nil {
- t.Errorf("%s: got nil pipe and nil error, want (pipe, nil)", test.desc)
- continue
- }
- if flags := p.flags; test.flags != flags {
- t.Errorf("%s: got file flags %v, want %v", test.desc, flags, test.flags)
- continue
- }
- if len(test.readAheadBuffer) != len(p.readAheadBuffer) {
- t.Errorf("%s: got read ahead buffer length %d, want %d", test.desc, len(p.readAheadBuffer), len(test.readAheadBuffer))
- continue
- }
- fileFlags, _, errno := unix.Syscall(unix.SYS_FCNTL, uintptr(p.file.FD()), unix.F_GETFL, 0)
- if errno != 0 {
- t.Errorf("%s: failed to get file flags for fd %d, got %v, want 0", test.desc, p.file.FD(), errno)
- continue
- }
- if fileFlags&unix.O_NONBLOCK == 0 {
- t.Errorf("%s: pipe is blocking, expected non-blocking", test.desc)
- continue
- }
- if !fdnotifier.HasFD(int32(f.FD())) {
- t.Errorf("%s: pipe fd %d is not registered for events", test.desc, f.FD())
- }
- }
- }
-}
-
-func TestPipeDestruction(t *testing.T) {
- fds := make([]int, 2)
- if err := unix.Pipe(fds); err != nil {
- t.Fatalf("failed to create pipes: got %v, want nil", err)
- }
- f := fd.New(fds[0])
-
- // We don't care about the other end, just use the read end.
- unix.Close(fds[1])
-
- // Test the read end, but it doesn't really matter which.
- ctx := contexttest.Context(t)
- p, err := newPipeOperations(ctx, nil, fs.FileFlags{Read: true}, f, nil)
- if err != nil {
- f.Close()
- t.Fatalf("newPipeOperations got error %v, want nil", err)
- }
- // Drop our only reference, which should trigger the destructor.
- p.Release(ctx)
-
- if fdnotifier.HasFD(int32(fds[0])) {
- t.Fatalf("after DecRef fdnotifier has fd %d, want no longer registered", fds[0])
- }
- if p.file != nil {
- t.Errorf("after DecRef got file, want nil")
- }
-}
-
-type Seek struct{}
-
-type ReadDir struct{}
-
-type Writev struct {
- Src usermem.IOSequence
-}
-
-type Readv struct {
- Dst usermem.IOSequence
-}
-
-type Fsync struct{}
-
-func TestPipeRequest(t *testing.T) {
- for _, test := range []struct {
- // desc is the test's description.
- desc string
-
- // request to execute.
- context interface{}
-
- // flags determines whether to use the read or write end
- // of the pipe, for this test it can only be Read or Write.
- flags fs.FileFlags
-
-		// keepOpenPartner, if false, closes the other end of the pipe immediately;
-		// otherwise closing is delayed until the end of the test.
- keepOpenPartner bool
-
- // expected error
- err error
- }{
- {
- desc: "ReadDir on pipe returns ENOTDIR",
- context: &ReadDir{},
- err: linuxerr.ENOTDIR,
- },
- {
- desc: "Fsync on pipe returns EINVAL",
- context: &Fsync{},
- err: linuxerr.EINVAL,
- },
- {
- desc: "Seek on pipe returns ESPIPE",
- context: &Seek{},
- err: linuxerr.ESPIPE,
- },
- {
- desc: "Readv on pipe from empty buffer returns nil",
- context: &Readv{Dst: usermem.BytesIOSequence(nil)},
- flags: fs.FileFlags{Read: true},
- },
- {
- desc: "Readv on pipe from non-empty buffer and closed partner returns EOF",
- context: &Readv{Dst: usermem.BytesIOSequence(make([]byte, 10))},
- flags: fs.FileFlags{Read: true},
- err: io.EOF,
- },
- {
- desc: "Readv on pipe from non-empty buffer and open partner returns EWOULDBLOCK",
- context: &Readv{Dst: usermem.BytesIOSequence(make([]byte, 10))},
- flags: fs.FileFlags{Read: true},
- keepOpenPartner: true,
- err: syserror.ErrWouldBlock,
- },
- {
- desc: "Writev on pipe from empty buffer returns nil",
- context: &Writev{Src: usermem.BytesIOSequence(nil)},
- flags: fs.FileFlags{Write: true},
- },
- {
- desc: "Writev on pipe from non-empty buffer and closed partner returns EPIPE",
- context: &Writev{Src: usermem.BytesIOSequence([]byte("hello"))},
- flags: fs.FileFlags{Write: true},
- err: linuxerr.EPIPE,
- },
- {
- desc: "Writev on pipe from non-empty buffer and open partner succeeds",
- context: &Writev{Src: usermem.BytesIOSequence([]byte("hello"))},
- flags: fs.FileFlags{Write: true},
- keepOpenPartner: true,
- },
- } {
- if test.flags.Read && test.flags.Write {
- panic("both read and write not supported for this test")
- }
-
- fds := make([]int, 2)
- if err := unix.Pipe(fds); err != nil {
- t.Errorf("%s: failed to create pipes: got %v, want nil", test.desc, err)
- continue
- }
-
- // Configure the fd and partner fd based on the file flags.
- testFd, partnerFd := fds[0], fds[1]
- if test.flags.Write {
- testFd, partnerFd = fds[1], fds[0]
- }
-
- // Configure closing the fds.
- if test.keepOpenPartner {
- defer unix.Close(partnerFd)
- } else {
- unix.Close(partnerFd)
- }
-
- // Create the pipe.
- ctx := contexttest.Context(t)
- p, err := newPipeOperations(ctx, nil, test.flags, fd.New(testFd), nil)
- if err != nil {
- t.Fatalf("%s: newPipeOperations got error %v, want nil", test.desc, err)
- }
- defer p.Release(ctx)
-
- inode := fs.NewMockInode(ctx, fs.NewMockMountSource(nil), fs.StableAttr{Type: fs.Pipe})
- file := fs.NewFile(ctx, fs.NewDirent(ctx, inode, "pipe"), fs.FileFlags{Read: true}, p)
-
- // Issue request via the appropriate function.
- switch c := test.context.(type) {
- case *Seek:
- _, err = p.Seek(ctx, file, 0, 0)
- case *ReadDir:
- _, err = p.Readdir(ctx, file, nil)
- case *Readv:
- _, err = p.Read(ctx, file, c.Dst, 0)
- case *Writev:
- _, err = p.Write(ctx, file, c.Src, 0)
- case *Fsync:
- err = p.Fsync(ctx, file, 0, fs.FileMaxOffset, fs.SyncAll)
- default:
- t.Errorf("%s: unknown request type %T", test.desc, test.context)
- }
-
- if linuxErr, ok := test.err.(*errors.Error); ok {
- if !linuxerr.Equals(linuxErr, unwrapError(err)) {
- t.Errorf("%s: got error %v, want %v", test.desc, err, test.err)
- }
- } else if test.err != unwrapError(err) {
- t.Errorf("%s: got error %v, want %v", test.desc, err, test.err)
- }
- }
-}
-
-func TestPipeReadAheadBuffer(t *testing.T) {
- fds := make([]int, 2)
- if err := unix.Pipe(fds); err != nil {
- t.Fatalf("failed to create pipes: got %v, want nil", err)
- }
- rfile := fd.New(fds[0])
-
- // Eventually close the write end, which is not wrapped in a pipe object.
- defer unix.Close(fds[1])
-
- // Write some bytes to this end.
- data := []byte("world")
- if n, err := unix.Write(fds[1], data); n != len(data) || err != nil {
- rfile.Close()
- t.Fatalf("write to pipe got (%d, %v), want (%d, nil)", n, err, len(data))
- }
- // Close the write end immediately, we don't care about it.
-
- buffered := []byte("hello ")
- ctx := contexttest.Context(t)
- p, err := newPipeOperations(ctx, nil, fs.FileFlags{Read: true}, rfile, buffered)
- if err != nil {
- rfile.Close()
- t.Fatalf("newPipeOperations got error %v, want nil", err)
- }
- defer p.Release(ctx)
-
- inode := fs.NewMockInode(ctx, fs.NewMockMountSource(nil), fs.StableAttr{
- Type: fs.Pipe,
- })
- file := fs.NewFile(ctx, fs.NewDirent(ctx, inode, "pipe"), fs.FileFlags{Read: true}, p)
-
- // In total we expect to read data + buffered.
- total := append(buffered, data...)
-
- buf := make([]byte, len(total))
- iov := usermem.BytesIOSequence(buf)
- n, err := p.Read(contexttest.Context(t), file, iov, 0)
- if err != nil {
- t.Fatalf("read request got error %v, want nil", err)
- }
- if n != int64(len(total)) {
- t.Fatalf("read request got %d bytes, want %d", n, len(total))
- }
- if !bytes.Equal(buf, total) {
- t.Errorf("read request got bytes [%v], want [%v]", buf, total)
- }
-}
-
-// This is very important for pipes in general: they can return EWOULDBLOCK, and
-// readers that block must keep reading until they have read all of the data
-// (and report it as such).
-func TestPipeReadsAccumulate(t *testing.T) {
- fds := make([]int, 2)
- if err := unix.Pipe(fds); err != nil {
- t.Fatalf("failed to create pipes: got %v, want nil", err)
- }
- rfile := fd.New(fds[0])
-
- // Eventually close the write end, it doesn't depend on a pipe object.
- defer unix.Close(fds[1])
-
- // Get a new read only pipe reference.
- ctx := contexttest.Context(t)
- p, err := newPipeOperations(ctx, nil, fs.FileFlags{Read: true}, rfile, nil)
- if err != nil {
- rfile.Close()
- t.Fatalf("newPipeOperations got error %v, want nil", err)
- }
- // Don't forget to remove the fd from the fd notifier. Otherwise other tests will
- // likely be borked, because it's global :(
- defer p.Release(ctx)
-
- inode := fs.NewMockInode(ctx, fs.NewMockMountSource(nil), fs.StableAttr{
- Type: fs.Pipe,
- })
- file := fs.NewFile(ctx, fs.NewDirent(ctx, inode, "pipe"), fs.FileFlags{Read: true}, p)
-
-	// Write some bytes to the pipe.
- data := []byte("some message")
- if n, err := unix.Write(fds[1], data); n != len(data) || err != nil {
- t.Fatalf("write to pipe got (%d, %v), want (%d, nil)", n, err, len(data))
- }
-
- // Construct a segment vec that is a bit more than we have written so we
- // trigger an EWOULDBLOCK.
- wantBytes := len(data) + 1
- readBuffer := make([]byte, wantBytes)
- iov := usermem.BytesIOSequence(readBuffer)
- n, err := p.Read(ctx, file, iov, 0)
- total := n
- iov = iov.DropFirst64(n)
- if err != syserror.ErrWouldBlock {
- t.Fatalf("Readv got error %v, want %v", err, syserror.ErrWouldBlock)
- }
-
- // Write a few more bytes to allow us to read more/accumulate.
- extra := []byte("extra")
- if n, err := unix.Write(fds[1], extra); n != len(extra) || err != nil {
- t.Fatalf("write to pipe got (%d, %v), want (%d, nil)", n, err, len(extra))
- }
-
- // This time, using the same request, we should not block.
- n, err = p.Read(ctx, file, iov, 0)
- total += n
- if err != nil {
- t.Fatalf("Readv got error %v, want nil", err)
- }
-
- // Assert that the result we got back is cumulative.
- if total != int64(wantBytes) {
- t.Fatalf("Readv sequence got %d bytes, want %d", total, wantBytes)
- }
-
- if want := append(data, extra[0]); !bytes.Equal(readBuffer, want) {
- t.Errorf("Readv sequence got %v, want %v", readBuffer, want)
- }
-}
-
-// Same as TestPipeReadsAccumulate, but for writes.
-func TestPipeWritesAccumulate(t *testing.T) {
- fds := make([]int, 2)
- if err := unix.Pipe(fds); err != nil {
- t.Fatalf("failed to create pipes: got %v, want nil", err)
- }
- wfile := fd.New(fds[1])
-
- // Eventually close the read end, it doesn't depend on a pipe object.
- defer unix.Close(fds[0])
-
- // Get a new write only pipe reference.
- ctx := contexttest.Context(t)
- p, err := newPipeOperations(ctx, nil, fs.FileFlags{Write: true}, wfile, nil)
- if err != nil {
- wfile.Close()
- t.Fatalf("newPipeOperations got error %v, want nil", err)
- }
- // Don't forget to remove the fd from the fd notifier. Otherwise other tests
- // will likely be borked, because it's global :(
- defer p.Release(ctx)
-
- inode := fs.NewMockInode(ctx, fs.NewMockMountSource(nil), fs.StableAttr{
- Type: fs.Pipe,
- })
- file := fs.NewFile(ctx, fs.NewDirent(ctx, inode, "pipe"), fs.FileFlags{Read: true}, p)
-
- pipeSize, _, errno := unix.Syscall(unix.SYS_FCNTL, uintptr(wfile.FD()), unix.F_GETPIPE_SZ, 0)
- if errno != 0 {
- t.Fatalf("fcntl(F_GETPIPE_SZ) failed: %v", errno)
- }
- t.Logf("Pipe buffer size: %d", pipeSize)
-
- // Construct a segment vec that is larger than the pipe size to trigger an
- // EWOULDBLOCK.
- wantBytes := int(pipeSize) * 2
- writeBuffer := make([]byte, wantBytes)
- for i := 0; i < wantBytes; i++ {
- writeBuffer[i] = 'a'
- }
- iov := usermem.BytesIOSequence(writeBuffer)
- n, err := p.Write(ctx, file, iov, 0)
- if err != syserror.ErrWouldBlock {
- t.Fatalf("Writev got error %v, want %v", err, syserror.ErrWouldBlock)
- }
- if n != int64(pipeSize) {
- t.Fatalf("Writev partial write, got: %v, want %v", n, pipeSize)
- }
- total := n
- iov = iov.DropFirst64(n)
-
- // Read the entire pipe buf size to make space for the second half.
- readBuffer := make([]byte, n)
- if n, err := unix.Read(fds[0], readBuffer); n != len(readBuffer) || err != nil {
-		t.Fatalf("read from pipe got (%d, %v), want (%d, nil)", n, err, len(readBuffer))
- }
- if !bytes.Equal(readBuffer, writeBuffer[:len(readBuffer)]) {
- t.Fatalf("wrong data read from pipe, got: %v, want: %v", readBuffer, writeBuffer)
- }
-
- // This time we should not block.
- n, err = p.Write(ctx, file, iov, 0)
- if err != nil {
- t.Fatalf("Writev got error %v, want nil", err)
- }
- if n != int64(pipeSize) {
- t.Fatalf("Writev partial write, got: %v, want %v", n, pipeSize)
- }
- total += n
-
- // Assert that the result we got back is cumulative.
- if total != int64(wantBytes) {
- t.Fatalf("Writev sequence got %d bytes, want %d", total, wantBytes)
- }
-}
diff --git a/pkg/sentry/fs/file_overlay_test.go b/pkg/sentry/fs/file_overlay_test.go
deleted file mode 100644
index 1971cc680..000000000
--- a/pkg/sentry/fs/file_overlay_test.go
+++ /dev/null
@@ -1,192 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fs_test
-
-import (
- "reflect"
- "testing"
-
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/sentry/fs"
- "gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
- "gvisor.dev/gvisor/pkg/sentry/fs/ramfs"
- "gvisor.dev/gvisor/pkg/sentry/kernel/contexttest"
-)
-
-func TestReaddir(t *testing.T) {
- ctx := contexttest.Context(t)
- ctx = &rootContext{
- Context: ctx,
- root: fs.NewDirent(ctx, newTestRamfsDir(ctx, nil, nil), "root"),
- }
- for _, test := range []struct {
- // Test description.
- desc string
-
- // Lookup parameters.
- dir *fs.Inode
-
- // Want from lookup.
- err error
- names []string
- }{
- {
- desc: "no upper, lower has entries",
- dir: fs.NewTestOverlayDir(ctx,
- nil, /* upper */
- newTestRamfsDir(ctx, []dirContent{
- {name: "a"},
- {name: "b"},
- }, nil), /* lower */
- false /* revalidate */),
- names: []string{".", "..", "a", "b"},
- },
- {
- desc: "upper has entries, no lower",
- dir: fs.NewTestOverlayDir(ctx,
- newTestRamfsDir(ctx, []dirContent{
- {name: "a"},
- {name: "b"},
- }, nil), /* upper */
- nil, /* lower */
- false /* revalidate */),
- names: []string{".", "..", "a", "b"},
- },
- {
- desc: "upper and lower, entries combine",
- dir: fs.NewTestOverlayDir(ctx,
- newTestRamfsDir(ctx, []dirContent{
- {name: "a"},
- }, nil), /* upper */
- newTestRamfsDir(ctx, []dirContent{
- {name: "b"},
- }, nil), /* lower */
- false /* revalidate */),
- names: []string{".", "..", "a", "b"},
- },
- {
- desc: "upper and lower, entries combine, none are masked",
- dir: fs.NewTestOverlayDir(ctx,
- newTestRamfsDir(ctx, []dirContent{
- {name: "a"},
- }, []string{"b"}), /* upper */
- newTestRamfsDir(ctx, []dirContent{
- {name: "c"},
- }, nil), /* lower */
- false /* revalidate */),
- names: []string{".", "..", "a", "c"},
- },
- {
- desc: "upper and lower, entries combine, upper masks some of lower",
- dir: fs.NewTestOverlayDir(ctx,
- newTestRamfsDir(ctx, []dirContent{
- {name: "a"},
- }, []string{"b"}), /* upper */
- newTestRamfsDir(ctx, []dirContent{
- {name: "b"}, /* will be masked */
- {name: "c"},
- }, nil), /* lower */
- false /* revalidate */),
- names: []string{".", "..", "a", "c"},
- },
- } {
- t.Run(test.desc, func(t *testing.T) {
- openDir, err := test.dir.GetFile(ctx, fs.NewDirent(ctx, test.dir, "stub"), fs.FileFlags{Read: true})
- if err != nil {
- t.Fatalf("GetFile got error %v, want nil", err)
- }
- stubSerializer := &fs.CollectEntriesSerializer{}
- err = openDir.Readdir(ctx, stubSerializer)
- if err != test.err {
- t.Fatalf("Readdir got error %v, want nil", err)
- }
- if err != nil {
- return
- }
- if !reflect.DeepEqual(stubSerializer.Order, test.names) {
- t.Errorf("Readdir got names %v, want %v", stubSerializer.Order, test.names)
- }
- })
- }
-}
-
-func TestReaddirRevalidation(t *testing.T) {
- ctx := contexttest.Context(t)
- ctx = &rootContext{
- Context: ctx,
- root: fs.NewDirent(ctx, newTestRamfsDir(ctx, nil, nil), "root"),
- }
-
- // Create an overlay with two directories, each with one file.
- upper := newTestRamfsDir(ctx, []dirContent{{name: "a"}}, nil)
- lower := newTestRamfsDir(ctx, []dirContent{{name: "b"}}, nil)
- overlay := fs.NewTestOverlayDir(ctx, upper, lower, true /* revalidate */)
-
- // Get a handle to the dirent in the upper filesystem so that we can
- // modify it without going through the dirent.
- upperDir := upper.InodeOperations.(*dir).InodeOperations.(*ramfs.Dir)
-
- // Check that overlay returns the files from both upper and lower.
- openDir, err := overlay.GetFile(ctx, fs.NewDirent(ctx, overlay, "stub"), fs.FileFlags{Read: true})
- if err != nil {
- t.Fatalf("GetFile got error %v, want nil", err)
- }
- ser := &fs.CollectEntriesSerializer{}
- if err := openDir.Readdir(ctx, ser); err != nil {
- t.Fatalf("Readdir got error %v, want nil", err)
- }
- got, want := ser.Order, []string{".", "..", "a", "b"}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("Readdir got names %v, want %v", got, want)
- }
-
- // Remove "a" from the upper and add "c".
- if err := upperDir.Remove(ctx, upper, "a"); err != nil {
- t.Fatalf("error removing child: %v", err)
- }
- upperDir.AddChild(ctx, "c", fs.NewInode(ctx, fsutil.NewSimpleFileInode(ctx, fs.RootOwner, fs.FilePermissions{}, 0),
- upper.MountSource, fs.StableAttr{Type: fs.RegularFile}))
-
- // Seek to beginning of the directory and do the readdir again.
- if _, err := openDir.Seek(ctx, fs.SeekSet, 0); err != nil {
- t.Fatalf("error seeking to beginning of dir: %v", err)
- }
- ser = &fs.CollectEntriesSerializer{}
- if err := openDir.Readdir(ctx, ser); err != nil {
- t.Fatalf("Readdir got error %v, want nil", err)
- }
-
- // Readdir should return the updated children.
- got, want = ser.Order, []string{".", "..", "b", "c"}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("Readdir got names %v, want %v", got, want)
- }
-}
-
-type rootContext struct {
- context.Context
- root *fs.Dirent
-}
-
-// Value implements context.Context.
-func (r *rootContext) Value(key interface{}) interface{} {
- switch key {
- case fs.CtxRoot:
- r.root.IncRef()
- return r.root
- default:
- return r.Context.Value(key)
- }
-}
diff --git a/pkg/sentry/fs/filetest/BUILD b/pkg/sentry/fs/filetest/BUILD
deleted file mode 100644
index a8000e010..000000000
--- a/pkg/sentry/fs/filetest/BUILD
+++ /dev/null
@@ -1,19 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "filetest",
- testonly = 1,
- srcs = ["filetest.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/context",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/anon",
- "//pkg/sentry/fs/fsutil",
- "//pkg/usermem",
- "//pkg/waiter",
- ],
-)
diff --git a/pkg/sentry/fs/filetest/filetest.go b/pkg/sentry/fs/filetest/filetest.go
deleted file mode 100644
index ec3d3f96c..000000000
--- a/pkg/sentry/fs/filetest/filetest.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package filetest provides a test implementation of an fs.File.
-package filetest
-
-import (
- "fmt"
- "testing"
-
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/fs"
- "gvisor.dev/gvisor/pkg/sentry/fs/anon"
- "gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
- "gvisor.dev/gvisor/pkg/usermem"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-// TestFileOperations is an implementation of the File interface. It provides all
-// required methods.
-type TestFileOperations struct {
- fsutil.FileNoopRelease `state:"nosave"`
- fsutil.FilePipeSeek `state:"nosave"`
- fsutil.FileNotDirReaddir `state:"nosave"`
- fsutil.FileNoFsync `state:"nosave"`
- fsutil.FileNoopFlush `state:"nosave"`
- fsutil.FileNoMMap `state:"nosave"`
- fsutil.FileNoIoctl `state:"nosave"`
- fsutil.FileNoSplice `state:"nosave"`
- fsutil.FileUseInodeUnstableAttr `state:"nosave"`
- waiter.AlwaysReady `state:"nosave"`
-}
-
-// NewTestFile creates and initializes a new test file.
-func NewTestFile(tb testing.TB) *fs.File {
- ctx := contexttest.Context(tb)
- dirent := fs.NewDirent(ctx, anon.NewInode(ctx), "test")
- return fs.NewFile(ctx, dirent, fs.FileFlags{}, &TestFileOperations{})
-}
-
-// Read just fails the request.
-func (*TestFileOperations) Read(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) {
- return 0, fmt.Errorf("TestFileOperations.Read not implemented")
-}
-
-// Write just fails the request.
-func (*TestFileOperations) Write(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) {
- return 0, fmt.Errorf("TestFileOperations.Write not implemented")
-}
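
The file removed above is a stub whose Read and Write always fail, which is handy for exercising generic fs.File plumbing. A hypothetical caller-side sketch (assuming fs.File's Readv helper and the usual gvisor import paths; not code from the tree):

```
package filetest_test

import (
	"testing"

	"gvisor.dev/gvisor/pkg/sentry/contexttest"
	"gvisor.dev/gvisor/pkg/sentry/fs/filetest"
	"gvisor.dev/gvisor/pkg/usermem"
)

func TestStubReadFails(t *testing.T) {
	ctx := contexttest.Context(t)
	file := filetest.NewTestFile(t)

	// The stub's Read always returns an error, so a read through the
	// generic fs.File path is expected to fail.
	dst := usermem.BytesIOSequence(make([]byte, 8))
	if _, err := file.Readv(ctx, dst); err == nil {
		t.Fatal("Readv on the stub file succeeded, want error")
	}
}
```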
diff --git a/pkg/sentry/fs/fs_state_autogen.go b/pkg/sentry/fs/fs_state_autogen.go
new file mode 100644
index 000000000..a4a67fca2
--- /dev/null
+++ b/pkg/sentry/fs/fs_state_autogen.go
@@ -0,0 +1,1207 @@
+// automatically generated by stateify.
+
+package fs
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (s *StableAttr) StateTypeName() string {
+ return "pkg/sentry/fs.StableAttr"
+}
+
+func (s *StableAttr) StateFields() []string {
+ return []string{
+ "Type",
+ "DeviceID",
+ "InodeID",
+ "BlockSize",
+ "DeviceFileMajor",
+ "DeviceFileMinor",
+ }
+}
+
+func (s *StableAttr) beforeSave() {}
+
+// +checklocksignore
+func (s *StableAttr) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.Type)
+ stateSinkObject.Save(1, &s.DeviceID)
+ stateSinkObject.Save(2, &s.InodeID)
+ stateSinkObject.Save(3, &s.BlockSize)
+ stateSinkObject.Save(4, &s.DeviceFileMajor)
+ stateSinkObject.Save(5, &s.DeviceFileMinor)
+}
+
+func (s *StableAttr) afterLoad() {}
+
+// +checklocksignore
+func (s *StableAttr) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.Type)
+ stateSourceObject.Load(1, &s.DeviceID)
+ stateSourceObject.Load(2, &s.InodeID)
+ stateSourceObject.Load(3, &s.BlockSize)
+ stateSourceObject.Load(4, &s.DeviceFileMajor)
+ stateSourceObject.Load(5, &s.DeviceFileMinor)
+}
+
+func (ua *UnstableAttr) StateTypeName() string {
+ return "pkg/sentry/fs.UnstableAttr"
+}
+
+func (ua *UnstableAttr) StateFields() []string {
+ return []string{
+ "Size",
+ "Usage",
+ "Perms",
+ "Owner",
+ "AccessTime",
+ "ModificationTime",
+ "StatusChangeTime",
+ "Links",
+ }
+}
+
+func (ua *UnstableAttr) beforeSave() {}
+
+// +checklocksignore
+func (ua *UnstableAttr) StateSave(stateSinkObject state.Sink) {
+ ua.beforeSave()
+ stateSinkObject.Save(0, &ua.Size)
+ stateSinkObject.Save(1, &ua.Usage)
+ stateSinkObject.Save(2, &ua.Perms)
+ stateSinkObject.Save(3, &ua.Owner)
+ stateSinkObject.Save(4, &ua.AccessTime)
+ stateSinkObject.Save(5, &ua.ModificationTime)
+ stateSinkObject.Save(6, &ua.StatusChangeTime)
+ stateSinkObject.Save(7, &ua.Links)
+}
+
+func (ua *UnstableAttr) afterLoad() {}
+
+// +checklocksignore
+func (ua *UnstableAttr) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &ua.Size)
+ stateSourceObject.Load(1, &ua.Usage)
+ stateSourceObject.Load(2, &ua.Perms)
+ stateSourceObject.Load(3, &ua.Owner)
+ stateSourceObject.Load(4, &ua.AccessTime)
+ stateSourceObject.Load(5, &ua.ModificationTime)
+ stateSourceObject.Load(6, &ua.StatusChangeTime)
+ stateSourceObject.Load(7, &ua.Links)
+}
+
+func (a *AttrMask) StateTypeName() string {
+ return "pkg/sentry/fs.AttrMask"
+}
+
+func (a *AttrMask) StateFields() []string {
+ return []string{
+ "Type",
+ "DeviceID",
+ "InodeID",
+ "BlockSize",
+ "Size",
+ "Usage",
+ "Perms",
+ "UID",
+ "GID",
+ "AccessTime",
+ "ModificationTime",
+ "StatusChangeTime",
+ "Links",
+ }
+}
+
+func (a *AttrMask) beforeSave() {}
+
+// +checklocksignore
+func (a *AttrMask) StateSave(stateSinkObject state.Sink) {
+ a.beforeSave()
+ stateSinkObject.Save(0, &a.Type)
+ stateSinkObject.Save(1, &a.DeviceID)
+ stateSinkObject.Save(2, &a.InodeID)
+ stateSinkObject.Save(3, &a.BlockSize)
+ stateSinkObject.Save(4, &a.Size)
+ stateSinkObject.Save(5, &a.Usage)
+ stateSinkObject.Save(6, &a.Perms)
+ stateSinkObject.Save(7, &a.UID)
+ stateSinkObject.Save(8, &a.GID)
+ stateSinkObject.Save(9, &a.AccessTime)
+ stateSinkObject.Save(10, &a.ModificationTime)
+ stateSinkObject.Save(11, &a.StatusChangeTime)
+ stateSinkObject.Save(12, &a.Links)
+}
+
+func (a *AttrMask) afterLoad() {}
+
+// +checklocksignore
+func (a *AttrMask) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &a.Type)
+ stateSourceObject.Load(1, &a.DeviceID)
+ stateSourceObject.Load(2, &a.InodeID)
+ stateSourceObject.Load(3, &a.BlockSize)
+ stateSourceObject.Load(4, &a.Size)
+ stateSourceObject.Load(5, &a.Usage)
+ stateSourceObject.Load(6, &a.Perms)
+ stateSourceObject.Load(7, &a.UID)
+ stateSourceObject.Load(8, &a.GID)
+ stateSourceObject.Load(9, &a.AccessTime)
+ stateSourceObject.Load(10, &a.ModificationTime)
+ stateSourceObject.Load(11, &a.StatusChangeTime)
+ stateSourceObject.Load(12, &a.Links)
+}
+
+func (p *PermMask) StateTypeName() string {
+ return "pkg/sentry/fs.PermMask"
+}
+
+func (p *PermMask) StateFields() []string {
+ return []string{
+ "Read",
+ "Write",
+ "Execute",
+ }
+}
+
+func (p *PermMask) beforeSave() {}
+
+// +checklocksignore
+func (p *PermMask) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+ stateSinkObject.Save(0, &p.Read)
+ stateSinkObject.Save(1, &p.Write)
+ stateSinkObject.Save(2, &p.Execute)
+}
+
+func (p *PermMask) afterLoad() {}
+
+// +checklocksignore
+func (p *PermMask) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &p.Read)
+ stateSourceObject.Load(1, &p.Write)
+ stateSourceObject.Load(2, &p.Execute)
+}
+
+func (f *FilePermissions) StateTypeName() string {
+ return "pkg/sentry/fs.FilePermissions"
+}
+
+func (f *FilePermissions) StateFields() []string {
+ return []string{
+ "User",
+ "Group",
+ "Other",
+ "Sticky",
+ "SetUID",
+ "SetGID",
+ }
+}
+
+func (f *FilePermissions) beforeSave() {}
+
+// +checklocksignore
+func (f *FilePermissions) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.User)
+ stateSinkObject.Save(1, &f.Group)
+ stateSinkObject.Save(2, &f.Other)
+ stateSinkObject.Save(3, &f.Sticky)
+ stateSinkObject.Save(4, &f.SetUID)
+ stateSinkObject.Save(5, &f.SetGID)
+}
+
+func (f *FilePermissions) afterLoad() {}
+
+// +checklocksignore
+func (f *FilePermissions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.User)
+ stateSourceObject.Load(1, &f.Group)
+ stateSourceObject.Load(2, &f.Other)
+ stateSourceObject.Load(3, &f.Sticky)
+ stateSourceObject.Load(4, &f.SetUID)
+ stateSourceObject.Load(5, &f.SetGID)
+}
+
+func (f *FileOwner) StateTypeName() string {
+ return "pkg/sentry/fs.FileOwner"
+}
+
+func (f *FileOwner) StateFields() []string {
+ return []string{
+ "UID",
+ "GID",
+ }
+}
+
+func (f *FileOwner) beforeSave() {}
+
+// +checklocksignore
+func (f *FileOwner) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.UID)
+ stateSinkObject.Save(1, &f.GID)
+}
+
+func (f *FileOwner) afterLoad() {}
+
+// +checklocksignore
+func (f *FileOwner) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.UID)
+ stateSourceObject.Load(1, &f.GID)
+}
+
+func (d *DentAttr) StateTypeName() string {
+ return "pkg/sentry/fs.DentAttr"
+}
+
+func (d *DentAttr) StateFields() []string {
+ return []string{
+ "Type",
+ "InodeID",
+ }
+}
+
+func (d *DentAttr) beforeSave() {}
+
+// +checklocksignore
+func (d *DentAttr) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.Type)
+ stateSinkObject.Save(1, &d.InodeID)
+}
+
+func (d *DentAttr) afterLoad() {}
+
+// +checklocksignore
+func (d *DentAttr) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.Type)
+ stateSourceObject.Load(1, &d.InodeID)
+}
+
+func (s *SortedDentryMap) StateTypeName() string {
+ return "pkg/sentry/fs.SortedDentryMap"
+}
+
+func (s *SortedDentryMap) StateFields() []string {
+ return []string{
+ "names",
+ "entries",
+ }
+}
+
+func (s *SortedDentryMap) beforeSave() {}
+
+// +checklocksignore
+func (s *SortedDentryMap) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.names)
+ stateSinkObject.Save(1, &s.entries)
+}
+
+func (s *SortedDentryMap) afterLoad() {}
+
+// +checklocksignore
+func (s *SortedDentryMap) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.names)
+ stateSourceObject.Load(1, &s.entries)
+}
+
+func (d *Dirent) StateTypeName() string {
+ return "pkg/sentry/fs.Dirent"
+}
+
+func (d *Dirent) StateFields() []string {
+ return []string{
+ "AtomicRefCount",
+ "userVisible",
+ "Inode",
+ "name",
+ "parent",
+ "deleted",
+ "mounted",
+ "children",
+ }
+}
+
+// +checklocksignore
+func (d *Dirent) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ var childrenValue map[string]*Dirent = d.saveChildren()
+ stateSinkObject.SaveValue(7, childrenValue)
+ stateSinkObject.Save(0, &d.AtomicRefCount)
+ stateSinkObject.Save(1, &d.userVisible)
+ stateSinkObject.Save(2, &d.Inode)
+ stateSinkObject.Save(3, &d.name)
+ stateSinkObject.Save(4, &d.parent)
+ stateSinkObject.Save(5, &d.deleted)
+ stateSinkObject.Save(6, &d.mounted)
+}
+
+// +checklocksignore
+func (d *Dirent) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.AtomicRefCount)
+ stateSourceObject.Load(1, &d.userVisible)
+ stateSourceObject.Load(2, &d.Inode)
+ stateSourceObject.Load(3, &d.name)
+ stateSourceObject.Load(4, &d.parent)
+ stateSourceObject.Load(5, &d.deleted)
+ stateSourceObject.Load(6, &d.mounted)
+ stateSourceObject.LoadValue(7, new(map[string]*Dirent), func(y interface{}) { d.loadChildren(y.(map[string]*Dirent)) })
+ stateSourceObject.AfterLoad(d.afterLoad)
+}
+
+func (c *DirentCache) StateTypeName() string {
+ return "pkg/sentry/fs.DirentCache"
+}
+
+func (c *DirentCache) StateFields() []string {
+ return []string{
+ "maxSize",
+ "limit",
+ }
+}
+
+func (c *DirentCache) beforeSave() {}
+
+// +checklocksignore
+func (c *DirentCache) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ if !state.IsZeroValue(&c.currentSize) {
+ state.Failf("currentSize is %#v, expected zero", &c.currentSize)
+ }
+ if !state.IsZeroValue(&c.list) {
+ state.Failf("list is %#v, expected zero", &c.list)
+ }
+ stateSinkObject.Save(0, &c.maxSize)
+ stateSinkObject.Save(1, &c.limit)
+}
+
+func (c *DirentCache) afterLoad() {}
+
+// +checklocksignore
+func (c *DirentCache) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.maxSize)
+ stateSourceObject.Load(1, &c.limit)
+}
+
+func (d *DirentCacheLimiter) StateTypeName() string {
+ return "pkg/sentry/fs.DirentCacheLimiter"
+}
+
+func (d *DirentCacheLimiter) StateFields() []string {
+ return []string{
+ "max",
+ }
+}
+
+func (d *DirentCacheLimiter) beforeSave() {}
+
+// +checklocksignore
+func (d *DirentCacheLimiter) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ if !state.IsZeroValue(&d.count) {
+ state.Failf("count is %#v, expected zero", &d.count)
+ }
+ stateSinkObject.Save(0, &d.max)
+}
+
+func (d *DirentCacheLimiter) afterLoad() {}
+
+// +checklocksignore
+func (d *DirentCacheLimiter) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.max)
+}
+
+func (l *direntList) StateTypeName() string {
+ return "pkg/sentry/fs.direntList"
+}
+
+func (l *direntList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *direntList) beforeSave() {}
+
+// +checklocksignore
+func (l *direntList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *direntList) afterLoad() {}
+
+// +checklocksignore
+func (l *direntList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *direntEntry) StateTypeName() string {
+ return "pkg/sentry/fs.direntEntry"
+}
+
+func (e *direntEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *direntEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *direntEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *direntEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *direntEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func (l *eventList) StateTypeName() string {
+ return "pkg/sentry/fs.eventList"
+}
+
+func (l *eventList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *eventList) beforeSave() {}
+
+// +checklocksignore
+func (l *eventList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *eventList) afterLoad() {}
+
+// +checklocksignore
+func (l *eventList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *eventEntry) StateTypeName() string {
+ return "pkg/sentry/fs.eventEntry"
+}
+
+func (e *eventEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *eventEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *eventEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *eventEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *eventEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func (f *File) StateTypeName() string {
+ return "pkg/sentry/fs.File"
+}
+
+func (f *File) StateFields() []string {
+ return []string{
+ "AtomicRefCount",
+ "UniqueID",
+ "Dirent",
+ "flags",
+ "async",
+ "FileOperations",
+ "offset",
+ }
+}
+
+// +checklocksignore
+func (f *File) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.AtomicRefCount)
+ stateSinkObject.Save(1, &f.UniqueID)
+ stateSinkObject.Save(2, &f.Dirent)
+ stateSinkObject.Save(3, &f.flags)
+ stateSinkObject.Save(4, &f.async)
+ stateSinkObject.Save(5, &f.FileOperations)
+ stateSinkObject.Save(6, &f.offset)
+}
+
+// +checklocksignore
+func (f *File) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.AtomicRefCount)
+ stateSourceObject.Load(1, &f.UniqueID)
+ stateSourceObject.Load(2, &f.Dirent)
+ stateSourceObject.Load(3, &f.flags)
+ stateSourceObject.Load(4, &f.async)
+ stateSourceObject.LoadWait(5, &f.FileOperations)
+ stateSourceObject.Load(6, &f.offset)
+ stateSourceObject.AfterLoad(f.afterLoad)
+}
+
+func (f *overlayFileOperations) StateTypeName() string {
+ return "pkg/sentry/fs.overlayFileOperations"
+}
+
+func (f *overlayFileOperations) StateFields() []string {
+ return []string{
+ "upper",
+ "lower",
+ "dirCursor",
+ }
+}
+
+func (f *overlayFileOperations) beforeSave() {}
+
+// +checklocksignore
+func (f *overlayFileOperations) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.upper)
+ stateSinkObject.Save(1, &f.lower)
+ stateSinkObject.Save(2, &f.dirCursor)
+}
+
+func (f *overlayFileOperations) afterLoad() {}
+
+// +checklocksignore
+func (f *overlayFileOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.upper)
+ stateSourceObject.Load(1, &f.lower)
+ stateSourceObject.Load(2, &f.dirCursor)
+}
+
+func (omi *overlayMappingIdentity) StateTypeName() string {
+ return "pkg/sentry/fs.overlayMappingIdentity"
+}
+
+func (omi *overlayMappingIdentity) StateFields() []string {
+ return []string{
+ "AtomicRefCount",
+ "id",
+ "overlayFile",
+ }
+}
+
+func (omi *overlayMappingIdentity) beforeSave() {}
+
+// +checklocksignore
+func (omi *overlayMappingIdentity) StateSave(stateSinkObject state.Sink) {
+ omi.beforeSave()
+ stateSinkObject.Save(0, &omi.AtomicRefCount)
+ stateSinkObject.Save(1, &omi.id)
+ stateSinkObject.Save(2, &omi.overlayFile)
+}
+
+func (omi *overlayMappingIdentity) afterLoad() {}
+
+// +checklocksignore
+func (omi *overlayMappingIdentity) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &omi.AtomicRefCount)
+ stateSourceObject.Load(1, &omi.id)
+ stateSourceObject.Load(2, &omi.overlayFile)
+}
+
+func (m *MountSourceFlags) StateTypeName() string {
+ return "pkg/sentry/fs.MountSourceFlags"
+}
+
+func (m *MountSourceFlags) StateFields() []string {
+ return []string{
+ "ReadOnly",
+ "NoAtime",
+ "ForcePageCache",
+ "NoExec",
+ }
+}
+
+func (m *MountSourceFlags) beforeSave() {}
+
+// +checklocksignore
+func (m *MountSourceFlags) StateSave(stateSinkObject state.Sink) {
+ m.beforeSave()
+ stateSinkObject.Save(0, &m.ReadOnly)
+ stateSinkObject.Save(1, &m.NoAtime)
+ stateSinkObject.Save(2, &m.ForcePageCache)
+ stateSinkObject.Save(3, &m.NoExec)
+}
+
+func (m *MountSourceFlags) afterLoad() {}
+
+// +checklocksignore
+func (m *MountSourceFlags) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &m.ReadOnly)
+ stateSourceObject.Load(1, &m.NoAtime)
+ stateSourceObject.Load(2, &m.ForcePageCache)
+ stateSourceObject.Load(3, &m.NoExec)
+}
+
+func (f *FileFlags) StateTypeName() string {
+ return "pkg/sentry/fs.FileFlags"
+}
+
+func (f *FileFlags) StateFields() []string {
+ return []string{
+ "Direct",
+ "NonBlocking",
+ "DSync",
+ "Sync",
+ "Append",
+ "Read",
+ "Write",
+ "Pread",
+ "Pwrite",
+ "Directory",
+ "Async",
+ "LargeFile",
+ "NonSeekable",
+ "Truncate",
+ }
+}
+
+func (f *FileFlags) beforeSave() {}
+
+// +checklocksignore
+func (f *FileFlags) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.Direct)
+ stateSinkObject.Save(1, &f.NonBlocking)
+ stateSinkObject.Save(2, &f.DSync)
+ stateSinkObject.Save(3, &f.Sync)
+ stateSinkObject.Save(4, &f.Append)
+ stateSinkObject.Save(5, &f.Read)
+ stateSinkObject.Save(6, &f.Write)
+ stateSinkObject.Save(7, &f.Pread)
+ stateSinkObject.Save(8, &f.Pwrite)
+ stateSinkObject.Save(9, &f.Directory)
+ stateSinkObject.Save(10, &f.Async)
+ stateSinkObject.Save(11, &f.LargeFile)
+ stateSinkObject.Save(12, &f.NonSeekable)
+ stateSinkObject.Save(13, &f.Truncate)
+}
+
+func (f *FileFlags) afterLoad() {}
+
+// +checklocksignore
+func (f *FileFlags) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.Direct)
+ stateSourceObject.Load(1, &f.NonBlocking)
+ stateSourceObject.Load(2, &f.DSync)
+ stateSourceObject.Load(3, &f.Sync)
+ stateSourceObject.Load(4, &f.Append)
+ stateSourceObject.Load(5, &f.Read)
+ stateSourceObject.Load(6, &f.Write)
+ stateSourceObject.Load(7, &f.Pread)
+ stateSourceObject.Load(8, &f.Pwrite)
+ stateSourceObject.Load(9, &f.Directory)
+ stateSourceObject.Load(10, &f.Async)
+ stateSourceObject.Load(11, &f.LargeFile)
+ stateSourceObject.Load(12, &f.NonSeekable)
+ stateSourceObject.Load(13, &f.Truncate)
+}
+
+func (i *Inode) StateTypeName() string {
+ return "pkg/sentry/fs.Inode"
+}
+
+func (i *Inode) StateFields() []string {
+ return []string{
+ "AtomicRefCount",
+ "InodeOperations",
+ "StableAttr",
+ "LockCtx",
+ "Watches",
+ "MountSource",
+ "overlay",
+ }
+}
+
+func (i *Inode) beforeSave() {}
+
+// +checklocksignore
+func (i *Inode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.AtomicRefCount)
+ stateSinkObject.Save(1, &i.InodeOperations)
+ stateSinkObject.Save(2, &i.StableAttr)
+ stateSinkObject.Save(3, &i.LockCtx)
+ stateSinkObject.Save(4, &i.Watches)
+ stateSinkObject.Save(5, &i.MountSource)
+ stateSinkObject.Save(6, &i.overlay)
+}
+
+func (i *Inode) afterLoad() {}
+
+// +checklocksignore
+func (i *Inode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.AtomicRefCount)
+ stateSourceObject.Load(1, &i.InodeOperations)
+ stateSourceObject.Load(2, &i.StableAttr)
+ stateSourceObject.Load(3, &i.LockCtx)
+ stateSourceObject.Load(4, &i.Watches)
+ stateSourceObject.Load(5, &i.MountSource)
+ stateSourceObject.Load(6, &i.overlay)
+}
+
+func (l *LockCtx) StateTypeName() string {
+ return "pkg/sentry/fs.LockCtx"
+}
+
+func (l *LockCtx) StateFields() []string {
+ return []string{
+ "Posix",
+ "BSD",
+ }
+}
+
+func (l *LockCtx) beforeSave() {}
+
+// +checklocksignore
+func (l *LockCtx) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.Posix)
+ stateSinkObject.Save(1, &l.BSD)
+}
+
+func (l *LockCtx) afterLoad() {}
+
+// +checklocksignore
+func (l *LockCtx) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.Posix)
+ stateSourceObject.Load(1, &l.BSD)
+}
+
+func (w *Watches) StateTypeName() string {
+ return "pkg/sentry/fs.Watches"
+}
+
+func (w *Watches) StateFields() []string {
+ return []string{
+ "ws",
+ "unlinked",
+ }
+}
+
+func (w *Watches) beforeSave() {}
+
+// +checklocksignore
+func (w *Watches) StateSave(stateSinkObject state.Sink) {
+ w.beforeSave()
+ stateSinkObject.Save(0, &w.ws)
+ stateSinkObject.Save(1, &w.unlinked)
+}
+
+func (w *Watches) afterLoad() {}
+
+// +checklocksignore
+func (w *Watches) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &w.ws)
+ stateSourceObject.Load(1, &w.unlinked)
+}
+
+func (i *Inotify) StateTypeName() string {
+ return "pkg/sentry/fs.Inotify"
+}
+
+func (i *Inotify) StateFields() []string {
+ return []string{
+ "id",
+ "events",
+ "scratch",
+ "nextWatch",
+ "watches",
+ }
+}
+
+func (i *Inotify) beforeSave() {}
+
+// +checklocksignore
+func (i *Inotify) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.id)
+ stateSinkObject.Save(1, &i.events)
+ stateSinkObject.Save(2, &i.scratch)
+ stateSinkObject.Save(3, &i.nextWatch)
+ stateSinkObject.Save(4, &i.watches)
+}
+
+func (i *Inotify) afterLoad() {}
+
+// +checklocksignore
+func (i *Inotify) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.id)
+ stateSourceObject.Load(1, &i.events)
+ stateSourceObject.Load(2, &i.scratch)
+ stateSourceObject.Load(3, &i.nextWatch)
+ stateSourceObject.Load(4, &i.watches)
+}
+
+func (e *Event) StateTypeName() string {
+ return "pkg/sentry/fs.Event"
+}
+
+func (e *Event) StateFields() []string {
+ return []string{
+ "eventEntry",
+ "wd",
+ "mask",
+ "cookie",
+ "len",
+ "name",
+ }
+}
+
+func (e *Event) beforeSave() {}
+
+// +checklocksignore
+func (e *Event) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.eventEntry)
+ stateSinkObject.Save(1, &e.wd)
+ stateSinkObject.Save(2, &e.mask)
+ stateSinkObject.Save(3, &e.cookie)
+ stateSinkObject.Save(4, &e.len)
+ stateSinkObject.Save(5, &e.name)
+}
+
+func (e *Event) afterLoad() {}
+
+// +checklocksignore
+func (e *Event) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.eventEntry)
+ stateSourceObject.Load(1, &e.wd)
+ stateSourceObject.Load(2, &e.mask)
+ stateSourceObject.Load(3, &e.cookie)
+ stateSourceObject.Load(4, &e.len)
+ stateSourceObject.Load(5, &e.name)
+}
+
+func (w *Watch) StateTypeName() string {
+ return "pkg/sentry/fs.Watch"
+}
+
+func (w *Watch) StateFields() []string {
+ return []string{
+ "owner",
+ "wd",
+ "target",
+ "unpinned",
+ "mask",
+ "pins",
+ }
+}
+
+func (w *Watch) beforeSave() {}
+
+// +checklocksignore
+func (w *Watch) StateSave(stateSinkObject state.Sink) {
+ w.beforeSave()
+ stateSinkObject.Save(0, &w.owner)
+ stateSinkObject.Save(1, &w.wd)
+ stateSinkObject.Save(2, &w.target)
+ stateSinkObject.Save(3, &w.unpinned)
+ stateSinkObject.Save(4, &w.mask)
+ stateSinkObject.Save(5, &w.pins)
+}
+
+func (w *Watch) afterLoad() {}
+
+// +checklocksignore
+func (w *Watch) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &w.owner)
+ stateSourceObject.Load(1, &w.wd)
+ stateSourceObject.Load(2, &w.target)
+ stateSourceObject.Load(3, &w.unpinned)
+ stateSourceObject.Load(4, &w.mask)
+ stateSourceObject.Load(5, &w.pins)
+}
+
+func (msrc *MountSource) StateTypeName() string {
+ return "pkg/sentry/fs.MountSource"
+}
+
+func (msrc *MountSource) StateFields() []string {
+ return []string{
+ "AtomicRefCount",
+ "MountSourceOperations",
+ "FilesystemType",
+ "Flags",
+ "fscache",
+ "direntRefs",
+ }
+}
+
+func (msrc *MountSource) beforeSave() {}
+
+// +checklocksignore
+func (msrc *MountSource) StateSave(stateSinkObject state.Sink) {
+ msrc.beforeSave()
+ stateSinkObject.Save(0, &msrc.AtomicRefCount)
+ stateSinkObject.Save(1, &msrc.MountSourceOperations)
+ stateSinkObject.Save(2, &msrc.FilesystemType)
+ stateSinkObject.Save(3, &msrc.Flags)
+ stateSinkObject.Save(4, &msrc.fscache)
+ stateSinkObject.Save(5, &msrc.direntRefs)
+}
+
+func (msrc *MountSource) afterLoad() {}
+
+// +checklocksignore
+func (msrc *MountSource) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &msrc.AtomicRefCount)
+ stateSourceObject.Load(1, &msrc.MountSourceOperations)
+ stateSourceObject.Load(2, &msrc.FilesystemType)
+ stateSourceObject.Load(3, &msrc.Flags)
+ stateSourceObject.Load(4, &msrc.fscache)
+ stateSourceObject.Load(5, &msrc.direntRefs)
+}
+
+func (smo *SimpleMountSourceOperations) StateTypeName() string {
+ return "pkg/sentry/fs.SimpleMountSourceOperations"
+}
+
+func (smo *SimpleMountSourceOperations) StateFields() []string {
+ return []string{
+ "keep",
+ "revalidate",
+ "cacheReaddir",
+ }
+}
+
+func (smo *SimpleMountSourceOperations) beforeSave() {}
+
+// +checklocksignore
+func (smo *SimpleMountSourceOperations) StateSave(stateSinkObject state.Sink) {
+ smo.beforeSave()
+ stateSinkObject.Save(0, &smo.keep)
+ stateSinkObject.Save(1, &smo.revalidate)
+ stateSinkObject.Save(2, &smo.cacheReaddir)
+}
+
+func (smo *SimpleMountSourceOperations) afterLoad() {}
+
+// +checklocksignore
+func (smo *SimpleMountSourceOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &smo.keep)
+ stateSourceObject.Load(1, &smo.revalidate)
+ stateSourceObject.Load(2, &smo.cacheReaddir)
+}
+
+func (o *overlayMountSourceOperations) StateTypeName() string {
+ return "pkg/sentry/fs.overlayMountSourceOperations"
+}
+
+func (o *overlayMountSourceOperations) StateFields() []string {
+ return []string{
+ "upper",
+ "lower",
+ }
+}
+
+func (o *overlayMountSourceOperations) beforeSave() {}
+
+// +checklocksignore
+func (o *overlayMountSourceOperations) StateSave(stateSinkObject state.Sink) {
+ o.beforeSave()
+ stateSinkObject.Save(0, &o.upper)
+ stateSinkObject.Save(1, &o.lower)
+}
+
+func (o *overlayMountSourceOperations) afterLoad() {}
+
+// +checklocksignore
+func (o *overlayMountSourceOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &o.upper)
+ stateSourceObject.Load(1, &o.lower)
+}
+
+func (ofs *overlayFilesystem) StateTypeName() string {
+ return "pkg/sentry/fs.overlayFilesystem"
+}
+
+func (ofs *overlayFilesystem) StateFields() []string {
+ return []string{}
+}
+
+func (ofs *overlayFilesystem) beforeSave() {}
+
+// +checklocksignore
+func (ofs *overlayFilesystem) StateSave(stateSinkObject state.Sink) {
+ ofs.beforeSave()
+}
+
+func (ofs *overlayFilesystem) afterLoad() {}
+
+// +checklocksignore
+func (ofs *overlayFilesystem) StateLoad(stateSourceObject state.Source) {
+}
+
+func (m *Mount) StateTypeName() string {
+ return "pkg/sentry/fs.Mount"
+}
+
+func (m *Mount) StateFields() []string {
+ return []string{
+ "ID",
+ "ParentID",
+ "root",
+ "previous",
+ }
+}
+
+func (m *Mount) beforeSave() {}
+
+// +checklocksignore
+func (m *Mount) StateSave(stateSinkObject state.Sink) {
+ m.beforeSave()
+ stateSinkObject.Save(0, &m.ID)
+ stateSinkObject.Save(1, &m.ParentID)
+ stateSinkObject.Save(2, &m.root)
+ stateSinkObject.Save(3, &m.previous)
+}
+
+func (m *Mount) afterLoad() {}
+
+// +checklocksignore
+func (m *Mount) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &m.ID)
+ stateSourceObject.Load(1, &m.ParentID)
+ stateSourceObject.Load(2, &m.root)
+ stateSourceObject.Load(3, &m.previous)
+}
+
+func (mns *MountNamespace) StateTypeName() string {
+ return "pkg/sentry/fs.MountNamespace"
+}
+
+func (mns *MountNamespace) StateFields() []string {
+ return []string{
+ "AtomicRefCount",
+ "userns",
+ "root",
+ "mounts",
+ "mountID",
+ }
+}
+
+func (mns *MountNamespace) beforeSave() {}
+
+// +checklocksignore
+func (mns *MountNamespace) StateSave(stateSinkObject state.Sink) {
+ mns.beforeSave()
+ stateSinkObject.Save(0, &mns.AtomicRefCount)
+ stateSinkObject.Save(1, &mns.userns)
+ stateSinkObject.Save(2, &mns.root)
+ stateSinkObject.Save(3, &mns.mounts)
+ stateSinkObject.Save(4, &mns.mountID)
+}
+
+func (mns *MountNamespace) afterLoad() {}
+
+// +checklocksignore
+func (mns *MountNamespace) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &mns.AtomicRefCount)
+ stateSourceObject.Load(1, &mns.userns)
+ stateSourceObject.Load(2, &mns.root)
+ stateSourceObject.Load(3, &mns.mounts)
+ stateSourceObject.Load(4, &mns.mountID)
+}
+
+func (o *overlayEntry) StateTypeName() string {
+ return "pkg/sentry/fs.overlayEntry"
+}
+
+func (o *overlayEntry) StateFields() []string {
+ return []string{
+ "lowerExists",
+ "lower",
+ "mappings",
+ "upper",
+ "dirCache",
+ }
+}
+
+func (o *overlayEntry) beforeSave() {}
+
+// +checklocksignore
+func (o *overlayEntry) StateSave(stateSinkObject state.Sink) {
+ o.beforeSave()
+ stateSinkObject.Save(0, &o.lowerExists)
+ stateSinkObject.Save(1, &o.lower)
+ stateSinkObject.Save(2, &o.mappings)
+ stateSinkObject.Save(3, &o.upper)
+ stateSinkObject.Save(4, &o.dirCache)
+}
+
+func (o *overlayEntry) afterLoad() {}
+
+// +checklocksignore
+func (o *overlayEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &o.lowerExists)
+ stateSourceObject.Load(1, &o.lower)
+ stateSourceObject.Load(2, &o.mappings)
+ stateSourceObject.Load(3, &o.upper)
+ stateSourceObject.Load(4, &o.dirCache)
+}
+
+func init() {
+ state.Register((*StableAttr)(nil))
+ state.Register((*UnstableAttr)(nil))
+ state.Register((*AttrMask)(nil))
+ state.Register((*PermMask)(nil))
+ state.Register((*FilePermissions)(nil))
+ state.Register((*FileOwner)(nil))
+ state.Register((*DentAttr)(nil))
+ state.Register((*SortedDentryMap)(nil))
+ state.Register((*Dirent)(nil))
+ state.Register((*DirentCache)(nil))
+ state.Register((*DirentCacheLimiter)(nil))
+ state.Register((*direntList)(nil))
+ state.Register((*direntEntry)(nil))
+ state.Register((*eventList)(nil))
+ state.Register((*eventEntry)(nil))
+ state.Register((*File)(nil))
+ state.Register((*overlayFileOperations)(nil))
+ state.Register((*overlayMappingIdentity)(nil))
+ state.Register((*MountSourceFlags)(nil))
+ state.Register((*FileFlags)(nil))
+ state.Register((*Inode)(nil))
+ state.Register((*LockCtx)(nil))
+ state.Register((*Watches)(nil))
+ state.Register((*Inotify)(nil))
+ state.Register((*Event)(nil))
+ state.Register((*Watch)(nil))
+ state.Register((*MountSource)(nil))
+ state.Register((*SimpleMountSourceOperations)(nil))
+ state.Register((*overlayMountSourceOperations)(nil))
+ state.Register((*overlayFilesystem)(nil))
+ state.Register((*Mount)(nil))
+ state.Register((*MountNamespace)(nil))
+ state.Register((*overlayEntry)(nil))
+}
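
The generated code above repeats one mechanical pattern per registered type: name the type, list its fields, then save and load them by index. A minimal hand-written sketch of the same pattern for a hypothetical counter type (not a real gVisor type) looks like this:

```
package example

import "gvisor.dev/gvisor/pkg/state"

// counter is a hypothetical savable type, used only to illustrate the
// stateify-style interface implemented throughout fs_state_autogen.go.
type counter struct {
	Name  string
	Value int64
}

func (c *counter) StateTypeName() string { return "example.counter" }

func (c *counter) StateFields() []string { return []string{"Name", "Value"} }

func (c *counter) beforeSave() {}

func (c *counter) StateSave(sink state.Sink) {
	c.beforeSave()
	sink.Save(0, &c.Name) // indices must match the order in StateFields.
	sink.Save(1, &c.Value)
}

func (c *counter) afterLoad() {}

func (c *counter) StateLoad(source state.Source) {
	source.Load(0, &c.Name)
	source.Load(1, &c.Value)
}

func init() {
	state.Register((*counter)(nil))
}
```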
diff --git a/pkg/sentry/fs/fsutil/BUILD b/pkg/sentry/fs/fsutil/BUILD
deleted file mode 100644
index 6bf2d51cb..000000000
--- a/pkg/sentry/fs/fsutil/BUILD
+++ /dev/null
@@ -1,119 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "dirty_set_impl",
- out = "dirty_set_impl.go",
- imports = {
- "memmap": "gvisor.dev/gvisor/pkg/sentry/memmap",
- },
- package = "fsutil",
- prefix = "Dirty",
- template = "//pkg/segment:generic_set",
- types = {
- "Key": "uint64",
- "Range": "memmap.MappableRange",
- "Value": "DirtyInfo",
- "Functions": "dirtySetFunctions",
- },
-)
-
-go_template_instance(
- name = "frame_ref_set_impl",
- out = "frame_ref_set_impl.go",
- imports = {
- "memmap": "gvisor.dev/gvisor/pkg/sentry/memmap",
- },
- package = "fsutil",
- prefix = "FrameRef",
- template = "//pkg/segment:generic_set",
- types = {
- "Key": "uint64",
- "Range": "memmap.FileRange",
- "Value": "uint64",
- "Functions": "FrameRefSetFunctions",
- },
-)
-
-go_template_instance(
- name = "file_range_set_impl",
- out = "file_range_set_impl.go",
- imports = {
- "memmap": "gvisor.dev/gvisor/pkg/sentry/memmap",
- },
- package = "fsutil",
- prefix = "FileRange",
- template = "//pkg/segment:generic_set",
- types = {
- "Key": "uint64",
- "Range": "memmap.MappableRange",
- "Value": "uint64",
- "Functions": "FileRangeSetFunctions",
- },
-)
-
-go_library(
- name = "fsutil",
- srcs = [
- "dirty_set.go",
- "dirty_set_impl.go",
- "file.go",
- "file_range_set.go",
- "file_range_set_impl.go",
- "frame_ref_set.go",
- "frame_ref_set_impl.go",
- "fsutil.go",
- "host_file_mapper.go",
- "host_file_mapper_state.go",
- "host_file_mapper_unsafe.go",
- "host_mappable.go",
- "inode.go",
- "inode_cached.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/safemem",
- "//pkg/sentry/arch",
- "//pkg/sentry/device",
- "//pkg/sentry/fs",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/memmap",
- "//pkg/sentry/pgalloc",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/usage",
- "//pkg/state",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- "//pkg/waiter",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "fsutil_test",
- size = "small",
- srcs = [
- "dirty_set_test.go",
- "inode_cached_test.go",
- ],
- library = ":fsutil",
- deps = [
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/safemem",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/fs",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/memmap",
- "//pkg/usermem",
- ],
-)
diff --git a/pkg/sentry/fs/fsutil/README.md b/pkg/sentry/fs/fsutil/README.md
deleted file mode 100644
index 8be367334..000000000
--- a/pkg/sentry/fs/fsutil/README.md
+++ /dev/null
@@ -1,207 +0,0 @@
-This package provides utilities for implementing virtual filesystem objects.
-
-[TOC]
-
-## Page cache
-
-`CachingInodeOperations` implements a page cache for files that cannot use the
-host page cache. Normally these are files that store their data in a remote
-filesystem. This also applies to files that are accessed on a platform that
-does not support direct memory mapping of host file descriptors (e.g. the
-ptrace platform).
-
-A `CachingInodeOperations` buffers regions of a single file into memory. It is
-owned by an `fs.Inode`, the in-memory representation of a file (all open file
-descriptors are backed by an `fs.Inode`). The `fs.Inode` provides operations for
-reading memory into a `CachingInodeOperations`, to represent the contents of
-the file in memory, and for writing memory out, to relieve memory pressure on
-the kernel and to synchronize in-memory changes to filesystems.
-
-A `CachingInodeOperations` enables readable and/or writable memory access to
-file content. Files can be mapped shared or private, see mmap(2). When a file is
-mapped shared, changes to the file via write(2) and truncate(2) are reflected in
-the shared memory region. Conversely, when the shared memory region is modified,
-changes to the file are visible via read(2). Multiple shared mappings of the
-same file are coherent with each other. This is consistent with Linux.
-
-When a file is mapped private, updates to the mapped memory are not visible to
-other memory mappings. Updates to the mapped memory are also not reflected in
-the file content as seen by read(2). If the file is changed after a private
-mapping is created, for instance by write(2), the change to the file may or may
-not be reflected in the private mapping. This is consistent with Linux.
-
-A `CachingInodeOperations` keeps track of ranges of memory that were modified
-(or "dirtied"). When the file is explicitly synced via fsync(2), only the dirty
-ranges are written out to the filesystem. Any error returned indicates a failure
-to write all dirty memory of a `CachingInodeOperations` to the filesystem. In
-this case the filesystem may be in an inconsistent state. The same operation can
-be performed on the shared memory itself using msync(2). If neither fsync(2) nor
-msync(2) is performed, then the dirty memory is written out in accordance with
-the `CachingInodeOperations` eviction strategy (see below) and there is no
-guarantee that memory will be written out successfully in full.
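-
-From the application's side, the flush described above is the ordinary
-msync(2)/fsync(2) sequence. A minimal Go sketch, assuming a Linux host and the
-golang.org/x/sys/unix package:
-
-```
-package example
-
-import "golang.org/x/sys/unix"
-
-// flushMapping dirties a shared mapping of fd and then explicitly writes the
-// dirty ranges back, followed by a sync of the file itself.
-func flushMapping(fd int) error {
-	buf, err := unix.Mmap(fd, 0, 0x1000, unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED)
-	if err != nil {
-		return err
-	}
-	defer unix.Munmap(buf)
-	copy(buf, "new contents") // dirties part of the mapping
-	if err := unix.Msync(buf, unix.MS_SYNC); err != nil {
-		return err // some dirty ranges may not have been written back
-	}
-	return unix.Fsync(fd)
-}
-```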
-
-### Memory allocation and eviction
-
-A `CachingInodeOperations` implements the following allocation and eviction
-strategy:
-
-- Memory is allocated and brought up to date with the contents of a file when
- a region of mapped memory is accessed (or "faulted on").
-
-- Dirty memory is written out to filesystems when an fsync(2) or msync(2)
- operation is performed on a memory mapped file, for all memory mapped files
- when saved, and/or when there are no longer any memory mappings of a range
- of a file, see munmap(2). As the latter implies, in the absence of a panic
- or SIGKILL, dirty memory is written out for all memory mapped files when an
- application exits.
-
-- Memory is freed when there are no longer any memory mappings of a range of a
- file (e.g. when an application exits). This behavior is consistent with
- Linux for shared memory that has been locked via mlock(2).
-
-Notably, memory is not allocated for read(2) or write(2) operations. This means
-that reads and writes to the file are only accelerated by a
-`CachingInodeOperations` if the file being read or written has been memory
-mapped *and* if the shared memory has been accessed at the region being read or
-written. This diverges from Linux, which buffers memory into a page cache on
-read(2) proactively (i.e. readahead) and delays writing it out to filesystems on
-write(2) (i.e. writeback). The absence of these optimizations is not visible to
-applications beyond less-than-optimal performance when repeatedly reading
-and/or writing to the same region of a file. See [Future Work](#future-work)
-for plans to
-implement these optimizations.
-
-Additionally, memory held by `CachingInodeOperations` objects is currently
-unbounded in size. A `CachingInodeOperations` does not write out dirty memory
-and free it under system memory pressure. This can cause pathological memory
-usage.
-
-When memory is written back, a `CachingInodeOperations` may write regions of
-shared memory that were never modified. This is due to the strategy of
-minimizing page faults (see below) and handling only a subset of memory write
-faults. In the absence of an application or sentry crash, it is guaranteed that
-if a region of shared memory was written to, it is written back to a filesystem.
-
-### Life of a shared memory mapping
-
-A file is memory mapped via mmap(2). For example, if `A` is an address, an
-application may execute:
-
-```
-mmap(A, 0x1000, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
-```
-
-This creates a shared mapping of fd that reflects 4k of the contents of fd
-starting at offset 0, accessible at address `A`. This in turn creates a virtual
-memory area ("vma"), which indicates that [`A`, `A`+0x1000) is now a valid
-address range for this application to access.
-
-At this point, memory has not been allocated in the file's
-`CachingInodeOperations`. It is also the case that the address range [`A`,
-`A`+0x1000) has not been mapped on the host on behalf of the application. If the
-application then tries to modify 8 bytes of the shared memory:
-
-```
-char buffer[] = "aaaaaaaa";
-memcpy(A, buffer, 8);
-```
-
-The host then sends a `SIGSEGV` to the sentry because the address range [`A`,
-`A`+8) is not mapped on the host. The `SIGSEGV` indicates that the memory was
-accessed writable. The sentry looks up the vma associated with [`A`, `A`+8),
-finds the file that was mapped and its `CachingInodeOperations`. It then calls
-`CachingInodeOperations.Translate` which allocates memory to back [`A`, `A`+8).
-It may choose to allocate more memory (i.e. do "readahead") to minimize
-subsequent faults.
-
-Memory that is allocated comes from a host tmpfs file (see
-`pgalloc.MemoryFile`). The host tmpfs file memory is brought up to date with the
-contents of the mapped file on its filesystem. The region of the host tmpfs file
-that reflects the mapped file is then mapped into the host address space of the
-application so that subsequent memory accesses do not repeatedly generate a
-`SIGSEGV`.
-
-The range that was allocated, including any extra memory allocation to minimize
-faults, is marked dirty due to the write fault. This overcounts dirty memory if
-the extra memory allocated is never modified.
-
-To make the scenario more interesting, imagine that this application spawns
-another process and maps the same file in the exact same way:
-
-```
-mmap(A, 0x1000, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
-```
-
-Imagine that this process then tries to modify the file again but with only 4
-bytes:
-
-```
-char buffer[] = "bbbb";
-memcpy(A, buffer, 4);
-```
-
-Since the first process has already mapped and accessed the same region of the
-file writable, `CachingInodeOperations.Translate` is called but returns the
-memory that has already been allocated rather than allocating new memory. The
-address range [`A`, `A`+0x1000) reflects the same cached view of the file as the
-first process sees. For example, reading 8 bytes from the file from either
-process via read(2) starting at offset 0 returns a consistent "bbbbaaaa".
-
-When this process no longer needs the shared memory, it may do:
-
-```
-munmap(A, 0x1000);
-```
-
-At this point, the modified memory cached by the `CachingInodeOperations` is not
-written back to the file because it is still in use by the first process that
-mapped it. When the first process also does:
-
-```
-munmap(A, 0x1000);
-```
-
-Then the last memory mapping of the file at the range [0, 0x1000) is gone. The
-file's `CachingInodeOperations` then starts writing back memory marked dirty to
-the file on its filesystem. Once writing completes, regardless of whether it was
-successful, the `CachingInodeOperations` frees the memory cached at the range
-[0, 0x1000).
-
-Subsequent read(2) or write(2) operations on the file go directly to the
-filesystem, since its `CachingInodeOperations` no longer holds any memory for
-that range.
-
-## Future Work
-
-### Page cache
-
-The sentry does not yet implement the readahead and writeback optimizations for
-read(2) and write(2) respectively. To do so, on read(2) and/or write(2) the
-sentry must ensure that memory is allocated in a page cache to read or write
-into. However, the sentry cannot boundlessly allocate memory. If it did, the
-host would eventually OOM-kill the sentry+application process. This means that
-the sentry must implement a page cache memory allocation strategy that is
-bounded by a global user or container imposed limit. When this limit is
-approached, the sentry must decide from which page cache memory should be freed
-so that it can allocate more memory. If it makes a poor decision, the sentry may
-end up freeing and re-allocating memory to back regions of files that are
-frequently used, nullifying the optimization (and in some cases causing worse
-performance due to the overhead of memory allocation and general management).
-This is a form of "cache thrashing".
-
-In Linux, much research has been done to select and implement a lightweight but
-optimal page cache eviction algorithm. Linux makes use of hardware page bits to
-keep track of whether memory has been accessed. The sentry does not have direct
-access to hardware. Implementing a similarly lightweight and optimal page cache
-eviction algorithm will need to either introduce a kernel interface to obtain
-these page bits or find a suitable alternative proxy for access events.
-
-In Linux, readahead happens by default but is not always ideal. For instance,
-for files that are not read sequentially, it would be better to read only the
-regions of the file that are actually accessed rather than to optimistically
-cache some number of bytes ahead of the read (up to 2MB in Linux) when the
-cached bytes won't be accessed. Linux implements the fadvise64(2) system call
-for applications to
-specify that a range of a file will not be accessed sequentially. The advice bit
-FADV_RANDOM turns off the readahead optimization for the given range in the
-given file. However, fadvise64 is rarely used by applications, so Linux implements
-a readahead backoff strategy if reads are not sequential. To ensure that
-application performance is not degraded, the sentry must implement a similar
-backoff strategy.
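-
-For illustration, an application can already opt a file range out of readahead
-on Linux today. A minimal sketch, assuming a Linux host and the
-golang.org/x/sys/unix package (this shows fadvise64(2) usage by an application,
-not a sentry interface):
-
-```
-package example
-
-import "golang.org/x/sys/unix"
-
-// disableReadahead marks the entire file as randomly accessed, turning off
-// kernel readahead for it via FADV_RANDOM.
-func disableReadahead(fd int) error {
-	// A length of 0 conventionally means "to the end of the file".
-	return unix.Fadvise(fd, 0, 0, unix.FADV_RANDOM)
-}
-```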
diff --git a/pkg/sentry/fs/fsutil/dirty_set_impl.go b/pkg/sentry/fs/fsutil/dirty_set_impl.go
new file mode 100644
index 000000000..2c6a10fc4
--- /dev/null
+++ b/pkg/sentry/fs/fsutil/dirty_set_impl.go
@@ -0,0 +1,1647 @@
+package fsutil
+
+import (
+ __generics_imported0 "gvisor.dev/gvisor/pkg/sentry/memmap"
+)
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// trackGaps is an optional parameter.
+//
+// If trackGaps is 1, the Set will track maximum gap size recursively,
+// enabling the GapIterator.{Prev,Next}LargeEnoughGap functions. In this
+// case, Key must be an unsigned integer.
+//
+// trackGaps must be 0 or 1.
+const DirtytrackGaps = 0
+
+var _ = uint8(DirtytrackGaps << 7) // Will fail if not zero or one.
+
+// dynamicGap is a type that disappears if trackGaps is 0.
+type DirtydynamicGap [DirtytrackGaps]uint64
+
+// Get returns the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *DirtydynamicGap) Get() uint64 {
+ return d[:][0]
+}
+
+// Set sets the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *DirtydynamicGap) Set(v uint64) {
+ d[:][0] = v
+}
+
+const (
+ // minDegree is the minimum degree of an internal node in a Set B-tree.
+ //
+ // - Any non-root node has at least minDegree-1 segments.
+ //
+ // - Any non-root internal (non-leaf) node has at least minDegree children.
+ //
+ // - The root node may have fewer than minDegree-1 segments, but it may
+ // only have 0 segments if the tree is empty.
+ //
+ // Our implementation requires minDegree >= 3. Higher values of minDegree
+ // usually improve performance, but increase memory usage for small sets.
+ DirtyminDegree = 3
+
+ DirtymaxDegree = 2 * DirtyminDegree
+)
+
+// A Set is a mapping of segments with non-overlapping Range keys. The zero
+// value for a Set is an empty set. Set values are not safely movable nor
+// copyable. Set is thread-compatible.
+//
+// +stateify savable
+type DirtySet struct {
+ root Dirtynode `state:".(*DirtySegmentDataSlices)"`
+}
+
+// IsEmpty returns true if the set contains no segments.
+func (s *DirtySet) IsEmpty() bool {
+ return s.root.nrSegments == 0
+}
+
+// IsEmptyRange returns true iff no segments in the set overlap the given
+// range. This is semantically equivalent to s.SpanRange(r) == 0, but may be
+// more efficient.
+func (s *DirtySet) IsEmptyRange(r __generics_imported0.MappableRange) bool {
+ switch {
+ case r.Length() < 0:
+ panic(fmt.Sprintf("invalid range %v", r))
+ case r.Length() == 0:
+ return true
+ }
+ _, gap := s.Find(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ return r.End <= gap.End()
+}
+
+// Span returns the total size of all segments in the set.
+func (s *DirtySet) Span() uint64 {
+ var sz uint64
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ sz += seg.Range().Length()
+ }
+ return sz
+}
+
+// SpanRange returns the total size of the intersection of segments in the set
+// with the given range.
+func (s *DirtySet) SpanRange(r __generics_imported0.MappableRange) uint64 {
+ switch {
+ case r.Length() < 0:
+ panic(fmt.Sprintf("invalid range %v", r))
+ case r.Length() == 0:
+ return 0
+ }
+ var sz uint64
+ for seg := s.LowerBoundSegment(r.Start); seg.Ok() && seg.Start() < r.End; seg = seg.NextSegment() {
+ sz += seg.Range().Intersect(r).Length()
+ }
+ return sz
+}
+
+// FirstSegment returns the first segment in the set. If the set is empty,
+// FirstSegment returns a terminal iterator.
+func (s *DirtySet) FirstSegment() DirtyIterator {
+ if s.root.nrSegments == 0 {
+ return DirtyIterator{}
+ }
+ return s.root.firstSegment()
+}
+
+// LastSegment returns the last segment in the set. If the set is empty,
+// LastSegment returns a terminal iterator.
+func (s *DirtySet) LastSegment() DirtyIterator {
+ if s.root.nrSegments == 0 {
+ return DirtyIterator{}
+ }
+ return s.root.lastSegment()
+}
+
+// FirstGap returns the first gap in the set.
+func (s *DirtySet) FirstGap() DirtyGapIterator {
+ n := &s.root
+ for n.hasChildren {
+ n = n.children[0]
+ }
+ return DirtyGapIterator{n, 0}
+}
+
+// LastGap returns the last gap in the set.
+func (s *DirtySet) LastGap() DirtyGapIterator {
+ n := &s.root
+ for n.hasChildren {
+ n = n.children[n.nrSegments]
+ }
+ return DirtyGapIterator{n, n.nrSegments}
+}
+
+// Find returns the segment or gap whose range contains the given key. If a
+// segment is found, the returned Iterator is non-terminal and the
+// returned GapIterator is terminal. Otherwise, the returned Iterator is
+// terminal and the returned GapIterator is non-terminal.
+func (s *DirtySet) Find(key uint64) (DirtyIterator, DirtyGapIterator) {
+ n := &s.root
+ for {
+
+ lower := 0
+ upper := n.nrSegments
+ for lower < upper {
+ i := lower + (upper-lower)/2
+ if r := n.keys[i]; key < r.End {
+ if key >= r.Start {
+ return DirtyIterator{n, i}, DirtyGapIterator{}
+ }
+ upper = i
+ } else {
+ lower = i + 1
+ }
+ }
+ i := lower
+ if !n.hasChildren {
+ return DirtyIterator{}, DirtyGapIterator{n, i}
+ }
+ n = n.children[i]
+ }
+}
+
+// FindSegment returns the segment whose range contains the given key. If no
+// such segment exists, FindSegment returns a terminal iterator.
+func (s *DirtySet) FindSegment(key uint64) DirtyIterator {
+ seg, _ := s.Find(key)
+ return seg
+}
+
+// LowerBoundSegment returns the segment with the lowest range that contains a
+// key greater than or equal to min. If no such segment exists,
+// LowerBoundSegment returns a terminal iterator.
+func (s *DirtySet) LowerBoundSegment(min uint64) DirtyIterator {
+ seg, gap := s.Find(min)
+ if seg.Ok() {
+ return seg
+ }
+ return gap.NextSegment()
+}
+
+// UpperBoundSegment returns the segment with the highest range that contains a
+// key less than or equal to max. If no such segment exists, UpperBoundSegment
+// returns a terminal iterator.
+func (s *DirtySet) UpperBoundSegment(max uint64) DirtyIterator {
+ seg, gap := s.Find(max)
+ if seg.Ok() {
+ return seg
+ }
+ return gap.PrevSegment()
+}
+
+// FindGap returns the gap containing the given key. If no such gap exists
+// (i.e. the set contains a segment containing that key), FindGap returns a
+// terminal iterator.
+func (s *DirtySet) FindGap(key uint64) DirtyGapIterator {
+ _, gap := s.Find(key)
+ return gap
+}
+
+// LowerBoundGap returns the gap with the lowest range that is greater than or
+// equal to min.
+func (s *DirtySet) LowerBoundGap(min uint64) DirtyGapIterator {
+ seg, gap := s.Find(min)
+ if gap.Ok() {
+ return gap
+ }
+ return seg.NextGap()
+}
+
+// UpperBoundGap returns the gap with the highest range that is less than or
+// equal to max.
+func (s *DirtySet) UpperBoundGap(max uint64) DirtyGapIterator {
+ seg, gap := s.Find(max)
+ if gap.Ok() {
+ return gap
+ }
+ return seg.PrevGap()
+}
+
+// Add inserts the given segment into the set and returns true. If the new
+// segment can be merged with adjacent segments, Add will do so. If the new
+// segment would overlap an existing segment, Add returns false. If Add
+// succeeds, all existing iterators are invalidated.
+func (s *DirtySet) Add(r __generics_imported0.MappableRange, val DirtyInfo) bool {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ gap := s.FindGap(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ if r.End > gap.End() {
+ return false
+ }
+ s.Insert(gap, r, val)
+ return true
+}
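+
+// Example (editorial sketch, not part of the generated API; memmap is the
+// package imported in this file as __generics_imported0, and DirtyInfo's zero
+// value is used as a placeholder):
+//
+//	var s DirtySet
+//	if !s.Add(memmap.MappableRange{Start: 0, End: 0x1000}, DirtyInfo{}) {
+//		// The range overlapped an existing segment; nothing was inserted.
+//	}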
+
+// AddWithoutMerging inserts the given segment into the set and returns true.
+// If it would overlap an existing segment, AddWithoutMerging does nothing and
+// returns false. If AddWithoutMerging succeeds, all existing iterators are
+// invalidated.
+func (s *DirtySet) AddWithoutMerging(r __generics_imported0.MappableRange, val DirtyInfo) bool {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ gap := s.FindGap(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ if r.End > gap.End() {
+ return false
+ }
+ s.InsertWithoutMergingUnchecked(gap, r, val)
+ return true
+}
+
+// Insert inserts the given segment into the given gap. If the new segment can
+// be merged with adjacent segments, Insert will do so. Insert returns an
+// iterator to the segment containing the inserted value (which may have been
+// merged with other values). All existing iterators (including gap, but not
+// including the returned iterator) are invalidated.
+//
+// If the gap cannot accommodate the segment, or if r is invalid, Insert panics.
+//
+// Insert is semantically equivalent to an InsertWithoutMerging followed by a

+// Merge, but may be more efficient. Note that there is no unchecked variant of
+// Insert since Insert must retrieve and inspect gap's predecessor and
+// successor segments regardless.
+func (s *DirtySet) Insert(gap DirtyGapIterator, r __generics_imported0.MappableRange, val DirtyInfo) DirtyIterator {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ prev, next := gap.PrevSegment(), gap.NextSegment()
+ if prev.Ok() && prev.End() > r.Start {
+ panic(fmt.Sprintf("new segment %v overlaps predecessor %v", r, prev.Range()))
+ }
+ if next.Ok() && next.Start() < r.End {
+ panic(fmt.Sprintf("new segment %v overlaps successor %v", r, next.Range()))
+ }
+ if prev.Ok() && prev.End() == r.Start {
+ if mval, ok := (dirtySetFunctions{}).Merge(prev.Range(), prev.Value(), r, val); ok {
+ shrinkMaxGap := DirtytrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
+ prev.SetEndUnchecked(r.End)
+ prev.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ if next.Ok() && next.Start() == r.End {
+ val = mval
+ if mval, ok := (dirtySetFunctions{}).Merge(prev.Range(), val, next.Range(), next.Value()); ok {
+ prev.SetEndUnchecked(next.End())
+ prev.SetValue(mval)
+ return s.Remove(next).PrevSegment()
+ }
+ }
+ return prev
+ }
+ }
+ if next.Ok() && next.Start() == r.End {
+ if mval, ok := (dirtySetFunctions{}).Merge(r, val, next.Range(), next.Value()); ok {
+ shrinkMaxGap := DirtytrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
+ next.SetStartUnchecked(r.Start)
+ next.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ return next
+ }
+ }
+
+ return s.InsertWithoutMergingUnchecked(gap, r, val)
+}
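+
+// Example (editorial sketch): Insert requires a gap that can hold r, typically
+// located with FindGap; merging with neighbors is attempted automatically.
+// Keep is the DirtyInfo field used elsewhere in this package.
+//
+//	if gap := s.FindGap(r.Start); gap.Ok() && r.End <= gap.End() {
+//		seg := s.Insert(gap, r, DirtyInfo{Keep: true})
+//		_ = seg // May span more than r if it merged with neighbors.
+//	}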
+
+// InsertWithoutMerging inserts the given segment into the given gap and
+// returns an iterator to the inserted segment. All existing iterators
+// (including gap, but not including the returned iterator) are invalidated.
+//
+// If the gap cannot accommodate the segment, or if r is invalid,
+// InsertWithoutMerging panics.
+func (s *DirtySet) InsertWithoutMerging(gap DirtyGapIterator, r __generics_imported0.MappableRange, val DirtyInfo) DirtyIterator {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ if gr := gap.Range(); !gr.IsSupersetOf(r) {
+ panic(fmt.Sprintf("cannot insert segment range %v into gap range %v", r, gr))
+ }
+ return s.InsertWithoutMergingUnchecked(gap, r, val)
+}
+
+// InsertWithoutMergingUnchecked inserts the given segment into the given gap
+// and returns an iterator to the inserted segment. All existing iterators
+// (including gap, but not including the returned iterator) are invalidated.
+//
+// Preconditions:
+// * r.Start >= gap.Start().
+// * r.End <= gap.End().
+func (s *DirtySet) InsertWithoutMergingUnchecked(gap DirtyGapIterator, r __generics_imported0.MappableRange, val DirtyInfo) DirtyIterator {
+ gap = gap.node.rebalanceBeforeInsert(gap)
+ splitMaxGap := DirtytrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
+ copy(gap.node.keys[gap.index+1:], gap.node.keys[gap.index:gap.node.nrSegments])
+ copy(gap.node.values[gap.index+1:], gap.node.values[gap.index:gap.node.nrSegments])
+ gap.node.keys[gap.index] = r
+ gap.node.values[gap.index] = val
+ gap.node.nrSegments++
+ if splitMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ return DirtyIterator{gap.node, gap.index}
+}
+
+// Remove removes the given segment and returns an iterator to the vacated gap.
+// All existing iterators (including seg, but not including the returned
+// iterator) are invalidated.
+func (s *DirtySet) Remove(seg DirtyIterator) DirtyGapIterator {
+
+ if seg.node.hasChildren {
+
+ victim := seg.PrevSegment()
+
+ seg.SetRangeUnchecked(victim.Range())
+ seg.SetValue(victim.Value())
+
+ nextAdjacentNode := seg.NextSegment().node
+ if DirtytrackGaps != 0 {
+ nextAdjacentNode.updateMaxGapLeaf()
+ }
+ return s.Remove(victim).NextGap()
+ }
+ copy(seg.node.keys[seg.index:], seg.node.keys[seg.index+1:seg.node.nrSegments])
+ copy(seg.node.values[seg.index:], seg.node.values[seg.index+1:seg.node.nrSegments])
+ dirtySetFunctions{}.ClearValue(&seg.node.values[seg.node.nrSegments-1])
+ seg.node.nrSegments--
+ if DirtytrackGaps != 0 {
+ seg.node.updateMaxGapLeaf()
+ }
+ return seg.node.rebalanceAfterRemove(DirtyGapIterator{seg.node, seg.index})
+}
+
+// RemoveAll removes all segments from the set. All existing iterators are
+// invalidated.
+func (s *DirtySet) RemoveAll() {
+ s.root = Dirtynode{}
+}
+
+// RemoveRange removes all segments in the given range. An iterator to the
+// newly formed gap is returned, and all existing iterators are invalidated.
+func (s *DirtySet) RemoveRange(r __generics_imported0.MappableRange) DirtyGapIterator {
+ seg, gap := s.Find(r.Start)
+ if seg.Ok() {
+ seg = s.Isolate(seg, r)
+ gap = s.Remove(seg)
+ }
+ for seg = gap.NextSegment(); seg.Ok() && seg.Start() < r.End; seg = gap.NextSegment() {
+ seg = s.Isolate(seg, r)
+ gap = s.Remove(seg)
+ }
+ return gap
+}
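+
+// Example (editorial sketch): segments straddling the boundaries of the
+// removed range are split first, so only the portion inside r is dropped.
+//
+//	gap := s.RemoveRange(memmap.MappableRange{Start: 0x1000, End: 0x3000})
+//	// gap now covers at least [0x1000, 0x3000).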
+
+// Merge attempts to merge two neighboring segments. If successful, Merge
+// returns an iterator to the merged segment, and all existing iterators are
+// invalidated. Otherwise, Merge returns a terminal iterator.
+//
+// If first is not the predecessor of second, Merge panics.
+func (s *DirtySet) Merge(first, second DirtyIterator) DirtyIterator {
+ if first.NextSegment() != second {
+ panic(fmt.Sprintf("attempt to merge non-neighboring segments %v, %v", first.Range(), second.Range()))
+ }
+ return s.MergeUnchecked(first, second)
+}
+
+// MergeUnchecked attempts to merge two neighboring segments. If successful,
+// MergeUnchecked returns an iterator to the merged segment, and all existing
+// iterators are invalidated. Otherwise, MergeUnchecked returns a terminal
+// iterator.
+//
+// Precondition: first is the predecessor of second: first.NextSegment() ==
+// second, first == second.PrevSegment().
+func (s *DirtySet) MergeUnchecked(first, second DirtyIterator) DirtyIterator {
+ if first.End() == second.Start() {
+ if mval, ok := (dirtySetFunctions{}).Merge(first.Range(), first.Value(), second.Range(), second.Value()); ok {
+
+ first.SetEndUnchecked(second.End())
+ first.SetValue(mval)
+
+ return s.Remove(second).PrevSegment()
+ }
+ }
+ return DirtyIterator{}
+}
+
+// MergeAll attempts to merge all adjacent segments in the set. All existing
+// iterators are invalidated.
+func (s *DirtySet) MergeAll() {
+ seg := s.FirstSegment()
+ if !seg.Ok() {
+ return
+ }
+ next := seg.NextSegment()
+ for next.Ok() {
+ if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
+ seg, next = mseg, mseg.NextSegment()
+ } else {
+ seg, next = next, next.NextSegment()
+ }
+ }
+}
+
+// MergeRange attempts to merge all adjacent segments that contain a key in the
+// given range. All existing iterators are invalidated.
+func (s *DirtySet) MergeRange(r __generics_imported0.MappableRange) {
+ seg := s.LowerBoundSegment(r.Start)
+ if !seg.Ok() {
+ return
+ }
+ next := seg.NextSegment()
+ for next.Ok() && next.Range().Start < r.End {
+ if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
+ seg, next = mseg, mseg.NextSegment()
+ } else {
+ seg, next = next, next.NextSegment()
+ }
+ }
+}
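+
+// Example (editorial sketch): after a series of point updates, adjacent
+// segments whose values are mergeable (per dirtySetFunctions.Merge) can be
+// coalesced either across the whole set or within a window.
+//
+//	s.MergeAll()
+//	s.MergeRange(memmap.MappableRange{Start: 0, End: 0x4000})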
+
+// MergeAdjacent attempts to merge the segment containing r.Start with its
+// predecessor, and the segment containing r.End-1 with its successor.
+func (s *DirtySet) MergeAdjacent(r __generics_imported0.MappableRange) {
+ first := s.FindSegment(r.Start)
+ if first.Ok() {
+ if prev := first.PrevSegment(); prev.Ok() {
+ s.Merge(prev, first)
+ }
+ }
+ last := s.FindSegment(r.End - 1)
+ if last.Ok() {
+ if next := last.NextSegment(); next.Ok() {
+ s.Merge(last, next)
+ }
+ }
+}
+
+// Split splits the given segment at the given key and returns iterators to the
+// two resulting segments. All existing iterators (including seg, but not
+// including the returned iterators) are invalidated.
+//
+// If the segment cannot be split at split (because split is at the start or
+// end of the segment's range, so splitting would produce a segment with zero
+// length, or because split falls outside the segment's range altogether),
+// Split panics.
+func (s *DirtySet) Split(seg DirtyIterator, split uint64) (DirtyIterator, DirtyIterator) {
+ if !seg.Range().CanSplitAt(split) {
+ panic(fmt.Sprintf("can't split %v at %v", seg.Range(), split))
+ }
+ return s.SplitUnchecked(seg, split)
+}
+
+// SplitUnchecked splits the given segment at the given key and returns
+// iterators to the two resulting segments. All existing iterators (including
+// seg, but not including the returned iterators) are invalidated.
+//
+// Preconditions: seg.Start() < split < seg.End().
+func (s *DirtySet) SplitUnchecked(seg DirtyIterator, split uint64) (DirtyIterator, DirtyIterator) {
+ val1, val2 := (dirtySetFunctions{}).Split(seg.Range(), seg.Value(), split)
+ end2 := seg.End()
+ seg.SetEndUnchecked(split)
+ seg.SetValue(val1)
+ seg2 := s.InsertWithoutMergingUnchecked(seg.NextGap(), __generics_imported0.MappableRange{split, end2}, val2)
+
+ return seg2.PrevSegment(), seg2
+}
+
+// SplitAt splits the segment straddling split, if one exists. SplitAt returns
+// true if a segment was split and false otherwise. If SplitAt splits a
+// segment, all existing iterators are invalidated.
+func (s *DirtySet) SplitAt(split uint64) bool {
+ if seg := s.FindSegment(split); seg.Ok() && seg.Range().CanSplitAt(split) {
+ s.SplitUnchecked(seg, split)
+ return true
+ }
+ return false
+}
+
+// Isolate ensures that the given segment's range does not escape r by
+// splitting at r.Start and r.End if necessary, and returns an updated iterator
+// to the bounded segment. All existing iterators (including seg, but not
+// including the returned iterator) are invalidated.
+func (s *DirtySet) Isolate(seg DirtyIterator, r __generics_imported0.MappableRange) DirtyIterator {
+ if seg.Range().CanSplitAt(r.Start) {
+ _, seg = s.SplitUnchecked(seg, r.Start)
+ }
+ if seg.Range().CanSplitAt(r.End) {
+ seg, _ = s.SplitUnchecked(seg, r.End)
+ }
+ return seg
+}
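+
+// Example (editorial sketch): Isolate is the usual way to confine an update to
+// r when an existing segment extends beyond it; Keep is the DirtyInfo field
+// used elsewhere in this package.
+//
+//	if seg := s.FindSegment(r.Start); seg.Ok() {
+//		seg = s.Isolate(seg, r) // Splits at r.Start and/or r.End as needed.
+//		seg.ValuePtr().Keep = true
+//	}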
+
+// ApplyContiguous applies a function to a contiguous range of segments,
+// splitting if necessary. The function is applied until the first gap is
+// encountered, at which point the gap is returned. If the function is applied
+// across the entire range, a terminal gap is returned. All existing iterators
+// are invalidated.
+//
+// N.B. The Iterator must not be invalidated by the function.
+func (s *DirtySet) ApplyContiguous(r __generics_imported0.MappableRange, fn func(seg DirtyIterator)) DirtyGapIterator {
+ seg, gap := s.Find(r.Start)
+ if !seg.Ok() {
+ return gap
+ }
+ for {
+ seg = s.Isolate(seg, r)
+ fn(seg)
+ if seg.End() >= r.End {
+ return DirtyGapIterator{}
+ }
+ gap = seg.NextGap()
+ if !gap.IsEmpty() {
+ return gap
+ }
+ seg = gap.NextSegment()
+ if !seg.Ok() {
+
+ return DirtyGapIterator{}
+ }
+ }
+}
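+
+// Example (editorial sketch): applying a mutation across r only where segments
+// cover it; a non-terminal return value identifies the first hole.
+//
+//	if gap := s.ApplyContiguous(r, func(seg DirtyIterator) {
+//		seg.ValuePtr().Keep = true
+//	}); gap.Ok() {
+//		// r was not fully covered; gap is the first uncovered hole.
+//	}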
+
+// +stateify savable
+type Dirtynode struct {
+ // An internal binary tree node looks like:
+ //
+ //     K
+ //    / \
+ //   Cl Cr
+ //
+ // where all keys in the subtree rooted by Cl (the left subtree) are less
+ // than K (the key of the parent node), and all keys in the subtree rooted
+ // by Cr (the right subtree) are greater than K.
+ //
+ // An internal B-tree node's indexes work out to look like:
+ //
+ //   K0 K1 K2  ...  Kn-1
+ //  / \/ \/ \  ...  /  \
+ // C0 C1 C2 C3 ... Cn-1 Cn
+ //
+ // where n is nrSegments.
+ nrSegments int
+
+ // parent is a pointer to this node's parent. If this node is root, parent
+ // is nil.
+ parent *Dirtynode
+
+ // parentIndex is the index of this node in parent.children.
+ parentIndex int
+
+ // Flag for internal nodes that is technically redundant with "children[0]
+ // != nil", but is stored in the first cache line. "hasChildren" rather
+ // than "isLeaf" because false must be the correct value for an empty root.
+ hasChildren bool
+
+ // maxGap is the longest gap within this node. If the node is a leaf, it is
+ // the maximum of the (nrSegments+1) gaps delimited by its nrSegments keys,
+ // including the 0th and nrSegments-th gaps that may be shared with its
+ // ancestors; if it is an internal node, it is the maximum of its children's
+ // maxGap values.
+ maxGap DirtydynamicGap
+
+ // Nodes store keys and values in separate arrays to maximize locality in
+ // the common case (scanning keys for lookup).
+ keys [DirtymaxDegree - 1]__generics_imported0.MappableRange
+ values [DirtymaxDegree - 1]DirtyInfo
+ children [DirtymaxDegree]*Dirtynode
+}
+
+// firstSegment returns the first segment in the subtree rooted by n.
+//
+// Preconditions: n.nrSegments != 0.
+func (n *Dirtynode) firstSegment() DirtyIterator {
+ for n.hasChildren {
+ n = n.children[0]
+ }
+ return DirtyIterator{n, 0}
+}
+
+// lastSegment returns the last segment in the subtree rooted by n.
+//
+// Preconditions: n.nrSegments != 0.
+func (n *Dirtynode) lastSegment() DirtyIterator {
+ for n.hasChildren {
+ n = n.children[n.nrSegments]
+ }
+ return DirtyIterator{n, n.nrSegments - 1}
+}
+
+func (n *Dirtynode) prevSibling() *Dirtynode {
+ if n.parent == nil || n.parentIndex == 0 {
+ return nil
+ }
+ return n.parent.children[n.parentIndex-1]
+}
+
+func (n *Dirtynode) nextSibling() *Dirtynode {
+ if n.parent == nil || n.parentIndex == n.parent.nrSegments {
+ return nil
+ }
+ return n.parent.children[n.parentIndex+1]
+}
+
+// rebalanceBeforeInsert splits n and its ancestors if they are full, as
+// required for insertion, and returns an updated iterator to the position
+// represented by gap.
+func (n *Dirtynode) rebalanceBeforeInsert(gap DirtyGapIterator) DirtyGapIterator {
+ if n.nrSegments < DirtymaxDegree-1 {
+ return gap
+ }
+ if n.parent != nil {
+ gap = n.parent.rebalanceBeforeInsert(gap)
+ }
+ if n.parent == nil {
+
+ left := &Dirtynode{
+ nrSegments: DirtyminDegree - 1,
+ parent: n,
+ parentIndex: 0,
+ hasChildren: n.hasChildren,
+ }
+ right := &Dirtynode{
+ nrSegments: DirtyminDegree - 1,
+ parent: n,
+ parentIndex: 1,
+ hasChildren: n.hasChildren,
+ }
+ copy(left.keys[:DirtyminDegree-1], n.keys[:DirtyminDegree-1])
+ copy(left.values[:DirtyminDegree-1], n.values[:DirtyminDegree-1])
+ copy(right.keys[:DirtyminDegree-1], n.keys[DirtyminDegree:])
+ copy(right.values[:DirtyminDegree-1], n.values[DirtyminDegree:])
+ n.keys[0], n.values[0] = n.keys[DirtyminDegree-1], n.values[DirtyminDegree-1]
+ DirtyzeroValueSlice(n.values[1:])
+ if n.hasChildren {
+ copy(left.children[:DirtyminDegree], n.children[:DirtyminDegree])
+ copy(right.children[:DirtyminDegree], n.children[DirtyminDegree:])
+ DirtyzeroNodeSlice(n.children[2:])
+ for i := 0; i < DirtyminDegree; i++ {
+ left.children[i].parent = left
+ left.children[i].parentIndex = i
+ right.children[i].parent = right
+ right.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments = 1
+ n.hasChildren = true
+ n.children[0] = left
+ n.children[1] = right
+
+ if DirtytrackGaps != 0 {
+ left.updateMaxGapLocal()
+ right.updateMaxGapLocal()
+ }
+ if gap.node != n {
+ return gap
+ }
+ if gap.index < DirtyminDegree {
+ return DirtyGapIterator{left, gap.index}
+ }
+ return DirtyGapIterator{right, gap.index - DirtyminDegree}
+ }
+
+ copy(n.parent.keys[n.parentIndex+1:], n.parent.keys[n.parentIndex:n.parent.nrSegments])
+ copy(n.parent.values[n.parentIndex+1:], n.parent.values[n.parentIndex:n.parent.nrSegments])
+ n.parent.keys[n.parentIndex], n.parent.values[n.parentIndex] = n.keys[DirtyminDegree-1], n.values[DirtyminDegree-1]
+ copy(n.parent.children[n.parentIndex+2:], n.parent.children[n.parentIndex+1:n.parent.nrSegments+1])
+ for i := n.parentIndex + 2; i < n.parent.nrSegments+2; i++ {
+ n.parent.children[i].parentIndex = i
+ }
+ sibling := &Dirtynode{
+ nrSegments: DirtyminDegree - 1,
+ parent: n.parent,
+ parentIndex: n.parentIndex + 1,
+ hasChildren: n.hasChildren,
+ }
+ n.parent.children[n.parentIndex+1] = sibling
+ n.parent.nrSegments++
+ copy(sibling.keys[:DirtyminDegree-1], n.keys[DirtyminDegree:])
+ copy(sibling.values[:DirtyminDegree-1], n.values[DirtyminDegree:])
+ DirtyzeroValueSlice(n.values[DirtyminDegree-1:])
+ if n.hasChildren {
+ copy(sibling.children[:DirtyminDegree], n.children[DirtyminDegree:])
+ DirtyzeroNodeSlice(n.children[DirtyminDegree:])
+ for i := 0; i < DirtyminDegree; i++ {
+ sibling.children[i].parent = sibling
+ sibling.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments = DirtyminDegree - 1
+
+ if DirtytrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+
+ if gap.node != n {
+ return gap
+ }
+ if gap.index < DirtyminDegree {
+ return gap
+ }
+ return DirtyGapIterator{sibling, gap.index - DirtyminDegree}
+}
+
+// rebalanceAfterRemove "unsplits" n and its ancestors if they are deficient
+// (contain fewer segments than required by B-tree invariants), as required for
+// removal, and returns an updated iterator to the position represented by gap.
+//
+// Precondition: n is the only node in the tree that may currently violate a
+// B-tree invariant.
+func (n *Dirtynode) rebalanceAfterRemove(gap DirtyGapIterator) DirtyGapIterator {
+ for {
+ if n.nrSegments >= DirtyminDegree-1 {
+ return gap
+ }
+ if n.parent == nil {
+
+ return gap
+ }
+
+ if sibling := n.prevSibling(); sibling != nil && sibling.nrSegments >= DirtyminDegree {
+ copy(n.keys[1:], n.keys[:n.nrSegments])
+ copy(n.values[1:], n.values[:n.nrSegments])
+ n.keys[0] = n.parent.keys[n.parentIndex-1]
+ n.values[0] = n.parent.values[n.parentIndex-1]
+ n.parent.keys[n.parentIndex-1] = sibling.keys[sibling.nrSegments-1]
+ n.parent.values[n.parentIndex-1] = sibling.values[sibling.nrSegments-1]
+ dirtySetFunctions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
+ if n.hasChildren {
+ copy(n.children[1:], n.children[:n.nrSegments+1])
+ n.children[0] = sibling.children[sibling.nrSegments]
+ sibling.children[sibling.nrSegments] = nil
+ n.children[0].parent = n
+ n.children[0].parentIndex = 0
+ for i := 1; i < n.nrSegments+2; i++ {
+ n.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments++
+ sibling.nrSegments--
+
+ if DirtytrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+ if gap.node == sibling && gap.index == sibling.nrSegments {
+ return DirtyGapIterator{n, 0}
+ }
+ if gap.node == n {
+ return DirtyGapIterator{n, gap.index + 1}
+ }
+ return gap
+ }
+ if sibling := n.nextSibling(); sibling != nil && sibling.nrSegments >= DirtyminDegree {
+ n.keys[n.nrSegments] = n.parent.keys[n.parentIndex]
+ n.values[n.nrSegments] = n.parent.values[n.parentIndex]
+ n.parent.keys[n.parentIndex] = sibling.keys[0]
+ n.parent.values[n.parentIndex] = sibling.values[0]
+ copy(sibling.keys[:sibling.nrSegments-1], sibling.keys[1:])
+ copy(sibling.values[:sibling.nrSegments-1], sibling.values[1:])
+ dirtySetFunctions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
+ if n.hasChildren {
+ n.children[n.nrSegments+1] = sibling.children[0]
+ copy(sibling.children[:sibling.nrSegments], sibling.children[1:])
+ sibling.children[sibling.nrSegments] = nil
+ n.children[n.nrSegments+1].parent = n
+ n.children[n.nrSegments+1].parentIndex = n.nrSegments + 1
+ for i := 0; i < sibling.nrSegments; i++ {
+ sibling.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments++
+ sibling.nrSegments--
+
+ if DirtytrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+ if gap.node == sibling {
+ if gap.index == 0 {
+ return DirtyGapIterator{n, n.nrSegments}
+ }
+ return DirtyGapIterator{sibling, gap.index - 1}
+ }
+ return gap
+ }
+
+ p := n.parent
+ if p.nrSegments == 1 {
+
+ left, right := p.children[0], p.children[1]
+ p.nrSegments = left.nrSegments + right.nrSegments + 1
+ p.hasChildren = left.hasChildren
+ p.keys[left.nrSegments] = p.keys[0]
+ p.values[left.nrSegments] = p.values[0]
+ copy(p.keys[:left.nrSegments], left.keys[:left.nrSegments])
+ copy(p.values[:left.nrSegments], left.values[:left.nrSegments])
+ copy(p.keys[left.nrSegments+1:], right.keys[:right.nrSegments])
+ copy(p.values[left.nrSegments+1:], right.values[:right.nrSegments])
+ if left.hasChildren {
+ copy(p.children[:left.nrSegments+1], left.children[:left.nrSegments+1])
+ copy(p.children[left.nrSegments+1:], right.children[:right.nrSegments+1])
+ for i := 0; i < p.nrSegments+1; i++ {
+ p.children[i].parent = p
+ p.children[i].parentIndex = i
+ }
+ } else {
+ p.children[0] = nil
+ p.children[1] = nil
+ }
+
+ if gap.node == left {
+ return DirtyGapIterator{p, gap.index}
+ }
+ if gap.node == right {
+ return DirtyGapIterator{p, gap.index + left.nrSegments + 1}
+ }
+ return gap
+ }
+ // Merge n and either sibling, along with the segment separating the
+ // two, into whichever of the two nodes comes first. This is the
+ // reverse of the non-root splitting case in
+ // node.rebalanceBeforeInsert.
+ var left, right *Dirtynode
+ if n.parentIndex > 0 {
+ left = n.prevSibling()
+ right = n
+ } else {
+ left = n
+ right = n.nextSibling()
+ }
+
+ if gap.node == right {
+ gap = DirtyGapIterator{left, gap.index + left.nrSegments + 1}
+ }
+ left.keys[left.nrSegments] = p.keys[left.parentIndex]
+ left.values[left.nrSegments] = p.values[left.parentIndex]
+ copy(left.keys[left.nrSegments+1:], right.keys[:right.nrSegments])
+ copy(left.values[left.nrSegments+1:], right.values[:right.nrSegments])
+ if left.hasChildren {
+ copy(left.children[left.nrSegments+1:], right.children[:right.nrSegments+1])
+ for i := left.nrSegments + 1; i < left.nrSegments+right.nrSegments+2; i++ {
+ left.children[i].parent = left
+ left.children[i].parentIndex = i
+ }
+ }
+ left.nrSegments += right.nrSegments + 1
+ copy(p.keys[left.parentIndex:], p.keys[left.parentIndex+1:p.nrSegments])
+ copy(p.values[left.parentIndex:], p.values[left.parentIndex+1:p.nrSegments])
+ dirtySetFunctions{}.ClearValue(&p.values[p.nrSegments-1])
+ copy(p.children[left.parentIndex+1:], p.children[left.parentIndex+2:p.nrSegments+1])
+ for i := 0; i < p.nrSegments; i++ {
+ p.children[i].parentIndex = i
+ }
+ p.children[p.nrSegments] = nil
+ p.nrSegments--
+
+ if DirtytrackGaps != 0 {
+ left.updateMaxGapLocal()
+ }
+
+ n = p
+ }
+}
+
+// updateMaxGapLeaf updates maxGap bottom-up from the calling leaf until no
+// further update is necessary.
+//
+// Preconditions: n must be a leaf node, trackGaps must be 1.
+func (n *Dirtynode) updateMaxGapLeaf() {
+ if n.hasChildren {
+ panic(fmt.Sprintf("updateMaxGapLeaf should always be called on leaf node: %v", n))
+ }
+ max := n.calculateMaxGapLeaf()
+ if max == n.maxGap.Get() {
+
+ return
+ }
+ oldMax := n.maxGap.Get()
+ n.maxGap.Set(max)
+ if max > oldMax {
+
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() >= max {
+
+ break
+ }
+
+ p.maxGap.Set(max)
+ }
+ return
+ }
+
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() > oldMax {
+
+ break
+ }
+
+ parentNewMax := p.calculateMaxGapInternal()
+ if p.maxGap.Get() == parentNewMax {
+
+ break
+ }
+
+ p.maxGap.Set(parentNewMax)
+ }
+}
+
+// updateMaxGapLocal updates the maxGap of the calling node only, without
+// propagating to ancestor nodes.
+//
+// Precondition: trackGaps must be 1.
+func (n *Dirtynode) updateMaxGapLocal() {
+ if !n.hasChildren {
+
+ n.maxGap.Set(n.calculateMaxGapLeaf())
+ } else {
+
+ n.maxGap.Set(n.calculateMaxGapInternal())
+ }
+}
+
+// calculateMaxGapLeaf iterates over the gaps within a leaf node and calculates
+// the maximum.
+//
+// Preconditions: n must be a leaf node.
+func (n *Dirtynode) calculateMaxGapLeaf() uint64 {
+ max := DirtyGapIterator{n, 0}.Range().Length()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := (DirtyGapIterator{n, i}).Range().Length(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// calculateMaxGapInternal iterates over the children's maxGap values within an
+// internal node n and calculates the maximum.
+//
+// Preconditions: n must be a non-leaf node.
+func (n *Dirtynode) calculateMaxGapInternal() uint64 {
+ max := n.children[0].maxGap.Get()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := n.children[i].maxGap.Get(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// searchFirstLargeEnoughGap returns the first gap of at least minSize length
+// in the subtree rooted by n. If no such gap exists, it returns a terminal gap
+// iterator.
+func (n *Dirtynode) searchFirstLargeEnoughGap(minSize uint64) DirtyGapIterator {
+ if n.maxGap.Get() < minSize {
+ return DirtyGapIterator{}
+ }
+ if n.hasChildren {
+ for i := 0; i <= n.nrSegments; i++ {
+ if largeEnoughGap := n.children[i].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := 0; i <= n.nrSegments; i++ {
+ currentGap := DirtyGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
+// searchLastLargeEnoughGap returns the last gap of at least minSize length
+// in the subtree rooted by n. If no such gap exists, it returns a terminal gap
+// iterator.
+func (n *Dirtynode) searchLastLargeEnoughGap(minSize uint64) DirtyGapIterator {
+ if n.maxGap.Get() < minSize {
+ return DirtyGapIterator{}
+ }
+ if n.hasChildren {
+ for i := n.nrSegments; i >= 0; i-- {
+ if largeEnoughGap := n.children[i].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := n.nrSegments; i >= 0; i-- {
+ currentGap := DirtyGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
+// An Iterator is conceptually one of:
+//
+// - A pointer to a segment in a set; or
+//
+// - A terminal iterator, which is a sentinel indicating that the end of
+// iteration has been reached.
+//
+// Iterators are copyable values and are meaningfully equality-comparable. The
+// zero value of Iterator is a terminal iterator.
+//
+// Unless otherwise specified, any mutation of a set invalidates all existing
+// iterators into the set.
+type DirtyIterator struct {
+ // node is the node containing the iterated segment. If the iterator is
+ // terminal, node is nil.
+ node *Dirtynode
+
+ // index is the index of the segment in node.keys/values.
+ index int
+}
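+
+// Example (editorial sketch): the canonical forward traversal over a DirtySet
+// s, also used throughout this file (e.g. in ExportSortedSlices).
+//
+//	for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+//		_ = seg.Range()
+//		_ = seg.Value()
+//	}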
+
+// Ok returns true if the iterator is not terminal. All other methods are only
+// valid for non-terminal iterators.
+func (seg DirtyIterator) Ok() bool {
+ return seg.node != nil
+}
+
+// Range returns the iterated segment's range key.
+func (seg DirtyIterator) Range() __generics_imported0.MappableRange {
+ return seg.node.keys[seg.index]
+}
+
+// Start is equivalent to Range().Start, but should be preferred if only the
+// start of the range is needed.
+func (seg DirtyIterator) Start() uint64 {
+ return seg.node.keys[seg.index].Start
+}
+
+// End is equivalent to Range().End, but should be preferred if only the end of
+// the range is needed.
+func (seg DirtyIterator) End() uint64 {
+ return seg.node.keys[seg.index].End
+}
+
+// SetRangeUnchecked mutates the iterated segment's range key. This operation
+// does not invalidate any iterators.
+//
+// Preconditions:
+// * r.Length() > 0.
+// * The new range must not overlap an existing one:
+//   * If seg.NextSegment().Ok(), then r.End <= seg.NextSegment().Start().
+//   * If seg.PrevSegment().Ok(), then r.Start >= seg.PrevSegment().End().
+func (seg DirtyIterator) SetRangeUnchecked(r __generics_imported0.MappableRange) {
+ seg.node.keys[seg.index] = r
+}
+
+// SetRange mutates the iterated segment's range key. If the new range would
+// cause the iterated segment to overlap another segment, or if the new range
+// is invalid, SetRange panics. This operation does not invalidate any
+// iterators.
+func (seg DirtyIterator) SetRange(r __generics_imported0.MappableRange) {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ if prev := seg.PrevSegment(); prev.Ok() && r.Start < prev.End() {
+ panic(fmt.Sprintf("new segment range %v overlaps segment range %v", r, prev.Range()))
+ }
+ if next := seg.NextSegment(); next.Ok() && r.End > next.Start() {
+ panic(fmt.Sprintf("new segment range %v overlaps segment range %v", r, next.Range()))
+ }
+ seg.SetRangeUnchecked(r)
+}
+
+// SetStartUnchecked mutates the iterated segment's start. This operation does
+// not invalidate any iterators.
+//
+// Preconditions: The new start must be valid:
+// * start < seg.End().
+// * If seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
+func (seg DirtyIterator) SetStartUnchecked(start uint64) {
+ seg.node.keys[seg.index].Start = start
+}
+
+// SetStart mutates the iterated segment's start. If the new start value would
+// cause the iterated segment to overlap another segment, or would result in an
+// invalid range, SetStart panics. This operation does not invalidate any
+// iterators.
+func (seg DirtyIterator) SetStart(start uint64) {
+ if start >= seg.End() {
+ panic(fmt.Sprintf("new start %v would invalidate segment range %v", start, seg.Range()))
+ }
+ if prev := seg.PrevSegment(); prev.Ok() && start < prev.End() {
+ panic(fmt.Sprintf("new start %v would cause segment range %v to overlap segment range %v", start, seg.Range(), prev.Range()))
+ }
+ seg.SetStartUnchecked(start)
+}
+
+// SetEndUnchecked mutates the iterated segment's end. This operation does not
+// invalidate any iterators.
+//
+// Preconditions: The new end must be valid:
+// * end > seg.Start().
+// * If seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
+func (seg DirtyIterator) SetEndUnchecked(end uint64) {
+ seg.node.keys[seg.index].End = end
+}
+
+// SetEnd mutates the iterated segment's end. If the new end value would cause
+// the iterated segment to overlap another segment, or would result in an
+// invalid range, SetEnd panics. This operation does not invalidate any
+// iterators.
+func (seg DirtyIterator) SetEnd(end uint64) {
+ if end <= seg.Start() {
+ panic(fmt.Sprintf("new end %v would invalidate segment range %v", end, seg.Range()))
+ }
+ if next := seg.NextSegment(); next.Ok() && end > next.Start() {
+ panic(fmt.Sprintf("new end %v would cause segment range %v to overlap segment range %v", end, seg.Range(), next.Range()))
+ }
+ seg.SetEndUnchecked(end)
+}
+
+// Value returns a copy of the iterated segment's value.
+func (seg DirtyIterator) Value() DirtyInfo {
+ return seg.node.values[seg.index]
+}
+
+// ValuePtr returns a pointer to the iterated segment's value. The pointer is
+// invalidated if the iterator is invalidated. This operation does not
+// invalidate any iterators.
+func (seg DirtyIterator) ValuePtr() *DirtyInfo {
+ return &seg.node.values[seg.index]
+}
+
+// SetValue mutates the iterated segment's value. This operation does not
+// invalidate any iterators.
+func (seg DirtyIterator) SetValue(val DirtyInfo) {
+ seg.node.values[seg.index] = val
+}
+
+// PrevSegment returns the iterated segment's predecessor. If there is no
+// preceding segment, PrevSegment returns a terminal iterator.
+func (seg DirtyIterator) PrevSegment() DirtyIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index].lastSegment()
+ }
+ if seg.index > 0 {
+ return DirtyIterator{seg.node, seg.index - 1}
+ }
+ if seg.node.parent == nil {
+ return DirtyIterator{}
+ }
+ return DirtysegmentBeforePosition(seg.node.parent, seg.node.parentIndex)
+}
+
+// NextSegment returns the iterated segment's successor. If there is no
+// succeeding segment, NextSegment returns a terminal iterator.
+func (seg DirtyIterator) NextSegment() DirtyIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index+1].firstSegment()
+ }
+ if seg.index < seg.node.nrSegments-1 {
+ return DirtyIterator{seg.node, seg.index + 1}
+ }
+ if seg.node.parent == nil {
+ return DirtyIterator{}
+ }
+ return DirtysegmentAfterPosition(seg.node.parent, seg.node.parentIndex)
+}
+
+// PrevGap returns the gap immediately before the iterated segment.
+func (seg DirtyIterator) PrevGap() DirtyGapIterator {
+ if seg.node.hasChildren {
+
+ return seg.node.children[seg.index].lastSegment().NextGap()
+ }
+ return DirtyGapIterator{seg.node, seg.index}
+}
+
+// NextGap returns the gap immediately after the iterated segment.
+func (seg DirtyIterator) NextGap() DirtyGapIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index+1].firstSegment().PrevGap()
+ }
+ return DirtyGapIterator{seg.node, seg.index + 1}
+}
+
+// PrevNonEmpty returns the iterated segment's predecessor if it is adjacent,
+// or the gap before the iterated segment otherwise. If seg.Start() ==
+// Functions.MinKey(), PrevNonEmpty will return two terminal iterators.
+// Otherwise, exactly one of the iterators returned by PrevNonEmpty will be
+// non-terminal.
+func (seg DirtyIterator) PrevNonEmpty() (DirtyIterator, DirtyGapIterator) {
+ gap := seg.PrevGap()
+ if gap.Range().Length() != 0 {
+ return DirtyIterator{}, gap
+ }
+ return gap.PrevSegment(), DirtyGapIterator{}
+}
+
+// NextNonEmpty returns the iterated segment's successor if it is adjacent, or
+// the gap after the iterated segment otherwise. If seg.End() ==
+// Functions.MaxKey(), NextNonEmpty will return two terminal iterators.
+// Otherwise, exactly one of the iterators returned by NextNonEmpty will be
+// non-terminal.
+func (seg DirtyIterator) NextNonEmpty() (DirtyIterator, DirtyGapIterator) {
+ gap := seg.NextGap()
+ if gap.Range().Length() != 0 {
+ return DirtyIterator{}, gap
+ }
+ return gap.NextSegment(), DirtyGapIterator{}
+}
+
+// A GapIterator is conceptually one of:
+//
+// - A pointer to a position between two segments, before the first segment, or
+// after the last segment in a set, called a *gap*; or
+//
+// - A terminal iterator, which is a sentinel indicating that the end of
+// iteration has been reached.
+//
+// Note that the gap between two adjacent segments exists (iterators to it are
+// non-terminal), but has a length of zero. GapIterator.IsEmpty returns true
+// for such gaps. An empty set contains a single gap, spanning the entire range
+// of the set's keys.
+//
+// GapIterators are copyable values and are meaningfully equality-comparable.
+// The zero value of GapIterator is a terminal iterator.
+//
+// Unless otherwise specified, any mutation of a set invalidates all existing
+// iterators into the set.
+type DirtyGapIterator struct {
+ // The representation of a GapIterator is identical to that of an Iterator,
+ // except that index corresponds to positions between segments in the same
+ // way as for node.children (see comment for node.nrSegments).
+ node *Dirtynode
+ index int
+}
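+
+// Example (editorial sketch): walking the gaps of a DirtySet s, skipping the
+// zero-length gaps that separate adjacent segments.
+//
+//	for gap := s.FirstGap(); gap.Ok(); gap = gap.NextGap() {
+//		if gap.IsEmpty() {
+//			continue
+//		}
+//		_ = gap.Range() // A hole not covered by any segment.
+//	}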
+
+// Ok returns true if the iterator is not terminal. All other methods are only
+// valid for non-terminal iterators.
+func (gap DirtyGapIterator) Ok() bool {
+ return gap.node != nil
+}
+
+// Range returns the range spanned by the iterated gap.
+func (gap DirtyGapIterator) Range() __generics_imported0.MappableRange {
+ return __generics_imported0.MappableRange{gap.Start(), gap.End()}
+}
+
+// Start is equivalent to Range().Start, but should be preferred if only the
+// start of the range is needed.
+func (gap DirtyGapIterator) Start() uint64 {
+ if ps := gap.PrevSegment(); ps.Ok() {
+ return ps.End()
+ }
+ return dirtySetFunctions{}.MinKey()
+}
+
+// End is equivalent to Range().End, but should be preferred if only the end of
+// the range is needed.
+func (gap DirtyGapIterator) End() uint64 {
+ if ns := gap.NextSegment(); ns.Ok() {
+ return ns.Start()
+ }
+ return dirtySetFunctions{}.MaxKey()
+}
+
+// IsEmpty returns true if the iterated gap is empty (that is, the "gap" is
+// between two adjacent segments).
+func (gap DirtyGapIterator) IsEmpty() bool {
+ return gap.Range().Length() == 0
+}
+
+// PrevSegment returns the segment immediately before the iterated gap. If no
+// such segment exists, PrevSegment returns a terminal iterator.
+func (gap DirtyGapIterator) PrevSegment() DirtyIterator {
+ return DirtysegmentBeforePosition(gap.node, gap.index)
+}
+
+// NextSegment returns the segment immediately after the iterated gap. If no
+// such segment exists, NextSegment returns a terminal iterator.
+func (gap DirtyGapIterator) NextSegment() DirtyIterator {
+ return DirtysegmentAfterPosition(gap.node, gap.index)
+}
+
+// PrevGap returns the iterated gap's predecessor. If no such gap exists,
+// PrevGap returns a terminal iterator.
+func (gap DirtyGapIterator) PrevGap() DirtyGapIterator {
+ seg := gap.PrevSegment()
+ if !seg.Ok() {
+ return DirtyGapIterator{}
+ }
+ return seg.PrevGap()
+}
+
+// NextGap returns the iterated gap's successor. If no such gap exists, NextGap
+// returns a terminal iterator.
+func (gap DirtyGapIterator) NextGap() DirtyGapIterator {
+ seg := gap.NextSegment()
+ if !seg.Ok() {
+ return DirtyGapIterator{}
+ }
+ return seg.NextGap()
+}
+
+// NextLargeEnoughGap returns the first gap after the iterated gap whose length
+// is at least minSize. If no such gap exists, it returns a terminal gap
+// iterator (the iterated gap itself is NOT considered).
+//
+// Precondition: trackGaps must be 1.
+func (gap DirtyGapIterator) NextLargeEnoughGap(minSize uint64) DirtyGapIterator {
+ if DirtytrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == gap.node.nrSegments {
+
+ gap.node = gap.NextSegment().node
+ gap.index = 0
+ return gap.nextLargeEnoughGapHelper(minSize)
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
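+
+// Example (editorial sketch; only meaningful for instantiations with
+// trackGaps == 1, otherwise this method panics): finding the first gap of at
+// least minSize bytes, starting from the first gap in the set.
+//
+//	gap := s.FirstGap()
+//	if gap.Range().Length() < minSize {
+//		gap = gap.NextLargeEnoughGap(minSize) // Excludes the current gap.
+//	}
+//	if gap.Ok() {
+//		// gap.Range() is the first gap of length >= minSize.
+//	}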
+
+// nextLargeEnoughGapHelper is the helper function used by NextLargeEnoughGap
+// to do the real work of the recursion.
+//
+// Preconditions: gap is NOT the trailing gap of a non-leaf node.
+func (gap DirtyGapIterator) nextLargeEnoughGapHelper(minSize uint64) DirtyGapIterator {
+
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == gap.node.nrSegments)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return DirtyGapIterator{}
+ }
+
+ gap.index++
+ for gap.index <= gap.node.nrSegments {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index++
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == gap.node.nrSegments {
+
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
+
+// PrevLargeEnoughGap returns the first gap before the iterated gap whose
+// length is at least minSize. If no such gap exists, it returns a terminal gap
+// iterator (the iterated gap itself is NOT considered).
+//
+// Precondition: trackGaps must be 1.
+func (gap DirtyGapIterator) PrevLargeEnoughGap(minSize uint64) DirtyGapIterator {
+ if DirtytrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == 0 {
+
+ gap.node = gap.PrevSegment().node
+ gap.index = gap.node.nrSegments
+ return gap.prevLargeEnoughGapHelper(minSize)
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
+
+// prevLargeEnoughGapHelper is the helper function used by PrevLargeEnoughGap
+// to do the real work of the recursion.
+//
+// Preconditions: gap is NOT the first gap of a non-leaf node.
+func (gap DirtyGapIterator) prevLargeEnoughGapHelper(minSize uint64) DirtyGapIterator {
+
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == 0)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return DirtyGapIterator{}
+ }
+
+ gap.index--
+ for gap.index >= 0 {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index--
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == 0 {
+
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
+
+// segmentBeforePosition returns the predecessor segment of the position given
+// by n.children[i], which may or may not contain a child. If no such segment
+// exists, segmentBeforePosition returns a terminal iterator.
+func DirtysegmentBeforePosition(n *Dirtynode, i int) DirtyIterator {
+ for i == 0 {
+ if n.parent == nil {
+ return DirtyIterator{}
+ }
+ n, i = n.parent, n.parentIndex
+ }
+ return DirtyIterator{n, i - 1}
+}
+
+// segmentAfterPosition returns the successor segment of the position given by
+// n.children[i], which may or may not contain a child. If no such segment
+// exists, segmentAfterPosition returns a terminal iterator.
+func DirtysegmentAfterPosition(n *Dirtynode, i int) DirtyIterator {
+ for i == n.nrSegments {
+ if n.parent == nil {
+ return DirtyIterator{}
+ }
+ n, i = n.parent, n.parentIndex
+ }
+ return DirtyIterator{n, i}
+}
+
+func DirtyzeroValueSlice(slice []DirtyInfo) {
+
+ for i := range slice {
+ dirtySetFunctions{}.ClearValue(&slice[i])
+ }
+}
+
+func DirtyzeroNodeSlice(slice []*Dirtynode) {
+ for i := range slice {
+ slice[i] = nil
+ }
+}
+
+// String stringifies a Set for debugging.
+func (s *DirtySet) String() string {
+ return s.root.String()
+}
+
+// String stringifies a node (and all of its children) for debugging.
+func (n *Dirtynode) String() string {
+ var buf bytes.Buffer
+ n.writeDebugString(&buf, "")
+ return buf.String()
+}
+
+func (n *Dirtynode) writeDebugString(buf *bytes.Buffer, prefix string) {
+ if n.hasChildren != (n.nrSegments > 0 && n.children[0] != nil) {
+ buf.WriteString(prefix)
+ buf.WriteString(fmt.Sprintf("WARNING: inconsistent value of hasChildren: got %v, want %v\n", n.hasChildren, !n.hasChildren))
+ }
+ for i := 0; i < n.nrSegments; i++ {
+ if child := n.children[i]; child != nil {
+ cprefix := fmt.Sprintf("%s- % 3d ", prefix, i)
+ if child.parent != n || child.parentIndex != i {
+ buf.WriteString(cprefix)
+ buf.WriteString(fmt.Sprintf("WARNING: inconsistent linkage to parent: got (%p, %d), want (%p, %d)\n", child.parent, child.parentIndex, n, i))
+ }
+ child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, i))
+ }
+ buf.WriteString(prefix)
+ if n.hasChildren {
+ if DirtytrackGaps != 0 {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v, maxGap: %d\n", i, n.keys[i], n.values[i], n.maxGap.Get()))
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
+ }
+ if child := n.children[n.nrSegments]; child != nil {
+ child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, n.nrSegments))
+ }
+}
+
+// SegmentDataSlices represents the segments of a set as parallel slices of
+// start keys, end keys, and values. SegmentDataSlices is primarily used as an
+// intermediate representation for save/restore, and its layout is optimized
+// for that purpose.
+//
+// +stateify savable
+type DirtySegmentDataSlices struct {
+ Start []uint64
+ End []uint64
+ Values []DirtyInfo
+}
+
+// ExportSortedSlices returns a copy of all segments in the given set, in
+// ascending key order.
+func (s *DirtySet) ExportSortedSlices() *DirtySegmentDataSlices {
+ var sds DirtySegmentDataSlices
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ sds.Start = append(sds.Start, seg.Start())
+ sds.End = append(sds.End, seg.End())
+ sds.Values = append(sds.Values, seg.Value())
+ }
+ sds.Start = sds.Start[:len(sds.Start):len(sds.Start)]
+ sds.End = sds.End[:len(sds.End):len(sds.End)]
+ sds.Values = sds.Values[:len(sds.Values):len(sds.Values)]
+ return &sds
+}
+
+// ImportSortedSlices initializes the given set from the given slices.
+//
+// Preconditions:
+// * s must be empty.
+// * sds must represent a valid set (the segments in sds must have valid
+//   lengths and must not overlap).
+// * The segments in sds must be sorted in ascending key order.
+func (s *DirtySet) ImportSortedSlices(sds *DirtySegmentDataSlices) error {
+ if !s.IsEmpty() {
+ return fmt.Errorf("cannot import into non-empty set %v", s)
+ }
+ gap := s.FirstGap()
+ for i := range sds.Start {
+ r := __generics_imported0.MappableRange{sds.Start[i], sds.End[i]}
+ if !gap.Range().IsSupersetOf(r) {
+ return fmt.Errorf("segment overlaps a preceding segment or is incorrectly sorted: [%d, %d) => %v", sds.Start[i], sds.End[i], sds.Values[i])
+ }
+ gap = s.InsertWithoutMerging(gap, r, sds.Values[i]).NextGap()
+ }
+ return nil
+}
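+
+// Example (editorial sketch): the round trip used by saveRoot/loadRoot below
+// for save/restore.
+//
+//	sds := s.ExportSortedSlices()
+//	var restored DirtySet
+//	if err := restored.ImportSortedSlices(sds); err != nil {
+//		// sds was unsorted, overlapping, or restored was not empty.
+//	}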
+
+// segmentTestCheck returns an error if s is incorrectly sorted, does not
+// contain exactly expectedSegments segments, or contains a segment which
+// fails the passed check.
+//
+// This should be used only for testing, and has been added to this package for
+// templating convenience.
+func (s *DirtySet) segmentTestCheck(expectedSegments int, segFunc func(int, __generics_imported0.MappableRange, DirtyInfo) error) error {
+ havePrev := false
+ prev := uint64(0)
+ nrSegments := 0
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ next := seg.Start()
+ if havePrev && prev >= next {
+ return fmt.Errorf("incorrect order: key %d (segment %d) >= key %d (segment %d)", prev, nrSegments-1, next, nrSegments)
+ }
+ if segFunc != nil {
+ if err := segFunc(nrSegments, seg.Range(), seg.Value()); err != nil {
+ return err
+ }
+ }
+ prev = next
+ havePrev = true
+ nrSegments++
+ }
+ if nrSegments != expectedSegments {
+ return fmt.Errorf("incorrect number of segments: got %d, wanted %d", nrSegments, expectedSegments)
+ }
+ return nil
+}
+
+// countSegments counts the number of segments in the set.
+//
+// Similar to Check, this should only be used for testing.
+func (s *DirtySet) countSegments() (segments int) {
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ segments++
+ }
+ return segments
+}
+func (s *DirtySet) saveRoot() *DirtySegmentDataSlices {
+ return s.ExportSortedSlices()
+}
+
+func (s *DirtySet) loadRoot(sds *DirtySegmentDataSlices) {
+ if err := s.ImportSortedSlices(sds); err != nil {
+ panic(err)
+ }
+}
diff --git a/pkg/sentry/fs/fsutil/dirty_set_test.go b/pkg/sentry/fs/fsutil/dirty_set_test.go
deleted file mode 100644
index 48448c97c..000000000
--- a/pkg/sentry/fs/fsutil/dirty_set_test.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fsutil
-
-import (
- "reflect"
- "testing"
-
- "gvisor.dev/gvisor/pkg/hostarch"
- "gvisor.dev/gvisor/pkg/sentry/memmap"
-)
-
-func TestDirtySet(t *testing.T) {
- var set DirtySet
- set.MarkDirty(memmap.MappableRange{0, 2 * hostarch.PageSize})
- set.KeepDirty(memmap.MappableRange{hostarch.PageSize, 2 * hostarch.PageSize})
- set.MarkClean(memmap.MappableRange{0, 2 * hostarch.PageSize})
- want := &DirtySegmentDataSlices{
- Start: []uint64{hostarch.PageSize},
- End: []uint64{2 * hostarch.PageSize},
- Values: []DirtyInfo{{Keep: true}},
- }
- if got := set.ExportSortedSlices(); !reflect.DeepEqual(got, want) {
- t.Errorf("set:\n\tgot %v,\n\twant %v", got, want)
- }
-}
diff --git a/pkg/sentry/fs/fsutil/file_range_set_impl.go b/pkg/sentry/fs/fsutil/file_range_set_impl.go
new file mode 100644
index 000000000..7568fb790
--- /dev/null
+++ b/pkg/sentry/fs/fsutil/file_range_set_impl.go
@@ -0,0 +1,1647 @@
+package fsutil
+
+import (
+ __generics_imported0 "gvisor.dev/gvisor/pkg/sentry/memmap"
+)
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// trackGaps is an optional parameter.
+//
+// If trackGaps is 1, the Set will track maximum gap size recursively,
+// enabling the GapIterator.{Prev,Next}LargeEnoughGap functions. In this
+// case, Key must be an unsigned integer.
+//
+// trackGaps must be 0 or 1.
+const FileRangetrackGaps = 0
+
+var _ = uint8(FileRangetrackGaps << 7) // Will fail if not zero or one.
+
+// dynamicGap is a type that disappears if trackGaps is 0.
+type FileRangedynamicGap [FileRangetrackGaps]uint64
+
+// Get returns the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *FileRangedynamicGap) Get() uint64 {
+ return d[:][0]
+}
+
+// Set sets the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *FileRangedynamicGap) Set(v uint64) {
+ d[:][0] = v
+}
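+
+// Editorial note (sketch): with FileRangetrackGaps == 0, FileRangedynamicGap
+// is a zero-length array, so it adds no storage to FileRangenode, and Get/Set
+// must not be called:
+//
+//	var g FileRangedynamicGap
+//	_ = len(g) // 0 when trackGaps == 0.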
+
+const (
+ // minDegree is the minimum degree of an internal node in a Set B-tree.
+ //
+ // - Any non-root node has at least minDegree-1 segments.
+ //
+ // - Any non-root internal (non-leaf) node has at least minDegree children.
+ //
+ // - The root node may have fewer than minDegree-1 segments, but it may
+ // only have 0 segments if the tree is empty.
+ //
+ // Our implementation requires minDegree >= 3. Higher values of minDegree
+ // usually improve performance, but increase memory usage for small sets.
+ FileRangeminDegree = 3
+
+ FileRangemaxDegree = 2 * FileRangeminDegree
+)
+
+// A Set is a mapping of segments with non-overlapping Range keys. The zero
+// value for a Set is an empty set. Set values are neither safely movable nor
+// copyable. Set is thread-compatible.
+//
+// +stateify savable
+type FileRangeSet struct {
+ root FileRangenode `state:".(*FileRangeSegmentDataSlices)"`
+}
+
+// IsEmpty returns true if the set contains no segments.
+func (s *FileRangeSet) IsEmpty() bool {
+ return s.root.nrSegments == 0
+}
+
+// IsEmptyRange returns true iff no segments in the set overlap the given
+// range. This is semantically equivalent to s.SpanRange(r) == 0, but may be
+// more efficient.
+func (s *FileRangeSet) IsEmptyRange(r __generics_imported0.MappableRange) bool {
+ switch {
+ case r.Length() < 0:
+ panic(fmt.Sprintf("invalid range %v", r))
+ case r.Length() == 0:
+ return true
+ }
+ _, gap := s.Find(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ return r.End <= gap.End()
+}
+
+// Span returns the total size of all segments in the set.
+func (s *FileRangeSet) Span() uint64 {
+ var sz uint64
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ sz += seg.Range().Length()
+ }
+ return sz
+}
+
+// SpanRange returns the total size of the intersection of segments in the set
+// with the given range.
+func (s *FileRangeSet) SpanRange(r __generics_imported0.MappableRange) uint64 {
+ switch {
+ case r.Length() < 0:
+ panic(fmt.Sprintf("invalid range %v", r))
+ case r.Length() == 0:
+ return 0
+ }
+ var sz uint64
+ for seg := s.LowerBoundSegment(r.Start); seg.Ok() && seg.Start() < r.End; seg = seg.NextSegment() {
+ sz += seg.Range().Intersect(r).Length()
+ }
+ return sz
+}
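+
+// Example (editorial sketch; frs is a FileRangeSet): Span and SpanRange
+// measure how many bytes of the mappable key space the set covers.
+//
+//	total := frs.Span()
+//	cached := frs.SpanRange(memmap.MappableRange{Start: 0, End: 0x10000})
+//	_, _ = total, cached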
+
+// FirstSegment returns the first segment in the set. If the set is empty,
+// FirstSegment returns a terminal iterator.
+func (s *FileRangeSet) FirstSegment() FileRangeIterator {
+ if s.root.nrSegments == 0 {
+ return FileRangeIterator{}
+ }
+ return s.root.firstSegment()
+}
+
+// LastSegment returns the last segment in the set. If the set is empty,
+// LastSegment returns a terminal iterator.
+func (s *FileRangeSet) LastSegment() FileRangeIterator {
+ if s.root.nrSegments == 0 {
+ return FileRangeIterator{}
+ }
+ return s.root.lastSegment()
+}
+
+// FirstGap returns the first gap in the set.
+func (s *FileRangeSet) FirstGap() FileRangeGapIterator {
+ n := &s.root
+ for n.hasChildren {
+ n = n.children[0]
+ }
+ return FileRangeGapIterator{n, 0}
+}
+
+// LastGap returns the last gap in the set.
+func (s *FileRangeSet) LastGap() FileRangeGapIterator {
+ n := &s.root
+ for n.hasChildren {
+ n = n.children[n.nrSegments]
+ }
+ return FileRangeGapIterator{n, n.nrSegments}
+}
+
+// Find returns the segment or gap whose range contains the given key. If a
+// segment is found, the returned Iterator is non-terminal and the
+// returned GapIterator is terminal. Otherwise, the returned Iterator is
+// terminal and the returned GapIterator is non-terminal.
+func (s *FileRangeSet) Find(key uint64) (FileRangeIterator, FileRangeGapIterator) {
+ n := &s.root
+ for {
+
+ lower := 0
+ upper := n.nrSegments
+ for lower < upper {
+ i := lower + (upper-lower)/2
+ if r := n.keys[i]; key < r.End {
+ if key >= r.Start {
+ return FileRangeIterator{n, i}, FileRangeGapIterator{}
+ }
+ upper = i
+ } else {
+ lower = i + 1
+ }
+ }
+ i := lower
+ if !n.hasChildren {
+ return FileRangeIterator{}, FileRangeGapIterator{n, i}
+ }
+ n = n.children[i]
+ }
+}
+
+// FindSegment returns the segment whose range contains the given key. If no
+// such segment exists, FindSegment returns a terminal iterator.
+func (s *FileRangeSet) FindSegment(key uint64) FileRangeIterator {
+ seg, _ := s.Find(key)
+ return seg
+}
+
+// LowerBoundSegment returns the segment with the lowest range that contains a
+// key greater than or equal to min. If no such segment exists,
+// LowerBoundSegment returns a terminal iterator.
+func (s *FileRangeSet) LowerBoundSegment(min uint64) FileRangeIterator {
+ seg, gap := s.Find(min)
+ if seg.Ok() {
+ return seg
+ }
+ return gap.NextSegment()
+}
+
+// UpperBoundSegment returns the segment with the highest range that contains a
+// key less than or equal to max. If no such segment exists, UpperBoundSegment
+// returns a terminal iterator.
+func (s *FileRangeSet) UpperBoundSegment(max uint64) FileRangeIterator {
+ seg, gap := s.Find(max)
+ if seg.Ok() {
+ return seg
+ }
+ return gap.PrevSegment()
+}
+
+// FindGap returns the gap containing the given key. If no such gap exists
+// (i.e. the set contains a segment containing that key), FindGap returns a
+// terminal iterator.
+func (s *FileRangeSet) FindGap(key uint64) FileRangeGapIterator {
+ _, gap := s.Find(key)
+ return gap
+}
+
+// LowerBoundGap returns the gap with the lowest range that is greater than or
+// equal to min.
+func (s *FileRangeSet) LowerBoundGap(min uint64) FileRangeGapIterator {
+ seg, gap := s.Find(min)
+ if gap.Ok() {
+ return gap
+ }
+ return seg.NextGap()
+}
+
+// UpperBoundGap returns the gap with the highest range that is less than or
+// equal to max.
+func (s *FileRangeSet) UpperBoundGap(max uint64) FileRangeGapIterator {
+ seg, gap := s.Find(max)
+ if gap.Ok() {
+ return gap
+ }
+ return seg.PrevGap()
+}
+
+// Add inserts the given segment into the set and returns true. If the new
+// segment can be merged with adjacent segments, Add will do so. If the new
+// segment would overlap an existing segment, Add returns false. If Add
+// succeeds, all existing iterators are invalidated.
+func (s *FileRangeSet) Add(r __generics_imported0.MappableRange, val uint64) bool {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ gap := s.FindGap(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ if r.End > gap.End() {
+ return false
+ }
+ s.Insert(gap, r, val)
+ return true
+}
+
+// AddWithoutMerging inserts the given segment into the set and returns true.
+// If it would overlap an existing segment, AddWithoutMerging does nothing and
+// returns false. If AddWithoutMerging succeeds, all existing iterators are
+// invalidated.
+func (s *FileRangeSet) AddWithoutMerging(r __generics_imported0.MappableRange, val uint64) bool {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ gap := s.FindGap(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ if r.End > gap.End() {
+ return false
+ }
+ s.InsertWithoutMergingUnchecked(gap, r, val)
+ return true
+}
+
+// Insert inserts the given segment into the given gap. If the new segment can
+// be merged with adjacent segments, Insert will do so. Insert returns an
+// iterator to the segment containing the inserted value (which may have been
+// merged with other values). All existing iterators (including gap, but not
+// including the returned iterator) are invalidated.
+//
+// If the gap cannot accommodate the segment, or if r is invalid, Insert panics.
+//
+// Insert is semantically equivalent to an InsertWithoutMerging followed by a
+// Merge, but may be more efficient. Note that there is no unchecked variant of
+// Insert since Insert must retrieve and inspect gap's predecessor and
+// successor segments regardless.
+func (s *FileRangeSet) Insert(gap FileRangeGapIterator, r __generics_imported0.MappableRange, val uint64) FileRangeIterator {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ prev, next := gap.PrevSegment(), gap.NextSegment()
+ if prev.Ok() && prev.End() > r.Start {
+ panic(fmt.Sprintf("new segment %v overlaps predecessor %v", r, prev.Range()))
+ }
+ if next.Ok() && next.Start() < r.End {
+ panic(fmt.Sprintf("new segment %v overlaps successor %v", r, next.Range()))
+ }
+ if prev.Ok() && prev.End() == r.Start {
+ if mval, ok := (FileRangeSetFunctions{}).Merge(prev.Range(), prev.Value(), r, val); ok {
+ shrinkMaxGap := FileRangetrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
+ prev.SetEndUnchecked(r.End)
+ prev.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ if next.Ok() && next.Start() == r.End {
+ val = mval
+ if mval, ok := (FileRangeSetFunctions{}).Merge(prev.Range(), val, next.Range(), next.Value()); ok {
+ prev.SetEndUnchecked(next.End())
+ prev.SetValue(mval)
+ return s.Remove(next).PrevSegment()
+ }
+ }
+ return prev
+ }
+ }
+ if next.Ok() && next.Start() == r.End {
+ if mval, ok := (FileRangeSetFunctions{}).Merge(r, val, next.Range(), next.Value()); ok {
+ shrinkMaxGap := FileRangetrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
+ next.SetStartUnchecked(r.Start)
+ next.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ return next
+ }
+ }
+
+ return s.InsertWithoutMergingUnchecked(gap, r, val)
+}
+
+// InsertWithoutMerging inserts the given segment into the given gap and
+// returns an iterator to the inserted segment. All existing iterators
+// (including gap, but not including the returned iterator) are invalidated.
+//
+// If the gap cannot accommodate the segment, or if r is invalid,
+// InsertWithoutMerging panics.
+func (s *FileRangeSet) InsertWithoutMerging(gap FileRangeGapIterator, r __generics_imported0.MappableRange, val uint64) FileRangeIterator {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ if gr := gap.Range(); !gr.IsSupersetOf(r) {
+ panic(fmt.Sprintf("cannot insert segment range %v into gap range %v", r, gr))
+ }
+ return s.InsertWithoutMergingUnchecked(gap, r, val)
+}
+
+// InsertWithoutMergingUnchecked inserts the given segment into the given gap
+// and returns an iterator to the inserted segment. All existing iterators
+// (including gap, but not including the returned iterator) are invalidated.
+//
+// Preconditions:
+// * r.Start >= gap.Start().
+// * r.End <= gap.End().
+func (s *FileRangeSet) InsertWithoutMergingUnchecked(gap FileRangeGapIterator, r __generics_imported0.MappableRange, val uint64) FileRangeIterator {
+ gap = gap.node.rebalanceBeforeInsert(gap)
+ splitMaxGap := FileRangetrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
+ copy(gap.node.keys[gap.index+1:], gap.node.keys[gap.index:gap.node.nrSegments])
+ copy(gap.node.values[gap.index+1:], gap.node.values[gap.index:gap.node.nrSegments])
+ gap.node.keys[gap.index] = r
+ gap.node.values[gap.index] = val
+ gap.node.nrSegments++
+ if splitMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ return FileRangeIterator{gap.node, gap.index}
+}
+
+// Remove removes the given segment and returns an iterator to the vacated gap.
+// All existing iterators (including seg, but not including the returned
+// iterator) are invalidated.
+func (s *FileRangeSet) Remove(seg FileRangeIterator) FileRangeGapIterator {
+
+ if seg.node.hasChildren {
+
+ victim := seg.PrevSegment()
+
+ seg.SetRangeUnchecked(victim.Range())
+ seg.SetValue(victim.Value())
+
+ nextAdjacentNode := seg.NextSegment().node
+ if FileRangetrackGaps != 0 {
+ nextAdjacentNode.updateMaxGapLeaf()
+ }
+ return s.Remove(victim).NextGap()
+ }
+ copy(seg.node.keys[seg.index:], seg.node.keys[seg.index+1:seg.node.nrSegments])
+ copy(seg.node.values[seg.index:], seg.node.values[seg.index+1:seg.node.nrSegments])
+ FileRangeSetFunctions{}.ClearValue(&seg.node.values[seg.node.nrSegments-1])
+ seg.node.nrSegments--
+ if FileRangetrackGaps != 0 {
+ seg.node.updateMaxGapLeaf()
+ }
+ return seg.node.rebalanceAfterRemove(FileRangeGapIterator{seg.node, seg.index})
+}
+
+// RemoveAll removes all segments from the set. All existing iterators are
+// invalidated.
+func (s *FileRangeSet) RemoveAll() {
+ s.root = FileRangenode{}
+}
+
+// RemoveRange removes all segments in the given range. An iterator to the
+// newly formed gap is returned, and all existing iterators are invalidated.
+func (s *FileRangeSet) RemoveRange(r __generics_imported0.MappableRange) FileRangeGapIterator {
+ seg, gap := s.Find(r.Start)
+ if seg.Ok() {
+ seg = s.Isolate(seg, r)
+ gap = s.Remove(seg)
+ }
+ for seg = gap.NextSegment(); seg.Ok() && seg.Start() < r.End; seg = gap.NextSegment() {
+ seg = s.Isolate(seg, r)
+ gap = s.Remove(seg)
+ }
+ return gap
+}
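+
+// Illustrative sketch, not part of the generated template: RemoveRange splits
+// any segments straddling the boundaries of the range before removing them,
+// so callers do not need to call Isolate first. The offsets are arbitrary.
+func exampleFileRangeRemoveRange(s *FileRangeSet) {
+	gap := s.RemoveRange(__generics_imported0.MappableRange{Start: 0x1000, End: 0x3000})
+	// gap covers the removed range [0x1000, 0x3000), plus any adjacent
+	// holes that it merged with.
+	_ = gap.Range()
+}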
+
+// Merge attempts to merge two neighboring segments. If successful, Merge
+// returns an iterator to the merged segment, and all existing iterators are
+// invalidated. Otherwise, Merge returns a terminal iterator.
+//
+// If first is not the predecessor of second, Merge panics.
+func (s *FileRangeSet) Merge(first, second FileRangeIterator) FileRangeIterator {
+ if first.NextSegment() != second {
+ panic(fmt.Sprintf("attempt to merge non-neighboring segments %v, %v", first.Range(), second.Range()))
+ }
+ return s.MergeUnchecked(first, second)
+}
+
+// MergeUnchecked attempts to merge two neighboring segments. If successful,
+// MergeUnchecked returns an iterator to the merged segment, and all existing
+// iterators are invalidated. Otherwise, MergeUnchecked returns a terminal
+// iterator.
+//
+// Precondition: first is the predecessor of second: first.NextSegment() ==
+// second, first == second.PrevSegment().
+func (s *FileRangeSet) MergeUnchecked(first, second FileRangeIterator) FileRangeIterator {
+ if first.End() == second.Start() {
+ if mval, ok := (FileRangeSetFunctions{}).Merge(first.Range(), first.Value(), second.Range(), second.Value()); ok {
+
+ first.SetEndUnchecked(second.End())
+ first.SetValue(mval)
+
+ return s.Remove(second).PrevSegment()
+ }
+ }
+ return FileRangeIterator{}
+}
+
+// MergeAll attempts to merge all adjacent segments in the set. All existing
+// iterators are invalidated.
+func (s *FileRangeSet) MergeAll() {
+ seg := s.FirstSegment()
+ if !seg.Ok() {
+ return
+ }
+ next := seg.NextSegment()
+ for next.Ok() {
+ if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
+ seg, next = mseg, mseg.NextSegment()
+ } else {
+ seg, next = next, next.NextSegment()
+ }
+ }
+}
+
+// MergeRange attempts to merge all adjacent segments that contain a key in the
+// specified range. All existing iterators are invalidated.
+func (s *FileRangeSet) MergeRange(r __generics_imported0.MappableRange) {
+ seg := s.LowerBoundSegment(r.Start)
+ if !seg.Ok() {
+ return
+ }
+ next := seg.NextSegment()
+ for next.Ok() && next.Range().Start < r.End {
+ if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
+ seg, next = mseg, mseg.NextSegment()
+ } else {
+ seg, next = next, next.NextSegment()
+ }
+ }
+}
+
+// MergeAdjacent attempts to merge the segment containing r.Start with its
+// predecessor, and the segment containing r.End-1 with its successor.
+func (s *FileRangeSet) MergeAdjacent(r __generics_imported0.MappableRange) {
+ first := s.FindSegment(r.Start)
+ if first.Ok() {
+ if prev := first.PrevSegment(); prev.Ok() {
+ s.Merge(prev, first)
+ }
+ }
+ last := s.FindSegment(r.End - 1)
+ if last.Ok() {
+ if next := last.NextSegment(); next.Ok() {
+ s.Merge(last, next)
+ }
+ }
+}
+
+// Split splits the given segment at the given key and returns iterators to the
+// two resulting segments. All existing iterators (including seg, but not
+// including the returned iterators) are invalidated.
+//
+// If the segment cannot be split at split (because split is at the start or
+// end of the segment's range, so splitting would produce a segment with zero
+// length, or because split falls outside the segment's range altogether),
+// Split panics.
+func (s *FileRangeSet) Split(seg FileRangeIterator, split uint64) (FileRangeIterator, FileRangeIterator) {
+ if !seg.Range().CanSplitAt(split) {
+ panic(fmt.Sprintf("can't split %v at %v", seg.Range(), split))
+ }
+ return s.SplitUnchecked(seg, split)
+}
+
+// SplitUnchecked splits the given segment at the given key and returns
+// iterators to the two resulting segments. All existing iterators (including
+// seg, but not including the returned iterators) are invalidated.
+//
+// Preconditions: seg.Start() < split < seg.End().
+func (s *FileRangeSet) SplitUnchecked(seg FileRangeIterator, split uint64) (FileRangeIterator, FileRangeIterator) {
+ val1, val2 := (FileRangeSetFunctions{}).Split(seg.Range(), seg.Value(), split)
+ end2 := seg.End()
+ seg.SetEndUnchecked(split)
+ seg.SetValue(val1)
+ seg2 := s.InsertWithoutMergingUnchecked(seg.NextGap(), __generics_imported0.MappableRange{split, end2}, val2)
+
+ return seg2.PrevSegment(), seg2
+}
+
+// SplitAt splits the segment straddling split, if one exists. SplitAt returns
+// true if a segment was split and false otherwise. If SplitAt splits a
+// segment, all existing iterators are invalidated.
+func (s *FileRangeSet) SplitAt(split uint64) bool {
+ if seg := s.FindSegment(split); seg.Ok() && seg.Range().CanSplitAt(split) {
+ s.SplitUnchecked(seg, split)
+ return true
+ }
+ return false
+}
+
+// Isolate ensures that the given segment's range does not escape r by
+// splitting at r.Start and r.End if necessary, and returns an updated iterator
+// to the bounded segment. All existing iterators (including seg, but not
+// including the returned iterator) are invalidated.
+func (s *FileRangeSet) Isolate(seg FileRangeIterator, r __generics_imported0.MappableRange) FileRangeIterator {
+ if seg.Range().CanSplitAt(r.Start) {
+ _, seg = s.SplitUnchecked(seg, r.Start)
+ }
+ if seg.Range().CanSplitAt(r.End) {
+ seg, _ = s.SplitUnchecked(seg, r.End)
+ }
+ return seg
+}
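+
+// Illustrative sketch, not part of the generated template: Isolate is the
+// usual way to clamp a segment to a range of interest before mutating only
+// that part of it. It assumes seg overlaps r; the range and value written are
+// arbitrary.
+func exampleFileRangeIsolate(s *FileRangeSet, seg FileRangeIterator) {
+	r := __generics_imported0.MappableRange{Start: 0x1000, End: 0x2000}
+	seg = s.Isolate(seg, r) // seg's range is now contained in r.
+	seg.SetValue(0)
+}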
+
+// ApplyContiguous applies a function to a contiguous range of segments,
+// splitting if necessary. The function is applied until the first gap is
+// encountered, at which point the gap is returned. If the function is applied
+// across the entire range, a terminal gap is returned. All existing iterators
+// are invalidated.
+//
+// N.B. The Iterator must not be invalidated by the function.
+func (s *FileRangeSet) ApplyContiguous(r __generics_imported0.MappableRange, fn func(seg FileRangeIterator)) FileRangeGapIterator {
+ seg, gap := s.Find(r.Start)
+ if !seg.Ok() {
+ return gap
+ }
+ for {
+ seg = s.Isolate(seg, r)
+ fn(seg)
+ if seg.End() >= r.End {
+ return FileRangeGapIterator{}
+ }
+ gap = seg.NextGap()
+ if !gap.IsEmpty() {
+ return gap
+ }
+ seg = gap.NextSegment()
+ if !seg.Ok() {
+
+ return FileRangeGapIterator{}
+ }
+ }
+}
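+
+// Illustrative sketch, not part of the generated template: the typical
+// mutate-then-recoalesce pattern built on ApplyContiguous and MergeRange. The
+// range and the value written are arbitrary.
+func exampleFileRangeApply(s *FileRangeSet) {
+	r := __generics_imported0.MappableRange{Start: 0x1000, End: 0x3000}
+	// Overwrite the value of every segment overlapping r. ApplyContiguous
+	// splits segments at the boundaries of r and stops at the first hole.
+	if gap := s.ApplyContiguous(r, func(seg FileRangeIterator) {
+		seg.SetValue(0)
+	}); gap.Ok() {
+		_ = gap.Range() // r was not fully covered; gap is the first hole.
+	}
+	// Re-coalesce any segments whose values now allow merging.
+	s.MergeRange(r)
+}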
+
+// +stateify savable
+type FileRangenode struct {
+ // An internal binary tree node looks like:
+ //
+ // K
+ // / \
+ // Cl Cr
+ //
+ // where all keys in the subtree rooted by Cl (the left subtree) are less
+ // than K (the key of the parent node), and all keys in the subtree rooted
+ // by Cr (the right subtree) are greater than K.
+ //
+ // An internal B-tree node's indexes work out to look like:
+ //
+ // K0 K1 K2 ... Kn-1
+ // / \/ \/ \ ... / \
+ // C0 C1 C2 C3 ... Cn-1 Cn
+ //
+ // where n is nrSegments.
+ nrSegments int
+
+ // parent is a pointer to this node's parent. If this node is root, parent
+ // is nil.
+ parent *FileRangenode
+
+ // parentIndex is the index of this node in parent.children.
+ parentIndex int
+
+ // Flag for internal nodes that is technically redundant with "children[0]
+ // != nil", but is stored in the first cache line. "hasChildren" rather
+ // than "isLeaf" because false must be the correct value for an empty root.
+ hasChildren bool
+
+ // The longest gap within this node. If the node is a leaf, it's simply the
+ // maximum gap among all the (nrSegments+1) gaps formed by its nrSegments keys
+ // including the 0th and nrSegments-th gap possibly shared with its upper-level
+ // nodes; if it's a non-leaf node, it's the max of all children's maxGap.
+ maxGap FileRangedynamicGap
+
+ // Nodes store keys and values in separate arrays to maximize locality in
+ // the common case (scanning keys for lookup).
+ keys [FileRangemaxDegree - 1]__generics_imported0.MappableRange
+ values [FileRangemaxDegree - 1]uint64
+ children [FileRangemaxDegree]*FileRangenode
+}
+
+// firstSegment returns the first segment in the subtree rooted by n.
+//
+// Preconditions: n.nrSegments != 0.
+func (n *FileRangenode) firstSegment() FileRangeIterator {
+ for n.hasChildren {
+ n = n.children[0]
+ }
+ return FileRangeIterator{n, 0}
+}
+
+// lastSegment returns the last segment in the subtree rooted by n.
+//
+// Preconditions: n.nrSegments != 0.
+func (n *FileRangenode) lastSegment() FileRangeIterator {
+ for n.hasChildren {
+ n = n.children[n.nrSegments]
+ }
+ return FileRangeIterator{n, n.nrSegments - 1}
+}
+
+func (n *FileRangenode) prevSibling() *FileRangenode {
+ if n.parent == nil || n.parentIndex == 0 {
+ return nil
+ }
+ return n.parent.children[n.parentIndex-1]
+}
+
+func (n *FileRangenode) nextSibling() *FileRangenode {
+ if n.parent == nil || n.parentIndex == n.parent.nrSegments {
+ return nil
+ }
+ return n.parent.children[n.parentIndex+1]
+}
+
+// rebalanceBeforeInsert splits n and its ancestors if they are full, as
+// required for insertion, and returns an updated iterator to the position
+// represented by gap.
+func (n *FileRangenode) rebalanceBeforeInsert(gap FileRangeGapIterator) FileRangeGapIterator {
+ if n.nrSegments < FileRangemaxDegree-1 {
+ return gap
+ }
+ if n.parent != nil {
+ gap = n.parent.rebalanceBeforeInsert(gap)
+ }
+ if n.parent == nil {
+
+ left := &FileRangenode{
+ nrSegments: FileRangeminDegree - 1,
+ parent: n,
+ parentIndex: 0,
+ hasChildren: n.hasChildren,
+ }
+ right := &FileRangenode{
+ nrSegments: FileRangeminDegree - 1,
+ parent: n,
+ parentIndex: 1,
+ hasChildren: n.hasChildren,
+ }
+ copy(left.keys[:FileRangeminDegree-1], n.keys[:FileRangeminDegree-1])
+ copy(left.values[:FileRangeminDegree-1], n.values[:FileRangeminDegree-1])
+ copy(right.keys[:FileRangeminDegree-1], n.keys[FileRangeminDegree:])
+ copy(right.values[:FileRangeminDegree-1], n.values[FileRangeminDegree:])
+ n.keys[0], n.values[0] = n.keys[FileRangeminDegree-1], n.values[FileRangeminDegree-1]
+ FileRangezeroValueSlice(n.values[1:])
+ if n.hasChildren {
+ copy(left.children[:FileRangeminDegree], n.children[:FileRangeminDegree])
+ copy(right.children[:FileRangeminDegree], n.children[FileRangeminDegree:])
+ FileRangezeroNodeSlice(n.children[2:])
+ for i := 0; i < FileRangeminDegree; i++ {
+ left.children[i].parent = left
+ left.children[i].parentIndex = i
+ right.children[i].parent = right
+ right.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments = 1
+ n.hasChildren = true
+ n.children[0] = left
+ n.children[1] = right
+
+ if FileRangetrackGaps != 0 {
+ left.updateMaxGapLocal()
+ right.updateMaxGapLocal()
+ }
+ if gap.node != n {
+ return gap
+ }
+ if gap.index < FileRangeminDegree {
+ return FileRangeGapIterator{left, gap.index}
+ }
+ return FileRangeGapIterator{right, gap.index - FileRangeminDegree}
+ }
+
+ copy(n.parent.keys[n.parentIndex+1:], n.parent.keys[n.parentIndex:n.parent.nrSegments])
+ copy(n.parent.values[n.parentIndex+1:], n.parent.values[n.parentIndex:n.parent.nrSegments])
+ n.parent.keys[n.parentIndex], n.parent.values[n.parentIndex] = n.keys[FileRangeminDegree-1], n.values[FileRangeminDegree-1]
+ copy(n.parent.children[n.parentIndex+2:], n.parent.children[n.parentIndex+1:n.parent.nrSegments+1])
+ for i := n.parentIndex + 2; i < n.parent.nrSegments+2; i++ {
+ n.parent.children[i].parentIndex = i
+ }
+ sibling := &FileRangenode{
+ nrSegments: FileRangeminDegree - 1,
+ parent: n.parent,
+ parentIndex: n.parentIndex + 1,
+ hasChildren: n.hasChildren,
+ }
+ n.parent.children[n.parentIndex+1] = sibling
+ n.parent.nrSegments++
+ copy(sibling.keys[:FileRangeminDegree-1], n.keys[FileRangeminDegree:])
+ copy(sibling.values[:FileRangeminDegree-1], n.values[FileRangeminDegree:])
+ FileRangezeroValueSlice(n.values[FileRangeminDegree-1:])
+ if n.hasChildren {
+ copy(sibling.children[:FileRangeminDegree], n.children[FileRangeminDegree:])
+ FileRangezeroNodeSlice(n.children[FileRangeminDegree:])
+ for i := 0; i < FileRangeminDegree; i++ {
+ sibling.children[i].parent = sibling
+ sibling.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments = FileRangeminDegree - 1
+
+ if FileRangetrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+
+ if gap.node != n {
+ return gap
+ }
+ if gap.index < FileRangeminDegree {
+ return gap
+ }
+ return FileRangeGapIterator{sibling, gap.index - FileRangeminDegree}
+}
+
+// rebalanceAfterRemove "unsplits" n and its ancestors if they are deficient
+// (contain fewer segments than required by B-tree invariants), as required for
+// removal, and returns an updated iterator to the position represented by gap.
+//
+// Precondition: n is the only node in the tree that may currently violate a
+// B-tree invariant.
+func (n *FileRangenode) rebalanceAfterRemove(gap FileRangeGapIterator) FileRangeGapIterator {
+ for {
+ if n.nrSegments >= FileRangeminDegree-1 {
+ return gap
+ }
+ if n.parent == nil {
+
+ return gap
+ }
+
+ if sibling := n.prevSibling(); sibling != nil && sibling.nrSegments >= FileRangeminDegree {
+ copy(n.keys[1:], n.keys[:n.nrSegments])
+ copy(n.values[1:], n.values[:n.nrSegments])
+ n.keys[0] = n.parent.keys[n.parentIndex-1]
+ n.values[0] = n.parent.values[n.parentIndex-1]
+ n.parent.keys[n.parentIndex-1] = sibling.keys[sibling.nrSegments-1]
+ n.parent.values[n.parentIndex-1] = sibling.values[sibling.nrSegments-1]
+ FileRangeSetFunctions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
+ if n.hasChildren {
+ copy(n.children[1:], n.children[:n.nrSegments+1])
+ n.children[0] = sibling.children[sibling.nrSegments]
+ sibling.children[sibling.nrSegments] = nil
+ n.children[0].parent = n
+ n.children[0].parentIndex = 0
+ for i := 1; i < n.nrSegments+2; i++ {
+ n.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments++
+ sibling.nrSegments--
+
+ if FileRangetrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+ if gap.node == sibling && gap.index == sibling.nrSegments {
+ return FileRangeGapIterator{n, 0}
+ }
+ if gap.node == n {
+ return FileRangeGapIterator{n, gap.index + 1}
+ }
+ return gap
+ }
+ if sibling := n.nextSibling(); sibling != nil && sibling.nrSegments >= FileRangeminDegree {
+ n.keys[n.nrSegments] = n.parent.keys[n.parentIndex]
+ n.values[n.nrSegments] = n.parent.values[n.parentIndex]
+ n.parent.keys[n.parentIndex] = sibling.keys[0]
+ n.parent.values[n.parentIndex] = sibling.values[0]
+ copy(sibling.keys[:sibling.nrSegments-1], sibling.keys[1:])
+ copy(sibling.values[:sibling.nrSegments-1], sibling.values[1:])
+ FileRangeSetFunctions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
+ if n.hasChildren {
+ n.children[n.nrSegments+1] = sibling.children[0]
+ copy(sibling.children[:sibling.nrSegments], sibling.children[1:])
+ sibling.children[sibling.nrSegments] = nil
+ n.children[n.nrSegments+1].parent = n
+ n.children[n.nrSegments+1].parentIndex = n.nrSegments + 1
+ for i := 0; i < sibling.nrSegments; i++ {
+ sibling.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments++
+ sibling.nrSegments--
+
+ if FileRangetrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+ if gap.node == sibling {
+ if gap.index == 0 {
+ return FileRangeGapIterator{n, n.nrSegments}
+ }
+ return FileRangeGapIterator{sibling, gap.index - 1}
+ }
+ return gap
+ }
+
+ p := n.parent
+ if p.nrSegments == 1 {
+
+ left, right := p.children[0], p.children[1]
+ p.nrSegments = left.nrSegments + right.nrSegments + 1
+ p.hasChildren = left.hasChildren
+ p.keys[left.nrSegments] = p.keys[0]
+ p.values[left.nrSegments] = p.values[0]
+ copy(p.keys[:left.nrSegments], left.keys[:left.nrSegments])
+ copy(p.values[:left.nrSegments], left.values[:left.nrSegments])
+ copy(p.keys[left.nrSegments+1:], right.keys[:right.nrSegments])
+ copy(p.values[left.nrSegments+1:], right.values[:right.nrSegments])
+ if left.hasChildren {
+ copy(p.children[:left.nrSegments+1], left.children[:left.nrSegments+1])
+ copy(p.children[left.nrSegments+1:], right.children[:right.nrSegments+1])
+ for i := 0; i < p.nrSegments+1; i++ {
+ p.children[i].parent = p
+ p.children[i].parentIndex = i
+ }
+ } else {
+ p.children[0] = nil
+ p.children[1] = nil
+ }
+
+ if gap.node == left {
+ return FileRangeGapIterator{p, gap.index}
+ }
+ if gap.node == right {
+ return FileRangeGapIterator{p, gap.index + left.nrSegments + 1}
+ }
+ return gap
+ }
+ // Merge n and either sibling, along with the segment separating the
+ // two, into whichever of the two nodes comes first. This is the
+ // reverse of the non-root splitting case in
+ // node.rebalanceBeforeInsert.
+ var left, right *FileRangenode
+ if n.parentIndex > 0 {
+ left = n.prevSibling()
+ right = n
+ } else {
+ left = n
+ right = n.nextSibling()
+ }
+
+ if gap.node == right {
+ gap = FileRangeGapIterator{left, gap.index + left.nrSegments + 1}
+ }
+ left.keys[left.nrSegments] = p.keys[left.parentIndex]
+ left.values[left.nrSegments] = p.values[left.parentIndex]
+ copy(left.keys[left.nrSegments+1:], right.keys[:right.nrSegments])
+ copy(left.values[left.nrSegments+1:], right.values[:right.nrSegments])
+ if left.hasChildren {
+ copy(left.children[left.nrSegments+1:], right.children[:right.nrSegments+1])
+ for i := left.nrSegments + 1; i < left.nrSegments+right.nrSegments+2; i++ {
+ left.children[i].parent = left
+ left.children[i].parentIndex = i
+ }
+ }
+ left.nrSegments += right.nrSegments + 1
+ copy(p.keys[left.parentIndex:], p.keys[left.parentIndex+1:p.nrSegments])
+ copy(p.values[left.parentIndex:], p.values[left.parentIndex+1:p.nrSegments])
+ FileRangeSetFunctions{}.ClearValue(&p.values[p.nrSegments-1])
+ copy(p.children[left.parentIndex+1:], p.children[left.parentIndex+2:p.nrSegments+1])
+ for i := 0; i < p.nrSegments; i++ {
+ p.children[i].parentIndex = i
+ }
+ p.children[p.nrSegments] = nil
+ p.nrSegments--
+
+ if FileRangetrackGaps != 0 {
+ left.updateMaxGapLocal()
+ }
+
+ n = p
+ }
+}
+
+// updateMaxGapLeaf updates maxGap bottom-up from the calling leaf until no
+// further update is necessary.
+//
+// Preconditions: n must be a leaf node, trackGaps must be 1.
+func (n *FileRangenode) updateMaxGapLeaf() {
+ if n.hasChildren {
+ panic(fmt.Sprintf("updateMaxGapLeaf should always be called on leaf node: %v", n))
+ }
+ max := n.calculateMaxGapLeaf()
+ if max == n.maxGap.Get() {
+
+ return
+ }
+ oldMax := n.maxGap.Get()
+ n.maxGap.Set(max)
+ if max > oldMax {
+
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() >= max {
+
+ break
+ }
+
+ p.maxGap.Set(max)
+ }
+ return
+ }
+
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() > oldMax {
+
+ break
+ }
+
+ parentNewMax := p.calculateMaxGapInternal()
+ if p.maxGap.Get() == parentNewMax {
+
+ break
+ }
+
+ p.maxGap.Set(parentNewMax)
+ }
+}
+
+// updateMaxGapLocal updates maxGap of the calling node only, without
+// propagating to ancestor nodes.
+//
+// Precondition: trackGaps must be 1.
+func (n *FileRangenode) updateMaxGapLocal() {
+ if !n.hasChildren {
+
+ n.maxGap.Set(n.calculateMaxGapLeaf())
+ } else {
+
+ n.maxGap.Set(n.calculateMaxGapInternal())
+ }
+}
+
+// calculateMaxGapLeaf iterates the gaps within a leaf node and calculates the
+// max.
+//
+// Preconditions: n must be a leaf node.
+func (n *FileRangenode) calculateMaxGapLeaf() uint64 {
+ max := FileRangeGapIterator{n, 0}.Range().Length()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := (FileRangeGapIterator{n, i}).Range().Length(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// calculateMaxGapInternal iterates children's maxGap within an internal node n
+// and calculates the max.
+//
+// Preconditions: n must be a non-leaf node.
+func (n *FileRangenode) calculateMaxGapInternal() uint64 {
+ max := n.children[0].maxGap.Get()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := n.children[i].maxGap.Get(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// searchFirstLargeEnoughGap returns the first gap having at least minSize length
+// in the subtree rooted by n. If no such gap exists, it returns a terminal
+// gap iterator.
+func (n *FileRangenode) searchFirstLargeEnoughGap(minSize uint64) FileRangeGapIterator {
+ if n.maxGap.Get() < minSize {
+ return FileRangeGapIterator{}
+ }
+ if n.hasChildren {
+ for i := 0; i <= n.nrSegments; i++ {
+ if largeEnoughGap := n.children[i].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := 0; i <= n.nrSegments; i++ {
+ currentGap := FileRangeGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
+// searchLastLargeEnoughGap returns the last gap having at least minSize length
+// in the subtree rooted by n. If no such gap exists, it returns a terminal
+// gap iterator.
+func (n *FileRangenode) searchLastLargeEnoughGap(minSize uint64) FileRangeGapIterator {
+ if n.maxGap.Get() < minSize {
+ return FileRangeGapIterator{}
+ }
+ if n.hasChildren {
+ for i := n.nrSegments; i >= 0; i-- {
+ if largeEnoughGap := n.children[i].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := n.nrSegments; i >= 0; i-- {
+ currentGap := FileRangeGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
+// An Iterator is conceptually one of:
+//
+// - A pointer to a segment in a set; or
+//
+// - A terminal iterator, which is a sentinel indicating that the end of
+// iteration has been reached.
+//
+// Iterators are copyable values and are meaningfully equality-comparable. The
+// zero value of Iterator is a terminal iterator.
+//
+// Unless otherwise specified, any mutation of a set invalidates all existing
+// iterators into the set.
+type FileRangeIterator struct {
+ // node is the node containing the iterated segment. If the iterator is
+ // terminal, node is nil.
+ node *FileRangenode
+
+ // index is the index of the segment in node.keys/values.
+ index int
+}
+
+// Ok returns true if the iterator is not terminal. All other methods are only
+// valid for non-terminal iterators.
+func (seg FileRangeIterator) Ok() bool {
+ return seg.node != nil
+}
+
+// Range returns the iterated segment's range key.
+func (seg FileRangeIterator) Range() __generics_imported0.MappableRange {
+ return seg.node.keys[seg.index]
+}
+
+// Start is equivalent to Range().Start, but should be preferred if only the
+// start of the range is needed.
+func (seg FileRangeIterator) Start() uint64 {
+ return seg.node.keys[seg.index].Start
+}
+
+// End is equivalent to Range().End, but should be preferred if only the end of
+// the range is needed.
+func (seg FileRangeIterator) End() uint64 {
+ return seg.node.keys[seg.index].End
+}
+
+// SetRangeUnchecked mutates the iterated segment's range key. This operation
+// does not invalidate any iterators.
+//
+// Preconditions:
+// * r.Length() > 0.
+// * The new range must not overlap an existing one:
+// * If seg.NextSegment().Ok(), then r.End <= seg.NextSegment().Start().
+// * If seg.PrevSegment().Ok(), then r.Start >= seg.PrevSegment().End().
+func (seg FileRangeIterator) SetRangeUnchecked(r __generics_imported0.MappableRange) {
+ seg.node.keys[seg.index] = r
+}
+
+// SetRange mutates the iterated segment's range key. If the new range would
+// cause the iterated segment to overlap another segment, or if the new range
+// is invalid, SetRange panics. This operation does not invalidate any
+// iterators.
+func (seg FileRangeIterator) SetRange(r __generics_imported0.MappableRange) {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ if prev := seg.PrevSegment(); prev.Ok() && r.Start < prev.End() {
+ panic(fmt.Sprintf("new segment range %v overlaps segment range %v", r, prev.Range()))
+ }
+ if next := seg.NextSegment(); next.Ok() && r.End > next.Start() {
+ panic(fmt.Sprintf("new segment range %v overlaps segment range %v", r, next.Range()))
+ }
+ seg.SetRangeUnchecked(r)
+}
+
+// SetStartUnchecked mutates the iterated segment's start. This operation does
+// not invalidate any iterators.
+//
+// Preconditions: The new start must be valid:
+// * start < seg.End()
+// * If seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
+func (seg FileRangeIterator) SetStartUnchecked(start uint64) {
+ seg.node.keys[seg.index].Start = start
+}
+
+// SetStart mutates the iterated segment's start. If the new start value would
+// cause the iterated segment to overlap another segment, or would result in an
+// invalid range, SetStart panics. This operation does not invalidate any
+// iterators.
+func (seg FileRangeIterator) SetStart(start uint64) {
+ if start >= seg.End() {
+ panic(fmt.Sprintf("new start %v would invalidate segment range %v", start, seg.Range()))
+ }
+ if prev := seg.PrevSegment(); prev.Ok() && start < prev.End() {
+ panic(fmt.Sprintf("new start %v would cause segment range %v to overlap segment range %v", start, seg.Range(), prev.Range()))
+ }
+ seg.SetStartUnchecked(start)
+}
+
+// SetEndUnchecked mutates the iterated segment's end. This operation does not
+// invalidate any iterators.
+//
+// Preconditions: The new end must be valid:
+// * end > seg.Start().
+// * If seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
+func (seg FileRangeIterator) SetEndUnchecked(end uint64) {
+ seg.node.keys[seg.index].End = end
+}
+
+// SetEnd mutates the iterated segment's end. If the new end value would cause
+// the iterated segment to overlap another segment, or would result in an
+// invalid range, SetEnd panics. This operation does not invalidate any
+// iterators.
+func (seg FileRangeIterator) SetEnd(end uint64) {
+ if end <= seg.Start() {
+ panic(fmt.Sprintf("new end %v would invalidate segment range %v", end, seg.Range()))
+ }
+ if next := seg.NextSegment(); next.Ok() && end > next.Start() {
+ panic(fmt.Sprintf("new end %v would cause segment range %v to overlap segment range %v", end, seg.Range(), next.Range()))
+ }
+ seg.SetEndUnchecked(end)
+}
+
+// Value returns a copy of the iterated segment's value.
+func (seg FileRangeIterator) Value() uint64 {
+ return seg.node.values[seg.index]
+}
+
+// ValuePtr returns a pointer to the iterated segment's value. The pointer is
+// invalidated if the iterator is invalidated. This operation does not
+// invalidate any iterators.
+func (seg FileRangeIterator) ValuePtr() *uint64 {
+ return &seg.node.values[seg.index]
+}
+
+// SetValue mutates the iterated segment's value. This operation does not
+// invalidate any iterators.
+func (seg FileRangeIterator) SetValue(val uint64) {
+ seg.node.values[seg.index] = val
+}
+
+// PrevSegment returns the iterated segment's predecessor. If there is no
+// preceding segment, PrevSegment returns a terminal iterator.
+func (seg FileRangeIterator) PrevSegment() FileRangeIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index].lastSegment()
+ }
+ if seg.index > 0 {
+ return FileRangeIterator{seg.node, seg.index - 1}
+ }
+ if seg.node.parent == nil {
+ return FileRangeIterator{}
+ }
+ return FileRangesegmentBeforePosition(seg.node.parent, seg.node.parentIndex)
+}
+
+// NextSegment returns the iterated segment's successor. If there is no
+// succeeding segment, NextSegment returns a terminal iterator.
+func (seg FileRangeIterator) NextSegment() FileRangeIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index+1].firstSegment()
+ }
+ if seg.index < seg.node.nrSegments-1 {
+ return FileRangeIterator{seg.node, seg.index + 1}
+ }
+ if seg.node.parent == nil {
+ return FileRangeIterator{}
+ }
+ return FileRangesegmentAfterPosition(seg.node.parent, seg.node.parentIndex)
+}
+
+// PrevGap returns the gap immediately before the iterated segment.
+func (seg FileRangeIterator) PrevGap() FileRangeGapIterator {
+ if seg.node.hasChildren {
+
+ return seg.node.children[seg.index].lastSegment().NextGap()
+ }
+ return FileRangeGapIterator{seg.node, seg.index}
+}
+
+// NextGap returns the gap immediately after the iterated segment.
+func (seg FileRangeIterator) NextGap() FileRangeGapIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index+1].firstSegment().PrevGap()
+ }
+ return FileRangeGapIterator{seg.node, seg.index + 1}
+}
+
+// PrevNonEmpty returns the iterated segment's predecessor if it is adjacent,
+// or the gap before the iterated segment otherwise. If seg.Start() ==
+// Functions.MinKey(), PrevNonEmpty will return two terminal iterators.
+// Otherwise, exactly one of the iterators returned by PrevNonEmpty will be
+// non-terminal.
+func (seg FileRangeIterator) PrevNonEmpty() (FileRangeIterator, FileRangeGapIterator) {
+ gap := seg.PrevGap()
+ if gap.Range().Length() != 0 {
+ return FileRangeIterator{}, gap
+ }
+ return gap.PrevSegment(), FileRangeGapIterator{}
+}
+
+// NextNonEmpty returns the iterated segment's successor if it is adjacent, or
+// the gap after the iterated segment otherwise. If seg.End() ==
+// Functions.MaxKey(), NextNonEmpty will return two terminal iterators.
+// Otherwise, exactly one of the iterators returned by NextNonEmpty will be
+// non-terminal.
+func (seg FileRangeIterator) NextNonEmpty() (FileRangeIterator, FileRangeGapIterator) {
+ gap := seg.NextGap()
+ if gap.Range().Length() != 0 {
+ return FileRangeIterator{}, gap
+ }
+ return gap.NextSegment(), FileRangeGapIterator{}
+}
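+
+// Illustrative sketch, not part of the generated template: NextNonEmpty makes
+// it easy to walk a run of back-to-back segments and stop at the first real
+// hole. The starting iterator is assumed to be non-terminal.
+func exampleFileRangeWalkRun(seg FileRangeIterator) {
+	for seg.Ok() {
+		_ = seg.Range()
+		var gap FileRangeGapIterator
+		seg, gap = seg.NextNonEmpty()
+		if gap.Ok() {
+			break // The run ended at a non-empty gap.
+		}
+	}
+}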
+
+// A GapIterator is conceptually one of:
+//
+// - A pointer to a position between two segments, before the first segment, or
+// after the last segment in a set, called a *gap*; or
+//
+// - A terminal iterator, which is a sentinel indicating that the end of
+// iteration has been reached.
+//
+// Note that the gap between two adjacent segments exists (iterators to it are
+// non-terminal), but has a length of zero. GapIterator.IsEmpty returns true
+// for such gaps. An empty set contains a single gap, spanning the entire range
+// of the set's keys.
+//
+// GapIterators are copyable values and are meaningfully equality-comparable.
+// The zero value of GapIterator is a terminal iterator.
+//
+// Unless otherwise specified, any mutation of a set invalidates all existing
+// iterators into the set.
+type FileRangeGapIterator struct {
+ // The representation of a GapIterator is identical to that of an Iterator,
+ // except that index corresponds to positions between segments in the same
+ // way as for node.children (see comment for node.nrSegments).
+ node *FileRangenode
+ index int
+}
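+
+// Illustrative sketch, not part of the generated template: walking every gap
+// in a set while skipping the zero-length gaps that sit between adjacent
+// segments.
+func exampleFileRangeGapWalk(s *FileRangeSet) {
+	for gap := s.FirstGap(); gap.Ok(); gap = gap.NextGap() {
+		if gap.IsEmpty() {
+			continue
+		}
+		_ = gap.Range() // A genuine hole in the key space.
+	}
+}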
+
+// Ok returns true if the iterator is not terminal. All other methods are only
+// valid for non-terminal iterators.
+func (gap FileRangeGapIterator) Ok() bool {
+ return gap.node != nil
+}
+
+// Range returns the range spanned by the iterated gap.
+func (gap FileRangeGapIterator) Range() __generics_imported0.MappableRange {
+ return __generics_imported0.MappableRange{gap.Start(), gap.End()}
+}
+
+// Start is equivalent to Range().Start, but should be preferred if only the
+// start of the range is needed.
+func (gap FileRangeGapIterator) Start() uint64 {
+ if ps := gap.PrevSegment(); ps.Ok() {
+ return ps.End()
+ }
+ return FileRangeSetFunctions{}.MinKey()
+}
+
+// End is equivalent to Range().End, but should be preferred if only the end of
+// the range is needed.
+func (gap FileRangeGapIterator) End() uint64 {
+ if ns := gap.NextSegment(); ns.Ok() {
+ return ns.Start()
+ }
+ return FileRangeSetFunctions{}.MaxKey()
+}
+
+// IsEmpty returns true if the iterated gap is empty (that is, the "gap" is
+// between two adjacent segments).
+func (gap FileRangeGapIterator) IsEmpty() bool {
+ return gap.Range().Length() == 0
+}
+
+// PrevSegment returns the segment immediately before the iterated gap. If no
+// such segment exists, PrevSegment returns a terminal iterator.
+func (gap FileRangeGapIterator) PrevSegment() FileRangeIterator {
+ return FileRangesegmentBeforePosition(gap.node, gap.index)
+}
+
+// NextSegment returns the segment immediately after the iterated gap. If no
+// such segment exists, NextSegment returns a terminal iterator.
+func (gap FileRangeGapIterator) NextSegment() FileRangeIterator {
+ return FileRangesegmentAfterPosition(gap.node, gap.index)
+}
+
+// PrevGap returns the iterated gap's predecessor. If no such gap exists,
+// PrevGap returns a terminal iterator.
+func (gap FileRangeGapIterator) PrevGap() FileRangeGapIterator {
+ seg := gap.PrevSegment()
+ if !seg.Ok() {
+ return FileRangeGapIterator{}
+ }
+ return seg.PrevGap()
+}
+
+// NextGap returns the iterated gap's successor. If no such gap exists, NextGap
+// returns a terminal iterator.
+func (gap FileRangeGapIterator) NextGap() FileRangeGapIterator {
+ seg := gap.NextSegment()
+ if !seg.Ok() {
+ return FileRangeGapIterator{}
+ }
+ return seg.NextGap()
+}
+
+// NextLargeEnoughGap returns the iterated gap's first succeeding gap with
+// length at least minSize. If no such gap exists, it returns a terminal gap
+// iterator (the iterated gap itself is NOT considered).
+//
+// Precondition: trackGaps must be 1.
+func (gap FileRangeGapIterator) NextLargeEnoughGap(minSize uint64) FileRangeGapIterator {
+ if FileRangetrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == gap.node.nrSegments {
+
+ gap.node = gap.NextSegment().node
+ gap.index = 0
+ return gap.nextLargeEnoughGapHelper(minSize)
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
+
+// nextLargeEnoughGapHelper is the helper function used by NextLargeEnoughGap
+// to perform the actual recursion.
+//
+// Preconditions: gap is NOT the trailing gap of a non-leaf node.
+func (gap FileRangeGapIterator) nextLargeEnoughGapHelper(minSize uint64) FileRangeGapIterator {
+
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == gap.node.nrSegments)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return FileRangeGapIterator{}
+ }
+
+ gap.index++
+ for gap.index <= gap.node.nrSegments {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index++
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == gap.node.nrSegments {
+
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
+
+// PrevLargeEnoughGap returns the iterated gap's first preceding gap with
+// length at least minSize. If no such gap exists, it returns a terminal gap
+// iterator (the iterated gap itself is NOT considered).
+//
+// Precondition: trackGaps must be 1.
+func (gap FileRangeGapIterator) PrevLargeEnoughGap(minSize uint64) FileRangeGapIterator {
+ if FileRangetrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == 0 {
+
+ gap.node = gap.PrevSegment().node
+ gap.index = gap.node.nrSegments
+ return gap.prevLargeEnoughGapHelper(minSize)
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
+
+// prevLargeEnoughGapHelper is the helper function used by PrevLargeEnoughGap
+// to perform the actual recursion.
+//
+// Preconditions: gap is NOT the first gap of a non-leaf node.
+func (gap FileRangeGapIterator) prevLargeEnoughGapHelper(minSize uint64) FileRangeGapIterator {
+
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == 0)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return FileRangeGapIterator{}
+ }
+
+ gap.index--
+ for gap.index >= 0 {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index--
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == 0 {
+
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
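+
+// Illustrative sketch, not part of the generated template: finding the first
+// hole of at least 16 pages in the set. NextLargeEnoughGap is only legal when
+// the template was instantiated with trackGaps == 1, so the call is guarded;
+// with trackGaps == 0 it would panic. The size is arbitrary, and note that
+// NextLargeEnoughGap does not consider the starting gap itself, hence the
+// explicit check on FirstGap.
+func exampleFileRangeFindHole(s *FileRangeSet) FileRangeGapIterator {
+	if FileRangetrackGaps != 1 {
+		return FileRangeGapIterator{} // Gap-size tracking is disabled for this instantiation.
+	}
+	const minSize = 16 * 4096
+	if gap := s.FirstGap(); gap.Range().Length() >= minSize {
+		return gap
+	}
+	return s.FirstGap().NextLargeEnoughGap(minSize)
+}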
+
+// segmentBeforePosition returns the predecessor segment of the position given
+// by n.children[i], which may or may not contain a child. If no such segment
+// exists, segmentBeforePosition returns a terminal iterator.
+func FileRangesegmentBeforePosition(n *FileRangenode, i int) FileRangeIterator {
+ for i == 0 {
+ if n.parent == nil {
+ return FileRangeIterator{}
+ }
+ n, i = n.parent, n.parentIndex
+ }
+ return FileRangeIterator{n, i - 1}
+}
+
+// segmentAfterPosition returns the successor segment of the position given by
+// n.children[i], which may or may not contain a child. If no such segment
+// exists, segmentAfterPosition returns a terminal iterator.
+func FileRangesegmentAfterPosition(n *FileRangenode, i int) FileRangeIterator {
+ for i == n.nrSegments {
+ if n.parent == nil {
+ return FileRangeIterator{}
+ }
+ n, i = n.parent, n.parentIndex
+ }
+ return FileRangeIterator{n, i}
+}
+
+func FileRangezeroValueSlice(slice []uint64) {
+
+ for i := range slice {
+ FileRangeSetFunctions{}.ClearValue(&slice[i])
+ }
+}
+
+func FileRangezeroNodeSlice(slice []*FileRangenode) {
+ for i := range slice {
+ slice[i] = nil
+ }
+}
+
+// String stringifies a Set for debugging.
+func (s *FileRangeSet) String() string {
+ return s.root.String()
+}
+
+// String stringifies a node (and all of its children) for debugging.
+func (n *FileRangenode) String() string {
+ var buf bytes.Buffer
+ n.writeDebugString(&buf, "")
+ return buf.String()
+}
+
+func (n *FileRangenode) writeDebugString(buf *bytes.Buffer, prefix string) {
+ if n.hasChildren != (n.nrSegments > 0 && n.children[0] != nil) {
+ buf.WriteString(prefix)
+ buf.WriteString(fmt.Sprintf("WARNING: inconsistent value of hasChildren: got %v, want %v\n", n.hasChildren, !n.hasChildren))
+ }
+ for i := 0; i < n.nrSegments; i++ {
+ if child := n.children[i]; child != nil {
+ cprefix := fmt.Sprintf("%s- % 3d ", prefix, i)
+ if child.parent != n || child.parentIndex != i {
+ buf.WriteString(cprefix)
+ buf.WriteString(fmt.Sprintf("WARNING: inconsistent linkage to parent: got (%p, %d), want (%p, %d)\n", child.parent, child.parentIndex, n, i))
+ }
+ child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, i))
+ }
+ buf.WriteString(prefix)
+ if n.hasChildren {
+ if FileRangetrackGaps != 0 {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v, maxGap: %d\n", i, n.keys[i], n.values[i], n.maxGap.Get()))
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
+ }
+ if child := n.children[n.nrSegments]; child != nil {
+ child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, n.nrSegments))
+ }
+}
+
+// SegmentDataSlices represents segments from a set as slices of start, end, and
+// values. SegmentDataSlices is primarily used as an intermediate representation
+// for save/restore and the layout here is optimized for that.
+//
+// +stateify savable
+type FileRangeSegmentDataSlices struct {
+ Start []uint64
+ End []uint64
+ Values []uint64
+}
+
+// ExportSortedSlices returns a copy of all segments in the given set, in
+// ascending key order.
+func (s *FileRangeSet) ExportSortedSlices() *FileRangeSegmentDataSlices {
+ var sds FileRangeSegmentDataSlices
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ sds.Start = append(sds.Start, seg.Start())
+ sds.End = append(sds.End, seg.End())
+ sds.Values = append(sds.Values, seg.Value())
+ }
+ sds.Start = sds.Start[:len(sds.Start):len(sds.Start)]
+ sds.End = sds.End[:len(sds.End):len(sds.End)]
+ sds.Values = sds.Values[:len(sds.Values):len(sds.Values)]
+ return &sds
+}
+
+// ImportSortedSlices initializes the given set from the given slices.
+//
+// Preconditions:
+// * s must be empty.
+// * sds must represent a valid set (the segments in sds must have valid
+// lengths that do not overlap).
+// * The segments in sds must be sorted in ascending key order.
+func (s *FileRangeSet) ImportSortedSlices(sds *FileRangeSegmentDataSlices) error {
+ if !s.IsEmpty() {
+ return fmt.Errorf("cannot import into non-empty set %v", s)
+ }
+ gap := s.FirstGap()
+ for i := range sds.Start {
+ r := __generics_imported0.MappableRange{sds.Start[i], sds.End[i]}
+ if !gap.Range().IsSupersetOf(r) {
+ return fmt.Errorf("segment overlaps a preceding segment or is incorrectly sorted: [%d, %d) => %v", sds.Start[i], sds.End[i], sds.Values[i])
+ }
+ gap = s.InsertWithoutMerging(gap, r, sds.Values[i]).NextGap()
+ }
+ return nil
+}
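+
+// Illustrative sketch, not part of the generated template: a save/restore
+// round trip through the slice representation, mirroring what saveRoot and
+// loadRoot below do. The destination set must start out empty, per
+// ImportSortedSlices' preconditions.
+func exampleFileRangeSaveRestore(src *FileRangeSet) *FileRangeSet {
+	sds := src.ExportSortedSlices()
+	var dst FileRangeSet
+	if err := dst.ImportSortedSlices(sds); err != nil {
+		panic(err) // Cannot happen: sds came from a valid set.
+	}
+	return &dst
+}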
+
+// segmentTestCheck returns an error if s is incorrectly sorted, does not
+// contain exactly expectedSegments segments, or contains a segment which
+// fails the passed check.
+//
+// This should be used only for testing, and has been added to this package for
+// templating convenience.
+func (s *FileRangeSet) segmentTestCheck(expectedSegments int, segFunc func(int, __generics_imported0.MappableRange, uint64) error) error {
+ havePrev := false
+ prev := uint64(0)
+ nrSegments := 0
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ next := seg.Start()
+ if havePrev && prev >= next {
+ return fmt.Errorf("incorrect order: key %d (segment %d) >= key %d (segment %d)", prev, nrSegments-1, next, nrSegments)
+ }
+ if segFunc != nil {
+ if err := segFunc(nrSegments, seg.Range(), seg.Value()); err != nil {
+ return err
+ }
+ }
+ prev = next
+ havePrev = true
+ nrSegments++
+ }
+ if nrSegments != expectedSegments {
+ return fmt.Errorf("incorrect number of segments: got %d, wanted %d", nrSegments, expectedSegments)
+ }
+ return nil
+}
+
+// countSegments counts the number of segments in the set.
+//
+// Similar to Check, this should only be used for testing.
+func (s *FileRangeSet) countSegments() (segments int) {
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ segments++
+ }
+ return segments
+}
+func (s *FileRangeSet) saveRoot() *FileRangeSegmentDataSlices {
+ return s.ExportSortedSlices()
+}
+
+func (s *FileRangeSet) loadRoot(sds *FileRangeSegmentDataSlices) {
+ if err := s.ImportSortedSlices(sds); err != nil {
+ panic(err)
+ }
+}
diff --git a/pkg/sentry/fs/fsutil/frame_ref_set_impl.go b/pkg/sentry/fs/fsutil/frame_ref_set_impl.go
new file mode 100644
index 000000000..6657addf4
--- /dev/null
+++ b/pkg/sentry/fs/fsutil/frame_ref_set_impl.go
@@ -0,0 +1,1647 @@
+package fsutil
+
+import (
+ __generics_imported0 "gvisor.dev/gvisor/pkg/sentry/memmap"
+)
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// trackGaps is an optional parameter.
+//
+// If trackGaps is 1, the Set will track maximum gap size recursively,
+// enabling the GapIterator.{Prev,Next}LargeEnoughGap functions. In this
+// case, Key must be an unsigned integer.
+//
+// trackGaps must be 0 or 1.
+const FrameReftrackGaps = 0
+
+var _ = uint8(FrameReftrackGaps << 7) // Will fail if not zero or one.
+
+// dynamicGap is a type that disappears if trackGaps is 0.
+type FrameRefdynamicGap [FrameReftrackGaps]uint64
+
+// Get returns the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *FrameRefdynamicGap) Get() uint64 {
+ return d[:][0]
+}
+
+// Set sets the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *FrameRefdynamicGap) Set(v uint64) {
+ d[:][0] = v
+}
+
+const (
+ // minDegree is the minimum degree of an internal node in a Set B-tree.
+ //
+ // - Any non-root node has at least minDegree-1 segments.
+ //
+ // - Any non-root internal (non-leaf) node has at least minDegree children.
+ //
+ // - The root node may have fewer than minDegree-1 segments, but it may
+ // only have 0 segments if the tree is empty.
+ //
+ // Our implementation requires minDegree >= 3. Higher values of minDegree
+ // usually improve performance, but increase memory usage for small sets.
+ FrameRefminDegree = 3
+
+ FrameRefmaxDegree = 2 * FrameRefminDegree
+)
+
+// A Set is a mapping of segments with non-overlapping Range keys. The zero
+// value for a Set is an empty set. Set values are neither safely movable nor
+// copyable. Set is thread-compatible.
+//
+// +stateify savable
+type FrameRefSet struct {
+ root FrameRefnode `state:".(*FrameRefSegmentDataSlices)"`
+}
+
+// IsEmpty returns true if the set contains no segments.
+func (s *FrameRefSet) IsEmpty() bool {
+ return s.root.nrSegments == 0
+}
+
+// IsEmptyRange returns true iff no segments in the set overlap the given
+// range. This is semantically equivalent to s.SpanRange(r) == 0, but may be
+// more efficient.
+func (s *FrameRefSet) IsEmptyRange(r __generics_imported0.FileRange) bool {
+ switch {
+ case r.Length() < 0:
+ panic(fmt.Sprintf("invalid range %v", r))
+ case r.Length() == 0:
+ return true
+ }
+ _, gap := s.Find(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ return r.End <= gap.End()
+}
+
+// Span returns the total size of all segments in the set.
+func (s *FrameRefSet) Span() uint64 {
+ var sz uint64
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ sz += seg.Range().Length()
+ }
+ return sz
+}
+
+// SpanRange returns the total size of the intersection of segments in the set
+// with the given range.
+func (s *FrameRefSet) SpanRange(r __generics_imported0.FileRange) uint64 {
+ switch {
+ case r.Length() < 0:
+ panic(fmt.Sprintf("invalid range %v", r))
+ case r.Length() == 0:
+ return 0
+ }
+ var sz uint64
+ for seg := s.LowerBoundSegment(r.Start); seg.Ok() && seg.Start() < r.End; seg = seg.NextSegment() {
+ sz += seg.Range().Intersect(r).Length()
+ }
+ return sz
+}
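+
+// Illustrative sketch, not part of the generated template: Span and SpanRange
+// are convenient for accounting. Span is the number of bytes covered by any
+// segment; SpanRange restricts that to the intersection with a range. The
+// range here is arbitrary.
+func exampleFrameRefSpan(s *FrameRefSet) (uint64, uint64) {
+	total := s.Span()
+	within := s.SpanRange(__generics_imported0.FileRange{Start: 0, End: 0x100000})
+	return total, within
+}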
+
+// FirstSegment returns the first segment in the set. If the set is empty,
+// FirstSegment returns a terminal iterator.
+func (s *FrameRefSet) FirstSegment() FrameRefIterator {
+ if s.root.nrSegments == 0 {
+ return FrameRefIterator{}
+ }
+ return s.root.firstSegment()
+}
+
+// LastSegment returns the last segment in the set. If the set is empty,
+// LastSegment returns a terminal iterator.
+func (s *FrameRefSet) LastSegment() FrameRefIterator {
+ if s.root.nrSegments == 0 {
+ return FrameRefIterator{}
+ }
+ return s.root.lastSegment()
+}
+
+// FirstGap returns the first gap in the set.
+func (s *FrameRefSet) FirstGap() FrameRefGapIterator {
+ n := &s.root
+ for n.hasChildren {
+ n = n.children[0]
+ }
+ return FrameRefGapIterator{n, 0}
+}
+
+// LastGap returns the last gap in the set.
+func (s *FrameRefSet) LastGap() FrameRefGapIterator {
+ n := &s.root
+ for n.hasChildren {
+ n = n.children[n.nrSegments]
+ }
+ return FrameRefGapIterator{n, n.nrSegments}
+}
+
+// Find returns the segment or gap whose range contains the given key. If a
+// segment is found, the returned Iterator is non-terminal and the
+// returned GapIterator is terminal. Otherwise, the returned Iterator is
+// terminal and the returned GapIterator is non-terminal.
+func (s *FrameRefSet) Find(key uint64) (FrameRefIterator, FrameRefGapIterator) {
+ n := &s.root
+ for {
+
+ lower := 0
+ upper := n.nrSegments
+ for lower < upper {
+ i := lower + (upper-lower)/2
+ if r := n.keys[i]; key < r.End {
+ if key >= r.Start {
+ return FrameRefIterator{n, i}, FrameRefGapIterator{}
+ }
+ upper = i
+ } else {
+ lower = i + 1
+ }
+ }
+ i := lower
+ if !n.hasChildren {
+ return FrameRefIterator{}, FrameRefGapIterator{n, i}
+ }
+ n = n.children[i]
+ }
+}
+
+// FindSegment returns the segment whose range contains the given key. If no
+// such segment exists, FindSegment returns a terminal iterator.
+func (s *FrameRefSet) FindSegment(key uint64) FrameRefIterator {
+ seg, _ := s.Find(key)
+ return seg
+}
+
+// LowerBoundSegment returns the segment with the lowest range that contains a
+// key greater than or equal to min. If no such segment exists,
+// LowerBoundSegment returns a terminal iterator.
+func (s *FrameRefSet) LowerBoundSegment(min uint64) FrameRefIterator {
+ seg, gap := s.Find(min)
+ if seg.Ok() {
+ return seg
+ }
+ return gap.NextSegment()
+}
+
+// UpperBoundSegment returns the segment with the highest range that contains a
+// key less than or equal to max. If no such segment exists, UpperBoundSegment
+// returns a terminal iterator.
+func (s *FrameRefSet) UpperBoundSegment(max uint64) FrameRefIterator {
+ seg, gap := s.Find(max)
+ if seg.Ok() {
+ return seg
+ }
+ return gap.PrevSegment()
+}
+
+// FindGap returns the gap containing the given key. If no such gap exists
+// (i.e. the set contains a segment containing that key), FindGap returns a
+// terminal iterator.
+func (s *FrameRefSet) FindGap(key uint64) FrameRefGapIterator {
+ _, gap := s.Find(key)
+ return gap
+}
+
+// LowerBoundGap returns the gap with the lowest range that is greater than or
+// equal to min.
+func (s *FrameRefSet) LowerBoundGap(min uint64) FrameRefGapIterator {
+ seg, gap := s.Find(min)
+ if gap.Ok() {
+ return gap
+ }
+ return seg.NextGap()
+}
+
+// UpperBoundGap returns the gap with the highest range that is less than or
+// equal to max.
+func (s *FrameRefSet) UpperBoundGap(max uint64) FrameRefGapIterator {
+ seg, gap := s.Find(max)
+ if gap.Ok() {
+ return gap
+ }
+ return seg.PrevGap()
+}
+
+// Add inserts the given segment into the set and returns true. If the new
+// segment can be merged with adjacent segments, Add will do so. If the new
+// segment would overlap an existing segment, Add returns false. If Add
+// succeeds, all existing iterators are invalidated.
+func (s *FrameRefSet) Add(r __generics_imported0.FileRange, val uint64) bool {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ gap := s.FindGap(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ if r.End > gap.End() {
+ return false
+ }
+ s.Insert(gap, r, val)
+ return true
+}
+
+// AddWithoutMerging inserts the given segment into the set and returns true.
+// If it would overlap an existing segment, AddWithoutMerging does nothing and
+// returns false. If AddWithoutMerging succeeds, all existing iterators are
+// invalidated.
+func (s *FrameRefSet) AddWithoutMerging(r __generics_imported0.FileRange, val uint64) bool {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ gap := s.FindGap(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ if r.End > gap.End() {
+ return false
+ }
+ s.InsertWithoutMergingUnchecked(gap, r, val)
+ return true
+}
+
+// Insert inserts the given segment into the given gap. If the new segment can
+// be merged with adjacent segments, Insert will do so. Insert returns an
+// iterator to the segment containing the inserted value (which may have been
+// merged with other values). All existing iterators (including gap, but not
+// including the returned iterator) are invalidated.
+//
+// If the gap cannot accommodate the segment, or if r is invalid, Insert panics.
+//
+// Insert is semantically equivalent to an InsertWithoutMerging followed by a
+// Merge, but may be more efficient. Note that there is no unchecked variant of
+// Insert since Insert must retrieve and inspect gap's predecessor and
+// successor segments regardless.
+func (s *FrameRefSet) Insert(gap FrameRefGapIterator, r __generics_imported0.FileRange, val uint64) FrameRefIterator {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ prev, next := gap.PrevSegment(), gap.NextSegment()
+ if prev.Ok() && prev.End() > r.Start {
+ panic(fmt.Sprintf("new segment %v overlaps predecessor %v", r, prev.Range()))
+ }
+ if next.Ok() && next.Start() < r.End {
+ panic(fmt.Sprintf("new segment %v overlaps successor %v", r, next.Range()))
+ }
+ if prev.Ok() && prev.End() == r.Start {
+ if mval, ok := (FrameRefSetFunctions{}).Merge(prev.Range(), prev.Value(), r, val); ok {
+ shrinkMaxGap := FrameReftrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
+ prev.SetEndUnchecked(r.End)
+ prev.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ if next.Ok() && next.Start() == r.End {
+ val = mval
+ if mval, ok := (FrameRefSetFunctions{}).Merge(prev.Range(), val, next.Range(), next.Value()); ok {
+ prev.SetEndUnchecked(next.End())
+ prev.SetValue(mval)
+ return s.Remove(next).PrevSegment()
+ }
+ }
+ return prev
+ }
+ }
+ if next.Ok() && next.Start() == r.End {
+ if mval, ok := (FrameRefSetFunctions{}).Merge(r, val, next.Range(), next.Value()); ok {
+ shrinkMaxGap := FrameReftrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
+ next.SetStartUnchecked(r.Start)
+ next.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ return next
+ }
+ }
+
+ return s.InsertWithoutMergingUnchecked(gap, r, val)
+}
+
+// InsertWithoutMerging inserts the given segment into the given gap and
+// returns an iterator to the inserted segment. All existing iterators
+// (including gap, but not including the returned iterator) are invalidated.
+//
+// If the gap cannot accommodate the segment, or if r is invalid,
+// InsertWithoutMerging panics.
+func (s *FrameRefSet) InsertWithoutMerging(gap FrameRefGapIterator, r __generics_imported0.FileRange, val uint64) FrameRefIterator {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ if gr := gap.Range(); !gr.IsSupersetOf(r) {
+ panic(fmt.Sprintf("cannot insert segment range %v into gap range %v", r, gr))
+ }
+ return s.InsertWithoutMergingUnchecked(gap, r, val)
+}
+
+// InsertWithoutMergingUnchecked inserts the given segment into the given gap
+// and returns an iterator to the inserted segment. All existing iterators
+// (including gap, but not including the returned iterator) are invalidated.
+//
+// Preconditions:
+// * r.Start >= gap.Start().
+// * r.End <= gap.End().
+func (s *FrameRefSet) InsertWithoutMergingUnchecked(gap FrameRefGapIterator, r __generics_imported0.FileRange, val uint64) FrameRefIterator {
+ gap = gap.node.rebalanceBeforeInsert(gap)
+ splitMaxGap := FrameReftrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
+ copy(gap.node.keys[gap.index+1:], gap.node.keys[gap.index:gap.node.nrSegments])
+ copy(gap.node.values[gap.index+1:], gap.node.values[gap.index:gap.node.nrSegments])
+ gap.node.keys[gap.index] = r
+ gap.node.values[gap.index] = val
+ gap.node.nrSegments++
+ if splitMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ return FrameRefIterator{gap.node, gap.index}
+}
+
+// Remove removes the given segment and returns an iterator to the vacated gap.
+// All existing iterators (including seg, but not including the returned
+// iterator) are invalidated.
+func (s *FrameRefSet) Remove(seg FrameRefIterator) FrameRefGapIterator {
+
+ if seg.node.hasChildren {
+
+ victim := seg.PrevSegment()
+
+ seg.SetRangeUnchecked(victim.Range())
+ seg.SetValue(victim.Value())
+
+ nextAdjacentNode := seg.NextSegment().node
+ if FrameReftrackGaps != 0 {
+ nextAdjacentNode.updateMaxGapLeaf()
+ }
+ return s.Remove(victim).NextGap()
+ }
+ copy(seg.node.keys[seg.index:], seg.node.keys[seg.index+1:seg.node.nrSegments])
+ copy(seg.node.values[seg.index:], seg.node.values[seg.index+1:seg.node.nrSegments])
+ FrameRefSetFunctions{}.ClearValue(&seg.node.values[seg.node.nrSegments-1])
+ seg.node.nrSegments--
+ if FrameReftrackGaps != 0 {
+ seg.node.updateMaxGapLeaf()
+ }
+ return seg.node.rebalanceAfterRemove(FrameRefGapIterator{seg.node, seg.index})
+}
+
+// RemoveAll removes all segments from the set. All existing iterators are
+// invalidated.
+func (s *FrameRefSet) RemoveAll() {
+ s.root = FrameRefnode{}
+}
+
+// RemoveRange removes all segments in the given range. An iterator to the
+// newly formed gap is returned, and all existing iterators are invalidated.
+func (s *FrameRefSet) RemoveRange(r __generics_imported0.FileRange) FrameRefGapIterator {
+ seg, gap := s.Find(r.Start)
+ if seg.Ok() {
+ seg = s.Isolate(seg, r)
+ gap = s.Remove(seg)
+ }
+ for seg = gap.NextSegment(); seg.Ok() && seg.Start() < r.End; seg = gap.NextSegment() {
+ seg = s.Isolate(seg, r)
+ gap = s.Remove(seg)
+ }
+ return gap
+}
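A short sketch of RemoveRange splitting the segments that straddle its boundaries, under the same assumptions (memmap.FileRange, hypothetical example package):

package fsutilexample

import (
	"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
	"gvisor.dev/gvisor/pkg/sentry/memmap"
)

// DropMiddle removes the middle third of a covering segment; the pieces on
// either side are split off and keep their original value.
func DropMiddle() {
	var frames fsutil.FrameRefSet
	frames.Add(memmap.FileRange{Start: 0x0000, End: 0x3000}, 1)

	frames.RemoveRange(memmap.FileRange{Start: 0x1000, End: 0x2000})
	// The set now holds [0x0000, 0x1000) and [0x2000, 0x3000), both with value 1.
}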
+
+// Merge attempts to merge two neighboring segments. If successful, Merge
+// returns an iterator to the merged segment, and all existing iterators are
+// invalidated. Otherwise, Merge returns a terminal iterator.
+//
+// If first is not the predecessor of second, Merge panics.
+func (s *FrameRefSet) Merge(first, second FrameRefIterator) FrameRefIterator {
+ if first.NextSegment() != second {
+ panic(fmt.Sprintf("attempt to merge non-neighboring segments %v, %v", first.Range(), second.Range()))
+ }
+ return s.MergeUnchecked(first, second)
+}
+
+// MergeUnchecked attempts to merge two neighboring segments. If successful,
+// MergeUnchecked returns an iterator to the merged segment, and all existing
+// iterators are invalidated. Otherwise, MergeUnchecked returns a terminal
+// iterator.
+//
+// Precondition: first is the predecessor of second: first.NextSegment() ==
+// second, first == second.PrevSegment().
+func (s *FrameRefSet) MergeUnchecked(first, second FrameRefIterator) FrameRefIterator {
+ if first.End() == second.Start() {
+ if mval, ok := (FrameRefSetFunctions{}).Merge(first.Range(), first.Value(), second.Range(), second.Value()); ok {
+
+ first.SetEndUnchecked(second.End())
+ first.SetValue(mval)
+
+ return s.Remove(second).PrevSegment()
+ }
+ }
+ return FrameRefIterator{}
+}
+
+// MergeAll attempts to merge all adjacent segments in the set. All existing
+// iterators are invalidated.
+func (s *FrameRefSet) MergeAll() {
+ seg := s.FirstSegment()
+ if !seg.Ok() {
+ return
+ }
+ next := seg.NextSegment()
+ for next.Ok() {
+ if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
+ seg, next = mseg, mseg.NextSegment()
+ } else {
+ seg, next = next, next.NextSegment()
+ }
+ }
+}
+
+// MergeRange attempts to merge all adjacent segments that contain a key in the
+// specified range. All existing iterators are invalidated.
+func (s *FrameRefSet) MergeRange(r __generics_imported0.FileRange) {
+ seg := s.LowerBoundSegment(r.Start)
+ if !seg.Ok() {
+ return
+ }
+ next := seg.NextSegment()
+ for next.Ok() && next.Range().Start < r.End {
+ if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
+ seg, next = mseg, mseg.NextSegment()
+ } else {
+ seg, next = next, next.NextSegment()
+ }
+ }
+}
+
+// MergeAdjacent attempts to merge the segment containing r.Start with its
+// predecessor, and the segment containing r.End-1 with its successor.
+func (s *FrameRefSet) MergeAdjacent(r __generics_imported0.FileRange) {
+ first := s.FindSegment(r.Start)
+ if first.Ok() {
+ if prev := first.PrevSegment(); prev.Ok() {
+ s.Merge(prev, first)
+ }
+ }
+ last := s.FindSegment(r.End - 1)
+ if last.Ok() {
+ if next := last.NextSegment(); next.Ok() {
+ s.Merge(last, next)
+ }
+ }
+}
+
+// Split splits the given segment at the given key and returns iterators to the
+// two resulting segments. All existing iterators (including seg, but not
+// including the returned iterators) are invalidated.
+//
+// If the segment cannot be split at split (because split is at the start or
+// end of the segment's range, so splitting would produce a segment with zero
+// length, or because split falls outside the segment's range altogether),
+// Split panics.
+func (s *FrameRefSet) Split(seg FrameRefIterator, split uint64) (FrameRefIterator, FrameRefIterator) {
+ if !seg.Range().CanSplitAt(split) {
+ panic(fmt.Sprintf("can't split %v at %v", seg.Range(), split))
+ }
+ return s.SplitUnchecked(seg, split)
+}
+
+// SplitUnchecked splits the given segment at the given key and returns
+// iterators to the two resulting segments. All existing iterators (including
+// seg, but not including the returned iterators) are invalidated.
+//
+// Preconditions: seg.Start() < split < seg.End().
+func (s *FrameRefSet) SplitUnchecked(seg FrameRefIterator, split uint64) (FrameRefIterator, FrameRefIterator) {
+ val1, val2 := (FrameRefSetFunctions{}).Split(seg.Range(), seg.Value(), split)
+ end2 := seg.End()
+ seg.SetEndUnchecked(split)
+ seg.SetValue(val1)
+ seg2 := s.InsertWithoutMergingUnchecked(seg.NextGap(), __generics_imported0.FileRange{split, end2}, val2)
+
+ return seg2.PrevSegment(), seg2
+}
+
+// SplitAt splits the segment straddling split, if one exists. SplitAt returns
+// true if a segment was split and false otherwise. If SplitAt splits a
+// segment, all existing iterators are invalidated.
+func (s *FrameRefSet) SplitAt(split uint64) bool {
+ if seg := s.FindSegment(split); seg.Ok() && seg.Range().CanSplitAt(split) {
+ s.SplitUnchecked(seg, split)
+ return true
+ }
+ return false
+}
+
+// Isolate ensures that the given segment's range does not escape r by
+// splitting at r.Start and r.End if necessary, and returns an updated iterator
+// to the bounded segment. All existing iterators (including seg, but not
+// including the returned iterator) are invalidated.
+func (s *FrameRefSet) Isolate(seg FrameRefIterator, r __generics_imported0.FileRange) FrameRefIterator {
+ if seg.Range().CanSplitAt(r.Start) {
+ _, seg = s.SplitUnchecked(seg, r.Start)
+ }
+ if seg.Range().CanSplitAt(r.End) {
+ seg, _ = s.SplitUnchecked(seg, r.End)
+ }
+ return seg
+}
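Isolate narrows a segment to a sub-range by splitting at the range boundaries; a sketch of using it before mutating only part of a segment, under the same assumptions as above:

package fsutilexample

import (
	"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
	"gvisor.dev/gvisor/pkg/sentry/memmap"
)

// SetSubrangeValue splits a covering segment so that only [0x1000, 0x2000)
// receives a new value; the rest of the original range is untouched.
func SetSubrangeValue() {
	var frames fsutil.FrameRefSet
	frames.Add(memmap.FileRange{Start: 0x0000, End: 0x3000}, 1)

	r := memmap.FileRange{Start: 0x1000, End: 0x2000}
	if seg := frames.FindSegment(r.Start); seg.Ok() {
		seg = frames.Isolate(seg, r) // splits at r.Start and r.End as needed
		seg.SetValue(2)              // only [0x1000, 0x2000) now has value 2
	}
}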
+
+// ApplyContiguous applies a function to a contiguous range of segments,
+// splitting if necessary. The function is applied until the first gap is
+// encountered, at which point the gap is returned. If the function is applied
+// across the entire range, a terminal gap is returned. All existing iterators
+// are invalidated.
+//
+// N.B. The Iterator must not be invalidated by the function.
+func (s *FrameRefSet) ApplyContiguous(r __generics_imported0.FileRange, fn func(seg FrameRefIterator)) FrameRefGapIterator {
+ seg, gap := s.Find(r.Start)
+ if !seg.Ok() {
+ return gap
+ }
+ for {
+ seg = s.Isolate(seg, r)
+ fn(seg)
+ if seg.End() >= r.End {
+ return FrameRefGapIterator{}
+ }
+ gap = seg.NextGap()
+ if !gap.IsEmpty() {
+ return gap
+ }
+ seg = gap.NextSegment()
+ if !seg.Ok() {
+
+ return FrameRefGapIterator{}
+ }
+ }
+}
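ApplyContiguous visits the covered prefix of a range and stops at the first hole. Treating the uint64 value as a per-frame reference count (an assumption about how this set is used), an increment helper might look like the following sketch, under the same assumptions as the earlier examples:

package fsutilexample

import (
	"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
	"gvisor.dev/gvisor/pkg/sentry/memmap"
)

// IncRefRange increments the value over the covered prefix of r and reports
// whether the whole range was covered.
func IncRefRange(frames *fsutil.FrameRefSet, r memmap.FileRange) bool {
	gap := frames.ApplyContiguous(r, func(seg fsutil.FrameRefIterator) {
		v := seg.ValuePtr()
		*v += 1
	})
	// A terminal gap means the function was applied across the entire range.
	return !gap.Ok()
}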
+
+// +stateify savable
+type FrameRefnode struct {
+ // An internal binary tree node looks like:
+ //
+ // K
+ // / \
+ // Cl Cr
+ //
+ // where all keys in the subtree rooted by Cl (the left subtree) are less
+ // than K (the key of the parent node), and all keys in the subtree rooted
+ // by Cr (the right subtree) are greater than K.
+ //
+ // An internal B-tree node's indexes work out to look like:
+ //
+ // K0 K1 K2 ... Kn-1
+ // / \/ \/ \ ... / \
+ // C0 C1 C2 C3 ... Cn-1 Cn
+ //
+ // where n is nrSegments.
+ nrSegments int
+
+ // parent is a pointer to this node's parent. If this node is root, parent
+ // is nil.
+ parent *FrameRefnode
+
+ // parentIndex is the index of this node in parent.children.
+ parentIndex int
+
+ // hasChildren is technically redundant with "children[0] != nil", but is
+ // stored in the first cache line. It is named "hasChildren" rather than
+ // "isLeaf" because false must be the correct value for an empty root.
+ hasChildren bool
+
+ // maxGap is the length of the longest gap within this node. For a leaf, it
+ // is the maximum of the (nrSegments+1) gaps delimited by its nrSegments keys
+ // (the 0th and nrSegments-th of which may be shared with ancestor nodes);
+ // for a non-leaf node, it is the maximum of its children's maxGap values.
+ maxGap FrameRefdynamicGap
+
+ // Nodes store keys and values in separate arrays to maximize locality in
+ // the common case (scanning keys for lookup).
+ keys [FrameRefmaxDegree - 1]__generics_imported0.FileRange
+ values [FrameRefmaxDegree - 1]uint64
+ children [FrameRefmaxDegree]*FrameRefnode
+}
+
+// firstSegment returns the first segment in the subtree rooted by n.
+//
+// Preconditions: n.nrSegments != 0.
+func (n *FrameRefnode) firstSegment() FrameRefIterator {
+ for n.hasChildren {
+ n = n.children[0]
+ }
+ return FrameRefIterator{n, 0}
+}
+
+// lastSegment returns the last segment in the subtree rooted by n.
+//
+// Preconditions: n.nrSegments != 0.
+func (n *FrameRefnode) lastSegment() FrameRefIterator {
+ for n.hasChildren {
+ n = n.children[n.nrSegments]
+ }
+ return FrameRefIterator{n, n.nrSegments - 1}
+}
+
+func (n *FrameRefnode) prevSibling() *FrameRefnode {
+ if n.parent == nil || n.parentIndex == 0 {
+ return nil
+ }
+ return n.parent.children[n.parentIndex-1]
+}
+
+func (n *FrameRefnode) nextSibling() *FrameRefnode {
+ if n.parent == nil || n.parentIndex == n.parent.nrSegments {
+ return nil
+ }
+ return n.parent.children[n.parentIndex+1]
+}
+
+// rebalanceBeforeInsert splits n and its ancestors if they are full, as
+// required for insertion, and returns an updated iterator to the position
+// represented by gap.
+func (n *FrameRefnode) rebalanceBeforeInsert(gap FrameRefGapIterator) FrameRefGapIterator {
+ if n.nrSegments < FrameRefmaxDegree-1 {
+ return gap
+ }
+ if n.parent != nil {
+ gap = n.parent.rebalanceBeforeInsert(gap)
+ }
+ if n.parent == nil {
+
+ left := &FrameRefnode{
+ nrSegments: FrameRefminDegree - 1,
+ parent: n,
+ parentIndex: 0,
+ hasChildren: n.hasChildren,
+ }
+ right := &FrameRefnode{
+ nrSegments: FrameRefminDegree - 1,
+ parent: n,
+ parentIndex: 1,
+ hasChildren: n.hasChildren,
+ }
+ copy(left.keys[:FrameRefminDegree-1], n.keys[:FrameRefminDegree-1])
+ copy(left.values[:FrameRefminDegree-1], n.values[:FrameRefminDegree-1])
+ copy(right.keys[:FrameRefminDegree-1], n.keys[FrameRefminDegree:])
+ copy(right.values[:FrameRefminDegree-1], n.values[FrameRefminDegree:])
+ n.keys[0], n.values[0] = n.keys[FrameRefminDegree-1], n.values[FrameRefminDegree-1]
+ FrameRefzeroValueSlice(n.values[1:])
+ if n.hasChildren {
+ copy(left.children[:FrameRefminDegree], n.children[:FrameRefminDegree])
+ copy(right.children[:FrameRefminDegree], n.children[FrameRefminDegree:])
+ FrameRefzeroNodeSlice(n.children[2:])
+ for i := 0; i < FrameRefminDegree; i++ {
+ left.children[i].parent = left
+ left.children[i].parentIndex = i
+ right.children[i].parent = right
+ right.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments = 1
+ n.hasChildren = true
+ n.children[0] = left
+ n.children[1] = right
+
+ if FrameReftrackGaps != 0 {
+ left.updateMaxGapLocal()
+ right.updateMaxGapLocal()
+ }
+ if gap.node != n {
+ return gap
+ }
+ if gap.index < FrameRefminDegree {
+ return FrameRefGapIterator{left, gap.index}
+ }
+ return FrameRefGapIterator{right, gap.index - FrameRefminDegree}
+ }
+
+ copy(n.parent.keys[n.parentIndex+1:], n.parent.keys[n.parentIndex:n.parent.nrSegments])
+ copy(n.parent.values[n.parentIndex+1:], n.parent.values[n.parentIndex:n.parent.nrSegments])
+ n.parent.keys[n.parentIndex], n.parent.values[n.parentIndex] = n.keys[FrameRefminDegree-1], n.values[FrameRefminDegree-1]
+ copy(n.parent.children[n.parentIndex+2:], n.parent.children[n.parentIndex+1:n.parent.nrSegments+1])
+ for i := n.parentIndex + 2; i < n.parent.nrSegments+2; i++ {
+ n.parent.children[i].parentIndex = i
+ }
+ sibling := &FrameRefnode{
+ nrSegments: FrameRefminDegree - 1,
+ parent: n.parent,
+ parentIndex: n.parentIndex + 1,
+ hasChildren: n.hasChildren,
+ }
+ n.parent.children[n.parentIndex+1] = sibling
+ n.parent.nrSegments++
+ copy(sibling.keys[:FrameRefminDegree-1], n.keys[FrameRefminDegree:])
+ copy(sibling.values[:FrameRefminDegree-1], n.values[FrameRefminDegree:])
+ FrameRefzeroValueSlice(n.values[FrameRefminDegree-1:])
+ if n.hasChildren {
+ copy(sibling.children[:FrameRefminDegree], n.children[FrameRefminDegree:])
+ FrameRefzeroNodeSlice(n.children[FrameRefminDegree:])
+ for i := 0; i < FrameRefminDegree; i++ {
+ sibling.children[i].parent = sibling
+ sibling.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments = FrameRefminDegree - 1
+
+ if FrameReftrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+
+ if gap.node != n {
+ return gap
+ }
+ if gap.index < FrameRefminDegree {
+ return gap
+ }
+ return FrameRefGapIterator{sibling, gap.index - FrameRefminDegree}
+}
+
+// rebalanceAfterRemove "unsplits" n and its ancestors if they are deficient
+// (contain fewer segments than required by B-tree invariants), as required for
+// removal, and returns an updated iterator to the position represented by gap.
+//
+// Precondition: n is the only node in the tree that may currently violate a
+// B-tree invariant.
+func (n *FrameRefnode) rebalanceAfterRemove(gap FrameRefGapIterator) FrameRefGapIterator {
+ for {
+ if n.nrSegments >= FrameRefminDegree-1 {
+ return gap
+ }
+ if n.parent == nil {
+
+ return gap
+ }
+
+ if sibling := n.prevSibling(); sibling != nil && sibling.nrSegments >= FrameRefminDegree {
+ copy(n.keys[1:], n.keys[:n.nrSegments])
+ copy(n.values[1:], n.values[:n.nrSegments])
+ n.keys[0] = n.parent.keys[n.parentIndex-1]
+ n.values[0] = n.parent.values[n.parentIndex-1]
+ n.parent.keys[n.parentIndex-1] = sibling.keys[sibling.nrSegments-1]
+ n.parent.values[n.parentIndex-1] = sibling.values[sibling.nrSegments-1]
+ FrameRefSetFunctions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
+ if n.hasChildren {
+ copy(n.children[1:], n.children[:n.nrSegments+1])
+ n.children[0] = sibling.children[sibling.nrSegments]
+ sibling.children[sibling.nrSegments] = nil
+ n.children[0].parent = n
+ n.children[0].parentIndex = 0
+ for i := 1; i < n.nrSegments+2; i++ {
+ n.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments++
+ sibling.nrSegments--
+
+ if FrameReftrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+ if gap.node == sibling && gap.index == sibling.nrSegments {
+ return FrameRefGapIterator{n, 0}
+ }
+ if gap.node == n {
+ return FrameRefGapIterator{n, gap.index + 1}
+ }
+ return gap
+ }
+ if sibling := n.nextSibling(); sibling != nil && sibling.nrSegments >= FrameRefminDegree {
+ n.keys[n.nrSegments] = n.parent.keys[n.parentIndex]
+ n.values[n.nrSegments] = n.parent.values[n.parentIndex]
+ n.parent.keys[n.parentIndex] = sibling.keys[0]
+ n.parent.values[n.parentIndex] = sibling.values[0]
+ copy(sibling.keys[:sibling.nrSegments-1], sibling.keys[1:])
+ copy(sibling.values[:sibling.nrSegments-1], sibling.values[1:])
+ FrameRefSetFunctions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
+ if n.hasChildren {
+ n.children[n.nrSegments+1] = sibling.children[0]
+ copy(sibling.children[:sibling.nrSegments], sibling.children[1:])
+ sibling.children[sibling.nrSegments] = nil
+ n.children[n.nrSegments+1].parent = n
+ n.children[n.nrSegments+1].parentIndex = n.nrSegments + 1
+ for i := 0; i < sibling.nrSegments; i++ {
+ sibling.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments++
+ sibling.nrSegments--
+
+ if FrameReftrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+ if gap.node == sibling {
+ if gap.index == 0 {
+ return FrameRefGapIterator{n, n.nrSegments}
+ }
+ return FrameRefGapIterator{sibling, gap.index - 1}
+ }
+ return gap
+ }
+
+ p := n.parent
+ if p.nrSegments == 1 {
+
+ left, right := p.children[0], p.children[1]
+ p.nrSegments = left.nrSegments + right.nrSegments + 1
+ p.hasChildren = left.hasChildren
+ p.keys[left.nrSegments] = p.keys[0]
+ p.values[left.nrSegments] = p.values[0]
+ copy(p.keys[:left.nrSegments], left.keys[:left.nrSegments])
+ copy(p.values[:left.nrSegments], left.values[:left.nrSegments])
+ copy(p.keys[left.nrSegments+1:], right.keys[:right.nrSegments])
+ copy(p.values[left.nrSegments+1:], right.values[:right.nrSegments])
+ if left.hasChildren {
+ copy(p.children[:left.nrSegments+1], left.children[:left.nrSegments+1])
+ copy(p.children[left.nrSegments+1:], right.children[:right.nrSegments+1])
+ for i := 0; i < p.nrSegments+1; i++ {
+ p.children[i].parent = p
+ p.children[i].parentIndex = i
+ }
+ } else {
+ p.children[0] = nil
+ p.children[1] = nil
+ }
+
+ if gap.node == left {
+ return FrameRefGapIterator{p, gap.index}
+ }
+ if gap.node == right {
+ return FrameRefGapIterator{p, gap.index + left.nrSegments + 1}
+ }
+ return gap
+ }
+ // Merge n and either sibling, along with the segment separating the
+ // two, into whichever of the two nodes comes first. This is the
+ // reverse of the non-root splitting case in
+ // node.rebalanceBeforeInsert.
+ var left, right *FrameRefnode
+ if n.parentIndex > 0 {
+ left = n.prevSibling()
+ right = n
+ } else {
+ left = n
+ right = n.nextSibling()
+ }
+
+ if gap.node == right {
+ gap = FrameRefGapIterator{left, gap.index + left.nrSegments + 1}
+ }
+ left.keys[left.nrSegments] = p.keys[left.parentIndex]
+ left.values[left.nrSegments] = p.values[left.parentIndex]
+ copy(left.keys[left.nrSegments+1:], right.keys[:right.nrSegments])
+ copy(left.values[left.nrSegments+1:], right.values[:right.nrSegments])
+ if left.hasChildren {
+ copy(left.children[left.nrSegments+1:], right.children[:right.nrSegments+1])
+ for i := left.nrSegments + 1; i < left.nrSegments+right.nrSegments+2; i++ {
+ left.children[i].parent = left
+ left.children[i].parentIndex = i
+ }
+ }
+ left.nrSegments += right.nrSegments + 1
+ copy(p.keys[left.parentIndex:], p.keys[left.parentIndex+1:p.nrSegments])
+ copy(p.values[left.parentIndex:], p.values[left.parentIndex+1:p.nrSegments])
+ FrameRefSetFunctions{}.ClearValue(&p.values[p.nrSegments-1])
+ copy(p.children[left.parentIndex+1:], p.children[left.parentIndex+2:p.nrSegments+1])
+ for i := 0; i < p.nrSegments; i++ {
+ p.children[i].parentIndex = i
+ }
+ p.children[p.nrSegments] = nil
+ p.nrSegments--
+
+ if FrameReftrackGaps != 0 {
+ left.updateMaxGapLocal()
+ }
+
+ n = p
+ }
+}
+
+// updateMaxGapLeaf updates maxGap bottom-up from the calling leaf until no
+// further update is necessary.
+//
+// Preconditions: n must be a leaf node, trackGaps must be 1.
+func (n *FrameRefnode) updateMaxGapLeaf() {
+ if n.hasChildren {
+ panic(fmt.Sprintf("updateMaxGapLeaf should always be called on leaf node: %v", n))
+ }
+ max := n.calculateMaxGapLeaf()
+ if max == n.maxGap.Get() {
+
+ return
+ }
+ oldMax := n.maxGap.Get()
+ n.maxGap.Set(max)
+ if max > oldMax {
+
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() >= max {
+
+ break
+ }
+
+ p.maxGap.Set(max)
+ }
+ return
+ }
+
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() > oldMax {
+
+ break
+ }
+
+ parentNewMax := p.calculateMaxGapInternal()
+ if p.maxGap.Get() == parentNewMax {
+
+ break
+ }
+
+ p.maxGap.Set(parentNewMax)
+ }
+}
+
+// updateMaxGapLocal updates only the calling node's maxGap, without
+// propagating the change to ancestor nodes.
+//
+// Precondition: trackGaps must be 1.
+func (n *FrameRefnode) updateMaxGapLocal() {
+ if !n.hasChildren {
+
+ n.maxGap.Set(n.calculateMaxGapLeaf())
+ } else {
+
+ n.maxGap.Set(n.calculateMaxGapInternal())
+ }
+}
+
+// calculateMaxGapLeaf iterates over the gaps within a leaf node and returns
+// the maximum.
+//
+// Preconditions: n must be a leaf node.
+func (n *FrameRefnode) calculateMaxGapLeaf() uint64 {
+ max := FrameRefGapIterator{n, 0}.Range().Length()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := (FrameRefGapIterator{n, i}).Range().Length(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// calculateMaxGapInternal iterates over the maxGap values of internal node n's
+// children and returns the maximum.
+//
+// Preconditions: n must be a non-leaf node.
+func (n *FrameRefnode) calculateMaxGapInternal() uint64 {
+ max := n.children[0].maxGap.Get()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := n.children[i].maxGap.Get(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// searchFirstLargeEnoughGap returns the first gap of length at least minSize
+// in the subtree rooted by n. If no such gap exists, it returns a terminal gap
+// iterator.
+func (n *FrameRefnode) searchFirstLargeEnoughGap(minSize uint64) FrameRefGapIterator {
+ if n.maxGap.Get() < minSize {
+ return FrameRefGapIterator{}
+ }
+ if n.hasChildren {
+ for i := 0; i <= n.nrSegments; i++ {
+ if largeEnoughGap := n.children[i].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := 0; i <= n.nrSegments; i++ {
+ currentGap := FrameRefGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
+// searchLastLargeEnoughGap returns the last gap of length at least minSize
+// in the subtree rooted by n. If no such gap exists, it returns a terminal gap
+// iterator.
+func (n *FrameRefnode) searchLastLargeEnoughGap(minSize uint64) FrameRefGapIterator {
+ if n.maxGap.Get() < minSize {
+ return FrameRefGapIterator{}
+ }
+ if n.hasChildren {
+ for i := n.nrSegments; i >= 0; i-- {
+ if largeEnoughGap := n.children[i].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := n.nrSegments; i >= 0; i-- {
+ currentGap := FrameRefGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
+// An Iterator is conceptually one of:
+//
+// - A pointer to a segment in a set; or
+//
+// - A terminal iterator, which is a sentinel indicating that the end of
+// iteration has been reached.
+//
+// Iterators are copyable values and are meaningfully equality-comparable. The
+// zero value of Iterator is a terminal iterator.
+//
+// Unless otherwise specified, any mutation of a set invalidates all existing
+// iterators into the set.
+type FrameRefIterator struct {
+ // node is the node containing the iterated segment. If the iterator is
+ // terminal, node is nil.
+ node *FrameRefnode
+
+ // index is the index of the segment in node.keys/values.
+ index int
+}
+
+// Ok returns true if the iterator is not terminal. All other methods are only
+// valid for non-terminal iterators.
+func (seg FrameRefIterator) Ok() bool {
+ return seg.node != nil
+}
+
+// Range returns the iterated segment's range key.
+func (seg FrameRefIterator) Range() __generics_imported0.FileRange {
+ return seg.node.keys[seg.index]
+}
+
+// Start is equivalent to Range().Start, but should be preferred if only the
+// start of the range is needed.
+func (seg FrameRefIterator) Start() uint64 {
+ return seg.node.keys[seg.index].Start
+}
+
+// End is equivalent to Range().End, but should be preferred if only the end of
+// the range is needed.
+func (seg FrameRefIterator) End() uint64 {
+ return seg.node.keys[seg.index].End
+}
+
+// SetRangeUnchecked mutates the iterated segment's range key. This operation
+// does not invalidate any iterators.
+//
+// Preconditions:
+// * r.Length() > 0.
+// * The new range must not overlap an existing one:
+// * If seg.NextSegment().Ok(), then r.End <= seg.NextSegment().Start().
+// * If seg.PrevSegment().Ok(), then r.Start >= seg.PrevSegment().End().
+func (seg FrameRefIterator) SetRangeUnchecked(r __generics_imported0.FileRange) {
+ seg.node.keys[seg.index] = r
+}
+
+// SetRange mutates the iterated segment's range key. If the new range would
+// cause the iterated segment to overlap another segment, or if the new range
+// is invalid, SetRange panics. This operation does not invalidate any
+// iterators.
+func (seg FrameRefIterator) SetRange(r __generics_imported0.FileRange) {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ if prev := seg.PrevSegment(); prev.Ok() && r.Start < prev.End() {
+ panic(fmt.Sprintf("new segment range %v overlaps segment range %v", r, prev.Range()))
+ }
+ if next := seg.NextSegment(); next.Ok() && r.End > next.Start() {
+ panic(fmt.Sprintf("new segment range %v overlaps segment range %v", r, next.Range()))
+ }
+ seg.SetRangeUnchecked(r)
+}
+
+// SetStartUnchecked mutates the iterated segment's start. This operation does
+// not invalidate any iterators.
+//
+// Preconditions: The new start must be valid:
+// * start < seg.End().
+// * If seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
+func (seg FrameRefIterator) SetStartUnchecked(start uint64) {
+ seg.node.keys[seg.index].Start = start
+}
+
+// SetStart mutates the iterated segment's start. If the new start value would
+// cause the iterated segment to overlap another segment, or would result in an
+// invalid range, SetStart panics. This operation does not invalidate any
+// iterators.
+func (seg FrameRefIterator) SetStart(start uint64) {
+ if start >= seg.End() {
+ panic(fmt.Sprintf("new start %v would invalidate segment range %v", start, seg.Range()))
+ }
+ if prev := seg.PrevSegment(); prev.Ok() && start < prev.End() {
+ panic(fmt.Sprintf("new start %v would cause segment range %v to overlap segment range %v", start, seg.Range(), prev.Range()))
+ }
+ seg.SetStartUnchecked(start)
+}
+
+// SetEndUnchecked mutates the iterated segment's end. This operation does not
+// invalidate any iterators.
+//
+// Preconditions: The new end must be valid:
+// * end > seg.Start().
+// * If seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
+func (seg FrameRefIterator) SetEndUnchecked(end uint64) {
+ seg.node.keys[seg.index].End = end
+}
+
+// SetEnd mutates the iterated segment's end. If the new end value would cause
+// the iterated segment to overlap another segment, or would result in an
+// invalid range, SetEnd panics. This operation does not invalidate any
+// iterators.
+func (seg FrameRefIterator) SetEnd(end uint64) {
+ if end <= seg.Start() {
+ panic(fmt.Sprintf("new end %v would invalidate segment range %v", end, seg.Range()))
+ }
+ if next := seg.NextSegment(); next.Ok() && end > next.Start() {
+ panic(fmt.Sprintf("new end %v would cause segment range %v to overlap segment range %v", end, seg.Range(), next.Range()))
+ }
+ seg.SetEndUnchecked(end)
+}
+
+// Value returns a copy of the iterated segment's value.
+func (seg FrameRefIterator) Value() uint64 {
+ return seg.node.values[seg.index]
+}
+
+// ValuePtr returns a pointer to the iterated segment's value. The pointer is
+// invalidated if the iterator is invalidated. This operation does not
+// invalidate any iterators.
+func (seg FrameRefIterator) ValuePtr() *uint64 {
+ return &seg.node.values[seg.index]
+}
+
+// SetValue mutates the iterated segment's value. This operation does not
+// invalidate any iterators.
+func (seg FrameRefIterator) SetValue(val uint64) {
+ seg.node.values[seg.index] = val
+}
+
+// PrevSegment returns the iterated segment's predecessor. If there is no
+// preceding segment, PrevSegment returns a terminal iterator.
+func (seg FrameRefIterator) PrevSegment() FrameRefIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index].lastSegment()
+ }
+ if seg.index > 0 {
+ return FrameRefIterator{seg.node, seg.index - 1}
+ }
+ if seg.node.parent == nil {
+ return FrameRefIterator{}
+ }
+ return FrameRefsegmentBeforePosition(seg.node.parent, seg.node.parentIndex)
+}
+
+// NextSegment returns the iterated segment's successor. If there is no
+// succeeding segment, NextSegment returns a terminal iterator.
+func (seg FrameRefIterator) NextSegment() FrameRefIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index+1].firstSegment()
+ }
+ if seg.index < seg.node.nrSegments-1 {
+ return FrameRefIterator{seg.node, seg.index + 1}
+ }
+ if seg.node.parent == nil {
+ return FrameRefIterator{}
+ }
+ return FrameRefsegmentAfterPosition(seg.node.parent, seg.node.parentIndex)
+}
+
+// PrevGap returns the gap immediately before the iterated segment.
+func (seg FrameRefIterator) PrevGap() FrameRefGapIterator {
+ if seg.node.hasChildren {
+
+ return seg.node.children[seg.index].lastSegment().NextGap()
+ }
+ return FrameRefGapIterator{seg.node, seg.index}
+}
+
+// NextGap returns the gap immediately after the iterated segment.
+func (seg FrameRefIterator) NextGap() FrameRefGapIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index+1].firstSegment().PrevGap()
+ }
+ return FrameRefGapIterator{seg.node, seg.index + 1}
+}
+
+// PrevNonEmpty returns the iterated segment's predecessor if it is adjacent,
+// or the gap before the iterated segment otherwise. If seg.Start() ==
+// Functions.MinKey(), PrevNonEmpty will return two terminal iterators.
+// Otherwise, exactly one of the iterators returned by PrevNonEmpty will be
+// non-terminal.
+func (seg FrameRefIterator) PrevNonEmpty() (FrameRefIterator, FrameRefGapIterator) {
+ gap := seg.PrevGap()
+ if gap.Range().Length() != 0 {
+ return FrameRefIterator{}, gap
+ }
+ return gap.PrevSegment(), FrameRefGapIterator{}
+}
+
+// NextNonEmpty returns the iterated segment's successor if it is adjacent, or
+// the gap after the iterated segment otherwise. If seg.End() ==
+// Functions.MaxKey(), NextNonEmpty will return two terminal iterators.
+// Otherwise, exactly one of the iterators returned by NextNonEmpty will be
+// non-terminal.
+func (seg FrameRefIterator) NextNonEmpty() (FrameRefIterator, FrameRefGapIterator) {
+ gap := seg.NextGap()
+ if gap.Range().Length() != 0 {
+ return FrameRefIterator{}, gap
+ }
+ return gap.NextSegment(), FrameRefGapIterator{}
+}
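For completeness, a sketch of plain forward iteration over segments and the gaps that follow them (same assumptions as the earlier examples):

package fsutilexample

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
)

// DumpFrames prints every segment with its value, plus any non-empty gap that
// immediately follows it.
func DumpFrames(frames *fsutil.FrameRefSet) {
	for seg := frames.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
		fmt.Printf("%v => %d\n", seg.Range(), seg.Value())
		if gap := seg.NextGap(); !gap.IsEmpty() {
			fmt.Printf("  gap %v\n", gap.Range())
		}
	}
}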
+
+// A GapIterator is conceptually one of:
+//
+// - A pointer to a position between two segments, before the first segment, or
+// after the last segment in a set, called a *gap*; or
+//
+// - A terminal iterator, which is a sentinel indicating that the end of
+// iteration has been reached.
+//
+// Note that the gap between two adjacent segments exists (iterators to it are
+// non-terminal), but has a length of zero. GapIterator.IsEmpty returns true
+// for such gaps. An empty set contains a single gap, spanning the entire range
+// of the set's keys.
+//
+// GapIterators are copyable values and are meaningfully equality-comparable.
+// The zero value of GapIterator is a terminal iterator.
+//
+// Unless otherwise specified, any mutation of a set invalidates all existing
+// iterators into the set.
+type FrameRefGapIterator struct {
+ // The representation of a GapIterator is identical to that of an Iterator,
+ // except that index corresponds to positions between segments in the same
+ // way as for node.children (see comment for node.nrSegments).
+ node *FrameRefnode
+ index int
+}
+
+// Ok returns true if the iterator is not terminal. All other methods are only
+// valid for non-terminal iterators.
+func (gap FrameRefGapIterator) Ok() bool {
+ return gap.node != nil
+}
+
+// Range returns the range spanned by the iterated gap.
+func (gap FrameRefGapIterator) Range() __generics_imported0.FileRange {
+ return __generics_imported0.FileRange{gap.Start(), gap.End()}
+}
+
+// Start is equivalent to Range().Start, but should be preferred if only the
+// start of the range is needed.
+func (gap FrameRefGapIterator) Start() uint64 {
+ if ps := gap.PrevSegment(); ps.Ok() {
+ return ps.End()
+ }
+ return FrameRefSetFunctions{}.MinKey()
+}
+
+// End is equivalent to Range().End, but should be preferred if only the end of
+// the range is needed.
+func (gap FrameRefGapIterator) End() uint64 {
+ if ns := gap.NextSegment(); ns.Ok() {
+ return ns.Start()
+ }
+ return FrameRefSetFunctions{}.MaxKey()
+}
+
+// IsEmpty returns true if the iterated gap is empty (that is, the "gap" is
+// between two adjacent segments).
+func (gap FrameRefGapIterator) IsEmpty() bool {
+ return gap.Range().Length() == 0
+}
+
+// PrevSegment returns the segment immediately before the iterated gap. If no
+// such segment exists, PrevSegment returns a terminal iterator.
+func (gap FrameRefGapIterator) PrevSegment() FrameRefIterator {
+ return FrameRefsegmentBeforePosition(gap.node, gap.index)
+}
+
+// NextSegment returns the segment immediately after the iterated gap. If no
+// such segment exists, NextSegment returns a terminal iterator.
+func (gap FrameRefGapIterator) NextSegment() FrameRefIterator {
+ return FrameRefsegmentAfterPosition(gap.node, gap.index)
+}
+
+// PrevGap returns the iterated gap's predecessor. If no such gap exists,
+// PrevGap returns a terminal iterator.
+func (gap FrameRefGapIterator) PrevGap() FrameRefGapIterator {
+ seg := gap.PrevSegment()
+ if !seg.Ok() {
+ return FrameRefGapIterator{}
+ }
+ return seg.PrevGap()
+}
+
+// NextGap returns the iterated gap's successor. If no such gap exists, NextGap
+// returns a terminal iterator.
+func (gap FrameRefGapIterator) NextGap() FrameRefGapIterator {
+ seg := gap.NextSegment()
+ if !seg.Ok() {
+ return FrameRefGapIterator{}
+ }
+ return seg.NextGap()
+}
+
+// NextLargeEnoughGap returns the first gap after the iterated gap whose length
+// is at least minSize. If no such gap exists, it returns a terminal gap
+// iterator. The iterated gap itself is NOT considered.
+//
+// Precondition: trackGaps must be 1.
+func (gap FrameRefGapIterator) NextLargeEnoughGap(minSize uint64) FrameRefGapIterator {
+ if FrameReftrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == gap.node.nrSegments {
+
+ gap.node = gap.NextSegment().node
+ gap.index = 0
+ return gap.nextLargeEnoughGapHelper(minSize)
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
+
+// nextLargeEnoughGapHelper is the helper function used by NextLargeEnoughGap
+// to perform the actual recursion.
+//
+// Preconditions: gap is NOT the trailing gap of a non-leaf node.
+func (gap FrameRefGapIterator) nextLargeEnoughGapHelper(minSize uint64) FrameRefGapIterator {
+
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == gap.node.nrSegments)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return FrameRefGapIterator{}
+ }
+
+ gap.index++
+ for gap.index <= gap.node.nrSegments {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index++
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == gap.node.nrSegments {
+
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
+
+// PrevLargeEnoughGap returns the nearest gap before the iterated gap whose
+// length is at least minSize. If no such gap exists, it returns a terminal gap
+// iterator. The iterated gap itself is NOT considered.
+//
+// Precondition: trackGaps must be 1.
+func (gap FrameRefGapIterator) PrevLargeEnoughGap(minSize uint64) FrameRefGapIterator {
+ if FrameReftrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == 0 {
+
+ gap.node = gap.PrevSegment().node
+ gap.index = gap.node.nrSegments
+ return gap.prevLargeEnoughGapHelper(minSize)
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
+
+// prevLargeEnoughGapHelper is the helper function used by PrevLargeEnoughGap
+// to perform the actual recursion.
+//
+// Preconditions: gap is NOT the first gap of a non-leaf node.
+func (gap FrameRefGapIterator) prevLargeEnoughGapHelper(minSize uint64) FrameRefGapIterator {
+
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == 0)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return FrameRefGapIterator{}
+ }
+
+ gap.index--
+ for gap.index >= 0 {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index--
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == 0 {
+
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
+
+// segmentBeforePosition returns the predecessor segment of the position given
+// by n.children[i], which may or may not contain a child. If no such segment
+// exists, segmentBeforePosition returns a terminal iterator.
+func FrameRefsegmentBeforePosition(n *FrameRefnode, i int) FrameRefIterator {
+ for i == 0 {
+ if n.parent == nil {
+ return FrameRefIterator{}
+ }
+ n, i = n.parent, n.parentIndex
+ }
+ return FrameRefIterator{n, i - 1}
+}
+
+// segmentAfterPosition returns the successor segment of the position given by
+// n.children[i], which may or may not contain a child. If no such segment
+// exists, segmentAfterPosition returns a terminal iterator.
+func FrameRefsegmentAfterPosition(n *FrameRefnode, i int) FrameRefIterator {
+ for i == n.nrSegments {
+ if n.parent == nil {
+ return FrameRefIterator{}
+ }
+ n, i = n.parent, n.parentIndex
+ }
+ return FrameRefIterator{n, i}
+}
+
+func FrameRefzeroValueSlice(slice []uint64) {
+
+ for i := range slice {
+ FrameRefSetFunctions{}.ClearValue(&slice[i])
+ }
+}
+
+func FrameRefzeroNodeSlice(slice []*FrameRefnode) {
+ for i := range slice {
+ slice[i] = nil
+ }
+}
+
+// String stringifies a Set for debugging.
+func (s *FrameRefSet) String() string {
+ return s.root.String()
+}
+
+// String stringifies a node (and all of its children) for debugging.
+func (n *FrameRefnode) String() string {
+ var buf bytes.Buffer
+ n.writeDebugString(&buf, "")
+ return buf.String()
+}
+
+func (n *FrameRefnode) writeDebugString(buf *bytes.Buffer, prefix string) {
+ if n.hasChildren != (n.nrSegments > 0 && n.children[0] != nil) {
+ buf.WriteString(prefix)
+ buf.WriteString(fmt.Sprintf("WARNING: inconsistent value of hasChildren: got %v, want %v\n", n.hasChildren, !n.hasChildren))
+ }
+ for i := 0; i < n.nrSegments; i++ {
+ if child := n.children[i]; child != nil {
+ cprefix := fmt.Sprintf("%s- % 3d ", prefix, i)
+ if child.parent != n || child.parentIndex != i {
+ buf.WriteString(cprefix)
+ buf.WriteString(fmt.Sprintf("WARNING: inconsistent linkage to parent: got (%p, %d), want (%p, %d)\n", child.parent, child.parentIndex, n, i))
+ }
+ child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, i))
+ }
+ buf.WriteString(prefix)
+ if n.hasChildren {
+ if FrameReftrackGaps != 0 {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v, maxGap: %d\n", i, n.keys[i], n.values[i], n.maxGap.Get()))
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
+ }
+ if child := n.children[n.nrSegments]; child != nil {
+ child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, n.nrSegments))
+ }
+}
+
+// SegmentDataSlices represents segments from a set as slices of start, end, and
+// values. SegmentDataSlices is primarily used as an intermediate representation
+// for save/restore and the layout here is optimized for that.
+//
+// +stateify savable
+type FrameRefSegmentDataSlices struct {
+ Start []uint64
+ End []uint64
+ Values []uint64
+}
+
+// ExportSortedSlices returns a copy of all segments in the given set, in
+// ascending key order.
+func (s *FrameRefSet) ExportSortedSlices() *FrameRefSegmentDataSlices {
+ var sds FrameRefSegmentDataSlices
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ sds.Start = append(sds.Start, seg.Start())
+ sds.End = append(sds.End, seg.End())
+ sds.Values = append(sds.Values, seg.Value())
+ }
+ sds.Start = sds.Start[:len(sds.Start):len(sds.Start)]
+ sds.End = sds.End[:len(sds.End):len(sds.End)]
+ sds.Values = sds.Values[:len(sds.Values):len(sds.Values)]
+ return &sds
+}
+
+// ImportSortedSlices initializes the given set from the given slice.
+//
+// Preconditions:
+// * s must be empty.
+// * sds must represent a valid set (the segments in sds must have valid
+// lengths that do not overlap).
+// * The segments in sds must be sorted in ascending key order.
+func (s *FrameRefSet) ImportSortedSlices(sds *FrameRefSegmentDataSlices) error {
+ if !s.IsEmpty() {
+ return fmt.Errorf("cannot import into non-empty set %v", s)
+ }
+ gap := s.FirstGap()
+ for i := range sds.Start {
+ r := __generics_imported0.FileRange{sds.Start[i], sds.End[i]}
+ if !gap.Range().IsSupersetOf(r) {
+ return fmt.Errorf("segment overlaps a preceding segment or is incorrectly sorted: [%d, %d) => %v", sds.Start[i], sds.End[i], sds.Values[i])
+ }
+ gap = s.InsertWithoutMerging(gap, r, sds.Values[i]).NextGap()
+ }
+ return nil
+}
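The export/import pair also gives a simple way to deep-copy a set via its flattened form; a sketch under the same assumptions as the earlier examples:

package fsutilexample

import (
	"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
)

// CopySet round-trips a set through its flattened save/restore representation.
// The destination must start out empty, as ImportSortedSlices requires.
func CopySet(src *fsutil.FrameRefSet) (*fsutil.FrameRefSet, error) {
	sds := src.ExportSortedSlices()
	var dst fsutil.FrameRefSet
	if err := dst.ImportSortedSlices(sds); err != nil {
		return nil, err
	}
	return &dst, nil
}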
+
+// segmentTestCheck returns an error if s is incorrectly sorted, does not
+// contain exactly expectedSegments segments, or contains a segment which
+// fails the passed check.
+//
+// This should be used only for testing, and has been added to this package for
+// templating convenience.
+func (s *FrameRefSet) segmentTestCheck(expectedSegments int, segFunc func(int, __generics_imported0.FileRange, uint64) error) error {
+ havePrev := false
+ prev := uint64(0)
+ nrSegments := 0
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ next := seg.Start()
+ if havePrev && prev >= next {
+ return fmt.Errorf("incorrect order: key %d (segment %d) >= key %d (segment %d)", prev, nrSegments-1, next, nrSegments)
+ }
+ if segFunc != nil {
+ if err := segFunc(nrSegments, seg.Range(), seg.Value()); err != nil {
+ return err
+ }
+ }
+ prev = next
+ havePrev = true
+ nrSegments++
+ }
+ if nrSegments != expectedSegments {
+ return fmt.Errorf("incorrect number of segments: got %d, wanted %d", nrSegments, expectedSegments)
+ }
+ return nil
+}
+
+// countSegments counts the number of segments in the set.
+//
+// Similar to Check, this should only be used for testing.
+func (s *FrameRefSet) countSegments() (segments int) {
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ segments++
+ }
+ return segments
+}
+func (s *FrameRefSet) saveRoot() *FrameRefSegmentDataSlices {
+ return s.ExportSortedSlices()
+}
+
+func (s *FrameRefSet) loadRoot(sds *FrameRefSegmentDataSlices) {
+ if err := s.ImportSortedSlices(sds); err != nil {
+ panic(err)
+ }
+}
diff --git a/pkg/sentry/fs/fsutil/fsutil_impl_state_autogen.go b/pkg/sentry/fs/fsutil/fsutil_impl_state_autogen.go
new file mode 100644
index 000000000..b7f95bde7
--- /dev/null
+++ b/pkg/sentry/fs/fsutil/fsutil_impl_state_autogen.go
@@ -0,0 +1,328 @@
+// automatically generated by stateify.
+
+package fsutil
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (s *DirtySet) StateTypeName() string {
+ return "pkg/sentry/fs/fsutil.DirtySet"
+}
+
+func (s *DirtySet) StateFields() []string {
+ return []string{
+ "root",
+ }
+}
+
+func (s *DirtySet) beforeSave() {}
+
+// +checklocksignore
+func (s *DirtySet) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ var rootValue *DirtySegmentDataSlices = s.saveRoot()
+ stateSinkObject.SaveValue(0, rootValue)
+}
+
+func (s *DirtySet) afterLoad() {}
+
+// +checklocksignore
+func (s *DirtySet) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.LoadValue(0, new(*DirtySegmentDataSlices), func(y interface{}) { s.loadRoot(y.(*DirtySegmentDataSlices)) })
+}
+
+func (n *Dirtynode) StateTypeName() string {
+ return "pkg/sentry/fs/fsutil.Dirtynode"
+}
+
+func (n *Dirtynode) StateFields() []string {
+ return []string{
+ "nrSegments",
+ "parent",
+ "parentIndex",
+ "hasChildren",
+ "maxGap",
+ "keys",
+ "values",
+ "children",
+ }
+}
+
+func (n *Dirtynode) beforeSave() {}
+
+// +checklocksignore
+func (n *Dirtynode) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.nrSegments)
+ stateSinkObject.Save(1, &n.parent)
+ stateSinkObject.Save(2, &n.parentIndex)
+ stateSinkObject.Save(3, &n.hasChildren)
+ stateSinkObject.Save(4, &n.maxGap)
+ stateSinkObject.Save(5, &n.keys)
+ stateSinkObject.Save(6, &n.values)
+ stateSinkObject.Save(7, &n.children)
+}
+
+func (n *Dirtynode) afterLoad() {}
+
+// +checklocksignore
+func (n *Dirtynode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.nrSegments)
+ stateSourceObject.Load(1, &n.parent)
+ stateSourceObject.Load(2, &n.parentIndex)
+ stateSourceObject.Load(3, &n.hasChildren)
+ stateSourceObject.Load(4, &n.maxGap)
+ stateSourceObject.Load(5, &n.keys)
+ stateSourceObject.Load(6, &n.values)
+ stateSourceObject.Load(7, &n.children)
+}
+
+func (d *DirtySegmentDataSlices) StateTypeName() string {
+ return "pkg/sentry/fs/fsutil.DirtySegmentDataSlices"
+}
+
+func (d *DirtySegmentDataSlices) StateFields() []string {
+ return []string{
+ "Start",
+ "End",
+ "Values",
+ }
+}
+
+func (d *DirtySegmentDataSlices) beforeSave() {}
+
+// +checklocksignore
+func (d *DirtySegmentDataSlices) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.Start)
+ stateSinkObject.Save(1, &d.End)
+ stateSinkObject.Save(2, &d.Values)
+}
+
+func (d *DirtySegmentDataSlices) afterLoad() {}
+
+// +checklocksignore
+func (d *DirtySegmentDataSlices) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.Start)
+ stateSourceObject.Load(1, &d.End)
+ stateSourceObject.Load(2, &d.Values)
+}
+
+func (s *FileRangeSet) StateTypeName() string {
+ return "pkg/sentry/fs/fsutil.FileRangeSet"
+}
+
+func (s *FileRangeSet) StateFields() []string {
+ return []string{
+ "root",
+ }
+}
+
+func (s *FileRangeSet) beforeSave() {}
+
+// +checklocksignore
+func (s *FileRangeSet) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ var rootValue *FileRangeSegmentDataSlices = s.saveRoot()
+ stateSinkObject.SaveValue(0, rootValue)
+}
+
+func (s *FileRangeSet) afterLoad() {}
+
+// +checklocksignore
+func (s *FileRangeSet) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.LoadValue(0, new(*FileRangeSegmentDataSlices), func(y interface{}) { s.loadRoot(y.(*FileRangeSegmentDataSlices)) })
+}
+
+func (n *FileRangenode) StateTypeName() string {
+ return "pkg/sentry/fs/fsutil.FileRangenode"
+}
+
+func (n *FileRangenode) StateFields() []string {
+ return []string{
+ "nrSegments",
+ "parent",
+ "parentIndex",
+ "hasChildren",
+ "maxGap",
+ "keys",
+ "values",
+ "children",
+ }
+}
+
+func (n *FileRangenode) beforeSave() {}
+
+// +checklocksignore
+func (n *FileRangenode) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.nrSegments)
+ stateSinkObject.Save(1, &n.parent)
+ stateSinkObject.Save(2, &n.parentIndex)
+ stateSinkObject.Save(3, &n.hasChildren)
+ stateSinkObject.Save(4, &n.maxGap)
+ stateSinkObject.Save(5, &n.keys)
+ stateSinkObject.Save(6, &n.values)
+ stateSinkObject.Save(7, &n.children)
+}
+
+func (n *FileRangenode) afterLoad() {}
+
+// +checklocksignore
+func (n *FileRangenode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.nrSegments)
+ stateSourceObject.Load(1, &n.parent)
+ stateSourceObject.Load(2, &n.parentIndex)
+ stateSourceObject.Load(3, &n.hasChildren)
+ stateSourceObject.Load(4, &n.maxGap)
+ stateSourceObject.Load(5, &n.keys)
+ stateSourceObject.Load(6, &n.values)
+ stateSourceObject.Load(7, &n.children)
+}
+
+func (f *FileRangeSegmentDataSlices) StateTypeName() string {
+ return "pkg/sentry/fs/fsutil.FileRangeSegmentDataSlices"
+}
+
+func (f *FileRangeSegmentDataSlices) StateFields() []string {
+ return []string{
+ "Start",
+ "End",
+ "Values",
+ }
+}
+
+func (f *FileRangeSegmentDataSlices) beforeSave() {}
+
+// +checklocksignore
+func (f *FileRangeSegmentDataSlices) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.Start)
+ stateSinkObject.Save(1, &f.End)
+ stateSinkObject.Save(2, &f.Values)
+}
+
+func (f *FileRangeSegmentDataSlices) afterLoad() {}
+
+// +checklocksignore
+func (f *FileRangeSegmentDataSlices) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.Start)
+ stateSourceObject.Load(1, &f.End)
+ stateSourceObject.Load(2, &f.Values)
+}
+
+func (s *FrameRefSet) StateTypeName() string {
+ return "pkg/sentry/fs/fsutil.FrameRefSet"
+}
+
+func (s *FrameRefSet) StateFields() []string {
+ return []string{
+ "root",
+ }
+}
+
+func (s *FrameRefSet) beforeSave() {}
+
+// +checklocksignore
+func (s *FrameRefSet) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ var rootValue *FrameRefSegmentDataSlices = s.saveRoot()
+ stateSinkObject.SaveValue(0, rootValue)
+}
+
+func (s *FrameRefSet) afterLoad() {}
+
+// +checklocksignore
+func (s *FrameRefSet) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.LoadValue(0, new(*FrameRefSegmentDataSlices), func(y interface{}) { s.loadRoot(y.(*FrameRefSegmentDataSlices)) })
+}
+
+func (n *FrameRefnode) StateTypeName() string {
+ return "pkg/sentry/fs/fsutil.FrameRefnode"
+}
+
+func (n *FrameRefnode) StateFields() []string {
+ return []string{
+ "nrSegments",
+ "parent",
+ "parentIndex",
+ "hasChildren",
+ "maxGap",
+ "keys",
+ "values",
+ "children",
+ }
+}
+
+func (n *FrameRefnode) beforeSave() {}
+
+// +checklocksignore
+func (n *FrameRefnode) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.nrSegments)
+ stateSinkObject.Save(1, &n.parent)
+ stateSinkObject.Save(2, &n.parentIndex)
+ stateSinkObject.Save(3, &n.hasChildren)
+ stateSinkObject.Save(4, &n.maxGap)
+ stateSinkObject.Save(5, &n.keys)
+ stateSinkObject.Save(6, &n.values)
+ stateSinkObject.Save(7, &n.children)
+}
+
+func (n *FrameRefnode) afterLoad() {}
+
+// +checklocksignore
+func (n *FrameRefnode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.nrSegments)
+ stateSourceObject.Load(1, &n.parent)
+ stateSourceObject.Load(2, &n.parentIndex)
+ stateSourceObject.Load(3, &n.hasChildren)
+ stateSourceObject.Load(4, &n.maxGap)
+ stateSourceObject.Load(5, &n.keys)
+ stateSourceObject.Load(6, &n.values)
+ stateSourceObject.Load(7, &n.children)
+}
+
+func (f *FrameRefSegmentDataSlices) StateTypeName() string {
+ return "pkg/sentry/fs/fsutil.FrameRefSegmentDataSlices"
+}
+
+func (f *FrameRefSegmentDataSlices) StateFields() []string {
+ return []string{
+ "Start",
+ "End",
+ "Values",
+ }
+}
+
+func (f *FrameRefSegmentDataSlices) beforeSave() {}
+
+// +checklocksignore
+func (f *FrameRefSegmentDataSlices) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.Start)
+ stateSinkObject.Save(1, &f.End)
+ stateSinkObject.Save(2, &f.Values)
+}
+
+func (f *FrameRefSegmentDataSlices) afterLoad() {}
+
+// +checklocksignore
+func (f *FrameRefSegmentDataSlices) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.Start)
+ stateSourceObject.Load(1, &f.End)
+ stateSourceObject.Load(2, &f.Values)
+}
+
+func init() {
+ state.Register((*DirtySet)(nil))
+ state.Register((*Dirtynode)(nil))
+ state.Register((*DirtySegmentDataSlices)(nil))
+ state.Register((*FileRangeSet)(nil))
+ state.Register((*FileRangenode)(nil))
+ state.Register((*FileRangeSegmentDataSlices)(nil))
+ state.Register((*FrameRefSet)(nil))
+ state.Register((*FrameRefnode)(nil))
+ state.Register((*FrameRefSegmentDataSlices)(nil))
+}
diff --git a/pkg/sentry/fs/fsutil/fsutil_state_autogen.go b/pkg/sentry/fs/fsutil/fsutil_state_autogen.go
new file mode 100644
index 000000000..748eb9272
--- /dev/null
+++ b/pkg/sentry/fs/fsutil/fsutil_state_autogen.go
@@ -0,0 +1,411 @@
+// automatically generated by stateify.
+
+package fsutil
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (d *DirtyInfo) StateTypeName() string {
+ return "pkg/sentry/fs/fsutil.DirtyInfo"
+}
+
+func (d *DirtyInfo) StateFields() []string {
+ return []string{
+ "Keep",
+ }
+}
+
+func (d *DirtyInfo) beforeSave() {}
+
+// +checklocksignore
+func (d *DirtyInfo) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.Keep)
+}
+
+func (d *DirtyInfo) afterLoad() {}
+
+// +checklocksignore
+func (d *DirtyInfo) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.Keep)
+}
+
+func (sdfo *StaticDirFileOperations) StateTypeName() string {
+ return "pkg/sentry/fs/fsutil.StaticDirFileOperations"
+}
+
+func (sdfo *StaticDirFileOperations) StateFields() []string {
+ return []string{
+ "dentryMap",
+ "dirCursor",
+ }
+}
+
+func (sdfo *StaticDirFileOperations) beforeSave() {}
+
+// +checklocksignore
+func (sdfo *StaticDirFileOperations) StateSave(stateSinkObject state.Sink) {
+ sdfo.beforeSave()
+ stateSinkObject.Save(0, &sdfo.dentryMap)
+ stateSinkObject.Save(1, &sdfo.dirCursor)
+}
+
+func (sdfo *StaticDirFileOperations) afterLoad() {}
+
+// +checklocksignore
+func (sdfo *StaticDirFileOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &sdfo.dentryMap)
+ stateSourceObject.Load(1, &sdfo.dirCursor)
+}
+
+func (n *NoReadWriteFile) StateTypeName() string {
+ return "pkg/sentry/fs/fsutil.NoReadWriteFile"
+}
+
+func (n *NoReadWriteFile) StateFields() []string {
+ return []string{}
+}
+
+func (n *NoReadWriteFile) beforeSave() {}
+
+// +checklocksignore
+func (n *NoReadWriteFile) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+}
+
+func (n *NoReadWriteFile) afterLoad() {}
+
+// +checklocksignore
+func (n *NoReadWriteFile) StateLoad(stateSourceObject state.Source) {
+}
+
+func (scr *FileStaticContentReader) StateTypeName() string {
+ return "pkg/sentry/fs/fsutil.FileStaticContentReader"
+}
+
+func (scr *FileStaticContentReader) StateFields() []string {
+ return []string{
+ "content",
+ }
+}
+
+func (scr *FileStaticContentReader) beforeSave() {}
+
+// +checklocksignore
+func (scr *FileStaticContentReader) StateSave(stateSinkObject state.Sink) {
+ scr.beforeSave()
+ stateSinkObject.Save(0, &scr.content)
+}
+
+func (scr *FileStaticContentReader) afterLoad() {}
+
+// +checklocksignore
+func (scr *FileStaticContentReader) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &scr.content)
+}
+
+func (f *HostFileMapper) StateTypeName() string {
+ return "pkg/sentry/fs/fsutil.HostFileMapper"
+}
+
+func (f *HostFileMapper) StateFields() []string {
+ return []string{
+ "refs",
+ }
+}
+
+func (f *HostFileMapper) beforeSave() {}
+
+// +checklocksignore
+func (f *HostFileMapper) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.refs)
+}
+
+// +checklocksignore
+func (f *HostFileMapper) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.refs)
+ stateSourceObject.AfterLoad(f.afterLoad)
+}
+
+func (h *HostMappable) StateTypeName() string {
+ return "pkg/sentry/fs/fsutil.HostMappable"
+}
+
+func (h *HostMappable) StateFields() []string {
+ return []string{
+ "hostFileMapper",
+ "backingFile",
+ "mappings",
+ }
+}
+
+func (h *HostMappable) beforeSave() {}
+
+// +checklocksignore
+func (h *HostMappable) StateSave(stateSinkObject state.Sink) {
+ h.beforeSave()
+ stateSinkObject.Save(0, &h.hostFileMapper)
+ stateSinkObject.Save(1, &h.backingFile)
+ stateSinkObject.Save(2, &h.mappings)
+}
+
+func (h *HostMappable) afterLoad() {}
+
+// +checklocksignore
+func (h *HostMappable) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &h.hostFileMapper)
+ stateSourceObject.Load(1, &h.backingFile)
+ stateSourceObject.Load(2, &h.mappings)
+}
+
+func (s *SimpleFileInode) StateTypeName() string {
+ return "pkg/sentry/fs/fsutil.SimpleFileInode"
+}
+
+func (s *SimpleFileInode) StateFields() []string {
+ return []string{
+ "InodeSimpleAttributes",
+ }
+}
+
+func (s *SimpleFileInode) beforeSave() {}
+
+// +checklocksignore
+func (s *SimpleFileInode) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.InodeSimpleAttributes)
+}
+
+func (s *SimpleFileInode) afterLoad() {}
+
+// +checklocksignore
+func (s *SimpleFileInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.InodeSimpleAttributes)
+}
+
+func (n *NoReadWriteFileInode) StateTypeName() string {
+ return "pkg/sentry/fs/fsutil.NoReadWriteFileInode"
+}
+
+func (n *NoReadWriteFileInode) StateFields() []string {
+ return []string{
+ "InodeSimpleAttributes",
+ }
+}
+
+func (n *NoReadWriteFileInode) beforeSave() {}
+
+// +checklocksignore
+func (n *NoReadWriteFileInode) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.InodeSimpleAttributes)
+}
+
+func (n *NoReadWriteFileInode) afterLoad() {}
+
+// +checklocksignore
+func (n *NoReadWriteFileInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.InodeSimpleAttributes)
+}
+
+func (i *InodeSimpleAttributes) StateTypeName() string {
+ return "pkg/sentry/fs/fsutil.InodeSimpleAttributes"
+}
+
+func (i *InodeSimpleAttributes) StateFields() []string {
+ return []string{
+ "fsType",
+ "unstable",
+ }
+}
+
+func (i *InodeSimpleAttributes) beforeSave() {}
+
+// +checklocksignore
+func (i *InodeSimpleAttributes) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.fsType)
+ stateSinkObject.Save(1, &i.unstable)
+}
+
+func (i *InodeSimpleAttributes) afterLoad() {}
+
+// +checklocksignore
+func (i *InodeSimpleAttributes) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.fsType)
+ stateSourceObject.Load(1, &i.unstable)
+}
+
+func (i *InodeSimpleExtendedAttributes) StateTypeName() string {
+ return "pkg/sentry/fs/fsutil.InodeSimpleExtendedAttributes"
+}
+
+func (i *InodeSimpleExtendedAttributes) StateFields() []string {
+ return []string{
+ "xattrs",
+ }
+}
+
+func (i *InodeSimpleExtendedAttributes) beforeSave() {}
+
+// +checklocksignore
+func (i *InodeSimpleExtendedAttributes) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.xattrs)
+}
+
+func (i *InodeSimpleExtendedAttributes) afterLoad() {}
+
+// +checklocksignore
+func (i *InodeSimpleExtendedAttributes) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.xattrs)
+}
+
+func (s *staticFile) StateTypeName() string {
+ return "pkg/sentry/fs/fsutil.staticFile"
+}
+
+func (s *staticFile) StateFields() []string {
+ return []string{
+ "FileStaticContentReader",
+ }
+}
+
+func (s *staticFile) beforeSave() {}
+
+// +checklocksignore
+func (s *staticFile) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.FileStaticContentReader)
+}
+
+func (s *staticFile) afterLoad() {}
+
+// +checklocksignore
+func (s *staticFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.FileStaticContentReader)
+}
+
+func (i *InodeStaticFileGetter) StateTypeName() string {
+ return "pkg/sentry/fs/fsutil.InodeStaticFileGetter"
+}
+
+func (i *InodeStaticFileGetter) StateFields() []string {
+ return []string{
+ "Contents",
+ }
+}
+
+func (i *InodeStaticFileGetter) beforeSave() {}
+
+// +checklocksignore
+func (i *InodeStaticFileGetter) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.Contents)
+}
+
+func (i *InodeStaticFileGetter) afterLoad() {}
+
+// +checklocksignore
+func (i *InodeStaticFileGetter) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.Contents)
+}
+
+func (c *CachingInodeOperations) StateTypeName() string {
+ return "pkg/sentry/fs/fsutil.CachingInodeOperations"
+}
+
+func (c *CachingInodeOperations) StateFields() []string {
+ return []string{
+ "backingFile",
+ "mfp",
+ "opts",
+ "attr",
+ "dirtyAttr",
+ "mappings",
+ "cache",
+ "dirty",
+ "hostFileMapper",
+ "refs",
+ }
+}
+
+func (c *CachingInodeOperations) beforeSave() {}
+
+// +checklocksignore
+func (c *CachingInodeOperations) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.backingFile)
+ stateSinkObject.Save(1, &c.mfp)
+ stateSinkObject.Save(2, &c.opts)
+ stateSinkObject.Save(3, &c.attr)
+ stateSinkObject.Save(4, &c.dirtyAttr)
+ stateSinkObject.Save(5, &c.mappings)
+ stateSinkObject.Save(6, &c.cache)
+ stateSinkObject.Save(7, &c.dirty)
+ stateSinkObject.Save(8, &c.hostFileMapper)
+ stateSinkObject.Save(9, &c.refs)
+}
+
+func (c *CachingInodeOperations) afterLoad() {}
+
+// +checklocksignore
+func (c *CachingInodeOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.backingFile)
+ stateSourceObject.Load(1, &c.mfp)
+ stateSourceObject.Load(2, &c.opts)
+ stateSourceObject.Load(3, &c.attr)
+ stateSourceObject.Load(4, &c.dirtyAttr)
+ stateSourceObject.Load(5, &c.mappings)
+ stateSourceObject.Load(6, &c.cache)
+ stateSourceObject.Load(7, &c.dirty)
+ stateSourceObject.Load(8, &c.hostFileMapper)
+ stateSourceObject.Load(9, &c.refs)
+}
+
+func (c *CachingInodeOperationsOptions) StateTypeName() string {
+ return "pkg/sentry/fs/fsutil.CachingInodeOperationsOptions"
+}
+
+func (c *CachingInodeOperationsOptions) StateFields() []string {
+ return []string{
+ "ForcePageCache",
+ "LimitHostFDTranslation",
+ }
+}
+
+func (c *CachingInodeOperationsOptions) beforeSave() {}
+
+// +checklocksignore
+func (c *CachingInodeOperationsOptions) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.ForcePageCache)
+ stateSinkObject.Save(1, &c.LimitHostFDTranslation)
+}
+
+func (c *CachingInodeOperationsOptions) afterLoad() {}
+
+// +checklocksignore
+func (c *CachingInodeOperationsOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.ForcePageCache)
+ stateSourceObject.Load(1, &c.LimitHostFDTranslation)
+}
+
+func init() {
+ state.Register((*DirtyInfo)(nil))
+ state.Register((*StaticDirFileOperations)(nil))
+ state.Register((*NoReadWriteFile)(nil))
+ state.Register((*FileStaticContentReader)(nil))
+ state.Register((*HostFileMapper)(nil))
+ state.Register((*HostMappable)(nil))
+ state.Register((*SimpleFileInode)(nil))
+ state.Register((*NoReadWriteFileInode)(nil))
+ state.Register((*InodeSimpleAttributes)(nil))
+ state.Register((*InodeSimpleExtendedAttributes)(nil))
+ state.Register((*staticFile)(nil))
+ state.Register((*InodeStaticFileGetter)(nil))
+ state.Register((*CachingInodeOperations)(nil))
+ state.Register((*CachingInodeOperationsOptions)(nil))
+}
diff --git a/pkg/sentry/fs/fsutil/fsutil_unsafe_state_autogen.go b/pkg/sentry/fs/fsutil/fsutil_unsafe_state_autogen.go
new file mode 100644
index 000000000..00b0994f6
--- /dev/null
+++ b/pkg/sentry/fs/fsutil/fsutil_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package fsutil
diff --git a/pkg/sentry/fs/fsutil/inode_cached_test.go b/pkg/sentry/fs/fsutil/inode_cached_test.go
deleted file mode 100644
index 25e76d9f2..000000000
--- a/pkg/sentry/fs/fsutil/inode_cached_test.go
+++ /dev/null
@@ -1,390 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fsutil
-
-import (
- "bytes"
- "io"
- "testing"
-
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/errors/linuxerr"
- "gvisor.dev/gvisor/pkg/hostarch"
- "gvisor.dev/gvisor/pkg/safemem"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/fs"
- ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
- "gvisor.dev/gvisor/pkg/sentry/memmap"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-type noopBackingFile struct{}
-
-func (noopBackingFile) ReadToBlocksAt(ctx context.Context, dsts safemem.BlockSeq, offset uint64) (uint64, error) {
- return dsts.NumBytes(), nil
-}
-
-func (noopBackingFile) WriteFromBlocksAt(ctx context.Context, srcs safemem.BlockSeq, offset uint64) (uint64, error) {
- return srcs.NumBytes(), nil
-}
-
-func (noopBackingFile) SetMaskedAttributes(context.Context, fs.AttrMask, fs.UnstableAttr, bool) error {
- return nil
-}
-
-func (noopBackingFile) Sync(context.Context) error {
- return nil
-}
-
-func (noopBackingFile) FD() int {
- return -1
-}
-
-func (noopBackingFile) Allocate(ctx context.Context, offset int64, length int64) error {
- return nil
-}
-
-func TestSetPermissions(t *testing.T) {
- ctx := contexttest.Context(t)
-
- uattr := fs.WithCurrentTime(ctx, fs.UnstableAttr{
- Perms: fs.FilePermsFromMode(0444),
- })
- iops := NewCachingInodeOperations(ctx, noopBackingFile{}, uattr, CachingInodeOperationsOptions{})
- defer iops.Release()
-
- perms := fs.FilePermsFromMode(0777)
- if !iops.SetPermissions(ctx, nil, perms) {
- t.Fatalf("SetPermissions failed, want success")
- }
-
- // Did permissions change?
- if iops.attr.Perms != perms {
- t.Fatalf("got perms +%v, want +%v", iops.attr.Perms, perms)
- }
-
- // Did status change time change?
- if !iops.dirtyAttr.StatusChangeTime {
- t.Fatalf("got status change time not dirty, want dirty")
- }
- if iops.attr.StatusChangeTime.Equal(uattr.StatusChangeTime) {
- t.Fatalf("got status change time unchanged")
- }
-}
-
-func TestSetTimestamps(t *testing.T) {
- ctx := contexttest.Context(t)
- for _, test := range []struct {
- desc string
- ts fs.TimeSpec
- wantChanged fs.AttrMask
- }{
- {
- desc: "noop",
- ts: fs.TimeSpec{
- ATimeOmit: true,
- MTimeOmit: true,
- },
- wantChanged: fs.AttrMask{},
- },
- {
- desc: "access time only",
- ts: fs.TimeSpec{
- ATime: ktime.NowFromContext(ctx),
- MTimeOmit: true,
- },
- wantChanged: fs.AttrMask{
- AccessTime: true,
- },
- },
- {
- desc: "modification time only",
- ts: fs.TimeSpec{
- ATimeOmit: true,
- MTime: ktime.NowFromContext(ctx),
- },
- wantChanged: fs.AttrMask{
- ModificationTime: true,
- },
- },
- {
- desc: "access and modification time",
- ts: fs.TimeSpec{
- ATime: ktime.NowFromContext(ctx),
- MTime: ktime.NowFromContext(ctx),
- },
- wantChanged: fs.AttrMask{
- AccessTime: true,
- ModificationTime: true,
- },
- },
- {
- desc: "system time access and modification time",
- ts: fs.TimeSpec{
- ATimeSetSystemTime: true,
- MTimeSetSystemTime: true,
- },
- wantChanged: fs.AttrMask{
- AccessTime: true,
- ModificationTime: true,
- },
- },
- } {
- t.Run(test.desc, func(t *testing.T) {
- ctx := contexttest.Context(t)
-
- epoch := ktime.ZeroTime
- uattr := fs.UnstableAttr{
- AccessTime: epoch,
- ModificationTime: epoch,
- StatusChangeTime: epoch,
- }
- iops := NewCachingInodeOperations(ctx, noopBackingFile{}, uattr, CachingInodeOperationsOptions{})
- defer iops.Release()
-
- if err := iops.SetTimestamps(ctx, nil, test.ts); err != nil {
- t.Fatalf("SetTimestamps got error %v, want nil", err)
- }
- if test.wantChanged.AccessTime {
- if !iops.attr.AccessTime.After(uattr.AccessTime) {
- t.Fatalf("diritied access time did not advance, want %v > %v", iops.attr.AccessTime, uattr.AccessTime)
- }
- if !iops.dirtyAttr.StatusChangeTime {
- t.Fatalf("dirty access time requires dirty status change time")
- }
- if !iops.attr.StatusChangeTime.After(uattr.StatusChangeTime) {
- t.Fatalf("dirtied status change time did not advance")
- }
- }
- if test.wantChanged.ModificationTime {
- if !iops.attr.ModificationTime.After(uattr.ModificationTime) {
- t.Fatalf("diritied modification time did not advance")
- }
- if !iops.dirtyAttr.StatusChangeTime {
- t.Fatalf("dirty modification time requires dirty status change time")
- }
- if !iops.attr.StatusChangeTime.After(uattr.StatusChangeTime) {
- t.Fatalf("dirtied status change time did not advance")
- }
- }
- })
- }
-}
-
-func TestTruncate(t *testing.T) {
- ctx := contexttest.Context(t)
-
- uattr := fs.UnstableAttr{
- Size: 0,
- }
- iops := NewCachingInodeOperations(ctx, noopBackingFile{}, uattr, CachingInodeOperationsOptions{})
- defer iops.Release()
-
- if err := iops.Truncate(ctx, nil, uattr.Size); err != nil {
- t.Fatalf("Truncate got error %v, want nil", err)
- }
- var size int64 = 4096
- if err := iops.Truncate(ctx, nil, size); err != nil {
- t.Fatalf("Truncate got error %v, want nil", err)
- }
- if iops.attr.Size != size {
- t.Fatalf("Truncate got %d, want %d", iops.attr.Size, size)
- }
- if !iops.dirtyAttr.ModificationTime || !iops.dirtyAttr.StatusChangeTime {
- t.Fatalf("Truncate did not dirty modification and status change time")
- }
- if !iops.attr.ModificationTime.After(uattr.ModificationTime) {
- t.Fatalf("dirtied modification time did not change")
- }
- if !iops.attr.StatusChangeTime.After(uattr.StatusChangeTime) {
- t.Fatalf("dirtied status change time did not change")
- }
-}
-
-type sliceBackingFile struct {
- data []byte
-}
-
-func newSliceBackingFile(data []byte) *sliceBackingFile {
- return &sliceBackingFile{data}
-}
-
-func (f *sliceBackingFile) ReadToBlocksAt(ctx context.Context, dsts safemem.BlockSeq, offset uint64) (uint64, error) {
- r := safemem.BlockSeqReader{safemem.BlockSeqOf(safemem.BlockFromSafeSlice(f.data)).DropFirst64(offset)}
- return r.ReadToBlocks(dsts)
-}
-
-func (f *sliceBackingFile) WriteFromBlocksAt(ctx context.Context, srcs safemem.BlockSeq, offset uint64) (uint64, error) {
- w := safemem.BlockSeqWriter{safemem.BlockSeqOf(safemem.BlockFromSafeSlice(f.data)).DropFirst64(offset)}
- return w.WriteFromBlocks(srcs)
-}
-
-func (*sliceBackingFile) SetMaskedAttributes(context.Context, fs.AttrMask, fs.UnstableAttr, bool) error {
- return nil
-}
-
-func (*sliceBackingFile) Sync(context.Context) error {
- return nil
-}
-
-func (*sliceBackingFile) FD() int {
- return -1
-}
-
-func (f *sliceBackingFile) Allocate(ctx context.Context, offset int64, length int64) error {
- return linuxerr.EOPNOTSUPP
-}
-
-type noopMappingSpace struct{}
-
-// Invalidate implements memmap.MappingSpace.Invalidate.
-func (noopMappingSpace) Invalidate(ar hostarch.AddrRange, opts memmap.InvalidateOpts) {
-}
-
-func anonInode(ctx context.Context) *fs.Inode {
- return fs.NewInode(ctx, &SimpleFileInode{
- InodeSimpleAttributes: NewInodeSimpleAttributes(ctx, fs.FileOwnerFromContext(ctx), fs.FilePermissions{
- User: fs.PermMask{Read: true, Write: true},
- }, 0),
- }, fs.NewPseudoMountSource(ctx), fs.StableAttr{
- Type: fs.Anonymous,
- BlockSize: hostarch.PageSize,
- })
-}
-
-func pagesOf(bs ...byte) []byte {
- buf := make([]byte, 0, len(bs)*hostarch.PageSize)
- for _, b := range bs {
- buf = append(buf, bytes.Repeat([]byte{b}, hostarch.PageSize)...)
- }
- return buf
-}
-
-func TestRead(t *testing.T) {
- ctx := contexttest.Context(t)
-
- // Construct a 3-page file.
- buf := pagesOf('a', 'b', 'c')
- file := fs.NewFile(ctx, fs.NewDirent(ctx, anonInode(ctx), "anon"), fs.FileFlags{}, nil)
- uattr := fs.UnstableAttr{
- Size: int64(len(buf)),
- }
- iops := NewCachingInodeOperations(ctx, newSliceBackingFile(buf), uattr, CachingInodeOperationsOptions{})
- defer iops.Release()
-
- // Expect the cache to be initially empty.
- if cached := iops.cache.Span(); cached != 0 {
- t.Errorf("Span got %d, want 0", cached)
- }
-
- // Create a memory mapping of the second page (as CachingInodeOperations
- // expects to only cache mapped pages), then call Translate to force it to
- // be cached.
- var ms noopMappingSpace
- ar := hostarch.AddrRange{hostarch.PageSize, 2 * hostarch.PageSize}
- if err := iops.AddMapping(ctx, ms, ar, hostarch.PageSize, true); err != nil {
- t.Fatalf("AddMapping got %v, want nil", err)
- }
- mr := memmap.MappableRange{hostarch.PageSize, 2 * hostarch.PageSize}
- if _, err := iops.Translate(ctx, mr, mr, hostarch.Read); err != nil {
- t.Fatalf("Translate got %v, want nil", err)
- }
- if cached := iops.cache.Span(); cached != hostarch.PageSize {
- t.Errorf("SpanRange got %d, want %d", cached, hostarch.PageSize)
- }
-
- // Try to read 4 pages. The first and third pages should be read directly
- // from the "file", the second page should be read from the cache, and only
- // 3 pages (the size of the file) should be readable.
- rbuf := make([]byte, 4*hostarch.PageSize)
- dst := usermem.BytesIOSequence(rbuf)
- n, err := iops.Read(ctx, file, dst, 0)
- if n != 3*hostarch.PageSize || (err != nil && err != io.EOF) {
- t.Fatalf("Read got (%d, %v), want (%d, nil or EOF)", n, err, 3*hostarch.PageSize)
- }
- rbuf = rbuf[:3*hostarch.PageSize]
-
- // Did we get the bytes we expect?
- if !bytes.Equal(rbuf, buf) {
- t.Errorf("Read back bytes %v, want %v", rbuf, buf)
- }
-
- // Delete the memory mapping before iops.Release(). The cached page will
- // either be evicted by ctx's pgalloc.MemoryFile, or dropped by
- // iops.Release().
- iops.RemoveMapping(ctx, ms, ar, hostarch.PageSize, true)
-}
-
-func TestWrite(t *testing.T) {
- ctx := contexttest.Context(t)
-
- // Construct a 4-page file.
- buf := pagesOf('a', 'b', 'c', 'd')
- orig := append([]byte(nil), buf...)
- inode := anonInode(ctx)
- uattr := fs.UnstableAttr{
- Size: int64(len(buf)),
- }
- iops := NewCachingInodeOperations(ctx, newSliceBackingFile(buf), uattr, CachingInodeOperationsOptions{})
- defer iops.Release()
-
- // Expect the cache to be initially empty.
- if cached := iops.cache.Span(); cached != 0 {
- t.Errorf("Span got %d, want 0", cached)
- }
-
- // Create a memory mapping of the second and third pages (as
- // CachingInodeOperations expects to only cache mapped pages), then call
- // Translate to force them to be cached.
- var ms noopMappingSpace
- ar := hostarch.AddrRange{hostarch.PageSize, 3 * hostarch.PageSize}
- if err := iops.AddMapping(ctx, ms, ar, hostarch.PageSize, true); err != nil {
- t.Fatalf("AddMapping got %v, want nil", err)
- }
- defer iops.RemoveMapping(ctx, ms, ar, hostarch.PageSize, true)
- mr := memmap.MappableRange{hostarch.PageSize, 3 * hostarch.PageSize}
- if _, err := iops.Translate(ctx, mr, mr, hostarch.Read); err != nil {
- t.Fatalf("Translate got %v, want nil", err)
- }
- if cached := iops.cache.Span(); cached != 2*hostarch.PageSize {
- t.Errorf("SpanRange got %d, want %d", cached, 2*hostarch.PageSize)
- }
-
- // Write to the first 2 pages.
- wbuf := pagesOf('e', 'f')
- src := usermem.BytesIOSequence(wbuf)
- n, err := iops.Write(ctx, src, 0)
- if n != 2*hostarch.PageSize || err != nil {
- t.Fatalf("Write got (%d, %v), want (%d, nil)", n, err, 2*hostarch.PageSize)
- }
-
- // The first page should have been written directly, since it was not cached.
- want := append([]byte(nil), orig...)
- copy(want, pagesOf('e'))
- if !bytes.Equal(buf, want) {
- t.Errorf("File contents are %v, want %v", buf, want)
- }
-
- // Sync back to the "backing file".
- if err := iops.WriteOut(ctx, inode); err != nil {
- t.Errorf("Sync got %v, want nil", err)
- }
-
- // Now the second page should have been written as well.
- copy(want[hostarch.PageSize:], pagesOf('f'))
- if !bytes.Equal(buf, want) {
- t.Errorf("File contents are %v, want %v", buf, want)
- }
-}
diff --git a/pkg/sentry/fs/g3doc/.gitignore b/pkg/sentry/fs/g3doc/.gitignore
deleted file mode 100644
index 2d19fc766..000000000
--- a/pkg/sentry/fs/g3doc/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-*.html
diff --git a/pkg/sentry/fs/g3doc/fuse.md b/pkg/sentry/fs/g3doc/fuse.md
deleted file mode 100644
index 05e043583..000000000
--- a/pkg/sentry/fs/g3doc/fuse.md
+++ /dev/null
@@ -1,360 +0,0 @@
-# Foreword
-
-This document describes an ongoing project to support FUSE filesystems within
-the sentry. This is intended to become the final documentation for this
-subsystem, and is therefore written in the past tense. However, FUSE support is
-currently incomplete, and the document will be updated as things progress.
-
-# FUSE: Filesystem in Userspace
-
-The sentry supports dispatching filesystem operations to a FUSE server, allowing
-FUSE filesystems to be used within a sandbox.
-
-## Overview
-
-FUSE has two main components:
-
-1. A client kernel driver (canonically `fuse.ko` in Linux), which forwards
- filesystem operations (usually initiated by syscalls) to the server.
-
-2. A server, which is a userspace daemon that implements the actual filesystem.
-
-The sentry implements the client component, which allows a server daemon running
-within the sandbox to implement a filesystem for use inside that same sandbox.
-
-A FUSE filesystem is initialized with `mount(2)`, typically with the help of a
-utility like `fusermount(1)`. Various mount options exist for establishing
-ownership and access permissions on the filesystem, but the most important mount
-option is a file descriptor used to establish communication between the client
-and server.
-
-The FUSE device FD is obtained by opening `/dev/fuse`. During regular operation,
-the client and server use the FUSE protocol described in `fuse(4)` to service
-filesystem operations. See the "Protocol" section below for more information
-about this protocol. The core of the sentry support for FUSE is the client-side
-implementation of this protocol.
-
-## FUSE in the Sentry
-
-The sentry's FUSE client targets VFS2 and has the following components:
-
-- An implementation of `/dev/fuse`.
-
-- A VFS2 filesystem for mapping syscalls to FUSE ops. Since we're targeting
- VFS2, one point of contention may be the lack of inodes in VFS2. We can
- tentatively implement a kernfs-based filesystem to bridge the gap in APIs.
-  The kernfs base functionality can serve the role of the Linux inode cache,
-  and the filesystem can map VFS2 syscalls to kernfs inode operations; see
- the `kernfs.Inode` interface.
-
-The FUSE protocol lends itself well to marshaling with `go_marshal`. The various
-request and response packets can be defined in the ABI package and converted to
-and from the wire format using `go_marshal`.
-
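-As a rough, standalone sketch of that idea (not the sentry's actual marshaling
-code), a request header can be declared as a fixed-layout Go struct and
-serialized with `encoding/binary`; the layout mirrors `struct fuse_in_header`
-from the Appendix, and the struct and constant names below are assumptions:
-
-```go
-package main
-
-import (
-	"bytes"
-	"encoding/binary"
-	"fmt"
-)
-
-// fuseInHeader mirrors struct fuse_in_header (see the Appendix for the C
-// definition). Illustrative only; in the sentry these packets would be defined
-// in the ABI package and marshalled with go_marshal, as described above.
-type fuseInHeader struct {
-	Len     uint32
-	Opcode  uint32
-	Unique  uint64
-	NodeID  uint64
-	UID     uint32
-	GID     uint32
-	PID     uint32
-	Padding uint32
-}
-
-const fuseInit = 26 // FUSE_INIT opcode in Linux's fuse.h.
-
-func main() {
-	hdr := fuseInHeader{Opcode: fuseInit, Unique: 1, NodeID: 1}
-	hdr.Len = uint32(binary.Size(hdr)) // No payload in this sketch.
-
-	var buf bytes.Buffer
-	// The structs are exchanged in host byte order; little-endian is assumed.
-	if err := binary.Write(&buf, binary.LittleEndian, &hdr); err != nil {
-		panic(err)
-	}
-	fmt.Printf("wire bytes: % x\n", buf.Bytes())
-}
-```
-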
-### Design Goals
-
-- While filesystem performance is always important, the sentry's FUSE support
- is primarily concerned with compatibility, with performance as a secondary
- concern.
-
-- Avoiding deadlocks from a hung server daemon.
-
-- Consider the potential for denial of service from a malicious server daemon.
- Protecting itself from userspace is already a design goal for the sentry,
- but needs additional consideration for FUSE. Normally, an operating system
- doesn't rely on userspace to make progress with filesystem operations. Since
- this changes with FUSE, it opens up the possibility of creating a chain of
- dependencies controlled by userspace, which could affect an entire sandbox.
- For example: a FUSE op can block a syscall, which could be holding a
- subsystem lock, which can then block another task goroutine.
-
-### Milestones
-
-Below are some broad goals to aim for while implementing FUSE in the sentry.
-Many FUSE ops can be grouped into broad categories of functionality, and most
-ops can be implemented in parallel.
-
-#### Minimal client that can mount a trivial FUSE filesystem
-
-- Implement `/dev/fuse` - a character device used to establish an FD for
- communication between the sentry and the server daemon.
-
-- Implement basic FUSE ops like `FUSE_INIT`.
-
-#### Read-only mount with basic file operations
-
-- Implement the majority of file, directory and file descriptor FUSE ops. For
- this milestone, we can skip uncommon or complex operations like mmap, mknod,
- file locking, poll, and extended attributes. We can stub these out along
-  with any ops that modify the filesystem. The exact list of required ops is
- to be determined, but the goal is to mount a real filesystem as read-only,
- and be able to read contents from the filesystem in the sentry.
-
-#### Full read-write support
-
-- Implement the remaining FUSE ops and decide if we can omit rarely used
- operations like ioctl.
-
-### Design Details
-
-#### Lifecycle for a FUSE Request
-
-- User invokes a syscall
-- Sentry prepares corresponding request
- - If FUSE device is available
- - Write the request in binary
- - If FUSE device is full
- - Kernel task blocked until available
-- Sentry notifies the readers of fuse device that it's ready for read
-- FUSE daemon reads the request and processes it
-- Sentry waits until a reply is written to the FUSE device
- - but returns directly for async requests
-- FUSE daemon writes to the fuse device
-- Sentry processes the reply
- - For sync requests, unblock blocked kernel task
- - For async requests, execute pre-specified callback if any
-- Sentry returns the syscall to the user
-
-#### Channels and Queues for Requests in Different Stages
-
-`connection.initializedChan`
-
-- a channel that requests issued before connection initialization block on.
-
-`fd.queue`
-
-- a queue of requests that haven’t been read by the FUSE daemon yet.
-
-`fd.completions`
-
-- a map of the requests that have been prepared but have not yet received a
-  response, including the ones on the `fd.queue` (see the sketch after this
-  list).
-
-`fd.waitQueue`
-
-- a queue of waiters that are waiting for the fuse device fd to be available,
-  such as the FUSE daemon.
-
-`fd.fullQueueCh`
-
-- a channel that the kernel task will be blocked on when the fd is not
- available.
-
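-The general pattern behind `fd.completions` can be sketched as a map of
-in-flight requests keyed by their unique ID, each paired with a channel that
-the issuing task blocks on until the daemon's reply arrives. This is a
-simplified stand-in, not the sentry's actual types:
-
-```go
-package main
-
-import (
-	"fmt"
-	"sync"
-)
-
-type reply struct{ data []byte }
-
-// inflight tracks requests that have been prepared but not yet answered.
-type inflight struct {
-	mu          sync.Mutex
-	completions map[uint64]chan reply
-}
-
-// register records a new request and returns the channel its issuer waits on.
-func (f *inflight) register(unique uint64) chan reply {
-	f.mu.Lock()
-	defer f.mu.Unlock()
-	ch := make(chan reply, 1)
-	f.completions[unique] = ch
-	return ch
-}
-
-// complete is what the /dev/fuse write path would call when the daemon
-// replies; it wakes the task waiting on the matching request.
-func (f *inflight) complete(unique uint64, r reply) {
-	f.mu.Lock()
-	ch, ok := f.completions[unique]
-	delete(f.completions, unique)
-	f.mu.Unlock()
-	if ok {
-		ch <- r
-	}
-}
-
-func main() {
-	f := &inflight{completions: make(map[uint64]chan reply)}
-	ch := f.register(42)
-	go f.complete(42, reply{data: []byte("done")}) // Simulated daemon reply.
-	fmt.Println(string((<-ch).data))               // done
-}
-```
-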
-#### Basic I/O Implementation
-
-Currently we have implemented basic read and write functionality for our FUSE
-support. We describe the design and ways to improve it here:
-
-##### Basic FUSE Read
-
-The vfs2 expects implementations of `vfs.FileDescriptionImpl.Read()` and
-`vfs.FileDescriptionImpl.PRead()`. When a syscall is made, it will eventually
-reach our implementation of those interface functions located at
-`pkg/sentry/fsimpl/fuse/regular_file.go` for regular files.
-
-After validating the input, the sentry sends `FUSE_READ` requests to the FUSE
-daemon. The FUSE daemon returns the data after the `fuse_out_header` in its
-responses. For the first version, we create a copy of that data in kernel
-memory; it is represented as a byte slice in the marshalled struct. At the
-moment this happens as a common step for all FUSE responses, in
-`pkg/sentry/fsimpl/fuse/dev.go:writeLocked()`. We then copy directly from this
-intermediate buffer to the user buffer provided by the read syscall.
-
-There is an extra requirement for FUSE: when mounting the FUSE fs, the mounter
-or the FUSE daemon can specify a `max_read` or a `max_pages` parameter. These
-are the upper bound on the number of bytes to read in each `FUSE_READ` request.
-We implemented code to handle these fragmented reads.
-
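-A minimal sketch of that fragmentation logic, assuming a hypothetical `readAt`
-callback that issues a single `FUSE_READ` and a `maxRead` limit standing in for
-the negotiated `max_read`/`max_pages` value:
-
-```go
-package main
-
-import "fmt"
-
-// fragmentedRead splits one logical read into chunks of at most maxRead bytes,
-// stopping early on a short read (likely EOF). Illustrative only.
-func fragmentedRead(readAt func(off uint64, size uint32) ([]byte, error), off uint64, size, maxRead uint32) ([]byte, error) {
-	out := make([]byte, 0, size)
-	for size > 0 {
-		chunk := size
-		if chunk > maxRead {
-			chunk = maxRead
-		}
-		data, err := readAt(off, chunk)
-		if err != nil {
-			return out, err
-		}
-		out = append(out, data...)
-		if uint32(len(data)) < chunk {
-			break // Short read: nothing more to fetch.
-		}
-		off += uint64(len(data))
-		size -= uint32(len(data))
-	}
-	return out, nil
-}
-
-func main() {
-	backing := []byte("0123456789")
-	readAt := func(off uint64, size uint32) ([]byte, error) {
-		end := off + uint64(size)
-		if end > uint64(len(backing)) {
-			end = uint64(len(backing))
-		}
-		if off >= uint64(len(backing)) {
-			return nil, nil
-		}
-		return backing[off:end], nil
-	}
-	data, _ := fragmentedRead(readAt, 2, 8, 3) // Issues reads of 3, 3 and 2 bytes.
-	fmt.Printf("%q\n", data)                   // "23456789"
-}
-```
-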
-To improve performance, ideally we should have a buffer cache to copy the data
-from the FUSE daemon's responses into, instead of a single-use temporary
-buffer, as is also the design of several other existing filesystem
-implementations for the sentry. Directly mapping the memory of one process
-into another could also boost performance, but we did not choose to do so in
-order to keep them isolated.
-
-##### Basic FUSE Write
-
-The vfs2 invokes implementations of `vfs.FileDescriptionImpl.Write()` and
-`vfs.FileDescriptionImpl.PWrite()` on the regular file descriptor of FUSE when a
-user makes a write(2) or pwrite(2) syscall.
-
-For valid writes, the sentry sends the bytes to write after a `FUSE_WRITE`
-header (which can be regarded as a request with two payloads) to the FUSE
-daemon. For the first version, we allocate a buffer in kernel memory to store
-the bytes from the user, and copy directly from that buffer to the memory of
-the FUSE daemon. This happens at `pkg/sentry/fsimpl/fuse/dev.go:readLocked()`.
-
-The parameters `max_write` and `max_pages` restrict the number of bytes in one
-`FUSE_WRITE`. The current implementation contains code to handle fragmented
-writes.
-
-To improve performance further, the extra copy created to store the bytes to
-write could likewise be replaced by the buffer cache.
-
-# Appendix
-
-## FUSE Protocol
-
-The FUSE protocol is a request-response protocol. All requests are initiated by
-the client. The wire format of the protocol is raw C structs serialized to
-memory.
-
-All FUSE requests begin with the following request header:
-
-```c
-struct fuse_in_header {
- uint32_t len; // Length of the request, including this header.
- uint32_t opcode; // Requested operation.
- uint64_t unique; // A unique identifier for this request.
- uint64_t nodeid; // ID of the filesystem object being operated on.
- uint32_t uid; // UID of the requesting process.
- uint32_t gid; // GID of the requesting process.
- uint32_t pid; // PID of the requesting process.
- uint32_t padding;
-};
-```
-
-The request is then followed by a payload specific to the `opcode`.
-
-All responses begin with this response header:
-
-```c
-struct fuse_out_header {
- uint32_t len; // Length of the response, including this header.
- int32_t error; // Status of the request, 0 if success.
- uint64_t unique; // The unique identifier from the corresponding request.
-};
-```
-
-The response payload also depends on the request `opcode`. If `error != 0`, the
-response payload must be empty.
-
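-As a hedged illustration of this error convention (again standalone Go using
-`encoding/binary`, not the sentry's real parsing code; the names below are
-assumptions), a reply can be decoded and its `error` field mapped to an errno:
-
-```go
-package main
-
-import (
-	"bytes"
-	"encoding/binary"
-	"fmt"
-	"syscall"
-)
-
-// fuseOutHeader mirrors struct fuse_out_header above.
-type fuseOutHeader struct {
-	Len    uint32
-	Error  int32
-	Unique uint64
-}
-
-// parseResponse splits a raw reply into its header and payload. A non-zero
-// error field holds the negated errno, and the payload must then be empty.
-func parseResponse(wire []byte) (fuseOutHeader, []byte, error) {
-	var hdr fuseOutHeader
-	if err := binary.Read(bytes.NewReader(wire), binary.LittleEndian, &hdr); err != nil {
-		return hdr, nil, err
-	}
-	if hdr.Error != 0 {
-		return hdr, nil, syscall.Errno(-hdr.Error)
-	}
-	return hdr, wire[binary.Size(hdr):hdr.Len], nil
-}
-
-func main() {
-	// A 16-byte header reporting ENOENT (-2) for request 7, with no payload.
-	var buf bytes.Buffer
-	binary.Write(&buf, binary.LittleEndian, fuseOutHeader{Len: 16, Error: -2, Unique: 7})
-	_, _, err := parseResponse(buf.Bytes())
-	fmt.Println(err) // no such file or directory
-}
-```
-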
-### Operations
-
-The following is a list of all FUSE operations used in `fuse_in_header.opcode`
-as of Linux v4.4, and a brief description of their purpose. These are defined in
-`uapi/linux/fuse.h`. Many of these have a corresponding request and response
-payload struct; `fuse(4)` has details for some of these. We also note how these
-operations map to the sentry virtual filesystem.
-
-#### FUSE meta-operations
-
-These operations are specific to FUSE and don't have a corresponding action in a
-generic filesystem.
-
-- `FUSE_INIT`: This operation initializes a new FUSE filesystem, and is the
- first message sent by the client after mount. This is used for version and
- feature negotiation. This is related to `mount(2)`.
-- `FUSE_DESTROY`: Teardown a FUSE filesystem, related to `unmount(2)`.
-- `FUSE_INTERRUPT`: Interrupts an in-flight operation, specified by the
- `fuse_in_header.unique` value provided in the corresponding request header.
- The client can send at most one of these per request, and will enter an
- uninterruptible wait for a reply. The server is expected to reply promptly.
-- `FUSE_FORGET`: A hint to the server that the server should evict the
-  indicated node from any caches. This is wired up to `(struct
-  super_operations).evict_inode` in Linux, which is in turn hooked up as the
-  inode cache shrinker, which is typically triggered by system memory pressure.
-- `FUSE_BATCH_FORGET`: Batch version of `FUSE_FORGET`.
-
-#### Filesystem Syscalls
-
-These FUSE ops map directly to an equivalent filesystem syscall, or family of
-syscalls. The relevant syscalls have a similar name to the operation, unless
-otherwise noted.
-
-Node creation:
-
-- `FUSE_MKNOD`
-- `FUSE_MKDIR`
-- `FUSE_CREATE`: This is equivalent to `open(2)` and `creat(2)`, which
- atomically creates and opens a node.
-
-Node attributes and extended attributes:
-
-- `FUSE_GETATTR`
-- `FUSE_SETATTR`
-- `FUSE_SETXATTR`
-- `FUSE_GETXATTR`
-- `FUSE_LISTXATTR`
-- `FUSE_REMOVEXATTR`
-
-Node link manipulation:
-
-- `FUSE_READLINK`
-- `FUSE_LINK`
-- `FUSE_SYMLINK`
-- `FUSE_UNLINK`
-
-Directory operations:
-
-- `FUSE_RMDIR`
-- `FUSE_RENAME`
-- `FUSE_RENAME2`
-- `FUSE_OPENDIR`: `open(2)` for directories.
-- `FUSE_RELEASEDIR`: `close(2)` for directories.
-- `FUSE_READDIR`
-- `FUSE_READDIRPLUS`
-- `FUSE_FSYNCDIR`: `fsync(2)` for directories.
-- `FUSE_LOOKUP`: Establishes a unique identifier for a FS node. This is
- reminiscent of `VirtualFilesystem.GetDentryAt` in that it resolves a path
- component to a node. However the returned identifier is opaque to the
- client. The server must remember this mapping, as this is how the client
- will reference the node in the future.
-
-File operations:
-
-- `FUSE_OPEN`: `open(2)` for files.
-- `FUSE_RELEASE`: `close(2)` for files.
-- `FUSE_FSYNC`
-- `FUSE_FALLOCATE`
-- `FUSE_SETUPMAPPING`: Creates a memory map on a file for `mmap(2)`.
-- `FUSE_REMOVEMAPPING`: Removes a memory map for `munmap(2)`.
-
-File locking:
-
-- `FUSE_GETLK`
-- `FUSE_SETLK`
-- `FUSE_SETLKW`
-- `FUSE_COPY_FILE_RANGE`
-
-File descriptor operations:
-
-- `FUSE_IOCTL`
-- `FUSE_POLL`
-- `FUSE_LSEEK`
-
-Filesystem operations:
-
-- `FUSE_STATFS`
-
-#### Permissions
-
-- `FUSE_ACCESS` is used to check if a node is accessible, as part of many
- syscall implementations. Maps to `vfs.FilesystemImpl.AccessAt` in the
- sentry.
-
-#### I/O Operations
-
-These ops are used to read and write file pages. They're used to implement both
-I/O syscalls like `read(2)` and `write(2)`, and memory mapping via `mmap(2)`.
-
-- `FUSE_READ`
-- `FUSE_WRITE`
-
-#### Miscellaneous
-
-- `FUSE_FLUSH`: Used by the client to indicate when a file descriptor is
- closed. Distinct from `FUSE_FSYNC`, which corresponds to an `fsync(2)`
- syscall from the user. Maps to `vfs.FileDescriptorImpl.Release` in the
- sentry.
-- `FUSE_BMAP`: Old address space API for block defrag. Probably not needed.
-- `FUSE_NOTIFY_REPLY`: [TODO: what does this do?]
-
-# References
-
-- [fuse(4) Linux manual page](https://www.man7.org/linux/man-pages/man4/fuse.4.html)
-- [Linux kernel FUSE documentation](https://www.kernel.org/doc/html/latest/filesystems/fuse.html)
-- [The reference implementation of the Linux FUSE (Filesystem in Userspace)
- interface](https://github.com/libfuse/libfuse)
-- [The kernel interface of FUSE](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/include/uapi/linux/fuse.h)
diff --git a/pkg/sentry/fs/g3doc/inotify.md b/pkg/sentry/fs/g3doc/inotify.md
deleted file mode 100644
index 85063d4e6..000000000
--- a/pkg/sentry/fs/g3doc/inotify.md
+++ /dev/null
@@ -1,122 +0,0 @@
-# Inotify
-
-Inotify implements the like-named filesystem event notification system for the
-sentry, see `inotify(7)`.
-
-## Architecture
-
-For the most part, the sentry implementation of inotify mirrors the Linux
-architecture. Inotify instances (i.e. the fd returned by inotify_init(2)) are
-backed by a pseudo-filesystem. Events are generated from various places in the
-sentry, including the [syscall layer][syscall_dir], the [vfs layer][dirent] and
-the [process fd table][fd_table]. Watches are stored in inodes and generated
-events are queued to the inotify instance owning the watches for delivery to the
-user.
-
-## Objects
-
-Here is a brief description of the existing and new objects involved in the
-sentry inotify mechanism, and how they interact:
-
-### [`fs.Inotify`][inotify]
-
-- An inotify instance, created by inotify_init(2)/inotify_init1(2).
-- The inotify fd has a `fs.Dirent` and supports filesystem syscalls to read
-  events.
-- Has multiple `fs.Watch`es, with at most one watch per target inode, per
- inotify instance.
-- Has an instance `id` which is globally unique. This is *not* the fd number
- for this instance, since the fd can be duped. This `id` is not externally
- visible.
-
-### [`fs.Watch`][watch]
-
-- An inotify watch, created/deleted by
- inotify_add_watch(2)/inotify_rm_watch(2).
-- Owned by an `fs.Inotify` instance, each watch keeps a pointer to the
- `owner`.
-- Associated with a single `fs.Inode`, which is the watch `target`. While the
- watch is active, it indirectly pins `target` to memory. See the "Reference
- Model" section for a detailed explanation.
-- Filesystem operations on `target` generate `fs.Event`s.
-
-### [`fs.Event`][event]
-
-- A simple struct encapsulating all the fields for an inotify event.
-- Generated by `fs.Watch`es and forwarded to the watches' `owner`s.
-- Serialized to the user during read(2) syscalls on the associated
- `fs.Inotify`'s fd.
-
-### [`fs.Dirent`][dirent]
-
-- Many inotify events are generated inside dirent methods. Events are
- generated in the dirent methods rather than `fs.Inode` methods because some
- events carry the name of the subject node, and node names are generally
- unavailable in an `fs.Inode`.
-- Dirents do not directly contain state for any watches. Instead, they forward
- notifications to the underlying `fs.Inode`.
-
-### [`fs.Inode`][inode]
-
-- Interacts with inotify through `fs.Watch`es.
-- Inodes contain a map of all active `fs.Watch`es on them.
-- An `fs.Inotify` instance can have at most one `fs.Watch` per inode.
- `fs.Watch`es on an inode are indexed by their `owner`'s `id`.
-- All inotify logic is encapsulated in the [`Watches`][inode_watches] struct
- in an inode. Logically, `Watches` is the set of inotify watches on the
- inode.
-
-## Reference Model
-
-The sentry inotify implementation has a complex reference model. An inotify
-watch observes a single inode. For efficient lookup, the state for a watch is
-stored directly on the target inode. This state needs to be persistent for the
-lifetime of watch. Unlike usual filesystem metadata, the watch state has no
-"on-disk" representation, so they cannot be reconstructed by the filesystem if
-the inode is flushed from memory. This effectively means we need to keep any
-inodes with actives watches pinned to memory.
-
-We can't just hold an extra ref on the inode to pin it to memory because some
-filesystems (such as gofer-based filesystems) don't have persistent inodes. In
-such a filesystem, if we just pin the inode, nothing prevents the enclosing
-dirent from being GCed. Once the dirent is GCed, the pinned inode is
-unreachable -- these filesystems generate a new inode by re-reading the node
-state on the next walk. Incidentally, hardlinks also don't work on these
-filesystems for this reason.
-
-To prevent the above scenario, when a new watch is added on an inode, we *pin*
-the dirent we used to reach the inode. Note that due to hardlinks, this dirent
-may not be the only dirent pointing to the inode. Attempting to set an inotify
-watch via multiple hardlinks to the same file results in the same watch being
-returned for both links. However, for each new dirent we use to reach the same
-inode, we add a new pin. We need a new pin for each new dirent used to reach the
-inode because we have no guarantees about the deletion order of the different
-links to the inode.
-
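-One way to picture the per-dirent pinning described above (the types here are
-simplified stand-ins, not the sentry's actual structures):
-
-```go
-package main
-
-import (
-	"fmt"
-	"sync"
-)
-
-type Dirent struct{ name string }
-
-type Watch struct {
-	mu   sync.Mutex
-	pins map[*Dirent]bool
-}
-
-// Pin records the dirent used to reach the watched inode; a second walk
-// through a different hardlink adds a second, independent pin.
-func (w *Watch) Pin(d *Dirent) {
-	w.mu.Lock()
-	defer w.mu.Unlock()
-	if !w.pins[d] {
-		w.pins[d] = true
-		// A real implementation would also take a reference on d here.
-	}
-}
-
-func (w *Watch) Unpin(d *Dirent) {
-	w.mu.Lock()
-	defer w.mu.Unlock()
-	if w.pins[d] {
-		delete(w.pins, d)
-		// ... and drop the reference taken in Pin.
-	}
-}
-
-func main() {
-	w := &Watch{pins: make(map[*Dirent]bool)}
-	a, b := &Dirent{name: "link1"}, &Dirent{name: "link2"}
-	w.Pin(a)
-	w.Pin(b) // Same inode via another hardlink: a separate pin.
-	fmt.Println(len(w.pins)) // 2
-}
-```
-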
-## Lock Ordering
-
-There are 4 locks related to the inotify implementation:
-
-- `Inotify.mu`: the inotify instance lock.
-- `Inotify.evMu`: the inotify event queue lock.
-- `Watch.mu`: the watch lock, used to protect pins.
-- `fs.Watches.mu`: the inode watch set lock, used to protect the collection of
- watches on the inode.
-
-The correct lock ordering for inotify code is:
-
-`Inotify.mu` -> `fs.Watches.mu` -> `Watch.mu` -> `Inotify.evMu`.
-
-We need a distinct lock for the event queue because by the time a goroutine
-attempts to queue a new event, it is already holding `fs.Watches.mu`. If we used
-`Inotify.mu` to also protect the event queue, this would violate the above lock
-ordering.
-
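-A toy sketch of a queueing path that respects this ordering (the types and
-fields below are simplified stand-ins, not the sentry's actual structures):
-
-```go
-package main
-
-import (
-	"fmt"
-	"sync"
-)
-
-type Event struct{ name string }
-
-type Inotify struct {
-	mu     sync.Mutex // instance lock (not exercised in this sketch)
-	evMu   sync.Mutex // event queue lock, last in the ordering
-	events []Event
-}
-
-type Watch struct {
-	mu    sync.Mutex // protects owner in this sketch
-	owner *Inotify
-}
-
-type Watches struct {
-	mu      sync.Mutex // protects the watch collection
-	watches []*Watch
-}
-
-// notify acquires locks strictly in the documented order: Watches.mu, then
-// each Watch.mu, and finally the owner's evMu when queueing the event.
-func (w *Watches) notify(ev Event) {
-	w.mu.Lock()
-	defer w.mu.Unlock()
-	for _, watch := range w.watches {
-		watch.mu.Lock()
-		owner := watch.owner
-		watch.mu.Unlock()
-
-		owner.evMu.Lock()
-		owner.events = append(owner.events, ev)
-		owner.evMu.Unlock()
-	}
-}
-
-func main() {
-	in := &Inotify{}
-	ws := &Watches{watches: []*Watch{{owner: in}}}
-	ws.notify(Event{name: "file"})
-	fmt.Println(len(in.events)) // 1
-}
-```
-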
-[dirent]: https://github.com/google/gvisor/blob/master/pkg/sentry/fs/dirent.go
-[event]: https://github.com/google/gvisor/blob/master/pkg/sentry/fs/inotify_event.go
-[fd_table]: https://github.com/google/gvisor/blob/master/pkg/sentry/kernel/fd_table.go
-[inode]: https://github.com/google/gvisor/blob/master/pkg/sentry/fs/inode.go
-[inode_watches]: https://github.com/google/gvisor/blob/master/pkg/sentry/fs/inode_inotify.go
-[inotify]: https://github.com/google/gvisor/blob/master/pkg/sentry/fs/inotify.go
-[syscall_dir]: https://github.com/google/gvisor/blob/master/pkg/sentry/syscalls/linux/
-[watch]: https://github.com/google/gvisor/blob/master/pkg/sentry/fs/inotify_watch.go
diff --git a/pkg/sentry/fs/gofer/BUILD b/pkg/sentry/fs/gofer/BUILD
deleted file mode 100644
index c08301d19..000000000
--- a/pkg/sentry/fs/gofer/BUILD
+++ /dev/null
@@ -1,72 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "gofer",
- srcs = [
- "attr.go",
- "cache_policy.go",
- "context_file.go",
- "device.go",
- "fifo.go",
- "file.go",
- "file_state.go",
- "fs.go",
- "handles.go",
- "inode.go",
- "inode_state.go",
- "path.go",
- "session.go",
- "session_state.go",
- "socket.go",
- "util.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/fd",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/metric",
- "//pkg/p9",
- "//pkg/refs",
- "//pkg/safemem",
- "//pkg/secio",
- "//pkg/sentry/device",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/fdpipe",
- "//pkg/sentry/fs/fsutil",
- "//pkg/sentry/fs/host",
- "//pkg/sentry/fsmetric",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/pipe",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/memmap",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sync",
- "//pkg/syserr",
- "//pkg/syserror",
- "//pkg/unet",
- "//pkg/usermem",
- "//pkg/waiter",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "gofer_test",
- size = "small",
- srcs = ["gofer_test.go"],
- library = ":gofer",
- deps = [
- "//pkg/context",
- "//pkg/p9",
- "//pkg/p9/p9test",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/fs",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/sentry/fs/gofer/gofer_state_autogen.go b/pkg/sentry/fs/gofer/gofer_state_autogen.go
new file mode 100644
index 000000000..d22ecc3c2
--- /dev/null
+++ b/pkg/sentry/fs/gofer/gofer_state_autogen.go
@@ -0,0 +1,271 @@
+// automatically generated by stateify.
+
+package gofer
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (i *fifo) StateTypeName() string {
+ return "pkg/sentry/fs/gofer.fifo"
+}
+
+func (i *fifo) StateFields() []string {
+ return []string{
+ "InodeOperations",
+ "fileIops",
+ }
+}
+
+func (i *fifo) beforeSave() {}
+
+// +checklocksignore
+func (i *fifo) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.InodeOperations)
+ stateSinkObject.Save(1, &i.fileIops)
+}
+
+func (i *fifo) afterLoad() {}
+
+// +checklocksignore
+func (i *fifo) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.InodeOperations)
+ stateSourceObject.Load(1, &i.fileIops)
+}
+
+func (f *fileOperations) StateTypeName() string {
+ return "pkg/sentry/fs/gofer.fileOperations"
+}
+
+func (f *fileOperations) StateFields() []string {
+ return []string{
+ "inodeOperations",
+ "dirCursor",
+ "flags",
+ }
+}
+
+func (f *fileOperations) beforeSave() {}
+
+// +checklocksignore
+func (f *fileOperations) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.inodeOperations)
+ stateSinkObject.Save(1, &f.dirCursor)
+ stateSinkObject.Save(2, &f.flags)
+}
+
+// +checklocksignore
+func (f *fileOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.LoadWait(0, &f.inodeOperations)
+ stateSourceObject.Load(1, &f.dirCursor)
+ stateSourceObject.LoadWait(2, &f.flags)
+ stateSourceObject.AfterLoad(f.afterLoad)
+}
+
+func (f *filesystem) StateTypeName() string {
+ return "pkg/sentry/fs/gofer.filesystem"
+}
+
+func (f *filesystem) StateFields() []string {
+ return []string{}
+}
+
+func (f *filesystem) beforeSave() {}
+
+// +checklocksignore
+func (f *filesystem) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+}
+
+func (f *filesystem) afterLoad() {}
+
+// +checklocksignore
+func (f *filesystem) StateLoad(stateSourceObject state.Source) {
+}
+
+func (i *inodeOperations) StateTypeName() string {
+ return "pkg/sentry/fs/gofer.inodeOperations"
+}
+
+func (i *inodeOperations) StateFields() []string {
+ return []string{
+ "fileState",
+ "cachingInodeOps",
+ }
+}
+
+func (i *inodeOperations) beforeSave() {}
+
+// +checklocksignore
+func (i *inodeOperations) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.fileState)
+ stateSinkObject.Save(1, &i.cachingInodeOps)
+}
+
+func (i *inodeOperations) afterLoad() {}
+
+// +checklocksignore
+func (i *inodeOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.LoadWait(0, &i.fileState)
+ stateSourceObject.Load(1, &i.cachingInodeOps)
+}
+
+func (i *inodeFileState) StateTypeName() string {
+ return "pkg/sentry/fs/gofer.inodeFileState"
+}
+
+func (i *inodeFileState) StateFields() []string {
+ return []string{
+ "s",
+ "sattr",
+ "loading",
+ "savedUAttr",
+ "hostMappable",
+ }
+}
+
+// +checklocksignore
+func (i *inodeFileState) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ var loadingValue struct{} = i.saveLoading()
+ stateSinkObject.SaveValue(2, loadingValue)
+ stateSinkObject.Save(0, &i.s)
+ stateSinkObject.Save(1, &i.sattr)
+ stateSinkObject.Save(3, &i.savedUAttr)
+ stateSinkObject.Save(4, &i.hostMappable)
+}
+
+// +checklocksignore
+func (i *inodeFileState) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.LoadWait(0, &i.s)
+ stateSourceObject.LoadWait(1, &i.sattr)
+ stateSourceObject.Load(3, &i.savedUAttr)
+ stateSourceObject.Load(4, &i.hostMappable)
+ stateSourceObject.LoadValue(2, new(struct{}), func(y interface{}) { i.loadLoading(y.(struct{})) })
+ stateSourceObject.AfterLoad(i.afterLoad)
+}
+
+func (l *overrideInfo) StateTypeName() string {
+ return "pkg/sentry/fs/gofer.overrideInfo"
+}
+
+func (l *overrideInfo) StateFields() []string {
+ return []string{
+ "dirent",
+ "endpoint",
+ "inode",
+ }
+}
+
+func (l *overrideInfo) beforeSave() {}
+
+// +checklocksignore
+func (l *overrideInfo) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.dirent)
+ stateSinkObject.Save(1, &l.endpoint)
+ stateSinkObject.Save(2, &l.inode)
+}
+
+func (l *overrideInfo) afterLoad() {}
+
+// +checklocksignore
+func (l *overrideInfo) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.dirent)
+ stateSourceObject.Load(1, &l.endpoint)
+ stateSourceObject.Load(2, &l.inode)
+}
+
+func (e *overrideMaps) StateTypeName() string {
+ return "pkg/sentry/fs/gofer.overrideMaps"
+}
+
+func (e *overrideMaps) StateFields() []string {
+ return []string{
+ "pathMap",
+ }
+}
+
+func (e *overrideMaps) beforeSave() {}
+
+// +checklocksignore
+func (e *overrideMaps) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.pathMap)
+}
+
+func (e *overrideMaps) afterLoad() {}
+
+// +checklocksignore
+func (e *overrideMaps) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.pathMap)
+}
+
+func (s *session) StateTypeName() string {
+ return "pkg/sentry/fs/gofer.session"
+}
+
+func (s *session) StateFields() []string {
+ return []string{
+ "AtomicRefCount",
+ "msize",
+ "version",
+ "cachePolicy",
+ "aname",
+ "superBlockFlags",
+ "limitHostFDTranslation",
+ "overlayfsStaleRead",
+ "connID",
+ "inodeMappings",
+ "mounter",
+ "overrides",
+ }
+}
+
+// +checklocksignore
+func (s *session) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.AtomicRefCount)
+ stateSinkObject.Save(1, &s.msize)
+ stateSinkObject.Save(2, &s.version)
+ stateSinkObject.Save(3, &s.cachePolicy)
+ stateSinkObject.Save(4, &s.aname)
+ stateSinkObject.Save(5, &s.superBlockFlags)
+ stateSinkObject.Save(6, &s.limitHostFDTranslation)
+ stateSinkObject.Save(7, &s.overlayfsStaleRead)
+ stateSinkObject.Save(8, &s.connID)
+ stateSinkObject.Save(9, &s.inodeMappings)
+ stateSinkObject.Save(10, &s.mounter)
+ stateSinkObject.Save(11, &s.overrides)
+}
+
+// +checklocksignore
+func (s *session) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.AtomicRefCount)
+ stateSourceObject.LoadWait(1, &s.msize)
+ stateSourceObject.LoadWait(2, &s.version)
+ stateSourceObject.LoadWait(3, &s.cachePolicy)
+ stateSourceObject.LoadWait(4, &s.aname)
+ stateSourceObject.LoadWait(5, &s.superBlockFlags)
+ stateSourceObject.Load(6, &s.limitHostFDTranslation)
+ stateSourceObject.Load(7, &s.overlayfsStaleRead)
+ stateSourceObject.LoadWait(8, &s.connID)
+ stateSourceObject.LoadWait(9, &s.inodeMappings)
+ stateSourceObject.LoadWait(10, &s.mounter)
+ stateSourceObject.LoadWait(11, &s.overrides)
+ stateSourceObject.AfterLoad(s.afterLoad)
+}
+
+func init() {
+ state.Register((*fifo)(nil))
+ state.Register((*fileOperations)(nil))
+ state.Register((*filesystem)(nil))
+ state.Register((*inodeOperations)(nil))
+ state.Register((*inodeFileState)(nil))
+ state.Register((*overrideInfo)(nil))
+ state.Register((*overrideMaps)(nil))
+ state.Register((*session)(nil))
+}
diff --git a/pkg/sentry/fs/gofer/gofer_test.go b/pkg/sentry/fs/gofer/gofer_test.go
deleted file mode 100644
index 546ee7d04..000000000
--- a/pkg/sentry/fs/gofer/gofer_test.go
+++ /dev/null
@@ -1,310 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package gofer
-
-import (
- "fmt"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/p9"
- "gvisor.dev/gvisor/pkg/p9/p9test"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/fs"
-)
-
-// rootTest runs a test with a p9 mock and an fs.InodeOperations created from
-// the attached root directory. The root file will be closed and client
-// disconnected, but additional files must be closed manually.
-func rootTest(t *testing.T, name string, cp cachePolicy, fn func(context.Context, *p9test.Harness, *p9test.Mock, *fs.Inode)) {
- t.Run(name, func(t *testing.T) {
- h, c := p9test.NewHarness(t)
- defer h.Finish()
-
- // Create a new root. Note that we pass an empty, but non-nil
- // map here. This allows tests to extend the root children
- // dynamically.
- root := h.NewDirectory(map[string]p9test.Generator{})(nil)
-
- // Return this as the root.
- h.Attacher.EXPECT().Attach().Return(root, nil).Times(1)
-
- // ... and open via the client.
- rootFile, err := c.Attach("/")
- if err != nil {
- t.Fatalf("unable to attach: %v", err)
- }
- defer rootFile.Close()
-
- // Wrap in a session.
- s := &session{
- mounter: fs.RootOwner,
- cachePolicy: cp,
- client: c,
- }
-
- // ... and an INode, with only the mode being explicitly valid for now.
- ctx := contexttest.Context(t)
- sattr, rootInodeOperations := newInodeOperations(ctx, s, contextFile{
- file: rootFile,
- }, root.QID, p9.AttrMaskAll(), root.Attr)
- m := fs.NewMountSource(ctx, s, &filesystem{}, fs.MountSourceFlags{})
- rootInode := fs.NewInode(ctx, rootInodeOperations, m, sattr)
-
- // Ensure that the cache is fully invalidated, so that any
- // close actions actually take place before the full harness is
- // torn down.
- defer func() {
- m.FlushDirentRefs()
-
- // Wait for all resources to be released, otherwise the
- // operations may fail after we close the rootFile.
- fs.AsyncBarrier()
- }()
-
- // Execute the test.
- fn(ctx, h, root, rootInode)
- })
-}
-
-func TestLookup(t *testing.T) {
- type lookupTest struct {
- // Name of the test.
- name string
-
- // Expected return value.
- want error
- }
-
- tests := []lookupTest{
- {
- name: "mock Walk passes (function succeeds)",
- want: nil,
- },
- {
- name: "mock Walk fails (function fails)",
- want: unix.ENOENT,
- },
- }
-
- const file = "file" // The walked target file.
-
- for _, test := range tests {
- rootTest(t, test.name, cacheNone, func(ctx context.Context, h *p9test.Harness, rootFile *p9test.Mock, rootInode *fs.Inode) {
- // Setup the appropriate result.
- rootFile.WalkCallback = func() error {
- return test.want
- }
- if test.want == nil {
- // Set the contents of the root. We expect a
- // normal file generator for ppp above. This is
- // overridden by setting WalkErr in the mock.
- rootFile.AddChild(file, h.NewFile())
- }
-
- // Call function.
- dirent, err := rootInode.Lookup(ctx, file)
-
- // Unwrap the InodeOperations.
- var newInodeOperations fs.InodeOperations
- if dirent != nil {
- if dirent.IsNegative() {
- err = unix.ENOENT
- } else {
- newInodeOperations = dirent.Inode.InodeOperations
- }
- }
-
- // Check return values.
- if err != test.want {
- t.Errorf("Lookup got err %v, want %v", err, test.want)
- }
- if err == nil && newInodeOperations == nil {
- t.Errorf("Lookup got non-nil err and non-nil node, wanted at least one non-nil")
- }
- })
- }
-}
-
-func TestRevalidation(t *testing.T) {
- type revalidationTest struct {
- cachePolicy cachePolicy
-
- // Whether dirent should be reloaded before any modifications.
- preModificationWantReload bool
-
- // Whether dirent should be reloaded after updating an unstable
- // attribute on the remote fs.
- postModificationWantReload bool
-
- // Whether dirent unstable attributes should be updated after
- // updating an attribute on the remote fs.
- postModificationWantUpdatedAttrs bool
-
- // Whether dirent should be reloaded after the remote has
- // removed the file.
- postRemovalWantReload bool
- }
-
- tests := []revalidationTest{
- {
- // Policy cacheNone causes Revalidate to always return
- // true.
- cachePolicy: cacheNone,
- preModificationWantReload: true,
- postModificationWantReload: true,
- postModificationWantUpdatedAttrs: true,
- postRemovalWantReload: true,
- },
- {
- // Policy cacheAll causes Revalidate to always return
- // false.
- cachePolicy: cacheAll,
- preModificationWantReload: false,
- postModificationWantReload: false,
- postModificationWantUpdatedAttrs: false,
- postRemovalWantReload: false,
- },
- {
- // Policy cacheAllWritethrough causes Revalidate to
- // always return false.
- cachePolicy: cacheAllWritethrough,
- preModificationWantReload: false,
- postModificationWantReload: false,
- postModificationWantUpdatedAttrs: false,
- postRemovalWantReload: false,
- },
- {
- // Policy cacheRemoteRevalidating causes Revalidate to
- // update cached unstable attrs, and to return true
- // only when the remote inode itself has been
- // removed or replaced.
- cachePolicy: cacheRemoteRevalidating,
- preModificationWantReload: false,
- postModificationWantReload: false,
- postModificationWantUpdatedAttrs: true,
- postRemovalWantReload: true,
- },
- }
-
- const file = "file" // The file walked below.
-
- for _, test := range tests {
- name := fmt.Sprintf("cachepolicy=%s", test.cachePolicy)
- rootTest(t, name, test.cachePolicy, func(ctx context.Context, h *p9test.Harness, rootFile *p9test.Mock, rootInode *fs.Inode) {
- // Wrap in a dirent object.
- rootDir := fs.NewDirent(ctx, rootInode, "root")
-
- // Create a mock file a child of the root. We save when
- // this is generated, so that when the time changed, we
- // can update the original entry.
- var origMocks []*p9test.Mock
- rootFile.AddChild(file, func(parent *p9test.Mock) *p9test.Mock {
- // Create a regular file that has a consistent
- // path number. This might be used by
- // validation so we don't change it.
- m := h.NewMock(parent, 0, p9.Attr{
- Mode: p9.ModeRegular,
- })
- origMocks = append(origMocks, m)
- return m
- })
-
- // Do the walk.
- dirent, err := rootDir.Walk(ctx, rootDir, file)
- if err != nil {
- t.Fatalf("Lookup failed: %v", err)
- }
-
- // We must release the dirent, or the test will fail
- // with a reference leak. This is tracked by p9test.
- defer dirent.DecRef(ctx)
-
- // Walk again. Depending on the cache policy, we may
- // get a new dirent.
- newDirent, err := rootDir.Walk(ctx, rootDir, file)
- if err != nil {
- t.Fatalf("Lookup failed: %v", err)
- }
- if test.preModificationWantReload && dirent == newDirent {
- t.Errorf("Lookup with cachePolicy=%s got old dirent %+v, wanted a new dirent", test.cachePolicy, dirent)
- }
- if !test.preModificationWantReload && dirent != newDirent {
- t.Errorf("Lookup with cachePolicy=%s got new dirent %+v, wanted old dirent %+v", test.cachePolicy, newDirent, dirent)
- }
- newDirent.DecRef(ctx) // See above.
-
- // Modify the underlying mocked file's modification
- // time for the next walk that occurs.
- nowSeconds := time.Now().Unix()
- rootFile.AddChild(file, func(parent *p9test.Mock) *p9test.Mock {
- // Ensure that the path is the same as above,
- // but we change only the modification time of
- // the file.
- return h.NewMock(parent, 0, p9.Attr{
- Mode: p9.ModeRegular,
- MTimeSeconds: uint64(nowSeconds),
- })
- })
-
- // We also modify the original time, so that GetAttr
- // behaves as expected for the caching case.
- for _, m := range origMocks {
- m.Attr.MTimeSeconds = uint64(nowSeconds)
- }
-
- // Walk again. Depending on the cache policy, we may
- // get a new dirent.
- newDirent, err = rootDir.Walk(ctx, rootDir, file)
- if err != nil {
- t.Fatalf("Lookup failed: %v", err)
- }
- if test.postModificationWantReload && dirent == newDirent {
- t.Errorf("Lookup with cachePolicy=%s got old dirent, wanted a new dirent", test.cachePolicy)
- }
- if !test.postModificationWantReload && dirent != newDirent {
- t.Errorf("Lookup with cachePolicy=%s got new dirent, wanted old dirent", test.cachePolicy)
- }
- uattrs, err := newDirent.Inode.UnstableAttr(ctx)
- if err != nil {
- t.Fatalf("Error getting unstable attrs: %v", err)
- }
- gotModTimeSeconds := uattrs.ModificationTime.Seconds()
- if test.postModificationWantUpdatedAttrs && gotModTimeSeconds != nowSeconds {
- t.Fatalf("Lookup with cachePolicy=%s got new modification time %v, wanted %v", test.cachePolicy, gotModTimeSeconds, nowSeconds)
- }
- newDirent.DecRef(ctx) // See above.
-
- // Remove the file from the remote fs; subsequent walks
- // should now fail to find anything.
- rootFile.RemoveChild(file)
-
- // Walk again. Depending on the cache policy, we may
- // get ENOENT.
- newDirent, err = rootDir.Walk(ctx, rootDir, file)
- if test.postRemovalWantReload && err == nil {
- t.Errorf("Lookup with cachePolicy=%s got nil error, wanted ENOENT", test.cachePolicy)
- }
- if !test.postRemovalWantReload && (err != nil || dirent != newDirent) {
- t.Errorf("Lookup with cachePolicy=%s got new dirent and error %v, wanted old dirent and nil error", test.cachePolicy, err)
- }
- if err == nil {
- newDirent.DecRef(ctx) // See above.
- }
- })
- }
-}
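The deleted test table above encodes the expected reload behaviour for each gofer cache policy. The following is a minimal, self-contained sketch of that decision table only; the `shouldReload` helper and its signature are hypothetical and are not part of the gofer package's actual Revalidate implementation.

```go
package main

import "fmt"

type cachePolicy int

const (
	cacheNone cachePolicy = iota
	cacheAll
	cacheAllWritethrough
	cacheRemoteRevalidating
)

// shouldReload reports whether a cached dirent is expected to be replaced on
// the next walk, given the cache policy and whether the remote file was
// removed or replaced, mirroring the expectations in the test table.
func shouldReload(policy cachePolicy, remoteRemoved bool) bool {
	switch policy {
	case cacheNone:
		// Revalidate always returns true, so every walk reloads.
		return true
	case cacheAll, cacheAllWritethrough:
		// Revalidate always returns false, so the cached dirent is kept.
		return false
	case cacheRemoteRevalidating:
		// Unstable attributes are refreshed in place; a reload happens only
		// when the remote inode itself was removed or replaced.
		return remoteRemoved
	default:
		return false
	}
}

func main() {
	fmt.Println(shouldReload(cacheRemoteRevalidating, false)) // false
	fmt.Println(shouldReload(cacheRemoteRevalidating, true))  // true
}
```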
diff --git a/pkg/sentry/fs/host/BUILD b/pkg/sentry/fs/host/BUILD
deleted file mode 100644
index 24fc6305c..000000000
--- a/pkg/sentry/fs/host/BUILD
+++ /dev/null
@@ -1,87 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "host",
- srcs = [
- "control.go",
- "descriptor.go",
- "descriptor_state.go",
- "device.go",
- "file.go",
- "host.go",
- "inode.go",
- "inode_state.go",
- "ioctl_unsafe.go",
- "socket.go",
- "socket_iovec.go",
- "socket_state.go",
- "socket_unsafe.go",
- "tty.go",
- "util.go",
- "util_amd64_unsafe.go",
- "util_arm64_unsafe.go",
- "util_unsafe.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/fd",
- "//pkg/fdnotifier",
- "//pkg/log",
- "//pkg/marshal/primitive",
- "//pkg/refs",
- "//pkg/safemem",
- "//pkg/secio",
- "//pkg/sentry/arch",
- "//pkg/sentry/device",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/fsutil",
- "//pkg/sentry/hostfd",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/memmap",
- "//pkg/sentry/socket/control",
- "//pkg/sentry/socket/unix",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/unimpl",
- "//pkg/sentry/uniqueid",
- "//pkg/sync",
- "//pkg/syserr",
- "//pkg/syserror",
- "//pkg/tcpip",
- "//pkg/unet",
- "//pkg/usermem",
- "//pkg/waiter",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "host_test",
- size = "small",
- srcs = [
- "descriptor_test.go",
- "inode_test.go",
- "socket_test.go",
- "wait_test.go",
- ],
- library = ":host",
- deps = [
- "//pkg/fd",
- "//pkg/fdnotifier",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/socket",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/syserr",
- "//pkg/tcpip",
- "//pkg/usermem",
- "//pkg/waiter",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/sentry/fs/host/descriptor_test.go b/pkg/sentry/fs/host/descriptor_test.go
deleted file mode 100644
index cb809ab2d..000000000
--- a/pkg/sentry/fs/host/descriptor_test.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package host
-
-import (
- "io/ioutil"
- "path/filepath"
- "testing"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/fdnotifier"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-func TestDescriptorRelease(t *testing.T) {
- for _, tc := range []struct {
- name string
- saveable bool
- wouldBlock bool
- }{
- {name: "all false"},
- {name: "saveable", saveable: true},
- {name: "wouldBlock", wouldBlock: true},
- } {
- t.Run(tc.name, func(t *testing.T) {
- dir, err := ioutil.TempDir("", "descriptor_test")
- if err != nil {
- t.Fatal("ioutil.TempDir() failed:", err)
- }
-
- fd, err := unix.Open(filepath.Join(dir, "file"), unix.O_RDWR|unix.O_CREAT, 0666)
- if err != nil {
- t.Fatal("failed to open temp file:", err)
- }
-
- // FD ownership is transferred to the descriptor.
- queue := &waiter.Queue{}
- d, err := newDescriptor(fd, tc.saveable, tc.wouldBlock, queue)
- if err != nil {
- unix.Close(fd)
- t.Fatalf("newDescriptor(%d, %t, %t, queue) failed, err: %v", fd, tc.saveable, tc.wouldBlock, err)
- }
- if tc.saveable {
- if d.origFD < 0 {
- t.Errorf("saveable descriptor must preserve origFD, desc: %+v", d)
- }
- }
- if tc.wouldBlock {
- if !fdnotifier.HasFD(int32(d.value)) {
- t.Errorf("FD not registered with notifier, desc: %+v", d)
- }
- }
-
- oldVal := d.value
- d.Release()
- if d.value != -1 {
- t.Errorf("d.value want: -1, got: %d", d.value)
- }
- if tc.wouldBlock {
- if fdnotifier.HasFD(int32(oldVal)) {
- t.Errorf("FD not unregistered with notifier, desc: %+v", d)
- }
- }
- })
- }
-}
diff --git a/pkg/sentry/fs/host/host_amd64_unsafe_state_autogen.go b/pkg/sentry/fs/host/host_amd64_unsafe_state_autogen.go
new file mode 100644
index 000000000..488cbdfcf
--- /dev/null
+++ b/pkg/sentry/fs/host/host_amd64_unsafe_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build amd64
+
+package host
diff --git a/pkg/sentry/fs/host/host_arm64_unsafe_state_autogen.go b/pkg/sentry/fs/host/host_arm64_unsafe_state_autogen.go
new file mode 100644
index 000000000..7371b44db
--- /dev/null
+++ b/pkg/sentry/fs/host/host_arm64_unsafe_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build arm64
+
+package host
diff --git a/pkg/sentry/fs/host/host_state_autogen.go b/pkg/sentry/fs/host/host_state_autogen.go
new file mode 100644
index 000000000..b06247919
--- /dev/null
+++ b/pkg/sentry/fs/host/host_state_autogen.go
@@ -0,0 +1,220 @@
+// automatically generated by stateify.
+
+package host
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (d *descriptor) StateTypeName() string {
+ return "pkg/sentry/fs/host.descriptor"
+}
+
+func (d *descriptor) StateFields() []string {
+ return []string{
+ "origFD",
+ "wouldBlock",
+ }
+}
+
+// +checklocksignore
+func (d *descriptor) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.origFD)
+ stateSinkObject.Save(1, &d.wouldBlock)
+}
+
+// +checklocksignore
+func (d *descriptor) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.origFD)
+ stateSourceObject.Load(1, &d.wouldBlock)
+ stateSourceObject.AfterLoad(d.afterLoad)
+}
+
+func (f *fileOperations) StateTypeName() string {
+ return "pkg/sentry/fs/host.fileOperations"
+}
+
+func (f *fileOperations) StateFields() []string {
+ return []string{
+ "iops",
+ "dirCursor",
+ }
+}
+
+func (f *fileOperations) beforeSave() {}
+
+// +checklocksignore
+func (f *fileOperations) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.iops)
+ stateSinkObject.Save(1, &f.dirCursor)
+}
+
+func (f *fileOperations) afterLoad() {}
+
+// +checklocksignore
+func (f *fileOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.LoadWait(0, &f.iops)
+ stateSourceObject.Load(1, &f.dirCursor)
+}
+
+func (f *filesystem) StateTypeName() string {
+ return "pkg/sentry/fs/host.filesystem"
+}
+
+func (f *filesystem) StateFields() []string {
+ return []string{}
+}
+
+func (f *filesystem) beforeSave() {}
+
+// +checklocksignore
+func (f *filesystem) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+}
+
+func (f *filesystem) afterLoad() {}
+
+// +checklocksignore
+func (f *filesystem) StateLoad(stateSourceObject state.Source) {
+}
+
+func (i *inodeOperations) StateTypeName() string {
+ return "pkg/sentry/fs/host.inodeOperations"
+}
+
+func (i *inodeOperations) StateFields() []string {
+ return []string{
+ "fileState",
+ "cachingInodeOps",
+ }
+}
+
+func (i *inodeOperations) beforeSave() {}
+
+// +checklocksignore
+func (i *inodeOperations) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.fileState)
+ stateSinkObject.Save(1, &i.cachingInodeOps)
+}
+
+func (i *inodeOperations) afterLoad() {}
+
+// +checklocksignore
+func (i *inodeOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.LoadWait(0, &i.fileState)
+ stateSourceObject.Load(1, &i.cachingInodeOps)
+}
+
+func (i *inodeFileState) StateTypeName() string {
+ return "pkg/sentry/fs/host.inodeFileState"
+}
+
+func (i *inodeFileState) StateFields() []string {
+ return []string{
+ "descriptor",
+ "sattr",
+ "savedUAttr",
+ }
+}
+
+func (i *inodeFileState) beforeSave() {}
+
+// +checklocksignore
+func (i *inodeFileState) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ if !state.IsZeroValue(&i.queue) {
+ state.Failf("queue is %#v, expected zero", &i.queue)
+ }
+ stateSinkObject.Save(0, &i.descriptor)
+ stateSinkObject.Save(1, &i.sattr)
+ stateSinkObject.Save(2, &i.savedUAttr)
+}
+
+// +checklocksignore
+func (i *inodeFileState) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.LoadWait(0, &i.descriptor)
+ stateSourceObject.LoadWait(1, &i.sattr)
+ stateSourceObject.Load(2, &i.savedUAttr)
+ stateSourceObject.AfterLoad(i.afterLoad)
+}
+
+func (c *ConnectedEndpoint) StateTypeName() string {
+ return "pkg/sentry/fs/host.ConnectedEndpoint"
+}
+
+func (c *ConnectedEndpoint) StateFields() []string {
+ return []string{
+ "ref",
+ "queue",
+ "path",
+ "srfd",
+ "stype",
+ }
+}
+
+// +checklocksignore
+func (c *ConnectedEndpoint) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.ref)
+ stateSinkObject.Save(1, &c.queue)
+ stateSinkObject.Save(2, &c.path)
+ stateSinkObject.Save(3, &c.srfd)
+ stateSinkObject.Save(4, &c.stype)
+}
+
+// +checklocksignore
+func (c *ConnectedEndpoint) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.ref)
+ stateSourceObject.Load(1, &c.queue)
+ stateSourceObject.Load(2, &c.path)
+ stateSourceObject.LoadWait(3, &c.srfd)
+ stateSourceObject.Load(4, &c.stype)
+ stateSourceObject.AfterLoad(c.afterLoad)
+}
+
+func (t *TTYFileOperations) StateTypeName() string {
+ return "pkg/sentry/fs/host.TTYFileOperations"
+}
+
+func (t *TTYFileOperations) StateFields() []string {
+ return []string{
+ "fileOperations",
+ "session",
+ "fgProcessGroup",
+ "termios",
+ }
+}
+
+func (t *TTYFileOperations) beforeSave() {}
+
+// +checklocksignore
+func (t *TTYFileOperations) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ stateSinkObject.Save(0, &t.fileOperations)
+ stateSinkObject.Save(1, &t.session)
+ stateSinkObject.Save(2, &t.fgProcessGroup)
+ stateSinkObject.Save(3, &t.termios)
+}
+
+func (t *TTYFileOperations) afterLoad() {}
+
+// +checklocksignore
+func (t *TTYFileOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.fileOperations)
+ stateSourceObject.Load(1, &t.session)
+ stateSourceObject.Load(2, &t.fgProcessGroup)
+ stateSourceObject.Load(3, &t.termios)
+}
+
+func init() {
+ state.Register((*descriptor)(nil))
+ state.Register((*fileOperations)(nil))
+ state.Register((*filesystem)(nil))
+ state.Register((*inodeOperations)(nil))
+ state.Register((*inodeFileState)(nil))
+ state.Register((*ConnectedEndpoint)(nil))
+ state.Register((*TTYFileOperations)(nil))
+}
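The generated file above shows the shape of the stateify save/restore interface: each saveable type reports its name and fields, then serializes them through a `state.Sink` and restores them from a `state.Source`, and registers itself in `init`. Below is a small hand-written sketch of the same pattern, assuming the `state` package API exactly as used in the generated code; the `example` type and its fields are invented for illustration and do not exist in the package.

```go
package host

import "gvisor.dev/gvisor/pkg/state"

// example is a hypothetical saveable type used only to illustrate the
// interface implemented by the generated code above.
type example struct {
	counter int64
	label   string
}

func (e *example) StateTypeName() string { return "pkg/sentry/fs/host.example" }

func (e *example) StateFields() []string { return []string{"counter", "label"} }

func (e *example) beforeSave() {}

// StateSave serializes each field at a fixed index.
func (e *example) StateSave(sink state.Sink) {
	e.beforeSave()
	sink.Save(0, &e.counter)
	sink.Save(1, &e.label)
}

func (e *example) afterLoad() {}

// StateLoad restores the fields using the same indices.
func (e *example) StateLoad(source state.Source) {
	source.Load(0, &e.counter)
	source.Load(1, &e.label)
	source.AfterLoad(e.afterLoad)
}

func init() {
	state.Register((*example)(nil))
}
```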
diff --git a/pkg/sentry/fs/host/host_unsafe_state_autogen.go b/pkg/sentry/fs/host/host_unsafe_state_autogen.go
new file mode 100644
index 000000000..b2d8c661f
--- /dev/null
+++ b/pkg/sentry/fs/host/host_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package host
diff --git a/pkg/sentry/fs/host/inode_test.go b/pkg/sentry/fs/host/inode_test.go
deleted file mode 100644
index 11738871b..000000000
--- a/pkg/sentry/fs/host/inode_test.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package host
-
-import (
- "testing"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
-)
-
-// TestCloseFD verifies fds will be closed.
-func TestCloseFD(t *testing.T) {
- var p [2]int
- if err := unix.Pipe(p[0:]); err != nil {
- t.Fatalf("Failed to create pipe %v", err)
- }
- defer unix.Close(p[0])
- defer unix.Close(p[1])
-
- // Use the write-end because we will detect if it's closed on the read end.
- ctx := contexttest.Context(t)
- file, err := NewFile(ctx, p[1])
- if err != nil {
- t.Fatalf("Failed to create File: %v", err)
- }
- file.DecRef(ctx)
-
- s := make([]byte, 10)
- if c, err := unix.Read(p[0], s); c != 0 || err != nil {
- t.Errorf("want 0, nil (EOF) from read end, got %v, %v", c, err)
- }
-}
diff --git a/pkg/sentry/fs/host/socket_test.go b/pkg/sentry/fs/host/socket_test.go
deleted file mode 100644
index f7014b6b1..000000000
--- a/pkg/sentry/fs/host/socket_test.go
+++ /dev/null
@@ -1,252 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package host
-
-import (
- "reflect"
- "testing"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/fd"
- "gvisor.dev/gvisor/pkg/fdnotifier"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
- "gvisor.dev/gvisor/pkg/sentry/socket"
- "gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
- "gvisor.dev/gvisor/pkg/syserr"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/usermem"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-var (
- // Make sure that ConnectedEndpoint implements transport.ConnectedEndpoint.
- _ = transport.ConnectedEndpoint(new(ConnectedEndpoint))
-
- // Make sure that ConnectedEndpoint implements transport.Receiver.
- _ = transport.Receiver(new(ConnectedEndpoint))
-)
-
-func getFl(fd int) (uint32, error) {
- fl, _, err := unix.RawSyscall(unix.SYS_FCNTL, uintptr(fd), unix.F_GETFL, 0)
- if err == 0 {
- return uint32(fl), nil
- }
- return 0, err
-}
-
-func TestSocketIsBlocking(t *testing.T) {
- // Using socketpair here because it's already connected.
- pair, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_STREAM, 0)
- if err != nil {
- t.Fatalf("host socket creation failed: %v", err)
- }
-
- fl, err := getFl(pair[0])
- if err != nil {
- t.Fatalf("getFl: fcntl(%v, GETFL) => %v", pair[0], err)
- }
- if fl&unix.O_NONBLOCK == unix.O_NONBLOCK {
- t.Fatalf("Expected socket %v to be blocking", pair[0])
- }
- if fl, err = getFl(pair[1]); err != nil {
- t.Fatalf("getFl: fcntl(%v, GETFL) => %v", pair[1], err)
- }
- if fl&unix.O_NONBLOCK == unix.O_NONBLOCK {
- t.Fatalf("Expected socket %v to be blocking", pair[1])
- }
- ctx := contexttest.Context(t)
- sock, err := newSocket(ctx, pair[0], false)
- if err != nil {
- t.Fatalf("newSocket(%v) failed => %v", pair[0], err)
- }
- defer sock.DecRef(ctx)
- // Test that the socket now is non-blocking.
- if fl, err = getFl(pair[0]); err != nil {
- t.Fatalf("getFl: fcntl(%v, GETFL) => %v", pair[0], err)
- }
- if fl&unix.O_NONBLOCK != unix.O_NONBLOCK {
- t.Errorf("Expected socket %v to have become non-blocking", pair[0])
- }
- if fl, err = getFl(pair[1]); err != nil {
- t.Fatalf("getFl: fcntl(%v, GETFL) => %v", pair[1], err)
- }
- if fl&unix.O_NONBLOCK == unix.O_NONBLOCK {
- t.Errorf("Did not expect socket %v to become non-blocking", pair[1])
- }
-}
-
-func TestSocketWritev(t *testing.T) {
- // Using socketpair here because it's already connected.
- pair, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_STREAM, 0)
- if err != nil {
- t.Fatalf("host socket creation failed: %v", err)
- }
- ctx := contexttest.Context(t)
- socket, err := newSocket(ctx, pair[0], false)
- if err != nil {
- t.Fatalf("newSocket(%v) => %v", pair[0], err)
- }
- defer socket.DecRef(ctx)
- buf := []byte("hello world\n")
- n, err := socket.Writev(contexttest.Context(t), usermem.BytesIOSequence(buf))
- if err != nil {
- t.Fatalf("socket writev failed: %v", err)
- }
-
- if n != int64(len(buf)) {
- t.Fatalf("socket writev wrote incorrect bytes: %d", n)
- }
-}
-
-func TestSocketWritevLen0(t *testing.T) {
- // Using socketpair here because it's already connected.
- pair, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_STREAM, 0)
- if err != nil {
- t.Fatalf("host socket creation failed: %v", err)
- }
- ctx := contexttest.Context(t)
- socket, err := newSocket(ctx, pair[0], false)
- if err != nil {
- t.Fatalf("newSocket(%v) => %v", pair[0], err)
- }
- defer socket.DecRef(ctx)
- n, err := socket.Writev(contexttest.Context(t), usermem.BytesIOSequence(nil))
- if err != nil {
- t.Fatalf("socket writev failed: %v", err)
- }
-
- if n != 0 {
- t.Fatalf("socket writev wrote incorrect bytes: %d", n)
- }
-}
-
-func TestSocketSendMsgLen0(t *testing.T) {
- // Using socketpair here because it's already connected.
- pair, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_STREAM, 0)
- if err != nil {
- t.Fatalf("host socket creation failed: %v", err)
- }
- ctx := contexttest.Context(t)
- sfile, err := newSocket(ctx, pair[0], false)
- if err != nil {
- t.Fatalf("newSocket(%v) => %v", pair[0], err)
- }
- defer sfile.DecRef(ctx)
-
- s := sfile.FileOperations.(socket.Socket)
- n, terr := s.SendMsg(nil, usermem.BytesIOSequence(nil), []byte{}, 0, false, ktime.Time{}, socket.ControlMessages{})
- if n != 0 {
- t.Fatalf("socket sendmsg() failed: %v wrote: %d", terr, n)
- }
-
- if terr != nil {
- t.Fatalf("socket sendmsg() failed: %v", terr)
- }
-}
-
-func TestListen(t *testing.T) {
- pair, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_STREAM, 0)
- if err != nil {
- t.Fatalf("unix.Socket(unix.AF_UNIX, unix.SOCK_STREAM, 0) => %v", err)
- }
- ctx := contexttest.Context(t)
- sfile1, err := newSocket(ctx, pair[0], false)
- if err != nil {
- t.Fatalf("newSocket(%v) => %v", pair[0], err)
- }
- defer sfile1.DecRef(ctx)
- socket1 := sfile1.FileOperations.(socket.Socket)
-
- sfile2, err := newSocket(ctx, pair[1], false)
- if err != nil {
- t.Fatalf("newSocket(%v) => %v", pair[1], err)
- }
- defer sfile2.DecRef(ctx)
- socket2 := sfile2.FileOperations.(socket.Socket)
-
- // Socketpairs cannot be listened on.
- if err := socket1.Listen(nil, 64); err != syserr.ErrInvalidEndpointState {
- t.Fatalf("socket1.Listen(nil, 64) => %v, want syserr.ErrInvalidEndpointState", err)
- }
- if err := socket2.Listen(nil, 64); err != syserr.ErrInvalidEndpointState {
- t.Fatalf("socket2.Listen(nil, 64) => %v, want syserr.ErrInvalidEndpointState", err)
- }
-
- // Create a Unix socket, do not bind it.
- sock, err := unix.Socket(unix.AF_UNIX, unix.SOCK_STREAM, 0)
- if err != nil {
- t.Fatalf("unix.Socket(unix.AF_UNIX, unix.SOCK_STREAM, 0) => %v", err)
- }
- sfile3, err := newSocket(ctx, sock, false)
- if err != nil {
- t.Fatalf("newSocket(%v) => %v", sock, err)
- }
- defer sfile3.DecRef(ctx)
- socket3 := sfile3.FileOperations.(socket.Socket)
-
- // This socket is not bound so we can't listen on it.
- if err := socket3.Listen(nil, 64); err != syserr.ErrInvalidEndpointState {
- t.Fatalf("socket3.Listen(nil, 64) => %v, want syserr.ErrInvalidEndpointState", err)
- }
-}
-
-func TestPasscred(t *testing.T) {
- e := &ConnectedEndpoint{}
- if got, want := e.Passcred(), false; got != want {
- t.Errorf("Got %#v.Passcred() = %t, want = %t", e, got, want)
- }
-}
-
-func TestGetLocalAddress(t *testing.T) {
- e := &ConnectedEndpoint{path: "foo"}
- want := tcpip.FullAddress{Addr: tcpip.Address("foo")}
- if got, err := e.GetLocalAddress(); err != nil || got != want {
- t.Errorf("Got %#v.GetLocalAddress() = %#v, %v, want = %#v, %v", e, got, err, want, nil)
- }
-}
-
-func TestQueuedSize(t *testing.T) {
- e := &ConnectedEndpoint{}
- tests := []struct {
- name string
- f func() int64
- }{
- {"SendQueuedSize", e.SendQueuedSize},
- {"RecvQueuedSize", e.RecvQueuedSize},
- }
-
- for _, test := range tests {
- if got, want := test.f(), int64(-1); got != want {
- t.Errorf("Got %#v.%s() = %d, want = %d", e, test.name, got, want)
- }
- }
-}
-
-func TestRelease(t *testing.T) {
- f, err := unix.Socket(unix.AF_UNIX, unix.SOCK_STREAM|unix.SOCK_NONBLOCK|unix.SOCK_CLOEXEC, 0)
- if err != nil {
- t.Fatal("Creating socket:", err)
- }
- c := &ConnectedEndpoint{queue: &waiter.Queue{}, file: fd.New(f)}
- want := &ConnectedEndpoint{queue: c.queue}
- ctx := contexttest.Context(t)
- want.ref.DecRef(ctx)
- fdnotifier.AddFD(int32(c.file.FD()), nil)
- c.Release(ctx)
- if !reflect.DeepEqual(c, want) {
- t.Errorf("got = %#v, want = %#v", c, want)
- }
-}
diff --git a/pkg/sentry/fs/host/wait_test.go b/pkg/sentry/fs/host/wait_test.go
deleted file mode 100644
index bd6188e03..000000000
--- a/pkg/sentry/fs/host/wait_test.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package host
-
-import (
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-func TestWait(t *testing.T) {
- var fds [2]int
- err := unix.Pipe(fds[:])
- if err != nil {
- t.Fatalf("Unable to create pipe: %v", err)
- }
-
- defer unix.Close(fds[1])
-
- ctx := contexttest.Context(t)
- file, err := NewFile(ctx, fds[0])
- if err != nil {
- unix.Close(fds[0])
- t.Fatalf("NewFile failed: %v", err)
- }
-
- defer file.DecRef(ctx)
-
- r := file.Readiness(waiter.ReadableEvents)
- if r != 0 {
- t.Fatalf("File is ready for read when it shouldn't be.")
- }
-
- e, ch := waiter.NewChannelEntry(nil)
- file.EventRegister(&e, waiter.ReadableEvents)
- defer file.EventUnregister(&e)
-
- // Check that there are no notifications yet.
- if len(ch) != 0 {
- t.Fatalf("Channel is non-empty")
- }
-
- // Write to the pipe, so the read end should be readable now.
- unix.Write(fds[1], []byte{1})
-
- // Check that we get a notification. We need to yield the current thread
- // so that the fdnotifier can deliver notifications, so we use a
- // 1-second timeout instead of just checking the length of the channel.
- select {
- case <-ch:
- case <-time.After(1 * time.Second):
- t.Fatalf("Channel not notified")
- }
-}
diff --git a/pkg/sentry/fs/inode_overlay_test.go b/pkg/sentry/fs/inode_overlay_test.go
deleted file mode 100644
index a3800d700..000000000
--- a/pkg/sentry/fs/inode_overlay_test.go
+++ /dev/null
@@ -1,470 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fs_test
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/errors/linuxerr"
- "gvisor.dev/gvisor/pkg/sentry/fs"
- "gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
- "gvisor.dev/gvisor/pkg/sentry/fs/ramfs"
- "gvisor.dev/gvisor/pkg/sentry/kernel/contexttest"
-)
-
-func TestLookup(t *testing.T) {
- ctx := contexttest.Context(t)
- for _, test := range []struct {
- // Test description.
- desc string
-
- // Lookup parameters.
- dir *fs.Inode
- name string
-
- // Want from lookup.
- found bool
- hasUpper bool
- hasLower bool
- }{
- {
- desc: "no upper, lower has name",
- dir: fs.NewTestOverlayDir(ctx,
- nil, /* upper */
- newTestRamfsDir(ctx, []dirContent{
- {
- name: "a",
- dir: false,
- },
- }, nil), /* lower */
- false /* revalidate */),
- name: "a",
- found: true,
- hasUpper: false,
- hasLower: true,
- },
- {
- desc: "no lower, upper has name",
- dir: fs.NewTestOverlayDir(ctx,
- newTestRamfsDir(ctx, []dirContent{
- {
- name: "a",
- dir: false,
- },
- }, nil), /* upper */
- nil, /* lower */
- false /* revalidate */),
- name: "a",
- found: true,
- hasUpper: true,
- hasLower: false,
- },
- {
- desc: "upper and lower, only lower has name",
- dir: fs.NewTestOverlayDir(ctx,
- newTestRamfsDir(ctx, []dirContent{
- {
- name: "b",
- dir: false,
- },
- }, nil), /* upper */
- newTestRamfsDir(ctx, []dirContent{
- {
- name: "a",
- dir: false,
- },
- }, nil), /* lower */
- false /* revalidate */),
- name: "a",
- found: true,
- hasUpper: false,
- hasLower: true,
- },
- {
- desc: "upper and lower, only upper has name",
- dir: fs.NewTestOverlayDir(ctx,
- newTestRamfsDir(ctx, []dirContent{
- {
- name: "a",
- dir: false,
- },
- }, nil), /* upper */
- newTestRamfsDir(ctx, []dirContent{
- {
- name: "b",
- dir: false,
- },
- }, nil), /* lower */
- false /* revalidate */),
- name: "a",
- found: true,
- hasUpper: true,
- hasLower: false,
- },
- {
- desc: "upper and lower, both have file",
- dir: fs.NewTestOverlayDir(ctx,
- newTestRamfsDir(ctx, []dirContent{
- {
- name: "a",
- dir: false,
- },
- }, nil), /* upper */
- newTestRamfsDir(ctx, []dirContent{
- {
- name: "a",
- dir: false,
- },
- }, nil), /* lower */
- false /* revalidate */),
- name: "a",
- found: true,
- hasUpper: true,
- hasLower: false,
- },
- {
- desc: "upper and lower, both have directory",
- dir: fs.NewTestOverlayDir(ctx,
- newTestRamfsDir(ctx, []dirContent{
- {
- name: "a",
- dir: true,
- },
- }, nil), /* upper */
- newTestRamfsDir(ctx, []dirContent{
- {
- name: "a",
- dir: true,
- },
- }, nil), /* lower */
- false /* revalidate */),
- name: "a",
- found: true,
- hasUpper: true,
- hasLower: true,
- },
- {
- desc: "upper and lower, upper negative masks lower file",
- dir: fs.NewTestOverlayDir(ctx,
- newTestRamfsDir(ctx, nil, []string{"a"}), /* upper */
- newTestRamfsDir(ctx, []dirContent{
- {
- name: "a",
- dir: false,
- },
- }, nil), /* lower */
- false /* revalidate */),
- name: "a",
- found: false,
- hasUpper: false,
- hasLower: false,
- },
- {
- desc: "upper and lower, upper negative does not mask lower file",
- dir: fs.NewTestOverlayDir(ctx,
- newTestRamfsDir(ctx, nil, []string{"b"}), /* upper */
- newTestRamfsDir(ctx, []dirContent{
- {
- name: "a",
- dir: false,
- },
- }, nil), /* lower */
- false /* revalidate */),
- name: "a",
- found: true,
- hasUpper: false,
- hasLower: true,
- },
- } {
- t.Run(test.desc, func(t *testing.T) {
- dirent, err := test.dir.Lookup(ctx, test.name)
- if test.found && (linuxerr.Equals(linuxerr.ENOENT, err) || dirent.IsNegative()) {
- t.Fatalf("lookup %q expected to find positive dirent, got dirent %v err %v", test.name, dirent, err)
- }
- if !test.found {
- if !linuxerr.Equals(linuxerr.ENOENT, err) && !dirent.IsNegative() {
- t.Errorf("lookup %q expected to return ENOENT or negative dirent, got dirent %v err %v", test.name, dirent, err)
- }
- // Nothing more to check.
- return
- }
- if hasUpper := dirent.Inode.TestHasUpperFS(); hasUpper != test.hasUpper {
- t.Fatalf("lookup got upper filesystem %v, want %v", hasUpper, test.hasUpper)
- }
- if hasLower := dirent.Inode.TestHasLowerFS(); hasLower != test.hasLower {
- t.Errorf("lookup got lower filesystem %v, want %v", hasLower, test.hasLower)
- }
- })
- }
-}
-
-func TestLookupRevalidation(t *testing.T) {
- // File name used in the tests.
- fileName := "foofile"
- ctx := contexttest.Context(t)
- for _, tc := range []struct {
- // Test description.
- desc string
-
- // Upper and lower fs for the overlay.
- upper *fs.Inode
- lower *fs.Inode
-
- // Whether the upper requires revalidation.
- revalidate bool
-
- // Whether we should get the same dirent on second lookup.
- wantSame bool
- }{
- {
- desc: "file from upper with no revalidation",
- upper: newTestRamfsDir(ctx, []dirContent{{name: fileName}}, nil),
- lower: newTestRamfsDir(ctx, nil, nil),
- revalidate: false,
- wantSame: true,
- },
- {
- desc: "file from upper with revalidation",
- upper: newTestRamfsDir(ctx, []dirContent{{name: fileName}}, nil),
- lower: newTestRamfsDir(ctx, nil, nil),
- revalidate: true,
- wantSame: false,
- },
- {
- desc: "file from lower with no revalidation",
- upper: newTestRamfsDir(ctx, nil, nil),
- lower: newTestRamfsDir(ctx, []dirContent{{name: fileName}}, nil),
- revalidate: false,
- wantSame: true,
- },
- {
- desc: "file from lower with revalidation",
- upper: newTestRamfsDir(ctx, nil, nil),
- lower: newTestRamfsDir(ctx, []dirContent{{name: fileName}}, nil),
- revalidate: true,
- // The file does not exist in the upper, so we do not
- // need to revalidate it.
- wantSame: true,
- },
- {
- desc: "file from upper and lower with no revalidation",
- upper: newTestRamfsDir(ctx, []dirContent{{name: fileName}}, nil),
- lower: newTestRamfsDir(ctx, []dirContent{{name: fileName}}, nil),
- revalidate: false,
- wantSame: true,
- },
- {
- desc: "file from upper and lower with revalidation",
- upper: newTestRamfsDir(ctx, []dirContent{{name: fileName}}, nil),
- lower: newTestRamfsDir(ctx, []dirContent{{name: fileName}}, nil),
- revalidate: true,
- wantSame: false,
- },
- } {
- t.Run(tc.desc, func(t *testing.T) {
- root := fs.NewDirent(ctx, newTestRamfsDir(ctx, nil, nil), "root")
- ctx = &rootContext{
- Context: ctx,
- root: root,
- }
- overlay := fs.NewDirent(ctx, fs.NewTestOverlayDir(ctx, tc.upper, tc.lower, tc.revalidate), "overlay")
- // Lookup the file twice through the overlay.
- first, err := overlay.Walk(ctx, root, fileName)
- if err != nil {
- t.Fatalf("overlay.Walk(%q) failed: %v", fileName, err)
- }
- second, err := overlay.Walk(ctx, root, fileName)
- if err != nil {
- t.Fatalf("overlay.Walk(%q) failed: %v", fileName, err)
- }
-
- if tc.wantSame && first != second {
- t.Errorf("dirent lookup got different dirents, wanted same\nfirst=%+v\nsecond=%+v", first, second)
- } else if !tc.wantSame && first == second {
- t.Errorf("dirent lookup got the same dirent, wanted different: %+v", first)
- }
- })
- }
-}
-
-func TestCacheFlush(t *testing.T) {
- ctx := contexttest.Context(t)
-
- // Upper and lower each have a file.
- upperFileName := "file-from-upper"
- lowerFileName := "file-from-lower"
- upper := newTestRamfsDir(ctx, []dirContent{{name: upperFileName}}, nil)
- lower := newTestRamfsDir(ctx, []dirContent{{name: lowerFileName}}, nil)
-
- overlay := fs.NewTestOverlayDir(ctx, upper, lower, true /* revalidate */)
-
- mns, err := fs.NewMountNamespace(ctx, overlay)
- if err != nil {
- t.Fatalf("NewMountNamespace failed: %v", err)
- }
- root := mns.Root()
- defer root.DecRef(ctx)
-
- ctx = &rootContext{
- Context: ctx,
- root: root,
- }
-
- for _, fileName := range []string{upperFileName, lowerFileName} {
- // Walk to the file.
- maxTraversals := uint(0)
- dirent, err := mns.FindInode(ctx, root, nil, fileName, &maxTraversals)
- if err != nil {
- t.Fatalf("FindInode(%q) failed: %v", fileName, err)
- }
-
- // Get a file from the dirent.
- file, err := dirent.Inode.GetFile(ctx, dirent, fs.FileFlags{Read: true})
- if err != nil {
- t.Fatalf("GetFile() failed: %v", err)
- }
-
- // The dirent should have 3 refs: one from us, one from the
- // file, and one from the dirent cache.
- if got, want := dirent.ReadRefs(), 3; int(got) != want {
- t.Errorf("dirent.ReadRefs() got %d want %d", got, want)
- }
-
- // Drop the file reference.
- file.DecRef(ctx)
-
- // Dirent should have 2 refs left.
- if got, want := dirent.ReadRefs(), 2; int(got) != want {
- t.Errorf("dirent.ReadRefs() got %d want %d", got, want)
- }
-
- // Flush the dirent cache.
- mns.FlushMountSourceRefs()
-
- // Dirent should have 1 ref left from the dirent cache.
- if got, want := dirent.ReadRefs(), 1; int(got) != want {
- t.Errorf("dirent.ReadRefs() got %d want %d", got, want)
- }
-
- // Drop our ref.
- dirent.DecRef(ctx)
-
- // We should be back to zero refs.
- if got, want := dirent.ReadRefs(), 0; int(got) != want {
- t.Errorf("dirent.ReadRefs() got %d want %d", got, want)
- }
- }
-
-}
-
-type dir struct {
- fs.InodeOperations
-
- // List of negative child names.
- negative []string
-
- // ReaddirCalled records whether Readdir was called on a file
- // corresponding to this inode.
- ReaddirCalled bool
-}
-
-// GetXattr implements InodeOperations.GetXattr.
-func (d *dir) GetXattr(_ context.Context, _ *fs.Inode, name string, _ uint64) (string, error) {
- for _, n := range d.negative {
- if name == fs.XattrOverlayWhiteout(n) {
- return "y", nil
- }
- }
- return "", linuxerr.ENOATTR
-}
-
-// GetFile implements InodeOperations.GetFile.
-func (d *dir) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) {
- file, err := d.InodeOperations.GetFile(ctx, dirent, flags)
- if err != nil {
- return nil, err
- }
- defer file.DecRef(ctx)
- // Wrap the file's FileOperations in a dirFile.
- fops := &dirFile{
- FileOperations: file.FileOperations,
- inode: d,
- }
- return fs.NewFile(ctx, dirent, flags, fops), nil
-}
-
-type dirContent struct {
- name string
- dir bool
-}
-
-type dirFile struct {
- fs.FileOperations
- inode *dir
-}
-
-type inode struct {
- fsutil.InodeGenericChecker `state:"nosave"`
- fsutil.InodeNoExtendedAttributes `state:"nosave"`
- fsutil.InodeNoopRelease `state:"nosave"`
- fsutil.InodeNoopWriteOut `state:"nosave"`
- fsutil.InodeNotAllocatable `state:"nosave"`
- fsutil.InodeNotDirectory `state:"nosave"`
- fsutil.InodeNotMappable `state:"nosave"`
- fsutil.InodeNotSocket `state:"nosave"`
- fsutil.InodeNotSymlink `state:"nosave"`
- fsutil.InodeNotTruncatable `state:"nosave"`
- fsutil.InodeNotVirtual `state:"nosave"`
-
- fsutil.InodeSimpleAttributes
- fsutil.InodeStaticFileGetter
-}
-
-// Readdir implements fs.FileOperations.Readdir. It sets the ReaddirCalled
-// field on the inode.
-func (f *dirFile) Readdir(ctx context.Context, file *fs.File, ser fs.DentrySerializer) (int64, error) {
- f.inode.ReaddirCalled = true
- return f.FileOperations.Readdir(ctx, file, ser)
-}
-
-func newTestRamfsInode(ctx context.Context, msrc *fs.MountSource) *fs.Inode {
- inode := fs.NewInode(ctx, &inode{
- InodeStaticFileGetter: fsutil.InodeStaticFileGetter{
- Contents: []byte("foobar"),
- },
- }, msrc, fs.StableAttr{Type: fs.RegularFile})
- return inode
-}
-
-func newTestRamfsDir(ctx context.Context, contains []dirContent, negative []string) *fs.Inode {
- msrc := fs.NewPseudoMountSource(ctx)
- contents := make(map[string]*fs.Inode)
- for _, c := range contains {
- if c.dir {
- contents[c.name] = newTestRamfsDir(ctx, nil, nil)
- } else {
- contents[c.name] = newTestRamfsInode(ctx, msrc)
- }
- }
- dops := ramfs.NewDir(ctx, contents, fs.RootOwner, fs.FilePermissions{
- User: fs.PermMask{Read: true, Execute: true},
- })
- return fs.NewInode(ctx, &dir{
- InodeOperations: dops,
- negative: negative,
- }, msrc, fs.StableAttr{Type: fs.Directory})
-}
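The deleted overlay test above marks negative (deleted-in-upper) entries by answering the whiteout extended attribute query in its `GetXattr`. As a brief usage sketch, the helper below only calls `fs.XattrOverlayWhiteout`, which the test itself relies on; the surrounding function is illustrative and not part of the original test.

```go
package fs_test

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/sentry/fs"
)

// exampleWhiteout prints the xattr name the overlay queries on the upper
// layer to decide whether the child name "a" has been whited out.
func exampleWhiteout() {
	fmt.Println(fs.XattrOverlayWhiteout("a"))
}
```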
diff --git a/pkg/sentry/fs/lock/BUILD b/pkg/sentry/fs/lock/BUILD
deleted file mode 100644
index c09d8463b..000000000
--- a/pkg/sentry/fs/lock/BUILD
+++ /dev/null
@@ -1,62 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "lock_range",
- out = "lock_range.go",
- package = "lock",
- prefix = "Lock",
- template = "//pkg/segment:generic_range",
- types = {
- "T": "uint64",
- },
-)
-
-go_template_instance(
- name = "lock_set",
- out = "lock_set.go",
- consts = {
- "minDegree": "3",
- },
- package = "lock",
- prefix = "Lock",
- template = "//pkg/segment:generic_set",
- types = {
- "Key": "uint64",
- "Range": "LockRange",
- "Value": "Lock",
- "Functions": "lockSetFunctions",
- },
-)
-
-go_library(
- name = "lock",
- srcs = [
- "lock.go",
- "lock_range.go",
- "lock_set.go",
- "lock_set_functions.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/log",
- "//pkg/sync",
- "//pkg/waiter",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "lock_test",
- size = "small",
- srcs = [
- "lock_range_test.go",
- "lock_test.go",
- ],
- library = ":lock",
- deps = ["@org_golang_x_sys//unix:go_default_library"],
-)
diff --git a/pkg/sentry/fs/lock/lock_range.go b/pkg/sentry/fs/lock/lock_range.go
new file mode 100644
index 000000000..13a2cce6e
--- /dev/null
+++ b/pkg/sentry/fs/lock/lock_range.go
@@ -0,0 +1,76 @@
+package lock
+
+// A Range represents a contiguous range of T.
+//
+// +stateify savable
+type LockRange struct {
+ // Start is the inclusive start of the range.
+ Start uint64
+
+ // End is the exclusive end of the range.
+ End uint64
+}
+
+// WellFormed returns true if r.Start <= r.End. All other methods on a Range
+// require that the Range is well-formed.
+//
+//go:nosplit
+func (r LockRange) WellFormed() bool {
+ return r.Start <= r.End
+}
+
+// Length returns the length of the range.
+//
+//go:nosplit
+func (r LockRange) Length() uint64 {
+ return r.End - r.Start
+}
+
+// Contains returns true if r contains x.
+//
+//go:nosplit
+func (r LockRange) Contains(x uint64) bool {
+ return r.Start <= x && x < r.End
+}
+
+// Overlaps returns true if r and r2 overlap.
+//
+//go:nosplit
+func (r LockRange) Overlaps(r2 LockRange) bool {
+ return r.Start < r2.End && r2.Start < r.End
+}
+
+// IsSupersetOf returns true if r is a superset of r2; that is, the range r2 is
+// contained within r.
+//
+//go:nosplit
+func (r LockRange) IsSupersetOf(r2 LockRange) bool {
+ return r.Start <= r2.Start && r.End >= r2.End
+}
+
+// Intersect returns a range consisting of the intersection between r and r2.
+// If r and r2 do not overlap, Intersect returns a range with unspecified
+// bounds, but for which Length() == 0.
+//
+//go:nosplit
+func (r LockRange) Intersect(r2 LockRange) LockRange {
+ if r.Start < r2.Start {
+ r.Start = r2.Start
+ }
+ if r.End > r2.End {
+ r.End = r2.End
+ }
+ if r.End < r.Start {
+ r.End = r.Start
+ }
+ return r
+}
+
+// CanSplitAt returns true if it is legal to split a segment spanning the range
+// r at x; that is, splitting at x would produce two ranges, both of which have
+// non-zero length.
+//
+//go:nosplit
+func (r LockRange) CanSplitAt(x uint64) bool {
+ return r.Contains(x) && r.Start < x
+}
diff --git a/pkg/sentry/fs/lock/lock_range_test.go b/pkg/sentry/fs/lock/lock_range_test.go
deleted file mode 100644
index 2b6f8630b..000000000
--- a/pkg/sentry/fs/lock/lock_range_test.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package lock
-
-import (
- "testing"
-
- "golang.org/x/sys/unix"
-)
-
-func TestComputeRange(t *testing.T) {
- tests := []struct {
- // Description of test.
- name string
-
- // Requested start of the lock range.
- start int64
-
- // Requested length of the lock range,
- // can be negative :(
- length int64
-
- // Pre-computed file offset based on whence.
- // Will be added to start.
- offset int64
-
- // Expected error.
- err error
-
- // If error is nil, the expected LockRange.
- LockRange
- }{
- {
- name: "offset, start, and length all zero",
- LockRange: LockRange{Start: 0, End: LockEOF},
- },
- {
- name: "zero offset, zero start, positive length",
- start: 0,
- length: 4096,
- offset: 0,
- LockRange: LockRange{Start: 0, End: 4096},
- },
- {
- name: "zero offset, negative start",
- start: -4096,
- offset: 0,
- err: unix.EINVAL,
- },
- {
- name: "large offset, negative start, positive length",
- start: -2048,
- length: 2048,
- offset: 4096,
- LockRange: LockRange{Start: 2048, End: 4096},
- },
- {
- name: "large offset, negative start, zero length",
- start: -2048,
- length: 0,
- offset: 4096,
- LockRange: LockRange{Start: 2048, End: LockEOF},
- },
- {
- name: "zero offset, zero start, negative length",
- start: 0,
- length: -4096,
- offset: 0,
- err: unix.EINVAL,
- },
- {
- name: "large offset, zero start, negative length",
- start: 0,
- length: -4096,
- offset: 4096,
- LockRange: LockRange{Start: 0, End: 4096},
- },
- {
- name: "offset, start, and length equal, length is negative",
- start: 1024,
- length: -1024,
- offset: 1024,
- LockRange: LockRange{Start: 1024, End: 2048},
- },
- {
- name: "offset, start, and length equal, start is negative",
- start: -1024,
- length: 1024,
- offset: 1024,
- LockRange: LockRange{Start: 0, End: 1024},
- },
- {
- name: "offset, start, and length equal, offset is negative",
- start: 1024,
- length: 1024,
- offset: -1024,
- LockRange: LockRange{Start: 0, End: 1024},
- },
- {
- name: "offset, start, and length equal, all negative",
- start: -1024,
- length: -1024,
- offset: -1024,
- err: unix.EINVAL,
- },
- {
- name: "offset, start, and length equal, all positive",
- start: 1024,
- length: 1024,
- offset: 1024,
- LockRange: LockRange{Start: 2048, End: 3072},
- },
- }
-
- for _, test := range tests {
- rng, err := ComputeRange(test.start, test.length, test.offset)
- if err != test.err {
- t.Errorf("%s: lockRange(%d, %d, %d) got error %v, want %v", test.name, test.start, test.length, test.offset, err, test.err)
- continue
- }
- if err == nil && rng != test.LockRange {
- t.Errorf("%s: lockRange(%d, %d, %d) got LockRange %v, want %v", test.name, test.start, test.length, test.offset, rng, test.LockRange)
- }
- }
-}
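The deleted table above documents `ComputeRange`: a zero length extends the lock to `LockEOF`, a negative length counts backwards from `offset+start`, and ranges that would begin before offset 0 are rejected with `EINVAL`. The sketch below only calls the function as the test did, with a few of the same inputs; it does not reimplement it.

```go
package lock

import "fmt"

// exampleComputeRange exercises a few ComputeRange cases from the deleted test.
func exampleComputeRange() {
	// Zero start, length, and offset: the lock extends to end of file.
	if rng, err := ComputeRange(0, 0, 0); err == nil {
		fmt.Println(rng) // Start 0, End LockEOF
	}

	// A negative length counts backwards from offset+start.
	if rng, err := ComputeRange(0, -4096, 4096); err == nil {
		fmt.Println(rng) // Start 0, End 4096
	}

	// A range that would start before offset 0 is rejected.
	if _, err := ComputeRange(-4096, 0, 0); err != nil {
		fmt.Println("invalid range:", err) // EINVAL
	}
}
```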
diff --git a/pkg/sentry/fs/lock/lock_set.go b/pkg/sentry/fs/lock/lock_set.go
new file mode 100644
index 000000000..4bc830883
--- /dev/null
+++ b/pkg/sentry/fs/lock/lock_set.go
@@ -0,0 +1,1643 @@
+package lock
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// trackGaps is an optional parameter.
+//
+// If trackGaps is 1, the Set will track maximum gap size recursively,
+// enabling the GapIterator.{Prev,Next}LargeEnoughGap functions. In this
+// case, Key must be an unsigned integer.
+//
+// trackGaps must be 0 or 1.
+const LocktrackGaps = 0
+
+var _ = uint8(LocktrackGaps << 7) // Will fail if not zero or one.
+
+// dynamicGap is a type that disappears if trackGaps is 0.
+type LockdynamicGap [LocktrackGaps]uint64
+
+// Get returns the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *LockdynamicGap) Get() uint64 {
+ return d[:][0]
+}
+
+// Set sets the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *LockdynamicGap) Set(v uint64) {
+ d[:][0] = v
+}
+
+const (
+ // minDegree is the minimum degree of an internal node in a Set B-tree.
+ //
+ // - Any non-root node has at least minDegree-1 segments.
+ //
+ // - Any non-root internal (non-leaf) node has at least minDegree children.
+ //
+ // - The root node may have fewer than minDegree-1 segments, but it may
+ // only have 0 segments if the tree is empty.
+ //
+ // Our implementation requires minDegree >= 3. Higher values of minDegree
+ // usually improve performance, but increase memory usage for small sets.
+ LockminDegree = 3
+
+ LockmaxDegree = 2 * LockminDegree
+)
+
+// A Set is a mapping of segments with non-overlapping Range keys. The zero
+// value for a Set is an empty set. Set values are not safely movable nor
+// copyable. Set is thread-compatible.
+//
+// +stateify savable
+type LockSet struct {
+ root Locknode `state:".(*LockSegmentDataSlices)"`
+}
+
+// IsEmpty returns true if the set contains no segments.
+func (s *LockSet) IsEmpty() bool {
+ return s.root.nrSegments == 0
+}
+
+// IsEmptyRange returns true iff no segments in the set overlap the given
+// range. This is semantically equivalent to s.SpanRange(r) == 0, but may be
+// more efficient.
+func (s *LockSet) IsEmptyRange(r LockRange) bool {
+ switch {
+ case r.Length() < 0:
+ panic(fmt.Sprintf("invalid range %v", r))
+ case r.Length() == 0:
+ return true
+ }
+ _, gap := s.Find(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ return r.End <= gap.End()
+}
+
+// Span returns the total size of all segments in the set.
+func (s *LockSet) Span() uint64 {
+ var sz uint64
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ sz += seg.Range().Length()
+ }
+ return sz
+}
+
+// SpanRange returns the total size of the intersection of segments in the set
+// with the given range.
+func (s *LockSet) SpanRange(r LockRange) uint64 {
+ switch {
+ case r.Length() < 0:
+ panic(fmt.Sprintf("invalid range %v", r))
+ case r.Length() == 0:
+ return 0
+ }
+ var sz uint64
+ for seg := s.LowerBoundSegment(r.Start); seg.Ok() && seg.Start() < r.End; seg = seg.NextSegment() {
+ sz += seg.Range().Intersect(r).Length()
+ }
+ return sz
+}
+
+// FirstSegment returns the first segment in the set. If the set is empty,
+// FirstSegment returns a terminal iterator.
+func (s *LockSet) FirstSegment() LockIterator {
+ if s.root.nrSegments == 0 {
+ return LockIterator{}
+ }
+ return s.root.firstSegment()
+}
+
+// LastSegment returns the last segment in the set. If the set is empty,
+// LastSegment returns a terminal iterator.
+func (s *LockSet) LastSegment() LockIterator {
+ if s.root.nrSegments == 0 {
+ return LockIterator{}
+ }
+ return s.root.lastSegment()
+}
+
+// FirstGap returns the first gap in the set.
+func (s *LockSet) FirstGap() LockGapIterator {
+ n := &s.root
+ for n.hasChildren {
+ n = n.children[0]
+ }
+ return LockGapIterator{n, 0}
+}
+
+// LastGap returns the last gap in the set.
+func (s *LockSet) LastGap() LockGapIterator {
+ n := &s.root
+ for n.hasChildren {
+ n = n.children[n.nrSegments]
+ }
+ return LockGapIterator{n, n.nrSegments}
+}
+
+// Find returns the segment or gap whose range contains the given key. If a
+// segment is found, the returned Iterator is non-terminal and the
+// returned GapIterator is terminal. Otherwise, the returned Iterator is
+// terminal and the returned GapIterator is non-terminal.
+func (s *LockSet) Find(key uint64) (LockIterator, LockGapIterator) {
+ n := &s.root
+ for {
+
+ lower := 0
+ upper := n.nrSegments
+ for lower < upper {
+ i := lower + (upper-lower)/2
+ if r := n.keys[i]; key < r.End {
+ if key >= r.Start {
+ return LockIterator{n, i}, LockGapIterator{}
+ }
+ upper = i
+ } else {
+ lower = i + 1
+ }
+ }
+ i := lower
+ if !n.hasChildren {
+ return LockIterator{}, LockGapIterator{n, i}
+ }
+ n = n.children[i]
+ }
+}
+
+// FindSegment returns the segment whose range contains the given key. If no
+// such segment exists, FindSegment returns a terminal iterator.
+func (s *LockSet) FindSegment(key uint64) LockIterator {
+ seg, _ := s.Find(key)
+ return seg
+}
+
+// LowerBoundSegment returns the segment with the lowest range that contains a
+// key greater than or equal to min. If no such segment exists,
+// LowerBoundSegment returns a terminal iterator.
+func (s *LockSet) LowerBoundSegment(min uint64) LockIterator {
+ seg, gap := s.Find(min)
+ if seg.Ok() {
+ return seg
+ }
+ return gap.NextSegment()
+}
+
+// UpperBoundSegment returns the segment with the highest range that contains a
+// key less than or equal to max. If no such segment exists, UpperBoundSegment
+// returns a terminal iterator.
+func (s *LockSet) UpperBoundSegment(max uint64) LockIterator {
+ seg, gap := s.Find(max)
+ if seg.Ok() {
+ return seg
+ }
+ return gap.PrevSegment()
+}
+
+// FindGap returns the gap containing the given key. If no such gap exists
+// (i.e. the set contains a segment containing that key), FindGap returns a
+// terminal iterator.
+func (s *LockSet) FindGap(key uint64) LockGapIterator {
+ _, gap := s.Find(key)
+ return gap
+}
+
+// LowerBoundGap returns the gap with the lowest range that is greater than or
+// equal to min.
+func (s *LockSet) LowerBoundGap(min uint64) LockGapIterator {
+ seg, gap := s.Find(min)
+ if gap.Ok() {
+ return gap
+ }
+ return seg.NextGap()
+}
+
+// UpperBoundGap returns the gap with the highest range that is less than or
+// equal to max.
+func (s *LockSet) UpperBoundGap(max uint64) LockGapIterator {
+ seg, gap := s.Find(max)
+ if gap.Ok() {
+ return gap
+ }
+ return seg.PrevGap()
+}
+
+// Add inserts the given segment into the set and returns true. If the new
+// segment can be merged with adjacent segments, Add will do so. If the new
+// segment would overlap an existing segment, Add returns false. If Add
+// succeeds, all existing iterators are invalidated.
+func (s *LockSet) Add(r LockRange, val Lock) bool {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ gap := s.FindGap(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ if r.End > gap.End() {
+ return false
+ }
+ s.Insert(gap, r, val)
+ return true
+}
+
+// AddWithoutMerging inserts the given segment into the set and returns true.
+// If it would overlap an existing segment, AddWithoutMerging does nothing and
+// returns false. If AddWithoutMerging succeeds, all existing iterators are
+// invalidated.
+func (s *LockSet) AddWithoutMerging(r LockRange, val Lock) bool {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ gap := s.FindGap(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ if r.End > gap.End() {
+ return false
+ }
+ s.InsertWithoutMergingUnchecked(gap, r, val)
+ return true
+}
+
+// Insert inserts the given segment into the given gap. If the new segment can
+// be merged with adjacent segments, Insert will do so. Insert returns an
+// iterator to the segment containing the inserted value (which may have been
+// merged with other values). All existing iterators (including gap, but not
+// including the returned iterator) are invalidated.
+//
+// If the gap cannot accommodate the segment, or if r is invalid, Insert panics.
+//
+// Insert is semantically equivalent to an InsertWithoutMerging followed by a
+// Merge, but may be more efficient. Note that there is no unchecked variant of
+// Insert since Insert must retrieve and inspect gap's predecessor and
+// successor segments regardless.
+func (s *LockSet) Insert(gap LockGapIterator, r LockRange, val Lock) LockIterator {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ prev, next := gap.PrevSegment(), gap.NextSegment()
+ if prev.Ok() && prev.End() > r.Start {
+ panic(fmt.Sprintf("new segment %v overlaps predecessor %v", r, prev.Range()))
+ }
+ if next.Ok() && next.Start() < r.End {
+ panic(fmt.Sprintf("new segment %v overlaps successor %v", r, next.Range()))
+ }
+ if prev.Ok() && prev.End() == r.Start {
+ if mval, ok := (lockSetFunctions{}).Merge(prev.Range(), prev.Value(), r, val); ok {
+ shrinkMaxGap := LocktrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
+ prev.SetEndUnchecked(r.End)
+ prev.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ if next.Ok() && next.Start() == r.End {
+ val = mval
+ if mval, ok := (lockSetFunctions{}).Merge(prev.Range(), val, next.Range(), next.Value()); ok {
+ prev.SetEndUnchecked(next.End())
+ prev.SetValue(mval)
+ return s.Remove(next).PrevSegment()
+ }
+ }
+ return prev
+ }
+ }
+ if next.Ok() && next.Start() == r.End {
+ if mval, ok := (lockSetFunctions{}).Merge(r, val, next.Range(), next.Value()); ok {
+ shrinkMaxGap := LocktrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
+ next.SetStartUnchecked(r.Start)
+ next.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ return next
+ }
+ }
+
+ return s.InsertWithoutMergingUnchecked(gap, r, val)
+}
+
+// InsertWithoutMerging inserts the given segment into the given gap and
+// returns an iterator to the inserted segment. All existing iterators
+// (including gap, but not including the returned iterator) are invalidated.
+//
+// If the gap cannot accommodate the segment, or if r is invalid,
+// InsertWithoutMerging panics.
+func (s *LockSet) InsertWithoutMerging(gap LockGapIterator, r LockRange, val Lock) LockIterator {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ if gr := gap.Range(); !gr.IsSupersetOf(r) {
+ panic(fmt.Sprintf("cannot insert segment range %v into gap range %v", r, gr))
+ }
+ return s.InsertWithoutMergingUnchecked(gap, r, val)
+}
+
+// InsertWithoutMergingUnchecked inserts the given segment into the given gap
+// and returns an iterator to the inserted segment. All existing iterators
+// (including gap, but not including the returned iterator) are invalidated.
+//
+// Preconditions:
+// * r.Start >= gap.Start().
+// * r.End <= gap.End().
+func (s *LockSet) InsertWithoutMergingUnchecked(gap LockGapIterator, r LockRange, val Lock) LockIterator {
+ gap = gap.node.rebalanceBeforeInsert(gap)
+ splitMaxGap := LocktrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
+ copy(gap.node.keys[gap.index+1:], gap.node.keys[gap.index:gap.node.nrSegments])
+ copy(gap.node.values[gap.index+1:], gap.node.values[gap.index:gap.node.nrSegments])
+ gap.node.keys[gap.index] = r
+ gap.node.values[gap.index] = val
+ gap.node.nrSegments++
+ if splitMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ return LockIterator{gap.node, gap.index}
+}
+
+// Remove removes the given segment and returns an iterator to the vacated gap.
+// All existing iterators (including seg, but not including the returned
+// iterator) are invalidated.
+func (s *LockSet) Remove(seg LockIterator) LockGapIterator {
+
+ if seg.node.hasChildren {
+
+ victim := seg.PrevSegment()
+
+ seg.SetRangeUnchecked(victim.Range())
+ seg.SetValue(victim.Value())
+
+ nextAdjacentNode := seg.NextSegment().node
+ if LocktrackGaps != 0 {
+ nextAdjacentNode.updateMaxGapLeaf()
+ }
+ return s.Remove(victim).NextGap()
+ }
+ copy(seg.node.keys[seg.index:], seg.node.keys[seg.index+1:seg.node.nrSegments])
+ copy(seg.node.values[seg.index:], seg.node.values[seg.index+1:seg.node.nrSegments])
+ lockSetFunctions{}.ClearValue(&seg.node.values[seg.node.nrSegments-1])
+ seg.node.nrSegments--
+ if LocktrackGaps != 0 {
+ seg.node.updateMaxGapLeaf()
+ }
+ return seg.node.rebalanceAfterRemove(LockGapIterator{seg.node, seg.index})
+}
+
+// RemoveAll removes all segments from the set. All existing iterators are
+// invalidated.
+func (s *LockSet) RemoveAll() {
+ s.root = Locknode{}
+}
+
+// RemoveRange removes all segments in the given range. An iterator to the
+// newly formed gap is returned, and all existing iterators are invalidated.
+func (s *LockSet) RemoveRange(r LockRange) LockGapIterator {
+ seg, gap := s.Find(r.Start)
+ if seg.Ok() {
+ seg = s.Isolate(seg, r)
+ gap = s.Remove(seg)
+ }
+ for seg = gap.NextSegment(); seg.Ok() && seg.Start() < r.End; seg = gap.NextSegment() {
+ seg = s.Isolate(seg, r)
+ gap = s.Remove(seg)
+ }
+ return gap
+}
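
A hedged sketch of replacing whatever currently covers a (non-empty) range, reusing the gap that RemoveRange hands back; it assumes the lock package import from the earlier sketch and a hypothetical helper name:

    func replaceRange(s *lock.LockSet, r lock.LockRange, val lock.Lock) {
        // RemoveRange isolates the segments straddling r.Start and r.End and
        // removes everything inside r, returning an iterator to the new gap.
        gap := s.RemoveRange(r)
        // The vacated gap spans at least r, so the checked insert should not
        // panic on a range/gap mismatch (r must still be non-empty).
        s.InsertWithoutMerging(gap, r, val)
    }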
+
+// Merge attempts to merge two neighboring segments. If successful, Merge
+// returns an iterator to the merged segment, and all existing iterators are
+// invalidated. Otherwise, Merge returns a terminal iterator.
+//
+// If first is not the predecessor of second, Merge panics.
+func (s *LockSet) Merge(first, second LockIterator) LockIterator {
+ if first.NextSegment() != second {
+ panic(fmt.Sprintf("attempt to merge non-neighboring segments %v, %v", first.Range(), second.Range()))
+ }
+ return s.MergeUnchecked(first, second)
+}
+
+// MergeUnchecked attempts to merge two neighboring segments. If successful,
+// MergeUnchecked returns an iterator to the merged segment, and all existing
+// iterators are invalidated. Otherwise, MergeUnchecked returns a terminal
+// iterator.
+//
+// Precondition: first is the predecessor of second, i.e. first.NextSegment()
+// == second and second.PrevSegment() == first.
+func (s *LockSet) MergeUnchecked(first, second LockIterator) LockIterator {
+ if first.End() == second.Start() {
+ if mval, ok := (lockSetFunctions{}).Merge(first.Range(), first.Value(), second.Range(), second.Value()); ok {
+
+ first.SetEndUnchecked(second.End())
+ first.SetValue(mval)
+
+ return s.Remove(second).PrevSegment()
+ }
+ }
+ return LockIterator{}
+}
+
+// MergeAll attempts to merge all adjacent segments in the set. All existing
+// iterators are invalidated.
+func (s *LockSet) MergeAll() {
+ seg := s.FirstSegment()
+ if !seg.Ok() {
+ return
+ }
+ next := seg.NextSegment()
+ for next.Ok() {
+ if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
+ seg, next = mseg, mseg.NextSegment()
+ } else {
+ seg, next = next, next.NextSegment()
+ }
+ }
+}
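
Whether two neighbors actually merge is decided by lockSetFunctions.Merge, defined elsewhere in this package, so a sketch can only show the calling pattern; coalesceAround is a hypothetical helper assuming the same import:

    func coalesceAround(s *lock.LockSet, key uint64) {
        seg := s.FindSegment(key)
        if !seg.Ok() {
            return
        }
        if prev := seg.PrevSegment(); prev.Ok() {
            // Merge panics unless prev immediately precedes seg, which holds
            // by construction; a terminal result just means the two values
            // were not mergeable, leaving the set unchanged.
            s.Merge(prev, seg)
        }
    }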
+
+// MergeRange attempts to merge all adjacent segments that contain a key in the
+// specific range. All existing iterators are invalidated.
+func (s *LockSet) MergeRange(r LockRange) {
+ seg := s.LowerBoundSegment(r.Start)
+ if !seg.Ok() {
+ return
+ }
+ next := seg.NextSegment()
+ for next.Ok() && next.Range().Start < r.End {
+ if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
+ seg, next = mseg, mseg.NextSegment()
+ } else {
+ seg, next = next, next.NextSegment()
+ }
+ }
+}
+
+// MergeAdjacent attempts to merge the segment containing r.Start with its
+// predecessor, and the segment containing r.End-1 with its successor.
+func (s *LockSet) MergeAdjacent(r LockRange) {
+ first := s.FindSegment(r.Start)
+ if first.Ok() {
+ if prev := first.PrevSegment(); prev.Ok() {
+ s.Merge(prev, first)
+ }
+ }
+ last := s.FindSegment(r.End - 1)
+ if last.Ok() {
+ if next := last.NextSegment(); next.Ok() {
+ s.Merge(last, next)
+ }
+ }
+}
+
+// Split splits the given segment at the given key and returns iterators to the
+// two resulting segments. All existing iterators (including seg, but not
+// including the returned iterators) are invalidated.
+//
+// If the segment cannot be split at split (because split is at the start or
+// end of the segment's range, so splitting would produce a segment with zero
+// length, or because split falls outside the segment's range altogether),
+// Split panics.
+func (s *LockSet) Split(seg LockIterator, split uint64) (LockIterator, LockIterator) {
+ if !seg.Range().CanSplitAt(split) {
+ panic(fmt.Sprintf("can't split %v at %v", seg.Range(), split))
+ }
+ return s.SplitUnchecked(seg, split)
+}
+
+// SplitUnchecked splits the given segment at the given key and returns
+// iterators to the two resulting segments. All existing iterators (including
+// seg, but not including the returned iterators) are invalidated.
+//
+// Preconditions: seg.Start() < split < seg.End().
+func (s *LockSet) SplitUnchecked(seg LockIterator, split uint64) (LockIterator, LockIterator) {
+ val1, val2 := (lockSetFunctions{}).Split(seg.Range(), seg.Value(), split)
+ end2 := seg.End()
+ seg.SetEndUnchecked(split)
+ seg.SetValue(val1)
+ seg2 := s.InsertWithoutMergingUnchecked(seg.NextGap(), LockRange{split, end2}, val2)
+
+ return seg2.PrevSegment(), seg2
+}
+
+// SplitAt splits the segment straddling split, if one exists. SplitAt returns
+// true if a segment was split and false otherwise. If SplitAt splits a
+// segment, all existing iterators are invalidated.
+func (s *LockSet) SplitAt(split uint64) bool {
+ if seg := s.FindSegment(split); seg.Ok() && seg.Range().CanSplitAt(split) {
+ s.SplitUnchecked(seg, split)
+ return true
+ }
+ return false
+}
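
A sketch of carving an existing segment at a single offset, assuming the same import; the CanSplitAt guard mirrors the check Split itself performs, and SplitAt above bundles the lookup and the split:

    func carve(s *lock.LockSet, key uint64) {
        if seg := s.FindSegment(key); seg.Ok() && seg.Range().CanSplitAt(key) {
            left, right := s.Split(seg, key)
            // left now ends at key and right begins at key.
            _, _ = left, right
        }
    }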
+
+// Isolate ensures that the given segment's range does not escape r by
+// splitting at r.Start and r.End if necessary, and returns an updated iterator
+// to the bounded segment. All existing iterators (including seg, but not
+// including the returned iterators) are invalidated.
+func (s *LockSet) Isolate(seg LockIterator, r LockRange) LockIterator {
+ if seg.Range().CanSplitAt(r.Start) {
+ _, seg = s.SplitUnchecked(seg, r.Start)
+ }
+ if seg.Range().CanSplitAt(r.End) {
+ seg, _ = s.SplitUnchecked(seg, r.End)
+ }
+ return seg
+}
+
+// ApplyContiguous applies a function to a contiguous range of segments,
+// splitting if necessary. The function is applied until the first gap is
+// encountered, at which point the gap is returned. If the function is applied
+// across the entire range, a terminal gap is returned. All existing iterators
+// are invalidated.
+//
+// N.B. The Iterator must not be invalidated by the function.
+func (s *LockSet) ApplyContiguous(r LockRange, fn func(seg LockIterator)) LockGapIterator {
+ seg, gap := s.Find(r.Start)
+ if !seg.Ok() {
+ return gap
+ }
+ for {
+ seg = s.Isolate(seg, r)
+ fn(seg)
+ if seg.End() >= r.End {
+ return LockGapIterator{}
+ }
+ gap = seg.NextGap()
+ if !gap.IsEmpty() {
+ return gap
+ }
+ seg = gap.NextSegment()
+ if !seg.Ok() {
+
+ return LockGapIterator{}
+ }
+ }
+}
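
A sketch of mutating every lock value across a contiguous run of segments via ApplyContiguous; addReader is a hypothetical helper, and the callback only touches values, so it does not invalidate the iterator it is given:

    func addReader(s *lock.LockSet, r lock.LockRange, id lock.UniqueID) bool {
        gap := s.ApplyContiguous(r, func(seg lock.LockIterator) {
            l := seg.ValuePtr()
            // Copy the Readers map rather than mutating it in place, in case
            // the map is shared with a neighboring segment after a Split.
            readers := make(map[lock.UniqueID]lock.OwnerInfo, len(l.Readers)+1)
            for k, v := range l.Readers {
                readers[k] = v
            }
            readers[id] = lock.OwnerInfo{}
            l.Readers = readers
        })
        // A terminal gap means the whole range was covered by segments.
        return !gap.Ok()
    }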
+
+// +stateify savable
+type Locknode struct {
+ // An internal binary tree node looks like:
+ //
+ // K
+ // / \
+ // Cl Cr
+ //
+ // where all keys in the subtree rooted by Cl (the left subtree) are less
+ // than K (the key of the parent node), and all keys in the subtree rooted
+ // by Cr (the right subtree) are greater than K.
+ //
+ // An internal B-tree node's indexes work out to look like:
+ //
+ // K0 K1 K2 ... Kn-1
+ // / \/ \/ \ ... / \
+ // C0 C1 C2 C3 ... Cn-1 Cn
+ //
+ // where n is nrSegments.
+ nrSegments int
+
+ // parent is a pointer to this node's parent. If this node is root, parent
+ // is nil.
+ parent *Locknode
+
+ // parentIndex is the index of this node in parent.children.
+ parentIndex int
+
+ // Flag for internal nodes that is technically redundant with "children[0]
+ // != nil", but is stored in the first cache line. "hasChildren" rather
+ // than "isLeaf" because false must be the correct value for an empty root.
+ hasChildren bool
+
+ // maxGap is the longest gap within this node. If the node is a leaf, it
+ // is simply the maximum of the (nrSegments+1) gaps formed by its
+ // nrSegments keys, including the 0th and nrSegments-th gaps that may be
+ // shared with ancestor nodes; if the node is internal, it is the maximum
+ // of all children's maxGap values.
+ maxGap LockdynamicGap
+
+ // Nodes store keys and values in separate arrays to maximize locality in
+ // the common case (scanning keys for lookup).
+ keys [LockmaxDegree - 1]LockRange
+ values [LockmaxDegree - 1]Lock
+ children [LockmaxDegree]*Locknode
+}
+
+// firstSegment returns the first segment in the subtree rooted by n.
+//
+// Preconditions: n.nrSegments != 0.
+func (n *Locknode) firstSegment() LockIterator {
+ for n.hasChildren {
+ n = n.children[0]
+ }
+ return LockIterator{n, 0}
+}
+
+// lastSegment returns the last segment in the subtree rooted by n.
+//
+// Preconditions: n.nrSegments != 0.
+func (n *Locknode) lastSegment() LockIterator {
+ for n.hasChildren {
+ n = n.children[n.nrSegments]
+ }
+ return LockIterator{n, n.nrSegments - 1}
+}
+
+func (n *Locknode) prevSibling() *Locknode {
+ if n.parent == nil || n.parentIndex == 0 {
+ return nil
+ }
+ return n.parent.children[n.parentIndex-1]
+}
+
+func (n *Locknode) nextSibling() *Locknode {
+ if n.parent == nil || n.parentIndex == n.parent.nrSegments {
+ return nil
+ }
+ return n.parent.children[n.parentIndex+1]
+}
+
+// rebalanceBeforeInsert splits n and its ancestors if they are full, as
+// required for insertion, and returns an updated iterator to the position
+// represented by gap.
+func (n *Locknode) rebalanceBeforeInsert(gap LockGapIterator) LockGapIterator {
+ if n.nrSegments < LockmaxDegree-1 {
+ return gap
+ }
+ if n.parent != nil {
+ gap = n.parent.rebalanceBeforeInsert(gap)
+ }
+ if n.parent == nil {
+
+ left := &Locknode{
+ nrSegments: LockminDegree - 1,
+ parent: n,
+ parentIndex: 0,
+ hasChildren: n.hasChildren,
+ }
+ right := &Locknode{
+ nrSegments: LockminDegree - 1,
+ parent: n,
+ parentIndex: 1,
+ hasChildren: n.hasChildren,
+ }
+ copy(left.keys[:LockminDegree-1], n.keys[:LockminDegree-1])
+ copy(left.values[:LockminDegree-1], n.values[:LockminDegree-1])
+ copy(right.keys[:LockminDegree-1], n.keys[LockminDegree:])
+ copy(right.values[:LockminDegree-1], n.values[LockminDegree:])
+ n.keys[0], n.values[0] = n.keys[LockminDegree-1], n.values[LockminDegree-1]
+ LockzeroValueSlice(n.values[1:])
+ if n.hasChildren {
+ copy(left.children[:LockminDegree], n.children[:LockminDegree])
+ copy(right.children[:LockminDegree], n.children[LockminDegree:])
+ LockzeroNodeSlice(n.children[2:])
+ for i := 0; i < LockminDegree; i++ {
+ left.children[i].parent = left
+ left.children[i].parentIndex = i
+ right.children[i].parent = right
+ right.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments = 1
+ n.hasChildren = true
+ n.children[0] = left
+ n.children[1] = right
+
+ if LocktrackGaps != 0 {
+ left.updateMaxGapLocal()
+ right.updateMaxGapLocal()
+ }
+ if gap.node != n {
+ return gap
+ }
+ if gap.index < LockminDegree {
+ return LockGapIterator{left, gap.index}
+ }
+ return LockGapIterator{right, gap.index - LockminDegree}
+ }
+
+ copy(n.parent.keys[n.parentIndex+1:], n.parent.keys[n.parentIndex:n.parent.nrSegments])
+ copy(n.parent.values[n.parentIndex+1:], n.parent.values[n.parentIndex:n.parent.nrSegments])
+ n.parent.keys[n.parentIndex], n.parent.values[n.parentIndex] = n.keys[LockminDegree-1], n.values[LockminDegree-1]
+ copy(n.parent.children[n.parentIndex+2:], n.parent.children[n.parentIndex+1:n.parent.nrSegments+1])
+ for i := n.parentIndex + 2; i < n.parent.nrSegments+2; i++ {
+ n.parent.children[i].parentIndex = i
+ }
+ sibling := &Locknode{
+ nrSegments: LockminDegree - 1,
+ parent: n.parent,
+ parentIndex: n.parentIndex + 1,
+ hasChildren: n.hasChildren,
+ }
+ n.parent.children[n.parentIndex+1] = sibling
+ n.parent.nrSegments++
+ copy(sibling.keys[:LockminDegree-1], n.keys[LockminDegree:])
+ copy(sibling.values[:LockminDegree-1], n.values[LockminDegree:])
+ LockzeroValueSlice(n.values[LockminDegree-1:])
+ if n.hasChildren {
+ copy(sibling.children[:LockminDegree], n.children[LockminDegree:])
+ LockzeroNodeSlice(n.children[LockminDegree:])
+ for i := 0; i < LockminDegree; i++ {
+ sibling.children[i].parent = sibling
+ sibling.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments = LockminDegree - 1
+
+ if LocktrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+
+ if gap.node != n {
+ return gap
+ }
+ if gap.index < LockminDegree {
+ return gap
+ }
+ return LockGapIterator{sibling, gap.index - LockminDegree}
+}
+
+// rebalanceAfterRemove "unsplits" n and its ancestors if they are deficient
+// (contain fewer segments than required by B-tree invariants), as required for
+// removal, and returns an updated iterator to the position represented by gap.
+//
+// Precondition: n is the only node in the tree that may currently violate a
+// B-tree invariant.
+func (n *Locknode) rebalanceAfterRemove(gap LockGapIterator) LockGapIterator {
+ for {
+ if n.nrSegments >= LockminDegree-1 {
+ return gap
+ }
+ if n.parent == nil {
+
+ return gap
+ }
+
+ if sibling := n.prevSibling(); sibling != nil && sibling.nrSegments >= LockminDegree {
+ copy(n.keys[1:], n.keys[:n.nrSegments])
+ copy(n.values[1:], n.values[:n.nrSegments])
+ n.keys[0] = n.parent.keys[n.parentIndex-1]
+ n.values[0] = n.parent.values[n.parentIndex-1]
+ n.parent.keys[n.parentIndex-1] = sibling.keys[sibling.nrSegments-1]
+ n.parent.values[n.parentIndex-1] = sibling.values[sibling.nrSegments-1]
+ lockSetFunctions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
+ if n.hasChildren {
+ copy(n.children[1:], n.children[:n.nrSegments+1])
+ n.children[0] = sibling.children[sibling.nrSegments]
+ sibling.children[sibling.nrSegments] = nil
+ n.children[0].parent = n
+ n.children[0].parentIndex = 0
+ for i := 1; i < n.nrSegments+2; i++ {
+ n.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments++
+ sibling.nrSegments--
+
+ if LocktrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+ if gap.node == sibling && gap.index == sibling.nrSegments {
+ return LockGapIterator{n, 0}
+ }
+ if gap.node == n {
+ return LockGapIterator{n, gap.index + 1}
+ }
+ return gap
+ }
+ if sibling := n.nextSibling(); sibling != nil && sibling.nrSegments >= LockminDegree {
+ n.keys[n.nrSegments] = n.parent.keys[n.parentIndex]
+ n.values[n.nrSegments] = n.parent.values[n.parentIndex]
+ n.parent.keys[n.parentIndex] = sibling.keys[0]
+ n.parent.values[n.parentIndex] = sibling.values[0]
+ copy(sibling.keys[:sibling.nrSegments-1], sibling.keys[1:])
+ copy(sibling.values[:sibling.nrSegments-1], sibling.values[1:])
+ lockSetFunctions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
+ if n.hasChildren {
+ n.children[n.nrSegments+1] = sibling.children[0]
+ copy(sibling.children[:sibling.nrSegments], sibling.children[1:])
+ sibling.children[sibling.nrSegments] = nil
+ n.children[n.nrSegments+1].parent = n
+ n.children[n.nrSegments+1].parentIndex = n.nrSegments + 1
+ for i := 0; i < sibling.nrSegments; i++ {
+ sibling.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments++
+ sibling.nrSegments--
+
+ if LocktrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+ if gap.node == sibling {
+ if gap.index == 0 {
+ return LockGapIterator{n, n.nrSegments}
+ }
+ return LockGapIterator{sibling, gap.index - 1}
+ }
+ return gap
+ }
+
+ p := n.parent
+ if p.nrSegments == 1 {
+
+ left, right := p.children[0], p.children[1]
+ p.nrSegments = left.nrSegments + right.nrSegments + 1
+ p.hasChildren = left.hasChildren
+ p.keys[left.nrSegments] = p.keys[0]
+ p.values[left.nrSegments] = p.values[0]
+ copy(p.keys[:left.nrSegments], left.keys[:left.nrSegments])
+ copy(p.values[:left.nrSegments], left.values[:left.nrSegments])
+ copy(p.keys[left.nrSegments+1:], right.keys[:right.nrSegments])
+ copy(p.values[left.nrSegments+1:], right.values[:right.nrSegments])
+ if left.hasChildren {
+ copy(p.children[:left.nrSegments+1], left.children[:left.nrSegments+1])
+ copy(p.children[left.nrSegments+1:], right.children[:right.nrSegments+1])
+ for i := 0; i < p.nrSegments+1; i++ {
+ p.children[i].parent = p
+ p.children[i].parentIndex = i
+ }
+ } else {
+ p.children[0] = nil
+ p.children[1] = nil
+ }
+
+ if gap.node == left {
+ return LockGapIterator{p, gap.index}
+ }
+ if gap.node == right {
+ return LockGapIterator{p, gap.index + left.nrSegments + 1}
+ }
+ return gap
+ }
+ // Merge n and either sibling, along with the segment separating the
+ // two, into whichever of the two nodes comes first. This is the
+ // reverse of the non-root splitting case in
+ // node.rebalanceBeforeInsert.
+ var left, right *Locknode
+ if n.parentIndex > 0 {
+ left = n.prevSibling()
+ right = n
+ } else {
+ left = n
+ right = n.nextSibling()
+ }
+
+ if gap.node == right {
+ gap = LockGapIterator{left, gap.index + left.nrSegments + 1}
+ }
+ left.keys[left.nrSegments] = p.keys[left.parentIndex]
+ left.values[left.nrSegments] = p.values[left.parentIndex]
+ copy(left.keys[left.nrSegments+1:], right.keys[:right.nrSegments])
+ copy(left.values[left.nrSegments+1:], right.values[:right.nrSegments])
+ if left.hasChildren {
+ copy(left.children[left.nrSegments+1:], right.children[:right.nrSegments+1])
+ for i := left.nrSegments + 1; i < left.nrSegments+right.nrSegments+2; i++ {
+ left.children[i].parent = left
+ left.children[i].parentIndex = i
+ }
+ }
+ left.nrSegments += right.nrSegments + 1
+ copy(p.keys[left.parentIndex:], p.keys[left.parentIndex+1:p.nrSegments])
+ copy(p.values[left.parentIndex:], p.values[left.parentIndex+1:p.nrSegments])
+ lockSetFunctions{}.ClearValue(&p.values[p.nrSegments-1])
+ copy(p.children[left.parentIndex+1:], p.children[left.parentIndex+2:p.nrSegments+1])
+ for i := 0; i < p.nrSegments; i++ {
+ p.children[i].parentIndex = i
+ }
+ p.children[p.nrSegments] = nil
+ p.nrSegments--
+
+ if LocktrackGaps != 0 {
+ left.updateMaxGapLocal()
+ }
+
+ n = p
+ }
+}
+
+// updateMaxGapLeaf updates maxGap bottom-up from the calling leaf until no
+// further update is necessary.
+//
+// Preconditions: n must be a leaf node, trackGaps must be 1.
+func (n *Locknode) updateMaxGapLeaf() {
+ if n.hasChildren {
+ panic(fmt.Sprintf("updateMaxGapLeaf should always be called on leaf node: %v", n))
+ }
+ max := n.calculateMaxGapLeaf()
+ if max == n.maxGap.Get() {
+
+ return
+ }
+ oldMax := n.maxGap.Get()
+ n.maxGap.Set(max)
+ if max > oldMax {
+
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() >= max {
+
+ break
+ }
+
+ p.maxGap.Set(max)
+ }
+ return
+ }
+
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() > oldMax {
+
+ break
+ }
+
+ parentNewMax := p.calculateMaxGapInternal()
+ if p.maxGap.Get() == parentNewMax {
+
+ break
+ }
+
+ p.maxGap.Set(parentNewMax)
+ }
+}
+
+// updateMaxGapLocal updates the maxGap of the calling node only, without
+// propagating to ancestor nodes.
+//
+// Precondition: trackGaps must be 1.
+func (n *Locknode) updateMaxGapLocal() {
+ if !n.hasChildren {
+
+ n.maxGap.Set(n.calculateMaxGapLeaf())
+ } else {
+
+ n.maxGap.Set(n.calculateMaxGapInternal())
+ }
+}
+
+// calculateMaxGapLeaf iterates over the gaps within a leaf node and
+// calculates the maximum.
+//
+// Preconditions: n must be a leaf node.
+func (n *Locknode) calculateMaxGapLeaf() uint64 {
+ max := LockGapIterator{n, 0}.Range().Length()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := (LockGapIterator{n, i}).Range().Length(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// calculateMaxGapInternal iterates over the children's maxGap values within
+// an internal node n and calculates the maximum.
+//
+// Preconditions: n must be a non-leaf node.
+func (n *Locknode) calculateMaxGapInternal() uint64 {
+ max := n.children[0].maxGap.Get()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := n.children[i].maxGap.Get(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// searchFirstLargeEnoughGap returns the first gap with length at least
+// minSize in the subtree rooted by n. If no such gap exists, it returns a
+// terminal gap iterator.
+func (n *Locknode) searchFirstLargeEnoughGap(minSize uint64) LockGapIterator {
+ if n.maxGap.Get() < minSize {
+ return LockGapIterator{}
+ }
+ if n.hasChildren {
+ for i := 0; i <= n.nrSegments; i++ {
+ if largeEnoughGap := n.children[i].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := 0; i <= n.nrSegments; i++ {
+ currentGap := LockGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
+// searchLastLargeEnoughGap returns the last gap with length at least minSize
+// in the subtree rooted by n. If no such gap exists, it returns a terminal
+// gap iterator.
+func (n *Locknode) searchLastLargeEnoughGap(minSize uint64) LockGapIterator {
+ if n.maxGap.Get() < minSize {
+ return LockGapIterator{}
+ }
+ if n.hasChildren {
+ for i := n.nrSegments; i >= 0; i-- {
+ if largeEnoughGap := n.children[i].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := n.nrSegments; i >= 0; i-- {
+ currentGap := LockGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
+// An Iterator is conceptually one of:
+//
+// - A pointer to a segment in a set; or
+//
+// - A terminal iterator, which is a sentinel indicating that the end of
+// iteration has been reached.
+//
+// Iterators are copyable values and are meaningfully equality-comparable. The
+// zero value of Iterator is a terminal iterator.
+//
+// Unless otherwise specified, any mutation of a set invalidates all existing
+// iterators into the set.
+type LockIterator struct {
+ // node is the node containing the iterated segment. If the iterator is
+ // terminal, node is nil.
+ node *Locknode
+
+ // index is the index of the segment in node.keys/values.
+ index int
+}
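
A sketch of a full in-order walk over the set, assuming the lock package import from the first sketch; the terminal iterator returned past the last segment ends the loop:

    func segmentRanges(s *lock.LockSet) []lock.LockRange {
        var out []lock.LockRange
        for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
            out = append(out, seg.Range())
        }
        return out
    }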
+
+// Ok returns true if the iterator is not terminal. All other methods are only
+// valid for non-terminal iterators.
+func (seg LockIterator) Ok() bool {
+ return seg.node != nil
+}
+
+// Range returns the iterated segment's range key.
+func (seg LockIterator) Range() LockRange {
+ return seg.node.keys[seg.index]
+}
+
+// Start is equivalent to Range().Start, but should be preferred if only the
+// start of the range is needed.
+func (seg LockIterator) Start() uint64 {
+ return seg.node.keys[seg.index].Start
+}
+
+// End is equivalent to Range().End, but should be preferred if only the end of
+// the range is needed.
+func (seg LockIterator) End() uint64 {
+ return seg.node.keys[seg.index].End
+}
+
+// SetRangeUnchecked mutates the iterated segment's range key. This operation
+// does not invalidate any iterators.
+//
+// Preconditions:
+// * r.Length() > 0.
+// * The new range must not overlap an existing one:
+// * If seg.NextSegment().Ok(), then r.End <= seg.NextSegment().Start().
+// * If seg.PrevSegment().Ok(), then r.Start >= seg.PrevSegment().End().
+func (seg LockIterator) SetRangeUnchecked(r LockRange) {
+ seg.node.keys[seg.index] = r
+}
+
+// SetRange mutates the iterated segment's range key. If the new range would
+// cause the iterated segment to overlap another segment, or if the new range
+// is invalid, SetRange panics. This operation does not invalidate any
+// iterators.
+func (seg LockIterator) SetRange(r LockRange) {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ if prev := seg.PrevSegment(); prev.Ok() && r.Start < prev.End() {
+ panic(fmt.Sprintf("new segment range %v overlaps segment range %v", r, prev.Range()))
+ }
+ if next := seg.NextSegment(); next.Ok() && r.End > next.Start() {
+ panic(fmt.Sprintf("new segment range %v overlaps segment range %v", r, next.Range()))
+ }
+ seg.SetRangeUnchecked(r)
+}
+
+// SetStartUnchecked mutates the iterated segment's start. This operation does
+// not invalidate any iterators.
+//
+// Preconditions: The new start must be valid:
+// * start < seg.End()
+// * If seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
+func (seg LockIterator) SetStartUnchecked(start uint64) {
+ seg.node.keys[seg.index].Start = start
+}
+
+// SetStart mutates the iterated segment's start. If the new start value would
+// cause the iterated segment to overlap another segment, or would result in an
+// invalid range, SetStart panics. This operation does not invalidate any
+// iterators.
+func (seg LockIterator) SetStart(start uint64) {
+ if start >= seg.End() {
+ panic(fmt.Sprintf("new start %v would invalidate segment range %v", start, seg.Range()))
+ }
+ if prev := seg.PrevSegment(); prev.Ok() && start < prev.End() {
+ panic(fmt.Sprintf("new start %v would cause segment range %v to overlap segment range %v", start, seg.Range(), prev.Range()))
+ }
+ seg.SetStartUnchecked(start)
+}
+
+// SetEndUnchecked mutates the iterated segment's end. This operation does not
+// invalidate any iterators.
+//
+// Preconditions: The new end must be valid:
+// * end > seg.Start().
+// * If seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
+func (seg LockIterator) SetEndUnchecked(end uint64) {
+ seg.node.keys[seg.index].End = end
+}
+
+// SetEnd mutates the iterated segment's end. If the new end value would cause
+// the iterated segment to overlap another segment, or would result in an
+// invalid range, SetEnd panics. This operation does not invalidate any
+// iterators.
+func (seg LockIterator) SetEnd(end uint64) {
+ if end <= seg.Start() {
+ panic(fmt.Sprintf("new end %v would invalidate segment range %v", end, seg.Range()))
+ }
+ if next := seg.NextSegment(); next.Ok() && end > next.Start() {
+ panic(fmt.Sprintf("new end %v would cause segment range %v to overlap segment range %v", end, seg.Range(), next.Range()))
+ }
+ seg.SetEndUnchecked(end)
+}
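
A sketch of shrinking a segment in place; unlike the Set-level operations, these setters never invalidate iterators, and a strict shrink cannot collide with a neighbor:

    func shrinkTail(seg lock.LockIterator, newEnd uint64) {
        if seg.Ok() && newEnd > seg.Start() && newEnd < seg.End() {
            // newEnd stays strictly inside the current range, so SetEnd's
            // validity and overlap checks cannot fire.
            seg.SetEnd(newEnd)
        }
    }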
+
+// Value returns a copy of the iterated segment's value.
+func (seg LockIterator) Value() Lock {
+ return seg.node.values[seg.index]
+}
+
+// ValuePtr returns a pointer to the iterated segment's value. The pointer is
+// invalidated if the iterator is invalidated. This operation does not
+// invalidate any iterators.
+func (seg LockIterator) ValuePtr() *Lock {
+ return &seg.node.values[seg.index]
+}
+
+// SetValue mutates the iterated segment's value. This operation does not
+// invalidate any iterators.
+func (seg LockIterator) SetValue(val Lock) {
+ seg.node.values[seg.index] = val
+}
+
+// PrevSegment returns the iterated segment's predecessor. If there is no
+// preceding segment, PrevSegment returns a terminal iterator.
+func (seg LockIterator) PrevSegment() LockIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index].lastSegment()
+ }
+ if seg.index > 0 {
+ return LockIterator{seg.node, seg.index - 1}
+ }
+ if seg.node.parent == nil {
+ return LockIterator{}
+ }
+ return LocksegmentBeforePosition(seg.node.parent, seg.node.parentIndex)
+}
+
+// NextSegment returns the iterated segment's successor. If there is no
+// succeeding segment, NextSegment returns a terminal iterator.
+func (seg LockIterator) NextSegment() LockIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index+1].firstSegment()
+ }
+ if seg.index < seg.node.nrSegments-1 {
+ return LockIterator{seg.node, seg.index + 1}
+ }
+ if seg.node.parent == nil {
+ return LockIterator{}
+ }
+ return LocksegmentAfterPosition(seg.node.parent, seg.node.parentIndex)
+}
+
+// PrevGap returns the gap immediately before the iterated segment.
+func (seg LockIterator) PrevGap() LockGapIterator {
+ if seg.node.hasChildren {
+
+ return seg.node.children[seg.index].lastSegment().NextGap()
+ }
+ return LockGapIterator{seg.node, seg.index}
+}
+
+// NextGap returns the gap immediately after the iterated segment.
+func (seg LockIterator) NextGap() LockGapIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index+1].firstSegment().PrevGap()
+ }
+ return LockGapIterator{seg.node, seg.index + 1}
+}
+
+// PrevNonEmpty returns the iterated segment's predecessor if it is adjacent,
+// or the gap before the iterated segment otherwise. If seg.Start() ==
+// Functions.MinKey(), PrevNonEmpty will return two terminal iterators.
+// Otherwise, exactly one of the iterators returned by PrevNonEmpty will be
+// non-terminal.
+func (seg LockIterator) PrevNonEmpty() (LockIterator, LockGapIterator) {
+ gap := seg.PrevGap()
+ if gap.Range().Length() != 0 {
+ return LockIterator{}, gap
+ }
+ return gap.PrevSegment(), LockGapIterator{}
+}
+
+// NextNonEmpty returns the iterated segment's successor if it is adjacent, or
+// the gap after the iterated segment otherwise. If seg.End() ==
+// Functions.MaxKey(), NextNonEmpty will return two terminal iterators.
+// Otherwise, exactly one of the iterators returned by NextNonEmpty will be
+// non-terminal.
+func (seg LockIterator) NextNonEmpty() (LockIterator, LockGapIterator) {
+ gap := seg.NextGap()
+ if gap.Range().Length() != 0 {
+ return LockIterator{}, gap
+ }
+ return gap.NextSegment(), LockGapIterator{}
+}
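
A sketch of a combined walk that visits each segment and any non-empty gap that follows it, relying on the documented invariant that at most one of the two iterators returned by NextNonEmpty is non-terminal:

    func walk(s *lock.LockSet, onSeg, onGap func(lock.LockRange)) {
        seg := s.FirstSegment()
        for seg.Ok() {
            onSeg(seg.Range())
            nextSeg, nextGap := seg.NextNonEmpty()
            if nextGap.Ok() {
                onGap(nextGap.Range())
                nextSeg = nextGap.NextSegment()
            }
            seg = nextSeg
        }
    }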
+
+// A GapIterator is conceptually one of:
+//
+// - A pointer to a position between two segments, before the first segment, or
+// after the last segment in a set, called a *gap*; or
+//
+// - A terminal iterator, which is a sentinel indicating that the end of
+// iteration has been reached.
+//
+// Note that the gap between two adjacent segments exists (iterators to it are
+// non-terminal), but has a length of zero. GapIterator.IsEmpty returns true
+// for such gaps. An empty set contains a single gap, spanning the entire range
+// of the set's keys.
+//
+// GapIterators are copyable values and are meaningfully equality-comparable.
+// The zero value of GapIterator is a terminal iterator.
+//
+// Unless otherwise specified, any mutation of a set invalidates all existing
+// iterators into the set.
+type LockGapIterator struct {
+ // The representation of a GapIterator is identical to that of an Iterator,
+ // except that index corresponds to positions between segments in the same
+ // way as for node.children (see comment for node.nrSegments).
+ node *Locknode
+ index int
+}
+
+// Ok returns true if the iterator is not terminal. All other methods are only
+// valid for non-terminal iterators.
+func (gap LockGapIterator) Ok() bool {
+ return gap.node != nil
+}
+
+// Range returns the range spanned by the iterated gap.
+func (gap LockGapIterator) Range() LockRange {
+ return LockRange{gap.Start(), gap.End()}
+}
+
+// Start is equivalent to Range().Start, but should be preferred if only the
+// start of the range is needed.
+func (gap LockGapIterator) Start() uint64 {
+ if ps := gap.PrevSegment(); ps.Ok() {
+ return ps.End()
+ }
+ return lockSetFunctions{}.MinKey()
+}
+
+// End is equivalent to Range().End, but should be preferred if only the end of
+// the range is needed.
+func (gap LockGapIterator) End() uint64 {
+ if ns := gap.NextSegment(); ns.Ok() {
+ return ns.Start()
+ }
+ return lockSetFunctions{}.MaxKey()
+}
+
+// IsEmpty returns true if the iterated gap is empty (that is, the "gap" is
+// between two adjacent segments).
+func (gap LockGapIterator) IsEmpty() bool {
+ return gap.Range().Length() == 0
+}
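
A sketch of walking the gaps rather than the segments, assuming the same import; zero-length gaps between adjacent segments are still visited, which is exactly what IsEmpty distinguishes:

    func holes(s *lock.LockSet) []lock.LockRange {
        var out []lock.LockRange
        for gap := s.FirstGap(); gap.Ok(); gap = gap.NextGap() {
            if !gap.IsEmpty() {
                out = append(out, gap.Range())
            }
        }
        return out
    }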
+
+// PrevSegment returns the segment immediately before the iterated gap. If no
+// such segment exists, PrevSegment returns a terminal iterator.
+func (gap LockGapIterator) PrevSegment() LockIterator {
+ return LocksegmentBeforePosition(gap.node, gap.index)
+}
+
+// NextSegment returns the segment immediately after the iterated gap. If no
+// such segment exists, NextSegment returns a terminal iterator.
+func (gap LockGapIterator) NextSegment() LockIterator {
+ return LocksegmentAfterPosition(gap.node, gap.index)
+}
+
+// PrevGap returns the iterated gap's predecessor. If no such gap exists,
+// PrevGap returns a terminal iterator.
+func (gap LockGapIterator) PrevGap() LockGapIterator {
+ seg := gap.PrevSegment()
+ if !seg.Ok() {
+ return LockGapIterator{}
+ }
+ return seg.PrevGap()
+}
+
+// NextGap returns the iterated gap's successor. If no such gap exists, NextGap
+// returns a terminal iterator.
+func (gap LockGapIterator) NextGap() LockGapIterator {
+ seg := gap.NextSegment()
+ if !seg.Ok() {
+ return LockGapIterator{}
+ }
+ return seg.NextGap()
+}
+
+// NextLargeEnoughGap returns the first subsequent gap with length at least
+// minSize. If no such gap exists, it returns a terminal gap iterator. The
+// search does NOT include the iterated gap itself.
+//
+// Precondition: trackGaps must be 1.
+func (gap LockGapIterator) NextLargeEnoughGap(minSize uint64) LockGapIterator {
+ if LocktrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == gap.node.nrSegments {
+
+ gap.node = gap.NextSegment().node
+ gap.index = 0
+ return gap.nextLargeEnoughGapHelper(minSize)
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
+
+// nextLargeEnoughGapHelper is the helper function used by NextLargeEnoughGap
+// to perform the actual recursion.
+//
+// Preconditions: gap is NOT the trailing gap of a non-leaf node.
+func (gap LockGapIterator) nextLargeEnoughGapHelper(minSize uint64) LockGapIterator {
+
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == gap.node.nrSegments)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return LockGapIterator{}
+ }
+
+ gap.index++
+ for gap.index <= gap.node.nrSegments {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index++
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == gap.node.nrSegments {
+
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
+
+// PrevLargeEnoughGap returns the last preceding gap with length at least
+// minSize. If no such gap exists, it returns a terminal gap iterator. The
+// search does NOT include the iterated gap itself.
+//
+// Precondition: trackGaps must be 1.
+func (gap LockGapIterator) PrevLargeEnoughGap(minSize uint64) LockGapIterator {
+ if LocktrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == 0 {
+
+ gap.node = gap.PrevSegment().node
+ gap.index = gap.node.nrSegments
+ return gap.prevLargeEnoughGapHelper(minSize)
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
+
+// prevLargeEnoughGapHelper is the helper function used by PrevLargeEnoughGap
+// to perform the actual recursion.
+//
+// Preconditions: gap is NOT the first gap of a non-leaf node.
+func (gap LockGapIterator) prevLargeEnoughGapHelper(minSize uint64) LockGapIterator {
+
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == 0)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return LockGapIterator{}
+ }
+
+ gap.index--
+ for gap.index >= 0 {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index--
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == 0 {
+
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
+
+// segmentBeforePosition returns the predecessor segment of the position given
+// by n.children[i], which may or may not contain a child. If no such segment
+// exists, segmentBeforePosition returns a terminal iterator.
+func LocksegmentBeforePosition(n *Locknode, i int) LockIterator {
+ for i == 0 {
+ if n.parent == nil {
+ return LockIterator{}
+ }
+ n, i = n.parent, n.parentIndex
+ }
+ return LockIterator{n, i - 1}
+}
+
+// segmentAfterPosition returns the successor segment of the position given by
+// n.children[i], which may or may not contain a child. If no such segment
+// exists, segmentAfterPosition returns a terminal iterator.
+func LocksegmentAfterPosition(n *Locknode, i int) LockIterator {
+ for i == n.nrSegments {
+ if n.parent == nil {
+ return LockIterator{}
+ }
+ n, i = n.parent, n.parentIndex
+ }
+ return LockIterator{n, i}
+}
+
+func LockzeroValueSlice(slice []Lock) {
+
+ for i := range slice {
+ lockSetFunctions{}.ClearValue(&slice[i])
+ }
+}
+
+func LockzeroNodeSlice(slice []*Locknode) {
+ for i := range slice {
+ slice[i] = nil
+ }
+}
+
+// String stringifies a Set for debugging.
+func (s *LockSet) String() string {
+ return s.root.String()
+}
+
+// String stringifies a node (and all of its children) for debugging.
+func (n *Locknode) String() string {
+ var buf bytes.Buffer
+ n.writeDebugString(&buf, "")
+ return buf.String()
+}
+
+func (n *Locknode) writeDebugString(buf *bytes.Buffer, prefix string) {
+ if n.hasChildren != (n.nrSegments > 0 && n.children[0] != nil) {
+ buf.WriteString(prefix)
+ buf.WriteString(fmt.Sprintf("WARNING: inconsistent value of hasChildren: got %v, want %v\n", n.hasChildren, !n.hasChildren))
+ }
+ for i := 0; i < n.nrSegments; i++ {
+ if child := n.children[i]; child != nil {
+ cprefix := fmt.Sprintf("%s- % 3d ", prefix, i)
+ if child.parent != n || child.parentIndex != i {
+ buf.WriteString(cprefix)
+ buf.WriteString(fmt.Sprintf("WARNING: inconsistent linkage to parent: got (%p, %d), want (%p, %d)\n", child.parent, child.parentIndex, n, i))
+ }
+ child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, i))
+ }
+ buf.WriteString(prefix)
+ if n.hasChildren {
+ if LocktrackGaps != 0 {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v, maxGap: %d\n", i, n.keys[i], n.values[i], n.maxGap.Get()))
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
+ }
+ if child := n.children[n.nrSegments]; child != nil {
+ child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, n.nrSegments))
+ }
+}
+
+// SegmentDataSlices represents segments from a set as slices of start keys,
+// end keys, and values. SegmentDataSlices is primarily used as an
+// intermediate representation for save/restore, and its layout is optimized
+// for that purpose.
+//
+// +stateify savable
+type LockSegmentDataSlices struct {
+ Start []uint64
+ End []uint64
+ Values []Lock
+}
+
+// ExportSortedSlices returns a copy of all segments in the given set, in
+// ascending key order.
+func (s *LockSet) ExportSortedSlices() *LockSegmentDataSlices {
+ var sds LockSegmentDataSlices
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ sds.Start = append(sds.Start, seg.Start())
+ sds.End = append(sds.End, seg.End())
+ sds.Values = append(sds.Values, seg.Value())
+ }
+ sds.Start = sds.Start[:len(sds.Start):len(sds.Start)]
+ sds.End = sds.End[:len(sds.End):len(sds.End)]
+ sds.Values = sds.Values[:len(sds.Values):len(sds.Values)]
+ return &sds
+}
+
+// ImportSortedSlices initializes the given set from the given slice.
+//
+// Preconditions:
+// * s must be empty.
+// * sds must represent a valid set (the segments in sds must have valid
+// lengths that do not overlap).
+// * The segments in sds must be sorted in ascending key order.
+func (s *LockSet) ImportSortedSlices(sds *LockSegmentDataSlices) error {
+ if !s.IsEmpty() {
+ return fmt.Errorf("cannot import into non-empty set %v", s)
+ }
+ gap := s.FirstGap()
+ for i := range sds.Start {
+ r := LockRange{sds.Start[i], sds.End[i]}
+ if !gap.Range().IsSupersetOf(r) {
+ return fmt.Errorf("segment overlaps a preceding segment or is incorrectly sorted: [%d, %d) => %v", sds.Start[i], sds.End[i], sds.Values[i])
+ }
+ gap = s.InsertWithoutMerging(gap, r, sds.Values[i]).NextGap()
+ }
+ return nil
+}
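
A sketch of the save/restore round trip these helpers exist for, assuming the same import; ExportSortedSlices produces exactly the sorted, non-overlapping form that ImportSortedSlices requires:

    func roundTrip(s *lock.LockSet) (*lock.LockSet, error) {
        sds := s.ExportSortedSlices()
        var restored lock.LockSet
        // The destination must be empty; importing into a populated set fails.
        if err := restored.ImportSortedSlices(sds); err != nil {
            return nil, err
        }
        return &restored, nil
    }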
+
+// segmentTestCheck returns an error if s is incorrectly sorted, does not
+// contain exactly expectedSegments segments, or contains a segment which
+// fails the passed check.
+//
+// This should be used only for testing, and has been added to this package for
+// templating convenience.
+func (s *LockSet) segmentTestCheck(expectedSegments int, segFunc func(int, LockRange, Lock) error) error {
+ havePrev := false
+ prev := uint64(0)
+ nrSegments := 0
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ next := seg.Start()
+ if havePrev && prev >= next {
+ return fmt.Errorf("incorrect order: key %d (segment %d) >= key %d (segment %d)", prev, nrSegments-1, next, nrSegments)
+ }
+ if segFunc != nil {
+ if err := segFunc(nrSegments, seg.Range(), seg.Value()); err != nil {
+ return err
+ }
+ }
+ prev = next
+ havePrev = true
+ nrSegments++
+ }
+ if nrSegments != expectedSegments {
+ return fmt.Errorf("incorrect number of segments: got %d, wanted %d", nrSegments, expectedSegments)
+ }
+ return nil
+}
+
+// countSegments counts the number of segments in the set.
+//
+// Similar to Check, this should only be used for testing.
+func (s *LockSet) countSegments() (segments int) {
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ segments++
+ }
+ return segments
+}
+func (s *LockSet) saveRoot() *LockSegmentDataSlices {
+ return s.ExportSortedSlices()
+}
+
+func (s *LockSet) loadRoot(sds *LockSegmentDataSlices) {
+ if err := s.ImportSortedSlices(sds); err != nil {
+ panic(err)
+ }
+}
diff --git a/pkg/sentry/fs/lock/lock_state_autogen.go b/pkg/sentry/fs/lock/lock_state_autogen.go
new file mode 100644
index 000000000..a0ad18763
--- /dev/null
+++ b/pkg/sentry/fs/lock/lock_state_autogen.go
@@ -0,0 +1,232 @@
+// automatically generated by stateify.
+
+package lock
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (o *OwnerInfo) StateTypeName() string {
+ return "pkg/sentry/fs/lock.OwnerInfo"
+}
+
+func (o *OwnerInfo) StateFields() []string {
+ return []string{
+ "PID",
+ }
+}
+
+func (o *OwnerInfo) beforeSave() {}
+
+// +checklocksignore
+func (o *OwnerInfo) StateSave(stateSinkObject state.Sink) {
+ o.beforeSave()
+ stateSinkObject.Save(0, &o.PID)
+}
+
+func (o *OwnerInfo) afterLoad() {}
+
+// +checklocksignore
+func (o *OwnerInfo) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &o.PID)
+}
+
+func (l *Lock) StateTypeName() string {
+ return "pkg/sentry/fs/lock.Lock"
+}
+
+func (l *Lock) StateFields() []string {
+ return []string{
+ "Readers",
+ "Writer",
+ "WriterInfo",
+ }
+}
+
+func (l *Lock) beforeSave() {}
+
+// +checklocksignore
+func (l *Lock) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.Readers)
+ stateSinkObject.Save(1, &l.Writer)
+ stateSinkObject.Save(2, &l.WriterInfo)
+}
+
+func (l *Lock) afterLoad() {}
+
+// +checklocksignore
+func (l *Lock) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.Readers)
+ stateSourceObject.Load(1, &l.Writer)
+ stateSourceObject.Load(2, &l.WriterInfo)
+}
+
+func (l *Locks) StateTypeName() string {
+ return "pkg/sentry/fs/lock.Locks"
+}
+
+func (l *Locks) StateFields() []string {
+ return []string{
+ "locks",
+ }
+}
+
+func (l *Locks) beforeSave() {}
+
+// +checklocksignore
+func (l *Locks) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ if !state.IsZeroValue(&l.blockedQueue) {
+ state.Failf("blockedQueue is %#v, expected zero", &l.blockedQueue)
+ }
+ stateSinkObject.Save(0, &l.locks)
+}
+
+func (l *Locks) afterLoad() {}
+
+// +checklocksignore
+func (l *Locks) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.locks)
+}
+
+func (r *LockRange) StateTypeName() string {
+ return "pkg/sentry/fs/lock.LockRange"
+}
+
+func (r *LockRange) StateFields() []string {
+ return []string{
+ "Start",
+ "End",
+ }
+}
+
+func (r *LockRange) beforeSave() {}
+
+// +checklocksignore
+func (r *LockRange) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.Start)
+ stateSinkObject.Save(1, &r.End)
+}
+
+func (r *LockRange) afterLoad() {}
+
+// +checklocksignore
+func (r *LockRange) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.Start)
+ stateSourceObject.Load(1, &r.End)
+}
+
+func (s *LockSet) StateTypeName() string {
+ return "pkg/sentry/fs/lock.LockSet"
+}
+
+func (s *LockSet) StateFields() []string {
+ return []string{
+ "root",
+ }
+}
+
+func (s *LockSet) beforeSave() {}
+
+// +checklocksignore
+func (s *LockSet) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ var rootValue *LockSegmentDataSlices = s.saveRoot()
+ stateSinkObject.SaveValue(0, rootValue)
+}
+
+func (s *LockSet) afterLoad() {}
+
+// +checklocksignore
+func (s *LockSet) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.LoadValue(0, new(*LockSegmentDataSlices), func(y interface{}) { s.loadRoot(y.(*LockSegmentDataSlices)) })
+}
+
+func (n *Locknode) StateTypeName() string {
+ return "pkg/sentry/fs/lock.Locknode"
+}
+
+func (n *Locknode) StateFields() []string {
+ return []string{
+ "nrSegments",
+ "parent",
+ "parentIndex",
+ "hasChildren",
+ "maxGap",
+ "keys",
+ "values",
+ "children",
+ }
+}
+
+func (n *Locknode) beforeSave() {}
+
+// +checklocksignore
+func (n *Locknode) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.nrSegments)
+ stateSinkObject.Save(1, &n.parent)
+ stateSinkObject.Save(2, &n.parentIndex)
+ stateSinkObject.Save(3, &n.hasChildren)
+ stateSinkObject.Save(4, &n.maxGap)
+ stateSinkObject.Save(5, &n.keys)
+ stateSinkObject.Save(6, &n.values)
+ stateSinkObject.Save(7, &n.children)
+}
+
+func (n *Locknode) afterLoad() {}
+
+// +checklocksignore
+func (n *Locknode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.nrSegments)
+ stateSourceObject.Load(1, &n.parent)
+ stateSourceObject.Load(2, &n.parentIndex)
+ stateSourceObject.Load(3, &n.hasChildren)
+ stateSourceObject.Load(4, &n.maxGap)
+ stateSourceObject.Load(5, &n.keys)
+ stateSourceObject.Load(6, &n.values)
+ stateSourceObject.Load(7, &n.children)
+}
+
+func (l *LockSegmentDataSlices) StateTypeName() string {
+ return "pkg/sentry/fs/lock.LockSegmentDataSlices"
+}
+
+func (l *LockSegmentDataSlices) StateFields() []string {
+ return []string{
+ "Start",
+ "End",
+ "Values",
+ }
+}
+
+func (l *LockSegmentDataSlices) beforeSave() {}
+
+// +checklocksignore
+func (l *LockSegmentDataSlices) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.Start)
+ stateSinkObject.Save(1, &l.End)
+ stateSinkObject.Save(2, &l.Values)
+}
+
+func (l *LockSegmentDataSlices) afterLoad() {}
+
+// +checklocksignore
+func (l *LockSegmentDataSlices) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.Start)
+ stateSourceObject.Load(1, &l.End)
+ stateSourceObject.Load(2, &l.Values)
+}
+
+func init() {
+ state.Register((*OwnerInfo)(nil))
+ state.Register((*Lock)(nil))
+ state.Register((*Locks)(nil))
+ state.Register((*LockRange)(nil))
+ state.Register((*LockSet)(nil))
+ state.Register((*Locknode)(nil))
+ state.Register((*LockSegmentDataSlices)(nil))
+}
diff --git a/pkg/sentry/fs/lock/lock_test.go b/pkg/sentry/fs/lock/lock_test.go
deleted file mode 100644
index 9878c04e1..000000000
--- a/pkg/sentry/fs/lock/lock_test.go
+++ /dev/null
@@ -1,1060 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package lock
-
-import (
- "reflect"
- "testing"
-)
-
-type entry struct {
- Lock
- LockRange
-}
-
-func equals(e0, e1 []entry) bool {
- if len(e0) != len(e1) {
- return false
- }
- for i := range e0 {
- for k := range e0[i].Lock.Readers {
- if _, ok := e1[i].Lock.Readers[k]; !ok {
- return false
- }
- }
- for k := range e1[i].Lock.Readers {
- if _, ok := e0[i].Lock.Readers[k]; !ok {
- return false
- }
- }
- if !reflect.DeepEqual(e0[i].LockRange, e1[i].LockRange) {
- return false
- }
- if e0[i].Lock.Writer != e1[i].Lock.Writer {
- return false
- }
- }
- return true
-}
-
-// fill a LockSet with consecutive region locks. Will panic if
-// LockRanges are not consecutive.
-func fill(entries []entry) LockSet {
- l := LockSet{}
- for _, e := range entries {
- gap := l.FindGap(e.LockRange.Start)
- if !gap.Ok() {
- panic("cannot insert into existing segment")
- }
- l.Insert(gap, e.LockRange, e.Lock)
- }
- return l
-}
-
-func TestCanLockEmpty(t *testing.T) {
- l := LockSet{}
-
- // Expect to be able to take any locks given that the set is empty.
- eof := l.FirstGap().End()
- r := LockRange{0, eof}
- if !l.canLock(1, ReadLock, r) {
- t.Fatalf("canLock type %d for range %v and uid %d got false, want true", ReadLock, r, 1)
- }
- if !l.canLock(2, ReadLock, r) {
- t.Fatalf("canLock type %d for range %v and uid %d got false, want true", ReadLock, r, 2)
- }
- if !l.canLock(1, WriteLock, r) {
- t.Fatalf("canLock type %d for range %v and uid %d got false, want true", WriteLock, r, 1)
- }
- if !l.canLock(2, WriteLock, r) {
- t.Fatalf("canLock type %d for range %v and uid %d got false, want true", WriteLock, r, 2)
- }
-}
-
-func TestCanLock(t *testing.T) {
- // + -------------- + ---------- + -------------- + --------- +
- // | Readers 1 & 2 | Readers 1 | Readers 1 & 3 | Writer 1 |
- // + ------------- + ---------- + -------------- + --------- +
- // 0 1024 2048 3072 4096
- l := fill([]entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{1: OwnerInfo{}, 2: OwnerInfo{}}},
- LockRange: LockRange{0, 1024},
- },
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{1: OwnerInfo{}}},
- LockRange: LockRange{1024, 2048},
- },
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{1: OwnerInfo{}, 3: OwnerInfo{}}},
- LockRange: LockRange{2048, 3072},
- },
- {
- Lock: Lock{Writer: 1},
- LockRange: LockRange{3072, 4096},
- },
- })
-
- // Now that we have a mildly interesting layout, try some checks on different
- // ranges, uids, and lock types.
- //
- // Expect to be able to extend the read lock, despite the writer lock, because
- // the writer has the same uid as the requested read lock.
- r := LockRange{0, 8192}
- if !l.canLock(1, ReadLock, r) {
- t.Fatalf("canLock type %d for range %v and uid %d got false, want true", ReadLock, r, 1)
- }
- // Expect to *not* be able to extend the read lock since there is an overlapping
- // writer region locked by someone other than the uid.
- if l.canLock(2, ReadLock, r) {
- t.Fatalf("canLock type %d for range %v and uid %d got true, want false", ReadLock, r, 2)
- }
- // Expect to be able to extend the read lock if there are only other readers in
- // the way.
- r = LockRange{64, 3072}
- if !l.canLock(2, ReadLock, r) {
- t.Fatalf("canLock type %d for range %v and uid %d got false, want true", ReadLock, r, 2)
- }
- // Expect to be able to set a read lock beyond the range of any existing locks.
- r = LockRange{4096, 10240}
- if !l.canLock(2, ReadLock, r) {
- t.Fatalf("canLock type %d for range %v and uid %d got false, want true", ReadLock, r, 2)
- }
-
- // Expect to not be able to take a write lock with other readers in the way.
- r = LockRange{0, 8192}
- if l.canLock(1, WriteLock, r) {
- t.Fatalf("canLock type %d for range %v and uid %d got true, want false", WriteLock, r, 1)
- }
- // Expect to be able to extend the write lock for the same uid.
- r = LockRange{3072, 8192}
- if !l.canLock(1, WriteLock, r) {
- t.Fatalf("canLock type %d for range %v and uid %d got false, want true", WriteLock, r, 1)
- }
- // Expect to not be able to overlap a write lock for two different uids.
- if l.canLock(2, WriteLock, r) {
- t.Fatalf("canLock type %d for range %v and uid %d got true, want false", WriteLock, r, 2)
- }
- // Expect to be able to set a write lock that is beyond the range of any
- // existing locks.
- r = LockRange{8192, 10240}
- if !l.canLock(2, WriteLock, r) {
- t.Fatalf("canLock type %d for range %v and uid %d got false, want true", WriteLock, r, 2)
- }
- // Expect to be able to upgrade a read lock (any portion of it).
- r = LockRange{1024, 2048}
- if !l.canLock(1, WriteLock, r) {
- t.Fatalf("canLock type %d for range %v and uid %d got false, want true", WriteLock, r, 1)
- }
- r = LockRange{1080, 2000}
- if !l.canLock(1, WriteLock, r) {
- t.Fatalf("canLock type %d for range %v and uid %d got false, want true", WriteLock, r, 1)
- }
-}
-
-func TestSetLock(t *testing.T) {
- tests := []struct {
- // description of test.
- name string
-
- // LockSet entries to pre-fill.
- before []entry
-
- // Description of region to lock:
- //
- // start is the file offset of the lock.
- start uint64
- // end is the end file offset of the lock.
- end uint64
- // uid of lock attempter.
- uid UniqueID
- // lock type requested.
- lockType LockType
-
- // success is true if taking the above
- // lock should succeed.
- success bool
-
- // Expected layout of the set after locking
- // if success is true.
- after []entry
- }{
- {
- name: "set zero length ReadLock on empty set",
- start: 0,
- end: 0,
- uid: 0,
- lockType: ReadLock,
- success: true,
- },
- {
- name: "set zero length WriteLock on empty set",
- start: 0,
- end: 0,
- uid: 0,
- lockType: WriteLock,
- success: true,
- },
- {
- name: "set ReadLock on empty set",
- start: 0,
- end: LockEOF,
- uid: 0,
- lockType: ReadLock,
- success: true,
- // + ----------------------------------------- +
- // | Readers 0 |
- // + ----------------------------------------- +
- // 0 max uint64
- after: []entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}}},
- LockRange: LockRange{0, LockEOF},
- },
- },
- },
- {
- name: "set WriteLock on empty set",
- start: 0,
- end: LockEOF,
- uid: 0,
- lockType: WriteLock,
- success: true,
- // + ----------------------------------------- +
- // | Writer 0 |
- // + ----------------------------------------- +
- // 0 max uint64
- after: []entry{
- {
- Lock: Lock{Writer: 0},
- LockRange: LockRange{0, LockEOF},
- },
- },
- },
- {
- name: "set ReadLock on WriteLock same uid",
- // + ----------------------------------------- +
- // | Writer 0 |
- // + ----------------------------------------- +
- // 0 max uint64
- before: []entry{
- {
- Lock: Lock{Writer: 0},
- LockRange: LockRange{0, LockEOF},
- },
- },
- start: 0,
- end: 4096,
- uid: 0,
- lockType: ReadLock,
- success: true,
- // + ----------- + --------------------------- +
- // | Readers 0 | Writer 0 |
- // + ----------- + --------------------------- +
- // 0 4096 max uint64
- after: []entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}}},
- LockRange: LockRange{0, 4096},
- },
- {
- Lock: Lock{Writer: 0},
- LockRange: LockRange{4096, LockEOF},
- },
- },
- },
- {
- name: "set WriteLock on ReadLock same uid",
- // + ----------------------------------------- +
- // | Readers 0 |
- // + ----------------------------------------- +
- // 0 max uint64
- before: []entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}}},
- LockRange: LockRange{0, LockEOF},
- },
- },
- start: 0,
- end: 4096,
- uid: 0,
- lockType: WriteLock,
- success: true,
- // + ----------- + --------------------------- +
- // | Writer 0 | Readers 0 |
- // + ----------- + --------------------------- +
- // 0 4096 max uint64
- after: []entry{
- {
- Lock: Lock{Writer: 0},
- LockRange: LockRange{0, 4096},
- },
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}}},
- LockRange: LockRange{4096, LockEOF},
- },
- },
- },
- {
- name: "set ReadLock on WriteLock different uid",
- // + ----------------------------------------- +
- // | Writer 0 |
- // + ----------------------------------------- +
- // 0 max uint64
- before: []entry{
- {
- Lock: Lock{Writer: 0},
- LockRange: LockRange{0, LockEOF},
- },
- },
- start: 0,
- end: 4096,
- uid: 1,
- lockType: ReadLock,
- success: false,
- },
- {
- name: "set WriteLock on ReadLock different uid",
- // + ----------------------------------------- +
- // | Readers 0 |
- // + ----------------------------------------- +
- // 0 max uint64
- before: []entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}}},
- LockRange: LockRange{0, LockEOF},
- },
- },
- start: 0,
- end: 4096,
- uid: 1,
- lockType: WriteLock,
- success: false,
- },
- {
- name: "split ReadLock for overlapping lock at start 0",
- // + ----------------------------------------- +
- // | Readers 0 |
- // + ----------------------------------------- +
- // 0 max uint64
- before: []entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}}},
- LockRange: LockRange{0, LockEOF},
- },
- },
- start: 0,
- end: 4096,
- uid: 1,
- lockType: ReadLock,
- success: true,
- // + -------------- + --------------------------- +
- // | Readers 0 & 1 | Readers 0 |
- // + -------------- + --------------------------- +
- // 0 4096 max uint64
- after: []entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}, 1: OwnerInfo{}}},
- LockRange: LockRange{0, 4096},
- },
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}}},
- LockRange: LockRange{4096, LockEOF},
- },
- },
- },
- {
- name: "split ReadLock for overlapping lock at non-zero start",
- // + ----------------------------------------- +
- // | Readers 0 |
- // + ----------------------------------------- +
- // 0 max uint64
- before: []entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}}},
- LockRange: LockRange{0, LockEOF},
- },
- },
- start: 4096,
- end: 8192,
- uid: 1,
- lockType: ReadLock,
- success: true,
- // + ---------- + -------------- + ----------- +
- // | Readers 0 | Readers 0 & 1 | Readers 0 |
- // + ---------- + -------------- + ----------- +
- // 0 4096 8192 max uint64
- after: []entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}}},
- LockRange: LockRange{0, 4096},
- },
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}, 1: OwnerInfo{}}},
- LockRange: LockRange{4096, 8192},
- },
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}}},
- LockRange: LockRange{8192, LockEOF},
- },
- },
- },
- {
- name: "fill front gap with ReadLock",
- // + --------- + ---------------------------- +
- // | gap | Readers 0 |
- // + --------- + ---------------------------- +
- // 0 1024 max uint64
- before: []entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}}},
- LockRange: LockRange{1024, LockEOF},
- },
- },
- start: 0,
- end: 8192,
- uid: 0,
- lockType: ReadLock,
- success: true,
- // + ----------------------------------------- +
- // | Readers 0 |
- // + ----------------------------------------- +
- // 0 max uint64
- after: []entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}}},
- LockRange: LockRange{0, LockEOF},
- },
- },
- },
- {
- name: "fill end gap with ReadLock",
- // + ---------------------------- +
- // | Readers 0 |
- // + ---------------------------- +
- // 0 4096
- before: []entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}}},
- LockRange: LockRange{0, 4096},
- },
- },
- start: 1024,
- end: LockEOF,
- uid: 0,
- lockType: ReadLock,
- success: true,
-			// Note that the two segments are not merged after lock performs a
-			// Split. This is fine because they will still *behave* as one lock.
-			// In other words, we can fragment any lock all we want and
-			// semantically it makes no difference.
- //
- // + ----------- + --------------------------- +
- // | Readers 0 | Readers 0 |
- // + ----------- + --------------------------- +
- // 0 max uint64
- after: []entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}}},
- LockRange: LockRange{0, 1024},
- },
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}}},
- LockRange: LockRange{1024, LockEOF},
- },
- },
- },
- {
- name: "fill gap with ReadLock and split",
- // + --------- + ---------------------------- +
- // | gap | Readers 0 |
- // + --------- + ---------------------------- +
- // 0 1024 max uint64
- before: []entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}}},
- LockRange: LockRange{1024, LockEOF},
- },
- },
- start: 0,
- end: 4096,
- uid: 1,
- lockType: ReadLock,
- success: true,
- // + --------- + ------------- + ------------- +
- // | Reader 1 | Readers 0 & 1 | Reader 0 |
- // + ----------+ ------------- + ------------- +
- // 0 1024 4096 max uint64
- after: []entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{1: OwnerInfo{}}},
- LockRange: LockRange{0, 1024},
- },
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}, 1: OwnerInfo{}}},
- LockRange: LockRange{1024, 4096},
- },
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}}},
- LockRange: LockRange{4096, LockEOF},
- },
- },
- },
- {
- name: "upgrade ReadLock to WriteLock for single uid fill gap",
- // + ------------- + --------- + --- + ------------- +
- // | Readers 0 & 1 | Readers 0 | gap | Readers 0 & 2 |
- // + ------------- + --------- + --- + ------------- +
- // 0 1024 2048 4096 max uint64
- before: []entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}, 1: OwnerInfo{}}},
- LockRange: LockRange{0, 1024},
- },
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}}},
- LockRange: LockRange{1024, 2048},
- },
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}, 2: OwnerInfo{}}},
- LockRange: LockRange{4096, LockEOF},
- },
- },
- start: 1024,
- end: 4096,
- uid: 0,
- lockType: WriteLock,
- success: true,
- // + ------------- + -------- + ------------- +
- // | Readers 0 & 1 | Writer 0 | Readers 0 & 2 |
- // + ------------- + -------- + ------------- +
- // 0 1024 4096 max uint64
- after: []entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}, 1: OwnerInfo{}}},
- LockRange: LockRange{0, 1024},
- },
- {
- Lock: Lock{Writer: 0},
- LockRange: LockRange{1024, 4096},
- },
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}, 2: OwnerInfo{}}},
- LockRange: LockRange{4096, LockEOF},
- },
- },
- },
- {
- name: "upgrade ReadLock to WriteLock for single uid keep gap",
- // + ------------- + --------- + --- + ------------- +
- // | Readers 0 & 1 | Readers 0 | gap | Readers 0 & 2 |
- // + ------------- + --------- + --- + ------------- +
- // 0 1024 2048 4096 max uint64
- before: []entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}, 1: OwnerInfo{}}},
- LockRange: LockRange{0, 1024},
- },
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}}},
- LockRange: LockRange{1024, 2048},
- },
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}, 2: OwnerInfo{}}},
- LockRange: LockRange{4096, LockEOF},
- },
- },
- start: 1024,
- end: 3072,
- uid: 0,
- lockType: WriteLock,
- success: true,
- // + ------------- + -------- + --- + ------------- +
- // | Readers 0 & 1 | Writer 0 | gap | Readers 0 & 2 |
- // + ------------- + -------- + --- + ------------- +
- // 0 1024 3072 4096 max uint64
- after: []entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}, 1: OwnerInfo{}}},
- LockRange: LockRange{0, 1024},
- },
- {
- Lock: Lock{Writer: 0},
- LockRange: LockRange{1024, 3072},
- },
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}, 2: OwnerInfo{}}},
- LockRange: LockRange{4096, LockEOF},
- },
- },
- },
- {
- name: "fail to upgrade ReadLock to WriteLock with conflicting Reader",
- // + ------------- + --------- +
- // | Readers 0 & 1 | Readers 0 |
- // + ------------- + --------- +
- // 0 1024 2048
- before: []entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}, 1: OwnerInfo{}}},
- LockRange: LockRange{0, 1024},
- },
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}}},
- LockRange: LockRange{1024, 2048},
- },
- },
- start: 0,
- end: 2048,
- uid: 0,
- lockType: WriteLock,
- success: false,
- },
- {
- name: "take WriteLock on whole file if all uids are the same",
- // + ------------- + --------- + --------- + ---------- +
- // | Writer 0 | Readers 0 | Readers 0 | Readers 0 |
- // + ------------- + --------- + --------- + ---------- +
- // 0 1024 2048 4096 max uint64
- before: []entry{
- {
- Lock: Lock{Writer: 0},
- LockRange: LockRange{0, 1024},
- },
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}}},
- LockRange: LockRange{1024, 2048},
- },
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}}},
- LockRange: LockRange{2048, 4096},
- },
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}}},
- LockRange: LockRange{4096, LockEOF},
- },
- },
- start: 0,
- end: LockEOF,
- uid: 0,
- lockType: WriteLock,
- success: true,
- // We do not manually merge locks. Semantically a fragmented lock
- // held by the same uid will behave as one lock so it makes no difference.
- //
- // + ------------- + ---------------------------- +
- // | Writer 0 | Writer 0 |
- // + ------------- + ---------------------------- +
- // 0 1024 max uint64
- after: []entry{
- {
- Lock: Lock{Writer: 0},
- LockRange: LockRange{0, 1024},
- },
- {
- Lock: Lock{Writer: 0},
- LockRange: LockRange{1024, LockEOF},
- },
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- l := fill(test.before)
-
- r := LockRange{Start: test.start, End: test.end}
- success := l.lock(test.uid, 0 /* ownerPID */, test.lockType, r)
- var got []entry
- for seg := l.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
- got = append(got, entry{
- Lock: seg.Value(),
- LockRange: seg.Range(),
- })
- }
-
- if success != test.success {
- t.Errorf("setlock(%v, %+v, %d, %d) got success %v, want %v", test.before, r, test.uid, test.lockType, success, test.success)
- return
- }
-
- if success {
- if !equals(got, test.after) {
- t.Errorf("got set %+v, want %+v", got, test.after)
- }
- }
- })
- }
-}
-
-func TestUnlock(t *testing.T) {
- tests := []struct {
- // description of test.
- name string
-
- // LockSet entries to pre-fill.
- before []entry
-
- // Description of region to unlock:
- //
- // start is the file start of the lock.
- start uint64
-		// end is the file end of the lock.
- end uint64
- // uid of lock holder.
- uid UniqueID
-
- // Expected layout of the set after unlocking.
- after []entry
- }{
- {
- name: "unlock zero length on empty set",
- start: 0,
- end: 0,
- uid: 0,
- },
- {
- name: "unlock on empty set (no-op)",
- start: 0,
- end: LockEOF,
- uid: 0,
- },
- {
- name: "unlock uid not locked (no-op)",
- // + --------------------------- +
- // | Readers 1 & 2 |
- // + --------------------------- +
- // 0 max uint64
- before: []entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{1: OwnerInfo{}, 2: OwnerInfo{}}},
- LockRange: LockRange{0, LockEOF},
- },
- },
- start: 1024,
- end: 4096,
- uid: 0,
- // + --------------------------- +
- // | Readers 1 & 2 |
- // + --------------------------- +
- // 0 max uint64
- after: []entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{1: OwnerInfo{}, 2: OwnerInfo{}}},
- LockRange: LockRange{0, LockEOF},
- },
- },
- },
- {
- name: "unlock ReadLock over entire file",
- // + ----------------------------------------- +
- // | Readers 0 |
- // + ----------------------------------------- +
- // 0 max uint64
- before: []entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}}},
- LockRange: LockRange{0, LockEOF},
- },
- },
- start: 0,
- end: LockEOF,
- uid: 0,
- },
- {
- name: "unlock WriteLock over entire file",
- // + ----------------------------------------- +
- // | Writer 0 |
- // + ----------------------------------------- +
- // 0 max uint64
- before: []entry{
- {
- Lock: Lock{Writer: 0},
- LockRange: LockRange{0, LockEOF},
- },
- },
- start: 0,
- end: LockEOF,
- uid: 0,
- },
- {
- name: "unlock partial ReadLock (start)",
- // + ----------------------------------------- +
- // | Readers 0 |
- // + ----------------------------------------- +
- // 0 max uint64
- before: []entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}}},
- LockRange: LockRange{0, LockEOF},
- },
- },
- start: 0,
- end: 4096,
- uid: 0,
- // + ------ + --------------------------- +
- // | gap | Readers 0 |
- // +------- + --------------------------- +
- // 0 4096 max uint64
- after: []entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}}},
- LockRange: LockRange{4096, LockEOF},
- },
- },
- },
- {
- name: "unlock partial WriteLock (start)",
- // + ----------------------------------------- +
- // | Writer 0 |
- // + ----------------------------------------- +
- // 0 max uint64
- before: []entry{
- {
- Lock: Lock{Writer: 0},
- LockRange: LockRange{0, LockEOF},
- },
- },
- start: 0,
- end: 4096,
- uid: 0,
- // + ------ + --------------------------- +
- // | gap | Writer 0 |
- // +------- + --------------------------- +
- // 0 4096 max uint64
- after: []entry{
- {
- Lock: Lock{Writer: 0},
- LockRange: LockRange{4096, LockEOF},
- },
- },
- },
- {
- name: "unlock partial ReadLock (end)",
- // + ----------------------------------------- +
- // | Readers 0 |
- // + ----------------------------------------- +
- // 0 max uint64
- before: []entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}}},
- LockRange: LockRange{0, LockEOF},
- },
- },
- start: 4096,
- end: LockEOF,
- uid: 0,
- // + --------------------------- +
- // | Readers 0 |
- // +---------------------------- +
- // 0 4096
- after: []entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}}},
- LockRange: LockRange{0, 4096},
- },
- },
- },
- {
- name: "unlock partial WriteLock (end)",
- // + ----------------------------------------- +
- // | Writer 0 |
- // + ----------------------------------------- +
- // 0 max uint64
- before: []entry{
- {
- Lock: Lock{Writer: 0},
- LockRange: LockRange{0, LockEOF},
- },
- },
- start: 4096,
- end: LockEOF,
- uid: 0,
- // + --------------------------- +
- // | Writer 0 |
- // +---------------------------- +
- // 0 4096
- after: []entry{
- {
- Lock: Lock{Writer: 0},
- LockRange: LockRange{0, 4096},
- },
- },
- },
- {
- name: "unlock for single uid",
- // + ------------- + --------- + ------------------- +
- // | Readers 0 & 1 | Writer 0 | Readers 0 & 1 & 2 |
- // + ------------- + --------- + ------------------- +
- // 0 1024 4096 max uint64
- before: []entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}, 1: OwnerInfo{}}},
- LockRange: LockRange{0, 1024},
- },
- {
- Lock: Lock{Writer: 0},
- LockRange: LockRange{1024, 4096},
- },
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}, 1: OwnerInfo{}, 2: OwnerInfo{}}},
- LockRange: LockRange{4096, LockEOF},
- },
- },
- start: 0,
- end: LockEOF,
- uid: 0,
- // + --------- + --- + --------------- +
- // | Readers 1 | gap | Readers 1 & 2 |
- // + --------- + --- + --------------- +
- // 0 1024 4096 max uint64
- after: []entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{1: OwnerInfo{}}},
- LockRange: LockRange{0, 1024},
- },
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{1: OwnerInfo{}, 2: OwnerInfo{}}},
- LockRange: LockRange{4096, LockEOF},
- },
- },
- },
- {
- name: "unlock subsection locked",
- // + ------------------------------- +
- // | Readers 0 & 1 & 2 |
- // + ------------------------------- +
- // 0 max uint64
- before: []entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}, 1: OwnerInfo{}, 2: OwnerInfo{}}},
- LockRange: LockRange{0, LockEOF},
- },
- },
- start: 1024,
- end: 4096,
- uid: 0,
- // + ----------------- + ------------- + ----------------- +
- // | Readers 0 & 1 & 2 | Readers 1 & 2 | Readers 0 & 1 & 2 |
- // + ----------------- + ------------- + ----------------- +
- // 0 1024 4096 max uint64
- after: []entry{
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}, 1: OwnerInfo{}, 2: OwnerInfo{}}},
- LockRange: LockRange{0, 1024},
- },
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{1: OwnerInfo{}, 2: OwnerInfo{}}},
- LockRange: LockRange{1024, 4096},
- },
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}, 1: OwnerInfo{}, 2: OwnerInfo{}}},
- LockRange: LockRange{4096, LockEOF},
- },
- },
- },
- {
- name: "unlock mid-gap to increase gap",
- // + --------- + ----- + ------------------- +
- // | Writer 0 | gap | Readers 0 & 1 |
- // + --------- + ----- + ------------------- +
- // 0 1024 4096 max uint64
- before: []entry{
- {
- Lock: Lock{Writer: 0},
- LockRange: LockRange{0, 1024},
- },
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}, 1: OwnerInfo{}}},
- LockRange: LockRange{4096, LockEOF},
- },
- },
- start: 8,
- end: 2048,
- uid: 0,
- // + --------- + ----- + ------------------- +
- // | Writer 0 | gap | Readers 0 & 1 |
- // + --------- + ----- + ------------------- +
- // 0 8 4096 max uint64
- after: []entry{
- {
- Lock: Lock{Writer: 0},
- LockRange: LockRange{0, 8},
- },
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}, 1: OwnerInfo{}}},
- LockRange: LockRange{4096, LockEOF},
- },
- },
- },
- {
- name: "unlock split region on uid mid-gap",
- // + --------- + ----- + ------------------- +
- // | Writer 0 | gap | Readers 0 & 1 |
- // + --------- + ----- + ------------------- +
- // 0 1024 4096 max uint64
- before: []entry{
- {
- Lock: Lock{Writer: 0},
- LockRange: LockRange{0, 1024},
- },
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}, 1: OwnerInfo{}}},
- LockRange: LockRange{4096, LockEOF},
- },
- },
- start: 2048,
- end: 8192,
- uid: 0,
- // + --------- + ----- + --------- + ------------- +
- // | Writer 0 | gap | Readers 1 | Readers 0 & 1 |
- // + --------- + ----- + --------- + ------------- +
- // 0 1024 4096 8192 max uint64
- after: []entry{
- {
- Lock: Lock{Writer: 0},
- LockRange: LockRange{0, 1024},
- },
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{1: OwnerInfo{}}},
- LockRange: LockRange{4096, 8192},
- },
- {
- Lock: Lock{Readers: map[UniqueID]OwnerInfo{0: OwnerInfo{}, 1: OwnerInfo{}}},
- LockRange: LockRange{8192, LockEOF},
- },
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- l := fill(test.before)
-
- r := LockRange{Start: test.start, End: test.end}
- l.unlock(test.uid, r)
- var got []entry
- for seg := l.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
- got = append(got, entry{
- Lock: seg.Value(),
- LockRange: seg.Range(),
- })
- }
- if !equals(got, test.after) {
- t.Errorf("got set %+v, want %+v", got, test.after)
- }
- })
- }
-}
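
The deleted TestSetLock/TestUnlock cases above repeatedly note that the lock set is left fragmented after a Split and that this is semantically harmless. A minimal, self-contained sketch of that argument (a toy interval model, not the gVisor lock package or its API) shows that a fragmented run of same-owner segments answers conflict queries exactly like one merged lock:

```go
// Toy model of byte-range lock conflict checks; not the gVisor lock package.
package main

import "fmt"

type span struct {
	start, end uint64 // [start, end)
	owner      int
	write      bool
}

// conflicts reports whether a request by owner for [start, end) with the given
// mode collides with any existing span held by a different owner.
func conflicts(held []span, owner int, start, end uint64, write bool) bool {
	for _, s := range held {
		if s.end <= start || end <= s.start { // no overlap
			continue
		}
		if s.owner == owner { // same owner never conflicts with itself
			continue
		}
		if write || s.write { // read/read is the only compatible pair
			return true
		}
	}
	return false
}

func main() {
	merged := []span{{0, 1 << 20, 0, false}}
	fragmented := []span{{0, 1024, 0, false}, {1024, 1 << 20, 0, false}}

	// A writer with a different owner is rejected either way; a reader is
	// admitted either way. Fragmentation is invisible to the semantics.
	for _, held := range [][]span{merged, fragmented} {
		fmt.Println(
			conflicts(held, 1, 512, 2048, true),  // true: write conflicts
			conflicts(held, 1, 512, 2048, false)) // false: read is compatible
	}
}
```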
diff --git a/pkg/sentry/fs/mount_test.go b/pkg/sentry/fs/mount_test.go
deleted file mode 100644
index 6c296f5d0..000000000
--- a/pkg/sentry/fs/mount_test.go
+++ /dev/null
@@ -1,273 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fs
-
-import (
- "fmt"
- "testing"
-
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
-)
-
-// cacheReallyContains iterates through the dirent cache to determine whether
-// it contains the given dirent.
-func cacheReallyContains(cache *DirentCache, d *Dirent) bool {
- for i := cache.list.Front(); i != nil; i = i.Next() {
- if i == d {
- return true
- }
- }
- return false
-}
-
-func mountPathsAre(ctx context.Context, root *Dirent, got []*Mount, want ...string) error {
- gotPaths := make(map[string]struct{}, len(got))
- gotStr := make([]string, len(got))
- for i, g := range got {
- if groot := g.Root(); groot != nil {
- name, _ := groot.FullName(root)
- groot.DecRef(ctx)
- gotStr[i] = name
- gotPaths[name] = struct{}{}
- }
- }
- if len(got) != len(want) {
- return fmt.Errorf("mount paths are different, got: %q, want: %q", gotStr, want)
- }
- for _, w := range want {
- if _, ok := gotPaths[w]; !ok {
- return fmt.Errorf("no mount with path %q found", w)
- }
- }
- return nil
-}
-
-// TestMountSourceOnlyCachedOnce tests that a Dirent that is mounted over only ends
-// up in a single Dirent Cache. NOTE(b/63848693): Having a dirent in multiple
-// caches causes major consistency issues.
-func TestMountSourceOnlyCachedOnce(t *testing.T) {
- ctx := contexttest.Context(t)
-
- rootCache := NewDirentCache(100)
- rootInode := NewMockInode(ctx, NewMockMountSource(rootCache), StableAttr{
- Type: Directory,
- })
- mm, err := NewMountNamespace(ctx, rootInode)
- if err != nil {
- t.Fatalf("NewMountNamespace failed: %v", err)
- }
- rootDirent := mm.Root()
- defer rootDirent.DecRef(ctx)
-
- // Get a child of the root which we will mount over. Note that the
- // MockInodeOperations causes Walk to always succeed.
- child, err := rootDirent.Walk(ctx, rootDirent, "child")
- if err != nil {
- t.Fatalf("failed to walk to child dirent: %v", err)
- }
- child.maybeExtendReference() // Cache.
-
- // Ensure that the root cache contains the child.
- if !cacheReallyContains(rootCache, child) {
- t.Errorf("wanted rootCache to contain child dirent, but it did not")
- }
-
- // Create a new cache and inode, and mount it over child.
- submountCache := NewDirentCache(100)
- submountInode := NewMockInode(ctx, NewMockMountSource(submountCache), StableAttr{
- Type: Directory,
- })
- if err := mm.Mount(ctx, child, submountInode); err != nil {
- t.Fatalf("failed to mount over child: %v", err)
- }
-
- // Walk to the child again.
- child2, err := rootDirent.Walk(ctx, rootDirent, "child")
- if err != nil {
- t.Fatalf("failed to walk to child dirent: %v", err)
- }
-
- // Should have a different Dirent than before.
- if child == child2 {
- t.Fatalf("expected %v not equal to %v, but they are the same", child, child2)
- }
-
-	// Neither of the caches should contain the child anymore.
- if cacheReallyContains(rootCache, child) {
- t.Errorf("wanted rootCache not to contain child dirent, but it did")
- }
- if cacheReallyContains(submountCache, child) {
- t.Errorf("wanted submountCache not to contain child dirent, but it did")
- }
-}
-
-func TestAllMountsUnder(t *testing.T) {
- ctx := contexttest.Context(t)
-
- rootCache := NewDirentCache(100)
- rootInode := NewMockInode(ctx, NewMockMountSource(rootCache), StableAttr{
- Type: Directory,
- })
- mm, err := NewMountNamespace(ctx, rootInode)
- if err != nil {
- t.Fatalf("NewMountNamespace failed: %v", err)
- }
- rootDirent := mm.Root()
- defer rootDirent.DecRef(ctx)
-
- // Add mounts at the following paths:
- paths := []string{
- "/foo",
- "/foo/bar",
- "/foo/bar/baz",
- "/foo/qux",
- "/waldo",
- }
-
- var maxTraversals uint
- for _, p := range paths {
- maxTraversals = 0
- d, err := mm.FindLink(ctx, rootDirent, nil, p, &maxTraversals)
- if err != nil {
- t.Fatalf("could not find path %q in mount manager: %v", p, err)
- }
-
- submountInode := NewMockInode(ctx, NewMockMountSource(nil), StableAttr{
- Type: Directory,
- })
- if err := mm.Mount(ctx, d, submountInode); err != nil {
- t.Fatalf("could not mount at %q: %v", p, err)
- }
- d.DecRef(ctx)
- }
-
-	// mm root should contain all submounts (and does include the root mount).
- rootMnt := mm.FindMount(rootDirent)
- submounts := mm.AllMountsUnder(rootMnt)
- allPaths := append(paths, "/")
- if err := mountPathsAre(ctx, rootDirent, submounts, allPaths...); err != nil {
- t.Error(err)
- }
-
- // Each mount should have a unique ID.
- foundIDs := make(map[uint64]struct{})
- for _, m := range submounts {
- if _, ok := foundIDs[m.ID]; ok {
- t.Errorf("got multiple mounts with id %d", m.ID)
- }
- foundIDs[m.ID] = struct{}{}
- }
-
- // Root mount should have no parent.
- if p := rootMnt.ParentID; p != invalidMountID {
-		t.Errorf("root.ParentID got %v, wanted %v", p, invalidMountID)
- }
-
- // Check that "foo" mount has 3 children.
- maxTraversals = 0
- d, err := mm.FindLink(ctx, rootDirent, nil, "/foo", &maxTraversals)
- if err != nil {
- t.Fatalf("could not find path %q in mount manager: %v", "/foo", err)
- }
- defer d.DecRef(ctx)
- submounts = mm.AllMountsUnder(mm.FindMount(d))
- if err := mountPathsAre(ctx, rootDirent, submounts, "/foo", "/foo/bar", "/foo/qux", "/foo/bar/baz"); err != nil {
- t.Error(err)
- }
-
- // "waldo" mount should have no children.
- maxTraversals = 0
- waldo, err := mm.FindLink(ctx, rootDirent, nil, "/waldo", &maxTraversals)
- if err != nil {
- t.Fatalf("could not find path %q in mount manager: %v", "/waldo", err)
- }
- defer waldo.DecRef(ctx)
- submounts = mm.AllMountsUnder(mm.FindMount(waldo))
- if err := mountPathsAre(ctx, rootDirent, submounts, "/waldo"); err != nil {
- t.Error(err)
- }
-}
-
-func TestUnmount(t *testing.T) {
- ctx := contexttest.Context(t)
-
- rootCache := NewDirentCache(100)
- rootInode := NewMockInode(ctx, NewMockMountSource(rootCache), StableAttr{
- Type: Directory,
- })
- mm, err := NewMountNamespace(ctx, rootInode)
- if err != nil {
- t.Fatalf("NewMountNamespace failed: %v", err)
- }
- rootDirent := mm.Root()
- defer rootDirent.DecRef(ctx)
-
- // Add mounts at the following paths:
- paths := []string{
- "/foo",
- "/foo/bar",
- "/foo/bar/goo",
- "/foo/bar/goo/abc",
- "/foo/abc",
- "/foo/def",
- "/waldo",
- "/wally",
- }
-
- var maxTraversals uint
- for _, p := range paths {
- maxTraversals = 0
- d, err := mm.FindLink(ctx, rootDirent, nil, p, &maxTraversals)
- if err != nil {
- t.Fatalf("could not find path %q in mount manager: %v", p, err)
- }
-
- submountInode := NewMockInode(ctx, NewMockMountSource(nil), StableAttr{
- Type: Directory,
- })
- if err := mm.Mount(ctx, d, submountInode); err != nil {
- t.Fatalf("could not mount at %q: %v", p, err)
- }
- d.DecRef(ctx)
- }
-
- allPaths := make([]string, len(paths)+1)
- allPaths[0] = "/"
- copy(allPaths[1:], paths)
-
- rootMnt := mm.FindMount(rootDirent)
- for i := len(paths) - 1; i >= 0; i-- {
- maxTraversals = 0
- p := paths[i]
- d, err := mm.FindLink(ctx, rootDirent, nil, p, &maxTraversals)
- if err != nil {
- t.Fatalf("could not find path %q in mount manager: %v", p, err)
- }
-
- if err := mm.Unmount(ctx, d, false); err != nil {
- t.Fatalf("could not unmount at %q: %v", p, err)
- }
- d.DecRef(ctx)
-
-		// Remove the path that has been unmounted and then check that the remaining
- // mounts are still there.
- allPaths = allPaths[:len(allPaths)-1]
- submounts := mm.AllMountsUnder(rootMnt)
- if err := mountPathsAre(ctx, rootDirent, submounts, allPaths...); err != nil {
- t.Error(err)
- }
- }
-}
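
TestMountSourceOnlyCachedOnce above encodes the invariant from b/63848693: a dirent that is mounted over must end up in at most one dirent cache. As a hedged illustration of why double-caching is dangerous (a toy model, not gVisor's DirentCache), consider two caches holding the same entry; invalidating one still leaves a stale copy reachable through the other:

```go
// Toy illustration of the double-caching hazard; not gVisor code.
package main

import "fmt"

type dirent struct{ name, backing string }

func main() {
	child := &dirent{name: "child", backing: "rootfs"}

	rootCache := map[string]*dirent{"child": child}
	submountCache := map[string]*dirent{"child": child} // double-cached: the bug

	// A mount replaces what "child" should resolve to, so the root cache is
	// invalidated...
	delete(rootCache, "child")

	// ...but the copy in the second cache still hands out the pre-mount dirent.
	if d, ok := submountCache["child"]; ok {
		fmt.Printf("stale lookup: %s still resolves to %s\n", d.name, d.backing)
	}
}
```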
diff --git a/pkg/sentry/fs/mounts_test.go b/pkg/sentry/fs/mounts_test.go
deleted file mode 100644
index 975d6cbc9..000000000
--- a/pkg/sentry/fs/mounts_test.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fs_test
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/sentry/fs"
- "gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
- "gvisor.dev/gvisor/pkg/sentry/fs/ramfs"
- "gvisor.dev/gvisor/pkg/sentry/kernel/contexttest"
-)
-
-// Creates a new MountNamespace with filesystem:
-// / (root dir)
-// |-foo (dir)
-// |-bar (file)
-func createMountNamespace(ctx context.Context) (*fs.MountNamespace, error) {
- perms := fs.FilePermsFromMode(0777)
- m := fs.NewPseudoMountSource(ctx)
-
- barFile := fsutil.NewSimpleFileInode(ctx, fs.RootOwner, perms, 0)
- fooDir := ramfs.NewDir(ctx, map[string]*fs.Inode{
- "bar": fs.NewInode(ctx, barFile, m, fs.StableAttr{Type: fs.RegularFile}),
- }, fs.RootOwner, perms)
- rootDir := ramfs.NewDir(ctx, map[string]*fs.Inode{
- "foo": fs.NewInode(ctx, fooDir, m, fs.StableAttr{Type: fs.Directory}),
- }, fs.RootOwner, perms)
-
- return fs.NewMountNamespace(ctx, fs.NewInode(ctx, rootDir, m, fs.StableAttr{Type: fs.Directory}))
-}
-
-func TestFindLink(t *testing.T) {
- ctx := contexttest.Context(t)
- mm, err := createMountNamespace(ctx)
- if err != nil {
- t.Fatalf("createMountNamespace failed: %v", err)
- }
-
- root := mm.Root()
- defer root.DecRef(ctx)
- foo, err := root.Walk(ctx, root, "foo")
- if err != nil {
- t.Fatalf("Error walking to foo: %v", err)
- }
-
- // Positive cases.
- for _, tc := range []struct {
- findPath string
- wd *fs.Dirent
- wantPath string
- }{
- {".", root, "/"},
- {".", foo, "/foo"},
- {"..", foo, "/"},
- {"../../..", foo, "/"},
- {"///foo", foo, "/foo"},
- {"/foo", foo, "/foo"},
- {"/foo/bar", foo, "/foo/bar"},
- {"/foo/.///./bar", foo, "/foo/bar"},
- {"/foo///bar", foo, "/foo/bar"},
- {"/foo/../foo/bar", foo, "/foo/bar"},
- {"foo/bar", root, "/foo/bar"},
- {"foo////bar", root, "/foo/bar"},
- {"bar", foo, "/foo/bar"},
- } {
- wdPath, _ := tc.wd.FullName(root)
- maxTraversals := uint(0)
- if d, err := mm.FindLink(ctx, root, tc.wd, tc.findPath, &maxTraversals); err != nil {
- t.Errorf("FindLink(%q, wd=%q) failed: %v", tc.findPath, wdPath, err)
- } else if got, _ := d.FullName(root); got != tc.wantPath {
- t.Errorf("FindLink(%q, wd=%q) got dirent %q, want %q", tc.findPath, wdPath, got, tc.wantPath)
- }
- }
-
- // Negative cases.
- for _, tc := range []struct {
- findPath string
- wd *fs.Dirent
- }{
- {"bar", root},
- {"/bar", root},
- {"/foo/../../bar", root},
- {"foo", foo},
- } {
- wdPath, _ := tc.wd.FullName(root)
- maxTraversals := uint(0)
- if _, err := mm.FindLink(ctx, root, tc.wd, tc.findPath, &maxTraversals); err == nil {
- t.Errorf("FindLink(%q, wd=%q) did not return error", tc.findPath, wdPath)
- }
- }
-}
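
The expected paths in TestFindLink's positive cases are plain lexical resolutions of findPath against the working directory. A quick way to sanity-check new rows in that table is the sketch below; it only mirrors the expected-path column using the standard library, whereas FindLink itself additionally walks mounts and symlinks:

```go
// Lexical sanity check for the TestFindLink expectations; illustrative only.
package main

import (
	"fmt"
	"path"
)

// lexical resolves p against wd the way the test table expects: absolute paths
// stand alone, relative paths are joined to wd, and "."/".." are folded away
// (".." never escapes the root).
func lexical(wd, p string) string {
	if !path.IsAbs(p) {
		p = wd + "/" + p
	}
	return path.Clean(p)
}

func main() {
	cases := []struct{ wd, find, want string }{
		{"/foo", "..", "/"},
		{"/foo", "../../..", "/"},
		{"/foo", "/foo/.///./bar", "/foo/bar"},
		{"/", "foo////bar", "/foo/bar"},
	}
	for _, c := range cases {
		got := lexical(c.wd, c.find)
		fmt.Printf("%-14s in %-4s -> %-9s (want %s)\n", c.find, c.wd, got, c.want)
	}
}
```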
diff --git a/pkg/sentry/fs/path_test.go b/pkg/sentry/fs/path_test.go
deleted file mode 100644
index e6f57ebba..000000000
--- a/pkg/sentry/fs/path_test.go
+++ /dev/null
@@ -1,289 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fs
-
-import (
- "testing"
-)
-
-// TestSplitLast tests variants of path splitting.
-func TestSplitLast(t *testing.T) {
- cases := []struct {
- path string
- dir string
- file string
- }{
- {path: "/", dir: "/", file: "."},
- {path: "/.", dir: "/", file: "."},
- {path: "/./", dir: "/", file: "."},
- {path: "/./.", dir: "/.", file: "."},
- {path: "/././", dir: "/.", file: "."},
- {path: "/./..", dir: "/.", file: ".."},
- {path: "/./../", dir: "/.", file: ".."},
- {path: "/..", dir: "/", file: ".."},
- {path: "/../", dir: "/", file: ".."},
- {path: "/../.", dir: "/..", file: "."},
- {path: "/.././", dir: "/..", file: "."},
- {path: "/../..", dir: "/..", file: ".."},
- {path: "/../../", dir: "/..", file: ".."},
-
- {path: "", dir: ".", file: "."},
- {path: ".", dir: ".", file: "."},
- {path: "./", dir: ".", file: "."},
- {path: "./.", dir: ".", file: "."},
- {path: "././", dir: ".", file: "."},
- {path: "./..", dir: ".", file: ".."},
- {path: "./../", dir: ".", file: ".."},
- {path: "..", dir: ".", file: ".."},
- {path: "../", dir: ".", file: ".."},
- {path: "../.", dir: "..", file: "."},
- {path: ".././", dir: "..", file: "."},
- {path: "../..", dir: "..", file: ".."},
- {path: "../../", dir: "..", file: ".."},
-
- {path: "/foo", dir: "/", file: "foo"},
- {path: "/foo/", dir: "/", file: "foo"},
- {path: "/foo/.", dir: "/foo", file: "."},
- {path: "/foo/./", dir: "/foo", file: "."},
- {path: "/foo/./.", dir: "/foo/.", file: "."},
- {path: "/foo/./..", dir: "/foo/.", file: ".."},
- {path: "/foo/..", dir: "/foo", file: ".."},
- {path: "/foo/../", dir: "/foo", file: ".."},
- {path: "/foo/../.", dir: "/foo/..", file: "."},
- {path: "/foo/../..", dir: "/foo/..", file: ".."},
-
- {path: "/foo/bar", dir: "/foo", file: "bar"},
- {path: "/foo/bar/", dir: "/foo", file: "bar"},
- {path: "/foo/bar/.", dir: "/foo/bar", file: "."},
- {path: "/foo/bar/./", dir: "/foo/bar", file: "."},
- {path: "/foo/bar/./.", dir: "/foo/bar/.", file: "."},
- {path: "/foo/bar/./..", dir: "/foo/bar/.", file: ".."},
- {path: "/foo/bar/..", dir: "/foo/bar", file: ".."},
- {path: "/foo/bar/../", dir: "/foo/bar", file: ".."},
- {path: "/foo/bar/../.", dir: "/foo/bar/..", file: "."},
- {path: "/foo/bar/../..", dir: "/foo/bar/..", file: ".."},
-
- {path: "foo", dir: ".", file: "foo"},
- {path: "foo", dir: ".", file: "foo"},
- {path: "foo/", dir: ".", file: "foo"},
- {path: "foo/.", dir: "foo", file: "."},
- {path: "foo/./", dir: "foo", file: "."},
- {path: "foo/./.", dir: "foo/.", file: "."},
- {path: "foo/./..", dir: "foo/.", file: ".."},
- {path: "foo/..", dir: "foo", file: ".."},
- {path: "foo/../", dir: "foo", file: ".."},
- {path: "foo/../.", dir: "foo/..", file: "."},
- {path: "foo/../..", dir: "foo/..", file: ".."},
- {path: "foo/", dir: ".", file: "foo"},
- {path: "foo/.", dir: "foo", file: "."},
-
- {path: "foo/bar", dir: "foo", file: "bar"},
- {path: "foo/bar/", dir: "foo", file: "bar"},
- {path: "foo/bar/.", dir: "foo/bar", file: "."},
- {path: "foo/bar/./", dir: "foo/bar", file: "."},
- {path: "foo/bar/./.", dir: "foo/bar/.", file: "."},
- {path: "foo/bar/./..", dir: "foo/bar/.", file: ".."},
- {path: "foo/bar/..", dir: "foo/bar", file: ".."},
- {path: "foo/bar/../", dir: "foo/bar", file: ".."},
- {path: "foo/bar/../.", dir: "foo/bar/..", file: "."},
- {path: "foo/bar/../..", dir: "foo/bar/..", file: ".."},
- {path: "foo/bar/", dir: "foo", file: "bar"},
- {path: "foo/bar/.", dir: "foo/bar", file: "."},
- }
-
- for _, c := range cases {
- dir, file := SplitLast(c.path)
- if dir != c.dir || file != c.file {
- t.Errorf("SplitLast(%q) got (%q, %q), expected (%q, %q)", c.path, dir, file, c.dir, c.file)
- }
- }
-}
-
-// TestSplitFirst tests variants of path splitting.
-func TestSplitFirst(t *testing.T) {
- cases := []struct {
- path string
- first string
- remainder string
- }{
- {path: "/", first: "/", remainder: ""},
- {path: "/.", first: "/", remainder: "."},
- {path: "///.", first: "/", remainder: "//."},
- {path: "/.///", first: "/", remainder: "."},
- {path: "/./.", first: "/", remainder: "./."},
- {path: "/././", first: "/", remainder: "./."},
- {path: "/./..", first: "/", remainder: "./.."},
- {path: "/./../", first: "/", remainder: "./.."},
- {path: "/..", first: "/", remainder: ".."},
- {path: "/../", first: "/", remainder: ".."},
- {path: "/../.", first: "/", remainder: "../."},
- {path: "/.././", first: "/", remainder: "../."},
- {path: "/../..", first: "/", remainder: "../.."},
- {path: "/../../", first: "/", remainder: "../.."},
-
- {path: "", first: ".", remainder: ""},
- {path: ".", first: ".", remainder: ""},
- {path: "./", first: ".", remainder: ""},
- {path: ".///", first: ".", remainder: ""},
- {path: "./.", first: ".", remainder: "."},
- {path: "././", first: ".", remainder: "."},
- {path: "./..", first: ".", remainder: ".."},
- {path: "./../", first: ".", remainder: ".."},
- {path: "..", first: "..", remainder: ""},
- {path: "../", first: "..", remainder: ""},
- {path: "../.", first: "..", remainder: "."},
- {path: ".././", first: "..", remainder: "."},
- {path: "../..", first: "..", remainder: ".."},
- {path: "../../", first: "..", remainder: ".."},
-
- {path: "/foo", first: "/", remainder: "foo"},
- {path: "/foo/", first: "/", remainder: "foo"},
- {path: "/foo///", first: "/", remainder: "foo"},
- {path: "/foo/.", first: "/", remainder: "foo/."},
- {path: "/foo/./", first: "/", remainder: "foo/."},
- {path: "/foo/./.", first: "/", remainder: "foo/./."},
- {path: "/foo/./..", first: "/", remainder: "foo/./.."},
- {path: "/foo/..", first: "/", remainder: "foo/.."},
- {path: "/foo/../", first: "/", remainder: "foo/.."},
- {path: "/foo/../.", first: "/", remainder: "foo/../."},
- {path: "/foo/../..", first: "/", remainder: "foo/../.."},
-
- {path: "/foo/bar", first: "/", remainder: "foo/bar"},
- {path: "///foo/bar", first: "/", remainder: "//foo/bar"},
- {path: "/foo///bar", first: "/", remainder: "foo///bar"},
- {path: "/foo/bar/.", first: "/", remainder: "foo/bar/."},
- {path: "/foo/bar/./", first: "/", remainder: "foo/bar/."},
- {path: "/foo/bar/./.", first: "/", remainder: "foo/bar/./."},
- {path: "/foo/bar/./..", first: "/", remainder: "foo/bar/./.."},
- {path: "/foo/bar/..", first: "/", remainder: "foo/bar/.."},
- {path: "/foo/bar/../", first: "/", remainder: "foo/bar/.."},
- {path: "/foo/bar/../.", first: "/", remainder: "foo/bar/../."},
- {path: "/foo/bar/../..", first: "/", remainder: "foo/bar/../.."},
-
- {path: "foo", first: "foo", remainder: ""},
- {path: "foo", first: "foo", remainder: ""},
- {path: "foo/", first: "foo", remainder: ""},
- {path: "foo///", first: "foo", remainder: ""},
- {path: "foo/.", first: "foo", remainder: "."},
- {path: "foo/./", first: "foo", remainder: "."},
- {path: "foo/./.", first: "foo", remainder: "./."},
- {path: "foo/./..", first: "foo", remainder: "./.."},
- {path: "foo/..", first: "foo", remainder: ".."},
- {path: "foo/../", first: "foo", remainder: ".."},
- {path: "foo/../.", first: "foo", remainder: "../."},
- {path: "foo/../..", first: "foo", remainder: "../.."},
- {path: "foo/", first: "foo", remainder: ""},
- {path: "foo/.", first: "foo", remainder: "."},
-
- {path: "foo/bar", first: "foo", remainder: "bar"},
- {path: "foo///bar", first: "foo", remainder: "bar"},
- {path: "foo/bar/", first: "foo", remainder: "bar"},
- {path: "foo/bar/.", first: "foo", remainder: "bar/."},
- {path: "foo/bar/./", first: "foo", remainder: "bar/."},
- {path: "foo/bar/./.", first: "foo", remainder: "bar/./."},
- {path: "foo/bar/./..", first: "foo", remainder: "bar/./.."},
- {path: "foo/bar/..", first: "foo", remainder: "bar/.."},
- {path: "foo/bar/../", first: "foo", remainder: "bar/.."},
- {path: "foo/bar/../.", first: "foo", remainder: "bar/../."},
- {path: "foo/bar/../..", first: "foo", remainder: "bar/../.."},
- {path: "foo/bar/", first: "foo", remainder: "bar"},
- {path: "foo/bar/.", first: "foo", remainder: "bar/."},
- }
-
- for _, c := range cases {
- first, remainder := SplitFirst(c.path)
- if first != c.first || remainder != c.remainder {
- t.Errorf("SplitFirst(%q) got (%q, %q), expected (%q, %q)", c.path, first, remainder, c.first, c.remainder)
- }
- }
-}
-
-// TestIsSubpath tests the IsSubpath method.
-func TestIsSubpath(t *testing.T) {
- tcs := []struct {
- // Two absolute paths.
- pathA string
- pathB string
-
- // Whether pathA is a subpath of pathB.
- wantIsSubpath bool
-
- // Relative path from pathA to pathB. Only checked if
- // wantIsSubpath is true.
- wantRelpath string
- }{
- {
- pathA: "/foo/bar/baz",
- pathB: "/foo",
- wantIsSubpath: true,
- wantRelpath: "bar/baz",
- },
- {
- pathA: "/foo",
- pathB: "/foo/bar/baz",
- wantIsSubpath: false,
- },
- {
- pathA: "/foo",
- pathB: "/foo",
- wantIsSubpath: false,
- },
- {
- pathA: "/foobar",
- pathB: "/foo",
- wantIsSubpath: false,
- },
- {
- pathA: "/foo",
- pathB: "/foobar",
- wantIsSubpath: false,
- },
- {
- pathA: "/foo",
- pathB: "/foobar",
- wantIsSubpath: false,
- },
- {
- pathA: "/",
- pathB: "/foo",
- wantIsSubpath: false,
- },
- {
- pathA: "/foo",
- pathB: "/",
- wantIsSubpath: true,
- wantRelpath: "foo",
- },
- {
- pathA: "/foo/bar/../bar",
- pathB: "/foo",
- wantIsSubpath: true,
- wantRelpath: "bar",
- },
- {
- pathA: "/foo/bar",
- pathB: "/foo/../foo",
- wantIsSubpath: true,
- wantRelpath: "bar",
- },
- }
-
- for _, tc := range tcs {
- gotRelpath, gotIsSubpath := IsSubpath(tc.pathA, tc.pathB)
- if gotRelpath != tc.wantRelpath || gotIsSubpath != tc.wantIsSubpath {
- t.Errorf("IsSubpath(%q, %q) got %q %t, want %q %t", tc.pathA, tc.pathB, gotRelpath, gotIsSubpath, tc.wantRelpath, tc.wantIsSubpath)
- }
- }
-}
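
The SplitLast table above fully pins down the intended semantics: trailing slashes are trimmed (except for a lone "/"), the path is split at the last separator, and "." stands in for empty components. The following is an illustrative reimplementation of that behavior for reference only, not the deleted fs.SplitLast:

```go
// Reference sketch of the SplitLast semantics implied by the table above.
package main

import (
	"fmt"
	"strings"
)

func splitLast(p string) (dir, file string) {
	for len(p) > 1 && p[len(p)-1] == '/' {
		p = p[:len(p)-1] // drop trailing slashes, but keep the root "/"
	}
	switch p {
	case "":
		return ".", "."
	case "/":
		return "/", "."
	}
	i := strings.LastIndexByte(p, '/')
	switch {
	case i < 0:
		return ".", p // no separator: everything is the file component
	case i == 0:
		return "/", p[1:] // separator at the root
	default:
		return p[:i], p[i+1:]
	}
}

func main() {
	for _, p := range []string{"/", "/foo/bar/", "foo/../..", ""} {
		dir, file := splitLast(p)
		fmt.Printf("SplitLast(%q) = (%q, %q)\n", p, dir, file)
	}
}
```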
diff --git a/pkg/sentry/fs/proc/BUILD b/pkg/sentry/fs/proc/BUILD
deleted file mode 100644
index e6d74b949..000000000
--- a/pkg/sentry/fs/proc/BUILD
+++ /dev/null
@@ -1,75 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "proc",
- srcs = [
- "cgroup.go",
- "cpuinfo.go",
- "exec_args.go",
- "fds.go",
- "filesystems.go",
- "fs.go",
- "inode.go",
- "loadavg.go",
- "meminfo.go",
- "mounts.go",
- "net.go",
- "proc.go",
- "stat.go",
- "sys.go",
- "sys_net.go",
- "sys_net_state.go",
- "task.go",
- "uid_gid_map.go",
- "uptime.go",
- "version.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/fsutil",
- "//pkg/sentry/fs/proc/device",
- "//pkg/sentry/fs/proc/seqfile",
- "//pkg/sentry/fs/ramfs",
- "//pkg/sentry/fsbridge",
- "//pkg/sentry/inet",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/limits",
- "//pkg/sentry/mm",
- "//pkg/sentry/socket",
- "//pkg/sentry/socket/unix",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/usage",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/tcpip/header",
- "//pkg/tcpip/network/ipv4",
- "//pkg/usermem",
- "//pkg/waiter",
- ],
-)
-
-go_test(
- name = "proc_test",
- size = "small",
- srcs = [
- "net_test.go",
- "sys_net_test.go",
- ],
- library = ":proc",
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/sentry/inet",
- "//pkg/usermem",
- ],
-)
diff --git a/pkg/sentry/fs/proc/README.md b/pkg/sentry/fs/proc/README.md
deleted file mode 100644
index 6667a0916..000000000
--- a/pkg/sentry/fs/proc/README.md
+++ /dev/null
@@ -1,336 +0,0 @@
-This document tracks what is implemented in procfs. Refer to
-Documentation/filesystems/proc.txt in the Linux project for information about
-procfs generally.
-
-**NOTE**: This document is not guaranteed to be up to date. If you find an
-inconsistency, please file a bug.
-
-[TOC]
-
-## Kernel data
-
-The following files are implemented:
-
-<!-- mdformat off(don't wrap the table) -->
-
-| File /proc/ | Content |
-| :------------------------ | :---------------------------------------------------- |
-| [cpuinfo](#cpuinfo) | Info about the CPU |
-| [filesystems](#filesystems) | Supported filesystems |
-| [loadavg](#loadavg) | Load average of last 1, 5 & 15 minutes |
-| [meminfo](#meminfo) | Overall memory info |
-| [stat](#stat) | Overall kernel statistics |
-| [sys](#sys) | Change parameters within the kernel |
-| [uptime](#uptime) | Wall clock since boot, combined idle time of all cpus |
-| [version](#version) | Kernel version |
-
-<!-- mdformat on -->
-
-### cpuinfo
-
-```bash
-$ cat /proc/cpuinfo
-processor : 0
-vendor_id : GenuineIntel
-cpu family : 6
-model : 45
-model name : unknown
-stepping : unknown
-cpu MHz : 1234.588
-fpu : yes
-fpu_exception : yes
-cpuid level : 13
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx xsaveopt
-bogomips : 1234.59
-clflush size : 64
-cache_alignment : 64
-address sizes : 46 bits physical, 48 bits virtual
-power management:
-
-...
-```
-
-Notable divergences:
-
-Field name | Notes
-:--------------- | :---------------------------------------
-model name | Always unknown
-stepping | Always unknown
-fpu | Always yes
-fpu_exception | Always yes
-wp | Always yes
-bogomips | Bogus value (matches cpu MHz)
-clflush size | Always 64
-cache_alignment | Always 64
-address sizes | Always 46 bits physical, 48 bits virtual
-power management | Always blank
-
-Otherwise fields are derived from the sentry configuration.
-
-### filesystems
-
-```bash
-$ cat /proc/filesystems
-nodev 9p
-nodev devpts
-nodev devtmpfs
-nodev proc
-nodev sysfs
-nodev tmpfs
-```
-
-### loadavg
-
-```bash
-$ cat /proc/loadavg
-0.00 0.00 0.00 0/0 0
-```
-
-Column | Notes
-:------------------------------------ | :----------
-CPU.IO utilization in last 1 minute | Always zero
-CPU.IO utilization in last 5 minutes | Always zero
-CPU.IO utilization in last 15 minutes | Always zero
-Num currently running processes | Always zero
-Total num processes | Always zero
-
-TODO(b/62345059): Populate the columns with accurate statistics.
-
-### meminfo
-
-```bash
-$ cat /proc/meminfo
-MemTotal: 2097152 kB
-MemFree: 2083540 kB
-MemAvailable: 2083540 kB
-Buffers: 0 kB
-Cached: 4428 kB
-SwapCache: 0 kB
-Active: 10812 kB
-Inactive: 2216 kB
-Active(anon): 8600 kB
-Inactive(anon): 0 kB
-Active(file): 2212 kB
-Inactive(file): 2216 kB
-Unevictable: 0 kB
-Mlocked: 0 kB
-SwapTotal: 0 kB
-SwapFree: 0 kB
-Dirty: 0 kB
-Writeback: 0 kB
-AnonPages: 8600 kB
-Mapped: 4428 kB
-Shmem: 0 kB
-
-```
-
-Notable divergences:
-
-Field name | Notes
-:---------------- | :-----------------------------------------------------
-Buffers | Always zero, no block devices
-SwapCache | Always zero, no swap
-Inactive(anon) | Always zero, see SwapCache
-Unevictable | Always zero TODO(b/31823263)
-Mlocked | Always zero TODO(b/31823263)
-SwapTotal | Always zero, no swap
-SwapFree | Always zero, no swap
-Dirty | Always zero TODO(b/31823263)
-Writeback | Always zero TODO(b/31823263)
-MemAvailable | Uses the same value as MemFree since there is no swap.
-Slab | Missing
-SReclaimable | Missing
-SUnreclaim | Missing
-KernelStack | Missing
-PageTables | Missing
-NFS_Unstable | Missing
-Bounce | Missing
-WritebackTmp | Missing
-CommitLimit | Missing
-Committed_AS | Missing
-VmallocTotal | Missing
-VmallocUsed | Missing
-VmallocChunk | Missing
-HardwareCorrupted | Missing
-AnonHugePages | Missing
-ShmemHugePages | Missing
-ShmemPmdMapped | Missing
-HugePages_Total | Missing
-HugePages_Free | Missing
-HugePages_Rsvd | Missing
-HugePages_Surp | Missing
-Hugepagesize | Missing
-DirectMap4k | Missing
-DirectMap2M | Missing
-DirectMap1G | Missing
-
-### stat
-
-```bash
-$ cat /proc/stat
-cpu 0 0 0 0 0 0 0 0 0 0
-cpu0 0 0 0 0 0 0 0 0 0 0
-cpu1 0 0 0 0 0 0 0 0 0 0
-cpu2 0 0 0 0 0 0 0 0 0 0
-cpu3 0 0 0 0 0 0 0 0 0 0
-cpu4 0 0 0 0 0 0 0 0 0 0
-cpu5 0 0 0 0 0 0 0 0 0 0
-cpu6 0 0 0 0 0 0 0 0 0 0
-cpu7 0 0 0 0 0 0 0 0 0 0
-intr 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-ctxt 0
-btime 1504040968
-processes 0
-procs_running 0
-procs_blokkcked 0
-softirq 0 0 0 0 0 0 0 0 0 0 0
-```
-
-All fields except for `btime` are always zero.
-
-TODO(b/37226836): Populate with accurate fields.
-
-### sys
-
-```bash
-$ ls /proc/sys
-kernel vm
-```
-
-Directory | Notes
-:-------- | :----------------------------
-abi | Missing
-debug | Missing
-dev | Missing
-fs | Missing
-kernel | Contains hostname (only)
-net | Missing
-user | Missing
-vm | Contains mmap_min_addr (only)
-
-### uptime
-
-```bash
-$ cat /proc/uptime
-3204.62 0.00
-```
-
-Column | Notes
-:------------------------------- | :----------------------------
-Total num seconds system running | Time since procfs was mounted
-Number of seconds idle | Always zero
-
-### version
-
-```bash
-$ cat /proc/version
-Linux version 4.4 #1 SMP Sun Jan 10 15:06:54 PST 2016
-```
-
-## Process-specific data
-
-The following files are implemented:
-
-File /proc/PID | Content
-:---------------------- | :---------------------------------------------------
-[auxv](#auxv) | Copy of auxiliary vector for the process
-[cmdline](#cmdline) | Command line arguments
-[comm](#comm) | Command name associated with the process
-[environ](#environ) | Process environment
-[exe](#exe) | Symlink to the process's executable
-[fd](#fd) | Directory containing links to open file descriptors
-[fdinfo](#fdinfo) | Information associated with open file descriptors
-[gid_map](#gid_map) | Mappings for group IDs inside the user namespace
-[io](#io) | IO statistics
-[maps](#maps) | Memory mappings (anon, executables, library files)
-[mounts](#mounts) | Mounted filesystems
-[mountinfo](#mountinfo) | Information about mounts
-[ns](#ns) | Directory containing info about supported namespaces
-[stat](#stat) | Process statistics
-[statm](#statm) | Process memory statistics
-[status](#status) | Process status in human readable format
-[task](#task) | Directory containing info about running threads
-[uid_map](#uid_map) | Mappings for user IDs inside the user namespace
-
-### auxv
-
-TODO
-
-### cmdline
-
-TODO
-
-### comm
-
-TODO
-
-### environ
-
-TODO
-
-### exe
-
-TODO
-
-### fd
-
-TODO
-
-### fdinfo
-
-TODO
-
-### gid_map
-
-TODO
-
-### io
-
-Only has data for rchar, wchar, syscr, and syscw.
-
-TODO: add more detail.
-
-### maps
-
-TODO
-
-### mounts
-
-TODO
-
-### mountinfo
-
-TODO
-
-### ns
-
-TODO
-
-### stat
-
-Only has data for pid, comm, state, ppid, utime, stime, cutime, cstime,
-num_threads, and exit_signal.
-
-TODO: add more detail.
-
-### statm
-
-Only has data for vss and rss.
-
-TODO: add more detail.
-
-### status
-
-Contains data for Name, State, Tgid, Pid, Ppid, TracerPid, FDSize, VmSize,
-VmRSS, Threads, CapInh, CapPrm, CapEff, CapBnd, Seccomp.
-
-TODO: add more detail.
-
-### task
-
-TODO
-
-### uid_map
-
-TODO
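
The loadavg and uptime files documented above are plain whitespace-separated lines, so they are easy to consume from a guest program. Below is a small standalone sketch (hypothetical example code, not part of the sentry) that reads both from a real Linux /proc:

```go
// Standalone example of parsing the /proc/loadavg and /proc/uptime formats.
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

func main() {
	if b, err := os.ReadFile("/proc/loadavg"); err == nil {
		// Format: "<1min> <5min> <15min> <running>/<total> <last pid>".
		f := strings.Fields(string(b))
		if len(f) >= 3 {
			fmt.Printf("load averages: 1m=%s 5m=%s 15m=%s\n", f[0], f[1], f[2])
		}
	}
	if b, err := os.ReadFile("/proc/uptime"); err == nil {
		// Format: "<seconds up> <seconds idle>".
		f := strings.Fields(string(b))
		if len(f) == 2 {
			up, _ := strconv.ParseFloat(f[0], 64)
			fmt.Printf("up for %.0f seconds\n", up)
		}
	}
}
```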
diff --git a/pkg/sentry/fs/proc/device/BUILD b/pkg/sentry/fs/proc/device/BUILD
deleted file mode 100644
index 52c9aa93d..000000000
--- a/pkg/sentry/fs/proc/device/BUILD
+++ /dev/null
@@ -1,10 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "device",
- srcs = ["device.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = ["//pkg/sentry/device"],
-)
diff --git a/pkg/sentry/fs/proc/device/device_state_autogen.go b/pkg/sentry/fs/proc/device/device_state_autogen.go
new file mode 100644
index 000000000..4a5e3cc88
--- /dev/null
+++ b/pkg/sentry/fs/proc/device/device_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package device
diff --git a/pkg/sentry/fs/proc/net_test.go b/pkg/sentry/fs/proc/net_test.go
deleted file mode 100644
index f18681405..000000000
--- a/pkg/sentry/fs/proc/net_test.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package proc
-
-import (
- "reflect"
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/sentry/inet"
-)
-
-func newIPv6TestStack() *inet.TestStack {
- s := inet.NewTestStack()
- s.SupportsIPv6Flag = true
- return s
-}
-
-func TestIfinet6NoAddresses(t *testing.T) {
- n := &ifinet6{s: newIPv6TestStack()}
- if got := n.contents(); got != nil {
- t.Errorf("Got n.contents() = %v, want = %v", got, nil)
- }
-}
-
-func TestIfinet6(t *testing.T) {
- s := newIPv6TestStack()
- s.InterfacesMap[1] = inet.Interface{Name: "eth0"}
- s.InterfaceAddrsMap[1] = []inet.InterfaceAddr{
- {
- Family: linux.AF_INET6,
- PrefixLen: 128,
- Addr: []byte("\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"),
- },
- }
- s.InterfacesMap[2] = inet.Interface{Name: "eth1"}
- s.InterfaceAddrsMap[2] = []inet.InterfaceAddr{
- {
- Family: linux.AF_INET6,
- PrefixLen: 128,
- Addr: []byte("\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"),
- },
- }
- want := map[string]struct{}{
- "000102030405060708090a0b0c0d0e0f 01 80 00 00 eth0\n": {},
- "101112131415161718191a1b1c1d1e1f 02 80 00 00 eth1\n": {},
- }
-
- n := &ifinet6{s: s}
- contents := n.contents()
- if len(contents) != len(want) {
- t.Errorf("Got len(n.contents()) = %d, want = %d", len(contents), len(want))
- }
- got := map[string]struct{}{}
- for _, l := range contents {
- got[l] = struct{}{}
- }
-
- if !reflect.DeepEqual(got, want) {
- t.Errorf("Got n.contents() = %v, want = %v", got, want)
- }
-}
diff --git a/pkg/sentry/fs/proc/proc_state_autogen.go b/pkg/sentry/fs/proc/proc_state_autogen.go
new file mode 100644
index 000000000..8046c8bf9
--- /dev/null
+++ b/pkg/sentry/fs/proc/proc_state_autogen.go
@@ -0,0 +1,1835 @@
+// automatically generated by stateify.
+
+package proc
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (i *execArgInode) StateTypeName() string {
+ return "pkg/sentry/fs/proc.execArgInode"
+}
+
+func (i *execArgInode) StateFields() []string {
+ return []string{
+ "SimpleFileInode",
+ "arg",
+ "t",
+ }
+}
+
+func (i *execArgInode) beforeSave() {}
+
+// +checklocksignore
+func (i *execArgInode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.SimpleFileInode)
+ stateSinkObject.Save(1, &i.arg)
+ stateSinkObject.Save(2, &i.t)
+}
+
+func (i *execArgInode) afterLoad() {}
+
+// +checklocksignore
+func (i *execArgInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.SimpleFileInode)
+ stateSourceObject.Load(1, &i.arg)
+ stateSourceObject.Load(2, &i.t)
+}
+
+func (f *execArgFile) StateTypeName() string {
+ return "pkg/sentry/fs/proc.execArgFile"
+}
+
+func (f *execArgFile) StateFields() []string {
+ return []string{
+ "arg",
+ "t",
+ }
+}
+
+func (f *execArgFile) beforeSave() {}
+
+// +checklocksignore
+func (f *execArgFile) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.arg)
+ stateSinkObject.Save(1, &f.t)
+}
+
+func (f *execArgFile) afterLoad() {}
+
+// +checklocksignore
+func (f *execArgFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.arg)
+ stateSourceObject.Load(1, &f.t)
+}
+
+func (f *fdDir) StateTypeName() string {
+ return "pkg/sentry/fs/proc.fdDir"
+}
+
+func (f *fdDir) StateFields() []string {
+ return []string{
+ "Dir",
+ "t",
+ }
+}
+
+func (f *fdDir) beforeSave() {}
+
+// +checklocksignore
+func (f *fdDir) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.Dir)
+ stateSinkObject.Save(1, &f.t)
+}
+
+func (f *fdDir) afterLoad() {}
+
+// +checklocksignore
+func (f *fdDir) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.Dir)
+ stateSourceObject.Load(1, &f.t)
+}
+
+func (f *fdDirFile) StateTypeName() string {
+ return "pkg/sentry/fs/proc.fdDirFile"
+}
+
+func (f *fdDirFile) StateFields() []string {
+ return []string{
+ "isInfoFile",
+ "t",
+ }
+}
+
+func (f *fdDirFile) beforeSave() {}
+
+// +checklocksignore
+func (f *fdDirFile) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.isInfoFile)
+ stateSinkObject.Save(1, &f.t)
+}
+
+func (f *fdDirFile) afterLoad() {}
+
+// +checklocksignore
+func (f *fdDirFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.isInfoFile)
+ stateSourceObject.Load(1, &f.t)
+}
+
+func (fdid *fdInfoDir) StateTypeName() string {
+ return "pkg/sentry/fs/proc.fdInfoDir"
+}
+
+func (fdid *fdInfoDir) StateFields() []string {
+ return []string{
+ "Dir",
+ "t",
+ }
+}
+
+func (fdid *fdInfoDir) beforeSave() {}
+
+// +checklocksignore
+func (fdid *fdInfoDir) StateSave(stateSinkObject state.Sink) {
+ fdid.beforeSave()
+ stateSinkObject.Save(0, &fdid.Dir)
+ stateSinkObject.Save(1, &fdid.t)
+}
+
+func (fdid *fdInfoDir) afterLoad() {}
+
+// +checklocksignore
+func (fdid *fdInfoDir) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fdid.Dir)
+ stateSourceObject.Load(1, &fdid.t)
+}
+
+func (f *filesystemsData) StateTypeName() string {
+ return "pkg/sentry/fs/proc.filesystemsData"
+}
+
+func (f *filesystemsData) StateFields() []string {
+ return []string{}
+}
+
+func (f *filesystemsData) beforeSave() {}
+
+// +checklocksignore
+func (f *filesystemsData) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+}
+
+func (f *filesystemsData) afterLoad() {}
+
+// +checklocksignore
+func (f *filesystemsData) StateLoad(stateSourceObject state.Source) {
+}
+
+func (f *filesystem) StateTypeName() string {
+ return "pkg/sentry/fs/proc.filesystem"
+}
+
+func (f *filesystem) StateFields() []string {
+ return []string{}
+}
+
+func (f *filesystem) beforeSave() {}
+
+// +checklocksignore
+func (f *filesystem) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+}
+
+func (f *filesystem) afterLoad() {}
+
+// +checklocksignore
+func (f *filesystem) StateLoad(stateSourceObject state.Source) {
+}
+
+func (i *taskOwnedInodeOps) StateTypeName() string {
+ return "pkg/sentry/fs/proc.taskOwnedInodeOps"
+}
+
+func (i *taskOwnedInodeOps) StateFields() []string {
+ return []string{
+ "InodeOperations",
+ "t",
+ }
+}
+
+func (i *taskOwnedInodeOps) beforeSave() {}
+
+// +checklocksignore
+func (i *taskOwnedInodeOps) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.InodeOperations)
+ stateSinkObject.Save(1, &i.t)
+}
+
+func (i *taskOwnedInodeOps) afterLoad() {}
+
+// +checklocksignore
+func (i *taskOwnedInodeOps) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.InodeOperations)
+ stateSourceObject.Load(1, &i.t)
+}
+
+func (s *staticFileInodeOps) StateTypeName() string {
+ return "pkg/sentry/fs/proc.staticFileInodeOps"
+}
+
+func (s *staticFileInodeOps) StateFields() []string {
+ return []string{
+ "InodeSimpleAttributes",
+ "InodeStaticFileGetter",
+ }
+}
+
+func (s *staticFileInodeOps) beforeSave() {}
+
+// +checklocksignore
+func (s *staticFileInodeOps) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.InodeSimpleAttributes)
+ stateSinkObject.Save(1, &s.InodeStaticFileGetter)
+}
+
+func (s *staticFileInodeOps) afterLoad() {}
+
+// +checklocksignore
+func (s *staticFileInodeOps) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.InodeSimpleAttributes)
+ stateSourceObject.Load(1, &s.InodeStaticFileGetter)
+}
+
+func (d *loadavgData) StateTypeName() string {
+ return "pkg/sentry/fs/proc.loadavgData"
+}
+
+func (d *loadavgData) StateFields() []string {
+ return []string{}
+}
+
+func (d *loadavgData) beforeSave() {}
+
+// +checklocksignore
+func (d *loadavgData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+}
+
+func (d *loadavgData) afterLoad() {}
+
+// +checklocksignore
+func (d *loadavgData) StateLoad(stateSourceObject state.Source) {
+}
+
+func (d *meminfoData) StateTypeName() string {
+ return "pkg/sentry/fs/proc.meminfoData"
+}
+
+func (d *meminfoData) StateFields() []string {
+ return []string{
+ "k",
+ }
+}
+
+func (d *meminfoData) beforeSave() {}
+
+// +checklocksignore
+func (d *meminfoData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.k)
+}
+
+func (d *meminfoData) afterLoad() {}
+
+// +checklocksignore
+func (d *meminfoData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.k)
+}
+
+func (mif *mountInfoFile) StateTypeName() string {
+ return "pkg/sentry/fs/proc.mountInfoFile"
+}
+
+func (mif *mountInfoFile) StateFields() []string {
+ return []string{
+ "t",
+ }
+}
+
+func (mif *mountInfoFile) beforeSave() {}
+
+// +checklocksignore
+func (mif *mountInfoFile) StateSave(stateSinkObject state.Sink) {
+ mif.beforeSave()
+ stateSinkObject.Save(0, &mif.t)
+}
+
+func (mif *mountInfoFile) afterLoad() {}
+
+// +checklocksignore
+func (mif *mountInfoFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &mif.t)
+}
+
+func (mf *mountsFile) StateTypeName() string {
+ return "pkg/sentry/fs/proc.mountsFile"
+}
+
+func (mf *mountsFile) StateFields() []string {
+ return []string{
+ "t",
+ }
+}
+
+func (mf *mountsFile) beforeSave() {}
+
+// +checklocksignore
+func (mf *mountsFile) StateSave(stateSinkObject state.Sink) {
+ mf.beforeSave()
+ stateSinkObject.Save(0, &mf.t)
+}
+
+func (mf *mountsFile) afterLoad() {}
+
+// +checklocksignore
+func (mf *mountsFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &mf.t)
+}
+
+func (n *ifinet6) StateTypeName() string {
+ return "pkg/sentry/fs/proc.ifinet6"
+}
+
+func (n *ifinet6) StateFields() []string {
+ return []string{
+ "s",
+ }
+}
+
+func (n *ifinet6) beforeSave() {}
+
+// +checklocksignore
+func (n *ifinet6) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.s)
+}
+
+func (n *ifinet6) afterLoad() {}
+
+// +checklocksignore
+func (n *ifinet6) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.s)
+}
+
+func (n *netDev) StateTypeName() string {
+ return "pkg/sentry/fs/proc.netDev"
+}
+
+func (n *netDev) StateFields() []string {
+ return []string{
+ "s",
+ }
+}
+
+func (n *netDev) beforeSave() {}
+
+// +checklocksignore
+func (n *netDev) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.s)
+}
+
+func (n *netDev) afterLoad() {}
+
+// +checklocksignore
+func (n *netDev) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.s)
+}
+
+func (n *netSnmp) StateTypeName() string {
+ return "pkg/sentry/fs/proc.netSnmp"
+}
+
+func (n *netSnmp) StateFields() []string {
+ return []string{
+ "s",
+ }
+}
+
+func (n *netSnmp) beforeSave() {}
+
+// +checklocksignore
+func (n *netSnmp) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.s)
+}
+
+func (n *netSnmp) afterLoad() {}
+
+// +checklocksignore
+func (n *netSnmp) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.s)
+}
+
+func (n *netRoute) StateTypeName() string {
+ return "pkg/sentry/fs/proc.netRoute"
+}
+
+func (n *netRoute) StateFields() []string {
+ return []string{
+ "s",
+ }
+}
+
+func (n *netRoute) beforeSave() {}
+
+// +checklocksignore
+func (n *netRoute) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.s)
+}
+
+func (n *netRoute) afterLoad() {}
+
+// +checklocksignore
+func (n *netRoute) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.s)
+}
+
+func (n *netUnix) StateTypeName() string {
+ return "pkg/sentry/fs/proc.netUnix"
+}
+
+func (n *netUnix) StateFields() []string {
+ return []string{
+ "k",
+ }
+}
+
+func (n *netUnix) beforeSave() {}
+
+// +checklocksignore
+func (n *netUnix) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.k)
+}
+
+func (n *netUnix) afterLoad() {}
+
+// +checklocksignore
+func (n *netUnix) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.k)
+}
+
+func (n *netTCP) StateTypeName() string {
+ return "pkg/sentry/fs/proc.netTCP"
+}
+
+func (n *netTCP) StateFields() []string {
+ return []string{
+ "k",
+ }
+}
+
+func (n *netTCP) beforeSave() {}
+
+// +checklocksignore
+func (n *netTCP) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.k)
+}
+
+func (n *netTCP) afterLoad() {}
+
+// +checklocksignore
+func (n *netTCP) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.k)
+}
+
+func (n *netTCP6) StateTypeName() string {
+ return "pkg/sentry/fs/proc.netTCP6"
+}
+
+func (n *netTCP6) StateFields() []string {
+ return []string{
+ "k",
+ }
+}
+
+func (n *netTCP6) beforeSave() {}
+
+// +checklocksignore
+func (n *netTCP6) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.k)
+}
+
+func (n *netTCP6) afterLoad() {}
+
+// +checklocksignore
+func (n *netTCP6) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.k)
+}
+
+func (n *netUDP) StateTypeName() string {
+ return "pkg/sentry/fs/proc.netUDP"
+}
+
+func (n *netUDP) StateFields() []string {
+ return []string{
+ "k",
+ }
+}
+
+func (n *netUDP) beforeSave() {}
+
+// +checklocksignore
+func (n *netUDP) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.k)
+}
+
+func (n *netUDP) afterLoad() {}
+
+// +checklocksignore
+func (n *netUDP) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.k)
+}
+
+func (p *proc) StateTypeName() string {
+ return "pkg/sentry/fs/proc.proc"
+}
+
+func (p *proc) StateFields() []string {
+ return []string{
+ "Dir",
+ "k",
+ "pidns",
+ "cgroupControllers",
+ }
+}
+
+func (p *proc) beforeSave() {}
+
+// +checklocksignore
+func (p *proc) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+ stateSinkObject.Save(0, &p.Dir)
+ stateSinkObject.Save(1, &p.k)
+ stateSinkObject.Save(2, &p.pidns)
+ stateSinkObject.Save(3, &p.cgroupControllers)
+}
+
+func (p *proc) afterLoad() {}
+
+// +checklocksignore
+func (p *proc) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &p.Dir)
+ stateSourceObject.Load(1, &p.k)
+ stateSourceObject.Load(2, &p.pidns)
+ stateSourceObject.Load(3, &p.cgroupControllers)
+}
+
+func (s *self) StateTypeName() string {
+ return "pkg/sentry/fs/proc.self"
+}
+
+func (s *self) StateFields() []string {
+ return []string{
+ "Symlink",
+ "pidns",
+ }
+}
+
+func (s *self) beforeSave() {}
+
+// +checklocksignore
+func (s *self) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.Symlink)
+ stateSinkObject.Save(1, &s.pidns)
+}
+
+func (s *self) afterLoad() {}
+
+// +checklocksignore
+func (s *self) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.Symlink)
+ stateSourceObject.Load(1, &s.pidns)
+}
+
+func (s *threadSelf) StateTypeName() string {
+ return "pkg/sentry/fs/proc.threadSelf"
+}
+
+func (s *threadSelf) StateFields() []string {
+ return []string{
+ "Symlink",
+ "pidns",
+ }
+}
+
+func (s *threadSelf) beforeSave() {}
+
+// +checklocksignore
+func (s *threadSelf) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.Symlink)
+ stateSinkObject.Save(1, &s.pidns)
+}
+
+func (s *threadSelf) afterLoad() {}
+
+// +checklocksignore
+func (s *threadSelf) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.Symlink)
+ stateSourceObject.Load(1, &s.pidns)
+}
+
+func (rpf *rootProcFile) StateTypeName() string {
+ return "pkg/sentry/fs/proc.rootProcFile"
+}
+
+func (rpf *rootProcFile) StateFields() []string {
+ return []string{
+ "iops",
+ }
+}
+
+func (rpf *rootProcFile) beforeSave() {}
+
+// +checklocksignore
+func (rpf *rootProcFile) StateSave(stateSinkObject state.Sink) {
+ rpf.beforeSave()
+ stateSinkObject.Save(0, &rpf.iops)
+}
+
+func (rpf *rootProcFile) afterLoad() {}
+
+// +checklocksignore
+func (rpf *rootProcFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &rpf.iops)
+}
+
+func (s *statData) StateTypeName() string {
+ return "pkg/sentry/fs/proc.statData"
+}
+
+func (s *statData) StateFields() []string {
+ return []string{
+ "k",
+ }
+}
+
+func (s *statData) beforeSave() {}
+
+// +checklocksignore
+func (s *statData) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.k)
+}
+
+func (s *statData) afterLoad() {}
+
+// +checklocksignore
+func (s *statData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.k)
+}
+
+func (d *mmapMinAddrData) StateTypeName() string {
+ return "pkg/sentry/fs/proc.mmapMinAddrData"
+}
+
+func (d *mmapMinAddrData) StateFields() []string {
+ return []string{
+ "k",
+ }
+}
+
+func (d *mmapMinAddrData) beforeSave() {}
+
+// +checklocksignore
+func (d *mmapMinAddrData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.k)
+}
+
+func (d *mmapMinAddrData) afterLoad() {}
+
+// +checklocksignore
+func (d *mmapMinAddrData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.k)
+}
+
+func (o *overcommitMemory) StateTypeName() string {
+ return "pkg/sentry/fs/proc.overcommitMemory"
+}
+
+func (o *overcommitMemory) StateFields() []string {
+ return []string{}
+}
+
+func (o *overcommitMemory) beforeSave() {}
+
+// +checklocksignore
+func (o *overcommitMemory) StateSave(stateSinkObject state.Sink) {
+ o.beforeSave()
+}
+
+func (o *overcommitMemory) afterLoad() {}
+
+// +checklocksignore
+func (o *overcommitMemory) StateLoad(stateSourceObject state.Source) {
+}
+
+func (m *maxMapCount) StateTypeName() string {
+ return "pkg/sentry/fs/proc.maxMapCount"
+}
+
+func (m *maxMapCount) StateFields() []string {
+ return []string{}
+}
+
+func (m *maxMapCount) beforeSave() {}
+
+// +checklocksignore
+func (m *maxMapCount) StateSave(stateSinkObject state.Sink) {
+ m.beforeSave()
+}
+
+func (m *maxMapCount) afterLoad() {}
+
+// +checklocksignore
+func (m *maxMapCount) StateLoad(stateSourceObject state.Source) {
+}
+
+func (h *hostname) StateTypeName() string {
+ return "pkg/sentry/fs/proc.hostname"
+}
+
+func (h *hostname) StateFields() []string {
+ return []string{
+ "SimpleFileInode",
+ }
+}
+
+func (h *hostname) beforeSave() {}
+
+// +checklocksignore
+func (h *hostname) StateSave(stateSinkObject state.Sink) {
+ h.beforeSave()
+ stateSinkObject.Save(0, &h.SimpleFileInode)
+}
+
+func (h *hostname) afterLoad() {}
+
+// +checklocksignore
+func (h *hostname) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &h.SimpleFileInode)
+}
+
+func (hf *hostnameFile) StateTypeName() string {
+ return "pkg/sentry/fs/proc.hostnameFile"
+}
+
+func (hf *hostnameFile) StateFields() []string {
+ return []string{}
+}
+
+func (hf *hostnameFile) beforeSave() {}
+
+// +checklocksignore
+func (hf *hostnameFile) StateSave(stateSinkObject state.Sink) {
+ hf.beforeSave()
+}
+
+func (hf *hostnameFile) afterLoad() {}
+
+// +checklocksignore
+func (hf *hostnameFile) StateLoad(stateSourceObject state.Source) {
+}
+
+func (t *tcpMemInode) StateTypeName() string {
+ return "pkg/sentry/fs/proc.tcpMemInode"
+}
+
+func (t *tcpMemInode) StateFields() []string {
+ return []string{
+ "SimpleFileInode",
+ "dir",
+ "s",
+ "size",
+ }
+}
+
+// +checklocksignore
+func (t *tcpMemInode) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ stateSinkObject.Save(0, &t.SimpleFileInode)
+ stateSinkObject.Save(1, &t.dir)
+ stateSinkObject.Save(2, &t.s)
+ stateSinkObject.Save(3, &t.size)
+}
+
+// +checklocksignore
+func (t *tcpMemInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.SimpleFileInode)
+ stateSourceObject.Load(1, &t.dir)
+ stateSourceObject.LoadWait(2, &t.s)
+ stateSourceObject.Load(3, &t.size)
+ stateSourceObject.AfterLoad(t.afterLoad)
+}
+
+func (f *tcpMemFile) StateTypeName() string {
+ return "pkg/sentry/fs/proc.tcpMemFile"
+}
+
+func (f *tcpMemFile) StateFields() []string {
+ return []string{
+ "tcpMemInode",
+ }
+}
+
+func (f *tcpMemFile) beforeSave() {}
+
+// +checklocksignore
+func (f *tcpMemFile) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.tcpMemInode)
+}
+
+func (f *tcpMemFile) afterLoad() {}
+
+// +checklocksignore
+func (f *tcpMemFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.tcpMemInode)
+}
+
+func (s *tcpSack) StateTypeName() string {
+ return "pkg/sentry/fs/proc.tcpSack"
+}
+
+func (s *tcpSack) StateFields() []string {
+ return []string{
+ "SimpleFileInode",
+ "stack",
+ "enabled",
+ }
+}
+
+func (s *tcpSack) beforeSave() {}
+
+// +checklocksignore
+func (s *tcpSack) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.SimpleFileInode)
+ stateSinkObject.Save(1, &s.stack)
+ stateSinkObject.Save(2, &s.enabled)
+}
+
+// +checklocksignore
+func (s *tcpSack) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.SimpleFileInode)
+ stateSourceObject.LoadWait(1, &s.stack)
+ stateSourceObject.Load(2, &s.enabled)
+ stateSourceObject.AfterLoad(s.afterLoad)
+}
+
+func (f *tcpSackFile) StateTypeName() string {
+ return "pkg/sentry/fs/proc.tcpSackFile"
+}
+
+func (f *tcpSackFile) StateFields() []string {
+ return []string{
+ "tcpSack",
+ "stack",
+ }
+}
+
+func (f *tcpSackFile) beforeSave() {}
+
+// +checklocksignore
+func (f *tcpSackFile) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.tcpSack)
+ stateSinkObject.Save(1, &f.stack)
+}
+
+func (f *tcpSackFile) afterLoad() {}
+
+// +checklocksignore
+func (f *tcpSackFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.tcpSack)
+ stateSourceObject.LoadWait(1, &f.stack)
+}
+
+func (r *tcpRecovery) StateTypeName() string {
+ return "pkg/sentry/fs/proc.tcpRecovery"
+}
+
+func (r *tcpRecovery) StateFields() []string {
+ return []string{
+ "SimpleFileInode",
+ "stack",
+ "recovery",
+ }
+}
+
+func (r *tcpRecovery) beforeSave() {}
+
+// +checklocksignore
+func (r *tcpRecovery) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.SimpleFileInode)
+ stateSinkObject.Save(1, &r.stack)
+ stateSinkObject.Save(2, &r.recovery)
+}
+
+func (r *tcpRecovery) afterLoad() {}
+
+// +checklocksignore
+func (r *tcpRecovery) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.SimpleFileInode)
+ stateSourceObject.LoadWait(1, &r.stack)
+ stateSourceObject.Load(2, &r.recovery)
+}
+
+func (f *tcpRecoveryFile) StateTypeName() string {
+ return "pkg/sentry/fs/proc.tcpRecoveryFile"
+}
+
+func (f *tcpRecoveryFile) StateFields() []string {
+ return []string{
+ "tcpRecovery",
+ "stack",
+ }
+}
+
+func (f *tcpRecoveryFile) beforeSave() {}
+
+// +checklocksignore
+func (f *tcpRecoveryFile) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.tcpRecovery)
+ stateSinkObject.Save(1, &f.stack)
+}
+
+func (f *tcpRecoveryFile) afterLoad() {}
+
+// +checklocksignore
+func (f *tcpRecoveryFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.tcpRecovery)
+ stateSourceObject.LoadWait(1, &f.stack)
+}
+
+func (ipf *ipForwarding) StateTypeName() string {
+ return "pkg/sentry/fs/proc.ipForwarding"
+}
+
+func (ipf *ipForwarding) StateFields() []string {
+ return []string{
+ "SimpleFileInode",
+ "stack",
+ "enabled",
+ }
+}
+
+func (ipf *ipForwarding) beforeSave() {}
+
+// +checklocksignore
+func (ipf *ipForwarding) StateSave(stateSinkObject state.Sink) {
+ ipf.beforeSave()
+ stateSinkObject.Save(0, &ipf.SimpleFileInode)
+ stateSinkObject.Save(1, &ipf.stack)
+ stateSinkObject.Save(2, &ipf.enabled)
+}
+
+// +checklocksignore
+func (ipf *ipForwarding) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &ipf.SimpleFileInode)
+ stateSourceObject.LoadWait(1, &ipf.stack)
+ stateSourceObject.Load(2, &ipf.enabled)
+ stateSourceObject.AfterLoad(ipf.afterLoad)
+}
+
+func (f *ipForwardingFile) StateTypeName() string {
+ return "pkg/sentry/fs/proc.ipForwardingFile"
+}
+
+func (f *ipForwardingFile) StateFields() []string {
+ return []string{
+ "ipf",
+ "stack",
+ }
+}
+
+func (f *ipForwardingFile) beforeSave() {}
+
+// +checklocksignore
+func (f *ipForwardingFile) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.ipf)
+ stateSinkObject.Save(1, &f.stack)
+}
+
+func (f *ipForwardingFile) afterLoad() {}
+
+// +checklocksignore
+func (f *ipForwardingFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.ipf)
+ stateSourceObject.LoadWait(1, &f.stack)
+}
+
+func (in *portRangeInode) StateTypeName() string {
+ return "pkg/sentry/fs/proc.portRangeInode"
+}
+
+func (in *portRangeInode) StateFields() []string {
+ return []string{
+ "SimpleFileInode",
+ "stack",
+ "start",
+ "end",
+ }
+}
+
+func (in *portRangeInode) beforeSave() {}
+
+// +checklocksignore
+func (in *portRangeInode) StateSave(stateSinkObject state.Sink) {
+ in.beforeSave()
+ stateSinkObject.Save(0, &in.SimpleFileInode)
+ stateSinkObject.Save(1, &in.stack)
+ stateSinkObject.Save(2, &in.start)
+ stateSinkObject.Save(3, &in.end)
+}
+
+func (in *portRangeInode) afterLoad() {}
+
+// +checklocksignore
+func (in *portRangeInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &in.SimpleFileInode)
+ stateSourceObject.LoadWait(1, &in.stack)
+ stateSourceObject.Load(2, &in.start)
+ stateSourceObject.Load(3, &in.end)
+}
+
+func (pf *portRangeFile) StateTypeName() string {
+ return "pkg/sentry/fs/proc.portRangeFile"
+}
+
+func (pf *portRangeFile) StateFields() []string {
+ return []string{
+ "inode",
+ }
+}
+
+func (pf *portRangeFile) beforeSave() {}
+
+// +checklocksignore
+func (pf *portRangeFile) StateSave(stateSinkObject state.Sink) {
+ pf.beforeSave()
+ stateSinkObject.Save(0, &pf.inode)
+}
+
+func (pf *portRangeFile) afterLoad() {}
+
+// +checklocksignore
+func (pf *portRangeFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &pf.inode)
+}
+
+func (t *taskDir) StateTypeName() string {
+ return "pkg/sentry/fs/proc.taskDir"
+}
+
+func (t *taskDir) StateFields() []string {
+ return []string{
+ "Dir",
+ "t",
+ }
+}
+
+func (t *taskDir) beforeSave() {}
+
+// +checklocksignore
+func (t *taskDir) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ stateSinkObject.Save(0, &t.Dir)
+ stateSinkObject.Save(1, &t.t)
+}
+
+func (t *taskDir) afterLoad() {}
+
+// +checklocksignore
+func (t *taskDir) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.Dir)
+ stateSourceObject.Load(1, &t.t)
+}
+
+func (s *subtasks) StateTypeName() string {
+ return "pkg/sentry/fs/proc.subtasks"
+}
+
+func (s *subtasks) StateFields() []string {
+ return []string{
+ "Dir",
+ "t",
+ "p",
+ }
+}
+
+func (s *subtasks) beforeSave() {}
+
+// +checklocksignore
+func (s *subtasks) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.Dir)
+ stateSinkObject.Save(1, &s.t)
+ stateSinkObject.Save(2, &s.p)
+}
+
+func (s *subtasks) afterLoad() {}
+
+// +checklocksignore
+func (s *subtasks) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.Dir)
+ stateSourceObject.Load(1, &s.t)
+ stateSourceObject.Load(2, &s.p)
+}
+
+func (f *subtasksFile) StateTypeName() string {
+ return "pkg/sentry/fs/proc.subtasksFile"
+}
+
+func (f *subtasksFile) StateFields() []string {
+ return []string{
+ "t",
+ "pidns",
+ }
+}
+
+func (f *subtasksFile) beforeSave() {}
+
+// +checklocksignore
+func (f *subtasksFile) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.t)
+ stateSinkObject.Save(1, &f.pidns)
+}
+
+func (f *subtasksFile) afterLoad() {}
+
+// +checklocksignore
+func (f *subtasksFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.t)
+ stateSourceObject.Load(1, &f.pidns)
+}
+
+func (e *exe) StateTypeName() string {
+ return "pkg/sentry/fs/proc.exe"
+}
+
+func (e *exe) StateFields() []string {
+ return []string{
+ "Symlink",
+ "t",
+ }
+}
+
+func (e *exe) beforeSave() {}
+
+// +checklocksignore
+func (e *exe) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.Symlink)
+ stateSinkObject.Save(1, &e.t)
+}
+
+func (e *exe) afterLoad() {}
+
+// +checklocksignore
+func (e *exe) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.Symlink)
+ stateSourceObject.Load(1, &e.t)
+}
+
+func (e *cwd) StateTypeName() string {
+ return "pkg/sentry/fs/proc.cwd"
+}
+
+func (e *cwd) StateFields() []string {
+ return []string{
+ "Symlink",
+ "t",
+ }
+}
+
+func (e *cwd) beforeSave() {}
+
+// +checklocksignore
+func (e *cwd) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.Symlink)
+ stateSinkObject.Save(1, &e.t)
+}
+
+func (e *cwd) afterLoad() {}
+
+// +checklocksignore
+func (e *cwd) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.Symlink)
+ stateSourceObject.Load(1, &e.t)
+}
+
+func (n *namespaceSymlink) StateTypeName() string {
+ return "pkg/sentry/fs/proc.namespaceSymlink"
+}
+
+func (n *namespaceSymlink) StateFields() []string {
+ return []string{
+ "Symlink",
+ "t",
+ }
+}
+
+func (n *namespaceSymlink) beforeSave() {}
+
+// +checklocksignore
+func (n *namespaceSymlink) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.Symlink)
+ stateSinkObject.Save(1, &n.t)
+}
+
+func (n *namespaceSymlink) afterLoad() {}
+
+// +checklocksignore
+func (n *namespaceSymlink) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.Symlink)
+ stateSourceObject.Load(1, &n.t)
+}
+
+func (m *memData) StateTypeName() string {
+ return "pkg/sentry/fs/proc.memData"
+}
+
+func (m *memData) StateFields() []string {
+ return []string{
+ "SimpleFileInode",
+ "t",
+ }
+}
+
+func (m *memData) beforeSave() {}
+
+// +checklocksignore
+func (m *memData) StateSave(stateSinkObject state.Sink) {
+ m.beforeSave()
+ stateSinkObject.Save(0, &m.SimpleFileInode)
+ stateSinkObject.Save(1, &m.t)
+}
+
+func (m *memData) afterLoad() {}
+
+// +checklocksignore
+func (m *memData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &m.SimpleFileInode)
+ stateSourceObject.Load(1, &m.t)
+}
+
+func (m *memDataFile) StateTypeName() string {
+ return "pkg/sentry/fs/proc.memDataFile"
+}
+
+func (m *memDataFile) StateFields() []string {
+ return []string{
+ "t",
+ }
+}
+
+func (m *memDataFile) beforeSave() {}
+
+// +checklocksignore
+func (m *memDataFile) StateSave(stateSinkObject state.Sink) {
+ m.beforeSave()
+ stateSinkObject.Save(0, &m.t)
+}
+
+func (m *memDataFile) afterLoad() {}
+
+// +checklocksignore
+func (m *memDataFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &m.t)
+}
+
+func (md *mapsData) StateTypeName() string {
+ return "pkg/sentry/fs/proc.mapsData"
+}
+
+func (md *mapsData) StateFields() []string {
+ return []string{
+ "t",
+ }
+}
+
+func (md *mapsData) beforeSave() {}
+
+// +checklocksignore
+func (md *mapsData) StateSave(stateSinkObject state.Sink) {
+ md.beforeSave()
+ stateSinkObject.Save(0, &md.t)
+}
+
+func (md *mapsData) afterLoad() {}
+
+// +checklocksignore
+func (md *mapsData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &md.t)
+}
+
+func (sd *smapsData) StateTypeName() string {
+ return "pkg/sentry/fs/proc.smapsData"
+}
+
+func (sd *smapsData) StateFields() []string {
+ return []string{
+ "t",
+ }
+}
+
+func (sd *smapsData) beforeSave() {}
+
+// +checklocksignore
+func (sd *smapsData) StateSave(stateSinkObject state.Sink) {
+ sd.beforeSave()
+ stateSinkObject.Save(0, &sd.t)
+}
+
+func (sd *smapsData) afterLoad() {}
+
+// +checklocksignore
+func (sd *smapsData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &sd.t)
+}
+
+func (s *taskStatData) StateTypeName() string {
+ return "pkg/sentry/fs/proc.taskStatData"
+}
+
+func (s *taskStatData) StateFields() []string {
+ return []string{
+ "t",
+ "tgstats",
+ "pidns",
+ }
+}
+
+func (s *taskStatData) beforeSave() {}
+
+// +checklocksignore
+func (s *taskStatData) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.t)
+ stateSinkObject.Save(1, &s.tgstats)
+ stateSinkObject.Save(2, &s.pidns)
+}
+
+func (s *taskStatData) afterLoad() {}
+
+// +checklocksignore
+func (s *taskStatData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.t)
+ stateSourceObject.Load(1, &s.tgstats)
+ stateSourceObject.Load(2, &s.pidns)
+}
+
+func (s *statmData) StateTypeName() string {
+ return "pkg/sentry/fs/proc.statmData"
+}
+
+func (s *statmData) StateFields() []string {
+ return []string{
+ "t",
+ }
+}
+
+func (s *statmData) beforeSave() {}
+
+// +checklocksignore
+func (s *statmData) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.t)
+}
+
+func (s *statmData) afterLoad() {}
+
+// +checklocksignore
+func (s *statmData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.t)
+}
+
+func (s *statusData) StateTypeName() string {
+ return "pkg/sentry/fs/proc.statusData"
+}
+
+func (s *statusData) StateFields() []string {
+ return []string{
+ "t",
+ "pidns",
+ }
+}
+
+func (s *statusData) beforeSave() {}
+
+// +checklocksignore
+func (s *statusData) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.t)
+ stateSinkObject.Save(1, &s.pidns)
+}
+
+func (s *statusData) afterLoad() {}
+
+// +checklocksignore
+func (s *statusData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.t)
+ stateSourceObject.Load(1, &s.pidns)
+}
+
+func (i *ioData) StateTypeName() string {
+ return "pkg/sentry/fs/proc.ioData"
+}
+
+func (i *ioData) StateFields() []string {
+ return []string{
+ "ioUsage",
+ }
+}
+
+func (i *ioData) beforeSave() {}
+
+// +checklocksignore
+func (i *ioData) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.ioUsage)
+}
+
+func (i *ioData) afterLoad() {}
+
+// +checklocksignore
+func (i *ioData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.ioUsage)
+}
+
+func (c *comm) StateTypeName() string {
+ return "pkg/sentry/fs/proc.comm"
+}
+
+func (c *comm) StateFields() []string {
+ return []string{
+ "SimpleFileInode",
+ "t",
+ }
+}
+
+func (c *comm) beforeSave() {}
+
+// +checklocksignore
+func (c *comm) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.SimpleFileInode)
+ stateSinkObject.Save(1, &c.t)
+}
+
+func (c *comm) afterLoad() {}
+
+// +checklocksignore
+func (c *comm) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.SimpleFileInode)
+ stateSourceObject.Load(1, &c.t)
+}
+
+func (f *commFile) StateTypeName() string {
+ return "pkg/sentry/fs/proc.commFile"
+}
+
+func (f *commFile) StateFields() []string {
+ return []string{
+ "t",
+ }
+}
+
+func (f *commFile) beforeSave() {}
+
+// +checklocksignore
+func (f *commFile) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.t)
+}
+
+func (f *commFile) afterLoad() {}
+
+// +checklocksignore
+func (f *commFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.t)
+}
+
+func (a *auxvec) StateTypeName() string {
+ return "pkg/sentry/fs/proc.auxvec"
+}
+
+func (a *auxvec) StateFields() []string {
+ return []string{
+ "SimpleFileInode",
+ "t",
+ }
+}
+
+func (a *auxvec) beforeSave() {}
+
+// +checklocksignore
+func (a *auxvec) StateSave(stateSinkObject state.Sink) {
+ a.beforeSave()
+ stateSinkObject.Save(0, &a.SimpleFileInode)
+ stateSinkObject.Save(1, &a.t)
+}
+
+func (a *auxvec) afterLoad() {}
+
+// +checklocksignore
+func (a *auxvec) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &a.SimpleFileInode)
+ stateSourceObject.Load(1, &a.t)
+}
+
+func (f *auxvecFile) StateTypeName() string {
+ return "pkg/sentry/fs/proc.auxvecFile"
+}
+
+func (f *auxvecFile) StateFields() []string {
+ return []string{
+ "t",
+ }
+}
+
+func (f *auxvecFile) beforeSave() {}
+
+// +checklocksignore
+func (f *auxvecFile) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.t)
+}
+
+func (f *auxvecFile) afterLoad() {}
+
+// +checklocksignore
+func (f *auxvecFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.t)
+}
+
+func (o *oomScoreAdj) StateTypeName() string {
+ return "pkg/sentry/fs/proc.oomScoreAdj"
+}
+
+func (o *oomScoreAdj) StateFields() []string {
+ return []string{
+ "SimpleFileInode",
+ "t",
+ }
+}
+
+func (o *oomScoreAdj) beforeSave() {}
+
+// +checklocksignore
+func (o *oomScoreAdj) StateSave(stateSinkObject state.Sink) {
+ o.beforeSave()
+ stateSinkObject.Save(0, &o.SimpleFileInode)
+ stateSinkObject.Save(1, &o.t)
+}
+
+func (o *oomScoreAdj) afterLoad() {}
+
+// +checklocksignore
+func (o *oomScoreAdj) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &o.SimpleFileInode)
+ stateSourceObject.Load(1, &o.t)
+}
+
+func (f *oomScoreAdjFile) StateTypeName() string {
+ return "pkg/sentry/fs/proc.oomScoreAdjFile"
+}
+
+func (f *oomScoreAdjFile) StateFields() []string {
+ return []string{
+ "t",
+ }
+}
+
+func (f *oomScoreAdjFile) beforeSave() {}
+
+// +checklocksignore
+func (f *oomScoreAdjFile) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.t)
+}
+
+func (f *oomScoreAdjFile) afterLoad() {}
+
+// +checklocksignore
+func (f *oomScoreAdjFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.t)
+}
+
+func (imio *idMapInodeOperations) StateTypeName() string {
+ return "pkg/sentry/fs/proc.idMapInodeOperations"
+}
+
+func (imio *idMapInodeOperations) StateFields() []string {
+ return []string{
+ "InodeSimpleAttributes",
+ "InodeSimpleExtendedAttributes",
+ "t",
+ "gids",
+ }
+}
+
+func (imio *idMapInodeOperations) beforeSave() {}
+
+// +checklocksignore
+func (imio *idMapInodeOperations) StateSave(stateSinkObject state.Sink) {
+ imio.beforeSave()
+ stateSinkObject.Save(0, &imio.InodeSimpleAttributes)
+ stateSinkObject.Save(1, &imio.InodeSimpleExtendedAttributes)
+ stateSinkObject.Save(2, &imio.t)
+ stateSinkObject.Save(3, &imio.gids)
+}
+
+func (imio *idMapInodeOperations) afterLoad() {}
+
+// +checklocksignore
+func (imio *idMapInodeOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &imio.InodeSimpleAttributes)
+ stateSourceObject.Load(1, &imio.InodeSimpleExtendedAttributes)
+ stateSourceObject.Load(2, &imio.t)
+ stateSourceObject.Load(3, &imio.gids)
+}
+
+func (imfo *idMapFileOperations) StateTypeName() string {
+ return "pkg/sentry/fs/proc.idMapFileOperations"
+}
+
+func (imfo *idMapFileOperations) StateFields() []string {
+ return []string{
+ "iops",
+ }
+}
+
+func (imfo *idMapFileOperations) beforeSave() {}
+
+// +checklocksignore
+func (imfo *idMapFileOperations) StateSave(stateSinkObject state.Sink) {
+ imfo.beforeSave()
+ stateSinkObject.Save(0, &imfo.iops)
+}
+
+func (imfo *idMapFileOperations) afterLoad() {}
+
+// +checklocksignore
+func (imfo *idMapFileOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &imfo.iops)
+}
+
+func (u *uptime) StateTypeName() string {
+ return "pkg/sentry/fs/proc.uptime"
+}
+
+func (u *uptime) StateFields() []string {
+ return []string{
+ "SimpleFileInode",
+ "startTime",
+ }
+}
+
+func (u *uptime) beforeSave() {}
+
+// +checklocksignore
+func (u *uptime) StateSave(stateSinkObject state.Sink) {
+ u.beforeSave()
+ stateSinkObject.Save(0, &u.SimpleFileInode)
+ stateSinkObject.Save(1, &u.startTime)
+}
+
+func (u *uptime) afterLoad() {}
+
+// +checklocksignore
+func (u *uptime) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &u.SimpleFileInode)
+ stateSourceObject.Load(1, &u.startTime)
+}
+
+func (f *uptimeFile) StateTypeName() string {
+ return "pkg/sentry/fs/proc.uptimeFile"
+}
+
+func (f *uptimeFile) StateFields() []string {
+ return []string{
+ "startTime",
+ }
+}
+
+func (f *uptimeFile) beforeSave() {}
+
+// +checklocksignore
+func (f *uptimeFile) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.startTime)
+}
+
+func (f *uptimeFile) afterLoad() {}
+
+// +checklocksignore
+func (f *uptimeFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.startTime)
+}
+
+func (v *versionData) StateTypeName() string {
+ return "pkg/sentry/fs/proc.versionData"
+}
+
+func (v *versionData) StateFields() []string {
+ return []string{
+ "k",
+ }
+}
+
+func (v *versionData) beforeSave() {}
+
+// +checklocksignore
+func (v *versionData) StateSave(stateSinkObject state.Sink) {
+ v.beforeSave()
+ stateSinkObject.Save(0, &v.k)
+}
+
+func (v *versionData) afterLoad() {}
+
+// +checklocksignore
+func (v *versionData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &v.k)
+}
+
+func init() {
+ state.Register((*execArgInode)(nil))
+ state.Register((*execArgFile)(nil))
+ state.Register((*fdDir)(nil))
+ state.Register((*fdDirFile)(nil))
+ state.Register((*fdInfoDir)(nil))
+ state.Register((*filesystemsData)(nil))
+ state.Register((*filesystem)(nil))
+ state.Register((*taskOwnedInodeOps)(nil))
+ state.Register((*staticFileInodeOps)(nil))
+ state.Register((*loadavgData)(nil))
+ state.Register((*meminfoData)(nil))
+ state.Register((*mountInfoFile)(nil))
+ state.Register((*mountsFile)(nil))
+ state.Register((*ifinet6)(nil))
+ state.Register((*netDev)(nil))
+ state.Register((*netSnmp)(nil))
+ state.Register((*netRoute)(nil))
+ state.Register((*netUnix)(nil))
+ state.Register((*netTCP)(nil))
+ state.Register((*netTCP6)(nil))
+ state.Register((*netUDP)(nil))
+ state.Register((*proc)(nil))
+ state.Register((*self)(nil))
+ state.Register((*threadSelf)(nil))
+ state.Register((*rootProcFile)(nil))
+ state.Register((*statData)(nil))
+ state.Register((*mmapMinAddrData)(nil))
+ state.Register((*overcommitMemory)(nil))
+ state.Register((*maxMapCount)(nil))
+ state.Register((*hostname)(nil))
+ state.Register((*hostnameFile)(nil))
+ state.Register((*tcpMemInode)(nil))
+ state.Register((*tcpMemFile)(nil))
+ state.Register((*tcpSack)(nil))
+ state.Register((*tcpSackFile)(nil))
+ state.Register((*tcpRecovery)(nil))
+ state.Register((*tcpRecoveryFile)(nil))
+ state.Register((*ipForwarding)(nil))
+ state.Register((*ipForwardingFile)(nil))
+ state.Register((*portRangeInode)(nil))
+ state.Register((*portRangeFile)(nil))
+ state.Register((*taskDir)(nil))
+ state.Register((*subtasks)(nil))
+ state.Register((*subtasksFile)(nil))
+ state.Register((*exe)(nil))
+ state.Register((*cwd)(nil))
+ state.Register((*namespaceSymlink)(nil))
+ state.Register((*memData)(nil))
+ state.Register((*memDataFile)(nil))
+ state.Register((*mapsData)(nil))
+ state.Register((*smapsData)(nil))
+ state.Register((*taskStatData)(nil))
+ state.Register((*statmData)(nil))
+ state.Register((*statusData)(nil))
+ state.Register((*ioData)(nil))
+ state.Register((*comm)(nil))
+ state.Register((*commFile)(nil))
+ state.Register((*auxvec)(nil))
+ state.Register((*auxvecFile)(nil))
+ state.Register((*oomScoreAdj)(nil))
+ state.Register((*oomScoreAdjFile)(nil))
+ state.Register((*idMapInodeOperations)(nil))
+ state.Register((*idMapFileOperations)(nil))
+ state.Register((*uptime)(nil))
+ state.Register((*uptimeFile)(nil))
+ state.Register((*versionData)(nil))
+}
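
For context, every type in the generated file above follows the same stateify contract: declare a type name and an ordered field list, save and load each field by slot index, and register the type so the state package can reconstruct it on restore. The sketch below is a minimal hand-written equivalent, assuming a hypothetical counter type; the package, type, and field names are illustrative only and are not part of this change.

    // Package example shows, by hand, the contract that stateify generates above.
    package example

    import (
    	"gvisor.dev/gvisor/pkg/state"
    )

    // counter is a hypothetical savable type; the name and field are illustrative.
    type counter struct {
    	hits uint64
    }

    // StateTypeName returns the fully qualified name recorded in the saved state.
    func (c *counter) StateTypeName() string {
    	return "example.counter"
    }

    // StateFields lists the saved fields in slot order.
    func (c *counter) StateFields() []string {
    	return []string{
    		"hits",
    	}
    }

    func (c *counter) beforeSave() {}

    // +checklocksignore
    func (c *counter) StateSave(stateSinkObject state.Sink) {
    	c.beforeSave()
    	stateSinkObject.Save(0, &c.hits)
    }

    func (c *counter) afterLoad() {}

    // +checklocksignore
    func (c *counter) StateLoad(stateSourceObject state.Source) {
    	stateSourceObject.Load(0, &c.hits)
    }

    func init() {
    	// Registration lets the state package locate the type on restore.
    	state.Register((*counter)(nil))
    }

Types that need post-restore work omit the trivial afterLoad stub and instead call stateSourceObject.AfterLoad from StateLoad, as tcpMemInode, tcpSack, and ipForwarding do above; fields that must be fully restored before use are loaded with LoadWait rather than Load.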
diff --git a/pkg/sentry/fs/proc/seqfile/BUILD b/pkg/sentry/fs/proc/seqfile/BUILD
deleted file mode 100644
index 90bd32345..000000000
--- a/pkg/sentry/fs/proc/seqfile/BUILD
+++ /dev/null
@@ -1,36 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "seqfile",
- srcs = ["seqfile.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/fsutil",
- "//pkg/sentry/fs/proc/device",
- "//pkg/sentry/kernel/time",
- "//pkg/sync",
- "//pkg/usermem",
- "//pkg/waiter",
- ],
-)
-
-go_test(
- name = "seqfile_test",
- size = "small",
- srcs = ["seqfile_test.go"],
- library = ":seqfile",
- deps = [
- "//pkg/context",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/ramfs",
- "//pkg/usermem",
- ],
-)
diff --git a/pkg/sentry/fs/proc/seqfile/seqfile_state_autogen.go b/pkg/sentry/fs/proc/seqfile/seqfile_state_autogen.go
new file mode 100644
index 000000000..1cd9b313b
--- /dev/null
+++ b/pkg/sentry/fs/proc/seqfile/seqfile_state_autogen.go
@@ -0,0 +1,106 @@
+// automatically generated by stateify.
+
+package seqfile
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (s *SeqData) StateTypeName() string {
+ return "pkg/sentry/fs/proc/seqfile.SeqData"
+}
+
+func (s *SeqData) StateFields() []string {
+ return []string{
+ "Buf",
+ "Handle",
+ }
+}
+
+func (s *SeqData) beforeSave() {}
+
+// +checklocksignore
+func (s *SeqData) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.Buf)
+ stateSinkObject.Save(1, &s.Handle)
+}
+
+func (s *SeqData) afterLoad() {}
+
+// +checklocksignore
+func (s *SeqData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.Buf)
+ stateSourceObject.Load(1, &s.Handle)
+}
+
+func (s *SeqFile) StateTypeName() string {
+ return "pkg/sentry/fs/proc/seqfile.SeqFile"
+}
+
+func (s *SeqFile) StateFields() []string {
+ return []string{
+ "InodeSimpleExtendedAttributes",
+ "InodeSimpleAttributes",
+ "SeqSource",
+ "source",
+ "generation",
+ "lastRead",
+ }
+}
+
+func (s *SeqFile) beforeSave() {}
+
+// +checklocksignore
+func (s *SeqFile) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.InodeSimpleExtendedAttributes)
+ stateSinkObject.Save(1, &s.InodeSimpleAttributes)
+ stateSinkObject.Save(2, &s.SeqSource)
+ stateSinkObject.Save(3, &s.source)
+ stateSinkObject.Save(4, &s.generation)
+ stateSinkObject.Save(5, &s.lastRead)
+}
+
+func (s *SeqFile) afterLoad() {}
+
+// +checklocksignore
+func (s *SeqFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.InodeSimpleExtendedAttributes)
+ stateSourceObject.Load(1, &s.InodeSimpleAttributes)
+ stateSourceObject.Load(2, &s.SeqSource)
+ stateSourceObject.Load(3, &s.source)
+ stateSourceObject.Load(4, &s.generation)
+ stateSourceObject.Load(5, &s.lastRead)
+}
+
+func (sfo *seqFileOperations) StateTypeName() string {
+ return "pkg/sentry/fs/proc/seqfile.seqFileOperations"
+}
+
+func (sfo *seqFileOperations) StateFields() []string {
+ return []string{
+ "seqFile",
+ }
+}
+
+func (sfo *seqFileOperations) beforeSave() {}
+
+// +checklocksignore
+func (sfo *seqFileOperations) StateSave(stateSinkObject state.Sink) {
+ sfo.beforeSave()
+ stateSinkObject.Save(0, &sfo.seqFile)
+}
+
+func (sfo *seqFileOperations) afterLoad() {}
+
+// +checklocksignore
+func (sfo *seqFileOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &sfo.seqFile)
+}
+
+func init() {
+ state.Register((*SeqData)(nil))
+ state.Register((*SeqFile)(nil))
+ state.Register((*seqFileOperations)(nil))
+}
diff --git a/pkg/sentry/fs/proc/seqfile/seqfile_test.go b/pkg/sentry/fs/proc/seqfile/seqfile_test.go
deleted file mode 100644
index 98e394569..000000000
--- a/pkg/sentry/fs/proc/seqfile/seqfile_test.go
+++ /dev/null
@@ -1,279 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package seqfile
-
-import (
- "bytes"
- "fmt"
- "io"
- "testing"
-
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/fs"
- "gvisor.dev/gvisor/pkg/sentry/fs/ramfs"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-type seqTest struct {
- actual []SeqData
- update bool
-}
-
-func (s *seqTest) Init() {
- var sq []SeqData
- // Create some SeqData.
- for i := 0; i < 10; i++ {
- var b []byte
- for j := 0; j < 10; j++ {
- b = append(b, byte(i))
- }
- sq = append(sq, SeqData{
- Buf: b,
- Handle: &testHandle{i: i},
- })
- }
- s.actual = sq
-}
-
-// NeedsUpdate reports whether we need to update the data we've previously read.
-func (s *seqTest) NeedsUpdate(int64) bool {
- return s.update
-}
-
-// ReadSeqFileData returns a slice of SeqData whose handles are
-// greater than the given handle.
-func (s *seqTest) ReadSeqFileData(ctx context.Context, handle SeqHandle) ([]SeqData, int64) {
- if handle == nil {
- return s.actual, 0
- }
- h := *handle.(*testHandle)
- var ret []SeqData
- for _, b := range s.actual {
- // We want the next one.
- h2 := *b.Handle.(*testHandle)
- if h2.i > h.i {
- ret = append(ret, b)
- }
- }
- return ret, 0
-}
-
-// Flatten a slice of slices into one slice.
-func flatten(buf ...[]byte) []byte {
- var flat []byte
- for _, b := range buf {
- flat = append(flat, b...)
- }
- return flat
-}
-
-type testHandle struct {
- i int
-}
-
-type testTable struct {
- offset int64
- readBufferSize int
- expectedData []byte
- expectedError error
-}
-
-func runTableTests(ctx context.Context, table []testTable, dirent *fs.Dirent) error {
- for _, tt := range table {
- file, err := dirent.Inode.InodeOperations.GetFile(ctx, dirent, fs.FileFlags{Read: true})
- if err != nil {
- return fmt.Errorf("GetFile returned error: %v", err)
- }
-
- data := make([]byte, tt.readBufferSize)
- resultLen, err := file.Preadv(ctx, usermem.BytesIOSequence(data), tt.offset)
- if err != tt.expectedError {
- return fmt.Errorf("t.Preadv(len: %v, offset: %v) (error) => %v expected %v", tt.readBufferSize, tt.offset, err, tt.expectedError)
- }
- expectedLen := int64(len(tt.expectedData))
- if resultLen != expectedLen {
- // We make this just an error so we fall through and print the data below.
- return fmt.Errorf("t.Preadv(len: %v, offset: %v) (size) => %v expected %v", tt.readBufferSize, tt.offset, resultLen, expectedLen)
- }
- if !bytes.Equal(data[:expectedLen], tt.expectedData) {
- return fmt.Errorf("t.Preadv(len: %v, offset: %v) (data) => %v expected %v", tt.readBufferSize, tt.offset, data[:expectedLen], tt.expectedData)
- }
- }
- return nil
-}
-
-func TestSeqFile(t *testing.T) {
- testSource := &seqTest{}
- testSource.Init()
-
- // Create a file that can be R/W.
- ctx := contexttest.Context(t)
- m := fs.NewPseudoMountSource(ctx)
- contents := map[string]*fs.Inode{
- "foo": NewSeqFileInode(ctx, testSource, m),
- }
- root := ramfs.NewDir(ctx, contents, fs.RootOwner, fs.FilePermsFromMode(0777))
-
- // How about opening it?
- inode := fs.NewInode(ctx, root, m, fs.StableAttr{Type: fs.Directory})
- dirent2, err := root.Lookup(ctx, inode, "foo")
- if err != nil {
- t.Fatalf("failed to walk to foo for n2: %v", err)
- }
- n2 := dirent2.Inode.InodeOperations
- file2, err := n2.GetFile(ctx, dirent2, fs.FileFlags{Read: true, Write: true})
- if err != nil {
- t.Fatalf("GetFile returned error: %v", err)
- }
-
- // Writing?
- if _, err := file2.Writev(ctx, usermem.BytesIOSequence([]byte("test"))); err == nil {
- t.Fatalf("managed to write to n2: %v", err)
- }
-
- // How about reading?
- dirent3, err := root.Lookup(ctx, inode, "foo")
- if err != nil {
- t.Fatalf("failed to walk to foo: %v", err)
- }
- n3 := dirent3.Inode.InodeOperations
- if n2 != n3 {
- t.Error("got n2 != n3, want same")
- }
-
- testSource.update = true
-
- table := []testTable{
- // Read past the end.
- {100, 4, []byte{}, io.EOF},
- {110, 4, []byte{}, io.EOF},
- {200, 4, []byte{}, io.EOF},
- // Read a truncated first line.
- {0, 4, testSource.actual[0].Buf[:4], nil},
- // Read the whole first line.
- {0, 10, testSource.actual[0].Buf, nil},
- // Read the whole first line + 5 bytes of second line.
- {0, 15, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf[:5]), nil},
- // First 4 bytes of the second line.
- {10, 4, testSource.actual[1].Buf[:4], nil},
- // Read the two first lines.
- {0, 20, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf), nil},
- // Read three lines.
- {0, 30, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf, testSource.actual[2].Buf), nil},
- // Read everything, but use a bigger buffer than necessary.
- {0, 150, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf, testSource.actual[2].Buf, testSource.actual[3].Buf, testSource.actual[4].Buf, testSource.actual[5].Buf, testSource.actual[6].Buf, testSource.actual[7].Buf, testSource.actual[8].Buf, testSource.actual[9].Buf), nil},
- // Read the last 3 bytes.
- {97, 10, testSource.actual[9].Buf[7:], nil},
- }
- if err := runTableTests(ctx, table, dirent2); err != nil {
- t.Errorf("runTableTest failed with testSource.update = %v : %v", testSource.update, err)
- }
-
- // Disable updates and do it again.
- testSource.update = false
- if err := runTableTests(ctx, table, dirent2); err != nil {
- t.Errorf("runTableTest failed with testSource.update = %v: %v", testSource.update, err)
- }
-}
-
-// Test that we behave correctly when the file is updated.
-func TestSeqFileFileUpdated(t *testing.T) {
- testSource := &seqTest{}
- testSource.Init()
- testSource.update = true
-
- // Create a file that can be R/W.
- ctx := contexttest.Context(t)
- m := fs.NewPseudoMountSource(ctx)
- contents := map[string]*fs.Inode{
- "foo": NewSeqFileInode(ctx, testSource, m),
- }
- root := ramfs.NewDir(ctx, contents, fs.RootOwner, fs.FilePermsFromMode(0777))
-
- // How about opening it?
- inode := fs.NewInode(ctx, root, m, fs.StableAttr{Type: fs.Directory})
- dirent2, err := root.Lookup(ctx, inode, "foo")
- if err != nil {
- t.Fatalf("failed to walk to foo for dirent2: %v", err)
- }
-
- table := []testTable{
- {0, 16, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf[:6]), nil},
- }
- if err := runTableTests(ctx, table, dirent2); err != nil {
- t.Errorf("runTableTest failed: %v", err)
- }
- // Delete the first entry.
- cut := testSource.actual[0].Buf
- testSource.actual = testSource.actual[1:]
-
- table = []testTable{
- // Try reading buffer 0 with an offset. This will not delete the old data.
- {1, 5, cut[1:6], nil},
- // Reset our file by reading at offset 0.
- {0, 10, testSource.actual[0].Buf, nil},
- {16, 14, flatten(testSource.actual[1].Buf[6:], testSource.actual[2].Buf), nil},
- // Read the same data a second time.
- {16, 14, flatten(testSource.actual[1].Buf[6:], testSource.actual[2].Buf), nil},
- // Read the following two lines.
- {30, 20, flatten(testSource.actual[3].Buf, testSource.actual[4].Buf), nil},
- }
- if err := runTableTests(ctx, table, dirent2); err != nil {
- t.Errorf("runTableTest failed after removing first entry: %v", err)
- }
-
- // Add a new duplicate line in the middle (6666...)
- after := testSource.actual[5:]
- testSource.actual = testSource.actual[:4]
- // Note the list must be sorted.
- testSource.actual = append(testSource.actual, after[0])
- testSource.actual = append(testSource.actual, after...)
-
- table = []testTable{
- {50, 20, flatten(testSource.actual[4].Buf, testSource.actual[5].Buf), nil},
- }
- if err := runTableTests(ctx, table, dirent2); err != nil {
- t.Errorf("runTableTest failed after adding middle entry: %v", err)
- }
- // This will be used in a later test.
- oldTestData := testSource.actual
-
- // Delete everything.
- testSource.actual = testSource.actual[:0]
- table = []testTable{
- {20, 20, []byte{}, io.EOF},
- }
- if err := runTableTests(ctx, table, dirent2); err != nil {
- t.Errorf("runTableTest failed after removing all entries: %v", err)
- }
- // Restore some of the data.
- testSource.actual = oldTestData[:1]
- table = []testTable{
- {6, 20, testSource.actual[0].Buf[6:], nil},
- }
- if err := runTableTests(ctx, table, dirent2); err != nil {
- t.Errorf("runTableTest failed after adding first entry back: %v", err)
- }
-
- // Re-extend the data
- testSource.actual = oldTestData
- table = []testTable{
- {30, 20, flatten(testSource.actual[3].Buf, testSource.actual[4].Buf), nil},
- }
- if err := runTableTests(ctx, table, dirent2); err != nil {
- t.Errorf("runTableTest failed after extending testSource: %v", err)
- }
-}
diff --git a/pkg/sentry/fs/proc/sys_net_test.go b/pkg/sentry/fs/proc/sys_net_test.go
deleted file mode 100644
index 6ef5738e7..000000000
--- a/pkg/sentry/fs/proc/sys_net_test.go
+++ /dev/null
@@ -1,198 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package proc
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/sentry/inet"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-func TestQuerySendBufferSize(t *testing.T) {
- ctx := context.Background()
- s := inet.NewTestStack()
- s.TCPSendBufSize = inet.TCPBufferSize{100, 200, 300}
- tmi := &tcpMemInode{s: s, dir: tcpWMem}
- tmf := &tcpMemFile{tcpMemInode: tmi}
-
- buf := make([]byte, 100)
- dst := usermem.BytesIOSequence(buf)
- n, err := tmf.Read(ctx, nil, dst, 0)
- if err != nil {
- t.Fatalf("Read failed: %v", err)
- }
-
- if got, want := string(buf[:n]), "100\t200\t300\n"; got != want {
- t.Fatalf("Bad string: got %v, want %v", got, want)
- }
-}
-
-func TestQueryRecvBufferSize(t *testing.T) {
- ctx := context.Background()
- s := inet.NewTestStack()
- s.TCPRecvBufSize = inet.TCPBufferSize{100, 200, 300}
- tmi := &tcpMemInode{s: s, dir: tcpRMem}
- tmf := &tcpMemFile{tcpMemInode: tmi}
-
- buf := make([]byte, 100)
- dst := usermem.BytesIOSequence(buf)
- n, err := tmf.Read(ctx, nil, dst, 0)
- if err != nil {
- t.Fatalf("Read failed: %v", err)
- }
-
- if got, want := string(buf[:n]), "100\t200\t300\n"; got != want {
- t.Fatalf("Bad string: got %v, want %v", got, want)
- }
-}
-
-var cases = []struct {
- str string
- initial inet.TCPBufferSize
- final inet.TCPBufferSize
-}{
- {
- str: "",
- initial: inet.TCPBufferSize{1, 2, 3},
- final: inet.TCPBufferSize{1, 2, 3},
- },
- {
- str: "100\n",
- initial: inet.TCPBufferSize{1, 100, 200},
- final: inet.TCPBufferSize{100, 100, 200},
- },
- {
- str: "100 200 300\n",
- initial: inet.TCPBufferSize{1, 2, 3},
- final: inet.TCPBufferSize{100, 200, 300},
- },
-}
-
-func TestConfigureSendBufferSize(t *testing.T) {
- ctx := context.Background()
- s := inet.NewTestStack()
- for _, c := range cases {
- s.TCPSendBufSize = c.initial
- tmi := &tcpMemInode{s: s, dir: tcpWMem}
- tmf := &tcpMemFile{tcpMemInode: tmi}
-
- // Write the values.
- src := usermem.BytesIOSequence([]byte(c.str))
- if n, err := tmf.Write(ctx, nil, src, 0); n != int64(len(c.str)) || err != nil {
- t.Errorf("Write, case = %q: got (%d, %v), wanted (%d, nil)", c.str, n, err, len(c.str))
- }
-
- // Read the values from the stack and check them.
- if s.TCPSendBufSize != c.final {
- t.Errorf("TCPSendBufferSize, case = %q: got %v, wanted %v", c.str, s.TCPSendBufSize, c.final)
- }
- }
-}
-
-func TestConfigureRecvBufferSize(t *testing.T) {
- ctx := context.Background()
- s := inet.NewTestStack()
- for _, c := range cases {
- s.TCPRecvBufSize = c.initial
- tmi := &tcpMemInode{s: s, dir: tcpRMem}
- tmf := &tcpMemFile{tcpMemInode: tmi}
-
- // Write the values.
- src := usermem.BytesIOSequence([]byte(c.str))
- if n, err := tmf.Write(ctx, nil, src, 0); n != int64(len(c.str)) || err != nil {
- t.Errorf("Write, case = %q: got (%d, %v), wanted (%d, nil)", c.str, n, err, len(c.str))
- }
-
- // Read the values from the stack and check them.
- if s.TCPRecvBufSize != c.final {
- t.Errorf("TCPRecvBufferSize, case = %q: got %v, wanted %v", c.str, s.TCPRecvBufSize, c.final)
- }
- }
-}
-
-// TestIPForwarding tests the implementation of
-// /proc/sys/net/ipv4/ip_forward.
-func TestIPForwarding(t *testing.T) {
- ctx := context.Background()
- s := inet.NewTestStack()
-
- var cases = []struct {
- comment string
- initial bool
- str string
- final bool
- }{
- {
- comment: `Forwarding is disabled; write 1 and enable forwarding`,
- initial: false,
- str: "1",
- final: true,
- },
- {
- comment: `Forwarding is disabled; write 0 and disable forwarding`,
- initial: false,
- str: "0",
- final: false,
- },
- {
- comment: `Forwarding is enabled; write 1 and enable forwarding`,
- initial: true,
- str: "1",
- final: true,
- },
- {
- comment: `Forwarding is enabled; write 0 and disable forwarding`,
- initial: true,
- str: "0",
- final: false,
- },
- {
- comment: `Forwarding is disabled; write 2404 and enable forwarding`,
- initial: false,
- str: "2404",
- final: true,
- },
- {
- comment: `Forwarding is enabled; write 2404 and enable forwarding`,
- initial: true,
- str: "2404",
- final: true,
- },
- }
- for _, c := range cases {
- t.Run(c.comment, func(t *testing.T) {
- s.IPForwarding = c.initial
- ipf := &ipForwarding{stack: s}
- file := &ipForwardingFile{
- stack: s,
- ipf: ipf,
- }
-
- // Write the values.
- src := usermem.BytesIOSequence([]byte(c.str))
- if n, err := file.Write(ctx, nil, src, 0); n != int64(len(c.str)) || err != nil {
- t.Errorf("file.Write(ctx, nil, %q, 0) = (%d, %v); want (%d, nil)", c.str, n, err, len(c.str))
- }
-
- // Read the values from the stack and check them.
- if got, want := s.IPForwarding, c.final; got != want {
- t.Errorf("s.IPForwarding incorrect; got: %v, want: %v", got, want)
- }
-
- })
- }
-}
diff --git a/pkg/sentry/fs/ramfs/BUILD b/pkg/sentry/fs/ramfs/BUILD
deleted file mode 100644
index b46567cf8..000000000
--- a/pkg/sentry/fs/ramfs/BUILD
+++ /dev/null
@@ -1,39 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "ramfs",
- srcs = [
- "dir.go",
- "socket.go",
- "symlink.go",
- "tree.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/anon",
- "//pkg/sentry/fs/fsutil",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/waiter",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "ramfs_test",
- size = "small",
- srcs = ["tree_test.go"],
- library = ":ramfs",
- deps = [
- "//pkg/sentry/contexttest",
- "//pkg/sentry/fs",
- ],
-)
diff --git a/pkg/sentry/fs/ramfs/ramfs_state_autogen.go b/pkg/sentry/fs/ramfs/ramfs_state_autogen.go
new file mode 100644
index 000000000..53c836415
--- /dev/null
+++ b/pkg/sentry/fs/ramfs/ramfs_state_autogen.go
@@ -0,0 +1,182 @@
+// automatically generated by stateify.
+
+package ramfs
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (d *Dir) StateTypeName() string {
+ return "pkg/sentry/fs/ramfs.Dir"
+}
+
+func (d *Dir) StateFields() []string {
+ return []string{
+ "InodeSimpleAttributes",
+ "InodeSimpleExtendedAttributes",
+ "children",
+ "dentryMap",
+ }
+}
+
+func (d *Dir) beforeSave() {}
+
+// +checklocksignore
+func (d *Dir) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.InodeSimpleAttributes)
+ stateSinkObject.Save(1, &d.InodeSimpleExtendedAttributes)
+ stateSinkObject.Save(2, &d.children)
+ stateSinkObject.Save(3, &d.dentryMap)
+}
+
+func (d *Dir) afterLoad() {}
+
+// +checklocksignore
+func (d *Dir) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.InodeSimpleAttributes)
+ stateSourceObject.Load(1, &d.InodeSimpleExtendedAttributes)
+ stateSourceObject.Load(2, &d.children)
+ stateSourceObject.Load(3, &d.dentryMap)
+}
+
+func (dfo *dirFileOperations) StateTypeName() string {
+ return "pkg/sentry/fs/ramfs.dirFileOperations"
+}
+
+func (dfo *dirFileOperations) StateFields() []string {
+ return []string{
+ "dirCursor",
+ "dir",
+ }
+}
+
+func (dfo *dirFileOperations) beforeSave() {}
+
+// +checklocksignore
+func (dfo *dirFileOperations) StateSave(stateSinkObject state.Sink) {
+ dfo.beforeSave()
+ stateSinkObject.Save(0, &dfo.dirCursor)
+ stateSinkObject.Save(1, &dfo.dir)
+}
+
+func (dfo *dirFileOperations) afterLoad() {}
+
+// +checklocksignore
+func (dfo *dirFileOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &dfo.dirCursor)
+ stateSourceObject.Load(1, &dfo.dir)
+}
+
+func (s *Socket) StateTypeName() string {
+ return "pkg/sentry/fs/ramfs.Socket"
+}
+
+func (s *Socket) StateFields() []string {
+ return []string{
+ "InodeSimpleAttributes",
+ "InodeSimpleExtendedAttributes",
+ "ep",
+ }
+}
+
+func (s *Socket) beforeSave() {}
+
+// +checklocksignore
+func (s *Socket) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.InodeSimpleAttributes)
+ stateSinkObject.Save(1, &s.InodeSimpleExtendedAttributes)
+ stateSinkObject.Save(2, &s.ep)
+}
+
+func (s *Socket) afterLoad() {}
+
+// +checklocksignore
+func (s *Socket) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.InodeSimpleAttributes)
+ stateSourceObject.Load(1, &s.InodeSimpleExtendedAttributes)
+ stateSourceObject.Load(2, &s.ep)
+}
+
+func (s *socketFileOperations) StateTypeName() string {
+ return "pkg/sentry/fs/ramfs.socketFileOperations"
+}
+
+func (s *socketFileOperations) StateFields() []string {
+ return []string{}
+}
+
+func (s *socketFileOperations) beforeSave() {}
+
+// +checklocksignore
+func (s *socketFileOperations) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+}
+
+func (s *socketFileOperations) afterLoad() {}
+
+// +checklocksignore
+func (s *socketFileOperations) StateLoad(stateSourceObject state.Source) {
+}
+
+func (s *Symlink) StateTypeName() string {
+ return "pkg/sentry/fs/ramfs.Symlink"
+}
+
+func (s *Symlink) StateFields() []string {
+ return []string{
+ "InodeSimpleAttributes",
+ "InodeSimpleExtendedAttributes",
+ "Target",
+ }
+}
+
+func (s *Symlink) beforeSave() {}
+
+// +checklocksignore
+func (s *Symlink) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.InodeSimpleAttributes)
+ stateSinkObject.Save(1, &s.InodeSimpleExtendedAttributes)
+ stateSinkObject.Save(2, &s.Target)
+}
+
+func (s *Symlink) afterLoad() {}
+
+// +checklocksignore
+func (s *Symlink) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.InodeSimpleAttributes)
+ stateSourceObject.Load(1, &s.InodeSimpleExtendedAttributes)
+ stateSourceObject.Load(2, &s.Target)
+}
+
+func (s *symlinkFileOperations) StateTypeName() string {
+ return "pkg/sentry/fs/ramfs.symlinkFileOperations"
+}
+
+func (s *symlinkFileOperations) StateFields() []string {
+ return []string{}
+}
+
+func (s *symlinkFileOperations) beforeSave() {}
+
+// +checklocksignore
+func (s *symlinkFileOperations) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+}
+
+func (s *symlinkFileOperations) afterLoad() {}
+
+// +checklocksignore
+func (s *symlinkFileOperations) StateLoad(stateSourceObject state.Source) {
+}
+
+func init() {
+ state.Register((*Dir)(nil))
+ state.Register((*dirFileOperations)(nil))
+ state.Register((*Socket)(nil))
+ state.Register((*socketFileOperations)(nil))
+ state.Register((*Symlink)(nil))
+ state.Register((*symlinkFileOperations)(nil))
+}
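The generated ramfs file above is representative of every *_state_autogen.go added in this change: a checkpointable type names itself (StateTypeName), enumerates its serialized fields (StateFields), writes them by index into a state.Sink, reads them back from a state.Source, and is registered in init() so a saved image can be decoded. A minimal hand-written sketch of that same contract follows; the counter type and its value field are hypothetical and exist only for illustration, while the state package calls (Save, Load, Register) mirror the generated code above.

package example

import (
	"gvisor.dev/gvisor/pkg/state"
)

// counter is a hypothetical checkpointable type used only to illustrate
// the save/restore contract implemented by the generated files above.
type counter struct {
	// value is saved and restored across checkpoint/restore.
	value int64
}

func (c *counter) StateTypeName() string {
	return "example.counter"
}

func (c *counter) StateFields() []string {
	return []string{
		"value",
	}
}

func (c *counter) beforeSave() {}

// StateSave writes each listed field by its index in StateFields.
func (c *counter) StateSave(stateSinkObject state.Sink) {
	c.beforeSave()
	stateSinkObject.Save(0, &c.value)
}

func (c *counter) afterLoad() {}

// StateLoad reads the fields back using the same indices.
func (c *counter) StateLoad(stateSourceObject state.Source) {
	stateSourceObject.Load(0, &c.value)
}

func init() {
	// Registration makes the type discoverable when a saved image is loaded.
	state.Register((*counter)(nil))
}

In practice these methods are emitted by the stateify tool rather than written by hand, which is why every file added in this diff carries the "automatically generated by stateify." header.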
diff --git a/pkg/sentry/fs/ramfs/tree_test.go b/pkg/sentry/fs/ramfs/tree_test.go
deleted file mode 100644
index 3e0d1e07e..000000000
--- a/pkg/sentry/fs/ramfs/tree_test.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ramfs
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/fs"
-)
-
-func TestMakeDirectoryTree(t *testing.T) {
-
- for _, test := range []struct {
- name string
- subdirs []string
- }{
- {
- name: "abs paths",
- subdirs: []string{
- "/tmp",
- "/tmp/a/b",
- "/tmp/a/c/d",
- "/tmp/c",
- "/proc",
- "/dev/a/b",
- "/tmp",
- },
- },
- {
- name: "rel paths",
- subdirs: []string{
- "tmp",
- "tmp/a/b",
- "tmp/a/c/d",
- "tmp/c",
- "proc",
- "dev/a/b",
- "tmp",
- },
- },
- } {
- ctx := contexttest.Context(t)
- mount := fs.NewPseudoMountSource(ctx)
- tree, err := MakeDirectoryTree(ctx, mount, test.subdirs)
- if err != nil {
- t.Errorf("%s: failed to make ramfs tree, got error %v, want nil", test.name, err)
- continue
- }
-
- // Expect to be able to find each of the paths.
- mm, err := fs.NewMountNamespace(ctx, tree)
- if err != nil {
- t.Errorf("%s: failed to create mount manager: %v", test.name, err)
- continue
- }
- root := mm.Root()
- defer mm.DecRef(ctx)
-
- for _, p := range test.subdirs {
- maxTraversals := uint(0)
- if _, err := mm.FindInode(ctx, root, nil, p, &maxTraversals); err != nil {
- t.Errorf("%s: failed to find node %s: %v", test.name, p, err)
- break
- }
- }
- }
-}
diff --git a/pkg/sentry/fs/sys/BUILD b/pkg/sentry/fs/sys/BUILD
deleted file mode 100644
index fdbc5f912..000000000
--- a/pkg/sentry/fs/sys/BUILD
+++ /dev/null
@@ -1,24 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "sys",
- srcs = [
- "device.go",
- "devices.go",
- "fs.go",
- "sys.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/hostarch",
- "//pkg/sentry/device",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/fsutil",
- "//pkg/sentry/fs/ramfs",
- "//pkg/sentry/kernel",
- ],
-)
diff --git a/pkg/sentry/fs/sys/sys_state_autogen.go b/pkg/sentry/fs/sys/sys_state_autogen.go
new file mode 100644
index 000000000..9c4f22243
--- /dev/null
+++ b/pkg/sentry/fs/sys/sys_state_autogen.go
@@ -0,0 +1,61 @@
+// automatically generated by stateify.
+
+package sys
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (c *cpunum) StateTypeName() string {
+ return "pkg/sentry/fs/sys.cpunum"
+}
+
+func (c *cpunum) StateFields() []string {
+ return []string{
+ "InodeSimpleAttributes",
+ "InodeStaticFileGetter",
+ }
+}
+
+func (c *cpunum) beforeSave() {}
+
+// +checklocksignore
+func (c *cpunum) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.InodeSimpleAttributes)
+ stateSinkObject.Save(1, &c.InodeStaticFileGetter)
+}
+
+func (c *cpunum) afterLoad() {}
+
+// +checklocksignore
+func (c *cpunum) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.InodeSimpleAttributes)
+ stateSourceObject.Load(1, &c.InodeStaticFileGetter)
+}
+
+func (f *filesystem) StateTypeName() string {
+ return "pkg/sentry/fs/sys.filesystem"
+}
+
+func (f *filesystem) StateFields() []string {
+ return []string{}
+}
+
+func (f *filesystem) beforeSave() {}
+
+// +checklocksignore
+func (f *filesystem) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+}
+
+func (f *filesystem) afterLoad() {}
+
+// +checklocksignore
+func (f *filesystem) StateLoad(stateSourceObject state.Source) {
+}
+
+func init() {
+ state.Register((*cpunum)(nil))
+ state.Register((*filesystem)(nil))
+}
diff --git a/pkg/sentry/fs/timerfd/BUILD b/pkg/sentry/fs/timerfd/BUILD
deleted file mode 100644
index 0148b33cf..000000000
--- a/pkg/sentry/fs/timerfd/BUILD
+++ /dev/null
@@ -1,21 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "timerfd",
- srcs = ["timerfd.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/anon",
- "//pkg/sentry/fs/fsutil",
- "//pkg/sentry/kernel/time",
- "//pkg/syserror",
- "//pkg/usermem",
- "//pkg/waiter",
- ],
-)
diff --git a/pkg/sentry/fs/timerfd/timerfd_state_autogen.go b/pkg/sentry/fs/timerfd/timerfd_state_autogen.go
new file mode 100644
index 000000000..822eecdd6
--- /dev/null
+++ b/pkg/sentry/fs/timerfd/timerfd_state_autogen.go
@@ -0,0 +1,42 @@
+// automatically generated by stateify.
+
+package timerfd
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (t *TimerOperations) StateTypeName() string {
+ return "pkg/sentry/fs/timerfd.TimerOperations"
+}
+
+func (t *TimerOperations) StateFields() []string {
+ return []string{
+ "timer",
+ "val",
+ }
+}
+
+func (t *TimerOperations) beforeSave() {}
+
+// +checklocksignore
+func (t *TimerOperations) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ if !state.IsZeroValue(&t.events) {
+ state.Failf("events is %#v, expected zero", &t.events)
+ }
+ stateSinkObject.Save(0, &t.timer)
+ stateSinkObject.Save(1, &t.val)
+}
+
+func (t *TimerOperations) afterLoad() {}
+
+// +checklocksignore
+func (t *TimerOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.timer)
+ stateSourceObject.Load(1, &t.val)
+}
+
+func init() {
+ state.Register((*TimerOperations)(nil))
+}
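TimerOperations above shows a second pattern worth noting: a field that is intentionally excluded from StateFields (here t.events) is asserted to be zero at save time with state.IsZeroValue and state.Failf, so a checkpoint cannot silently drop live state. Below is a sketch of that guard using a hypothetical watcher type with a hypothetical pending field; the assumption is simply that the excluded field must be drained before save, as with the waiter queue in the generated code above.

package example

import (
	"gvisor.dev/gvisor/pkg/state"
)

// watcher is a hypothetical type with one persistent field and one
// field (pending) that is deliberately excluded from the saved state.
type watcher struct {
	name    string
	pending []func() // must be drained before checkpointing
}

func (w *watcher) StateTypeName() string { return "example.watcher" }

// Only "name" is listed; "pending" is never serialized.
func (w *watcher) StateFields() []string { return []string{"name"} }

func (w *watcher) beforeSave() {}

func (w *watcher) StateSave(stateSinkObject state.Sink) {
	w.beforeSave()
	// Mirror the generated guard: refuse to checkpoint if the unsaved
	// field still holds state that would otherwise be silently dropped.
	if !state.IsZeroValue(&w.pending) {
		state.Failf("pending is %#v, expected zero", &w.pending)
	}
	stateSinkObject.Save(0, &w.name)
}

func (w *watcher) afterLoad() {}

func (w *watcher) StateLoad(stateSourceObject state.Source) {
	stateSourceObject.Load(0, &w.name)
}

func init() {
	state.Register((*watcher)(nil))
}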
diff --git a/pkg/sentry/fs/tmpfs/BUILD b/pkg/sentry/fs/tmpfs/BUILD
deleted file mode 100644
index 511fffb43..000000000
--- a/pkg/sentry/fs/tmpfs/BUILD
+++ /dev/null
@@ -1,52 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "tmpfs",
- srcs = [
- "device.go",
- "file_regular.go",
- "fs.go",
- "inode_file.go",
- "tmpfs.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/safemem",
- "//pkg/sentry/device",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/fsutil",
- "//pkg/sentry/fs/ramfs",
- "//pkg/sentry/fsmetric",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/pipe",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/memmap",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/usage",
- "//pkg/sync",
- "//pkg/usermem",
- "//pkg/waiter",
- ],
-)
-
-go_test(
- name = "tmpfs_test",
- size = "small",
- srcs = ["file_test.go"],
- library = ":tmpfs",
- deps = [
- "//pkg/context",
- "//pkg/hostarch",
- "//pkg/sentry/fs",
- "//pkg/sentry/kernel/contexttest",
- "//pkg/sentry/usage",
- "//pkg/usermem",
- ],
-)
diff --git a/pkg/sentry/fs/tmpfs/file_test.go b/pkg/sentry/fs/tmpfs/file_test.go
deleted file mode 100644
index 1718f9372..000000000
--- a/pkg/sentry/fs/tmpfs/file_test.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tmpfs
-
-import (
- "bytes"
- "testing"
-
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/hostarch"
- "gvisor.dev/gvisor/pkg/sentry/fs"
- "gvisor.dev/gvisor/pkg/sentry/kernel/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/usage"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-func newFileInode(ctx context.Context) *fs.Inode {
- m := fs.NewCachingMountSource(ctx, &Filesystem{}, fs.MountSourceFlags{})
- iops := NewInMemoryFile(ctx, usage.Tmpfs, fs.WithCurrentTime(ctx, fs.UnstableAttr{}))
- return fs.NewInode(ctx, iops, m, fs.StableAttr{
- DeviceID: tmpfsDevice.DeviceID(),
- InodeID: tmpfsDevice.NextIno(),
- BlockSize: hostarch.PageSize,
- Type: fs.RegularFile,
- })
-}
-
-func newFile(ctx context.Context) *fs.File {
- inode := newFileInode(ctx)
- f, _ := inode.GetFile(ctx, fs.NewDirent(ctx, inode, "stub"), fs.FileFlags{Read: true, Write: true})
- return f
-}
-
-// Allocate once, write twice.
-func TestGrow(t *testing.T) {
- ctx := contexttest.Context(t)
- f := newFile(ctx)
- defer f.DecRef(ctx)
-
- abuf := bytes.Repeat([]byte{'a'}, 68)
- n, err := f.Pwritev(ctx, usermem.BytesIOSequence(abuf), 0)
- if n != int64(len(abuf)) || err != nil {
- t.Fatalf("Pwritev got (%d, %v) want (%d, nil)", n, err, len(abuf))
- }
-
- bbuf := bytes.Repeat([]byte{'b'}, 856)
- n, err = f.Pwritev(ctx, usermem.BytesIOSequence(bbuf), 68)
- if n != int64(len(bbuf)) || err != nil {
- t.Fatalf("Pwritev got (%d, %v) want (%d, nil)", n, err, len(bbuf))
- }
-
- rbuf := make([]byte, len(abuf)+len(bbuf))
- n, err = f.Preadv(ctx, usermem.BytesIOSequence(rbuf), 0)
- if n != int64(len(rbuf)) || err != nil {
- t.Fatalf("Preadv got (%d, %v) want (%d, nil)", n, err, len(rbuf))
- }
-
- if want := append(abuf, bbuf...); !bytes.Equal(rbuf, want) {
- t.Fatalf("Read %v, want %v", rbuf, want)
- }
-}
diff --git a/pkg/sentry/fs/tmpfs/tmpfs_state_autogen.go b/pkg/sentry/fs/tmpfs/tmpfs_state_autogen.go
new file mode 100644
index 000000000..ab5f75fea
--- /dev/null
+++ b/pkg/sentry/fs/tmpfs/tmpfs_state_autogen.go
@@ -0,0 +1,211 @@
+// automatically generated by stateify.
+
+package tmpfs
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (r *regularFileOperations) StateTypeName() string {
+ return "pkg/sentry/fs/tmpfs.regularFileOperations"
+}
+
+func (r *regularFileOperations) StateFields() []string {
+ return []string{
+ "iops",
+ }
+}
+
+func (r *regularFileOperations) beforeSave() {}
+
+// +checklocksignore
+func (r *regularFileOperations) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.iops)
+}
+
+func (r *regularFileOperations) afterLoad() {}
+
+// +checklocksignore
+func (r *regularFileOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.iops)
+}
+
+func (f *Filesystem) StateTypeName() string {
+ return "pkg/sentry/fs/tmpfs.Filesystem"
+}
+
+func (f *Filesystem) StateFields() []string {
+ return []string{}
+}
+
+func (f *Filesystem) beforeSave() {}
+
+// +checklocksignore
+func (f *Filesystem) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+}
+
+func (f *Filesystem) afterLoad() {}
+
+// +checklocksignore
+func (f *Filesystem) StateLoad(stateSourceObject state.Source) {
+}
+
+func (f *fileInodeOperations) StateTypeName() string {
+ return "pkg/sentry/fs/tmpfs.fileInodeOperations"
+}
+
+func (f *fileInodeOperations) StateFields() []string {
+ return []string{
+ "InodeSimpleExtendedAttributes",
+ "kernel",
+ "memUsage",
+ "attr",
+ "mappings",
+ "writableMappingPages",
+ "data",
+ "seals",
+ }
+}
+
+func (f *fileInodeOperations) beforeSave() {}
+
+// +checklocksignore
+func (f *fileInodeOperations) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.InodeSimpleExtendedAttributes)
+ stateSinkObject.Save(1, &f.kernel)
+ stateSinkObject.Save(2, &f.memUsage)
+ stateSinkObject.Save(3, &f.attr)
+ stateSinkObject.Save(4, &f.mappings)
+ stateSinkObject.Save(5, &f.writableMappingPages)
+ stateSinkObject.Save(6, &f.data)
+ stateSinkObject.Save(7, &f.seals)
+}
+
+func (f *fileInodeOperations) afterLoad() {}
+
+// +checklocksignore
+func (f *fileInodeOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.InodeSimpleExtendedAttributes)
+ stateSourceObject.Load(1, &f.kernel)
+ stateSourceObject.Load(2, &f.memUsage)
+ stateSourceObject.Load(3, &f.attr)
+ stateSourceObject.Load(4, &f.mappings)
+ stateSourceObject.Load(5, &f.writableMappingPages)
+ stateSourceObject.Load(6, &f.data)
+ stateSourceObject.Load(7, &f.seals)
+}
+
+func (d *Dir) StateTypeName() string {
+ return "pkg/sentry/fs/tmpfs.Dir"
+}
+
+func (d *Dir) StateFields() []string {
+ return []string{
+ "ramfsDir",
+ "kernel",
+ }
+}
+
+func (d *Dir) beforeSave() {}
+
+// +checklocksignore
+func (d *Dir) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.ramfsDir)
+ stateSinkObject.Save(1, &d.kernel)
+}
+
+// +checklocksignore
+func (d *Dir) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.ramfsDir)
+ stateSourceObject.Load(1, &d.kernel)
+ stateSourceObject.AfterLoad(d.afterLoad)
+}
+
+func (s *Symlink) StateTypeName() string {
+ return "pkg/sentry/fs/tmpfs.Symlink"
+}
+
+func (s *Symlink) StateFields() []string {
+ return []string{
+ "Symlink",
+ }
+}
+
+func (s *Symlink) beforeSave() {}
+
+// +checklocksignore
+func (s *Symlink) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.Symlink)
+}
+
+func (s *Symlink) afterLoad() {}
+
+// +checklocksignore
+func (s *Symlink) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.Symlink)
+}
+
+func (s *Socket) StateTypeName() string {
+ return "pkg/sentry/fs/tmpfs.Socket"
+}
+
+func (s *Socket) StateFields() []string {
+ return []string{
+ "Socket",
+ }
+}
+
+func (s *Socket) beforeSave() {}
+
+// +checklocksignore
+func (s *Socket) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.Socket)
+}
+
+func (s *Socket) afterLoad() {}
+
+// +checklocksignore
+func (s *Socket) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.Socket)
+}
+
+func (f *Fifo) StateTypeName() string {
+ return "pkg/sentry/fs/tmpfs.Fifo"
+}
+
+func (f *Fifo) StateFields() []string {
+ return []string{
+ "InodeOperations",
+ }
+}
+
+func (f *Fifo) beforeSave() {}
+
+// +checklocksignore
+func (f *Fifo) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.InodeOperations)
+}
+
+func (f *Fifo) afterLoad() {}
+
+// +checklocksignore
+func (f *Fifo) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.InodeOperations)
+}
+
+func init() {
+ state.Register((*regularFileOperations)(nil))
+ state.Register((*Filesystem)(nil))
+ state.Register((*fileInodeOperations)(nil))
+ state.Register((*Dir)(nil))
+ state.Register((*Symlink)(nil))
+ state.Register((*Socket)(nil))
+ state.Register((*Fifo)(nil))
+}
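The tmpfs Dir type above differs from the simpler cases: its generated StateLoad registers a post-restore hook via stateSourceObject.AfterLoad(d.afterLoad) instead of leaving afterLoad as an unused no-op, which is how hand-written fixup code runs only after all saved fields have been loaded. The following sketch illustrates that hook with a hypothetical pathCache type whose derived joined field is recomputed after restore rather than saved; the type, fields, and recomputation are illustrative only.

package example

import (
	"strings"

	"gvisor.dev/gvisor/pkg/state"
)

// pathCache is a hypothetical type whose "joined" field is derived from
// "parts" and is therefore rebuilt after restore instead of being saved.
type pathCache struct {
	parts  []string
	joined string // derived; intentionally absent from StateFields
}

func (p *pathCache) StateTypeName() string { return "example.pathCache" }

func (p *pathCache) StateFields() []string { return []string{"parts"} }

func (p *pathCache) beforeSave() {}

func (p *pathCache) StateSave(stateSinkObject state.Sink) {
	p.beforeSave()
	stateSinkObject.Save(0, &p.parts)
}

// afterLoad rebuilds the derived field once all saved fields are in place.
func (p *pathCache) afterLoad() {
	p.joined = strings.Join(p.parts, "/")
}

func (p *pathCache) StateLoad(stateSourceObject state.Source) {
	stateSourceObject.Load(0, &p.parts)
	// Defer the fixup until loading completes, as the generated
	// tmpfs.Dir StateLoad above does with d.afterLoad.
	stateSourceObject.AfterLoad(p.afterLoad)
}

func init() {
	state.Register((*pathCache)(nil))
}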
diff --git a/pkg/sentry/fs/tty/BUILD b/pkg/sentry/fs/tty/BUILD
deleted file mode 100644
index 5933cb67b..000000000
--- a/pkg/sentry/fs/tty/BUILD
+++ /dev/null
@@ -1,50 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "tty",
- srcs = [
- "dir.go",
- "fs.go",
- "line_discipline.go",
- "master.go",
- "queue.go",
- "replica.go",
- "terminal.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/marshal/primitive",
- "//pkg/refs",
- "//pkg/safemem",
- "//pkg/sentry/arch",
- "//pkg/sentry/device",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/fsutil",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/unimpl",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- "//pkg/waiter",
- ],
-)
-
-go_test(
- name = "tty_test",
- size = "small",
- srcs = ["tty_test.go"],
- library = ":tty",
- deps = [
- "//pkg/abi/linux",
- "//pkg/sentry/contexttest",
- "//pkg/usermem",
- ],
-)
diff --git a/pkg/sentry/fs/tty/tty_state_autogen.go b/pkg/sentry/fs/tty/tty_state_autogen.go
new file mode 100644
index 000000000..2fb0a8d27
--- /dev/null
+++ b/pkg/sentry/fs/tty/tty_state_autogen.go
@@ -0,0 +1,407 @@
+// automatically generated by stateify.
+
+package tty
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (d *dirInodeOperations) StateTypeName() string {
+ return "pkg/sentry/fs/tty.dirInodeOperations"
+}
+
+func (d *dirInodeOperations) StateFields() []string {
+ return []string{
+ "InodeSimpleAttributes",
+ "msrc",
+ "master",
+ "replicas",
+ "dentryMap",
+ "next",
+ }
+}
+
+func (d *dirInodeOperations) beforeSave() {}
+
+// +checklocksignore
+func (d *dirInodeOperations) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.InodeSimpleAttributes)
+ stateSinkObject.Save(1, &d.msrc)
+ stateSinkObject.Save(2, &d.master)
+ stateSinkObject.Save(3, &d.replicas)
+ stateSinkObject.Save(4, &d.dentryMap)
+ stateSinkObject.Save(5, &d.next)
+}
+
+func (d *dirInodeOperations) afterLoad() {}
+
+// +checklocksignore
+func (d *dirInodeOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.InodeSimpleAttributes)
+ stateSourceObject.Load(1, &d.msrc)
+ stateSourceObject.Load(2, &d.master)
+ stateSourceObject.Load(3, &d.replicas)
+ stateSourceObject.Load(4, &d.dentryMap)
+ stateSourceObject.Load(5, &d.next)
+}
+
+func (df *dirFileOperations) StateTypeName() string {
+ return "pkg/sentry/fs/tty.dirFileOperations"
+}
+
+func (df *dirFileOperations) StateFields() []string {
+ return []string{
+ "di",
+ "dirCursor",
+ }
+}
+
+func (df *dirFileOperations) beforeSave() {}
+
+// +checklocksignore
+func (df *dirFileOperations) StateSave(stateSinkObject state.Sink) {
+ df.beforeSave()
+ stateSinkObject.Save(0, &df.di)
+ stateSinkObject.Save(1, &df.dirCursor)
+}
+
+func (df *dirFileOperations) afterLoad() {}
+
+// +checklocksignore
+func (df *dirFileOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &df.di)
+ stateSourceObject.Load(1, &df.dirCursor)
+}
+
+func (f *filesystem) StateTypeName() string {
+ return "pkg/sentry/fs/tty.filesystem"
+}
+
+func (f *filesystem) StateFields() []string {
+ return []string{}
+}
+
+func (f *filesystem) beforeSave() {}
+
+// +checklocksignore
+func (f *filesystem) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+}
+
+func (f *filesystem) afterLoad() {}
+
+// +checklocksignore
+func (f *filesystem) StateLoad(stateSourceObject state.Source) {
+}
+
+func (s *superOperations) StateTypeName() string {
+ return "pkg/sentry/fs/tty.superOperations"
+}
+
+func (s *superOperations) StateFields() []string {
+ return []string{}
+}
+
+func (s *superOperations) beforeSave() {}
+
+// +checklocksignore
+func (s *superOperations) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+}
+
+func (s *superOperations) afterLoad() {}
+
+// +checklocksignore
+func (s *superOperations) StateLoad(stateSourceObject state.Source) {
+}
+
+func (l *lineDiscipline) StateTypeName() string {
+ return "pkg/sentry/fs/tty.lineDiscipline"
+}
+
+func (l *lineDiscipline) StateFields() []string {
+ return []string{
+ "size",
+ "inQueue",
+ "outQueue",
+ "termios",
+ "column",
+ }
+}
+
+func (l *lineDiscipline) beforeSave() {}
+
+// +checklocksignore
+func (l *lineDiscipline) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ if !state.IsZeroValue(&l.masterWaiter) {
+ state.Failf("masterWaiter is %#v, expected zero", &l.masterWaiter)
+ }
+ if !state.IsZeroValue(&l.replicaWaiter) {
+ state.Failf("replicaWaiter is %#v, expected zero", &l.replicaWaiter)
+ }
+ stateSinkObject.Save(0, &l.size)
+ stateSinkObject.Save(1, &l.inQueue)
+ stateSinkObject.Save(2, &l.outQueue)
+ stateSinkObject.Save(3, &l.termios)
+ stateSinkObject.Save(4, &l.column)
+}
+
+func (l *lineDiscipline) afterLoad() {}
+
+// +checklocksignore
+func (l *lineDiscipline) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.size)
+ stateSourceObject.Load(1, &l.inQueue)
+ stateSourceObject.Load(2, &l.outQueue)
+ stateSourceObject.Load(3, &l.termios)
+ stateSourceObject.Load(4, &l.column)
+}
+
+func (o *outputQueueTransformer) StateTypeName() string {
+ return "pkg/sentry/fs/tty.outputQueueTransformer"
+}
+
+func (o *outputQueueTransformer) StateFields() []string {
+ return []string{}
+}
+
+func (o *outputQueueTransformer) beforeSave() {}
+
+// +checklocksignore
+func (o *outputQueueTransformer) StateSave(stateSinkObject state.Sink) {
+ o.beforeSave()
+}
+
+func (o *outputQueueTransformer) afterLoad() {}
+
+// +checklocksignore
+func (o *outputQueueTransformer) StateLoad(stateSourceObject state.Source) {
+}
+
+func (i *inputQueueTransformer) StateTypeName() string {
+ return "pkg/sentry/fs/tty.inputQueueTransformer"
+}
+
+func (i *inputQueueTransformer) StateFields() []string {
+ return []string{}
+}
+
+func (i *inputQueueTransformer) beforeSave() {}
+
+// +checklocksignore
+func (i *inputQueueTransformer) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+}
+
+func (i *inputQueueTransformer) afterLoad() {}
+
+// +checklocksignore
+func (i *inputQueueTransformer) StateLoad(stateSourceObject state.Source) {
+}
+
+func (mi *masterInodeOperations) StateTypeName() string {
+ return "pkg/sentry/fs/tty.masterInodeOperations"
+}
+
+func (mi *masterInodeOperations) StateFields() []string {
+ return []string{
+ "SimpleFileInode",
+ "d",
+ }
+}
+
+func (mi *masterInodeOperations) beforeSave() {}
+
+// +checklocksignore
+func (mi *masterInodeOperations) StateSave(stateSinkObject state.Sink) {
+ mi.beforeSave()
+ stateSinkObject.Save(0, &mi.SimpleFileInode)
+ stateSinkObject.Save(1, &mi.d)
+}
+
+func (mi *masterInodeOperations) afterLoad() {}
+
+// +checklocksignore
+func (mi *masterInodeOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &mi.SimpleFileInode)
+ stateSourceObject.Load(1, &mi.d)
+}
+
+func (mf *masterFileOperations) StateTypeName() string {
+ return "pkg/sentry/fs/tty.masterFileOperations"
+}
+
+func (mf *masterFileOperations) StateFields() []string {
+ return []string{
+ "d",
+ "t",
+ }
+}
+
+func (mf *masterFileOperations) beforeSave() {}
+
+// +checklocksignore
+func (mf *masterFileOperations) StateSave(stateSinkObject state.Sink) {
+ mf.beforeSave()
+ stateSinkObject.Save(0, &mf.d)
+ stateSinkObject.Save(1, &mf.t)
+}
+
+func (mf *masterFileOperations) afterLoad() {}
+
+// +checklocksignore
+func (mf *masterFileOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &mf.d)
+ stateSourceObject.Load(1, &mf.t)
+}
+
+func (q *queue) StateTypeName() string {
+ return "pkg/sentry/fs/tty.queue"
+}
+
+func (q *queue) StateFields() []string {
+ return []string{
+ "readBuf",
+ "waitBuf",
+ "waitBufLen",
+ "readable",
+ "transformer",
+ }
+}
+
+func (q *queue) beforeSave() {}
+
+// +checklocksignore
+func (q *queue) StateSave(stateSinkObject state.Sink) {
+ q.beforeSave()
+ stateSinkObject.Save(0, &q.readBuf)
+ stateSinkObject.Save(1, &q.waitBuf)
+ stateSinkObject.Save(2, &q.waitBufLen)
+ stateSinkObject.Save(3, &q.readable)
+ stateSinkObject.Save(4, &q.transformer)
+}
+
+func (q *queue) afterLoad() {}
+
+// +checklocksignore
+func (q *queue) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &q.readBuf)
+ stateSourceObject.Load(1, &q.waitBuf)
+ stateSourceObject.Load(2, &q.waitBufLen)
+ stateSourceObject.Load(3, &q.readable)
+ stateSourceObject.Load(4, &q.transformer)
+}
+
+func (si *replicaInodeOperations) StateTypeName() string {
+ return "pkg/sentry/fs/tty.replicaInodeOperations"
+}
+
+func (si *replicaInodeOperations) StateFields() []string {
+ return []string{
+ "SimpleFileInode",
+ "d",
+ "t",
+ }
+}
+
+func (si *replicaInodeOperations) beforeSave() {}
+
+// +checklocksignore
+func (si *replicaInodeOperations) StateSave(stateSinkObject state.Sink) {
+ si.beforeSave()
+ stateSinkObject.Save(0, &si.SimpleFileInode)
+ stateSinkObject.Save(1, &si.d)
+ stateSinkObject.Save(2, &si.t)
+}
+
+func (si *replicaInodeOperations) afterLoad() {}
+
+// +checklocksignore
+func (si *replicaInodeOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &si.SimpleFileInode)
+ stateSourceObject.Load(1, &si.d)
+ stateSourceObject.Load(2, &si.t)
+}
+
+func (sf *replicaFileOperations) StateTypeName() string {
+ return "pkg/sentry/fs/tty.replicaFileOperations"
+}
+
+func (sf *replicaFileOperations) StateFields() []string {
+ return []string{
+ "si",
+ }
+}
+
+func (sf *replicaFileOperations) beforeSave() {}
+
+// +checklocksignore
+func (sf *replicaFileOperations) StateSave(stateSinkObject state.Sink) {
+ sf.beforeSave()
+ stateSinkObject.Save(0, &sf.si)
+}
+
+func (sf *replicaFileOperations) afterLoad() {}
+
+// +checklocksignore
+func (sf *replicaFileOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &sf.si)
+}
+
+func (tm *Terminal) StateTypeName() string {
+ return "pkg/sentry/fs/tty.Terminal"
+}
+
+func (tm *Terminal) StateFields() []string {
+ return []string{
+ "AtomicRefCount",
+ "n",
+ "d",
+ "ld",
+ "masterKTTY",
+ "replicaKTTY",
+ }
+}
+
+func (tm *Terminal) beforeSave() {}
+
+// +checklocksignore
+func (tm *Terminal) StateSave(stateSinkObject state.Sink) {
+ tm.beforeSave()
+ stateSinkObject.Save(0, &tm.AtomicRefCount)
+ stateSinkObject.Save(1, &tm.n)
+ stateSinkObject.Save(2, &tm.d)
+ stateSinkObject.Save(3, &tm.ld)
+ stateSinkObject.Save(4, &tm.masterKTTY)
+ stateSinkObject.Save(5, &tm.replicaKTTY)
+}
+
+func (tm *Terminal) afterLoad() {}
+
+// +checklocksignore
+func (tm *Terminal) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &tm.AtomicRefCount)
+ stateSourceObject.Load(1, &tm.n)
+ stateSourceObject.Load(2, &tm.d)
+ stateSourceObject.Load(3, &tm.ld)
+ stateSourceObject.Load(4, &tm.masterKTTY)
+ stateSourceObject.Load(5, &tm.replicaKTTY)
+}
+
+func init() {
+ state.Register((*dirInodeOperations)(nil))
+ state.Register((*dirFileOperations)(nil))
+ state.Register((*filesystem)(nil))
+ state.Register((*superOperations)(nil))
+ state.Register((*lineDiscipline)(nil))
+ state.Register((*outputQueueTransformer)(nil))
+ state.Register((*inputQueueTransformer)(nil))
+ state.Register((*masterInodeOperations)(nil))
+ state.Register((*masterFileOperations)(nil))
+ state.Register((*queue)(nil))
+ state.Register((*replicaInodeOperations)(nil))
+ state.Register((*replicaFileOperations)(nil))
+ state.Register((*Terminal)(nil))
+}
diff --git a/pkg/sentry/fs/tty/tty_test.go b/pkg/sentry/fs/tty/tty_test.go
deleted file mode 100644
index 49edee83d..000000000
--- a/pkg/sentry/fs/tty/tty_test.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tty
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-func TestSimpleMasterToReplica(t *testing.T) {
- ld := newLineDiscipline(linux.DefaultReplicaTermios)
- ctx := contexttest.Context(t)
- inBytes := []byte("hello, tty\n")
- src := usermem.BytesIOSequence(inBytes)
- outBytes := make([]byte, 32)
- dst := usermem.BytesIOSequence(outBytes)
-
- // Write to the input queue.
- nw, err := ld.inputQueueWrite(ctx, src)
- if err != nil {
- t.Fatalf("error writing to input queue: %v", err)
- }
- if nw != int64(len(inBytes)) {
- t.Fatalf("wrote wrong length: got %d, want %d", nw, len(inBytes))
- }
-
- // Read from the input queue.
- nr, err := ld.inputQueueRead(ctx, dst)
- if err != nil {
- t.Fatalf("error reading from input queue: %v", err)
- }
- if nr != int64(len(inBytes)) {
- t.Fatalf("read wrong length: got %d, want %d", nr, len(inBytes))
- }
-
- outStr := string(outBytes[:nr])
- inStr := string(inBytes)
- if outStr != inStr {
- t.Fatalf("written and read strings do not match: got %q, want %q", outStr, inStr)
- }
-}
diff --git a/pkg/sentry/fs/user/BUILD b/pkg/sentry/fs/user/BUILD
deleted file mode 100644
index 4acc73ee0..000000000
--- a/pkg/sentry/fs/user/BUILD
+++ /dev/null
@@ -1,41 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "user",
- srcs = [
- "path.go",
- "user.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/fspath",
- "//pkg/log",
- "//pkg/sentry/fs",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- "//pkg/syserror",
- "//pkg/usermem",
- ],
-)
-
-go_test(
- name = "user_test",
- size = "small",
- srcs = ["user_test.go"],
- library = ":user",
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/tmpfs",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/contexttest",
- "//pkg/usermem",
- ],
-)
diff --git a/pkg/sentry/fs/user/user_state_autogen.go b/pkg/sentry/fs/user/user_state_autogen.go
new file mode 100644
index 000000000..8083e036c
--- /dev/null
+++ b/pkg/sentry/fs/user/user_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package user
diff --git a/pkg/sentry/fs/user/user_test.go b/pkg/sentry/fs/user/user_test.go
deleted file mode 100644
index 7f8fa8038..000000000
--- a/pkg/sentry/fs/user/user_test.go
+++ /dev/null
@@ -1,201 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package user
-
-import (
- "fmt"
- "strings"
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/sentry/fs"
- "gvisor.dev/gvisor/pkg/sentry/fs/tmpfs"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/kernel/contexttest"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-// createEtcPasswd creates /etc/passwd with the given contents and mode. If
-// mode is empty, then no file will be created. If mode is not a regular file
-// mode, then contents is ignored.
-func createEtcPasswd(ctx context.Context, root *fs.Dirent, contents string, mode linux.FileMode) error {
- if err := root.CreateDirectory(ctx, root, "etc", fs.FilePermsFromMode(0755)); err != nil {
- return err
- }
- etc, err := root.Walk(ctx, root, "etc")
- if err != nil {
- return err
- }
- defer etc.DecRef(ctx)
- switch mode.FileType() {
- case 0:
- // Don't create anything.
- return nil
- case linux.S_IFREG:
- passwd, err := etc.Create(ctx, root, "passwd", fs.FileFlags{Write: true}, fs.FilePermsFromMode(mode))
- if err != nil {
- return err
- }
- defer passwd.DecRef(ctx)
- if _, err := passwd.Writev(ctx, usermem.BytesIOSequence([]byte(contents))); err != nil {
- return err
- }
- return nil
- case linux.S_IFDIR:
- return etc.CreateDirectory(ctx, root, "passwd", fs.FilePermsFromMode(mode))
- case linux.S_IFIFO:
- return etc.CreateFifo(ctx, root, "passwd", fs.FilePermsFromMode(mode))
- default:
- return fmt.Errorf("unknown file type %x", mode.FileType())
- }
-}
-
-// TestGetExecUserHome tests the getExecUserHome function.
-func TestGetExecUserHome(t *testing.T) {
- tests := map[string]struct {
- uid auth.KUID
- passwdContents string
- passwdMode linux.FileMode
- expected string
- }{
- "success": {
- uid: 1000,
- passwdContents: "adin::1000:1111::/home/adin:/bin/sh",
- passwdMode: linux.S_IFREG | 0666,
- expected: "/home/adin",
- },
- "no_perms": {
- uid: 1000,
- passwdContents: "adin::1000:1111::/home/adin:/bin/sh",
- passwdMode: linux.S_IFREG,
- expected: "/",
- },
- "no_passwd": {
- uid: 1000,
- expected: "/",
- },
- "directory": {
- uid: 1000,
- passwdMode: linux.S_IFDIR | 0666,
- expected: "/",
- },
- // Currently we don't allow named pipes.
- "named_pipe": {
- uid: 1000,
- passwdMode: linux.S_IFIFO | 0666,
- expected: "/",
- },
- }
-
- for name, tc := range tests {
- t.Run(name, func(t *testing.T) {
- ctx := contexttest.Context(t)
- msrc := fs.NewPseudoMountSource(ctx)
- rootInode, err := tmpfs.NewDir(ctx, nil, fs.RootOwner, fs.FilePermsFromMode(0777), msrc, nil /* parent */)
- if err != nil {
- t.Fatalf("tmpfs.NewDir failed: %v", err)
- }
-
- mns, err := fs.NewMountNamespace(ctx, rootInode)
- if err != nil {
- t.Fatalf("NewMountNamespace failed: %v", err)
- }
- defer mns.DecRef(ctx)
- root := mns.Root()
- defer root.DecRef(ctx)
- ctx = fs.WithRoot(ctx, root)
-
- if err := createEtcPasswd(ctx, root, tc.passwdContents, tc.passwdMode); err != nil {
- t.Fatalf("createEtcPasswd failed: %v", err)
- }
-
- got, err := getExecUserHome(ctx, mns, tc.uid)
- if err != nil {
- t.Fatalf("failed to get user home: %v", err)
- }
-
- if got != tc.expected {
- t.Fatalf("expected %v, got: %v", tc.expected, got)
- }
- })
- }
-}
-
-// TestFindHomeInPasswd tests the findHomeInPasswd function's passwd file parsing.
-func TestFindHomeInPasswd(t *testing.T) {
- tests := map[string]struct {
- uid uint32
- passwd string
- expected string
- def string
- }{
- "empty": {
- uid: 1000,
- passwd: "",
- expected: "/",
- def: "/",
- },
- "whitespace": {
- uid: 1000,
- passwd: " ",
- expected: "/",
- def: "/",
- },
- "full": {
- uid: 1000,
- passwd: "adin::1000:1111::/home/adin:/bin/sh",
- expected: "/home/adin",
- def: "/",
- },
- // For better or worse, this is how runc works.
- "partial": {
- uid: 1000,
- passwd: "adin::1000:1111:",
- expected: "",
- def: "/",
- },
- "multiple": {
- uid: 1001,
- passwd: "adin::1000:1111::/home/adin:/bin/sh\nian::1001:1111::/home/ian:/bin/sh",
- expected: "/home/ian",
- def: "/",
- },
- "duplicate": {
- uid: 1000,
- passwd: "adin::1000:1111::/home/adin:/bin/sh\nian::1000:1111::/home/ian:/bin/sh",
- expected: "/home/adin",
- def: "/",
- },
- "empty_lines": {
- uid: 1001,
- passwd: "adin::1000:1111::/home/adin:/bin/sh\n\n\nian::1001:1111::/home/ian:/bin/sh",
- expected: "/home/ian",
- def: "/",
- },
- }
-
- for name, tc := range tests {
- t.Run(name, func(t *testing.T) {
- got, err := findHomeInPasswd(tc.uid, strings.NewReader(tc.passwd), tc.def)
- if err != nil {
- t.Fatalf("error parsing passwd: %v", err)
- }
- if tc.expected != got {
- t.Fatalf("expected %v, got: %v", tc.expected, got)
- }
- })
- }
-}
diff --git a/pkg/sentry/fsbridge/BUILD b/pkg/sentry/fsbridge/BUILD
deleted file mode 100644
index 4631db2bb..000000000
--- a/pkg/sentry/fsbridge/BUILD
+++ /dev/null
@@ -1,24 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-licenses(["notice"])
-
-go_library(
- name = "fsbridge",
- srcs = [
- "bridge.go",
- "fs.go",
- "vfs.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/fspath",
- "//pkg/sentry/fs",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/memmap",
- "//pkg/sentry/vfs",
- "//pkg/usermem",
- ],
-)
diff --git a/pkg/sentry/fsbridge/fsbridge_state_autogen.go b/pkg/sentry/fsbridge/fsbridge_state_autogen.go
new file mode 100644
index 000000000..b4b240bfa
--- /dev/null
+++ b/pkg/sentry/fsbridge/fsbridge_state_autogen.go
@@ -0,0 +1,126 @@
+// automatically generated by stateify.
+
+package fsbridge
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (f *fsFile) StateTypeName() string {
+ return "pkg/sentry/fsbridge.fsFile"
+}
+
+func (f *fsFile) StateFields() []string {
+ return []string{
+ "file",
+ }
+}
+
+func (f *fsFile) beforeSave() {}
+
+// +checklocksignore
+func (f *fsFile) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.file)
+}
+
+func (f *fsFile) afterLoad() {}
+
+// +checklocksignore
+func (f *fsFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.file)
+}
+
+func (l *fsLookup) StateTypeName() string {
+ return "pkg/sentry/fsbridge.fsLookup"
+}
+
+func (l *fsLookup) StateFields() []string {
+ return []string{
+ "mntns",
+ "root",
+ "workingDir",
+ }
+}
+
+func (l *fsLookup) beforeSave() {}
+
+// +checklocksignore
+func (l *fsLookup) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.mntns)
+ stateSinkObject.Save(1, &l.root)
+ stateSinkObject.Save(2, &l.workingDir)
+}
+
+func (l *fsLookup) afterLoad() {}
+
+// +checklocksignore
+func (l *fsLookup) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.mntns)
+ stateSourceObject.Load(1, &l.root)
+ stateSourceObject.Load(2, &l.workingDir)
+}
+
+func (f *VFSFile) StateTypeName() string {
+ return "pkg/sentry/fsbridge.VFSFile"
+}
+
+func (f *VFSFile) StateFields() []string {
+ return []string{
+ "file",
+ }
+}
+
+func (f *VFSFile) beforeSave() {}
+
+// +checklocksignore
+func (f *VFSFile) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.file)
+}
+
+func (f *VFSFile) afterLoad() {}
+
+// +checklocksignore
+func (f *VFSFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.file)
+}
+
+func (l *vfsLookup) StateTypeName() string {
+ return "pkg/sentry/fsbridge.vfsLookup"
+}
+
+func (l *vfsLookup) StateFields() []string {
+ return []string{
+ "mntns",
+ "root",
+ "workingDir",
+ }
+}
+
+func (l *vfsLookup) beforeSave() {}
+
+// +checklocksignore
+func (l *vfsLookup) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.mntns)
+ stateSinkObject.Save(1, &l.root)
+ stateSinkObject.Save(2, &l.workingDir)
+}
+
+func (l *vfsLookup) afterLoad() {}
+
+// +checklocksignore
+func (l *vfsLookup) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.mntns)
+ stateSourceObject.Load(1, &l.root)
+ stateSourceObject.Load(2, &l.workingDir)
+}
+
+func init() {
+ state.Register((*fsFile)(nil))
+ state.Register((*fsLookup)(nil))
+ state.Register((*VFSFile)(nil))
+ state.Register((*vfsLookup)(nil))
+}
diff --git a/pkg/sentry/fsimpl/cgroupfs/BUILD b/pkg/sentry/fsimpl/cgroupfs/BUILD
deleted file mode 100644
index 4c9c5b344..000000000
--- a/pkg/sentry/fsimpl/cgroupfs/BUILD
+++ /dev/null
@@ -1,49 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-licenses(["notice"])
-
-go_template_instance(
- name = "dir_refs",
- out = "dir_refs.go",
- package = "cgroupfs",
- prefix = "dir",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "dir",
- },
-)
-
-go_library(
- name = "cgroupfs",
- srcs = [
- "base.go",
- "cgroupfs.go",
- "cpu.go",
- "cpuacct.go",
- "cpuset.go",
- "dir_refs.go",
- "job.go",
- "memory.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/coverage",
- "//pkg/errors/linuxerr",
- "//pkg/log",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/sentry/arch",
- "//pkg/sentry/fsimpl/kernfs",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/memmap",
- "//pkg/sentry/usage",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- ],
-)
diff --git a/pkg/sentry/fsimpl/cgroupfs/cgroupfs_state_autogen.go b/pkg/sentry/fsimpl/cgroupfs/cgroupfs_state_autogen.go
new file mode 100644
index 000000000..3142ab6f8
--- /dev/null
+++ b/pkg/sentry/fsimpl/cgroupfs/cgroupfs_state_autogen.go
@@ -0,0 +1,687 @@
+// automatically generated by stateify.
+
+package cgroupfs
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (c *controllerCommon) StateTypeName() string {
+ return "pkg/sentry/fsimpl/cgroupfs.controllerCommon"
+}
+
+func (c *controllerCommon) StateFields() []string {
+ return []string{
+ "ty",
+ "fs",
+ }
+}
+
+func (c *controllerCommon) beforeSave() {}
+
+// +checklocksignore
+func (c *controllerCommon) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.ty)
+ stateSinkObject.Save(1, &c.fs)
+}
+
+func (c *controllerCommon) afterLoad() {}
+
+// +checklocksignore
+func (c *controllerCommon) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.ty)
+ stateSourceObject.Load(1, &c.fs)
+}
+
+func (c *cgroupInode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/cgroupfs.cgroupInode"
+}
+
+func (c *cgroupInode) StateFields() []string {
+ return []string{
+ "dir",
+ "fs",
+ "ts",
+ }
+}
+
+func (c *cgroupInode) beforeSave() {}
+
+// +checklocksignore
+func (c *cgroupInode) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.dir)
+ stateSinkObject.Save(1, &c.fs)
+ stateSinkObject.Save(2, &c.ts)
+}
+
+func (c *cgroupInode) afterLoad() {}
+
+// +checklocksignore
+func (c *cgroupInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.dir)
+ stateSourceObject.Load(1, &c.fs)
+ stateSourceObject.Load(2, &c.ts)
+}
+
+func (d *cgroupProcsData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/cgroupfs.cgroupProcsData"
+}
+
+func (d *cgroupProcsData) StateFields() []string {
+ return []string{
+ "cgroupInode",
+ }
+}
+
+func (d *cgroupProcsData) beforeSave() {}
+
+// +checklocksignore
+func (d *cgroupProcsData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.cgroupInode)
+}
+
+func (d *cgroupProcsData) afterLoad() {}
+
+// +checklocksignore
+func (d *cgroupProcsData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.cgroupInode)
+}
+
+func (d *tasksData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/cgroupfs.tasksData"
+}
+
+func (d *tasksData) StateFields() []string {
+ return []string{
+ "cgroupInode",
+ }
+}
+
+func (d *tasksData) beforeSave() {}
+
+// +checklocksignore
+func (d *tasksData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.cgroupInode)
+}
+
+func (d *tasksData) afterLoad() {}
+
+// +checklocksignore
+func (d *tasksData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.cgroupInode)
+}
+
+func (fsType *FilesystemType) StateTypeName() string {
+ return "pkg/sentry/fsimpl/cgroupfs.FilesystemType"
+}
+
+func (fsType *FilesystemType) StateFields() []string {
+ return []string{}
+}
+
+func (fsType *FilesystemType) beforeSave() {}
+
+// +checklocksignore
+func (fsType *FilesystemType) StateSave(stateSinkObject state.Sink) {
+ fsType.beforeSave()
+}
+
+func (fsType *FilesystemType) afterLoad() {}
+
+// +checklocksignore
+func (fsType *FilesystemType) StateLoad(stateSourceObject state.Source) {
+}
+
+func (i *InternalData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/cgroupfs.InternalData"
+}
+
+func (i *InternalData) StateFields() []string {
+ return []string{
+ "DefaultControlValues",
+ }
+}
+
+func (i *InternalData) beforeSave() {}
+
+// +checklocksignore
+func (i *InternalData) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.DefaultControlValues)
+}
+
+func (i *InternalData) afterLoad() {}
+
+// +checklocksignore
+func (i *InternalData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.DefaultControlValues)
+}
+
+func (fs *filesystem) StateTypeName() string {
+ return "pkg/sentry/fsimpl/cgroupfs.filesystem"
+}
+
+func (fs *filesystem) StateFields() []string {
+ return []string{
+ "Filesystem",
+ "devMinor",
+ "hierarchyID",
+ "controllers",
+ "kcontrollers",
+ "numCgroups",
+ "root",
+ }
+}
+
+func (fs *filesystem) beforeSave() {}
+
+// +checklocksignore
+func (fs *filesystem) StateSave(stateSinkObject state.Sink) {
+ fs.beforeSave()
+ stateSinkObject.Save(0, &fs.Filesystem)
+ stateSinkObject.Save(1, &fs.devMinor)
+ stateSinkObject.Save(2, &fs.hierarchyID)
+ stateSinkObject.Save(3, &fs.controllers)
+ stateSinkObject.Save(4, &fs.kcontrollers)
+ stateSinkObject.Save(5, &fs.numCgroups)
+ stateSinkObject.Save(6, &fs.root)
+}
+
+func (fs *filesystem) afterLoad() {}
+
+// +checklocksignore
+func (fs *filesystem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fs.Filesystem)
+ stateSourceObject.Load(1, &fs.devMinor)
+ stateSourceObject.Load(2, &fs.hierarchyID)
+ stateSourceObject.Load(3, &fs.controllers)
+ stateSourceObject.Load(4, &fs.kcontrollers)
+ stateSourceObject.Load(5, &fs.numCgroups)
+ stateSourceObject.Load(6, &fs.root)
+}
+
+func (i *implStatFS) StateTypeName() string {
+ return "pkg/sentry/fsimpl/cgroupfs.implStatFS"
+}
+
+func (i *implStatFS) StateFields() []string {
+ return []string{}
+}
+
+func (i *implStatFS) beforeSave() {}
+
+// +checklocksignore
+func (i *implStatFS) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+}
+
+func (i *implStatFS) afterLoad() {}
+
+// +checklocksignore
+func (i *implStatFS) StateLoad(stateSourceObject state.Source) {
+}
+
+func (d *dir) StateTypeName() string {
+ return "pkg/sentry/fsimpl/cgroupfs.dir"
+}
+
+func (d *dir) StateFields() []string {
+ return []string{
+ "dirRefs",
+ "InodeAlwaysValid",
+ "InodeAttrs",
+ "InodeNotSymlink",
+ "InodeDirectoryNoNewChildren",
+ "OrderedChildren",
+ "implStatFS",
+ "locks",
+ }
+}
+
+func (d *dir) beforeSave() {}
+
+// +checklocksignore
+func (d *dir) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.dirRefs)
+ stateSinkObject.Save(1, &d.InodeAlwaysValid)
+ stateSinkObject.Save(2, &d.InodeAttrs)
+ stateSinkObject.Save(3, &d.InodeNotSymlink)
+ stateSinkObject.Save(4, &d.InodeDirectoryNoNewChildren)
+ stateSinkObject.Save(5, &d.OrderedChildren)
+ stateSinkObject.Save(6, &d.implStatFS)
+ stateSinkObject.Save(7, &d.locks)
+}
+
+func (d *dir) afterLoad() {}
+
+// +checklocksignore
+func (d *dir) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.dirRefs)
+ stateSourceObject.Load(1, &d.InodeAlwaysValid)
+ stateSourceObject.Load(2, &d.InodeAttrs)
+ stateSourceObject.Load(3, &d.InodeNotSymlink)
+ stateSourceObject.Load(4, &d.InodeDirectoryNoNewChildren)
+ stateSourceObject.Load(5, &d.OrderedChildren)
+ stateSourceObject.Load(6, &d.implStatFS)
+ stateSourceObject.Load(7, &d.locks)
+}
+
+func (c *controllerFile) StateTypeName() string {
+ return "pkg/sentry/fsimpl/cgroupfs.controllerFile"
+}
+
+func (c *controllerFile) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ }
+}
+
+func (c *controllerFile) beforeSave() {}
+
+// +checklocksignore
+func (c *controllerFile) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.DynamicBytesFile)
+}
+
+func (c *controllerFile) afterLoad() {}
+
+// +checklocksignore
+func (c *controllerFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.DynamicBytesFile)
+}
+
+func (s *staticControllerFile) StateTypeName() string {
+ return "pkg/sentry/fsimpl/cgroupfs.staticControllerFile"
+}
+
+func (s *staticControllerFile) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "StaticData",
+ }
+}
+
+func (s *staticControllerFile) beforeSave() {}
+
+// +checklocksignore
+func (s *staticControllerFile) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.DynamicBytesFile)
+ stateSinkObject.Save(1, &s.StaticData)
+}
+
+func (s *staticControllerFile) afterLoad() {}
+
+// +checklocksignore
+func (s *staticControllerFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.DynamicBytesFile)
+ stateSourceObject.Load(1, &s.StaticData)
+}
+
+func (c *cpuController) StateTypeName() string {
+ return "pkg/sentry/fsimpl/cgroupfs.cpuController"
+}
+
+func (c *cpuController) StateFields() []string {
+ return []string{
+ "controllerCommon",
+ "cfsPeriod",
+ "cfsQuota",
+ "shares",
+ }
+}
+
+func (c *cpuController) beforeSave() {}
+
+// +checklocksignore
+func (c *cpuController) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.controllerCommon)
+ stateSinkObject.Save(1, &c.cfsPeriod)
+ stateSinkObject.Save(2, &c.cfsQuota)
+ stateSinkObject.Save(3, &c.shares)
+}
+
+func (c *cpuController) afterLoad() {}
+
+// +checklocksignore
+func (c *cpuController) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.controllerCommon)
+ stateSourceObject.Load(1, &c.cfsPeriod)
+ stateSourceObject.Load(2, &c.cfsQuota)
+ stateSourceObject.Load(3, &c.shares)
+}
+
+func (c *cpuacctController) StateTypeName() string {
+ return "pkg/sentry/fsimpl/cgroupfs.cpuacctController"
+}
+
+func (c *cpuacctController) StateFields() []string {
+ return []string{
+ "controllerCommon",
+ }
+}
+
+func (c *cpuacctController) beforeSave() {}
+
+// +checklocksignore
+func (c *cpuacctController) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.controllerCommon)
+}
+
+func (c *cpuacctController) afterLoad() {}
+
+// +checklocksignore
+func (c *cpuacctController) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.controllerCommon)
+}
+
+func (c *cpuacctCgroup) StateTypeName() string {
+ return "pkg/sentry/fsimpl/cgroupfs.cpuacctCgroup"
+}
+
+func (c *cpuacctCgroup) StateFields() []string {
+ return []string{
+ "cgroupInode",
+ }
+}
+
+func (c *cpuacctCgroup) beforeSave() {}
+
+// +checklocksignore
+func (c *cpuacctCgroup) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.cgroupInode)
+}
+
+func (c *cpuacctCgroup) afterLoad() {}
+
+// +checklocksignore
+func (c *cpuacctCgroup) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.cgroupInode)
+}
+
+func (d *cpuacctStatData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/cgroupfs.cpuacctStatData"
+}
+
+func (d *cpuacctStatData) StateFields() []string {
+ return []string{
+ "cpuacctCgroup",
+ }
+}
+
+func (d *cpuacctStatData) beforeSave() {}
+
+// +checklocksignore
+func (d *cpuacctStatData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.cpuacctCgroup)
+}
+
+func (d *cpuacctStatData) afterLoad() {}
+
+// +checklocksignore
+func (d *cpuacctStatData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.cpuacctCgroup)
+}
+
+func (d *cpuacctUsageData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/cgroupfs.cpuacctUsageData"
+}
+
+func (d *cpuacctUsageData) StateFields() []string {
+ return []string{
+ "cpuacctCgroup",
+ }
+}
+
+func (d *cpuacctUsageData) beforeSave() {}
+
+// +checklocksignore
+func (d *cpuacctUsageData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.cpuacctCgroup)
+}
+
+func (d *cpuacctUsageData) afterLoad() {}
+
+// +checklocksignore
+func (d *cpuacctUsageData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.cpuacctCgroup)
+}
+
+func (d *cpuacctUsageUserData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/cgroupfs.cpuacctUsageUserData"
+}
+
+func (d *cpuacctUsageUserData) StateFields() []string {
+ return []string{
+ "cpuacctCgroup",
+ }
+}
+
+func (d *cpuacctUsageUserData) beforeSave() {}
+
+// +checklocksignore
+func (d *cpuacctUsageUserData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.cpuacctCgroup)
+}
+
+func (d *cpuacctUsageUserData) afterLoad() {}
+
+// +checklocksignore
+func (d *cpuacctUsageUserData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.cpuacctCgroup)
+}
+
+func (d *cpuacctUsageSysData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/cgroupfs.cpuacctUsageSysData"
+}
+
+func (d *cpuacctUsageSysData) StateFields() []string {
+ return []string{
+ "cpuacctCgroup",
+ }
+}
+
+func (d *cpuacctUsageSysData) beforeSave() {}
+
+// +checklocksignore
+func (d *cpuacctUsageSysData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.cpuacctCgroup)
+}
+
+func (d *cpuacctUsageSysData) afterLoad() {}
+
+// +checklocksignore
+func (d *cpuacctUsageSysData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.cpuacctCgroup)
+}
+
+func (c *cpusetController) StateTypeName() string {
+ return "pkg/sentry/fsimpl/cgroupfs.cpusetController"
+}
+
+func (c *cpusetController) StateFields() []string {
+ return []string{
+ "controllerCommon",
+ }
+}
+
+func (c *cpusetController) beforeSave() {}
+
+// +checklocksignore
+func (c *cpusetController) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.controllerCommon)
+}
+
+func (c *cpusetController) afterLoad() {}
+
+// +checklocksignore
+func (c *cpusetController) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.controllerCommon)
+}
+
+func (r *dirRefs) StateTypeName() string {
+ return "pkg/sentry/fsimpl/cgroupfs.dirRefs"
+}
+
+func (r *dirRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *dirRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *dirRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *dirRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (c *jobController) StateTypeName() string {
+ return "pkg/sentry/fsimpl/cgroupfs.jobController"
+}
+
+func (c *jobController) StateFields() []string {
+ return []string{
+ "controllerCommon",
+ "id",
+ }
+}
+
+func (c *jobController) beforeSave() {}
+
+// +checklocksignore
+func (c *jobController) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.controllerCommon)
+ stateSinkObject.Save(1, &c.id)
+}
+
+func (c *jobController) afterLoad() {}
+
+// +checklocksignore
+func (c *jobController) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.controllerCommon)
+ stateSourceObject.Load(1, &c.id)
+}
+
+func (d *jobIDData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/cgroupfs.jobIDData"
+}
+
+func (d *jobIDData) StateFields() []string {
+ return []string{
+ "c",
+ }
+}
+
+func (d *jobIDData) beforeSave() {}
+
+// +checklocksignore
+func (d *jobIDData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.c)
+}
+
+func (d *jobIDData) afterLoad() {}
+
+// +checklocksignore
+func (d *jobIDData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.c)
+}
+
+func (c *memoryController) StateTypeName() string {
+ return "pkg/sentry/fsimpl/cgroupfs.memoryController"
+}
+
+func (c *memoryController) StateFields() []string {
+ return []string{
+ "controllerCommon",
+ "limitBytes",
+ }
+}
+
+func (c *memoryController) beforeSave() {}
+
+// +checklocksignore
+func (c *memoryController) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.controllerCommon)
+ stateSinkObject.Save(1, &c.limitBytes)
+}
+
+func (c *memoryController) afterLoad() {}
+
+// +checklocksignore
+func (c *memoryController) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.controllerCommon)
+ stateSourceObject.Load(1, &c.limitBytes)
+}
+
+func (d *memoryUsageInBytesData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/cgroupfs.memoryUsageInBytesData"
+}
+
+func (d *memoryUsageInBytesData) StateFields() []string {
+ return []string{}
+}
+
+func (d *memoryUsageInBytesData) beforeSave() {}
+
+// +checklocksignore
+func (d *memoryUsageInBytesData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+}
+
+func (d *memoryUsageInBytesData) afterLoad() {}
+
+// +checklocksignore
+func (d *memoryUsageInBytesData) StateLoad(stateSourceObject state.Source) {
+}
+
+func init() {
+ state.Register((*controllerCommon)(nil))
+ state.Register((*cgroupInode)(nil))
+ state.Register((*cgroupProcsData)(nil))
+ state.Register((*tasksData)(nil))
+ state.Register((*FilesystemType)(nil))
+ state.Register((*InternalData)(nil))
+ state.Register((*filesystem)(nil))
+ state.Register((*implStatFS)(nil))
+ state.Register((*dir)(nil))
+ state.Register((*controllerFile)(nil))
+ state.Register((*staticControllerFile)(nil))
+ state.Register((*cpuController)(nil))
+ state.Register((*cpuacctController)(nil))
+ state.Register((*cpuacctCgroup)(nil))
+ state.Register((*cpuacctStatData)(nil))
+ state.Register((*cpuacctUsageData)(nil))
+ state.Register((*cpuacctUsageUserData)(nil))
+ state.Register((*cpuacctUsageSysData)(nil))
+ state.Register((*cpusetController)(nil))
+ state.Register((*dirRefs)(nil))
+ state.Register((*jobController)(nil))
+ state.Register((*jobIDData)(nil))
+ state.Register((*memoryController)(nil))
+ state.Register((*memoryUsageInBytesData)(nil))
+}
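The methods above all follow the same stateify pattern: `StateFields` declares an ordered field list, `StateSave` writes each field to a `state.Sink` at its index, `StateLoad` reads it back from a `state.Source`, and `init` registers every savable type. A minimal hand-written sketch of that pattern for a hypothetical `example` type (the type and its fields are illustrative only, not part of cgroupfs) would look like this:

```go
package cgroupfs

import (
	"gvisor.dev/gvisor/pkg/state"
)

// example is a hypothetical savable type used only to illustrate the
// pattern that stateify generates above.
type example struct {
	id    uint64
	limit int64
}

func (e *example) StateTypeName() string {
	return "pkg/sentry/fsimpl/cgroupfs.example"
}

func (e *example) StateFields() []string {
	return []string{"id", "limit"}
}

func (e *example) beforeSave() {}

// StateSave writes each field to the sink at a stable index, mirroring
// the generated StateSave methods above.
func (e *example) StateSave(stateSinkObject state.Sink) {
	e.beforeSave()
	stateSinkObject.Save(0, &e.id)
	stateSinkObject.Save(1, &e.limit)
}

func (e *example) afterLoad() {}

// StateLoad restores the fields using the same indices.
func (e *example) StateLoad(stateSourceObject state.Source) {
	stateSourceObject.Load(0, &e.id)
	stateSourceObject.Load(1, &e.limit)
}

func init() {
	state.Register((*example)(nil))
}
```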
diff --git a/pkg/refsvfs2/refs_template.go b/pkg/sentry/fsimpl/cgroupfs/dir_refs.go
index 55b0a60a1..c29f0c9ae 100644
--- a/pkg/refsvfs2/refs_template.go
+++ b/pkg/sentry/fsimpl/cgroupfs/dir_refs.go
@@ -1,20 +1,4 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package refs_template defines a template that can be used by reference
-// counted objects. The template comes with leak checking capabilities.
-package refs_template
+package cgroupfs
import (
"fmt"
@@ -27,15 +11,11 @@ import (
// stack traces). This is false by default and should only be set to true for
// debugging purposes, as it can generate an extremely large amount of output
// and drastically degrade performance.
-const enableLogging = false
-
-// T is the type of the reference counted object. It is only used to customize
-// debug output when leak checking.
-type T interface{}
+const direnableLogging = false
// obj is used to customize logging. Note that we use a pointer to T so that
// we do not copy the entire object when passed as a format parameter.
-var obj *T
+var dirobj *dir
// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
@@ -49,7 +29,7 @@ var obj *T
// interfaces manually.
//
// +stateify savable
-type Refs struct {
+type dirRefs struct {
// refCount is composed of two fields:
//
// [32-bit speculative references]:[32-bit real references]
@@ -62,38 +42,38 @@ type Refs struct {
// InitRefs initializes r with one reference and, if enabled, activates leak
// checking.
-func (r *Refs) InitRefs() {
+func (r *dirRefs) InitRefs() {
atomic.StoreInt64(&r.refCount, 1)
refsvfs2.Register(r)
}
// RefType implements refsvfs2.CheckedObject.RefType.
-func (r *Refs) RefType() string {
- return fmt.Sprintf("%T", obj)[1:]
+func (r *dirRefs) RefType() string {
+ return fmt.Sprintf("%T", dirobj)[1:]
}
// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
-func (r *Refs) LeakMessage() string {
+func (r *dirRefs) LeakMessage() string {
return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
}
// LogRefs implements refsvfs2.CheckedObject.LogRefs.
-func (r *Refs) LogRefs() bool {
- return enableLogging
+func (r *dirRefs) LogRefs() bool {
+ return direnableLogging
}
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
-func (r *Refs) ReadRefs() int64 {
+func (r *dirRefs) ReadRefs() int64 {
return atomic.LoadInt64(&r.refCount)
}
// IncRef implements refs.RefCounter.IncRef.
//
//go:nosplit
-func (r *Refs) IncRef() {
+func (r *dirRefs) IncRef() {
v := atomic.AddInt64(&r.refCount, 1)
- if enableLogging {
+ if direnableLogging {
refsvfs2.LogIncRef(r, v)
}
if v <= 1 {
@@ -108,17 +88,16 @@ func (r *Refs) IncRef() {
// other TryIncRef calls from genuine references held.
//
//go:nosplit
-func (r *Refs) TryIncRef() bool {
+func (r *dirRefs) TryIncRef() bool {
const speculativeRef = 1 << 32
if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
- // This object has already been freed.
+
atomic.AddInt64(&r.refCount, -speculativeRef)
return false
}
- // Turn into a real reference.
v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
- if enableLogging {
+ if direnableLogging {
refsvfs2.LogTryIncRef(r, v)
}
return true
@@ -136,9 +115,9 @@ func (r *Refs) TryIncRef() bool {
// A: TryIncRef [transform speculative to real]
//
//go:nosplit
-func (r *Refs) DecRef(destroy func()) {
+func (r *dirRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
- if enableLogging {
+ if direnableLogging {
refsvfs2.LogDecRef(r, v)
}
switch {
@@ -147,14 +126,14 @@ func (r *Refs) DecRef(destroy func()) {
case v == 0:
refsvfs2.Unregister(r)
- // Call the destructor.
+
if destroy != nil {
destroy()
}
}
}
-func (r *Refs) afterLoad() {
+func (r *dirRefs) afterLoad() {
if r.ReadRefs() > 0 {
refsvfs2.Register(r)
}
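The hunk above only renames the template's identifiers (`Refs` becomes `dirRefs`, `enableLogging` becomes `direnableLogging`); the counting scheme itself is unchanged: the upper 32 bits of `refCount` hold speculative references and the lower 32 bits hold real references, which lets `TryIncRef` avoid a compare-and-swap loop. The standalone sketch below re-implements just that packing trick; the `toyRefs` type and the missing leak checking are simplifications for illustration, not part of the real template.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// toyRefs mirrors the refCount layout used by the template above: the
// upper 32 bits hold speculative references, the lower 32 bits hold
// real references.
type toyRefs struct {
	refCount int64
}

const speculativeRef = 1 << 32

// TryIncRef adds a speculative reference first. If the low 32 bits are
// zero the object is already dead and the speculative reference is
// rolled back; otherwise it is converted into a real reference.
func (r *toyRefs) TryIncRef() bool {
	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
		// Already freed: undo the speculative increment.
		atomic.AddInt64(&r.refCount, -speculativeRef)
		return false
	}
	// Turn the speculative reference into a real one.
	atomic.AddInt64(&r.refCount, -speculativeRef+1)
	return true
}

// DecRef drops a real reference and reports whether the count hit zero.
func (r *toyRefs) DecRef() bool {
	return atomic.AddInt64(&r.refCount, -1) == 0
}

func main() {
	live := &toyRefs{refCount: 1} // one real reference outstanding
	dead := &toyRefs{refCount: 0} // no references left

	fmt.Println(live.TryIncRef()) // true: count is now 2
	fmt.Println(dead.TryIncRef()) // false: object already released
	fmt.Println(live.DecRef())    // false: one reference remains
	fmt.Println(live.DecRef())    // true: count reached zero
}
```

Running the sketch prints `true`, `false`, `false`, `true`: the live counter accepts a new reference, the already-released counter rejects one, and the final DecRef observes the count reaching zero.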
diff --git a/pkg/sentry/fsimpl/devpts/BUILD b/pkg/sentry/fsimpl/devpts/BUILD
deleted file mode 100644
index f981ff296..000000000
--- a/pkg/sentry/fsimpl/devpts/BUILD
+++ /dev/null
@@ -1,65 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-licenses(["notice"])
-
-go_template_instance(
- name = "root_inode_refs",
- out = "root_inode_refs.go",
- package = "devpts",
- prefix = "rootInode",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "rootInode",
- },
-)
-
-go_library(
- name = "devpts",
- srcs = [
- "devpts.go",
- "line_discipline.go",
- "master.go",
- "queue.go",
- "replica.go",
- "root_inode_refs.go",
- "terminal.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/log",
- "//pkg/marshal",
- "//pkg/marshal/primitive",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/safemem",
- "//pkg/sentry/arch",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/lock",
- "//pkg/sentry/fsimpl/kernfs",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/unimpl",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- "//pkg/waiter",
- ],
-)
-
-go_test(
- name = "devpts_test",
- size = "small",
- srcs = ["devpts_test.go"],
- library = ":devpts",
- deps = [
- "//pkg/abi/linux",
- "//pkg/sentry/contexttest",
- "//pkg/usermem",
- "//pkg/waiter",
- ],
-)
diff --git a/pkg/sentry/fsimpl/devpts/devpts_state_autogen.go b/pkg/sentry/fsimpl/devpts/devpts_state_autogen.go
new file mode 100644
index 000000000..78e685c7e
--- /dev/null
+++ b/pkg/sentry/fsimpl/devpts/devpts_state_autogen.go
@@ -0,0 +1,502 @@
+// automatically generated by stateify.
+
+package devpts
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (fstype *FilesystemType) StateTypeName() string {
+ return "pkg/sentry/fsimpl/devpts.FilesystemType"
+}
+
+func (fstype *FilesystemType) StateFields() []string {
+ return []string{
+ "initErr",
+ "fs",
+ "root",
+ }
+}
+
+func (fstype *FilesystemType) beforeSave() {}
+
+// +checklocksignore
+func (fstype *FilesystemType) StateSave(stateSinkObject state.Sink) {
+ fstype.beforeSave()
+ stateSinkObject.Save(0, &fstype.initErr)
+ stateSinkObject.Save(1, &fstype.fs)
+ stateSinkObject.Save(2, &fstype.root)
+}
+
+func (fstype *FilesystemType) afterLoad() {}
+
+// +checklocksignore
+func (fstype *FilesystemType) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fstype.initErr)
+ stateSourceObject.Load(1, &fstype.fs)
+ stateSourceObject.Load(2, &fstype.root)
+}
+
+func (fs *filesystem) StateTypeName() string {
+ return "pkg/sentry/fsimpl/devpts.filesystem"
+}
+
+func (fs *filesystem) StateFields() []string {
+ return []string{
+ "Filesystem",
+ "devMinor",
+ }
+}
+
+func (fs *filesystem) beforeSave() {}
+
+// +checklocksignore
+func (fs *filesystem) StateSave(stateSinkObject state.Sink) {
+ fs.beforeSave()
+ stateSinkObject.Save(0, &fs.Filesystem)
+ stateSinkObject.Save(1, &fs.devMinor)
+}
+
+func (fs *filesystem) afterLoad() {}
+
+// +checklocksignore
+func (fs *filesystem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fs.Filesystem)
+ stateSourceObject.Load(1, &fs.devMinor)
+}
+
+func (i *rootInode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/devpts.rootInode"
+}
+
+func (i *rootInode) StateFields() []string {
+ return []string{
+ "implStatFS",
+ "InodeAlwaysValid",
+ "InodeAttrs",
+ "InodeDirectoryNoNewChildren",
+ "InodeNotSymlink",
+ "InodeTemporary",
+ "OrderedChildren",
+ "rootInodeRefs",
+ "locks",
+ "master",
+ "replicas",
+ "nextIdx",
+ }
+}
+
+func (i *rootInode) beforeSave() {}
+
+// +checklocksignore
+func (i *rootInode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.implStatFS)
+ stateSinkObject.Save(1, &i.InodeAlwaysValid)
+ stateSinkObject.Save(2, &i.InodeAttrs)
+ stateSinkObject.Save(3, &i.InodeDirectoryNoNewChildren)
+ stateSinkObject.Save(4, &i.InodeNotSymlink)
+ stateSinkObject.Save(5, &i.InodeTemporary)
+ stateSinkObject.Save(6, &i.OrderedChildren)
+ stateSinkObject.Save(7, &i.rootInodeRefs)
+ stateSinkObject.Save(8, &i.locks)
+ stateSinkObject.Save(9, &i.master)
+ stateSinkObject.Save(10, &i.replicas)
+ stateSinkObject.Save(11, &i.nextIdx)
+}
+
+func (i *rootInode) afterLoad() {}
+
+// +checklocksignore
+func (i *rootInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.implStatFS)
+ stateSourceObject.Load(1, &i.InodeAlwaysValid)
+ stateSourceObject.Load(2, &i.InodeAttrs)
+ stateSourceObject.Load(3, &i.InodeDirectoryNoNewChildren)
+ stateSourceObject.Load(4, &i.InodeNotSymlink)
+ stateSourceObject.Load(5, &i.InodeTemporary)
+ stateSourceObject.Load(6, &i.OrderedChildren)
+ stateSourceObject.Load(7, &i.rootInodeRefs)
+ stateSourceObject.Load(8, &i.locks)
+ stateSourceObject.Load(9, &i.master)
+ stateSourceObject.Load(10, &i.replicas)
+ stateSourceObject.Load(11, &i.nextIdx)
+}
+
+func (i *implStatFS) StateTypeName() string {
+ return "pkg/sentry/fsimpl/devpts.implStatFS"
+}
+
+func (i *implStatFS) StateFields() []string {
+ return []string{}
+}
+
+func (i *implStatFS) beforeSave() {}
+
+// +checklocksignore
+func (i *implStatFS) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+}
+
+func (i *implStatFS) afterLoad() {}
+
+// +checklocksignore
+func (i *implStatFS) StateLoad(stateSourceObject state.Source) {
+}
+
+func (l *lineDiscipline) StateTypeName() string {
+ return "pkg/sentry/fsimpl/devpts.lineDiscipline"
+}
+
+func (l *lineDiscipline) StateFields() []string {
+ return []string{
+ "size",
+ "inQueue",
+ "outQueue",
+ "termios",
+ "column",
+ "masterWaiter",
+ "replicaWaiter",
+ }
+}
+
+func (l *lineDiscipline) beforeSave() {}
+
+// +checklocksignore
+func (l *lineDiscipline) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.size)
+ stateSinkObject.Save(1, &l.inQueue)
+ stateSinkObject.Save(2, &l.outQueue)
+ stateSinkObject.Save(3, &l.termios)
+ stateSinkObject.Save(4, &l.column)
+ stateSinkObject.Save(5, &l.masterWaiter)
+ stateSinkObject.Save(6, &l.replicaWaiter)
+}
+
+func (l *lineDiscipline) afterLoad() {}
+
+// +checklocksignore
+func (l *lineDiscipline) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.size)
+ stateSourceObject.Load(1, &l.inQueue)
+ stateSourceObject.Load(2, &l.outQueue)
+ stateSourceObject.Load(3, &l.termios)
+ stateSourceObject.Load(4, &l.column)
+ stateSourceObject.Load(5, &l.masterWaiter)
+ stateSourceObject.Load(6, &l.replicaWaiter)
+}
+
+func (o *outputQueueTransformer) StateTypeName() string {
+ return "pkg/sentry/fsimpl/devpts.outputQueueTransformer"
+}
+
+func (o *outputQueueTransformer) StateFields() []string {
+ return []string{}
+}
+
+func (o *outputQueueTransformer) beforeSave() {}
+
+// +checklocksignore
+func (o *outputQueueTransformer) StateSave(stateSinkObject state.Sink) {
+ o.beforeSave()
+}
+
+func (o *outputQueueTransformer) afterLoad() {}
+
+// +checklocksignore
+func (o *outputQueueTransformer) StateLoad(stateSourceObject state.Source) {
+}
+
+func (i *inputQueueTransformer) StateTypeName() string {
+ return "pkg/sentry/fsimpl/devpts.inputQueueTransformer"
+}
+
+func (i *inputQueueTransformer) StateFields() []string {
+ return []string{}
+}
+
+func (i *inputQueueTransformer) beforeSave() {}
+
+// +checklocksignore
+func (i *inputQueueTransformer) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+}
+
+func (i *inputQueueTransformer) afterLoad() {}
+
+// +checklocksignore
+func (i *inputQueueTransformer) StateLoad(stateSourceObject state.Source) {
+}
+
+func (mi *masterInode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/devpts.masterInode"
+}
+
+func (mi *masterInode) StateFields() []string {
+ return []string{
+ "implStatFS",
+ "InodeAttrs",
+ "InodeNoopRefCount",
+ "InodeNotDirectory",
+ "InodeNotSymlink",
+ "locks",
+ "root",
+ }
+}
+
+func (mi *masterInode) beforeSave() {}
+
+// +checklocksignore
+func (mi *masterInode) StateSave(stateSinkObject state.Sink) {
+ mi.beforeSave()
+ stateSinkObject.Save(0, &mi.implStatFS)
+ stateSinkObject.Save(1, &mi.InodeAttrs)
+ stateSinkObject.Save(2, &mi.InodeNoopRefCount)
+ stateSinkObject.Save(3, &mi.InodeNotDirectory)
+ stateSinkObject.Save(4, &mi.InodeNotSymlink)
+ stateSinkObject.Save(5, &mi.locks)
+ stateSinkObject.Save(6, &mi.root)
+}
+
+func (mi *masterInode) afterLoad() {}
+
+// +checklocksignore
+func (mi *masterInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &mi.implStatFS)
+ stateSourceObject.Load(1, &mi.InodeAttrs)
+ stateSourceObject.Load(2, &mi.InodeNoopRefCount)
+ stateSourceObject.Load(3, &mi.InodeNotDirectory)
+ stateSourceObject.Load(4, &mi.InodeNotSymlink)
+ stateSourceObject.Load(5, &mi.locks)
+ stateSourceObject.Load(6, &mi.root)
+}
+
+func (mfd *masterFileDescription) StateTypeName() string {
+ return "pkg/sentry/fsimpl/devpts.masterFileDescription"
+}
+
+func (mfd *masterFileDescription) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "LockFD",
+ "inode",
+ "t",
+ }
+}
+
+func (mfd *masterFileDescription) beforeSave() {}
+
+// +checklocksignore
+func (mfd *masterFileDescription) StateSave(stateSinkObject state.Sink) {
+ mfd.beforeSave()
+ stateSinkObject.Save(0, &mfd.vfsfd)
+ stateSinkObject.Save(1, &mfd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &mfd.LockFD)
+ stateSinkObject.Save(3, &mfd.inode)
+ stateSinkObject.Save(4, &mfd.t)
+}
+
+func (mfd *masterFileDescription) afterLoad() {}
+
+// +checklocksignore
+func (mfd *masterFileDescription) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &mfd.vfsfd)
+ stateSourceObject.Load(1, &mfd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &mfd.LockFD)
+ stateSourceObject.Load(3, &mfd.inode)
+ stateSourceObject.Load(4, &mfd.t)
+}
+
+func (q *queue) StateTypeName() string {
+ return "pkg/sentry/fsimpl/devpts.queue"
+}
+
+func (q *queue) StateFields() []string {
+ return []string{
+ "readBuf",
+ "waitBuf",
+ "waitBufLen",
+ "readable",
+ "transformer",
+ }
+}
+
+func (q *queue) beforeSave() {}
+
+// +checklocksignore
+func (q *queue) StateSave(stateSinkObject state.Sink) {
+ q.beforeSave()
+ stateSinkObject.Save(0, &q.readBuf)
+ stateSinkObject.Save(1, &q.waitBuf)
+ stateSinkObject.Save(2, &q.waitBufLen)
+ stateSinkObject.Save(3, &q.readable)
+ stateSinkObject.Save(4, &q.transformer)
+}
+
+func (q *queue) afterLoad() {}
+
+// +checklocksignore
+func (q *queue) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &q.readBuf)
+ stateSourceObject.Load(1, &q.waitBuf)
+ stateSourceObject.Load(2, &q.waitBufLen)
+ stateSourceObject.Load(3, &q.readable)
+ stateSourceObject.Load(4, &q.transformer)
+}
+
+func (ri *replicaInode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/devpts.replicaInode"
+}
+
+func (ri *replicaInode) StateFields() []string {
+ return []string{
+ "implStatFS",
+ "InodeAttrs",
+ "InodeNoopRefCount",
+ "InodeNotDirectory",
+ "InodeNotSymlink",
+ "locks",
+ "root",
+ "t",
+ }
+}
+
+func (ri *replicaInode) beforeSave() {}
+
+// +checklocksignore
+func (ri *replicaInode) StateSave(stateSinkObject state.Sink) {
+ ri.beforeSave()
+ stateSinkObject.Save(0, &ri.implStatFS)
+ stateSinkObject.Save(1, &ri.InodeAttrs)
+ stateSinkObject.Save(2, &ri.InodeNoopRefCount)
+ stateSinkObject.Save(3, &ri.InodeNotDirectory)
+ stateSinkObject.Save(4, &ri.InodeNotSymlink)
+ stateSinkObject.Save(5, &ri.locks)
+ stateSinkObject.Save(6, &ri.root)
+ stateSinkObject.Save(7, &ri.t)
+}
+
+func (ri *replicaInode) afterLoad() {}
+
+// +checklocksignore
+func (ri *replicaInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &ri.implStatFS)
+ stateSourceObject.Load(1, &ri.InodeAttrs)
+ stateSourceObject.Load(2, &ri.InodeNoopRefCount)
+ stateSourceObject.Load(3, &ri.InodeNotDirectory)
+ stateSourceObject.Load(4, &ri.InodeNotSymlink)
+ stateSourceObject.Load(5, &ri.locks)
+ stateSourceObject.Load(6, &ri.root)
+ stateSourceObject.Load(7, &ri.t)
+}
+
+func (rfd *replicaFileDescription) StateTypeName() string {
+ return "pkg/sentry/fsimpl/devpts.replicaFileDescription"
+}
+
+func (rfd *replicaFileDescription) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "LockFD",
+ "inode",
+ }
+}
+
+func (rfd *replicaFileDescription) beforeSave() {}
+
+// +checklocksignore
+func (rfd *replicaFileDescription) StateSave(stateSinkObject state.Sink) {
+ rfd.beforeSave()
+ stateSinkObject.Save(0, &rfd.vfsfd)
+ stateSinkObject.Save(1, &rfd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &rfd.LockFD)
+ stateSinkObject.Save(3, &rfd.inode)
+}
+
+func (rfd *replicaFileDescription) afterLoad() {}
+
+// +checklocksignore
+func (rfd *replicaFileDescription) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &rfd.vfsfd)
+ stateSourceObject.Load(1, &rfd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &rfd.LockFD)
+ stateSourceObject.Load(3, &rfd.inode)
+}
+
+func (r *rootInodeRefs) StateTypeName() string {
+ return "pkg/sentry/fsimpl/devpts.rootInodeRefs"
+}
+
+func (r *rootInodeRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *rootInodeRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *rootInodeRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *rootInodeRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (tm *Terminal) StateTypeName() string {
+ return "pkg/sentry/fsimpl/devpts.Terminal"
+}
+
+func (tm *Terminal) StateFields() []string {
+ return []string{
+ "n",
+ "ld",
+ "masterKTTY",
+ "replicaKTTY",
+ }
+}
+
+func (tm *Terminal) beforeSave() {}
+
+// +checklocksignore
+func (tm *Terminal) StateSave(stateSinkObject state.Sink) {
+ tm.beforeSave()
+ stateSinkObject.Save(0, &tm.n)
+ stateSinkObject.Save(1, &tm.ld)
+ stateSinkObject.Save(2, &tm.masterKTTY)
+ stateSinkObject.Save(3, &tm.replicaKTTY)
+}
+
+func (tm *Terminal) afterLoad() {}
+
+// +checklocksignore
+func (tm *Terminal) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &tm.n)
+ stateSourceObject.Load(1, &tm.ld)
+ stateSourceObject.Load(2, &tm.masterKTTY)
+ stateSourceObject.Load(3, &tm.replicaKTTY)
+}
+
+func init() {
+ state.Register((*FilesystemType)(nil))
+ state.Register((*filesystem)(nil))
+ state.Register((*rootInode)(nil))
+ state.Register((*implStatFS)(nil))
+ state.Register((*lineDiscipline)(nil))
+ state.Register((*outputQueueTransformer)(nil))
+ state.Register((*inputQueueTransformer)(nil))
+ state.Register((*masterInode)(nil))
+ state.Register((*masterFileDescription)(nil))
+ state.Register((*queue)(nil))
+ state.Register((*replicaInode)(nil))
+ state.Register((*replicaFileDescription)(nil))
+ state.Register((*rootInodeRefs)(nil))
+ state.Register((*Terminal)(nil))
+}
diff --git a/pkg/sentry/fsimpl/devpts/devpts_test.go b/pkg/sentry/fsimpl/devpts/devpts_test.go
deleted file mode 100644
index 1ef07d702..000000000
--- a/pkg/sentry/fsimpl/devpts/devpts_test.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package devpts
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/usermem"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-func TestSimpleMasterToReplica(t *testing.T) {
- ld := newLineDiscipline(linux.DefaultReplicaTermios)
- ctx := contexttest.Context(t)
- inBytes := []byte("hello, tty\n")
- src := usermem.BytesIOSequence(inBytes)
- outBytes := make([]byte, 32)
- dst := usermem.BytesIOSequence(outBytes)
-
- // Write to the input queue.
- nw, err := ld.inputQueueWrite(ctx, src)
- if err != nil {
- t.Fatalf("error writing to input queue: %v", err)
- }
- if nw != int64(len(inBytes)) {
- t.Fatalf("wrote wrong length: got %d, want %d", nw, len(inBytes))
- }
-
- // Read from the input queue.
- nr, err := ld.inputQueueRead(ctx, dst)
- if err != nil {
- t.Fatalf("error reading from input queue: %v", err)
- }
- if nr != int64(len(inBytes)) {
- t.Fatalf("read wrong length: got %d, want %d", nr, len(inBytes))
- }
-
- outStr := string(outBytes[:nr])
- inStr := string(inBytes)
- if outStr != inStr {
- t.Fatalf("written and read strings do not match: got %q, want %q", outStr, inStr)
- }
-}
-
-type callback func(*waiter.Entry, waiter.EventMask)
-
-func (cb callback) Callback(entry *waiter.Entry, mask waiter.EventMask) {
- cb(entry, mask)
-}
-
-func TestEchoDeadlock(t *testing.T) {
- ctx := contexttest.Context(t)
- termios := linux.DefaultReplicaTermios
- termios.LocalFlags |= linux.ECHO
- ld := newLineDiscipline(termios)
- outBytes := make([]byte, 32)
- dst := usermem.BytesIOSequence(outBytes)
- entry := &waiter.Entry{Callback: callback(func(*waiter.Entry, waiter.EventMask) {
- ld.inputQueueRead(ctx, dst)
- })}
- ld.masterWaiter.EventRegister(entry, waiter.ReadableEvents)
- defer ld.masterWaiter.EventUnregister(entry)
- inBytes := []byte("hello, tty\n")
- n, err := ld.inputQueueWrite(ctx, usermem.BytesIOSequence(inBytes))
- if err != nil {
- t.Fatalf("inputQueueWrite: %v", err)
- }
- if int(n) != len(inBytes) {
- t.Fatalf("read wrong length: got %d, want %d", n, len(inBytes))
- }
- outStr := string(outBytes[:n])
- inStr := string(inBytes)
- if outStr != inStr {
- t.Fatalf("written and read strings do not match: got %q, want %q", outStr, inStr)
- }
-}
diff --git a/pkg/sentry/fsimpl/devpts/root_inode_refs.go b/pkg/sentry/fsimpl/devpts/root_inode_refs.go
new file mode 100644
index 000000000..e53739a90
--- /dev/null
+++ b/pkg/sentry/fsimpl/devpts/root_inode_refs.go
@@ -0,0 +1,140 @@
+package devpts
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const rootInodeenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var rootInodeobj *rootInode
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type rootInodeRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *rootInodeRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *rootInodeRefs) RefType() string {
+ return fmt.Sprintf("%T", rootInodeobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *rootInodeRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *rootInodeRefs) LogRefs() bool {
+ return rootInodeenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *rootInodeRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *rootInodeRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if rootInodeenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.TryRefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *rootInodeRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if rootInodeenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *rootInodeRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if rootInodeenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *rootInodeRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
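This rootInodeRefs file instantiates the same template as dirRefs above, so the lifecycle is identical: `InitRefs` starts the count at one and registers the object for leak checking, and `DecRef` unregisters the object and runs the supplied destructor only when the count reaches zero. A hypothetical in-package illustration of that lifecycle (the `lifecycle` function is illustrative only; the real `rootInode` wiring elsewhere in devpts may differ):

```go
package devpts

// lifecycle is a hypothetical walk-through of the generated
// rootInodeRefs methods; it is not called by the real package.
func lifecycle() {
	var refs rootInodeRefs

	// Start at one reference and register with the leak checker.
	refs.InitRefs()

	// A second holder takes a reference...
	refs.IncRef()

	// ...and releases it; the destructor does not run while
	// references remain.
	refs.DecRef(func() {})

	// The last holder drops the final reference; DecRef unregisters
	// the object and invokes the destructor exactly once.
	refs.DecRef(func() {
		// Release any resources owned by the object here.
	})
}
```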
diff --git a/pkg/sentry/fsimpl/devtmpfs/BUILD b/pkg/sentry/fsimpl/devtmpfs/BUILD
deleted file mode 100644
index e49a04c1b..000000000
--- a/pkg/sentry/fsimpl/devtmpfs/BUILD
+++ /dev/null
@@ -1,37 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-licenses(["notice"])
-
-go_library(
- name = "devtmpfs",
- srcs = [
- "devtmpfs.go",
- "save_restore.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/fspath",
- "//pkg/sentry/fsimpl/tmpfs",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- ],
-)
-
-go_test(
- name = "devtmpfs_test",
- size = "small",
- srcs = ["devtmpfs_test.go"],
- library = ":devtmpfs",
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/fspath",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/fsimpl/tmpfs",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- ],
-)
diff --git a/pkg/sentry/fsimpl/devtmpfs/devtmpfs_state_autogen.go b/pkg/sentry/fsimpl/devtmpfs/devtmpfs_state_autogen.go
new file mode 100644
index 000000000..900c7d8fe
--- /dev/null
+++ b/pkg/sentry/fsimpl/devtmpfs/devtmpfs_state_autogen.go
@@ -0,0 +1,41 @@
+// automatically generated by stateify.
+
+package devtmpfs
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (fst *FilesystemType) StateTypeName() string {
+ return "pkg/sentry/fsimpl/devtmpfs.FilesystemType"
+}
+
+func (fst *FilesystemType) StateFields() []string {
+ return []string{
+ "initErr",
+ "fs",
+ "root",
+ }
+}
+
+func (fst *FilesystemType) beforeSave() {}
+
+// +checklocksignore
+func (fst *FilesystemType) StateSave(stateSinkObject state.Sink) {
+ fst.beforeSave()
+ stateSinkObject.Save(0, &fst.initErr)
+ stateSinkObject.Save(1, &fst.fs)
+ stateSinkObject.Save(2, &fst.root)
+}
+
+// +checklocksignore
+func (fst *FilesystemType) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fst.initErr)
+ stateSourceObject.Load(1, &fst.fs)
+ stateSourceObject.Load(2, &fst.root)
+ stateSourceObject.AfterLoad(fst.afterLoad)
+}
+
+func init() {
+ state.Register((*FilesystemType)(nil))
+}
diff --git a/pkg/sentry/fsimpl/devtmpfs/devtmpfs_test.go b/pkg/sentry/fsimpl/devtmpfs/devtmpfs_test.go
deleted file mode 100644
index e058eda7a..000000000
--- a/pkg/sentry/fsimpl/devtmpfs/devtmpfs_test.go
+++ /dev/null
@@ -1,230 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package devtmpfs
-
-import (
- "path"
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/fspath"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/tmpfs"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
-)
-
-const devPath = "/dev"
-
-func setupDevtmpfs(t *testing.T) (context.Context, *auth.Credentials, *vfs.VirtualFilesystem, vfs.VirtualDentry, func()) {
- t.Helper()
-
- ctx := contexttest.Context(t)
- creds := auth.CredentialsFromContext(ctx)
- vfsObj := &vfs.VirtualFilesystem{}
- if err := vfsObj.Init(ctx); err != nil {
- t.Fatalf("VFS init: %v", err)
- }
- // Register tmpfs just so that we can have a root filesystem that isn't
- // devtmpfs.
- vfsObj.MustRegisterFilesystemType("tmpfs", tmpfs.FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
- })
- vfsObj.MustRegisterFilesystemType("devtmpfs", &FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
- })
-
- // Create a test mount namespace with devtmpfs mounted at "/dev".
- mntns, err := vfsObj.NewMountNamespace(ctx, creds, "tmpfs" /* source */, "tmpfs" /* fsTypeName */, &vfs.MountOptions{})
- if err != nil {
- t.Fatalf("failed to create tmpfs root mount: %v", err)
- }
- root := mntns.Root()
- root.IncRef()
- devpop := vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(devPath),
- }
- if err := vfsObj.MkdirAt(ctx, creds, &devpop, &vfs.MkdirOptions{
- Mode: 0755,
- }); err != nil {
- t.Fatalf("failed to create mount point: %v", err)
- }
- if _, err := vfsObj.MountAt(ctx, creds, "devtmpfs" /* source */, &devpop, "devtmpfs" /* fsTypeName */, &vfs.MountOptions{}); err != nil {
- t.Fatalf("failed to mount devtmpfs: %v", err)
- }
-
- return ctx, creds, vfsObj, root, func() {
- root.DecRef(ctx)
- mntns.DecRef(ctx)
- }
-}
-
-func TestUserspaceInit(t *testing.T) {
- ctx, creds, vfsObj, root, cleanup := setupDevtmpfs(t)
- defer cleanup()
-
- a, err := NewAccessor(ctx, vfsObj, creds, "devtmpfs")
- if err != nil {
- t.Fatalf("failed to create devtmpfs.Accessor: %v", err)
- }
- defer a.Release(ctx)
-
- // Create "userspace-initialized" files using a devtmpfs.Accessor.
- if err := a.UserspaceInit(ctx); err != nil {
- t.Fatalf("failed to userspace-initialize devtmpfs: %v", err)
- }
-
- // Created files should be visible in the test mount namespace.
- links := []struct {
- source string
- target string
- }{
- {
- source: "fd",
- target: "/proc/self/fd",
- },
- {
- source: "stdin",
- target: "/proc/self/fd/0",
- },
- {
- source: "stdout",
- target: "/proc/self/fd/1",
- },
- {
- source: "stderr",
- target: "/proc/self/fd/2",
- },
- {
- source: "ptmx",
- target: "pts/ptmx",
- },
- }
-
- for _, link := range links {
- abspath := path.Join(devPath, link.source)
- if gotTarget, err := vfsObj.ReadlinkAt(ctx, creds, &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(abspath),
- }); err != nil || gotTarget != link.target {
- t.Errorf("readlink(%q): got (%q, %v), wanted (%q, nil)", abspath, gotTarget, err, link.target)
- }
- }
-
- dirs := []string{"shm", "pts"}
- for _, dir := range dirs {
- abspath := path.Join(devPath, dir)
- statx, err := vfsObj.StatAt(ctx, creds, &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(abspath),
- }, &vfs.StatOptions{
- Mask: linux.STATX_MODE,
- })
- if err != nil {
- t.Errorf("stat(%q): got error %v ", abspath, err)
- continue
- }
- if want := uint16(0755) | linux.S_IFDIR; statx.Mode != want {
- t.Errorf("stat(%q): got mode %x, want %x", abspath, statx.Mode, want)
- }
- }
-}
-
-func TestCreateDeviceFile(t *testing.T) {
- ctx, creds, vfsObj, root, cleanup := setupDevtmpfs(t)
- defer cleanup()
-
- a, err := NewAccessor(ctx, vfsObj, creds, "devtmpfs")
- if err != nil {
- t.Fatalf("failed to create devtmpfs.Accessor: %v", err)
- }
- defer a.Release(ctx)
-
- devFiles := []struct {
- path string
- kind vfs.DeviceKind
- major uint32
- minor uint32
- perms uint16
- }{
- {
- path: "dummy",
- kind: vfs.CharDevice,
- major: 12,
- minor: 34,
- perms: 0600,
- },
- {
- path: "foo/bar",
- kind: vfs.BlockDevice,
- major: 13,
- minor: 35,
- perms: 0660,
- },
- {
- path: "foo/baz",
- kind: vfs.CharDevice,
- major: 12,
- minor: 40,
- perms: 0666,
- },
- {
- path: "a/b/c/d/e",
- kind: vfs.BlockDevice,
- major: 12,
- minor: 34,
- perms: 0600,
- },
- }
-
- for _, f := range devFiles {
- if err := a.CreateDeviceFile(ctx, f.path, f.kind, f.major, f.minor, f.perms); err != nil {
- t.Fatalf("failed to create device file: %v", err)
- }
- // The device special file should be visible in the test mount namespace.
- abspath := path.Join(devPath, f.path)
- stat, err := vfsObj.StatAt(ctx, creds, &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(abspath),
- }, &vfs.StatOptions{
- Mask: linux.STATX_TYPE | linux.STATX_MODE,
- })
- if err != nil {
- t.Fatalf("failed to stat device file at %q: %v", abspath, err)
- }
- if stat.RdevMajor != f.major {
- t.Errorf("major device number: got %v, wanted %v", stat.RdevMajor, f.major)
- }
- if stat.RdevMinor != f.minor {
- t.Errorf("minor device number: got %v, wanted %v", stat.RdevMinor, f.minor)
- }
- wantMode := f.perms
- switch f.kind {
- case vfs.CharDevice:
- wantMode |= linux.S_IFCHR
- case vfs.BlockDevice:
- wantMode |= linux.S_IFBLK
- }
- if stat.Mode != wantMode {
- t.Errorf("device file mode: got %v, wanted %v", stat.Mode, wantMode)
- }
- }
-}
diff --git a/pkg/sentry/fsimpl/eventfd/BUILD b/pkg/sentry/fsimpl/eventfd/BUILD
deleted file mode 100644
index c09fdc7f9..000000000
--- a/pkg/sentry/fsimpl/eventfd/BUILD
+++ /dev/null
@@ -1,35 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-licenses(["notice"])
-
-go_library(
- name = "eventfd",
- srcs = ["eventfd.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/fdnotifier",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/sentry/vfs",
- "//pkg/syserror",
- "//pkg/usermem",
- "//pkg/waiter",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "eventfd_test",
- size = "small",
- srcs = ["eventfd_test.go"],
- library = ":eventfd",
- deps = [
- "//pkg/abi/linux",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/vfs",
- "//pkg/usermem",
- "//pkg/waiter",
- ],
-)
diff --git a/pkg/sentry/fsimpl/eventfd/eventfd_state_autogen.go b/pkg/sentry/fsimpl/eventfd/eventfd_state_autogen.go
new file mode 100644
index 000000000..93de7f32e
--- /dev/null
+++ b/pkg/sentry/fsimpl/eventfd/eventfd_state_autogen.go
@@ -0,0 +1,57 @@
+// automatically generated by stateify.
+
+package eventfd
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (efd *EventFileDescription) StateTypeName() string {
+ return "pkg/sentry/fsimpl/eventfd.EventFileDescription"
+}
+
+func (efd *EventFileDescription) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "DentryMetadataFileDescriptionImpl",
+ "NoLockFD",
+ "queue",
+ "val",
+ "semMode",
+ "hostfd",
+ }
+}
+
+func (efd *EventFileDescription) beforeSave() {}
+
+// +checklocksignore
+func (efd *EventFileDescription) StateSave(stateSinkObject state.Sink) {
+ efd.beforeSave()
+ stateSinkObject.Save(0, &efd.vfsfd)
+ stateSinkObject.Save(1, &efd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &efd.DentryMetadataFileDescriptionImpl)
+ stateSinkObject.Save(3, &efd.NoLockFD)
+ stateSinkObject.Save(4, &efd.queue)
+ stateSinkObject.Save(5, &efd.val)
+ stateSinkObject.Save(6, &efd.semMode)
+ stateSinkObject.Save(7, &efd.hostfd)
+}
+
+func (efd *EventFileDescription) afterLoad() {}
+
+// +checklocksignore
+func (efd *EventFileDescription) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &efd.vfsfd)
+ stateSourceObject.Load(1, &efd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &efd.DentryMetadataFileDescriptionImpl)
+ stateSourceObject.Load(3, &efd.NoLockFD)
+ stateSourceObject.Load(4, &efd.queue)
+ stateSourceObject.Load(5, &efd.val)
+ stateSourceObject.Load(6, &efd.semMode)
+ stateSourceObject.Load(7, &efd.hostfd)
+}
+
+func init() {
+ state.Register((*EventFileDescription)(nil))
+}
diff --git a/pkg/sentry/fsimpl/eventfd/eventfd_test.go b/pkg/sentry/fsimpl/eventfd/eventfd_test.go
deleted file mode 100644
index 85718f813..000000000
--- a/pkg/sentry/fsimpl/eventfd/eventfd_test.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package eventfd
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/usermem"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-func TestEventFD(t *testing.T) {
- initVals := []uint64{
- 0,
- // Using a non-zero initial value verifies that writing to an
- // eventfd signals when the eventfd's counter was already
- // non-zero.
- 343,
- }
-
- for _, initVal := range initVals {
- ctx := contexttest.Context(t)
- vfsObj := &vfs.VirtualFilesystem{}
- if err := vfsObj.Init(ctx); err != nil {
- t.Fatalf("VFS init: %v", err)
- }
-
- // Make a new eventfd that is writable.
- eventfd, err := New(ctx, vfsObj, initVal, false, linux.O_RDWR)
- if err != nil {
- t.Fatalf("New() failed: %v", err)
- }
- defer eventfd.DecRef(ctx)
-
- // Register a callback for a write event.
- w, ch := waiter.NewChannelEntry(nil)
- eventfd.EventRegister(&w, waiter.ReadableEvents)
- defer eventfd.EventUnregister(&w)
-
- data := []byte("00000124")
- // Create and submit a write request.
- n, err := eventfd.Write(ctx, usermem.BytesIOSequence(data), vfs.WriteOptions{})
- if err != nil {
- t.Fatal(err)
- }
- if n != 8 {
- t.Errorf("eventfd.write wrote %d bytes, not full int64", n)
- }
-
- // Check if the callback fired due to the write event.
- select {
- case <-ch:
- default:
- t.Errorf("Didn't get notified of EventIn after write")
- }
- }
-}
-
-func TestEventFDStat(t *testing.T) {
- ctx := contexttest.Context(t)
- vfsObj := &vfs.VirtualFilesystem{}
- if err := vfsObj.Init(ctx); err != nil {
- t.Fatalf("VFS init: %v", err)
- }
-
- // Make a new eventfd that is writable.
- eventfd, err := New(ctx, vfsObj, 0, false, linux.O_RDWR)
- if err != nil {
- t.Fatalf("New() failed: %v", err)
- }
- defer eventfd.DecRef(ctx)
-
- statx, err := eventfd.Stat(ctx, vfs.StatOptions{
- Mask: linux.STATX_BASIC_STATS,
- })
- if err != nil {
- t.Fatalf("eventfd.Stat failed: %v", err)
- }
- if statx.Size != 0 {
- t.Errorf("eventfd size should be 0")
- }
-}
diff --git a/pkg/sentry/fsimpl/ext/BUILD b/pkg/sentry/fsimpl/ext/BUILD
deleted file mode 100644
index 5e8b464a0..000000000
--- a/pkg/sentry/fsimpl/ext/BUILD
+++ /dev/null
@@ -1,104 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "dirent_list",
- out = "dirent_list.go",
- package = "ext",
- prefix = "dirent",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*dirent",
- "Linker": "*dirent",
- },
-)
-
-go_template_instance(
- name = "fstree",
- out = "fstree.go",
- package = "ext",
- prefix = "generic",
- template = "//pkg/sentry/vfs/genericfstree:generic_fstree",
- types = {
- "Dentry": "dentry",
- },
-)
-
-go_library(
- name = "ext",
- srcs = [
- "block_map_file.go",
- "dentry.go",
- "directory.go",
- "dirent_list.go",
- "ext.go",
- "extent_file.go",
- "file_description.go",
- "filesystem.go",
- "fstree.go",
- "inode.go",
- "regular_file.go",
- "symlink.go",
- "utils.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/fd",
- "//pkg/fspath",
- "//pkg/log",
- "//pkg/marshal",
- "//pkg/marshal/primitive",
- "//pkg/safemem",
- "//pkg/sentry/arch",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/lock",
- "//pkg/sentry/fsimpl/ext/disklayout",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/memmap",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/syscalls/linux",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- "//pkg/waiter",
- ],
-)
-
-go_test(
- name = "ext_test",
- size = "small",
- srcs = [
- "block_map_test.go",
- "ext_test.go",
- "extent_test.go",
- ],
- data = [
- "//pkg/sentry/fsimpl/ext:assets/bigfile.txt",
- "//pkg/sentry/fsimpl/ext:assets/file.txt",
- "//pkg/sentry/fsimpl/ext:assets/tiny.ext2",
- "//pkg/sentry/fsimpl/ext:assets/tiny.ext3",
- "//pkg/sentry/fsimpl/ext:assets/tiny.ext4",
- ],
- library = ":ext",
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/fspath",
- "//pkg/marshal/primitive",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/fsimpl/ext/disklayout",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- "//pkg/test/testutil",
- "//pkg/usermem",
- "@com_github_google_go_cmp//cmp:go_default_library",
- "@com_github_google_go_cmp//cmp/cmpopts:go_default_library",
- ],
-)
diff --git a/pkg/sentry/fsimpl/ext/README.md b/pkg/sentry/fsimpl/ext/README.md
deleted file mode 100644
index af00cfda8..000000000
--- a/pkg/sentry/fsimpl/ext/README.md
+++ /dev/null
@@ -1,117 +0,0 @@
-## EXT(2/3/4) File System
-
-This is a filesystem driver which supports ext2, ext3 and ext4 filesystems.
-Linux has specialized drivers for each variant but none which supports all. This
-library takes advantage of ext's backward compatibility and understands the
-internal organization of on-disk structures to support all variants.
-
-This driver implementation diverges from the Linux implementations in being more
-forgiving about versioning. For instance, if a filesystem contains both extent
-based inodes and classical block-map-based inodes, this driver will not complain
-and will interpret both correctly, whereas in Linux this would be an issue. This
-blurs the line between the three ext fs variants.
-
-Ext2 is considered deprecated as of Red Hat Enterprise Linux 7, and ext3 has
-been superseded by ext4, which offers large performance gains. It is therefore
-recommended to upgrade older filesystem images to ext4 using e2fsprogs.
-
-### Read Only
-
-This driver currently only allows read-only operations. A lot of the design
-decisions are based on this feature. There are plans to implement write (the
-process for which is documented in the future work section).
-
-### Performance
-
-One of the biggest wins of this driver is that it directly talks to the
-underlying block device (or whatever persistent storage is being used), instead
-of making expensive RPCs to a gofer.
-
-Another advantage is that ext fs supports fast concurrent reads. Currently the
-device is represented using an `io.ReaderAt`, which allows for concurrent reads.
-All reads are directly passed to the device driver, which intelligently serves
-the read requests in the optimal order. There is no congestion due to locking
-while reading at the filesystem level.
-
-Reads are optimized further in the way file data is transferred over to user
-memory. Ext fs directly copies over file data from disk into user memory with no
-additional allocations on the way. We can only get faster by preloading file
-data into memory (see future work section).
-
-The internal structures used to represent files, inodes and file descriptors use
-a lot of inheritance. With the level of indirection that an interface adds with
-an internal pointer, it can quickly fragment a structure across memory. As this
-runs alongside a full-blown kernel (which is memory intensive), having a
-fragmented struct might hurt performance. Hence these internal structures,
-though interfaced, are tightly packed in memory using the same inheritance
-pattern that pkg/sentry/vfs uses. The pkg/sentry/fsimpl/ext/disklayout package
-makes an exception to this pattern for reasons documented in the package.
-
-### Security
-
-This driver also intends to help sandbox the container better by reducing the
-surface of the host kernel that the application touches. It prevents the
-application from exploiting vulnerabilities in the host filesystem driver. All
-`io.ReaderAt.ReadAt()` calls are translated to `pread(2)` which are directly
-passed to the device driver in the kernel. Hence this reduces the surface for
-attack.
-
-The application cannot affect any host filesystems other than the one passed
-via the block device by the user.
-
-### Future Work
-
-#### Write
-
-To support write operations we would need to modify the block device underneath.
-Currently, the driver does not modify the device at all, not even for updating
-the access times for reads. Modifying the filesystem incorrectly can corrupt it
-and render it unreadable for other correct ext(x) drivers. Hence caution must be
-maintained while modifying metadata structures.
-
-Ext4 specifically is built for performance and has added a lot of complexity as
-to how metadata structures are modified. For instance, files are organized via
-an extent tree, which must be kept balanced, and file data blocks must be placed
-in the same extent as much as possible to increase locality. Such properties
-must be maintained while modifying the tree.
-
-Ext filesystems boast a lot about locality, which plays a big role in them being
-performant. The block allocation algorithm in Linux does a good job in keeping
-related data together. This behavior must be maintained as much as possible,
-else we might end up degrading the filesystem performance over time.
-
-Ext4 also supports a wide variety of features which are specialized for varying
-use cases. Implementing all of them can get difficult very quickly.
-
-Ext(x) checksums all its metadata structures to check for corruption, so
-modification of any metadata struct must correspond with re-checksumming the
-struct. Linux filesystem drivers also order on-disk updates intelligently to not
-corrupt the filesystem and also remain performant. The in-memory metadata
-structures must be kept in sync with what is on disk.
-
-There is also replication of some important structures across the filesystem.
-All replicas must be updated when their original copy is updated. There is also
-provisioning for snapshotting which must be kept in mind, although it should not
-affect this implementation unless we allow users to create filesystem snapshots.
-
-Ext4 also introduced journaling (jbd2). The journal must be updated
-appropriately.
-
-#### Performance
-
-To improve performance we should implement a buffer cache, and optionally, read
-ahead for small files. While doing so we must also keep in mind the memory usage
-and have a reasonable cap on how much file data we want to hold in memory.
-
-#### Features
-
-Our current implementation will work with most ext4 filesystems for read-only
-purposes. However, the following features are not supported yet:
-
-- Journal
-- Snapshotting
-- Extended Attributes
-- Hash Tree Directories
-- Meta Block Groups
-- Multiple Mount Protection
-- Bigalloc
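The removed README's performance argument rests on one property of `io.ReaderAt`: every call carries its own offset, so concurrent reads never contend on a shared file position. A standalone sketch of that pattern, assuming a pread-backed `*os.File` as the backing device and a placeholder image path (`tiny.ext4`):

```go
package main

import (
	"fmt"
	"io"
	"os"
	"sync"
)

// readChunk reads up to length bytes at the given offset. Because
// io.ReaderAt carries the offset in the call itself, concurrent calls
// do not share a file position.
func readChunk(r io.ReaderAt, off int64, length int) ([]byte, error) {
	buf := make([]byte, length)
	n, err := r.ReadAt(buf, off)
	if err != nil && err != io.EOF {
		return nil, err
	}
	return buf[:n], nil
}

func main() {
	// "tiny.ext4" is a placeholder path; any readable file works.
	f, err := os.Open("tiny.ext4")
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer f.Close()

	offsets := []int64{0, 1024, 4096}
	var wg sync.WaitGroup
	for _, off := range offsets {
		wg.Add(1)
		go func(off int64) {
			defer wg.Done()
			chunk, err := readChunk(f, off, 512)
			if err != nil {
				fmt.Println("read:", err)
				return
			}
			fmt.Printf("offset %d: read %d bytes\n", off, len(chunk))
		}(off)
	}
	wg.Wait()
}
```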
diff --git a/pkg/sentry/fsimpl/ext/assets/README.md b/pkg/sentry/fsimpl/ext/assets/README.md
deleted file mode 100644
index 6f1e81b3a..000000000
--- a/pkg/sentry/fsimpl/ext/assets/README.md
+++ /dev/null
@@ -1,36 +0,0 @@
-### Tiny Ext(2/3/4) Images
-
-The images are of size 64K, which supports 64 1k blocks and 16 inodes. This is
-the smallest size mkfs.ext(2/3/4) works with.
-
-These images were generated using the following commands.
-
-```bash
-fallocate -l 64K tiny.ext$VERSION
-mkfs.ext$VERSION -j tiny.ext$VERSION
-```
-
-where `VERSION` is `2`, `3` or `4`.
-
-You can mount it using:
-
-```bash
-sudo mount -o loop tiny.ext$VERSION $MOUNTPOINT
-```
-
-`file.txt`, `bigfile.txt` and `symlink.txt` were added to this image by just
-mounting it and copying (while preserving links) those files to the mountpoint
-directory using:
-
-```bash
-sudo cp -P {file.txt,symlink.txt,bigfile.txt} $MOUNTPOINT
-```
-
-The files in this directory mirror the contents and organisation of the files
-stored in the image.
-
-You can umount the filesystem using:
-
-```bash
-sudo umount $MOUNTPOINT
-```
diff --git a/pkg/sentry/fsimpl/ext/assets/bigfile.txt b/pkg/sentry/fsimpl/ext/assets/bigfile.txt
deleted file mode 100644
index 3857cf516..000000000
--- a/pkg/sentry/fsimpl/ext/assets/bigfile.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-Lorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus faucibus eleifend orci, ut ornare nibh faucibus eu. Cras at condimentum massa. Nullam luctus, elit non porttitor congue, sapien diam feugiat sapien, sed eleifend nulla mauris non arcu. Sed lacinia mauris magna, eu mollis libero varius sit amet. Donec mollis, quam convallis commodo posuere, dolor nisi placerat nisi, in faucibus augue mi eu lorem. In pharetra consectetur faucibus. Ut euismod ex efficitur egestas tincidunt. Maecenas condimentum ut ante in rutrum. Vivamus sed arcu tempor, faucibus turpis et, lacinia diam.
-
-Sed in lacus vel nisl interdum bibendum in sed justo. Nunc tellus risus, molestie vitae arcu sed, molestie tempus ligula. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Nunc risus neque, volutpat et ante non, ullamcorper condimentum ante. Aliquam sed metus in urna condimentum convallis. Vivamus ut libero mauris. Proin mollis posuere consequat. Vestibulum placerat mollis est et pulvinar.
-
-Donec rutrum odio ac diam pharetra, id fermentum magna cursus. Pellentesque in dapibus elit, et condimentum orci. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Suspendisse euismod dapibus est, id vestibulum mauris. Nulla facilisi. Nulla cursus gravida nisi. Phasellus vestibulum rutrum lectus, a dignissim mauris hendrerit vitae. In at elementum mauris. Integer vel efficitur velit. Nullam fringilla sapien mi, quis luctus neque efficitur ac. Aenean nec quam dapibus nunc commodo pharetra. Proin sapien mi, fermentum aliquet vulputate non, aliquet porttitor diam. Quisque lacinia, urna et finibus fermentum, nunc lacus vehicula ex, sed congue metus lectus ac quam. Aliquam erat volutpat. Suspendisse sodales, dolor ut tincidunt finibus, augue erat varius tellus, a interdum erat sem at nunc. Vestibulum cursus iaculis sapien, vitae feugiat dui auctor quis.
-
-Pellentesque nec maximus nulla, eu blandit diam. Maecenas quis arcu ornare, congue ante at, vehicula ipsum. Praesent feugiat mauris rutrum sem fermentum, nec luctus ipsum placerat. Pellentesque placerat ipsum at dignissim fringilla. Vivamus et posuere sem, eget hendrerit felis. Aenean vulputate, augue vel mollis feugiat, justo ipsum mollis dolor, eu mollis elit neque ut ipsum. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Fusce bibendum sem quam, vulputate laoreet mi dapibus imperdiet. Sed a purus non nibh pretium aliquet. Integer eget luctus augue, vitae tincidunt magna. Ut eros enim, egestas eu nulla et, lobortis egestas arcu. Cras id ipsum ac justo lacinia rutrum. Vivamus lectus leo, ultricies sed justo at, pellentesque feugiat magna. Ut sollicitudin neque elit, vel ornare mauris commodo id.
-
-Duis dapibus orci et sapien finibus finibus. Mauris eleifend, lacus at vestibulum maximus, quam ligula pharetra erat, sit amet dapibus neque elit vitae neque. In bibendum sollicitudin erat, eget ultricies tortor malesuada at. Sed sit amet orci turpis. Donec feugiat ligula nibh, molestie tincidunt lectus elementum id. Donec volutpat maximus nibh, in vulputate felis posuere eu. Cras tincidunt ullamcorper lacus. Phasellus porta lorem auctor, congue magna a, commodo elit.
-
-Etiam auctor mi quis elit sodales, eu pulvinar arcu condimentum. Aenean imperdiet risus et dapibus tincidunt. Nullam tincidunt dictum dui, sed commodo urna rutrum id. Ut mollis libero vel elit laoreet bibendum. Quisque arcu arcu, tincidunt at ultricies id, vulputate nec metus. In tristique posuere quam sit amet volutpat. Vivamus scelerisque et nunc at dapibus. Fusce finibus libero ut ligula pretium rhoncus. Mauris non elit in arcu finibus imperdiet. Pellentesque nec massa odio. Proin rutrum mauris non sagittis efficitur. Aliquam auctor quam at dignissim faucibus. Ut eget ligula in magna posuere ultricies vitae sit amet turpis. Duis maximus odio nulla. Donec gravida sem tristique tempus scelerisque.
-
-Interdum et malesuada fames ac ante ipsum primis in faucibus. Fusce pharetra magna vulputate aliquet tempus. Duis id hendrerit arcu. Quisque ut ex elit. Integer velit orci, venenatis ut sapien ac, placerat porttitor dui. Interdum et malesuada fames ac ante ipsum primis in faucibus. Nunc hendrerit cursus diam, hendrerit finibus ipsum scelerisque ut. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos.
-
-Nulla non euismod neque. Phasellus vel sapien eu metus pulvinar rhoncus. Suspendisse eu mollis tellus, quis vestibulum tortor. Maecenas interdum dolor sed nulla fermentum maximus. Donec imperdiet ullamcorper condimentum. Nam quis nibh ante. Praesent quis tellus ut tortor pulvinar blandit sit amet ut sapien. Vestibulum est orci, pellentesque vitae tristique sit amet, tristique non felis.
-
-Vivamus sodales pellentesque varius. Sed vel tempus ligula. Nulla tristique nisl vel dui facilisis, ac sodales augue hendrerit. Proin augue nisi, vestibulum quis augue nec, sagittis tincidunt velit. Vestibulum euismod, nulla nec sodales faucibus, urna sapien vulputate magna, id varius metus sapien ut neque. Duis in mollis urna, in scelerisque enim. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Nunc condimentum dictum turpis, et egestas neque dapibus eget. Quisque fringilla, dui eu venenatis eleifend, erat nibh lacinia urna, at lacinia lacus sapien eu dui. Duis eu erat ut mi lacinia convallis a sed ex.
-
-Fusce elit metus, tincidunt nec eleifend a, hendrerit nec ligula. Duis placerat finibus sollicitudin. In euismod porta tellus, in luctus justo bibendum bibendum. Maecenas at magna eleifend lectus tincidunt suscipit ut a ligula. Nulla tempor accumsan felis, fermentum dapibus est eleifend vitae. Mauris urna sem, fringilla at ultricies non, ultrices in arcu. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Nam vehicula nunc at laoreet imperdiet. Nunc tristique ut risus id aliquet. Integer eleifend massa orci.
-
-Vestibulum sed ante sollicitudin nisi fringilla bibendum nec vel quam. Sed pretium augue eu ligula congue pulvinar. Donec vitae magna tincidunt, pharetra lacus id, convallis nulla. Cras viverra nisl nisl, varius convallis leo vulputate nec. Morbi at consequat dui, sed aliquet metus. Sed suscipit fermentum mollis. Maecenas nec mi sodales, tincidunt purus in, tristique mauris. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec interdum mi in velit efficitur, quis ultrices ex imperdiet. Sed vestibulum, magna ut tristique pretium, mi ipsum placerat tellus, non tempor enim augue et ex. Pellentesque eget felis quis ante sodales viverra ac sed lacus. Donec suscipit tempus massa, eget laoreet massa molestie at.
-
-Aenean fringilla dui non aliquet consectetur. Fusce cursus quam nec orci hendrerit faucibus. Donec consequat suscipit enim, non volutpat lectus auctor interdum. Proin lorem purus, maximus vel orci vitae, suscipit egestas turpis. Donec risus urna, congue a sem eu, aliquet placerat odio. Morbi gravida tristique turpis, quis efficitur enim. Nunc interdum gravida ipsum vel facilisis. Nunc congue finibus sollicitudin. Quisque euismod aliquet lectus et tincidunt. Curabitur ultrices sem ut mi fringilla fermentum. Morbi pretium, nisi sit amet dapibus congue, dolor enim consectetur risus, a interdum ligula odio sed odio. Quisque facilisis, mi at suscipit gravida, nunc sapien cursus justo, ut luctus odio nulla quis leo. Integer condimentum lobortis mauris, non egestas tellus lobortis sit amet.
-
-In sollicitudin velit ac ante vehicula, vitae varius tortor mollis. In hac habitasse platea dictumst. Quisque et orci lorem. Integer malesuada fringilla luctus. Pellentesque malesuada, mi non lobortis porttitor, ante ligula vulputate ante, nec dictum risus eros sit amet sapien. Nulla aliquam lorem libero, ac varius nulla tristique eget. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Ut pellentesque mauris orci, vel consequat mi varius a. Ut sit amet elit vulputate, lacinia metus non, fermentum nisl. Pellentesque eu nisi sed quam egestas blandit. Duis sit amet lobortis dolor. Donec consectetur sem interdum, tristique elit sit amet, sodales lacus. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Fusce id aliquam augue. Sed pretium congue risus vitae lacinia. Vestibulum non vulputate risus, ut malesuada justo.
-
-Sed odio elit, consectetur ac mauris quis, consequat commodo libero. Fusce sodales velit vulputate pulvinar fermentum. Donec iaculis nec nisl eget faucibus. Mauris at dictum velit. Donec fermentum lectus eu viverra volutpat. Aliquam consequat facilisis lorem, cursus consequat dui bibendum ullamcorper. Pellentesque nulla magna, imperdiet at magna et, cursus egestas enim. Nullam semper molestie lectus sit amet semper. Duis eget tincidunt est. Integer id neque risus. Integer ultricies hendrerit vestibulum. Donec blandit blandit sagittis. Nunc consectetur vitae nisi consectetur volutpat.
-
-Nulla id lorem fermentum, efficitur magna a, hendrerit dui. Vivamus sagittis orci gravida, bibendum quam eget, molestie est. Phasellus nec enim tincidunt, volutpat sapien non, laoreet diam. Nulla posuere enim nec porttitor lobortis. Donec auctor odio ut orci eleifend, ut eleifend purus convallis. Interdum et malesuada fames ac ante ipsum primis in faucibus. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Ut hendrerit, purus eget viverra tincidunt, sem magna imperdiet libero, et aliquam turpis neque vitae elit. Maecenas semper varius iaculis. Cras non lorem quis quam bibendum eleifend in et libero. Curabitur at purus mauris. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus porta diam sed elit eleifend gravida.
-
-Nulla facilisi. Ut ultricies diam vel diam consectetur, vel porta augue molestie. Fusce interdum sapien et metus facilisis pellentesque. Nulla convallis sem at nunc vehicula facilisis. Nam ac rutrum purus. Nunc bibendum, dolor sit amet tempus ullamcorper, lorem leo tempor sem, id fringilla nunc augue scelerisque augue. Nullam sit amet rutrum nisl. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Donec sed mauris gravida eros vehicula sagittis at eget orci. Cras elementum, eros at accumsan bibendum, libero neque blandit purus, vitae vestibulum libero massa ac nibh. Integer at placerat nulla. Mauris eu eleifend orci. Aliquam consequat ligula vitae erat porta lobortis. Duis fermentum elit ac aliquet ornare.
-
-Mauris eget cursus tellus, eget sodales purus. Aliquam malesuada, augue id vulputate finibus, nisi ex bibendum nisl, sit amet laoreet quam urna a dolor. Nullam ultricies, sapien eu laoreet consequat, erat eros dignissim diam, ultrices sodales lectus mauris et leo. Morbi lacinia eu ante at tempus. Sed iaculis finibus magna malesuada efficitur. Donec faucibus erat sit amet elementum feugiat. Praesent a placerat nisi. Etiam lacinia gravida diam, et sollicitudin sapien tincidunt ut.
-
-Maecenas felis quam, tincidunt vitae venenatis scelerisque, viverra vitae odio. Phasellus enim neque, ultricies suscipit malesuada sit amet, vehicula sit amet purus. Nulla placerat sit amet dui vel tincidunt. Nam quis neque vel magna commodo egestas. Vestibulum sagittis rutrum lorem ut congue. Maecenas vel ultrices tellus. Donec efficitur, urna ac consequat iaculis, lorem felis pharetra eros, eget faucibus orci lectus sit amet arcu.
-
-Ut a tempus nisi. Nulla facilisi. Praesent vulputate maximus mi et dapibus. Sed sit amet libero ac augue hendrerit efficitur in a sapien. Mauris placerat velit sit amet tellus sollicitudin faucibus. Donec egestas a magna ac suscipit. Duis enim sapien, mollis sed egestas et, vestibulum vel leo.
-
-Proin quis dapibus dui. Donec eu tincidunt nunc. Vivamus eget purus consectetur, maximus ante vitae, tincidunt elit. Aenean mattis dolor a gravida aliquam. Praesent quis tellus id sem maximus vulputate nec sed nulla. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Curabitur metus nulla, volutpat volutpat est eu, hendrerit congue erat. Aliquam sollicitudin augue ante. Sed sollicitudin, magna eu consequat elementum, mi augue ullamcorper felis, molestie imperdiet erat metus iaculis est. Proin ac tortor nisi. Pellentesque quis nisi risus. Integer enim sapien, tincidunt quis tortor id, accumsan venenatis mi. Nulla facilisi.
-
-Cras pretium sit amet quam congue maximus. Morbi lacus libero, imperdiet commodo massa sed, scelerisque placerat libero. Cras nisl nisi, consectetur sed bibendum eu, venenatis at enim. Proin sodales justo at quam aliquam, a consectetur mi ornare. Donec porta ac est sit amet efficitur. Suspendisse vestibulum tortor id neque imperdiet, id lacinia risus vehicula. Phasellus ac eleifend purus. Mauris vel gravida ante. Aliquam vitae lobortis risus. Sed vehicula consectetur tincidunt. Nam et justo vitae purus molestie consequat. Pellentesque ipsum ex, convallis quis blandit non, gravida et urna. Donec diam ligula amet.
diff --git a/pkg/sentry/fsimpl/ext/assets/file.txt b/pkg/sentry/fsimpl/ext/assets/file.txt
deleted file mode 100644
index 980a0d5f1..000000000
--- a/pkg/sentry/fsimpl/ext/assets/file.txt
+++ /dev/null
@@ -1 +0,0 @@
-Hello World!
diff --git a/pkg/sentry/fsimpl/ext/assets/symlink.txt b/pkg/sentry/fsimpl/ext/assets/symlink.txt
deleted file mode 120000
index 4c330738c..000000000
--- a/pkg/sentry/fsimpl/ext/assets/symlink.txt
+++ /dev/null
@@ -1 +0,0 @@
-file.txt \ No newline at end of file
diff --git a/pkg/sentry/fsimpl/ext/assets/tiny.ext2 b/pkg/sentry/fsimpl/ext/assets/tiny.ext2
deleted file mode 100644
index 381ade9bf..000000000
--- a/pkg/sentry/fsimpl/ext/assets/tiny.ext2
+++ /dev/null
Binary files differ
diff --git a/pkg/sentry/fsimpl/ext/assets/tiny.ext3 b/pkg/sentry/fsimpl/ext/assets/tiny.ext3
deleted file mode 100644
index 0e97a324c..000000000
--- a/pkg/sentry/fsimpl/ext/assets/tiny.ext3
+++ /dev/null
Binary files differ
diff --git a/pkg/sentry/fsimpl/ext/assets/tiny.ext4 b/pkg/sentry/fsimpl/ext/assets/tiny.ext4
deleted file mode 100644
index a6859736d..000000000
--- a/pkg/sentry/fsimpl/ext/assets/tiny.ext4
+++ /dev/null
Binary files differ
diff --git a/pkg/sentry/fsimpl/ext/benchmark/BUILD b/pkg/sentry/fsimpl/ext/benchmark/BUILD
deleted file mode 100644
index 6c5a559fd..000000000
--- a/pkg/sentry/fsimpl/ext/benchmark/BUILD
+++ /dev/null
@@ -1,17 +0,0 @@
-load("//tools:defs.bzl", "go_test")
-
-package(licenses = ["notice"])
-
-go_test(
- name = "benchmark_test",
- size = "small",
- srcs = ["benchmark_test.go"],
- deps = [
- "//pkg/context",
- "//pkg/fspath",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/fsimpl/ext",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- ],
-)
diff --git a/pkg/sentry/fsimpl/ext/benchmark/benchmark_test.go b/pkg/sentry/fsimpl/ext/benchmark/benchmark_test.go
deleted file mode 100644
index 2ee7cc7ac..000000000
--- a/pkg/sentry/fsimpl/ext/benchmark/benchmark_test.go
+++ /dev/null
@@ -1,211 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// These benchmarks emulate memfs benchmarks. Ext4 images must be created
-// before this benchmark is run using the `make_deep_ext4.sh` script at
-// /tmp/image-{depth}.ext4 for all the depths tested below.
-//
-// The benchmark itself cannot run the script because the script requires
-// sudo privileges to create the file system images.
-package benchmark_test
-
-import (
- "fmt"
- "os"
- "runtime"
- "strings"
- "testing"
-
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/fspath"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/ext"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
-)
-
-var depths = []int{1, 2, 3, 8, 64, 100}
-
-const filename = "file.txt"
-
-// setUp opens imagePath as an ext Filesystem and returns all necessary
-// elements required to run tests. If the error is nil, it also returns a
-// tear down function which must be called after the test is run for cleanup.
-func setUp(b *testing.B, imagePath string) (context.Context, *vfs.VirtualFilesystem, *vfs.VirtualDentry, func(), error) {
- f, err := os.Open(imagePath)
- if err != nil {
- return nil, nil, nil, nil, err
- }
-
- ctx := contexttest.Context(b)
- creds := auth.CredentialsFromContext(ctx)
-
- // Create VFS.
- vfsObj := &vfs.VirtualFilesystem{}
- if err := vfsObj.Init(ctx); err != nil {
- return nil, nil, nil, nil, err
- }
- vfsObj.MustRegisterFilesystemType("extfs", ext.FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
- })
- mntns, err := vfsObj.NewMountNamespace(ctx, creds, imagePath, "extfs", &vfs.MountOptions{
- GetFilesystemOptions: vfs.GetFilesystemOptions{
- InternalData: int(f.Fd()),
- },
- })
- if err != nil {
- f.Close()
- return nil, nil, nil, nil, err
- }
-
- root := mntns.Root()
- root.IncRef()
-
- tearDown := func() {
- root.DecRef(ctx)
-
- if err := f.Close(); err != nil {
- b.Fatalf("tearDown failed: %v", err)
- }
- }
- return ctx, vfsObj, &root, tearDown, nil
-}
-
-// mount mounts extfs at the given path operation. It returns a tear down
-// function which must be called after the test is run for cleanup.
-func mount(b *testing.B, imagePath string, vfsfs *vfs.VirtualFilesystem, pop *vfs.PathOperation) func() {
- b.Helper()
-
- f, err := os.Open(imagePath)
- if err != nil {
- b.Fatalf("could not open image at %s: %v", imagePath, err)
- }
-
- ctx := contexttest.Context(b)
- creds := auth.CredentialsFromContext(ctx)
-
- if _, err := vfsfs.MountAt(ctx, creds, imagePath, pop, "extfs", &vfs.MountOptions{
- GetFilesystemOptions: vfs.GetFilesystemOptions{
- InternalData: int(f.Fd()),
- },
- }); err != nil {
-		b.Fatalf("failed to mount extfs submount: %v", err)
- }
- return func() {
- if err := f.Close(); err != nil {
- b.Fatalf("tearDown failed: %v", err)
- }
- }
-}
-
-// BenchmarkVFS2Ext4fsStat emulates BenchmarkVFS2MemfsStat.
-func BenchmarkVFS2Ext4fsStat(b *testing.B) {
- for _, depth := range depths {
- b.Run(fmt.Sprintf("%d", depth), func(b *testing.B) {
- ctx, vfsfs, root, tearDown, err := setUp(b, fmt.Sprintf("/tmp/image-%d.ext4", depth))
- if err != nil {
- b.Fatalf("setUp failed: %v", err)
- }
- defer tearDown()
-
- creds := auth.CredentialsFromContext(ctx)
- var filePathBuilder strings.Builder
- filePathBuilder.WriteByte('/')
- for i := 1; i <= depth; i++ {
- filePathBuilder.WriteString(fmt.Sprintf("%d", i))
- filePathBuilder.WriteByte('/')
- }
- filePathBuilder.WriteString(filename)
- filePath := filePathBuilder.String()
-
- runtime.GC()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- stat, err := vfsfs.StatAt(ctx, creds, &vfs.PathOperation{
- Root: *root,
- Start: *root,
- Path: fspath.Parse(filePath),
- FollowFinalSymlink: true,
- }, &vfs.StatOptions{})
- if err != nil {
- b.Fatalf("stat(%q) failed: %v", filePath, err)
- }
-				// Sanity check. touch(1) always creates files of size 0 (empty).
- if stat.Size > 0 {
- b.Fatalf("got wrong file size (%d)", stat.Size)
- }
- }
- })
- }
-}
-
-// BenchmarkVFS2ExtfsMountStat emulates BenchmarkVFS2MemfsMountStat.
-func BenchmarkVFS2ExtfsMountStat(b *testing.B) {
- for _, depth := range depths {
- b.Run(fmt.Sprintf("%d", depth), func(b *testing.B) {
- // Create root extfs with depth 1 so we can mount extfs again at /1/.
- ctx, vfsfs, root, tearDown, err := setUp(b, fmt.Sprintf("/tmp/image-%d.ext4", 1))
- if err != nil {
- b.Fatalf("setUp failed: %v", err)
- }
- defer tearDown()
-
- creds := auth.CredentialsFromContext(ctx)
- mountPointName := "/1/"
- pop := vfs.PathOperation{
- Root: *root,
- Start: *root,
- Path: fspath.Parse(mountPointName),
- }
-
- // Save the mount point for later use.
- mountPoint, err := vfsfs.GetDentryAt(ctx, creds, &pop, &vfs.GetDentryOptions{})
- if err != nil {
- b.Fatalf("failed to walk to mount point: %v", err)
- }
- defer mountPoint.DecRef(ctx)
-
- // Create extfs submount.
- mountTearDown := mount(b, fmt.Sprintf("/tmp/image-%d.ext4", depth), vfsfs, &pop)
- defer mountTearDown()
-
- var filePathBuilder strings.Builder
- filePathBuilder.WriteString(mountPointName)
- for i := 1; i <= depth; i++ {
- filePathBuilder.WriteString(fmt.Sprintf("%d", i))
- filePathBuilder.WriteByte('/')
- }
- filePathBuilder.WriteString(filename)
- filePath := filePathBuilder.String()
-
- runtime.GC()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- stat, err := vfsfs.StatAt(ctx, creds, &vfs.PathOperation{
- Root: *root,
- Start: *root,
- Path: fspath.Parse(filePath),
- FollowFinalSymlink: true,
- }, &vfs.StatOptions{})
- if err != nil {
- b.Fatalf("stat(%q) failed: %v", filePath, err)
- }
- // Sanity check. touch(1) always creates files of size 0 (empty).
- if stat.Size > 0 {
- b.Fatalf("got wrong file size (%d)", stat.Size)
- }
- }
- })
- }
-}
diff --git a/pkg/sentry/fsimpl/ext/benchmark/make_deep_ext4.sh b/pkg/sentry/fsimpl/ext/benchmark/make_deep_ext4.sh
deleted file mode 100755
index d0910da1f..000000000
--- a/pkg/sentry/fsimpl/ext/benchmark/make_deep_ext4.sh
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This script creates an ext4 image with a directory depth of $1 and a file in
-# the innermost directory. The created file is at path /1/2/.../depth/file.txt.
-# The ext4 image is written to $2. The image is temporarily mounted at
-# /tmp/mountpoint. This script must be run with sudo privileges.
-
-# Usage:
-# sudo bash make_deep_ext4.sh {depth} {output path}
-
-# Check positional arguments.
-if [ "$#" -ne 2 ]; then
- echo "Usage: sudo bash make_deep_ext4.sh {depth} {output path}"
- exit 1
-fi
-
-# Make sure depth is a non-negative number.
-if ! [[ "$1" =~ ^[0-9]+$ ]]; then
- echo "Depth must be a non-negative number."
- exit 1
-fi
-
-# Create a blank 1 MB file at the requested output path.
-rm -f $2
-fallocate -l 1M $2
-if [ $? -ne 0 ]; then
- echo "fallocate failed"
- exit $?
-fi
-
-# Convert that blank into an ext4 image.
-mkfs.ext4 -j $2
-if [ $? -ne 0 ]; then
- echo "mkfs.ext4 failed"
- exit $?
-fi
-
-# Mount the image.
-MOUNTPOINT=/tmp/mountpoint
-mkdir -p $MOUNTPOINT
-mount -o loop $2 $MOUNTPOINT
-if [ $? -ne 0 ]; then
- echo "mount failed"
- exit $?
-fi
-
-# Create nested directories and the file.
-if [ "$1" -eq 0 ]; then
- FILEPATH=$MOUNTPOINT/file.txt
-else
- FILEPATH=$MOUNTPOINT/$(seq -s '/' 1 $1)/file.txt
-fi
-mkdir -p $(dirname $FILEPATH) || exit
-touch $FILEPATH
-
-# Clean up.
-umount $MOUNTPOINT
-rm -rf $MOUNTPOINT
diff --git a/pkg/sentry/fsimpl/ext/block_map_file.go b/pkg/sentry/fsimpl/ext/block_map_file.go
deleted file mode 100644
index 79719faed..000000000
--- a/pkg/sentry/fsimpl/ext/block_map_file.go
+++ /dev/null
@@ -1,204 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "io"
- "math"
-
- "gvisor.dev/gvisor/pkg/errors/linuxerr"
- "gvisor.dev/gvisor/pkg/marshal/primitive"
- "gvisor.dev/gvisor/pkg/syserror"
-)
-
-const (
- // numDirectBlks is the number of direct blocks in ext block map inodes.
- numDirectBlks = 12
-)
-
-// blockMapFile is a type of regular file which uses direct/indirect block
-// addressing to store file data. This was deprecated in ext4.
-type blockMapFile struct {
- regFile regularFile
-
-	// directBlks are the direct block numbers. The physical blocks pointed to
-	// by these hold file data. They contain file blocks 0 to 11.
- directBlks [numDirectBlks]primitive.Uint32
-
- // indirectBlk is the physical block which contains (blkSize/4) direct block
- // numbers (as uint32 integers).
- indirectBlk primitive.Uint32
-
- // doubleIndirectBlk is the physical block which contains (blkSize/4) indirect
- // block numbers (as uint32 integers).
- doubleIndirectBlk primitive.Uint32
-
- // tripleIndirectBlk is the physical block which contains (blkSize/4) doubly
- // indirect block numbers (as uint32 integers).
- tripleIndirectBlk primitive.Uint32
-
- // coverage at (i)th index indicates the amount of file data a node at
- // height (i) covers. Height 0 is the direct block.
- coverage [4]uint64
-}
-
-// Compiles only if blockMapFile implements io.ReaderAt.
-var _ io.ReaderAt = (*blockMapFile)(nil)
-
-// newBlockMapFile is the blockMapFile constructor. It initializes the file to
-// physical blocks map with (at most) the first 12 (direct) blocks.
-func newBlockMapFile(args inodeArgs) (*blockMapFile, error) {
- file := &blockMapFile{}
- file.regFile.impl = file
- file.regFile.inode.init(args, &file.regFile)
-
- for i := uint(0); i < 4; i++ {
- file.coverage[i] = getCoverage(file.regFile.inode.blkSize, i)
- }
-
- blkMap := file.regFile.inode.diskInode.Data()
- for i := 0; i < numDirectBlks; i++ {
- file.directBlks[i].UnmarshalBytes(blkMap[i*4 : (i+1)*4])
- }
- file.indirectBlk.UnmarshalBytes(blkMap[numDirectBlks*4 : (numDirectBlks+1)*4])
- file.doubleIndirectBlk.UnmarshalBytes(blkMap[(numDirectBlks+1)*4 : (numDirectBlks+2)*4])
- file.tripleIndirectBlk.UnmarshalBytes(blkMap[(numDirectBlks+2)*4 : (numDirectBlks+3)*4])
- return file, nil
-}
-
-// ReadAt implements io.ReaderAt.ReadAt.
-func (f *blockMapFile) ReadAt(dst []byte, off int64) (int, error) {
- if len(dst) == 0 {
- return 0, nil
- }
-
- if off < 0 {
- return 0, linuxerr.EINVAL
- }
-
- offset := uint64(off)
- size := f.regFile.inode.diskInode.Size()
- if offset >= size {
- return 0, io.EOF
- }
-
- // dirBlksEnd is the file offset until which direct blocks cover file data.
- // Direct blocks cover 0 <= file offset < dirBlksEnd.
- dirBlksEnd := numDirectBlks * f.coverage[0]
-
- // indirBlkEnd is the file offset until which the indirect block covers file
- // data. The indirect block covers dirBlksEnd <= file offset < indirBlkEnd.
- indirBlkEnd := dirBlksEnd + f.coverage[1]
-
- // doubIndirBlkEnd is the file offset until which the double indirect block
- // covers file data. The double indirect block covers the range
- // indirBlkEnd <= file offset < doubIndirBlkEnd.
- doubIndirBlkEnd := indirBlkEnd + f.coverage[2]
-
- read := 0
- toRead := len(dst)
- if uint64(toRead)+offset > size {
- toRead = int(size - offset)
- }
- for read < toRead {
- var err error
- var curR int
-
- // Figure out which block to delegate the read to.
- switch {
- case offset < dirBlksEnd:
- // Direct block.
- curR, err = f.read(uint32(f.directBlks[offset/f.regFile.inode.blkSize]), offset%f.regFile.inode.blkSize, 0, dst[read:])
- case offset < indirBlkEnd:
- // Indirect block.
- curR, err = f.read(uint32(f.indirectBlk), offset-dirBlksEnd, 1, dst[read:])
- case offset < doubIndirBlkEnd:
- // Doubly indirect block.
- curR, err = f.read(uint32(f.doubleIndirectBlk), offset-indirBlkEnd, 2, dst[read:])
- default:
- // Triply indirect block.
- curR, err = f.read(uint32(f.tripleIndirectBlk), offset-doubIndirBlkEnd, 3, dst[read:])
- }
-
- read += curR
- offset += uint64(curR)
- if err != nil {
- return read, err
- }
- }
-
- if read < len(dst) {
- return read, io.EOF
- }
- return read, nil
-}
-
-// read is the recursive step of the ReadAt function. It relies on knowing the
-// current node's location on disk (curPhyBlk) and its height in the block map
-// tree. A height of 0 indicates that the current node actually holds file
-// data. relFileOff is the offset from which we need to start reading
-// under the current node. It is completely relative to the current node.
-func (f *blockMapFile) read(curPhyBlk uint32, relFileOff uint64, height uint, dst []byte) (int, error) {
- curPhyBlkOff := int64(curPhyBlk) * int64(f.regFile.inode.blkSize)
- if height == 0 {
- toRead := int(f.regFile.inode.blkSize - relFileOff)
- if len(dst) < toRead {
- toRead = len(dst)
- }
-
- n, _ := f.regFile.inode.fs.dev.ReadAt(dst[:toRead], curPhyBlkOff+int64(relFileOff))
- if n < toRead {
- return n, syserror.EIO
- }
- return n, nil
- }
-
- childCov := f.coverage[height-1]
- startIdx := relFileOff / childCov
- endIdx := f.regFile.inode.blkSize / 4 // This is exclusive.
- wantEndIdx := (relFileOff + uint64(len(dst))) / childCov
- wantEndIdx++ // Make this exclusive.
- if wantEndIdx < endIdx {
- endIdx = wantEndIdx
- }
-
- read := 0
- curChildOff := relFileOff % childCov
- for i := startIdx; i < endIdx; i++ {
- var childPhyBlk primitive.Uint32
- err := readFromDisk(f.regFile.inode.fs.dev, curPhyBlkOff+int64(i*4), &childPhyBlk)
- if err != nil {
- return read, err
- }
-
- n, err := f.read(uint32(childPhyBlk), curChildOff, height-1, dst[read:])
- read += n
- if err != nil {
- return read, err
- }
-
- curChildOff = 0
- }
-
- return read, nil
-}
-
-// getCoverage returns the number of bytes a node at the given height covers.
-// Height 0 is the file data block itself. Height 1 is the indirect block.
-//
-// Formula: blkSize * ((blkSize / 4)^height)
-func getCoverage(blkSize uint64, height uint) uint64 {
- return blkSize * uint64(math.Pow(float64(blkSize/4), float64(height)))
-}
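
A minimal standalone sketch of the arithmetic above, assuming a hypothetical
1 KiB block size and using integer math in place of math.Pow; it recomputes the
coverage values and the read-delegation boundaries that ReadAt relies on:

```go
package main

import "fmt"

const numDirectBlks = 12 // mirrors the constant above

// coverage mirrors getCoverage: blkSize * (blkSize/4)^height, computed with
// integer arithmetic.
func coverage(blkSize uint64, height uint) uint64 {
	c := blkSize
	for i := uint(0); i < height; i++ {
		c *= blkSize / 4
	}
	return c
}

func main() {
	const blkSize = 1024 // hypothetical 1 KiB block size

	for h := uint(0); h < 4; h++ {
		fmt.Printf("height %d covers %d bytes\n", h, coverage(blkSize, h))
	}

	// The boundaries ReadAt uses to decide whether an offset is served by the
	// direct, indirect, or doubly indirect blocks.
	dirBlksEnd := numDirectBlks * coverage(blkSize, 0)
	indirBlkEnd := dirBlksEnd + coverage(blkSize, 1)
	doubIndirBlkEnd := indirBlkEnd + coverage(blkSize, 2)
	fmt.Println(dirBlksEnd, indirBlkEnd, doubIndirBlkEnd) // 12288 274432 67383296
}
```
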
diff --git a/pkg/sentry/fsimpl/ext/block_map_test.go b/pkg/sentry/fsimpl/ext/block_map_test.go
deleted file mode 100644
index ed98b482e..000000000
--- a/pkg/sentry/fsimpl/ext/block_map_test.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "bytes"
- "math/rand"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/marshal/primitive"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/ext/disklayout"
-)
-
-// These consts are for mocking the block map tree.
-const (
- mockBMBlkSize = uint32(16)
- mockBMDiskSize = 2500
-)
-
-// TestBlockMapReader stress tests block map reader functionality. It performs
-// random length reads from all possible positions in the block map structure.
-func TestBlockMapReader(t *testing.T) {
- mockBMFile, want := blockMapSetUp(t)
- n := len(want)
-
- for from := 0; from < n; from++ {
- got := make([]byte, n-from)
-
- if read, err := mockBMFile.ReadAt(got, int64(from)); err != nil {
- t.Fatalf("file read operation from offset %d to %d only read %d bytes: %v", from, n, read, err)
- }
-
- if diff := cmp.Diff(got, want[from:]); diff != "" {
- t.Fatalf("file data from offset %d to %d mismatched (-want +got):\n%s", from, n, diff)
- }
- }
-}
-
-// blkNumGen is a number generator which gives block numbers for building the
-// block map file on disk. It gives unique numbers in a random order which
-// facilitates creating an extremely fragmented filesystem.
-type blkNumGen struct {
- nums []uint32
-}
-
-// newBlkNumGen is the blkNumGen constructor.
-func newBlkNumGen() *blkNumGen {
- blkNums := &blkNumGen{}
- lim := mockBMDiskSize / mockBMBlkSize
- blkNums.nums = make([]uint32, lim)
- for i := uint32(0); i < lim; i++ {
- blkNums.nums[i] = i
- }
-
- rand.Shuffle(int(lim), func(i, j int) {
- blkNums.nums[i], blkNums.nums[j] = blkNums.nums[j], blkNums.nums[i]
- })
- return blkNums
-}
-
-// next returns the next random block number.
-func (n *blkNumGen) next() uint32 {
- ret := n.nums[0]
- n.nums = n.nums[1:]
- return ret
-}
-
-// blockMapSetUp creates a mock disk and a block map file. It initializes the
-// block map file with 12 direct blocks, 1 indirect block, 1 double indirect
-// block and 1 triple indirect block (filling it to the brim). It
-// initializes the disk to reflect the inode. Also returns the file data that
-// the inode covers and that is written to disk.
-func blockMapSetUp(t *testing.T) (*blockMapFile, []byte) {
- mockDisk := make([]byte, mockBMDiskSize)
- var fileData []byte
- blkNums := newBlkNumGen()
- off := 0
- data := make([]byte, (numDirectBlks+3)*(*primitive.Uint32)(nil).SizeBytes())
-
- // Write the direct blocks.
- for i := 0; i < numDirectBlks; i++ {
- curBlkNum := primitive.Uint32(blkNums.next())
- curBlkNum.MarshalBytes(data[off:])
- off += curBlkNum.SizeBytes()
- fileData = append(fileData, writeFileDataToBlock(mockDisk, uint32(curBlkNum), 0, blkNums)...)
- }
-
- // Write to indirect block.
- indirectBlk := primitive.Uint32(blkNums.next())
- indirectBlk.MarshalBytes(data[off:])
- off += indirectBlk.SizeBytes()
- fileData = append(fileData, writeFileDataToBlock(mockDisk, uint32(indirectBlk), 1, blkNums)...)
-
- // Write to double indirect block.
- doublyIndirectBlk := primitive.Uint32(blkNums.next())
- doublyIndirectBlk.MarshalBytes(data[off:])
- off += doublyIndirectBlk.SizeBytes()
- fileData = append(fileData, writeFileDataToBlock(mockDisk, uint32(doublyIndirectBlk), 2, blkNums)...)
-
- // Write to triple indirect block.
- triplyIndirectBlk := primitive.Uint32(blkNums.next())
- triplyIndirectBlk.MarshalBytes(data[off:])
- fileData = append(fileData, writeFileDataToBlock(mockDisk, uint32(triplyIndirectBlk), 3, blkNums)...)
-
- args := inodeArgs{
- fs: &filesystem{
- dev: bytes.NewReader(mockDisk),
- },
- diskInode: &disklayout.InodeNew{
- InodeOld: disklayout.InodeOld{
-				SizeLo: getMockBMFileSize(),
- },
- },
- blkSize: uint64(mockBMBlkSize),
- }
- copy(args.diskInode.Data(), data)
-
- mockFile, err := newBlockMapFile(args)
- if err != nil {
- t.Fatalf("newBlockMapFile failed: %v", err)
- }
- return mockFile, fileData
-}
-
-// writeFileDataToBlock writes random bytes to the block on disk.
-func writeFileDataToBlock(disk []byte, blkNum uint32, height uint, blkNums *blkNumGen) []byte {
- if height == 0 {
- start := blkNum * mockBMBlkSize
- end := start + mockBMBlkSize
- rand.Read(disk[start:end])
- return disk[start:end]
- }
-
- var fileData []byte
- for off := blkNum * mockBMBlkSize; off < (blkNum+1)*mockBMBlkSize; off += 4 {
- curBlkNum := primitive.Uint32(blkNums.next())
- curBlkNum.MarshalBytes(disk[off : off+4])
- fileData = append(fileData, writeFileDataToBlock(disk, uint32(curBlkNum), height-1, blkNums)...)
- }
- return fileData
-}
-
-// getMockBMFileSize gets the size of the mock block map file which is used
-// for testing.
-func getMockBMFileSize() uint32 {
- return uint32(numDirectBlks*getCoverage(uint64(mockBMBlkSize), 0) + getCoverage(uint64(mockBMBlkSize), 1) + getCoverage(uint64(mockBMBlkSize), 2) + getCoverage(uint64(mockBMBlkSize), 3))
-}
diff --git a/pkg/sentry/fsimpl/ext/dentry.go b/pkg/sentry/fsimpl/ext/dentry.go
deleted file mode 100644
index 9bfed883a..000000000
--- a/pkg/sentry/fsimpl/ext/dentry.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
-)
-
-// dentry implements vfs.DentryImpl.
-//
-// +stateify savable
-type dentry struct {
- vfsd vfs.Dentry
-
- // Protected by filesystem.mu.
- parent *dentry
- name string
-
- // inode is the inode represented by this dentry. Multiple Dentries may
- // share a single non-directory Inode (with hard links). inode is
- // immutable.
- inode *inode
-}
-
-// Compiles only if dentry implements vfs.DentryImpl.
-var _ vfs.DentryImpl = (*dentry)(nil)
-
-// newDentry is the dentry constructor.
-func newDentry(in *inode) *dentry {
- d := &dentry{
- inode: in,
- }
- d.vfsd.Init(d)
- return d
-}
-
-// IncRef implements vfs.DentryImpl.IncRef.
-func (d *dentry) IncRef() {
- d.inode.incRef()
-}
-
-// TryIncRef implements vfs.DentryImpl.TryIncRef.
-func (d *dentry) TryIncRef() bool {
- return d.inode.tryIncRef()
-}
-
-// DecRef implements vfs.DentryImpl.DecRef.
-func (d *dentry) DecRef(ctx context.Context) {
- // FIXME(b/134676337): filesystem.mu may not be locked as required by
- // inode.decRef().
- d.inode.decRef()
-}
-
-// InotifyWithParent implements vfs.DentryImpl.InotifyWithParent.
-//
-// TODO(b/134676337): Implement inotify.
-func (d *dentry) InotifyWithParent(ctx context.Context, events, cookie uint32, et vfs.EventType) {}
-
-// Watches implements vfs.DentryImpl.Watches.
-//
-// TODO(b/134676337): Implement inotify.
-func (d *dentry) Watches() *vfs.Watches {
- return nil
-}
-
-// OnZeroWatches implements vfs.Dentry.OnZeroWatches.
-//
-// TODO(b/134676337): Implement inotify.
-func (d *dentry) OnZeroWatches(context.Context) {}
diff --git a/pkg/sentry/fsimpl/ext/directory.go b/pkg/sentry/fsimpl/ext/directory.go
deleted file mode 100644
index cc067c20e..000000000
--- a/pkg/sentry/fsimpl/ext/directory.go
+++ /dev/null
@@ -1,312 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/errors/linuxerr"
- "gvisor.dev/gvisor/pkg/log"
- "gvisor.dev/gvisor/pkg/sentry/fs"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/ext/disklayout"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/sync"
-)
-
-// directory represents a directory inode. It holds the childList in memory.
-//
-// +stateify savable
-type directory struct {
- inode inode
-
- // childCache maps filenames to dentries for children for which dentries
- // have been instantiated. childCache is protected by filesystem.mu.
- childCache map[string]*dentry
-
- // mu serializes the changes to childList.
- // Lock Order (outermost locks must be taken first):
- // directory.mu
- // filesystem.mu
- mu sync.Mutex `state:"nosave"`
-
- // childList is a list containing (1) child dirents and (2) fake dirents
- // (with diskDirent == nil) that represent the iteration position of
- // directoryFDs. childList is used to support directoryFD.IterDirents()
- // efficiently. childList is protected by mu.
- childList direntList
-
- // childMap maps the child's filename to the dirent structure stored in
-	// childList. This adds some data replication but speeds up path
- // traversal. For consistency, key == childMap[key].diskDirent.FileName().
- // Immutable.
- childMap map[string]*dirent
-}
-
-// newDirectory is the directory constructor.
-func newDirectory(args inodeArgs, newDirent bool) (*directory, error) {
- file := &directory{
- childCache: make(map[string]*dentry),
- childMap: make(map[string]*dirent),
- }
- file.inode.init(args, file)
-
- // Initialize childList by reading dirents from the underlying file.
- if args.diskInode.Flags().Index {
- // TODO(b/134676337): Support hash tree directories. Currently only the '.'
- // and '..' entries are read in.
-
- // Users cannot navigate this hash tree directory yet.
-		log.Warningf("hash tree directories are not supported; only '.' and '..' entries are read")
- return file, nil
- }
-
- // The dirents are organized in a linear array in the file data.
- // Extract the file data and decode the dirents.
- regFile, err := newRegularFile(args)
- if err != nil {
- return nil, err
- }
-
- // buf is used as scratch space for reading in dirents from disk and
- // unmarshalling them into dirent structs.
- buf := make([]byte, disklayout.DirentSize)
- size := args.diskInode.Size()
- for off, inc := uint64(0), uint64(0); off < size; off += inc {
- toRead := size - off
- if toRead > disklayout.DirentSize {
- toRead = disklayout.DirentSize
- }
- if n, err := regFile.impl.ReadAt(buf[:toRead], int64(off)); uint64(n) < toRead {
- return nil, err
- }
-
- var curDirent dirent
- if newDirent {
- curDirent.diskDirent = &disklayout.DirentNew{}
- } else {
- curDirent.diskDirent = &disklayout.DirentOld{}
- }
- curDirent.diskDirent.UnmarshalBytes(buf)
-
- if curDirent.diskDirent.Inode() != 0 && len(curDirent.diskDirent.FileName()) != 0 {
- // Inode number and name length fields being set to 0 is used to indicate
- // an unused dirent.
- file.childList.PushBack(&curDirent)
- file.childMap[curDirent.diskDirent.FileName()] = &curDirent
- }
-
- // The next dirent is placed exactly after this dirent record on disk.
- inc = uint64(curDirent.diskDirent.RecordSize())
- }
-
- return file, nil
-}
-
-func (i *inode) isDir() bool {
- _, ok := i.impl.(*directory)
- return ok
-}
-
-// dirent is the directory.childList node.
-//
-// +stateify savable
-type dirent struct {
- diskDirent disklayout.Dirent
-
- // direntEntry links dirents into their parent directory.childList.
- direntEntry
-}
-
-// directoryFD represents a directory file description. It implements
-// vfs.FileDescriptionImpl.
-//
-// +stateify savable
-type directoryFD struct {
- fileDescription
- vfs.DirectoryFileDescriptionDefaultImpl
-
- // Protected by directory.mu.
- iter *dirent
- off int64
-}
-
-// Compiles only if directoryFD implements vfs.FileDescriptionImpl.
-var _ vfs.FileDescriptionImpl = (*directoryFD)(nil)
-
-// Release implements vfs.FileDescriptionImpl.Release.
-func (fd *directoryFD) Release(ctx context.Context) {
- if fd.iter == nil {
- return
- }
-
- dir := fd.inode().impl.(*directory)
- dir.mu.Lock()
- dir.childList.Remove(fd.iter)
- dir.mu.Unlock()
- fd.iter = nil
-}
-
-// IterDirents implements vfs.FileDescriptionImpl.IterDirents.
-func (fd *directoryFD) IterDirents(ctx context.Context, cb vfs.IterDirentsCallback) error {
- extfs := fd.filesystem()
- dir := fd.inode().impl.(*directory)
-
- dir.mu.Lock()
- defer dir.mu.Unlock()
-
- // Ensure that fd.iter exists and is not linked into dir.childList.
- var child *dirent
- if fd.iter == nil {
- // Start iteration at the beginning of dir.
- child = dir.childList.Front()
- fd.iter = &dirent{}
- } else {
- // Continue iteration from where we left off.
- child = fd.iter.Next()
- dir.childList.Remove(fd.iter)
- }
- for ; child != nil; child = child.Next() {
- // Skip other directoryFD iterators.
- if child.diskDirent != nil {
- childType, ok := child.diskDirent.FileType()
- if !ok {
- // We will need to read the inode off disk. Do not increment
- // ref count here because this inode is not being added to the
- // dentry tree.
- extfs.mu.Lock()
- childInode, err := extfs.getOrCreateInodeLocked(child.diskDirent.Inode())
- extfs.mu.Unlock()
- if err != nil {
- // Usage of the file description after the error is
- // undefined. This implementation would continue reading
- // from the next dirent.
- fd.off++
- dir.childList.InsertAfter(child, fd.iter)
- return err
- }
- childType = fs.ToInodeType(childInode.diskInode.Mode().FileType())
- }
-
- if err := cb.Handle(vfs.Dirent{
- Name: child.diskDirent.FileName(),
- Type: fs.ToDirentType(childType),
- Ino: uint64(child.diskDirent.Inode()),
- NextOff: fd.off + 1,
- }); err != nil {
- dir.childList.InsertBefore(child, fd.iter)
- return err
- }
- fd.off++
- }
- }
- dir.childList.PushBack(fd.iter)
- return nil
-}
-
-// Seek implements vfs.FileDescriptionImpl.Seek.
-func (fd *directoryFD) Seek(ctx context.Context, offset int64, whence int32) (int64, error) {
- if whence != linux.SEEK_SET && whence != linux.SEEK_CUR {
- return 0, linuxerr.EINVAL
- }
-
- dir := fd.inode().impl.(*directory)
-
- dir.mu.Lock()
- defer dir.mu.Unlock()
-
- // Find resulting offset.
- if whence == linux.SEEK_CUR {
- offset += fd.off
- }
-
- if offset < 0 {
- // lseek(2) specifies that EINVAL should be returned if the resulting offset
- // is negative.
- return 0, linuxerr.EINVAL
- }
-
- n := int64(len(dir.childMap))
- realWantOff := offset
- if realWantOff > n {
- realWantOff = n
- }
- realCurOff := fd.off
- if realCurOff > n {
- realCurOff = n
- }
-
- // Ensure that fd.iter exists and is linked into dir.childList so we can
- // intelligently seek from the optimal position.
- if fd.iter == nil {
- fd.iter = &dirent{}
- dir.childList.PushFront(fd.iter)
- }
-
- // Guess that iterating from the current position is optimal.
- child := fd.iter
- diff := realWantOff - realCurOff // Shows direction and magnitude of travel.
-
- // See if starting from the beginning or end is better.
- abDiff := diff
- if diff < 0 {
- abDiff = -diff
- }
- if abDiff > realWantOff {
- // Starting from the beginning is best.
- child = dir.childList.Front()
- diff = realWantOff
- } else if abDiff > (n - realWantOff) {
- // Starting from the end is best.
- child = dir.childList.Back()
- // (n - 1) because the last non-nil dirent represents the (n-1)th offset.
- diff = realWantOff - (n - 1)
- }
-
- for child != nil {
- // Skip other directoryFD iterators.
- if child.diskDirent != nil {
- if diff == 0 {
- if child != fd.iter {
- dir.childList.Remove(fd.iter)
- dir.childList.InsertBefore(child, fd.iter)
- }
-
- fd.off = offset
- return offset, nil
- }
-
- if diff < 0 {
- diff++
- child = child.Prev()
- } else {
- diff--
- child = child.Next()
- }
- continue
- }
-
- if diff < 0 {
- child = child.Prev()
- } else {
- child = child.Next()
- }
- }
-
- // Reaching here indicates that the offset is beyond the end of the childList.
- dir.childList.Remove(fd.iter)
- dir.childList.PushBack(fd.iter)
- fd.off = offset
- return offset, nil
-}
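
A minimal standalone restatement of the seek heuristic above (iterate from
whichever of the current position, the front, or the back is closest to the
wanted offset); the offsets used in main are hypothetical:

```go
package main

import "fmt"

// chooseStart mirrors the decision in directoryFD.Seek: given n entries, the
// current offset cur, and the wanted offset want, it reports where to start
// walking and how many steps to take (negative means walking backwards).
func chooseStart(n, cur, want int64) (string, int64) {
	diff := want - cur
	abs := diff
	if abs < 0 {
		abs = -abs
	}
	switch {
	case abs > want:
		return "front", want
	case abs > n-want:
		return "back", want - (n - 1)
	default:
		return "current", diff
	}
}

func main() {
	fmt.Println(chooseStart(100, 90, 5))  // front 5
	fmt.Println(chooseStart(100, 10, 95)) // back -4
	fmt.Println(chooseStart(100, 40, 45)) // current 5
}
```
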
diff --git a/pkg/sentry/fsimpl/ext/disklayout/BUILD b/pkg/sentry/fsimpl/ext/disklayout/BUILD
deleted file mode 100644
index d98a05dd8..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/BUILD
+++ /dev/null
@@ -1,48 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "disklayout",
- srcs = [
- "block_group.go",
- "block_group_32.go",
- "block_group_64.go",
- "dirent.go",
- "dirent_new.go",
- "dirent_old.go",
- "disklayout.go",
- "extent.go",
- "inode.go",
- "inode_new.go",
- "inode_old.go",
- "superblock.go",
- "superblock_32.go",
- "superblock_64.go",
- "superblock_old.go",
- "test_utils.go",
- ],
- marshal = True,
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/marshal",
- "//pkg/sentry/fs",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/time",
- ],
-)
-
-go_test(
- name = "disklayout_test",
- size = "small",
- srcs = [
- "block_group_test.go",
- "dirent_test.go",
- "extent_test.go",
- "inode_test.go",
- "superblock_test.go",
- ],
- library = ":disklayout",
- deps = ["//pkg/sentry/kernel/time"],
-)
diff --git a/pkg/sentry/fsimpl/ext/disklayout/block_group.go b/pkg/sentry/fsimpl/ext/disklayout/block_group.go
deleted file mode 100644
index 0d56ae9da..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/block_group.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "gvisor.dev/gvisor/pkg/marshal"
-)
-
-// BlockGroup represents a Linux ext block group descriptor. An ext file system
-// is split into a series of block groups. This provides an access layer to
-// information needed to access and use a block group.
-//
-// Location:
-// - The block group descriptor table is always placed in the blocks
-// immediately after the block containing the superblock.
-// - The 1st block group descriptor in the original table is in the
-// (sb.FirstDataBlock() + 1)th block.
-// - See SuperBlock docs to see where the block group descriptor table is
-// replicated.
-// - sb.BgDescSize() must be used as the block group descriptor entry size
-// while reading the table from disk.
-//
-// See https://www.kernel.org/doc/html/latest/filesystems/ext4/globals.html#block-group-descriptors.
-type BlockGroup interface {
- marshal.Marshallable
-
- // InodeTable returns the absolute block number of the block containing the
- // inode table. This points to an array of Inode structs. Inode tables are
- // statically allocated at mkfs time. The superblock records the number of
- // inodes per group (length of this table) and the size of each inode struct.
- InodeTable() uint64
-
- // BlockBitmap returns the absolute block number of the block containing the
- // block bitmap. This bitmap tracks the usage of data blocks within this block
- // group and has its own checksum.
- BlockBitmap() uint64
-
- // InodeBitmap returns the absolute block number of the block containing the
- // inode bitmap. This bitmap tracks the usage of this group's inode table
- // entries and has its own checksum.
- InodeBitmap() uint64
-
- // ExclusionBitmap returns the absolute block number of the snapshot exclusion
- // bitmap.
- ExclusionBitmap() uint64
-
- // FreeBlocksCount returns the number of free blocks in the group.
- FreeBlocksCount() uint32
-
- // FreeInodesCount returns the number of free inodes in the group.
- FreeInodesCount() uint32
-
- // DirectoryCount returns the number of inodes that represent directories
- // under this block group.
- DirectoryCount() uint32
-
- // UnusedInodeCount returns the number of unused inodes beyond the last used
- // inode in this group's inode table. As a result, we needn’t scan past the
- // (InodesPerGroup - UnusedInodeCount())th entry in the inode table.
- UnusedInodeCount() uint32
-
- // BlockBitmapChecksum returns the block bitmap checksum. This is calculated
- // using crc32c(FS UUID + group number + entire bitmap).
- BlockBitmapChecksum() uint32
-
- // InodeBitmapChecksum returns the inode bitmap checksum. This is calculated
- // using crc32c(FS UUID + group number + entire bitmap).
- InodeBitmapChecksum() uint32
-
- // Checksum returns this block group's checksum.
- //
- // If SbMetadataCsum feature is set:
- // - checksum is crc32c(FS UUID + group number + group descriptor
- // structure) & 0xFFFF.
- //
- // If SbGdtCsum feature is set:
- // - checksum is crc16(FS UUID + group number + group descriptor
- // structure).
- //
- // SbMetadataCsum and SbGdtCsum should not be both set.
- // If they are, Linux warns and asks to run fsck.
- Checksum() uint16
-
- // Flags returns BGFlags which represents the block group flags.
- Flags() BGFlags
-}
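
A minimal sketch of the descriptor-table location rule documented above,
assuming hypothetical stand-ins for sb.FirstDataBlock(), the filesystem block
size, and sb.BgDescSize():

```go
package main

import "fmt"

// descriptorOffset follows the rule documented on BlockGroup: the descriptor
// table starts in block (firstDataBlock + 1), immediately after the block
// holding the superblock, and each entry is bgDescSize bytes long.
func descriptorOffset(firstDataBlock, blkSize, bgDescSize, n uint64) uint64 {
	return (firstDataBlock+1)*blkSize + n*bgDescSize
}

func main() {
	// Hypothetical 1 KiB-block filesystem (FirstDataBlock() == 1) with
	// 32-byte descriptors.
	fmt.Println(descriptorOffset(1, 1024, 32, 0)) // 2048
	fmt.Println(descriptorOffset(1, 1024, 32, 5)) // 2208
}
```
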
-
-// These are the different block group flags.
-const (
- // BgInodeUninit indicates that inode table and bitmap are not initialized.
- BgInodeUninit uint16 = 0x1
-
- // BgBlockUninit indicates that block bitmap is not initialized.
- BgBlockUninit uint16 = 0x2
-
- // BgInodeZeroed indicates that inode table is zeroed.
- BgInodeZeroed uint16 = 0x4
-)
-
-// BGFlags represents all the different combinations of block group flags.
-type BGFlags struct {
- InodeUninit bool
- BlockUninit bool
- InodeZeroed bool
-}
-
-// ToInt converts a BGFlags struct back to its 16-bit representation.
-func (f BGFlags) ToInt() uint16 {
- var res uint16
-
- if f.InodeUninit {
- res |= BgInodeUninit
- }
- if f.BlockUninit {
- res |= BgBlockUninit
- }
- if f.InodeZeroed {
- res |= BgInodeZeroed
- }
-
- return res
-}
-
-// BGFlagsFromInt converts the 16-bit flag representation to a BGFlags struct.
-func BGFlagsFromInt(flags uint16) BGFlags {
- return BGFlags{
- InodeUninit: flags&BgInodeUninit > 0,
- BlockUninit: flags&BgBlockUninit > 0,
- InodeZeroed: flags&BgInodeZeroed > 0,
- }
-}
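
A short usage sketch of the flag helpers above, against the package as it
existed before this removal; the flag combination is arbitrary:

```go
package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/sentry/fsimpl/ext/disklayout"
)

func main() {
	// Round-trip an arbitrary combination of block group flags.
	f := disklayout.BGFlagsFromInt(disklayout.BgInodeUninit | disklayout.BgInodeZeroed)
	fmt.Printf("%+v\n", f)         // {InodeUninit:true BlockUninit:false InodeZeroed:true}
	fmt.Printf("%#x\n", f.ToInt()) // 0x5
}
```
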
diff --git a/pkg/sentry/fsimpl/ext/disklayout/block_group_32.go b/pkg/sentry/fsimpl/ext/disklayout/block_group_32.go
deleted file mode 100644
index a35fa22a0..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/block_group_32.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-// BlockGroup32Bit emulates the first half of struct ext4_group_desc in
-// fs/ext4/ext4.h. It is the block group descriptor struct for ext2, ext3 and
-// 32-bit ext4 filesystems. It implements BlockGroup interface.
-//
-// +marshal
-type BlockGroup32Bit struct {
- BlockBitmapLo uint32
- InodeBitmapLo uint32
- InodeTableLo uint32
- FreeBlocksCountLo uint16
- FreeInodesCountLo uint16
- UsedDirsCountLo uint16
- FlagsRaw uint16
- ExcludeBitmapLo uint32
- BlockBitmapChecksumLo uint16
- InodeBitmapChecksumLo uint16
- ItableUnusedLo uint16
- ChecksumRaw uint16
-}
-
-// Compiles only if BlockGroup32Bit implements BlockGroup.
-var _ BlockGroup = (*BlockGroup32Bit)(nil)
-
-// InodeTable implements BlockGroup.InodeTable.
-func (bg *BlockGroup32Bit) InodeTable() uint64 { return uint64(bg.InodeTableLo) }
-
-// BlockBitmap implements BlockGroup.BlockBitmap.
-func (bg *BlockGroup32Bit) BlockBitmap() uint64 { return uint64(bg.BlockBitmapLo) }
-
-// InodeBitmap implements BlockGroup.InodeBitmap.
-func (bg *BlockGroup32Bit) InodeBitmap() uint64 { return uint64(bg.InodeBitmapLo) }
-
-// ExclusionBitmap implements BlockGroup.ExclusionBitmap.
-func (bg *BlockGroup32Bit) ExclusionBitmap() uint64 { return uint64(bg.ExcludeBitmapLo) }
-
-// FreeBlocksCount implements BlockGroup.FreeBlocksCount.
-func (bg *BlockGroup32Bit) FreeBlocksCount() uint32 { return uint32(bg.FreeBlocksCountLo) }
-
-// FreeInodesCount implements BlockGroup.FreeInodesCount.
-func (bg *BlockGroup32Bit) FreeInodesCount() uint32 { return uint32(bg.FreeInodesCountLo) }
-
-// DirectoryCount implements BlockGroup.DirectoryCount.
-func (bg *BlockGroup32Bit) DirectoryCount() uint32 { return uint32(bg.UsedDirsCountLo) }
-
-// UnusedInodeCount implements BlockGroup.UnusedInodeCount.
-func (bg *BlockGroup32Bit) UnusedInodeCount() uint32 { return uint32(bg.ItableUnusedLo) }
-
-// BlockBitmapChecksum implements BlockGroup.BlockBitmapChecksum.
-func (bg *BlockGroup32Bit) BlockBitmapChecksum() uint32 { return uint32(bg.BlockBitmapChecksumLo) }
-
-// InodeBitmapChecksum implements BlockGroup.InodeBitmapChecksum.
-func (bg *BlockGroup32Bit) InodeBitmapChecksum() uint32 { return uint32(bg.InodeBitmapChecksumLo) }
-
-// Checksum implements BlockGroup.Checksum.
-func (bg *BlockGroup32Bit) Checksum() uint16 { return bg.ChecksumRaw }
-
-// Flags implements BlockGroup.Flags.
-func (bg *BlockGroup32Bit) Flags() BGFlags { return BGFlagsFromInt(bg.FlagsRaw) }
diff --git a/pkg/sentry/fsimpl/ext/disklayout/block_group_64.go b/pkg/sentry/fsimpl/ext/disklayout/block_group_64.go
deleted file mode 100644
index d54d1d345..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/block_group_64.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-// BlockGroup64Bit emulates struct ext4_group_desc in fs/ext4/ext4.h.
-// It is the block group descriptor struct for 64-bit ext4 filesystems.
-// It implements BlockGroup interface. It is an extension of the 32-bit
-// version of BlockGroup.
-//
-// +marshal
-type BlockGroup64Bit struct {
-	// We embed the 32-bit struct here because the 64-bit version is just an
-	// extension of the 32-bit version.
- BlockGroup32Bit
-
- // 64-bit specific fields.
- BlockBitmapHi uint32
- InodeBitmapHi uint32
- InodeTableHi uint32
- FreeBlocksCountHi uint16
- FreeInodesCountHi uint16
- UsedDirsCountHi uint16
- ItableUnusedHi uint16
- ExcludeBitmapHi uint32
- BlockBitmapChecksumHi uint16
- InodeBitmapChecksumHi uint16
- _ uint32 // Padding to 64 bytes.
-}
-
-// Compiles only if BlockGroup64Bit implements BlockGroup.
-var _ BlockGroup = (*BlockGroup64Bit)(nil)
-
-// Methods to override. Checksum() and Flags() are not overridden.
-
-// InodeTable implements BlockGroup.InodeTable.
-func (bg *BlockGroup64Bit) InodeTable() uint64 {
- return (uint64(bg.InodeTableHi) << 32) | uint64(bg.InodeTableLo)
-}
-
-// BlockBitmap implements BlockGroup.BlockBitmap.
-func (bg *BlockGroup64Bit) BlockBitmap() uint64 {
- return (uint64(bg.BlockBitmapHi) << 32) | uint64(bg.BlockBitmapLo)
-}
-
-// InodeBitmap implements BlockGroup.InodeBitmap.
-func (bg *BlockGroup64Bit) InodeBitmap() uint64 {
- return (uint64(bg.InodeBitmapHi) << 32) | uint64(bg.InodeBitmapLo)
-}
-
-// ExclusionBitmap implements BlockGroup.ExclusionBitmap.
-func (bg *BlockGroup64Bit) ExclusionBitmap() uint64 {
- return (uint64(bg.ExcludeBitmapHi) << 32) | uint64(bg.ExcludeBitmapLo)
-}
-
-// FreeBlocksCount implements BlockGroup.FreeBlocksCount.
-func (bg *BlockGroup64Bit) FreeBlocksCount() uint32 {
- return (uint32(bg.FreeBlocksCountHi) << 16) | uint32(bg.FreeBlocksCountLo)
-}
-
-// FreeInodesCount implements BlockGroup.FreeInodesCount.
-func (bg *BlockGroup64Bit) FreeInodesCount() uint32 {
- return (uint32(bg.FreeInodesCountHi) << 16) | uint32(bg.FreeInodesCountLo)
-}
-
-// DirectoryCount implements BlockGroup.DirectoryCount.
-func (bg *BlockGroup64Bit) DirectoryCount() uint32 {
- return (uint32(bg.UsedDirsCountHi) << 16) | uint32(bg.UsedDirsCountLo)
-}
-
-// UnusedInodeCount implements BlockGroup.UnusedInodeCount.
-func (bg *BlockGroup64Bit) UnusedInodeCount() uint32 {
- return (uint32(bg.ItableUnusedHi) << 16) | uint32(bg.ItableUnusedLo)
-}
-
-// BlockBitmapChecksum implements BlockGroup.BlockBitmapChecksum.
-func (bg *BlockGroup64Bit) BlockBitmapChecksum() uint32 {
- return (uint32(bg.BlockBitmapChecksumHi) << 16) | uint32(bg.BlockBitmapChecksumLo)
-}
-
-// InodeBitmapChecksum implements BlockGroup.InodeBitmapChecksum.
-func (bg *BlockGroup64Bit) InodeBitmapChecksum() uint32 {
- return (uint32(bg.InodeBitmapChecksumHi) << 16) | uint32(bg.InodeBitmapChecksumLo)
-}
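
The 64-bit accessors above are plain bit splicing: block addresses put a 32-bit Hi word above a 32-bit Lo word, and the 16-bit counters splice two halves. A standalone sketch with hypothetical field values (not taken from any real filesystem image):

package main

import "fmt"

func main() {
	// Hypothetical raw descriptor fields as they would be read off disk.
	var (
		inodeTableLo uint32 = 0x00001234 // lower 32 bits of the inode table block
		inodeTableHi uint32 = 0x0000000a // upper bits, present with the 64-bit feature
		freeLo       uint16 = 0x00ff     // lower half of the free block count
		freeHi       uint16 = 0x0001     // upper half of the free block count
	)

	// Same arithmetic as BlockGroup64Bit.InodeTable and FreeBlocksCount above.
	inodeTable := (uint64(inodeTableHi) << 32) | uint64(inodeTableLo)
	freeBlocks := (uint32(freeHi) << 16) | uint32(freeLo)

	fmt.Printf("inode table block: %#x\n", inodeTable) // 0xa00001234
	fmt.Printf("free blocks:       %#x\n", freeBlocks) // 0x100ff
}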
diff --git a/pkg/sentry/fsimpl/ext/disklayout/block_group_test.go b/pkg/sentry/fsimpl/ext/disklayout/block_group_test.go
deleted file mode 100644
index e4ce484e4..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/block_group_test.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "testing"
-)
-
-// TestBlockGroupSize tests that the block group descriptor structs are of the
-// correct size.
-func TestBlockGroupSize(t *testing.T) {
- var bgSmall BlockGroup32Bit
- assertSize(t, &bgSmall, 32)
- var bgBig BlockGroup64Bit
- assertSize(t, &bgBig, 64)
-}
diff --git a/pkg/sentry/fsimpl/ext/disklayout/dirent.go b/pkg/sentry/fsimpl/ext/disklayout/dirent.go
deleted file mode 100644
index 568c8cb4c..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/dirent.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "gvisor.dev/gvisor/pkg/marshal"
- "gvisor.dev/gvisor/pkg/sentry/fs"
-)
-
-const (
- // MaxFileName is the maximum length of an ext fs file's name.
- MaxFileName = 255
-
- // DirentSize is the size of ext dirent structures.
- DirentSize = 263
-)
-
-var (
- // inodeTypeByFileType maps ext4 file types to vfs inode types.
- //
- // See https://www.kernel.org/doc/html/latest/filesystems/ext4/dynamic.html#ftype.
- inodeTypeByFileType = map[uint8]fs.InodeType{
- 0: fs.Anonymous,
- 1: fs.RegularFile,
- 2: fs.Directory,
- 3: fs.CharacterDevice,
- 4: fs.BlockDevice,
- 5: fs.Pipe,
- 6: fs.Socket,
- 7: fs.Symlink,
- }
-)
-
-// The Dirent interface should be implemented by structs representing ext
-// directory entries. These are for the linear classical directories which
-// just store a list of dirent structs. A directory is a series of data blocks
-// where each data block contains a linear array of dirents. The last entry
-// of the block has a record size that takes it to the end of the block. The
-// end of the directory is when you read dirInode.Size() bytes from the blocks.
-//
-// See https://www.kernel.org/doc/html/latest/filesystems/ext4/dynamic.html#linear-classic-directories.
-type Dirent interface {
- marshal.Marshallable
-
- // Inode returns the absolute inode number of the underlying inode.
- // Inode number 0 signifies an unused dirent.
- Inode() uint32
-
- // RecordSize returns the record length of this dirent on disk. The next
- // dirent in the dirent list should be read after these many bytes from
- // the current dirent. Must be a multiple of 4.
- RecordSize() uint16
-
- // FileName returns the name of the file. Can be at most 255 bytes in length.
- FileName() string
-
- // FileType returns the inode type of the underlying inode. This is a
- // performance hack so that we do not have to read the underlying inode struct
- // to know the type of inode. This will only work when the SbDirentFileType
- // feature is set. If not, the second returned value will be false indicating
- // that user code has to use the inode mode to extract the file type.
- FileType() (fs.InodeType, bool)
-}
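
The Dirent interface above chains records by RecordSize until dirInode.Size() bytes have been consumed. Below is a self-contained sketch of walking one directory data block under that contract; the byte offsets follow the ext4_dir_entry_2 layout (DirentNew, next file), and the hand-built block is hypothetical:

package main

import (
	"encoding/binary"
	"fmt"
)

// walkDirents iterates a single directory data block using the record-size
// chaining described by the Dirent interface. Parsing is done by hand here;
// the removed package did the equivalent via unmarshalling.
func walkDirents(block []byte) {
	for off := 0; off+8 <= len(block); {
		inode := binary.LittleEndian.Uint32(block[off:])
		recLen := binary.LittleEndian.Uint16(block[off+4:])
		nameLen := block[off+6]
		if recLen == 0 {
			return // malformed block; avoid an infinite loop
		}
		if inode != 0 { // inode 0 marks an unused dirent
			name := string(block[off+8 : off+8+int(nameLen)])
			fmt.Printf("inode %d: %q\n", inode, name)
		}
		off += int(recLen) // the next dirent starts RecordSize bytes later
	}
}

func main() {
	// One hand-built dirent: inode 12, record length 16, name "foo"
	// (little-endian, name padded so the record length is a multiple of 4).
	block := []byte{
		12, 0, 0, 0, // inode number
		16, 0, // record length
		3, // name length
		1, // file type (regular file)
		'f', 'o', 'o', 0, 0, 0, 0, 0,
	}
	walkDirents(block)
}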
diff --git a/pkg/sentry/fsimpl/ext/disklayout/dirent_new.go b/pkg/sentry/fsimpl/ext/disklayout/dirent_new.go
deleted file mode 100644
index 51f9c2946..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/dirent_new.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "fmt"
-
- "gvisor.dev/gvisor/pkg/sentry/fs"
-)
-
-// DirentNew represents the ext4 directory entry struct. This emulates Linux's
-// ext4_dir_entry_2 struct. The FileName cannot be more than 255 bytes, so we
-// only need 8 bits to store the NameLength. As a result, NameLength has been
-// shortened and the other 8 bits are used to encode the file type. Use the
-// FileTypeRaw field only if the SbDirentFileType feature is set.
-//
-// Note: This struct can be of variable size on disk. The one described below
-// is of maximum size and the FileName beyond NameLength bytes might contain
-// garbage.
-//
-// +marshal
-type DirentNew struct {
- InodeNumber uint32
- RecordLength uint16
- NameLength uint8
- FileTypeRaw uint8
- FileNameRaw [MaxFileName]byte `marshal:"unaligned"`
-}
-
-// Compiles only if DirentNew implements Dirent.
-var _ Dirent = (*DirentNew)(nil)
-
-// Inode implements Dirent.Inode.
-func (d *DirentNew) Inode() uint32 { return d.InodeNumber }
-
-// RecordSize implements Dirent.RecordSize.
-func (d *DirentNew) RecordSize() uint16 { return d.RecordLength }
-
-// FileName implements Dirent.FileName.
-func (d *DirentNew) FileName() string {
- return string(d.FileNameRaw[:d.NameLength])
-}
-
-// FileType implements Dirent.FileType.
-func (d *DirentNew) FileType() (fs.InodeType, bool) {
- if inodeType, ok := inodeTypeByFileType[d.FileTypeRaw]; ok {
- return inodeType, true
- }
-
- panic(fmt.Sprintf("unknown file type %v", d.FileTypeRaw))
-}
diff --git a/pkg/sentry/fsimpl/ext/disklayout/dirent_old.go b/pkg/sentry/fsimpl/ext/disklayout/dirent_old.go
deleted file mode 100644
index d4b19e086..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/dirent_old.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import "gvisor.dev/gvisor/pkg/sentry/fs"
-
-// DirentOld represents the old directory entry struct which does not contain
-// the file type. This emulates Linux's ext4_dir_entry struct.
-//
-// Note: This struct can be of variable size on disk. The one described below
-// is of maximum size and the FileName beyond NameLength bytes might contain
-// garbage.
-//
-// +marshal
-type DirentOld struct {
- InodeNumber uint32
- RecordLength uint16
- NameLength uint16
- FileNameRaw [MaxFileName]byte `marshal:"unaligned"`
-}
-
-// Compiles only if DirentOld implements Dirent.
-var _ Dirent = (*DirentOld)(nil)
-
-// Inode implements Dirent.Inode.
-func (d *DirentOld) Inode() uint32 { return d.InodeNumber }
-
-// RecordSize implements Dirent.RecordSize.
-func (d *DirentOld) RecordSize() uint16 { return d.RecordLength }
-
-// FileName implements Dirent.FileName.
-func (d *DirentOld) FileName() string {
- return string(d.FileNameRaw[:d.NameLength])
-}
-
-// FileType implements Dirent.FileType.
-func (d *DirentOld) FileType() (fs.InodeType, bool) {
- return fs.Anonymous, false
-}
diff --git a/pkg/sentry/fsimpl/ext/disklayout/dirent_test.go b/pkg/sentry/fsimpl/ext/disklayout/dirent_test.go
deleted file mode 100644
index 3486864dc..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/dirent_test.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "testing"
-)
-
-// TestDirentSize tests that the dirent structs are of the correct
-// size.
-func TestDirentSize(t *testing.T) {
- var dOld DirentOld
- assertSize(t, &dOld, DirentSize)
- var dNew DirentNew
- assertSize(t, &dNew, DirentSize)
-}
diff --git a/pkg/sentry/fsimpl/ext/disklayout/disklayout.go b/pkg/sentry/fsimpl/ext/disklayout/disklayout.go
deleted file mode 100644
index 0834e9ba8..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/disklayout.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package disklayout provides Linux ext file system's disk level structures
-// which can be directly read into from the underlying device. Structs aim to
-// emulate structures exactly as they are laid out on disk.
-//
-// This library aims to be compatible with all ext(2/3/4) systems so it
-// provides a generic interface for all major structures and various
-// implementations (for different versions). The user code is responsible for
-// using appropriate implementations based on the underlying device.
-//
-// Interfacing all major structures here serves a few purposes:
-// - Abstracts away the complexity of the underlying structure from client
-// code. The client only has to figure out versioning on set up and then
-// can use these as black boxes and pass it higher up the stack.
-// - Having pointer receivers forces the user to use pointers to these
-// heavy structs. Hence, prevents the client code from unintentionally
-// copying these by value while passing the interface around.
-// - Version-based implementation selection is resolved on set up hence
-// avoiding per call overhead of choosing implementation.
-// - All interface methods are pretty light weight (do not take in any
-// parameters by design). Passing pointer arguments to interface methods
-// can lead to heap allocation as the compiler won't be able to perform
-// escape analysis on an unknown implementation at compile time.
-//
-// Notes:
-// - All structures on disk are in little-endian order. Only jbd2 (journal)
-// structures are in big-endian order.
-// - All OS dependent fields in these structures will be interpreted using
-// the Linux version of that field.
-// - The suffix `Lo` in field names stands for lower bits of that field.
-// - The suffix `Hi` in field names stands for upper bits of that field.
-// - The suffix `Raw` has been added to indicate that the field is not split
-// into Lo and Hi fields and also to resolve name collision with the
-// respective interface.
-package disklayout
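
The package comment above leans on resolving the struct version once at setup and passing interfaces around afterwards. A sketch of what that selection looks like for block group descriptors, written as if inside this (now removed) package; the helper name is hypothetical:

// pickBlockGroup resolves the descriptor implementation from the superblock's
// incompatible feature set, so the rest of the code only ever sees the
// BlockGroup interface.
func pickBlockGroup(sb SuperBlock) BlockGroup {
	if sb.IncompatibleFeatures().Is64Bit {
		return &BlockGroup64Bit{}
	}
	return &BlockGroup32Bit{}
}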
diff --git a/pkg/sentry/fsimpl/ext/disklayout/extent.go b/pkg/sentry/fsimpl/ext/disklayout/extent.go
deleted file mode 100644
index b13999bfc..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/extent.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "gvisor.dev/gvisor/pkg/marshal"
-)
-
-// Extents were introduced in ext4 and provide huge performance gains in terms
-// data locality and reduced metadata block usage. Extents are organized in
-// extent trees. The root node is contained in inode.BlocksRaw.
-//
-// Terminology:
-// - Physical Block:
-// Filesystem data block which is addressed normally wrt the entire
-// filesystem (addressed with 48 bits).
-//
-// - File Block:
-// Data block containing *only* file data and addressed wrt the file
-// with only 32 bits. The (i)th file block contains file data from
-// byte (i * sb.BlockSize()) to ((i+1) * sb.BlockSize()).
-
-const (
- // ExtentHeaderSize is the size of the header of an extent tree node.
- ExtentHeaderSize = 12
-
- // ExtentEntrySize is the size of an entry in an extent tree node.
- // This size is the same for both leaf and internal nodes.
- ExtentEntrySize = 12
-
- // ExtentMagic is the magic number which must be present in the header.
- ExtentMagic = 0xf30a
-)
-
-// ExtentEntryPair couples an in-memory ExtentNode with the ExtentEntry that
-// points to it. We want to cache these structs in memory to avoid repeated
-// disk reads.
-//
-// Note: This struct itself does not represent an on-disk struct.
-type ExtentEntryPair struct {
- // Entry points to the child node on disk.
- Entry ExtentEntry
- // Node points to child node in memory. Is nil if the current node is a leaf.
- Node *ExtentNode
-}
-
-// ExtentNode represents an extent tree node. For internal nodes, all Entries
-// will be ExtentIdxs. For leaf nodes, they will all be Extents.
-//
-// Note: This struct itself does not represent an on-disk struct.
-type ExtentNode struct {
- Header ExtentHeader
- Entries []ExtentEntryPair
-}
-
-// ExtentEntry represents an extent tree node entry. The entry can either be
-// an ExtentIdx or Extent itself. This exists to simplify navigation logic.
-type ExtentEntry interface {
- marshal.Marshallable
-
- // FileBlock returns the first file block number covered by this entry.
- FileBlock() uint32
-
- // PhysicalBlock returns the child physical block that this entry points to.
- PhysicalBlock() uint64
-}
-
-// ExtentHeader emulates the ext4_extent_header struct in ext4. Each extent
-// tree node begins with this and is followed by `NumEntries` number of:
-// - Extent if `Depth` == 0
-// - ExtentIdx otherwise
-//
-// +marshal
-type ExtentHeader struct {
- // Magic is the extent magic number; it must be 0xf30a.
- Magic uint16
-
- // NumEntries indicates the number of valid entries following the header.
- NumEntries uint16
-
- // MaxEntries that could follow the header. Used while adding entries.
- MaxEntries uint16
-
- // Height represents the distance of this node from the farthest leaf. Please
- // note that Linux incorrectly calls this `Depth` (which means the distance
- // of the node from the root).
- Height uint16
- _ uint32
-}
-
-// ExtentIdx emulates the ext4_extent_idx struct in ext4. Only present in
-// internal nodes. Sorted in ascending order based on FirstFileBlock since
-// Linux does a binary search on this. This points to a block containing the
-// child node.
-//
-// +marshal
-type ExtentIdx struct {
- FirstFileBlock uint32
- ChildBlockLo uint32
- ChildBlockHi uint16
- _ uint16
-}
-
-// Compiles only if ExtentIdx implements ExtentEntry.
-var _ ExtentEntry = (*ExtentIdx)(nil)
-
-// FileBlock implements ExtentEntry.FileBlock.
-func (ei *ExtentIdx) FileBlock() uint32 {
- return ei.FirstFileBlock
-}
-
-// PhysicalBlock implements ExtentEntry.PhysicalBlock. It returns the
-// physical block number of the child block.
-func (ei *ExtentIdx) PhysicalBlock() uint64 {
- return (uint64(ei.ChildBlockHi) << 32) | uint64(ei.ChildBlockLo)
-}
-
-// Extent represents the ext4_extent struct in ext4. Only present in leaf
-// nodes. Sorted in ascending order based on FirstFileBlock since Linux does a
-// binary search on this. This points to an array of data blocks containing the
-// file data. It covers `Length` data blocks starting from `StartBlock`.
-//
-// +marshal
-type Extent struct {
- FirstFileBlock uint32
- Length uint16
- StartBlockHi uint16
- StartBlockLo uint32
-}
-
-// Compiles only if Extent implements ExtentEntry.
-var _ ExtentEntry = (*Extent)(nil)
-
-// FileBlock implements ExtentEntry.FileBlock.
-func (e *Extent) FileBlock() uint32 {
- return e.FirstFileBlock
-}
-
-// PhysicalBlock implements ExtentEntry.PhysicalBlock. It returns the
-// physical block number of the first data block this extent covers.
-func (e *Extent) PhysicalBlock() uint64 {
- return (uint64(e.StartBlockHi) << 32) | uint64(e.StartBlockLo)
-}
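
An Extent leaf entry above maps Length file blocks starting at FirstFileBlock onto a contiguous run of physical blocks starting at PhysicalBlock(). A sketch of the per-extent lookup, written as if inside the package (helper name hypothetical):

// lookupPhysical returns the physical block backing fileBlock if this extent
// covers it: file block FirstFileBlock+i lives in physical block
// PhysicalBlock()+i.
func lookupPhysical(e *Extent, fileBlock uint32) (uint64, bool) {
	if fileBlock < e.FirstFileBlock || fileBlock >= e.FirstFileBlock+uint32(e.Length) {
		return 0, false
	}
	return e.PhysicalBlock() + uint64(fileBlock-e.FirstFileBlock), true
}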
diff --git a/pkg/sentry/fsimpl/ext/disklayout/extent_test.go b/pkg/sentry/fsimpl/ext/disklayout/extent_test.go
deleted file mode 100644
index c96002e19..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/extent_test.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "testing"
-)
-
-// TestExtentSize tests that the extent structs are of the correct
-// size.
-func TestExtentSize(t *testing.T) {
- var h ExtentHeader
- assertSize(t, &h, ExtentHeaderSize)
- var i ExtentIdx
- assertSize(t, &i, ExtentEntrySize)
- var e Extent
- assertSize(t, &e, ExtentEntrySize)
-}
diff --git a/pkg/sentry/fsimpl/ext/disklayout/inode.go b/pkg/sentry/fsimpl/ext/disklayout/inode.go
deleted file mode 100644
index ef25040a9..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/inode.go
+++ /dev/null
@@ -1,277 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/marshal"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/kernel/time"
-)
-
-// Special inodes. See https://www.kernel.org/doc/html/latest/filesystems/ext4/overview.html#special-inodes.
-const (
- // RootDirInode is the inode number of the root directory inode.
- RootDirInode = 2
-)
-
-// The Inode interface must be implemented by structs representing ext inodes.
-// The inode stores all the metadata pertaining to the file (except for the
-// file name which is held by the directory entry). It does NOT expose all
-// fields and should be extended if need be.
-//
-// Some file systems (e.g. FAT) use the directory entry to store all this
-// information. Ext file systems do not, so that they can support hard links.
-// However, ext4 cheats a little bit and duplicates the file type in the
-// directory entry for performance gains.
-//
-// See https://www.kernel.org/doc/html/latest/filesystems/ext4/dynamic.html#index-nodes.
-type Inode interface {
- marshal.Marshallable
-
- // Mode returns the linux file mode which is majorly used to extract
- // information like:
- // - File permissions (read/write/execute by user/group/others).
- // - Sticky, set UID and GID bits.
- // - File type.
- //
- // Masks to extract this information are provided in pkg/abi/linux/file.go.
- Mode() linux.FileMode
-
- // UID returns the owner UID.
- UID() auth.KUID
-
- // GID returns the owner GID.
- GID() auth.KGID
-
- // Size returns the size of the file in bytes.
- Size() uint64
-
- // InodeSize returns the size of this inode struct in bytes.
- // In ext2 and ext3, the inode struct and inode disk record sizes were fixed at
- // 128 bytes. Ext4 makes it possible for the inode struct to be bigger.
- // However, accessing any field beyond the 128 bytes marker must be verified
- // using this method.
- InodeSize() uint16
-
- // AccessTime returns the last access time. Shows when the file was last read.
- //
- // If InExtendedAttr is set, then this should NOT be used because the
- // underlying field is used to store the extended attribute value checksum.
- AccessTime() time.Time
-
- // ChangeTime returns the last change time. Shows when the file meta data
- // (like permissions) was last changed.
- //
- // If InExtendedAttr is set, then this should NOT be used because the
- // underlying field is used to store the lower 32 bits of the attribute
- // value’s reference count.
- ChangeTime() time.Time
-
- // ModificationTime returns the last modification time. Shows when the file
- // content was last modified.
- //
- // If InExtendedAttr is set, then this should NOT be used because
- // the underlying field contains the number of the inode that owns the
- // extended attribute.
- ModificationTime() time.Time
-
- // DeletionTime returns the deletion time. Inodes are marked as deleted by
- // writing to the underlying field. FS tools can restore files until they are
- // actually overwritten.
- DeletionTime() time.Time
-
- // LinksCount returns the number of hard links to this inode.
- //
- // Normally there is an upper limit on the number of hard links:
- // - ext2/ext3 = 32,000
- // - ext4 = 65,000
- //
- // This implies that an ext4 directory cannot have more than 64,998
- // subdirectories because each subdirectory will have a hard link to the
- // directory via the `..` entry. The directory also has a hard link via its
- // own `.` entry. And finally the inode is initialized with 1 hard link (itself).
- //
- // The underlying value is reset to 1 if all the following hold:
- // - Inode is a directory.
- // - SbDirNlink is enabled.
- // - Number of hard links is incremented past 64,999.
- // Hard link value of 1 for a directory would indicate that the number of hard
- // links is unknown because a directory can have a minimum of 2 hard links (itself
- // and `.` entry).
- LinksCount() uint16
-
- // Flags returns InodeFlags which represents the inode flags.
- Flags() InodeFlags
-
- // Data returns the underlying inode.i_block array as a slice so it's
- // modifiable. This field is special and is used to store various kinds of
- // things depending on the filesystem version and inode type. The underlying
- // field name in Linux is a little misleading.
- // - In ext2/ext3, it contains the block map.
- // - In ext4, it contains the extent tree root node.
- // - For inline files, it contains the file contents.
- // - For symlinks, it contains the link path (if it fits here).
- //
- // See https://www.kernel.org/doc/html/latest/filesystems/ext4/dynamic.html#the-contents-of-inode-i-block.
- Data() []byte
-}
-
-// Inode flags. This is not comprehensive and flags which were not used in
-// the Linux kernel have been excluded.
-const (
- // InSync indicates that all writes to the file must be synchronous.
- InSync = 0x8
-
- // InImmutable indicates that this file is immutable.
- InImmutable = 0x10
-
- // InAppend indicates that this file can only be appended to.
- InAppend = 0x20
-
- // InNoDump indicates that the dump(1) utility should not dump this file.
- InNoDump = 0x40
-
- // InNoAccessTime indicates that the access time of this inode must not be
- // updated.
- InNoAccessTime = 0x80
-
- // InIndex indicates that this directory has hashed indexes.
- InIndex = 0x1000
-
- // InJournalData indicates that file data must always be written through a
- // journal device.
- InJournalData = 0x4000
-
- // InDirSync indicates that all the directory entry data must be written
- // synchronously.
- InDirSync = 0x10000
-
- // InTopDir indicates that this inode is at the top of the directory hierarchy.
- InTopDir = 0x20000
-
- // InHugeFile indicates that this is a huge file.
- InHugeFile = 0x40000
-
- // InExtents indicates that this inode uses extents.
- InExtents = 0x80000
-
- // InExtendedAttr indicates that this inode stores a large extended attribute
- // value in its data blocks.
- InExtendedAttr = 0x200000
-
- // InInline indicates that this inode has inline data.
- InInline = 0x10000000
-
- // InReserved indicates that this inode is reserved for the ext4 library.
- InReserved = 0x80000000
-)
-
-// InodeFlags represents all possible combinations of inode flags. It aims to
-// cover the bit masks and provide a more user-friendly interface.
-type InodeFlags struct {
- Sync bool
- Immutable bool
- Append bool
- NoDump bool
- NoAccessTime bool
- Index bool
- JournalData bool
- DirSync bool
- TopDir bool
- HugeFile bool
- Extents bool
- ExtendedAttr bool
- Inline bool
- Reserved bool
-}
-
-// ToInt converts inode flags back to its 32-bit rep.
-func (f InodeFlags) ToInt() uint32 {
- var res uint32
-
- if f.Sync {
- res |= InSync
- }
- if f.Immutable {
- res |= InImmutable
- }
- if f.Append {
- res |= InAppend
- }
- if f.NoDump {
- res |= InNoDump
- }
- if f.NoAccessTime {
- res |= InNoAccessTime
- }
- if f.Index {
- res |= InIndex
- }
- if f.JournalData {
- res |= InJournalData
- }
- if f.DirSync {
- res |= InDirSync
- }
- if f.TopDir {
- res |= InTopDir
- }
- if f.HugeFile {
- res |= InHugeFile
- }
- if f.Extents {
- res |= InExtents
- }
- if f.ExtendedAttr {
- res |= InExtendedAttr
- }
- if f.Inline {
- res |= InInline
- }
- if f.Reserved {
- res |= InReserved
- }
-
- return res
-}
-
-// InodeFlagsFromInt converts the integer representation of inode flags to
-// an InodeFlags struct.
-func InodeFlagsFromInt(f uint32) InodeFlags {
- return InodeFlags{
- Sync: f&InSync > 0,
- Immutable: f&InImmutable > 0,
- Append: f&InAppend > 0,
- NoDump: f&InNoDump > 0,
- NoAccessTime: f&InNoAccessTime > 0,
- Index: f&InIndex > 0,
- JournalData: f&InJournalData > 0,
- DirSync: f&InDirSync > 0,
- TopDir: f&InTopDir > 0,
- HugeFile: f&InHugeFile > 0,
- Extents: f&InExtents > 0,
- ExtendedAttr: f&InExtendedAttr > 0,
- Inline: f&InInline > 0,
- Reserved: f&InReserved > 0,
- }
-}
-
-// These masks define how users can view/modify inode flags. The rest of the
-// flags are for internal kernel usage only.
-const (
- InUserReadFlagMask = 0x4BDFFF
- InUserWriteFlagMask = 0x4B80FF
-)
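
The flag constants and the InodeFlags struct above are intended as exact inverses of each other. A sketch of the round trip, written as if inside the package (needs "fmt"):

func ExampleInodeFlags() {
	// An ext4 inode that uses extents and has the no-atime flag set.
	raw := uint32(InExtents | InNoAccessTime)
	flags := InodeFlagsFromInt(raw)
	fmt.Println(flags.Extents, flags.NoAccessTime) // true true
	fmt.Println(flags.ToInt() == raw)              // true
}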
diff --git a/pkg/sentry/fsimpl/ext/disklayout/inode_new.go b/pkg/sentry/fsimpl/ext/disklayout/inode_new.go
deleted file mode 100644
index a4503f5cf..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/inode_new.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import "gvisor.dev/gvisor/pkg/sentry/kernel/time"
-
-// InodeNew represents ext4 inode structure which can be bigger than
-// OldInodeSize. The actual size of this struct should be determined using
-// inode.ExtraInodeSize. Accessing any field here should be verified with the
-// actual size. The extra space between the end of the inode struct and end of
-// the inode record can be used to store extended attr.
-//
-// If the TimeExtra fields are in scope, the lower 2 bits of those are used
-// to extend their counter part to be 34 bits wide; the rest (upper) 30 bits
-// are used to provide nanoscond precision. Hence, these timestamps will now
-// overflow in May 2446.
-// See https://www.kernel.org/doc/html/latest/filesystems/ext4/dynamic.html#inode-timestamps.
-//
-// +marshal
-type InodeNew struct {
- InodeOld
-
- ExtraInodeSize uint16
- ChecksumHi uint16
- ChangeTimeExtra uint32
- ModificationTimeExtra uint32
- AccessTimeExtra uint32
- CreationTime uint32
- CreationTimeExtra uint32
- VersionHi uint32
- ProjectID uint32
-}
-
-// Compiles only if InodeNew implements Inode.
-var _ Inode = (*InodeNew)(nil)
-
-// fromExtraTime decodes the extra time and constructs the kernel time struct
-// with nanosecond precision.
-func fromExtraTime(lo int32, extra uint32) time.Time {
- // See description above InodeNew for format.
- seconds := (int64(extra&0x3) << 32) + int64(lo)
- nanoseconds := int64(extra >> 2)
- return time.FromUnix(seconds, nanoseconds)
-}
-
-// Only override methods which change due to ext4 specific fields.
-
-// Size implements Inode.Size.
-func (in *InodeNew) Size() uint64 {
- return (uint64(in.SizeHi) << 32) | uint64(in.SizeLo)
-}
-
-// InodeSize implements Inode.InodeSize.
-func (in *InodeNew) InodeSize() uint16 {
- return OldInodeSize + in.ExtraInodeSize
-}
-
-// ChangeTime implements Inode.ChangeTime.
-func (in *InodeNew) ChangeTime() time.Time {
- // Apply new timestamp logic if inode.ChangeTimeExtra is in scope.
- if in.ExtraInodeSize >= 8 {
- return fromExtraTime(in.ChangeTimeRaw, in.ChangeTimeExtra)
- }
-
- return in.InodeOld.ChangeTime()
-}
-
-// ModificationTime implements Inode.ModificationTime.
-func (in *InodeNew) ModificationTime() time.Time {
- // Apply new timestamp logic if inode.ModificationTimeExtra is in scope.
- if in.ExtraInodeSize >= 12 {
- return fromExtraTime(in.ModificationTimeRaw, in.ModificationTimeExtra)
- }
-
- return in.InodeOld.ModificationTime()
-}
-
-// AccessTime implements Inode.AccessTime.
-func (in *InodeNew) AccessTime() time.Time {
- // Apply new timestamp logic if inode.AccessTimeExtra is in scope.
- if in.ExtraInodeSize >= 16 {
- return fromExtraTime(in.AccessTimeRaw, in.AccessTimeExtra)
- }
-
- return in.InodeOld.AccessTime()
-}
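
fromExtraTime above widens the 32-bit seconds with the low 2 extra bits and keeps nanoseconds in the upper 30. A worked instance with hypothetical values, written as if inside the package:

func exampleExtraTime() time.Time {
	// lo = -1 reinterpreted as unsigned is 0xffffffff; with epoch bits = 1 the
	// decoded seconds are (1 << 32) + (-1) = 0xffffffff, i.e. 2106-02-07.
	lo := int32(-1)
	extra := uint32(1 | (500000000 << 2)) // low 2 bits: epoch, upper 30: 500ms
	return fromExtraTime(lo, extra)       // == time.FromUnix(0xffffffff, 500000000)
}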
diff --git a/pkg/sentry/fsimpl/ext/disklayout/inode_old.go b/pkg/sentry/fsimpl/ext/disklayout/inode_old.go
deleted file mode 100644
index e6b28babf..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/inode_old.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/kernel/time"
-)
-
-const (
- // OldInodeSize is the inode size in ext2/ext3.
- OldInodeSize = 128
-)
-
-// InodeOld implements Inode interface. It emulates ext2/ext3 inode struct.
-// Inode struct size and record size are both 128 bytes for this.
-//
-// All fields representing time are in seconds since the epoch, which means
-// that they will overflow in January 2038.
-//
-// +marshal
-type InodeOld struct {
- ModeRaw uint16
- UIDLo uint16
- SizeLo uint32
-
- // The time fields are signed integers because they could be negative to
- // represent time before the epoch.
- AccessTimeRaw int32
- ChangeTimeRaw int32
- ModificationTimeRaw int32
- DeletionTimeRaw int32
-
- GIDLo uint16
- LinksCountRaw uint16
- BlocksCountLo uint32
- FlagsRaw uint32
- VersionLo uint32 // This is OS dependent.
- DataRaw [60]byte
- Generation uint32
- FileACLLo uint32
- SizeHi uint32
- ObsoFaddr uint32
-
- // OS dependent fields have been inlined here.
- BlocksCountHi uint16
- FileACLHi uint16
- UIDHi uint16
- GIDHi uint16
- ChecksumLo uint16
- _ uint16
-}
-
-// Compiles only if InodeOld implements Inode.
-var _ Inode = (*InodeOld)(nil)
-
-// Mode implements Inode.Mode.
-func (in *InodeOld) Mode() linux.FileMode { return linux.FileMode(in.ModeRaw) }
-
-// UID implements Inode.UID.
-func (in *InodeOld) UID() auth.KUID {
- return auth.KUID((uint32(in.UIDHi) << 16) | uint32(in.UIDLo))
-}
-
-// GID implements Inode.GID.
-func (in *InodeOld) GID() auth.KGID {
- return auth.KGID((uint32(in.GIDHi) << 16) | uint32(in.GIDLo))
-}
-
-// Size implements Inode.Size.
-func (in *InodeOld) Size() uint64 {
- // In ext2/ext3, in.SizeHi did not exist; it was instead named in.DirACL.
- return uint64(in.SizeLo)
-}
-
-// InodeSize implements Inode.InodeSize.
-func (in *InodeOld) InodeSize() uint16 { return OldInodeSize }
-
-// AccessTime implements Inode.AccessTime.
-func (in *InodeOld) AccessTime() time.Time {
- return time.FromUnix(int64(in.AccessTimeRaw), 0)
-}
-
-// ChangeTime implements Inode.ChangeTime.
-func (in *InodeOld) ChangeTime() time.Time {
- return time.FromUnix(int64(in.ChangeTimeRaw), 0)
-}
-
-// ModificationTime implements Inode.ModificationTime.
-func (in *InodeOld) ModificationTime() time.Time {
- return time.FromUnix(int64(in.ModificationTimeRaw), 0)
-}
-
-// DeletionTime implements Inode.DeletionTime.
-func (in *InodeOld) DeletionTime() time.Time {
- return time.FromUnix(int64(in.DeletionTimeRaw), 0)
-}
-
-// LinksCount implements Inode.LinksCount.
-func (in *InodeOld) LinksCount() uint16 { return in.LinksCountRaw }
-
-// Flags implements Inode.Flags.
-func (in *InodeOld) Flags() InodeFlags { return InodeFlagsFromInt(in.FlagsRaw) }
-
-// Data implements Inode.Data.
-func (in *InodeOld) Data() []byte { return in.DataRaw[:] }
diff --git a/pkg/sentry/fsimpl/ext/disklayout/inode_test.go b/pkg/sentry/fsimpl/ext/disklayout/inode_test.go
deleted file mode 100644
index 90744e956..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/inode_test.go
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "fmt"
- "strconv"
- "testing"
-
- "gvisor.dev/gvisor/pkg/sentry/kernel/time"
-)
-
-// TestInodeSize tests that the inode structs are of the correct size.
-func TestInodeSize(t *testing.T) {
- var iOld InodeOld
- assertSize(t, &iOld, OldInodeSize)
-
- // This was updated from 156 bytes to 160 bytes in Oct 2015.
- var iNew InodeNew
- assertSize(t, &iNew, 160)
-}
-
-// TestTimestampSeconds tests that the seconds part of [a/c/m] timestamps in
-// ext4 inode structs are decoded correctly.
-//
-// These tests are derived from the table under https://www.kernel.org/doc/html/latest/filesystems/ext4/dynamic.html#inode-timestamps.
-func TestTimestampSeconds(t *testing.T) {
- type timestampTest struct {
- // msbSet tells if the most significant bit of InodeOld.[X]TimeRaw is set.
- // If this is set then the 32-bit time is negative.
- msbSet bool
-
- // lowerBound tells if we should take the lowest possible value of
- // InodeOld.[X]TimeRaw while satisfying test.msbSet condition. If set to
- // false it tells to take the highest possible value.
- lowerBound bool
-
- // extraBits is InodeNew.[X]TimeExtra.
- extraBits uint32
-
- // want is the kernel time struct that is expected.
- want time.Time
- }
-
- tests := []timestampTest{
- // 1901-12-13
- {
- msbSet: true,
- lowerBound: true,
- extraBits: 0,
- want: time.FromUnix(int64(-0x80000000), 0),
- },
-
- // 1969-12-31
- {
- msbSet: true,
- lowerBound: false,
- extraBits: 0,
- want: time.FromUnix(int64(-1), 0),
- },
-
- // 1970-01-01
- {
- msbSet: false,
- lowerBound: true,
- extraBits: 0,
- want: time.FromUnix(int64(0), 0),
- },
-
- // 2038-01-19
- {
- msbSet: false,
- lowerBound: false,
- extraBits: 0,
- want: time.FromUnix(int64(0x7fffffff), 0),
- },
-
- // 2038-01-19
- {
- msbSet: true,
- lowerBound: true,
- extraBits: 1,
- want: time.FromUnix(int64(0x80000000), 0),
- },
-
- // 2106-02-07
- {
- msbSet: true,
- lowerBound: false,
- extraBits: 1,
- want: time.FromUnix(int64(0xffffffff), 0),
- },
-
- // 2106-02-07
- {
- msbSet: false,
- lowerBound: true,
- extraBits: 1,
- want: time.FromUnix(int64(0x100000000), 0),
- },
-
- // 2174-02-25
- {
- msbSet: false,
- lowerBound: false,
- extraBits: 1,
- want: time.FromUnix(int64(0x17fffffff), 0),
- },
-
- // 2174-02-25
- {
- msbSet: true,
- lowerBound: true,
- extraBits: 2,
- want: time.FromUnix(int64(0x180000000), 0),
- },
-
- // 2242-03-16
- {
- msbSet: true,
- lowerBound: false,
- extraBits: 2,
- want: time.FromUnix(int64(0x1ffffffff), 0),
- },
-
- // 2242-03-16
- {
- msbSet: false,
- lowerBound: true,
- extraBits: 2,
- want: time.FromUnix(int64(0x200000000), 0),
- },
-
- // 2310-04-04
- {
- msbSet: false,
- lowerBound: false,
- extraBits: 2,
- want: time.FromUnix(int64(0x27fffffff), 0),
- },
-
- // 2310-04-04
- {
- msbSet: true,
- lowerBound: true,
- extraBits: 3,
- want: time.FromUnix(int64(0x280000000), 0),
- },
-
- // 2378-04-22
- {
- msbSet: true,
- lowerBound: false,
- extraBits: 3,
- want: time.FromUnix(int64(0x2ffffffff), 0),
- },
-
- // 2378-04-22
- {
- msbSet: false,
- lowerBound: true,
- extraBits: 3,
- want: time.FromUnix(int64(0x300000000), 0),
- },
-
- // 2446-05-10
- {
- msbSet: false,
- lowerBound: false,
- extraBits: 3,
- want: time.FromUnix(int64(0x37fffffff), 0),
- },
- }
-
- lowerMSB0 := int32(0) // binary: 00000000 00000000 00000000 00000000
- upperMSB0 := int32(0x7fffffff) // binary: 01111111 11111111 11111111 11111111
- lowerMSB1 := int32(-0x80000000) // binary: 10000000 00000000 00000000 00000000
- upperMSB1 := int32(-1) // binary: 11111111 11111111 11111111 11111111
-
- get32BitTime := func(test timestampTest) int32 {
- if test.msbSet {
- if test.lowerBound {
- return lowerMSB1
- }
-
- return upperMSB1
- }
-
- if test.lowerBound {
- return lowerMSB0
- }
-
- return upperMSB0
- }
-
- getTestName := func(test timestampTest) string {
- return fmt.Sprintf(
- "Tests time decoding with epoch bits 0b%s and 32-bit raw time: MSB set=%t, lower bound=%t",
- strconv.FormatInt(int64(test.extraBits), 2),
- test.msbSet,
- test.lowerBound,
- )
- }
-
- for _, test := range tests {
- t.Run(getTestName(test), func(t *testing.T) {
- if got := fromExtraTime(get32BitTime(test), test.extraBits); got != test.want {
- t.Errorf("Expected: %v, Got: %v", test.want, got)
- }
- })
- }
-}
diff --git a/pkg/sentry/fsimpl/ext/disklayout/superblock.go b/pkg/sentry/fsimpl/ext/disklayout/superblock.go
deleted file mode 100644
index 70948ebe9..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/superblock.go
+++ /dev/null
@@ -1,477 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "gvisor.dev/gvisor/pkg/marshal"
-)
-
-const (
- // SbOffset is the absolute offset at which the superblock is placed.
- SbOffset = 1024
-)
-
-// SuperBlock should be implemented by structs representing the ext superblock.
-// The superblock holds a lot of information about the enclosing filesystem.
-// This interface aims to provide access methods to important information held
-// by the superblock. It does NOT expose all fields of the superblock, only the
-// ones necessary. This can be expanded when need be.
-//
-// Location and replication:
-// - The superblock is located at offset 1024 in block group 0.
-// - Redundant copies of the superblock and group descriptors are kept in
-// all groups if SbSparse feature flag is NOT set. If it is set, the
-// replicas only exist in groups whose group number is either 0 or a
-// power of 3, 5, or 7.
-// - There is also a sparse superblock feature v2 in which there are just
-// two replicas saved in the block groups pointed by sb.s_backup_bgs.
-//
-// Replicas should eventually be updated if the superblock is updated.
-//
-// See https://www.kernel.org/doc/html/latest/filesystems/ext4/globals.html#super-block.
-type SuperBlock interface {
- marshal.Marshallable
-
- // InodesCount returns the total number of inodes in this filesystem.
- InodesCount() uint32
-
- // BlocksCount returns the total number of data blocks in this filesystem.
- BlocksCount() uint64
-
- // FreeBlocksCount returns the number of free blocks in this filesystem.
- FreeBlocksCount() uint64
-
- // FreeInodesCount returns the number of free inodes in this filesystem.
- FreeInodesCount() uint32
-
- // MountCount returns the number of mounts since the last fsck.
- MountCount() uint16
-
- // MaxMountCount returns the number of mounts allowed beyond which a fsck is
- // needed.
- MaxMountCount() uint16
-
- // FirstDataBlock returns the absolute block number of the first data block,
- // which contains the super block itself.
- //
- // If the filesystem has 1kb data blocks then this should return 1. For all
- // other configurations, this typically returns 0.
- FirstDataBlock() uint32
-
- // BlockSize returns the size of one data block in this filesystem.
- // This can be calculated by 2^(10 + sb.s_log_block_size). This ensures that
- // the smallest block size is 1kb.
- BlockSize() uint64
-
- // BlocksPerGroup returns the number of data blocks in a block group.
- BlocksPerGroup() uint32
-
- // ClusterSize returns block cluster size (set during mkfs time by admin).
- // This can be calculated by 2^(10 + sb.s_log_cluster_size). This ensures that
- // the smallest cluster size is 1kb.
- //
- // sb.s_log_cluster_size must equal sb.s_log_block_size if bigalloc feature
- // is NOT set and consequently BlockSize() = ClusterSize() in that case.
- ClusterSize() uint64
-
- // ClustersPerGroup returns:
- // - number of clusters per group if bigalloc is enabled.
- // - BlocksPerGroup() otherwise.
- ClustersPerGroup() uint32
-
- // InodeSize returns the size of the inode disk record in bytes. Use this
- // to iterate over inode arrays on disk.
- //
- // In ext2 and ext3:
- // - Each inode had a disk record of 128 bytes.
- // - The inode struct size was fixed at 128 bytes.
- //
- // In ext4 it's possible to allocate larger on-disk inodes:
- // - Inode disk record size = sb.s_inode_size (function return value).
- // = 256 (default)
- // - Inode struct size = 128 + inode.i_extra_isize.
- // = 128 + 32 = 160 (default)
- InodeSize() uint16
-
- // InodesPerGroup returns the number of inodes in a block group.
- InodesPerGroup() uint32
-
- // BgDescSize returns the size of the block group descriptor struct.
- //
- // In ext2, ext3, ext4 (without 64-bit feature), the block group descriptor
- // is only 32 bytes long.
- // In ext4 with 64-bit feature, the block group descriptor expands to AT LEAST
- // 64 bytes. It might be bigger than that.
- BgDescSize() uint16
-
- // CompatibleFeatures returns the CompatFeatures struct which holds all the
- // compatible features this fs supports.
- CompatibleFeatures() CompatFeatures
-
- // IncompatibleFeatures returns the IncompatFeatures struct which holds all the
- // incompatible features this fs supports.
- IncompatibleFeatures() IncompatFeatures
-
- // ReadOnlyCompatibleFeatures returns the RoCompatFeatures struct which holds all the
- // readonly compatible features this fs supports.
- ReadOnlyCompatibleFeatures() RoCompatFeatures
-
- // Magic returns the magic signature, which must be 0xef53.
- Magic() uint16
-
- // Revision returns the superblock revision. Superblock struct fields from
- // offset 0x54 till 0x150 should only be used if superblock has DynamicRev.
- Revision() SbRevision
-}
-
-// SbRevision is the type for superblock revisions.
-type SbRevision uint32
-
-// Super block revisions.
-const (
- // OldRev is the good old (original) format.
- OldRev SbRevision = 0
-
- // DynamicRev is v2 format w/ dynamic inode sizes.
- DynamicRev SbRevision = 1
-)
-
-// Superblock compatible features.
-// This is not exhaustive, unused features are not listed.
-const (
- // SbDirPrealloc indicates directory preallocation.
- SbDirPrealloc = 0x1
-
- // SbHasJournal indicates the presence of a journal. jbd2 should only work
- // with this being set.
- SbHasJournal = 0x4
-
- // SbExtAttr indicates extended attributes support.
- SbExtAttr = 0x8
-
- // SbResizeInode indicates that the fs has reserved GDT blocks (right after
- // group descriptors) for fs expansion.
- SbResizeInode = 0x10
-
- // SbDirIndex indicates that the fs has directory indices.
- SbDirIndex = 0x20
-
- // SbSparseV2 stands for Sparse superblock version 2.
- SbSparseV2 = 0x200
-)
-
-// CompatFeatures represents a superblock's compatible feature set. If the
-// kernel does not understand any of these features, it can still read/write
-// to this fs.
-type CompatFeatures struct {
- DirPrealloc bool
- HasJournal bool
- ExtAttr bool
- ResizeInode bool
- DirIndex bool
- SparseV2 bool
-}
-
-// ToInt converts superblock compatible features back to its 32-bit rep.
-func (f CompatFeatures) ToInt() uint32 {
- var res uint32
-
- if f.DirPrealloc {
- res |= SbDirPrealloc
- }
- if f.HasJournal {
- res |= SbHasJournal
- }
- if f.ExtAttr {
- res |= SbExtAttr
- }
- if f.ResizeInode {
- res |= SbResizeInode
- }
- if f.DirIndex {
- res |= SbDirIndex
- }
- if f.SparseV2 {
- res |= SbSparseV2
- }
-
- return res
-}
-
-// CompatFeaturesFromInt converts the integer representation of superblock
-// compatible features to CompatFeatures struct.
-func CompatFeaturesFromInt(f uint32) CompatFeatures {
- return CompatFeatures{
- DirPrealloc: f&SbDirPrealloc > 0,
- HasJournal: f&SbHasJournal > 0,
- ExtAttr: f&SbExtAttr > 0,
- ResizeInode: f&SbResizeInode > 0,
- DirIndex: f&SbDirIndex > 0,
- SparseV2: f&SbSparseV2 > 0,
- }
-}
-
-// Superblock incompatible features.
-// This is not exhaustive, unused features are not listed.
-const (
- // SbDirentFileType indicates that directory entries record the file type.
- // We should use struct DirentNew for dirents then.
- SbDirentFileType = 0x2
-
- // SbRecovery indicates that the filesystem needs recovery.
- SbRecovery = 0x4
-
- // SbJournalDev indicates that the filesystem has a separate journal device.
- SbJournalDev = 0x8
-
- // SbMetaBG indicates that the filesystem is using Meta block groups. Moves
- // the group descriptors from the congested first block group into the first
- // group of each metablock group to increase the maximum block groups limit
- // and hence support much larger filesystems.
- //
- // See https://www.kernel.org/doc/html/latest/filesystems/ext4/overview.html#meta-block-groups.
- SbMetaBG = 0x10
-
- // SbExtents indicates that the filesystem uses extents. Must be set in ext4
- // filesystems.
- SbExtents = 0x40
-
- // SbIs64Bit indicates that this filesystem addresses blocks with 64-bits.
- // Hence can support 2^64 data blocks.
- SbIs64Bit = 0x80
-
- // SbMMP indicates that this filesystem has multiple mount protection.
- //
- // See https://www.kernel.org/doc/html/latest/filesystems/ext4/globals.html#multiple-mount-protection.
- SbMMP = 0x100
-
- // SbFlexBg indicates that this filesystem has flexible block groups. Several
- // block groups are tied into one logical block group so that all the metadata
- // for the block groups (bitmaps and inode tables) are close together for
- // faster loading. Consequently, large files will be continuous on disk.
- // However, this does not affect the placement of redundant superblocks and
- // group descriptors.
- //
- // See https://www.kernel.org/doc/html/latest/filesystems/ext4/overview.html#flexible-block-groups.
- SbFlexBg = 0x200
-
- // SbLargeDir indicates that large directories are enabled. A directory htree can be 3
- // levels deep. Directory htrees are allowed to be 2 levels deep otherwise.
- SbLargeDir = 0x4000
-
- // SbInlineData allows inline data in inodes for really small files.
- SbInlineData = 0x8000
-
- // SbEncrypted indicates that this fs contains encrypted inodes.
- SbEncrypted = 0x10000
-)
-
-// IncompatFeatures represents a superblock's incompatible feature set. If the
-// kernel does not understand any of these features, it should refuse to mount.
-type IncompatFeatures struct {
- DirentFileType bool
- Recovery bool
- JournalDev bool
- MetaBG bool
- Extents bool
- Is64Bit bool
- MMP bool
- FlexBg bool
- LargeDir bool
- InlineData bool
- Encrypted bool
-}
-
-// ToInt converts superblock incompatible features back to its 32-bit rep.
-func (f IncompatFeatures) ToInt() uint32 {
- var res uint32
-
- if f.DirentFileType {
- res |= SbDirentFileType
- }
- if f.Recovery {
- res |= SbRecovery
- }
- if f.JournalDev {
- res |= SbJournalDev
- }
- if f.MetaBG {
- res |= SbMetaBG
- }
- if f.Extents {
- res |= SbExtents
- }
- if f.Is64Bit {
- res |= SbIs64Bit
- }
- if f.MMP {
- res |= SbMMP
- }
- if f.FlexBg {
- res |= SbFlexBg
- }
- if f.LargeDir {
- res |= SbLargeDir
- }
- if f.InlineData {
- res |= SbInlineData
- }
- if f.Encrypted {
- res |= SbEncrypted
- }
-
- return res
-}
-
-// IncompatFeaturesFromInt converts the integer representation of superblock
-// incompatible features to IncompatFeatures struct.
-func IncompatFeaturesFromInt(f uint32) IncompatFeatures {
- return IncompatFeatures{
- DirentFileType: f&SbDirentFileType > 0,
- Recovery: f&SbRecovery > 0,
- JournalDev: f&SbJournalDev > 0,
- MetaBG: f&SbMetaBG > 0,
- Extents: f&SbExtents > 0,
- Is64Bit: f&SbIs64Bit > 0,
- MMP: f&SbMMP > 0,
- FlexBg: f&SbFlexBg > 0,
- LargeDir: f&SbLargeDir > 0,
- InlineData: f&SbInlineData > 0,
- Encrypted: f&SbEncrypted > 0,
- }
-}
-
-// Superblock readonly compatible features.
-// This is not exhaustive, unused features are not listed.
-const (
- // SbSparse indicates sparse superblocks. Only groups with number either 0 or
- // a power of 3, 5, or 7 will have redundant copies of the superblock and
- // block descriptors.
- SbSparse = 0x1
-
- // SbLargeFile indicates that this fs has been used to store a file >= 2GiB.
- SbLargeFile = 0x2
-
- // SbHugeFile indicates that this fs contains files whose sizes are
- // represented in units of logicals blocks, not 512-byte sectors.
- // represented in units of logical blocks, not 512-byte sectors.
-
- // SbGdtCsum indicates that group descriptors have checksums.
- SbGdtCsum = 0x10
-
- // SbDirNlink indicates that the new subdirectory limit is 64,999. Ext3 has a
- // 32,000 subdirectory limit.
- SbDirNlink = 0x20
-
- // SbExtraIsize indicates that large inodes exist on this filesystem.
- SbExtraIsize = 0x40
-
- // SbHasSnapshot indicates the existence of a snapshot.
- SbHasSnapshot = 0x80
-
- // SbQuota enables usage tracking for all quota types.
- SbQuota = 0x100
-
- // SbBigalloc maps to the bigalloc feature. When set, the minimum allocation
- // unit becomes a cluster rather than a data block. Then block bitmaps track
- // clusters, not data blocks.
- //
- // See https://www.kernel.org/doc/html/latest/filesystems/ext4/overview.html#bigalloc.
- SbBigalloc = 0x200
-
- // SbMetadataCsum indicates that the fs supports metadata checksumming.
- SbMetadataCsum = 0x400
-
- // SbReadOnly marks this filesystem as readonly. Should refuse to mount in
- // read/write mode.
- SbReadOnly = 0x1000
-)
-
-// RoCompatFeatures represents a superblock's readonly compatible feature set.
-// If the kernel does not understand any of these features, it can still mount
-// readonly. But if the user wants to mount read/write, the kernel should
-// refuse to mount.
-type RoCompatFeatures struct {
- Sparse bool
- LargeFile bool
- HugeFile bool
- GdtCsum bool
- DirNlink bool
- ExtraIsize bool
- HasSnapshot bool
- Quota bool
- Bigalloc bool
- MetadataCsum bool
- ReadOnly bool
-}
-
-// ToInt converts superblock readonly compatible features to its 32-bit rep.
-func (f RoCompatFeatures) ToInt() uint32 {
- var res uint32
-
- if f.Sparse {
- res |= SbSparse
- }
- if f.LargeFile {
- res |= SbLargeFile
- }
- if f.HugeFile {
- res |= SbHugeFile
- }
- if f.GdtCsum {
- res |= SbGdtCsum
- }
- if f.DirNlink {
- res |= SbDirNlink
- }
- if f.ExtraIsize {
- res |= SbExtraIsize
- }
- if f.HasSnapshot {
- res |= SbHasSnapshot
- }
- if f.Quota {
- res |= SbQuota
- }
- if f.Bigalloc {
- res |= SbBigalloc
- }
- if f.MetadataCsum {
- res |= SbMetadataCsum
- }
- if f.ReadOnly {
- res |= SbReadOnly
- }
-
- return res
-}
-
-// RoCompatFeaturesFromInt converts the integer representation of superblock
-// readonly compatible features to RoCompatFeatures struct.
-func RoCompatFeaturesFromInt(f uint32) RoCompatFeatures {
- return RoCompatFeatures{
- Sparse: f&SbSparse > 0,
- LargeFile: f&SbLargeFile > 0,
- HugeFile: f&SbHugeFile > 0,
- GdtCsum: f&SbGdtCsum > 0,
- DirNlink: f&SbDirNlink > 0,
- ExtraIsize: f&SbExtraIsize > 0,
- HasSnapshot: f&SbHasSnapshot > 0,
- Quota: f&SbQuota > 0,
- Bigalloc: f&SbBigalloc > 0,
- MetadataCsum: f&SbMetadataCsum > 0,
- ReadOnly: f&SbReadOnly > 0,
- }
-}
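-
-// exampleRoCompatRoundTrip is an illustrative sketch, not part of the original
-// file: ToInt and RoCompatFeaturesFromInt are intended to round-trip for every
-// bit modeled above. The raw value used here is made up.
-func exampleRoCompatRoundTrip() bool {
- raw := uint32(SbSparse | SbLargeFile | SbMetadataCsum)
- return RoCompatFeaturesFromInt(raw).ToInt() == raw
-}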
diff --git a/pkg/sentry/fsimpl/ext/disklayout/superblock_32.go b/pkg/sentry/fsimpl/ext/disklayout/superblock_32.go
deleted file mode 100644
index 4dc6080fb..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/superblock_32.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-// SuperBlock32Bit implements SuperBlock and represents the 32-bit version of
-// the ext4_super_block struct in fs/ext4/ext4.h. Should be used only if
-// RevLevel = DynamicRev and 64-bit feature is disabled.
-//
-// +marshal
-type SuperBlock32Bit struct {
- // We embed the old superblock struct here because the 32-bit version is just
- // an extension of the old version.
- SuperBlockOld
-
- FirstInode uint32
- InodeSizeRaw uint16
- BlockGroupNumber uint16
- FeatureCompat uint32
- FeatureIncompat uint32
- FeatureRoCompat uint32
- UUID [16]byte
- VolumeName [16]byte
- LastMounted [64]byte
- AlgoUsageBitmap uint32
- PreallocBlocks uint8
- PreallocDirBlocks uint8
- ReservedGdtBlocks uint16
- JournalUUID [16]byte
- JournalInum uint32
- JournalDev uint32
- LastOrphan uint32
- HashSeed [4]uint32
- DefaultHashVersion uint8
- JnlBackupType uint8
- BgDescSizeRaw uint16
- DefaultMountOpts uint32
- FirstMetaBg uint32
- MkfsTime uint32
- JnlBlocks [17]uint32
-}
-
-// Compiles only if SuperBlock32Bit implements SuperBlock.
-var _ SuperBlock = (*SuperBlock32Bit)(nil)
-
-// Only override methods which change based on the additional fields above.
-// Not overriding SuperBlock.BgDescSize because it would still return 32 here.
-
-// InodeSize implements SuperBlock.InodeSize.
-func (sb *SuperBlock32Bit) InodeSize() uint16 {
- return sb.InodeSizeRaw
-}
-
-// CompatibleFeatures implements SuperBlock.CompatibleFeatures.
-func (sb *SuperBlock32Bit) CompatibleFeatures() CompatFeatures {
- return CompatFeaturesFromInt(sb.FeatureCompat)
-}
-
-// IncompatibleFeatures implements SuperBlock.IncompatibleFeatures.
-func (sb *SuperBlock32Bit) IncompatibleFeatures() IncompatFeatures {
- return IncompatFeaturesFromInt(sb.FeatureIncompat)
-}
-
-// ReadOnlyCompatibleFeatures implements SuperBlock.ReadOnlyCompatibleFeatures.
-func (sb *SuperBlock32Bit) ReadOnlyCompatibleFeatures() RoCompatFeatures {
- return RoCompatFeaturesFromInt(sb.FeatureRoCompat)
-}
diff --git a/pkg/sentry/fsimpl/ext/disklayout/superblock_64.go b/pkg/sentry/fsimpl/ext/disklayout/superblock_64.go
deleted file mode 100644
index 2c9039327..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/superblock_64.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-// SuperBlock64Bit implements SuperBlock and represents the 64-bit version of
-// the ext4_super_block struct in fs/ext4/ext4.h. This adds up to exactly
-// 1024 bytes (the smallest possible block size), so the superblock always
-// fits in a single data block. Should only be used when the 64-bit
-// feature is set.
-//
-// +marshal
-type SuperBlock64Bit struct {
- // We embed the 32-bit struct here because the 64-bit version is just an
- // extension of the 32-bit version.
- SuperBlock32Bit
-
- BlocksCountHi uint32
- ReservedBlocksCountHi uint32
- FreeBlocksCountHi uint32
- MinInodeSize uint16
- WantInodeSize uint16
- Flags uint32
- RaidStride uint16
- MmpInterval uint16
- MmpBlock uint64
- RaidStripeWidth uint32
- LogGroupsPerFlex uint8
- ChecksumType uint8
- _ uint16
- KbytesWritten uint64
- SnapshotInum uint32
- SnapshotID uint32
- SnapshotRsrvBlocksCount uint64
- SnapshotList uint32
- ErrorCount uint32
- FirstErrorTime uint32
- FirstErrorInode uint32
- FirstErrorBlock uint64
- FirstErrorFunction [32]byte
- FirstErrorLine uint32
- LastErrorTime uint32
- LastErrorInode uint32
- LastErrorLine uint32
- LastErrorBlock uint64
- LastErrorFunction [32]byte
- MountOpts [64]byte
- UserQuotaInum uint32
- GroupQuotaInum uint32
- OverheadBlocks uint32
- BackupBgs [2]uint32
- EncryptAlgos [4]uint8
- EncryptPwSalt [16]uint8
- LostFoundInode uint32
- ProjectQuotaInode uint32
- ChecksumSeed uint32
- WtimeHi uint8
- MtimeHi uint8
- MkfsTimeHi uint8
- LastCheckHi uint8
- FirstErrorTimeHi uint8
- LastErrorTimeHi uint8
- _ [2]uint8
- Encoding uint16
- EncodingFlags uint16
- _ [95]uint32
- Checksum uint32
-}
-
-// Compiles only if SuperBlock64Bit implements SuperBlock.
-var _ SuperBlock = (*SuperBlock64Bit)(nil)
-
-// Only override methods which change based on the 64-bit feature.
-
-// BlocksCount implements SuperBlock.BlocksCount.
-func (sb *SuperBlock64Bit) BlocksCount() uint64 {
- return (uint64(sb.BlocksCountHi) << 32) | uint64(sb.BlocksCountLo)
-}
-
-// FreeBlocksCount implements SuperBlock.FreeBlocksCount.
-func (sb *SuperBlock64Bit) FreeBlocksCount() uint64 {
- return (uint64(sb.FreeBlocksCountHi) << 32) | uint64(sb.FreeBlocksCountLo)
-}
-
-// BgDescSize implements SuperBlock.BgDescSize.
-func (sb *SuperBlock64Bit) BgDescSize() uint16 { return sb.BgDescSizeRaw }
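-
-// Illustrative note, not part of the original file: combining the halves works
-// as in this made-up example. With BlocksCountHi = 0x1 and BlocksCountLo =
-// 0x40, BlocksCount returns (0x1 << 32) | 0x40 = 0x100000040 blocks.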
diff --git a/pkg/sentry/fsimpl/ext/disklayout/superblock_old.go b/pkg/sentry/fsimpl/ext/disklayout/superblock_old.go
deleted file mode 100644
index e4709f23c..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/superblock_old.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-// SuperBlockOld implements SuperBlock and represents the old version of the
-// superblock struct. Should be used only if RevLevel = OldRev.
-//
-// +marshal
-type SuperBlockOld struct {
- InodesCountRaw uint32
- BlocksCountLo uint32
- ReservedBlocksCount uint32
- FreeBlocksCountLo uint32
- FreeInodesCountRaw uint32
- FirstDataBlockRaw uint32
- LogBlockSize uint32
- LogClusterSize uint32
- BlocksPerGroupRaw uint32
- ClustersPerGroupRaw uint32
- InodesPerGroupRaw uint32
- Mtime uint32
- Wtime uint32
- MountCountRaw uint16
- MaxMountCountRaw uint16
- MagicRaw uint16
- State uint16
- Errors uint16
- MinorRevLevel uint16
- LastCheck uint32
- CheckInterval uint32
- CreatorOS uint32
- RevLevel uint32
- DefResUID uint16
- DefResGID uint16
-}
-
-// Compiles only if SuperBlockOld implements SuperBlock.
-var _ SuperBlock = (*SuperBlockOld)(nil)
-
-// InodesCount implements SuperBlock.InodesCount.
-func (sb *SuperBlockOld) InodesCount() uint32 { return sb.InodesCountRaw }
-
-// BlocksCount implements SuperBlock.BlocksCount.
-func (sb *SuperBlockOld) BlocksCount() uint64 { return uint64(sb.BlocksCountLo) }
-
-// FreeBlocksCount implements SuperBlock.FreeBlocksCount.
-func (sb *SuperBlockOld) FreeBlocksCount() uint64 { return uint64(sb.FreeBlocksCountLo) }
-
-// FreeInodesCount implements SuperBlock.FreeInodesCount.
-func (sb *SuperBlockOld) FreeInodesCount() uint32 { return sb.FreeInodesCountRaw }
-
-// MountCount implements SuperBlock.MountCount.
-func (sb *SuperBlockOld) MountCount() uint16 { return sb.MountCountRaw }
-
-// MaxMountCount implements SuperBlock.MaxMountCount.
-func (sb *SuperBlockOld) MaxMountCount() uint16 { return sb.MaxMountCountRaw }
-
-// FirstDataBlock implements SuperBlock.FirstDataBlock.
-func (sb *SuperBlockOld) FirstDataBlock() uint32 { return sb.FirstDataBlockRaw }
-
-// BlockSize implements SuperBlock.BlockSize.
-func (sb *SuperBlockOld) BlockSize() uint64 { return 1 << (10 + sb.LogBlockSize) }
-
-// BlocksPerGroup implements SuperBlock.BlocksPerGroup.
-func (sb *SuperBlockOld) BlocksPerGroup() uint32 { return sb.BlocksPerGroupRaw }
-
-// ClusterSize implements SuperBlock.ClusterSize.
-func (sb *SuperBlockOld) ClusterSize() uint64 { return 1 << (10 + sb.LogClusterSize) }
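-
-// Illustrative note, not part of the original file: LogBlockSize is a shift
-// relative to 1024 bytes, so LogBlockSize = 0 gives a 1 KiB block and
-// LogBlockSize = 2 gives 1 << (10 + 2) = 4 KiB. LogClusterSize works the same
-// way for ClusterSize.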
-
-// ClustersPerGroup implements SuperBlock.ClustersPerGroup.
-func (sb *SuperBlockOld) ClustersPerGroup() uint32 { return sb.ClustersPerGroupRaw }
-
-// InodeSize implements SuperBlock.InodeSize.
-func (sb *SuperBlockOld) InodeSize() uint16 { return OldInodeSize }
-
-// InodesPerGroup implements SuperBlock.InodesPerGroup.
-func (sb *SuperBlockOld) InodesPerGroup() uint32 { return sb.InodesPerGroupRaw }
-
-// BgDescSize implements SuperBlock.BgDescSize.
-func (sb *SuperBlockOld) BgDescSize() uint16 { return 32 }
-
-// CompatibleFeatures implements SuperBlock.CompatibleFeatures.
-func (sb *SuperBlockOld) CompatibleFeatures() CompatFeatures { return CompatFeatures{} }
-
-// IncompatibleFeatures implements SuperBlock.IncompatibleFeatures.
-func (sb *SuperBlockOld) IncompatibleFeatures() IncompatFeatures { return IncompatFeatures{} }
-
-// ReadOnlyCompatibleFeatures implements SuperBlock.ReadOnlyCompatibleFeatures.
-func (sb *SuperBlockOld) ReadOnlyCompatibleFeatures() RoCompatFeatures { return RoCompatFeatures{} }
-
-// Magic implements SuperBlock.Magic.
-func (sb *SuperBlockOld) Magic() uint16 { return sb.MagicRaw }
-
-// Revision implements SuperBlock.Revision.
-func (sb *SuperBlockOld) Revision() SbRevision { return SbRevision(sb.RevLevel) }
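-
-// pickSuperBlockVariant is an illustrative sketch, not part of the original
-// file: it shows roughly how a superblock reader could choose between the
-// three struct variants, based on the revision level and the 64-bit
-// incompatible feature bit described in their doc comments. The actual
-// selection logic in this package may differ.
-func pickSuperBlockVariant(rev SbRevision, incompat IncompatFeatures) SuperBlock {
- switch {
- case rev == OldRev:
-  return &SuperBlockOld{}
- case incompat.Is64Bit:
-  return &SuperBlock64Bit{}
- default:
-  return &SuperBlock32Bit{}
- }
-}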
diff --git a/pkg/sentry/fsimpl/ext/disklayout/superblock_test.go b/pkg/sentry/fsimpl/ext/disklayout/superblock_test.go
deleted file mode 100644
index b734b6987..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/superblock_test.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "testing"
-)
-
-// TestSuperBlockSize tests that the superblock structs are of the correct
-// size.
-func TestSuperBlockSize(t *testing.T) {
- var sbOld SuperBlockOld
- assertSize(t, &sbOld, 84)
- var sb32 SuperBlock32Bit
- assertSize(t, &sb32, 336)
- var sb64 SuperBlock64Bit
- assertSize(t, &sb64, 1024)
-}
diff --git a/pkg/sentry/fsimpl/ext/disklayout/test_utils.go b/pkg/sentry/fsimpl/ext/disklayout/test_utils.go
deleted file mode 100644
index a4bc08411..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/test_utils.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "reflect"
- "testing"
-
- "gvisor.dev/gvisor/pkg/marshal"
-)
-
-func assertSize(t *testing.T, v marshal.Marshallable, want int) {
- t.Helper()
-
- if got := v.SizeBytes(); got != want {
- t.Errorf("struct %s should be exactly %d bytes but is %d bytes", reflect.TypeOf(v).Name(), want, got)
- }
-}
diff --git a/pkg/sentry/fsimpl/ext/ext.go b/pkg/sentry/fsimpl/ext/ext.go
deleted file mode 100644
index 80854b501..000000000
--- a/pkg/sentry/fsimpl/ext/ext.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package ext implements readonly ext(2/3/4) filesystems.
-package ext
-
-import (
- "errors"
- "fmt"
- "io"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/errors/linuxerr"
- "gvisor.dev/gvisor/pkg/fd"
- "gvisor.dev/gvisor/pkg/log"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/ext/disklayout"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
-)
-
-// Name is the name of this filesystem.
-const Name = "ext"
-
-// FilesystemType implements vfs.FilesystemType.
-//
-// +stateify savable
-type FilesystemType struct{}
-
-// getDeviceFd returns an io.ReaderAt to the underlying device.
-// Currently there are two ways of mounting an ext(2/3/4) fs:
-// 1. Specify a mount with our internal special MountType in the OCI spec.
-// 2. Expose the device to the container and mount it from the application layer.
-func getDeviceFd(source string, opts vfs.GetFilesystemOptions) (io.ReaderAt, error) {
- if opts.InternalData == nil {
- // User mount call.
- // TODO(b/134676337): Open the device specified by `source` and return that.
- panic("unimplemented")
- }
-
- // GetFilesystem call originated from within the sentry.
- devFd, ok := opts.InternalData.(int)
- if !ok {
- return nil, errors.New("internal data for ext fs must be an int containing the file descriptor to device")
- }
-
- if devFd < 0 {
- return nil, fmt.Errorf("ext device file descriptor is not valid: %d", devFd)
- }
-
- // The fd.ReadWriter returned from fd.NewReadWriter() does not take ownership
- // of the file descriptor and hence will not close it when it is garbage
- // collected.
- return fd.NewReadWriter(devFd), nil
-}
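-
-// Illustrative sketch, not part of the original file: for case 1 above, the
-// sentry passes the device file descriptor through
-// vfs.GetFilesystemOptions.InternalData, along the lines of
-//
-// opts := vfs.GetFilesystemOptions{InternalData: int(deviceFile.Fd())}
-//
-// where deviceFile is a hypothetical *os.File for the backing device (the
-// setUp helper in ext_test.go uses exactly this mechanism).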
-
-// isCompatible checks if the superblock has feature sets which are compatible.
-// We only need to check the superblock incompatible feature set since we are
-// mounting readonly. We will also need to check the readonly-compatible
-// feature set when mounting read/write.
-func isCompatible(sb disklayout.SuperBlock) bool {
- // Please note that what is being checked is limited based on the fact that we
- // are mounting readonly and that we are not journaling. When mounting
- // read/write or with a journal, this must be reevaluated.
- incompatFeatures := sb.IncompatibleFeatures()
- if incompatFeatures.MetaBG {
- log.Warningf("ext fs: meta block groups are not supported")
- return false
- }
- if incompatFeatures.MMP {
- log.Warningf("ext fs: multiple mount protection is not supported")
- return false
- }
- if incompatFeatures.Encrypted {
- log.Warningf("ext fs: encrypted inodes not supported")
- return false
- }
- if incompatFeatures.InlineData {
- log.Warningf("ext fs: inline files not supported")
- return false
- }
- return true
-}
-
-// Name implements vfs.FilesystemType.Name.
-func (FilesystemType) Name() string {
- return Name
-}
-
-// Release implements vfs.FilesystemType.Release.
-func (FilesystemType) Release(ctx context.Context) {}
-
-// GetFilesystem implements vfs.FilesystemType.GetFilesystem.
-func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.VirtualFilesystem, creds *auth.Credentials, source string, opts vfs.GetFilesystemOptions) (*vfs.Filesystem, *vfs.Dentry, error) {
- // TODO(b/134676337): Ensure that the user is mounting readonly. If not,
- // EACCES should be returned according to mount(2). Filesystem-independent
- // flags (like readonly) are currently not available in pkg/sentry/vfs.
-
- devMinor, err := vfsObj.GetAnonBlockDevMinor()
- if err != nil {
- return nil, nil, err
- }
-
- dev, err := getDeviceFd(source, opts)
- if err != nil {
- return nil, nil, err
- }
-
- fs := filesystem{
- dev: dev,
- inodeCache: make(map[uint32]*inode),
- devMinor: devMinor,
- }
- fs.vfsfs.Init(vfsObj, &fsType, &fs)
- fs.sb, err = readSuperBlock(dev)
- if err != nil {
- fs.vfsfs.DecRef(ctx)
- return nil, nil, err
- }
-
- if fs.sb.Magic() != linux.EXT_SUPER_MAGIC {
- // mount(2) specifies that EINVAL should be returned if the superblock is
- // invalid.
- fs.vfsfs.DecRef(ctx)
- return nil, nil, linuxerr.EINVAL
- }
-
- // Refuse to mount if the filesystem is incompatible.
- if !isCompatible(fs.sb) {
- fs.vfsfs.DecRef(ctx)
- return nil, nil, linuxerr.EINVAL
- }
-
- fs.bgs, err = readBlockGroups(dev, fs.sb)
- if err != nil {
- fs.vfsfs.DecRef(ctx)
- return nil, nil, err
- }
-
- rootInode, err := fs.getOrCreateInodeLocked(disklayout.RootDirInode)
- if err != nil {
- fs.vfsfs.DecRef(ctx)
- return nil, nil, err
- }
- rootInode.incRef()
-
- return &fs.vfsfs, &newDentry(rootInode).vfsd, nil
-}
diff --git a/pkg/sentry/fsimpl/ext/ext_test.go b/pkg/sentry/fsimpl/ext/ext_test.go
deleted file mode 100644
index db712e71f..000000000
--- a/pkg/sentry/fsimpl/ext/ext_test.go
+++ /dev/null
@@ -1,926 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "fmt"
- "io"
- "os"
- "path"
- "sort"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "github.com/google/go-cmp/cmp/cmpopts"
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/errors/linuxerr"
- "gvisor.dev/gvisor/pkg/fspath"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/ext/disklayout"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/test/testutil"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-const (
- assetsDir = "pkg/sentry/fsimpl/ext/assets"
-)
-
-var (
- ext2ImagePath = path.Join(assetsDir, "tiny.ext2")
- ext3ImagePath = path.Join(assetsDir, "tiny.ext3")
- ext4ImagePath = path.Join(assetsDir, "tiny.ext4")
-)
-
-// setUp opens imagePath as an ext Filesystem and returns all necessary
-// elements required to run tests. If the returned error is nil, it also
-// returns a tear-down function which must be called after the test to clean up.
-func setUp(t *testing.T, imagePath string) (context.Context, *vfs.VirtualFilesystem, *vfs.VirtualDentry, func(), error) {
- localImagePath, err := testutil.FindFile(imagePath)
- if err != nil {
- return nil, nil, nil, nil, fmt.Errorf("failed to open local image at path %s: %v", imagePath, err)
- }
-
- f, err := os.Open(localImagePath)
- if err != nil {
- return nil, nil, nil, nil, err
- }
-
- ctx := contexttest.Context(t)
- creds := auth.CredentialsFromContext(ctx)
-
- // Create VFS.
- vfsObj := &vfs.VirtualFilesystem{}
- if err := vfsObj.Init(ctx); err != nil {
- t.Fatalf("VFS init: %v", err)
- }
- vfsObj.MustRegisterFilesystemType("extfs", FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
- })
- mntns, err := vfsObj.NewMountNamespace(ctx, creds, localImagePath, "extfs", &vfs.MountOptions{
- GetFilesystemOptions: vfs.GetFilesystemOptions{
- InternalData: int(f.Fd()),
- },
- })
- if err != nil {
- f.Close()
- return nil, nil, nil, nil, err
- }
-
- root := mntns.Root()
- root.IncRef()
-
- tearDown := func() {
- root.DecRef(ctx)
-
- if err := f.Close(); err != nil {
- t.Fatalf("tearDown failed: %v", err)
- }
- }
- return ctx, vfsObj, &root, tearDown, nil
-}
-
-// TODO(b/134676337): Test vfs.FilesystemImpl.ReadlinkAt and
-// vfs.FilesystemImpl.StatFSAt which are not implemented in
-// vfs.VirtualFilesystem yet.
-
-// TestSeek tests vfs.FileDescriptionImpl.Seek functionality.
-func TestSeek(t *testing.T) {
- type seekTest struct {
- name string
- image string
- path string
- }
-
- tests := []seekTest{
- {
- name: "ext4 root dir seek",
- image: ext4ImagePath,
- path: "/",
- },
- {
- name: "ext3 root dir seek",
- image: ext3ImagePath,
- path: "/",
- },
- {
- name: "ext2 root dir seek",
- image: ext2ImagePath,
- path: "/",
- },
- {
- name: "ext4 reg file seek",
- image: ext4ImagePath,
- path: "/file.txt",
- },
- {
- name: "ext3 reg file seek",
- image: ext3ImagePath,
- path: "/file.txt",
- },
- {
- name: "ext2 reg file seek",
- image: ext2ImagePath,
- path: "/file.txt",
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- ctx, vfsfs, root, tearDown, err := setUp(t, test.image)
- if err != nil {
- t.Fatalf("setUp failed: %v", err)
- }
- defer tearDown()
-
- fd, err := vfsfs.OpenAt(
- ctx,
- auth.CredentialsFromContext(ctx),
- &vfs.PathOperation{Root: *root, Start: *root, Path: fspath.Parse(test.path)},
- &vfs.OpenOptions{},
- )
- if err != nil {
- t.Fatalf("vfsfs.OpenAt failed: %v", err)
- }
-
- if n, err := fd.Seek(ctx, 0, linux.SEEK_SET); n != 0 || err != nil {
- t.Errorf("expected seek position 0, got %d and error %v", n, err)
- }
-
- stat, err := fd.Stat(ctx, vfs.StatOptions{})
- if err != nil {
- t.Errorf("fd.stat failed for file %s in image %s: %v", test.path, test.image, err)
- }
-
- // We should be able to seek beyond the end of file.
- size := int64(stat.Size)
- if n, err := fd.Seek(ctx, size, linux.SEEK_SET); n != size || err != nil {
- t.Errorf("expected seek position %d, got %d and error %v", size, n, err)
- }
-
- // EINVAL should be returned if the resulting offset is negative.
- if _, err := fd.Seek(ctx, -1, linux.SEEK_SET); !linuxerr.Equals(linuxerr.EINVAL, err) {
- t.Errorf("expected error EINVAL but got %v", err)
- }
-
- if n, err := fd.Seek(ctx, 3, linux.SEEK_CUR); n != size+3 || err != nil {
- t.Errorf("expected seek position %d, got %d and error %v", size+3, n, err)
- }
-
- // Make sure negative offsets work with SEEK_CUR.
- if n, err := fd.Seek(ctx, -2, linux.SEEK_CUR); n != size+1 || err != nil {
- t.Errorf("expected seek position %d, got %d and error %v", size+1, n, err)
- }
-
- // EINVAL should be returned if the resulting offset is negative.
- if _, err := fd.Seek(ctx, -(size + 2), linux.SEEK_CUR); !linuxerr.Equals(linuxerr.EINVAL, err) {
- t.Errorf("expected error EINVAL but got %v", err)
- }
-
- // Make sure SEEK_END works with regular files.
- if _, ok := fd.Impl().(*regularFileFD); ok {
- // Seek back to 0.
- if n, err := fd.Seek(ctx, -size, linux.SEEK_END); n != 0 || err != nil {
- t.Errorf("expected seek position %d, got %d and error %v", 0, n, err)
- }
-
- // Seek forward beyond EOF.
- if n, err := fd.Seek(ctx, 1, linux.SEEK_END); n != size+1 || err != nil {
- t.Errorf("expected seek position %d, got %d and error %v", size+1, n, err)
- }
-
- // EINVAL should be returned if the resulting offset is negative.
- if _, err := fd.Seek(ctx, -(size + 1), linux.SEEK_END); !linuxerr.Equals(linuxerr.EINVAL, err) {
- t.Errorf("expected error EINVAL but got %v", err)
- }
- }
- })
- }
-}
-
-// TestStatAt tests filesystem.StatAt functionality.
-func TestStatAt(t *testing.T) {
- type statAtTest struct {
- name string
- image string
- path string
- want linux.Statx
- }
-
- tests := []statAtTest{
- {
- name: "ext4 statx small file",
- image: ext4ImagePath,
- path: "/file.txt",
- want: linux.Statx{
- Blksize: 0x400,
- Nlink: 1,
- UID: 0,
- GID: 0,
- Mode: 0644 | linux.ModeRegular,
- Size: 13,
- },
- },
- {
- name: "ext3 statx small file",
- image: ext3ImagePath,
- path: "/file.txt",
- want: linux.Statx{
- Blksize: 0x400,
- Nlink: 1,
- UID: 0,
- GID: 0,
- Mode: 0644 | linux.ModeRegular,
- Size: 13,
- },
- },
- {
- name: "ext2 statx small file",
- image: ext2ImagePath,
- path: "/file.txt",
- want: linux.Statx{
- Blksize: 0x400,
- Nlink: 1,
- UID: 0,
- GID: 0,
- Mode: 0644 | linux.ModeRegular,
- Size: 13,
- },
- },
- {
- name: "ext4 statx big file",
- image: ext4ImagePath,
- path: "/bigfile.txt",
- want: linux.Statx{
- Blksize: 0x400,
- Nlink: 1,
- UID: 0,
- GID: 0,
- Mode: 0644 | linux.ModeRegular,
- Size: 13042,
- },
- },
- {
- name: "ext3 statx big file",
- image: ext3ImagePath,
- path: "/bigfile.txt",
- want: linux.Statx{
- Blksize: 0x400,
- Nlink: 1,
- UID: 0,
- GID: 0,
- Mode: 0644 | linux.ModeRegular,
- Size: 13042,
- },
- },
- {
- name: "ext2 statx big file",
- image: ext2ImagePath,
- path: "/bigfile.txt",
- want: linux.Statx{
- Blksize: 0x400,
- Nlink: 1,
- UID: 0,
- GID: 0,
- Mode: 0644 | linux.ModeRegular,
- Size: 13042,
- },
- },
- {
- name: "ext4 statx symlink file",
- image: ext4ImagePath,
- path: "/symlink.txt",
- want: linux.Statx{
- Blksize: 0x400,
- Nlink: 1,
- UID: 0,
- GID: 0,
- Mode: 0777 | linux.ModeSymlink,
- Size: 8,
- },
- },
- {
- name: "ext3 statx symlink file",
- image: ext3ImagePath,
- path: "/symlink.txt",
- want: linux.Statx{
- Blksize: 0x400,
- Nlink: 1,
- UID: 0,
- GID: 0,
- Mode: 0777 | linux.ModeSymlink,
- Size: 8,
- },
- },
- {
- name: "ext2 statx symlink file",
- image: ext2ImagePath,
- path: "/symlink.txt",
- want: linux.Statx{
- Blksize: 0x400,
- Nlink: 1,
- UID: 0,
- GID: 0,
- Mode: 0777 | linux.ModeSymlink,
- Size: 8,
- },
- },
- }
-
- // Ignore the fields that are not supported by filesystem.StatAt yet and
- // those which are likely to change as the image does.
- ignoredFields := map[string]bool{
- "Attributes": true,
- "AttributesMask": true,
- "Atime": true,
- "Blocks": true,
- "Btime": true,
- "Ctime": true,
- "DevMajor": true,
- "DevMinor": true,
- "Ino": true,
- "Mask": true,
- "Mtime": true,
- "RdevMajor": true,
- "RdevMinor": true,
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- ctx, vfsfs, root, tearDown, err := setUp(t, test.image)
- if err != nil {
- t.Fatalf("setUp failed: %v", err)
- }
- defer tearDown()
-
- got, err := vfsfs.StatAt(ctx,
- auth.CredentialsFromContext(ctx),
- &vfs.PathOperation{Root: *root, Start: *root, Path: fspath.Parse(test.path)},
- &vfs.StatOptions{},
- )
- if err != nil {
- t.Fatalf("vfsfs.StatAt failed for file %s in image %s: %v", test.path, test.image, err)
- }
-
- cmpIgnoreFields := cmp.FilterPath(func(p cmp.Path) bool {
- _, ok := ignoredFields[p.String()]
- return ok
- }, cmp.Ignore())
- if diff := cmp.Diff(got, test.want, cmpIgnoreFields, cmpopts.IgnoreUnexported(linux.Statx{})); diff != "" {
- t.Errorf("stat mismatch (-want +got):\n%s", diff)
- }
- })
- }
-}
-
-// TestRead tests the read functionality for vfs file descriptions.
-func TestRead(t *testing.T) {
- type readTest struct {
- name string
- image string
- absPath string
- }
-
- tests := []readTest{
- {
- name: "ext4 read small file",
- image: ext4ImagePath,
- absPath: "/file.txt",
- },
- {
- name: "ext3 read small file",
- image: ext3ImagePath,
- absPath: "/file.txt",
- },
- {
- name: "ext2 read small file",
- image: ext2ImagePath,
- absPath: "/file.txt",
- },
- {
- name: "ext4 read big file",
- image: ext4ImagePath,
- absPath: "/bigfile.txt",
- },
- {
- name: "ext3 read big file",
- image: ext3ImagePath,
- absPath: "/bigfile.txt",
- },
- {
- name: "ext2 read big file",
- image: ext2ImagePath,
- absPath: "/bigfile.txt",
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- ctx, vfsfs, root, tearDown, err := setUp(t, test.image)
- if err != nil {
- t.Fatalf("setUp failed: %v", err)
- }
- defer tearDown()
-
- fd, err := vfsfs.OpenAt(
- ctx,
- auth.CredentialsFromContext(ctx),
- &vfs.PathOperation{Root: *root, Start: *root, Path: fspath.Parse(test.absPath)},
- &vfs.OpenOptions{},
- )
- if err != nil {
- t.Fatalf("vfsfs.OpenAt failed: %v", err)
- }
-
- // Get a local file descriptor and compare its functionality with a vfs file
- // description for the same file.
- localFile, err := testutil.FindFile(path.Join(assetsDir, test.absPath))
- if err != nil {
- t.Fatalf("testutil.FindFile failed for %s: %v", test.absPath, err)
- }
-
- f, err := os.Open(localFile)
- if err != nil {
- t.Fatalf("os.Open failed for %s: %v", localFile, err)
- }
- defer f.Close()
-
- // Read the entire file by reading one byte repeatedly. Doing this stress
- // tests the underlying file reader implementation.
- got := make([]byte, 1)
- want := make([]byte, 1)
- for {
- n, err := f.Read(want)
- fd.Read(ctx, usermem.BytesIOSequence(got), vfs.ReadOptions{})
-
- if diff := cmp.Diff(got, want); diff != "" {
- t.Errorf("file data mismatch (-want +got):\n%s", diff)
- }
-
- // Make sure there is no more file data left after getting EOF.
- if n == 0 || err == io.EOF {
- if n, _ := fd.Read(ctx, usermem.BytesIOSequence(got), vfs.ReadOptions{}); n != 0 {
- t.Errorf("extra unexpected file data in file %s in image %s", test.absPath, test.image)
- }
-
- break
- }
-
- if err != nil {
- t.Fatalf("read failed: %v", err)
- }
- }
- })
- }
-}
-
-// iterDirentsCb is a simple callback which just keeps adding the dirents to an
-// internal list. Implements vfs.IterDirentsCallback.
-type iterDirentsCb struct {
- dirents []vfs.Dirent
-}
-
-// Compiles only if iterDirentCb implements vfs.IterDirentsCallback.
-var _ vfs.IterDirentsCallback = (*iterDirentsCb)(nil)
-
-// newIterDirentCb is the iterDirentsCb constructor.
-func newIterDirentCb() *iterDirentsCb {
- return &iterDirentsCb{dirents: make([]vfs.Dirent, 0)}
-}
-
-// Handle implements vfs.IterDirentsCallback.Handle.
-func (cb *iterDirentsCb) Handle(dirent vfs.Dirent) error {
- cb.dirents = append(cb.dirents, dirent)
- return nil
-}
-
-// TestIterDirents tests the FileDescriptionImpl.IterDirents functionality.
-func TestIterDirents(t *testing.T) {
- type iterDirentTest struct {
- name string
- image string
- path string
- want []vfs.Dirent
- }
-
- wantDirents := []vfs.Dirent{
- {
- Name: ".",
- Type: linux.DT_DIR,
- },
- {
- Name: "..",
- Type: linux.DT_DIR,
- },
- {
- Name: "lost+found",
- Type: linux.DT_DIR,
- },
- {
- Name: "file.txt",
- Type: linux.DT_REG,
- },
- {
- Name: "bigfile.txt",
- Type: linux.DT_REG,
- },
- {
- Name: "symlink.txt",
- Type: linux.DT_LNK,
- },
- }
- tests := []iterDirentTest{
- {
- name: "ext4 root dir iteration",
- image: ext4ImagePath,
- path: "/",
- want: wantDirents,
- },
- {
- name: "ext3 root dir iteration",
- image: ext3ImagePath,
- path: "/",
- want: wantDirents,
- },
- {
- name: "ext2 root dir iteration",
- image: ext2ImagePath,
- path: "/",
- want: wantDirents,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- ctx, vfsfs, root, tearDown, err := setUp(t, test.image)
- if err != nil {
- t.Fatalf("setUp failed: %v", err)
- }
- defer tearDown()
-
- fd, err := vfsfs.OpenAt(
- ctx,
- auth.CredentialsFromContext(ctx),
- &vfs.PathOperation{Root: *root, Start: *root, Path: fspath.Parse(test.path)},
- &vfs.OpenOptions{},
- )
- if err != nil {
- t.Fatalf("vfsfs.OpenAt failed: %v", err)
- }
-
- cb := &iterDirentsCb{}
- if err = fd.IterDirents(ctx, cb); err != nil {
- t.Fatalf("dir fd.IterDirents() failed: %v", err)
- }
-
- sort.Slice(cb.dirents, func(i int, j int) bool { return cb.dirents[i].Name < cb.dirents[j].Name })
- sort.Slice(test.want, func(i int, j int) bool { return test.want[i].Name < test.want[j].Name })
-
- // Ignore the inode number and offset of dirents because those are likely to
- // change as the underlying image changes.
- cmpIgnoreFields := cmp.FilterPath(func(p cmp.Path) bool {
- return p.String() == "Ino" || p.String() == "NextOff"
- }, cmp.Ignore())
- if diff := cmp.Diff(cb.dirents, test.want, cmpIgnoreFields); diff != "" {
- t.Errorf("dirents mismatch (-want +got):\n%s", diff)
- }
- })
- }
-}
-
-// TestRootDir tests that the root directory inode is correctly initialized and
-// returned from setUp.
-func TestRootDir(t *testing.T) {
- type inodeProps struct {
- Mode linux.FileMode
- UID auth.KUID
- GID auth.KGID
- Size uint64
- InodeSize uint16
- Links uint16
- Flags disklayout.InodeFlags
- }
-
- type rootDirTest struct {
- name string
- image string
- wantInode inodeProps
- }
-
- tests := []rootDirTest{
- {
- name: "ext4 root dir",
- image: ext4ImagePath,
- wantInode: inodeProps{
- Mode: linux.ModeDirectory | 0755,
- Size: 0x400,
- InodeSize: 0x80,
- Links: 3,
- Flags: disklayout.InodeFlags{Extents: true},
- },
- },
- {
- name: "ext3 root dir",
- image: ext3ImagePath,
- wantInode: inodeProps{
- Mode: linux.ModeDirectory | 0755,
- Size: 0x400,
- InodeSize: 0x80,
- Links: 3,
- },
- },
- {
- name: "ext2 root dir",
- image: ext2ImagePath,
- wantInode: inodeProps{
- Mode: linux.ModeDirectory | 0755,
- Size: 0x400,
- InodeSize: 0x80,
- Links: 3,
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- _, _, vd, tearDown, err := setUp(t, test.image)
- if err != nil {
- t.Fatalf("setUp failed: %v", err)
- }
- defer tearDown()
-
- d, ok := vd.Dentry().Impl().(*dentry)
- if !ok {
- t.Fatalf("ext dentry of incorrect type: %T", vd.Dentry().Impl())
- }
-
- // Offload inode contents into local structs for comparison.
- gotInode := inodeProps{
- Mode: d.inode.diskInode.Mode(),
- UID: d.inode.diskInode.UID(),
- GID: d.inode.diskInode.GID(),
- Size: d.inode.diskInode.Size(),
- InodeSize: d.inode.diskInode.InodeSize(),
- Links: d.inode.diskInode.LinksCount(),
- Flags: d.inode.diskInode.Flags(),
- }
-
- if diff := cmp.Diff(gotInode, test.wantInode); diff != "" {
- t.Errorf("inode mismatch (-want +got):\n%s", diff)
- }
- })
- }
-}
-
-// TestFilesystemInit tests that the filesystem superblock and block group
-// descriptors are correctly read in and initialized.
-func TestFilesystemInit(t *testing.T) {
- // sb only contains the immutable properties of the superblock.
- type sb struct {
- InodesCount uint32
- BlocksCount uint64
- MaxMountCount uint16
- FirstDataBlock uint32
- BlockSize uint64
- BlocksPerGroup uint32
- ClusterSize uint64
- ClustersPerGroup uint32
- InodeSize uint16
- InodesPerGroup uint32
- BgDescSize uint16
- Magic uint16
- Revision disklayout.SbRevision
- CompatFeatures disklayout.CompatFeatures
- IncompatFeatures disklayout.IncompatFeatures
- RoCompatFeatures disklayout.RoCompatFeatures
- }
-
- // bg only contains the immutable properties of the block group descriptor.
- type bg struct {
- InodeTable uint64
- BlockBitmap uint64
- InodeBitmap uint64
- ExclusionBitmap uint64
- Flags disklayout.BGFlags
- }
-
- type fsInitTest struct {
- name string
- image string
- wantSb sb
- wantBgs []bg
- }
-
- tests := []fsInitTest{
- {
- name: "ext4 filesystem init",
- image: ext4ImagePath,
- wantSb: sb{
- InodesCount: 0x10,
- BlocksCount: 0x40,
- MaxMountCount: 0xffff,
- FirstDataBlock: 0x1,
- BlockSize: 0x400,
- BlocksPerGroup: 0x2000,
- ClusterSize: 0x400,
- ClustersPerGroup: 0x2000,
- InodeSize: 0x80,
- InodesPerGroup: 0x10,
- BgDescSize: 0x40,
- Magic: linux.EXT_SUPER_MAGIC,
- Revision: disklayout.DynamicRev,
- CompatFeatures: disklayout.CompatFeatures{
- ExtAttr: true,
- ResizeInode: true,
- DirIndex: true,
- },
- IncompatFeatures: disklayout.IncompatFeatures{
- DirentFileType: true,
- Extents: true,
- Is64Bit: true,
- FlexBg: true,
- },
- RoCompatFeatures: disklayout.RoCompatFeatures{
- Sparse: true,
- LargeFile: true,
- HugeFile: true,
- DirNlink: true,
- ExtraIsize: true,
- MetadataCsum: true,
- },
- },
- wantBgs: []bg{
- {
- InodeTable: 0x23,
- BlockBitmap: 0x3,
- InodeBitmap: 0x13,
- Flags: disklayout.BGFlags{
- InodeZeroed: true,
- },
- },
- },
- },
- {
- name: "ext3 filesystem init",
- image: ext3ImagePath,
- wantSb: sb{
- InodesCount: 0x10,
- BlocksCount: 0x40,
- MaxMountCount: 0xffff,
- FirstDataBlock: 0x1,
- BlockSize: 0x400,
- BlocksPerGroup: 0x2000,
- ClusterSize: 0x400,
- ClustersPerGroup: 0x2000,
- InodeSize: 0x80,
- InodesPerGroup: 0x10,
- BgDescSize: 0x20,
- Magic: linux.EXT_SUPER_MAGIC,
- Revision: disklayout.DynamicRev,
- CompatFeatures: disklayout.CompatFeatures{
- ExtAttr: true,
- ResizeInode: true,
- DirIndex: true,
- },
- IncompatFeatures: disklayout.IncompatFeatures{
- DirentFileType: true,
- },
- RoCompatFeatures: disklayout.RoCompatFeatures{
- Sparse: true,
- LargeFile: true,
- },
- },
- wantBgs: []bg{
- {
- InodeTable: 0x5,
- BlockBitmap: 0x3,
- InodeBitmap: 0x4,
- Flags: disklayout.BGFlags{
- InodeZeroed: true,
- },
- },
- },
- },
- {
- name: "ext2 filesystem init",
- image: ext2ImagePath,
- wantSb: sb{
- InodesCount: 0x10,
- BlocksCount: 0x40,
- MaxMountCount: 0xffff,
- FirstDataBlock: 0x1,
- BlockSize: 0x400,
- BlocksPerGroup: 0x2000,
- ClusterSize: 0x400,
- ClustersPerGroup: 0x2000,
- InodeSize: 0x80,
- InodesPerGroup: 0x10,
- BgDescSize: 0x20,
- Magic: linux.EXT_SUPER_MAGIC,
- Revision: disklayout.DynamicRev,
- CompatFeatures: disklayout.CompatFeatures{
- ExtAttr: true,
- ResizeInode: true,
- DirIndex: true,
- },
- IncompatFeatures: disklayout.IncompatFeatures{
- DirentFileType: true,
- },
- RoCompatFeatures: disklayout.RoCompatFeatures{
- Sparse: true,
- LargeFile: true,
- },
- },
- wantBgs: []bg{
- {
- InodeTable: 0x5,
- BlockBitmap: 0x3,
- InodeBitmap: 0x4,
- Flags: disklayout.BGFlags{
- InodeZeroed: true,
- },
- },
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- _, _, vd, tearDown, err := setUp(t, test.image)
- if err != nil {
- t.Fatalf("setUp failed: %v", err)
- }
- defer tearDown()
-
- fs, ok := vd.Mount().Filesystem().Impl().(*filesystem)
- if !ok {
- t.Fatalf("ext filesystem of incorrect type: %T", vd.Mount().Filesystem().Impl())
- }
-
- // Offload superblock and block group descriptors contents into
- // local structs for comparison.
- totalFreeInodes := uint32(0)
- totalFreeBlocks := uint64(0)
- gotSb := sb{
- InodesCount: fs.sb.InodesCount(),
- BlocksCount: fs.sb.BlocksCount(),
- MaxMountCount: fs.sb.MaxMountCount(),
- FirstDataBlock: fs.sb.FirstDataBlock(),
- BlockSize: fs.sb.BlockSize(),
- BlocksPerGroup: fs.sb.BlocksPerGroup(),
- ClusterSize: fs.sb.ClusterSize(),
- ClustersPerGroup: fs.sb.ClustersPerGroup(),
- InodeSize: fs.sb.InodeSize(),
- InodesPerGroup: fs.sb.InodesPerGroup(),
- BgDescSize: fs.sb.BgDescSize(),
- Magic: fs.sb.Magic(),
- Revision: fs.sb.Revision(),
- CompatFeatures: fs.sb.CompatibleFeatures(),
- IncompatFeatures: fs.sb.IncompatibleFeatures(),
- RoCompatFeatures: fs.sb.ReadOnlyCompatibleFeatures(),
- }
- gotNumBgs := len(fs.bgs)
- gotBgs := make([]bg, gotNumBgs)
- for i := 0; i < gotNumBgs; i++ {
- gotBgs[i].InodeTable = fs.bgs[i].InodeTable()
- gotBgs[i].BlockBitmap = fs.bgs[i].BlockBitmap()
- gotBgs[i].InodeBitmap = fs.bgs[i].InodeBitmap()
- gotBgs[i].ExclusionBitmap = fs.bgs[i].ExclusionBitmap()
- gotBgs[i].Flags = fs.bgs[i].Flags()
-
- totalFreeInodes += fs.bgs[i].FreeInodesCount()
- totalFreeBlocks += uint64(fs.bgs[i].FreeBlocksCount())
- }
-
- if diff := cmp.Diff(gotSb, test.wantSb); diff != "" {
- t.Errorf("superblock mismatch (-want +got):\n%s", diff)
- }
-
- if diff := cmp.Diff(gotBgs, test.wantBgs); diff != "" {
- t.Errorf("block group descriptors mismatch (-want +got):\n%s", diff)
- }
-
- if diff := cmp.Diff(totalFreeInodes, fs.sb.FreeInodesCount()); diff != "" {
- t.Errorf("total free inodes mismatch (-want +got):\n%s", diff)
- }
-
- if diff := cmp.Diff(totalFreeBlocks, fs.sb.FreeBlocksCount()); diff != "" {
- t.Errorf("total free blocks mismatch (-want +got):\n%s", diff)
- }
- })
- }
-}
diff --git a/pkg/sentry/fsimpl/ext/extent_file.go b/pkg/sentry/fsimpl/ext/extent_file.go
deleted file mode 100644
index f449bc8bd..000000000
--- a/pkg/sentry/fsimpl/ext/extent_file.go
+++ /dev/null
@@ -1,240 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "io"
- "sort"
-
- "gvisor.dev/gvisor/pkg/errors/linuxerr"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/ext/disklayout"
- "gvisor.dev/gvisor/pkg/syserror"
-)
-
-// extentFile is a type of regular file which uses extents to store file data.
-//
-// +stateify savable
-type extentFile struct {
- regFile regularFile
-
- // root is the root extent node. This lives in the 60 byte diskInode.Data().
- // Immutable.
- root disklayout.ExtentNode
-}
-
-// Compiles only if extentFile implements io.ReaderAt.
-var _ io.ReaderAt = (*extentFile)(nil)
-
-// newExtentFile is the extent file constructor. It reads the entire extent
-// tree into memory.
-// TODO(b/134676337): Build extent tree on demand to reduce memory usage.
-func newExtentFile(args inodeArgs) (*extentFile, error) {
- file := &extentFile{}
- file.regFile.impl = file
- file.regFile.inode.init(args, &file.regFile)
- err := file.buildExtTree()
- if err != nil {
- return nil, err
- }
- return file, nil
-}
-
-// buildExtTree builds the extent tree by reading it from disk, running a
-// simple DFS. It first reads the root node from the inode struct in
-// memory. Then it recursively builds the rest of the tree by reading it off
-// disk.
-//
-// Precondition: inode flag InExtents must be set.
-func (f *extentFile) buildExtTree() error {
- rootNodeData := f.regFile.inode.diskInode.Data()
-
- f.root.Header.UnmarshalBytes(rootNodeData[:disklayout.ExtentHeaderSize])
-
- // Root node can not have more than 4 entries: 60 bytes = 1 header + 4 entries.
- if f.root.Header.NumEntries > 4 {
- // read(2) specifies that EINVAL should be returned if the file is unsuitable
- // for reading.
- return linuxerr.EINVAL
- }
-
- f.root.Entries = make([]disklayout.ExtentEntryPair, f.root.Header.NumEntries)
- for i, off := uint16(0), disklayout.ExtentEntrySize; i < f.root.Header.NumEntries; i, off = i+1, off+disklayout.ExtentEntrySize {
- var curEntry disklayout.ExtentEntry
- if f.root.Header.Height == 0 {
- // Leaf node.
- curEntry = &disklayout.Extent{}
- } else {
- // Internal node.
- curEntry = &disklayout.ExtentIdx{}
- }
- curEntry.UnmarshalBytes(rootNodeData[off : off+disklayout.ExtentEntrySize])
- f.root.Entries[i].Entry = curEntry
- }
-
- // If this node is internal, perform DFS.
- if f.root.Header.Height > 0 {
- for i := uint16(0); i < f.root.Header.NumEntries; i++ {
- var err error
- if f.root.Entries[i].Node, err = f.buildExtTreeFromDisk(f.root.Entries[i].Entry); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
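-
-// Illustrative note, not part of the original file: the 60-byte inode data
-// area holds one extent header followed by at most four entries; with 12-byte
-// headers and entries this is 12 + 4*12 = 60 bytes, which is why buildExtTree
-// rejects root nodes with NumEntries > 4.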
-
-// buildExtTreeFromDisk reads the extent tree nodes from disk and recursively
-// builds the tree. Performs a simple DFS. It returns the ExtentNode pointed to
-// by the ExtentEntry.
-func (f *extentFile) buildExtTreeFromDisk(entry disklayout.ExtentEntry) (*disklayout.ExtentNode, error) {
- var header disklayout.ExtentHeader
- off := entry.PhysicalBlock() * f.regFile.inode.blkSize
- err := readFromDisk(f.regFile.inode.fs.dev, int64(off), &header)
- if err != nil {
- return nil, err
- }
-
- entries := make([]disklayout.ExtentEntryPair, header.NumEntries)
- for i, off := uint16(0), off+disklayout.ExtentEntrySize; i < header.NumEntries; i, off = i+1, off+disklayout.ExtentEntrySize {
- var curEntry disklayout.ExtentEntry
- if header.Height == 0 {
- // Leaf node.
- curEntry = &disklayout.Extent{}
- } else {
- // Internal node.
- curEntry = &disklayout.ExtentIdx{}
- }
-
- err := readFromDisk(f.regFile.inode.fs.dev, int64(off), curEntry)
- if err != nil {
- return nil, err
- }
- entries[i].Entry = curEntry
- }
-
- // If this node is internal, perform DFS.
- if header.Height > 0 {
- for i := uint16(0); i < header.NumEntries; i++ {
- var err error
- entries[i].Node, err = f.buildExtTreeFromDisk(entries[i].Entry)
- if err != nil {
- return nil, err
- }
- }
- }
-
- return &disklayout.ExtentNode{header, entries}, nil
-}
-
-// ReadAt implements io.ReaderAt.ReadAt.
-func (f *extentFile) ReadAt(dst []byte, off int64) (int, error) {
- if len(dst) == 0 {
- return 0, nil
- }
-
- if off < 0 {
- return 0, linuxerr.EINVAL
- }
-
- if uint64(off) >= f.regFile.inode.diskInode.Size() {
- return 0, io.EOF
- }
-
- n, err := f.read(&f.root, uint64(off), dst)
- if n < len(dst) && err == nil {
- err = io.EOF
- }
- return n, err
-}
-
-// read is the recursive step of extentFile.ReadAt which traverses the extent
-// tree from the node passed and reads file data.
-func (f *extentFile) read(node *disklayout.ExtentNode, off uint64, dst []byte) (int, error) {
- // Perform a binary search for the entry covering bytes starting at off.
- // A highly fragmented filesystem can have up to 340 entries and so linear
- // search should be avoided. Finds the first entry which does not cover the
- // file block we want and subtracts 1 to get the desired index.
- fileBlk := uint32(off / f.regFile.inode.blkSize)
- n := len(node.Entries)
- found := sort.Search(n, func(i int) bool {
- return node.Entries[i].Entry.FileBlock() > fileBlk
- }) - 1
-
- // We should be in this recursive step only if the data we want exists under
- // the current node.
- if found < 0 {
- panic("searching for a file block in an extent entry which does not cover it")
- }
-
- read := 0
- toRead := len(dst)
- var curR int
- var err error
- for i := found; i < n && read < toRead; i++ {
- if node.Header.Height == 0 {
- curR, err = f.readFromExtent(node.Entries[i].Entry.(*disklayout.Extent), off, dst[read:])
- } else {
- curR, err = f.read(node.Entries[i].Node, off, dst[read:])
- }
-
- read += curR
- off += uint64(curR)
- if err != nil {
- return read, err
- }
- }
-
- return read, nil
-}
-
-// readFromExtent reads file data from the extent. It takes advantage of the
-// sequential nature of extents and reads file data from multiple blocks in one
-// call.
-//
-// A non-nil error indicates that this is a partial read and there is probably
-// more to read from this extent. The caller should propagate the error upward
-// and not move to the next extent in the tree.
-//
-// A subsequent call to extentFile.ReadAt should continue reading from where we
-// left off as expected.
-func (f *extentFile) readFromExtent(ex *disklayout.Extent, off uint64, dst []byte) (int, error) {
- curFileBlk := uint32(off / f.regFile.inode.blkSize)
- exFirstFileBlk := ex.FileBlock()
- exLastFileBlk := exFirstFileBlk + uint32(ex.Length) // This is exclusive.
-
- // We should be in this recursive step only if the data we want exists under
- // the current extent.
- if curFileBlk < exFirstFileBlk || exLastFileBlk <= curFileBlk {
- panic("searching for a file block in an extent which does not cover it")
- }
-
- curPhyBlk := uint64(curFileBlk-exFirstFileBlk) + ex.PhysicalBlock()
- readStart := curPhyBlk*f.regFile.inode.blkSize + (off % f.regFile.inode.blkSize)
-
- endPhyBlk := ex.PhysicalBlock() + uint64(ex.Length)
- extentEnd := endPhyBlk * f.regFile.inode.blkSize // This is exclusive.
-
- toRead := int(extentEnd - readStart)
- if len(dst) < toRead {
- toRead = len(dst)
- }
-
- n, _ := f.regFile.inode.fs.dev.ReadAt(dst[:toRead], int64(readStart))
- if n < toRead {
- return n, syserror.EIO
- }
- return n, nil
-}
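-
-// Illustrative note, not part of the original file: with a 1 KiB block size
-// and an extent covering file blocks [4, 7) backed by physical blocks starting
-// at 100, a read at file offset 5*1024+16 lands in physical block 101, so
-// readStart = 101*1024 + 16 and at most extentEnd - readStart =
-// 103*1024 - readStart = 2032 bytes can be served from this extent before the
-// caller moves on to the next one.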
diff --git a/pkg/sentry/fsimpl/ext/extent_test.go b/pkg/sentry/fsimpl/ext/extent_test.go
deleted file mode 100644
index 985f76ac0..000000000
--- a/pkg/sentry/fsimpl/ext/extent_test.go
+++ /dev/null
@@ -1,266 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "bytes"
- "math/rand"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "github.com/google/go-cmp/cmp/cmpopts"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/ext/disklayout"
-)
-
-const (
- // mockExtentBlkSize is the mock block size used for testing.
- // No block has more than 1 header + 4 entries.
- mockExtentBlkSize = uint64(64)
-)
-
-// The tree described below looks like:
-//
-// 0.{Head}[Idx][Idx]
-// / \
-// / \
-// 1.{Head}[Ext][Ext] 2.{Head}[Idx]
-// / | \
-// [Phy] [Phy, Phy] 3.{Head}[Ext]
-// |
-// [Phy, Phy, Phy]
-//
-// Legend:
-// - Head = ExtentHeader
-// - Idx = ExtentIdx
-// - Ext = Extent
-// - Phy = Physical Block
-//
-// Please note that ext4 might not construct extent trees looking like this.
-// This is purely for testing the tree traversal logic.
-var (
- node3 = &disklayout.ExtentNode{
- Header: disklayout.ExtentHeader{
- Magic: disklayout.ExtentMagic,
- NumEntries: 1,
- MaxEntries: 4,
- Height: 0,
- },
- Entries: []disklayout.ExtentEntryPair{
- {
- Entry: &disklayout.Extent{
- FirstFileBlock: 3,
- Length: 3,
- StartBlockLo: 6,
- },
- Node: nil,
- },
- },
- }
-
- node2 = &disklayout.ExtentNode{
- Header: disklayout.ExtentHeader{
- Magic: disklayout.ExtentMagic,
- NumEntries: 1,
- MaxEntries: 4,
- Height: 1,
- },
- Entries: []disklayout.ExtentEntryPair{
- {
- Entry: &disklayout.ExtentIdx{
- FirstFileBlock: 3,
- ChildBlockLo: 2,
- },
- Node: node3,
- },
- },
- }
-
- node1 = &disklayout.ExtentNode{
- Header: disklayout.ExtentHeader{
- Magic: disklayout.ExtentMagic,
- NumEntries: 2,
- MaxEntries: 4,
- Height: 0,
- },
- Entries: []disklayout.ExtentEntryPair{
- {
- Entry: &disklayout.Extent{
- FirstFileBlock: 0,
- Length: 1,
- StartBlockLo: 3,
- },
- Node: nil,
- },
- {
- Entry: &disklayout.Extent{
- FirstFileBlock: 1,
- Length: 2,
- StartBlockLo: 4,
- },
- Node: nil,
- },
- },
- }
-
- node0 = &disklayout.ExtentNode{
- Header: disklayout.ExtentHeader{
- Magic: disklayout.ExtentMagic,
- NumEntries: 2,
- MaxEntries: 4,
- Height: 2,
- },
- Entries: []disklayout.ExtentEntryPair{
- {
- Entry: &disklayout.ExtentIdx{
- FirstFileBlock: 0,
- ChildBlockLo: 0,
- },
- Node: node1,
- },
- {
- Entry: &disklayout.ExtentIdx{
- FirstFileBlock: 3,
- ChildBlockLo: 1,
- },
- Node: node2,
- },
- },
- }
-)
-
-// TestExtentReader stress tests extent file reads. It performs random
-// length reads from all possible positions in the extent tree.
-func TestExtentReader(t *testing.T) {
- mockExtentFile, want := extentTreeSetUp(t, node0)
- n := len(want)
-
- for from := 0; from < n; from++ {
- got := make([]byte, n-from)
-
- if read, err := mockExtentFile.ReadAt(got, int64(from)); err != nil {
- t.Fatalf("file read operation from offset %d to %d only read %d bytes: %v", from, n, read, err)
- }
-
- if diff := cmp.Diff(got, want[from:]); diff != "" {
- t.Fatalf("file data from offset %d to %d mismatched (-want +got):\n%s", from, n, diff)
- }
- }
-}
-
-// TestBuildExtentTree tests the extent tree building logic.
-func TestBuildExtentTree(t *testing.T) {
- mockExtentFile, _ := extentTreeSetUp(t, node0)
-
- opt := cmpopts.IgnoreUnexported(disklayout.ExtentIdx{}, disklayout.ExtentHeader{})
- if diff := cmp.Diff(&mockExtentFile.root, node0, opt); diff != "" {
- t.Errorf("extent tree mismatch (-want +got):\n%s", diff)
- }
-}
-
-// extentTreeSetUp writes the passed extent tree to a mock disk and constructs
-// a mock extent file with the same tree built in it. It also writes random
-// file data to disk and returns that data.
-func extentTreeSetUp(t *testing.T, root *disklayout.ExtentNode) (*extentFile, []byte) {
- t.Helper()
-
- mockDisk := make([]byte, mockExtentBlkSize*10)
- mockExtentFile := &extentFile{}
- args := inodeArgs{
- fs: &filesystem{
- dev: bytes.NewReader(mockDisk),
- },
- diskInode: &disklayout.InodeNew{
- InodeOld: disklayout.InodeOld{
- SizeLo: uint32(mockExtentBlkSize) * getNumPhyBlks(root),
- },
- },
- blkSize: mockExtentBlkSize,
- }
- mockExtentFile.regFile.inode.init(args, &mockExtentFile.regFile)
-
- fileData := writeTree(&mockExtentFile.regFile.inode, mockDisk, node0, mockExtentBlkSize)
-
- if err := mockExtentFile.buildExtTree(); err != nil {
- t.Fatalf("inode.buildExtTree failed: %v", err)
- }
- return mockExtentFile, fileData
-}
-
-// writeTree writes the tree represented by `root` to the inode and disk. It
-// also writes random file data on disk.
-func writeTree(in *inode, disk []byte, root *disklayout.ExtentNode, mockExtentBlkSize uint64) []byte {
- rootData := in.diskInode.Data()
- root.Header.MarshalBytes(rootData)
- off := root.Header.SizeBytes()
- for _, ep := range root.Entries {
- ep.Entry.MarshalBytes(rootData[off:])
- off += ep.Entry.SizeBytes()
- }
-
- var fileData []byte
- for _, ep := range root.Entries {
- if root.Header.Height == 0 {
- fileData = append(fileData, writeFileDataToExtent(disk, ep.Entry.(*disklayout.Extent))...)
- } else {
- fileData = append(fileData, writeTreeToDisk(disk, ep)...)
- }
- }
- return fileData
-}
-
-// writeTreeToDisk is the recursive step for writeTree which writes the tree
-// on the disk only. Also writes random file data on disk.
-func writeTreeToDisk(disk []byte, curNode disklayout.ExtentEntryPair) []byte {
- nodeData := disk[curNode.Entry.PhysicalBlock()*mockExtentBlkSize:]
- curNode.Node.Header.MarshalBytes(nodeData)
- off := curNode.Node.Header.SizeBytes()
- for _, ep := range curNode.Node.Entries {
- ep.Entry.MarshalBytes(nodeData[off:])
- off += ep.Entry.SizeBytes()
- }
-
- var fileData []byte
- for _, ep := range curNode.Node.Entries {
- if curNode.Node.Header.Height == 0 {
- fileData = append(fileData, writeFileDataToExtent(disk, ep.Entry.(*disklayout.Extent))...)
- } else {
- fileData = append(fileData, writeTreeToDisk(disk, ep)...)
- }
- }
- return fileData
-}
-
-// writeFileDataToExtent writes random bytes to the blocks on disk that the
-// passed extent points to.
-func writeFileDataToExtent(disk []byte, ex *disklayout.Extent) []byte {
- phyExStartBlk := ex.PhysicalBlock()
- phyExStartOff := phyExStartBlk * mockExtentBlkSize
- phyExEndOff := phyExStartOff + uint64(ex.Length)*mockExtentBlkSize
- rand.Read(disk[phyExStartOff:phyExEndOff])
- return disk[phyExStartOff:phyExEndOff]
-}
-
-// getNumPhyBlks returns the number of physical blocks covered under the node.
-func getNumPhyBlks(node *disklayout.ExtentNode) uint32 {
- var res uint32
- for _, ep := range node.Entries {
- if node.Header.Height == 0 {
- res += uint32(ep.Entry.(*disklayout.Extent).Length)
- } else {
- res += getNumPhyBlks(ep.Node)
- }
- }
- return res
-}
diff --git a/pkg/sentry/fsimpl/ext/file_description.go b/pkg/sentry/fsimpl/ext/file_description.go
deleted file mode 100644
index 2e9033c1d..000000000
--- a/pkg/sentry/fsimpl/ext/file_description.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/errors/linuxerr"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
-)
-
-// fileDescription is embedded by ext implementations of
-// vfs.FileDescriptionImpl.
-type fileDescription struct {
- vfsfd vfs.FileDescription
- vfs.FileDescriptionDefaultImpl
- vfs.LockFD
-}
-
-func (fd *fileDescription) filesystem() *filesystem {
- return fd.vfsfd.Mount().Filesystem().Impl().(*filesystem)
-}
-
-func (fd *fileDescription) inode() *inode {
- return fd.vfsfd.Dentry().Impl().(*dentry).inode
-}
-
-// Stat implements vfs.FileDescriptionImpl.Stat.
-func (fd *fileDescription) Stat(ctx context.Context, opts vfs.StatOptions) (linux.Statx, error) {
- var stat linux.Statx
- fd.inode().statTo(&stat)
- return stat, nil
-}
-
-// SetStat implements vfs.FileDescriptionImpl.SetStat.
-func (fd *fileDescription) SetStat(ctx context.Context, opts vfs.SetStatOptions) error {
- if opts.Stat.Mask == 0 {
- return nil
- }
- return linuxerr.EPERM
-}
-
-// StatFS implements vfs.FileDescriptionImpl.StatFS.
-func (fd *fileDescription) StatFS(ctx context.Context) (linux.Statfs, error) {
- var stat linux.Statfs
- fd.filesystem().statTo(&stat)
- return stat, nil
-}
-
-// Sync implements vfs.FileDescriptionImpl.Sync.
-func (fd *fileDescription) Sync(ctx context.Context) error {
- return nil
-}
diff --git a/pkg/sentry/fsimpl/ext/filesystem.go b/pkg/sentry/fsimpl/ext/filesystem.go
deleted file mode 100644
index bcc7588da..000000000
--- a/pkg/sentry/fsimpl/ext/filesystem.go
+++ /dev/null
@@ -1,556 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "errors"
- "io"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/errors/linuxerr"
- "gvisor.dev/gvisor/pkg/fspath"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/ext/disklayout"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
-)
-
-var (
-	// errResolveDirent indicates that the vfs.ResolvingPath.Component() does
-	// not exist in the dentry tree but does exist on disk, so it has to be read
-	// in using the in-memory dirent and added to the dentry tree. This usually
-	// indicates the need to lock filesystem.mu for writing.
- errResolveDirent = errors.New("resolve path component using dirent")
-)
-
-// filesystem implements vfs.FilesystemImpl.
-//
-// +stateify savable
-type filesystem struct {
- vfsfs vfs.Filesystem
-
- // mu serializes changes to the Dentry tree.
- mu sync.RWMutex `state:"nosave"`
-
- // dev represents the underlying fs device. It does not require protection
- // because io.ReaderAt permits concurrent read calls to it. It translates to
- // the pread syscall which passes on the read request directly to the device
- // driver. Device drivers are intelligent in serving multiple concurrent read
- // requests in the optimal order (taking locality into consideration).
- dev io.ReaderAt
-
- // inodeCache maps absolute inode numbers to the corresponding Inode struct.
- // Inodes should be removed from this once their reference count hits 0.
- //
-	// Protected by mu because most additions (see IterDirents) and all removals
-	// from this correspond to a change in the dentry tree.
- inodeCache map[uint32]*inode
-
- // sb represents the filesystem superblock. Immutable after initialization.
- sb disklayout.SuperBlock
-
- // bgs represents all the block group descriptors for the filesystem.
- // Immutable after initialization.
- bgs []disklayout.BlockGroup
-
- // devMinor is this filesystem's device minor number. Immutable after
- // initialization.
- devMinor uint32
-}
-
-// Compiles only if filesystem implements vfs.FilesystemImpl.
-var _ vfs.FilesystemImpl = (*filesystem)(nil)
-
-// stepLocked resolves rp.Component() in the parent directory vfsd. The write
-// parameter indicates whether the caller has acquired filesystem.mu for
-// writing. If true, an existing inode on disk can be added to the dentry tree
-// if it is not already present.
-//
-// stepLocked is loosely analogous to fs/namei.c:walk_component().
-//
-// Preconditions:
-// * filesystem.mu must be locked (for writing if write param is true).
-// * !rp.Done().
-// * inode == vfsd.Impl().(*Dentry).inode.
-func stepLocked(ctx context.Context, rp *vfs.ResolvingPath, vfsd *vfs.Dentry, inode *inode, write bool) (*vfs.Dentry, *inode, error) {
- if !inode.isDir() {
- return nil, nil, linuxerr.ENOTDIR
- }
- if err := inode.checkPermissions(rp.Credentials(), vfs.MayExec); err != nil {
- return nil, nil, err
- }
-
- for {
- name := rp.Component()
- if name == "." {
- rp.Advance()
- return vfsd, inode, nil
- }
- d := vfsd.Impl().(*dentry)
- if name == ".." {
- isRoot, err := rp.CheckRoot(ctx, vfsd)
- if err != nil {
- return nil, nil, err
- }
- if isRoot || d.parent == nil {
- rp.Advance()
- return vfsd, inode, nil
- }
- if err := rp.CheckMount(ctx, &d.parent.vfsd); err != nil {
- return nil, nil, err
- }
- rp.Advance()
- return &d.parent.vfsd, d.parent.inode, nil
- }
-
- dir := inode.impl.(*directory)
- child, ok := dir.childCache[name]
- if !ok {
- // We may need to instantiate a new dentry for this child.
- childDirent, ok := dir.childMap[name]
- if !ok {
- // The underlying inode does not exist on disk.
- return nil, nil, syserror.ENOENT
- }
-
- if !write {
- // filesystem.mu must be held for writing to add to the dentry tree.
- return nil, nil, errResolveDirent
- }
-
- // Create and add the component's dirent to the dentry tree.
- fs := rp.Mount().Filesystem().Impl().(*filesystem)
- childInode, err := fs.getOrCreateInodeLocked(childDirent.diskDirent.Inode())
- if err != nil {
- return nil, nil, err
- }
- // incRef because this is being added to the dentry tree.
- childInode.incRef()
- child = newDentry(childInode)
- child.parent = d
- child.name = name
- dir.childCache[name] = child
- }
- if err := rp.CheckMount(ctx, &child.vfsd); err != nil {
- return nil, nil, err
- }
- if child.inode.isSymlink() && rp.ShouldFollowSymlink() {
- if err := rp.HandleSymlink(child.inode.impl.(*symlink).target); err != nil {
- return nil, nil, err
- }
- continue
- }
- rp.Advance()
- return &child.vfsd, child.inode, nil
- }
-}
-
-// walkLocked resolves rp to an existing file. The write parameter indicates
-// whether the caller has acquired filesystem.mu for writing. If true,
-// additions can be made to the dentry tree while walking.
-// If errResolveDirent is returned, the walk needs to be continued with an
-// upgraded filesystem.mu.
-//
-// walkLocked is loosely analogous to Linux's fs/namei.c:path_lookupat().
-//
-// Preconditions:
-// * filesystem.mu must be locked (for writing if write param is true).
-func walkLocked(ctx context.Context, rp *vfs.ResolvingPath, write bool) (*vfs.Dentry, *inode, error) {
- vfsd := rp.Start()
- inode := vfsd.Impl().(*dentry).inode
- for !rp.Done() {
- var err error
- vfsd, inode, err = stepLocked(ctx, rp, vfsd, inode, write)
- if err != nil {
- return nil, nil, err
- }
- }
- if rp.MustBeDir() && !inode.isDir() {
- return nil, nil, linuxerr.ENOTDIR
- }
- return vfsd, inode, nil
-}
-
-// walkParentLocked resolves all but the last path component of rp to an
-// existing directory. It does not check that the returned directory is
-// searchable by the provider of rp. The write parameter indicates whether the
-// caller has acquired filesystem.mu for writing. If true, additions can be
-// made to the dentry tree while walking.
-// If errResolveDirent is returned, the walk needs to be continued with an
-// upgraded filesystem.mu.
-//
-// walkParentLocked is loosely analogous to Linux's fs/namei.c:path_parentat().
-//
-// Preconditions:
-// * filesystem.mu must be locked (for writing if write param is true).
-// * !rp.Done().
-func walkParentLocked(ctx context.Context, rp *vfs.ResolvingPath, write bool) (*vfs.Dentry, *inode, error) {
- vfsd := rp.Start()
- inode := vfsd.Impl().(*dentry).inode
- for !rp.Final() {
- var err error
- vfsd, inode, err = stepLocked(ctx, rp, vfsd, inode, write)
- if err != nil {
- return nil, nil, err
- }
- }
- if !inode.isDir() {
- return nil, nil, linuxerr.ENOTDIR
- }
- return vfsd, inode, nil
-}
-
-// walk resolves rp to an existing file. If parent is true, it resolves rp only
-// up to the parent of the last component, which should be an existing
-// directory. If parent is false, it resolves rp entirely. It attempts to
-// resolve the path as far as it can with a read lock and upgrades the lock if needed.
-func (fs *filesystem) walk(ctx context.Context, rp *vfs.ResolvingPath, parent bool) (*vfs.Dentry, *inode, error) {
- var (
- vfsd *vfs.Dentry
- inode *inode
- err error
- )
-
- // Try walking with the hopes that all dentries have already been pulled out
- // of disk. This reduces congestion (allows concurrent walks).
- fs.mu.RLock()
- if parent {
- vfsd, inode, err = walkParentLocked(ctx, rp, false)
- } else {
- vfsd, inode, err = walkLocked(ctx, rp, false)
- }
- fs.mu.RUnlock()
-
- if err == errResolveDirent {
- // Upgrade lock and continue walking. Lock upgrading in the middle of the
- // walk is fine as this is a read only filesystem.
- fs.mu.Lock()
- if parent {
- vfsd, inode, err = walkParentLocked(ctx, rp, true)
- } else {
- vfsd, inode, err = walkLocked(ctx, rp, true)
- }
- fs.mu.Unlock()
- }
-
- return vfsd, inode, err
-}
-
-// getOrCreateInodeLocked gets the inode corresponding to the inode number passed in.
-// It creates a new one with the given inode number if one does not exist.
-// The caller must increment the ref count if adding this to the dentry tree.
-//
-// Precondition: must be holding fs.mu for writing.
-func (fs *filesystem) getOrCreateInodeLocked(inodeNum uint32) (*inode, error) {
- if in, ok := fs.inodeCache[inodeNum]; ok {
- return in, nil
- }
-
- in, err := newInode(fs, inodeNum)
- if err != nil {
- return nil, err
- }
-
- fs.inodeCache[inodeNum] = in
- return in, nil
-}
-
-// statTo writes the statfs fields to the output parameter.
-func (fs *filesystem) statTo(stat *linux.Statfs) {
- stat.Type = uint64(fs.sb.Magic())
- stat.BlockSize = int64(fs.sb.BlockSize())
- stat.Blocks = fs.sb.BlocksCount()
- stat.BlocksFree = fs.sb.FreeBlocksCount()
- stat.BlocksAvailable = fs.sb.FreeBlocksCount()
- stat.Files = uint64(fs.sb.InodesCount())
- stat.FilesFree = uint64(fs.sb.FreeInodesCount())
- stat.NameLength = disklayout.MaxFileName
- stat.FragmentSize = int64(fs.sb.BlockSize())
- // TODO(b/134676337): Set Statfs.Flags and Statfs.FSID.
-}
-
-// AccessAt implements vfs.Filesystem.Impl.AccessAt.
-func (fs *filesystem) AccessAt(ctx context.Context, rp *vfs.ResolvingPath, creds *auth.Credentials, ats vfs.AccessTypes) error {
- _, inode, err := fs.walk(ctx, rp, false)
- if err != nil {
- return err
- }
- return inode.checkPermissions(rp.Credentials(), ats)
-}
-
-// GetDentryAt implements vfs.FilesystemImpl.GetDentryAt.
-func (fs *filesystem) GetDentryAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.GetDentryOptions) (*vfs.Dentry, error) {
- vfsd, inode, err := fs.walk(ctx, rp, false)
- if err != nil {
- return nil, err
- }
-
- if opts.CheckSearchable {
- if !inode.isDir() {
- return nil, linuxerr.ENOTDIR
- }
- if err := inode.checkPermissions(rp.Credentials(), vfs.MayExec); err != nil {
- return nil, err
- }
- }
-
- inode.incRef()
- return vfsd, nil
-}
-
-// GetParentDentryAt implements vfs.FilesystemImpl.GetParentDentryAt.
-func (fs *filesystem) GetParentDentryAt(ctx context.Context, rp *vfs.ResolvingPath) (*vfs.Dentry, error) {
- vfsd, inode, err := fs.walk(ctx, rp, true)
- if err != nil {
- return nil, err
- }
- inode.incRef()
- return vfsd, nil
-}
-
-// OpenAt implements vfs.FilesystemImpl.OpenAt.
-func (fs *filesystem) OpenAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.OpenOptions) (*vfs.FileDescription, error) {
- vfsd, inode, err := fs.walk(ctx, rp, false)
- if err != nil {
- return nil, err
- }
-
- // EROFS is returned if write access is needed.
- if vfs.MayWriteFileWithOpenFlags(opts.Flags) || opts.Flags&(linux.O_CREAT|linux.O_EXCL|linux.O_TMPFILE) != 0 {
- return nil, linuxerr.EROFS
- }
- return inode.open(rp, vfsd, &opts)
-}
-
-// ReadlinkAt implements vfs.FilesystemImpl.ReadlinkAt.
-func (fs *filesystem) ReadlinkAt(ctx context.Context, rp *vfs.ResolvingPath) (string, error) {
- _, inode, err := fs.walk(ctx, rp, false)
- if err != nil {
- return "", err
- }
- symlink, ok := inode.impl.(*symlink)
- if !ok {
- return "", linuxerr.EINVAL
- }
- return symlink.target, nil
-}
-
-// StatAt implements vfs.FilesystemImpl.StatAt.
-func (fs *filesystem) StatAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.StatOptions) (linux.Statx, error) {
- _, inode, err := fs.walk(ctx, rp, false)
- if err != nil {
- return linux.Statx{}, err
- }
- var stat linux.Statx
- inode.statTo(&stat)
- return stat, nil
-}
-
-// StatFSAt implements vfs.FilesystemImpl.StatFSAt.
-func (fs *filesystem) StatFSAt(ctx context.Context, rp *vfs.ResolvingPath) (linux.Statfs, error) {
- if _, _, err := fs.walk(ctx, rp, false); err != nil {
- return linux.Statfs{}, err
- }
-
- var stat linux.Statfs
- fs.statTo(&stat)
- return stat, nil
-}
-
-// Release implements vfs.FilesystemImpl.Release.
-func (fs *filesystem) Release(ctx context.Context) {
- fs.vfsfs.VirtualFilesystem().PutAnonBlockDevMinor(fs.devMinor)
-}
-
-// Sync implements vfs.FilesystemImpl.Sync.
-func (fs *filesystem) Sync(ctx context.Context) error {
- // This is a readonly filesystem for now.
- return nil
-}
-
-// The vfs.FilesystemImpl functions below return EROFS because their respective
-// man pages say that EROFS must be returned if the path resolves to a file on
-// this read-only filesystem.
-
-// LinkAt implements vfs.FilesystemImpl.LinkAt.
-func (fs *filesystem) LinkAt(ctx context.Context, rp *vfs.ResolvingPath, vd vfs.VirtualDentry) error {
- if rp.Done() {
- return linuxerr.EEXIST
- }
-
- if _, _, err := fs.walk(ctx, rp, true); err != nil {
- return err
- }
-
- return linuxerr.EROFS
-}
-
-// MkdirAt implements vfs.FilesystemImpl.MkdirAt.
-func (fs *filesystem) MkdirAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.MkdirOptions) error {
- if rp.Done() {
- return linuxerr.EEXIST
- }
-
- if _, _, err := fs.walk(ctx, rp, true); err != nil {
- return err
- }
-
- return linuxerr.EROFS
-}
-
-// MknodAt implements vfs.FilesystemImpl.MknodAt.
-func (fs *filesystem) MknodAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.MknodOptions) error {
- if rp.Done() {
- return linuxerr.EEXIST
- }
-
- _, _, err := fs.walk(ctx, rp, true)
- if err != nil {
- return err
- }
-
- return linuxerr.EROFS
-}
-
-// RenameAt implements vfs.FilesystemImpl.RenameAt.
-func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldParentVD vfs.VirtualDentry, oldName string, opts vfs.RenameOptions) error {
- if rp.Done() {
- return syserror.ENOENT
- }
-
- _, _, err := fs.walk(ctx, rp, false)
- if err != nil {
- return err
- }
-
- return linuxerr.EROFS
-}
-
-// RmdirAt implements vfs.FilesystemImpl.RmdirAt.
-func (fs *filesystem) RmdirAt(ctx context.Context, rp *vfs.ResolvingPath) error {
- _, inode, err := fs.walk(ctx, rp, false)
- if err != nil {
- return err
- }
-
- if !inode.isDir() {
- return linuxerr.ENOTDIR
- }
-
- return linuxerr.EROFS
-}
-
-// SetStatAt implements vfs.FilesystemImpl.SetStatAt.
-func (fs *filesystem) SetStatAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.SetStatOptions) error {
- _, _, err := fs.walk(ctx, rp, false)
- if err != nil {
- return err
- }
-
- return linuxerr.EROFS
-}
-
-// SymlinkAt implements vfs.FilesystemImpl.SymlinkAt.
-func (fs *filesystem) SymlinkAt(ctx context.Context, rp *vfs.ResolvingPath, target string) error {
- if rp.Done() {
- return linuxerr.EEXIST
- }
-
- _, _, err := fs.walk(ctx, rp, true)
- if err != nil {
- return err
- }
-
- return linuxerr.EROFS
-}
-
-// UnlinkAt implements vfs.FilesystemImpl.UnlinkAt.
-func (fs *filesystem) UnlinkAt(ctx context.Context, rp *vfs.ResolvingPath) error {
- _, inode, err := fs.walk(ctx, rp, false)
- if err != nil {
- return err
- }
-
- if inode.isDir() {
- return syserror.EISDIR
- }
-
- return linuxerr.EROFS
-}
-
-// BoundEndpointAt implements vfs.FilesystemImpl.BoundEndpointAt.
-func (fs *filesystem) BoundEndpointAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.BoundEndpointOptions) (transport.BoundEndpoint, error) {
- _, inode, err := fs.walk(ctx, rp, false)
- if err != nil {
- return nil, err
- }
- if err := inode.checkPermissions(rp.Credentials(), vfs.MayWrite); err != nil {
- return nil, err
- }
-
- // TODO(b/134676337): Support sockets.
- return nil, linuxerr.ECONNREFUSED
-}
-
-// ListXattrAt implements vfs.FilesystemImpl.ListXattrAt.
-func (fs *filesystem) ListXattrAt(ctx context.Context, rp *vfs.ResolvingPath, size uint64) ([]string, error) {
- _, _, err := fs.walk(ctx, rp, false)
- if err != nil {
- return nil, err
- }
- return nil, linuxerr.ENOTSUP
-}
-
-// GetXattrAt implements vfs.FilesystemImpl.GetXattrAt.
-func (fs *filesystem) GetXattrAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.GetXattrOptions) (string, error) {
- _, _, err := fs.walk(ctx, rp, false)
- if err != nil {
- return "", err
- }
- return "", linuxerr.ENOTSUP
-}
-
-// SetXattrAt implements vfs.FilesystemImpl.SetXattrAt.
-func (fs *filesystem) SetXattrAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.SetXattrOptions) error {
- _, _, err := fs.walk(ctx, rp, false)
- if err != nil {
- return err
- }
- return linuxerr.ENOTSUP
-}
-
-// RemoveXattrAt implements vfs.FilesystemImpl.RemoveXattrAt.
-func (fs *filesystem) RemoveXattrAt(ctx context.Context, rp *vfs.ResolvingPath, name string) error {
- _, _, err := fs.walk(ctx, rp, false)
- if err != nil {
- return err
- }
- return linuxerr.ENOTSUP
-}
-
-// PrependPath implements vfs.FilesystemImpl.PrependPath.
-func (fs *filesystem) PrependPath(ctx context.Context, vfsroot, vd vfs.VirtualDentry, b *fspath.Builder) error {
- fs.mu.RLock()
- defer fs.mu.RUnlock()
- return genericPrependPath(vfsroot, vd.Mount(), vd.Dentry().Impl().(*dentry), b)
-}
-
-// MountOptions implements vfs.FilesystemImpl.MountOptions.
-func (fs *filesystem) MountOptions() string {
- return ""
-}
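The walk comments above describe an optimistic strategy: attempt the whole resolution under the read lock, and retry under the write lock only when the read-only pass reports errResolveDirent. The following is a minimal standalone sketch of that same pattern, not gVisor code; the cache type and the errNeedsWrite sentinel are invented for illustration.

package main

import (
	"errors"
	"fmt"
	"sync"
)

var errNeedsWrite = errors.New("lookup requires cache mutation")

type cache struct {
	mu      sync.RWMutex
	entries map[string]string
}

// lookupLocked returns the cached value for key. If write is false it refuses
// to populate missing entries and signals errNeedsWrite instead.
func (c *cache) lookupLocked(key string, write bool) (string, error) {
	if v, ok := c.entries[key]; ok {
		return v, nil
	}
	if !write {
		return "", errNeedsWrite
	}
	v := "loaded:" + key // stand-in for reading the entry off disk
	c.entries[key] = v
	return v, nil
}

// lookup mirrors the shape of filesystem.walk: read lock first, upgrade to the
// write lock only when the read-only pass could not finish.
func (c *cache) lookup(key string) (string, error) {
	c.mu.RLock()
	v, err := c.lookupLocked(key, false)
	c.mu.RUnlock()
	if err == errNeedsWrite {
		c.mu.Lock()
		v, err = c.lookupLocked(key, true)
		c.mu.Unlock()
	}
	return v, err
}

func main() {
	c := &cache{entries: map[string]string{"hot": "cached"}}
	fmt.Println(c.lookup("hot"))  // served entirely under the read lock
	fmt.Println(c.lookup("cold")) // falls back to the write lock once
}

The re-check under the write lock is what keeps the upgrade safe: anything another walker added between releasing the read lock and acquiring the write lock is simply found on the second pass.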
diff --git a/pkg/sentry/fsimpl/ext/inode.go b/pkg/sentry/fsimpl/ext/inode.go
deleted file mode 100644
index 46658f855..000000000
--- a/pkg/sentry/fsimpl/ext/inode.go
+++ /dev/null
@@ -1,247 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "fmt"
- "sync/atomic"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/errors/linuxerr"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/ext/disklayout"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
-)
-
-// inode represents an ext inode.
-//
-// inode uses the same inheritance pattern that pkg/sentry/vfs structures use.
-// This has been done to increase memory locality.
-//
-// Implementations:
-// inode --
-// |-- dir
-// |-- symlink
-// |-- regular--
-// |-- extent file
-// |-- block map file
-//
-// +stateify savable
-type inode struct {
- // refs is a reference count. refs is accessed using atomic memory operations.
- refs int64
-
- // fs is the containing filesystem.
- fs *filesystem
-
- // inodeNum is the inode number of this inode on disk. This is used to
- // identify inodes within the ext filesystem.
- inodeNum uint32
-
- // blkSize is the fs data block size. Same as filesystem.sb.BlockSize().
- blkSize uint64
-
- // diskInode gives us access to the inode struct on disk. Immutable.
- diskInode disklayout.Inode
-
- locks vfs.FileLocks
-
-	// This is immutable. Implementations must embed inode as their first field
-	// to preserve the memory locality described above.
- impl interface{}
-}
-
-// incRef increments the inode ref count.
-func (in *inode) incRef() {
- atomic.AddInt64(&in.refs, 1)
-}
-
-// tryIncRef tries to increment the ref count. Returns true if successful.
-func (in *inode) tryIncRef() bool {
- for {
- refs := atomic.LoadInt64(&in.refs)
- if refs == 0 {
- return false
- }
- if atomic.CompareAndSwapInt64(&in.refs, refs, refs+1) {
- return true
- }
- }
-}
-
-// decRef decrements the inode ref count and releases the inode resources if
-// the ref count hits 0.
-//
-// Precondition: Must have locked filesystem.mu.
-func (in *inode) decRef() {
- if refs := atomic.AddInt64(&in.refs, -1); refs == 0 {
- delete(in.fs.inodeCache, in.inodeNum)
- } else if refs < 0 {
- panic("ext.inode.decRef() called without holding a reference")
- }
-}
-
-// newInode is the inode constructor. Reads the inode off disk. Identifies
-// inodes based on the absolute inode number on disk.
-func newInode(fs *filesystem, inodeNum uint32) (*inode, error) {
- if inodeNum == 0 {
- panic("inode number 0 on ext filesystems is not possible")
- }
-
- inodeRecordSize := fs.sb.InodeSize()
- var diskInode disklayout.Inode
- if inodeRecordSize == disklayout.OldInodeSize {
- diskInode = &disklayout.InodeOld{}
- } else {
- diskInode = &disklayout.InodeNew{}
- }
-
- // Calculate where the inode is actually placed.
- inodesPerGrp := fs.sb.InodesPerGroup()
- blkSize := fs.sb.BlockSize()
- inodeTableOff := fs.bgs[getBGNum(inodeNum, inodesPerGrp)].InodeTable() * blkSize
- inodeOff := inodeTableOff + uint64(uint32(inodeRecordSize)*getBGOff(inodeNum, inodesPerGrp))
-
- if err := readFromDisk(fs.dev, int64(inodeOff), diskInode); err != nil {
- return nil, err
- }
-
- // Build the inode based on its type.
- args := inodeArgs{
- fs: fs,
- inodeNum: inodeNum,
- blkSize: blkSize,
- diskInode: diskInode,
- }
-
- switch diskInode.Mode().FileType() {
- case linux.ModeSymlink:
- f, err := newSymlink(args)
- if err != nil {
- return nil, err
- }
- return &f.inode, nil
- case linux.ModeRegular:
- f, err := newRegularFile(args)
- if err != nil {
- return nil, err
- }
- return &f.inode, nil
- case linux.ModeDirectory:
- f, err := newDirectory(args, fs.sb.IncompatibleFeatures().DirentFileType)
- if err != nil {
- return nil, err
- }
- return &f.inode, nil
- default:
- // TODO(b/134676337): Return appropriate errors for sockets, pipes and devices.
- return nil, linuxerr.EINVAL
- }
-}
-
-type inodeArgs struct {
- fs *filesystem
- inodeNum uint32
- blkSize uint64
- diskInode disklayout.Inode
-}
-
-func (in *inode) init(args inodeArgs, impl interface{}) {
- in.fs = args.fs
- in.inodeNum = args.inodeNum
- in.blkSize = args.blkSize
- in.diskInode = args.diskInode
- in.impl = impl
-}
-
-// open creates and returns a file description for the dentry passed in.
-func (in *inode) open(rp *vfs.ResolvingPath, vfsd *vfs.Dentry, opts *vfs.OpenOptions) (*vfs.FileDescription, error) {
- ats := vfs.AccessTypesForOpenFlags(opts)
- if err := in.checkPermissions(rp.Credentials(), ats); err != nil {
- return nil, err
- }
- mnt := rp.Mount()
- switch in.impl.(type) {
- case *regularFile:
- var fd regularFileFD
- fd.LockFD.Init(&in.locks)
- if err := fd.vfsfd.Init(&fd, opts.Flags, mnt, vfsd, &vfs.FileDescriptionOptions{}); err != nil {
- return nil, err
- }
- return &fd.vfsfd, nil
- case *directory:
- // Can't open directories writably. This check is not necessary for a read
- // only filesystem but will be required when write is implemented.
- if ats&vfs.MayWrite != 0 {
- return nil, syserror.EISDIR
- }
- var fd directoryFD
- fd.LockFD.Init(&in.locks)
- if err := fd.vfsfd.Init(&fd, opts.Flags, mnt, vfsd, &vfs.FileDescriptionOptions{}); err != nil {
- return nil, err
- }
- return &fd.vfsfd, nil
- case *symlink:
- if opts.Flags&linux.O_PATH == 0 {
- // Can't open symlinks without O_PATH.
- return nil, linuxerr.ELOOP
- }
- var fd symlinkFD
- fd.LockFD.Init(&in.locks)
- if err := fd.vfsfd.Init(&fd, opts.Flags, mnt, vfsd, &vfs.FileDescriptionOptions{}); err != nil {
- return nil, err
- }
- return &fd.vfsfd, nil
- default:
- panic(fmt.Sprintf("unknown inode type: %T", in.impl))
- }
-}
-
-func (in *inode) checkPermissions(creds *auth.Credentials, ats vfs.AccessTypes) error {
- return vfs.GenericCheckPermissions(creds, ats, in.diskInode.Mode(), in.diskInode.UID(), in.diskInode.GID())
-}
-
-// statTo writes the statx fields to the output parameter.
-func (in *inode) statTo(stat *linux.Statx) {
- stat.Mask = linux.STATX_TYPE | linux.STATX_MODE | linux.STATX_NLINK |
- linux.STATX_UID | linux.STATX_GID | linux.STATX_INO | linux.STATX_SIZE |
- linux.STATX_ATIME | linux.STATX_CTIME | linux.STATX_MTIME
- stat.Blksize = uint32(in.blkSize)
- stat.Mode = uint16(in.diskInode.Mode())
- stat.Nlink = uint32(in.diskInode.LinksCount())
- stat.UID = uint32(in.diskInode.UID())
- stat.GID = uint32(in.diskInode.GID())
- stat.Ino = uint64(in.inodeNum)
- stat.Size = in.diskInode.Size()
- stat.Atime = in.diskInode.AccessTime().StatxTimestamp()
- stat.Ctime = in.diskInode.ChangeTime().StatxTimestamp()
- stat.Mtime = in.diskInode.ModificationTime().StatxTimestamp()
- stat.DevMajor = linux.UNNAMED_MAJOR
- stat.DevMinor = in.fs.devMinor
- // TODO(b/134676337): Set stat.Blocks which is the number of 512 byte blocks
- // (including metadata blocks) required to represent this file.
-}
-
-// getBGNum returns the block group number that a given inode belongs to.
-func getBGNum(inodeNum uint32, inodesPerGrp uint32) uint32 {
- return (inodeNum - 1) / inodesPerGrp
-}
-
-// getBGOff returns the offset at which the given inode lives in the block
-// group's inode table, i.e. the index of the inode in the inode table.
-func getBGOff(inodeNum uint32, inodesPerGrp uint32) uint32 {
- return (inodeNum - 1) % inodesPerGrp
-}
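getBGNum and getBGOff above are plain 1-based index arithmetic: subtract one from the absolute inode number, then divide and take the remainder by inodes-per-group. A small standalone example, using a made-up inodesPerGrp of 8 purely for illustration:

package main

import "fmt"

// bgNum and bgOff mirror the arithmetic of getBGNum/getBGOff above.
func bgNum(inodeNum, inodesPerGrp uint32) uint32 { return (inodeNum - 1) / inodesPerGrp }
func bgOff(inodeNum, inodesPerGrp uint32) uint32 { return (inodeNum - 1) % inodesPerGrp }

func main() {
	const inodesPerGrp = 8 // made-up value for illustration
	for _, ino := range []uint32{1, 8, 9, 20} {
		fmt.Printf("inode %2d -> group %d, index %d\n", ino, bgNum(ino, inodesPerGrp), bgOff(ino, inodesPerGrp))
	}
	// inode  1 -> group 0, index 0
	// inode  8 -> group 0, index 7
	// inode  9 -> group 1, index 0
	// inode 20 -> group 2, index 3
}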
diff --git a/pkg/sentry/fsimpl/ext/regular_file.go b/pkg/sentry/fsimpl/ext/regular_file.go
deleted file mode 100644
index 6613f0e1d..000000000
--- a/pkg/sentry/fsimpl/ext/regular_file.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "io"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/errors/linuxerr"
- "gvisor.dev/gvisor/pkg/safemem"
- "gvisor.dev/gvisor/pkg/sentry/memmap"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-// regularFile represents a regular file's inode. This too follows the
-// inheritance pattern prevalent in the vfs layer described in
-// pkg/sentry/vfs/README.md.
-//
-// +stateify savable
-type regularFile struct {
- inode inode
-
-	// This is immutable. The first field of fileReader implementations must be
-	// regularFile to preserve the memory locality described above.
- // io.ReaderAt is more strict than io.Reader in the sense that a partial read
- // is always accompanied by an error. If a read spans past the end of file, a
- // partial read (within file range) is done and io.EOF is returned.
- impl io.ReaderAt
-}
-
-// newRegularFile is the regularFile constructor. It figures out what kind of
-// file this is and initializes the fileReader.
-func newRegularFile(args inodeArgs) (*regularFile, error) {
- if args.diskInode.Flags().Extents {
- file, err := newExtentFile(args)
- if err != nil {
- return nil, err
- }
- return &file.regFile, nil
- }
-
- file, err := newBlockMapFile(args)
- if err != nil {
- return nil, err
- }
- return &file.regFile, nil
-}
-
-func (in *inode) isRegular() bool {
- _, ok := in.impl.(*regularFile)
- return ok
-}
-
-// regularFileFD represents a regular file's file description. It implements
-// vfs.FileDescriptionImpl.
-//
-// +stateify savable
-type regularFileFD struct {
- fileDescription
- vfs.LockFD
-
- // off is the file offset. off is accessed using atomic memory operations.
- off int64
-
- // offMu serializes operations that may mutate off.
- offMu sync.Mutex `state:"nosave"`
-}
-
-// Release implements vfs.FileDescriptionImpl.Release.
-func (fd *regularFileFD) Release(context.Context) {}
-
-// PRead implements vfs.FileDescriptionImpl.PRead.
-func (fd *regularFileFD) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts vfs.ReadOptions) (int64, error) {
- safeReader := safemem.FromIOReaderAt{
- ReaderAt: fd.inode().impl.(*regularFile).impl,
- Offset: offset,
- }
-
- // Copies data from disk directly into usermem without any intermediate
- // allocations (if dst is converted into BlockSeq such that it does not need
- // safe copying).
- return dst.CopyOutFrom(ctx, safeReader)
-}
-
-// Read implements vfs.FileDescriptionImpl.Read.
-func (fd *regularFileFD) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.ReadOptions) (int64, error) {
- n, err := fd.PRead(ctx, dst, fd.off, opts)
- fd.offMu.Lock()
- fd.off += n
- fd.offMu.Unlock()
- return n, err
-}
-
-// PWrite implements vfs.FileDescriptionImpl.PWrite.
-func (fd *regularFileFD) PWrite(ctx context.Context, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (int64, error) {
- // write(2) specifies that EBADF must be returned if the fd is not open for
- // writing.
- return 0, linuxerr.EBADF
-}
-
-// Write implements vfs.FileDescriptionImpl.Write.
-func (fd *regularFileFD) Write(ctx context.Context, src usermem.IOSequence, opts vfs.WriteOptions) (int64, error) {
- n, err := fd.PWrite(ctx, src, fd.off, opts)
- fd.offMu.Lock()
- fd.off += n
- fd.offMu.Unlock()
- return n, err
-}
-
-// IterDirents implements vfs.FileDescriptionImpl.IterDirents.
-func (fd *regularFileFD) IterDirents(ctx context.Context, cb vfs.IterDirentsCallback) error {
- return linuxerr.ENOTDIR
-}
-
-// Seek implements vfs.FileDescriptionImpl.Seek.
-func (fd *regularFileFD) Seek(ctx context.Context, offset int64, whence int32) (int64, error) {
- fd.offMu.Lock()
- defer fd.offMu.Unlock()
- switch whence {
- case linux.SEEK_SET:
- // Use offset as specified.
- case linux.SEEK_CUR:
- offset += fd.off
- case linux.SEEK_END:
- offset += int64(fd.inode().diskInode.Size())
- default:
- return 0, linuxerr.EINVAL
- }
- if offset < 0 {
- return 0, linuxerr.EINVAL
- }
- fd.off = offset
- return offset, nil
-}
-
-// ConfigureMMap implements vfs.FileDescriptionImpl.ConfigureMMap.
-func (fd *regularFileFD) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) error {
- // TODO(b/134676337): Implement mmap(2).
- return linuxerr.ENODEV
-}
diff --git a/pkg/sentry/fsimpl/ext/symlink.go b/pkg/sentry/fsimpl/ext/symlink.go
deleted file mode 100644
index 385651dc3..000000000
--- a/pkg/sentry/fsimpl/ext/symlink.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/errors/linuxerr"
- "gvisor.dev/gvisor/pkg/sentry/memmap"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-// symlink represents a symlink inode.
-//
-// +stateify savable
-type symlink struct {
- inode inode
- target string // immutable
-}
-
-// newSymlink is the symlink constructor. It reads out the symlink target from
-// the inode (however it might have been stored).
-func newSymlink(args inodeArgs) (*symlink, error) {
- var link []byte
-
-	// If the symlink target is less than 60 bytes, it is stored in inode.Data().
-	// Otherwise either extents or block maps will be used to store the link.
- size := args.diskInode.Size()
- if size < 60 {
- link = args.diskInode.Data()[:size]
- } else {
- // Create a regular file out of this inode and read out the target.
- regFile, err := newRegularFile(args)
- if err != nil {
- return nil, err
- }
-
- link = make([]byte, size)
- if n, err := regFile.impl.ReadAt(link, 0); uint64(n) < size {
- return nil, err
- }
- }
-
- file := &symlink{target: string(link)}
- file.inode.init(args, file)
- return file, nil
-}
-
-func (in *inode) isSymlink() bool {
- _, ok := in.impl.(*symlink)
- return ok
-}
-
-// symlinkFD represents a symlink file description and implements
-// vfs.FileDescriptionImpl. It may only be used if the open options contain
-// O_PATH. For this reason most of the functions return EBADF.
-//
-// +stateify savable
-type symlinkFD struct {
- fileDescription
- vfs.NoLockFD
-}
-
-// Compiles only if symlinkFD implements vfs.FileDescriptionImpl.
-var _ vfs.FileDescriptionImpl = (*symlinkFD)(nil)
-
-// Release implements vfs.FileDescriptionImpl.Release.
-func (fd *symlinkFD) Release(context.Context) {}
-
-// PRead implements vfs.FileDescriptionImpl.PRead.
-func (fd *symlinkFD) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts vfs.ReadOptions) (int64, error) {
- return 0, linuxerr.EBADF
-}
-
-// Read implements vfs.FileDescriptionImpl.Read.
-func (fd *symlinkFD) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.ReadOptions) (int64, error) {
- return 0, linuxerr.EBADF
-}
-
-// PWrite implements vfs.FileDescriptionImpl.PWrite.
-func (fd *symlinkFD) PWrite(ctx context.Context, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (int64, error) {
- return 0, linuxerr.EBADF
-}
-
-// Write implements vfs.FileDescriptionImpl.Write.
-func (fd *symlinkFD) Write(ctx context.Context, src usermem.IOSequence, opts vfs.WriteOptions) (int64, error) {
- return 0, linuxerr.EBADF
-}
-
-// IterDirents implements vfs.FileDescriptionImpl.IterDirents.
-func (fd *symlinkFD) IterDirents(ctx context.Context, cb vfs.IterDirentsCallback) error {
- return linuxerr.ENOTDIR
-}
-
-// Seek implements vfs.FileDescriptionImpl.Seek.
-func (fd *symlinkFD) Seek(ctx context.Context, offset int64, whence int32) (int64, error) {
- return 0, linuxerr.EBADF
-}
-
-// ConfigureMMap implements vfs.FileDescriptionImpl.ConfigureMMap.
-func (fd *symlinkFD) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) error {
- return linuxerr.EBADF
-}
diff --git a/pkg/sentry/fsimpl/ext/utils.go b/pkg/sentry/fsimpl/ext/utils.go
deleted file mode 100644
index 58ef7b9b8..000000000
--- a/pkg/sentry/fsimpl/ext/utils.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "io"
-
- "gvisor.dev/gvisor/pkg/marshal"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/ext/disklayout"
- "gvisor.dev/gvisor/pkg/syserror"
-)
-
-// readFromDisk performs a binary read from disk into the given struct from
-// the absolute offset provided.
-func readFromDisk(dev io.ReaderAt, abOff int64, v marshal.Marshallable) error {
- n := v.SizeBytes()
- buf := make([]byte, n)
- if read, _ := dev.ReadAt(buf, abOff); read < int(n) {
- return syserror.EIO
- }
-
- v.UnmarshalBytes(buf)
- return nil
-}
-
-// readSuperBlock reads the SuperBlock from block group 0 in the underlying
-// device. There are three versions of the superblock. This function identifies
-// and returns the correct version.
-func readSuperBlock(dev io.ReaderAt) (disklayout.SuperBlock, error) {
- var sb disklayout.SuperBlock = &disklayout.SuperBlockOld{}
- if err := readFromDisk(dev, disklayout.SbOffset, sb); err != nil {
- return nil, err
- }
- if sb.Revision() == disklayout.OldRev {
- return sb, nil
- }
-
- sb = &disklayout.SuperBlock32Bit{}
- if err := readFromDisk(dev, disklayout.SbOffset, sb); err != nil {
- return nil, err
- }
- if !sb.IncompatibleFeatures().Is64Bit {
- return sb, nil
- }
-
- sb = &disklayout.SuperBlock64Bit{}
- if err := readFromDisk(dev, disklayout.SbOffset, sb); err != nil {
- return nil, err
- }
- return sb, nil
-}
-
-// blockGroupsCount returns the number of block groups in the ext fs.
-func blockGroupsCount(sb disklayout.SuperBlock) uint64 {
- blocksCount := sb.BlocksCount()
- blocksPerGroup := uint64(sb.BlocksPerGroup())
-
- // Round up the result. float64 can compromise precision so do it manually.
- return (blocksCount + blocksPerGroup - 1) / blocksPerGroup
-}
-
-// readBlockGroups reads the block group descriptor table from block group 0 in
-// the underlying device.
-func readBlockGroups(dev io.ReaderAt, sb disklayout.SuperBlock) ([]disklayout.BlockGroup, error) {
- bgCount := blockGroupsCount(sb)
- bgdSize := uint64(sb.BgDescSize())
- is64Bit := sb.IncompatibleFeatures().Is64Bit
- bgds := make([]disklayout.BlockGroup, bgCount)
-
- for i, off := uint64(0), uint64(sb.FirstDataBlock()+1)*sb.BlockSize(); i < bgCount; i, off = i+1, off+bgdSize {
- if is64Bit {
- bgds[i] = &disklayout.BlockGroup64Bit{}
- } else {
- bgds[i] = &disklayout.BlockGroup32Bit{}
- }
-
- if err := readFromDisk(dev, int64(off), bgds[i]); err != nil {
- return nil, err
- }
- }
- return bgds, nil
-}
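blockGroupsCount above rounds up using integer arithmetic rather than float64. A minimal standalone sketch of that ceiling division, with made-up block counts:

package main

import "fmt"

// ceilDiv is the same integer rounding used by blockGroupsCount above.
func ceilDiv(blocks, perGroup uint64) uint64 {
	return (blocks + perGroup - 1) / perGroup
}

func main() {
	fmt.Println(ceilDiv(32768, 32768)) // 1: exactly one full group
	fmt.Println(ceilDiv(32769, 32768)) // 2: one extra block needs a second group
	fmt.Println(ceilDiv(100, 32768))   // 1: a partial group still counts as a group
}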
diff --git a/pkg/sentry/fsimpl/fuse/BUILD b/pkg/sentry/fsimpl/fuse/BUILD
deleted file mode 100644
index 871df5984..000000000
--- a/pkg/sentry/fsimpl/fuse/BUILD
+++ /dev/null
@@ -1,92 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-licenses(["notice"])
-
-go_template_instance(
- name = "request_list",
- out = "request_list.go",
- package = "fuse",
- prefix = "request",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*Request",
- "Linker": "*Request",
- },
-)
-
-go_template_instance(
- name = "inode_refs",
- out = "inode_refs.go",
- package = "fuse",
- prefix = "inode",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "inode",
- },
-)
-
-go_library(
- name = "fuse",
- srcs = [
- "connection.go",
- "connection_control.go",
- "dev.go",
- "directory.go",
- "file.go",
- "fusefs.go",
- "inode_refs.go",
- "read_write.go",
- "register.go",
- "regular_file.go",
- "request_list.go",
- "request_response.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/marshal",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/safemem",
- "//pkg/sentry/fsimpl/devtmpfs",
- "//pkg/sentry/fsimpl/kernfs",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- "//pkg/waiter",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "fuse_test",
- size = "small",
- srcs = [
- "connection_test.go",
- "dev_test.go",
- "utils_test.go",
- ],
- library = ":fuse",
- deps = [
- "//pkg/abi/linux",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/marshal",
- "//pkg/sentry/fsimpl/testutil",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- "//pkg/syserror",
- "//pkg/usermem",
- "//pkg/waiter",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/sentry/fsimpl/fuse/connection_test.go b/pkg/sentry/fsimpl/fuse/connection_test.go
deleted file mode 100644
index 1fddd858e..000000000
--- a/pkg/sentry/fsimpl/fuse/connection_test.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fuse
-
-import (
- "math/rand"
- "testing"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/errors/linuxerr"
- "gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
-)
-
-// TestConnectionInitBlock tests that initialization
-// correctly blocks and unblocks the connection.
-// Since it is infeasible to test kernelTask.Block() in a unit test,
-// the code in Call() is not tested here.
-func TestConnectionInitBlock(t *testing.T) {
- s := setup(t)
- defer s.Destroy()
-
- k := kernel.KernelFromContext(s.Ctx)
-
- conn, _, err := newTestConnection(s, k, maxActiveRequestsDefault)
- if err != nil {
- t.Fatalf("newTestConnection: %v", err)
- }
-
- select {
- case <-conn.initializedChan:
- t.Fatalf("initializedChan should be blocking before SetInitialized")
- default:
- }
-
- conn.SetInitialized()
-
- select {
- case <-conn.initializedChan:
- default:
- t.Fatalf("initializedChan should not be blocking after SetInitialized")
- }
-}
-
-func TestConnectionAbort(t *testing.T) {
- s := setup(t)
- defer s.Destroy()
-
- k := kernel.KernelFromContext(s.Ctx)
- creds := auth.CredentialsFromContext(s.Ctx)
- task := kernel.TaskFromContext(s.Ctx)
-
- const numRequests uint64 = 256
-
- conn, _, err := newTestConnection(s, k, numRequests)
- if err != nil {
- t.Fatalf("newTestConnection: %v", err)
- }
-
- testObj := &testPayload{
- data: rand.Uint32(),
- }
-
- var futNormal []*futureResponse
-
- for i := 0; i < int(numRequests); i++ {
- req := conn.NewRequest(creds, uint32(i), uint64(i), 0, testObj)
- fut, err := conn.callFutureLocked(task, req)
- if err != nil {
- t.Fatalf("callFutureLocked failed: %v", err)
- }
- futNormal = append(futNormal, fut)
- }
-
- conn.Abort(s.Ctx)
-
- // Abort should unblock the initialization channel.
- // Note: no test requests are actually blocked on `conn.initializedChan`.
- select {
- case <-conn.initializedChan:
- default:
- t.Fatalf("initializedChan should not be blocking after SetInitialized")
- }
-
- // Abort will return ECONNABORTED error to unblocked requests.
- for _, fut := range futNormal {
- if fut.getResponse().hdr.Error != -int32(unix.ECONNABORTED) {
- t.Fatalf("Incorrect error code received for aborted connection: %v", fut.getResponse().hdr.Error)
- }
- }
-
- // After abort, Call() should return directly with ENOTCONN.
- req := conn.NewRequest(creds, 0, 0, 0, testObj)
- _, err = conn.Call(task, req)
- if !linuxerr.Equals(linuxerr.ENOTCONN, err) {
- t.Fatalf("Incorrect error code received for Call() after connection aborted")
- }
-
-}
diff --git a/pkg/sentry/fsimpl/fuse/dev_test.go b/pkg/sentry/fsimpl/fuse/dev_test.go
deleted file mode 100644
index 04250d796..000000000
--- a/pkg/sentry/fsimpl/fuse/dev_test.go
+++ /dev/null
@@ -1,320 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fuse
-
-import (
- "fmt"
- "math/rand"
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/testutil"
- "gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-// echoTestOpcode is the Opcode used during testing. The server used in tests
-// will simply echo the payload back with the appropriate headers.
-const echoTestOpcode linux.FUSEOpcode = 1000
-
-// TestFUSECommunication tests that the communication layer between the Sentry and the
-// FUSE server daemon works as expected.
-func TestFUSECommunication(t *testing.T) {
- s := setup(t)
- defer s.Destroy()
-
- k := kernel.KernelFromContext(s.Ctx)
- creds := auth.CredentialsFromContext(s.Ctx)
-
- // Create test cases with different number of concurrent clients and servers.
- testCases := []struct {
- Name string
- NumClients int
- NumServers int
- MaxActiveRequests uint64
- }{
- {
- Name: "SingleClientSingleServer",
- NumClients: 1,
- NumServers: 1,
- MaxActiveRequests: maxActiveRequestsDefault,
- },
- {
- Name: "SingleClientMultipleServers",
- NumClients: 1,
- NumServers: 10,
- MaxActiveRequests: maxActiveRequestsDefault,
- },
- {
- Name: "MultipleClientsSingleServer",
- NumClients: 10,
- NumServers: 1,
- MaxActiveRequests: maxActiveRequestsDefault,
- },
- {
- Name: "MultipleClientsMultipleServers",
- NumClients: 10,
- NumServers: 10,
- MaxActiveRequests: maxActiveRequestsDefault,
- },
- {
- Name: "RequestCapacityFull",
- NumClients: 10,
- NumServers: 1,
- MaxActiveRequests: 1,
- },
- {
- Name: "RequestCapacityContinuouslyFull",
- NumClients: 100,
- NumServers: 2,
- MaxActiveRequests: 2,
- },
- }
-
- for _, testCase := range testCases {
- t.Run(testCase.Name, func(t *testing.T) {
- conn, fd, err := newTestConnection(s, k, testCase.MaxActiveRequests)
- if err != nil {
- t.Fatalf("newTestConnection: %v", err)
- }
-
- clientsDone := make([]chan struct{}, testCase.NumClients)
- serversDone := make([]chan struct{}, testCase.NumServers)
- serversKill := make([]chan struct{}, testCase.NumServers)
-
- // FUSE clients.
- for i := 0; i < testCase.NumClients; i++ {
- clientsDone[i] = make(chan struct{})
- go func(i int) {
- fuseClientRun(t, s, k, conn, creds, uint32(i), uint64(i), clientsDone[i])
- }(i)
- }
-
- // FUSE servers.
- for j := 0; j < testCase.NumServers; j++ {
- serversDone[j] = make(chan struct{})
- serversKill[j] = make(chan struct{}, 1) // The kill command shouldn't block.
- go func(j int) {
- fuseServerRun(t, s, k, fd, serversDone[j], serversKill[j])
- }(j)
- }
-
- // Tear down.
- //
- // Make sure all the clients are done.
- for i := 0; i < testCase.NumClients; i++ {
- <-clientsDone[i]
- }
-
- // Kill any server that is potentially waiting.
- for j := 0; j < testCase.NumServers; j++ {
- serversKill[j] <- struct{}{}
- }
-
- // Make sure all the servers are done.
- for j := 0; j < testCase.NumServers; j++ {
- <-serversDone[j]
- }
- })
- }
-}
-
-// CallTest makes a request to the server and blocks the invoking
-// goroutine until the server responds. It does not block
-// a kernel.Task. Analogous to Connection.Call but used for testing.
-func CallTest(conn *connection, t *kernel.Task, r *Request, i uint32) (*Response, error) {
- conn.fd.mu.Lock()
-
- // Wait until we're certain that a new request can be processed.
- for conn.fd.numActiveRequests == conn.fd.fs.opts.maxActiveRequests {
- conn.fd.mu.Unlock()
- select {
- case <-conn.fd.fullQueueCh:
- }
- conn.fd.mu.Lock()
- }
-
- fut, err := conn.callFutureLocked(t, r) // No task given.
- conn.fd.mu.Unlock()
-
- if err != nil {
- return nil, err
- }
-
- // Resolve the response.
- //
- // Block without a task.
- select {
- case <-fut.ch:
- }
-
- // A response is ready. Resolve and return it.
- return fut.getResponse(), nil
-}
-
-// ReadTest is analogous to vfs.FileDescription.Read and reads from the FUSE
-// device. However, it does so without blocking the calling task; instead it
-// just waits on a channel. The behaviour is essentially the same as
-// DeviceFD.Read except that it guarantees the task is not blocked.
-func ReadTest(serverTask *kernel.Task, fd *vfs.FileDescription, inIOseq usermem.IOSequence, killServer chan struct{}) (int64, bool, error) {
- var err error
- var n, total int64
-
- dev := fd.Impl().(*DeviceFD)
-
- // Register for notifications.
- w, ch := waiter.NewChannelEntry(nil)
- dev.EventRegister(&w, waiter.ReadableEvents)
- for {
- // Issue the request and break out if it completes with anything other than
- // "would block".
- n, err = dev.Read(serverTask, inIOseq, vfs.ReadOptions{})
- total += n
- if err != syserror.ErrWouldBlock {
- break
- }
-
- // Wait for a notification that we should retry.
- // Emulate the blocking for when no requests are available
- select {
- case <-ch:
- case <-killServer:
- // Server killed by the main program.
- return 0, true, nil
- }
- }
-
- dev.EventUnregister(&w)
- return total, false, err
-}
-
-// fuseClientRun emulates all the actions of a normal FUSE request. It creates
-// a header, a payload, calls the server, waits for the response, and processes
-// the response.
-func fuseClientRun(t *testing.T, s *testutil.System, k *kernel.Kernel, conn *connection, creds *auth.Credentials, pid uint32, inode uint64, clientDone chan struct{}) {
- defer func() { clientDone <- struct{}{} }()
-
- tc := k.NewThreadGroup(nil, k.RootPIDNamespace(), kernel.NewSignalHandlers(), linux.SIGCHLD, k.GlobalInit().Limits())
- clientTask, err := testutil.CreateTask(s.Ctx, fmt.Sprintf("fuse-client-%v", pid), tc, s.MntNs, s.Root, s.Root)
- if err != nil {
- t.Fatal(err)
- }
- testObj := &testPayload{
- data: rand.Uint32(),
- }
-
- req := conn.NewRequest(creds, pid, inode, echoTestOpcode, testObj)
-
- // Queue up a request.
- // Analogous to Call except it doesn't block on the task.
- resp, err := CallTest(conn, clientTask, req, pid)
- if err != nil {
- t.Fatalf("CallTaskNonBlock failed: %v", err)
- }
-
- if err = resp.Error(); err != nil {
- t.Fatalf("Server responded with an error: %v", err)
- }
-
- var respTestPayload testPayload
- if err := resp.UnmarshalPayload(&respTestPayload); err != nil {
- t.Fatalf("Unmarshalling payload error: %v", err)
- }
-
- if resp.hdr.Unique != req.hdr.Unique {
- t.Fatalf("got response for another request. Expected response for req %v but got response for req %v",
- req.hdr.Unique, resp.hdr.Unique)
- }
-
- if respTestPayload.data != testObj.data {
- t.Fatalf("read incorrect data. Data expected: %v, but got %v", testObj.data, respTestPayload.data)
- }
-
-}
-
-// fuseServerRun creates a task and emulates all the actions of a simple FUSE
-// server that reads a request and echoes the same struct back as a response
-// using the appropriate headers.
-func fuseServerRun(t *testing.T, s *testutil.System, k *kernel.Kernel, fd *vfs.FileDescription, serverDone, killServer chan struct{}) {
- defer func() { serverDone <- struct{}{} }()
-
- // Create the tasks that the server will be using.
- tc := k.NewThreadGroup(nil, k.RootPIDNamespace(), kernel.NewSignalHandlers(), linux.SIGCHLD, k.GlobalInit().Limits())
- var readPayload testPayload
-
- serverTask, err := testutil.CreateTask(s.Ctx, "fuse-server", tc, s.MntNs, s.Root, s.Root)
- if err != nil {
- t.Fatal(err)
- }
-
- // Read the request.
- for {
- inHdrLen := uint32((*linux.FUSEHeaderIn)(nil).SizeBytes())
- payloadLen := uint32(readPayload.SizeBytes())
-
-		// The read buffer must meet a minimum size requirement.
- buffSize := inHdrLen + payloadLen
- if buffSize < linux.FUSE_MIN_READ_BUFFER {
- buffSize = linux.FUSE_MIN_READ_BUFFER
- }
- inBuf := make([]byte, buffSize)
- inIOseq := usermem.BytesIOSequence(inBuf)
-
- n, serverKilled, err := ReadTest(serverTask, fd, inIOseq, killServer)
- if err != nil {
- t.Fatalf("Read failed :%v", err)
- }
-
- // Server should shut down. No new requests are going to be made.
- if serverKilled {
- break
- }
-
- if n <= 0 {
- t.Fatalf("Read read no bytes")
- }
-
- var readFUSEHeaderIn linux.FUSEHeaderIn
- readFUSEHeaderIn.UnmarshalUnsafe(inBuf[:inHdrLen])
- readPayload.UnmarshalUnsafe(inBuf[inHdrLen : inHdrLen+payloadLen])
-
- if readFUSEHeaderIn.Opcode != echoTestOpcode {
- t.Fatalf("read incorrect data. Header: %v, Payload: %v", readFUSEHeaderIn, readPayload)
- }
-
- // Write the response.
- outHdrLen := uint32((*linux.FUSEHeaderOut)(nil).SizeBytes())
- outBuf := make([]byte, outHdrLen+payloadLen)
- outHeader := linux.FUSEHeaderOut{
- Len: outHdrLen + payloadLen,
- Error: 0,
- Unique: readFUSEHeaderIn.Unique,
- }
-
- // Echo the payload back.
- outHeader.MarshalUnsafe(outBuf[:outHdrLen])
- readPayload.MarshalUnsafe(outBuf[outHdrLen:])
- outIOseq := usermem.BytesIOSequence(outBuf)
-
- _, err = fd.Write(s.Ctx, outIOseq, vfs.WriteOptions{})
- if err != nil {
- t.Fatalf("Write failed :%v", err)
- }
- }
-}
diff --git a/pkg/sentry/fsimpl/fuse/fuse_state_autogen.go b/pkg/sentry/fsimpl/fuse/fuse_state_autogen.go
new file mode 100644
index 000000000..f61e010dc
--- /dev/null
+++ b/pkg/sentry/fsimpl/fuse/fuse_state_autogen.go
@@ -0,0 +1,560 @@
+// automatically generated by stateify.
+
+package fuse
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (conn *connection) StateTypeName() string {
+ return "pkg/sentry/fsimpl/fuse.connection"
+}
+
+func (conn *connection) StateFields() []string {
+ return []string{
+ "fd",
+ "attributeVersion",
+ "initialized",
+ "initializedChan",
+ "connected",
+ "connInitError",
+ "connInitSuccess",
+ "aborted",
+ "numWaiting",
+ "asyncNum",
+ "asyncCongestionThreshold",
+ "asyncNumMax",
+ "maxRead",
+ "maxWrite",
+ "maxPages",
+ "minor",
+ "atomicOTrunc",
+ "asyncRead",
+ "writebackCache",
+ "bigWrites",
+ "dontMask",
+ "noOpen",
+ }
+}
+
+func (conn *connection) beforeSave() {}
+
+// +checklocksignore
+func (conn *connection) StateSave(stateSinkObject state.Sink) {
+ conn.beforeSave()
+ var initializedChanValue bool = conn.saveInitializedChan()
+ stateSinkObject.SaveValue(3, initializedChanValue)
+ stateSinkObject.Save(0, &conn.fd)
+ stateSinkObject.Save(1, &conn.attributeVersion)
+ stateSinkObject.Save(2, &conn.initialized)
+ stateSinkObject.Save(4, &conn.connected)
+ stateSinkObject.Save(5, &conn.connInitError)
+ stateSinkObject.Save(6, &conn.connInitSuccess)
+ stateSinkObject.Save(7, &conn.aborted)
+ stateSinkObject.Save(8, &conn.numWaiting)
+ stateSinkObject.Save(9, &conn.asyncNum)
+ stateSinkObject.Save(10, &conn.asyncCongestionThreshold)
+ stateSinkObject.Save(11, &conn.asyncNumMax)
+ stateSinkObject.Save(12, &conn.maxRead)
+ stateSinkObject.Save(13, &conn.maxWrite)
+ stateSinkObject.Save(14, &conn.maxPages)
+ stateSinkObject.Save(15, &conn.minor)
+ stateSinkObject.Save(16, &conn.atomicOTrunc)
+ stateSinkObject.Save(17, &conn.asyncRead)
+ stateSinkObject.Save(18, &conn.writebackCache)
+ stateSinkObject.Save(19, &conn.bigWrites)
+ stateSinkObject.Save(20, &conn.dontMask)
+ stateSinkObject.Save(21, &conn.noOpen)
+}
+
+func (conn *connection) afterLoad() {}
+
+// +checklocksignore
+func (conn *connection) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &conn.fd)
+ stateSourceObject.Load(1, &conn.attributeVersion)
+ stateSourceObject.Load(2, &conn.initialized)
+ stateSourceObject.Load(4, &conn.connected)
+ stateSourceObject.Load(5, &conn.connInitError)
+ stateSourceObject.Load(6, &conn.connInitSuccess)
+ stateSourceObject.Load(7, &conn.aborted)
+ stateSourceObject.Load(8, &conn.numWaiting)
+ stateSourceObject.Load(9, &conn.asyncNum)
+ stateSourceObject.Load(10, &conn.asyncCongestionThreshold)
+ stateSourceObject.Load(11, &conn.asyncNumMax)
+ stateSourceObject.Load(12, &conn.maxRead)
+ stateSourceObject.Load(13, &conn.maxWrite)
+ stateSourceObject.Load(14, &conn.maxPages)
+ stateSourceObject.Load(15, &conn.minor)
+ stateSourceObject.Load(16, &conn.atomicOTrunc)
+ stateSourceObject.Load(17, &conn.asyncRead)
+ stateSourceObject.Load(18, &conn.writebackCache)
+ stateSourceObject.Load(19, &conn.bigWrites)
+ stateSourceObject.Load(20, &conn.dontMask)
+ stateSourceObject.Load(21, &conn.noOpen)
+ stateSourceObject.LoadValue(3, new(bool), func(y interface{}) { conn.loadInitializedChan(y.(bool)) })
+}
+
+func (f *fuseDevice) StateTypeName() string {
+ return "pkg/sentry/fsimpl/fuse.fuseDevice"
+}
+
+func (f *fuseDevice) StateFields() []string {
+ return []string{}
+}
+
+func (f *fuseDevice) beforeSave() {}
+
+// +checklocksignore
+func (f *fuseDevice) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+}
+
+func (f *fuseDevice) afterLoad() {}
+
+// +checklocksignore
+func (f *fuseDevice) StateLoad(stateSourceObject state.Source) {
+}
+
+func (fd *DeviceFD) StateTypeName() string {
+ return "pkg/sentry/fsimpl/fuse.DeviceFD"
+}
+
+func (fd *DeviceFD) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "DentryMetadataFileDescriptionImpl",
+ "NoLockFD",
+ "nextOpID",
+ "queue",
+ "numActiveRequests",
+ "completions",
+ "writeCursor",
+ "writeBuf",
+ "writeCursorFR",
+ "waitQueue",
+ "fullQueueCh",
+ "fs",
+ }
+}
+
+func (fd *DeviceFD) beforeSave() {}
+
+// +checklocksignore
+func (fd *DeviceFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ var fullQueueChValue int = fd.saveFullQueueCh()
+ stateSinkObject.SaveValue(12, fullQueueChValue)
+ stateSinkObject.Save(0, &fd.vfsfd)
+ stateSinkObject.Save(1, &fd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &fd.DentryMetadataFileDescriptionImpl)
+ stateSinkObject.Save(3, &fd.NoLockFD)
+ stateSinkObject.Save(4, &fd.nextOpID)
+ stateSinkObject.Save(5, &fd.queue)
+ stateSinkObject.Save(6, &fd.numActiveRequests)
+ stateSinkObject.Save(7, &fd.completions)
+ stateSinkObject.Save(8, &fd.writeCursor)
+ stateSinkObject.Save(9, &fd.writeBuf)
+ stateSinkObject.Save(10, &fd.writeCursorFR)
+ stateSinkObject.Save(11, &fd.waitQueue)
+ stateSinkObject.Save(13, &fd.fs)
+}
+
+func (fd *DeviceFD) afterLoad() {}
+
+// +checklocksignore
+func (fd *DeviceFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.vfsfd)
+ stateSourceObject.Load(1, &fd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &fd.DentryMetadataFileDescriptionImpl)
+ stateSourceObject.Load(3, &fd.NoLockFD)
+ stateSourceObject.Load(4, &fd.nextOpID)
+ stateSourceObject.Load(5, &fd.queue)
+ stateSourceObject.Load(6, &fd.numActiveRequests)
+ stateSourceObject.Load(7, &fd.completions)
+ stateSourceObject.Load(8, &fd.writeCursor)
+ stateSourceObject.Load(9, &fd.writeBuf)
+ stateSourceObject.Load(10, &fd.writeCursorFR)
+ stateSourceObject.Load(11, &fd.waitQueue)
+ stateSourceObject.Load(13, &fd.fs)
+ stateSourceObject.LoadValue(12, new(int), func(y interface{}) { fd.loadFullQueueCh(y.(int)) })
+}
+
+func (fsType *FilesystemType) StateTypeName() string {
+ return "pkg/sentry/fsimpl/fuse.FilesystemType"
+}
+
+func (fsType *FilesystemType) StateFields() []string {
+ return []string{}
+}
+
+func (fsType *FilesystemType) beforeSave() {}
+
+// +checklocksignore
+func (fsType *FilesystemType) StateSave(stateSinkObject state.Sink) {
+ fsType.beforeSave()
+}
+
+func (fsType *FilesystemType) afterLoad() {}
+
+// +checklocksignore
+func (fsType *FilesystemType) StateLoad(stateSourceObject state.Source) {
+}
+
+func (f *filesystemOptions) StateTypeName() string {
+ return "pkg/sentry/fsimpl/fuse.filesystemOptions"
+}
+
+func (f *filesystemOptions) StateFields() []string {
+ return []string{
+ "mopts",
+ "uid",
+ "gid",
+ "rootMode",
+ "maxActiveRequests",
+ "maxRead",
+ "defaultPermissions",
+ "allowOther",
+ }
+}
+
+func (f *filesystemOptions) beforeSave() {}
+
+// +checklocksignore
+func (f *filesystemOptions) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.mopts)
+ stateSinkObject.Save(1, &f.uid)
+ stateSinkObject.Save(2, &f.gid)
+ stateSinkObject.Save(3, &f.rootMode)
+ stateSinkObject.Save(4, &f.maxActiveRequests)
+ stateSinkObject.Save(5, &f.maxRead)
+ stateSinkObject.Save(6, &f.defaultPermissions)
+ stateSinkObject.Save(7, &f.allowOther)
+}
+
+func (f *filesystemOptions) afterLoad() {}
+
+// +checklocksignore
+func (f *filesystemOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.mopts)
+ stateSourceObject.Load(1, &f.uid)
+ stateSourceObject.Load(2, &f.gid)
+ stateSourceObject.Load(3, &f.rootMode)
+ stateSourceObject.Load(4, &f.maxActiveRequests)
+ stateSourceObject.Load(5, &f.maxRead)
+ stateSourceObject.Load(6, &f.defaultPermissions)
+ stateSourceObject.Load(7, &f.allowOther)
+}
+
+func (fs *filesystem) StateTypeName() string {
+ return "pkg/sentry/fsimpl/fuse.filesystem"
+}
+
+func (fs *filesystem) StateFields() []string {
+ return []string{
+ "Filesystem",
+ "devMinor",
+ "conn",
+ "opts",
+ "umounted",
+ }
+}
+
+func (fs *filesystem) beforeSave() {}
+
+// +checklocksignore
+func (fs *filesystem) StateSave(stateSinkObject state.Sink) {
+ fs.beforeSave()
+ stateSinkObject.Save(0, &fs.Filesystem)
+ stateSinkObject.Save(1, &fs.devMinor)
+ stateSinkObject.Save(2, &fs.conn)
+ stateSinkObject.Save(3, &fs.opts)
+ stateSinkObject.Save(4, &fs.umounted)
+}
+
+func (fs *filesystem) afterLoad() {}
+
+// +checklocksignore
+func (fs *filesystem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fs.Filesystem)
+ stateSourceObject.Load(1, &fs.devMinor)
+ stateSourceObject.Load(2, &fs.conn)
+ stateSourceObject.Load(3, &fs.opts)
+ stateSourceObject.Load(4, &fs.umounted)
+}
+
+func (i *inode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/fuse.inode"
+}
+
+func (i *inode) StateFields() []string {
+ return []string{
+ "inodeRefs",
+ "InodeAlwaysValid",
+ "InodeAttrs",
+ "InodeDirectoryNoNewChildren",
+ "InodeNotSymlink",
+ "OrderedChildren",
+ "fs",
+ "metadataMu",
+ "nodeID",
+ "locks",
+ "size",
+ "attributeVersion",
+ "attributeTime",
+ "version",
+ "link",
+ }
+}
+
+func (i *inode) beforeSave() {}
+
+// +checklocksignore
+func (i *inode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.inodeRefs)
+ stateSinkObject.Save(1, &i.InodeAlwaysValid)
+ stateSinkObject.Save(2, &i.InodeAttrs)
+ stateSinkObject.Save(3, &i.InodeDirectoryNoNewChildren)
+ stateSinkObject.Save(4, &i.InodeNotSymlink)
+ stateSinkObject.Save(5, &i.OrderedChildren)
+ stateSinkObject.Save(6, &i.fs)
+ stateSinkObject.Save(7, &i.metadataMu)
+ stateSinkObject.Save(8, &i.nodeID)
+ stateSinkObject.Save(9, &i.locks)
+ stateSinkObject.Save(10, &i.size)
+ stateSinkObject.Save(11, &i.attributeVersion)
+ stateSinkObject.Save(12, &i.attributeTime)
+ stateSinkObject.Save(13, &i.version)
+ stateSinkObject.Save(14, &i.link)
+}
+
+func (i *inode) afterLoad() {}
+
+// +checklocksignore
+func (i *inode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.inodeRefs)
+ stateSourceObject.Load(1, &i.InodeAlwaysValid)
+ stateSourceObject.Load(2, &i.InodeAttrs)
+ stateSourceObject.Load(3, &i.InodeDirectoryNoNewChildren)
+ stateSourceObject.Load(4, &i.InodeNotSymlink)
+ stateSourceObject.Load(5, &i.OrderedChildren)
+ stateSourceObject.Load(6, &i.fs)
+ stateSourceObject.Load(7, &i.metadataMu)
+ stateSourceObject.Load(8, &i.nodeID)
+ stateSourceObject.Load(9, &i.locks)
+ stateSourceObject.Load(10, &i.size)
+ stateSourceObject.Load(11, &i.attributeVersion)
+ stateSourceObject.Load(12, &i.attributeTime)
+ stateSourceObject.Load(13, &i.version)
+ stateSourceObject.Load(14, &i.link)
+}
+
+func (r *inodeRefs) StateTypeName() string {
+ return "pkg/sentry/fsimpl/fuse.inodeRefs"
+}
+
+func (r *inodeRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *inodeRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *inodeRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *inodeRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (l *requestList) StateTypeName() string {
+ return "pkg/sentry/fsimpl/fuse.requestList"
+}
+
+func (l *requestList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *requestList) beforeSave() {}
+
+// +checklocksignore
+func (l *requestList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *requestList) afterLoad() {}
+
+// +checklocksignore
+func (l *requestList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *requestEntry) StateTypeName() string {
+ return "pkg/sentry/fsimpl/fuse.requestEntry"
+}
+
+func (e *requestEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *requestEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *requestEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *requestEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *requestEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func (r *Request) StateTypeName() string {
+ return "pkg/sentry/fsimpl/fuse.Request"
+}
+
+func (r *Request) StateFields() []string {
+ return []string{
+ "requestEntry",
+ "id",
+ "hdr",
+ "data",
+ "payload",
+ "async",
+ "noReply",
+ }
+}
+
+func (r *Request) beforeSave() {}
+
+// +checklocksignore
+func (r *Request) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.requestEntry)
+ stateSinkObject.Save(1, &r.id)
+ stateSinkObject.Save(2, &r.hdr)
+ stateSinkObject.Save(3, &r.data)
+ stateSinkObject.Save(4, &r.payload)
+ stateSinkObject.Save(5, &r.async)
+ stateSinkObject.Save(6, &r.noReply)
+}
+
+func (r *Request) afterLoad() {}
+
+// +checklocksignore
+func (r *Request) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.requestEntry)
+ stateSourceObject.Load(1, &r.id)
+ stateSourceObject.Load(2, &r.hdr)
+ stateSourceObject.Load(3, &r.data)
+ stateSourceObject.Load(4, &r.payload)
+ stateSourceObject.Load(5, &r.async)
+ stateSourceObject.Load(6, &r.noReply)
+}
+
+func (f *futureResponse) StateTypeName() string {
+ return "pkg/sentry/fsimpl/fuse.futureResponse"
+}
+
+func (f *futureResponse) StateFields() []string {
+ return []string{
+ "opcode",
+ "ch",
+ "hdr",
+ "data",
+ "async",
+ }
+}
+
+func (f *futureResponse) beforeSave() {}
+
+// +checklocksignore
+func (f *futureResponse) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.opcode)
+ stateSinkObject.Save(1, &f.ch)
+ stateSinkObject.Save(2, &f.hdr)
+ stateSinkObject.Save(3, &f.data)
+ stateSinkObject.Save(4, &f.async)
+}
+
+func (f *futureResponse) afterLoad() {}
+
+// +checklocksignore
+func (f *futureResponse) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.opcode)
+ stateSourceObject.Load(1, &f.ch)
+ stateSourceObject.Load(2, &f.hdr)
+ stateSourceObject.Load(3, &f.data)
+ stateSourceObject.Load(4, &f.async)
+}
+
+func (r *Response) StateTypeName() string {
+ return "pkg/sentry/fsimpl/fuse.Response"
+}
+
+func (r *Response) StateFields() []string {
+ return []string{
+ "opcode",
+ "hdr",
+ "data",
+ }
+}
+
+func (r *Response) beforeSave() {}
+
+// +checklocksignore
+func (r *Response) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.opcode)
+ stateSinkObject.Save(1, &r.hdr)
+ stateSinkObject.Save(2, &r.data)
+}
+
+func (r *Response) afterLoad() {}
+
+// +checklocksignore
+func (r *Response) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.opcode)
+ stateSourceObject.Load(1, &r.hdr)
+ stateSourceObject.Load(2, &r.data)
+}
+
+func init() {
+ state.Register((*connection)(nil))
+ state.Register((*fuseDevice)(nil))
+ state.Register((*DeviceFD)(nil))
+ state.Register((*FilesystemType)(nil))
+ state.Register((*filesystemOptions)(nil))
+ state.Register((*filesystem)(nil))
+ state.Register((*inode)(nil))
+ state.Register((*inodeRefs)(nil))
+ state.Register((*requestList)(nil))
+ state.Register((*requestEntry)(nil))
+ state.Register((*Request)(nil))
+ state.Register((*futureResponse)(nil))
+ state.Register((*Response)(nil))
+}
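
Editor's note: the generated file above follows the stateify pattern — each savable type reports a stable type name and an ordered field list, then serializes fields by index, falling back to SaveValue/LoadValue for fields (such as the initialized channel) that need a custom on-disk representation. The following is a minimal hand-written sketch of the same shape for an invented `counter` type; it is illustrative only, reuses the state.Sink/state.Source calls seen above, and is not part of the commit.

    package fuse

    import "gvisor.dev/gvisor/pkg/state"

    // counter is a hypothetical savable type used only to illustrate the
    // stateify pattern in the generated code above.
    type counter struct {
        n  uint64
        ch chan struct{} // not directly serializable; saved as a bool below.
    }

    func (c *counter) StateTypeName() string { return "pkg/sentry/fsimpl/fuse.counter" }

    func (c *counter) StateFields() []string { return []string{"n", "ch"} }

    func (c *counter) beforeSave() {}

    func (c *counter) StateSave(sink state.Sink) {
        c.beforeSave()
        // Channels cannot be saved directly; record whether ch was open.
        sink.SaveValue(1, c.ch != nil)
        sink.Save(0, &c.n)
    }

    func (c *counter) StateLoad(source state.Source) {
        source.Load(0, &c.n)
        source.LoadValue(1, new(bool), func(y interface{}) {
            if y.(bool) {
                c.ch = make(chan struct{})
            }
        })
    }

    func init() {
        state.Register((*counter)(nil))
    }
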
diff --git a/pkg/sentry/fsimpl/fuse/inode_refs.go b/pkg/sentry/fsimpl/fuse/inode_refs.go
new file mode 100644
index 000000000..74489cf5e
--- /dev/null
+++ b/pkg/sentry/fsimpl/fuse/inode_refs.go
@@ -0,0 +1,140 @@
+package fuse
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const inodeenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var inodeobj *inode
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type inodeRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *inodeRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *inodeRefs) RefType() string {
+ return fmt.Sprintf("%T", inodeobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *inodeRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *inodeRefs) LogRefs() bool {
+ return inodeenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *inodeRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *inodeRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if inodeenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.TryRefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *inodeRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if inodeenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *inodeRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if inodeenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *inodeRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
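
Editor's note: the generated inodeRefs type is meant to be embedded in the object whose lifetime it tracks. The sketch below shows the InitRefs/IncRef/TryIncRef/DecRef lifecycle using a hypothetical `demoObject`; only the inodeRefs API shown in the file above is assumed, everything else is invented for illustration.

    package fuse

    // demoObject is a hypothetical embedder of inodeRefs.
    type demoObject struct {
        inodeRefs // provides IncRef, TryIncRef, DecRef, ReadRefs.
    }

    func newDemoObject() *demoObject {
        d := &demoObject{}
        d.InitRefs() // one initial reference; registers for leak checking.
        return d
    }

    func demoLifecycle() {
        d := newDemoObject()
        d.IncRef() // second real reference.
        if d.TryIncRef() {
            // Third reference acquired without a CompareAndSwap loop.
            d.DecRef(nil)
        }
        d.DecRef(nil)
        // Dropping the last reference runs the destroy callback.
        d.DecRef(func() { /* release resources here */ })
    }
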
diff --git a/pkg/sentry/fsimpl/fuse/request_list.go b/pkg/sentry/fsimpl/fuse/request_list.go
new file mode 100644
index 000000000..060ac4a3f
--- /dev/null
+++ b/pkg/sentry/fsimpl/fuse/request_list.go
@@ -0,0 +1,221 @@
+package fuse
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type requestElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (requestElementMapper) linkerFor(elem *Request) *Request { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type requestList struct {
+ head *Request
+ tail *Request
+}
+
+// Reset resets list l to the empty state.
+func (l *requestList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *requestList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *requestList) Front() *Request {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *requestList) Back() *Request {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *requestList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (requestElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *requestList) PushFront(e *Request) {
+ linker := requestElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ requestElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *requestList) PushBack(e *Request) {
+ linker := requestElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ requestElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *requestList) PushBackList(m *requestList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ requestElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ requestElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *requestList) InsertAfter(b, e *Request) {
+ bLinker := requestElementMapper{}.linkerFor(b)
+ eLinker := requestElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ requestElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *requestList) InsertBefore(a, e *Request) {
+ aLinker := requestElementMapper{}.linkerFor(a)
+ eLinker := requestElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ requestElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *requestList) Remove(e *Request) {
+ linker := requestElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ requestElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ requestElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type requestEntry struct {
+ next *Request
+ prev *Request
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *requestEntry) Next() *Request {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *requestEntry) Prev() *Request {
+ return e.prev
+}
+
+// SetNext assigns 'entry' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *requestEntry) SetNext(elem *Request) {
+ e.next = elem
+}
+
+// SetPrev assigns 'entry' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *requestEntry) SetPrev(elem *Request) {
+ e.prev = elem
+}
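
Editor's note: requestList is an intrusive list, so enqueueing a Request links it through its embedded requestEntry rather than allocating a node. A small usage sketch (not part of the commit), relying only on the methods defined in the file above:

    package fuse

    // enqueue appends r to the pending queue in O(1) with no allocation.
    func enqueue(q *requestList, r *Request) {
        q.PushBack(r)
    }

    // drainQueue unlinks and handles every pending request, front to back.
    func drainQueue(q *requestList, handle func(*Request)) {
        for !q.Empty() {
            r := q.Front()
            q.Remove(r) // O(1) unlink via the embedded requestEntry.
            handle(r)
        }
    }
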
diff --git a/pkg/sentry/fsimpl/fuse/utils_test.go b/pkg/sentry/fsimpl/fuse/utils_test.go
deleted file mode 100644
index b0bab0066..000000000
--- a/pkg/sentry/fsimpl/fuse/utils_test.go
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fuse
-
-import (
- "io"
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/marshal"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/testutil"
- "gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
-
- "gvisor.dev/gvisor/pkg/hostarch"
-)
-
-func setup(t *testing.T) *testutil.System {
- k, err := testutil.Boot()
- if err != nil {
- t.Fatalf("Error creating kernel: %v", err)
- }
-
- ctx := k.SupervisorContext()
- creds := auth.CredentialsFromContext(ctx)
-
- k.VFS().MustRegisterFilesystemType(Name, &FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserList: true,
- AllowUserMount: true,
- })
-
- mntns, err := k.VFS().NewMountNamespace(ctx, creds, "", "tmpfs", &vfs.MountOptions{})
- if err != nil {
- t.Fatalf("NewMountNamespace(): %v", err)
- }
-
- return testutil.NewSystem(ctx, t, k.VFS(), mntns)
-}
-
-// newTestConnection creates a fuse connection that the sentry can communicate with
-// and the FD for the server to communicate with.
-func newTestConnection(system *testutil.System, k *kernel.Kernel, maxActiveRequests uint64) (*connection, *vfs.FileDescription, error) {
- fuseDev := &DeviceFD{}
-
- vd := system.VFS.NewAnonVirtualDentry("fuse")
- defer vd.DecRef(system.Ctx)
- if err := fuseDev.vfsfd.Init(fuseDev, linux.O_RDWR, vd.Mount(), vd.Dentry(), &vfs.FileDescriptionOptions{}); err != nil {
- return nil, nil, err
- }
-
- fsopts := filesystemOptions{
- maxActiveRequests: maxActiveRequests,
- }
- fs, err := newFUSEFilesystem(system.Ctx, system.VFS, &FilesystemType{}, fuseDev, 0, &fsopts)
- if err != nil {
- return nil, nil, err
- }
- return fs.conn, &fuseDev.vfsfd, nil
-}
-
-type testPayload struct {
- marshal.StubMarshallable
- data uint32
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (t *testPayload) SizeBytes() int {
- return 4
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (t *testPayload) MarshalBytes(dst []byte) {
- hostarch.ByteOrder.PutUint32(dst[:4], t.data)
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (t *testPayload) UnmarshalBytes(src []byte) {
- *t = testPayload{data: hostarch.ByteOrder.Uint32(src[:4])}
-}
-
-// Packed implements marshal.Marshallable.Packed.
-func (t *testPayload) Packed() bool {
- return true
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (t *testPayload) MarshalUnsafe(dst []byte) {
- t.MarshalBytes(dst)
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (t *testPayload) UnmarshalUnsafe(src []byte) {
- t.UnmarshalBytes(src)
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-func (t *testPayload) CopyOutN(task marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
- panic("not implemented")
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-func (t *testPayload) CopyOut(task marshal.CopyContext, addr hostarch.Addr) (int, error) {
- panic("not implemented")
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-func (t *testPayload) CopyIn(task marshal.CopyContext, addr hostarch.Addr) (int, error) {
- panic("not implemented")
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (t *testPayload) WriteTo(w io.Writer) (int64, error) {
- panic("not implemented")
-}
diff --git a/pkg/sentry/fsimpl/gofer/BUILD b/pkg/sentry/fsimpl/gofer/BUILD
deleted file mode 100644
index 752060044..000000000
--- a/pkg/sentry/fsimpl/gofer/BUILD
+++ /dev/null
@@ -1,99 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-licenses(["notice"])
-
-go_template_instance(
- name = "dentry_list",
- out = "dentry_list.go",
- package = "gofer",
- prefix = "dentry",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*dentry",
- "Linker": "*dentry",
- },
-)
-
-go_template_instance(
- name = "fstree",
- out = "fstree.go",
- package = "gofer",
- prefix = "generic",
- template = "//pkg/sentry/vfs/genericfstree:generic_fstree",
- types = {
- "Dentry": "dentry",
- },
-)
-
-go_library(
- name = "gofer",
- srcs = [
- "dentry_list.go",
- "directory.go",
- "filesystem.go",
- "fstree.go",
- "gofer.go",
- "handle.go",
- "host_named_pipe.go",
- "p9file.go",
- "regular_file.go",
- "revalidate.go",
- "save_restore.go",
- "socket.go",
- "special_file.go",
- "symlink.go",
- "time.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/fd",
- "//pkg/fdnotifier",
- "//pkg/fspath",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/metric",
- "//pkg/p9",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/safemem",
- "//pkg/sentry/fs/fsutil",
- "//pkg/sentry/fs/lock",
- "//pkg/sentry/fsimpl/host",
- "//pkg/sentry/fsmetric",
- "//pkg/sentry/hostfd",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/pipe",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/memmap",
- "//pkg/sentry/pgalloc",
- "//pkg/sentry/platform",
- "//pkg/sentry/socket/control",
- "//pkg/sentry/socket/unix",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/usage",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/syserr",
- "//pkg/syserror",
- "//pkg/unet",
- "//pkg/usermem",
- "//pkg/waiter",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "gofer_test",
- srcs = ["gofer_test.go"],
- library = ":gofer",
- deps = [
- "//pkg/p9",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/pgalloc",
- ],
-)
diff --git a/pkg/sentry/fsimpl/gofer/dentry_list.go b/pkg/sentry/fsimpl/gofer/dentry_list.go
new file mode 100644
index 000000000..2e43b8e02
--- /dev/null
+++ b/pkg/sentry/fsimpl/gofer/dentry_list.go
@@ -0,0 +1,221 @@
+package gofer
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type dentryElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (dentryElementMapper) linkerFor(elem *dentry) *dentry { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type dentryList struct {
+ head *dentry
+ tail *dentry
+}
+
+// Reset resets list l to the empty state.
+func (l *dentryList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *dentryList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *dentryList) Front() *dentry {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *dentryList) Back() *dentry {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *dentryList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (dentryElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *dentryList) PushFront(e *dentry) {
+ linker := dentryElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ dentryElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *dentryList) PushBack(e *dentry) {
+ linker := dentryElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ dentryElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *dentryList) PushBackList(m *dentryList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ dentryElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ dentryElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *dentryList) InsertAfter(b, e *dentry) {
+ bLinker := dentryElementMapper{}.linkerFor(b)
+ eLinker := dentryElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ dentryElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *dentryList) InsertBefore(a, e *dentry) {
+ aLinker := dentryElementMapper{}.linkerFor(a)
+ eLinker := dentryElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ dentryElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *dentryList) Remove(e *dentry) {
+ linker := dentryElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ dentryElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ dentryElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type dentryEntry struct {
+ next *dentry
+ prev *dentry
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *dentryEntry) Next() *dentry {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *dentryEntry) Prev() *dentry {
+ return e.prev
+}
+
+// SetNext assigns 'entry' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *dentryEntry) SetNext(elem *dentry) {
+ e.next = elem
+}
+
+// SetPrev assigns 'entry' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *dentryEntry) SetPrev(elem *dentry) {
+ e.prev = elem
+}
diff --git a/pkg/sentry/fsimpl/gofer/fstree.go b/pkg/sentry/fsimpl/gofer/fstree.go
new file mode 100644
index 000000000..6e43d4a4b
--- /dev/null
+++ b/pkg/sentry/fsimpl/gofer/fstree.go
@@ -0,0 +1,55 @@
+package gofer
+
+import (
+ "gvisor.dev/gvisor/pkg/fspath"
+ "gvisor.dev/gvisor/pkg/sentry/vfs"
+)
+
+// IsAncestorDentry returns true if d is an ancestor of d2; that is, d is
+// either d2's parent or an ancestor of d2's parent.
+func genericIsAncestorDentry(d, d2 *dentry) bool {
+ for d2 != nil {
+ if d2.parent == d {
+ return true
+ }
+ if d2.parent == d2 {
+ return false
+ }
+ d2 = d2.parent
+ }
+ return false
+}
+
+// ParentOrSelf returns d.parent. If d.parent is nil, ParentOrSelf returns d.
+func genericParentOrSelf(d *dentry) *dentry {
+ if d.parent != nil {
+ return d.parent
+ }
+ return d
+}
+
+// PrependPath is a generic implementation of FilesystemImpl.PrependPath().
+func genericPrependPath(vfsroot vfs.VirtualDentry, mnt *vfs.Mount, d *dentry, b *fspath.Builder) error {
+ for {
+ if mnt == vfsroot.Mount() && &d.vfsd == vfsroot.Dentry() {
+ return vfs.PrependPathAtVFSRootError{}
+ }
+ if mnt != nil && &d.vfsd == mnt.Root() {
+ return nil
+ }
+ if d.parent == nil {
+ return vfs.PrependPathAtNonMountRootError{}
+ }
+ b.PrependComponent(d.name)
+ d = d.parent
+ }
+}
+
+// DebugPathname returns a pathname to d relative to its filesystem root.
+// DebugPathname does not correspond to any Linux function; it's used to
+// generate dentry pathnames for debugging.
+func genericDebugPathname(d *dentry) string {
+ var b fspath.Builder
+ _ = genericPrependPath(vfs.VirtualDentry{}, nil, d, &b)
+ return b.String()
+}
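
Editor's note: the fstree helpers above are typically consulted when validating operations that move dentries, e.g. rejecting a rename that would place a directory under its own descendant. The function below is a hypothetical sketch (not part of the commit); it assumes only the dentry type and the generic helpers defined in this file.

    package gofer

    import "fmt"

    func checkRenameTarget(oldD, newParent *dentry) error {
        // A directory must not be moved under one of its own descendants.
        if genericIsAncestorDentry(oldD, newParent) {
            return fmt.Errorf("cannot move %q under its descendant %q",
                genericDebugPathname(oldD), genericDebugPathname(newParent))
        }
        return nil
    }
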
diff --git a/pkg/sentry/fsimpl/gofer/gofer_state_autogen.go b/pkg/sentry/fsimpl/gofer/gofer_state_autogen.go
new file mode 100644
index 000000000..11635f249
--- /dev/null
+++ b/pkg/sentry/fsimpl/gofer/gofer_state_autogen.go
@@ -0,0 +1,608 @@
+// automatically generated by stateify.
+
+package gofer
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (l *dentryList) StateTypeName() string {
+ return "pkg/sentry/fsimpl/gofer.dentryList"
+}
+
+func (l *dentryList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *dentryList) beforeSave() {}
+
+// +checklocksignore
+func (l *dentryList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *dentryList) afterLoad() {}
+
+// +checklocksignore
+func (l *dentryList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *dentryEntry) StateTypeName() string {
+ return "pkg/sentry/fsimpl/gofer.dentryEntry"
+}
+
+func (e *dentryEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *dentryEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *dentryEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *dentryEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *dentryEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func (fd *directoryFD) StateTypeName() string {
+ return "pkg/sentry/fsimpl/gofer.directoryFD"
+}
+
+func (fd *directoryFD) StateFields() []string {
+ return []string{
+ "fileDescription",
+ "DirectoryFileDescriptionDefaultImpl",
+ "off",
+ "dirents",
+ }
+}
+
+func (fd *directoryFD) beforeSave() {}
+
+// +checklocksignore
+func (fd *directoryFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.fileDescription)
+ stateSinkObject.Save(1, &fd.DirectoryFileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &fd.off)
+ stateSinkObject.Save(3, &fd.dirents)
+}
+
+func (fd *directoryFD) afterLoad() {}
+
+// +checklocksignore
+func (fd *directoryFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.fileDescription)
+ stateSourceObject.Load(1, &fd.DirectoryFileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &fd.off)
+ stateSourceObject.Load(3, &fd.dirents)
+}
+
+func (fstype *FilesystemType) StateTypeName() string {
+ return "pkg/sentry/fsimpl/gofer.FilesystemType"
+}
+
+func (fstype *FilesystemType) StateFields() []string {
+ return []string{}
+}
+
+func (fstype *FilesystemType) beforeSave() {}
+
+// +checklocksignore
+func (fstype *FilesystemType) StateSave(stateSinkObject state.Sink) {
+ fstype.beforeSave()
+}
+
+func (fstype *FilesystemType) afterLoad() {}
+
+// +checklocksignore
+func (fstype *FilesystemType) StateLoad(stateSourceObject state.Source) {
+}
+
+func (fs *filesystem) StateTypeName() string {
+ return "pkg/sentry/fsimpl/gofer.filesystem"
+}
+
+func (fs *filesystem) StateFields() []string {
+ return []string{
+ "vfsfs",
+ "mfp",
+ "opts",
+ "iopts",
+ "clock",
+ "devMinor",
+ "root",
+ "cachedDentries",
+ "cachedDentriesLen",
+ "syncableDentries",
+ "specialFileFDs",
+ "lastIno",
+ "savedDentryRW",
+ "released",
+ }
+}
+
+func (fs *filesystem) beforeSave() {}
+
+// +checklocksignore
+func (fs *filesystem) StateSave(stateSinkObject state.Sink) {
+ fs.beforeSave()
+ stateSinkObject.Save(0, &fs.vfsfs)
+ stateSinkObject.Save(1, &fs.mfp)
+ stateSinkObject.Save(2, &fs.opts)
+ stateSinkObject.Save(3, &fs.iopts)
+ stateSinkObject.Save(4, &fs.clock)
+ stateSinkObject.Save(5, &fs.devMinor)
+ stateSinkObject.Save(6, &fs.root)
+ stateSinkObject.Save(7, &fs.cachedDentries)
+ stateSinkObject.Save(8, &fs.cachedDentriesLen)
+ stateSinkObject.Save(9, &fs.syncableDentries)
+ stateSinkObject.Save(10, &fs.specialFileFDs)
+ stateSinkObject.Save(11, &fs.lastIno)
+ stateSinkObject.Save(12, &fs.savedDentryRW)
+ stateSinkObject.Save(13, &fs.released)
+}
+
+func (fs *filesystem) afterLoad() {}
+
+// +checklocksignore
+func (fs *filesystem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fs.vfsfs)
+ stateSourceObject.Load(1, &fs.mfp)
+ stateSourceObject.Load(2, &fs.opts)
+ stateSourceObject.Load(3, &fs.iopts)
+ stateSourceObject.Load(4, &fs.clock)
+ stateSourceObject.Load(5, &fs.devMinor)
+ stateSourceObject.Load(6, &fs.root)
+ stateSourceObject.Load(7, &fs.cachedDentries)
+ stateSourceObject.Load(8, &fs.cachedDentriesLen)
+ stateSourceObject.Load(9, &fs.syncableDentries)
+ stateSourceObject.Load(10, &fs.specialFileFDs)
+ stateSourceObject.Load(11, &fs.lastIno)
+ stateSourceObject.Load(12, &fs.savedDentryRW)
+ stateSourceObject.Load(13, &fs.released)
+}
+
+func (f *filesystemOptions) StateTypeName() string {
+ return "pkg/sentry/fsimpl/gofer.filesystemOptions"
+}
+
+func (f *filesystemOptions) StateFields() []string {
+ return []string{
+ "fd",
+ "aname",
+ "interop",
+ "dfltuid",
+ "dfltgid",
+ "msize",
+ "version",
+ "maxCachedDentries",
+ "forcePageCache",
+ "limitHostFDTranslation",
+ "overlayfsStaleRead",
+ "regularFilesUseSpecialFileFD",
+ }
+}
+
+func (f *filesystemOptions) beforeSave() {}
+
+// +checklocksignore
+func (f *filesystemOptions) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.fd)
+ stateSinkObject.Save(1, &f.aname)
+ stateSinkObject.Save(2, &f.interop)
+ stateSinkObject.Save(3, &f.dfltuid)
+ stateSinkObject.Save(4, &f.dfltgid)
+ stateSinkObject.Save(5, &f.msize)
+ stateSinkObject.Save(6, &f.version)
+ stateSinkObject.Save(7, &f.maxCachedDentries)
+ stateSinkObject.Save(8, &f.forcePageCache)
+ stateSinkObject.Save(9, &f.limitHostFDTranslation)
+ stateSinkObject.Save(10, &f.overlayfsStaleRead)
+ stateSinkObject.Save(11, &f.regularFilesUseSpecialFileFD)
+}
+
+func (f *filesystemOptions) afterLoad() {}
+
+// +checklocksignore
+func (f *filesystemOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.fd)
+ stateSourceObject.Load(1, &f.aname)
+ stateSourceObject.Load(2, &f.interop)
+ stateSourceObject.Load(3, &f.dfltuid)
+ stateSourceObject.Load(4, &f.dfltgid)
+ stateSourceObject.Load(5, &f.msize)
+ stateSourceObject.Load(6, &f.version)
+ stateSourceObject.Load(7, &f.maxCachedDentries)
+ stateSourceObject.Load(8, &f.forcePageCache)
+ stateSourceObject.Load(9, &f.limitHostFDTranslation)
+ stateSourceObject.Load(10, &f.overlayfsStaleRead)
+ stateSourceObject.Load(11, &f.regularFilesUseSpecialFileFD)
+}
+
+func (i *InteropMode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/gofer.InteropMode"
+}
+
+func (i *InteropMode) StateFields() []string {
+ return nil
+}
+
+func (i *InternalFilesystemOptions) StateTypeName() string {
+ return "pkg/sentry/fsimpl/gofer.InternalFilesystemOptions"
+}
+
+func (i *InternalFilesystemOptions) StateFields() []string {
+ return []string{
+ "UniqueID",
+ "LeakConnection",
+ "OpenSocketsByConnecting",
+ }
+}
+
+func (i *InternalFilesystemOptions) beforeSave() {}
+
+// +checklocksignore
+func (i *InternalFilesystemOptions) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.UniqueID)
+ stateSinkObject.Save(1, &i.LeakConnection)
+ stateSinkObject.Save(2, &i.OpenSocketsByConnecting)
+}
+
+func (i *InternalFilesystemOptions) afterLoad() {}
+
+// +checklocksignore
+func (i *InternalFilesystemOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.UniqueID)
+ stateSourceObject.Load(1, &i.LeakConnection)
+ stateSourceObject.Load(2, &i.OpenSocketsByConnecting)
+}
+
+func (d *dentry) StateTypeName() string {
+ return "pkg/sentry/fsimpl/gofer.dentry"
+}
+
+func (d *dentry) StateFields() []string {
+ return []string{
+ "vfsd",
+ "refs",
+ "fs",
+ "parent",
+ "name",
+ "qidPath",
+ "deleted",
+ "cached",
+ "dentryEntry",
+ "children",
+ "syntheticChildren",
+ "dirents",
+ "ino",
+ "mode",
+ "uid",
+ "gid",
+ "blockSize",
+ "atime",
+ "mtime",
+ "ctime",
+ "btime",
+ "size",
+ "atimeDirty",
+ "mtimeDirty",
+ "nlink",
+ "mappings",
+ "cache",
+ "dirty",
+ "pf",
+ "haveTarget",
+ "target",
+ "endpoint",
+ "pipe",
+ "locks",
+ "watches",
+ }
+}
+
+// +checklocksignore
+func (d *dentry) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.vfsd)
+ stateSinkObject.Save(1, &d.refs)
+ stateSinkObject.Save(2, &d.fs)
+ stateSinkObject.Save(3, &d.parent)
+ stateSinkObject.Save(4, &d.name)
+ stateSinkObject.Save(5, &d.qidPath)
+ stateSinkObject.Save(6, &d.deleted)
+ stateSinkObject.Save(7, &d.cached)
+ stateSinkObject.Save(8, &d.dentryEntry)
+ stateSinkObject.Save(9, &d.children)
+ stateSinkObject.Save(10, &d.syntheticChildren)
+ stateSinkObject.Save(11, &d.dirents)
+ stateSinkObject.Save(12, &d.ino)
+ stateSinkObject.Save(13, &d.mode)
+ stateSinkObject.Save(14, &d.uid)
+ stateSinkObject.Save(15, &d.gid)
+ stateSinkObject.Save(16, &d.blockSize)
+ stateSinkObject.Save(17, &d.atime)
+ stateSinkObject.Save(18, &d.mtime)
+ stateSinkObject.Save(19, &d.ctime)
+ stateSinkObject.Save(20, &d.btime)
+ stateSinkObject.Save(21, &d.size)
+ stateSinkObject.Save(22, &d.atimeDirty)
+ stateSinkObject.Save(23, &d.mtimeDirty)
+ stateSinkObject.Save(24, &d.nlink)
+ stateSinkObject.Save(25, &d.mappings)
+ stateSinkObject.Save(26, &d.cache)
+ stateSinkObject.Save(27, &d.dirty)
+ stateSinkObject.Save(28, &d.pf)
+ stateSinkObject.Save(29, &d.haveTarget)
+ stateSinkObject.Save(30, &d.target)
+ stateSinkObject.Save(31, &d.endpoint)
+ stateSinkObject.Save(32, &d.pipe)
+ stateSinkObject.Save(33, &d.locks)
+ stateSinkObject.Save(34, &d.watches)
+}
+
+// +checklocksignore
+func (d *dentry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.vfsd)
+ stateSourceObject.Load(1, &d.refs)
+ stateSourceObject.Load(2, &d.fs)
+ stateSourceObject.Load(3, &d.parent)
+ stateSourceObject.Load(4, &d.name)
+ stateSourceObject.Load(5, &d.qidPath)
+ stateSourceObject.Load(6, &d.deleted)
+ stateSourceObject.Load(7, &d.cached)
+ stateSourceObject.Load(8, &d.dentryEntry)
+ stateSourceObject.Load(9, &d.children)
+ stateSourceObject.Load(10, &d.syntheticChildren)
+ stateSourceObject.Load(11, &d.dirents)
+ stateSourceObject.Load(12, &d.ino)
+ stateSourceObject.Load(13, &d.mode)
+ stateSourceObject.Load(14, &d.uid)
+ stateSourceObject.Load(15, &d.gid)
+ stateSourceObject.Load(16, &d.blockSize)
+ stateSourceObject.Load(17, &d.atime)
+ stateSourceObject.Load(18, &d.mtime)
+ stateSourceObject.Load(19, &d.ctime)
+ stateSourceObject.Load(20, &d.btime)
+ stateSourceObject.Load(21, &d.size)
+ stateSourceObject.Load(22, &d.atimeDirty)
+ stateSourceObject.Load(23, &d.mtimeDirty)
+ stateSourceObject.Load(24, &d.nlink)
+ stateSourceObject.Load(25, &d.mappings)
+ stateSourceObject.Load(26, &d.cache)
+ stateSourceObject.Load(27, &d.dirty)
+ stateSourceObject.Load(28, &d.pf)
+ stateSourceObject.Load(29, &d.haveTarget)
+ stateSourceObject.Load(30, &d.target)
+ stateSourceObject.Load(31, &d.endpoint)
+ stateSourceObject.Load(32, &d.pipe)
+ stateSourceObject.Load(33, &d.locks)
+ stateSourceObject.Load(34, &d.watches)
+ stateSourceObject.AfterLoad(d.afterLoad)
+}
+
+func (fd *fileDescription) StateTypeName() string {
+ return "pkg/sentry/fsimpl/gofer.fileDescription"
+}
+
+func (fd *fileDescription) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "LockFD",
+ }
+}
+
+func (fd *fileDescription) beforeSave() {}
+
+// +checklocksignore
+func (fd *fileDescription) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.vfsfd)
+ stateSinkObject.Save(1, &fd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &fd.LockFD)
+}
+
+func (fd *fileDescription) afterLoad() {}
+
+// +checklocksignore
+func (fd *fileDescription) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.vfsfd)
+ stateSourceObject.Load(1, &fd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &fd.LockFD)
+}
+
+func (fd *regularFileFD) StateTypeName() string {
+ return "pkg/sentry/fsimpl/gofer.regularFileFD"
+}
+
+func (fd *regularFileFD) StateFields() []string {
+ return []string{
+ "fileDescription",
+ "off",
+ }
+}
+
+func (fd *regularFileFD) beforeSave() {}
+
+// +checklocksignore
+func (fd *regularFileFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.fileDescription)
+ stateSinkObject.Save(1, &fd.off)
+}
+
+func (fd *regularFileFD) afterLoad() {}
+
+// +checklocksignore
+func (fd *regularFileFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.fileDescription)
+ stateSourceObject.Load(1, &fd.off)
+}
+
+func (d *dentryPlatformFile) StateTypeName() string {
+ return "pkg/sentry/fsimpl/gofer.dentryPlatformFile"
+}
+
+func (d *dentryPlatformFile) StateFields() []string {
+ return []string{
+ "dentry",
+ "fdRefs",
+ "hostFileMapper",
+ }
+}
+
+func (d *dentryPlatformFile) beforeSave() {}
+
+// +checklocksignore
+func (d *dentryPlatformFile) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.dentry)
+ stateSinkObject.Save(1, &d.fdRefs)
+ stateSinkObject.Save(2, &d.hostFileMapper)
+}
+
+// +checklocksignore
+func (d *dentryPlatformFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.dentry)
+ stateSourceObject.Load(1, &d.fdRefs)
+ stateSourceObject.Load(2, &d.hostFileMapper)
+ stateSourceObject.AfterLoad(d.afterLoad)
+}
+
+func (s *savedDentryRW) StateTypeName() string {
+ return "pkg/sentry/fsimpl/gofer.savedDentryRW"
+}
+
+func (s *savedDentryRW) StateFields() []string {
+ return []string{
+ "read",
+ "write",
+ }
+}
+
+func (s *savedDentryRW) beforeSave() {}
+
+// +checklocksignore
+func (s *savedDentryRW) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.read)
+ stateSinkObject.Save(1, &s.write)
+}
+
+func (s *savedDentryRW) afterLoad() {}
+
+// +checklocksignore
+func (s *savedDentryRW) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.read)
+ stateSourceObject.Load(1, &s.write)
+}
+
+func (e *endpoint) StateTypeName() string {
+ return "pkg/sentry/fsimpl/gofer.endpoint"
+}
+
+func (e *endpoint) StateFields() []string {
+ return []string{
+ "dentry",
+ "path",
+ }
+}
+
+func (e *endpoint) beforeSave() {}
+
+// +checklocksignore
+func (e *endpoint) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.dentry)
+ stateSinkObject.Save(1, &e.path)
+}
+
+func (e *endpoint) afterLoad() {}
+
+// +checklocksignore
+func (e *endpoint) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.dentry)
+ stateSourceObject.Load(1, &e.path)
+}
+
+func (fd *specialFileFD) StateTypeName() string {
+ return "pkg/sentry/fsimpl/gofer.specialFileFD"
+}
+
+func (fd *specialFileFD) StateFields() []string {
+ return []string{
+ "fileDescription",
+ "isRegularFile",
+ "seekable",
+ "queue",
+ "off",
+ "haveBuf",
+ "buf",
+ }
+}
+
+func (fd *specialFileFD) beforeSave() {}
+
+// +checklocksignore
+func (fd *specialFileFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.fileDescription)
+ stateSinkObject.Save(1, &fd.isRegularFile)
+ stateSinkObject.Save(2, &fd.seekable)
+ stateSinkObject.Save(3, &fd.queue)
+ stateSinkObject.Save(4, &fd.off)
+ stateSinkObject.Save(5, &fd.haveBuf)
+ stateSinkObject.Save(6, &fd.buf)
+}
+
+// +checklocksignore
+func (fd *specialFileFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.fileDescription)
+ stateSourceObject.Load(1, &fd.isRegularFile)
+ stateSourceObject.Load(2, &fd.seekable)
+ stateSourceObject.Load(3, &fd.queue)
+ stateSourceObject.Load(4, &fd.off)
+ stateSourceObject.Load(5, &fd.haveBuf)
+ stateSourceObject.Load(6, &fd.buf)
+ stateSourceObject.AfterLoad(fd.afterLoad)
+}
+
+func init() {
+ state.Register((*dentryList)(nil))
+ state.Register((*dentryEntry)(nil))
+ state.Register((*directoryFD)(nil))
+ state.Register((*FilesystemType)(nil))
+ state.Register((*filesystem)(nil))
+ state.Register((*filesystemOptions)(nil))
+ state.Register((*InteropMode)(nil))
+ state.Register((*InternalFilesystemOptions)(nil))
+ state.Register((*dentry)(nil))
+ state.Register((*fileDescription)(nil))
+ state.Register((*regularFileFD)(nil))
+ state.Register((*dentryPlatformFile)(nil))
+ state.Register((*savedDentryRW)(nil))
+ state.Register((*endpoint)(nil))
+ state.Register((*specialFileFD)(nil))
+}
diff --git a/pkg/sentry/fsimpl/gofer/gofer_test.go b/pkg/sentry/fsimpl/gofer/gofer_test.go
deleted file mode 100644
index 806392d50..000000000
--- a/pkg/sentry/fsimpl/gofer/gofer_test.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package gofer
-
-import (
- "sync/atomic"
- "testing"
-
- "gvisor.dev/gvisor/pkg/p9"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/pgalloc"
-)
-
-func TestDestroyIdempotent(t *testing.T) {
- ctx := contexttest.Context(t)
- fs := filesystem{
- mfp: pgalloc.MemoryFileProviderFromContext(ctx),
- opts: filesystemOptions{
- // Test relies on no dentry being held in the cache.
- maxCachedDentries: 0,
- },
- syncableDentries: make(map[*dentry]struct{}),
- inoByQIDPath: make(map[uint64]uint64),
- }
-
- attr := &p9.Attr{
- Mode: p9.ModeRegular,
- }
- mask := p9.AttrMask{
- Mode: true,
- Size: true,
- }
- parent, err := fs.newDentry(ctx, p9file{}, p9.QID{}, mask, attr)
- if err != nil {
- t.Fatalf("fs.newDentry(): %v", err)
- }
-
- child, err := fs.newDentry(ctx, p9file{}, p9.QID{}, mask, attr)
- if err != nil {
- t.Fatalf("fs.newDentry(): %v", err)
- }
- parent.cacheNewChildLocked(child, "child")
-
- fs.renameMu.Lock()
- defer fs.renameMu.Unlock()
- child.checkCachingLocked(ctx, true /* renameMuWriteLocked */)
- if got := atomic.LoadInt64(&child.refs); got != -1 {
- t.Fatalf("child.refs=%d, want: -1", got)
- }
- // Parent will also be destroyed when child reference is removed.
- if got := atomic.LoadInt64(&parent.refs); got != -1 {
- t.Fatalf("parent.refs=%d, want: -1", got)
- }
- child.checkCachingLocked(ctx, true /* renameMuWriteLocked */)
- child.checkCachingLocked(ctx, true /* renameMuWriteLocked */)
-}
diff --git a/pkg/sentry/fsimpl/host/BUILD b/pkg/sentry/fsimpl/host/BUILD
deleted file mode 100644
index 476545d00..000000000
--- a/pkg/sentry/fsimpl/host/BUILD
+++ /dev/null
@@ -1,80 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-licenses(["notice"])
-
-go_template_instance(
- name = "inode_refs",
- out = "inode_refs.go",
- package = "host",
- prefix = "inode",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "inode",
- },
-)
-
-go_template_instance(
- name = "connected_endpoint_refs",
- out = "connected_endpoint_refs.go",
- package = "host",
- prefix = "ConnectedEndpoint",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "ConnectedEndpoint",
- },
-)
-
-go_library(
- name = "host",
- srcs = [
- "connected_endpoint_refs.go",
- "control.go",
- "host.go",
- "inode_refs.go",
- "ioctl_unsafe.go",
- "save_restore.go",
- "socket.go",
- "socket_iovec.go",
- "socket_unsafe.go",
- "tty.go",
- "util.go",
- "util_unsafe.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/fdnotifier",
- "//pkg/fspath",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/marshal/primitive",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/safemem",
- "//pkg/sentry/arch",
- "//pkg/sentry/fs/fsutil",
- "//pkg/sentry/fs/lock",
- "//pkg/sentry/fsimpl/kernfs",
- "//pkg/sentry/hostfd",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/memmap",
- "//pkg/sentry/socket/control",
- "//pkg/sentry/socket/unix",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/unimpl",
- "//pkg/sentry/uniqueid",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/syserr",
- "//pkg/syserror",
- "//pkg/tcpip",
- "//pkg/unet",
- "//pkg/usermem",
- "//pkg/waiter",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/sentry/fsimpl/host/connected_endpoint_refs.go b/pkg/sentry/fsimpl/host/connected_endpoint_refs.go
new file mode 100644
index 000000000..c0a87f656
--- /dev/null
+++ b/pkg/sentry/fsimpl/host/connected_endpoint_refs.go
@@ -0,0 +1,140 @@
+package host
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const ConnectedEndpointenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var ConnectedEndpointobj *ConnectedEndpoint
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type ConnectedEndpointRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *ConnectedEndpointRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *ConnectedEndpointRefs) RefType() string {
+ return fmt.Sprintf("%T", ConnectedEndpointobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *ConnectedEndpointRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *ConnectedEndpointRefs) LogRefs() bool {
+ return ConnectedEndpointenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *ConnectedEndpointRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *ConnectedEndpointRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if ConnectedEndpointenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.TryRefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *ConnectedEndpointRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if ConnectedEndpointenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *ConnectedEndpointRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if ConnectedEndpointenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *ConnectedEndpointRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
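
The TryIncRef generated above avoids a compare-and-swap loop by packing speculative references into the high 32 bits of refCount, leaving real references in the low 32 bits. A standalone toy sketch of the same trick (all names are illustrative and not part of this patch) shows why a zero in the low half means the object is already being destroyed:

package main

import (
	"fmt"
	"sync/atomic"
)

// toyRefs mirrors the generated layout: low 32 bits hold real references,
// high 32 bits hold speculative references taken by TryIncRef before it
// knows whether the object is still alive.
type toyRefs struct {
	refCount int64
}

const speculativeRef = 1 << 32

// TryIncRef adds a speculative reference first. If the real count (low 32
// bits) is zero at that point, the object is already being torn down, so the
// speculative reference is withdrawn and false is returned; otherwise the
// speculative reference is converted into a real one.
func (r *toyRefs) TryIncRef() bool {
	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
		atomic.AddInt64(&r.refCount, -speculativeRef)
		return false
	}
	atomic.AddInt64(&r.refCount, -speculativeRef+1)
	return true
}

// DecRef drops a real reference and runs destroy when the count reaches zero.
func (r *toyRefs) DecRef(destroy func()) {
	if v := atomic.AddInt64(&r.refCount, -1); v == 0 && destroy != nil {
		destroy()
	}
}

func main() {
	r := &toyRefs{refCount: 1}                    // one initial reference, as InitRefs sets up.
	fmt.Println(r.TryIncRef())                    // true: count is now 2.
	r.DecRef(nil)                                 // back to 1.
	r.DecRef(func() { fmt.Println("destroyed") }) // last reference: destroy runs.
	fmt.Println(r.TryIncRef())                    // false: the low 32 bits are zero.
}

Running it prints true, destroyed, false: once the real count hits zero, a later TryIncRef sees int32(v) == 0, withdraws its speculative reference, and fails rather than resurrecting the object.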
diff --git a/pkg/sentry/fsimpl/host/host_state_autogen.go b/pkg/sentry/fsimpl/host/host_state_autogen.go
new file mode 100644
index 000000000..607474165
--- /dev/null
+++ b/pkg/sentry/fsimpl/host/host_state_autogen.go
@@ -0,0 +1,327 @@
+// automatically generated by stateify.
+
+package host
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (r *ConnectedEndpointRefs) StateTypeName() string {
+ return "pkg/sentry/fsimpl/host.ConnectedEndpointRefs"
+}
+
+func (r *ConnectedEndpointRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *ConnectedEndpointRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *ConnectedEndpointRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *ConnectedEndpointRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (v *virtualOwner) StateTypeName() string {
+ return "pkg/sentry/fsimpl/host.virtualOwner"
+}
+
+func (v *virtualOwner) StateFields() []string {
+ return []string{
+ "enabled",
+ "uid",
+ "gid",
+ "mode",
+ }
+}
+
+func (v *virtualOwner) beforeSave() {}
+
+// +checklocksignore
+func (v *virtualOwner) StateSave(stateSinkObject state.Sink) {
+ v.beforeSave()
+ stateSinkObject.Save(0, &v.enabled)
+ stateSinkObject.Save(1, &v.uid)
+ stateSinkObject.Save(2, &v.gid)
+ stateSinkObject.Save(3, &v.mode)
+}
+
+func (v *virtualOwner) afterLoad() {}
+
+// +checklocksignore
+func (v *virtualOwner) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &v.enabled)
+ stateSourceObject.Load(1, &v.uid)
+ stateSourceObject.Load(2, &v.gid)
+ stateSourceObject.Load(3, &v.mode)
+}
+
+func (i *inode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/host.inode"
+}
+
+func (i *inode) StateFields() []string {
+ return []string{
+ "InodeNoStatFS",
+ "InodeNotDirectory",
+ "InodeNotSymlink",
+ "CachedMappable",
+ "InodeTemporary",
+ "locks",
+ "inodeRefs",
+ "hostFD",
+ "ino",
+ "ftype",
+ "mayBlock",
+ "seekable",
+ "isTTY",
+ "savable",
+ "queue",
+ "virtualOwner",
+ "haveBuf",
+ "buf",
+ }
+}
+
+// +checklocksignore
+func (i *inode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.InodeNoStatFS)
+ stateSinkObject.Save(1, &i.InodeNotDirectory)
+ stateSinkObject.Save(2, &i.InodeNotSymlink)
+ stateSinkObject.Save(3, &i.CachedMappable)
+ stateSinkObject.Save(4, &i.InodeTemporary)
+ stateSinkObject.Save(5, &i.locks)
+ stateSinkObject.Save(6, &i.inodeRefs)
+ stateSinkObject.Save(7, &i.hostFD)
+ stateSinkObject.Save(8, &i.ino)
+ stateSinkObject.Save(9, &i.ftype)
+ stateSinkObject.Save(10, &i.mayBlock)
+ stateSinkObject.Save(11, &i.seekable)
+ stateSinkObject.Save(12, &i.isTTY)
+ stateSinkObject.Save(13, &i.savable)
+ stateSinkObject.Save(14, &i.queue)
+ stateSinkObject.Save(15, &i.virtualOwner)
+ stateSinkObject.Save(16, &i.haveBuf)
+ stateSinkObject.Save(17, &i.buf)
+}
+
+// +checklocksignore
+func (i *inode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.InodeNoStatFS)
+ stateSourceObject.Load(1, &i.InodeNotDirectory)
+ stateSourceObject.Load(2, &i.InodeNotSymlink)
+ stateSourceObject.Load(3, &i.CachedMappable)
+ stateSourceObject.Load(4, &i.InodeTemporary)
+ stateSourceObject.Load(5, &i.locks)
+ stateSourceObject.Load(6, &i.inodeRefs)
+ stateSourceObject.Load(7, &i.hostFD)
+ stateSourceObject.Load(8, &i.ino)
+ stateSourceObject.Load(9, &i.ftype)
+ stateSourceObject.Load(10, &i.mayBlock)
+ stateSourceObject.Load(11, &i.seekable)
+ stateSourceObject.Load(12, &i.isTTY)
+ stateSourceObject.Load(13, &i.savable)
+ stateSourceObject.Load(14, &i.queue)
+ stateSourceObject.Load(15, &i.virtualOwner)
+ stateSourceObject.Load(16, &i.haveBuf)
+ stateSourceObject.Load(17, &i.buf)
+ stateSourceObject.AfterLoad(i.afterLoad)
+}
+
+func (f *filesystemType) StateTypeName() string {
+ return "pkg/sentry/fsimpl/host.filesystemType"
+}
+
+func (f *filesystemType) StateFields() []string {
+ return []string{}
+}
+
+func (f *filesystemType) beforeSave() {}
+
+// +checklocksignore
+func (f *filesystemType) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+}
+
+func (f *filesystemType) afterLoad() {}
+
+// +checklocksignore
+func (f *filesystemType) StateLoad(stateSourceObject state.Source) {
+}
+
+func (fs *filesystem) StateTypeName() string {
+ return "pkg/sentry/fsimpl/host.filesystem"
+}
+
+func (fs *filesystem) StateFields() []string {
+ return []string{
+ "Filesystem",
+ "devMinor",
+ }
+}
+
+func (fs *filesystem) beforeSave() {}
+
+// +checklocksignore
+func (fs *filesystem) StateSave(stateSinkObject state.Sink) {
+ fs.beforeSave()
+ stateSinkObject.Save(0, &fs.Filesystem)
+ stateSinkObject.Save(1, &fs.devMinor)
+}
+
+func (fs *filesystem) afterLoad() {}
+
+// +checklocksignore
+func (fs *filesystem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fs.Filesystem)
+ stateSourceObject.Load(1, &fs.devMinor)
+}
+
+func (f *fileDescription) StateTypeName() string {
+ return "pkg/sentry/fsimpl/host.fileDescription"
+}
+
+func (f *fileDescription) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "LockFD",
+ "inode",
+ "offset",
+ }
+}
+
+func (f *fileDescription) beforeSave() {}
+
+// +checklocksignore
+func (f *fileDescription) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.vfsfd)
+ stateSinkObject.Save(1, &f.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &f.LockFD)
+ stateSinkObject.Save(3, &f.inode)
+ stateSinkObject.Save(4, &f.offset)
+}
+
+func (f *fileDescription) afterLoad() {}
+
+// +checklocksignore
+func (f *fileDescription) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.vfsfd)
+ stateSourceObject.Load(1, &f.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &f.LockFD)
+ stateSourceObject.Load(3, &f.inode)
+ stateSourceObject.Load(4, &f.offset)
+}
+
+func (r *inodeRefs) StateTypeName() string {
+ return "pkg/sentry/fsimpl/host.inodeRefs"
+}
+
+func (r *inodeRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *inodeRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *inodeRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *inodeRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (c *ConnectedEndpoint) StateTypeName() string {
+ return "pkg/sentry/fsimpl/host.ConnectedEndpoint"
+}
+
+func (c *ConnectedEndpoint) StateFields() []string {
+ return []string{
+ "ConnectedEndpointRefs",
+ "fd",
+ "addr",
+ "stype",
+ }
+}
+
+func (c *ConnectedEndpoint) beforeSave() {}
+
+// +checklocksignore
+func (c *ConnectedEndpoint) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.ConnectedEndpointRefs)
+ stateSinkObject.Save(1, &c.fd)
+ stateSinkObject.Save(2, &c.addr)
+ stateSinkObject.Save(3, &c.stype)
+}
+
+// +checklocksignore
+func (c *ConnectedEndpoint) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.ConnectedEndpointRefs)
+ stateSourceObject.Load(1, &c.fd)
+ stateSourceObject.Load(2, &c.addr)
+ stateSourceObject.Load(3, &c.stype)
+ stateSourceObject.AfterLoad(c.afterLoad)
+}
+
+func (t *TTYFileDescription) StateTypeName() string {
+ return "pkg/sentry/fsimpl/host.TTYFileDescription"
+}
+
+func (t *TTYFileDescription) StateFields() []string {
+ return []string{
+ "fileDescription",
+ "session",
+ "fgProcessGroup",
+ "termios",
+ }
+}
+
+func (t *TTYFileDescription) beforeSave() {}
+
+// +checklocksignore
+func (t *TTYFileDescription) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ stateSinkObject.Save(0, &t.fileDescription)
+ stateSinkObject.Save(1, &t.session)
+ stateSinkObject.Save(2, &t.fgProcessGroup)
+ stateSinkObject.Save(3, &t.termios)
+}
+
+func (t *TTYFileDescription) afterLoad() {}
+
+// +checklocksignore
+func (t *TTYFileDescription) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.fileDescription)
+ stateSourceObject.Load(1, &t.session)
+ stateSourceObject.Load(2, &t.fgProcessGroup)
+ stateSourceObject.Load(3, &t.termios)
+}
+
+func init() {
+ state.Register((*ConnectedEndpointRefs)(nil))
+ state.Register((*virtualOwner)(nil))
+ state.Register((*inode)(nil))
+ state.Register((*filesystemType)(nil))
+ state.Register((*filesystem)(nil))
+ state.Register((*fileDescription)(nil))
+ state.Register((*inodeRefs)(nil))
+ state.Register((*ConnectedEndpoint)(nil))
+ state.Register((*TTYFileDescription)(nil))
+}
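
host_state_autogen.go is emitted by stateify: each savable type declares its field names, saves and loads them by index, and registers itself with the state package. A hand-written sketch of that protocol for a hypothetical exampleCounter type (the type and its field are invented for illustration; only the state calls already used above are assumed) would look like:

package host

import "gvisor.dev/gvisor/pkg/state"

// exampleCounter is a hypothetical savable type, shown only to illustrate
// the protocol that stateify generates for the real types above.
//
// +stateify savable
type exampleCounter struct {
	hits int64
}

func (c *exampleCounter) StateTypeName() string {
	return "pkg/sentry/fsimpl/host.exampleCounter"
}

func (c *exampleCounter) StateFields() []string {
	return []string{"hits"}
}

func (c *exampleCounter) beforeSave() {}

func (c *exampleCounter) StateSave(stateSinkObject state.Sink) {
	c.beforeSave()
	stateSinkObject.Save(0, &c.hits) // index 0 matches StateFields()[0].
}

func (c *exampleCounter) afterLoad() {}

func (c *exampleCounter) StateLoad(stateSourceObject state.Source) {
	stateSourceObject.Load(0, &c.hits)
	stateSourceObject.AfterLoad(c.afterLoad) // fixups run once loading completes.
}

func init() {
	state.Register((*exampleCounter)(nil)) // makes the type known to save/restore.
}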
diff --git a/pkg/sentry/fsimpl/host/host_unsafe_state_autogen.go b/pkg/sentry/fsimpl/host/host_unsafe_state_autogen.go
new file mode 100644
index 000000000..b2d8c661f
--- /dev/null
+++ b/pkg/sentry/fsimpl/host/host_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package host
diff --git a/pkg/sentry/fsimpl/host/inode_refs.go b/pkg/sentry/fsimpl/host/inode_refs.go
new file mode 100644
index 000000000..112f39850
--- /dev/null
+++ b/pkg/sentry/fsimpl/host/inode_refs.go
@@ -0,0 +1,140 @@
+package host
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const inodeenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var inodeobj *inode
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type inodeRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *inodeRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *inodeRefs) RefType() string {
+ return fmt.Sprintf("%T", inodeobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *inodeRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *inodeRefs) LogRefs() bool {
+ return inodeenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *inodeRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *inodeRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if inodeenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.TryRefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *inodeRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if inodeenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *inodeRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if inodeenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *inodeRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
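
inode_refs.go is the same refs_template instantiation as connected_endpoint_refs.go, generated for the unexported inode type. Owners embed the generated counter and forward DecRef with a destructor callback, the pattern visible in the deleted kernfs test further down (d.dirRefs.DecRef(func() { d.Destroy(ctx) })). A minimal sketch of that wiring, using a hypothetical exampleResource owner rather than the real inode:

package host

import "gvisor.dev/gvisor/pkg/context"

// exampleResource is a hypothetical owner of the generated inodeRefs counter,
// shown only to illustrate how the destroy callback is typically wired.
type exampleResource struct {
	inodeRefs // generated reference counter (see above).

	hostFD int32
}

func newExampleResource(fd int32) *exampleResource {
	r := &exampleResource{hostFD: fd}
	r.InitRefs() // starts at one reference and registers for leak checking.
	return r
}

// DecRef drops a reference and releases the resource when the count hits zero.
func (r *exampleResource) DecRef(ctx context.Context) {
	r.inodeRefs.DecRef(func() {
		// Release underlying resources here, e.g. close r.hostFD.
	})
}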
diff --git a/pkg/sentry/fsimpl/kernfs/BUILD b/pkg/sentry/fsimpl/kernfs/BUILD
deleted file mode 100644
index d53937db6..000000000
--- a/pkg/sentry/fsimpl/kernfs/BUILD
+++ /dev/null
@@ -1,151 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-licenses(["notice"])
-
-go_template_instance(
- name = "dentry_list",
- out = "dentry_list.go",
- package = "kernfs",
- prefix = "dentry",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*Dentry",
- "Linker": "*Dentry",
- },
-)
-
-go_template_instance(
- name = "fstree",
- out = "fstree.go",
- package = "kernfs",
- prefix = "generic",
- template = "//pkg/sentry/vfs/genericfstree:generic_fstree",
- types = {
- "Dentry": "Dentry",
- },
-)
-
-go_template_instance(
- name = "slot_list",
- out = "slot_list.go",
- package = "kernfs",
- prefix = "slot",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*slot",
- "Linker": "*slot",
- },
-)
-
-go_template_instance(
- name = "static_directory_refs",
- out = "static_directory_refs.go",
- package = "kernfs",
- prefix = "StaticDirectory",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "StaticDirectory",
- },
-)
-
-go_template_instance(
- name = "dir_refs",
- out = "dir_refs.go",
- package = "kernfs_test",
- prefix = "dir",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "dir",
- },
-)
-
-go_template_instance(
- name = "readonly_dir_refs",
- out = "readonly_dir_refs.go",
- package = "kernfs_test",
- prefix = "readonlyDir",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "readonlyDir",
- },
-)
-
-go_template_instance(
- name = "synthetic_directory_refs",
- out = "synthetic_directory_refs.go",
- package = "kernfs",
- prefix = "syntheticDirectory",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "syntheticDirectory",
- },
-)
-
-go_library(
- name = "kernfs",
- srcs = [
- "dentry_list.go",
- "dynamic_bytes_file.go",
- "fd_impl_util.go",
- "filesystem.go",
- "fstree.go",
- "inode_impl_util.go",
- "kernfs.go",
- "mmap_util.go",
- "save_restore.go",
- "slot_list.go",
- "static_directory_refs.go",
- "symlink.go",
- "synthetic_directory.go",
- "synthetic_directory_refs.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/fspath",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/safemem",
- "//pkg/sentry/fs/fsutil",
- "//pkg/sentry/fs/lock",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/memmap",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- ],
-)
-
-go_test(
- name = "kernfs_test",
- size = "small",
- srcs = [
- "dir_refs.go",
- "kernfs_test.go",
- "readonly_dir_refs.go",
- ],
- deps = [
- ":kernfs",
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/log",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/fsimpl/testutil",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- "//pkg/syserror",
- "//pkg/usermem",
- "@com_github_google_go_cmp//cmp:go_default_library",
- ],
-)
diff --git a/pkg/sentry/fsimpl/kernfs/dentry_list.go b/pkg/sentry/fsimpl/kernfs/dentry_list.go
new file mode 100644
index 000000000..e73cde1f1
--- /dev/null
+++ b/pkg/sentry/fsimpl/kernfs/dentry_list.go
@@ -0,0 +1,221 @@
+package kernfs
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type dentryElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (dentryElementMapper) linkerFor(elem *Dentry) *Dentry { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type dentryList struct {
+ head *Dentry
+ tail *Dentry
+}
+
+// Reset resets list l to the empty state.
+func (l *dentryList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *dentryList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *dentryList) Front() *Dentry {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *dentryList) Back() *Dentry {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *dentryList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (dentryElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *dentryList) PushFront(e *Dentry) {
+ linker := dentryElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ dentryElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *dentryList) PushBack(e *Dentry) {
+ linker := dentryElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ dentryElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *dentryList) PushBackList(m *dentryList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ dentryElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ dentryElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *dentryList) InsertAfter(b, e *Dentry) {
+ bLinker := dentryElementMapper{}.linkerFor(b)
+ eLinker := dentryElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ dentryElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *dentryList) InsertBefore(a, e *Dentry) {
+ aLinker := dentryElementMapper{}.linkerFor(a)
+ eLinker := dentryElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ dentryElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *dentryList) Remove(e *Dentry) {
+ linker := dentryElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ dentryElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ dentryElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type dentryEntry struct {
+ next *Dentry
+ prev *Dentry
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *dentryEntry) Next() *Dentry {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *dentryEntry) Prev() *Dentry {
+ return e.prev
+}
+
+// SetNext assigns 'entry' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *dentryEntry) SetNext(elem *Dentry) {
+ e.next = elem
+}
+
+// SetPrev assigns 'entry' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *dentryEntry) SetPrev(elem *Dentry) {
+ e.prev = elem
+}
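
dentry_list.go is the //pkg/ilist:generic_list template instantiated for *Dentry. The point of the intrusive style is that the links live inside the element itself (Dentry embeds dentryEntry), so PushBack and Remove are O(1) and never allocate. A self-contained toy version of the same idea (illustrative names, not part of the patch):

package main

import "fmt"

// node carries its own links, the way Dentry embeds dentryEntry above.
type node struct {
	name       string
	next, prev *node
}

// list is a minimal intrusive list: it stores only head and tail pointers.
type list struct {
	head, tail *node
}

// pushBack links e in after the current tail in O(1), with no allocation.
func (l *list) pushBack(e *node) {
	e.prev, e.next = l.tail, nil
	if l.tail != nil {
		l.tail.next = e
	} else {
		l.head = e
	}
	l.tail = e
}

// remove unlinks e in O(1); e must currently be on the list.
func (l *list) remove(e *node) {
	if e.prev != nil {
		e.prev.next = e.next
	} else {
		l.head = e.next
	}
	if e.next != nil {
		e.next.prev = e.prev
	} else {
		l.tail = e.prev
	}
	e.next, e.prev = nil, nil
}

func main() {
	var l list
	a, b := &node{name: "a"}, &node{name: "b"}
	l.pushBack(a)
	l.pushBack(b)
	l.remove(a)
	for e := l.head; e != nil; e = e.next { // same iteration shape as dentryList.Front()/Next().
		fmt.Println(e.name) // prints "b"
	}
}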
diff --git a/pkg/sentry/fsimpl/kernfs/fstree.go b/pkg/sentry/fsimpl/kernfs/fstree.go
new file mode 100644
index 000000000..9dc52dabc
--- /dev/null
+++ b/pkg/sentry/fsimpl/kernfs/fstree.go
@@ -0,0 +1,55 @@
+package kernfs
+
+import (
+ "gvisor.dev/gvisor/pkg/fspath"
+ "gvisor.dev/gvisor/pkg/sentry/vfs"
+)
+
+// IsAncestorDentry returns true if d is an ancestor of d2; that is, d is
+// either d2's parent or an ancestor of d2's parent.
+func genericIsAncestorDentry(d, d2 *Dentry) bool {
+ for d2 != nil {
+ if d2.parent == d {
+ return true
+ }
+ if d2.parent == d2 {
+ return false
+ }
+ d2 = d2.parent
+ }
+ return false
+}
+
+// ParentOrSelf returns d.parent. If d.parent is nil, ParentOrSelf returns d.
+func genericParentOrSelf(d *Dentry) *Dentry {
+ if d.parent != nil {
+ return d.parent
+ }
+ return d
+}
+
+// PrependPath is a generic implementation of FilesystemImpl.PrependPath().
+func genericPrependPath(vfsroot vfs.VirtualDentry, mnt *vfs.Mount, d *Dentry, b *fspath.Builder) error {
+ for {
+ if mnt == vfsroot.Mount() && &d.vfsd == vfsroot.Dentry() {
+ return vfs.PrependPathAtVFSRootError{}
+ }
+ if mnt != nil && &d.vfsd == mnt.Root() {
+ return nil
+ }
+ if d.parent == nil {
+ return vfs.PrependPathAtNonMountRootError{}
+ }
+ b.PrependComponent(d.name)
+ d = d.parent
+ }
+}
+
+// DebugPathname returns a pathname to d relative to its filesystem root.
+// DebugPathname does not correspond to any Linux function; it's used to
+// generate dentry pathnames for debugging.
+func genericDebugPathname(d *Dentry) string {
+ var b fspath.Builder
+ _ = genericPrependPath(vfs.VirtualDentry{}, nil, d, &b)
+ return b.String()
+}
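
genericPrependPath walks parent pointers from a dentry toward the root, prepending one component per step into the fspath.Builder, and genericDebugPathname renders the result. A self-contained sketch of the same leaf-to-root construction with toy types (not the real vfs or fspath API):

package main

import (
	"fmt"
	"strings"
)

// toyDentry has just enough structure to walk toward the root.
type toyDentry struct {
	name   string
	parent *toyDentry // nil at the filesystem root.
}

// debugPath prepends components while walking up, mirroring the shape of
// genericPrependPath plus genericDebugPathname above.
func debugPath(d *toyDentry) string {
	var components []string
	for ; d.parent != nil; d = d.parent {
		components = append([]string{d.name}, components...) // prepend one component per step.
	}
	return "/" + strings.Join(components, "/")
}

func main() {
	root := &toyDentry{name: ""}
	dir := &toyDentry{name: "proc", parent: root}
	leaf := &toyDentry{name: "meminfo", parent: dir}
	fmt.Println(debugPath(leaf)) // "/proc/meminfo"
}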
diff --git a/pkg/sentry/fsimpl/kernfs/kernfs_state_autogen.go b/pkg/sentry/fsimpl/kernfs/kernfs_state_autogen.go
new file mode 100644
index 000000000..f8add23f8
--- /dev/null
+++ b/pkg/sentry/fsimpl/kernfs/kernfs_state_autogen.go
@@ -0,0 +1,965 @@
+// automatically generated by stateify.
+
+package kernfs
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (l *dentryList) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.dentryList"
+}
+
+func (l *dentryList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *dentryList) beforeSave() {}
+
+// +checklocksignore
+func (l *dentryList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *dentryList) afterLoad() {}
+
+// +checklocksignore
+func (l *dentryList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *dentryEntry) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.dentryEntry"
+}
+
+func (e *dentryEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *dentryEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *dentryEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *dentryEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *dentryEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func (f *DynamicBytesFile) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.DynamicBytesFile"
+}
+
+func (f *DynamicBytesFile) StateFields() []string {
+ return []string{
+ "InodeAttrs",
+ "InodeNoStatFS",
+ "InodeNoopRefCount",
+ "InodeNotDirectory",
+ "InodeNotSymlink",
+ "locks",
+ "data",
+ }
+}
+
+func (f *DynamicBytesFile) beforeSave() {}
+
+// +checklocksignore
+func (f *DynamicBytesFile) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.InodeAttrs)
+ stateSinkObject.Save(1, &f.InodeNoStatFS)
+ stateSinkObject.Save(2, &f.InodeNoopRefCount)
+ stateSinkObject.Save(3, &f.InodeNotDirectory)
+ stateSinkObject.Save(4, &f.InodeNotSymlink)
+ stateSinkObject.Save(5, &f.locks)
+ stateSinkObject.Save(6, &f.data)
+}
+
+func (f *DynamicBytesFile) afterLoad() {}
+
+// +checklocksignore
+func (f *DynamicBytesFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.InodeAttrs)
+ stateSourceObject.Load(1, &f.InodeNoStatFS)
+ stateSourceObject.Load(2, &f.InodeNoopRefCount)
+ stateSourceObject.Load(3, &f.InodeNotDirectory)
+ stateSourceObject.Load(4, &f.InodeNotSymlink)
+ stateSourceObject.Load(5, &f.locks)
+ stateSourceObject.Load(6, &f.data)
+}
+
+func (fd *DynamicBytesFD) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.DynamicBytesFD"
+}
+
+func (fd *DynamicBytesFD) StateFields() []string {
+ return []string{
+ "FileDescriptionDefaultImpl",
+ "DynamicBytesFileDescriptionImpl",
+ "LockFD",
+ "vfsfd",
+ "inode",
+ }
+}
+
+func (fd *DynamicBytesFD) beforeSave() {}
+
+// +checklocksignore
+func (fd *DynamicBytesFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(1, &fd.DynamicBytesFileDescriptionImpl)
+ stateSinkObject.Save(2, &fd.LockFD)
+ stateSinkObject.Save(3, &fd.vfsfd)
+ stateSinkObject.Save(4, &fd.inode)
+}
+
+func (fd *DynamicBytesFD) afterLoad() {}
+
+// +checklocksignore
+func (fd *DynamicBytesFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(1, &fd.DynamicBytesFileDescriptionImpl)
+ stateSourceObject.Load(2, &fd.LockFD)
+ stateSourceObject.Load(3, &fd.vfsfd)
+ stateSourceObject.Load(4, &fd.inode)
+}
+
+func (s *SeekEndConfig) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.SeekEndConfig"
+}
+
+func (s *SeekEndConfig) StateFields() []string {
+ return nil
+}
+
+func (g *GenericDirectoryFDOptions) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.GenericDirectoryFDOptions"
+}
+
+func (g *GenericDirectoryFDOptions) StateFields() []string {
+ return []string{
+ "SeekEnd",
+ }
+}
+
+func (g *GenericDirectoryFDOptions) beforeSave() {}
+
+// +checklocksignore
+func (g *GenericDirectoryFDOptions) StateSave(stateSinkObject state.Sink) {
+ g.beforeSave()
+ stateSinkObject.Save(0, &g.SeekEnd)
+}
+
+func (g *GenericDirectoryFDOptions) afterLoad() {}
+
+// +checklocksignore
+func (g *GenericDirectoryFDOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &g.SeekEnd)
+}
+
+func (fd *GenericDirectoryFD) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.GenericDirectoryFD"
+}
+
+func (fd *GenericDirectoryFD) StateFields() []string {
+ return []string{
+ "FileDescriptionDefaultImpl",
+ "DirectoryFileDescriptionDefaultImpl",
+ "LockFD",
+ "seekEnd",
+ "vfsfd",
+ "children",
+ "off",
+ }
+}
+
+func (fd *GenericDirectoryFD) beforeSave() {}
+
+// +checklocksignore
+func (fd *GenericDirectoryFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(1, &fd.DirectoryFileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &fd.LockFD)
+ stateSinkObject.Save(3, &fd.seekEnd)
+ stateSinkObject.Save(4, &fd.vfsfd)
+ stateSinkObject.Save(5, &fd.children)
+ stateSinkObject.Save(6, &fd.off)
+}
+
+func (fd *GenericDirectoryFD) afterLoad() {}
+
+// +checklocksignore
+func (fd *GenericDirectoryFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(1, &fd.DirectoryFileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &fd.LockFD)
+ stateSourceObject.Load(3, &fd.seekEnd)
+ stateSourceObject.Load(4, &fd.vfsfd)
+ stateSourceObject.Load(5, &fd.children)
+ stateSourceObject.Load(6, &fd.off)
+}
+
+func (i *InodeNoopRefCount) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.InodeNoopRefCount"
+}
+
+func (i *InodeNoopRefCount) StateFields() []string {
+ return []string{
+ "InodeTemporary",
+ }
+}
+
+func (i *InodeNoopRefCount) beforeSave() {}
+
+// +checklocksignore
+func (i *InodeNoopRefCount) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.InodeTemporary)
+}
+
+func (i *InodeNoopRefCount) afterLoad() {}
+
+// +checklocksignore
+func (i *InodeNoopRefCount) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.InodeTemporary)
+}
+
+func (i *InodeDirectoryNoNewChildren) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.InodeDirectoryNoNewChildren"
+}
+
+func (i *InodeDirectoryNoNewChildren) StateFields() []string {
+ return []string{}
+}
+
+func (i *InodeDirectoryNoNewChildren) beforeSave() {}
+
+// +checklocksignore
+func (i *InodeDirectoryNoNewChildren) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+}
+
+func (i *InodeDirectoryNoNewChildren) afterLoad() {}
+
+// +checklocksignore
+func (i *InodeDirectoryNoNewChildren) StateLoad(stateSourceObject state.Source) {
+}
+
+func (i *InodeNotDirectory) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.InodeNotDirectory"
+}
+
+func (i *InodeNotDirectory) StateFields() []string {
+ return []string{
+ "InodeAlwaysValid",
+ }
+}
+
+func (i *InodeNotDirectory) beforeSave() {}
+
+// +checklocksignore
+func (i *InodeNotDirectory) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.InodeAlwaysValid)
+}
+
+func (i *InodeNotDirectory) afterLoad() {}
+
+// +checklocksignore
+func (i *InodeNotDirectory) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.InodeAlwaysValid)
+}
+
+func (i *InodeNotSymlink) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.InodeNotSymlink"
+}
+
+func (i *InodeNotSymlink) StateFields() []string {
+ return []string{}
+}
+
+func (i *InodeNotSymlink) beforeSave() {}
+
+// +checklocksignore
+func (i *InodeNotSymlink) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+}
+
+func (i *InodeNotSymlink) afterLoad() {}
+
+// +checklocksignore
+func (i *InodeNotSymlink) StateLoad(stateSourceObject state.Source) {
+}
+
+func (a *InodeAttrs) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.InodeAttrs"
+}
+
+func (a *InodeAttrs) StateFields() []string {
+ return []string{
+ "devMajor",
+ "devMinor",
+ "ino",
+ "mode",
+ "uid",
+ "gid",
+ "nlink",
+ "blockSize",
+ "atime",
+ "mtime",
+ "ctime",
+ }
+}
+
+func (a *InodeAttrs) beforeSave() {}
+
+// +checklocksignore
+func (a *InodeAttrs) StateSave(stateSinkObject state.Sink) {
+ a.beforeSave()
+ stateSinkObject.Save(0, &a.devMajor)
+ stateSinkObject.Save(1, &a.devMinor)
+ stateSinkObject.Save(2, &a.ino)
+ stateSinkObject.Save(3, &a.mode)
+ stateSinkObject.Save(4, &a.uid)
+ stateSinkObject.Save(5, &a.gid)
+ stateSinkObject.Save(6, &a.nlink)
+ stateSinkObject.Save(7, &a.blockSize)
+ stateSinkObject.Save(8, &a.atime)
+ stateSinkObject.Save(9, &a.mtime)
+ stateSinkObject.Save(10, &a.ctime)
+}
+
+func (a *InodeAttrs) afterLoad() {}
+
+// +checklocksignore
+func (a *InodeAttrs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &a.devMajor)
+ stateSourceObject.Load(1, &a.devMinor)
+ stateSourceObject.Load(2, &a.ino)
+ stateSourceObject.Load(3, &a.mode)
+ stateSourceObject.Load(4, &a.uid)
+ stateSourceObject.Load(5, &a.gid)
+ stateSourceObject.Load(6, &a.nlink)
+ stateSourceObject.Load(7, &a.blockSize)
+ stateSourceObject.Load(8, &a.atime)
+ stateSourceObject.Load(9, &a.mtime)
+ stateSourceObject.Load(10, &a.ctime)
+}
+
+func (s *slot) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.slot"
+}
+
+func (s *slot) StateFields() []string {
+ return []string{
+ "name",
+ "inode",
+ "static",
+ "slotEntry",
+ }
+}
+
+func (s *slot) beforeSave() {}
+
+// +checklocksignore
+func (s *slot) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.name)
+ stateSinkObject.Save(1, &s.inode)
+ stateSinkObject.Save(2, &s.static)
+ stateSinkObject.Save(3, &s.slotEntry)
+}
+
+func (s *slot) afterLoad() {}
+
+// +checklocksignore
+func (s *slot) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.name)
+ stateSourceObject.Load(1, &s.inode)
+ stateSourceObject.Load(2, &s.static)
+ stateSourceObject.Load(3, &s.slotEntry)
+}
+
+func (o *OrderedChildrenOptions) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.OrderedChildrenOptions"
+}
+
+func (o *OrderedChildrenOptions) StateFields() []string {
+ return []string{
+ "Writable",
+ }
+}
+
+func (o *OrderedChildrenOptions) beforeSave() {}
+
+// +checklocksignore
+func (o *OrderedChildrenOptions) StateSave(stateSinkObject state.Sink) {
+ o.beforeSave()
+ stateSinkObject.Save(0, &o.Writable)
+}
+
+func (o *OrderedChildrenOptions) afterLoad() {}
+
+// +checklocksignore
+func (o *OrderedChildrenOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &o.Writable)
+}
+
+func (o *OrderedChildren) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.OrderedChildren"
+}
+
+func (o *OrderedChildren) StateFields() []string {
+ return []string{
+ "writable",
+ "order",
+ "set",
+ }
+}
+
+func (o *OrderedChildren) beforeSave() {}
+
+// +checklocksignore
+func (o *OrderedChildren) StateSave(stateSinkObject state.Sink) {
+ o.beforeSave()
+ stateSinkObject.Save(0, &o.writable)
+ stateSinkObject.Save(1, &o.order)
+ stateSinkObject.Save(2, &o.set)
+}
+
+func (o *OrderedChildren) afterLoad() {}
+
+// +checklocksignore
+func (o *OrderedChildren) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &o.writable)
+ stateSourceObject.Load(1, &o.order)
+ stateSourceObject.Load(2, &o.set)
+}
+
+func (i *InodeSymlink) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.InodeSymlink"
+}
+
+func (i *InodeSymlink) StateFields() []string {
+ return []string{
+ "InodeNotDirectory",
+ }
+}
+
+func (i *InodeSymlink) beforeSave() {}
+
+// +checklocksignore
+func (i *InodeSymlink) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.InodeNotDirectory)
+}
+
+func (i *InodeSymlink) afterLoad() {}
+
+// +checklocksignore
+func (i *InodeSymlink) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.InodeNotDirectory)
+}
+
+func (s *StaticDirectory) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.StaticDirectory"
+}
+
+func (s *StaticDirectory) StateFields() []string {
+ return []string{
+ "InodeAlwaysValid",
+ "InodeAttrs",
+ "InodeDirectoryNoNewChildren",
+ "InodeNoStatFS",
+ "InodeNotSymlink",
+ "InodeTemporary",
+ "OrderedChildren",
+ "StaticDirectoryRefs",
+ "locks",
+ "fdOpts",
+ }
+}
+
+func (s *StaticDirectory) beforeSave() {}
+
+// +checklocksignore
+func (s *StaticDirectory) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.InodeAlwaysValid)
+ stateSinkObject.Save(1, &s.InodeAttrs)
+ stateSinkObject.Save(2, &s.InodeDirectoryNoNewChildren)
+ stateSinkObject.Save(3, &s.InodeNoStatFS)
+ stateSinkObject.Save(4, &s.InodeNotSymlink)
+ stateSinkObject.Save(5, &s.InodeTemporary)
+ stateSinkObject.Save(6, &s.OrderedChildren)
+ stateSinkObject.Save(7, &s.StaticDirectoryRefs)
+ stateSinkObject.Save(8, &s.locks)
+ stateSinkObject.Save(9, &s.fdOpts)
+}
+
+func (s *StaticDirectory) afterLoad() {}
+
+// +checklocksignore
+func (s *StaticDirectory) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.InodeAlwaysValid)
+ stateSourceObject.Load(1, &s.InodeAttrs)
+ stateSourceObject.Load(2, &s.InodeDirectoryNoNewChildren)
+ stateSourceObject.Load(3, &s.InodeNoStatFS)
+ stateSourceObject.Load(4, &s.InodeNotSymlink)
+ stateSourceObject.Load(5, &s.InodeTemporary)
+ stateSourceObject.Load(6, &s.OrderedChildren)
+ stateSourceObject.Load(7, &s.StaticDirectoryRefs)
+ stateSourceObject.Load(8, &s.locks)
+ stateSourceObject.Load(9, &s.fdOpts)
+}
+
+func (i *InodeAlwaysValid) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.InodeAlwaysValid"
+}
+
+func (i *InodeAlwaysValid) StateFields() []string {
+ return []string{}
+}
+
+func (i *InodeAlwaysValid) beforeSave() {}
+
+// +checklocksignore
+func (i *InodeAlwaysValid) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+}
+
+func (i *InodeAlwaysValid) afterLoad() {}
+
+// +checklocksignore
+func (i *InodeAlwaysValid) StateLoad(stateSourceObject state.Source) {
+}
+
+func (i *InodeTemporary) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.InodeTemporary"
+}
+
+func (i *InodeTemporary) StateFields() []string {
+ return []string{}
+}
+
+func (i *InodeTemporary) beforeSave() {}
+
+// +checklocksignore
+func (i *InodeTemporary) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+}
+
+func (i *InodeTemporary) afterLoad() {}
+
+// +checklocksignore
+func (i *InodeTemporary) StateLoad(stateSourceObject state.Source) {
+}
+
+func (i *InodeNoStatFS) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.InodeNoStatFS"
+}
+
+func (i *InodeNoStatFS) StateFields() []string {
+ return []string{}
+}
+
+func (i *InodeNoStatFS) beforeSave() {}
+
+// +checklocksignore
+func (i *InodeNoStatFS) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+}
+
+func (i *InodeNoStatFS) afterLoad() {}
+
+// +checklocksignore
+func (i *InodeNoStatFS) StateLoad(stateSourceObject state.Source) {
+}
+
+func (fs *Filesystem) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.Filesystem"
+}
+
+func (fs *Filesystem) StateFields() []string {
+ return []string{
+ "vfsfs",
+ "deferredDecRefs",
+ "nextInoMinusOne",
+ "cachedDentries",
+ "cachedDentriesLen",
+ "MaxCachedDentries",
+ "root",
+ }
+}
+
+func (fs *Filesystem) beforeSave() {}
+
+// +checklocksignore
+func (fs *Filesystem) StateSave(stateSinkObject state.Sink) {
+ fs.beforeSave()
+ stateSinkObject.Save(0, &fs.vfsfs)
+ stateSinkObject.Save(1, &fs.deferredDecRefs)
+ stateSinkObject.Save(2, &fs.nextInoMinusOne)
+ stateSinkObject.Save(3, &fs.cachedDentries)
+ stateSinkObject.Save(4, &fs.cachedDentriesLen)
+ stateSinkObject.Save(5, &fs.MaxCachedDentries)
+ stateSinkObject.Save(6, &fs.root)
+}
+
+func (fs *Filesystem) afterLoad() {}
+
+// +checklocksignore
+func (fs *Filesystem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fs.vfsfs)
+ stateSourceObject.Load(1, &fs.deferredDecRefs)
+ stateSourceObject.Load(2, &fs.nextInoMinusOne)
+ stateSourceObject.Load(3, &fs.cachedDentries)
+ stateSourceObject.Load(4, &fs.cachedDentriesLen)
+ stateSourceObject.Load(5, &fs.MaxCachedDentries)
+ stateSourceObject.Load(6, &fs.root)
+}
+
+func (d *Dentry) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.Dentry"
+}
+
+func (d *Dentry) StateFields() []string {
+ return []string{
+ "vfsd",
+ "refs",
+ "fs",
+ "flags",
+ "parent",
+ "name",
+ "cached",
+ "dentryEntry",
+ "children",
+ "inode",
+ }
+}
+
+func (d *Dentry) beforeSave() {}
+
+// +checklocksignore
+func (d *Dentry) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.vfsd)
+ stateSinkObject.Save(1, &d.refs)
+ stateSinkObject.Save(2, &d.fs)
+ stateSinkObject.Save(3, &d.flags)
+ stateSinkObject.Save(4, &d.parent)
+ stateSinkObject.Save(5, &d.name)
+ stateSinkObject.Save(6, &d.cached)
+ stateSinkObject.Save(7, &d.dentryEntry)
+ stateSinkObject.Save(8, &d.children)
+ stateSinkObject.Save(9, &d.inode)
+}
+
+// +checklocksignore
+func (d *Dentry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.vfsd)
+ stateSourceObject.Load(1, &d.refs)
+ stateSourceObject.Load(2, &d.fs)
+ stateSourceObject.Load(3, &d.flags)
+ stateSourceObject.Load(4, &d.parent)
+ stateSourceObject.Load(5, &d.name)
+ stateSourceObject.Load(6, &d.cached)
+ stateSourceObject.Load(7, &d.dentryEntry)
+ stateSourceObject.Load(8, &d.children)
+ stateSourceObject.Load(9, &d.inode)
+ stateSourceObject.AfterLoad(d.afterLoad)
+}
+
+func (i *inodePlatformFile) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.inodePlatformFile"
+}
+
+func (i *inodePlatformFile) StateFields() []string {
+ return []string{
+ "hostFD",
+ "fdRefs",
+ "fileMapper",
+ }
+}
+
+func (i *inodePlatformFile) beforeSave() {}
+
+// +checklocksignore
+func (i *inodePlatformFile) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.hostFD)
+ stateSinkObject.Save(1, &i.fdRefs)
+ stateSinkObject.Save(2, &i.fileMapper)
+}
+
+// +checklocksignore
+func (i *inodePlatformFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.hostFD)
+ stateSourceObject.Load(1, &i.fdRefs)
+ stateSourceObject.Load(2, &i.fileMapper)
+ stateSourceObject.AfterLoad(i.afterLoad)
+}
+
+func (i *CachedMappable) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.CachedMappable"
+}
+
+func (i *CachedMappable) StateFields() []string {
+ return []string{
+ "mappings",
+ "pf",
+ }
+}
+
+func (i *CachedMappable) beforeSave() {}
+
+// +checklocksignore
+func (i *CachedMappable) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.mappings)
+ stateSinkObject.Save(1, &i.pf)
+}
+
+func (i *CachedMappable) afterLoad() {}
+
+// +checklocksignore
+func (i *CachedMappable) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.mappings)
+ stateSourceObject.Load(1, &i.pf)
+}
+
+func (l *slotList) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.slotList"
+}
+
+func (l *slotList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *slotList) beforeSave() {}
+
+// +checklocksignore
+func (l *slotList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *slotList) afterLoad() {}
+
+// +checklocksignore
+func (l *slotList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *slotEntry) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.slotEntry"
+}
+
+func (e *slotEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *slotEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *slotEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *slotEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *slotEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func (r *StaticDirectoryRefs) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.StaticDirectoryRefs"
+}
+
+func (r *StaticDirectoryRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *StaticDirectoryRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *StaticDirectoryRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *StaticDirectoryRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (s *StaticSymlink) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.StaticSymlink"
+}
+
+func (s *StaticSymlink) StateFields() []string {
+ return []string{
+ "InodeAttrs",
+ "InodeNoopRefCount",
+ "InodeSymlink",
+ "InodeNoStatFS",
+ "target",
+ }
+}
+
+func (s *StaticSymlink) beforeSave() {}
+
+// +checklocksignore
+func (s *StaticSymlink) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.InodeAttrs)
+ stateSinkObject.Save(1, &s.InodeNoopRefCount)
+ stateSinkObject.Save(2, &s.InodeSymlink)
+ stateSinkObject.Save(3, &s.InodeNoStatFS)
+ stateSinkObject.Save(4, &s.target)
+}
+
+func (s *StaticSymlink) afterLoad() {}
+
+// +checklocksignore
+func (s *StaticSymlink) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.InodeAttrs)
+ stateSourceObject.Load(1, &s.InodeNoopRefCount)
+ stateSourceObject.Load(2, &s.InodeSymlink)
+ stateSourceObject.Load(3, &s.InodeNoStatFS)
+ stateSourceObject.Load(4, &s.target)
+}
+
+func (dir *syntheticDirectory) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.syntheticDirectory"
+}
+
+func (dir *syntheticDirectory) StateFields() []string {
+ return []string{
+ "InodeAlwaysValid",
+ "InodeAttrs",
+ "InodeNoStatFS",
+ "InodeNotSymlink",
+ "OrderedChildren",
+ "syntheticDirectoryRefs",
+ "locks",
+ }
+}
+
+func (dir *syntheticDirectory) beforeSave() {}
+
+// +checklocksignore
+func (dir *syntheticDirectory) StateSave(stateSinkObject state.Sink) {
+ dir.beforeSave()
+ stateSinkObject.Save(0, &dir.InodeAlwaysValid)
+ stateSinkObject.Save(1, &dir.InodeAttrs)
+ stateSinkObject.Save(2, &dir.InodeNoStatFS)
+ stateSinkObject.Save(3, &dir.InodeNotSymlink)
+ stateSinkObject.Save(4, &dir.OrderedChildren)
+ stateSinkObject.Save(5, &dir.syntheticDirectoryRefs)
+ stateSinkObject.Save(6, &dir.locks)
+}
+
+func (dir *syntheticDirectory) afterLoad() {}
+
+// +checklocksignore
+func (dir *syntheticDirectory) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &dir.InodeAlwaysValid)
+ stateSourceObject.Load(1, &dir.InodeAttrs)
+ stateSourceObject.Load(2, &dir.InodeNoStatFS)
+ stateSourceObject.Load(3, &dir.InodeNotSymlink)
+ stateSourceObject.Load(4, &dir.OrderedChildren)
+ stateSourceObject.Load(5, &dir.syntheticDirectoryRefs)
+ stateSourceObject.Load(6, &dir.locks)
+}
+
+func (r *syntheticDirectoryRefs) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.syntheticDirectoryRefs"
+}
+
+func (r *syntheticDirectoryRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *syntheticDirectoryRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *syntheticDirectoryRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *syntheticDirectoryRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func init() {
+ state.Register((*dentryList)(nil))
+ state.Register((*dentryEntry)(nil))
+ state.Register((*DynamicBytesFile)(nil))
+ state.Register((*DynamicBytesFD)(nil))
+ state.Register((*SeekEndConfig)(nil))
+ state.Register((*GenericDirectoryFDOptions)(nil))
+ state.Register((*GenericDirectoryFD)(nil))
+ state.Register((*InodeNoopRefCount)(nil))
+ state.Register((*InodeDirectoryNoNewChildren)(nil))
+ state.Register((*InodeNotDirectory)(nil))
+ state.Register((*InodeNotSymlink)(nil))
+ state.Register((*InodeAttrs)(nil))
+ state.Register((*slot)(nil))
+ state.Register((*OrderedChildrenOptions)(nil))
+ state.Register((*OrderedChildren)(nil))
+ state.Register((*InodeSymlink)(nil))
+ state.Register((*StaticDirectory)(nil))
+ state.Register((*InodeAlwaysValid)(nil))
+ state.Register((*InodeTemporary)(nil))
+ state.Register((*InodeNoStatFS)(nil))
+ state.Register((*Filesystem)(nil))
+ state.Register((*Dentry)(nil))
+ state.Register((*inodePlatformFile)(nil))
+ state.Register((*CachedMappable)(nil))
+ state.Register((*slotList)(nil))
+ state.Register((*slotEntry)(nil))
+ state.Register((*StaticDirectoryRefs)(nil))
+ state.Register((*StaticSymlink)(nil))
+ state.Register((*syntheticDirectory)(nil))
+ state.Register((*syntheticDirectoryRefs)(nil))
+}
diff --git a/pkg/sentry/fsimpl/kernfs/kernfs_test.go b/pkg/sentry/fsimpl/kernfs/kernfs_test.go
deleted file mode 100644
index 609887943..000000000
--- a/pkg/sentry/fsimpl/kernfs/kernfs_test.go
+++ /dev/null
@@ -1,348 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package kernfs_test
-
-import (
- "bytes"
- "fmt"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/errors/linuxerr"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/testutil"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-const defaultMode linux.FileMode = 01777
-const staticFileContent = "This is sample content for a static test file."
-
-// RootDentryFn is a generator function for creating the root dentry of a test
-// filesystem. See newTestSystem.
-type RootDentryFn func(context.Context, *auth.Credentials, *filesystem) kernfs.Inode
-
-// newTestSystem sets up a minimal environment for running a test, including an
-// instance of a test filesystem. Tests can control the contents of the
-// filesystem by providing an appropriate rootFn, which should return a
-// pre-populated root dentry.
-func newTestSystem(t *testing.T, rootFn RootDentryFn) *testutil.System {
- ctx := contexttest.Context(t)
- creds := auth.CredentialsFromContext(ctx)
- v := &vfs.VirtualFilesystem{}
- if err := v.Init(ctx); err != nil {
- t.Fatalf("VFS init: %v", err)
- }
- v.MustRegisterFilesystemType("testfs", &fsType{rootFn: rootFn}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
- })
- mns, err := v.NewMountNamespace(ctx, creds, "", "testfs", &vfs.MountOptions{})
- if err != nil {
- t.Fatalf("Failed to create testfs root mount: %v", err)
- }
- return testutil.NewSystem(ctx, t, v, mns)
-}
-
-type fsType struct {
- rootFn RootDentryFn
-}
-
-type filesystem struct {
- kernfs.Filesystem
-}
-
-// MountOptions implements vfs.FilesystemImpl.MountOptions.
-func (fs *filesystem) MountOptions() string {
- return ""
-}
-
-type file struct {
- kernfs.DynamicBytesFile
- content string
-}
-
-func (fs *filesystem) newFile(ctx context.Context, creds *auth.Credentials, content string) kernfs.Inode {
- f := &file{}
- f.content = content
- f.DynamicBytesFile.Init(ctx, creds, 0 /* devMajor */, 0 /* devMinor */, fs.NextIno(), f, 0777)
- return f
-}
-
-func (f *file) Generate(ctx context.Context, buf *bytes.Buffer) error {
- fmt.Fprintf(buf, "%s", f.content)
- return nil
-}
-
-type attrs struct {
- kernfs.InodeAttrs
-}
-
-func (*attrs) SetStat(context.Context, *vfs.Filesystem, *auth.Credentials, vfs.SetStatOptions) error {
- return linuxerr.EPERM
-}
-
-type readonlyDir struct {
- readonlyDirRefs
- attrs
- kernfs.InodeAlwaysValid
- kernfs.InodeDirectoryNoNewChildren
- kernfs.InodeNoStatFS
- kernfs.InodeNotSymlink
- kernfs.InodeTemporary
- kernfs.OrderedChildren
-
- locks vfs.FileLocks
-}
-
-func (fs *filesystem) newReadonlyDir(ctx context.Context, creds *auth.Credentials, mode linux.FileMode, contents map[string]kernfs.Inode) kernfs.Inode {
- dir := &readonlyDir{}
- dir.attrs.Init(ctx, creds, 0 /* devMajor */, 0 /* devMinor */, fs.NextIno(), linux.ModeDirectory|mode)
- dir.OrderedChildren.Init(kernfs.OrderedChildrenOptions{})
- dir.InitRefs()
- dir.IncLinks(dir.OrderedChildren.Populate(contents))
- return dir
-}
-
-func (d *readonlyDir) Open(ctx context.Context, rp *vfs.ResolvingPath, kd *kernfs.Dentry, opts vfs.OpenOptions) (*vfs.FileDescription, error) {
- fd, err := kernfs.NewGenericDirectoryFD(rp.Mount(), kd, &d.OrderedChildren, &d.locks, &opts, kernfs.GenericDirectoryFDOptions{
- SeekEnd: kernfs.SeekEndStaticEntries,
- })
- if err != nil {
- return nil, err
- }
- return fd.VFSFileDescription(), nil
-}
-
-func (d *readonlyDir) DecRef(ctx context.Context) {
- d.readonlyDirRefs.DecRef(func() { d.Destroy(ctx) })
-}
-
-type dir struct {
- dirRefs
- attrs
- kernfs.InodeAlwaysValid
- kernfs.InodeNotSymlink
- kernfs.InodeNoStatFS
- kernfs.InodeTemporary
- kernfs.OrderedChildren
-
- locks vfs.FileLocks
-
- fs *filesystem
-}
-
-func (fs *filesystem) newDir(ctx context.Context, creds *auth.Credentials, mode linux.FileMode, contents map[string]kernfs.Inode) kernfs.Inode {
- dir := &dir{}
- dir.fs = fs
- dir.attrs.Init(ctx, creds, 0 /* devMajor */, 0 /* devMinor */, fs.NextIno(), linux.ModeDirectory|mode)
- dir.OrderedChildren.Init(kernfs.OrderedChildrenOptions{Writable: true})
- dir.InitRefs()
-
- dir.IncLinks(dir.OrderedChildren.Populate(contents))
- return dir
-}
-
-func (d *dir) Open(ctx context.Context, rp *vfs.ResolvingPath, kd *kernfs.Dentry, opts vfs.OpenOptions) (*vfs.FileDescription, error) {
- fd, err := kernfs.NewGenericDirectoryFD(rp.Mount(), kd, &d.OrderedChildren, &d.locks, &opts, kernfs.GenericDirectoryFDOptions{
- SeekEnd: kernfs.SeekEndStaticEntries,
- })
- if err != nil {
- return nil, err
- }
- return fd.VFSFileDescription(), nil
-}
-
-func (d *dir) DecRef(ctx context.Context) {
- d.dirRefs.DecRef(func() { d.Destroy(ctx) })
-}
-
-func (d *dir) NewDir(ctx context.Context, name string, opts vfs.MkdirOptions) (kernfs.Inode, error) {
- creds := auth.CredentialsFromContext(ctx)
- dir := d.fs.newDir(ctx, creds, opts.Mode, nil)
- if err := d.OrderedChildren.Insert(name, dir); err != nil {
- dir.DecRef(ctx)
- return nil, err
- }
- d.TouchCMtime(ctx)
- d.IncLinks(1)
- return dir, nil
-}
-
-func (d *dir) NewFile(ctx context.Context, name string, opts vfs.OpenOptions) (kernfs.Inode, error) {
- creds := auth.CredentialsFromContext(ctx)
- f := d.fs.newFile(ctx, creds, "")
- if err := d.OrderedChildren.Insert(name, f); err != nil {
- f.DecRef(ctx)
- return nil, err
- }
- d.TouchCMtime(ctx)
- return f, nil
-}
-
-func (*dir) NewLink(context.Context, string, kernfs.Inode) (kernfs.Inode, error) {
- return nil, linuxerr.EPERM
-}
-
-func (*dir) NewSymlink(context.Context, string, string) (kernfs.Inode, error) {
- return nil, linuxerr.EPERM
-}
-
-func (*dir) NewNode(context.Context, string, vfs.MknodOptions) (kernfs.Inode, error) {
- return nil, linuxerr.EPERM
-}
-
-func (fsType) Name() string {
- return "kernfs"
-}
-
-func (fsType) Release(ctx context.Context) {}
-
-func (fst fsType) GetFilesystem(ctx context.Context, vfsObj *vfs.VirtualFilesystem, creds *auth.Credentials, source string, opt vfs.GetFilesystemOptions) (*vfs.Filesystem, *vfs.Dentry, error) {
- fs := &filesystem{}
- fs.VFSFilesystem().Init(vfsObj, &fst, fs)
- root := fst.rootFn(ctx, creds, fs)
- var d kernfs.Dentry
- d.Init(&fs.Filesystem, root)
- return fs.VFSFilesystem(), d.VFSDentry(), nil
-}
-
-// -------------------- The remainder of the file consists of test cases --------------------
-
-func TestBasic(t *testing.T) {
- sys := newTestSystem(t, func(ctx context.Context, creds *auth.Credentials, fs *filesystem) kernfs.Inode {
- return fs.newReadonlyDir(ctx, creds, 0755, map[string]kernfs.Inode{
- "file1": fs.newFile(ctx, creds, staticFileContent),
- })
- })
- defer sys.Destroy()
- sys.GetDentryOrDie(sys.PathOpAtRoot("file1")).DecRef(sys.Ctx)
-}
-
-func TestMkdirGetDentry(t *testing.T) {
- sys := newTestSystem(t, func(ctx context.Context, creds *auth.Credentials, fs *filesystem) kernfs.Inode {
- return fs.newReadonlyDir(ctx, creds, 0755, map[string]kernfs.Inode{
- "dir1": fs.newDir(ctx, creds, 0755, nil),
- })
- })
- defer sys.Destroy()
-
- pop := sys.PathOpAtRoot("dir1/a new directory")
- if err := sys.VFS.MkdirAt(sys.Ctx, sys.Creds, pop, &vfs.MkdirOptions{Mode: 0755}); err != nil {
- t.Fatalf("MkdirAt for PathOperation %+v failed: %v", pop, err)
- }
- sys.GetDentryOrDie(pop).DecRef(sys.Ctx)
-}
-
-func TestReadStaticFile(t *testing.T) {
- sys := newTestSystem(t, func(ctx context.Context, creds *auth.Credentials, fs *filesystem) kernfs.Inode {
- return fs.newReadonlyDir(ctx, creds, 0755, map[string]kernfs.Inode{
- "file1": fs.newFile(ctx, creds, staticFileContent),
- })
- })
- defer sys.Destroy()
-
- pop := sys.PathOpAtRoot("file1")
- fd, err := sys.VFS.OpenAt(sys.Ctx, sys.Creds, pop, &vfs.OpenOptions{
- Flags: linux.O_RDONLY,
- })
- if err != nil {
- t.Fatalf("OpenAt for PathOperation %+v failed: %v", pop, err)
- }
- defer fd.DecRef(sys.Ctx)
-
- content, err := sys.ReadToEnd(fd)
- if err != nil {
- t.Fatalf("Read failed: %v", err)
- }
- if diff := cmp.Diff(staticFileContent, content); diff != "" {
- t.Fatalf("Read returned unexpected data:\n--- want\n+++ got\n%v", diff)
- }
-}
-
-func TestCreateNewFileInStaticDir(t *testing.T) {
- sys := newTestSystem(t, func(ctx context.Context, creds *auth.Credentials, fs *filesystem) kernfs.Inode {
- return fs.newReadonlyDir(ctx, creds, 0755, map[string]kernfs.Inode{
- "dir1": fs.newDir(ctx, creds, 0755, nil),
- })
- })
- defer sys.Destroy()
-
- pop := sys.PathOpAtRoot("dir1/newfile")
- opts := &vfs.OpenOptions{Flags: linux.O_CREAT | linux.O_EXCL, Mode: defaultMode}
- fd, err := sys.VFS.OpenAt(sys.Ctx, sys.Creds, pop, opts)
- if err != nil {
- t.Fatalf("OpenAt(pop:%+v, opts:%+v) failed: %v", pop, opts, err)
- }
-
- // Close the file. The file should persist.
- fd.DecRef(sys.Ctx)
-
- fd, err = sys.VFS.OpenAt(sys.Ctx, sys.Creds, pop, &vfs.OpenOptions{
- Flags: linux.O_RDONLY,
- })
- if err != nil {
- t.Fatalf("OpenAt(pop:%+v) = %+v failed: %v", pop, fd, err)
- }
- fd.DecRef(sys.Ctx)
-}
-
-func TestDirFDReadWrite(t *testing.T) {
- sys := newTestSystem(t, func(ctx context.Context, creds *auth.Credentials, fs *filesystem) kernfs.Inode {
- return fs.newReadonlyDir(ctx, creds, 0755, nil)
- })
- defer sys.Destroy()
-
- pop := sys.PathOpAtRoot("/")
- fd, err := sys.VFS.OpenAt(sys.Ctx, sys.Creds, pop, &vfs.OpenOptions{
- Flags: linux.O_RDONLY,
- })
- if err != nil {
- t.Fatalf("OpenAt for PathOperation %+v failed: %v", pop, err)
- }
- defer fd.DecRef(sys.Ctx)
-
- // Read/Write should fail for directory FDs.
- if _, err := fd.Read(sys.Ctx, usermem.BytesIOSequence([]byte{}), vfs.ReadOptions{}); !linuxerr.Equals(linuxerr.EISDIR, err) {
- t.Fatalf("Read for directory FD failed with unexpected error: %v", err)
- }
- if _, err := fd.Write(sys.Ctx, usermem.BytesIOSequence([]byte{}), vfs.WriteOptions{}); !linuxerr.Equals(linuxerr.EBADF, err) {
- t.Fatalf("Write for directory FD failed with unexpected error: %v", err)
- }
-}
-
-func TestDirFDIterDirents(t *testing.T) {
- sys := newTestSystem(t, func(ctx context.Context, creds *auth.Credentials, fs *filesystem) kernfs.Inode {
- return fs.newReadonlyDir(ctx, creds, 0755, map[string]kernfs.Inode{
- // Fill root with nodes backed by various inode implementations.
- "dir1": fs.newReadonlyDir(ctx, creds, 0755, nil),
- "dir2": fs.newDir(ctx, creds, 0755, map[string]kernfs.Inode{
- "dir3": fs.newDir(ctx, creds, 0755, nil),
- }),
- "file1": fs.newFile(ctx, creds, staticFileContent),
- })
- })
- defer sys.Destroy()
-
- pop := sys.PathOpAtRoot("/")
- sys.AssertAllDirentTypes(sys.ListDirents(pop), map[string]testutil.DirentType{
- "dir1": linux.DT_DIR,
- "dir2": linux.DT_DIR,
- "file1": linux.DT_REG,
- })
-}
diff --git a/pkg/sentry/fsimpl/kernfs/slot_list.go b/pkg/sentry/fsimpl/kernfs/slot_list.go
new file mode 100644
index 000000000..181fe7c8f
--- /dev/null
+++ b/pkg/sentry/fsimpl/kernfs/slot_list.go
@@ -0,0 +1,221 @@
+package kernfs
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type slotElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (slotElementMapper) linkerFor(elem *slot) *slot { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type slotList struct {
+ head *slot
+ tail *slot
+}
+
+// Reset resets list l to the empty state.
+func (l *slotList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *slotList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *slotList) Front() *slot {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *slotList) Back() *slot {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *slotList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (slotElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *slotList) PushFront(e *slot) {
+ linker := slotElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ slotElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *slotList) PushBack(e *slot) {
+ linker := slotElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ slotElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *slotList) PushBackList(m *slotList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ slotElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ slotElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *slotList) InsertAfter(b, e *slot) {
+ bLinker := slotElementMapper{}.linkerFor(b)
+ eLinker := slotElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ slotElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *slotList) InsertBefore(a, e *slot) {
+ aLinker := slotElementMapper{}.linkerFor(a)
+ eLinker := slotElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ slotElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *slotList) Remove(e *slot) {
+ linker := slotElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ slotElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ slotElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type slotEntry struct {
+ next *slot
+ prev *slot
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *slotEntry) Next() *slot {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *slotEntry) Prev() *slot {
+ return e.prev
+}
+
+// SetNext assigns 'entry' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *slotEntry) SetNext(elem *slot) {
+ e.next = elem
+}
+
+// SetPrev assigns 'entry' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *slotEntry) SetPrev(elem *slot) {
+ e.prev = elem
+}
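
// The slotList/slotEntry pair above is an instance of gVisor's generic
// intrusive-list template. Since those types are unexported, the following
// self-contained sketch (illustrative names item/itemEntry/itemList, not
// taken from the diff) shows the push-and-iterate usage described in the
// List comment:

package main

import "fmt"

// item carries its own links by embedding itemEntry, so the list needs no
// per-element allocation.
type item struct {
	value int
	itemEntry
}

type itemEntry struct{ next, prev *item }

func (e *itemEntry) Next() *item     { return e.next }
func (e *itemEntry) Prev() *item     { return e.prev }
func (e *itemEntry) SetNext(i *item) { e.next = i }
func (e *itemEntry) SetPrev(i *item) { e.prev = i }

// itemList's zero value is an empty list, mirroring slotList.
type itemList struct{ head, tail *item }

func (l *itemList) Front() *item { return l.head }

func (l *itemList) PushBack(e *item) {
	e.SetNext(nil)
	e.SetPrev(l.tail)
	if l.tail != nil {
		l.tail.SetNext(e)
	} else {
		l.head = e
	}
	l.tail = e
}

func main() {
	var l itemList
	for i := 1; i <= 3; i++ {
		l.PushBack(&item{value: i})
	}
	for e := l.Front(); e != nil; e = e.Next() {
		fmt.Println(e.value) // prints 1, 2, 3
	}
}
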
diff --git a/pkg/sentry/fsimpl/kernfs/static_directory_refs.go b/pkg/sentry/fsimpl/kernfs/static_directory_refs.go
new file mode 100644
index 000000000..69534a2d2
--- /dev/null
+++ b/pkg/sentry/fsimpl/kernfs/static_directory_refs.go
@@ -0,0 +1,140 @@
+package kernfs
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const StaticDirectoryenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var StaticDirectoryobj *StaticDirectory
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type StaticDirectoryRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *StaticDirectoryRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *StaticDirectoryRefs) RefType() string {
+ return fmt.Sprintf("%T", StaticDirectoryobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *StaticDirectoryRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *StaticDirectoryRefs) LogRefs() bool {
+ return StaticDirectoryenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *StaticDirectoryRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *StaticDirectoryRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if StaticDirectoryenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.TryRefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *StaticDirectoryRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if StaticDirectoryenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *StaticDirectoryRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if StaticDirectoryenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *StaticDirectoryRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
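
// The reference-count layout described above packs two 32-bit halves into a
// single int64: real references in the low half, in-flight speculative
// references (from TryIncRef) in the high half. Below is a minimal standalone
// sketch of that scheme, using a hypothetical refs type that is not part of
// the generated file:

package main

import (
	"fmt"
	"sync/atomic"
)

type refs struct{ refCount int64 }

const speculativeRef = 1 << 32

// TryIncRef succeeds only if at least one real reference is still held; the
// speculative reference lets it avoid a CompareAndSwap loop.
func (r *refs) TryIncRef() bool {
	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
		// No real references remained: back out and fail.
		atomic.AddInt64(&r.refCount, -speculativeRef)
		return false
	}
	// Convert the speculative reference into a real one.
	atomic.AddInt64(&r.refCount, -speculativeRef+1)
	return true
}

// DecRef is simplified here: the generated version also panics on underflow
// and unregisters the object from leak checking.
func (r *refs) DecRef(destroy func()) {
	if v := atomic.AddInt64(&r.refCount, -1); v == 0 && destroy != nil {
		destroy()
	}
}

func main() {
	r := &refs{refCount: 1}                       // one initial reference
	fmt.Println(r.TryIncRef())                    // true: 1 -> 2
	r.DecRef(nil)                                 // 2 -> 1
	r.DecRef(func() { fmt.Println("destroyed") }) // 1 -> 0, destructor runs
	fmt.Println(r.TryIncRef())                    // false: count already zero
}
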
diff --git a/pkg/sentry/fsimpl/kernfs/synthetic_directory_refs.go b/pkg/sentry/fsimpl/kernfs/synthetic_directory_refs.go
new file mode 100644
index 000000000..3c5fdf15e
--- /dev/null
+++ b/pkg/sentry/fsimpl/kernfs/synthetic_directory_refs.go
@@ -0,0 +1,140 @@
+package kernfs
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const syntheticDirectoryenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var syntheticDirectoryobj *syntheticDirectory
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type syntheticDirectoryRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *syntheticDirectoryRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *syntheticDirectoryRefs) RefType() string {
+ return fmt.Sprintf("%T", syntheticDirectoryobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *syntheticDirectoryRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *syntheticDirectoryRefs) LogRefs() bool {
+ return syntheticDirectoryenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *syntheticDirectoryRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *syntheticDirectoryRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if syntheticDirectoryenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.TryRefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *syntheticDirectoryRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if syntheticDirectoryenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *syntheticDirectoryRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if syntheticDirectoryenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *syntheticDirectoryRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
diff --git a/pkg/sentry/fsimpl/overlay/BUILD b/pkg/sentry/fsimpl/overlay/BUILD
deleted file mode 100644
index ed730e215..000000000
--- a/pkg/sentry/fsimpl/overlay/BUILD
+++ /dev/null
@@ -1,49 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-licenses(["notice"])
-
-go_template_instance(
- name = "fstree",
- out = "fstree.go",
- package = "overlay",
- prefix = "generic",
- template = "//pkg/sentry/vfs/genericfstree:generic_fstree",
- types = {
- "Dentry": "dentry",
- },
-)
-
-go_library(
- name = "overlay",
- srcs = [
- "copy_up.go",
- "directory.go",
- "filesystem.go",
- "fstree.go",
- "overlay.go",
- "regular_file.go",
- "save_restore.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/fspath",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/sentry/arch",
- "//pkg/sentry/fs/lock",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/memmap",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- "//pkg/waiter",
- ],
-)
diff --git a/pkg/sentry/fsimpl/overlay/fstree.go b/pkg/sentry/fsimpl/overlay/fstree.go
new file mode 100644
index 000000000..c3eb062ed
--- /dev/null
+++ b/pkg/sentry/fsimpl/overlay/fstree.go
@@ -0,0 +1,55 @@
+package overlay
+
+import (
+ "gvisor.dev/gvisor/pkg/fspath"
+ "gvisor.dev/gvisor/pkg/sentry/vfs"
+)
+
+// IsAncestorDentry returns true if d is an ancestor of d2; that is, d is
+// either d2's parent or an ancestor of d2's parent.
+func genericIsAncestorDentry(d, d2 *dentry) bool {
+ for d2 != nil {
+ if d2.parent == d {
+ return true
+ }
+ if d2.parent == d2 {
+ return false
+ }
+ d2 = d2.parent
+ }
+ return false
+}
+
+// ParentOrSelf returns d.parent. If d.parent is nil, ParentOrSelf returns d.
+func genericParentOrSelf(d *dentry) *dentry {
+ if d.parent != nil {
+ return d.parent
+ }
+ return d
+}
+
+// PrependPath is a generic implementation of FilesystemImpl.PrependPath().
+func genericPrependPath(vfsroot vfs.VirtualDentry, mnt *vfs.Mount, d *dentry, b *fspath.Builder) error {
+ for {
+ if mnt == vfsroot.Mount() && &d.vfsd == vfsroot.Dentry() {
+ return vfs.PrependPathAtVFSRootError{}
+ }
+ if mnt != nil && &d.vfsd == mnt.Root() {
+ return nil
+ }
+ if d.parent == nil {
+ return vfs.PrependPathAtNonMountRootError{}
+ }
+ b.PrependComponent(d.name)
+ d = d.parent
+ }
+}
+
+// DebugPathname returns a pathname to d relative to its filesystem root.
+// DebugPathname does not correspond to any Linux function; it's used to
+// generate dentry pathnames for debugging.
+func genericDebugPathname(d *dentry) string {
+ var b fspath.Builder
+ _ = genericPrependPath(vfs.VirtualDentry{}, nil, d, &b)
+ return b.String()
+}
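
// genericIsAncestorDentry above walks parent pointers from d2 toward the
// root. A standalone sketch of that walk with an illustrative node type (the
// real overlay dentry also stops when parent is nil):

package main

import "fmt"

type node struct {
	name   string
	parent *node
}

// isAncestor reports whether d is d2's parent or a more distant ancestor.
func isAncestor(d, d2 *node) bool {
	for d2 != nil {
		if d2.parent == d {
			return true
		}
		if d2.parent == d2 {
			return false // self-parented root: nothing above it
		}
		d2 = d2.parent
	}
	return false
}

func main() {
	root := &node{name: "/"}
	root.parent = root // model the root as its own parent for this sketch
	a := &node{name: "a", parent: root}
	b := &node{name: "b", parent: a}
	fmt.Println(isAncestor(root, b)) // true
	fmt.Println(isAncestor(b, a))    // false
}
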
diff --git a/pkg/sentry/fsimpl/overlay/overlay_state_autogen.go b/pkg/sentry/fsimpl/overlay/overlay_state_autogen.go
new file mode 100644
index 000000000..923a2e71a
--- /dev/null
+++ b/pkg/sentry/fsimpl/overlay/overlay_state_autogen.go
@@ -0,0 +1,321 @@
+// automatically generated by stateify.
+
+package overlay
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (fd *directoryFD) StateTypeName() string {
+ return "pkg/sentry/fsimpl/overlay.directoryFD"
+}
+
+func (fd *directoryFD) StateFields() []string {
+ return []string{
+ "fileDescription",
+ "DirectoryFileDescriptionDefaultImpl",
+ "DentryMetadataFileDescriptionImpl",
+ "off",
+ "dirents",
+ }
+}
+
+func (fd *directoryFD) beforeSave() {}
+
+// +checklocksignore
+func (fd *directoryFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.fileDescription)
+ stateSinkObject.Save(1, &fd.DirectoryFileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &fd.DentryMetadataFileDescriptionImpl)
+ stateSinkObject.Save(3, &fd.off)
+ stateSinkObject.Save(4, &fd.dirents)
+}
+
+func (fd *directoryFD) afterLoad() {}
+
+// +checklocksignore
+func (fd *directoryFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.fileDescription)
+ stateSourceObject.Load(1, &fd.DirectoryFileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &fd.DentryMetadataFileDescriptionImpl)
+ stateSourceObject.Load(3, &fd.off)
+ stateSourceObject.Load(4, &fd.dirents)
+}
+
+func (fstype *FilesystemType) StateTypeName() string {
+ return "pkg/sentry/fsimpl/overlay.FilesystemType"
+}
+
+func (fstype *FilesystemType) StateFields() []string {
+ return []string{}
+}
+
+func (fstype *FilesystemType) beforeSave() {}
+
+// +checklocksignore
+func (fstype *FilesystemType) StateSave(stateSinkObject state.Sink) {
+ fstype.beforeSave()
+}
+
+func (fstype *FilesystemType) afterLoad() {}
+
+// +checklocksignore
+func (fstype *FilesystemType) StateLoad(stateSourceObject state.Source) {
+}
+
+func (f *FilesystemOptions) StateTypeName() string {
+ return "pkg/sentry/fsimpl/overlay.FilesystemOptions"
+}
+
+func (f *FilesystemOptions) StateFields() []string {
+ return []string{
+ "UpperRoot",
+ "LowerRoots",
+ }
+}
+
+func (f *FilesystemOptions) beforeSave() {}
+
+// +checklocksignore
+func (f *FilesystemOptions) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.UpperRoot)
+ stateSinkObject.Save(1, &f.LowerRoots)
+}
+
+func (f *FilesystemOptions) afterLoad() {}
+
+// +checklocksignore
+func (f *FilesystemOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.UpperRoot)
+ stateSourceObject.Load(1, &f.LowerRoots)
+}
+
+func (fs *filesystem) StateTypeName() string {
+ return "pkg/sentry/fsimpl/overlay.filesystem"
+}
+
+func (fs *filesystem) StateFields() []string {
+ return []string{
+ "vfsfs",
+ "opts",
+ "creds",
+ "privateDevMinors",
+ }
+}
+
+func (fs *filesystem) beforeSave() {}
+
+// +checklocksignore
+func (fs *filesystem) StateSave(stateSinkObject state.Sink) {
+ fs.beforeSave()
+ stateSinkObject.Save(0, &fs.vfsfs)
+ stateSinkObject.Save(1, &fs.opts)
+ stateSinkObject.Save(2, &fs.creds)
+ stateSinkObject.Save(3, &fs.privateDevMinors)
+}
+
+func (fs *filesystem) afterLoad() {}
+
+// +checklocksignore
+func (fs *filesystem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fs.vfsfs)
+ stateSourceObject.Load(1, &fs.opts)
+ stateSourceObject.Load(2, &fs.creds)
+ stateSourceObject.Load(3, &fs.privateDevMinors)
+}
+
+func (l *layerDevNumber) StateTypeName() string {
+ return "pkg/sentry/fsimpl/overlay.layerDevNumber"
+}
+
+func (l *layerDevNumber) StateFields() []string {
+ return []string{
+ "major",
+ "minor",
+ }
+}
+
+func (l *layerDevNumber) beforeSave() {}
+
+// +checklocksignore
+func (l *layerDevNumber) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.major)
+ stateSinkObject.Save(1, &l.minor)
+}
+
+func (l *layerDevNumber) afterLoad() {}
+
+// +checklocksignore
+func (l *layerDevNumber) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.major)
+ stateSourceObject.Load(1, &l.minor)
+}
+
+func (d *dentry) StateTypeName() string {
+ return "pkg/sentry/fsimpl/overlay.dentry"
+}
+
+func (d *dentry) StateFields() []string {
+ return []string{
+ "vfsd",
+ "refs",
+ "fs",
+ "mode",
+ "uid",
+ "gid",
+ "copiedUp",
+ "parent",
+ "name",
+ "children",
+ "dirents",
+ "upperVD",
+ "lowerVDs",
+ "inlineLowerVDs",
+ "devMajor",
+ "devMinor",
+ "ino",
+ "lowerMappings",
+ "wrappedMappable",
+ "isMappable",
+ "locks",
+ "watches",
+ }
+}
+
+func (d *dentry) beforeSave() {}
+
+// +checklocksignore
+func (d *dentry) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.vfsd)
+ stateSinkObject.Save(1, &d.refs)
+ stateSinkObject.Save(2, &d.fs)
+ stateSinkObject.Save(3, &d.mode)
+ stateSinkObject.Save(4, &d.uid)
+ stateSinkObject.Save(5, &d.gid)
+ stateSinkObject.Save(6, &d.copiedUp)
+ stateSinkObject.Save(7, &d.parent)
+ stateSinkObject.Save(8, &d.name)
+ stateSinkObject.Save(9, &d.children)
+ stateSinkObject.Save(10, &d.dirents)
+ stateSinkObject.Save(11, &d.upperVD)
+ stateSinkObject.Save(12, &d.lowerVDs)
+ stateSinkObject.Save(13, &d.inlineLowerVDs)
+ stateSinkObject.Save(14, &d.devMajor)
+ stateSinkObject.Save(15, &d.devMinor)
+ stateSinkObject.Save(16, &d.ino)
+ stateSinkObject.Save(17, &d.lowerMappings)
+ stateSinkObject.Save(18, &d.wrappedMappable)
+ stateSinkObject.Save(19, &d.isMappable)
+ stateSinkObject.Save(20, &d.locks)
+ stateSinkObject.Save(21, &d.watches)
+}
+
+// +checklocksignore
+func (d *dentry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.vfsd)
+ stateSourceObject.Load(1, &d.refs)
+ stateSourceObject.Load(2, &d.fs)
+ stateSourceObject.Load(3, &d.mode)
+ stateSourceObject.Load(4, &d.uid)
+ stateSourceObject.Load(5, &d.gid)
+ stateSourceObject.Load(6, &d.copiedUp)
+ stateSourceObject.Load(7, &d.parent)
+ stateSourceObject.Load(8, &d.name)
+ stateSourceObject.Load(9, &d.children)
+ stateSourceObject.Load(10, &d.dirents)
+ stateSourceObject.Load(11, &d.upperVD)
+ stateSourceObject.Load(12, &d.lowerVDs)
+ stateSourceObject.Load(13, &d.inlineLowerVDs)
+ stateSourceObject.Load(14, &d.devMajor)
+ stateSourceObject.Load(15, &d.devMinor)
+ stateSourceObject.Load(16, &d.ino)
+ stateSourceObject.Load(17, &d.lowerMappings)
+ stateSourceObject.Load(18, &d.wrappedMappable)
+ stateSourceObject.Load(19, &d.isMappable)
+ stateSourceObject.Load(20, &d.locks)
+ stateSourceObject.Load(21, &d.watches)
+ stateSourceObject.AfterLoad(d.afterLoad)
+}
+
+func (fd *fileDescription) StateTypeName() string {
+ return "pkg/sentry/fsimpl/overlay.fileDescription"
+}
+
+func (fd *fileDescription) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "LockFD",
+ }
+}
+
+func (fd *fileDescription) beforeSave() {}
+
+// +checklocksignore
+func (fd *fileDescription) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.vfsfd)
+ stateSinkObject.Save(1, &fd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &fd.LockFD)
+}
+
+func (fd *fileDescription) afterLoad() {}
+
+// +checklocksignore
+func (fd *fileDescription) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.vfsfd)
+ stateSourceObject.Load(1, &fd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &fd.LockFD)
+}
+
+func (fd *regularFileFD) StateTypeName() string {
+ return "pkg/sentry/fsimpl/overlay.regularFileFD"
+}
+
+func (fd *regularFileFD) StateFields() []string {
+ return []string{
+ "fileDescription",
+ "copiedUp",
+ "cachedFD",
+ "cachedFlags",
+ "lowerWaiters",
+ }
+}
+
+func (fd *regularFileFD) beforeSave() {}
+
+// +checklocksignore
+func (fd *regularFileFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.fileDescription)
+ stateSinkObject.Save(1, &fd.copiedUp)
+ stateSinkObject.Save(2, &fd.cachedFD)
+ stateSinkObject.Save(3, &fd.cachedFlags)
+ stateSinkObject.Save(4, &fd.lowerWaiters)
+}
+
+func (fd *regularFileFD) afterLoad() {}
+
+// +checklocksignore
+func (fd *regularFileFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.fileDescription)
+ stateSourceObject.Load(1, &fd.copiedUp)
+ stateSourceObject.Load(2, &fd.cachedFD)
+ stateSourceObject.Load(3, &fd.cachedFlags)
+ stateSourceObject.Load(4, &fd.lowerWaiters)
+}
+
+func init() {
+ state.Register((*directoryFD)(nil))
+ state.Register((*FilesystemType)(nil))
+ state.Register((*FilesystemOptions)(nil))
+ state.Register((*filesystem)(nil))
+ state.Register((*layerDevNumber)(nil))
+ state.Register((*dentry)(nil))
+ state.Register((*fileDescription)(nil))
+ state.Register((*regularFileFD)(nil))
+}
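
// Each stateify-generated type above implements the same contract: a type
// name, a field list, and index-based save/load hooks, plus registration in
// init. A minimal hand-written sketch of that contract, using a hypothetical
// counter type that is not part of the overlay package:

package statesketch

import "gvisor.dev/gvisor/pkg/state"

// counter is a savable type with a single persisted field.
type counter struct {
	hits int64
}

func (c *counter) StateTypeName() string { return "statesketch.counter" }

func (c *counter) StateFields() []string { return []string{"hits"} }

func (c *counter) beforeSave() {}

func (c *counter) StateSave(sink state.Sink) {
	c.beforeSave()
	sink.Save(0, &c.hits) // field index 0 matches StateFields()[0]
}

func (c *counter) afterLoad() {}

func (c *counter) StateLoad(source state.Source) {
	source.Load(0, &c.hits)
	source.AfterLoad(c.afterLoad) // run fix-ups once all objects are loaded
}

func init() {
	state.Register((*counter)(nil))
}
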
diff --git a/pkg/sentry/fsimpl/pipefs/BUILD b/pkg/sentry/fsimpl/pipefs/BUILD
deleted file mode 100644
index a50510031..000000000
--- a/pkg/sentry/fsimpl/pipefs/BUILD
+++ /dev/null
@@ -1,21 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-licenses(["notice"])
-
-go_library(
- name = "pipefs",
- srcs = ["pipefs.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/fspath",
- "//pkg/hostarch",
- "//pkg/sentry/fsimpl/kernfs",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/pipe",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/vfs",
- ],
-)
diff --git a/pkg/sentry/fsimpl/pipefs/pipefs_state_autogen.go b/pkg/sentry/fsimpl/pipefs/pipefs_state_autogen.go
new file mode 100644
index 000000000..5f9e117c3
--- /dev/null
+++ b/pkg/sentry/fsimpl/pipefs/pipefs_state_autogen.go
@@ -0,0 +1,111 @@
+// automatically generated by stateify.
+
+package pipefs
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (f *filesystemType) StateTypeName() string {
+ return "pkg/sentry/fsimpl/pipefs.filesystemType"
+}
+
+func (f *filesystemType) StateFields() []string {
+ return []string{}
+}
+
+func (f *filesystemType) beforeSave() {}
+
+// +checklocksignore
+func (f *filesystemType) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+}
+
+func (f *filesystemType) afterLoad() {}
+
+// +checklocksignore
+func (f *filesystemType) StateLoad(stateSourceObject state.Source) {
+}
+
+func (fs *filesystem) StateTypeName() string {
+ return "pkg/sentry/fsimpl/pipefs.filesystem"
+}
+
+func (fs *filesystem) StateFields() []string {
+ return []string{
+ "Filesystem",
+ "devMinor",
+ }
+}
+
+func (fs *filesystem) beforeSave() {}
+
+// +checklocksignore
+func (fs *filesystem) StateSave(stateSinkObject state.Sink) {
+ fs.beforeSave()
+ stateSinkObject.Save(0, &fs.Filesystem)
+ stateSinkObject.Save(1, &fs.devMinor)
+}
+
+func (fs *filesystem) afterLoad() {}
+
+// +checklocksignore
+func (fs *filesystem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fs.Filesystem)
+ stateSourceObject.Load(1, &fs.devMinor)
+}
+
+func (i *inode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/pipefs.inode"
+}
+
+func (i *inode) StateFields() []string {
+ return []string{
+ "InodeNotDirectory",
+ "InodeNotSymlink",
+ "InodeNoopRefCount",
+ "locks",
+ "pipe",
+ "ino",
+ "uid",
+ "gid",
+ "ctime",
+ }
+}
+
+func (i *inode) beforeSave() {}
+
+// +checklocksignore
+func (i *inode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.InodeNotDirectory)
+ stateSinkObject.Save(1, &i.InodeNotSymlink)
+ stateSinkObject.Save(2, &i.InodeNoopRefCount)
+ stateSinkObject.Save(3, &i.locks)
+ stateSinkObject.Save(4, &i.pipe)
+ stateSinkObject.Save(5, &i.ino)
+ stateSinkObject.Save(6, &i.uid)
+ stateSinkObject.Save(7, &i.gid)
+ stateSinkObject.Save(8, &i.ctime)
+}
+
+func (i *inode) afterLoad() {}
+
+// +checklocksignore
+func (i *inode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.InodeNotDirectory)
+ stateSourceObject.Load(1, &i.InodeNotSymlink)
+ stateSourceObject.Load(2, &i.InodeNoopRefCount)
+ stateSourceObject.Load(3, &i.locks)
+ stateSourceObject.Load(4, &i.pipe)
+ stateSourceObject.Load(5, &i.ino)
+ stateSourceObject.Load(6, &i.uid)
+ stateSourceObject.Load(7, &i.gid)
+ stateSourceObject.Load(8, &i.ctime)
+}
+
+func init() {
+ state.Register((*filesystemType)(nil))
+ state.Register((*filesystem)(nil))
+ state.Register((*inode)(nil))
+}
diff --git a/pkg/sentry/fsimpl/proc/BUILD b/pkg/sentry/fsimpl/proc/BUILD
deleted file mode 100644
index 1d3d2d95f..000000000
--- a/pkg/sentry/fsimpl/proc/BUILD
+++ /dev/null
@@ -1,134 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-licenses(["notice"])
-
-go_template_instance(
- name = "fd_dir_inode_refs",
- out = "fd_dir_inode_refs.go",
- package = "proc",
- prefix = "fdDirInode",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "fdDirInode",
- },
-)
-
-go_template_instance(
- name = "fd_info_dir_inode_refs",
- out = "fd_info_dir_inode_refs.go",
- package = "proc",
- prefix = "fdInfoDirInode",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "fdInfoDirInode",
- },
-)
-
-go_template_instance(
- name = "subtasks_inode_refs",
- out = "subtasks_inode_refs.go",
- package = "proc",
- prefix = "subtasksInode",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "subtasksInode",
- },
-)
-
-go_template_instance(
- name = "task_inode_refs",
- out = "task_inode_refs.go",
- package = "proc",
- prefix = "taskInode",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "taskInode",
- },
-)
-
-go_template_instance(
- name = "tasks_inode_refs",
- out = "tasks_inode_refs.go",
- package = "proc",
- prefix = "tasksInode",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "tasksInode",
- },
-)
-
-go_library(
- name = "proc",
- srcs = [
- "fd_dir_inode_refs.go",
- "fd_info_dir_inode_refs.go",
- "filesystem.go",
- "subtasks.go",
- "subtasks_inode_refs.go",
- "task.go",
- "task_fds.go",
- "task_files.go",
- "task_inode_refs.go",
- "task_net.go",
- "tasks.go",
- "tasks_files.go",
- "tasks_inode_refs.go",
- "tasks_sys.go",
- "yama.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/safemem",
- "//pkg/sentry/fs/lock",
- "//pkg/sentry/fsbridge",
- "//pkg/sentry/fsimpl/kernfs",
- "//pkg/sentry/inet",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/limits",
- "//pkg/sentry/mm",
- "//pkg/sentry/socket",
- "//pkg/sentry/socket/unix",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/usage",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/tcpip/header",
- "//pkg/tcpip/network/ipv4",
- "//pkg/usermem",
- ],
-)
-
-go_test(
- name = "proc_test",
- size = "small",
- srcs = [
- "tasks_sys_test.go",
- "tasks_test.go",
- ],
- library = ":proc",
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/fspath",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/fsimpl/testutil",
- "//pkg/sentry/fsimpl/tmpfs",
- "//pkg/sentry/inet",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- "//pkg/usermem",
- ],
-)
diff --git a/pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go b/pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go
new file mode 100644
index 000000000..61138f055
--- /dev/null
+++ b/pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go
@@ -0,0 +1,140 @@
+package proc
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const fdDirInodeenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var fdDirInodeobj *fdDirInode
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type fdDirInodeRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *fdDirInodeRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *fdDirInodeRefs) RefType() string {
+ return fmt.Sprintf("%T", fdDirInodeobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *fdDirInodeRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *fdDirInodeRefs) LogRefs() bool {
+ return fdDirInodeenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *fdDirInodeRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *fdDirInodeRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if fdDirInodeenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.TryRefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *fdDirInodeRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if fdDirInodeenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *fdDirInodeRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if fdDirInodeenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *fdDirInodeRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
diff --git a/pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go b/pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go
new file mode 100644
index 000000000..53fb0910a
--- /dev/null
+++ b/pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go
@@ -0,0 +1,140 @@
+package proc
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const fdInfoDirInodeenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var fdInfoDirInodeobj *fdInfoDirInode
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type fdInfoDirInodeRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *fdInfoDirInodeRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *fdInfoDirInodeRefs) RefType() string {
+ return fmt.Sprintf("%T", fdInfoDirInodeobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *fdInfoDirInodeRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *fdInfoDirInodeRefs) LogRefs() bool {
+ return fdInfoDirInodeenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *fdInfoDirInodeRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *fdInfoDirInodeRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if fdInfoDirInodeenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.TryRefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *fdInfoDirInodeRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if fdInfoDirInodeenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *fdInfoDirInodeRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if fdInfoDirInodeenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *fdInfoDirInodeRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
diff --git a/pkg/sentry/fsimpl/proc/proc_state_autogen.go b/pkg/sentry/fsimpl/proc/proc_state_autogen.go
new file mode 100644
index 000000000..f87739c23
--- /dev/null
+++ b/pkg/sentry/fsimpl/proc/proc_state_autogen.go
@@ -0,0 +1,2366 @@
+// automatically generated by stateify.
+
+package proc
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (r *fdDirInodeRefs) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.fdDirInodeRefs"
+}
+
+func (r *fdDirInodeRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *fdDirInodeRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *fdDirInodeRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *fdDirInodeRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (r *fdInfoDirInodeRefs) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.fdInfoDirInodeRefs"
+}
+
+func (r *fdInfoDirInodeRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *fdInfoDirInodeRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *fdInfoDirInodeRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *fdInfoDirInodeRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (ft *FilesystemType) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.FilesystemType"
+}
+
+func (ft *FilesystemType) StateFields() []string {
+ return []string{}
+}
+
+func (ft *FilesystemType) beforeSave() {}
+
+// +checklocksignore
+func (ft *FilesystemType) StateSave(stateSinkObject state.Sink) {
+ ft.beforeSave()
+}
+
+func (ft *FilesystemType) afterLoad() {}
+
+// +checklocksignore
+func (ft *FilesystemType) StateLoad(stateSourceObject state.Source) {
+}
+
+func (fs *filesystem) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.filesystem"
+}
+
+func (fs *filesystem) StateFields() []string {
+ return []string{
+ "Filesystem",
+ "devMinor",
+ }
+}
+
+func (fs *filesystem) beforeSave() {}
+
+// +checklocksignore
+func (fs *filesystem) StateSave(stateSinkObject state.Sink) {
+ fs.beforeSave()
+ stateSinkObject.Save(0, &fs.Filesystem)
+ stateSinkObject.Save(1, &fs.devMinor)
+}
+
+func (fs *filesystem) afterLoad() {}
+
+// +checklocksignore
+func (fs *filesystem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fs.Filesystem)
+ stateSourceObject.Load(1, &fs.devMinor)
+}
+
+func (s *staticFile) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.staticFile"
+}
+
+func (s *staticFile) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "StaticData",
+ }
+}
+
+func (s *staticFile) beforeSave() {}
+
+// +checklocksignore
+func (s *staticFile) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.DynamicBytesFile)
+ stateSinkObject.Save(1, &s.StaticData)
+}
+
+func (s *staticFile) afterLoad() {}
+
+// +checklocksignore
+func (s *staticFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.DynamicBytesFile)
+ stateSourceObject.Load(1, &s.StaticData)
+}
+
+func (i *InternalData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.InternalData"
+}
+
+func (i *InternalData) StateFields() []string {
+ return []string{
+ "Cgroups",
+ }
+}
+
+func (i *InternalData) beforeSave() {}
+
+// +checklocksignore
+func (i *InternalData) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.Cgroups)
+}
+
+func (i *InternalData) afterLoad() {}
+
+// +checklocksignore
+func (i *InternalData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.Cgroups)
+}
+
+func (i *implStatFS) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.implStatFS"
+}
+
+func (i *implStatFS) StateFields() []string {
+ return []string{}
+}
+
+func (i *implStatFS) beforeSave() {}
+
+// +checklocksignore
+func (i *implStatFS) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+}
+
+func (i *implStatFS) afterLoad() {}
+
+// +checklocksignore
+func (i *implStatFS) StateLoad(stateSourceObject state.Source) {
+}
+
+func (i *subtasksInode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.subtasksInode"
+}
+
+func (i *subtasksInode) StateFields() []string {
+ return []string{
+ "implStatFS",
+ "InodeAlwaysValid",
+ "InodeAttrs",
+ "InodeDirectoryNoNewChildren",
+ "InodeNotSymlink",
+ "InodeTemporary",
+ "OrderedChildren",
+ "subtasksInodeRefs",
+ "locks",
+ "fs",
+ "task",
+ "pidns",
+ "cgroupControllers",
+ }
+}
+
+func (i *subtasksInode) beforeSave() {}
+
+// +checklocksignore
+func (i *subtasksInode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.implStatFS)
+ stateSinkObject.Save(1, &i.InodeAlwaysValid)
+ stateSinkObject.Save(2, &i.InodeAttrs)
+ stateSinkObject.Save(3, &i.InodeDirectoryNoNewChildren)
+ stateSinkObject.Save(4, &i.InodeNotSymlink)
+ stateSinkObject.Save(5, &i.InodeTemporary)
+ stateSinkObject.Save(6, &i.OrderedChildren)
+ stateSinkObject.Save(7, &i.subtasksInodeRefs)
+ stateSinkObject.Save(8, &i.locks)
+ stateSinkObject.Save(9, &i.fs)
+ stateSinkObject.Save(10, &i.task)
+ stateSinkObject.Save(11, &i.pidns)
+ stateSinkObject.Save(12, &i.cgroupControllers)
+}
+
+func (i *subtasksInode) afterLoad() {}
+
+// +checklocksignore
+func (i *subtasksInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.implStatFS)
+ stateSourceObject.Load(1, &i.InodeAlwaysValid)
+ stateSourceObject.Load(2, &i.InodeAttrs)
+ stateSourceObject.Load(3, &i.InodeDirectoryNoNewChildren)
+ stateSourceObject.Load(4, &i.InodeNotSymlink)
+ stateSourceObject.Load(5, &i.InodeTemporary)
+ stateSourceObject.Load(6, &i.OrderedChildren)
+ stateSourceObject.Load(7, &i.subtasksInodeRefs)
+ stateSourceObject.Load(8, &i.locks)
+ stateSourceObject.Load(9, &i.fs)
+ stateSourceObject.Load(10, &i.task)
+ stateSourceObject.Load(11, &i.pidns)
+ stateSourceObject.Load(12, &i.cgroupControllers)
+}
+
+func (fd *subtasksFD) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.subtasksFD"
+}
+
+func (fd *subtasksFD) StateFields() []string {
+ return []string{
+ "GenericDirectoryFD",
+ "task",
+ }
+}
+
+func (fd *subtasksFD) beforeSave() {}
+
+// +checklocksignore
+func (fd *subtasksFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.GenericDirectoryFD)
+ stateSinkObject.Save(1, &fd.task)
+}
+
+func (fd *subtasksFD) afterLoad() {}
+
+// +checklocksignore
+func (fd *subtasksFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.GenericDirectoryFD)
+ stateSourceObject.Load(1, &fd.task)
+}
+
+func (r *subtasksInodeRefs) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.subtasksInodeRefs"
+}
+
+func (r *subtasksInodeRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *subtasksInodeRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *subtasksInodeRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *subtasksInodeRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (i *taskInode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.taskInode"
+}
+
+func (i *taskInode) StateFields() []string {
+ return []string{
+ "implStatFS",
+ "InodeAttrs",
+ "InodeDirectoryNoNewChildren",
+ "InodeNotSymlink",
+ "InodeTemporary",
+ "OrderedChildren",
+ "taskInodeRefs",
+ "locks",
+ "task",
+ }
+}
+
+func (i *taskInode) beforeSave() {}
+
+// +checklocksignore
+func (i *taskInode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.implStatFS)
+ stateSinkObject.Save(1, &i.InodeAttrs)
+ stateSinkObject.Save(2, &i.InodeDirectoryNoNewChildren)
+ stateSinkObject.Save(3, &i.InodeNotSymlink)
+ stateSinkObject.Save(4, &i.InodeTemporary)
+ stateSinkObject.Save(5, &i.OrderedChildren)
+ stateSinkObject.Save(6, &i.taskInodeRefs)
+ stateSinkObject.Save(7, &i.locks)
+ stateSinkObject.Save(8, &i.task)
+}
+
+func (i *taskInode) afterLoad() {}
+
+// +checklocksignore
+func (i *taskInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.implStatFS)
+ stateSourceObject.Load(1, &i.InodeAttrs)
+ stateSourceObject.Load(2, &i.InodeDirectoryNoNewChildren)
+ stateSourceObject.Load(3, &i.InodeNotSymlink)
+ stateSourceObject.Load(4, &i.InodeTemporary)
+ stateSourceObject.Load(5, &i.OrderedChildren)
+ stateSourceObject.Load(6, &i.taskInodeRefs)
+ stateSourceObject.Load(7, &i.locks)
+ stateSourceObject.Load(8, &i.task)
+}
+
+func (i *taskOwnedInode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.taskOwnedInode"
+}
+
+func (i *taskOwnedInode) StateFields() []string {
+ return []string{
+ "Inode",
+ "owner",
+ }
+}
+
+func (i *taskOwnedInode) beforeSave() {}
+
+// +checklocksignore
+func (i *taskOwnedInode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.Inode)
+ stateSinkObject.Save(1, &i.owner)
+}
+
+func (i *taskOwnedInode) afterLoad() {}
+
+// +checklocksignore
+func (i *taskOwnedInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.Inode)
+ stateSourceObject.Load(1, &i.owner)
+}
+
+func (i *fdDir) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.fdDir"
+}
+
+func (i *fdDir) StateFields() []string {
+ return []string{
+ "locks",
+ "fs",
+ "task",
+ "produceSymlink",
+ }
+}
+
+func (i *fdDir) beforeSave() {}
+
+// +checklocksignore
+func (i *fdDir) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.locks)
+ stateSinkObject.Save(1, &i.fs)
+ stateSinkObject.Save(2, &i.task)
+ stateSinkObject.Save(3, &i.produceSymlink)
+}
+
+func (i *fdDir) afterLoad() {}
+
+// +checklocksignore
+func (i *fdDir) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.locks)
+ stateSourceObject.Load(1, &i.fs)
+ stateSourceObject.Load(2, &i.task)
+ stateSourceObject.Load(3, &i.produceSymlink)
+}
+
+func (i *fdDirInode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.fdDirInode"
+}
+
+func (i *fdDirInode) StateFields() []string {
+ return []string{
+ "fdDir",
+ "fdDirInodeRefs",
+ "implStatFS",
+ "InodeAlwaysValid",
+ "InodeAttrs",
+ "InodeDirectoryNoNewChildren",
+ "InodeNotSymlink",
+ "InodeTemporary",
+ "OrderedChildren",
+ }
+}
+
+func (i *fdDirInode) beforeSave() {}
+
+// +checklocksignore
+func (i *fdDirInode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.fdDir)
+ stateSinkObject.Save(1, &i.fdDirInodeRefs)
+ stateSinkObject.Save(2, &i.implStatFS)
+ stateSinkObject.Save(3, &i.InodeAlwaysValid)
+ stateSinkObject.Save(4, &i.InodeAttrs)
+ stateSinkObject.Save(5, &i.InodeDirectoryNoNewChildren)
+ stateSinkObject.Save(6, &i.InodeNotSymlink)
+ stateSinkObject.Save(7, &i.InodeTemporary)
+ stateSinkObject.Save(8, &i.OrderedChildren)
+}
+
+func (i *fdDirInode) afterLoad() {}
+
+// +checklocksignore
+func (i *fdDirInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.fdDir)
+ stateSourceObject.Load(1, &i.fdDirInodeRefs)
+ stateSourceObject.Load(2, &i.implStatFS)
+ stateSourceObject.Load(3, &i.InodeAlwaysValid)
+ stateSourceObject.Load(4, &i.InodeAttrs)
+ stateSourceObject.Load(5, &i.InodeDirectoryNoNewChildren)
+ stateSourceObject.Load(6, &i.InodeNotSymlink)
+ stateSourceObject.Load(7, &i.InodeTemporary)
+ stateSourceObject.Load(8, &i.OrderedChildren)
+}
+
+func (s *fdSymlink) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.fdSymlink"
+}
+
+func (s *fdSymlink) StateFields() []string {
+ return []string{
+ "implStatFS",
+ "InodeAttrs",
+ "InodeNoopRefCount",
+ "InodeSymlink",
+ "fs",
+ "task",
+ "fd",
+ }
+}
+
+func (s *fdSymlink) beforeSave() {}
+
+// +checklocksignore
+func (s *fdSymlink) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.implStatFS)
+ stateSinkObject.Save(1, &s.InodeAttrs)
+ stateSinkObject.Save(2, &s.InodeNoopRefCount)
+ stateSinkObject.Save(3, &s.InodeSymlink)
+ stateSinkObject.Save(4, &s.fs)
+ stateSinkObject.Save(5, &s.task)
+ stateSinkObject.Save(6, &s.fd)
+}
+
+func (s *fdSymlink) afterLoad() {}
+
+// +checklocksignore
+func (s *fdSymlink) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.implStatFS)
+ stateSourceObject.Load(1, &s.InodeAttrs)
+ stateSourceObject.Load(2, &s.InodeNoopRefCount)
+ stateSourceObject.Load(3, &s.InodeSymlink)
+ stateSourceObject.Load(4, &s.fs)
+ stateSourceObject.Load(5, &s.task)
+ stateSourceObject.Load(6, &s.fd)
+}
+
+func (i *fdInfoDirInode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.fdInfoDirInode"
+}
+
+func (i *fdInfoDirInode) StateFields() []string {
+ return []string{
+ "fdDir",
+ "fdInfoDirInodeRefs",
+ "implStatFS",
+ "InodeAlwaysValid",
+ "InodeAttrs",
+ "InodeDirectoryNoNewChildren",
+ "InodeNotSymlink",
+ "InodeTemporary",
+ "OrderedChildren",
+ }
+}
+
+func (i *fdInfoDirInode) beforeSave() {}
+
+// +checklocksignore
+func (i *fdInfoDirInode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.fdDir)
+ stateSinkObject.Save(1, &i.fdInfoDirInodeRefs)
+ stateSinkObject.Save(2, &i.implStatFS)
+ stateSinkObject.Save(3, &i.InodeAlwaysValid)
+ stateSinkObject.Save(4, &i.InodeAttrs)
+ stateSinkObject.Save(5, &i.InodeDirectoryNoNewChildren)
+ stateSinkObject.Save(6, &i.InodeNotSymlink)
+ stateSinkObject.Save(7, &i.InodeTemporary)
+ stateSinkObject.Save(8, &i.OrderedChildren)
+}
+
+func (i *fdInfoDirInode) afterLoad() {}
+
+// +checklocksignore
+func (i *fdInfoDirInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.fdDir)
+ stateSourceObject.Load(1, &i.fdInfoDirInodeRefs)
+ stateSourceObject.Load(2, &i.implStatFS)
+ stateSourceObject.Load(3, &i.InodeAlwaysValid)
+ stateSourceObject.Load(4, &i.InodeAttrs)
+ stateSourceObject.Load(5, &i.InodeDirectoryNoNewChildren)
+ stateSourceObject.Load(6, &i.InodeNotSymlink)
+ stateSourceObject.Load(7, &i.InodeTemporary)
+ stateSourceObject.Load(8, &i.OrderedChildren)
+}
+
+func (d *fdInfoData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.fdInfoData"
+}
+
+func (d *fdInfoData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "fs",
+ "task",
+ "fd",
+ }
+}
+
+func (d *fdInfoData) beforeSave() {}
+
+// +checklocksignore
+func (d *fdInfoData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.fs)
+ stateSinkObject.Save(2, &d.task)
+ stateSinkObject.Save(3, &d.fd)
+}
+
+func (d *fdInfoData) afterLoad() {}
+
+// +checklocksignore
+func (d *fdInfoData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.Load(1, &d.fs)
+ stateSourceObject.Load(2, &d.task)
+ stateSourceObject.Load(3, &d.fd)
+}
+
+func (d *auxvData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.auxvData"
+}
+
+func (d *auxvData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "task",
+ }
+}
+
+func (d *auxvData) beforeSave() {}
+
+// +checklocksignore
+func (d *auxvData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.task)
+}
+
+func (d *auxvData) afterLoad() {}
+
+// +checklocksignore
+func (d *auxvData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.Load(1, &d.task)
+}
+
+func (d *cmdlineData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.cmdlineData"
+}
+
+func (d *cmdlineData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "task",
+ "arg",
+ }
+}
+
+func (d *cmdlineData) beforeSave() {}
+
+// +checklocksignore
+func (d *cmdlineData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.task)
+ stateSinkObject.Save(2, &d.arg)
+}
+
+func (d *cmdlineData) afterLoad() {}
+
+// +checklocksignore
+func (d *cmdlineData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.Load(1, &d.task)
+ stateSourceObject.Load(2, &d.arg)
+}
+
+func (i *commInode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.commInode"
+}
+
+func (i *commInode) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "task",
+ }
+}
+
+func (i *commInode) beforeSave() {}
+
+// +checklocksignore
+func (i *commInode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.DynamicBytesFile)
+ stateSinkObject.Save(1, &i.task)
+}
+
+func (i *commInode) afterLoad() {}
+
+// +checklocksignore
+func (i *commInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.DynamicBytesFile)
+ stateSourceObject.Load(1, &i.task)
+}
+
+func (d *commData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.commData"
+}
+
+func (d *commData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "task",
+ }
+}
+
+func (d *commData) beforeSave() {}
+
+// +checklocksignore
+func (d *commData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.task)
+}
+
+func (d *commData) afterLoad() {}
+
+// +checklocksignore
+func (d *commData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.Load(1, &d.task)
+}
+
+func (d *idMapData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.idMapData"
+}
+
+func (d *idMapData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "task",
+ "gids",
+ }
+}
+
+func (d *idMapData) beforeSave() {}
+
+// +checklocksignore
+func (d *idMapData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.task)
+ stateSinkObject.Save(2, &d.gids)
+}
+
+func (d *idMapData) afterLoad() {}
+
+// +checklocksignore
+func (d *idMapData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.Load(1, &d.task)
+ stateSourceObject.Load(2, &d.gids)
+}
+
+func (f *memInode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.memInode"
+}
+
+func (f *memInode) StateFields() []string {
+ return []string{
+ "InodeAttrs",
+ "InodeNoStatFS",
+ "InodeNoopRefCount",
+ "InodeNotDirectory",
+ "InodeNotSymlink",
+ "task",
+ "locks",
+ }
+}
+
+func (f *memInode) beforeSave() {}
+
+// +checklocksignore
+func (f *memInode) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.InodeAttrs)
+ stateSinkObject.Save(1, &f.InodeNoStatFS)
+ stateSinkObject.Save(2, &f.InodeNoopRefCount)
+ stateSinkObject.Save(3, &f.InodeNotDirectory)
+ stateSinkObject.Save(4, &f.InodeNotSymlink)
+ stateSinkObject.Save(5, &f.task)
+ stateSinkObject.Save(6, &f.locks)
+}
+
+func (f *memInode) afterLoad() {}
+
+// +checklocksignore
+func (f *memInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.InodeAttrs)
+ stateSourceObject.Load(1, &f.InodeNoStatFS)
+ stateSourceObject.Load(2, &f.InodeNoopRefCount)
+ stateSourceObject.Load(3, &f.InodeNotDirectory)
+ stateSourceObject.Load(4, &f.InodeNotSymlink)
+ stateSourceObject.Load(5, &f.task)
+ stateSourceObject.Load(6, &f.locks)
+}
+
+func (fd *memFD) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.memFD"
+}
+
+func (fd *memFD) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "LockFD",
+ "inode",
+ "offset",
+ }
+}
+
+func (fd *memFD) beforeSave() {}
+
+// +checklocksignore
+func (fd *memFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.vfsfd)
+ stateSinkObject.Save(1, &fd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &fd.LockFD)
+ stateSinkObject.Save(3, &fd.inode)
+ stateSinkObject.Save(4, &fd.offset)
+}
+
+func (fd *memFD) afterLoad() {}
+
+// +checklocksignore
+func (fd *memFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.vfsfd)
+ stateSourceObject.Load(1, &fd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &fd.LockFD)
+ stateSourceObject.Load(3, &fd.inode)
+ stateSourceObject.Load(4, &fd.offset)
+}
+
+func (d *mapsData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.mapsData"
+}
+
+func (d *mapsData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "task",
+ }
+}
+
+func (d *mapsData) beforeSave() {}
+
+// +checklocksignore
+func (d *mapsData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.task)
+}
+
+func (d *mapsData) afterLoad() {}
+
+// +checklocksignore
+func (d *mapsData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.Load(1, &d.task)
+}
+
+func (d *smapsData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.smapsData"
+}
+
+func (d *smapsData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "task",
+ }
+}
+
+func (d *smapsData) beforeSave() {}
+
+// +checklocksignore
+func (d *smapsData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.task)
+}
+
+func (d *smapsData) afterLoad() {}
+
+// +checklocksignore
+func (d *smapsData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.Load(1, &d.task)
+}
+
+func (s *taskStatData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.taskStatData"
+}
+
+func (s *taskStatData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "task",
+ "tgstats",
+ "pidns",
+ }
+}
+
+func (s *taskStatData) beforeSave() {}
+
+// +checklocksignore
+func (s *taskStatData) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.DynamicBytesFile)
+ stateSinkObject.Save(1, &s.task)
+ stateSinkObject.Save(2, &s.tgstats)
+ stateSinkObject.Save(3, &s.pidns)
+}
+
+func (s *taskStatData) afterLoad() {}
+
+// +checklocksignore
+func (s *taskStatData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.DynamicBytesFile)
+ stateSourceObject.Load(1, &s.task)
+ stateSourceObject.Load(2, &s.tgstats)
+ stateSourceObject.Load(3, &s.pidns)
+}
+
+func (s *statmData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.statmData"
+}
+
+func (s *statmData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "task",
+ }
+}
+
+func (s *statmData) beforeSave() {}
+
+// +checklocksignore
+func (s *statmData) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.DynamicBytesFile)
+ stateSinkObject.Save(1, &s.task)
+}
+
+func (s *statmData) afterLoad() {}
+
+// +checklocksignore
+func (s *statmData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.DynamicBytesFile)
+ stateSourceObject.Load(1, &s.task)
+}
+
+func (s *statusData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.statusData"
+}
+
+func (s *statusData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "task",
+ "pidns",
+ }
+}
+
+func (s *statusData) beforeSave() {}
+
+// +checklocksignore
+func (s *statusData) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.DynamicBytesFile)
+ stateSinkObject.Save(1, &s.task)
+ stateSinkObject.Save(2, &s.pidns)
+}
+
+func (s *statusData) afterLoad() {}
+
+// +checklocksignore
+func (s *statusData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.DynamicBytesFile)
+ stateSourceObject.Load(1, &s.task)
+ stateSourceObject.Load(2, &s.pidns)
+}
+
+func (i *ioData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.ioData"
+}
+
+func (i *ioData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "ioUsage",
+ }
+}
+
+func (i *ioData) beforeSave() {}
+
+// +checklocksignore
+func (i *ioData) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.DynamicBytesFile)
+ stateSinkObject.Save(1, &i.ioUsage)
+}
+
+func (i *ioData) afterLoad() {}
+
+// +checklocksignore
+func (i *ioData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.DynamicBytesFile)
+ stateSourceObject.Load(1, &i.ioUsage)
+}
+
+func (o *oomScoreAdj) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.oomScoreAdj"
+}
+
+func (o *oomScoreAdj) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "task",
+ }
+}
+
+func (o *oomScoreAdj) beforeSave() {}
+
+// +checklocksignore
+func (o *oomScoreAdj) StateSave(stateSinkObject state.Sink) {
+ o.beforeSave()
+ stateSinkObject.Save(0, &o.DynamicBytesFile)
+ stateSinkObject.Save(1, &o.task)
+}
+
+func (o *oomScoreAdj) afterLoad() {}
+
+// +checklocksignore
+func (o *oomScoreAdj) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &o.DynamicBytesFile)
+ stateSourceObject.Load(1, &o.task)
+}
+
+func (s *exeSymlink) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.exeSymlink"
+}
+
+func (s *exeSymlink) StateFields() []string {
+ return []string{
+ "implStatFS",
+ "InodeAttrs",
+ "InodeNoopRefCount",
+ "InodeSymlink",
+ "fs",
+ "task",
+ }
+}
+
+func (s *exeSymlink) beforeSave() {}
+
+// +checklocksignore
+func (s *exeSymlink) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.implStatFS)
+ stateSinkObject.Save(1, &s.InodeAttrs)
+ stateSinkObject.Save(2, &s.InodeNoopRefCount)
+ stateSinkObject.Save(3, &s.InodeSymlink)
+ stateSinkObject.Save(4, &s.fs)
+ stateSinkObject.Save(5, &s.task)
+}
+
+func (s *exeSymlink) afterLoad() {}
+
+// +checklocksignore
+func (s *exeSymlink) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.implStatFS)
+ stateSourceObject.Load(1, &s.InodeAttrs)
+ stateSourceObject.Load(2, &s.InodeNoopRefCount)
+ stateSourceObject.Load(3, &s.InodeSymlink)
+ stateSourceObject.Load(4, &s.fs)
+ stateSourceObject.Load(5, &s.task)
+}
+
+func (s *cwdSymlink) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.cwdSymlink"
+}
+
+func (s *cwdSymlink) StateFields() []string {
+ return []string{
+ "implStatFS",
+ "InodeAttrs",
+ "InodeNoopRefCount",
+ "InodeSymlink",
+ "fs",
+ "task",
+ }
+}
+
+func (s *cwdSymlink) beforeSave() {}
+
+// +checklocksignore
+func (s *cwdSymlink) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.implStatFS)
+ stateSinkObject.Save(1, &s.InodeAttrs)
+ stateSinkObject.Save(2, &s.InodeNoopRefCount)
+ stateSinkObject.Save(3, &s.InodeSymlink)
+ stateSinkObject.Save(4, &s.fs)
+ stateSinkObject.Save(5, &s.task)
+}
+
+func (s *cwdSymlink) afterLoad() {}
+
+// +checklocksignore
+func (s *cwdSymlink) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.implStatFS)
+ stateSourceObject.Load(1, &s.InodeAttrs)
+ stateSourceObject.Load(2, &s.InodeNoopRefCount)
+ stateSourceObject.Load(3, &s.InodeSymlink)
+ stateSourceObject.Load(4, &s.fs)
+ stateSourceObject.Load(5, &s.task)
+}
+
+func (i *mountInfoData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.mountInfoData"
+}
+
+func (i *mountInfoData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "fs",
+ "task",
+ }
+}
+
+func (i *mountInfoData) beforeSave() {}
+
+// +checklocksignore
+func (i *mountInfoData) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.DynamicBytesFile)
+ stateSinkObject.Save(1, &i.fs)
+ stateSinkObject.Save(2, &i.task)
+}
+
+func (i *mountInfoData) afterLoad() {}
+
+// +checklocksignore
+func (i *mountInfoData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.DynamicBytesFile)
+ stateSourceObject.Load(1, &i.fs)
+ stateSourceObject.Load(2, &i.task)
+}
+
+func (i *mountsData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.mountsData"
+}
+
+func (i *mountsData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "fs",
+ "task",
+ }
+}
+
+func (i *mountsData) beforeSave() {}
+
+// +checklocksignore
+func (i *mountsData) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.DynamicBytesFile)
+ stateSinkObject.Save(1, &i.fs)
+ stateSinkObject.Save(2, &i.task)
+}
+
+func (i *mountsData) afterLoad() {}
+
+// +checklocksignore
+func (i *mountsData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.DynamicBytesFile)
+ stateSourceObject.Load(1, &i.fs)
+ stateSourceObject.Load(2, &i.task)
+}
+
+func (s *namespaceSymlink) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.namespaceSymlink"
+}
+
+func (s *namespaceSymlink) StateFields() []string {
+ return []string{
+ "StaticSymlink",
+ "task",
+ }
+}
+
+func (s *namespaceSymlink) beforeSave() {}
+
+// +checklocksignore
+func (s *namespaceSymlink) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.StaticSymlink)
+ stateSinkObject.Save(1, &s.task)
+}
+
+func (s *namespaceSymlink) afterLoad() {}
+
+// +checklocksignore
+func (s *namespaceSymlink) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.StaticSymlink)
+ stateSourceObject.Load(1, &s.task)
+}
+
+func (i *namespaceInode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.namespaceInode"
+}
+
+func (i *namespaceInode) StateFields() []string {
+ return []string{
+ "implStatFS",
+ "InodeAttrs",
+ "InodeNoopRefCount",
+ "InodeNotDirectory",
+ "InodeNotSymlink",
+ "locks",
+ }
+}
+
+func (i *namespaceInode) beforeSave() {}
+
+// +checklocksignore
+func (i *namespaceInode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.implStatFS)
+ stateSinkObject.Save(1, &i.InodeAttrs)
+ stateSinkObject.Save(2, &i.InodeNoopRefCount)
+ stateSinkObject.Save(3, &i.InodeNotDirectory)
+ stateSinkObject.Save(4, &i.InodeNotSymlink)
+ stateSinkObject.Save(5, &i.locks)
+}
+
+func (i *namespaceInode) afterLoad() {}
+
+// +checklocksignore
+func (i *namespaceInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.implStatFS)
+ stateSourceObject.Load(1, &i.InodeAttrs)
+ stateSourceObject.Load(2, &i.InodeNoopRefCount)
+ stateSourceObject.Load(3, &i.InodeNotDirectory)
+ stateSourceObject.Load(4, &i.InodeNotSymlink)
+ stateSourceObject.Load(5, &i.locks)
+}
+
+func (fd *namespaceFD) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.namespaceFD"
+}
+
+func (fd *namespaceFD) StateFields() []string {
+ return []string{
+ "FileDescriptionDefaultImpl",
+ "LockFD",
+ "vfsfd",
+ "inode",
+ }
+}
+
+func (fd *namespaceFD) beforeSave() {}
+
+// +checklocksignore
+func (fd *namespaceFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(1, &fd.LockFD)
+ stateSinkObject.Save(2, &fd.vfsfd)
+ stateSinkObject.Save(3, &fd.inode)
+}
+
+func (fd *namespaceFD) afterLoad() {}
+
+// +checklocksignore
+func (fd *namespaceFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(1, &fd.LockFD)
+ stateSourceObject.Load(2, &fd.vfsfd)
+ stateSourceObject.Load(3, &fd.inode)
+}
+
+func (d *taskCgroupData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.taskCgroupData"
+}
+
+func (d *taskCgroupData) StateFields() []string {
+ return []string{
+ "dynamicBytesFileSetAttr",
+ "task",
+ }
+}
+
+func (d *taskCgroupData) beforeSave() {}
+
+// +checklocksignore
+func (d *taskCgroupData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.dynamicBytesFileSetAttr)
+ stateSinkObject.Save(1, &d.task)
+}
+
+func (d *taskCgroupData) afterLoad() {}
+
+// +checklocksignore
+func (d *taskCgroupData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.dynamicBytesFileSetAttr)
+ stateSourceObject.Load(1, &d.task)
+}
+
+func (r *taskInodeRefs) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.taskInodeRefs"
+}
+
+func (r *taskInodeRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *taskInodeRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *taskInodeRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *taskInodeRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (n *ifinet6) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.ifinet6"
+}
+
+func (n *ifinet6) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "stack",
+ }
+}
+
+func (n *ifinet6) beforeSave() {}
+
+// +checklocksignore
+func (n *ifinet6) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.DynamicBytesFile)
+ stateSinkObject.Save(1, &n.stack)
+}
+
+func (n *ifinet6) afterLoad() {}
+
+// +checklocksignore
+func (n *ifinet6) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.DynamicBytesFile)
+ stateSourceObject.Load(1, &n.stack)
+}
+
+func (n *netDevData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.netDevData"
+}
+
+func (n *netDevData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "stack",
+ }
+}
+
+func (n *netDevData) beforeSave() {}
+
+// +checklocksignore
+func (n *netDevData) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.DynamicBytesFile)
+ stateSinkObject.Save(1, &n.stack)
+}
+
+func (n *netDevData) afterLoad() {}
+
+// +checklocksignore
+func (n *netDevData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.DynamicBytesFile)
+ stateSourceObject.Load(1, &n.stack)
+}
+
+func (n *netUnixData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.netUnixData"
+}
+
+func (n *netUnixData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "kernel",
+ }
+}
+
+func (n *netUnixData) beforeSave() {}
+
+// +checklocksignore
+func (n *netUnixData) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.DynamicBytesFile)
+ stateSinkObject.Save(1, &n.kernel)
+}
+
+func (n *netUnixData) afterLoad() {}
+
+// +checklocksignore
+func (n *netUnixData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.DynamicBytesFile)
+ stateSourceObject.Load(1, &n.kernel)
+}
+
+func (d *netTCPData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.netTCPData"
+}
+
+func (d *netTCPData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "kernel",
+ }
+}
+
+func (d *netTCPData) beforeSave() {}
+
+// +checklocksignore
+func (d *netTCPData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.kernel)
+}
+
+func (d *netTCPData) afterLoad() {}
+
+// +checklocksignore
+func (d *netTCPData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.Load(1, &d.kernel)
+}
+
+func (d *netTCP6Data) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.netTCP6Data"
+}
+
+func (d *netTCP6Data) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "kernel",
+ }
+}
+
+func (d *netTCP6Data) beforeSave() {}
+
+// +checklocksignore
+func (d *netTCP6Data) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.kernel)
+}
+
+func (d *netTCP6Data) afterLoad() {}
+
+// +checklocksignore
+func (d *netTCP6Data) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.Load(1, &d.kernel)
+}
+
+func (d *netUDPData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.netUDPData"
+}
+
+func (d *netUDPData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "kernel",
+ }
+}
+
+func (d *netUDPData) beforeSave() {}
+
+// +checklocksignore
+func (d *netUDPData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.kernel)
+}
+
+func (d *netUDPData) afterLoad() {}
+
+// +checklocksignore
+func (d *netUDPData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.Load(1, &d.kernel)
+}
+
+func (d *netSnmpData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.netSnmpData"
+}
+
+func (d *netSnmpData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "stack",
+ }
+}
+
+func (d *netSnmpData) beforeSave() {}
+
+// +checklocksignore
+func (d *netSnmpData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.stack)
+}
+
+func (d *netSnmpData) afterLoad() {}
+
+// +checklocksignore
+func (d *netSnmpData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.Load(1, &d.stack)
+}
+
+func (s *snmpLine) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.snmpLine"
+}
+
+func (s *snmpLine) StateFields() []string {
+ return []string{
+ "prefix",
+ "header",
+ }
+}
+
+func (s *snmpLine) beforeSave() {}
+
+// +checklocksignore
+func (s *snmpLine) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.prefix)
+ stateSinkObject.Save(1, &s.header)
+}
+
+func (s *snmpLine) afterLoad() {}
+
+// +checklocksignore
+func (s *snmpLine) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.prefix)
+ stateSourceObject.Load(1, &s.header)
+}
+
+func (d *netRouteData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.netRouteData"
+}
+
+func (d *netRouteData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "stack",
+ }
+}
+
+func (d *netRouteData) beforeSave() {}
+
+// +checklocksignore
+func (d *netRouteData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.stack)
+}
+
+func (d *netRouteData) afterLoad() {}
+
+// +checklocksignore
+func (d *netRouteData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.Load(1, &d.stack)
+}
+
+func (d *netStatData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.netStatData"
+}
+
+func (d *netStatData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "stack",
+ }
+}
+
+func (d *netStatData) beforeSave() {}
+
+// +checklocksignore
+func (d *netStatData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.stack)
+}
+
+func (d *netStatData) afterLoad() {}
+
+// +checklocksignore
+func (d *netStatData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.Load(1, &d.stack)
+}
+
+func (i *tasksInode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.tasksInode"
+}
+
+func (i *tasksInode) StateFields() []string {
+ return []string{
+ "implStatFS",
+ "InodeAlwaysValid",
+ "InodeAttrs",
+ "InodeDirectoryNoNewChildren",
+ "InodeNotSymlink",
+ "InodeTemporary",
+ "OrderedChildren",
+ "tasksInodeRefs",
+ "locks",
+ "fs",
+ "pidns",
+ "fakeCgroupControllers",
+ }
+}
+
+func (i *tasksInode) beforeSave() {}
+
+// +checklocksignore
+func (i *tasksInode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.implStatFS)
+ stateSinkObject.Save(1, &i.InodeAlwaysValid)
+ stateSinkObject.Save(2, &i.InodeAttrs)
+ stateSinkObject.Save(3, &i.InodeDirectoryNoNewChildren)
+ stateSinkObject.Save(4, &i.InodeNotSymlink)
+ stateSinkObject.Save(5, &i.InodeTemporary)
+ stateSinkObject.Save(6, &i.OrderedChildren)
+ stateSinkObject.Save(7, &i.tasksInodeRefs)
+ stateSinkObject.Save(8, &i.locks)
+ stateSinkObject.Save(9, &i.fs)
+ stateSinkObject.Save(10, &i.pidns)
+ stateSinkObject.Save(11, &i.fakeCgroupControllers)
+}
+
+func (i *tasksInode) afterLoad() {}
+
+// +checklocksignore
+func (i *tasksInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.implStatFS)
+ stateSourceObject.Load(1, &i.InodeAlwaysValid)
+ stateSourceObject.Load(2, &i.InodeAttrs)
+ stateSourceObject.Load(3, &i.InodeDirectoryNoNewChildren)
+ stateSourceObject.Load(4, &i.InodeNotSymlink)
+ stateSourceObject.Load(5, &i.InodeTemporary)
+ stateSourceObject.Load(6, &i.OrderedChildren)
+ stateSourceObject.Load(7, &i.tasksInodeRefs)
+ stateSourceObject.Load(8, &i.locks)
+ stateSourceObject.Load(9, &i.fs)
+ stateSourceObject.Load(10, &i.pidns)
+ stateSourceObject.Load(11, &i.fakeCgroupControllers)
+}
+
+func (s *staticFileSetStat) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.staticFileSetStat"
+}
+
+func (s *staticFileSetStat) StateFields() []string {
+ return []string{
+ "dynamicBytesFileSetAttr",
+ "StaticData",
+ }
+}
+
+func (s *staticFileSetStat) beforeSave() {}
+
+// +checklocksignore
+func (s *staticFileSetStat) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.dynamicBytesFileSetAttr)
+ stateSinkObject.Save(1, &s.StaticData)
+}
+
+func (s *staticFileSetStat) afterLoad() {}
+
+// +checklocksignore
+func (s *staticFileSetStat) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.dynamicBytesFileSetAttr)
+ stateSourceObject.Load(1, &s.StaticData)
+}
+
+func (s *selfSymlink) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.selfSymlink"
+}
+
+func (s *selfSymlink) StateFields() []string {
+ return []string{
+ "implStatFS",
+ "InodeAttrs",
+ "InodeNoopRefCount",
+ "InodeSymlink",
+ "pidns",
+ }
+}
+
+func (s *selfSymlink) beforeSave() {}
+
+// +checklocksignore
+func (s *selfSymlink) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.implStatFS)
+ stateSinkObject.Save(1, &s.InodeAttrs)
+ stateSinkObject.Save(2, &s.InodeNoopRefCount)
+ stateSinkObject.Save(3, &s.InodeSymlink)
+ stateSinkObject.Save(4, &s.pidns)
+}
+
+func (s *selfSymlink) afterLoad() {}
+
+// +checklocksignore
+func (s *selfSymlink) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.implStatFS)
+ stateSourceObject.Load(1, &s.InodeAttrs)
+ stateSourceObject.Load(2, &s.InodeNoopRefCount)
+ stateSourceObject.Load(3, &s.InodeSymlink)
+ stateSourceObject.Load(4, &s.pidns)
+}
+
+func (s *threadSelfSymlink) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.threadSelfSymlink"
+}
+
+func (s *threadSelfSymlink) StateFields() []string {
+ return []string{
+ "implStatFS",
+ "InodeAttrs",
+ "InodeNoopRefCount",
+ "InodeSymlink",
+ "pidns",
+ }
+}
+
+func (s *threadSelfSymlink) beforeSave() {}
+
+// +checklocksignore
+func (s *threadSelfSymlink) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.implStatFS)
+ stateSinkObject.Save(1, &s.InodeAttrs)
+ stateSinkObject.Save(2, &s.InodeNoopRefCount)
+ stateSinkObject.Save(3, &s.InodeSymlink)
+ stateSinkObject.Save(4, &s.pidns)
+}
+
+func (s *threadSelfSymlink) afterLoad() {}
+
+// +checklocksignore
+func (s *threadSelfSymlink) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.implStatFS)
+ stateSourceObject.Load(1, &s.InodeAttrs)
+ stateSourceObject.Load(2, &s.InodeNoopRefCount)
+ stateSourceObject.Load(3, &s.InodeSymlink)
+ stateSourceObject.Load(4, &s.pidns)
+}
+
+func (d *dynamicBytesFileSetAttr) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.dynamicBytesFileSetAttr"
+}
+
+func (d *dynamicBytesFileSetAttr) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ }
+}
+
+func (d *dynamicBytesFileSetAttr) beforeSave() {}
+
+// +checklocksignore
+func (d *dynamicBytesFileSetAttr) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+}
+
+func (d *dynamicBytesFileSetAttr) afterLoad() {}
+
+// +checklocksignore
+func (d *dynamicBytesFileSetAttr) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+}
+
+func (c *cpuStats) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.cpuStats"
+}
+
+func (c *cpuStats) StateFields() []string {
+ return []string{
+ "user",
+ "nice",
+ "system",
+ "idle",
+ "ioWait",
+ "irq",
+ "softirq",
+ "steal",
+ "guest",
+ "guestNice",
+ }
+}
+
+func (c *cpuStats) beforeSave() {}
+
+// +checklocksignore
+func (c *cpuStats) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.user)
+ stateSinkObject.Save(1, &c.nice)
+ stateSinkObject.Save(2, &c.system)
+ stateSinkObject.Save(3, &c.idle)
+ stateSinkObject.Save(4, &c.ioWait)
+ stateSinkObject.Save(5, &c.irq)
+ stateSinkObject.Save(6, &c.softirq)
+ stateSinkObject.Save(7, &c.steal)
+ stateSinkObject.Save(8, &c.guest)
+ stateSinkObject.Save(9, &c.guestNice)
+}
+
+func (c *cpuStats) afterLoad() {}
+
+// +checklocksignore
+func (c *cpuStats) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.user)
+ stateSourceObject.Load(1, &c.nice)
+ stateSourceObject.Load(2, &c.system)
+ stateSourceObject.Load(3, &c.idle)
+ stateSourceObject.Load(4, &c.ioWait)
+ stateSourceObject.Load(5, &c.irq)
+ stateSourceObject.Load(6, &c.softirq)
+ stateSourceObject.Load(7, &c.steal)
+ stateSourceObject.Load(8, &c.guest)
+ stateSourceObject.Load(9, &c.guestNice)
+}
+
+func (s *statData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.statData"
+}
+
+func (s *statData) StateFields() []string {
+ return []string{
+ "dynamicBytesFileSetAttr",
+ }
+}
+
+func (s *statData) beforeSave() {}
+
+// +checklocksignore
+func (s *statData) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.dynamicBytesFileSetAttr)
+}
+
+func (s *statData) afterLoad() {}
+
+// +checklocksignore
+func (s *statData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.dynamicBytesFileSetAttr)
+}
+
+func (l *loadavgData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.loadavgData"
+}
+
+func (l *loadavgData) StateFields() []string {
+ return []string{
+ "dynamicBytesFileSetAttr",
+ }
+}
+
+func (l *loadavgData) beforeSave() {}
+
+// +checklocksignore
+func (l *loadavgData) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.dynamicBytesFileSetAttr)
+}
+
+func (l *loadavgData) afterLoad() {}
+
+// +checklocksignore
+func (l *loadavgData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.dynamicBytesFileSetAttr)
+}
+
+func (m *meminfoData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.meminfoData"
+}
+
+func (m *meminfoData) StateFields() []string {
+ return []string{
+ "dynamicBytesFileSetAttr",
+ }
+}
+
+func (m *meminfoData) beforeSave() {}
+
+// +checklocksignore
+func (m *meminfoData) StateSave(stateSinkObject state.Sink) {
+ m.beforeSave()
+ stateSinkObject.Save(0, &m.dynamicBytesFileSetAttr)
+}
+
+func (m *meminfoData) afterLoad() {}
+
+// +checklocksignore
+func (m *meminfoData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &m.dynamicBytesFileSetAttr)
+}
+
+func (u *uptimeData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.uptimeData"
+}
+
+func (u *uptimeData) StateFields() []string {
+ return []string{
+ "dynamicBytesFileSetAttr",
+ }
+}
+
+func (u *uptimeData) beforeSave() {}
+
+// +checklocksignore
+func (u *uptimeData) StateSave(stateSinkObject state.Sink) {
+ u.beforeSave()
+ stateSinkObject.Save(0, &u.dynamicBytesFileSetAttr)
+}
+
+func (u *uptimeData) afterLoad() {}
+
+// +checklocksignore
+func (u *uptimeData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &u.dynamicBytesFileSetAttr)
+}
+
+func (v *versionData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.versionData"
+}
+
+func (v *versionData) StateFields() []string {
+ return []string{
+ "dynamicBytesFileSetAttr",
+ }
+}
+
+func (v *versionData) beforeSave() {}
+
+// +checklocksignore
+func (v *versionData) StateSave(stateSinkObject state.Sink) {
+ v.beforeSave()
+ stateSinkObject.Save(0, &v.dynamicBytesFileSetAttr)
+}
+
+func (v *versionData) afterLoad() {}
+
+// +checklocksignore
+func (v *versionData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &v.dynamicBytesFileSetAttr)
+}
+
+func (d *filesystemsData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.filesystemsData"
+}
+
+func (d *filesystemsData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ }
+}
+
+func (d *filesystemsData) beforeSave() {}
+
+// +checklocksignore
+func (d *filesystemsData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+}
+
+func (d *filesystemsData) afterLoad() {}
+
+// +checklocksignore
+func (d *filesystemsData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+}
+
+func (c *cgroupsData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.cgroupsData"
+}
+
+func (c *cgroupsData) StateFields() []string {
+ return []string{
+ "dynamicBytesFileSetAttr",
+ }
+}
+
+func (c *cgroupsData) beforeSave() {}
+
+// +checklocksignore
+func (c *cgroupsData) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.dynamicBytesFileSetAttr)
+}
+
+func (c *cgroupsData) afterLoad() {}
+
+// +checklocksignore
+func (c *cgroupsData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.dynamicBytesFileSetAttr)
+}
+
+func (c *cmdLineData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.cmdLineData"
+}
+
+func (c *cmdLineData) StateFields() []string {
+ return []string{
+ "dynamicBytesFileSetAttr",
+ }
+}
+
+func (c *cmdLineData) beforeSave() {}
+
+// +checklocksignore
+func (c *cmdLineData) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.dynamicBytesFileSetAttr)
+}
+
+func (c *cmdLineData) afterLoad() {}
+
+// +checklocksignore
+func (c *cmdLineData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.dynamicBytesFileSetAttr)
+}
+
+func (r *tasksInodeRefs) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.tasksInodeRefs"
+}
+
+func (r *tasksInodeRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *tasksInodeRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *tasksInodeRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *tasksInodeRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (t *tcpMemDir) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.tcpMemDir"
+}
+
+func (t *tcpMemDir) StateFields() []string {
+ return nil
+}
+
+func (d *mmapMinAddrData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.mmapMinAddrData"
+}
+
+func (d *mmapMinAddrData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "k",
+ }
+}
+
+func (d *mmapMinAddrData) beforeSave() {}
+
+// +checklocksignore
+func (d *mmapMinAddrData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.k)
+}
+
+func (d *mmapMinAddrData) afterLoad() {}
+
+// +checklocksignore
+func (d *mmapMinAddrData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.Load(1, &d.k)
+}
+
+func (h *hostnameData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.hostnameData"
+}
+
+func (h *hostnameData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ }
+}
+
+func (h *hostnameData) beforeSave() {}
+
+// +checklocksignore
+func (h *hostnameData) StateSave(stateSinkObject state.Sink) {
+ h.beforeSave()
+ stateSinkObject.Save(0, &h.DynamicBytesFile)
+}
+
+func (h *hostnameData) afterLoad() {}
+
+// +checklocksignore
+func (h *hostnameData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &h.DynamicBytesFile)
+}
+
+func (d *tcpSackData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.tcpSackData"
+}
+
+func (d *tcpSackData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "stack",
+ "enabled",
+ }
+}
+
+func (d *tcpSackData) beforeSave() {}
+
+// +checklocksignore
+func (d *tcpSackData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.stack)
+ stateSinkObject.Save(2, &d.enabled)
+}
+
+func (d *tcpSackData) afterLoad() {}
+
+// +checklocksignore
+func (d *tcpSackData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.LoadWait(1, &d.stack)
+ stateSourceObject.Load(2, &d.enabled)
+}
+
+func (d *tcpRecoveryData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.tcpRecoveryData"
+}
+
+func (d *tcpRecoveryData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "stack",
+ }
+}
+
+func (d *tcpRecoveryData) beforeSave() {}
+
+// +checklocksignore
+func (d *tcpRecoveryData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.stack)
+}
+
+func (d *tcpRecoveryData) afterLoad() {}
+
+// +checklocksignore
+func (d *tcpRecoveryData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.LoadWait(1, &d.stack)
+}
+
+func (d *tcpMemData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.tcpMemData"
+}
+
+func (d *tcpMemData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "dir",
+ "stack",
+ }
+}
+
+func (d *tcpMemData) beforeSave() {}
+
+// +checklocksignore
+func (d *tcpMemData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.dir)
+ stateSinkObject.Save(2, &d.stack)
+}
+
+func (d *tcpMemData) afterLoad() {}
+
+// +checklocksignore
+func (d *tcpMemData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.Load(1, &d.dir)
+ stateSourceObject.LoadWait(2, &d.stack)
+}
+
+func (ipf *ipForwarding) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.ipForwarding"
+}
+
+func (ipf *ipForwarding) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "stack",
+ "enabled",
+ }
+}
+
+func (ipf *ipForwarding) beforeSave() {}
+
+// +checklocksignore
+func (ipf *ipForwarding) StateSave(stateSinkObject state.Sink) {
+ ipf.beforeSave()
+ stateSinkObject.Save(0, &ipf.DynamicBytesFile)
+ stateSinkObject.Save(1, &ipf.stack)
+ stateSinkObject.Save(2, &ipf.enabled)
+}
+
+func (ipf *ipForwarding) afterLoad() {}
+
+// +checklocksignore
+func (ipf *ipForwarding) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &ipf.DynamicBytesFile)
+ stateSourceObject.LoadWait(1, &ipf.stack)
+ stateSourceObject.Load(2, &ipf.enabled)
+}
+
+func (pr *portRange) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.portRange"
+}
+
+func (pr *portRange) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "stack",
+ "start",
+ "end",
+ }
+}
+
+func (pr *portRange) beforeSave() {}
+
+// +checklocksignore
+func (pr *portRange) StateSave(stateSinkObject state.Sink) {
+ pr.beforeSave()
+ stateSinkObject.Save(0, &pr.DynamicBytesFile)
+ stateSinkObject.Save(1, &pr.stack)
+ stateSinkObject.Save(2, &pr.start)
+ stateSinkObject.Save(3, &pr.end)
+}
+
+func (pr *portRange) afterLoad() {}
+
+// +checklocksignore
+func (pr *portRange) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &pr.DynamicBytesFile)
+ stateSourceObject.LoadWait(1, &pr.stack)
+ stateSourceObject.Load(2, &pr.start)
+ stateSourceObject.Load(3, &pr.end)
+}
+
+func (s *yamaPtraceScope) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.yamaPtraceScope"
+}
+
+func (s *yamaPtraceScope) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "level",
+ }
+}
+
+func (s *yamaPtraceScope) beforeSave() {}
+
+// +checklocksignore
+func (s *yamaPtraceScope) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.DynamicBytesFile)
+ stateSinkObject.Save(1, &s.level)
+}
+
+func (s *yamaPtraceScope) afterLoad() {}
+
+// +checklocksignore
+func (s *yamaPtraceScope) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.DynamicBytesFile)
+ stateSourceObject.Load(1, &s.level)
+}
+
+func init() {
+ state.Register((*fdDirInodeRefs)(nil))
+ state.Register((*fdInfoDirInodeRefs)(nil))
+ state.Register((*FilesystemType)(nil))
+ state.Register((*filesystem)(nil))
+ state.Register((*staticFile)(nil))
+ state.Register((*InternalData)(nil))
+ state.Register((*implStatFS)(nil))
+ state.Register((*subtasksInode)(nil))
+ state.Register((*subtasksFD)(nil))
+ state.Register((*subtasksInodeRefs)(nil))
+ state.Register((*taskInode)(nil))
+ state.Register((*taskOwnedInode)(nil))
+ state.Register((*fdDir)(nil))
+ state.Register((*fdDirInode)(nil))
+ state.Register((*fdSymlink)(nil))
+ state.Register((*fdInfoDirInode)(nil))
+ state.Register((*fdInfoData)(nil))
+ state.Register((*auxvData)(nil))
+ state.Register((*cmdlineData)(nil))
+ state.Register((*commInode)(nil))
+ state.Register((*commData)(nil))
+ state.Register((*idMapData)(nil))
+ state.Register((*memInode)(nil))
+ state.Register((*memFD)(nil))
+ state.Register((*mapsData)(nil))
+ state.Register((*smapsData)(nil))
+ state.Register((*taskStatData)(nil))
+ state.Register((*statmData)(nil))
+ state.Register((*statusData)(nil))
+ state.Register((*ioData)(nil))
+ state.Register((*oomScoreAdj)(nil))
+ state.Register((*exeSymlink)(nil))
+ state.Register((*cwdSymlink)(nil))
+ state.Register((*mountInfoData)(nil))
+ state.Register((*mountsData)(nil))
+ state.Register((*namespaceSymlink)(nil))
+ state.Register((*namespaceInode)(nil))
+ state.Register((*namespaceFD)(nil))
+ state.Register((*taskCgroupData)(nil))
+ state.Register((*taskInodeRefs)(nil))
+ state.Register((*ifinet6)(nil))
+ state.Register((*netDevData)(nil))
+ state.Register((*netUnixData)(nil))
+ state.Register((*netTCPData)(nil))
+ state.Register((*netTCP6Data)(nil))
+ state.Register((*netUDPData)(nil))
+ state.Register((*netSnmpData)(nil))
+ state.Register((*snmpLine)(nil))
+ state.Register((*netRouteData)(nil))
+ state.Register((*netStatData)(nil))
+ state.Register((*tasksInode)(nil))
+ state.Register((*staticFileSetStat)(nil))
+ state.Register((*selfSymlink)(nil))
+ state.Register((*threadSelfSymlink)(nil))
+ state.Register((*dynamicBytesFileSetAttr)(nil))
+ state.Register((*cpuStats)(nil))
+ state.Register((*statData)(nil))
+ state.Register((*loadavgData)(nil))
+ state.Register((*meminfoData)(nil))
+ state.Register((*uptimeData)(nil))
+ state.Register((*versionData)(nil))
+ state.Register((*filesystemsData)(nil))
+ state.Register((*cgroupsData)(nil))
+ state.Register((*cmdLineData)(nil))
+ state.Register((*tasksInodeRefs)(nil))
+ state.Register((*tcpMemDir)(nil))
+ state.Register((*mmapMinAddrData)(nil))
+ state.Register((*hostnameData)(nil))
+ state.Register((*tcpSackData)(nil))
+ state.Register((*tcpRecoveryData)(nil))
+ state.Register((*tcpMemData)(nil))
+ state.Register((*ipForwarding)(nil))
+ state.Register((*portRange)(nil))
+ state.Register((*yamaPtraceScope)(nil))
+}
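The generated methods above all follow the same +stateify contract: StateFields returns an ordered list of field names, StateSave/StateLoad address those fields by the matching slot index on the state.Sink/state.Source, and state.Register makes the type known to the serializer. The sketch below is a minimal hand-written illustration of that contract, not part of this change: the counterExample type and its fields are invented, and the import path is assumed to be gvisor.dev/gvisor/pkg/state by analogy with the gvisor.dev imports elsewhere in this diff. (LoadWait, used above for the stack fields, appears to be the variant that additionally waits for the referenced object to finish loading before dependents proceed.)

package proc

import "gvisor.dev/gvisor/pkg/state"

// counterExample is a hypothetical savable type, used only to illustrate the
// contract implemented by the generated code above.
type counterExample struct {
	hits  uint64
	label string
}

func (c *counterExample) StateTypeName() string {
	return "pkg/sentry/fsimpl/proc.counterExample"
}

func (c *counterExample) StateFields() []string {
	// The order of names here fixes the slot numbers used below.
	return []string{
		"hits",
		"label",
	}
}

func (c *counterExample) beforeSave() {}

func (c *counterExample) StateSave(stateSinkObject state.Sink) {
	c.beforeSave()
	stateSinkObject.Save(0, &c.hits)  // slot 0 == "hits"
	stateSinkObject.Save(1, &c.label) // slot 1 == "label"
}

func (c *counterExample) afterLoad() {}

func (c *counterExample) StateLoad(stateSourceObject state.Source) {
	stateSourceObject.Load(0, &c.hits)
	stateSourceObject.Load(1, &c.label)
	// Types needing post-restore work register it explicitly, as the
	// *InodeRefs types above do via stateSourceObject.AfterLoad(...).
}

func init() {
	state.Register((*counterExample)(nil))
}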
diff --git a/pkg/sentry/fsimpl/proc/subtasks_inode_refs.go b/pkg/sentry/fsimpl/proc/subtasks_inode_refs.go
new file mode 100644
index 000000000..bd4998cbc
--- /dev/null
+++ b/pkg/sentry/fsimpl/proc/subtasks_inode_refs.go
@@ -0,0 +1,140 @@
+package proc
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const subtasksInodeenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var subtasksInodeobj *subtasksInode
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type subtasksInodeRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *subtasksInodeRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *subtasksInodeRefs) RefType() string {
+ return fmt.Sprintf("%T", subtasksInodeobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *subtasksInodeRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *subtasksInodeRefs) LogRefs() bool {
+ return subtasksInodeenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *subtasksInodeRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *subtasksInodeRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if subtasksInodeenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.TryRefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *subtasksInodeRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if subtasksInodeenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *subtasksInodeRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if subtasksInodeenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *subtasksInodeRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
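
Editor's note (illustrative, not part of the patch): the generated TryIncRef above works by packing two 32-bit counters into a single int64 — speculative references in the upper half, real references in the lower half — so it never needs a CompareAndSwap loop. A minimal standalone sketch of that arithmetic, with purely hypothetical names:

// tryincref_sketch.go — demonstrates the bit-packed counter used by the
// generated *Refs types. Not gVisor code; for illustration only.
package main

import (
	"fmt"
	"sync/atomic"
)

const speculativeRef = 1 << 32 // one speculative reference in the upper 32 bits

func tryIncRef(refCount *int64) bool {
	// Add a speculative reference first. If the low 32 bits (real refs)
	// are zero, the object is already dead: back out and fail.
	if v := atomic.AddInt64(refCount, speculativeRef); int32(v) == 0 {
		atomic.AddInt64(refCount, -speculativeRef)
		return false
	}
	// Otherwise convert the speculative reference into a real one.
	atomic.AddInt64(refCount, -speculativeRef+1)
	return true
}

func main() {
	live := int64(1) // one real reference still held
	dead := int64(0) // no real references left
	fmt.Println(tryIncRef(&live), live) // true 2
	fmt.Println(tryIncRef(&dead), dead) // false 0
}
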
diff --git a/pkg/sentry/fsimpl/proc/task_inode_refs.go b/pkg/sentry/fsimpl/proc/task_inode_refs.go
new file mode 100644
index 000000000..82c63213a
--- /dev/null
+++ b/pkg/sentry/fsimpl/proc/task_inode_refs.go
@@ -0,0 +1,140 @@
+package proc
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const taskInodeenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var taskInodeobj *taskInode
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type taskInodeRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *taskInodeRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *taskInodeRefs) RefType() string {
+ return fmt.Sprintf("%T", taskInodeobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *taskInodeRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *taskInodeRefs) LogRefs() bool {
+ return taskInodeenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *taskInodeRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *taskInodeRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if taskInodeenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.TryRefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *taskInodeRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if taskInodeenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *taskInodeRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if taskInodeenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *taskInodeRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
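
Editor's note: each of these files is an instantiation of the same refs template for a different owning type. A hedged caller-side sketch of the intended lifecycle — InitRefs at construction, IncRef per additional holder, DecRef with a destructor that runs when the count hits zero. The names exampleRefs/exampleInode are hypothetical stand-ins, and the stand-in DecRef omits the speculative-reference and leak-check handling shown above:

// refs_usage_sketch.go — illustrative only.
package main

import (
	"fmt"
	"sync/atomic"
)

// exampleRefs stands in for a generated type such as taskInodeRefs.
type exampleRefs struct{ refCount int64 }

func (r *exampleRefs) InitRefs() { atomic.StoreInt64(&r.refCount, 1) }
func (r *exampleRefs) IncRef()   { atomic.AddInt64(&r.refCount, 1) }

func (r *exampleRefs) DecRef(destroy func()) {
	if atomic.AddInt64(&r.refCount, -1) == 0 && destroy != nil {
		destroy()
	}
}

// exampleInode embeds the refs type, mirroring how the proc inodes embed
// their generated *Refs structs.
type exampleInode struct {
	exampleRefs
	name string
}

func newExampleInode(name string) *exampleInode {
	i := &exampleInode{name: name}
	i.InitRefs() // one reference owned by the creator
	return i
}

func (i *exampleInode) DecRef() {
	i.exampleRefs.DecRef(func() { fmt.Println("destroying", i.name) })
}

func main() {
	i := newExampleInode("task-1")
	i.IncRef() // a second holder
	i.DecRef() // second holder done
	i.DecRef() // creator done; destructor runs here
}
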
diff --git a/pkg/sentry/fsimpl/proc/tasks_inode_refs.go b/pkg/sentry/fsimpl/proc/tasks_inode_refs.go
new file mode 100644
index 000000000..73adc5278
--- /dev/null
+++ b/pkg/sentry/fsimpl/proc/tasks_inode_refs.go
@@ -0,0 +1,140 @@
+package proc
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const tasksInodeenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var tasksInodeobj *tasksInode
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type tasksInodeRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *tasksInodeRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *tasksInodeRefs) RefType() string {
+ return fmt.Sprintf("%T", tasksInodeobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *tasksInodeRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *tasksInodeRefs) LogRefs() bool {
+ return tasksInodeenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *tasksInodeRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *tasksInodeRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if tasksInodeenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.TryRefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *tasksInodeRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if tasksInodeenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *tasksInodeRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if tasksInodeenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *tasksInodeRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
diff --git a/pkg/sentry/fsimpl/proc/tasks_sys_test.go b/pkg/sentry/fsimpl/proc/tasks_sys_test.go
deleted file mode 100644
index 19b012f7d..000000000
--- a/pkg/sentry/fsimpl/proc/tasks_sys_test.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package proc
-
-import (
- "bytes"
- "reflect"
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/inet"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-func newIPv6TestStack() *inet.TestStack {
- s := inet.NewTestStack()
- s.SupportsIPv6Flag = true
- return s
-}
-
-func TestIfinet6NoAddresses(t *testing.T) {
- n := &ifinet6{stack: newIPv6TestStack()}
- var buf bytes.Buffer
- n.Generate(contexttest.Context(t), &buf)
- if buf.Len() > 0 {
- t.Errorf("n.Generate() generated = %v, want = %v", buf.Bytes(), []byte{})
- }
-}
-
-func TestIfinet6(t *testing.T) {
- s := newIPv6TestStack()
- s.InterfacesMap[1] = inet.Interface{Name: "eth0"}
- s.InterfaceAddrsMap[1] = []inet.InterfaceAddr{
- {
- Family: linux.AF_INET6,
- PrefixLen: 128,
- Addr: []byte("\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"),
- },
- }
- s.InterfacesMap[2] = inet.Interface{Name: "eth1"}
- s.InterfaceAddrsMap[2] = []inet.InterfaceAddr{
- {
- Family: linux.AF_INET6,
- PrefixLen: 128,
- Addr: []byte("\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"),
- },
- }
- want := map[string]struct{}{
- "000102030405060708090a0b0c0d0e0f 01 80 00 00 eth0\n": {},
- "101112131415161718191a1b1c1d1e1f 02 80 00 00 eth1\n": {},
- }
-
- n := &ifinet6{stack: s}
- contents := n.contents()
- if len(contents) != len(want) {
- t.Errorf("Got len(n.contents()) = %d, want = %d", len(contents), len(want))
- }
- got := map[string]struct{}{}
- for _, l := range contents {
- got[l] = struct{}{}
- }
-
- if !reflect.DeepEqual(got, want) {
- t.Errorf("Got n.contents() = %v, want = %v", got, want)
- }
-}
-
-// TestConfigureIPForwarding tests the implementation of
-// /proc/sys/net/ipv4/ip_forwarding.
-func TestConfigureIPForwarding(t *testing.T) {
- ctx := context.Background()
- s := inet.NewTestStack()
-
- var cases = []struct {
- comment string
- initial bool
- str string
- final bool
- }{
- {
- comment: `Forwarding is disabled; write 1 and enable forwarding`,
- initial: false,
- str: "1",
- final: true,
- },
- {
- comment: `Forwarding is disabled; write 0 and disable forwarding`,
- initial: false,
- str: "0",
- final: false,
- },
- {
- comment: `Forwarding is enabled; write 1 and enable forwarding`,
- initial: true,
- str: "1",
- final: true,
- },
- {
- comment: `Forwarding is enabled; write 0 and disable forwarding`,
- initial: true,
- str: "0",
- final: false,
- },
- {
- comment: `Forwarding is disabled; write 2404 and enable forwarding`,
- initial: false,
- str: "2404",
- final: true,
- },
- {
- comment: `Forwarding is enabled; write 2404 and enable forwarding`,
- initial: true,
- str: "2404",
- final: true,
- },
- }
- for _, c := range cases {
- t.Run(c.comment, func(t *testing.T) {
- s.IPForwarding = c.initial
-
- file := &ipForwarding{stack: s, enabled: c.initial}
-
- // Write the values.
- src := usermem.BytesIOSequence([]byte(c.str))
- if n, err := file.Write(ctx, src, 0); n != int64(len(c.str)) || err != nil {
- t.Errorf("file.Write(ctx, nil, %q, 0) = (%d, %v); want (%d, nil)", c.str, n, err, len(c.str))
- }
-
- // Read the values from the stack and check them.
- if got, want := s.IPForwarding, c.final; got != want {
- t.Errorf("s.IPForwarding incorrect; got: %v, want: %v", got, want)
- }
- })
- }
-}
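
Editor's note: the deleted test above pins down the sysctl write semantics — any value that parses to a nonzero integer (including "2404") enables forwarding, while "0" disables it. A minimal sketch of that parsing rule; parseIPForwarding is an illustrative helper, not a function in the gVisor tree:

// ipforwarding_sketch.go — illustrative only.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseIPForwarding maps a written string to the resulting forwarding state.
func parseIPForwarding(src string) (bool, error) {
	v, err := strconv.ParseInt(strings.TrimSpace(src), 10, 64)
	if err != nil {
		return false, err
	}
	return v != 0, nil
}

func main() {
	for _, s := range []string{"1", "0", "2404"} {
		enabled, _ := parseIPForwarding(s)
		fmt.Printf("write %q => forwarding enabled: %v\n", s, enabled)
	}
}
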
diff --git a/pkg/sentry/fsimpl/proc/tasks_test.go b/pkg/sentry/fsimpl/proc/tasks_test.go
deleted file mode 100644
index 14f806c3c..000000000
--- a/pkg/sentry/fsimpl/proc/tasks_test.go
+++ /dev/null
@@ -1,511 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package proc
-
-import (
- "fmt"
- "math"
- "path"
- "strconv"
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/errors/linuxerr"
- "gvisor.dev/gvisor/pkg/fspath"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/testutil"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/tmpfs"
- "gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-var (
- // Next offset 256 by convention. Adds 1 for the next offset.
- selfLink = vfs.Dirent{Type: linux.DT_LNK, NextOff: 256 + 0 + 1}
- threadSelfLink = vfs.Dirent{Type: linux.DT_LNK, NextOff: 256 + 1 + 1}
-
- // /proc/[pid] next offset starts at 256+2 (files above), then adds the
- // PID, and adds 1 for the next offset.
- proc1 = vfs.Dirent{Type: linux.DT_DIR, NextOff: 258 + 1 + 1}
- proc2 = vfs.Dirent{Type: linux.DT_DIR, NextOff: 258 + 2 + 1}
- proc3 = vfs.Dirent{Type: linux.DT_DIR, NextOff: 258 + 3 + 1}
-)
-
-var (
- tasksStaticFiles = map[string]testutil.DirentType{
- "cmdline": linux.DT_REG,
- "cpuinfo": linux.DT_REG,
- "filesystems": linux.DT_REG,
- "loadavg": linux.DT_REG,
- "meminfo": linux.DT_REG,
- "mounts": linux.DT_LNK,
- "net": linux.DT_LNK,
- "self": linux.DT_LNK,
- "stat": linux.DT_REG,
- "sys": linux.DT_DIR,
- "thread-self": linux.DT_LNK,
- "uptime": linux.DT_REG,
- "version": linux.DT_REG,
- }
- tasksStaticFilesNextOffs = map[string]int64{
- "self": selfLink.NextOff,
- "thread-self": threadSelfLink.NextOff,
- }
- taskStaticFiles = map[string]testutil.DirentType{
- "auxv": linux.DT_REG,
- "cgroup": linux.DT_REG,
- "cwd": linux.DT_LNK,
- "cmdline": linux.DT_REG,
- "comm": linux.DT_REG,
- "environ": linux.DT_REG,
- "exe": linux.DT_LNK,
- "fd": linux.DT_DIR,
- "fdinfo": linux.DT_DIR,
- "gid_map": linux.DT_REG,
- "io": linux.DT_REG,
- "maps": linux.DT_REG,
- "mem": linux.DT_REG,
- "mountinfo": linux.DT_REG,
- "mounts": linux.DT_REG,
- "net": linux.DT_DIR,
- "ns": linux.DT_DIR,
- "oom_score": linux.DT_REG,
- "oom_score_adj": linux.DT_REG,
- "smaps": linux.DT_REG,
- "stat": linux.DT_REG,
- "statm": linux.DT_REG,
- "status": linux.DT_REG,
- "task": linux.DT_DIR,
- "uid_map": linux.DT_REG,
- }
-)
-
-func setup(t *testing.T) *testutil.System {
- k, err := testutil.Boot()
- if err != nil {
- t.Fatalf("Error creating kernel: %v", err)
- }
-
- ctx := k.SupervisorContext()
- creds := auth.CredentialsFromContext(ctx)
-
- k.VFS().MustRegisterFilesystemType(Name, &FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
- })
-
- mntns, err := k.VFS().NewMountNamespace(ctx, creds, "", tmpfs.Name, &vfs.MountOptions{})
- if err != nil {
- t.Fatalf("NewMountNamespace(): %v", err)
- }
- root := mntns.Root()
- root.IncRef()
- defer root.DecRef(ctx)
- pop := &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse("/proc"),
- }
- if err := k.VFS().MkdirAt(ctx, creds, pop, &vfs.MkdirOptions{Mode: 0777}); err != nil {
- t.Fatalf("MkDir(/proc): %v", err)
- }
-
- pop = &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse("/proc"),
- }
- mntOpts := &vfs.MountOptions{
- GetFilesystemOptions: vfs.GetFilesystemOptions{
- InternalData: &InternalData{
- Cgroups: map[string]string{
- "cpuset": "/foo/cpuset",
- "memory": "/foo/memory",
- },
- },
- },
- }
- if _, err := k.VFS().MountAt(ctx, creds, "", pop, Name, mntOpts); err != nil {
- t.Fatalf("MountAt(/proc): %v", err)
- }
- return testutil.NewSystem(ctx, t, k.VFS(), mntns)
-}
-
-func TestTasksEmpty(t *testing.T) {
- s := setup(t)
- defer s.Destroy()
-
- collector := s.ListDirents(s.PathOpAtRoot("/proc"))
- s.AssertAllDirentTypes(collector, tasksStaticFiles)
- s.AssertDirentOffsets(collector, tasksStaticFilesNextOffs)
-}
-
-func TestTasks(t *testing.T) {
- s := setup(t)
- defer s.Destroy()
-
- expectedDirents := make(map[string]testutil.DirentType)
- for n, d := range tasksStaticFiles {
- expectedDirents[n] = d
- }
-
- k := kernel.KernelFromContext(s.Ctx)
- var tasks []*kernel.Task
- for i := 0; i < 5; i++ {
- tc := k.NewThreadGroup(nil, k.RootPIDNamespace(), kernel.NewSignalHandlers(), linux.SIGCHLD, k.GlobalInit().Limits())
- task, err := testutil.CreateTask(s.Ctx, fmt.Sprintf("name-%d", i), tc, s.MntNs, s.Root, s.Root)
- if err != nil {
- t.Fatalf("CreateTask(): %v", err)
- }
- tasks = append(tasks, task)
- expectedDirents[fmt.Sprintf("%d", i+1)] = linux.DT_DIR
- }
-
- collector := s.ListDirents(s.PathOpAtRoot("/proc"))
- s.AssertAllDirentTypes(collector, expectedDirents)
- s.AssertDirentOffsets(collector, tasksStaticFilesNextOffs)
-
- lastPid := 0
- dirents := collector.OrderedDirents()
- doneSkippingNonTaskDirs := false
- for _, d := range dirents {
- pid, err := strconv.Atoi(d.Name)
- if err != nil {
- if !doneSkippingNonTaskDirs {
- // We haven't gotten to the task dirs yet.
- continue
- }
- t.Fatalf("Invalid process directory %q", d.Name)
- }
- doneSkippingNonTaskDirs = true
- if lastPid > pid {
- t.Errorf("pids not in order: %v", dirents)
- }
- found := false
- for _, t := range tasks {
- if k.TaskSet().Root.IDOfTask(t) == kernel.ThreadID(pid) {
- found = true
- }
- }
- if !found {
- t.Errorf("Additional task ID %d listed: %v", pid, tasks)
- }
- // Next offset starts at 256+2 ('self' and 'thread-self'), then adds the
- // PID, and adds 1 for the next offset.
- if want := int64(256 + 2 + pid + 1); d.NextOff != want {
- t.Errorf("Wrong dirent offset want: %d got: %d: %+v", want, d.NextOff, d)
- }
- }
- if !doneSkippingNonTaskDirs {
- t.Fatalf("Never found any process directories.")
- }
-
- // Test lookup.
- for _, path := range []string{"/proc/1", "/proc/2"} {
- fd, err := s.VFS.OpenAt(
- s.Ctx,
- s.Creds,
- s.PathOpAtRoot(path),
- &vfs.OpenOptions{},
- )
- if err != nil {
- t.Fatalf("vfsfs.OpenAt(%q) failed: %v", path, err)
- }
- defer fd.DecRef(s.Ctx)
- buf := make([]byte, 1)
- bufIOSeq := usermem.BytesIOSequence(buf)
- if _, err := fd.Read(s.Ctx, bufIOSeq, vfs.ReadOptions{}); !linuxerr.Equals(linuxerr.EISDIR, err) {
- t.Errorf("wrong error reading directory: %v", err)
- }
- }
-
- if _, err := s.VFS.OpenAt(
- s.Ctx,
- s.Creds,
- s.PathOpAtRoot("/proc/9999"),
- &vfs.OpenOptions{},
- ); !linuxerr.Equals(linuxerr.ENOENT, err) {
- t.Fatalf("wrong error from vfsfs.OpenAt(/proc/9999): %v", err)
- }
-}
-
-func TestTasksOffset(t *testing.T) {
- s := setup(t)
- defer s.Destroy()
-
- k := kernel.KernelFromContext(s.Ctx)
- for i := 0; i < 3; i++ {
- tc := k.NewThreadGroup(nil, k.RootPIDNamespace(), kernel.NewSignalHandlers(), linux.SIGCHLD, k.GlobalInit().Limits())
- if _, err := testutil.CreateTask(s.Ctx, fmt.Sprintf("name-%d", i), tc, s.MntNs, s.Root, s.Root); err != nil {
- t.Fatalf("CreateTask(): %v", err)
- }
- }
-
- for _, tc := range []struct {
- name string
- offset int64
- wants map[string]vfs.Dirent
- }{
- {
- name: "small offset",
- offset: 100,
- wants: map[string]vfs.Dirent{
- "self": selfLink,
- "thread-self": threadSelfLink,
- "1": proc1,
- "2": proc2,
- "3": proc3,
- },
- },
- {
- name: "offset at start",
- offset: 256,
- wants: map[string]vfs.Dirent{
- "self": selfLink,
- "thread-self": threadSelfLink,
- "1": proc1,
- "2": proc2,
- "3": proc3,
- },
- },
- {
- name: "skip /proc/self",
- offset: 257,
- wants: map[string]vfs.Dirent{
- "thread-self": threadSelfLink,
- "1": proc1,
- "2": proc2,
- "3": proc3,
- },
- },
- {
- name: "skip symlinks",
- offset: 258,
- wants: map[string]vfs.Dirent{
- "1": proc1,
- "2": proc2,
- "3": proc3,
- },
- },
- {
- name: "skip first process",
- offset: 260,
- wants: map[string]vfs.Dirent{
- "2": proc2,
- "3": proc3,
- },
- },
- {
- name: "last process",
- offset: 261,
- wants: map[string]vfs.Dirent{
- "3": proc3,
- },
- },
- {
- name: "after last",
- offset: 262,
- wants: nil,
- },
- {
- name: "TaskLimit+1",
- offset: kernel.TasksLimit + 1,
- wants: nil,
- },
- {
- name: "max",
- offset: math.MaxInt64,
- wants: nil,
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- s := s.WithSubtest(t)
- fd, err := s.VFS.OpenAt(
- s.Ctx,
- s.Creds,
- s.PathOpAtRoot("/proc"),
- &vfs.OpenOptions{},
- )
- if err != nil {
- t.Fatalf("vfsfs.OpenAt(/) failed: %v", err)
- }
- defer fd.DecRef(s.Ctx)
- if _, err := fd.Seek(s.Ctx, tc.offset, linux.SEEK_SET); err != nil {
- t.Fatalf("Seek(%d, SEEK_SET): %v", tc.offset, err)
- }
-
- var collector testutil.DirentCollector
- if err := fd.IterDirents(s.Ctx, &collector); err != nil {
- t.Fatalf("IterDirent(): %v", err)
- }
-
- expectedTypes := make(map[string]testutil.DirentType)
- expectedOffsets := make(map[string]int64)
- for name, want := range tc.wants {
- expectedTypes[name] = want.Type
- if want.NextOff != 0 {
- expectedOffsets[name] = want.NextOff
- }
- }
-
- collector.SkipDotsChecks(true) // We seek()ed past the dots.
- s.AssertAllDirentTypes(&collector, expectedTypes)
- s.AssertDirentOffsets(&collector, expectedOffsets)
- })
- }
-}
-
-func TestTask(t *testing.T) {
- s := setup(t)
- defer s.Destroy()
-
- k := kernel.KernelFromContext(s.Ctx)
- tc := k.NewThreadGroup(nil, k.RootPIDNamespace(), kernel.NewSignalHandlers(), linux.SIGCHLD, k.GlobalInit().Limits())
- _, err := testutil.CreateTask(s.Ctx, "name", tc, s.MntNs, s.Root, s.Root)
- if err != nil {
- t.Fatalf("CreateTask(): %v", err)
- }
-
- collector := s.ListDirents(s.PathOpAtRoot("/proc/1"))
- s.AssertAllDirentTypes(collector, taskStaticFiles)
-}
-
-func TestProcSelf(t *testing.T) {
- s := setup(t)
- defer s.Destroy()
-
- k := kernel.KernelFromContext(s.Ctx)
- tc := k.NewThreadGroup(nil, k.RootPIDNamespace(), kernel.NewSignalHandlers(), linux.SIGCHLD, k.GlobalInit().Limits())
- task, err := testutil.CreateTask(s.Ctx, "name", tc, s.MntNs, s.Root, s.Root)
- if err != nil {
- t.Fatalf("CreateTask(): %v", err)
- }
-
- collector := s.WithTemporaryContext(task.AsyncContext()).ListDirents(&vfs.PathOperation{
- Root: s.Root,
- Start: s.Root,
- Path: fspath.Parse("/proc/self/"),
- FollowFinalSymlink: true,
- })
- s.AssertAllDirentTypes(collector, taskStaticFiles)
-}
-
-func iterateDir(ctx context.Context, t *testing.T, s *testutil.System, fd *vfs.FileDescription) {
- t.Logf("Iterating: %s", fd.MappedName(ctx))
-
- var collector testutil.DirentCollector
- if err := fd.IterDirents(ctx, &collector); err != nil {
- t.Fatalf("IterDirents(): %v", err)
- }
- if err := collector.Contains(".", linux.DT_DIR); err != nil {
- t.Error(err.Error())
- }
- if err := collector.Contains("..", linux.DT_DIR); err != nil {
- t.Error(err.Error())
- }
-
- for _, d := range collector.Dirents() {
- if d.Name == "." || d.Name == ".." {
- continue
- }
- absPath := path.Join(fd.MappedName(ctx), d.Name)
- if d.Type == linux.DT_LNK {
- link, err := s.VFS.ReadlinkAt(
- ctx,
- auth.CredentialsFromContext(ctx),
- &vfs.PathOperation{Root: s.Root, Start: s.Root, Path: fspath.Parse(absPath)},
- )
- if err != nil {
- t.Errorf("vfsfs.ReadlinkAt(%v) failed: %v", absPath, err)
- } else {
- t.Logf("Skipping symlink: %s => %s", absPath, link)
- }
- continue
- }
-
- t.Logf("Opening: %s", absPath)
- child, err := s.VFS.OpenAt(
- ctx,
- auth.CredentialsFromContext(ctx),
- &vfs.PathOperation{Root: s.Root, Start: s.Root, Path: fspath.Parse(absPath)},
- &vfs.OpenOptions{},
- )
- if err != nil {
- t.Errorf("vfsfs.OpenAt(%v) failed: %v", absPath, err)
- continue
- }
- defer child.DecRef(ctx)
- stat, err := child.Stat(ctx, vfs.StatOptions{})
- if err != nil {
- t.Errorf("Stat(%v) failed: %v", absPath, err)
- }
- if got := linux.FileMode(stat.Mode).DirentType(); got != d.Type {
- t.Errorf("wrong file mode, stat: %v, dirent: %v", got, d.Type)
- }
- if d.Type == linux.DT_DIR {
- // Found another dir, let's do it again!
- iterateDir(ctx, t, s, child)
- }
- }
-}
-
-// TestTree iterates all directories and stats every file.
-func TestTree(t *testing.T) {
- s := setup(t)
- defer s.Destroy()
-
- k := kernel.KernelFromContext(s.Ctx)
-
- pop := &vfs.PathOperation{
- Root: s.Root,
- Start: s.Root,
- Path: fspath.Parse("test-file"),
- }
- opts := &vfs.OpenOptions{
- Flags: linux.O_RDONLY | linux.O_CREAT,
- Mode: 0777,
- }
- file, err := s.VFS.OpenAt(s.Ctx, s.Creds, pop, opts)
- if err != nil {
- t.Fatalf("failed to create test file: %v", err)
- }
- defer file.DecRef(s.Ctx)
-
- var tasks []*kernel.Task
- for i := 0; i < 5; i++ {
- tc := k.NewThreadGroup(nil, k.RootPIDNamespace(), kernel.NewSignalHandlers(), linux.SIGCHLD, k.GlobalInit().Limits())
- task, err := testutil.CreateTask(s.Ctx, fmt.Sprintf("name-%d", i), tc, s.MntNs, s.Root, s.Root)
- if err != nil {
- t.Fatalf("CreateTask(): %v", err)
- }
- // Add file to populate /proc/[pid]/fd and fdinfo directories.
- task.FDTable().NewFDVFS2(task.AsyncContext(), 0, file, kernel.FDFlags{})
- tasks = append(tasks, task)
- }
-
- ctx := tasks[0].AsyncContext()
- fd, err := s.VFS.OpenAt(
- ctx,
- auth.CredentialsFromContext(s.Ctx),
- &vfs.PathOperation{Root: s.Root, Start: s.Root, Path: fspath.Parse("/proc")},
- &vfs.OpenOptions{},
- )
- if err != nil {
- t.Fatalf("vfsfs.OpenAt(/proc) failed: %v", err)
- }
- iterateDir(ctx, t, s, fd)
- fd.DecRef(ctx)
-}
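
Editor's note: the deleted tests above rely on a fixed dirent-offset convention for /proc: plain names start at offset 256, 'self' and 'thread-self' come first, and each PID directory's NextOff is 256 + 2 + pid + 1 (so proc1 = 260, proc3 = 262, matching the expectations in the test). A tiny worked check, illustrative only:

// procoffsets_sketch.go — illustrative only.
package main

import "fmt"

// pidNextOff computes the NextOff the tests expected for /proc/[pid].
func pidNextOff(pid int64) int64 { return 256 + 2 + pid + 1 }

func main() {
	fmt.Println(256 + 0 + 1)   // selfLink.NextOff       = 257
	fmt.Println(256 + 1 + 1)   // threadSelfLink.NextOff = 258
	fmt.Println(pidNextOff(1)) // proc1.NextOff          = 260
	fmt.Println(pidNextOff(3)) // proc3.NextOff          = 262
}
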
diff --git a/pkg/sentry/fsimpl/signalfd/BUILD b/pkg/sentry/fsimpl/signalfd/BUILD
deleted file mode 100644
index adb610213..000000000
--- a/pkg/sentry/fsimpl/signalfd/BUILD
+++ /dev/null
@@ -1,19 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "signalfd",
- srcs = ["signalfd.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/sentry/kernel",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- "//pkg/waiter",
- ],
-)
diff --git a/pkg/sentry/fsimpl/signalfd/signalfd_state_autogen.go b/pkg/sentry/fsimpl/signalfd/signalfd_state_autogen.go
new file mode 100644
index 000000000..3bf27c6a6
--- /dev/null
+++ b/pkg/sentry/fsimpl/signalfd/signalfd_state_autogen.go
@@ -0,0 +1,51 @@
+// automatically generated by stateify.
+
+package signalfd
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (sfd *SignalFileDescription) StateTypeName() string {
+ return "pkg/sentry/fsimpl/signalfd.SignalFileDescription"
+}
+
+func (sfd *SignalFileDescription) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "DentryMetadataFileDescriptionImpl",
+ "NoLockFD",
+ "target",
+ "mask",
+ }
+}
+
+func (sfd *SignalFileDescription) beforeSave() {}
+
+// +checklocksignore
+func (sfd *SignalFileDescription) StateSave(stateSinkObject state.Sink) {
+ sfd.beforeSave()
+ stateSinkObject.Save(0, &sfd.vfsfd)
+ stateSinkObject.Save(1, &sfd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &sfd.DentryMetadataFileDescriptionImpl)
+ stateSinkObject.Save(3, &sfd.NoLockFD)
+ stateSinkObject.Save(4, &sfd.target)
+ stateSinkObject.Save(5, &sfd.mask)
+}
+
+func (sfd *SignalFileDescription) afterLoad() {}
+
+// +checklocksignore
+func (sfd *SignalFileDescription) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &sfd.vfsfd)
+ stateSourceObject.Load(1, &sfd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &sfd.DentryMetadataFileDescriptionImpl)
+ stateSourceObject.Load(3, &sfd.NoLockFD)
+ stateSourceObject.Load(4, &sfd.target)
+ stateSourceObject.Load(5, &sfd.mask)
+}
+
+func init() {
+ state.Register((*SignalFileDescription)(nil))
+}
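
Editor's note: the stateify-generated code above follows a simple contract — StateFields fixes an index per field, StateSave writes each field by that index, and StateLoad reads it back by the same index (with afterLoad hooks scheduled separately where present). A minimal sketch of the same contract; the sink/source map types are tiny stand-ins for gVisor's pkg/state interfaces, not the real API:

// stateify_sketch.go — illustrative only.
package main

import "fmt"

type sink map[int]interface{}   // stand-in for state.Sink
type source map[int]interface{} // stand-in for state.Source

type counter struct {
	name  string
	count int64
}

func (c *counter) StateTypeName() string { return "example.counter" }
func (c *counter) StateFields() []string { return []string{"name", "count"} }

// StateSave writes each field at its fixed index.
func (c *counter) StateSave(s sink) {
	s[0] = c.name
	s[1] = c.count
}

// StateLoad reads each field back from the matching index.
func (c *counter) StateLoad(s source) {
	c.name = s[0].(string)
	c.count = s[1].(int64)
}

func main() {
	s := sink{}
	(&counter{name: "reqs", count: 42}).StateSave(s)

	var restored counter
	restored.StateLoad(source(s))
	fmt.Printf("%+v\n", restored) // {name:reqs count:42}
}
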
diff --git a/pkg/sentry/fsimpl/sockfs/BUILD b/pkg/sentry/fsimpl/sockfs/BUILD
deleted file mode 100644
index 9defca936..000000000
--- a/pkg/sentry/fsimpl/sockfs/BUILD
+++ /dev/null
@@ -1,18 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-licenses(["notice"])
-
-go_library(
- name = "sockfs",
- srcs = ["sockfs.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/fspath",
- "//pkg/sentry/fsimpl/kernfs",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- ],
-)
diff --git a/pkg/sentry/fsimpl/sockfs/sockfs_state_autogen.go b/pkg/sentry/fsimpl/sockfs/sockfs_state_autogen.go
new file mode 100644
index 000000000..cf6eddef2
--- /dev/null
+++ b/pkg/sentry/fsimpl/sockfs/sockfs_state_autogen.go
@@ -0,0 +1,96 @@
+// automatically generated by stateify.
+
+package sockfs
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (fsType *filesystemType) StateTypeName() string {
+ return "pkg/sentry/fsimpl/sockfs.filesystemType"
+}
+
+func (fsType *filesystemType) StateFields() []string {
+ return []string{}
+}
+
+func (fsType *filesystemType) beforeSave() {}
+
+// +checklocksignore
+func (fsType *filesystemType) StateSave(stateSinkObject state.Sink) {
+ fsType.beforeSave()
+}
+
+func (fsType *filesystemType) afterLoad() {}
+
+// +checklocksignore
+func (fsType *filesystemType) StateLoad(stateSourceObject state.Source) {
+}
+
+func (fs *filesystem) StateTypeName() string {
+ return "pkg/sentry/fsimpl/sockfs.filesystem"
+}
+
+func (fs *filesystem) StateFields() []string {
+ return []string{
+ "Filesystem",
+ "devMinor",
+ }
+}
+
+func (fs *filesystem) beforeSave() {}
+
+// +checklocksignore
+func (fs *filesystem) StateSave(stateSinkObject state.Sink) {
+ fs.beforeSave()
+ stateSinkObject.Save(0, &fs.Filesystem)
+ stateSinkObject.Save(1, &fs.devMinor)
+}
+
+func (fs *filesystem) afterLoad() {}
+
+// +checklocksignore
+func (fs *filesystem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fs.Filesystem)
+ stateSourceObject.Load(1, &fs.devMinor)
+}
+
+func (i *inode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/sockfs.inode"
+}
+
+func (i *inode) StateFields() []string {
+ return []string{
+ "InodeAttrs",
+ "InodeNoopRefCount",
+ "InodeNotDirectory",
+ "InodeNotSymlink",
+ }
+}
+
+func (i *inode) beforeSave() {}
+
+// +checklocksignore
+func (i *inode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.InodeAttrs)
+ stateSinkObject.Save(1, &i.InodeNoopRefCount)
+ stateSinkObject.Save(2, &i.InodeNotDirectory)
+ stateSinkObject.Save(3, &i.InodeNotSymlink)
+}
+
+func (i *inode) afterLoad() {}
+
+// +checklocksignore
+func (i *inode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.InodeAttrs)
+ stateSourceObject.Load(1, &i.InodeNoopRefCount)
+ stateSourceObject.Load(2, &i.InodeNotDirectory)
+ stateSourceObject.Load(3, &i.InodeNotSymlink)
+}
+
+func init() {
+ state.Register((*filesystemType)(nil))
+ state.Register((*filesystem)(nil))
+ state.Register((*inode)(nil))
+}
diff --git a/pkg/sentry/fsimpl/sys/BUILD b/pkg/sentry/fsimpl/sys/BUILD
deleted file mode 100644
index 1af0a5cbc..000000000
--- a/pkg/sentry/fsimpl/sys/BUILD
+++ /dev/null
@@ -1,56 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-licenses(["notice"])
-
-go_template_instance(
- name = "dir_refs",
- out = "dir_refs.go",
- package = "sys",
- prefix = "dir",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "dir",
- },
-)
-
-go_library(
- name = "sys",
- srcs = [
- "dir_refs.go",
- "kcov.go",
- "sys.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/coverage",
- "//pkg/errors/linuxerr",
- "//pkg/log",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/sentry/arch",
- "//pkg/sentry/fsimpl/kernfs",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/memmap",
- "//pkg/sentry/vfs",
- "//pkg/syserror",
- "//pkg/usermem",
- ],
-)
-
-go_test(
- name = "sys_test",
- srcs = ["sys_test.go"],
- deps = [
- ":sys",
- "//pkg/abi/linux",
- "//pkg/sentry/fsimpl/testutil",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- "@com_github_google_go_cmp//cmp:go_default_library",
- ],
-)
diff --git a/pkg/sentry/fsimpl/sys/dir_refs.go b/pkg/sentry/fsimpl/sys/dir_refs.go
new file mode 100644
index 000000000..17bc43d2e
--- /dev/null
+++ b/pkg/sentry/fsimpl/sys/dir_refs.go
@@ -0,0 +1,140 @@
+package sys
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const direnableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var dirobj *dir
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type dirRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *dirRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *dirRefs) RefType() string {
+ return fmt.Sprintf("%T", dirobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *dirRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *dirRefs) LogRefs() bool {
+ return direnableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *dirRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *dirRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if direnableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.TryRefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *dirRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if direnableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *dirRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if direnableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *dirRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
diff --git a/pkg/sentry/fsimpl/sys/sys_state_autogen.go b/pkg/sentry/fsimpl/sys/sys_state_autogen.go
new file mode 100644
index 000000000..c5adf7db3
--- /dev/null
+++ b/pkg/sentry/fsimpl/sys/sys_state_autogen.go
@@ -0,0 +1,263 @@
+// automatically generated by stateify.
+
+package sys
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (r *dirRefs) StateTypeName() string {
+ return "pkg/sentry/fsimpl/sys.dirRefs"
+}
+
+func (r *dirRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *dirRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *dirRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *dirRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (i *kcovInode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/sys.kcovInode"
+}
+
+func (i *kcovInode) StateFields() []string {
+ return []string{
+ "InodeAttrs",
+ "InodeNoopRefCount",
+ "InodeNotDirectory",
+ "InodeNotSymlink",
+ "implStatFS",
+ }
+}
+
+func (i *kcovInode) beforeSave() {}
+
+// +checklocksignore
+func (i *kcovInode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.InodeAttrs)
+ stateSinkObject.Save(1, &i.InodeNoopRefCount)
+ stateSinkObject.Save(2, &i.InodeNotDirectory)
+ stateSinkObject.Save(3, &i.InodeNotSymlink)
+ stateSinkObject.Save(4, &i.implStatFS)
+}
+
+func (i *kcovInode) afterLoad() {}
+
+// +checklocksignore
+func (i *kcovInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.InodeAttrs)
+ stateSourceObject.Load(1, &i.InodeNoopRefCount)
+ stateSourceObject.Load(2, &i.InodeNotDirectory)
+ stateSourceObject.Load(3, &i.InodeNotSymlink)
+ stateSourceObject.Load(4, &i.implStatFS)
+}
+
+func (fd *kcovFD) StateTypeName() string {
+ return "pkg/sentry/fsimpl/sys.kcovFD"
+}
+
+func (fd *kcovFD) StateFields() []string {
+ return []string{
+ "FileDescriptionDefaultImpl",
+ "NoLockFD",
+ "vfsfd",
+ "inode",
+ "kcov",
+ }
+}
+
+func (fd *kcovFD) beforeSave() {}
+
+// +checklocksignore
+func (fd *kcovFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(1, &fd.NoLockFD)
+ stateSinkObject.Save(2, &fd.vfsfd)
+ stateSinkObject.Save(3, &fd.inode)
+ stateSinkObject.Save(4, &fd.kcov)
+}
+
+func (fd *kcovFD) afterLoad() {}
+
+// +checklocksignore
+func (fd *kcovFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(1, &fd.NoLockFD)
+ stateSourceObject.Load(2, &fd.vfsfd)
+ stateSourceObject.Load(3, &fd.inode)
+ stateSourceObject.Load(4, &fd.kcov)
+}
+
+func (fsType *FilesystemType) StateTypeName() string {
+ return "pkg/sentry/fsimpl/sys.FilesystemType"
+}
+
+func (fsType *FilesystemType) StateFields() []string {
+ return []string{}
+}
+
+func (fsType *FilesystemType) beforeSave() {}
+
+// +checklocksignore
+func (fsType *FilesystemType) StateSave(stateSinkObject state.Sink) {
+ fsType.beforeSave()
+}
+
+func (fsType *FilesystemType) afterLoad() {}
+
+// +checklocksignore
+func (fsType *FilesystemType) StateLoad(stateSourceObject state.Source) {
+}
+
+func (fs *filesystem) StateTypeName() string {
+ return "pkg/sentry/fsimpl/sys.filesystem"
+}
+
+func (fs *filesystem) StateFields() []string {
+ return []string{
+ "Filesystem",
+ "devMinor",
+ }
+}
+
+func (fs *filesystem) beforeSave() {}
+
+// +checklocksignore
+func (fs *filesystem) StateSave(stateSinkObject state.Sink) {
+ fs.beforeSave()
+ stateSinkObject.Save(0, &fs.Filesystem)
+ stateSinkObject.Save(1, &fs.devMinor)
+}
+
+func (fs *filesystem) afterLoad() {}
+
+// +checklocksignore
+func (fs *filesystem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fs.Filesystem)
+ stateSourceObject.Load(1, &fs.devMinor)
+}
+
+func (d *dir) StateTypeName() string {
+ return "pkg/sentry/fsimpl/sys.dir"
+}
+
+func (d *dir) StateFields() []string {
+ return []string{
+ "dirRefs",
+ "InodeAlwaysValid",
+ "InodeAttrs",
+ "InodeNotSymlink",
+ "InodeDirectoryNoNewChildren",
+ "InodeTemporary",
+ "OrderedChildren",
+ "locks",
+ }
+}
+
+func (d *dir) beforeSave() {}
+
+// +checklocksignore
+func (d *dir) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.dirRefs)
+ stateSinkObject.Save(1, &d.InodeAlwaysValid)
+ stateSinkObject.Save(2, &d.InodeAttrs)
+ stateSinkObject.Save(3, &d.InodeNotSymlink)
+ stateSinkObject.Save(4, &d.InodeDirectoryNoNewChildren)
+ stateSinkObject.Save(5, &d.InodeTemporary)
+ stateSinkObject.Save(6, &d.OrderedChildren)
+ stateSinkObject.Save(7, &d.locks)
+}
+
+func (d *dir) afterLoad() {}
+
+// +checklocksignore
+func (d *dir) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.dirRefs)
+ stateSourceObject.Load(1, &d.InodeAlwaysValid)
+ stateSourceObject.Load(2, &d.InodeAttrs)
+ stateSourceObject.Load(3, &d.InodeNotSymlink)
+ stateSourceObject.Load(4, &d.InodeDirectoryNoNewChildren)
+ stateSourceObject.Load(5, &d.InodeTemporary)
+ stateSourceObject.Load(6, &d.OrderedChildren)
+ stateSourceObject.Load(7, &d.locks)
+}
+
+func (c *cpuFile) StateTypeName() string {
+ return "pkg/sentry/fsimpl/sys.cpuFile"
+}
+
+func (c *cpuFile) StateFields() []string {
+ return []string{
+ "implStatFS",
+ "DynamicBytesFile",
+ "maxCores",
+ }
+}
+
+func (c *cpuFile) beforeSave() {}
+
+// +checklocksignore
+func (c *cpuFile) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.implStatFS)
+ stateSinkObject.Save(1, &c.DynamicBytesFile)
+ stateSinkObject.Save(2, &c.maxCores)
+}
+
+func (c *cpuFile) afterLoad() {}
+
+// +checklocksignore
+func (c *cpuFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.implStatFS)
+ stateSourceObject.Load(1, &c.DynamicBytesFile)
+ stateSourceObject.Load(2, &c.maxCores)
+}
+
+func (i *implStatFS) StateTypeName() string {
+ return "pkg/sentry/fsimpl/sys.implStatFS"
+}
+
+func (i *implStatFS) StateFields() []string {
+ return []string{}
+}
+
+func (i *implStatFS) beforeSave() {}
+
+// +checklocksignore
+func (i *implStatFS) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+}
+
+func (i *implStatFS) afterLoad() {}
+
+// +checklocksignore
+func (i *implStatFS) StateLoad(stateSourceObject state.Source) {
+}
+
+func init() {
+ state.Register((*dirRefs)(nil))
+ state.Register((*kcovInode)(nil))
+ state.Register((*kcovFD)(nil))
+ state.Register((*FilesystemType)(nil))
+ state.Register((*filesystem)(nil))
+ state.Register((*dir)(nil))
+ state.Register((*cpuFile)(nil))
+ state.Register((*implStatFS)(nil))
+}
diff --git a/pkg/sentry/fsimpl/sys/sys_test.go b/pkg/sentry/fsimpl/sys/sys_test.go
deleted file mode 100644
index 0a0d914cc..000000000
--- a/pkg/sentry/fsimpl/sys/sys_test.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package sys_test
-
-import (
- "fmt"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/sys"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/testutil"
- "gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
-)
-
-func newTestSystem(t *testing.T) *testutil.System {
- k, err := testutil.Boot()
- if err != nil {
- t.Fatalf("Failed to create test kernel: %v", err)
- }
- ctx := k.SupervisorContext()
- creds := auth.CredentialsFromContext(ctx)
- k.VFS().MustRegisterFilesystemType(sys.Name, sys.FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
- })
-
- mns, err := k.VFS().NewMountNamespace(ctx, creds, "", sys.Name, &vfs.MountOptions{})
- if err != nil {
- t.Fatalf("Failed to create new mount namespace: %v", err)
- }
- return testutil.NewSystem(ctx, t, k.VFS(), mns)
-}
-
-func TestReadCPUFile(t *testing.T) {
- s := newTestSystem(t)
- defer s.Destroy()
- k := kernel.KernelFromContext(s.Ctx)
- maxCPUCores := k.ApplicationCores()
-
- expected := fmt.Sprintf("0-%d\n", maxCPUCores-1)
-
- for _, fname := range []string{"online", "possible", "present"} {
- pop := s.PathOpAtRoot(fmt.Sprintf("devices/system/cpu/%s", fname))
- fd, err := s.VFS.OpenAt(s.Ctx, s.Creds, pop, &vfs.OpenOptions{})
- if err != nil {
- t.Fatalf("OpenAt(pop:%+v) = %+v failed: %v", pop, fd, err)
- }
- defer fd.DecRef(s.Ctx)
- content, err := s.ReadToEnd(fd)
- if err != nil {
- t.Fatalf("Read failed: %v", err)
- }
- if diff := cmp.Diff(expected, content); diff != "" {
- t.Fatalf("Read returned unexpected data:\n--- want\n+++ got\n%v", diff)
- }
- }
-}
-
-func TestSysRootContainsExpectedEntries(t *testing.T) {
- s := newTestSystem(t)
- defer s.Destroy()
- pop := s.PathOpAtRoot("/")
- s.AssertAllDirentTypes(s.ListDirents(pop), map[string]testutil.DirentType{
- "block": linux.DT_DIR,
- "bus": linux.DT_DIR,
- "class": linux.DT_DIR,
- "dev": linux.DT_DIR,
- "devices": linux.DT_DIR,
- "firmware": linux.DT_DIR,
- "fs": linux.DT_DIR,
- "kernel": linux.DT_DIR,
- "module": linux.DT_DIR,
- "power": linux.DT_DIR,
- })
-}
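
Editor's note: the deleted TestReadCPUFile expects /sys/devices/system/cpu/{online,possible,present} to contain a single inclusive range covering all application cores, formatted as "0-%d\n". A quick illustration of that format:

// cpurange_sketch.go — illustrative only.
package main

import "fmt"

// cpuRange renders the range string the test expected for a given core count.
func cpuRange(cores uint) string { return fmt.Sprintf("0-%d\n", cores-1) }

func main() {
	fmt.Print(cpuRange(4)) // 0-3
	fmt.Print(cpuRange(8)) // 0-7
}
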
diff --git a/pkg/sentry/fsimpl/testutil/BUILD b/pkg/sentry/fsimpl/testutil/BUILD
deleted file mode 100644
index b3f9d1010..000000000
--- a/pkg/sentry/fsimpl/testutil/BUILD
+++ /dev/null
@@ -1,38 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-licenses(["notice"])
-
-go_library(
- name = "testutil",
- testonly = 1,
- srcs = [
- "kernel.go",
- "testutil.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/cpuid",
- "//pkg/fspath",
- "//pkg/hostarch",
- "//pkg/memutil",
- "//pkg/sentry/fsbridge",
- "//pkg/sentry/fsimpl/tmpfs",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/sched",
- "//pkg/sentry/limits",
- "//pkg/sentry/loader",
- "//pkg/sentry/mm",
- "//pkg/sentry/pgalloc",
- "//pkg/sentry/platform",
- "//pkg/sentry/platform/kvm",
- "//pkg/sentry/platform/ptrace",
- "//pkg/sentry/time",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/usermem",
- "@com_github_google_go_cmp//cmp:go_default_library",
- ],
-)
diff --git a/pkg/sentry/fsimpl/testutil/kernel.go b/pkg/sentry/fsimpl/testutil/kernel.go
deleted file mode 100644
index 473b41cff..000000000
--- a/pkg/sentry/fsimpl/testutil/kernel.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package testutil
-
-import (
- "flag"
- "fmt"
- "os"
- "runtime"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/cpuid"
- "gvisor.dev/gvisor/pkg/fspath"
- "gvisor.dev/gvisor/pkg/memutil"
- "gvisor.dev/gvisor/pkg/sentry/fsbridge"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/tmpfs"
- "gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/kernel/sched"
- "gvisor.dev/gvisor/pkg/sentry/limits"
- "gvisor.dev/gvisor/pkg/sentry/loader"
- "gvisor.dev/gvisor/pkg/sentry/mm"
- "gvisor.dev/gvisor/pkg/sentry/pgalloc"
- "gvisor.dev/gvisor/pkg/sentry/platform"
- "gvisor.dev/gvisor/pkg/sentry/time"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
-
-	// Platforms are pluggable.
- _ "gvisor.dev/gvisor/pkg/sentry/platform/kvm"
- _ "gvisor.dev/gvisor/pkg/sentry/platform/ptrace"
-)
-
-var (
- platformFlag = flag.String("platform", "ptrace", "specify which platform to use")
-)
-
-// Boot initializes a new bare bones kernel for test.
-func Boot() (*kernel.Kernel, error) {
- platformCtr, err := platform.Lookup(*platformFlag)
- if err != nil {
- return nil, fmt.Errorf("platform not found: %v", err)
- }
- deviceFile, err := platformCtr.OpenDevice()
- if err != nil {
- return nil, fmt.Errorf("creating platform: %v", err)
- }
- plat, err := platformCtr.New(deviceFile)
- if err != nil {
- return nil, fmt.Errorf("creating platform: %v", err)
- }
-
- kernel.VFS2Enabled = true
- k := &kernel.Kernel{
- Platform: plat,
- }
-
- mf, err := createMemoryFile()
- if err != nil {
- return nil, err
- }
- k.SetMemoryFile(mf)
-
- // Pass k as the platform since it is savable, unlike the actual platform.
- vdso, err := loader.PrepareVDSO(k)
- if err != nil {
- return nil, fmt.Errorf("creating vdso: %v", err)
- }
-
- // Create timekeeper.
- tk := kernel.NewTimekeeper(k, vdso.ParamPage.FileRange())
- tk.SetClocks(time.NewCalibratedClocks())
-
- creds := auth.NewRootCredentials(auth.NewRootUserNamespace())
-
- // Initiate the Kernel object, which is required by the Context passed
- // to createVFS in order to mount (among other things) procfs.
- if err = k.Init(kernel.InitKernelArgs{
- ApplicationCores: uint(runtime.GOMAXPROCS(-1)),
- FeatureSet: cpuid.HostFeatureSet(),
- Timekeeper: tk,
- RootUserNamespace: creds.UserNamespace,
- Vdso: vdso,
- RootUTSNamespace: kernel.NewUTSNamespace("hostname", "domain", creds.UserNamespace),
- RootIPCNamespace: kernel.NewIPCNamespace(creds.UserNamespace),
- RootAbstractSocketNamespace: kernel.NewAbstractSocketNamespace(),
- PIDNamespace: kernel.NewRootPIDNamespace(creds.UserNamespace),
- }); err != nil {
- return nil, fmt.Errorf("initializing kernel: %v", err)
- }
-
- k.VFS().MustRegisterFilesystemType(tmpfs.Name, &tmpfs.FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
- AllowUserList: true,
- })
-
- ls, err := limits.NewLinuxLimitSet()
- if err != nil {
- return nil, err
- }
- tg := k.NewThreadGroup(nil, k.RootPIDNamespace(), kernel.NewSignalHandlers(), linux.SIGCHLD, ls)
- k.TestOnlySetGlobalInit(tg)
-
- return k, nil
-}
-
-// CreateTask creates a new bare bones task for tests.
-func CreateTask(ctx context.Context, name string, tc *kernel.ThreadGroup, mntns *vfs.MountNamespace, root, cwd vfs.VirtualDentry) (*kernel.Task, error) {
- k := kernel.KernelFromContext(ctx)
- if k == nil {
- return nil, fmt.Errorf("cannot find kernel from context")
- }
-
- exe, err := newFakeExecutable(ctx, k.VFS(), auth.CredentialsFromContext(ctx), root)
- if err != nil {
- return nil, err
- }
- m := mm.NewMemoryManager(k, k, k.SleepForAddressSpaceActivation)
- m.SetExecutable(ctx, fsbridge.NewVFSFile(exe))
-
- config := &kernel.TaskConfig{
- Kernel: k,
- ThreadGroup: tc,
- TaskImage: &kernel.TaskImage{Name: name, MemoryManager: m},
- Credentials: auth.CredentialsFromContext(ctx),
- NetworkNamespace: k.RootNetworkNamespace(),
- AllowedCPUMask: sched.NewFullCPUSet(k.ApplicationCores()),
- UTSNamespace: kernel.UTSNamespaceFromContext(ctx),
- IPCNamespace: kernel.IPCNamespaceFromContext(ctx),
- AbstractSocketNamespace: kernel.NewAbstractSocketNamespace(),
- MountNamespaceVFS2: mntns,
- FSContext: kernel.NewFSContextVFS2(root, cwd, 0022),
- FDTable: k.NewFDTable(),
- }
- t, err := k.TaskSet().NewTask(ctx, config)
- if err != nil {
- config.ThreadGroup.Release(ctx)
- return nil, err
- }
- return t, nil
-}
-
-func newFakeExecutable(ctx context.Context, vfsObj *vfs.VirtualFilesystem, creds *auth.Credentials, root vfs.VirtualDentry) (*vfs.FileDescription, error) {
- const name = "executable"
- pop := &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(name),
- }
- opts := &vfs.OpenOptions{
- Flags: linux.O_RDONLY | linux.O_CREAT,
- Mode: 0777,
- }
- return vfsObj.OpenAt(ctx, creds, pop, opts)
-}
-
-func createMemoryFile() (*pgalloc.MemoryFile, error) {
- const memfileName = "test-memory"
- memfd, err := memutil.CreateMemFD(memfileName, 0)
- if err != nil {
- return nil, fmt.Errorf("error creating memfd: %v", err)
- }
- memfile := os.NewFile(uintptr(memfd), memfileName)
- mf, err := pgalloc.NewMemoryFile(memfile, pgalloc.MemoryFileOpts{})
- if err != nil {
- _ = memfile.Close()
- return nil, fmt.Errorf("error creating pgalloc.MemoryFile: %v", err)
- }
- return mf, nil
-}
diff --git a/pkg/sentry/fsimpl/testutil/testutil.go b/pkg/sentry/fsimpl/testutil/testutil.go
deleted file mode 100644
index 59e6f9c92..000000000
--- a/pkg/sentry/fsimpl/testutil/testutil.go
+++ /dev/null
@@ -1,288 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package testutil provides common test utilities for kernfs-based
-// filesystems.
-package testutil
-
-import (
- "fmt"
- "io"
- "strings"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/fspath"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/usermem"
-
- "gvisor.dev/gvisor/pkg/hostarch"
-)
-
-// System represents the context for a single test.
-//
-// Test systems must be explicitly destroyed with System.Destroy.
-type System struct {
- t *testing.T
- Ctx context.Context
- Creds *auth.Credentials
- VFS *vfs.VirtualFilesystem
- Root vfs.VirtualDentry
- MntNs *vfs.MountNamespace
-}
-
-// NewSystem constructs a System.
-//
-// Precondition: Caller must hold a reference on mns, whose ownership
-// is transferred to the new System.
-func NewSystem(ctx context.Context, t *testing.T, v *vfs.VirtualFilesystem, mns *vfs.MountNamespace) *System {
- root := mns.Root()
- root.IncRef()
- s := &System{
- t: t,
- Ctx: ctx,
- Creds: auth.CredentialsFromContext(ctx),
- VFS: v,
- MntNs: mns,
- Root: root,
- }
- return s
-}
-
-// WithSubtest creates a temporary test system with a new test harness,
-// referencing all other resources from the original system. This is useful when
-// a system is reused for multiple subtests, and the T needs to change for each
-// case. Note that this is safe when test cases run in parallel, as all
-// resources referenced by the system are immutable, or handle interior
-// mutations in a thread-safe manner.
-//
-// The returned system must not outlive the original and should not be destroyed
-// via System.Destroy.
-func (s *System) WithSubtest(t *testing.T) *System {
- return &System{
- t: t,
- Ctx: s.Ctx,
- Creds: s.Creds,
- VFS: s.VFS,
- MntNs: s.MntNs,
- Root: s.Root,
- }
-}
-
-// WithTemporaryContext constructs a temporary test system with a new context
-// ctx. The temporary system borrows all resources and references from the
-// original system. The returned temporary system must not outlive the original
-// system, and should not be destroyed via System.Destroy.
-func (s *System) WithTemporaryContext(ctx context.Context) *System {
- return &System{
- t: s.t,
- Ctx: ctx,
- Creds: s.Creds,
- VFS: s.VFS,
- MntNs: s.MntNs,
- Root: s.Root,
- }
-}
-
-// Destroy releases the resources associated with a test system.
-func (s *System) Destroy() {
- s.Root.DecRef(s.Ctx)
- s.MntNs.DecRef(s.Ctx) // Reference on MntNs passed to NewSystem.
-}
-
-// ReadToEnd reads the contents of fd until EOF and returns them as a string.
-func (s *System) ReadToEnd(fd *vfs.FileDescription) (string, error) {
- buf := make([]byte, hostarch.PageSize)
- bufIOSeq := usermem.BytesIOSequence(buf)
- opts := vfs.ReadOptions{}
-
- var content strings.Builder
- for {
- n, err := fd.Read(s.Ctx, bufIOSeq, opts)
- if n == 0 || err != nil {
- if err == io.EOF {
- err = nil
- }
- return content.String(), err
- }
- content.Write(buf[:n])
- }
-}
-
-// PathOpAtRoot constructs a PathOperation with the given path from
-// the root of the filesystem.
-func (s *System) PathOpAtRoot(path string) *vfs.PathOperation {
- return &vfs.PathOperation{
- Root: s.Root,
- Start: s.Root,
- Path: fspath.Parse(path),
- }
-}
-
-// GetDentryOrDie attempts to resolve a dentry referred to by the
-// provided path operation. If unsuccessful, the test fails.
-func (s *System) GetDentryOrDie(pop *vfs.PathOperation) vfs.VirtualDentry {
- vd, err := s.VFS.GetDentryAt(s.Ctx, s.Creds, pop, &vfs.GetDentryOptions{})
- if err != nil {
- s.t.Fatalf("GetDentryAt(pop:%+v) failed: %v", pop, err)
- }
- return vd
-}
-
-// DirentType is an alias for the type of linux_dirent64.d_type values.
-type DirentType = uint8
-
-// ListDirents lists the Dirents for a directory at pop.
-func (s *System) ListDirents(pop *vfs.PathOperation) *DirentCollector {
- fd, err := s.VFS.OpenAt(s.Ctx, s.Creds, pop, &vfs.OpenOptions{Flags: linux.O_RDONLY})
- if err != nil {
- s.t.Fatalf("OpenAt for PathOperation %+v failed: %v", pop, err)
- }
- defer fd.DecRef(s.Ctx)
-
- collector := &DirentCollector{}
- if err := fd.IterDirents(s.Ctx, collector); err != nil {
- s.t.Fatalf("IterDirent failed: %v", err)
- }
- return collector
-}
-
-// AssertAllDirentTypes verifies that the set of dirents in collector contains
-// exactly the specified set of expected entries. AssertAllDirentTypes respects
-// collector.skipDots, and implicitly checks for "." and ".." accordingly.
-func (s *System) AssertAllDirentTypes(collector *DirentCollector, expected map[string]DirentType) {
- if expected == nil {
- expected = make(map[string]DirentType)
- }
- // Also implicitly check for "." and "..", if enabled.
- if !collector.skipDots {
- expected["."] = linux.DT_DIR
- expected[".."] = linux.DT_DIR
- }
-
- dentryTypes := make(map[string]DirentType)
- collector.mu.Lock()
- for _, dirent := range collector.dirents {
- dentryTypes[dirent.Name] = dirent.Type
- }
- collector.mu.Unlock()
- if diff := cmp.Diff(expected, dentryTypes); diff != "" {
- s.t.Fatalf("IterDirent had unexpected results:\n--- want\n+++ got\n%v", diff)
- }
-}
-
-// AssertDirentOffsets verifies that collector contains at least the entries
-// specified in expected, with the given NextOff field. Entries specified in
-// expected but missing from collector result in failure. Extra entries in
-// collector are ignored. AssertDirentOffsets respects collector.skipDots, and
-// implicitly checks for "." and ".." accordingly.
-func (s *System) AssertDirentOffsets(collector *DirentCollector, expected map[string]int64) {
- // Also implicitly check for "." and "..", if enabled.
- if !collector.skipDots {
- expected["."] = 1
- expected[".."] = 2
- }
-
- dentryNextOffs := make(map[string]int64)
- collector.mu.Lock()
- for _, dirent := range collector.dirents {
- // Ignore extra entries in dentries that are not in expected.
- if _, ok := expected[dirent.Name]; ok {
- dentryNextOffs[dirent.Name] = dirent.NextOff
- }
- }
- collector.mu.Unlock()
- if diff := cmp.Diff(expected, dentryNextOffs); diff != "" {
- s.t.Fatalf("IterDirent had unexpected results:\n--- want\n+++ got\n%v", diff)
- }
-}
-
-// DirentCollector provides an implementation for vfs.IterDirentsCallback for
-// testing. It simply iterates to the end of a given directory FD and collects
-// all dirents emitted by the callback.
-type DirentCollector struct {
- mu sync.Mutex
- order []*vfs.Dirent
- dirents map[string]*vfs.Dirent
- // skipDots indicates whether the Assert* helpers should skip their
- // implicit checks for "." and "..".
- skipDots bool
-}
-
-// SkipDotsChecks enables or disables the implicit checks on "." and ".." when
-// the collector is used in various Assert* functions. Note that "." and ".."
-// are still collected if passed to d.Handle, so the caller should only disable
-// the checks when they aren't expected.
-func (d *DirentCollector) SkipDotsChecks(value bool) {
- d.skipDots = value
-}
-
-// Handle implements vfs.IterDirentsCallback.Handle.
-func (d *DirentCollector) Handle(dirent vfs.Dirent) error {
- d.mu.Lock()
- if d.dirents == nil {
- d.dirents = make(map[string]*vfs.Dirent)
- }
- d.order = append(d.order, &dirent)
- d.dirents[dirent.Name] = &dirent
- d.mu.Unlock()
- return nil
-}
-
-// Count returns the number of dirents currently in the collector.
-func (d *DirentCollector) Count() int {
- d.mu.Lock()
- defer d.mu.Unlock()
- return len(d.dirents)
-}
-
-// Contains checks whether the collector has a dirent with the given name and
-// type.
-func (d *DirentCollector) Contains(name string, typ uint8) error {
- d.mu.Lock()
- defer d.mu.Unlock()
- dirent, ok := d.dirents[name]
- if !ok {
- return fmt.Errorf("no dirent named %q found", name)
- }
- if dirent.Type != typ {
- return fmt.Errorf("dirent named %q found, but was expecting type %s, got: %+v", name, linux.DirentType.Parse(uint64(typ)), dirent)
- }
- return nil
-}
-
-// Dirents returns all dirents discovered by this collector.
-func (d *DirentCollector) Dirents() map[string]*vfs.Dirent {
- d.mu.Lock()
- dirents := make(map[string]*vfs.Dirent)
- for n, d := range d.dirents {
- dirents[n] = d
- }
- d.mu.Unlock()
- return dirents
-}
-
-// OrderedDirents returns an ordered list of dirents as discovered by this
-// collector.
-func (d *DirentCollector) OrderedDirents() []*vfs.Dirent {
- d.mu.Lock()
- dirents := make([]*vfs.Dirent, len(d.order))
- copy(dirents, d.order)
- d.mu.Unlock()
- return dirents
-}
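For context on the helpers deleted above, the following is a minimal sketch of how a kernfs filesystem test typically drove testutil.System and DirentCollector. The newTestMount helper and the "sub"/"data" entries are hypothetical stand-ins; only the NewSystem, PathOpAtRoot, ListDirents, and AssertAllDirentTypes calls correspond to the API removed here.

package myfs_test

import (
    "testing"

    "gvisor.dev/gvisor/pkg/abi/linux"
    "gvisor.dev/gvisor/pkg/sentry/contexttest"
    "gvisor.dev/gvisor/pkg/sentry/fsimpl/testutil"
)

func TestRootListing(t *testing.T) {
    ctx := contexttest.Context(t)
    // newTestMount is a hypothetical helper that registers the filesystem
    // under test and returns an initialized VFS plus a root mount namespace.
    vfsObj, mntns := newTestMount(ctx, t)

    // System takes ownership of the mount namespace reference.
    s := testutil.NewSystem(ctx, t, vfsObj, mntns)
    defer s.Destroy()

    // Collect the dirents at the mount root and verify their types. The
    // entries "sub" and "data" are assumed to have been created by
    // newTestMount.
    c := s.ListDirents(s.PathOpAtRoot("/"))
    s.AssertAllDirentTypes(c, map[string]testutil.DirentType{
        "sub":  linux.DT_DIR,
        "data": linux.DT_REG,
    })
}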
diff --git a/pkg/sentry/fsimpl/timerfd/BUILD b/pkg/sentry/fsimpl/timerfd/BUILD
deleted file mode 100644
index e6980a314..000000000
--- a/pkg/sentry/fsimpl/timerfd/BUILD
+++ /dev/null
@@ -1,19 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-licenses(["notice"])
-
-go_library(
- name = "timerfd",
- srcs = ["timerfd.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/vfs",
- "//pkg/syserror",
- "//pkg/usermem",
- "//pkg/waiter",
- ],
-)
diff --git a/pkg/sentry/fsimpl/timerfd/timerfd_state_autogen.go b/pkg/sentry/fsimpl/timerfd/timerfd_state_autogen.go
new file mode 100644
index 000000000..12970f25c
--- /dev/null
+++ b/pkg/sentry/fsimpl/timerfd/timerfd_state_autogen.go
@@ -0,0 +1,54 @@
+// automatically generated by stateify.
+
+package timerfd
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (tfd *TimerFileDescription) StateTypeName() string {
+ return "pkg/sentry/fsimpl/timerfd.TimerFileDescription"
+}
+
+func (tfd *TimerFileDescription) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "DentryMetadataFileDescriptionImpl",
+ "NoLockFD",
+ "events",
+ "timer",
+ "val",
+ }
+}
+
+func (tfd *TimerFileDescription) beforeSave() {}
+
+// +checklocksignore
+func (tfd *TimerFileDescription) StateSave(stateSinkObject state.Sink) {
+ tfd.beforeSave()
+ stateSinkObject.Save(0, &tfd.vfsfd)
+ stateSinkObject.Save(1, &tfd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &tfd.DentryMetadataFileDescriptionImpl)
+ stateSinkObject.Save(3, &tfd.NoLockFD)
+ stateSinkObject.Save(4, &tfd.events)
+ stateSinkObject.Save(5, &tfd.timer)
+ stateSinkObject.Save(6, &tfd.val)
+}
+
+func (tfd *TimerFileDescription) afterLoad() {}
+
+// +checklocksignore
+func (tfd *TimerFileDescription) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &tfd.vfsfd)
+ stateSourceObject.Load(1, &tfd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &tfd.DentryMetadataFileDescriptionImpl)
+ stateSourceObject.Load(3, &tfd.NoLockFD)
+ stateSourceObject.Load(4, &tfd.events)
+ stateSourceObject.Load(5, &tfd.timer)
+ stateSourceObject.Load(6, &tfd.val)
+}
+
+func init() {
+ state.Register((*TimerFileDescription)(nil))
+}
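The file above is produced by the stateify tool. As a rough illustration of the pattern (with a hypothetical type, not the real timerfd source), annotating a struct as savable is what drives generation of methods shaped like the ones shown:

// tickState is a hypothetical savable type in package timerfd, not part of
// the real implementation. The "+stateify savable" marker causes stateify to
// emit StateTypeName/StateFields/StateSave/StateLoad methods and an init-time
// state.Register call like those generated above; types may also supply
// their own beforeSave/afterLoad hooks when extra work is needed around
// checkpoint and restore.
//
// +stateify savable
type tickState struct {
    val   uint64
    ticks uint64
}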
diff --git a/pkg/sentry/fsimpl/tmpfs/BUILD b/pkg/sentry/fsimpl/tmpfs/BUILD
deleted file mode 100644
index dc8b9bfeb..000000000
--- a/pkg/sentry/fsimpl/tmpfs/BUILD
+++ /dev/null
@@ -1,131 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-licenses(["notice"])
-
-go_template_instance(
- name = "dentry_list",
- out = "dentry_list.go",
- package = "tmpfs",
- prefix = "dentry",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*dentry",
- "Linker": "*dentry",
- },
-)
-
-go_template_instance(
- name = "fstree",
- out = "fstree.go",
- package = "tmpfs",
- prefix = "generic",
- template = "//pkg/sentry/vfs/genericfstree:generic_fstree",
- types = {
- "Dentry": "dentry",
- },
-)
-
-go_template_instance(
- name = "inode_refs",
- out = "inode_refs.go",
- package = "tmpfs",
- prefix = "inode",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "inode",
- },
-)
-
-go_library(
- name = "tmpfs",
- srcs = [
- "dentry_list.go",
- "device_file.go",
- "directory.go",
- "filesystem.go",
- "fstree.go",
- "inode_refs.go",
- "named_pipe.go",
- "regular_file.go",
- "save_restore.go",
- "socket_file.go",
- "symlink.go",
- "tmpfs.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/amutex",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/fspath",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/safemem",
- "//pkg/sentry/arch",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/fsutil",
- "//pkg/sentry/fs/lock",
- "//pkg/sentry/fsmetric",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/pipe",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/memmap",
- "//pkg/sentry/pgalloc",
- "//pkg/sentry/platform",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/uniqueid",
- "//pkg/sentry/usage",
- "//pkg/sentry/vfs",
- "//pkg/sentry/vfs/memxattr",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- ],
-)
-
-go_test(
- name = "benchmark_test",
- size = "small",
- srcs = ["benchmark_test.go"],
- deps = [
- ":tmpfs",
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/fspath",
- "//pkg/refs",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/tmpfs",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- ],
-)
-
-go_test(
- name = "tmpfs_test",
- size = "small",
- srcs = [
- "pipe_test.go",
- "regular_file_test.go",
- "stat_test.go",
- "tmpfs_test.go",
- ],
- library = ":tmpfs",
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/fspath",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/fs/lock",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- "//pkg/syserror",
- "//pkg/usermem",
- ],
-)
diff --git a/pkg/sentry/fsimpl/tmpfs/benchmark_test.go b/pkg/sentry/fsimpl/tmpfs/benchmark_test.go
deleted file mode 100644
index 2c29343c1..000000000
--- a/pkg/sentry/fsimpl/tmpfs/benchmark_test.go
+++ /dev/null
@@ -1,488 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package benchmark_test
-
-import (
- "fmt"
- "runtime"
- "strings"
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/errors/linuxerr"
- "gvisor.dev/gvisor/pkg/fspath"
- "gvisor.dev/gvisor/pkg/refs"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/fs"
- _ "gvisor.dev/gvisor/pkg/sentry/fs/tmpfs"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/tmpfs"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
-)
-
-// Differences from stat_benchmark:
-//
-// - Syscall interception, CopyInPath, copyOutStat, and overlayfs overheads are
-// not included.
-//
-// - *MountStat benchmarks use a tmpfs root mount and a tmpfs submount at /tmp.
-// Non-MountStat benchmarks use a tmpfs root mount and no submounts.
-// stat_benchmark uses a varying root mount, a tmpfs submount at /tmp, and a
-// subdirectory /tmp/<top_dir> (assuming TEST_TMPDIR == "/tmp"). Thus
-// stat_benchmark at depth 1 does a comparable amount of work to *MountStat
-// benchmarks at depth 2, and non-MountStat benchmarks at depth 3.
-var depths = []int{1, 2, 3, 8, 64, 100}
-
-const (
- mountPointName = "tmp"
- filename = "gvisor_test_temp_0_1557494568"
-)
-
-// This is copied from syscalls/linux/sys_file.go, with the dependency on
-// kernel.Task stripped out.
-func fileOpOn(ctx context.Context, mntns *fs.MountNamespace, root, wd *fs.Dirent, dirFD int32, path string, resolve bool, fn func(root *fs.Dirent, d *fs.Dirent) error) error {
- var (
- d *fs.Dirent // The file.
- rel *fs.Dirent // The relative directory for search (if required.)
- err error
- )
-
- // Extract the working directory (maybe).
- if len(path) > 0 && path[0] == '/' {
- // Absolute path; rel can be nil.
- } else if dirFD == linux.AT_FDCWD {
- // Need to reference the working directory.
- rel = wd
- } else {
- // Need to extract the given FD.
- return linuxerr.EBADF
- }
-
- // Lookup the node.
- remainingTraversals := uint(linux.MaxSymlinkTraversals)
- if resolve {
- d, err = mntns.FindInode(ctx, root, rel, path, &remainingTraversals)
- } else {
- d, err = mntns.FindLink(ctx, root, rel, path, &remainingTraversals)
- }
- if err != nil {
- return err
- }
-
- err = fn(root, d)
- d.DecRef(ctx)
- return err
-}
-
-func BenchmarkVFS1TmpfsStat(b *testing.B) {
- for _, depth := range depths {
- b.Run(fmt.Sprintf("%d", depth), func(b *testing.B) {
- ctx := contexttest.Context(b)
-
- // Create VFS.
- tmpfsFS, ok := fs.FindFilesystem("tmpfs")
- if !ok {
- b.Fatalf("failed to find tmpfs filesystem type")
- }
- rootInode, err := tmpfsFS.Mount(ctx, "tmpfs", fs.MountSourceFlags{}, "", nil)
- if err != nil {
- b.Fatalf("failed to create tmpfs root mount: %v", err)
- }
- mntns, err := fs.NewMountNamespace(ctx, rootInode)
- if err != nil {
- b.Fatalf("failed to create mount namespace: %v", err)
- }
- defer mntns.DecRef(ctx)
-
- var filePathBuilder strings.Builder
- filePathBuilder.WriteByte('/')
-
- // Create nested directories with given depth.
- root := mntns.Root()
- defer root.DecRef(ctx)
- d := root
- d.IncRef()
- defer d.DecRef(ctx)
- for i := depth; i > 0; i-- {
- name := fmt.Sprintf("%d", i)
- if err := d.Inode.CreateDirectory(ctx, d, name, fs.FilePermsFromMode(0755)); err != nil {
- b.Fatalf("failed to create directory %q: %v", name, err)
- }
- next, err := d.Walk(ctx, root, name)
- if err != nil {
- b.Fatalf("failed to walk to directory %q: %v", name, err)
- }
- d.DecRef(ctx)
- d = next
- filePathBuilder.WriteString(name)
- filePathBuilder.WriteByte('/')
- }
-
- // Create the file that will be stat'd.
- file, err := d.Inode.Create(ctx, d, filename, fs.FileFlags{Read: true, Write: true}, fs.FilePermsFromMode(0644))
- if err != nil {
- b.Fatalf("failed to create file %q: %v", filename, err)
- }
- file.DecRef(ctx)
- filePathBuilder.WriteString(filename)
- filePath := filePathBuilder.String()
-
- dirPath := false
- runtime.GC()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- err := fileOpOn(ctx, mntns, root, root, linux.AT_FDCWD, filePath, true /* resolve */, func(root *fs.Dirent, d *fs.Dirent) error {
- if dirPath && !fs.IsDir(d.Inode.StableAttr) {
- return linuxerr.ENOTDIR
- }
- uattr, err := d.Inode.UnstableAttr(ctx)
- if err != nil {
- return err
- }
- // Sanity check.
- if uattr.Perms.User.Execute {
- b.Fatalf("got wrong permissions (%0o)", uattr.Perms.LinuxMode())
- }
- return nil
- })
- if err != nil {
- b.Fatalf("stat(%q) failed: %v", filePath, err)
- }
- }
- // Don't include deferred cleanup in benchmark time.
- b.StopTimer()
- })
- }
-}
-
-func BenchmarkVFS2TmpfsStat(b *testing.B) {
- for _, depth := range depths {
- b.Run(fmt.Sprintf("%d", depth), func(b *testing.B) {
- ctx := contexttest.Context(b)
- creds := auth.CredentialsFromContext(ctx)
-
- // Create VFS.
- vfsObj := vfs.VirtualFilesystem{}
- if err := vfsObj.Init(ctx); err != nil {
- b.Fatalf("VFS init: %v", err)
- }
- vfsObj.MustRegisterFilesystemType("tmpfs", tmpfs.FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
- })
- mntns, err := vfsObj.NewMountNamespace(ctx, creds, "", "tmpfs", &vfs.MountOptions{})
- if err != nil {
- b.Fatalf("failed to create tmpfs root mount: %v", err)
- }
- defer mntns.DecRef(ctx)
-
- var filePathBuilder strings.Builder
- filePathBuilder.WriteByte('/')
-
- // Create nested directories with given depth.
- root := mntns.Root()
- root.IncRef()
- defer root.DecRef(ctx)
- vd := root
- vd.IncRef()
- for i := depth; i > 0; i-- {
- name := fmt.Sprintf("%d", i)
- pop := vfs.PathOperation{
- Root: root,
- Start: vd,
- Path: fspath.Parse(name),
- }
- if err := vfsObj.MkdirAt(ctx, creds, &pop, &vfs.MkdirOptions{
- Mode: 0755,
- }); err != nil {
- b.Fatalf("failed to create directory %q: %v", name, err)
- }
- nextVD, err := vfsObj.GetDentryAt(ctx, creds, &pop, &vfs.GetDentryOptions{})
- if err != nil {
- b.Fatalf("failed to walk to directory %q: %v", name, err)
- }
- vd.DecRef(ctx)
- vd = nextVD
- filePathBuilder.WriteString(name)
- filePathBuilder.WriteByte('/')
- }
-
- // Create the file that will be stat'd.
- fd, err := vfsObj.OpenAt(ctx, creds, &vfs.PathOperation{
- Root: root,
- Start: vd,
- Path: fspath.Parse(filename),
- FollowFinalSymlink: true,
- }, &vfs.OpenOptions{
- Flags: linux.O_RDWR | linux.O_CREAT | linux.O_EXCL,
- Mode: 0644,
- })
- vd.DecRef(ctx)
- vd = vfs.VirtualDentry{}
- if err != nil {
- b.Fatalf("failed to create file %q: %v", filename, err)
- }
- defer fd.DecRef(ctx)
- filePathBuilder.WriteString(filename)
- filePath := filePathBuilder.String()
-
- runtime.GC()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- stat, err := vfsObj.StatAt(ctx, creds, &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(filePath),
- FollowFinalSymlink: true,
- }, &vfs.StatOptions{})
- if err != nil {
- b.Fatalf("stat(%q) failed: %v", filePath, err)
- }
- // Sanity check.
- if stat.Mode&^linux.S_IFMT != 0644 {
- b.Fatalf("got wrong permissions (%0o)", stat.Mode)
- }
- }
- // Don't include deferred cleanup in benchmark time.
- b.StopTimer()
- })
- }
-}
-
-func BenchmarkVFS1TmpfsMountStat(b *testing.B) {
- for _, depth := range depths {
- b.Run(fmt.Sprintf("%d", depth), func(b *testing.B) {
- ctx := contexttest.Context(b)
-
- // Create VFS.
- tmpfsFS, ok := fs.FindFilesystem("tmpfs")
- if !ok {
- b.Fatalf("failed to find tmpfs filesystem type")
- }
- rootInode, err := tmpfsFS.Mount(ctx, "tmpfs", fs.MountSourceFlags{}, "", nil)
- if err != nil {
- b.Fatalf("failed to create tmpfs root mount: %v", err)
- }
- mntns, err := fs.NewMountNamespace(ctx, rootInode)
- if err != nil {
- b.Fatalf("failed to create mount namespace: %v", err)
- }
- defer mntns.DecRef(ctx)
-
- var filePathBuilder strings.Builder
- filePathBuilder.WriteByte('/')
-
- // Create and mount the submount.
- root := mntns.Root()
- defer root.DecRef(ctx)
- if err := root.Inode.CreateDirectory(ctx, root, mountPointName, fs.FilePermsFromMode(0755)); err != nil {
- b.Fatalf("failed to create mount point: %v", err)
- }
- mountPoint, err := root.Walk(ctx, root, mountPointName)
- if err != nil {
- b.Fatalf("failed to walk to mount point: %v", err)
- }
- defer mountPoint.DecRef(ctx)
- submountInode, err := tmpfsFS.Mount(ctx, "tmpfs", fs.MountSourceFlags{}, "", nil)
- if err != nil {
- b.Fatalf("failed to create tmpfs submount: %v", err)
- }
- if err := mntns.Mount(ctx, mountPoint, submountInode); err != nil {
- b.Fatalf("failed to mount tmpfs submount: %v", err)
- }
- filePathBuilder.WriteString(mountPointName)
- filePathBuilder.WriteByte('/')
-
- // Create nested directories with given depth.
- d, err := root.Walk(ctx, root, mountPointName)
- if err != nil {
- b.Fatalf("failed to walk to mount root: %v", err)
- }
- defer d.DecRef(ctx)
- for i := depth; i > 0; i-- {
- name := fmt.Sprintf("%d", i)
- if err := d.Inode.CreateDirectory(ctx, d, name, fs.FilePermsFromMode(0755)); err != nil {
- b.Fatalf("failed to create directory %q: %v", name, err)
- }
- next, err := d.Walk(ctx, root, name)
- if err != nil {
- b.Fatalf("failed to walk to directory %q: %v", name, err)
- }
- d.DecRef(ctx)
- d = next
- filePathBuilder.WriteString(name)
- filePathBuilder.WriteByte('/')
- }
-
- // Create the file that will be stat'd.
- file, err := d.Inode.Create(ctx, d, filename, fs.FileFlags{Read: true, Write: true}, fs.FilePermsFromMode(0644))
- if err != nil {
- b.Fatalf("failed to create file %q: %v", filename, err)
- }
- file.DecRef(ctx)
- filePathBuilder.WriteString(filename)
- filePath := filePathBuilder.String()
-
- dirPath := false
- runtime.GC()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- err := fileOpOn(ctx, mntns, root, root, linux.AT_FDCWD, filePath, true /* resolve */, func(root *fs.Dirent, d *fs.Dirent) error {
- if dirPath && !fs.IsDir(d.Inode.StableAttr) {
- return linuxerr.ENOTDIR
- }
- uattr, err := d.Inode.UnstableAttr(ctx)
- if err != nil {
- return err
- }
- // Sanity check.
- if uattr.Perms.User.Execute {
- b.Fatalf("got wrong permissions (%0o)", uattr.Perms.LinuxMode())
- }
- return nil
- })
- if err != nil {
- b.Fatalf("stat(%q) failed: %v", filePath, err)
- }
- }
- // Don't include deferred cleanup in benchmark time.
- b.StopTimer()
- })
- }
-}
-
-func BenchmarkVFS2TmpfsMountStat(b *testing.B) {
- for _, depth := range depths {
- b.Run(fmt.Sprintf("%d", depth), func(b *testing.B) {
- ctx := contexttest.Context(b)
- creds := auth.CredentialsFromContext(ctx)
-
- // Create VFS.
- vfsObj := vfs.VirtualFilesystem{}
- if err := vfsObj.Init(ctx); err != nil {
- b.Fatalf("VFS init: %v", err)
- }
- vfsObj.MustRegisterFilesystemType("tmpfs", tmpfs.FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
- })
- mntns, err := vfsObj.NewMountNamespace(ctx, creds, "", "tmpfs", &vfs.MountOptions{})
- if err != nil {
- b.Fatalf("failed to create tmpfs root mount: %v", err)
- }
- defer mntns.DecRef(ctx)
-
- var filePathBuilder strings.Builder
- filePathBuilder.WriteByte('/')
-
- // Create the mount point.
- root := mntns.Root()
- root.IncRef()
- defer root.DecRef(ctx)
- pop := vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(mountPointName),
- }
- if err := vfsObj.MkdirAt(ctx, creds, &pop, &vfs.MkdirOptions{
- Mode: 0755,
- }); err != nil {
- b.Fatalf("failed to create mount point: %v", err)
- }
- // Save the mount point for later use.
- mountPoint, err := vfsObj.GetDentryAt(ctx, creds, &pop, &vfs.GetDentryOptions{})
- if err != nil {
- b.Fatalf("failed to walk to mount point: %v", err)
- }
- defer mountPoint.DecRef(ctx)
- // Create and mount the submount.
- if _, err := vfsObj.MountAt(ctx, creds, "", &pop, "tmpfs", &vfs.MountOptions{}); err != nil {
- b.Fatalf("failed to mount tmpfs submount: %v", err)
- }
- filePathBuilder.WriteString(mountPointName)
- filePathBuilder.WriteByte('/')
-
- // Create nested directories with given depth.
- vd, err := vfsObj.GetDentryAt(ctx, creds, &pop, &vfs.GetDentryOptions{})
- if err != nil {
- b.Fatalf("failed to walk to mount root: %v", err)
- }
- for i := depth; i > 0; i-- {
- name := fmt.Sprintf("%d", i)
- pop := vfs.PathOperation{
- Root: root,
- Start: vd,
- Path: fspath.Parse(name),
- }
- if err := vfsObj.MkdirAt(ctx, creds, &pop, &vfs.MkdirOptions{
- Mode: 0755,
- }); err != nil {
- b.Fatalf("failed to create directory %q: %v", name, err)
- }
- nextVD, err := vfsObj.GetDentryAt(ctx, creds, &pop, &vfs.GetDentryOptions{})
- if err != nil {
- b.Fatalf("failed to walk to directory %q: %v", name, err)
- }
- vd.DecRef(ctx)
- vd = nextVD
- filePathBuilder.WriteString(name)
- filePathBuilder.WriteByte('/')
- }
-
- // Create the file that will be stat'd.
- fd, err := vfsObj.OpenAt(ctx, creds, &vfs.PathOperation{
- Root: root,
- Start: vd,
- Path: fspath.Parse(filename),
- FollowFinalSymlink: true,
- }, &vfs.OpenOptions{
- Flags: linux.O_RDWR | linux.O_CREAT | linux.O_EXCL,
- Mode: 0644,
- })
- vd.DecRef(ctx)
- if err != nil {
- b.Fatalf("failed to create file %q: %v", filename, err)
- }
- fd.DecRef(ctx)
- filePathBuilder.WriteString(filename)
- filePath := filePathBuilder.String()
-
- runtime.GC()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- stat, err := vfsObj.StatAt(ctx, creds, &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(filePath),
- FollowFinalSymlink: true,
- }, &vfs.StatOptions{})
- if err != nil {
- b.Fatalf("stat(%q) failed: %v", filePath, err)
- }
- // Sanity check.
- if stat.Mode&^linux.S_IFMT != 0644 {
- b.Fatalf("got wrong permissions (%0o)", stat.Mode)
- }
- }
- // Don't include deferred cleanup in benchmark time.
- b.StopTimer()
- })
- }
-}
-
-func init() {
- // Turn off reference leak checking for a fair comparison between vfs1 and
- // vfs2.
- refs.SetLeakMode(refs.NoLeakChecking)
-}
diff --git a/pkg/sentry/fsimpl/tmpfs/dentry_list.go b/pkg/sentry/fsimpl/tmpfs/dentry_list.go
new file mode 100644
index 000000000..b95dd7101
--- /dev/null
+++ b/pkg/sentry/fsimpl/tmpfs/dentry_list.go
@@ -0,0 +1,221 @@
+package tmpfs
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type dentryElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (dentryElementMapper) linkerFor(elem *dentry) *dentry { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type dentryList struct {
+ head *dentry
+ tail *dentry
+}
+
+// Reset resets list l to the empty state.
+func (l *dentryList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *dentryList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *dentryList) Front() *dentry {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *dentryList) Back() *dentry {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *dentryList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (dentryElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *dentryList) PushFront(e *dentry) {
+ linker := dentryElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ dentryElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *dentryList) PushBack(e *dentry) {
+ linker := dentryElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ dentryElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *dentryList) PushBackList(m *dentryList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ dentryElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ dentryElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *dentryList) InsertAfter(b, e *dentry) {
+ bLinker := dentryElementMapper{}.linkerFor(b)
+ eLinker := dentryElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ dentryElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *dentryList) InsertBefore(a, e *dentry) {
+ aLinker := dentryElementMapper{}.linkerFor(a)
+ eLinker := dentryElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ dentryElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *dentryList) Remove(e *dentry) {
+ linker := dentryElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ dentryElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ dentryElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type dentryEntry struct {
+ next *dentry
+ prev *dentry
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *dentryEntry) Next() *dentry {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *dentryEntry) Prev() *dentry {
+ return e.prev
+}
+
+// SetNext assigns 'entry' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *dentryEntry) SetNext(elem *dentry) {
+ e.next = elem
+}
+
+// SetPrev assigns 'entry' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *dentryEntry) SetPrev(elem *dentry) {
+ e.prev = elem
+}
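As a usage note for the generated intrusive list, here is a minimal sketch of walking and draining a dentryList. The evictAll helper and release callback are hypothetical, and the sketch assumes dentry embeds dentryEntry so that Next/Prev are available; the PushFront/Front/Next/Remove calls match the API defined above.

// evictAll removes every element from l, invoking release on each one.
func evictAll(l *dentryList, release func(*dentry)) {
    for d := l.Front(); d != nil; {
        // Capture the successor first: Remove clears d's links.
        next := d.Next()
        l.Remove(d)
        release(d)
        d = next
    }
}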
diff --git a/pkg/sentry/fsimpl/tmpfs/fstree.go b/pkg/sentry/fsimpl/tmpfs/fstree.go
new file mode 100644
index 000000000..d46351488
--- /dev/null
+++ b/pkg/sentry/fsimpl/tmpfs/fstree.go
@@ -0,0 +1,55 @@
+package tmpfs
+
+import (
+ "gvisor.dev/gvisor/pkg/fspath"
+ "gvisor.dev/gvisor/pkg/sentry/vfs"
+)
+
+// IsAncestorDentry returns true if d is an ancestor of d2; that is, d is
+// either d2's parent or an ancestor of d2's parent.
+func genericIsAncestorDentry(d, d2 *dentry) bool {
+ for d2 != nil {
+ if d2.parent == d {
+ return true
+ }
+ if d2.parent == d2 {
+ return false
+ }
+ d2 = d2.parent
+ }
+ return false
+}
+
+// ParentOrSelf returns d.parent. If d.parent is nil, ParentOrSelf returns d.
+func genericParentOrSelf(d *dentry) *dentry {
+ if d.parent != nil {
+ return d.parent
+ }
+ return d
+}
+
+// PrependPath is a generic implementation of FilesystemImpl.PrependPath().
+func genericPrependPath(vfsroot vfs.VirtualDentry, mnt *vfs.Mount, d *dentry, b *fspath.Builder) error {
+ for {
+ if mnt == vfsroot.Mount() && &d.vfsd == vfsroot.Dentry() {
+ return vfs.PrependPathAtVFSRootError{}
+ }
+ if mnt != nil && &d.vfsd == mnt.Root() {
+ return nil
+ }
+ if d.parent == nil {
+ return vfs.PrependPathAtNonMountRootError{}
+ }
+ b.PrependComponent(d.name)
+ d = d.parent
+ }
+}
+
+// DebugPathname returns a pathname to d relative to its filesystem root.
+// DebugPathname does not correspond to any Linux function; it's used to
+// generate dentry pathnames for debugging.
+func genericDebugPathname(d *dentry) string {
+ var b fspath.Builder
+ _ = genericPrependPath(vfs.VirtualDentry{}, nil, d, &b)
+ return b.String()
+}
diff --git a/pkg/sentry/fsimpl/tmpfs/inode_refs.go b/pkg/sentry/fsimpl/tmpfs/inode_refs.go
new file mode 100644
index 000000000..f0f032e0c
--- /dev/null
+++ b/pkg/sentry/fsimpl/tmpfs/inode_refs.go
@@ -0,0 +1,140 @@
+package tmpfs
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const inodeenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var inodeobj *inode
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type inodeRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *inodeRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *inodeRefs) RefType() string {
+ return fmt.Sprintf("%T", inodeobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *inodeRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *inodeRefs) LogRefs() bool {
+ return inodeenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *inodeRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *inodeRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if inodeenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.TryRefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *inodeRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if inodeenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *inodeRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if inodeenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *inodeRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
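For the generated reference counter above, the expected call pattern looks roughly like the sketch below. The refsLifecycle helper and destroy callback are illustrative only; InitRefs, IncRef, TryIncRef, and DecRef are the methods defined in this file.

// refsLifecycle demonstrates the intended call sequence for inodeRefs.
func refsLifecycle(destroy func()) {
    var r inodeRefs
    r.InitRefs()       // count = 1; registers with the leak checker if enabled
    r.IncRef()         // count = 2
    if r.TryIncRef() { // succeeds while the count is still positive
        r.DecRef(destroy)
    }
    r.DecRef(destroy)
    r.DecRef(destroy) // final DecRef calls destroy and unregisters the counter
}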
diff --git a/pkg/sentry/fsimpl/tmpfs/pipe_test.go b/pkg/sentry/fsimpl/tmpfs/pipe_test.go
deleted file mode 100644
index 418c7994e..000000000
--- a/pkg/sentry/fsimpl/tmpfs/pipe_test.go
+++ /dev/null
@@ -1,240 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tmpfs
-
-import (
- "bytes"
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/errors/linuxerr"
- "gvisor.dev/gvisor/pkg/fspath"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-const fileName = "mypipe"
-
-func TestSeparateFDs(t *testing.T) {
- ctx, creds, vfsObj, root := setup(t)
- defer root.DecRef(ctx)
-
- // Open the read side. This is done concurrently because opening one
- // end of the pipe blocks until the other end is opened.
- pop := vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(fileName),
- FollowFinalSymlink: true,
- }
- rfdchan := make(chan *vfs.FileDescription)
- go func() {
- openOpts := vfs.OpenOptions{Flags: linux.O_RDONLY}
- rfd, _ := vfsObj.OpenAt(ctx, creds, &pop, &openOpts)
- rfdchan <- rfd
- }()
-
- // Open the write side.
- openOpts := vfs.OpenOptions{Flags: linux.O_WRONLY}
- wfd, err := vfsObj.OpenAt(ctx, creds, &pop, &openOpts)
- if err != nil {
- t.Fatalf("failed to open pipe for writing %q: %v", fileName, err)
- }
- defer wfd.DecRef(ctx)
-
- rfd, ok := <-rfdchan
- if !ok {
- t.Fatalf("failed to open pipe for reading %q", fileName)
- }
- defer rfd.DecRef(ctx)
-
- const msg = "vamos azul"
- checkEmpty(ctx, t, rfd)
- checkWrite(ctx, t, wfd, msg)
- checkRead(ctx, t, rfd, msg)
-}
-
-func TestNonblockingRead(t *testing.T) {
- ctx, creds, vfsObj, root := setup(t)
- defer root.DecRef(ctx)
-
- // Open the read side as nonblocking.
- pop := vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(fileName),
- FollowFinalSymlink: true,
- }
- openOpts := vfs.OpenOptions{Flags: linux.O_RDONLY | linux.O_NONBLOCK}
- rfd, err := vfsObj.OpenAt(ctx, creds, &pop, &openOpts)
- if err != nil {
- t.Fatalf("failed to open pipe for reading %q: %v", fileName, err)
- }
- defer rfd.DecRef(ctx)
-
- // Open the write side.
- openOpts = vfs.OpenOptions{Flags: linux.O_WRONLY}
- wfd, err := vfsObj.OpenAt(ctx, creds, &pop, &openOpts)
- if err != nil {
- t.Fatalf("failed to open pipe for writing %q: %v", fileName, err)
- }
- defer wfd.DecRef(ctx)
-
- const msg = "geh blau"
- checkEmpty(ctx, t, rfd)
- checkWrite(ctx, t, wfd, msg)
- checkRead(ctx, t, rfd, msg)
-}
-
-func TestNonblockingWriteError(t *testing.T) {
- ctx, creds, vfsObj, root := setup(t)
- defer root.DecRef(ctx)
-
- // Open the write side as nonblocking, which should return ENXIO.
- pop := vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(fileName),
- FollowFinalSymlink: true,
- }
- openOpts := vfs.OpenOptions{Flags: linux.O_WRONLY | linux.O_NONBLOCK}
- _, err := vfsObj.OpenAt(ctx, creds, &pop, &openOpts)
- if !linuxerr.Equals(linuxerr.ENXIO, err) {
- t.Fatalf("expected ENXIO, but got error: %v", err)
- }
-}
-
-func TestSingleFD(t *testing.T) {
- ctx, creds, vfsObj, root := setup(t)
- defer root.DecRef(ctx)
-
- // Open the pipe as readable and writable.
- pop := vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(fileName),
- FollowFinalSymlink: true,
- }
- openOpts := vfs.OpenOptions{Flags: linux.O_RDWR}
- fd, err := vfsObj.OpenAt(ctx, creds, &pop, &openOpts)
- if err != nil {
- t.Fatalf("failed to open pipe for writing %q: %v", fileName, err)
- }
- defer fd.DecRef(ctx)
-
- const msg = "forza blu"
- checkEmpty(ctx, t, fd)
- checkWrite(ctx, t, fd, msg)
- checkRead(ctx, t, fd, msg)
-}
-
-// setup creates a VFS with a pipe in the root directory at path fileName. The
-// returned VirtualDentry must be DecRef()'d by the caller. It calls t.Fatal
-// upon failure.
-func setup(t *testing.T) (context.Context, *auth.Credentials, *vfs.VirtualFilesystem, vfs.VirtualDentry) {
- ctx := contexttest.Context(t)
- creds := auth.CredentialsFromContext(ctx)
-
- // Create VFS.
- vfsObj := &vfs.VirtualFilesystem{}
- if err := vfsObj.Init(ctx); err != nil {
- t.Fatalf("VFS init: %v", err)
- }
- vfsObj.MustRegisterFilesystemType("tmpfs", FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
- })
- mntns, err := vfsObj.NewMountNamespace(ctx, creds, "", "tmpfs", &vfs.MountOptions{})
- if err != nil {
- t.Fatalf("failed to create tmpfs root mount: %v", err)
- }
-
- // Create the pipe.
- root := mntns.Root()
- root.IncRef()
- pop := vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(fileName),
- }
- mknodOpts := vfs.MknodOptions{Mode: linux.ModeNamedPipe | 0644}
- if err := vfsObj.MknodAt(ctx, creds, &pop, &mknodOpts); err != nil {
- t.Fatalf("failed to create file %q: %v", fileName, err)
- }
-
- // Sanity check: the pipe file exists and has the correct mode.
- stat, err := vfsObj.StatAt(ctx, creds, &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(fileName),
- FollowFinalSymlink: true,
- }, &vfs.StatOptions{})
- if err != nil {
- t.Fatalf("stat(%q) failed: %v", fileName, err)
- }
- if stat.Mode&^linux.S_IFMT != 0644 {
- t.Errorf("got wrong permissions (%0o)", stat.Mode)
- }
- if stat.Mode&linux.S_IFMT != linux.ModeNamedPipe {
- t.Errorf("got wrong file type (%0o)", stat.Mode)
- }
-
- return ctx, creds, vfsObj, root
-}
-
-// checkEmpty calls t.Fatal if the pipe in fd is not empty.
-func checkEmpty(ctx context.Context, t *testing.T, fd *vfs.FileDescription) {
- readData := make([]byte, 1)
- dst := usermem.BytesIOSequence(readData)
- bytesRead, err := fd.Read(ctx, dst, vfs.ReadOptions{})
- if err != syserror.ErrWouldBlock {
- t.Fatalf("expected ErrWouldBlock reading from empty pipe %q, but got: %v", fileName, err)
- }
- if bytesRead != 0 {
- t.Fatalf("expected to read 0 bytes, but got %d", bytesRead)
- }
-}
-
-// checkWrite calls t.Fatal if it fails to write all of msg to fd.
-func checkWrite(ctx context.Context, t *testing.T, fd *vfs.FileDescription, msg string) {
- writeData := []byte(msg)
- src := usermem.BytesIOSequence(writeData)
- bytesWritten, err := fd.Write(ctx, src, vfs.WriteOptions{})
- if err != nil {
- t.Fatalf("error writing to pipe %q: %v", fileName, err)
- }
- if bytesWritten != int64(len(writeData)) {
- t.Fatalf("expected to write %d bytes, but wrote %d", len(writeData), bytesWritten)
- }
-}
-
-// checkRead calls t.Fatal if it fails to read msg from fd.
-func checkRead(ctx context.Context, t *testing.T, fd *vfs.FileDescription, msg string) {
- readData := make([]byte, len(msg))
- dst := usermem.BytesIOSequence(readData)
- bytesRead, err := fd.Read(ctx, dst, vfs.ReadOptions{})
- if err != nil {
- t.Fatalf("error reading from pipe %q: %v", fileName, err)
- }
- if bytesRead != int64(len(msg)) {
- t.Fatalf("expected to read %d bytes, but got %d", len(msg), bytesRead)
- }
- if !bytes.Equal(readData, []byte(msg)) {
- t.Fatalf("expected to read %q from pipe, but got %q", msg, string(readData))
- }
-}
diff --git a/pkg/sentry/fsimpl/tmpfs/regular_file_test.go b/pkg/sentry/fsimpl/tmpfs/regular_file_test.go
deleted file mode 100644
index 4393cc13b..000000000
--- a/pkg/sentry/fsimpl/tmpfs/regular_file_test.go
+++ /dev/null
@@ -1,349 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tmpfs
-
-import (
- "bytes"
- "fmt"
- "io"
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/fs/lock"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-// Test that we can write some data to a file and read it back.
-func TestSimpleWriteRead(t *testing.T) {
- ctx := contexttest.Context(t)
- fd, cleanup, err := newFileFD(ctx, 0644)
- if err != nil {
- t.Fatal(err)
- }
- defer cleanup()
-
- // Write.
- data := []byte("foobarbaz")
- n, err := fd.Write(ctx, usermem.BytesIOSequence(data), vfs.WriteOptions{})
- if err != nil {
- t.Fatalf("fd.Write failed: %v", err)
- }
- if n != int64(len(data)) {
- t.Errorf("fd.Write got short write length %d, want %d", n, len(data))
- }
- if got, want := fd.Impl().(*regularFileFD).off, int64(len(data)); got != want {
- t.Errorf("fd.Write left offset at %d, want %d", got, want)
- }
-
- // Seek back to beginning.
- if _, err := fd.Seek(ctx, 0, linux.SEEK_SET); err != nil {
- t.Fatalf("fd.Seek failed: %v", err)
- }
- if got, want := fd.Impl().(*regularFileFD).off, int64(0); got != want {
- t.Errorf("fd.Seek(0) left offset at %d, want %d", got, want)
- }
-
- // Read.
- buf := make([]byte, len(data))
- n, err = fd.Read(ctx, usermem.BytesIOSequence(buf), vfs.ReadOptions{})
- if err != nil && err != io.EOF {
- t.Fatalf("fd.Read failed: %v", err)
- }
- if n != int64(len(data)) {
- t.Errorf("fd.Read got short read length %d, want %d", n, len(data))
- }
- if got, want := string(buf), string(data); got != want {
- t.Errorf("Read got %q want %s", got, want)
- }
- if got, want := fd.Impl().(*regularFileFD).off, int64(len(data)); got != want {
- t.Errorf("fd.Write left offset at %d, want %d", got, want)
- }
-}
-
-func TestPWrite(t *testing.T) {
- ctx := contexttest.Context(t)
- fd, cleanup, err := newFileFD(ctx, 0644)
- if err != nil {
- t.Fatal(err)
- }
- defer cleanup()
-
- // Fill file with 1k 'a's.
- data := bytes.Repeat([]byte{'a'}, 1000)
- n, err := fd.Write(ctx, usermem.BytesIOSequence(data), vfs.WriteOptions{})
- if err != nil {
- t.Fatalf("fd.Write failed: %v", err)
- }
- if n != int64(len(data)) {
- t.Errorf("fd.Write got short write length %d, want %d", n, len(data))
- }
-
- // Write "gVisor is awesome" at various offsets.
- buf := []byte("gVisor is awesome")
- offsets := []int{0, 1, 2, 10, 20, 50, 100, len(data) - 100, len(data) - 1, len(data), len(data) + 1}
- for _, offset := range offsets {
- name := fmt.Sprintf("PWrite offset=%d", offset)
- t.Run(name, func(t *testing.T) {
- n, err := fd.PWrite(ctx, usermem.BytesIOSequence(buf), int64(offset), vfs.WriteOptions{})
- if err != nil {
- t.Errorf("fd.PWrite got err %v want nil", err)
- }
- if n != int64(len(buf)) {
- t.Errorf("fd.PWrite got %d bytes want %d", n, len(buf))
- }
-
- // Update data to reflect expected file contents.
- if len(data) < offset+len(buf) {
- data = append(data, make([]byte, (offset+len(buf))-len(data))...)
- }
- copy(data[offset:], buf)
-
- // Read the whole file and compare with data.
- readBuf := make([]byte, len(data))
- n, err = fd.PRead(ctx, usermem.BytesIOSequence(readBuf), 0, vfs.ReadOptions{})
- if err != nil {
- t.Fatalf("fd.PRead failed: %v", err)
- }
- if n != int64(len(data)) {
- t.Errorf("fd.PRead got short read length %d, want %d", n, len(data))
- }
- if got, want := string(readBuf), string(data); got != want {
- t.Errorf("PRead got %q want %s", got, want)
- }
-
- })
- }
-}
-
-func TestLocks(t *testing.T) {
- ctx := contexttest.Context(t)
- fd, cleanup, err := newFileFD(ctx, 0644)
- if err != nil {
- t.Fatal(err)
- }
- defer cleanup()
-
- uid1 := 123
- uid2 := 456
- if err := fd.Impl().LockBSD(ctx, uid1, 0 /* ownerPID */, lock.ReadLock, nil); err != nil {
- t.Fatalf("fd.Impl().LockBSD failed: err = %v", err)
- }
- if err := fd.Impl().LockBSD(ctx, uid2, 0 /* ownerPID */, lock.ReadLock, nil); err != nil {
- t.Fatalf("fd.Impl().LockBSD failed: err = %v", err)
- }
- if got, want := fd.Impl().LockBSD(ctx, uid2, 0 /* ownerPID */, lock.WriteLock, nil), syserror.ErrWouldBlock; got != want {
- t.Fatalf("fd.Impl().LockBSD failed: got = %v, want = %v", got, want)
- }
- if err := fd.Impl().UnlockBSD(ctx, uid1); err != nil {
- t.Fatalf("fd.Impl().UnlockBSD failed: err = %v", err)
- }
- if err := fd.Impl().LockBSD(ctx, uid2, 0 /* ownerPID */, lock.WriteLock, nil); err != nil {
- t.Fatalf("fd.Impl().LockBSD failed: err = %v", err)
- }
-
- if err := fd.Impl().LockPOSIX(ctx, uid1, 0 /* ownerPID */, lock.ReadLock, lock.LockRange{Start: 0, End: 1}, nil); err != nil {
- t.Fatalf("fd.Impl().LockPOSIX failed: err = %v", err)
- }
- if err := fd.Impl().LockPOSIX(ctx, uid2, 0 /* ownerPID */, lock.ReadLock, lock.LockRange{Start: 1, End: 2}, nil); err != nil {
- t.Fatalf("fd.Impl().LockPOSIX failed: err = %v", err)
- }
- if err := fd.Impl().LockPOSIX(ctx, uid1, 0 /* ownerPID */, lock.WriteLock, lock.LockRange{Start: 0, End: 1}, nil); err != nil {
- t.Fatalf("fd.Impl().LockPOSIX failed: err = %v", err)
- }
- if got, want := fd.Impl().LockPOSIX(ctx, uid2, 0 /* ownerPID */, lock.ReadLock, lock.LockRange{Start: 0, End: 1}, nil), syserror.ErrWouldBlock; got != want {
- t.Fatalf("fd.Impl().LockPOSIX failed: got = %v, want = %v", got, want)
- }
- if err := fd.Impl().UnlockPOSIX(ctx, uid1, lock.LockRange{Start: 0, End: 1}); err != nil {
- t.Fatalf("fd.Impl().UnlockPOSIX failed: err = %v", err)
- }
-}
-
-func TestPRead(t *testing.T) {
- ctx := contexttest.Context(t)
- fd, cleanup, err := newFileFD(ctx, 0644)
- if err != nil {
- t.Fatal(err)
- }
- defer cleanup()
-
- // Write 100 sequences of 'gVisor is awesome'.
- data := bytes.Repeat([]byte("gVisor is awesome"), 100)
- n, err := fd.Write(ctx, usermem.BytesIOSequence(data), vfs.WriteOptions{})
- if err != nil {
- t.Fatalf("fd.Write failed: %v", err)
- }
- if n != int64(len(data)) {
- t.Errorf("fd.Write got short write length %d, want %d", n, len(data))
- }
-
- // Read various sizes from various offsets.
- sizes := []int{0, 1, 2, 10, 20, 50, 100, 1000}
- offsets := []int{0, 1, 2, 10, 20, 50, 100, 1000, len(data) - 100, len(data) - 1, len(data), len(data) + 1}
-
- for _, size := range sizes {
- for _, offset := range offsets {
- name := fmt.Sprintf("PRead offset=%d size=%d", offset, size)
- t.Run(name, func(t *testing.T) {
- var (
- wantRead []byte
- wantErr error
- )
- if offset < len(data) {
- wantRead = data[offset:]
- } else if size > 0 {
- wantErr = io.EOF
- }
- if offset+size < len(data) {
- wantRead = wantRead[:size]
- }
- buf := make([]byte, size)
- n, err := fd.PRead(ctx, usermem.BytesIOSequence(buf), int64(offset), vfs.ReadOptions{})
- if err != wantErr {
- t.Errorf("fd.PRead got err %v want %v", err, wantErr)
- }
- if n != int64(len(wantRead)) {
- t.Errorf("fd.PRead got %d bytes want %d", n, len(wantRead))
- }
- if got := string(buf[:n]); got != string(wantRead) {
- t.Errorf("fd.PRead got %q want %q", got, string(wantRead))
- }
- })
- }
- }
-}
-
-func TestTruncate(t *testing.T) {
- ctx := contexttest.Context(t)
- fd, cleanup, err := newFileFD(ctx, 0644)
- if err != nil {
- t.Fatal(err)
- }
- defer cleanup()
-
- // Fill the file with some data.
- data := bytes.Repeat([]byte("gVisor is awesome"), 100)
- written, err := fd.Write(ctx, usermem.BytesIOSequence(data), vfs.WriteOptions{})
- if err != nil {
- t.Fatalf("fd.Write failed: %v", err)
- }
-
- // Size should be same as written.
- sizeStatOpts := vfs.StatOptions{Mask: linux.STATX_SIZE}
- stat, err := fd.Stat(ctx, sizeStatOpts)
- if err != nil {
- t.Fatalf("fd.Stat failed: %v", err)
- }
- if got, want := int64(stat.Size), written; got != want {
- t.Errorf("fd.Stat got size %d, want %d", got, want)
- }
-
- // Truncate down.
- newSize := uint64(10)
- if err := fd.SetStat(ctx, vfs.SetStatOptions{
- Stat: linux.Statx{
- Mask: linux.STATX_SIZE,
- Size: newSize,
- },
- }); err != nil {
- t.Errorf("fd.Truncate failed: %v", err)
- }
- // Size should be updated.
- statAfterTruncateDown, err := fd.Stat(ctx, sizeStatOpts)
- if err != nil {
- t.Fatalf("fd.Stat failed: %v", err)
- }
- if got, want := statAfterTruncateDown.Size, newSize; got != want {
- t.Errorf("fd.Stat got size %d, want %d", got, want)
- }
- // We should only read newSize worth of data.
- buf := make([]byte, 1000)
- if n, err := fd.PRead(ctx, usermem.BytesIOSequence(buf), 0, vfs.ReadOptions{}); err != nil && err != io.EOF {
- t.Fatalf("fd.PRead failed: %v", err)
- } else if uint64(n) != newSize {
- t.Errorf("fd.PRead got size %d, want %d", n, newSize)
- }
- // Mtime and Ctime should be bumped.
- if got := statAfterTruncateDown.Mtime.ToNsec(); got <= stat.Mtime.ToNsec() {
- t.Errorf("fd.Stat got Mtime %v, want > %v", got, stat.Mtime)
- }
- if got := statAfterTruncateDown.Ctime.ToNsec(); got <= stat.Ctime.ToNsec() {
- t.Errorf("fd.Stat got Ctime %v, want > %v", got, stat.Ctime)
- }
-
- // Truncate up.
- newSize = 100
- if err := fd.SetStat(ctx, vfs.SetStatOptions{
- Stat: linux.Statx{
- Mask: linux.STATX_SIZE,
- Size: newSize,
- },
- }); err != nil {
- t.Errorf("fd.Truncate failed: %v", err)
- }
- // Size should be updated.
- statAfterTruncateUp, err := fd.Stat(ctx, sizeStatOpts)
- if err != nil {
- t.Fatalf("fd.Stat failed: %v", err)
- }
- if got, want := statAfterTruncateUp.Size, newSize; got != want {
- t.Errorf("fd.Stat got size %d, want %d", got, want)
- }
- // We should read newSize worth of data.
- buf = make([]byte, 1000)
- if n, err := fd.PRead(ctx, usermem.BytesIOSequence(buf), 0, vfs.ReadOptions{}); err != nil && err != io.EOF {
- t.Fatalf("fd.PRead failed: %v", err)
- } else if uint64(n) != newSize {
- t.Errorf("fd.PRead got size %d, want %d", n, newSize)
- }
-	// Bytes beyond offset 10 should be zero, since we previously truncated to 10.
- for i := uint64(10); i < newSize; i++ {
- if buf[i] != 0 {
- t.Errorf("fd.PRead got byte %d=%x, want 0", i, buf[i])
- break
- }
- }
- // Mtime and Ctime should be bumped.
- if got := statAfterTruncateUp.Mtime.ToNsec(); got <= statAfterTruncateDown.Mtime.ToNsec() {
- t.Errorf("fd.Stat got Mtime %v, want > %v", got, statAfterTruncateDown.Mtime)
- }
- if got := statAfterTruncateUp.Ctime.ToNsec(); got <= statAfterTruncateDown.Ctime.ToNsec() {
-	t.Errorf("fd.Stat got Ctime %v, want > %v", got, statAfterTruncateDown.Ctime)
- }
-
- // Truncate to the current size.
- newSize = statAfterTruncateUp.Size
- if err := fd.SetStat(ctx, vfs.SetStatOptions{
- Stat: linux.Statx{
- Mask: linux.STATX_SIZE,
- Size: newSize,
- },
- }); err != nil {
- t.Errorf("fd.Truncate failed: %v", err)
- }
- statAfterTruncateNoop, err := fd.Stat(ctx, sizeStatOpts)
- if err != nil {
- t.Fatalf("fd.Stat failed: %v", err)
- }
-	// Mtime and Ctime should not be bumped, since the operation is a no-op.
- if got := statAfterTruncateNoop.Mtime.ToNsec(); got != statAfterTruncateUp.Mtime.ToNsec() {
- t.Errorf("fd.Stat got Mtime %v, want %v", got, statAfterTruncateUp.Mtime)
- }
- if got := statAfterTruncateNoop.Ctime.ToNsec(); got != statAfterTruncateUp.Ctime.ToNsec() {
- t.Errorf("fd.Stat got Ctime %v, want %v", got, statAfterTruncateUp.Ctime)
- }
-}
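
The table-driven pread cases deleted above derive the expected bytes and expected error for each (offset, size) pair from the data that was written. As a standalone illustration of that expectation logic, here is a minimal sketch using only the standard library; the helper name expectedPRead is hypothetical and not part of the tree:

package main

import (
	"fmt"
	"io"
)

// expectedPRead returns the bytes a pread of size bytes at offset should
// observe from a file whose contents are data, and whether io.EOF is the
// expected error (a read starting at or past EOF with size > 0 returns EOF).
func expectedPRead(data []byte, offset, size int) ([]byte, error) {
	var want []byte
	var wantErr error
	if offset < len(data) {
		want = data[offset:]
	} else if size > 0 {
		wantErr = io.EOF
	}
	if size < len(want) {
		want = want[:size]
	}
	return want, wantErr
}

func main() {
	data := []byte("gVisor is awesome")
	got, err := expectedPRead(data, 10, 4)
	fmt.Printf("%q %v\n", got, err) // "awes" <nil>
}
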
diff --git a/pkg/sentry/fsimpl/tmpfs/stat_test.go b/pkg/sentry/fsimpl/tmpfs/stat_test.go
deleted file mode 100644
index f7ee4aab2..000000000
--- a/pkg/sentry/fsimpl/tmpfs/stat_test.go
+++ /dev/null
@@ -1,236 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tmpfs
-
-import (
- "fmt"
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
-)
-
-func TestStatAfterCreate(t *testing.T) {
- ctx := contexttest.Context(t)
- mode := linux.FileMode(0644)
-
- // Run with different file types.
- for _, typ := range []string{"file", "dir", "pipe"} {
- t.Run(fmt.Sprintf("type=%q", typ), func(t *testing.T) {
- var (
- fd *vfs.FileDescription
- cleanup func()
- err error
- )
- switch typ {
- case "file":
- fd, cleanup, err = newFileFD(ctx, mode)
- case "dir":
- fd, cleanup, err = newDirFD(ctx, mode)
- case "pipe":
- fd, cleanup, err = newPipeFD(ctx, mode)
- default:
- panic(fmt.Sprintf("unknown typ %q", typ))
- }
- if err != nil {
- t.Fatal(err)
- }
- defer cleanup()
-
- got, err := fd.Stat(ctx, vfs.StatOptions{})
- if err != nil {
- t.Fatalf("Stat failed: %v", err)
- }
-
- // Atime, Ctime, Mtime should all be current time (non-zero).
- atime, ctime, mtime := got.Atime.ToNsec(), got.Ctime.ToNsec(), got.Mtime.ToNsec()
- if atime != ctime || ctime != mtime {
- t.Errorf("got atime=%d ctime=%d mtime=%d, wanted equal values", atime, ctime, mtime)
- }
- if atime == 0 {
- t.Errorf("got atime=%d, want non-zero", atime)
- }
-
- // Btime should be 0, as it is not set by tmpfs.
- if btime := got.Btime.ToNsec(); btime != 0 {
- t.Errorf("got btime %d, want 0", got.Btime.ToNsec())
- }
-
-	// Size should be 0, except for directories, which report a size of
-	// 20 per entry; even an otherwise-empty directory contains the "."
-	// and ".." entries, so it has size 40.
- wantSize := uint64(0)
- if typ == "dir" {
- wantSize = 40
- }
- if got.Size != wantSize {
- t.Errorf("got size %d, want %d", got.Size, wantSize)
- }
-
- // Nlink should be 1 for files, 2 for dirs.
- wantNlink := uint32(1)
- if typ == "dir" {
- wantNlink = 2
- }
- if got.Nlink != wantNlink {
- t.Errorf("got nlink %d, want %d", got.Nlink, wantNlink)
- }
-
- // UID and GID are set from context creds.
- creds := auth.CredentialsFromContext(ctx)
- if got.UID != uint32(creds.EffectiveKUID) {
- t.Errorf("got uid %d, want %d", got.UID, uint32(creds.EffectiveKUID))
- }
- if got.GID != uint32(creds.EffectiveKGID) {
- t.Errorf("got gid %d, want %d", got.GID, uint32(creds.EffectiveKGID))
- }
-
- // Mode.
- wantMode := uint16(mode)
- switch typ {
- case "file":
- wantMode |= linux.S_IFREG
- case "dir":
- wantMode |= linux.S_IFDIR
- case "pipe":
- wantMode |= linux.S_IFIFO
- default:
- panic(fmt.Sprintf("unknown typ %q", typ))
- }
-
- if got.Mode != wantMode {
- t.Errorf("got mode %x, want %x", got.Mode, wantMode)
- }
-
- // Ino.
- if got.Ino == 0 {
- t.Errorf("got ino %d, want not 0", got.Ino)
- }
- })
- }
-}
-
-func TestSetStatAtime(t *testing.T) {
- ctx := contexttest.Context(t)
- fd, cleanup, err := newFileFD(ctx, 0644)
- if err != nil {
- t.Fatal(err)
- }
- defer cleanup()
-
- allStatOptions := vfs.StatOptions{Mask: linux.STATX_ALL}
-
- // Get initial stat.
- initialStat, err := fd.Stat(ctx, allStatOptions)
- if err != nil {
- t.Fatalf("Stat failed: %v", err)
- }
-
- // Set atime, but without the mask.
- if err := fd.SetStat(ctx, vfs.SetStatOptions{Stat: linux.Statx{
- Mask: 0,
- Atime: linux.NsecToStatxTimestamp(100),
- }}); err != nil {
- t.Errorf("SetStat atime without mask failed: %v", err)
- }
- // Atime should be unchanged.
- if gotStat, err := fd.Stat(ctx, allStatOptions); err != nil {
- t.Errorf("Stat got error: %v", err)
- } else if gotStat.Atime != initialStat.Atime {
- t.Errorf("Stat got atime %d, want %d", gotStat.Atime, initialStat.Atime)
- }
-
- // Set atime, this time included in the mask.
- setStat := linux.Statx{
- Mask: linux.STATX_ATIME,
- Atime: linux.NsecToStatxTimestamp(100),
- }
- if err := fd.SetStat(ctx, vfs.SetStatOptions{Stat: setStat}); err != nil {
- t.Errorf("SetStat atime with mask failed: %v", err)
- }
- if gotStat, err := fd.Stat(ctx, allStatOptions); err != nil {
- t.Errorf("Stat got error: %v", err)
- } else if gotStat.Atime != setStat.Atime {
- t.Errorf("Stat got atime %d, want %d", gotStat.Atime, setStat.Atime)
- }
-}
-
-func TestSetStat(t *testing.T) {
- ctx := contexttest.Context(t)
- mode := linux.FileMode(0644)
-
- // Run with different file types.
- for _, typ := range []string{"file", "dir", "pipe"} {
- t.Run(fmt.Sprintf("type=%q", typ), func(t *testing.T) {
- var (
- fd *vfs.FileDescription
- cleanup func()
- err error
- )
- switch typ {
- case "file":
- fd, cleanup, err = newFileFD(ctx, mode)
- case "dir":
- fd, cleanup, err = newDirFD(ctx, mode)
- case "pipe":
- fd, cleanup, err = newPipeFD(ctx, mode)
- default:
- panic(fmt.Sprintf("unknown typ %q", typ))
- }
- if err != nil {
- t.Fatal(err)
- }
- defer cleanup()
-
- allStatOptions := vfs.StatOptions{Mask: linux.STATX_ALL}
-
- // Get initial stat.
- initialStat, err := fd.Stat(ctx, allStatOptions)
- if err != nil {
- t.Fatalf("Stat failed: %v", err)
- }
-
- // Set atime, but without the mask.
- if err := fd.SetStat(ctx, vfs.SetStatOptions{Stat: linux.Statx{
- Mask: 0,
- Atime: linux.NsecToStatxTimestamp(100),
- }}); err != nil {
- t.Errorf("SetStat atime without mask failed: %v", err)
- }
- // Atime should be unchanged.
- if gotStat, err := fd.Stat(ctx, allStatOptions); err != nil {
- t.Errorf("Stat got error: %v", err)
- } else if gotStat.Atime != initialStat.Atime {
- t.Errorf("Stat got atime %d, want %d", gotStat.Atime, initialStat.Atime)
- }
-
- // Set atime, this time included in the mask.
- setStat := linux.Statx{
- Mask: linux.STATX_ATIME,
- Atime: linux.NsecToStatxTimestamp(100),
- }
- if err := fd.SetStat(ctx, vfs.SetStatOptions{Stat: setStat}); err != nil {
- t.Errorf("SetStat atime with mask failed: %v", err)
- }
- if gotStat, err := fd.Stat(ctx, allStatOptions); err != nil {
- t.Errorf("Stat got error: %v", err)
- } else if gotStat.Atime != setStat.Atime {
- t.Errorf("Stat got atime %d, want %d", gotStat.Atime, setStat.Atime)
- }
- })
- }
-}
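
The deleted SetStat tests hinge on the fact that a field in linux.Statx only takes effect when its bit is set in Mask; setting Atime with Mask == 0 is a no-op. A minimal sketch of that mask-gated pattern, assuming the context, vfs, and abi/linux packages already imported by the deleted file (setTimes is a hypothetical helper, not part of the tree):

// setTimes applies atime and mtime to fd. Fields whose bits are absent from
// Mask are ignored by SetStat, which is exactly what the tests above rely on.
func setTimes(ctx context.Context, fd *vfs.FileDescription, atimeNs, mtimeNs int64) error {
	return fd.SetStat(ctx, vfs.SetStatOptions{
		Stat: linux.Statx{
			Mask:  linux.STATX_ATIME | linux.STATX_MTIME,
			Atime: linux.NsecToStatxTimestamp(atimeNs),
			Mtime: linux.NsecToStatxTimestamp(mtimeNs),
		},
	})
}
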
diff --git a/pkg/sentry/fsimpl/tmpfs/tmpfs_state_autogen.go b/pkg/sentry/fsimpl/tmpfs/tmpfs_state_autogen.go
new file mode 100644
index 000000000..6d9f09eef
--- /dev/null
+++ b/pkg/sentry/fsimpl/tmpfs/tmpfs_state_autogen.go
@@ -0,0 +1,593 @@
+// automatically generated by stateify.
+
+package tmpfs
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (l *dentryList) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.dentryList"
+}
+
+func (l *dentryList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *dentryList) beforeSave() {}
+
+// +checklocksignore
+func (l *dentryList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *dentryList) afterLoad() {}
+
+// +checklocksignore
+func (l *dentryList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *dentryEntry) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.dentryEntry"
+}
+
+func (e *dentryEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *dentryEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *dentryEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *dentryEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *dentryEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func (d *deviceFile) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.deviceFile"
+}
+
+func (d *deviceFile) StateFields() []string {
+ return []string{
+ "inode",
+ "kind",
+ "major",
+ "minor",
+ }
+}
+
+func (d *deviceFile) beforeSave() {}
+
+// +checklocksignore
+func (d *deviceFile) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.inode)
+ stateSinkObject.Save(1, &d.kind)
+ stateSinkObject.Save(2, &d.major)
+ stateSinkObject.Save(3, &d.minor)
+}
+
+func (d *deviceFile) afterLoad() {}
+
+// +checklocksignore
+func (d *deviceFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.inode)
+ stateSourceObject.Load(1, &d.kind)
+ stateSourceObject.Load(2, &d.major)
+ stateSourceObject.Load(3, &d.minor)
+}
+
+func (dir *directory) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.directory"
+}
+
+func (dir *directory) StateFields() []string {
+ return []string{
+ "dentry",
+ "inode",
+ "childMap",
+ "numChildren",
+ "childList",
+ }
+}
+
+func (dir *directory) beforeSave() {}
+
+// +checklocksignore
+func (dir *directory) StateSave(stateSinkObject state.Sink) {
+ dir.beforeSave()
+ stateSinkObject.Save(0, &dir.dentry)
+ stateSinkObject.Save(1, &dir.inode)
+ stateSinkObject.Save(2, &dir.childMap)
+ stateSinkObject.Save(3, &dir.numChildren)
+ stateSinkObject.Save(4, &dir.childList)
+}
+
+func (dir *directory) afterLoad() {}
+
+// +checklocksignore
+func (dir *directory) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &dir.dentry)
+ stateSourceObject.Load(1, &dir.inode)
+ stateSourceObject.Load(2, &dir.childMap)
+ stateSourceObject.Load(3, &dir.numChildren)
+ stateSourceObject.Load(4, &dir.childList)
+}
+
+func (fd *directoryFD) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.directoryFD"
+}
+
+func (fd *directoryFD) StateFields() []string {
+ return []string{
+ "fileDescription",
+ "DirectoryFileDescriptionDefaultImpl",
+ "iter",
+ "off",
+ }
+}
+
+func (fd *directoryFD) beforeSave() {}
+
+// +checklocksignore
+func (fd *directoryFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.fileDescription)
+ stateSinkObject.Save(1, &fd.DirectoryFileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &fd.iter)
+ stateSinkObject.Save(3, &fd.off)
+}
+
+func (fd *directoryFD) afterLoad() {}
+
+// +checklocksignore
+func (fd *directoryFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.fileDescription)
+ stateSourceObject.Load(1, &fd.DirectoryFileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &fd.iter)
+ stateSourceObject.Load(3, &fd.off)
+}
+
+func (r *inodeRefs) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.inodeRefs"
+}
+
+func (r *inodeRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *inodeRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *inodeRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *inodeRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (n *namedPipe) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.namedPipe"
+}
+
+func (n *namedPipe) StateFields() []string {
+ return []string{
+ "inode",
+ "pipe",
+ }
+}
+
+func (n *namedPipe) beforeSave() {}
+
+// +checklocksignore
+func (n *namedPipe) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.inode)
+ stateSinkObject.Save(1, &n.pipe)
+}
+
+func (n *namedPipe) afterLoad() {}
+
+// +checklocksignore
+func (n *namedPipe) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.inode)
+ stateSourceObject.Load(1, &n.pipe)
+}
+
+func (rf *regularFile) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.regularFile"
+}
+
+func (rf *regularFile) StateFields() []string {
+ return []string{
+ "inode",
+ "memoryUsageKind",
+ "mappings",
+ "writableMappingPages",
+ "data",
+ "seals",
+ "size",
+ }
+}
+
+func (rf *regularFile) beforeSave() {}
+
+// +checklocksignore
+func (rf *regularFile) StateSave(stateSinkObject state.Sink) {
+ rf.beforeSave()
+ stateSinkObject.Save(0, &rf.inode)
+ stateSinkObject.Save(1, &rf.memoryUsageKind)
+ stateSinkObject.Save(2, &rf.mappings)
+ stateSinkObject.Save(3, &rf.writableMappingPages)
+ stateSinkObject.Save(4, &rf.data)
+ stateSinkObject.Save(5, &rf.seals)
+ stateSinkObject.Save(6, &rf.size)
+}
+
+// +checklocksignore
+func (rf *regularFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &rf.inode)
+ stateSourceObject.Load(1, &rf.memoryUsageKind)
+ stateSourceObject.Load(2, &rf.mappings)
+ stateSourceObject.Load(3, &rf.writableMappingPages)
+ stateSourceObject.Load(4, &rf.data)
+ stateSourceObject.Load(5, &rf.seals)
+ stateSourceObject.Load(6, &rf.size)
+ stateSourceObject.AfterLoad(rf.afterLoad)
+}
+
+func (fd *regularFileFD) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.regularFileFD"
+}
+
+func (fd *regularFileFD) StateFields() []string {
+ return []string{
+ "fileDescription",
+ "off",
+ }
+}
+
+func (fd *regularFileFD) beforeSave() {}
+
+// +checklocksignore
+func (fd *regularFileFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.fileDescription)
+ stateSinkObject.Save(1, &fd.off)
+}
+
+func (fd *regularFileFD) afterLoad() {}
+
+// +checklocksignore
+func (fd *regularFileFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.fileDescription)
+ stateSourceObject.Load(1, &fd.off)
+}
+
+func (s *socketFile) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.socketFile"
+}
+
+func (s *socketFile) StateFields() []string {
+ return []string{
+ "inode",
+ "ep",
+ }
+}
+
+func (s *socketFile) beforeSave() {}
+
+// +checklocksignore
+func (s *socketFile) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.inode)
+ stateSinkObject.Save(1, &s.ep)
+}
+
+func (s *socketFile) afterLoad() {}
+
+// +checklocksignore
+func (s *socketFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.inode)
+ stateSourceObject.Load(1, &s.ep)
+}
+
+func (s *symlink) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.symlink"
+}
+
+func (s *symlink) StateFields() []string {
+ return []string{
+ "inode",
+ "target",
+ }
+}
+
+func (s *symlink) beforeSave() {}
+
+// +checklocksignore
+func (s *symlink) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.inode)
+ stateSinkObject.Save(1, &s.target)
+}
+
+func (s *symlink) afterLoad() {}
+
+// +checklocksignore
+func (s *symlink) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.inode)
+ stateSourceObject.Load(1, &s.target)
+}
+
+func (fstype *FilesystemType) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.FilesystemType"
+}
+
+func (fstype *FilesystemType) StateFields() []string {
+ return []string{}
+}
+
+func (fstype *FilesystemType) beforeSave() {}
+
+// +checklocksignore
+func (fstype *FilesystemType) StateSave(stateSinkObject state.Sink) {
+ fstype.beforeSave()
+}
+
+func (fstype *FilesystemType) afterLoad() {}
+
+// +checklocksignore
+func (fstype *FilesystemType) StateLoad(stateSourceObject state.Source) {
+}
+
+func (fs *filesystem) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.filesystem"
+}
+
+func (fs *filesystem) StateFields() []string {
+ return []string{
+ "vfsfs",
+ "mfp",
+ "clock",
+ "devMinor",
+ "mopts",
+ "nextInoMinusOne",
+ "root",
+ }
+}
+
+func (fs *filesystem) beforeSave() {}
+
+// +checklocksignore
+func (fs *filesystem) StateSave(stateSinkObject state.Sink) {
+ fs.beforeSave()
+ stateSinkObject.Save(0, &fs.vfsfs)
+ stateSinkObject.Save(1, &fs.mfp)
+ stateSinkObject.Save(2, &fs.clock)
+ stateSinkObject.Save(3, &fs.devMinor)
+ stateSinkObject.Save(4, &fs.mopts)
+ stateSinkObject.Save(5, &fs.nextInoMinusOne)
+ stateSinkObject.Save(6, &fs.root)
+}
+
+func (fs *filesystem) afterLoad() {}
+
+// +checklocksignore
+func (fs *filesystem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fs.vfsfs)
+ stateSourceObject.Load(1, &fs.mfp)
+ stateSourceObject.Load(2, &fs.clock)
+ stateSourceObject.Load(3, &fs.devMinor)
+ stateSourceObject.Load(4, &fs.mopts)
+ stateSourceObject.Load(5, &fs.nextInoMinusOne)
+ stateSourceObject.Load(6, &fs.root)
+}
+
+func (f *FilesystemOpts) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.FilesystemOpts"
+}
+
+func (f *FilesystemOpts) StateFields() []string {
+ return []string{
+ "RootFileType",
+ "RootSymlinkTarget",
+ "FilesystemType",
+ }
+}
+
+func (f *FilesystemOpts) beforeSave() {}
+
+// +checklocksignore
+func (f *FilesystemOpts) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.RootFileType)
+ stateSinkObject.Save(1, &f.RootSymlinkTarget)
+ stateSinkObject.Save(2, &f.FilesystemType)
+}
+
+func (f *FilesystemOpts) afterLoad() {}
+
+// +checklocksignore
+func (f *FilesystemOpts) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.RootFileType)
+ stateSourceObject.Load(1, &f.RootSymlinkTarget)
+ stateSourceObject.Load(2, &f.FilesystemType)
+}
+
+func (d *dentry) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.dentry"
+}
+
+func (d *dentry) StateFields() []string {
+ return []string{
+ "vfsd",
+ "parent",
+ "name",
+ "dentryEntry",
+ "inode",
+ }
+}
+
+func (d *dentry) beforeSave() {}
+
+// +checklocksignore
+func (d *dentry) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.vfsd)
+ stateSinkObject.Save(1, &d.parent)
+ stateSinkObject.Save(2, &d.name)
+ stateSinkObject.Save(3, &d.dentryEntry)
+ stateSinkObject.Save(4, &d.inode)
+}
+
+func (d *dentry) afterLoad() {}
+
+// +checklocksignore
+func (d *dentry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.vfsd)
+ stateSourceObject.Load(1, &d.parent)
+ stateSourceObject.Load(2, &d.name)
+ stateSourceObject.Load(3, &d.dentryEntry)
+ stateSourceObject.Load(4, &d.inode)
+}
+
+func (i *inode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.inode"
+}
+
+func (i *inode) StateFields() []string {
+ return []string{
+ "fs",
+ "refs",
+ "xattrs",
+ "mode",
+ "nlink",
+ "uid",
+ "gid",
+ "ino",
+ "atime",
+ "ctime",
+ "mtime",
+ "locks",
+ "watches",
+ "impl",
+ }
+}
+
+func (i *inode) beforeSave() {}
+
+// +checklocksignore
+func (i *inode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.fs)
+ stateSinkObject.Save(1, &i.refs)
+ stateSinkObject.Save(2, &i.xattrs)
+ stateSinkObject.Save(3, &i.mode)
+ stateSinkObject.Save(4, &i.nlink)
+ stateSinkObject.Save(5, &i.uid)
+ stateSinkObject.Save(6, &i.gid)
+ stateSinkObject.Save(7, &i.ino)
+ stateSinkObject.Save(8, &i.atime)
+ stateSinkObject.Save(9, &i.ctime)
+ stateSinkObject.Save(10, &i.mtime)
+ stateSinkObject.Save(11, &i.locks)
+ stateSinkObject.Save(12, &i.watches)
+ stateSinkObject.Save(13, &i.impl)
+}
+
+func (i *inode) afterLoad() {}
+
+// +checklocksignore
+func (i *inode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.fs)
+ stateSourceObject.Load(1, &i.refs)
+ stateSourceObject.Load(2, &i.xattrs)
+ stateSourceObject.Load(3, &i.mode)
+ stateSourceObject.Load(4, &i.nlink)
+ stateSourceObject.Load(5, &i.uid)
+ stateSourceObject.Load(6, &i.gid)
+ stateSourceObject.Load(7, &i.ino)
+ stateSourceObject.Load(8, &i.atime)
+ stateSourceObject.Load(9, &i.ctime)
+ stateSourceObject.Load(10, &i.mtime)
+ stateSourceObject.Load(11, &i.locks)
+ stateSourceObject.Load(12, &i.watches)
+ stateSourceObject.Load(13, &i.impl)
+}
+
+func (fd *fileDescription) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.fileDescription"
+}
+
+func (fd *fileDescription) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "LockFD",
+ }
+}
+
+func (fd *fileDescription) beforeSave() {}
+
+// +checklocksignore
+func (fd *fileDescription) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.vfsfd)
+ stateSinkObject.Save(1, &fd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &fd.LockFD)
+}
+
+func (fd *fileDescription) afterLoad() {}
+
+// +checklocksignore
+func (fd *fileDescription) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.vfsfd)
+ stateSourceObject.Load(1, &fd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &fd.LockFD)
+}
+
+func init() {
+ state.Register((*dentryList)(nil))
+ state.Register((*dentryEntry)(nil))
+ state.Register((*deviceFile)(nil))
+ state.Register((*directory)(nil))
+ state.Register((*directoryFD)(nil))
+ state.Register((*inodeRefs)(nil))
+ state.Register((*namedPipe)(nil))
+ state.Register((*regularFile)(nil))
+ state.Register((*regularFileFD)(nil))
+ state.Register((*socketFile)(nil))
+ state.Register((*symlink)(nil))
+ state.Register((*FilesystemType)(nil))
+ state.Register((*filesystem)(nil))
+ state.Register((*FilesystemOpts)(nil))
+ state.Register((*dentry)(nil))
+ state.Register((*inode)(nil))
+ state.Register((*fileDescription)(nil))
+}
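
Every type in the generated file above follows the same stateify contract: report a type name, an ordered field list, save and load each field by its index in that list, and register the type in init. A hand-written equivalent for a hypothetical type (counter is illustrative only, and it assumes the same "gvisor.dev/gvisor/pkg/state" import as the generated file):

// counter mirrors the shape of the generated code above. The indices passed
// to Save and Load must match the order returned by StateFields.
type counter struct {
	hits  uint64
	label string
}

func (c *counter) StateTypeName() string { return "example.counter" }

func (c *counter) StateFields() []string { return []string{"hits", "label"} }

func (c *counter) beforeSave() {}

func (c *counter) StateSave(stateSinkObject state.Sink) {
	c.beforeSave()
	stateSinkObject.Save(0, &c.hits)
	stateSinkObject.Save(1, &c.label)
}

func (c *counter) afterLoad() {}

func (c *counter) StateLoad(stateSourceObject state.Source) {
	stateSourceObject.Load(0, &c.hits)
	stateSourceObject.Load(1, &c.label)
}

func init() {
	state.Register((*counter)(nil))
}
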
diff --git a/pkg/sentry/fsimpl/tmpfs/tmpfs_test.go b/pkg/sentry/fsimpl/tmpfs/tmpfs_test.go
deleted file mode 100644
index fc5323abc..000000000
--- a/pkg/sentry/fsimpl/tmpfs/tmpfs_test.go
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tmpfs
-
-import (
- "fmt"
- "sync/atomic"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/fspath"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
-)
-
-// nextFileID is used to generate unique file names.
-var nextFileID int64
-
-// newTmpfsRoot creates a new tmpfs mount, and returns the root. If the
-// returned error is nil, cleanup should be called when the root is no longer
-// needed.
-func newTmpfsRoot(ctx context.Context) (*vfs.VirtualFilesystem, vfs.VirtualDentry, func(), error) {
- creds := auth.CredentialsFromContext(ctx)
-
- vfsObj := &vfs.VirtualFilesystem{}
- if err := vfsObj.Init(ctx); err != nil {
- return nil, vfs.VirtualDentry{}, nil, fmt.Errorf("VFS init: %v", err)
- }
-
- vfsObj.MustRegisterFilesystemType("tmpfs", FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
- })
- mntns, err := vfsObj.NewMountNamespace(ctx, creds, "", "tmpfs", &vfs.MountOptions{})
- if err != nil {
- return nil, vfs.VirtualDentry{}, nil, fmt.Errorf("failed to create tmpfs root mount: %v", err)
- }
- root := mntns.Root()
- root.IncRef()
- return vfsObj, root, func() {
- root.DecRef(ctx)
- mntns.DecRef(ctx)
- }, nil
-}
-
-// newFileFD creates a new file in a new tmpfs mount, and returns the FD. If
-// the returned err is nil, cleanup should be called when the FD is no longer
-// needed.
-func newFileFD(ctx context.Context, mode linux.FileMode) (*vfs.FileDescription, func(), error) {
- creds := auth.CredentialsFromContext(ctx)
- vfsObj, root, cleanup, err := newTmpfsRoot(ctx)
- if err != nil {
- return nil, nil, err
- }
-
- filename := fmt.Sprintf("tmpfs-test-file-%d", atomic.AddInt64(&nextFileID, 1))
-
-	// Create the file that will be written to and read back.
- fd, err := vfsObj.OpenAt(ctx, creds, &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(filename),
- }, &vfs.OpenOptions{
- Flags: linux.O_RDWR | linux.O_CREAT | linux.O_EXCL,
- Mode: linux.ModeRegular | mode,
- })
- if err != nil {
- cleanup()
- return nil, nil, fmt.Errorf("failed to create file %q: %v", filename, err)
- }
-
- return fd, cleanup, nil
-}
-
-// newDirFD is like newFileFD, but for directories.
-func newDirFD(ctx context.Context, mode linux.FileMode) (*vfs.FileDescription, func(), error) {
- creds := auth.CredentialsFromContext(ctx)
- vfsObj, root, cleanup, err := newTmpfsRoot(ctx)
- if err != nil {
- return nil, nil, err
- }
-
- dirname := fmt.Sprintf("tmpfs-test-dir-%d", atomic.AddInt64(&nextFileID, 1))
-
- // Create the dir.
- if err := vfsObj.MkdirAt(ctx, creds, &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(dirname),
- }, &vfs.MkdirOptions{
- Mode: linux.ModeDirectory | mode,
- }); err != nil {
- cleanup()
- return nil, nil, fmt.Errorf("failed to create directory %q: %v", dirname, err)
- }
-
- // Open the dir and return it.
- fd, err := vfsObj.OpenAt(ctx, creds, &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(dirname),
- }, &vfs.OpenOptions{
- Flags: linux.O_RDONLY | linux.O_DIRECTORY,
- })
- if err != nil {
- cleanup()
- return nil, nil, fmt.Errorf("failed to open directory %q: %v", dirname, err)
- }
-
- return fd, cleanup, nil
-}
-
-// newPipeFD is like newFileFD, but for pipes.
-func newPipeFD(ctx context.Context, mode linux.FileMode) (*vfs.FileDescription, func(), error) {
- creds := auth.CredentialsFromContext(ctx)
- vfsObj, root, cleanup, err := newTmpfsRoot(ctx)
- if err != nil {
- return nil, nil, err
- }
-
- name := fmt.Sprintf("tmpfs-test-%d", atomic.AddInt64(&nextFileID, 1))
-
- if err := vfsObj.MknodAt(ctx, creds, &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(name),
- }, &vfs.MknodOptions{
- Mode: linux.ModeNamedPipe | mode,
- }); err != nil {
- cleanup()
- return nil, nil, fmt.Errorf("failed to create pipe %q: %v", name, err)
- }
-
- fd, err := vfsObj.OpenAt(ctx, creds, &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(name),
- }, &vfs.OpenOptions{
- Flags: linux.O_RDWR,
- })
- if err != nil {
- cleanup()
- return nil, nil, fmt.Errorf("failed to open pipe %q: %v", name, err)
- }
-
- return fd, cleanup, nil
-}
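
The helpers deleted above were always combined the same way in the tmpfs tests: obtain an FD from newFileFD, write through a BytesIOSequence, then read the contents back. A hedged sketch of that round-trip, relying on the vfs and usermem packages used by the deleted tests earlier in this diff (roundTrip itself is a hypothetical helper):

// roundTrip writes data through fd and reads it back from offset 0,
// returning what was read. fd is assumed to come from newFileFD above.
func roundTrip(ctx context.Context, fd *vfs.FileDescription, data []byte) ([]byte, error) {
	if _, err := fd.Write(ctx, usermem.BytesIOSequence(data), vfs.WriteOptions{}); err != nil {
		return nil, fmt.Errorf("write: %v", err)
	}
	buf := make([]byte, len(data))
	n, err := fd.PRead(ctx, usermem.BytesIOSequence(buf), 0, vfs.ReadOptions{})
	if err != nil && err != io.EOF {
		return nil, fmt.Errorf("pread: %v", err)
	}
	return buf[:n], nil
}
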
diff --git a/pkg/sentry/fsimpl/verity/BUILD b/pkg/sentry/fsimpl/verity/BUILD
deleted file mode 100644
index 1d855234c..000000000
--- a/pkg/sentry/fsimpl/verity/BUILD
+++ /dev/null
@@ -1,55 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-licenses(["notice"])
-
-go_library(
- name = "verity",
- srcs = [
- "filesystem.go",
- "save_restore.go",
- "verity.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/fspath",
- "//pkg/hostarch",
- "//pkg/marshal/primitive",
- "//pkg/merkletree",
- "//pkg/refsvfs2",
- "//pkg/safemem",
- "//pkg/sentry/arch",
- "//pkg/sentry/fs/lock",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/memmap",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- ],
-)
-
-go_test(
- name = "verity_test",
- srcs = [
- "verity_test.go",
- ],
- library = ":verity",
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/fspath",
- "//pkg/sentry/arch",
- "//pkg/sentry/fsimpl/testutil",
- "//pkg/sentry/fsimpl/tmpfs",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- "//pkg/usermem",
- ],
-)
diff --git a/pkg/sentry/fsimpl/verity/verity_state_autogen.go b/pkg/sentry/fsimpl/verity/verity_state_autogen.go
new file mode 100644
index 000000000..dba01a68e
--- /dev/null
+++ b/pkg/sentry/fsimpl/verity/verity_state_autogen.go
@@ -0,0 +1,240 @@
+// automatically generated by stateify.
+
+package verity
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (fstype *FilesystemType) StateTypeName() string {
+ return "pkg/sentry/fsimpl/verity.FilesystemType"
+}
+
+func (fstype *FilesystemType) StateFields() []string {
+ return []string{}
+}
+
+func (fstype *FilesystemType) beforeSave() {}
+
+// +checklocksignore
+func (fstype *FilesystemType) StateSave(stateSinkObject state.Sink) {
+ fstype.beforeSave()
+}
+
+func (fstype *FilesystemType) afterLoad() {}
+
+// +checklocksignore
+func (fstype *FilesystemType) StateLoad(stateSourceObject state.Source) {
+}
+
+func (fs *filesystem) StateTypeName() string {
+ return "pkg/sentry/fsimpl/verity.filesystem"
+}
+
+func (fs *filesystem) StateFields() []string {
+ return []string{
+ "vfsfs",
+ "creds",
+ "allowRuntimeEnable",
+ "lowerMount",
+ "rootDentry",
+ "alg",
+ "action",
+ "opts",
+ }
+}
+
+func (fs *filesystem) beforeSave() {}
+
+// +checklocksignore
+func (fs *filesystem) StateSave(stateSinkObject state.Sink) {
+ fs.beforeSave()
+ stateSinkObject.Save(0, &fs.vfsfs)
+ stateSinkObject.Save(1, &fs.creds)
+ stateSinkObject.Save(2, &fs.allowRuntimeEnable)
+ stateSinkObject.Save(3, &fs.lowerMount)
+ stateSinkObject.Save(4, &fs.rootDentry)
+ stateSinkObject.Save(5, &fs.alg)
+ stateSinkObject.Save(6, &fs.action)
+ stateSinkObject.Save(7, &fs.opts)
+}
+
+func (fs *filesystem) afterLoad() {}
+
+// +checklocksignore
+func (fs *filesystem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fs.vfsfs)
+ stateSourceObject.Load(1, &fs.creds)
+ stateSourceObject.Load(2, &fs.allowRuntimeEnable)
+ stateSourceObject.Load(3, &fs.lowerMount)
+ stateSourceObject.Load(4, &fs.rootDentry)
+ stateSourceObject.Load(5, &fs.alg)
+ stateSourceObject.Load(6, &fs.action)
+ stateSourceObject.Load(7, &fs.opts)
+}
+
+func (i *InternalFilesystemOptions) StateTypeName() string {
+ return "pkg/sentry/fsimpl/verity.InternalFilesystemOptions"
+}
+
+func (i *InternalFilesystemOptions) StateFields() []string {
+ return []string{
+ "LowerName",
+ "Alg",
+ "AllowRuntimeEnable",
+ "LowerGetFSOptions",
+ "Action",
+ }
+}
+
+func (i *InternalFilesystemOptions) beforeSave() {}
+
+// +checklocksignore
+func (i *InternalFilesystemOptions) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.LowerName)
+ stateSinkObject.Save(1, &i.Alg)
+ stateSinkObject.Save(2, &i.AllowRuntimeEnable)
+ stateSinkObject.Save(3, &i.LowerGetFSOptions)
+ stateSinkObject.Save(4, &i.Action)
+}
+
+func (i *InternalFilesystemOptions) afterLoad() {}
+
+// +checklocksignore
+func (i *InternalFilesystemOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.LowerName)
+ stateSourceObject.Load(1, &i.Alg)
+ stateSourceObject.Load(2, &i.AllowRuntimeEnable)
+ stateSourceObject.Load(3, &i.LowerGetFSOptions)
+ stateSourceObject.Load(4, &i.Action)
+}
+
+func (d *dentry) StateTypeName() string {
+ return "pkg/sentry/fsimpl/verity.dentry"
+}
+
+func (d *dentry) StateFields() []string {
+ return []string{
+ "vfsd",
+ "refs",
+ "fs",
+ "mode",
+ "uid",
+ "gid",
+ "size",
+ "parent",
+ "name",
+ "children",
+ "childrenNames",
+ "childrenList",
+ "lowerVD",
+ "lowerMerkleVD",
+ "symlinkTarget",
+ "hash",
+ }
+}
+
+func (d *dentry) beforeSave() {}
+
+// +checklocksignore
+func (d *dentry) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.vfsd)
+ stateSinkObject.Save(1, &d.refs)
+ stateSinkObject.Save(2, &d.fs)
+ stateSinkObject.Save(3, &d.mode)
+ stateSinkObject.Save(4, &d.uid)
+ stateSinkObject.Save(5, &d.gid)
+ stateSinkObject.Save(6, &d.size)
+ stateSinkObject.Save(7, &d.parent)
+ stateSinkObject.Save(8, &d.name)
+ stateSinkObject.Save(9, &d.children)
+ stateSinkObject.Save(10, &d.childrenNames)
+ stateSinkObject.Save(11, &d.childrenList)
+ stateSinkObject.Save(12, &d.lowerVD)
+ stateSinkObject.Save(13, &d.lowerMerkleVD)
+ stateSinkObject.Save(14, &d.symlinkTarget)
+ stateSinkObject.Save(15, &d.hash)
+}
+
+// +checklocksignore
+func (d *dentry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.vfsd)
+ stateSourceObject.Load(1, &d.refs)
+ stateSourceObject.Load(2, &d.fs)
+ stateSourceObject.Load(3, &d.mode)
+ stateSourceObject.Load(4, &d.uid)
+ stateSourceObject.Load(5, &d.gid)
+ stateSourceObject.Load(6, &d.size)
+ stateSourceObject.Load(7, &d.parent)
+ stateSourceObject.Load(8, &d.name)
+ stateSourceObject.Load(9, &d.children)
+ stateSourceObject.Load(10, &d.childrenNames)
+ stateSourceObject.Load(11, &d.childrenList)
+ stateSourceObject.Load(12, &d.lowerVD)
+ stateSourceObject.Load(13, &d.lowerMerkleVD)
+ stateSourceObject.Load(14, &d.symlinkTarget)
+ stateSourceObject.Load(15, &d.hash)
+ stateSourceObject.AfterLoad(d.afterLoad)
+}
+
+func (fd *fileDescription) StateTypeName() string {
+ return "pkg/sentry/fsimpl/verity.fileDescription"
+}
+
+func (fd *fileDescription) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "d",
+ "isDir",
+ "lowerFD",
+ "lowerMappable",
+ "merkleReader",
+ "merkleWriter",
+ "parentMerkleWriter",
+ "off",
+ }
+}
+
+func (fd *fileDescription) beforeSave() {}
+
+// +checklocksignore
+func (fd *fileDescription) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.vfsfd)
+ stateSinkObject.Save(1, &fd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &fd.d)
+ stateSinkObject.Save(3, &fd.isDir)
+ stateSinkObject.Save(4, &fd.lowerFD)
+ stateSinkObject.Save(5, &fd.lowerMappable)
+ stateSinkObject.Save(6, &fd.merkleReader)
+ stateSinkObject.Save(7, &fd.merkleWriter)
+ stateSinkObject.Save(8, &fd.parentMerkleWriter)
+ stateSinkObject.Save(9, &fd.off)
+}
+
+func (fd *fileDescription) afterLoad() {}
+
+// +checklocksignore
+func (fd *fileDescription) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.vfsfd)
+ stateSourceObject.Load(1, &fd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &fd.d)
+ stateSourceObject.Load(3, &fd.isDir)
+ stateSourceObject.Load(4, &fd.lowerFD)
+ stateSourceObject.Load(5, &fd.lowerMappable)
+ stateSourceObject.Load(6, &fd.merkleReader)
+ stateSourceObject.Load(7, &fd.merkleWriter)
+ stateSourceObject.Load(8, &fd.parentMerkleWriter)
+ stateSourceObject.Load(9, &fd.off)
+}
+
+func init() {
+ state.Register((*FilesystemType)(nil))
+ state.Register((*filesystem)(nil))
+ state.Register((*InternalFilesystemOptions)(nil))
+ state.Register((*dentry)(nil))
+ state.Register((*fileDescription)(nil))
+}
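
Unlike most types above, verity's dentry (like tmpfs's regularFile and inodeRefs) does not define an empty afterLoad; its generated StateLoad instead registers a hand-written hook through stateSourceObject.AfterLoad, so the fixup runs once loading has completed. A minimal sketch of that hook pattern for a hypothetical type (cache is illustrative; its remaining stateify methods would follow the pattern shown after the tmpfs state file above):

// cache rebuilds a derived field after restore. AfterLoad defers the hook
// until loading finishes, mirroring how the generated StateLoad above
// registers d.afterLoad.
type cache struct {
	entries map[string]string
	size    int // derived; not saved
}

func (c *cache) afterLoad() { c.size = len(c.entries) }

func (c *cache) StateLoad(stateSourceObject state.Source) {
	stateSourceObject.Load(0, &c.entries)
	stateSourceObject.AfterLoad(c.afterLoad)
}
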
diff --git a/pkg/sentry/fsimpl/verity/verity_test.go b/pkg/sentry/fsimpl/verity/verity_test.go
deleted file mode 100644
index af041bd50..000000000
--- a/pkg/sentry/fsimpl/verity/verity_test.go
+++ /dev/null
@@ -1,1211 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package verity
-
-import (
- "fmt"
- "io"
- "math/rand"
- "strconv"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/errors/linuxerr"
- "gvisor.dev/gvisor/pkg/fspath"
- "gvisor.dev/gvisor/pkg/sentry/arch"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/testutil"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/tmpfs"
- "gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-const (
- // rootMerkleFilename is the name of the root Merkle tree file.
- rootMerkleFilename = "root.verity"
- // maxDataSize is the maximum data size of a test file.
- maxDataSize = 100000
-)
-
-var hashAlgs = []HashAlgorithm{SHA256, SHA512}
-
-func dentryFromVD(t *testing.T, vd vfs.VirtualDentry) *dentry {
- t.Helper()
- d, ok := vd.Dentry().Impl().(*dentry)
- if !ok {
- t.Fatalf("can't assert %T as a *dentry", vd)
- }
- return d
-}
-
-// dentryFromFD returns the dentry corresponding to fd.
-func dentryFromFD(t *testing.T, fd *vfs.FileDescription) *dentry {
- t.Helper()
- f, ok := fd.Impl().(*fileDescription)
- if !ok {
- t.Fatalf("can't assert %T as a *fileDescription", fd)
- }
- return f.d
-}
-
-// newVerityRoot creates a new verity mount, and returns the root. The
-// underlying file system is tmpfs. Cleanup is registered via t.Cleanup, so
-// callers do not need to release the root themselves.
-func newVerityRoot(t *testing.T, hashAlg HashAlgorithm) (*vfs.VirtualFilesystem, vfs.VirtualDentry, context.Context, error) {
- t.Helper()
- k, err := testutil.Boot()
- if err != nil {
- t.Fatalf("testutil.Boot: %v", err)
- }
-
- ctx := k.SupervisorContext()
-
- rand.Seed(time.Now().UnixNano())
- vfsObj := &vfs.VirtualFilesystem{}
- if err := vfsObj.Init(ctx); err != nil {
- return nil, vfs.VirtualDentry{}, nil, fmt.Errorf("VFS init: %v", err)
- }
-
- vfsObj.MustRegisterFilesystemType("verity", FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
- })
-
- vfsObj.MustRegisterFilesystemType("tmpfs", tmpfs.FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
- })
-
- data := "root_name=" + rootMerkleFilename
- mntns, err := vfsObj.NewMountNamespace(ctx, auth.CredentialsFromContext(ctx), "", "verity", &vfs.MountOptions{
- GetFilesystemOptions: vfs.GetFilesystemOptions{
- Data: data,
- InternalData: InternalFilesystemOptions{
- LowerName: "tmpfs",
- Alg: hashAlg,
- AllowRuntimeEnable: true,
- Action: ErrorOnViolation,
- },
- },
- })
- if err != nil {
- return nil, vfs.VirtualDentry{}, nil, fmt.Errorf("NewMountNamespace: %v", err)
- }
- root := mntns.Root()
- root.IncRef()
-
- // Use lowerRoot in the task as we modify the lower file system
- // directly in many tests.
- lowerRoot := root.Dentry().Impl().(*dentry).lowerVD
- tc := k.NewThreadGroup(nil, k.RootPIDNamespace(), kernel.NewSignalHandlers(), linux.SIGCHLD, k.GlobalInit().Limits())
- task, err := testutil.CreateTask(ctx, "name", tc, mntns, lowerRoot, lowerRoot)
- if err != nil {
- t.Fatalf("testutil.CreateTask: %v", err)
- }
-
- t.Cleanup(func() {
- root.DecRef(ctx)
- mntns.DecRef(ctx)
- })
- return vfsObj, root, task.AsyncContext(), nil
-}
-
-// openVerityAt opens a verity file.
-//
-// TODO(chongc): release reference from opening the file when done.
-func openVerityAt(ctx context.Context, vfsObj *vfs.VirtualFilesystem, vd vfs.VirtualDentry, path string, flags uint32, mode linux.FileMode) (*vfs.FileDescription, error) {
- return vfsObj.OpenAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: vd,
- Start: vd,
- Path: fspath.Parse(path),
- }, &vfs.OpenOptions{
- Flags: flags,
- Mode: mode,
- })
-}
-
-// openLowerAt opens the file in the underlying file system.
-//
-// TODO(chongc): release reference from opening the file when done.
-func (d *dentry) openLowerAt(ctx context.Context, vfsObj *vfs.VirtualFilesystem, path string, flags uint32, mode linux.FileMode) (*vfs.FileDescription, error) {
- return vfsObj.OpenAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: d.lowerVD,
- Start: d.lowerVD,
- Path: fspath.Parse(path),
- }, &vfs.OpenOptions{
- Flags: flags,
- Mode: mode,
- })
-}
-
-// openLowerMerkleAt opens the Merkle file in the underlying file system.
-//
-// TODO(chongc): release reference from opening the file when done.
-func (d *dentry) openLowerMerkleAt(ctx context.Context, vfsObj *vfs.VirtualFilesystem, flags uint32, mode linux.FileMode) (*vfs.FileDescription, error) {
- return vfsObj.OpenAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: d.lowerMerkleVD,
- Start: d.lowerMerkleVD,
- }, &vfs.OpenOptions{
- Flags: flags,
- Mode: mode,
- })
-}
-
-// mkdirLowerAt creates a directory in the underlying file system.
-func (d *dentry) mkdirLowerAt(ctx context.Context, vfsObj *vfs.VirtualFilesystem, path string, mode linux.FileMode) error {
- return vfsObj.MkdirAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: d.lowerVD,
- Start: d.lowerVD,
- Path: fspath.Parse(path),
- }, &vfs.MkdirOptions{
- Mode: mode,
- })
-}
-
-// unlinkLowerAt deletes the file in the underlying file system.
-func (d *dentry) unlinkLowerAt(ctx context.Context, vfsObj *vfs.VirtualFilesystem, path string) error {
- return vfsObj.UnlinkAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: d.lowerVD,
- Start: d.lowerVD,
- Path: fspath.Parse(path),
- })
-}
-
-// unlinkLowerMerkleAt deletes the Merkle file in the underlying file system.
-func (d *dentry) unlinkLowerMerkleAt(ctx context.Context, vfsObj *vfs.VirtualFilesystem, path string) error {
- return vfsObj.UnlinkAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: d.lowerVD,
- Start: d.lowerVD,
- Path: fspath.Parse(merklePrefix + path),
- })
-}
-
-// renameLowerAt renames file name to newName in the underlying file system.
-func (d *dentry) renameLowerAt(ctx context.Context, vfsObj *vfs.VirtualFilesystem, name string, newName string) error {
- return vfsObj.RenameAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: d.lowerVD,
- Start: d.lowerVD,
- Path: fspath.Parse(name),
- }, &vfs.PathOperation{
- Root: d.lowerVD,
- Start: d.lowerVD,
- Path: fspath.Parse(newName),
- }, &vfs.RenameOptions{})
-}
-
-// renameLowerMerkleAt renames Merkle file name to newName in the underlying
-// file system.
-func (d *dentry) renameLowerMerkleAt(ctx context.Context, vfsObj *vfs.VirtualFilesystem, name string, newName string) error {
- return vfsObj.RenameAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: d.lowerVD,
- Start: d.lowerVD,
- Path: fspath.Parse(merklePrefix + name),
- }, &vfs.PathOperation{
- Root: d.lowerVD,
- Start: d.lowerVD,
- Path: fspath.Parse(merklePrefix + newName),
- }, &vfs.RenameOptions{})
-}
-
-// symlinkLowerAt creates a symbolic link at symlink referring to the given target
-// in the underlying filesystem.
-func (d *dentry) symlinkLowerAt(ctx context.Context, vfsObj *vfs.VirtualFilesystem, target, symlink string) error {
- return vfsObj.SymlinkAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: d.lowerVD,
- Start: d.lowerVD,
- Path: fspath.Parse(symlink),
- }, target)
-}
-
-// newFileFD creates a new file in the verity mount, and returns the FD. The FD
-// refers to a file filled with randomly generated data.
-func newFileFD(ctx context.Context, t *testing.T, vfsObj *vfs.VirtualFilesystem, root vfs.VirtualDentry, filePath string, mode linux.FileMode) (*vfs.FileDescription, int, error) {
- // Create the file in the underlying file system.
- lowerFD, err := dentryFromVD(t, root).openLowerAt(ctx, vfsObj, filePath, linux.O_RDWR|linux.O_CREAT|linux.O_EXCL, linux.ModeRegular|mode)
- if err != nil {
- return nil, 0, err
- }
-
- // Generate random data to be written to the file.
- dataSize := rand.Intn(maxDataSize) + 1
- data := make([]byte, dataSize)
- rand.Read(data)
-
-	// Write directly to the underlying FD, since the verity FD is read-only.
- n, err := lowerFD.Write(ctx, usermem.BytesIOSequence(data), vfs.WriteOptions{})
- if err != nil {
- return nil, 0, err
- }
-
- if n != int64(len(data)) {
- return nil, 0, fmt.Errorf("lowerFD.Write got write length %d, want %d", n, len(data))
- }
-
- lowerFD.DecRef(ctx)
-
- // Now open the verity file descriptor.
- fd, err := openVerityAt(ctx, vfsObj, root, filePath, linux.O_RDONLY, mode)
- return fd, dataSize, err
-}
-
-// newDirFD creates a new directory in the verity mount, and returns the FD.
-func newDirFD(ctx context.Context, t *testing.T, vfsObj *vfs.VirtualFilesystem, root vfs.VirtualDentry, dirPath string, mode linux.FileMode) (*vfs.FileDescription, error) {
- // Create the directory in the underlying file system.
- if err := dentryFromVD(t, root).mkdirLowerAt(ctx, vfsObj, dirPath, linux.ModeRegular|mode); err != nil {
- return nil, err
- }
- if _, err := dentryFromVD(t, root).openLowerAt(ctx, vfsObj, dirPath, linux.O_RDONLY|linux.O_DIRECTORY, linux.ModeRegular|mode); err != nil {
- return nil, err
- }
- return openVerityAt(ctx, vfsObj, root, dirPath, linux.O_RDONLY|linux.O_DIRECTORY, mode)
-}
-
-// newEmptyFileFD creates a new empty file in the verity mount, and returns the FD.
-func newEmptyFileFD(ctx context.Context, t *testing.T, vfsObj *vfs.VirtualFilesystem, root vfs.VirtualDentry, filePath string, mode linux.FileMode) (*vfs.FileDescription, error) {
- // Create the file in the underlying file system.
- _, err := dentryFromVD(t, root).openLowerAt(ctx, vfsObj, filePath, linux.O_RDWR|linux.O_CREAT|linux.O_EXCL, linux.ModeRegular|mode)
- if err != nil {
- return nil, err
- }
- // Now open the verity file descriptor.
- fd, err := openVerityAt(ctx, vfsObj, root, filePath, linux.O_RDONLY, mode)
- return fd, err
-}
-
-// flipRandomBit randomly flips a bit in the file represented by fd.
-func flipRandomBit(ctx context.Context, fd *vfs.FileDescription, size int) error {
- randomPos := int64(rand.Intn(size))
- byteToModify := make([]byte, 1)
- if _, err := fd.PRead(ctx, usermem.BytesIOSequence(byteToModify), randomPos, vfs.ReadOptions{}); err != nil {
- return fmt.Errorf("lowerFD.PRead: %v", err)
- }
- byteToModify[0] ^= 1
- if _, err := fd.PWrite(ctx, usermem.BytesIOSequence(byteToModify), randomPos, vfs.WriteOptions{}); err != nil {
- return fmt.Errorf("lowerFD.PWrite: %v", err)
- }
- return nil
-}
-
-func enableVerity(ctx context.Context, t *testing.T, fd *vfs.FileDescription) {
- t.Helper()
- var args arch.SyscallArguments
- args[1] = arch.SyscallArgument{Value: linux.FS_IOC_ENABLE_VERITY}
- if _, err := fd.Ioctl(ctx, nil /* uio */, args); err != nil {
- t.Fatalf("enable verity: %v", err)
- }
-}
-
-// TestOpen ensures that when a file is created, the corresponding Merkle tree
-// file and the root Merkle tree file exist.
-func TestOpen(t *testing.T) {
- for _, alg := range hashAlgs {
- vfsObj, root, ctx, err := newVerityRoot(t, alg)
- if err != nil {
- t.Fatalf("newVerityRoot: %v", err)
- }
-
- filename := "verity-test-file"
- fd, _, err := newFileFD(ctx, t, vfsObj, root, filename, 0644)
- if err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
-
- // Ensure that the corresponding Merkle tree file is created.
- if _, err = dentryFromFD(t, fd).openLowerMerkleAt(ctx, vfsObj, linux.O_RDONLY, linux.ModeRegular); err != nil {
- t.Errorf("OpenAt Merkle tree file %s: %v", merklePrefix+filename, err)
- }
-
-	// Ensure the root Merkle tree file is created.
- if _, err = dentryFromVD(t, root).openLowerMerkleAt(ctx, vfsObj, linux.O_RDONLY, linux.ModeRegular); err != nil {
- t.Errorf("OpenAt root Merkle tree file %s: %v", merklePrefix+rootMerkleFilename, err)
- }
- }
-}
-
-// TestPReadUnmodifiedFileSucceeds ensures that pread from an untouched verity
-// file succeeds after enabling verity for it.
-func TestPReadUnmodifiedFileSucceeds(t *testing.T) {
- for _, alg := range hashAlgs {
- vfsObj, root, ctx, err := newVerityRoot(t, alg)
- if err != nil {
- t.Fatalf("newVerityRoot: %v", err)
- }
-
- filename := "verity-test-file"
- fd, size, err := newFileFD(ctx, t, vfsObj, root, filename, 0644)
- if err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
-
- // Enable verity on the file and confirm a normal read succeeds.
- enableVerity(ctx, t, fd)
-
- buf := make([]byte, size)
- n, err := fd.PRead(ctx, usermem.BytesIOSequence(buf), 0 /* offset */, vfs.ReadOptions{})
- if err != nil && err != io.EOF {
- t.Fatalf("fd.PRead: %v", err)
- }
-
- if n != int64(size) {
- t.Errorf("fd.PRead got read length %d, want %d", n, size)
- }
- }
-}
-
-// TestReadUnmodifiedFileSucceeds ensures that read from an untouched verity
-// file succeeds after enabling verity for it.
-func TestReadUnmodifiedFileSucceeds(t *testing.T) {
- for _, alg := range hashAlgs {
- vfsObj, root, ctx, err := newVerityRoot(t, alg)
- if err != nil {
- t.Fatalf("newVerityRoot: %v", err)
- }
-
- filename := "verity-test-file"
- fd, size, err := newFileFD(ctx, t, vfsObj, root, filename, 0644)
- if err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
-
- // Enable verity on the file and confirm a normal read succeeds.
- enableVerity(ctx, t, fd)
-
- buf := make([]byte, size)
- n, err := fd.Read(ctx, usermem.BytesIOSequence(buf), vfs.ReadOptions{})
- if err != nil && err != io.EOF {
- t.Fatalf("fd.Read: %v", err)
- }
-
- if n != int64(size) {
-	t.Errorf("fd.Read got read length %d, want %d", n, size)
- }
- }
-}
-
-// TestReadUnmodifiedEmptyFileSucceeds ensures that read from an untouched empty verity
-// file succeeds after enabling verity for it.
-func TestReadUnmodifiedEmptyFileSucceeds(t *testing.T) {
- for _, alg := range hashAlgs {
- vfsObj, root, ctx, err := newVerityRoot(t, alg)
- if err != nil {
- t.Fatalf("newVerityRoot: %v", err)
- }
-
- filename := "verity-test-empty-file"
- fd, err := newEmptyFileFD(ctx, t, vfsObj, root, filename, 0644)
- if err != nil {
- t.Fatalf("newEmptyFileFD: %v", err)
- }
-
- // Enable verity on the file and confirm a normal read succeeds.
- enableVerity(ctx, t, fd)
-
- var buf []byte
- n, err := fd.Read(ctx, usermem.BytesIOSequence(buf), vfs.ReadOptions{})
- if err != nil && err != io.EOF {
- t.Fatalf("fd.Read: %v", err)
- }
-
- if n != 0 {
- t.Errorf("fd.Read got read length %d, expected 0", n)
- }
- }
-}
-
-// TestReopenUnmodifiedFileSucceeds ensures that reopening an untouched verity
-// file succeeds after enabling verity for it.
-func TestReopenUnmodifiedFileSucceeds(t *testing.T) {
- for _, alg := range hashAlgs {
- vfsObj, root, ctx, err := newVerityRoot(t, alg)
- if err != nil {
- t.Fatalf("newVerityRoot: %v", err)
- }
-
- filename := "verity-test-file"
- fd, _, err := newFileFD(ctx, t, vfsObj, root, filename, 0644)
- if err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
-
-	// Enable verity on the file.
- enableVerity(ctx, t, fd)
-
- // Ensure reopening the verity enabled file succeeds.
- if _, err = openVerityAt(ctx, vfsObj, root, filename, linux.O_RDONLY, linux.ModeRegular); err != nil {
- t.Errorf("reopen enabled file failed: %v", err)
- }
- }
-}
-
-// TestOpenNonexistentFile ensures that opening a nonexistent file does not
-// trigger verification failure, even if the parent directory is verified.
-func TestOpenNonexistentFile(t *testing.T) {
- vfsObj, root, ctx, err := newVerityRoot(t, SHA256)
- if err != nil {
- t.Fatalf("newVerityRoot: %v", err)
- }
-
- filename := "verity-test-file"
- fd, _, err := newFileFD(ctx, t, vfsObj, root, filename, 0644)
- if err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
-
-	// Enable verity on the file.
- enableVerity(ctx, t, fd)
-
- // Enable verity on the parent directory.
- parentFD, err := openVerityAt(ctx, vfsObj, root, "", linux.O_RDONLY, linux.ModeRegular)
- if err != nil {
- t.Fatalf("OpenAt: %v", err)
- }
- enableVerity(ctx, t, parentFD)
-
-	// Ensure that opening a nonexistent file in the parent directory fails
-	// with ENOENT rather than with a verification failure.
- if _, err = openVerityAt(ctx, vfsObj, root, filename+"abc", linux.O_RDONLY, linux.ModeRegular); !linuxerr.Equals(linuxerr.ENOENT, err) {
- t.Errorf("OpenAt unexpected error: %v", err)
- }
-}
-
-// TestPReadModifiedFileFails ensures that pread from a modified verity file
-// fails.
-func TestPReadModifiedFileFails(t *testing.T) {
- for _, alg := range hashAlgs {
- vfsObj, root, ctx, err := newVerityRoot(t, alg)
- if err != nil {
- t.Fatalf("newVerityRoot: %v", err)
- }
-
- filename := "verity-test-file"
- fd, size, err := newFileFD(ctx, t, vfsObj, root, filename, 0644)
- if err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
-
- // Enable verity on the file.
- enableVerity(ctx, t, fd)
-
- // Open a new lowerFD that's read/writable.
- lowerFD, err := dentryFromFD(t, fd).openLowerAt(ctx, vfsObj, "", linux.O_RDWR, linux.ModeRegular)
- if err != nil {
- t.Fatalf("OpenAt: %v", err)
- }
-
- if err := flipRandomBit(ctx, lowerFD, size); err != nil {
- t.Fatalf("flipRandomBit: %v", err)
- }
-
- // Confirm that read from the modified file fails.
- buf := make([]byte, size)
- if _, err := fd.PRead(ctx, usermem.BytesIOSequence(buf), 0 /* offset */, vfs.ReadOptions{}); err == nil {
- t.Fatalf("fd.PRead succeeded, expected failure")
- }
- }
-}
-
-// TestReadModifiedFileFails ensures that read from a modified verity file
-// fails.
-func TestReadModifiedFileFails(t *testing.T) {
- for _, alg := range hashAlgs {
- vfsObj, root, ctx, err := newVerityRoot(t, alg)
- if err != nil {
- t.Fatalf("newVerityRoot: %v", err)
- }
-
- filename := "verity-test-file"
- fd, size, err := newFileFD(ctx, t, vfsObj, root, filename, 0644)
- if err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
-
- // Enable verity on the file.
- enableVerity(ctx, t, fd)
-
- // Open a new lowerFD that's read/writable.
- lowerFD, err := dentryFromFD(t, fd).openLowerAt(ctx, vfsObj, "", linux.O_RDWR, linux.ModeRegular)
- if err != nil {
- t.Fatalf("OpenAt: %v", err)
- }
-
- if err := flipRandomBit(ctx, lowerFD, size); err != nil {
- t.Fatalf("flipRandomBit: %v", err)
- }
-
- // Confirm that read from the modified file fails.
- buf := make([]byte, size)
- if _, err := fd.Read(ctx, usermem.BytesIOSequence(buf), vfs.ReadOptions{}); err == nil {
- t.Fatalf("fd.Read succeeded, expected failure")
- }
- }
-}
-
-// TestModifiedMerkleFails ensures that read from a verity file fails if the
-// corresponding Merkle tree file is modified.
-func TestModifiedMerkleFails(t *testing.T) {
- for _, alg := range hashAlgs {
- vfsObj, root, ctx, err := newVerityRoot(t, alg)
- if err != nil {
- t.Fatalf("newVerityRoot: %v", err)
- }
-
- filename := "verity-test-file"
- fd, size, err := newFileFD(ctx, t, vfsObj, root, filename, 0644)
- if err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
-
- // Enable verity on the file.
- enableVerity(ctx, t, fd)
-
- // Open a new lowerMerkleFD that's read/writable.
- lowerMerkleFD, err := dentryFromFD(t, fd).openLowerMerkleAt(ctx, vfsObj, linux.O_RDWR, linux.ModeRegular)
- if err != nil {
- t.Fatalf("OpenAt: %v", err)
- }
-
- // Flip a random bit in the Merkle tree file.
- stat, err := lowerMerkleFD.Stat(ctx, vfs.StatOptions{})
- if err != nil {
- t.Errorf("lowerMerkleFD.Stat: %v", err)
- }
-
- if err := flipRandomBit(ctx, lowerMerkleFD, int(stat.Size)); err != nil {
- t.Fatalf("flipRandomBit: %v", err)
- }
-
- // Confirm that read from a file with modified Merkle tree fails.
- buf := make([]byte, size)
- if _, err := fd.PRead(ctx, usermem.BytesIOSequence(buf), 0 /* offset */, vfs.ReadOptions{}); err == nil {
- t.Fatalf("fd.PRead succeeded with modified Merkle file")
- }
- }
-}
-
-// TestModifiedParentMerkleFails ensures that opening a verity-enabled file in
-// a verity-enabled directory fails if the hashes related to the target file
-// in the parent Merkle tree file are modified.
-func TestModifiedParentMerkleFails(t *testing.T) {
- for _, alg := range hashAlgs {
- vfsObj, root, ctx, err := newVerityRoot(t, alg)
- if err != nil {
- t.Fatalf("newVerityRoot: %v", err)
- }
-
- filename := "verity-test-file"
- fd, _, err := newFileFD(ctx, t, vfsObj, root, filename, 0644)
- if err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
-
- // Enable verity on the file.
- enableVerity(ctx, t, fd)
-
- // Enable verity on the parent directory.
- parentFD, err := openVerityAt(ctx, vfsObj, root, "", linux.O_RDONLY, linux.ModeRegular)
- if err != nil {
- t.Fatalf("OpenAt: %v", err)
- }
- enableVerity(ctx, t, parentFD)
-
- // Open a new lowerMerkleFD that's read/writable.
- parentLowerMerkleFD, err := dentryFromFD(t, fd).parent.openLowerMerkleAt(ctx, vfsObj, linux.O_RDWR, linux.ModeRegular)
- if err != nil {
- t.Fatalf("OpenAt: %v", err)
- }
-
- // Flip a random bit in the parent Merkle tree file.
- // This parent directory contains only one child, so any random
- // modification in the parent Merkle tree should cause verification
- // failure when opening the child file.
- sizeString, err := parentLowerMerkleFD.GetXattr(ctx, &vfs.GetXattrOptions{
- Name: childrenOffsetXattr,
- Size: sizeOfStringInt32,
- })
- if err != nil {
- t.Fatalf("parentLowerMerkleFD.GetXattr: %v", err)
- }
- parentMerkleSize, err := strconv.Atoi(sizeString)
- if err != nil {
- t.Fatalf("Failed convert size to int: %v", err)
- }
- if err := flipRandomBit(ctx, parentLowerMerkleFD, parentMerkleSize); err != nil {
- t.Fatalf("flipRandomBit: %v", err)
- }
-
- parentLowerMerkleFD.DecRef(ctx)
-
- // Ensure reopening the verity enabled file fails.
- if _, err = openVerityAt(ctx, vfsObj, root, filename, linux.O_RDONLY, linux.ModeRegular); err == nil {
- t.Errorf("OpenAt file with modified parent Merkle succeeded")
- }
- }
-}
-
-// TestUnmodifiedStatSucceeds ensures that stat of an untouched verity file
-// succeeds after enabling verity for it.
-func TestUnmodifiedStatSucceeds(t *testing.T) {
- for _, alg := range hashAlgs {
- vfsObj, root, ctx, err := newVerityRoot(t, alg)
- if err != nil {
- t.Fatalf("newVerityRoot: %v", err)
- }
-
- filename := "verity-test-file"
- fd, _, err := newFileFD(ctx, t, vfsObj, root, filename, 0644)
- if err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
-
- // Enable verity on the file and confirm that stat succeeds.
- enableVerity(ctx, t, fd)
- if _, err := fd.Stat(ctx, vfs.StatOptions{}); err != nil {
- t.Errorf("fd.Stat: %v", err)
- }
- }
-}
-
-// TestModifiedStatFails checks that stat fails for a file whose underlying
-// stat has been modified after verity was enabled.
-func TestModifiedStatFails(t *testing.T) {
- for _, alg := range hashAlgs {
- vfsObj, root, ctx, err := newVerityRoot(t, alg)
- if err != nil {
- t.Fatalf("newVerityRoot: %v", err)
- }
-
- filename := "verity-test-file"
- fd, _, err := newFileFD(ctx, t, vfsObj, root, filename, 0644)
- if err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
-
- // Enable verity on the file.
- enableVerity(ctx, t, fd)
-
- lowerFD := fd.Impl().(*fileDescription).lowerFD
- // Change the stat of the underlying file, and check that stat fails.
- if err := lowerFD.SetStat(ctx, vfs.SetStatOptions{
- Stat: linux.Statx{
- Mask: uint32(linux.STATX_MODE),
- Mode: 0777,
- },
- }); err != nil {
- t.Fatalf("lowerFD.SetStat: %v", err)
- }
-
- if _, err := fd.Stat(ctx, vfs.StatOptions{}); err == nil {
- t.Errorf("fd.Stat succeeded when it should fail")
- }
- }
-}
-
-// TestOpenDeletedFileFails ensures that opening a verity enabled file fails
-// with the verity error if the file and/or the corresponding Merkle tree file
-// has been deleted.
-func TestOpenDeletedFileFails(t *testing.T) {
- testCases := []struct {
- name string
- // The original file is removed if changeFile is true.
- changeFile bool
- // The Merkle tree file is removed if changeMerkleFile is true.
- changeMerkleFile bool
- }{
- {
- name: "FileOnly",
- changeFile: true,
- changeMerkleFile: false,
- },
- {
- name: "MerkleOnly",
- changeFile: false,
- changeMerkleFile: true,
- },
- {
- name: "FileAndMerkle",
- changeFile: true,
- changeMerkleFile: true,
- },
- }
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- vfsObj, root, ctx, err := newVerityRoot(t, SHA256)
- if err != nil {
- t.Fatalf("newVerityRoot: %v", err)
- }
-
- filename := "verity-test-file"
- fd, _, err := newFileFD(ctx, t, vfsObj, root, filename, 0644)
- if err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
-
- // Enable verity on the file.
- enableVerity(ctx, t, fd)
-
- if tc.changeFile {
- if err := dentryFromVD(t, root).unlinkLowerAt(ctx, vfsObj, filename); err != nil {
- t.Fatalf("UnlinkAt: %v", err)
- }
- }
- if tc.changeMerkleFile {
- if err := dentryFromVD(t, root).unlinkLowerMerkleAt(ctx, vfsObj, filename); err != nil {
- t.Fatalf("UnlinkAt: %v", err)
- }
- }
-
- // Ensure reopening the verity enabled file fails.
- if _, err = openVerityAt(ctx, vfsObj, root, filename, linux.O_RDONLY, linux.ModeRegular); !linuxerr.Equals(linuxerr.EIO, err) {
- t.Errorf("got OpenAt error: %v, expected EIO", err)
- }
- })
- }
-}
-
-// TestOpenRenamedFileFails ensures that opening a verity enabled file fails
-// with the verity error if the file and/or the corresponding Merkle tree file
-// has been renamed.
-func TestOpenRenamedFileFails(t *testing.T) {
- testCases := []struct {
- name string
- // The original file is renamed if changeFile is true.
- changeFile bool
- // The Merkle tree file is renamed if changeMerkleFile is true.
- changeMerkleFile bool
- }{
- {
- name: "FileOnly",
- changeFile: true,
- changeMerkleFile: false,
- },
- {
- name: "MerkleOnly",
- changeFile: false,
- changeMerkleFile: true,
- },
- {
- name: "FileAndMerkle",
- changeFile: true,
- changeMerkleFile: true,
- },
- }
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- vfsObj, root, ctx, err := newVerityRoot(t, SHA256)
- if err != nil {
- t.Fatalf("newVerityRoot: %v", err)
- }
-
- filename := "verity-test-file"
- fd, _, err := newFileFD(ctx, t, vfsObj, root, filename, 0644)
- if err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
-
- // Enable verity on the file.
- enableVerity(ctx, t, fd)
-
- newFilename := "renamed-test-file"
- if tc.changeFile {
- if err := dentryFromVD(t, root).renameLowerAt(ctx, vfsObj, filename, newFilename); err != nil {
- t.Fatalf("RenameAt: %v", err)
- }
- }
- if tc.changeMerkleFile {
- if err := dentryFromVD(t, root).renameLowerMerkleAt(ctx, vfsObj, filename, newFilename); err != nil {
- t.Fatalf("RenameAt: %v", err)
- }
- }
-
- // Ensure reopening the verity enabled file fails.
- if _, err = openVerityAt(ctx, vfsObj, root, filename, linux.O_RDONLY, linux.ModeRegular); !linuxerr.Equals(linuxerr.EIO, err) {
- t.Errorf("got OpenAt error: %v, expected EIO", err)
- }
- })
- }
-}
-
-// TestUnmodifiedSymlinkFileReadSucceeds ensures that readlink() for an
-// unmodified verity enabled symlink succeeds.
-func TestUnmodifiedSymlinkFileReadSucceeds(t *testing.T) {
- testCases := []struct {
- name string
- // The symlink target is a directory.
- hasDirectoryTarget bool
- // The symlink target is a directory and contains a regular file which will be
- // used to test walking a symlink.
- testWalk bool
- }{
- {
- name: "RegularFileTarget",
- hasDirectoryTarget: false,
- testWalk: false,
- },
- {
- name: "DirectoryTarget",
- hasDirectoryTarget: true,
- testWalk: false,
- },
- {
- name: "RegularFileInSymlinkDirectory",
- hasDirectoryTarget: true,
- testWalk: true,
- },
- }
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- if tc.testWalk && !tc.hasDirectoryTarget {
- t.Fatalf("Invalid test case: hasDirectoryTarget can't be false when testing symlink walk")
- }
-
- vfsObj, root, ctx, err := newVerityRoot(t, SHA256)
- if err != nil {
- t.Fatalf("newVerityRoot: %v", err)
- }
-
- var target string
- if tc.hasDirectoryTarget {
- target = "verity-test-dir"
- if _, err := newDirFD(ctx, t, vfsObj, root, target, 0644); err != nil {
- t.Fatalf("newDirFD: %v", err)
- }
- } else {
- target = "verity-test-file"
- if _, _, err := newFileFD(ctx, t, vfsObj, root, target, 0644); err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
- }
-
- if tc.testWalk {
- fileInTargetDirectory := target + "/" + "verity-test-file"
- if _, _, err := newFileFD(ctx, t, vfsObj, root, fileInTargetDirectory, 0644); err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
- }
-
- symlink := "verity-test-symlink"
- if err := dentryFromVD(t, root).symlinkLowerAt(ctx, vfsObj, target, symlink); err != nil {
- t.Fatalf("SymlinkAt: %v", err)
- }
-
- fd, err := openVerityAt(ctx, vfsObj, root, symlink, linux.O_NOFOLLOW, linux.ModeRegular)
-
- if err != nil {
- t.Fatalf("openVerityAt symlink: %v", err)
- }
-
- enableVerity(ctx, t, fd)
-
- if _, err := vfsObj.ReadlinkAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(symlink),
- }); err != nil {
- t.Fatalf("ReadlinkAt: %v", err)
- }
-
- if tc.testWalk {
- fileInSymlinkDirectory := symlink + "/verity-test-file"
- // Ensure opening the verity enabled file in the symlink directory succeeds.
- if _, err := openVerityAt(ctx, vfsObj, root, fileInSymlinkDirectory, linux.O_RDONLY, linux.ModeRegular); err != nil {
- t.Errorf("open enabled file failed: %v", err)
- }
- }
- })
- }
-}
-
-// TestDeletedSymlinkFileReadFails ensures that reading the value of a deleted
-// verity enabled symlink fails.
-func TestDeletedSymlinkFileReadFails(t *testing.T) {
- testCases := []struct {
- name string
- // The original symlink is unlinked if deleteLink is true.
- deleteLink bool
- // The Merkle tree file is deleted if deleteMerkleFile is true.
- deleteMerkleFile bool
- // The symlink target is a directory.
- hasDirectoryTarget bool
- // The symlink target is a directory and contains a regular file which will be
- // used to test walking a symlink.
- testWalk bool
- }{
- {
- name: "DeleteLinkRegularFile",
- deleteLink: true,
- deleteMerkleFile: false,
- hasDirectoryTarget: false,
- testWalk: false,
- },
- {
- name: "DeleteMerkleRegFile",
- deleteLink: false,
- deleteMerkleFile: true,
- hasDirectoryTarget: false,
- testWalk: false,
- },
- {
- name: "DeleteLinkAndMerkleRegFile",
- deleteLink: true,
- deleteMerkleFile: true,
- hasDirectoryTarget: false,
- testWalk: false,
- },
- {
- name: "DeleteLinkDirectory",
- deleteLink: true,
- deleteMerkleFile: false,
- hasDirectoryTarget: true,
- testWalk: false,
- },
- {
- name: "DeleteMerkleDirectory",
- deleteLink: false,
- deleteMerkleFile: true,
- hasDirectoryTarget: true,
- testWalk: false,
- },
- {
- name: "DeleteLinkAndMerkleDirectory",
- deleteLink: true,
- deleteMerkleFile: true,
- hasDirectoryTarget: true,
- testWalk: false,
- },
- {
- name: "DeleteLinkDirectoryWalk",
- deleteLink: true,
- deleteMerkleFile: false,
- hasDirectoryTarget: true,
- testWalk: true,
- },
- {
- name: "DeleteMerkleDirectoryWalk",
- deleteLink: false,
- deleteMerkleFile: true,
- hasDirectoryTarget: true,
- testWalk: true,
- },
- {
- name: "DeleteLinkAndMerkleDirectoryWalk",
- deleteLink: true,
- deleteMerkleFile: true,
- hasDirectoryTarget: true,
- testWalk: true,
- },
- }
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- if tc.testWalk && !tc.hasDirectoryTarget {
- t.Fatalf("Invalid test case: hasDirectoryTarget can't be false when testing symlink walk")
- }
-
- vfsObj, root, ctx, err := newVerityRoot(t, SHA256)
- if err != nil {
- t.Fatalf("newVerityRoot: %v", err)
- }
-
- var target string
- if tc.hasDirectoryTarget {
- target = "verity-test-dir"
- if _, err := newDirFD(ctx, t, vfsObj, root, target, 0644); err != nil {
- t.Fatalf("newDirFD: %v", err)
- }
- } else {
- target = "verity-test-file"
- if _, _, err := newFileFD(ctx, t, vfsObj, root, target, 0644); err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
- }
-
- symlink := "verity-test-symlink"
- if err := dentryFromVD(t, root).symlinkLowerAt(ctx, vfsObj, target, symlink); err != nil {
- t.Fatalf("SymlinkAt: %v", err)
- }
-
- fd, err := openVerityAt(ctx, vfsObj, root, symlink, linux.O_NOFOLLOW, linux.ModeRegular)
-
- if err != nil {
- t.Fatalf("openVerityAt symlink: %v", err)
- }
-
- if tc.testWalk {
- fileInTargetDirectory := target + "/" + "verity-test-file"
- if _, _, err := newFileFD(ctx, t, vfsObj, root, fileInTargetDirectory, 0644); err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
- }
-
- enableVerity(ctx, t, fd)
-
- if tc.deleteLink {
- if err := dentryFromVD(t, root).unlinkLowerAt(ctx, vfsObj, symlink); err != nil {
- t.Fatalf("UnlinkAt: %v", err)
- }
- }
- if tc.deleteMerkleFile {
- if err := dentryFromVD(t, root).unlinkLowerMerkleAt(ctx, vfsObj, symlink); err != nil {
- t.Fatalf("UnlinkAt: %v", err)
- }
- }
- if _, err := vfsObj.ReadlinkAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(symlink),
- }); !linuxerr.Equals(linuxerr.EIO, err) {
- t.Fatalf("ReadlinkAt on deleted symlink: got %v, expected EIO", err)
- }
-
- if tc.testWalk {
- fileInSymlinkDirectory := symlink + "/verity-test-file"
- // Ensure opening the verity enabled file in the symlink directory fails.
- if _, err := openVerityAt(ctx, vfsObj, root, fileInSymlinkDirectory, linux.O_RDONLY, linux.ModeRegular); !linuxerr.Equals(linuxerr.EIO, err) {
- t.Errorf("Open on deleted symlink: got %v, expected EIO", err)
- }
- }
- })
- }
-}
-
-// TestModifiedSymlinkFileReadFails ensures that reading the value of a
-// modified verity enabled symlink fails.
-func TestModifiedSymlinkFileReadFails(t *testing.T) {
- testCases := []struct {
- name string
- // The symlink target is a directory.
- hasDirectoryTarget bool
- // The symlink target is a directory and contains a regular file which will be
- // used to test walking a symlink.
- testWalk bool
- }{
- {
- name: "RegularFileTarget",
- hasDirectoryTarget: false,
- testWalk: false,
- },
- {
- name: "DirectoryTarget",
- hasDirectoryTarget: true,
- testWalk: false,
- },
- {
- name: "RegularFileInSymlinkDirectory",
- hasDirectoryTarget: true,
- testWalk: true,
- },
- }
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- if tc.testWalk && !tc.hasDirectoryTarget {
- t.Fatalf("Invalid test case: hasDirectoryTarget can't be false when testing symlink walk")
- }
-
- vfsObj, root, ctx, err := newVerityRoot(t, SHA256)
- if err != nil {
- t.Fatalf("newVerityRoot: %v", err)
- }
-
- var target string
- if tc.hasDirectoryTarget {
- target = "verity-test-dir"
- if _, err := newDirFD(ctx, t, vfsObj, root, target, 0644); err != nil {
- t.Fatalf("newDirFD: %v", err)
- }
- } else {
- target = "verity-test-file"
- if _, _, err := newFileFD(ctx, t, vfsObj, root, target, 0644); err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
- }
-
- // Create symlink which points to target file.
- symlink := "verity-test-symlink"
- if err := dentryFromVD(t, root).symlinkLowerAt(ctx, vfsObj, target, symlink); err != nil {
- t.Fatalf("SymlinkAt: %v", err)
- }
-
- // Open the symlink file to get the fd for the ioctl in the next step.
- fd, err := openVerityAt(ctx, vfsObj, root, symlink, linux.O_NOFOLLOW, linux.ModeRegular)
- if err != nil {
- t.Fatalf("OpenAt symlink: %v", err)
- }
-
- if tc.testWalk {
- fileInTargetDirectory := target + "/" + "verity-test-file"
- if _, _, err := newFileFD(ctx, t, vfsObj, root, fileInTargetDirectory, 0644); err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
- }
-
- enableVerity(ctx, t, fd)
-
- var newTarget string
- if tc.hasDirectoryTarget {
- newTarget = "verity-test-dir-new"
- if _, err := newDirFD(ctx, t, vfsObj, root, newTarget, 0644); err != nil {
- t.Fatalf("newDirFD: %v", err)
- }
- } else {
- newTarget = "verity-test-file-new"
- if _, _, err := newFileFD(ctx, t, vfsObj, root, newTarget, 0644); err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
- }
-
- // Unlink symlink->target.
- if err := dentryFromVD(t, root).unlinkLowerAt(ctx, vfsObj, symlink); err != nil {
- t.Fatalf("UnlinkAt: %v", err)
- }
-
- // Link symlink->newTarget.
- if err := dentryFromVD(t, root).symlinkLowerAt(ctx, vfsObj, newTarget, symlink); err != nil {
- t.Fatalf("SymlinkAt: %v", err)
- }
-
- // Freshen lower dentry for symlink.
- symlinkVD, err := vfsObj.GetDentryAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(symlink),
- }, &vfs.GetDentryOptions{})
- if err != nil {
- t.Fatalf("Failed to get symlink dentry: %v", err)
- }
- symlinkDentry := dentryFromVD(t, symlinkVD)
-
- symlinkLowerVD, err := dentryFromVD(t, root).getLowerAt(ctx, vfsObj, symlink)
- if err != nil {
- t.Fatalf("Failed to get symlink lower dentry: %v", err)
- }
- symlinkDentry.lowerVD = symlinkLowerVD
-
- // Verify ReadlinkAt() fails.
- if _, err := vfsObj.ReadlinkAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(symlink),
- }); !linuxerr.Equals(linuxerr.EIO, err) {
- t.Fatalf("ReadlinkAt succeeded with modified symlink: %v", err)
- }
-
- if tc.testWalk {
- fileInSymlinkDirectory := symlink + "/verity-test-file"
- // Ensure opening the verity enabled file in the symlink directory fails.
- if _, err := openVerityAt(ctx, vfsObj, root, fileInSymlinkDirectory, linux.O_RDONLY, linux.ModeRegular); !linuxerr.Equals(linuxerr.EIO, err) {
- t.Errorf("Open succeeded with modified symlink: %v", err)
- }
- }
- })
- }
-}
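The verity tests above all rely on the same property: flipping any single bit in the lower file or in its Merkle tree file changes a block hash, which no longer chains up to the saved root, so subsequent reads and opens fail with EIO. The sketch below is a deliberately simplified, one-level illustration of that property; it is not gVisor's verity implementation, and the names (`leafHashes`, `root`, `blockSize`) are invented for the example.

```go
// One-level "Merkle" check over fixed-size blocks: flipping any bit in
// data changes a leaf hash, which changes the root, so the check fails.
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

const blockSize = 4096

// leafHashes hashes each fixed-size block of data independently.
func leafHashes(data []byte) [][]byte {
	var leaves [][]byte
	for off := 0; off < len(data); off += blockSize {
		end := off + blockSize
		if end > len(data) {
			end = len(data)
		}
		h := sha256.Sum256(data[off:end])
		leaves = append(leaves, h[:])
	}
	return leaves
}

// root hashes the concatenation of all leaf hashes. A real Merkle tree
// hashes pairs of children recursively, level by level; one level is
// enough to show the tamper-detection property the tests rely on.
func root(data []byte) []byte {
	h := sha256.New()
	for _, leaf := range leafHashes(data) {
		h.Write(leaf)
	}
	return h.Sum(nil)
}

func main() {
	data := bytes.Repeat([]byte("verity"), 2048)
	want := root(data) // analogous to the root saved when verity is enabled

	data[100] ^= 0x01 // flip one bit, as flipRandomBit does in the tests
	if !bytes.Equal(root(data), want) {
		fmt.Println("verification failed, as expected") // the EIO analogue
	}
}
```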
diff --git a/pkg/sentry/fsmetric/BUILD b/pkg/sentry/fsmetric/BUILD
deleted file mode 100644
index 4e86fbdd8..000000000
--- a/pkg/sentry/fsmetric/BUILD
+++ /dev/null
@@ -1,10 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-licenses(["notice"])
-
-go_library(
- name = "fsmetric",
- srcs = ["fsmetric.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = ["//pkg/metric"],
-)
diff --git a/pkg/sentry/fsmetric/fsmetric_state_autogen.go b/pkg/sentry/fsmetric/fsmetric_state_autogen.go
new file mode 100644
index 000000000..61975bbb4
--- /dev/null
+++ b/pkg/sentry/fsmetric/fsmetric_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package fsmetric
diff --git a/pkg/sentry/hostcpu/BUILD b/pkg/sentry/hostcpu/BUILD
deleted file mode 100644
index e6933aa70..000000000
--- a/pkg/sentry/hostcpu/BUILD
+++ /dev/null
@@ -1,20 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "hostcpu",
- srcs = [
- "getcpu_amd64.s",
- "getcpu_arm64.s",
- "hostcpu.go",
- ],
- visibility = ["//:sandbox"],
-)
-
-go_test(
- name = "hostcpu_test",
- size = "small",
- srcs = ["hostcpu_test.go"],
- library = ":hostcpu",
-)
diff --git a/pkg/sentry/hostcpu/hostcpu_state_autogen.go b/pkg/sentry/hostcpu/hostcpu_state_autogen.go
new file mode 100644
index 000000000..97d33d8bf
--- /dev/null
+++ b/pkg/sentry/hostcpu/hostcpu_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package hostcpu
diff --git a/pkg/sentry/hostcpu/hostcpu_test.go b/pkg/sentry/hostcpu/hostcpu_test.go
deleted file mode 100644
index 7d6885c9e..000000000
--- a/pkg/sentry/hostcpu/hostcpu_test.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package hostcpu
-
-import (
- "fmt"
- "testing"
-)
-
-func TestMaxValueInLinuxBitmap(t *testing.T) {
- for _, test := range []struct {
- str string
- max uint64
- }{
- {"0", 0},
- {"0\n", 0},
- {"0,2", 2},
- {"0-63", 63},
- {"0-3,8-11", 11},
- } {
- t.Run(fmt.Sprintf("%q", test.str), func(t *testing.T) {
- max, err := maxValueInLinuxBitmap(test.str)
- if err != nil || max != test.max {
- t.Errorf("maxValueInLinuxBitmap: got (%d, %v), wanted (%d, nil)", max, err, test.max)
- }
- })
- }
-}
-
-func TestMaxValueInLinuxBitmapErrors(t *testing.T) {
- for _, str := range []string{"", "\n"} {
- t.Run(fmt.Sprintf("%q", str), func(t *testing.T) {
- max, err := maxValueInLinuxBitmap(str)
- if err == nil {
- t.Errorf("maxValueInLinuxBitmap: got (%d, nil), wanted (_, error)", max)
- }
- t.Log(err)
- })
- }
-}
diff --git a/pkg/sentry/hostfd/BUILD b/pkg/sentry/hostfd/BUILD
deleted file mode 100644
index db3b0d0a0..000000000
--- a/pkg/sentry/hostfd/BUILD
+++ /dev/null
@@ -1,19 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-licenses(["notice"])
-
-go_library(
- name = "hostfd",
- srcs = [
- "hostfd.go",
- "hostfd_linux.go",
- "hostfd_unsafe.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/log",
- "//pkg/safemem",
- "//pkg/sync",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/sentry/hostfd/hostfd_linux_state_autogen.go b/pkg/sentry/hostfd/hostfd_linux_state_autogen.go
new file mode 100644
index 000000000..256bf270a
--- /dev/null
+++ b/pkg/sentry/hostfd/hostfd_linux_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build go1.1
+
+package hostfd
diff --git a/pkg/sentry/hostfd/hostfd_state_autogen.go b/pkg/sentry/hostfd/hostfd_state_autogen.go
new file mode 100644
index 000000000..9033424d5
--- /dev/null
+++ b/pkg/sentry/hostfd/hostfd_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package hostfd
diff --git a/pkg/sentry/hostfd/hostfd_unsafe_state_autogen.go b/pkg/sentry/hostfd/hostfd_unsafe_state_autogen.go
new file mode 100644
index 000000000..9033424d5
--- /dev/null
+++ b/pkg/sentry/hostfd/hostfd_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package hostfd
diff --git a/pkg/sentry/hostmm/BUILD b/pkg/sentry/hostmm/BUILD
deleted file mode 100644
index 66fa1ad40..000000000
--- a/pkg/sentry/hostmm/BUILD
+++ /dev/null
@@ -1,20 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "hostmm",
- srcs = [
- "cgroup.go",
- "hostmm.go",
- "membarrier.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/fd",
- "//pkg/hostarch",
- "//pkg/log",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/sentry/hostmm/hostmm_state_autogen.go b/pkg/sentry/hostmm/hostmm_state_autogen.go
new file mode 100644
index 000000000..925c56e14
--- /dev/null
+++ b/pkg/sentry/hostmm/hostmm_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package hostmm
diff --git a/pkg/sentry/inet/BUILD b/pkg/sentry/inet/BUILD
deleted file mode 100644
index 5bba9de0b..000000000
--- a/pkg/sentry/inet/BUILD
+++ /dev/null
@@ -1,21 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(
- default_visibility = ["//:sandbox"],
- licenses = ["notice"],
-)
-
-go_library(
- name = "inet",
- srcs = [
- "context.go",
- "inet.go",
- "namespace.go",
- "test_stack.go",
- ],
- deps = [
- "//pkg/context",
- "//pkg/tcpip",
- "//pkg/tcpip/stack",
- ],
-)
diff --git a/pkg/sentry/inet/inet_state_autogen.go b/pkg/sentry/inet/inet_state_autogen.go
new file mode 100644
index 000000000..30af4fb91
--- /dev/null
+++ b/pkg/sentry/inet/inet_state_autogen.go
@@ -0,0 +1,70 @@
+// automatically generated by stateify.
+
+package inet
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (t *TCPBufferSize) StateTypeName() string {
+ return "pkg/sentry/inet.TCPBufferSize"
+}
+
+func (t *TCPBufferSize) StateFields() []string {
+ return []string{
+ "Min",
+ "Default",
+ "Max",
+ }
+}
+
+func (t *TCPBufferSize) beforeSave() {}
+
+// +checklocksignore
+func (t *TCPBufferSize) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ stateSinkObject.Save(0, &t.Min)
+ stateSinkObject.Save(1, &t.Default)
+ stateSinkObject.Save(2, &t.Max)
+}
+
+func (t *TCPBufferSize) afterLoad() {}
+
+// +checklocksignore
+func (t *TCPBufferSize) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.Min)
+ stateSourceObject.Load(1, &t.Default)
+ stateSourceObject.Load(2, &t.Max)
+}
+
+func (n *Namespace) StateTypeName() string {
+ return "pkg/sentry/inet.Namespace"
+}
+
+func (n *Namespace) StateFields() []string {
+ return []string{
+ "creator",
+ "isRoot",
+ }
+}
+
+func (n *Namespace) beforeSave() {}
+
+// +checklocksignore
+func (n *Namespace) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.creator)
+ stateSinkObject.Save(1, &n.isRoot)
+}
+
+// +checklocksignore
+func (n *Namespace) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.LoadWait(0, &n.creator)
+ stateSourceObject.Load(1, &n.isRoot)
+ stateSourceObject.AfterLoad(n.afterLoad)
+}
+
+func init() {
+ state.Register((*TCPBufferSize)(nil))
+ state.Register((*Namespace)(nil))
+}
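The `StateSave`/`StateLoad` methods above are emitted by stateify for types annotated with `// +stateify savable` (the annotation is visible on the `AtomicPtr` template later in this diff). Below is a rough sketch of the hand-written side that drives this generation; the field types are reconstructed from the generated `StateFields` list and may not match `pkg/sentry/inet/inet.go` exactly.

```go
package inet

// TCPBufferSize describes the minimum, default and maximum TCP buffer
// sizes. Marking it savable is what makes stateify generate the
// StateTypeName/StateFields/StateSave/StateLoad methods shown above.
//
// +stateify savable
type TCPBufferSize struct {
	Min     int
	Default int
	Max     int
}
```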
diff --git a/pkg/sentry/kernel/BUILD b/pkg/sentry/kernel/BUILD
deleted file mode 100644
index 26614b029..000000000
--- a/pkg/sentry/kernel/BUILD
+++ /dev/null
@@ -1,317 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test", "proto_library")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "pending_signals_list",
- out = "pending_signals_list.go",
- package = "kernel",
- prefix = "pendingSignal",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*pendingSignal",
- "Linker": "*pendingSignal",
- },
-)
-
-go_template_instance(
- name = "process_group_list",
- out = "process_group_list.go",
- package = "kernel",
- prefix = "processGroup",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*ProcessGroup",
- "Linker": "*ProcessGroup",
- },
-)
-
-go_template_instance(
- name = "seqatomic_taskgoroutineschedinfo",
- out = "seqatomic_taskgoroutineschedinfo_unsafe.go",
- package = "kernel",
- suffix = "TaskGoroutineSchedInfo",
- template = "//pkg/sync/seqatomic:generic_seqatomic",
- types = {
- "Value": "TaskGoroutineSchedInfo",
- },
-)
-
-go_template_instance(
- name = "session_list",
- out = "session_list.go",
- package = "kernel",
- prefix = "session",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*Session",
- "Linker": "*Session",
- },
-)
-
-go_template_instance(
- name = "task_list",
- out = "task_list.go",
- package = "kernel",
- prefix = "task",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*Task",
- "Linker": "*Task",
- },
-)
-
-go_template_instance(
- name = "socket_list",
- out = "socket_list.go",
- package = "kernel",
- prefix = "socket",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*SocketRecordVFS1",
- "Linker": "*SocketRecordVFS1",
- },
-)
-
-go_template_instance(
- name = "fd_table_refs",
- out = "fd_table_refs.go",
- package = "kernel",
- prefix = "FDTable",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "FDTable",
- },
-)
-
-go_template_instance(
- name = "fs_context_refs",
- out = "fs_context_refs.go",
- package = "kernel",
- prefix = "FSContext",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "FSContext",
- },
-)
-
-go_template_instance(
- name = "ipc_namespace_refs",
- out = "ipc_namespace_refs.go",
- package = "kernel",
- prefix = "IPCNamespace",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "IPCNamespace",
- },
-)
-
-go_template_instance(
- name = "process_group_refs",
- out = "process_group_refs.go",
- package = "kernel",
- prefix = "ProcessGroup",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "ProcessGroup",
- },
-)
-
-go_template_instance(
- name = "session_refs",
- out = "session_refs.go",
- package = "kernel",
- prefix = "Session",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "Session",
- },
-)
-
-proto_library(
- name = "uncaught_signal",
- srcs = ["uncaught_signal.proto"],
- visibility = ["//visibility:public"],
- deps = ["//pkg/sentry/arch:registers_proto"],
-)
-
-go_library(
- name = "kernel",
- srcs = [
- "abstract_socket_namespace.go",
- "aio.go",
- "cgroup.go",
- "context.go",
- "fd_table.go",
- "fd_table_refs.go",
- "fd_table_unsafe.go",
- "fs_context.go",
- "fs_context_refs.go",
- "ipc_namespace.go",
- "ipc_namespace_refs.go",
- "kcov.go",
- "kcov_unsafe.go",
- "kernel.go",
- "kernel_opts.go",
- "kernel_state.go",
- "pending_signals.go",
- "pending_signals_list.go",
- "pending_signals_state.go",
- "posixtimer.go",
- "process_group_list.go",
- "process_group_refs.go",
- "ptrace.go",
- "ptrace_amd64.go",
- "ptrace_arm64.go",
- "rseq.go",
- "seccomp.go",
- "seqatomic_taskgoroutineschedinfo_unsafe.go",
- "session_list.go",
- "session_refs.go",
- "sessions.go",
- "signal.go",
- "signal_handlers.go",
- "socket_list.go",
- "syscalls.go",
- "syscalls_state.go",
- "syslog.go",
- "task.go",
- "task_acct.go",
- "task_block.go",
- "task_cgroup.go",
- "task_clone.go",
- "task_context.go",
- "task_exec.go",
- "task_exit.go",
- "task_futex.go",
- "task_identity.go",
- "task_image.go",
- "task_list.go",
- "task_log.go",
- "task_net.go",
- "task_run.go",
- "task_sched.go",
- "task_signals.go",
- "task_start.go",
- "task_stop.go",
- "task_syscall.go",
- "task_usermem.go",
- "task_work.go",
- "thread_group.go",
- "threads.go",
- "timekeeper.go",
- "timekeeper_state.go",
- "tty.go",
- "uts_namespace.go",
- "vdso.go",
- "version.go",
- ],
- imports = [
- "gvisor.dev/gvisor/pkg/bpf",
- "gvisor.dev/gvisor/pkg/sentry/device",
- "gvisor.dev/gvisor/pkg/tcpip",
- ],
- marshal = True,
- visibility = ["//:sandbox"],
- deps = [
- ":uncaught_signal_go_proto",
- "//pkg/abi",
- "//pkg/abi/linux",
- "//pkg/abi/linux/errno",
- "//pkg/amutex",
- "//pkg/bits",
- "//pkg/bpf",
- "//pkg/cleanup",
- "//pkg/context",
- "//pkg/coverage",
- "//pkg/cpuid",
- "//pkg/errors",
- "//pkg/errors/linuxerr",
- "//pkg/eventchannel",
- "//pkg/fspath",
- "//pkg/goid",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/marshal",
- "//pkg/marshal/primitive",
- "//pkg/metric",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/safemem",
- "//pkg/secio",
- "//pkg/sentry/arch",
- "//pkg/sentry/device",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/lock",
- "//pkg/sentry/fs/timerfd",
- "//pkg/sentry/fsbridge",
- "//pkg/sentry/fsimpl/kernfs",
- "//pkg/sentry/fsimpl/pipefs",
- "//pkg/sentry/fsimpl/sockfs",
- "//pkg/sentry/fsimpl/timerfd",
- "//pkg/sentry/fsimpl/tmpfs",
- "//pkg/sentry/hostcpu",
- "//pkg/sentry/inet",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/epoll",
- "//pkg/sentry/kernel/futex",
- "//pkg/sentry/kernel/sched",
- "//pkg/sentry/kernel/semaphore",
- "//pkg/sentry/kernel/shm",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/limits",
- "//pkg/sentry/loader",
- "//pkg/sentry/memmap",
- "//pkg/sentry/mm",
- "//pkg/sentry/pgalloc",
- "//pkg/sentry/platform",
- "//pkg/sentry/socket/netlink/port",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/time",
- "//pkg/sentry/unimpl",
- "//pkg/sentry/unimpl:unimplemented_syscall_go_proto",
- "//pkg/sentry/uniqueid",
- "//pkg/sentry/usage",
- "//pkg/sentry/vfs",
- "//pkg/state",
- "//pkg/state/statefile",
- "//pkg/state/wire",
- "//pkg/sync",
- "//pkg/syserr",
- "//pkg/syserror",
- "//pkg/tcpip",
- "//pkg/tcpip/stack",
- "//pkg/usermem",
- "//pkg/waiter",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "kernel_test",
- size = "small",
- srcs = [
- "fd_table_test.go",
- "table_test.go",
- "task_test.go",
- "timekeeper_test.go",
- ],
- library = ":kernel",
- deps = [
- "//pkg/abi",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/sentry/arch",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/filetest",
- "//pkg/sentry/kernel/sched",
- "//pkg/sentry/limits",
- "//pkg/sentry/pgalloc",
- "//pkg/sentry/time",
- "//pkg/sentry/usage",
- "//pkg/sync",
- ],
-)
diff --git a/pkg/sentry/kernel/README.md b/pkg/sentry/kernel/README.md
deleted file mode 100644
index 427311be8..000000000
--- a/pkg/sentry/kernel/README.md
+++ /dev/null
@@ -1,108 +0,0 @@
-This package contains:
-
-- A (partial) emulation of the "core Linux kernel", which governs task
- execution and scheduling, system call dispatch, and signal handling. See
- below for details.
-
-- The top-level interface for the sentry's Linux kernel emulation in general,
- used by the `main` function of all versions of the sentry. This interface
- revolves around the `Kernel` type (defined in `kernel.go`).
-
-# Background
-
-In Linux, each schedulable context is referred to interchangeably as a "task" or
-"thread". Tasks can be divided into userspace and kernel tasks. In the sentry,
-scheduling is managed by the Go runtime, so each schedulable context is a
-goroutine; only "userspace" (application) contexts are referred to as tasks, and
-represented by Task objects. (From this point forward, "task" refers to the
-sentry's notion of a task unless otherwise specified.)
-
-At a high level, Linux application threads can be thought of as repeating a "run
-loop":
-
-- Some amount of application code is executed in userspace.
-
-- A trap (explicit syscall invocation, hardware interrupt or exception, etc.)
- causes control flow to switch to the kernel.
-
-- Some amount of kernel code is executed in kernelspace, e.g. to handle the
- cause of the trap.
-
-- The kernel "returns from the trap" into application code.
-
-Analogously, each task in the sentry is associated with a *task goroutine* that
-executes that task's run loop (`Task.run` in `task_run.go`). However, the
-sentry's task run loop differs in structure in order to support saving execution
-state to, and resuming execution from, checkpoints.
-
-While in kernelspace, a Linux thread can be descheduled (cease execution) in a
-variety of ways:
-
-- It can yield or be preempted, becoming temporarily descheduled but still
- runnable. At present, the sentry delegates scheduling of runnable threads to
- the Go runtime.
-
-- It can exit, becoming permanently descheduled. The sentry's equivalent is
- returning from `Task.run`, terminating the task goroutine.
-
-- It can enter interruptible sleep, a state in which it can be woken by a
- caller-defined wakeup or the receipt of a signal. In the sentry,
- interruptible sleep (which is ambiguously referred to as *blocking*) is
- implemented by making all events that can end blocking (including signal
- notifications) communicated via Go channels and using `select` to multiplex
- wakeup sources; see `task_block.go`.
-
-- It can enter uninterruptible sleep, a state in which it can only be woken by
- a caller-defined wakeup. Killable sleep is a closely related variant in
- which the task can also be woken by SIGKILL. (These definitions also include
- Linux's "group-stopped" (`TASK_STOPPED`) and "ptrace-stopped"
- (`TASK_TRACED`) states.)
-
-To maximize compatibility with Linux, sentry checkpointing appears as a spurious
-signal-delivery interrupt on all tasks; interrupted system calls return `EINTR`
-or are automatically restarted as usual. However, these semantics require that
-uninterruptible and killable sleeps do not appear to be interrupted. In other
-words, the state of the task, including its progress through the interrupted
-operation, must be preserved by checkpointing. For many such sleeps, the wakeup
-condition is application-controlled, making it infeasible to wait for the sleep
-to end before checkpointing. Instead, we must support checkpointing progress
-through sleeping operations.
-
-# Implementation
-
-We break the task's control flow graph into *states*, delimited by:
-
-1. Points where uninterruptible and killable sleeps may occur. For example,
- there exists a state boundary between signal dequeueing and signal delivery
- because there may be an intervening ptrace signal-delivery-stop.
-
-2. Points where sleep-induced branches may "rejoin" normal execution. For
- example, the syscall exit state exists because it can be reached immediately
- following a synchronous syscall, or after a task that is sleeping in
- `execve()` or `vfork()` resumes execution.
-
-3. Points containing large branches. This is strictly for organizational
- purposes. For example, the state that processes interrupt-signaled
- conditions is kept separate from the main "app" state to reduce the size of
- the latter.
-
-4. `SyscallReinvoke`, which does not correspond to anything in Linux, and
- exists solely to serve the autosave feature.
-
-![dot -Tpng -Goverlap=false -orun_states.png run_states.dot](g3doc/run_states.png "Task control flow graph")
-
-States before which a stop may occur are represented as implementations of the
-`taskRunState` interface named `run(state)`, allowing them to be saved and
-restored. States that cannot be immediately preceded by a stop are simply `Task`
-methods named `do(state)`.
-
-Conditions that can require task goroutines to cease execution for unknown
-lengths of time are called *stops*. Stops are divided into *internal stops*,
-which are stops whose start and end conditions are implemented within the
-sentry, and *external stops*, which are stops whose start and end conditions are
-not known to the sentry. Hence all uninterruptible and killable sleeps are
-internal stops, and the existence of a pending checkpoint operation is an
-external stop. Internal stops are reified into instances of the `TaskStop` type,
-while external stops are merely counted. The task run loop alternates between
-checking for stops and advancing the task's state. This allows checkpointing to
-hold tasks in a stopped state while waiting for all tasks in the system to stop.
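The Background and Implementation sections above describe each task as a goroutine walking a graph of run states, and interruptible sleep as a `select` over channels that carry every possible wakeup, including interrupts. The sketch below shows that shape in isolation; the names (`miniTask`, `runState`, `runApp`, `runBlock`) are invented for the example and are not the sentry's actual types.

```go
package main

import (
	"fmt"
	"time"
)

// runState is one node of the task control flow graph. Executing a state
// returns the next state, or nil when the task exits.
type runState interface {
	execute(t *miniTask) runState
}

// miniTask stands in for a Task: every event that can end blocking,
// including interrupts, is delivered on a channel.
type miniTask struct {
	interrupt chan struct{} // signal/checkpoint notifications
	work      chan string   // a caller-defined wakeup source
}

// run is the task goroutine's loop: advance the state machine until the
// task exits. The real loop also checks for stops between states.
func (t *miniTask) run(s runState) {
	for s != nil {
		s = s.execute(t)
	}
}

// runApp stands in for "execute application code until a trap".
type runApp struct{}

func (runApp) execute(t *miniTask) runState { return runBlock{} }

// runBlock is interruptible sleep: select multiplexes all wakeup sources.
type runBlock struct{}

func (runBlock) execute(t *miniTask) runState {
	select {
	case msg := <-t.work:
		fmt.Println("woken by:", msg)
		return runApp{}
	case <-t.interrupt:
		fmt.Println("interrupted (the EINTR-style path)")
		return nil // exit, for the sake of the example
	}
}

func main() {
	t := &miniTask{interrupt: make(chan struct{}), work: make(chan string, 1)}
	t.work <- "timer fired"
	go func() {
		time.Sleep(10 * time.Millisecond)
		close(t.interrupt)
	}()
	t.run(runApp{})
}
```

In the sentry itself, each `runState.execute` is additionally savable so that a task blocked in this way can be checkpointed and resumed without the application observing a spurious failure, which is the constraint the README's checkpointing discussion motivates.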
diff --git a/pkg/sentry/kernel/auth/BUILD b/pkg/sentry/kernel/auth/BUILD
deleted file mode 100644
index 7a1a36454..000000000
--- a/pkg/sentry/kernel/auth/BUILD
+++ /dev/null
@@ -1,71 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "atomicptr_credentials",
- out = "atomicptr_credentials_unsafe.go",
- package = "auth",
- suffix = "Credentials",
- template = "//pkg/sync/atomicptr:generic_atomicptr",
- types = {
- "Value": "Credentials",
- },
-)
-
-go_template_instance(
- name = "id_map_range",
- out = "id_map_range.go",
- package = "auth",
- prefix = "idMap",
- template = "//pkg/segment:generic_range",
- types = {
- "T": "uint32",
- },
-)
-
-go_template_instance(
- name = "id_map_set",
- out = "id_map_set.go",
- consts = {
- "minDegree": "3",
- },
- package = "auth",
- prefix = "idMap",
- template = "//pkg/segment:generic_set",
- types = {
- "Key": "uint32",
- "Range": "idMapRange",
- "Value": "uint32",
- "Functions": "idMapFunctions",
- },
-)
-
-go_library(
- name = "auth",
- srcs = [
- "atomicptr_credentials_unsafe.go",
- "auth.go",
- "capability_set.go",
- "context.go",
- "credentials.go",
- "id.go",
- "id_map.go",
- "id_map_functions.go",
- "id_map_range.go",
- "id_map_set.go",
- "user_namespace.go",
- ],
- marshal = True,
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/bits",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/log",
- "//pkg/sync",
- "//pkg/syserror",
- ],
-)
diff --git a/pkg/sync/atomicptr/generic_atomicptr_unsafe.go b/pkg/sentry/kernel/auth/atomicptr_credentials_unsafe.go
index 82b6df18c..4535c958f 100644
--- a/pkg/sync/atomicptr/generic_atomicptr_unsafe.go
+++ b/pkg/sentry/kernel/auth/atomicptr_credentials_unsafe.go
@@ -1,20 +1,10 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package seqatomic doesn't exist. This file must be instantiated using the
-// go_template_instance rule in tools/go_generics/defs.bzl.
-package seqatomic
+package auth
import (
"sync/atomic"
"unsafe"
)
-// Value is a required type parameter.
-type Value struct{}
-
// An AtomicPtr is a pointer to a value of type Value that can be atomically
// loaded and stored. The zero value of an AtomicPtr represents nil.
//
@@ -23,25 +13,25 @@ type Value struct{}
// this case, do `dst.Store(src.Load())` instead.
//
// +stateify savable
-type AtomicPtr struct {
- ptr unsafe.Pointer `state:".(*Value)"`
+type AtomicPtrCredentials struct {
+ ptr unsafe.Pointer `state:".(*Credentials)"`
}
-func (p *AtomicPtr) savePtr() *Value {
+func (p *AtomicPtrCredentials) savePtr() *Credentials {
return p.Load()
}
-func (p *AtomicPtr) loadPtr(v *Value) {
+func (p *AtomicPtrCredentials) loadPtr(v *Credentials) {
p.Store(v)
}
// Load returns the value set by the most recent Store. It returns nil if there
// has been no previous call to Store.
-func (p *AtomicPtr) Load() *Value {
- return (*Value)(atomic.LoadPointer(&p.ptr))
+func (p *AtomicPtrCredentials) Load() *Credentials {
+ return (*Credentials)(atomic.LoadPointer(&p.ptr))
}
// Store sets the value returned by Load to x.
-func (p *AtomicPtr) Store(x *Value) {
+func (p *AtomicPtrCredentials) Store(x *Credentials) {
atomic.StorePointer(&p.ptr, (unsafe.Pointer)(x))
}
diff --git a/pkg/sentry/kernel/auth/auth_abi_autogen_unsafe.go b/pkg/sentry/kernel/auth/auth_abi_autogen_unsafe.go
new file mode 100644
index 000000000..8ec71bc50
--- /dev/null
+++ b/pkg/sentry/kernel/auth/auth_abi_autogen_unsafe.go
@@ -0,0 +1,280 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// If there are issues with build tag aggregation, see
+// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here
+// come from the input set of files used to generate this file. This input set
+// is filtered based on pre-defined file suffixes related to build tags, see
+// tools/defs.bzl:calculate_sets().
+
+package auth
+
+import (
+ "gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/hostarch"
+ "gvisor.dev/gvisor/pkg/marshal"
+ "io"
+ "reflect"
+ "runtime"
+ "unsafe"
+)
+
+// Marshallable types used by this file.
+var _ marshal.Marshallable = (*GID)(nil)
+var _ marshal.Marshallable = (*UID)(nil)
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (gid *GID) SizeBytes() int {
+ return 4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (gid *GID) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(*gid))
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (gid *GID) UnmarshalBytes(src []byte) {
+ *gid = GID(uint32(hostarch.ByteOrder.Uint32(src[:4])))
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (gid *GID) Packed() bool {
+ // Scalar newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (gid *GID) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(gid), uintptr(gid.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (gid *GID) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(gid), unsafe.Pointer(&src[0]), uintptr(gid.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (gid *GID) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(gid)))
+ hdr.Len = gid.SizeBytes()
+ hdr.Cap = gid.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that gid
+ // must live until the use above.
+ runtime.KeepAlive(gid) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (gid *GID) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return gid.CopyOutN(cc, addr, gid.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (gid *GID) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(gid)))
+ hdr.Len = gid.SizeBytes()
+ hdr.Cap = gid.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that gid
+ // must live until the use above.
+ runtime.KeepAlive(gid) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (gid *GID) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(gid)))
+ hdr.Len = gid.SizeBytes()
+ hdr.Cap = gid.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that gid
+ // must live until the use above.
+ runtime.KeepAlive(gid) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// CopyGIDSliceIn copies in a slice of GID objects from the task's memory.
+//go:nosplit
+func CopyGIDSliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []GID) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*GID)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&dst)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that dst
+ // must live until the use above.
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyGIDSliceOut copies a slice of GID objects to the task's memory.
+//go:nosplit
+func CopyGIDSliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []GID) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*GID)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&src)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyOutBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that src
+ // must live until the use above.
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// MarshalUnsafeGIDSlice is like GID.MarshalUnsafe, but for a []GID.
+func MarshalUnsafeGIDSlice(src []GID, dst []byte) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*GID)(nil).SizeBytes()
+
+ dst = dst[:size*count]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(dst)))
+ return size*count, nil
+}
+
+// UnmarshalUnsafeGIDSlice is like GID.UnmarshalUnsafe, but for a []GID.
+func UnmarshalUnsafeGIDSlice(dst []GID, src []byte) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*GID)(nil).SizeBytes()
+
+ src = src[:(size*count)]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(src)))
+ return size*count, nil
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (uid *UID) SizeBytes() int {
+ return 4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (uid *UID) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(*uid))
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (uid *UID) UnmarshalBytes(src []byte) {
+ *uid = UID(uint32(hostarch.ByteOrder.Uint32(src[:4])))
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (uid *UID) Packed() bool {
+ // Scalar newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (uid *UID) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(uid), uintptr(uid.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (uid *UID) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(uid), unsafe.Pointer(&src[0]), uintptr(uid.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (uid *UID) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(uid)))
+ hdr.Len = uid.SizeBytes()
+ hdr.Cap = uid.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that uid
+ // must live until the use above.
+ runtime.KeepAlive(uid) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (uid *UID) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return uid.CopyOutN(cc, addr, uid.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (uid *UID) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(uid)))
+ hdr.Len = uid.SizeBytes()
+ hdr.Cap = uid.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that uid
+ // must live until the use above.
+ runtime.KeepAlive(uid) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (uid *UID) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(uid)))
+ hdr.Len = uid.SizeBytes()
+ hdr.Cap = uid.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that uid
+ // must live until the use above.
+ runtime.KeepAlive(uid) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
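The generated `CopyIn`/`CopyOut` and slice helpers above are what sentry syscall code calls to move `UID`/`GID` values between task memory and the sentry. The snippet below is only an illustration of a call site, using the `CopyGIDSliceIn` signature from the generated code; the surrounding package, function name, and error handling are hypothetical.

```go
// Illustrative call site only; real users of these helpers live under
// pkg/sentry/syscalls. Only CopyGIDSliceIn's signature is taken from the
// generated code above.
package example

import (
	"gvisor.dev/gvisor/pkg/hostarch"
	"gvisor.dev/gvisor/pkg/marshal"
	"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
)

// readGroupList copies size GIDs from application memory at addr, the way
// a setgroups-style handler might.
func readGroupList(cc marshal.CopyContext, addr hostarch.Addr, size int) ([]auth.GID, error) {
	gids := make([]auth.GID, size)
	// CopyGIDSliceIn copies size*4 bytes from addr and unmarshals them.
	if _, err := auth.CopyGIDSliceIn(cc, addr, gids); err != nil {
		return nil, err
	}
	return gids, nil
}
```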
diff --git a/pkg/sentry/kernel/auth/auth_state_autogen.go b/pkg/sentry/kernel/auth/auth_state_autogen.go
new file mode 100644
index 000000000..5a7f55933
--- /dev/null
+++ b/pkg/sentry/kernel/auth/auth_state_autogen.go
@@ -0,0 +1,280 @@
+// automatically generated by stateify.
+
+package auth
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (c *Credentials) StateTypeName() string {
+ return "pkg/sentry/kernel/auth.Credentials"
+}
+
+func (c *Credentials) StateFields() []string {
+ return []string{
+ "RealKUID",
+ "EffectiveKUID",
+ "SavedKUID",
+ "RealKGID",
+ "EffectiveKGID",
+ "SavedKGID",
+ "ExtraKGIDs",
+ "PermittedCaps",
+ "InheritableCaps",
+ "EffectiveCaps",
+ "BoundingCaps",
+ "KeepCaps",
+ "UserNamespace",
+ }
+}
+
+func (c *Credentials) beforeSave() {}
+
+// +checklocksignore
+func (c *Credentials) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.RealKUID)
+ stateSinkObject.Save(1, &c.EffectiveKUID)
+ stateSinkObject.Save(2, &c.SavedKUID)
+ stateSinkObject.Save(3, &c.RealKGID)
+ stateSinkObject.Save(4, &c.EffectiveKGID)
+ stateSinkObject.Save(5, &c.SavedKGID)
+ stateSinkObject.Save(6, &c.ExtraKGIDs)
+ stateSinkObject.Save(7, &c.PermittedCaps)
+ stateSinkObject.Save(8, &c.InheritableCaps)
+ stateSinkObject.Save(9, &c.EffectiveCaps)
+ stateSinkObject.Save(10, &c.BoundingCaps)
+ stateSinkObject.Save(11, &c.KeepCaps)
+ stateSinkObject.Save(12, &c.UserNamespace)
+}
+
+func (c *Credentials) afterLoad() {}
+
+// +checklocksignore
+func (c *Credentials) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.RealKUID)
+ stateSourceObject.Load(1, &c.EffectiveKUID)
+ stateSourceObject.Load(2, &c.SavedKUID)
+ stateSourceObject.Load(3, &c.RealKGID)
+ stateSourceObject.Load(4, &c.EffectiveKGID)
+ stateSourceObject.Load(5, &c.SavedKGID)
+ stateSourceObject.Load(6, &c.ExtraKGIDs)
+ stateSourceObject.Load(7, &c.PermittedCaps)
+ stateSourceObject.Load(8, &c.InheritableCaps)
+ stateSourceObject.Load(9, &c.EffectiveCaps)
+ stateSourceObject.Load(10, &c.BoundingCaps)
+ stateSourceObject.Load(11, &c.KeepCaps)
+ stateSourceObject.Load(12, &c.UserNamespace)
+}
+
+func (i *IDMapEntry) StateTypeName() string {
+ return "pkg/sentry/kernel/auth.IDMapEntry"
+}
+
+func (i *IDMapEntry) StateFields() []string {
+ return []string{
+ "FirstID",
+ "FirstParentID",
+ "Length",
+ }
+}
+
+func (i *IDMapEntry) beforeSave() {}
+
+// +checklocksignore
+func (i *IDMapEntry) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.FirstID)
+ stateSinkObject.Save(1, &i.FirstParentID)
+ stateSinkObject.Save(2, &i.Length)
+}
+
+func (i *IDMapEntry) afterLoad() {}
+
+// +checklocksignore
+func (i *IDMapEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.FirstID)
+ stateSourceObject.Load(1, &i.FirstParentID)
+ stateSourceObject.Load(2, &i.Length)
+}
+
+func (r *idMapRange) StateTypeName() string {
+ return "pkg/sentry/kernel/auth.idMapRange"
+}
+
+func (r *idMapRange) StateFields() []string {
+ return []string{
+ "Start",
+ "End",
+ }
+}
+
+func (r *idMapRange) beforeSave() {}
+
+// +checklocksignore
+func (r *idMapRange) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.Start)
+ stateSinkObject.Save(1, &r.End)
+}
+
+func (r *idMapRange) afterLoad() {}
+
+// +checklocksignore
+func (r *idMapRange) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.Start)
+ stateSourceObject.Load(1, &r.End)
+}
+
+func (s *idMapSet) StateTypeName() string {
+ return "pkg/sentry/kernel/auth.idMapSet"
+}
+
+func (s *idMapSet) StateFields() []string {
+ return []string{
+ "root",
+ }
+}
+
+func (s *idMapSet) beforeSave() {}
+
+// +checklocksignore
+func (s *idMapSet) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ var rootValue *idMapSegmentDataSlices = s.saveRoot()
+ stateSinkObject.SaveValue(0, rootValue)
+}
+
+func (s *idMapSet) afterLoad() {}
+
+// +checklocksignore
+func (s *idMapSet) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.LoadValue(0, new(*idMapSegmentDataSlices), func(y interface{}) { s.loadRoot(y.(*idMapSegmentDataSlices)) })
+}
+
+func (n *idMapnode) StateTypeName() string {
+ return "pkg/sentry/kernel/auth.idMapnode"
+}
+
+func (n *idMapnode) StateFields() []string {
+ return []string{
+ "nrSegments",
+ "parent",
+ "parentIndex",
+ "hasChildren",
+ "maxGap",
+ "keys",
+ "values",
+ "children",
+ }
+}
+
+func (n *idMapnode) beforeSave() {}
+
+// +checklocksignore
+func (n *idMapnode) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.nrSegments)
+ stateSinkObject.Save(1, &n.parent)
+ stateSinkObject.Save(2, &n.parentIndex)
+ stateSinkObject.Save(3, &n.hasChildren)
+ stateSinkObject.Save(4, &n.maxGap)
+ stateSinkObject.Save(5, &n.keys)
+ stateSinkObject.Save(6, &n.values)
+ stateSinkObject.Save(7, &n.children)
+}
+
+func (n *idMapnode) afterLoad() {}
+
+// +checklocksignore
+func (n *idMapnode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.nrSegments)
+ stateSourceObject.Load(1, &n.parent)
+ stateSourceObject.Load(2, &n.parentIndex)
+ stateSourceObject.Load(3, &n.hasChildren)
+ stateSourceObject.Load(4, &n.maxGap)
+ stateSourceObject.Load(5, &n.keys)
+ stateSourceObject.Load(6, &n.values)
+ stateSourceObject.Load(7, &n.children)
+}
+
+func (i *idMapSegmentDataSlices) StateTypeName() string {
+ return "pkg/sentry/kernel/auth.idMapSegmentDataSlices"
+}
+
+func (i *idMapSegmentDataSlices) StateFields() []string {
+ return []string{
+ "Start",
+ "End",
+ "Values",
+ }
+}
+
+func (i *idMapSegmentDataSlices) beforeSave() {}
+
+// +checklocksignore
+func (i *idMapSegmentDataSlices) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.Start)
+ stateSinkObject.Save(1, &i.End)
+ stateSinkObject.Save(2, &i.Values)
+}
+
+func (i *idMapSegmentDataSlices) afterLoad() {}
+
+// +checklocksignore
+func (i *idMapSegmentDataSlices) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.Start)
+ stateSourceObject.Load(1, &i.End)
+ stateSourceObject.Load(2, &i.Values)
+}
+
+func (ns *UserNamespace) StateTypeName() string {
+ return "pkg/sentry/kernel/auth.UserNamespace"
+}
+
+func (ns *UserNamespace) StateFields() []string {
+ return []string{
+ "parent",
+ "owner",
+ "uidMapFromParent",
+ "uidMapToParent",
+ "gidMapFromParent",
+ "gidMapToParent",
+ }
+}
+
+func (ns *UserNamespace) beforeSave() {}
+
+// +checklocksignore
+func (ns *UserNamespace) StateSave(stateSinkObject state.Sink) {
+ ns.beforeSave()
+ stateSinkObject.Save(0, &ns.parent)
+ stateSinkObject.Save(1, &ns.owner)
+ stateSinkObject.Save(2, &ns.uidMapFromParent)
+ stateSinkObject.Save(3, &ns.uidMapToParent)
+ stateSinkObject.Save(4, &ns.gidMapFromParent)
+ stateSinkObject.Save(5, &ns.gidMapToParent)
+}
+
+func (ns *UserNamespace) afterLoad() {}
+
+// +checklocksignore
+func (ns *UserNamespace) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &ns.parent)
+ stateSourceObject.Load(1, &ns.owner)
+ stateSourceObject.Load(2, &ns.uidMapFromParent)
+ stateSourceObject.Load(3, &ns.uidMapToParent)
+ stateSourceObject.Load(4, &ns.gidMapFromParent)
+ stateSourceObject.Load(5, &ns.gidMapToParent)
+}
+
+func init() {
+ state.Register((*Credentials)(nil))
+ state.Register((*IDMapEntry)(nil))
+ state.Register((*idMapRange)(nil))
+ state.Register((*idMapSet)(nil))
+ state.Register((*idMapnode)(nil))
+ state.Register((*idMapSegmentDataSlices)(nil))
+ state.Register((*UserNamespace)(nil))
+}
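The generated methods above all follow the same stateify contract: StateTypeName and StateFields describe the type, StateSave writes each field to a state.Sink at a fixed index, StateLoad reads the fields back from a state.Source at the same indices, and init registers the type with the state package. As a minimal hand-written sketch of that contract (the counter type and package name are invented for illustration and are not part of this change):

package example

import "gvisor.dev/gvisor/pkg/state"

// counter is a hypothetical type used only to illustrate the save/restore
// contract implemented by the generated code above.
//
// +stateify savable
type counter struct {
	hits uint64
}

func (c *counter) StateTypeName() string { return "example.counter" }

func (c *counter) StateFields() []string { return []string{"hits"} }

func (c *counter) beforeSave() {}

// +checklocksignore
func (c *counter) StateSave(sink state.Sink) {
	c.beforeSave()
	sink.Save(0, &c.hits) // Index 0 corresponds to StateFields()[0].
}

func (c *counter) afterLoad() {}

// +checklocksignore
func (c *counter) StateLoad(source state.Source) {
	source.Load(0, &c.hits)
}

func init() {
	// Registration makes the type known to the checkpoint/restore machinery.
	state.Register((*counter)(nil))
}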
diff --git a/pkg/sentry/kernel/auth/auth_unsafe_abi_autogen_unsafe.go b/pkg/sentry/kernel/auth/auth_unsafe_abi_autogen_unsafe.go
new file mode 100644
index 000000000..7f4cbed94
--- /dev/null
+++ b/pkg/sentry/kernel/auth/auth_unsafe_abi_autogen_unsafe.go
@@ -0,0 +1,13 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// If there are issues with build tag aggregation, see
+// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here
+// come from the input set of files used to generate this file. This input set
+// is filtered based on pre-defined file suffixes related to build tags; see
+// tools/defs.bzl:calculate_sets().
+
+package auth
+
+import (
+)
+
diff --git a/pkg/sentry/kernel/auth/auth_unsafe_state_autogen.go b/pkg/sentry/kernel/auth/auth_unsafe_state_autogen.go
new file mode 100644
index 000000000..61f7e4ce9
--- /dev/null
+++ b/pkg/sentry/kernel/auth/auth_unsafe_state_autogen.go
@@ -0,0 +1,37 @@
+// automatically generated by stateify.
+
+package auth
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (p *AtomicPtrCredentials) StateTypeName() string {
+ return "pkg/sentry/kernel/auth.AtomicPtrCredentials"
+}
+
+func (p *AtomicPtrCredentials) StateFields() []string {
+ return []string{
+ "ptr",
+ }
+}
+
+func (p *AtomicPtrCredentials) beforeSave() {}
+
+// +checklocksignore
+func (p *AtomicPtrCredentials) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+ var ptrValue *Credentials = p.savePtr()
+ stateSinkObject.SaveValue(0, ptrValue)
+}
+
+func (p *AtomicPtrCredentials) afterLoad() {}
+
+// +checklocksignore
+func (p *AtomicPtrCredentials) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.LoadValue(0, new(*Credentials), func(y interface{}) { p.loadPtr(y.(*Credentials)) })
+}
+
+func init() {
+ state.Register((*AtomicPtrCredentials)(nil))
+}
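The wrapper above saves and loads through SaveValue/LoadValue rather than the plain Save/Load calls: the stored value is derived on the way out (savePtr) and handed back through a callback on the way in (loadPtr), so the in-memory representation can differ from the saved one. A hypothetical hand-written type of the same shape (names invented for illustration, not part of this change):

package example

import "gvisor.dev/gvisor/pkg/state"

// box is a hypothetical wrapper whose saved form is derived from, rather than
// identical to, its in-memory field.
type box struct {
	v *int
}

func (b *box) savedForm() *int { return b.v }

func (b *box) restore(v *int) { b.v = v }

func (b *box) StateTypeName() string { return "example.box" }

func (b *box) StateFields() []string { return []string{"v"} }

func (b *box) StateSave(sink state.Sink) {
	// SaveValue stores a derived value instead of pointing at the field.
	sink.SaveValue(0, b.savedForm())
}

func (b *box) StateLoad(source state.Source) {
	// LoadValue hands the restored value back through a callback once it is
	// available, mirroring the loadPtr/loadRoot pattern in this package.
	source.LoadValue(0, new(*int), func(y interface{}) { b.restore(y.(*int)) })
}

func init() {
	state.Register((*box)(nil))
}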
diff --git a/pkg/sentry/kernel/auth/id_map_range.go b/pkg/sentry/kernel/auth/id_map_range.go
new file mode 100644
index 000000000..19d542716
--- /dev/null
+++ b/pkg/sentry/kernel/auth/id_map_range.go
@@ -0,0 +1,76 @@
+package auth
+
+// A Range represents a contiguous range of uint32 values.
+//
+// +stateify savable
+type idMapRange struct {
+ // Start is the inclusive start of the range.
+ Start uint32
+
+ // End is the exclusive end of the range.
+ End uint32
+}
+
+// WellFormed returns true if r.Start <= r.End. All other methods on a Range
+// require that the Range is well-formed.
+//
+//go:nosplit
+func (r idMapRange) WellFormed() bool {
+ return r.Start <= r.End
+}
+
+// Length returns the length of the range.
+//
+//go:nosplit
+func (r idMapRange) Length() uint32 {
+ return r.End - r.Start
+}
+
+// Contains returns true if r contains x.
+//
+//go:nosplit
+func (r idMapRange) Contains(x uint32) bool {
+ return r.Start <= x && x < r.End
+}
+
+// Overlaps returns true if r and r2 overlap.
+//
+//go:nosplit
+func (r idMapRange) Overlaps(r2 idMapRange) bool {
+ return r.Start < r2.End && r2.Start < r.End
+}
+
+// IsSupersetOf returns true if r is a superset of r2; that is, the range r2 is
+// contained within r.
+//
+//go:nosplit
+func (r idMapRange) IsSupersetOf(r2 idMapRange) bool {
+ return r.Start <= r2.Start && r.End >= r2.End
+}
+
+// Intersect returns a range consisting of the intersection between r and r2.
+// If r and r2 do not overlap, Intersect returns a range with unspecified
+// bounds, but for which Length() == 0.
+//
+//go:nosplit
+func (r idMapRange) Intersect(r2 idMapRange) idMapRange {
+ if r.Start < r2.Start {
+ r.Start = r2.Start
+ }
+ if r.End > r2.End {
+ r.End = r2.End
+ }
+ if r.End < r.Start {
+ r.End = r.Start
+ }
+ return r
+}
+
+// CanSplitAt returns true if it is legal to split a segment spanning the range
+// r at x; that is, splitting at x would produce two ranges, both of which have
+// non-zero length.
+//
+//go:nosplit
+func (r idMapRange) CanSplitAt(x uint32) bool {
+ return r.Contains(x) && r.Start < x
+}
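The type above is a plain half-open interval [Start, End) over uint32. A hypothetical in-package test (not part of this change) showing how the helpers compose:

package auth

import "testing"

// TestIDMapRangeOps is a hypothetical sketch exercising the helpers above.
func TestIDMapRangeOps(t *testing.T) {
	r := idMapRange{Start: 1000, End: 2000} // Contains 1000..1999.

	if !r.WellFormed() || r.Length() != 1000 {
		t.Fatalf("unexpected range %v", r)
	}
	if !r.Contains(1999) || r.Contains(2000) {
		t.Fatalf("Contains should include Start and exclude End")
	}

	other := idMapRange{Start: 1500, End: 2500}
	if !r.Overlaps(other) {
		t.Fatalf("expected ranges to overlap")
	}
	// The intersection of [1000, 2000) and [1500, 2500) is [1500, 2000).
	if got := r.Intersect(other); got != (idMapRange{Start: 1500, End: 2000}) {
		t.Fatalf("unexpected intersection %v", got)
	}
	// Splitting at 1500 is legal because both resulting halves are non-empty;
	// splitting at the start of the range is not.
	if !r.CanSplitAt(1500) || r.CanSplitAt(1000) {
		t.Fatalf("unexpected CanSplitAt results")
	}
}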
diff --git a/pkg/sentry/kernel/auth/id_map_set.go b/pkg/sentry/kernel/auth/id_map_set.go
new file mode 100644
index 000000000..479753981
--- /dev/null
+++ b/pkg/sentry/kernel/auth/id_map_set.go
@@ -0,0 +1,1643 @@
+package auth
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// trackGaps is an optional parameter.
+//
+// If trackGaps is 1, the Set will track maximum gap size recursively,
+// enabling the GapIterator.{Prev,Next}LargeEnoughGap functions. In this
+// case, Key must be an unsigned integer.
+//
+// trackGaps must be 0 or 1.
+const idMaptrackGaps = 0
+
+var _ = uint8(idMaptrackGaps << 7) // Will fail if not zero or one.
+
+// dynamicGap is a type that disappears if trackGaps is 0.
+type idMapdynamicGap [idMaptrackGaps]uint32
+
+// Get returns the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *idMapdynamicGap) Get() uint32 {
+ return d[:][0]
+}
+
+// Set sets the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *idMapdynamicGap) Set(v uint32) {
+ d[:][0] = v
+}
+
+const (
+ // minDegree is the minimum degree of an internal node in a Set B-tree.
+ //
+ // - Any non-root node has at least minDegree-1 segments.
+ //
+ // - Any non-root internal (non-leaf) node has at least minDegree children.
+ //
+ // - The root node may have fewer than minDegree-1 segments, but it may
+ // only have 0 segments if the tree is empty.
+ //
+ // Our implementation requires minDegree >= 3. Higher values of minDegree
+ // usually improve performance, but increase memory usage for small sets.
+ idMapminDegree = 3
+
+ idMapmaxDegree = 2 * idMapminDegree
+)
+
+// A Set is a mapping of segments with non-overlapping Range keys. The zero
+// value for a Set is an empty set. Set values are neither safely movable nor
+// copyable. Set is thread-compatible.
+//
+// +stateify savable
+type idMapSet struct {
+ root idMapnode `state:".(*idMapSegmentDataSlices)"`
+}
+
+// IsEmpty returns true if the set contains no segments.
+func (s *idMapSet) IsEmpty() bool {
+ return s.root.nrSegments == 0
+}
+
+// IsEmptyRange returns true iff no segments in the set overlap the given
+// range. This is semantically equivalent to s.SpanRange(r) == 0, but may be
+// more efficient.
+func (s *idMapSet) IsEmptyRange(r idMapRange) bool {
+ switch {
+ case r.Length() < 0:
+ panic(fmt.Sprintf("invalid range %v", r))
+ case r.Length() == 0:
+ return true
+ }
+ _, gap := s.Find(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ return r.End <= gap.End()
+}
+
+// Span returns the total size of all segments in the set.
+func (s *idMapSet) Span() uint32 {
+ var sz uint32
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ sz += seg.Range().Length()
+ }
+ return sz
+}
+
+// SpanRange returns the total size of the intersection of segments in the set
+// with the given range.
+func (s *idMapSet) SpanRange(r idMapRange) uint32 {
+ switch {
+ case r.Length() < 0:
+ panic(fmt.Sprintf("invalid range %v", r))
+ case r.Length() == 0:
+ return 0
+ }
+ var sz uint32
+ for seg := s.LowerBoundSegment(r.Start); seg.Ok() && seg.Start() < r.End; seg = seg.NextSegment() {
+ sz += seg.Range().Intersect(r).Length()
+ }
+ return sz
+}
+
+// FirstSegment returns the first segment in the set. If the set is empty,
+// FirstSegment returns a terminal iterator.
+func (s *idMapSet) FirstSegment() idMapIterator {
+ if s.root.nrSegments == 0 {
+ return idMapIterator{}
+ }
+ return s.root.firstSegment()
+}
+
+// LastSegment returns the last segment in the set. If the set is empty,
+// LastSegment returns a terminal iterator.
+func (s *idMapSet) LastSegment() idMapIterator {
+ if s.root.nrSegments == 0 {
+ return idMapIterator{}
+ }
+ return s.root.lastSegment()
+}
+
+// FirstGap returns the first gap in the set.
+func (s *idMapSet) FirstGap() idMapGapIterator {
+ n := &s.root
+ for n.hasChildren {
+ n = n.children[0]
+ }
+ return idMapGapIterator{n, 0}
+}
+
+// LastGap returns the last gap in the set.
+func (s *idMapSet) LastGap() idMapGapIterator {
+ n := &s.root
+ for n.hasChildren {
+ n = n.children[n.nrSegments]
+ }
+ return idMapGapIterator{n, n.nrSegments}
+}
+
+// Find returns the segment or gap whose range contains the given key. If a
+// segment is found, the returned Iterator is non-terminal and the
+// returned GapIterator is terminal. Otherwise, the returned Iterator is
+// terminal and the returned GapIterator is non-terminal.
+func (s *idMapSet) Find(key uint32) (idMapIterator, idMapGapIterator) {
+ n := &s.root
+ for {
+
+ lower := 0
+ upper := n.nrSegments
+ for lower < upper {
+ i := lower + (upper-lower)/2
+ if r := n.keys[i]; key < r.End {
+ if key >= r.Start {
+ return idMapIterator{n, i}, idMapGapIterator{}
+ }
+ upper = i
+ } else {
+ lower = i + 1
+ }
+ }
+ i := lower
+ if !n.hasChildren {
+ return idMapIterator{}, idMapGapIterator{n, i}
+ }
+ n = n.children[i]
+ }
+}
+
+// FindSegment returns the segment whose range contains the given key. If no
+// such segment exists, FindSegment returns a terminal iterator.
+func (s *idMapSet) FindSegment(key uint32) idMapIterator {
+ seg, _ := s.Find(key)
+ return seg
+}
+
+// LowerBoundSegment returns the segment with the lowest range that contains a
+// key greater than or equal to min. If no such segment exists,
+// LowerBoundSegment returns a terminal iterator.
+func (s *idMapSet) LowerBoundSegment(min uint32) idMapIterator {
+ seg, gap := s.Find(min)
+ if seg.Ok() {
+ return seg
+ }
+ return gap.NextSegment()
+}
+
+// UpperBoundSegment returns the segment with the highest range that contains a
+// key less than or equal to max. If no such segment exists, UpperBoundSegment
+// returns a terminal iterator.
+func (s *idMapSet) UpperBoundSegment(max uint32) idMapIterator {
+ seg, gap := s.Find(max)
+ if seg.Ok() {
+ return seg
+ }
+ return gap.PrevSegment()
+}
+
+// FindGap returns the gap containing the given key. If no such gap exists
+// (i.e. the set contains a segment containing that key), FindGap returns a
+// terminal iterator.
+func (s *idMapSet) FindGap(key uint32) idMapGapIterator {
+ _, gap := s.Find(key)
+ return gap
+}
+
+// LowerBoundGap returns the gap with the lowest range that is greater than or
+// equal to min.
+func (s *idMapSet) LowerBoundGap(min uint32) idMapGapIterator {
+ seg, gap := s.Find(min)
+ if gap.Ok() {
+ return gap
+ }
+ return seg.NextGap()
+}
+
+// UpperBoundGap returns the gap with the highest range that is less than or
+// equal to max.
+func (s *idMapSet) UpperBoundGap(max uint32) idMapGapIterator {
+ seg, gap := s.Find(max)
+ if gap.Ok() {
+ return gap
+ }
+ return seg.PrevGap()
+}
+
+// Add inserts the given segment into the set and returns true. If the new
+// segment can be merged with adjacent segments, Add will do so. If the new
+// segment would overlap an existing segment, Add returns false. If Add
+// succeeds, all existing iterators are invalidated.
+func (s *idMapSet) Add(r idMapRange, val uint32) bool {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ gap := s.FindGap(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ if r.End > gap.End() {
+ return false
+ }
+ s.Insert(gap, r, val)
+ return true
+}
+
+// AddWithoutMerging inserts the given segment into the set and returns true.
+// If it would overlap an existing segment, AddWithoutMerging does nothing and
+// returns false. If AddWithoutMerging succeeds, all existing iterators are
+// invalidated.
+func (s *idMapSet) AddWithoutMerging(r idMapRange, val uint32) bool {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ gap := s.FindGap(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ if r.End > gap.End() {
+ return false
+ }
+ s.InsertWithoutMergingUnchecked(gap, r, val)
+ return true
+}
+
+// Insert inserts the given segment into the given gap. If the new segment can
+// be merged with adjacent segments, Insert will do so. Insert returns an
+// iterator to the segment containing the inserted value (which may have been
+// merged with other values). All existing iterators (including gap, but not
+// including the returned iterator) are invalidated.
+//
+// If the gap cannot accommodate the segment, or if r is invalid, Insert panics.
+//
+// Insert is semantically equivalent to an InsertWithoutMerging followed by a
+// Merge, but may be more efficient. Note that there is no unchecked variant of
+// Insert since Insert must retrieve and inspect gap's predecessor and
+// successor segments regardless.
+func (s *idMapSet) Insert(gap idMapGapIterator, r idMapRange, val uint32) idMapIterator {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ prev, next := gap.PrevSegment(), gap.NextSegment()
+ if prev.Ok() && prev.End() > r.Start {
+ panic(fmt.Sprintf("new segment %v overlaps predecessor %v", r, prev.Range()))
+ }
+ if next.Ok() && next.Start() < r.End {
+ panic(fmt.Sprintf("new segment %v overlaps successor %v", r, next.Range()))
+ }
+ if prev.Ok() && prev.End() == r.Start {
+ if mval, ok := (idMapFunctions{}).Merge(prev.Range(), prev.Value(), r, val); ok {
+ shrinkMaxGap := idMaptrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
+ prev.SetEndUnchecked(r.End)
+ prev.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ if next.Ok() && next.Start() == r.End {
+ val = mval
+ if mval, ok := (idMapFunctions{}).Merge(prev.Range(), val, next.Range(), next.Value()); ok {
+ prev.SetEndUnchecked(next.End())
+ prev.SetValue(mval)
+ return s.Remove(next).PrevSegment()
+ }
+ }
+ return prev
+ }
+ }
+ if next.Ok() && next.Start() == r.End {
+ if mval, ok := (idMapFunctions{}).Merge(r, val, next.Range(), next.Value()); ok {
+ shrinkMaxGap := idMaptrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
+ next.SetStartUnchecked(r.Start)
+ next.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ return next
+ }
+ }
+
+ return s.InsertWithoutMergingUnchecked(gap, r, val)
+}
+
+// InsertWithoutMerging inserts the given segment into the given gap and
+// returns an iterator to the inserted segment. All existing iterators
+// (including gap, but not including the returned iterator) are invalidated.
+//
+// If the gap cannot accommodate the segment, or if r is invalid,
+// InsertWithoutMerging panics.
+func (s *idMapSet) InsertWithoutMerging(gap idMapGapIterator, r idMapRange, val uint32) idMapIterator {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ if gr := gap.Range(); !gr.IsSupersetOf(r) {
+ panic(fmt.Sprintf("cannot insert segment range %v into gap range %v", r, gr))
+ }
+ return s.InsertWithoutMergingUnchecked(gap, r, val)
+}
+
+// InsertWithoutMergingUnchecked inserts the given segment into the given gap
+// and returns an iterator to the inserted segment. All existing iterators
+// (including gap, but not including the returned iterator) are invalidated.
+//
+// Preconditions:
+// * r.Start >= gap.Start().
+// * r.End <= gap.End().
+func (s *idMapSet) InsertWithoutMergingUnchecked(gap idMapGapIterator, r idMapRange, val uint32) idMapIterator {
+ gap = gap.node.rebalanceBeforeInsert(gap)
+ splitMaxGap := idMaptrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
+ copy(gap.node.keys[gap.index+1:], gap.node.keys[gap.index:gap.node.nrSegments])
+ copy(gap.node.values[gap.index+1:], gap.node.values[gap.index:gap.node.nrSegments])
+ gap.node.keys[gap.index] = r
+ gap.node.values[gap.index] = val
+ gap.node.nrSegments++
+ if splitMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ return idMapIterator{gap.node, gap.index}
+}
+
+// Remove removes the given segment and returns an iterator to the vacated gap.
+// All existing iterators (including seg, but not including the returned
+// iterator) are invalidated.
+func (s *idMapSet) Remove(seg idMapIterator) idMapGapIterator {
+
+ if seg.node.hasChildren {
+
+ victim := seg.PrevSegment()
+
+ seg.SetRangeUnchecked(victim.Range())
+ seg.SetValue(victim.Value())
+
+ nextAdjacentNode := seg.NextSegment().node
+ if idMaptrackGaps != 0 {
+ nextAdjacentNode.updateMaxGapLeaf()
+ }
+ return s.Remove(victim).NextGap()
+ }
+ copy(seg.node.keys[seg.index:], seg.node.keys[seg.index+1:seg.node.nrSegments])
+ copy(seg.node.values[seg.index:], seg.node.values[seg.index+1:seg.node.nrSegments])
+ idMapFunctions{}.ClearValue(&seg.node.values[seg.node.nrSegments-1])
+ seg.node.nrSegments--
+ if idMaptrackGaps != 0 {
+ seg.node.updateMaxGapLeaf()
+ }
+ return seg.node.rebalanceAfterRemove(idMapGapIterator{seg.node, seg.index})
+}
+
+// RemoveAll removes all segments from the set. All existing iterators are
+// invalidated.
+func (s *idMapSet) RemoveAll() {
+ s.root = idMapnode{}
+}
+
+// RemoveRange removes all segments in the given range. An iterator to the
+// newly formed gap is returned, and all existing iterators are invalidated.
+func (s *idMapSet) RemoveRange(r idMapRange) idMapGapIterator {
+ seg, gap := s.Find(r.Start)
+ if seg.Ok() {
+ seg = s.Isolate(seg, r)
+ gap = s.Remove(seg)
+ }
+ for seg = gap.NextSegment(); seg.Ok() && seg.Start() < r.End; seg = gap.NextSegment() {
+ seg = s.Isolate(seg, r)
+ gap = s.Remove(seg)
+ }
+ return gap
+}
+
+// Merge attempts to merge two neighboring segments. If successful, Merge
+// returns an iterator to the merged segment, and all existing iterators are
+// invalidated. Otherwise, Merge returns a terminal iterator.
+//
+// If first is not the predecessor of second, Merge panics.
+func (s *idMapSet) Merge(first, second idMapIterator) idMapIterator {
+ if first.NextSegment() != second {
+ panic(fmt.Sprintf("attempt to merge non-neighboring segments %v, %v", first.Range(), second.Range()))
+ }
+ return s.MergeUnchecked(first, second)
+}
+
+// MergeUnchecked attempts to merge two neighboring segments. If successful,
+// MergeUnchecked returns an iterator to the merged segment, and all existing
+// iterators are invalidated. Otherwise, MergeUnchecked returns a terminal
+// iterator.
+//
+// Precondition: first is the predecessor of second: first.NextSegment() ==
+// second, first == second.PrevSegment().
+func (s *idMapSet) MergeUnchecked(first, second idMapIterator) idMapIterator {
+ if first.End() == second.Start() {
+ if mval, ok := (idMapFunctions{}).Merge(first.Range(), first.Value(), second.Range(), second.Value()); ok {
+
+ first.SetEndUnchecked(second.End())
+ first.SetValue(mval)
+
+ return s.Remove(second).PrevSegment()
+ }
+ }
+ return idMapIterator{}
+}
+
+// MergeAll attempts to merge all adjacent segments in the set. All existing
+// iterators are invalidated.
+func (s *idMapSet) MergeAll() {
+ seg := s.FirstSegment()
+ if !seg.Ok() {
+ return
+ }
+ next := seg.NextSegment()
+ for next.Ok() {
+ if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
+ seg, next = mseg, mseg.NextSegment()
+ } else {
+ seg, next = next, next.NextSegment()
+ }
+ }
+}
+
+// MergeRange attempts to merge all adjacent segments that contain a key in the
+// specified range. All existing iterators are invalidated.
+func (s *idMapSet) MergeRange(r idMapRange) {
+ seg := s.LowerBoundSegment(r.Start)
+ if !seg.Ok() {
+ return
+ }
+ next := seg.NextSegment()
+ for next.Ok() && next.Range().Start < r.End {
+ if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
+ seg, next = mseg, mseg.NextSegment()
+ } else {
+ seg, next = next, next.NextSegment()
+ }
+ }
+}
+
+// MergeAdjacent attempts to merge the segment containing r.Start with its
+// predecessor, and the segment containing r.End-1 with its successor.
+func (s *idMapSet) MergeAdjacent(r idMapRange) {
+ first := s.FindSegment(r.Start)
+ if first.Ok() {
+ if prev := first.PrevSegment(); prev.Ok() {
+ s.Merge(prev, first)
+ }
+ }
+ last := s.FindSegment(r.End - 1)
+ if last.Ok() {
+ if next := last.NextSegment(); next.Ok() {
+ s.Merge(last, next)
+ }
+ }
+}
+
+// Split splits the given segment at the given key and returns iterators to the
+// two resulting segments. All existing iterators (including seg, but not
+// including the returned iterators) are invalidated.
+//
+// If the segment cannot be split at split (because split is at the start or
+// end of the segment's range, so splitting would produce a segment with zero
+// length, or because split falls outside the segment's range altogether),
+// Split panics.
+func (s *idMapSet) Split(seg idMapIterator, split uint32) (idMapIterator, idMapIterator) {
+ if !seg.Range().CanSplitAt(split) {
+ panic(fmt.Sprintf("can't split %v at %v", seg.Range(), split))
+ }
+ return s.SplitUnchecked(seg, split)
+}
+
+// SplitUnchecked splits the given segment at the given key and returns
+// iterators to the two resulting segments. All existing iterators (including
+// seg, but not including the returned iterators) are invalidated.
+//
+// Preconditions: seg.Start() < split < seg.End().
+func (s *idMapSet) SplitUnchecked(seg idMapIterator, split uint32) (idMapIterator, idMapIterator) {
+ val1, val2 := (idMapFunctions{}).Split(seg.Range(), seg.Value(), split)
+ end2 := seg.End()
+ seg.SetEndUnchecked(split)
+ seg.SetValue(val1)
+ seg2 := s.InsertWithoutMergingUnchecked(seg.NextGap(), idMapRange{split, end2}, val2)
+
+ return seg2.PrevSegment(), seg2
+}
+
+// SplitAt splits the segment straddling split, if one exists. SplitAt returns
+// true if a segment was split and false otherwise. If SplitAt splits a
+// segment, all existing iterators are invalidated.
+func (s *idMapSet) SplitAt(split uint32) bool {
+ if seg := s.FindSegment(split); seg.Ok() && seg.Range().CanSplitAt(split) {
+ s.SplitUnchecked(seg, split)
+ return true
+ }
+ return false
+}
+
+// Isolate ensures that the given segment's range does not escape r by
+// splitting at r.Start and r.End if necessary, and returns an updated iterator
+// to the bounded segment. All existing iterators (including seg, but not
+// including the returned iterator) are invalidated.
+func (s *idMapSet) Isolate(seg idMapIterator, r idMapRange) idMapIterator {
+ if seg.Range().CanSplitAt(r.Start) {
+ _, seg = s.SplitUnchecked(seg, r.Start)
+ }
+ if seg.Range().CanSplitAt(r.End) {
+ seg, _ = s.SplitUnchecked(seg, r.End)
+ }
+ return seg
+}
+
+// ApplyContiguous applies a function to a contiguous range of segments,
+// splitting if necessary. The function is applied until the first gap is
+// encountered, at which point the gap is returned. If the function is applied
+// across the entire range, a terminal gap is returned. All existing iterators
+// are invalidated.
+//
+// N.B. The Iterator must not be invalidated by the function.
+func (s *idMapSet) ApplyContiguous(r idMapRange, fn func(seg idMapIterator)) idMapGapIterator {
+ seg, gap := s.Find(r.Start)
+ if !seg.Ok() {
+ return gap
+ }
+ for {
+ seg = s.Isolate(seg, r)
+ fn(seg)
+ if seg.End() >= r.End {
+ return idMapGapIterator{}
+ }
+ gap = seg.NextGap()
+ if !gap.IsEmpty() {
+ return gap
+ }
+ seg = gap.NextSegment()
+ if !seg.Ok() {
+
+ return idMapGapIterator{}
+ }
+ }
+}
+
+// +stateify savable
+type idMapnode struct {
+ // An internal binary tree node looks like:
+ //
+ // K
+ // / \
+ // Cl Cr
+ //
+ // where all keys in the subtree rooted by Cl (the left subtree) are less
+ // than K (the key of the parent node), and all keys in the subtree rooted
+ // by Cr (the right subtree) are greater than K.
+ //
+ // An internal B-tree node's indexes work out to look like:
+ //
+ // K0 K1 K2 ... Kn-1
+ // / \/ \/ \ ... / \
+ // C0 C1 C2 C3 ... Cn-1 Cn
+ //
+ // where n is nrSegments.
+ nrSegments int
+
+ // parent is a pointer to this node's parent. If this node is root, parent
+ // is nil.
+ parent *idMapnode
+
+ // parentIndex is the index of this node in parent.children.
+ parentIndex int
+
+ // Flag for internal nodes that is technically redundant with "children[0]
+ // != nil", but is stored in the first cache line. "hasChildren" rather
+ // than "isLeaf" because false must be the correct value for an empty root.
+ hasChildren bool
+
+ // The longest gap within this node. If the node is a leaf, it's simply the
+ // maximum gap among all the (nrSegments+1) gaps formed by its nrSegments keys
+ // including the 0th and nrSegments-th gap possibly shared with its upper-level
+ // nodes; if it's a non-leaf node, it's the max of all children's maxGap.
+ maxGap idMapdynamicGap
+
+ // Nodes store keys and values in separate arrays to maximize locality in
+ // the common case (scanning keys for lookup).
+ keys [idMapmaxDegree - 1]idMapRange
+ values [idMapmaxDegree - 1]uint32
+ children [idMapmaxDegree]*idMapnode
+}
+
+// firstSegment returns the first segment in the subtree rooted by n.
+//
+// Preconditions: n.nrSegments != 0.
+func (n *idMapnode) firstSegment() idMapIterator {
+ for n.hasChildren {
+ n = n.children[0]
+ }
+ return idMapIterator{n, 0}
+}
+
+// lastSegment returns the last segment in the subtree rooted by n.
+//
+// Preconditions: n.nrSegments != 0.
+func (n *idMapnode) lastSegment() idMapIterator {
+ for n.hasChildren {
+ n = n.children[n.nrSegments]
+ }
+ return idMapIterator{n, n.nrSegments - 1}
+}
+
+func (n *idMapnode) prevSibling() *idMapnode {
+ if n.parent == nil || n.parentIndex == 0 {
+ return nil
+ }
+ return n.parent.children[n.parentIndex-1]
+}
+
+func (n *idMapnode) nextSibling() *idMapnode {
+ if n.parent == nil || n.parentIndex == n.parent.nrSegments {
+ return nil
+ }
+ return n.parent.children[n.parentIndex+1]
+}
+
+// rebalanceBeforeInsert splits n and its ancestors if they are full, as
+// required for insertion, and returns an updated iterator to the position
+// represented by gap.
+func (n *idMapnode) rebalanceBeforeInsert(gap idMapGapIterator) idMapGapIterator {
+ if n.nrSegments < idMapmaxDegree-1 {
+ return gap
+ }
+ if n.parent != nil {
+ gap = n.parent.rebalanceBeforeInsert(gap)
+ }
+ if n.parent == nil {
+
+ left := &idMapnode{
+ nrSegments: idMapminDegree - 1,
+ parent: n,
+ parentIndex: 0,
+ hasChildren: n.hasChildren,
+ }
+ right := &idMapnode{
+ nrSegments: idMapminDegree - 1,
+ parent: n,
+ parentIndex: 1,
+ hasChildren: n.hasChildren,
+ }
+ copy(left.keys[:idMapminDegree-1], n.keys[:idMapminDegree-1])
+ copy(left.values[:idMapminDegree-1], n.values[:idMapminDegree-1])
+ copy(right.keys[:idMapminDegree-1], n.keys[idMapminDegree:])
+ copy(right.values[:idMapminDegree-1], n.values[idMapminDegree:])
+ n.keys[0], n.values[0] = n.keys[idMapminDegree-1], n.values[idMapminDegree-1]
+ idMapzeroValueSlice(n.values[1:])
+ if n.hasChildren {
+ copy(left.children[:idMapminDegree], n.children[:idMapminDegree])
+ copy(right.children[:idMapminDegree], n.children[idMapminDegree:])
+ idMapzeroNodeSlice(n.children[2:])
+ for i := 0; i < idMapminDegree; i++ {
+ left.children[i].parent = left
+ left.children[i].parentIndex = i
+ right.children[i].parent = right
+ right.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments = 1
+ n.hasChildren = true
+ n.children[0] = left
+ n.children[1] = right
+
+ if idMaptrackGaps != 0 {
+ left.updateMaxGapLocal()
+ right.updateMaxGapLocal()
+ }
+ if gap.node != n {
+ return gap
+ }
+ if gap.index < idMapminDegree {
+ return idMapGapIterator{left, gap.index}
+ }
+ return idMapGapIterator{right, gap.index - idMapminDegree}
+ }
+
+ copy(n.parent.keys[n.parentIndex+1:], n.parent.keys[n.parentIndex:n.parent.nrSegments])
+ copy(n.parent.values[n.parentIndex+1:], n.parent.values[n.parentIndex:n.parent.nrSegments])
+ n.parent.keys[n.parentIndex], n.parent.values[n.parentIndex] = n.keys[idMapminDegree-1], n.values[idMapminDegree-1]
+ copy(n.parent.children[n.parentIndex+2:], n.parent.children[n.parentIndex+1:n.parent.nrSegments+1])
+ for i := n.parentIndex + 2; i < n.parent.nrSegments+2; i++ {
+ n.parent.children[i].parentIndex = i
+ }
+ sibling := &idMapnode{
+ nrSegments: idMapminDegree - 1,
+ parent: n.parent,
+ parentIndex: n.parentIndex + 1,
+ hasChildren: n.hasChildren,
+ }
+ n.parent.children[n.parentIndex+1] = sibling
+ n.parent.nrSegments++
+ copy(sibling.keys[:idMapminDegree-1], n.keys[idMapminDegree:])
+ copy(sibling.values[:idMapminDegree-1], n.values[idMapminDegree:])
+ idMapzeroValueSlice(n.values[idMapminDegree-1:])
+ if n.hasChildren {
+ copy(sibling.children[:idMapminDegree], n.children[idMapminDegree:])
+ idMapzeroNodeSlice(n.children[idMapminDegree:])
+ for i := 0; i < idMapminDegree; i++ {
+ sibling.children[i].parent = sibling
+ sibling.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments = idMapminDegree - 1
+
+ if idMaptrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+
+ if gap.node != n {
+ return gap
+ }
+ if gap.index < idMapminDegree {
+ return gap
+ }
+ return idMapGapIterator{sibling, gap.index - idMapminDegree}
+}
+
+// rebalanceAfterRemove "unsplits" n and its ancestors if they are deficient
+// (contain fewer segments than required by B-tree invariants), as required for
+// removal, and returns an updated iterator to the position represented by gap.
+//
+// Precondition: n is the only node in the tree that may currently violate a
+// B-tree invariant.
+func (n *idMapnode) rebalanceAfterRemove(gap idMapGapIterator) idMapGapIterator {
+ for {
+ if n.nrSegments >= idMapminDegree-1 {
+ return gap
+ }
+ if n.parent == nil {
+
+ return gap
+ }
+
+ if sibling := n.prevSibling(); sibling != nil && sibling.nrSegments >= idMapminDegree {
+ copy(n.keys[1:], n.keys[:n.nrSegments])
+ copy(n.values[1:], n.values[:n.nrSegments])
+ n.keys[0] = n.parent.keys[n.parentIndex-1]
+ n.values[0] = n.parent.values[n.parentIndex-1]
+ n.parent.keys[n.parentIndex-1] = sibling.keys[sibling.nrSegments-1]
+ n.parent.values[n.parentIndex-1] = sibling.values[sibling.nrSegments-1]
+ idMapFunctions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
+ if n.hasChildren {
+ copy(n.children[1:], n.children[:n.nrSegments+1])
+ n.children[0] = sibling.children[sibling.nrSegments]
+ sibling.children[sibling.nrSegments] = nil
+ n.children[0].parent = n
+ n.children[0].parentIndex = 0
+ for i := 1; i < n.nrSegments+2; i++ {
+ n.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments++
+ sibling.nrSegments--
+
+ if idMaptrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+ if gap.node == sibling && gap.index == sibling.nrSegments {
+ return idMapGapIterator{n, 0}
+ }
+ if gap.node == n {
+ return idMapGapIterator{n, gap.index + 1}
+ }
+ return gap
+ }
+ if sibling := n.nextSibling(); sibling != nil && sibling.nrSegments >= idMapminDegree {
+ n.keys[n.nrSegments] = n.parent.keys[n.parentIndex]
+ n.values[n.nrSegments] = n.parent.values[n.parentIndex]
+ n.parent.keys[n.parentIndex] = sibling.keys[0]
+ n.parent.values[n.parentIndex] = sibling.values[0]
+ copy(sibling.keys[:sibling.nrSegments-1], sibling.keys[1:])
+ copy(sibling.values[:sibling.nrSegments-1], sibling.values[1:])
+ idMapFunctions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
+ if n.hasChildren {
+ n.children[n.nrSegments+1] = sibling.children[0]
+ copy(sibling.children[:sibling.nrSegments], sibling.children[1:])
+ sibling.children[sibling.nrSegments] = nil
+ n.children[n.nrSegments+1].parent = n
+ n.children[n.nrSegments+1].parentIndex = n.nrSegments + 1
+ for i := 0; i < sibling.nrSegments; i++ {
+ sibling.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments++
+ sibling.nrSegments--
+
+ if idMaptrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+ if gap.node == sibling {
+ if gap.index == 0 {
+ return idMapGapIterator{n, n.nrSegments}
+ }
+ return idMapGapIterator{sibling, gap.index - 1}
+ }
+ return gap
+ }
+
+ p := n.parent
+ if p.nrSegments == 1 {
+
+ left, right := p.children[0], p.children[1]
+ p.nrSegments = left.nrSegments + right.nrSegments + 1
+ p.hasChildren = left.hasChildren
+ p.keys[left.nrSegments] = p.keys[0]
+ p.values[left.nrSegments] = p.values[0]
+ copy(p.keys[:left.nrSegments], left.keys[:left.nrSegments])
+ copy(p.values[:left.nrSegments], left.values[:left.nrSegments])
+ copy(p.keys[left.nrSegments+1:], right.keys[:right.nrSegments])
+ copy(p.values[left.nrSegments+1:], right.values[:right.nrSegments])
+ if left.hasChildren {
+ copy(p.children[:left.nrSegments+1], left.children[:left.nrSegments+1])
+ copy(p.children[left.nrSegments+1:], right.children[:right.nrSegments+1])
+ for i := 0; i < p.nrSegments+1; i++ {
+ p.children[i].parent = p
+ p.children[i].parentIndex = i
+ }
+ } else {
+ p.children[0] = nil
+ p.children[1] = nil
+ }
+
+ if gap.node == left {
+ return idMapGapIterator{p, gap.index}
+ }
+ if gap.node == right {
+ return idMapGapIterator{p, gap.index + left.nrSegments + 1}
+ }
+ return gap
+ }
+ // Merge n and either sibling, along with the segment separating the
+ // two, into whichever of the two nodes comes first. This is the
+ // reverse of the non-root splitting case in
+ // node.rebalanceBeforeInsert.
+ var left, right *idMapnode
+ if n.parentIndex > 0 {
+ left = n.prevSibling()
+ right = n
+ } else {
+ left = n
+ right = n.nextSibling()
+ }
+
+ if gap.node == right {
+ gap = idMapGapIterator{left, gap.index + left.nrSegments + 1}
+ }
+ left.keys[left.nrSegments] = p.keys[left.parentIndex]
+ left.values[left.nrSegments] = p.values[left.parentIndex]
+ copy(left.keys[left.nrSegments+1:], right.keys[:right.nrSegments])
+ copy(left.values[left.nrSegments+1:], right.values[:right.nrSegments])
+ if left.hasChildren {
+ copy(left.children[left.nrSegments+1:], right.children[:right.nrSegments+1])
+ for i := left.nrSegments + 1; i < left.nrSegments+right.nrSegments+2; i++ {
+ left.children[i].parent = left
+ left.children[i].parentIndex = i
+ }
+ }
+ left.nrSegments += right.nrSegments + 1
+ copy(p.keys[left.parentIndex:], p.keys[left.parentIndex+1:p.nrSegments])
+ copy(p.values[left.parentIndex:], p.values[left.parentIndex+1:p.nrSegments])
+ idMapFunctions{}.ClearValue(&p.values[p.nrSegments-1])
+ copy(p.children[left.parentIndex+1:], p.children[left.parentIndex+2:p.nrSegments+1])
+ for i := 0; i < p.nrSegments; i++ {
+ p.children[i].parentIndex = i
+ }
+ p.children[p.nrSegments] = nil
+ p.nrSegments--
+
+ if idMaptrackGaps != 0 {
+ left.updateMaxGapLocal()
+ }
+
+ n = p
+ }
+}
+
+// updateMaxGapLeaf updates maxGap bottom-up from the calling leaf until no
+// further update is necessary.
+//
+// Preconditions: n must be a leaf node, trackGaps must be 1.
+func (n *idMapnode) updateMaxGapLeaf() {
+ if n.hasChildren {
+ panic(fmt.Sprintf("updateMaxGapLeaf should always be called on leaf node: %v", n))
+ }
+ max := n.calculateMaxGapLeaf()
+ if max == n.maxGap.Get() {
+
+ return
+ }
+ oldMax := n.maxGap.Get()
+ n.maxGap.Set(max)
+ if max > oldMax {
+
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() >= max {
+
+ break
+ }
+
+ p.maxGap.Set(max)
+ }
+ return
+ }
+
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() > oldMax {
+
+ break
+ }
+
+ parentNewMax := p.calculateMaxGapInternal()
+ if p.maxGap.Get() == parentNewMax {
+
+ break
+ }
+
+ p.maxGap.Set(parentNewMax)
+ }
+}
+
+// updateMaxGapLocal updates maxGap of the calling node only, with no
+// propagation to ancestor nodes.
+//
+// Precondition: trackGaps must be 1.
+func (n *idMapnode) updateMaxGapLocal() {
+ if !n.hasChildren {
+
+ n.maxGap.Set(n.calculateMaxGapLeaf())
+ } else {
+
+ n.maxGap.Set(n.calculateMaxGapInternal())
+ }
+}
+
+// calculateMaxGapLeaf iterates over the gaps within a leaf node and
+// calculates the max.
+//
+// Preconditions: n must be a leaf node.
+func (n *idMapnode) calculateMaxGapLeaf() uint32 {
+ max := idMapGapIterator{n, 0}.Range().Length()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := (idMapGapIterator{n, i}).Range().Length(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// calculateMaxGapInternal iterates over the children's maxGap values within an
+// internal node n and calculates the max.
+//
+// Preconditions: n must be a non-leaf node.
+func (n *idMapnode) calculateMaxGapInternal() uint32 {
+ max := n.children[0].maxGap.Get()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := n.children[i].maxGap.Get(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// searchFirstLargeEnoughGap returns the first gap having at least minSize length
+// in the subtree rooted by n. If no such gap exists, it returns a terminal gap iterator.
+func (n *idMapnode) searchFirstLargeEnoughGap(minSize uint32) idMapGapIterator {
+ if n.maxGap.Get() < minSize {
+ return idMapGapIterator{}
+ }
+ if n.hasChildren {
+ for i := 0; i <= n.nrSegments; i++ {
+ if largeEnoughGap := n.children[i].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := 0; i <= n.nrSegments; i++ {
+ currentGap := idMapGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
+// searchLastLargeEnoughGap returns the last gap having at least minSize length
+// in the subtree rooted by n. If no such gap exists, it returns a terminal gap iterator.
+func (n *idMapnode) searchLastLargeEnoughGap(minSize uint32) idMapGapIterator {
+ if n.maxGap.Get() < minSize {
+ return idMapGapIterator{}
+ }
+ if n.hasChildren {
+ for i := n.nrSegments; i >= 0; i-- {
+ if largeEnoughGap := n.children[i].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := n.nrSegments; i >= 0; i-- {
+ currentGap := idMapGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
+// An Iterator is conceptually one of:
+//
+// - A pointer to a segment in a set; or
+//
+// - A terminal iterator, which is a sentinel indicating that the end of
+// iteration has been reached.
+//
+// Iterators are copyable values and are meaningfully equality-comparable. The
+// zero value of Iterator is a terminal iterator.
+//
+// Unless otherwise specified, any mutation of a set invalidates all existing
+// iterators into the set.
+type idMapIterator struct {
+ // node is the node containing the iterated segment. If the iterator is
+ // terminal, node is nil.
+ node *idMapnode
+
+ // index is the index of the segment in node.keys/values.
+ index int
+}
+
+// Ok returns true if the iterator is not terminal. All other methods are only
+// valid for non-terminal iterators.
+func (seg idMapIterator) Ok() bool {
+ return seg.node != nil
+}
+
+// Range returns the iterated segment's range key.
+func (seg idMapIterator) Range() idMapRange {
+ return seg.node.keys[seg.index]
+}
+
+// Start is equivalent to Range().Start, but should be preferred if only the
+// start of the range is needed.
+func (seg idMapIterator) Start() uint32 {
+ return seg.node.keys[seg.index].Start
+}
+
+// End is equivalent to Range().End, but should be preferred if only the end of
+// the range is needed.
+func (seg idMapIterator) End() uint32 {
+ return seg.node.keys[seg.index].End
+}
+
+// SetRangeUnchecked mutates the iterated segment's range key. This operation
+// does not invalidate any iterators.
+//
+// Preconditions:
+// * r.Length() > 0.
+// * The new range must not overlap an existing one:
+// * If seg.NextSegment().Ok(), then r.End <= seg.NextSegment().Start().
+// * If seg.PrevSegment().Ok(), then r.Start >= seg.PrevSegment().End().
+func (seg idMapIterator) SetRangeUnchecked(r idMapRange) {
+ seg.node.keys[seg.index] = r
+}
+
+// SetRange mutates the iterated segment's range key. If the new range would
+// cause the iterated segment to overlap another segment, or if the new range
+// is invalid, SetRange panics. This operation does not invalidate any
+// iterators.
+func (seg idMapIterator) SetRange(r idMapRange) {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ if prev := seg.PrevSegment(); prev.Ok() && r.Start < prev.End() {
+ panic(fmt.Sprintf("new segment range %v overlaps segment range %v", r, prev.Range()))
+ }
+ if next := seg.NextSegment(); next.Ok() && r.End > next.Start() {
+ panic(fmt.Sprintf("new segment range %v overlaps segment range %v", r, next.Range()))
+ }
+ seg.SetRangeUnchecked(r)
+}
+
+// SetStartUnchecked mutates the iterated segment's start. This operation does
+// not invalidate any iterators.
+//
+// Preconditions: The new start must be valid:
+// * start < seg.End()
+// * If seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
+func (seg idMapIterator) SetStartUnchecked(start uint32) {
+ seg.node.keys[seg.index].Start = start
+}
+
+// SetStart mutates the iterated segment's start. If the new start value would
+// cause the iterated segment to overlap another segment, or would result in an
+// invalid range, SetStart panics. This operation does not invalidate any
+// iterators.
+func (seg idMapIterator) SetStart(start uint32) {
+ if start >= seg.End() {
+ panic(fmt.Sprintf("new start %v would invalidate segment range %v", start, seg.Range()))
+ }
+ if prev := seg.PrevSegment(); prev.Ok() && start < prev.End() {
+ panic(fmt.Sprintf("new start %v would cause segment range %v to overlap segment range %v", start, seg.Range(), prev.Range()))
+ }
+ seg.SetStartUnchecked(start)
+}
+
+// SetEndUnchecked mutates the iterated segment's end. This operation does not
+// invalidate any iterators.
+//
+// Preconditions: The new end must be valid:
+// * end > seg.Start().
+// * If seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
+func (seg idMapIterator) SetEndUnchecked(end uint32) {
+ seg.node.keys[seg.index].End = end
+}
+
+// SetEnd mutates the iterated segment's end. If the new end value would cause
+// the iterated segment to overlap another segment, or would result in an
+// invalid range, SetEnd panics. This operation does not invalidate any
+// iterators.
+func (seg idMapIterator) SetEnd(end uint32) {
+ if end <= seg.Start() {
+ panic(fmt.Sprintf("new end %v would invalidate segment range %v", end, seg.Range()))
+ }
+ if next := seg.NextSegment(); next.Ok() && end > next.Start() {
+ panic(fmt.Sprintf("new end %v would cause segment range %v to overlap segment range %v", end, seg.Range(), next.Range()))
+ }
+ seg.SetEndUnchecked(end)
+}
+
+// Value returns a copy of the iterated segment's value.
+func (seg idMapIterator) Value() uint32 {
+ return seg.node.values[seg.index]
+}
+
+// ValuePtr returns a pointer to the iterated segment's value. The pointer is
+// invalidated if the iterator is invalidated. This operation does not
+// invalidate any iterators.
+func (seg idMapIterator) ValuePtr() *uint32 {
+ return &seg.node.values[seg.index]
+}
+
+// SetValue mutates the iterated segment's value. This operation does not
+// invalidate any iterators.
+func (seg idMapIterator) SetValue(val uint32) {
+ seg.node.values[seg.index] = val
+}
+
+// PrevSegment returns the iterated segment's predecessor. If there is no
+// preceding segment, PrevSegment returns a terminal iterator.
+func (seg idMapIterator) PrevSegment() idMapIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index].lastSegment()
+ }
+ if seg.index > 0 {
+ return idMapIterator{seg.node, seg.index - 1}
+ }
+ if seg.node.parent == nil {
+ return idMapIterator{}
+ }
+ return idMapsegmentBeforePosition(seg.node.parent, seg.node.parentIndex)
+}
+
+// NextSegment returns the iterated segment's successor. If there is no
+// succeeding segment, NextSegment returns a terminal iterator.
+func (seg idMapIterator) NextSegment() idMapIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index+1].firstSegment()
+ }
+ if seg.index < seg.node.nrSegments-1 {
+ return idMapIterator{seg.node, seg.index + 1}
+ }
+ if seg.node.parent == nil {
+ return idMapIterator{}
+ }
+ return idMapsegmentAfterPosition(seg.node.parent, seg.node.parentIndex)
+}
+
+// PrevGap returns the gap immediately before the iterated segment.
+func (seg idMapIterator) PrevGap() idMapGapIterator {
+ if seg.node.hasChildren {
+
+ return seg.node.children[seg.index].lastSegment().NextGap()
+ }
+ return idMapGapIterator{seg.node, seg.index}
+}
+
+// NextGap returns the gap immediately after the iterated segment.
+func (seg idMapIterator) NextGap() idMapGapIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index+1].firstSegment().PrevGap()
+ }
+ return idMapGapIterator{seg.node, seg.index + 1}
+}
+
+// PrevNonEmpty returns the iterated segment's predecessor if it is adjacent,
+// or the gap before the iterated segment otherwise. If seg.Start() ==
+// Functions.MinKey(), PrevNonEmpty will return two terminal iterators.
+// Otherwise, exactly one of the iterators returned by PrevNonEmpty will be
+// non-terminal.
+func (seg idMapIterator) PrevNonEmpty() (idMapIterator, idMapGapIterator) {
+ gap := seg.PrevGap()
+ if gap.Range().Length() != 0 {
+ return idMapIterator{}, gap
+ }
+ return gap.PrevSegment(), idMapGapIterator{}
+}
+
+// NextNonEmpty returns the iterated segment's successor if it is adjacent, or
+// the gap after the iterated segment otherwise. If seg.End() ==
+// Functions.MaxKey(), NextNonEmpty will return two terminal iterators.
+// Otherwise, exactly one of the iterators returned by NextNonEmpty will be
+// non-terminal.
+func (seg idMapIterator) NextNonEmpty() (idMapIterator, idMapGapIterator) {
+ gap := seg.NextGap()
+ if gap.Range().Length() != 0 {
+ return idMapIterator{}, gap
+ }
+ return gap.NextSegment(), idMapGapIterator{}
+}
+
+// A GapIterator is conceptually one of:
+//
+// - A pointer to a position between two segments, before the first segment, or
+// after the last segment in a set, called a *gap*; or
+//
+// - A terminal iterator, which is a sentinel indicating that the end of
+// iteration has been reached.
+//
+// Note that the gap between two adjacent segments exists (iterators to it are
+// non-terminal), but has a length of zero. GapIterator.IsEmpty returns true
+// for such gaps. An empty set contains a single gap, spanning the entire range
+// of the set's keys.
+//
+// GapIterators are copyable values and are meaningfully equality-comparable.
+// The zero value of GapIterator is a terminal iterator.
+//
+// Unless otherwise specified, any mutation of a set invalidates all existing
+// iterators into the set.
+type idMapGapIterator struct {
+ // The representation of a GapIterator is identical to that of an Iterator,
+ // except that index corresponds to positions between segments in the same
+ // way as for node.children (see comment for node.nrSegments).
+ node *idMapnode
+ index int
+}
+
+// Ok returns true if the iterator is not terminal. All other methods are only
+// valid for non-terminal iterators.
+func (gap idMapGapIterator) Ok() bool {
+ return gap.node != nil
+}
+
+// Range returns the range spanned by the iterated gap.
+func (gap idMapGapIterator) Range() idMapRange {
+ return idMapRange{gap.Start(), gap.End()}
+}
+
+// Start is equivalent to Range().Start, but should be preferred if only the
+// start of the range is needed.
+func (gap idMapGapIterator) Start() uint32 {
+ if ps := gap.PrevSegment(); ps.Ok() {
+ return ps.End()
+ }
+ return idMapFunctions{}.MinKey()
+}
+
+// End is equivalent to Range().End, but should be preferred if only the end of
+// the range is needed.
+func (gap idMapGapIterator) End() uint32 {
+ if ns := gap.NextSegment(); ns.Ok() {
+ return ns.Start()
+ }
+ return idMapFunctions{}.MaxKey()
+}
+
+// IsEmpty returns true if the iterated gap is empty (that is, the "gap" is
+// between two adjacent segments.)
+func (gap idMapGapIterator) IsEmpty() bool {
+ return gap.Range().Length() == 0
+}
+
+// PrevSegment returns the segment immediately before the iterated gap. If no
+// such segment exists, PrevSegment returns a terminal iterator.
+func (gap idMapGapIterator) PrevSegment() idMapIterator {
+ return idMapsegmentBeforePosition(gap.node, gap.index)
+}
+
+// NextSegment returns the segment immediately after the iterated gap. If no
+// such segment exists, NextSegment returns a terminal iterator.
+func (gap idMapGapIterator) NextSegment() idMapIterator {
+ return idMapsegmentAfterPosition(gap.node, gap.index)
+}
+
+// PrevGap returns the iterated gap's predecessor. If no such gap exists,
+// PrevGap returns a terminal iterator.
+func (gap idMapGapIterator) PrevGap() idMapGapIterator {
+ seg := gap.PrevSegment()
+ if !seg.Ok() {
+ return idMapGapIterator{}
+ }
+ return seg.PrevGap()
+}
+
+// NextGap returns the iterated gap's successor. If no such gap exists, NextGap
+// returns a terminal iterator.
+func (gap idMapGapIterator) NextGap() idMapGapIterator {
+ seg := gap.NextSegment()
+ if !seg.Ok() {
+ return idMapGapIterator{}
+ }
+ return seg.NextGap()
+}
+
+// NextLargeEnoughGap returns the first gap after the iterated gap whose length
+// is at least minSize. If no such gap exists, it returns a terminal gap
+// iterator (the iterated gap itself is NOT considered).
+//
+// Precondition: trackGaps must be 1.
+func (gap idMapGapIterator) NextLargeEnoughGap(minSize uint32) idMapGapIterator {
+ if idMaptrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == gap.node.nrSegments {
+
+ gap.node = gap.NextSegment().node
+ gap.index = 0
+ return gap.nextLargeEnoughGapHelper(minSize)
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
+
+// nextLargeEnoughGapHelper is the helper function used by NextLargeEnoughGap
+// to do the actual recursion.
+//
+// Preconditions: gap is NOT the trailing gap of a non-leaf node.
+func (gap idMapGapIterator) nextLargeEnoughGapHelper(minSize uint32) idMapGapIterator {
+
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == gap.node.nrSegments)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return idMapGapIterator{}
+ }
+
+ gap.index++
+ for gap.index <= gap.node.nrSegments {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index++
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == gap.node.nrSegments {
+
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
+
+// PrevLargeEnoughGap returns the first gap before the iterated gap whose length
+// is at least minSize. If no such gap exists, it returns a terminal gap
+// iterator (the iterated gap itself is NOT considered).
+//
+// Precondition: trackGaps must be 1.
+func (gap idMapGapIterator) PrevLargeEnoughGap(minSize uint32) idMapGapIterator {
+ if idMaptrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == 0 {
+
+ gap.node = gap.PrevSegment().node
+ gap.index = gap.node.nrSegments
+ return gap.prevLargeEnoughGapHelper(minSize)
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
+
+// prevLargeEnoughGapHelper is the helper function used by PrevLargeEnoughGap
+// to do the actual recursion.
+//
+// Preconditions: gap is NOT the first gap of a non-leaf node.
+func (gap idMapGapIterator) prevLargeEnoughGapHelper(minSize uint32) idMapGapIterator {
+
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == 0)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return idMapGapIterator{}
+ }
+
+ gap.index--
+ for gap.index >= 0 {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index--
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == 0 {
+
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
+
+// segmentBeforePosition returns the predecessor segment of the position given
+// by n.children[i], which may or may not contain a child. If no such segment
+// exists, segmentBeforePosition returns a terminal iterator.
+func idMapsegmentBeforePosition(n *idMapnode, i int) idMapIterator {
+ for i == 0 {
+ if n.parent == nil {
+ return idMapIterator{}
+ }
+ n, i = n.parent, n.parentIndex
+ }
+ return idMapIterator{n, i - 1}
+}
+
+// segmentAfterPosition returns the successor segment of the position given by
+// n.children[i], which may or may not contain a child. If no such segment
+// exists, segmentAfterPosition returns a terminal iterator.
+func idMapsegmentAfterPosition(n *idMapnode, i int) idMapIterator {
+ for i == n.nrSegments {
+ if n.parent == nil {
+ return idMapIterator{}
+ }
+ n, i = n.parent, n.parentIndex
+ }
+ return idMapIterator{n, i}
+}
+
+func idMapzeroValueSlice(slice []uint32) {
+
+ for i := range slice {
+ idMapFunctions{}.ClearValue(&slice[i])
+ }
+}
+
+func idMapzeroNodeSlice(slice []*idMapnode) {
+ for i := range slice {
+ slice[i] = nil
+ }
+}
+
+// String stringifies a Set for debugging.
+func (s *idMapSet) String() string {
+ return s.root.String()
+}
+
+// String stringifies a node (and all of its children) for debugging.
+func (n *idMapnode) String() string {
+ var buf bytes.Buffer
+ n.writeDebugString(&buf, "")
+ return buf.String()
+}
+
+func (n *idMapnode) writeDebugString(buf *bytes.Buffer, prefix string) {
+ if n.hasChildren != (n.nrSegments > 0 && n.children[0] != nil) {
+ buf.WriteString(prefix)
+ buf.WriteString(fmt.Sprintf("WARNING: inconsistent value of hasChildren: got %v, want %v\n", n.hasChildren, !n.hasChildren))
+ }
+ for i := 0; i < n.nrSegments; i++ {
+ if child := n.children[i]; child != nil {
+ cprefix := fmt.Sprintf("%s- % 3d ", prefix, i)
+ if child.parent != n || child.parentIndex != i {
+ buf.WriteString(cprefix)
+ buf.WriteString(fmt.Sprintf("WARNING: inconsistent linkage to parent: got (%p, %d), want (%p, %d)\n", child.parent, child.parentIndex, n, i))
+ }
+ child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, i))
+ }
+ buf.WriteString(prefix)
+ if n.hasChildren {
+ if idMaptrackGaps != 0 {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v, maxGap: %d\n", i, n.keys[i], n.values[i], n.maxGap.Get()))
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
+ }
+ if child := n.children[n.nrSegments]; child != nil {
+ child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, n.nrSegments))
+ }
+}
+
+// SegmentDataSlices represents segments from a set as slices of start, end, and
+// values. SegmentDataSlices is primarily used as an intermediate representation
+// for save/restore and the layout here is optimized for that.
+//
+// +stateify savable
+type idMapSegmentDataSlices struct {
+ Start []uint32
+ End []uint32
+ Values []uint32
+}
+
+// ExportSortedSlices returns a copy of all segments in the given set, in
+// ascending key order.
+func (s *idMapSet) ExportSortedSlices() *idMapSegmentDataSlices {
+ var sds idMapSegmentDataSlices
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ sds.Start = append(sds.Start, seg.Start())
+ sds.End = append(sds.End, seg.End())
+ sds.Values = append(sds.Values, seg.Value())
+ }
+ sds.Start = sds.Start[:len(sds.Start):len(sds.Start)]
+ sds.End = sds.End[:len(sds.End):len(sds.End)]
+ sds.Values = sds.Values[:len(sds.Values):len(sds.Values)]
+ return &sds
+}
+
+// ImportSortedSlices initializes the given set from the given slice.
+//
+// Preconditions:
+// * s must be empty.
+// * sds must represent a valid set (the segments in sds must have valid
+// lengths that do not overlap).
+// * The segments in sds must be sorted in ascending key order.
+func (s *idMapSet) ImportSortedSlices(sds *idMapSegmentDataSlices) error {
+ if !s.IsEmpty() {
+ return fmt.Errorf("cannot import into non-empty set %v", s)
+ }
+ gap := s.FirstGap()
+ for i := range sds.Start {
+ r := idMapRange{sds.Start[i], sds.End[i]}
+ if !gap.Range().IsSupersetOf(r) {
+ return fmt.Errorf("segment overlaps a preceding segment or is incorrectly sorted: [%d, %d) => %v", sds.Start[i], sds.End[i], sds.Values[i])
+ }
+ gap = s.InsertWithoutMerging(gap, r, sds.Values[i]).NextGap()
+ }
+ return nil
+}
+
+// segmentTestCheck returns an error if s is incorrectly sorted, does not
+// contain exactly expectedSegments segments, or contains a segment which
+// fails the passed check.
+//
+// This should be used only for testing, and has been added to this package for
+// templating convenience.
+func (s *idMapSet) segmentTestCheck(expectedSegments int, segFunc func(int, idMapRange, uint32) error) error {
+ havePrev := false
+ prev := uint32(0)
+ nrSegments := 0
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ next := seg.Start()
+ if havePrev && prev >= next {
+ return fmt.Errorf("incorrect order: key %d (segment %d) >= key %d (segment %d)", prev, nrSegments-1, next, nrSegments)
+ }
+ if segFunc != nil {
+ if err := segFunc(nrSegments, seg.Range(), seg.Value()); err != nil {
+ return err
+ }
+ }
+ prev = next
+ havePrev = true
+ nrSegments++
+ }
+ if nrSegments != expectedSegments {
+ return fmt.Errorf("incorrect number of segments: got %d, wanted %d", nrSegments, expectedSegments)
+ }
+ return nil
+}
+
+// countSegments counts the number of segments in the set.
+//
+// Similar to Check, this should only be used for testing.
+func (s *idMapSet) countSegments() (segments int) {
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ segments++
+ }
+ return segments
+}
+func (s *idMapSet) saveRoot() *idMapSegmentDataSlices {
+ return s.ExportSortedSlices()
+}
+
+func (s *idMapSet) loadRoot(sds *idMapSegmentDataSlices) {
+ if err := s.ImportSortedSlices(sds); err != nil {
+ panic(err)
+ }
+}
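
The ExportSortedSlices/ImportSortedSlices pair above flattens the tree-backed set into three parallel slices for save/restore and rebuilds it on load. A minimal round-trip sketch, assuming code living in the same package as the generated set; the helper name is illustrative only:

// roundTripIDMap is a hypothetical helper showing the intended
// save/restore flow for a segment set.
func roundTripIDMap(src *idMapSet) (*idMapSet, error) {
	// Flatten the set into sorted Start/End/Values slices.
	sds := src.ExportSortedSlices()

	// Rebuild into a fresh (empty) set; ImportSortedSlices requires the
	// slices to be sorted and non-overlapping, as documented above.
	var dst idMapSet
	if err := dst.ImportSortedSlices(sds); err != nil {
		return nil, err
	}
	return &dst, nil
}
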
diff --git a/pkg/sentry/kernel/contexttest/BUILD b/pkg/sentry/kernel/contexttest/BUILD
deleted file mode 100644
index 9d26392c0..000000000
--- a/pkg/sentry/kernel/contexttest/BUILD
+++ /dev/null
@@ -1,17 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "contexttest",
- testonly = 1,
- srcs = ["contexttest.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/context",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/kernel",
- "//pkg/sentry/pgalloc",
- "//pkg/sentry/platform",
- ],
-)
diff --git a/pkg/sentry/kernel/contexttest/contexttest.go b/pkg/sentry/kernel/contexttest/contexttest.go
deleted file mode 100644
index 22c340e56..000000000
--- a/pkg/sentry/kernel/contexttest/contexttest.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package contexttest provides a test context.Context which includes
-// a dummy kernel pointing to a valid platform.
-package contexttest
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/sentry/pgalloc"
- "gvisor.dev/gvisor/pkg/sentry/platform"
-)
-
-// Context returns a Context that may be used in tests. Uses ptrace as the
-// platform.Platform, and provides a stub kernel that only serves to point to
-// the platform.
-func Context(tb testing.TB) context.Context {
- ctx := contexttest.Context(tb)
- k := &kernel.Kernel{
- Platform: platform.FromContext(ctx),
- }
- k.SetMemoryFile(pgalloc.MemoryFileFromContext(ctx))
- ctx.(*contexttest.TestContext).RegisterValue(kernel.CtxKernel, k)
- return ctx
-}
diff --git a/pkg/sentry/kernel/epoll/BUILD b/pkg/sentry/kernel/epoll/BUILD
deleted file mode 100644
index 723a85f64..000000000
--- a/pkg/sentry/kernel/epoll/BUILD
+++ /dev/null
@@ -1,52 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "epoll_list",
- out = "epoll_list.go",
- package = "epoll",
- prefix = "pollEntry",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*pollEntry",
- "Linker": "*pollEntry",
- },
-)
-
-go_library(
- name = "epoll",
- srcs = [
- "epoll.go",
- "epoll_list.go",
- "epoll_state.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/refs",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/anon",
- "//pkg/sentry/fs/fsutil",
- "//pkg/sync",
- "//pkg/usermem",
- "//pkg/waiter",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "epoll_test",
- size = "small",
- srcs = [
- "epoll_test.go",
- ],
- library = ":epoll",
- deps = [
- "//pkg/sentry/contexttest",
- "//pkg/sentry/fs/filetest",
- "//pkg/waiter",
- ],
-)
diff --git a/pkg/sentry/kernel/epoll/epoll_list.go b/pkg/sentry/kernel/epoll/epoll_list.go
new file mode 100644
index 000000000..b6abe2de9
--- /dev/null
+++ b/pkg/sentry/kernel/epoll/epoll_list.go
@@ -0,0 +1,221 @@
+package epoll
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type pollEntryElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (pollEntryElementMapper) linkerFor(elem *pollEntry) *pollEntry { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type pollEntryList struct {
+ head *pollEntry
+ tail *pollEntry
+}
+
+// Reset resets list l to the empty state.
+func (l *pollEntryList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *pollEntryList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *pollEntryList) Front() *pollEntry {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *pollEntryList) Back() *pollEntry {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *pollEntryList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (pollEntryElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *pollEntryList) PushFront(e *pollEntry) {
+ linker := pollEntryElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ pollEntryElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *pollEntryList) PushBack(e *pollEntry) {
+ linker := pollEntryElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ pollEntryElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *pollEntryList) PushBackList(m *pollEntryList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ pollEntryElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ pollEntryElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *pollEntryList) InsertAfter(b, e *pollEntry) {
+ bLinker := pollEntryElementMapper{}.linkerFor(b)
+ eLinker := pollEntryElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ pollEntryElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *pollEntryList) InsertBefore(a, e *pollEntry) {
+ aLinker := pollEntryElementMapper{}.linkerFor(a)
+ eLinker := pollEntryElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ pollEntryElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *pollEntryList) Remove(e *pollEntry) {
+ linker := pollEntryElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ pollEntryElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ pollEntryElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type pollEntryEntry struct {
+ next *pollEntry
+ prev *pollEntry
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *pollEntryEntry) Next() *pollEntry {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *pollEntryEntry) Prev() *pollEntry {
+ return e.prev
+}
+
+// SetNext assigns 'elem' as the entry that follows e in the list.
+// SetNext assigns 'elem' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *pollEntryEntry) SetNext(elem *pollEntry) {
+ e.next = elem
+}
+
+// SetPrev assigns 'elem' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *pollEntryEntry) SetPrev(elem *pollEntry) {
+ e.prev = elem
+}
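
pollEntryList above is an intrusive doubly linked list: each element supplies its own next/prev links (here via pollEntryEntry), so PushBack, Remove and friends only rewrite pointers and never allocate. A short usage sketch, assuming pollEntry embeds pollEntryEntry so the Linker methods are promoted (the helper function is illustrative only):

// drainEntries queues the given entries and then detaches them one by one.
func drainEntries(entries []*pollEntry) {
	var ready pollEntryList // the zero value is an empty, usable list
	for _, e := range entries {
		ready.PushBack(e) // O(1), no allocation
	}
	for e := ready.Front(); e != nil; {
		next := e.Next() // grab the link before Remove clears it
		ready.Remove(e)
		e = next
	}
}
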
diff --git a/pkg/sentry/kernel/epoll/epoll_state_autogen.go b/pkg/sentry/kernel/epoll/epoll_state_autogen.go
new file mode 100644
index 000000000..44bd520ac
--- /dev/null
+++ b/pkg/sentry/kernel/epoll/epoll_state_autogen.go
@@ -0,0 +1,192 @@
+// automatically generated by stateify.
+
+package epoll
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (f *FileIdentifier) StateTypeName() string {
+ return "pkg/sentry/kernel/epoll.FileIdentifier"
+}
+
+func (f *FileIdentifier) StateFields() []string {
+ return []string{
+ "File",
+ "Fd",
+ }
+}
+
+func (f *FileIdentifier) beforeSave() {}
+
+// +checklocksignore
+func (f *FileIdentifier) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.File)
+ stateSinkObject.Save(1, &f.Fd)
+}
+
+func (f *FileIdentifier) afterLoad() {}
+
+// +checklocksignore
+func (f *FileIdentifier) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.LoadWait(0, &f.File)
+ stateSourceObject.Load(1, &f.Fd)
+}
+
+func (p *pollEntry) StateTypeName() string {
+ return "pkg/sentry/kernel/epoll.pollEntry"
+}
+
+func (p *pollEntry) StateFields() []string {
+ return []string{
+ "pollEntryEntry",
+ "id",
+ "userData",
+ "mask",
+ "flags",
+ "epoll",
+ }
+}
+
+func (p *pollEntry) beforeSave() {}
+
+// +checklocksignore
+func (p *pollEntry) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+ stateSinkObject.Save(0, &p.pollEntryEntry)
+ stateSinkObject.Save(1, &p.id)
+ stateSinkObject.Save(2, &p.userData)
+ stateSinkObject.Save(3, &p.mask)
+ stateSinkObject.Save(4, &p.flags)
+ stateSinkObject.Save(5, &p.epoll)
+}
+
+// +checklocksignore
+func (p *pollEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &p.pollEntryEntry)
+ stateSourceObject.LoadWait(1, &p.id)
+ stateSourceObject.Load(2, &p.userData)
+ stateSourceObject.Load(3, &p.mask)
+ stateSourceObject.Load(4, &p.flags)
+ stateSourceObject.Load(5, &p.epoll)
+ stateSourceObject.AfterLoad(p.afterLoad)
+}
+
+func (e *EventPoll) StateTypeName() string {
+ return "pkg/sentry/kernel/epoll.EventPoll"
+}
+
+func (e *EventPoll) StateFields() []string {
+ return []string{
+ "files",
+ "readyList",
+ "waitingList",
+ "disabledList",
+ }
+}
+
+func (e *EventPoll) beforeSave() {}
+
+// +checklocksignore
+func (e *EventPoll) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ if !state.IsZeroValue(&e.FilePipeSeek) {
+ state.Failf("FilePipeSeek is %#v, expected zero", &e.FilePipeSeek)
+ }
+ if !state.IsZeroValue(&e.FileNotDirReaddir) {
+ state.Failf("FileNotDirReaddir is %#v, expected zero", &e.FileNotDirReaddir)
+ }
+ if !state.IsZeroValue(&e.FileNoFsync) {
+ state.Failf("FileNoFsync is %#v, expected zero", &e.FileNoFsync)
+ }
+ if !state.IsZeroValue(&e.FileNoopFlush) {
+ state.Failf("FileNoopFlush is %#v, expected zero", &e.FileNoopFlush)
+ }
+ if !state.IsZeroValue(&e.FileNoIoctl) {
+ state.Failf("FileNoIoctl is %#v, expected zero", &e.FileNoIoctl)
+ }
+ if !state.IsZeroValue(&e.FileNoMMap) {
+ state.Failf("FileNoMMap is %#v, expected zero", &e.FileNoMMap)
+ }
+ if !state.IsZeroValue(&e.Queue) {
+ state.Failf("Queue is %#v, expected zero", &e.Queue)
+ }
+ stateSinkObject.Save(0, &e.files)
+ stateSinkObject.Save(1, &e.readyList)
+ stateSinkObject.Save(2, &e.waitingList)
+ stateSinkObject.Save(3, &e.disabledList)
+}
+
+// +checklocksignore
+func (e *EventPoll) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.files)
+ stateSourceObject.Load(1, &e.readyList)
+ stateSourceObject.Load(2, &e.waitingList)
+ stateSourceObject.Load(3, &e.disabledList)
+ stateSourceObject.AfterLoad(e.afterLoad)
+}
+
+func (l *pollEntryList) StateTypeName() string {
+ return "pkg/sentry/kernel/epoll.pollEntryList"
+}
+
+func (l *pollEntryList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *pollEntryList) beforeSave() {}
+
+// +checklocksignore
+func (l *pollEntryList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *pollEntryList) afterLoad() {}
+
+// +checklocksignore
+func (l *pollEntryList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *pollEntryEntry) StateTypeName() string {
+ return "pkg/sentry/kernel/epoll.pollEntryEntry"
+}
+
+func (e *pollEntryEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *pollEntryEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *pollEntryEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *pollEntryEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *pollEntryEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func init() {
+ state.Register((*FileIdentifier)(nil))
+ state.Register((*pollEntry)(nil))
+ state.Register((*EventPoll)(nil))
+ state.Register((*pollEntryList)(nil))
+ state.Register((*pollEntryEntry)(nil))
+}
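
The generated methods above follow a fixed pattern: StateFields names the saved fields, and StateSave/StateLoad bind each one to an integer slot that both sides agree on. A hand-written sketch of the same pattern for a hypothetical two-field type (the type and its fields are not part of this package; it reuses the pkg/state import shown above):

// example is a hypothetical savable type used only to illustrate the
// stateify method set.
type example struct {
	counter int64
	name    string
}

func (e *example) StateTypeName() string { return "pkg/sentry/kernel/epoll.example" }

func (e *example) StateFields() []string { return []string{"counter", "name"} }

func (e *example) StateSave(stateSinkObject state.Sink) {
	stateSinkObject.Save(0, &e.counter)
	stateSinkObject.Save(1, &e.name)
}

func (e *example) StateLoad(stateSourceObject state.Source) {
	stateSourceObject.Load(0, &e.counter)
	stateSourceObject.Load(1, &e.name)
}

To take part in save/restore, such a type would also be passed to state.Register, as in the init function above.
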
diff --git a/pkg/sentry/kernel/epoll/epoll_test.go b/pkg/sentry/kernel/epoll/epoll_test.go
deleted file mode 100644
index 8ef6cb3e7..000000000
--- a/pkg/sentry/kernel/epoll/epoll_test.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package epoll
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/fs/filetest"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-func TestFileDestroyed(t *testing.T) {
- f := filetest.NewTestFile(t)
- id := FileIdentifier{f, 12}
-
- ctx := contexttest.Context(t)
- efile := NewEventPoll(ctx)
- e := efile.FileOperations.(*EventPoll)
- if err := e.AddEntry(id, 0, waiter.ReadableEvents, [2]int32{}); err != nil {
- t.Fatalf("addEntry failed: %v", err)
- }
-
- // Check that we get an event reported twice in a row.
- evt := e.ReadEvents(1)
- if len(evt) != 1 {
- t.Fatalf("Unexpected number of ready events: want %v, got %v", 1, len(evt))
- }
-
- evt = e.ReadEvents(1)
- if len(evt) != 1 {
- t.Fatalf("Unexpected number of ready events: want %v, got %v", 1, len(evt))
- }
-
- // Destroy the file. Check that we get no more events.
- f.DecRef(ctx)
-
- evt = e.ReadEvents(1)
- if len(evt) != 0 {
- t.Fatalf("Unexpected number of ready events: want %v, got %v", 0, len(evt))
- }
-
-}
diff --git a/pkg/sentry/kernel/eventfd/BUILD b/pkg/sentry/kernel/eventfd/BUILD
deleted file mode 100644
index 564c3d42e..000000000
--- a/pkg/sentry/kernel/eventfd/BUILD
+++ /dev/null
@@ -1,35 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "eventfd",
- srcs = ["eventfd.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/fdnotifier",
- "//pkg/hostarch",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/anon",
- "//pkg/sentry/fs/fsutil",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- "//pkg/waiter",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "eventfd_test",
- size = "small",
- srcs = ["eventfd_test.go"],
- library = ":eventfd",
- deps = [
- "//pkg/sentry/contexttest",
- "//pkg/usermem",
- "//pkg/waiter",
- ],
-)
diff --git a/pkg/sentry/kernel/eventfd/eventfd_state_autogen.go b/pkg/sentry/kernel/eventfd/eventfd_state_autogen.go
new file mode 100644
index 000000000..596920c42
--- /dev/null
+++ b/pkg/sentry/kernel/eventfd/eventfd_state_autogen.go
@@ -0,0 +1,45 @@
+// automatically generated by stateify.
+
+package eventfd
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (e *EventOperations) StateTypeName() string {
+ return "pkg/sentry/kernel/eventfd.EventOperations"
+}
+
+func (e *EventOperations) StateFields() []string {
+ return []string{
+ "val",
+ "semMode",
+ "hostfd",
+ }
+}
+
+func (e *EventOperations) beforeSave() {}
+
+// +checklocksignore
+func (e *EventOperations) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ if !state.IsZeroValue(&e.wq) {
+ state.Failf("wq is %#v, expected zero", &e.wq)
+ }
+ stateSinkObject.Save(0, &e.val)
+ stateSinkObject.Save(1, &e.semMode)
+ stateSinkObject.Save(2, &e.hostfd)
+}
+
+func (e *EventOperations) afterLoad() {}
+
+// +checklocksignore
+func (e *EventOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.val)
+ stateSourceObject.Load(1, &e.semMode)
+ stateSourceObject.Load(2, &e.hostfd)
+}
+
+func init() {
+ state.Register((*EventOperations)(nil))
+}
diff --git a/pkg/sentry/kernel/eventfd/eventfd_test.go b/pkg/sentry/kernel/eventfd/eventfd_test.go
deleted file mode 100644
index 1b9e60b3a..000000000
--- a/pkg/sentry/kernel/eventfd/eventfd_test.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package eventfd
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/usermem"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-func TestEventfd(t *testing.T) {
- initVals := []uint64{
- 0,
- // Using a non-zero initial value verifies that writing to an
- // eventfd signals when the eventfd's counter was already
- // non-zero.
- 343,
- }
-
- for _, initVal := range initVals {
- ctx := contexttest.Context(t)
-
- // Make a new event that is writable.
- event := New(ctx, initVal, false)
-
- // Register a callback for a write event.
- w, ch := waiter.NewChannelEntry(nil)
- event.EventRegister(&w, waiter.ReadableEvents)
- defer event.EventUnregister(&w)
-
- data := []byte("00000124")
- // Create and submit a write request.
- n, err := event.Writev(ctx, usermem.BytesIOSequence(data))
- if err != nil {
- t.Fatal(err)
- }
- if n != 8 {
- t.Errorf("eventfd.write wrote %d bytes, not full int64", n)
- }
-
- // Check if the callback fired due to the write event.
- select {
- case <-ch:
- default:
- t.Errorf("Didn't get notified of EventIn after write")
- }
- }
-}
-
-func TestEventfdStat(t *testing.T) {
- ctx := contexttest.Context(t)
-
- // Make a new event that is writable.
- event := New(ctx, 0, false)
-
- // Create and submit an stat request.
- uattr, err := event.Dirent.Inode.UnstableAttr(ctx)
- if err != nil {
- t.Fatalf("eventfd stat request failed: %v", err)
- }
- if uattr.Size != 0 {
- t.Fatal("EventFD size should be 0")
- }
-}
diff --git a/pkg/sentry/kernel/fasync/BUILD b/pkg/sentry/kernel/fasync/BUILD
deleted file mode 100644
index 6b2dd09da..000000000
--- a/pkg/sentry/kernel/fasync/BUILD
+++ /dev/null
@@ -1,19 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "fasync",
- srcs = ["fasync.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/errors/linuxerr",
- "//pkg/sentry/fs",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/waiter",
- ],
-)
diff --git a/pkg/sentry/kernel/fasync/fasync_state_autogen.go b/pkg/sentry/kernel/fasync/fasync_state_autogen.go
new file mode 100644
index 000000000..8ec069b92
--- /dev/null
+++ b/pkg/sentry/kernel/fasync/fasync_state_autogen.go
@@ -0,0 +1,57 @@
+// automatically generated by stateify.
+
+package fasync
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (a *FileAsync) StateTypeName() string {
+ return "pkg/sentry/kernel/fasync.FileAsync"
+}
+
+func (a *FileAsync) StateFields() []string {
+ return []string{
+ "e",
+ "fd",
+ "requester",
+ "registered",
+ "signal",
+ "recipientPG",
+ "recipientTG",
+ "recipientT",
+ }
+}
+
+func (a *FileAsync) beforeSave() {}
+
+// +checklocksignore
+func (a *FileAsync) StateSave(stateSinkObject state.Sink) {
+ a.beforeSave()
+ stateSinkObject.Save(0, &a.e)
+ stateSinkObject.Save(1, &a.fd)
+ stateSinkObject.Save(2, &a.requester)
+ stateSinkObject.Save(3, &a.registered)
+ stateSinkObject.Save(4, &a.signal)
+ stateSinkObject.Save(5, &a.recipientPG)
+ stateSinkObject.Save(6, &a.recipientTG)
+ stateSinkObject.Save(7, &a.recipientT)
+}
+
+func (a *FileAsync) afterLoad() {}
+
+// +checklocksignore
+func (a *FileAsync) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &a.e)
+ stateSourceObject.Load(1, &a.fd)
+ stateSourceObject.Load(2, &a.requester)
+ stateSourceObject.Load(3, &a.registered)
+ stateSourceObject.Load(4, &a.signal)
+ stateSourceObject.Load(5, &a.recipientPG)
+ stateSourceObject.Load(6, &a.recipientTG)
+ stateSourceObject.Load(7, &a.recipientT)
+}
+
+func init() {
+ state.Register((*FileAsync)(nil))
+}
diff --git a/pkg/sentry/kernel/fd_table_refs.go b/pkg/sentry/kernel/fd_table_refs.go
new file mode 100644
index 000000000..9f3bcc07a
--- /dev/null
+++ b/pkg/sentry/kernel/fd_table_refs.go
@@ -0,0 +1,140 @@
+package kernel
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const FDTableenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var FDTableobj *FDTable
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type FDTableRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *FDTableRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *FDTableRefs) RefType() string {
+ return fmt.Sprintf("%T", FDTableobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *FDTableRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *FDTableRefs) LogRefs() bool {
+ return FDTableenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *FDTableRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *FDTableRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if FDTableenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.TryRefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *FDTableRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if FDTableenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *FDTableRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if FDTableenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *FDTableRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
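
FDTableRefs packs two 32-bit counters into a single int64: the high half absorbs the speculative bump made by TryIncRef, and int32(v) == 0 detects that the real count had already reached zero, which is what lets TryIncRef avoid a CompareAndSwap loop. A minimal lifecycle sketch of the counter itself, within the kernel package (the template is presumably embedded in the counted object in real use, so these methods are promoted; the destroy closure below is a placeholder):

// refsLifecycleSketch walks an FDTableRefs through init, a conditional
// reference, and final release.
func refsLifecycleSketch() {
	var r FDTableRefs
	r.InitRefs() // count = 1, registered for leak checking

	if r.TryIncRef() { // count = 2; fails only if the count was already 0
		r.DecRef(nil) // count = 1; a nil destroy callback is allowed
	}

	r.DecRef(func() {
		// Runs exactly once, when the last reference is dropped.
	})
}
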
diff --git a/pkg/sentry/kernel/fd_table_test.go b/pkg/sentry/kernel/fd_table_test.go
deleted file mode 100644
index bf5460083..000000000
--- a/pkg/sentry/kernel/fd_table_test.go
+++ /dev/null
@@ -1,228 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package kernel
-
-import (
- "runtime"
- "testing"
-
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/fs"
- "gvisor.dev/gvisor/pkg/sentry/fs/filetest"
- "gvisor.dev/gvisor/pkg/sentry/limits"
- "gvisor.dev/gvisor/pkg/sync"
-)
-
-const (
- // maxFD is the maximum FD to try to create in the map.
- //
- // This number of open files has been seen in the wild.
- maxFD = 2 * 1024
-)
-
-func runTest(t testing.TB, fn func(ctx context.Context, fdTable *FDTable, file *fs.File, limitSet *limits.LimitSet)) {
- t.Helper() // Don't show in stacks.
-
- // Create the limits and context.
- limitSet := limits.NewLimitSet()
- limitSet.Set(limits.NumberOfFiles, limits.Limit{maxFD, maxFD}, true)
- ctx := contexttest.WithLimitSet(contexttest.Context(t), limitSet)
-
- // Create a test file.;
- file := filetest.NewTestFile(t)
-
- // Create the table.
- fdTable := new(FDTable)
- fdTable.init()
-
- // Run the test.
- fn(ctx, fdTable, file, limitSet)
-}
-
-// TestFDTableMany allocates maxFD FDs, i.e. maxes out the FDTable, until there
-// is no room, then makes sure that NewFDAt works and also that if we remove
-// one and add one that works too.
-func TestFDTableMany(t *testing.T) {
- runTest(t, func(ctx context.Context, fdTable *FDTable, file *fs.File, _ *limits.LimitSet) {
- for i := 0; i < maxFD; i++ {
- if _, err := fdTable.NewFDs(ctx, 0, []*fs.File{file}, FDFlags{}); err != nil {
- t.Fatalf("Allocated %v FDs but wanted to allocate %v", i, maxFD)
- }
- }
-
- if _, err := fdTable.NewFDs(ctx, 0, []*fs.File{file}, FDFlags{}); err == nil {
- t.Fatalf("fdTable.NewFDs(0, r) in full map: got nil, wanted error")
- }
-
- if err := fdTable.NewFDAt(ctx, 1, file, FDFlags{}); err != nil {
- t.Fatalf("fdTable.NewFDAt(1, r, FDFlags{}): got %v, wanted nil", err)
- }
-
- i := int32(2)
- fdTable.Remove(ctx, i)
- if fds, err := fdTable.NewFDs(ctx, 0, []*fs.File{file}, FDFlags{}); err != nil || fds[0] != i {
- t.Fatalf("Allocated %v FDs but wanted to allocate %v: %v", i, maxFD, err)
- }
- })
-}
-
-func TestFDTableOverLimit(t *testing.T) {
- runTest(t, func(ctx context.Context, fdTable *FDTable, file *fs.File, _ *limits.LimitSet) {
- if _, err := fdTable.NewFDs(ctx, maxFD, []*fs.File{file}, FDFlags{}); err == nil {
- t.Fatalf("fdTable.NewFDs(maxFD, f): got nil, wanted error")
- }
-
- if _, err := fdTable.NewFDs(ctx, maxFD-2, []*fs.File{file, file, file}, FDFlags{}); err == nil {
- t.Fatalf("fdTable.NewFDs(maxFD-2, {f,f,f}): got nil, wanted error")
- }
-
- if fds, err := fdTable.NewFDs(ctx, maxFD-3, []*fs.File{file, file, file}, FDFlags{}); err != nil {
- t.Fatalf("fdTable.NewFDs(maxFD-3, {f,f,f}): got %v, wanted nil", err)
- } else {
- for _, fd := range fds {
- fdTable.Remove(ctx, fd)
- }
- }
-
- if fds, err := fdTable.NewFDs(ctx, maxFD-1, []*fs.File{file}, FDFlags{}); err != nil || fds[0] != maxFD-1 {
- t.Fatalf("fdTable.NewFDAt(1, r, FDFlags{}): got %v, wanted nil", err)
- }
-
- if fds, err := fdTable.NewFDs(ctx, 0, []*fs.File{file}, FDFlags{}); err != nil {
- t.Fatalf("Adding an FD to a resized map: got %v, want nil", err)
- } else if len(fds) != 1 || fds[0] != 0 {
- t.Fatalf("Added an FD to a resized map: got %v, want {1}", fds)
- }
- })
-}
-
-// TestFDTable does a set of simple tests to make sure simple adds, removes,
-// GetRefs, and DecRefs work. The ordering is just weird enough that a
-// table-driven approach seemed clumsy.
-func TestFDTable(t *testing.T) {
- runTest(t, func(ctx context.Context, fdTable *FDTable, file *fs.File, limitSet *limits.LimitSet) {
- // Cap the limit at one.
- limitSet.Set(limits.NumberOfFiles, limits.Limit{1, maxFD}, true)
-
- if _, err := fdTable.NewFDs(ctx, 0, []*fs.File{file}, FDFlags{}); err != nil {
- t.Fatalf("Adding an FD to an empty 1-size map: got %v, want nil", err)
- }
-
- if _, err := fdTable.NewFDs(ctx, 0, []*fs.File{file}, FDFlags{}); err == nil {
- t.Fatalf("Adding an FD to a filled 1-size map: got nil, wanted an error")
- }
-
- // Remove the previous limit.
- limitSet.Set(limits.NumberOfFiles, limits.Limit{maxFD, maxFD}, true)
-
- if fds, err := fdTable.NewFDs(ctx, 0, []*fs.File{file}, FDFlags{}); err != nil {
- t.Fatalf("Adding an FD to a resized map: got %v, want nil", err)
- } else if len(fds) != 1 || fds[0] != 1 {
- t.Fatalf("Added an FD to a resized map: got %v, want {1}", fds)
- }
-
- if err := fdTable.NewFDAt(ctx, 1, file, FDFlags{}); err != nil {
- t.Fatalf("Replacing FD 1 via fdTable.NewFDAt(1, r, FDFlags{}): got %v, wanted nil", err)
- }
-
- if err := fdTable.NewFDAt(ctx, maxFD+1, file, FDFlags{}); err == nil {
- t.Fatalf("Using an FD that was too large via fdTable.NewFDAt(%v, r, FDFlags{}): got nil, wanted an error", maxFD+1)
- }
-
- if ref, _ := fdTable.Get(1); ref == nil {
- t.Fatalf("fdTable.Get(1): got nil, wanted %v", file)
- }
-
- if ref, _ := fdTable.Get(2); ref != nil {
- t.Fatalf("fdTable.Get(2): got a %v, wanted nil", ref)
- }
-
- ref, _ := fdTable.Remove(ctx, 1)
- if ref == nil {
- t.Fatalf("fdTable.Remove(1) for an existing FD: failed, want success")
- }
- ref.DecRef(ctx)
-
- if ref, _ := fdTable.Remove(ctx, 1); ref != nil {
- t.Fatalf("r.Remove(1) for a removed FD: got success, want failure")
- }
- })
-}
-
-func TestDescriptorFlags(t *testing.T) {
- runTest(t, func(ctx context.Context, fdTable *FDTable, file *fs.File, _ *limits.LimitSet) {
- if err := fdTable.NewFDAt(ctx, 2, file, FDFlags{CloseOnExec: true}); err != nil {
- t.Fatalf("fdTable.NewFDAt(2, r, FDFlags{}): got %v, wanted nil", err)
- }
-
- newFile, flags := fdTable.Get(2)
- if newFile == nil {
- t.Fatalf("fdTable.Get(2): got a %v, wanted nil", newFile)
- }
-
- if !flags.CloseOnExec {
- t.Fatalf("new File flags %v don't match original %d\n", flags, 0)
- }
- })
-}
-
-func BenchmarkFDLookupAndDecRef(b *testing.B) {
- b.StopTimer() // Setup.
-
- runTest(b, func(ctx context.Context, fdTable *FDTable, file *fs.File, _ *limits.LimitSet) {
- fds, err := fdTable.NewFDs(ctx, 0, []*fs.File{file, file, file, file, file}, FDFlags{})
- if err != nil {
- b.Fatalf("fdTable.NewFDs: got %v, wanted nil", err)
- }
-
- b.StartTimer() // Benchmark.
- for i := 0; i < b.N; i++ {
- tf, _ := fdTable.Get(fds[i%len(fds)])
- tf.DecRef(ctx)
- }
- })
-}
-
-func BenchmarkFDLookupAndDecRefConcurrent(b *testing.B) {
- b.StopTimer() // Setup.
-
- runTest(b, func(ctx context.Context, fdTable *FDTable, file *fs.File, _ *limits.LimitSet) {
- fds, err := fdTable.NewFDs(ctx, 0, []*fs.File{file, file, file, file, file}, FDFlags{})
- if err != nil {
- b.Fatalf("fdTable.NewFDs: got %v, wanted nil", err)
- }
-
- concurrency := runtime.GOMAXPROCS(0)
- if concurrency < 4 {
- concurrency = 4
- }
- each := b.N / concurrency
-
- b.StartTimer() // Benchmark.
- var wg sync.WaitGroup
- for i := 0; i < concurrency; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
- for i := 0; i < each; i++ {
- tf, _ := fdTable.Get(fds[i%len(fds)])
- tf.DecRef(ctx)
- }
- }()
- }
- wg.Wait()
- })
-}
diff --git a/pkg/sentry/kernel/fs_context_refs.go b/pkg/sentry/kernel/fs_context_refs.go
new file mode 100644
index 000000000..8cbe70d1f
--- /dev/null
+++ b/pkg/sentry/kernel/fs_context_refs.go
@@ -0,0 +1,140 @@
+package kernel
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const FSContextenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var FSContextobj *FSContext
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type FSContextRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *FSContextRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *FSContextRefs) RefType() string {
+ return fmt.Sprintf("%T", FSContextobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *FSContextRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *FSContextRefs) LogRefs() bool {
+ return FSContextenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *FSContextRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *FSContextRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if FSContextenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.TryRefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *FSContextRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if FSContextenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *FSContextRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if FSContextenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *FSContextRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
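
FSContextRefs is another instance of the same refs template; the part worth noting is the leak-checking hook: InitRefs registers the object with refsvfs2, afterLoad re-registers it if references survived a restore, and LeakMessage is what the checker would print for an object that never reached zero. A small sketch of how such a report could be surfaced (the helper is hypothetical):

// reportIfLeaked returns a human-readable leak message when the counter is
// still positive, e.g. "[kernel.FSContext 0x...] reference count of 1 instead of 0".
func reportIfLeaked(r *FSContextRefs) string {
	if r.ReadRefs() > 0 {
		return r.LeakMessage()
	}
	return ""
}
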
diff --git a/pkg/sentry/kernel/futex/BUILD b/pkg/sentry/kernel/futex/BUILD
deleted file mode 100644
index cfdea5cf7..000000000
--- a/pkg/sentry/kernel/futex/BUILD
+++ /dev/null
@@ -1,61 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "atomicptr_bucket",
- out = "atomicptr_bucket_unsafe.go",
- package = "futex",
- suffix = "Bucket",
- template = "//pkg/sync/atomicptr:generic_atomicptr",
- types = {
- "Value": "bucket",
- },
-)
-
-go_template_instance(
- name = "waiter_list",
- out = "waiter_list.go",
- package = "futex",
- prefix = "waiter",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*Waiter",
- "Linker": "*Waiter",
- },
-)
-
-go_library(
- name = "futex",
- srcs = [
- "atomicptr_bucket_unsafe.go",
- "futex.go",
- "waiter_list.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/sentry/memmap",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- ],
-)
-
-go_test(
- name = "futex_test",
- size = "small",
- srcs = ["futex_test.go"],
- library = ":futex",
- deps = [
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/sync",
- ],
-)
diff --git a/pkg/sentry/kernel/futex/atomicptr_bucket_unsafe.go b/pkg/sentry/kernel/futex/atomicptr_bucket_unsafe.go
new file mode 100644
index 000000000..d3fdf09b0
--- /dev/null
+++ b/pkg/sentry/kernel/futex/atomicptr_bucket_unsafe.go
@@ -0,0 +1,37 @@
+package futex
+
+import (
+ "sync/atomic"
+ "unsafe"
+)
+
+// An AtomicPtr is a pointer to a value of type Value that can be atomically
+// loaded and stored. The zero value of an AtomicPtr represents nil.
+//
+// Note that copying AtomicPtr by value performs a non-atomic read of the
+// stored pointer, which is unsafe if Store() can be called concurrently; in
+// this case, do `dst.Store(src.Load())` instead.
+//
+// +stateify savable
+type AtomicPtrBucket struct {
+ ptr unsafe.Pointer `state:".(*bucket)"`
+}
+
+func (p *AtomicPtrBucket) savePtr() *bucket {
+ return p.Load()
+}
+
+func (p *AtomicPtrBucket) loadPtr(v *bucket) {
+ p.Store(v)
+}
+
+// Load returns the value set by the most recent Store. It returns nil if there
+// has been no previous call to Store.
+func (p *AtomicPtrBucket) Load() *bucket {
+ return (*bucket)(atomic.LoadPointer(&p.ptr))
+}
+
+// Store sets the value returned by Load to x.
+func (p *AtomicPtrBucket) Store(x *bucket) {
+ atomic.StorePointer(&p.ptr, (unsafe.Pointer)(x))
+}
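
AtomicPtrBucket keeps the unsafe.Pointer conversions in one place so callers get atomic, type-safe load/store of a *bucket; as the comment above warns, copying the wrapper by value reads the pointer non-atomically, so concurrent users should share a single instance (or use dst.Store(src.Load())). A short sketch of the intended use within the futex package (publish/consume are illustrative names):

var shared AtomicPtrBucket

// publish makes b visible to concurrent readers.
func publish(b *bucket) {
	shared.Store(b)
}

// consume returns the most recently published bucket, or nil if none.
func consume() *bucket {
	return shared.Load()
}
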
diff --git a/pkg/sentry/kernel/futex/futex_state_autogen.go b/pkg/sentry/kernel/futex/futex_state_autogen.go
new file mode 100644
index 000000000..6e22b313a
--- /dev/null
+++ b/pkg/sentry/kernel/futex/futex_state_autogen.go
@@ -0,0 +1,122 @@
+// automatically generated by stateify.
+
+package futex
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (b *bucket) StateTypeName() string {
+ return "pkg/sentry/kernel/futex.bucket"
+}
+
+func (b *bucket) StateFields() []string {
+ return []string{}
+}
+
+func (b *bucket) beforeSave() {}
+
+// +checklocksignore
+func (b *bucket) StateSave(stateSinkObject state.Sink) {
+ b.beforeSave()
+ if !state.IsZeroValue(&b.waiters) {
+ state.Failf("waiters is %#v, expected zero", &b.waiters)
+ }
+}
+
+func (b *bucket) afterLoad() {}
+
+// +checklocksignore
+func (b *bucket) StateLoad(stateSourceObject state.Source) {
+}
+
+func (m *Manager) StateTypeName() string {
+ return "pkg/sentry/kernel/futex.Manager"
+}
+
+func (m *Manager) StateFields() []string {
+ return []string{
+ "sharedBucket",
+ }
+}
+
+func (m *Manager) beforeSave() {}
+
+// +checklocksignore
+func (m *Manager) StateSave(stateSinkObject state.Sink) {
+ m.beforeSave()
+ if !state.IsZeroValue(&m.privateBuckets) {
+ state.Failf("privateBuckets is %#v, expected zero", &m.privateBuckets)
+ }
+ stateSinkObject.Save(0, &m.sharedBucket)
+}
+
+func (m *Manager) afterLoad() {}
+
+// +checklocksignore
+func (m *Manager) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &m.sharedBucket)
+}
+
+func (l *waiterList) StateTypeName() string {
+ return "pkg/sentry/kernel/futex.waiterList"
+}
+
+func (l *waiterList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *waiterList) beforeSave() {}
+
+// +checklocksignore
+func (l *waiterList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *waiterList) afterLoad() {}
+
+// +checklocksignore
+func (l *waiterList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *waiterEntry) StateTypeName() string {
+ return "pkg/sentry/kernel/futex.waiterEntry"
+}
+
+func (e *waiterEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *waiterEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *waiterEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *waiterEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *waiterEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func init() {
+ state.Register((*bucket)(nil))
+ state.Register((*Manager)(nil))
+ state.Register((*waiterList)(nil))
+ state.Register((*waiterEntry)(nil))
+}
diff --git a/pkg/sentry/kernel/futex/futex_test.go b/pkg/sentry/kernel/futex/futex_test.go
deleted file mode 100644
index 04c136f87..000000000
--- a/pkg/sentry/kernel/futex/futex_test.go
+++ /dev/null
@@ -1,536 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package futex
-
-import (
- "math"
- "runtime"
- "sync/atomic"
- "testing"
- "unsafe"
-
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/errors/linuxerr"
- "gvisor.dev/gvisor/pkg/hostarch"
- "gvisor.dev/gvisor/pkg/sync"
-)
-
-// testData implements the Target interface, and allows us to
-// treat the address passed for futex operations as an index in
-// a byte slice for testing simplicity.
-type testData struct {
- context.Context
- data []byte
-}
-
-const sizeofInt32 = 4
-
-func newTestData(size uint) testData {
- return testData{
- data: make([]byte, size),
- }
-}
-
-func (t testData) SwapUint32(addr hostarch.Addr, new uint32) (uint32, error) {
- val := atomic.SwapUint32((*uint32)(unsafe.Pointer(&t.data[addr])), new)
- return val, nil
-}
-
-func (t testData) CompareAndSwapUint32(addr hostarch.Addr, old, new uint32) (uint32, error) {
- if atomic.CompareAndSwapUint32((*uint32)(unsafe.Pointer(&t.data[addr])), old, new) {
- return old, nil
- }
- return atomic.LoadUint32((*uint32)(unsafe.Pointer(&t.data[addr]))), nil
-}
-
-func (t testData) LoadUint32(addr hostarch.Addr) (uint32, error) {
- return atomic.LoadUint32((*uint32)(unsafe.Pointer(&t.data[addr]))), nil
-}
-
-func (t testData) GetSharedKey(addr hostarch.Addr) (Key, error) {
- return Key{
- Kind: KindSharedMappable,
- Offset: uint64(addr),
- }, nil
-}
-
-func futexKind(private bool) string {
- if private {
- return "private"
- }
- return "shared"
-}
-
-func newPreparedTestWaiter(t *testing.T, m *Manager, ta Target, addr hostarch.Addr, private bool, val uint32, bitmask uint32) *Waiter {
- w := NewWaiter()
- if err := m.WaitPrepare(w, ta, addr, private, val, bitmask); err != nil {
- t.Fatalf("WaitPrepare failed: %v", err)
- }
- return w
-}
-
-func TestFutexWake(t *testing.T) {
- for _, private := range []bool{false, true} {
- t.Run(futexKind(private), func(t *testing.T) {
- m := NewManager()
- d := newTestData(sizeofInt32)
-
- // Start waiting for wakeup.
- w := newPreparedTestWaiter(t, m, d, 0, private, 0, ^uint32(0))
- defer m.WaitComplete(w, d)
-
- // Perform a wakeup.
- if n, err := m.Wake(d, 0, private, ^uint32(0), 1); err != nil || n != 1 {
- t.Errorf("Wake: got (%d, %v), wanted (1, nil)", n, err)
- }
-
- // Expect the waiter to have been woken.
- if !w.woken() {
- t.Error("waiter not woken")
- }
- })
- }
-}
-
-func TestFutexWakeBitmask(t *testing.T) {
- for _, private := range []bool{false, true} {
- t.Run(futexKind(private), func(t *testing.T) {
- m := NewManager()
- d := newTestData(sizeofInt32)
-
- // Start waiting for wakeup.
- w := newPreparedTestWaiter(t, m, d, 0, private, 0, 0x0000ffff)
- defer m.WaitComplete(w, d)
-
- // Perform a wakeup using the wrong bitmask.
- if n, err := m.Wake(d, 0, private, 0xffff0000, 1); err != nil || n != 0 {
- t.Errorf("Wake with non-matching bitmask: got (%d, %v), wanted (0, nil)", n, err)
- }
-
- // Expect the waiter to still be waiting.
- if w.woken() {
- t.Error("waiter woken unexpectedly")
- }
-
- // Perform a wakeup using the right bitmask.
- if n, err := m.Wake(d, 0, private, 0x00000001, 1); err != nil || n != 1 {
- t.Errorf("Wake with matching bitmask: got (%d, %v), wanted (1, nil)", n, err)
- }
-
- // Expect that the waiter was woken.
- if !w.woken() {
- t.Error("waiter not woken")
- }
- })
- }
-}
-
-func TestFutexWakeTwo(t *testing.T) {
- for _, private := range []bool{false, true} {
- t.Run(futexKind(private), func(t *testing.T) {
- m := NewManager()
- d := newTestData(sizeofInt32)
-
- // Start three waiters waiting for wakeup.
- var ws [3]*Waiter
- for i := range ws {
- ws[i] = newPreparedTestWaiter(t, m, d, 0, private, 0, ^uint32(0))
- defer m.WaitComplete(ws[i], d)
- }
-
- // Perform two wakeups.
- const wakeups = 2
- if n, err := m.Wake(d, 0, private, ^uint32(0), 2); err != nil || n != wakeups {
- t.Errorf("Wake: got (%d, %v), wanted (%d, nil)", n, err, wakeups)
- }
-
- // Expect that exactly two waiters were woken.
- // We don't get guarantees about exactly which two,
- // (although we expect them to be w1 and w2).
- awake := 0
- for i := range ws {
- if ws[i].woken() {
- awake++
- }
- }
- if awake != wakeups {
- t.Errorf("got %d woken waiters, wanted %d", awake, wakeups)
- }
- })
- }
-}
-
-func TestFutexWakeUnrelated(t *testing.T) {
- for _, private := range []bool{false, true} {
- t.Run(futexKind(private), func(t *testing.T) {
- m := NewManager()
- d := newTestData(2 * sizeofInt32)
-
- // Start two waiters waiting for wakeup on different addresses.
- w1 := newPreparedTestWaiter(t, m, d, 0*sizeofInt32, private, 0, ^uint32(0))
- defer m.WaitComplete(w1, d)
- w2 := newPreparedTestWaiter(t, m, d, 1*sizeofInt32, private, 0, ^uint32(0))
- defer m.WaitComplete(w2, d)
-
- // Perform two wakeups on the second address.
- if n, err := m.Wake(d, 1*sizeofInt32, private, ^uint32(0), 2); err != nil || n != 1 {
- t.Errorf("Wake: got (%d, %v), wanted (1, nil)", n, err)
- }
-
- // Expect that only the second waiter was woken.
- if w1.woken() {
- t.Error("w1 woken unexpectedly")
- }
- if !w2.woken() {
- t.Error("w2 not woken")
- }
- })
- }
-}
-
-func TestWakeOpEmpty(t *testing.T) {
- for _, private := range []bool{false, true} {
- t.Run(futexKind(private), func(t *testing.T) {
- m := NewManager()
- d := newTestData(2 * sizeofInt32)
-
- // Perform wakeups with no waiters.
- if n, err := m.WakeOp(d, 0, sizeofInt32, private, 10, 10, 0); err != nil || n != 0 {
- t.Fatalf("WakeOp: got (%d, %v), wanted (0, nil)", n, err)
- }
- })
- }
-}
-
-func TestWakeOpFirstNonEmpty(t *testing.T) {
- for _, private := range []bool{false, true} {
- t.Run(futexKind(private), func(t *testing.T) {
- m := NewManager()
- d := newTestData(8)
-
- // Add two waiters on address 0.
- w1 := newPreparedTestWaiter(t, m, d, 0, private, 0, ^uint32(0))
- defer m.WaitComplete(w1, d)
- w2 := newPreparedTestWaiter(t, m, d, 0, private, 0, ^uint32(0))
- defer m.WaitComplete(w2, d)
-
- // Perform 10 wakeups on address 0.
- if n, err := m.WakeOp(d, 0, sizeofInt32, private, 10, 0, 0); err != nil || n != 2 {
- t.Errorf("WakeOp: got (%d, %v), wanted (2, nil)", n, err)
- }
-
- // Expect that both waiters were woken.
- if !w1.woken() {
- t.Error("w1 not woken")
- }
- if !w2.woken() {
- t.Error("w2 not woken")
- }
- })
- }
-}
-
-func TestWakeOpSecondNonEmpty(t *testing.T) {
- for _, private := range []bool{false, true} {
- t.Run(futexKind(private), func(t *testing.T) {
- m := NewManager()
- d := newTestData(8)
-
- // Add two waiters on address sizeofInt32.
- w1 := newPreparedTestWaiter(t, m, d, sizeofInt32, private, 0, ^uint32(0))
- defer m.WaitComplete(w1, d)
- w2 := newPreparedTestWaiter(t, m, d, sizeofInt32, private, 0, ^uint32(0))
- defer m.WaitComplete(w2, d)
-
- // Perform 10 wakeups on address sizeofInt32 (contingent on
- // d.Op(0), which should succeed).
- if n, err := m.WakeOp(d, 0, sizeofInt32, private, 0, 10, 0); err != nil || n != 2 {
- t.Errorf("WakeOp: got (%d, %v), wanted (2, nil)", n, err)
- }
-
- // Expect that both waiters were woken.
- if !w1.woken() {
- t.Error("w1 not woken")
- }
- if !w2.woken() {
- t.Error("w2 not woken")
- }
- })
- }
-}
-
-func TestWakeOpSecondNonEmptyFailingOp(t *testing.T) {
- for _, private := range []bool{false, true} {
- t.Run(futexKind(private), func(t *testing.T) {
- m := NewManager()
- d := newTestData(8)
-
- // Add two waiters on address sizeofInt32.
- w1 := newPreparedTestWaiter(t, m, d, sizeofInt32, private, 0, ^uint32(0))
- defer m.WaitComplete(w1, d)
- w2 := newPreparedTestWaiter(t, m, d, sizeofInt32, private, 0, ^uint32(0))
- defer m.WaitComplete(w2, d)
-
- // Perform 10 wakeups on address sizeofInt32 (contingent on
- // d.Op(1), which should fail).
- if n, err := m.WakeOp(d, 0, sizeofInt32, private, 0, 10, 1); err != nil || n != 0 {
- t.Errorf("WakeOp: got (%d, %v), wanted (0, nil)", n, err)
- }
-
- // Expect that neither waiter was woken.
- if w1.woken() {
- t.Error("w1 woken unexpectedly")
- }
- if w2.woken() {
- t.Error("w2 woken unexpectedly")
- }
- })
- }
-}
-
-func TestWakeOpAllNonEmpty(t *testing.T) {
- for _, private := range []bool{false, true} {
- t.Run(futexKind(private), func(t *testing.T) {
- m := NewManager()
- d := newTestData(8)
-
- // Add two waiters on address 0.
- w1 := newPreparedTestWaiter(t, m, d, 0, private, 0, ^uint32(0))
- defer m.WaitComplete(w1, d)
- w2 := newPreparedTestWaiter(t, m, d, 0, private, 0, ^uint32(0))
- defer m.WaitComplete(w2, d)
-
- // Add two waiters on address sizeofInt32.
- w3 := newPreparedTestWaiter(t, m, d, sizeofInt32, private, 0, ^uint32(0))
- defer m.WaitComplete(w3, d)
- w4 := newPreparedTestWaiter(t, m, d, sizeofInt32, private, 0, ^uint32(0))
- defer m.WaitComplete(w4, d)
-
- // Perform 10 wakeups on address 0 (unconditionally), and 10
- // wakeups on address sizeofInt32 (contingent on d.Op(0), which
- // should succeed).
- if n, err := m.WakeOp(d, 0, sizeofInt32, private, 10, 10, 0); err != nil || n != 4 {
- t.Errorf("WakeOp: got (%d, %v), wanted (4, nil)", n, err)
- }
-
- // Expect that all waiters were woken.
- if !w1.woken() {
- t.Error("w1 not woken")
- }
- if !w2.woken() {
- t.Error("w2 not woken")
- }
- if !w3.woken() {
- t.Error("w3 not woken")
- }
- if !w4.woken() {
- t.Error("w4 not woken")
- }
- })
- }
-}
-
-func TestWakeOpAllNonEmptyFailingOp(t *testing.T) {
- for _, private := range []bool{false, true} {
- t.Run(futexKind(private), func(t *testing.T) {
- m := NewManager()
- d := newTestData(8)
-
- // Add two waiters on address 0.
- w1 := newPreparedTestWaiter(t, m, d, 0, private, 0, ^uint32(0))
- defer m.WaitComplete(w1, d)
- w2 := newPreparedTestWaiter(t, m, d, 0, private, 0, ^uint32(0))
- defer m.WaitComplete(w2, d)
-
- // Add two waiters on address sizeofInt32.
- w3 := newPreparedTestWaiter(t, m, d, sizeofInt32, private, 0, ^uint32(0))
- defer m.WaitComplete(w3, d)
- w4 := newPreparedTestWaiter(t, m, d, sizeofInt32, private, 0, ^uint32(0))
- defer m.WaitComplete(w4, d)
-
- // Perform 10 wakeups on address 0 (unconditionally), and 10
- // wakeups on address sizeofInt32 (contingent on d.Op(1), which
- // should fail).
- if n, err := m.WakeOp(d, 0, sizeofInt32, private, 10, 10, 1); err != nil || n != 2 {
- t.Errorf("WakeOp: got (%d, %v), wanted (2, nil)", n, err)
- }
-
- // Expect that only the first two waiters were woken.
- if !w1.woken() {
- t.Error("w1 not woken")
- }
- if !w2.woken() {
- t.Error("w2 not woken")
- }
- if w3.woken() {
- t.Error("w3 woken unexpectedly")
- }
- if w4.woken() {
- t.Error("w4 woken unexpectedly")
- }
- })
- }
-}
-
-func TestWakeOpSameAddress(t *testing.T) {
- for _, private := range []bool{false, true} {
- t.Run(futexKind(private), func(t *testing.T) {
- m := NewManager()
- d := newTestData(8)
-
- // Add four waiters on address 0.
- var ws [4]*Waiter
- for i := range ws {
- ws[i] = newPreparedTestWaiter(t, m, d, 0, private, 0, ^uint32(0))
- defer m.WaitComplete(ws[i], d)
- }
-
- // Perform 1 wakeup on address 0 (unconditionally), and 1 wakeup
- // on address 0 (contingent on d.Op(0), which should succeed).
- const wakeups = 2
- if n, err := m.WakeOp(d, 0, 0, private, 1, 1, 0); err != nil || n != wakeups {
- t.Errorf("WakeOp: got (%d, %v), wanted (%d, nil)", n, err, wakeups)
- }
-
- // Expect that exactly two waiters were woken.
- awake := 0
- for i := range ws {
- if ws[i].woken() {
- awake++
- }
- }
- if awake != wakeups {
- t.Errorf("got %d woken waiters, wanted %d", awake, wakeups)
- }
- })
- }
-}
-
-func TestWakeOpSameAddressFailingOp(t *testing.T) {
- for _, private := range []bool{false, true} {
- t.Run(futexKind(private), func(t *testing.T) {
- m := NewManager()
- d := newTestData(8)
-
- // Add four waiters on address 0.
- var ws [4]*Waiter
- for i := range ws {
- ws[i] = newPreparedTestWaiter(t, m, d, 0, private, 0, ^uint32(0))
- defer m.WaitComplete(ws[i], d)
- }
-
- // Perform 1 wakeup on address 0 (unconditionally), and 1 wakeup
- // on address 0 (contingent on d.Op(1), which should fail).
- const wakeups = 1
- if n, err := m.WakeOp(d, 0, 0, private, 1, 1, 1); err != nil || n != wakeups {
- t.Errorf("WakeOp: got (%d, %v), wanted (%d, nil)", n, err, wakeups)
- }
-
- // Expect that exactly one waiter was woken.
- awake := 0
- for i := range ws {
- if ws[i].woken() {
- awake++
- }
- }
- if awake != wakeups {
- t.Errorf("got %d woken waiters, wanted %d", awake, wakeups)
- }
- })
- }
-}
-
-const (
- testMutexSize = sizeofInt32
- testMutexLocked uint32 = 1
- testMutexUnlocked uint32 = 0
-)
-
-// testMutex ties together a testData slice, an address, and a
-// futex manager in order to implement the sync.Locker interface.
-// Beyond being used as a Locker, it also provides a simple way to
-// manipulate the underlying futex value directly from tests.
-type testMutex struct {
- a hostarch.Addr
- d testData
- m *Manager
-}
-
-func newTestMutex(addr hostarch.Addr, d testData, m *Manager) *testMutex {
- return &testMutex{a: addr, d: d, m: m}
-}
-
-// Lock acquires the testMutex.
-// This may wait for it to be available via the futex manager.
-func (t *testMutex) Lock() {
- for {
- // Attempt to grab the lock.
- if atomic.CompareAndSwapUint32(
- (*uint32)(unsafe.Pointer(&t.d.data[t.a])),
- testMutexUnlocked,
- testMutexLocked) {
- // Lock held.
- return
- }
-
- // Wait for it to be "not locked".
- w := NewWaiter()
- err := t.m.WaitPrepare(w, t.d, t.a, true, testMutexLocked, ^uint32(0))
- if linuxerr.Equals(linuxerr.EAGAIN, err) {
- continue
- }
- if err != nil {
- // Should never happen.
- panic("WaitPrepare returned unexpected error: " + err.Error())
- }
- <-w.C
- t.m.WaitComplete(w, t.d)
- }
-}
-
-// Unlock releases the testMutex.
-// This will notify any waiters via the futex manager.
-func (t *testMutex) Unlock() {
- // Unlock.
- atomic.StoreUint32((*uint32)(unsafe.Pointer(&t.d.data[t.a])), testMutexUnlocked)
-
- // Notify all waiters.
- t.m.Wake(t.d, t.a, true, ^uint32(0), math.MaxInt32)
-}
-
-// This function was shamelessly stolen from mutex_test.go.
-func HammerMutex(l sync.Locker, loops int, cdone chan bool) {
- for i := 0; i < loops; i++ {
- l.Lock()
- runtime.Gosched()
- l.Unlock()
- }
- cdone <- true
-}
-
-func TestMutexStress(t *testing.T) {
- m := NewManager()
- d := newTestData(testMutexSize)
- tm := newTestMutex(0*testMutexSize, d, m)
- c := make(chan bool)
-
- for i := 0; i < 10; i++ {
- go HammerMutex(tm, 1000, c)
- }
-
- for i := 0; i < 10; i++ {
- <-c
- }
-}
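
The deleted futex_test.go above exercises the futex Manager API (WaitPrepare, WaitComplete, Wake, WakeOp) and builds a toy sync.Locker on top of it. The sketch below condenses the basic wait/wake handshake those tests rely on; it assumes the Manager and Waiter API visible in this diff plus the test-only helpers defined in the deleted file (newTestData, sizeofInt32, and the woken() accessor), so it is illustrative rather than part of the change.

package futex

import (
	"math"
	"testing"
)

// TestWaitThenWake is an illustrative sketch, not part of this diff. It
// assumes the test-only helpers newTestData, sizeofInt32 and Waiter.woken()
// defined in the deleted futex_test.go above.
func TestWaitThenWake(t *testing.T) {
	m := NewManager()
	d := newTestData(sizeofInt32)

	// Queue a waiter on address 0: the futex word there is still 0, so the
	// expected-value check in WaitPrepare passes.
	w := NewWaiter()
	if err := m.WaitPrepare(w, d, 0, true /* private */, 0 /* expected value */, ^uint32(0)); err != nil {
		t.Fatalf("WaitPrepare: %v", err)
	}
	defer m.WaitComplete(w, d)

	// Wake every waiter on address 0 that matches the full bitmask.
	if n, err := m.Wake(d, 0, true, ^uint32(0), math.MaxInt32); err != nil || n != 1 {
		t.Errorf("Wake: got (%d, %v), wanted (1, nil)", n, err)
	}

	if !w.woken() {
		t.Error("w not woken")
	}
}

Note that every successful WaitPrepare must eventually be paired with WaitComplete, which is why the deleted tests defer it immediately after preparing a waiter.
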
diff --git a/pkg/sentry/kernel/futex/futex_unsafe_state_autogen.go b/pkg/sentry/kernel/futex/futex_unsafe_state_autogen.go
new file mode 100644
index 000000000..84bb180fc
--- /dev/null
+++ b/pkg/sentry/kernel/futex/futex_unsafe_state_autogen.go
@@ -0,0 +1,37 @@
+// automatically generated by stateify.
+
+package futex
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (p *AtomicPtrBucket) StateTypeName() string {
+ return "pkg/sentry/kernel/futex.AtomicPtrBucket"
+}
+
+func (p *AtomicPtrBucket) StateFields() []string {
+ return []string{
+ "ptr",
+ }
+}
+
+func (p *AtomicPtrBucket) beforeSave() {}
+
+// +checklocksignore
+func (p *AtomicPtrBucket) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+ var ptrValue *bucket = p.savePtr()
+ stateSinkObject.SaveValue(0, ptrValue)
+}
+
+func (p *AtomicPtrBucket) afterLoad() {}
+
+// +checklocksignore
+func (p *AtomicPtrBucket) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.LoadValue(0, new(*bucket), func(y interface{}) { p.loadPtr(y.(*bucket)) })
+}
+
+func init() {
+ state.Register((*AtomicPtrBucket)(nil))
+}
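
The generated StateSave/StateLoad above delegate the actual pointer handling to hand-written savePtr/loadPtr hooks that live outside this file. Assuming AtomicPtrBucket follows the usual atomic-pointer template shape, with Load() *bucket and Store(*bucket) accessors, those hooks plausibly reduce to the sketch below; this is an assumption for illustration, not the committed implementation.

// Sketch only: plausible savePtr/loadPtr hooks backing the generated
// StateSave/StateLoad above, assuming AtomicPtrBucket exposes the usual
// Load() *bucket and Store(*bucket) accessors of the atomic-pointer template.
func (p *AtomicPtrBucket) savePtr() *bucket {
	return p.Load()
}

func (p *AtomicPtrBucket) loadPtr(v *bucket) {
	p.Store(v)
}
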
diff --git a/pkg/sentry/kernel/futex/waiter_list.go b/pkg/sentry/kernel/futex/waiter_list.go
new file mode 100644
index 000000000..24968ce4b
--- /dev/null
+++ b/pkg/sentry/kernel/futex/waiter_list.go
@@ -0,0 +1,221 @@
+package futex
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type waiterElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (waiterElementMapper) linkerFor(elem *Waiter) *Waiter { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type waiterList struct {
+ head *Waiter
+ tail *Waiter
+}
+
+// Reset resets list l to the empty state.
+func (l *waiterList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *waiterList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *waiterList) Front() *Waiter {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *waiterList) Back() *Waiter {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *waiterList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (waiterElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *waiterList) PushFront(e *Waiter) {
+ linker := waiterElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ waiterElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *waiterList) PushBack(e *Waiter) {
+ linker := waiterElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ waiterElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *waiterList) PushBackList(m *waiterList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ waiterElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ waiterElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *waiterList) InsertAfter(b, e *Waiter) {
+ bLinker := waiterElementMapper{}.linkerFor(b)
+ eLinker := waiterElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ waiterElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *waiterList) InsertBefore(a, e *Waiter) {
+ aLinker := waiterElementMapper{}.linkerFor(a)
+ eLinker := waiterElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ waiterElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *waiterList) Remove(e *Waiter) {
+ linker := waiterElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ waiterElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ waiterElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type waiterEntry struct {
+ next *Waiter
+ prev *Waiter
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *waiterEntry) Next() *Waiter {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *waiterEntry) Prev() *Waiter {
+ return e.prev
+}
+
+// SetNext assigns 'elem' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *waiterEntry) SetNext(elem *Waiter) {
+ e.next = elem
+}
+
+// SetPrev assigns 'elem' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *waiterEntry) SetPrev(elem *Waiter) {
+ e.prev = elem
+}
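
waiterList above is the intrusive doubly linked list that futex buckets use to queue Waiters. Because waiterElementMapper is the identity mapping, *Waiter itself must supply the Next/Prev/SetNext/SetPrev linker methods, typically by embedding waiterEntry; the following minimal sketch assumes exactly that.

// Sketch only: assumes Waiter supplies the linker methods (for example by
// embedding waiterEntry), as the identity waiterElementMapper above requires.
func waiterListSketch() {
	var l waiterList // the zero value is an empty list, ready to use

	w1, w2 := NewWaiter(), NewWaiter()
	l.PushBack(w1)
	l.PushBack(w2)

	// Iterate front to back, exactly as the waiterList doc comment shows.
	for e := l.Front(); e != nil; e = e.Next() {
		_ = e // do something with e
	}

	l.Remove(w1) // O(1): w1 is unlinked and its next/prev pointers are cleared
}

As the List doc comment notes, insertion and removal are O(1) and allocation-free.
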
diff --git a/pkg/sentry/kernel/g3doc/run_states.dot b/pkg/sentry/kernel/g3doc/run_states.dot
deleted file mode 100644
index 7861fe1f5..000000000
--- a/pkg/sentry/kernel/g3doc/run_states.dot
+++ /dev/null
@@ -1,99 +0,0 @@
-digraph {
- subgraph {
- App;
- }
- subgraph {
- Interrupt;
- InterruptAfterSignalDeliveryStop;
- }
- subgraph {
- Syscall;
- SyscallAfterPtraceEventSeccomp;
- SyscallEnter;
- SyscallAfterSyscallEnterStop;
- SyscallAfterSysemuStop;
- SyscallInvoke;
- SyscallAfterPtraceEventClone;
- SyscallAfterExecStop;
- SyscallAfterVforkStop;
- SyscallReinvoke;
- SyscallExit;
- }
- subgraph {
- Vsyscall;
- VsyscallAfterPtraceEventSeccomp;
- VsyscallInvoke;
- }
- subgraph {
- Exit;
- ExitMain; // leave thread group, release resources, reparent children, kill PID namespace and wait if TGID 1
- ExitNotify; // signal parent/tracer, become waitable
- ExitDone; // represented by t.runState == nil
- }
-
- // Task exit
- Exit -> ExitMain;
- ExitMain -> ExitNotify;
- ExitNotify -> ExitDone;
-
- // Execution of untrusted application code
- App -> App;
-
- // Interrupts (usually signal delivery)
- App -> Interrupt;
- Interrupt -> Interrupt; // if other interrupt conditions may still apply
- Interrupt -> Exit; // if killed
-
- // Syscalls
- App -> Syscall;
- Syscall -> SyscallEnter;
- SyscallEnter -> SyscallInvoke;
- SyscallInvoke -> SyscallExit;
- SyscallExit -> App;
-
- // exit, exit_group
- SyscallInvoke -> Exit;
-
- // execve
- SyscallInvoke -> SyscallAfterExecStop;
- SyscallAfterExecStop -> SyscallExit;
- SyscallAfterExecStop -> App; // fatal signal pending
-
- // vfork
- SyscallInvoke -> SyscallAfterVforkStop;
- SyscallAfterVforkStop -> SyscallExit;
-
- // Vsyscalls
- App -> Vsyscall;
- Vsyscall -> VsyscallInvoke;
- Vsyscall -> App; // fault while reading return address from stack
- VsyscallInvoke -> App;
-
- // ptrace-specific branches
- Interrupt -> InterruptAfterSignalDeliveryStop;
- InterruptAfterSignalDeliveryStop -> Interrupt;
- SyscallEnter -> SyscallAfterSyscallEnterStop;
- SyscallAfterSyscallEnterStop -> SyscallInvoke;
- SyscallAfterSyscallEnterStop -> SyscallExit; // skipped by tracer
- SyscallAfterSyscallEnterStop -> App; // fatal signal pending
- SyscallEnter -> SyscallAfterSysemuStop;
- SyscallAfterSysemuStop -> SyscallExit;
- SyscallAfterSysemuStop -> App; // fatal signal pending
- SyscallInvoke -> SyscallAfterPtraceEventClone;
- SyscallAfterPtraceEventClone -> SyscallExit;
- SyscallAfterPtraceEventClone -> SyscallAfterVforkStop;
-
- // seccomp
- Syscall -> App; // SECCOMP_RET_TRAP, SECCOMP_RET_ERRNO, SECCOMP_RET_KILL, SECCOMP_RET_TRACE without tracer
- Syscall -> SyscallAfterPtraceEventSeccomp; // SECCOMP_RET_TRACE
- SyscallAfterPtraceEventSeccomp -> SyscallEnter;
- SyscallAfterPtraceEventSeccomp -> SyscallExit; // skipped by tracer
- SyscallAfterPtraceEventSeccomp -> App; // fatal signal pending
- Vsyscall -> VsyscallAfterPtraceEventSeccomp;
- VsyscallAfterPtraceEventSeccomp -> VsyscallInvoke;
- VsyscallAfterPtraceEventSeccomp -> App;
-
- // Autosave
- SyscallInvoke -> SyscallReinvoke;
- SyscallReinvoke -> SyscallInvoke;
-}
diff --git a/pkg/sentry/kernel/g3doc/run_states.png b/pkg/sentry/kernel/g3doc/run_states.png
deleted file mode 100644
index b63b60f02..000000000
--- a/pkg/sentry/kernel/g3doc/run_states.png
+++ /dev/null
Binary files differ
diff --git a/pkg/sentry/kernel/ipc_namespace_refs.go b/pkg/sentry/kernel/ipc_namespace_refs.go
new file mode 100644
index 000000000..1a4c31bb0
--- /dev/null
+++ b/pkg/sentry/kernel/ipc_namespace_refs.go
@@ -0,0 +1,140 @@
+package kernel
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const IPCNamespaceenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var IPCNamespaceobj *IPCNamespace
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type IPCNamespaceRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *IPCNamespaceRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *IPCNamespaceRefs) RefType() string {
+ return fmt.Sprintf("%T", IPCNamespaceobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *IPCNamespaceRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *IPCNamespaceRefs) LogRefs() bool {
+ return IPCNamespaceenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *IPCNamespaceRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *IPCNamespaceRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if IPCNamespaceenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.TryRefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *IPCNamespaceRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+		// Real count is already zero: roll back the speculative reference and fail.
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if IPCNamespaceenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *IPCNamespaceRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if IPCNamespaceenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *IPCNamespaceRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
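
IPCNamespaceRefs above packs its reference count into a single int64: the low 32 bits count real references and the high 32 bits count speculative references taken by TryIncRef, which avoids a compare-and-swap loop while guaranteeing that an object whose real count has reached zero is never revived. A minimal lifecycle sketch follows; the destroy callback is a hypothetical stand-in for the owner's destructor (in the real code IPCNamespace embeds this type).

// Sketch only: the intended lifecycle of the generated refs type. The destroy
// callback passed to the final DecRef is a hypothetical stand-in for the
// owner's destructor.
func ipcNamespaceRefsSketch() {
	var r IPCNamespaceRefs
	r.InitRefs() // count = 1, registered with the leak checker

	r.IncRef() // count = 2; would panic if the count were already <= 0

	if r.TryIncRef() { // takes a speculative ref first, so it fails cleanly on a dead object
		r.DecRef(nil) // drops the reference just taken
	}

	r.DecRef(nil) // count = 1
	r.DecRef(func() {
		// Last real reference dropped: r is unregistered from the leak
		// checker and this destructor runs exactly once.
	})
}
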
diff --git a/pkg/sentry/kernel/kernel_abi_autogen_unsafe.go b/pkg/sentry/kernel/kernel_abi_autogen_unsafe.go
new file mode 100644
index 000000000..f951fab78
--- /dev/null
+++ b/pkg/sentry/kernel/kernel_abi_autogen_unsafe.go
@@ -0,0 +1,230 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// If there are issues with build tag aggregation, see
+// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here
+// come from the input set of files used to generate this file. This input set
+// is filtered based on pre-defined file suffixes related to build tags, see
+// tools/defs.bzl:calculate_sets().
+
+package kernel
+
+import (
+ "gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/hostarch"
+ "gvisor.dev/gvisor/pkg/marshal"
+ "io"
+ "reflect"
+ "runtime"
+ "unsafe"
+)
+
+// Marshallable types used by this file.
+var _ marshal.Marshallable = (*ThreadID)(nil)
+var _ marshal.Marshallable = (*vdsoParams)(nil)
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (tid *ThreadID) SizeBytes() int {
+ return 4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (tid *ThreadID) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(*tid))
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (tid *ThreadID) UnmarshalBytes(src []byte) {
+ *tid = ThreadID(int32(hostarch.ByteOrder.Uint32(src[:4])))
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (tid *ThreadID) Packed() bool {
+ // Scalar newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (tid *ThreadID) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(tid), uintptr(tid.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (tid *ThreadID) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(tid), unsafe.Pointer(&src[0]), uintptr(tid.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (tid *ThreadID) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+	// Construct a slice backed by tid's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(tid)))
+ hdr.Len = tid.SizeBytes()
+ hdr.Cap = tid.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that tid
+ // must live until the use above.
+ runtime.KeepAlive(tid) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (tid *ThreadID) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return tid.CopyOutN(cc, addr, tid.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (tid *ThreadID) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+	// Construct a slice backed by tid's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(tid)))
+ hdr.Len = tid.SizeBytes()
+ hdr.Cap = tid.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that tid
+ // must live until the use above.
+ runtime.KeepAlive(tid) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (tid *ThreadID) WriteTo(w io.Writer) (int64, error) {
+	// Construct a slice backed by tid's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(tid)))
+ hdr.Len = tid.SizeBytes()
+ hdr.Cap = tid.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that tid
+ // must live until the use above.
+ runtime.KeepAlive(tid) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (v *vdsoParams) SizeBytes() int {
+ return 64
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (v *vdsoParams) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(v.monotonicReady))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(v.monotonicBaseCycles))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(v.monotonicBaseRef))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(v.monotonicFrequency))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(v.realtimeReady))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(v.realtimeBaseCycles))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(v.realtimeBaseRef))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(v.realtimeFrequency))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (v *vdsoParams) UnmarshalBytes(src []byte) {
+ v.monotonicReady = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ v.monotonicBaseCycles = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ v.monotonicBaseRef = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ v.monotonicFrequency = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ v.realtimeReady = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ v.realtimeBaseCycles = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ v.realtimeBaseRef = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ v.realtimeFrequency = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (v *vdsoParams) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (v *vdsoParams) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(v), uintptr(v.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (v *vdsoParams) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(v), unsafe.Pointer(&src[0]), uintptr(v.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (v *vdsoParams) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+	// Construct a slice backed by v's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(v)))
+ hdr.Len = v.SizeBytes()
+ hdr.Cap = v.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that v
+ // must live until the use above.
+ runtime.KeepAlive(v) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (v *vdsoParams) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return v.CopyOutN(cc, addr, v.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (v *vdsoParams) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+	// Construct a slice backed by v's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(v)))
+ hdr.Len = v.SizeBytes()
+ hdr.Cap = v.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that v
+ // must live until the use above.
+ runtime.KeepAlive(v) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (v *vdsoParams) WriteTo(writer io.Writer) (int64, error) {
+	// Construct a slice backed by v's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(v)))
+ hdr.Len = v.SizeBytes()
+ hdr.Cap = v.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that v
+ // must live until the use above.
+ runtime.KeepAlive(v) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
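
The generated code above gives ThreadID (a 4-byte scalar newtype) and vdsoParams (a packed 64-byte struct) the full marshal.Marshallable surface: safe MarshalBytes/UnmarshalBytes, unsafe memmove-based variants, and CopyIn/CopyOut/WriteTo helpers that pin the object with runtime.KeepAlive while bypassing escape analysis. A round-trip sketch using only the safe byte-level methods shown above:

// Sketch only: round-tripping ThreadID through the generated
// MarshalBytes/UnmarshalBytes methods above.
func threadIDRoundTrip(tid ThreadID) ThreadID {
	buf := make([]byte, tid.SizeBytes()) // 4 bytes for the int32 newtype
	tid.MarshalBytes(buf)                // encoded in hostarch.ByteOrder

	var out ThreadID
	out.UnmarshalBytes(buf)
	return out // equal to tid
}
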
diff --git a/pkg/sentry/kernel/kernel_amd64_abi_autogen_unsafe.go b/pkg/sentry/kernel/kernel_amd64_abi_autogen_unsafe.go
new file mode 100644
index 000000000..49bde618c
--- /dev/null
+++ b/pkg/sentry/kernel/kernel_amd64_abi_autogen_unsafe.go
@@ -0,0 +1,15 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// If there are issues with build tag aggregation, see
+// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here
+// come from the input set of files used to generate this file. This input set
+// is filtered based on pre-defined file suffixes related to build tags, see
+// tools/defs.bzl:calculate_sets().
+
+// +build amd64
+
+package kernel
+
+import (
+)
+
diff --git a/pkg/sentry/kernel/kernel_amd64_state_autogen.go b/pkg/sentry/kernel/kernel_amd64_state_autogen.go
new file mode 100644
index 000000000..12de47ad0
--- /dev/null
+++ b/pkg/sentry/kernel/kernel_amd64_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build amd64
+
+package kernel
diff --git a/pkg/sentry/kernel/kernel_arm64_abi_autogen_unsafe.go b/pkg/sentry/kernel/kernel_arm64_abi_autogen_unsafe.go
new file mode 100644
index 000000000..ed40b65c6
--- /dev/null
+++ b/pkg/sentry/kernel/kernel_arm64_abi_autogen_unsafe.go
@@ -0,0 +1,15 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// If there are issues with build tag aggregation, see
+// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here
+// come from the input set of files used to generate this file. This input set
+// is filtered based on pre-defined file suffixes related to build tags, see
+// tools/defs.bzl:calculate_sets().
+
+// +build arm64
+
+package kernel
+
+import (
+)
+
diff --git a/pkg/sentry/kernel/kernel_arm64_state_autogen.go b/pkg/sentry/kernel/kernel_arm64_state_autogen.go
new file mode 100644
index 000000000..3c040d283
--- /dev/null
+++ b/pkg/sentry/kernel/kernel_arm64_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build arm64
+
+package kernel
diff --git a/pkg/sentry/kernel/kernel_opts_abi_autogen_unsafe.go b/pkg/sentry/kernel/kernel_opts_abi_autogen_unsafe.go
new file mode 100644
index 000000000..3e0693a12
--- /dev/null
+++ b/pkg/sentry/kernel/kernel_opts_abi_autogen_unsafe.go
@@ -0,0 +1,15 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// If there are issues with build tag aggregation, see
+// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here
+// come from the input set of files used to generate this file. This input set
+// is filtered based on pre-defined file suffixes related to build tags, see
+// tools/defs.bzl:calculate_sets().
+
+// +build go1.1
+
+package kernel
+
+import (
+)
+
diff --git a/pkg/sentry/kernel/kernel_opts_state_autogen.go b/pkg/sentry/kernel/kernel_opts_state_autogen.go
new file mode 100644
index 000000000..3a027c5ed
--- /dev/null
+++ b/pkg/sentry/kernel/kernel_opts_state_autogen.go
@@ -0,0 +1,34 @@
+// automatically generated by stateify.
+
+// +build go1.1
+
+package kernel
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (s *SpecialOpts) StateTypeName() string {
+ return "pkg/sentry/kernel.SpecialOpts"
+}
+
+func (s *SpecialOpts) StateFields() []string {
+ return []string{}
+}
+
+func (s *SpecialOpts) beforeSave() {}
+
+// +checklocksignore
+func (s *SpecialOpts) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+}
+
+func (s *SpecialOpts) afterLoad() {}
+
+// +checklocksignore
+func (s *SpecialOpts) StateLoad(stateSourceObject state.Source) {
+}
+
+func init() {
+ state.Register((*SpecialOpts)(nil))
+}
diff --git a/pkg/sentry/kernel/kernel_state_autogen.go b/pkg/sentry/kernel/kernel_state_autogen.go
new file mode 100644
index 000000000..860599e73
--- /dev/null
+++ b/pkg/sentry/kernel/kernel_state_autogen.go
@@ -0,0 +1,2599 @@
+// automatically generated by stateify.
+
+package kernel
+
+import (
+ "gvisor.dev/gvisor/pkg/bpf"
+ "gvisor.dev/gvisor/pkg/sentry/device"
+ "gvisor.dev/gvisor/pkg/state"
+ "gvisor.dev/gvisor/pkg/tcpip"
+)
+
+func (a *abstractEndpoint) StateTypeName() string {
+ return "pkg/sentry/kernel.abstractEndpoint"
+}
+
+func (a *abstractEndpoint) StateFields() []string {
+ return []string{
+ "ep",
+ "socket",
+ "name",
+ "ns",
+ }
+}
+
+func (a *abstractEndpoint) beforeSave() {}
+
+// +checklocksignore
+func (a *abstractEndpoint) StateSave(stateSinkObject state.Sink) {
+ a.beforeSave()
+ stateSinkObject.Save(0, &a.ep)
+ stateSinkObject.Save(1, &a.socket)
+ stateSinkObject.Save(2, &a.name)
+ stateSinkObject.Save(3, &a.ns)
+}
+
+func (a *abstractEndpoint) afterLoad() {}
+
+// +checklocksignore
+func (a *abstractEndpoint) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &a.ep)
+ stateSourceObject.Load(1, &a.socket)
+ stateSourceObject.Load(2, &a.name)
+ stateSourceObject.Load(3, &a.ns)
+}
+
+func (a *AbstractSocketNamespace) StateTypeName() string {
+ return "pkg/sentry/kernel.AbstractSocketNamespace"
+}
+
+func (a *AbstractSocketNamespace) StateFields() []string {
+ return []string{
+ "endpoints",
+ }
+}
+
+func (a *AbstractSocketNamespace) beforeSave() {}
+
+// +checklocksignore
+func (a *AbstractSocketNamespace) StateSave(stateSinkObject state.Sink) {
+ a.beforeSave()
+ stateSinkObject.Save(0, &a.endpoints)
+}
+
+func (a *AbstractSocketNamespace) afterLoad() {}
+
+// +checklocksignore
+func (a *AbstractSocketNamespace) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &a.endpoints)
+}
+
+func (c *Cgroup) StateTypeName() string {
+ return "pkg/sentry/kernel.Cgroup"
+}
+
+func (c *Cgroup) StateFields() []string {
+ return []string{
+ "Dentry",
+ "CgroupImpl",
+ }
+}
+
+func (c *Cgroup) beforeSave() {}
+
+// +checklocksignore
+func (c *Cgroup) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.Dentry)
+ stateSinkObject.Save(1, &c.CgroupImpl)
+}
+
+func (c *Cgroup) afterLoad() {}
+
+// +checklocksignore
+func (c *Cgroup) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.Dentry)
+ stateSourceObject.Load(1, &c.CgroupImpl)
+}
+
+func (h *hierarchy) StateTypeName() string {
+ return "pkg/sentry/kernel.hierarchy"
+}
+
+func (h *hierarchy) StateFields() []string {
+ return []string{
+ "id",
+ "controllers",
+ "fs",
+ }
+}
+
+func (h *hierarchy) beforeSave() {}
+
+// +checklocksignore
+func (h *hierarchy) StateSave(stateSinkObject state.Sink) {
+ h.beforeSave()
+ stateSinkObject.Save(0, &h.id)
+ stateSinkObject.Save(1, &h.controllers)
+ stateSinkObject.Save(2, &h.fs)
+}
+
+func (h *hierarchy) afterLoad() {}
+
+// +checklocksignore
+func (h *hierarchy) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &h.id)
+ stateSourceObject.Load(1, &h.controllers)
+ stateSourceObject.Load(2, &h.fs)
+}
+
+func (r *CgroupRegistry) StateTypeName() string {
+ return "pkg/sentry/kernel.CgroupRegistry"
+}
+
+func (r *CgroupRegistry) StateFields() []string {
+ return []string{
+ "lastHierarchyID",
+ "controllers",
+ "hierarchies",
+ }
+}
+
+func (r *CgroupRegistry) beforeSave() {}
+
+// +checklocksignore
+func (r *CgroupRegistry) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.lastHierarchyID)
+ stateSinkObject.Save(1, &r.controllers)
+ stateSinkObject.Save(2, &r.hierarchies)
+}
+
+func (r *CgroupRegistry) afterLoad() {}
+
+// +checklocksignore
+func (r *CgroupRegistry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.lastHierarchyID)
+ stateSourceObject.Load(1, &r.controllers)
+ stateSourceObject.Load(2, &r.hierarchies)
+}
+
+func (f *FDFlags) StateTypeName() string {
+ return "pkg/sentry/kernel.FDFlags"
+}
+
+func (f *FDFlags) StateFields() []string {
+ return []string{
+ "CloseOnExec",
+ }
+}
+
+func (f *FDFlags) beforeSave() {}
+
+// +checklocksignore
+func (f *FDFlags) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.CloseOnExec)
+}
+
+func (f *FDFlags) afterLoad() {}
+
+// +checklocksignore
+func (f *FDFlags) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.CloseOnExec)
+}
+
+func (d *descriptor) StateTypeName() string {
+ return "pkg/sentry/kernel.descriptor"
+}
+
+func (d *descriptor) StateFields() []string {
+ return []string{
+ "file",
+ "fileVFS2",
+ "flags",
+ }
+}
+
+func (d *descriptor) beforeSave() {}
+
+// +checklocksignore
+func (d *descriptor) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.file)
+ stateSinkObject.Save(1, &d.fileVFS2)
+ stateSinkObject.Save(2, &d.flags)
+}
+
+func (d *descriptor) afterLoad() {}
+
+// +checklocksignore
+func (d *descriptor) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.file)
+ stateSourceObject.Load(1, &d.fileVFS2)
+ stateSourceObject.Load(2, &d.flags)
+}
+
+func (f *FDTable) StateTypeName() string {
+ return "pkg/sentry/kernel.FDTable"
+}
+
+func (f *FDTable) StateFields() []string {
+ return []string{
+ "FDTableRefs",
+ "k",
+ "next",
+ "used",
+ "descriptorTable",
+ }
+}
+
+func (f *FDTable) beforeSave() {}
+
+// +checklocksignore
+func (f *FDTable) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ var descriptorTableValue map[int32]descriptor = f.saveDescriptorTable()
+ stateSinkObject.SaveValue(4, descriptorTableValue)
+ stateSinkObject.Save(0, &f.FDTableRefs)
+ stateSinkObject.Save(1, &f.k)
+ stateSinkObject.Save(2, &f.next)
+ stateSinkObject.Save(3, &f.used)
+}
+
+func (f *FDTable) afterLoad() {}
+
+// +checklocksignore
+func (f *FDTable) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.FDTableRefs)
+ stateSourceObject.Load(1, &f.k)
+ stateSourceObject.Load(2, &f.next)
+ stateSourceObject.Load(3, &f.used)
+ stateSourceObject.LoadValue(4, new(map[int32]descriptor), func(y interface{}) { f.loadDescriptorTable(y.(map[int32]descriptor)) })
+}
+
+func (r *FDTableRefs) StateTypeName() string {
+ return "pkg/sentry/kernel.FDTableRefs"
+}
+
+func (r *FDTableRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *FDTableRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *FDTableRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *FDTableRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (f *FSContext) StateTypeName() string {
+ return "pkg/sentry/kernel.FSContext"
+}
+
+func (f *FSContext) StateFields() []string {
+ return []string{
+ "FSContextRefs",
+ "root",
+ "rootVFS2",
+ "cwd",
+ "cwdVFS2",
+ "umask",
+ }
+}
+
+func (f *FSContext) beforeSave() {}
+
+// +checklocksignore
+func (f *FSContext) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.FSContextRefs)
+ stateSinkObject.Save(1, &f.root)
+ stateSinkObject.Save(2, &f.rootVFS2)
+ stateSinkObject.Save(3, &f.cwd)
+ stateSinkObject.Save(4, &f.cwdVFS2)
+ stateSinkObject.Save(5, &f.umask)
+}
+
+func (f *FSContext) afterLoad() {}
+
+// +checklocksignore
+func (f *FSContext) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.FSContextRefs)
+ stateSourceObject.Load(1, &f.root)
+ stateSourceObject.Load(2, &f.rootVFS2)
+ stateSourceObject.Load(3, &f.cwd)
+ stateSourceObject.Load(4, &f.cwdVFS2)
+ stateSourceObject.Load(5, &f.umask)
+}
+
+func (r *FSContextRefs) StateTypeName() string {
+ return "pkg/sentry/kernel.FSContextRefs"
+}
+
+func (r *FSContextRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *FSContextRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *FSContextRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *FSContextRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (i *IPCNamespace) StateTypeName() string {
+ return "pkg/sentry/kernel.IPCNamespace"
+}
+
+func (i *IPCNamespace) StateFields() []string {
+ return []string{
+ "IPCNamespaceRefs",
+ "userNS",
+ "semaphores",
+ "shms",
+ }
+}
+
+func (i *IPCNamespace) beforeSave() {}
+
+// +checklocksignore
+func (i *IPCNamespace) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.IPCNamespaceRefs)
+ stateSinkObject.Save(1, &i.userNS)
+ stateSinkObject.Save(2, &i.semaphores)
+ stateSinkObject.Save(3, &i.shms)
+}
+
+func (i *IPCNamespace) afterLoad() {}
+
+// +checklocksignore
+func (i *IPCNamespace) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.IPCNamespaceRefs)
+ stateSourceObject.Load(1, &i.userNS)
+ stateSourceObject.Load(2, &i.semaphores)
+ stateSourceObject.Load(3, &i.shms)
+}
+
+func (r *IPCNamespaceRefs) StateTypeName() string {
+ return "pkg/sentry/kernel.IPCNamespaceRefs"
+}
+
+func (r *IPCNamespaceRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *IPCNamespaceRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *IPCNamespaceRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *IPCNamespaceRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (k *Kernel) StateTypeName() string {
+ return "pkg/sentry/kernel.Kernel"
+}
+
+func (k *Kernel) StateFields() []string {
+ return []string{
+ "featureSet",
+ "timekeeper",
+ "tasks",
+ "rootUserNamespace",
+ "rootNetworkNamespace",
+ "applicationCores",
+ "useHostCores",
+ "extraAuxv",
+ "vdso",
+ "rootUTSNamespace",
+ "rootIPCNamespace",
+ "rootAbstractSocketNamespace",
+ "futexes",
+ "globalInit",
+ "syslog",
+ "runningTasks",
+ "cpuClock",
+ "cpuClockTickerDisabled",
+ "cpuClockTickerSetting",
+ "uniqueID",
+ "nextInotifyCookie",
+ "netlinkPorts",
+ "danglingEndpoints",
+ "sockets",
+ "socketsVFS2",
+ "nextSocketRecord",
+ "deviceRegistry",
+ "DirentCacheLimiter",
+ "SpecialOpts",
+ "vfs",
+ "hostMount",
+ "pipeMount",
+ "shmMount",
+ "socketMount",
+ "SleepForAddressSpaceActivation",
+ "ptraceExceptions",
+ "YAMAPtraceScope",
+ "cgroupRegistry",
+ }
+}
+
+func (k *Kernel) beforeSave() {}
+
+// +checklocksignore
+func (k *Kernel) StateSave(stateSinkObject state.Sink) {
+ k.beforeSave()
+ var danglingEndpointsValue []tcpip.Endpoint = k.saveDanglingEndpoints()
+ stateSinkObject.SaveValue(22, danglingEndpointsValue)
+ var deviceRegistryValue *device.Registry = k.saveDeviceRegistry()
+ stateSinkObject.SaveValue(26, deviceRegistryValue)
+ stateSinkObject.Save(0, &k.featureSet)
+ stateSinkObject.Save(1, &k.timekeeper)
+ stateSinkObject.Save(2, &k.tasks)
+ stateSinkObject.Save(3, &k.rootUserNamespace)
+ stateSinkObject.Save(4, &k.rootNetworkNamespace)
+ stateSinkObject.Save(5, &k.applicationCores)
+ stateSinkObject.Save(6, &k.useHostCores)
+ stateSinkObject.Save(7, &k.extraAuxv)
+ stateSinkObject.Save(8, &k.vdso)
+ stateSinkObject.Save(9, &k.rootUTSNamespace)
+ stateSinkObject.Save(10, &k.rootIPCNamespace)
+ stateSinkObject.Save(11, &k.rootAbstractSocketNamespace)
+ stateSinkObject.Save(12, &k.futexes)
+ stateSinkObject.Save(13, &k.globalInit)
+ stateSinkObject.Save(14, &k.syslog)
+ stateSinkObject.Save(15, &k.runningTasks)
+ stateSinkObject.Save(16, &k.cpuClock)
+ stateSinkObject.Save(17, &k.cpuClockTickerDisabled)
+ stateSinkObject.Save(18, &k.cpuClockTickerSetting)
+ stateSinkObject.Save(19, &k.uniqueID)
+ stateSinkObject.Save(20, &k.nextInotifyCookie)
+ stateSinkObject.Save(21, &k.netlinkPorts)
+ stateSinkObject.Save(23, &k.sockets)
+ stateSinkObject.Save(24, &k.socketsVFS2)
+ stateSinkObject.Save(25, &k.nextSocketRecord)
+ stateSinkObject.Save(27, &k.DirentCacheLimiter)
+ stateSinkObject.Save(28, &k.SpecialOpts)
+ stateSinkObject.Save(29, &k.vfs)
+ stateSinkObject.Save(30, &k.hostMount)
+ stateSinkObject.Save(31, &k.pipeMount)
+ stateSinkObject.Save(32, &k.shmMount)
+ stateSinkObject.Save(33, &k.socketMount)
+ stateSinkObject.Save(34, &k.SleepForAddressSpaceActivation)
+ stateSinkObject.Save(35, &k.ptraceExceptions)
+ stateSinkObject.Save(36, &k.YAMAPtraceScope)
+ stateSinkObject.Save(37, &k.cgroupRegistry)
+}
+
+func (k *Kernel) afterLoad() {}
+
+// +checklocksignore
+func (k *Kernel) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &k.featureSet)
+ stateSourceObject.Load(1, &k.timekeeper)
+ stateSourceObject.Load(2, &k.tasks)
+ stateSourceObject.Load(3, &k.rootUserNamespace)
+ stateSourceObject.Load(4, &k.rootNetworkNamespace)
+ stateSourceObject.Load(5, &k.applicationCores)
+ stateSourceObject.Load(6, &k.useHostCores)
+ stateSourceObject.Load(7, &k.extraAuxv)
+ stateSourceObject.Load(8, &k.vdso)
+ stateSourceObject.Load(9, &k.rootUTSNamespace)
+ stateSourceObject.Load(10, &k.rootIPCNamespace)
+ stateSourceObject.Load(11, &k.rootAbstractSocketNamespace)
+ stateSourceObject.Load(12, &k.futexes)
+ stateSourceObject.Load(13, &k.globalInit)
+ stateSourceObject.Load(14, &k.syslog)
+ stateSourceObject.Load(15, &k.runningTasks)
+ stateSourceObject.Load(16, &k.cpuClock)
+ stateSourceObject.Load(17, &k.cpuClockTickerDisabled)
+ stateSourceObject.Load(18, &k.cpuClockTickerSetting)
+ stateSourceObject.Load(19, &k.uniqueID)
+ stateSourceObject.Load(20, &k.nextInotifyCookie)
+ stateSourceObject.Load(21, &k.netlinkPorts)
+ stateSourceObject.Load(23, &k.sockets)
+ stateSourceObject.Load(24, &k.socketsVFS2)
+ stateSourceObject.Load(25, &k.nextSocketRecord)
+ stateSourceObject.Load(27, &k.DirentCacheLimiter)
+ stateSourceObject.Load(28, &k.SpecialOpts)
+ stateSourceObject.Load(29, &k.vfs)
+ stateSourceObject.Load(30, &k.hostMount)
+ stateSourceObject.Load(31, &k.pipeMount)
+ stateSourceObject.Load(32, &k.shmMount)
+ stateSourceObject.Load(33, &k.socketMount)
+ stateSourceObject.Load(34, &k.SleepForAddressSpaceActivation)
+ stateSourceObject.Load(35, &k.ptraceExceptions)
+ stateSourceObject.Load(36, &k.YAMAPtraceScope)
+ stateSourceObject.Load(37, &k.cgroupRegistry)
+ stateSourceObject.LoadValue(22, new([]tcpip.Endpoint), func(y interface{}) { k.loadDanglingEndpoints(y.([]tcpip.Endpoint)) })
+ stateSourceObject.LoadValue(26, new(*device.Registry), func(y interface{}) { k.loadDeviceRegistry(y.(*device.Registry)) })
+}
+
+func (s *SocketRecord) StateTypeName() string {
+ return "pkg/sentry/kernel.SocketRecord"
+}
+
+func (s *SocketRecord) StateFields() []string {
+ return []string{
+ "k",
+ "Sock",
+ "SockVFS2",
+ "ID",
+ }
+}
+
+func (s *SocketRecord) beforeSave() {}
+
+// +checklocksignore
+func (s *SocketRecord) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.k)
+ stateSinkObject.Save(1, &s.Sock)
+ stateSinkObject.Save(2, &s.SockVFS2)
+ stateSinkObject.Save(3, &s.ID)
+}
+
+func (s *SocketRecord) afterLoad() {}
+
+// +checklocksignore
+func (s *SocketRecord) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.k)
+ stateSourceObject.Load(1, &s.Sock)
+ stateSourceObject.Load(2, &s.SockVFS2)
+ stateSourceObject.Load(3, &s.ID)
+}
+
+func (s *SocketRecordVFS1) StateTypeName() string {
+ return "pkg/sentry/kernel.SocketRecordVFS1"
+}
+
+func (s *SocketRecordVFS1) StateFields() []string {
+ return []string{
+ "socketEntry",
+ "SocketRecord",
+ }
+}
+
+func (s *SocketRecordVFS1) beforeSave() {}
+
+// +checklocksignore
+func (s *SocketRecordVFS1) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.socketEntry)
+ stateSinkObject.Save(1, &s.SocketRecord)
+}
+
+func (s *SocketRecordVFS1) afterLoad() {}
+
+// +checklocksignore
+func (s *SocketRecordVFS1) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.socketEntry)
+ stateSourceObject.Load(1, &s.SocketRecord)
+}
+
+func (p *pendingSignals) StateTypeName() string {
+ return "pkg/sentry/kernel.pendingSignals"
+}
+
+func (p *pendingSignals) StateFields() []string {
+ return []string{
+ "signals",
+ }
+}
+
+func (p *pendingSignals) beforeSave() {}
+
+// +checklocksignore
+func (p *pendingSignals) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+ var signalsValue []savedPendingSignal = p.saveSignals()
+ stateSinkObject.SaveValue(0, signalsValue)
+}
+
+func (p *pendingSignals) afterLoad() {}
+
+// +checklocksignore
+func (p *pendingSignals) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.LoadValue(0, new([]savedPendingSignal), func(y interface{}) { p.loadSignals(y.([]savedPendingSignal)) })
+}
+
+func (p *pendingSignalQueue) StateTypeName() string {
+ return "pkg/sentry/kernel.pendingSignalQueue"
+}
+
+func (p *pendingSignalQueue) StateFields() []string {
+ return []string{
+ "pendingSignalList",
+ "length",
+ }
+}
+
+func (p *pendingSignalQueue) beforeSave() {}
+
+// +checklocksignore
+func (p *pendingSignalQueue) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+ stateSinkObject.Save(0, &p.pendingSignalList)
+ stateSinkObject.Save(1, &p.length)
+}
+
+func (p *pendingSignalQueue) afterLoad() {}
+
+// +checklocksignore
+func (p *pendingSignalQueue) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &p.pendingSignalList)
+ stateSourceObject.Load(1, &p.length)
+}
+
+func (p *pendingSignal) StateTypeName() string {
+ return "pkg/sentry/kernel.pendingSignal"
+}
+
+func (p *pendingSignal) StateFields() []string {
+ return []string{
+ "pendingSignalEntry",
+ "SignalInfo",
+ "timer",
+ }
+}
+
+func (p *pendingSignal) beforeSave() {}
+
+// +checklocksignore
+func (p *pendingSignal) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+ stateSinkObject.Save(0, &p.pendingSignalEntry)
+ stateSinkObject.Save(1, &p.SignalInfo)
+ stateSinkObject.Save(2, &p.timer)
+}
+
+func (p *pendingSignal) afterLoad() {}
+
+// +checklocksignore
+func (p *pendingSignal) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &p.pendingSignalEntry)
+ stateSourceObject.Load(1, &p.SignalInfo)
+ stateSourceObject.Load(2, &p.timer)
+}
+
+func (l *pendingSignalList) StateTypeName() string {
+ return "pkg/sentry/kernel.pendingSignalList"
+}
+
+func (l *pendingSignalList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *pendingSignalList) beforeSave() {}
+
+// +checklocksignore
+func (l *pendingSignalList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *pendingSignalList) afterLoad() {}
+
+// +checklocksignore
+func (l *pendingSignalList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *pendingSignalEntry) StateTypeName() string {
+ return "pkg/sentry/kernel.pendingSignalEntry"
+}
+
+func (e *pendingSignalEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *pendingSignalEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *pendingSignalEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *pendingSignalEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *pendingSignalEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func (s *savedPendingSignal) StateTypeName() string {
+ return "pkg/sentry/kernel.savedPendingSignal"
+}
+
+func (s *savedPendingSignal) StateFields() []string {
+ return []string{
+ "si",
+ "timer",
+ }
+}
+
+func (s *savedPendingSignal) beforeSave() {}
+
+// +checklocksignore
+func (s *savedPendingSignal) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.si)
+ stateSinkObject.Save(1, &s.timer)
+}
+
+func (s *savedPendingSignal) afterLoad() {}
+
+// +checklocksignore
+func (s *savedPendingSignal) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.si)
+ stateSourceObject.Load(1, &s.timer)
+}
+
+func (it *IntervalTimer) StateTypeName() string {
+ return "pkg/sentry/kernel.IntervalTimer"
+}
+
+func (it *IntervalTimer) StateFields() []string {
+ return []string{
+ "timer",
+ "target",
+ "signo",
+ "id",
+ "sigval",
+ "group",
+ "sigpending",
+ "sigorphan",
+ "overrunCur",
+ "overrunLast",
+ }
+}
+
+func (it *IntervalTimer) beforeSave() {}
+
+// +checklocksignore
+func (it *IntervalTimer) StateSave(stateSinkObject state.Sink) {
+ it.beforeSave()
+ stateSinkObject.Save(0, &it.timer)
+ stateSinkObject.Save(1, &it.target)
+ stateSinkObject.Save(2, &it.signo)
+ stateSinkObject.Save(3, &it.id)
+ stateSinkObject.Save(4, &it.sigval)
+ stateSinkObject.Save(5, &it.group)
+ stateSinkObject.Save(6, &it.sigpending)
+ stateSinkObject.Save(7, &it.sigorphan)
+ stateSinkObject.Save(8, &it.overrunCur)
+ stateSinkObject.Save(9, &it.overrunLast)
+}
+
+func (it *IntervalTimer) afterLoad() {}
+
+// +checklocksignore
+func (it *IntervalTimer) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &it.timer)
+ stateSourceObject.Load(1, &it.target)
+ stateSourceObject.Load(2, &it.signo)
+ stateSourceObject.Load(3, &it.id)
+ stateSourceObject.Load(4, &it.sigval)
+ stateSourceObject.Load(5, &it.group)
+ stateSourceObject.Load(6, &it.sigpending)
+ stateSourceObject.Load(7, &it.sigorphan)
+ stateSourceObject.Load(8, &it.overrunCur)
+ stateSourceObject.Load(9, &it.overrunLast)
+}
+
+func (l *processGroupList) StateTypeName() string {
+ return "pkg/sentry/kernel.processGroupList"
+}
+
+func (l *processGroupList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *processGroupList) beforeSave() {}
+
+// +checklocksignore
+func (l *processGroupList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *processGroupList) afterLoad() {}
+
+// +checklocksignore
+func (l *processGroupList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *processGroupEntry) StateTypeName() string {
+ return "pkg/sentry/kernel.processGroupEntry"
+}
+
+func (e *processGroupEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *processGroupEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *processGroupEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *processGroupEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *processGroupEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func (r *ProcessGroupRefs) StateTypeName() string {
+ return "pkg/sentry/kernel.ProcessGroupRefs"
+}
+
+func (r *ProcessGroupRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *ProcessGroupRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *ProcessGroupRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *ProcessGroupRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (p *ptraceOptions) StateTypeName() string {
+ return "pkg/sentry/kernel.ptraceOptions"
+}
+
+func (p *ptraceOptions) StateFields() []string {
+ return []string{
+ "ExitKill",
+ "SysGood",
+ "TraceClone",
+ "TraceExec",
+ "TraceExit",
+ "TraceFork",
+ "TraceSeccomp",
+ "TraceVfork",
+ "TraceVforkDone",
+ }
+}
+
+func (p *ptraceOptions) beforeSave() {}
+
+// +checklocksignore
+func (p *ptraceOptions) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+ stateSinkObject.Save(0, &p.ExitKill)
+ stateSinkObject.Save(1, &p.SysGood)
+ stateSinkObject.Save(2, &p.TraceClone)
+ stateSinkObject.Save(3, &p.TraceExec)
+ stateSinkObject.Save(4, &p.TraceExit)
+ stateSinkObject.Save(5, &p.TraceFork)
+ stateSinkObject.Save(6, &p.TraceSeccomp)
+ stateSinkObject.Save(7, &p.TraceVfork)
+ stateSinkObject.Save(8, &p.TraceVforkDone)
+}
+
+func (p *ptraceOptions) afterLoad() {}
+
+// +checklocksignore
+func (p *ptraceOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &p.ExitKill)
+ stateSourceObject.Load(1, &p.SysGood)
+ stateSourceObject.Load(2, &p.TraceClone)
+ stateSourceObject.Load(3, &p.TraceExec)
+ stateSourceObject.Load(4, &p.TraceExit)
+ stateSourceObject.Load(5, &p.TraceFork)
+ stateSourceObject.Load(6, &p.TraceSeccomp)
+ stateSourceObject.Load(7, &p.TraceVfork)
+ stateSourceObject.Load(8, &p.TraceVforkDone)
+}
+
+func (s *ptraceStop) StateTypeName() string {
+ return "pkg/sentry/kernel.ptraceStop"
+}
+
+func (s *ptraceStop) StateFields() []string {
+ return []string{
+ "frozen",
+ "listen",
+ }
+}
+
+func (s *ptraceStop) beforeSave() {}
+
+// +checklocksignore
+func (s *ptraceStop) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.frozen)
+ stateSinkObject.Save(1, &s.listen)
+}
+
+func (s *ptraceStop) afterLoad() {}
+
+// +checklocksignore
+func (s *ptraceStop) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.frozen)
+ stateSourceObject.Load(1, &s.listen)
+}
+
+func (o *OldRSeqCriticalRegion) StateTypeName() string {
+ return "pkg/sentry/kernel.OldRSeqCriticalRegion"
+}
+
+func (o *OldRSeqCriticalRegion) StateFields() []string {
+ return []string{
+ "CriticalSection",
+ "Restart",
+ }
+}
+
+func (o *OldRSeqCriticalRegion) beforeSave() {}
+
+// +checklocksignore
+func (o *OldRSeqCriticalRegion) StateSave(stateSinkObject state.Sink) {
+ o.beforeSave()
+ stateSinkObject.Save(0, &o.CriticalSection)
+ stateSinkObject.Save(1, &o.Restart)
+}
+
+func (o *OldRSeqCriticalRegion) afterLoad() {}
+
+// +checklocksignore
+func (o *OldRSeqCriticalRegion) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &o.CriticalSection)
+ stateSourceObject.Load(1, &o.Restart)
+}
+
+func (l *sessionList) StateTypeName() string {
+ return "pkg/sentry/kernel.sessionList"
+}
+
+func (l *sessionList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *sessionList) beforeSave() {}
+
+// +checklocksignore
+func (l *sessionList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *sessionList) afterLoad() {}
+
+// +checklocksignore
+func (l *sessionList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *sessionEntry) StateTypeName() string {
+ return "pkg/sentry/kernel.sessionEntry"
+}
+
+func (e *sessionEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *sessionEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *sessionEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *sessionEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *sessionEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func (r *SessionRefs) StateTypeName() string {
+ return "pkg/sentry/kernel.SessionRefs"
+}
+
+func (r *SessionRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *SessionRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *SessionRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *SessionRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (s *Session) StateTypeName() string {
+ return "pkg/sentry/kernel.Session"
+}
+
+func (s *Session) StateFields() []string {
+ return []string{
+ "SessionRefs",
+ "leader",
+ "id",
+ "foreground",
+ "processGroups",
+ "sessionEntry",
+ }
+}
+
+func (s *Session) beforeSave() {}
+
+// +checklocksignore
+func (s *Session) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.SessionRefs)
+ stateSinkObject.Save(1, &s.leader)
+ stateSinkObject.Save(2, &s.id)
+ stateSinkObject.Save(3, &s.foreground)
+ stateSinkObject.Save(4, &s.processGroups)
+ stateSinkObject.Save(5, &s.sessionEntry)
+}
+
+func (s *Session) afterLoad() {}
+
+// +checklocksignore
+func (s *Session) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.SessionRefs)
+ stateSourceObject.Load(1, &s.leader)
+ stateSourceObject.Load(2, &s.id)
+ stateSourceObject.Load(3, &s.foreground)
+ stateSourceObject.Load(4, &s.processGroups)
+ stateSourceObject.Load(5, &s.sessionEntry)
+}
+
+func (pg *ProcessGroup) StateTypeName() string {
+ return "pkg/sentry/kernel.ProcessGroup"
+}
+
+func (pg *ProcessGroup) StateFields() []string {
+ return []string{
+ "refs",
+ "originator",
+ "id",
+ "session",
+ "ancestors",
+ "processGroupEntry",
+ }
+}
+
+func (pg *ProcessGroup) beforeSave() {}
+
+// +checklocksignore
+func (pg *ProcessGroup) StateSave(stateSinkObject state.Sink) {
+ pg.beforeSave()
+ stateSinkObject.Save(0, &pg.refs)
+ stateSinkObject.Save(1, &pg.originator)
+ stateSinkObject.Save(2, &pg.id)
+ stateSinkObject.Save(3, &pg.session)
+ stateSinkObject.Save(4, &pg.ancestors)
+ stateSinkObject.Save(5, &pg.processGroupEntry)
+}
+
+func (pg *ProcessGroup) afterLoad() {}
+
+// +checklocksignore
+func (pg *ProcessGroup) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &pg.refs)
+ stateSourceObject.Load(1, &pg.originator)
+ stateSourceObject.Load(2, &pg.id)
+ stateSourceObject.Load(3, &pg.session)
+ stateSourceObject.Load(4, &pg.ancestors)
+ stateSourceObject.Load(5, &pg.processGroupEntry)
+}
+
+func (sh *SignalHandlers) StateTypeName() string {
+ return "pkg/sentry/kernel.SignalHandlers"
+}
+
+func (sh *SignalHandlers) StateFields() []string {
+ return []string{
+ "actions",
+ }
+}
+
+func (sh *SignalHandlers) beforeSave() {}
+
+// +checklocksignore
+func (sh *SignalHandlers) StateSave(stateSinkObject state.Sink) {
+ sh.beforeSave()
+ stateSinkObject.Save(0, &sh.actions)
+}
+
+func (sh *SignalHandlers) afterLoad() {}
+
+// +checklocksignore
+func (sh *SignalHandlers) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &sh.actions)
+}
+
+func (l *socketList) StateTypeName() string {
+ return "pkg/sentry/kernel.socketList"
+}
+
+func (l *socketList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *socketList) beforeSave() {}
+
+// +checklocksignore
+func (l *socketList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *socketList) afterLoad() {}
+
+// +checklocksignore
+func (l *socketList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *socketEntry) StateTypeName() string {
+ return "pkg/sentry/kernel.socketEntry"
+}
+
+func (e *socketEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *socketEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *socketEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *socketEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *socketEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func (s *syscallTableInfo) StateTypeName() string {
+ return "pkg/sentry/kernel.syscallTableInfo"
+}
+
+func (s *syscallTableInfo) StateFields() []string {
+ return []string{
+ "OS",
+ "Arch",
+ }
+}
+
+func (s *syscallTableInfo) beforeSave() {}
+
+// +checklocksignore
+func (s *syscallTableInfo) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.OS)
+ stateSinkObject.Save(1, &s.Arch)
+}
+
+func (s *syscallTableInfo) afterLoad() {}
+
+// +checklocksignore
+func (s *syscallTableInfo) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.OS)
+ stateSourceObject.Load(1, &s.Arch)
+}
+
+func (s *syslog) StateTypeName() string {
+ return "pkg/sentry/kernel.syslog"
+}
+
+func (s *syslog) StateFields() []string {
+ return []string{
+ "msg",
+ }
+}
+
+func (s *syslog) beforeSave() {}
+
+// +checklocksignore
+func (s *syslog) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.msg)
+}
+
+func (s *syslog) afterLoad() {}
+
+// +checklocksignore
+func (s *syslog) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.msg)
+}
+
+func (t *Task) StateTypeName() string {
+ return "pkg/sentry/kernel.Task"
+}
+
+func (t *Task) StateFields() []string {
+ return []string{
+ "taskNode",
+ "runState",
+ "taskWorkCount",
+ "taskWork",
+ "haveSyscallReturn",
+ "gosched",
+ "yieldCount",
+ "pendingSignals",
+ "signalMask",
+ "realSignalMask",
+ "haveSavedSignalMask",
+ "savedSignalMask",
+ "signalStack",
+ "groupStopPending",
+ "groupStopAcknowledged",
+ "trapStopPending",
+ "trapNotifyPending",
+ "stop",
+ "exitStatus",
+ "syscallRestartBlock",
+ "k",
+ "containerID",
+ "image",
+ "fsContext",
+ "fdTable",
+ "vforkParent",
+ "exitState",
+ "exitTracerNotified",
+ "exitTracerAcked",
+ "exitParentNotified",
+ "exitParentAcked",
+ "ptraceTracer",
+ "ptraceTracees",
+ "ptraceSeized",
+ "ptraceOpts",
+ "ptraceSyscallMode",
+ "ptraceSinglestep",
+ "ptraceCode",
+ "ptraceSiginfo",
+ "ptraceEventMsg",
+ "ptraceYAMAExceptionAdded",
+ "ioUsage",
+ "creds",
+ "utsns",
+ "ipcns",
+ "abstractSockets",
+ "mountNamespaceVFS2",
+ "parentDeathSignal",
+ "syscallFilters",
+ "cleartid",
+ "allowedCPUMask",
+ "cpu",
+ "niceness",
+ "numaPolicy",
+ "numaNodeMask",
+ "netns",
+ "rseqCPU",
+ "oldRSeqCPUAddr",
+ "rseqAddr",
+ "rseqSignature",
+ "robustList",
+ "startTime",
+ "kcov",
+ "cgroups",
+ }
+}
+
+func (t *Task) beforeSave() {}
+
+// +checklocksignore
+func (t *Task) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ if !state.IsZeroValue(&t.signalQueue) {
+ state.Failf("signalQueue is %#v, expected zero", &t.signalQueue)
+ }
+ var ptraceTracerValue *Task = t.savePtraceTracer()
+ stateSinkObject.SaveValue(31, ptraceTracerValue)
+ var syscallFiltersValue []bpf.Program = t.saveSyscallFilters()
+ stateSinkObject.SaveValue(48, syscallFiltersValue)
+ stateSinkObject.Save(0, &t.taskNode)
+ stateSinkObject.Save(1, &t.runState)
+ stateSinkObject.Save(2, &t.taskWorkCount)
+ stateSinkObject.Save(3, &t.taskWork)
+ stateSinkObject.Save(4, &t.haveSyscallReturn)
+ stateSinkObject.Save(5, &t.gosched)
+ stateSinkObject.Save(6, &t.yieldCount)
+ stateSinkObject.Save(7, &t.pendingSignals)
+ stateSinkObject.Save(8, &t.signalMask)
+ stateSinkObject.Save(9, &t.realSignalMask)
+ stateSinkObject.Save(10, &t.haveSavedSignalMask)
+ stateSinkObject.Save(11, &t.savedSignalMask)
+ stateSinkObject.Save(12, &t.signalStack)
+ stateSinkObject.Save(13, &t.groupStopPending)
+ stateSinkObject.Save(14, &t.groupStopAcknowledged)
+ stateSinkObject.Save(15, &t.trapStopPending)
+ stateSinkObject.Save(16, &t.trapNotifyPending)
+ stateSinkObject.Save(17, &t.stop)
+ stateSinkObject.Save(18, &t.exitStatus)
+ stateSinkObject.Save(19, &t.syscallRestartBlock)
+ stateSinkObject.Save(20, &t.k)
+ stateSinkObject.Save(21, &t.containerID)
+ stateSinkObject.Save(22, &t.image)
+ stateSinkObject.Save(23, &t.fsContext)
+ stateSinkObject.Save(24, &t.fdTable)
+ stateSinkObject.Save(25, &t.vforkParent)
+ stateSinkObject.Save(26, &t.exitState)
+ stateSinkObject.Save(27, &t.exitTracerNotified)
+ stateSinkObject.Save(28, &t.exitTracerAcked)
+ stateSinkObject.Save(29, &t.exitParentNotified)
+ stateSinkObject.Save(30, &t.exitParentAcked)
+ stateSinkObject.Save(32, &t.ptraceTracees)
+ stateSinkObject.Save(33, &t.ptraceSeized)
+ stateSinkObject.Save(34, &t.ptraceOpts)
+ stateSinkObject.Save(35, &t.ptraceSyscallMode)
+ stateSinkObject.Save(36, &t.ptraceSinglestep)
+ stateSinkObject.Save(37, &t.ptraceCode)
+ stateSinkObject.Save(38, &t.ptraceSiginfo)
+ stateSinkObject.Save(39, &t.ptraceEventMsg)
+ stateSinkObject.Save(40, &t.ptraceYAMAExceptionAdded)
+ stateSinkObject.Save(41, &t.ioUsage)
+ stateSinkObject.Save(42, &t.creds)
+ stateSinkObject.Save(43, &t.utsns)
+ stateSinkObject.Save(44, &t.ipcns)
+ stateSinkObject.Save(45, &t.abstractSockets)
+ stateSinkObject.Save(46, &t.mountNamespaceVFS2)
+ stateSinkObject.Save(47, &t.parentDeathSignal)
+ stateSinkObject.Save(49, &t.cleartid)
+ stateSinkObject.Save(50, &t.allowedCPUMask)
+ stateSinkObject.Save(51, &t.cpu)
+ stateSinkObject.Save(52, &t.niceness)
+ stateSinkObject.Save(53, &t.numaPolicy)
+ stateSinkObject.Save(54, &t.numaNodeMask)
+ stateSinkObject.Save(55, &t.netns)
+ stateSinkObject.Save(56, &t.rseqCPU)
+ stateSinkObject.Save(57, &t.oldRSeqCPUAddr)
+ stateSinkObject.Save(58, &t.rseqAddr)
+ stateSinkObject.Save(59, &t.rseqSignature)
+ stateSinkObject.Save(60, &t.robustList)
+ stateSinkObject.Save(61, &t.startTime)
+ stateSinkObject.Save(62, &t.kcov)
+ stateSinkObject.Save(63, &t.cgroups)
+}
+
+// +checklocksignore
+func (t *Task) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.taskNode)
+ stateSourceObject.Load(1, &t.runState)
+ stateSourceObject.Load(2, &t.taskWorkCount)
+ stateSourceObject.Load(3, &t.taskWork)
+ stateSourceObject.Load(4, &t.haveSyscallReturn)
+ stateSourceObject.Load(5, &t.gosched)
+ stateSourceObject.Load(6, &t.yieldCount)
+ stateSourceObject.Load(7, &t.pendingSignals)
+ stateSourceObject.Load(8, &t.signalMask)
+ stateSourceObject.Load(9, &t.realSignalMask)
+ stateSourceObject.Load(10, &t.haveSavedSignalMask)
+ stateSourceObject.Load(11, &t.savedSignalMask)
+ stateSourceObject.Load(12, &t.signalStack)
+ stateSourceObject.Load(13, &t.groupStopPending)
+ stateSourceObject.Load(14, &t.groupStopAcknowledged)
+ stateSourceObject.Load(15, &t.trapStopPending)
+ stateSourceObject.Load(16, &t.trapNotifyPending)
+ stateSourceObject.Load(17, &t.stop)
+ stateSourceObject.Load(18, &t.exitStatus)
+ stateSourceObject.Load(19, &t.syscallRestartBlock)
+ stateSourceObject.Load(20, &t.k)
+ stateSourceObject.Load(21, &t.containerID)
+ stateSourceObject.Load(22, &t.image)
+ stateSourceObject.Load(23, &t.fsContext)
+ stateSourceObject.Load(24, &t.fdTable)
+ stateSourceObject.Load(25, &t.vforkParent)
+ stateSourceObject.Load(26, &t.exitState)
+ stateSourceObject.Load(27, &t.exitTracerNotified)
+ stateSourceObject.Load(28, &t.exitTracerAcked)
+ stateSourceObject.Load(29, &t.exitParentNotified)
+ stateSourceObject.Load(30, &t.exitParentAcked)
+ stateSourceObject.Load(32, &t.ptraceTracees)
+ stateSourceObject.Load(33, &t.ptraceSeized)
+ stateSourceObject.Load(34, &t.ptraceOpts)
+ stateSourceObject.Load(35, &t.ptraceSyscallMode)
+ stateSourceObject.Load(36, &t.ptraceSinglestep)
+ stateSourceObject.Load(37, &t.ptraceCode)
+ stateSourceObject.Load(38, &t.ptraceSiginfo)
+ stateSourceObject.Load(39, &t.ptraceEventMsg)
+ stateSourceObject.Load(40, &t.ptraceYAMAExceptionAdded)
+ stateSourceObject.Load(41, &t.ioUsage)
+ stateSourceObject.Load(42, &t.creds)
+ stateSourceObject.Load(43, &t.utsns)
+ stateSourceObject.Load(44, &t.ipcns)
+ stateSourceObject.Load(45, &t.abstractSockets)
+ stateSourceObject.Load(46, &t.mountNamespaceVFS2)
+ stateSourceObject.Load(47, &t.parentDeathSignal)
+ stateSourceObject.Load(49, &t.cleartid)
+ stateSourceObject.Load(50, &t.allowedCPUMask)
+ stateSourceObject.Load(51, &t.cpu)
+ stateSourceObject.Load(52, &t.niceness)
+ stateSourceObject.Load(53, &t.numaPolicy)
+ stateSourceObject.Load(54, &t.numaNodeMask)
+ stateSourceObject.Load(55, &t.netns)
+ stateSourceObject.Load(56, &t.rseqCPU)
+ stateSourceObject.Load(57, &t.oldRSeqCPUAddr)
+ stateSourceObject.Load(58, &t.rseqAddr)
+ stateSourceObject.Load(59, &t.rseqSignature)
+ stateSourceObject.Load(60, &t.robustList)
+ stateSourceObject.Load(61, &t.startTime)
+ stateSourceObject.Load(62, &t.kcov)
+ stateSourceObject.Load(63, &t.cgroups)
+ stateSourceObject.LoadValue(31, new(*Task), func(y interface{}) { t.loadPtraceTracer(y.(*Task)) })
+ stateSourceObject.LoadValue(48, new([]bpf.Program), func(y interface{}) { t.loadSyscallFilters(y.([]bpf.Program)) })
+ stateSourceObject.AfterLoad(t.afterLoad)
+}
+
+func (r *runSyscallAfterPtraceEventClone) StateTypeName() string {
+ return "pkg/sentry/kernel.runSyscallAfterPtraceEventClone"
+}
+
+func (r *runSyscallAfterPtraceEventClone) StateFields() []string {
+ return []string{
+ "vforkChild",
+ "vforkChildTID",
+ }
+}
+
+func (r *runSyscallAfterPtraceEventClone) beforeSave() {}
+
+// +checklocksignore
+func (r *runSyscallAfterPtraceEventClone) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.vforkChild)
+ stateSinkObject.Save(1, &r.vforkChildTID)
+}
+
+func (r *runSyscallAfterPtraceEventClone) afterLoad() {}
+
+// +checklocksignore
+func (r *runSyscallAfterPtraceEventClone) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.vforkChild)
+ stateSourceObject.Load(1, &r.vforkChildTID)
+}
+
+func (r *runSyscallAfterVforkStop) StateTypeName() string {
+ return "pkg/sentry/kernel.runSyscallAfterVforkStop"
+}
+
+func (r *runSyscallAfterVforkStop) StateFields() []string {
+ return []string{
+ "childTID",
+ }
+}
+
+func (r *runSyscallAfterVforkStop) beforeSave() {}
+
+// +checklocksignore
+func (r *runSyscallAfterVforkStop) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.childTID)
+}
+
+func (r *runSyscallAfterVforkStop) afterLoad() {}
+
+// +checklocksignore
+func (r *runSyscallAfterVforkStop) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.childTID)
+}
+
+func (v *vforkStop) StateTypeName() string {
+ return "pkg/sentry/kernel.vforkStop"
+}
+
+func (v *vforkStop) StateFields() []string {
+ return []string{}
+}
+
+func (v *vforkStop) beforeSave() {}
+
+// +checklocksignore
+func (v *vforkStop) StateSave(stateSinkObject state.Sink) {
+ v.beforeSave()
+}
+
+func (v *vforkStop) afterLoad() {}
+
+// +checklocksignore
+func (v *vforkStop) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *execStop) StateTypeName() string {
+ return "pkg/sentry/kernel.execStop"
+}
+
+func (e *execStop) StateFields() []string {
+ return []string{}
+}
+
+func (e *execStop) beforeSave() {}
+
+// +checklocksignore
+func (e *execStop) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *execStop) afterLoad() {}
+
+// +checklocksignore
+func (e *execStop) StateLoad(stateSourceObject state.Source) {
+}
+
+func (r *runSyscallAfterExecStop) StateTypeName() string {
+ return "pkg/sentry/kernel.runSyscallAfterExecStop"
+}
+
+func (r *runSyscallAfterExecStop) StateFields() []string {
+ return []string{
+ "image",
+ }
+}
+
+func (r *runSyscallAfterExecStop) beforeSave() {}
+
+// +checklocksignore
+func (r *runSyscallAfterExecStop) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.image)
+}
+
+func (r *runSyscallAfterExecStop) afterLoad() {}
+
+// +checklocksignore
+func (r *runSyscallAfterExecStop) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.image)
+}
+
+func (r *runExit) StateTypeName() string {
+ return "pkg/sentry/kernel.runExit"
+}
+
+func (r *runExit) StateFields() []string {
+ return []string{}
+}
+
+func (r *runExit) beforeSave() {}
+
+// +checklocksignore
+func (r *runExit) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+}
+
+func (r *runExit) afterLoad() {}
+
+// +checklocksignore
+func (r *runExit) StateLoad(stateSourceObject state.Source) {
+}
+
+func (r *runExitMain) StateTypeName() string {
+ return "pkg/sentry/kernel.runExitMain"
+}
+
+func (r *runExitMain) StateFields() []string {
+ return []string{}
+}
+
+func (r *runExitMain) beforeSave() {}
+
+// +checklocksignore
+func (r *runExitMain) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+}
+
+func (r *runExitMain) afterLoad() {}
+
+// +checklocksignore
+func (r *runExitMain) StateLoad(stateSourceObject state.Source) {
+}
+
+func (r *runExitNotify) StateTypeName() string {
+ return "pkg/sentry/kernel.runExitNotify"
+}
+
+func (r *runExitNotify) StateFields() []string {
+ return []string{}
+}
+
+func (r *runExitNotify) beforeSave() {}
+
+// +checklocksignore
+func (r *runExitNotify) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+}
+
+func (r *runExitNotify) afterLoad() {}
+
+// +checklocksignore
+func (r *runExitNotify) StateLoad(stateSourceObject state.Source) {
+}
+
+func (image *TaskImage) StateTypeName() string {
+ return "pkg/sentry/kernel.TaskImage"
+}
+
+func (image *TaskImage) StateFields() []string {
+ return []string{
+ "Name",
+ "Arch",
+ "MemoryManager",
+ "fu",
+ "st",
+ }
+}
+
+func (image *TaskImage) beforeSave() {}
+
+// +checklocksignore
+func (image *TaskImage) StateSave(stateSinkObject state.Sink) {
+ image.beforeSave()
+ var stValue syscallTableInfo = image.saveSt()
+ stateSinkObject.SaveValue(4, stValue)
+ stateSinkObject.Save(0, &image.Name)
+ stateSinkObject.Save(1, &image.Arch)
+ stateSinkObject.Save(2, &image.MemoryManager)
+ stateSinkObject.Save(3, &image.fu)
+}
+
+func (image *TaskImage) afterLoad() {}
+
+// +checklocksignore
+func (image *TaskImage) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &image.Name)
+ stateSourceObject.Load(1, &image.Arch)
+ stateSourceObject.Load(2, &image.MemoryManager)
+ stateSourceObject.Load(3, &image.fu)
+ stateSourceObject.LoadValue(4, new(syscallTableInfo), func(y interface{}) { image.loadSt(y.(syscallTableInfo)) })
+}
+
+func (l *taskList) StateTypeName() string {
+ return "pkg/sentry/kernel.taskList"
+}
+
+func (l *taskList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *taskList) beforeSave() {}
+
+// +checklocksignore
+func (l *taskList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *taskList) afterLoad() {}
+
+// +checklocksignore
+func (l *taskList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *taskEntry) StateTypeName() string {
+ return "pkg/sentry/kernel.taskEntry"
+}
+
+func (e *taskEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *taskEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *taskEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *taskEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *taskEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func (app *runApp) StateTypeName() string {
+ return "pkg/sentry/kernel.runApp"
+}
+
+func (app *runApp) StateFields() []string {
+ return []string{}
+}
+
+func (app *runApp) beforeSave() {}
+
+// +checklocksignore
+func (app *runApp) StateSave(stateSinkObject state.Sink) {
+ app.beforeSave()
+}
+
+func (app *runApp) afterLoad() {}
+
+// +checklocksignore
+func (app *runApp) StateLoad(stateSourceObject state.Source) {
+}
+
+func (ts *TaskGoroutineSchedInfo) StateTypeName() string {
+ return "pkg/sentry/kernel.TaskGoroutineSchedInfo"
+}
+
+func (ts *TaskGoroutineSchedInfo) StateFields() []string {
+ return []string{
+ "Timestamp",
+ "State",
+ "UserTicks",
+ "SysTicks",
+ }
+}
+
+func (ts *TaskGoroutineSchedInfo) beforeSave() {}
+
+// +checklocksignore
+func (ts *TaskGoroutineSchedInfo) StateSave(stateSinkObject state.Sink) {
+ ts.beforeSave()
+ stateSinkObject.Save(0, &ts.Timestamp)
+ stateSinkObject.Save(1, &ts.State)
+ stateSinkObject.Save(2, &ts.UserTicks)
+ stateSinkObject.Save(3, &ts.SysTicks)
+}
+
+func (ts *TaskGoroutineSchedInfo) afterLoad() {}
+
+// +checklocksignore
+func (ts *TaskGoroutineSchedInfo) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &ts.Timestamp)
+ stateSourceObject.Load(1, &ts.State)
+ stateSourceObject.Load(2, &ts.UserTicks)
+ stateSourceObject.Load(3, &ts.SysTicks)
+}
+
+func (tc *taskClock) StateTypeName() string {
+ return "pkg/sentry/kernel.taskClock"
+}
+
+func (tc *taskClock) StateFields() []string {
+ return []string{
+ "t",
+ "includeSys",
+ }
+}
+
+func (tc *taskClock) beforeSave() {}
+
+// +checklocksignore
+func (tc *taskClock) StateSave(stateSinkObject state.Sink) {
+ tc.beforeSave()
+ stateSinkObject.Save(0, &tc.t)
+ stateSinkObject.Save(1, &tc.includeSys)
+}
+
+func (tc *taskClock) afterLoad() {}
+
+// +checklocksignore
+func (tc *taskClock) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &tc.t)
+ stateSourceObject.Load(1, &tc.includeSys)
+}
+
+func (tgc *tgClock) StateTypeName() string {
+ return "pkg/sentry/kernel.tgClock"
+}
+
+func (tgc *tgClock) StateFields() []string {
+ return []string{
+ "tg",
+ "includeSys",
+ }
+}
+
+func (tgc *tgClock) beforeSave() {}
+
+// +checklocksignore
+func (tgc *tgClock) StateSave(stateSinkObject state.Sink) {
+ tgc.beforeSave()
+ stateSinkObject.Save(0, &tgc.tg)
+ stateSinkObject.Save(1, &tgc.includeSys)
+}
+
+func (tgc *tgClock) afterLoad() {}
+
+// +checklocksignore
+func (tgc *tgClock) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &tgc.tg)
+ stateSourceObject.Load(1, &tgc.includeSys)
+}
+
+func (g *groupStop) StateTypeName() string {
+ return "pkg/sentry/kernel.groupStop"
+}
+
+func (g *groupStop) StateFields() []string {
+ return []string{}
+}
+
+func (g *groupStop) beforeSave() {}
+
+// +checklocksignore
+func (g *groupStop) StateSave(stateSinkObject state.Sink) {
+ g.beforeSave()
+}
+
+func (g *groupStop) afterLoad() {}
+
+// +checklocksignore
+func (g *groupStop) StateLoad(stateSourceObject state.Source) {
+}
+
+func (r *runInterrupt) StateTypeName() string {
+ return "pkg/sentry/kernel.runInterrupt"
+}
+
+func (r *runInterrupt) StateFields() []string {
+ return []string{}
+}
+
+func (r *runInterrupt) beforeSave() {}
+
+// +checklocksignore
+func (r *runInterrupt) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+}
+
+func (r *runInterrupt) afterLoad() {}
+
+// +checklocksignore
+func (r *runInterrupt) StateLoad(stateSourceObject state.Source) {
+}
+
+func (r *runInterruptAfterSignalDeliveryStop) StateTypeName() string {
+ return "pkg/sentry/kernel.runInterruptAfterSignalDeliveryStop"
+}
+
+func (r *runInterruptAfterSignalDeliveryStop) StateFields() []string {
+ return []string{}
+}
+
+func (r *runInterruptAfterSignalDeliveryStop) beforeSave() {}
+
+// +checklocksignore
+func (r *runInterruptAfterSignalDeliveryStop) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+}
+
+func (r *runInterruptAfterSignalDeliveryStop) afterLoad() {}
+
+// +checklocksignore
+func (r *runInterruptAfterSignalDeliveryStop) StateLoad(stateSourceObject state.Source) {
+}
+
+func (r *runSyscallAfterSyscallEnterStop) StateTypeName() string {
+ return "pkg/sentry/kernel.runSyscallAfterSyscallEnterStop"
+}
+
+func (r *runSyscallAfterSyscallEnterStop) StateFields() []string {
+ return []string{}
+}
+
+func (r *runSyscallAfterSyscallEnterStop) beforeSave() {}
+
+// +checklocksignore
+func (r *runSyscallAfterSyscallEnterStop) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+}
+
+func (r *runSyscallAfterSyscallEnterStop) afterLoad() {}
+
+// +checklocksignore
+func (r *runSyscallAfterSyscallEnterStop) StateLoad(stateSourceObject state.Source) {
+}
+
+func (r *runSyscallAfterSysemuStop) StateTypeName() string {
+ return "pkg/sentry/kernel.runSyscallAfterSysemuStop"
+}
+
+func (r *runSyscallAfterSysemuStop) StateFields() []string {
+ return []string{}
+}
+
+func (r *runSyscallAfterSysemuStop) beforeSave() {}
+
+// +checklocksignore
+func (r *runSyscallAfterSysemuStop) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+}
+
+func (r *runSyscallAfterSysemuStop) afterLoad() {}
+
+// +checklocksignore
+func (r *runSyscallAfterSysemuStop) StateLoad(stateSourceObject state.Source) {
+}
+
+func (r *runSyscallReinvoke) StateTypeName() string {
+ return "pkg/sentry/kernel.runSyscallReinvoke"
+}
+
+func (r *runSyscallReinvoke) StateFields() []string {
+ return []string{}
+}
+
+func (r *runSyscallReinvoke) beforeSave() {}
+
+// +checklocksignore
+func (r *runSyscallReinvoke) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+}
+
+func (r *runSyscallReinvoke) afterLoad() {}
+
+// +checklocksignore
+func (r *runSyscallReinvoke) StateLoad(stateSourceObject state.Source) {
+}
+
+func (r *runSyscallExit) StateTypeName() string {
+ return "pkg/sentry/kernel.runSyscallExit"
+}
+
+func (r *runSyscallExit) StateFields() []string {
+ return []string{}
+}
+
+func (r *runSyscallExit) beforeSave() {}
+
+// +checklocksignore
+func (r *runSyscallExit) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+}
+
+func (r *runSyscallExit) afterLoad() {}
+
+// +checklocksignore
+func (r *runSyscallExit) StateLoad(stateSourceObject state.Source) {
+}
+
+func (tg *ThreadGroup) StateTypeName() string {
+ return "pkg/sentry/kernel.ThreadGroup"
+}
+
+func (tg *ThreadGroup) StateFields() []string {
+ return []string{
+ "threadGroupNode",
+ "signalHandlers",
+ "pendingSignals",
+ "groupStopDequeued",
+ "groupStopSignal",
+ "groupStopPendingCount",
+ "groupStopComplete",
+ "groupStopWaitable",
+ "groupContNotify",
+ "groupContInterrupted",
+ "groupContWaitable",
+ "exiting",
+ "exitStatus",
+ "terminationSignal",
+ "itimerRealTimer",
+ "itimerVirtSetting",
+ "itimerProfSetting",
+ "rlimitCPUSoftSetting",
+ "cpuTimersEnabled",
+ "timers",
+ "nextTimerID",
+ "exitedCPUStats",
+ "childCPUStats",
+ "ioUsage",
+ "maxRSS",
+ "childMaxRSS",
+ "limits",
+ "processGroup",
+ "execed",
+ "oldRSeqCritical",
+ "mounts",
+ "tty",
+ "oomScoreAdj",
+ }
+}
+
+func (tg *ThreadGroup) beforeSave() {}
+
+// +checklocksignore
+func (tg *ThreadGroup) StateSave(stateSinkObject state.Sink) {
+ tg.beforeSave()
+ var oldRSeqCriticalValue *OldRSeqCriticalRegion = tg.saveOldRSeqCritical()
+ stateSinkObject.SaveValue(29, oldRSeqCriticalValue)
+ stateSinkObject.Save(0, &tg.threadGroupNode)
+ stateSinkObject.Save(1, &tg.signalHandlers)
+ stateSinkObject.Save(2, &tg.pendingSignals)
+ stateSinkObject.Save(3, &tg.groupStopDequeued)
+ stateSinkObject.Save(4, &tg.groupStopSignal)
+ stateSinkObject.Save(5, &tg.groupStopPendingCount)
+ stateSinkObject.Save(6, &tg.groupStopComplete)
+ stateSinkObject.Save(7, &tg.groupStopWaitable)
+ stateSinkObject.Save(8, &tg.groupContNotify)
+ stateSinkObject.Save(9, &tg.groupContInterrupted)
+ stateSinkObject.Save(10, &tg.groupContWaitable)
+ stateSinkObject.Save(11, &tg.exiting)
+ stateSinkObject.Save(12, &tg.exitStatus)
+ stateSinkObject.Save(13, &tg.terminationSignal)
+ stateSinkObject.Save(14, &tg.itimerRealTimer)
+ stateSinkObject.Save(15, &tg.itimerVirtSetting)
+ stateSinkObject.Save(16, &tg.itimerProfSetting)
+ stateSinkObject.Save(17, &tg.rlimitCPUSoftSetting)
+ stateSinkObject.Save(18, &tg.cpuTimersEnabled)
+ stateSinkObject.Save(19, &tg.timers)
+ stateSinkObject.Save(20, &tg.nextTimerID)
+ stateSinkObject.Save(21, &tg.exitedCPUStats)
+ stateSinkObject.Save(22, &tg.childCPUStats)
+ stateSinkObject.Save(23, &tg.ioUsage)
+ stateSinkObject.Save(24, &tg.maxRSS)
+ stateSinkObject.Save(25, &tg.childMaxRSS)
+ stateSinkObject.Save(26, &tg.limits)
+ stateSinkObject.Save(27, &tg.processGroup)
+ stateSinkObject.Save(28, &tg.execed)
+ stateSinkObject.Save(30, &tg.mounts)
+ stateSinkObject.Save(31, &tg.tty)
+ stateSinkObject.Save(32, &tg.oomScoreAdj)
+}
+
+func (tg *ThreadGroup) afterLoad() {}
+
+// +checklocksignore
+func (tg *ThreadGroup) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &tg.threadGroupNode)
+ stateSourceObject.Load(1, &tg.signalHandlers)
+ stateSourceObject.Load(2, &tg.pendingSignals)
+ stateSourceObject.Load(3, &tg.groupStopDequeued)
+ stateSourceObject.Load(4, &tg.groupStopSignal)
+ stateSourceObject.Load(5, &tg.groupStopPendingCount)
+ stateSourceObject.Load(6, &tg.groupStopComplete)
+ stateSourceObject.Load(7, &tg.groupStopWaitable)
+ stateSourceObject.Load(8, &tg.groupContNotify)
+ stateSourceObject.Load(9, &tg.groupContInterrupted)
+ stateSourceObject.Load(10, &tg.groupContWaitable)
+ stateSourceObject.Load(11, &tg.exiting)
+ stateSourceObject.Load(12, &tg.exitStatus)
+ stateSourceObject.Load(13, &tg.terminationSignal)
+ stateSourceObject.Load(14, &tg.itimerRealTimer)
+ stateSourceObject.Load(15, &tg.itimerVirtSetting)
+ stateSourceObject.Load(16, &tg.itimerProfSetting)
+ stateSourceObject.Load(17, &tg.rlimitCPUSoftSetting)
+ stateSourceObject.Load(18, &tg.cpuTimersEnabled)
+ stateSourceObject.Load(19, &tg.timers)
+ stateSourceObject.Load(20, &tg.nextTimerID)
+ stateSourceObject.Load(21, &tg.exitedCPUStats)
+ stateSourceObject.Load(22, &tg.childCPUStats)
+ stateSourceObject.Load(23, &tg.ioUsage)
+ stateSourceObject.Load(24, &tg.maxRSS)
+ stateSourceObject.Load(25, &tg.childMaxRSS)
+ stateSourceObject.Load(26, &tg.limits)
+ stateSourceObject.Load(27, &tg.processGroup)
+ stateSourceObject.Load(28, &tg.execed)
+ stateSourceObject.Load(30, &tg.mounts)
+ stateSourceObject.Load(31, &tg.tty)
+ stateSourceObject.Load(32, &tg.oomScoreAdj)
+ stateSourceObject.LoadValue(29, new(*OldRSeqCriticalRegion), func(y interface{}) { tg.loadOldRSeqCritical(y.(*OldRSeqCriticalRegion)) })
+}
+
+func (l *itimerRealListener) StateTypeName() string {
+ return "pkg/sentry/kernel.itimerRealListener"
+}
+
+func (l *itimerRealListener) StateFields() []string {
+ return []string{
+ "tg",
+ }
+}
+
+func (l *itimerRealListener) beforeSave() {}
+
+// +checklocksignore
+func (l *itimerRealListener) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.tg)
+}
+
+func (l *itimerRealListener) afterLoad() {}
+
+// +checklocksignore
+func (l *itimerRealListener) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.tg)
+}
+
+func (ts *TaskSet) StateTypeName() string {
+ return "pkg/sentry/kernel.TaskSet"
+}
+
+func (ts *TaskSet) StateFields() []string {
+ return []string{
+ "Root",
+ "sessions",
+ }
+}
+
+func (ts *TaskSet) beforeSave() {}
+
+// +checklocksignore
+func (ts *TaskSet) StateSave(stateSinkObject state.Sink) {
+ ts.beforeSave()
+ stateSinkObject.Save(0, &ts.Root)
+ stateSinkObject.Save(1, &ts.sessions)
+}
+
+func (ts *TaskSet) afterLoad() {}
+
+// +checklocksignore
+func (ts *TaskSet) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &ts.Root)
+ stateSourceObject.Load(1, &ts.sessions)
+}
+
+func (ns *PIDNamespace) StateTypeName() string {
+ return "pkg/sentry/kernel.PIDNamespace"
+}
+
+func (ns *PIDNamespace) StateFields() []string {
+ return []string{
+ "owner",
+ "parent",
+ "userns",
+ "last",
+ "tasks",
+ "tids",
+ "tgids",
+ "sessions",
+ "sids",
+ "processGroups",
+ "pgids",
+ "exiting",
+ }
+}
+
+func (ns *PIDNamespace) beforeSave() {}
+
+// +checklocksignore
+func (ns *PIDNamespace) StateSave(stateSinkObject state.Sink) {
+ ns.beforeSave()
+ stateSinkObject.Save(0, &ns.owner)
+ stateSinkObject.Save(1, &ns.parent)
+ stateSinkObject.Save(2, &ns.userns)
+ stateSinkObject.Save(3, &ns.last)
+ stateSinkObject.Save(4, &ns.tasks)
+ stateSinkObject.Save(5, &ns.tids)
+ stateSinkObject.Save(6, &ns.tgids)
+ stateSinkObject.Save(7, &ns.sessions)
+ stateSinkObject.Save(8, &ns.sids)
+ stateSinkObject.Save(9, &ns.processGroups)
+ stateSinkObject.Save(10, &ns.pgids)
+ stateSinkObject.Save(11, &ns.exiting)
+}
+
+func (ns *PIDNamespace) afterLoad() {}
+
+// +checklocksignore
+func (ns *PIDNamespace) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &ns.owner)
+ stateSourceObject.Load(1, &ns.parent)
+ stateSourceObject.Load(2, &ns.userns)
+ stateSourceObject.Load(3, &ns.last)
+ stateSourceObject.Load(4, &ns.tasks)
+ stateSourceObject.Load(5, &ns.tids)
+ stateSourceObject.Load(6, &ns.tgids)
+ stateSourceObject.Load(7, &ns.sessions)
+ stateSourceObject.Load(8, &ns.sids)
+ stateSourceObject.Load(9, &ns.processGroups)
+ stateSourceObject.Load(10, &ns.pgids)
+ stateSourceObject.Load(11, &ns.exiting)
+}
+
+func (t *threadGroupNode) StateTypeName() string {
+ return "pkg/sentry/kernel.threadGroupNode"
+}
+
+func (t *threadGroupNode) StateFields() []string {
+ return []string{
+ "pidns",
+ "leader",
+ "execing",
+ "tasks",
+ "tasksCount",
+ "liveTasks",
+ "activeTasks",
+ }
+}
+
+func (t *threadGroupNode) beforeSave() {}
+
+// +checklocksignore
+func (t *threadGroupNode) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ stateSinkObject.Save(0, &t.pidns)
+ stateSinkObject.Save(1, &t.leader)
+ stateSinkObject.Save(2, &t.execing)
+ stateSinkObject.Save(3, &t.tasks)
+ stateSinkObject.Save(4, &t.tasksCount)
+ stateSinkObject.Save(5, &t.liveTasks)
+ stateSinkObject.Save(6, &t.activeTasks)
+}
+
+func (t *threadGroupNode) afterLoad() {}
+
+// +checklocksignore
+func (t *threadGroupNode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.pidns)
+ stateSourceObject.Load(1, &t.leader)
+ stateSourceObject.Load(2, &t.execing)
+ stateSourceObject.Load(3, &t.tasks)
+ stateSourceObject.Load(4, &t.tasksCount)
+ stateSourceObject.Load(5, &t.liveTasks)
+ stateSourceObject.Load(6, &t.activeTasks)
+}
+
+func (t *taskNode) StateTypeName() string {
+ return "pkg/sentry/kernel.taskNode"
+}
+
+func (t *taskNode) StateFields() []string {
+ return []string{
+ "tg",
+ "taskEntry",
+ "parent",
+ "children",
+ "childPIDNamespace",
+ }
+}
+
+func (t *taskNode) beforeSave() {}
+
+// +checklocksignore
+func (t *taskNode) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ stateSinkObject.Save(0, &t.tg)
+ stateSinkObject.Save(1, &t.taskEntry)
+ stateSinkObject.Save(2, &t.parent)
+ stateSinkObject.Save(3, &t.children)
+ stateSinkObject.Save(4, &t.childPIDNamespace)
+}
+
+func (t *taskNode) afterLoad() {}
+
+// +checklocksignore
+func (t *taskNode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.LoadWait(0, &t.tg)
+ stateSourceObject.Load(1, &t.taskEntry)
+ stateSourceObject.Load(2, &t.parent)
+ stateSourceObject.Load(3, &t.children)
+ stateSourceObject.Load(4, &t.childPIDNamespace)
+}
+
+func (t *Timekeeper) StateTypeName() string {
+ return "pkg/sentry/kernel.Timekeeper"
+}
+
+func (t *Timekeeper) StateFields() []string {
+ return []string{
+ "realtimeClock",
+ "monotonicClock",
+ "bootTime",
+ "saveMonotonic",
+ "saveRealtime",
+ "params",
+ }
+}
+
+// +checklocksignore
+func (t *Timekeeper) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ stateSinkObject.Save(0, &t.realtimeClock)
+ stateSinkObject.Save(1, &t.monotonicClock)
+ stateSinkObject.Save(2, &t.bootTime)
+ stateSinkObject.Save(3, &t.saveMonotonic)
+ stateSinkObject.Save(4, &t.saveRealtime)
+ stateSinkObject.Save(5, &t.params)
+}
+
+// +checklocksignore
+func (t *Timekeeper) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.realtimeClock)
+ stateSourceObject.Load(1, &t.monotonicClock)
+ stateSourceObject.Load(2, &t.bootTime)
+ stateSourceObject.Load(3, &t.saveMonotonic)
+ stateSourceObject.Load(4, &t.saveRealtime)
+ stateSourceObject.Load(5, &t.params)
+ stateSourceObject.AfterLoad(t.afterLoad)
+}
+
+func (tc *timekeeperClock) StateTypeName() string {
+ return "pkg/sentry/kernel.timekeeperClock"
+}
+
+func (tc *timekeeperClock) StateFields() []string {
+ return []string{
+ "tk",
+ "c",
+ }
+}
+
+func (tc *timekeeperClock) beforeSave() {}
+
+// +checklocksignore
+func (tc *timekeeperClock) StateSave(stateSinkObject state.Sink) {
+ tc.beforeSave()
+ stateSinkObject.Save(0, &tc.tk)
+ stateSinkObject.Save(1, &tc.c)
+}
+
+func (tc *timekeeperClock) afterLoad() {}
+
+// +checklocksignore
+func (tc *timekeeperClock) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &tc.tk)
+ stateSourceObject.Load(1, &tc.c)
+}
+
+func (t *TTY) StateTypeName() string {
+ return "pkg/sentry/kernel.TTY"
+}
+
+func (t *TTY) StateFields() []string {
+ return []string{
+ "Index",
+ "tg",
+ }
+}
+
+func (t *TTY) beforeSave() {}
+
+// +checklocksignore
+func (t *TTY) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ stateSinkObject.Save(0, &t.Index)
+ stateSinkObject.Save(1, &t.tg)
+}
+
+func (t *TTY) afterLoad() {}
+
+// +checklocksignore
+func (t *TTY) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.Index)
+ stateSourceObject.Load(1, &t.tg)
+}
+
+func (u *UTSNamespace) StateTypeName() string {
+ return "pkg/sentry/kernel.UTSNamespace"
+}
+
+func (u *UTSNamespace) StateFields() []string {
+ return []string{
+ "hostName",
+ "domainName",
+ "userns",
+ }
+}
+
+func (u *UTSNamespace) beforeSave() {}
+
+// +checklocksignore
+func (u *UTSNamespace) StateSave(stateSinkObject state.Sink) {
+ u.beforeSave()
+ stateSinkObject.Save(0, &u.hostName)
+ stateSinkObject.Save(1, &u.domainName)
+ stateSinkObject.Save(2, &u.userns)
+}
+
+func (u *UTSNamespace) afterLoad() {}
+
+// +checklocksignore
+func (u *UTSNamespace) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &u.hostName)
+ stateSourceObject.Load(1, &u.domainName)
+ stateSourceObject.Load(2, &u.userns)
+}
+
+func (v *VDSOParamPage) StateTypeName() string {
+ return "pkg/sentry/kernel.VDSOParamPage"
+}
+
+func (v *VDSOParamPage) StateFields() []string {
+ return []string{
+ "mfp",
+ "fr",
+ "seq",
+ "copyScratchBuffer",
+ }
+}
+
+func (v *VDSOParamPage) beforeSave() {}
+
+// +checklocksignore
+func (v *VDSOParamPage) StateSave(stateSinkObject state.Sink) {
+ v.beforeSave()
+ stateSinkObject.Save(0, &v.mfp)
+ stateSinkObject.Save(1, &v.fr)
+ stateSinkObject.Save(2, &v.seq)
+ stateSinkObject.Save(3, &v.copyScratchBuffer)
+}
+
+func (v *VDSOParamPage) afterLoad() {}
+
+// +checklocksignore
+func (v *VDSOParamPage) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &v.mfp)
+ stateSourceObject.Load(1, &v.fr)
+ stateSourceObject.Load(2, &v.seq)
+ stateSourceObject.Load(3, &v.copyScratchBuffer)
+}
+
+func init() {
+ state.Register((*abstractEndpoint)(nil))
+ state.Register((*AbstractSocketNamespace)(nil))
+ state.Register((*Cgroup)(nil))
+ state.Register((*hierarchy)(nil))
+ state.Register((*CgroupRegistry)(nil))
+ state.Register((*FDFlags)(nil))
+ state.Register((*descriptor)(nil))
+ state.Register((*FDTable)(nil))
+ state.Register((*FDTableRefs)(nil))
+ state.Register((*FSContext)(nil))
+ state.Register((*FSContextRefs)(nil))
+ state.Register((*IPCNamespace)(nil))
+ state.Register((*IPCNamespaceRefs)(nil))
+ state.Register((*Kernel)(nil))
+ state.Register((*SocketRecord)(nil))
+ state.Register((*SocketRecordVFS1)(nil))
+ state.Register((*pendingSignals)(nil))
+ state.Register((*pendingSignalQueue)(nil))
+ state.Register((*pendingSignal)(nil))
+ state.Register((*pendingSignalList)(nil))
+ state.Register((*pendingSignalEntry)(nil))
+ state.Register((*savedPendingSignal)(nil))
+ state.Register((*IntervalTimer)(nil))
+ state.Register((*processGroupList)(nil))
+ state.Register((*processGroupEntry)(nil))
+ state.Register((*ProcessGroupRefs)(nil))
+ state.Register((*ptraceOptions)(nil))
+ state.Register((*ptraceStop)(nil))
+ state.Register((*OldRSeqCriticalRegion)(nil))
+ state.Register((*sessionList)(nil))
+ state.Register((*sessionEntry)(nil))
+ state.Register((*SessionRefs)(nil))
+ state.Register((*Session)(nil))
+ state.Register((*ProcessGroup)(nil))
+ state.Register((*SignalHandlers)(nil))
+ state.Register((*socketList)(nil))
+ state.Register((*socketEntry)(nil))
+ state.Register((*syscallTableInfo)(nil))
+ state.Register((*syslog)(nil))
+ state.Register((*Task)(nil))
+ state.Register((*runSyscallAfterPtraceEventClone)(nil))
+ state.Register((*runSyscallAfterVforkStop)(nil))
+ state.Register((*vforkStop)(nil))
+ state.Register((*execStop)(nil))
+ state.Register((*runSyscallAfterExecStop)(nil))
+ state.Register((*runExit)(nil))
+ state.Register((*runExitMain)(nil))
+ state.Register((*runExitNotify)(nil))
+ state.Register((*TaskImage)(nil))
+ state.Register((*taskList)(nil))
+ state.Register((*taskEntry)(nil))
+ state.Register((*runApp)(nil))
+ state.Register((*TaskGoroutineSchedInfo)(nil))
+ state.Register((*taskClock)(nil))
+ state.Register((*tgClock)(nil))
+ state.Register((*groupStop)(nil))
+ state.Register((*runInterrupt)(nil))
+ state.Register((*runInterruptAfterSignalDeliveryStop)(nil))
+ state.Register((*runSyscallAfterSyscallEnterStop)(nil))
+ state.Register((*runSyscallAfterSysemuStop)(nil))
+ state.Register((*runSyscallReinvoke)(nil))
+ state.Register((*runSyscallExit)(nil))
+ state.Register((*ThreadGroup)(nil))
+ state.Register((*itimerRealListener)(nil))
+ state.Register((*TaskSet)(nil))
+ state.Register((*PIDNamespace)(nil))
+ state.Register((*threadGroupNode)(nil))
+ state.Register((*taskNode)(nil))
+ state.Register((*Timekeeper)(nil))
+ state.Register((*timekeeperClock)(nil))
+ state.Register((*TTY)(nil))
+ state.Register((*UTSNamespace)(nil))
+ state.Register((*VDSOParamPage)(nil))
+}
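
The hunk above is stateify output: for every savable type it emits StateTypeName, StateFields, StateSave and StateLoad, and the init() function registers each type with the state package. As an illustrative sketch only (not part of this diff), the hand-written equivalent for a hypothetical one-field type could look like the following; the counter type, the example package name, and the gvisor.dev/gvisor/pkg/state import path are assumptions made for the example.

package example

import "gvisor.dev/gvisor/pkg/state"

// counter is a hypothetical savable type, used only to illustrate the
// interface that the generated code above implements.
type counter struct {
	count uint64
}

func (c *counter) StateTypeName() string { return "example.counter" }

func (c *counter) StateFields() []string { return []string{"count"} }

func (c *counter) beforeSave() {}

// StateSave writes each field to the sink at a stable slot index.
func (c *counter) StateSave(sink state.Sink) {
	c.beforeSave()
	sink.Save(0, &c.count)
}

func (c *counter) afterLoad() {}

// StateLoad reads the fields back by slot and schedules afterLoad to run
// once all dependencies have been restored.
func (c *counter) StateLoad(source state.Source) {
	source.Load(0, &c.count)
	source.AfterLoad(c.afterLoad)
}

func init() {
	state.Register((*counter)(nil))
}
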
diff --git a/pkg/sentry/kernel/kernel_unsafe_abi_autogen_unsafe.go b/pkg/sentry/kernel/kernel_unsafe_abi_autogen_unsafe.go
new file mode 100644
index 000000000..9c39d630a
--- /dev/null
+++ b/pkg/sentry/kernel/kernel_unsafe_abi_autogen_unsafe.go
@@ -0,0 +1,13 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// If there are issues with build tag aggregation, see
+// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here
+// come from the input set of files used to generate this file. This input set
+// is filtered based on pre-defined file suffixes related to build tags, see
+// tools/defs.bzl:calculate_sets().
+
+package kernel
+
+import (
+)
+
diff --git a/pkg/sentry/kernel/kernel_unsafe_state_autogen.go b/pkg/sentry/kernel/kernel_unsafe_state_autogen.go
new file mode 100644
index 000000000..12130bf74
--- /dev/null
+++ b/pkg/sentry/kernel/kernel_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package kernel
diff --git a/pkg/sentry/kernel/memevent/BUILD b/pkg/sentry/kernel/memevent/BUILD
deleted file mode 100644
index 4486848d2..000000000
--- a/pkg/sentry/kernel/memevent/BUILD
+++ /dev/null
@@ -1,24 +0,0 @@
-load("//tools:defs.bzl", "go_library", "proto_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "memevent",
- srcs = ["memory_events.go"],
- visibility = ["//:sandbox"],
- deps = [
- ":memory_events_go_proto",
- "//pkg/eventchannel",
- "//pkg/log",
- "//pkg/metric",
- "//pkg/sentry/kernel",
- "//pkg/sentry/usage",
- "//pkg/sync",
- ],
-)
-
-proto_library(
- name = "memory_events",
- srcs = ["memory_events.proto"],
- visibility = ["//visibility:public"],
-)
diff --git a/pkg/sentry/kernel/memevent/memevent_state_autogen.go b/pkg/sentry/kernel/memevent/memevent_state_autogen.go
new file mode 100644
index 000000000..4a1679fa9
--- /dev/null
+++ b/pkg/sentry/kernel/memevent/memevent_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package memevent
diff --git a/pkg/sentry/kernel/memevent/memory_events.proto b/pkg/sentry/kernel/memevent/memory_events.proto
deleted file mode 100644
index bf8029ff5..000000000
--- a/pkg/sentry/kernel/memevent/memory_events.proto
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package gvisor;
-
-// MemoryUsageEvent describes the memory usage of the sandbox at a single
-// instant in time. These messages are emitted periodically on the eventchannel.
-message MemoryUsageEvent {
- // The total memory usage of the sandboxed application in bytes, calculated
- // using the 'fast' method.
- uint64 total = 1;
-
- // Memory used to back memory-mapped regions for files in the application, in
- // bytes. This corresponds to the usage.MemoryKind.Mapped memory type.
- uint64 mapped = 2;
-}
diff --git a/pkg/sentry/kernel/memevent/memory_events_go_proto/memory_events.pb.go b/pkg/sentry/kernel/memevent/memory_events_go_proto/memory_events.pb.go
new file mode 100644
index 000000000..ed1b8a8ca
--- /dev/null
+++ b/pkg/sentry/kernel/memevent/memory_events_go_proto/memory_events.pb.go
@@ -0,0 +1,158 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.13.0
+// source: pkg/sentry/kernel/memevent/memory_events.proto
+
+package gvisor
+
+import (
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+type MemoryUsageEvent struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Total uint64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"`
+ Mapped uint64 `protobuf:"varint,2,opt,name=mapped,proto3" json:"mapped,omitempty"`
+}
+
+func (x *MemoryUsageEvent) Reset() {
+ *x = MemoryUsageEvent{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_sentry_kernel_memevent_memory_events_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MemoryUsageEvent) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MemoryUsageEvent) ProtoMessage() {}
+
+func (x *MemoryUsageEvent) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_sentry_kernel_memevent_memory_events_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MemoryUsageEvent.ProtoReflect.Descriptor instead.
+func (*MemoryUsageEvent) Descriptor() ([]byte, []int) {
+ return file_pkg_sentry_kernel_memevent_memory_events_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *MemoryUsageEvent) GetTotal() uint64 {
+ if x != nil {
+ return x.Total
+ }
+ return 0
+}
+
+func (x *MemoryUsageEvent) GetMapped() uint64 {
+ if x != nil {
+ return x.Mapped
+ }
+ return 0
+}
+
+var File_pkg_sentry_kernel_memevent_memory_events_proto protoreflect.FileDescriptor
+
+var file_pkg_sentry_kernel_memevent_memory_events_proto_rawDesc = []byte{
+ 0x0a, 0x2e, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2f, 0x6b, 0x65, 0x72,
+ 0x6e, 0x65, 0x6c, 0x2f, 0x6d, 0x65, 0x6d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x6d,
+ 0x6f, 0x72, 0x79, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x06, 0x67, 0x76, 0x69, 0x73, 0x6f, 0x72, 0x22, 0x40, 0x0a, 0x10, 0x4d, 0x65, 0x6d, 0x6f,
+ 0x72, 0x79, 0x55, 0x73, 0x61, 0x67, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05,
+ 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x74, 0x6f, 0x74,
+ 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x61, 0x70, 0x70, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x06, 0x6d, 0x61, 0x70, 0x70, 0x65, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
+}
+
+var (
+ file_pkg_sentry_kernel_memevent_memory_events_proto_rawDescOnce sync.Once
+ file_pkg_sentry_kernel_memevent_memory_events_proto_rawDescData = file_pkg_sentry_kernel_memevent_memory_events_proto_rawDesc
+)
+
+func file_pkg_sentry_kernel_memevent_memory_events_proto_rawDescGZIP() []byte {
+ file_pkg_sentry_kernel_memevent_memory_events_proto_rawDescOnce.Do(func() {
+ file_pkg_sentry_kernel_memevent_memory_events_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_sentry_kernel_memevent_memory_events_proto_rawDescData)
+ })
+ return file_pkg_sentry_kernel_memevent_memory_events_proto_rawDescData
+}
+
+var file_pkg_sentry_kernel_memevent_memory_events_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_pkg_sentry_kernel_memevent_memory_events_proto_goTypes = []interface{}{
+ (*MemoryUsageEvent)(nil), // 0: gvisor.MemoryUsageEvent
+}
+var file_pkg_sentry_kernel_memevent_memory_events_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_pkg_sentry_kernel_memevent_memory_events_proto_init() }
+func file_pkg_sentry_kernel_memevent_memory_events_proto_init() {
+ if File_pkg_sentry_kernel_memevent_memory_events_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_pkg_sentry_kernel_memevent_memory_events_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MemoryUsageEvent); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_pkg_sentry_kernel_memevent_memory_events_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_pkg_sentry_kernel_memevent_memory_events_proto_goTypes,
+ DependencyIndexes: file_pkg_sentry_kernel_memevent_memory_events_proto_depIdxs,
+ MessageInfos: file_pkg_sentry_kernel_memevent_memory_events_proto_msgTypes,
+ }.Build()
+ File_pkg_sentry_kernel_memevent_memory_events_proto = out.File
+ file_pkg_sentry_kernel_memevent_memory_events_proto_rawDesc = nil
+ file_pkg_sentry_kernel_memevent_memory_events_proto_goTypes = nil
+ file_pkg_sentry_kernel_memevent_memory_events_proto_depIdxs = nil
+}
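
The generated protobuf bindings above expose MemoryUsageEvent with Total and Mapped fields alongside the usual Reset/String/ProtoReflect plumbing. A minimal usage sketch follows; it assumes the gvisor.dev/gvisor module path for the generated package (the import path is not shown in this diff) and is not code from the change itself.

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	pb "gvisor.dev/gvisor/pkg/sentry/kernel/memevent/memory_events_go_proto"
)

func main() {
	// Build an event and round-trip it through the wire format.
	ev := &pb.MemoryUsageEvent{Total: 64 << 20, Mapped: 16 << 20}
	buf, err := proto.Marshal(ev)
	if err != nil {
		log.Fatal(err)
	}
	var out pb.MemoryUsageEvent
	if err := proto.Unmarshal(buf, &out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.GetTotal(), out.GetMapped())
}
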
diff --git a/pkg/sentry/kernel/pending_signals_list.go b/pkg/sentry/kernel/pending_signals_list.go
new file mode 100644
index 000000000..d5b483b3e
--- /dev/null
+++ b/pkg/sentry/kernel/pending_signals_list.go
@@ -0,0 +1,221 @@
+package kernel
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type pendingSignalElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (pendingSignalElementMapper) linkerFor(elem *pendingSignal) *pendingSignal { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type pendingSignalList struct {
+ head *pendingSignal
+ tail *pendingSignal
+}
+
+// Reset resets list l to the empty state.
+func (l *pendingSignalList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *pendingSignalList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *pendingSignalList) Front() *pendingSignal {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *pendingSignalList) Back() *pendingSignal {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *pendingSignalList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (pendingSignalElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *pendingSignalList) PushFront(e *pendingSignal) {
+ linker := pendingSignalElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ pendingSignalElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *pendingSignalList) PushBack(e *pendingSignal) {
+ linker := pendingSignalElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ pendingSignalElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *pendingSignalList) PushBackList(m *pendingSignalList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ pendingSignalElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ pendingSignalElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *pendingSignalList) InsertAfter(b, e *pendingSignal) {
+ bLinker := pendingSignalElementMapper{}.linkerFor(b)
+ eLinker := pendingSignalElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ pendingSignalElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *pendingSignalList) InsertBefore(a, e *pendingSignal) {
+ aLinker := pendingSignalElementMapper{}.linkerFor(a)
+ eLinker := pendingSignalElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ pendingSignalElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *pendingSignalList) Remove(e *pendingSignal) {
+ linker := pendingSignalElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ pendingSignalElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ pendingSignalElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type pendingSignalEntry struct {
+ next *pendingSignal
+ prev *pendingSignal
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *pendingSignalEntry) Next() *pendingSignal {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *pendingSignalEntry) Prev() *pendingSignal {
+ return e.prev
+}
+
+// SetNext assigns 'elem' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *pendingSignalEntry) SetNext(elem *pendingSignal) {
+ e.next = elem
+}
+
+// SetPrev assigns 'elem' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *pendingSignalEntry) SetPrev(elem *pendingSignal) {
+ e.prev = elem
+}
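The generated intrusive list above (and the identical lists for ProcessGroup, waiter, and Session later in this diff) all work the same way: the element type embeds the generated Entry struct, which supplies the Next/Prev/SetNext/SetPrev methods the list operations manipulate, so insertion and removal never allocate. A minimal usage sketch, assuming pendingSignal embeds pendingSignalEntry (the usual convention for the element types of these generated lists) and that the elements are constructed elsewhere:

package kernel

// Sketch only: exercises the generated pendingSignalList API shown above.
// It assumes pendingSignal embeds pendingSignalEntry so that *pendingSignal
// plays the Linker role returned by pendingSignalElementMapper.
func pendingSignalListSketch(a, b *pendingSignal) {
	var l pendingSignalList // the zero value is an empty, ready-to-use list

	l.PushBack(a)  // O(1): the links live inside the element itself
	l.PushFront(b) // b becomes the head, a the tail

	// Iterate front to back; Next is promoted from the embedded entry.
	for e := l.Front(); e != nil; e = e.Next() {
		_ = e // do something with e
	}

	l.Remove(a) // O(1): no search needed, a carries its own prev/next links
}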
diff --git a/pkg/sentry/kernel/pipe/BUILD b/pkg/sentry/kernel/pipe/BUILD
deleted file mode 100644
index 94ebac7c5..000000000
--- a/pkg/sentry/kernel/pipe/BUILD
+++ /dev/null
@@ -1,58 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "pipe",
- srcs = [
- "device.go",
- "node.go",
- "pipe.go",
- "pipe_unsafe.go",
- "pipe_util.go",
- "reader.go",
- "reader_writer.go",
- "save_restore.go",
- "vfs.go",
- "writer.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/amutex",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/marshal/primitive",
- "//pkg/safemem",
- "//pkg/sentry/arch",
- "//pkg/sentry/device",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/fsutil",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- "//pkg/waiter",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "pipe_test",
- size = "small",
- srcs = [
- "node_test.go",
- "pipe_test.go",
- ],
- library = ":pipe",
- deps = [
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/fs",
- "//pkg/syserror",
- "//pkg/usermem",
- "//pkg/waiter",
- ],
-)
diff --git a/pkg/sentry/kernel/pipe/node_test.go b/pkg/sentry/kernel/pipe/node_test.go
deleted file mode 100644
index d25cf658e..000000000
--- a/pkg/sentry/kernel/pipe/node_test.go
+++ /dev/null
@@ -1,321 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pipe
-
-import (
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/errors/linuxerr"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/fs"
- "gvisor.dev/gvisor/pkg/syserror"
-)
-
-type sleeper struct {
- context.Context
- ch chan struct{}
-}
-
-func newSleeperContext(t *testing.T) context.Context {
- return &sleeper{
- Context: contexttest.Context(t),
- ch: make(chan struct{}),
- }
-}
-
-func (s *sleeper) SleepStart() <-chan struct{} {
- return s.ch
-}
-
-func (s *sleeper) SleepFinish(bool) {
-}
-
-func (s *sleeper) Cancel() {
- s.ch <- struct{}{}
-}
-
-func (s *sleeper) Interrupted() bool {
- return len(s.ch) != 0
-}
-
-type openResult struct {
- *fs.File
- error
-}
-
-var perms fs.FilePermissions = fs.FilePermissions{
- User: fs.PermMask{Read: true, Write: true},
-}
-
-func testOpenOrDie(ctx context.Context, t *testing.T, n fs.InodeOperations, flags fs.FileFlags, doneChan chan<- struct{}) (*fs.File, error) {
- inode := fs.NewMockInode(ctx, fs.NewMockMountSource(nil), fs.StableAttr{Type: fs.Pipe})
- d := fs.NewDirent(ctx, inode, "pipe")
- file, err := n.GetFile(ctx, d, flags)
- if err != nil {
- t.Errorf("open with flags %+v failed: %v", flags, err)
- return nil, err
- }
- if doneChan != nil {
- doneChan <- struct{}{}
- }
- return file, err
-}
-
-func testOpen(ctx context.Context, t *testing.T, n fs.InodeOperations, flags fs.FileFlags, resChan chan<- openResult) (*fs.File, error) {
- inode := fs.NewMockInode(ctx, fs.NewMockMountSource(nil), fs.StableAttr{Type: fs.Pipe})
- d := fs.NewDirent(ctx, inode, "pipe")
- file, err := n.GetFile(ctx, d, flags)
- if resChan != nil {
- resChan <- openResult{file, err}
- }
- return file, err
-}
-
-func newNamedPipe(t *testing.T) *Pipe {
- return NewPipe(true, DefaultPipeSize)
-}
-
-func newAnonPipe(t *testing.T) *Pipe {
- return NewPipe(false, DefaultPipeSize)
-}
-
-// assertRecvBlocks ensures that a recv attempt on c blocks for at least
-// blockDuration. This is useful for checking that a goroutine that is supposed
-// to be executing a blocking operation is actually blocking.
-func assertRecvBlocks(t *testing.T, c <-chan struct{}, blockDuration time.Duration, failMsg string) {
- select {
- case <-c:
- t.Fatalf(failMsg)
- case <-time.After(blockDuration):
- // Ok, blocked for the required duration.
- }
-}
-
-func TestReadOpenBlocksForWriteOpen(t *testing.T) {
- ctx := newSleeperContext(t)
- f := NewInodeOperations(ctx, perms, newNamedPipe(t))
-
- rDone := make(chan struct{})
- go testOpenOrDie(ctx, t, f, fs.FileFlags{Read: true}, rDone)
-
- // Verify that the open for read is blocking.
- assertRecvBlocks(t, rDone, time.Millisecond*100,
- "open for read not blocking with no writers")
-
- wDone := make(chan struct{})
- go testOpenOrDie(ctx, t, f, fs.FileFlags{Write: true}, wDone)
-
- <-wDone
- <-rDone
-}
-
-func TestWriteOpenBlocksForReadOpen(t *testing.T) {
- ctx := newSleeperContext(t)
- f := NewInodeOperations(ctx, perms, newNamedPipe(t))
-
- wDone := make(chan struct{})
- go testOpenOrDie(ctx, t, f, fs.FileFlags{Write: true}, wDone)
-
- // Verify that the open for write is blocking
- assertRecvBlocks(t, wDone, time.Millisecond*100,
- "open for write not blocking with no readers")
-
- rDone := make(chan struct{})
- go testOpenOrDie(ctx, t, f, fs.FileFlags{Read: true}, rDone)
-
- <-rDone
- <-wDone
-}
-
-func TestMultipleWriteOpenDoesntCountAsReadOpen(t *testing.T) {
- ctx := newSleeperContext(t)
- f := NewInodeOperations(ctx, perms, newNamedPipe(t))
-
- rDone1 := make(chan struct{})
- rDone2 := make(chan struct{})
- go testOpenOrDie(ctx, t, f, fs.FileFlags{Read: true}, rDone1)
- go testOpenOrDie(ctx, t, f, fs.FileFlags{Read: true}, rDone2)
-
- assertRecvBlocks(t, rDone1, time.Millisecond*100,
- "open for read didn't block with no writers")
- assertRecvBlocks(t, rDone2, time.Millisecond*100,
- "open for read didn't block with no writers")
-
- wDone := make(chan struct{})
- go testOpenOrDie(ctx, t, f, fs.FileFlags{Write: true}, wDone)
-
- <-wDone
- <-rDone2
- <-rDone1
-}
-
-func TestClosedReaderBlocksWriteOpen(t *testing.T) {
- ctx := newSleeperContext(t)
- f := NewInodeOperations(ctx, perms, newNamedPipe(t))
-
- rFile, _ := testOpenOrDie(ctx, t, f, fs.FileFlags{Read: true, NonBlocking: true}, nil)
- rFile.DecRef(ctx)
-
- wDone := make(chan struct{})
- // This open for write should block because the reader is now gone.
- go testOpenOrDie(ctx, t, f, fs.FileFlags{Write: true}, wDone)
- assertRecvBlocks(t, wDone, time.Millisecond*100,
- "open for write didn't block with no concurrent readers")
-
- // Open for read again. This should unblock the open for write.
- rDone := make(chan struct{})
- go testOpenOrDie(ctx, t, f, fs.FileFlags{Read: true}, rDone)
-
- <-rDone
- <-wDone
-}
-
-func TestReadWriteOpenNeverBlocks(t *testing.T) {
- ctx := newSleeperContext(t)
- f := NewInodeOperations(ctx, perms, newNamedPipe(t))
-
- rwDone := make(chan struct{})
- // Open for read-write never waits for a reader or writer, even if the
- // nonblocking flag is not set.
- go testOpenOrDie(ctx, t, f, fs.FileFlags{Read: true, Write: true, NonBlocking: false}, rwDone)
- <-rwDone
-}
-
-func TestReadWriteOpenUnblocksReadOpen(t *testing.T) {
- ctx := newSleeperContext(t)
- f := NewInodeOperations(ctx, perms, newNamedPipe(t))
-
- rDone := make(chan struct{})
- go testOpenOrDie(ctx, t, f, fs.FileFlags{Read: true}, rDone)
-
- rwDone := make(chan struct{})
- go testOpenOrDie(ctx, t, f, fs.FileFlags{Read: true, Write: true}, rwDone)
-
- <-rwDone
- <-rDone
-}
-
-func TestReadWriteOpenUnblocksWriteOpen(t *testing.T) {
- ctx := newSleeperContext(t)
- f := NewInodeOperations(ctx, perms, newNamedPipe(t))
-
- wDone := make(chan struct{})
- go testOpenOrDie(ctx, t, f, fs.FileFlags{Write: true}, wDone)
-
- rwDone := make(chan struct{})
- go testOpenOrDie(ctx, t, f, fs.FileFlags{Read: true, Write: true}, rwDone)
-
- <-rwDone
- <-wDone
-}
-
-func TestBlockedOpenIsCancellable(t *testing.T) {
- ctx := newSleeperContext(t)
- f := NewInodeOperations(ctx, perms, newNamedPipe(t))
-
- done := make(chan openResult)
- go testOpen(ctx, t, f, fs.FileFlags{Read: true}, done)
- select {
- case <-done:
- t.Fatalf("open for read didn't block with no writers")
- case <-time.After(time.Millisecond * 100):
- // Ok.
- }
-
- ctx.(*sleeper).Cancel()
- // If the cancel on the sleeper didn't work, the open for read would never
- // return.
- res := <-done
- if res.error != syserror.ErrInterrupted {
- t.Fatalf("Cancellation didn't cause GetFile to return fs.ErrInterrupted, got %v.",
- res.error)
- }
-}
-
-func TestNonblockingReadOpenFileNoWriters(t *testing.T) {
- ctx := newSleeperContext(t)
- f := NewInodeOperations(ctx, perms, newNamedPipe(t))
-
- if _, err := testOpen(ctx, t, f, fs.FileFlags{Read: true, NonBlocking: true}, nil); err != nil {
- t.Fatalf("Nonblocking open for read failed with error %v.", err)
- }
-}
-
-func TestNonblockingWriteOpenFileNoReaders(t *testing.T) {
- ctx := newSleeperContext(t)
- f := NewInodeOperations(ctx, perms, newNamedPipe(t))
-
- if _, err := testOpen(ctx, t, f, fs.FileFlags{Write: true, NonBlocking: true}, nil); !linuxerr.Equals(linuxerr.ENXIO, err) {
- t.Fatalf("Nonblocking open for write failed unexpected error %v.", err)
- }
-}
-
-func TestNonBlockingReadOpenWithWriter(t *testing.T) {
- ctx := newSleeperContext(t)
- f := NewInodeOperations(ctx, perms, newNamedPipe(t))
-
- wDone := make(chan struct{})
- go testOpenOrDie(ctx, t, f, fs.FileFlags{Write: true}, wDone)
-
- // Open for write blocks since there are no readers yet.
- assertRecvBlocks(t, wDone, time.Millisecond*100,
- "Open for write didn't block with no reader.")
-
- if _, err := testOpen(ctx, t, f, fs.FileFlags{Read: true, NonBlocking: true}, nil); err != nil {
- t.Fatalf("Nonblocking open for read failed with error %v.", err)
- }
-
- // Open for write should now be unblocked.
- <-wDone
-}
-
-func TestNonBlockingWriteOpenWithReader(t *testing.T) {
- ctx := newSleeperContext(t)
- f := NewInodeOperations(ctx, perms, newNamedPipe(t))
-
- rDone := make(chan struct{})
- go testOpenOrDie(ctx, t, f, fs.FileFlags{Read: true}, rDone)
-
- // Open for read blocks, since no writer yet.
- assertRecvBlocks(t, rDone, time.Millisecond*100,
- "Open for reader didn't block with no writer.")
-
- if _, err := testOpen(ctx, t, f, fs.FileFlags{Write: true, NonBlocking: true}, nil); err != nil {
- t.Fatalf("Nonblocking open for write failed with error %v.", err)
- }
-
- // Open for read should now be unblocked.
- <-rDone
-}
-
-func TestAnonReadOpen(t *testing.T) {
- ctx := newSleeperContext(t)
- f := NewInodeOperations(ctx, perms, newAnonPipe(t))
-
- if _, err := testOpen(ctx, t, f, fs.FileFlags{Read: true}, nil); err != nil {
- t.Fatalf("open anon pipe for read failed: %v", err)
- }
-}
-
-func TestAnonWriteOpen(t *testing.T) {
- ctx := newSleeperContext(t)
- f := NewInodeOperations(ctx, perms, newAnonPipe(t))
-
- if _, err := testOpen(ctx, t, f, fs.FileFlags{Write: true}, nil); err != nil {
- t.Fatalf("open anon pipe for write failed: %v", err)
- }
-}
diff --git a/pkg/sentry/kernel/pipe/pipe_state_autogen.go b/pkg/sentry/kernel/pipe/pipe_state_autogen.go
new file mode 100644
index 000000000..57451e951
--- /dev/null
+++ b/pkg/sentry/kernel/pipe/pipe_state_autogen.go
@@ -0,0 +1,227 @@
+// automatically generated by stateify.
+
+package pipe
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (i *inodeOperations) StateTypeName() string {
+ return "pkg/sentry/kernel/pipe.inodeOperations"
+}
+
+func (i *inodeOperations) StateFields() []string {
+ return []string{
+ "InodeSimpleAttributes",
+ "p",
+ }
+}
+
+func (i *inodeOperations) beforeSave() {}
+
+// +checklocksignore
+func (i *inodeOperations) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.InodeSimpleAttributes)
+ stateSinkObject.Save(1, &i.p)
+}
+
+func (i *inodeOperations) afterLoad() {}
+
+// +checklocksignore
+func (i *inodeOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.InodeSimpleAttributes)
+ stateSourceObject.Load(1, &i.p)
+}
+
+func (p *Pipe) StateTypeName() string {
+ return "pkg/sentry/kernel/pipe.Pipe"
+}
+
+func (p *Pipe) StateFields() []string {
+ return []string{
+ "isNamed",
+ "readers",
+ "writers",
+ "buf",
+ "off",
+ "size",
+ "max",
+ "hadWriter",
+ }
+}
+
+func (p *Pipe) beforeSave() {}
+
+// +checklocksignore
+func (p *Pipe) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+ stateSinkObject.Save(0, &p.isNamed)
+ stateSinkObject.Save(1, &p.readers)
+ stateSinkObject.Save(2, &p.writers)
+ stateSinkObject.Save(3, &p.buf)
+ stateSinkObject.Save(4, &p.off)
+ stateSinkObject.Save(5, &p.size)
+ stateSinkObject.Save(6, &p.max)
+ stateSinkObject.Save(7, &p.hadWriter)
+}
+
+// +checklocksignore
+func (p *Pipe) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &p.isNamed)
+ stateSourceObject.Load(1, &p.readers)
+ stateSourceObject.Load(2, &p.writers)
+ stateSourceObject.Load(3, &p.buf)
+ stateSourceObject.Load(4, &p.off)
+ stateSourceObject.Load(5, &p.size)
+ stateSourceObject.Load(6, &p.max)
+ stateSourceObject.Load(7, &p.hadWriter)
+ stateSourceObject.AfterLoad(p.afterLoad)
+}
+
+func (r *Reader) StateTypeName() string {
+ return "pkg/sentry/kernel/pipe.Reader"
+}
+
+func (r *Reader) StateFields() []string {
+ return []string{
+ "ReaderWriter",
+ }
+}
+
+func (r *Reader) beforeSave() {}
+
+// +checklocksignore
+func (r *Reader) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.ReaderWriter)
+}
+
+func (r *Reader) afterLoad() {}
+
+// +checklocksignore
+func (r *Reader) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.ReaderWriter)
+}
+
+func (rw *ReaderWriter) StateTypeName() string {
+ return "pkg/sentry/kernel/pipe.ReaderWriter"
+}
+
+func (rw *ReaderWriter) StateFields() []string {
+ return []string{
+ "Pipe",
+ }
+}
+
+func (rw *ReaderWriter) beforeSave() {}
+
+// +checklocksignore
+func (rw *ReaderWriter) StateSave(stateSinkObject state.Sink) {
+ rw.beforeSave()
+ stateSinkObject.Save(0, &rw.Pipe)
+}
+
+func (rw *ReaderWriter) afterLoad() {}
+
+// +checklocksignore
+func (rw *ReaderWriter) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &rw.Pipe)
+}
+
+func (vp *VFSPipe) StateTypeName() string {
+ return "pkg/sentry/kernel/pipe.VFSPipe"
+}
+
+func (vp *VFSPipe) StateFields() []string {
+ return []string{
+ "pipe",
+ }
+}
+
+func (vp *VFSPipe) beforeSave() {}
+
+// +checklocksignore
+func (vp *VFSPipe) StateSave(stateSinkObject state.Sink) {
+ vp.beforeSave()
+ stateSinkObject.Save(0, &vp.pipe)
+}
+
+func (vp *VFSPipe) afterLoad() {}
+
+// +checklocksignore
+func (vp *VFSPipe) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &vp.pipe)
+}
+
+func (fd *VFSPipeFD) StateTypeName() string {
+ return "pkg/sentry/kernel/pipe.VFSPipeFD"
+}
+
+func (fd *VFSPipeFD) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "DentryMetadataFileDescriptionImpl",
+ "LockFD",
+ "pipe",
+ }
+}
+
+func (fd *VFSPipeFD) beforeSave() {}
+
+// +checklocksignore
+func (fd *VFSPipeFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.vfsfd)
+ stateSinkObject.Save(1, &fd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &fd.DentryMetadataFileDescriptionImpl)
+ stateSinkObject.Save(3, &fd.LockFD)
+ stateSinkObject.Save(4, &fd.pipe)
+}
+
+func (fd *VFSPipeFD) afterLoad() {}
+
+// +checklocksignore
+func (fd *VFSPipeFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.vfsfd)
+ stateSourceObject.Load(1, &fd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &fd.DentryMetadataFileDescriptionImpl)
+ stateSourceObject.Load(3, &fd.LockFD)
+ stateSourceObject.Load(4, &fd.pipe)
+}
+
+func (w *Writer) StateTypeName() string {
+ return "pkg/sentry/kernel/pipe.Writer"
+}
+
+func (w *Writer) StateFields() []string {
+ return []string{
+ "ReaderWriter",
+ }
+}
+
+func (w *Writer) beforeSave() {}
+
+// +checklocksignore
+func (w *Writer) StateSave(stateSinkObject state.Sink) {
+ w.beforeSave()
+ stateSinkObject.Save(0, &w.ReaderWriter)
+}
+
+func (w *Writer) afterLoad() {}
+
+// +checklocksignore
+func (w *Writer) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &w.ReaderWriter)
+}
+
+func init() {
+ state.Register((*inodeOperations)(nil))
+ state.Register((*Pipe)(nil))
+ state.Register((*Reader)(nil))
+ state.Register((*ReaderWriter)(nil))
+ state.Register((*VFSPipe)(nil))
+ state.Register((*VFSPipeFD)(nil))
+ state.Register((*Writer)(nil))
+}
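The file above is what stateify emits for each type marked +stateify savable: every saved field gets a numbered slot in StateSave/StateLoad, and init registers the type so the checkpoint machinery can find it by StateTypeName. A hand-written sketch of the same protocol for a hypothetical type, using only the state.Sink and state.Source calls that appear in the generated code above (illustrative, not part of this change):

package pipe

import "gvisor.dev/gvisor/pkg/state"

// counter is a hypothetical savable type used only to illustrate the
// save/restore protocol that the generated methods above follow.
type counter struct {
	hits int64
}

func (c *counter) StateTypeName() string { return "pkg/sentry/kernel/pipe.counter" }

func (c *counter) StateFields() []string { return []string{"hits"} }

func (c *counter) StateSave(stateSinkObject state.Sink) {
	stateSinkObject.Save(0, &c.hits) // slot numbers follow StateFields order
}

func (c *counter) StateLoad(stateSourceObject state.Source) {
	stateSourceObject.Load(0, &c.hits)
	// A type that needs post-restore fixup would also call
	// stateSourceObject.AfterLoad(...) here, as Pipe does above.
}

// In generated code, an init function would also call
// state.Register((*counter)(nil)) so the type is known to the serializer.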
diff --git a/pkg/sentry/kernel/pipe/pipe_test.go b/pkg/sentry/kernel/pipe/pipe_test.go
deleted file mode 100644
index 867f4a76b..000000000
--- a/pkg/sentry/kernel/pipe/pipe_test.go
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pipe
-
-import (
- "bytes"
- "testing"
-
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-func TestPipeRW(t *testing.T) {
- ctx := contexttest.Context(t)
- r, w := NewConnectedPipe(ctx, 65536)
- defer r.DecRef(ctx)
- defer w.DecRef(ctx)
-
- msg := []byte("here's some bytes")
- wantN := int64(len(msg))
- n, err := w.Writev(ctx, usermem.BytesIOSequence(msg))
- if n != wantN || err != nil {
- t.Fatalf("Writev: got (%d, %v), wanted (%d, nil)", n, err, wantN)
- }
-
- buf := make([]byte, len(msg))
- n, err = r.Readv(ctx, usermem.BytesIOSequence(buf))
- if n != wantN || err != nil || !bytes.Equal(buf, msg) {
- t.Fatalf("Readv: got (%d, %v) %q, wanted (%d, nil) %q", n, err, buf, wantN, msg)
- }
-}
-
-func TestPipeReadBlock(t *testing.T) {
- ctx := contexttest.Context(t)
- r, w := NewConnectedPipe(ctx, 65536)
- defer r.DecRef(ctx)
- defer w.DecRef(ctx)
-
- n, err := r.Readv(ctx, usermem.BytesIOSequence(make([]byte, 1)))
- if n != 0 || err != syserror.ErrWouldBlock {
- t.Fatalf("Readv: got (%d, %v), wanted (0, %v)", n, err, syserror.ErrWouldBlock)
- }
-}
-
-func TestPipeWriteBlock(t *testing.T) {
- const atomicIOBytes = 2
- const capacity = MinimumPipeSize
-
- ctx := contexttest.Context(t)
- r, w := NewConnectedPipe(ctx, capacity)
- defer r.DecRef(ctx)
- defer w.DecRef(ctx)
-
- msg := make([]byte, capacity+1)
- n, err := w.Writev(ctx, usermem.BytesIOSequence(msg))
- if wantN, wantErr := int64(capacity), syserror.ErrWouldBlock; n != wantN || err != wantErr {
- t.Fatalf("Writev: got (%d, %v), wanted (%d, %v)", n, err, wantN, wantErr)
- }
-}
-
-func TestPipeWriteUntilEnd(t *testing.T) {
- const atomicIOBytes = 2
-
- ctx := contexttest.Context(t)
- r, w := NewConnectedPipe(ctx, atomicIOBytes)
- defer r.DecRef(ctx)
- defer w.DecRef(ctx)
-
- msg := []byte("here's some bytes")
-
- wDone := make(chan struct{}, 0)
- rDone := make(chan struct{}, 0)
- defer func() {
- // Signal the reader to stop and wait until it does so.
- close(wDone)
- <-rDone
- }()
-
- go func() {
- defer close(rDone)
- // Read from r until done is closed.
- ctx := contexttest.Context(t)
- buf := make([]byte, len(msg)+1)
- dst := usermem.BytesIOSequence(buf)
- e, ch := waiter.NewChannelEntry(nil)
- r.EventRegister(&e, waiter.ReadableEvents)
- defer r.EventUnregister(&e)
- for {
- n, err := r.Readv(ctx, dst)
- dst = dst.DropFirst64(n)
- if err == syserror.ErrWouldBlock {
- select {
- case <-ch:
- continue
- case <-wDone:
- // We expect to have 1 byte left in dst since len(buf) ==
- // len(msg)+1.
- if dst.NumBytes() != 1 || !bytes.Equal(buf[:len(msg)], msg) {
- t.Errorf("Reader: got %q (%d bytes remaining), wanted %q", buf, dst.NumBytes(), msg)
- }
- return
- }
- }
- if err != nil {
- t.Errorf("Readv: got unexpected error %v", err)
- return
- }
- }
- }()
-
- src := usermem.BytesIOSequence(msg)
- e, ch := waiter.NewChannelEntry(nil)
- w.EventRegister(&e, waiter.WritableEvents)
- defer w.EventUnregister(&e)
- for src.NumBytes() != 0 {
- n, err := w.Writev(ctx, src)
- src = src.DropFirst64(n)
- if err == syserror.ErrWouldBlock {
- <-ch
- continue
- }
- if err != nil {
- t.Fatalf("Writev: got (%d, %v)", n, err)
- }
- }
-}
diff --git a/pkg/sentry/kernel/pipe/pipe_unsafe_state_autogen.go b/pkg/sentry/kernel/pipe/pipe_unsafe_state_autogen.go
new file mode 100644
index 000000000..d3b40feb4
--- /dev/null
+++ b/pkg/sentry/kernel/pipe/pipe_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package pipe
diff --git a/pkg/sentry/kernel/process_group_list.go b/pkg/sentry/kernel/process_group_list.go
new file mode 100644
index 000000000..9c493504d
--- /dev/null
+++ b/pkg/sentry/kernel/process_group_list.go
@@ -0,0 +1,221 @@
+package kernel
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type processGroupElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (processGroupElementMapper) linkerFor(elem *ProcessGroup) *ProcessGroup { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type processGroupList struct {
+ head *ProcessGroup
+ tail *ProcessGroup
+}
+
+// Reset resets list l to the empty state.
+func (l *processGroupList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *processGroupList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *processGroupList) Front() *ProcessGroup {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *processGroupList) Back() *ProcessGroup {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *processGroupList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (processGroupElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *processGroupList) PushFront(e *ProcessGroup) {
+ linker := processGroupElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ processGroupElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *processGroupList) PushBack(e *ProcessGroup) {
+ linker := processGroupElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ processGroupElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *processGroupList) PushBackList(m *processGroupList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ processGroupElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ processGroupElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *processGroupList) InsertAfter(b, e *ProcessGroup) {
+ bLinker := processGroupElementMapper{}.linkerFor(b)
+ eLinker := processGroupElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ processGroupElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *processGroupList) InsertBefore(a, e *ProcessGroup) {
+ aLinker := processGroupElementMapper{}.linkerFor(a)
+ eLinker := processGroupElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ processGroupElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *processGroupList) Remove(e *ProcessGroup) {
+ linker := processGroupElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ processGroupElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ processGroupElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type processGroupEntry struct {
+ next *ProcessGroup
+ prev *ProcessGroup
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *processGroupEntry) Next() *ProcessGroup {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *processGroupEntry) Prev() *ProcessGroup {
+ return e.prev
+}
+
+// SetNext assigns 'elem' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *processGroupEntry) SetNext(elem *ProcessGroup) {
+ e.next = elem
+}
+
+// SetPrev assigns 'elem' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *processGroupEntry) SetPrev(elem *ProcessGroup) {
+ e.prev = elem
+}
diff --git a/pkg/sentry/kernel/process_group_refs.go b/pkg/sentry/kernel/process_group_refs.go
new file mode 100644
index 000000000..cfd73315f
--- /dev/null
+++ b/pkg/sentry/kernel/process_group_refs.go
@@ -0,0 +1,140 @@
+package kernel
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const ProcessGroupenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var ProcessGroupobj *ProcessGroup
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type ProcessGroupRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *ProcessGroupRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *ProcessGroupRefs) RefType() string {
+ return fmt.Sprintf("%T", ProcessGroupobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *ProcessGroupRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *ProcessGroupRefs) LogRefs() bool {
+ return ProcessGroupenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *ProcessGroupRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *ProcessGroupRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if ProcessGroupenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.TryRefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *ProcessGroupRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if ProcessGroupenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *ProcessGroupRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if ProcessGroupenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *ProcessGroupRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
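ProcessGroupRefs packs two counters into a single int64: the low 32 bits hold real references and the high 32 bits hold short-lived speculative references that TryIncRef uses to avoid a CompareAndSwap loop. A usage sketch of the API defined above; the owner type and the counts in the comments are illustrative only:

package kernel

// pgOwner is a hypothetical owner type; embedding the generated Refs
// struct is how these counters are normally attached to an object.
type pgOwner struct {
	ProcessGroupRefs
}

func processGroupRefsSketch() {
	o := &pgOwner{}
	o.InitRefs() // count = 1; registers for leak checking

	o.IncRef() // count = 2

	// TryIncRef first adds 1<<32 (a speculative reference in the upper
	// 32 bits), checks that the real count in the lower 32 bits is still
	// non-zero, and only then converts the speculative reference into a
	// real one.
	if o.TryIncRef() { // count = 3 on success
		o.DecRef(nil) // count = 2
	}

	o.DecRef(nil) // count = 1
	o.DecRef(func() {
		// Runs exactly once, when the count reaches zero and the object
		// is unregistered from leak checking.
	})
}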
diff --git a/pkg/sentry/kernel/sched/BUILD b/pkg/sentry/kernel/sched/BUILD
deleted file mode 100644
index 1b82e087b..000000000
--- a/pkg/sentry/kernel/sched/BUILD
+++ /dev/null
@@ -1,19 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "sched",
- srcs = [
- "cpuset.go",
- "sched.go",
- ],
- visibility = ["//pkg/sentry:internal"],
-)
-
-go_test(
- name = "sched_test",
- size = "small",
- srcs = ["cpuset_test.go"],
- library = ":sched",
-)
diff --git a/pkg/sentry/kernel/sched/cpuset_test.go b/pkg/sentry/kernel/sched/cpuset_test.go
deleted file mode 100644
index 3af9f1197..000000000
--- a/pkg/sentry/kernel/sched/cpuset_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package sched
-
-import (
- "testing"
-)
-
-func TestNumCPUs(t *testing.T) {
- for i := uint(0); i < 1024; i++ {
- c := NewCPUSet(i)
- for j := uint(0); j < i; j++ {
- c.Set(j)
- }
- n := c.NumCPUs()
- if n != i {
- t.Errorf("got wrong number of cpus %d, want %d", n, i)
- }
- }
-}
-
-func TestClearAbove(t *testing.T) {
- const n = 1024
- c := NewFullCPUSet(n)
- for i := uint(0); i < n; i++ {
- cpu := n - i
- c.ClearAbove(cpu)
- if got := c.NumCPUs(); got != cpu {
- t.Errorf("iteration %d: got %d cpus, wanted %d", i, got, cpu)
- }
- }
-}
diff --git a/pkg/sentry/kernel/sched/sched_state_autogen.go b/pkg/sentry/kernel/sched/sched_state_autogen.go
new file mode 100644
index 000000000..9705ca79d
--- /dev/null
+++ b/pkg/sentry/kernel/sched/sched_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package sched
diff --git a/pkg/sentry/kernel/semaphore/BUILD b/pkg/sentry/kernel/semaphore/BUILD
deleted file mode 100644
index a787c00a8..000000000
--- a/pkg/sentry/kernel/semaphore/BUILD
+++ /dev/null
@@ -1,50 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "waiter_list",
- out = "waiter_list.go",
- package = "semaphore",
- prefix = "waiter",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*waiter",
- "Linker": "*waiter",
- },
-)
-
-go_library(
- name = "semaphore",
- srcs = [
- "semaphore.go",
- "waiter_list.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/log",
- "//pkg/sentry/fs",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/time",
- "//pkg/sync",
- "//pkg/syserror",
- ],
-)
-
-go_test(
- name = "semaphore_test",
- size = "small",
- srcs = ["semaphore_test.go"],
- library = ":semaphore",
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/kernel/auth",
- "//pkg/syserror",
- ],
-)
diff --git a/pkg/sentry/kernel/semaphore/semaphore_state_autogen.go b/pkg/sentry/kernel/semaphore/semaphore_state_autogen.go
new file mode 100644
index 000000000..f90fbff34
--- /dev/null
+++ b/pkg/sentry/kernel/semaphore/semaphore_state_autogen.go
@@ -0,0 +1,220 @@
+// automatically generated by stateify.
+
+package semaphore
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (r *Registry) StateTypeName() string {
+ return "pkg/sentry/kernel/semaphore.Registry"
+}
+
+func (r *Registry) StateFields() []string {
+ return []string{
+ "userNS",
+ "semaphores",
+ "lastIDUsed",
+ "indexes",
+ }
+}
+
+func (r *Registry) beforeSave() {}
+
+// +checklocksignore
+func (r *Registry) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.userNS)
+ stateSinkObject.Save(1, &r.semaphores)
+ stateSinkObject.Save(2, &r.lastIDUsed)
+ stateSinkObject.Save(3, &r.indexes)
+}
+
+func (r *Registry) afterLoad() {}
+
+// +checklocksignore
+func (r *Registry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.userNS)
+ stateSourceObject.Load(1, &r.semaphores)
+ stateSourceObject.Load(2, &r.lastIDUsed)
+ stateSourceObject.Load(3, &r.indexes)
+}
+
+func (s *Set) StateTypeName() string {
+ return "pkg/sentry/kernel/semaphore.Set"
+}
+
+func (s *Set) StateFields() []string {
+ return []string{
+ "registry",
+ "ID",
+ "key",
+ "creator",
+ "owner",
+ "perms",
+ "opTime",
+ "changeTime",
+ "sems",
+ "dead",
+ }
+}
+
+func (s *Set) beforeSave() {}
+
+// +checklocksignore
+func (s *Set) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.registry)
+ stateSinkObject.Save(1, &s.ID)
+ stateSinkObject.Save(2, &s.key)
+ stateSinkObject.Save(3, &s.creator)
+ stateSinkObject.Save(4, &s.owner)
+ stateSinkObject.Save(5, &s.perms)
+ stateSinkObject.Save(6, &s.opTime)
+ stateSinkObject.Save(7, &s.changeTime)
+ stateSinkObject.Save(8, &s.sems)
+ stateSinkObject.Save(9, &s.dead)
+}
+
+func (s *Set) afterLoad() {}
+
+// +checklocksignore
+func (s *Set) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.registry)
+ stateSourceObject.Load(1, &s.ID)
+ stateSourceObject.Load(2, &s.key)
+ stateSourceObject.Load(3, &s.creator)
+ stateSourceObject.Load(4, &s.owner)
+ stateSourceObject.Load(5, &s.perms)
+ stateSourceObject.Load(6, &s.opTime)
+ stateSourceObject.Load(7, &s.changeTime)
+ stateSourceObject.Load(8, &s.sems)
+ stateSourceObject.Load(9, &s.dead)
+}
+
+func (s *sem) StateTypeName() string {
+ return "pkg/sentry/kernel/semaphore.sem"
+}
+
+func (s *sem) StateFields() []string {
+ return []string{
+ "value",
+ "pid",
+ }
+}
+
+func (s *sem) beforeSave() {}
+
+// +checklocksignore
+func (s *sem) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ if !state.IsZeroValue(&s.waiters) {
+ state.Failf("waiters is %#v, expected zero", &s.waiters)
+ }
+ stateSinkObject.Save(0, &s.value)
+ stateSinkObject.Save(1, &s.pid)
+}
+
+func (s *sem) afterLoad() {}
+
+// +checklocksignore
+func (s *sem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.value)
+ stateSourceObject.Load(1, &s.pid)
+}
+
+func (w *waiter) StateTypeName() string {
+ return "pkg/sentry/kernel/semaphore.waiter"
+}
+
+func (w *waiter) StateFields() []string {
+ return []string{
+ "waiterEntry",
+ "value",
+ "ch",
+ }
+}
+
+func (w *waiter) beforeSave() {}
+
+// +checklocksignore
+func (w *waiter) StateSave(stateSinkObject state.Sink) {
+ w.beforeSave()
+ stateSinkObject.Save(0, &w.waiterEntry)
+ stateSinkObject.Save(1, &w.value)
+ stateSinkObject.Save(2, &w.ch)
+}
+
+func (w *waiter) afterLoad() {}
+
+// +checklocksignore
+func (w *waiter) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &w.waiterEntry)
+ stateSourceObject.Load(1, &w.value)
+ stateSourceObject.Load(2, &w.ch)
+}
+
+func (l *waiterList) StateTypeName() string {
+ return "pkg/sentry/kernel/semaphore.waiterList"
+}
+
+func (l *waiterList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *waiterList) beforeSave() {}
+
+// +checklocksignore
+func (l *waiterList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *waiterList) afterLoad() {}
+
+// +checklocksignore
+func (l *waiterList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *waiterEntry) StateTypeName() string {
+ return "pkg/sentry/kernel/semaphore.waiterEntry"
+}
+
+func (e *waiterEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *waiterEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *waiterEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *waiterEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *waiterEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func init() {
+ state.Register((*Registry)(nil))
+ state.Register((*Set)(nil))
+ state.Register((*sem)(nil))
+ state.Register((*waiter)(nil))
+ state.Register((*waiterList)(nil))
+ state.Register((*waiterEntry)(nil))
+}
diff --git a/pkg/sentry/kernel/semaphore/semaphore_test.go b/pkg/sentry/kernel/semaphore/semaphore_test.go
deleted file mode 100644
index e47acefdf..000000000
--- a/pkg/sentry/kernel/semaphore/semaphore_test.go
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package semaphore
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/syserror"
-)
-
-func executeOps(ctx context.Context, t *testing.T, set *Set, ops []linux.Sembuf, block bool) chan struct{} {
- ch, _, err := set.executeOps(ctx, ops, 123)
- if err != nil {
- t.Fatalf("ExecuteOps(ops) failed, err: %v, ops: %+v", err, ops)
- }
- if block {
- if ch == nil {
- t.Fatalf("ExecuteOps(ops) got: nil, expected: !nil, ops: %+v", ops)
- }
- if signalled(ch) {
- t.Fatalf("ExecuteOps(ops) channel should not have been signalled, ops: %+v", ops)
- }
- } else {
- if ch != nil {
- t.Fatalf("ExecuteOps(ops) got: %v, expected: nil, ops: %+v", ch, ops)
- }
- }
- return ch
-}
-
-func signalled(ch chan struct{}) bool {
- select {
- case <-ch:
- return true
- default:
- return false
- }
-}
-
-func TestBasic(t *testing.T) {
- ctx := contexttest.Context(t)
- set := &Set{ID: 123, sems: make([]sem, 1)}
- ops := []linux.Sembuf{
- {SemOp: 1},
- }
- executeOps(ctx, t, set, ops, false)
-
- ops[0].SemOp = -1
- executeOps(ctx, t, set, ops, false)
-
- ops[0].SemOp = -1
- ch1 := executeOps(ctx, t, set, ops, true)
-
- ops[0].SemOp = 1
- executeOps(ctx, t, set, ops, false)
- if !signalled(ch1) {
- t.Fatalf("ExecuteOps(ops) channel should not have been signalled, ops: %+v", ops)
- }
-}
-
-func TestWaitForZero(t *testing.T) {
- ctx := contexttest.Context(t)
- set := &Set{ID: 123, sems: make([]sem, 1)}
- ops := []linux.Sembuf{
- {SemOp: 0},
- }
- executeOps(ctx, t, set, ops, false)
-
- ops[0].SemOp = -2
- ch1 := executeOps(ctx, t, set, ops, true)
-
- ops[0].SemOp = 0
- executeOps(ctx, t, set, ops, false)
-
- ops[0].SemOp = 1
- executeOps(ctx, t, set, ops, false)
-
- ops[0].SemOp = 0
- chZero1 := executeOps(ctx, t, set, ops, true)
-
- ops[0].SemOp = 0
- chZero2 := executeOps(ctx, t, set, ops, true)
-
- ops[0].SemOp = 1
- executeOps(ctx, t, set, ops, false)
- if !signalled(ch1) {
- t.Fatalf("ExecuteOps(ops) channel should have been signalled, ops: %+v, set: %+v", ops, set)
- }
-
- ops[0].SemOp = -2
- executeOps(ctx, t, set, ops, false)
- if !signalled(chZero1) {
- t.Fatalf("ExecuteOps(ops) channel zero 1 should have been signalled, ops: %+v, set: %+v", ops, set)
- }
- if !signalled(chZero2) {
- t.Fatalf("ExecuteOps(ops) channel zero 2 should have been signalled, ops: %+v, set: %+v", ops, set)
- }
-}
-
-func TestNoWait(t *testing.T) {
- ctx := contexttest.Context(t)
- set := &Set{ID: 123, sems: make([]sem, 1)}
- ops := []linux.Sembuf{
- {SemOp: 1},
- }
- executeOps(ctx, t, set, ops, false)
-
- ops[0].SemOp = -2
- ops[0].SemFlg = linux.IPC_NOWAIT
- if _, _, err := set.executeOps(ctx, ops, 123); err != syserror.ErrWouldBlock {
- t.Fatalf("ExecuteOps(ops) wrong result, got: %v, expected: %v", err, syserror.ErrWouldBlock)
- }
-
- ops[0].SemOp = 0
- ops[0].SemFlg = linux.IPC_NOWAIT
- if _, _, err := set.executeOps(ctx, ops, 123); err != syserror.ErrWouldBlock {
- t.Fatalf("ExecuteOps(ops) wrong result, got: %v, expected: %v", err, syserror.ErrWouldBlock)
- }
-}
-
-func TestUnregister(t *testing.T) {
- ctx := contexttest.Context(t)
- r := NewRegistry(auth.NewRootUserNamespace())
- set, err := r.FindOrCreate(ctx, 123, 2, linux.FileMode(0x600), true, true, true)
- if err != nil {
- t.Fatalf("FindOrCreate() failed, err: %v", err)
- }
- if got := r.FindByID(set.ID); got.ID != set.ID {
- t.Fatalf("FindById(%d) failed, got: %+v, expected: %+v", set.ID, got, set)
- }
-
- ops := []linux.Sembuf{
- {SemOp: -1},
- }
- chs := make([]chan struct{}, 0, 5)
- for i := 0; i < 5; i++ {
- ch := executeOps(ctx, t, set, ops, true)
- chs = append(chs, ch)
- }
-
- creds := auth.CredentialsFromContext(ctx)
- if err := r.RemoveID(set.ID, creds); err != nil {
- t.Fatalf("RemoveID(%d) failed, err: %v", set.ID, err)
- }
- if !set.dead {
- t.Fatalf("set is not dead: %+v", set)
- }
- if got := r.FindByID(set.ID); got != nil {
- t.Fatalf("FindById(%d) failed, got: %+v, expected: nil", set.ID, got)
- }
- for i, ch := range chs {
- if !signalled(ch) {
- t.Fatalf("channel %d should have been signalled", i)
- }
- }
-}
diff --git a/pkg/sentry/kernel/semaphore/waiter_list.go b/pkg/sentry/kernel/semaphore/waiter_list.go
new file mode 100644
index 000000000..51586d43f
--- /dev/null
+++ b/pkg/sentry/kernel/semaphore/waiter_list.go
@@ -0,0 +1,221 @@
+package semaphore
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type waiterElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (waiterElementMapper) linkerFor(elem *waiter) *waiter { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type waiterList struct {
+ head *waiter
+ tail *waiter
+}
+
+// Reset resets list l to the empty state.
+func (l *waiterList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *waiterList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *waiterList) Front() *waiter {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *waiterList) Back() *waiter {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *waiterList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (waiterElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *waiterList) PushFront(e *waiter) {
+ linker := waiterElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ waiterElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *waiterList) PushBack(e *waiter) {
+ linker := waiterElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ waiterElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *waiterList) PushBackList(m *waiterList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ waiterElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ waiterElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *waiterList) InsertAfter(b, e *waiter) {
+ bLinker := waiterElementMapper{}.linkerFor(b)
+ eLinker := waiterElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ waiterElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *waiterList) InsertBefore(a, e *waiter) {
+ aLinker := waiterElementMapper{}.linkerFor(a)
+ eLinker := waiterElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ waiterElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *waiterList) Remove(e *waiter) {
+ linker := waiterElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ waiterElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ waiterElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type waiterEntry struct {
+ next *waiter
+ prev *waiter
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *waiterEntry) Next() *waiter {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *waiterEntry) Prev() *waiter {
+ return e.prev
+}
+
+// SetNext assigns 'elem' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *waiterEntry) SetNext(elem *waiter) {
+ e.next = elem
+}
+
+// SetPrev assigns 'elem' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *waiterEntry) SetPrev(elem *waiter) {
+ e.prev = elem
+}
diff --git a/pkg/sentry/kernel/seqatomic_taskgoroutineschedinfo_unsafe.go b/pkg/sentry/kernel/seqatomic_taskgoroutineschedinfo_unsafe.go
new file mode 100644
index 000000000..d45cc0a0f
--- /dev/null
+++ b/pkg/sentry/kernel/seqatomic_taskgoroutineschedinfo_unsafe.go
@@ -0,0 +1,38 @@
+package kernel
+
+import (
+ "unsafe"
+
+ "gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/sync"
+)
+
+// SeqAtomicLoad returns a copy of *ptr, ensuring that the read does not race
+// with any writer critical sections in seq.
+//
+//go:nosplit
+func SeqAtomicLoadTaskGoroutineSchedInfo(seq *sync.SeqCount, ptr *TaskGoroutineSchedInfo) TaskGoroutineSchedInfo {
+ for {
+ if val, ok := SeqAtomicTryLoadTaskGoroutineSchedInfo(seq, seq.BeginRead(), ptr); ok {
+ return val
+ }
+ }
+}
+
+// SeqAtomicTryLoad returns a copy of *ptr while in a reader critical section
+// in seq initiated by a call to seq.BeginRead() that returned epoch. If the
+// read would race with a writer critical section, SeqAtomicTryLoad returns
+// (unspecified, false).
+//
+//go:nosplit
+func SeqAtomicTryLoadTaskGoroutineSchedInfo(seq *sync.SeqCount, epoch sync.SeqCountEpoch, ptr *TaskGoroutineSchedInfo) (val TaskGoroutineSchedInfo, ok bool) {
+ if sync.RaceEnabled {
+
+ gohacks.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
+ } else {
+
+ val = *ptr
+ }
+ ok = seq.ReadOk(epoch)
+ return
+}
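SeqAtomicLoadTaskGoroutineSchedInfo gives readers a consistent copy of a TaskGoroutineSchedInfo that writers update under a sync.SeqCount: the reader retries until an entire copy completes without overlapping a writer critical section, so no lock is held on the read side. A sketch of the intended calling pattern; the holder type is hypothetical, and the BeginWrite/EndWrite writer calls are the pkg/sync SeqCount API assumed here rather than shown in this diff:

package kernel

import "gvisor.dev/gvisor/pkg/sync"

// schedInfoCell is a hypothetical holder pairing a SeqCount with the
// value it protects, mirroring how a Task guards its sched info.
type schedInfoCell struct {
	seq  sync.SeqCount
	info TaskGoroutineSchedInfo
}

// write mutates info inside a writer critical section; a concurrent
// SeqAtomicLoadTaskGoroutineSchedInfo will retry until it ends.
func (c *schedInfoCell) write(newInfo TaskGoroutineSchedInfo) {
	c.seq.BeginWrite() // assumed pkg/sync writer API
	c.info = newInfo
	c.seq.EndWrite()
}

// read returns a consistent snapshot without taking any lock.
func (c *schedInfoCell) read() TaskGoroutineSchedInfo {
	return SeqAtomicLoadTaskGoroutineSchedInfo(&c.seq, &c.info)
}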
diff --git a/pkg/sentry/kernel/session_list.go b/pkg/sentry/kernel/session_list.go
new file mode 100644
index 000000000..f53dea401
--- /dev/null
+++ b/pkg/sentry/kernel/session_list.go
@@ -0,0 +1,221 @@
+package kernel
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type sessionElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (sessionElementMapper) linkerFor(elem *Session) *Session { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type sessionList struct {
+ head *Session
+ tail *Session
+}
+
+// Reset resets list l to the empty state.
+func (l *sessionList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *sessionList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *sessionList) Front() *Session {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *sessionList) Back() *Session {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *sessionList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (sessionElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *sessionList) PushFront(e *Session) {
+ linker := sessionElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ sessionElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *sessionList) PushBack(e *Session) {
+ linker := sessionElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ sessionElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *sessionList) PushBackList(m *sessionList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ sessionElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ sessionElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *sessionList) InsertAfter(b, e *Session) {
+ bLinker := sessionElementMapper{}.linkerFor(b)
+ eLinker := sessionElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ sessionElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *sessionList) InsertBefore(a, e *Session) {
+ aLinker := sessionElementMapper{}.linkerFor(a)
+ eLinker := sessionElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ sessionElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *sessionList) Remove(e *Session) {
+ linker := sessionElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ sessionElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ sessionElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type sessionEntry struct {
+ next *Session
+ prev *Session
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *sessionEntry) Next() *Session {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *sessionEntry) Prev() *Session {
+ return e.prev
+}
+
+// SetNext assigns 'elem' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *sessionEntry) SetNext(elem *Session) {
+ e.next = elem
+}
+
+// SetPrev assigns 'elem' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *sessionEntry) SetPrev(elem *Session) {
+ e.prev = elem
+}
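Taken together, the generated methods support the usual intrusive-list workflow. A minimal caller-side sketch, assuming a hypothetical newSession helper for constructing elements; only the list and entry methods come from the file above:

    func sessionListUsageSketch() {
        var active sessionList // the zero value is an empty, ready-to-use list

        s1, s2 := newSession(), newSession() // newSession is illustrative only
        active.PushBack(s1)
        active.PushBack(s2)

        // Walk front to back; Next is promoted from the embedded sessionEntry.
        for e := active.Front(); e != nil; e = e.Next() {
            _ = e // do something with e
        }

        active.Remove(s1) // O(1): unlinks in place, no scan and no allocation
    }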
diff --git a/pkg/sentry/kernel/session_refs.go b/pkg/sentry/kernel/session_refs.go
new file mode 100644
index 000000000..94d6380fa
--- /dev/null
+++ b/pkg/sentry/kernel/session_refs.go
@@ -0,0 +1,140 @@
+package kernel
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const SessionenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var Sessionobj *Session
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type SessionRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *SessionRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *SessionRefs) RefType() string {
+ return fmt.Sprintf("%T", Sessionobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *SessionRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *SessionRefs) LogRefs() bool {
+ return SessionenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *SessionRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *SessionRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if SessionenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.TryRefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *SessionRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if SessionenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *SessionRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if SessionenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *SessionRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
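The [32-bit speculative]:[32-bit real] packing described on refCount is easiest to see numerically. The standalone sketch below mimics what TryIncRef does to the packed int64, with the atomics dropped for clarity; it is an illustration, not the sentry code path:

    package main

    import "fmt"

    const speculativeRef = 1 << 32 // one speculative reference in the upper half

    // tryIncRef is a non-atomic rendering of SessionRefs.TryIncRef.
    func tryIncRef(refCount *int64) bool {
        v := *refCount + speculativeRef
        if int32(v) == 0 {
            // The real count (low 32 bits) was zero: back out and fail.
            *refCount = v - speculativeRef
            return false
        }
        // Fold the speculative reference into a real one.
        *refCount = v - speculativeRef + 1
        return true
    }

    func main() {
        live := int64(1) // one real reference still held
        dead := int64(0) // already released
        fmt.Println(tryIncRef(&live), live) // true 2
        fmt.Println(tryIncRef(&dead), dead) // false 0
    }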
diff --git a/pkg/sentry/kernel/shm/BUILD b/pkg/sentry/kernel/shm/BUILD
deleted file mode 100644
index 5b69333fe..000000000
--- a/pkg/sentry/kernel/shm/BUILD
+++ /dev/null
@@ -1,47 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "shm_refs",
- out = "shm_refs.go",
- consts = {
- "enableLogging": "true",
- },
- package = "shm",
- prefix = "Shm",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "Shm",
- },
-)
-
-go_library(
- name = "shm",
- srcs = [
- "device.go",
- "shm.go",
- "shm_refs.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/sentry/device",
- "//pkg/sentry/fs",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/memmap",
- "//pkg/sentry/pgalloc",
- "//pkg/sentry/usage",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- ],
-)
diff --git a/pkg/sentry/kernel/shm/shm_refs.go b/pkg/sentry/kernel/shm/shm_refs.go
new file mode 100644
index 000000000..e6eed69ef
--- /dev/null
+++ b/pkg/sentry/kernel/shm/shm_refs.go
@@ -0,0 +1,140 @@
+package shm
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). Logging can generate an extremely large amount of output and
+// drastically degrade performance, so it is normally reserved for debugging; for
+// Shm it is enabled (the constant below is true, matching the "enableLogging"
+// const in the shm_refs BUILD rule removed above).
+const ShmenableLogging = true
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var Shmobj *Shm
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type ShmRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *ShmRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *ShmRefs) RefType() string {
+ return fmt.Sprintf("%T", Shmobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *ShmRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *ShmRefs) LogRefs() bool {
+ return ShmenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *ShmRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *ShmRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if ShmenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.TryRefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *ShmRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if ShmenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *ShmRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if ShmenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *ShmRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
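For orientation, the destroy callback passed to DecRef is what lets the final reference trigger cleanup exactly once. A small caller-side sketch using only the generated API above; the cleanup body is illustrative, and the real Shm presumably drives this from its own release path:

    func shmRefsUsageSketch() {
        var refs ShmRefs
        refs.InitRefs() // count = 1; registered with refsvfs2 for leak checking

        refs.IncRef()    // count = 2
        refs.DecRef(nil) // count = 1; not the last reference, destroy is skipped

        refs.DecRef(func() {
            // Runs exactly once, when the count reaches zero; this is where
            // the owner would free backing resources.
        })
    }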
diff --git a/pkg/sentry/kernel/shm/shm_state_autogen.go b/pkg/sentry/kernel/shm/shm_state_autogen.go
new file mode 100644
index 000000000..30ceeaa03
--- /dev/null
+++ b/pkg/sentry/kernel/shm/shm_state_autogen.go
@@ -0,0 +1,147 @@
+// automatically generated by stateify.
+
+package shm
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (r *Registry) StateTypeName() string {
+ return "pkg/sentry/kernel/shm.Registry"
+}
+
+func (r *Registry) StateFields() []string {
+ return []string{
+ "userNS",
+ "shms",
+ "keysToShms",
+ "totalPages",
+ "lastIDUsed",
+ }
+}
+
+func (r *Registry) beforeSave() {}
+
+// +checklocksignore
+func (r *Registry) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.userNS)
+ stateSinkObject.Save(1, &r.shms)
+ stateSinkObject.Save(2, &r.keysToShms)
+ stateSinkObject.Save(3, &r.totalPages)
+ stateSinkObject.Save(4, &r.lastIDUsed)
+}
+
+func (r *Registry) afterLoad() {}
+
+// +checklocksignore
+func (r *Registry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.userNS)
+ stateSourceObject.Load(1, &r.shms)
+ stateSourceObject.Load(2, &r.keysToShms)
+ stateSourceObject.Load(3, &r.totalPages)
+ stateSourceObject.Load(4, &r.lastIDUsed)
+}
+
+func (s *Shm) StateTypeName() string {
+ return "pkg/sentry/kernel/shm.Shm"
+}
+
+func (s *Shm) StateFields() []string {
+ return []string{
+ "ShmRefs",
+ "mfp",
+ "registry",
+ "ID",
+ "creator",
+ "size",
+ "effectiveSize",
+ "fr",
+ "key",
+ "perms",
+ "owner",
+ "attachTime",
+ "detachTime",
+ "changeTime",
+ "creatorPID",
+ "lastAttachDetachPID",
+ "pendingDestruction",
+ }
+}
+
+func (s *Shm) beforeSave() {}
+
+// +checklocksignore
+func (s *Shm) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.ShmRefs)
+ stateSinkObject.Save(1, &s.mfp)
+ stateSinkObject.Save(2, &s.registry)
+ stateSinkObject.Save(3, &s.ID)
+ stateSinkObject.Save(4, &s.creator)
+ stateSinkObject.Save(5, &s.size)
+ stateSinkObject.Save(6, &s.effectiveSize)
+ stateSinkObject.Save(7, &s.fr)
+ stateSinkObject.Save(8, &s.key)
+ stateSinkObject.Save(9, &s.perms)
+ stateSinkObject.Save(10, &s.owner)
+ stateSinkObject.Save(11, &s.attachTime)
+ stateSinkObject.Save(12, &s.detachTime)
+ stateSinkObject.Save(13, &s.changeTime)
+ stateSinkObject.Save(14, &s.creatorPID)
+ stateSinkObject.Save(15, &s.lastAttachDetachPID)
+ stateSinkObject.Save(16, &s.pendingDestruction)
+}
+
+func (s *Shm) afterLoad() {}
+
+// +checklocksignore
+func (s *Shm) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.ShmRefs)
+ stateSourceObject.Load(1, &s.mfp)
+ stateSourceObject.Load(2, &s.registry)
+ stateSourceObject.Load(3, &s.ID)
+ stateSourceObject.Load(4, &s.creator)
+ stateSourceObject.Load(5, &s.size)
+ stateSourceObject.Load(6, &s.effectiveSize)
+ stateSourceObject.Load(7, &s.fr)
+ stateSourceObject.Load(8, &s.key)
+ stateSourceObject.Load(9, &s.perms)
+ stateSourceObject.Load(10, &s.owner)
+ stateSourceObject.Load(11, &s.attachTime)
+ stateSourceObject.Load(12, &s.detachTime)
+ stateSourceObject.Load(13, &s.changeTime)
+ stateSourceObject.Load(14, &s.creatorPID)
+ stateSourceObject.Load(15, &s.lastAttachDetachPID)
+ stateSourceObject.Load(16, &s.pendingDestruction)
+}
+
+func (r *ShmRefs) StateTypeName() string {
+ return "pkg/sentry/kernel/shm.ShmRefs"
+}
+
+func (r *ShmRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *ShmRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *ShmRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *ShmRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func init() {
+ state.Register((*Registry)(nil))
+ state.Register((*Shm)(nil))
+ state.Register((*ShmRefs)(nil))
+}
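Every generated block in this file follows the same four-part protocol: a type name, an ordered field list, an indexed save, and an indexed load. A hand-written equivalent for a minimal hypothetical type would look roughly like the sketch below (counter is illustrative only and assumes the same pkg/state import):

    // counter is hypothetical; it exists only to show the shape stateify emits.
    type counter struct {
        hits uint64
    }

    func (c *counter) StateTypeName() string {
        return "pkg/sentry/kernel/shm.counter"
    }

    func (c *counter) StateFields() []string {
        return []string{"hits"}
    }

    func (c *counter) StateSave(stateSinkObject state.Sink) {
        stateSinkObject.Save(0, &c.hits)
    }

    func (c *counter) StateLoad(stateSourceObject state.Source) {
        stateSourceObject.Load(0, &c.hits)
    }

    // A real savable type would also be passed to state.Register from an init
    // function, as in the generated file above.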
diff --git a/pkg/sentry/kernel/signalfd/BUILD b/pkg/sentry/kernel/signalfd/BUILD
deleted file mode 100644
index 1110ecca5..000000000
--- a/pkg/sentry/kernel/signalfd/BUILD
+++ /dev/null
@@ -1,22 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-licenses(["notice"])
-
-go_library(
- name = "signalfd",
- srcs = ["signalfd.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/anon",
- "//pkg/sentry/fs/fsutil",
- "//pkg/sentry/kernel",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- "//pkg/waiter",
- ],
-)
diff --git a/pkg/sentry/kernel/signalfd/signalfd_state_autogen.go b/pkg/sentry/kernel/signalfd/signalfd_state_autogen.go
new file mode 100644
index 000000000..ad2622f27
--- /dev/null
+++ b/pkg/sentry/kernel/signalfd/signalfd_state_autogen.go
@@ -0,0 +1,39 @@
+// automatically generated by stateify.
+
+package signalfd
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (s *SignalOperations) StateTypeName() string {
+ return "pkg/sentry/kernel/signalfd.SignalOperations"
+}
+
+func (s *SignalOperations) StateFields() []string {
+ return []string{
+ "target",
+ "mask",
+ }
+}
+
+func (s *SignalOperations) beforeSave() {}
+
+// +checklocksignore
+func (s *SignalOperations) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.target)
+ stateSinkObject.Save(1, &s.mask)
+}
+
+func (s *SignalOperations) afterLoad() {}
+
+// +checklocksignore
+func (s *SignalOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.target)
+ stateSourceObject.Load(1, &s.mask)
+}
+
+func init() {
+ state.Register((*SignalOperations)(nil))
+}
diff --git a/pkg/sentry/kernel/socket_list.go b/pkg/sentry/kernel/socket_list.go
new file mode 100644
index 000000000..2b2f5055e
--- /dev/null
+++ b/pkg/sentry/kernel/socket_list.go
@@ -0,0 +1,221 @@
+package kernel
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type socketElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (socketElementMapper) linkerFor(elem *SocketRecordVFS1) *SocketRecordVFS1 { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type socketList struct {
+ head *SocketRecordVFS1
+ tail *SocketRecordVFS1
+}
+
+// Reset resets list l to the empty state.
+func (l *socketList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *socketList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *socketList) Front() *SocketRecordVFS1 {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *socketList) Back() *SocketRecordVFS1 {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *socketList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (socketElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *socketList) PushFront(e *SocketRecordVFS1) {
+ linker := socketElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ socketElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *socketList) PushBack(e *SocketRecordVFS1) {
+ linker := socketElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ socketElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *socketList) PushBackList(m *socketList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ socketElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ socketElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *socketList) InsertAfter(b, e *SocketRecordVFS1) {
+ bLinker := socketElementMapper{}.linkerFor(b)
+ eLinker := socketElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ socketElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *socketList) InsertBefore(a, e *SocketRecordVFS1) {
+ aLinker := socketElementMapper{}.linkerFor(a)
+ eLinker := socketElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ socketElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *socketList) Remove(e *SocketRecordVFS1) {
+ linker := socketElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ socketElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ socketElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type socketEntry struct {
+ next *SocketRecordVFS1
+ prev *SocketRecordVFS1
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *socketEntry) Next() *SocketRecordVFS1 {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *socketEntry) Prev() *SocketRecordVFS1 {
+ return e.prev
+}
+
+// SetNext assigns 'elem' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *socketEntry) SetNext(elem *SocketRecordVFS1) {
+ e.next = elem
+}
+
+// SetPrev assigns 'entry' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *socketEntry) SetPrev(elem *SocketRecordVFS1) {
+ e.prev = elem
+}
diff --git a/pkg/sentry/kernel/table_test.go b/pkg/sentry/kernel/table_test.go
deleted file mode 100644
index 32cf47e05..000000000
--- a/pkg/sentry/kernel/table_test.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package kernel
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi"
- "gvisor.dev/gvisor/pkg/sentry/arch"
-)
-
-const (
- maxTestSyscall = 1000
-)
-
-func createSyscallTable() *SyscallTable {
- m := make(map[uintptr]Syscall)
- for i := uintptr(0); i <= maxTestSyscall; i++ {
- j := i
- m[i] = Syscall{
- Fn: func(*Task, arch.SyscallArguments) (uintptr, *SyscallControl, error) {
- return j, nil, nil
- },
- }
- }
-
- s := &SyscallTable{
- OS: abi.Linux,
- Arch: arch.AMD64,
- Table: m,
- }
-
- RegisterSyscallTable(s)
- return s
-}
-
-func TestTable(t *testing.T) {
- table := createSyscallTable()
- defer func() {
- // Cleanup registered tables to keep tests separate.
- allSyscallTables = []*SyscallTable{}
- }()
-
- // Go through all functions and check that they return the right value.
- for i := uintptr(0); i < maxTestSyscall; i++ {
- fn := table.Lookup(i)
- if fn == nil {
- t.Errorf("Syscall %v is set to nil", i)
- continue
- }
-
- v, _, _ := fn(nil, arch.SyscallArguments{})
- if v != i {
- t.Errorf("Wrong return value for syscall %v: expected %v, got %v", i, i, v)
- }
- }
-
- // Check that values outside the range return nil.
- for i := uintptr(maxTestSyscall + 1); i < maxTestSyscall+100; i++ {
- fn := table.Lookup(i)
- if fn != nil {
- t.Errorf("Syscall %v is not nil: %v", i, fn)
- continue
- }
- }
-}
-
-func BenchmarkTableLookup(b *testing.B) {
- table := createSyscallTable()
-
- b.ResetTimer()
-
- j := uintptr(0)
- for i := 0; i < b.N; i++ {
- table.Lookup(j)
- j = (j + 1) % 310
- }
-
- b.StopTimer()
- // Cleanup registered tables to keep tests separate.
- allSyscallTables = []*SyscallTable{}
-}
-
-func BenchmarkTableMapLookup(b *testing.B) {
- table := createSyscallTable()
-
- b.ResetTimer()
-
- j := uintptr(0)
- for i := 0; i < b.N; i++ {
- table.mapLookup(j)
- j = (j + 1) % 310
- }
-
- b.StopTimer()
- // Cleanup registered tables to keep tests separate.
- allSyscallTables = []*SyscallTable{}
-}
diff --git a/pkg/sentry/kernel/task_list.go b/pkg/sentry/kernel/task_list.go
new file mode 100644
index 000000000..66df9ac45
--- /dev/null
+++ b/pkg/sentry/kernel/task_list.go
@@ -0,0 +1,221 @@
+package kernel
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type taskElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (taskElementMapper) linkerFor(elem *Task) *Task { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type taskList struct {
+ head *Task
+ tail *Task
+}
+
+// Reset resets list l to the empty state.
+func (l *taskList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *taskList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *taskList) Front() *Task {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *taskList) Back() *Task {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *taskList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (taskElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *taskList) PushFront(e *Task) {
+ linker := taskElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ taskElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *taskList) PushBack(e *Task) {
+ linker := taskElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ taskElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *taskList) PushBackList(m *taskList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ taskElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ taskElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *taskList) InsertAfter(b, e *Task) {
+ bLinker := taskElementMapper{}.linkerFor(b)
+ eLinker := taskElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ taskElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *taskList) InsertBefore(a, e *Task) {
+ aLinker := taskElementMapper{}.linkerFor(a)
+ eLinker := taskElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ taskElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *taskList) Remove(e *Task) {
+ linker := taskElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ taskElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ taskElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type taskEntry struct {
+ next *Task
+ prev *Task
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *taskEntry) Next() *Task {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *taskEntry) Prev() *Task {
+ return e.prev
+}
+
+// SetNext assigns 'elem' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *taskEntry) SetNext(elem *Task) {
+ e.next = elem
+}
+
+// SetPrev assigns 'entry' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *taskEntry) SetPrev(elem *Task) {
+ e.prev = elem
+}
diff --git a/pkg/sentry/kernel/task_test.go b/pkg/sentry/kernel/task_test.go
deleted file mode 100644
index cfcde9a7a..000000000
--- a/pkg/sentry/kernel/task_test.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package kernel
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/sentry/kernel/sched"
-)
-
-func TestTaskCPU(t *testing.T) {
- for _, test := range []struct {
- mask sched.CPUSet
- tid ThreadID
- cpu int32
- }{
- {
- mask: []byte{0xff},
- tid: 1,
- cpu: 0,
- },
- {
- mask: []byte{0xff},
- tid: 10,
- cpu: 1,
- },
- {
- // more than 8 cpus.
- mask: []byte{0xff, 0xff},
- tid: 10,
- cpu: 9,
- },
- {
- // missing the first cpu.
- mask: []byte{0xfe},
- tid: 1,
- cpu: 1,
- },
- {
- mask: []byte{0xfe},
- tid: 10,
- cpu: 3,
- },
- {
- // missing the fifth cpu.
- mask: []byte{0xef},
- tid: 10,
- cpu: 2,
- },
- } {
- assigned := assignCPU(test.mask, test.tid)
- if test.cpu != assigned {
- t.Errorf("assignCPU(%v, %v) got %v, want %v", test.mask, test.tid, assigned, test.cpu)
- }
- }
-
-}
diff --git a/pkg/sentry/kernel/time/BUILD b/pkg/sentry/kernel/time/BUILD
deleted file mode 100644
index e293d9a0f..000000000
--- a/pkg/sentry/kernel/time/BUILD
+++ /dev/null
@@ -1,20 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "time",
- srcs = [
- "context.go",
- "tcpip.go",
- "time.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/sync",
- "//pkg/waiter",
- ],
-)
diff --git a/pkg/sentry/kernel/time/time_state_autogen.go b/pkg/sentry/kernel/time/time_state_autogen.go
new file mode 100644
index 000000000..855751953
--- /dev/null
+++ b/pkg/sentry/kernel/time/time_state_autogen.go
@@ -0,0 +1,103 @@
+// automatically generated by stateify.
+
+package time
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (t *Time) StateTypeName() string {
+ return "pkg/sentry/kernel/time.Time"
+}
+
+func (t *Time) StateFields() []string {
+ return []string{
+ "ns",
+ }
+}
+
+func (t *Time) beforeSave() {}
+
+// +checklocksignore
+func (t *Time) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ stateSinkObject.Save(0, &t.ns)
+}
+
+func (t *Time) afterLoad() {}
+
+// +checklocksignore
+func (t *Time) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.ns)
+}
+
+func (s *Setting) StateTypeName() string {
+ return "pkg/sentry/kernel/time.Setting"
+}
+
+func (s *Setting) StateFields() []string {
+ return []string{
+ "Enabled",
+ "Next",
+ "Period",
+ }
+}
+
+func (s *Setting) beforeSave() {}
+
+// +checklocksignore
+func (s *Setting) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.Enabled)
+ stateSinkObject.Save(1, &s.Next)
+ stateSinkObject.Save(2, &s.Period)
+}
+
+func (s *Setting) afterLoad() {}
+
+// +checklocksignore
+func (s *Setting) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.Enabled)
+ stateSourceObject.Load(1, &s.Next)
+ stateSourceObject.Load(2, &s.Period)
+}
+
+func (t *Timer) StateTypeName() string {
+ return "pkg/sentry/kernel/time.Timer"
+}
+
+func (t *Timer) StateFields() []string {
+ return []string{
+ "clock",
+ "listener",
+ "setting",
+ "paused",
+ }
+}
+
+func (t *Timer) beforeSave() {}
+
+// +checklocksignore
+func (t *Timer) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ stateSinkObject.Save(0, &t.clock)
+ stateSinkObject.Save(1, &t.listener)
+ stateSinkObject.Save(2, &t.setting)
+ stateSinkObject.Save(3, &t.paused)
+}
+
+func (t *Timer) afterLoad() {}
+
+// +checklocksignore
+func (t *Timer) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.clock)
+ stateSourceObject.Load(1, &t.listener)
+ stateSourceObject.Load(2, &t.setting)
+ stateSourceObject.Load(3, &t.paused)
+}
+
+func init() {
+ state.Register((*Time)(nil))
+ state.Register((*Setting)(nil))
+ state.Register((*Timer)(nil))
+}
diff --git a/pkg/sentry/kernel/timekeeper_test.go b/pkg/sentry/kernel/timekeeper_test.go
deleted file mode 100644
index b6039505a..000000000
--- a/pkg/sentry/kernel/timekeeper_test.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package kernel
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/errors/linuxerr"
- "gvisor.dev/gvisor/pkg/hostarch"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/pgalloc"
- sentrytime "gvisor.dev/gvisor/pkg/sentry/time"
- "gvisor.dev/gvisor/pkg/sentry/usage"
-)
-
-// mockClocks is a sentrytime.Clocks that simply returns the times in the
-// struct.
-type mockClocks struct {
- monotonic int64
- realtime int64
-}
-
-// Update implements sentrytime.Clocks.Update. It does nothing.
-func (*mockClocks) Update() (monotonicParams sentrytime.Parameters, monotonicOk bool, realtimeParam sentrytime.Parameters, realtimeOk bool) {
- return
-}
-
-// GetTime implements sentrytime.Clocks.GetTime.
-func (c *mockClocks) GetTime(id sentrytime.ClockID) (int64, error) {
- switch id {
- case sentrytime.Monotonic:
- return c.monotonic, nil
- case sentrytime.Realtime:
- return c.realtime, nil
- default:
- return 0, linuxerr.EINVAL
- }
-}
-
-// stateTestClocklessTimekeeper returns a test Timekeeper which has not had
-// SetClocks called.
-func stateTestClocklessTimekeeper(tb testing.TB) *Timekeeper {
- ctx := contexttest.Context(tb)
- mfp := pgalloc.MemoryFileProviderFromContext(ctx)
- fr, err := mfp.MemoryFile().Allocate(hostarch.PageSize, usage.Anonymous)
- if err != nil {
- tb.Fatalf("failed to allocate memory: %v", err)
- }
- return &Timekeeper{
- params: NewVDSOParamPage(mfp, fr),
- }
-}
-
-func stateTestTimekeeper(tb testing.TB) *Timekeeper {
- t := stateTestClocklessTimekeeper(tb)
- t.SetClocks(sentrytime.NewCalibratedClocks())
- return t
-}
-
-// TestTimekeeperMonotonicZero tests that monotonic time starts at zero.
-func TestTimekeeperMonotonicZero(t *testing.T) {
- c := &mockClocks{
- monotonic: 100000,
- }
-
- tk := stateTestClocklessTimekeeper(t)
- tk.SetClocks(c)
- defer tk.Destroy()
-
- now, err := tk.GetTime(sentrytime.Monotonic)
- if err != nil {
- t.Errorf("GetTime err got %v want nil", err)
- }
- if now != 0 {
- t.Errorf("GetTime got %d want 0", now)
- }
-
- c.monotonic += 10
-
- now, err = tk.GetTime(sentrytime.Monotonic)
- if err != nil {
- t.Errorf("GetTime err got %v want nil", err)
- }
- if now != 10 {
- t.Errorf("GetTime got %d want 10", now)
- }
-}
-
-// TestTimekeeperMonotonicForward tests that monotonic time jumps forward
-// after restore.
-func TestTimekeeperMonotonicForward(t *testing.T) {
- c := &mockClocks{
- monotonic: 900000,
- realtime: 600000,
- }
-
- tk := stateTestClocklessTimekeeper(t)
- tk.restored = make(chan struct{})
- tk.saveMonotonic = 100000
- tk.saveRealtime = 400000
- tk.SetClocks(c)
- defer tk.Destroy()
-
- // The monotonic clock should jump ahead by 200000 to 300000.
- //
- // The new system monotonic time (900000) is irrelevant to what the app
- // sees.
- now, err := tk.GetTime(sentrytime.Monotonic)
- if err != nil {
- t.Errorf("GetTime err got %v want nil", err)
- }
- if now != 300000 {
- t.Errorf("GetTime got %d want 300000", now)
- }
-}
-
-// TestTimekeeperMonotonicJumpBackwards tests that monotonic time does not jump
-// backwards when realtime goes backwards.
-func TestTimekeeperMonotonicJumpBackwards(t *testing.T) {
- c := &mockClocks{
- monotonic: 900000,
- realtime: 400000,
- }
-
- tk := stateTestClocklessTimekeeper(t)
- tk.restored = make(chan struct{})
- tk.saveMonotonic = 100000
- tk.saveRealtime = 600000
- tk.SetClocks(c)
- defer tk.Destroy()
-
- // The monotonic clock should remain at 100000.
- //
- // The new system monotonic time (900000) is irrelevant to what the app
- // sees and we don't want to jump the monotonic clock backwards like
- // realtime did.
- now, err := tk.GetTime(sentrytime.Monotonic)
- if err != nil {
- t.Errorf("GetTime err got %v want nil", err)
- }
- if now != 100000 {
- t.Errorf("GetTime got %d want 100000", now)
- }
-}
diff --git a/pkg/sentry/kernel/uncaught_signal.proto b/pkg/sentry/kernel/uncaught_signal.proto
deleted file mode 100644
index 0bdb062cb..000000000
--- a/pkg/sentry/kernel/uncaught_signal.proto
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package gvisor;
-
-import "pkg/sentry/arch/registers.proto";
-
-message UncaughtSignal {
- // Thread ID.
- int32 tid = 1;
-
- // Process ID.
- int32 pid = 2;
-
- // Registers at the time of the fault or signal.
- Registers registers = 3;
-
- // Signal number.
- int32 signal_number = 4;
-
- // The memory location which caused the fault (set if applicable, 0
- // otherwise). This will be set for SIGILL, SIGFPE, SIGSEGV, and SIGBUS.
- uint64 fault_addr = 5;
-}
diff --git a/pkg/sentry/kernel/uncaught_signal_go_proto/uncaught_signal.pb.go b/pkg/sentry/kernel/uncaught_signal_go_proto/uncaught_signal.pb.go
new file mode 100644
index 000000000..54955f061
--- /dev/null
+++ b/pkg/sentry/kernel/uncaught_signal_go_proto/uncaught_signal.pb.go
@@ -0,0 +1,193 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.13.0
+// source: pkg/sentry/kernel/uncaught_signal.proto
+
+package gvisor
+
+import (
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ registers_go_proto "gvisor.dev/gvisor/pkg/sentry/arch/registers_go_proto"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+type UncaughtSignal struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Tid int32 `protobuf:"varint,1,opt,name=tid,proto3" json:"tid,omitempty"`
+ Pid int32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
+ Registers *registers_go_proto.Registers `protobuf:"bytes,3,opt,name=registers,proto3" json:"registers,omitempty"`
+ SignalNumber int32 `protobuf:"varint,4,opt,name=signal_number,json=signalNumber,proto3" json:"signal_number,omitempty"`
+ FaultAddr uint64 `protobuf:"varint,5,opt,name=fault_addr,json=faultAddr,proto3" json:"fault_addr,omitempty"`
+}
+
+func (x *UncaughtSignal) Reset() {
+ *x = UncaughtSignal{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_sentry_kernel_uncaught_signal_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UncaughtSignal) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UncaughtSignal) ProtoMessage() {}
+
+func (x *UncaughtSignal) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_sentry_kernel_uncaught_signal_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UncaughtSignal.ProtoReflect.Descriptor instead.
+func (*UncaughtSignal) Descriptor() ([]byte, []int) {
+ return file_pkg_sentry_kernel_uncaught_signal_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *UncaughtSignal) GetTid() int32 {
+ if x != nil {
+ return x.Tid
+ }
+ return 0
+}
+
+func (x *UncaughtSignal) GetPid() int32 {
+ if x != nil {
+ return x.Pid
+ }
+ return 0
+}
+
+func (x *UncaughtSignal) GetRegisters() *registers_go_proto.Registers {
+ if x != nil {
+ return x.Registers
+ }
+ return nil
+}
+
+func (x *UncaughtSignal) GetSignalNumber() int32 {
+ if x != nil {
+ return x.SignalNumber
+ }
+ return 0
+}
+
+func (x *UncaughtSignal) GetFaultAddr() uint64 {
+ if x != nil {
+ return x.FaultAddr
+ }
+ return 0
+}
+
+var File_pkg_sentry_kernel_uncaught_signal_proto protoreflect.FileDescriptor
+
+var file_pkg_sentry_kernel_uncaught_signal_proto_rawDesc = []byte{
+ 0x0a, 0x27, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2f, 0x6b, 0x65, 0x72,
+ 0x6e, 0x65, 0x6c, 0x2f, 0x75, 0x6e, 0x63, 0x61, 0x75, 0x67, 0x68, 0x74, 0x5f, 0x73, 0x69, 0x67,
+ 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x67, 0x76, 0x69, 0x73, 0x6f,
+ 0x72, 0x1a, 0x1f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2f, 0x61, 0x72,
+ 0x63, 0x68, 0x2f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x22, 0xa9, 0x01, 0x0a, 0x0e, 0x55, 0x6e, 0x63, 0x61, 0x75, 0x67, 0x68, 0x74, 0x53,
+ 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x05, 0x52, 0x03, 0x74, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x2f, 0x0a, 0x09, 0x72, 0x65, 0x67,
+ 0x69, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67,
+ 0x76, 0x69, 0x73, 0x6f, 0x72, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x73, 0x52,
+ 0x09, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x69,
+ 0x67, 0x6e, 0x61, 0x6c, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x05, 0x52, 0x0c, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12,
+ 0x1d, 0x0a, 0x0a, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x09, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x41, 0x64, 0x64, 0x72, 0x62, 0x06,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_pkg_sentry_kernel_uncaught_signal_proto_rawDescOnce sync.Once
+ file_pkg_sentry_kernel_uncaught_signal_proto_rawDescData = file_pkg_sentry_kernel_uncaught_signal_proto_rawDesc
+)
+
+func file_pkg_sentry_kernel_uncaught_signal_proto_rawDescGZIP() []byte {
+ file_pkg_sentry_kernel_uncaught_signal_proto_rawDescOnce.Do(func() {
+ file_pkg_sentry_kernel_uncaught_signal_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_sentry_kernel_uncaught_signal_proto_rawDescData)
+ })
+ return file_pkg_sentry_kernel_uncaught_signal_proto_rawDescData
+}
+
+var file_pkg_sentry_kernel_uncaught_signal_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_pkg_sentry_kernel_uncaught_signal_proto_goTypes = []interface{}{
+ (*UncaughtSignal)(nil), // 0: gvisor.UncaughtSignal
+ (*registers_go_proto.Registers)(nil), // 1: gvisor.Registers
+}
+var file_pkg_sentry_kernel_uncaught_signal_proto_depIdxs = []int32{
+ 1, // 0: gvisor.UncaughtSignal.registers:type_name -> gvisor.Registers
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_pkg_sentry_kernel_uncaught_signal_proto_init() }
+func file_pkg_sentry_kernel_uncaught_signal_proto_init() {
+ if File_pkg_sentry_kernel_uncaught_signal_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_pkg_sentry_kernel_uncaught_signal_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UncaughtSignal); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_pkg_sentry_kernel_uncaught_signal_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_pkg_sentry_kernel_uncaught_signal_proto_goTypes,
+ DependencyIndexes: file_pkg_sentry_kernel_uncaught_signal_proto_depIdxs,
+ MessageInfos: file_pkg_sentry_kernel_uncaught_signal_proto_msgTypes,
+ }.Build()
+ File_pkg_sentry_kernel_uncaught_signal_proto = out.File
+ file_pkg_sentry_kernel_uncaught_signal_proto_rawDesc = nil
+ file_pkg_sentry_kernel_uncaught_signal_proto_goTypes = nil
+ file_pkg_sentry_kernel_uncaught_signal_proto_depIdxs = nil
+}
diff --git a/pkg/sentry/limits/BUILD b/pkg/sentry/limits/BUILD
deleted file mode 100644
index 21b0d1595..000000000
--- a/pkg/sentry/limits/BUILD
+++ /dev/null
@@ -1,29 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "limits",
- srcs = [
- "context.go",
- "limits.go",
- "linux.go",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/sync",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "limits_test",
- size = "small",
- srcs = [
- "limits_test.go",
- ],
- library = ":limits",
- deps = ["@org_golang_x_sys//unix:go_default_library"],
-)
diff --git a/pkg/sentry/limits/limits_state_autogen.go b/pkg/sentry/limits/limits_state_autogen.go
new file mode 100644
index 000000000..038124f76
--- /dev/null
+++ b/pkg/sentry/limits/limits_state_autogen.go
@@ -0,0 +1,65 @@
+// automatically generated by stateify.
+
+package limits
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (l *Limit) StateTypeName() string {
+ return "pkg/sentry/limits.Limit"
+}
+
+func (l *Limit) StateFields() []string {
+ return []string{
+ "Cur",
+ "Max",
+ }
+}
+
+func (l *Limit) beforeSave() {}
+
+// +checklocksignore
+func (l *Limit) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.Cur)
+ stateSinkObject.Save(1, &l.Max)
+}
+
+func (l *Limit) afterLoad() {}
+
+// +checklocksignore
+func (l *Limit) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.Cur)
+ stateSourceObject.Load(1, &l.Max)
+}
+
+func (l *LimitSet) StateTypeName() string {
+ return "pkg/sentry/limits.LimitSet"
+}
+
+func (l *LimitSet) StateFields() []string {
+ return []string{
+ "data",
+ }
+}
+
+func (l *LimitSet) beforeSave() {}
+
+// +checklocksignore
+func (l *LimitSet) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.data)
+}
+
+func (l *LimitSet) afterLoad() {}
+
+// +checklocksignore
+func (l *LimitSet) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.data)
+}
+
+func init() {
+ state.Register((*Limit)(nil))
+ state.Register((*LimitSet)(nil))
+}
diff --git a/pkg/sentry/limits/limits_test.go b/pkg/sentry/limits/limits_test.go
deleted file mode 100644
index 0ee877be4..000000000
--- a/pkg/sentry/limits/limits_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package limits
-
-import (
- "testing"
-
- "golang.org/x/sys/unix"
-)
-
-func TestSet(t *testing.T) {
- testCases := []struct {
- limit Limit
- privileged bool
- expectedErr error
- }{
- {limit: Limit{Cur: 50, Max: 50}, privileged: false, expectedErr: nil},
- {limit: Limit{Cur: 20, Max: 50}, privileged: false, expectedErr: nil},
- {limit: Limit{Cur: 20, Max: 60}, privileged: false, expectedErr: unix.EPERM},
- {limit: Limit{Cur: 60, Max: 50}, privileged: false, expectedErr: unix.EINVAL},
- {limit: Limit{Cur: 11, Max: 10}, privileged: false, expectedErr: unix.EINVAL},
- {limit: Limit{Cur: 20, Max: 60}, privileged: true, expectedErr: nil},
- }
-
- ls := NewLimitSet()
- for _, tc := range testCases {
- if _, err := ls.Set(1, tc.limit, tc.privileged); err != tc.expectedErr {
- t.Fatalf("Tried to set Limit to %+v and privilege %t: got %v, wanted %v", tc.limit, tc.privileged, err, tc.expectedErr)
- }
- }
-
-}
diff --git a/pkg/sentry/loader/BUILD b/pkg/sentry/loader/BUILD
deleted file mode 100644
index 54bfed644..000000000
--- a/pkg/sentry/loader/BUILD
+++ /dev/null
@@ -1,43 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "loader",
- srcs = [
- "elf.go",
- "interpreter.go",
- "loader.go",
- "vdso.go",
- "vdso_state.go",
- ],
- marshal = True,
- marshal_debug = True,
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi",
- "//pkg/abi/linux",
- "//pkg/abi/linux/errno",
- "//pkg/context",
- "//pkg/cpuid",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/rand",
- "//pkg/safemem",
- "//pkg/sentry/arch",
- "//pkg/sentry/fsbridge",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/limits",
- "//pkg/sentry/loader/vdsodata",
- "//pkg/sentry/memmap",
- "//pkg/sentry/mm",
- "//pkg/sentry/pgalloc",
- "//pkg/sentry/uniqueid",
- "//pkg/sentry/usage",
- "//pkg/sentry/vfs",
- "//pkg/syserr",
- "//pkg/syserror",
- "//pkg/usermem",
- ],
-)
diff --git a/pkg/sentry/loader/loader_abi_autogen_unsafe.go b/pkg/sentry/loader/loader_abi_autogen_unsafe.go
new file mode 100644
index 000000000..bfa048912
--- /dev/null
+++ b/pkg/sentry/loader/loader_abi_autogen_unsafe.go
@@ -0,0 +1,13 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// If there are issues with build tag aggregation, see
+// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here
+// come from the input set of files used to generate this file. This input set
+// is filtered based on pre-defined file suffixes related to build tags, see
+// tools/defs.bzl:calculate_sets().
+
+package loader
+
+import (
+)
+
diff --git a/pkg/sentry/loader/loader_state_autogen.go b/pkg/sentry/loader/loader_state_autogen.go
new file mode 100644
index 000000000..7d001c54b
--- /dev/null
+++ b/pkg/sentry/loader/loader_state_autogen.go
@@ -0,0 +1,96 @@
+// automatically generated by stateify.
+
+package loader
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (v *VDSO) StateTypeName() string {
+ return "pkg/sentry/loader.VDSO"
+}
+
+func (v *VDSO) StateFields() []string {
+ return []string{
+ "ParamPage",
+ "vdso",
+ "os",
+ "arch",
+ "phdrs",
+ }
+}
+
+func (v *VDSO) beforeSave() {}
+
+// +checklocksignore
+func (v *VDSO) StateSave(stateSinkObject state.Sink) {
+ v.beforeSave()
+ var phdrsValue []elfProgHeader = v.savePhdrs()
+ stateSinkObject.SaveValue(4, phdrsValue)
+ stateSinkObject.Save(0, &v.ParamPage)
+ stateSinkObject.Save(1, &v.vdso)
+ stateSinkObject.Save(2, &v.os)
+ stateSinkObject.Save(3, &v.arch)
+}
+
+func (v *VDSO) afterLoad() {}
+
+// +checklocksignore
+func (v *VDSO) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &v.ParamPage)
+ stateSourceObject.Load(1, &v.vdso)
+ stateSourceObject.Load(2, &v.os)
+ stateSourceObject.Load(3, &v.arch)
+ stateSourceObject.LoadValue(4, new([]elfProgHeader), func(y interface{}) { v.loadPhdrs(y.([]elfProgHeader)) })
+}
+
+func (e *elfProgHeader) StateTypeName() string {
+ return "pkg/sentry/loader.elfProgHeader"
+}
+
+func (e *elfProgHeader) StateFields() []string {
+ return []string{
+ "Type",
+ "Flags",
+ "Off",
+ "Vaddr",
+ "Paddr",
+ "Filesz",
+ "Memsz",
+ "Align",
+ }
+}
+
+func (e *elfProgHeader) beforeSave() {}
+
+// +checklocksignore
+func (e *elfProgHeader) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.Type)
+ stateSinkObject.Save(1, &e.Flags)
+ stateSinkObject.Save(2, &e.Off)
+ stateSinkObject.Save(3, &e.Vaddr)
+ stateSinkObject.Save(4, &e.Paddr)
+ stateSinkObject.Save(5, &e.Filesz)
+ stateSinkObject.Save(6, &e.Memsz)
+ stateSinkObject.Save(7, &e.Align)
+}
+
+func (e *elfProgHeader) afterLoad() {}
+
+// +checklocksignore
+func (e *elfProgHeader) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.Type)
+ stateSourceObject.Load(1, &e.Flags)
+ stateSourceObject.Load(2, &e.Off)
+ stateSourceObject.Load(3, &e.Vaddr)
+ stateSourceObject.Load(4, &e.Paddr)
+ stateSourceObject.Load(5, &e.Filesz)
+ stateSourceObject.Load(6, &e.Memsz)
+ stateSourceObject.Load(7, &e.Align)
+}
+
+func init() {
+ state.Register((*VDSO)(nil))
+ state.Register((*elfProgHeader)(nil))
+}
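The VDSO block above shows the one non-trivial pattern in this file: phdrs is not saved directly but routed through savePhdrs/loadPhdrs via SaveValue and LoadValue. A rough sketch of the same pattern for a hypothetical type; cache and its flatten/rebuild helpers are illustrative, the remaining State* methods are omitted, and the same pkg/state import is assumed:

    // cache is hypothetical: it holds a set that is persisted as a flat slice.
    type cache struct {
        names map[string]struct{}
    }

    // saveNames flattens the set into a saveable representation.
    func (c *cache) saveNames() []string {
        out := make([]string, 0, len(c.names))
        for n := range c.names {
            out = append(out, n)
        }
        return out
    }

    // loadNames rebuilds the set from the saved representation.
    func (c *cache) loadNames(names []string) {
        c.names = make(map[string]struct{}, len(names))
        for _, n := range names {
            c.names[n] = struct{}{}
        }
    }

    func (c *cache) StateSave(stateSinkObject state.Sink) {
        var namesValue []string = c.saveNames()
        stateSinkObject.SaveValue(0, namesValue)
    }

    func (c *cache) StateLoad(stateSourceObject state.Source) {
        stateSourceObject.LoadValue(0, new([]string), func(y interface{}) { c.loadNames(y.([]string)) })
    }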
diff --git a/pkg/sentry/loader/vdsodata/BUILD b/pkg/sentry/loader/vdsodata/BUILD
deleted file mode 100644
index 119199f97..000000000
--- a/pkg/sentry/loader/vdsodata/BUILD
+++ /dev/null
@@ -1,38 +0,0 @@
-load("//tools:defs.bzl", "go_add_tags", "go_embed_data", "go_library")
-
-package(licenses = ["notice"])
-
-go_embed_data(
- name = "vdso_bin",
- src = "//vdso:vdso.so",
- package = "vdsodata",
- var = "Binary",
-)
-
-[
- # Generate multiple tagged files. Note that the contents of all files
- # will be the same (i.e. vdso_arm64.go will contain the amd64 vdso), but
- # the build tags will ensure only one is selected. When we generate the
- # the "Go" branch, we select all architecture files from the relevant build.
- # This is a hack to work around limitations on "out" being a configurable
- # attribute and on selects for srcs. See also tools/go_branch.sh.
- go_add_tags(
- name = "vdso_%s" % arch,
- src = ":vdso_bin",
- out = "vdso_%s.go" % arch,
- go_tags = [arch],
- )
- for arch in ("amd64", "arm64")
-]
-
-go_library(
- name = "vdsodata",
- srcs = [
- "vdsodata.go",
- ":vdso_amd64",
- ":vdso_arm64",
- ],
- marshal = False,
- stateify = False,
- visibility = ["//pkg/sentry:internal"],
-)
diff --git a/pkg/sentry/loader/vdsodata/vdso_amd64.go b/pkg/sentry/loader/vdsodata/vdso_amd64.go
new file mode 100644
index 000000000..ff53489f8
--- /dev/null
+++ b/pkg/sentry/loader/vdsodata/vdso_amd64.go
@@ -0,0 +1,7 @@
+// +build amd64
+
+// Generated by go_embed_data for //pkg/sentry/loader/vdsodata:vdso_bin. DO NOT EDIT.
+
+package vdsodata
+
+var Binary = []byte("ELF\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00X\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00@\x008\x00\x00@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00p\xff\xff\xff\xff\xff\x00\x00p\xff\xff\xff\xff\xff\xae\x00\x00\x00\x00\x00\x00\xae\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x00\x00\x00\x00\x00\x00\xe0p\xff\xff\xff\xff\xff\xe0p\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00T\x00\x00\x00\x00\x00\x00Tp\xff\xff\xff\xff\xffTp\xff\xff\xff\xff\xff`\x00\x00\x00\x00\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00P\xe5td\x00\x00\x00\xb4\x00\x00\x00\x00\x00\x00\xb4p\xff\xff\xff\xff\xff\xb4p\xff\xff\xff\xff\xffD\x00\x00\x00\x00\x00\x00\x00D\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\n\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\"\x00 \x00 p\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\"\x00\x00\x00\x00 \x00 p\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x000\x00\x00\x00\"\x00 \x00\xf0p\xff\xff\xff\xff\xff&\x00\x00\x00\x00\x00\x00\x005\x00\x00\x00\x00 \x00\xf0p\xff\xff\xff\xff\xff&\x00\x00\x00\x00\x00\x00\x00A\x00\x00\x00\"\x00 \x00\x80p\xff\xff\xff\xff\xffb\x00\x00\x00\x00\x00\x00\x00N\x00\x00\x00\x00 \x00\x80p\xff\xff\xff\xff\xffb\x00\x00\x00\x00\x00\x00\x00b\x00\x00\x00\"\x00 \x00@p\xff\xff\xff\xff\xff8\x00\x00\x00\x00\x00\x00\x00p\x00\x00\x00\x00 \x00@p\xff\xff\xff\xff\xff8\x00\x00\x00\x00\x00\x00\x00\x85\x00\x00\x00\x00 \x000p\xff\xff\xff\xff\xff \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf1\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00linux-vdso.so.1\x00LINUX_2.6\x00getcpu\x00__vdso_getcpu\x00time\x00__vdso_time\x00gettimeofday\x00__vdso_gettimeofday\x00clock_gettime\x00__vdso_clock_gettime\x00__kernel_rt_sigreturn\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa1\xbf\xee \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6u\xae\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00GNU\x00\x00\x00\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x00GNU\x00gold 1.16\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00GNU\x00g\xf3E\xee\xe6C\n\x9a<\x8b\xa5L\xddU\x9fN;@\x00\x00\x00\x00\x00\x00| \x00\x00\\\x00\x00\x00\x8c \x00\x00t\x00\x00\x00\xcc \x00\x00\x8c\x00\x00\x00< \x00\x00\xb4\x00\x00\x00l \x00\x00\xd4\x00\x00\x00| \x00\x00\xec\x00\x00\x00<\x00\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00zR\x00x \x90\x00\x00\x00\x00\x00\x00\x00\x00 \x00\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x004\x00\x00\x00 \x00\x008\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00$\x00\x00\x00L\x00\x00\x008 \x00\x00b\x00\x00\x00\x00E\x86D\x83D0RAA\x00\x00\x00t\x00\x00\x00\x80 \x00\x00&\x00\x00\x00\x00E\x83G XA\x00\x00\x00\x00\x94\x00\x00\x00\x90 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x00\x00\x00\x88 \x00\x00\xbb\x00\x00\x00\x00E\x83~\nm J\x00\x00\x00\xcc\x00\x00\x00( \x00\x00\xbe\x00\x00\x00\x00E\x83~\nm M\x00\x00\x00\x00\x00\x00\x00`p\xff\xff\xff\xff\xff 
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00hp\xff\xff\xff\xff\xff\n\x00\x00\x00\x00\x00\x00\x00\x9b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 p\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\xff\xffo\x00\x00\x00\x00p\xff\xff\xff\xff\xff\xfc\xff\xffo\x00\x00\x00\x00p\xff\xff\xff\xff\xff\xfd\xff\xffo\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0
0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x
00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf3\xfa\xb8\x00\x00\x00\xc3@\x00\xf3\xfa\x83\xfft\x83\xfft\x85\xfft\xb8\xe4\x00\x00\x00\xc3fD\x00\x00H\x89\xf7\xe9\x88\x00\x00\x84\x00\x00\x00\x00\x00H\x89\xf7\xe9\xb8\x00\x00\x00\x84\x00\x00\x00\x00\x00\xf3\xfaUH\x89\xf5SH\x83\xecH\x85\xfft:H\x89\xfbH\x89\xe7\xe8\x93\x00\x00\x00\x85\xc0u:H\x8b$H\x8bL$H\xba\xcf\xf7S㥛\xc4 H\x89H\x89\xc8H\xc1\xf9?H\xf7\xeaH\xc1\xfaH)\xcaH\x89S1\xc0H\x85\xedtH\xc7E\x00\x00\x00\x00\x00H\x83\xc4[]\xc3ff.\x84\x00\x00\x00\x00\x00\x00\xf3\xfaSH\x89\xfbH\x83\xecH\x89\xe7\xe8,\x00\x00\x00H\x8b$H\x85\xdbtH\x89H\x83\xc4[\xc3f.\x84\x00\x00\x00\x00\x00\xf3\xfa\xb85\x00\x00H\x98Ð\x90\xf3\xfaSH\x89\xfeH\x8d \xc1\xde\xff\xffH\x8b9\x83\xe7\xfeL\x8bQ(L\x8bA8H\x8bY0L\x8bY@Lc\xcf\xae\xe81H\x8b9L9\xcfu\xddM\x85\xd2tv\x89\xc0H\xc1\xe2 H \xc21\xc0H9\xd3+H\xb8\x00\x00\x00\x00\x00ʚ;H\x89\xd11\xd2I\xf7\xf3H)\xd9H\x89\xcfH\xc1\xff?H\xaf\xf8H\xf7\xe1H\xfaH\xac\xd0 H\xb9SZ\x9b\xa0/\xb8D\x00I\xc0[L\x89\xc2H\xc1\xea H\x89\xd0H\xf7\xe11\xc0H\xc1\xea H\x89Hi\xd2\x00ʚ;I)\xd0L\x89F\xc3\x84\x00\x00\x00\x00\x00\xb8\xe4\x00\x00\x001\xff[\xc3D\x00\x00\xf3\xfaSH\x89\xfeH\x8d \xde\xff\xffH\x8b9\x83\xe7\xfeL\x8bQL\x8bAH\x8bYL\x8bY Lc\xcf\xae\xe81H\x8b9L9\xcfu\xddM\x85\xd2tv\x89\xc0H\xc1\xe2 H \xc21\xc0H9\xd3+H\xb8\x00\x00\x00\x00\x00ʚ;H\x89\xd11\xd2I\xf7\xf3H)\xd9H\x89\xcfH\xc1\xff?H\xaf\xf8H\xf7\xe1H\xfaH\xac\xd0 H\xb9SZ\x9b\xa0/\xb8D\x00I\xc0[L\x89\xc2H\xc1\xea H\x89\xd0H\xf7\xe11\xc0H\xc1\xea H\x89Hi\xd2\x00ʚ;I)\xd0L\x89F\xc3\x84\x00\x00\x00\x00\x00\xb8\xe4\x00\x00\x00\xbf\x00\x00\x00[\xc3\x00GCC: (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf1\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\xf1\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 \x000p\xff\xff\xff\xff\xff\xbb\x00\x00\x00\x00\x00\x00\x009\x00\x00\x00\x00 \x00\xf0p\xff\xff\xff\xff\xff\xbe\x00\x00\x00\x00\x00\x00\x00]\x00\x00\x00 \x00\xe0p\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00f\x00\x00\x00\x00\x00\xf1\xff\x00\x00p\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00s\x00\x00\x00\x00\x00\xf1\xff\x00\xf0o\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00{\x00\x00\x00\x00\xf1\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x85\x00\x00\x00\"\x00 \x00 p\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x8c\x00\x00\x00\x00 \x00 p\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x9a\x00\x00\x00\"\x00 \x00\xf0p\xff\xff\xff\xff\xff&\x00\x00\x00\x00\x00\x00\x00\x9f\x00\x00\x00\x00 \x00\xf0p\xff\xff\xff\xff\xff&\x00\x00\x00\x00\x00\x00\x00\xab\x00\x00\x00\"\x00 \x00\x80p\xff\xff\xff\xff\xffb\x00\x00\x00\x00\x00\x00\x00\xb8\x00\x00\x00\x00 
\x00\x80p\xff\xff\xff\xff\xffb\x00\x00\x00\x00\x00\x00\x00\xcc\x00\x00\x00\"\x00 \x00@p\xff\xff\xff\xff\xff8\x00\x00\x00\x00\x00\x00\x00\xda\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\x00\x00\x00\x00 \x00@p\xff\xff\xff\xff\xff8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 \x000p\xff\xff\xff\xff\xff \x00\x00\x00\x00\x00\x00\x00\x00vdso.cc\x00vdso_time.cc\x00_ZN4vdso13ClockRealtimeEP8timespec\x00_ZN4vdso14ClockMonotonicEP8timespec\x00_DYNAMIC\x00VDSO_PRELINK\x00_params\x00LINUX_2.6\x00getcpu\x00__vdso_getcpu\x00time\x00__vdso_time\x00gettimeofday\x00__vdso_gettimeofday\x00clock_gettime\x00_GLOBAL_OFFSET_TABLE_\x00__vdso_clock_gettime\x00__kernel_rt_sigreturn\x00\x00.text\x00.comment\x00.bss\x00.dynstr\x00.eh_frame_hdr\x00.gnu.version\x00.dynsym\x00.hash\x00.note\x00.eh_frame\x00.gnu.version_d\x00.dynamic\x00.shstrtab\x00.strtab\x00.symtab\x00.data\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 p\xff\xff\xff\xff\xff \x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x008\x00\x00\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00`p\xff\xff\xff\xff\xff`\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00hp\xff\xff\xff\xff\xffh\x00\x00\x00\x00\x00\x00\x9b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00\xff\xff\xffo\x00\x00\x00\x00\x00\x00\x00p\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00V\x00\x00\x00\xfd\xff\xffo\x00\x00\x00\x00\x00\x00\x00p\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x008\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00F\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Tp\xff\xff\xff\xff\xffT\x00\x00\x00\x00\x00\x00`\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00p\x00\x00\x00\x00\x00\x00\x00\xb4p\xff\xff\xff\xff\xff\xb4\x00\x00\x00\x00\x00\x00D\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00L\x00\x00\x00\x00\x00p\x00\x00\x00\x00\x00\x00\x00\xf8p\xff\xff\xff\xff\xff\xf8\x00\x00\x00\x00\x00\x00\xe8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0p\xff\xff\xff\xff\xff\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0p\xff\xff\xff\xff\xff\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x000p\xff\xff\xff\xff\xff0\x00\x00\x00\x00\x00\x00~\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaep\xff\xff\xff\xff\xff\xae\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x000\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xae\x00\x00\x00\x00\x00\x00+\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x00\x00\x00\x00\x00\x00\xc8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc3\x00\x00\x00\x00\x00\x00\x8e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")
diff --git a/pkg/sentry/loader/vdsodata/vdso_arm64.go b/pkg/sentry/loader/vdsodata/vdso_arm64.go
new file mode 100644
index 000000000..67de78969
--- /dev/null
+++ b/pkg/sentry/loader/vdsodata/vdso_arm64.go
@@ -0,0 +1,7 @@
+// +build arm64
+
+// Generated by go_embed_data for //pkg/sentry/loader/vdsodata:vdso_bin. DO NOT EDIT.
+
+package vdsodata
+
+var Binary = []byte("ELF\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb7\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00\x90\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00@\x008\x00\x00@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00p\xff\xff\xff\xff\xff\x00\x00p\xff\xff\xff\xff\xff\x84\x00\x00\x00\x00\x00\x00\x84\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd0\x00\x00\x00\x00\x00\x00\xd0p\xff\xff\xff\xff\xff\xd0p\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x9c\x00\x00\x00\x00\x00\x00\x9cp\xff\xff\xff\xff\xff\x9cp\xff\xff\xff\xff\xff@\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00P\xe5td\x00\x00\x00\xdc\x00\x00\x00\x00\x00\x00\xdcp\xff\xff\xff\xff\xff\xdcp\xff\xff\xff\xff\xff<\x00\x00\x00\x00\x00\x00\x00<\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 \x00\xd8p\xff\xff\xff\xff\xff<\x00\x00\x00\x00\x00\x00\x004\x00\x00\x00\x00 \x00xp\xff\xff\xff\xff\xff`\x00\x00\x00\x00\x00\x00\x00J\x00\x00\x00\x00 \x00@p\xff\xff\xff\xff\xff4\x00\x00\x00\x00\x00\x00\x00a\x00\x00\x00\x00 \x000p\xff\xff\xff\xff\xff \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf1\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00linux-vdso.so.1\x00LINUX_2.6.39\x00__kernel_clock_getres\x00__kernel_gettimeofday\x00__kernel_clock_gettime\x00__kernel_rt_sigreturn\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa1\xbf\xee \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x89\xcb_\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x00GNU\x00gold 1.16\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00GNU\x004\x93\xb8\xcc$\xaaU\x92\xcd\\\xc0o\xbeb)*\xe1T\x90\x9f;8\x00\x00\x00\x00\x00\x00T \x00\x00T\x00\x00\x00d \x00\x00l\x00\x00\x00\x9c \x00\x00\x84\x00\x00\x00\xfc \x00\x00\xac\x00\x00\x00<\x00\x00\xc4\x00\x00\x00\xf4\x00\x00\xdc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00zR\x00x \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8 \x00\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x004\x00\x00\x00\xf0 \x00\x004\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00$\x00\x00\x00L\x00\x00\x00 \x00\x00`\x00\x00\x00\x00A0\x9d\x9eB\x93\x94T\xde\xdd\xd3\xd4\x00\x00\x00\x00\x00\x00\x00\x00t\x00\x00\x00H \x00\x00<\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x8c\x00\x00\x00p \x00\x00\xb4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa4\x00\x00\x00\x00\x00\xb4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Pp\xff\xff\xff\xff\xff \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0p\xff\xff\xff\xff\xff\n\x00\x00\x00\x00\x00\x00\x00w\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 
p\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\xff\xffo\x00\x00\x00\x00Xp\xff\xff\xff\xff\xff\xfc\xff\xffo\x00\x00\x00\x00dp\xff\xff\xff\xff\xff\xfd\xff\xffo\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0
0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x
00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x80\xd2\x00\x00\xd4\xc0_\xd6 \xd5\xe3\xaa\x00q\xe0\x00\x00T\x00q\xa0\x00\x00T\xc0\x00\x004(\x80\xd2\x00\x00\xd4\xc0_\xd6\xe0\xaaZ\x00\x00\xe0\xaa*\x00\x00 \xd5\xfd{\xbd\xa9\xfd\x00\x91\xf3S\xa9\xf4\xaa\xc0\x00\xb4\xf3\x00\xaa\xe0\x83\x00\x91!\x00\x00\x94\xa0\x005\xe2B\xa9\xe0\xf9\x9e\xd2`j\xbc\xf2\xa0t\xd3\xf2\x80\xe4\xf2 |@\x9b\x00\xfcG\x93\x00\xfc\x81\xcbb\x00\xa9\x00\x00\x80RT\x00\x00\xb4\x9f\x00\xf9\xf3SA\xa9\xfd{è\xc0_\xd6\x00q\xac\x00\x00T\xc0\x00\xf86H\x80\xd2\x00\x00\xd4\xc0_\xd6\x00q\x81\xff\xffT\xa1\x00\x00\xb4\"\x00\x80\xd2\x00\x00\x80R?\x00\xa9\xc0_\xd6\x00\x00\x80R\xc0_\xd6\xd5 B\xf7\xfeC\x00@\xf9\xe1\x00\xaa\xbf9\xd5cxF\x9cB\xa9e|@\x93D\xa0C\xa9@\xe0;տ9\xd5C\x00@\xf9\x00\xeb\xe1\xfe\xffT\x86\x00\xb4\xff\x00\x00\xeb\x00\x80\xd2L\x00T@\xd9\xd2\x00\x00\xcbCs\xe7\xf2\xfc\x93cȚ|Û\xa2\x9b\x00|\x9bB\x80\xc0\x93\x84\x00\x8bbJ\x8b\xd2b\xb4\xf2@\x99҃\xfcI\xd3\xe2\xd7\xf2\x82\xe0\xf2Es\xa7\xf2\x00\x00\x80Rc|›b\xfcK\xd3\"\x00\x00\xf9B\x90\x9b\"\x00\xf9\xc0_\xd6\x00\x00\x80R(\x80\xd2\x00\x00\xd4\xc0_\xd6 Ղ\xf1\xfeC\x00@\xf9\xe1\x00\xaa\xbf9\xd5cxF\x9c@\xa9e|@\x93D\xa0A\xa9@\xe0;տ9\xd5C\x00@\xf9\x00\xeb\xe1\xfe\xffT\x86\x00\xb4\xff\x00\x00\xeb\x00\x80\xd2L\x00T@\xd9\xd2\x00\x00\xcbCs\xe7\xf2\xfc\x93cȚ|Û\xa2\x9b\x00|\x9bB\x80\xc0\x93\x84\x00\x8bbJ\x8b\xd2b\xb4\xf2@\x99҃\xfcI\xd3\xe2\xd7\xf2\x82\xe0\xf2Es\xa7\xf2\x00\x00\x80Rc|›b\xfcK\xd3\"\x00\x00\xf9B\x90\x9b\"\x00\xf9\xc0_\xd6 \x00\x80R(\x80\xd2\x00\x00\xd4\xc0_\xd6\x00GCC: (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf1\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00 \x000p\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\xf1\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00 
\x00p\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 \x00p\xff\xff\xff\xff\xff\xb4\x00\x00\x00\x00\x00\x00\x00<\x00\x00\x00\x00 \x00\xd0p\xff\xff\xff\xff\xff\xb4\x00\x00\x00\x00\x00\x00\x00`\x00\x00\x00 \x00\xd0p\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00i\x00\x00\x00\x00\x00\xf1\xff\x00\x00p\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00v\x00\x00\x00\x00\x00\xf1\xff\x00\xf0o\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00~\x00\x00\x00\x00\xf1\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x8b\x00\x00\x00\x00 \x00\xd8p\xff\xff\xff\xff\xff<\x00\x00\x00\x00\x00\x00\x00\xa1\x00\x00\x00\x00 \x00xp\xff\xff\xff\xff\xff`\x00\x00\x00\x00\x00\x00\x00\xb7\x00\x00\x00\x00 \x00@p\xff\xff\xff\xff\xff4\x00\x00\x00\x00\x00\x00\x00\xce\x00\x00\x00\x00 \x000p\xff\xff\xff\xff\xff \x00\x00\x00\x00\x00\x00\x00\x00vdso.cc\x00$x\x00vdso_time.cc\x00_ZN4vdso13ClockRealtimeEP8timespec\x00_ZN4vdso14ClockMonotonicEP8timespec\x00_DYNAMIC\x00VDSO_PRELINK\x00_params\x00LINUX_2.6.39\x00__kernel_clock_getres\x00__kernel_gettimeofday\x00__kernel_clock_gettime\x00__kernel_rt_sigreturn\x00\x00.text\x00.comment\x00.bss\x00.dynstr\x00.eh_frame_hdr\x00.gnu.version\x00.dynsym\x00.hash\x00.note\x00.eh_frame\x00.gnu.version_d\x00.dynamic\x00.shstrtab\x00.strtab\x00.symtab\x00.data\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 p\xff\xff\xff\xff\xff \x00\x00\x00\x00\x00\x00,\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x008\x00\x00\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Pp\xff\xff\xff\xff\xffP\x00\x00\x00\x00\x00\x00\x90\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0p\xff\xff\xff\xff\xff\xe0\x00\x00\x00\x00\x00\x00w\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00\xff\xff\xffo\x00\x00\x00\x00\x00\x00\x00Xp\xff\xff\xff\xff\xffX\x00\x00\x00\x00\x00\x00 
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00V\x00\x00\x00\xfd\xff\xffo\x00\x00\x00\x00\x00\x00\x00dp\xff\xff\xff\xff\xffd\x00\x00\x00\x00\x00\x008\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00F\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x9cp\xff\xff\xff\xff\xff\x9c\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xdcp\xff\xff\xff\xff\xff\xdc\x00\x00\x00\x00\x00\x00<\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00L\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00p\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\xb8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd0p\xff\xff\xff\xff\xff\xd0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0p\xff\xff\xff\xff\xff\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x000p\xff\xff\xff\xff\xff0\x00\x00\x00\x00\x00\x00T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x84p\xff\xff\xff\xff\xff\x84\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x000\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x84\x00\x00\x00\x00\x00\x00+\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb0\x00\x00\x00\x00\x00\x00h\x00\x00\x00\x00\x00\x00\x00\x00\x00\n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\x00\x00\x00\x00\x00\x00\x8e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")
diff --git a/pkg/sentry/memmap/BUILD b/pkg/sentry/memmap/BUILD
deleted file mode 100644
index c30e88725..000000000
--- a/pkg/sentry/memmap/BUILD
+++ /dev/null
@@ -1,68 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "mappable_range",
- out = "mappable_range.go",
- package = "memmap",
- prefix = "Mappable",
- template = "//pkg/segment:generic_range",
- types = {
- "T": "uint64",
- },
-)
-
-go_template_instance(
- name = "mapping_set_impl",
- out = "mapping_set_impl.go",
- package = "memmap",
- prefix = "Mapping",
- template = "//pkg/segment:generic_set",
- types = {
- "Key": "uint64",
- "Range": "MappableRange",
- "Value": "MappingsOfRange",
- "Functions": "mappingSetFunctions",
- },
-)
-
-go_template_instance(
- name = "file_range",
- out = "file_range.go",
- package = "memmap",
- prefix = "File",
- template = "//pkg/segment:generic_range",
- types = {
- "T": "uint64",
- },
-)
-
-go_library(
- name = "memmap",
- srcs = [
- "file_range.go",
- "mappable_range.go",
- "mapping_set.go",
- "mapping_set_impl.go",
- "memmap.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/context",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/safemem",
- "//pkg/syserror",
- "//pkg/usermem",
- ],
-)
-
-go_test(
- name = "memmap_test",
- size = "small",
- srcs = ["mapping_set_test.go"],
- library = ":memmap",
- deps = ["//pkg/hostarch"],
-)
diff --git a/pkg/sentry/memmap/file_range.go b/pkg/sentry/memmap/file_range.go
new file mode 100644
index 000000000..6f0b4bde4
--- /dev/null
+++ b/pkg/sentry/memmap/file_range.go
@@ -0,0 +1,76 @@
+package memmap
+
+// A Range represents a contiguous range of T.
+//
+// +stateify savable
+type FileRange struct {
+ // Start is the inclusive start of the range.
+ Start uint64
+
+ // End is the exclusive end of the range.
+ End uint64
+}
+
+// WellFormed returns true if r.Start <= r.End. All other methods on a Range
+// require that the Range is well-formed.
+//
+//go:nosplit
+func (r FileRange) WellFormed() bool {
+ return r.Start <= r.End
+}
+
+// Length returns the length of the range.
+//
+//go:nosplit
+func (r FileRange) Length() uint64 {
+ return r.End - r.Start
+}
+
+// Contains returns true if r contains x.
+//
+//go:nosplit
+func (r FileRange) Contains(x uint64) bool {
+ return r.Start <= x && x < r.End
+}
+
+// Overlaps returns true if r and r2 overlap.
+//
+//go:nosplit
+func (r FileRange) Overlaps(r2 FileRange) bool {
+ return r.Start < r2.End && r2.Start < r.End
+}
+
+// IsSupersetOf returns true if r is a superset of r2; that is, the range r2 is
+// contained within r.
+//
+//go:nosplit
+func (r FileRange) IsSupersetOf(r2 FileRange) bool {
+ return r.Start <= r2.Start && r.End >= r2.End
+}
+
+// Intersect returns a range consisting of the intersection between r and r2.
+// If r and r2 do not overlap, Intersect returns a range with unspecified
+// bounds, but for which Length() == 0.
+//
+//go:nosplit
+func (r FileRange) Intersect(r2 FileRange) FileRange {
+ if r.Start < r2.Start {
+ r.Start = r2.Start
+ }
+ if r.End > r2.End {
+ r.End = r2.End
+ }
+ if r.End < r.Start {
+ r.End = r.Start
+ }
+ return r
+}
+
+// CanSplitAt returns true if it is legal to split a segment spanning the range
+// r at x; that is, splitting at x would produce two ranges, both of which have
+// non-zero length.
+//
+//go:nosplit
+func (r FileRange) CanSplitAt(x uint64) bool {
+ return r.Contains(x) && r.Start < x
+}
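
For orientation, here is a brief usage sketch of the generated FileRange type; the example test file and the output comments are illustrative, not part of this change:

    package memmap_test

    import (
    	"fmt"

    	"gvisor.dev/gvisor/pkg/sentry/memmap"
    )

    func ExampleFileRange() {
    	a := memmap.FileRange{Start: 0x1000, End: 0x3000}
    	b := memmap.FileRange{Start: 0x2000, End: 0x4000}

    	fmt.Println(a.Length())           // 8192: the range covers [0x1000, 0x3000)
    	fmt.Println(a.Contains(0x3000))   // false: End is exclusive
    	fmt.Println(a.Overlaps(b))        // true
    	fmt.Println(a.Intersect(b))       // {8192 12288}, i.e. [0x2000, 0x3000)
    	fmt.Println(a.CanSplitAt(0x1000)) // false: a split must leave both halves non-empty
    }
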
diff --git a/pkg/sentry/memmap/mappable_range.go b/pkg/sentry/memmap/mappable_range.go
new file mode 100644
index 000000000..7b7312cb6
--- /dev/null
+++ b/pkg/sentry/memmap/mappable_range.go
@@ -0,0 +1,76 @@
+package memmap
+
+// A Range represents a contiguous range of T.
+//
+// +stateify savable
+type MappableRange struct {
+ // Start is the inclusive start of the range.
+ Start uint64
+
+ // End is the exclusive end of the range.
+ End uint64
+}
+
+// WellFormed returns true if r.Start <= r.End. All other methods on a Range
+// require that the Range is well-formed.
+//
+//go:nosplit
+func (r MappableRange) WellFormed() bool {
+ return r.Start <= r.End
+}
+
+// Length returns the length of the range.
+//
+//go:nosplit
+func (r MappableRange) Length() uint64 {
+ return r.End - r.Start
+}
+
+// Contains returns true if r contains x.
+//
+//go:nosplit
+func (r MappableRange) Contains(x uint64) bool {
+ return r.Start <= x && x < r.End
+}
+
+// Overlaps returns true if r and r2 overlap.
+//
+//go:nosplit
+func (r MappableRange) Overlaps(r2 MappableRange) bool {
+ return r.Start < r2.End && r2.Start < r.End
+}
+
+// IsSupersetOf returns true if r is a superset of r2; that is, the range r2 is
+// contained within r.
+//
+//go:nosplit
+func (r MappableRange) IsSupersetOf(r2 MappableRange) bool {
+ return r.Start <= r2.Start && r.End >= r2.End
+}
+
+// Intersect returns a range consisting of the intersection between r and r2.
+// If r and r2 do not overlap, Intersect returns a range with unspecified
+// bounds, but for which Length() == 0.
+//
+//go:nosplit
+func (r MappableRange) Intersect(r2 MappableRange) MappableRange {
+ if r.Start < r2.Start {
+ r.Start = r2.Start
+ }
+ if r.End > r2.End {
+ r.End = r2.End
+ }
+ if r.End < r.Start {
+ r.End = r.Start
+ }
+ return r
+}
+
+// CanSplitAt returns true if it is legal to split a segment spanning the range
+// r at x; that is, splitting at x would produce two ranges, both of which have
+// non-zero length.
+//
+//go:nosplit
+func (r MappableRange) CanSplitAt(x uint64) bool {
+ return r.Contains(x) && r.Start < x
+}
diff --git a/pkg/sentry/memmap/mapping_set_impl.go b/pkg/sentry/memmap/mapping_set_impl.go
new file mode 100644
index 000000000..c32df9259
--- /dev/null
+++ b/pkg/sentry/memmap/mapping_set_impl.go
@@ -0,0 +1,1643 @@
+package memmap
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// trackGaps is an optional parameter.
+//
+// If trackGaps is 1, the Set will track maximum gap size recursively,
+// enabling the GapIterator.{Prev,Next}LargeEnoughGap functions. In this
+// case, Key must be an unsigned integer.
+//
+// trackGaps must be 0 or 1.
+const MappingtrackGaps = 0
+
+var _ = uint8(MappingtrackGaps << 7) // Will fail if not zero or one.
+
+// dynamicGap is a type that disappears if trackGaps is 0.
+type MappingdynamicGap [MappingtrackGaps]uint64
+
+// Get returns the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *MappingdynamicGap) Get() uint64 {
+ return d[:][0]
+}
+
+// Set sets the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *MappingdynamicGap) Set(v uint64) {
+ d[:][0] = v
+}
+
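A side note on the [MappingtrackGaps]uint64 idiom above: when the constant is 0 the array type has zero length, so the field occupies no memory and the gap-tracking code effectively compiles away; the shifted-constant assertion rejects any value other than 0 or 1 at compile time. A standalone sketch of the same idiom (all names below are hypothetical):

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    const trackGaps = 0 // compile-time switch; must be 0 or 1

    var _ = uint8(trackGaps << 7) // constant overflows uint8 (compile error) if trackGaps > 1

    // dynamicGap has zero elements, and therefore zero size, when trackGaps == 0.
    type dynamicGap [trackGaps]uint64

    type node struct {
    	maxGap dynamicGap // contributes no size when gap tracking is disabled
    	keys   [4]uint64
    }

    func main() {
    	fmt.Println(unsafe.Sizeof(node{})) // 32 on 64-bit: only the keys array is stored
    }
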
+const (
+ // minDegree is the minimum degree of an internal node in a Set B-tree.
+ //
+ // - Any non-root node has at least minDegree-1 segments.
+ //
+ // - Any non-root internal (non-leaf) node has at least minDegree children.
+ //
+ // - The root node may have fewer than minDegree-1 segments, but it may
+ // only have 0 segments if the tree is empty.
+ //
+ // Our implementation requires minDegree >= 3. Higher values of minDegree
+ // usually improve performance, but increase memory usage for small sets.
+ MappingminDegree = 3
+
+ MappingmaxDegree = 2 * MappingminDegree
+)
+
+// A Set is a mapping of segments with non-overlapping Range keys. The zero
+// value for a Set is an empty set. Set values are not safely movable nor
+// copyable. Set is thread-compatible.
+//
+// +stateify savable
+type MappingSet struct {
+ root Mappingnode `state:".(*MappingSegmentDataSlices)"`
+}
+
+// IsEmpty returns true if the set contains no segments.
+func (s *MappingSet) IsEmpty() bool {
+ return s.root.nrSegments == 0
+}
+
+// IsEmptyRange returns true iff no segments in the set overlap the given
+// range. This is semantically equivalent to s.SpanRange(r) == 0, but may be
+// more efficient.
+func (s *MappingSet) IsEmptyRange(r MappableRange) bool {
+ switch {
+ case r.Length() < 0:
+ panic(fmt.Sprintf("invalid range %v", r))
+ case r.Length() == 0:
+ return true
+ }
+ _, gap := s.Find(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ return r.End <= gap.End()
+}
+
+// Span returns the total size of all segments in the set.
+func (s *MappingSet) Span() uint64 {
+ var sz uint64
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ sz += seg.Range().Length()
+ }
+ return sz
+}
+
+// SpanRange returns the total size of the intersection of segments in the set
+// with the given range.
+func (s *MappingSet) SpanRange(r MappableRange) uint64 {
+ switch {
+ case r.Length() < 0:
+ panic(fmt.Sprintf("invalid range %v", r))
+ case r.Length() == 0:
+ return 0
+ }
+ var sz uint64
+ for seg := s.LowerBoundSegment(r.Start); seg.Ok() && seg.Start() < r.End; seg = seg.NextSegment() {
+ sz += seg.Range().Intersect(r).Length()
+ }
+ return sz
+}
+
+// FirstSegment returns the first segment in the set. If the set is empty,
+// FirstSegment returns a terminal iterator.
+func (s *MappingSet) FirstSegment() MappingIterator {
+ if s.root.nrSegments == 0 {
+ return MappingIterator{}
+ }
+ return s.root.firstSegment()
+}
+
+// LastSegment returns the last segment in the set. If the set is empty,
+// LastSegment returns a terminal iterator.
+func (s *MappingSet) LastSegment() MappingIterator {
+ if s.root.nrSegments == 0 {
+ return MappingIterator{}
+ }
+ return s.root.lastSegment()
+}
+
+// FirstGap returns the first gap in the set.
+func (s *MappingSet) FirstGap() MappingGapIterator {
+ n := &s.root
+ for n.hasChildren {
+ n = n.children[0]
+ }
+ return MappingGapIterator{n, 0}
+}
+
+// LastGap returns the last gap in the set.
+func (s *MappingSet) LastGap() MappingGapIterator {
+ n := &s.root
+ for n.hasChildren {
+ n = n.children[n.nrSegments]
+ }
+ return MappingGapIterator{n, n.nrSegments}
+}
+
+// Find returns the segment or gap whose range contains the given key. If a
+// segment is found, the returned Iterator is non-terminal and the
+// returned GapIterator is terminal. Otherwise, the returned Iterator is
+// terminal and the returned GapIterator is non-terminal.
+func (s *MappingSet) Find(key uint64) (MappingIterator, MappingGapIterator) {
+ n := &s.root
+ for {
+
+ lower := 0
+ upper := n.nrSegments
+ for lower < upper {
+ i := lower + (upper-lower)/2
+ if r := n.keys[i]; key < r.End {
+ if key >= r.Start {
+ return MappingIterator{n, i}, MappingGapIterator{}
+ }
+ upper = i
+ } else {
+ lower = i + 1
+ }
+ }
+ i := lower
+ if !n.hasChildren {
+ return MappingIterator{}, MappingGapIterator{n, i}
+ }
+ n = n.children[i]
+ }
+}
+
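The Find method above performs a binary search within each B-tree node and descends until it lands on either a segment or a gap. As a flattened sketch of the same search, the hypothetical helper below (not part of the generated set, but usable alongside it in package memmap) applies the identical comparison logic to a plain sorted slice:

    // findFlat reports either the index of the range in keys that contains key
    // (found == true), or the index of the gap in front of which key falls
    // (found == false). keys must be sorted by Start and non-overlapping.
    func findFlat(keys []MappableRange, key uint64) (idx int, found bool) {
    	lower, upper := 0, len(keys)
    	for lower < upper {
    		i := lower + (upper-lower)/2
    		if r := keys[i]; key < r.End {
    			if key >= r.Start {
    				return i, true // key lies inside keys[i]
    			}
    			upper = i // key falls before keys[i]
    		} else {
    			lower = i + 1 // key falls past keys[i]
    		}
    	}
    	return lower, false // key lies in the gap between keys[lower-1] and keys[lower]
    }
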
+// FindSegment returns the segment whose range contains the given key. If no
+// such segment exists, FindSegment returns a terminal iterator.
+func (s *MappingSet) FindSegment(key uint64) MappingIterator {
+ seg, _ := s.Find(key)
+ return seg
+}
+
+// LowerBoundSegment returns the segment with the lowest range that contains a
+// key greater than or equal to min. If no such segment exists,
+// LowerBoundSegment returns a terminal iterator.
+func (s *MappingSet) LowerBoundSegment(min uint64) MappingIterator {
+ seg, gap := s.Find(min)
+ if seg.Ok() {
+ return seg
+ }
+ return gap.NextSegment()
+}
+
+// UpperBoundSegment returns the segment with the highest range that contains a
+// key less than or equal to max. If no such segment exists, UpperBoundSegment
+// returns a terminal iterator.
+func (s *MappingSet) UpperBoundSegment(max uint64) MappingIterator {
+ seg, gap := s.Find(max)
+ if seg.Ok() {
+ return seg
+ }
+ return gap.PrevSegment()
+}
+
+// FindGap returns the gap containing the given key. If no such gap exists
+// (i.e. the set contains a segment containing that key), FindGap returns a
+// terminal iterator.
+func (s *MappingSet) FindGap(key uint64) MappingGapIterator {
+ _, gap := s.Find(key)
+ return gap
+}
+
+// LowerBoundGap returns the gap with the lowest range that is greater than or
+// equal to min.
+func (s *MappingSet) LowerBoundGap(min uint64) MappingGapIterator {
+ seg, gap := s.Find(min)
+ if gap.Ok() {
+ return gap
+ }
+ return seg.NextGap()
+}
+
+// UpperBoundGap returns the gap with the highest range that is less than or
+// equal to max.
+func (s *MappingSet) UpperBoundGap(max uint64) MappingGapIterator {
+ seg, gap := s.Find(max)
+ if gap.Ok() {
+ return gap
+ }
+ return seg.PrevGap()
+}
+
+// Add inserts the given segment into the set and returns true. If the new
+// segment can be merged with adjacent segments, Add will do so. If the new
+// segment would overlap an existing segment, Add returns false. If Add
+// succeeds, all existing iterators are invalidated.
+func (s *MappingSet) Add(r MappableRange, val MappingsOfRange) bool {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ gap := s.FindGap(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ if r.End > gap.End() {
+ return false
+ }
+ s.Insert(gap, r, val)
+ return true
+}
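+
+ // Example (illustrative sketch; val's zero value stands in for a real
+ // MappingsOfRange, and the range is hypothetical):
+ //
+ //	var s MappingSet
+ //	var val MappingsOfRange
+ //	if !s.Add(MappableRange{Start: 0x1000, End: 0x3000}, val) {
+ //		// The range overlapped an existing segment; nothing was inserted.
+ //	}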
+
+// AddWithoutMerging inserts the given segment into the set and returns true.
+// If it would overlap an existing segment, AddWithoutMerging does nothing and
+// returns false. If AddWithoutMerging succeeds, all existing iterators are
+// invalidated.
+func (s *MappingSet) AddWithoutMerging(r MappableRange, val MappingsOfRange) bool {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ gap := s.FindGap(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ if r.End > gap.End() {
+ return false
+ }
+ s.InsertWithoutMergingUnchecked(gap, r, val)
+ return true
+}
+
+// Insert inserts the given segment into the given gap. If the new segment can
+// be merged with adjacent segments, Insert will do so. Insert returns an
+// iterator to the segment containing the inserted value (which may have been
+// merged with other values). All existing iterators (including gap, but not
+// including the returned iterator) are invalidated.
+//
+// If the gap cannot accommodate the segment, or if r is invalid, Insert panics.
+//
+ // Insert is semantically equivalent to an InsertWithoutMerging followed by a
+// Merge, but may be more efficient. Note that there is no unchecked variant of
+// Insert since Insert must retrieve and inspect gap's predecessor and
+// successor segments regardless.
+func (s *MappingSet) Insert(gap MappingGapIterator, r MappableRange, val MappingsOfRange) MappingIterator {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ prev, next := gap.PrevSegment(), gap.NextSegment()
+ if prev.Ok() && prev.End() > r.Start {
+ panic(fmt.Sprintf("new segment %v overlaps predecessor %v", r, prev.Range()))
+ }
+ if next.Ok() && next.Start() < r.End {
+ panic(fmt.Sprintf("new segment %v overlaps successor %v", r, next.Range()))
+ }
+ if prev.Ok() && prev.End() == r.Start {
+ if mval, ok := (mappingSetFunctions{}).Merge(prev.Range(), prev.Value(), r, val); ok {
+ shrinkMaxGap := MappingtrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
+ prev.SetEndUnchecked(r.End)
+ prev.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ if next.Ok() && next.Start() == r.End {
+ val = mval
+ if mval, ok := (mappingSetFunctions{}).Merge(prev.Range(), val, next.Range(), next.Value()); ok {
+ prev.SetEndUnchecked(next.End())
+ prev.SetValue(mval)
+ return s.Remove(next).PrevSegment()
+ }
+ }
+ return prev
+ }
+ }
+ if next.Ok() && next.Start() == r.End {
+ if mval, ok := (mappingSetFunctions{}).Merge(r, val, next.Range(), next.Value()); ok {
+ shrinkMaxGap := MappingtrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
+ next.SetStartUnchecked(r.Start)
+ next.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ return next
+ }
+ }
+
+ return s.InsertWithoutMergingUnchecked(gap, r, val)
+}
+
+// InsertWithoutMerging inserts the given segment into the given gap and
+// returns an iterator to the inserted segment. All existing iterators
+// (including gap, but not including the returned iterator) are invalidated.
+//
+// If the gap cannot accommodate the segment, or if r is invalid,
+// InsertWithoutMerging panics.
+func (s *MappingSet) InsertWithoutMerging(gap MappingGapIterator, r MappableRange, val MappingsOfRange) MappingIterator {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ if gr := gap.Range(); !gr.IsSupersetOf(r) {
+ panic(fmt.Sprintf("cannot insert segment range %v into gap range %v", r, gr))
+ }
+ return s.InsertWithoutMergingUnchecked(gap, r, val)
+}
+
+// InsertWithoutMergingUnchecked inserts the given segment into the given gap
+// and returns an iterator to the inserted segment. All existing iterators
+// (including gap, but not including the returned iterator) are invalidated.
+//
+// Preconditions:
+// * r.Start >= gap.Start().
+// * r.End <= gap.End().
+func (s *MappingSet) InsertWithoutMergingUnchecked(gap MappingGapIterator, r MappableRange, val MappingsOfRange) MappingIterator {
+ gap = gap.node.rebalanceBeforeInsert(gap)
+ splitMaxGap := MappingtrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
+ copy(gap.node.keys[gap.index+1:], gap.node.keys[gap.index:gap.node.nrSegments])
+ copy(gap.node.values[gap.index+1:], gap.node.values[gap.index:gap.node.nrSegments])
+ gap.node.keys[gap.index] = r
+ gap.node.values[gap.index] = val
+ gap.node.nrSegments++
+ if splitMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ return MappingIterator{gap.node, gap.index}
+}
+
+// Remove removes the given segment and returns an iterator to the vacated gap.
+// All existing iterators (including seg, but not including the returned
+// iterator) are invalidated.
+func (s *MappingSet) Remove(seg MappingIterator) MappingGapIterator {
+
+ if seg.node.hasChildren {
+
+ victim := seg.PrevSegment()
+
+ seg.SetRangeUnchecked(victim.Range())
+ seg.SetValue(victim.Value())
+
+ nextAdjacentNode := seg.NextSegment().node
+ if MappingtrackGaps != 0 {
+ nextAdjacentNode.updateMaxGapLeaf()
+ }
+ return s.Remove(victim).NextGap()
+ }
+ copy(seg.node.keys[seg.index:], seg.node.keys[seg.index+1:seg.node.nrSegments])
+ copy(seg.node.values[seg.index:], seg.node.values[seg.index+1:seg.node.nrSegments])
+ mappingSetFunctions{}.ClearValue(&seg.node.values[seg.node.nrSegments-1])
+ seg.node.nrSegments--
+ if MappingtrackGaps != 0 {
+ seg.node.updateMaxGapLeaf()
+ }
+ return seg.node.rebalanceAfterRemove(MappingGapIterator{seg.node, seg.index})
+}
+
+// RemoveAll removes all segments from the set. All existing iterators are
+// invalidated.
+func (s *MappingSet) RemoveAll() {
+ s.root = Mappingnode{}
+}
+
+// RemoveRange removes all segments in the given range. An iterator to the
+// newly formed gap is returned, and all existing iterators are invalidated.
+func (s *MappingSet) RemoveRange(r MappableRange) MappingGapIterator {
+ seg, gap := s.Find(r.Start)
+ if seg.Ok() {
+ seg = s.Isolate(seg, r)
+ gap = s.Remove(seg)
+ }
+ for seg = gap.NextSegment(); seg.Ok() && seg.Start() < r.End; seg = gap.NextSegment() {
+ seg = s.Isolate(seg, r)
+ gap = s.Remove(seg)
+ }
+ return gap
+}
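+
+ // Example (illustrative sketch; assumes a MappingSet s is in scope and the
+ // range is hypothetical):
+ //
+ //	gap := s.RemoveRange(MappableRange{Start: 0x1000, End: 0x2000})
+ //	_ = gap.Range() // the resulting gap, which may extend beyond [0x1000, 0x2000)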
+
+// Merge attempts to merge two neighboring segments. If successful, Merge
+// returns an iterator to the merged segment, and all existing iterators are
+// invalidated. Otherwise, Merge returns a terminal iterator.
+//
+// If first is not the predecessor of second, Merge panics.
+func (s *MappingSet) Merge(first, second MappingIterator) MappingIterator {
+ if first.NextSegment() != second {
+ panic(fmt.Sprintf("attempt to merge non-neighboring segments %v, %v", first.Range(), second.Range()))
+ }
+ return s.MergeUnchecked(first, second)
+}
+
+// MergeUnchecked attempts to merge two neighboring segments. If successful,
+// MergeUnchecked returns an iterator to the merged segment, and all existing
+// iterators are invalidated. Otherwise, MergeUnchecked returns a terminal
+// iterator.
+//
+// Precondition: first is the predecessor of second: first.NextSegment() ==
+// second, first == second.PrevSegment().
+func (s *MappingSet) MergeUnchecked(first, second MappingIterator) MappingIterator {
+ if first.End() == second.Start() {
+ if mval, ok := (mappingSetFunctions{}).Merge(first.Range(), first.Value(), second.Range(), second.Value()); ok {
+
+ first.SetEndUnchecked(second.End())
+ first.SetValue(mval)
+
+ return s.Remove(second).PrevSegment()
+ }
+ }
+ return MappingIterator{}
+}
+
+// MergeAll attempts to merge all adjacent segments in the set. All existing
+// iterators are invalidated.
+func (s *MappingSet) MergeAll() {
+ seg := s.FirstSegment()
+ if !seg.Ok() {
+ return
+ }
+ next := seg.NextSegment()
+ for next.Ok() {
+ if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
+ seg, next = mseg, mseg.NextSegment()
+ } else {
+ seg, next = next, next.NextSegment()
+ }
+ }
+}
+
+// MergeRange attempts to merge all adjacent segments that contain a key in the
+ // given range. All existing iterators are invalidated.
+func (s *MappingSet) MergeRange(r MappableRange) {
+ seg := s.LowerBoundSegment(r.Start)
+ if !seg.Ok() {
+ return
+ }
+ next := seg.NextSegment()
+ for next.Ok() && next.Range().Start < r.End {
+ if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
+ seg, next = mseg, mseg.NextSegment()
+ } else {
+ seg, next = next, next.NextSegment()
+ }
+ }
+}
+
+// MergeAdjacent attempts to merge the segment containing r.Start with its
+// predecessor, and the segment containing r.End-1 with its successor.
+func (s *MappingSet) MergeAdjacent(r MappableRange) {
+ first := s.FindSegment(r.Start)
+ if first.Ok() {
+ if prev := first.PrevSegment(); prev.Ok() {
+ s.Merge(prev, first)
+ }
+ }
+ last := s.FindSegment(r.End - 1)
+ if last.Ok() {
+ if next := last.NextSegment(); next.Ok() {
+ s.Merge(last, next)
+ }
+ }
+}
+
+// Split splits the given segment at the given key and returns iterators to the
+// two resulting segments. All existing iterators (including seg, but not
+// including the returned iterators) are invalidated.
+//
+// If the segment cannot be split at split (because split is at the start or
+// end of the segment's range, so splitting would produce a segment with zero
+// length, or because split falls outside the segment's range altogether),
+// Split panics.
+func (s *MappingSet) Split(seg MappingIterator, split uint64) (MappingIterator, MappingIterator) {
+ if !seg.Range().CanSplitAt(split) {
+ panic(fmt.Sprintf("can't split %v at %v", seg.Range(), split))
+ }
+ return s.SplitUnchecked(seg, split)
+}
+
+// SplitUnchecked splits the given segment at the given key and returns
+// iterators to the two resulting segments. All existing iterators (including
+// seg, but not including the returned iterators) are invalidated.
+//
+ // Preconditions: seg.Start() < split < seg.End().
+func (s *MappingSet) SplitUnchecked(seg MappingIterator, split uint64) (MappingIterator, MappingIterator) {
+ val1, val2 := (mappingSetFunctions{}).Split(seg.Range(), seg.Value(), split)
+ end2 := seg.End()
+ seg.SetEndUnchecked(split)
+ seg.SetValue(val1)
+ seg2 := s.InsertWithoutMergingUnchecked(seg.NextGap(), MappableRange{split, end2}, val2)
+
+ return seg2.PrevSegment(), seg2
+}
+
+// SplitAt splits the segment straddling split, if one exists. SplitAt returns
+// true if a segment was split and false otherwise. If SplitAt splits a
+// segment, all existing iterators are invalidated.
+func (s *MappingSet) SplitAt(split uint64) bool {
+ if seg := s.FindSegment(split); seg.Ok() && seg.Range().CanSplitAt(split) {
+ s.SplitUnchecked(seg, split)
+ return true
+ }
+ return false
+}
+
+// Isolate ensures that the given segment's range does not escape r by
+// splitting at r.Start and r.End if necessary, and returns an updated iterator
+// to the bounded segment. All existing iterators (including seg, but not
+ // including the returned iterator) are invalidated.
+func (s *MappingSet) Isolate(seg MappingIterator, r MappableRange) MappingIterator {
+ if seg.Range().CanSplitAt(r.Start) {
+ _, seg = s.SplitUnchecked(seg, r.Start)
+ }
+ if seg.Range().CanSplitAt(r.End) {
+ seg, _ = s.SplitUnchecked(seg, r.End)
+ }
+ return seg
+}
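+
+ // Example (illustrative sketch; seg, r, and newVal are assumed to be in
+ // scope, with seg overlapping r): bound a segment to r before mutating only
+ // that portion.
+ //
+ //	seg = s.Isolate(seg, r)
+ //	seg.SetValue(newVal) // affects only the part of the original segment inside r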
+
+// ApplyContiguous applies a function to a contiguous range of segments,
+// splitting if necessary. The function is applied until the first gap is
+// encountered, at which point the gap is returned. If the function is applied
+// across the entire range, a terminal gap is returned. All existing iterators
+// are invalidated.
+//
+// N.B. The Iterator must not be invalidated by the function.
+func (s *MappingSet) ApplyContiguous(r MappableRange, fn func(seg MappingIterator)) MappingGapIterator {
+ seg, gap := s.Find(r.Start)
+ if !seg.Ok() {
+ return gap
+ }
+ for {
+ seg = s.Isolate(seg, r)
+ fn(seg)
+ if seg.End() >= r.End {
+ return MappingGapIterator{}
+ }
+ gap = seg.NextGap()
+ if !gap.IsEmpty() {
+ return gap
+ }
+ seg = gap.NextSegment()
+ if !seg.Ok() {
+
+ return MappingGapIterator{}
+ }
+ }
+}
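+
+ // Example (illustrative sketch; r is a hypothetical MappableRange):
+ //
+ //	if gap := s.ApplyContiguous(r, func(seg MappingIterator) {
+ //		// seg lies entirely within r here.
+ //	}); gap.Ok() {
+ //		// r was not fully covered by segments; gap is the first hole reached.
+ //	}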
+
+// +stateify savable
+type Mappingnode struct {
+ // An internal binary tree node looks like:
+ //
+ // K
+ // / \
+ // Cl Cr
+ //
+ // where all keys in the subtree rooted by Cl (the left subtree) are less
+ // than K (the key of the parent node), and all keys in the subtree rooted
+ // by Cr (the right subtree) are greater than K.
+ //
+ // An internal B-tree node's indexes work out to look like:
+ //
+ // K0 K1 K2 ... Kn-1
+ // / \/ \/ \ ... / \
+ // C0 C1 C2 C3 ... Cn-1 Cn
+ //
+ // where n is nrSegments.
+ nrSegments int
+
+ // parent is a pointer to this node's parent. If this node is root, parent
+ // is nil.
+ parent *Mappingnode
+
+ // parentIndex is the index of this node in parent.children.
+ parentIndex int
+
+ // Flag for internal nodes that is technically redundant with "children[0]
+ // != nil", but is stored in the first cache line. "hasChildren" rather
+ // than "isLeaf" because false must be the correct value for an empty root.
+ hasChildren bool
+
+ // The longest gap within this node. If the node is a leaf, it is the maximum
+ // of the (nrSegments+1) gaps delimited by its nrSegments keys, including the
+ // 0th and nrSegments-th gaps, which may be shared with its ancestor nodes; if
+ // it is an internal node, it is the maximum of all children's maxGap.
+ maxGap MappingdynamicGap
+
+ // Nodes store keys and values in separate arrays to maximize locality in
+ // the common case (scanning keys for lookup).
+ keys [MappingmaxDegree - 1]MappableRange
+ values [MappingmaxDegree - 1]MappingsOfRange
+ children [MappingmaxDegree]*Mappingnode
+}
+
+// firstSegment returns the first segment in the subtree rooted by n.
+//
+// Preconditions: n.nrSegments != 0.
+func (n *Mappingnode) firstSegment() MappingIterator {
+ for n.hasChildren {
+ n = n.children[0]
+ }
+ return MappingIterator{n, 0}
+}
+
+// lastSegment returns the last segment in the subtree rooted by n.
+//
+// Preconditions: n.nrSegments != 0.
+func (n *Mappingnode) lastSegment() MappingIterator {
+ for n.hasChildren {
+ n = n.children[n.nrSegments]
+ }
+ return MappingIterator{n, n.nrSegments - 1}
+}
+
+func (n *Mappingnode) prevSibling() *Mappingnode {
+ if n.parent == nil || n.parentIndex == 0 {
+ return nil
+ }
+ return n.parent.children[n.parentIndex-1]
+}
+
+func (n *Mappingnode) nextSibling() *Mappingnode {
+ if n.parent == nil || n.parentIndex == n.parent.nrSegments {
+ return nil
+ }
+ return n.parent.children[n.parentIndex+1]
+}
+
+// rebalanceBeforeInsert splits n and its ancestors if they are full, as
+// required for insertion, and returns an updated iterator to the position
+// represented by gap.
+func (n *Mappingnode) rebalanceBeforeInsert(gap MappingGapIterator) MappingGapIterator {
+ if n.nrSegments < MappingmaxDegree-1 {
+ return gap
+ }
+ if n.parent != nil {
+ gap = n.parent.rebalanceBeforeInsert(gap)
+ }
+ if n.parent == nil {
+
+ left := &Mappingnode{
+ nrSegments: MappingminDegree - 1,
+ parent: n,
+ parentIndex: 0,
+ hasChildren: n.hasChildren,
+ }
+ right := &Mappingnode{
+ nrSegments: MappingminDegree - 1,
+ parent: n,
+ parentIndex: 1,
+ hasChildren: n.hasChildren,
+ }
+ copy(left.keys[:MappingminDegree-1], n.keys[:MappingminDegree-1])
+ copy(left.values[:MappingminDegree-1], n.values[:MappingminDegree-1])
+ copy(right.keys[:MappingminDegree-1], n.keys[MappingminDegree:])
+ copy(right.values[:MappingminDegree-1], n.values[MappingminDegree:])
+ n.keys[0], n.values[0] = n.keys[MappingminDegree-1], n.values[MappingminDegree-1]
+ MappingzeroValueSlice(n.values[1:])
+ if n.hasChildren {
+ copy(left.children[:MappingminDegree], n.children[:MappingminDegree])
+ copy(right.children[:MappingminDegree], n.children[MappingminDegree:])
+ MappingzeroNodeSlice(n.children[2:])
+ for i := 0; i < MappingminDegree; i++ {
+ left.children[i].parent = left
+ left.children[i].parentIndex = i
+ right.children[i].parent = right
+ right.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments = 1
+ n.hasChildren = true
+ n.children[0] = left
+ n.children[1] = right
+
+ if MappingtrackGaps != 0 {
+ left.updateMaxGapLocal()
+ right.updateMaxGapLocal()
+ }
+ if gap.node != n {
+ return gap
+ }
+ if gap.index < MappingminDegree {
+ return MappingGapIterator{left, gap.index}
+ }
+ return MappingGapIterator{right, gap.index - MappingminDegree}
+ }
+
+ copy(n.parent.keys[n.parentIndex+1:], n.parent.keys[n.parentIndex:n.parent.nrSegments])
+ copy(n.parent.values[n.parentIndex+1:], n.parent.values[n.parentIndex:n.parent.nrSegments])
+ n.parent.keys[n.parentIndex], n.parent.values[n.parentIndex] = n.keys[MappingminDegree-1], n.values[MappingminDegree-1]
+ copy(n.parent.children[n.parentIndex+2:], n.parent.children[n.parentIndex+1:n.parent.nrSegments+1])
+ for i := n.parentIndex + 2; i < n.parent.nrSegments+2; i++ {
+ n.parent.children[i].parentIndex = i
+ }
+ sibling := &Mappingnode{
+ nrSegments: MappingminDegree - 1,
+ parent: n.parent,
+ parentIndex: n.parentIndex + 1,
+ hasChildren: n.hasChildren,
+ }
+ n.parent.children[n.parentIndex+1] = sibling
+ n.parent.nrSegments++
+ copy(sibling.keys[:MappingminDegree-1], n.keys[MappingminDegree:])
+ copy(sibling.values[:MappingminDegree-1], n.values[MappingminDegree:])
+ MappingzeroValueSlice(n.values[MappingminDegree-1:])
+ if n.hasChildren {
+ copy(sibling.children[:MappingminDegree], n.children[MappingminDegree:])
+ MappingzeroNodeSlice(n.children[MappingminDegree:])
+ for i := 0; i < MappingminDegree; i++ {
+ sibling.children[i].parent = sibling
+ sibling.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments = MappingminDegree - 1
+
+ if MappingtrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+
+ if gap.node != n {
+ return gap
+ }
+ if gap.index < MappingminDegree {
+ return gap
+ }
+ return MappingGapIterator{sibling, gap.index - MappingminDegree}
+}
+
+// rebalanceAfterRemove "unsplits" n and its ancestors if they are deficient
+// (contain fewer segments than required by B-tree invariants), as required for
+// removal, and returns an updated iterator to the position represented by gap.
+//
+// Precondition: n is the only node in the tree that may currently violate a
+// B-tree invariant.
+func (n *Mappingnode) rebalanceAfterRemove(gap MappingGapIterator) MappingGapIterator {
+ for {
+ if n.nrSegments >= MappingminDegree-1 {
+ return gap
+ }
+ if n.parent == nil {
+
+ return gap
+ }
+
+ if sibling := n.prevSibling(); sibling != nil && sibling.nrSegments >= MappingminDegree {
+ copy(n.keys[1:], n.keys[:n.nrSegments])
+ copy(n.values[1:], n.values[:n.nrSegments])
+ n.keys[0] = n.parent.keys[n.parentIndex-1]
+ n.values[0] = n.parent.values[n.parentIndex-1]
+ n.parent.keys[n.parentIndex-1] = sibling.keys[sibling.nrSegments-1]
+ n.parent.values[n.parentIndex-1] = sibling.values[sibling.nrSegments-1]
+ mappingSetFunctions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
+ if n.hasChildren {
+ copy(n.children[1:], n.children[:n.nrSegments+1])
+ n.children[0] = sibling.children[sibling.nrSegments]
+ sibling.children[sibling.nrSegments] = nil
+ n.children[0].parent = n
+ n.children[0].parentIndex = 0
+ for i := 1; i < n.nrSegments+2; i++ {
+ n.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments++
+ sibling.nrSegments--
+
+ if MappingtrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+ if gap.node == sibling && gap.index == sibling.nrSegments {
+ return MappingGapIterator{n, 0}
+ }
+ if gap.node == n {
+ return MappingGapIterator{n, gap.index + 1}
+ }
+ return gap
+ }
+ if sibling := n.nextSibling(); sibling != nil && sibling.nrSegments >= MappingminDegree {
+ n.keys[n.nrSegments] = n.parent.keys[n.parentIndex]
+ n.values[n.nrSegments] = n.parent.values[n.parentIndex]
+ n.parent.keys[n.parentIndex] = sibling.keys[0]
+ n.parent.values[n.parentIndex] = sibling.values[0]
+ copy(sibling.keys[:sibling.nrSegments-1], sibling.keys[1:])
+ copy(sibling.values[:sibling.nrSegments-1], sibling.values[1:])
+ mappingSetFunctions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
+ if n.hasChildren {
+ n.children[n.nrSegments+1] = sibling.children[0]
+ copy(sibling.children[:sibling.nrSegments], sibling.children[1:])
+ sibling.children[sibling.nrSegments] = nil
+ n.children[n.nrSegments+1].parent = n
+ n.children[n.nrSegments+1].parentIndex = n.nrSegments + 1
+ for i := 0; i < sibling.nrSegments; i++ {
+ sibling.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments++
+ sibling.nrSegments--
+
+ if MappingtrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+ if gap.node == sibling {
+ if gap.index == 0 {
+ return MappingGapIterator{n, n.nrSegments}
+ }
+ return MappingGapIterator{sibling, gap.index - 1}
+ }
+ return gap
+ }
+
+ p := n.parent
+ if p.nrSegments == 1 {
+
+ left, right := p.children[0], p.children[1]
+ p.nrSegments = left.nrSegments + right.nrSegments + 1
+ p.hasChildren = left.hasChildren
+ p.keys[left.nrSegments] = p.keys[0]
+ p.values[left.nrSegments] = p.values[0]
+ copy(p.keys[:left.nrSegments], left.keys[:left.nrSegments])
+ copy(p.values[:left.nrSegments], left.values[:left.nrSegments])
+ copy(p.keys[left.nrSegments+1:], right.keys[:right.nrSegments])
+ copy(p.values[left.nrSegments+1:], right.values[:right.nrSegments])
+ if left.hasChildren {
+ copy(p.children[:left.nrSegments+1], left.children[:left.nrSegments+1])
+ copy(p.children[left.nrSegments+1:], right.children[:right.nrSegments+1])
+ for i := 0; i < p.nrSegments+1; i++ {
+ p.children[i].parent = p
+ p.children[i].parentIndex = i
+ }
+ } else {
+ p.children[0] = nil
+ p.children[1] = nil
+ }
+
+ if gap.node == left {
+ return MappingGapIterator{p, gap.index}
+ }
+ if gap.node == right {
+ return MappingGapIterator{p, gap.index + left.nrSegments + 1}
+ }
+ return gap
+ }
+ // Merge n and either sibling, along with the segment separating the
+ // two, into whichever of the two nodes comes first. This is the
+ // reverse of the non-root splitting case in
+ // node.rebalanceBeforeInsert.
+ var left, right *Mappingnode
+ if n.parentIndex > 0 {
+ left = n.prevSibling()
+ right = n
+ } else {
+ left = n
+ right = n.nextSibling()
+ }
+
+ if gap.node == right {
+ gap = MappingGapIterator{left, gap.index + left.nrSegments + 1}
+ }
+ left.keys[left.nrSegments] = p.keys[left.parentIndex]
+ left.values[left.nrSegments] = p.values[left.parentIndex]
+ copy(left.keys[left.nrSegments+1:], right.keys[:right.nrSegments])
+ copy(left.values[left.nrSegments+1:], right.values[:right.nrSegments])
+ if left.hasChildren {
+ copy(left.children[left.nrSegments+1:], right.children[:right.nrSegments+1])
+ for i := left.nrSegments + 1; i < left.nrSegments+right.nrSegments+2; i++ {
+ left.children[i].parent = left
+ left.children[i].parentIndex = i
+ }
+ }
+ left.nrSegments += right.nrSegments + 1
+ copy(p.keys[left.parentIndex:], p.keys[left.parentIndex+1:p.nrSegments])
+ copy(p.values[left.parentIndex:], p.values[left.parentIndex+1:p.nrSegments])
+ mappingSetFunctions{}.ClearValue(&p.values[p.nrSegments-1])
+ copy(p.children[left.parentIndex+1:], p.children[left.parentIndex+2:p.nrSegments+1])
+ for i := 0; i < p.nrSegments; i++ {
+ p.children[i].parentIndex = i
+ }
+ p.children[p.nrSegments] = nil
+ p.nrSegments--
+
+ if MappingtrackGaps != 0 {
+ left.updateMaxGapLocal()
+ }
+
+ n = p
+ }
+}
+
+ // updateMaxGapLeaf updates maxGap bottom-up from the calling leaf until no
+ // further update is necessary.
+//
+// Preconditions: n must be a leaf node, trackGaps must be 1.
+func (n *Mappingnode) updateMaxGapLeaf() {
+ if n.hasChildren {
+ panic(fmt.Sprintf("updateMaxGapLeaf should always be called on leaf node: %v", n))
+ }
+ max := n.calculateMaxGapLeaf()
+ if max == n.maxGap.Get() {
+
+ return
+ }
+ oldMax := n.maxGap.Get()
+ n.maxGap.Set(max)
+ if max > oldMax {
+
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() >= max {
+
+ break
+ }
+
+ p.maxGap.Set(max)
+ }
+ return
+ }
+
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() > oldMax {
+
+ break
+ }
+
+ parentNewMax := p.calculateMaxGapInternal()
+ if p.maxGap.Get() == parentNewMax {
+
+ break
+ }
+
+ p.maxGap.Set(parentNewMax)
+ }
+}
+
+ // updateMaxGapLocal updates maxGap of the calling node only, without
+ // propagating to ancestor nodes.
+//
+// Precondition: trackGaps must be 1.
+func (n *Mappingnode) updateMaxGapLocal() {
+ if !n.hasChildren {
+
+ n.maxGap.Set(n.calculateMaxGapLeaf())
+ } else {
+
+ n.maxGap.Set(n.calculateMaxGapInternal())
+ }
+}
+
+ // calculateMaxGapLeaf iterates the gaps within a leaf node and calculates the
+// max.
+//
+// Preconditions: n must be a leaf node.
+func (n *Mappingnode) calculateMaxGapLeaf() uint64 {
+ max := MappingGapIterator{n, 0}.Range().Length()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := (MappingGapIterator{n, i}).Range().Length(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// calculateMaxGapInternal iterates children's maxGap within an internal node n
+ // and calculates the max.
+//
+// Preconditions: n must be a non-leaf node.
+func (n *Mappingnode) calculateMaxGapInternal() uint64 {
+ max := n.children[0].maxGap.Get()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := n.children[i].maxGap.Get(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// searchFirstLargeEnoughGap returns the first gap having at least minSize length
+ // in the subtree rooted by n. If not found, it returns a terminal gap iterator.
+func (n *Mappingnode) searchFirstLargeEnoughGap(minSize uint64) MappingGapIterator {
+ if n.maxGap.Get() < minSize {
+ return MappingGapIterator{}
+ }
+ if n.hasChildren {
+ for i := 0; i <= n.nrSegments; i++ {
+ if largeEnoughGap := n.children[i].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := 0; i <= n.nrSegments; i++ {
+ currentGap := MappingGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
+// searchLastLargeEnoughGap returns the last gap having at least minSize length
+ // in the subtree rooted by n. If not found, it returns a terminal gap iterator.
+func (n *Mappingnode) searchLastLargeEnoughGap(minSize uint64) MappingGapIterator {
+ if n.maxGap.Get() < minSize {
+ return MappingGapIterator{}
+ }
+ if n.hasChildren {
+ for i := n.nrSegments; i >= 0; i-- {
+ if largeEnoughGap := n.children[i].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := n.nrSegments; i >= 0; i-- {
+ currentGap := MappingGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
+ // An Iterator is conceptually one of:
+//
+// - A pointer to a segment in a set; or
+//
+// - A terminal iterator, which is a sentinel indicating that the end of
+// iteration has been reached.
+//
+// Iterators are copyable values and are meaningfully equality-comparable. The
+// zero value of Iterator is a terminal iterator.
+//
+// Unless otherwise specified, any mutation of a set invalidates all existing
+// iterators into the set.
+type MappingIterator struct {
+ // node is the node containing the iterated segment. If the iterator is
+ // terminal, node is nil.
+ node *Mappingnode
+
+ // index is the index of the segment in node.keys/values.
+ index int
+}
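+
+ // Example (illustrative sketch; assumes a MappingSet s is in scope): visiting
+ // every segment in ascending key order.
+ //
+ //	for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ //		fmt.Printf("%v => %v\n", seg.Range(), seg.Value())
+ //	}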
+
+// Ok returns true if the iterator is not terminal. All other methods are only
+// valid for non-terminal iterators.
+func (seg MappingIterator) Ok() bool {
+ return seg.node != nil
+}
+
+// Range returns the iterated segment's range key.
+func (seg MappingIterator) Range() MappableRange {
+ return seg.node.keys[seg.index]
+}
+
+// Start is equivalent to Range().Start, but should be preferred if only the
+// start of the range is needed.
+func (seg MappingIterator) Start() uint64 {
+ return seg.node.keys[seg.index].Start
+}
+
+// End is equivalent to Range().End, but should be preferred if only the end of
+// the range is needed.
+func (seg MappingIterator) End() uint64 {
+ return seg.node.keys[seg.index].End
+}
+
+// SetRangeUnchecked mutates the iterated segment's range key. This operation
+// does not invalidate any iterators.
+//
+// Preconditions:
+// * r.Length() > 0.
+// * The new range must not overlap an existing one:
+ // * If seg.NextSegment().Ok(), then r.End <= seg.NextSegment().Start().
+ // * If seg.PrevSegment().Ok(), then r.Start >= seg.PrevSegment().End().
+func (seg MappingIterator) SetRangeUnchecked(r MappableRange) {
+ seg.node.keys[seg.index] = r
+}
+
+// SetRange mutates the iterated segment's range key. If the new range would
+// cause the iterated segment to overlap another segment, or if the new range
+// is invalid, SetRange panics. This operation does not invalidate any
+// iterators.
+func (seg MappingIterator) SetRange(r MappableRange) {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ if prev := seg.PrevSegment(); prev.Ok() && r.Start < prev.End() {
+ panic(fmt.Sprintf("new segment range %v overlaps segment range %v", r, prev.Range()))
+ }
+ if next := seg.NextSegment(); next.Ok() && r.End > next.Start() {
+ panic(fmt.Sprintf("new segment range %v overlaps segment range %v", r, next.Range()))
+ }
+ seg.SetRangeUnchecked(r)
+}
+
+// SetStartUnchecked mutates the iterated segment's start. This operation does
+// not invalidate any iterators.
+//
+// Preconditions: The new start must be valid:
+// * start < seg.End()
+// * If seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
+func (seg MappingIterator) SetStartUnchecked(start uint64) {
+ seg.node.keys[seg.index].Start = start
+}
+
+// SetStart mutates the iterated segment's start. If the new start value would
+// cause the iterated segment to overlap another segment, or would result in an
+// invalid range, SetStart panics. This operation does not invalidate any
+// iterators.
+func (seg MappingIterator) SetStart(start uint64) {
+ if start >= seg.End() {
+ panic(fmt.Sprintf("new start %v would invalidate segment range %v", start, seg.Range()))
+ }
+ if prev := seg.PrevSegment(); prev.Ok() && start < prev.End() {
+ panic(fmt.Sprintf("new start %v would cause segment range %v to overlap segment range %v", start, seg.Range(), prev.Range()))
+ }
+ seg.SetStartUnchecked(start)
+}
+
+// SetEndUnchecked mutates the iterated segment's end. This operation does not
+// invalidate any iterators.
+//
+// Preconditions: The new end must be valid:
+// * end > seg.Start().
+// * If seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
+func (seg MappingIterator) SetEndUnchecked(end uint64) {
+ seg.node.keys[seg.index].End = end
+}
+
+// SetEnd mutates the iterated segment's end. If the new end value would cause
+// the iterated segment to overlap another segment, or would result in an
+// invalid range, SetEnd panics. This operation does not invalidate any
+// iterators.
+func (seg MappingIterator) SetEnd(end uint64) {
+ if end <= seg.Start() {
+ panic(fmt.Sprintf("new end %v would invalidate segment range %v", end, seg.Range()))
+ }
+ if next := seg.NextSegment(); next.Ok() && end > next.Start() {
+ panic(fmt.Sprintf("new end %v would cause segment range %v to overlap segment range %v", end, seg.Range(), next.Range()))
+ }
+ seg.SetEndUnchecked(end)
+}
+
+// Value returns a copy of the iterated segment's value.
+func (seg MappingIterator) Value() MappingsOfRange {
+ return seg.node.values[seg.index]
+}
+
+// ValuePtr returns a pointer to the iterated segment's value. The pointer is
+// invalidated if the iterator is invalidated. This operation does not
+// invalidate any iterators.
+func (seg MappingIterator) ValuePtr() *MappingsOfRange {
+ return &seg.node.values[seg.index]
+}
+
+// SetValue mutates the iterated segment's value. This operation does not
+// invalidate any iterators.
+func (seg MappingIterator) SetValue(val MappingsOfRange) {
+ seg.node.values[seg.index] = val
+}
+
+// PrevSegment returns the iterated segment's predecessor. If there is no
+// preceding segment, PrevSegment returns a terminal iterator.
+func (seg MappingIterator) PrevSegment() MappingIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index].lastSegment()
+ }
+ if seg.index > 0 {
+ return MappingIterator{seg.node, seg.index - 1}
+ }
+ if seg.node.parent == nil {
+ return MappingIterator{}
+ }
+ return MappingsegmentBeforePosition(seg.node.parent, seg.node.parentIndex)
+}
+
+// NextSegment returns the iterated segment's successor. If there is no
+// succeeding segment, NextSegment returns a terminal iterator.
+func (seg MappingIterator) NextSegment() MappingIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index+1].firstSegment()
+ }
+ if seg.index < seg.node.nrSegments-1 {
+ return MappingIterator{seg.node, seg.index + 1}
+ }
+ if seg.node.parent == nil {
+ return MappingIterator{}
+ }
+ return MappingsegmentAfterPosition(seg.node.parent, seg.node.parentIndex)
+}
+
+// PrevGap returns the gap immediately before the iterated segment.
+func (seg MappingIterator) PrevGap() MappingGapIterator {
+ if seg.node.hasChildren {
+
+ return seg.node.children[seg.index].lastSegment().NextGap()
+ }
+ return MappingGapIterator{seg.node, seg.index}
+}
+
+// NextGap returns the gap immediately after the iterated segment.
+func (seg MappingIterator) NextGap() MappingGapIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index+1].firstSegment().PrevGap()
+ }
+ return MappingGapIterator{seg.node, seg.index + 1}
+}
+
+// PrevNonEmpty returns the iterated segment's predecessor if it is adjacent,
+// or the gap before the iterated segment otherwise. If seg.Start() ==
+// Functions.MinKey(), PrevNonEmpty will return two terminal iterators.
+// Otherwise, exactly one of the iterators returned by PrevNonEmpty will be
+// non-terminal.
+func (seg MappingIterator) PrevNonEmpty() (MappingIterator, MappingGapIterator) {
+ gap := seg.PrevGap()
+ if gap.Range().Length() != 0 {
+ return MappingIterator{}, gap
+ }
+ return gap.PrevSegment(), MappingGapIterator{}
+}
+
+// NextNonEmpty returns the iterated segment's successor if it is adjacent, or
+// the gap after the iterated segment otherwise. If seg.End() ==
+// Functions.MaxKey(), NextNonEmpty will return two terminal iterators.
+// Otherwise, exactly one of the iterators returned by NextNonEmpty will be
+// non-terminal.
+func (seg MappingIterator) NextNonEmpty() (MappingIterator, MappingGapIterator) {
+ gap := seg.NextGap()
+ if gap.Range().Length() != 0 {
+ return MappingIterator{}, gap
+ }
+ return gap.NextSegment(), MappingGapIterator{}
+}
+
+// A GapIterator is conceptually one of:
+//
+// - A pointer to a position between two segments, before the first segment, or
+// after the last segment in a set, called a *gap*; or
+//
+// - A terminal iterator, which is a sentinel indicating that the end of
+// iteration has been reached.
+//
+// Note that the gap between two adjacent segments exists (iterators to it are
+// non-terminal), but has a length of zero. GapIterator.IsEmpty returns true
+// for such gaps. An empty set contains a single gap, spanning the entire range
+// of the set's keys.
+//
+// GapIterators are copyable values and are meaningfully equality-comparable.
+// The zero value of GapIterator is a terminal iterator.
+//
+// Unless otherwise specified, any mutation of a set invalidates all existing
+// iterators into the set.
+type MappingGapIterator struct {
+ // The representation of a GapIterator is identical to that of an Iterator,
+ // except that index corresponds to positions between segments in the same
+ // way as for node.children (see comment for node.nrSegments).
+ node *Mappingnode
+ index int
+}
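+
+ // Example (illustrative sketch; assumes a MappingSet s is in scope): visiting
+ // every non-empty gap in ascending key order.
+ //
+ //	for gap := s.FirstGap(); gap.Ok(); gap = gap.NextGap() {
+ //		if !gap.IsEmpty() {
+ //			fmt.Printf("hole: %v\n", gap.Range())
+ //		}
+ //	}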
+
+// Ok returns true if the iterator is not terminal. All other methods are only
+// valid for non-terminal iterators.
+func (gap MappingGapIterator) Ok() bool {
+ return gap.node != nil
+}
+
+// Range returns the range spanned by the iterated gap.
+func (gap MappingGapIterator) Range() MappableRange {
+ return MappableRange{gap.Start(), gap.End()}
+}
+
+// Start is equivalent to Range().Start, but should be preferred if only the
+// start of the range is needed.
+func (gap MappingGapIterator) Start() uint64 {
+ if ps := gap.PrevSegment(); ps.Ok() {
+ return ps.End()
+ }
+ return mappingSetFunctions{}.MinKey()
+}
+
+// End is equivalent to Range().End, but should be preferred if only the end of
+// the range is needed.
+func (gap MappingGapIterator) End() uint64 {
+ if ns := gap.NextSegment(); ns.Ok() {
+ return ns.Start()
+ }
+ return mappingSetFunctions{}.MaxKey()
+}
+
+// IsEmpty returns true if the iterated gap is empty (that is, the "gap" is
+// between two adjacent segments.)
+func (gap MappingGapIterator) IsEmpty() bool {
+ return gap.Range().Length() == 0
+}
+
+// PrevSegment returns the segment immediately before the iterated gap. If no
+// such segment exists, PrevSegment returns a terminal iterator.
+func (gap MappingGapIterator) PrevSegment() MappingIterator {
+ return MappingsegmentBeforePosition(gap.node, gap.index)
+}
+
+// NextSegment returns the segment immediately after the iterated gap. If no
+// such segment exists, NextSegment returns a terminal iterator.
+func (gap MappingGapIterator) NextSegment() MappingIterator {
+ return MappingsegmentAfterPosition(gap.node, gap.index)
+}
+
+// PrevGap returns the iterated gap's predecessor. If no such gap exists,
+// PrevGap returns a terminal iterator.
+func (gap MappingGapIterator) PrevGap() MappingGapIterator {
+ seg := gap.PrevSegment()
+ if !seg.Ok() {
+ return MappingGapIterator{}
+ }
+ return seg.PrevGap()
+}
+
+// NextGap returns the iterated gap's successor. If no such gap exists, NextGap
+// returns a terminal iterator.
+func (gap MappingGapIterator) NextGap() MappingGapIterator {
+ seg := gap.NextSegment()
+ if !seg.Ok() {
+ return MappingGapIterator{}
+ }
+ return seg.NextGap()
+}
+
+ // NextLargeEnoughGap returns the first gap after the iterated gap whose length
+ // is at least minSize. If no such gap exists, it returns a terminal gap
+ // iterator (the iterated gap itself is NOT considered).
+//
+// Precondition: trackGaps must be 1.
+func (gap MappingGapIterator) NextLargeEnoughGap(minSize uint64) MappingGapIterator {
+ if MappingtrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == gap.node.nrSegments {
+
+ gap.node = gap.NextSegment().node
+ gap.index = 0
+ return gap.nextLargeEnoughGapHelper(minSize)
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
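+
+ // Example (illustrative sketch; only valid when this set is generated with
+ // gap tracking enabled, i.e. trackGaps == 1, which may not be the case for
+ // this particular instantiation; note that the receiver gap itself is never
+ // returned, and the size is hypothetical):
+ //
+ //	if gap := s.FirstGap().NextLargeEnoughGap(0x10000); gap.Ok() {
+ //		// gap is the first later gap at least 0x10000 bytes long.
+ //	}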
+
+// nextLargeEnoughGapHelper is the helper function used by NextLargeEnoughGap
+ // to do the actual recursive search.
+//
+// Preconditions: gap is NOT the trailing gap of a non-leaf node.
+func (gap MappingGapIterator) nextLargeEnoughGapHelper(minSize uint64) MappingGapIterator {
+
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == gap.node.nrSegments)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return MappingGapIterator{}
+ }
+
+ gap.index++
+ for gap.index <= gap.node.nrSegments {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index++
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == gap.node.nrSegments {
+
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
+
+ // PrevLargeEnoughGap returns the first gap before the iterated gap whose
+ // length is at least minSize. If no such gap exists, it returns a terminal gap
+ // iterator (the iterated gap itself is NOT considered).
+//
+// Precondition: trackGaps must be 1.
+func (gap MappingGapIterator) PrevLargeEnoughGap(minSize uint64) MappingGapIterator {
+ if MappingtrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == 0 {
+
+ gap.node = gap.PrevSegment().node
+ gap.index = gap.node.nrSegments
+ return gap.prevLargeEnoughGapHelper(minSize)
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
+
+// prevLargeEnoughGapHelper is the helper function used by PrevLargeEnoughGap
+ // to do the actual recursive search.
+//
+// Preconditions: gap is NOT the first gap of a non-leaf node.
+func (gap MappingGapIterator) prevLargeEnoughGapHelper(minSize uint64) MappingGapIterator {
+
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == 0)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return MappingGapIterator{}
+ }
+
+ gap.index--
+ for gap.index >= 0 {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index--
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == 0 {
+
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
+
+// segmentBeforePosition returns the predecessor segment of the position given
+// by n.children[i], which may or may not contain a child. If no such segment
+// exists, segmentBeforePosition returns a terminal iterator.
+func MappingsegmentBeforePosition(n *Mappingnode, i int) MappingIterator {
+ for i == 0 {
+ if n.parent == nil {
+ return MappingIterator{}
+ }
+ n, i = n.parent, n.parentIndex
+ }
+ return MappingIterator{n, i - 1}
+}
+
+// segmentAfterPosition returns the successor segment of the position given by
+// n.children[i], which may or may not contain a child. If no such segment
+// exists, segmentAfterPosition returns a terminal iterator.
+func MappingsegmentAfterPosition(n *Mappingnode, i int) MappingIterator {
+ for i == n.nrSegments {
+ if n.parent == nil {
+ return MappingIterator{}
+ }
+ n, i = n.parent, n.parentIndex
+ }
+ return MappingIterator{n, i}
+}
+
+func MappingzeroValueSlice(slice []MappingsOfRange) {
+
+ for i := range slice {
+ mappingSetFunctions{}.ClearValue(&slice[i])
+ }
+}
+
+func MappingzeroNodeSlice(slice []*Mappingnode) {
+ for i := range slice {
+ slice[i] = nil
+ }
+}
+
+// String stringifies a Set for debugging.
+func (s *MappingSet) String() string {
+ return s.root.String()
+}
+
+// String stringifies a node (and all of its children) for debugging.
+func (n *Mappingnode) String() string {
+ var buf bytes.Buffer
+ n.writeDebugString(&buf, "")
+ return buf.String()
+}
+
+func (n *Mappingnode) writeDebugString(buf *bytes.Buffer, prefix string) {
+ if n.hasChildren != (n.nrSegments > 0 && n.children[0] != nil) {
+ buf.WriteString(prefix)
+ buf.WriteString(fmt.Sprintf("WARNING: inconsistent value of hasChildren: got %v, want %v\n", n.hasChildren, !n.hasChildren))
+ }
+ for i := 0; i < n.nrSegments; i++ {
+ if child := n.children[i]; child != nil {
+ cprefix := fmt.Sprintf("%s- % 3d ", prefix, i)
+ if child.parent != n || child.parentIndex != i {
+ buf.WriteString(cprefix)
+ buf.WriteString(fmt.Sprintf("WARNING: inconsistent linkage to parent: got (%p, %d), want (%p, %d)\n", child.parent, child.parentIndex, n, i))
+ }
+ child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, i))
+ }
+ buf.WriteString(prefix)
+ if n.hasChildren {
+ if MappingtrackGaps != 0 {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v, maxGap: %d\n", i, n.keys[i], n.values[i], n.maxGap.Get()))
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
+ }
+ if child := n.children[n.nrSegments]; child != nil {
+ child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, n.nrSegments))
+ }
+}
+
+// SegmentDataSlices represents segments from a set as slices of start, end, and
+// values. SegmentDataSlices is primarily used as an intermediate representation
+// for save/restore and the layout here is optimized for that.
+//
+// +stateify savable
+type MappingSegmentDataSlices struct {
+ Start []uint64
+ End []uint64
+ Values []MappingsOfRange
+}
+
+// ExportSortedSlices returns a copy of all segments in the given set, in
+// ascending key order.
+func (s *MappingSet) ExportSortedSlices() *MappingSegmentDataSlices {
+ var sds MappingSegmentDataSlices
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ sds.Start = append(sds.Start, seg.Start())
+ sds.End = append(sds.End, seg.End())
+ sds.Values = append(sds.Values, seg.Value())
+ }
+ sds.Start = sds.Start[:len(sds.Start):len(sds.Start)]
+ sds.End = sds.End[:len(sds.End):len(sds.End)]
+ sds.Values = sds.Values[:len(sds.Values):len(sds.Values)]
+ return &sds
+}
+
+// ImportSortedSlices initializes the given set from the given slice.
+//
+// Preconditions:
+// * s must be empty.
+// * sds must represent a valid set (the segments in sds must have valid
+// lengths that do not overlap).
+// * The segments in sds must be sorted in ascending key order.
+func (s *MappingSet) ImportSortedSlices(sds *MappingSegmentDataSlices) error {
+ if !s.IsEmpty() {
+ return fmt.Errorf("cannot import into non-empty set %v", s)
+ }
+ gap := s.FirstGap()
+ for i := range sds.Start {
+ r := MappableRange{sds.Start[i], sds.End[i]}
+ if !gap.Range().IsSupersetOf(r) {
+ return fmt.Errorf("segment overlaps a preceding segment or is incorrectly sorted: [%d, %d) => %v", sds.Start[i], sds.End[i], sds.Values[i])
+ }
+ gap = s.InsertWithoutMerging(gap, r, sds.Values[i]).NextGap()
+ }
+ return nil
+}
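+
+ // Example (illustrative sketch; assumes a populated MappingSet s is in scope):
+ // a save/restore round trip through the intermediate slice representation.
+ //
+ //	sds := s.ExportSortedSlices()
+ //	var restored MappingSet
+ //	if err := restored.ImportSortedSlices(sds); err != nil {
+ //		panic(err)
+ //	}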
+
+// segmentTestCheck returns an error if s is incorrectly sorted, does not
+// contain exactly expectedSegments segments, or contains a segment which
+// fails the passed check.
+//
+// This should be used only for testing, and has been added to this package for
+// templating convenience.
+func (s *MappingSet) segmentTestCheck(expectedSegments int, segFunc func(int, MappableRange, MappingsOfRange) error) error {
+ havePrev := false
+ prev := uint64(0)
+ nrSegments := 0
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ next := seg.Start()
+ if havePrev && prev >= next {
+ return fmt.Errorf("incorrect order: key %d (segment %d) >= key %d (segment %d)", prev, nrSegments-1, next, nrSegments)
+ }
+ if segFunc != nil {
+ if err := segFunc(nrSegments, seg.Range(), seg.Value()); err != nil {
+ return err
+ }
+ }
+ prev = next
+ havePrev = true
+ nrSegments++
+ }
+ if nrSegments != expectedSegments {
+ return fmt.Errorf("incorrect number of segments: got %d, wanted %d", nrSegments, expectedSegments)
+ }
+ return nil
+}
+
+// countSegments counts the number of segments in the set.
+//
+// Similar to Check, this should only be used for testing.
+func (s *MappingSet) countSegments() (segments int) {
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ segments++
+ }
+ return segments
+}
+func (s *MappingSet) saveRoot() *MappingSegmentDataSlices {
+ return s.ExportSortedSlices()
+}
+
+func (s *MappingSet) loadRoot(sds *MappingSegmentDataSlices) {
+ if err := s.ImportSortedSlices(sds); err != nil {
+ panic(err)
+ }
+}
diff --git a/pkg/sentry/memmap/mapping_set_test.go b/pkg/sentry/memmap/mapping_set_test.go
deleted file mode 100644
index 5cb81fde7..000000000
--- a/pkg/sentry/memmap/mapping_set_test.go
+++ /dev/null
@@ -1,259 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package memmap
-
-import (
- "gvisor.dev/gvisor/pkg/hostarch"
- "reflect"
- "testing"
-)
-
-type testMappingSpace struct {
- // Ideally we'd store the full ranges that were invalidated, rather
- // than individual calls to Invalidate, as they are an implementation
- // detail, but this is the simplest way for now.
- inv []hostarch.AddrRange
-}
-
-func (n *testMappingSpace) reset() {
- n.inv = []hostarch.AddrRange{}
-}
-
-func (n *testMappingSpace) Invalidate(ar hostarch.AddrRange, opts InvalidateOpts) {
- n.inv = append(n.inv, ar)
-}
-
-func TestAddRemoveMapping(t *testing.T) {
- set := MappingSet{}
- ms := &testMappingSpace{}
-
- mapped := set.AddMapping(ms, hostarch.AddrRange{0x10000, 0x12000}, 0x1000, true)
- if got, want := mapped, []MappableRange{{0x1000, 0x3000}}; !reflect.DeepEqual(got, want) {
- t.Errorf("AddMapping: got %+v, wanted %+v", got, want)
- }
-
- // Mappings (hostarch.AddrRanges => memmap.MappableRange):
- // [0x10000, 0x12000) => [0x1000, 0x3000)
- t.Log(&set)
-
- mapped = set.AddMapping(ms, hostarch.AddrRange{0x20000, 0x21000}, 0x2000, true)
- if len(mapped) != 0 {
- t.Errorf("AddMapping: got %+v, wanted []", mapped)
- }
-
- // Mappings:
- // [0x10000, 0x11000) => [0x1000, 0x2000)
- // [0x11000, 0x12000) and [0x20000, 0x21000) => [0x2000, 0x3000)
- t.Log(&set)
-
- mapped = set.AddMapping(ms, hostarch.AddrRange{0x30000, 0x31000}, 0x4000, true)
- if got, want := mapped, []MappableRange{{0x4000, 0x5000}}; !reflect.DeepEqual(got, want) {
- t.Errorf("AddMapping: got %+v, wanted %+v", got, want)
- }
-
- // Mappings:
- // [0x10000, 0x11000) => [0x1000, 0x2000)
- // [0x11000, 0x12000) and [0x20000, 0x21000) => [0x2000, 0x3000)
- // [0x30000, 0x31000) => [0x4000, 0x5000)
- t.Log(&set)
-
- mapped = set.AddMapping(ms, hostarch.AddrRange{0x12000, 0x15000}, 0x3000, true)
- if got, want := mapped, []MappableRange{{0x3000, 0x4000}, {0x5000, 0x6000}}; !reflect.DeepEqual(got, want) {
- t.Errorf("AddMapping: got %+v, wanted %+v", got, want)
- }
-
- // Mappings:
- // [0x10000, 0x11000) => [0x1000, 0x2000)
- // [0x11000, 0x12000) and [0x20000, 0x21000) => [0x2000, 0x3000)
- // [0x12000, 0x13000) => [0x3000, 0x4000)
- // [0x13000, 0x14000) and [0x30000, 0x31000) => [0x4000, 0x5000)
- // [0x14000, 0x15000) => [0x5000, 0x6000)
- t.Log(&set)
-
- unmapped := set.RemoveMapping(ms, hostarch.AddrRange{0x10000, 0x11000}, 0x1000, true)
- if got, want := unmapped, []MappableRange{{0x1000, 0x2000}}; !reflect.DeepEqual(got, want) {
- t.Errorf("RemoveMapping: got %+v, wanted %+v", got, want)
- }
-
- // Mappings:
- // [0x11000, 0x12000) and [0x20000, 0x21000) => [0x2000, 0x3000)
- // [0x12000, 0x13000) => [0x3000, 0x4000)
- // [0x13000, 0x14000) and [0x30000, 0x31000) => [0x4000, 0x5000)
- // [0x14000, 0x15000) => [0x5000, 0x6000)
- t.Log(&set)
-
- unmapped = set.RemoveMapping(ms, hostarch.AddrRange{0x20000, 0x21000}, 0x2000, true)
- if len(unmapped) != 0 {
- t.Errorf("RemoveMapping: got %+v, wanted []", unmapped)
- }
-
- // Mappings:
- // [0x11000, 0x13000) => [0x2000, 0x4000)
- // [0x13000, 0x14000) and [0x30000, 0x31000) => [0x4000, 0x5000)
- // [0x14000, 0x15000) => [0x5000, 0x6000)
- t.Log(&set)
-
- unmapped = set.RemoveMapping(ms, hostarch.AddrRange{0x11000, 0x15000}, 0x2000, true)
- if got, want := unmapped, []MappableRange{{0x2000, 0x4000}, {0x5000, 0x6000}}; !reflect.DeepEqual(got, want) {
- t.Errorf("RemoveMapping: got %+v, wanted %+v", got, want)
- }
-
- // Mappings:
- // [0x30000, 0x31000) => [0x4000, 0x5000)
- t.Log(&set)
-
- unmapped = set.RemoveMapping(ms, hostarch.AddrRange{0x30000, 0x31000}, 0x4000, true)
- if got, want := unmapped, []MappableRange{{0x4000, 0x5000}}; !reflect.DeepEqual(got, want) {
- t.Errorf("RemoveMapping: got %+v, wanted %+v", got, want)
- }
-}
-
-func TestInvalidateWholeMapping(t *testing.T) {
- set := MappingSet{}
- ms := &testMappingSpace{}
-
- set.AddMapping(ms, hostarch.AddrRange{0x10000, 0x11000}, 0, true)
- // Mappings:
- // [0x10000, 0x11000) => [0, 0x1000)
- t.Log(&set)
- set.Invalidate(MappableRange{0, 0x1000}, InvalidateOpts{})
- if got, want := ms.inv, []hostarch.AddrRange{{0x10000, 0x11000}}; !reflect.DeepEqual(got, want) {
- t.Errorf("Invalidate: got %+v, wanted %+v", got, want)
- }
-}
-
-func TestInvalidatePartialMapping(t *testing.T) {
- set := MappingSet{}
- ms := &testMappingSpace{}
-
- set.AddMapping(ms, hostarch.AddrRange{0x10000, 0x13000}, 0, true)
- // Mappings:
- // [0x10000, 0x13000) => [0, 0x3000)
- t.Log(&set)
- set.Invalidate(MappableRange{0x1000, 0x2000}, InvalidateOpts{})
- if got, want := ms.inv, []hostarch.AddrRange{{0x11000, 0x12000}}; !reflect.DeepEqual(got, want) {
- t.Errorf("Invalidate: got %+v, wanted %+v", got, want)
- }
-}
-
-func TestInvalidateMultipleMappings(t *testing.T) {
- set := MappingSet{}
- ms := &testMappingSpace{}
-
- set.AddMapping(ms, hostarch.AddrRange{0x10000, 0x11000}, 0, true)
- set.AddMapping(ms, hostarch.AddrRange{0x20000, 0x21000}, 0x2000, true)
- // Mappings:
- // [0x10000, 0x11000) => [0, 0x1000)
- // [0x20000, 0x21000) => [0x2000, 0x3000)
- t.Log(&set)
- set.Invalidate(MappableRange{0, 0x3000}, InvalidateOpts{})
- if got, want := ms.inv, []hostarch.AddrRange{{0x10000, 0x11000}, {0x20000, 0x21000}}; !reflect.DeepEqual(got, want) {
- t.Errorf("Invalidate: got %+v, wanted %+v", got, want)
- }
-}
-
-func TestInvalidateOverlappingMappings(t *testing.T) {
- set := MappingSet{}
- ms1 := &testMappingSpace{}
- ms2 := &testMappingSpace{}
-
- set.AddMapping(ms1, hostarch.AddrRange{0x10000, 0x12000}, 0, true)
- set.AddMapping(ms2, hostarch.AddrRange{0x20000, 0x22000}, 0x1000, true)
- // Mappings:
- // ms1:[0x10000, 0x12000) => [0, 0x2000)
- // ms2:[0x20000, 0x22000) => [0x1000, 0x3000)
- t.Log(&set)
- set.Invalidate(MappableRange{0x1000, 0x2000}, InvalidateOpts{})
- if got, want := ms1.inv, []hostarch.AddrRange{{0x11000, 0x12000}}; !reflect.DeepEqual(got, want) {
- t.Errorf("Invalidate: ms1: got %+v, wanted %+v", got, want)
- }
- if got, want := ms2.inv, []hostarch.AddrRange{{0x20000, 0x21000}}; !reflect.DeepEqual(got, want) {
- t.Errorf("Invalidate: ms1: got %+v, wanted %+v", got, want)
- }
-}
-
-func TestMixedWritableMappings(t *testing.T) {
- set := MappingSet{}
- ms := &testMappingSpace{}
-
- mapped := set.AddMapping(ms, hostarch.AddrRange{0x10000, 0x12000}, 0x1000, true)
- if got, want := mapped, []MappableRange{{0x1000, 0x3000}}; !reflect.DeepEqual(got, want) {
- t.Errorf("AddMapping: got %+v, wanted %+v", got, want)
- }
-
- // Mappings:
- // [0x10000, 0x12000) writable => [0x1000, 0x3000)
- t.Log(&set)
-
- mapped = set.AddMapping(ms, hostarch.AddrRange{0x20000, 0x22000}, 0x2000, false)
- if got, want := mapped, []MappableRange{{0x3000, 0x4000}}; !reflect.DeepEqual(got, want) {
- t.Errorf("AddMapping: got %+v, wanted %+v", got, want)
- }
-
- // Mappings:
- // [0x10000, 0x11000) writable => [0x1000, 0x2000)
- // [0x11000, 0x12000) writable and [0x20000, 0x21000) readonly => [0x2000, 0x3000)
- // [0x21000, 0x22000) readonly => [0x3000, 0x4000)
- t.Log(&set)
-
- // Unmap should fail because we specified the readonly map address range, but
- // asked to unmap a writable segment.
- unmapped := set.RemoveMapping(ms, hostarch.AddrRange{0x20000, 0x21000}, 0x2000, true)
- if len(unmapped) != 0 {
- t.Errorf("RemoveMapping: got %+v, wanted []", unmapped)
- }
-
- // Readonly mapping removed, but writable mapping still exists in the range,
- // so no mappable range fully unmapped.
- unmapped = set.RemoveMapping(ms, hostarch.AddrRange{0x20000, 0x21000}, 0x2000, false)
- if len(unmapped) != 0 {
- t.Errorf("RemoveMapping: got %+v, wanted []", unmapped)
- }
-
- // Mappings:
- // [0x10000, 0x12000) writable => [0x1000, 0x3000)
- // [0x21000, 0x22000) readonly => [0x3000, 0x4000)
- t.Log(&set)
-
- unmapped = set.RemoveMapping(ms, hostarch.AddrRange{0x11000, 0x12000}, 0x2000, true)
- if got, want := unmapped, []MappableRange{{0x2000, 0x3000}}; !reflect.DeepEqual(got, want) {
- t.Errorf("RemoveMapping: got %+v, wanted %+v", got, want)
- }
-
- // Mappings:
- // [0x10000, 0x11000) writable => [0x1000, 0x2000)
- // [0x21000, 0x22000) readonly => [0x3000, 0x4000)
- t.Log(&set)
-
- // Unmap should fail since writable bit doesn't match.
- unmapped = set.RemoveMapping(ms, hostarch.AddrRange{0x10000, 0x12000}, 0x1000, false)
- if len(unmapped) != 0 {
- t.Errorf("RemoveMapping: got %+v, wanted []", unmapped)
- }
-
- unmapped = set.RemoveMapping(ms, hostarch.AddrRange{0x10000, 0x12000}, 0x1000, true)
- if got, want := unmapped, []MappableRange{{0x1000, 0x2000}}; !reflect.DeepEqual(got, want) {
- t.Errorf("RemoveMapping: got %+v, wanted %+v", got, want)
- }
-
- // Mappings:
- // [0x21000, 0x22000) readonly => [0x3000, 0x4000)
- t.Log(&set)
-
- unmapped = set.RemoveMapping(ms, hostarch.AddrRange{0x21000, 0x22000}, 0x3000, false)
- if got, want := unmapped, []MappableRange{{0x3000, 0x4000}}; !reflect.DeepEqual(got, want) {
- t.Errorf("RemoveMapping: got %+v, wanted %+v", got, want)
- }
-}
diff --git a/pkg/sentry/memmap/memmap_impl_state_autogen.go b/pkg/sentry/memmap/memmap_impl_state_autogen.go
new file mode 100644
index 000000000..d65558bbe
--- /dev/null
+++ b/pkg/sentry/memmap/memmap_impl_state_autogen.go
@@ -0,0 +1,116 @@
+// automatically generated by stateify.
+
+package memmap
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (s *MappingSet) StateTypeName() string {
+ return "pkg/sentry/memmap.MappingSet"
+}
+
+func (s *MappingSet) StateFields() []string {
+ return []string{
+ "root",
+ }
+}
+
+func (s *MappingSet) beforeSave() {}
+
+// +checklocksignore
+func (s *MappingSet) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ var rootValue *MappingSegmentDataSlices = s.saveRoot()
+ stateSinkObject.SaveValue(0, rootValue)
+}
+
+func (s *MappingSet) afterLoad() {}
+
+// +checklocksignore
+func (s *MappingSet) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.LoadValue(0, new(*MappingSegmentDataSlices), func(y interface{}) { s.loadRoot(y.(*MappingSegmentDataSlices)) })
+}
+
+func (n *Mappingnode) StateTypeName() string {
+ return "pkg/sentry/memmap.Mappingnode"
+}
+
+func (n *Mappingnode) StateFields() []string {
+ return []string{
+ "nrSegments",
+ "parent",
+ "parentIndex",
+ "hasChildren",
+ "maxGap",
+ "keys",
+ "values",
+ "children",
+ }
+}
+
+func (n *Mappingnode) beforeSave() {}
+
+// +checklocksignore
+func (n *Mappingnode) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.nrSegments)
+ stateSinkObject.Save(1, &n.parent)
+ stateSinkObject.Save(2, &n.parentIndex)
+ stateSinkObject.Save(3, &n.hasChildren)
+ stateSinkObject.Save(4, &n.maxGap)
+ stateSinkObject.Save(5, &n.keys)
+ stateSinkObject.Save(6, &n.values)
+ stateSinkObject.Save(7, &n.children)
+}
+
+func (n *Mappingnode) afterLoad() {}
+
+// +checklocksignore
+func (n *Mappingnode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.nrSegments)
+ stateSourceObject.Load(1, &n.parent)
+ stateSourceObject.Load(2, &n.parentIndex)
+ stateSourceObject.Load(3, &n.hasChildren)
+ stateSourceObject.Load(4, &n.maxGap)
+ stateSourceObject.Load(5, &n.keys)
+ stateSourceObject.Load(6, &n.values)
+ stateSourceObject.Load(7, &n.children)
+}
+
+func (m *MappingSegmentDataSlices) StateTypeName() string {
+ return "pkg/sentry/memmap.MappingSegmentDataSlices"
+}
+
+func (m *MappingSegmentDataSlices) StateFields() []string {
+ return []string{
+ "Start",
+ "End",
+ "Values",
+ }
+}
+
+func (m *MappingSegmentDataSlices) beforeSave() {}
+
+// +checklocksignore
+func (m *MappingSegmentDataSlices) StateSave(stateSinkObject state.Sink) {
+ m.beforeSave()
+ stateSinkObject.Save(0, &m.Start)
+ stateSinkObject.Save(1, &m.End)
+ stateSinkObject.Save(2, &m.Values)
+}
+
+func (m *MappingSegmentDataSlices) afterLoad() {}
+
+// +checklocksignore
+func (m *MappingSegmentDataSlices) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &m.Start)
+ stateSourceObject.Load(1, &m.End)
+ stateSourceObject.Load(2, &m.Values)
+}
+
+func init() {
+ state.Register((*MappingSet)(nil))
+ state.Register((*Mappingnode)(nil))
+ state.Register((*MappingSegmentDataSlices)(nil))
+}
diff --git a/pkg/sentry/memmap/memmap_state_autogen.go b/pkg/sentry/memmap/memmap_state_autogen.go
new file mode 100644
index 000000000..8624ecb60
--- /dev/null
+++ b/pkg/sentry/memmap/memmap_state_autogen.go
@@ -0,0 +1,100 @@
+// automatically generated by stateify.
+
+package memmap
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (fr *FileRange) StateTypeName() string {
+ return "pkg/sentry/memmap.FileRange"
+}
+
+func (fr *FileRange) StateFields() []string {
+ return []string{
+ "Start",
+ "End",
+ }
+}
+
+func (fr *FileRange) beforeSave() {}
+
+// +checklocksignore
+func (fr *FileRange) StateSave(stateSinkObject state.Sink) {
+ fr.beforeSave()
+ stateSinkObject.Save(0, &fr.Start)
+ stateSinkObject.Save(1, &fr.End)
+}
+
+func (fr *FileRange) afterLoad() {}
+
+// +checklocksignore
+func (fr *FileRange) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fr.Start)
+ stateSourceObject.Load(1, &fr.End)
+}
+
+func (mr *MappableRange) StateTypeName() string {
+ return "pkg/sentry/memmap.MappableRange"
+}
+
+func (mr *MappableRange) StateFields() []string {
+ return []string{
+ "Start",
+ "End",
+ }
+}
+
+func (mr *MappableRange) beforeSave() {}
+
+// +checklocksignore
+func (mr *MappableRange) StateSave(stateSinkObject state.Sink) {
+ mr.beforeSave()
+ stateSinkObject.Save(0, &mr.Start)
+ stateSinkObject.Save(1, &mr.End)
+}
+
+func (mr *MappableRange) afterLoad() {}
+
+// +checklocksignore
+func (mr *MappableRange) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &mr.Start)
+ stateSourceObject.Load(1, &mr.End)
+}
+
+func (r *MappingOfRange) StateTypeName() string {
+ return "pkg/sentry/memmap.MappingOfRange"
+}
+
+func (r *MappingOfRange) StateFields() []string {
+ return []string{
+ "MappingSpace",
+ "AddrRange",
+ "Writable",
+ }
+}
+
+func (r *MappingOfRange) beforeSave() {}
+
+// +checklocksignore
+func (r *MappingOfRange) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.MappingSpace)
+ stateSinkObject.Save(1, &r.AddrRange)
+ stateSinkObject.Save(2, &r.Writable)
+}
+
+func (r *MappingOfRange) afterLoad() {}
+
+// +checklocksignore
+func (r *MappingOfRange) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.MappingSpace)
+ stateSourceObject.Load(1, &r.AddrRange)
+ stateSourceObject.Load(2, &r.Writable)
+}
+
+func init() {
+ state.Register((*FileRange)(nil))
+ state.Register((*MappableRange)(nil))
+ state.Register((*MappingOfRange)(nil))
+}
diff --git a/pkg/sentry/mm/BUILD b/pkg/sentry/mm/BUILD
deleted file mode 100644
index 69aff21b6..000000000
--- a/pkg/sentry/mm/BUILD
+++ /dev/null
@@ -1,170 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "file_refcount_set",
- out = "file_refcount_set.go",
- imports = {
- "memmap": "gvisor.dev/gvisor/pkg/sentry/memmap",
- },
- package = "mm",
- prefix = "fileRefcount",
- template = "//pkg/segment:generic_set",
- types = {
- "Key": "uint64",
- "Range": "memmap.FileRange",
- "Value": "int32",
- "Functions": "fileRefcountSetFunctions",
- },
-)
-
-go_template_instance(
- name = "vma_set",
- out = "vma_set.go",
- consts = {
- "minDegree": "8",
- "trackGaps": "1",
- },
- imports = {
- "hostarch": "gvisor.dev/gvisor/pkg/hostarch",
- },
- package = "mm",
- prefix = "vma",
- template = "//pkg/segment:generic_set",
- types = {
- "Key": "hostarch.Addr",
- "Range": "hostarch.AddrRange",
- "Value": "vma",
- "Functions": "vmaSetFunctions",
- },
-)
-
-go_template_instance(
- name = "pma_set",
- out = "pma_set.go",
- consts = {
- "minDegree": "8",
- },
- imports = {
- "hostarch": "gvisor.dev/gvisor/pkg/hostarch",
- },
- package = "mm",
- prefix = "pma",
- template = "//pkg/segment:generic_set",
- types = {
- "Key": "hostarch.Addr",
- "Range": "hostarch.AddrRange",
- "Value": "pma",
- "Functions": "pmaSetFunctions",
- },
-)
-
-go_template_instance(
- name = "io_list",
- out = "io_list.go",
- package = "mm",
- prefix = "io",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*ioResult",
- "Linker": "*ioResult",
- },
-)
-
-go_template_instance(
- name = "aio_mappable_refs",
- out = "aio_mappable_refs.go",
- package = "mm",
- prefix = "aioMappable",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "aioMappable",
- },
-)
-
-go_template_instance(
- name = "special_mappable_refs",
- out = "special_mappable_refs.go",
- package = "mm",
- prefix = "SpecialMappable",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "SpecialMappable",
- },
-)
-
-go_library(
- name = "mm",
- srcs = [
- "address_space.go",
- "aio_context.go",
- "aio_context_state.go",
- "aio_mappable_refs.go",
- "debug.go",
- "file_refcount_set.go",
- "io.go",
- "io_list.go",
- "lifecycle.go",
- "metadata.go",
- "mm.go",
- "pma.go",
- "pma_set.go",
- "procfs.go",
- "save_restore.go",
- "shm.go",
- "special_mappable.go",
- "special_mappable_refs.go",
- "syscalls.go",
- "vma.go",
- "vma_set.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/atomicbitops",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/safecopy",
- "//pkg/safemem",
- "//pkg/sentry/arch",
- "//pkg/sentry/fs/proc/seqfile",
- "//pkg/sentry/fsbridge",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/futex",
- "//pkg/sentry/kernel/shm",
- "//pkg/sentry/limits",
- "//pkg/sentry/memmap",
- "//pkg/sentry/pgalloc",
- "//pkg/sentry/platform",
- "//pkg/sentry/usage",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/tcpip/buffer",
- "//pkg/usermem",
- ],
-)
-
-go_test(
- name = "mm_test",
- size = "small",
- srcs = ["mm_test.go"],
- library = ":mm",
- deps = [
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/sentry/arch",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/limits",
- "//pkg/sentry/memmap",
- "//pkg/sentry/pgalloc",
- "//pkg/sentry/platform",
- "//pkg/usermem",
- ],
-)
diff --git a/pkg/sentry/mm/README.md b/pkg/sentry/mm/README.md
deleted file mode 100644
index f4d43d927..000000000
--- a/pkg/sentry/mm/README.md
+++ /dev/null
@@ -1,280 +0,0 @@
-This package provides an emulation of Linux semantics for application virtual
-memory mappings.
-
-For completeness, this document also describes aspects of the memory management
-subsystem defined outside this package.
-
-# Background
-
-We begin by describing semantics for virtual memory in Linux.
-
-A virtual address space is defined as a collection of mappings from virtual
-addresses to physical memory. However, userspace applications do not configure
-mappings to physical memory directly. Instead, applications configure memory
-mappings from virtual addresses to offsets into a file using the `mmap` system
-call.[^mmap-anon] For example, a call to:
-
- mmap(
- /* addr = */ 0x400000,
- /* length = */ 0x1000,
- PROT_READ | PROT_WRITE,
- MAP_SHARED,
- /* fd = */ 3,
- /* offset = */ 0);
-
-creates a mapping of length 0x1000 bytes, starting at virtual address (VA)
-0x400000, to offset 0 in the file represented by file descriptor (FD) 3. Within
-the Linux kernel, virtual memory mappings are represented by *virtual memory
-areas* (VMAs). Supposing that FD 3 represents file /tmp/foo, the state of the
-virtual memory subsystem after the `mmap` call may be depicted as:
-
- VMA: VA:0x400000 -> /tmp/foo:0x0
-
-Establishing a virtual memory area does not necessarily establish a mapping to a
-physical address, because Linux has not necessarily provisioned physical memory
-to store the file's contents. Thus, if the application attempts to read the
-contents of VA 0x400000, it may incur a *page fault*, a CPU exception that
-forces the kernel to create such a mapping to service the read.
-
-For a file, doing so consists of several logical phases:
-
-1. The kernel allocates physical memory to store the contents of the required
- part of the file, and copies file contents to the allocated memory.
- Supposing that the kernel chooses the physical memory at physical address
- (PA) 0x2fb000, the resulting state of the system is:
-
- VMA: VA:0x400000 -> /tmp/foo:0x0
- Filemap: /tmp/foo:0x0 -> PA:0x2fb000
-
- (In Linux the state of the mapping from file offset to physical memory is
- stored in `struct address_space`, but to avoid confusion with other notions
- of address space we will refer to this system as filemap, named after Linux
- kernel source file `mm/filemap.c`.)
-
-2. The kernel stores the effective mapping from virtual to physical address in
- a *page table entry* (PTE) in the application's *page tables*, which are
- used by the CPU's virtual memory hardware to perform address translation.
- The resulting state of the system is:
-
- VMA: VA:0x400000 -> /tmp/foo:0x0
- Filemap: /tmp/foo:0x0 -> PA:0x2fb000
- PTE: VA:0x400000 -----------------> PA:0x2fb000
-
- The PTE is required for the application to actually use the contents of the
- mapped file as virtual memory. However, the PTE is derived from the VMA and
- filemap state, both of which are independently mutable, such that mutations
- to either will affect the PTE. For example:
-
- - The application may remove the VMA using the `munmap` system call. This
- breaks the mapping from VA:0x400000 to /tmp/foo:0x0, and consequently
- the mapping from VA:0x400000 to PA:0x2fb000. However, it does not
- necessarily break the mapping from /tmp/foo:0x0 to PA:0x2fb000, so a
- future mapping of the same file offset may reuse this physical memory.
-
- - The application may invalidate the file's contents by passing a length
- of 0 to the `ftruncate` system call. This breaks the mapping from
- /tmp/foo:0x0 to PA:0x2fb000, and consequently the mapping from
- VA:0x400000 to PA:0x2fb000. However, it does not break the mapping from
- VA:0x400000 to /tmp/foo:0x0, so future changes to the file's contents
- may again be made visible at VA:0x400000 after another page fault
- results in the allocation of a new physical address.
-
- Note that, in order to correctly break the mapping from VA:0x400000 to
- PA:0x2fb000 in the latter case, filemap must also store a *reverse mapping*
- from /tmp/foo:0x0 to VA:0x400000 so that it can locate and remove the PTE.
-
-[^mmap-anon]: Memory mappings to non-files are discussed in later sections.
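-
-To make the shared case concrete, the mapping above can be reproduced from
-userspace with a small Go program. This is a minimal sketch, not part of this
-package; it uses the `golang.org/x/sys/unix` wrappers and assumes that
-/tmp/foo already exists and is at least 0x1000 bytes long.
-
-    package main
-
-    import (
-        "os"
-
-        "golang.org/x/sys/unix"
-    )
-
-    func main() {
-        f, err := os.OpenFile("/tmp/foo", os.O_RDWR, 0)
-        if err != nil {
-            panic(err)
-        }
-        defer f.Close()
-
-        // Equivalent to the mmap call above: map 0x1000 bytes of /tmp/foo at
-        // offset 0 as shared, writable memory.
-        b, err := unix.Mmap(int(f.Fd()), 0, 0x1000,
-            unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED)
-        if err != nil {
-            panic(err)
-        }
-        defer unix.Munmap(b)
-
-        // The first access may page-fault; the kernel then populates filemap
-        // and the PTE as described above. Writes through b are writes to the
-        // file itself.
-        b[0] = 'x'
-    }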
-
-## Private Mappings
-
-The preceding example considered VMAs created using the `MAP_SHARED` flag, which
-means that PTEs derived from the mapping should always use physical memory that
-represents the current state of the mapped file.[^mmap-dev-zero] Applications
-can alternatively pass the `MAP_PRIVATE` flag to create a *private mapping*.
-Private mappings are *copy-on-write*.
-
-Suppose that the application instead created a private mapping in the previous
-example. In Linux, the state of the system after a read page fault would be:
-
- VMA: VA:0x400000 -> /tmp/foo:0x0 (private)
- Filemap: /tmp/foo:0x0 -> PA:0x2fb000
- PTE: VA:0x400000 -----------------> PA:0x2fb000 (read-only)
-
-Now suppose the application attempts to write to VA:0x400000. For a shared
-mapping, the write would be propagated to PA:0x2fb000, and the kernel would be
-responsible for ensuring that the write is later propagated to the mapped file.
-For a private mapping, the write incurs another page fault since the PTE is
-marked read-only. In response, the kernel allocates physical memory to store the
-mapping's *private copy* of the file's contents, copies file contents to the
-allocated memory, and changes the PTE to map to the private copy. Supposing that
-the kernel chooses the physical memory at physical address (PA) 0x5ea000, the
-resulting state of the system is:
-
- VMA: VA:0x400000 -> /tmp/foo:0x0 (private)
- Filemap: /tmp/foo:0x0 -> PA:0x2fb000
- PTE: VA:0x400000 -----------------> PA:0x5ea000
-
-Note that the filemap mapping from /tmp/foo:0x0 to PA:0x2fb000 may still exist,
-but is now irrelevant to this mapping.
-
-[^mmap-dev-zero]: Modulo files with special mmap semantics such as `/dev/zero`.
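-
-Continuing the sketch from the Background section (same imports and the same
-open file `f`; names are illustrative), copy-on-write is directly observable
-from userspace: a write through a `MAP_PRIVATE` mapping never reaches the
-file.
-
-    // As before, but with MAP_PRIVATE instead of MAP_SHARED.
-    p, err := unix.Mmap(int(f.Fd()), 0, 0x1000,
-        unix.PROT_READ|unix.PROT_WRITE, unix.MAP_PRIVATE)
-    if err != nil {
-        panic(err)
-    }
-    defer unix.Munmap(p)
-
-    _ = p[0]   // read fault: a read-only PTE backed by filemap is installed
-    p[0] = 'y' // write fault: the kernel copies the page and remaps the PTE
-
-    // The file still holds its original contents; 'y' is visible only
-    // through p.
-    buf := make([]byte, 1)
-    if _, err := f.ReadAt(buf, 0); err != nil {
-        panic(err)
-    }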
-
-## Anonymous Mappings
-
-Instead of passing a file to the `mmap` system call, applications can
-request an *anonymous* mapping by passing the `MAP_ANONYMOUS` flag.
-Semantically, an anonymous mapping is essentially a mapping to an ephemeral file
-initially filled with zero bytes. Practically speaking, this is how shared
-anonymous mappings are implemented, but private anonymous mappings do not result
-in the creation of an ephemeral file; since there would be no way to modify the
-contents of the underlying file through a private mapping, all private anonymous
-mappings use a single shared page filled with zero bytes until copy-on-write
-occurs.
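-
-In the same sketch, a private anonymous mapping needs no file at all: the
-file descriptor passed to `mmap` is -1 and the offset is ignored.
-
-    a, err := unix.Mmap(-1, 0, 0x1000,
-        unix.PROT_READ|unix.PROT_WRITE,
-        unix.MAP_PRIVATE|unix.MAP_ANONYMOUS)
-    if err != nil {
-        panic(err)
-    }
-    // a reads as zeroes (via the shared zero page) until written; the first
-    // write to a page triggers copy-on-write.
-    a[0] = 1
-    unix.Munmap(a)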
-
-# Virtual Memory in the Sentry
-
-The sentry implements application virtual memory atop a host kernel, introducing
-an additional level of indirection to the above.
-
-Consider the same scenario as in the previous section. Since the sentry handles
-application system calls, the effect of an application `mmap` system call is to
-create a VMA in the sentry (as opposed to the host kernel):
-
- Sentry VMA: VA:0x400000 -> /tmp/foo:0x0
-
-When the application first incurs a page fault on this address, the host kernel
-delivers information about the page fault to the sentry in a platform-dependent
-manner, and the sentry handles the fault:
-
-1. The sentry allocates memory to store the contents of the required part of
- the file, and copies file contents to the allocated memory. However, since
- the sentry is implemented atop a host kernel, it does not configure mappings
- to physical memory directly. Instead, mappable "memory" in the sentry is
- represented by a host file descriptor and offset, since (as noted in
- "Background") this is the memory mapping primitive provided by the host
- kernel. In general, memory is allocated from a temporary host file using the
- `pgalloc` package. Supposing that the sentry allocates offset 0x3000 from
- host file "memory-file", the resulting state is:
-
- Sentry VMA: VA:0x400000 -> /tmp/foo:0x0
- Sentry filemap: /tmp/foo:0x0 -> host:memory-file:0x3000
-
-2. The sentry stores the effective mapping from virtual address to host file in
- a host VMA by invoking the `mmap` system call:
-
- Sentry VMA: VA:0x400000 -> /tmp/foo:0x0
- Sentry filemap: /tmp/foo:0x0 -> host:memory-file:0x3000
- Host VMA: VA:0x400000 -----------------> host:memory-file:0x3000
-
-3. The sentry returns control to the application, which immediately incurs the
- page fault again.[^mmap-populate] However, since a host VMA now exists for
- the faulting virtual address, the host kernel now handles the page fault as
- described in "Background":
-
- Sentry VMA: VA:0x400000 -> /tmp/foo:0x0
- Sentry filemap: /tmp/foo:0x0 -> host:memory-file:0x3000
- Host VMA: VA:0x400000 -----------------> host:memory-file:0x3000
- Host filemap: host:memory-file:0x3000 -> PA:0x2fb000
- Host PTE: VA:0x400000 --------------------------------------------> PA:0x2fb000
-
-Thus, from an implementation standpoint, host VMAs serve the same purpose in the
-sentry that PTEs do in Linux. As in Linux, sentry VMA and filemap state is
-independently mutable, and the desired state of host VMAs is derived from that
-state.
-
-[^mmap-populate]: The sentry could force the host kernel to establish PTEs when
- it creates the host VMA by passing the `MAP_POPULATE` flag to
- the `mmap` system call, but usually does not. This is because,
- to reduce the number of page faults that require handling by
- the sentry and (correspondingly) the number of host `mmap`
- system calls, the sentry usually creates host VMAs that are
- much larger than the single faulting page.
-
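-The two phases above can be summarized in a deliberately simplified sketch.
-All of the names here (`sentryVMA`, `ensureBacked`, `hostMapFile`) are
-invented for illustration and are not the actual `mm` package API:
-
-    // Illustrative pseudocode only; not the sentry's real types.
-    type sentryVMA struct {
-        start, end uintptr
-        offset     uint64 // mappable offset corresponding to start
-        writable   bool
-    }
-
-    type memFileRange struct{ start, end uint64 } // offsets into the memory file
-
-    // ensureBacked stands in for phase 1: allocate memory-file pages via
-    // pgalloc and copy file contents into them on first use.
-    func ensureBacked(mappableOffset uint64) memFileRange {
-        return memFileRange{start: 0x3000, end: 0x4000}
-    }
-
-    // hostMapFile stands in for phase 2: the host mmap call that creates the
-    // host VMA for the faulting address.
-    func hostMapFile(va uintptr, fr memFileRange, writable bool) {}
-
-    func handleAppFault(vmas []sentryVMA, va uintptr) bool {
-        for _, v := range vmas {
-            if va < v.start || va >= v.end {
-                continue
-            }
-            fr := ensureBacked(v.offset + uint64(va-v.start))
-            // From here on, the host kernel services further faults at va
-            // itself, as in the diagram above.
-            hostMapFile(va, fr, v.writable)
-            return true
-        }
-        return false // no sentry VMA: deliver SIGSEGV to the application
-    }
-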
-## Private Mappings
-
-The sentry implements private mappings consistently with Linux. Before
-copy-on-write, the private mapping example given in the Background results in:
-
- Sentry VMA: VA:0x400000 -> /tmp/foo:0x0 (private)
- Sentry filemap: /tmp/foo:0x0 -> host:memory-file:0x3000
- Host VMA: VA:0x400000 -----------------> host:memory-file:0x3000 (read-only)
- Host filemap: host:memory-file:0x3000 -> PA:0x2fb000
- Host PTE: VA:0x400000 --------------------------------------------> PA:0x2fb000 (read-only)
-
-When the application attempts to write to this address, the host kernel delivers
-information about the resulting page fault to the sentry. Analogous to Linux,
-the sentry allocates memory to store the mapping's private copy of the file's
-contents, copies file contents to the allocated memory, and changes the host VMA
-to map to the private copy. Supposing that the sentry chooses the offset 0x4000
-in host file `memory-file` to store the private copy, the state of the system
-after copy-on-write is:
-
- Sentry VMA: VA:0x400000 -> /tmp/foo:0x0 (private)
- Sentry filemap: /tmp/foo:0x0 -> host:memory-file:0x3000
- Host VMA: VA:0x400000 -----------------> host:memory-file:0x4000
- Host filemap: host:memory-file:0x4000 -> PA:0x5ea000
- Host PTE: VA:0x400000 --------------------------------------------> PA:0x5ea000
-
-However, this highlights an important difference between Linux and the sentry.
-In Linux, page tables are concrete (architecture-dependent) data structures
-owned by the kernel. Conversely, the sentry has the ability to create and
-destroy host VMAs using host system calls, but it does not have direct access to
-their state. Thus, as written, if the application invokes the `munmap` system
-call to remove the sentry VMA, it is non-trivial for the sentry to determine
-that it should deallocate `host:memory-file:0x4000`. This implies that the
-sentry must retain information about the host VMAs that it has created.
-
-## Anonymous Mappings
-
-The sentry implements anonymous mappings consistently with Linux, except that
-there is no shared zero page.
-
-# Implementation Constructs
-
-In Linux:
-
-- A virtual address space is represented by `struct mm_struct`.
-
-- VMAs are represented by `struct vm_area_struct`, stored in `struct
- mm_struct::mmap`.
-
-- Mappings from file offsets to physical memory are stored in `struct
- address_space`.
-
-- Reverse mappings from file offsets to virtual mappings are stored in `struct
- address_space::i_mmap`.
-
-- Physical memory pages are represented by a pointer to `struct page` or an
- index called a *page frame number* (PFN), represented by `pfn_t`.
-
-- PTEs are represented by architecture-dependent type `pte_t`, stored in a
- table hierarchy rooted at `struct mm_struct::pgd`.
-
-In the sentry:
-
-- A virtual address space is represented by type [`mm.MemoryManager`][mm].
-
-- Sentry VMAs are represented by type [`mm.vma`][mm], stored in
- `mm.MemoryManager.vmas`.
-
-- Mappings from sentry file offsets to host file offsets are abstracted
- through interface method [`memmap.Mappable.Translate`][memmap].
-
-- Reverse mappings from sentry file offsets to virtual mappings are abstracted
- through interface methods
- [`memmap.Mappable.AddMapping` and `memmap.Mappable.RemoveMapping`][memmap].
-
-- Host files that may be mapped into host VMAs are represented by type
- [`platform.File`][platform].
-
-- Host VMAs are represented in the sentry by type [`mm.pma`][mm] ("platform
- mapping area"), stored in `mm.MemoryManager.pmas`.
-
-- Creation and destruction of host VMAs is abstracted through interface
- methods
- [`platform.AddressSpace.MapFile` and `platform.AddressSpace.Unmap`][platform].
-
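-The sentry-side constructs above fit together roughly as follows. This is a
-hypothetical, simplified sketch with invented names; the real interfaces are
-`memmap.Mappable` and `platform.AddressSpace` in the files linked below.
-
-    // Hypothetical stand-ins for illustration only.
-    type addrRange struct{ Start, End uintptr }    // virtual addresses
-    type mappableRange struct{ Start, End uint64 } // offsets into a mappable
-    type fileRange struct{ Start, End uint64 }     // offsets into a host file
-
-    // A translation says: mappable offsets [Source.Start, Source.End) are
-    // currently backed by host file FD at Offset.
-    type translation struct {
-        Source mappableRange
-        FD     int
-        Offset uint64
-    }
-
-    // mappable plays the role of memmap.Mappable: anything that can be
-    // mapped into an application address space.
-    type mappable interface {
-        AddMapping(ar addrRange, offset uint64, writable bool) error
-        RemoveMapping(ar addrRange, offset uint64, writable bool)
-        Translate(required mappableRange) ([]translation, error)
-    }
-
-    // addressSpace plays the role of platform.AddressSpace: the layer that
-    // creates and destroys host VMAs.
-    type addressSpace interface {
-        MapFile(va uintptr, fd int, fr fileRange, writable bool) error
-        Unmap(va uintptr, length uint64)
-    }
-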
-[memmap]: https://github.com/google/gvisor/blob/master/pkg/sentry/memmap/memmap.go
-[mm]: https://github.com/google/gvisor/blob/master/pkg/sentry/mm/mm.go
-[pgalloc]: https://github.com/google/gvisor/blob/master/pkg/sentry/pgalloc/pgalloc.go
-[platform]: https://github.com/google/gvisor/blob/master/pkg/sentry/platform/platform.go
diff --git a/pkg/sentry/mm/aio_mappable_refs.go b/pkg/sentry/mm/aio_mappable_refs.go
new file mode 100644
index 000000000..6e1bc6739
--- /dev/null
+++ b/pkg/sentry/mm/aio_mappable_refs.go
@@ -0,0 +1,140 @@
+package mm
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const aioMappableenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var aioMappableobj *aioMappable
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type aioMappableRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *aioMappableRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *aioMappableRefs) RefType() string {
+ return fmt.Sprintf("%T", aioMappableobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *aioMappableRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *aioMappableRefs) LogRefs() bool {
+ return aioMappableenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *aioMappableRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *aioMappableRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if aioMappableenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.TryRefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *aioMappableRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if aioMappableenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *aioMappableRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if aioMappableenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *aioMappableRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
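+
+// exampleAioMappableRefsUsage is an illustrative sketch only: it is not part
+// of the generated template and is not called anywhere. It shows the typical
+// lifecycle of a refs object produced from this template; the destroy
+// callback is assumed to be supplied by the embedding object.
+func exampleAioMappableRefsUsage(destroy func()) {
+ var r aioMappableRefs
+ r.InitRefs() // count = 1; registers with the leak checker if enabled.
+ r.IncRef() // count = 2.
+ if r.TryIncRef() { // succeeds while the real count is non-zero; count = 3.
+  r.DecRef(destroy) // count = 2.
+ }
+ r.DecRef(destroy) // count = 1.
+ r.DecRef(destroy) // count = 0: unregisters and invokes destroy.
+}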
diff --git a/pkg/sentry/mm/file_refcount_set.go b/pkg/sentry/mm/file_refcount_set.go
new file mode 100644
index 000000000..602a137d4
--- /dev/null
+++ b/pkg/sentry/mm/file_refcount_set.go
@@ -0,0 +1,1647 @@
+package mm
+
+import (
+ __generics_imported0 "gvisor.dev/gvisor/pkg/sentry/memmap"
+)
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// trackGaps is an optional parameter.
+//
+// If trackGaps is 1, the Set will track maximum gap size recursively,
+// enabling the GapIterator.{Prev,Next}LargeEnoughGap functions. In this
+// case, Key must be an unsigned integer.
+//
+// trackGaps must be 0 or 1.
+const fileRefcounttrackGaps = 0
+
+var _ = uint8(fileRefcounttrackGaps << 7) // Will fail if not zero or one.
+
+// dynamicGap is a type that disappears if trackGaps is 0.
+type fileRefcountdynamicGap [fileRefcounttrackGaps]uint64
+
+// Get returns the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *fileRefcountdynamicGap) Get() uint64 {
+ return d[:][0]
+}
+
+// Set sets the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *fileRefcountdynamicGap) Set(v uint64) {
+ d[:][0] = v
+}
+
+const (
+ // minDegree is the minimum degree of an internal node in a Set B-tree.
+ //
+ // - Any non-root node has at least minDegree-1 segments.
+ //
+ // - Any non-root internal (non-leaf) node has at least minDegree children.
+ //
+ // - The root node may have fewer than minDegree-1 segments, but it may
+ // only have 0 segments if the tree is empty.
+ //
+ // Our implementation requires minDegree >= 3. Higher values of minDegree
+ // usually improve performance, but increase memory usage for small sets.
+ fileRefcountminDegree = 3
+
+ fileRefcountmaxDegree = 2 * fileRefcountminDegree
+)
+
+// A Set is a mapping of segments with non-overlapping Range keys. The zero
+// value for a Set is an empty set. Set values are not safely movable nor
+// copyable. Set is thread-compatible.
+//
+// +stateify savable
+type fileRefcountSet struct {
+ root fileRefcountnode `state:".(*fileRefcountSegmentDataSlices)"`
+}
+
+// IsEmpty returns true if the set contains no segments.
+func (s *fileRefcountSet) IsEmpty() bool {
+ return s.root.nrSegments == 0
+}
+
+// IsEmptyRange returns true iff no segments in the set overlap the given
+// range. This is semantically equivalent to s.SpanRange(r) == 0, but may be
+// more efficient.
+func (s *fileRefcountSet) IsEmptyRange(r __generics_imported0.FileRange) bool {
+ switch {
+ case r.Length() < 0:
+ panic(fmt.Sprintf("invalid range %v", r))
+ case r.Length() == 0:
+ return true
+ }
+ _, gap := s.Find(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ return r.End <= gap.End()
+}
+
+// Span returns the total size of all segments in the set.
+func (s *fileRefcountSet) Span() uint64 {
+ var sz uint64
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ sz += seg.Range().Length()
+ }
+ return sz
+}
+
+// SpanRange returns the total size of the intersection of segments in the set
+// with the given range.
+func (s *fileRefcountSet) SpanRange(r __generics_imported0.FileRange) uint64 {
+ switch {
+ case r.Length() < 0:
+ panic(fmt.Sprintf("invalid range %v", r))
+ case r.Length() == 0:
+ return 0
+ }
+ var sz uint64
+ for seg := s.LowerBoundSegment(r.Start); seg.Ok() && seg.Start() < r.End; seg = seg.NextSegment() {
+ sz += seg.Range().Intersect(r).Length()
+ }
+ return sz
+}
+
+// FirstSegment returns the first segment in the set. If the set is empty,
+// FirstSegment returns a terminal iterator.
+func (s *fileRefcountSet) FirstSegment() fileRefcountIterator {
+ if s.root.nrSegments == 0 {
+ return fileRefcountIterator{}
+ }
+ return s.root.firstSegment()
+}
+
+// LastSegment returns the last segment in the set. If the set is empty,
+// LastSegment returns a terminal iterator.
+func (s *fileRefcountSet) LastSegment() fileRefcountIterator {
+ if s.root.nrSegments == 0 {
+ return fileRefcountIterator{}
+ }
+ return s.root.lastSegment()
+}
+
+// FirstGap returns the first gap in the set.
+func (s *fileRefcountSet) FirstGap() fileRefcountGapIterator {
+ n := &s.root
+ for n.hasChildren {
+ n = n.children[0]
+ }
+ return fileRefcountGapIterator{n, 0}
+}
+
+// LastGap returns the last gap in the set.
+func (s *fileRefcountSet) LastGap() fileRefcountGapIterator {
+ n := &s.root
+ for n.hasChildren {
+ n = n.children[n.nrSegments]
+ }
+ return fileRefcountGapIterator{n, n.nrSegments}
+}
+
+// Find returns the segment or gap whose range contains the given key. If a
+// segment is found, the returned Iterator is non-terminal and the
+// returned GapIterator is terminal. Otherwise, the returned Iterator is
+// terminal and the returned GapIterator is non-terminal.
+func (s *fileRefcountSet) Find(key uint64) (fileRefcountIterator, fileRefcountGapIterator) {
+ n := &s.root
+ for {
+
+ lower := 0
+ upper := n.nrSegments
+ for lower < upper {
+ i := lower + (upper-lower)/2
+ if r := n.keys[i]; key < r.End {
+ if key >= r.Start {
+ return fileRefcountIterator{n, i}, fileRefcountGapIterator{}
+ }
+ upper = i
+ } else {
+ lower = i + 1
+ }
+ }
+ i := lower
+ if !n.hasChildren {
+ return fileRefcountIterator{}, fileRefcountGapIterator{n, i}
+ }
+ n = n.children[i]
+ }
+}
+
+// FindSegment returns the segment whose range contains the given key. If no
+// such segment exists, FindSegment returns a terminal iterator.
+func (s *fileRefcountSet) FindSegment(key uint64) fileRefcountIterator {
+ seg, _ := s.Find(key)
+ return seg
+}
+
+// LowerBoundSegment returns the segment with the lowest range that contains a
+// key greater than or equal to min. If no such segment exists,
+// LowerBoundSegment returns a terminal iterator.
+func (s *fileRefcountSet) LowerBoundSegment(min uint64) fileRefcountIterator {
+ seg, gap := s.Find(min)
+ if seg.Ok() {
+ return seg
+ }
+ return gap.NextSegment()
+}
+
+// UpperBoundSegment returns the segment with the highest range that contains a
+// key less than or equal to max. If no such segment exists, UpperBoundSegment
+// returns a terminal iterator.
+func (s *fileRefcountSet) UpperBoundSegment(max uint64) fileRefcountIterator {
+ seg, gap := s.Find(max)
+ if seg.Ok() {
+ return seg
+ }
+ return gap.PrevSegment()
+}
+
+// FindGap returns the gap containing the given key. If no such gap exists
+// (i.e. the set contains a segment containing that key), FindGap returns a
+// terminal iterator.
+func (s *fileRefcountSet) FindGap(key uint64) fileRefcountGapIterator {
+ _, gap := s.Find(key)
+ return gap
+}
+
+// LowerBoundGap returns the gap with the lowest range that is greater than or
+// equal to min.
+func (s *fileRefcountSet) LowerBoundGap(min uint64) fileRefcountGapIterator {
+ seg, gap := s.Find(min)
+ if gap.Ok() {
+ return gap
+ }
+ return seg.NextGap()
+}
+
+// UpperBoundGap returns the gap with the highest range that is less than or
+// equal to max.
+func (s *fileRefcountSet) UpperBoundGap(max uint64) fileRefcountGapIterator {
+ seg, gap := s.Find(max)
+ if gap.Ok() {
+ return gap
+ }
+ return seg.PrevGap()
+}
+
+// Add inserts the given segment into the set and returns true. If the new
+// segment can be merged with adjacent segments, Add will do so. If the new
+// segment would overlap an existing segment, Add returns false. If Add
+// succeeds, all existing iterators are invalidated.
+func (s *fileRefcountSet) Add(r __generics_imported0.FileRange, val int32) bool {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ gap := s.FindGap(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ if r.End > gap.End() {
+ return false
+ }
+ s.Insert(gap, r, val)
+ return true
+}
+
+// AddWithoutMerging inserts the given segment into the set and returns true.
+// If it would overlap an existing segment, AddWithoutMerging does nothing and
+// returns false. If AddWithoutMerging succeeds, all existing iterators are
+// invalidated.
+func (s *fileRefcountSet) AddWithoutMerging(r __generics_imported0.FileRange, val int32) bool {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ gap := s.FindGap(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ if r.End > gap.End() {
+ return false
+ }
+ s.InsertWithoutMergingUnchecked(gap, r, val)
+ return true
+}
+
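+// exampleFileRefcountSetUsage is an illustrative sketch only: it is not part
+// of the generated set code and is not called anywhere. It shows basic use of
+// the generated set with arbitrary demonstration ranges.
+func exampleFileRefcountSetUsage() uint64 {
+ var s fileRefcountSet // the zero value is an empty set
+ // Track a reference count of 1 for two disjoint ranges of the file.
+ s.Add(__generics_imported0.FileRange{Start: 0x0, End: 0x1000}, 1)
+ s.Add(__generics_imported0.FileRange{Start: 0x2000, End: 0x3000}, 1)
+ // Walk the segments and total their lengths; equivalent to s.Span().
+ var total uint64
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+  total += seg.Range().Length()
+ }
+ return total // 0x2000
+}
+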
+// Insert inserts the given segment into the given gap. If the new segment can
+// be merged with adjacent segments, Insert will do so. Insert returns an
+// iterator to the segment containing the inserted value (which may have been
+// merged with other values). All existing iterators (including gap, but not
+// including the returned iterator) are invalidated.
+//
+// If the gap cannot accommodate the segment, or if r is invalid, Insert panics.
+//
+// Insert is semantically equivalent to a InsertWithoutMerging followed by a
+// Merge, but may be more efficient. Note that there is no unchecked variant of
+// Insert since Insert must retrieve and inspect gap's predecessor and
+// successor segments regardless.
+func (s *fileRefcountSet) Insert(gap fileRefcountGapIterator, r __generics_imported0.FileRange, val int32) fileRefcountIterator {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ prev, next := gap.PrevSegment(), gap.NextSegment()
+ if prev.Ok() && prev.End() > r.Start {
+ panic(fmt.Sprintf("new segment %v overlaps predecessor %v", r, prev.Range()))
+ }
+ if next.Ok() && next.Start() < r.End {
+ panic(fmt.Sprintf("new segment %v overlaps successor %v", r, next.Range()))
+ }
+ if prev.Ok() && prev.End() == r.Start {
+ if mval, ok := (fileRefcountSetFunctions{}).Merge(prev.Range(), prev.Value(), r, val); ok {
+ shrinkMaxGap := fileRefcounttrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
+ prev.SetEndUnchecked(r.End)
+ prev.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ if next.Ok() && next.Start() == r.End {
+ val = mval
+ if mval, ok := (fileRefcountSetFunctions{}).Merge(prev.Range(), val, next.Range(), next.Value()); ok {
+ prev.SetEndUnchecked(next.End())
+ prev.SetValue(mval)
+ return s.Remove(next).PrevSegment()
+ }
+ }
+ return prev
+ }
+ }
+ if next.Ok() && next.Start() == r.End {
+ if mval, ok := (fileRefcountSetFunctions{}).Merge(r, val, next.Range(), next.Value()); ok {
+ shrinkMaxGap := fileRefcounttrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
+ next.SetStartUnchecked(r.Start)
+ next.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ return next
+ }
+ }
+
+ return s.InsertWithoutMergingUnchecked(gap, r, val)
+}
+
+// InsertWithoutMerging inserts the given segment into the given gap and
+// returns an iterator to the inserted segment. All existing iterators
+// (including gap, but not including the returned iterator) are invalidated.
+//
+// If the gap cannot accommodate the segment, or if r is invalid,
+// InsertWithoutMerging panics.
+func (s *fileRefcountSet) InsertWithoutMerging(gap fileRefcountGapIterator, r __generics_imported0.FileRange, val int32) fileRefcountIterator {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ if gr := gap.Range(); !gr.IsSupersetOf(r) {
+ panic(fmt.Sprintf("cannot insert segment range %v into gap range %v", r, gr))
+ }
+ return s.InsertWithoutMergingUnchecked(gap, r, val)
+}
+
+// InsertWithoutMergingUnchecked inserts the given segment into the given gap
+// and returns an iterator to the inserted segment. All existing iterators
+// (including gap, but not including the returned iterator) are invalidated.
+//
+// Preconditions:
+// * r.Start >= gap.Start().
+// * r.End <= gap.End().
+func (s *fileRefcountSet) InsertWithoutMergingUnchecked(gap fileRefcountGapIterator, r __generics_imported0.FileRange, val int32) fileRefcountIterator {
+ gap = gap.node.rebalanceBeforeInsert(gap)
+ splitMaxGap := fileRefcounttrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
+ copy(gap.node.keys[gap.index+1:], gap.node.keys[gap.index:gap.node.nrSegments])
+ copy(gap.node.values[gap.index+1:], gap.node.values[gap.index:gap.node.nrSegments])
+ gap.node.keys[gap.index] = r
+ gap.node.values[gap.index] = val
+ gap.node.nrSegments++
+ if splitMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ return fileRefcountIterator{gap.node, gap.index}
+}
+
+// Remove removes the given segment and returns an iterator to the vacated gap.
+// All existing iterators (including seg, but not including the returned
+// iterator) are invalidated.
+func (s *fileRefcountSet) Remove(seg fileRefcountIterator) fileRefcountGapIterator {
+
+ if seg.node.hasChildren {
+
+ victim := seg.PrevSegment()
+
+ seg.SetRangeUnchecked(victim.Range())
+ seg.SetValue(victim.Value())
+
+ nextAdjacentNode := seg.NextSegment().node
+ if fileRefcounttrackGaps != 0 {
+ nextAdjacentNode.updateMaxGapLeaf()
+ }
+ return s.Remove(victim).NextGap()
+ }
+ copy(seg.node.keys[seg.index:], seg.node.keys[seg.index+1:seg.node.nrSegments])
+ copy(seg.node.values[seg.index:], seg.node.values[seg.index+1:seg.node.nrSegments])
+ fileRefcountSetFunctions{}.ClearValue(&seg.node.values[seg.node.nrSegments-1])
+ seg.node.nrSegments--
+ if fileRefcounttrackGaps != 0 {
+ seg.node.updateMaxGapLeaf()
+ }
+ return seg.node.rebalanceAfterRemove(fileRefcountGapIterator{seg.node, seg.index})
+}
+
+// RemoveAll removes all segments from the set. All existing iterators are
+// invalidated.
+func (s *fileRefcountSet) RemoveAll() {
+ s.root = fileRefcountnode{}
+}
+
+// RemoveRange removes all segments in the given range. An iterator to the
+// newly formed gap is returned, and all existing iterators are invalidated.
+func (s *fileRefcountSet) RemoveRange(r __generics_imported0.FileRange) fileRefcountGapIterator {
+ seg, gap := s.Find(r.Start)
+ if seg.Ok() {
+ seg = s.Isolate(seg, r)
+ gap = s.Remove(seg)
+ }
+ for seg = gap.NextSegment(); seg.Ok() && seg.Start() < r.End; seg = gap.NextSegment() {
+ seg = s.Isolate(seg, r)
+ gap = s.Remove(seg)
+ }
+ return gap
+}
+
+// Merge attempts to merge two neighboring segments. If successful, Merge
+// returns an iterator to the merged segment, and all existing iterators are
+// invalidated. Otherwise, Merge returns a terminal iterator.
+//
+// If first is not the predecessor of second, Merge panics.
+func (s *fileRefcountSet) Merge(first, second fileRefcountIterator) fileRefcountIterator {
+ if first.NextSegment() != second {
+ panic(fmt.Sprintf("attempt to merge non-neighboring segments %v, %v", first.Range(), second.Range()))
+ }
+ return s.MergeUnchecked(first, second)
+}
+
+// MergeUnchecked attempts to merge two neighboring segments. If successful,
+// MergeUnchecked returns an iterator to the merged segment, and all existing
+// iterators are invalidated. Otherwise, MergeUnchecked returns a terminal
+// iterator.
+//
+// Precondition: first is the predecessor of second: first.NextSegment() ==
+// second, first == second.PrevSegment().
+func (s *fileRefcountSet) MergeUnchecked(first, second fileRefcountIterator) fileRefcountIterator {
+ if first.End() == second.Start() {
+ if mval, ok := (fileRefcountSetFunctions{}).Merge(first.Range(), first.Value(), second.Range(), second.Value()); ok {
+
+ first.SetEndUnchecked(second.End())
+ first.SetValue(mval)
+
+ return s.Remove(second).PrevSegment()
+ }
+ }
+ return fileRefcountIterator{}
+}
+
+// MergeAll attempts to merge all adjacent segments in the set. All existing
+// iterators are invalidated.
+func (s *fileRefcountSet) MergeAll() {
+ seg := s.FirstSegment()
+ if !seg.Ok() {
+ return
+ }
+ next := seg.NextSegment()
+ for next.Ok() {
+ if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
+ seg, next = mseg, mseg.NextSegment()
+ } else {
+ seg, next = next, next.NextSegment()
+ }
+ }
+}
+
+// MergeRange attempts to merge all adjacent segments that contain a key in the
+// specific range. All existing iterators are invalidated.
+func (s *fileRefcountSet) MergeRange(r __generics_imported0.FileRange) {
+ seg := s.LowerBoundSegment(r.Start)
+ if !seg.Ok() {
+ return
+ }
+ next := seg.NextSegment()
+ for next.Ok() && next.Range().Start < r.End {
+ if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
+ seg, next = mseg, mseg.NextSegment()
+ } else {
+ seg, next = next, next.NextSegment()
+ }
+ }
+}
+
+// MergeAdjacent attempts to merge the segment containing r.Start with its
+// predecessor, and the segment containing r.End-1 with its successor.
+func (s *fileRefcountSet) MergeAdjacent(r __generics_imported0.FileRange) {
+ first := s.FindSegment(r.Start)
+ if first.Ok() {
+ if prev := first.PrevSegment(); prev.Ok() {
+ s.Merge(prev, first)
+ }
+ }
+ last := s.FindSegment(r.End - 1)
+ if last.Ok() {
+ if next := last.NextSegment(); next.Ok() {
+ s.Merge(last, next)
+ }
+ }
+}
+
+// Split splits the given segment at the given key and returns iterators to the
+// two resulting segments. All existing iterators (including seg, but not
+// including the returned iterators) are invalidated.
+//
+// If the segment cannot be split at split (because split is at the start or
+// end of the segment's range, so splitting would produce a segment with zero
+// length, or because split falls outside the segment's range altogether),
+// Split panics.
+func (s *fileRefcountSet) Split(seg fileRefcountIterator, split uint64) (fileRefcountIterator, fileRefcountIterator) {
+ if !seg.Range().CanSplitAt(split) {
+ panic(fmt.Sprintf("can't split %v at %v", seg.Range(), split))
+ }
+ return s.SplitUnchecked(seg, split)
+}
+
+// SplitUnchecked splits the given segment at the given key and returns
+// iterators to the two resulting segments. All existing iterators (including
+// seg, but not including the returned iterators) are invalidated.
+//
+// Preconditions: seg.Start() < key < seg.End().
+func (s *fileRefcountSet) SplitUnchecked(seg fileRefcountIterator, split uint64) (fileRefcountIterator, fileRefcountIterator) {
+ val1, val2 := (fileRefcountSetFunctions{}).Split(seg.Range(), seg.Value(), split)
+ end2 := seg.End()
+ seg.SetEndUnchecked(split)
+ seg.SetValue(val1)
+ seg2 := s.InsertWithoutMergingUnchecked(seg.NextGap(), __generics_imported0.FileRange{split, end2}, val2)
+
+ return seg2.PrevSegment(), seg2
+}
+
+// SplitAt splits the segment straddling split, if one exists. SplitAt returns
+// true if a segment was split and false otherwise. If SplitAt splits a
+// segment, all existing iterators are invalidated.
+func (s *fileRefcountSet) SplitAt(split uint64) bool {
+ if seg := s.FindSegment(split); seg.Ok() && seg.Range().CanSplitAt(split) {
+ s.SplitUnchecked(seg, split)
+ return true
+ }
+ return false
+}
+
+// Isolate ensures that the given segment's range does not escape r by
+// splitting at r.Start and r.End if necessary, and returns an updated iterator
+// to the bounded segment. All existing iterators (including seg, but not
+// including the returned iterators) are invalidated.
+func (s *fileRefcountSet) Isolate(seg fileRefcountIterator, r __generics_imported0.FileRange) fileRefcountIterator {
+ if seg.Range().CanSplitAt(r.Start) {
+ _, seg = s.SplitUnchecked(seg, r.Start)
+ }
+ if seg.Range().CanSplitAt(r.End) {
+ seg, _ = s.SplitUnchecked(seg, r.End)
+ }
+ return seg
+}
+
+// ApplyContiguous applies a function to a contiguous range of segments,
+// splitting if necessary. The function is applied until the first gap is
+// encountered, at which point the gap is returned. If the function is applied
+// across the entire range, a terminal gap is returned. All existing iterators
+// are invalidated.
+//
+// N.B. The Iterator must not be invalidated by the function.
+func (s *fileRefcountSet) ApplyContiguous(r __generics_imported0.FileRange, fn func(seg fileRefcountIterator)) fileRefcountGapIterator {
+ seg, gap := s.Find(r.Start)
+ if !seg.Ok() {
+ return gap
+ }
+ for {
+ seg = s.Isolate(seg, r)
+ fn(seg)
+ if seg.End() >= r.End {
+ return fileRefcountGapIterator{}
+ }
+ gap = seg.NextGap()
+ if !gap.IsEmpty() {
+ return gap
+ }
+ seg = gap.NextSegment()
+ if !seg.Ok() {
+
+ return fileRefcountGapIterator{}
+ }
+ }
+}
+
+// +stateify savable
+type fileRefcountnode struct {
+ // An internal binary tree node looks like:
+ //
+ // K
+ // / \
+ // Cl Cr
+ //
+ // where all keys in the subtree rooted by Cl (the left subtree) are less
+ // than K (the key of the parent node), and all keys in the subtree rooted
+ // by Cr (the right subtree) are greater than K.
+ //
+ // An internal B-tree node's indexes work out to look like:
+ //
+ // K0 K1 K2 ... Kn-1
+ // / \/ \/ \ ... / \
+ // C0 C1 C2 C3 ... Cn-1 Cn
+ //
+ // where n is nrSegments.
+ nrSegments int
+
+ // parent is a pointer to this node's parent. If this node is root, parent
+ // is nil.
+ parent *fileRefcountnode
+
+ // parentIndex is the index of this node in parent.children.
+ parentIndex int
+
+ // Flag for internal nodes that is technically redundant with "children[0]
+ // != nil", but is stored in the first cache line. "hasChildren" rather
+ // than "isLeaf" because false must be the correct value for an empty root.
+ hasChildren bool
+
+ // The longest gap within this node. If the node is a leaf, it's simply the
+ // maximum gap among all the (nrSegments+1) gaps formed by its nrSegments keys
+ // including the 0th and nrSegments-th gap possibly shared with its upper-level
+ // nodes; if it's a non-leaf node, it's the max of all children's maxGap.
+ maxGap fileRefcountdynamicGap
+
+ // Nodes store keys and values in separate arrays to maximize locality in
+ // the common case (scanning keys for lookup).
+ keys [fileRefcountmaxDegree - 1]__generics_imported0.FileRange
+ values [fileRefcountmaxDegree - 1]int32
+ children [fileRefcountmaxDegree]*fileRefcountnode
+}
+
+// firstSegment returns the first segment in the subtree rooted by n.
+//
+// Preconditions: n.nrSegments != 0.
+func (n *fileRefcountnode) firstSegment() fileRefcountIterator {
+ for n.hasChildren {
+ n = n.children[0]
+ }
+ return fileRefcountIterator{n, 0}
+}
+
+// lastSegment returns the last segment in the subtree rooted by n.
+//
+// Preconditions: n.nrSegments != 0.
+func (n *fileRefcountnode) lastSegment() fileRefcountIterator {
+ for n.hasChildren {
+ n = n.children[n.nrSegments]
+ }
+ return fileRefcountIterator{n, n.nrSegments - 1}
+}
+
+func (n *fileRefcountnode) prevSibling() *fileRefcountnode {
+ if n.parent == nil || n.parentIndex == 0 {
+ return nil
+ }
+ return n.parent.children[n.parentIndex-1]
+}
+
+func (n *fileRefcountnode) nextSibling() *fileRefcountnode {
+ if n.parent == nil || n.parentIndex == n.parent.nrSegments {
+ return nil
+ }
+ return n.parent.children[n.parentIndex+1]
+}
+
+// rebalanceBeforeInsert splits n and its ancestors if they are full, as
+// required for insertion, and returns an updated iterator to the position
+// represented by gap.
+func (n *fileRefcountnode) rebalanceBeforeInsert(gap fileRefcountGapIterator) fileRefcountGapIterator {
+ if n.nrSegments < fileRefcountmaxDegree-1 {
+ return gap
+ }
+ if n.parent != nil {
+ gap = n.parent.rebalanceBeforeInsert(gap)
+ }
+ if n.parent == nil {
+
+ left := &fileRefcountnode{
+ nrSegments: fileRefcountminDegree - 1,
+ parent: n,
+ parentIndex: 0,
+ hasChildren: n.hasChildren,
+ }
+ right := &fileRefcountnode{
+ nrSegments: fileRefcountminDegree - 1,
+ parent: n,
+ parentIndex: 1,
+ hasChildren: n.hasChildren,
+ }
+ copy(left.keys[:fileRefcountminDegree-1], n.keys[:fileRefcountminDegree-1])
+ copy(left.values[:fileRefcountminDegree-1], n.values[:fileRefcountminDegree-1])
+ copy(right.keys[:fileRefcountminDegree-1], n.keys[fileRefcountminDegree:])
+ copy(right.values[:fileRefcountminDegree-1], n.values[fileRefcountminDegree:])
+ n.keys[0], n.values[0] = n.keys[fileRefcountminDegree-1], n.values[fileRefcountminDegree-1]
+ fileRefcountzeroValueSlice(n.values[1:])
+ if n.hasChildren {
+ copy(left.children[:fileRefcountminDegree], n.children[:fileRefcountminDegree])
+ copy(right.children[:fileRefcountminDegree], n.children[fileRefcountminDegree:])
+ fileRefcountzeroNodeSlice(n.children[2:])
+ for i := 0; i < fileRefcountminDegree; i++ {
+ left.children[i].parent = left
+ left.children[i].parentIndex = i
+ right.children[i].parent = right
+ right.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments = 1
+ n.hasChildren = true
+ n.children[0] = left
+ n.children[1] = right
+
+ if fileRefcounttrackGaps != 0 {
+ left.updateMaxGapLocal()
+ right.updateMaxGapLocal()
+ }
+ if gap.node != n {
+ return gap
+ }
+ if gap.index < fileRefcountminDegree {
+ return fileRefcountGapIterator{left, gap.index}
+ }
+ return fileRefcountGapIterator{right, gap.index - fileRefcountminDegree}
+ }
+
+ copy(n.parent.keys[n.parentIndex+1:], n.parent.keys[n.parentIndex:n.parent.nrSegments])
+ copy(n.parent.values[n.parentIndex+1:], n.parent.values[n.parentIndex:n.parent.nrSegments])
+ n.parent.keys[n.parentIndex], n.parent.values[n.parentIndex] = n.keys[fileRefcountminDegree-1], n.values[fileRefcountminDegree-1]
+ copy(n.parent.children[n.parentIndex+2:], n.parent.children[n.parentIndex+1:n.parent.nrSegments+1])
+ for i := n.parentIndex + 2; i < n.parent.nrSegments+2; i++ {
+ n.parent.children[i].parentIndex = i
+ }
+ sibling := &fileRefcountnode{
+ nrSegments: fileRefcountminDegree - 1,
+ parent: n.parent,
+ parentIndex: n.parentIndex + 1,
+ hasChildren: n.hasChildren,
+ }
+ n.parent.children[n.parentIndex+1] = sibling
+ n.parent.nrSegments++
+ copy(sibling.keys[:fileRefcountminDegree-1], n.keys[fileRefcountminDegree:])
+ copy(sibling.values[:fileRefcountminDegree-1], n.values[fileRefcountminDegree:])
+ fileRefcountzeroValueSlice(n.values[fileRefcountminDegree-1:])
+ if n.hasChildren {
+ copy(sibling.children[:fileRefcountminDegree], n.children[fileRefcountminDegree:])
+ fileRefcountzeroNodeSlice(n.children[fileRefcountminDegree:])
+ for i := 0; i < fileRefcountminDegree; i++ {
+ sibling.children[i].parent = sibling
+ sibling.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments = fileRefcountminDegree - 1
+
+ if fileRefcounttrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+
+ if gap.node != n {
+ return gap
+ }
+ if gap.index < fileRefcountminDegree {
+ return gap
+ }
+ return fileRefcountGapIterator{sibling, gap.index - fileRefcountminDegree}
+}
+
+// rebalanceAfterRemove "unsplits" n and its ancestors if they are deficient
+// (contain fewer segments than required by B-tree invariants), as required for
+// removal, and returns an updated iterator to the position represented by gap.
+//
+// Precondition: n is the only node in the tree that may currently violate a
+// B-tree invariant.
+func (n *fileRefcountnode) rebalanceAfterRemove(gap fileRefcountGapIterator) fileRefcountGapIterator {
+ for {
+ if n.nrSegments >= fileRefcountminDegree-1 {
+ return gap
+ }
+ if n.parent == nil {
+
+ return gap
+ }
+
+ if sibling := n.prevSibling(); sibling != nil && sibling.nrSegments >= fileRefcountminDegree {
+ copy(n.keys[1:], n.keys[:n.nrSegments])
+ copy(n.values[1:], n.values[:n.nrSegments])
+ n.keys[0] = n.parent.keys[n.parentIndex-1]
+ n.values[0] = n.parent.values[n.parentIndex-1]
+ n.parent.keys[n.parentIndex-1] = sibling.keys[sibling.nrSegments-1]
+ n.parent.values[n.parentIndex-1] = sibling.values[sibling.nrSegments-1]
+ fileRefcountSetFunctions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
+ if n.hasChildren {
+ copy(n.children[1:], n.children[:n.nrSegments+1])
+ n.children[0] = sibling.children[sibling.nrSegments]
+ sibling.children[sibling.nrSegments] = nil
+ n.children[0].parent = n
+ n.children[0].parentIndex = 0
+ for i := 1; i < n.nrSegments+2; i++ {
+ n.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments++
+ sibling.nrSegments--
+
+ if fileRefcounttrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+ if gap.node == sibling && gap.index == sibling.nrSegments {
+ return fileRefcountGapIterator{n, 0}
+ }
+ if gap.node == n {
+ return fileRefcountGapIterator{n, gap.index + 1}
+ }
+ return gap
+ }
+ if sibling := n.nextSibling(); sibling != nil && sibling.nrSegments >= fileRefcountminDegree {
+ n.keys[n.nrSegments] = n.parent.keys[n.parentIndex]
+ n.values[n.nrSegments] = n.parent.values[n.parentIndex]
+ n.parent.keys[n.parentIndex] = sibling.keys[0]
+ n.parent.values[n.parentIndex] = sibling.values[0]
+ copy(sibling.keys[:sibling.nrSegments-1], sibling.keys[1:])
+ copy(sibling.values[:sibling.nrSegments-1], sibling.values[1:])
+ fileRefcountSetFunctions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
+ if n.hasChildren {
+ n.children[n.nrSegments+1] = sibling.children[0]
+ copy(sibling.children[:sibling.nrSegments], sibling.children[1:])
+ sibling.children[sibling.nrSegments] = nil
+ n.children[n.nrSegments+1].parent = n
+ n.children[n.nrSegments+1].parentIndex = n.nrSegments + 1
+ for i := 0; i < sibling.nrSegments; i++ {
+ sibling.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments++
+ sibling.nrSegments--
+
+ if fileRefcounttrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+ if gap.node == sibling {
+ if gap.index == 0 {
+ return fileRefcountGapIterator{n, n.nrSegments}
+ }
+ return fileRefcountGapIterator{sibling, gap.index - 1}
+ }
+ return gap
+ }
+
+ p := n.parent
+ if p.nrSegments == 1 {
+
+ left, right := p.children[0], p.children[1]
+ p.nrSegments = left.nrSegments + right.nrSegments + 1
+ p.hasChildren = left.hasChildren
+ p.keys[left.nrSegments] = p.keys[0]
+ p.values[left.nrSegments] = p.values[0]
+ copy(p.keys[:left.nrSegments], left.keys[:left.nrSegments])
+ copy(p.values[:left.nrSegments], left.values[:left.nrSegments])
+ copy(p.keys[left.nrSegments+1:], right.keys[:right.nrSegments])
+ copy(p.values[left.nrSegments+1:], right.values[:right.nrSegments])
+ if left.hasChildren {
+ copy(p.children[:left.nrSegments+1], left.children[:left.nrSegments+1])
+ copy(p.children[left.nrSegments+1:], right.children[:right.nrSegments+1])
+ for i := 0; i < p.nrSegments+1; i++ {
+ p.children[i].parent = p
+ p.children[i].parentIndex = i
+ }
+ } else {
+ p.children[0] = nil
+ p.children[1] = nil
+ }
+
+ if gap.node == left {
+ return fileRefcountGapIterator{p, gap.index}
+ }
+ if gap.node == right {
+ return fileRefcountGapIterator{p, gap.index + left.nrSegments + 1}
+ }
+ return gap
+ }
+ // Merge n and either sibling, along with the segment separating the
+ // two, into whichever of the two nodes comes first. This is the
+ // reverse of the non-root splitting case in
+ // node.rebalanceBeforeInsert.
+ var left, right *fileRefcountnode
+ if n.parentIndex > 0 {
+ left = n.prevSibling()
+ right = n
+ } else {
+ left = n
+ right = n.nextSibling()
+ }
+
+ if gap.node == right {
+ gap = fileRefcountGapIterator{left, gap.index + left.nrSegments + 1}
+ }
+ left.keys[left.nrSegments] = p.keys[left.parentIndex]
+ left.values[left.nrSegments] = p.values[left.parentIndex]
+ copy(left.keys[left.nrSegments+1:], right.keys[:right.nrSegments])
+ copy(left.values[left.nrSegments+1:], right.values[:right.nrSegments])
+ if left.hasChildren {
+ copy(left.children[left.nrSegments+1:], right.children[:right.nrSegments+1])
+ for i := left.nrSegments + 1; i < left.nrSegments+right.nrSegments+2; i++ {
+ left.children[i].parent = left
+ left.children[i].parentIndex = i
+ }
+ }
+ left.nrSegments += right.nrSegments + 1
+ copy(p.keys[left.parentIndex:], p.keys[left.parentIndex+1:p.nrSegments])
+ copy(p.values[left.parentIndex:], p.values[left.parentIndex+1:p.nrSegments])
+ fileRefcountSetFunctions{}.ClearValue(&p.values[p.nrSegments-1])
+ copy(p.children[left.parentIndex+1:], p.children[left.parentIndex+2:p.nrSegments+1])
+ for i := 0; i < p.nrSegments; i++ {
+ p.children[i].parentIndex = i
+ }
+ p.children[p.nrSegments] = nil
+ p.nrSegments--
+
+ if fileRefcounttrackGaps != 0 {
+ left.updateMaxGapLocal()
+ }
+
+ n = p
+ }
+}
+
+// updateMaxGapLeaf updates maxGap bottom-up from the calling leaf until no
+// further update is necessary.
+//
+// Preconditions: n must be a leaf node, trackGaps must be 1.
+func (n *fileRefcountnode) updateMaxGapLeaf() {
+ if n.hasChildren {
+ panic(fmt.Sprintf("updateMaxGapLeaf should always be called on leaf node: %v", n))
+ }
+ max := n.calculateMaxGapLeaf()
+ if max == n.maxGap.Get() {
+
+ return
+ }
+ oldMax := n.maxGap.Get()
+ n.maxGap.Set(max)
+ if max > oldMax {
+
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() >= max {
+
+ break
+ }
+
+ p.maxGap.Set(max)
+ }
+ return
+ }
+
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() > oldMax {
+
+ break
+ }
+
+ parentNewMax := p.calculateMaxGapInternal()
+ if p.maxGap.Get() == parentNewMax {
+
+ break
+ }
+
+ p.maxGap.Set(parentNewMax)
+ }
+}
+
+// updateMaxGapLocal updates the maxGap of the calling node only, without
+// propagating to ancestor nodes.
+//
+// Precondition: trackGaps must be 1.
+func (n *fileRefcountnode) updateMaxGapLocal() {
+ if !n.hasChildren {
+
+ n.maxGap.Set(n.calculateMaxGapLeaf())
+ } else {
+
+ n.maxGap.Set(n.calculateMaxGapInternal())
+ }
+}
+
+// calculateMaxGapLeaf iterates the gaps within a leaf node and calculates the
+// maximum gap length.
+//
+// Preconditions: n must be a leaf node.
+func (n *fileRefcountnode) calculateMaxGapLeaf() uint64 {
+ max := fileRefcountGapIterator{n, 0}.Range().Length()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := (fileRefcountGapIterator{n, i}).Range().Length(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// calculateMaxGapInternal iterates the children's cached maxGap values within
+// an internal node n and calculates the maximum.
+//
+// Preconditions: n must be a non-leaf node.
+func (n *fileRefcountnode) calculateMaxGapInternal() uint64 {
+ max := n.children[0].maxGap.Get()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := n.children[i].maxGap.Get(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// searchFirstLargeEnoughGap returns the first gap with length of at least
+// minSize in the subtree rooted at n. If no such gap exists, it returns a
+// terminal gap iterator.
+func (n *fileRefcountnode) searchFirstLargeEnoughGap(minSize uint64) fileRefcountGapIterator {
+ if n.maxGap.Get() < minSize {
+ return fileRefcountGapIterator{}
+ }
+ if n.hasChildren {
+ for i := 0; i <= n.nrSegments; i++ {
+ if largeEnoughGap := n.children[i].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := 0; i <= n.nrSegments; i++ {
+ currentGap := fileRefcountGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
+// searchLastLargeEnoughGap returns the last gap with length of at least
+// minSize in the subtree rooted at n. If no such gap exists, it returns a
+// terminal gap iterator.
+func (n *fileRefcountnode) searchLastLargeEnoughGap(minSize uint64) fileRefcountGapIterator {
+ if n.maxGap.Get() < minSize {
+ return fileRefcountGapIterator{}
+ }
+ if n.hasChildren {
+ for i := n.nrSegments; i >= 0; i-- {
+ if largeEnoughGap := n.children[i].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := n.nrSegments; i >= 0; i-- {
+ currentGap := fileRefcountGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
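+
+// The sketch below is an editorial illustration, not part of the generated
+// file: because each node caches the maximum gap length found in its subtree,
+// a first-fit query can prune whole subtrees whose cached maxGap cannot
+// satisfy the request. The helper name is hypothetical, and the query is only
+// meaningful for sets instantiated with trackGaps == 1.
+func fileRefcountFirstFitSketch(s *fileRefcountSet, minSize uint64) fileRefcountGapIterator {
+	// Returns a terminal gap iterator if no gap of at least minSize bytes
+	// exists anywhere in the set.
+	return s.root.searchFirstLargeEnoughGap(minSize)
+}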
+
+// An Iterator is conceptually one of:
+//
+// - A pointer to a segment in a set; or
+//
+// - A terminal iterator, which is a sentinel indicating that the end of
+// iteration has been reached.
+//
+// Iterators are copyable values and are meaningfully equality-comparable. The
+// zero value of Iterator is a terminal iterator.
+//
+// Unless otherwise specified, any mutation of a set invalidates all existing
+// iterators into the set.
+type fileRefcountIterator struct {
+ // node is the node containing the iterated segment. If the iterator is
+ // terminal, node is nil.
+ node *fileRefcountnode
+
+ // index is the index of the segment in node.keys/values.
+ index int
+}
+
+// Ok returns true if the iterator is not terminal. All other methods are only
+// valid for non-terminal iterators.
+func (seg fileRefcountIterator) Ok() bool {
+ return seg.node != nil
+}
+
+// Range returns the iterated segment's range key.
+func (seg fileRefcountIterator) Range() __generics_imported0.FileRange {
+ return seg.node.keys[seg.index]
+}
+
+// Start is equivalent to Range().Start, but should be preferred if only the
+// start of the range is needed.
+func (seg fileRefcountIterator) Start() uint64 {
+ return seg.node.keys[seg.index].Start
+}
+
+// End is equivalent to Range().End, but should be preferred if only the end of
+// the range is needed.
+func (seg fileRefcountIterator) End() uint64 {
+ return seg.node.keys[seg.index].End
+}
+
+// SetRangeUnchecked mutates the iterated segment's range key. This operation
+// does not invalidate any iterators.
+//
+// Preconditions:
+// * r.Length() > 0.
+// * The new range must not overlap an existing one:
+// * If seg.NextSegment().Ok(), then r.end <= seg.NextSegment().Start().
+// * If seg.PrevSegment().Ok(), then r.start >= seg.PrevSegment().End().
+func (seg fileRefcountIterator) SetRangeUnchecked(r __generics_imported0.FileRange) {
+ seg.node.keys[seg.index] = r
+}
+
+// SetRange mutates the iterated segment's range key. If the new range would
+// cause the iterated segment to overlap another segment, or if the new range
+// is invalid, SetRange panics. This operation does not invalidate any
+// iterators.
+func (seg fileRefcountIterator) SetRange(r __generics_imported0.FileRange) {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ if prev := seg.PrevSegment(); prev.Ok() && r.Start < prev.End() {
+ panic(fmt.Sprintf("new segment range %v overlaps segment range %v", r, prev.Range()))
+ }
+ if next := seg.NextSegment(); next.Ok() && r.End > next.Start() {
+ panic(fmt.Sprintf("new segment range %v overlaps segment range %v", r, next.Range()))
+ }
+ seg.SetRangeUnchecked(r)
+}
+
+// SetStartUnchecked mutates the iterated segment's start. This operation does
+// not invalidate any iterators.
+//
+// Preconditions: The new start must be valid:
+// * start < seg.End()
+// * If seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
+func (seg fileRefcountIterator) SetStartUnchecked(start uint64) {
+ seg.node.keys[seg.index].Start = start
+}
+
+// SetStart mutates the iterated segment's start. If the new start value would
+// cause the iterated segment to overlap another segment, or would result in an
+// invalid range, SetStart panics. This operation does not invalidate any
+// iterators.
+func (seg fileRefcountIterator) SetStart(start uint64) {
+ if start >= seg.End() {
+ panic(fmt.Sprintf("new start %v would invalidate segment range %v", start, seg.Range()))
+ }
+ if prev := seg.PrevSegment(); prev.Ok() && start < prev.End() {
+ panic(fmt.Sprintf("new start %v would cause segment range %v to overlap segment range %v", start, seg.Range(), prev.Range()))
+ }
+ seg.SetStartUnchecked(start)
+}
+
+// SetEndUnchecked mutates the iterated segment's end. This operation does not
+// invalidate any iterators.
+//
+// Preconditions: The new end must be valid:
+// * end > seg.Start().
+// * If seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
+func (seg fileRefcountIterator) SetEndUnchecked(end uint64) {
+ seg.node.keys[seg.index].End = end
+}
+
+// SetEnd mutates the iterated segment's end. If the new end value would cause
+// the iterated segment to overlap another segment, or would result in an
+// invalid range, SetEnd panics. This operation does not invalidate any
+// iterators.
+func (seg fileRefcountIterator) SetEnd(end uint64) {
+ if end <= seg.Start() {
+ panic(fmt.Sprintf("new end %v would invalidate segment range %v", end, seg.Range()))
+ }
+ if next := seg.NextSegment(); next.Ok() && end > next.Start() {
+ panic(fmt.Sprintf("new end %v would cause segment range %v to overlap segment range %v", end, seg.Range(), next.Range()))
+ }
+ seg.SetEndUnchecked(end)
+}
+
+// Value returns a copy of the iterated segment's value.
+func (seg fileRefcountIterator) Value() int32 {
+ return seg.node.values[seg.index]
+}
+
+// ValuePtr returns a pointer to the iterated segment's value. The pointer is
+// invalidated if the iterator is invalidated. This operation does not
+// invalidate any iterators.
+func (seg fileRefcountIterator) ValuePtr() *int32 {
+ return &seg.node.values[seg.index]
+}
+
+// SetValue mutates the iterated segment's value. This operation does not
+// invalidate any iterators.
+func (seg fileRefcountIterator) SetValue(val int32) {
+ seg.node.values[seg.index] = val
+}
+
+// PrevSegment returns the iterated segment's predecessor. If there is no
+// preceding segment, PrevSegment returns a terminal iterator.
+func (seg fileRefcountIterator) PrevSegment() fileRefcountIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index].lastSegment()
+ }
+ if seg.index > 0 {
+ return fileRefcountIterator{seg.node, seg.index - 1}
+ }
+ if seg.node.parent == nil {
+ return fileRefcountIterator{}
+ }
+ return fileRefcountsegmentBeforePosition(seg.node.parent, seg.node.parentIndex)
+}
+
+// NextSegment returns the iterated segment's successor. If there is no
+// succeeding segment, NextSegment returns a terminal iterator.
+func (seg fileRefcountIterator) NextSegment() fileRefcountIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index+1].firstSegment()
+ }
+ if seg.index < seg.node.nrSegments-1 {
+ return fileRefcountIterator{seg.node, seg.index + 1}
+ }
+ if seg.node.parent == nil {
+ return fileRefcountIterator{}
+ }
+ return fileRefcountsegmentAfterPosition(seg.node.parent, seg.node.parentIndex)
+}
+
+// PrevGap returns the gap immediately before the iterated segment.
+func (seg fileRefcountIterator) PrevGap() fileRefcountGapIterator {
+ if seg.node.hasChildren {
+
+ return seg.node.children[seg.index].lastSegment().NextGap()
+ }
+ return fileRefcountGapIterator{seg.node, seg.index}
+}
+
+// NextGap returns the gap immediately after the iterated segment.
+func (seg fileRefcountIterator) NextGap() fileRefcountGapIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index+1].firstSegment().PrevGap()
+ }
+ return fileRefcountGapIterator{seg.node, seg.index + 1}
+}
+
+// PrevNonEmpty returns the iterated segment's predecessor if it is adjacent,
+// or the gap before the iterated segment otherwise. If seg.Start() ==
+// Functions.MinKey(), PrevNonEmpty will return two terminal iterators.
+// Otherwise, exactly one of the iterators returned by PrevNonEmpty will be
+// non-terminal.
+func (seg fileRefcountIterator) PrevNonEmpty() (fileRefcountIterator, fileRefcountGapIterator) {
+ gap := seg.PrevGap()
+ if gap.Range().Length() != 0 {
+ return fileRefcountIterator{}, gap
+ }
+ return gap.PrevSegment(), fileRefcountGapIterator{}
+}
+
+// NextNonEmpty returns the iterated segment's successor if it is adjacent, or
+// the gap after the iterated segment otherwise. If seg.End() ==
+// Functions.MaxKey(), NextNonEmpty will return two terminal iterators.
+// Otherwise, exactly one of the iterators returned by NextNonEmpty will be
+// non-terminal.
+func (seg fileRefcountIterator) NextNonEmpty() (fileRefcountIterator, fileRefcountGapIterator) {
+ gap := seg.NextGap()
+ if gap.Range().Length() != 0 {
+ return fileRefcountIterator{}, gap
+ }
+ return gap.NextSegment(), fileRefcountGapIterator{}
+}
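+
+// The sketch below is an editorial illustration, not part of the generated
+// file: it shows the idiomatic read-only traversal of a set in ascending key
+// order using the Iterator methods above. The helper name is hypothetical.
+func fileRefcountTotalRefsSketch(s *fileRefcountSet) int64 {
+	var total int64
+	for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+		// seg.Range() is the FileRange covered by the segment; seg.Value()
+		// is the int32 reference count stored for that range.
+		total += int64(seg.Value()) * int64(seg.Range().Length())
+	}
+	return total
+}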
+
+// A GapIterator is conceptually one of:
+//
+// - A pointer to a position between two segments, before the first segment, or
+// after the last segment in a set, called a *gap*; or
+//
+// - A terminal iterator, which is a sentinel indicating that the end of
+// iteration has been reached.
+//
+// Note that the gap between two adjacent segments exists (iterators to it are
+// non-terminal), but has a length of zero. GapIterator.IsEmpty returns true
+// for such gaps. An empty set contains a single gap, spanning the entire range
+// of the set's keys.
+//
+// GapIterators are copyable values and are meaningfully equality-comparable.
+// The zero value of GapIterator is a terminal iterator.
+//
+// Unless otherwise specified, any mutation of a set invalidates all existing
+// iterators into the set.
+type fileRefcountGapIterator struct {
+ // The representation of a GapIterator is identical to that of an Iterator,
+ // except that index corresponds to positions between segments in the same
+ // way as for node.children (see comment for node.nrSegments).
+ node *fileRefcountnode
+ index int
+}
+
+// Ok returns true if the iterator is not terminal. All other methods are only
+// valid for non-terminal iterators.
+func (gap fileRefcountGapIterator) Ok() bool {
+ return gap.node != nil
+}
+
+// Range returns the range spanned by the iterated gap.
+func (gap fileRefcountGapIterator) Range() __generics_imported0.FileRange {
+ return __generics_imported0.FileRange{gap.Start(), gap.End()}
+}
+
+// Start is equivalent to Range().Start, but should be preferred if only the
+// start of the range is needed.
+func (gap fileRefcountGapIterator) Start() uint64 {
+ if ps := gap.PrevSegment(); ps.Ok() {
+ return ps.End()
+ }
+ return fileRefcountSetFunctions{}.MinKey()
+}
+
+// End is equivalent to Range().End, but should be preferred if only the end of
+// the range is needed.
+func (gap fileRefcountGapIterator) End() uint64 {
+ if ns := gap.NextSegment(); ns.Ok() {
+ return ns.Start()
+ }
+ return fileRefcountSetFunctions{}.MaxKey()
+}
+
+// IsEmpty returns true if the iterated gap is empty (that is, the "gap" is
+// between two adjacent segments).
+func (gap fileRefcountGapIterator) IsEmpty() bool {
+ return gap.Range().Length() == 0
+}
+
+// PrevSegment returns the segment immediately before the iterated gap. If no
+// such segment exists, PrevSegment returns a terminal iterator.
+func (gap fileRefcountGapIterator) PrevSegment() fileRefcountIterator {
+ return fileRefcountsegmentBeforePosition(gap.node, gap.index)
+}
+
+// NextSegment returns the segment immediately after the iterated gap. If no
+// such segment exists, NextSegment returns a terminal iterator.
+func (gap fileRefcountGapIterator) NextSegment() fileRefcountIterator {
+ return fileRefcountsegmentAfterPosition(gap.node, gap.index)
+}
+
+// PrevGap returns the iterated gap's predecessor. If no such gap exists,
+// PrevGap returns a terminal iterator.
+func (gap fileRefcountGapIterator) PrevGap() fileRefcountGapIterator {
+ seg := gap.PrevSegment()
+ if !seg.Ok() {
+ return fileRefcountGapIterator{}
+ }
+ return seg.PrevGap()
+}
+
+// NextGap returns the iterated gap's successor. If no such gap exists, NextGap
+// returns a terminal iterator.
+func (gap fileRefcountGapIterator) NextGap() fileRefcountGapIterator {
+ seg := gap.NextSegment()
+ if !seg.Ok() {
+ return fileRefcountGapIterator{}
+ }
+ return seg.NextGap()
+}
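+
+// The sketch below is an editorial illustration, not part of the generated
+// file: gaps can be walked just like segments, and IsEmpty distinguishes the
+// zero-length gaps between adjacent segments from gaps that actually span a
+// range (including the leading and trailing gaps of the set). The helper name
+// is hypothetical.
+func fileRefcountCountHolesSketch(s *fileRefcountSet) int {
+	holes := 0
+	for gap := s.FirstGap(); gap.Ok(); gap = gap.NextGap() {
+		if !gap.IsEmpty() {
+			holes++
+		}
+	}
+	return holes
+}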
+
+// NextLargeEnoughGap returns the first subsequent gap with length of at least
+// minSize. If no such gap exists, it returns a terminal gap iterator. The
+// search does NOT include the current gap itself.
+//
+// Precondition: trackGaps must be 1.
+func (gap fileRefcountGapIterator) NextLargeEnoughGap(minSize uint64) fileRefcountGapIterator {
+ if fileRefcounttrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == gap.node.nrSegments {
+
+ gap.node = gap.NextSegment().node
+ gap.index = 0
+ return gap.nextLargeEnoughGapHelper(minSize)
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
+
+// nextLargeEnoughGapHelper is the helper function used by NextLargeEnoughGap
+// to perform the recursive search.
+//
+// Preconditions: gap is NOT the trailing gap of a non-leaf node.
+func (gap fileRefcountGapIterator) nextLargeEnoughGapHelper(minSize uint64) fileRefcountGapIterator {
+
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == gap.node.nrSegments)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return fileRefcountGapIterator{}
+ }
+
+ gap.index++
+ for gap.index <= gap.node.nrSegments {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index++
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == gap.node.nrSegments {
+
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
+
+// PrevLargeEnoughGap returns the first preceding gap with length of at least
+// minSize. If no such gap exists, it returns a terminal gap iterator. The
+// search does NOT include the current gap itself.
+//
+// Precondition: trackGaps must be 1.
+func (gap fileRefcountGapIterator) PrevLargeEnoughGap(minSize uint64) fileRefcountGapIterator {
+ if fileRefcounttrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == 0 {
+
+ gap.node = gap.PrevSegment().node
+ gap.index = gap.node.nrSegments
+ return gap.prevLargeEnoughGapHelper(minSize)
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
+
+// prevLargeEnoughGapHelper is the helper function used by PrevLargeEnoughGap
+// to perform the recursive search.
+//
+// Preconditions: gap is NOT the first gap of a non-leaf node.
+func (gap fileRefcountGapIterator) prevLargeEnoughGapHelper(minSize uint64) fileRefcountGapIterator {
+
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == 0)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return fileRefcountGapIterator{}
+ }
+
+ gap.index--
+ for gap.index >= 0 {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index--
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == 0 {
+
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
+
+// segmentBeforePosition returns the predecessor segment of the position given
+// by n.children[i], which may or may not contain a child. If no such segment
+// exists, segmentBeforePosition returns a terminal iterator.
+func fileRefcountsegmentBeforePosition(n *fileRefcountnode, i int) fileRefcountIterator {
+ for i == 0 {
+ if n.parent == nil {
+ return fileRefcountIterator{}
+ }
+ n, i = n.parent, n.parentIndex
+ }
+ return fileRefcountIterator{n, i - 1}
+}
+
+// segmentAfterPosition returns the successor segment of the position given by
+// n.children[i], which may or may not contain a child. If no such segment
+// exists, segmentAfterPosition returns a terminal iterator.
+func fileRefcountsegmentAfterPosition(n *fileRefcountnode, i int) fileRefcountIterator {
+ for i == n.nrSegments {
+ if n.parent == nil {
+ return fileRefcountIterator{}
+ }
+ n, i = n.parent, n.parentIndex
+ }
+ return fileRefcountIterator{n, i}
+}
+
+func fileRefcountzeroValueSlice(slice []int32) {
+
+ for i := range slice {
+ fileRefcountSetFunctions{}.ClearValue(&slice[i])
+ }
+}
+
+func fileRefcountzeroNodeSlice(slice []*fileRefcountnode) {
+ for i := range slice {
+ slice[i] = nil
+ }
+}
+
+// String stringifies a Set for debugging.
+func (s *fileRefcountSet) String() string {
+ return s.root.String()
+}
+
+// String stringifies a node (and all of its children) for debugging.
+func (n *fileRefcountnode) String() string {
+ var buf bytes.Buffer
+ n.writeDebugString(&buf, "")
+ return buf.String()
+}
+
+func (n *fileRefcountnode) writeDebugString(buf *bytes.Buffer, prefix string) {
+ if n.hasChildren != (n.nrSegments > 0 && n.children[0] != nil) {
+ buf.WriteString(prefix)
+ buf.WriteString(fmt.Sprintf("WARNING: inconsistent value of hasChildren: got %v, want %v\n", n.hasChildren, !n.hasChildren))
+ }
+ for i := 0; i < n.nrSegments; i++ {
+ if child := n.children[i]; child != nil {
+ cprefix := fmt.Sprintf("%s- % 3d ", prefix, i)
+ if child.parent != n || child.parentIndex != i {
+ buf.WriteString(cprefix)
+ buf.WriteString(fmt.Sprintf("WARNING: inconsistent linkage to parent: got (%p, %d), want (%p, %d)\n", child.parent, child.parentIndex, n, i))
+ }
+ child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, i))
+ }
+ buf.WriteString(prefix)
+ if n.hasChildren {
+ if fileRefcounttrackGaps != 0 {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v, maxGap: %d\n", i, n.keys[i], n.values[i], n.maxGap.Get()))
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
+ }
+ if child := n.children[n.nrSegments]; child != nil {
+ child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, n.nrSegments))
+ }
+}
+
+// SegmentDataSlices represents segments from a set as parallel slices of
+// start keys, end keys, and values. SegmentDataSlices is primarily used as an
+// intermediate representation for save/restore, and its layout is optimized
+// for that purpose.
+//
+// +stateify savable
+type fileRefcountSegmentDataSlices struct {
+ Start []uint64
+ End []uint64
+ Values []int32
+}
+
+// ExportSortedSlices returns a copy of all segments in the given set, in
+// ascending key order.
+func (s *fileRefcountSet) ExportSortedSlices() *fileRefcountSegmentDataSlices {
+ var sds fileRefcountSegmentDataSlices
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ sds.Start = append(sds.Start, seg.Start())
+ sds.End = append(sds.End, seg.End())
+ sds.Values = append(sds.Values, seg.Value())
+ }
+ sds.Start = sds.Start[:len(sds.Start):len(sds.Start)]
+ sds.End = sds.End[:len(sds.End):len(sds.End)]
+ sds.Values = sds.Values[:len(sds.Values):len(sds.Values)]
+ return &sds
+}
+
+// ImportSortedSlices initializes the given set from the given slices.
+//
+// Preconditions:
+// * s must be empty.
+// * sds must represent a valid set (the segments in sds must have valid
+// lengths that do not overlap).
+// * The segments in sds must be sorted in ascending key order.
+func (s *fileRefcountSet) ImportSortedSlices(sds *fileRefcountSegmentDataSlices) error {
+ if !s.IsEmpty() {
+ return fmt.Errorf("cannot import into non-empty set %v", s)
+ }
+ gap := s.FirstGap()
+ for i := range sds.Start {
+ r := __generics_imported0.FileRange{sds.Start[i], sds.End[i]}
+ if !gap.Range().IsSupersetOf(r) {
+ return fmt.Errorf("segment overlaps a preceding segment or is incorrectly sorted: [%d, %d) => %v", sds.Start[i], sds.End[i], sds.Values[i])
+ }
+ gap = s.InsertWithoutMerging(gap, r, sds.Values[i]).NextGap()
+ }
+ return nil
+}
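+
+// The sketch below is an editorial illustration, not part of the generated
+// file: ExportSortedSlices and ImportSortedSlices form the round trip used by
+// saveRoot/loadRoot below. The helper name is hypothetical.
+func fileRefcountCloneSketch(src *fileRefcountSet) (*fileRefcountSet, error) {
+	// ImportSortedSlices fails if the destination set is not empty or if
+	// the exported slices are malformed.
+	var dst fileRefcountSet
+	if err := dst.ImportSortedSlices(src.ExportSortedSlices()); err != nil {
+		return nil, err
+	}
+	return &dst, nil
+}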
+
+// segmentTestCheck returns an error if s is incorrectly sorted, does not
+// contain exactly expectedSegments segments, or contains a segment which
+// fails the passed check.
+//
+// This should be used only for testing, and has been added to this package for
+// templating convenience.
+func (s *fileRefcountSet) segmentTestCheck(expectedSegments int, segFunc func(int, __generics_imported0.FileRange, int32) error) error {
+ havePrev := false
+ prev := uint64(0)
+ nrSegments := 0
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ next := seg.Start()
+ if havePrev && prev >= next {
+ return fmt.Errorf("incorrect order: key %d (segment %d) >= key %d (segment %d)", prev, nrSegments-1, next, nrSegments)
+ }
+ if segFunc != nil {
+ if err := segFunc(nrSegments, seg.Range(), seg.Value()); err != nil {
+ return err
+ }
+ }
+ prev = next
+ havePrev = true
+ nrSegments++
+ }
+ if nrSegments != expectedSegments {
+ return fmt.Errorf("incorrect number of segments: got %d, wanted %d", nrSegments, expectedSegments)
+ }
+ return nil
+}
+
+// countSegments counts the number of segments in the set.
+//
+// Similar to Check, this should only be used for testing.
+func (s *fileRefcountSet) countSegments() (segments int) {
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ segments++
+ }
+ return segments
+}
+func (s *fileRefcountSet) saveRoot() *fileRefcountSegmentDataSlices {
+ return s.ExportSortedSlices()
+}
+
+func (s *fileRefcountSet) loadRoot(sds *fileRefcountSegmentDataSlices) {
+ if err := s.ImportSortedSlices(sds); err != nil {
+ panic(err)
+ }
+}
diff --git a/pkg/sentry/mm/io_list.go b/pkg/sentry/mm/io_list.go
new file mode 100644
index 000000000..9a54e60be
--- /dev/null
+++ b/pkg/sentry/mm/io_list.go
@@ -0,0 +1,221 @@
+package mm
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type ioElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (ioElementMapper) linkerFor(elem *ioResult) *ioResult { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type ioList struct {
+ head *ioResult
+ tail *ioResult
+}
+
+// Reset resets list l to the empty state.
+func (l *ioList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *ioList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *ioList) Front() *ioResult {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *ioList) Back() *ioResult {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *ioList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (ioElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *ioList) PushFront(e *ioResult) {
+ linker := ioElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ ioElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *ioList) PushBack(e *ioResult) {
+ linker := ioElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ ioElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *ioList) PushBackList(m *ioList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ ioElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ ioElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *ioList) InsertAfter(b, e *ioResult) {
+ bLinker := ioElementMapper{}.linkerFor(b)
+ eLinker := ioElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ ioElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *ioList) InsertBefore(a, e *ioResult) {
+ aLinker := ioElementMapper{}.linkerFor(a)
+ eLinker := ioElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ ioElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *ioList) Remove(e *ioResult) {
+ linker := ioElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ ioElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ ioElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type ioEntry struct {
+ next *ioResult
+ prev *ioResult
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *ioEntry) Next() *ioResult {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *ioEntry) Prev() *ioResult {
+ return e.prev
+}
+
+// SetNext assigns 'elem' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *ioEntry) SetNext(elem *ioResult) {
+ e.next = elem
+}
+
+// SetPrev assigns 'elem' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *ioEntry) SetPrev(elem *ioResult) {
+ e.prev = elem
+}
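+
+// The sketch below is an editorial illustration, not part of the generated
+// file: ioResult embeds ioEntry, so completed AIO results can be queued on an
+// ioList and drained in O(1) per element. The helper name is hypothetical.
+func ioListDrainSketch(l *ioList) int {
+	drained := 0
+	for e := l.Front(); e != nil; e = l.Front() {
+		// Remove clears e's links, so re-read the new front each pass.
+		l.Remove(e)
+		drained++
+	}
+	return drained
+}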
diff --git a/pkg/sentry/mm/mm_state_autogen.go b/pkg/sentry/mm/mm_state_autogen.go
new file mode 100644
index 000000000..c4cdf8bd2
--- /dev/null
+++ b/pkg/sentry/mm/mm_state_autogen.go
@@ -0,0 +1,808 @@
+// automatically generated by stateify.
+
+package mm
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (a *aioManager) StateTypeName() string {
+ return "pkg/sentry/mm.aioManager"
+}
+
+func (a *aioManager) StateFields() []string {
+ return []string{
+ "contexts",
+ }
+}
+
+func (a *aioManager) beforeSave() {}
+
+// +checklocksignore
+func (a *aioManager) StateSave(stateSinkObject state.Sink) {
+ a.beforeSave()
+ stateSinkObject.Save(0, &a.contexts)
+}
+
+func (a *aioManager) afterLoad() {}
+
+// +checklocksignore
+func (a *aioManager) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &a.contexts)
+}
+
+func (i *ioResult) StateTypeName() string {
+ return "pkg/sentry/mm.ioResult"
+}
+
+func (i *ioResult) StateFields() []string {
+ return []string{
+ "data",
+ "ioEntry",
+ }
+}
+
+func (i *ioResult) beforeSave() {}
+
+// +checklocksignore
+func (i *ioResult) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.data)
+ stateSinkObject.Save(1, &i.ioEntry)
+}
+
+func (i *ioResult) afterLoad() {}
+
+// +checklocksignore
+func (i *ioResult) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.data)
+ stateSourceObject.Load(1, &i.ioEntry)
+}
+
+func (ctx *AIOContext) StateTypeName() string {
+ return "pkg/sentry/mm.AIOContext"
+}
+
+func (ctx *AIOContext) StateFields() []string {
+ return []string{
+ "results",
+ "maxOutstanding",
+ "outstanding",
+ }
+}
+
+func (ctx *AIOContext) beforeSave() {}
+
+// +checklocksignore
+func (ctx *AIOContext) StateSave(stateSinkObject state.Sink) {
+ ctx.beforeSave()
+ if !state.IsZeroValue(&ctx.dead) {
+ state.Failf("dead is %#v, expected zero", &ctx.dead)
+ }
+ stateSinkObject.Save(0, &ctx.results)
+ stateSinkObject.Save(1, &ctx.maxOutstanding)
+ stateSinkObject.Save(2, &ctx.outstanding)
+}
+
+// +checklocksignore
+func (ctx *AIOContext) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &ctx.results)
+ stateSourceObject.Load(1, &ctx.maxOutstanding)
+ stateSourceObject.Load(2, &ctx.outstanding)
+ stateSourceObject.AfterLoad(ctx.afterLoad)
+}
+
+func (m *aioMappable) StateTypeName() string {
+ return "pkg/sentry/mm.aioMappable"
+}
+
+func (m *aioMappable) StateFields() []string {
+ return []string{
+ "aioMappableRefs",
+ "mfp",
+ "fr",
+ }
+}
+
+func (m *aioMappable) beforeSave() {}
+
+// +checklocksignore
+func (m *aioMappable) StateSave(stateSinkObject state.Sink) {
+ m.beforeSave()
+ stateSinkObject.Save(0, &m.aioMappableRefs)
+ stateSinkObject.Save(1, &m.mfp)
+ stateSinkObject.Save(2, &m.fr)
+}
+
+func (m *aioMappable) afterLoad() {}
+
+// +checklocksignore
+func (m *aioMappable) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &m.aioMappableRefs)
+ stateSourceObject.Load(1, &m.mfp)
+ stateSourceObject.Load(2, &m.fr)
+}
+
+func (r *aioMappableRefs) StateTypeName() string {
+ return "pkg/sentry/mm.aioMappableRefs"
+}
+
+func (r *aioMappableRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *aioMappableRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *aioMappableRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *aioMappableRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (s *fileRefcountSet) StateTypeName() string {
+ return "pkg/sentry/mm.fileRefcountSet"
+}
+
+func (s *fileRefcountSet) StateFields() []string {
+ return []string{
+ "root",
+ }
+}
+
+func (s *fileRefcountSet) beforeSave() {}
+
+// +checklocksignore
+func (s *fileRefcountSet) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ var rootValue *fileRefcountSegmentDataSlices = s.saveRoot()
+ stateSinkObject.SaveValue(0, rootValue)
+}
+
+func (s *fileRefcountSet) afterLoad() {}
+
+// +checklocksignore
+func (s *fileRefcountSet) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.LoadValue(0, new(*fileRefcountSegmentDataSlices), func(y interface{}) { s.loadRoot(y.(*fileRefcountSegmentDataSlices)) })
+}
+
+func (n *fileRefcountnode) StateTypeName() string {
+ return "pkg/sentry/mm.fileRefcountnode"
+}
+
+func (n *fileRefcountnode) StateFields() []string {
+ return []string{
+ "nrSegments",
+ "parent",
+ "parentIndex",
+ "hasChildren",
+ "maxGap",
+ "keys",
+ "values",
+ "children",
+ }
+}
+
+func (n *fileRefcountnode) beforeSave() {}
+
+// +checklocksignore
+func (n *fileRefcountnode) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.nrSegments)
+ stateSinkObject.Save(1, &n.parent)
+ stateSinkObject.Save(2, &n.parentIndex)
+ stateSinkObject.Save(3, &n.hasChildren)
+ stateSinkObject.Save(4, &n.maxGap)
+ stateSinkObject.Save(5, &n.keys)
+ stateSinkObject.Save(6, &n.values)
+ stateSinkObject.Save(7, &n.children)
+}
+
+func (n *fileRefcountnode) afterLoad() {}
+
+// +checklocksignore
+func (n *fileRefcountnode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.nrSegments)
+ stateSourceObject.Load(1, &n.parent)
+ stateSourceObject.Load(2, &n.parentIndex)
+ stateSourceObject.Load(3, &n.hasChildren)
+ stateSourceObject.Load(4, &n.maxGap)
+ stateSourceObject.Load(5, &n.keys)
+ stateSourceObject.Load(6, &n.values)
+ stateSourceObject.Load(7, &n.children)
+}
+
+func (f *fileRefcountSegmentDataSlices) StateTypeName() string {
+ return "pkg/sentry/mm.fileRefcountSegmentDataSlices"
+}
+
+func (f *fileRefcountSegmentDataSlices) StateFields() []string {
+ return []string{
+ "Start",
+ "End",
+ "Values",
+ }
+}
+
+func (f *fileRefcountSegmentDataSlices) beforeSave() {}
+
+// +checklocksignore
+func (f *fileRefcountSegmentDataSlices) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.Start)
+ stateSinkObject.Save(1, &f.End)
+ stateSinkObject.Save(2, &f.Values)
+}
+
+func (f *fileRefcountSegmentDataSlices) afterLoad() {}
+
+// +checklocksignore
+func (f *fileRefcountSegmentDataSlices) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.Start)
+ stateSourceObject.Load(1, &f.End)
+ stateSourceObject.Load(2, &f.Values)
+}
+
+func (l *ioList) StateTypeName() string {
+ return "pkg/sentry/mm.ioList"
+}
+
+func (l *ioList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *ioList) beforeSave() {}
+
+// +checklocksignore
+func (l *ioList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *ioList) afterLoad() {}
+
+// +checklocksignore
+func (l *ioList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *ioEntry) StateTypeName() string {
+ return "pkg/sentry/mm.ioEntry"
+}
+
+func (e *ioEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *ioEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *ioEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *ioEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *ioEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func (mm *MemoryManager) StateTypeName() string {
+ return "pkg/sentry/mm.MemoryManager"
+}
+
+func (mm *MemoryManager) StateFields() []string {
+ return []string{
+ "p",
+ "mfp",
+ "layout",
+ "privateRefs",
+ "users",
+ "vmas",
+ "brk",
+ "usageAS",
+ "lockedAS",
+ "dataAS",
+ "defMLockMode",
+ "pmas",
+ "curRSS",
+ "maxRSS",
+ "argv",
+ "envv",
+ "auxv",
+ "executable",
+ "dumpability",
+ "aioManager",
+ "sleepForActivation",
+ "vdsoSigReturnAddr",
+ "membarrierPrivateEnabled",
+ "membarrierRSeqEnabled",
+ }
+}
+
+// +checklocksignore
+func (mm *MemoryManager) StateSave(stateSinkObject state.Sink) {
+ mm.beforeSave()
+ if !state.IsZeroValue(&mm.active) {
+ state.Failf("active is %#v, expected zero", &mm.active)
+ }
+ if !state.IsZeroValue(&mm.captureInvalidations) {
+ state.Failf("captureInvalidations is %#v, expected zero", &mm.captureInvalidations)
+ }
+ stateSinkObject.Save(0, &mm.p)
+ stateSinkObject.Save(1, &mm.mfp)
+ stateSinkObject.Save(2, &mm.layout)
+ stateSinkObject.Save(3, &mm.privateRefs)
+ stateSinkObject.Save(4, &mm.users)
+ stateSinkObject.Save(5, &mm.vmas)
+ stateSinkObject.Save(6, &mm.brk)
+ stateSinkObject.Save(7, &mm.usageAS)
+ stateSinkObject.Save(8, &mm.lockedAS)
+ stateSinkObject.Save(9, &mm.dataAS)
+ stateSinkObject.Save(10, &mm.defMLockMode)
+ stateSinkObject.Save(11, &mm.pmas)
+ stateSinkObject.Save(12, &mm.curRSS)
+ stateSinkObject.Save(13, &mm.maxRSS)
+ stateSinkObject.Save(14, &mm.argv)
+ stateSinkObject.Save(15, &mm.envv)
+ stateSinkObject.Save(16, &mm.auxv)
+ stateSinkObject.Save(17, &mm.executable)
+ stateSinkObject.Save(18, &mm.dumpability)
+ stateSinkObject.Save(19, &mm.aioManager)
+ stateSinkObject.Save(20, &mm.sleepForActivation)
+ stateSinkObject.Save(21, &mm.vdsoSigReturnAddr)
+ stateSinkObject.Save(22, &mm.membarrierPrivateEnabled)
+ stateSinkObject.Save(23, &mm.membarrierRSeqEnabled)
+}
+
+// +checklocksignore
+func (mm *MemoryManager) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &mm.p)
+ stateSourceObject.Load(1, &mm.mfp)
+ stateSourceObject.Load(2, &mm.layout)
+ stateSourceObject.Load(3, &mm.privateRefs)
+ stateSourceObject.Load(4, &mm.users)
+ stateSourceObject.Load(5, &mm.vmas)
+ stateSourceObject.Load(6, &mm.brk)
+ stateSourceObject.Load(7, &mm.usageAS)
+ stateSourceObject.Load(8, &mm.lockedAS)
+ stateSourceObject.Load(9, &mm.dataAS)
+ stateSourceObject.Load(10, &mm.defMLockMode)
+ stateSourceObject.Load(11, &mm.pmas)
+ stateSourceObject.Load(12, &mm.curRSS)
+ stateSourceObject.Load(13, &mm.maxRSS)
+ stateSourceObject.Load(14, &mm.argv)
+ stateSourceObject.Load(15, &mm.envv)
+ stateSourceObject.Load(16, &mm.auxv)
+ stateSourceObject.Load(17, &mm.executable)
+ stateSourceObject.Load(18, &mm.dumpability)
+ stateSourceObject.Load(19, &mm.aioManager)
+ stateSourceObject.Load(20, &mm.sleepForActivation)
+ stateSourceObject.Load(21, &mm.vdsoSigReturnAddr)
+ stateSourceObject.Load(22, &mm.membarrierPrivateEnabled)
+ stateSourceObject.Load(23, &mm.membarrierRSeqEnabled)
+ stateSourceObject.AfterLoad(mm.afterLoad)
+}
+
+func (vma *vma) StateTypeName() string {
+ return "pkg/sentry/mm.vma"
+}
+
+func (vma *vma) StateFields() []string {
+ return []string{
+ "mappable",
+ "off",
+ "realPerms",
+ "dontfork",
+ "mlockMode",
+ "numaPolicy",
+ "numaNodemask",
+ "id",
+ "hint",
+ }
+}
+
+func (vma *vma) beforeSave() {}
+
+// +checklocksignore
+func (vma *vma) StateSave(stateSinkObject state.Sink) {
+ vma.beforeSave()
+ var realPermsValue int = vma.saveRealPerms()
+ stateSinkObject.SaveValue(2, realPermsValue)
+ stateSinkObject.Save(0, &vma.mappable)
+ stateSinkObject.Save(1, &vma.off)
+ stateSinkObject.Save(3, &vma.dontfork)
+ stateSinkObject.Save(4, &vma.mlockMode)
+ stateSinkObject.Save(5, &vma.numaPolicy)
+ stateSinkObject.Save(6, &vma.numaNodemask)
+ stateSinkObject.Save(7, &vma.id)
+ stateSinkObject.Save(8, &vma.hint)
+}
+
+func (vma *vma) afterLoad() {}
+
+// +checklocksignore
+func (vma *vma) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &vma.mappable)
+ stateSourceObject.Load(1, &vma.off)
+ stateSourceObject.Load(3, &vma.dontfork)
+ stateSourceObject.Load(4, &vma.mlockMode)
+ stateSourceObject.Load(5, &vma.numaPolicy)
+ stateSourceObject.Load(6, &vma.numaNodemask)
+ stateSourceObject.Load(7, &vma.id)
+ stateSourceObject.Load(8, &vma.hint)
+ stateSourceObject.LoadValue(2, new(int), func(y interface{}) { vma.loadRealPerms(y.(int)) })
+}
+
+func (p *pma) StateTypeName() string {
+ return "pkg/sentry/mm.pma"
+}
+
+func (p *pma) StateFields() []string {
+ return []string{
+ "off",
+ "translatePerms",
+ "effectivePerms",
+ "maxPerms",
+ "needCOW",
+ "private",
+ }
+}
+
+func (p *pma) beforeSave() {}
+
+// +checklocksignore
+func (p *pma) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+ stateSinkObject.Save(0, &p.off)
+ stateSinkObject.Save(1, &p.translatePerms)
+ stateSinkObject.Save(2, &p.effectivePerms)
+ stateSinkObject.Save(3, &p.maxPerms)
+ stateSinkObject.Save(4, &p.needCOW)
+ stateSinkObject.Save(5, &p.private)
+}
+
+func (p *pma) afterLoad() {}
+
+// +checklocksignore
+func (p *pma) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &p.off)
+ stateSourceObject.Load(1, &p.translatePerms)
+ stateSourceObject.Load(2, &p.effectivePerms)
+ stateSourceObject.Load(3, &p.maxPerms)
+ stateSourceObject.Load(4, &p.needCOW)
+ stateSourceObject.Load(5, &p.private)
+}
+
+func (p *privateRefs) StateTypeName() string {
+ return "pkg/sentry/mm.privateRefs"
+}
+
+func (p *privateRefs) StateFields() []string {
+ return []string{
+ "refs",
+ }
+}
+
+func (p *privateRefs) beforeSave() {}
+
+// +checklocksignore
+func (p *privateRefs) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+ stateSinkObject.Save(0, &p.refs)
+}
+
+func (p *privateRefs) afterLoad() {}
+
+// +checklocksignore
+func (p *privateRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &p.refs)
+}
+
+func (s *pmaSet) StateTypeName() string {
+ return "pkg/sentry/mm.pmaSet"
+}
+
+func (s *pmaSet) StateFields() []string {
+ return []string{
+ "root",
+ }
+}
+
+func (s *pmaSet) beforeSave() {}
+
+// +checklocksignore
+func (s *pmaSet) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ var rootValue *pmaSegmentDataSlices = s.saveRoot()
+ stateSinkObject.SaveValue(0, rootValue)
+}
+
+func (s *pmaSet) afterLoad() {}
+
+// +checklocksignore
+func (s *pmaSet) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.LoadValue(0, new(*pmaSegmentDataSlices), func(y interface{}) { s.loadRoot(y.(*pmaSegmentDataSlices)) })
+}
+
+func (n *pmanode) StateTypeName() string {
+ return "pkg/sentry/mm.pmanode"
+}
+
+func (n *pmanode) StateFields() []string {
+ return []string{
+ "nrSegments",
+ "parent",
+ "parentIndex",
+ "hasChildren",
+ "maxGap",
+ "keys",
+ "values",
+ "children",
+ }
+}
+
+func (n *pmanode) beforeSave() {}
+
+// +checklocksignore
+func (n *pmanode) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.nrSegments)
+ stateSinkObject.Save(1, &n.parent)
+ stateSinkObject.Save(2, &n.parentIndex)
+ stateSinkObject.Save(3, &n.hasChildren)
+ stateSinkObject.Save(4, &n.maxGap)
+ stateSinkObject.Save(5, &n.keys)
+ stateSinkObject.Save(6, &n.values)
+ stateSinkObject.Save(7, &n.children)
+}
+
+func (n *pmanode) afterLoad() {}
+
+// +checklocksignore
+func (n *pmanode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.nrSegments)
+ stateSourceObject.Load(1, &n.parent)
+ stateSourceObject.Load(2, &n.parentIndex)
+ stateSourceObject.Load(3, &n.hasChildren)
+ stateSourceObject.Load(4, &n.maxGap)
+ stateSourceObject.Load(5, &n.keys)
+ stateSourceObject.Load(6, &n.values)
+ stateSourceObject.Load(7, &n.children)
+}
+
+func (p *pmaSegmentDataSlices) StateTypeName() string {
+ return "pkg/sentry/mm.pmaSegmentDataSlices"
+}
+
+func (p *pmaSegmentDataSlices) StateFields() []string {
+ return []string{
+ "Start",
+ "End",
+ "Values",
+ }
+}
+
+func (p *pmaSegmentDataSlices) beforeSave() {}
+
+// +checklocksignore
+func (p *pmaSegmentDataSlices) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+ stateSinkObject.Save(0, &p.Start)
+ stateSinkObject.Save(1, &p.End)
+ stateSinkObject.Save(2, &p.Values)
+}
+
+func (p *pmaSegmentDataSlices) afterLoad() {}
+
+// +checklocksignore
+func (p *pmaSegmentDataSlices) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &p.Start)
+ stateSourceObject.Load(1, &p.End)
+ stateSourceObject.Load(2, &p.Values)
+}
+
+func (m *SpecialMappable) StateTypeName() string {
+ return "pkg/sentry/mm.SpecialMappable"
+}
+
+func (m *SpecialMappable) StateFields() []string {
+ return []string{
+ "SpecialMappableRefs",
+ "mfp",
+ "fr",
+ "name",
+ }
+}
+
+func (m *SpecialMappable) beforeSave() {}
+
+// +checklocksignore
+func (m *SpecialMappable) StateSave(stateSinkObject state.Sink) {
+ m.beforeSave()
+ stateSinkObject.Save(0, &m.SpecialMappableRefs)
+ stateSinkObject.Save(1, &m.mfp)
+ stateSinkObject.Save(2, &m.fr)
+ stateSinkObject.Save(3, &m.name)
+}
+
+func (m *SpecialMappable) afterLoad() {}
+
+// +checklocksignore
+func (m *SpecialMappable) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &m.SpecialMappableRefs)
+ stateSourceObject.Load(1, &m.mfp)
+ stateSourceObject.Load(2, &m.fr)
+ stateSourceObject.Load(3, &m.name)
+}
+
+func (r *SpecialMappableRefs) StateTypeName() string {
+ return "pkg/sentry/mm.SpecialMappableRefs"
+}
+
+func (r *SpecialMappableRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *SpecialMappableRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *SpecialMappableRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *SpecialMappableRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (s *vmaSet) StateTypeName() string {
+ return "pkg/sentry/mm.vmaSet"
+}
+
+func (s *vmaSet) StateFields() []string {
+ return []string{
+ "root",
+ }
+}
+
+func (s *vmaSet) beforeSave() {}
+
+// +checklocksignore
+func (s *vmaSet) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ var rootValue *vmaSegmentDataSlices = s.saveRoot()
+ stateSinkObject.SaveValue(0, rootValue)
+}
+
+func (s *vmaSet) afterLoad() {}
+
+// +checklocksignore
+func (s *vmaSet) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.LoadValue(0, new(*vmaSegmentDataSlices), func(y interface{}) { s.loadRoot(y.(*vmaSegmentDataSlices)) })
+}
+
+func (n *vmanode) StateTypeName() string {
+ return "pkg/sentry/mm.vmanode"
+}
+
+func (n *vmanode) StateFields() []string {
+ return []string{
+ "nrSegments",
+ "parent",
+ "parentIndex",
+ "hasChildren",
+ "maxGap",
+ "keys",
+ "values",
+ "children",
+ }
+}
+
+func (n *vmanode) beforeSave() {}
+
+// +checklocksignore
+func (n *vmanode) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.nrSegments)
+ stateSinkObject.Save(1, &n.parent)
+ stateSinkObject.Save(2, &n.parentIndex)
+ stateSinkObject.Save(3, &n.hasChildren)
+ stateSinkObject.Save(4, &n.maxGap)
+ stateSinkObject.Save(5, &n.keys)
+ stateSinkObject.Save(6, &n.values)
+ stateSinkObject.Save(7, &n.children)
+}
+
+func (n *vmanode) afterLoad() {}
+
+// +checklocksignore
+func (n *vmanode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.nrSegments)
+ stateSourceObject.Load(1, &n.parent)
+ stateSourceObject.Load(2, &n.parentIndex)
+ stateSourceObject.Load(3, &n.hasChildren)
+ stateSourceObject.Load(4, &n.maxGap)
+ stateSourceObject.Load(5, &n.keys)
+ stateSourceObject.Load(6, &n.values)
+ stateSourceObject.Load(7, &n.children)
+}
+
+func (v *vmaSegmentDataSlices) StateTypeName() string {
+ return "pkg/sentry/mm.vmaSegmentDataSlices"
+}
+
+func (v *vmaSegmentDataSlices) StateFields() []string {
+ return []string{
+ "Start",
+ "End",
+ "Values",
+ }
+}
+
+func (v *vmaSegmentDataSlices) beforeSave() {}
+
+// +checklocksignore
+func (v *vmaSegmentDataSlices) StateSave(stateSinkObject state.Sink) {
+ v.beforeSave()
+ stateSinkObject.Save(0, &v.Start)
+ stateSinkObject.Save(1, &v.End)
+ stateSinkObject.Save(2, &v.Values)
+}
+
+func (v *vmaSegmentDataSlices) afterLoad() {}
+
+// +checklocksignore
+func (v *vmaSegmentDataSlices) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &v.Start)
+ stateSourceObject.Load(1, &v.End)
+ stateSourceObject.Load(2, &v.Values)
+}
+
+func init() {
+ state.Register((*aioManager)(nil))
+ state.Register((*ioResult)(nil))
+ state.Register((*AIOContext)(nil))
+ state.Register((*aioMappable)(nil))
+ state.Register((*aioMappableRefs)(nil))
+ state.Register((*fileRefcountSet)(nil))
+ state.Register((*fileRefcountnode)(nil))
+ state.Register((*fileRefcountSegmentDataSlices)(nil))
+ state.Register((*ioList)(nil))
+ state.Register((*ioEntry)(nil))
+ state.Register((*MemoryManager)(nil))
+ state.Register((*vma)(nil))
+ state.Register((*pma)(nil))
+ state.Register((*privateRefs)(nil))
+ state.Register((*pmaSet)(nil))
+ state.Register((*pmanode)(nil))
+ state.Register((*pmaSegmentDataSlices)(nil))
+ state.Register((*SpecialMappable)(nil))
+ state.Register((*SpecialMappableRefs)(nil))
+ state.Register((*vmaSet)(nil))
+ state.Register((*vmanode)(nil))
+ state.Register((*vmaSegmentDataSlices)(nil))
+}
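+
+// The sketch below is an editorial illustration, not part of the generated
+// file: stateify emits the same boilerplate for every savable type. A
+// hypothetical type with a single saved field would look like this, and would
+// additionally be registered with state.Register in init above.
+type exampleCounter struct {
+	count uint64
+}
+
+func (c *exampleCounter) StateTypeName() string { return "pkg/sentry/mm.exampleCounter" }
+
+func (c *exampleCounter) StateFields() []string { return []string{"count"} }
+
+func (c *exampleCounter) beforeSave() {}
+
+func (c *exampleCounter) StateSave(stateSinkObject state.Sink) {
+	c.beforeSave()
+	stateSinkObject.Save(0, &c.count)
+}
+
+func (c *exampleCounter) afterLoad() {}
+
+func (c *exampleCounter) StateLoad(stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &c.count)
+}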
diff --git a/pkg/sentry/mm/mm_test.go b/pkg/sentry/mm/mm_test.go
deleted file mode 100644
index 84cb8158d..000000000
--- a/pkg/sentry/mm/mm_test.go
+++ /dev/null
@@ -1,275 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mm
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/errors/linuxerr"
- "gvisor.dev/gvisor/pkg/hostarch"
- "gvisor.dev/gvisor/pkg/sentry/arch"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/limits"
- "gvisor.dev/gvisor/pkg/sentry/memmap"
- "gvisor.dev/gvisor/pkg/sentry/pgalloc"
- "gvisor.dev/gvisor/pkg/sentry/platform"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-func testMemoryManager(ctx context.Context) *MemoryManager {
- p := platform.FromContext(ctx)
- mfp := pgalloc.MemoryFileProviderFromContext(ctx)
- mm := NewMemoryManager(p, mfp, false)
- mm.layout = arch.MmapLayout{
- MinAddr: p.MinUserAddress(),
- MaxAddr: p.MaxUserAddress(),
- BottomUpBase: p.MinUserAddress(),
- TopDownBase: p.MaxUserAddress(),
- }
- return mm
-}
-
-func (mm *MemoryManager) realUsageAS() uint64 {
- return uint64(mm.vmas.Span())
-}
-
-func TestUsageASUpdates(t *testing.T) {
- ctx := contexttest.Context(t)
- mm := testMemoryManager(ctx)
- defer mm.DecUsers(ctx)
-
- addr, err := mm.MMap(ctx, memmap.MMapOpts{
- Length: 2 * hostarch.PageSize,
- Private: true,
- })
- if err != nil {
- t.Fatalf("MMap got err %v want nil", err)
- }
- realUsage := mm.realUsageAS()
- if mm.usageAS != realUsage {
- t.Fatalf("usageAS believes %v bytes are mapped; %v bytes are actually mapped", mm.usageAS, realUsage)
- }
-
- mm.MUnmap(ctx, addr, hostarch.PageSize)
- realUsage = mm.realUsageAS()
- if mm.usageAS != realUsage {
- t.Fatalf("usageAS believes %v bytes are mapped; %v bytes are actually mapped", mm.usageAS, realUsage)
- }
-}
-
-func (mm *MemoryManager) realDataAS() uint64 {
- var sz uint64
- for seg := mm.vmas.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
- vma := seg.Value()
- if vma.isPrivateDataLocked() {
- sz += uint64(seg.Range().Length())
- }
- }
- return sz
-}
-
-func TestDataASUpdates(t *testing.T) {
- ctx := contexttest.Context(t)
- mm := testMemoryManager(ctx)
- defer mm.DecUsers(ctx)
-
- addr, err := mm.MMap(ctx, memmap.MMapOpts{
- Length: 3 * hostarch.PageSize,
- Private: true,
- Perms: hostarch.Write,
- MaxPerms: hostarch.AnyAccess,
- })
- if err != nil {
- t.Fatalf("MMap got err %v want nil", err)
- }
- if mm.dataAS == 0 {
- t.Fatalf("dataAS is 0, wanted not 0")
- }
- realDataAS := mm.realDataAS()
- if mm.dataAS != realDataAS {
- t.Fatalf("dataAS believes %v bytes are mapped; %v bytes are actually mapped", mm.dataAS, realDataAS)
- }
-
- mm.MUnmap(ctx, addr, hostarch.PageSize)
- realDataAS = mm.realDataAS()
- if mm.dataAS != realDataAS {
- t.Fatalf("dataAS believes %v bytes are mapped; %v bytes are actually mapped", mm.dataAS, realDataAS)
- }
-
- mm.MProtect(addr+hostarch.PageSize, hostarch.PageSize, hostarch.Read, false)
- realDataAS = mm.realDataAS()
- if mm.dataAS != realDataAS {
- t.Fatalf("dataAS believes %v bytes are mapped; %v bytes are actually mapped", mm.dataAS, realDataAS)
- }
-
- mm.MRemap(ctx, addr+2*hostarch.PageSize, hostarch.PageSize, 2*hostarch.PageSize, MRemapOpts{
- Move: MRemapMayMove,
- })
- realDataAS = mm.realDataAS()
- if mm.dataAS != realDataAS {
- t.Fatalf("dataAS believes %v bytes are mapped; %v bytes are actually mapped", mm.dataAS, realDataAS)
- }
-}
-
-func TestBrkDataLimitUpdates(t *testing.T) {
- limitSet := limits.NewLimitSet()
- limitSet.Set(limits.Data, limits.Limit{}, true /* privileged */) // zero RLIMIT_DATA
-
- ctx := contexttest.WithLimitSet(contexttest.Context(t), limitSet)
- mm := testMemoryManager(ctx)
- defer mm.DecUsers(ctx)
-
- // Try to extend the brk by one page and expect doing so to fail.
- oldBrk, _ := mm.Brk(ctx, 0)
- if newBrk, _ := mm.Brk(ctx, oldBrk+hostarch.PageSize); newBrk != oldBrk {
- t.Errorf("brk() increased data segment above RLIMIT_DATA (old brk = %#x, new brk = %#x", oldBrk, newBrk)
- }
-}
-
-// TestIOAfterUnmap ensures that IO fails after unmap.
-func TestIOAfterUnmap(t *testing.T) {
- ctx := contexttest.Context(t)
- mm := testMemoryManager(ctx)
- defer mm.DecUsers(ctx)
-
- addr, err := mm.MMap(ctx, memmap.MMapOpts{
- Length: hostarch.PageSize,
- Private: true,
- Perms: hostarch.Read,
- MaxPerms: hostarch.AnyAccess,
- })
- if err != nil {
- t.Fatalf("MMap got err %v want nil", err)
- }
-
- // IO works before munmap.
- b := make([]byte, 1)
- n, err := mm.CopyIn(ctx, addr, b, usermem.IOOpts{})
- if err != nil {
- t.Errorf("CopyIn got err %v want nil", err)
- }
- if n != 1 {
- t.Errorf("CopyIn got %d want 1", n)
- }
-
- err = mm.MUnmap(ctx, addr, hostarch.PageSize)
- if err != nil {
- t.Fatalf("MUnmap got err %v want nil", err)
- }
-
- n, err = mm.CopyIn(ctx, addr, b, usermem.IOOpts{})
- if !linuxerr.Equals(linuxerr.EFAULT, err) {
- t.Errorf("CopyIn got err %v want EFAULT", err)
- }
- if n != 0 {
- t.Errorf("CopyIn got %d want 0", n)
- }
-}
-
-// TestIOAfterMProtect tests IO interaction with mprotect permissions.
-func TestIOAfterMProtect(t *testing.T) {
- ctx := contexttest.Context(t)
- mm := testMemoryManager(ctx)
- defer mm.DecUsers(ctx)
-
- addr, err := mm.MMap(ctx, memmap.MMapOpts{
- Length: hostarch.PageSize,
- Private: true,
- Perms: hostarch.ReadWrite,
- MaxPerms: hostarch.AnyAccess,
- })
- if err != nil {
- t.Fatalf("MMap got err %v want nil", err)
- }
-
- // Writing works before mprotect.
- b := make([]byte, 1)
- n, err := mm.CopyOut(ctx, addr, b, usermem.IOOpts{})
- if err != nil {
- t.Errorf("CopyOut got err %v want nil", err)
- }
- if n != 1 {
- t.Errorf("CopyOut got %d want 1", n)
- }
-
- err = mm.MProtect(addr, hostarch.PageSize, hostarch.Read, false)
- if err != nil {
- t.Errorf("MProtect got err %v want nil", err)
- }
-
- // Without IgnorePermissions, CopyOut should no longer succeed.
- n, err = mm.CopyOut(ctx, addr, b, usermem.IOOpts{})
- if !linuxerr.Equals(linuxerr.EFAULT, err) {
- t.Errorf("CopyOut got err %v want EFAULT", err)
- }
- if n != 0 {
- t.Errorf("CopyOut got %d want 0", n)
- }
-
- // With IgnorePermissions, CopyOut should succeed despite mprotect.
- n, err = mm.CopyOut(ctx, addr, b, usermem.IOOpts{
- IgnorePermissions: true,
- })
- if err != nil {
- t.Errorf("CopyOut got err %v want nil", err)
- }
- if n != 1 {
- t.Errorf("CopyOut got %d want 1", n)
- }
-}
-
-// TestAIOPrepareAfterDestroy tests that AIOContext should not be able to be
-// prepared after destruction.
-func TestAIOPrepareAfterDestroy(t *testing.T) {
- ctx := contexttest.Context(t)
- mm := testMemoryManager(ctx)
- defer mm.DecUsers(ctx)
-
- id, err := mm.NewAIOContext(ctx, 1)
- if err != nil {
- t.Fatalf("mm.NewAIOContext got err %v want nil", err)
- }
- aioCtx, ok := mm.LookupAIOContext(ctx, id)
- if !ok {
- t.Fatalf("AIOContext not found")
- }
- mm.DestroyAIOContext(ctx, id)
-
- // Prepare should fail because aioCtx should be destroyed.
- if err := aioCtx.Prepare(); !linuxerr.Equals(linuxerr.EINVAL, err) {
-	t.Errorf("aioCtx.Prepare got err %v want EINVAL", err)
- } else if err == nil {
- aioCtx.CancelPendingRequest()
- }
-}
-
-// TestAIOLookupAfterDestroy tests that AIOContext should not be able to be
-// looked up after memory manager is destroyed.
-func TestAIOLookupAfterDestroy(t *testing.T) {
- ctx := contexttest.Context(t)
- mm := testMemoryManager(ctx)
-
- id, err := mm.NewAIOContext(ctx, 1)
- if err != nil {
- mm.DecUsers(ctx)
- t.Fatalf("mm.NewAIOContext got err %v want nil", err)
- }
- mm.DecUsers(ctx) // This destroys the AIOContext manager.
-
- if _, ok := mm.LookupAIOContext(ctx, id); ok {
- t.Errorf("AIOContext found even after AIOContext manager is destroyed")
- }
-}
diff --git a/pkg/sentry/mm/pma_set.go b/pkg/sentry/mm/pma_set.go
new file mode 100644
index 000000000..14a7c83b9
--- /dev/null
+++ b/pkg/sentry/mm/pma_set.go
@@ -0,0 +1,1647 @@
+package mm
+
+import (
+ __generics_imported0 "gvisor.dev/gvisor/pkg/hostarch"
+)
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// trackGaps is an optional parameter.
+//
+// If trackGaps is 1, the Set will track maximum gap size recursively,
+// enabling the GapIterator.{Prev,Next}LargeEnoughGap functions. In this
+// case, Key must be an unsigned integer.
+//
+// trackGaps must be 0 or 1.
+const pmatrackGaps = 0
+
+var _ = uint8(pmatrackGaps << 7) // Will fail if not zero or one.
+
+// dynamicGap is a type that disappears if trackGaps is 0.
+type pmadynamicGap [pmatrackGaps]__generics_imported0.Addr
+
+// Get returns the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *pmadynamicGap) Get() __generics_imported0.Addr {
+ return d[:][0]
+}
+
+// Set sets the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *pmadynamicGap) Set(v __generics_imported0.Addr) {
+ d[:][0] = v
+}
+
+const (
+ // minDegree is the minimum degree of an internal node in a Set B-tree.
+ //
+ // - Any non-root node has at least minDegree-1 segments.
+ //
+ // - Any non-root internal (non-leaf) node has at least minDegree children.
+ //
+ // - The root node may have fewer than minDegree-1 segments, but it may
+ // only have 0 segments if the tree is empty.
+ //
+ // Our implementation requires minDegree >= 3. Higher values of minDegree
+ // usually improve performance, but increase memory usage for small sets.
+ pmaminDegree = 8
+
+ pmamaxDegree = 2 * pmaminDegree
+)
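
Concretely, with pmaminDegree = 8 and pmamaxDegree = 16, every non-root node holds between 7 and 15 segments and every internal node has between 8 and 16 children, which keeps even large pma sets shallow.
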
+
+// A Set is a mapping of segments with non-overlapping Range keys. The zero
+// value for a Set is an empty set. Set values are not safely movable nor
+// copyable. Set is thread-compatible.
+//
+// +stateify savable
+type pmaSet struct {
+ root pmanode `state:".(*pmaSegmentDataSlices)"`
+}
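
To make the API defined below concrete, here is a hedged in-package sketch of basic pmaSet usage. The zero pma value and the example addresses are purely illustrative; real mm code always populates pma with a backing file, offset, and permissions.

// examplePmaSetUsage is illustrative only and not part of this change.
func examplePmaSetUsage() {
	var s pmaSet
	r := __generics_imported0.AddrRange{Start: 0x1000, End: 0x3000}

	// Add either inserts the segment or reports that it would overlap an
	// existing one.
	if !s.Add(r, pma{}) {
		panic("unexpected overlap in an empty set")
	}

	// FindSegment locates the segment containing an address, if any.
	if seg := s.FindSegment(0x1800); seg.Ok() {
		_ = seg.Range() // [0x1000, 0x3000)
	}

	// Span totals the bytes covered by all segments; IsEmptyRange checks a
	// candidate range for overlaps.
	_ = s.Span() // 0x2000
	_ = s.IsEmptyRange(__generics_imported0.AddrRange{Start: 0x4000, End: 0x5000}) // true
}
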
+
+// IsEmpty returns true if the set contains no segments.
+func (s *pmaSet) IsEmpty() bool {
+ return s.root.nrSegments == 0
+}
+
+// IsEmptyRange returns true iff no segments in the set overlap the given
+// range. This is semantically equivalent to s.SpanRange(r) == 0, but may be
+// more efficient.
+func (s *pmaSet) IsEmptyRange(r __generics_imported0.AddrRange) bool {
+ switch {
+ case r.Length() < 0:
+ panic(fmt.Sprintf("invalid range %v", r))
+ case r.Length() == 0:
+ return true
+ }
+ _, gap := s.Find(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ return r.End <= gap.End()
+}
+
+// Span returns the total size of all segments in the set.
+func (s *pmaSet) Span() __generics_imported0.Addr {
+ var sz __generics_imported0.Addr
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ sz += seg.Range().Length()
+ }
+ return sz
+}
+
+// SpanRange returns the total size of the intersection of segments in the set
+// with the given range.
+func (s *pmaSet) SpanRange(r __generics_imported0.AddrRange) __generics_imported0.Addr {
+ switch {
+ case r.Length() < 0:
+ panic(fmt.Sprintf("invalid range %v", r))
+ case r.Length() == 0:
+ return 0
+ }
+ var sz __generics_imported0.Addr
+ for seg := s.LowerBoundSegment(r.Start); seg.Ok() && seg.Start() < r.End; seg = seg.NextSegment() {
+ sz += seg.Range().Intersect(r).Length()
+ }
+ return sz
+}
+
+// FirstSegment returns the first segment in the set. If the set is empty,
+// FirstSegment returns a terminal iterator.
+func (s *pmaSet) FirstSegment() pmaIterator {
+ if s.root.nrSegments == 0 {
+ return pmaIterator{}
+ }
+ return s.root.firstSegment()
+}
+
+// LastSegment returns the last segment in the set. If the set is empty,
+// LastSegment returns a terminal iterator.
+func (s *pmaSet) LastSegment() pmaIterator {
+ if s.root.nrSegments == 0 {
+ return pmaIterator{}
+ }
+ return s.root.lastSegment()
+}
+
+// FirstGap returns the first gap in the set.
+func (s *pmaSet) FirstGap() pmaGapIterator {
+ n := &s.root
+ for n.hasChildren {
+ n = n.children[0]
+ }
+ return pmaGapIterator{n, 0}
+}
+
+// LastGap returns the last gap in the set.
+func (s *pmaSet) LastGap() pmaGapIterator {
+ n := &s.root
+ for n.hasChildren {
+ n = n.children[n.nrSegments]
+ }
+ return pmaGapIterator{n, n.nrSegments}
+}
+
+// Find returns the segment or gap whose range contains the given key. If a
+// segment is found, the returned Iterator is non-terminal and the
+// returned GapIterator is terminal. Otherwise, the returned Iterator is
+// terminal and the returned GapIterator is non-terminal.
+func (s *pmaSet) Find(key __generics_imported0.Addr) (pmaIterator, pmaGapIterator) {
+ n := &s.root
+ for {
+
+ lower := 0
+ upper := n.nrSegments
+ for lower < upper {
+ i := lower + (upper-lower)/2
+ if r := n.keys[i]; key < r.End {
+ if key >= r.Start {
+ return pmaIterator{n, i}, pmaGapIterator{}
+ }
+ upper = i
+ } else {
+ lower = i + 1
+ }
+ }
+ i := lower
+ if !n.hasChildren {
+ return pmaIterator{}, pmaGapIterator{n, i}
+ }
+ n = n.children[i]
+ }
+}
+
+// FindSegment returns the segment whose range contains the given key. If no
+// such segment exists, FindSegment returns a terminal iterator.
+func (s *pmaSet) FindSegment(key __generics_imported0.Addr) pmaIterator {
+ seg, _ := s.Find(key)
+ return seg
+}
+
+// LowerBoundSegment returns the segment with the lowest range that contains a
+// key greater than or equal to min. If no such segment exists,
+// LowerBoundSegment returns a terminal iterator.
+func (s *pmaSet) LowerBoundSegment(min __generics_imported0.Addr) pmaIterator {
+ seg, gap := s.Find(min)
+ if seg.Ok() {
+ return seg
+ }
+ return gap.NextSegment()
+}
+
+// UpperBoundSegment returns the segment with the highest range that contains a
+// key less than or equal to max. If no such segment exists, UpperBoundSegment
+// returns a terminal iterator.
+func (s *pmaSet) UpperBoundSegment(max __generics_imported0.Addr) pmaIterator {
+ seg, gap := s.Find(max)
+ if seg.Ok() {
+ return seg
+ }
+ return gap.PrevSegment()
+}
+
+// FindGap returns the gap containing the given key. If no such gap exists
+// (i.e. the set contains a segment containing that key), FindGap returns a
+// terminal iterator.
+func (s *pmaSet) FindGap(key __generics_imported0.Addr) pmaGapIterator {
+ _, gap := s.Find(key)
+ return gap
+}
+
+// LowerBoundGap returns the gap with the lowest range that is greater than or
+// equal to min.
+func (s *pmaSet) LowerBoundGap(min __generics_imported0.Addr) pmaGapIterator {
+ seg, gap := s.Find(min)
+ if gap.Ok() {
+ return gap
+ }
+ return seg.NextGap()
+}
+
+// UpperBoundGap returns the gap with the highest range that is less than or
+// equal to max.
+func (s *pmaSet) UpperBoundGap(max __generics_imported0.Addr) pmaGapIterator {
+ seg, gap := s.Find(max)
+ if gap.Ok() {
+ return gap
+ }
+ return seg.PrevGap()
+}
+
+// Add inserts the given segment into the set and returns true. If the new
+// segment can be merged with adjacent segments, Add will do so. If the new
+// segment would overlap an existing segment, Add returns false. If Add
+// succeeds, all existing iterators are invalidated.
+func (s *pmaSet) Add(r __generics_imported0.AddrRange, val pma) bool {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ gap := s.FindGap(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ if r.End > gap.End() {
+ return false
+ }
+ s.Insert(gap, r, val)
+ return true
+}
+
+// AddWithoutMerging inserts the given segment into the set and returns true.
+// If it would overlap an existing segment, AddWithoutMerging does nothing and
+// returns false. If AddWithoutMerging succeeds, all existing iterators are
+// invalidated.
+func (s *pmaSet) AddWithoutMerging(r __generics_imported0.AddrRange, val pma) bool {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ gap := s.FindGap(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ if r.End > gap.End() {
+ return false
+ }
+ s.InsertWithoutMergingUnchecked(gap, r, val)
+ return true
+}
+
+// Insert inserts the given segment into the given gap. If the new segment can
+// be merged with adjacent segments, Insert will do so. Insert returns an
+// iterator to the segment containing the inserted value (which may have been
+// merged with other values). All existing iterators (including gap, but not
+// including the returned iterator) are invalidated.
+//
+// If the gap cannot accommodate the segment, or if r is invalid, Insert panics.
+//
+// Insert is semantically equivalent to an InsertWithoutMerging followed by a
+// Merge, but may be more efficient. Note that there is no unchecked variant of
+// Insert since Insert must retrieve and inspect gap's predecessor and
+// successor segments regardless.
+func (s *pmaSet) Insert(gap pmaGapIterator, r __generics_imported0.AddrRange, val pma) pmaIterator {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ prev, next := gap.PrevSegment(), gap.NextSegment()
+ if prev.Ok() && prev.End() > r.Start {
+ panic(fmt.Sprintf("new segment %v overlaps predecessor %v", r, prev.Range()))
+ }
+ if next.Ok() && next.Start() < r.End {
+ panic(fmt.Sprintf("new segment %v overlaps successor %v", r, next.Range()))
+ }
+ if prev.Ok() && prev.End() == r.Start {
+ if mval, ok := (pmaSetFunctions{}).Merge(prev.Range(), prev.Value(), r, val); ok {
+ shrinkMaxGap := pmatrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
+ prev.SetEndUnchecked(r.End)
+ prev.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ if next.Ok() && next.Start() == r.End {
+ val = mval
+ if mval, ok := (pmaSetFunctions{}).Merge(prev.Range(), val, next.Range(), next.Value()); ok {
+ prev.SetEndUnchecked(next.End())
+ prev.SetValue(mval)
+ return s.Remove(next).PrevSegment()
+ }
+ }
+ return prev
+ }
+ }
+ if next.Ok() && next.Start() == r.End {
+ if mval, ok := (pmaSetFunctions{}).Merge(r, val, next.Range(), next.Value()); ok {
+ shrinkMaxGap := pmatrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
+ next.SetStartUnchecked(r.Start)
+ next.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ return next
+ }
+ }
+
+ return s.InsertWithoutMergingUnchecked(gap, r, val)
+}
+
+// InsertWithoutMerging inserts the given segment into the given gap and
+// returns an iterator to the inserted segment. All existing iterators
+// (including gap, but not including the returned iterator) are invalidated.
+//
+// If the gap cannot accommodate the segment, or if r is invalid,
+// InsertWithoutMerging panics.
+func (s *pmaSet) InsertWithoutMerging(gap pmaGapIterator, r __generics_imported0.AddrRange, val pma) pmaIterator {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ if gr := gap.Range(); !gr.IsSupersetOf(r) {
+ panic(fmt.Sprintf("cannot insert segment range %v into gap range %v", r, gr))
+ }
+ return s.InsertWithoutMergingUnchecked(gap, r, val)
+}
+
+// InsertWithoutMergingUnchecked inserts the given segment into the given gap
+// and returns an iterator to the inserted segment. All existing iterators
+// (including gap, but not including the returned iterator) are invalidated.
+//
+// Preconditions:
+// * r.Start >= gap.Start().
+// * r.End <= gap.End().
+func (s *pmaSet) InsertWithoutMergingUnchecked(gap pmaGapIterator, r __generics_imported0.AddrRange, val pma) pmaIterator {
+ gap = gap.node.rebalanceBeforeInsert(gap)
+ splitMaxGap := pmatrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
+ copy(gap.node.keys[gap.index+1:], gap.node.keys[gap.index:gap.node.nrSegments])
+ copy(gap.node.values[gap.index+1:], gap.node.values[gap.index:gap.node.nrSegments])
+ gap.node.keys[gap.index] = r
+ gap.node.values[gap.index] = val
+ gap.node.nrSegments++
+ if splitMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ return pmaIterator{gap.node, gap.index}
+}
+
+// Remove removes the given segment and returns an iterator to the vacated gap.
+// All existing iterators (including seg, but not including the returned
+// iterator) are invalidated.
+func (s *pmaSet) Remove(seg pmaIterator) pmaGapIterator {
+
+ if seg.node.hasChildren {
+
+ victim := seg.PrevSegment()
+
+ seg.SetRangeUnchecked(victim.Range())
+ seg.SetValue(victim.Value())
+
+ nextAdjacentNode := seg.NextSegment().node
+ if pmatrackGaps != 0 {
+ nextAdjacentNode.updateMaxGapLeaf()
+ }
+ return s.Remove(victim).NextGap()
+ }
+ copy(seg.node.keys[seg.index:], seg.node.keys[seg.index+1:seg.node.nrSegments])
+ copy(seg.node.values[seg.index:], seg.node.values[seg.index+1:seg.node.nrSegments])
+ pmaSetFunctions{}.ClearValue(&seg.node.values[seg.node.nrSegments-1])
+ seg.node.nrSegments--
+ if pmatrackGaps != 0 {
+ seg.node.updateMaxGapLeaf()
+ }
+ return seg.node.rebalanceAfterRemove(pmaGapIterator{seg.node, seg.index})
+}
+
+// RemoveAll removes all segments from the set. All existing iterators are
+// invalidated.
+func (s *pmaSet) RemoveAll() {
+ s.root = pmanode{}
+}
+
+// RemoveRange removes all segments in the given range. An iterator to the
+// newly formed gap is returned, and all existing iterators are invalidated.
+func (s *pmaSet) RemoveRange(r __generics_imported0.AddrRange) pmaGapIterator {
+ seg, gap := s.Find(r.Start)
+ if seg.Ok() {
+ seg = s.Isolate(seg, r)
+ gap = s.Remove(seg)
+ }
+ for seg = gap.NextSegment(); seg.Ok() && seg.Start() < r.End; seg = gap.NextSegment() {
+ seg = s.Isolate(seg, r)
+ gap = s.Remove(seg)
+ }
+ return gap
+}
+
+// Merge attempts to merge two neighboring segments. If successful, Merge
+// returns an iterator to the merged segment, and all existing iterators are
+// invalidated. Otherwise, Merge returns a terminal iterator.
+//
+// If first is not the predecessor of second, Merge panics.
+func (s *pmaSet) Merge(first, second pmaIterator) pmaIterator {
+ if first.NextSegment() != second {
+ panic(fmt.Sprintf("attempt to merge non-neighboring segments %v, %v", first.Range(), second.Range()))
+ }
+ return s.MergeUnchecked(first, second)
+}
+
+// MergeUnchecked attempts to merge two neighboring segments. If successful,
+// MergeUnchecked returns an iterator to the merged segment, and all existing
+// iterators are invalidated. Otherwise, MergeUnchecked returns a terminal
+// iterator.
+//
+// Precondition: first is the predecessor of second: first.NextSegment() ==
+// second, first == second.PrevSegment().
+func (s *pmaSet) MergeUnchecked(first, second pmaIterator) pmaIterator {
+ if first.End() == second.Start() {
+ if mval, ok := (pmaSetFunctions{}).Merge(first.Range(), first.Value(), second.Range(), second.Value()); ok {
+
+ first.SetEndUnchecked(second.End())
+ first.SetValue(mval)
+
+ return s.Remove(second).PrevSegment()
+ }
+ }
+ return pmaIterator{}
+}
+
+// MergeAll attempts to merge all adjacent segments in the set. All existing
+// iterators are invalidated.
+func (s *pmaSet) MergeAll() {
+ seg := s.FirstSegment()
+ if !seg.Ok() {
+ return
+ }
+ next := seg.NextSegment()
+ for next.Ok() {
+ if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
+ seg, next = mseg, mseg.NextSegment()
+ } else {
+ seg, next = next, next.NextSegment()
+ }
+ }
+}
+
+// MergeRange attempts to merge all adjacent segments that contain a key in the
+// specified range. All existing iterators are invalidated.
+func (s *pmaSet) MergeRange(r __generics_imported0.AddrRange) {
+ seg := s.LowerBoundSegment(r.Start)
+ if !seg.Ok() {
+ return
+ }
+ next := seg.NextSegment()
+ for next.Ok() && next.Range().Start < r.End {
+ if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
+ seg, next = mseg, mseg.NextSegment()
+ } else {
+ seg, next = next, next.NextSegment()
+ }
+ }
+}
+
+// MergeAdjacent attempts to merge the segment containing r.Start with its
+// predecessor, and the segment containing r.End-1 with its successor.
+func (s *pmaSet) MergeAdjacent(r __generics_imported0.AddrRange) {
+ first := s.FindSegment(r.Start)
+ if first.Ok() {
+ if prev := first.PrevSegment(); prev.Ok() {
+ s.Merge(prev, first)
+ }
+ }
+ last := s.FindSegment(r.End - 1)
+ if last.Ok() {
+ if next := last.NextSegment(); next.Ok() {
+ s.Merge(last, next)
+ }
+ }
+}
+
+// Split splits the given segment at the given key and returns iterators to the
+// two resulting segments. All existing iterators (including seg, but not
+// including the returned iterators) are invalidated.
+//
+// If the segment cannot be split at split (because split is at the start or
+// end of the segment's range, so splitting would produce a segment with zero
+// length, or because split falls outside the segment's range altogether),
+// Split panics.
+func (s *pmaSet) Split(seg pmaIterator, split __generics_imported0.Addr) (pmaIterator, pmaIterator) {
+ if !seg.Range().CanSplitAt(split) {
+ panic(fmt.Sprintf("can't split %v at %v", seg.Range(), split))
+ }
+ return s.SplitUnchecked(seg, split)
+}
+
+// SplitUnchecked splits the given segment at the given key and returns
+// iterators to the two resulting segments. All existing iterators (including
+// seg, but not including the returned iterators) are invalidated.
+//
+// Preconditions: seg.Start() < key < seg.End().
+func (s *pmaSet) SplitUnchecked(seg pmaIterator, split __generics_imported0.Addr) (pmaIterator, pmaIterator) {
+ val1, val2 := (pmaSetFunctions{}).Split(seg.Range(), seg.Value(), split)
+ end2 := seg.End()
+ seg.SetEndUnchecked(split)
+ seg.SetValue(val1)
+ seg2 := s.InsertWithoutMergingUnchecked(seg.NextGap(), __generics_imported0.AddrRange{split, end2}, val2)
+
+ return seg2.PrevSegment(), seg2
+}
+
+// SplitAt splits the segment straddling split, if one exists. SplitAt returns
+// true if a segment was split and false otherwise. If SplitAt splits a
+// segment, all existing iterators are invalidated.
+func (s *pmaSet) SplitAt(split __generics_imported0.Addr) bool {
+ if seg := s.FindSegment(split); seg.Ok() && seg.Range().CanSplitAt(split) {
+ s.SplitUnchecked(seg, split)
+ return true
+ }
+ return false
+}
+
+// Isolate ensures that the given segment's range does not escape r by
+// splitting at r.Start and r.End if necessary, and returns an updated iterator
+// to the bounded segment. All existing iterators (including seg, but not
+// including the returned iterators) are invalidated.
+func (s *pmaSet) Isolate(seg pmaIterator, r __generics_imported0.AddrRange) pmaIterator {
+ if seg.Range().CanSplitAt(r.Start) {
+ _, seg = s.SplitUnchecked(seg, r.Start)
+ }
+ if seg.Range().CanSplitAt(r.End) {
+ seg, _ = s.SplitUnchecked(seg, r.End)
+ }
+ return seg
+}
+
+// ApplyContiguous applies a function to a contiguous range of segments,
+// splitting if necessary. The function is applied until the first gap is
+// encountered, at which point the gap is returned. If the function is applied
+// across the entire range, a terminal gap is returned. All existing iterators
+// are invalidated.
+//
+// N.B. The Iterator must not be invalidated by the function.
+func (s *pmaSet) ApplyContiguous(r __generics_imported0.AddrRange, fn func(seg pmaIterator)) pmaGapIterator {
+ seg, gap := s.Find(r.Start)
+ if !seg.Ok() {
+ return gap
+ }
+ for {
+ seg = s.Isolate(seg, r)
+ fn(seg)
+ if seg.End() >= r.End {
+ return pmaGapIterator{}
+ }
+ gap = seg.NextGap()
+ if !gap.IsEmpty() {
+ return gap
+ }
+ seg = gap.NextSegment()
+ if !seg.Ok() {
+
+ return pmaGapIterator{}
+ }
+ }
+}
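
A typical use of ApplyContiguous is to visit or mutate every pma over a contiguous address range. The sketch below (illustrative names, in-package) shows the calling contract: the callback sees segments already isolated to the range, and a terminal returned gap means the range was fully covered.

// countPmasInRange is illustrative only and not part of this change.
func countPmasInRange(s *pmaSet, ar __generics_imported0.AddrRange) (n int, covered bool) {
	gap := s.ApplyContiguous(ar, func(seg pmaIterator) {
		n++ // seg has already been isolated to ar by ApplyContiguous
	})
	// A terminal gap iterator means the callback was applied across all of ar.
	return n, !gap.Ok()
}
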
+
+// +stateify savable
+type pmanode struct {
+ // An internal binary tree node looks like:
+ //
+ // K
+ // / \
+ // Cl Cr
+ //
+ // where all keys in the subtree rooted by Cl (the left subtree) are less
+ // than K (the key of the parent node), and all keys in the subtree rooted
+ // by Cr (the right subtree) are greater than K.
+ //
+ // An internal B-tree node's indexes work out to look like:
+ //
+ // K0 K1 K2 ... Kn-1
+ // / \/ \/ \ ... / \
+ // C0 C1 C2 C3 ... Cn-1 Cn
+ //
+ // where n is nrSegments.
+ nrSegments int
+
+ // parent is a pointer to this node's parent. If this node is root, parent
+ // is nil.
+ parent *pmanode
+
+ // parentIndex is the index of this node in parent.children.
+ parentIndex int
+
+ // Flag for internal nodes that is technically redundant with "children[0]
+ // != nil", but is stored in the first cache line. "hasChildren" rather
+ // than "isLeaf" because false must be the correct value for an empty root.
+ hasChildren bool
+
+	// The longest gap within this node. If the node is a leaf, this is simply
+	// the maximum among the (nrSegments+1) gaps formed by its nrSegments keys,
+	// including the 0th and nrSegments-th gaps that may be shared with its
+	// upper-level nodes; if the node is internal, it is the max of all
+	// children's maxGap.
+ maxGap pmadynamicGap
+
+ // Nodes store keys and values in separate arrays to maximize locality in
+ // the common case (scanning keys for lookup).
+ keys [pmamaxDegree - 1]__generics_imported0.AddrRange
+ values [pmamaxDegree - 1]pma
+ children [pmamaxDegree]*pmanode
+}
+
+// firstSegment returns the first segment in the subtree rooted by n.
+//
+// Preconditions: n.nrSegments != 0.
+func (n *pmanode) firstSegment() pmaIterator {
+ for n.hasChildren {
+ n = n.children[0]
+ }
+ return pmaIterator{n, 0}
+}
+
+// lastSegment returns the last segment in the subtree rooted by n.
+//
+// Preconditions: n.nrSegments != 0.
+func (n *pmanode) lastSegment() pmaIterator {
+ for n.hasChildren {
+ n = n.children[n.nrSegments]
+ }
+ return pmaIterator{n, n.nrSegments - 1}
+}
+
+func (n *pmanode) prevSibling() *pmanode {
+ if n.parent == nil || n.parentIndex == 0 {
+ return nil
+ }
+ return n.parent.children[n.parentIndex-1]
+}
+
+func (n *pmanode) nextSibling() *pmanode {
+ if n.parent == nil || n.parentIndex == n.parent.nrSegments {
+ return nil
+ }
+ return n.parent.children[n.parentIndex+1]
+}
+
+// rebalanceBeforeInsert splits n and its ancestors if they are full, as
+// required for insertion, and returns an updated iterator to the position
+// represented by gap.
+func (n *pmanode) rebalanceBeforeInsert(gap pmaGapIterator) pmaGapIterator {
+ if n.nrSegments < pmamaxDegree-1 {
+ return gap
+ }
+ if n.parent != nil {
+ gap = n.parent.rebalanceBeforeInsert(gap)
+ }
+ if n.parent == nil {
+
+ left := &pmanode{
+ nrSegments: pmaminDegree - 1,
+ parent: n,
+ parentIndex: 0,
+ hasChildren: n.hasChildren,
+ }
+ right := &pmanode{
+ nrSegments: pmaminDegree - 1,
+ parent: n,
+ parentIndex: 1,
+ hasChildren: n.hasChildren,
+ }
+ copy(left.keys[:pmaminDegree-1], n.keys[:pmaminDegree-1])
+ copy(left.values[:pmaminDegree-1], n.values[:pmaminDegree-1])
+ copy(right.keys[:pmaminDegree-1], n.keys[pmaminDegree:])
+ copy(right.values[:pmaminDegree-1], n.values[pmaminDegree:])
+ n.keys[0], n.values[0] = n.keys[pmaminDegree-1], n.values[pmaminDegree-1]
+ pmazeroValueSlice(n.values[1:])
+ if n.hasChildren {
+ copy(left.children[:pmaminDegree], n.children[:pmaminDegree])
+ copy(right.children[:pmaminDegree], n.children[pmaminDegree:])
+ pmazeroNodeSlice(n.children[2:])
+ for i := 0; i < pmaminDegree; i++ {
+ left.children[i].parent = left
+ left.children[i].parentIndex = i
+ right.children[i].parent = right
+ right.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments = 1
+ n.hasChildren = true
+ n.children[0] = left
+ n.children[1] = right
+
+ if pmatrackGaps != 0 {
+ left.updateMaxGapLocal()
+ right.updateMaxGapLocal()
+ }
+ if gap.node != n {
+ return gap
+ }
+ if gap.index < pmaminDegree {
+ return pmaGapIterator{left, gap.index}
+ }
+ return pmaGapIterator{right, gap.index - pmaminDegree}
+ }
+
+ copy(n.parent.keys[n.parentIndex+1:], n.parent.keys[n.parentIndex:n.parent.nrSegments])
+ copy(n.parent.values[n.parentIndex+1:], n.parent.values[n.parentIndex:n.parent.nrSegments])
+ n.parent.keys[n.parentIndex], n.parent.values[n.parentIndex] = n.keys[pmaminDegree-1], n.values[pmaminDegree-1]
+ copy(n.parent.children[n.parentIndex+2:], n.parent.children[n.parentIndex+1:n.parent.nrSegments+1])
+ for i := n.parentIndex + 2; i < n.parent.nrSegments+2; i++ {
+ n.parent.children[i].parentIndex = i
+ }
+ sibling := &pmanode{
+ nrSegments: pmaminDegree - 1,
+ parent: n.parent,
+ parentIndex: n.parentIndex + 1,
+ hasChildren: n.hasChildren,
+ }
+ n.parent.children[n.parentIndex+1] = sibling
+ n.parent.nrSegments++
+ copy(sibling.keys[:pmaminDegree-1], n.keys[pmaminDegree:])
+ copy(sibling.values[:pmaminDegree-1], n.values[pmaminDegree:])
+ pmazeroValueSlice(n.values[pmaminDegree-1:])
+ if n.hasChildren {
+ copy(sibling.children[:pmaminDegree], n.children[pmaminDegree:])
+ pmazeroNodeSlice(n.children[pmaminDegree:])
+ for i := 0; i < pmaminDegree; i++ {
+ sibling.children[i].parent = sibling
+ sibling.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments = pmaminDegree - 1
+
+ if pmatrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+
+ if gap.node != n {
+ return gap
+ }
+ if gap.index < pmaminDegree {
+ return gap
+ }
+ return pmaGapIterator{sibling, gap.index - pmaminDegree}
+}
+
+// rebalanceAfterRemove "unsplits" n and its ancestors if they are deficient
+// (contain fewer segments than required by B-tree invariants), as required for
+// removal, and returns an updated iterator to the position represented by gap.
+//
+// Precondition: n is the only node in the tree that may currently violate a
+// B-tree invariant.
+func (n *pmanode) rebalanceAfterRemove(gap pmaGapIterator) pmaGapIterator {
+ for {
+ if n.nrSegments >= pmaminDegree-1 {
+ return gap
+ }
+ if n.parent == nil {
+
+ return gap
+ }
+
+ if sibling := n.prevSibling(); sibling != nil && sibling.nrSegments >= pmaminDegree {
+ copy(n.keys[1:], n.keys[:n.nrSegments])
+ copy(n.values[1:], n.values[:n.nrSegments])
+ n.keys[0] = n.parent.keys[n.parentIndex-1]
+ n.values[0] = n.parent.values[n.parentIndex-1]
+ n.parent.keys[n.parentIndex-1] = sibling.keys[sibling.nrSegments-1]
+ n.parent.values[n.parentIndex-1] = sibling.values[sibling.nrSegments-1]
+ pmaSetFunctions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
+ if n.hasChildren {
+ copy(n.children[1:], n.children[:n.nrSegments+1])
+ n.children[0] = sibling.children[sibling.nrSegments]
+ sibling.children[sibling.nrSegments] = nil
+ n.children[0].parent = n
+ n.children[0].parentIndex = 0
+ for i := 1; i < n.nrSegments+2; i++ {
+ n.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments++
+ sibling.nrSegments--
+
+ if pmatrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+ if gap.node == sibling && gap.index == sibling.nrSegments {
+ return pmaGapIterator{n, 0}
+ }
+ if gap.node == n {
+ return pmaGapIterator{n, gap.index + 1}
+ }
+ return gap
+ }
+ if sibling := n.nextSibling(); sibling != nil && sibling.nrSegments >= pmaminDegree {
+ n.keys[n.nrSegments] = n.parent.keys[n.parentIndex]
+ n.values[n.nrSegments] = n.parent.values[n.parentIndex]
+ n.parent.keys[n.parentIndex] = sibling.keys[0]
+ n.parent.values[n.parentIndex] = sibling.values[0]
+ copy(sibling.keys[:sibling.nrSegments-1], sibling.keys[1:])
+ copy(sibling.values[:sibling.nrSegments-1], sibling.values[1:])
+ pmaSetFunctions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
+ if n.hasChildren {
+ n.children[n.nrSegments+1] = sibling.children[0]
+ copy(sibling.children[:sibling.nrSegments], sibling.children[1:])
+ sibling.children[sibling.nrSegments] = nil
+ n.children[n.nrSegments+1].parent = n
+ n.children[n.nrSegments+1].parentIndex = n.nrSegments + 1
+ for i := 0; i < sibling.nrSegments; i++ {
+ sibling.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments++
+ sibling.nrSegments--
+
+ if pmatrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+ if gap.node == sibling {
+ if gap.index == 0 {
+ return pmaGapIterator{n, n.nrSegments}
+ }
+ return pmaGapIterator{sibling, gap.index - 1}
+ }
+ return gap
+ }
+
+ p := n.parent
+ if p.nrSegments == 1 {
+
+ left, right := p.children[0], p.children[1]
+ p.nrSegments = left.nrSegments + right.nrSegments + 1
+ p.hasChildren = left.hasChildren
+ p.keys[left.nrSegments] = p.keys[0]
+ p.values[left.nrSegments] = p.values[0]
+ copy(p.keys[:left.nrSegments], left.keys[:left.nrSegments])
+ copy(p.values[:left.nrSegments], left.values[:left.nrSegments])
+ copy(p.keys[left.nrSegments+1:], right.keys[:right.nrSegments])
+ copy(p.values[left.nrSegments+1:], right.values[:right.nrSegments])
+ if left.hasChildren {
+ copy(p.children[:left.nrSegments+1], left.children[:left.nrSegments+1])
+ copy(p.children[left.nrSegments+1:], right.children[:right.nrSegments+1])
+ for i := 0; i < p.nrSegments+1; i++ {
+ p.children[i].parent = p
+ p.children[i].parentIndex = i
+ }
+ } else {
+ p.children[0] = nil
+ p.children[1] = nil
+ }
+
+ if gap.node == left {
+ return pmaGapIterator{p, gap.index}
+ }
+ if gap.node == right {
+ return pmaGapIterator{p, gap.index + left.nrSegments + 1}
+ }
+ return gap
+ }
+ // Merge n and either sibling, along with the segment separating the
+ // two, into whichever of the two nodes comes first. This is the
+ // reverse of the non-root splitting case in
+ // node.rebalanceBeforeInsert.
+ var left, right *pmanode
+ if n.parentIndex > 0 {
+ left = n.prevSibling()
+ right = n
+ } else {
+ left = n
+ right = n.nextSibling()
+ }
+
+ if gap.node == right {
+ gap = pmaGapIterator{left, gap.index + left.nrSegments + 1}
+ }
+ left.keys[left.nrSegments] = p.keys[left.parentIndex]
+ left.values[left.nrSegments] = p.values[left.parentIndex]
+ copy(left.keys[left.nrSegments+1:], right.keys[:right.nrSegments])
+ copy(left.values[left.nrSegments+1:], right.values[:right.nrSegments])
+ if left.hasChildren {
+ copy(left.children[left.nrSegments+1:], right.children[:right.nrSegments+1])
+ for i := left.nrSegments + 1; i < left.nrSegments+right.nrSegments+2; i++ {
+ left.children[i].parent = left
+ left.children[i].parentIndex = i
+ }
+ }
+ left.nrSegments += right.nrSegments + 1
+ copy(p.keys[left.parentIndex:], p.keys[left.parentIndex+1:p.nrSegments])
+ copy(p.values[left.parentIndex:], p.values[left.parentIndex+1:p.nrSegments])
+ pmaSetFunctions{}.ClearValue(&p.values[p.nrSegments-1])
+ copy(p.children[left.parentIndex+1:], p.children[left.parentIndex+2:p.nrSegments+1])
+ for i := 0; i < p.nrSegments; i++ {
+ p.children[i].parentIndex = i
+ }
+ p.children[p.nrSegments] = nil
+ p.nrSegments--
+
+ if pmatrackGaps != 0 {
+ left.updateMaxGapLocal()
+ }
+
+ n = p
+ }
+}
+
+// updateMaxGapLeaf updates maxGap bottom-up from the calling leaf until no
+// further update is necessary.
+//
+// Preconditions: n must be a leaf node, trackGaps must be 1.
+func (n *pmanode) updateMaxGapLeaf() {
+ if n.hasChildren {
+ panic(fmt.Sprintf("updateMaxGapLeaf should always be called on leaf node: %v", n))
+ }
+ max := n.calculateMaxGapLeaf()
+ if max == n.maxGap.Get() {
+
+ return
+ }
+ oldMax := n.maxGap.Get()
+ n.maxGap.Set(max)
+ if max > oldMax {
+
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() >= max {
+
+ break
+ }
+
+ p.maxGap.Set(max)
+ }
+ return
+ }
+
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() > oldMax {
+
+ break
+ }
+
+ parentNewMax := p.calculateMaxGapInternal()
+ if p.maxGap.Get() == parentNewMax {
+
+ break
+ }
+
+ p.maxGap.Set(parentNewMax)
+ }
+}
+
+// updateMaxGapLocal updates only the calling node's maxGap, with no
+// propagation to ancestor nodes.
+//
+// Precondition: trackGaps must be 1.
+func (n *pmanode) updateMaxGapLocal() {
+ if !n.hasChildren {
+
+ n.maxGap.Set(n.calculateMaxGapLeaf())
+ } else {
+
+ n.maxGap.Set(n.calculateMaxGapInternal())
+ }
+}
+
+// calculateMaxGapLeaf iterates the gaps within a leaf node and calculates the
+// max.
+//
+// Preconditions: n must be a leaf node.
+func (n *pmanode) calculateMaxGapLeaf() __generics_imported0.Addr {
+ max := pmaGapIterator{n, 0}.Range().Length()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := (pmaGapIterator{n, i}).Range().Length(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// calculateMaxGapInternal iterates children's maxGap within an internal node n
+// and calculates the max.
+//
+// Preconditions: n must be a non-leaf node.
+func (n *pmanode) calculateMaxGapInternal() __generics_imported0.Addr {
+ max := n.children[0].maxGap.Get()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := n.children[i].maxGap.Get(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// searchFirstLargeEnoughGap returns the first gap of length at least minSize
+// in the subtree rooted by n. If no such gap exists, it returns a terminal gap iterator.
+func (n *pmanode) searchFirstLargeEnoughGap(minSize __generics_imported0.Addr) pmaGapIterator {
+ if n.maxGap.Get() < minSize {
+ return pmaGapIterator{}
+ }
+ if n.hasChildren {
+ for i := 0; i <= n.nrSegments; i++ {
+ if largeEnoughGap := n.children[i].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := 0; i <= n.nrSegments; i++ {
+ currentGap := pmaGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
+// searchLastLargeEnoughGap returns the last gap of length at least minSize
+// in the subtree rooted by n. If no such gap exists, it returns a terminal gap iterator.
+func (n *pmanode) searchLastLargeEnoughGap(minSize __generics_imported0.Addr) pmaGapIterator {
+ if n.maxGap.Get() < minSize {
+ return pmaGapIterator{}
+ }
+ if n.hasChildren {
+ for i := n.nrSegments; i >= 0; i-- {
+ if largeEnoughGap := n.children[i].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := n.nrSegments; i >= 0; i-- {
+ currentGap := pmaGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
+// An Iterator is conceptually one of:
+//
+// - A pointer to a segment in a set; or
+//
+// - A terminal iterator, which is a sentinel indicating that the end of
+// iteration has been reached.
+//
+// Iterators are copyable values and are meaningfully equality-comparable. The
+// zero value of Iterator is a terminal iterator.
+//
+// Unless otherwise specified, any mutation of a set invalidates all existing
+// iterators into the set.
+type pmaIterator struct {
+ // node is the node containing the iterated segment. If the iterator is
+ // terminal, node is nil.
+ node *pmanode
+
+ // index is the index of the segment in node.keys/values.
+ index int
+}
+
+// Ok returns true if the iterator is not terminal. All other methods are only
+// valid for non-terminal iterators.
+func (seg pmaIterator) Ok() bool {
+ return seg.node != nil
+}
+
+// Range returns the iterated segment's range key.
+func (seg pmaIterator) Range() __generics_imported0.AddrRange {
+ return seg.node.keys[seg.index]
+}
+
+// Start is equivalent to Range().Start, but should be preferred if only the
+// start of the range is needed.
+func (seg pmaIterator) Start() __generics_imported0.Addr {
+ return seg.node.keys[seg.index].Start
+}
+
+// End is equivalent to Range().End, but should be preferred if only the end of
+// the range is needed.
+func (seg pmaIterator) End() __generics_imported0.Addr {
+ return seg.node.keys[seg.index].End
+}
+
+// SetRangeUnchecked mutates the iterated segment's range key. This operation
+// does not invalidate any iterators.
+//
+// Preconditions:
+// * r.Length() > 0.
+// * The new range must not overlap an existing one:
+// * If seg.NextSegment().Ok(), then r.end <= seg.NextSegment().Start().
+// * If seg.PrevSegment().Ok(), then r.start >= seg.PrevSegment().End().
+func (seg pmaIterator) SetRangeUnchecked(r __generics_imported0.AddrRange) {
+ seg.node.keys[seg.index] = r
+}
+
+// SetRange mutates the iterated segment's range key. If the new range would
+// cause the iterated segment to overlap another segment, or if the new range
+// is invalid, SetRange panics. This operation does not invalidate any
+// iterators.
+func (seg pmaIterator) SetRange(r __generics_imported0.AddrRange) {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ if prev := seg.PrevSegment(); prev.Ok() && r.Start < prev.End() {
+ panic(fmt.Sprintf("new segment range %v overlaps segment range %v", r, prev.Range()))
+ }
+ if next := seg.NextSegment(); next.Ok() && r.End > next.Start() {
+ panic(fmt.Sprintf("new segment range %v overlaps segment range %v", r, next.Range()))
+ }
+ seg.SetRangeUnchecked(r)
+}
+
+// SetStartUnchecked mutates the iterated segment's start. This operation does
+// not invalidate any iterators.
+//
+// Preconditions: The new start must be valid:
+// * start < seg.End()
+// * If seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
+func (seg pmaIterator) SetStartUnchecked(start __generics_imported0.Addr) {
+ seg.node.keys[seg.index].Start = start
+}
+
+// SetStart mutates the iterated segment's start. If the new start value would
+// cause the iterated segment to overlap another segment, or would result in an
+// invalid range, SetStart panics. This operation does not invalidate any
+// iterators.
+func (seg pmaIterator) SetStart(start __generics_imported0.Addr) {
+ if start >= seg.End() {
+ panic(fmt.Sprintf("new start %v would invalidate segment range %v", start, seg.Range()))
+ }
+ if prev := seg.PrevSegment(); prev.Ok() && start < prev.End() {
+ panic(fmt.Sprintf("new start %v would cause segment range %v to overlap segment range %v", start, seg.Range(), prev.Range()))
+ }
+ seg.SetStartUnchecked(start)
+}
+
+// SetEndUnchecked mutates the iterated segment's end. This operation does not
+// invalidate any iterators.
+//
+// Preconditions: The new end must be valid:
+// * end > seg.Start().
+// * If seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
+func (seg pmaIterator) SetEndUnchecked(end __generics_imported0.Addr) {
+ seg.node.keys[seg.index].End = end
+}
+
+// SetEnd mutates the iterated segment's end. If the new end value would cause
+// the iterated segment to overlap another segment, or would result in an
+// invalid range, SetEnd panics. This operation does not invalidate any
+// iterators.
+func (seg pmaIterator) SetEnd(end __generics_imported0.Addr) {
+ if end <= seg.Start() {
+ panic(fmt.Sprintf("new end %v would invalidate segment range %v", end, seg.Range()))
+ }
+ if next := seg.NextSegment(); next.Ok() && end > next.Start() {
+ panic(fmt.Sprintf("new end %v would cause segment range %v to overlap segment range %v", end, seg.Range(), next.Range()))
+ }
+ seg.SetEndUnchecked(end)
+}
+
+// Value returns a copy of the iterated segment's value.
+func (seg pmaIterator) Value() pma {
+ return seg.node.values[seg.index]
+}
+
+// ValuePtr returns a pointer to the iterated segment's value. The pointer is
+// invalidated if the iterator is invalidated. This operation does not
+// invalidate any iterators.
+func (seg pmaIterator) ValuePtr() *pma {
+ return &seg.node.values[seg.index]
+}
+
+// SetValue mutates the iterated segment's value. This operation does not
+// invalidate any iterators.
+func (seg pmaIterator) SetValue(val pma) {
+ seg.node.values[seg.index] = val
+}
+
+// PrevSegment returns the iterated segment's predecessor. If there is no
+// preceding segment, PrevSegment returns a terminal iterator.
+func (seg pmaIterator) PrevSegment() pmaIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index].lastSegment()
+ }
+ if seg.index > 0 {
+ return pmaIterator{seg.node, seg.index - 1}
+ }
+ if seg.node.parent == nil {
+ return pmaIterator{}
+ }
+ return pmasegmentBeforePosition(seg.node.parent, seg.node.parentIndex)
+}
+
+// NextSegment returns the iterated segment's successor. If there is no
+// succeeding segment, NextSegment returns a terminal iterator.
+func (seg pmaIterator) NextSegment() pmaIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index+1].firstSegment()
+ }
+ if seg.index < seg.node.nrSegments-1 {
+ return pmaIterator{seg.node, seg.index + 1}
+ }
+ if seg.node.parent == nil {
+ return pmaIterator{}
+ }
+ return pmasegmentAfterPosition(seg.node.parent, seg.node.parentIndex)
+}
+
+// PrevGap returns the gap immediately before the iterated segment.
+func (seg pmaIterator) PrevGap() pmaGapIterator {
+ if seg.node.hasChildren {
+
+ return seg.node.children[seg.index].lastSegment().NextGap()
+ }
+ return pmaGapIterator{seg.node, seg.index}
+}
+
+// NextGap returns the gap immediately after the iterated segment.
+func (seg pmaIterator) NextGap() pmaGapIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index+1].firstSegment().PrevGap()
+ }
+ return pmaGapIterator{seg.node, seg.index + 1}
+}
+
+// PrevNonEmpty returns the iterated segment's predecessor if it is adjacent,
+// or the gap before the iterated segment otherwise. If seg.Start() ==
+// Functions.MinKey(), PrevNonEmpty will return two terminal iterators.
+// Otherwise, exactly one of the iterators returned by PrevNonEmpty will be
+// non-terminal.
+func (seg pmaIterator) PrevNonEmpty() (pmaIterator, pmaGapIterator) {
+ gap := seg.PrevGap()
+ if gap.Range().Length() != 0 {
+ return pmaIterator{}, gap
+ }
+ return gap.PrevSegment(), pmaGapIterator{}
+}
+
+// NextNonEmpty returns the iterated segment's successor if it is adjacent, or
+// the gap after the iterated segment otherwise. If seg.End() ==
+// Functions.MaxKey(), NextNonEmpty will return two terminal iterators.
+// Otherwise, exactly one of the iterators returned by NextNonEmpty will be
+// non-terminal.
+func (seg pmaIterator) NextNonEmpty() (pmaIterator, pmaGapIterator) {
+ gap := seg.NextGap()
+ if gap.Range().Length() != 0 {
+ return pmaIterator{}, gap
+ }
+ return gap.NextSegment(), pmaGapIterator{}
+}
+
+// A GapIterator is conceptually one of:
+//
+// - A pointer to a position between two segments, before the first segment, or
+// after the last segment in a set, called a *gap*; or
+//
+// - A terminal iterator, which is a sentinel indicating that the end of
+// iteration has been reached.
+//
+// Note that the gap between two adjacent segments exists (iterators to it are
+// non-terminal), but has a length of zero. GapIterator.IsEmpty returns true
+// for such gaps. An empty set contains a single gap, spanning the entire range
+// of the set's keys.
+//
+// GapIterators are copyable values and are meaningfully equality-comparable.
+// The zero value of GapIterator is a terminal iterator.
+//
+// Unless otherwise specified, any mutation of a set invalidates all existing
+// iterators into the set.
+type pmaGapIterator struct {
+ // The representation of a GapIterator is identical to that of an Iterator,
+ // except that index corresponds to positions between segments in the same
+ // way as for node.children (see comment for node.nrSegments).
+ node *pmanode
+ index int
+}
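
Since pmatrackGaps is 0 in this instantiation, the {Next,Prev}LargeEnoughGap methods below would panic; the plain gap iterator is still useful for free-space scans. A hedged sketch (illustrative only, in-package):

// findFreeRange is illustrative only and not part of this change. It scans
// gaps at or after addr for free space of at least length bytes. Overflow at
// the very top of the address space is ignored in this sketch.
func findFreeRange(s *pmaSet, addr, length __generics_imported0.Addr) (__generics_imported0.AddrRange, bool) {
	for gap := s.LowerBoundGap(addr); gap.Ok(); gap = gap.NextGap() {
		start := gap.Start()
		if start < addr {
			start = addr
		}
		if gap.End()-start >= length {
			return __generics_imported0.AddrRange{Start: start, End: start + length}, true
		}
	}
	return __generics_imported0.AddrRange{}, false
}
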
+
+// Ok returns true if the iterator is not terminal. All other methods are only
+// valid for non-terminal iterators.
+func (gap pmaGapIterator) Ok() bool {
+ return gap.node != nil
+}
+
+// Range returns the range spanned by the iterated gap.
+func (gap pmaGapIterator) Range() __generics_imported0.AddrRange {
+ return __generics_imported0.AddrRange{gap.Start(), gap.End()}
+}
+
+// Start is equivalent to Range().Start, but should be preferred if only the
+// start of the range is needed.
+func (gap pmaGapIterator) Start() __generics_imported0.Addr {
+ if ps := gap.PrevSegment(); ps.Ok() {
+ return ps.End()
+ }
+ return pmaSetFunctions{}.MinKey()
+}
+
+// End is equivalent to Range().End, but should be preferred if only the end of
+// the range is needed.
+func (gap pmaGapIterator) End() __generics_imported0.Addr {
+ if ns := gap.NextSegment(); ns.Ok() {
+ return ns.Start()
+ }
+ return pmaSetFunctions{}.MaxKey()
+}
+
+// IsEmpty returns true if the iterated gap is empty (that is, the "gap" is
+// between two adjacent segments.)
+func (gap pmaGapIterator) IsEmpty() bool {
+ return gap.Range().Length() == 0
+}
+
+// PrevSegment returns the segment immediately before the iterated gap. If no
+// such segment exists, PrevSegment returns a terminal iterator.
+func (gap pmaGapIterator) PrevSegment() pmaIterator {
+ return pmasegmentBeforePosition(gap.node, gap.index)
+}
+
+// NextSegment returns the segment immediately after the iterated gap. If no
+// such segment exists, NextSegment returns a terminal iterator.
+func (gap pmaGapIterator) NextSegment() pmaIterator {
+ return pmasegmentAfterPosition(gap.node, gap.index)
+}
+
+// PrevGap returns the iterated gap's predecessor. If no such gap exists,
+// PrevGap returns a terminal iterator.
+func (gap pmaGapIterator) PrevGap() pmaGapIterator {
+ seg := gap.PrevSegment()
+ if !seg.Ok() {
+ return pmaGapIterator{}
+ }
+ return seg.PrevGap()
+}
+
+// NextGap returns the iterated gap's successor. If no such gap exists, NextGap
+// returns a terminal iterator.
+func (gap pmaGapIterator) NextGap() pmaGapIterator {
+ seg := gap.NextSegment()
+ if !seg.Ok() {
+ return pmaGapIterator{}
+ }
+ return seg.NextGap()
+}
+
+// NextLargeEnoughGap returns the first gap after the iterated gap whose length
+// is at least minSize. If no such gap exists, it returns a terminal gap
+// iterator (the iterated gap itself is NOT considered).
+//
+// Precondition: trackGaps must be 1.
+func (gap pmaGapIterator) NextLargeEnoughGap(minSize __generics_imported0.Addr) pmaGapIterator {
+ if pmatrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == gap.node.nrSegments {
+
+ gap.node = gap.NextSegment().node
+ gap.index = 0
+ return gap.nextLargeEnoughGapHelper(minSize)
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
+
+// nextLargeEnoughGapHelper is the helper function used by NextLargeEnoughGap
+// to perform the actual recursion.
+//
+// Preconditions: gap is NOT the trailing gap of a non-leaf node.
+func (gap pmaGapIterator) nextLargeEnoughGapHelper(minSize __generics_imported0.Addr) pmaGapIterator {
+
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == gap.node.nrSegments)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return pmaGapIterator{}
+ }
+
+ gap.index++
+ for gap.index <= gap.node.nrSegments {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index++
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == gap.node.nrSegments {
+
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
+
+// PrevLargeEnoughGap returns the last gap before the iterated gap whose length
+// is at least minSize. If no such gap exists, it returns a terminal gap
+// iterator (the iterated gap itself is NOT considered).
+//
+// Precondition: trackGaps must be 1.
+func (gap pmaGapIterator) PrevLargeEnoughGap(minSize __generics_imported0.Addr) pmaGapIterator {
+ if pmatrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == 0 {
+
+ gap.node = gap.PrevSegment().node
+ gap.index = gap.node.nrSegments
+ return gap.prevLargeEnoughGapHelper(minSize)
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
+
+// prevLargeEnoughGapHelper is the helper function used by PrevLargeEnoughGap
+// to perform the actual recursion.
+//
+// Preconditions: gap is NOT the first gap of a non-leaf node.
+func (gap pmaGapIterator) prevLargeEnoughGapHelper(minSize __generics_imported0.Addr) pmaGapIterator {
+
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == 0)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return pmaGapIterator{}
+ }
+
+ gap.index--
+ for gap.index >= 0 {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index--
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == 0 {
+
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
+
+// segmentBeforePosition returns the predecessor segment of the position given
+// by n.children[i], which may or may not contain a child. If no such segment
+// exists, segmentBeforePosition returns a terminal iterator.
+func pmasegmentBeforePosition(n *pmanode, i int) pmaIterator {
+ for i == 0 {
+ if n.parent == nil {
+ return pmaIterator{}
+ }
+ n, i = n.parent, n.parentIndex
+ }
+ return pmaIterator{n, i - 1}
+}
+
+// segmentAfterPosition returns the successor segment of the position given by
+// n.children[i], which may or may not contain a child. If no such segment
+// exists, segmentAfterPosition returns a terminal iterator.
+func pmasegmentAfterPosition(n *pmanode, i int) pmaIterator {
+ for i == n.nrSegments {
+ if n.parent == nil {
+ return pmaIterator{}
+ }
+ n, i = n.parent, n.parentIndex
+ }
+ return pmaIterator{n, i}
+}
+
+func pmazeroValueSlice(slice []pma) {
+
+ for i := range slice {
+ pmaSetFunctions{}.ClearValue(&slice[i])
+ }
+}
+
+func pmazeroNodeSlice(slice []*pmanode) {
+ for i := range slice {
+ slice[i] = nil
+ }
+}
+
+// String stringifies a Set for debugging.
+func (s *pmaSet) String() string {
+ return s.root.String()
+}
+
+// String stringifies a node (and all of its children) for debugging.
+func (n *pmanode) String() string {
+ var buf bytes.Buffer
+ n.writeDebugString(&buf, "")
+ return buf.String()
+}
+
+func (n *pmanode) writeDebugString(buf *bytes.Buffer, prefix string) {
+ if n.hasChildren != (n.nrSegments > 0 && n.children[0] != nil) {
+ buf.WriteString(prefix)
+ buf.WriteString(fmt.Sprintf("WARNING: inconsistent value of hasChildren: got %v, want %v\n", n.hasChildren, !n.hasChildren))
+ }
+ for i := 0; i < n.nrSegments; i++ {
+ if child := n.children[i]; child != nil {
+ cprefix := fmt.Sprintf("%s- % 3d ", prefix, i)
+ if child.parent != n || child.parentIndex != i {
+ buf.WriteString(cprefix)
+ buf.WriteString(fmt.Sprintf("WARNING: inconsistent linkage to parent: got (%p, %d), want (%p, %d)\n", child.parent, child.parentIndex, n, i))
+ }
+ child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, i))
+ }
+ buf.WriteString(prefix)
+ if n.hasChildren {
+ if pmatrackGaps != 0 {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v, maxGap: %d\n", i, n.keys[i], n.values[i], n.maxGap.Get()))
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
+ }
+ if child := n.children[n.nrSegments]; child != nil {
+ child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, n.nrSegments))
+ }
+}
+
+// SegmentDataSlices represents segments from a set as slices of start, end, and
+// values. SegmentDataSlices is primarily used as an intermediate representation
+// for save/restore and the layout here is optimized for that.
+//
+// +stateify savable
+type pmaSegmentDataSlices struct {
+ Start []__generics_imported0.Addr
+ End []__generics_imported0.Addr
+ Values []pma
+}
+
+// ExportSortedSlices returns a copy of all segments in the given set, in
+// ascending key order.
+func (s *pmaSet) ExportSortedSlices() *pmaSegmentDataSlices {
+ var sds pmaSegmentDataSlices
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ sds.Start = append(sds.Start, seg.Start())
+ sds.End = append(sds.End, seg.End())
+ sds.Values = append(sds.Values, seg.Value())
+ }
+ sds.Start = sds.Start[:len(sds.Start):len(sds.Start)]
+ sds.End = sds.End[:len(sds.End):len(sds.End)]
+ sds.Values = sds.Values[:len(sds.Values):len(sds.Values)]
+ return &sds
+}
+
+// ImportSortedSlices initializes the given set from the given slices.
+//
+// Preconditions:
+// * s must be empty.
+// * sds must represent a valid set (the segments in sds must have valid
+// lengths that do not overlap).
+// * The segments in sds must be sorted in ascending key order.
+func (s *pmaSet) ImportSortedSlices(sds *pmaSegmentDataSlices) error {
+ if !s.IsEmpty() {
+ return fmt.Errorf("cannot import into non-empty set %v", s)
+ }
+ gap := s.FirstGap()
+ for i := range sds.Start {
+ r := __generics_imported0.AddrRange{sds.Start[i], sds.End[i]}
+ if !gap.Range().IsSupersetOf(r) {
+ return fmt.Errorf("segment overlaps a preceding segment or is incorrectly sorted: [%d, %d) => %v", sds.Start[i], sds.End[i], sds.Values[i])
+ }
+ gap = s.InsertWithoutMerging(gap, r, sds.Values[i]).NextGap()
+ }
+ return nil
+}
+
+// segmentTestCheck returns an error if s is incorrectly sorted, does not
+// contain exactly expectedSegments segments, or contains a segment which
+// fails the passed check.
+//
+// This should be used only for testing, and has been added to this package for
+// templating convenience.
+func (s *pmaSet) segmentTestCheck(expectedSegments int, segFunc func(int, __generics_imported0.AddrRange, pma) error) error {
+ havePrev := false
+ prev := __generics_imported0.Addr(0)
+ nrSegments := 0
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ next := seg.Start()
+ if havePrev && prev >= next {
+ return fmt.Errorf("incorrect order: key %d (segment %d) >= key %d (segment %d)", prev, nrSegments-1, next, nrSegments)
+ }
+ if segFunc != nil {
+ if err := segFunc(nrSegments, seg.Range(), seg.Value()); err != nil {
+ return err
+ }
+ }
+ prev = next
+ havePrev = true
+ nrSegments++
+ }
+ if nrSegments != expectedSegments {
+ return fmt.Errorf("incorrect number of segments: got %d, wanted %d", nrSegments, expectedSegments)
+ }
+ return nil
+}
+
+// countSegments counts the number of segments in the set.
+//
+// Similar to Check, this should only be used for testing.
+func (s *pmaSet) countSegments() (segments int) {
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ segments++
+ }
+ return segments
+}
+func (s *pmaSet) saveRoot() *pmaSegmentDataSlices {
+ return s.ExportSortedSlices()
+}
+
+func (s *pmaSet) loadRoot(sds *pmaSegmentDataSlices) {
+ if err := s.ImportSortedSlices(sds); err != nil {
+ panic(err)
+ }
+}
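+
+// The saveRoot/loadRoot hooks above round-trip the set through
+// SegmentDataSlices. A minimal sketch of that flow, assuming an existing
+// pmaSet s and an empty pmaSet restored (both names are illustrative only):
+//
+//	sds := s.ExportSortedSlices() // flatten segments into sorted slices
+//	var restored pmaSet
+//	if err := restored.ImportSortedSlices(sds); err != nil {
+//		// Only fails if restored is non-empty or sds is malformed
+//		// (overlapping or unsorted segments).
+//		panic(err)
+//	}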
diff --git a/pkg/sentry/mm/special_mappable_refs.go b/pkg/sentry/mm/special_mappable_refs.go
new file mode 100644
index 000000000..386a9fa3b
--- /dev/null
+++ b/pkg/sentry/mm/special_mappable_refs.go
@@ -0,0 +1,140 @@
+package mm
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const SpecialMappableenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var SpecialMappableobj *SpecialMappable
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type SpecialMappableRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *SpecialMappableRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *SpecialMappableRefs) RefType() string {
+ return fmt.Sprintf("%T", SpecialMappableobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *SpecialMappableRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *SpecialMappableRefs) LogRefs() bool {
+ return SpecialMappableenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *SpecialMappableRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *SpecialMappableRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if SpecialMappableenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.TryRefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *SpecialMappableRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if SpecialMappableenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
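+
+// To illustrate the encoding above with concrete numbers (a worked sketch,
+// not part of the template): with 2 real references, refCount == 2. TryIncRef
+// first adds 1<<32, giving 0x1_00000002; int32(v) == 2 != 0, so the caller
+// holds a speculative reference. It then adds -(1<<32)+1, leaving refCount ==
+// 3, i.e. the speculative reference has been converted into a real one. If
+// instead refCount had been 0, int32(v) would be 0, the speculative reference
+// would be rolled back, and TryIncRef would return false.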
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *SpecialMappableRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if SpecialMappableenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *SpecialMappableRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
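+
+// Typical lifecycle of these refs, as a hedged usage sketch (the destroy
+// callback body and the standalone variable are illustrative; in practice the
+// struct is embedded in SpecialMappable):
+//
+//	var r SpecialMappableRefs
+//	r.InitRefs()   // count = 1, registered for leak checking
+//	r.IncRef()     // count = 2
+//	if r.TryIncRef() {
+//		r.DecRef(nil) // count back to 2
+//	}
+//	r.DecRef(nil) // count = 1
+//	r.DecRef(func() { /* release resources */ }) // count = 0: destroy runs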
diff --git a/pkg/sentry/mm/vma_set.go b/pkg/sentry/mm/vma_set.go
new file mode 100644
index 000000000..298c86629
--- /dev/null
+++ b/pkg/sentry/mm/vma_set.go
@@ -0,0 +1,1647 @@
+package mm
+
+import (
+ __generics_imported0 "gvisor.dev/gvisor/pkg/hostarch"
+)
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// trackGaps is an optional parameter.
+//
+// If trackGaps is 1, the Set will track maximum gap size recursively,
+// enabling the GapIterator.{Prev,Next}LargeEnoughGap functions. In this
+// case, Key must be an unsigned integer.
+//
+// trackGaps must be 0 or 1.
+const vmatrackGaps = 1
+
+var _ = uint8(vmatrackGaps << 7) // Will fail if not zero or one.
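+
+// Because vmatrackGaps is 1, the gap iterators defined below can skip over
+// regions that are too small. A minimal sketch of finding a free range of at
+// least length bytes starting at or above addr (s, addr, and length are
+// illustrative placeholders):
+//
+//	gap := s.LowerBoundGap(addr)
+//	if gap.Ok() && gap.Range().Length() < length {
+//		gap = gap.NextLargeEnoughGap(length)
+//	}
+//	if gap.Ok() {
+//		// gap.Range() now spans at least length bytes.
+//	}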
+
+// dynamicGap is a type that disappears if trackGaps is 0.
+type vmadynamicGap [vmatrackGaps]__generics_imported0.Addr
+
+// Get returns the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *vmadynamicGap) Get() __generics_imported0.Addr {
+ return d[:][0]
+}
+
+// Set sets the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *vmadynamicGap) Set(v __generics_imported0.Addr) {
+ d[:][0] = v
+}
+
+const (
+ // minDegree is the minimum degree of an internal node in a Set B-tree.
+ //
+ // - Any non-root node has at least minDegree-1 segments.
+ //
+ // - Any non-root internal (non-leaf) node has at least minDegree children.
+ //
+ // - The root node may have fewer than minDegree-1 segments, but it may
+ // only have 0 segments if the tree is empty.
+ //
+ // Our implementation requires minDegree >= 3. Higher values of minDegree
+ // usually improve performance, but increase memory usage for small sets.
+ vmaminDegree = 8
+
+ vmamaxDegree = 2 * vmaminDegree
+)
+
+// A Set is a mapping of segments with non-overlapping Range keys. The zero
+// value for a Set is an empty set. Set values are not safely movable nor
+// copyable. Set is thread-compatible.
+//
+// +stateify savable
+type vmaSet struct {
+ root vmanode `state:".(*vmaSegmentDataSlices)"`
+}
+
+// IsEmpty returns true if the set contains no segments.
+func (s *vmaSet) IsEmpty() bool {
+ return s.root.nrSegments == 0
+}
+
+// IsEmptyRange returns true iff no segments in the set overlap the given
+// range. This is semantically equivalent to s.SpanRange(r) == 0, but may be
+// more efficient.
+func (s *vmaSet) IsEmptyRange(r __generics_imported0.AddrRange) bool {
+ switch {
+ case r.Length() < 0:
+ panic(fmt.Sprintf("invalid range %v", r))
+ case r.Length() == 0:
+ return true
+ }
+ _, gap := s.Find(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ return r.End <= gap.End()
+}
+
+// Span returns the total size of all segments in the set.
+func (s *vmaSet) Span() __generics_imported0.Addr {
+ var sz __generics_imported0.Addr
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ sz += seg.Range().Length()
+ }
+ return sz
+}
+
+// SpanRange returns the total size of the intersection of segments in the set
+// with the given range.
+func (s *vmaSet) SpanRange(r __generics_imported0.AddrRange) __generics_imported0.Addr {
+ switch {
+ case r.Length() < 0:
+ panic(fmt.Sprintf("invalid range %v", r))
+ case r.Length() == 0:
+ return 0
+ }
+ var sz __generics_imported0.Addr
+ for seg := s.LowerBoundSegment(r.Start); seg.Ok() && seg.Start() < r.End; seg = seg.NextSegment() {
+ sz += seg.Range().Intersect(r).Length()
+ }
+ return sz
+}
+
+// FirstSegment returns the first segment in the set. If the set is empty,
+// FirstSegment returns a terminal iterator.
+func (s *vmaSet) FirstSegment() vmaIterator {
+ if s.root.nrSegments == 0 {
+ return vmaIterator{}
+ }
+ return s.root.firstSegment()
+}
+
+// LastSegment returns the last segment in the set. If the set is empty,
+// LastSegment returns a terminal iterator.
+func (s *vmaSet) LastSegment() vmaIterator {
+ if s.root.nrSegments == 0 {
+ return vmaIterator{}
+ }
+ return s.root.lastSegment()
+}
+
+// FirstGap returns the first gap in the set.
+func (s *vmaSet) FirstGap() vmaGapIterator {
+ n := &s.root
+ for n.hasChildren {
+ n = n.children[0]
+ }
+ return vmaGapIterator{n, 0}
+}
+
+// LastGap returns the last gap in the set.
+func (s *vmaSet) LastGap() vmaGapIterator {
+ n := &s.root
+ for n.hasChildren {
+ n = n.children[n.nrSegments]
+ }
+ return vmaGapIterator{n, n.nrSegments}
+}
+
+// Find returns the segment or gap whose range contains the given key. If a
+// segment is found, the returned Iterator is non-terminal and the
+// returned GapIterator is terminal. Otherwise, the returned Iterator is
+// terminal and the returned GapIterator is non-terminal.
+func (s *vmaSet) Find(key __generics_imported0.Addr) (vmaIterator, vmaGapIterator) {
+ n := &s.root
+ for {
+
+ lower := 0
+ upper := n.nrSegments
+ for lower < upper {
+ i := lower + (upper-lower)/2
+ if r := n.keys[i]; key < r.End {
+ if key >= r.Start {
+ return vmaIterator{n, i}, vmaGapIterator{}
+ }
+ upper = i
+ } else {
+ lower = i + 1
+ }
+ }
+ i := lower
+ if !n.hasChildren {
+ return vmaIterator{}, vmaGapIterator{n, i}
+ }
+ n = n.children[i]
+ }
+}
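+
+// As a usage sketch (s and addr are placeholders), Find distinguishes mapped
+// from unmapped addresses in a single descent of the tree:
+//
+//	seg, gap := s.Find(addr)
+//	if seg.Ok() {
+//		// addr is covered by the vma seg.Value().
+//	} else {
+//		// addr falls in the unmapped range gap.Range().
+//	}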
+
+// FindSegment returns the segment whose range contains the given key. If no
+// such segment exists, FindSegment returns a terminal iterator.
+func (s *vmaSet) FindSegment(key __generics_imported0.Addr) vmaIterator {
+ seg, _ := s.Find(key)
+ return seg
+}
+
+// LowerBoundSegment returns the segment with the lowest range that contains a
+// key greater than or equal to min. If no such segment exists,
+// LowerBoundSegment returns a terminal iterator.
+func (s *vmaSet) LowerBoundSegment(min __generics_imported0.Addr) vmaIterator {
+ seg, gap := s.Find(min)
+ if seg.Ok() {
+ return seg
+ }
+ return gap.NextSegment()
+}
+
+// UpperBoundSegment returns the segment with the highest range that contains a
+// key less than or equal to max. If no such segment exists, UpperBoundSegment
+// returns a terminal iterator.
+func (s *vmaSet) UpperBoundSegment(max __generics_imported0.Addr) vmaIterator {
+ seg, gap := s.Find(max)
+ if seg.Ok() {
+ return seg
+ }
+ return gap.PrevSegment()
+}
+
+// FindGap returns the gap containing the given key. If no such gap exists
+// (i.e. the set contains a segment containing that key), FindGap returns a
+// terminal iterator.
+func (s *vmaSet) FindGap(key __generics_imported0.Addr) vmaGapIterator {
+ _, gap := s.Find(key)
+ return gap
+}
+
+// LowerBoundGap returns the gap with the lowest range that is greater than or
+// equal to min.
+func (s *vmaSet) LowerBoundGap(min __generics_imported0.Addr) vmaGapIterator {
+ seg, gap := s.Find(min)
+ if gap.Ok() {
+ return gap
+ }
+ return seg.NextGap()
+}
+
+// UpperBoundGap returns the gap with the highest range that is less than or
+// equal to max.
+func (s *vmaSet) UpperBoundGap(max __generics_imported0.Addr) vmaGapIterator {
+ seg, gap := s.Find(max)
+ if gap.Ok() {
+ return gap
+ }
+ return seg.PrevGap()
+}
+
+// Add inserts the given segment into the set and returns true. If the new
+// segment can be merged with adjacent segments, Add will do so. If the new
+// segment would overlap an existing segment, Add returns false. If Add
+// succeeds, all existing iterators are invalidated.
+func (s *vmaSet) Add(r __generics_imported0.AddrRange, val vma) bool {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ gap := s.FindGap(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ if r.End > gap.End() {
+ return false
+ }
+ s.Insert(gap, r, val)
+ return true
+}
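+
+// A hedged sketch of Add (the range literal and zero value are illustrative):
+// Add fails without modifying the set when the range overlaps an existing
+// segment, so the boolean result must be checked.
+//
+//	var v vma // zero value stands in for a real vma here
+//	if !s.Add(__generics_imported0.AddrRange{0x400000, 0x401000}, v) {
+//		// The range overlaps an existing segment; nothing was inserted.
+//	}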
+
+// AddWithoutMerging inserts the given segment into the set and returns true.
+// If it would overlap an existing segment, AddWithoutMerging does nothing and
+// returns false. If AddWithoutMerging succeeds, all existing iterators are
+// invalidated.
+func (s *vmaSet) AddWithoutMerging(r __generics_imported0.AddrRange, val vma) bool {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ gap := s.FindGap(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ if r.End > gap.End() {
+ return false
+ }
+ s.InsertWithoutMergingUnchecked(gap, r, val)
+ return true
+}
+
+// Insert inserts the given segment into the given gap. If the new segment can
+// be merged with adjacent segments, Insert will do so. Insert returns an
+// iterator to the segment containing the inserted value (which may have been
+// merged with other values). All existing iterators (including gap, but not
+// including the returned iterator) are invalidated.
+//
+// If the gap cannot accommodate the segment, or if r is invalid, Insert panics.
+//
+// Insert is semantically equivalent to an InsertWithoutMerging followed by a
+// Merge, but may be more efficient. Note that there is no unchecked variant of
+// Insert since Insert must retrieve and inspect gap's predecessor and
+// successor segments regardless.
+func (s *vmaSet) Insert(gap vmaGapIterator, r __generics_imported0.AddrRange, val vma) vmaIterator {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ prev, next := gap.PrevSegment(), gap.NextSegment()
+ if prev.Ok() && prev.End() > r.Start {
+ panic(fmt.Sprintf("new segment %v overlaps predecessor %v", r, prev.Range()))
+ }
+ if next.Ok() && next.Start() < r.End {
+ panic(fmt.Sprintf("new segment %v overlaps successor %v", r, next.Range()))
+ }
+ if prev.Ok() && prev.End() == r.Start {
+ if mval, ok := (vmaSetFunctions{}).Merge(prev.Range(), prev.Value(), r, val); ok {
+ shrinkMaxGap := vmatrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
+ prev.SetEndUnchecked(r.End)
+ prev.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ if next.Ok() && next.Start() == r.End {
+ val = mval
+ if mval, ok := (vmaSetFunctions{}).Merge(prev.Range(), val, next.Range(), next.Value()); ok {
+ prev.SetEndUnchecked(next.End())
+ prev.SetValue(mval)
+ return s.Remove(next).PrevSegment()
+ }
+ }
+ return prev
+ }
+ }
+ if next.Ok() && next.Start() == r.End {
+ if mval, ok := (vmaSetFunctions{}).Merge(r, val, next.Range(), next.Value()); ok {
+ shrinkMaxGap := vmatrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
+ next.SetStartUnchecked(r.Start)
+ next.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ return next
+ }
+ }
+
+ return s.InsertWithoutMergingUnchecked(gap, r, val)
+}
+
+// InsertWithoutMerging inserts the given segment into the given gap and
+// returns an iterator to the inserted segment. All existing iterators
+// (including gap, but not including the returned iterator) are invalidated.
+//
+// If the gap cannot accommodate the segment, or if r is invalid,
+// InsertWithoutMerging panics.
+func (s *vmaSet) InsertWithoutMerging(gap vmaGapIterator, r __generics_imported0.AddrRange, val vma) vmaIterator {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ if gr := gap.Range(); !gr.IsSupersetOf(r) {
+ panic(fmt.Sprintf("cannot insert segment range %v into gap range %v", r, gr))
+ }
+ return s.InsertWithoutMergingUnchecked(gap, r, val)
+}
+
+// InsertWithoutMergingUnchecked inserts the given segment into the given gap
+// and returns an iterator to the inserted segment. All existing iterators
+// (including gap, but not including the returned iterator) are invalidated.
+//
+// Preconditions:
+// * r.Start >= gap.Start().
+// * r.End <= gap.End().
+func (s *vmaSet) InsertWithoutMergingUnchecked(gap vmaGapIterator, r __generics_imported0.AddrRange, val vma) vmaIterator {
+ gap = gap.node.rebalanceBeforeInsert(gap)
+ splitMaxGap := vmatrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
+ copy(gap.node.keys[gap.index+1:], gap.node.keys[gap.index:gap.node.nrSegments])
+ copy(gap.node.values[gap.index+1:], gap.node.values[gap.index:gap.node.nrSegments])
+ gap.node.keys[gap.index] = r
+ gap.node.values[gap.index] = val
+ gap.node.nrSegments++
+ if splitMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ return vmaIterator{gap.node, gap.index}
+}
+
+// Remove removes the given segment and returns an iterator to the vacated gap.
+// All existing iterators (including seg, but not including the returned
+// iterator) are invalidated.
+func (s *vmaSet) Remove(seg vmaIterator) vmaGapIterator {
+
+ if seg.node.hasChildren {
+
+ victim := seg.PrevSegment()
+
+ seg.SetRangeUnchecked(victim.Range())
+ seg.SetValue(victim.Value())
+
+ nextAdjacentNode := seg.NextSegment().node
+ if vmatrackGaps != 0 {
+ nextAdjacentNode.updateMaxGapLeaf()
+ }
+ return s.Remove(victim).NextGap()
+ }
+ copy(seg.node.keys[seg.index:], seg.node.keys[seg.index+1:seg.node.nrSegments])
+ copy(seg.node.values[seg.index:], seg.node.values[seg.index+1:seg.node.nrSegments])
+ vmaSetFunctions{}.ClearValue(&seg.node.values[seg.node.nrSegments-1])
+ seg.node.nrSegments--
+ if vmatrackGaps != 0 {
+ seg.node.updateMaxGapLeaf()
+ }
+ return seg.node.rebalanceAfterRemove(vmaGapIterator{seg.node, seg.index})
+}
+
+// RemoveAll removes all segments from the set. All existing iterators are
+// invalidated.
+func (s *vmaSet) RemoveAll() {
+ s.root = vmanode{}
+}
+
+// RemoveRange removes all segments in the given range. An iterator to the
+// newly formed gap is returned, and all existing iterators are invalidated.
+func (s *vmaSet) RemoveRange(r __generics_imported0.AddrRange) vmaGapIterator {
+ seg, gap := s.Find(r.Start)
+ if seg.Ok() {
+ seg = s.Isolate(seg, r)
+ gap = s.Remove(seg)
+ }
+ for seg = gap.NextSegment(); seg.Ok() && seg.Start() < r.End; seg = gap.NextSegment() {
+ seg = s.Isolate(seg, r)
+ gap = s.Remove(seg)
+ }
+ return gap
+}
+
+// Merge attempts to merge two neighboring segments. If successful, Merge
+// returns an iterator to the merged segment, and all existing iterators are
+// invalidated. Otherwise, Merge returns a terminal iterator.
+//
+// If first is not the predecessor of second, Merge panics.
+func (s *vmaSet) Merge(first, second vmaIterator) vmaIterator {
+ if first.NextSegment() != second {
+ panic(fmt.Sprintf("attempt to merge non-neighboring segments %v, %v", first.Range(), second.Range()))
+ }
+ return s.MergeUnchecked(first, second)
+}
+
+// MergeUnchecked attempts to merge two neighboring segments. If successful,
+// MergeUnchecked returns an iterator to the merged segment, and all existing
+// iterators are invalidated. Otherwise, MergeUnchecked returns a terminal
+// iterator.
+//
+// Precondition: first is the predecessor of second: first.NextSegment() ==
+// second, first == second.PrevSegment().
+func (s *vmaSet) MergeUnchecked(first, second vmaIterator) vmaIterator {
+ if first.End() == second.Start() {
+ if mval, ok := (vmaSetFunctions{}).Merge(first.Range(), first.Value(), second.Range(), second.Value()); ok {
+
+ first.SetEndUnchecked(second.End())
+ first.SetValue(mval)
+
+ return s.Remove(second).PrevSegment()
+ }
+ }
+ return vmaIterator{}
+}
+
+// MergeAll attempts to merge all adjacent segments in the set. All existing
+// iterators are invalidated.
+func (s *vmaSet) MergeAll() {
+ seg := s.FirstSegment()
+ if !seg.Ok() {
+ return
+ }
+ next := seg.NextSegment()
+ for next.Ok() {
+ if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
+ seg, next = mseg, mseg.NextSegment()
+ } else {
+ seg, next = next, next.NextSegment()
+ }
+ }
+}
+
+// MergeRange attempts to merge all adjacent segments that contain a key in the
+// specified range. All existing iterators are invalidated.
+func (s *vmaSet) MergeRange(r __generics_imported0.AddrRange) {
+ seg := s.LowerBoundSegment(r.Start)
+ if !seg.Ok() {
+ return
+ }
+ next := seg.NextSegment()
+ for next.Ok() && next.Range().Start < r.End {
+ if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
+ seg, next = mseg, mseg.NextSegment()
+ } else {
+ seg, next = next, next.NextSegment()
+ }
+ }
+}
+
+// MergeAdjacent attempts to merge the segment containing r.Start with its
+// predecessor, and the segment containing r.End-1 with its successor.
+func (s *vmaSet) MergeAdjacent(r __generics_imported0.AddrRange) {
+ first := s.FindSegment(r.Start)
+ if first.Ok() {
+ if prev := first.PrevSegment(); prev.Ok() {
+ s.Merge(prev, first)
+ }
+ }
+ last := s.FindSegment(r.End - 1)
+ if last.Ok() {
+ if next := last.NextSegment(); next.Ok() {
+ s.Merge(last, next)
+ }
+ }
+}
+
+// Split splits the given segment at the given key and returns iterators to the
+// two resulting segments. All existing iterators (including seg, but not
+// including the returned iterators) are invalidated.
+//
+// If the segment cannot be split at split (because split is at the start or
+// end of the segment's range, so splitting would produce a segment with zero
+// length, or because split falls outside the segment's range altogether),
+// Split panics.
+func (s *vmaSet) Split(seg vmaIterator, split __generics_imported0.Addr) (vmaIterator, vmaIterator) {
+ if !seg.Range().CanSplitAt(split) {
+ panic(fmt.Sprintf("can't split %v at %v", seg.Range(), split))
+ }
+ return s.SplitUnchecked(seg, split)
+}
+
+// SplitUnchecked splits the given segment at the given key and returns
+// iterators to the two resulting segments. All existing iterators (including
+// seg, but not including the returned iterators) are invalidated.
+//
+// Preconditions: seg.Start() < split < seg.End().
+func (s *vmaSet) SplitUnchecked(seg vmaIterator, split __generics_imported0.Addr) (vmaIterator, vmaIterator) {
+ val1, val2 := (vmaSetFunctions{}).Split(seg.Range(), seg.Value(), split)
+ end2 := seg.End()
+ seg.SetEndUnchecked(split)
+ seg.SetValue(val1)
+ seg2 := s.InsertWithoutMergingUnchecked(seg.NextGap(), __generics_imported0.AddrRange{split, end2}, val2)
+
+ return seg2.PrevSegment(), seg2
+}
+
+// SplitAt splits the segment straddling split, if one exists. SplitAt returns
+// true if a segment was split and false otherwise. If SplitAt splits a
+// segment, all existing iterators are invalidated.
+func (s *vmaSet) SplitAt(split __generics_imported0.Addr) bool {
+ if seg := s.FindSegment(split); seg.Ok() && seg.Range().CanSplitAt(split) {
+ s.SplitUnchecked(seg, split)
+ return true
+ }
+ return false
+}
+
+// Isolate ensures that the given segment's range does not escape r by
+// splitting at r.Start and r.End if necessary, and returns an updated iterator
+// to the bounded segment. All existing iterators (including seg, but not
+// including the returned iterator) are invalidated.
+func (s *vmaSet) Isolate(seg vmaIterator, r __generics_imported0.AddrRange) vmaIterator {
+ if seg.Range().CanSplitAt(r.Start) {
+ _, seg = s.SplitUnchecked(seg, r.Start)
+ }
+ if seg.Range().CanSplitAt(r.End) {
+ seg, _ = s.SplitUnchecked(seg, r.End)
+ }
+ return seg
+}
+
+// ApplyContiguous applies a function to a contiguous range of segments,
+// splitting if necessary. The function is applied until the first gap is
+// encountered, at which point the gap is returned. If the function is applied
+// across the entire range, a terminal gap is returned. All existing iterators
+// are invalidated.
+//
+// N.B. The Iterator must not be invalidated by the function.
+func (s *vmaSet) ApplyContiguous(r __generics_imported0.AddrRange, fn func(seg vmaIterator)) vmaGapIterator {
+ seg, gap := s.Find(r.Start)
+ if !seg.Ok() {
+ return gap
+ }
+ for {
+ seg = s.Isolate(seg, r)
+ fn(seg)
+ if seg.End() >= r.End {
+ return vmaGapIterator{}
+ }
+ gap = seg.NextGap()
+ if !gap.IsEmpty() {
+ return gap
+ }
+ seg = gap.NextSegment()
+ if !seg.Ok() {
+
+ return vmaGapIterator{}
+ }
+ }
+}
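+
+// A sketch of ApplyContiguous (r and the mutation are placeholders): the
+// callback sees each vma already isolated to r, and a non-terminal gap return
+// value means the range was not fully covered.
+//
+//	if gap := s.ApplyContiguous(r, func(seg vmaIterator) {
+//		// Mutate *seg.ValuePtr() here; seg is already isolated to r.
+//	}); gap.Ok() {
+//		// r is not contiguously mapped; gap is the first hole.
+//	}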
+
+// +stateify savable
+type vmanode struct {
+ // An internal binary tree node looks like:
+ //
+ // K
+ // / \
+ // Cl Cr
+ //
+ // where all keys in the subtree rooted by Cl (the left subtree) are less
+ // than K (the key of the parent node), and all keys in the subtree rooted
+ // by Cr (the right subtree) are greater than K.
+ //
+ // An internal B-tree node's indexes work out to look like:
+ //
+ // K0 K1 K2 ... Kn-1
+ // / \/ \/ \ ... / \
+ // C0 C1 C2 C3 ... Cn-1 Cn
+ //
+ // where n is nrSegments.
+ nrSegments int
+
+ // parent is a pointer to this node's parent. If this node is root, parent
+ // is nil.
+ parent *vmanode
+
+ // parentIndex is the index of this node in parent.children.
+ parentIndex int
+
+ // Flag for internal nodes that is technically redundant with "children[0]
+ // != nil", but is stored in the first cache line. "hasChildren" rather
+ // than "isLeaf" because false must be the correct value for an empty root.
+ hasChildren bool
+
+ // The longest gap within this node. If the node is a leaf, it's simply the
+ // maximum gap among all the (nrSegments+1) gaps formed by its nrSegments keys
+ // including the 0th and nrSegments-th gap possibly shared with its upper-level
+ // nodes; if it's a non-leaf node, it's the max of all children's maxGap.
+ maxGap vmadynamicGap
+
+ // Nodes store keys and values in separate arrays to maximize locality in
+ // the common case (scanning keys for lookup).
+ keys [vmamaxDegree - 1]__generics_imported0.AddrRange
+ values [vmamaxDegree - 1]vma
+ children [vmamaxDegree]*vmanode
+}
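+
+// With vmaminDegree == 8 (so vmamaxDegree == 16), the invariants above work
+// out as follows: every node stores at most vmamaxDegree-1 == 15 segments,
+// every non-root node stores at least vmaminDegree-1 == 7, and an internal
+// node with nrSegments segments always has nrSegments+1 children (between 8
+// and 16 for a non-root internal node).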
+
+// firstSegment returns the first segment in the subtree rooted by n.
+//
+// Preconditions: n.nrSegments != 0.
+func (n *vmanode) firstSegment() vmaIterator {
+ for n.hasChildren {
+ n = n.children[0]
+ }
+ return vmaIterator{n, 0}
+}
+
+// lastSegment returns the last segment in the subtree rooted by n.
+//
+// Preconditions: n.nrSegments != 0.
+func (n *vmanode) lastSegment() vmaIterator {
+ for n.hasChildren {
+ n = n.children[n.nrSegments]
+ }
+ return vmaIterator{n, n.nrSegments - 1}
+}
+
+func (n *vmanode) prevSibling() *vmanode {
+ if n.parent == nil || n.parentIndex == 0 {
+ return nil
+ }
+ return n.parent.children[n.parentIndex-1]
+}
+
+func (n *vmanode) nextSibling() *vmanode {
+ if n.parent == nil || n.parentIndex == n.parent.nrSegments {
+ return nil
+ }
+ return n.parent.children[n.parentIndex+1]
+}
+
+// rebalanceBeforeInsert splits n and its ancestors if they are full, as
+// required for insertion, and returns an updated iterator to the position
+// represented by gap.
+func (n *vmanode) rebalanceBeforeInsert(gap vmaGapIterator) vmaGapIterator {
+ if n.nrSegments < vmamaxDegree-1 {
+ return gap
+ }
+ if n.parent != nil {
+ gap = n.parent.rebalanceBeforeInsert(gap)
+ }
+ if n.parent == nil {
+
+ left := &vmanode{
+ nrSegments: vmaminDegree - 1,
+ parent: n,
+ parentIndex: 0,
+ hasChildren: n.hasChildren,
+ }
+ right := &vmanode{
+ nrSegments: vmaminDegree - 1,
+ parent: n,
+ parentIndex: 1,
+ hasChildren: n.hasChildren,
+ }
+ copy(left.keys[:vmaminDegree-1], n.keys[:vmaminDegree-1])
+ copy(left.values[:vmaminDegree-1], n.values[:vmaminDegree-1])
+ copy(right.keys[:vmaminDegree-1], n.keys[vmaminDegree:])
+ copy(right.values[:vmaminDegree-1], n.values[vmaminDegree:])
+ n.keys[0], n.values[0] = n.keys[vmaminDegree-1], n.values[vmaminDegree-1]
+ vmazeroValueSlice(n.values[1:])
+ if n.hasChildren {
+ copy(left.children[:vmaminDegree], n.children[:vmaminDegree])
+ copy(right.children[:vmaminDegree], n.children[vmaminDegree:])
+ vmazeroNodeSlice(n.children[2:])
+ for i := 0; i < vmaminDegree; i++ {
+ left.children[i].parent = left
+ left.children[i].parentIndex = i
+ right.children[i].parent = right
+ right.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments = 1
+ n.hasChildren = true
+ n.children[0] = left
+ n.children[1] = right
+
+ if vmatrackGaps != 0 {
+ left.updateMaxGapLocal()
+ right.updateMaxGapLocal()
+ }
+ if gap.node != n {
+ return gap
+ }
+ if gap.index < vmaminDegree {
+ return vmaGapIterator{left, gap.index}
+ }
+ return vmaGapIterator{right, gap.index - vmaminDegree}
+ }
+
+ copy(n.parent.keys[n.parentIndex+1:], n.parent.keys[n.parentIndex:n.parent.nrSegments])
+ copy(n.parent.values[n.parentIndex+1:], n.parent.values[n.parentIndex:n.parent.nrSegments])
+ n.parent.keys[n.parentIndex], n.parent.values[n.parentIndex] = n.keys[vmaminDegree-1], n.values[vmaminDegree-1]
+ copy(n.parent.children[n.parentIndex+2:], n.parent.children[n.parentIndex+1:n.parent.nrSegments+1])
+ for i := n.parentIndex + 2; i < n.parent.nrSegments+2; i++ {
+ n.parent.children[i].parentIndex = i
+ }
+ sibling := &vmanode{
+ nrSegments: vmaminDegree - 1,
+ parent: n.parent,
+ parentIndex: n.parentIndex + 1,
+ hasChildren: n.hasChildren,
+ }
+ n.parent.children[n.parentIndex+1] = sibling
+ n.parent.nrSegments++
+ copy(sibling.keys[:vmaminDegree-1], n.keys[vmaminDegree:])
+ copy(sibling.values[:vmaminDegree-1], n.values[vmaminDegree:])
+ vmazeroValueSlice(n.values[vmaminDegree-1:])
+ if n.hasChildren {
+ copy(sibling.children[:vmaminDegree], n.children[vmaminDegree:])
+ vmazeroNodeSlice(n.children[vmaminDegree:])
+ for i := 0; i < vmaminDegree; i++ {
+ sibling.children[i].parent = sibling
+ sibling.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments = vmaminDegree - 1
+
+ if vmatrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+
+ if gap.node != n {
+ return gap
+ }
+ if gap.index < vmaminDegree {
+ return gap
+ }
+ return vmaGapIterator{sibling, gap.index - vmaminDegree}
+}
+
+// rebalanceAfterRemove "unsplits" n and its ancestors if they are deficient
+// (contain fewer segments than required by B-tree invariants), as required for
+// removal, and returns an updated iterator to the position represented by gap.
+//
+// Precondition: n is the only node in the tree that may currently violate a
+// B-tree invariant.
+func (n *vmanode) rebalanceAfterRemove(gap vmaGapIterator) vmaGapIterator {
+ for {
+ if n.nrSegments >= vmaminDegree-1 {
+ return gap
+ }
+ if n.parent == nil {
+
+ return gap
+ }
+
+ if sibling := n.prevSibling(); sibling != nil && sibling.nrSegments >= vmaminDegree {
+ copy(n.keys[1:], n.keys[:n.nrSegments])
+ copy(n.values[1:], n.values[:n.nrSegments])
+ n.keys[0] = n.parent.keys[n.parentIndex-1]
+ n.values[0] = n.parent.values[n.parentIndex-1]
+ n.parent.keys[n.parentIndex-1] = sibling.keys[sibling.nrSegments-1]
+ n.parent.values[n.parentIndex-1] = sibling.values[sibling.nrSegments-1]
+ vmaSetFunctions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
+ if n.hasChildren {
+ copy(n.children[1:], n.children[:n.nrSegments+1])
+ n.children[0] = sibling.children[sibling.nrSegments]
+ sibling.children[sibling.nrSegments] = nil
+ n.children[0].parent = n
+ n.children[0].parentIndex = 0
+ for i := 1; i < n.nrSegments+2; i++ {
+ n.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments++
+ sibling.nrSegments--
+
+ if vmatrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+ if gap.node == sibling && gap.index == sibling.nrSegments {
+ return vmaGapIterator{n, 0}
+ }
+ if gap.node == n {
+ return vmaGapIterator{n, gap.index + 1}
+ }
+ return gap
+ }
+ if sibling := n.nextSibling(); sibling != nil && sibling.nrSegments >= vmaminDegree {
+ n.keys[n.nrSegments] = n.parent.keys[n.parentIndex]
+ n.values[n.nrSegments] = n.parent.values[n.parentIndex]
+ n.parent.keys[n.parentIndex] = sibling.keys[0]
+ n.parent.values[n.parentIndex] = sibling.values[0]
+ copy(sibling.keys[:sibling.nrSegments-1], sibling.keys[1:])
+ copy(sibling.values[:sibling.nrSegments-1], sibling.values[1:])
+ vmaSetFunctions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
+ if n.hasChildren {
+ n.children[n.nrSegments+1] = sibling.children[0]
+ copy(sibling.children[:sibling.nrSegments], sibling.children[1:])
+ sibling.children[sibling.nrSegments] = nil
+ n.children[n.nrSegments+1].parent = n
+ n.children[n.nrSegments+1].parentIndex = n.nrSegments + 1
+ for i := 0; i < sibling.nrSegments; i++ {
+ sibling.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments++
+ sibling.nrSegments--
+
+ if vmatrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+ if gap.node == sibling {
+ if gap.index == 0 {
+ return vmaGapIterator{n, n.nrSegments}
+ }
+ return vmaGapIterator{sibling, gap.index - 1}
+ }
+ return gap
+ }
+
+ p := n.parent
+ if p.nrSegments == 1 {
+
+ left, right := p.children[0], p.children[1]
+ p.nrSegments = left.nrSegments + right.nrSegments + 1
+ p.hasChildren = left.hasChildren
+ p.keys[left.nrSegments] = p.keys[0]
+ p.values[left.nrSegments] = p.values[0]
+ copy(p.keys[:left.nrSegments], left.keys[:left.nrSegments])
+ copy(p.values[:left.nrSegments], left.values[:left.nrSegments])
+ copy(p.keys[left.nrSegments+1:], right.keys[:right.nrSegments])
+ copy(p.values[left.nrSegments+1:], right.values[:right.nrSegments])
+ if left.hasChildren {
+ copy(p.children[:left.nrSegments+1], left.children[:left.nrSegments+1])
+ copy(p.children[left.nrSegments+1:], right.children[:right.nrSegments+1])
+ for i := 0; i < p.nrSegments+1; i++ {
+ p.children[i].parent = p
+ p.children[i].parentIndex = i
+ }
+ } else {
+ p.children[0] = nil
+ p.children[1] = nil
+ }
+
+ if gap.node == left {
+ return vmaGapIterator{p, gap.index}
+ }
+ if gap.node == right {
+ return vmaGapIterator{p, gap.index + left.nrSegments + 1}
+ }
+ return gap
+ }
+ // Merge n and either sibling, along with the segment separating the
+ // two, into whichever of the two nodes comes first. This is the
+ // reverse of the non-root splitting case in
+ // node.rebalanceBeforeInsert.
+ var left, right *vmanode
+ if n.parentIndex > 0 {
+ left = n.prevSibling()
+ right = n
+ } else {
+ left = n
+ right = n.nextSibling()
+ }
+
+ if gap.node == right {
+ gap = vmaGapIterator{left, gap.index + left.nrSegments + 1}
+ }
+ left.keys[left.nrSegments] = p.keys[left.parentIndex]
+ left.values[left.nrSegments] = p.values[left.parentIndex]
+ copy(left.keys[left.nrSegments+1:], right.keys[:right.nrSegments])
+ copy(left.values[left.nrSegments+1:], right.values[:right.nrSegments])
+ if left.hasChildren {
+ copy(left.children[left.nrSegments+1:], right.children[:right.nrSegments+1])
+ for i := left.nrSegments + 1; i < left.nrSegments+right.nrSegments+2; i++ {
+ left.children[i].parent = left
+ left.children[i].parentIndex = i
+ }
+ }
+ left.nrSegments += right.nrSegments + 1
+ copy(p.keys[left.parentIndex:], p.keys[left.parentIndex+1:p.nrSegments])
+ copy(p.values[left.parentIndex:], p.values[left.parentIndex+1:p.nrSegments])
+ vmaSetFunctions{}.ClearValue(&p.values[p.nrSegments-1])
+ copy(p.children[left.parentIndex+1:], p.children[left.parentIndex+2:p.nrSegments+1])
+ for i := 0; i < p.nrSegments; i++ {
+ p.children[i].parentIndex = i
+ }
+ p.children[p.nrSegments] = nil
+ p.nrSegments--
+
+ if vmatrackGaps != 0 {
+ left.updateMaxGapLocal()
+ }
+
+ n = p
+ }
+}
+
+// updateMaxGapLeaf updates maxGap bottom-up from the calling leaf until no
+// further update is necessary.
+//
+// Preconditions: n must be a leaf node, trackGaps must be 1.
+func (n *vmanode) updateMaxGapLeaf() {
+ if n.hasChildren {
+ panic(fmt.Sprintf("updateMaxGapLeaf should always be called on leaf node: %v", n))
+ }
+ max := n.calculateMaxGapLeaf()
+ if max == n.maxGap.Get() {
+
+ return
+ }
+ oldMax := n.maxGap.Get()
+ n.maxGap.Set(max)
+ if max > oldMax {
+
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() >= max {
+
+ break
+ }
+
+ p.maxGap.Set(max)
+ }
+ return
+ }
+
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() > oldMax {
+
+ break
+ }
+
+ parentNewMax := p.calculateMaxGapInternal()
+ if p.maxGap.Get() == parentNewMax {
+
+ break
+ }
+
+ p.maxGap.Set(parentNewMax)
+ }
+}
+
+// updateMaxGapLocal updates only the calling node's maxGap, without
+// propagating to ancestor nodes.
+//
+// Precondition: trackGaps must be 1.
+func (n *vmanode) updateMaxGapLocal() {
+ if !n.hasChildren {
+
+ n.maxGap.Set(n.calculateMaxGapLeaf())
+ } else {
+
+ n.maxGap.Set(n.calculateMaxGapInternal())
+ }
+}
+
+// calculateMaxGapLeaf iterates over the gaps within a leaf node and returns
+// the maximum.
+//
+// Preconditions: n must be a leaf node.
+func (n *vmanode) calculateMaxGapLeaf() __generics_imported0.Addr {
+ max := vmaGapIterator{n, 0}.Range().Length()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := (vmaGapIterator{n, i}).Range().Length(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// calculateMaxGapInternal iterates over the children's maxGap values within an
+// internal node n and returns the maximum.
+//
+// Preconditions: n must be a non-leaf node.
+func (n *vmanode) calculateMaxGapInternal() __generics_imported0.Addr {
+ max := n.children[0].maxGap.Get()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := n.children[i].maxGap.Get(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// searchFirstLargeEnoughGap returns the first gap of length at least minSize
+// in the subtree rooted by n. If no such gap exists, it returns a terminal gap
+// iterator.
+func (n *vmanode) searchFirstLargeEnoughGap(minSize __generics_imported0.Addr) vmaGapIterator {
+ if n.maxGap.Get() < minSize {
+ return vmaGapIterator{}
+ }
+ if n.hasChildren {
+ for i := 0; i <= n.nrSegments; i++ {
+ if largeEnoughGap := n.children[i].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := 0; i <= n.nrSegments; i++ {
+ currentGap := vmaGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
+// searchLastLargeEnoughGap returns the last gap of length at least minSize
+// in the subtree rooted by n. If no such gap exists, it returns a terminal gap
+// iterator.
+func (n *vmanode) searchLastLargeEnoughGap(minSize __generics_imported0.Addr) vmaGapIterator {
+ if n.maxGap.Get() < minSize {
+ return vmaGapIterator{}
+ }
+ if n.hasChildren {
+ for i := n.nrSegments; i >= 0; i-- {
+ if largeEnoughGap := n.children[i].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := n.nrSegments; i >= 0; i-- {
+ currentGap := vmaGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
+// An Iterator is conceptually one of:
+//
+// - A pointer to a segment in a set; or
+//
+// - A terminal iterator, which is a sentinel indicating that the end of
+// iteration has been reached.
+//
+// Iterators are copyable values and are meaningfully equality-comparable. The
+// zero value of Iterator is a terminal iterator.
+//
+// Unless otherwise specified, any mutation of a set invalidates all existing
+// iterators into the set.
+type vmaIterator struct {
+ // node is the node containing the iterated segment. If the iterator is
+ // terminal, node is nil.
+ node *vmanode
+
+ // index is the index of the segment in node.keys/values.
+ index int
+}
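+
+// The canonical traversal with these iterators (s is a placeholder set):
+//
+//	for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+//		// seg.Range() and seg.Value() describe one vma.
+//	}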
+
+// Ok returns true if the iterator is not terminal. All other methods are only
+// valid for non-terminal iterators.
+func (seg vmaIterator) Ok() bool {
+ return seg.node != nil
+}
+
+// Range returns the iterated segment's range key.
+func (seg vmaIterator) Range() __generics_imported0.AddrRange {
+ return seg.node.keys[seg.index]
+}
+
+// Start is equivalent to Range().Start, but should be preferred if only the
+// start of the range is needed.
+func (seg vmaIterator) Start() __generics_imported0.Addr {
+ return seg.node.keys[seg.index].Start
+}
+
+// End is equivalent to Range().End, but should be preferred if only the end of
+// the range is needed.
+func (seg vmaIterator) End() __generics_imported0.Addr {
+ return seg.node.keys[seg.index].End
+}
+
+// SetRangeUnchecked mutates the iterated segment's range key. This operation
+// does not invalidate any iterators.
+//
+// Preconditions:
+// * r.Length() > 0.
+// * The new range must not overlap an existing one:
+// * If seg.NextSegment().Ok(), then r.End <= seg.NextSegment().Start().
+// * If seg.PrevSegment().Ok(), then r.Start >= seg.PrevSegment().End().
+func (seg vmaIterator) SetRangeUnchecked(r __generics_imported0.AddrRange) {
+ seg.node.keys[seg.index] = r
+}
+
+// SetRange mutates the iterated segment's range key. If the new range would
+// cause the iterated segment to overlap another segment, or if the new range
+// is invalid, SetRange panics. This operation does not invalidate any
+// iterators.
+func (seg vmaIterator) SetRange(r __generics_imported0.AddrRange) {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ if prev := seg.PrevSegment(); prev.Ok() && r.Start < prev.End() {
+ panic(fmt.Sprintf("new segment range %v overlaps segment range %v", r, prev.Range()))
+ }
+ if next := seg.NextSegment(); next.Ok() && r.End > next.Start() {
+ panic(fmt.Sprintf("new segment range %v overlaps segment range %v", r, next.Range()))
+ }
+ seg.SetRangeUnchecked(r)
+}
+
+// SetStartUnchecked mutates the iterated segment's start. This operation does
+// not invalidate any iterators.
+//
+// Preconditions: The new start must be valid:
+// * start < seg.End()
+// * If seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
+func (seg vmaIterator) SetStartUnchecked(start __generics_imported0.Addr) {
+ seg.node.keys[seg.index].Start = start
+}
+
+// SetStart mutates the iterated segment's start. If the new start value would
+// cause the iterated segment to overlap another segment, or would result in an
+// invalid range, SetStart panics. This operation does not invalidate any
+// iterators.
+func (seg vmaIterator) SetStart(start __generics_imported0.Addr) {
+ if start >= seg.End() {
+ panic(fmt.Sprintf("new start %v would invalidate segment range %v", start, seg.Range()))
+ }
+ if prev := seg.PrevSegment(); prev.Ok() && start < prev.End() {
+ panic(fmt.Sprintf("new start %v would cause segment range %v to overlap segment range %v", start, seg.Range(), prev.Range()))
+ }
+ seg.SetStartUnchecked(start)
+}
+
+// SetEndUnchecked mutates the iterated segment's end. This operation does not
+// invalidate any iterators.
+//
+// Preconditions: The new end must be valid:
+// * end > seg.Start().
+// * If seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
+func (seg vmaIterator) SetEndUnchecked(end __generics_imported0.Addr) {
+ seg.node.keys[seg.index].End = end
+}
+
+// SetEnd mutates the iterated segment's end. If the new end value would cause
+// the iterated segment to overlap another segment, or would result in an
+// invalid range, SetEnd panics. This operation does not invalidate any
+// iterators.
+func (seg vmaIterator) SetEnd(end __generics_imported0.Addr) {
+ if end <= seg.Start() {
+ panic(fmt.Sprintf("new end %v would invalidate segment range %v", end, seg.Range()))
+ }
+ if next := seg.NextSegment(); next.Ok() && end > next.Start() {
+ panic(fmt.Sprintf("new end %v would cause segment range %v to overlap segment range %v", end, seg.Range(), next.Range()))
+ }
+ seg.SetEndUnchecked(end)
+}
+
+// Value returns a copy of the iterated segment's value.
+func (seg vmaIterator) Value() vma {
+ return seg.node.values[seg.index]
+}
+
+// ValuePtr returns a pointer to the iterated segment's value. The pointer is
+// invalidated if the iterator is invalidated. This operation does not
+// invalidate any iterators.
+func (seg vmaIterator) ValuePtr() *vma {
+ return &seg.node.values[seg.index]
+}
+
+// SetValue mutates the iterated segment's value. This operation does not
+// invalidate any iterators.
+func (seg vmaIterator) SetValue(val vma) {
+ seg.node.values[seg.index] = val
+}
+
+// PrevSegment returns the iterated segment's predecessor. If there is no
+// preceding segment, PrevSegment returns a terminal iterator.
+func (seg vmaIterator) PrevSegment() vmaIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index].lastSegment()
+ }
+ if seg.index > 0 {
+ return vmaIterator{seg.node, seg.index - 1}
+ }
+ if seg.node.parent == nil {
+ return vmaIterator{}
+ }
+ return vmasegmentBeforePosition(seg.node.parent, seg.node.parentIndex)
+}
+
+// NextSegment returns the iterated segment's successor. If there is no
+// succeeding segment, NextSegment returns a terminal iterator.
+func (seg vmaIterator) NextSegment() vmaIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index+1].firstSegment()
+ }
+ if seg.index < seg.node.nrSegments-1 {
+ return vmaIterator{seg.node, seg.index + 1}
+ }
+ if seg.node.parent == nil {
+ return vmaIterator{}
+ }
+ return vmasegmentAfterPosition(seg.node.parent, seg.node.parentIndex)
+}
+
+// PrevGap returns the gap immediately before the iterated segment.
+func (seg vmaIterator) PrevGap() vmaGapIterator {
+ if seg.node.hasChildren {
+
+ return seg.node.children[seg.index].lastSegment().NextGap()
+ }
+ return vmaGapIterator{seg.node, seg.index}
+}
+
+// NextGap returns the gap immediately after the iterated segment.
+func (seg vmaIterator) NextGap() vmaGapIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index+1].firstSegment().PrevGap()
+ }
+ return vmaGapIterator{seg.node, seg.index + 1}
+}
+
+// PrevNonEmpty returns the iterated segment's predecessor if it is adjacent,
+// or the gap before the iterated segment otherwise. If seg.Start() ==
+// Functions.MinKey(), PrevNonEmpty will return two terminal iterators.
+// Otherwise, exactly one of the iterators returned by PrevNonEmpty will be
+// non-terminal.
+func (seg vmaIterator) PrevNonEmpty() (vmaIterator, vmaGapIterator) {
+ gap := seg.PrevGap()
+ if gap.Range().Length() != 0 {
+ return vmaIterator{}, gap
+ }
+ return gap.PrevSegment(), vmaGapIterator{}
+}
+
+// NextNonEmpty returns the iterated segment's successor if it is adjacent, or
+// the gap after the iterated segment otherwise. If seg.End() ==
+// Functions.MaxKey(), NextNonEmpty will return two terminal iterators.
+// Otherwise, exactly one of the iterators returned by NextNonEmpty will be
+// non-terminal.
+func (seg vmaIterator) NextNonEmpty() (vmaIterator, vmaGapIterator) {
+ gap := seg.NextGap()
+ if gap.Range().Length() != 0 {
+ return vmaIterator{}, gap
+ }
+ return gap.NextSegment(), vmaGapIterator{}
+}
+
+// A GapIterator is conceptually one of:
+//
+// - A pointer to a position between two segments, before the first segment, or
+// after the last segment in a set, called a *gap*; or
+//
+// - A terminal iterator, which is a sentinel indicating that the end of
+// iteration has been reached.
+//
+// Note that the gap between two adjacent segments exists (iterators to it are
+// non-terminal), but has a length of zero. GapIterator.IsEmpty returns true
+// for such gaps. An empty set contains a single gap, spanning the entire range
+// of the set's keys.
+//
+// GapIterators are copyable values and are meaningfully equality-comparable.
+// The zero value of GapIterator is a terminal iterator.
+//
+// Unless otherwise specified, any mutation of a set invalidates all existing
+// iterators into the set.
+type vmaGapIterator struct {
+ // The representation of a GapIterator is identical to that of an Iterator,
+ // except that index corresponds to positions between segments in the same
+ // way as for node.children (see comment for node.nrSegments).
+ node *vmanode
+ index int
+}
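+
+// A corresponding sketch for gaps (again with s as a placeholder set):
+// skipping empty gaps yields only the genuinely unmapped ranges.
+//
+//	for gap := s.FirstGap(); gap.Ok(); gap = gap.NextGap() {
+//		if !gap.IsEmpty() {
+//			// gap.Range() is an unmapped address range.
+//		}
+//	}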
+
+// Ok returns true if the iterator is not terminal. All other methods are only
+// valid for non-terminal iterators.
+func (gap vmaGapIterator) Ok() bool {
+ return gap.node != nil
+}
+
+// Range returns the range spanned by the iterated gap.
+func (gap vmaGapIterator) Range() __generics_imported0.AddrRange {
+ return __generics_imported0.AddrRange{gap.Start(), gap.End()}
+}
+
+// Start is equivalent to Range().Start, but should be preferred if only the
+// start of the range is needed.
+func (gap vmaGapIterator) Start() __generics_imported0.Addr {
+ if ps := gap.PrevSegment(); ps.Ok() {
+ return ps.End()
+ }
+ return vmaSetFunctions{}.MinKey()
+}
+
+// End is equivalent to Range().End, but should be preferred if only the end of
+// the range is needed.
+func (gap vmaGapIterator) End() __generics_imported0.Addr {
+ if ns := gap.NextSegment(); ns.Ok() {
+ return ns.Start()
+ }
+ return vmaSetFunctions{}.MaxKey()
+}
+
+// IsEmpty returns true if the iterated gap is empty (that is, the "gap" is
+// between two adjacent segments).
+func (gap vmaGapIterator) IsEmpty() bool {
+ return gap.Range().Length() == 0
+}
+
+// PrevSegment returns the segment immediately before the iterated gap. If no
+// such segment exists, PrevSegment returns a terminal iterator.
+func (gap vmaGapIterator) PrevSegment() vmaIterator {
+ return vmasegmentBeforePosition(gap.node, gap.index)
+}
+
+// NextSegment returns the segment immediately after the iterated gap. If no
+// such segment exists, NextSegment returns a terminal iterator.
+func (gap vmaGapIterator) NextSegment() vmaIterator {
+ return vmasegmentAfterPosition(gap.node, gap.index)
+}
+
+// PrevGap returns the iterated gap's predecessor. If no such gap exists,
+// PrevGap returns a terminal iterator.
+func (gap vmaGapIterator) PrevGap() vmaGapIterator {
+ seg := gap.PrevSegment()
+ if !seg.Ok() {
+ return vmaGapIterator{}
+ }
+ return seg.PrevGap()
+}
+
+// NextGap returns the iterated gap's successor. If no such gap exists, NextGap
+// returns a terminal iterator.
+func (gap vmaGapIterator) NextGap() vmaGapIterator {
+ seg := gap.NextSegment()
+ if !seg.Ok() {
+ return vmaGapIterator{}
+ }
+ return seg.NextGap()
+}
+
+// NextLargeEnoughGap returns the first subsequent gap whose length is at
+// least minSize. If no such gap exists, it returns a terminal gap iterator
+// (the search does NOT include this gap itself).
+//
+// Precondition: trackGaps must be 1.
+func (gap vmaGapIterator) NextLargeEnoughGap(minSize __generics_imported0.Addr) vmaGapIterator {
+ if vmatrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == gap.node.nrSegments {
+
+ gap.node = gap.NextSegment().node
+ gap.index = 0
+ return gap.nextLargeEnoughGapHelper(minSize)
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
+
+// nextLargeEnoughGapHelper is the helper function used by NextLargeEnoughGap
+// to perform the real recursion.
+//
+// Preconditions: gap is NOT the trailing gap of a non-leaf node.
+func (gap vmaGapIterator) nextLargeEnoughGapHelper(minSize __generics_imported0.Addr) vmaGapIterator {
+
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == gap.node.nrSegments)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return vmaGapIterator{}
+ }
+
+ gap.index++
+ for gap.index <= gap.node.nrSegments {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index++
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == gap.node.nrSegments {
+
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
+
+// PrevLargeEnoughGap returns the first preceding gap whose length is at
+// least minSize. If no such gap exists, it returns a terminal gap iterator
+// (the search does NOT include this gap itself).
+//
+// Precondition: trackGaps must be 1.
+func (gap vmaGapIterator) PrevLargeEnoughGap(minSize __generics_imported0.Addr) vmaGapIterator {
+ if vmatrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == 0 {
+
+ gap.node = gap.PrevSegment().node
+ gap.index = gap.node.nrSegments
+ return gap.prevLargeEnoughGapHelper(minSize)
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
+
+// prevLargeEnoughGapHelper is the helper function used by PrevLargeEnoughGap
+// to perform the real recursion.
+//
+// Preconditions: gap is NOT the first gap of a non-leaf node.
+func (gap vmaGapIterator) prevLargeEnoughGapHelper(minSize __generics_imported0.Addr) vmaGapIterator {
+
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == 0)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return vmaGapIterator{}
+ }
+
+ gap.index--
+ for gap.index >= 0 {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index--
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == 0 {
+
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
+
+// segmentBeforePosition returns the predecessor segment of the position given
+// by n.children[i], which may or may not contain a child. If no such segment
+// exists, segmentBeforePosition returns a terminal iterator.
+func vmasegmentBeforePosition(n *vmanode, i int) vmaIterator {
+ for i == 0 {
+ if n.parent == nil {
+ return vmaIterator{}
+ }
+ n, i = n.parent, n.parentIndex
+ }
+ return vmaIterator{n, i - 1}
+}
+
+// segmentAfterPosition returns the successor segment of the position given by
+// n.children[i], which may or may not contain a child. If no such segment
+// exists, segmentAfterPosition returns a terminal iterator.
+func vmasegmentAfterPosition(n *vmanode, i int) vmaIterator {
+ for i == n.nrSegments {
+ if n.parent == nil {
+ return vmaIterator{}
+ }
+ n, i = n.parent, n.parentIndex
+ }
+ return vmaIterator{n, i}
+}
+
+func vmazeroValueSlice(slice []vma) {
+
+ for i := range slice {
+ vmaSetFunctions{}.ClearValue(&slice[i])
+ }
+}
+
+func vmazeroNodeSlice(slice []*vmanode) {
+ for i := range slice {
+ slice[i] = nil
+ }
+}
+
+// String stringifies a Set for debugging.
+func (s *vmaSet) String() string {
+ return s.root.String()
+}
+
+// String stringifies a node (and all of its children) for debugging.
+func (n *vmanode) String() string {
+ var buf bytes.Buffer
+ n.writeDebugString(&buf, "")
+ return buf.String()
+}
+
+func (n *vmanode) writeDebugString(buf *bytes.Buffer, prefix string) {
+ if n.hasChildren != (n.nrSegments > 0 && n.children[0] != nil) {
+ buf.WriteString(prefix)
+ buf.WriteString(fmt.Sprintf("WARNING: inconsistent value of hasChildren: got %v, want %v\n", n.hasChildren, !n.hasChildren))
+ }
+ for i := 0; i < n.nrSegments; i++ {
+ if child := n.children[i]; child != nil {
+ cprefix := fmt.Sprintf("%s- % 3d ", prefix, i)
+ if child.parent != n || child.parentIndex != i {
+ buf.WriteString(cprefix)
+ buf.WriteString(fmt.Sprintf("WARNING: inconsistent linkage to parent: got (%p, %d), want (%p, %d)\n", child.parent, child.parentIndex, n, i))
+ }
+ child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, i))
+ }
+ buf.WriteString(prefix)
+ if n.hasChildren {
+ if vmatrackGaps != 0 {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v, maxGap: %d\n", i, n.keys[i], n.values[i], n.maxGap.Get()))
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
+ }
+ if child := n.children[n.nrSegments]; child != nil {
+ child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, n.nrSegments))
+ }
+}
+
+// SegmentDataSlices represents segments from a set as slices of start, end, and
+// values. SegmentDataSlices is primarily used as an intermediate representation
+// for save/restore and the layout here is optimized for that.
+//
+// +stateify savable
+type vmaSegmentDataSlices struct {
+ Start []__generics_imported0.Addr
+ End []__generics_imported0.Addr
+ Values []vma
+}
+
+// ExportSortedSlices returns a copy of all segments in the given set, in
+// ascending key order.
+func (s *vmaSet) ExportSortedSlices() *vmaSegmentDataSlices {
+ var sds vmaSegmentDataSlices
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ sds.Start = append(sds.Start, seg.Start())
+ sds.End = append(sds.End, seg.End())
+ sds.Values = append(sds.Values, seg.Value())
+ }
+ sds.Start = sds.Start[:len(sds.Start):len(sds.Start)]
+ sds.End = sds.End[:len(sds.End):len(sds.End)]
+ sds.Values = sds.Values[:len(sds.Values):len(sds.Values)]
+ return &sds
+}
+
+// ImportSortedSlices initializes the given set from the given slice.
+//
+// Preconditions:
+// * s must be empty.
+// * sds must represent a valid set (the segments in sds must have valid
+// lengths that do not overlap).
+// * The segments in sds must be sorted in ascending key order.
+func (s *vmaSet) ImportSortedSlices(sds *vmaSegmentDataSlices) error {
+ if !s.IsEmpty() {
+ return fmt.Errorf("cannot import into non-empty set %v", s)
+ }
+ gap := s.FirstGap()
+ for i := range sds.Start {
+ r := __generics_imported0.AddrRange{sds.Start[i], sds.End[i]}
+ if !gap.Range().IsSupersetOf(r) {
+ return fmt.Errorf("segment overlaps a preceding segment or is incorrectly sorted: [%d, %d) => %v", sds.Start[i], sds.End[i], sds.Values[i])
+ }
+ gap = s.InsertWithoutMerging(gap, r, sds.Values[i]).NextGap()
+ }
+ return nil
+}
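A brief sketch of the save/restore round trip that ExportSortedSlices and
ImportSortedSlices support, assuming code in the same package (the helper is
hypothetical):

// copySet flattens src into sorted slices and rebuilds the empty set dst from
// them, mirroring what saveRoot and loadRoot below do during save/restore.
func copySet(src, dst *vmaSet) error {
	sds := src.ExportSortedSlices()
	return dst.ImportSortedSlices(sds) // fails unless dst is empty
}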
+
+// segmentTestCheck returns an error if s is incorrectly sorted, does not
+// contain exactly expectedSegments segments, or contains a segment which
+// fails the passed check.
+//
+// This should be used only for testing, and has been added to this package for
+// templating convenience.
+func (s *vmaSet) segmentTestCheck(expectedSegments int, segFunc func(int, __generics_imported0.AddrRange, vma) error) error {
+ havePrev := false
+ prev := __generics_imported0.Addr(0)
+ nrSegments := 0
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ next := seg.Start()
+ if havePrev && prev >= next {
+ return fmt.Errorf("incorrect order: key %d (segment %d) >= key %d (segment %d)", prev, nrSegments-1, next, nrSegments)
+ }
+ if segFunc != nil {
+ if err := segFunc(nrSegments, seg.Range(), seg.Value()); err != nil {
+ return err
+ }
+ }
+ prev = next
+ havePrev = true
+ nrSegments++
+ }
+ if nrSegments != expectedSegments {
+ return fmt.Errorf("incorrect number of segments: got %d, wanted %d", nrSegments, expectedSegments)
+ }
+ return nil
+}
+
+// countSegments counts the number of segments in the set.
+//
+// Similar to Check, this should only be used for testing.
+func (s *vmaSet) countSegments() (segments int) {
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ segments++
+ }
+ return segments
+}
+func (s *vmaSet) saveRoot() *vmaSegmentDataSlices {
+ return s.ExportSortedSlices()
+}
+
+func (s *vmaSet) loadRoot(sds *vmaSegmentDataSlices) {
+ if err := s.ImportSortedSlices(sds); err != nil {
+ panic(err)
+ }
+}
diff --git a/pkg/sentry/pgalloc/BUILD b/pkg/sentry/pgalloc/BUILD
deleted file mode 100644
index d351869ef..000000000
--- a/pkg/sentry/pgalloc/BUILD
+++ /dev/null
@@ -1,112 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "evictable_range",
- out = "evictable_range.go",
- package = "pgalloc",
- prefix = "Evictable",
- template = "//pkg/segment:generic_range",
- types = {
- "T": "uint64",
- },
-)
-
-go_template_instance(
- name = "evictable_range_set",
- out = "evictable_range_set.go",
- package = "pgalloc",
- prefix = "evictableRange",
- template = "//pkg/segment:generic_set",
- types = {
- "Key": "uint64",
- "Range": "EvictableRange",
- "Value": "evictableRangeSetValue",
- "Functions": "evictableRangeSetFunctions",
- },
-)
-
-go_template_instance(
- name = "usage_set",
- out = "usage_set.go",
- consts = {
- "minDegree": "10",
- "trackGaps": "1",
- },
- imports = {
- "memmap": "gvisor.dev/gvisor/pkg/sentry/memmap",
- },
- package = "pgalloc",
- prefix = "usage",
- template = "//pkg/segment:generic_set",
- types = {
- "Key": "uint64",
- "Range": "memmap.FileRange",
- "Value": "usageInfo",
- "Functions": "usageSetFunctions",
- },
-)
-
-go_template_instance(
- name = "reclaim_set",
- out = "reclaim_set.go",
- consts = {
- "minDegree": "10",
- },
- imports = {
- "memmap": "gvisor.dev/gvisor/pkg/sentry/memmap",
- },
- package = "pgalloc",
- prefix = "reclaim",
- template = "//pkg/segment:generic_set",
- types = {
- "Key": "uint64",
- "Range": "memmap.FileRange",
- "Value": "reclaimSetValue",
- "Functions": "reclaimSetFunctions",
- },
-)
-
-go_library(
- name = "pgalloc",
- srcs = [
- "context.go",
- "evictable_range.go",
- "evictable_range_set.go",
- "pgalloc.go",
- "pgalloc_unsafe.go",
- "reclaim_set.go",
- "save_restore.go",
- "usage_set.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/memutil",
- "//pkg/safemem",
- "//pkg/sentry/arch",
- "//pkg/sentry/hostmm",
- "//pkg/sentry/memmap",
- "//pkg/sentry/usage",
- "//pkg/state",
- "//pkg/state/wire",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "pgalloc_test",
- size = "small",
- srcs = ["pgalloc_test.go"],
- library = ":pgalloc",
- deps = ["//pkg/hostarch"],
-)
diff --git a/pkg/sentry/pgalloc/evictable_range.go b/pkg/sentry/pgalloc/evictable_range.go
new file mode 100644
index 000000000..79f513ac2
--- /dev/null
+++ b/pkg/sentry/pgalloc/evictable_range.go
@@ -0,0 +1,76 @@
+package pgalloc
+
+// A Range represents a contiguous range of T.
+//
+// +stateify savable
+type EvictableRange struct {
+ // Start is the inclusive start of the range.
+ Start uint64
+
+ // End is the exclusive end of the range.
+ End uint64
+}
+
+// WellFormed returns true if r.Start <= r.End. All other methods on a Range
+// require that the Range is well-formed.
+//
+//go:nosplit
+func (r EvictableRange) WellFormed() bool {
+ return r.Start <= r.End
+}
+
+// Length returns the length of the range.
+//
+//go:nosplit
+func (r EvictableRange) Length() uint64 {
+ return r.End - r.Start
+}
+
+// Contains returns true if r contains x.
+//
+//go:nosplit
+func (r EvictableRange) Contains(x uint64) bool {
+ return r.Start <= x && x < r.End
+}
+
+// Overlaps returns true if r and r2 overlap.
+//
+//go:nosplit
+func (r EvictableRange) Overlaps(r2 EvictableRange) bool {
+ return r.Start < r2.End && r2.Start < r.End
+}
+
+// IsSupersetOf returns true if r is a superset of r2; that is, the range r2 is
+// contained within r.
+//
+//go:nosplit
+func (r EvictableRange) IsSupersetOf(r2 EvictableRange) bool {
+ return r.Start <= r2.Start && r.End >= r2.End
+}
+
+// Intersect returns a range consisting of the intersection between r and r2.
+// If r and r2 do not overlap, Intersect returns a range with unspecified
+// bounds, but for which Length() == 0.
+//
+//go:nosplit
+func (r EvictableRange) Intersect(r2 EvictableRange) EvictableRange {
+ if r.Start < r2.Start {
+ r.Start = r2.Start
+ }
+ if r.End > r2.End {
+ r.End = r2.End
+ }
+ if r.End < r.Start {
+ r.End = r.Start
+ }
+ return r
+}
+
+// CanSplitAt returns true if it is legal to split a segment spanning the range
+// r at x; that is, splitting at x would produce two ranges, both of which have
+// non-zero length.
+//
+//go:nosplit
+func (r EvictableRange) CanSplitAt(x uint64) bool {
+ return r.Contains(x) && r.Start < x
+}
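A small sketch of how these range helpers compose, assuming code inside package
pgalloc (the values are arbitrary):

// rangeOps exercises the basic EvictableRange predicates and Intersect.
func rangeOps() {
	a := EvictableRange{Start: 0, End: 8}
	b := EvictableRange{Start: 4, End: 16}

	_ = a.Overlaps(b)     // true: [0, 8) and [4, 16) share [4, 8)
	_ = a.Intersect(b)    // EvictableRange{Start: 4, End: 8}
	_ = a.IsSupersetOf(b) // false: b extends past a.End
	_ = a.CanSplitAt(4)   // true: both resulting halves are non-empty
	_ = a.CanSplitAt(0)   // false: the left half would be empty
}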
diff --git a/pkg/sentry/pgalloc/evictable_range_set.go b/pkg/sentry/pgalloc/evictable_range_set.go
new file mode 100644
index 000000000..c0c712b21
--- /dev/null
+++ b/pkg/sentry/pgalloc/evictable_range_set.go
@@ -0,0 +1,1643 @@
+package pgalloc
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// trackGaps is an optional parameter.
+//
+// If trackGaps is 1, the Set will track maximum gap size recursively,
+// enabling the GapIterator.{Prev,Next}LargeEnoughGap functions. In this
+// case, Key must be an unsigned integer.
+//
+// trackGaps must be 0 or 1.
+const evictableRangetrackGaps = 0
+
+var _ = uint8(evictableRangetrackGaps << 7) // Will fail if not zero or one.
+
+// dynamicGap is a type that disappears if trackGaps is 0.
+type evictableRangedynamicGap [evictableRangetrackGaps]uint64
+
+// Get returns the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *evictableRangedynamicGap) Get() uint64 {
+ return d[:][0]
+}
+
+// Set sets the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *evictableRangedynamicGap) Set(v uint64) {
+ d[:][0] = v
+}
+
+const (
+ // minDegree is the minimum degree of an internal node in a Set B-tree.
+ //
+ // - Any non-root node has at least minDegree-1 segments.
+ //
+ // - Any non-root internal (non-leaf) node has at least minDegree children.
+ //
+ // - The root node may have fewer than minDegree-1 segments, but it may
+ // only have 0 segments if the tree is empty.
+ //
+ // Our implementation requires minDegree >= 3. Higher values of minDegree
+ // usually improve performance, but increase memory usage for small sets.
+ evictableRangeminDegree = 3
+
+ evictableRangemaxDegree = 2 * evictableRangeminDegree
+)
+
+// A Set is a mapping of segments with non-overlapping Range keys. The zero
+// value for a Set is an empty set. Set values are not safely movable nor
+// copyable. Set is thread-compatible.
+//
+// +stateify savable
+type evictableRangeSet struct {
+ root evictableRangenode `state:".(*evictableRangeSegmentDataSlices)"`
+}
+
+// IsEmpty returns true if the set contains no segments.
+func (s *evictableRangeSet) IsEmpty() bool {
+ return s.root.nrSegments == 0
+}
+
+// IsEmptyRange returns true iff no segments in the set overlap the given
+// range. This is semantically equivalent to s.SpanRange(r) == 0, but may be
+// more efficient.
+func (s *evictableRangeSet) IsEmptyRange(r EvictableRange) bool {
+ switch {
+ case r.Length() < 0:
+ panic(fmt.Sprintf("invalid range %v", r))
+ case r.Length() == 0:
+ return true
+ }
+ _, gap := s.Find(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ return r.End <= gap.End()
+}
+
+// Span returns the total size of all segments in the set.
+func (s *evictableRangeSet) Span() uint64 {
+ var sz uint64
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ sz += seg.Range().Length()
+ }
+ return sz
+}
+
+// SpanRange returns the total size of the intersection of segments in the set
+// with the given range.
+func (s *evictableRangeSet) SpanRange(r EvictableRange) uint64 {
+ switch {
+ case r.Length() < 0:
+ panic(fmt.Sprintf("invalid range %v", r))
+ case r.Length() == 0:
+ return 0
+ }
+ var sz uint64
+ for seg := s.LowerBoundSegment(r.Start); seg.Ok() && seg.Start() < r.End; seg = seg.NextSegment() {
+ sz += seg.Range().Intersect(r).Length()
+ }
+ return sz
+}
+
+// FirstSegment returns the first segment in the set. If the set is empty,
+// FirstSegment returns a terminal iterator.
+func (s *evictableRangeSet) FirstSegment() evictableRangeIterator {
+ if s.root.nrSegments == 0 {
+ return evictableRangeIterator{}
+ }
+ return s.root.firstSegment()
+}
+
+// LastSegment returns the last segment in the set. If the set is empty,
+// LastSegment returns a terminal iterator.
+func (s *evictableRangeSet) LastSegment() evictableRangeIterator {
+ if s.root.nrSegments == 0 {
+ return evictableRangeIterator{}
+ }
+ return s.root.lastSegment()
+}
+
+// FirstGap returns the first gap in the set.
+func (s *evictableRangeSet) FirstGap() evictableRangeGapIterator {
+ n := &s.root
+ for n.hasChildren {
+ n = n.children[0]
+ }
+ return evictableRangeGapIterator{n, 0}
+}
+
+// LastGap returns the last gap in the set.
+func (s *evictableRangeSet) LastGap() evictableRangeGapIterator {
+ n := &s.root
+ for n.hasChildren {
+ n = n.children[n.nrSegments]
+ }
+ return evictableRangeGapIterator{n, n.nrSegments}
+}
+
+// Find returns the segment or gap whose range contains the given key. If a
+// segment is found, the returned Iterator is non-terminal and the
+// returned GapIterator is terminal. Otherwise, the returned Iterator is
+// terminal and the returned GapIterator is non-terminal.
+func (s *evictableRangeSet) Find(key uint64) (evictableRangeIterator, evictableRangeGapIterator) {
+ n := &s.root
+ for {
+
+ lower := 0
+ upper := n.nrSegments
+ for lower < upper {
+ i := lower + (upper-lower)/2
+ if r := n.keys[i]; key < r.End {
+ if key >= r.Start {
+ return evictableRangeIterator{n, i}, evictableRangeGapIterator{}
+ }
+ upper = i
+ } else {
+ lower = i + 1
+ }
+ }
+ i := lower
+ if !n.hasChildren {
+ return evictableRangeIterator{}, evictableRangeGapIterator{n, i}
+ }
+ n = n.children[i]
+ }
+}
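A short sketch of how callers typically consume Find's segment-or-gap result,
assuming code in the same package (the helper is hypothetical):

// lookup reports whether key is covered by a segment and, if it is not, the
// free range around it. Exactly one of the two iterators returned by Find is
// non-terminal.
func lookup(s *evictableRangeSet, key uint64) (covered bool, free EvictableRange) {
	seg, gap := s.Find(key)
	if seg.Ok() {
		return true, EvictableRange{}
	}
	return false, gap.Range()
}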
+
+// FindSegment returns the segment whose range contains the given key. If no
+// such segment exists, FindSegment returns a terminal iterator.
+func (s *evictableRangeSet) FindSegment(key uint64) evictableRangeIterator {
+ seg, _ := s.Find(key)
+ return seg
+}
+
+// LowerBoundSegment returns the segment with the lowest range that contains a
+// key greater than or equal to min. If no such segment exists,
+// LowerBoundSegment returns a terminal iterator.
+func (s *evictableRangeSet) LowerBoundSegment(min uint64) evictableRangeIterator {
+ seg, gap := s.Find(min)
+ if seg.Ok() {
+ return seg
+ }
+ return gap.NextSegment()
+}
+
+// UpperBoundSegment returns the segment with the highest range that contains a
+// key less than or equal to max. If no such segment exists, UpperBoundSegment
+// returns a terminal iterator.
+func (s *evictableRangeSet) UpperBoundSegment(max uint64) evictableRangeIterator {
+ seg, gap := s.Find(max)
+ if seg.Ok() {
+ return seg
+ }
+ return gap.PrevSegment()
+}
+
+// FindGap returns the gap containing the given key. If no such gap exists
+// (i.e. the set contains a segment containing that key), FindGap returns a
+// terminal iterator.
+func (s *evictableRangeSet) FindGap(key uint64) evictableRangeGapIterator {
+ _, gap := s.Find(key)
+ return gap
+}
+
+// LowerBoundGap returns the gap with the lowest range that is greater than or
+// equal to min.
+func (s *evictableRangeSet) LowerBoundGap(min uint64) evictableRangeGapIterator {
+ seg, gap := s.Find(min)
+ if gap.Ok() {
+ return gap
+ }
+ return seg.NextGap()
+}
+
+// UpperBoundGap returns the gap with the highest range that is less than or
+// equal to max.
+func (s *evictableRangeSet) UpperBoundGap(max uint64) evictableRangeGapIterator {
+ seg, gap := s.Find(max)
+ if gap.Ok() {
+ return gap
+ }
+ return seg.PrevGap()
+}
+
+// Add inserts the given segment into the set and returns true. If the new
+// segment can be merged with adjacent segments, Add will do so. If the new
+// segment would overlap an existing segment, Add returns false. If Add
+// succeeds, all existing iterators are invalidated.
+func (s *evictableRangeSet) Add(r EvictableRange, val evictableRangeSetValue) bool {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ gap := s.FindGap(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ if r.End > gap.End() {
+ return false
+ }
+ s.Insert(gap, r, val)
+ return true
+}
+
+// AddWithoutMerging inserts the given segment into the set and returns true.
+// If it would overlap an existing segment, AddWithoutMerging does nothing and
+// returns false. If AddWithoutMerging succeeds, all existing iterators are
+// invalidated.
+func (s *evictableRangeSet) AddWithoutMerging(r EvictableRange, val evictableRangeSetValue) bool {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ gap := s.FindGap(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ if r.End > gap.End() {
+ return false
+ }
+ s.InsertWithoutMergingUnchecked(gap, r, val)
+ return true
+}
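A minimal sketch of the checked insertion helpers in use, assuming code in the
same package (the helper and values are hypothetical):

// addPair inserts two adjacent ranges with the same value. Whether they end up
// as one merged segment or two depends on evictableRangeSetFunctions.Merge;
// either Add call returns false if its range overlaps an existing segment.
func addPair(s *evictableRangeSet, val evictableRangeSetValue) bool {
	ok1 := s.Add(EvictableRange{Start: 0, End: 8}, val)
	ok2 := s.Add(EvictableRange{Start: 8, End: 16}, val)
	return ok1 && ok2
}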
+
+// Insert inserts the given segment into the given gap. If the new segment can
+// be merged with adjacent segments, Insert will do so. Insert returns an
+// iterator to the segment containing the inserted value (which may have been
+// merged with other values). All existing iterators (including gap, but not
+// including the returned iterator) are invalidated.
+//
+// If the gap cannot accommodate the segment, or if r is invalid, Insert panics.
+//
+// Insert is semantically equivalent to a InsertWithoutMerging followed by a
+// Merge, but may be more efficient. Note that there is no unchecked variant of
+// Insert since Insert must retrieve and inspect gap's predecessor and
+// successor segments regardless.
+func (s *evictableRangeSet) Insert(gap evictableRangeGapIterator, r EvictableRange, val evictableRangeSetValue) evictableRangeIterator {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ prev, next := gap.PrevSegment(), gap.NextSegment()
+ if prev.Ok() && prev.End() > r.Start {
+ panic(fmt.Sprintf("new segment %v overlaps predecessor %v", r, prev.Range()))
+ }
+ if next.Ok() && next.Start() < r.End {
+ panic(fmt.Sprintf("new segment %v overlaps successor %v", r, next.Range()))
+ }
+ if prev.Ok() && prev.End() == r.Start {
+ if mval, ok := (evictableRangeSetFunctions{}).Merge(prev.Range(), prev.Value(), r, val); ok {
+ shrinkMaxGap := evictableRangetrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
+ prev.SetEndUnchecked(r.End)
+ prev.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ if next.Ok() && next.Start() == r.End {
+ val = mval
+ if mval, ok := (evictableRangeSetFunctions{}).Merge(prev.Range(), val, next.Range(), next.Value()); ok {
+ prev.SetEndUnchecked(next.End())
+ prev.SetValue(mval)
+ return s.Remove(next).PrevSegment()
+ }
+ }
+ return prev
+ }
+ }
+ if next.Ok() && next.Start() == r.End {
+ if mval, ok := (evictableRangeSetFunctions{}).Merge(r, val, next.Range(), next.Value()); ok {
+ shrinkMaxGap := evictableRangetrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
+ next.SetStartUnchecked(r.Start)
+ next.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ return next
+ }
+ }
+
+ return s.InsertWithoutMergingUnchecked(gap, r, val)
+}
+
+// InsertWithoutMerging inserts the given segment into the given gap and
+// returns an iterator to the inserted segment. All existing iterators
+// (including gap, but not including the returned iterator) are invalidated.
+//
+// If the gap cannot accommodate the segment, or if r is invalid,
+// InsertWithoutMerging panics.
+func (s *evictableRangeSet) InsertWithoutMerging(gap evictableRangeGapIterator, r EvictableRange, val evictableRangeSetValue) evictableRangeIterator {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ if gr := gap.Range(); !gr.IsSupersetOf(r) {
+ panic(fmt.Sprintf("cannot insert segment range %v into gap range %v", r, gr))
+ }
+ return s.InsertWithoutMergingUnchecked(gap, r, val)
+}
+
+// InsertWithoutMergingUnchecked inserts the given segment into the given gap
+// and returns an iterator to the inserted segment. All existing iterators
+// (including gap, but not including the returned iterator) are invalidated.
+//
+// Preconditions:
+// * r.Start >= gap.Start().
+// * r.End <= gap.End().
+func (s *evictableRangeSet) InsertWithoutMergingUnchecked(gap evictableRangeGapIterator, r EvictableRange, val evictableRangeSetValue) evictableRangeIterator {
+ gap = gap.node.rebalanceBeforeInsert(gap)
+ splitMaxGap := evictableRangetrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
+ copy(gap.node.keys[gap.index+1:], gap.node.keys[gap.index:gap.node.nrSegments])
+ copy(gap.node.values[gap.index+1:], gap.node.values[gap.index:gap.node.nrSegments])
+ gap.node.keys[gap.index] = r
+ gap.node.values[gap.index] = val
+ gap.node.nrSegments++
+ if splitMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ return evictableRangeIterator{gap.node, gap.index}
+}
+
+// Remove removes the given segment and returns an iterator to the vacated gap.
+// All existing iterators (including seg, but not including the returned
+// iterator) are invalidated.
+func (s *evictableRangeSet) Remove(seg evictableRangeIterator) evictableRangeGapIterator {
+
+ if seg.node.hasChildren {
+
+ victim := seg.PrevSegment()
+
+ seg.SetRangeUnchecked(victim.Range())
+ seg.SetValue(victim.Value())
+
+ nextAdjacentNode := seg.NextSegment().node
+ if evictableRangetrackGaps != 0 {
+ nextAdjacentNode.updateMaxGapLeaf()
+ }
+ return s.Remove(victim).NextGap()
+ }
+ copy(seg.node.keys[seg.index:], seg.node.keys[seg.index+1:seg.node.nrSegments])
+ copy(seg.node.values[seg.index:], seg.node.values[seg.index+1:seg.node.nrSegments])
+ evictableRangeSetFunctions{}.ClearValue(&seg.node.values[seg.node.nrSegments-1])
+ seg.node.nrSegments--
+ if evictableRangetrackGaps != 0 {
+ seg.node.updateMaxGapLeaf()
+ }
+ return seg.node.rebalanceAfterRemove(evictableRangeGapIterator{seg.node, seg.index})
+}
+
+// RemoveAll removes all segments from the set. All existing iterators are
+// invalidated.
+func (s *evictableRangeSet) RemoveAll() {
+ s.root = evictableRangenode{}
+}
+
+// RemoveRange removes all segments in the given range. An iterator to the
+// newly formed gap is returned, and all existing iterators are invalidated.
+func (s *evictableRangeSet) RemoveRange(r EvictableRange) evictableRangeGapIterator {
+ seg, gap := s.Find(r.Start)
+ if seg.Ok() {
+ seg = s.Isolate(seg, r)
+ gap = s.Remove(seg)
+ }
+ for seg = gap.NextSegment(); seg.Ok() && seg.Start() < r.End; seg = gap.NextSegment() {
+ seg = s.Isolate(seg, r)
+ gap = s.Remove(seg)
+ }
+ return gap
+}
+
+// Merge attempts to merge two neighboring segments. If successful, Merge
+// returns an iterator to the merged segment, and all existing iterators are
+// invalidated. Otherwise, Merge returns a terminal iterator.
+//
+// If first is not the predecessor of second, Merge panics.
+func (s *evictableRangeSet) Merge(first, second evictableRangeIterator) evictableRangeIterator {
+ if first.NextSegment() != second {
+ panic(fmt.Sprintf("attempt to merge non-neighboring segments %v, %v", first.Range(), second.Range()))
+ }
+ return s.MergeUnchecked(first, second)
+}
+
+// MergeUnchecked attempts to merge two neighboring segments. If successful,
+// MergeUnchecked returns an iterator to the merged segment, and all existing
+// iterators are invalidated. Otherwise, MergeUnchecked returns a terminal
+// iterator.
+//
+// Precondition: first is the predecessor of second: first.NextSegment() ==
+// second, first == second.PrevSegment().
+func (s *evictableRangeSet) MergeUnchecked(first, second evictableRangeIterator) evictableRangeIterator {
+ if first.End() == second.Start() {
+ if mval, ok := (evictableRangeSetFunctions{}).Merge(first.Range(), first.Value(), second.Range(), second.Value()); ok {
+
+ first.SetEndUnchecked(second.End())
+ first.SetValue(mval)
+
+ return s.Remove(second).PrevSegment()
+ }
+ }
+ return evictableRangeIterator{}
+}
+
+// MergeAll attempts to merge all adjacent segments in the set. All existing
+// iterators are invalidated.
+func (s *evictableRangeSet) MergeAll() {
+ seg := s.FirstSegment()
+ if !seg.Ok() {
+ return
+ }
+ next := seg.NextSegment()
+ for next.Ok() {
+ if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
+ seg, next = mseg, mseg.NextSegment()
+ } else {
+ seg, next = next, next.NextSegment()
+ }
+ }
+}
+
+// MergeRange attempts to merge all adjacent segments that contain a key in the
+// specified range. All existing iterators are invalidated.
+func (s *evictableRangeSet) MergeRange(r EvictableRange) {
+ seg := s.LowerBoundSegment(r.Start)
+ if !seg.Ok() {
+ return
+ }
+ next := seg.NextSegment()
+ for next.Ok() && next.Range().Start < r.End {
+ if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
+ seg, next = mseg, mseg.NextSegment()
+ } else {
+ seg, next = next, next.NextSegment()
+ }
+ }
+}
+
+// MergeAdjacent attempts to merge the segment containing r.Start with its
+// predecessor, and the segment containing r.End-1 with its successor.
+func (s *evictableRangeSet) MergeAdjacent(r EvictableRange) {
+ first := s.FindSegment(r.Start)
+ if first.Ok() {
+ if prev := first.PrevSegment(); prev.Ok() {
+ s.Merge(prev, first)
+ }
+ }
+ last := s.FindSegment(r.End - 1)
+ if last.Ok() {
+ if next := last.NextSegment(); next.Ok() {
+ s.Merge(last, next)
+ }
+ }
+}
+
+// Split splits the given segment at the given key and returns iterators to the
+// two resulting segments. All existing iterators (including seg, but not
+// including the returned iterators) are invalidated.
+//
+// If the segment cannot be split at split (because split is at the start or
+// end of the segment's range, so splitting would produce a segment with zero
+// length, or because split falls outside the segment's range altogether),
+// Split panics.
+func (s *evictableRangeSet) Split(seg evictableRangeIterator, split uint64) (evictableRangeIterator, evictableRangeIterator) {
+ if !seg.Range().CanSplitAt(split) {
+ panic(fmt.Sprintf("can't split %v at %v", seg.Range(), split))
+ }
+ return s.SplitUnchecked(seg, split)
+}
+
+// SplitUnchecked splits the given segment at the given key and returns
+// iterators to the two resulting segments. All existing iterators (including
+// seg, but not including the returned iterators) are invalidated.
+//
+// Preconditions: seg.Start() < split < seg.End().
+func (s *evictableRangeSet) SplitUnchecked(seg evictableRangeIterator, split uint64) (evictableRangeIterator, evictableRangeIterator) {
+ val1, val2 := (evictableRangeSetFunctions{}).Split(seg.Range(), seg.Value(), split)
+ end2 := seg.End()
+ seg.SetEndUnchecked(split)
+ seg.SetValue(val1)
+ seg2 := s.InsertWithoutMergingUnchecked(seg.NextGap(), EvictableRange{split, end2}, val2)
+
+ return seg2.PrevSegment(), seg2
+}
+
+// SplitAt splits the segment straddling split, if one exists. SplitAt returns
+// true if a segment was split and false otherwise. If SplitAt splits a
+// segment, all existing iterators are invalidated.
+func (s *evictableRangeSet) SplitAt(split uint64) bool {
+ if seg := s.FindSegment(split); seg.Ok() && seg.Range().CanSplitAt(split) {
+ s.SplitUnchecked(seg, split)
+ return true
+ }
+ return false
+}
+
+// Isolate ensures that the given segment's range does not escape r by
+// splitting at r.Start and r.End if necessary, and returns an updated iterator
+// to the bounded segment. All existing iterators (including seg, but not
+// including the returned iterators) are invalidated.
+func (s *evictableRangeSet) Isolate(seg evictableRangeIterator, r EvictableRange) evictableRangeIterator {
+ if seg.Range().CanSplitAt(r.Start) {
+ _, seg = s.SplitUnchecked(seg, r.Start)
+ }
+ if seg.Range().CanSplitAt(r.End) {
+ seg, _ = s.SplitUnchecked(seg, r.End)
+ }
+ return seg
+}
+
+// ApplyContiguous applies a function to a contiguous range of segments,
+// splitting if necessary. The function is applied until the first gap is
+// encountered, at which point the gap is returned. If the function is applied
+// across the entire range, a terminal gap is returned. All existing iterators
+// are invalidated.
+//
+// N.B. The Iterator must not be invalidated by the function.
+func (s *evictableRangeSet) ApplyContiguous(r EvictableRange, fn func(seg evictableRangeIterator)) evictableRangeGapIterator {
+ seg, gap := s.Find(r.Start)
+ if !seg.Ok() {
+ return gap
+ }
+ for {
+ seg = s.Isolate(seg, r)
+ fn(seg)
+ if seg.End() >= r.End {
+ return evictableRangeGapIterator{}
+ }
+ gap = seg.NextGap()
+ if !gap.IsEmpty() {
+ return gap
+ }
+ seg = gap.NextSegment()
+ if !seg.Ok() {
+
+ return evictableRangeGapIterator{}
+ }
+ }
+}
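A brief sketch of ApplyContiguous in use, assuming code in the same package
(the helper is hypothetical); the callback only ever sees segments already
split to fit within r:

// touchRange applies fn to every segment overlapping r and reports whether the
// whole range was covered, i.e. whether no gap was hit before r.End.
func touchRange(s *evictableRangeSet, r EvictableRange, fn func(evictableRangeIterator)) bool {
	gap := s.ApplyContiguous(r, fn)
	return !gap.Ok()
}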
+
+// +stateify savable
+type evictableRangenode struct {
+ // An internal binary tree node looks like:
+ //
+ // K
+ // / \
+ // Cl Cr
+ //
+ // where all keys in the subtree rooted by Cl (the left subtree) are less
+ // than K (the key of the parent node), and all keys in the subtree rooted
+ // by Cr (the right subtree) are greater than K.
+ //
+ // An internal B-tree node's indexes work out to look like:
+ //
+ // K0 K1 K2 ... Kn-1
+ // / \/ \/ \ ... / \
+ // C0 C1 C2 C3 ... Cn-1 Cn
+ //
+ // where n is nrSegments.
+ nrSegments int
+
+ // parent is a pointer to this node's parent. If this node is root, parent
+ // is nil.
+ parent *evictableRangenode
+
+ // parentIndex is the index of this node in parent.children.
+ parentIndex int
+
+ // Flag for internal nodes that is technically redundant with "children[0]
+ // != nil", but is stored in the first cache line. "hasChildren" rather
+ // than "isLeaf" because false must be the correct value for an empty root.
+ hasChildren bool
+
+ // The longest gap within this node. If the node is a leaf, it's simply the
+ // maximum gap among all the (nrSegments+1) gaps formed by its nrSegments keys
+ // including the 0th and nrSegments-th gap possibly shared with its upper-level
+ // nodes; if it's a non-leaf node, it's the max of all children's maxGap.
+ maxGap evictableRangedynamicGap
+
+ // Nodes store keys and values in separate arrays to maximize locality in
+ // the common case (scanning keys for lookup).
+ keys [evictableRangemaxDegree - 1]EvictableRange
+ values [evictableRangemaxDegree - 1]evictableRangeSetValue
+ children [evictableRangemaxDegree]*evictableRangenode
+}
+
+// firstSegment returns the first segment in the subtree rooted by n.
+//
+// Preconditions: n.nrSegments != 0.
+func (n *evictableRangenode) firstSegment() evictableRangeIterator {
+ for n.hasChildren {
+ n = n.children[0]
+ }
+ return evictableRangeIterator{n, 0}
+}
+
+// lastSegment returns the last segment in the subtree rooted by n.
+//
+// Preconditions: n.nrSegments != 0.
+func (n *evictableRangenode) lastSegment() evictableRangeIterator {
+ for n.hasChildren {
+ n = n.children[n.nrSegments]
+ }
+ return evictableRangeIterator{n, n.nrSegments - 1}
+}
+
+func (n *evictableRangenode) prevSibling() *evictableRangenode {
+ if n.parent == nil || n.parentIndex == 0 {
+ return nil
+ }
+ return n.parent.children[n.parentIndex-1]
+}
+
+func (n *evictableRangenode) nextSibling() *evictableRangenode {
+ if n.parent == nil || n.parentIndex == n.parent.nrSegments {
+ return nil
+ }
+ return n.parent.children[n.parentIndex+1]
+}
+
+// rebalanceBeforeInsert splits n and its ancestors if they are full, as
+// required for insertion, and returns an updated iterator to the position
+// represented by gap.
+func (n *evictableRangenode) rebalanceBeforeInsert(gap evictableRangeGapIterator) evictableRangeGapIterator {
+ if n.nrSegments < evictableRangemaxDegree-1 {
+ return gap
+ }
+ if n.parent != nil {
+ gap = n.parent.rebalanceBeforeInsert(gap)
+ }
+ if n.parent == nil {
+
+ left := &evictableRangenode{
+ nrSegments: evictableRangeminDegree - 1,
+ parent: n,
+ parentIndex: 0,
+ hasChildren: n.hasChildren,
+ }
+ right := &evictableRangenode{
+ nrSegments: evictableRangeminDegree - 1,
+ parent: n,
+ parentIndex: 1,
+ hasChildren: n.hasChildren,
+ }
+ copy(left.keys[:evictableRangeminDegree-1], n.keys[:evictableRangeminDegree-1])
+ copy(left.values[:evictableRangeminDegree-1], n.values[:evictableRangeminDegree-1])
+ copy(right.keys[:evictableRangeminDegree-1], n.keys[evictableRangeminDegree:])
+ copy(right.values[:evictableRangeminDegree-1], n.values[evictableRangeminDegree:])
+ n.keys[0], n.values[0] = n.keys[evictableRangeminDegree-1], n.values[evictableRangeminDegree-1]
+ evictableRangezeroValueSlice(n.values[1:])
+ if n.hasChildren {
+ copy(left.children[:evictableRangeminDegree], n.children[:evictableRangeminDegree])
+ copy(right.children[:evictableRangeminDegree], n.children[evictableRangeminDegree:])
+ evictableRangezeroNodeSlice(n.children[2:])
+ for i := 0; i < evictableRangeminDegree; i++ {
+ left.children[i].parent = left
+ left.children[i].parentIndex = i
+ right.children[i].parent = right
+ right.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments = 1
+ n.hasChildren = true
+ n.children[0] = left
+ n.children[1] = right
+
+ if evictableRangetrackGaps != 0 {
+ left.updateMaxGapLocal()
+ right.updateMaxGapLocal()
+ }
+ if gap.node != n {
+ return gap
+ }
+ if gap.index < evictableRangeminDegree {
+ return evictableRangeGapIterator{left, gap.index}
+ }
+ return evictableRangeGapIterator{right, gap.index - evictableRangeminDegree}
+ }
+
+ copy(n.parent.keys[n.parentIndex+1:], n.parent.keys[n.parentIndex:n.parent.nrSegments])
+ copy(n.parent.values[n.parentIndex+1:], n.parent.values[n.parentIndex:n.parent.nrSegments])
+ n.parent.keys[n.parentIndex], n.parent.values[n.parentIndex] = n.keys[evictableRangeminDegree-1], n.values[evictableRangeminDegree-1]
+ copy(n.parent.children[n.parentIndex+2:], n.parent.children[n.parentIndex+1:n.parent.nrSegments+1])
+ for i := n.parentIndex + 2; i < n.parent.nrSegments+2; i++ {
+ n.parent.children[i].parentIndex = i
+ }
+ sibling := &evictableRangenode{
+ nrSegments: evictableRangeminDegree - 1,
+ parent: n.parent,
+ parentIndex: n.parentIndex + 1,
+ hasChildren: n.hasChildren,
+ }
+ n.parent.children[n.parentIndex+1] = sibling
+ n.parent.nrSegments++
+ copy(sibling.keys[:evictableRangeminDegree-1], n.keys[evictableRangeminDegree:])
+ copy(sibling.values[:evictableRangeminDegree-1], n.values[evictableRangeminDegree:])
+ evictableRangezeroValueSlice(n.values[evictableRangeminDegree-1:])
+ if n.hasChildren {
+ copy(sibling.children[:evictableRangeminDegree], n.children[evictableRangeminDegree:])
+ evictableRangezeroNodeSlice(n.children[evictableRangeminDegree:])
+ for i := 0; i < evictableRangeminDegree; i++ {
+ sibling.children[i].parent = sibling
+ sibling.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments = evictableRangeminDegree - 1
+
+ if evictableRangetrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+
+ if gap.node != n {
+ return gap
+ }
+ if gap.index < evictableRangeminDegree {
+ return gap
+ }
+ return evictableRangeGapIterator{sibling, gap.index - evictableRangeminDegree}
+}
+
+// rebalanceAfterRemove "unsplits" n and its ancestors if they are deficient
+// (contain fewer segments than required by B-tree invariants), as required for
+// removal, and returns an updated iterator to the position represented by gap.
+//
+// Precondition: n is the only node in the tree that may currently violate a
+// B-tree invariant.
+func (n *evictableRangenode) rebalanceAfterRemove(gap evictableRangeGapIterator) evictableRangeGapIterator {
+ for {
+ if n.nrSegments >= evictableRangeminDegree-1 {
+ return gap
+ }
+ if n.parent == nil {
+
+ return gap
+ }
+
+ if sibling := n.prevSibling(); sibling != nil && sibling.nrSegments >= evictableRangeminDegree {
+ copy(n.keys[1:], n.keys[:n.nrSegments])
+ copy(n.values[1:], n.values[:n.nrSegments])
+ n.keys[0] = n.parent.keys[n.parentIndex-1]
+ n.values[0] = n.parent.values[n.parentIndex-1]
+ n.parent.keys[n.parentIndex-1] = sibling.keys[sibling.nrSegments-1]
+ n.parent.values[n.parentIndex-1] = sibling.values[sibling.nrSegments-1]
+ evictableRangeSetFunctions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
+ if n.hasChildren {
+ copy(n.children[1:], n.children[:n.nrSegments+1])
+ n.children[0] = sibling.children[sibling.nrSegments]
+ sibling.children[sibling.nrSegments] = nil
+ n.children[0].parent = n
+ n.children[0].parentIndex = 0
+ for i := 1; i < n.nrSegments+2; i++ {
+ n.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments++
+ sibling.nrSegments--
+
+ if evictableRangetrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+ if gap.node == sibling && gap.index == sibling.nrSegments {
+ return evictableRangeGapIterator{n, 0}
+ }
+ if gap.node == n {
+ return evictableRangeGapIterator{n, gap.index + 1}
+ }
+ return gap
+ }
+ if sibling := n.nextSibling(); sibling != nil && sibling.nrSegments >= evictableRangeminDegree {
+ n.keys[n.nrSegments] = n.parent.keys[n.parentIndex]
+ n.values[n.nrSegments] = n.parent.values[n.parentIndex]
+ n.parent.keys[n.parentIndex] = sibling.keys[0]
+ n.parent.values[n.parentIndex] = sibling.values[0]
+ copy(sibling.keys[:sibling.nrSegments-1], sibling.keys[1:])
+ copy(sibling.values[:sibling.nrSegments-1], sibling.values[1:])
+ evictableRangeSetFunctions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
+ if n.hasChildren {
+ n.children[n.nrSegments+1] = sibling.children[0]
+ copy(sibling.children[:sibling.nrSegments], sibling.children[1:])
+ sibling.children[sibling.nrSegments] = nil
+ n.children[n.nrSegments+1].parent = n
+ n.children[n.nrSegments+1].parentIndex = n.nrSegments + 1
+ for i := 0; i < sibling.nrSegments; i++ {
+ sibling.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments++
+ sibling.nrSegments--
+
+ if evictableRangetrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+ if gap.node == sibling {
+ if gap.index == 0 {
+ return evictableRangeGapIterator{n, n.nrSegments}
+ }
+ return evictableRangeGapIterator{sibling, gap.index - 1}
+ }
+ return gap
+ }
+
+ p := n.parent
+ if p.nrSegments == 1 {
+
+ left, right := p.children[0], p.children[1]
+ p.nrSegments = left.nrSegments + right.nrSegments + 1
+ p.hasChildren = left.hasChildren
+ p.keys[left.nrSegments] = p.keys[0]
+ p.values[left.nrSegments] = p.values[0]
+ copy(p.keys[:left.nrSegments], left.keys[:left.nrSegments])
+ copy(p.values[:left.nrSegments], left.values[:left.nrSegments])
+ copy(p.keys[left.nrSegments+1:], right.keys[:right.nrSegments])
+ copy(p.values[left.nrSegments+1:], right.values[:right.nrSegments])
+ if left.hasChildren {
+ copy(p.children[:left.nrSegments+1], left.children[:left.nrSegments+1])
+ copy(p.children[left.nrSegments+1:], right.children[:right.nrSegments+1])
+ for i := 0; i < p.nrSegments+1; i++ {
+ p.children[i].parent = p
+ p.children[i].parentIndex = i
+ }
+ } else {
+ p.children[0] = nil
+ p.children[1] = nil
+ }
+
+ if gap.node == left {
+ return evictableRangeGapIterator{p, gap.index}
+ }
+ if gap.node == right {
+ return evictableRangeGapIterator{p, gap.index + left.nrSegments + 1}
+ }
+ return gap
+ }
+ // Merge n and either sibling, along with the segment separating the
+ // two, into whichever of the two nodes comes first. This is the
+ // reverse of the non-root splitting case in
+ // node.rebalanceBeforeInsert.
+ var left, right *evictableRangenode
+ if n.parentIndex > 0 {
+ left = n.prevSibling()
+ right = n
+ } else {
+ left = n
+ right = n.nextSibling()
+ }
+
+ if gap.node == right {
+ gap = evictableRangeGapIterator{left, gap.index + left.nrSegments + 1}
+ }
+ left.keys[left.nrSegments] = p.keys[left.parentIndex]
+ left.values[left.nrSegments] = p.values[left.parentIndex]
+ copy(left.keys[left.nrSegments+1:], right.keys[:right.nrSegments])
+ copy(left.values[left.nrSegments+1:], right.values[:right.nrSegments])
+ if left.hasChildren {
+ copy(left.children[left.nrSegments+1:], right.children[:right.nrSegments+1])
+ for i := left.nrSegments + 1; i < left.nrSegments+right.nrSegments+2; i++ {
+ left.children[i].parent = left
+ left.children[i].parentIndex = i
+ }
+ }
+ left.nrSegments += right.nrSegments + 1
+ copy(p.keys[left.parentIndex:], p.keys[left.parentIndex+1:p.nrSegments])
+ copy(p.values[left.parentIndex:], p.values[left.parentIndex+1:p.nrSegments])
+ evictableRangeSetFunctions{}.ClearValue(&p.values[p.nrSegments-1])
+ copy(p.children[left.parentIndex+1:], p.children[left.parentIndex+2:p.nrSegments+1])
+ for i := 0; i < p.nrSegments; i++ {
+ p.children[i].parentIndex = i
+ }
+ p.children[p.nrSegments] = nil
+ p.nrSegments--
+
+ if evictableRangetrackGaps != 0 {
+ left.updateMaxGapLocal()
+ }
+
+ n = p
+ }
+}
+
+// updateMaxGapLeaf updates maxGap bottom-up from the calling leaf until no
+// further update is necessary.
+//
+// Preconditions: n must be a leaf node, trackGaps must be 1.
+func (n *evictableRangenode) updateMaxGapLeaf() {
+ if n.hasChildren {
+ panic(fmt.Sprintf("updateMaxGapLeaf should always be called on leaf node: %v", n))
+ }
+ max := n.calculateMaxGapLeaf()
+ if max == n.maxGap.Get() {
+
+ return
+ }
+ oldMax := n.maxGap.Get()
+ n.maxGap.Set(max)
+ if max > oldMax {
+
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() >= max {
+
+ break
+ }
+
+ p.maxGap.Set(max)
+ }
+ return
+ }
+
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() > oldMax {
+
+ break
+ }
+
+ parentNewMax := p.calculateMaxGapInternal()
+ if p.maxGap.Get() == parentNewMax {
+
+ break
+ }
+
+ p.maxGap.Set(parentNewMax)
+ }
+}
+
+// updateMaxGapLocal updates maxGap of the calling node only, without
+// propagating to ancestor nodes.
+//
+// Precondition: trackGaps must be 1.
+func (n *evictableRangenode) updateMaxGapLocal() {
+ if !n.hasChildren {
+
+ n.maxGap.Set(n.calculateMaxGapLeaf())
+ } else {
+
+ n.maxGap.Set(n.calculateMaxGapInternal())
+ }
+}
+
+// calculateMaxGapLeaf iterates the gaps within a leaf node and calculates the
+// maximum.
+//
+// Preconditions: n must be a leaf node.
+func (n *evictableRangenode) calculateMaxGapLeaf() uint64 {
+ max := evictableRangeGapIterator{n, 0}.Range().Length()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := (evictableRangeGapIterator{n, i}).Range().Length(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// calculateMaxGapInternal iterates over the children's maxGap values within an
+// internal node n and calculates the maximum.
+//
+// Preconditions: n must be a non-leaf node.
+func (n *evictableRangenode) calculateMaxGapInternal() uint64 {
+ max := n.children[0].maxGap.Get()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := n.children[i].maxGap.Get(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// searchFirstLargeEnoughGap returns the first gap with length at least minSize
+// in the subtree rooted by n. If no such gap exists, it returns a terminal gap
+// iterator.
+func (n *evictableRangenode) searchFirstLargeEnoughGap(minSize uint64) evictableRangeGapIterator {
+ if n.maxGap.Get() < minSize {
+ return evictableRangeGapIterator{}
+ }
+ if n.hasChildren {
+ for i := 0; i <= n.nrSegments; i++ {
+ if largeEnoughGap := n.children[i].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := 0; i <= n.nrSegments; i++ {
+ currentGap := evictableRangeGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
+// searchLastLargeEnoughGap returns the last gap with length at least minSize
+// in the subtree rooted by n. If no such gap exists, it returns a terminal gap
+// iterator.
+func (n *evictableRangenode) searchLastLargeEnoughGap(minSize uint64) evictableRangeGapIterator {
+ if n.maxGap.Get() < minSize {
+ return evictableRangeGapIterator{}
+ }
+ if n.hasChildren {
+ for i := n.nrSegments; i >= 0; i-- {
+ if largeEnoughGap := n.children[i].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := n.nrSegments; i >= 0; i-- {
+ currentGap := evictableRangeGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
+// An Iterator is conceptually one of:
+//
+// - A pointer to a segment in a set; or
+//
+// - A terminal iterator, which is a sentinel indicating that the end of
+// iteration has been reached.
+//
+// Iterators are copyable values and are meaningfully equality-comparable. The
+// zero value of Iterator is a terminal iterator.
+//
+// Unless otherwise specified, any mutation of a set invalidates all existing
+// iterators into the set.
+type evictableRangeIterator struct {
+ // node is the node containing the iterated segment. If the iterator is
+ // terminal, node is nil.
+ node *evictableRangenode
+
+ // index is the index of the segment in node.keys/values.
+ index int
+}
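A short sketch of plain forward iteration with these iterators, assuming code
in the same package (the helper is hypothetical):

// totalSpan visits every segment in ascending key order, just as Span above
// does, stopping when NextSegment returns the terminal (zero) iterator.
func totalSpan(s *evictableRangeSet) uint64 {
	var sum uint64
	for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
		sum += seg.Range().Length()
	}
	return sum
}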
+
+// Ok returns true if the iterator is not terminal. All other methods are only
+// valid for non-terminal iterators.
+func (seg evictableRangeIterator) Ok() bool {
+ return seg.node != nil
+}
+
+// Range returns the iterated segment's range key.
+func (seg evictableRangeIterator) Range() EvictableRange {
+ return seg.node.keys[seg.index]
+}
+
+// Start is equivalent to Range().Start, but should be preferred if only the
+// start of the range is needed.
+func (seg evictableRangeIterator) Start() uint64 {
+ return seg.node.keys[seg.index].Start
+}
+
+// End is equivalent to Range().End, but should be preferred if only the end of
+// the range is needed.
+func (seg evictableRangeIterator) End() uint64 {
+ return seg.node.keys[seg.index].End
+}
+
+// SetRangeUnchecked mutates the iterated segment's range key. This operation
+// does not invalidate any iterators.
+//
+// Preconditions:
+// * r.Length() > 0.
+// * The new range must not overlap an existing one:
+// * If seg.NextSegment().Ok(), then r.end <= seg.NextSegment().Start().
+// * If seg.PrevSegment().Ok(), then r.start >= seg.PrevSegment().End().
+func (seg evictableRangeIterator) SetRangeUnchecked(r EvictableRange) {
+ seg.node.keys[seg.index] = r
+}
+
+// SetRange mutates the iterated segment's range key. If the new range would
+// cause the iterated segment to overlap another segment, or if the new range
+// is invalid, SetRange panics. This operation does not invalidate any
+// iterators.
+func (seg evictableRangeIterator) SetRange(r EvictableRange) {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ if prev := seg.PrevSegment(); prev.Ok() && r.Start < prev.End() {
+ panic(fmt.Sprintf("new segment range %v overlaps segment range %v", r, prev.Range()))
+ }
+ if next := seg.NextSegment(); next.Ok() && r.End > next.Start() {
+ panic(fmt.Sprintf("new segment range %v overlaps segment range %v", r, next.Range()))
+ }
+ seg.SetRangeUnchecked(r)
+}
+
+// SetStartUnchecked mutates the iterated segment's start. This operation does
+// not invalidate any iterators.
+//
+// Preconditions: The new start must be valid:
+// * start < seg.End()
+// * If seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
+func (seg evictableRangeIterator) SetStartUnchecked(start uint64) {
+ seg.node.keys[seg.index].Start = start
+}
+
+// SetStart mutates the iterated segment's start. If the new start value would
+// cause the iterated segment to overlap another segment, or would result in an
+// invalid range, SetStart panics. This operation does not invalidate any
+// iterators.
+func (seg evictableRangeIterator) SetStart(start uint64) {
+ if start >= seg.End() {
+ panic(fmt.Sprintf("new start %v would invalidate segment range %v", start, seg.Range()))
+ }
+ if prev := seg.PrevSegment(); prev.Ok() && start < prev.End() {
+ panic(fmt.Sprintf("new start %v would cause segment range %v to overlap segment range %v", start, seg.Range(), prev.Range()))
+ }
+ seg.SetStartUnchecked(start)
+}
+
+// SetEndUnchecked mutates the iterated segment's end. This operation does not
+// invalidate any iterators.
+//
+// Preconditions: The new end must be valid:
+// * end > seg.Start().
+// * If seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
+func (seg evictableRangeIterator) SetEndUnchecked(end uint64) {
+ seg.node.keys[seg.index].End = end
+}
+
+// SetEnd mutates the iterated segment's end. If the new end value would cause
+// the iterated segment to overlap another segment, or would result in an
+// invalid range, SetEnd panics. This operation does not invalidate any
+// iterators.
+func (seg evictableRangeIterator) SetEnd(end uint64) {
+ if end <= seg.Start() {
+ panic(fmt.Sprintf("new end %v would invalidate segment range %v", end, seg.Range()))
+ }
+ if next := seg.NextSegment(); next.Ok() && end > next.Start() {
+ panic(fmt.Sprintf("new end %v would cause segment range %v to overlap segment range %v", end, seg.Range(), next.Range()))
+ }
+ seg.SetEndUnchecked(end)
+}
+
+// Value returns a copy of the iterated segment's value.
+func (seg evictableRangeIterator) Value() evictableRangeSetValue {
+ return seg.node.values[seg.index]
+}
+
+// ValuePtr returns a pointer to the iterated segment's value. The pointer is
+// invalidated if the iterator is invalidated. This operation does not
+// invalidate any iterators.
+func (seg evictableRangeIterator) ValuePtr() *evictableRangeSetValue {
+ return &seg.node.values[seg.index]
+}
+
+// SetValue mutates the iterated segment's value. This operation does not
+// invalidate any iterators.
+func (seg evictableRangeIterator) SetValue(val evictableRangeSetValue) {
+ seg.node.values[seg.index] = val
+}
+
+// PrevSegment returns the iterated segment's predecessor. If there is no
+// preceding segment, PrevSegment returns a terminal iterator.
+func (seg evictableRangeIterator) PrevSegment() evictableRangeIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index].lastSegment()
+ }
+ if seg.index > 0 {
+ return evictableRangeIterator{seg.node, seg.index - 1}
+ }
+ if seg.node.parent == nil {
+ return evictableRangeIterator{}
+ }
+ return evictableRangesegmentBeforePosition(seg.node.parent, seg.node.parentIndex)
+}
+
+// NextSegment returns the iterated segment's successor. If there is no
+// succeeding segment, NextSegment returns a terminal iterator.
+func (seg evictableRangeIterator) NextSegment() evictableRangeIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index+1].firstSegment()
+ }
+ if seg.index < seg.node.nrSegments-1 {
+ return evictableRangeIterator{seg.node, seg.index + 1}
+ }
+ if seg.node.parent == nil {
+ return evictableRangeIterator{}
+ }
+ return evictableRangesegmentAfterPosition(seg.node.parent, seg.node.parentIndex)
+}
+
+// PrevGap returns the gap immediately before the iterated segment.
+func (seg evictableRangeIterator) PrevGap() evictableRangeGapIterator {
+ if seg.node.hasChildren {
+
+ return seg.node.children[seg.index].lastSegment().NextGap()
+ }
+ return evictableRangeGapIterator{seg.node, seg.index}
+}
+
+// NextGap returns the gap immediately after the iterated segment.
+func (seg evictableRangeIterator) NextGap() evictableRangeGapIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index+1].firstSegment().PrevGap()
+ }
+ return evictableRangeGapIterator{seg.node, seg.index + 1}
+}
+
+// PrevNonEmpty returns the iterated segment's predecessor if it is adjacent,
+// or the gap before the iterated segment otherwise. If seg.Start() ==
+// Functions.MinKey(), PrevNonEmpty will return two terminal iterators.
+// Otherwise, exactly one of the iterators returned by PrevNonEmpty will be
+// non-terminal.
+func (seg evictableRangeIterator) PrevNonEmpty() (evictableRangeIterator, evictableRangeGapIterator) {
+ gap := seg.PrevGap()
+ if gap.Range().Length() != 0 {
+ return evictableRangeIterator{}, gap
+ }
+ return gap.PrevSegment(), evictableRangeGapIterator{}
+}
+
+// NextNonEmpty returns the iterated segment's successor if it is adjacent, or
+// the gap after the iterated segment otherwise. If seg.End() ==
+// Functions.MaxKey(), NextNonEmpty will return two terminal iterators.
+// Otherwise, exactly one of the iterators returned by NextNonEmpty will be
+// non-terminal.
+func (seg evictableRangeIterator) NextNonEmpty() (evictableRangeIterator, evictableRangeGapIterator) {
+ gap := seg.NextGap()
+ if gap.Range().Length() != 0 {
+ return evictableRangeIterator{}, gap
+ }
+ return gap.NextSegment(), evictableRangeGapIterator{}
+}
+
+// A GapIterator is conceptually one of:
+//
+// - A pointer to a position between two segments, before the first segment, or
+// after the last segment in a set, called a *gap*; or
+//
+// - A terminal iterator, which is a sentinel indicating that the end of
+// iteration has been reached.
+//
+// Note that the gap between two adjacent segments exists (iterators to it are
+// non-terminal), but has a length of zero. GapIterator.IsEmpty returns true
+// for such gaps. An empty set contains a single gap, spanning the entire range
+// of the set's keys.
+//
+// GapIterators are copyable values and are meaningfully equality-comparable.
+// The zero value of GapIterator is a terminal iterator.
+//
+// Unless otherwise specified, any mutation of a set invalidates all existing
+// iterators into the set.
+type evictableRangeGapIterator struct {
+ // The representation of a GapIterator is identical to that of an Iterator,
+ // except that index corresponds to positions between segments in the same
+ // way as for node.children (see comment for node.nrSegments).
+ node *evictableRangenode
+ index int
+}
+
+// Ok returns true if the iterator is not terminal. All other methods are only
+// valid for non-terminal iterators.
+func (gap evictableRangeGapIterator) Ok() bool {
+ return gap.node != nil
+}
+
+// Range returns the range spanned by the iterated gap.
+func (gap evictableRangeGapIterator) Range() EvictableRange {
+ return EvictableRange{gap.Start(), gap.End()}
+}
+
+// Start is equivalent to Range().Start, but should be preferred if only the
+// start of the range is needed.
+func (gap evictableRangeGapIterator) Start() uint64 {
+ if ps := gap.PrevSegment(); ps.Ok() {
+ return ps.End()
+ }
+ return evictableRangeSetFunctions{}.MinKey()
+}
+
+// End is equivalent to Range().End, but should be preferred if only the end of
+// the range is needed.
+func (gap evictableRangeGapIterator) End() uint64 {
+ if ns := gap.NextSegment(); ns.Ok() {
+ return ns.Start()
+ }
+ return evictableRangeSetFunctions{}.MaxKey()
+}
+
+// IsEmpty returns true if the iterated gap is empty (that is, the "gap" is
+// between two adjacent segments.)
+func (gap evictableRangeGapIterator) IsEmpty() bool {
+ return gap.Range().Length() == 0
+}
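A small sketch of walking a set's gaps, assuming code in the same package (the
helper is hypothetical); gaps between adjacent segments exist but have zero
length, so they are skipped here:

// largestGap scans every gap, ignoring the empty ones between adjacent
// segments, and returns the longest free range found.
func largestGap(s *evictableRangeSet) EvictableRange {
	var best EvictableRange
	for gap := s.FirstGap(); gap.Ok(); gap = gap.NextGap() {
		if gap.IsEmpty() {
			continue
		}
		if gap.Range().Length() > best.Length() {
			best = gap.Range()
		}
	}
	return best
}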
+
+// PrevSegment returns the segment immediately before the iterated gap. If no
+// such segment exists, PrevSegment returns a terminal iterator.
+func (gap evictableRangeGapIterator) PrevSegment() evictableRangeIterator {
+ return evictableRangesegmentBeforePosition(gap.node, gap.index)
+}
+
+// NextSegment returns the segment immediately after the iterated gap. If no
+// such segment exists, NextSegment returns a terminal iterator.
+func (gap evictableRangeGapIterator) NextSegment() evictableRangeIterator {
+ return evictableRangesegmentAfterPosition(gap.node, gap.index)
+}
+
+// PrevGap returns the iterated gap's predecessor. If no such gap exists,
+// PrevGap returns a terminal iterator.
+func (gap evictableRangeGapIterator) PrevGap() evictableRangeGapIterator {
+ seg := gap.PrevSegment()
+ if !seg.Ok() {
+ return evictableRangeGapIterator{}
+ }
+ return seg.PrevGap()
+}
+
+// NextGap returns the iterated gap's successor. If no such gap exists, NextGap
+// returns a terminal iterator.
+func (gap evictableRangeGapIterator) NextGap() evictableRangeGapIterator {
+ seg := gap.NextSegment()
+ if !seg.Ok() {
+ return evictableRangeGapIterator{}
+ }
+ return seg.NextGap()
+}
+
+// NextLargeEnoughGap returns the nearest gap after the iterated gap whose
+// length is at least minSize. If no such gap exists, it returns a terminal
+// gap iterator. The iterated gap itself is never returned.
+//
+// Precondition: trackGaps must be 1.
+func (gap evictableRangeGapIterator) NextLargeEnoughGap(minSize uint64) evictableRangeGapIterator {
+ if evictableRangetrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == gap.node.nrSegments {
+
+ gap.node = gap.NextSegment().node
+ gap.index = 0
+ return gap.nextLargeEnoughGapHelper(minSize)
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
+
+// nextLargeEnoughGapHelper is the helper function used by NextLargeEnoughGap
+// to perform the recursive search.
+//
+// Preconditions: gap is NOT the trailing gap of a non-leaf node.
+func (gap evictableRangeGapIterator) nextLargeEnoughGapHelper(minSize uint64) evictableRangeGapIterator {
+
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == gap.node.nrSegments)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return evictableRangeGapIterator{}
+ }
+
+ gap.index++
+ for gap.index <= gap.node.nrSegments {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index++
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == gap.node.nrSegments {
+
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
+
+// PrevLargeEnoughGap returns the nearest gap before the iterated gap whose
+// length is at least minSize. If no such gap exists, it returns a terminal
+// gap iterator. The iterated gap itself is never returned.
+//
+// Precondition: trackGaps must be 1.
+func (gap evictableRangeGapIterator) PrevLargeEnoughGap(minSize uint64) evictableRangeGapIterator {
+ if evictableRangetrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == 0 {
+
+ gap.node = gap.PrevSegment().node
+ gap.index = gap.node.nrSegments
+ return gap.prevLargeEnoughGapHelper(minSize)
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
+
+// prevLargeEnoughGapHelper is the helper function used by PrevLargeEnoughGap
+// to perform the recursive search.
+//
+// Preconditions: gap is NOT the first gap of a non-leaf node.
+func (gap evictableRangeGapIterator) prevLargeEnoughGapHelper(minSize uint64) evictableRangeGapIterator {
+
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == 0)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return evictableRangeGapIterator{}
+ }
+
+ gap.index--
+ for gap.index >= 0 {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index--
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == 0 {
+
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
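
The two searches above are first-fit scans accelerated by the per-node maxGap: any subtree whose maxGap is below minSize can be skipped outright. A standalone, linear-scan version of the same contract (hypothetical names, illustrative only):

package main

import "fmt"

// nextLargeEnough returns the index of the nearest gap after position i whose
// length is at least minSize, or -1 if none exists. Like NextLargeEnoughGap,
// the gap at i itself is not considered.
func nextLargeEnough(gapLens []uint64, i int, minSize uint64) int {
	for j := i + 1; j < len(gapLens); j++ {
		if gapLens[j] >= minSize {
			return j
		}
	}
	return -1
}

func main() {
	gaps := []uint64{0, 4096, 0, 65536, 8192}
	fmt.Println(nextLargeEnough(gaps, 0, 8192))  // 3
	fmt.Println(nextLargeEnough(gaps, 3, 1<<20)) // -1
}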
+
+// segmentBeforePosition returns the predecessor segment of the position given
+// by n.children[i], which may or may not contain a child. If no such segment
+// exists, segmentBeforePosition returns a terminal iterator.
+func evictableRangesegmentBeforePosition(n *evictableRangenode, i int) evictableRangeIterator {
+ for i == 0 {
+ if n.parent == nil {
+ return evictableRangeIterator{}
+ }
+ n, i = n.parent, n.parentIndex
+ }
+ return evictableRangeIterator{n, i - 1}
+}
+
+// segmentAfterPosition returns the successor segment of the position given by
+// n.children[i], which may or may not contain a child. If no such segment
+// exists, segmentAfterPosition returns a terminal iterator.
+func evictableRangesegmentAfterPosition(n *evictableRangenode, i int) evictableRangeIterator {
+ for i == n.nrSegments {
+ if n.parent == nil {
+ return evictableRangeIterator{}
+ }
+ n, i = n.parent, n.parentIndex
+ }
+ return evictableRangeIterator{n, i}
+}
+
+func evictableRangezeroValueSlice(slice []evictableRangeSetValue) {
+
+ for i := range slice {
+ evictableRangeSetFunctions{}.ClearValue(&slice[i])
+ }
+}
+
+func evictableRangezeroNodeSlice(slice []*evictableRangenode) {
+ for i := range slice {
+ slice[i] = nil
+ }
+}
+
+// String stringifies a Set for debugging.
+func (s *evictableRangeSet) String() string {
+ return s.root.String()
+}
+
+// String stringifies a node (and all of its children) for debugging.
+func (n *evictableRangenode) String() string {
+ var buf bytes.Buffer
+ n.writeDebugString(&buf, "")
+ return buf.String()
+}
+
+func (n *evictableRangenode) writeDebugString(buf *bytes.Buffer, prefix string) {
+ if n.hasChildren != (n.nrSegments > 0 && n.children[0] != nil) {
+ buf.WriteString(prefix)
+ buf.WriteString(fmt.Sprintf("WARNING: inconsistent value of hasChildren: got %v, want %v\n", n.hasChildren, !n.hasChildren))
+ }
+ for i := 0; i < n.nrSegments; i++ {
+ if child := n.children[i]; child != nil {
+ cprefix := fmt.Sprintf("%s- % 3d ", prefix, i)
+ if child.parent != n || child.parentIndex != i {
+ buf.WriteString(cprefix)
+ buf.WriteString(fmt.Sprintf("WARNING: inconsistent linkage to parent: got (%p, %d), want (%p, %d)\n", child.parent, child.parentIndex, n, i))
+ }
+ child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, i))
+ }
+ buf.WriteString(prefix)
+ if n.hasChildren {
+ if evictableRangetrackGaps != 0 {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v, maxGap: %d\n", i, n.keys[i], n.values[i], n.maxGap.Get()))
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
+ }
+ if child := n.children[n.nrSegments]; child != nil {
+ child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, n.nrSegments))
+ }
+}
+
+// SegmentDataSlices represents segments from a set as slices of start, end, and
+// values. SegmentDataSlices is primarily used as an intermediate representation
+// for save/restore and the layout here is optimized for that.
+//
+// +stateify savable
+type evictableRangeSegmentDataSlices struct {
+ Start []uint64
+ End []uint64
+ Values []evictableRangeSetValue
+}
+
+// ExportSortedSlices returns a copy of all segments in the given set, in
+// ascending key order.
+func (s *evictableRangeSet) ExportSortedSlices() *evictableRangeSegmentDataSlices {
+ var sds evictableRangeSegmentDataSlices
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ sds.Start = append(sds.Start, seg.Start())
+ sds.End = append(sds.End, seg.End())
+ sds.Values = append(sds.Values, seg.Value())
+ }
+ sds.Start = sds.Start[:len(sds.Start):len(sds.Start)]
+ sds.End = sds.End[:len(sds.End):len(sds.End)]
+ sds.Values = sds.Values[:len(sds.Values):len(sds.Values)]
+ return &sds
+}
+
+// ImportSortedSlices initializes the given set from the given slices.
+//
+// Preconditions:
+// * s must be empty.
+// * sds must represent a valid set (the segments in sds must have valid
+// lengths and must not overlap).
+// * The segments in sds must be sorted in ascending key order.
+func (s *evictableRangeSet) ImportSortedSlices(sds *evictableRangeSegmentDataSlices) error {
+ if !s.IsEmpty() {
+ return fmt.Errorf("cannot import into non-empty set %v", s)
+ }
+ gap := s.FirstGap()
+ for i := range sds.Start {
+ r := EvictableRange{sds.Start[i], sds.End[i]}
+ if !gap.Range().IsSupersetOf(r) {
+ return fmt.Errorf("segment overlaps a preceding segment or is incorrectly sorted: [%d, %d) => %v", sds.Start[i], sds.End[i], sds.Values[i])
+ }
+ gap = s.InsertWithoutMerging(gap, r, sds.Values[i]).NextGap()
+ }
+ return nil
+}
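
A toy version of the flattened layout and of the ordering check ImportSortedSlices performs (hypothetical names; string values stand in for the real Value type):

package main

import "fmt"

// toySlices mirrors the Start/End/Values layout: three parallel, key-sorted
// slices, one entry per segment.
type toySlices struct {
	Start, End []uint64
	Values     []string
}

// validSorted reports whether every segment has positive length and starts at
// or after the end of its predecessor, the same property ImportSortedSlices
// enforces via gap.Range().IsSupersetOf(r).
func validSorted(s toySlices) bool {
	for i := range s.Start {
		if s.End[i] <= s.Start[i] {
			return false
		}
		if i > 0 && s.Start[i] < s.End[i-1] {
			return false
		}
	}
	return true
}

func main() {
	ok := toySlices{
		Start:  []uint64{0, 8192},
		End:    []uint64{4096, 12288},
		Values: []string{"v0", "v1"},
	}
	bad := toySlices{Start: []uint64{0, 2048}, End: []uint64{4096, 8192}, Values: []string{"v0", "v1"}}
	fmt.Println(validSorted(ok), validSorted(bad)) // true false
}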
+
+// segmentTestCheck returns an error if s is incorrectly sorted, does not
+// contain exactly expectedSegments segments, or contains a segment which
+// fails the passed check.
+//
+// This should be used only for testing, and has been added to this package for
+// templating convenience.
+func (s *evictableRangeSet) segmentTestCheck(expectedSegments int, segFunc func(int, EvictableRange, evictableRangeSetValue) error) error {
+ havePrev := false
+ prev := uint64(0)
+ nrSegments := 0
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ next := seg.Start()
+ if havePrev && prev >= next {
+ return fmt.Errorf("incorrect order: key %d (segment %d) >= key %d (segment %d)", prev, nrSegments-1, next, nrSegments)
+ }
+ if segFunc != nil {
+ if err := segFunc(nrSegments, seg.Range(), seg.Value()); err != nil {
+ return err
+ }
+ }
+ prev = next
+ havePrev = true
+ nrSegments++
+ }
+ if nrSegments != expectedSegments {
+ return fmt.Errorf("incorrect number of segments: got %d, wanted %d", nrSegments, expectedSegments)
+ }
+ return nil
+}
+
+// countSegments counts the number of segments in the set.
+//
+// Similar to Check, this should only be used for testing.
+func (s *evictableRangeSet) countSegments() (segments int) {
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ segments++
+ }
+ return segments
+}
+func (s *evictableRangeSet) saveRoot() *evictableRangeSegmentDataSlices {
+ return s.ExportSortedSlices()
+}
+
+func (s *evictableRangeSet) loadRoot(sds *evictableRangeSegmentDataSlices) {
+ if err := s.ImportSortedSlices(sds); err != nil {
+ panic(err)
+ }
+}
diff --git a/pkg/sentry/pgalloc/pgalloc_state_autogen.go b/pkg/sentry/pgalloc/pgalloc_state_autogen.go
new file mode 100644
index 000000000..272186381
--- /dev/null
+++ b/pkg/sentry/pgalloc/pgalloc_state_autogen.go
@@ -0,0 +1,389 @@
+// automatically generated by stateify.
+
+package pgalloc
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (r *EvictableRange) StateTypeName() string {
+ return "pkg/sentry/pgalloc.EvictableRange"
+}
+
+func (r *EvictableRange) StateFields() []string {
+ return []string{
+ "Start",
+ "End",
+ }
+}
+
+func (r *EvictableRange) beforeSave() {}
+
+// +checklocksignore
+func (r *EvictableRange) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.Start)
+ stateSinkObject.Save(1, &r.End)
+}
+
+func (r *EvictableRange) afterLoad() {}
+
+// +checklocksignore
+func (r *EvictableRange) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.Start)
+ stateSourceObject.Load(1, &r.End)
+}
+
+func (s *evictableRangeSet) StateTypeName() string {
+ return "pkg/sentry/pgalloc.evictableRangeSet"
+}
+
+func (s *evictableRangeSet) StateFields() []string {
+ return []string{
+ "root",
+ }
+}
+
+func (s *evictableRangeSet) beforeSave() {}
+
+// +checklocksignore
+func (s *evictableRangeSet) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ var rootValue *evictableRangeSegmentDataSlices = s.saveRoot()
+ stateSinkObject.SaveValue(0, rootValue)
+}
+
+func (s *evictableRangeSet) afterLoad() {}
+
+// +checklocksignore
+func (s *evictableRangeSet) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.LoadValue(0, new(*evictableRangeSegmentDataSlices), func(y interface{}) { s.loadRoot(y.(*evictableRangeSegmentDataSlices)) })
+}
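
Every type below follows the same generated pattern: StateTypeName and StateFields identify the type and its numbered fields, StateSave writes each field to a Sink, and StateLoad reads them back from a Source; the set types additionally funnel their root through saveRoot/loadRoot. A hedged sketch of the same shape for an invented type (counterStats is hypothetical; only the pkg/state calls already shown above are assumed):

package example

import (
	"gvisor.dev/gvisor/pkg/state"
)

// counterStats is an invented example type, not part of pgalloc.
type counterStats struct {
	hits   uint64
	misses uint64
}

func (c *counterStats) StateTypeName() string {
	return "example.counterStats"
}

func (c *counterStats) StateFields() []string {
	return []string{"hits", "misses"}
}

func (c *counterStats) beforeSave() {}

func (c *counterStats) StateSave(stateSinkObject state.Sink) {
	c.beforeSave()
	stateSinkObject.Save(0, &c.hits)
	stateSinkObject.Save(1, &c.misses)
}

func (c *counterStats) afterLoad() {}

func (c *counterStats) StateLoad(stateSourceObject state.Source) {
	stateSourceObject.Load(0, &c.hits)
	stateSourceObject.Load(1, &c.misses)
}

func init() {
	state.Register((*counterStats)(nil))
}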
+
+func (n *evictableRangenode) StateTypeName() string {
+ return "pkg/sentry/pgalloc.evictableRangenode"
+}
+
+func (n *evictableRangenode) StateFields() []string {
+ return []string{
+ "nrSegments",
+ "parent",
+ "parentIndex",
+ "hasChildren",
+ "maxGap",
+ "keys",
+ "values",
+ "children",
+ }
+}
+
+func (n *evictableRangenode) beforeSave() {}
+
+// +checklocksignore
+func (n *evictableRangenode) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.nrSegments)
+ stateSinkObject.Save(1, &n.parent)
+ stateSinkObject.Save(2, &n.parentIndex)
+ stateSinkObject.Save(3, &n.hasChildren)
+ stateSinkObject.Save(4, &n.maxGap)
+ stateSinkObject.Save(5, &n.keys)
+ stateSinkObject.Save(6, &n.values)
+ stateSinkObject.Save(7, &n.children)
+}
+
+func (n *evictableRangenode) afterLoad() {}
+
+// +checklocksignore
+func (n *evictableRangenode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.nrSegments)
+ stateSourceObject.Load(1, &n.parent)
+ stateSourceObject.Load(2, &n.parentIndex)
+ stateSourceObject.Load(3, &n.hasChildren)
+ stateSourceObject.Load(4, &n.maxGap)
+ stateSourceObject.Load(5, &n.keys)
+ stateSourceObject.Load(6, &n.values)
+ stateSourceObject.Load(7, &n.children)
+}
+
+func (e *evictableRangeSegmentDataSlices) StateTypeName() string {
+ return "pkg/sentry/pgalloc.evictableRangeSegmentDataSlices"
+}
+
+func (e *evictableRangeSegmentDataSlices) StateFields() []string {
+ return []string{
+ "Start",
+ "End",
+ "Values",
+ }
+}
+
+func (e *evictableRangeSegmentDataSlices) beforeSave() {}
+
+// +checklocksignore
+func (e *evictableRangeSegmentDataSlices) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.Start)
+ stateSinkObject.Save(1, &e.End)
+ stateSinkObject.Save(2, &e.Values)
+}
+
+func (e *evictableRangeSegmentDataSlices) afterLoad() {}
+
+// +checklocksignore
+func (e *evictableRangeSegmentDataSlices) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.Start)
+ stateSourceObject.Load(1, &e.End)
+ stateSourceObject.Load(2, &e.Values)
+}
+
+func (u *usageInfo) StateTypeName() string {
+ return "pkg/sentry/pgalloc.usageInfo"
+}
+
+func (u *usageInfo) StateFields() []string {
+ return []string{
+ "kind",
+ "knownCommitted",
+ "refs",
+ }
+}
+
+func (u *usageInfo) beforeSave() {}
+
+// +checklocksignore
+func (u *usageInfo) StateSave(stateSinkObject state.Sink) {
+ u.beforeSave()
+ stateSinkObject.Save(0, &u.kind)
+ stateSinkObject.Save(1, &u.knownCommitted)
+ stateSinkObject.Save(2, &u.refs)
+}
+
+func (u *usageInfo) afterLoad() {}
+
+// +checklocksignore
+func (u *usageInfo) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &u.kind)
+ stateSourceObject.Load(1, &u.knownCommitted)
+ stateSourceObject.Load(2, &u.refs)
+}
+
+func (s *reclaimSet) StateTypeName() string {
+ return "pkg/sentry/pgalloc.reclaimSet"
+}
+
+func (s *reclaimSet) StateFields() []string {
+ return []string{
+ "root",
+ }
+}
+
+func (s *reclaimSet) beforeSave() {}
+
+// +checklocksignore
+func (s *reclaimSet) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ var rootValue *reclaimSegmentDataSlices = s.saveRoot()
+ stateSinkObject.SaveValue(0, rootValue)
+}
+
+func (s *reclaimSet) afterLoad() {}
+
+// +checklocksignore
+func (s *reclaimSet) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.LoadValue(0, new(*reclaimSegmentDataSlices), func(y interface{}) { s.loadRoot(y.(*reclaimSegmentDataSlices)) })
+}
+
+func (n *reclaimnode) StateTypeName() string {
+ return "pkg/sentry/pgalloc.reclaimnode"
+}
+
+func (n *reclaimnode) StateFields() []string {
+ return []string{
+ "nrSegments",
+ "parent",
+ "parentIndex",
+ "hasChildren",
+ "maxGap",
+ "keys",
+ "values",
+ "children",
+ }
+}
+
+func (n *reclaimnode) beforeSave() {}
+
+// +checklocksignore
+func (n *reclaimnode) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.nrSegments)
+ stateSinkObject.Save(1, &n.parent)
+ stateSinkObject.Save(2, &n.parentIndex)
+ stateSinkObject.Save(3, &n.hasChildren)
+ stateSinkObject.Save(4, &n.maxGap)
+ stateSinkObject.Save(5, &n.keys)
+ stateSinkObject.Save(6, &n.values)
+ stateSinkObject.Save(7, &n.children)
+}
+
+func (n *reclaimnode) afterLoad() {}
+
+// +checklocksignore
+func (n *reclaimnode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.nrSegments)
+ stateSourceObject.Load(1, &n.parent)
+ stateSourceObject.Load(2, &n.parentIndex)
+ stateSourceObject.Load(3, &n.hasChildren)
+ stateSourceObject.Load(4, &n.maxGap)
+ stateSourceObject.Load(5, &n.keys)
+ stateSourceObject.Load(6, &n.values)
+ stateSourceObject.Load(7, &n.children)
+}
+
+func (r *reclaimSegmentDataSlices) StateTypeName() string {
+ return "pkg/sentry/pgalloc.reclaimSegmentDataSlices"
+}
+
+func (r *reclaimSegmentDataSlices) StateFields() []string {
+ return []string{
+ "Start",
+ "End",
+ "Values",
+ }
+}
+
+func (r *reclaimSegmentDataSlices) beforeSave() {}
+
+// +checklocksignore
+func (r *reclaimSegmentDataSlices) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.Start)
+ stateSinkObject.Save(1, &r.End)
+ stateSinkObject.Save(2, &r.Values)
+}
+
+func (r *reclaimSegmentDataSlices) afterLoad() {}
+
+// +checklocksignore
+func (r *reclaimSegmentDataSlices) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.Start)
+ stateSourceObject.Load(1, &r.End)
+ stateSourceObject.Load(2, &r.Values)
+}
+
+func (s *usageSet) StateTypeName() string {
+ return "pkg/sentry/pgalloc.usageSet"
+}
+
+func (s *usageSet) StateFields() []string {
+ return []string{
+ "root",
+ }
+}
+
+func (s *usageSet) beforeSave() {}
+
+// +checklocksignore
+func (s *usageSet) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ var rootValue *usageSegmentDataSlices = s.saveRoot()
+ stateSinkObject.SaveValue(0, rootValue)
+}
+
+func (s *usageSet) afterLoad() {}
+
+// +checklocksignore
+func (s *usageSet) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.LoadValue(0, new(*usageSegmentDataSlices), func(y interface{}) { s.loadRoot(y.(*usageSegmentDataSlices)) })
+}
+
+func (n *usagenode) StateTypeName() string {
+ return "pkg/sentry/pgalloc.usagenode"
+}
+
+func (n *usagenode) StateFields() []string {
+ return []string{
+ "nrSegments",
+ "parent",
+ "parentIndex",
+ "hasChildren",
+ "maxGap",
+ "keys",
+ "values",
+ "children",
+ }
+}
+
+func (n *usagenode) beforeSave() {}
+
+// +checklocksignore
+func (n *usagenode) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.nrSegments)
+ stateSinkObject.Save(1, &n.parent)
+ stateSinkObject.Save(2, &n.parentIndex)
+ stateSinkObject.Save(3, &n.hasChildren)
+ stateSinkObject.Save(4, &n.maxGap)
+ stateSinkObject.Save(5, &n.keys)
+ stateSinkObject.Save(6, &n.values)
+ stateSinkObject.Save(7, &n.children)
+}
+
+func (n *usagenode) afterLoad() {}
+
+// +checklocksignore
+func (n *usagenode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.nrSegments)
+ stateSourceObject.Load(1, &n.parent)
+ stateSourceObject.Load(2, &n.parentIndex)
+ stateSourceObject.Load(3, &n.hasChildren)
+ stateSourceObject.Load(4, &n.maxGap)
+ stateSourceObject.Load(5, &n.keys)
+ stateSourceObject.Load(6, &n.values)
+ stateSourceObject.Load(7, &n.children)
+}
+
+func (u *usageSegmentDataSlices) StateTypeName() string {
+ return "pkg/sentry/pgalloc.usageSegmentDataSlices"
+}
+
+func (u *usageSegmentDataSlices) StateFields() []string {
+ return []string{
+ "Start",
+ "End",
+ "Values",
+ }
+}
+
+func (u *usageSegmentDataSlices) beforeSave() {}
+
+// +checklocksignore
+func (u *usageSegmentDataSlices) StateSave(stateSinkObject state.Sink) {
+ u.beforeSave()
+ stateSinkObject.Save(0, &u.Start)
+ stateSinkObject.Save(1, &u.End)
+ stateSinkObject.Save(2, &u.Values)
+}
+
+func (u *usageSegmentDataSlices) afterLoad() {}
+
+// +checklocksignore
+func (u *usageSegmentDataSlices) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &u.Start)
+ stateSourceObject.Load(1, &u.End)
+ stateSourceObject.Load(2, &u.Values)
+}
+
+func init() {
+ state.Register((*EvictableRange)(nil))
+ state.Register((*evictableRangeSet)(nil))
+ state.Register((*evictableRangenode)(nil))
+ state.Register((*evictableRangeSegmentDataSlices)(nil))
+ state.Register((*usageInfo)(nil))
+ state.Register((*reclaimSet)(nil))
+ state.Register((*reclaimnode)(nil))
+ state.Register((*reclaimSegmentDataSlices)(nil))
+ state.Register((*usageSet)(nil))
+ state.Register((*usagenode)(nil))
+ state.Register((*usageSegmentDataSlices)(nil))
+}
diff --git a/pkg/sentry/pgalloc/pgalloc_test.go b/pkg/sentry/pgalloc/pgalloc_test.go
deleted file mode 100644
index 8d2b7eb5e..000000000
--- a/pkg/sentry/pgalloc/pgalloc_test.go
+++ /dev/null
@@ -1,246 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pgalloc
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/hostarch"
-)
-
-const (
- page = hostarch.PageSize
- hugepage = hostarch.HugePageSize
- topPage = (1 << 63) - page
-)
-
-func TestFindUnallocatedRange(t *testing.T) {
- for _, test := range []struct {
- desc string
- usage *usageSegmentDataSlices
- fileSize int64
- length uint64
- alignment uint64
- start uint64
- expectFail bool
- }{
- {
- desc: "Initial allocation succeeds",
- usage: &usageSegmentDataSlices{},
- length: page,
- alignment: page,
- start: chunkSize - page, // Grows by chunkSize, allocate down.
- },
- {
- desc: "Allocation finds empty space at start of file",
- usage: &usageSegmentDataSlices{
- Start: []uint64{page},
- End: []uint64{2 * page},
- Values: []usageInfo{{refs: 1}},
- },
- fileSize: 2 * page,
- length: page,
- alignment: page,
- start: 0,
- },
- {
- desc: "Allocation finds empty space at end of file",
- usage: &usageSegmentDataSlices{
- Start: []uint64{0},
- End: []uint64{page},
- Values: []usageInfo{{refs: 1}},
- },
- fileSize: 2 * page,
- length: page,
- alignment: page,
- start: page,
- },
- {
- desc: "In-use frames are not allocatable",
- usage: &usageSegmentDataSlices{
- Start: []uint64{0, page},
- End: []uint64{page, 2 * page},
- Values: []usageInfo{{refs: 1}, {refs: 2}},
- },
- fileSize: 2 * page,
- length: page,
- alignment: page,
- start: 3 * page, // Double fileSize, allocate top-down.
- },
- {
- desc: "Reclaimable frames are not allocatable",
- usage: &usageSegmentDataSlices{
- Start: []uint64{0, page, 2 * page},
- End: []uint64{page, 2 * page, 3 * page},
- Values: []usageInfo{{refs: 1}, {refs: 0}, {refs: 1}},
- },
- fileSize: 3 * page,
- length: page,
- alignment: page,
- start: 5 * page, // Double fileSize, grow down.
- },
- {
- desc: "Gaps between in-use frames are allocatable",
- usage: &usageSegmentDataSlices{
- Start: []uint64{0, 2 * page},
- End: []uint64{page, 3 * page},
- Values: []usageInfo{{refs: 1}, {refs: 1}},
- },
- fileSize: 3 * page,
- length: page,
- alignment: page,
- start: page,
- },
- {
- desc: "Inadequately-sized gaps are rejected",
- usage: &usageSegmentDataSlices{
- Start: []uint64{0, 2 * page},
- End: []uint64{page, 3 * page},
- Values: []usageInfo{{refs: 1}, {refs: 1}},
- },
- fileSize: 3 * page,
- length: 2 * page,
- alignment: page,
- start: 4 * page, // Double fileSize, grow down.
- },
- {
- desc: "Alignment is honored at end of file",
- usage: &usageSegmentDataSlices{
- Start: []uint64{0, hugepage + page},
- // Hugepage-sized gap here that shouldn't be allocated from
- // since it's incorrectly aligned.
- End: []uint64{page, hugepage + 2*page},
- Values: []usageInfo{{refs: 1}, {refs: 1}},
- },
- fileSize: hugepage + 2*page,
- length: hugepage,
- alignment: hugepage,
- start: 3 * hugepage, // Double fileSize until alignment is satisfied, grow down.
- },
- {
- desc: "Alignment is honored before end of file",
- usage: &usageSegmentDataSlices{
- Start: []uint64{0, 2*hugepage + page},
- // Page will need to be shifted down from top.
- End: []uint64{page, 2*hugepage + 2*page},
- Values: []usageInfo{{refs: 1}, {refs: 1}},
- },
- fileSize: 2*hugepage + 2*page,
- length: hugepage,
- alignment: hugepage,
- start: hugepage,
- },
- {
- desc: "Allocation doubles file size more than once if necessary",
- usage: &usageSegmentDataSlices{},
- fileSize: page,
- length: 4 * page,
- alignment: page,
- start: 0,
- },
- {
- desc: "Allocations are compact if possible",
- usage: &usageSegmentDataSlices{
- Start: []uint64{page, 3 * page},
- End: []uint64{2 * page, 4 * page},
- Values: []usageInfo{{refs: 1}, {refs: 2}},
- },
- fileSize: 4 * page,
- length: page,
- alignment: page,
- start: 2 * page,
- },
- {
- desc: "Top-down allocation within one gap",
- usage: &usageSegmentDataSlices{
- Start: []uint64{page, 4 * page, 7 * page},
- End: []uint64{2 * page, 5 * page, 8 * page},
- Values: []usageInfo{{refs: 1}, {refs: 2}, {refs: 1}},
- },
- fileSize: 8 * page,
- length: page,
- alignment: page,
- start: 6 * page,
- },
- {
- desc: "Top-down allocation between multiple gaps",
- usage: &usageSegmentDataSlices{
- Start: []uint64{page, 3 * page, 5 * page},
- End: []uint64{2 * page, 4 * page, 6 * page},
- Values: []usageInfo{{refs: 1}, {refs: 2}, {refs: 1}},
- },
- fileSize: 6 * page,
- length: page,
- alignment: page,
- start: 4 * page,
- },
- {
- desc: "Top-down allocation with large top gap",
- usage: &usageSegmentDataSlices{
- Start: []uint64{page, 3 * page},
- End: []uint64{2 * page, 4 * page},
- Values: []usageInfo{{refs: 1}, {refs: 2}},
- },
- fileSize: 8 * page,
- length: page,
- alignment: page,
- start: 7 * page,
- },
- {
- desc: "Gaps found with possible overflow",
- usage: &usageSegmentDataSlices{
- Start: []uint64{page, topPage - page},
- End: []uint64{2 * page, topPage},
- Values: []usageInfo{{refs: 1}, {refs: 1}},
- },
- fileSize: topPage,
- length: page,
- alignment: page,
- start: topPage - 2*page,
- },
- {
- desc: "Overflow detected",
- usage: &usageSegmentDataSlices{
- Start: []uint64{page},
- End: []uint64{topPage},
- Values: []usageInfo{{refs: 1}},
- },
- fileSize: topPage,
- length: 2 * page,
- alignment: page,
- expectFail: true,
- },
- } {
- t.Run(test.desc, func(t *testing.T) {
- var usage usageSet
- if err := usage.ImportSortedSlices(test.usage); err != nil {
- t.Fatalf("Failed to initialize usage from %v: %v", test.usage, err)
- }
- fr, ok := findAvailableRange(&usage, test.fileSize, test.length, test.alignment)
- if !test.expectFail && !ok {
- t.Fatalf("findAvailableRange(%v, %x, %x, %x): got %x, false wanted %x, true", test.usage, test.fileSize, test.length, test.alignment, fr.Start, test.start)
- }
- if test.expectFail && ok {
- t.Fatalf("findAvailableRange(%v, %x, %x, %x): got %x, true wanted %x, false", test.usage, test.fileSize, test.length, test.alignment, fr.Start, test.start)
- }
- if ok && fr.Start != test.start {
- t.Errorf("findAvailableRange(%v, %x, %x, %x): got start=%x, wanted %x", test.usage, test.fileSize, test.length, test.alignment, fr.Start, test.start)
- }
- if ok && fr.End != test.start+test.length {
- t.Errorf("findAvailableRange(%v, %x, %x, %x): got end=%x, wanted %x", test.usage, test.fileSize, test.length, test.alignment, fr.End, test.start+test.length)
- }
- })
- }
-}
diff --git a/pkg/sentry/pgalloc/pgalloc_unsafe_state_autogen.go b/pkg/sentry/pgalloc/pgalloc_unsafe_state_autogen.go
new file mode 100644
index 000000000..87c214008
--- /dev/null
+++ b/pkg/sentry/pgalloc/pgalloc_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package pgalloc
diff --git a/pkg/segment/set.go b/pkg/sentry/pgalloc/reclaim_set.go
index fae6c363d..737f38776 100644
--- a/pkg/segment/set.go
+++ b/pkg/sentry/pgalloc/reclaim_set.go
@@ -1,41 +1,14 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package segment provides tools for working with collections of segments. A
-// segment is a key-value mapping, where the key is a non-empty contiguous
-// range of values of type Key, and the value is a single value of type Value.
-//
-// Clients using this package must use the go_template_instance rule in
-// tools/go_generics/defs.bzl to create an instantiation of this
-// template package, providing types to use in place of Key, Range, Value, and
-// Functions. See pkg/segment/test/BUILD for a usage example.
-package segment
+package pgalloc
+
+import (
+ __generics_imported0 "gvisor.dev/gvisor/pkg/sentry/memmap"
+)
import (
"bytes"
"fmt"
)
-// Key is a required type parameter that must be an integral type.
-type Key uint64
-
-// Range is a required type parameter equivalent to Range<Key>.
-type Range interface{}
-
-// Value is a required type parameter.
-type Value interface{}
-
// trackGaps is an optional parameter.
//
// If trackGaps is 1, the Set will track maximum gap size recursively,
@@ -43,59 +16,27 @@ type Value interface{}
// case, Key must be an unsigned integer.
//
// trackGaps must be 0 or 1.
-const trackGaps = 0
+const reclaimtrackGaps = 0
-var _ = uint8(trackGaps << 7) // Will fail if not zero or one.
+var _ = uint8(reclaimtrackGaps << 7) // Will fail if not zero or one.
// dynamicGap is a type that disappears if trackGaps is 0.
-type dynamicGap [trackGaps]Key
+type reclaimdynamicGap [reclaimtrackGaps]uint64
// Get returns the value of the gap.
//
// Precondition: trackGaps must be non-zero.
-func (d *dynamicGap) Get() Key {
+func (d *reclaimdynamicGap) Get() uint64 {
return d[:][0]
}
// Set sets the value of the gap.
//
// Precondition: trackGaps must be non-zero.
-func (d *dynamicGap) Set(v Key) {
+func (d *reclaimdynamicGap) Set(v uint64) {
d[:][0] = v
}
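
The [reclaimtrackGaps]uint64 trick works because a zero-length array occupies no space, so instantiations with trackGaps == 0 pay nothing for the unused maxGap field (the field is not last in the node struct, which avoids Go's trailing zero-size-field padding). A standalone check with hypothetical struct names:

package main

import (
	"fmt"
	"unsafe"
)

// nodeWithGap mimics a node layout instantiated with trackGaps == 1.
type nodeWithGap struct {
	nrSegments int
	maxGap     [1]uint64
	keys       [4]uint64
}

// nodeWithoutGap mimics the same layout with trackGaps == 0: the zero-length
// array contributes no bytes.
type nodeWithoutGap struct {
	nrSegments int
	maxGap     [0]uint64
	keys       [4]uint64
}

func main() {
	fmt.Println(unsafe.Sizeof(nodeWithGap{}))    // 48 on typical 64-bit platforms
	fmt.Println(unsafe.Sizeof(nodeWithoutGap{})) // 40 on typical 64-bit platforms
}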
-// Functions is a required type parameter that must be a struct implementing
-// the methods defined by Functions.
-type Functions interface {
- // MinKey returns the minimum allowed key.
- MinKey() Key
-
- // MaxKey returns the maximum allowed key + 1.
- MaxKey() Key
-
- // ClearValue deinitializes the given value. (For example, if Value is a
- // pointer or interface type, ClearValue should set it to nil.)
- ClearValue(*Value)
-
- // Merge attempts to merge the values corresponding to two consecutive
- // segments. If successful, Merge returns (merged value, true). Otherwise,
- // it returns (unspecified, false).
- //
- // Preconditions: r1.End == r2.Start.
- //
- // Postconditions: If merging succeeds, val1 and val2 are invalidated.
- Merge(r1 Range, val1 Value, r2 Range, val2 Value) (Value, bool)
-
- // Split splits a segment's value at a key within its range, such that the
- // first returned value corresponds to the range [r.Start, split) and the
- // second returned value corresponds to the range [split, r.End).
- //
- // Preconditions: r.Start < split < r.End.
- //
- // Postconditions: The original value val is invalidated.
- Split(r Range, val Value, split Key) (Value, Value)
-}
-
const (
// minDegree is the minimum degree of an internal node in a Set B-tree.
//
@@ -108,9 +49,9 @@ const (
//
// Our implementation requires minDegree >= 3. Higher values of minDegree
// usually improve performance, but increase memory usage for small sets.
- minDegree = 3
+ reclaimminDegree = 10
- maxDegree = 2 * minDegree
+ reclaimmaxDegree = 2 * reclaimminDegree
)
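
Purely illustrative arithmetic for the constants above: the instantiation raises minDegree from the template's 3 to 10, so each node holds up to maxDegree-1 segments and maxDegree children.

package main

import "fmt"

func main() {
	for _, minDegree := range []int{3, 10} {
		maxDegree := 2 * minDegree
		fmt.Printf("minDegree=%d: up to %d segments and %d children per node\n",
			minDegree, maxDegree-1, maxDegree)
	}
}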
// A Set is a mapping of segments with non-overlapping Range keys. The zero
@@ -118,19 +59,19 @@ const (
// copyable. Set is thread-compatible.
//
// +stateify savable
-type Set struct {
- root node `state:".(*SegmentDataSlices)"`
+type reclaimSet struct {
+ root reclaimnode `state:".(*reclaimSegmentDataSlices)"`
}
// IsEmpty returns true if the set contains no segments.
-func (s *Set) IsEmpty() bool {
+func (s *reclaimSet) IsEmpty() bool {
return s.root.nrSegments == 0
}
// IsEmptyRange returns true iff no segments in the set overlap the given
// range. This is semantically equivalent to s.SpanRange(r) == 0, but may be
// more efficient.
-func (s *Set) IsEmptyRange(r Range) bool {
+func (s *reclaimSet) IsEmptyRange(r __generics_imported0.FileRange) bool {
switch {
case r.Length() < 0:
panic(fmt.Sprintf("invalid range %v", r))
@@ -145,8 +86,8 @@ func (s *Set) IsEmptyRange(r Range) bool {
}
// Span returns the total size of all segments in the set.
-func (s *Set) Span() Key {
- var sz Key
+func (s *reclaimSet) Span() uint64 {
+ var sz uint64
for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
sz += seg.Range().Length()
}
@@ -155,14 +96,14 @@ func (s *Set) Span() Key {
// SpanRange returns the total size of the intersection of segments in the set
// with the given range.
-func (s *Set) SpanRange(r Range) Key {
+func (s *reclaimSet) SpanRange(r __generics_imported0.FileRange) uint64 {
switch {
case r.Length() < 0:
panic(fmt.Sprintf("invalid range %v", r))
case r.Length() == 0:
return 0
}
- var sz Key
+ var sz uint64
for seg := s.LowerBoundSegment(r.Start); seg.Ok() && seg.Start() < r.End; seg = seg.NextSegment() {
sz += seg.Range().Intersect(r).Length()
}
@@ -171,56 +112,55 @@ func (s *Set) SpanRange(r Range) Key {
// FirstSegment returns the first segment in the set. If the set is empty,
// FirstSegment returns a terminal iterator.
-func (s *Set) FirstSegment() Iterator {
+func (s *reclaimSet) FirstSegment() reclaimIterator {
if s.root.nrSegments == 0 {
- return Iterator{}
+ return reclaimIterator{}
}
return s.root.firstSegment()
}
// LastSegment returns the last segment in the set. If the set is empty,
// LastSegment returns a terminal iterator.
-func (s *Set) LastSegment() Iterator {
+func (s *reclaimSet) LastSegment() reclaimIterator {
if s.root.nrSegments == 0 {
- return Iterator{}
+ return reclaimIterator{}
}
return s.root.lastSegment()
}
// FirstGap returns the first gap in the set.
-func (s *Set) FirstGap() GapIterator {
+func (s *reclaimSet) FirstGap() reclaimGapIterator {
n := &s.root
for n.hasChildren {
n = n.children[0]
}
- return GapIterator{n, 0}
+ return reclaimGapIterator{n, 0}
}
// LastGap returns the last gap in the set.
-func (s *Set) LastGap() GapIterator {
+func (s *reclaimSet) LastGap() reclaimGapIterator {
n := &s.root
for n.hasChildren {
n = n.children[n.nrSegments]
}
- return GapIterator{n, n.nrSegments}
+ return reclaimGapIterator{n, n.nrSegments}
}
// Find returns the segment or gap whose range contains the given key. If a
// segment is found, the returned Iterator is non-terminal and the
// returned GapIterator is terminal. Otherwise, the returned Iterator is
// terminal and the returned GapIterator is non-terminal.
-func (s *Set) Find(key Key) (Iterator, GapIterator) {
+func (s *reclaimSet) Find(key uint64) (reclaimIterator, reclaimGapIterator) {
n := &s.root
for {
- // Binary search invariant: the correct value of i lies within [lower,
- // upper].
+
lower := 0
upper := n.nrSegments
for lower < upper {
i := lower + (upper-lower)/2
if r := n.keys[i]; key < r.End {
if key >= r.Start {
- return Iterator{n, i}, GapIterator{}
+ return reclaimIterator{n, i}, reclaimGapIterator{}
}
upper = i
} else {
@@ -229,7 +169,7 @@ func (s *Set) Find(key Key) (Iterator, GapIterator) {
}
i := lower
if !n.hasChildren {
- return Iterator{}, GapIterator{n, i}
+ return reclaimIterator{}, reclaimGapIterator{n, i}
}
n = n.children[i]
}
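
The per-node loop above is a plain binary search over the node's sorted, non-overlapping keys. A flat-slice sketch of the same invariant (hypothetical names; the real Find additionally descends into children):

package main

import "fmt"

// rng is a half-open key range [start, end).
type rng struct{ start, end uint64 }

// find returns (index, true) if keys[index] contains key, or (gap, false)
// where gap is the number of segments lying entirely before key.
func find(keys []rng, key uint64) (int, bool) {
	lower, upper := 0, len(keys)
	for lower < upper {
		i := lower + (upper-lower)/2
		if r := keys[i]; key < r.end {
			if key >= r.start {
				return i, true
			}
			upper = i
		} else {
			lower = i + 1
		}
	}
	return lower, false
}

func main() {
	keys := []rng{{0, 4096}, {8192, 12288}}
	fmt.Println(find(keys, 9000)) // 1 true
	fmt.Println(find(keys, 5000)) // 1 false (the gap between the two segments)
}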
@@ -237,7 +177,7 @@ func (s *Set) Find(key Key) (Iterator, GapIterator) {
// FindSegment returns the segment whose range contains the given key. If no
// such segment exists, FindSegment returns a terminal iterator.
-func (s *Set) FindSegment(key Key) Iterator {
+func (s *reclaimSet) FindSegment(key uint64) reclaimIterator {
seg, _ := s.Find(key)
return seg
}
@@ -245,7 +185,7 @@ func (s *Set) FindSegment(key Key) Iterator {
// LowerBoundSegment returns the segment with the lowest range that contains a
// key greater than or equal to min. If no such segment exists,
// LowerBoundSegment returns a terminal iterator.
-func (s *Set) LowerBoundSegment(min Key) Iterator {
+func (s *reclaimSet) LowerBoundSegment(min uint64) reclaimIterator {
seg, gap := s.Find(min)
if seg.Ok() {
return seg
@@ -256,7 +196,7 @@ func (s *Set) LowerBoundSegment(min Key) Iterator {
// UpperBoundSegment returns the segment with the highest range that contains a
// key less than or equal to max. If no such segment exists, UpperBoundSegment
// returns a terminal iterator.
-func (s *Set) UpperBoundSegment(max Key) Iterator {
+func (s *reclaimSet) UpperBoundSegment(max uint64) reclaimIterator {
seg, gap := s.Find(max)
if seg.Ok() {
return seg
@@ -267,14 +207,14 @@ func (s *Set) UpperBoundSegment(max Key) Iterator {
// FindGap returns the gap containing the given key. If no such gap exists
// (i.e. the set contains a segment containing that key), FindGap returns a
// terminal iterator.
-func (s *Set) FindGap(key Key) GapIterator {
+func (s *reclaimSet) FindGap(key uint64) reclaimGapIterator {
_, gap := s.Find(key)
return gap
}
// LowerBoundGap returns the gap with the lowest range that is greater than or
// equal to min.
-func (s *Set) LowerBoundGap(min Key) GapIterator {
+func (s *reclaimSet) LowerBoundGap(min uint64) reclaimGapIterator {
seg, gap := s.Find(min)
if gap.Ok() {
return gap
@@ -284,7 +224,7 @@ func (s *Set) LowerBoundGap(min Key) GapIterator {
// UpperBoundGap returns the gap with the highest range that is less than or
// equal to max.
-func (s *Set) UpperBoundGap(max Key) GapIterator {
+func (s *reclaimSet) UpperBoundGap(max uint64) reclaimGapIterator {
seg, gap := s.Find(max)
if gap.Ok() {
return gap
@@ -296,7 +236,7 @@ func (s *Set) UpperBoundGap(max Key) GapIterator {
// segment can be merged with adjacent segments, Add will do so. If the new
// segment would overlap an existing segment, Add returns false. If Add
// succeeds, all existing iterators are invalidated.
-func (s *Set) Add(r Range, val Value) bool {
+func (s *reclaimSet) Add(r __generics_imported0.FileRange, val reclaimSetValue) bool {
if r.Length() <= 0 {
panic(fmt.Sprintf("invalid segment range %v", r))
}
@@ -315,7 +255,7 @@ func (s *Set) Add(r Range, val Value) bool {
// If it would overlap an existing segment, AddWithoutMerging does nothing and
// returns false. If AddWithoutMerging succeeds, all existing iterators are
// invalidated.
-func (s *Set) AddWithoutMerging(r Range, val Value) bool {
+func (s *reclaimSet) AddWithoutMerging(r __generics_imported0.FileRange, val reclaimSetValue) bool {
if r.Length() <= 0 {
panic(fmt.Sprintf("invalid segment range %v", r))
}
@@ -342,7 +282,7 @@ func (s *Set) AddWithoutMerging(r Range, val Value) bool {
// Merge, but may be more efficient. Note that there is no unchecked variant of
// Insert since Insert must retrieve and inspect gap's predecessor and
// successor segments regardless.
-func (s *Set) Insert(gap GapIterator, r Range, val Value) Iterator {
+func (s *reclaimSet) Insert(gap reclaimGapIterator, r __generics_imported0.FileRange, val reclaimSetValue) reclaimIterator {
if r.Length() <= 0 {
panic(fmt.Sprintf("invalid segment range %v", r))
}
@@ -354,8 +294,8 @@ func (s *Set) Insert(gap GapIterator, r Range, val Value) Iterator {
panic(fmt.Sprintf("new segment %v overlaps successor %v", r, next.Range()))
}
if prev.Ok() && prev.End() == r.Start {
- if mval, ok := (Functions{}).Merge(prev.Range(), prev.Value(), r, val); ok {
- shrinkMaxGap := trackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
+ if mval, ok := (reclaimSetFunctions{}).Merge(prev.Range(), prev.Value(), r, val); ok {
+ shrinkMaxGap := reclaimtrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
prev.SetEndUnchecked(r.End)
prev.SetValue(mval)
if shrinkMaxGap {
@@ -363,7 +303,7 @@ func (s *Set) Insert(gap GapIterator, r Range, val Value) Iterator {
}
if next.Ok() && next.Start() == r.End {
val = mval
- if mval, ok := (Functions{}).Merge(prev.Range(), val, next.Range(), next.Value()); ok {
+ if mval, ok := (reclaimSetFunctions{}).Merge(prev.Range(), val, next.Range(), next.Value()); ok {
prev.SetEndUnchecked(next.End())
prev.SetValue(mval)
return s.Remove(next).PrevSegment()
@@ -373,8 +313,8 @@ func (s *Set) Insert(gap GapIterator, r Range, val Value) Iterator {
}
}
if next.Ok() && next.Start() == r.End {
- if mval, ok := (Functions{}).Merge(r, val, next.Range(), next.Value()); ok {
- shrinkMaxGap := trackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
+ if mval, ok := (reclaimSetFunctions{}).Merge(r, val, next.Range(), next.Value()); ok {
+ shrinkMaxGap := reclaimtrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
next.SetStartUnchecked(r.Start)
next.SetValue(mval)
if shrinkMaxGap {
@@ -383,7 +323,7 @@ func (s *Set) Insert(gap GapIterator, r Range, val Value) Iterator {
return next
}
}
- // InsertWithoutMergingUnchecked will maintain maxGap if necessary.
+
return s.InsertWithoutMergingUnchecked(gap, r, val)
}
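
Insert prefers extending a mergeable neighbor over adding a new segment. A much-simplified, slice-based sketch of that idea (hypothetical names; the real code also merges on the successor side and maintains maxGap):

package main

import "fmt"

// seg is a half-open range [start, end) carrying a string value.
type seg struct {
	start, end uint64
	val        string
}

// merge succeeds only when both segments carry the same value.
func merge(a, b seg) (string, bool) { return a.val, a.val == b.val }

// insert assumes segs is sorted, r overlaps nothing, and i is the position
// with segs[i-1].end <= r.start and r.end <= segs[i].start. If r abuts its
// predecessor and a merge succeeds, the predecessor is extended in place.
func insert(segs []seg, i int, r seg) []seg {
	if i > 0 && segs[i-1].end == r.start {
		if mval, ok := merge(segs[i-1], r); ok {
			segs[i-1].end, segs[i-1].val = r.end, mval
			return segs
		}
	}
	out := append(segs[:i:i], r)
	return append(out, segs[i:]...)
}

func main() {
	segs := []seg{{0, 4096, "dirty"}}
	segs = insert(segs, 1, seg{4096, 8192, "dirty"}) // merges with predecessor
	fmt.Println(segs) // [{0 8192 dirty}]
	segs = insert(segs, 1, seg{8192, 12288, "clean"}) // values differ, no merge
	fmt.Println(segs) // [{0 8192 dirty} {8192 12288 clean}]
}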
@@ -393,7 +333,7 @@ func (s *Set) Insert(gap GapIterator, r Range, val Value) Iterator {
//
// If the gap cannot accommodate the segment, or if r is invalid,
// InsertWithoutMerging panics.
-func (s *Set) InsertWithoutMerging(gap GapIterator, r Range, val Value) Iterator {
+func (s *reclaimSet) InsertWithoutMerging(gap reclaimGapIterator, r __generics_imported0.FileRange, val reclaimSetValue) reclaimIterator {
if r.Length() <= 0 {
panic(fmt.Sprintf("invalid segment range %v", r))
}
@@ -410,9 +350,9 @@ func (s *Set) InsertWithoutMerging(gap GapIterator, r Range, val Value) Iterator
// Preconditions:
// * r.Start >= gap.Start().
// * r.End <= gap.End().
-func (s *Set) InsertWithoutMergingUnchecked(gap GapIterator, r Range, val Value) Iterator {
+func (s *reclaimSet) InsertWithoutMergingUnchecked(gap reclaimGapIterator, r __generics_imported0.FileRange, val reclaimSetValue) reclaimIterator {
gap = gap.node.rebalanceBeforeInsert(gap)
- splitMaxGap := trackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
+ splitMaxGap := reclaimtrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
copy(gap.node.keys[gap.index+1:], gap.node.keys[gap.index:gap.node.nrSegments])
copy(gap.node.values[gap.index+1:], gap.node.values[gap.index:gap.node.nrSegments])
gap.node.keys[gap.index] = r
@@ -421,56 +361,46 @@ func (s *Set) InsertWithoutMergingUnchecked(gap GapIterator, r Range, val Value)
if splitMaxGap {
gap.node.updateMaxGapLeaf()
}
- return Iterator{gap.node, gap.index}
+ return reclaimIterator{gap.node, gap.index}
}
// Remove removes the given segment and returns an iterator to the vacated gap.
// All existing iterators (including seg, but not including the returned
// iterator) are invalidated.
-func (s *Set) Remove(seg Iterator) GapIterator {
- // We only want to remove directly from a leaf node.
+func (s *reclaimSet) Remove(seg reclaimIterator) reclaimGapIterator {
+
if seg.node.hasChildren {
- // Since seg.node has children, the removed segment must have a
- // predecessor (at the end of the rightmost leaf of its left child
- // subtree). Move the contents of that predecessor into the removed
- // segment's position, and remove that predecessor instead. (We choose
- // to steal the predecessor rather than the successor because removing
- // from the end of a leaf node doesn't involve any copying unless
- // merging is required.)
+
victim := seg.PrevSegment()
- // This must be unchecked since until victim is removed, seg and victim
- // overlap.
+
seg.SetRangeUnchecked(victim.Range())
seg.SetValue(victim.Value())
- // Need to update the nextAdjacentNode's maxGap because the gap in between
- // must have been modified by updating seg.Range() to victim.Range().
- // seg.NextSegment() must exist since the last segment can't be in a
- // non-leaf node.
+
nextAdjacentNode := seg.NextSegment().node
- if trackGaps != 0 {
+ if reclaimtrackGaps != 0 {
nextAdjacentNode.updateMaxGapLeaf()
}
return s.Remove(victim).NextGap()
}
copy(seg.node.keys[seg.index:], seg.node.keys[seg.index+1:seg.node.nrSegments])
copy(seg.node.values[seg.index:], seg.node.values[seg.index+1:seg.node.nrSegments])
- Functions{}.ClearValue(&seg.node.values[seg.node.nrSegments-1])
+ reclaimSetFunctions{}.ClearValue(&seg.node.values[seg.node.nrSegments-1])
seg.node.nrSegments--
- if trackGaps != 0 {
+ if reclaimtrackGaps != 0 {
seg.node.updateMaxGapLeaf()
}
- return seg.node.rebalanceAfterRemove(GapIterator{seg.node, seg.index})
+ return seg.node.rebalanceAfterRemove(reclaimGapIterator{seg.node, seg.index})
}
// RemoveAll removes all segments from the set. All existing iterators are
// invalidated.
-func (s *Set) RemoveAll() {
- s.root = node{}
+func (s *reclaimSet) RemoveAll() {
+ s.root = reclaimnode{}
}
// RemoveRange removes all segments in the given range. An iterator to the
// newly formed gap is returned, and all existing iterators are invalidated.
-func (s *Set) RemoveRange(r Range) GapIterator {
+func (s *reclaimSet) RemoveRange(r __generics_imported0.FileRange) reclaimGapIterator {
seg, gap := s.Find(r.Start)
if seg.Ok() {
seg = s.Isolate(seg, r)
@@ -488,7 +418,7 @@ func (s *Set) RemoveRange(r Range) GapIterator {
// invalidated. Otherwise, Merge returns a terminal iterator.
//
// If first is not the predecessor of second, Merge panics.
-func (s *Set) Merge(first, second Iterator) Iterator {
+func (s *reclaimSet) Merge(first, second reclaimIterator) reclaimIterator {
if first.NextSegment() != second {
panic(fmt.Sprintf("attempt to merge non-neighboring segments %v, %v", first.Range(), second.Range()))
}
@@ -502,23 +432,22 @@ func (s *Set) Merge(first, second Iterator) Iterator {
//
// Precondition: first is the predecessor of second: first.NextSegment() ==
// second, first == second.PrevSegment().
-func (s *Set) MergeUnchecked(first, second Iterator) Iterator {
+func (s *reclaimSet) MergeUnchecked(first, second reclaimIterator) reclaimIterator {
if first.End() == second.Start() {
- if mval, ok := (Functions{}).Merge(first.Range(), first.Value(), second.Range(), second.Value()); ok {
- // N.B. This must be unchecked because until s.Remove(second), first
- // overlaps second.
+ if mval, ok := (reclaimSetFunctions{}).Merge(first.Range(), first.Value(), second.Range(), second.Value()); ok {
+
first.SetEndUnchecked(second.End())
first.SetValue(mval)
- // Remove will handle the maxGap update if necessary.
+
return s.Remove(second).PrevSegment()
}
}
- return Iterator{}
+ return reclaimIterator{}
}
// MergeAll attempts to merge all adjacent segments in the set. All existing
// iterators are invalidated.
-func (s *Set) MergeAll() {
+func (s *reclaimSet) MergeAll() {
seg := s.FirstSegment()
if !seg.Ok() {
return
@@ -535,7 +464,7 @@ func (s *Set) MergeAll() {
// MergeRange attempts to merge all adjacent segments that contain a key in the
// specific range. All existing iterators are invalidated.
-func (s *Set) MergeRange(r Range) {
+func (s *reclaimSet) MergeRange(r __generics_imported0.FileRange) {
seg := s.LowerBoundSegment(r.Start)
if !seg.Ok() {
return
@@ -552,7 +481,7 @@ func (s *Set) MergeRange(r Range) {
// MergeAdjacent attempts to merge the segment containing r.Start with its
// predecessor, and the segment containing r.End-1 with its successor.
-func (s *Set) MergeAdjacent(r Range) {
+func (s *reclaimSet) MergeAdjacent(r __generics_imported0.FileRange) {
first := s.FindSegment(r.Start)
if first.Ok() {
if prev := first.PrevSegment(); prev.Ok() {
@@ -575,7 +504,7 @@ func (s *Set) MergeAdjacent(r Range) {
// end of the segment's range, so splitting would produce a segment with zero
// length, or because split falls outside the segment's range altogether),
// Split panics.
-func (s *Set) Split(seg Iterator, split Key) (Iterator, Iterator) {
+func (s *reclaimSet) Split(seg reclaimIterator, split uint64) (reclaimIterator, reclaimIterator) {
if !seg.Range().CanSplitAt(split) {
panic(fmt.Sprintf("can't split %v at %v", seg.Range(), split))
}
@@ -587,20 +516,20 @@ func (s *Set) Split(seg Iterator, split Key) (Iterator, Iterator) {
// seg, but not including the returned iterators) are invalidated.
//
// Preconditions: seg.Start() < key < seg.End().
-func (s *Set) SplitUnchecked(seg Iterator, split Key) (Iterator, Iterator) {
- val1, val2 := (Functions{}).Split(seg.Range(), seg.Value(), split)
+func (s *reclaimSet) SplitUnchecked(seg reclaimIterator, split uint64) (reclaimIterator, reclaimIterator) {
+ val1, val2 := (reclaimSetFunctions{}).Split(seg.Range(), seg.Value(), split)
end2 := seg.End()
seg.SetEndUnchecked(split)
seg.SetValue(val1)
- seg2 := s.InsertWithoutMergingUnchecked(seg.NextGap(), Range{split, end2}, val2)
- // seg may now be invalid due to the Insert.
+ seg2 := s.InsertWithoutMergingUnchecked(seg.NextGap(), __generics_imported0.FileRange{split, end2}, val2)
+
return seg2.PrevSegment(), seg2
}
// SplitAt splits the segment straddling split, if one exists. SplitAt returns
// true if a segment was split and false otherwise. If SplitAt splits a
// segment, all existing iterators are invalidated.
-func (s *Set) SplitAt(split Key) bool {
+func (s *reclaimSet) SplitAt(split uint64) bool {
if seg := s.FindSegment(split); seg.Ok() && seg.Range().CanSplitAt(split) {
s.SplitUnchecked(seg, split)
return true
@@ -612,7 +541,7 @@ func (s *Set) SplitAt(split Key) bool {
// splitting at r.Start and r.End if necessary, and returns an updated iterator
// to the bounded segment. All existing iterators (including seg, but not
// including the returned iterators) are invalidated.
-func (s *Set) Isolate(seg Iterator, r Range) Iterator {
+func (s *reclaimSet) Isolate(seg reclaimIterator, r __generics_imported0.FileRange) reclaimIterator {
if seg.Range().CanSplitAt(r.Start) {
_, seg = s.SplitUnchecked(seg, r.Start)
}
@@ -629,7 +558,7 @@ func (s *Set) Isolate(seg Iterator, r Range) Iterator {
// are invalidated.
//
// N.B. The Iterator must not be invalidated by the function.
-func (s *Set) ApplyContiguous(r Range, fn func(seg Iterator)) GapIterator {
+func (s *reclaimSet) ApplyContiguous(r __generics_imported0.FileRange, fn func(seg reclaimIterator)) reclaimGapIterator {
seg, gap := s.Find(r.Start)
if !seg.Ok() {
return gap
@@ -638,7 +567,7 @@ func (s *Set) ApplyContiguous(r Range, fn func(seg Iterator)) GapIterator {
seg = s.Isolate(seg, r)
fn(seg)
if seg.End() >= r.End {
- return GapIterator{}
+ return reclaimGapIterator{}
}
gap = seg.NextGap()
if !gap.IsEmpty() {
@@ -646,15 +575,14 @@ func (s *Set) ApplyContiguous(r Range, fn func(seg Iterator)) GapIterator {
}
seg = gap.NextSegment()
if !seg.Ok() {
- // This implies that the last segment extended all the
- // way to the maximum value, since the gap was empty.
- return GapIterator{}
+
+ return reclaimGapIterator{}
}
}
}
// +stateify savable
-type node struct {
+type reclaimnode struct {
// An internal binary tree node looks like:
//
// K
@@ -676,7 +604,7 @@ type node struct {
// parent is a pointer to this node's parent. If this node is root, parent
// is nil.
- parent *node
+ parent *reclaimnode
// parentIndex is the index of this node in parent.children.
parentIndex int
@@ -690,43 +618,43 @@ type node struct {
// maximum gap among all the (nrSegments+1) gaps formed by its nrSegments keys
// including the 0th and nrSegments-th gap possibly shared with its upper-level
// nodes; if it's a non-leaf node, it's the max of all children's maxGap.
- maxGap dynamicGap
+ maxGap reclaimdynamicGap
// Nodes store keys and values in separate arrays to maximize locality in
// the common case (scanning keys for lookup).
- keys [maxDegree - 1]Range
- values [maxDegree - 1]Value
- children [maxDegree]*node
+ keys [reclaimmaxDegree - 1]__generics_imported0.FileRange
+ values [reclaimmaxDegree - 1]reclaimSetValue
+ children [reclaimmaxDegree]*reclaimnode
}
// firstSegment returns the first segment in the subtree rooted by n.
//
// Preconditions: n.nrSegments != 0.
-func (n *node) firstSegment() Iterator {
+func (n *reclaimnode) firstSegment() reclaimIterator {
for n.hasChildren {
n = n.children[0]
}
- return Iterator{n, 0}
+ return reclaimIterator{n, 0}
}
// lastSegment returns the last segment in the subtree rooted by n.
//
// Preconditions: n.nrSegments != 0.
-func (n *node) lastSegment() Iterator {
+func (n *reclaimnode) lastSegment() reclaimIterator {
for n.hasChildren {
n = n.children[n.nrSegments]
}
- return Iterator{n, n.nrSegments - 1}
+ return reclaimIterator{n, n.nrSegments - 1}
}
-func (n *node) prevSibling() *node {
+func (n *reclaimnode) prevSibling() *reclaimnode {
if n.parent == nil || n.parentIndex == 0 {
return nil
}
return n.parent.children[n.parentIndex-1]
}
-func (n *node) nextSibling() *node {
+func (n *reclaimnode) nextSibling() *reclaimnode {
if n.parent == nil || n.parentIndex == n.parent.nrSegments {
return nil
}
@@ -736,40 +664,38 @@ func (n *node) nextSibling() *node {
// rebalanceBeforeInsert splits n and its ancestors if they are full, as
// required for insertion, and returns an updated iterator to the position
// represented by gap.
-func (n *node) rebalanceBeforeInsert(gap GapIterator) GapIterator {
- if n.nrSegments < maxDegree-1 {
+func (n *reclaimnode) rebalanceBeforeInsert(gap reclaimGapIterator) reclaimGapIterator {
+ if n.nrSegments < reclaimmaxDegree-1 {
return gap
}
if n.parent != nil {
gap = n.parent.rebalanceBeforeInsert(gap)
}
if n.parent == nil {
- // n is root. Move all segments before and after n's median segment
- // into new child nodes adjacent to the median segment, which is now
- // the only segment in root.
- left := &node{
- nrSegments: minDegree - 1,
+
+ left := &reclaimnode{
+ nrSegments: reclaimminDegree - 1,
parent: n,
parentIndex: 0,
hasChildren: n.hasChildren,
}
- right := &node{
- nrSegments: minDegree - 1,
+ right := &reclaimnode{
+ nrSegments: reclaimminDegree - 1,
parent: n,
parentIndex: 1,
hasChildren: n.hasChildren,
}
- copy(left.keys[:minDegree-1], n.keys[:minDegree-1])
- copy(left.values[:minDegree-1], n.values[:minDegree-1])
- copy(right.keys[:minDegree-1], n.keys[minDegree:])
- copy(right.values[:minDegree-1], n.values[minDegree:])
- n.keys[0], n.values[0] = n.keys[minDegree-1], n.values[minDegree-1]
- zeroValueSlice(n.values[1:])
+ copy(left.keys[:reclaimminDegree-1], n.keys[:reclaimminDegree-1])
+ copy(left.values[:reclaimminDegree-1], n.values[:reclaimminDegree-1])
+ copy(right.keys[:reclaimminDegree-1], n.keys[reclaimminDegree:])
+ copy(right.values[:reclaimminDegree-1], n.values[reclaimminDegree:])
+ n.keys[0], n.values[0] = n.keys[reclaimminDegree-1], n.values[reclaimminDegree-1]
+ reclaimzeroValueSlice(n.values[1:])
if n.hasChildren {
- copy(left.children[:minDegree], n.children[:minDegree])
- copy(right.children[:minDegree], n.children[minDegree:])
- zeroNodeSlice(n.children[2:])
- for i := 0; i < minDegree; i++ {
+ copy(left.children[:reclaimminDegree], n.children[:reclaimminDegree])
+ copy(right.children[:reclaimminDegree], n.children[reclaimminDegree:])
+ reclaimzeroNodeSlice(n.children[2:])
+ for i := 0; i < reclaimminDegree; i++ {
left.children[i].parent = left
left.children[i].parentIndex = i
right.children[i].parent = right
@@ -780,66 +706,60 @@ func (n *node) rebalanceBeforeInsert(gap GapIterator) GapIterator {
n.hasChildren = true
n.children[0] = left
n.children[1] = right
- // In this case, n's maxGap won't violated as it's still the root,
- // but the left and right children should be updated locally as they
- // are newly split from n.
- if trackGaps != 0 {
+
+ if reclaimtrackGaps != 0 {
left.updateMaxGapLocal()
right.updateMaxGapLocal()
}
if gap.node != n {
return gap
}
- if gap.index < minDegree {
- return GapIterator{left, gap.index}
+ if gap.index < reclaimminDegree {
+ return reclaimGapIterator{left, gap.index}
}
- return GapIterator{right, gap.index - minDegree}
+ return reclaimGapIterator{right, gap.index - reclaimminDegree}
}
- // n is non-root. Move n's median segment into its parent node (which can't
- // be full because we've already invoked n.parent.rebalanceBeforeInsert)
- // and move all segments after n's median into a new sibling node (the
- // median segment's right child subtree).
+
copy(n.parent.keys[n.parentIndex+1:], n.parent.keys[n.parentIndex:n.parent.nrSegments])
copy(n.parent.values[n.parentIndex+1:], n.parent.values[n.parentIndex:n.parent.nrSegments])
- n.parent.keys[n.parentIndex], n.parent.values[n.parentIndex] = n.keys[minDegree-1], n.values[minDegree-1]
+ n.parent.keys[n.parentIndex], n.parent.values[n.parentIndex] = n.keys[reclaimminDegree-1], n.values[reclaimminDegree-1]
copy(n.parent.children[n.parentIndex+2:], n.parent.children[n.parentIndex+1:n.parent.nrSegments+1])
for i := n.parentIndex + 2; i < n.parent.nrSegments+2; i++ {
n.parent.children[i].parentIndex = i
}
- sibling := &node{
- nrSegments: minDegree - 1,
+ sibling := &reclaimnode{
+ nrSegments: reclaimminDegree - 1,
parent: n.parent,
parentIndex: n.parentIndex + 1,
hasChildren: n.hasChildren,
}
n.parent.children[n.parentIndex+1] = sibling
n.parent.nrSegments++
- copy(sibling.keys[:minDegree-1], n.keys[minDegree:])
- copy(sibling.values[:minDegree-1], n.values[minDegree:])
- zeroValueSlice(n.values[minDegree-1:])
+ copy(sibling.keys[:reclaimminDegree-1], n.keys[reclaimminDegree:])
+ copy(sibling.values[:reclaimminDegree-1], n.values[reclaimminDegree:])
+ reclaimzeroValueSlice(n.values[reclaimminDegree-1:])
if n.hasChildren {
- copy(sibling.children[:minDegree], n.children[minDegree:])
- zeroNodeSlice(n.children[minDegree:])
- for i := 0; i < minDegree; i++ {
+ copy(sibling.children[:reclaimminDegree], n.children[reclaimminDegree:])
+ reclaimzeroNodeSlice(n.children[reclaimminDegree:])
+ for i := 0; i < reclaimminDegree; i++ {
sibling.children[i].parent = sibling
sibling.children[i].parentIndex = i
}
}
- n.nrSegments = minDegree - 1
- // MaxGap of n's parent is not violated because the segments within is not changed.
- // n and its sibling's maxGap need to be updated locally as they are two new nodes split from old n.
- if trackGaps != 0 {
+ n.nrSegments = reclaimminDegree - 1
+
+ if reclaimtrackGaps != 0 {
n.updateMaxGapLocal()
sibling.updateMaxGapLocal()
}
- // gap.node can't be n.parent because gaps are always in leaf nodes.
+
if gap.node != n {
return gap
}
- if gap.index < minDegree {
+ if gap.index < reclaimminDegree {
return gap
}
- return GapIterator{sibling, gap.index - minDegree}
+ return reclaimGapIterator{sibling, gap.index - reclaimminDegree}
}
// rebalanceAfterRemove "unsplits" n and its ancestors if they are deficient
@@ -848,41 +768,24 @@ func (n *node) rebalanceBeforeInsert(gap GapIterator) GapIterator {
//
// Precondition: n is the only node in the tree that may currently violate a
// B-tree invariant.
-func (n *node) rebalanceAfterRemove(gap GapIterator) GapIterator {
+func (n *reclaimnode) rebalanceAfterRemove(gap reclaimGapIterator) reclaimGapIterator {
for {
- if n.nrSegments >= minDegree-1 {
+ if n.nrSegments >= reclaimminDegree-1 {
return gap
}
if n.parent == nil {
- // Root is allowed to be deficient.
+
return gap
}
- // There's one other thing we can do before resorting to unsplitting.
- // If either sibling node has at least minDegree segments, rotate that
- // sibling's closest segment through the segment in the parent that
- // separates us. That is, given:
- //
- // ... D ...
- // / \
- // ... B C] [E ...
- //
- // where the node containing E is deficient, end up with:
- //
- // ... C ...
- // / \
- // ... B] [D E ...
- //
- // As in Set.Remove, prefer rotating from the end of the sibling to the
- // left: by precondition, n.node has fewer segments (to memcpy) than
- // the sibling does.
- if sibling := n.prevSibling(); sibling != nil && sibling.nrSegments >= minDegree {
+
+ if sibling := n.prevSibling(); sibling != nil && sibling.nrSegments >= reclaimminDegree {
copy(n.keys[1:], n.keys[:n.nrSegments])
copy(n.values[1:], n.values[:n.nrSegments])
n.keys[0] = n.parent.keys[n.parentIndex-1]
n.values[0] = n.parent.values[n.parentIndex-1]
n.parent.keys[n.parentIndex-1] = sibling.keys[sibling.nrSegments-1]
n.parent.values[n.parentIndex-1] = sibling.values[sibling.nrSegments-1]
- Functions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
+ reclaimSetFunctions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
if n.hasChildren {
copy(n.children[1:], n.children[:n.nrSegments+1])
n.children[0] = sibling.children[sibling.nrSegments]
@@ -895,28 +798,27 @@ func (n *node) rebalanceAfterRemove(gap GapIterator) GapIterator {
}
n.nrSegments++
sibling.nrSegments--
- // n's parent's maxGap does not need to be updated as its content is unmodified.
- // n and its sibling must be updated with (new) maxGap because of the shift of keys.
- if trackGaps != 0 {
+
+ if reclaimtrackGaps != 0 {
n.updateMaxGapLocal()
sibling.updateMaxGapLocal()
}
if gap.node == sibling && gap.index == sibling.nrSegments {
- return GapIterator{n, 0}
+ return reclaimGapIterator{n, 0}
}
if gap.node == n {
- return GapIterator{n, gap.index + 1}
+ return reclaimGapIterator{n, gap.index + 1}
}
return gap
}
- if sibling := n.nextSibling(); sibling != nil && sibling.nrSegments >= minDegree {
+ if sibling := n.nextSibling(); sibling != nil && sibling.nrSegments >= reclaimminDegree {
n.keys[n.nrSegments] = n.parent.keys[n.parentIndex]
n.values[n.nrSegments] = n.parent.values[n.parentIndex]
n.parent.keys[n.parentIndex] = sibling.keys[0]
n.parent.values[n.parentIndex] = sibling.values[0]
copy(sibling.keys[:sibling.nrSegments-1], sibling.keys[1:])
copy(sibling.values[:sibling.nrSegments-1], sibling.values[1:])
- Functions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
+ reclaimSetFunctions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
if n.hasChildren {
n.children[n.nrSegments+1] = sibling.children[0]
copy(sibling.children[:sibling.nrSegments], sibling.children[1:])
@@ -929,29 +831,23 @@ func (n *node) rebalanceAfterRemove(gap GapIterator) GapIterator {
}
n.nrSegments++
sibling.nrSegments--
- // n's parent's maxGap does not need to be updated as its content is unmodified.
- // n and its sibling must be updated with (new) maxGap because of the shift of keys.
- if trackGaps != 0 {
+
+ if reclaimtrackGaps != 0 {
n.updateMaxGapLocal()
sibling.updateMaxGapLocal()
}
if gap.node == sibling {
if gap.index == 0 {
- return GapIterator{n, n.nrSegments}
+ return reclaimGapIterator{n, n.nrSegments}
}
- return GapIterator{sibling, gap.index - 1}
+ return reclaimGapIterator{sibling, gap.index - 1}
}
return gap
}
- // Otherwise, we must unsplit.
+
p := n.parent
if p.nrSegments == 1 {
- // Merge all segments in both n and its sibling back into n.parent.
- // This is the reverse of the root splitting case in
- // node.rebalanceBeforeInsert. (Because we require minDegree >= 3,
- // only root can have 1 segment in this path, so this reduces the
- // height of the tree by 1, without violating the constraint that
- // all leaf nodes remain at the same depth.)
+
left, right := p.children[0], p.children[1]
p.nrSegments = left.nrSegments + right.nrSegments + 1
p.hasChildren = left.hasChildren
@@ -972,12 +868,12 @@ func (n *node) rebalanceAfterRemove(gap GapIterator) GapIterator {
p.children[0] = nil
p.children[1] = nil
}
- // No need to update maxGap of p as its content is not changed.
+
if gap.node == left {
- return GapIterator{p, gap.index}
+ return reclaimGapIterator{p, gap.index}
}
if gap.node == right {
- return GapIterator{p, gap.index + left.nrSegments + 1}
+ return reclaimGapIterator{p, gap.index + left.nrSegments + 1}
}
return gap
}
@@ -985,7 +881,7 @@ func (n *node) rebalanceAfterRemove(gap GapIterator) GapIterator {
// two, into whichever of the two nodes comes first. This is the
// reverse of the non-root splitting case in
// node.rebalanceBeforeInsert.
- var left, right *node
+ var left, right *reclaimnode
if n.parentIndex > 0 {
left = n.prevSibling()
right = n
@@ -993,10 +889,9 @@ func (n *node) rebalanceAfterRemove(gap GapIterator) GapIterator {
left = n
right = n.nextSibling()
}
- // Fix up gap first since we need the old left.nrSegments, which
- // merging will change.
+
if gap.node == right {
- gap = GapIterator{left, gap.index + left.nrSegments + 1}
+ gap = reclaimGapIterator{left, gap.index + left.nrSegments + 1}
}
left.keys[left.nrSegments] = p.keys[left.parentIndex]
left.values[left.nrSegments] = p.values[left.parentIndex]
@@ -1012,19 +907,18 @@ func (n *node) rebalanceAfterRemove(gap GapIterator) GapIterator {
left.nrSegments += right.nrSegments + 1
copy(p.keys[left.parentIndex:], p.keys[left.parentIndex+1:p.nrSegments])
copy(p.values[left.parentIndex:], p.values[left.parentIndex+1:p.nrSegments])
- Functions{}.ClearValue(&p.values[p.nrSegments-1])
+ reclaimSetFunctions{}.ClearValue(&p.values[p.nrSegments-1])
copy(p.children[left.parentIndex+1:], p.children[left.parentIndex+2:p.nrSegments+1])
for i := 0; i < p.nrSegments; i++ {
p.children[i].parentIndex = i
}
p.children[p.nrSegments] = nil
p.nrSegments--
- // Update maxGap of left locally, no need to change p and right because
- // p's contents is not changed and right is already invalid.
- if trackGaps != 0 {
+
+ if reclaimtrackGaps != 0 {
left.updateMaxGapLocal()
}
- // This process robs p of one segment, so recurse into rebalancing p.
+
n = p
}
}
@@ -1033,46 +927,42 @@ func (n *node) rebalanceAfterRemove(gap GapIterator) GapIterator {
// necessary update.
//
// Preconditions: n must be a leaf node, trackGaps must be 1.
-func (n *node) updateMaxGapLeaf() {
+func (n *reclaimnode) updateMaxGapLeaf() {
if n.hasChildren {
panic(fmt.Sprintf("updateMaxGapLeaf should always be called on leaf node: %v", n))
}
max := n.calculateMaxGapLeaf()
if max == n.maxGap.Get() {
- // If new max equals the old maxGap, no update is needed.
+
return
}
oldMax := n.maxGap.Get()
n.maxGap.Set(max)
if max > oldMax {
- // Grow ancestor maxGaps.
+
for p := n.parent; p != nil; p = p.parent {
if p.maxGap.Get() >= max {
- // p and its ancestors already contain an equal or larger gap.
+
break
}
- // Only if new maxGap is larger than parent's
- // old maxGap, propagate this update to parent.
+
p.maxGap.Set(max)
}
return
}
- // Shrink ancestor maxGaps.
+
for p := n.parent; p != nil; p = p.parent {
if p.maxGap.Get() > oldMax {
- // p and its ancestors still contain a larger gap.
+
break
}
- // If new max is smaller than the old maxGap, and this gap used
- // to be the maxGap of its parent, iterate parent's children
- // and calculate parent's new maxGap.(It's probable that parent
- // has two children with the old maxGap, but we need to check it anyway.)
+
parentNewMax := p.calculateMaxGapInternal()
if p.maxGap.Get() == parentNewMax {
- // p and its ancestors still contain a gap of at least equal size.
+
break
}
- // If p's new maxGap differs from the old one, propagate this update.
+
p.maxGap.Set(parentNewMax)
}
}
@@ -1081,12 +971,12 @@ func (n *node) updateMaxGapLeaf() {
// propagation to ancestor nodes.
//
// Precondition: trackGaps must be 1.
-func (n *node) updateMaxGapLocal() {
+func (n *reclaimnode) updateMaxGapLocal() {
if !n.hasChildren {
- // Leaf node iterates its gaps.
+
n.maxGap.Set(n.calculateMaxGapLeaf())
} else {
- // Non-leaf node iterates its children.
+
n.maxGap.Set(n.calculateMaxGapInternal())
}
}
@@ -1095,10 +985,10 @@ func (n *node) updateMaxGapLocal() {
// max.
//
// Preconditions: n must be a leaf node.
-func (n *node) calculateMaxGapLeaf() Key {
- max := GapIterator{n, 0}.Range().Length()
+func (n *reclaimnode) calculateMaxGapLeaf() uint64 {
+ max := reclaimGapIterator{n, 0}.Range().Length()
for i := 1; i <= n.nrSegments; i++ {
- if current := (GapIterator{n, i}).Range().Length(); current > max {
+ if current := (reclaimGapIterator{n, i}).Range().Length(); current > max {
max = current
}
}
@@ -1109,7 +999,7 @@ func (n *node) calculateMaxGapLeaf() Key {
// and calculate the max.
//
// Preconditions: n must be a non-leaf node.
-func (n *node) calculateMaxGapInternal() Key {
+func (n *reclaimnode) calculateMaxGapInternal() uint64 {
max := n.children[0].maxGap.Get()
for i := 1; i <= n.nrSegments; i++ {
if current := n.children[i].maxGap.Get(); current > max {
@@ -1121,9 +1011,9 @@ func (n *node) calculateMaxGapInternal() Key {
// searchFirstLargeEnoughGap returns the first gap having at least minSize length
// in the subtree rooted by n. If not found, return a terminal gap iterator.
-func (n *node) searchFirstLargeEnoughGap(minSize Key) GapIterator {
+func (n *reclaimnode) searchFirstLargeEnoughGap(minSize uint64) reclaimGapIterator {
if n.maxGap.Get() < minSize {
- return GapIterator{}
+ return reclaimGapIterator{}
}
if n.hasChildren {
for i := 0; i <= n.nrSegments; i++ {
@@ -1133,7 +1023,7 @@ func (n *node) searchFirstLargeEnoughGap(minSize Key) GapIterator {
}
} else {
for i := 0; i <= n.nrSegments; i++ {
- currentGap := GapIterator{n, i}
+ currentGap := reclaimGapIterator{n, i}
if currentGap.Range().Length() >= minSize {
return currentGap
}
@@ -1144,9 +1034,9 @@ func (n *node) searchFirstLargeEnoughGap(minSize Key) GapIterator {
// searchLastLargeEnoughGap returns the last gap having at least minSize length
// in the subtree rooted by n. If not found, return a terminal gap iterator.
-func (n *node) searchLastLargeEnoughGap(minSize Key) GapIterator {
+func (n *reclaimnode) searchLastLargeEnoughGap(minSize uint64) reclaimGapIterator {
if n.maxGap.Get() < minSize {
- return GapIterator{}
+ return reclaimGapIterator{}
}
if n.hasChildren {
for i := n.nrSegments; i >= 0; i-- {
@@ -1156,7 +1046,7 @@ func (n *node) searchLastLargeEnoughGap(minSize Key) GapIterator {
}
} else {
for i := n.nrSegments; i >= 0; i-- {
- currentGap := GapIterator{n, i}
+ currentGap := reclaimGapIterator{n, i}
if currentGap.Range().Length() >= minSize {
return currentGap
}
@@ -1177,10 +1067,10 @@ func (n *node) searchLastLargeEnoughGap(minSize Key) GapIterator {
//
// Unless otherwise specified, any mutation of a set invalidates all existing
// iterators into the set.
-type Iterator struct {
+type reclaimIterator struct {
// node is the node containing the iterated segment. If the iterator is
// terminal, node is nil.
- node *node
+ node *reclaimnode
// index is the index of the segment in node.keys/values.
index int
@@ -1188,24 +1078,24 @@ type Iterator struct {
// Ok returns true if the iterator is not terminal. All other methods are only
// valid for non-terminal iterators.
-func (seg Iterator) Ok() bool {
+func (seg reclaimIterator) Ok() bool {
return seg.node != nil
}
// Range returns the iterated segment's range key.
-func (seg Iterator) Range() Range {
+func (seg reclaimIterator) Range() __generics_imported0.FileRange {
return seg.node.keys[seg.index]
}
// Start is equivalent to Range().Start, but should be preferred if only the
// start of the range is needed.
-func (seg Iterator) Start() Key {
+func (seg reclaimIterator) Start() uint64 {
return seg.node.keys[seg.index].Start
}
// End is equivalent to Range().End, but should be preferred if only the end of
// the range is needed.
-func (seg Iterator) End() Key {
+func (seg reclaimIterator) End() uint64 {
return seg.node.keys[seg.index].End
}
@@ -1217,7 +1107,7 @@ func (seg Iterator) End() Key {
// * The new range must not overlap an existing one:
// * If seg.NextSegment().Ok(), then r.end <= seg.NextSegment().Start().
// * If seg.PrevSegment().Ok(), then r.start >= seg.PrevSegment().End().
-func (seg Iterator) SetRangeUnchecked(r Range) {
+func (seg reclaimIterator) SetRangeUnchecked(r __generics_imported0.FileRange) {
seg.node.keys[seg.index] = r
}
@@ -1225,7 +1115,7 @@ func (seg Iterator) SetRangeUnchecked(r Range) {
// cause the iterated segment to overlap another segment, or if the new range
// is invalid, SetRange panics. This operation does not invalidate any
// iterators.
-func (seg Iterator) SetRange(r Range) {
+func (seg reclaimIterator) SetRange(r __generics_imported0.FileRange) {
if r.Length() <= 0 {
panic(fmt.Sprintf("invalid segment range %v", r))
}
@@ -1244,7 +1134,7 @@ func (seg Iterator) SetRange(r Range) {
// Preconditions: The new start must be valid:
// * start < seg.End()
// * If seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
-func (seg Iterator) SetStartUnchecked(start Key) {
+func (seg reclaimIterator) SetStartUnchecked(start uint64) {
seg.node.keys[seg.index].Start = start
}
@@ -1252,7 +1142,7 @@ func (seg Iterator) SetStartUnchecked(start Key) {
// cause the iterated segment to overlap another segment, or would result in an
// invalid range, SetStart panics. This operation does not invalidate any
// iterators.
-func (seg Iterator) SetStart(start Key) {
+func (seg reclaimIterator) SetStart(start uint64) {
if start >= seg.End() {
panic(fmt.Sprintf("new start %v would invalidate segment range %v", start, seg.Range()))
}
@@ -1268,7 +1158,7 @@ func (seg Iterator) SetStart(start Key) {
// Preconditions: The new end must be valid:
// * end > seg.Start().
// * If seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
-func (seg Iterator) SetEndUnchecked(end Key) {
+func (seg reclaimIterator) SetEndUnchecked(end uint64) {
seg.node.keys[seg.index].End = end
}
@@ -1276,7 +1166,7 @@ func (seg Iterator) SetEndUnchecked(end Key) {
// the iterated segment to overlap another segment, or would result in an
// invalid range, SetEnd panics. This operation does not invalidate any
// iterators.
-func (seg Iterator) SetEnd(end Key) {
+func (seg reclaimIterator) SetEnd(end uint64) {
if end <= seg.Start() {
panic(fmt.Sprintf("new end %v would invalidate segment range %v", end, seg.Range()))
}
@@ -1287,69 +1177,68 @@ func (seg Iterator) SetEnd(end Key) {
}
// Value returns a copy of the iterated segment's value.
-func (seg Iterator) Value() Value {
+func (seg reclaimIterator) Value() reclaimSetValue {
return seg.node.values[seg.index]
}
// ValuePtr returns a pointer to the iterated segment's value. The pointer is
// invalidated if the iterator is invalidated. This operation does not
// invalidate any iterators.
-func (seg Iterator) ValuePtr() *Value {
+func (seg reclaimIterator) ValuePtr() *reclaimSetValue {
return &seg.node.values[seg.index]
}
// SetValue mutates the iterated segment's value. This operation does not
// invalidate any iterators.
-func (seg Iterator) SetValue(val Value) {
+func (seg reclaimIterator) SetValue(val reclaimSetValue) {
seg.node.values[seg.index] = val
}
// PrevSegment returns the iterated segment's predecessor. If there is no
// preceding segment, PrevSegment returns a terminal iterator.
-func (seg Iterator) PrevSegment() Iterator {
+func (seg reclaimIterator) PrevSegment() reclaimIterator {
if seg.node.hasChildren {
return seg.node.children[seg.index].lastSegment()
}
if seg.index > 0 {
- return Iterator{seg.node, seg.index - 1}
+ return reclaimIterator{seg.node, seg.index - 1}
}
if seg.node.parent == nil {
- return Iterator{}
+ return reclaimIterator{}
}
- return segmentBeforePosition(seg.node.parent, seg.node.parentIndex)
+ return reclaimsegmentBeforePosition(seg.node.parent, seg.node.parentIndex)
}
// NextSegment returns the iterated segment's successor. If there is no
// succeeding segment, NextSegment returns a terminal iterator.
-func (seg Iterator) NextSegment() Iterator {
+func (seg reclaimIterator) NextSegment() reclaimIterator {
if seg.node.hasChildren {
return seg.node.children[seg.index+1].firstSegment()
}
if seg.index < seg.node.nrSegments-1 {
- return Iterator{seg.node, seg.index + 1}
+ return reclaimIterator{seg.node, seg.index + 1}
}
if seg.node.parent == nil {
- return Iterator{}
+ return reclaimIterator{}
}
- return segmentAfterPosition(seg.node.parent, seg.node.parentIndex)
+ return reclaimsegmentAfterPosition(seg.node.parent, seg.node.parentIndex)
}
// PrevGap returns the gap immediately before the iterated segment.
-func (seg Iterator) PrevGap() GapIterator {
+func (seg reclaimIterator) PrevGap() reclaimGapIterator {
if seg.node.hasChildren {
- // Note that this isn't recursive because the last segment in a subtree
- // must be in a leaf node.
+
return seg.node.children[seg.index].lastSegment().NextGap()
}
- return GapIterator{seg.node, seg.index}
+ return reclaimGapIterator{seg.node, seg.index}
}
// NextGap returns the gap immediately after the iterated segment.
-func (seg Iterator) NextGap() GapIterator {
+func (seg reclaimIterator) NextGap() reclaimGapIterator {
if seg.node.hasChildren {
return seg.node.children[seg.index+1].firstSegment().PrevGap()
}
- return GapIterator{seg.node, seg.index + 1}
+ return reclaimGapIterator{seg.node, seg.index + 1}
}
// PrevNonEmpty returns the iterated segment's predecessor if it is adjacent,
@@ -1357,12 +1246,12 @@ func (seg Iterator) NextGap() GapIterator {
// Functions.MinKey(), PrevNonEmpty will return two terminal iterators.
// Otherwise, exactly one of the iterators returned by PrevNonEmpty will be
// non-terminal.
-func (seg Iterator) PrevNonEmpty() (Iterator, GapIterator) {
+func (seg reclaimIterator) PrevNonEmpty() (reclaimIterator, reclaimGapIterator) {
gap := seg.PrevGap()
if gap.Range().Length() != 0 {
- return Iterator{}, gap
+ return reclaimIterator{}, gap
}
- return gap.PrevSegment(), GapIterator{}
+ return gap.PrevSegment(), reclaimGapIterator{}
}
// NextNonEmpty returns the iterated segment's successor if it is adjacent, or
@@ -1370,12 +1259,12 @@ func (seg Iterator) PrevNonEmpty() (Iterator, GapIterator) {
// Functions.MaxKey(), NextNonEmpty will return two terminal iterators.
// Otherwise, exactly one of the iterators returned by NextNonEmpty will be
// non-terminal.
-func (seg Iterator) NextNonEmpty() (Iterator, GapIterator) {
+func (seg reclaimIterator) NextNonEmpty() (reclaimIterator, reclaimGapIterator) {
gap := seg.NextGap()
if gap.Range().Length() != 0 {
- return Iterator{}, gap
+ return reclaimIterator{}, gap
}
- return gap.NextSegment(), GapIterator{}
+ return gap.NextSegment(), reclaimGapIterator{}
}
// A GapIterator is conceptually one of:
@@ -1396,77 +1285,77 @@ func (seg Iterator) NextNonEmpty() (Iterator, GapIterator) {
//
// Unless otherwise specified, any mutation of a set invalidates all existing
// iterators into the set.
-type GapIterator struct {
+type reclaimGapIterator struct {
// The representation of a GapIterator is identical to that of an Iterator,
// except that index corresponds to positions between segments in the same
// way as for node.children (see comment for node.nrSegments).
- node *node
+ node *reclaimnode
index int
}
// Ok returns true if the iterator is not terminal. All other methods are only
// valid for non-terminal iterators.
-func (gap GapIterator) Ok() bool {
+func (gap reclaimGapIterator) Ok() bool {
return gap.node != nil
}
// Range returns the range spanned by the iterated gap.
-func (gap GapIterator) Range() Range {
- return Range{gap.Start(), gap.End()}
+func (gap reclaimGapIterator) Range() __generics_imported0.FileRange {
+ return __generics_imported0.FileRange{gap.Start(), gap.End()}
}
// Start is equivalent to Range().Start, but should be preferred if only the
// start of the range is needed.
-func (gap GapIterator) Start() Key {
+func (gap reclaimGapIterator) Start() uint64 {
if ps := gap.PrevSegment(); ps.Ok() {
return ps.End()
}
- return Functions{}.MinKey()
+ return reclaimSetFunctions{}.MinKey()
}
// End is equivalent to Range().End, but should be preferred if only the end of
// the range is needed.
-func (gap GapIterator) End() Key {
+func (gap reclaimGapIterator) End() uint64 {
if ns := gap.NextSegment(); ns.Ok() {
return ns.Start()
}
- return Functions{}.MaxKey()
+ return reclaimSetFunctions{}.MaxKey()
}
// IsEmpty returns true if the iterated gap is empty (that is, the "gap" is
// between two adjacent segments.)
-func (gap GapIterator) IsEmpty() bool {
+func (gap reclaimGapIterator) IsEmpty() bool {
return gap.Range().Length() == 0
}
// PrevSegment returns the segment immediately before the iterated gap. If no
// such segment exists, PrevSegment returns a terminal iterator.
-func (gap GapIterator) PrevSegment() Iterator {
- return segmentBeforePosition(gap.node, gap.index)
+func (gap reclaimGapIterator) PrevSegment() reclaimIterator {
+ return reclaimsegmentBeforePosition(gap.node, gap.index)
}
// NextSegment returns the segment immediately after the iterated gap. If no
// such segment exists, NextSegment returns a terminal iterator.
-func (gap GapIterator) NextSegment() Iterator {
- return segmentAfterPosition(gap.node, gap.index)
+func (gap reclaimGapIterator) NextSegment() reclaimIterator {
+ return reclaimsegmentAfterPosition(gap.node, gap.index)
}
// PrevGap returns the iterated gap's predecessor. If no such gap exists,
// PrevGap returns a terminal iterator.
-func (gap GapIterator) PrevGap() GapIterator {
+func (gap reclaimGapIterator) PrevGap() reclaimGapIterator {
seg := gap.PrevSegment()
if !seg.Ok() {
- return GapIterator{}
+ return reclaimGapIterator{}
}
return seg.PrevGap()
}
// NextGap returns the iterated gap's successor. If no such gap exists, NextGap
// returns a terminal iterator.
-func (gap GapIterator) NextGap() GapIterator {
+func (gap reclaimGapIterator) NextGap() reclaimGapIterator {
seg := gap.NextSegment()
if !seg.Ok() {
- return GapIterator{}
+ return reclaimGapIterator{}
}
return seg.NextGap()
}
@@ -1476,13 +1365,12 @@ func (gap GapIterator) NextGap() GapIterator {
// include this gap itself).
//
// Precondition: trackGaps must be 1.
-func (gap GapIterator) NextLargeEnoughGap(minSize Key) GapIterator {
- if trackGaps != 1 {
+func (gap reclaimGapIterator) NextLargeEnoughGap(minSize uint64) reclaimGapIterator {
+ if reclaimtrackGaps != 1 {
panic("set is not tracking gaps")
}
if gap.node != nil && gap.node.hasChildren && gap.index == gap.node.nrSegments {
- // If gap is the trailing gap of an non-leaf node,
- // translate it to the equivalent gap on leaf level.
+
gap.node = gap.NextSegment().node
gap.index = 0
return gap.nextLargeEnoughGapHelper(minSize)
@@ -1494,19 +1382,17 @@ func (gap GapIterator) NextLargeEnoughGap(minSize Key) GapIterator {
// to do the real recursions.
//
// Preconditions: gap is NOT the trailing gap of a non-leaf node.
-func (gap GapIterator) nextLargeEnoughGapHelper(minSize Key) GapIterator {
- // Crawl up the tree if no large enough gap in current node or the
- // current gap is the trailing one on leaf level.
+func (gap reclaimGapIterator) nextLargeEnoughGapHelper(minSize uint64) reclaimGapIterator {
+
for gap.node != nil &&
(gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == gap.node.nrSegments)) {
gap.node, gap.index = gap.node.parent, gap.node.parentIndex
}
- // If no large enough gap throughout the whole set, return a terminal
- // gap iterator.
+
if gap.node == nil {
- return GapIterator{}
+ return reclaimGapIterator{}
}
- // Iterate subsequent gaps.
+
gap.index++
for gap.index <= gap.node.nrSegments {
if gap.node.hasChildren {
@@ -1522,8 +1408,7 @@ func (gap GapIterator) nextLargeEnoughGapHelper(minSize Key) GapIterator {
}
gap.node, gap.index = gap.node.parent, gap.node.parentIndex
if gap.node != nil && gap.index == gap.node.nrSegments {
- // If gap is the trailing gap of a non-leaf node, crawl up to
- // parent again and do recursion.
+
gap.node, gap.index = gap.node.parent, gap.node.parentIndex
}
return gap.nextLargeEnoughGapHelper(minSize)
@@ -1534,13 +1419,12 @@ func (gap GapIterator) nextLargeEnoughGapHelper(minSize Key) GapIterator {
// (does NOT include this gap itself).
//
// Precondition: trackGaps must be 1.
-func (gap GapIterator) PrevLargeEnoughGap(minSize Key) GapIterator {
- if trackGaps != 1 {
+func (gap reclaimGapIterator) PrevLargeEnoughGap(minSize uint64) reclaimGapIterator {
+ if reclaimtrackGaps != 1 {
panic("set is not tracking gaps")
}
if gap.node != nil && gap.node.hasChildren && gap.index == 0 {
- // If gap is the first gap of an non-leaf node,
- // translate it to the equivalent gap on leaf level.
+
gap.node = gap.PrevSegment().node
gap.index = gap.node.nrSegments
return gap.prevLargeEnoughGapHelper(minSize)
@@ -1552,19 +1436,17 @@ func (gap GapIterator) PrevLargeEnoughGap(minSize Key) GapIterator {
// to do the real recursions.
//
// Preconditions: gap is NOT the first gap of a non-leaf node.
-func (gap GapIterator) prevLargeEnoughGapHelper(minSize Key) GapIterator {
- // Crawl up the tree if no large enough gap in current node or the
- // current gap is the first one on leaf level.
+func (gap reclaimGapIterator) prevLargeEnoughGapHelper(minSize uint64) reclaimGapIterator {
+
for gap.node != nil &&
(gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == 0)) {
gap.node, gap.index = gap.node.parent, gap.node.parentIndex
}
- // If no large enough gap throughout the whole set, return a terminal
- // gap iterator.
+
if gap.node == nil {
- return GapIterator{}
+ return reclaimGapIterator{}
}
- // Iterate previous gaps.
+
gap.index--
for gap.index >= 0 {
if gap.node.hasChildren {
@@ -1580,8 +1462,7 @@ func (gap GapIterator) prevLargeEnoughGapHelper(minSize Key) GapIterator {
}
gap.node, gap.index = gap.node.parent, gap.node.parentIndex
if gap.node != nil && gap.index == 0 {
- // If gap is the first gap of a non-leaf node, crawl up to
- // parent again and do recursion.
+
gap.node, gap.index = gap.node.parent, gap.node.parentIndex
}
return gap.prevLargeEnoughGapHelper(minSize)
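The {Next,Prev}LargeEnoughGap helpers above are the payoff of the maxGap bookkeeping earlier in this file. The following is a hypothetical driver sketch, not part of the generated diff; it assumes the reclaim set is instantiated with trackGaps = 1 (otherwise these methods panic, as the guard above shows), that reclaimSetValue has a usable zero value, and that the generated Add/FirstGap helpers from earlier in this file are available.

package pgalloc

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/sentry/memmap"
)

// exampleLargeEnoughGap (hypothetical) builds a reclaim set with two segments
// and walks forward to the first gap of at least 0x2000 bytes, which is the
// hole between them.
func exampleLargeEnoughGap() {
	var (
		s reclaimSet
		v reclaimSetValue // zero value; the payload is irrelevant here
	)
	s.Add(memmap.FileRange{Start: 0, End: 0x1000}, v)
	s.Add(memmap.FileRange{Start: 0x3000, End: 0x4000}, v)

	// NextLargeEnoughGap does not consider the starting gap itself; it lands
	// on [0x1000, 0x3000), whose length is exactly 0x2000.
	if gap := s.FirstGap().NextLargeEnoughGap(0x2000); gap.Ok() {
		fmt.Printf("first gap of >= 0x2000 bytes: [%#x, %#x)\n", gap.Start(), gap.End())
	}
}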
@@ -1590,56 +1471,55 @@ func (gap GapIterator) prevLargeEnoughGapHelper(minSize Key) GapIterator {
// segmentBeforePosition returns the predecessor segment of the position given
// by n.children[i], which may or may not contain a child. If no such segment
// exists, segmentBeforePosition returns a terminal iterator.
-func segmentBeforePosition(n *node, i int) Iterator {
+func reclaimsegmentBeforePosition(n *reclaimnode, i int) reclaimIterator {
for i == 0 {
if n.parent == nil {
- return Iterator{}
+ return reclaimIterator{}
}
n, i = n.parent, n.parentIndex
}
- return Iterator{n, i - 1}
+ return reclaimIterator{n, i - 1}
}
// segmentAfterPosition returns the successor segment of the position given by
// n.children[i], which may or may not contain a child. If no such segment
// exists, segmentAfterPosition returns a terminal iterator.
-func segmentAfterPosition(n *node, i int) Iterator {
+func reclaimsegmentAfterPosition(n *reclaimnode, i int) reclaimIterator {
for i == n.nrSegments {
if n.parent == nil {
- return Iterator{}
+ return reclaimIterator{}
}
n, i = n.parent, n.parentIndex
}
- return Iterator{n, i}
+ return reclaimIterator{n, i}
}
-func zeroValueSlice(slice []Value) {
- // TODO(jamieliu): check if Go is actually smart enough to optimize a
- // ClearValue that assigns nil to a memset here.
+func reclaimzeroValueSlice(slice []reclaimSetValue) {
+
for i := range slice {
- Functions{}.ClearValue(&slice[i])
+ reclaimSetFunctions{}.ClearValue(&slice[i])
}
}
-func zeroNodeSlice(slice []*node) {
+func reclaimzeroNodeSlice(slice []*reclaimnode) {
for i := range slice {
slice[i] = nil
}
}
// String stringifies a Set for debugging.
-func (s *Set) String() string {
+func (s *reclaimSet) String() string {
return s.root.String()
}
// String stringifies a node (and all of its children) for debugging.
-func (n *node) String() string {
+func (n *reclaimnode) String() string {
var buf bytes.Buffer
n.writeDebugString(&buf, "")
return buf.String()
}
-func (n *node) writeDebugString(buf *bytes.Buffer, prefix string) {
+func (n *reclaimnode) writeDebugString(buf *bytes.Buffer, prefix string) {
if n.hasChildren != (n.nrSegments > 0 && n.children[0] != nil) {
buf.WriteString(prefix)
buf.WriteString(fmt.Sprintf("WARNING: inconsistent value of hasChildren: got %v, want %v\n", n.hasChildren, !n.hasChildren))
@@ -1655,7 +1535,7 @@ func (n *node) writeDebugString(buf *bytes.Buffer, prefix string) {
}
buf.WriteString(prefix)
if n.hasChildren {
- if trackGaps != 0 {
+ if reclaimtrackGaps != 0 {
buf.WriteString(fmt.Sprintf("- % 3d: %v => %v, maxGap: %d\n", i, n.keys[i], n.values[i], n.maxGap.Get()))
} else {
buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
@@ -1674,16 +1554,16 @@ func (n *node) writeDebugString(buf *bytes.Buffer, prefix string) {
// for save/restore and the layout here is optimized for that.
//
// +stateify savable
-type SegmentDataSlices struct {
- Start []Key
- End []Key
- Values []Value
+type reclaimSegmentDataSlices struct {
+ Start []uint64
+ End []uint64
+ Values []reclaimSetValue
}
// ExportSortedSlices returns a copy of all segments in the given set, in
// ascending key order.
-func (s *Set) ExportSortedSlices() *SegmentDataSlices {
- var sds SegmentDataSlices
+func (s *reclaimSet) ExportSortedSlices() *reclaimSegmentDataSlices {
+ var sds reclaimSegmentDataSlices
for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
sds.Start = append(sds.Start, seg.Start())
sds.End = append(sds.End, seg.End())
@@ -1702,13 +1582,13 @@ func (s *Set) ExportSortedSlices() *SegmentDataSlices {
// * sds must represent a valid set (the segments in sds must have valid
// lengths that do not overlap).
// * The segments in sds must be sorted in ascending key order.
-func (s *Set) ImportSortedSlices(sds *SegmentDataSlices) error {
+func (s *reclaimSet) ImportSortedSlices(sds *reclaimSegmentDataSlices) error {
if !s.IsEmpty() {
return fmt.Errorf("cannot import into non-empty set %v", s)
}
gap := s.FirstGap()
for i := range sds.Start {
- r := Range{sds.Start[i], sds.End[i]}
+ r := __generics_imported0.FileRange{sds.Start[i], sds.End[i]}
if !gap.Range().IsSupersetOf(r) {
return fmt.Errorf("segment overlaps a preceding segment or is incorrectly sorted: [%d, %d) => %v", sds.Start[i], sds.End[i], sds.Values[i])
}
@@ -1723,9 +1603,9 @@ func (s *Set) ImportSortedSlices(sds *SegmentDataSlices) error {
//
// This should be used only for testing, and has been added to this package for
// templating convenience.
-func (s *Set) segmentTestCheck(expectedSegments int, segFunc func(int, Range, Value) error) error {
+func (s *reclaimSet) segmentTestCheck(expectedSegments int, segFunc func(int, __generics_imported0.FileRange, reclaimSetValue) error) error {
havePrev := false
- prev := Key(0)
+ prev := uint64(0)
nrSegments := 0
for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
next := seg.Start()
@@ -1750,9 +1630,18 @@ func (s *Set) segmentTestCheck(expectedSegments int, segFunc func(int, Range, Va
// countSegments counts the number of segments in the set.
//
// Similar to Check, this should only be used for testing.
-func (s *Set) countSegments() (segments int) {
+func (s *reclaimSet) countSegments() (segments int) {
for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
segments++
}
return segments
}
+func (s *reclaimSet) saveRoot() *reclaimSegmentDataSlices {
+ return s.ExportSortedSlices()
+}
+
+func (s *reclaimSet) loadRoot(sds *reclaimSegmentDataSlices) {
+ if err := s.ImportSortedSlices(sds); err != nil {
+ panic(err)
+ }
+}
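saveRoot/loadRoot above are thin wrappers over ExportSortedSlices/ImportSortedSlices, which is how the whole B-tree is flattened for checkpoint and rebuilt on restore. A minimal round-trip sketch follows, again hypothetical code inside package pgalloc with the same assumptions as the earlier sketch (zero-constructible reclaimSetValue, generated Add helper).

package pgalloc

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/sentry/memmap"
)

// exampleRoundTrip (hypothetical) flattens a reclaim set into sorted slices
// and rebuilds an equivalent set from them, mirroring saveRoot/loadRoot.
func exampleRoundTrip() error {
	var (
		src, dst reclaimSet
		v        reclaimSetValue
	)
	src.Add(memmap.FileRange{Start: 0, End: 0x1000}, v)
	src.Add(memmap.FileRange{Start: 0x2000, End: 0x3000}, v)

	sds := src.ExportSortedSlices() // Start/End/Values in ascending key order
	if err := dst.ImportSortedSlices(sds); err != nil {
		return err // only possible if dst were non-empty or sds were malformed
	}
	fmt.Println(len(dst.ExportSortedSlices().Start) == len(sds.Start)) // true
	return nil
}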
diff --git a/pkg/sentry/pgalloc/usage_set.go b/pkg/sentry/pgalloc/usage_set.go
new file mode 100644
index 000000000..8d96e817a
--- /dev/null
+++ b/pkg/sentry/pgalloc/usage_set.go
@@ -0,0 +1,1647 @@
+package pgalloc
+
+import (
+ __generics_imported0 "gvisor.dev/gvisor/pkg/sentry/memmap"
+)
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// trackGaps is an optional parameter.
+//
+// If trackGaps is 1, the Set will track maximum gap size recursively,
+// enabling the GapIterator.{Prev,Next}LargeEnoughGap functions. In this
+// case, Key must be an unsigned integer.
+//
+// trackGaps must be 0 or 1.
+const usagetrackGaps = 1
+
+var _ = uint8(usagetrackGaps << 7) // Will fail if not zero or one.
+
+// dynamicGap is a type that disappears if trackGaps is 0.
+type usagedynamicGap [usagetrackGaps]uint64
+
+// Get returns the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *usagedynamicGap) Get() uint64 {
+ return d[:][0]
+}
+
+// Set sets the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *usagedynamicGap) Set(v uint64) {
+ d[:][0] = v
+}
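"A type that disappears if trackGaps is 0" is meant literally, since usagedynamicGap is a [usagetrackGaps]uint64 array. The standalone sketch below (plain Go, no pgalloc types) shows that a zero-length array field costs nothing, while the trackGaps = 1 form is a single uint64 per node.

package main

import (
	"fmt"
	"unsafe"
)

// gapOff models the trackGaps = 0 case: a zero-length array occupies no space.
type gapOff [0]uint64

// gapOn models the trackGaps = 1 case: one uint64 per node.
type gapOn [1]uint64

func main() {
	fmt.Println(unsafe.Sizeof(gapOff{})) // 0
	fmt.Println(unsafe.Sizeof(gapOn{}))  // 8
}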
+
+const (
+ // minDegree is the minimum degree of an internal node in a Set B-tree.
+ //
+ // - Any non-root node has at least minDegree-1 segments.
+ //
+ // - Any non-root internal (non-leaf) node has at least minDegree children.
+ //
+ // - The root node may have fewer than minDegree-1 segments, but it may
+ // only have 0 segments if the tree is empty.
+ //
+ // Our implementation requires minDegree >= 3. Higher values of minDegree
+ // usually improve performance, but increase memory usage for small sets.
+ usageminDegree = 10
+
+ usagemaxDegree = 2 * usageminDegree
+)
+
+// A Set is a mapping of segments with non-overlapping Range keys. The zero
+// value for a Set is an empty set. Set values are not safely movable nor
+// copyable. Set is thread-compatible.
+//
+// +stateify savable
+type usageSet struct {
+ root usagenode `state:".(*usageSegmentDataSlices)"`
+}
+
+// IsEmpty returns true if the set contains no segments.
+func (s *usageSet) IsEmpty() bool {
+ return s.root.nrSegments == 0
+}
+
+// IsEmptyRange returns true iff no segments in the set overlap the given
+// range. This is semantically equivalent to s.SpanRange(r) == 0, but may be
+// more efficient.
+func (s *usageSet) IsEmptyRange(r __generics_imported0.FileRange) bool {
+ switch {
+ case r.Length() < 0:
+ panic(fmt.Sprintf("invalid range %v", r))
+ case r.Length() == 0:
+ return true
+ }
+ _, gap := s.Find(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ return r.End <= gap.End()
+}
+
+// Span returns the total size of all segments in the set.
+func (s *usageSet) Span() uint64 {
+ var sz uint64
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ sz += seg.Range().Length()
+ }
+ return sz
+}
+
+// SpanRange returns the total size of the intersection of segments in the set
+// with the given range.
+func (s *usageSet) SpanRange(r __generics_imported0.FileRange) uint64 {
+ switch {
+ case r.Length() < 0:
+ panic(fmt.Sprintf("invalid range %v", r))
+ case r.Length() == 0:
+ return 0
+ }
+ var sz uint64
+ for seg := s.LowerBoundSegment(r.Start); seg.Ok() && seg.Start() < r.End; seg = seg.NextSegment() {
+ sz += seg.Range().Intersect(r).Length()
+ }
+ return sz
+}
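As the IsEmptyRange comment above says, its result matches SpanRange(r) == 0, but it can return as soon as a single overlapping segment is found. A hypothetical check inside package pgalloc (usageInfo is only used as a zero value here):

package pgalloc

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/sentry/memmap"
)

// exampleEmptyRange (hypothetical) shows the documented equivalence between
// IsEmptyRange and SpanRange == 0 on a one-segment set.
func exampleEmptyRange() {
	var (
		s    usageSet
		info usageInfo
	)
	s.Add(memmap.FileRange{Start: 0x1000, End: 0x2000}, info)

	r := memmap.FileRange{Start: 0, End: 0x1000}
	fmt.Println(s.IsEmptyRange(r), s.SpanRange(r) == 0) // true true

	r = memmap.FileRange{Start: 0x1800, End: 0x3000}
	fmt.Println(s.IsEmptyRange(r), s.SpanRange(r) == 0) // false false
}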
+
+// FirstSegment returns the first segment in the set. If the set is empty,
+// FirstSegment returns a terminal iterator.
+func (s *usageSet) FirstSegment() usageIterator {
+ if s.root.nrSegments == 0 {
+ return usageIterator{}
+ }
+ return s.root.firstSegment()
+}
+
+// LastSegment returns the last segment in the set. If the set is empty,
+// LastSegment returns a terminal iterator.
+func (s *usageSet) LastSegment() usageIterator {
+ if s.root.nrSegments == 0 {
+ return usageIterator{}
+ }
+ return s.root.lastSegment()
+}
+
+// FirstGap returns the first gap in the set.
+func (s *usageSet) FirstGap() usageGapIterator {
+ n := &s.root
+ for n.hasChildren {
+ n = n.children[0]
+ }
+ return usageGapIterator{n, 0}
+}
+
+// LastGap returns the last gap in the set.
+func (s *usageSet) LastGap() usageGapIterator {
+ n := &s.root
+ for n.hasChildren {
+ n = n.children[n.nrSegments]
+ }
+ return usageGapIterator{n, n.nrSegments}
+}
+
+// Find returns the segment or gap whose range contains the given key. If a
+// segment is found, the returned Iterator is non-terminal and the
+// returned GapIterator is terminal. Otherwise, the returned Iterator is
+// terminal and the returned GapIterator is non-terminal.
+func (s *usageSet) Find(key uint64) (usageIterator, usageGapIterator) {
+ n := &s.root
+ for {
+
+ lower := 0
+ upper := n.nrSegments
+ for lower < upper {
+ i := lower + (upper-lower)/2
+ if r := n.keys[i]; key < r.End {
+ if key >= r.Start {
+ return usageIterator{n, i}, usageGapIterator{}
+ }
+ upper = i
+ } else {
+ lower = i + 1
+ }
+ }
+ i := lower
+ if !n.hasChildren {
+ return usageIterator{}, usageGapIterator{n, i}
+ }
+ n = n.children[i]
+ }
+}
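Find's contract, exactly one non-terminal iterator per lookup, is what the convenience wrappers below (FindSegment, LowerBoundSegment, FindGap, and friends) are built on. A hypothetical illustration inside package pgalloc:

package pgalloc

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/sentry/memmap"
)

// exampleFind (hypothetical) shows that a covered key yields a segment
// iterator and an uncovered key yields a gap iterator, never both.
func exampleFind() {
	var (
		s    usageSet
		info usageInfo
	)
	s.Add(memmap.FileRange{Start: 0x1000, End: 0x2000}, info)

	if seg, _ := s.Find(0x1800); seg.Ok() {
		fmt.Printf("0x1800 lies in segment [%#x, %#x)\n", seg.Start(), seg.End())
	}
	if _, gap := s.Find(0x4000); gap.Ok() {
		fmt.Printf("0x4000 lies in gap [%#x, %#x)\n", gap.Start(), gap.End())
	}
}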
+
+// FindSegment returns the segment whose range contains the given key. If no
+// such segment exists, FindSegment returns a terminal iterator.
+func (s *usageSet) FindSegment(key uint64) usageIterator {
+ seg, _ := s.Find(key)
+ return seg
+}
+
+// LowerBoundSegment returns the segment with the lowest range that contains a
+// key greater than or equal to min. If no such segment exists,
+// LowerBoundSegment returns a terminal iterator.
+func (s *usageSet) LowerBoundSegment(min uint64) usageIterator {
+ seg, gap := s.Find(min)
+ if seg.Ok() {
+ return seg
+ }
+ return gap.NextSegment()
+}
+
+// UpperBoundSegment returns the segment with the highest range that contains a
+// key less than or equal to max. If no such segment exists, UpperBoundSegment
+// returns a terminal iterator.
+func (s *usageSet) UpperBoundSegment(max uint64) usageIterator {
+ seg, gap := s.Find(max)
+ if seg.Ok() {
+ return seg
+ }
+ return gap.PrevSegment()
+}
+
+// FindGap returns the gap containing the given key. If no such gap exists
+// (i.e. the set contains a segment containing that key), FindGap returns a
+// terminal iterator.
+func (s *usageSet) FindGap(key uint64) usageGapIterator {
+ _, gap := s.Find(key)
+ return gap
+}
+
+// LowerBoundGap returns the gap with the lowest range that is greater than or
+// equal to min.
+func (s *usageSet) LowerBoundGap(min uint64) usageGapIterator {
+ seg, gap := s.Find(min)
+ if gap.Ok() {
+ return gap
+ }
+ return seg.NextGap()
+}
+
+// UpperBoundGap returns the gap with the highest range that is less than or
+// equal to max.
+func (s *usageSet) UpperBoundGap(max uint64) usageGapIterator {
+ seg, gap := s.Find(max)
+ if gap.Ok() {
+ return gap
+ }
+ return seg.PrevGap()
+}
+
+// Add inserts the given segment into the set and returns true. If the new
+// segment can be merged with adjacent segments, Add will do so. If the new
+// segment would overlap an existing segment, Add returns false. If Add
+// succeeds, all existing iterators are invalidated.
+func (s *usageSet) Add(r __generics_imported0.FileRange, val usageInfo) bool {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ gap := s.FindGap(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ if r.End > gap.End() {
+ return false
+ }
+ s.Insert(gap, r, val)
+ return true
+}
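Add only reports whether the range fit into a gap; whether adjacent segments actually coalesce is up to usageSetFunctions.Merge and the values involved, so the hypothetical sketch below (package pgalloc) relies only on the fit/overlap behavior:

package pgalloc

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/sentry/memmap"
)

// exampleAdd (hypothetical) shows Add returning true when the new range fits
// in a gap and false when it would overlap an existing segment.
func exampleAdd() {
	var (
		s    usageSet
		info usageInfo
	)
	fmt.Println(s.Add(memmap.FileRange{Start: 0x0000, End: 0x2000}, info)) // true
	fmt.Println(s.Add(memmap.FileRange{Start: 0x1000, End: 0x3000}, info)) // false: overlaps [0, 0x2000)
	fmt.Println(s.Add(memmap.FileRange{Start: 0x2000, End: 0x3000}, info)) // true: adjacent, merged only if Merge allows it
}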
+
+// AddWithoutMerging inserts the given segment into the set and returns true.
+// If it would overlap an existing segment, AddWithoutMerging does nothing and
+// returns false. If AddWithoutMerging succeeds, all existing iterators are
+// invalidated.
+func (s *usageSet) AddWithoutMerging(r __generics_imported0.FileRange, val usageInfo) bool {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ gap := s.FindGap(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ if r.End > gap.End() {
+ return false
+ }
+ s.InsertWithoutMergingUnchecked(gap, r, val)
+ return true
+}
+
+// Insert inserts the given segment into the given gap. If the new segment can
+// be merged with adjacent segments, Insert will do so. Insert returns an
+// iterator to the segment containing the inserted value (which may have been
+// merged with other values). All existing iterators (including gap, but not
+// including the returned iterator) are invalidated.
+//
+// If the gap cannot accommodate the segment, or if r is invalid, Insert panics.
+//
+// Insert is semantically equivalent to a InsertWithoutMerging followed by a
+// Merge, but may be more efficient. Note that there is no unchecked variant of
+// Insert since Insert must retrieve and inspect gap's predecessor and
+// successor segments regardless.
+func (s *usageSet) Insert(gap usageGapIterator, r __generics_imported0.FileRange, val usageInfo) usageIterator {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ prev, next := gap.PrevSegment(), gap.NextSegment()
+ if prev.Ok() && prev.End() > r.Start {
+ panic(fmt.Sprintf("new segment %v overlaps predecessor %v", r, prev.Range()))
+ }
+ if next.Ok() && next.Start() < r.End {
+ panic(fmt.Sprintf("new segment %v overlaps successor %v", r, next.Range()))
+ }
+ if prev.Ok() && prev.End() == r.Start {
+ if mval, ok := (usageSetFunctions{}).Merge(prev.Range(), prev.Value(), r, val); ok {
+ shrinkMaxGap := usagetrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
+ prev.SetEndUnchecked(r.End)
+ prev.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ if next.Ok() && next.Start() == r.End {
+ val = mval
+ if mval, ok := (usageSetFunctions{}).Merge(prev.Range(), val, next.Range(), next.Value()); ok {
+ prev.SetEndUnchecked(next.End())
+ prev.SetValue(mval)
+ return s.Remove(next).PrevSegment()
+ }
+ }
+ return prev
+ }
+ }
+ if next.Ok() && next.Start() == r.End {
+ if mval, ok := (usageSetFunctions{}).Merge(r, val, next.Range(), next.Value()); ok {
+ shrinkMaxGap := usagetrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
+ next.SetStartUnchecked(r.Start)
+ next.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ return next
+ }
+ }
+
+ return s.InsertWithoutMergingUnchecked(gap, r, val)
+}
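Unlike Add, Insert takes a gap the caller has already located and panics instead of returning false when the range does not fit, which is why it pairs naturally with FindGap. A hypothetical sketch inside package pgalloc:

package pgalloc

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/sentry/memmap"
)

// exampleInsert (hypothetical) locates the gap between two segments and
// inserts a new range into it.
func exampleInsert() {
	var (
		s    usageSet
		info usageInfo
	)
	s.Add(memmap.FileRange{Start: 0, End: 0x1000}, info)
	s.Add(memmap.FileRange{Start: 0x3000, End: 0x4000}, info)

	if gap := s.FindGap(0x2000); gap.Ok() {
		seg := s.Insert(gap, memmap.FileRange{Start: 0x1800, End: 0x2800}, info)
		fmt.Printf("inserted segment [%#x, %#x)\n", seg.Start(), seg.End())
	}
}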
+
+// InsertWithoutMerging inserts the given segment into the given gap and
+// returns an iterator to the inserted segment. All existing iterators
+// (including gap, but not including the returned iterator) are invalidated.
+//
+// If the gap cannot accommodate the segment, or if r is invalid,
+// InsertWithoutMerging panics.
+func (s *usageSet) InsertWithoutMerging(gap usageGapIterator, r __generics_imported0.FileRange, val usageInfo) usageIterator {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ if gr := gap.Range(); !gr.IsSupersetOf(r) {
+ panic(fmt.Sprintf("cannot insert segment range %v into gap range %v", r, gr))
+ }
+ return s.InsertWithoutMergingUnchecked(gap, r, val)
+}
+
+// InsertWithoutMergingUnchecked inserts the given segment into the given gap
+// and returns an iterator to the inserted segment. All existing iterators
+// (including gap, but not including the returned iterator) are invalidated.
+//
+// Preconditions:
+// * r.Start >= gap.Start().
+// * r.End <= gap.End().
+func (s *usageSet) InsertWithoutMergingUnchecked(gap usageGapIterator, r __generics_imported0.FileRange, val usageInfo) usageIterator {
+ gap = gap.node.rebalanceBeforeInsert(gap)
+ splitMaxGap := usagetrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
+ copy(gap.node.keys[gap.index+1:], gap.node.keys[gap.index:gap.node.nrSegments])
+ copy(gap.node.values[gap.index+1:], gap.node.values[gap.index:gap.node.nrSegments])
+ gap.node.keys[gap.index] = r
+ gap.node.values[gap.index] = val
+ gap.node.nrSegments++
+ if splitMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ return usageIterator{gap.node, gap.index}
+}
+
+// Remove removes the given segment and returns an iterator to the vacated gap.
+// All existing iterators (including seg, but not including the returned
+// iterator) are invalidated.
+func (s *usageSet) Remove(seg usageIterator) usageGapIterator {
+
+ if seg.node.hasChildren {
+
+ victim := seg.PrevSegment()
+
+ seg.SetRangeUnchecked(victim.Range())
+ seg.SetValue(victim.Value())
+
+ nextAdjacentNode := seg.NextSegment().node
+ if usagetrackGaps != 0 {
+ nextAdjacentNode.updateMaxGapLeaf()
+ }
+ return s.Remove(victim).NextGap()
+ }
+ copy(seg.node.keys[seg.index:], seg.node.keys[seg.index+1:seg.node.nrSegments])
+ copy(seg.node.values[seg.index:], seg.node.values[seg.index+1:seg.node.nrSegments])
+ usageSetFunctions{}.ClearValue(&seg.node.values[seg.node.nrSegments-1])
+ seg.node.nrSegments--
+ if usagetrackGaps != 0 {
+ seg.node.updateMaxGapLeaf()
+ }
+ return seg.node.rebalanceAfterRemove(usageGapIterator{seg.node, seg.index})
+}
+
+// RemoveAll removes all segments from the set. All existing iterators are
+// invalidated.
+func (s *usageSet) RemoveAll() {
+ s.root = usagenode{}
+}
+
+// RemoveRange removes all segments in the given range. An iterator to the
+// newly formed gap is returned, and all existing iterators are invalidated.
+func (s *usageSet) RemoveRange(r __generics_imported0.FileRange) usageGapIterator {
+ seg, gap := s.Find(r.Start)
+ if seg.Ok() {
+ seg = s.Isolate(seg, r)
+ gap = s.Remove(seg)
+ }
+ for seg = gap.NextSegment(); seg.Ok() && seg.Start() < r.End; seg = gap.NextSegment() {
+ seg = s.Isolate(seg, r)
+ gap = s.Remove(seg)
+ }
+ return gap
+}
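RemoveRange is Isolate plus Remove in a loop, so a removal range that cuts through the middle of a segment leaves the pieces outside the range intact. A hypothetical sketch inside package pgalloc:

package pgalloc

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/sentry/memmap"
)

// exampleRemoveRange (hypothetical) punches a hole out of one large segment;
// the portions outside the removed range survive as separate segments.
func exampleRemoveRange() {
	var (
		s    usageSet
		info usageInfo
	)
	s.Add(memmap.FileRange{Start: 0, End: 0x4000}, info)

	gap := s.RemoveRange(memmap.FileRange{Start: 0x1000, End: 0x3000})
	// The vacated gap is [0x1000, 0x3000); 0x2000 bytes remain in the set.
	fmt.Printf("gap [%#x, %#x), remaining span %#x\n", gap.Start(), gap.End(), s.Span())
}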
+
+// Merge attempts to merge two neighboring segments. If successful, Merge
+// returns an iterator to the merged segment, and all existing iterators are
+// invalidated. Otherwise, Merge returns a terminal iterator.
+//
+// If first is not the predecessor of second, Merge panics.
+func (s *usageSet) Merge(first, second usageIterator) usageIterator {
+ if first.NextSegment() != second {
+ panic(fmt.Sprintf("attempt to merge non-neighboring segments %v, %v", first.Range(), second.Range()))
+ }
+ return s.MergeUnchecked(first, second)
+}
+
+// MergeUnchecked attempts to merge two neighboring segments. If successful,
+// MergeUnchecked returns an iterator to the merged segment, and all existing
+// iterators are invalidated. Otherwise, MergeUnchecked returns a terminal
+// iterator.
+//
+// Precondition: first is the predecessor of second: first.NextSegment() ==
+// second, first == second.PrevSegment().
+func (s *usageSet) MergeUnchecked(first, second usageIterator) usageIterator {
+ if first.End() == second.Start() {
+ if mval, ok := (usageSetFunctions{}).Merge(first.Range(), first.Value(), second.Range(), second.Value()); ok {
+
+ first.SetEndUnchecked(second.End())
+ first.SetValue(mval)
+
+ return s.Remove(second).PrevSegment()
+ }
+ }
+ return usageIterator{}
+}
+
+// MergeAll attempts to merge all adjacent segments in the set. All existing
+// iterators are invalidated.
+func (s *usageSet) MergeAll() {
+ seg := s.FirstSegment()
+ if !seg.Ok() {
+ return
+ }
+ next := seg.NextSegment()
+ for next.Ok() {
+ if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
+ seg, next = mseg, mseg.NextSegment()
+ } else {
+ seg, next = next, next.NextSegment()
+ }
+ }
+}
+
+// MergeRange attempts to merge all adjacent segments that contain a key in the
+// specific range. All existing iterators are invalidated.
+func (s *usageSet) MergeRange(r __generics_imported0.FileRange) {
+ seg := s.LowerBoundSegment(r.Start)
+ if !seg.Ok() {
+ return
+ }
+ next := seg.NextSegment()
+ for next.Ok() && next.Range().Start < r.End {
+ if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
+ seg, next = mseg, mseg.NextSegment()
+ } else {
+ seg, next = next, next.NextSegment()
+ }
+ }
+}
+
+// MergeAdjacent attempts to merge the segment containing r.Start with its
+// predecessor, and the segment containing r.End-1 with its successor.
+func (s *usageSet) MergeAdjacent(r __generics_imported0.FileRange) {
+ first := s.FindSegment(r.Start)
+ if first.Ok() {
+ if prev := first.PrevSegment(); prev.Ok() {
+ s.Merge(prev, first)
+ }
+ }
+ last := s.FindSegment(r.End - 1)
+ if last.Ok() {
+ if next := last.NextSegment(); next.Ok() {
+ s.Merge(last, next)
+ }
+ }
+}
+
+// Split splits the given segment at the given key and returns iterators to the
+// two resulting segments. All existing iterators (including seg, but not
+// including the returned iterators) are invalidated.
+//
+// If the segment cannot be split at split (because split is at the start or
+// end of the segment's range, so splitting would produce a segment with zero
+// length, or because split falls outside the segment's range altogether),
+// Split panics.
+func (s *usageSet) Split(seg usageIterator, split uint64) (usageIterator, usageIterator) {
+ if !seg.Range().CanSplitAt(split) {
+ panic(fmt.Sprintf("can't split %v at %v", seg.Range(), split))
+ }
+ return s.SplitUnchecked(seg, split)
+}
+
+// SplitUnchecked splits the given segment at the given key and returns
+// iterators to the two resulting segments. All existing iterators (including
+// seg, but not including the returned iterators) are invalidated.
+//
+// Preconditions: seg.Start() < key < seg.End().
+func (s *usageSet) SplitUnchecked(seg usageIterator, split uint64) (usageIterator, usageIterator) {
+ val1, val2 := (usageSetFunctions{}).Split(seg.Range(), seg.Value(), split)
+ end2 := seg.End()
+ seg.SetEndUnchecked(split)
+ seg.SetValue(val1)
+ seg2 := s.InsertWithoutMergingUnchecked(seg.NextGap(), __generics_imported0.FileRange{split, end2}, val2)
+
+ return seg2.PrevSegment(), seg2
+}
+
+// SplitAt splits the segment straddling split, if one exists. SplitAt returns
+// true if a segment was split and false otherwise. If SplitAt splits a
+// segment, all existing iterators are invalidated.
+func (s *usageSet) SplitAt(split uint64) bool {
+ if seg := s.FindSegment(split); seg.Ok() && seg.Range().CanSplitAt(split) {
+ s.SplitUnchecked(seg, split)
+ return true
+ }
+ return false
+}
+
+// Isolate ensures that the given segment's range does not escape r by
+// splitting at r.Start and r.End if necessary, and returns an updated iterator
+// to the bounded segment. All existing iterators (including seg, but not
+// including the returned iterators) are invalidated.
+func (s *usageSet) Isolate(seg usageIterator, r __generics_imported0.FileRange) usageIterator {
+ if seg.Range().CanSplitAt(r.Start) {
+ _, seg = s.SplitUnchecked(seg, r.Start)
+ }
+ if seg.Range().CanSplitAt(r.End) {
+ seg, _ = s.SplitUnchecked(seg, r.End)
+ }
+ return seg
+}
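Isolate is the clamp that RemoveRange above and ApplyContiguous below rely on: it guarantees the returned segment lies entirely within r before anything is mutated. A hypothetical sketch inside package pgalloc:

package pgalloc

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/sentry/memmap"
)

// exampleIsolate (hypothetical) splits a large segment so that exactly the
// [0x1000, 0x2000) portion can be modified independently of the rest.
func exampleIsolate() {
	var (
		s    usageSet
		info usageInfo
	)
	s.Add(memmap.FileRange{Start: 0, End: 0x4000}, info)

	r := memmap.FileRange{Start: 0x1000, End: 0x2000}
	seg := s.Isolate(s.FindSegment(r.Start), r)
	fmt.Printf("isolated segment [%#x, %#x)\n", seg.Start(), seg.End()) // [0x1000, 0x2000)
}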
+
+// ApplyContiguous applies a function to a contiguous range of segments,
+// splitting if necessary. The function is applied until the first gap is
+// encountered, at which point the gap is returned. If the function is applied
+// across the entire range, a terminal gap is returned. All existing iterators
+// are invalidated.
+//
+// N.B. The Iterator must not be invalidated by the function.
+func (s *usageSet) ApplyContiguous(r __generics_imported0.FileRange, fn func(seg usageIterator)) usageGapIterator {
+ seg, gap := s.Find(r.Start)
+ if !seg.Ok() {
+ return gap
+ }
+ for {
+ seg = s.Isolate(seg, r)
+ fn(seg)
+ if seg.End() >= r.End {
+ return usageGapIterator{}
+ }
+ gap = seg.NextGap()
+ if !gap.IsEmpty() {
+ return gap
+ }
+ seg = gap.NextSegment()
+ if !seg.Ok() {
+
+ return usageGapIterator{}
+ }
+ }
+}
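ApplyContiguous stops at the first hole it meets, which is why callers must inspect the returned gap iterator to know whether the whole range was covered. A hypothetical sketch inside package pgalloc:

package pgalloc

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/sentry/memmap"
)

// exampleApplyContiguous (hypothetical) walks the segments overlapping
// [0, 0x3000). The set has a hole at [0x1000, 0x2000), so the walk visits one
// segment and returns that gap instead of a terminal iterator.
func exampleApplyContiguous() {
	var (
		s    usageSet
		info usageInfo
	)
	s.Add(memmap.FileRange{Start: 0, End: 0x1000}, info)
	s.Add(memmap.FileRange{Start: 0x2000, End: 0x3000}, info)

	visited := 0
	gap := s.ApplyContiguous(memmap.FileRange{Start: 0, End: 0x3000}, func(seg usageIterator) {
		visited++
	})
	if gap.Ok() {
		fmt.Printf("visited %d segment(s), stopped at gap [%#x, %#x)\n", visited, gap.Start(), gap.End())
	}
}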
+
+// +stateify savable
+type usagenode struct {
+ // An internal binary tree node looks like:
+ //
+ // K
+ // / \
+ // Cl Cr
+ //
+ // where all keys in the subtree rooted by Cl (the left subtree) are less
+ // than K (the key of the parent node), and all keys in the subtree rooted
+ // by Cr (the right subtree) are greater than K.
+ //
+ // An internal B-tree node's indexes work out to look like:
+ //
+ // K0 K1 K2 ... Kn-1
+ // / \/ \/ \ ... / \
+ // C0 C1 C2 C3 ... Cn-1 Cn
+ //
+ // where n is nrSegments.
+ nrSegments int
+
+ // parent is a pointer to this node's parent. If this node is root, parent
+ // is nil.
+ parent *usagenode
+
+ // parentIndex is the index of this node in parent.children.
+ parentIndex int
+
+ // Flag for internal nodes that is technically redundant with "children[0]
+ // != nil", but is stored in the first cache line. "hasChildren" rather
+ // than "isLeaf" because false must be the correct value for an empty root.
+ hasChildren bool
+
+ // The longest gap within this node. If the node is a leaf, it's simply the
+ // maximum gap among all the (nrSegments+1) gaps formed by its nrSegments keys
+ // including the 0th and nrSegments-th gap possibly shared with its upper-level
+ // nodes; if it's a non-leaf node, it's the max of all children's maxGap.
+ maxGap usagedynamicGap
+
+ // Nodes store keys and values in separate arrays to maximize locality in
+ // the common case (scanning keys for lookup).
+ keys [usagemaxDegree - 1]__generics_imported0.FileRange
+ values [usagemaxDegree - 1]usageInfo
+ children [usagemaxDegree]*usagenode
+}
+
+// firstSegment returns the first segment in the subtree rooted by n.
+//
+// Preconditions: n.nrSegments != 0.
+func (n *usagenode) firstSegment() usageIterator {
+ for n.hasChildren {
+ n = n.children[0]
+ }
+ return usageIterator{n, 0}
+}
+
+// lastSegment returns the last segment in the subtree rooted by n.
+//
+// Preconditions: n.nrSegments != 0.
+func (n *usagenode) lastSegment() usageIterator {
+ for n.hasChildren {
+ n = n.children[n.nrSegments]
+ }
+ return usageIterator{n, n.nrSegments - 1}
+}
+
+func (n *usagenode) prevSibling() *usagenode {
+ if n.parent == nil || n.parentIndex == 0 {
+ return nil
+ }
+ return n.parent.children[n.parentIndex-1]
+}
+
+func (n *usagenode) nextSibling() *usagenode {
+ if n.parent == nil || n.parentIndex == n.parent.nrSegments {
+ return nil
+ }
+ return n.parent.children[n.parentIndex+1]
+}
+
+// rebalanceBeforeInsert splits n and its ancestors if they are full, as
+// required for insertion, and returns an updated iterator to the position
+// represented by gap.
+func (n *usagenode) rebalanceBeforeInsert(gap usageGapIterator) usageGapIterator {
+ if n.nrSegments < usagemaxDegree-1 {
+ return gap
+ }
+ if n.parent != nil {
+ gap = n.parent.rebalanceBeforeInsert(gap)
+ }
+ if n.parent == nil {
+
+ left := &usagenode{
+ nrSegments: usageminDegree - 1,
+ parent: n,
+ parentIndex: 0,
+ hasChildren: n.hasChildren,
+ }
+ right := &usagenode{
+ nrSegments: usageminDegree - 1,
+ parent: n,
+ parentIndex: 1,
+ hasChildren: n.hasChildren,
+ }
+ copy(left.keys[:usageminDegree-1], n.keys[:usageminDegree-1])
+ copy(left.values[:usageminDegree-1], n.values[:usageminDegree-1])
+ copy(right.keys[:usageminDegree-1], n.keys[usageminDegree:])
+ copy(right.values[:usageminDegree-1], n.values[usageminDegree:])
+ n.keys[0], n.values[0] = n.keys[usageminDegree-1], n.values[usageminDegree-1]
+ usagezeroValueSlice(n.values[1:])
+ if n.hasChildren {
+ copy(left.children[:usageminDegree], n.children[:usageminDegree])
+ copy(right.children[:usageminDegree], n.children[usageminDegree:])
+ usagezeroNodeSlice(n.children[2:])
+ for i := 0; i < usageminDegree; i++ {
+ left.children[i].parent = left
+ left.children[i].parentIndex = i
+ right.children[i].parent = right
+ right.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments = 1
+ n.hasChildren = true
+ n.children[0] = left
+ n.children[1] = right
+
+ if usagetrackGaps != 0 {
+ left.updateMaxGapLocal()
+ right.updateMaxGapLocal()
+ }
+ if gap.node != n {
+ return gap
+ }
+ if gap.index < usageminDegree {
+ return usageGapIterator{left, gap.index}
+ }
+ return usageGapIterator{right, gap.index - usageminDegree}
+ }
+
+ copy(n.parent.keys[n.parentIndex+1:], n.parent.keys[n.parentIndex:n.parent.nrSegments])
+ copy(n.parent.values[n.parentIndex+1:], n.parent.values[n.parentIndex:n.parent.nrSegments])
+ n.parent.keys[n.parentIndex], n.parent.values[n.parentIndex] = n.keys[usageminDegree-1], n.values[usageminDegree-1]
+ copy(n.parent.children[n.parentIndex+2:], n.parent.children[n.parentIndex+1:n.parent.nrSegments+1])
+ for i := n.parentIndex + 2; i < n.parent.nrSegments+2; i++ {
+ n.parent.children[i].parentIndex = i
+ }
+ sibling := &usagenode{
+ nrSegments: usageminDegree - 1,
+ parent: n.parent,
+ parentIndex: n.parentIndex + 1,
+ hasChildren: n.hasChildren,
+ }
+ n.parent.children[n.parentIndex+1] = sibling
+ n.parent.nrSegments++
+ copy(sibling.keys[:usageminDegree-1], n.keys[usageminDegree:])
+ copy(sibling.values[:usageminDegree-1], n.values[usageminDegree:])
+ usagezeroValueSlice(n.values[usageminDegree-1:])
+ if n.hasChildren {
+ copy(sibling.children[:usageminDegree], n.children[usageminDegree:])
+ usagezeroNodeSlice(n.children[usageminDegree:])
+ for i := 0; i < usageminDegree; i++ {
+ sibling.children[i].parent = sibling
+ sibling.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments = usageminDegree - 1
+
+ if usagetrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+
+ if gap.node != n {
+ return gap
+ }
+ if gap.index < usageminDegree {
+ return gap
+ }
+ return usageGapIterator{sibling, gap.index - usageminDegree}
+}
+
+// rebalanceAfterRemove "unsplits" n and its ancestors if they are deficient
+// (contain fewer segments than required by B-tree invariants), as required for
+// removal, and returns an updated iterator to the position represented by gap.
+//
+// Precondition: n is the only node in the tree that may currently violate a
+// B-tree invariant.
+func (n *usagenode) rebalanceAfterRemove(gap usageGapIterator) usageGapIterator {
+ for {
+ if n.nrSegments >= usageminDegree-1 {
+ return gap
+ }
+ if n.parent == nil {
+ // n is the root and is allowed to be deficient; nothing more to do.
+ return gap
+ }
+
+ if sibling := n.prevSibling(); sibling != nil && sibling.nrSegments >= usageminDegree {
+ copy(n.keys[1:], n.keys[:n.nrSegments])
+ copy(n.values[1:], n.values[:n.nrSegments])
+ n.keys[0] = n.parent.keys[n.parentIndex-1]
+ n.values[0] = n.parent.values[n.parentIndex-1]
+ n.parent.keys[n.parentIndex-1] = sibling.keys[sibling.nrSegments-1]
+ n.parent.values[n.parentIndex-1] = sibling.values[sibling.nrSegments-1]
+ usageSetFunctions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
+ if n.hasChildren {
+ copy(n.children[1:], n.children[:n.nrSegments+1])
+ n.children[0] = sibling.children[sibling.nrSegments]
+ sibling.children[sibling.nrSegments] = nil
+ n.children[0].parent = n
+ n.children[0].parentIndex = 0
+ for i := 1; i < n.nrSegments+2; i++ {
+ n.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments++
+ sibling.nrSegments--
+
+ if usagetrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+ if gap.node == sibling && gap.index == sibling.nrSegments {
+ return usageGapIterator{n, 0}
+ }
+ if gap.node == n {
+ return usageGapIterator{n, gap.index + 1}
+ }
+ return gap
+ }
+ if sibling := n.nextSibling(); sibling != nil && sibling.nrSegments >= usageminDegree {
+ n.keys[n.nrSegments] = n.parent.keys[n.parentIndex]
+ n.values[n.nrSegments] = n.parent.values[n.parentIndex]
+ n.parent.keys[n.parentIndex] = sibling.keys[0]
+ n.parent.values[n.parentIndex] = sibling.values[0]
+ copy(sibling.keys[:sibling.nrSegments-1], sibling.keys[1:])
+ copy(sibling.values[:sibling.nrSegments-1], sibling.values[1:])
+ usageSetFunctions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
+ if n.hasChildren {
+ n.children[n.nrSegments+1] = sibling.children[0]
+ copy(sibling.children[:sibling.nrSegments], sibling.children[1:])
+ sibling.children[sibling.nrSegments] = nil
+ n.children[n.nrSegments+1].parent = n
+ n.children[n.nrSegments+1].parentIndex = n.nrSegments + 1
+ for i := 0; i < sibling.nrSegments; i++ {
+ sibling.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments++
+ sibling.nrSegments--
+
+ if usagetrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+ if gap.node == sibling {
+ if gap.index == 0 {
+ return usageGapIterator{n, n.nrSegments}
+ }
+ return usageGapIterator{sibling, gap.index - 1}
+ }
+ return gap
+ }
+
+ p := n.parent
+ if p.nrSegments == 1 {
+ // Merge p's two children back into p, shrinking the tree by one level.
+ left, right := p.children[0], p.children[1]
+ p.nrSegments = left.nrSegments + right.nrSegments + 1
+ p.hasChildren = left.hasChildren
+ p.keys[left.nrSegments] = p.keys[0]
+ p.values[left.nrSegments] = p.values[0]
+ copy(p.keys[:left.nrSegments], left.keys[:left.nrSegments])
+ copy(p.values[:left.nrSegments], left.values[:left.nrSegments])
+ copy(p.keys[left.nrSegments+1:], right.keys[:right.nrSegments])
+ copy(p.values[left.nrSegments+1:], right.values[:right.nrSegments])
+ if left.hasChildren {
+ copy(p.children[:left.nrSegments+1], left.children[:left.nrSegments+1])
+ copy(p.children[left.nrSegments+1:], right.children[:right.nrSegments+1])
+ for i := 0; i < p.nrSegments+1; i++ {
+ p.children[i].parent = p
+ p.children[i].parentIndex = i
+ }
+ } else {
+ p.children[0] = nil
+ p.children[1] = nil
+ }
+
+ if gap.node == left {
+ return usageGapIterator{p, gap.index}
+ }
+ if gap.node == right {
+ return usageGapIterator{p, gap.index + left.nrSegments + 1}
+ }
+ return gap
+ }
+ // Merge n and either sibling, along with the segment separating the
+ // two, into whichever of the two nodes comes first. This is the
+ // reverse of the non-root splitting case in
+ // node.rebalanceBeforeInsert.
+ var left, right *usagenode
+ if n.parentIndex > 0 {
+ left = n.prevSibling()
+ right = n
+ } else {
+ left = n
+ right = n.nextSibling()
+ }
+
+ if gap.node == right {
+ gap = usageGapIterator{left, gap.index + left.nrSegments + 1}
+ }
+ left.keys[left.nrSegments] = p.keys[left.parentIndex]
+ left.values[left.nrSegments] = p.values[left.parentIndex]
+ copy(left.keys[left.nrSegments+1:], right.keys[:right.nrSegments])
+ copy(left.values[left.nrSegments+1:], right.values[:right.nrSegments])
+ if left.hasChildren {
+ copy(left.children[left.nrSegments+1:], right.children[:right.nrSegments+1])
+ for i := left.nrSegments + 1; i < left.nrSegments+right.nrSegments+2; i++ {
+ left.children[i].parent = left
+ left.children[i].parentIndex = i
+ }
+ }
+ left.nrSegments += right.nrSegments + 1
+ copy(p.keys[left.parentIndex:], p.keys[left.parentIndex+1:p.nrSegments])
+ copy(p.values[left.parentIndex:], p.values[left.parentIndex+1:p.nrSegments])
+ usageSetFunctions{}.ClearValue(&p.values[p.nrSegments-1])
+ copy(p.children[left.parentIndex+1:], p.children[left.parentIndex+2:p.nrSegments+1])
+ for i := 0; i < p.nrSegments; i++ {
+ p.children[i].parentIndex = i
+ }
+ p.children[p.nrSegments] = nil
+ p.nrSegments--
+
+ if usagetrackGaps != 0 {
+ left.updateMaxGapLocal()
+ }
+
+ n = p
+ }
+}
+
+// updateMaxGapLeaf updates maxGap bottom-up, starting from the calling leaf,
+// until no further update is necessary.
+//
+// Preconditions: n must be a leaf node, trackGaps must be 1.
+func (n *usagenode) updateMaxGapLeaf() {
+ if n.hasChildren {
+ panic(fmt.Sprintf("updateMaxGapLeaf should always be called on leaf node: %v", n))
+ }
+ max := n.calculateMaxGapLeaf()
+ if max == n.maxGap.Get() {
+ // The leaf's maxGap is unchanged; nothing to propagate.
+ return
+ }
+ oldMax := n.maxGap.Get()
+ n.maxGap.Set(max)
+ if max > oldMax {
+ // maxGap grew: push the new value upward until an ancestor already covers it.
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() >= max {
+ // This ancestor already accounts for a gap at least this large; stop.
+ break
+ }
+
+ p.maxGap.Set(max)
+ }
+ return
+ }
+ // maxGap shrank: recompute ancestors' maxGap until one is unaffected.
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() > oldMax {
+
+ break
+ }
+
+ parentNewMax := p.calculateMaxGapInternal()
+ if p.maxGap.Get() == parentNewMax {
+
+ break
+ }
+
+ p.maxGap.Set(parentNewMax)
+ }
+}
+
+// updateMaxGapLocal updates the maxGap of the calling node only, without
+// propagating to ancestor nodes.
+//
+// Precondition: trackGaps must be 1.
+func (n *usagenode) updateMaxGapLocal() {
+ if !n.hasChildren {
+
+ n.maxGap.Set(n.calculateMaxGapLeaf())
+ } else {
+
+ n.maxGap.Set(n.calculateMaxGapInternal())
+ }
+}
+
+// calculateMaxGapLeaf iterates over the gaps within a leaf node and returns
+// the maximum gap length.
+//
+// Preconditions: n must be a leaf node.
+func (n *usagenode) calculateMaxGapLeaf() uint64 {
+ max := usageGapIterator{n, 0}.Range().Length()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := (usageGapIterator{n, i}).Range().Length(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// calculateMaxGapInternal iterates over the children's maxGap values of an
+// internal node n and returns the maximum.
+//
+// Preconditions: n must be a non-leaf node.
+func (n *usagenode) calculateMaxGapInternal() uint64 {
+ max := n.children[0].maxGap.Get()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := n.children[i].maxGap.Get(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// searchFirstLargeEnoughGap returns the first gap of length at least minSize
+// in the subtree rooted by n. If no such gap exists, it returns a terminal gap
+// iterator.
+func (n *usagenode) searchFirstLargeEnoughGap(minSize uint64) usageGapIterator {
+ if n.maxGap.Get() < minSize {
+ return usageGapIterator{}
+ }
+ if n.hasChildren {
+ for i := 0; i <= n.nrSegments; i++ {
+ if largeEnoughGap := n.children[i].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := 0; i <= n.nrSegments; i++ {
+ currentGap := usageGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
+// searchLastLargeEnoughGap returns the last gap of length at least minSize in
+// the subtree rooted by n. If no such gap exists, it returns a terminal gap
+// iterator.
+func (n *usagenode) searchLastLargeEnoughGap(minSize uint64) usageGapIterator {
+ if n.maxGap.Get() < minSize {
+ return usageGapIterator{}
+ }
+ if n.hasChildren {
+ for i := n.nrSegments; i >= 0; i-- {
+ if largeEnoughGap := n.children[i].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := n.nrSegments; i >= 0; i-- {
+ currentGap := usageGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
+// An Iterator is conceptually one of:
+//
+// - A pointer to a segment in a set; or
+//
+// - A terminal iterator, which is a sentinel indicating that the end of
+// iteration has been reached.
+//
+// Iterators are copyable values and are meaningfully equality-comparable. The
+// zero value of Iterator is a terminal iterator.
+//
+// Unless otherwise specified, any mutation of a set invalidates all existing
+// iterators into the set.
+type usageIterator struct {
+ // node is the node containing the iterated segment. If the iterator is
+ // terminal, node is nil.
+ node *usagenode
+
+ // index is the index of the segment in node.keys/values.
+ index int
+}
+
+// Ok returns true if the iterator is not terminal. All other methods are only
+// valid for non-terminal iterators.
+func (seg usageIterator) Ok() bool {
+ return seg.node != nil
+}
+
+// Range returns the iterated segment's range key.
+func (seg usageIterator) Range() __generics_imported0.FileRange {
+ return seg.node.keys[seg.index]
+}
+
+// Start is equivalent to Range().Start, but should be preferred if only the
+// start of the range is needed.
+func (seg usageIterator) Start() uint64 {
+ return seg.node.keys[seg.index].Start
+}
+
+// End is equivalent to Range().End, but should be preferred if only the end of
+// the range is needed.
+func (seg usageIterator) End() uint64 {
+ return seg.node.keys[seg.index].End
+}
+
+// SetRangeUnchecked mutates the iterated segment's range key. This operation
+// does not invalidate any iterators.
+//
+// Preconditions:
+// * r.Length() > 0.
+// * The new range must not overlap an existing one:
+// * If seg.NextSegment().Ok(), then r.end <= seg.NextSegment().Start().
+// * If seg.PrevSegment().Ok(), then r.start >= seg.PrevSegment().End().
+func (seg usageIterator) SetRangeUnchecked(r __generics_imported0.FileRange) {
+ seg.node.keys[seg.index] = r
+}
+
+// SetRange mutates the iterated segment's range key. If the new range would
+// cause the iterated segment to overlap another segment, or if the new range
+// is invalid, SetRange panics. This operation does not invalidate any
+// iterators.
+func (seg usageIterator) SetRange(r __generics_imported0.FileRange) {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ if prev := seg.PrevSegment(); prev.Ok() && r.Start < prev.End() {
+ panic(fmt.Sprintf("new segment range %v overlaps segment range %v", r, prev.Range()))
+ }
+ if next := seg.NextSegment(); next.Ok() && r.End > next.Start() {
+ panic(fmt.Sprintf("new segment range %v overlaps segment range %v", r, next.Range()))
+ }
+ seg.SetRangeUnchecked(r)
+}
+
+// SetStartUnchecked mutates the iterated segment's start. This operation does
+// not invalidate any iterators.
+//
+// Preconditions: The new start must be valid:
+// * start < seg.End()
+// * If seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
+func (seg usageIterator) SetStartUnchecked(start uint64) {
+ seg.node.keys[seg.index].Start = start
+}
+
+// SetStart mutates the iterated segment's start. If the new start value would
+// cause the iterated segment to overlap another segment, or would result in an
+// invalid range, SetStart panics. This operation does not invalidate any
+// iterators.
+func (seg usageIterator) SetStart(start uint64) {
+ if start >= seg.End() {
+ panic(fmt.Sprintf("new start %v would invalidate segment range %v", start, seg.Range()))
+ }
+ if prev := seg.PrevSegment(); prev.Ok() && start < prev.End() {
+ panic(fmt.Sprintf("new start %v would cause segment range %v to overlap segment range %v", start, seg.Range(), prev.Range()))
+ }
+ seg.SetStartUnchecked(start)
+}
+
+// SetEndUnchecked mutates the iterated segment's end. This operation does not
+// invalidate any iterators.
+//
+// Preconditions: The new end must be valid:
+// * end > seg.Start().
+// * If seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
+func (seg usageIterator) SetEndUnchecked(end uint64) {
+ seg.node.keys[seg.index].End = end
+}
+
+// SetEnd mutates the iterated segment's end. If the new end value would cause
+// the iterated segment to overlap another segment, or would result in an
+// invalid range, SetEnd panics. This operation does not invalidate any
+// iterators.
+func (seg usageIterator) SetEnd(end uint64) {
+ if end <= seg.Start() {
+ panic(fmt.Sprintf("new end %v would invalidate segment range %v", end, seg.Range()))
+ }
+ if next := seg.NextSegment(); next.Ok() && end > next.Start() {
+ panic(fmt.Sprintf("new end %v would cause segment range %v to overlap segment range %v", end, seg.Range(), next.Range()))
+ }
+ seg.SetEndUnchecked(end)
+}
+
+// Value returns a copy of the iterated segment's value.
+func (seg usageIterator) Value() usageInfo {
+ return seg.node.values[seg.index]
+}
+
+// ValuePtr returns a pointer to the iterated segment's value. The pointer is
+// invalidated if the iterator is invalidated. This operation does not
+// invalidate any iterators.
+func (seg usageIterator) ValuePtr() *usageInfo {
+ return &seg.node.values[seg.index]
+}
+
+// SetValue mutates the iterated segment's value. This operation does not
+// invalidate any iterators.
+func (seg usageIterator) SetValue(val usageInfo) {
+ seg.node.values[seg.index] = val
+}
+
+// PrevSegment returns the iterated segment's predecessor. If there is no
+// preceding segment, PrevSegment returns a terminal iterator.
+func (seg usageIterator) PrevSegment() usageIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index].lastSegment()
+ }
+ if seg.index > 0 {
+ return usageIterator{seg.node, seg.index - 1}
+ }
+ if seg.node.parent == nil {
+ return usageIterator{}
+ }
+ return usagesegmentBeforePosition(seg.node.parent, seg.node.parentIndex)
+}
+
+// NextSegment returns the iterated segment's successor. If there is no
+// succeeding segment, NextSegment returns a terminal iterator.
+func (seg usageIterator) NextSegment() usageIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index+1].firstSegment()
+ }
+ if seg.index < seg.node.nrSegments-1 {
+ return usageIterator{seg.node, seg.index + 1}
+ }
+ if seg.node.parent == nil {
+ return usageIterator{}
+ }
+ return usagesegmentAfterPosition(seg.node.parent, seg.node.parentIndex)
+}
+
+// PrevGap returns the gap immediately before the iterated segment.
+func (seg usageIterator) PrevGap() usageGapIterator {
+ if seg.node.hasChildren {
+
+ return seg.node.children[seg.index].lastSegment().NextGap()
+ }
+ return usageGapIterator{seg.node, seg.index}
+}
+
+// NextGap returns the gap immediately after the iterated segment.
+func (seg usageIterator) NextGap() usageGapIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index+1].firstSegment().PrevGap()
+ }
+ return usageGapIterator{seg.node, seg.index + 1}
+}
+
+// PrevNonEmpty returns the iterated segment's predecessor if it is adjacent,
+// or the gap before the iterated segment otherwise. If seg.Start() ==
+// Functions.MinKey(), PrevNonEmpty will return two terminal iterators.
+// Otherwise, exactly one of the iterators returned by PrevNonEmpty will be
+// non-terminal.
+func (seg usageIterator) PrevNonEmpty() (usageIterator, usageGapIterator) {
+ gap := seg.PrevGap()
+ if gap.Range().Length() != 0 {
+ return usageIterator{}, gap
+ }
+ return gap.PrevSegment(), usageGapIterator{}
+}
+
+// NextNonEmpty returns the iterated segment's successor if it is adjacent, or
+// the gap after the iterated segment otherwise. If seg.End() ==
+// Functions.MaxKey(), NextNonEmpty will return two terminal iterators.
+// Otherwise, exactly one of the iterators returned by NextNonEmpty will be
+// non-terminal.
+func (seg usageIterator) NextNonEmpty() (usageIterator, usageGapIterator) {
+ gap := seg.NextGap()
+ if gap.Range().Length() != 0 {
+ return usageIterator{}, gap
+ }
+ return gap.NextSegment(), usageGapIterator{}
+}
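+
+// Example (hypothetical, not part of the generated set): plain in-order
+// iteration with the Iterator API, summing the lengths of all segment ranges
+// in s.
+func usagetotalSegmentLength(s *usageSet) uint64 {
+ var total uint64
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ total += seg.Range().Length()
+ }
+ return total
+}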
+
+// A GapIterator is conceptually one of:
+//
+// - A pointer to a position between two segments, before the first segment, or
+// after the last segment in a set, called a *gap*; or
+//
+// - A terminal iterator, which is a sentinel indicating that the end of
+// iteration has been reached.
+//
+// Note that the gap between two adjacent segments exists (iterators to it are
+// non-terminal), but has a length of zero. GapIterator.IsEmpty returns true
+// for such gaps. An empty set contains a single gap, spanning the entire range
+// of the set's keys.
+//
+// GapIterators are copyable values and are meaningfully equality-comparable.
+// The zero value of GapIterator is a terminal iterator.
+//
+// Unless otherwise specified, any mutation of a set invalidates all existing
+// iterators into the set.
+type usageGapIterator struct {
+ // The representation of a GapIterator is identical to that of an Iterator,
+ // except that index corresponds to positions between segments in the same
+ // way as for node.children (see comment for node.nrSegments).
+ node *usagenode
+ index int
+}
+
+// Ok returns true if the iterator is not terminal. All other methods are only
+// valid for non-terminal iterators.
+func (gap usageGapIterator) Ok() bool {
+ return gap.node != nil
+}
+
+// Range returns the range spanned by the iterated gap.
+func (gap usageGapIterator) Range() __generics_imported0.FileRange {
+ return __generics_imported0.FileRange{gap.Start(), gap.End()}
+}
+
+// Start is equivalent to Range().Start, but should be preferred if only the
+// start of the range is needed.
+func (gap usageGapIterator) Start() uint64 {
+ if ps := gap.PrevSegment(); ps.Ok() {
+ return ps.End()
+ }
+ return usageSetFunctions{}.MinKey()
+}
+
+// End is equivalent to Range().End, but should be preferred if only the end of
+// the range is needed.
+func (gap usageGapIterator) End() uint64 {
+ if ns := gap.NextSegment(); ns.Ok() {
+ return ns.Start()
+ }
+ return usageSetFunctions{}.MaxKey()
+}
+
+// IsEmpty returns true if the iterated gap is empty (that is, the "gap" lies
+// between two adjacent segments).
+func (gap usageGapIterator) IsEmpty() bool {
+ return gap.Range().Length() == 0
+}
+
+// PrevSegment returns the segment immediately before the iterated gap. If no
+// such segment exists, PrevSegment returns a terminal iterator.
+func (gap usageGapIterator) PrevSegment() usageIterator {
+ return usagesegmentBeforePosition(gap.node, gap.index)
+}
+
+// NextSegment returns the segment immediately after the iterated gap. If no
+// such segment exists, NextSegment returns a terminal iterator.
+func (gap usageGapIterator) NextSegment() usageIterator {
+ return usagesegmentAfterPosition(gap.node, gap.index)
+}
+
+// PrevGap returns the iterated gap's predecessor. If no such gap exists,
+// PrevGap returns a terminal iterator.
+func (gap usageGapIterator) PrevGap() usageGapIterator {
+ seg := gap.PrevSegment()
+ if !seg.Ok() {
+ return usageGapIterator{}
+ }
+ return seg.PrevGap()
+}
+
+// NextGap returns the iterated gap's successor. If no such gap exists, NextGap
+// returns a terminal iterator.
+func (gap usageGapIterator) NextGap() usageGapIterator {
+ seg := gap.NextSegment()
+ if !seg.Ok() {
+ return usageGapIterator{}
+ }
+ return seg.NextGap()
+}
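+
+// Example (hypothetical, not part of the generated set): a naive first-fit
+// scan over gaps, returning the first gap of at least minSize bytes or a
+// terminal iterator. NextLargeEnoughGap, defined below, performs the same
+// search more efficiently by consulting cached maxGap values, but requires
+// gap tracking to be enabled.
+func usagefindGapNaive(s *usageSet, minSize uint64) usageGapIterator {
+ for gap := s.FirstGap(); gap.Ok(); gap = gap.NextGap() {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ return usageGapIterator{}
+}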
+
+// NextLargeEnoughGap returns the closest gap after the iterated gap whose
+// length is at least minSize, NOT including the iterated gap itself. If no
+// such gap exists, it returns a terminal gap iterator.
+//
+// Precondition: trackGaps must be 1.
+func (gap usageGapIterator) NextLargeEnoughGap(minSize uint64) usageGapIterator {
+ if usagetrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == gap.node.nrSegments {
+
+ gap.node = gap.NextSegment().node
+ gap.index = 0
+ return gap.nextLargeEnoughGapHelper(minSize)
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
+
+// nextLargeEnoughGapHelper is the helper function used by NextLargeEnoughGap
+// to perform the actual recursive search.
+//
+// Preconditions: gap is NOT the trailing gap of a non-leaf node.
+func (gap usageGapIterator) nextLargeEnoughGapHelper(minSize uint64) usageGapIterator {
+
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == gap.node.nrSegments)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return usageGapIterator{}
+ }
+
+ gap.index++
+ for gap.index <= gap.node.nrSegments {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index++
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == gap.node.nrSegments {
+
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
+
+// PrevLargeEnoughGap returns the closest gap before the iterated gap whose
+// length is at least minSize, NOT including the iterated gap itself. If no
+// such gap exists, it returns a terminal gap iterator.
+//
+// Precondition: trackGaps must be 1.
+func (gap usageGapIterator) PrevLargeEnoughGap(minSize uint64) usageGapIterator {
+ if usagetrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == 0 {
+
+ gap.node = gap.PrevSegment().node
+ gap.index = gap.node.nrSegments
+ return gap.prevLargeEnoughGapHelper(minSize)
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
+
+// prevLargeEnoughGapHelper is the helper function used by PrevLargeEnoughGap
+// to perform the actual recursive search.
+//
+// Preconditions: gap is NOT the first gap of a non-leaf node.
+func (gap usageGapIterator) prevLargeEnoughGapHelper(minSize uint64) usageGapIterator {
+
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == 0)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return usageGapIterator{}
+ }
+
+ gap.index--
+ for gap.index >= 0 {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index--
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == 0 {
+
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
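+
+// Example (hypothetical, not part of the generated set): using the
+// gap-tracking search above. This assumes the set was instantiated with
+// trackGaps == 1; otherwise NextLargeEnoughGap panics.
+func usagefirstFit(s *usageSet, minSize uint64) usageGapIterator {
+ gap := s.FirstGap()
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ // Skip directly to the next gap of sufficient size using cached maxGap
+ // values, instead of scanning every gap.
+ return gap.NextLargeEnoughGap(minSize)
+}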
+
+// segmentBeforePosition returns the predecessor segment of the position given
+// by n.children[i], which may or may not contain a child. If no such segment
+// exists, segmentBeforePosition returns a terminal iterator.
+func usagesegmentBeforePosition(n *usagenode, i int) usageIterator {
+ for i == 0 {
+ if n.parent == nil {
+ return usageIterator{}
+ }
+ n, i = n.parent, n.parentIndex
+ }
+ return usageIterator{n, i - 1}
+}
+
+// segmentAfterPosition returns the successor segment of the position given by
+// n.children[i], which may or may not contain a child. If no such segment
+// exists, segmentAfterPosition returns a terminal iterator.
+func usagesegmentAfterPosition(n *usagenode, i int) usageIterator {
+ for i == n.nrSegments {
+ if n.parent == nil {
+ return usageIterator{}
+ }
+ n, i = n.parent, n.parentIndex
+ }
+ return usageIterator{n, i}
+}
+
+func usagezeroValueSlice(slice []usageInfo) {
+
+ for i := range slice {
+ usageSetFunctions{}.ClearValue(&slice[i])
+ }
+}
+
+func usagezeroNodeSlice(slice []*usagenode) {
+ for i := range slice {
+ slice[i] = nil
+ }
+}
+
+// String stringifies a Set for debugging.
+func (s *usageSet) String() string {
+ return s.root.String()
+}
+
+// String stringifies a node (and all of its children) for debugging.
+func (n *usagenode) String() string {
+ var buf bytes.Buffer
+ n.writeDebugString(&buf, "")
+ return buf.String()
+}
+
+func (n *usagenode) writeDebugString(buf *bytes.Buffer, prefix string) {
+ if n.hasChildren != (n.nrSegments > 0 && n.children[0] != nil) {
+ buf.WriteString(prefix)
+ buf.WriteString(fmt.Sprintf("WARNING: inconsistent value of hasChildren: got %v, want %v\n", n.hasChildren, !n.hasChildren))
+ }
+ for i := 0; i < n.nrSegments; i++ {
+ if child := n.children[i]; child != nil {
+ cprefix := fmt.Sprintf("%s- % 3d ", prefix, i)
+ if child.parent != n || child.parentIndex != i {
+ buf.WriteString(cprefix)
+ buf.WriteString(fmt.Sprintf("WARNING: inconsistent linkage to parent: got (%p, %d), want (%p, %d)\n", child.parent, child.parentIndex, n, i))
+ }
+ child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, i))
+ }
+ buf.WriteString(prefix)
+ if n.hasChildren {
+ if usagetrackGaps != 0 {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v, maxGap: %d\n", i, n.keys[i], n.values[i], n.maxGap.Get()))
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
+ }
+ if child := n.children[n.nrSegments]; child != nil {
+ child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, n.nrSegments))
+ }
+}
+
+// SegmentDataSlices represents segments from a set as slices of start keys,
+// end keys, and values. SegmentDataSlices is primarily used as an intermediate
+// representation for save/restore, and its layout is optimized for that purpose.
+//
+// +stateify savable
+type usageSegmentDataSlices struct {
+ Start []uint64
+ End []uint64
+ Values []usageInfo
+}
+
+// ExportSortedSlices returns a copy of all segments in the given set, in
+// ascending key order.
+func (s *usageSet) ExportSortedSlices() *usageSegmentDataSlices {
+ var sds usageSegmentDataSlices
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ sds.Start = append(sds.Start, seg.Start())
+ sds.End = append(sds.End, seg.End())
+ sds.Values = append(sds.Values, seg.Value())
+ }
+ sds.Start = sds.Start[:len(sds.Start):len(sds.Start)]
+ sds.End = sds.End[:len(sds.End):len(sds.End)]
+ sds.Values = sds.Values[:len(sds.Values):len(sds.Values)]
+ return &sds
+}
+
+// ImportSortedSlices initializes the given set from the given slices.
+//
+// Preconditions:
+// * s must be empty.
+// * sds must represent a valid set (the segments in sds must have valid
+// lengths that do not overlap).
+// * The segments in sds must be sorted in ascending key order.
+func (s *usageSet) ImportSortedSlices(sds *usageSegmentDataSlices) error {
+ if !s.IsEmpty() {
+ return fmt.Errorf("cannot import into non-empty set %v", s)
+ }
+ gap := s.FirstGap()
+ for i := range sds.Start {
+ r := __generics_imported0.FileRange{sds.Start[i], sds.End[i]}
+ if !gap.Range().IsSupersetOf(r) {
+ return fmt.Errorf("segment overlaps a preceding segment or is incorrectly sorted: [%d, %d) => %v", sds.Start[i], sds.End[i], sds.Values[i])
+ }
+ gap = s.InsertWithoutMerging(gap, r, sds.Values[i]).NextGap()
+ }
+ return nil
+}
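+
+// Example (hypothetical, not part of the generated set): the intended
+// save/restore round trip, exporting a set's segments to SegmentDataSlices and
+// rebuilding an empty set from them.
+func usageroundTripSlices(src, dst *usageSet) error {
+ sds := src.ExportSortedSlices()
+ return dst.ImportSortedSlices(sds)
+}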
+
+// segmentTestCheck returns an error if s is incorrectly sorted, does not
+// contain exactly expectedSegments segments, or contains a segment which
+// fails the passed check.
+//
+// This should be used only for testing, and has been added to this package for
+// templating convenience.
+func (s *usageSet) segmentTestCheck(expectedSegments int, segFunc func(int, __generics_imported0.FileRange, usageInfo) error) error {
+ havePrev := false
+ prev := uint64(0)
+ nrSegments := 0
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ next := seg.Start()
+ if havePrev && prev >= next {
+ return fmt.Errorf("incorrect order: key %d (segment %d) >= key %d (segment %d)", prev, nrSegments-1, next, nrSegments)
+ }
+ if segFunc != nil {
+ if err := segFunc(nrSegments, seg.Range(), seg.Value()); err != nil {
+ return err
+ }
+ }
+ prev = next
+ havePrev = true
+ nrSegments++
+ }
+ if nrSegments != expectedSegments {
+ return fmt.Errorf("incorrect number of segments: got %d, wanted %d", nrSegments, expectedSegments)
+ }
+ return nil
+}
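+
+// Example (hypothetical, not part of the generated set): a test helper built
+// on segmentTestCheck that verifies s holds exactly wantSegments segments,
+// each with a non-empty range.
+func usagecheckNonEmptySegments(s *usageSet, wantSegments int) error {
+ return s.segmentTestCheck(wantSegments, func(i int, r __generics_imported0.FileRange, _ usageInfo) error {
+ if r.Length() == 0 {
+ return fmt.Errorf("segment %d has empty range %v", i, r)
+ }
+ return nil
+ })
+}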
+
+// countSegments counts the number of segments in the set.
+//
+// Similar to Check, this should only be used for testing.
+func (s *usageSet) countSegments() (segments int) {
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ segments++
+ }
+ return segments
+}
+func (s *usageSet) saveRoot() *usageSegmentDataSlices {
+ return s.ExportSortedSlices()
+}
+
+func (s *usageSet) loadRoot(sds *usageSegmentDataSlices) {
+ if err := s.ImportSortedSlices(sds); err != nil {
+ panic(err)
+ }
+}
diff --git a/pkg/sentry/platform/BUILD b/pkg/sentry/platform/BUILD
deleted file mode 100644
index 7125657b3..000000000
--- a/pkg/sentry/platform/BUILD
+++ /dev/null
@@ -1,23 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "platform",
- srcs = [
- "context.go",
- "mmap_min_addr.go",
- "platform.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/hostarch",
- "//pkg/seccomp",
- "//pkg/sentry/arch",
- "//pkg/sentry/hostmm",
- "//pkg/sentry/memmap",
- "//pkg/usermem",
- ],
-)
diff --git a/pkg/sentry/platform/interrupt/BUILD b/pkg/sentry/platform/interrupt/BUILD
deleted file mode 100644
index 83b385f14..000000000
--- a/pkg/sentry/platform/interrupt/BUILD
+++ /dev/null
@@ -1,19 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "interrupt",
- srcs = [
- "interrupt.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = ["//pkg/sync"],
-)
-
-go_test(
- name = "interrupt_test",
- size = "small",
- srcs = ["interrupt_test.go"],
- library = ":interrupt",
-)
diff --git a/pkg/sentry/platform/interrupt/interrupt_state_autogen.go b/pkg/sentry/platform/interrupt/interrupt_state_autogen.go
new file mode 100644
index 000000000..1336e5f01
--- /dev/null
+++ b/pkg/sentry/platform/interrupt/interrupt_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package interrupt
diff --git a/pkg/sentry/platform/interrupt/interrupt_test.go b/pkg/sentry/platform/interrupt/interrupt_test.go
deleted file mode 100644
index 0ecdf6e7a..000000000
--- a/pkg/sentry/platform/interrupt/interrupt_test.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package interrupt
-
-import (
- "testing"
-)
-
-type countingReceiver struct {
- interrupts int
-}
-
-// NotifyInterrupt implements Receiver.NotifyInterrupt.
-func (r *countingReceiver) NotifyInterrupt() {
- r.interrupts++
-}
-
-func TestSingleInterruptBeforeEnable(t *testing.T) {
- var (
- f Forwarder
- r countingReceiver
- )
- f.NotifyInterrupt()
- // The interrupt should cause the first Enable to fail.
- if f.Enable(&r) {
- f.Disable()
- t.Fatalf("Enable: got true, wanted false")
- }
- // The failing Enable "acknowledges" the interrupt, allowing future Enables
- // to succeed.
- if !f.Enable(&r) {
- t.Fatalf("Enable: got false, wanted true")
- }
- f.Disable()
-}
-
-func TestMultipleInterruptsBeforeEnable(t *testing.T) {
- var (
- f Forwarder
- r countingReceiver
- )
- f.NotifyInterrupt()
- f.NotifyInterrupt()
- // The interrupts should cause the first Enable to fail.
- if f.Enable(&r) {
- f.Disable()
- t.Fatalf("Enable: got true, wanted false")
- }
- // Interrupts are deduplicated while the Forwarder is disabled, so the
- // failing Enable "acknowledges" all interrupts, allowing future Enables to
- // succeed.
- if !f.Enable(&r) {
- t.Fatalf("Enable: got false, wanted true")
- }
- f.Disable()
-}
-
-func TestSingleInterruptAfterEnable(t *testing.T) {
- var (
- f Forwarder
- r countingReceiver
- )
- if !f.Enable(&r) {
- t.Fatalf("Enable: got false, wanted true")
- }
- defer f.Disable()
- f.NotifyInterrupt()
- if r.interrupts != 1 {
- t.Errorf("interrupts: got %d, wanted 1", r.interrupts)
- }
-}
-
-func TestMultipleInterruptsAfterEnable(t *testing.T) {
- var (
- f Forwarder
- r countingReceiver
- )
- if !f.Enable(&r) {
- t.Fatalf("Enable: got false, wanted true")
- }
- defer f.Disable()
- f.NotifyInterrupt()
- f.NotifyInterrupt()
- if r.interrupts != 2 {
- t.Errorf("interrupts: got %d, wanted 2", r.interrupts)
- }
-}
diff --git a/pkg/sentry/platform/kvm/BUILD b/pkg/sentry/platform/kvm/BUILD
deleted file mode 100644
index 8a490b3de..000000000
--- a/pkg/sentry/platform/kvm/BUILD
+++ /dev/null
@@ -1,101 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "kvm",
- srcs = [
- "address_space.go",
- "address_space_amd64.go",
- "address_space_arm64.go",
- "bluepill.go",
- "bluepill_allocator.go",
- "bluepill_amd64.go",
- "bluepill_amd64_unsafe.go",
- "bluepill_arm64.go",
- "bluepill_arm64.s",
- "bluepill_arm64_unsafe.go",
- "bluepill_fault.go",
- "bluepill_impl_amd64.s",
- "bluepill_unsafe.go",
- "context.go",
- "filters_amd64.go",
- "filters_arm64.go",
- "kvm.go",
- "kvm_amd64.go",
- "kvm_amd64_unsafe.go",
- "kvm_arm64.go",
- "kvm_arm64_unsafe.go",
- "kvm_const.go",
- "kvm_const_arm64.go",
- "machine.go",
- "machine_amd64.go",
- "machine_amd64_unsafe.go",
- "machine_arm64.go",
- "machine_arm64_unsafe.go",
- "machine_unsafe.go",
- "physical_map.go",
- "physical_map_amd64.go",
- "physical_map_arm64.go",
- "virtual_map.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/atomicbitops",
- "//pkg/context",
- "//pkg/cpuid",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/procid",
- "//pkg/ring0",
- "//pkg/ring0/pagetables",
- "//pkg/safecopy",
- "//pkg/seccomp",
- "//pkg/sentry/arch",
- "//pkg/sentry/arch/fpu",
- "//pkg/sentry/memmap",
- "//pkg/sentry/platform",
- "//pkg/sentry/platform/interrupt",
- "//pkg/sentry/time",
- "//pkg/sync",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "kvm_test",
- srcs = [
- "kvm_amd64_test.go",
- "kvm_amd64_test.s",
- "kvm_arm64_test.go",
- "kvm_test.go",
- "virtual_map_test.go",
- ],
- library = ":kvm",
- tags = [
- "manual",
- "nogotsan",
- "requires-kvm",
- ],
- deps = [
- "//pkg/abi/linux",
- "//pkg/hostarch",
- "//pkg/ring0",
- "//pkg/ring0/pagetables",
- "//pkg/sentry/arch",
- "//pkg/sentry/arch/fpu",
- "//pkg/sentry/platform",
- "//pkg/sentry/platform/kvm/testutil",
- "//pkg/sentry/time",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-genrule(
- name = "bluepill_impl_amd64",
- srcs = ["bluepill_amd64.s"],
- outs = ["bluepill_impl_amd64.s"],
- cmd = "(echo -e '// build +amd64\\n' && $(location //pkg/ring0/gen_offsets) && cat $(SRCS)) > $@",
- tools = ["//pkg/ring0/gen_offsets"],
-)
diff --git a/pkg/sentry/platform/kvm/bluepill_amd64.s b/pkg/sentry/platform/kvm/bluepill_impl_amd64.s
index c2a1dca11..99f254342 100644
--- a/pkg/sentry/platform/kvm/bluepill_amd64.s
+++ b/pkg/sentry/platform/kvm/bluepill_impl_amd64.s
@@ -1,3 +1,73 @@
+// build +amd64
+
+// Automatically generated, do not edit.
+
+// CPU offsets.
+#define CPU_REGISTERS 0x28
+#define CPU_ERROR_CODE 0x10
+#define CPU_ERROR_TYPE 0x18
+#define CPU_ENTRY 0x20
+
+// CPU entry offsets.
+#define ENTRY_SCRATCH0 0x100
+#define ENTRY_STACK_TOP 0x108
+#define ENTRY_CPU_SELF 0x110
+#define ENTRY_KERNEL_CR3 0x118
+
+// Bits.
+#define _RFLAGS_IF 0x200
+#define _RFLAGS_IOPL0 0x1000
+#define _KERNEL_FLAGS 0x02
+
+// Vectors.
+#define DivideByZero 0x00
+#define Debug 0x01
+#define NMI 0x02
+#define Breakpoint 0x03
+#define Overflow 0x04
+#define BoundRangeExceeded 0x05
+#define InvalidOpcode 0x06
+#define DeviceNotAvailable 0x07
+#define DoubleFault 0x08
+#define CoprocessorSegmentOverrun 0x09
+#define InvalidTSS 0x0a
+#define SegmentNotPresent 0x0b
+#define StackSegmentFault 0x0c
+#define GeneralProtectionFault 0x0d
+#define PageFault 0x0e
+#define X87FloatingPointException 0x10
+#define AlignmentCheck 0x11
+#define MachineCheck 0x12
+#define SIMDFloatingPointException 0x13
+#define VirtualizationException 0x14
+#define SecurityException 0x1e
+#define SyscallInt80 0x80
+#define Syscall 0x100
+
+// Ptrace registers.
+#define PTRACE_R15 0x00
+#define PTRACE_R14 0x08
+#define PTRACE_R13 0x10
+#define PTRACE_R12 0x18
+#define PTRACE_RBP 0x20
+#define PTRACE_RBX 0x28
+#define PTRACE_R11 0x30
+#define PTRACE_R10 0x38
+#define PTRACE_R9 0x40
+#define PTRACE_R8 0x48
+#define PTRACE_RAX 0x50
+#define PTRACE_RCX 0x58
+#define PTRACE_RDX 0x60
+#define PTRACE_RSI 0x68
+#define PTRACE_RDI 0x70
+#define PTRACE_ORIGRAX 0x78
+#define PTRACE_RIP 0x80
+#define PTRACE_CS 0x88
+#define PTRACE_FLAGS 0x90
+#define PTRACE_RSP 0x98
+#define PTRACE_SS 0xa0
+#define PTRACE_FS_BASE 0xa8
+#define PTRACE_GS_BASE 0xb0
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/pkg/sentry/platform/kvm/kvm_amd64_state_autogen.go b/pkg/sentry/platform/kvm/kvm_amd64_state_autogen.go
new file mode 100644
index 000000000..a69cbee8b
--- /dev/null
+++ b/pkg/sentry/platform/kvm/kvm_amd64_state_autogen.go
@@ -0,0 +1,7 @@
+// automatically generated by stateify.
+
+// +build amd64
+// +build amd64
+// +build amd64
+
+package kvm
diff --git a/pkg/sentry/platform/kvm/kvm_amd64_test.go b/pkg/sentry/platform/kvm/kvm_amd64_test.go
deleted file mode 100644
index c3fbbdc75..000000000
--- a/pkg/sentry/platform/kvm/kvm_amd64_test.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build amd64
-// +build amd64
-
-package kvm
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/ring0"
- "gvisor.dev/gvisor/pkg/ring0/pagetables"
- "gvisor.dev/gvisor/pkg/sentry/arch"
- "gvisor.dev/gvisor/pkg/sentry/platform"
- "gvisor.dev/gvisor/pkg/sentry/platform/kvm/testutil"
-)
-
-func TestSegments(t *testing.T) {
- applicationTest(t, true, testutil.AddrOfTwiddleSegments(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
- testutil.SetTestSegments(regs)
- for {
- var si linux.SignalInfo
- if _, err := c.SwitchToUser(ring0.SwitchOpts{
- Registers: regs,
- FloatingPointState: &dummyFPState,
- PageTables: pt,
- FullRestore: true,
- }, &si); err == platform.ErrContextInterrupt {
- continue // Retry.
- } else if err != nil {
- t.Errorf("application segment check with full restore got unexpected error: %v", err)
- }
- if err := testutil.CheckTestSegments(regs); err != nil {
- t.Errorf("application segment check with full restore failed: %v", err)
- }
- break // Done.
- }
- return false
- })
-}
-
-// stmxcsr reads the MXCSR control and status register.
-func stmxcsr(addr *uint32)
-
-func TestMXCSR(t *testing.T) {
- applicationTest(t, true, testutil.AddrOfSyscallLoop(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
- var si linux.SignalInfo
- switchOpts := ring0.SwitchOpts{
- Registers: regs,
- FloatingPointState: &dummyFPState,
- PageTables: pt,
- FullRestore: true,
- }
-
- const mxcsrControllMask = uint32(0x1f80)
- mxcsrBefore := uint32(0)
- mxcsrAfter := uint32(0)
- stmxcsr(&mxcsrBefore)
- if mxcsrBefore == 0 {
- // goruntime sets mxcsr to 0x1f80 and it never changes
- // the control configuration.
- panic("mxcsr is zero")
- }
- switchOpts.FloatingPointState.SetMXCSR(0)
- if _, err := c.SwitchToUser(
- switchOpts, &si); err == platform.ErrContextInterrupt {
- return true // Retry.
- } else if err != nil {
- t.Errorf("application syscall failed: %v", err)
- }
- stmxcsr(&mxcsrAfter)
- if mxcsrAfter&mxcsrControllMask != mxcsrBefore&mxcsrControllMask {
- t.Errorf("mxcsr = %x (expected %x)", mxcsrBefore, mxcsrAfter)
- }
- return false
- })
-}
diff --git a/pkg/sentry/platform/kvm/kvm_amd64_test.s b/pkg/sentry/platform/kvm/kvm_amd64_test.s
deleted file mode 100644
index 8e9079867..000000000
--- a/pkg/sentry/platform/kvm/kvm_amd64_test.s
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "textflag.h"
-
-// stmxcsr reads the MXCSR control and status register.
-TEXT ·stmxcsr(SB),NOSPLIT,$0-8
- MOVQ addr+0(FP), SI
- STMXCSR (SI)
- RET
diff --git a/pkg/sentry/platform/kvm/kvm_amd64_unsafe_state_autogen.go b/pkg/sentry/platform/kvm/kvm_amd64_unsafe_state_autogen.go
new file mode 100644
index 000000000..a69cbee8b
--- /dev/null
+++ b/pkg/sentry/platform/kvm/kvm_amd64_unsafe_state_autogen.go
@@ -0,0 +1,7 @@
+// automatically generated by stateify.
+
+// +build amd64
+// +build amd64
+// +build amd64
+
+package kvm
diff --git a/pkg/sentry/platform/kvm/kvm_arm64_state_autogen.go b/pkg/sentry/platform/kvm/kvm_arm64_state_autogen.go
new file mode 100644
index 000000000..90183b764
--- /dev/null
+++ b/pkg/sentry/platform/kvm/kvm_arm64_state_autogen.go
@@ -0,0 +1,7 @@
+// automatically generated by stateify.
+
+// +build arm64
+// +build arm64
+// +build arm64
+
+package kvm
diff --git a/pkg/sentry/platform/kvm/kvm_arm64_test.go b/pkg/sentry/platform/kvm/kvm_arm64_test.go
deleted file mode 100644
index b53e354da..000000000
--- a/pkg/sentry/platform/kvm/kvm_arm64_test.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build arm64
-// +build arm64
-
-package kvm
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/sentry/platform/kvm/testutil"
-)
-
-func TestKernelTLS(t *testing.T) {
- bluepillTest(t, func(c *vCPU) {
- if !testutil.TLSWorks() {
- t.Errorf("tls does not work, and it should!")
- }
- })
-}
diff --git a/pkg/sentry/platform/kvm/kvm_arm64_unsafe_state_autogen.go b/pkg/sentry/platform/kvm/kvm_arm64_unsafe_state_autogen.go
new file mode 100644
index 000000000..90183b764
--- /dev/null
+++ b/pkg/sentry/platform/kvm/kvm_arm64_unsafe_state_autogen.go
@@ -0,0 +1,7 @@
+// automatically generated by stateify.
+
+// +build arm64
+// +build arm64
+// +build arm64
+
+package kvm
diff --git a/pkg/sentry/platform/kvm/kvm_state_autogen.go b/pkg/sentry/platform/kvm/kvm_state_autogen.go
new file mode 100644
index 000000000..8d85b96d0
--- /dev/null
+++ b/pkg/sentry/platform/kvm/kvm_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package kvm
diff --git a/pkg/sentry/platform/kvm/kvm_test.go b/pkg/sentry/platform/kvm/kvm_test.go
deleted file mode 100644
index 3a30286e2..000000000
--- a/pkg/sentry/platform/kvm/kvm_test.go
+++ /dev/null
@@ -1,529 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package kvm
-
-import (
- "math/rand"
- "reflect"
- "sync/atomic"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/hostarch"
- "gvisor.dev/gvisor/pkg/ring0"
- "gvisor.dev/gvisor/pkg/ring0/pagetables"
- "gvisor.dev/gvisor/pkg/sentry/arch"
- "gvisor.dev/gvisor/pkg/sentry/arch/fpu"
- "gvisor.dev/gvisor/pkg/sentry/platform"
- "gvisor.dev/gvisor/pkg/sentry/platform/kvm/testutil"
- ktime "gvisor.dev/gvisor/pkg/sentry/time"
-)
-
-var dummyFPState = fpu.NewState()
-
-type testHarness interface {
- Errorf(format string, args ...interface{})
- Fatalf(format string, args ...interface{})
-}
-
-func kvmTest(t testHarness, setup func(*KVM), fn func(*vCPU) bool) {
- // Create the machine.
- deviceFile, err := OpenDevice()
- if err != nil {
- t.Fatalf("error opening device file: %v", err)
- }
- k, err := New(deviceFile)
- if err != nil {
- t.Fatalf("error creating KVM instance: %v", err)
- }
- defer k.machine.Destroy()
-
- // Call additional setup.
- if setup != nil {
- setup(k)
- }
-
- var c *vCPU // For recovery.
- defer func() {
- redpill()
- if c != nil {
- k.machine.Put(c)
- }
- }()
- for {
- c = k.machine.Get()
- if !fn(c) {
- break
- }
-
- // We put the vCPU here and clear the value so that the
- // deferred recovery will not re-put it above.
- k.machine.Put(c)
- c = nil
- }
-}
-
-func bluepillTest(t testHarness, fn func(*vCPU)) {
- kvmTest(t, nil, func(c *vCPU) bool {
- bluepill(c)
- fn(c)
- return false
- })
-}
-
-func TestKernelSyscall(t *testing.T) {
- bluepillTest(t, func(c *vCPU) {
- redpill() // Leave guest mode.
- if got := atomic.LoadUint32(&c.state); got != vCPUUser {
- t.Errorf("vCPU not in ready state: got %v", got)
- }
- })
-}
-
-func hostFault() {
- defer func() {
- recover()
- }()
- var foo *int
- *foo = 0
-}
-
-func TestKernelFault(t *testing.T) {
- hostFault() // Ensure recovery works.
- bluepillTest(t, func(c *vCPU) {
- hostFault()
- if got := atomic.LoadUint32(&c.state); got != vCPUUser {
- t.Errorf("vCPU not in ready state: got %v", got)
- }
- })
-}
-
-func TestKernelFloatingPoint(t *testing.T) {
- bluepillTest(t, func(c *vCPU) {
- if !testutil.FloatingPointWorks() {
- t.Errorf("floating point does not work, and it should!")
- }
- })
-}
-
-func applicationTest(t testHarness, useHostMappings bool, targetFn uintptr, fn func(*vCPU, *arch.Registers, *pagetables.PageTables) bool) {
- // Initialize registers & page tables.
- var (
- regs arch.Registers
- pt *pagetables.PageTables
- )
- testutil.SetTestTarget(&regs, targetFn)
-
- kvmTest(t, func(k *KVM) {
- // Create new page tables.
- as, _, err := k.NewAddressSpace(nil /* invalidator */)
- if err != nil {
- t.Fatalf("can't create new address space: %v", err)
- }
- pt = as.(*addressSpace).pageTables
-
- if useHostMappings {
- // Apply the physical mappings to these page tables.
- // (This is normally dangerous, since they point to
- // physical pages that may not exist. This shouldn't be
- // done for regular user code, but is fine for test
- // purposes.)
- applyPhysicalRegions(func(pr physicalRegion) bool {
- pt.Map(hostarch.Addr(pr.virtual), pr.length, pagetables.MapOpts{
- AccessType: hostarch.AnyAccess,
- User: true,
- }, pr.physical)
- return true // Keep iterating.
- })
- }
- }, func(c *vCPU) bool {
- // Invoke the function with the extra data.
- return fn(c, &regs, pt)
- })
-}
-
-func TestApplicationSyscall(t *testing.T) {
- applicationTest(t, true, testutil.AddrOfSyscallLoop(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
- var si linux.SignalInfo
- if _, err := c.SwitchToUser(ring0.SwitchOpts{
- Registers: regs,
- FloatingPointState: &dummyFPState,
- PageTables: pt,
- FullRestore: true,
- }, &si); err == platform.ErrContextInterrupt {
- return true // Retry.
- } else if err != nil {
- t.Errorf("application syscall with full restore failed: %v", err)
- }
- return false
- })
- applicationTest(t, true, testutil.AddrOfSyscallLoop(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
- var si linux.SignalInfo
- if _, err := c.SwitchToUser(ring0.SwitchOpts{
- Registers: regs,
- FloatingPointState: &dummyFPState,
- PageTables: pt,
- }, &si); err == platform.ErrContextInterrupt {
- return true // Retry.
- } else if err != nil {
- t.Errorf("application syscall with partial restore failed: %v", err)
- }
- return false
- })
-}
-
-func TestApplicationFault(t *testing.T) {
- applicationTest(t, true, testutil.AddrOfTouch(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
- testutil.SetTouchTarget(regs, nil) // Cause fault.
- var si linux.SignalInfo
- if _, err := c.SwitchToUser(ring0.SwitchOpts{
- Registers: regs,
- FloatingPointState: &dummyFPState,
- PageTables: pt,
- FullRestore: true,
- }, &si); err == platform.ErrContextInterrupt {
- return true // Retry.
- } else if err != platform.ErrContextSignal || si.Signo != int32(unix.SIGSEGV) {
- t.Errorf("application fault with full restore got (%v, %v), expected (%v, SIGSEGV)", err, si, platform.ErrContextSignal)
- }
- return false
- })
- applicationTest(t, true, testutil.AddrOfTouch(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
- testutil.SetTouchTarget(regs, nil) // Cause fault.
- var si linux.SignalInfo
- if _, err := c.SwitchToUser(ring0.SwitchOpts{
- Registers: regs,
- FloatingPointState: &dummyFPState,
- PageTables: pt,
- }, &si); err == platform.ErrContextInterrupt {
- return true // Retry.
- } else if err != platform.ErrContextSignal || si.Signo != int32(unix.SIGSEGV) {
- t.Errorf("application fault with partial restore got (%v, %v), expected (%v, SIGSEGV)", err, si, platform.ErrContextSignal)
- }
- return false
- })
-}
-
-func TestRegistersSyscall(t *testing.T) {
- applicationTest(t, true, testutil.AddrOfTwiddleRegsSyscall(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
- testutil.SetTestRegs(regs) // Fill values for all registers.
- for {
- var si linux.SignalInfo
- if _, err := c.SwitchToUser(ring0.SwitchOpts{
- Registers: regs,
- FloatingPointState: &dummyFPState,
- PageTables: pt,
- }, &si); err == platform.ErrContextInterrupt {
- continue // Retry.
- } else if err != nil {
- t.Errorf("application register check with partial restore got unexpected error: %v", err)
- }
- if err := testutil.CheckTestRegs(regs, false); err != nil {
- t.Errorf("application register check with partial restore failed: %v", err)
- }
- break // Done.
- }
- return false
- })
-}
-
-func TestRegistersFault(t *testing.T) {
- applicationTest(t, true, testutil.AddrOfTwiddleRegsFault(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
- testutil.SetTestRegs(regs) // Fill values for all registers.
- for {
- var si linux.SignalInfo
- if _, err := c.SwitchToUser(ring0.SwitchOpts{
- Registers: regs,
- FloatingPointState: &dummyFPState,
- PageTables: pt,
- FullRestore: true,
- }, &si); err == platform.ErrContextInterrupt {
- continue // Retry.
- } else if err != platform.ErrContextSignal || si.Signo != int32(unix.SIGSEGV) {
- t.Errorf("application register check with full restore got unexpected error: %v", err)
- }
- if err := testutil.CheckTestRegs(regs, true); err != nil {
- t.Errorf("application register check with full restore failed: %v", err)
- }
- break // Done.
- }
- return false
- })
-}
-
-func TestBounce(t *testing.T) {
- applicationTest(t, true, testutil.AddrOfSpinLoop(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
- go func() {
- time.Sleep(time.Millisecond)
- c.BounceToKernel()
- }()
- var si linux.SignalInfo
- if _, err := c.SwitchToUser(ring0.SwitchOpts{
- Registers: regs,
- FloatingPointState: &dummyFPState,
- PageTables: pt,
- }, &si); err != platform.ErrContextInterrupt {
- t.Errorf("application partial restore: got %v, wanted %v", err, platform.ErrContextInterrupt)
- }
- return false
- })
- applicationTest(t, true, testutil.AddrOfSpinLoop(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
- go func() {
- time.Sleep(time.Millisecond)
- c.BounceToKernel()
- }()
- var si linux.SignalInfo
- if _, err := c.SwitchToUser(ring0.SwitchOpts{
- Registers: regs,
- FloatingPointState: &dummyFPState,
- PageTables: pt,
- FullRestore: true,
- }, &si); err != platform.ErrContextInterrupt {
- t.Errorf("application full restore: got %v, wanted %v", err, platform.ErrContextInterrupt)
- }
- return false
- })
-}
-
-func TestBounceStress(t *testing.T) {
- applicationTest(t, true, testutil.AddrOfSpinLoop(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
- randomSleep := func() {
- // O(hundreds of microseconds) is appropriate to ensure
- // different overlaps and different schedules.
- if n := rand.Intn(1000); n > 100 {
- time.Sleep(time.Duration(n) * time.Microsecond)
- }
- }
- for i := 0; i < 1000; i++ {
- // Start an asynchronously executing goroutine that
- // calls Bounce at pseudo-random point in time.
- // This should wind up calling Bounce when the
- // kernel is in various stages of the switch.
- go func() {
- randomSleep()
- c.BounceToKernel()
- }()
- randomSleep()
- var si linux.SignalInfo
- if _, err := c.SwitchToUser(ring0.SwitchOpts{
- Registers: regs,
- FloatingPointState: &dummyFPState,
- PageTables: pt,
- }, &si); err != platform.ErrContextInterrupt {
- t.Errorf("application partial restore: got %v, wanted %v", err, platform.ErrContextInterrupt)
- }
- c.unlock()
- randomSleep()
- c.lock()
- }
- return false
- })
-}
-
-func TestInvalidate(t *testing.T) {
- var data uintptr // Used below.
- applicationTest(t, true, testutil.AddrOfTouch(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
- testutil.SetTouchTarget(regs, &data) // Read legitimate value.
- for {
- var si linux.SignalInfo
- if _, err := c.SwitchToUser(ring0.SwitchOpts{
- Registers: regs,
- FloatingPointState: &dummyFPState,
- PageTables: pt,
- }, &si); err == platform.ErrContextInterrupt {
- continue // Retry.
- } else if err != nil {
- t.Errorf("application partial restore: got %v, wanted nil", err)
- }
- break // Done.
- }
- // Unmap the page containing data & invalidate.
- pt.Unmap(hostarch.Addr(reflect.ValueOf(&data).Pointer() & ^uintptr(hostarch.PageSize-1)), hostarch.PageSize)
- for {
- var si linux.SignalInfo
- if _, err := c.SwitchToUser(ring0.SwitchOpts{
- Registers: regs,
- FloatingPointState: &dummyFPState,
- PageTables: pt,
- Flush: true,
- }, &si); err == platform.ErrContextInterrupt {
- continue // Retry.
- } else if err != platform.ErrContextSignal {
- t.Errorf("application partial restore: got %v, wanted %v", err, platform.ErrContextSignal)
- }
- break // Success.
- }
- return false
- })
-}
-
-// IsFault returns true iff the given signal represents a fault.
-func IsFault(err error, si *linux.SignalInfo) bool {
- return err == platform.ErrContextSignal && si.Signo == int32(unix.SIGSEGV)
-}
-
-func TestEmptyAddressSpace(t *testing.T) {
- applicationTest(t, false, testutil.AddrOfSyscallLoop(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
- var si linux.SignalInfo
- if _, err := c.SwitchToUser(ring0.SwitchOpts{
- Registers: regs,
- FloatingPointState: &dummyFPState,
- PageTables: pt,
- }, &si); err == platform.ErrContextInterrupt {
- return true // Retry.
- } else if !IsFault(err, &si) {
-			t.Errorf("first fault with partial restore failed: got %v", err)
- t.Logf("registers: %#v", &regs)
- }
- return false
- })
- applicationTest(t, false, testutil.AddrOfSyscallLoop(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
- var si linux.SignalInfo
- if _, err := c.SwitchToUser(ring0.SwitchOpts{
- Registers: regs,
- FloatingPointState: &dummyFPState,
- PageTables: pt,
- FullRestore: true,
- }, &si); err == platform.ErrContextInterrupt {
- return true // Retry.
- } else if !IsFault(err, &si) {
-			t.Errorf("first fault with full restore failed: got %v", err)
- t.Logf("registers: %#v", &regs)
- }
- return false
- })
-}
-
-func TestWrongVCPU(t *testing.T) {
- kvmTest(t, nil, func(c1 *vCPU) bool {
- kvmTest(t, nil, func(c2 *vCPU) bool {
- // Basic test, one then the other.
- bluepill(c1)
- bluepill(c2)
- if c1.guestExits == 0 {
- // Check: vCPU1 will exit due to redpill() in bluepill(c2).
- // Don't allow the test to proceed if this fails.
- t.Fatalf("wrong vCPU#1 exits: vCPU1=%+v,vCPU2=%+v", c1, c2)
- }
-
- // Alternate vCPUs; we expect to need to trigger the
- // wrong vCPU path on each switch.
- for i := 0; i < 100; i++ {
- bluepill(c1)
- bluepill(c2)
- }
- if count := c1.guestExits; count < 90 {
- t.Errorf("wrong vCPU#1 exits: vCPU1=%+v,vCPU2=%+v", c1, c2)
- }
- if count := c2.guestExits; count < 90 {
- t.Errorf("wrong vCPU#2 exits: vCPU1=%+v,vCPU2=%+v", c1, c2)
- }
- return false
- })
- return false
- })
- kvmTest(t, nil, func(c1 *vCPU) bool {
- kvmTest(t, nil, func(c2 *vCPU) bool {
- bluepill(c1)
- bluepill(c2)
- return false
- })
- return false
- })
-}
-
-func TestRdtsc(t *testing.T) {
- var i int // Iteration count.
- kvmTest(t, nil, func(c *vCPU) bool {
- start := ktime.Rdtsc()
- bluepill(c)
- guest := ktime.Rdtsc()
- redpill()
- end := ktime.Rdtsc()
- if start > guest || guest > end {
- t.Errorf("inconsistent time: start=%d, guest=%d, end=%d", start, guest, end)
- }
- i++
- return i < 100
- })
-}
-
-func BenchmarkApplicationSyscall(b *testing.B) {
- var (
- i int // Iteration includes machine.Get() / machine.Put().
- a int // Count for ErrContextInterrupt.
- )
- applicationTest(b, true, testutil.AddrOfSyscallLoop(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
- var si linux.SignalInfo
- if _, err := c.SwitchToUser(ring0.SwitchOpts{
- Registers: regs,
- FloatingPointState: &dummyFPState,
- PageTables: pt,
- }, &si); err == platform.ErrContextInterrupt {
- a++
- return true // Ignore.
- } else if err != nil {
- b.Fatalf("benchmark failed: %v", err)
- }
- i++
- return i < b.N
- })
- if a != 0 {
- b.Logf("ErrContextInterrupt occurred %d times (in %d iterations).", a, a+i)
- }
-}
-
-func BenchmarkKernelSyscall(b *testing.B) {
-	// Note that the target passed here is irrelevant; we never execute SwitchToUser.
- applicationTest(b, true, testutil.AddrOfGetpid(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
-		// Iteration does not include machine.Get() / machine.Put().
- for i := 0; i < b.N; i++ {
- testutil.Getpid()
- }
- return false
- })
-}
-
-func BenchmarkWorldSwitchToUserRoundtrip(b *testing.B) {
-	// See BenchmarkApplicationSyscall.
- var (
- i int
- a int
- )
- applicationTest(b, true, testutil.AddrOfSyscallLoop(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
- var si linux.SignalInfo
- if _, err := c.SwitchToUser(ring0.SwitchOpts{
- Registers: regs,
- FloatingPointState: &dummyFPState,
- PageTables: pt,
- }, &si); err == platform.ErrContextInterrupt {
- a++
- return true // Ignore.
- } else if err != nil {
- b.Fatalf("benchmark failed: %v", err)
- }
- // This will intentionally cause the world switch. By executing
- // a host syscall here, we force the transition between guest
- // and host mode.
- testutil.Getpid()
- i++
- return i < b.N
- })
- if a != 0 {
- b.Logf("ErrContextInterrupt occurred %d times (in %d iterations).", a, a+i)
- }
-}
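
The tests and benchmarks above all share the same retry idiom around SwitchToUser. As a reference, a minimal sketch of that loop, using the types and fields from the deleted tests (the helper name switchUntilSettled is hypothetical):

func switchUntilSettled(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) error {
	for {
		var si linux.SignalInfo
		_, err := c.SwitchToUser(ring0.SwitchOpts{
			Registers:          regs,
			FloatingPointState: &dummyFPState,
			PageTables:         pt,
		}, &si)
		if err == platform.ErrContextInterrupt {
			continue // Interrupted (e.g. by BounceToKernel); retry the switch.
		}
		return err // nil on a clean return, or a real failure.
	}
}
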
diff --git a/pkg/sentry/platform/kvm/kvm_unsafe_state_autogen.go b/pkg/sentry/platform/kvm/kvm_unsafe_state_autogen.go
new file mode 100644
index 000000000..f1270c722
--- /dev/null
+++ b/pkg/sentry/platform/kvm/kvm_unsafe_state_autogen.go
@@ -0,0 +1,6 @@
+// automatically generated by stateify.
+
+// +build go1.12,!go1.18
+// +build go1.12,!go1.18
+
+package kvm
diff --git a/pkg/sentry/platform/kvm/testutil/BUILD b/pkg/sentry/platform/kvm/testutil/BUILD
deleted file mode 100644
index f7feb8683..000000000
--- a/pkg/sentry/platform/kvm/testutil/BUILD
+++ /dev/null
@@ -1,17 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "testutil",
- testonly = 1,
- srcs = [
- "testutil.go",
- "testutil_amd64.go",
- "testutil_amd64.s",
- "testutil_arm64.go",
- "testutil_arm64.s",
- ],
- visibility = ["//pkg/sentry/platform/kvm:__pkg__"],
- deps = ["//pkg/sentry/arch"],
-)
diff --git a/pkg/sentry/platform/kvm/testutil/testutil.go b/pkg/sentry/platform/kvm/testutil/testutil.go
deleted file mode 100644
index d8c273796..000000000
--- a/pkg/sentry/platform/kvm/testutil/testutil.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package testutil provides common assembly stubs for testing.
-package testutil
-
-import (
- "fmt"
- "strings"
-)
-
-// Getpid executes a trivial system call.
-func Getpid()
-
-// AddrOfGetpid returns the address of Getpid.
-//
-// In Go 1.17+, Go references to assembly functions resolve to an ABIInternal
-// wrapper function rather than the function itself. We must reference from
-// assembly to get the ABI0 (i.e., primary) address.
-func AddrOfGetpid() uintptr
-
-// AddrOfTouch returns the address of a function that touches the value in the
-// first register.
-func AddrOfTouch() uintptr
-func touch()
-
-// AddrOfSyscallLoop returns the address of a function that executes a syscall
-// and loops.
-func AddrOfSyscallLoop() uintptr
-func syscallLoop()
-
-// AddrOfSpinLoop returns the address of a function that spins on the CPU.
-func AddrOfSpinLoop() uintptr
-func spinLoop()
-
-// AddrOfHaltLoop returns the address of a function that immediately halts and
-// loops.
-func AddrOfHaltLoop() uintptr
-func haltLoop()
-
-// AddrOfTwiddleRegsFault returns the address of a function that twiddles
-// registers then faults.
-func AddrOfTwiddleRegsFault() uintptr
-func twiddleRegsFault()
-
-// AddrOfTwiddleRegsSyscall returns the address of a function that twiddles
-// registers then executes a syscall.
-func AddrOfTwiddleRegsSyscall() uintptr
-func twiddleRegsSyscall()
-
-// FloatingPointWorks is a floating point test.
-//
-// It returns true if floating point state is preserved across a syscall,
-// false otherwise.
-func FloatingPointWorks() bool
-
-// RegisterMismatchError is used for checking registers.
-type RegisterMismatchError []string
-
-// Error returns a human-readable error.
-func (r RegisterMismatchError) Error() string {
- return strings.Join([]string(r), ";")
-}
-
-// addRegisterMismatch allows simple chaining of register mismatches.
-func addRegisterMismatch(err error, reg string, got, expected interface{}) error {
- errStr := fmt.Sprintf("%s got %08x, expected %08x", reg, got, expected)
- switch r := err.(type) {
- case nil:
- // Return a new register mismatch.
- return RegisterMismatchError{errStr}
- case RegisterMismatchError:
- // Append the error.
- r = append(r, errStr)
- return r
- default:
- // Leave as is.
- return err
- }
-}
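
A hypothetical usage sketch of the chaining helper above (not part of the deleted file): starting from a nil error, each call either creates or extends a RegisterMismatchError, and Error() joins the entries with ";".

func exampleRegisterMismatch() error {
	var err error
	err = addRegisterMismatch(err, "R15", uint64(0), ^uint64(0x15)) // creates the error
	err = addRegisterMismatch(err, "R14", uint64(0), ^uint64(0x14)) // appends to it
	return err // non-nil; both mismatches are reported in one message
}
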
diff --git a/pkg/sentry/platform/kvm/testutil/testutil_amd64.go b/pkg/sentry/platform/kvm/testutil/testutil_amd64.go
deleted file mode 100644
index 98c52b2f5..000000000
--- a/pkg/sentry/platform/kvm/testutil/testutil_amd64.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build amd64
-// +build amd64
-
-package testutil
-
-import (
- "reflect"
-
- "gvisor.dev/gvisor/pkg/sentry/arch"
-)
-
-// AddrOfTwiddleSegments returns the address of a function that reads segments
-// into known registers.
-func AddrOfTwiddleSegments() uintptr
-func twiddleSegments()
-
-// SetTestTarget sets the rip appropriately.
-func SetTestTarget(regs *arch.Registers, fn uintptr) {
- regs.Rip = uint64(fn)
-}
-
-// SetTouchTarget sets rax appropriately.
-func SetTouchTarget(regs *arch.Registers, target *uintptr) {
- if target != nil {
- regs.Rax = uint64(reflect.ValueOf(target).Pointer())
- } else {
- regs.Rax = 0
- }
-}
-
-// RewindSyscall rewinds the RIP past a syscall instruction.
-func RewindSyscall(regs *arch.Registers) {
- regs.Rip -= 2
-}
-
-// SetTestRegs initializes registers to known values.
-func SetTestRegs(regs *arch.Registers) {
- regs.R15 = 0x15
- regs.R14 = 0x14
- regs.R13 = 0x13
- regs.R12 = 0x12
- regs.Rbp = 0xb9
- regs.Rbx = 0xb4
- regs.R11 = 0x11
- regs.R10 = 0x10
- regs.R9 = 0x09
- regs.R8 = 0x08
- regs.Rax = 0x44
- regs.Rcx = 0xc4
- regs.Rdx = 0xd4
- regs.Rsi = 0x51
- regs.Rdi = 0xd1
- regs.Rsp = 0x59
-}
-
-// CheckTestRegs checks that registers were twiddled per TwiddleRegs.
-func CheckTestRegs(regs *arch.Registers, full bool) (err error) {
- if need := ^uint64(0x15); regs.R15 != need {
- err = addRegisterMismatch(err, "R15", regs.R15, need)
- }
- if need := ^uint64(0x14); regs.R14 != need {
- err = addRegisterMismatch(err, "R14", regs.R14, need)
- }
- if need := ^uint64(0x13); regs.R13 != need {
- err = addRegisterMismatch(err, "R13", regs.R13, need)
- }
- if need := ^uint64(0x12); regs.R12 != need {
- err = addRegisterMismatch(err, "R12", regs.R12, need)
- }
- if need := ^uint64(0xb9); regs.Rbp != need {
- err = addRegisterMismatch(err, "Rbp", regs.Rbp, need)
- }
- if need := ^uint64(0xb4); regs.Rbx != need {
- err = addRegisterMismatch(err, "Rbx", regs.Rbx, need)
- }
- if need := ^uint64(0x10); regs.R10 != need {
- err = addRegisterMismatch(err, "R10", regs.R10, need)
- }
- if need := ^uint64(0x09); regs.R9 != need {
- err = addRegisterMismatch(err, "R9", regs.R9, need)
- }
- if need := ^uint64(0x08); regs.R8 != need {
- err = addRegisterMismatch(err, "R8", regs.R8, need)
- }
- if need := ^uint64(0x44); regs.Rax != need {
- err = addRegisterMismatch(err, "Rax", regs.Rax, need)
- }
- if need := ^uint64(0xd4); regs.Rdx != need {
- err = addRegisterMismatch(err, "Rdx", regs.Rdx, need)
- }
- if need := ^uint64(0x51); regs.Rsi != need {
- err = addRegisterMismatch(err, "Rsi", regs.Rsi, need)
- }
- if need := ^uint64(0xd1); regs.Rdi != need {
- err = addRegisterMismatch(err, "Rdi", regs.Rdi, need)
- }
- if need := ^uint64(0x59); regs.Rsp != need {
- err = addRegisterMismatch(err, "Rsp", regs.Rsp, need)
- }
-	// Rcx & R11 are clobbered by SYSCALL (they carry the return RIP and
-	// RFLAGS), so they are only checked when a full restore is requested.
- if need := ^uint64(0x11); full && regs.R11 != need {
- err = addRegisterMismatch(err, "R11", regs.R11, need)
- }
- if need := ^uint64(0xc4); full && regs.Rcx != need {
- err = addRegisterMismatch(err, "Rcx", regs.Rcx, need)
- }
- return
-}
-
-var fsData uint64 = 0x55
-var gsData uint64 = 0x85
-
-// SetTestSegments initializes segments to known values.
-func SetTestSegments(regs *arch.Registers) {
- regs.Fs_base = uint64(reflect.ValueOf(&fsData).Pointer())
- regs.Gs_base = uint64(reflect.ValueOf(&gsData).Pointer())
-}
-
-// CheckTestSegments checks that registers were twiddled per TwiddleSegments.
-func CheckTestSegments(regs *arch.Registers) (err error) {
- if regs.Rax != fsData {
- err = addRegisterMismatch(err, "Rax", regs.Rax, fsData)
- }
- if regs.Rbx != gsData {
-		err = addRegisterMismatch(err, "Rbx", regs.Rbx, gsData)
- }
- return
-}
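
The long if-chain in CheckTestRegs above could equivalently be written table-driven. A sketch under that assumption (checkTestRegsTable is hypothetical; most registers are elided for brevity):

func checkTestRegsTable(regs *arch.Registers, full bool) (err error) {
	checks := []struct {
		name     string
		got      uint64
		seed     uint64 // Value installed by SetTestRegs.
		fullOnly bool   // Rcx/R11 are clobbered by SYSCALL on partial restore.
	}{
		{"R15", regs.R15, 0x15, false},
		{"R14", regs.R14, 0x14, false},
		{"R11", regs.R11, 0x11, true},
		{"Rcx", regs.Rcx, 0xc4, true},
		// ... remaining registers follow the same pattern ...
	}
	for _, c := range checks {
		if c.fullOnly && !full {
			continue
		}
		if want := ^c.seed; c.got != want {
			err = addRegisterMismatch(err, c.name, c.got, want)
		}
	}
	return
}
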
diff --git a/pkg/sentry/platform/kvm/testutil/testutil_amd64.s b/pkg/sentry/platform/kvm/testutil/testutil_amd64.s
deleted file mode 100644
index 65e7c05ea..000000000
--- a/pkg/sentry/platform/kvm/testutil/testutil_amd64.s
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build amd64
-
-// testutil_amd64.s provides AMD64 test functions.
-
-#include "funcdata.h"
-#include "textflag.h"
-
-TEXT ·Getpid(SB),NOSPLIT,$0
- NO_LOCAL_POINTERS
- MOVQ $39, AX // getpid
- SYSCALL
- RET
-
-// func AddrOfGetpid() uintptr
-TEXT ·AddrOfGetpid(SB), $0-8
- MOVQ $·Getpid(SB), AX
- MOVQ AX, ret+0(FP)
- RET
-
-TEXT ·touch(SB),NOSPLIT,$0
-start:
- MOVQ 0(AX), BX // deref AX
- MOVQ $39, AX // getpid
- SYSCALL
- JMP start
-
-// func AddrOfTouch() uintptr
-TEXT ·AddrOfTouch(SB), $0-8
- MOVQ $·touch(SB), AX
- MOVQ AX, ret+0(FP)
- RET
-
-TEXT ·syscallLoop(SB),NOSPLIT,$0
-start:
- SYSCALL
- JMP start
-
-// func AddrOfSyscallLoop() uintptr
-TEXT ·AddrOfSyscallLoop(SB), $0-8
- MOVQ $·syscallLoop(SB), AX
- MOVQ AX, ret+0(FP)
- RET
-
-TEXT ·spinLoop(SB),NOSPLIT,$0
-start:
- JMP start
-
-// func AddrOfSpinLoop() uintptr
-TEXT ·AddrOfSpinLoop(SB), $0-8
- MOVQ $·spinLoop(SB), AX
- MOVQ AX, ret+0(FP)
- RET
-
-TEXT ·FloatingPointWorks(SB),NOSPLIT,$0-8
- NO_LOCAL_POINTERS
- MOVQ $1, AX
- MOVQ AX, X0
- MOVQ $39, AX // getpid
- SYSCALL
- MOVQ X0, AX
- CMPQ AX, $1
- SETEQ ret+0(FP)
- RET
-
-#define TWIDDLE_REGS() \
- NOTQ R15; \
- NOTQ R14; \
- NOTQ R13; \
- NOTQ R12; \
- NOTQ BP; \
- NOTQ BX; \
- NOTQ R11; \
- NOTQ R10; \
- NOTQ R9; \
- NOTQ R8; \
- NOTQ AX; \
- NOTQ CX; \
- NOTQ DX; \
- NOTQ SI; \
- NOTQ DI; \
- NOTQ SP;
-
-TEXT ·twiddleRegsSyscall(SB),NOSPLIT,$0
- TWIDDLE_REGS()
- SYSCALL
- RET // never reached
-
-// func AddrOfTwiddleRegsSyscall() uintptr
-TEXT ·AddrOfTwiddleRegsSyscall(SB), $0-8
- MOVQ $·twiddleRegsSyscall(SB), AX
- MOVQ AX, ret+0(FP)
- RET
-
-TEXT ·twiddleRegsFault(SB),NOSPLIT,$0
- TWIDDLE_REGS()
- JMP AX // must fault
- RET // never reached
-
-// func AddrOfTwiddleRegsFault() uintptr
-TEXT ·AddrOfTwiddleRegsFault(SB), $0-8
- MOVQ $·twiddleRegsFault(SB), AX
- MOVQ AX, ret+0(FP)
- RET
-
-// READ_FS/READ_GS encode a segment-override load: movq %fs:(%rax), %rax and
-// movq %gs:(%rax), %rax respectively (0x64/0x65 are the FS/GS prefixes).
-#define READ_FS() BYTE $0x64; BYTE $0x48; BYTE $0x8b; BYTE $0x00;
-#define READ_GS() BYTE $0x65; BYTE $0x48; BYTE $0x8b; BYTE $0x00;
-
-TEXT ·twiddleSegments(SB),NOSPLIT,$0
- MOVQ $0x0, AX
- READ_GS()
- MOVQ AX, BX
- MOVQ $0x0, AX
- READ_FS()
- SYSCALL
- RET // never reached
-
-// func AddrOfTwiddleSegments() uintptr
-TEXT ·AddrOfTwiddleSegments(SB), $0-8
- MOVQ $·twiddleSegments(SB), AX
- MOVQ AX, ret+0(FP)
- RET
diff --git a/pkg/sentry/platform/kvm/testutil/testutil_arm64.go b/pkg/sentry/platform/kvm/testutil/testutil_arm64.go
deleted file mode 100644
index 6d0ba8252..000000000
--- a/pkg/sentry/platform/kvm/testutil/testutil_arm64.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build arm64
-// +build arm64
-
-package testutil
-
-import (
- "fmt"
- "reflect"
-
- "gvisor.dev/gvisor/pkg/sentry/arch"
-)
-
-// TLSWorks is a TLS test.
-//
-// It returns true if TPIDR_EL0 is preserved across a syscall, false otherwise.
-func TLSWorks() bool
-
-// SetTestTarget sets the PC appropriately.
-func SetTestTarget(regs *arch.Registers, fn func()) {
- regs.Pc = uint64(reflect.ValueOf(fn).Pointer())
-}
-
-// SetTouchTarget sets R8 appropriately.
-func SetTouchTarget(regs *arch.Registers, target *uintptr) {
- if target != nil {
- regs.Regs[8] = uint64(reflect.ValueOf(target).Pointer())
- } else {
- regs.Regs[8] = 0
- }
-}
-
-// RewindSyscall rewinds the PC past a syscall instruction.
-func RewindSyscall(regs *arch.Registers) {
- regs.Pc -= 4
-}
-
-// SetTestRegs initializes registers to known values.
-func SetTestRegs(regs *arch.Registers) {
- for i := 0; i <= 30; i++ {
- regs.Regs[i] = uint64(i) + 1
- }
-}
-
-// CheckTestRegs checks that registers were twiddled per TwiddleRegs.
-func CheckTestRegs(regs *arch.Registers, full bool) (err error) {
- for i := 0; i <= 30; i++ {
- if need := ^uint64(i + 1); regs.Regs[i] != need {
- err = addRegisterMismatch(err, fmt.Sprintf("R%d", i), regs.Regs[i], need)
- }
- }
-	// Check TLS: the TwiddleRegs stubs copy the twiddled R10 (originally 11)
-	// into TPIDR_EL0.
-	if need := ^uint64(11); regs.TPIDR_EL0 != need {
-		err = addRegisterMismatch(err, "tpidr_el0", regs.TPIDR_EL0, need)
- }
- return
-}
diff --git a/pkg/sentry/platform/kvm/testutil/testutil_arm64.s b/pkg/sentry/platform/kvm/testutil/testutil_arm64.s
deleted file mode 100644
index 7348c29a5..000000000
--- a/pkg/sentry/platform/kvm/testutil/testutil_arm64.s
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build arm64
-
-// testutil_arm64.s provides ARM64 test functions.
-
-#include "funcdata.h"
-#include "textflag.h"
-
-#define SYS_GETPID 172
-
-// Getpid executes a trivial getpid system call.
-TEXT ·Getpid(SB),NOSPLIT,$0
- NO_LOCAL_POINTERS
- MOVD $SYS_GETPID, R8
- SVC
- RET
-
-TEXT ·Touch(SB),NOSPLIT,$0
-start:
- MOVD 0(R8), R1
- MOVD $SYS_GETPID, R8 // getpid
- SVC
- B start
-
-TEXT ·HaltLoop(SB),NOSPLIT,$0
-start:
- HLT
- B start
-
-// SyscallLoop executes syscalls in a loop.
-TEXT ·SyscallLoop(SB),NOSPLIT,$0
-start:
- SVC
- B start
-
-TEXT ·SpinLoop(SB),NOSPLIT,$0
-start:
- B start
-
-TEXT ·TLSWorks(SB),NOSPLIT,$0-8
- NO_LOCAL_POINTERS
- MOVD $0x6789, R5
- MSR R5, TPIDR_EL0
- MOVD $SYS_GETPID, R8 // getpid
- SVC
- MRS TPIDR_EL0, R6
- CMP R5, R6
-	BNE notEqual
-	MOVD $1, R0
-	MOVD R0, ret+0(FP)
-	RET
-notEqual:
- MOVD $0, ret+0(FP)
- RET
-
-TEXT ·FloatingPointWorks(SB),NOSPLIT,$0-8
- NO_LOCAL_POINTERS
-	// The Go garbage collector touches FPSIMD state (for example in
-	// runtime.deductSweepCredit), so verify it is preserved across the syscall.
- FMOVD $(9.9), F0
- MOVD $SYS_GETPID, R8 // getpid
- SVC
- FMOVD $(9.9), F1
- FCMPD F0, F1
- BNE isNaN
- MOVD $1, R0
- MOVD R0, ret+0(FP)
- RET
-isNaN:
- MOVD $0, ret+0(FP)
- RET
-
-// MVN: bitwise logical NOT
-// This case simulates an application that modified R0-R30.
-#define TWIDDLE_REGS() \
- MVN R0, R0; \
- MVN R1, R1; \
- MVN R2, R2; \
- MVN R3, R3; \
- MVN R4, R4; \
- MVN R5, R5; \
- MVN R6, R6; \
- MVN R7, R7; \
- MVN R8, R8; \
- MVN R9, R9; \
- MVN R10, R10; \
- MVN R11, R11; \
- MVN R12, R12; \
- MVN R13, R13; \
- MVN R14, R14; \
- MVN R15, R15; \
- MVN R16, R16; \
- MVN R17, R17; \
- MVN R18_PLATFORM, R18_PLATFORM; \
- MVN R19, R19; \
- MVN R20, R20; \
- MVN R21, R21; \
- MVN R22, R22; \
- MVN R23, R23; \
- MVN R24, R24; \
- MVN R25, R25; \
- MVN R26, R26; \
- MVN R27, R27; \
- MVN g, g; \
- MVN R29, R29; \
- MVN R30, R30;
-
-TEXT ·TwiddleRegsSyscall(SB),NOSPLIT,$0
- TWIDDLE_REGS()
- MSR R10, TPIDR_EL0
- // Trapped in el0_svc.
- SVC
- RET // never reached
-
-TEXT ·TwiddleRegsFault(SB),NOSPLIT,$0
- TWIDDLE_REGS()
- MSR R10, TPIDR_EL0
- // Trapped in el0_ia.
-	// BR (Branch to Register) branches unconditionally to the address in <Rn>.
- JMP (R6) // <=> br x6, must fault
- RET // never reached
diff --git a/pkg/sentry/platform/kvm/virtual_map_test.go b/pkg/sentry/platform/kvm/virtual_map_test.go
deleted file mode 100644
index 1f4a774f3..000000000
--- a/pkg/sentry/platform/kvm/virtual_map_test.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package kvm
-
-import (
- "testing"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/hostarch"
-)
-
-type checker struct {
- ok bool
- accessType hostarch.AccessType
-}
-
-func (c *checker) Containing(addr uintptr) func(virtualRegion) {
- c.ok = false // Reset for below calls.
- return func(vr virtualRegion) {
- if vr.virtual <= addr && addr < vr.virtual+vr.length {
- c.ok = true
- c.accessType = vr.accessType
- }
- }
-}
-
-func TestParseMaps(t *testing.T) {
- c := new(checker)
-
- // Simple test.
- if err := applyVirtualRegions(c.Containing(0)); err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
-
- // MMap a new page.
- addr, _, errno := unix.RawSyscall6(
- unix.SYS_MMAP, 0, hostarch.PageSize,
- unix.PROT_READ|unix.PROT_WRITE,
- unix.MAP_ANONYMOUS|unix.MAP_PRIVATE, 0, 0)
- if errno != 0 {
- t.Fatalf("unexpected map error: %v", errno)
- }
-
- // Re-parse maps.
- if err := applyVirtualRegions(c.Containing(addr)); err != nil {
- unix.RawSyscall(unix.SYS_MUNMAP, addr, hostarch.PageSize, 0)
- t.Fatalf("unexpected error: %v", err)
- }
-
- // Assert that it now does contain the region.
- if !c.ok {
- unix.RawSyscall(unix.SYS_MUNMAP, addr, hostarch.PageSize, 0)
- t.Fatalf("updated map does not contain 0x%08x, expected true", addr)
- }
-
- // Map the region as PROT_NONE.
- newAddr, _, errno := unix.RawSyscall6(
- unix.SYS_MMAP, addr, hostarch.PageSize,
- unix.PROT_NONE,
- unix.MAP_ANONYMOUS|unix.MAP_FIXED|unix.MAP_PRIVATE, 0, 0)
- if errno != 0 {
- t.Fatalf("unexpected map error: %v", errno)
- }
- if newAddr != addr {
- t.Fatalf("unable to remap address: got 0x%08x, wanted 0x%08x", newAddr, addr)
- }
-
- // Re-parse maps.
- if err := applyVirtualRegions(c.Containing(addr)); err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- if !c.ok {
- t.Fatalf("final map does not contain 0x%08x, expected true", addr)
- }
- if c.accessType.Read || c.accessType.Write || c.accessType.Execute {
- t.Fatalf("final map has incorrect permissions for 0x%08x", addr)
- }
-
- // Unmap the region.
- unix.RawSyscall(unix.SYS_MUNMAP, addr, hostarch.PageSize, 0)
-}
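
For context, applyVirtualRegions used above walks /proc/self/maps. A hedged sketch of turning one maps line into the fields the checker consumes (parseMapsLine is hypothetical; the real virtualRegion parsing may differ):

func parseMapsLine(line string) (virtual, length uintptr, at hostarch.AccessType, err error) {
	// Example line: "7f0000000000-7f0000001000 rw-p 00000000 00:00 0".
	var start, end uint64
	var perms string
	if _, err = fmt.Sscanf(line, "%x-%x %s", &start, &end, &perms); err != nil {
		return
	}
	if len(perms) < 3 {
		return 0, 0, hostarch.AccessType{}, fmt.Errorf("short permission field %q", perms)
	}
	at = hostarch.AccessType{
		Read:    perms[0] == 'r',
		Write:   perms[1] == 'w',
		Execute: perms[2] == 'x',
	}
	return uintptr(start), uintptr(end - start), at, nil
}
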
diff --git a/pkg/sentry/platform/platform_state_autogen.go b/pkg/sentry/platform/platform_state_autogen.go
new file mode 100644
index 000000000..7a4d04589
--- /dev/null
+++ b/pkg/sentry/platform/platform_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package platform
diff --git a/pkg/sentry/platform/ptrace/BUILD b/pkg/sentry/platform/ptrace/BUILD
deleted file mode 100644
index d101f2f53..000000000
--- a/pkg/sentry/platform/ptrace/BUILD
+++ /dev/null
@@ -1,41 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "ptrace",
- srcs = [
- "filters.go",
- "ptrace.go",
- "ptrace_amd64.go",
- "ptrace_arm64.go",
- "ptrace_arm64_unsafe.go",
- "ptrace_unsafe.go",
- "stub_amd64.s",
- "stub_arm64.s",
- "stub_unsafe.go",
- "subprocess.go",
- "subprocess_amd64.go",
- "subprocess_arm64.go",
- "subprocess_linux.go",
- "subprocess_linux_unsafe.go",
- "subprocess_unsafe.go",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/procid",
- "//pkg/safecopy",
- "//pkg/seccomp",
- "//pkg/sentry/arch",
- "//pkg/sentry/arch/fpu",
- "//pkg/sentry/memmap",
- "//pkg/sentry/platform",
- "//pkg/sentry/platform/interrupt",
- "//pkg/sync",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/sentry/platform/ptrace/ptrace_amd64_state_autogen.go b/pkg/sentry/platform/ptrace/ptrace_amd64_state_autogen.go
new file mode 100644
index 000000000..f730ab393
--- /dev/null
+++ b/pkg/sentry/platform/ptrace/ptrace_amd64_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build amd64
+
+package ptrace
diff --git a/pkg/sentry/platform/ptrace/ptrace_arm64_state_autogen.go b/pkg/sentry/platform/ptrace/ptrace_arm64_state_autogen.go
new file mode 100644
index 000000000..6239d1305
--- /dev/null
+++ b/pkg/sentry/platform/ptrace/ptrace_arm64_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build arm64
+
+package ptrace
diff --git a/pkg/sentry/platform/ptrace/ptrace_arm64_unsafe_state_autogen.go b/pkg/sentry/platform/ptrace/ptrace_arm64_unsafe_state_autogen.go
new file mode 100644
index 000000000..6239d1305
--- /dev/null
+++ b/pkg/sentry/platform/ptrace/ptrace_arm64_unsafe_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build arm64
+
+package ptrace
diff --git a/pkg/sentry/platform/ptrace/ptrace_linux_state_autogen.go b/pkg/sentry/platform/ptrace/ptrace_linux_state_autogen.go
new file mode 100644
index 000000000..9f90aef93
--- /dev/null
+++ b/pkg/sentry/platform/ptrace/ptrace_linux_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build linux
+
+package ptrace
diff --git a/pkg/sentry/platform/ptrace/ptrace_linux_unsafe_state_autogen.go b/pkg/sentry/platform/ptrace/ptrace_linux_unsafe_state_autogen.go
new file mode 100644
index 000000000..45d94c547
--- /dev/null
+++ b/pkg/sentry/platform/ptrace/ptrace_linux_unsafe_state_autogen.go
@@ -0,0 +1,6 @@
+// automatically generated by stateify.
+
+// +build linux
+// +build amd64 arm64
+
+package ptrace
diff --git a/pkg/sentry/platform/ptrace/ptrace_state_autogen.go b/pkg/sentry/platform/ptrace/ptrace_state_autogen.go
new file mode 100644
index 000000000..1bf0526f9
--- /dev/null
+++ b/pkg/sentry/platform/ptrace/ptrace_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package ptrace
diff --git a/pkg/sentry/platform/ptrace/ptrace_unsafe_state_autogen.go b/pkg/sentry/platform/ptrace/ptrace_unsafe_state_autogen.go
new file mode 100644
index 000000000..ab4ad223e
--- /dev/null
+++ b/pkg/sentry/platform/ptrace/ptrace_unsafe_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build go1.12,!go1.18
+
+package ptrace
diff --git a/pkg/sentry/sighandling/BUILD b/pkg/sentry/sighandling/BUILD
deleted file mode 100644
index 1790d57c9..000000000
--- a/pkg/sentry/sighandling/BUILD
+++ /dev/null
@@ -1,16 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "sighandling",
- srcs = [
- "sighandling.go",
- "sighandling_unsafe.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/sentry/sighandling/sighandling_state_autogen.go b/pkg/sentry/sighandling/sighandling_state_autogen.go
new file mode 100644
index 000000000..da9d96382
--- /dev/null
+++ b/pkg/sentry/sighandling/sighandling_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package sighandling
diff --git a/pkg/sentry/sighandling/sighandling_unsafe_state_autogen.go b/pkg/sentry/sighandling/sighandling_unsafe_state_autogen.go
new file mode 100644
index 000000000..da9d96382
--- /dev/null
+++ b/pkg/sentry/sighandling/sighandling_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package sighandling
diff --git a/pkg/sentry/socket/BUILD b/pkg/sentry/socket/BUILD
deleted file mode 100644
index 7ee89a735..000000000
--- a/pkg/sentry/socket/BUILD
+++ /dev/null
@@ -1,27 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "socket",
- srcs = ["socket.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/hostarch",
- "//pkg/marshal",
- "//pkg/sentry/device",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/fsutil",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/vfs",
- "//pkg/syserr",
- "//pkg/tcpip",
- "//pkg/tcpip/header",
- "//pkg/usermem",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/sentry/socket/control/BUILD b/pkg/sentry/socket/control/BUILD
deleted file mode 100644
index b2fc84181..000000000
--- a/pkg/sentry/socket/control/BUILD
+++ /dev/null
@@ -1,44 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "control",
- srcs = [
- "control.go",
- "control_vfs2.go",
- ],
- imports = [
- "gvisor.dev/gvisor/pkg/sentry/fs",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/bits",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/marshal",
- "//pkg/marshal/primitive",
- "//pkg/sentry/fs",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/socket",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/vfs",
- ],
-)
-
-go_test(
- name = "control_test",
- size = "small",
- srcs = ["control_test.go"],
- library = ":control",
- deps = [
- "//pkg/abi/linux",
- "//pkg/binary",
- "//pkg/hostarch",
- "//pkg/sentry/socket",
- "@com_github_google_go_cmp//cmp:go_default_library",
- ],
-)
diff --git a/pkg/sentry/socket/control/control_state_autogen.go b/pkg/sentry/socket/control/control_state_autogen.go
new file mode 100644
index 000000000..412025601
--- /dev/null
+++ b/pkg/sentry/socket/control/control_state_autogen.go
@@ -0,0 +1,60 @@
+// automatically generated by stateify.
+
+package control
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (fs *RightsFiles) StateTypeName() string {
+ return "pkg/sentry/socket/control.RightsFiles"
+}
+
+func (fs *RightsFiles) StateFields() []string {
+ return nil
+}
+
+func (c *scmCredentials) StateTypeName() string {
+ return "pkg/sentry/socket/control.scmCredentials"
+}
+
+func (c *scmCredentials) StateFields() []string {
+ return []string{
+ "t",
+ "kuid",
+ "kgid",
+ }
+}
+
+func (c *scmCredentials) beforeSave() {}
+
+// +checklocksignore
+func (c *scmCredentials) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.t)
+ stateSinkObject.Save(1, &c.kuid)
+ stateSinkObject.Save(2, &c.kgid)
+}
+
+func (c *scmCredentials) afterLoad() {}
+
+// +checklocksignore
+func (c *scmCredentials) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.t)
+ stateSourceObject.Load(1, &c.kuid)
+ stateSourceObject.Load(2, &c.kgid)
+}
+
+func (fs *RightsFilesVFS2) StateTypeName() string {
+ return "pkg/sentry/socket/control.RightsFilesVFS2"
+}
+
+func (fs *RightsFilesVFS2) StateFields() []string {
+ return nil
+}
+
+func init() {
+ state.Register((*RightsFiles)(nil))
+ state.Register((*scmCredentials)(nil))
+ state.Register((*RightsFilesVFS2)(nil))
+}
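
The generated methods above follow a fixed pattern. For illustration, a hand-written equivalent for a hypothetical two-field type would look like the sketch below (the demo type is made up and not part of this package):

type demo struct {
	A uint32
	B string
}

func (d *demo) StateTypeName() string { return "pkg/sentry/socket/control.demo" }
func (d *demo) StateFields() []string { return []string{"A", "B"} }
func (d *demo) beforeSave()           {}
func (d *demo) afterLoad()            {}

func (d *demo) StateSave(sink state.Sink) {
	d.beforeSave()
	sink.Save(0, &d.A) // Indices match the order returned by StateFields.
	sink.Save(1, &d.B)
}

func (d *demo) StateLoad(source state.Source) {
	source.Load(0, &d.A)
	source.Load(1, &d.B)
}

func init() {
	state.Register((*demo)(nil)) // Registers the type with the state package.
}
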
diff --git a/pkg/sentry/socket/control/control_test.go b/pkg/sentry/socket/control/control_test.go
deleted file mode 100644
index 7e28a0cef..000000000
--- a/pkg/sentry/socket/control/control_test.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package control provides internal representations of socket control
-// messages.
-package control
-
-import (
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/binary"
- "gvisor.dev/gvisor/pkg/hostarch"
- "gvisor.dev/gvisor/pkg/sentry/socket"
-)
-
-func TestParse(t *testing.T) {
- // Craft the control message to parse.
- length := linux.SizeOfControlMessageHeader + linux.SizeOfTimeval
- hdr := linux.ControlMessageHeader{
- Length: uint64(length),
- Level: linux.SOL_SOCKET,
- Type: linux.SO_TIMESTAMP,
- }
- buf := make([]byte, 0, length)
- buf = binary.Marshal(buf, hostarch.ByteOrder, &hdr)
- ts := linux.Timeval{
- Sec: 2401,
- Usec: 343,
- }
- buf = binary.Marshal(buf, hostarch.ByteOrder, &ts)
-
- cmsg, err := Parse(nil, nil, buf, 8 /* width */)
- if err != nil {
- t.Fatalf("Parse(_, _, %+v, _): %v", cmsg, err)
- }
-
- want := socket.ControlMessages{
- IP: socket.IPControlMessages{
- HasTimestamp: true,
- Timestamp: ts.ToNsecCapped(),
- },
- }
- if diff := cmp.Diff(want, cmsg); diff != "" {
- t.Errorf("unexpected message parsed, (-want, +got):\n%s", diff)
- }
-}
diff --git a/pkg/sentry/socket/hostinet/BUILD b/pkg/sentry/socket/hostinet/BUILD
deleted file mode 100644
index 3950caa0f..000000000
--- a/pkg/sentry/socket/hostinet/BUILD
+++ /dev/null
@@ -1,48 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "hostinet",
- srcs = [
- "device.go",
- "hostinet.go",
- "save_restore.go",
- "socket.go",
- "socket_unsafe.go",
- "socket_vfs2.go",
- "sockopt_impl.go",
- "stack.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/fdnotifier",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/marshal",
- "//pkg/marshal/primitive",
- "//pkg/safemem",
- "//pkg/sentry/arch",
- "//pkg/sentry/device",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/fsutil",
- "//pkg/sentry/fsimpl/sockfs",
- "//pkg/sentry/hostfd",
- "//pkg/sentry/inet",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/socket",
- "//pkg/sentry/socket/control",
- "//pkg/sentry/vfs",
- "//pkg/syserr",
- "//pkg/syserror",
- "//pkg/tcpip",
- "//pkg/tcpip/stack",
- "//pkg/usermem",
- "//pkg/waiter",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/sentry/socket/hostinet/hostinet_impl_state_autogen.go b/pkg/sentry/socket/hostinet/hostinet_impl_state_autogen.go
new file mode 100644
index 000000000..7dd0e5006
--- /dev/null
+++ b/pkg/sentry/socket/hostinet/hostinet_impl_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build go1.1
+
+package hostinet
diff --git a/pkg/sentry/socket/hostinet/hostinet_state_autogen.go b/pkg/sentry/socket/hostinet/hostinet_state_autogen.go
new file mode 100644
index 000000000..519c65339
--- /dev/null
+++ b/pkg/sentry/socket/hostinet/hostinet_state_autogen.go
@@ -0,0 +1,89 @@
+// automatically generated by stateify.
+
+package hostinet
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (s *socketOpsCommon) StateTypeName() string {
+ return "pkg/sentry/socket/hostinet.socketOpsCommon"
+}
+
+func (s *socketOpsCommon) StateFields() []string {
+ return []string{
+ "SendReceiveTimeout",
+ "family",
+ "stype",
+ "protocol",
+ "queue",
+ "fd",
+ }
+}
+
+func (s *socketOpsCommon) beforeSave() {}
+
+// +checklocksignore
+func (s *socketOpsCommon) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.SendReceiveTimeout)
+ stateSinkObject.Save(1, &s.family)
+ stateSinkObject.Save(2, &s.stype)
+ stateSinkObject.Save(3, &s.protocol)
+ stateSinkObject.Save(4, &s.queue)
+ stateSinkObject.Save(5, &s.fd)
+}
+
+func (s *socketOpsCommon) afterLoad() {}
+
+// +checklocksignore
+func (s *socketOpsCommon) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.SendReceiveTimeout)
+ stateSourceObject.Load(1, &s.family)
+ stateSourceObject.Load(2, &s.stype)
+ stateSourceObject.Load(3, &s.protocol)
+ stateSourceObject.Load(4, &s.queue)
+ stateSourceObject.Load(5, &s.fd)
+}
+
+func (s *socketVFS2) StateTypeName() string {
+ return "pkg/sentry/socket/hostinet.socketVFS2"
+}
+
+func (s *socketVFS2) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "LockFD",
+ "DentryMetadataFileDescriptionImpl",
+ "socketOpsCommon",
+ }
+}
+
+func (s *socketVFS2) beforeSave() {}
+
+// +checklocksignore
+func (s *socketVFS2) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.vfsfd)
+ stateSinkObject.Save(1, &s.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &s.LockFD)
+ stateSinkObject.Save(3, &s.DentryMetadataFileDescriptionImpl)
+ stateSinkObject.Save(4, &s.socketOpsCommon)
+}
+
+func (s *socketVFS2) afterLoad() {}
+
+// +checklocksignore
+func (s *socketVFS2) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.vfsfd)
+ stateSourceObject.Load(1, &s.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &s.LockFD)
+ stateSourceObject.Load(3, &s.DentryMetadataFileDescriptionImpl)
+ stateSourceObject.Load(4, &s.socketOpsCommon)
+}
+
+func init() {
+ state.Register((*socketOpsCommon)(nil))
+ state.Register((*socketVFS2)(nil))
+}
diff --git a/pkg/sentry/socket/hostinet/hostinet_unsafe_state_autogen.go b/pkg/sentry/socket/hostinet/hostinet_unsafe_state_autogen.go
new file mode 100644
index 000000000..b0a59ba93
--- /dev/null
+++ b/pkg/sentry/socket/hostinet/hostinet_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package hostinet
diff --git a/pkg/sentry/socket/netfilter/BUILD b/pkg/sentry/socket/netfilter/BUILD
deleted file mode 100644
index 608474fa1..000000000
--- a/pkg/sentry/socket/netfilter/BUILD
+++ /dev/null
@@ -1,34 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "netfilter",
- srcs = [
- "extensions.go",
- "ipv4.go",
- "ipv6.go",
- "netfilter.go",
- "owner_matcher.go",
- "targets.go",
- "tcp_matcher.go",
- "udp_matcher.go",
- ],
- marshal = True,
- # This target depends on netstack and should only be used by epsocket,
- # which is allowed to depend on netstack.
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/bits",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/marshal",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/syserr",
- "//pkg/tcpip",
- "//pkg/tcpip/header",
- "//pkg/tcpip/stack",
- ],
-)
diff --git a/pkg/sentry/socket/netfilter/netfilter_abi_autogen_unsafe.go b/pkg/sentry/socket/netfilter/netfilter_abi_autogen_unsafe.go
new file mode 100644
index 000000000..ea4abc34f
--- /dev/null
+++ b/pkg/sentry/socket/netfilter/netfilter_abi_autogen_unsafe.go
@@ -0,0 +1,156 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// If there are issues with build tag aggregation, see
+// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here
+// come from the input set of files used to generate this file. This input set
+// is filtered based on pre-defined file suffixes related to build tags, see
+// tools/defs.bzl:calculate_sets().
+
+package netfilter
+
+import (
+ "gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/hostarch"
+ "gvisor.dev/gvisor/pkg/marshal"
+ "io"
+ "reflect"
+ "runtime"
+ "unsafe"
+)
+
+// Marshallable types used by this file.
+var _ marshal.Marshallable = (*linux.NFNATRange)(nil)
+var _ marshal.Marshallable = (*linux.XTEntryTarget)(nil)
+var _ marshal.Marshallable = (*nfNATTarget)(nil)
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (n *nfNATTarget) SizeBytes() int {
+ return 0 +
+ (*linux.XTEntryTarget)(nil).SizeBytes() +
+ (*linux.NFNATRange)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (n *nfNATTarget) MarshalBytes(dst []byte) {
+ n.Target.MarshalBytes(dst[:n.Target.SizeBytes()])
+ dst = dst[n.Target.SizeBytes():]
+ n.Range.MarshalBytes(dst[:n.Range.SizeBytes()])
+ dst = dst[n.Range.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (n *nfNATTarget) UnmarshalBytes(src []byte) {
+ n.Target.UnmarshalBytes(src[:n.Target.SizeBytes()])
+ src = src[n.Target.SizeBytes():]
+ n.Range.UnmarshalBytes(src[:n.Range.SizeBytes()])
+ src = src[n.Range.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (n *nfNATTarget) Packed() bool {
+ return n.Range.Packed() && n.Target.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (n *nfNATTarget) MarshalUnsafe(dst []byte) {
+ if n.Range.Packed() && n.Target.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(n), uintptr(n.SizeBytes()))
+ } else {
+ // Type nfNATTarget doesn't have a packed layout in memory, fallback to MarshalBytes.
+ n.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (n *nfNATTarget) UnmarshalUnsafe(src []byte) {
+ if n.Range.Packed() && n.Target.Packed() {
+ gohacks.Memmove(unsafe.Pointer(n), unsafe.Pointer(&src[0]), uintptr(n.SizeBytes()))
+ } else {
+ // Type nfNATTarget doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ n.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (n *nfNATTarget) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !n.Range.Packed() && n.Target.Packed() {
+ // Type nfNATTarget doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(n.SizeBytes()) // escapes: okay.
+ n.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+ hdr.Len = n.SizeBytes()
+ hdr.Cap = n.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that n
+ // must live until the use above.
+ runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (n *nfNATTarget) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return n.CopyOutN(cc, addr, n.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (n *nfNATTarget) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !n.Range.Packed() && n.Target.Packed() {
+ // Type nfNATTarget doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(n.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ n.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+ hdr.Len = n.SizeBytes()
+ hdr.Cap = n.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that n
+ // must live until the use above.
+ runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (n *nfNATTarget) WriteTo(writer io.Writer) (int64, error) {
+ if !n.Range.Packed() && n.Target.Packed() {
+ // Type nfNATTarget doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, n.SizeBytes())
+ n.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+ hdr.Len = n.SizeBytes()
+ hdr.Cap = n.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that n
+ // must live until the use above.
+ runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
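
The CopyOutN, CopyIn, and WriteTo methods above all rely on the same idiom: alias the struct's memory as a []byte through a reflect.SliceHeader, then pin the object with runtime.KeepAlive after the copy. A standalone sketch of that idiom (asBytes is a hypothetical helper, not part of this package):

// asBytes returns a byte slice aliasing the n bytes at p. Because the garbage
// collector cannot see through hdr.Data, the caller must keep the owning
// object alive (runtime.KeepAlive) until after the last use of the slice.
func asBytes(p unsafe.Pointer, n int) []byte {
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(p)
	hdr.Len = n
	hdr.Cap = n
	return buf
}
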
diff --git a/pkg/sentry/socket/netfilter/netfilter_state_autogen.go b/pkg/sentry/socket/netfilter/netfilter_state_autogen.go
new file mode 100644
index 000000000..6e95d89a4
--- /dev/null
+++ b/pkg/sentry/socket/netfilter/netfilter_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package netfilter
diff --git a/pkg/sentry/socket/netlink/BUILD b/pkg/sentry/socket/netlink/BUILD
deleted file mode 100644
index ed85404da..000000000
--- a/pkg/sentry/socket/netlink/BUILD
+++ /dev/null
@@ -1,58 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "netlink",
- srcs = [
- "message.go",
- "provider.go",
- "provider_vfs2.go",
- "socket.go",
- "socket_vfs2.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/abi/linux/errno",
- "//pkg/bits",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/marshal",
- "//pkg/marshal/primitive",
- "//pkg/sentry/arch",
- "//pkg/sentry/device",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/fsutil",
- "//pkg/sentry/fsimpl/sockfs",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/socket",
- "//pkg/sentry/socket/netlink/port",
- "//pkg/sentry/socket/unix",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/syserr",
- "//pkg/syserror",
- "//pkg/tcpip",
- "//pkg/usermem",
- "//pkg/waiter",
- ],
-)
-
-go_test(
- name = "netlink_test",
- size = "small",
- srcs = [
- "message_test.go",
- ],
- deps = [
- ":netlink",
- "//pkg/abi/linux",
- "//pkg/marshal",
- "//pkg/marshal/primitive",
- ],
-)
diff --git a/pkg/sentry/socket/netlink/message_test.go b/pkg/sentry/socket/netlink/message_test.go
deleted file mode 100644
index 968968469..000000000
--- a/pkg/sentry/socket/netlink/message_test.go
+++ /dev/null
@@ -1,330 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package message_test
-
-import (
- "bytes"
- "reflect"
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/marshal"
- "gvisor.dev/gvisor/pkg/marshal/primitive"
- "gvisor.dev/gvisor/pkg/sentry/socket/netlink"
-)
-
-type dummyNetlinkMsg struct {
- marshal.StubMarshallable
- Foo uint16
-}
-
-func (*dummyNetlinkMsg) SizeBytes() int {
- return 2
-}
-
-func (m *dummyNetlinkMsg) MarshalUnsafe(dst []byte) {
- p := primitive.Uint16(m.Foo)
- p.MarshalUnsafe(dst)
-}
-
-func (m *dummyNetlinkMsg) UnmarshalUnsafe(src []byte) {
- var p primitive.Uint16
- p.UnmarshalUnsafe(src)
- m.Foo = uint16(p)
-}
-
-func TestParseMessage(t *testing.T) {
- tests := []struct {
- desc string
- input []byte
-
- header linux.NetlinkMessageHeader
- dataMsg *dummyNetlinkMsg
- restLen int
- ok bool
- }{
- {
- desc: "valid",
- input: []byte{
- 0x14, 0x00, 0x00, 0x00, // Length
- 0x01, 0x00, // Type
- 0x02, 0x00, // Flags
- 0x03, 0x00, 0x00, 0x00, // Seq
- 0x04, 0x00, 0x00, 0x00, // PortID
- 0x30, 0x31, 0x00, 0x00, // Data message with 2 bytes padding
- },
- header: linux.NetlinkMessageHeader{
- Length: 20,
- Type: 1,
- Flags: 2,
- Seq: 3,
- PortID: 4,
- },
- dataMsg: &dummyNetlinkMsg{
- Foo: 0x3130,
- },
- restLen: 0,
- ok: true,
- },
- {
- desc: "valid with next message",
- input: []byte{
- 0x14, 0x00, 0x00, 0x00, // Length
- 0x01, 0x00, // Type
- 0x02, 0x00, // Flags
- 0x03, 0x00, 0x00, 0x00, // Seq
- 0x04, 0x00, 0x00, 0x00, // PortID
- 0x30, 0x31, 0x00, 0x00, // Data message with 2 bytes padding
- 0xFF, // Next message (rest)
- },
- header: linux.NetlinkMessageHeader{
- Length: 20,
- Type: 1,
- Flags: 2,
- Seq: 3,
- PortID: 4,
- },
- dataMsg: &dummyNetlinkMsg{
- Foo: 0x3130,
- },
- restLen: 1,
- ok: true,
- },
- {
- desc: "valid for last message without padding",
- input: []byte{
- 0x12, 0x00, 0x00, 0x00, // Length
- 0x01, 0x00, // Type
- 0x02, 0x00, // Flags
- 0x03, 0x00, 0x00, 0x00, // Seq
- 0x04, 0x00, 0x00, 0x00, // PortID
- 0x30, 0x31, // Data message
- },
- header: linux.NetlinkMessageHeader{
- Length: 18,
- Type: 1,
- Flags: 2,
- Seq: 3,
- PortID: 4,
- },
- dataMsg: &dummyNetlinkMsg{
- Foo: 0x3130,
- },
- restLen: 0,
- ok: true,
- },
- {
- desc: "valid for last message not to be aligned",
- input: []byte{
- 0x13, 0x00, 0x00, 0x00, // Length
- 0x01, 0x00, // Type
- 0x02, 0x00, // Flags
- 0x03, 0x00, 0x00, 0x00, // Seq
- 0x04, 0x00, 0x00, 0x00, // PortID
- 0x30, 0x31, // Data message
-				0x00, // One excess byte is permitted at the end.
- },
- header: linux.NetlinkMessageHeader{
- Length: 19,
- Type: 1,
- Flags: 2,
- Seq: 3,
- PortID: 4,
- },
- dataMsg: &dummyNetlinkMsg{
- Foo: 0x3130,
- },
- restLen: 0,
- ok: true,
- },
- {
- desc: "header.Length too short",
- input: []byte{
- 0x04, 0x00, 0x00, 0x00, // Length
- 0x01, 0x00, // Type
- 0x02, 0x00, // Flags
- 0x03, 0x00, 0x00, 0x00, // Seq
- 0x04, 0x00, 0x00, 0x00, // PortID
- 0x30, 0x31, 0x00, 0x00, // Data message with 2 bytes padding
- },
- ok: false,
- },
- {
- desc: "header.Length too long",
- input: []byte{
- 0xFF, 0xFF, 0x00, 0x00, // Length
- 0x01, 0x00, // Type
- 0x02, 0x00, // Flags
- 0x03, 0x00, 0x00, 0x00, // Seq
- 0x04, 0x00, 0x00, 0x00, // PortID
- 0x30, 0x31, 0x00, 0x00, // Data message with 2 bytes padding
- },
- ok: false,
- },
- {
- desc: "header incomplete",
- input: []byte{
- 0x04, 0x00, 0x00, 0x00, // Length
- },
- ok: false,
- },
- {
- desc: "empty message",
- input: []byte{},
- ok: false,
- },
- }
- for _, test := range tests {
- msg, rest, ok := netlink.ParseMessage(test.input)
- if ok != test.ok {
- t.Errorf("%v: got ok = %v, want = %v", test.desc, ok, test.ok)
- continue
- }
- if !test.ok {
- continue
- }
- if !reflect.DeepEqual(msg.Header(), test.header) {
- t.Errorf("%v: got hdr = %+v, want = %+v", test.desc, msg.Header(), test.header)
- }
-
- dataMsg := &dummyNetlinkMsg{}
- _, dataOk := msg.GetData(dataMsg)
- if !dataOk {
- t.Errorf("%v: GetData.ok = %v, want = true", test.desc, dataOk)
- } else if !reflect.DeepEqual(dataMsg, test.dataMsg) {
- t.Errorf("%v: GetData.msg = %+v, want = %+v", test.desc, dataMsg, test.dataMsg)
- }
-
- if got, want := rest, test.input[len(test.input)-test.restLen:]; !bytes.Equal(got, want) {
- t.Errorf("%v: got rest = %v, want = %v", test.desc, got, want)
- }
- }
-}
-
-func TestAttrView(t *testing.T) {
- tests := []struct {
- desc string
- input []byte
-
- // Outputs for ParseFirst.
- hdr linux.NetlinkAttrHeader
- value []byte
- restLen int
- ok bool
-
- // Outputs for Empty.
- isEmpty bool
- }{
- {
- desc: "valid",
- input: []byte{
- 0x06, 0x00, // Length
- 0x01, 0x00, // Type
- 0x30, 0x31, 0x00, 0x00, // Data with 2 bytes padding
- },
- hdr: linux.NetlinkAttrHeader{
- Length: 6,
- Type: 1,
- },
- value: []byte{0x30, 0x31},
- restLen: 0,
- ok: true,
- isEmpty: false,
- },
- {
- desc: "at alignment",
- input: []byte{
- 0x08, 0x00, // Length
- 0x01, 0x00, // Type
- 0x30, 0x31, 0x32, 0x33, // Data
- },
- hdr: linux.NetlinkAttrHeader{
- Length: 8,
- Type: 1,
- },
- value: []byte{0x30, 0x31, 0x32, 0x33},
- restLen: 0,
- ok: true,
- isEmpty: false,
- },
- {
- desc: "at alignment with rest data",
- input: []byte{
- 0x08, 0x00, // Length
- 0x01, 0x00, // Type
- 0x30, 0x31, 0x32, 0x33, // Data
- 0xFF, 0xFE, // Rest data
- },
- hdr: linux.NetlinkAttrHeader{
- Length: 8,
- Type: 1,
- },
- value: []byte{0x30, 0x31, 0x32, 0x33},
- restLen: 2,
- ok: true,
- isEmpty: false,
- },
- {
- desc: "hdr.Length too long",
- input: []byte{
- 0xFF, 0x00, // Length
- 0x01, 0x00, // Type
- 0x30, 0x31, 0x32, 0x33, // Data
- },
- ok: false,
- isEmpty: false,
- },
- {
- desc: "hdr.Length too short",
- input: []byte{
- 0x01, 0x00, // Length
- 0x01, 0x00, // Type
- 0x30, 0x31, 0x32, 0x33, // Data
- },
- ok: false,
- isEmpty: false,
- },
- {
- desc: "empty",
- input: []byte{},
- ok: false,
- isEmpty: true,
- },
- }
- for _, test := range tests {
- attrs := netlink.AttrsView(test.input)
-
- // Test ParseFirst().
- hdr, value, rest, ok := attrs.ParseFirst()
- if ok != test.ok {
- t.Errorf("%v: got ok = %v, want = %v", test.desc, ok, test.ok)
- } else if test.ok {
- if !reflect.DeepEqual(hdr, test.hdr) {
- t.Errorf("%v: got hdr = %+v, want = %+v", test.desc, hdr, test.hdr)
- }
- if !bytes.Equal(value, test.value) {
- t.Errorf("%v: got value = %v, want = %v", test.desc, value, test.value)
- }
- if wantRest := test.input[len(test.input)-test.restLen:]; !bytes.Equal(rest, wantRest) {
- t.Errorf("%v: got rest = %v, want = %v", test.desc, rest, wantRest)
- }
- }
-
- // Test Empty().
- if got, want := attrs.Empty(), test.isEmpty; got != want {
- t.Errorf("%v: got empty = %v, want = %v", test.desc, got, want)
- }
- }
-}
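
The padding exercised by the cases above follows the standard netlink 4-byte alignment rule. A small illustrative helper (nlmsgAlign is a hypothetical name, not part of the package under test):

// nlmsgAlign rounds a netlink message or attribute length up to the 4-byte
// boundary that determines how much padding precedes the next element.
func nlmsgAlign(n int) int {
	const nlmsgAlignTo = 4
	return (n + nlmsgAlignTo - 1) &^ (nlmsgAlignTo - 1)
}
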
diff --git a/pkg/sentry/socket/netlink/netlink_state_autogen.go b/pkg/sentry/socket/netlink/netlink_state_autogen.go
new file mode 100644
index 000000000..29c549880
--- /dev/null
+++ b/pkg/sentry/socket/netlink/netlink_state_autogen.go
@@ -0,0 +1,149 @@
+// automatically generated by stateify.
+
+package netlink
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (s *Socket) StateTypeName() string {
+ return "pkg/sentry/socket/netlink.Socket"
+}
+
+func (s *Socket) StateFields() []string {
+ return []string{
+ "socketOpsCommon",
+ }
+}
+
+func (s *Socket) beforeSave() {}
+
+// +checklocksignore
+func (s *Socket) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.socketOpsCommon)
+}
+
+func (s *Socket) afterLoad() {}
+
+// +checklocksignore
+func (s *Socket) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.socketOpsCommon)
+}
+
+func (s *socketOpsCommon) StateTypeName() string {
+ return "pkg/sentry/socket/netlink.socketOpsCommon"
+}
+
+func (s *socketOpsCommon) StateFields() []string {
+ return []string{
+ "SendReceiveTimeout",
+ "ports",
+ "protocol",
+ "skType",
+ "ep",
+ "connection",
+ "bound",
+ "portID",
+ "sendBufferSize",
+ "filter",
+ }
+}
+
+func (s *socketOpsCommon) beforeSave() {}
+
+// +checklocksignore
+func (s *socketOpsCommon) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.SendReceiveTimeout)
+ stateSinkObject.Save(1, &s.ports)
+ stateSinkObject.Save(2, &s.protocol)
+ stateSinkObject.Save(3, &s.skType)
+ stateSinkObject.Save(4, &s.ep)
+ stateSinkObject.Save(5, &s.connection)
+ stateSinkObject.Save(6, &s.bound)
+ stateSinkObject.Save(7, &s.portID)
+ stateSinkObject.Save(8, &s.sendBufferSize)
+ stateSinkObject.Save(9, &s.filter)
+}
+
+func (s *socketOpsCommon) afterLoad() {}
+
+// +checklocksignore
+func (s *socketOpsCommon) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.SendReceiveTimeout)
+ stateSourceObject.Load(1, &s.ports)
+ stateSourceObject.Load(2, &s.protocol)
+ stateSourceObject.Load(3, &s.skType)
+ stateSourceObject.Load(4, &s.ep)
+ stateSourceObject.Load(5, &s.connection)
+ stateSourceObject.Load(6, &s.bound)
+ stateSourceObject.Load(7, &s.portID)
+ stateSourceObject.Load(8, &s.sendBufferSize)
+ stateSourceObject.Load(9, &s.filter)
+}
+
+func (k *kernelSCM) StateTypeName() string {
+ return "pkg/sentry/socket/netlink.kernelSCM"
+}
+
+func (k *kernelSCM) StateFields() []string {
+ return []string{}
+}
+
+func (k *kernelSCM) beforeSave() {}
+
+// +checklocksignore
+func (k *kernelSCM) StateSave(stateSinkObject state.Sink) {
+ k.beforeSave()
+}
+
+func (k *kernelSCM) afterLoad() {}
+
+// +checklocksignore
+func (k *kernelSCM) StateLoad(stateSourceObject state.Source) {
+}
+
+func (s *SocketVFS2) StateTypeName() string {
+ return "pkg/sentry/socket/netlink.SocketVFS2"
+}
+
+func (s *SocketVFS2) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "DentryMetadataFileDescriptionImpl",
+ "LockFD",
+ "socketOpsCommon",
+ }
+}
+
+func (s *SocketVFS2) beforeSave() {}
+
+// +checklocksignore
+func (s *SocketVFS2) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.vfsfd)
+ stateSinkObject.Save(1, &s.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &s.DentryMetadataFileDescriptionImpl)
+ stateSinkObject.Save(3, &s.LockFD)
+ stateSinkObject.Save(4, &s.socketOpsCommon)
+}
+
+func (s *SocketVFS2) afterLoad() {}
+
+// +checklocksignore
+func (s *SocketVFS2) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.vfsfd)
+ stateSourceObject.Load(1, &s.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &s.DentryMetadataFileDescriptionImpl)
+ stateSourceObject.Load(3, &s.LockFD)
+ stateSourceObject.Load(4, &s.socketOpsCommon)
+}
+
+func init() {
+ state.Register((*Socket)(nil))
+ state.Register((*socketOpsCommon)(nil))
+ state.Register((*kernelSCM)(nil))
+ state.Register((*SocketVFS2)(nil))
+}
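
The generated file above follows the stateify contract: a type annotated `+stateify savable` gains StateTypeName, StateFields, StateSave and StateLoad methods and registers itself with the state package so it can take part in checkpoint/restore. A minimal hand-written sketch of the same contract for a hypothetical type (the type and its fields are illustrative only, not part of this change):

package example

import "gvisor.dev/gvisor/pkg/state"

// counter is a hypothetical savable type used only to illustrate the
// generated pattern above.
type counter struct {
	hits  uint64
	label string
}

func (c *counter) StateTypeName() string { return "example.counter" }

func (c *counter) StateFields() []string { return []string{"hits", "label"} }

func (c *counter) beforeSave() {}

// StateSave writes each field into a numbered slot, mirroring the
// generated StateSave methods above.
func (c *counter) StateSave(sink state.Sink) {
	c.beforeSave()
	sink.Save(0, &c.hits)
	sink.Save(1, &c.label)
}

func (c *counter) afterLoad() {}

// StateLoad reads the fields back from the same slots.
func (c *counter) StateLoad(source state.Source) {
	source.Load(0, &c.hits)
	source.Load(1, &c.label)
}

func init() {
	state.Register((*counter)(nil))
}
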
diff --git a/pkg/sentry/socket/netlink/port/BUILD b/pkg/sentry/socket/netlink/port/BUILD
deleted file mode 100644
index 3a22923d8..000000000
--- a/pkg/sentry/socket/netlink/port/BUILD
+++ /dev/null
@@ -1,16 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "port",
- srcs = ["port.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = ["//pkg/sync"],
-)
-
-go_test(
- name = "port_test",
- srcs = ["port_test.go"],
- library = ":port",
-)
diff --git a/pkg/sentry/socket/netlink/port/port_state_autogen.go b/pkg/sentry/socket/netlink/port/port_state_autogen.go
new file mode 100644
index 000000000..b22471899
--- /dev/null
+++ b/pkg/sentry/socket/netlink/port/port_state_autogen.go
@@ -0,0 +1,36 @@
+// automatically generated by stateify.
+
+package port
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (m *Manager) StateTypeName() string {
+ return "pkg/sentry/socket/netlink/port.Manager"
+}
+
+func (m *Manager) StateFields() []string {
+ return []string{
+ "ports",
+ }
+}
+
+func (m *Manager) beforeSave() {}
+
+// +checklocksignore
+func (m *Manager) StateSave(stateSinkObject state.Sink) {
+ m.beforeSave()
+ stateSinkObject.Save(0, &m.ports)
+}
+
+func (m *Manager) afterLoad() {}
+
+// +checklocksignore
+func (m *Manager) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &m.ports)
+}
+
+func init() {
+ state.Register((*Manager)(nil))
+}
diff --git a/pkg/sentry/socket/netlink/port/port_test.go b/pkg/sentry/socket/netlink/port/port_test.go
deleted file mode 100644
index 516f6cd6c..000000000
--- a/pkg/sentry/socket/netlink/port/port_test.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package port
-
-import (
- "testing"
-)
-
-func TestAllocateHint(t *testing.T) {
- m := New()
-
- // We can get the hint port.
- p, ok := m.Allocate(0, 1)
- if !ok {
- t.Errorf("m.Allocate got !ok want ok")
- }
- if p != 1 {
- t.Errorf("m.Allocate(0, 1) got %d want 1", p)
- }
-
- // Hint is taken.
- p, ok = m.Allocate(0, 1)
- if !ok {
- t.Errorf("m.Allocate got !ok want ok")
- }
- if p == 1 {
- t.Errorf("m.Allocate(0, 1) got 1 want anything else")
- }
-
- // Hint is available for a different protocol.
- p, ok = m.Allocate(1, 1)
- if !ok {
- t.Errorf("m.Allocate got !ok want ok")
- }
- if p != 1 {
- t.Errorf("m.Allocate(1, 1) got %d want 1", p)
- }
-
- m.Release(0, 1)
-
- // Hint is available again after release.
- p, ok = m.Allocate(0, 1)
- if !ok {
- t.Errorf("m.Allocate got !ok want ok")
- }
- if p != 1 {
- t.Errorf("m.Allocate(0, 1) got %d want 1", p)
- }
-}
-
-func TestAllocateExhausted(t *testing.T) {
- m := New()
-
- // Fill all ports (0 is already reserved).
- for i := int32(1); i < maxPorts; i++ {
- p, ok := m.Allocate(0, i)
- if !ok {
- t.Fatalf("m.Allocate got !ok want ok")
- }
- if p != i {
- t.Fatalf("m.Allocate(0, %d) got %d want %d", i, p, i)
- }
- }
-
- // Now no more can be allocated.
- p, ok := m.Allocate(0, 1)
- if ok {
- t.Errorf("m.Allocate got %d, ok want !ok", p)
- }
-}
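
The deleted tests above document the port.Manager contract: Allocate(protocol, hint) returns the hinted port when it is free, falls back to some other free port when the hint is taken within the same protocol, keeps each protocol's port space independent, and fails once the space is exhausted; Release returns a port to its protocol's pool. A brief external-usage sketch under those semantics (integer types and the import path are as implied by the deleted test and BUILD file above):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/sentry/socket/netlink/port"
)

func main() {
	m := port.New()

	// The hint is honored when it is free.
	p, ok := m.Allocate(0 /* protocol */, 1 /* hint */)
	fmt.Println(p, ok) // 1 true

	// The same hint under the same protocol yields a different free port.
	p2, _ := m.Allocate(0, 1)
	fmt.Println(p2 != 1) // true

	// Each protocol has its own port space, so port 1 is still free here.
	p3, _ := m.Allocate(1, 1)
	fmt.Println(p3) // 1

	// Release makes port 1 available to protocol 0 again.
	m.Release(0, 1)
}
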
diff --git a/pkg/sentry/socket/netlink/route/BUILD b/pkg/sentry/socket/netlink/route/BUILD
deleted file mode 100644
index c6c04b4e3..000000000
--- a/pkg/sentry/socket/netlink/route/BUILD
+++ /dev/null
@@ -1,22 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "route",
- srcs = [
- "protocol.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/marshal/primitive",
- "//pkg/sentry/inet",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/socket/netlink",
- "//pkg/syserr",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/sentry/socket/netlink/route/route_state_autogen.go b/pkg/sentry/socket/netlink/route/route_state_autogen.go
new file mode 100644
index 000000000..c4a94ab49
--- /dev/null
+++ b/pkg/sentry/socket/netlink/route/route_state_autogen.go
@@ -0,0 +1,32 @@
+// automatically generated by stateify.
+
+package route
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (p *Protocol) StateTypeName() string {
+ return "pkg/sentry/socket/netlink/route.Protocol"
+}
+
+func (p *Protocol) StateFields() []string {
+ return []string{}
+}
+
+func (p *Protocol) beforeSave() {}
+
+// +checklocksignore
+func (p *Protocol) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+}
+
+func (p *Protocol) afterLoad() {}
+
+// +checklocksignore
+func (p *Protocol) StateLoad(stateSourceObject state.Source) {
+}
+
+func init() {
+ state.Register((*Protocol)(nil))
+}
diff --git a/pkg/sentry/socket/netlink/uevent/BUILD b/pkg/sentry/socket/netlink/uevent/BUILD
deleted file mode 100644
index b6434923c..000000000
--- a/pkg/sentry/socket/netlink/uevent/BUILD
+++ /dev/null
@@ -1,16 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "uevent",
- srcs = ["protocol.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/sentry/kernel",
- "//pkg/sentry/socket/netlink",
- "//pkg/syserr",
- ],
-)
diff --git a/pkg/sentry/socket/netlink/uevent/uevent_state_autogen.go b/pkg/sentry/socket/netlink/uevent/uevent_state_autogen.go
new file mode 100644
index 000000000..f45d63d9a
--- /dev/null
+++ b/pkg/sentry/socket/netlink/uevent/uevent_state_autogen.go
@@ -0,0 +1,32 @@
+// automatically generated by stateify.
+
+package uevent
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (p *Protocol) StateTypeName() string {
+ return "pkg/sentry/socket/netlink/uevent.Protocol"
+}
+
+func (p *Protocol) StateFields() []string {
+ return []string{}
+}
+
+func (p *Protocol) beforeSave() {}
+
+// +checklocksignore
+func (p *Protocol) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+}
+
+func (p *Protocol) afterLoad() {}
+
+// +checklocksignore
+func (p *Protocol) StateLoad(stateSourceObject state.Source) {
+}
+
+func init() {
+ state.Register((*Protocol)(nil))
+}
diff --git a/pkg/sentry/socket/netstack/BUILD b/pkg/sentry/socket/netstack/BUILD
deleted file mode 100644
index e828982eb..000000000
--- a/pkg/sentry/socket/netstack/BUILD
+++ /dev/null
@@ -1,58 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "netstack",
- srcs = [
- "device.go",
- "netstack.go",
- "netstack_vfs2.go",
- "provider.go",
- "provider_vfs2.go",
- "save_restore.go",
- "stack.go",
- "tun.go",
- ],
- visibility = [
- "//pkg/sentry:internal",
- ],
- deps = [
- "//pkg/abi/linux",
- "//pkg/abi/linux/errno",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/marshal",
- "//pkg/marshal/primitive",
- "//pkg/metric",
- "//pkg/sentry/arch",
- "//pkg/sentry/device",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/fsutil",
- "//pkg/sentry/fsimpl/sockfs",
- "//pkg/sentry/inet",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/socket",
- "//pkg/sentry/socket/netfilter",
- "//pkg/sentry/unimpl",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/syserr",
- "//pkg/syserror",
- "//pkg/tcpip",
- "//pkg/tcpip/header",
- "//pkg/tcpip/link/tun",
- "//pkg/tcpip/network/ipv4",
- "//pkg/tcpip/network/ipv6",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/transport/tcp",
- "//pkg/tcpip/transport/udp",
- "//pkg/usermem",
- "//pkg/waiter",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/sentry/socket/netstack/netstack_state_autogen.go b/pkg/sentry/socket/netstack/netstack_state_autogen.go
new file mode 100644
index 000000000..335437f04
--- /dev/null
+++ b/pkg/sentry/socket/netstack/netstack_state_autogen.go
@@ -0,0 +1,148 @@
+// automatically generated by stateify.
+
+package netstack
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (s *SocketOperations) StateTypeName() string {
+ return "pkg/sentry/socket/netstack.SocketOperations"
+}
+
+func (s *SocketOperations) StateFields() []string {
+ return []string{
+ "socketOpsCommon",
+ }
+}
+
+func (s *SocketOperations) beforeSave() {}
+
+// +checklocksignore
+func (s *SocketOperations) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.socketOpsCommon)
+}
+
+func (s *SocketOperations) afterLoad() {}
+
+// +checklocksignore
+func (s *SocketOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.socketOpsCommon)
+}
+
+func (s *socketOpsCommon) StateTypeName() string {
+ return "pkg/sentry/socket/netstack.socketOpsCommon"
+}
+
+func (s *socketOpsCommon) StateFields() []string {
+ return []string{
+ "SendReceiveTimeout",
+ "Queue",
+ "family",
+ "Endpoint",
+ "skType",
+ "protocol",
+ "sockOptTimestamp",
+ "timestampValid",
+ "timestampNS",
+ "sockOptInq",
+ }
+}
+
+func (s *socketOpsCommon) beforeSave() {}
+
+// +checklocksignore
+func (s *socketOpsCommon) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.SendReceiveTimeout)
+ stateSinkObject.Save(1, &s.Queue)
+ stateSinkObject.Save(2, &s.family)
+ stateSinkObject.Save(3, &s.Endpoint)
+ stateSinkObject.Save(4, &s.skType)
+ stateSinkObject.Save(5, &s.protocol)
+ stateSinkObject.Save(6, &s.sockOptTimestamp)
+ stateSinkObject.Save(7, &s.timestampValid)
+ stateSinkObject.Save(8, &s.timestampNS)
+ stateSinkObject.Save(9, &s.sockOptInq)
+}
+
+func (s *socketOpsCommon) afterLoad() {}
+
+// +checklocksignore
+func (s *socketOpsCommon) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.SendReceiveTimeout)
+ stateSourceObject.Load(1, &s.Queue)
+ stateSourceObject.Load(2, &s.family)
+ stateSourceObject.Load(3, &s.Endpoint)
+ stateSourceObject.Load(4, &s.skType)
+ stateSourceObject.Load(5, &s.protocol)
+ stateSourceObject.Load(6, &s.sockOptTimestamp)
+ stateSourceObject.Load(7, &s.timestampValid)
+ stateSourceObject.Load(8, &s.timestampNS)
+ stateSourceObject.Load(9, &s.sockOptInq)
+}
+
+func (s *SocketVFS2) StateTypeName() string {
+ return "pkg/sentry/socket/netstack.SocketVFS2"
+}
+
+func (s *SocketVFS2) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "DentryMetadataFileDescriptionImpl",
+ "LockFD",
+ "socketOpsCommon",
+ }
+}
+
+func (s *SocketVFS2) beforeSave() {}
+
+// +checklocksignore
+func (s *SocketVFS2) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.vfsfd)
+ stateSinkObject.Save(1, &s.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &s.DentryMetadataFileDescriptionImpl)
+ stateSinkObject.Save(3, &s.LockFD)
+ stateSinkObject.Save(4, &s.socketOpsCommon)
+}
+
+func (s *SocketVFS2) afterLoad() {}
+
+// +checklocksignore
+func (s *SocketVFS2) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.vfsfd)
+ stateSourceObject.Load(1, &s.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &s.DentryMetadataFileDescriptionImpl)
+ stateSourceObject.Load(3, &s.LockFD)
+ stateSourceObject.Load(4, &s.socketOpsCommon)
+}
+
+func (s *Stack) StateTypeName() string {
+ return "pkg/sentry/socket/netstack.Stack"
+}
+
+func (s *Stack) StateFields() []string {
+ return []string{}
+}
+
+func (s *Stack) beforeSave() {}
+
+// +checklocksignore
+func (s *Stack) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+}
+
+// +checklocksignore
+func (s *Stack) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.AfterLoad(s.afterLoad)
+}
+
+func init() {
+ state.Register((*SocketOperations)(nil))
+ state.Register((*socketOpsCommon)(nil))
+ state.Register((*SocketVFS2)(nil))
+ state.Register((*Stack)(nil))
+}
diff --git a/pkg/sentry/socket/socket_state_autogen.go b/pkg/sentry/socket/socket_state_autogen.go
new file mode 100644
index 000000000..d050093f6
--- /dev/null
+++ b/pkg/sentry/socket/socket_state_autogen.go
@@ -0,0 +1,98 @@
+// automatically generated by stateify.
+
+package socket
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (i *IPControlMessages) StateTypeName() string {
+ return "pkg/sentry/socket.IPControlMessages"
+}
+
+func (i *IPControlMessages) StateFields() []string {
+ return []string{
+ "HasTimestamp",
+ "Timestamp",
+ "HasInq",
+ "Inq",
+ "HasTOS",
+ "TOS",
+ "HasTClass",
+ "TClass",
+ "HasIPPacketInfo",
+ "PacketInfo",
+ "OriginalDstAddress",
+ "SockErr",
+ }
+}
+
+func (i *IPControlMessages) beforeSave() {}
+
+// +checklocksignore
+func (i *IPControlMessages) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.HasTimestamp)
+ stateSinkObject.Save(1, &i.Timestamp)
+ stateSinkObject.Save(2, &i.HasInq)
+ stateSinkObject.Save(3, &i.Inq)
+ stateSinkObject.Save(4, &i.HasTOS)
+ stateSinkObject.Save(5, &i.TOS)
+ stateSinkObject.Save(6, &i.HasTClass)
+ stateSinkObject.Save(7, &i.TClass)
+ stateSinkObject.Save(8, &i.HasIPPacketInfo)
+ stateSinkObject.Save(9, &i.PacketInfo)
+ stateSinkObject.Save(10, &i.OriginalDstAddress)
+ stateSinkObject.Save(11, &i.SockErr)
+}
+
+func (i *IPControlMessages) afterLoad() {}
+
+// +checklocksignore
+func (i *IPControlMessages) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.HasTimestamp)
+ stateSourceObject.Load(1, &i.Timestamp)
+ stateSourceObject.Load(2, &i.HasInq)
+ stateSourceObject.Load(3, &i.Inq)
+ stateSourceObject.Load(4, &i.HasTOS)
+ stateSourceObject.Load(5, &i.TOS)
+ stateSourceObject.Load(6, &i.HasTClass)
+ stateSourceObject.Load(7, &i.TClass)
+ stateSourceObject.Load(8, &i.HasIPPacketInfo)
+ stateSourceObject.Load(9, &i.PacketInfo)
+ stateSourceObject.Load(10, &i.OriginalDstAddress)
+ stateSourceObject.Load(11, &i.SockErr)
+}
+
+func (to *SendReceiveTimeout) StateTypeName() string {
+ return "pkg/sentry/socket.SendReceiveTimeout"
+}
+
+func (to *SendReceiveTimeout) StateFields() []string {
+ return []string{
+ "send",
+ "recv",
+ }
+}
+
+func (to *SendReceiveTimeout) beforeSave() {}
+
+// +checklocksignore
+func (to *SendReceiveTimeout) StateSave(stateSinkObject state.Sink) {
+ to.beforeSave()
+ stateSinkObject.Save(0, &to.send)
+ stateSinkObject.Save(1, &to.recv)
+}
+
+func (to *SendReceiveTimeout) afterLoad() {}
+
+// +checklocksignore
+func (to *SendReceiveTimeout) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &to.send)
+ stateSourceObject.Load(1, &to.recv)
+}
+
+func init() {
+ state.Register((*IPControlMessages)(nil))
+ state.Register((*SendReceiveTimeout)(nil))
+}
diff --git a/pkg/sentry/socket/unix/BUILD b/pkg/sentry/socket/unix/BUILD
deleted file mode 100644
index 5c3cdef6a..000000000
--- a/pkg/sentry/socket/unix/BUILD
+++ /dev/null
@@ -1,71 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "socket_refs",
- out = "socket_refs.go",
- package = "unix",
- prefix = "socketOperations",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "SocketOperations",
- },
-)
-
-go_template_instance(
- name = "socket_vfs2_refs",
- out = "socket_vfs2_refs.go",
- package = "unix",
- prefix = "socketVFS2",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "SocketVFS2",
- },
-)
-
-go_library(
- name = "unix",
- srcs = [
- "device.go",
- "io.go",
- "socket_refs.go",
- "socket_vfs2_refs.go",
- "unix.go",
- "unix_vfs2.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/fspath",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/marshal",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/safemem",
- "//pkg/sentry/arch",
- "//pkg/sentry/device",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/fsutil",
- "//pkg/sentry/fs/lock",
- "//pkg/sentry/fsimpl/sockfs",
- "//pkg/sentry/inet",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/socket",
- "//pkg/sentry/socket/control",
- "//pkg/sentry/socket/netstack",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/vfs",
- "//pkg/syserr",
- "//pkg/syserror",
- "//pkg/tcpip",
- "//pkg/usermem",
- "//pkg/waiter",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/sentry/socket/unix/socket_refs.go b/pkg/sentry/socket/unix/socket_refs.go
new file mode 100644
index 000000000..61c6bd17c
--- /dev/null
+++ b/pkg/sentry/socket/unix/socket_refs.go
@@ -0,0 +1,140 @@
+package unix
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const socketOperationsenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var socketOperationsobj *SocketOperations
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type socketOperationsRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *socketOperationsRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *socketOperationsRefs) RefType() string {
+ return fmt.Sprintf("%T", socketOperationsobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *socketOperationsRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *socketOperationsRefs) LogRefs() bool {
+ return socketOperationsenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *socketOperationsRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *socketOperationsRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if socketOperationsenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.TryRefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *socketOperationsRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if socketOperationsenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *socketOperationsRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if socketOperationsenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *socketOperationsRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
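
The generated reference counter above packs speculative and real references into a single int64 so TryIncRef can avoid a compare-and-swap loop: the speculative reference goes into the high 32 bits and is converted to a real reference only if the low 32 bits were non-zero. A short in-package sketch of how such a generated refs type is typically used; the wrapping object and destructor are illustrative only, not part of this change:

package unix

// exampleObject is illustrative: generated refs types are embedded in the
// object they guard, as SocketOperations does with socketOperationsRefs.
type exampleObject struct {
	socketOperationsRefs
}

func newExampleObject() *exampleObject {
	o := &exampleObject{}
	o.InitRefs() // starts with one reference and registers for leak checking
	return o
}

func (o *exampleObject) release() {
	o.DecRef(func() {
		// Destructor: runs exactly once, when the count drops to zero.
	})
}

func borrow(o *exampleObject) bool {
	// TryIncRef succeeds only while at least one real reference is held.
	if !o.TryIncRef() {
		return false
	}
	defer o.DecRef(nil)
	// ... use o ...
	return true
}
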
diff --git a/pkg/sentry/socket/unix/socket_vfs2_refs.go b/pkg/sentry/socket/unix/socket_vfs2_refs.go
new file mode 100644
index 000000000..f6ef581d8
--- /dev/null
+++ b/pkg/sentry/socket/unix/socket_vfs2_refs.go
@@ -0,0 +1,140 @@
+package unix
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const socketVFS2enableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var socketVFS2obj *SocketVFS2
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type socketVFS2Refs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *socketVFS2Refs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *socketVFS2Refs) RefType() string {
+ return fmt.Sprintf("%T", socketVFS2obj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *socketVFS2Refs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *socketVFS2Refs) LogRefs() bool {
+ return socketVFS2enableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *socketVFS2Refs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *socketVFS2Refs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if socketVFS2enableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.TryRefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *socketVFS2Refs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if socketVFS2enableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *socketVFS2Refs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if socketVFS2enableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *socketVFS2Refs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
diff --git a/pkg/sentry/socket/unix/transport/BUILD b/pkg/sentry/socket/unix/transport/BUILD
deleted file mode 100644
index 0d11bb251..000000000
--- a/pkg/sentry/socket/unix/transport/BUILD
+++ /dev/null
@@ -1,56 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "transport_message_list",
- out = "transport_message_list.go",
- package = "transport",
- prefix = "message",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*message",
- "Linker": "*message",
- },
-)
-
-go_template_instance(
- name = "queue_refs",
- out = "queue_refs.go",
- package = "transport",
- prefix = "queue",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "queue",
- },
-)
-
-go_library(
- name = "transport",
- srcs = [
- "connectioned.go",
- "connectioned_state.go",
- "connectionless.go",
- "connectionless_state.go",
- "queue.go",
- "queue_refs.go",
- "transport_message_list.go",
- "unix.go",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/ilist",
- "//pkg/log",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/sentry/inet",
- "//pkg/sync",
- "//pkg/syserr",
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/waiter",
- ],
-)
diff --git a/pkg/sentry/socket/unix/transport/queue_refs.go b/pkg/sentry/socket/unix/transport/queue_refs.go
new file mode 100644
index 000000000..9e6f52616
--- /dev/null
+++ b/pkg/sentry/socket/unix/transport/queue_refs.go
@@ -0,0 +1,140 @@
+package transport
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const queueenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var queueobj *queue
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type queueRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *queueRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *queueRefs) RefType() string {
+ return fmt.Sprintf("%T", queueobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *queueRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *queueRefs) LogRefs() bool {
+ return queueenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *queueRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *queueRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if queueenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.TryRefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *queueRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if queueenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *queueRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if queueenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *queueRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
diff --git a/pkg/sentry/socket/unix/transport/transport_message_list.go b/pkg/sentry/socket/unix/transport/transport_message_list.go
new file mode 100644
index 000000000..945163cc1
--- /dev/null
+++ b/pkg/sentry/socket/unix/transport/transport_message_list.go
@@ -0,0 +1,221 @@
+package transport
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type messageElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (messageElementMapper) linkerFor(elem *message) *message { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type messageList struct {
+ head *message
+ tail *message
+}
+
+// Reset resets list l to the empty state.
+func (l *messageList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *messageList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *messageList) Front() *message {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *messageList) Back() *message {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *messageList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (messageElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *messageList) PushFront(e *message) {
+ linker := messageElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ messageElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *messageList) PushBack(e *message) {
+ linker := messageElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ messageElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *messageList) PushBackList(m *messageList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ messageElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ messageElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *messageList) InsertAfter(b, e *message) {
+ bLinker := messageElementMapper{}.linkerFor(b)
+ eLinker := messageElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ messageElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *messageList) InsertBefore(a, e *message) {
+ aLinker := messageElementMapper{}.linkerFor(a)
+ eLinker := messageElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ messageElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *messageList) Remove(e *message) {
+ linker := messageElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ messageElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ messageElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type messageEntry struct {
+ next *message
+ prev *message
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *messageEntry) Next() *message {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *messageEntry) Prev() *message {
+ return e.prev
+}
+
+// SetNext assigns 'entry' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *messageEntry) SetNext(elem *message) {
+ e.next = elem
+}
+
+// SetPrev assigns 'entry' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *messageEntry) SetPrev(elem *message) {
+ e.prev = elem
+}
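
messageList above is the generic intrusive-list template instantiated for *message: elements embed a messageEntry, which supplies the Next/Prev links, so pushing and removing never allocates. A small in-package sketch of the idiom, assuming only that message embeds messageEntry as the generated state fields indicate (the demo function is illustrative, not part of this change):

package transport

// listDemo illustrates the intrusive-list idiom used by messageList above.
func listDemo() int {
	var l messageList // the zero value is an empty, ready-to-use list

	first := &message{}
	second := &message{}
	l.PushBack(first)
	l.PushBack(second)

	// Iterate front to back, exactly as documented on messageList.
	n := 0
	for e := l.Front(); e != nil; e = e.Next() {
		n++
	}

	// Removal unlinks in O(1) and clears the element's own links.
	l.Remove(first)
	return n // 2
}
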
diff --git a/pkg/sentry/socket/unix/transport/transport_state_autogen.go b/pkg/sentry/socket/unix/transport/transport_state_autogen.go
new file mode 100644
index 000000000..946b0032e
--- /dev/null
+++ b/pkg/sentry/socket/unix/transport/transport_state_autogen.go
@@ -0,0 +1,398 @@
+// automatically generated by stateify.
+
+package transport
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (e *connectionedEndpoint) StateTypeName() string {
+ return "pkg/sentry/socket/unix/transport.connectionedEndpoint"
+}
+
+func (e *connectionedEndpoint) StateFields() []string {
+ return []string{
+ "baseEndpoint",
+ "id",
+ "idGenerator",
+ "stype",
+ "acceptedChan",
+ }
+}
+
+func (e *connectionedEndpoint) beforeSave() {}
+
+// +checklocksignore
+func (e *connectionedEndpoint) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ var acceptedChanValue []*connectionedEndpoint = e.saveAcceptedChan()
+ stateSinkObject.SaveValue(4, acceptedChanValue)
+ stateSinkObject.Save(0, &e.baseEndpoint)
+ stateSinkObject.Save(1, &e.id)
+ stateSinkObject.Save(2, &e.idGenerator)
+ stateSinkObject.Save(3, &e.stype)
+}
+
+// +checklocksignore
+func (e *connectionedEndpoint) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.baseEndpoint)
+ stateSourceObject.Load(1, &e.id)
+ stateSourceObject.Load(2, &e.idGenerator)
+ stateSourceObject.Load(3, &e.stype)
+ stateSourceObject.LoadValue(4, new([]*connectionedEndpoint), func(y interface{}) { e.loadAcceptedChan(y.([]*connectionedEndpoint)) })
+ stateSourceObject.AfterLoad(e.afterLoad)
+}
+
+func (e *connectionlessEndpoint) StateTypeName() string {
+ return "pkg/sentry/socket/unix/transport.connectionlessEndpoint"
+}
+
+func (e *connectionlessEndpoint) StateFields() []string {
+ return []string{
+ "baseEndpoint",
+ }
+}
+
+func (e *connectionlessEndpoint) beforeSave() {}
+
+// +checklocksignore
+func (e *connectionlessEndpoint) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.baseEndpoint)
+}
+
+// +checklocksignore
+func (e *connectionlessEndpoint) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.baseEndpoint)
+ stateSourceObject.AfterLoad(e.afterLoad)
+}
+
+func (q *queue) StateTypeName() string {
+ return "pkg/sentry/socket/unix/transport.queue"
+}
+
+func (q *queue) StateFields() []string {
+ return []string{
+ "queueRefs",
+ "ReaderQueue",
+ "WriterQueue",
+ "closed",
+ "unread",
+ "used",
+ "limit",
+ "dataList",
+ }
+}
+
+func (q *queue) beforeSave() {}
+
+// +checklocksignore
+func (q *queue) StateSave(stateSinkObject state.Sink) {
+ q.beforeSave()
+ stateSinkObject.Save(0, &q.queueRefs)
+ stateSinkObject.Save(1, &q.ReaderQueue)
+ stateSinkObject.Save(2, &q.WriterQueue)
+ stateSinkObject.Save(3, &q.closed)
+ stateSinkObject.Save(4, &q.unread)
+ stateSinkObject.Save(5, &q.used)
+ stateSinkObject.Save(6, &q.limit)
+ stateSinkObject.Save(7, &q.dataList)
+}
+
+func (q *queue) afterLoad() {}
+
+// +checklocksignore
+func (q *queue) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &q.queueRefs)
+ stateSourceObject.Load(1, &q.ReaderQueue)
+ stateSourceObject.Load(2, &q.WriterQueue)
+ stateSourceObject.Load(3, &q.closed)
+ stateSourceObject.Load(4, &q.unread)
+ stateSourceObject.Load(5, &q.used)
+ stateSourceObject.Load(6, &q.limit)
+ stateSourceObject.Load(7, &q.dataList)
+}
+
+func (r *queueRefs) StateTypeName() string {
+ return "pkg/sentry/socket/unix/transport.queueRefs"
+}
+
+func (r *queueRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *queueRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *queueRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *queueRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (l *messageList) StateTypeName() string {
+ return "pkg/sentry/socket/unix/transport.messageList"
+}
+
+func (l *messageList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *messageList) beforeSave() {}
+
+// +checklocksignore
+func (l *messageList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *messageList) afterLoad() {}
+
+// +checklocksignore
+func (l *messageList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *messageEntry) StateTypeName() string {
+ return "pkg/sentry/socket/unix/transport.messageEntry"
+}
+
+func (e *messageEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *messageEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *messageEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *messageEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *messageEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func (c *ControlMessages) StateTypeName() string {
+ return "pkg/sentry/socket/unix/transport.ControlMessages"
+}
+
+func (c *ControlMessages) StateFields() []string {
+ return []string{
+ "Rights",
+ "Credentials",
+ }
+}
+
+func (c *ControlMessages) beforeSave() {}
+
+// +checklocksignore
+func (c *ControlMessages) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.Rights)
+ stateSinkObject.Save(1, &c.Credentials)
+}
+
+func (c *ControlMessages) afterLoad() {}
+
+// +checklocksignore
+func (c *ControlMessages) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.Rights)
+ stateSourceObject.Load(1, &c.Credentials)
+}
+
+func (m *message) StateTypeName() string {
+ return "pkg/sentry/socket/unix/transport.message"
+}
+
+func (m *message) StateFields() []string {
+ return []string{
+ "messageEntry",
+ "Data",
+ "Control",
+ "Address",
+ }
+}
+
+func (m *message) beforeSave() {}
+
+// +checklocksignore
+func (m *message) StateSave(stateSinkObject state.Sink) {
+ m.beforeSave()
+ stateSinkObject.Save(0, &m.messageEntry)
+ stateSinkObject.Save(1, &m.Data)
+ stateSinkObject.Save(2, &m.Control)
+ stateSinkObject.Save(3, &m.Address)
+}
+
+func (m *message) afterLoad() {}
+
+// +checklocksignore
+func (m *message) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &m.messageEntry)
+ stateSourceObject.Load(1, &m.Data)
+ stateSourceObject.Load(2, &m.Control)
+ stateSourceObject.Load(3, &m.Address)
+}
+
+func (q *queueReceiver) StateTypeName() string {
+ return "pkg/sentry/socket/unix/transport.queueReceiver"
+}
+
+func (q *queueReceiver) StateFields() []string {
+ return []string{
+ "readQueue",
+ }
+}
+
+func (q *queueReceiver) beforeSave() {}
+
+// +checklocksignore
+func (q *queueReceiver) StateSave(stateSinkObject state.Sink) {
+ q.beforeSave()
+ stateSinkObject.Save(0, &q.readQueue)
+}
+
+func (q *queueReceiver) afterLoad() {}
+
+// +checklocksignore
+func (q *queueReceiver) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &q.readQueue)
+}
+
+func (q *streamQueueReceiver) StateTypeName() string {
+ return "pkg/sentry/socket/unix/transport.streamQueueReceiver"
+}
+
+func (q *streamQueueReceiver) StateFields() []string {
+ return []string{
+ "queueReceiver",
+ "buffer",
+ "control",
+ "addr",
+ }
+}
+
+func (q *streamQueueReceiver) beforeSave() {}
+
+// +checklocksignore
+func (q *streamQueueReceiver) StateSave(stateSinkObject state.Sink) {
+ q.beforeSave()
+ stateSinkObject.Save(0, &q.queueReceiver)
+ stateSinkObject.Save(1, &q.buffer)
+ stateSinkObject.Save(2, &q.control)
+ stateSinkObject.Save(3, &q.addr)
+}
+
+func (q *streamQueueReceiver) afterLoad() {}
+
+// +checklocksignore
+func (q *streamQueueReceiver) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &q.queueReceiver)
+ stateSourceObject.Load(1, &q.buffer)
+ stateSourceObject.Load(2, &q.control)
+ stateSourceObject.Load(3, &q.addr)
+}
+
+func (e *connectedEndpoint) StateTypeName() string {
+ return "pkg/sentry/socket/unix/transport.connectedEndpoint"
+}
+
+func (e *connectedEndpoint) StateFields() []string {
+ return []string{
+ "endpoint",
+ "writeQueue",
+ }
+}
+
+func (e *connectedEndpoint) beforeSave() {}
+
+// +checklocksignore
+func (e *connectedEndpoint) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.endpoint)
+ stateSinkObject.Save(1, &e.writeQueue)
+}
+
+func (e *connectedEndpoint) afterLoad() {}
+
+// +checklocksignore
+func (e *connectedEndpoint) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.endpoint)
+ stateSourceObject.Load(1, &e.writeQueue)
+}
+
+func (e *baseEndpoint) StateTypeName() string {
+ return "pkg/sentry/socket/unix/transport.baseEndpoint"
+}
+
+func (e *baseEndpoint) StateFields() []string {
+ return []string{
+ "Queue",
+ "DefaultSocketOptionsHandler",
+ "receiver",
+ "connected",
+ "path",
+ "ops",
+ }
+}
+
+func (e *baseEndpoint) beforeSave() {}
+
+// +checklocksignore
+func (e *baseEndpoint) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.Queue)
+ stateSinkObject.Save(1, &e.DefaultSocketOptionsHandler)
+ stateSinkObject.Save(2, &e.receiver)
+ stateSinkObject.Save(3, &e.connected)
+ stateSinkObject.Save(4, &e.path)
+ stateSinkObject.Save(5, &e.ops)
+}
+
+func (e *baseEndpoint) afterLoad() {}
+
+// +checklocksignore
+func (e *baseEndpoint) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.Queue)
+ stateSourceObject.Load(1, &e.DefaultSocketOptionsHandler)
+ stateSourceObject.Load(2, &e.receiver)
+ stateSourceObject.Load(3, &e.connected)
+ stateSourceObject.Load(4, &e.path)
+ stateSourceObject.Load(5, &e.ops)
+}
+
+func init() {
+ state.Register((*connectionedEndpoint)(nil))
+ state.Register((*connectionlessEndpoint)(nil))
+ state.Register((*queue)(nil))
+ state.Register((*queueRefs)(nil))
+ state.Register((*messageList)(nil))
+ state.Register((*messageEntry)(nil))
+ state.Register((*ControlMessages)(nil))
+ state.Register((*message)(nil))
+ state.Register((*queueReceiver)(nil))
+ state.Register((*streamQueueReceiver)(nil))
+ state.Register((*connectedEndpoint)(nil))
+ state.Register((*baseEndpoint)(nil))
+}
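
Unlike the simpler generated files above, connectionedEndpoint routes one field through SaveValue/LoadValue: its acceptedChan is a Go channel and cannot be serialized directly, so stateify calls the hand-written saveAcceptedChan/loadAcceptedChan helpers and schedules afterLoad to run once loading completes. A minimal sketch of that hook pattern for a hypothetical type with a non-savable field (all names here are illustrative, not part of this change):

package example

import "gvisor.dev/gvisor/pkg/state"

// mailbox is hypothetical: its channel is drained to a slice on save and
// rebuilt on load, mirroring the SaveValue/LoadValue + AfterLoad pattern
// in the generated code above.
type mailbox struct {
	pending chan string // not savable as-is
}

func (m *mailbox) savePending() []string {
	out := make([]string, 0, len(m.pending))
	for len(m.pending) > 0 {
		out = append(out, <-m.pending)
	}
	return out
}

func (m *mailbox) loadPending(msgs []string) {
	m.pending = make(chan string, len(msgs)+16)
	for _, s := range msgs {
		m.pending <- s
	}
}

func (m *mailbox) afterLoad() {
	// Runs after all objects have been loaded, like e.afterLoad above.
}

func (m *mailbox) StateTypeName() string { return "example.mailbox" }
func (m *mailbox) StateFields() []string { return []string{"pending"} }

func (m *mailbox) StateSave(sink state.Sink) {
	var pendingValue []string = m.savePending()
	sink.SaveValue(0, pendingValue)
}

func (m *mailbox) StateLoad(source state.Source) {
	source.LoadValue(0, new([]string), func(y interface{}) { m.loadPending(y.([]string)) })
	source.AfterLoad(m.afterLoad)
}

func init() {
	state.Register((*mailbox)(nil))
}
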
diff --git a/pkg/sentry/socket/unix/unix_state_autogen.go b/pkg/sentry/socket/unix/unix_state_autogen.go
new file mode 100644
index 000000000..e6169dfad
--- /dev/null
+++ b/pkg/sentry/socket/unix/unix_state_autogen.go
@@ -0,0 +1,168 @@
+// automatically generated by stateify.
+
+package unix
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (r *socketOperationsRefs) StateTypeName() string {
+ return "pkg/sentry/socket/unix.socketOperationsRefs"
+}
+
+func (r *socketOperationsRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *socketOperationsRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *socketOperationsRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *socketOperationsRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (r *socketVFS2Refs) StateTypeName() string {
+ return "pkg/sentry/socket/unix.socketVFS2Refs"
+}
+
+func (r *socketVFS2Refs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *socketVFS2Refs) beforeSave() {}
+
+// +checklocksignore
+func (r *socketVFS2Refs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *socketVFS2Refs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (s *SocketOperations) StateTypeName() string {
+ return "pkg/sentry/socket/unix.SocketOperations"
+}
+
+func (s *SocketOperations) StateFields() []string {
+ return []string{
+ "socketOperationsRefs",
+ "socketOpsCommon",
+ }
+}
+
+func (s *SocketOperations) beforeSave() {}
+
+// +checklocksignore
+func (s *SocketOperations) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.socketOperationsRefs)
+ stateSinkObject.Save(1, &s.socketOpsCommon)
+}
+
+func (s *SocketOperations) afterLoad() {}
+
+// +checklocksignore
+func (s *SocketOperations) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.socketOperationsRefs)
+ stateSourceObject.Load(1, &s.socketOpsCommon)
+}
+
+func (s *socketOpsCommon) StateTypeName() string {
+ return "pkg/sentry/socket/unix.socketOpsCommon"
+}
+
+func (s *socketOpsCommon) StateFields() []string {
+ return []string{
+ "SendReceiveTimeout",
+ "ep",
+ "stype",
+ "abstractName",
+ "abstractNamespace",
+ }
+}
+
+func (s *socketOpsCommon) beforeSave() {}
+
+// +checklocksignore
+func (s *socketOpsCommon) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.SendReceiveTimeout)
+ stateSinkObject.Save(1, &s.ep)
+ stateSinkObject.Save(2, &s.stype)
+ stateSinkObject.Save(3, &s.abstractName)
+ stateSinkObject.Save(4, &s.abstractNamespace)
+}
+
+func (s *socketOpsCommon) afterLoad() {}
+
+// +checklocksignore
+func (s *socketOpsCommon) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.SendReceiveTimeout)
+ stateSourceObject.Load(1, &s.ep)
+ stateSourceObject.Load(2, &s.stype)
+ stateSourceObject.Load(3, &s.abstractName)
+ stateSourceObject.Load(4, &s.abstractNamespace)
+}
+
+func (s *SocketVFS2) StateTypeName() string {
+ return "pkg/sentry/socket/unix.SocketVFS2"
+}
+
+func (s *SocketVFS2) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "DentryMetadataFileDescriptionImpl",
+ "LockFD",
+ "socketVFS2Refs",
+ "socketOpsCommon",
+ }
+}
+
+func (s *SocketVFS2) beforeSave() {}
+
+// +checklocksignore
+func (s *SocketVFS2) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.vfsfd)
+ stateSinkObject.Save(1, &s.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &s.DentryMetadataFileDescriptionImpl)
+ stateSinkObject.Save(3, &s.LockFD)
+ stateSinkObject.Save(4, &s.socketVFS2Refs)
+ stateSinkObject.Save(5, &s.socketOpsCommon)
+}
+
+func (s *SocketVFS2) afterLoad() {}
+
+// +checklocksignore
+func (s *SocketVFS2) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.vfsfd)
+ stateSourceObject.Load(1, &s.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &s.DentryMetadataFileDescriptionImpl)
+ stateSourceObject.Load(3, &s.LockFD)
+ stateSourceObject.Load(4, &s.socketVFS2Refs)
+ stateSourceObject.Load(5, &s.socketOpsCommon)
+}
+
+func init() {
+ state.Register((*socketOperationsRefs)(nil))
+ state.Register((*socketVFS2Refs)(nil))
+ state.Register((*SocketOperations)(nil))
+ state.Register((*socketOpsCommon)(nil))
+ state.Register((*SocketVFS2)(nil))
+}
diff --git a/pkg/sentry/state/BUILD b/pkg/sentry/state/BUILD
deleted file mode 100644
index 7f02807c5..000000000
--- a/pkg/sentry/state/BUILD
+++ /dev/null
@@ -1,26 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "state",
- srcs = [
- "state.go",
- "state_metadata.go",
- "state_unsafe.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/log",
- "//pkg/sentry/inet",
- "//pkg/sentry/kernel",
- "//pkg/sentry/time",
- "//pkg/sentry/vfs",
- "//pkg/sentry/watchdog",
- "//pkg/state/statefile",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/sentry/state/state_state_autogen.go b/pkg/sentry/state/state_state_autogen.go
new file mode 100644
index 000000000..e3e199293
--- /dev/null
+++ b/pkg/sentry/state/state_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build go1.1
+
+package state
diff --git a/pkg/sentry/state/state_unsafe_state_autogen.go b/pkg/sentry/state/state_unsafe_state_autogen.go
new file mode 100644
index 000000000..6c2b29632
--- /dev/null
+++ b/pkg/sentry/state/state_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package state
diff --git a/pkg/sentry/strace/BUILD b/pkg/sentry/strace/BUILD
deleted file mode 100644
index 369541c7a..000000000
--- a/pkg/sentry/strace/BUILD
+++ /dev/null
@@ -1,46 +0,0 @@
-load("//tools:defs.bzl", "go_library", "proto_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "strace",
- srcs = [
- "capability.go",
- "clone.go",
- "epoll.go",
- "futex.go",
- "linux64_amd64.go",
- "linux64_arm64.go",
- "mmap.go",
- "open.go",
- "poll.go",
- "ptrace.go",
- "select.go",
- "signal.go",
- "socket.go",
- "strace.go",
- "syscalls.go",
- ],
- visibility = ["//:sandbox"],
- deps = [
- ":strace_go_proto",
- "//pkg/abi",
- "//pkg/abi/linux",
- "//pkg/bits",
- "//pkg/eventchannel",
- "//pkg/hostarch",
- "//pkg/marshal/primitive",
- "//pkg/seccomp",
- "//pkg/sentry/arch",
- "//pkg/sentry/kernel",
- "//pkg/sentry/socket",
- "//pkg/sentry/socket/netlink",
- "//pkg/sentry/syscalls/linux",
- ],
-)
-
-proto_library(
- name = "strace",
- srcs = ["strace.proto"],
- visibility = ["//visibility:public"],
-)
diff --git a/pkg/sentry/strace/strace.proto b/pkg/sentry/strace/strace.proto
deleted file mode 100644
index 906c52c51..000000000
--- a/pkg/sentry/strace/strace.proto
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package gvisor;
-
-message Strace {
- // Process name that made the syscall.
- string process = 1;
-
- // Syscall function name.
- string function = 2;
-
- // List of syscall arguments formatted as strings.
- repeated string args = 3;
-
- oneof info {
- StraceEnter enter = 4;
- StraceExit exit = 5;
- }
-}
-
-message StraceEnter {}
-
-message StraceExit {
- // Return value formatted as string.
- string return = 1;
-
- // Formatted error string in case syscall failed.
- string error = 2;
-
- // Value of errno upon syscall exit.
- int64 err_no = 3; // errno is a macro and gets expanded :-(
-
- // Time elapsed between syscall enter and exit.
- int64 elapsed_ns = 4;
-}
diff --git a/pkg/sentry/strace/strace_amd64_state_autogen.go b/pkg/sentry/strace/strace_amd64_state_autogen.go
new file mode 100644
index 000000000..c7d4b3eb4
--- /dev/null
+++ b/pkg/sentry/strace/strace_amd64_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build amd64
+
+package strace
diff --git a/pkg/sentry/strace/strace_arm64_state_autogen.go b/pkg/sentry/strace/strace_arm64_state_autogen.go
new file mode 100644
index 000000000..9b8f66dc9
--- /dev/null
+++ b/pkg/sentry/strace/strace_arm64_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build arm64
+
+package strace
diff --git a/pkg/sentry/strace/strace_go_proto/strace.pb.go b/pkg/sentry/strace/strace_go_proto/strace.pb.go
new file mode 100644
index 000000000..f3d5c1108
--- /dev/null
+++ b/pkg/sentry/strace/strace_go_proto/strace.pb.go
@@ -0,0 +1,362 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.13.0
+// source: pkg/sentry/strace/strace.proto
+
+package gvisor
+
+import (
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+type Strace struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Process string `protobuf:"bytes,1,opt,name=process,proto3" json:"process,omitempty"`
+ Function string `protobuf:"bytes,2,opt,name=function,proto3" json:"function,omitempty"`
+ Args []string `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"`
+ // Types that are assignable to Info:
+ // *Strace_Enter
+ // *Strace_Exit
+ Info isStrace_Info `protobuf_oneof:"info"`
+}
+
+func (x *Strace) Reset() {
+ *x = Strace{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_sentry_strace_strace_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Strace) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Strace) ProtoMessage() {}
+
+func (x *Strace) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_sentry_strace_strace_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Strace.ProtoReflect.Descriptor instead.
+func (*Strace) Descriptor() ([]byte, []int) {
+ return file_pkg_sentry_strace_strace_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Strace) GetProcess() string {
+ if x != nil {
+ return x.Process
+ }
+ return ""
+}
+
+func (x *Strace) GetFunction() string {
+ if x != nil {
+ return x.Function
+ }
+ return ""
+}
+
+func (x *Strace) GetArgs() []string {
+ if x != nil {
+ return x.Args
+ }
+ return nil
+}
+
+func (m *Strace) GetInfo() isStrace_Info {
+ if m != nil {
+ return m.Info
+ }
+ return nil
+}
+
+func (x *Strace) GetEnter() *StraceEnter {
+ if x, ok := x.GetInfo().(*Strace_Enter); ok {
+ return x.Enter
+ }
+ return nil
+}
+
+func (x *Strace) GetExit() *StraceExit {
+ if x, ok := x.GetInfo().(*Strace_Exit); ok {
+ return x.Exit
+ }
+ return nil
+}
+
+type isStrace_Info interface {
+ isStrace_Info()
+}
+
+type Strace_Enter struct {
+ Enter *StraceEnter `protobuf:"bytes,4,opt,name=enter,proto3,oneof"`
+}
+
+type Strace_Exit struct {
+ Exit *StraceExit `protobuf:"bytes,5,opt,name=exit,proto3,oneof"`
+}
+
+func (*Strace_Enter) isStrace_Info() {}
+
+func (*Strace_Exit) isStrace_Info() {}
+
+type StraceEnter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *StraceEnter) Reset() {
+ *x = StraceEnter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_sentry_strace_strace_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StraceEnter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StraceEnter) ProtoMessage() {}
+
+func (x *StraceEnter) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_sentry_strace_strace_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StraceEnter.ProtoReflect.Descriptor instead.
+func (*StraceEnter) Descriptor() ([]byte, []int) {
+ return file_pkg_sentry_strace_strace_proto_rawDescGZIP(), []int{1}
+}
+
+type StraceExit struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Return string `protobuf:"bytes,1,opt,name=return,proto3" json:"return,omitempty"`
+ Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
+ ErrNo int64 `protobuf:"varint,3,opt,name=err_no,json=errNo,proto3" json:"err_no,omitempty"`
+ ElapsedNs int64 `protobuf:"varint,4,opt,name=elapsed_ns,json=elapsedNs,proto3" json:"elapsed_ns,omitempty"`
+}
+
+func (x *StraceExit) Reset() {
+ *x = StraceExit{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_sentry_strace_strace_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StraceExit) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StraceExit) ProtoMessage() {}
+
+func (x *StraceExit) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_sentry_strace_strace_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StraceExit.ProtoReflect.Descriptor instead.
+func (*StraceExit) Descriptor() ([]byte, []int) {
+ return file_pkg_sentry_strace_strace_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *StraceExit) GetReturn() string {
+ if x != nil {
+ return x.Return
+ }
+ return ""
+}
+
+func (x *StraceExit) GetError() string {
+ if x != nil {
+ return x.Error
+ }
+ return ""
+}
+
+func (x *StraceExit) GetErrNo() int64 {
+ if x != nil {
+ return x.ErrNo
+ }
+ return 0
+}
+
+func (x *StraceExit) GetElapsedNs() int64 {
+ if x != nil {
+ return x.ElapsedNs
+ }
+ return 0
+}
+
+var File_pkg_sentry_strace_strace_proto protoreflect.FileDescriptor
+
+var file_pkg_sentry_strace_strace_proto_rawDesc = []byte{
+ 0x0a, 0x1e, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2f, 0x73, 0x74, 0x72,
+ 0x61, 0x63, 0x65, 0x2f, 0x73, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x06, 0x67, 0x76, 0x69, 0x73, 0x6f, 0x72, 0x22, 0xb1, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x72,
+ 0x61, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x12, 0x1a, 0x0a,
+ 0x08, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x08, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x72, 0x67,
+ 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x12, 0x2b, 0x0a,
+ 0x05, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x67,
+ 0x76, 0x69, 0x73, 0x6f, 0x72, 0x2e, 0x53, 0x74, 0x72, 0x61, 0x63, 0x65, 0x45, 0x6e, 0x74, 0x65,
+ 0x72, 0x48, 0x00, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x04, 0x65, 0x78,
+ 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x76, 0x69, 0x73, 0x6f,
+ 0x72, 0x2e, 0x53, 0x74, 0x72, 0x61, 0x63, 0x65, 0x45, 0x78, 0x69, 0x74, 0x48, 0x00, 0x52, 0x04,
+ 0x65, 0x78, 0x69, 0x74, 0x42, 0x06, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x0d, 0x0a, 0x0b,
+ 0x53, 0x74, 0x72, 0x61, 0x63, 0x65, 0x45, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x70, 0x0a, 0x0a, 0x53,
+ 0x74, 0x72, 0x61, 0x63, 0x65, 0x45, 0x78, 0x69, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x74,
+ 0x75, 0x72, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x74, 0x75, 0x72,
+ 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x15, 0x0a, 0x06, 0x65, 0x72, 0x72, 0x5f, 0x6e,
+ 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x65, 0x72, 0x72, 0x4e, 0x6f, 0x12, 0x1d,
+ 0x0a, 0x0a, 0x65, 0x6c, 0x61, 0x70, 0x73, 0x65, 0x64, 0x5f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x09, 0x65, 0x6c, 0x61, 0x70, 0x73, 0x65, 0x64, 0x4e, 0x73, 0x62, 0x06, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_pkg_sentry_strace_strace_proto_rawDescOnce sync.Once
+ file_pkg_sentry_strace_strace_proto_rawDescData = file_pkg_sentry_strace_strace_proto_rawDesc
+)
+
+func file_pkg_sentry_strace_strace_proto_rawDescGZIP() []byte {
+ file_pkg_sentry_strace_strace_proto_rawDescOnce.Do(func() {
+ file_pkg_sentry_strace_strace_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_sentry_strace_strace_proto_rawDescData)
+ })
+ return file_pkg_sentry_strace_strace_proto_rawDescData
+}
+
+var file_pkg_sentry_strace_strace_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_pkg_sentry_strace_strace_proto_goTypes = []interface{}{
+ (*Strace)(nil), // 0: gvisor.Strace
+ (*StraceEnter)(nil), // 1: gvisor.StraceEnter
+ (*StraceExit)(nil), // 2: gvisor.StraceExit
+}
+var file_pkg_sentry_strace_strace_proto_depIdxs = []int32{
+ 1, // 0: gvisor.Strace.enter:type_name -> gvisor.StraceEnter
+ 2, // 1: gvisor.Strace.exit:type_name -> gvisor.StraceExit
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_pkg_sentry_strace_strace_proto_init() }
+func file_pkg_sentry_strace_strace_proto_init() {
+ if File_pkg_sentry_strace_strace_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_pkg_sentry_strace_strace_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Strace); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_sentry_strace_strace_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StraceEnter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_sentry_strace_strace_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StraceExit); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_pkg_sentry_strace_strace_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*Strace_Enter)(nil),
+ (*Strace_Exit)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_pkg_sentry_strace_strace_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 3,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_pkg_sentry_strace_strace_proto_goTypes,
+ DependencyIndexes: file_pkg_sentry_strace_strace_proto_depIdxs,
+ MessageInfos: file_pkg_sentry_strace_strace_proto_msgTypes,
+ }.Build()
+ File_pkg_sentry_strace_strace_proto = out.File
+ file_pkg_sentry_strace_strace_proto_rawDesc = nil
+ file_pkg_sentry_strace_strace_proto_goTypes = nil
+ file_pkg_sentry_strace_strace_proto_depIdxs = nil
+}
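The generated bindings above give strace events a plain Go API: fields set via struct literals, Get* accessors, and a oneof that carries either the enter or the exit half of a syscall. A small hand-written usage sketch (it assumes the package is importable as gvisor.dev/gvisor/pkg/sentry/strace/strace_go_proto; all values below are made up):

package main

import (
	"fmt"

	spb "gvisor.dev/gvisor/pkg/sentry/strace/strace_go_proto"
)

func main() {
	// Build an exit event for a hypothetical openat call.
	ev := &spb.Strace{
		Process:  "sh",
		Function: "openat",
		Args:     []string{"AT_FDCWD", "\"/etc/hosts\"", "O_RDONLY"},
		Info: &spb.Strace_Exit{
			Exit: &spb.StraceExit{
				Return:    "0x3",
				ErrNo:     0,
				ElapsedNs: 12345,
			},
		},
	}

	// GetExit returns nil unless the oneof actually holds an exit record.
	if exit := ev.GetExit(); exit != nil {
		fmt.Printf("%s %s -> %s in %dns\n",
			ev.GetProcess(), ev.GetFunction(), exit.GetReturn(), exit.GetElapsedNs())
	}
}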
diff --git a/pkg/sentry/strace/strace_state_autogen.go b/pkg/sentry/strace/strace_state_autogen.go
new file mode 100644
index 000000000..33f6a7a54
--- /dev/null
+++ b/pkg/sentry/strace/strace_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package strace
diff --git a/pkg/sentry/syscalls/BUILD b/pkg/sentry/syscalls/BUILD
deleted file mode 100644
index f2c55588f..000000000
--- a/pkg/sentry/syscalls/BUILD
+++ /dev/null
@@ -1,22 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "syscalls",
- srcs = [
- "epoll.go",
- "syscalls.go",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/errors/linuxerr",
- "//pkg/sentry/arch",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/epoll",
- "//pkg/sentry/kernel/time",
- "//pkg/syserror",
- "//pkg/waiter",
- ],
-)
diff --git a/pkg/sentry/syscalls/linux/BUILD b/pkg/sentry/syscalls/linux/BUILD
deleted file mode 100644
index a2f612f45..000000000
--- a/pkg/sentry/syscalls/linux/BUILD
+++ /dev/null
@@ -1,109 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "linux",
- srcs = [
- "error.go",
- "flags.go",
- "linux64.go",
- "sigset.go",
- "sys_aio.go",
- "sys_capability.go",
- "sys_clone_amd64.go",
- "sys_clone_arm64.go",
- "sys_epoll.go",
- "sys_eventfd.go",
- "sys_file.go",
- "sys_futex.go",
- "sys_getdents.go",
- "sys_identity.go",
- "sys_inotify.go",
- "sys_lseek.go",
- "sys_membarrier.go",
- "sys_mempolicy.go",
- "sys_mmap.go",
- "sys_mount.go",
- "sys_pipe.go",
- "sys_poll.go",
- "sys_prctl.go",
- "sys_random.go",
- "sys_read.go",
- "sys_rlimit.go",
- "sys_rseq.go",
- "sys_rusage.go",
- "sys_sched.go",
- "sys_seccomp.go",
- "sys_sem.go",
- "sys_shm.go",
- "sys_signal.go",
- "sys_socket.go",
- "sys_splice.go",
- "sys_stat.go",
- "sys_stat_amd64.go",
- "sys_stat_arm64.go",
- "sys_sync.go",
- "sys_sysinfo.go",
- "sys_syslog.go",
- "sys_thread.go",
- "sys_time.go",
- "sys_timer.go",
- "sys_timerfd.go",
- "sys_tls_amd64.go",
- "sys_tls_arm64.go",
- "sys_utsname.go",
- "sys_write.go",
- "sys_xattr.go",
- "timespec.go",
- ],
- marshal = True,
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/abi",
- "//pkg/abi/linux",
- "//pkg/bpf",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/marshal",
- "//pkg/marshal/primitive",
- "//pkg/metric",
- "//pkg/rand",
- "//pkg/safemem",
- "//pkg/sentry/arch",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/anon",
- "//pkg/sentry/fs/lock",
- "//pkg/sentry/fs/timerfd",
- "//pkg/sentry/fs/tmpfs",
- "//pkg/sentry/fsbridge",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/epoll",
- "//pkg/sentry/kernel/eventfd",
- "//pkg/sentry/kernel/fasync",
- "//pkg/sentry/kernel/pipe",
- "//pkg/sentry/kernel/sched",
- "//pkg/sentry/kernel/shm",
- "//pkg/sentry/kernel/signalfd",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/limits",
- "//pkg/sentry/loader",
- "//pkg/sentry/memmap",
- "//pkg/sentry/mm",
- "//pkg/sentry/socket",
- "//pkg/sentry/socket/control",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/syscalls",
- "//pkg/sentry/usage",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/syserr",
- "//pkg/syserror",
- "//pkg/usermem",
- "//pkg/waiter",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go b/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go
new file mode 100644
index 000000000..8539f13c3
--- /dev/null
+++ b/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go
@@ -0,0 +1,718 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// If there are issues with build tag aggregation, see
+// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here
+// come from the input set of files used to generate this file. This input set
+// is filtered based on pre-defined file suffixes related to build tags, see
+// tools/defs.bzl:calculate_sets().
+
+package linux
+
+import (
+ "gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/hostarch"
+ "gvisor.dev/gvisor/pkg/marshal"
+ "io"
+ "reflect"
+ "runtime"
+ "unsafe"
+)
+
+// Marshallable types used by this file.
+var _ marshal.Marshallable = (*MessageHeader64)(nil)
+var _ marshal.Marshallable = (*SchedParam)(nil)
+var _ marshal.Marshallable = (*direntHdr)(nil)
+var _ marshal.Marshallable = (*multipleMessageHeader64)(nil)
+var _ marshal.Marshallable = (*oldDirentHdr)(nil)
+var _ marshal.Marshallable = (*rlimit64)(nil)
+var _ marshal.Marshallable = (*userSockFprog)(nil)
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (d *direntHdr) SizeBytes() int {
+ return 1 +
+ (*oldDirentHdr)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (d *direntHdr) MarshalBytes(dst []byte) {
+ d.OldHdr.MarshalBytes(dst[:d.OldHdr.SizeBytes()])
+ dst = dst[d.OldHdr.SizeBytes():]
+ dst[0] = byte(d.Typ)
+ dst = dst[1:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (d *direntHdr) UnmarshalBytes(src []byte) {
+ d.OldHdr.UnmarshalBytes(src[:d.OldHdr.SizeBytes()])
+ src = src[d.OldHdr.SizeBytes():]
+ d.Typ = uint8(src[0])
+ src = src[1:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (d *direntHdr) Packed() bool {
+ return false
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (d *direntHdr) MarshalUnsafe(dst []byte) {
+ // Type direntHdr doesn't have a packed layout in memory, fallback to MarshalBytes.
+ d.MarshalBytes(dst)
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (d *direntHdr) UnmarshalUnsafe(src []byte) {
+ // Type direntHdr doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ d.UnmarshalBytes(src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (d *direntHdr) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Type direntHdr doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(d.SizeBytes()) // escapes: okay.
+ d.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (d *direntHdr) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return d.CopyOutN(cc, addr, d.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (d *direntHdr) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Type direntHdr doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(d.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ d.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (d *direntHdr) WriteTo(writer io.Writer) (int64, error) {
+ // Type direntHdr doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, d.SizeBytes())
+ d.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (o *oldDirentHdr) SizeBytes() int {
+ return 18
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (o *oldDirentHdr) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(o.Ino))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(o.Off))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(o.Reclen))
+ dst = dst[2:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (o *oldDirentHdr) UnmarshalBytes(src []byte) {
+ o.Ino = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ o.Off = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ o.Reclen = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (o *oldDirentHdr) Packed() bool {
+ return false
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (o *oldDirentHdr) MarshalUnsafe(dst []byte) {
+ // Type oldDirentHdr doesn't have a packed layout in memory, fallback to MarshalBytes.
+ o.MarshalBytes(dst)
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (o *oldDirentHdr) UnmarshalUnsafe(src []byte) {
+ // Type oldDirentHdr doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ o.UnmarshalBytes(src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (o *oldDirentHdr) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Type oldDirentHdr doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(o.SizeBytes()) // escapes: okay.
+ o.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (o *oldDirentHdr) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return o.CopyOutN(cc, addr, o.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (o *oldDirentHdr) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Type oldDirentHdr doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(o.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ o.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (o *oldDirentHdr) WriteTo(writer io.Writer) (int64, error) {
+ // Type oldDirentHdr doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, o.SizeBytes())
+ o.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (r *rlimit64) SizeBytes() int {
+ return 16
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (r *rlimit64) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.Cur))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.Max))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (r *rlimit64) UnmarshalBytes(src []byte) {
+ r.Cur = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ r.Max = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (r *rlimit64) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (r *rlimit64) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(r), uintptr(r.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (r *rlimit64) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(r), unsafe.Pointer(&src[0]), uintptr(r.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (r *rlimit64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(r)))
+ hdr.Len = r.SizeBytes()
+ hdr.Cap = r.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that r
+ // must live until the use above.
+ runtime.KeepAlive(r) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (r *rlimit64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return r.CopyOutN(cc, addr, r.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (r *rlimit64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(r)))
+ hdr.Len = r.SizeBytes()
+ hdr.Cap = r.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that r
+ // must live until the use above.
+ runtime.KeepAlive(r) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (r *rlimit64) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(r)))
+ hdr.Len = r.SizeBytes()
+ hdr.Cap = r.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that r
+ // must live until the use above.
+ runtime.KeepAlive(r) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
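Because rlimit64 is packed, the MarshalBytes/UnmarshalBytes pair above fixes a 16-byte layout: Cur then Max, each written through hostarch.ByteOrder. A hand-written round-trip sketch of that layout (illustrative only; wireRlimit is a stand-in type defined here, not the sentry's own struct):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

// wireRlimit mirrors the generated layout: two host-order uint64 values,
// Cur followed by Max, 16 bytes total.
type wireRlimit struct {
	Cur, Max uint64
}

func marshal(r wireRlimit) []byte {
	buf := make([]byte, 16)
	hostarch.ByteOrder.PutUint64(buf[0:8], r.Cur)
	hostarch.ByteOrder.PutUint64(buf[8:16], r.Max)
	return buf
}

func unmarshal(buf []byte) wireRlimit {
	return wireRlimit{
		Cur: hostarch.ByteOrder.Uint64(buf[0:8]),
		Max: hostarch.ByteOrder.Uint64(buf[8:16]),
	}
}

func main() {
	in := wireRlimit{Cur: 1024, Max: 4096}
	out := unmarshal(marshal(in))
	fmt.Println(out == in) // true: the layout round-trips.
}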
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SchedParam) SizeBytes() int {
+ return 4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SchedParam) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.schedPriority))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SchedParam) UnmarshalBytes(src []byte) {
+ s.schedPriority = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SchedParam) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SchedParam) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SchedParam) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SchedParam) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SchedParam) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SchedParam) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SchedParam) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (u *userSockFprog) SizeBytes() int {
+ return 10 +
+ 1*6
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (u *userSockFprog) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(u.Len))
+ dst = dst[2:]
+ // Padding: dst[:sizeof(byte)*6] ~= [6]byte{0}
+ dst = dst[1*(6):]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(u.Filter))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (u *userSockFprog) UnmarshalBytes(src []byte) {
+ u.Len = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ // Padding: ~ copy([6]byte(u._), src[:sizeof(byte)*6])
+ src = src[1*(6):]
+ u.Filter = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (u *userSockFprog) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (u *userSockFprog) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(u), uintptr(u.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (u *userSockFprog) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(u), unsafe.Pointer(&src[0]), uintptr(u.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (u *userSockFprog) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (u *userSockFprog) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return u.CopyOutN(cc, addr, u.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (u *userSockFprog) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (u *userSockFprog) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (m *MessageHeader64) SizeBytes() int {
+ return 56
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (m *MessageHeader64) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.Name))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(m.NameLen))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.Iov))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.IovLen))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.Control))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.ControlLen))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(m.Flags))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(int32)] ~= int32(0)
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (m *MessageHeader64) UnmarshalBytes(src []byte) {
+ m.Name = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ m.NameLen = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+ m.Iov = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ m.IovLen = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ m.Control = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ m.ControlLen = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ m.Flags = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ int32 ~= src[:sizeof(int32)]
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (m *MessageHeader64) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (m *MessageHeader64) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(m), uintptr(m.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (m *MessageHeader64) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(m), unsafe.Pointer(&src[0]), uintptr(m.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m)))
+ hdr.Len = m.SizeBytes()
+ hdr.Cap = m.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that m
+ // must live until the use above.
+ runtime.KeepAlive(m) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (m *MessageHeader64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return m.CopyOutN(cc, addr, m.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (m *MessageHeader64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m)))
+ hdr.Len = m.SizeBytes()
+ hdr.Cap = m.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that m
+ // must live until the use above.
+ runtime.KeepAlive(m) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (m *MessageHeader64) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m)))
+ hdr.Len = m.SizeBytes()
+ hdr.Cap = m.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that m
+ // must live until the use above.
+ runtime.KeepAlive(m) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (m *multipleMessageHeader64) SizeBytes() int {
+ return 8 +
+ (*MessageHeader64)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (m *multipleMessageHeader64) MarshalBytes(dst []byte) {
+ m.msgHdr.MarshalBytes(dst[:m.msgHdr.SizeBytes()])
+ dst = dst[m.msgHdr.SizeBytes():]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(m.msgLen))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(int32)] ~= int32(0)
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (m *multipleMessageHeader64) UnmarshalBytes(src []byte) {
+ m.msgHdr.UnmarshalBytes(src[:m.msgHdr.SizeBytes()])
+ src = src[m.msgHdr.SizeBytes():]
+ m.msgLen = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ int32 ~= src[:sizeof(int32)]
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (m *multipleMessageHeader64) Packed() bool {
+ return m.msgHdr.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (m *multipleMessageHeader64) MarshalUnsafe(dst []byte) {
+ if m.msgHdr.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(m), uintptr(m.SizeBytes()))
+ } else {
+ // Type multipleMessageHeader64 doesn't have a packed layout in memory, fallback to MarshalBytes.
+ m.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (m *multipleMessageHeader64) UnmarshalUnsafe(src []byte) {
+ if m.msgHdr.Packed() {
+ gohacks.Memmove(unsafe.Pointer(m), unsafe.Pointer(&src[0]), uintptr(m.SizeBytes()))
+ } else {
+ // Type multipleMessageHeader64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ m.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !m.msgHdr.Packed() {
+ // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay.
+ m.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m)))
+ hdr.Len = m.SizeBytes()
+ hdr.Cap = m.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that m
+ // must live until the use above.
+ runtime.KeepAlive(m) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (m *multipleMessageHeader64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return m.CopyOutN(cc, addr, m.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (m *multipleMessageHeader64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !m.msgHdr.Packed() {
+ // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ m.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m)))
+ hdr.Len = m.SizeBytes()
+ hdr.Cap = m.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that m
+ // must live until the use above.
+ runtime.KeepAlive(m) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (m *multipleMessageHeader64) WriteTo(writer io.Writer) (int64, error) {
+ if !m.msgHdr.Packed() {
+ // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, m.SizeBytes())
+ m.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m)))
+ hdr.Len = m.SizeBytes()
+ hdr.Cap = m.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that m
+ // must live until the use above.
+ runtime.KeepAlive(m) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
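For packed types, the CopyIn/CopyOut/WriteTo fast paths above skip the scratch buffer entirely: they build a byte slice whose header points straight at the struct's memory, hand that slice to the copier, and then pin the struct with runtime.KeepAlive because the compiler's escape analysis was bypassed. A stripped-down, hand-written sketch of the same trick (illustrative only; point is a made-up type, and this relies on unsafe aliasing exactly as the generated comments warn):

package main

import (
	"fmt"
	"reflect"
	"runtime"
	"unsafe"
)

// point is a hypothetical packed struct: two fixed-size fields, no padding.
type point struct {
	X uint32
	Y uint32
}

func main() {
	p := &point{X: 1, Y: 2}

	// Construct a slice backed by p's underlying memory, without copying.
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(unsafe.Pointer(p))
	hdr.Len = int(unsafe.Sizeof(*p))
	hdr.Cap = int(unsafe.Sizeof(*p))

	// buf now aliases p: 8 bytes of raw struct memory.
	fmt.Println(len(buf), buf)

	// Since escape analysis was bypassed via the uintptr above, keep p alive
	// until the aliasing slice is no longer used.
	runtime.KeepAlive(p)
}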
diff --git a/pkg/sentry/syscalls/linux/linux_amd64_abi_autogen_unsafe.go b/pkg/sentry/syscalls/linux/linux_amd64_abi_autogen_unsafe.go
new file mode 100644
index 000000000..8238cf9c8
--- /dev/null
+++ b/pkg/sentry/syscalls/linux/linux_amd64_abi_autogen_unsafe.go
@@ -0,0 +1,17 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// If there are issues with build tag aggregation, see
+// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here
+// come from the input set of files used to generate this file. This input set
+// is filtered based on pre-defined file suffixes related to build tags, see
+// tools/defs.bzl:calculate_sets().
+
+// +build amd64
+// +build amd64
+// +build amd64
+
+package linux
+
+import (
+)
+
diff --git a/pkg/sentry/syscalls/linux/linux_amd64_state_autogen.go b/pkg/sentry/syscalls/linux/linux_amd64_state_autogen.go
new file mode 100644
index 000000000..b3e3be0b6
--- /dev/null
+++ b/pkg/sentry/syscalls/linux/linux_amd64_state_autogen.go
@@ -0,0 +1,7 @@
+// automatically generated by stateify.
+
+// +build amd64
+// +build amd64
+// +build amd64
+
+package linux
diff --git a/pkg/sentry/syscalls/linux/linux_arm64_abi_autogen_unsafe.go b/pkg/sentry/syscalls/linux/linux_arm64_abi_autogen_unsafe.go
new file mode 100644
index 000000000..6f0f546ea
--- /dev/null
+++ b/pkg/sentry/syscalls/linux/linux_arm64_abi_autogen_unsafe.go
@@ -0,0 +1,17 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// If there are issues with build tag aggregation, see
+// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here
+// come from the input set of files used to generate this file. This input set
+// is filtered based on pre-defined file suffixes related to build tags, see
+// tools/defs.bzl:calculate_sets().
+
+// +build arm64
+// +build arm64
+// +build arm64
+
+package linux
+
+import (
+)
+
diff --git a/pkg/sentry/syscalls/linux/linux_arm64_state_autogen.go b/pkg/sentry/syscalls/linux/linux_arm64_state_autogen.go
new file mode 100644
index 000000000..f03e36ddc
--- /dev/null
+++ b/pkg/sentry/syscalls/linux/linux_arm64_state_autogen.go
@@ -0,0 +1,7 @@
+// automatically generated by stateify.
+
+// +build arm64
+// +build arm64
+// +build arm64
+
+package linux
diff --git a/pkg/sentry/syscalls/linux/linux_state_autogen.go b/pkg/sentry/syscalls/linux/linux_state_autogen.go
new file mode 100644
index 000000000..cc577e4ac
--- /dev/null
+++ b/pkg/sentry/syscalls/linux/linux_state_autogen.go
@@ -0,0 +1,112 @@
+// automatically generated by stateify.
+
+package linux
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (f *futexWaitRestartBlock) StateTypeName() string {
+ return "pkg/sentry/syscalls/linux.futexWaitRestartBlock"
+}
+
+func (f *futexWaitRestartBlock) StateFields() []string {
+ return []string{
+ "duration",
+ "addr",
+ "private",
+ "val",
+ "mask",
+ }
+}
+
+func (f *futexWaitRestartBlock) beforeSave() {}
+
+// +checklocksignore
+func (f *futexWaitRestartBlock) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.duration)
+ stateSinkObject.Save(1, &f.addr)
+ stateSinkObject.Save(2, &f.private)
+ stateSinkObject.Save(3, &f.val)
+ stateSinkObject.Save(4, &f.mask)
+}
+
+func (f *futexWaitRestartBlock) afterLoad() {}
+
+// +checklocksignore
+func (f *futexWaitRestartBlock) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.duration)
+ stateSourceObject.Load(1, &f.addr)
+ stateSourceObject.Load(2, &f.private)
+ stateSourceObject.Load(3, &f.val)
+ stateSourceObject.Load(4, &f.mask)
+}
+
+func (p *pollRestartBlock) StateTypeName() string {
+ return "pkg/sentry/syscalls/linux.pollRestartBlock"
+}
+
+func (p *pollRestartBlock) StateFields() []string {
+ return []string{
+ "pfdAddr",
+ "nfds",
+ "timeout",
+ }
+}
+
+func (p *pollRestartBlock) beforeSave() {}
+
+// +checklocksignore
+func (p *pollRestartBlock) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+ stateSinkObject.Save(0, &p.pfdAddr)
+ stateSinkObject.Save(1, &p.nfds)
+ stateSinkObject.Save(2, &p.timeout)
+}
+
+func (p *pollRestartBlock) afterLoad() {}
+
+// +checklocksignore
+func (p *pollRestartBlock) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &p.pfdAddr)
+ stateSourceObject.Load(1, &p.nfds)
+ stateSourceObject.Load(2, &p.timeout)
+}
+
+func (n *clockNanosleepRestartBlock) StateTypeName() string {
+ return "pkg/sentry/syscalls/linux.clockNanosleepRestartBlock"
+}
+
+func (n *clockNanosleepRestartBlock) StateFields() []string {
+ return []string{
+ "c",
+ "end",
+ "rem",
+ }
+}
+
+func (n *clockNanosleepRestartBlock) beforeSave() {}
+
+// +checklocksignore
+func (n *clockNanosleepRestartBlock) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.c)
+ stateSinkObject.Save(1, &n.end)
+ stateSinkObject.Save(2, &n.rem)
+}
+
+func (n *clockNanosleepRestartBlock) afterLoad() {}
+
+// +checklocksignore
+func (n *clockNanosleepRestartBlock) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.c)
+ stateSourceObject.Load(1, &n.end)
+ stateSourceObject.Load(2, &n.rem)
+}
+
+func init() {
+ state.Register((*futexWaitRestartBlock)(nil))
+ state.Register((*pollRestartBlock)(nil))
+ state.Register((*clockNanosleepRestartBlock)(nil))
+}
diff --git a/pkg/sentry/syscalls/linux/vfs2/BUILD b/pkg/sentry/syscalls/linux/vfs2/BUILD
deleted file mode 100644
index a73f096ff..000000000
--- a/pkg/sentry/syscalls/linux/vfs2/BUILD
+++ /dev/null
@@ -1,80 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "vfs2",
- srcs = [
- "aio.go",
- "epoll.go",
- "eventfd.go",
- "execve.go",
- "fd.go",
- "filesystem.go",
- "fscontext.go",
- "getdents.go",
- "inotify.go",
- "ioctl.go",
- "lock.go",
- "memfd.go",
- "mmap.go",
- "mount.go",
- "path.go",
- "pipe.go",
- "poll.go",
- "read_write.go",
- "setstat.go",
- "signal.go",
- "socket.go",
- "splice.go",
- "stat.go",
- "stat_amd64.go",
- "stat_arm64.go",
- "sync.go",
- "timerfd.go",
- "vfs2.go",
- "xattr.go",
- ],
- marshal = True,
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/bits",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/fspath",
- "//pkg/gohacks",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/marshal",
- "//pkg/marshal/primitive",
- "//pkg/sentry/arch",
- "//pkg/sentry/fs/lock",
- "//pkg/sentry/fsbridge",
- "//pkg/sentry/fsimpl/eventfd",
- "//pkg/sentry/fsimpl/pipefs",
- "//pkg/sentry/fsimpl/signalfd",
- "//pkg/sentry/fsimpl/timerfd",
- "//pkg/sentry/fsimpl/tmpfs",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/fasync",
- "//pkg/sentry/kernel/pipe",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/limits",
- "//pkg/sentry/loader",
- "//pkg/sentry/memmap",
- "//pkg/sentry/mm",
- "//pkg/sentry/socket",
- "//pkg/sentry/socket/control",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/syscalls",
- "//pkg/sentry/syscalls/linux",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/syserr",
- "//pkg/syserror",
- "//pkg/usermem",
- "//pkg/waiter",
- ],
-)
diff --git a/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go b/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go
new file mode 100644
index 000000000..0525fce4c
--- /dev/null
+++ b/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go
@@ -0,0 +1,372 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// If there are issues with build tag aggregation, see
+// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here
+// come from the input set of files used to generate this file. This input set
+// is filtered based on pre-defined file suffixes related to build tags, see
+// tools/defs.bzl:calculate_sets().
+
+package vfs2
+
+import (
+ "gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/hostarch"
+ "gvisor.dev/gvisor/pkg/marshal"
+ "io"
+ "reflect"
+ "runtime"
+ "unsafe"
+)
+
+// Marshallable types used by this file.
+var _ marshal.Marshallable = (*MessageHeader64)(nil)
+var _ marshal.Marshallable = (*multipleMessageHeader64)(nil)
+var _ marshal.Marshallable = (*sigSetWithSize)(nil)
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *sigSetWithSize) SizeBytes() int {
+ return 16
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *sigSetWithSize) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.sigsetAddr))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.sizeofSigset))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *sigSetWithSize) UnmarshalBytes(src []byte) {
+ s.sigsetAddr = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.sizeofSigset = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *sigSetWithSize) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *sigSetWithSize) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *sigSetWithSize) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *sigSetWithSize) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *sigSetWithSize) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *sigSetWithSize) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *sigSetWithSize) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (m *MessageHeader64) SizeBytes() int {
+ return 56
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (m *MessageHeader64) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.Name))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(m.NameLen))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.Iov))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.IovLen))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.Control))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.ControlLen))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(m.Flags))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(int32)] ~= int32(0)
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (m *MessageHeader64) UnmarshalBytes(src []byte) {
+ m.Name = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ m.NameLen = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+ m.Iov = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ m.IovLen = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ m.Control = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ m.ControlLen = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ m.Flags = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ int32 ~= src[:sizeof(int32)]
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (m *MessageHeader64) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (m *MessageHeader64) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(m), uintptr(m.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (m *MessageHeader64) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(m), unsafe.Pointer(&src[0]), uintptr(m.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m)))
+ hdr.Len = m.SizeBytes()
+ hdr.Cap = m.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that m
+ // must live until the use above.
+ runtime.KeepAlive(m) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (m *MessageHeader64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return m.CopyOutN(cc, addr, m.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (m *MessageHeader64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m)))
+ hdr.Len = m.SizeBytes()
+ hdr.Cap = m.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that m
+ // must live until the use above.
+ runtime.KeepAlive(m) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (m *MessageHeader64) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m)))
+ hdr.Len = m.SizeBytes()
+ hdr.Cap = m.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that m
+ // must live until the use above.
+ runtime.KeepAlive(m) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (m *multipleMessageHeader64) SizeBytes() int {
+ return 8 +
+ (*MessageHeader64)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (m *multipleMessageHeader64) MarshalBytes(dst []byte) {
+ m.msgHdr.MarshalBytes(dst[:m.msgHdr.SizeBytes()])
+ dst = dst[m.msgHdr.SizeBytes():]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(m.msgLen))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(int32)] ~= int32(0)
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (m *multipleMessageHeader64) UnmarshalBytes(src []byte) {
+ m.msgHdr.UnmarshalBytes(src[:m.msgHdr.SizeBytes()])
+ src = src[m.msgHdr.SizeBytes():]
+ m.msgLen = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ int32 ~= src[:sizeof(int32)]
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (m *multipleMessageHeader64) Packed() bool {
+ return m.msgHdr.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (m *multipleMessageHeader64) MarshalUnsafe(dst []byte) {
+ if m.msgHdr.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(m), uintptr(m.SizeBytes()))
+ } else {
+		// Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ m.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (m *multipleMessageHeader64) UnmarshalUnsafe(src []byte) {
+ if m.msgHdr.Packed() {
+ gohacks.Memmove(unsafe.Pointer(m), unsafe.Pointer(&src[0]), uintptr(m.SizeBytes()))
+ } else {
+		// Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ m.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !m.msgHdr.Packed() {
+ // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay.
+ m.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m)))
+ hdr.Len = m.SizeBytes()
+ hdr.Cap = m.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that m
+ // must live until the use above.
+ runtime.KeepAlive(m) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (m *multipleMessageHeader64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return m.CopyOutN(cc, addr, m.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (m *multipleMessageHeader64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !m.msgHdr.Packed() {
+ // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ m.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m)))
+ hdr.Len = m.SizeBytes()
+ hdr.Cap = m.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that m
+ // must live until the use above.
+ runtime.KeepAlive(m) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (m *multipleMessageHeader64) WriteTo(writer io.Writer) (int64, error) {
+ if !m.msgHdr.Packed() {
+ // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, m.SizeBytes())
+ m.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m)))
+ hdr.Len = m.SizeBytes()
+ hdr.Cap = m.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that m
+ // must live until the use above.
+ runtime.KeepAlive(m) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
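The CopyIn/CopyOut/WriteTo methods above all lean on the same trick: build a []byte view over the struct's own memory via reflect.SliceHeader, hand that view to the copier, and pin the struct with runtime.KeepAlive because the compiler's escape analysis was bypassed. A minimal, self-contained sketch of that technique follows; the type and field names are illustrative and not part of this diff.

package main

import (
    "fmt"
    "reflect"
    "runtime"
    "unsafe"
)

// wireHeader stands in for a packed, fixed-size type like MessageHeader64.
type wireHeader struct {
    A uint64
    B uint32
    C uint32
}

func main() {
    h := &wireHeader{A: 1, B: 2, C: 3}

    // Build a []byte view over h's memory without copying, as the generated
    // code does before calling CopyOutBytes, CopyInBytes, or Write.
    var buf []byte
    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
    hdr.Data = uintptr(unsafe.Pointer(h))
    hdr.Len = int(unsafe.Sizeof(*h))
    hdr.Cap = int(unsafe.Sizeof(*h))

    fmt.Println(len(buf)) // 16

    // The compiler no longer sees buf as referencing h, so keep h alive
    // until after the last use of buf.
    runtime.KeepAlive(h)
}

In the generated methods the byte view is passed to cc.CopyOutBytes, cc.CopyInBytes, or writer.Write rather than printed, and gohacks.Noescape keeps the struct's address from forcing a heap allocation in the //go:nosplit paths.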
diff --git a/pkg/sentry/syscalls/linux/vfs2/vfs2_amd64_abi_autogen_unsafe.go b/pkg/sentry/syscalls/linux/vfs2/vfs2_amd64_abi_autogen_unsafe.go
new file mode 100644
index 000000000..f07118503
--- /dev/null
+++ b/pkg/sentry/syscalls/linux/vfs2/vfs2_amd64_abi_autogen_unsafe.go
@@ -0,0 +1,15 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// If there are issues with build tag aggregation, see
+// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here
+// come from the input set of files used to generate this file. This input set
+// is filtered based on pre-defined file suffixes related to build tags, see
+// tools/defs.bzl:calculate_sets().
+
+// +build amd64
+
+package vfs2
+
+import (
+)
+
diff --git a/pkg/sentry/syscalls/linux/vfs2/vfs2_amd64_state_autogen.go b/pkg/sentry/syscalls/linux/vfs2/vfs2_amd64_state_autogen.go
new file mode 100644
index 000000000..7f5556f9c
--- /dev/null
+++ b/pkg/sentry/syscalls/linux/vfs2/vfs2_amd64_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build amd64
+
+package vfs2
diff --git a/pkg/sentry/syscalls/linux/vfs2/vfs2_arm64_abi_autogen_unsafe.go b/pkg/sentry/syscalls/linux/vfs2/vfs2_arm64_abi_autogen_unsafe.go
new file mode 100644
index 000000000..ea1f77a0f
--- /dev/null
+++ b/pkg/sentry/syscalls/linux/vfs2/vfs2_arm64_abi_autogen_unsafe.go
@@ -0,0 +1,15 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// If there are issues with build tag aggregation, see
+// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here
+// come from the input set of files used to generate this file. This input set
+// is filtered based on pre-defined file suffixes related to build tags, see
+// tools/defs.bzl:calculate_sets().
+
+// +build arm64
+
+package vfs2
+
+import (
+)
+
diff --git a/pkg/sentry/syscalls/linux/vfs2/vfs2_arm64_state_autogen.go b/pkg/sentry/syscalls/linux/vfs2/vfs2_arm64_state_autogen.go
new file mode 100644
index 000000000..3e3862045
--- /dev/null
+++ b/pkg/sentry/syscalls/linux/vfs2/vfs2_arm64_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build arm64
+
+package vfs2
diff --git a/pkg/sentry/syscalls/linux/vfs2/vfs2_state_autogen.go b/pkg/sentry/syscalls/linux/vfs2/vfs2_state_autogen.go
new file mode 100644
index 000000000..d02c8467f
--- /dev/null
+++ b/pkg/sentry/syscalls/linux/vfs2/vfs2_state_autogen.go
@@ -0,0 +1,42 @@
+// automatically generated by stateify.
+
+package vfs2
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (p *pollRestartBlock) StateTypeName() string {
+ return "pkg/sentry/syscalls/linux/vfs2.pollRestartBlock"
+}
+
+func (p *pollRestartBlock) StateFields() []string {
+ return []string{
+ "pfdAddr",
+ "nfds",
+ "timeout",
+ }
+}
+
+func (p *pollRestartBlock) beforeSave() {}
+
+// +checklocksignore
+func (p *pollRestartBlock) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+ stateSinkObject.Save(0, &p.pfdAddr)
+ stateSinkObject.Save(1, &p.nfds)
+ stateSinkObject.Save(2, &p.timeout)
+}
+
+func (p *pollRestartBlock) afterLoad() {}
+
+// +checklocksignore
+func (p *pollRestartBlock) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &p.pfdAddr)
+ stateSourceObject.Load(1, &p.nfds)
+ stateSourceObject.Load(2, &p.timeout)
+}
+
+func init() {
+ state.Register((*pollRestartBlock)(nil))
+}
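For context, the StateSave/StateLoad methods above are what stateify emits for a small hand-written type. Below is a hypothetical sketch of the kind of declaration that drives such output; the package name and field types are stand-ins rather than gVisor's actual ones, and the marker comment is what requests generation.

package vfs2sketch

import "time"

// +stateify savable
type pollRestartBlock struct {
    pfdAddr uintptr       // User address of the pollfd array (hostarch.Addr in gVisor).
    nfds    uint          // Number of pollfd entries.
    timeout time.Duration // Remaining poll timeout.
}

The generated StateFields list must stay in sync with these field names, which is why the code is regenerated rather than maintained by hand.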
diff --git a/pkg/sentry/syscalls/syscalls_state_autogen.go b/pkg/sentry/syscalls/syscalls_state_autogen.go
new file mode 100644
index 000000000..b577e39a3
--- /dev/null
+++ b/pkg/sentry/syscalls/syscalls_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package syscalls
diff --git a/pkg/sentry/time/BUILD b/pkg/sentry/time/BUILD
deleted file mode 100644
index 36d999c47..000000000
--- a/pkg/sentry/time/BUILD
+++ /dev/null
@@ -1,55 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "seqatomic_parameters",
- out = "seqatomic_parameters_unsafe.go",
- package = "time",
- suffix = "Parameters",
- template = "//pkg/sync/seqatomic:generic_seqatomic",
- types = {
- "Value": "Parameters",
- },
-)
-
-go_library(
- name = "time",
- srcs = [
- "arith_arm64.go",
- "calibrated_clock.go",
- "clock_id.go",
- "clocks.go",
- "muldiv_amd64.s",
- "muldiv_arm64.s",
- "parameters.go",
- "sampler.go",
- "sampler_amd64.go",
- "sampler_arm64.go",
- "sampler_unsafe.go",
- "seqatomic_parameters_unsafe.go",
- "tsc_amd64.s",
- "tsc_arm64.s",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/errors/linuxerr",
- "//pkg/gohacks",
- "//pkg/log",
- "//pkg/metric",
- "//pkg/sync",
- "//pkg/syserror",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "time_test",
- srcs = [
- "calibrated_clock_test.go",
- "parameters_test.go",
- "sampler_test.go",
- ],
- library = ":time",
-)
diff --git a/pkg/sentry/time/LICENSE b/pkg/sentry/time/LICENSE
deleted file mode 100644
index 6a66aea5e..000000000
--- a/pkg/sentry/time/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/pkg/sentry/time/calibrated_clock_test.go b/pkg/sentry/time/calibrated_clock_test.go
deleted file mode 100644
index 0a4b1f1bf..000000000
--- a/pkg/sentry/time/calibrated_clock_test.go
+++ /dev/null
@@ -1,187 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package time
-
-import (
- "testing"
- "time"
-)
-
-// newTestCalibratedClock returns a CalibratedClock that collects samples from
-// the given sample list and cycle counts from the given cycle list.
-func newTestCalibratedClock(samples []sample, cycles []TSCValue) *CalibratedClock {
- return &CalibratedClock{
- ref: newTestSampler(samples, cycles),
- }
-}
-
-func TestConstantFrequency(t *testing.T) {
- // Perfectly constant frequency.
- samples := []sample{
- {before: 100000, after: 100000 + defaultOverheadCycles, ref: 100},
- {before: 200000, after: 200000 + defaultOverheadCycles, ref: 200},
- {before: 300000, after: 300000 + defaultOverheadCycles, ref: 300},
- {before: 400000, after: 400000 + defaultOverheadCycles, ref: 400},
- {before: 500000, after: 500000 + defaultOverheadCycles, ref: 500},
- {before: 600000, after: 600000 + defaultOverheadCycles, ref: 600},
- {before: 700000, after: 700000 + defaultOverheadCycles, ref: 700},
- }
-
- c := newTestCalibratedClock(samples, nil)
-
- // Update from all samples.
- for range samples {
- c.Update()
- }
-
- c.mu.RLock()
- if !c.ready {
- c.mu.RUnlock()
- t.Fatalf("clock not ready")
- return // For checklocks consistency.
- }
- // A bit after the last sample.
- now, ok := c.params.ComputeTime(750000)
- c.mu.RUnlock()
- if !ok {
- t.Fatalf("ComputeTime ok got %v want true", ok)
- }
-
- t.Logf("now: %v", now)
-
- // Time should be between the current sample and where we'd expect the
- // next sample.
- if now < 700 || now > 800 {
- t.Errorf("now got %v want > 700 && < 800", now)
- }
-}
-
-func TestErrorCorrection(t *testing.T) {
- testCases := []struct {
- name string
- samples [5]sample
- projectedTimeStart int64
- projectedTimeEnd int64
- }{
- // Initial calibration should be ~1MHz for each of these, and
- // the reference clock changes in samples[2].
- {
- name: "slow-down",
- samples: [5]sample{
- {before: 1000000, after: 1000001, ref: ReferenceNS(1 * ApproxUpdateInterval.Nanoseconds())},
- {before: 2000000, after: 2000001, ref: ReferenceNS(2 * ApproxUpdateInterval.Nanoseconds())},
-				// Reference clock has slowed down, causing 10ms of error.
- {before: 3010000, after: 3010001, ref: ReferenceNS(3 * ApproxUpdateInterval.Nanoseconds())},
- {before: 4020000, after: 4020001, ref: ReferenceNS(4 * ApproxUpdateInterval.Nanoseconds())},
- {before: 5030000, after: 5030001, ref: ReferenceNS(5 * ApproxUpdateInterval.Nanoseconds())},
- },
- projectedTimeStart: 3005 * time.Millisecond.Nanoseconds(),
- projectedTimeEnd: 3015 * time.Millisecond.Nanoseconds(),
- },
- {
- name: "speed-up",
- samples: [5]sample{
- {before: 1000000, after: 1000001, ref: ReferenceNS(1 * ApproxUpdateInterval.Nanoseconds())},
- {before: 2000000, after: 2000001, ref: ReferenceNS(2 * ApproxUpdateInterval.Nanoseconds())},
-				// Reference clock has sped up, causing 10ms of error.
- {before: 2990000, after: 2990001, ref: ReferenceNS(3 * ApproxUpdateInterval.Nanoseconds())},
- {before: 3980000, after: 3980001, ref: ReferenceNS(4 * ApproxUpdateInterval.Nanoseconds())},
- {before: 4970000, after: 4970001, ref: ReferenceNS(5 * ApproxUpdateInterval.Nanoseconds())},
- },
- projectedTimeStart: 2985 * time.Millisecond.Nanoseconds(),
- projectedTimeEnd: 2995 * time.Millisecond.Nanoseconds(),
- },
- }
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- c := newTestCalibratedClock(tc.samples[:], nil)
-
- // Initial calibration takes two updates.
- _, ok := c.Update()
- if ok {
- t.Fatalf("Update ready too early")
- }
-
- params, ok := c.Update()
- if !ok {
- t.Fatalf("Update not ready")
- }
-
- // Initial calibration is ~1MHz.
- hz := params.Frequency
- if hz < 990000 || hz > 1010000 {
- t.Fatalf("Frequency got %v want > 990kHz && < 1010kHz", hz)
- }
-
- // Project time at the next update. Given the 1MHz
-			// calibration, it is expected to be ~3.01s/2.99s, not
- // the actual 3s.
- //
- // N.B. the next update time is the "after" time above.
- projected, ok := params.ComputeTime(tc.samples[2].after)
- if !ok {
- t.Fatalf("ComputeTime ok got %v want true", ok)
- }
- if projected < tc.projectedTimeStart || projected > tc.projectedTimeEnd {
- t.Fatalf("ComputeTime(%v) got %v want > %v && < %v", tc.samples[2].after, projected, tc.projectedTimeStart, tc.projectedTimeEnd)
- }
-
- // Update again to see the changed reference clock.
- params, ok = c.Update()
- if !ok {
- t.Fatalf("Update not ready")
- }
-
- // We now know that TSC = tc.samples[2].after -> 3s,
-			// but the previous params indicated that TSC =
-			// tc.samples[2].after -> ~3.01s/2.99s. We can't allow the
- // clock to go backwards, and having the clock jump
- // forwards is undesirable. There should be a smooth
- // transition that corrects the clock error over time.
- // Check that the clock is continuous at TSC =
- // tc.samples[2].after.
- newProjected, ok := params.ComputeTime(tc.samples[2].after)
- if !ok {
- t.Fatalf("ComputeTime ok got %v want true", ok)
- }
- if newProjected != projected {
- t.Errorf("Discontinuous time; ComputeTime(%v) got %v want %v", tc.samples[2].after, newProjected, projected)
- }
-
-			// As the reference clock stabilizes, ensure that the clock error
- // decreases.
- initialErr := c.errorNS
- t.Logf("initial error: %v ns", initialErr)
-
- _, ok = c.Update()
- if !ok {
- t.Fatalf("Update not ready")
- }
- if c.errorNS.Magnitude() > initialErr.Magnitude() {
- t.Errorf("errorNS increased, got %v want |%v| <= |%v|", c.errorNS, c.errorNS, initialErr)
- }
-
- _, ok = c.Update()
- if !ok {
- t.Fatalf("Update not ready")
- }
- if c.errorNS.Magnitude() > initialErr.Magnitude() {
- t.Errorf("errorNS increased, got %v want |%v| <= |%v|", c.errorNS, c.errorNS, initialErr)
- }
-
- t.Logf("final error: %v ns", c.errorNS)
- })
- }
-}
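The projections asserted above follow a simple linear model: reference time is BaseRef plus elapsed TSC cycles divided by the calibrated frequency, with time frozen rather than allowed to run backwards. Here is a self-contained sketch of that model with illustrative names and plain int64 arithmetic (the real ComputeTime must also guard against overflow, which is what muldiv64 below is for):

package main

import "fmt"

// clockParams mirrors the role of Parameters in the tests above.
type clockParams struct {
    baseCycles int64 // TSC value at calibration.
    baseRefNS  int64 // Reference time at calibration, in nanoseconds.
    frequency  int64 // TSC cycles per second.
}

// computeTimeNS projects a TSC value onto the reference clock.
func computeTimeNS(p clockParams, now int64) int64 {
    if now < p.baseCycles {
        // Freeze time rather than letting it go backwards.
        return p.baseRefNS
    }
    return p.baseRefNS + (now-p.baseCycles)*1_000_000_000/p.frequency
}

func main() {
    // With an idealized 1MHz calibration anchored at the second sample of
    // the "slow-down" case, projecting samples[2].after lands near 3.01s,
    // inside the [3.005s, 3.015s] window asserted above.
    p := clockParams{baseCycles: 2000001, baseRefNS: 2_000_000_000, frequency: 1_000_000}
    fmt.Println(computeTimeNS(p, 3010001)) // 3010000000
}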
diff --git a/pkg/sentry/time/parameters_test.go b/pkg/sentry/time/parameters_test.go
deleted file mode 100644
index 0ce1257f6..000000000
--- a/pkg/sentry/time/parameters_test.go
+++ /dev/null
@@ -1,501 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package time
-
-import (
- "math"
- "testing"
- "time"
-)
-
-func TestParametersComputeTime(t *testing.T) {
- testCases := []struct {
- name string
- params Parameters
- now TSCValue
- want int64
- }{
- {
- // Now is the same as the base cycles.
- name: "base-cycles",
- params: Parameters{
- BaseCycles: 10000,
- BaseRef: ReferenceNS(5000 * time.Millisecond.Nanoseconds()),
- Frequency: 10000,
- },
- now: 10000,
- want: 5000 * time.Millisecond.Nanoseconds(),
- },
- {
-			// Now is behind the base cycles. Time is frozen.
- name: "backwards",
- params: Parameters{
- BaseCycles: 10000,
- BaseRef: ReferenceNS(5000 * time.Millisecond.Nanoseconds()),
- Frequency: 10000,
- },
- now: 9000,
- want: 5000 * time.Millisecond.Nanoseconds(),
- },
- {
- // Now is ahead of the base cycles.
- name: "ahead",
- params: Parameters{
- BaseCycles: 10000,
- BaseRef: ReferenceNS(5000 * time.Millisecond.Nanoseconds()),
- Frequency: 10000,
- },
- now: 15000,
- want: 5500 * time.Millisecond.Nanoseconds(),
- },
- }
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- got, ok := tc.params.ComputeTime(tc.now)
- if !ok {
- t.Errorf("ComputeTime ok got %v want true", got)
- }
- if got != tc.want {
- t.Errorf("ComputeTime got %+v want %+v", got, tc.want)
- }
- })
- }
-}
-
-func TestParametersErrorAdjust(t *testing.T) {
- testCases := []struct {
- name string
- oldParams Parameters
- now TSCValue
- newParams Parameters
- want Parameters
- errorNS ReferenceNS
- wantErr bool
- }{
- {
- // newParams are perfectly continuous with oldParams
- // and don't need adjustment.
- name: "continuous",
- oldParams: Parameters{
- BaseCycles: 0,
- BaseRef: 0,
- Frequency: 10000,
- },
- now: 50000,
- newParams: Parameters{
- BaseCycles: 50000,
- BaseRef: ReferenceNS(5000 * time.Millisecond.Nanoseconds()),
- Frequency: 10000,
- },
- want: Parameters{
- BaseCycles: 50000,
- BaseRef: ReferenceNS(5000 * time.Millisecond.Nanoseconds()),
- Frequency: 10000,
- },
- },
- {
- // Same as "continuous", but with now ahead of
-			// newParams.BaseCycles. The result is the same, as
- // there is no error to correct.
- name: "continuous-nowdiff",
- oldParams: Parameters{
- BaseCycles: 0,
- BaseRef: 0,
- Frequency: 10000,
- },
- now: 60000,
- newParams: Parameters{
- BaseCycles: 50000,
- BaseRef: ReferenceNS(5000 * time.Millisecond.Nanoseconds()),
- Frequency: 10000,
- },
- want: Parameters{
- BaseCycles: 50000,
- BaseRef: ReferenceNS(5000 * time.Millisecond.Nanoseconds()),
- Frequency: 10000,
- },
- },
- {
- // errorAdjust bails out if the TSC goes backwards.
- name: "tsc-backwards",
- oldParams: Parameters{
- BaseCycles: 10000,
- BaseRef: ReferenceNS(1000 * time.Millisecond.Nanoseconds()),
- Frequency: 10000,
- },
- now: 9000,
- newParams: Parameters{
- BaseCycles: 9000,
- BaseRef: ReferenceNS(1100 * time.Millisecond.Nanoseconds()),
- Frequency: 10000,
- },
- wantErr: true,
- },
- {
- // errorAdjust bails out if new params are from after now.
- name: "params-after-now",
- oldParams: Parameters{
- BaseCycles: 10000,
- BaseRef: ReferenceNS(1000 * time.Millisecond.Nanoseconds()),
- Frequency: 10000,
- },
- now: 11000,
- newParams: Parameters{
- BaseCycles: 12000,
- BaseRef: ReferenceNS(1200 * time.Millisecond.Nanoseconds()),
- Frequency: 10000,
- },
- wantErr: true,
- },
- {
- // Host clock sped up.
- name: "speed-up",
- oldParams: Parameters{
- BaseCycles: 0,
- BaseRef: 0,
- Frequency: 10000,
- },
- now: 45000,
- // Host frequency changed to 9000 immediately after
- // oldParams was returned.
- newParams: Parameters{
- BaseCycles: 45000,
- // From oldParams, we think ref = 4.5s at cycles = 45000.
- BaseRef: ReferenceNS(5000 * time.Millisecond.Nanoseconds()),
- Frequency: 9000,
- },
- want: Parameters{
- BaseCycles: 45000,
- BaseRef: ReferenceNS(4500 * time.Millisecond.Nanoseconds()),
- // We must decrease the new frequency by 50% to
- // correct 0.5s of error in 1s
- // (ApproxUpdateInterval).
- Frequency: 4500,
- },
- errorNS: ReferenceNS(-500 * time.Millisecond.Nanoseconds()),
- },
- {
- // Host clock sped up, with now ahead of newParams.
- name: "speed-up-nowdiff",
- oldParams: Parameters{
- BaseCycles: 0,
- BaseRef: 0,
- Frequency: 10000,
- },
- now: 50000,
- // Host frequency changed to 9000 immediately after
- // oldParams was returned.
- newParams: Parameters{
- BaseCycles: 45000,
- BaseRef: ReferenceNS(5000 * time.Millisecond.Nanoseconds()),
- Frequency: 9000,
- },
- // nextRef = 6000ms
- // nextCycles = 9000 * (6000ms - 5000ms) + 45000
- // nextCycles = 9000 * (1s) + 45000
- // nextCycles = 54000
- // f = (54000 - 50000) / 1s = 4000
- //
- // ref = 5000ms - (50000 - 45000) / 4000
- // ref = 3.75s
- want: Parameters{
- BaseCycles: 45000,
- BaseRef: ReferenceNS(3750 * time.Millisecond.Nanoseconds()),
- Frequency: 4000,
- },
-			// oldNow = 50000 / 10000 = 5s
- // newNow = (50000 - 45000) / 9000 + 5s = 5.555s
- errorNS: ReferenceNS((5000*time.Millisecond - 5555555555).Nanoseconds()),
- },
- {
- // Host clock sped up. The new parameters are so far
- // ahead that the next update time already passed.
- name: "speed-up-uncorrectable-baseref",
- oldParams: Parameters{
- BaseCycles: 0,
- BaseRef: 0,
- Frequency: 10000,
- },
- now: 50000,
- // Host frequency changed to 5000 immediately after
- // oldParams was returned.
- newParams: Parameters{
- BaseCycles: 45000,
- BaseRef: ReferenceNS(9000 * time.Millisecond.Nanoseconds()),
- Frequency: 5000,
- },
-			// The next update should be at 6s, but newParams have
-			// already passed it (BaseRef is 9s). Thus it is impossible
-			// to correct the clock by then.
- wantErr: true,
- },
- {
- // Host clock sped up. The new parameters are moving so
- // fast that the next update should be before now.
- name: "speed-up-uncorrectable-frequency",
- oldParams: Parameters{
- BaseCycles: 0,
- BaseRef: 0,
- Frequency: 10000,
- },
- now: 55000,
- // Host frequency changed to 7500 immediately after
- // oldParams was returned.
- newParams: Parameters{
- BaseCycles: 45000,
- BaseRef: ReferenceNS(6000 * time.Millisecond.Nanoseconds()),
- Frequency: 7500,
- },
- // The next update should be at 6.5s, but newParams are
- // so far ahead and fast that they reach 6.5s at cycle
-			// 48750, which is before now! Thus it is impossible to
- // correct the clock by then.
- wantErr: true,
- },
- {
- // Host clock slowed down.
- name: "slow-down",
- oldParams: Parameters{
- BaseCycles: 0,
- BaseRef: 0,
- Frequency: 10000,
- },
- now: 55000,
- // Host frequency changed to 11000 immediately after
- // oldParams was returned.
- newParams: Parameters{
- BaseCycles: 55000,
- // From oldParams, we think ref = 5.5s at cycles = 55000.
- BaseRef: ReferenceNS(5000 * time.Millisecond.Nanoseconds()),
- Frequency: 11000,
- },
- want: Parameters{
- BaseCycles: 55000,
- BaseRef: ReferenceNS(5500 * time.Millisecond.Nanoseconds()),
- // We must increase the new frequency by 50% to
- // correct 0.5s of error in 1s
- // (ApproxUpdateInterval).
- Frequency: 16500,
- },
- errorNS: ReferenceNS(500 * time.Millisecond.Nanoseconds()),
- },
- {
- // Host clock slowed down, with now ahead of newParams.
- name: "slow-down-nowdiff",
- oldParams: Parameters{
- BaseCycles: 0,
- BaseRef: 0,
- Frequency: 10000,
- },
- now: 60000,
- // Host frequency changed to 11000 immediately after
- // oldParams was returned.
- newParams: Parameters{
- BaseCycles: 55000,
- BaseRef: ReferenceNS(5000 * time.Millisecond.Nanoseconds()),
- Frequency: 11000,
- },
- // nextRef = 7000ms
- // nextCycles = 11000 * (7000ms - 5000ms) + 55000
- // nextCycles = 11000 * (2000ms) + 55000
- // nextCycles = 77000
- // f = (77000 - 60000) / 1s = 17000
- //
- // ref = 6000ms - (60000 - 55000) / 17000
- // ref = 5705882353ns
- want: Parameters{
- BaseCycles: 55000,
- BaseRef: ReferenceNS(5705882353),
- Frequency: 17000,
- },
-			// oldNow = 60000 / 10000 = 6s
- // newNow = (60000 - 55000) / 11000 + 5s = 5.4545s
- errorNS: ReferenceNS((6*time.Second - 5454545454).Nanoseconds()),
- },
- {
- // Host time went backwards.
- name: "time-backwards",
- oldParams: Parameters{
- BaseCycles: 50000,
- BaseRef: ReferenceNS(5000 * time.Millisecond.Nanoseconds()),
- Frequency: 10000,
- },
- now: 60000,
- newParams: Parameters{
- BaseCycles: 60000,
- // From oldParams, we think ref = 6s at cycles = 60000.
- BaseRef: ReferenceNS(4000 * time.Millisecond.Nanoseconds()),
- Frequency: 10000,
- },
- want: Parameters{
- BaseCycles: 60000,
- BaseRef: ReferenceNS(6000 * time.Millisecond.Nanoseconds()),
- // We must increase the frequency by 200% to
- // correct 2s of error in 1s
- // (ApproxUpdateInterval).
- Frequency: 30000,
- },
- errorNS: ReferenceNS(2000 * time.Millisecond.Nanoseconds()),
- },
- {
- // Host time went backwards, with now ahead of newParams.
- name: "time-backwards-nowdiff",
- oldParams: Parameters{
- BaseCycles: 50000,
- BaseRef: ReferenceNS(5000 * time.Millisecond.Nanoseconds()),
- Frequency: 10000,
- },
- now: 65000,
- // nextRef = 7500ms
- // nextCycles = 10000 * (7500ms - 4000ms) + 60000
- // nextCycles = 10000 * (3500ms) + 60000
- // nextCycles = 95000
- // f = (95000 - 65000) / 1s = 30000
- //
- // ref = 6500ms - (65000 - 60000) / 30000
- // ref = 6333333333ns
- newParams: Parameters{
- BaseCycles: 60000,
- BaseRef: ReferenceNS(4000 * time.Millisecond.Nanoseconds()),
- Frequency: 10000,
- },
- want: Parameters{
- BaseCycles: 60000,
- BaseRef: ReferenceNS(6333333334),
- Frequency: 30000,
- },
-			// oldNow = 65000 / 10000 = 6.5s
- // newNow = (65000 - 60000) / 10000 + 4s = 4.5s
- errorNS: ReferenceNS(2000 * time.Millisecond.Nanoseconds()),
- },
- }
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- got, errorNS, err := errorAdjust(tc.oldParams, tc.newParams, tc.now)
- if err != nil && !tc.wantErr {
- t.Errorf("err got %v want nil", err)
- } else if err == nil && tc.wantErr {
- t.Errorf("err got nil want non-nil")
- }
-
- if got != tc.want {
- t.Errorf("Parameters got %+v want %+v", got, tc.want)
- }
- if errorNS != tc.errorNS {
- t.Errorf("errorNS got %v want %v", errorNS, tc.errorNS)
- }
- })
- }
-}
-
-func testMuldiv(t *testing.T, v uint64) {
- for i := uint64(1); i <= 1000000; i++ {
- mult := uint64(1000000000)
- div := i * mult
- res, ok := muldiv64(v, mult, div)
- if !ok {
- t.Errorf("Result of %v * %v / %v ok got false want true", v, mult, div)
- }
- if want := v / i; res != want {
- t.Errorf("Bad result of %v * %v / %v: got %v, want %v", v, mult, div, res, want)
- }
- }
-}
-
-func TestMulDiv(t *testing.T) {
- testMuldiv(t, math.MaxUint64)
- for i := int64(-10); i <= 10; i++ {
- testMuldiv(t, uint64(i))
- }
-}
-
-func TestMulDivZero(t *testing.T) {
- if r, ok := muldiv64(2, 4, 0); ok {
- t.Errorf("muldiv64(2, 4, 0) got %d, ok want !ok", r)
- }
-
- if r, ok := muldiv64(0, 0, 0); ok {
- t.Errorf("muldiv64(0, 0, 0) got %d, ok want !ok", r)
- }
-}
-
-func TestMulDivOverflow(t *testing.T) {
- testCases := []struct {
- name string
- val uint64
- mult uint64
- div uint64
- ok bool
- ret uint64
- }{
- {
- name: "2^62",
- val: 1 << 63,
- mult: 4,
- div: 8,
- ok: true,
- ret: 1 << 62,
- },
- {
- name: "2^64-1",
- val: 0xffffffffffffffff,
- mult: 1,
- div: 1,
- ok: true,
- ret: 0xffffffffffffffff,
- },
- {
- name: "2^64",
- val: 1 << 63,
- mult: 4,
- div: 2,
- ok: false,
- },
- {
- name: "2^125",
- val: 1 << 63,
- mult: 1 << 63,
- div: 2,
- ok: false,
- },
- }
-
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- r, ok := muldiv64(tc.val, tc.mult, tc.div)
- if ok != tc.ok {
- t.Errorf("ok got %v want %v", ok, tc.ok)
- }
- if tc.ok && r != tc.ret {
- t.Errorf("ret got %v want %v", r, tc.ret)
- }
- })
- }
-}
-
-func BenchmarkMuldiv64(b *testing.B) {
- var v uint64 = math.MaxUint64
- for i := uint64(1); i <= 1000000; i++ {
- mult := uint64(1000000000)
- div := i * mult
- res, ok := muldiv64(v, mult, div)
- if !ok {
- b.Errorf("Result of %v * %v / %v ok got false want true", v, mult, div)
- }
- if want := v / i; res != want {
- b.Errorf("Bad result of %v * %v / %v: got %v, want %v", v, mult, div, res, want)
- }
- }
-}
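The muldiv64 routine exercised above computes (val * mult) / div through a 128-bit intermediate and reports failure on division by zero or on a quotient that does not fit in 64 bits; per the deleted BUILD file, the real implementation lives in muldiv_amd64.s and muldiv_arm64.s. A portable sketch with the behavior these test cases expect, built on math/bits (names are illustrative):

package main

import (
    "fmt"
    "math/bits"
)

// muldiv64Sketch returns (val * mult) / div and whether the result fits in a
// uint64. It mirrors the cases in TestMulDiv*, not gVisor's assembly.
func muldiv64Sketch(val, mult, div uint64) (uint64, bool) {
    if div == 0 {
        return 0, false
    }
    hi, lo := bits.Mul64(val, mult) // 128-bit product.
    if hi >= div {
        return 0, false // Quotient would overflow 64 bits.
    }
    q, _ := bits.Div64(hi, lo, div)
    return q, true
}

func main() {
    fmt.Println(muldiv64Sketch(1<<63, 4, 8)) // 4611686018427387904 true (the "2^62" case)
    fmt.Println(muldiv64Sketch(1<<63, 4, 2)) // 0 false (the "2^64" overflow case)
    fmt.Println(muldiv64Sketch(2, 4, 0))     // 0 false (division by zero)
}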
diff --git a/pkg/sentry/time/sampler_test.go b/pkg/sentry/time/sampler_test.go
deleted file mode 100644
index 3e70a1134..000000000
--- a/pkg/sentry/time/sampler_test.go
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package time
-
-import (
- "errors"
- "testing"
-)
-
-// errNoSamples is returned when testReferenceClocks runs out of samples.
-var errNoSamples = errors.New("no samples available")
-
-// testReferenceClocks returns a preset list of samples and cycle counts.
-type testReferenceClocks struct {
- samples []sample
- cycles []TSCValue
-}
-
-// Sample implements referenceClocks.Sample, returning the next sample in the list.
-func (t *testReferenceClocks) Sample(_ ClockID) (sample, error) {
- if len(t.samples) == 0 {
- return sample{}, errNoSamples
- }
-
- s := t.samples[0]
- if len(t.samples) == 1 {
- t.samples = nil
- } else {
- t.samples = t.samples[1:]
- }
-
- return s, nil
-}
-
-// Cycles implements referenceClocks.Cycles, returning the next TSCValue in the list.
-func (t *testReferenceClocks) Cycles() TSCValue {
- if len(t.cycles) == 0 {
- return 0
- }
-
- c := t.cycles[0]
- if len(t.cycles) == 1 {
- t.cycles = nil
- } else {
- t.cycles = t.cycles[1:]
- }
-
- return c
-}
-
-// newTestSampler returns a sampler that collects samples from
-// the given sample list and cycle counts from the given cycle list.
-func newTestSampler(samples []sample, cycles []TSCValue) *sampler {
- return &sampler{
- clocks: &testReferenceClocks{
- samples: samples,
- cycles: cycles,
- },
- overhead: defaultOverheadCycles,
- }
-}
-
-// generateSamples generates n samples with the given overhead.
-func generateSamples(n int, overhead TSCValue) []sample {
- samples := []sample{{before: 1000000, after: 1000000 + overhead, ref: 100}}
- for i := 0; i < n-1; i++ {
- prev := samples[len(samples)-1]
- samples = append(samples, sample{
- before: prev.before + 1000000,
- after: prev.after + 1000000,
- ref: prev.ref + 100,
- })
- }
- return samples
-}
-
-// TestSample ensures that samples can be collected.
-func TestSample(t *testing.T) {
- testCases := []struct {
- name string
- samples []sample
- err error
- }{
- {
- name: "basic",
- samples: []sample{
- {before: 100000, after: 100000 + defaultOverheadCycles, ref: 100},
- },
- err: nil,
- },
- {
- // Sample with backwards TSC ignored.
- // referenceClock should retry and get errNoSamples.
- name: "backwards-tsc-ignored",
- samples: []sample{
- {before: 100000, after: 90000, ref: 100},
- },
- err: errNoSamples,
- },
- {
- // Sample far above overhead skipped.
- // referenceClock should retry and get errNoSamples.
- name: "reject-overhead",
- samples: []sample{
- {before: 100000, after: 100000 + 5*defaultOverheadCycles, ref: 100},
- },
- err: errNoSamples,
- },
- {
- // Maximum overhead allowed is bounded.
- name: "over-max-overhead",
- // Generate a bunch of samples. The reference clock
- // needs a while to ramp up its expected overhead.
- samples: generateSamples(100, 2*maxOverheadCycles),
- err: errOverheadTooHigh,
- },
- {
- // Overhead at maximum overhead is allowed.
- name: "max-overhead",
- // Generate a bunch of samples. The reference clock
- // needs a while to ramp up its expected overhead.
- samples: generateSamples(100, maxOverheadCycles),
- err: nil,
- },
- }
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- s := newTestSampler(tc.samples, nil)
- err := s.Sample()
- if err != tc.err {
- t.Errorf("Sample err got %v want %v", err, tc.err)
- }
- })
- }
-}
-
-// TestOutliersIgnored tests that referenceClock ignores samples with very high
-// overhead.
-func TestOutliersIgnored(t *testing.T) {
- s := newTestSampler([]sample{
- {before: 100000, after: 100000 + defaultOverheadCycles, ref: 100},
- {before: 200000, after: 200000 + defaultOverheadCycles, ref: 200},
- {before: 300000, after: 300000 + defaultOverheadCycles, ref: 300},
- {before: 400000, after: 400000 + defaultOverheadCycles, ref: 400},
- {before: 500000, after: 500000 + 5*defaultOverheadCycles, ref: 500}, // Ignored
- {before: 600000, after: 600000 + defaultOverheadCycles, ref: 600},
- {before: 700000, after: 700000 + defaultOverheadCycles, ref: 700},
- }, nil)
-
- // Collect 5 samples.
- for i := 0; i < 5; i++ {
- err := s.Sample()
- if err != nil {
- t.Fatalf("Unexpected error while sampling: %v", err)
- }
- }
-
- oldest, newest, ok := s.Range()
- if !ok {
- t.Fatalf("Range not ok")
- }
-
- if oldest.ref != 100 {
- t.Errorf("oldest.ref got %v want %v", oldest.ref, 100)
- }
-
- // We skipped the high-overhead sample.
- if newest.ref != 600 {
- t.Errorf("newest.ref got %v want %v", newest.ref, 600)
- }
-}
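The cases above describe the sampler's filtering in prose. The sketch below restates that behavior as code; the struct, the threshold multiplier, and the function name are assumptions for illustration, not gVisor's actual implementation.

package main

import "fmt"

// tscSample stands in for the sample type used by the deleted tests.
type tscSample struct {
    before, after int64 // TSC reads bracketing the reference clock read.
    ref           int64 // Reference clock reading.
}

// usable reports whether a sample should feed calibration: a backwards TSC is
// ignored, overhead above the hard maximum is rejected, and overhead far above
// the current expectation is treated as an outlier.
func usable(s tscSample, expectedOverhead, maxOverhead int64) bool {
    if s.after < s.before {
        return false // TSC went backwards between the bracketing reads.
    }
    overhead := s.after - s.before
    if overhead > maxOverhead {
        return false
    }
    return overhead <= 2*expectedOverhead // The factor of 2 is illustrative.
}

func main() {
    fmt.Println(usable(tscSample{before: 100000, after: 90000}, 50, 1000))  // false: backwards TSC
    fmt.Println(usable(tscSample{before: 100000, after: 100050}, 50, 1000)) // true
    fmt.Println(usable(tscSample{before: 100000, after: 100250}, 50, 1000)) // false: outlier overhead
}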
diff --git a/pkg/sentry/time/seqatomic_parameters_unsafe.go b/pkg/sentry/time/seqatomic_parameters_unsafe.go
new file mode 100644
index 000000000..357e476ec
--- /dev/null
+++ b/pkg/sentry/time/seqatomic_parameters_unsafe.go
@@ -0,0 +1,38 @@
+package time
+
+import (
+ "unsafe"
+
+ "gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/sync"
+)
+
+// SeqAtomicLoadParameters returns a copy of *ptr, ensuring that the read does
+// not race with any writer critical sections in seq.
+//
+//go:nosplit
+func SeqAtomicLoadParameters(seq *sync.SeqCount, ptr *Parameters) Parameters {
+ for {
+ if val, ok := SeqAtomicTryLoadParameters(seq, seq.BeginRead(), ptr); ok {
+ return val
+ }
+ }
+}
+
+// SeqAtomicTryLoadParameters returns a copy of *ptr while in a reader critical
+// section in seq initiated by a call to seq.BeginRead() that returned epoch. If
+// the read would race with a writer critical section,
+// SeqAtomicTryLoadParameters returns (unspecified, false).
+//
+//go:nosplit
+func SeqAtomicTryLoadParameters(seq *sync.SeqCount, epoch sync.SeqCountEpoch, ptr *Parameters) (val Parameters, ok bool) {
+ if sync.RaceEnabled {
+
+ gohacks.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
+ } else {
+
+ val = *ptr
+ }
+ ok = seq.ReadOk(epoch)
+ return
+}
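SeqAtomicLoadParameters above is the reader half of a seqcount-protected snapshot: it retries until a read does not overlap a writer critical section. A hypothetical pairing with the writer side, assumed to live in the same package and to use the BeginWrite/EndWrite methods of pkg/sync's SeqCount (the function and variable names are illustrative):

// setParameters publishes new Parameters under the sequence counter.
func setParameters(seq *sync.SeqCount, dst *Parameters, p Parameters) {
    seq.BeginWrite()
    *dst = p
    seq.EndWrite()
}

// snapshotParameters takes a torn-free copy, retrying internally if a writer races.
func snapshotParameters(seq *sync.SeqCount, src *Parameters) Parameters {
    return SeqAtomicLoadParameters(seq, src)
}

In the generated reader, the RaceEnabled branch copies with gohacks.Memmove instead of a plain assignment because the copy may race with a writer; the read is only trusted after seq.ReadOk(epoch) confirms no writer intervened.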
diff --git a/pkg/sentry/time/time_amd64_state_autogen.go b/pkg/sentry/time/time_amd64_state_autogen.go
new file mode 100644
index 000000000..a4d3199ac
--- /dev/null
+++ b/pkg/sentry/time/time_amd64_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build amd64
+
+package time
diff --git a/pkg/sentry/time/time_arm64_state_autogen.go b/pkg/sentry/time/time_arm64_state_autogen.go
new file mode 100644
index 000000000..6929b58cd
--- /dev/null
+++ b/pkg/sentry/time/time_arm64_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build arm64
+
+package time
diff --git a/pkg/sentry/time/time_state_autogen.go b/pkg/sentry/time/time_state_autogen.go
new file mode 100644
index 000000000..2adc9c9e0
--- /dev/null
+++ b/pkg/sentry/time/time_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package time
diff --git a/pkg/sentry/time/time_unsafe_state_autogen.go b/pkg/sentry/time/time_unsafe_state_autogen.go
new file mode 100644
index 000000000..2adc9c9e0
--- /dev/null
+++ b/pkg/sentry/time/time_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package time
diff --git a/pkg/sentry/unimpl/BUILD b/pkg/sentry/unimpl/BUILD
deleted file mode 100644
index 5d4aa3a63..000000000
--- a/pkg/sentry/unimpl/BUILD
+++ /dev/null
@@ -1,20 +0,0 @@
-load("//tools:defs.bzl", "go_library", "proto_library")
-
-package(licenses = ["notice"])
-
-proto_library(
- name = "unimplemented_syscall",
- srcs = ["unimplemented_syscall.proto"],
- visibility = ["//visibility:public"],
- deps = ["//pkg/sentry/arch:registers_proto"],
-)
-
-go_library(
- name = "unimpl",
- srcs = ["events.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/context",
- "//pkg/log",
- ],
-)
diff --git a/pkg/sentry/unimpl/unimpl_state_autogen.go b/pkg/sentry/unimpl/unimpl_state_autogen.go
new file mode 100644
index 000000000..b37d16f87
--- /dev/null
+++ b/pkg/sentry/unimpl/unimpl_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package unimpl
diff --git a/pkg/sentry/unimpl/unimplemented_syscall.proto b/pkg/sentry/unimpl/unimplemented_syscall.proto
deleted file mode 100644
index 0d7a94be7..000000000
--- a/pkg/sentry/unimpl/unimplemented_syscall.proto
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package gvisor;
-
-import "pkg/sentry/arch/registers.proto";
-
-message UnimplementedSyscall {
- // Task ID.
- int32 tid = 1;
-
- // Registers at the time of the call.
- Registers registers = 2;
-}
diff --git a/pkg/sentry/unimpl/unimplemented_syscall_go_proto/unimplemented_syscall.pb.go b/pkg/sentry/unimpl/unimplemented_syscall_go_proto/unimplemented_syscall.pb.go
new file mode 100644
index 000000000..aa85c330a
--- /dev/null
+++ b/pkg/sentry/unimpl/unimplemented_syscall_go_proto/unimplemented_syscall.pb.go
@@ -0,0 +1,164 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.13.0
+// source: pkg/sentry/unimpl/unimplemented_syscall.proto
+
+package gvisor
+
+import (
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ registers_go_proto "gvisor.dev/gvisor/pkg/sentry/arch/registers_go_proto"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+type UnimplementedSyscall struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Tid int32 `protobuf:"varint,1,opt,name=tid,proto3" json:"tid,omitempty"`
+ Registers *registers_go_proto.Registers `protobuf:"bytes,2,opt,name=registers,proto3" json:"registers,omitempty"`
+}
+
+func (x *UnimplementedSyscall) Reset() {
+ *x = UnimplementedSyscall{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_sentry_unimpl_unimplemented_syscall_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UnimplementedSyscall) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UnimplementedSyscall) ProtoMessage() {}
+
+func (x *UnimplementedSyscall) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_sentry_unimpl_unimplemented_syscall_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UnimplementedSyscall.ProtoReflect.Descriptor instead.
+func (*UnimplementedSyscall) Descriptor() ([]byte, []int) {
+ return file_pkg_sentry_unimpl_unimplemented_syscall_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *UnimplementedSyscall) GetTid() int32 {
+ if x != nil {
+ return x.Tid
+ }
+ return 0
+}
+
+func (x *UnimplementedSyscall) GetRegisters() *registers_go_proto.Registers {
+ if x != nil {
+ return x.Registers
+ }
+ return nil
+}
+
+var File_pkg_sentry_unimpl_unimplemented_syscall_proto protoreflect.FileDescriptor
+
+var file_pkg_sentry_unimpl_unimplemented_syscall_proto_rawDesc = []byte{
+ 0x0a, 0x2d, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2f, 0x75, 0x6e, 0x69,
+ 0x6d, 0x70, 0x6c, 0x2f, 0x75, 0x6e, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x65,
+ 0x64, 0x5f, 0x73, 0x79, 0x73, 0x63, 0x61, 0x6c, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
+ 0x06, 0x67, 0x76, 0x69, 0x73, 0x6f, 0x72, 0x1a, 0x1f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x6e,
+ 0x74, 0x72, 0x79, 0x2f, 0x61, 0x72, 0x63, 0x68, 0x2f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65,
+ 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x59, 0x0a, 0x14, 0x55, 0x6e, 0x69, 0x6d,
+ 0x70, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x53, 0x79, 0x73, 0x63, 0x61, 0x6c, 0x6c,
+ 0x12, 0x10, 0x0a, 0x03, 0x74, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x74,
+ 0x69, 0x64, 0x12, 0x2f, 0x0a, 0x09, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x76, 0x69, 0x73, 0x6f, 0x72, 0x2e, 0x52,
+ 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x09, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74,
+ 0x65, 0x72, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_pkg_sentry_unimpl_unimplemented_syscall_proto_rawDescOnce sync.Once
+ file_pkg_sentry_unimpl_unimplemented_syscall_proto_rawDescData = file_pkg_sentry_unimpl_unimplemented_syscall_proto_rawDesc
+)
+
+func file_pkg_sentry_unimpl_unimplemented_syscall_proto_rawDescGZIP() []byte {
+ file_pkg_sentry_unimpl_unimplemented_syscall_proto_rawDescOnce.Do(func() {
+ file_pkg_sentry_unimpl_unimplemented_syscall_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_sentry_unimpl_unimplemented_syscall_proto_rawDescData)
+ })
+ return file_pkg_sentry_unimpl_unimplemented_syscall_proto_rawDescData
+}
+
+var file_pkg_sentry_unimpl_unimplemented_syscall_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_pkg_sentry_unimpl_unimplemented_syscall_proto_goTypes = []interface{}{
+ (*UnimplementedSyscall)(nil), // 0: gvisor.UnimplementedSyscall
+ (*registers_go_proto.Registers)(nil), // 1: gvisor.Registers
+}
+var file_pkg_sentry_unimpl_unimplemented_syscall_proto_depIdxs = []int32{
+ 1, // 0: gvisor.UnimplementedSyscall.registers:type_name -> gvisor.Registers
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_pkg_sentry_unimpl_unimplemented_syscall_proto_init() }
+func file_pkg_sentry_unimpl_unimplemented_syscall_proto_init() {
+ if File_pkg_sentry_unimpl_unimplemented_syscall_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_pkg_sentry_unimpl_unimplemented_syscall_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UnimplementedSyscall); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_pkg_sentry_unimpl_unimplemented_syscall_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_pkg_sentry_unimpl_unimplemented_syscall_proto_goTypes,
+ DependencyIndexes: file_pkg_sentry_unimpl_unimplemented_syscall_proto_depIdxs,
+ MessageInfos: file_pkg_sentry_unimpl_unimplemented_syscall_proto_msgTypes,
+ }.Build()
+ File_pkg_sentry_unimpl_unimplemented_syscall_proto = out.File
+ file_pkg_sentry_unimpl_unimplemented_syscall_proto_rawDesc = nil
+ file_pkg_sentry_unimpl_unimplemented_syscall_proto_goTypes = nil
+ file_pkg_sentry_unimpl_unimplemented_syscall_proto_depIdxs = nil
+}
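A hypothetical use of the generated message above, with import paths inferred from the file locations in this diff; the Tid value and the error handling are invented for illustration.

package main

import (
    "fmt"

    proto "github.com/golang/protobuf/proto"
    registers_go_proto "gvisor.dev/gvisor/pkg/sentry/arch/registers_go_proto"
    gvisor "gvisor.dev/gvisor/pkg/sentry/unimpl/unimplemented_syscall_go_proto"
)

func main() {
    ev := &gvisor.UnimplementedSyscall{
        Tid:       42,
        Registers: &registers_go_proto.Registers{},
    }
    data, err := proto.Marshal(ev)
    if err != nil {
        panic(err)
    }
    fmt.Println(len(data)) // Size of the serialized event, in bytes.
}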
diff --git a/pkg/sentry/uniqueid/BUILD b/pkg/sentry/uniqueid/BUILD
deleted file mode 100644
index 7467e6398..000000000
--- a/pkg/sentry/uniqueid/BUILD
+++ /dev/null
@@ -1,13 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "uniqueid",
- srcs = ["context.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/context",
- "//pkg/sentry/socket/unix/transport",
- ],
-)
diff --git a/pkg/sentry/uniqueid/uniqueid_state_autogen.go b/pkg/sentry/uniqueid/uniqueid_state_autogen.go
new file mode 100644
index 000000000..1890fdf46
--- /dev/null
+++ b/pkg/sentry/uniqueid/uniqueid_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package uniqueid
diff --git a/pkg/sentry/usage/BUILD b/pkg/sentry/usage/BUILD
deleted file mode 100644
index 8e2b3ed79..000000000
--- a/pkg/sentry/usage/BUILD
+++ /dev/null
@@ -1,23 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "usage",
- srcs = [
- "cpu.go",
- "io.go",
- "memory.go",
- "memory_unsafe.go",
- "usage.go",
- ],
- visibility = [
- "//:sandbox",
- ],
- deps = [
- "//pkg/bits",
- "//pkg/memutil",
- "//pkg/sync",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/sentry/usage/usage_state_autogen.go b/pkg/sentry/usage/usage_state_autogen.go
new file mode 100644
index 000000000..a0077a6ca
--- /dev/null
+++ b/pkg/sentry/usage/usage_state_autogen.go
@@ -0,0 +1,86 @@
+// automatically generated by stateify.
+
+package usage
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (s *CPUStats) StateTypeName() string {
+ return "pkg/sentry/usage.CPUStats"
+}
+
+func (s *CPUStats) StateFields() []string {
+ return []string{
+ "UserTime",
+ "SysTime",
+ "VoluntarySwitches",
+ }
+}
+
+func (s *CPUStats) beforeSave() {}
+
+// +checklocksignore
+func (s *CPUStats) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.UserTime)
+ stateSinkObject.Save(1, &s.SysTime)
+ stateSinkObject.Save(2, &s.VoluntarySwitches)
+}
+
+func (s *CPUStats) afterLoad() {}
+
+// +checklocksignore
+func (s *CPUStats) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.UserTime)
+ stateSourceObject.Load(1, &s.SysTime)
+ stateSourceObject.Load(2, &s.VoluntarySwitches)
+}
+
+func (i *IO) StateTypeName() string {
+ return "pkg/sentry/usage.IO"
+}
+
+func (i *IO) StateFields() []string {
+ return []string{
+ "CharsRead",
+ "CharsWritten",
+ "ReadSyscalls",
+ "WriteSyscalls",
+ "BytesRead",
+ "BytesWritten",
+ "BytesWriteCancelled",
+ }
+}
+
+func (i *IO) beforeSave() {}
+
+// +checklocksignore
+func (i *IO) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.CharsRead)
+ stateSinkObject.Save(1, &i.CharsWritten)
+ stateSinkObject.Save(2, &i.ReadSyscalls)
+ stateSinkObject.Save(3, &i.WriteSyscalls)
+ stateSinkObject.Save(4, &i.BytesRead)
+ stateSinkObject.Save(5, &i.BytesWritten)
+ stateSinkObject.Save(6, &i.BytesWriteCancelled)
+}
+
+func (i *IO) afterLoad() {}
+
+// +checklocksignore
+func (i *IO) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.CharsRead)
+ stateSourceObject.Load(1, &i.CharsWritten)
+ stateSourceObject.Load(2, &i.ReadSyscalls)
+ stateSourceObject.Load(3, &i.WriteSyscalls)
+ stateSourceObject.Load(4, &i.BytesRead)
+ stateSourceObject.Load(5, &i.BytesWritten)
+ stateSourceObject.Load(6, &i.BytesWriteCancelled)
+}
+
+func init() {
+ state.Register((*CPUStats)(nil))
+ state.Register((*IO)(nil))
+}
diff --git a/pkg/sentry/usage/usage_unsafe_state_autogen.go b/pkg/sentry/usage/usage_unsafe_state_autogen.go
new file mode 100644
index 000000000..5d9e86efa
--- /dev/null
+++ b/pkg/sentry/usage/usage_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package usage
diff --git a/pkg/sentry/vfs/BUILD b/pkg/sentry/vfs/BUILD
deleted file mode 100644
index a2032162d..000000000
--- a/pkg/sentry/vfs/BUILD
+++ /dev/null
@@ -1,143 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-licenses(["notice"])
-
-go_template_instance(
- name = "epoll_interest_list",
- out = "epoll_interest_list.go",
- package = "vfs",
- prefix = "epollInterest",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*epollInterest",
- "Linker": "*epollInterest",
- },
-)
-
-go_template_instance(
- name = "event_list",
- out = "event_list.go",
- package = "vfs",
- prefix = "event",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*Event",
- "Linker": "*Event",
- },
-)
-
-go_template_instance(
- name = "file_description_refs",
- out = "file_description_refs.go",
- package = "vfs",
- prefix = "FileDescription",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "FileDescription",
- },
-)
-
-go_template_instance(
- name = "mount_namespace_refs",
- out = "mount_namespace_refs.go",
- package = "vfs",
- prefix = "MountNamespace",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "MountNamespace",
- },
-)
-
-go_template_instance(
- name = "filesystem_refs",
- out = "filesystem_refs.go",
- package = "vfs",
- prefix = "Filesystem",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "Filesystem",
- },
-)
-
-go_library(
- name = "vfs",
- srcs = [
- "anonfs.go",
- "context.go",
- "debug.go",
- "dentry.go",
- "device.go",
- "epoll.go",
- "epoll_interest_list.go",
- "event_list.go",
- "file_description.go",
- "file_description_impl_util.go",
- "file_description_refs.go",
- "filesystem.go",
- "filesystem_impl_util.go",
- "filesystem_refs.go",
- "filesystem_type.go",
- "inotify.go",
- "lock.go",
- "mount.go",
- "mount_namespace_refs.go",
- "mount_unsafe.go",
- "opath.go",
- "options.go",
- "pathname.go",
- "permissions.go",
- "resolving_path.go",
- "save_restore.go",
- "vfs.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/fd",
- "//pkg/fdnotifier",
- "//pkg/fspath",
- "//pkg/gohacks",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/safemem",
- "//pkg/sentry/arch",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/lock",
- "//pkg/sentry/fsmetric",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/limits",
- "//pkg/sentry/memmap",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/uniqueid",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- "//pkg/waiter",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "vfs_test",
- size = "small",
- srcs = [
- "file_description_impl_util_test.go",
- "mount_test.go",
- ],
- library = ":vfs",
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/sentry/contexttest",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- ],
-)
diff --git a/pkg/sentry/vfs/README.md b/pkg/sentry/vfs/README.md
deleted file mode 100644
index 5aad31b78..000000000
--- a/pkg/sentry/vfs/README.md
+++ /dev/null
@@ -1,186 +0,0 @@
-# The gVisor Virtual Filesystem
-
-THIS PACKAGE IS CURRENTLY EXPERIMENTAL AND NOT READY OR ENABLED FOR PRODUCTION
-USE. For the filesystem implementation currently used by gVisor, see the `fs`
-package.
-
-## Implementation Notes
-
-### Reference Counting
-
-Filesystem, Dentry, Mount, MountNamespace, and FileDescription are all
-reference-counted. Mount and MountNamespace are exclusively VFS-managed; when
-their reference count reaches zero, VFS releases their resources. Filesystem and
-FileDescription management is shared between VFS and filesystem implementations;
-when their reference count reaches zero, VFS notifies the implementation by
-calling `FilesystemImpl.Release()` or `FileDescriptionImpl.Release()`
-respectively and then releases VFS-owned resources. Dentries are exclusively
-managed by filesystem implementations; reference count changes are abstracted
-through DentryImpl, which should release resources when reference count reaches
-zero.
-
-Filesystem references are held by:
-
-- Mount: Each referenced Mount holds a reference on the mounted Filesystem.
-
-Dentry references are held by:
-
-- FileDescription: Each referenced FileDescription holds a reference on the
- Dentry through which it was opened, via `FileDescription.vd.dentry`.
-
-- Mount: Each referenced Mount holds a reference on its mount point and on the
- mounted filesystem root. The mount point is mutable (`mount(MS_MOVE)`).
-
-Mount references are held by:
-
-- FileDescription: Each referenced FileDescription holds a reference on the
- Mount on which it was opened, via `FileDescription.vd.mount`.
-
-- Mount: Each referenced Mount holds a reference on its parent, which is the
- mount containing its mount point.
-
-- VirtualFilesystem: A reference is held on each Mount that has been connected
- to a mount point, but not yet umounted.
-
-MountNamespace and FileDescription references are held by users of VFS. The
-expectation is that each `kernel.Task` holds a reference on its corresponding
-MountNamespace, and each file descriptor holds a reference on its represented
-FileDescription.
-
-Notes:
-
-- Dentries do not hold a reference on their owning Filesystem. Instead, all
- uses of a Dentry occur in the context of a Mount, which holds a reference on
- the relevant Filesystem (see e.g. the VirtualDentry type). As a corollary,
- when releasing references on both a Dentry and its corresponding Mount, the
- Dentry's reference must be released first (because releasing the Mount's
- reference may release the last reference on the Filesystem, whose state may
- be required to release the Dentry reference).
-
-### The Inheritance Pattern
-
-Filesystem, Dentry, and FileDescription are all concepts featuring both state
-that must be shared between VFS and filesystem implementations, and operations
-that are implementation-defined. To facilitate this, each of these three
-concepts follows the same pattern, shown below for Dentry:
-
-```go
-// Dentry represents a node in a filesystem tree.
-type Dentry struct {
- // VFS-required dentry state.
- parent *Dentry
- // ...
-
- // impl is the DentryImpl associated with this Dentry. impl is immutable.
- // This should be the last field in Dentry.
- impl DentryImpl
-}
-
-// Init must be called before first use of d.
-func (d *Dentry) Init(impl DentryImpl) {
- d.impl = impl
-}
-
-// Impl returns the DentryImpl associated with d.
-func (d *Dentry) Impl() DentryImpl {
- return d.impl
-}
-
-// DentryImpl contains implementation-specific details of a Dentry.
-// Implementations of DentryImpl should contain their associated Dentry by
-// value as their first field.
-type DentryImpl interface {
- // VFS-required implementation-defined dentry operations.
- IncRef()
- // ...
-}
-```
-
-This construction, which is essentially a type-safe analogue to Linux's
-`container_of` pattern, has the following properties:
-
-- VFS works almost exclusively with pointers to Dentry rather than DentryImpl
- interface objects, such as in the type of `Dentry.parent`. This avoids
- interface method calls (which are somewhat expensive to perform, and defeat
- inlining and escape analysis), reduces the size of VFS types (since an
- interface object is two pointers in size), and allows pointers to be loaded
- and stored atomically using `sync/atomic`. Implementation-defined behavior
- is accessed via `Dentry.impl` when required.
-
-- Filesystem implementations can access the implementation-defined state
- associated with objects of VFS types by type-asserting or type-switching
- (e.g. `Dentry.Impl().(*myDentry)`). Type assertions to a concrete type
- require only an equality comparison of the interface object's type pointer
- to a static constant, and are consequently very fast.
-
-- Filesystem implementations can access the VFS state associated with objects
- of implementation-defined types directly.
-
-- VFS and implementation-defined state for a given type occupy the same
- object, minimizing memory allocations and maximizing memory locality. `impl`
- is the last field in `Dentry`, and `Dentry` is the first field in
- `DentryImpl` implementations, for similar reasons: this tends to cause
- fetching of the `Dentry.impl` interface object to also fetch `DentryImpl`
- fields, either because they are in the same cache line or via next-line
- prefetching.
-
-## Future Work
-
-- Most `mount(2)` features, and unmounting, are incomplete.
-
-- VFS1 filesystems are not directly compatible with VFS2. It may be possible
- to implement shims that implement `vfs.FilesystemImpl` for
- `fs.MountNamespace`, `vfs.DentryImpl` for `fs.Dirent`, and
- `vfs.FileDescriptionImpl` for `fs.File`, which may be adequate for
- filesystems that are not performance-critical (e.g. sysfs); however, it is
- not clear that this will be less effort than simply porting the filesystems
- in question. Practically speaking, the following filesystems will probably
- need to be ported or made compatible through a shim to evaluate filesystem
- performance on realistic workloads:
-
- - devfs/procfs/sysfs, which will realistically be necessary to execute
- most applications. (Note that procfs and sysfs do not support hard
- links, so they do not require the complexity of separate inode objects.
- Also note that Linux's /dev is actually a variant of tmpfs called
- devtmpfs.)
-
- - tmpfs. This should be relatively straightforward: copy/paste memfs,
- store regular file contents in pgalloc-allocated memory instead of
- `[]byte`, and add support for file timestamps. (In fact, it probably
- makes more sense to convert memfs to tmpfs and not keep the former.)
-
- - A remote filesystem, either lisafs (if it is ready by the time that
- other benchmarking prerequisites are) or v9fs (aka 9P, aka gofers).
-
- - epoll files.
-
- Filesystems that will need to be ported before switching to VFS2, but can
- probably be skipped for early testing:
-
- - overlayfs, which is needed for (at least) synthetic mount points.
-
- - Support for host ttys.
-
- - timerfd files.
-
- Filesystems that can probably be dropped:
-
- - ashmem, which is far too incomplete to use.
-
- - binder, which is similarly far too incomplete to use.
-
-- Save/restore. For instance, it is unclear if the current implementation of
- the `state` package supports the inheritance pattern described above.
-
-- Many features that were previously implemented by VFS must now be
- implemented by individual filesystems (though, in most cases, this should
- consist of calls to hooks or libraries provided by `vfs` or other packages).
- This includes, but is not necessarily limited to:
-
- - Block and character device special files
-
- - Inotify
-
- - File locking
-
- - `O_ASYNC`
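The "Inheritance Pattern" section of the deleted README above shows the VFS side of the pattern; the self-contained toy below adds the filesystem side so the `container_of`-style round trip is visible end to end. It restates the README's `Dentry`/`DentryImpl` skeleton, and the `myDentry` name is purely illustrative.

```go
// Toy program restating the README's skeleton plus a filesystem-side type.
// Nothing here is the real vfs package; it only mirrors the pattern above.
package main

import "fmt"

// Dentry and DentryImpl restate the skeleton from the README.
type Dentry struct {
	impl DentryImpl // immutable after Init; last field by convention.
}

func (d *Dentry) Init(impl DentryImpl) { d.impl = impl }
func (d *Dentry) Impl() DentryImpl     { return d.impl }

type DentryImpl interface {
	IncRef()
}

// myDentry contains its Dentry by value as its first field, as the README
// recommends, so both halves share one allocation and good cache locality.
type myDentry struct {
	vfsd Dentry
	ino  uint64 // implementation-defined state.
	refs int64
}

func (d *myDentry) IncRef() { d.refs++ }

func main() {
	d := &myDentry{ino: 42, refs: 1}
	d.vfsd.Init(d)

	// "VFS" hands back a *Dentry; the implementation type-asserts to get
	// its own state back, the type-safe container_of analogue described
	// above.
	vfsd := &d.vfsd
	fmt.Println(vfsd.Impl().(*myDentry).ino) // prints 42
}
```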
diff --git a/pkg/sentry/vfs/epoll_interest_list.go b/pkg/sentry/vfs/epoll_interest_list.go
new file mode 100644
index 000000000..e75ea361b
--- /dev/null
+++ b/pkg/sentry/vfs/epoll_interest_list.go
@@ -0,0 +1,221 @@
+package vfs
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type epollInterestElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (epollInterestElementMapper) linkerFor(elem *epollInterest) *epollInterest { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type epollInterestList struct {
+ head *epollInterest
+ tail *epollInterest
+}
+
+// Reset resets list l to the empty state.
+func (l *epollInterestList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *epollInterestList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *epollInterestList) Front() *epollInterest {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *epollInterestList) Back() *epollInterest {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *epollInterestList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (epollInterestElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *epollInterestList) PushFront(e *epollInterest) {
+ linker := epollInterestElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ epollInterestElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *epollInterestList) PushBack(e *epollInterest) {
+ linker := epollInterestElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ epollInterestElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *epollInterestList) PushBackList(m *epollInterestList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ epollInterestElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ epollInterestElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *epollInterestList) InsertAfter(b, e *epollInterest) {
+ bLinker := epollInterestElementMapper{}.linkerFor(b)
+ eLinker := epollInterestElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ epollInterestElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *epollInterestList) InsertBefore(a, e *epollInterest) {
+ aLinker := epollInterestElementMapper{}.linkerFor(a)
+ eLinker := epollInterestElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ epollInterestElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *epollInterestList) Remove(e *epollInterest) {
+ linker := epollInterestElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ epollInterestElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ epollInterestElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type epollInterestEntry struct {
+ next *epollInterest
+ prev *epollInterest
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *epollInterestEntry) Next() *epollInterest {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *epollInterestEntry) Prev() *epollInterest {
+ return e.prev
+}
+
+// SetNext assigns 'entry' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *epollInterestEntry) SetNext(elem *epollInterest) {
+ e.next = elem
+}
+
+// SetPrev assigns 'entry' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *epollInterestEntry) SetPrev(elem *epollInterest) {
+ e.prev = elem
+}
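The file above is one instantiation of the generic intrusive-list template (`//pkg/ilist:generic_list`) named in the deleted BUILD file. As a reduced, self-contained sketch of how such a list is used, the toy below embeds an entry in an element type, pushes a few elements, and iterates; `waiter`/`waiterList` are illustrative stand-ins and only `PushBack` is reproduced.

```go
// Reduced stand-in for the generated intrusive list; waiter plays the role
// of epollInterest. Not the real generated code, just the usage pattern.
package main

import "fmt"

// waiterEntry provides the Next/Prev/SetNext/SetPrev hooks the list needs.
type waiterEntry struct{ next, prev *waiter }

func (e *waiterEntry) Next() *waiter     { return e.next }
func (e *waiterEntry) Prev() *waiter     { return e.prev }
func (e *waiterEntry) SetNext(w *waiter) { e.next = w }
func (e *waiterEntry) SetPrev(w *waiter) { e.prev = w }

// waiter embeds the entry, so linking it costs no extra allocation.
type waiter struct {
	waiterEntry
	id int
}

type waiterList struct{ head, tail *waiter }

// PushBack links w at the tail in O(1), as in the generated PushBack above.
func (l *waiterList) PushBack(w *waiter) {
	w.SetNext(nil)
	w.SetPrev(l.tail)
	if l.tail != nil {
		l.tail.SetNext(w)
	} else {
		l.head = w
	}
	l.tail = w
}

func main() {
	var l waiterList // the zero value is an empty list ready to use.
	for i := 1; i <= 3; i++ {
		l.PushBack(&waiter{id: i})
	}
	// Iteration pattern from the comment at the top of the generated file.
	for w := l.head; w != nil; w = w.Next() {
		fmt.Println(w.id)
	}
}
```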
diff --git a/pkg/sentry/vfs/event_list.go b/pkg/sentry/vfs/event_list.go
new file mode 100644
index 000000000..c0946b585
--- /dev/null
+++ b/pkg/sentry/vfs/event_list.go
@@ -0,0 +1,221 @@
+package vfs
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type eventElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (eventElementMapper) linkerFor(elem *Event) *Event { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type eventList struct {
+ head *Event
+ tail *Event
+}
+
+// Reset resets list l to the empty state.
+func (l *eventList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *eventList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *eventList) Front() *Event {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *eventList) Back() *Event {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *eventList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (eventElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *eventList) PushFront(e *Event) {
+ linker := eventElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ eventElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *eventList) PushBack(e *Event) {
+ linker := eventElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ eventElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *eventList) PushBackList(m *eventList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ eventElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ eventElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *eventList) InsertAfter(b, e *Event) {
+ bLinker := eventElementMapper{}.linkerFor(b)
+ eLinker := eventElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ eventElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *eventList) InsertBefore(a, e *Event) {
+ aLinker := eventElementMapper{}.linkerFor(a)
+ eLinker := eventElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ eventElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *eventList) Remove(e *Event) {
+ linker := eventElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ eventElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ eventElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type eventEntry struct {
+ next *Event
+ prev *Event
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *eventEntry) Next() *Event {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *eventEntry) Prev() *Event {
+ return e.prev
+}
+
+// SetNext assigns 'entry' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *eventEntry) SetNext(elem *Event) {
+ e.next = elem
+}
+
+// SetPrev assigns 'entry' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *eventEntry) SetPrev(elem *Event) {
+ e.prev = elem
+}
diff --git a/pkg/sentry/vfs/file_description_impl_util_test.go b/pkg/sentry/vfs/file_description_impl_util_test.go
deleted file mode 100644
index 3423dede1..000000000
--- a/pkg/sentry/vfs/file_description_impl_util_test.go
+++ /dev/null
@@ -1,225 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package vfs
-
-import (
- "bytes"
- "fmt"
- "io"
- "sync/atomic"
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/errors/linuxerr"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-// fileDescription is the common fd struct which a filesystem implementation
-// embeds in all of its file description implementations as required.
-type fileDescription struct {
- vfsfd FileDescription
- FileDescriptionDefaultImpl
- NoLockFD
-}
-
-// genCount contains the number of times its DynamicBytesSource.Generate()
-// implementation has been called.
-type genCount struct {
- count uint64 // accessed using atomic memory ops
-}
-
-// Generate implements DynamicBytesSource.Generate.
-func (g *genCount) Generate(ctx context.Context, buf *bytes.Buffer) error {
- fmt.Fprintf(buf, "%d", atomic.AddUint64(&g.count, 1))
- return nil
-}
-
-type storeData struct {
- data string
-}
-
-var _ WritableDynamicBytesSource = (*storeData)(nil)
-
-// Generate implements DynamicBytesSource.
-func (d *storeData) Generate(ctx context.Context, buf *bytes.Buffer) error {
- buf.WriteString(d.data)
- return nil
-}
-
-// Generate implements WritableDynamicBytesSource.
-func (d *storeData) Write(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) {
- buf := make([]byte, src.NumBytes())
- n, err := src.CopyIn(ctx, buf)
- if err != nil {
- return 0, err
- }
-
- d.data = string(buf[:n])
- return 0, nil
-}
-
-// testFD is a read-only FileDescriptionImpl representing a regular file.
-type testFD struct {
- fileDescription
- DynamicBytesFileDescriptionImpl
-
- data DynamicBytesSource
-}
-
-func newTestFD(ctx context.Context, vfsObj *VirtualFilesystem, statusFlags uint32, data DynamicBytesSource) *FileDescription {
- vd := vfsObj.NewAnonVirtualDentry("genCountFD")
- defer vd.DecRef(ctx)
- var fd testFD
- fd.vfsfd.Init(&fd, statusFlags, vd.Mount(), vd.Dentry(), &FileDescriptionOptions{})
- fd.DynamicBytesFileDescriptionImpl.SetDataSource(data)
- return &fd.vfsfd
-}
-
-// Release implements FileDescriptionImpl.Release.
-func (fd *testFD) Release(context.Context) {
-}
-
-// Stat implements FileDescriptionImpl.Stat.
-func (fd *testFD) Stat(ctx context.Context, opts StatOptions) (linux.Statx, error) {
- // Note that Statx.Mask == 0 in the return value.
- return linux.Statx{}, nil
-}
-
-// SetStat implements FileDescriptionImpl.SetStat.
-func (fd *testFD) SetStat(ctx context.Context, opts SetStatOptions) error {
- return linuxerr.EPERM
-}
-
-func TestGenCountFD(t *testing.T) {
- ctx := contexttest.Context(t)
-
- vfsObj := &VirtualFilesystem{}
- if err := vfsObj.Init(ctx); err != nil {
- t.Fatalf("VFS init: %v", err)
- }
- fd := newTestFD(ctx, vfsObj, linux.O_RDWR, &genCount{})
- defer fd.DecRef(ctx)
-
- // The first read causes Generate to be called to fill the FD's buffer.
- buf := make([]byte, 2)
- ioseq := usermem.BytesIOSequence(buf)
- n, err := fd.Read(ctx, ioseq, ReadOptions{})
- if n != 1 || (err != nil && err != io.EOF) {
- t.Fatalf("first Read: got (%d, %v), wanted (1, nil or EOF)", n, err)
- }
- if want := byte('1'); buf[0] != want {
- t.Errorf("first Read: got byte %c, wanted %c", buf[0], want)
- }
-
- // A second read without seeking is still at EOF.
- n, err = fd.Read(ctx, ioseq, ReadOptions{})
- if n != 0 || err != io.EOF {
- t.Fatalf("second Read: got (%d, %v), wanted (0, EOF)", n, err)
- }
-
- // Seeking to the beginning of the file causes it to be regenerated.
- n, err = fd.Seek(ctx, 0, linux.SEEK_SET)
- if n != 0 || err != nil {
- t.Fatalf("Seek: got (%d, %v), wanted (0, nil)", n, err)
- }
- n, err = fd.Read(ctx, ioseq, ReadOptions{})
- if n != 1 || (err != nil && err != io.EOF) {
- t.Fatalf("Read after Seek: got (%d, %v), wanted (1, nil or EOF)", n, err)
- }
- if want := byte('2'); buf[0] != want {
- t.Errorf("Read after Seek: got byte %c, wanted %c", buf[0], want)
- }
-
- // PRead at the beginning of the file also causes it to be regenerated.
- n, err = fd.PRead(ctx, ioseq, 0, ReadOptions{})
- if n != 1 || (err != nil && err != io.EOF) {
- t.Fatalf("PRead: got (%d, %v), wanted (1, nil or EOF)", n, err)
- }
- if want := byte('3'); buf[0] != want {
- t.Errorf("PRead: got byte %c, wanted %c", buf[0], want)
- }
-
- // Write and PWrite fail.
- if _, err := fd.Write(ctx, ioseq, WriteOptions{}); !linuxerr.Equals(linuxerr.EIO, err) {
- t.Errorf("Write: got err %v, wanted %v", err, syserror.EIO)
- }
- if _, err := fd.PWrite(ctx, ioseq, 0, WriteOptions{}); !linuxerr.Equals(linuxerr.EIO, err) {
- t.Errorf("PWrite: got err %v, wanted %v", err, syserror.EIO)
- }
-}
-
-func TestWritable(t *testing.T) {
- ctx := contexttest.Context(t)
-
- vfsObj := &VirtualFilesystem{}
- if err := vfsObj.Init(ctx); err != nil {
- t.Fatalf("VFS init: %v", err)
- }
- fd := newTestFD(ctx, vfsObj, linux.O_RDWR, &storeData{data: "init"})
- defer fd.DecRef(ctx)
-
- buf := make([]byte, 10)
- ioseq := usermem.BytesIOSequence(buf)
- if n, err := fd.Read(ctx, ioseq, ReadOptions{}); n != 4 || (err != nil && err != io.EOF) {
- t.Fatalf("Read: got (%v, %v), wanted (4, nil or EOF)", n, err)
- }
- if want := "init"; want != string(buf[:len(want)]) {
- t.Fatalf("Read: got %v, wanted %v", string(buf[:len(want)]), want)
- }
-
- // Test PWrite.
- want := "write"
- writeIOSeq := usermem.BytesIOSequence([]byte(want))
- if n, err := fd.PWrite(ctx, writeIOSeq, 0, WriteOptions{}); int(n) != len(want) || err != nil {
- t.Errorf("PWrite: got (%v, %v), wanted (%v, nil)", n, err, len(want))
- }
- if n, err := fd.PRead(ctx, ioseq, 0, ReadOptions{}); int(n) != len(want) || (err != nil && err != io.EOF) {
- t.Fatalf("PRead: got (%v, %v), wanted (%v, nil or EOF)", n, err, len(want))
- }
- if want != string(buf[:len(want)]) {
- t.Fatalf("PRead: got %v, wanted %v", string(buf[:len(want)]), want)
- }
-
- // Test Seek to 0 followed by Write.
- want = "write2"
- writeIOSeq = usermem.BytesIOSequence([]byte(want))
- if n, err := fd.Seek(ctx, 0, linux.SEEK_SET); n != 0 || err != nil {
- t.Errorf("Seek: got (%v, %v), wanted (0, nil)", n, err)
- }
- if n, err := fd.Write(ctx, writeIOSeq, WriteOptions{}); int(n) != len(want) || err != nil {
- t.Errorf("Write: got (%v, %v), wanted (%v, nil)", n, err, len(want))
- }
- if n, err := fd.PRead(ctx, ioseq, 0, ReadOptions{}); int(n) != len(want) || (err != nil && err != io.EOF) {
- t.Fatalf("PRead: got (%v, %v), wanted (%v, nil or EOF)", n, err, len(want))
- }
- if want != string(buf[:len(want)]) {
- t.Fatalf("PRead: got %v, wanted %v", string(buf[:len(want)]), want)
- }
-
- // Test failure if offset != 0.
- if n, err := fd.Seek(ctx, 1, linux.SEEK_SET); n != 1 || err != nil {
- t.Errorf("Seek: got (%v, %v), wanted (1, nil)", n, err)
- }
- if n, err := fd.Write(ctx, writeIOSeq, WriteOptions{}); n != 0 || !linuxerr.Equals(linuxerr.EINVAL, err) {
- t.Errorf("Write: got (%v, %v), wanted (0, EINVAL)", n, err)
- }
- if n, err := fd.PWrite(ctx, writeIOSeq, 2, WriteOptions{}); n != 0 || !linuxerr.Equals(linuxerr.EINVAL, err) {
- t.Errorf("PWrite: got (%v, %v), wanted (0, EINVAL)", n, err)
- }
-}
diff --git a/pkg/sentry/vfs/file_description_refs.go b/pkg/sentry/vfs/file_description_refs.go
new file mode 100644
index 000000000..9e6b7bd40
--- /dev/null
+++ b/pkg/sentry/vfs/file_description_refs.go
@@ -0,0 +1,140 @@
+package vfs
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const FileDescriptionenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var FileDescriptionobj *FileDescription
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type FileDescriptionRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *FileDescriptionRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *FileDescriptionRefs) RefType() string {
+ return fmt.Sprintf("%T", FileDescriptionobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *FileDescriptionRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *FileDescriptionRefs) LogRefs() bool {
+ return FileDescriptionenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *FileDescriptionRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *FileDescriptionRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if FileDescriptionenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.TryRefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *FileDescriptionRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if FileDescriptionenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *FileDescriptionRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if FileDescriptionenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *FileDescriptionRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
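The comments on TryIncRef and DecRef above describe packing a 32-bit speculative count and a 32-bit real count into one int64. The standalone snippet below replays just that arithmetic so the `int32(v)` truncation trick is easier to see in isolation; it is a toy, not the vfs API.

```go
// Toy replay of the speculative-reference arithmetic used by TryIncRef.
package main

import (
	"fmt"
	"sync/atomic"
)

func tryIncRef(refCount *int64) bool {
	const speculativeRef = 1 << 32
	// Add a speculative reference; int32(v) isolates the real count.
	if v := atomic.AddInt64(refCount, speculativeRef); int32(v) == 0 {
		// The object already reached zero real references: back out.
		atomic.AddInt64(refCount, -speculativeRef)
		return false
	}
	// Convert the speculative reference into a real one.
	atomic.AddInt64(refCount, -speculativeRef+1)
	return true
}

func main() {
	live := int64(1) // one real reference outstanding.
	dead := int64(0) // already released.
	fmt.Println(tryIncRef(&live), live) // true 2
	fmt.Println(tryIncRef(&dead), dead) // false 0
}
```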
diff --git a/pkg/sentry/vfs/filesystem_refs.go b/pkg/sentry/vfs/filesystem_refs.go
new file mode 100644
index 000000000..fc47919d0
--- /dev/null
+++ b/pkg/sentry/vfs/filesystem_refs.go
@@ -0,0 +1,140 @@
+package vfs
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const FilesystemenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var Filesystemobj *Filesystem
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type FilesystemRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *FilesystemRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *FilesystemRefs) RefType() string {
+ return fmt.Sprintf("%T", Filesystemobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *FilesystemRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *FilesystemRefs) LogRefs() bool {
+ return FilesystemenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *FilesystemRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *FilesystemRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if FilesystemenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.TryRefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *FilesystemRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if FilesystemenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *FilesystemRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if FilesystemenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *FilesystemRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
diff --git a/pkg/sentry/vfs/g3doc/inotify.md b/pkg/sentry/vfs/g3doc/inotify.md
deleted file mode 100644
index 833db213f..000000000
--- a/pkg/sentry/vfs/g3doc/inotify.md
+++ /dev/null
@@ -1,210 +0,0 @@
-# Inotify
-
-Inotify is a mechanism for monitoring filesystem events in Linux--see
-inotify(7). An inotify instance can be used to monitor files and directories for
-modifications, creation/deletion, etc. The inotify API consists of system calls
-that create inotify instances (inotify_init/inotify_init1) and add/remove
-watches on files to an instance (inotify_add_watch/inotify_rm_watch). Events are
-generated from various places in the sentry, including the syscall layer, the
-vfs layer, the process fd table, and within each filesystem implementation. This
-document outlines the implementation details of inotify in VFS2.
-
-## Inotify Objects
-
-Inotify data structures are implemented in the vfs package.
-
-### vfs.Inotify
-
-Inotify instances are represented by vfs.Inotify objects, which implement
-vfs.FileDescriptionImpl. As in Linux, inotify fds are backed by a
-pseudo-filesystem (anonfs). Each inotify instance receives events from a set of
-vfs.Watch objects, which can be modified with inotify_add_watch(2) and
-inotify_rm_watch(2). An application can retrieve events by reading the inotify
-fd.
-
-### vfs.Watches
-
-The set of all watches held on a single file (i.e., the watch target) is stored
-in vfs.Watches. Each watch will belong to a different inotify instance (an
-instance can only have one watch on any watch target). The watches are stored in
-a map indexed by their vfs.Inotify owner’s id. Hard links and file descriptions
-to a single file will all share the same vfs.Watches (with the exception of the
-gofer filesystem, described in a later section). Activity on the target causes
-its vfs.Watches to generate notifications on its watches’ inotify instances.
-
-### vfs.Watch
-
-A single watch, owned by one inotify instance and applied to one watch target.
-Both the vfs.Inotify owner and vfs.Watches on the target will hold a vfs.Watch,
-which leads to some complicated locking behavior (see Lock Ordering). Whenever a
-watch is notified of an event on its target, it will queue events to its inotify
-instance for delivery to the user.
-
-### vfs.Event
-
-vfs.Event is a simple struct encapsulating all the fields for an inotify event.
-It is generated by vfs.Watches and forwarded to the watches' owners. It is
-serialized to the user during read(2) syscalls on the associated vfs.Inotify's
-fd.
-
-## Lock Ordering
-
-There are three locks related to the inotify implementation:
-
-Inotify.mu: the inotify instance lock. Inotify.evMu: the inotify event queue
-lock. Watches.mu: the watch set lock, used to protect the collection of watches
-on a target.
-
-The correct lock ordering for inotify code is:
-
-Inotify.mu -> Watches.mu -> Inotify.evMu.
-
-Note that we use a distinct lock to protect the inotify event queue. If we
-simply used Inotify.mu, we could simultaneously have locks being acquired in the
-order of Inotify.mu -> Watches.mu and Watches.mu -> Inotify.mu, which would
-cause deadlocks. For instance, adding a watch to an inotify instance would
-require locking Inotify.mu, and then adding the same watch to the target would
-cause Watches.mu to be held. At the same time, generating an event on the target
-would require Watches.mu to be held before iterating through each watch, and
-then notifying the owner of each watch would cause Inotify.mu to be held.
-
-See the vfs package comment to understand how inotify locks fit into the overall
-ordering of filesystem locks.
-
-## Watch Targets in Different Filesystem Implementations
-
-In Linux, watches reside on inodes at the virtual filesystem layer. As a result,
-all hard links and file descriptions on a single file will share the same
-watch set. In VFS2, there is no common inode structure across filesystem types
-(some may not even have inodes), so we have to plumb inotify support through
-each specific filesystem implementation. Some of the technical considerations
-are outlined below.
-
-### Tmpfs
-
-For filesystems with inodes, like tmpfs, the design is quite similar to that of
-Linux, where watches reside on the inode.
-
-### Pseudo-filesystems
-
-Technically, because inotify is implemented at the vfs layer in Linux,
-pseudo-filesystems on top of kernfs support inotify passively. However, watches
-can only track explicit filesystem operations like read/write, open/close,
-mknod, etc., so watches on a target like /proc/self/fd will not generate events
-every time a new fd is added or removed. As of this writing, we leave inotify
-unimplemented in kernfs and anonfs; it does not seem particularly useful.
-
-### Gofer Filesystem (fsimpl/gofer)
-
-The gofer filesystem has several traits that make it difficult to support
-inotify:
-
-* **There are no inodes.** A file is represented as a dentry that holds an
- unopened p9 file (and possibly an open FID), through which the Sentry
- interacts with the gofer.
- * *Solution:* Because there is no inode structure stored in the sandbox,
- inotify watches must be held on the dentry. For the purposes of inotify,
- we assume that every dentry corresponds to a unique inode, which may
- cause unexpected behavior in the presence of hard links, where multiple
- dentries should share the same set of watches. Indeed, it is impossible
- for us to be absolutely sure whether dentries correspond to the same
- file or not, due to the following point:
-* **The Sentry cannot always be aware of hard links on the remote
- filesystem.** There is no way for us to confirm whether two files on the
- remote filesystem are actually links to the same inode. QIDs and inodes are
- not always 1:1. The assumption that dentries and inodes are 1:1 is
- inevitably broken if there are remote hard links that we cannot detect.
- * *Solution:* this is an issue with gofer fs in general, not only inotify,
- and we will have to live with it.
-* **Dentries can be cached, and then evicted.** Dentry lifetime does not
- correspond to file lifetime. Because gofer fs is not entirely in-memory, the
- absence of a dentry does not mean that the corresponding file does not
- exist, nor does a dentry reaching zero references mean that the
- corresponding file no longer exists. When a dentry reaches zero references,
- it will be cached, in case the file at that path is needed again in the
- future. However, the dentry may be evicted from the cache, which will cause
- a new dentry to be created next time the same file path is used. The
- existing watches will be lost.
- * *Solution:* When a dentry reaches zero references, do not cache it if it
- has any watches, so we can avoid eviction/destruction. Note that if the
- dentry was deleted or invalidated (d.vfsd.IsDead()), we should still
- destroy it along with its watches. Additionally, when a dentry’s last
- watch is removed, we cache it if it also has zero references. This way,
- the dentry can eventually be evicted from memory if it is no longer
- needed.
-* **Dentries can be invalidated.** Another issue with dentry lifetime is that
- the remote file at the file path represented may change from underneath the
- dentry. In this case, the next time that the dentry is used, it will be
- invalidated and a new dentry will replace it. In this case, it is not clear
- what should be done with the watches on the old dentry.
- * *Solution:* Silently destroy the watches when invalidation occurs. We
- have no way of knowing exactly what happened, when it happens. Inotify
- instances on NFS files in Linux probably behave in a similar fashion,
- since inotify is implemented at the vfs layer and is not aware of the
- complexities of remote file systems.
- * An alternative would be to issue some kind of event upon invalidation,
- e.g. a delete event, but this has several issues:
- * We cannot discern whether the remote file was invalidated because it was
- moved, deleted, etc. This information is crucial, because these cases
- should result in different events. Furthermore, the watches should only
- be destroyed if the file has been deleted.
- * Moreover, the mechanism for detecting whether the underlying file has
- changed is to check whether a new QID is given by the gofer. This may
- result in false positives, e.g. suppose that the server closed and
- re-opened the same file, which may result in a new QID.
- * Finally, the time of the event may be completely different from the time
- of the file modification, since a dentry is not immediately notified
- when the underlying file has changed. It would be quite unexpected to
- receive the notification when invalidation was triggered, i.e. the next
- time the file was accessed within the sandbox, because then the
- read/write/etc. operation on the file would not result in the expected
- event.
- * Another point in favor of the first solution: inotify in Linux can
- already be lossy on local filesystems (one of the sacrifices made so
- that filesystem performance isn’t killed), and it is lossy on NFS for
- similar reasons to gofer fs. Therefore, it is better for inotify to be
- silent than to emit incorrect notifications.
-* **There may be external users of the remote filesystem.** We can only track
- operations performed on the file within the sandbox. This is sufficient
- under InteropModeExclusive, but whenever there are external users, the set
- of actions we are aware of is incomplete.
- * *Solution:* We could either return an error or just issue a warning when
- inotify is used without InteropModeExclusive. Although faulty, VFS1
- allows it when the filesystem is shared, and Linux does the same for
- remote filesystems (as mentioned above, inotify sits at the vfs level).
-
-## Dentry Interface
-
-For events that must be generated above the vfs layer, we provide the following
-DentryImpl methods to allow interactions with targets on any FilesystemImpl:
-
-* **InotifyWithParent()** generates events on the dentry’s watches as well as
- its parent’s.
-* **Watches()** retrieves the watch set of the target represented by the
- dentry. This is used to access and modify watches on a target.
-* **OnZeroWatches()** performs cleanup tasks after the last watch is removed
- from a dentry. This is needed by gofer fs, which must allow a watched dentry
- to be cached once it has no more watches. Most implementations can just do
- nothing. Note that OnZeroWatches() must be called after all inotify locks
- are released to preserve lock ordering, since it may acquire
- FilesystemImpl-specific locks.
-
-## IN_EXCL_UNLINK
-
-There are several options that can be set for a watch, specified as part of the
-mask in inotify_add_watch(2). In particular, IN_EXCL_UNLINK requires some
-additional support in each filesystem.
-
-A watch with IN_EXCL_UNLINK will not generate events for its target if it
-corresponds to a path that was unlinked. For instance, if an fd is opened on
-“foo/bar” and “foo/bar” is subsequently unlinked, any reads/writes/etc. on the
-fd will be ignored by watches on “foo” or “foo/bar” with IN_EXCL_UNLINK. This
-requires each DentryImpl to keep track of whether it has been unlinked, in order
-to determine whether events should be sent to watches with IN_EXCL_UNLINK.
-
-## IN_ONESHOT
-
-One-shot watches expire after generating a single event. When an event occurs,
-all one-shot watches on the target that successfully generated an event are
-removed. Lock ordering can cause the management of one-shot watches to be quite
-expensive; see Watches.Notify() for more information.
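As a rough illustration of the ordering stated in the deleted document above (Inotify.mu -> Watches.mu -> Inotify.evMu), the toy below shows an add-watch path that takes the instance lock before the watch-set lock, and a notify path that takes the watch-set lock before the event-queue lock and never touches the instance lock. The types are simplified stand-ins, not the real vfs implementation.

```go
// Simplified stand-ins for the lock ordering described in the document.
package main

import "sync"

type inotify struct {
	mu    sync.Mutex // instance lock (outermost).
	evMu  sync.Mutex // event queue lock (innermost).
	queue []string
}

type watches struct {
	mu     sync.Mutex // watch set lock (middle).
	owners []*inotify
}

// addWatch acquires Inotify.mu -> Watches.mu.
func addWatch(i *inotify, w *watches) {
	i.mu.Lock()
	defer i.mu.Unlock()
	w.mu.Lock()
	defer w.mu.Unlock()
	w.owners = append(w.owners, i)
}

// notify acquires Watches.mu -> Inotify.evMu; it never takes Inotify.mu,
// which is what keeps the overall ordering acyclic.
func notify(w *watches, ev string) {
	w.mu.Lock()
	defer w.mu.Unlock()
	for _, i := range w.owners {
		i.evMu.Lock()
		i.queue = append(i.queue, ev)
		i.evMu.Unlock()
	}
}

func main() {
	i, w := &inotify{}, &watches{}
	addWatch(i, w)
	notify(w, "IN_MODIFY")
}
```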
diff --git a/pkg/sentry/vfs/genericfstree/BUILD b/pkg/sentry/vfs/genericfstree/BUILD
deleted file mode 100644
index d8fd92677..000000000
--- a/pkg/sentry/vfs/genericfstree/BUILD
+++ /dev/null
@@ -1,16 +0,0 @@
-load("//tools/go_generics:defs.bzl", "go_template")
-
-package(
- default_visibility = ["//:sandbox"],
- licenses = ["notice"],
-)
-
-go_template(
- name = "generic_fstree",
- srcs = [
- "genericfstree.go",
- ],
- types = [
- "Dentry",
- ],
-)
diff --git a/pkg/sentry/vfs/genericfstree/genericfstree.go b/pkg/sentry/vfs/genericfstree/genericfstree.go
deleted file mode 100644
index ba6e6ed49..000000000
--- a/pkg/sentry/vfs/genericfstree/genericfstree.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package genericfstree provides tools for implementing vfs.FilesystemImpls
-// where a single statically-determined lock or set of locks is sufficient to
-// ensure that a Dentry's name and parent are contextually immutable.
-//
-// Clients using this package must use the go_template_instance rule in
-// tools/go_generics/defs.bzl to create an instantiation of this template
-// package, providing types to use in place of Dentry.
-package genericfstree
-
-import (
- "gvisor.dev/gvisor/pkg/fspath"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
-)
-
-// Dentry is a required type parameter that is a struct with the given fields.
-//
-// +stateify savable
-type Dentry struct {
- // vfsd is the embedded vfs.Dentry corresponding to this vfs.DentryImpl.
- vfsd vfs.Dentry
-
- // parent is the parent of this Dentry in the filesystem's tree. If this
- // Dentry is a filesystem root, parent is nil.
- parent *Dentry
-
- // name is the name of this Dentry in its parent. If this Dentry is a
- // filesystem root, name is unspecified.
- name string
-}
-
-// IsAncestorDentry returns true if d is an ancestor of d2; that is, d is
-// either d2's parent or an ancestor of d2's parent.
-func IsAncestorDentry(d, d2 *Dentry) bool {
- for d2 != nil { // Stop at root, where d2.parent == nil.
- if d2.parent == d {
- return true
- }
- if d2.parent == d2 {
- return false
- }
- d2 = d2.parent
- }
- return false
-}
-
-// ParentOrSelf returns d.parent. If d.parent is nil, ParentOrSelf returns d.
-func ParentOrSelf(d *Dentry) *Dentry {
- if d.parent != nil {
- return d.parent
- }
- return d
-}
-
-// PrependPath is a generic implementation of FilesystemImpl.PrependPath().
-func PrependPath(vfsroot vfs.VirtualDentry, mnt *vfs.Mount, d *Dentry, b *fspath.Builder) error {
- for {
- if mnt == vfsroot.Mount() && &d.vfsd == vfsroot.Dentry() {
- return vfs.PrependPathAtVFSRootError{}
- }
- if mnt != nil && &d.vfsd == mnt.Root() {
- return nil
- }
- if d.parent == nil {
- return vfs.PrependPathAtNonMountRootError{}
- }
- b.PrependComponent(d.name)
- d = d.parent
- }
-}
-
-// DebugPathname returns a pathname to d relative to its filesystem root.
-// DebugPathname does not correspond to any Linux function; it's used to
-// generate dentry pathnames for debugging.
-func DebugPathname(d *Dentry) string {
- var b fspath.Builder
- _ = PrependPath(vfs.VirtualDentry{}, nil, d, &b)
- return b.String()
-}
diff --git a/pkg/sentry/vfs/memxattr/BUILD b/pkg/sentry/vfs/memxattr/BUILD
deleted file mode 100644
index 444ab42b9..000000000
--- a/pkg/sentry/vfs/memxattr/BUILD
+++ /dev/null
@@ -1,16 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "memxattr",
- srcs = ["xattr.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/errors/linuxerr",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- ],
-)
diff --git a/pkg/sentry/vfs/memxattr/memxattr_state_autogen.go b/pkg/sentry/vfs/memxattr/memxattr_state_autogen.go
new file mode 100644
index 000000000..f75223725
--- /dev/null
+++ b/pkg/sentry/vfs/memxattr/memxattr_state_autogen.go
@@ -0,0 +1,36 @@
+// automatically generated by stateify.
+
+package memxattr
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (x *SimpleExtendedAttributes) StateTypeName() string {
+ return "pkg/sentry/vfs/memxattr.SimpleExtendedAttributes"
+}
+
+func (x *SimpleExtendedAttributes) StateFields() []string {
+ return []string{
+ "xattrs",
+ }
+}
+
+func (x *SimpleExtendedAttributes) beforeSave() {}
+
+// +checklocksignore
+func (x *SimpleExtendedAttributes) StateSave(stateSinkObject state.Sink) {
+ x.beforeSave()
+ stateSinkObject.Save(0, &x.xattrs)
+}
+
+func (x *SimpleExtendedAttributes) afterLoad() {}
+
+// +checklocksignore
+func (x *SimpleExtendedAttributes) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &x.xattrs)
+}
+
+func init() {
+ state.Register((*SimpleExtendedAttributes)(nil))
+}
diff --git a/pkg/sentry/vfs/mount_namespace_refs.go b/pkg/sentry/vfs/mount_namespace_refs.go
new file mode 100644
index 000000000..176733505
--- /dev/null
+++ b/pkg/sentry/vfs/mount_namespace_refs.go
@@ -0,0 +1,140 @@
+package vfs
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const MountNamespaceenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var MountNamespaceobj *MountNamespace
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type MountNamespaceRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *MountNamespaceRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *MountNamespaceRefs) RefType() string {
+ return fmt.Sprintf("%T", MountNamespaceobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *MountNamespaceRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *MountNamespaceRefs) LogRefs() bool {
+ return MountNamespaceenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *MountNamespaceRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *MountNamespaceRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if MountNamespaceenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.TryRefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *MountNamespaceRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if MountNamespaceenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees a non-zero real reference count]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *MountNamespaceRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if MountNamespaceenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
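
The interleaving sketched in the DecRef comment can be traced through the counter values; a sequential simulation (hypothetical, not real concurrency) using the same arithmetic as TryIncRef and DecRef:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	const speculativeRef = 1 << 32
	var refCount int64 = 1 // B holds the only real reference

	// A: TryIncRef takes a speculative reference; the real count is non-zero, so it proceeds.
	v := atomic.AddInt64(&refCount, speculativeRef)
	fmt.Printf("after A's speculative add: real=%d spec=%d\n", int32(v), v>>32)

	// B: DecRef drops the real reference. v is 1<<32, not 0, so destroy() is not called.
	v = atomic.AddInt64(&refCount, -1)
	fmt.Printf("after B's DecRef: real=%d spec=%d (v=%d, no destroy)\n", int32(v), v>>32, v)

	// A: TryIncRef converts its speculative reference into a real one; the object survives.
	v = atomic.AddInt64(&refCount, -speculativeRef+1)
	fmt.Printf("after A's conversion: real=%d spec=%d\n", int32(v), v>>32)
}
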
+
+func (r *MountNamespaceRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
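
Putting the generated pieces together, the intended lifecycle is InitRefs at construction, IncRef/DecRef while the object is shared, and a destructor passed to the final DecRef. A hedged usage sketch follows; thing and its methods are hypothetical stand-ins, and in practice each object type gets its own generated Refs type (as with FileDescriptionRefs and FilesystemRefs later in this diff):

package vfs // sketch only; assumes it sits alongside the generated code above

// thing is a hypothetical object guarded by the generated reference counter.
type thing struct {
	refs MountNamespaceRefs
}

func newThing() *thing {
	t := &thing{}
	t.refs.InitRefs() // count starts at one; the object is registered for leak checking
	return t
}

// IncRef takes an additional reference on behalf of a new holder.
func (t *thing) IncRef() { t.refs.IncRef() }

// DecRef drops a reference; the destructor runs exactly once, when the count reaches zero.
func (t *thing) DecRef() {
	t.refs.DecRef(func() {
		// Release resources held by t here.
	})
}
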
diff --git a/pkg/sentry/vfs/mount_test.go b/pkg/sentry/vfs/mount_test.go
deleted file mode 100644
index cb882a983..000000000
--- a/pkg/sentry/vfs/mount_test.go
+++ /dev/null
@@ -1,467 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package vfs
-
-import (
- "fmt"
- "runtime"
- "testing"
-
- "gvisor.dev/gvisor/pkg/sync"
-)
-
-func TestMountTableLookupEmpty(t *testing.T) {
- var mt mountTable
- mt.Init()
-
- parent := &Mount{}
- point := &Dentry{}
- if m := mt.Lookup(parent, point); m != nil {
- t.Errorf("Empty mountTable lookup: got %p, wanted nil", m)
- }
-}
-
-func TestMountTableInsertLookup(t *testing.T) {
- var mt mountTable
- mt.Init()
-
- mount := &Mount{}
- mount.setKey(VirtualDentry{&Mount{}, &Dentry{}})
- mt.Insert(mount)
-
- if m := mt.Lookup(mount.parent(), mount.point()); m != mount {
- t.Errorf("mountTable positive lookup: got %p, wanted %p", m, mount)
- }
-
- otherParent := &Mount{}
- if m := mt.Lookup(otherParent, mount.point()); m != nil {
- t.Errorf("mountTable lookup with wrong mount parent: got %p, wanted nil", m)
- }
- otherPoint := &Dentry{}
- if m := mt.Lookup(mount.parent(), otherPoint); m != nil {
- t.Errorf("mountTable lookup with wrong mount point: got %p, wanted nil", m)
- }
-}
-
-// TODO(gvisor.dev/issue/1035): concurrent lookup/insertion/removal.
-
-// must be powers of 2
-var benchNumMounts = []int{1 << 2, 1 << 5, 1 << 8}
-
-// For all of the following:
-//
-// - BenchmarkMountTableFoo tests usage pattern "Foo" for mountTable.
-//
-// - BenchmarkMountMapFoo tests usage pattern "Foo" for a
-// sync.RWMutex-protected map. (Mutator benchmarks do not use a RWMutex, since
-// mountTable also requires external synchronization between mutators.)
-//
-// - BenchmarkMountSyncMapFoo tests usage pattern "Foo" for a sync.Map.
-//
-// ParallelLookup is by far the most common and performance-sensitive operation
-// for this application. NegativeLookup is also important, but less so (only
-// relevant with multiple mount namespaces and significant differences in
-// mounts between them). Insertion and removal are benchmarked for
-// completeness.
-const enableComparativeBenchmarks = false
-
-func newBenchMount() *Mount {
- mount := &Mount{}
- mount.loadKey(VirtualDentry{&Mount{}, &Dentry{}})
- return mount
-}
-
-func BenchmarkMountTableParallelLookup(b *testing.B) {
- for numG, maxG := 1, runtime.GOMAXPROCS(0); numG >= 0 && numG <= maxG; numG *= 2 {
- for _, numMounts := range benchNumMounts {
- desc := fmt.Sprintf("%dx%d", numG, numMounts)
- b.Run(desc, func(b *testing.B) {
- var mt mountTable
- mt.Init()
- keys := make([]VirtualDentry, 0, numMounts)
- for i := 0; i < numMounts; i++ {
- mount := newBenchMount()
- mt.Insert(mount)
- keys = append(keys, mount.saveKey())
- }
-
- var ready sync.WaitGroup
- begin := make(chan struct{})
- var end sync.WaitGroup
- for g := 0; g < numG; g++ {
- ready.Add(1)
- end.Add(1)
- go func() {
- defer end.Done()
- ready.Done()
- <-begin
- for i := 0; i < b.N; i++ {
- k := keys[i&(numMounts-1)]
- m := mt.Lookup(k.mount, k.dentry)
- if m == nil {
- b.Errorf("Lookup failed")
- return
- }
- if parent := m.parent(); parent != k.mount {
- b.Errorf("Lookup returned mount with parent %p, wanted %p", parent, k.mount)
- return
- }
- if point := m.point(); point != k.dentry {
- b.Errorf("Lookup returned mount with point %p, wanted %p", point, k.dentry)
- return
- }
- }
- }()
- }
-
- ready.Wait()
- b.ResetTimer()
- close(begin)
- end.Wait()
- })
- }
- }
-}
-
-func BenchmarkMountMapParallelLookup(b *testing.B) {
- if !enableComparativeBenchmarks {
- b.Skipf("comparative benchmarks are disabled")
- }
-
- for numG, maxG := 1, runtime.GOMAXPROCS(0); numG >= 0 && numG <= maxG; numG *= 2 {
- for _, numMounts := range benchNumMounts {
- desc := fmt.Sprintf("%dx%d", numG, numMounts)
- b.Run(desc, func(b *testing.B) {
- var mu sync.RWMutex
- ms := make(map[VirtualDentry]*Mount)
- keys := make([]VirtualDentry, 0, numMounts)
- for i := 0; i < numMounts; i++ {
- mount := newBenchMount()
- key := mount.saveKey()
- ms[key] = mount
- keys = append(keys, key)
- }
-
- var ready sync.WaitGroup
- begin := make(chan struct{})
- var end sync.WaitGroup
- for g := 0; g < numG; g++ {
- ready.Add(1)
- end.Add(1)
- go func() {
- defer end.Done()
- ready.Done()
- <-begin
- for i := 0; i < b.N; i++ {
- k := keys[i&(numMounts-1)]
- mu.RLock()
- m := ms[k]
- mu.RUnlock()
- if m == nil {
- b.Errorf("Lookup failed")
- return
- }
- if parent := m.parent(); parent != k.mount {
- b.Errorf("Lookup returned mount with parent %p, wanted %p", parent, k.mount)
- return
- }
- if point := m.point(); point != k.dentry {
- b.Errorf("Lookup returned mount with point %p, wanted %p", point, k.dentry)
- return
- }
- }
- }()
- }
-
- ready.Wait()
- b.ResetTimer()
- close(begin)
- end.Wait()
- })
- }
- }
-}
-
-func BenchmarkMountSyncMapParallelLookup(b *testing.B) {
- if !enableComparativeBenchmarks {
- b.Skipf("comparative benchmarks are disabled")
- }
-
- for numG, maxG := 1, runtime.GOMAXPROCS(0); numG >= 0 && numG <= maxG; numG *= 2 {
- for _, numMounts := range benchNumMounts {
- desc := fmt.Sprintf("%dx%d", numG, numMounts)
- b.Run(desc, func(b *testing.B) {
- var ms sync.Map
- keys := make([]VirtualDentry, 0, numMounts)
- for i := 0; i < numMounts; i++ {
- mount := newBenchMount()
- key := mount.getKey()
- ms.Store(key, mount)
- keys = append(keys, key)
- }
-
- var ready sync.WaitGroup
- begin := make(chan struct{})
- var end sync.WaitGroup
- for g := 0; g < numG; g++ {
- ready.Add(1)
- end.Add(1)
- go func() {
- defer end.Done()
- ready.Done()
- <-begin
- for i := 0; i < b.N; i++ {
- k := keys[i&(numMounts-1)]
- mi, ok := ms.Load(k)
- if !ok {
- b.Errorf("Lookup failed")
- return
- }
- m := mi.(*Mount)
- if parent := m.parent(); parent != k.mount {
- b.Errorf("Lookup returned mount with parent %p, wanted %p", parent, k.mount)
- return
- }
- if point := m.point(); point != k.dentry {
- b.Errorf("Lookup returned mount with point %p, wanted %p", point, k.dentry)
- return
- }
- }
- }()
- }
-
- ready.Wait()
- b.ResetTimer()
- close(begin)
- end.Wait()
- })
- }
- }
-}
-
-func BenchmarkMountTableNegativeLookup(b *testing.B) {
- for _, numMounts := range benchNumMounts {
- desc := fmt.Sprintf("%d", numMounts)
- b.Run(desc, func(b *testing.B) {
- var mt mountTable
- mt.Init()
- for i := 0; i < numMounts; i++ {
- mt.Insert(newBenchMount())
- }
- negkeys := make([]VirtualDentry, 0, numMounts)
- for i := 0; i < numMounts; i++ {
- negkeys = append(negkeys, VirtualDentry{
- mount: &Mount{},
- dentry: &Dentry{},
- })
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- k := negkeys[i&(numMounts-1)]
- m := mt.Lookup(k.mount, k.dentry)
- if m != nil {
- b.Fatalf("Lookup got %p, wanted nil", m)
- }
- }
- })
- }
-}
-
-func BenchmarkMountMapNegativeLookup(b *testing.B) {
- if !enableComparativeBenchmarks {
- b.Skipf("comparative benchmarks are disabled")
- }
-
- for _, numMounts := range benchNumMounts {
- desc := fmt.Sprintf("%d", numMounts)
- b.Run(desc, func(b *testing.B) {
- var mu sync.RWMutex
- ms := make(map[VirtualDentry]*Mount)
- for i := 0; i < numMounts; i++ {
- mount := newBenchMount()
- ms[mount.getKey()] = mount
- }
- negkeys := make([]VirtualDentry, 0, numMounts)
- for i := 0; i < numMounts; i++ {
- negkeys = append(negkeys, VirtualDentry{
- mount: &Mount{},
- dentry: &Dentry{},
- })
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- k := negkeys[i&(numMounts-1)]
- mu.RLock()
- m := ms[k]
- mu.RUnlock()
- if m != nil {
- b.Fatalf("Lookup got %p, wanted nil", m)
- }
- }
- })
- }
-}
-
-func BenchmarkMountSyncMapNegativeLookup(b *testing.B) {
- if !enableComparativeBenchmarks {
- b.Skipf("comparative benchmarks are disabled")
- }
-
- for _, numMounts := range benchNumMounts {
- desc := fmt.Sprintf("%d", numMounts)
- b.Run(desc, func(b *testing.B) {
- var ms sync.Map
- for i := 0; i < numMounts; i++ {
- mount := newBenchMount()
- ms.Store(mount.saveKey(), mount)
- }
- negkeys := make([]VirtualDentry, 0, numMounts)
- for i := 0; i < numMounts; i++ {
- negkeys = append(negkeys, VirtualDentry{
- mount: &Mount{},
- dentry: &Dentry{},
- })
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- k := negkeys[i&(numMounts-1)]
- m, _ := ms.Load(k)
- if m != nil {
- b.Fatalf("Lookup got %p, wanted nil", m)
- }
- }
- })
- }
-}
-
-func BenchmarkMountTableInsert(b *testing.B) {
- // Preallocate Mounts so that allocation time isn't included in the
- // benchmark.
- mounts := make([]*Mount, 0, b.N)
- for i := 0; i < b.N; i++ {
- mounts = append(mounts, newBenchMount())
- }
-
- var mt mountTable
- mt.Init()
- b.ResetTimer()
- for i := range mounts {
- mt.Insert(mounts[i])
- }
-}
-
-func BenchmarkMountMapInsert(b *testing.B) {
- if !enableComparativeBenchmarks {
- b.Skipf("comparative benchmarks are disabled")
- }
-
- // Preallocate Mounts so that allocation time isn't included in the
- // benchmark.
- mounts := make([]*Mount, 0, b.N)
- for i := 0; i < b.N; i++ {
- mounts = append(mounts, newBenchMount())
- }
-
- ms := make(map[VirtualDentry]*Mount)
- b.ResetTimer()
- for i := range mounts {
- mount := mounts[i]
- ms[mount.saveKey()] = mount
- }
-}
-
-func BenchmarkMountSyncMapInsert(b *testing.B) {
- if !enableComparativeBenchmarks {
- b.Skipf("comparative benchmarks are disabled")
- }
-
- // Preallocate Mounts so that allocation time isn't included in the
- // benchmark.
- mounts := make([]*Mount, 0, b.N)
- for i := 0; i < b.N; i++ {
- mounts = append(mounts, newBenchMount())
- }
-
- var ms sync.Map
- b.ResetTimer()
- for i := range mounts {
- mount := mounts[i]
- ms.Store(mount.saveKey(), mount)
- }
-}
-
-func BenchmarkMountTableRemove(b *testing.B) {
- mounts := make([]*Mount, 0, b.N)
- for i := 0; i < b.N; i++ {
- mounts = append(mounts, newBenchMount())
- }
- var mt mountTable
- mt.Init()
- for i := range mounts {
- mt.Insert(mounts[i])
- }
-
- b.ResetTimer()
- for i := range mounts {
- mt.Remove(mounts[i])
- }
-}
-
-func BenchmarkMountMapRemove(b *testing.B) {
- if !enableComparativeBenchmarks {
- b.Skipf("comparative benchmarks are disabled")
- }
-
- mounts := make([]*Mount, 0, b.N)
- for i := 0; i < b.N; i++ {
- mounts = append(mounts, newBenchMount())
- }
- ms := make(map[VirtualDentry]*Mount)
- for i := range mounts {
- mount := mounts[i]
- ms[mount.saveKey()] = mount
- }
-
- b.ResetTimer()
- for i := range mounts {
- mount := mounts[i]
- delete(ms, mount.saveKey())
- }
-}
-
-func BenchmarkMountSyncMapRemove(b *testing.B) {
- if !enableComparativeBenchmarks {
- b.Skipf("comparative benchmarks are disabled")
- }
-
- mounts := make([]*Mount, 0, b.N)
- for i := 0; i < b.N; i++ {
- mounts = append(mounts, newBenchMount())
- }
- var ms sync.Map
- for i := range mounts {
- mount := mounts[i]
- ms.Store(mount.saveKey(), mount)
- }
-
- b.ResetTimer()
- for i := range mounts {
- mount := mounts[i]
- ms.Delete(mount.saveKey())
- }
-}
diff --git a/pkg/sentry/vfs/vfs_state_autogen.go b/pkg/sentry/vfs/vfs_state_autogen.go
new file mode 100644
index 000000000..364adcc0f
--- /dev/null
+++ b/pkg/sentry/vfs/vfs_state_autogen.go
@@ -0,0 +1,2052 @@
+// automatically generated by stateify.
+
+package vfs
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (a *anonFilesystemType) StateTypeName() string {
+ return "pkg/sentry/vfs.anonFilesystemType"
+}
+
+func (a *anonFilesystemType) StateFields() []string {
+ return []string{}
+}
+
+func (a *anonFilesystemType) beforeSave() {}
+
+// +checklocksignore
+func (a *anonFilesystemType) StateSave(stateSinkObject state.Sink) {
+ a.beforeSave()
+}
+
+func (a *anonFilesystemType) afterLoad() {}
+
+// +checklocksignore
+func (a *anonFilesystemType) StateLoad(stateSourceObject state.Source) {
+}
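
The generated save/restore methods in this file all follow the same pattern: StateFields declares the field order, StateSave writes each field to a state.Sink by index, and StateLoad reads them back from a state.Source (SaveValue/LoadValue and AfterLoad appear further below where a type needs custom hooks). A hedged, hand-written sketch of the same shape for a hypothetical two-field type:

package vfs // sketch only; exampleState and its fields are hypothetical

import "gvisor.dev/gvisor/pkg/state"

type exampleState struct {
	name  string
	count int64
}

func (e *exampleState) StateTypeName() string { return "pkg/sentry/vfs.exampleState" }

func (e *exampleState) StateFields() []string { return []string{"name", "count"} }

// StateSave writes each field to the sink at the index given by StateFields.
func (e *exampleState) StateSave(sink state.Sink) {
	sink.Save(0, &e.name)
	sink.Save(1, &e.count)
}

// StateLoad reads the fields back in the same order.
func (e *exampleState) StateLoad(source state.Source) {
	source.Load(0, &e.name)
	source.Load(1, &e.count)
}
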
+
+func (fs *anonFilesystem) StateTypeName() string {
+ return "pkg/sentry/vfs.anonFilesystem"
+}
+
+func (fs *anonFilesystem) StateFields() []string {
+ return []string{
+ "vfsfs",
+ "devMinor",
+ }
+}
+
+func (fs *anonFilesystem) beforeSave() {}
+
+// +checklocksignore
+func (fs *anonFilesystem) StateSave(stateSinkObject state.Sink) {
+ fs.beforeSave()
+ stateSinkObject.Save(0, &fs.vfsfs)
+ stateSinkObject.Save(1, &fs.devMinor)
+}
+
+func (fs *anonFilesystem) afterLoad() {}
+
+// +checklocksignore
+func (fs *anonFilesystem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fs.vfsfs)
+ stateSourceObject.Load(1, &fs.devMinor)
+}
+
+func (d *anonDentry) StateTypeName() string {
+ return "pkg/sentry/vfs.anonDentry"
+}
+
+func (d *anonDentry) StateFields() []string {
+ return []string{
+ "vfsd",
+ "name",
+ }
+}
+
+func (d *anonDentry) beforeSave() {}
+
+// +checklocksignore
+func (d *anonDentry) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.vfsd)
+ stateSinkObject.Save(1, &d.name)
+}
+
+func (d *anonDentry) afterLoad() {}
+
+// +checklocksignore
+func (d *anonDentry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.vfsd)
+ stateSourceObject.Load(1, &d.name)
+}
+
+func (d *Dentry) StateTypeName() string {
+ return "pkg/sentry/vfs.Dentry"
+}
+
+func (d *Dentry) StateFields() []string {
+ return []string{
+ "dead",
+ "mounts",
+ "impl",
+ }
+}
+
+func (d *Dentry) beforeSave() {}
+
+// +checklocksignore
+func (d *Dentry) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.dead)
+ stateSinkObject.Save(1, &d.mounts)
+ stateSinkObject.Save(2, &d.impl)
+}
+
+func (d *Dentry) afterLoad() {}
+
+// +checklocksignore
+func (d *Dentry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.dead)
+ stateSourceObject.Load(1, &d.mounts)
+ stateSourceObject.Load(2, &d.impl)
+}
+
+func (kind *DeviceKind) StateTypeName() string {
+ return "pkg/sentry/vfs.DeviceKind"
+}
+
+func (kind *DeviceKind) StateFields() []string {
+ return nil
+}
+
+func (d *devTuple) StateTypeName() string {
+ return "pkg/sentry/vfs.devTuple"
+}
+
+func (d *devTuple) StateFields() []string {
+ return []string{
+ "kind",
+ "major",
+ "minor",
+ }
+}
+
+func (d *devTuple) beforeSave() {}
+
+// +checklocksignore
+func (d *devTuple) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.kind)
+ stateSinkObject.Save(1, &d.major)
+ stateSinkObject.Save(2, &d.minor)
+}
+
+func (d *devTuple) afterLoad() {}
+
+// +checklocksignore
+func (d *devTuple) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.kind)
+ stateSourceObject.Load(1, &d.major)
+ stateSourceObject.Load(2, &d.minor)
+}
+
+func (r *registeredDevice) StateTypeName() string {
+ return "pkg/sentry/vfs.registeredDevice"
+}
+
+func (r *registeredDevice) StateFields() []string {
+ return []string{
+ "dev",
+ "opts",
+ }
+}
+
+func (r *registeredDevice) beforeSave() {}
+
+// +checklocksignore
+func (r *registeredDevice) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.dev)
+ stateSinkObject.Save(1, &r.opts)
+}
+
+func (r *registeredDevice) afterLoad() {}
+
+// +checklocksignore
+func (r *registeredDevice) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.dev)
+ stateSourceObject.Load(1, &r.opts)
+}
+
+func (r *RegisterDeviceOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.RegisterDeviceOptions"
+}
+
+func (r *RegisterDeviceOptions) StateFields() []string {
+ return []string{
+ "GroupName",
+ }
+}
+
+func (r *RegisterDeviceOptions) beforeSave() {}
+
+// +checklocksignore
+func (r *RegisterDeviceOptions) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.GroupName)
+}
+
+func (r *RegisterDeviceOptions) afterLoad() {}
+
+// +checklocksignore
+func (r *RegisterDeviceOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.GroupName)
+}
+
+func (ep *EpollInstance) StateTypeName() string {
+ return "pkg/sentry/vfs.EpollInstance"
+}
+
+func (ep *EpollInstance) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "DentryMetadataFileDescriptionImpl",
+ "NoLockFD",
+ "q",
+ "interest",
+ "ready",
+ }
+}
+
+func (ep *EpollInstance) beforeSave() {}
+
+// +checklocksignore
+func (ep *EpollInstance) StateSave(stateSinkObject state.Sink) {
+ ep.beforeSave()
+ stateSinkObject.Save(0, &ep.vfsfd)
+ stateSinkObject.Save(1, &ep.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &ep.DentryMetadataFileDescriptionImpl)
+ stateSinkObject.Save(3, &ep.NoLockFD)
+ stateSinkObject.Save(4, &ep.q)
+ stateSinkObject.Save(5, &ep.interest)
+ stateSinkObject.Save(6, &ep.ready)
+}
+
+func (ep *EpollInstance) afterLoad() {}
+
+// +checklocksignore
+func (ep *EpollInstance) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &ep.vfsfd)
+ stateSourceObject.Load(1, &ep.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &ep.DentryMetadataFileDescriptionImpl)
+ stateSourceObject.Load(3, &ep.NoLockFD)
+ stateSourceObject.Load(4, &ep.q)
+ stateSourceObject.Load(5, &ep.interest)
+ stateSourceObject.Load(6, &ep.ready)
+}
+
+func (e *epollInterestKey) StateTypeName() string {
+ return "pkg/sentry/vfs.epollInterestKey"
+}
+
+func (e *epollInterestKey) StateFields() []string {
+ return []string{
+ "file",
+ "num",
+ }
+}
+
+func (e *epollInterestKey) beforeSave() {}
+
+// +checklocksignore
+func (e *epollInterestKey) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.file)
+ stateSinkObject.Save(1, &e.num)
+}
+
+func (e *epollInterestKey) afterLoad() {}
+
+// +checklocksignore
+func (e *epollInterestKey) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.file)
+ stateSourceObject.Load(1, &e.num)
+}
+
+func (epi *epollInterest) StateTypeName() string {
+ return "pkg/sentry/vfs.epollInterest"
+}
+
+func (epi *epollInterest) StateFields() []string {
+ return []string{
+ "epoll",
+ "key",
+ "waiter",
+ "mask",
+ "ready",
+ "epollInterestEntry",
+ "userData",
+ }
+}
+
+func (epi *epollInterest) beforeSave() {}
+
+// +checklocksignore
+func (epi *epollInterest) StateSave(stateSinkObject state.Sink) {
+ epi.beforeSave()
+ stateSinkObject.Save(0, &epi.epoll)
+ stateSinkObject.Save(1, &epi.key)
+ stateSinkObject.Save(2, &epi.waiter)
+ stateSinkObject.Save(3, &epi.mask)
+ stateSinkObject.Save(4, &epi.ready)
+ stateSinkObject.Save(5, &epi.epollInterestEntry)
+ stateSinkObject.Save(6, &epi.userData)
+}
+
+// +checklocksignore
+func (epi *epollInterest) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.LoadWait(0, &epi.epoll)
+ stateSourceObject.Load(1, &epi.key)
+ stateSourceObject.Load(2, &epi.waiter)
+ stateSourceObject.Load(3, &epi.mask)
+ stateSourceObject.Load(4, &epi.ready)
+ stateSourceObject.Load(5, &epi.epollInterestEntry)
+ stateSourceObject.Load(6, &epi.userData)
+ stateSourceObject.AfterLoad(epi.afterLoad)
+}
+
+func (l *epollInterestList) StateTypeName() string {
+ return "pkg/sentry/vfs.epollInterestList"
+}
+
+func (l *epollInterestList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *epollInterestList) beforeSave() {}
+
+// +checklocksignore
+func (l *epollInterestList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *epollInterestList) afterLoad() {}
+
+// +checklocksignore
+func (l *epollInterestList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *epollInterestEntry) StateTypeName() string {
+ return "pkg/sentry/vfs.epollInterestEntry"
+}
+
+func (e *epollInterestEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *epollInterestEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *epollInterestEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *epollInterestEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *epollInterestEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func (l *eventList) StateTypeName() string {
+ return "pkg/sentry/vfs.eventList"
+}
+
+func (l *eventList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *eventList) beforeSave() {}
+
+// +checklocksignore
+func (l *eventList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *eventList) afterLoad() {}
+
+// +checklocksignore
+func (l *eventList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *eventEntry) StateTypeName() string {
+ return "pkg/sentry/vfs.eventEntry"
+}
+
+func (e *eventEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *eventEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *eventEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *eventEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *eventEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func (fd *FileDescription) StateTypeName() string {
+ return "pkg/sentry/vfs.FileDescription"
+}
+
+func (fd *FileDescription) StateFields() []string {
+ return []string{
+ "FileDescriptionRefs",
+ "statusFlags",
+ "asyncHandler",
+ "epolls",
+ "vd",
+ "opts",
+ "readable",
+ "writable",
+ "usedLockBSD",
+ "impl",
+ }
+}
+
+// +checklocksignore
+func (fd *FileDescription) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.FileDescriptionRefs)
+ stateSinkObject.Save(1, &fd.statusFlags)
+ stateSinkObject.Save(2, &fd.asyncHandler)
+ stateSinkObject.Save(3, &fd.epolls)
+ stateSinkObject.Save(4, &fd.vd)
+ stateSinkObject.Save(5, &fd.opts)
+ stateSinkObject.Save(6, &fd.readable)
+ stateSinkObject.Save(7, &fd.writable)
+ stateSinkObject.Save(8, &fd.usedLockBSD)
+ stateSinkObject.Save(9, &fd.impl)
+}
+
+// +checklocksignore
+func (fd *FileDescription) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.FileDescriptionRefs)
+ stateSourceObject.Load(1, &fd.statusFlags)
+ stateSourceObject.Load(2, &fd.asyncHandler)
+ stateSourceObject.Load(3, &fd.epolls)
+ stateSourceObject.Load(4, &fd.vd)
+ stateSourceObject.Load(5, &fd.opts)
+ stateSourceObject.Load(6, &fd.readable)
+ stateSourceObject.Load(7, &fd.writable)
+ stateSourceObject.Load(8, &fd.usedLockBSD)
+ stateSourceObject.Load(9, &fd.impl)
+ stateSourceObject.AfterLoad(fd.afterLoad)
+}
+
+func (f *FileDescriptionOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.FileDescriptionOptions"
+}
+
+func (f *FileDescriptionOptions) StateFields() []string {
+ return []string{
+ "AllowDirectIO",
+ "DenyPRead",
+ "DenyPWrite",
+ "UseDentryMetadata",
+ }
+}
+
+func (f *FileDescriptionOptions) beforeSave() {}
+
+// +checklocksignore
+func (f *FileDescriptionOptions) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.AllowDirectIO)
+ stateSinkObject.Save(1, &f.DenyPRead)
+ stateSinkObject.Save(2, &f.DenyPWrite)
+ stateSinkObject.Save(3, &f.UseDentryMetadata)
+}
+
+func (f *FileDescriptionOptions) afterLoad() {}
+
+// +checklocksignore
+func (f *FileDescriptionOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.AllowDirectIO)
+ stateSourceObject.Load(1, &f.DenyPRead)
+ stateSourceObject.Load(2, &f.DenyPWrite)
+ stateSourceObject.Load(3, &f.UseDentryMetadata)
+}
+
+func (d *Dirent) StateTypeName() string {
+ return "pkg/sentry/vfs.Dirent"
+}
+
+func (d *Dirent) StateFields() []string {
+ return []string{
+ "Name",
+ "Type",
+ "Ino",
+ "NextOff",
+ }
+}
+
+func (d *Dirent) beforeSave() {}
+
+// +checklocksignore
+func (d *Dirent) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.Name)
+ stateSinkObject.Save(1, &d.Type)
+ stateSinkObject.Save(2, &d.Ino)
+ stateSinkObject.Save(3, &d.NextOff)
+}
+
+func (d *Dirent) afterLoad() {}
+
+// +checklocksignore
+func (d *Dirent) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.Name)
+ stateSourceObject.Load(1, &d.Type)
+ stateSourceObject.Load(2, &d.Ino)
+ stateSourceObject.Load(3, &d.NextOff)
+}
+
+func (f *FileDescriptionDefaultImpl) StateTypeName() string {
+ return "pkg/sentry/vfs.FileDescriptionDefaultImpl"
+}
+
+func (f *FileDescriptionDefaultImpl) StateFields() []string {
+ return []string{}
+}
+
+func (f *FileDescriptionDefaultImpl) beforeSave() {}
+
+// +checklocksignore
+func (f *FileDescriptionDefaultImpl) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+}
+
+func (f *FileDescriptionDefaultImpl) afterLoad() {}
+
+// +checklocksignore
+func (f *FileDescriptionDefaultImpl) StateLoad(stateSourceObject state.Source) {
+}
+
+func (d *DirectoryFileDescriptionDefaultImpl) StateTypeName() string {
+ return "pkg/sentry/vfs.DirectoryFileDescriptionDefaultImpl"
+}
+
+func (d *DirectoryFileDescriptionDefaultImpl) StateFields() []string {
+ return []string{}
+}
+
+func (d *DirectoryFileDescriptionDefaultImpl) beforeSave() {}
+
+// +checklocksignore
+func (d *DirectoryFileDescriptionDefaultImpl) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+}
+
+func (d *DirectoryFileDescriptionDefaultImpl) afterLoad() {}
+
+// +checklocksignore
+func (d *DirectoryFileDescriptionDefaultImpl) StateLoad(stateSourceObject state.Source) {
+}
+
+func (d *DentryMetadataFileDescriptionImpl) StateTypeName() string {
+ return "pkg/sentry/vfs.DentryMetadataFileDescriptionImpl"
+}
+
+func (d *DentryMetadataFileDescriptionImpl) StateFields() []string {
+ return []string{}
+}
+
+func (d *DentryMetadataFileDescriptionImpl) beforeSave() {}
+
+// +checklocksignore
+func (d *DentryMetadataFileDescriptionImpl) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+}
+
+func (d *DentryMetadataFileDescriptionImpl) afterLoad() {}
+
+// +checklocksignore
+func (d *DentryMetadataFileDescriptionImpl) StateLoad(stateSourceObject state.Source) {
+}
+
+func (s *StaticData) StateTypeName() string {
+ return "pkg/sentry/vfs.StaticData"
+}
+
+func (s *StaticData) StateFields() []string {
+ return []string{
+ "Data",
+ }
+}
+
+func (s *StaticData) beforeSave() {}
+
+// +checklocksignore
+func (s *StaticData) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.Data)
+}
+
+func (s *StaticData) afterLoad() {}
+
+// +checklocksignore
+func (s *StaticData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.Data)
+}
+
+func (fd *DynamicBytesFileDescriptionImpl) StateTypeName() string {
+ return "pkg/sentry/vfs.DynamicBytesFileDescriptionImpl"
+}
+
+func (fd *DynamicBytesFileDescriptionImpl) StateFields() []string {
+ return []string{
+ "data",
+ "buf",
+ "off",
+ "lastRead",
+ }
+}
+
+func (fd *DynamicBytesFileDescriptionImpl) beforeSave() {}
+
+// +checklocksignore
+func (fd *DynamicBytesFileDescriptionImpl) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ var bufValue []byte = fd.saveBuf()
+ stateSinkObject.SaveValue(1, bufValue)
+ stateSinkObject.Save(0, &fd.data)
+ stateSinkObject.Save(2, &fd.off)
+ stateSinkObject.Save(3, &fd.lastRead)
+}
+
+func (fd *DynamicBytesFileDescriptionImpl) afterLoad() {}
+
+// +checklocksignore
+func (fd *DynamicBytesFileDescriptionImpl) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.data)
+ stateSourceObject.Load(2, &fd.off)
+ stateSourceObject.Load(3, &fd.lastRead)
+ stateSourceObject.LoadValue(1, new([]byte), func(y interface{}) { fd.loadBuf(y.([]byte)) })
+}
+
+func (fd *LockFD) StateTypeName() string {
+ return "pkg/sentry/vfs.LockFD"
+}
+
+func (fd *LockFD) StateFields() []string {
+ return []string{
+ "locks",
+ }
+}
+
+func (fd *LockFD) beforeSave() {}
+
+// +checklocksignore
+func (fd *LockFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.locks)
+}
+
+func (fd *LockFD) afterLoad() {}
+
+// +checklocksignore
+func (fd *LockFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.locks)
+}
+
+func (n *NoLockFD) StateTypeName() string {
+ return "pkg/sentry/vfs.NoLockFD"
+}
+
+func (n *NoLockFD) StateFields() []string {
+ return []string{}
+}
+
+func (n *NoLockFD) beforeSave() {}
+
+// +checklocksignore
+func (n *NoLockFD) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+}
+
+func (n *NoLockFD) afterLoad() {}
+
+// +checklocksignore
+func (n *NoLockFD) StateLoad(stateSourceObject state.Source) {
+}
+
+func (b *BadLockFD) StateTypeName() string {
+ return "pkg/sentry/vfs.BadLockFD"
+}
+
+func (b *BadLockFD) StateFields() []string {
+ return []string{}
+}
+
+func (b *BadLockFD) beforeSave() {}
+
+// +checklocksignore
+func (b *BadLockFD) StateSave(stateSinkObject state.Sink) {
+ b.beforeSave()
+}
+
+func (b *BadLockFD) afterLoad() {}
+
+// +checklocksignore
+func (b *BadLockFD) StateLoad(stateSourceObject state.Source) {
+}
+
+func (r *FileDescriptionRefs) StateTypeName() string {
+ return "pkg/sentry/vfs.FileDescriptionRefs"
+}
+
+func (r *FileDescriptionRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *FileDescriptionRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *FileDescriptionRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *FileDescriptionRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (fs *Filesystem) StateTypeName() string {
+ return "pkg/sentry/vfs.Filesystem"
+}
+
+func (fs *Filesystem) StateFields() []string {
+ return []string{
+ "FilesystemRefs",
+ "vfs",
+ "fsType",
+ "impl",
+ }
+}
+
+func (fs *Filesystem) beforeSave() {}
+
+// +checklocksignore
+func (fs *Filesystem) StateSave(stateSinkObject state.Sink) {
+ fs.beforeSave()
+ stateSinkObject.Save(0, &fs.FilesystemRefs)
+ stateSinkObject.Save(1, &fs.vfs)
+ stateSinkObject.Save(2, &fs.fsType)
+ stateSinkObject.Save(3, &fs.impl)
+}
+
+func (fs *Filesystem) afterLoad() {}
+
+// +checklocksignore
+func (fs *Filesystem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fs.FilesystemRefs)
+ stateSourceObject.Load(1, &fs.vfs)
+ stateSourceObject.Load(2, &fs.fsType)
+ stateSourceObject.Load(3, &fs.impl)
+}
+
+func (p *PrependPathAtVFSRootError) StateTypeName() string {
+ return "pkg/sentry/vfs.PrependPathAtVFSRootError"
+}
+
+func (p *PrependPathAtVFSRootError) StateFields() []string {
+ return []string{}
+}
+
+func (p *PrependPathAtVFSRootError) beforeSave() {}
+
+// +checklocksignore
+func (p *PrependPathAtVFSRootError) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+}
+
+func (p *PrependPathAtVFSRootError) afterLoad() {}
+
+// +checklocksignore
+func (p *PrependPathAtVFSRootError) StateLoad(stateSourceObject state.Source) {
+}
+
+func (p *PrependPathAtNonMountRootError) StateTypeName() string {
+ return "pkg/sentry/vfs.PrependPathAtNonMountRootError"
+}
+
+func (p *PrependPathAtNonMountRootError) StateFields() []string {
+ return []string{}
+}
+
+func (p *PrependPathAtNonMountRootError) beforeSave() {}
+
+// +checklocksignore
+func (p *PrependPathAtNonMountRootError) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+}
+
+func (p *PrependPathAtNonMountRootError) afterLoad() {}
+
+// +checklocksignore
+func (p *PrependPathAtNonMountRootError) StateLoad(stateSourceObject state.Source) {
+}
+
+func (p *PrependPathSyntheticError) StateTypeName() string {
+ return "pkg/sentry/vfs.PrependPathSyntheticError"
+}
+
+func (p *PrependPathSyntheticError) StateFields() []string {
+ return []string{}
+}
+
+func (p *PrependPathSyntheticError) beforeSave() {}
+
+// +checklocksignore
+func (p *PrependPathSyntheticError) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+}
+
+func (p *PrependPathSyntheticError) afterLoad() {}
+
+// +checklocksignore
+func (p *PrependPathSyntheticError) StateLoad(stateSourceObject state.Source) {
+}
+
+func (r *FilesystemRefs) StateTypeName() string {
+ return "pkg/sentry/vfs.FilesystemRefs"
+}
+
+func (r *FilesystemRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *FilesystemRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *FilesystemRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *FilesystemRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (r *registeredFilesystemType) StateTypeName() string {
+ return "pkg/sentry/vfs.registeredFilesystemType"
+}
+
+func (r *registeredFilesystemType) StateFields() []string {
+ return []string{
+ "fsType",
+ "opts",
+ }
+}
+
+func (r *registeredFilesystemType) beforeSave() {}
+
+// +checklocksignore
+func (r *registeredFilesystemType) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.fsType)
+ stateSinkObject.Save(1, &r.opts)
+}
+
+func (r *registeredFilesystemType) afterLoad() {}
+
+// +checklocksignore
+func (r *registeredFilesystemType) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.fsType)
+ stateSourceObject.Load(1, &r.opts)
+}
+
+func (r *RegisterFilesystemTypeOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.RegisterFilesystemTypeOptions"
+}
+
+func (r *RegisterFilesystemTypeOptions) StateFields() []string {
+ return []string{
+ "AllowUserMount",
+ "AllowUserList",
+ "RequiresDevice",
+ }
+}
+
+func (r *RegisterFilesystemTypeOptions) beforeSave() {}
+
+// +checklocksignore
+func (r *RegisterFilesystemTypeOptions) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.AllowUserMount)
+ stateSinkObject.Save(1, &r.AllowUserList)
+ stateSinkObject.Save(2, &r.RequiresDevice)
+}
+
+func (r *RegisterFilesystemTypeOptions) afterLoad() {}
+
+// +checklocksignore
+func (r *RegisterFilesystemTypeOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.AllowUserMount)
+ stateSourceObject.Load(1, &r.AllowUserList)
+ stateSourceObject.Load(2, &r.RequiresDevice)
+}
+
+func (e *EventType) StateTypeName() string {
+ return "pkg/sentry/vfs.EventType"
+}
+
+func (e *EventType) StateFields() []string {
+ return nil
+}
+
+func (i *Inotify) StateTypeName() string {
+ return "pkg/sentry/vfs.Inotify"
+}
+
+func (i *Inotify) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "DentryMetadataFileDescriptionImpl",
+ "NoLockFD",
+ "id",
+ "queue",
+ "events",
+ "scratch",
+ "nextWatchMinusOne",
+ "watches",
+ }
+}
+
+func (i *Inotify) beforeSave() {}
+
+// +checklocksignore
+func (i *Inotify) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.vfsfd)
+ stateSinkObject.Save(1, &i.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &i.DentryMetadataFileDescriptionImpl)
+ stateSinkObject.Save(3, &i.NoLockFD)
+ stateSinkObject.Save(4, &i.id)
+ stateSinkObject.Save(5, &i.queue)
+ stateSinkObject.Save(6, &i.events)
+ stateSinkObject.Save(7, &i.scratch)
+ stateSinkObject.Save(8, &i.nextWatchMinusOne)
+ stateSinkObject.Save(9, &i.watches)
+}
+
+func (i *Inotify) afterLoad() {}
+
+// +checklocksignore
+func (i *Inotify) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.vfsfd)
+ stateSourceObject.Load(1, &i.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &i.DentryMetadataFileDescriptionImpl)
+ stateSourceObject.Load(3, &i.NoLockFD)
+ stateSourceObject.Load(4, &i.id)
+ stateSourceObject.Load(5, &i.queue)
+ stateSourceObject.Load(6, &i.events)
+ stateSourceObject.Load(7, &i.scratch)
+ stateSourceObject.Load(8, &i.nextWatchMinusOne)
+ stateSourceObject.Load(9, &i.watches)
+}
+
+func (w *Watches) StateTypeName() string {
+ return "pkg/sentry/vfs.Watches"
+}
+
+func (w *Watches) StateFields() []string {
+ return []string{
+ "ws",
+ }
+}
+
+func (w *Watches) beforeSave() {}
+
+// +checklocksignore
+func (w *Watches) StateSave(stateSinkObject state.Sink) {
+ w.beforeSave()
+ stateSinkObject.Save(0, &w.ws)
+}
+
+func (w *Watches) afterLoad() {}
+
+// +checklocksignore
+func (w *Watches) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &w.ws)
+}
+
+func (w *Watch) StateTypeName() string {
+ return "pkg/sentry/vfs.Watch"
+}
+
+func (w *Watch) StateFields() []string {
+ return []string{
+ "owner",
+ "wd",
+ "target",
+ "mask",
+ "expired",
+ }
+}
+
+func (w *Watch) beforeSave() {}
+
+// +checklocksignore
+func (w *Watch) StateSave(stateSinkObject state.Sink) {
+ w.beforeSave()
+ stateSinkObject.Save(0, &w.owner)
+ stateSinkObject.Save(1, &w.wd)
+ stateSinkObject.Save(2, &w.target)
+ stateSinkObject.Save(3, &w.mask)
+ stateSinkObject.Save(4, &w.expired)
+}
+
+func (w *Watch) afterLoad() {}
+
+// +checklocksignore
+func (w *Watch) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &w.owner)
+ stateSourceObject.Load(1, &w.wd)
+ stateSourceObject.Load(2, &w.target)
+ stateSourceObject.Load(3, &w.mask)
+ stateSourceObject.Load(4, &w.expired)
+}
+
+func (e *Event) StateTypeName() string {
+ return "pkg/sentry/vfs.Event"
+}
+
+func (e *Event) StateFields() []string {
+ return []string{
+ "eventEntry",
+ "wd",
+ "mask",
+ "cookie",
+ "len",
+ "name",
+ }
+}
+
+func (e *Event) beforeSave() {}
+
+// +checklocksignore
+func (e *Event) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.eventEntry)
+ stateSinkObject.Save(1, &e.wd)
+ stateSinkObject.Save(2, &e.mask)
+ stateSinkObject.Save(3, &e.cookie)
+ stateSinkObject.Save(4, &e.len)
+ stateSinkObject.Save(5, &e.name)
+}
+
+func (e *Event) afterLoad() {}
+
+// +checklocksignore
+func (e *Event) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.eventEntry)
+ stateSourceObject.Load(1, &e.wd)
+ stateSourceObject.Load(2, &e.mask)
+ stateSourceObject.Load(3, &e.cookie)
+ stateSourceObject.Load(4, &e.len)
+ stateSourceObject.Load(5, &e.name)
+}
+
+func (fl *FileLocks) StateTypeName() string {
+ return "pkg/sentry/vfs.FileLocks"
+}
+
+func (fl *FileLocks) StateFields() []string {
+ return []string{
+ "bsd",
+ "posix",
+ }
+}
+
+func (fl *FileLocks) beforeSave() {}
+
+// +checklocksignore
+func (fl *FileLocks) StateSave(stateSinkObject state.Sink) {
+ fl.beforeSave()
+ stateSinkObject.Save(0, &fl.bsd)
+ stateSinkObject.Save(1, &fl.posix)
+}
+
+func (fl *FileLocks) afterLoad() {}
+
+// +checklocksignore
+func (fl *FileLocks) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fl.bsd)
+ stateSourceObject.Load(1, &fl.posix)
+}
+
+func (mnt *Mount) StateTypeName() string {
+ return "pkg/sentry/vfs.Mount"
+}
+
+func (mnt *Mount) StateFields() []string {
+ return []string{
+ "vfs",
+ "fs",
+ "root",
+ "ID",
+ "Flags",
+ "key",
+ "ns",
+ "refs",
+ "children",
+ "umounted",
+ "writers",
+ }
+}
+
+func (mnt *Mount) beforeSave() {}
+
+// +checklocksignore
+func (mnt *Mount) StateSave(stateSinkObject state.Sink) {
+ mnt.beforeSave()
+ var keyValue VirtualDentry = mnt.saveKey()
+ stateSinkObject.SaveValue(5, keyValue)
+ stateSinkObject.Save(0, &mnt.vfs)
+ stateSinkObject.Save(1, &mnt.fs)
+ stateSinkObject.Save(2, &mnt.root)
+ stateSinkObject.Save(3, &mnt.ID)
+ stateSinkObject.Save(4, &mnt.Flags)
+ stateSinkObject.Save(6, &mnt.ns)
+ stateSinkObject.Save(7, &mnt.refs)
+ stateSinkObject.Save(8, &mnt.children)
+ stateSinkObject.Save(9, &mnt.umounted)
+ stateSinkObject.Save(10, &mnt.writers)
+}
+
+// +checklocksignore
+func (mnt *Mount) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &mnt.vfs)
+ stateSourceObject.Load(1, &mnt.fs)
+ stateSourceObject.Load(2, &mnt.root)
+ stateSourceObject.Load(3, &mnt.ID)
+ stateSourceObject.Load(4, &mnt.Flags)
+ stateSourceObject.Load(6, &mnt.ns)
+ stateSourceObject.Load(7, &mnt.refs)
+ stateSourceObject.Load(8, &mnt.children)
+ stateSourceObject.Load(9, &mnt.umounted)
+ stateSourceObject.Load(10, &mnt.writers)
+ stateSourceObject.LoadValue(5, new(VirtualDentry), func(y interface{}) { mnt.loadKey(y.(VirtualDentry)) })
+ stateSourceObject.AfterLoad(mnt.afterLoad)
+}
+
+func (mntns *MountNamespace) StateTypeName() string {
+ return "pkg/sentry/vfs.MountNamespace"
+}
+
+func (mntns *MountNamespace) StateFields() []string {
+ return []string{
+ "MountNamespaceRefs",
+ "Owner",
+ "root",
+ "mountpoints",
+ }
+}
+
+func (mntns *MountNamespace) beforeSave() {}
+
+// +checklocksignore
+func (mntns *MountNamespace) StateSave(stateSinkObject state.Sink) {
+ mntns.beforeSave()
+ stateSinkObject.Save(0, &mntns.MountNamespaceRefs)
+ stateSinkObject.Save(1, &mntns.Owner)
+ stateSinkObject.Save(2, &mntns.root)
+ stateSinkObject.Save(3, &mntns.mountpoints)
+}
+
+func (mntns *MountNamespace) afterLoad() {}
+
+// +checklocksignore
+func (mntns *MountNamespace) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &mntns.MountNamespaceRefs)
+ stateSourceObject.Load(1, &mntns.Owner)
+ stateSourceObject.Load(2, &mntns.root)
+ stateSourceObject.Load(3, &mntns.mountpoints)
+}
+
+func (u *umountRecursiveOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.umountRecursiveOptions"
+}
+
+func (u *umountRecursiveOptions) StateFields() []string {
+ return []string{
+ "eager",
+ "disconnectHierarchy",
+ }
+}
+
+func (u *umountRecursiveOptions) beforeSave() {}
+
+// +checklocksignore
+func (u *umountRecursiveOptions) StateSave(stateSinkObject state.Sink) {
+ u.beforeSave()
+ stateSinkObject.Save(0, &u.eager)
+ stateSinkObject.Save(1, &u.disconnectHierarchy)
+}
+
+func (u *umountRecursiveOptions) afterLoad() {}
+
+// +checklocksignore
+func (u *umountRecursiveOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &u.eager)
+ stateSourceObject.Load(1, &u.disconnectHierarchy)
+}
+
+func (r *MountNamespaceRefs) StateTypeName() string {
+ return "pkg/sentry/vfs.MountNamespaceRefs"
+}
+
+func (r *MountNamespaceRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *MountNamespaceRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *MountNamespaceRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *MountNamespaceRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (fd *opathFD) StateTypeName() string {
+ return "pkg/sentry/vfs.opathFD"
+}
+
+func (fd *opathFD) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "BadLockFD",
+ }
+}
+
+func (fd *opathFD) beforeSave() {}
+
+// +checklocksignore
+func (fd *opathFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.vfsfd)
+ stateSinkObject.Save(1, &fd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &fd.BadLockFD)
+}
+
+func (fd *opathFD) afterLoad() {}
+
+// +checklocksignore
+func (fd *opathFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.vfsfd)
+ stateSourceObject.Load(1, &fd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &fd.BadLockFD)
+}
+
+func (g *GetDentryOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.GetDentryOptions"
+}
+
+func (g *GetDentryOptions) StateFields() []string {
+ return []string{
+ "CheckSearchable",
+ }
+}
+
+func (g *GetDentryOptions) beforeSave() {}
+
+// +checklocksignore
+func (g *GetDentryOptions) StateSave(stateSinkObject state.Sink) {
+ g.beforeSave()
+ stateSinkObject.Save(0, &g.CheckSearchable)
+}
+
+func (g *GetDentryOptions) afterLoad() {}
+
+// +checklocksignore
+func (g *GetDentryOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &g.CheckSearchable)
+}
+
+func (m *MkdirOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.MkdirOptions"
+}
+
+func (m *MkdirOptions) StateFields() []string {
+ return []string{
+ "Mode",
+ "ForSyntheticMountpoint",
+ }
+}
+
+func (m *MkdirOptions) beforeSave() {}
+
+// +checklocksignore
+func (m *MkdirOptions) StateSave(stateSinkObject state.Sink) {
+ m.beforeSave()
+ stateSinkObject.Save(0, &m.Mode)
+ stateSinkObject.Save(1, &m.ForSyntheticMountpoint)
+}
+
+func (m *MkdirOptions) afterLoad() {}
+
+// +checklocksignore
+func (m *MkdirOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &m.Mode)
+ stateSourceObject.Load(1, &m.ForSyntheticMountpoint)
+}
+
+func (m *MknodOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.MknodOptions"
+}
+
+func (m *MknodOptions) StateFields() []string {
+ return []string{
+ "Mode",
+ "DevMajor",
+ "DevMinor",
+ "Endpoint",
+ }
+}
+
+func (m *MknodOptions) beforeSave() {}
+
+// +checklocksignore
+func (m *MknodOptions) StateSave(stateSinkObject state.Sink) {
+ m.beforeSave()
+ stateSinkObject.Save(0, &m.Mode)
+ stateSinkObject.Save(1, &m.DevMajor)
+ stateSinkObject.Save(2, &m.DevMinor)
+ stateSinkObject.Save(3, &m.Endpoint)
+}
+
+func (m *MknodOptions) afterLoad() {}
+
+// +checklocksignore
+func (m *MknodOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &m.Mode)
+ stateSourceObject.Load(1, &m.DevMajor)
+ stateSourceObject.Load(2, &m.DevMinor)
+ stateSourceObject.Load(3, &m.Endpoint)
+}
+
+func (m *MountFlags) StateTypeName() string {
+ return "pkg/sentry/vfs.MountFlags"
+}
+
+func (m *MountFlags) StateFields() []string {
+ return []string{
+ "NoExec",
+ "NoATime",
+ "NoDev",
+ "NoSUID",
+ }
+}
+
+func (m *MountFlags) beforeSave() {}
+
+// +checklocksignore
+func (m *MountFlags) StateSave(stateSinkObject state.Sink) {
+ m.beforeSave()
+ stateSinkObject.Save(0, &m.NoExec)
+ stateSinkObject.Save(1, &m.NoATime)
+ stateSinkObject.Save(2, &m.NoDev)
+ stateSinkObject.Save(3, &m.NoSUID)
+}
+
+func (m *MountFlags) afterLoad() {}
+
+// +checklocksignore
+func (m *MountFlags) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &m.NoExec)
+ stateSourceObject.Load(1, &m.NoATime)
+ stateSourceObject.Load(2, &m.NoDev)
+ stateSourceObject.Load(3, &m.NoSUID)
+}
+
+func (m *MountOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.MountOptions"
+}
+
+func (m *MountOptions) StateFields() []string {
+ return []string{
+ "Flags",
+ "ReadOnly",
+ "GetFilesystemOptions",
+ "InternalMount",
+ }
+}
+
+func (m *MountOptions) beforeSave() {}
+
+// +checklocksignore
+func (m *MountOptions) StateSave(stateSinkObject state.Sink) {
+ m.beforeSave()
+ stateSinkObject.Save(0, &m.Flags)
+ stateSinkObject.Save(1, &m.ReadOnly)
+ stateSinkObject.Save(2, &m.GetFilesystemOptions)
+ stateSinkObject.Save(3, &m.InternalMount)
+}
+
+func (m *MountOptions) afterLoad() {}
+
+// +checklocksignore
+func (m *MountOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &m.Flags)
+ stateSourceObject.Load(1, &m.ReadOnly)
+ stateSourceObject.Load(2, &m.GetFilesystemOptions)
+ stateSourceObject.Load(3, &m.InternalMount)
+}
+
+func (o *OpenOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.OpenOptions"
+}
+
+func (o *OpenOptions) StateFields() []string {
+ return []string{
+ "Flags",
+ "Mode",
+ "FileExec",
+ }
+}
+
+func (o *OpenOptions) beforeSave() {}
+
+// +checklocksignore
+func (o *OpenOptions) StateSave(stateSinkObject state.Sink) {
+ o.beforeSave()
+ stateSinkObject.Save(0, &o.Flags)
+ stateSinkObject.Save(1, &o.Mode)
+ stateSinkObject.Save(2, &o.FileExec)
+}
+
+func (o *OpenOptions) afterLoad() {}
+
+// +checklocksignore
+func (o *OpenOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &o.Flags)
+ stateSourceObject.Load(1, &o.Mode)
+ stateSourceObject.Load(2, &o.FileExec)
+}
+
+func (r *ReadOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.ReadOptions"
+}
+
+func (r *ReadOptions) StateFields() []string {
+ return []string{
+ "Flags",
+ }
+}
+
+func (r *ReadOptions) beforeSave() {}
+
+// +checklocksignore
+func (r *ReadOptions) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.Flags)
+}
+
+func (r *ReadOptions) afterLoad() {}
+
+// +checklocksignore
+func (r *ReadOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.Flags)
+}
+
+func (r *RenameOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.RenameOptions"
+}
+
+func (r *RenameOptions) StateFields() []string {
+ return []string{
+ "Flags",
+ "MustBeDir",
+ }
+}
+
+func (r *RenameOptions) beforeSave() {}
+
+// +checklocksignore
+func (r *RenameOptions) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.Flags)
+ stateSinkObject.Save(1, &r.MustBeDir)
+}
+
+func (r *RenameOptions) afterLoad() {}
+
+// +checklocksignore
+func (r *RenameOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.Flags)
+ stateSourceObject.Load(1, &r.MustBeDir)
+}
+
+func (s *SetStatOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.SetStatOptions"
+}
+
+func (s *SetStatOptions) StateFields() []string {
+ return []string{
+ "Stat",
+ "NeedWritePerm",
+ }
+}
+
+func (s *SetStatOptions) beforeSave() {}
+
+// +checklocksignore
+func (s *SetStatOptions) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.Stat)
+ stateSinkObject.Save(1, &s.NeedWritePerm)
+}
+
+func (s *SetStatOptions) afterLoad() {}
+
+// +checklocksignore
+func (s *SetStatOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.Stat)
+ stateSourceObject.Load(1, &s.NeedWritePerm)
+}
+
+func (b *BoundEndpointOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.BoundEndpointOptions"
+}
+
+func (b *BoundEndpointOptions) StateFields() []string {
+ return []string{
+ "Addr",
+ }
+}
+
+func (b *BoundEndpointOptions) beforeSave() {}
+
+// +checklocksignore
+func (b *BoundEndpointOptions) StateSave(stateSinkObject state.Sink) {
+ b.beforeSave()
+ stateSinkObject.Save(0, &b.Addr)
+}
+
+func (b *BoundEndpointOptions) afterLoad() {}
+
+// +checklocksignore
+func (b *BoundEndpointOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &b.Addr)
+}
+
+func (g *GetXattrOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.GetXattrOptions"
+}
+
+func (g *GetXattrOptions) StateFields() []string {
+ return []string{
+ "Name",
+ "Size",
+ }
+}
+
+func (g *GetXattrOptions) beforeSave() {}
+
+// +checklocksignore
+func (g *GetXattrOptions) StateSave(stateSinkObject state.Sink) {
+ g.beforeSave()
+ stateSinkObject.Save(0, &g.Name)
+ stateSinkObject.Save(1, &g.Size)
+}
+
+func (g *GetXattrOptions) afterLoad() {}
+
+// +checklocksignore
+func (g *GetXattrOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &g.Name)
+ stateSourceObject.Load(1, &g.Size)
+}
+
+func (s *SetXattrOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.SetXattrOptions"
+}
+
+func (s *SetXattrOptions) StateFields() []string {
+ return []string{
+ "Name",
+ "Value",
+ "Flags",
+ }
+}
+
+func (s *SetXattrOptions) beforeSave() {}
+
+// +checklocksignore
+func (s *SetXattrOptions) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.Name)
+ stateSinkObject.Save(1, &s.Value)
+ stateSinkObject.Save(2, &s.Flags)
+}
+
+func (s *SetXattrOptions) afterLoad() {}
+
+// +checklocksignore
+func (s *SetXattrOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.Name)
+ stateSourceObject.Load(1, &s.Value)
+ stateSourceObject.Load(2, &s.Flags)
+}
+
+func (s *StatOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.StatOptions"
+}
+
+func (s *StatOptions) StateFields() []string {
+ return []string{
+ "Mask",
+ "Sync",
+ }
+}
+
+func (s *StatOptions) beforeSave() {}
+
+// +checklocksignore
+func (s *StatOptions) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.Mask)
+ stateSinkObject.Save(1, &s.Sync)
+}
+
+func (s *StatOptions) afterLoad() {}
+
+// +checklocksignore
+func (s *StatOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.Mask)
+ stateSourceObject.Load(1, &s.Sync)
+}
+
+func (u *UmountOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.UmountOptions"
+}
+
+func (u *UmountOptions) StateFields() []string {
+ return []string{
+ "Flags",
+ }
+}
+
+func (u *UmountOptions) beforeSave() {}
+
+// +checklocksignore
+func (u *UmountOptions) StateSave(stateSinkObject state.Sink) {
+ u.beforeSave()
+ stateSinkObject.Save(0, &u.Flags)
+}
+
+func (u *UmountOptions) afterLoad() {}
+
+// +checklocksignore
+func (u *UmountOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &u.Flags)
+}
+
+func (w *WriteOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.WriteOptions"
+}
+
+func (w *WriteOptions) StateFields() []string {
+ return []string{
+ "Flags",
+ }
+}
+
+func (w *WriteOptions) beforeSave() {}
+
+// +checklocksignore
+func (w *WriteOptions) StateSave(stateSinkObject state.Sink) {
+ w.beforeSave()
+ stateSinkObject.Save(0, &w.Flags)
+}
+
+func (w *WriteOptions) afterLoad() {}
+
+// +checklocksignore
+func (w *WriteOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &w.Flags)
+}
+
+func (a *AccessTypes) StateTypeName() string {
+ return "pkg/sentry/vfs.AccessTypes"
+}
+
+func (a *AccessTypes) StateFields() []string {
+ return nil
+}
+
+func (rp *ResolvingPath) StateTypeName() string {
+ return "pkg/sentry/vfs.ResolvingPath"
+}
+
+func (rp *ResolvingPath) StateFields() []string {
+ return []string{
+ "vfs",
+ "root",
+ "mount",
+ "start",
+ "pit",
+ "flags",
+ "mustBeDir",
+ "symlinks",
+ "curPart",
+ "creds",
+ "nextMount",
+ "nextStart",
+ "absSymlinkTarget",
+ "parts",
+ }
+}
+
+func (rp *ResolvingPath) beforeSave() {}
+
+// +checklocksignore
+func (rp *ResolvingPath) StateSave(stateSinkObject state.Sink) {
+ rp.beforeSave()
+ stateSinkObject.Save(0, &rp.vfs)
+ stateSinkObject.Save(1, &rp.root)
+ stateSinkObject.Save(2, &rp.mount)
+ stateSinkObject.Save(3, &rp.start)
+ stateSinkObject.Save(4, &rp.pit)
+ stateSinkObject.Save(5, &rp.flags)
+ stateSinkObject.Save(6, &rp.mustBeDir)
+ stateSinkObject.Save(7, &rp.symlinks)
+ stateSinkObject.Save(8, &rp.curPart)
+ stateSinkObject.Save(9, &rp.creds)
+ stateSinkObject.Save(10, &rp.nextMount)
+ stateSinkObject.Save(11, &rp.nextStart)
+ stateSinkObject.Save(12, &rp.absSymlinkTarget)
+ stateSinkObject.Save(13, &rp.parts)
+}
+
+func (rp *ResolvingPath) afterLoad() {}
+
+// +checklocksignore
+func (rp *ResolvingPath) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &rp.vfs)
+ stateSourceObject.Load(1, &rp.root)
+ stateSourceObject.Load(2, &rp.mount)
+ stateSourceObject.Load(3, &rp.start)
+ stateSourceObject.Load(4, &rp.pit)
+ stateSourceObject.Load(5, &rp.flags)
+ stateSourceObject.Load(6, &rp.mustBeDir)
+ stateSourceObject.Load(7, &rp.symlinks)
+ stateSourceObject.Load(8, &rp.curPart)
+ stateSourceObject.Load(9, &rp.creds)
+ stateSourceObject.Load(10, &rp.nextMount)
+ stateSourceObject.Load(11, &rp.nextStart)
+ stateSourceObject.Load(12, &rp.absSymlinkTarget)
+ stateSourceObject.Load(13, &rp.parts)
+}
+
+func (r *resolveMountRootOrJumpError) StateTypeName() string {
+ return "pkg/sentry/vfs.resolveMountRootOrJumpError"
+}
+
+func (r *resolveMountRootOrJumpError) StateFields() []string {
+ return []string{}
+}
+
+func (r *resolveMountRootOrJumpError) beforeSave() {}
+
+// +checklocksignore
+func (r *resolveMountRootOrJumpError) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+}
+
+func (r *resolveMountRootOrJumpError) afterLoad() {}
+
+// +checklocksignore
+func (r *resolveMountRootOrJumpError) StateLoad(stateSourceObject state.Source) {
+}
+
+func (r *resolveMountPointError) StateTypeName() string {
+ return "pkg/sentry/vfs.resolveMountPointError"
+}
+
+func (r *resolveMountPointError) StateFields() []string {
+ return []string{}
+}
+
+func (r *resolveMountPointError) beforeSave() {}
+
+// +checklocksignore
+func (r *resolveMountPointError) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+}
+
+func (r *resolveMountPointError) afterLoad() {}
+
+// +checklocksignore
+func (r *resolveMountPointError) StateLoad(stateSourceObject state.Source) {
+}
+
+func (r *resolveAbsSymlinkError) StateTypeName() string {
+ return "pkg/sentry/vfs.resolveAbsSymlinkError"
+}
+
+func (r *resolveAbsSymlinkError) StateFields() []string {
+ return []string{}
+}
+
+func (r *resolveAbsSymlinkError) beforeSave() {}
+
+// +checklocksignore
+func (r *resolveAbsSymlinkError) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+}
+
+func (r *resolveAbsSymlinkError) afterLoad() {}
+
+// +checklocksignore
+func (r *resolveAbsSymlinkError) StateLoad(stateSourceObject state.Source) {
+}
+
+func (vfs *VirtualFilesystem) StateTypeName() string {
+ return "pkg/sentry/vfs.VirtualFilesystem"
+}
+
+func (vfs *VirtualFilesystem) StateFields() []string {
+ return []string{
+ "mounts",
+ "mountpoints",
+ "lastMountID",
+ "anonMount",
+ "devices",
+ "anonBlockDevMinorNext",
+ "anonBlockDevMinor",
+ "fsTypes",
+ "filesystems",
+ }
+}
+
+func (vfs *VirtualFilesystem) beforeSave() {}
+
+// +checklocksignore
+func (vfs *VirtualFilesystem) StateSave(stateSinkObject state.Sink) {
+ vfs.beforeSave()
+ var mountsValue []*Mount = vfs.saveMounts()
+ stateSinkObject.SaveValue(0, mountsValue)
+ stateSinkObject.Save(1, &vfs.mountpoints)
+ stateSinkObject.Save(2, &vfs.lastMountID)
+ stateSinkObject.Save(3, &vfs.anonMount)
+ stateSinkObject.Save(4, &vfs.devices)
+ stateSinkObject.Save(5, &vfs.anonBlockDevMinorNext)
+ stateSinkObject.Save(6, &vfs.anonBlockDevMinor)
+ stateSinkObject.Save(7, &vfs.fsTypes)
+ stateSinkObject.Save(8, &vfs.filesystems)
+}
+
+func (vfs *VirtualFilesystem) afterLoad() {}
+
+// +checklocksignore
+func (vfs *VirtualFilesystem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(1, &vfs.mountpoints)
+ stateSourceObject.Load(2, &vfs.lastMountID)
+ stateSourceObject.Load(3, &vfs.anonMount)
+ stateSourceObject.Load(4, &vfs.devices)
+ stateSourceObject.Load(5, &vfs.anonBlockDevMinorNext)
+ stateSourceObject.Load(6, &vfs.anonBlockDevMinor)
+ stateSourceObject.Load(7, &vfs.fsTypes)
+ stateSourceObject.Load(8, &vfs.filesystems)
+ stateSourceObject.LoadValue(0, new([]*Mount), func(y interface{}) { vfs.loadMounts(y.([]*Mount)) })
+}
+
+func (p *PathOperation) StateTypeName() string {
+ return "pkg/sentry/vfs.PathOperation"
+}
+
+func (p *PathOperation) StateFields() []string {
+ return []string{
+ "Root",
+ "Start",
+ "Path",
+ "FollowFinalSymlink",
+ }
+}
+
+func (p *PathOperation) beforeSave() {}
+
+// +checklocksignore
+func (p *PathOperation) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+ stateSinkObject.Save(0, &p.Root)
+ stateSinkObject.Save(1, &p.Start)
+ stateSinkObject.Save(2, &p.Path)
+ stateSinkObject.Save(3, &p.FollowFinalSymlink)
+}
+
+func (p *PathOperation) afterLoad() {}
+
+// +checklocksignore
+func (p *PathOperation) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &p.Root)
+ stateSourceObject.Load(1, &p.Start)
+ stateSourceObject.Load(2, &p.Path)
+ stateSourceObject.Load(3, &p.FollowFinalSymlink)
+}
+
+func (vd *VirtualDentry) StateTypeName() string {
+ return "pkg/sentry/vfs.VirtualDentry"
+}
+
+func (vd *VirtualDentry) StateFields() []string {
+ return []string{
+ "mount",
+ "dentry",
+ }
+}
+
+func (vd *VirtualDentry) beforeSave() {}
+
+// +checklocksignore
+func (vd *VirtualDentry) StateSave(stateSinkObject state.Sink) {
+ vd.beforeSave()
+ stateSinkObject.Save(0, &vd.mount)
+ stateSinkObject.Save(1, &vd.dentry)
+}
+
+func (vd *VirtualDentry) afterLoad() {}
+
+// +checklocksignore
+func (vd *VirtualDentry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &vd.mount)
+ stateSourceObject.Load(1, &vd.dentry)
+}
+
+func init() {
+ state.Register((*anonFilesystemType)(nil))
+ state.Register((*anonFilesystem)(nil))
+ state.Register((*anonDentry)(nil))
+ state.Register((*Dentry)(nil))
+ state.Register((*DeviceKind)(nil))
+ state.Register((*devTuple)(nil))
+ state.Register((*registeredDevice)(nil))
+ state.Register((*RegisterDeviceOptions)(nil))
+ state.Register((*EpollInstance)(nil))
+ state.Register((*epollInterestKey)(nil))
+ state.Register((*epollInterest)(nil))
+ state.Register((*epollInterestList)(nil))
+ state.Register((*epollInterestEntry)(nil))
+ state.Register((*eventList)(nil))
+ state.Register((*eventEntry)(nil))
+ state.Register((*FileDescription)(nil))
+ state.Register((*FileDescriptionOptions)(nil))
+ state.Register((*Dirent)(nil))
+ state.Register((*FileDescriptionDefaultImpl)(nil))
+ state.Register((*DirectoryFileDescriptionDefaultImpl)(nil))
+ state.Register((*DentryMetadataFileDescriptionImpl)(nil))
+ state.Register((*StaticData)(nil))
+ state.Register((*DynamicBytesFileDescriptionImpl)(nil))
+ state.Register((*LockFD)(nil))
+ state.Register((*NoLockFD)(nil))
+ state.Register((*BadLockFD)(nil))
+ state.Register((*FileDescriptionRefs)(nil))
+ state.Register((*Filesystem)(nil))
+ state.Register((*PrependPathAtVFSRootError)(nil))
+ state.Register((*PrependPathAtNonMountRootError)(nil))
+ state.Register((*PrependPathSyntheticError)(nil))
+ state.Register((*FilesystemRefs)(nil))
+ state.Register((*registeredFilesystemType)(nil))
+ state.Register((*RegisterFilesystemTypeOptions)(nil))
+ state.Register((*EventType)(nil))
+ state.Register((*Inotify)(nil))
+ state.Register((*Watches)(nil))
+ state.Register((*Watch)(nil))
+ state.Register((*Event)(nil))
+ state.Register((*FileLocks)(nil))
+ state.Register((*Mount)(nil))
+ state.Register((*MountNamespace)(nil))
+ state.Register((*umountRecursiveOptions)(nil))
+ state.Register((*MountNamespaceRefs)(nil))
+ state.Register((*opathFD)(nil))
+ state.Register((*GetDentryOptions)(nil))
+ state.Register((*MkdirOptions)(nil))
+ state.Register((*MknodOptions)(nil))
+ state.Register((*MountFlags)(nil))
+ state.Register((*MountOptions)(nil))
+ state.Register((*OpenOptions)(nil))
+ state.Register((*ReadOptions)(nil))
+ state.Register((*RenameOptions)(nil))
+ state.Register((*SetStatOptions)(nil))
+ state.Register((*BoundEndpointOptions)(nil))
+ state.Register((*GetXattrOptions)(nil))
+ state.Register((*SetXattrOptions)(nil))
+ state.Register((*StatOptions)(nil))
+ state.Register((*UmountOptions)(nil))
+ state.Register((*WriteOptions)(nil))
+ state.Register((*AccessTypes)(nil))
+ state.Register((*ResolvingPath)(nil))
+ state.Register((*resolveMountRootOrJumpError)(nil))
+ state.Register((*resolveMountPointError)(nil))
+ state.Register((*resolveAbsSymlinkError)(nil))
+ state.Register((*VirtualFilesystem)(nil))
+ state.Register((*PathOperation)(nil))
+ state.Register((*VirtualDentry)(nil))
+}
diff --git a/pkg/sentry/vfs/vfs_unsafe_state_autogen.go b/pkg/sentry/vfs/vfs_unsafe_state_autogen.go
new file mode 100644
index 000000000..20f06c953
--- /dev/null
+++ b/pkg/sentry/vfs/vfs_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package vfs
diff --git a/pkg/sentry/watchdog/BUILD b/pkg/sentry/watchdog/BUILD
deleted file mode 100644
index 1c5a1c9b6..000000000
--- a/pkg/sentry/watchdog/BUILD
+++ /dev/null
@@ -1,17 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "watchdog",
- srcs = ["watchdog.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/log",
- "//pkg/metric",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/time",
- "//pkg/sync",
- ],
-)
diff --git a/pkg/sentry/watchdog/watchdog_state_autogen.go b/pkg/sentry/watchdog/watchdog_state_autogen.go
new file mode 100644
index 000000000..bce0200e7
--- /dev/null
+++ b/pkg/sentry/watchdog/watchdog_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package watchdog
diff --git a/pkg/shim/BUILD b/pkg/shim/BUILD
deleted file mode 100644
index 367765209..000000000
--- a/pkg/shim/BUILD
+++ /dev/null
@@ -1,60 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "shim",
- srcs = [
- "api.go",
- "debug.go",
- "epoll.go",
- "options.go",
- "service.go",
- "service_linux.go",
- "state.go",
- ],
- visibility = ["//shim:__subpackages__"],
- deps = [
- "//pkg/cleanup",
- "//pkg/shim/proc",
- "//pkg/shim/runsc",
- "//pkg/shim/runtimeoptions",
- "//pkg/shim/utils",
- "//runsc/specutils",
- "@com_github_burntsushi_toml//:go_default_library",
- "@com_github_containerd_cgroups//:go_default_library",
- "@com_github_containerd_cgroups//stats/v1:go_default_library",
- "@com_github_containerd_console//:go_default_library",
- "@com_github_containerd_containerd//api/events:go_default_library",
- "@com_github_containerd_containerd//api/types/task:go_default_library",
- "@com_github_containerd_containerd//errdefs:go_default_library",
- "@com_github_containerd_containerd//events:go_default_library",
- "@com_github_containerd_containerd//log:go_default_library",
- "@com_github_containerd_containerd//mount:go_default_library",
- "@com_github_containerd_containerd//namespaces:go_default_library",
- "@com_github_containerd_containerd//pkg/process:go_default_library",
- "@com_github_containerd_containerd//pkg/stdio:go_default_library",
- "@com_github_containerd_containerd//runtime:go_default_library",
- "@com_github_containerd_containerd//runtime/linux/runctypes:go_default_library",
- "@com_github_containerd_containerd//runtime/v2/shim:go_default_library",
- "@com_github_containerd_containerd//runtime/v2/task:go_default_library",
- "@com_github_containerd_containerd//sys/reaper:go_default_library",
- "@com_github_containerd_fifo//:go_default_library",
- "@com_github_containerd_typeurl//:go_default_library",
- "@com_github_gogo_protobuf//types:go_default_library",
- "@com_github_opencontainers_runtime_spec//specs-go:go_default_library",
- "@com_github_sirupsen_logrus//:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "shim_test",
- size = "small",
- srcs = ["service_test.go"],
- library = ":shim",
- deps = [
- "//pkg/shim/utils",
- "@com_github_opencontainers_runtime_spec//specs-go:go_default_library",
- ],
-)
diff --git a/pkg/shim/proc/BUILD b/pkg/shim/proc/BUILD
deleted file mode 100644
index c8527a6d9..000000000
--- a/pkg/shim/proc/BUILD
+++ /dev/null
@@ -1,38 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "proc",
- srcs = [
- "deleted_state.go",
- "exec.go",
- "exec_state.go",
- "init.go",
- "init_state.go",
- "io.go",
- "proc.go",
- "types.go",
- "utils.go",
- ],
- visibility = [
- "//pkg/shim:__subpackages__",
- "//shim:__subpackages__",
- ],
- deps = [
- "//pkg/cleanup",
- "//pkg/shim/runsc",
- "//pkg/shim/utils",
- "@com_github_containerd_console//:go_default_library",
- "@com_github_containerd_containerd//errdefs:go_default_library",
- "@com_github_containerd_containerd//log:go_default_library",
- "@com_github_containerd_containerd//mount:go_default_library",
- "@com_github_containerd_containerd//pkg/process:go_default_library",
- "@com_github_containerd_containerd//pkg/stdio:go_default_library",
- "@com_github_containerd_fifo//:go_default_library",
- "@com_github_containerd_go_runc//:go_default_library",
- "@com_github_gogo_protobuf//types:go_default_library",
- "@com_github_opencontainers_runtime_spec//specs-go:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/shim/proc/proc_state_autogen.go b/pkg/shim/proc/proc_state_autogen.go
new file mode 100644
index 000000000..210252d9d
--- /dev/null
+++ b/pkg/shim/proc/proc_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package proc
diff --git a/pkg/shim/runsc/BUILD b/pkg/shim/runsc/BUILD
deleted file mode 100644
index f2fc813e6..000000000
--- a/pkg/shim/runsc/BUILD
+++ /dev/null
@@ -1,18 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "runsc",
- srcs = [
- "runsc.go",
- "utils.go",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "@com_github_containerd_containerd//log:go_default_library",
- "@com_github_containerd_go_runc//:go_default_library",
- "@com_github_opencontainers_runtime_spec//specs-go:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/shim/runsc/runsc_state_autogen.go b/pkg/shim/runsc/runsc_state_autogen.go
new file mode 100644
index 000000000..ee470594f
--- /dev/null
+++ b/pkg/shim/runsc/runsc_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package runsc
diff --git a/pkg/shim/runtimeoptions/BUILD b/pkg/shim/runtimeoptions/BUILD
deleted file mode 100644
index 029be7c09..000000000
--- a/pkg/shim/runtimeoptions/BUILD
+++ /dev/null
@@ -1,32 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test", "proto_library")
-
-package(licenses = ["notice"])
-
-proto_library(
- name = "api",
- srcs = [
- "runtimeoptions.proto",
- ],
-)
-
-go_library(
- name = "runtimeoptions",
- srcs = [
- "runtimeoptions.go",
- "runtimeoptions_cri.go",
- ],
- visibility = ["//pkg/shim:__pkg__"],
- deps = ["@com_github_gogo_protobuf//proto:go_default_library"],
-)
-
-go_test(
- name = "runtimeoptions_test",
- size = "small",
- srcs = ["runtimeoptions_test.go"],
- library = ":runtimeoptions",
- deps = [
- "@com_github_containerd_containerd//runtime/v1/shim/v1:go_default_library",
- "@com_github_containerd_typeurl//:go_default_library",
- "@com_github_gogo_protobuf//proto:go_default_library",
- ],
-)
diff --git a/pkg/shim/runtimeoptions/runtimeoptions.proto b/pkg/shim/runtimeoptions/runtimeoptions.proto
deleted file mode 100644
index 057032e34..000000000
--- a/pkg/shim/runtimeoptions/runtimeoptions.proto
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package cri.runtimeoptions.v1;
-
-// This is a version of the runtimeoptions CRI API that is vendored.
-//
-// Importing the full CRI package is a nightmare.
-message Options {
- string type_url = 1;
- string config_path = 2;
-}
diff --git a/pkg/shim/runtimeoptions/runtimeoptions_state_autogen.go b/pkg/shim/runtimeoptions/runtimeoptions_state_autogen.go
new file mode 100644
index 000000000..346542871
--- /dev/null
+++ b/pkg/shim/runtimeoptions/runtimeoptions_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build go1.1
+
+package runtimeoptions
diff --git a/pkg/shim/runtimeoptions/runtimeoptions_test.go b/pkg/shim/runtimeoptions/runtimeoptions_test.go
deleted file mode 100644
index c59a2400e..000000000
--- a/pkg/shim/runtimeoptions/runtimeoptions_test.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package runtimeoptions
-
-import (
- "bytes"
- "testing"
-
- shim "github.com/containerd/containerd/runtime/v1/shim/v1"
- "github.com/containerd/typeurl"
- "github.com/gogo/protobuf/proto"
-)
-
-func TestCreateTaskRequest(t *testing.T) {
- // Serialize the top-level message.
- const encodedText = `options: <
- type_url: "cri.runtimeoptions.v1.Options"
- value: "\n\010type_url\022\013config_path"
->`
- got := &shim.CreateTaskRequest{} // Should have raw options.
- if err := proto.UnmarshalText(encodedText, got); err != nil {
- t.Fatalf("unable to unmarshal text: %v", err)
- }
- var textBuffer bytes.Buffer
- if err := proto.MarshalText(&textBuffer, got); err != nil {
- t.Errorf("unable to marshal text: %v", err)
- }
-	t.Logf("got: %s", textBuffer.String())
-
- // Check the options.
- wantOptions := &Options{}
- wantOptions.TypeUrl = "type_url"
- wantOptions.ConfigPath = "config_path"
- gotMessage, err := typeurl.UnmarshalAny(got.Options)
- if err != nil {
- t.Fatalf("unable to unmarshal any: %v", err)
- }
- gotOptions, ok := gotMessage.(*Options)
- if !ok {
- t.Fatalf("got %v, want %v", gotMessage, wantOptions)
- }
- if !proto.Equal(gotOptions, wantOptions) {
- t.Fatalf("got %v, want %v", gotOptions, wantOptions)
- }
-}
diff --git a/pkg/shim/service_test.go b/pkg/shim/service_test.go
deleted file mode 100644
index 2d9f07e02..000000000
--- a/pkg/shim/service_test.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package shim
-
-import (
- "testing"
-
- specs "github.com/opencontainers/runtime-spec/specs-go"
- "gvisor.dev/gvisor/pkg/shim/utils"
-)
-
-func TestCgroupPath(t *testing.T) {
- for _, tc := range []struct {
- name string
- path string
- want string
- }{
- {
- name: "simple",
- path: "foo/pod123/container",
- want: "foo/pod123",
- },
- {
- name: "absolute",
- path: "/foo/pod123/container",
- want: "/foo/pod123",
- },
- {
- name: "no-container",
- path: "foo/pod123",
- want: "foo/pod123",
- },
- {
- name: "no-container-absolute",
- path: "/foo/pod123",
- want: "/foo/pod123",
- },
- {
- name: "double-pod",
- path: "/foo/podium/pod123/container",
- want: "/foo/podium/pod123",
- },
- {
- name: "start-pod",
- path: "pod123/container",
- want: "pod123",
- },
- {
- name: "start-pod-absolute",
- path: "/pod123/container",
- want: "/pod123",
- },
- {
- name: "slashes",
- path: "///foo/////pod123//////container",
- want: "/foo/pod123",
- },
- {
- name: "no-pod",
- path: "/foo/nopod123/container",
- want: "/foo/nopod123/container",
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- spec := specs.Spec{
- Linux: &specs.Linux{
- CgroupsPath: tc.path,
- },
- }
- updated := updateCgroup(&spec)
- if spec.Linux.CgroupsPath != tc.want {
- t.Errorf("updateCgroup(%q), want: %q, got: %q", tc.path, tc.want, spec.Linux.CgroupsPath)
- }
- if shouldUpdate := tc.path != tc.want; shouldUpdate != updated {
- t.Errorf("updateCgroup(%q)=%v, want: %v", tc.path, updated, shouldUpdate)
- }
- })
- }
-}
-
-// Test cases that cgroup path should not be updated.
-func TestCgroupNoUpdate(t *testing.T) {
- for _, tc := range []struct {
- name string
- spec *specs.Spec
- }{
- {
- name: "empty",
- spec: &specs.Spec{},
- },
- {
- name: "subcontainer",
- spec: &specs.Spec{
- Linux: &specs.Linux{
- CgroupsPath: "foo/pod123/container",
- },
- Annotations: map[string]string{
- utils.ContainerTypeAnnotation: utils.ContainerTypeContainer,
- },
- },
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- if updated := updateCgroup(tc.spec); updated {
- t.Errorf("updateCgroup(%+v), got: %v, want: false", tc.spec.Linux, updated)
- }
- })
- }
-}
diff --git a/pkg/shim/shim_linux_state_autogen.go b/pkg/shim/shim_linux_state_autogen.go
new file mode 100644
index 000000000..191a9e496
--- /dev/null
+++ b/pkg/shim/shim_linux_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build linux
+
+package shim
diff --git a/pkg/shim/shim_state_autogen.go b/pkg/shim/shim_state_autogen.go
new file mode 100644
index 000000000..191a9e496
--- /dev/null
+++ b/pkg/shim/shim_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build linux
+
+package shim
diff --git a/pkg/shim/utils/BUILD b/pkg/shim/utils/BUILD
deleted file mode 100644
index 2eb82f63c..000000000
--- a/pkg/shim/utils/BUILD
+++ /dev/null
@@ -1,37 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "utils",
- srcs = [
- "annotations.go",
- "errors.go",
- "utils.go",
- "volumes.go",
- ],
- visibility = [
- "//pkg/shim:__subpackages__",
- "//shim:__subpackages__",
- ],
- deps = [
- "@com_github_containerd_containerd//errdefs:go_default_library",
- "@com_github_opencontainers_runtime_spec//specs-go:go_default_library",
- "@org_golang_google_grpc//codes:go_default_library",
- "@org_golang_google_grpc//status:go_default_library",
- ],
-)
-
-go_test(
- name = "utils_test",
- size = "small",
- srcs = [
- "errors_test.go",
- "volumes_test.go",
- ],
- library = ":utils",
- deps = [
- "@com_github_containerd_containerd//errdefs:go_default_library",
- "@com_github_opencontainers_runtime_spec//specs-go:go_default_library",
- ],
-)
diff --git a/pkg/shim/utils/errors_test.go b/pkg/shim/utils/errors_test.go
deleted file mode 100644
index 0a8fe34c8..000000000
--- a/pkg/shim/utils/errors_test.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package utils
-
-import (
- "fmt"
- "testing"
-
- "github.com/containerd/containerd/errdefs"
-)
-
-func TestGRPCRoundTripsErrors(t *testing.T) {
- for _, tc := range []struct {
- name string
- err error
- test func(err error) bool
- }{
- {
- name: "passthrough",
- err: errdefs.ErrNotFound,
- test: errdefs.IsNotFound,
- },
- {
- name: "wrapped",
- err: fmt.Errorf("oh no: %w", errdefs.ErrNotFound),
- test: errdefs.IsNotFound,
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- if err := errdefs.FromGRPC(ErrToGRPC(tc.err)); !tc.test(err) {
- t.Errorf("errToGRPC got %+v", err)
- }
- if err := errdefs.FromGRPC(ErrToGRPCf(tc.err, "testing %s", "123")); !tc.test(err) {
- t.Errorf("errToGRPCf got %+v", err)
- }
- })
- }
-}
diff --git a/pkg/shim/utils/utils_state_autogen.go b/pkg/shim/utils/utils_state_autogen.go
new file mode 100644
index 000000000..dba8bfb1a
--- /dev/null
+++ b/pkg/shim/utils/utils_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package utils
diff --git a/pkg/shim/utils/volumes_test.go b/pkg/shim/utils/volumes_test.go
deleted file mode 100644
index 5db43cdf1..000000000
--- a/pkg/shim/utils/volumes_test.go
+++ /dev/null
@@ -1,330 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package utils
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "reflect"
- "testing"
-
- specs "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-func TestUpdateVolumeAnnotations(t *testing.T) {
- dir, err := ioutil.TempDir("", "test-update-volume-annotations")
- if err != nil {
- t.Fatalf("create tempdir: %v", err)
- }
- defer os.RemoveAll(dir)
- kubeletPodsDir = dir
-
- const (
- testPodUID = "testuid"
- testVolumeName = "testvolume"
- testLogDirPath = "/var/log/pods/testns_testname_" + testPodUID
- testLegacyLogDirPath = "/var/log/pods/" + testPodUID
- )
- testVolumePath := fmt.Sprintf("%s/%s/volumes/kubernetes.io~empty-dir/%s", dir, testPodUID, testVolumeName)
-
- if err := os.MkdirAll(testVolumePath, 0755); err != nil {
- t.Fatalf("Create test volume: %v", err)
- }
-
- for _, test := range []struct {
- name string
- spec *specs.Spec
- expected *specs.Spec
- expectErr bool
- expectUpdate bool
- }{
- {
- name: "volume annotations for sandbox",
- spec: &specs.Spec{
- Annotations: map[string]string{
- sandboxLogDirAnnotation: testLogDirPath,
- ContainerTypeAnnotation: containerTypeSandbox,
- volumeKeyPrefix + testVolumeName + ".share": "pod",
- volumeKeyPrefix + testVolumeName + ".type": "tmpfs",
- volumeKeyPrefix + testVolumeName + ".options": "ro",
- },
- },
- expected: &specs.Spec{
- Annotations: map[string]string{
- sandboxLogDirAnnotation: testLogDirPath,
- ContainerTypeAnnotation: containerTypeSandbox,
- volumeKeyPrefix + testVolumeName + ".share": "pod",
- volumeKeyPrefix + testVolumeName + ".type": "tmpfs",
- volumeKeyPrefix + testVolumeName + ".options": "ro",
- volumeKeyPrefix + testVolumeName + ".source": testVolumePath,
- },
- },
- expectUpdate: true,
- },
- {
- name: "volume annotations for sandbox with legacy log path",
- spec: &specs.Spec{
- Annotations: map[string]string{
- sandboxLogDirAnnotation: testLegacyLogDirPath,
- ContainerTypeAnnotation: containerTypeSandbox,
- volumeKeyPrefix + testVolumeName + ".share": "pod",
- volumeKeyPrefix + testVolumeName + ".type": "tmpfs",
- volumeKeyPrefix + testVolumeName + ".options": "ro",
- },
- },
- expected: &specs.Spec{
- Annotations: map[string]string{
- sandboxLogDirAnnotation: testLegacyLogDirPath,
- ContainerTypeAnnotation: containerTypeSandbox,
- volumeKeyPrefix + testVolumeName + ".share": "pod",
- volumeKeyPrefix + testVolumeName + ".type": "tmpfs",
- volumeKeyPrefix + testVolumeName + ".options": "ro",
- volumeKeyPrefix + testVolumeName + ".source": testVolumePath,
- },
- },
- expectUpdate: true,
- },
- {
- name: "tmpfs: volume annotations for container",
- spec: &specs.Spec{
- Mounts: []specs.Mount{
- {
- Destination: "/test",
- Type: "bind",
- Source: testVolumePath,
- Options: []string{"ro"},
- },
- {
- Destination: "/random",
- Type: "bind",
- Source: "/random",
- Options: []string{"ro"},
- },
- },
- Annotations: map[string]string{
- ContainerTypeAnnotation: ContainerTypeContainer,
- volumeKeyPrefix + testVolumeName + ".share": "pod",
- volumeKeyPrefix + testVolumeName + ".type": "tmpfs",
- volumeKeyPrefix + testVolumeName + ".options": "ro",
- },
- },
- expected: &specs.Spec{
- Mounts: []specs.Mount{
- {
- Destination: "/test",
- Type: "tmpfs",
- Source: testVolumePath,
- Options: []string{"ro"},
- },
- {
- Destination: "/random",
- Type: "bind",
- Source: "/random",
- Options: []string{"ro"},
- },
- },
- Annotations: map[string]string{
- ContainerTypeAnnotation: ContainerTypeContainer,
- volumeKeyPrefix + testVolumeName + ".share": "pod",
- volumeKeyPrefix + testVolumeName + ".type": "tmpfs",
- volumeKeyPrefix + testVolumeName + ".options": "ro",
- },
- },
- expectUpdate: true,
- },
- {
- name: "bind: volume annotations for container",
- spec: &specs.Spec{
- Mounts: []specs.Mount{
- {
- Destination: "/test",
- Type: "bind",
- Source: testVolumePath,
- Options: []string{"ro"},
- },
- },
- Annotations: map[string]string{
- ContainerTypeAnnotation: ContainerTypeContainer,
- volumeKeyPrefix + testVolumeName + ".share": "container",
- volumeKeyPrefix + testVolumeName + ".type": "bind",
- volumeKeyPrefix + testVolumeName + ".options": "ro",
- },
- },
- expected: &specs.Spec{
- Mounts: []specs.Mount{
- {
- Destination: "/test",
- Type: "bind",
- Source: testVolumePath,
- Options: []string{"ro"},
- },
- },
- Annotations: map[string]string{
- ContainerTypeAnnotation: ContainerTypeContainer,
- volumeKeyPrefix + testVolumeName + ".share": "container",
- volumeKeyPrefix + testVolumeName + ".type": "bind",
- volumeKeyPrefix + testVolumeName + ".options": "ro",
- },
- },
- expectUpdate: true,
- },
- {
- name: "should not return error without pod log directory",
- spec: &specs.Spec{
- Annotations: map[string]string{
- ContainerTypeAnnotation: containerTypeSandbox,
- volumeKeyPrefix + testVolumeName + ".share": "pod",
- volumeKeyPrefix + testVolumeName + ".type": "tmpfs",
- volumeKeyPrefix + testVolumeName + ".options": "ro",
- },
- },
- expected: &specs.Spec{
- Annotations: map[string]string{
- ContainerTypeAnnotation: containerTypeSandbox,
- volumeKeyPrefix + testVolumeName + ".share": "pod",
- volumeKeyPrefix + testVolumeName + ".type": "tmpfs",
- volumeKeyPrefix + testVolumeName + ".options": "ro",
- },
- },
- },
- {
- name: "should return error if volume path does not exist",
- spec: &specs.Spec{
- Annotations: map[string]string{
- sandboxLogDirAnnotation: testLogDirPath,
- ContainerTypeAnnotation: containerTypeSandbox,
- volumeKeyPrefix + "notexist.share": "pod",
- volumeKeyPrefix + "notexist.type": "tmpfs",
- volumeKeyPrefix + "notexist.options": "ro",
- },
- },
- expectErr: true,
- },
- {
- name: "no volume annotations for sandbox",
- spec: &specs.Spec{
- Annotations: map[string]string{
- sandboxLogDirAnnotation: testLogDirPath,
- ContainerTypeAnnotation: containerTypeSandbox,
- },
- },
- expected: &specs.Spec{
- Annotations: map[string]string{
- sandboxLogDirAnnotation: testLogDirPath,
- ContainerTypeAnnotation: containerTypeSandbox,
- },
- },
- },
- {
- name: "no volume annotations for container",
- spec: &specs.Spec{
- Mounts: []specs.Mount{
- {
- Destination: "/test",
- Type: "bind",
- Source: "/test",
- Options: []string{"ro"},
- },
- {
- Destination: "/random",
- Type: "bind",
- Source: "/random",
- Options: []string{"ro"},
- },
- },
- Annotations: map[string]string{
- ContainerTypeAnnotation: ContainerTypeContainer,
- },
- },
- expected: &specs.Spec{
- Mounts: []specs.Mount{
- {
- Destination: "/test",
- Type: "bind",
- Source: "/test",
- Options: []string{"ro"},
- },
- {
- Destination: "/random",
- Type: "bind",
- Source: "/random",
- Options: []string{"ro"},
- },
- },
- Annotations: map[string]string{
- ContainerTypeAnnotation: ContainerTypeContainer,
- },
- },
- },
- {
- name: "bind options removed",
- spec: &specs.Spec{
- Annotations: map[string]string{
- ContainerTypeAnnotation: ContainerTypeContainer,
- volumeKeyPrefix + testVolumeName + ".share": "pod",
- volumeKeyPrefix + testVolumeName + ".type": "tmpfs",
- volumeKeyPrefix + testVolumeName + ".options": "ro",
- volumeKeyPrefix + testVolumeName + ".source": testVolumePath,
- },
- Mounts: []specs.Mount{
- {
- Destination: "/dst",
- Type: "bind",
- Source: testVolumePath,
- Options: []string{"ro", "bind", "rbind"},
- },
- },
- },
- expected: &specs.Spec{
- Annotations: map[string]string{
- ContainerTypeAnnotation: ContainerTypeContainer,
- volumeKeyPrefix + testVolumeName + ".share": "pod",
- volumeKeyPrefix + testVolumeName + ".type": "tmpfs",
- volumeKeyPrefix + testVolumeName + ".options": "ro",
- volumeKeyPrefix + testVolumeName + ".source": testVolumePath,
- },
- Mounts: []specs.Mount{
- {
- Destination: "/dst",
- Type: "tmpfs",
- Source: testVolumePath,
- Options: []string{"ro"},
- },
- },
- },
- expectUpdate: true,
- },
- } {
- t.Run(test.name, func(t *testing.T) {
- updated, err := UpdateVolumeAnnotations(test.spec)
- if test.expectErr {
- if err == nil {
- t.Fatal("Expected error, but got nil")
- }
- return
- }
- if err != nil {
- t.Fatalf("Unexpected error: %v", err)
- }
- if !reflect.DeepEqual(test.expected, test.spec) {
- t.Fatalf("Expected %+v, got %+v", test.expected, test.spec)
- }
- if test.expectUpdate != updated {
-			t.Errorf("Expected %v, got %v", test.expectUpdate, updated)
- }
- })
- }
-}
diff --git a/pkg/sleep/BUILD b/pkg/sleep/BUILD
deleted file mode 100644
index 48bcdd62b..000000000
--- a/pkg/sleep/BUILD
+++ /dev/null
@@ -1,21 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "sleep",
- srcs = [
- "sleep_unsafe.go",
- ],
- visibility = ["//:sandbox"],
- deps = ["//pkg/sync"],
-)
-
-go_test(
- name = "sleep_test",
- size = "medium",
- srcs = [
- "sleep_test.go",
- ],
- library = ":sleep",
-)
diff --git a/pkg/sleep/sleep_test.go b/pkg/sleep/sleep_test.go
deleted file mode 100644
index 1dd11707d..000000000
--- a/pkg/sleep/sleep_test.go
+++ /dev/null
@@ -1,567 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package sleep
-
-import (
- "math/rand"
- "runtime"
- "testing"
- "time"
-)
-
-// ZeroWakerNotAsserted tests that a zero-value waker is in non-asserted state.
-func ZeroWakerNotAsserted(t *testing.T) {
- var w Waker
- if w.IsAsserted() {
- t.Fatalf("Zero waker is asserted")
- }
-
- if w.Clear() {
- t.Fatalf("Zero waker is asserted")
- }
-}
-
-// AssertedWakerAfterAssert tests that a waker properly reports its state as
-// asserted once its Assert() method is called.
-func AssertedWakerAfterAssert(t *testing.T) {
- var w Waker
- w.Assert()
- if !w.IsAsserted() {
- t.Fatalf("Asserted waker is not reported as such")
- }
-
- if !w.Clear() {
- t.Fatalf("Asserted waker is not reported as such")
- }
-}
-
-// AssertedWakerAfterTwoAsserts tests that a waker properly reports its state as
-// asserted once its Assert() method is called twice.
-func AssertedWakerAfterTwoAsserts(t *testing.T) {
- var w Waker
- w.Assert()
- w.Assert()
- if !w.IsAsserted() {
- t.Fatalf("Asserted waker is not reported as such")
- }
-
- if !w.Clear() {
- t.Fatalf("Asserted waker is not reported as such")
- }
-}
-
-// NotAssertedWakerWithSleeper tests that a waker properly reports its state as
-// not asserted after a sleeper is associated with it.
-func NotAssertedWakerWithSleeper(t *testing.T) {
- var w Waker
- var s Sleeper
- s.AddWaker(&w, 0)
- if w.IsAsserted() {
- t.Fatalf("Non-asserted waker is reported as asserted")
- }
-
- if w.Clear() {
- t.Fatalf("Non-asserted waker is reported as asserted")
- }
-}
-
-// NotAssertedWakerAfterWake tests that a waker properly reports its state as
-// not asserted after a previous assert is consumed by a sleeper. That is, tests
-// the "edge-triggered" behavior.
-func NotAssertedWakerAfterWake(t *testing.T) {
- var w Waker
- var s Sleeper
- s.AddWaker(&w, 0)
- w.Assert()
- s.Fetch(true)
- if w.IsAsserted() {
- t.Fatalf("Consumed waker is reported as asserted")
- }
-
- if w.Clear() {
- t.Fatalf("Consumed waker is reported as asserted")
- }
-}
-
-// AssertedWakerBeforeAdd tests that a waker causes a sleeper to not sleep if
-// it's already asserted before being added.
-func AssertedWakerBeforeAdd(t *testing.T) {
- var w Waker
- var s Sleeper
- w.Assert()
- s.AddWaker(&w, 0)
-
- if _, ok := s.Fetch(false); !ok {
- t.Fatalf("Fetch failed even though asserted waker was added")
- }
-}
-
-// ClearedWaker tests that a waker properly reports its state as not asserted
-// after it is cleared.
-func ClearedWaker(t *testing.T) {
- var w Waker
- w.Assert()
- w.Clear()
- if w.IsAsserted() {
- t.Fatalf("Cleared waker is reported as asserted")
- }
-
- if w.Clear() {
- t.Fatalf("Cleared waker is reported as asserted")
- }
-}
-
-// ClearedWakerWithSleeper tests that a waker properly reports its state as
-// not asserted when it is cleared while it has a sleeper associated with it.
-func ClearedWakerWithSleeper(t *testing.T) {
- var w Waker
- var s Sleeper
- s.AddWaker(&w, 0)
- w.Clear()
- if w.IsAsserted() {
- t.Fatalf("Cleared waker is reported as asserted")
- }
-
- if w.Clear() {
- t.Fatalf("Cleared waker is reported as asserted")
- }
-}
-
-// ClearedWakerAssertedWithSleeper tests that a waker properly reports its state
-// as not asserted when it is cleared while it has a sleeper associated with it
-// and has been asserted.
-func ClearedWakerAssertedWithSleeper(t *testing.T) {
- var w Waker
- var s Sleeper
- s.AddWaker(&w, 0)
- w.Assert()
- w.Clear()
- if w.IsAsserted() {
- t.Fatalf("Cleared waker is reported as asserted")
- }
-
- if w.Clear() {
- t.Fatalf("Cleared waker is reported as asserted")
- }
-}
-
-// TestBlock tests that a sleeper actually blocks waiting for the waker to
-// assert its state.
-func TestBlock(t *testing.T) {
- var w Waker
- var s Sleeper
-
- s.AddWaker(&w, 0)
-
- // Assert waker after one second.
- before := time.Now()
- go func() {
- time.Sleep(1 * time.Second)
- w.Assert()
- }()
-
- // Fetch the result and make sure it took at least 500ms.
- if _, ok := s.Fetch(true); !ok {
- t.Fatalf("Fetch failed unexpectedly")
- }
- if d := time.Now().Sub(before); d < 500*time.Millisecond {
- t.Fatalf("Duration was too short: %v", d)
- }
-
- // Check that already-asserted waker completes inline.
- w.Assert()
- if _, ok := s.Fetch(true); !ok {
- t.Fatalf("Fetch failed unexpectedly")
- }
-
- // Check that fetch sleeps if waker had been asserted but was reset
- // before Fetch is called.
- w.Assert()
- w.Clear()
- before = time.Now()
- go func() {
- time.Sleep(1 * time.Second)
- w.Assert()
- }()
- if _, ok := s.Fetch(true); !ok {
- t.Fatalf("Fetch failed unexpectedly")
- }
- if d := time.Now().Sub(before); d < 500*time.Millisecond {
- t.Fatalf("Duration was too short: %v", d)
- }
-}
-
-// TestNonBlock checks that a sleeper won't block if waker isn't asserted.
-func TestNonBlock(t *testing.T) {
- var w Waker
- var s Sleeper
-
- // Don't block when there's no waker.
- if _, ok := s.Fetch(false); ok {
- t.Fatalf("Fetch succeeded when there is no waker")
- }
-
- // Don't block when waker isn't asserted.
- s.AddWaker(&w, 0)
- if _, ok := s.Fetch(false); ok {
- t.Fatalf("Fetch succeeded when waker was not asserted")
- }
-
- // Don't block when waker was asserted, but isn't anymore.
- w.Assert()
- w.Clear()
- if _, ok := s.Fetch(false); ok {
- t.Fatalf("Fetch succeeded when waker was not asserted anymore")
- }
-
- // Don't block when waker was consumed by previous Fetch().
- w.Assert()
- if _, ok := s.Fetch(false); !ok {
- t.Fatalf("Fetch failed even though waker was asserted")
- }
-
- if _, ok := s.Fetch(false); ok {
- t.Fatalf("Fetch succeeded when waker had been consumed")
- }
-}
-
-// TestMultiple checks that a sleeper can wait for and receives notifications
-// from multiple wakers.
-func TestMultiple(t *testing.T) {
- s := Sleeper{}
- w1 := Waker{}
- w2 := Waker{}
-
- s.AddWaker(&w1, 0)
- s.AddWaker(&w2, 1)
-
- w1.Assert()
- w2.Assert()
-
- v, ok := s.Fetch(false)
- if !ok {
- t.Fatalf("Fetch failed when there are asserted wakers")
- }
-
- if v != 0 && v != 1 {
- t.Fatalf("Unexpected waker id: %v", v)
- }
-
- want := 1 - v
- v, ok = s.Fetch(false)
- if !ok {
- t.Fatalf("Fetch failed when there is an asserted waker")
- }
-
- if v != want {
- t.Fatalf("Unexpected waker id, got %v, want %v", v, want)
- }
-}
-
-// TestDoneFunction tests if calling Done() on a sleeper works properly.
-func TestDoneFunction(t *testing.T) {
- // Trivial case of no waker.
- s := Sleeper{}
- s.Done()
-
- // Cases when the sleeper has n wakers, but none are asserted.
- for n := 1; n < 20; n++ {
- s := Sleeper{}
- w := make([]Waker, n)
- for j := 0; j < n; j++ {
- s.AddWaker(&w[j], j)
- }
- s.Done()
- }
-
- // Cases when the sleeper has n wakers, and only the i-th one is
- // asserted.
- for n := 1; n < 20; n++ {
- for i := 0; i < n; i++ {
- s := Sleeper{}
- w := make([]Waker, n)
- for j := 0; j < n; j++ {
- s.AddWaker(&w[j], j)
- }
- w[i].Assert()
- s.Done()
- }
- }
-
- // Cases when the sleeper has n wakers, and the i-th one is asserted
- // and cleared.
- for n := 1; n < 20; n++ {
- for i := 0; i < n; i++ {
- s := Sleeper{}
- w := make([]Waker, n)
- for j := 0; j < n; j++ {
- s.AddWaker(&w[j], j)
- }
- w[i].Assert()
- w[i].Clear()
- s.Done()
- }
- }
-
- // Cases when the sleeper has n wakers, with a random number of them
- // asserted.
- for n := 1; n < 20; n++ {
- for iters := 0; iters < 1000; iters++ {
- s := Sleeper{}
- w := make([]Waker, n)
- for j := 0; j < n; j++ {
- s.AddWaker(&w[j], j)
- }
-
- // Pick the number of asserted elements, then assert
- // random wakers.
- asserted := rand.Int() % (n + 1)
- for j := 0; j < asserted; j++ {
- w[rand.Int()%n].Assert()
- }
- s.Done()
- }
- }
-}
-
-// TestRace tests that multiple wakers can continuously send wake requests to
-// the sleeper.
-func TestRace(t *testing.T) {
- const wakers = 100
- const wakeRequests = 10000
-
- counts := make([]int, wakers)
- w := make([]Waker, wakers)
- s := Sleeper{}
-
- // Associate each waker and start goroutines that will assert them.
- for i := range w {
- s.AddWaker(&w[i], i)
- go func(w *Waker) {
- n := 0
- for n < wakeRequests {
- if !w.IsAsserted() {
- w.Assert()
- n++
- } else {
- runtime.Gosched()
- }
- }
- }(&w[i])
- }
-
- // Wait for all wake up notifications from all wakers.
- for i := 0; i < wakers*wakeRequests; i++ {
- v, _ := s.Fetch(true)
- counts[v]++
- }
-
- // Check that we got the right number for each.
- for i, v := range counts {
- if v != wakeRequests {
- t.Errorf("Waker %v only got %v wakes", i, v)
- }
- }
-}
-
-// TestRaceInOrder tests that multiple wakers can continuously send wake requests to
-// the sleeper and that the wakers are retrieved in the order asserted.
-func TestRaceInOrder(t *testing.T) {
- w := make([]Waker, 10000)
- s := Sleeper{}
-
- // Associate each waker and start goroutines that will assert them.
- for i := range w {
- s.AddWaker(&w[i], i)
- }
- go func() {
- for i := range w {
- w[i].Assert()
- }
- }()
-
- // Wait for all wake up notifications from all wakers.
- for want := range w {
- got, _ := s.Fetch(true)
- if got != want {
- t.Fatalf("got %d want %d", got, want)
- }
- }
-}
-
-// BenchmarkSleeperMultiSelect measures how long it takes to fetch a wake up
-// from 4 wakers when at least one is already asserted.
-func BenchmarkSleeperMultiSelect(b *testing.B) {
- const count = 4
- s := Sleeper{}
- w := make([]Waker, count)
- for i := range w {
- s.AddWaker(&w[i], i)
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- w[count-1].Assert()
- s.Fetch(true)
- }
-}
-
-// BenchmarkGoMultiSelect measures how long it takes to fetch a zero-length
-// struct from one of 4 channels when at least one is ready.
-func BenchmarkGoMultiSelect(b *testing.B) {
- const count = 4
- ch := make([]chan struct{}, count)
- for i := range ch {
- ch[i] = make(chan struct{}, 1)
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- ch[count-1] <- struct{}{}
- select {
- case <-ch[0]:
- case <-ch[1]:
- case <-ch[2]:
- case <-ch[3]:
- }
- }
-}
-
-// BenchmarkSleeperSingleSelect measures how long it takes to fetch a wake up
-// from one waker that is already asserted.
-func BenchmarkSleeperSingleSelect(b *testing.B) {
- s := Sleeper{}
- w := Waker{}
- s.AddWaker(&w, 0)
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- w.Assert()
- s.Fetch(true)
- }
-}
-
-// BenchmarkGoSingleSelect measures how long it takes to fetch a zero-length
-// struct from a channel that already has it buffered.
-func BenchmarkGoSingleSelect(b *testing.B) {
- ch := make(chan struct{}, 1)
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- ch <- struct{}{}
- <-ch
- }
-}
-
-// BenchmarkSleeperAssertNonWaiting measures how long it takes to assert a
-// waker that is already asserted.
-func BenchmarkSleeperAssertNonWaiting(b *testing.B) {
- w := Waker{}
- w.Assert()
- for i := 0; i < b.N; i++ {
- w.Assert()
- }
-
-}
-
-// BenchmarkGoAssertNonWaiting measures how long it takes to write to a channel
-// that has already something written to it.
-func BenchmarkGoAssertNonWaiting(b *testing.B) {
- ch := make(chan struct{}, 1)
- ch <- struct{}{}
- for i := 0; i < b.N; i++ {
- select {
- case ch <- struct{}{}:
- default:
- }
- }
-}
-
-// BenchmarkSleeperWaitOnSingleSelect measures how long it takes to wait on one
-// waker while another goroutine wakes up the sleeper. This assumes that
-// a new goroutine doesn't run immediately (i.e., the creator of a new goroutine
-// is allowed to go to sleep before the new goroutine has a chance to run).
-func BenchmarkSleeperWaitOnSingleSelect(b *testing.B) {
- s := Sleeper{}
- w := Waker{}
- s.AddWaker(&w, 0)
- for i := 0; i < b.N; i++ {
- go func() {
- w.Assert()
- }()
- s.Fetch(true)
- }
-
-}
-
-// BenchmarkGoWaitOnSingleSelect measures how long it takes to wait on one
-// channel while another goroutine wakes up the sleeper. This assumes that a new
-// goroutine doesn't run immediately (i.e., the creator of a new goroutine is
-// allowed to go to sleep before the new goroutine has a chance to run).
-func BenchmarkGoWaitOnSingleSelect(b *testing.B) {
- ch := make(chan struct{}, 1)
- for i := 0; i < b.N; i++ {
- go func() {
- ch <- struct{}{}
- }()
- <-ch
- }
-}
-
-// BenchmarkSleeperWaitOnMultiSelect measures how long it takes to wait on 4
-// wakers while another goroutine wakes up the sleeper. This assumes that a new
-// goroutine doesn't run immediately (i.e., the creator of a new goroutine is
-// allowed to go to sleep before the new goroutine has a chance to run).
-func BenchmarkSleeperWaitOnMultiSelect(b *testing.B) {
- const count = 4
- s := Sleeper{}
- w := make([]Waker, count)
- for i := range w {
- s.AddWaker(&w[i], i)
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- go func() {
- w[count-1].Assert()
- }()
- s.Fetch(true)
- }
-}
-
-// BenchmarkGoWaitOnMultiSelect measures how long it takes to wait on 4 channels
-// while another goroutine wakes up the sleeper. This assumes that a new
-// goroutine doesn't run immediately (i.e., the creator of a new goroutine is
-// allowed to go to sleep before the new goroutine has a chance to run).
-func BenchmarkGoWaitOnMultiSelect(b *testing.B) {
- const count = 4
- ch := make([]chan struct{}, count)
- for i := range ch {
- ch[i] = make(chan struct{}, 1)
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- go func() {
- ch[count-1] <- struct{}{}
- }()
- select {
- case <-ch[0]:
- case <-ch[1]:
- case <-ch[2]:
- case <-ch[3]:
- }
- }
-}
diff --git a/pkg/sleep/sleep_unsafe_state_autogen.go b/pkg/sleep/sleep_unsafe_state_autogen.go
new file mode 100644
index 000000000..bea86dcca
--- /dev/null
+++ b/pkg/sleep/sleep_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package sleep
diff --git a/pkg/state/BUILD b/pkg/state/BUILD
deleted file mode 100644
index 92c51879b..000000000
--- a/pkg/state/BUILD
+++ /dev/null
@@ -1,86 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "deferred_list",
- out = "deferred_list.go",
- package = "state",
- prefix = "deferred",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*objectEncodeState",
- "ElementMapper": "deferredMapper",
- "Linker": "*deferredEntry",
- },
-)
-
-go_template_instance(
- name = "complete_list",
- out = "complete_list.go",
- package = "state",
- prefix = "complete",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*objectDecodeState",
- "Linker": "*objectDecodeState",
- },
-)
-
-go_template_instance(
- name = "addr_range",
- out = "addr_range.go",
- package = "state",
- prefix = "addr",
- template = "//pkg/segment:generic_range",
- types = {
- "T": "uintptr",
- },
-)
-
-go_template_instance(
- name = "addr_set",
- out = "addr_set.go",
- consts = {
- "minDegree": "10",
- },
- imports = {
- "reflect": "reflect",
- },
- package = "state",
- prefix = "addr",
- template = "//pkg/segment:generic_set",
- types = {
- "Key": "uintptr",
- "Range": "addrRange",
- "Value": "*objectEncodeState",
- "Functions": "addrSetFunctions",
- },
-)
-
-go_library(
- name = "state",
- srcs = [
- "addr_range.go",
- "addr_set.go",
- "complete_list.go",
- "decode.go",
- "decode_unsafe.go",
- "deferred_list.go",
- "encode.go",
- "encode_unsafe.go",
- "state.go",
- "state_norace.go",
- "state_race.go",
- "stats.go",
- "types.go",
- ],
- marshal = False,
- stateify = False,
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/log",
- "//pkg/state/wire",
- ],
-)
diff --git a/pkg/state/README.md b/pkg/state/README.md
deleted file mode 100644
index 1aa401193..000000000
--- a/pkg/state/README.md
+++ /dev/null
@@ -1,158 +0,0 @@
-# State Encoding and Decoding
-
-The state package implements the encoding and decoding of data structures for
-`go_stateify`. This package is designed for use cases that the standard
-encoding packages, e.g. `gob` and `json`, do not cover. Principally:
-
-* This package operates on complex object graphs and accurately serializes and
-  restores all relationships. That is, you can have things like intrusive
-  pointers, cycles, and pointer chains of arbitrary depth. These are not
- handled appropriately by existing encoders. This is not an implementation
- flaw: the formats themselves are not capable of representing these graphs,
- as they can only generate directed trees.
-
-* This package allows installing order-dependent load callbacks and then
-  resolves that graph at load time, with cycle detection. No analogous
-  feature exists in the standard encoders.
-
-* This package handles the resolution of interfaces, based on a registered
-  type name. For interface objects, type information is saved in the serialized
- format. This is generally true for `gob` as well, but it works differently.
-
-Here's an overview of how encoding and decoding works.
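-
-As a running illustration, a type opts into this machinery by carrying the
-`+stateify savable` annotation; the tool then emits `StateTypeName`,
-`StateFields`, `StateSave` and `StateLoad` methods for it. The type below is
-hypothetical, and the `state:"nosave"` tag is shown only as one way to skip a
-field:
-
-```go
-// counter is a hypothetical savable type. stateify generates its state
-// methods in a *_state_autogen.go file, following the same pattern as the
-// other generated files in this change.
-//
-// +stateify savable
-type counter struct {
-	// Count is saved and restored.
-	Count uint64
-
-	// cache is rebuilt after restore rather than saved.
-	cache map[string]uint64 `state:"nosave"`
-}
-```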
-
-## Encoding
-
-Encoding produces a `statefile`, which contains a list of chunks of the form
-`(header, payload)`. The payload can be either raw data or a series of
-encoded wire objects representing some object graph. All encoded objects are
-defined in the `wire` subpackage.
-
-Encoding of an object graph begins with `encodeState.Save`.
-
-### 1. Memory Map & Encoding
-
-To discover relationships between potentially interdependent data structures
-(for example, a struct may contain pointers to members of other data
-structures), the encoder first walks the object graph and constructs a memory
-map of the objects in the input graph. As this walk progresses, objects are
-queued in the `pending` list and items are placed on the `deferred` list as they
-are discovered. No single object will be encoded multiple times, but the
-discovered relationships between objects may change as more parts of the overall
-object graph are discovered.
-
-The encoder starts at the root object and recursively visits all reachable
-objects, recording the address ranges containing the underlying data for each
-object. This is stored as a segment set (`addrSet`), mapping each address
-range to the object occupying it; see `encodeState.values`. Note that there
-is special handling for zero-sized types and map objects during this process.
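-
-A toy sketch of the containment query this map answers is below; the helper
-and its types are hypothetical, and the real encoder uses the `addrSet` B-tree
-rather than a linear scan:
-
-```go
-// objRange records the address range occupied by a discovered object and the
-// identifier assigned to that object.
-type objRange struct {
-	start, end uintptr // Half-open range [start, end).
-	id         int
-}
-
-// owner reports which recorded object, if any, contains the address p.
-func owner(ranges []objRange, p uintptr) (int, bool) {
-	for _, r := range ranges {
-		if r.start <= p && p < r.end {
-			return r.id, true
-		}
-	}
-	return 0, false
-}
-```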
-
-Additionally, the encoder assigns each object a unique identifier which is used
-to indicate relationships between objects in the statefile; see `objectID` in
-`encode.go`.
-
-### 2. Type Serialization
-
-The encoder subsequently serializes all information about discovered types,
-including field names. These are used during decoding to reconcile these types
-with other internally registered types.
-
-### 3. Object Serialization
-
-With a full address map and all objects correctly encoded, the object
-encodings are serialized. The assigned `objectID`s aren't explicitly encoded in
-the statefile; the order of object messages in the stream determines their IDs.
-
-### Example
-
-Given the following data structure definitions:
-
-```go
-type system struct {
- o *outer
- i *inner
-}
-
-type outer struct {
- a int64
- cn *container
-}
-
-type container struct {
- n uint64
- elem *inner
-}
-
-type inner struct {
- c container
- x, y uint64
-}
-```
-
-Initialized like this:
-
-```go
-o := outer{
- a: 10,
- cn: nil,
-}
-i := inner{
- x: 20,
- y: 30,
- c: container{},
-}
-s := system{
- o: &o,
- i: &i,
-}
-
-o.cn = &i.c
-o.cn.elem = &i
-
-```
-
-Encoding will produce an object stream like this:
-
-```
-g0r1 = struct{
- i: g0r3,
- o: g0r2,
-}
-g0r2 = struct{
- a: 10,
- cn: g0r3.c,
-}
-g0r3 = struct{
- c: struct{
- elem: g0r3,
- n: 0u,
- },
- x: 20u,
- y: 30u,
-}
-```
-
-Note how `g0r3.c` is correctly encoded as the underlying `container` object for
-`inner.c`, and how the pointer from `outer.cn` points to it, despite `system.i`
-being discovered after the pointer to it in `system.o.cn`. Also note that
-decoding isn't strictly reliant on the order of the encoded object stream, as
-long as the relationships between objects are correctly encoded.
-
-## Decoding
-
-Decoding reads the statefile and reconstructs the object graph, beginning in
-`decodeState.Load`. It is performed in a single pass over the object stream in
-the statefile, followed by a pass over all deserialized objects that fires the
-loading callbacks in their defined order. Note that it is possible to
-introduce cycles here, but these are detected and an error is returned.
-
-Decoding is relatively straightforward. For most primitive values, the decoder
-constructs an appropriate object and fills it with the values encoded in the
-statefile. Pointers need special handling, as they must point to a value
-allocated elsewhere. As values are constructed, the decoder indexes them by
-their `objectID`s in `decodeState.objectsByID`. The targets of pointers are
-resolved by looking them up in this index by `objectID`; see
-`decodeState.register`. For pointers to values inside another value (fields in
-a struct, elements of an array), the decoder uses the accessor path to walk to
-the appropriate location; see `walkChild`.
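-
-The sketch below shows that fixup in miniature: objects are constructed and
-indexed by ID first, and pointers are then resolved against the index. The
-helper names are hypothetical, and the real decoder additionally walks accessor
-paths for pointers into the interior of other values:
-
-```go
-// node is a toy self-referential type.
-type node struct {
-	value int
-	next  *node
-}
-
-// decodeNodes rebuilds a graph from flattened (value, next-ID) pairs. Pass one
-// constructs every object and indexes it by ID; pass two resolves pointers by
-// looking those IDs up, mirroring the role of decodeState.register.
-func decodeNodes(values []int, nextIDs []int) []*node {
-	objectsByID := make([]*node, len(values))
-	for id, v := range values {
-		objectsByID[id] = &node{value: v}
-	}
-	for id, target := range nextIDs {
-		if target >= 0 { // A negative ID stands in for a nil pointer.
-			objectsByID[id].next = objectsByID[target]
-		}
-	}
-	return objectsByID
-}
-```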
diff --git a/pkg/state/addr_range.go b/pkg/state/addr_range.go
new file mode 100644
index 000000000..0b7346e47
--- /dev/null
+++ b/pkg/state/addr_range.go
@@ -0,0 +1,76 @@
+package state
+
+// A Range represents a contiguous range of T.
+//
+// +stateify savable
+type addrRange struct {
+ // Start is the inclusive start of the range.
+ Start uintptr
+
+ // End is the exclusive end of the range.
+ End uintptr
+}
+
+// WellFormed returns true if r.Start <= r.End. All other methods on a Range
+// require that the Range is well-formed.
+//
+//go:nosplit
+func (r addrRange) WellFormed() bool {
+ return r.Start <= r.End
+}
+
+// Length returns the length of the range.
+//
+//go:nosplit
+func (r addrRange) Length() uintptr {
+ return r.End - r.Start
+}
+
+// Contains returns true if r contains x.
+//
+//go:nosplit
+func (r addrRange) Contains(x uintptr) bool {
+ return r.Start <= x && x < r.End
+}
+
+// Overlaps returns true if r and r2 overlap.
+//
+//go:nosplit
+func (r addrRange) Overlaps(r2 addrRange) bool {
+ return r.Start < r2.End && r2.Start < r.End
+}
+
+// IsSupersetOf returns true if r is a superset of r2; that is, the range r2 is
+// contained within r.
+//
+//go:nosplit
+func (r addrRange) IsSupersetOf(r2 addrRange) bool {
+ return r.Start <= r2.Start && r.End >= r2.End
+}
+
+// Intersect returns a range consisting of the intersection between r and r2.
+// If r and r2 do not overlap, Intersect returns a range with unspecified
+// bounds, but for which Length() == 0.
+//
+//go:nosplit
+func (r addrRange) Intersect(r2 addrRange) addrRange {
+ if r.Start < r2.Start {
+ r.Start = r2.Start
+ }
+ if r.End > r2.End {
+ r.End = r2.End
+ }
+ if r.End < r.Start {
+ r.End = r.Start
+ }
+ return r
+}
+
+// CanSplitAt returns true if it is legal to split a segment spanning the range
+// r at x; that is, splitting at x would produce two ranges, both of which have
+// non-zero length.
+//
+//go:nosplit
+func (r addrRange) CanSplitAt(x uintptr) bool {
+ return r.Contains(x) && r.Start < x
+}
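To make the range semantics above concrete, here is a minimal sketch. `exampleAddrRange` is hypothetical (not part of the patch) and assumes it sits in package state next to the file above; it only exercises the methods shown.

```go
// Hypothetical snippet, assumed to live in package state alongside addr_range.go.
package state

func exampleAddrRange() {
	a := addrRange{Start: 0x1000, End: 0x2000}
	b := addrRange{Start: 0x1800, End: 0x3000}

	_ = a.Length()           // 0x1000
	_ = a.Contains(0x1fff)   // true; End is exclusive, so 0x2000 is not contained
	_ = a.Overlaps(b)        // true; [0x1800, 0x2000) is shared
	_ = a.Intersect(b)       // addrRange{Start: 0x1800, End: 0x2000}
	_ = a.CanSplitAt(0x1800) // true; splitting at Start or End would be illegal
}
```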
diff --git a/pkg/state/addr_set.go b/pkg/state/addr_set.go
new file mode 100644
index 000000000..8abc3dd34
--- /dev/null
+++ b/pkg/state/addr_set.go
@@ -0,0 +1,1643 @@
+package state
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// trackGaps is an optional parameter.
+//
+// If trackGaps is 1, the Set will track maximum gap size recursively,
+// enabling the GapIterator.{Prev,Next}LargeEnoughGap functions. In this
+// case, Key must be an unsigned integer.
+//
+// trackGaps must be 0 or 1.
+const addrtrackGaps = 0
+
+var _ = uint8(addrtrackGaps << 7) // Will fail if not zero or one.
+
+// dynamicGap is a type that disappears if trackGaps is 0.
+type addrdynamicGap [addrtrackGaps]uintptr
+
+// Get returns the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *addrdynamicGap) Get() uintptr {
+ return d[:][0]
+}
+
+// Set sets the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *addrdynamicGap) Set(v uintptr) {
+ d[:][0] = v
+}
+
+const (
+ // minDegree is the minimum degree of an internal node in a Set B-tree.
+ //
+ // - Any non-root node has at least minDegree-1 segments.
+ //
+ // - Any non-root internal (non-leaf) node has at least minDegree children.
+ //
+ // - The root node may have fewer than minDegree-1 segments, but it may
+ // only have 0 segments if the tree is empty.
+ //
+ // Our implementation requires minDegree >= 3. Higher values of minDegree
+ // usually improve performance, but increase memory usage for small sets.
+ addrminDegree = 10
+
+ addrmaxDegree = 2 * addrminDegree
+)
+
+// A Set is a mapping of segments with non-overlapping Range keys. The zero
+// value for a Set is an empty set. Set values are not safely movable nor
+// copyable. Set is thread-compatible.
+//
+// +stateify savable
+type addrSet struct {
+ root addrnode `state:".(*addrSegmentDataSlices)"`
+}
+
+// IsEmpty returns true if the set contains no segments.
+func (s *addrSet) IsEmpty() bool {
+ return s.root.nrSegments == 0
+}
+
+// IsEmptyRange returns true iff no segments in the set overlap the given
+// range. This is semantically equivalent to s.SpanRange(r) == 0, but may be
+// more efficient.
+func (s *addrSet) IsEmptyRange(r addrRange) bool {
+ switch {
+ case r.Length() < 0:
+ panic(fmt.Sprintf("invalid range %v", r))
+ case r.Length() == 0:
+ return true
+ }
+ _, gap := s.Find(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ return r.End <= gap.End()
+}
+
+// Span returns the total size of all segments in the set.
+func (s *addrSet) Span() uintptr {
+ var sz uintptr
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ sz += seg.Range().Length()
+ }
+ return sz
+}
+
+// SpanRange returns the total size of the intersection of segments in the set
+// with the given range.
+func (s *addrSet) SpanRange(r addrRange) uintptr {
+ switch {
+ case r.Length() < 0:
+ panic(fmt.Sprintf("invalid range %v", r))
+ case r.Length() == 0:
+ return 0
+ }
+ var sz uintptr
+ for seg := s.LowerBoundSegment(r.Start); seg.Ok() && seg.Start() < r.End; seg = seg.NextSegment() {
+ sz += seg.Range().Intersect(r).Length()
+ }
+ return sz
+}
+
+// FirstSegment returns the first segment in the set. If the set is empty,
+// FirstSegment returns a terminal iterator.
+func (s *addrSet) FirstSegment() addrIterator {
+ if s.root.nrSegments == 0 {
+ return addrIterator{}
+ }
+ return s.root.firstSegment()
+}
+
+// LastSegment returns the last segment in the set. If the set is empty,
+// LastSegment returns a terminal iterator.
+func (s *addrSet) LastSegment() addrIterator {
+ if s.root.nrSegments == 0 {
+ return addrIterator{}
+ }
+ return s.root.lastSegment()
+}
+
+// FirstGap returns the first gap in the set.
+func (s *addrSet) FirstGap() addrGapIterator {
+ n := &s.root
+ for n.hasChildren {
+ n = n.children[0]
+ }
+ return addrGapIterator{n, 0}
+}
+
+// LastGap returns the last gap in the set.
+func (s *addrSet) LastGap() addrGapIterator {
+ n := &s.root
+ for n.hasChildren {
+ n = n.children[n.nrSegments]
+ }
+ return addrGapIterator{n, n.nrSegments}
+}
+
+// Find returns the segment or gap whose range contains the given key. If a
+// segment is found, the returned Iterator is non-terminal and the
+// returned GapIterator is terminal. Otherwise, the returned Iterator is
+// terminal and the returned GapIterator is non-terminal.
+func (s *addrSet) Find(key uintptr) (addrIterator, addrGapIterator) {
+ n := &s.root
+ for {
+
+ lower := 0
+ upper := n.nrSegments
+ for lower < upper {
+ i := lower + (upper-lower)/2
+ if r := n.keys[i]; key < r.End {
+ if key >= r.Start {
+ return addrIterator{n, i}, addrGapIterator{}
+ }
+ upper = i
+ } else {
+ lower = i + 1
+ }
+ }
+ i := lower
+ if !n.hasChildren {
+ return addrIterator{}, addrGapIterator{n, i}
+ }
+ n = n.children[i]
+ }
+}
+
+// FindSegment returns the segment whose range contains the given key. If no
+// such segment exists, FindSegment returns a terminal iterator.
+func (s *addrSet) FindSegment(key uintptr) addrIterator {
+ seg, _ := s.Find(key)
+ return seg
+}
+
+// LowerBoundSegment returns the segment with the lowest range that contains a
+// key greater than or equal to min. If no such segment exists,
+// LowerBoundSegment returns a terminal iterator.
+func (s *addrSet) LowerBoundSegment(min uintptr) addrIterator {
+ seg, gap := s.Find(min)
+ if seg.Ok() {
+ return seg
+ }
+ return gap.NextSegment()
+}
+
+// UpperBoundSegment returns the segment with the highest range that contains a
+// key less than or equal to max. If no such segment exists, UpperBoundSegment
+// returns a terminal iterator.
+func (s *addrSet) UpperBoundSegment(max uintptr) addrIterator {
+ seg, gap := s.Find(max)
+ if seg.Ok() {
+ return seg
+ }
+ return gap.PrevSegment()
+}
+
+// FindGap returns the gap containing the given key. If no such gap exists
+// (i.e. the set contains a segment containing that key), FindGap returns a
+// terminal iterator.
+func (s *addrSet) FindGap(key uintptr) addrGapIterator {
+ _, gap := s.Find(key)
+ return gap
+}
+
+// LowerBoundGap returns the gap with the lowest range that is greater than or
+// equal to min.
+func (s *addrSet) LowerBoundGap(min uintptr) addrGapIterator {
+ seg, gap := s.Find(min)
+ if gap.Ok() {
+ return gap
+ }
+ return seg.NextGap()
+}
+
+// UpperBoundGap returns the gap with the highest range that is less than or
+// equal to max.
+func (s *addrSet) UpperBoundGap(max uintptr) addrGapIterator {
+ seg, gap := s.Find(max)
+ if gap.Ok() {
+ return gap
+ }
+ return seg.PrevGap()
+}
+
+// Add inserts the given segment into the set and returns true. If the new
+// segment can be merged with adjacent segments, Add will do so. If the new
+// segment would overlap an existing segment, Add returns false. If Add
+// succeeds, all existing iterators are invalidated.
+func (s *addrSet) Add(r addrRange, val *objectEncodeState) bool {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ gap := s.FindGap(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ if r.End > gap.End() {
+ return false
+ }
+ s.Insert(gap, r, val)
+ return true
+}
+
+// AddWithoutMerging inserts the given segment into the set and returns true.
+// If it would overlap an existing segment, AddWithoutMerging does nothing and
+// returns false. If AddWithoutMerging succeeds, all existing iterators are
+// invalidated.
+func (s *addrSet) AddWithoutMerging(r addrRange, val *objectEncodeState) bool {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ gap := s.FindGap(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ if r.End > gap.End() {
+ return false
+ }
+ s.InsertWithoutMergingUnchecked(gap, r, val)
+ return true
+}
+
+// Insert inserts the given segment into the given gap. If the new segment can
+// be merged with adjacent segments, Insert will do so. Insert returns an
+// iterator to the segment containing the inserted value (which may have been
+// merged with other values). All existing iterators (including gap, but not
+// including the returned iterator) are invalidated.
+//
+// If the gap cannot accommodate the segment, or if r is invalid, Insert panics.
+//
+// Insert is semantically equivalent to an InsertWithoutMerging followed by a
+// Merge, but may be more efficient. Note that there is no unchecked variant of
+// Insert since Insert must retrieve and inspect gap's predecessor and
+// successor segments regardless.
+func (s *addrSet) Insert(gap addrGapIterator, r addrRange, val *objectEncodeState) addrIterator {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ prev, next := gap.PrevSegment(), gap.NextSegment()
+ if prev.Ok() && prev.End() > r.Start {
+ panic(fmt.Sprintf("new segment %v overlaps predecessor %v", r, prev.Range()))
+ }
+ if next.Ok() && next.Start() < r.End {
+ panic(fmt.Sprintf("new segment %v overlaps successor %v", r, next.Range()))
+ }
+ if prev.Ok() && prev.End() == r.Start {
+ if mval, ok := (addrSetFunctions{}).Merge(prev.Range(), prev.Value(), r, val); ok {
+ shrinkMaxGap := addrtrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
+ prev.SetEndUnchecked(r.End)
+ prev.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ if next.Ok() && next.Start() == r.End {
+ val = mval
+ if mval, ok := (addrSetFunctions{}).Merge(prev.Range(), val, next.Range(), next.Value()); ok {
+ prev.SetEndUnchecked(next.End())
+ prev.SetValue(mval)
+ return s.Remove(next).PrevSegment()
+ }
+ }
+ return prev
+ }
+ }
+ if next.Ok() && next.Start() == r.End {
+ if mval, ok := (addrSetFunctions{}).Merge(r, val, next.Range(), next.Value()); ok {
+ shrinkMaxGap := addrtrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
+ next.SetStartUnchecked(r.Start)
+ next.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ return next
+ }
+ }
+
+ return s.InsertWithoutMergingUnchecked(gap, r, val)
+}
+
+// InsertWithoutMerging inserts the given segment into the given gap and
+// returns an iterator to the inserted segment. All existing iterators
+// (including gap, but not including the returned iterator) are invalidated.
+//
+// If the gap cannot accommodate the segment, or if r is invalid,
+// InsertWithoutMerging panics.
+func (s *addrSet) InsertWithoutMerging(gap addrGapIterator, r addrRange, val *objectEncodeState) addrIterator {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ if gr := gap.Range(); !gr.IsSupersetOf(r) {
+ panic(fmt.Sprintf("cannot insert segment range %v into gap range %v", r, gr))
+ }
+ return s.InsertWithoutMergingUnchecked(gap, r, val)
+}
+
+// InsertWithoutMergingUnchecked inserts the given segment into the given gap
+// and returns an iterator to the inserted segment. All existing iterators
+// (including gap, but not including the returned iterator) are invalidated.
+//
+// Preconditions:
+// * r.Start >= gap.Start().
+// * r.End <= gap.End().
+func (s *addrSet) InsertWithoutMergingUnchecked(gap addrGapIterator, r addrRange, val *objectEncodeState) addrIterator {
+ gap = gap.node.rebalanceBeforeInsert(gap)
+ splitMaxGap := addrtrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
+ copy(gap.node.keys[gap.index+1:], gap.node.keys[gap.index:gap.node.nrSegments])
+ copy(gap.node.values[gap.index+1:], gap.node.values[gap.index:gap.node.nrSegments])
+ gap.node.keys[gap.index] = r
+ gap.node.values[gap.index] = val
+ gap.node.nrSegments++
+ if splitMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
+ return addrIterator{gap.node, gap.index}
+}
+
+// Remove removes the given segment and returns an iterator to the vacated gap.
+// All existing iterators (including seg, but not including the returned
+// iterator) are invalidated.
+func (s *addrSet) Remove(seg addrIterator) addrGapIterator {
+
+ if seg.node.hasChildren {
+
+ victim := seg.PrevSegment()
+
+ seg.SetRangeUnchecked(victim.Range())
+ seg.SetValue(victim.Value())
+
+ nextAdjacentNode := seg.NextSegment().node
+ if addrtrackGaps != 0 {
+ nextAdjacentNode.updateMaxGapLeaf()
+ }
+ return s.Remove(victim).NextGap()
+ }
+ copy(seg.node.keys[seg.index:], seg.node.keys[seg.index+1:seg.node.nrSegments])
+ copy(seg.node.values[seg.index:], seg.node.values[seg.index+1:seg.node.nrSegments])
+ addrSetFunctions{}.ClearValue(&seg.node.values[seg.node.nrSegments-1])
+ seg.node.nrSegments--
+ if addrtrackGaps != 0 {
+ seg.node.updateMaxGapLeaf()
+ }
+ return seg.node.rebalanceAfterRemove(addrGapIterator{seg.node, seg.index})
+}
+
+// RemoveAll removes all segments from the set. All existing iterators are
+// invalidated.
+func (s *addrSet) RemoveAll() {
+ s.root = addrnode{}
+}
+
+// RemoveRange removes all segments in the given range. An iterator to the
+// newly formed gap is returned, and all existing iterators are invalidated.
+func (s *addrSet) RemoveRange(r addrRange) addrGapIterator {
+ seg, gap := s.Find(r.Start)
+ if seg.Ok() {
+ seg = s.Isolate(seg, r)
+ gap = s.Remove(seg)
+ }
+ for seg = gap.NextSegment(); seg.Ok() && seg.Start() < r.End; seg = gap.NextSegment() {
+ seg = s.Isolate(seg, r)
+ gap = s.Remove(seg)
+ }
+ return gap
+}
+
+// Merge attempts to merge two neighboring segments. If successful, Merge
+// returns an iterator to the merged segment, and all existing iterators are
+// invalidated. Otherwise, Merge returns a terminal iterator.
+//
+// If first is not the predecessor of second, Merge panics.
+func (s *addrSet) Merge(first, second addrIterator) addrIterator {
+ if first.NextSegment() != second {
+ panic(fmt.Sprintf("attempt to merge non-neighboring segments %v, %v", first.Range(), second.Range()))
+ }
+ return s.MergeUnchecked(first, second)
+}
+
+// MergeUnchecked attempts to merge two neighboring segments. If successful,
+// MergeUnchecked returns an iterator to the merged segment, and all existing
+// iterators are invalidated. Otherwise, MergeUnchecked returns a terminal
+// iterator.
+//
+// Precondition: first is the predecessor of second: first.NextSegment() ==
+// second, first == second.PrevSegment().
+func (s *addrSet) MergeUnchecked(first, second addrIterator) addrIterator {
+ if first.End() == second.Start() {
+ if mval, ok := (addrSetFunctions{}).Merge(first.Range(), first.Value(), second.Range(), second.Value()); ok {
+
+ first.SetEndUnchecked(second.End())
+ first.SetValue(mval)
+
+ return s.Remove(second).PrevSegment()
+ }
+ }
+ return addrIterator{}
+}
+
+// MergeAll attempts to merge all adjacent segments in the set. All existing
+// iterators are invalidated.
+func (s *addrSet) MergeAll() {
+ seg := s.FirstSegment()
+ if !seg.Ok() {
+ return
+ }
+ next := seg.NextSegment()
+ for next.Ok() {
+ if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
+ seg, next = mseg, mseg.NextSegment()
+ } else {
+ seg, next = next, next.NextSegment()
+ }
+ }
+}
+
+// MergeRange attempts to merge all adjacent segments that contain a key in the
+// specific range. All existing iterators are invalidated.
+func (s *addrSet) MergeRange(r addrRange) {
+ seg := s.LowerBoundSegment(r.Start)
+ if !seg.Ok() {
+ return
+ }
+ next := seg.NextSegment()
+ for next.Ok() && next.Range().Start < r.End {
+ if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
+ seg, next = mseg, mseg.NextSegment()
+ } else {
+ seg, next = next, next.NextSegment()
+ }
+ }
+}
+
+// MergeAdjacent attempts to merge the segment containing r.Start with its
+// predecessor, and the segment containing r.End-1 with its successor.
+func (s *addrSet) MergeAdjacent(r addrRange) {
+ first := s.FindSegment(r.Start)
+ if first.Ok() {
+ if prev := first.PrevSegment(); prev.Ok() {
+ s.Merge(prev, first)
+ }
+ }
+ last := s.FindSegment(r.End - 1)
+ if last.Ok() {
+ if next := last.NextSegment(); next.Ok() {
+ s.Merge(last, next)
+ }
+ }
+}
+
+// Split splits the given segment at the given key and returns iterators to the
+// two resulting segments. All existing iterators (including seg, but not
+// including the returned iterators) are invalidated.
+//
+// If the segment cannot be split at split (because split is at the start or
+// end of the segment's range, so splitting would produce a segment with zero
+// length, or because split falls outside the segment's range altogether),
+// Split panics.
+func (s *addrSet) Split(seg addrIterator, split uintptr) (addrIterator, addrIterator) {
+ if !seg.Range().CanSplitAt(split) {
+ panic(fmt.Sprintf("can't split %v at %v", seg.Range(), split))
+ }
+ return s.SplitUnchecked(seg, split)
+}
+
+// SplitUnchecked splits the given segment at the given key and returns
+// iterators to the two resulting segments. All existing iterators (including
+// seg, but not including the returned iterators) are invalidated.
+//
+// Preconditions: seg.Start() < key < seg.End().
+func (s *addrSet) SplitUnchecked(seg addrIterator, split uintptr) (addrIterator, addrIterator) {
+ val1, val2 := (addrSetFunctions{}).Split(seg.Range(), seg.Value(), split)
+ end2 := seg.End()
+ seg.SetEndUnchecked(split)
+ seg.SetValue(val1)
+ seg2 := s.InsertWithoutMergingUnchecked(seg.NextGap(), addrRange{split, end2}, val2)
+
+ return seg2.PrevSegment(), seg2
+}
+
+// SplitAt splits the segment straddling split, if one exists. SplitAt returns
+// true if a segment was split and false otherwise. If SplitAt splits a
+// segment, all existing iterators are invalidated.
+func (s *addrSet) SplitAt(split uintptr) bool {
+ if seg := s.FindSegment(split); seg.Ok() && seg.Range().CanSplitAt(split) {
+ s.SplitUnchecked(seg, split)
+ return true
+ }
+ return false
+}
+
+// Isolate ensures that the given segment's range does not escape r by
+// splitting at r.Start and r.End if necessary, and returns an updated iterator
+// to the bounded segment. All existing iterators (including seg, but not
+// including the returned iterator) are invalidated.
+func (s *addrSet) Isolate(seg addrIterator, r addrRange) addrIterator {
+ if seg.Range().CanSplitAt(r.Start) {
+ _, seg = s.SplitUnchecked(seg, r.Start)
+ }
+ if seg.Range().CanSplitAt(r.End) {
+ seg, _ = s.SplitUnchecked(seg, r.End)
+ }
+ return seg
+}
+
+// ApplyContiguous applies a function to a contiguous range of segments,
+// splitting if necessary. The function is applied until the first gap is
+// encountered, at which point the gap is returned. If the function is applied
+// across the entire range, a terminal gap is returned. All existing iterators
+// are invalidated.
+//
+// N.B. The Iterator must not be invalidated by the function.
+func (s *addrSet) ApplyContiguous(r addrRange, fn func(seg addrIterator)) addrGapIterator {
+ seg, gap := s.Find(r.Start)
+ if !seg.Ok() {
+ return gap
+ }
+ for {
+ seg = s.Isolate(seg, r)
+ fn(seg)
+ if seg.End() >= r.End {
+ return addrGapIterator{}
+ }
+ gap = seg.NextGap()
+ if !gap.IsEmpty() {
+ return gap
+ }
+ seg = gap.NextSegment()
+ if !seg.Ok() {
+
+ return addrGapIterator{}
+ }
+ }
+}
+
+// +stateify savable
+type addrnode struct {
+ // An internal binary tree node looks like:
+ //
+ // K
+ // / \
+ // Cl Cr
+ //
+ // where all keys in the subtree rooted by Cl (the left subtree) are less
+ // than K (the key of the parent node), and all keys in the subtree rooted
+ // by Cr (the right subtree) are greater than K.
+ //
+ // An internal B-tree node's indexes work out to look like:
+ //
+ // K0 K1 K2 ... Kn-1
+ // / \/ \/ \ ... / \
+ // C0 C1 C2 C3 ... Cn-1 Cn
+ //
+ // where n is nrSegments.
+ nrSegments int
+
+ // parent is a pointer to this node's parent. If this node is root, parent
+ // is nil.
+ parent *addrnode
+
+ // parentIndex is the index of this node in parent.children.
+ parentIndex int
+
+ // Flag for internal nodes that is technically redundant with "children[0]
+ // != nil", but is stored in the first cache line. "hasChildren" rather
+ // than "isLeaf" because false must be the correct value for an empty root.
+ hasChildren bool
+
+ // The longest gap within this node. If the node is a leaf, this is simply
+ // the maximum gap among the (nrSegments+1) gaps formed by its nrSegments
+ // keys, including the 0th and nrSegments-th gaps, which may be shared with
+ // ancestor nodes; if it is an internal node, it is the maximum of all
+ // children's maxGap.
+ maxGap addrdynamicGap
+
+ // Nodes store keys and values in separate arrays to maximize locality in
+ // the common case (scanning keys for lookup).
+ keys [addrmaxDegree - 1]addrRange
+ values [addrmaxDegree - 1]*objectEncodeState
+ children [addrmaxDegree]*addrnode
+}
+
+// firstSegment returns the first segment in the subtree rooted by n.
+//
+// Preconditions: n.nrSegments != 0.
+func (n *addrnode) firstSegment() addrIterator {
+ for n.hasChildren {
+ n = n.children[0]
+ }
+ return addrIterator{n, 0}
+}
+
+// lastSegment returns the last segment in the subtree rooted by n.
+//
+// Preconditions: n.nrSegments != 0.
+func (n *addrnode) lastSegment() addrIterator {
+ for n.hasChildren {
+ n = n.children[n.nrSegments]
+ }
+ return addrIterator{n, n.nrSegments - 1}
+}
+
+func (n *addrnode) prevSibling() *addrnode {
+ if n.parent == nil || n.parentIndex == 0 {
+ return nil
+ }
+ return n.parent.children[n.parentIndex-1]
+}
+
+func (n *addrnode) nextSibling() *addrnode {
+ if n.parent == nil || n.parentIndex == n.parent.nrSegments {
+ return nil
+ }
+ return n.parent.children[n.parentIndex+1]
+}
+
+// rebalanceBeforeInsert splits n and its ancestors if they are full, as
+// required for insertion, and returns an updated iterator to the position
+// represented by gap.
+func (n *addrnode) rebalanceBeforeInsert(gap addrGapIterator) addrGapIterator {
+ if n.nrSegments < addrmaxDegree-1 {
+ return gap
+ }
+ if n.parent != nil {
+ gap = n.parent.rebalanceBeforeInsert(gap)
+ }
+ if n.parent == nil {
+
+ left := &addrnode{
+ nrSegments: addrminDegree - 1,
+ parent: n,
+ parentIndex: 0,
+ hasChildren: n.hasChildren,
+ }
+ right := &addrnode{
+ nrSegments: addrminDegree - 1,
+ parent: n,
+ parentIndex: 1,
+ hasChildren: n.hasChildren,
+ }
+ copy(left.keys[:addrminDegree-1], n.keys[:addrminDegree-1])
+ copy(left.values[:addrminDegree-1], n.values[:addrminDegree-1])
+ copy(right.keys[:addrminDegree-1], n.keys[addrminDegree:])
+ copy(right.values[:addrminDegree-1], n.values[addrminDegree:])
+ n.keys[0], n.values[0] = n.keys[addrminDegree-1], n.values[addrminDegree-1]
+ addrzeroValueSlice(n.values[1:])
+ if n.hasChildren {
+ copy(left.children[:addrminDegree], n.children[:addrminDegree])
+ copy(right.children[:addrminDegree], n.children[addrminDegree:])
+ addrzeroNodeSlice(n.children[2:])
+ for i := 0; i < addrminDegree; i++ {
+ left.children[i].parent = left
+ left.children[i].parentIndex = i
+ right.children[i].parent = right
+ right.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments = 1
+ n.hasChildren = true
+ n.children[0] = left
+ n.children[1] = right
+
+ if addrtrackGaps != 0 {
+ left.updateMaxGapLocal()
+ right.updateMaxGapLocal()
+ }
+ if gap.node != n {
+ return gap
+ }
+ if gap.index < addrminDegree {
+ return addrGapIterator{left, gap.index}
+ }
+ return addrGapIterator{right, gap.index - addrminDegree}
+ }
+
+ copy(n.parent.keys[n.parentIndex+1:], n.parent.keys[n.parentIndex:n.parent.nrSegments])
+ copy(n.parent.values[n.parentIndex+1:], n.parent.values[n.parentIndex:n.parent.nrSegments])
+ n.parent.keys[n.parentIndex], n.parent.values[n.parentIndex] = n.keys[addrminDegree-1], n.values[addrminDegree-1]
+ copy(n.parent.children[n.parentIndex+2:], n.parent.children[n.parentIndex+1:n.parent.nrSegments+1])
+ for i := n.parentIndex + 2; i < n.parent.nrSegments+2; i++ {
+ n.parent.children[i].parentIndex = i
+ }
+ sibling := &addrnode{
+ nrSegments: addrminDegree - 1,
+ parent: n.parent,
+ parentIndex: n.parentIndex + 1,
+ hasChildren: n.hasChildren,
+ }
+ n.parent.children[n.parentIndex+1] = sibling
+ n.parent.nrSegments++
+ copy(sibling.keys[:addrminDegree-1], n.keys[addrminDegree:])
+ copy(sibling.values[:addrminDegree-1], n.values[addrminDegree:])
+ addrzeroValueSlice(n.values[addrminDegree-1:])
+ if n.hasChildren {
+ copy(sibling.children[:addrminDegree], n.children[addrminDegree:])
+ addrzeroNodeSlice(n.children[addrminDegree:])
+ for i := 0; i < addrminDegree; i++ {
+ sibling.children[i].parent = sibling
+ sibling.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments = addrminDegree - 1
+
+ if addrtrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+
+ if gap.node != n {
+ return gap
+ }
+ if gap.index < addrminDegree {
+ return gap
+ }
+ return addrGapIterator{sibling, gap.index - addrminDegree}
+}
+
+// rebalanceAfterRemove "unsplits" n and its ancestors if they are deficient
+// (contain fewer segments than required by B-tree invariants), as required for
+// removal, and returns an updated iterator to the position represented by gap.
+//
+// Precondition: n is the only node in the tree that may currently violate a
+// B-tree invariant.
+func (n *addrnode) rebalanceAfterRemove(gap addrGapIterator) addrGapIterator {
+ for {
+ if n.nrSegments >= addrminDegree-1 {
+ return gap
+ }
+ if n.parent == nil {
+
+ return gap
+ }
+
+ if sibling := n.prevSibling(); sibling != nil && sibling.nrSegments >= addrminDegree {
+ copy(n.keys[1:], n.keys[:n.nrSegments])
+ copy(n.values[1:], n.values[:n.nrSegments])
+ n.keys[0] = n.parent.keys[n.parentIndex-1]
+ n.values[0] = n.parent.values[n.parentIndex-1]
+ n.parent.keys[n.parentIndex-1] = sibling.keys[sibling.nrSegments-1]
+ n.parent.values[n.parentIndex-1] = sibling.values[sibling.nrSegments-1]
+ addrSetFunctions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
+ if n.hasChildren {
+ copy(n.children[1:], n.children[:n.nrSegments+1])
+ n.children[0] = sibling.children[sibling.nrSegments]
+ sibling.children[sibling.nrSegments] = nil
+ n.children[0].parent = n
+ n.children[0].parentIndex = 0
+ for i := 1; i < n.nrSegments+2; i++ {
+ n.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments++
+ sibling.nrSegments--
+
+ if addrtrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+ if gap.node == sibling && gap.index == sibling.nrSegments {
+ return addrGapIterator{n, 0}
+ }
+ if gap.node == n {
+ return addrGapIterator{n, gap.index + 1}
+ }
+ return gap
+ }
+ if sibling := n.nextSibling(); sibling != nil && sibling.nrSegments >= addrminDegree {
+ n.keys[n.nrSegments] = n.parent.keys[n.parentIndex]
+ n.values[n.nrSegments] = n.parent.values[n.parentIndex]
+ n.parent.keys[n.parentIndex] = sibling.keys[0]
+ n.parent.values[n.parentIndex] = sibling.values[0]
+ copy(sibling.keys[:sibling.nrSegments-1], sibling.keys[1:])
+ copy(sibling.values[:sibling.nrSegments-1], sibling.values[1:])
+ addrSetFunctions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
+ if n.hasChildren {
+ n.children[n.nrSegments+1] = sibling.children[0]
+ copy(sibling.children[:sibling.nrSegments], sibling.children[1:])
+ sibling.children[sibling.nrSegments] = nil
+ n.children[n.nrSegments+1].parent = n
+ n.children[n.nrSegments+1].parentIndex = n.nrSegments + 1
+ for i := 0; i < sibling.nrSegments; i++ {
+ sibling.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments++
+ sibling.nrSegments--
+
+ if addrtrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+ if gap.node == sibling {
+ if gap.index == 0 {
+ return addrGapIterator{n, n.nrSegments}
+ }
+ return addrGapIterator{sibling, gap.index - 1}
+ }
+ return gap
+ }
+
+ p := n.parent
+ if p.nrSegments == 1 {
+
+ left, right := p.children[0], p.children[1]
+ p.nrSegments = left.nrSegments + right.nrSegments + 1
+ p.hasChildren = left.hasChildren
+ p.keys[left.nrSegments] = p.keys[0]
+ p.values[left.nrSegments] = p.values[0]
+ copy(p.keys[:left.nrSegments], left.keys[:left.nrSegments])
+ copy(p.values[:left.nrSegments], left.values[:left.nrSegments])
+ copy(p.keys[left.nrSegments+1:], right.keys[:right.nrSegments])
+ copy(p.values[left.nrSegments+1:], right.values[:right.nrSegments])
+ if left.hasChildren {
+ copy(p.children[:left.nrSegments+1], left.children[:left.nrSegments+1])
+ copy(p.children[left.nrSegments+1:], right.children[:right.nrSegments+1])
+ for i := 0; i < p.nrSegments+1; i++ {
+ p.children[i].parent = p
+ p.children[i].parentIndex = i
+ }
+ } else {
+ p.children[0] = nil
+ p.children[1] = nil
+ }
+
+ if gap.node == left {
+ return addrGapIterator{p, gap.index}
+ }
+ if gap.node == right {
+ return addrGapIterator{p, gap.index + left.nrSegments + 1}
+ }
+ return gap
+ }
+ // Merge n and either sibling, along with the segment separating the
+ // two, into whichever of the two nodes comes first. This is the
+ // reverse of the non-root splitting case in
+ // node.rebalanceBeforeInsert.
+ var left, right *addrnode
+ if n.parentIndex > 0 {
+ left = n.prevSibling()
+ right = n
+ } else {
+ left = n
+ right = n.nextSibling()
+ }
+
+ if gap.node == right {
+ gap = addrGapIterator{left, gap.index + left.nrSegments + 1}
+ }
+ left.keys[left.nrSegments] = p.keys[left.parentIndex]
+ left.values[left.nrSegments] = p.values[left.parentIndex]
+ copy(left.keys[left.nrSegments+1:], right.keys[:right.nrSegments])
+ copy(left.values[left.nrSegments+1:], right.values[:right.nrSegments])
+ if left.hasChildren {
+ copy(left.children[left.nrSegments+1:], right.children[:right.nrSegments+1])
+ for i := left.nrSegments + 1; i < left.nrSegments+right.nrSegments+2; i++ {
+ left.children[i].parent = left
+ left.children[i].parentIndex = i
+ }
+ }
+ left.nrSegments += right.nrSegments + 1
+ copy(p.keys[left.parentIndex:], p.keys[left.parentIndex+1:p.nrSegments])
+ copy(p.values[left.parentIndex:], p.values[left.parentIndex+1:p.nrSegments])
+ addrSetFunctions{}.ClearValue(&p.values[p.nrSegments-1])
+ copy(p.children[left.parentIndex+1:], p.children[left.parentIndex+2:p.nrSegments+1])
+ for i := 0; i < p.nrSegments; i++ {
+ p.children[i].parentIndex = i
+ }
+ p.children[p.nrSegments] = nil
+ p.nrSegments--
+
+ if addrtrackGaps != 0 {
+ left.updateMaxGapLocal()
+ }
+
+ n = p
+ }
+}
+
+// updateMaxGapLeaf updates maxGap bottom-up from the calling leaf until no
+// further update is necessary.
+//
+// Preconditions: n must be a leaf node, trackGaps must be 1.
+func (n *addrnode) updateMaxGapLeaf() {
+ if n.hasChildren {
+ panic(fmt.Sprintf("updateMaxGapLeaf should always be called on leaf node: %v", n))
+ }
+ max := n.calculateMaxGapLeaf()
+ if max == n.maxGap.Get() {
+
+ return
+ }
+ oldMax := n.maxGap.Get()
+ n.maxGap.Set(max)
+ if max > oldMax {
+
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() >= max {
+
+ break
+ }
+
+ p.maxGap.Set(max)
+ }
+ return
+ }
+
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() > oldMax {
+
+ break
+ }
+
+ parentNewMax := p.calculateMaxGapInternal()
+ if p.maxGap.Get() == parentNewMax {
+
+ break
+ }
+
+ p.maxGap.Set(parentNewMax)
+ }
+}
+
+// updateMaxGapLocal updates only the maxGap of the calling node, with no
+// propagation to ancestor nodes.
+//
+// Precondition: trackGaps must be 1.
+func (n *addrnode) updateMaxGapLocal() {
+ if !n.hasChildren {
+
+ n.maxGap.Set(n.calculateMaxGapLeaf())
+ } else {
+
+ n.maxGap.Set(n.calculateMaxGapInternal())
+ }
+}
+
+// calculateMaxGapLeaf iterates the gaps within a leaf node and returns the
+// maximum.
+//
+// Preconditions: n must be a leaf node.
+func (n *addrnode) calculateMaxGapLeaf() uintptr {
+ max := addrGapIterator{n, 0}.Range().Length()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := (addrGapIterator{n, i}).Range().Length(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// calculateMaxGapInternal iterates the children's maxGap within an internal
+// node n and returns the maximum.
+//
+// Preconditions: n must be a non-leaf node.
+func (n *addrnode) calculateMaxGapInternal() uintptr {
+ max := n.children[0].maxGap.Get()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := n.children[i].maxGap.Get(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// searchFirstLargeEnoughGap returns the first gap having at least minSize
+// length in the subtree rooted by n. If no such gap exists, it returns a
+// terminal gap iterator.
+func (n *addrnode) searchFirstLargeEnoughGap(minSize uintptr) addrGapIterator {
+ if n.maxGap.Get() < minSize {
+ return addrGapIterator{}
+ }
+ if n.hasChildren {
+ for i := 0; i <= n.nrSegments; i++ {
+ if largeEnoughGap := n.children[i].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := 0; i <= n.nrSegments; i++ {
+ currentGap := addrGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
+// searchLastLargeEnoughGap returns the last gap having at least minSize
+// length in the subtree rooted by n. If no such gap exists, it returns a
+// terminal gap iterator.
+func (n *addrnode) searchLastLargeEnoughGap(minSize uintptr) addrGapIterator {
+ if n.maxGap.Get() < minSize {
+ return addrGapIterator{}
+ }
+ if n.hasChildren {
+ for i := n.nrSegments; i >= 0; i-- {
+ if largeEnoughGap := n.children[i].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := n.nrSegments; i >= 0; i-- {
+ currentGap := addrGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
+// An Iterator is conceptually one of:
+//
+// - A pointer to a segment in a set; or
+//
+// - A terminal iterator, which is a sentinel indicating that the end of
+// iteration has been reached.
+//
+// Iterators are copyable values and are meaningfully equality-comparable. The
+// zero value of Iterator is a terminal iterator.
+//
+// Unless otherwise specified, any mutation of a set invalidates all existing
+// iterators into the set.
+type addrIterator struct {
+ // node is the node containing the iterated segment. If the iterator is
+ // terminal, node is nil.
+ node *addrnode
+
+ // index is the index of the segment in node.keys/values.
+ index int
+}
+
+// Ok returns true if the iterator is not terminal. All other methods are only
+// valid for non-terminal iterators.
+func (seg addrIterator) Ok() bool {
+ return seg.node != nil
+}
+
+// Range returns the iterated segment's range key.
+func (seg addrIterator) Range() addrRange {
+ return seg.node.keys[seg.index]
+}
+
+// Start is equivalent to Range().Start, but should be preferred if only the
+// start of the range is needed.
+func (seg addrIterator) Start() uintptr {
+ return seg.node.keys[seg.index].Start
+}
+
+// End is equivalent to Range().End, but should be preferred if only the end of
+// the range is needed.
+func (seg addrIterator) End() uintptr {
+ return seg.node.keys[seg.index].End
+}
+
+// SetRangeUnchecked mutates the iterated segment's range key. This operation
+// does not invalidate any iterators.
+//
+// Preconditions:
+// * r.Length() > 0.
+// * The new range must not overlap an existing one:
+// * If seg.NextSegment().Ok(), then r.End <= seg.NextSegment().Start().
+// * If seg.PrevSegment().Ok(), then r.Start >= seg.PrevSegment().End().
+func (seg addrIterator) SetRangeUnchecked(r addrRange) {
+ seg.node.keys[seg.index] = r
+}
+
+// SetRange mutates the iterated segment's range key. If the new range would
+// cause the iterated segment to overlap another segment, or if the new range
+// is invalid, SetRange panics. This operation does not invalidate any
+// iterators.
+func (seg addrIterator) SetRange(r addrRange) {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ if prev := seg.PrevSegment(); prev.Ok() && r.Start < prev.End() {
+ panic(fmt.Sprintf("new segment range %v overlaps segment range %v", r, prev.Range()))
+ }
+ if next := seg.NextSegment(); next.Ok() && r.End > next.Start() {
+ panic(fmt.Sprintf("new segment range %v overlaps segment range %v", r, next.Range()))
+ }
+ seg.SetRangeUnchecked(r)
+}
+
+// SetStartUnchecked mutates the iterated segment's start. This operation does
+// not invalidate any iterators.
+//
+// Preconditions: The new start must be valid:
+// * start < seg.End()
+// * If seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
+func (seg addrIterator) SetStartUnchecked(start uintptr) {
+ seg.node.keys[seg.index].Start = start
+}
+
+// SetStart mutates the iterated segment's start. If the new start value would
+// cause the iterated segment to overlap another segment, or would result in an
+// invalid range, SetStart panics. This operation does not invalidate any
+// iterators.
+func (seg addrIterator) SetStart(start uintptr) {
+ if start >= seg.End() {
+ panic(fmt.Sprintf("new start %v would invalidate segment range %v", start, seg.Range()))
+ }
+ if prev := seg.PrevSegment(); prev.Ok() && start < prev.End() {
+ panic(fmt.Sprintf("new start %v would cause segment range %v to overlap segment range %v", start, seg.Range(), prev.Range()))
+ }
+ seg.SetStartUnchecked(start)
+}
+
+// SetEndUnchecked mutates the iterated segment's end. This operation does not
+// invalidate any iterators.
+//
+// Preconditions: The new end must be valid:
+// * end > seg.Start().
+// * If seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
+func (seg addrIterator) SetEndUnchecked(end uintptr) {
+ seg.node.keys[seg.index].End = end
+}
+
+// SetEnd mutates the iterated segment's end. If the new end value would cause
+// the iterated segment to overlap another segment, or would result in an
+// invalid range, SetEnd panics. This operation does not invalidate any
+// iterators.
+func (seg addrIterator) SetEnd(end uintptr) {
+ if end <= seg.Start() {
+ panic(fmt.Sprintf("new end %v would invalidate segment range %v", end, seg.Range()))
+ }
+ if next := seg.NextSegment(); next.Ok() && end > next.Start() {
+ panic(fmt.Sprintf("new end %v would cause segment range %v to overlap segment range %v", end, seg.Range(), next.Range()))
+ }
+ seg.SetEndUnchecked(end)
+}
+
+// Value returns a copy of the iterated segment's value.
+func (seg addrIterator) Value() *objectEncodeState {
+ return seg.node.values[seg.index]
+}
+
+// ValuePtr returns a pointer to the iterated segment's value. The pointer is
+// invalidated if the iterator is invalidated. This operation does not
+// invalidate any iterators.
+func (seg addrIterator) ValuePtr() **objectEncodeState {
+ return &seg.node.values[seg.index]
+}
+
+// SetValue mutates the iterated segment's value. This operation does not
+// invalidate any iterators.
+func (seg addrIterator) SetValue(val *objectEncodeState) {
+ seg.node.values[seg.index] = val
+}
+
+// PrevSegment returns the iterated segment's predecessor. If there is no
+// preceding segment, PrevSegment returns a terminal iterator.
+func (seg addrIterator) PrevSegment() addrIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index].lastSegment()
+ }
+ if seg.index > 0 {
+ return addrIterator{seg.node, seg.index - 1}
+ }
+ if seg.node.parent == nil {
+ return addrIterator{}
+ }
+ return addrsegmentBeforePosition(seg.node.parent, seg.node.parentIndex)
+}
+
+// NextSegment returns the iterated segment's successor. If there is no
+// succeeding segment, NextSegment returns a terminal iterator.
+func (seg addrIterator) NextSegment() addrIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index+1].firstSegment()
+ }
+ if seg.index < seg.node.nrSegments-1 {
+ return addrIterator{seg.node, seg.index + 1}
+ }
+ if seg.node.parent == nil {
+ return addrIterator{}
+ }
+ return addrsegmentAfterPosition(seg.node.parent, seg.node.parentIndex)
+}
+
+// PrevGap returns the gap immediately before the iterated segment.
+func (seg addrIterator) PrevGap() addrGapIterator {
+ if seg.node.hasChildren {
+
+ return seg.node.children[seg.index].lastSegment().NextGap()
+ }
+ return addrGapIterator{seg.node, seg.index}
+}
+
+// NextGap returns the gap immediately after the iterated segment.
+func (seg addrIterator) NextGap() addrGapIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index+1].firstSegment().PrevGap()
+ }
+ return addrGapIterator{seg.node, seg.index + 1}
+}
+
+// PrevNonEmpty returns the iterated segment's predecessor if it is adjacent,
+// or the gap before the iterated segment otherwise. If seg.Start() ==
+// Functions.MinKey(), PrevNonEmpty will return two terminal iterators.
+// Otherwise, exactly one of the iterators returned by PrevNonEmpty will be
+// non-terminal.
+func (seg addrIterator) PrevNonEmpty() (addrIterator, addrGapIterator) {
+ gap := seg.PrevGap()
+ if gap.Range().Length() != 0 {
+ return addrIterator{}, gap
+ }
+ return gap.PrevSegment(), addrGapIterator{}
+}
+
+// NextNonEmpty returns the iterated segment's successor if it is adjacent, or
+// the gap after the iterated segment otherwise. If seg.End() ==
+// Functions.MaxKey(), NextNonEmpty will return two terminal iterators.
+// Otherwise, exactly one of the iterators returned by NextNonEmpty will be
+// non-terminal.
+func (seg addrIterator) NextNonEmpty() (addrIterator, addrGapIterator) {
+ gap := seg.NextGap()
+ if gap.Range().Length() != 0 {
+ return addrIterator{}, gap
+ }
+ return gap.NextSegment(), addrGapIterator{}
+}
+
+// A GapIterator is conceptually one of:
+//
+// - A pointer to a position between two segments, before the first segment, or
+// after the last segment in a set, called a *gap*; or
+//
+// - A terminal iterator, which is a sentinel indicating that the end of
+// iteration has been reached.
+//
+// Note that the gap between two adjacent segments exists (iterators to it are
+// non-terminal), but has a length of zero. GapIterator.IsEmpty returns true
+// for such gaps. An empty set contains a single gap, spanning the entire range
+// of the set's keys.
+//
+// GapIterators are copyable values and are meaningfully equality-comparable.
+// The zero value of GapIterator is a terminal iterator.
+//
+// Unless otherwise specified, any mutation of a set invalidates all existing
+// iterators into the set.
+type addrGapIterator struct {
+ // The representation of a GapIterator is identical to that of an Iterator,
+ // except that index corresponds to positions between segments in the same
+ // way as for node.children (see comment for node.nrSegments).
+ node *addrnode
+ index int
+}
+
+// Ok returns true if the iterator is not terminal. All other methods are only
+// valid for non-terminal iterators.
+func (gap addrGapIterator) Ok() bool {
+ return gap.node != nil
+}
+
+// Range returns the range spanned by the iterated gap.
+func (gap addrGapIterator) Range() addrRange {
+ return addrRange{gap.Start(), gap.End()}
+}
+
+// Start is equivalent to Range().Start, but should be preferred if only the
+// start of the range is needed.
+func (gap addrGapIterator) Start() uintptr {
+ if ps := gap.PrevSegment(); ps.Ok() {
+ return ps.End()
+ }
+ return addrSetFunctions{}.MinKey()
+}
+
+// End is equivalent to Range().End, but should be preferred if only the end of
+// the range is needed.
+func (gap addrGapIterator) End() uintptr {
+ if ns := gap.NextSegment(); ns.Ok() {
+ return ns.Start()
+ }
+ return addrSetFunctions{}.MaxKey()
+}
+
+// IsEmpty returns true if the iterated gap is empty (that is, the "gap" is
+// between two adjacent segments.)
+func (gap addrGapIterator) IsEmpty() bool {
+ return gap.Range().Length() == 0
+}
+
+// PrevSegment returns the segment immediately before the iterated gap. If no
+// such segment exists, PrevSegment returns a terminal iterator.
+func (gap addrGapIterator) PrevSegment() addrIterator {
+ return addrsegmentBeforePosition(gap.node, gap.index)
+}
+
+// NextSegment returns the segment immediately after the iterated gap. If no
+// such segment exists, NextSegment returns a terminal iterator.
+func (gap addrGapIterator) NextSegment() addrIterator {
+ return addrsegmentAfterPosition(gap.node, gap.index)
+}
+
+// PrevGap returns the iterated gap's predecessor. If no such gap exists,
+// PrevGap returns a terminal iterator.
+func (gap addrGapIterator) PrevGap() addrGapIterator {
+ seg := gap.PrevSegment()
+ if !seg.Ok() {
+ return addrGapIterator{}
+ }
+ return seg.PrevGap()
+}
+
+// NextGap returns the iterated gap's successor. If no such gap exists, NextGap
+// returns a terminal iterator.
+func (gap addrGapIterator) NextGap() addrGapIterator {
+ seg := gap.NextSegment()
+ if !seg.Ok() {
+ return addrGapIterator{}
+ }
+ return seg.NextGap()
+}
+
+// NextLargeEnoughGap returns the first gap after the iterated gap with length
+// at least minSize. If no such gap exists, it returns a terminal gap iterator
+// (the search does NOT include the iterated gap itself).
+//
+// Precondition: trackGaps must be 1.
+func (gap addrGapIterator) NextLargeEnoughGap(minSize uintptr) addrGapIterator {
+ if addrtrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == gap.node.nrSegments {
+
+ gap.node = gap.NextSegment().node
+ gap.index = 0
+ return gap.nextLargeEnoughGapHelper(minSize)
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
+
+// nextLargeEnoughGapHelper is the helper function used by NextLargeEnoughGap
+// to perform the actual recursion.
+//
+// Preconditions: gap is NOT the trailing gap of a non-leaf node.
+func (gap addrGapIterator) nextLargeEnoughGapHelper(minSize uintptr) addrGapIterator {
+
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == gap.node.nrSegments)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return addrGapIterator{}
+ }
+
+ gap.index++
+ for gap.index <= gap.node.nrSegments {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index++
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == gap.node.nrSegments {
+
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
+
+// PrevLargeEnoughGap returns the first gap before the iterated gap with length
+// at least minSize. If no such gap exists, it returns a terminal gap iterator
+// (the search does NOT include the iterated gap itself).
+//
+// Precondition: trackGaps must be 1.
+func (gap addrGapIterator) PrevLargeEnoughGap(minSize uintptr) addrGapIterator {
+ if addrtrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == 0 {
+
+ gap.node = gap.PrevSegment().node
+ gap.index = gap.node.nrSegments
+ return gap.prevLargeEnoughGapHelper(minSize)
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
+
+// prevLargeEnoughGapHelper is the helper function used by PrevLargeEnoughGap
+// to perform the actual recursion.
+//
+// Preconditions: gap is NOT the first gap of a non-leaf node.
+func (gap addrGapIterator) prevLargeEnoughGapHelper(minSize uintptr) addrGapIterator {
+
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == 0)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return addrGapIterator{}
+ }
+
+ gap.index--
+ for gap.index >= 0 {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index--
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == 0 {
+
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
+
+// segmentBeforePosition returns the predecessor segment of the position given
+// by n.children[i], which may or may not contain a child. If no such segment
+// exists, segmentBeforePosition returns a terminal iterator.
+func addrsegmentBeforePosition(n *addrnode, i int) addrIterator {
+ for i == 0 {
+ if n.parent == nil {
+ return addrIterator{}
+ }
+ n, i = n.parent, n.parentIndex
+ }
+ return addrIterator{n, i - 1}
+}
+
+// segmentAfterPosition returns the successor segment of the position given by
+// n.children[i], which may or may not contain a child. If no such segment
+// exists, segmentAfterPosition returns a terminal iterator.
+func addrsegmentAfterPosition(n *addrnode, i int) addrIterator {
+ for i == n.nrSegments {
+ if n.parent == nil {
+ return addrIterator{}
+ }
+ n, i = n.parent, n.parentIndex
+ }
+ return addrIterator{n, i}
+}
+
+func addrzeroValueSlice(slice []*objectEncodeState) {
+
+ for i := range slice {
+ addrSetFunctions{}.ClearValue(&slice[i])
+ }
+}
+
+func addrzeroNodeSlice(slice []*addrnode) {
+ for i := range slice {
+ slice[i] = nil
+ }
+}
+
+// String stringifies a Set for debugging.
+func (s *addrSet) String() string {
+ return s.root.String()
+}
+
+// String stringifies a node (and all of its children) for debugging.
+func (n *addrnode) String() string {
+ var buf bytes.Buffer
+ n.writeDebugString(&buf, "")
+ return buf.String()
+}
+
+func (n *addrnode) writeDebugString(buf *bytes.Buffer, prefix string) {
+ if n.hasChildren != (n.nrSegments > 0 && n.children[0] != nil) {
+ buf.WriteString(prefix)
+ buf.WriteString(fmt.Sprintf("WARNING: inconsistent value of hasChildren: got %v, want %v\n", n.hasChildren, !n.hasChildren))
+ }
+ for i := 0; i < n.nrSegments; i++ {
+ if child := n.children[i]; child != nil {
+ cprefix := fmt.Sprintf("%s- % 3d ", prefix, i)
+ if child.parent != n || child.parentIndex != i {
+ buf.WriteString(cprefix)
+ buf.WriteString(fmt.Sprintf("WARNING: inconsistent linkage to parent: got (%p, %d), want (%p, %d)\n", child.parent, child.parentIndex, n, i))
+ }
+ child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, i))
+ }
+ buf.WriteString(prefix)
+ if n.hasChildren {
+ if addrtrackGaps != 0 {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v, maxGap: %d\n", i, n.keys[i], n.values[i], n.maxGap.Get()))
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
+ }
+ if child := n.children[n.nrSegments]; child != nil {
+ child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, n.nrSegments))
+ }
+}
+
+// SegmentDataSlices represents segments from a set as slices of start, end, and
+// values. SegmentDataSlices is primarily used as an intermediate representation
+// for save/restore and the layout here is optimized for that.
+//
+// +stateify savable
+type addrSegmentDataSlices struct {
+ Start []uintptr
+ End []uintptr
+ Values []*objectEncodeState
+}
+
+// ExportSortedSlices returns a copy of all segments in the given set, in
+// ascending key order.
+func (s *addrSet) ExportSortedSlices() *addrSegmentDataSlices {
+ var sds addrSegmentDataSlices
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ sds.Start = append(sds.Start, seg.Start())
+ sds.End = append(sds.End, seg.End())
+ sds.Values = append(sds.Values, seg.Value())
+ }
+ sds.Start = sds.Start[:len(sds.Start):len(sds.Start)]
+ sds.End = sds.End[:len(sds.End):len(sds.End)]
+ sds.Values = sds.Values[:len(sds.Values):len(sds.Values)]
+ return &sds
+}
+
+// ImportSortedSlices initializes the given set from the given slices.
+//
+// Preconditions:
+// * s must be empty.
+// * sds must represent a valid set (the segments in sds must have valid
+// lengths that do not overlap).
+// * The segments in sds must be sorted in ascending key order.
+func (s *addrSet) ImportSortedSlices(sds *addrSegmentDataSlices) error {
+ if !s.IsEmpty() {
+ return fmt.Errorf("cannot import into non-empty set %v", s)
+ }
+ gap := s.FirstGap()
+ for i := range sds.Start {
+ r := addrRange{sds.Start[i], sds.End[i]}
+ if !gap.Range().IsSupersetOf(r) {
+ return fmt.Errorf("segment overlaps a preceding segment or is incorrectly sorted: [%d, %d) => %v", sds.Start[i], sds.End[i], sds.Values[i])
+ }
+ gap = s.InsertWithoutMerging(gap, r, sds.Values[i]).NextGap()
+ }
+ return nil
+}
+
+// segmentTestCheck returns an error if s is incorrectly sorted, does not
+// contain exactly expectedSegments segments, or contains a segment which
+// fails the passed check.
+//
+// This should be used only for testing, and has been added to this package for
+// templating convenience.
+func (s *addrSet) segmentTestCheck(expectedSegments int, segFunc func(int, addrRange, *objectEncodeState) error) error {
+ havePrev := false
+ prev := uintptr(0)
+ nrSegments := 0
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ next := seg.Start()
+ if havePrev && prev >= next {
+ return fmt.Errorf("incorrect order: key %d (segment %d) >= key %d (segment %d)", prev, nrSegments-1, next, nrSegments)
+ }
+ if segFunc != nil {
+ if err := segFunc(nrSegments, seg.Range(), seg.Value()); err != nil {
+ return err
+ }
+ }
+ prev = next
+ havePrev = true
+ nrSegments++
+ }
+ if nrSegments != expectedSegments {
+ return fmt.Errorf("incorrect number of segments: got %d, wanted %d", nrSegments, expectedSegments)
+ }
+ return nil
+}
+
+// countSegments counts the number of segments in the set.
+//
+// Similar to Check, this should only be used for testing.
+func (s *addrSet) countSegments() (segments int) {
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ segments++
+ }
+ return segments
+}
+func (s *addrSet) saveRoot() *addrSegmentDataSlices {
+ return s.ExportSortedSlices()
+}
+
+func (s *addrSet) loadRoot(sds *addrSegmentDataSlices) {
+ if err := s.ImportSortedSlices(sds); err != nil {
+ panic(err)
+ }
+}
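A short, hypothetical test-style sketch of the set API added above (not part of the patch). It assumes it lives in package state; it uses nil values and non-adjacent ranges so that the Merge callback of `addrSetFunctions` (defined elsewhere in the package, along with `objectEncodeState`) never needs to combine values.

```go
// Hypothetical sketch in package state; relies only on the addrSet API shown above.
package state

import "fmt"

func exampleAddrSet() {
	var s addrSet // the zero value is an empty set

	// Insert two non-adjacent segments (values omitted for brevity).
	s.Add(addrRange{Start: 0x1000, End: 0x2000}, nil)
	s.Add(addrRange{Start: 0x3000, End: 0x4000}, nil)

	// Point lookups: a key falls in either a segment or a gap.
	if seg := s.FindSegment(0x1800); seg.Ok() {
		fmt.Printf("0x1800 is in segment %v\n", seg.Range())
	}
	if gap := s.FindGap(0x2800); gap.Ok() {
		fmt.Printf("0x2800 falls in gap %v\n", gap.Range()) // [0x2000, 0x3000)
	}

	// Ordered traversal over all segments.
	for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
		fmt.Println(seg.Range())
	}

	// Total bytes covered, and bytes covered within a query range.
	fmt.Println(s.Span())                                           // 0x2000
	fmt.Println(s.SpanRange(addrRange{Start: 0x1800, End: 0x3800})) // 0x1000
}
```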
diff --git a/pkg/state/complete_list.go b/pkg/state/complete_list.go
new file mode 100644
index 000000000..4d340a1af
--- /dev/null
+++ b/pkg/state/complete_list.go
@@ -0,0 +1,221 @@
+package state
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type completeElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (completeElementMapper) linkerFor(elem *objectDecodeState) *objectDecodeState { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type completeList struct {
+ head *objectDecodeState
+ tail *objectDecodeState
+}
+
+// Reset resets list l to the empty state.
+func (l *completeList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *completeList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *completeList) Front() *objectDecodeState {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *completeList) Back() *objectDecodeState {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *completeList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (completeElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *completeList) PushFront(e *objectDecodeState) {
+ linker := completeElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ completeElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *completeList) PushBack(e *objectDecodeState) {
+ linker := completeElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ completeElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *completeList) PushBackList(m *completeList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ completeElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ completeElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *completeList) InsertAfter(b, e *objectDecodeState) {
+ bLinker := completeElementMapper{}.linkerFor(b)
+ eLinker := completeElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ completeElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *completeList) InsertBefore(a, e *objectDecodeState) {
+ aLinker := completeElementMapper{}.linkerFor(a)
+ eLinker := completeElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ completeElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *completeList) Remove(e *objectDecodeState) {
+ linker := completeElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ completeElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ completeElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type completeEntry struct {
+ next *objectDecodeState
+ prev *objectDecodeState
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *completeEntry) Next() *objectDecodeState {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *completeEntry) Prev() *objectDecodeState {
+ return e.prev
+}
+
+// SetNext assigns 'elem' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *completeEntry) SetNext(elem *objectDecodeState) {
+ e.next = elem
+}
+
+// SetPrev assigns 'elem' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *completeEntry) SetPrev(elem *objectDecodeState) {
+ e.prev = elem
+}
diff --git a/pkg/state/deferred_list.go b/pkg/state/deferred_list.go
new file mode 100644
index 000000000..2753ce4a2
--- /dev/null
+++ b/pkg/state/deferred_list.go
@@ -0,0 +1,206 @@
+package state
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type deferredList struct {
+ head *objectEncodeState
+ tail *objectEncodeState
+}
+
+// Reset resets list l to the empty state.
+func (l *deferredList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *deferredList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *deferredList) Front() *objectEncodeState {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *deferredList) Back() *objectEncodeState {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *deferredList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (deferredMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *deferredList) PushFront(e *objectEncodeState) {
+ linker := deferredMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ deferredMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *deferredList) PushBack(e *objectEncodeState) {
+ linker := deferredMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ deferredMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *deferredList) PushBackList(m *deferredList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ deferredMapper{}.linkerFor(l.tail).SetNext(m.head)
+ deferredMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *deferredList) InsertAfter(b, e *objectEncodeState) {
+ bLinker := deferredMapper{}.linkerFor(b)
+ eLinker := deferredMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ deferredMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *deferredList) InsertBefore(a, e *objectEncodeState) {
+ aLinker := deferredMapper{}.linkerFor(a)
+ eLinker := deferredMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ deferredMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *deferredList) Remove(e *objectEncodeState) {
+ linker := deferredMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ deferredMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ deferredMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type deferredEntry struct {
+ next *objectEncodeState
+ prev *objectEncodeState
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *deferredEntry) Next() *objectEncodeState {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *deferredEntry) Prev() *objectEncodeState {
+ return e.prev
+}
+
+// SetNext assigns 'elem' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *deferredEntry) SetNext(elem *objectEncodeState) {
+ e.next = elem
+}
+
+// SetPrev assigns 'elem' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *deferredEntry) SetPrev(elem *objectEncodeState) {
+ e.prev = elem
+}
diff --git a/pkg/state/pretty/BUILD b/pkg/state/pretty/BUILD
deleted file mode 100644
index d053802f7..000000000
--- a/pkg/state/pretty/BUILD
+++ /dev/null
@@ -1,13 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "pretty",
- srcs = ["pretty.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/state",
- "//pkg/state/wire",
- ],
-)
diff --git a/pkg/state/pretty/pretty_state_autogen.go b/pkg/state/pretty/pretty_state_autogen.go
new file mode 100644
index 000000000..e772e34a4
--- /dev/null
+++ b/pkg/state/pretty/pretty_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package pretty
diff --git a/pkg/state/statefile/BUILD b/pkg/state/statefile/BUILD
deleted file mode 100644
index 08d06e37b..000000000
--- a/pkg/state/statefile/BUILD
+++ /dev/null
@@ -1,21 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "statefile",
- srcs = ["statefile.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/compressio",
- "//pkg/state/wire",
- ],
-)
-
-go_test(
- name = "statefile_test",
- size = "small",
- srcs = ["statefile_test.go"],
- library = ":statefile",
- deps = ["//pkg/compressio"],
-)
diff --git a/pkg/state/statefile/statefile_state_autogen.go b/pkg/state/statefile/statefile_state_autogen.go
new file mode 100644
index 000000000..a2cdaa3f1
--- /dev/null
+++ b/pkg/state/statefile/statefile_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package statefile
diff --git a/pkg/state/statefile/statefile_test.go b/pkg/state/statefile/statefile_test.go
deleted file mode 100644
index 0b470fdec..000000000
--- a/pkg/state/statefile/statefile_test.go
+++ /dev/null
@@ -1,290 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package statefile
-
-import (
- "bytes"
- crand "crypto/rand"
- "encoding/base64"
- "io"
- "math/rand"
- "runtime"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/compressio"
-)
-
-func randomKey() ([]byte, error) {
- r := make([]byte, base64.RawStdEncoding.DecodedLen(keySize))
- if _, err := io.ReadFull(crand.Reader, r); err != nil {
- return nil, err
- }
- key := make([]byte, keySize)
- base64.RawStdEncoding.Encode(key, r)
- return key, nil
-}
-
-type testCase struct {
- name string
- data []byte
- metadata map[string]string
-}
-
-func TestStatefile(t *testing.T) {
- rand.Seed(time.Now().Unix())
-
- cases := []testCase{
- // Various data sizes.
- {"nil", nil, nil},
- {"empty", []byte(""), nil},
- {"some", []byte("_"), nil},
- {"one", []byte("0"), nil},
- {"two", []byte("01"), nil},
- {"three", []byte("012"), nil},
- {"four", []byte("0123"), nil},
- {"five", []byte("01234"), nil},
- {"six", []byte("012356"), nil},
- {"seven", []byte("0123567"), nil},
- {"eight", []byte("01235678"), nil},
-
- // Make sure we have one longer than the hash length.
- {"longer than hash", []byte("012356asdjflkasjlk3jlk23j4lkjaso0d789f0aujw3lkjlkxsdf78asdful2kj3ljka78"), nil},
-
- // Make sure we have one longer than the chunk size.
- {"chunks", make([]byte, 3*compressionChunkSize), nil},
- {"large", make([]byte, 30*compressionChunkSize), nil},
-
- // Different metadata.
- {"one metadata", []byte("data"), map[string]string{"foo": "bar"}},
- {"two metadata", []byte("data"), map[string]string{"foo": "bar", "one": "two"}},
- }
-
- for _, c := range cases {
- // Generate a key.
- integrityKey, err := randomKey()
- if err != nil {
-   t.Errorf("can't generate key: got %v, expected nil", err)
- continue
- }
-
- t.Run(c.name, func(t *testing.T) {
- for _, key := range [][]byte{nil, integrityKey} {
- t.Run("key="+string(key), func(t *testing.T) {
- // Encoding happens via a buffer.
- var bufEncoded bytes.Buffer
- var bufDecoded bytes.Buffer
-
- // Do all the writing.
- w, err := NewWriter(&bufEncoded, key, c.metadata)
- if err != nil {
- t.Fatalf("error creating writer: got %v, expected nil", err)
- }
- if _, err := io.Copy(w, bytes.NewBuffer(c.data)); err != nil {
- t.Fatalf("error during write: got %v, expected nil", err)
- }
-
- // Finish the sum.
- if err := w.Close(); err != nil {
- t.Fatalf("error during close: got %v, expected nil", err)
- }
-
- t.Logf("original data: %d bytes, encoded: %d bytes.",
- len(c.data), len(bufEncoded.Bytes()))
-
- // Do all the reading.
- r, metadata, err := NewReader(bytes.NewReader(bufEncoded.Bytes()), key)
- if err != nil {
- t.Fatalf("error creating reader: got %v, expected nil", err)
- }
- if _, err := io.Copy(&bufDecoded, r); err != nil {
- t.Fatalf("error during read: got %v, expected nil", err)
- }
-
- // Check that the data matches.
- if !bytes.Equal(c.data, bufDecoded.Bytes()) {
- t.Fatalf("data didn't match (%d vs %d bytes)", len(bufDecoded.Bytes()), len(c.data))
- }
-
- // Check that the metadata matches.
- for k, v := range c.metadata {
- nv, ok := metadata[k]
- if !ok {
- t.Fatalf("missing metadata: %s", k)
- }
- if v != nv {
-       t.Fatalf("mismatched metadata for %s: got %s, expected %s", k, nv, v)
- }
- }
-
- // Change the data and verify that it fails.
- if key != nil {
- b := append([]byte(nil), bufEncoded.Bytes()...)
- b[rand.Intn(len(b))]++
- bufDecoded.Reset()
- r, _, err = NewReader(bytes.NewReader(b), key)
- if err == nil {
- _, err = io.Copy(&bufDecoded, r)
- }
- if err == nil {
- t.Error("got no error: expected error on data corruption")
- }
- }
-
- // Change the key and verify that it fails.
- newKey := integrityKey
- if len(key) > 0 {
- newKey = append([]byte{}, key...)
- newKey[rand.Intn(len(newKey))]++
- }
- bufDecoded.Reset()
- r, _, err = NewReader(bytes.NewReader(bufEncoded.Bytes()), newKey)
- if err == nil {
- _, err = io.Copy(&bufDecoded, r)
- }
- if err != compressio.ErrHashMismatch {
- t.Errorf("got error: %v, expected ErrHashMismatch on key mismatch", err)
- }
- })
- }
- })
- }
-}
-
-const benchmarkDataSize = 100 * 1024 * 1024
-
-func benchmark(b *testing.B, size int, write bool, compressible bool) {
- b.StopTimer()
- b.SetBytes(benchmarkDataSize)
-
- // Generate source data.
- var source []byte
- if compressible {
- // For compressible data, we use essentially all zeros.
- source = make([]byte, benchmarkDataSize)
- } else {
- // For non-compressible data, we use random base64 data (to
- // make it marginally compressible, a ratio of 75%).
- var sourceBuf bytes.Buffer
- bufW := base64.NewEncoder(base64.RawStdEncoding, &sourceBuf)
- bufR := rand.New(rand.NewSource(0))
- if _, err := io.CopyN(bufW, bufR, benchmarkDataSize); err != nil {
- b.Fatalf("unable to seed random data: %v", err)
- }
- source = sourceBuf.Bytes()
- }
-
- // Generate a random key for integrity check.
- key, err := randomKey()
- if err != nil {
- b.Fatalf("error generating key: %v", err)
- }
-
- // Define our benchmark functions. Prior to running the readState
- // function here, you must execute the writeState function at least
- // once (done below).
- var stateBuf bytes.Buffer
- writeState := func() {
- stateBuf.Reset()
- w, err := NewWriter(&stateBuf, key, nil)
- if err != nil {
- b.Fatalf("error creating writer: %v", err)
- }
- for done := 0; done < len(source); {
- chunk := size // limit size.
- if done+chunk > len(source) {
- chunk = len(source) - done
- }
- n, err := w.Write(source[done : done+chunk])
- done += n
- if n == 0 && err != nil {
- b.Fatalf("error during write: %v", err)
- }
- }
- if err := w.Close(); err != nil {
- b.Fatalf("error closing writer: %v", err)
- }
- }
- readState := func() {
- tmpBuf := bytes.NewBuffer(stateBuf.Bytes())
- r, _, err := NewReader(tmpBuf, key)
- if err != nil {
- b.Fatalf("error creating reader: %v", err)
- }
- for done := 0; done < len(source); {
- chunk := size // limit size.
- if done+chunk > len(source) {
- chunk = len(source) - done
- }
- n, err := r.Read(source[done : done+chunk])
- done += n
- if n == 0 && err != nil {
- b.Fatalf("error during read: %v", err)
- }
- }
- }
- // Generate the state once without timing to ensure that buffers have
- // been appropriately allocated.
- writeState()
- if write {
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- writeState()
- }
- b.StopTimer()
- } else {
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- readState()
- }
- b.StopTimer()
- }
-}
-
-func BenchmarkWrite4KCompressible(b *testing.B) {
- benchmark(b, 4096, true, true)
-}
-
-func BenchmarkWrite4KNoncompressible(b *testing.B) {
- benchmark(b, 4096, true, false)
-}
-
-func BenchmarkWrite1MCompressible(b *testing.B) {
- benchmark(b, 1024*1024, true, true)
-}
-
-func BenchmarkWrite1MNoncompressible(b *testing.B) {
- benchmark(b, 1024*1024, true, false)
-}
-
-func BenchmarkRead4KCompressible(b *testing.B) {
- benchmark(b, 4096, false, true)
-}
-
-func BenchmarkRead4KNoncompressible(b *testing.B) {
- benchmark(b, 4096, false, false)
-}
-
-func BenchmarkRead1MCompressible(b *testing.B) {
- benchmark(b, 1024*1024, false, true)
-}
-
-func BenchmarkRead1MNoncompressible(b *testing.B) {
- benchmark(b, 1024*1024, false, false)
-}
-
-func init() {
- runtime.GOMAXPROCS(runtime.NumCPU())
-}
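
The deleted statefile_test.go above exercised the statefile Writer/Reader round trip and its integrity checking. For reference, a minimal sketch of that round trip, based only on the NewWriter/NewReader signatures visible in the deleted test; the "version" metadata key is made up for illustration, and error handling is reduced to panics:

package main

import (
	"bytes"
	"fmt"
	"io"

	"gvisor.dev/gvisor/pkg/state/statefile"
)

func main() {
	var buf bytes.Buffer
	metadata := map[string]string{"version": "example"} // hypothetical key/value

	// Write side: a nil key skips the integrity check, as in the deleted test.
	w, err := statefile.NewWriter(&buf, nil, metadata)
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(w, bytes.NewBufferString("state payload")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil { // Close finalizes the stream.
		panic(err)
	}

	// Read side: the payload and the metadata come back together.
	r, md, err := statefile.NewReader(bytes.NewReader(buf.Bytes()), nil)
	if err != nil {
		panic(err)
	}
	var out bytes.Buffer
	if _, err := io.Copy(&out, r); err != nil {
		panic(err)
	}
	fmt.Println(out.String(), md["version"])
}

With a non-nil key, corrupting either the data or the key makes NewReader or the subsequent copy fail, which is what the deleted test asserted.
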
diff --git a/pkg/state/tests/BUILD b/pkg/state/tests/BUILD
deleted file mode 100644
index 9297cafbe..000000000
--- a/pkg/state/tests/BUILD
+++ /dev/null
@@ -1,43 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "tests",
- srcs = [
- "array.go",
- "bench.go",
- "integer.go",
- "load.go",
- "map.go",
- "register.go",
- "struct.go",
- "tests.go",
- ],
- deps = [
- "//pkg/state",
- "//pkg/state/pretty",
- ],
-)
-
-go_test(
- name = "tests_test",
- size = "small",
- srcs = [
- "array_test.go",
- "bench_test.go",
- "bool_test.go",
- "float_test.go",
- "integer_test.go",
- "load_test.go",
- "map_test.go",
- "register_test.go",
- "string_test.go",
- "struct_test.go",
- ],
- library = ":tests",
- deps = [
- "//pkg/state",
- "//pkg/state/wire",
- ],
-)
diff --git a/pkg/state/tests/array.go b/pkg/state/tests/array.go
deleted file mode 100644
index 0972a80e7..000000000
--- a/pkg/state/tests/array.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tests
-
-// +stateify savable
-type arrayContainer struct {
- v [1]interface{}
-}
-
-// +stateify savable
-type arrayPtrContainer struct {
- v *[1]interface{}
-}
-
-// +stateify savable
-type sliceContainer struct {
- v []interface{}
-}
-
-// +stateify savable
-type slicePtrContainer struct {
- v *[]interface{}
-}
diff --git a/pkg/state/tests/array_test.go b/pkg/state/tests/array_test.go
deleted file mode 100644
index a347b2947..000000000
--- a/pkg/state/tests/array_test.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tests
-
-import (
- "reflect"
- "testing"
-)
-
-var allArrayPrimitives = []interface{}{
- [1]bool{},
- [1]bool{true},
- [2]bool{false, true},
- [1]int{},
- [1]int{1},
- [2]int{0, 1},
- [1]int8{},
- [1]int8{1},
- [2]int8{0, 1},
- [1]int16{},
- [1]int16{1},
- [2]int16{0, 1},
- [1]int32{},
- [1]int32{1},
- [2]int32{0, 1},
- [1]int64{},
- [1]int64{1},
- [2]int64{0, 1},
- [1]uint{},
- [1]uint{1},
- [2]uint{0, 1},
- [1]uintptr{},
- [1]uintptr{1},
- [2]uintptr{0, 1},
- [1]uint8{},
- [1]uint8{1},
- [2]uint8{0, 1},
- [1]uint16{},
- [1]uint16{1},
- [2]uint16{0, 1},
- [1]uint32{},
- [1]uint32{1},
- [2]uint32{0, 1},
- [1]uint64{},
- [1]uint64{1},
- [2]uint64{0, 1},
- [1]string{},
- [1]string{""},
- [1]string{nonEmptyString},
- [2]string{"", nonEmptyString},
-}
-
-func TestArrayPrimitives(t *testing.T) {
- runTestCases(t, false, "plain", flatten(allArrayPrimitives))
- runTestCases(t, false, "pointers", pointersTo(flatten(allArrayPrimitives)))
- runTestCases(t, false, "interfaces", interfacesTo(flatten(allArrayPrimitives)))
- runTestCases(t, false, "interfacesToPointers", interfacesTo(pointersTo(flatten(allArrayPrimitives))))
-}
-
-func TestSlices(t *testing.T) {
- var allSlices = flatten(
- filter(allArrayPrimitives, func(o interface{}) (interface{}, bool) {
- v := reflect.New(reflect.TypeOf(o)).Elem()
- v.Set(reflect.ValueOf(o))
- return v.Slice(0, v.Len()).Interface(), true
- }),
- filter(allArrayPrimitives, func(o interface{}) (interface{}, bool) {
- v := reflect.New(reflect.TypeOf(o)).Elem()
- v.Set(reflect.ValueOf(o))
- if v.Len() == 0 {
- // Return the pure "nil" value for the slice.
- return reflect.New(v.Slice(0, 0).Type()).Elem().Interface(), true
- }
- return v.Slice(1, v.Len()).Interface(), true
- }),
- filter(allArrayPrimitives, func(o interface{}) (interface{}, bool) {
- v := reflect.New(reflect.TypeOf(o)).Elem()
- v.Set(reflect.ValueOf(o))
- if v.Len() == 0 {
- // Return the zero-valued slice.
- return reflect.MakeSlice(v.Slice(0, 0).Type(), 0, 0).Interface(), true
- }
- return v.Slice(0, v.Len()-1).Interface(), true
- }),
- )
- runTestCases(t, false, "plain", allSlices)
- runTestCases(t, false, "pointers", pointersTo(allSlices))
- runTestCases(t, false, "interfaces", interfacesTo(allSlices))
- runTestCases(t, false, "interfacesToPointers", interfacesTo(pointersTo(allSlices)))
-}
-
-func TestArrayContainers(t *testing.T) {
- var (
- emptyArray [1]interface{}
- fullArray [1]interface{}
- )
- fullArray[0] = &emptyArray
- runTestCases(t, false, "", []interface{}{
- arrayContainer{v: emptyArray},
- arrayContainer{v: fullArray},
- arrayPtrContainer{v: nil},
- arrayPtrContainer{v: &emptyArray},
- arrayPtrContainer{v: &fullArray},
- })
-}
-
-func TestSliceContainers(t *testing.T) {
- var (
- nilSlice []interface{}
- emptySlice = make([]interface{}, 0)
- fullSlice = []interface{}{nil}
- )
- runTestCases(t, false, "", []interface{}{
- sliceContainer{v: nilSlice},
- sliceContainer{v: emptySlice},
- sliceContainer{v: fullSlice},
- slicePtrContainer{v: nil},
- slicePtrContainer{v: &nilSlice},
- slicePtrContainer{v: &emptySlice},
- slicePtrContainer{v: &fullSlice},
- })
-}
diff --git a/pkg/state/tests/bench.go b/pkg/state/tests/bench.go
deleted file mode 100644
index 40869cdfb..000000000
--- a/pkg/state/tests/bench.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tests
-
-// +stateify savable
-type benchStruct struct {
- B *benchStruct // Must be exported for gob.
-}
-
-func (b *benchStruct) afterLoad() {
- // Do nothing, just force scheduling.
-}
diff --git a/pkg/state/tests/bench_test.go b/pkg/state/tests/bench_test.go
deleted file mode 100644
index 7e102c907..000000000
--- a/pkg/state/tests/bench_test.go
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tests
-
-import (
- "bytes"
- "context"
- "encoding/gob"
- "fmt"
- "testing"
-
- "gvisor.dev/gvisor/pkg/state"
- "gvisor.dev/gvisor/pkg/state/wire"
-)
-
-// buildPtrObject builds a benchmark object.
-func buildPtrObject(n int) interface{} {
- b := new(benchStruct)
- for i := 0; i < n; i++ {
- b = &benchStruct{B: b}
- }
- return b
-}
-
-// buildMapObject builds a benchmark object.
-func buildMapObject(n int) interface{} {
- b := new(benchStruct)
- m := make(map[int]*benchStruct)
- for i := 0; i < n; i++ {
- m[i] = b
- }
- return &m
-}
-
-// buildSliceObject builds a benchmark object.
-func buildSliceObject(n int) interface{} {
- b := new(benchStruct)
- s := make([]*benchStruct, 0, n)
- for i := 0; i < n; i++ {
- s = append(s, b)
- }
- return &s
-}
-
-var allObjects = map[string]struct {
- New func(int) interface{}
-}{
- "ptr": {
- New: buildPtrObject,
- },
- "map": {
- New: buildMapObject,
- },
- "slice": {
- New: buildSliceObject,
- },
-}
-
-func buildObjects(n int, fn func(int) interface{}) (iters int, v interface{}) {
- // maxSize is the maximum size of an individual object below. For an N
- // larger than this, we start to return multiple objects.
- const maxSize = 1024
- if n <= maxSize {
- return 1, fn(n)
- }
- iters = (n + maxSize - 1) / maxSize
- return iters, fn(maxSize)
-}
-
-// gobSave is a version of save using gob (no stats available).
-func gobSave(_ context.Context, w wire.Writer, v interface{}) (_ state.Stats, err error) {
- enc := gob.NewEncoder(w)
- err = enc.Encode(v)
- return
-}
-
-// gobLoad is a version of load using gob (no stats available).
-func gobLoad(_ context.Context, r wire.Reader, v interface{}) (_ state.Stats, err error) {
- dec := gob.NewDecoder(r)
- err = dec.Decode(v)
- return
-}
-
-var allAlgos = map[string]struct {
- Save func(context.Context, wire.Writer, interface{}) (state.Stats, error)
- Load func(context.Context, wire.Reader, interface{}) (state.Stats, error)
- MaxPtr int
-}{
- "state": {
- Save: state.Save,
- Load: state.Load,
- },
- "gob": {
- Save: gobSave,
- Load: gobLoad,
- },
-}
-
-func BenchmarkEncoding(b *testing.B) {
- for objName, objInfo := range allObjects {
- for algoName, algoInfo := range allAlgos {
- b.Run(fmt.Sprintf("%s/%s", objName, algoName), func(b *testing.B) {
- b.StopTimer()
- n, v := buildObjects(b.N, objInfo.New)
- b.ReportAllocs()
- b.StartTimer()
- for i := 0; i < n; i++ {
- if _, err := algoInfo.Save(context.Background(), discard{}, v); err != nil {
- b.Errorf("save failed: %v", err)
- }
- }
- b.StopTimer()
- })
- }
- }
-}
-
-func BenchmarkDecoding(b *testing.B) {
- for objName, objInfo := range allObjects {
- for algoName, algoInfo := range allAlgos {
- b.Run(fmt.Sprintf("%s/%s", objName, algoName), func(b *testing.B) {
- b.StopTimer()
- n, v := buildObjects(b.N, objInfo.New)
- buf := new(bytes.Buffer)
- if _, err := algoInfo.Save(context.Background(), buf, v); err != nil {
- b.Errorf("save failed: %v", err)
- }
- b.ReportAllocs()
- b.StartTimer()
- var r bytes.Reader
- for i := 0; i < n; i++ {
- r.Reset(buf.Bytes())
- if _, err := algoInfo.Load(context.Background(), &r, v); err != nil {
- b.Errorf("load failed: %v", err)
- }
- }
- b.StopTimer()
- })
- }
- }
-}
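
The deleted bench_test.go compared pkg/state against encoding/gob by saving and loading pointer-, map- and slice-heavy objects through in-memory buffers. Below is a rough sketch of that round trip, using only the state.Save/state.Load signatures and the bytes.Buffer/bytes.Reader pattern visible above; the map-of-primitives root is an assumption, since the deleted map tests suggest primitive maps round-trip without explicit registration:

package main

import (
	"bytes"
	"context"
	"fmt"

	"gvisor.dev/gvisor/pkg/state"
)

func main() {
	src := map[int]int{1: 10, 2: 20}

	// Save into an in-memory buffer; bytes.Buffer satisfies wire.Writer, as
	// its use with algoInfo.Save in the deleted benchmark suggests.
	var buf bytes.Buffer
	if _, err := state.Save(context.Background(), &buf, &src); err != nil {
		panic(err)
	}

	// Load back through a bytes.Reader, mirroring BenchmarkDecoding above.
	var dst map[int]int
	r := bytes.NewReader(buf.Bytes())
	if _, err := state.Load(context.Background(), r, &dst); err != nil {
		panic(err)
	}
	fmt.Println(dst[1], dst[2])
}
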
diff --git a/pkg/state/tests/bool_test.go b/pkg/state/tests/bool_test.go
deleted file mode 100644
index e17cfacf9..000000000
--- a/pkg/state/tests/bool_test.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tests
-
-import (
- "testing"
-)
-
-var allBools = []bool{
- true,
- false,
-}
-
-func TestBool(t *testing.T) {
- runTestCases(t, false, "plain", flatten(allBools))
- runTestCases(t, false, "pointers", pointersTo(flatten(allBools)))
- runTestCases(t, false, "interfaces", interfacesTo(flatten(allBools)))
- runTestCases(t, false, "interfacesToPointers", interfacesTo(pointersTo(flatten(allBools))))
-}
diff --git a/pkg/state/tests/float_test.go b/pkg/state/tests/float_test.go
deleted file mode 100644
index 3e89edd9c..000000000
--- a/pkg/state/tests/float_test.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tests
-
-import (
- "math"
- "testing"
-)
-
-var safeFloat32s = []float32{
- float32(0.0),
- float32(1.0),
- float32(-1.0),
- float32(math.Inf(1)),
- float32(math.Inf(-1)),
-}
-
-var allFloat32s = append(safeFloat32s, float32(math.NaN()))
-
-var safeFloat64s = []float64{
- float64(0.0),
- float64(1.0),
- float64(-1.0),
- math.Inf(1),
- math.Inf(-1),
-}
-
-var allFloat64s = append(safeFloat64s, math.NaN())
-
-func TestFloat(t *testing.T) {
- runTestCases(t, false, "plain", flatten(
- allFloat32s,
- allFloat64s,
- ))
- // See checkEqual for why NaNs are missing.
- runTestCases(t, false, "pointers", pointersTo(flatten(
- safeFloat32s,
- safeFloat64s,
- )))
- runTestCases(t, false, "interfaces", interfacesTo(flatten(
- safeFloat32s,
- safeFloat64s,
- )))
- runTestCases(t, false, "interfacesToPointers", interfacesTo(pointersTo(flatten(
- safeFloat32s,
- safeFloat64s,
- ))))
-}
-
-const onlyDouble float64 = 1.0000000000000002
-
-func TestFloatTruncation(t *testing.T) {
- runTestCases(t, true, "pass", []interface{}{
- truncatingFloat32{save: onlyDouble},
- })
- runTestCases(t, false, "fail", []interface{}{
- truncatingFloat32{save: 1.0},
- })
-}
-
-var safeComplex64s = combine(safeFloat32s, safeFloat32s, func(i, j interface{}) interface{} {
- return complex(i.(float32), j.(float32))
-})
-
-var allComplex64s = combine(allFloat32s, allFloat32s, func(i, j interface{}) interface{} {
- return complex(i.(float32), j.(float32))
-})
-
-var safeComplex128s = combine(safeFloat64s, safeFloat64s, func(i, j interface{}) interface{} {
- return complex(i.(float64), j.(float64))
-})
-
-var allComplex128s = combine(allFloat64s, allFloat64s, func(i, j interface{}) interface{} {
- return complex(i.(float64), j.(float64))
-})
-
-func TestComplex(t *testing.T) {
- runTestCases(t, false, "plain", flatten(
- allComplex64s,
- allComplex128s,
- ))
- // See TestFloat; same issue.
- runTestCases(t, false, "pointers", pointersTo(flatten(
- safeComplex64s,
- safeComplex128s,
- )))
- runTestCases(t, false, "interfaces", interfacesTo(flatten(
- safeComplex64s,
- safeComplex128s,
- )))
- runTestCases(t, false, "interfacesTo", interfacesTo(pointersTo(flatten(
- safeComplex64s,
- safeComplex128s,
- ))))
-}
-
-func TestComplexTruncation(t *testing.T) {
- runTestCases(t, true, "pass", []interface{}{
- truncatingComplex64{save: complex(onlyDouble, onlyDouble)},
- truncatingComplex64{save: complex(1.0, onlyDouble)},
- truncatingComplex64{save: complex(onlyDouble, 1.0)},
- })
- runTestCases(t, false, "fail", []interface{}{
- truncatingComplex64{save: complex(1.0, 1.0)},
- })
-}
diff --git a/pkg/state/tests/integer.go b/pkg/state/tests/integer.go
deleted file mode 100644
index ca403eed1..000000000
--- a/pkg/state/tests/integer.go
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tests
-
-import (
- "gvisor.dev/gvisor/pkg/state"
-)
-
-// +stateify type
-type truncatingUint8 struct {
- save uint64
- load uint8 `state:"nosave"`
-}
-
-func (t *truncatingUint8) StateSave(m state.Sink) {
- m.Save(0, &t.save)
-}
-
-func (t *truncatingUint8) StateLoad(m state.Source) {
- m.Load(0, &t.load)
- t.save = uint64(t.load)
- t.load = 0
-}
-
-var _ state.SaverLoader = (*truncatingUint8)(nil)
-
-// +stateify type
-type truncatingUint16 struct {
- save uint64
- load uint16 `state:"nosave"`
-}
-
-func (t *truncatingUint16) StateSave(m state.Sink) {
- m.Save(0, &t.save)
-}
-
-func (t *truncatingUint16) StateLoad(m state.Source) {
- m.Load(0, &t.load)
- t.save = uint64(t.load)
- t.load = 0
-}
-
-var _ state.SaverLoader = (*truncatingUint16)(nil)
-
-// +stateify type
-type truncatingUint32 struct {
- save uint64
- load uint32 `state:"nosave"`
-}
-
-func (t *truncatingUint32) StateSave(m state.Sink) {
- m.Save(0, &t.save)
-}
-
-func (t *truncatingUint32) StateLoad(m state.Source) {
- m.Load(0, &t.load)
- t.save = uint64(t.load)
- t.load = 0
-}
-
-var _ state.SaverLoader = (*truncatingUint32)(nil)
-
-// +stateify type
-type truncatingInt8 struct {
- save int64
- load int8 `state:"nosave"`
-}
-
-func (t *truncatingInt8) StateSave(m state.Sink) {
- m.Save(0, &t.save)
-}
-
-func (t *truncatingInt8) StateLoad(m state.Source) {
- m.Load(0, &t.load)
- t.save = int64(t.load)
- t.load = 0
-}
-
-var _ state.SaverLoader = (*truncatingInt8)(nil)
-
-// +stateify type
-type truncatingInt16 struct {
- save int64
- load int16 `state:"nosave"`
-}
-
-func (t *truncatingInt16) StateSave(m state.Sink) {
- m.Save(0, &t.save)
-}
-
-func (t *truncatingInt16) StateLoad(m state.Source) {
- m.Load(0, &t.load)
- t.save = int64(t.load)
- t.load = 0
-}
-
-var _ state.SaverLoader = (*truncatingInt16)(nil)
-
-// +stateify type
-type truncatingInt32 struct {
- save int64
- load int32 `state:"nosave"`
-}
-
-func (t *truncatingInt32) StateSave(m state.Sink) {
- m.Save(0, &t.save)
-}
-
-func (t *truncatingInt32) StateLoad(m state.Source) {
- m.Load(0, &t.load)
- t.save = int64(t.load)
- t.load = 0
-}
-
-var _ state.SaverLoader = (*truncatingInt32)(nil)
-
-// +stateify type
-type truncatingFloat32 struct {
- save float64
- load float32 `state:"nosave"`
-}
-
-func (t *truncatingFloat32) StateSave(m state.Sink) {
- m.Save(0, &t.save)
-}
-
-func (t *truncatingFloat32) StateLoad(m state.Source) {
- m.Load(0, &t.load)
- t.save = float64(t.load)
- t.load = 0
-}
-
-var _ state.SaverLoader = (*truncatingFloat32)(nil)
-
-// +stateify type
-type truncatingComplex64 struct {
- save complex128
- load complex64 `state:"nosave"`
-}
-
-func (t *truncatingComplex64) StateSave(m state.Sink) {
- m.Save(0, &t.save)
-}
-
-func (t *truncatingComplex64) StateLoad(m state.Source) {
- m.Load(0, &t.load)
- t.save = complex128(t.load)
- t.load = 0
-}
-
-var _ state.SaverLoader = (*truncatingComplex64)(nil)
diff --git a/pkg/state/tests/integer_test.go b/pkg/state/tests/integer_test.go
deleted file mode 100644
index 2b1609af0..000000000
--- a/pkg/state/tests/integer_test.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tests
-
-import (
- "math"
- "testing"
-)
-
-var (
- allBasicInts = []int{-1, 0, 1}
- allInt8s = []int8{math.MinInt8, -1, 0, 1, math.MaxInt8}
- allInt16s = []int16{math.MinInt16, -1, 0, 1, math.MaxInt16}
- allInt32s = []int32{math.MinInt32, -1, 0, 1, math.MaxInt32}
- allInt64s = []int64{math.MinInt64, -1, 0, 1, math.MaxInt64}
- allBasicUints = []uint{0, 1}
- allUintptrs = []uintptr{0, 1, ^uintptr(0)}
- allUint8s = []uint8{0, 1, math.MaxUint8}
- allUint16s = []uint16{0, 1, math.MaxUint16}
- allUint32s = []uint32{0, 1, math.MaxUint32}
- allUint64s = []uint64{0, 1, math.MaxUint64}
-)
-
-var allInts = flatten(
- allBasicInts,
- allInt8s,
- allInt16s,
- allInt32s,
- allInt64s,
-)
-
-var allUints = flatten(
- allBasicUints,
- allUintptrs,
- allUint8s,
- allUint16s,
- allUint32s,
- allUint64s,
-)
-
-func TestInt(t *testing.T) {
- runTestCases(t, false, "plain", allInts)
- runTestCases(t, false, "pointers", pointersTo(allInts))
- runTestCases(t, false, "interfaces", interfacesTo(allInts))
- runTestCases(t, false, "interfacesTo", interfacesTo(pointersTo(allInts)))
-}
-
-func TestIntTruncation(t *testing.T) {
- runTestCases(t, true, "pass", []interface{}{
- truncatingInt8{save: math.MinInt8 - 1},
- truncatingInt16{save: math.MinInt16 - 1},
- truncatingInt32{save: math.MinInt32 - 1},
- truncatingInt8{save: math.MaxInt8 + 1},
- truncatingInt16{save: math.MaxInt16 + 1},
- truncatingInt32{save: math.MaxInt32 + 1},
- })
- runTestCases(t, false, "fail", []interface{}{
- truncatingInt8{save: 1},
- truncatingInt16{save: 1},
- truncatingInt32{save: 1},
- })
-}
-
-func TestUint(t *testing.T) {
- runTestCases(t, false, "plain", allUints)
- runTestCases(t, false, "pointers", pointersTo(allUints))
- runTestCases(t, false, "interfaces", interfacesTo(allUints))
- runTestCases(t, false, "interfacesTo", interfacesTo(pointersTo(allUints)))
-}
-
-func TestUintTruncation(t *testing.T) {
- runTestCases(t, true, "pass", []interface{}{
- truncatingUint8{save: math.MaxUint8 + 1},
- truncatingUint16{save: math.MaxUint16 + 1},
- truncatingUint32{save: math.MaxUint32 + 1},
- })
- runTestCases(t, false, "fail", []interface{}{
- truncatingUint8{save: 1},
- truncatingUint16{save: 1},
- truncatingUint32{save: 1},
- })
-}
diff --git a/pkg/state/tests/load.go b/pkg/state/tests/load.go
deleted file mode 100644
index a8350c0f3..000000000
--- a/pkg/state/tests/load.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tests
-
-// +stateify savable
-type genericContainer struct {
- v interface{}
-}
-
-// +stateify savable
-type afterLoadStruct struct {
- v int `state:"nosave"`
-}
-
-func (a *afterLoadStruct) afterLoad() {
- a.v++
-}
-
-// +stateify savable
-type valueLoadStruct struct {
- v int `state:".(int64)"`
-}
-
-func (v *valueLoadStruct) saveV() int64 {
- return int64(v.v) // Save as int64.
-}
-
-func (v *valueLoadStruct) loadV(value int64) {
- v.v = int(value) // Load as int.
-}
-
-// +stateify savable
-type cycleStruct struct {
- c *cycleStruct
-}
-
-// +stateify savable
-type badCycleStruct struct {
- b *badCycleStruct `state:"wait"`
-}
-
-func (b *badCycleStruct) afterLoad() {
- if b.b != b {
- // This is not executable, since AfterLoad requires that the
- // object and all dependencies are complete. This should cause
- // a deadlock error during load.
- panic("badCycleStruct.afterLoad called")
- }
-}
diff --git a/pkg/state/tests/load_test.go b/pkg/state/tests/load_test.go
deleted file mode 100644
index 3c73ac391..000000000
--- a/pkg/state/tests/load_test.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tests
-
-import (
- "testing"
-)
-
-func TestLoadHooks(t *testing.T) {
- runTestCases(t, false, "load-hooks", []interface{}{
- // Root object being a struct.
- afterLoadStruct{v: 1},
- valueLoadStruct{v: 1},
- genericContainer{v: &afterLoadStruct{v: 1}},
- genericContainer{v: &valueLoadStruct{v: 1}},
- sliceContainer{v: []interface{}{&afterLoadStruct{v: 1}}},
- sliceContainer{v: []interface{}{&valueLoadStruct{v: 1}}},
- // Root object being a pointer.
- &afterLoadStruct{v: 1},
- &valueLoadStruct{v: 1},
- &genericContainer{v: &afterLoadStruct{v: 1}},
- &genericContainer{v: &valueLoadStruct{v: 1}},
- &sliceContainer{v: []interface{}{&afterLoadStruct{v: 1}}},
- &sliceContainer{v: []interface{}{&valueLoadStruct{v: 1}}},
- &mapContainer{v: map[int]interface{}{0: &afterLoadStruct{v: 1}}},
- &mapContainer{v: map[int]interface{}{0: &valueLoadStruct{v: 1}}},
- })
-}
-
-func TestCycles(t *testing.T) {
- // cs is a single object cycle.
- cs := cycleStruct{nil}
- cs.c = &cs
-
- // cs1 and cs2 are in a two object cycle.
- cs1 := cycleStruct{nil}
- cs2 := cycleStruct{nil}
- cs1.c = &cs2
- cs2.c = &cs1
-
- runTestCases(t, false, "cycles", []interface{}{
- cs,
- cs1,
- })
-}
-
-func TestDeadlock(t *testing.T) {
- // bs is a single object cycle. This does not cause deadlock because an
- // object cannot wait for itself.
- bs := badCycleStruct{nil}
- bs.b = &bs
-
- runTestCases(t, false, "self", []interface{}{
- &bs,
- })
-
- // bs1 and bs2 are in a deadlocking cycle.
- bs1 := badCycleStruct{nil}
- bs2 := badCycleStruct{nil}
- bs1.b = &bs2
- bs2.b = &bs1
-
- runTestCases(t, true, "deadlock", []interface{}{
- &bs1,
- })
-}
diff --git a/pkg/state/tests/map.go b/pkg/state/tests/map.go
deleted file mode 100644
index db4e548f1..000000000
--- a/pkg/state/tests/map.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tests
-
-// +stateify savable
-type mapContainer struct {
- v map[int]interface{}
-}
-
-// +stateify savable
-type mapPtrContainer struct {
- v *map[int]interface{}
-}
-
-// +stateify savable
-type registeredMapStruct struct{}
diff --git a/pkg/state/tests/map_test.go b/pkg/state/tests/map_test.go
deleted file mode 100644
index 92bf0fc01..000000000
--- a/pkg/state/tests/map_test.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tests
-
-import (
- "reflect"
- "testing"
-)
-
-var allMapPrimitives = []interface{}{
- bool(true),
- int(1),
- int8(1),
- int16(1),
- int32(1),
- int64(1),
- uint(1),
- uintptr(1),
- uint8(1),
- uint16(1),
- uint32(1),
- uint64(1),
- string(""),
- registeredMapStruct{},
-}
-
-var allMapKeys = flatten(allMapPrimitives, pointersTo(allMapPrimitives))
-
-var allMapValues = flatten(allMapPrimitives, pointersTo(allMapPrimitives), interfacesTo(allMapPrimitives))
-
-var emptyMaps = combine(allMapKeys, allMapValues, func(v1, v2 interface{}) interface{} {
- m := reflect.MakeMap(reflect.MapOf(reflect.TypeOf(v1), reflect.TypeOf(v2)))
- return m.Interface()
-})
-
-var fullMaps = combine(allMapKeys, allMapValues, func(v1, v2 interface{}) interface{} {
- m := reflect.MakeMap(reflect.MapOf(reflect.TypeOf(v1), reflect.TypeOf(v2)))
- m.SetMapIndex(reflect.Zero(reflect.TypeOf(v1)), reflect.Zero(reflect.TypeOf(v2)))
- return m.Interface()
-})
-
-func TestMapAliasing(t *testing.T) {
- v := make(map[int]int)
- ptrToV := &v
- aliases := []map[int]int{v, v}
- runTestCases(t, false, "", []interface{}{ptrToV, aliases})
-}
-
-func TestMapsEmpty(t *testing.T) {
- runTestCases(t, false, "plain", emptyMaps)
- runTestCases(t, false, "pointers", pointersTo(emptyMaps))
- runTestCases(t, false, "interfaces", interfacesTo(emptyMaps))
- runTestCases(t, false, "interfacesToPointers", interfacesTo(pointersTo(emptyMaps)))
-}
-
-func TestMapsFull(t *testing.T) {
- runTestCases(t, false, "plain", fullMaps)
- runTestCases(t, false, "pointers", pointersTo(fullMaps))
- runTestCases(t, false, "interfaces", interfacesTo(fullMaps))
- runTestCases(t, false, "interfacesToPointer", interfacesTo(pointersTo(fullMaps)))
-}
-
-func TestMapContainers(t *testing.T) {
- var (
- nilMap map[int]interface{}
- emptyMap = make(map[int]interface{})
- fullMap = map[int]interface{}{0: nil}
- )
- runTestCases(t, false, "", []interface{}{
- mapContainer{v: nilMap},
- mapContainer{v: emptyMap},
- mapContainer{v: fullMap},
- mapPtrContainer{v: nil},
- mapPtrContainer{v: &nilMap},
- mapPtrContainer{v: &emptyMap},
- mapPtrContainer{v: &fullMap},
- })
-}
diff --git a/pkg/state/tests/register.go b/pkg/state/tests/register.go
deleted file mode 100644
index 074d86315..000000000
--- a/pkg/state/tests/register.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tests
-
-// +stateify savable
-type alreadyRegisteredStruct struct{}
-
-// +stateify savable
-type alreadyRegisteredOther int
diff --git a/pkg/state/tests/register_test.go b/pkg/state/tests/register_test.go
deleted file mode 100644
index 2199d6b01..000000000
--- a/pkg/state/tests/register_test.go
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build race
-// +build race
-
-package tests
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/state"
-)
-
-// faker calls itself whatever is in the name field.
-type faker struct {
- Name string
- Fields []string
-}
-
-func (f *faker) StateTypeName() string {
- return f.Name
-}
-
-func (f *faker) StateFields() []string {
- return f.Fields
-}
-
-// fakerWithSaverLoader has all it needs.
-type fakerWithSaverLoader struct {
- faker
-}
-
-func (f *fakerWithSaverLoader) StateSave(m state.Sink) {}
-
-func (f *fakerWithSaverLoader) StateLoad(m state.Source) {}
-
-// fakerOther calls itself .. uh, itself?
-type fakerOther string
-
-func (f *fakerOther) StateTypeName() string {
- return string(*f)
-}
-
-func (f *fakerOther) StateFields() []string {
- return nil
-}
-
-func newFakerOther(name string) *fakerOther {
- f := fakerOther(name)
- return &f
-}
-
-// fakerOtherBadFields returns non-nil fields.
-type fakerOtherBadFields string
-
-func (f *fakerOtherBadFields) StateTypeName() string {
- return string(*f)
-}
-
-func (f *fakerOtherBadFields) StateFields() []string {
- return []string{string(*f)}
-}
-
-func newFakerOtherBadFields(name string) *fakerOtherBadFields {
- f := fakerOtherBadFields(name)
- return &f
-}
-
-// fakerOtherSaverLoader implements SaverLoader methods.
-type fakerOtherSaverLoader string
-
-func (f *fakerOtherSaverLoader) StateTypeName() string {
- return string(*f)
-}
-
-func (f *fakerOtherSaverLoader) StateFields() []string {
- return nil
-}
-
-func (f *fakerOtherSaverLoader) StateSave(m state.Sink) {}
-
-func (f *fakerOtherSaverLoader) StateLoad(m state.Source) {}
-
-func newFakerOtherSaverLoader(name string) *fakerOtherSaverLoader {
- f := fakerOtherSaverLoader(name)
- return &f
-}
-
-func TestRegisterPrimitives(t *testing.T) {
- for _, typeName := range []string{
- "int",
- "int8",
- "int16",
- "int32",
- "int64",
- "uint",
- "uintptr",
- "uint8",
- "uint16",
- "uint32",
- "uint64",
- "float32",
- "float64",
- "complex64",
- "complex128",
- "string",
- } {
- t.Run("struct/"+typeName, func(t *testing.T) {
- defer func() {
- if r := recover(); r == nil {
- t.Errorf("Registering type %q did not panic", typeName)
- }
- }()
- state.Register(&faker{
- Name: typeName,
- })
- })
- t.Run("other/"+typeName, func(t *testing.T) {
- defer func() {
- if r := recover(); r == nil {
- t.Errorf("Registering type %q did not panic", typeName)
- }
- }()
- state.Register(newFakerOther(typeName))
- })
- }
-}
-
-func TestRegisterBad(t *testing.T) {
- const (
- goodName = "foo"
- firstField = "a"
- secondField = "b"
- )
- for name, object := range map[string]state.Type{
- "non-struct-with-fields": newFakerOtherBadFields(goodName),
- "non-struct-with-saverloader": newFakerOtherSaverLoader(goodName),
- "struct-without-saverloader": &faker{Name: goodName},
- "non-struct-duplicate-with-struct": newFakerOther((new(alreadyRegisteredStruct)).StateTypeName()),
- "non-struct-duplicate-with-non-struct": newFakerOther((new(alreadyRegisteredOther)).StateTypeName()),
- "struct-duplicate-with-struct": &fakerWithSaverLoader{faker{Name: (new(alreadyRegisteredStruct)).StateTypeName()}},
- "struct-duplicate-with-non-struct": &fakerWithSaverLoader{faker{Name: (new(alreadyRegisteredOther)).StateTypeName()}},
- "struct-with-empty-field": &fakerWithSaverLoader{faker{Name: goodName, Fields: []string{""}}},
- "struct-with-empty-field-and-non-empty": &fakerWithSaverLoader{faker{Name: goodName, Fields: []string{firstField, ""}}},
- "struct-with-duplicate-field": &fakerWithSaverLoader{faker{Name: goodName, Fields: []string{firstField, firstField}}},
- "struct-with-duplicate-field-and-non-dup": &fakerWithSaverLoader{faker{Name: goodName, Fields: []string{firstField, secondField, firstField}}},
- } {
- t.Run(name, func(t *testing.T) {
- defer func() {
- if r := recover(); r == nil {
- t.Errorf("Registering object %#v did not panic", object)
- }
- }()
- state.Register(object)
- })
-
- }
-}
-
-func TestRegisterTypeOnlyStruct(t *testing.T) {
- defer func() {
- if r := recover(); r == nil {
- t.Errorf("Register did not panic")
- }
- }()
- state.Register((*typeOnlyEmptyStruct)(nil))
-}
diff --git a/pkg/state/tests/string_test.go b/pkg/state/tests/string_test.go
deleted file mode 100644
index 44f5a562c..000000000
--- a/pkg/state/tests/string_test.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tests
-
-import (
- "testing"
-)
-
-const nonEmptyString = "hello world"
-
-var allStrings = []string{
- "",
- nonEmptyString,
- "\\0",
-}
-
-func TestString(t *testing.T) {
- runTestCases(t, false, "plain", flatten(allStrings))
- runTestCases(t, false, "pointers", pointersTo(flatten(allStrings)))
- runTestCases(t, false, "interfaces", interfacesTo(flatten(allStrings)))
- runTestCases(t, false, "interfacesToPointers", interfacesTo(pointersTo(flatten(allStrings))))
-}
diff --git a/pkg/state/tests/struct.go b/pkg/state/tests/struct.go
deleted file mode 100644
index 69143d194..000000000
--- a/pkg/state/tests/struct.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tests
-
-type unregisteredEmptyStruct struct{}
-
-// typeOnlyEmptyStruct just implements the state.Type interface.
-type typeOnlyEmptyStruct struct{}
-
-func (*typeOnlyEmptyStruct) StateTypeName() string { return "registeredEmptyStruct" }
-
-func (*typeOnlyEmptyStruct) StateFields() []string { return nil }
-
-// +stateify savable
-type savableEmptyStruct struct{}
-
-// +stateify savable
-type emptyStructPointer struct {
- nothing *struct{}
-}
-
-// +stateify savable
-type outerSame struct {
- inner inner
-}
-
-// +stateify savable
-type outerFieldFirst struct {
- inner inner
- v int64
-}
-
-// +stateify savable
-type outerFieldSecond struct {
- v int64
- inner inner
-}
-
-// +stateify savable
-type outerArray struct {
- inner [2]inner
-}
-
-// +stateify savable
-type outerSlice struct {
- inner []inner
-}
-
-// +stateify savable
-type inner struct {
- v int64
-}
-
-// +stateify savable
-type outerFieldValue struct {
- inner innerFieldValue
-}
-
-// +stateify savable
-type innerFieldValue struct {
- v int64 `state:".(*savedFieldValue)"`
-}
-
-// +stateify savable
-type savedFieldValue struct {
- v int64
-}
-
-func (ifv *innerFieldValue) saveV() *savedFieldValue {
- return &savedFieldValue{ifv.v}
-}
-
-func (ifv *innerFieldValue) loadV(sfv *savedFieldValue) {
- ifv.v = sfv.v
-}
-
-// +stateify savable
-type system struct {
- v1 interface{}
- v2 interface{}
-}
-
-// +stateify savable
-type system3 struct {
- v1 interface{}
- v2 interface{}
- v3 interface{}
-}
diff --git a/pkg/state/tests/struct_test.go b/pkg/state/tests/struct_test.go
deleted file mode 100644
index 9826f1ee9..000000000
--- a/pkg/state/tests/struct_test.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tests
-
-import (
- "math/rand"
- "testing"
-)
-
-func TestEmptyStruct(t *testing.T) {
- runTestCases(t, false, "plain", []interface{}{
- unregisteredEmptyStruct{},
- typeOnlyEmptyStruct{},
- savableEmptyStruct{},
- })
- runTestCases(t, false, "pointers", pointersTo([]interface{}{
- unregisteredEmptyStruct{},
- typeOnlyEmptyStruct{},
- savableEmptyStruct{},
- }))
- runTestCases(t, false, "interfaces-pass", interfacesTo([]interface{}{
-		// Only registered types can be dispatched via interfaces. All
-		// other types should fail, even the empty struct.
- savableEmptyStruct{},
- }))
- runTestCases(t, true, "interfaces-fail", interfacesTo([]interface{}{
- unregisteredEmptyStruct{},
- typeOnlyEmptyStruct{},
- }))
- runTestCases(t, false, "interfacesToPointers-pass", interfacesTo(pointersTo([]interface{}{
- savableEmptyStruct{},
- })))
- runTestCases(t, true, "interfacesToPointers-fail", interfacesTo(pointersTo([]interface{}{
- unregisteredEmptyStruct{},
- typeOnlyEmptyStruct{},
- })))
-
-	// Ensure that empty struct aliasing works.
- es := emptyStructPointer{new(struct{})}
- runTestCases(t, false, "empty-struct-pointers", []interface{}{
- emptyStructPointer{},
- es,
- []emptyStructPointer{es, es}, // Same pointer.
- })
-}
-
-func TestEmbeddedPointers(t *testing.T) {
- // Give each int64 a random value to prevent Go from using
- // runtime.staticuint64s, which confounds tests for struct duplication.
- magic := func() int64 {
- for {
- n := rand.Int63()
- if n < 0 || n > 255 {
- return n
- }
- }
- }
-
- ofs := outerSame{inner{magic()}}
- of1 := outerFieldFirst{inner{magic()}, magic()}
- of2 := outerFieldSecond{magic(), inner{magic()}}
- oa := outerArray{[2]inner{{magic()}, {magic()}}}
- osl := outerSlice{oa.inner[:]}
- ofv := outerFieldValue{innerFieldValue{magic()}}
-
- runTestCases(t, false, "embedded-pointers", []interface{}{
- system{&ofs, &ofs.inner},
- system{&ofs.inner, &ofs},
- system{&of1, &of1.inner},
- system{&of1.inner, &of1},
- system{&of2, &of2.inner},
- system{&of2.inner, &of2},
- system{&oa, &oa.inner[0]},
- system{&oa, &oa.inner[1]},
- system{&oa.inner[0], &oa},
- system{&oa.inner[1], &oa},
- system3{&oa, &oa.inner[0], &oa.inner[1]},
- system3{&oa, &oa.inner[1], &oa.inner[0]},
- system3{&oa.inner[0], &oa, &oa.inner[1]},
- system3{&oa.inner[1], &oa, &oa.inner[0]},
- system3{&oa.inner[0], &oa.inner[1], &oa},
- system3{&oa.inner[1], &oa.inner[0], &oa},
- system{&oa, &osl},
- system{&osl, &oa},
- system{&ofv, &ofv.inner},
- system{&ofv.inner, &ofv},
- })
-}
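
The embedded-pointers cases above assert that saving a pointer to a struct together with a pointer into one of its fields preserves the aliasing relationship across a save/restore cycle. A standalone sketch of that property, written inside package tests against the state.Save/state.Load calls used in tests.go below (the checkAliasing helper, the literal 42, and the error messages are illustrative, not part of the deleted suite; assumes bytes, context, fmt, and gvisor.dev/gvisor/pkg/state are imported):

func checkAliasing() error {
	ofs := outerSame{inner{42}}
	src := system{&ofs, &ofs.inner}

	var buf bytes.Buffer
	if _, err := state.Save(context.Background(), &buf, &src); err != nil {
		return err
	}
	var dst system
	if _, err := state.Load(context.Background(), bytes.NewReader(buf.Bytes()), &dst); err != nil {
		return err
	}
	// The restored v2 must point into the restored v1, not at a duplicated copy.
	if dst.v2.(*inner) != &dst.v1.(*outerSame).inner {
		return fmt.Errorf("aliasing lost across save/restore")
	}
	return nil
}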
diff --git a/pkg/state/tests/tests.go b/pkg/state/tests/tests.go
deleted file mode 100644
index 435a0e9db..000000000
--- a/pkg/state/tests/tests.go
+++ /dev/null
@@ -1,215 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package tests tests the state packages.
-package tests
-
-import (
- "bytes"
- "context"
- "fmt"
- "math"
- "reflect"
- "testing"
-
- "gvisor.dev/gvisor/pkg/state"
- "gvisor.dev/gvisor/pkg/state/pretty"
-)
-
-// discard is an implementation of wire.Writer.
-type discard struct{}
-
-// Write implements wire.Writer.Write.
-func (discard) Write(p []byte) (int, error) { return len(p), nil }
-
-// WriteByte implements wire.Writer.WriteByte.
-func (discard) WriteByte(byte) error { return nil }
-
-// checkEqual checks if two objects are equal.
-//
-// N.B. This only handles one level of dereferences for NaN. Otherwise we
-// would need to fork the entire implementation of reflect.DeepEqual.
-func checkEqual(root, loadedValue interface{}) bool {
- if reflect.DeepEqual(root, loadedValue) {
- return true
- }
-
-	// NaN is not equal to itself. We handle the case of raw floating point
-	// primitives here, but don't handle NaN values nested inside other types.
- rf32, ok1 := root.(float32)
- lf32, ok2 := loadedValue.(float32)
- if ok1 && ok2 && math.IsNaN(float64(rf32)) && math.IsNaN(float64(lf32)) {
- return true
- }
- rf64, ok1 := root.(float64)
- lf64, ok2 := loadedValue.(float64)
- if ok1 && ok2 && math.IsNaN(rf64) && math.IsNaN(lf64) {
- return true
- }
-
-	// Same for complex numbers.
-	rc64, ok1 := root.(complex64)
-	lc64, ok2 := loadedValue.(complex64)
- if ok1 && ok2 {
- return checkEqual(real(rc64), real(lc64)) && checkEqual(imag(rc64), imag(lc64))
- }
- rc128, ok1 := root.(complex128)
-	lc128, ok2 := loadedValue.(complex128)
- if ok1 && ok2 {
- return checkEqual(real(rc128), real(lc128)) && checkEqual(imag(rc128), imag(lc128))
- }
-
- return false
-}
-
-// runTestCases runs a test for each object in objects.
-func runTestCases(t *testing.T, shouldFail bool, prefix string, objects []interface{}) {
- t.Helper()
- for i, root := range objects {
- t.Run(fmt.Sprintf("%s%d", prefix, i), func(t *testing.T) {
- t.Logf("Original object:\n%#v", root)
-
- // Save the passed object.
- saveBuffer := &bytes.Buffer{}
- saveObjectPtr := reflect.New(reflect.TypeOf(root))
- saveObjectPtr.Elem().Set(reflect.ValueOf(root))
- saveStats, err := state.Save(context.Background(), saveBuffer, saveObjectPtr.Interface())
- if err != nil {
- if shouldFail {
- return
- }
- t.Fatalf("Save failed unexpectedly: %v", err)
- }
-
- // Dump the serialized proto to aid with debugging.
- var ppBuf bytes.Buffer
- t.Logf("Raw state:\n%v", saveBuffer.Bytes())
- if err := pretty.PrintText(&ppBuf, bytes.NewReader(saveBuffer.Bytes())); err != nil {
- // We don't count this as a test failure if we
- // have shouldFail set, but we will count as a
- // failure if we were not expecting to fail.
- if !shouldFail {
-				t.Errorf("PrettyPrint(html=false) failed unexpectedly: %v", err)
- }
- }
- if err := pretty.PrintHTML(discard{}, bytes.NewReader(saveBuffer.Bytes())); err != nil {
- // See above.
- if !shouldFail {
-				t.Errorf("PrettyPrint(html=true) failed unexpectedly: %v", err)
- }
- }
- t.Logf("Encoded state:\n%s", ppBuf.String())
- t.Logf("Save stats:\n%s", saveStats.String())
-
- // Load a new copy of the object.
- loadObjectPtr := reflect.New(reflect.TypeOf(root))
- loadStats, err := state.Load(context.Background(), bytes.NewReader(saveBuffer.Bytes()), loadObjectPtr.Interface())
- if err != nil {
- if shouldFail {
- return
- }
- t.Fatalf("Load failed unexpectedly: %v", err)
- }
-
- // Compare the values.
- loadedValue := loadObjectPtr.Elem().Interface()
- if !checkEqual(root, loadedValue) {
- if shouldFail {
- return
- }
- t.Fatalf("Objects differ:\n\toriginal: %#v\n\tloaded: %#v\n", root, loadedValue)
- }
-
-			// Everything succeeded; make sure that was expected.
- if shouldFail {
- t.Fatalf("This test was expected to fail, but didn't.")
- }
- t.Logf("Load stats:\n%s", loadStats.String())
-
- // Truncate half the bytes in the byte stream,
- // and ensure that we can't restore. Then
- // truncate only the final byte and ensure that
- // we can't restore.
- l := saveBuffer.Len()
- halfReader := bytes.NewReader(saveBuffer.Bytes()[:l/2])
- if _, err := state.Load(context.Background(), halfReader, loadObjectPtr.Interface()); err == nil {
- t.Errorf("Load with half bytes succeeded unexpectedly.")
- }
- missingByteReader := bytes.NewReader(saveBuffer.Bytes()[:l-1])
- if _, err := state.Load(context.Background(), missingByteReader, loadObjectPtr.Interface()); err == nil {
- t.Errorf("Load with missing byte succeeded unexpectedly.")
- }
- })
- }
-}
-
-// convert converts the given slice to a []interface{}.
-func convert(v interface{}) (r []interface{}) {
- s := reflect.ValueOf(v) // Must be slice.
- for i := 0; i < s.Len(); i++ {
- r = append(r, s.Index(i).Interface())
- }
- return r
-}
-
-// flatten flattens multiple slices.
-func flatten(vs ...interface{}) (r []interface{}) {
- for _, v := range vs {
- r = append(r, convert(v)...)
- }
- return r
-}
-
-// filter maps each element of vs through fn, dropping elements for which fn
-// returns false.
-func filter(vs interface{}, fn func(interface{}) (interface{}, bool)) (r []interface{}) {
- s := reflect.ValueOf(vs)
- for i := 0; i < s.Len(); i++ {
- v, ok := fn(s.Index(i).Interface())
- if ok {
- r = append(r, v)
- }
- }
- return r
-}
-
-// combine applies fn to every pair of elements drawn from the two slices.
-func combine(v1, v2 interface{}, fn func(_, _ interface{}) interface{}) (r []interface{}) {
- s1 := reflect.ValueOf(v1)
- s2 := reflect.ValueOf(v2)
- for i := 0; i < s1.Len(); i++ {
- for j := 0; j < s2.Len(); j++ {
- // Combine using the given function.
- r = append(r, fn(s1.Index(i).Interface(), s2.Index(j).Interface()))
- }
- }
- return r
-}
-
-// pointersTo returns a pointer to a copy of each element of vs.
-func pointersTo(vs interface{}) []interface{} {
- return filter(vs, func(o interface{}) (interface{}, bool) {
- v := reflect.New(reflect.TypeOf(o))
- v.Elem().Set(reflect.ValueOf(o))
- return v.Interface(), true
- })
-}
-
-// interfacesTo wraps each element of vs in a single-element interface array.
-func interfacesTo(vs interface{}) []interface{} {
- return filter(vs, func(o interface{}) (interface{}, bool) {
- var v [1]interface{}
- v[0] = o
- return v, true
- })
-}
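
Taken together, the helpers above are combinators: flatten merges the per-type case slices, pointersTo and interfacesTo wrap each case, and combine builds the cross product of two case lists. A sketch of a test that wires them together, mirroring the patterns used throughout this package (allSavableStructs is an assumed case slice of registered savable values and TestComposedHelpers is hypothetical; neither exists in the deleted files):

func TestComposedHelpers(t *testing.T) {
	cases := flatten(allSavableStructs)
	runTestCases(t, false, "plain", cases)
	runTestCases(t, false, "pointers", pointersTo(cases))
	runTestCases(t, false, "interfacesToPointers", interfacesTo(pointersTo(cases)))
	// combine pairs every pointer with every other pointer, wrapping each
	// pair in the registered system struct from struct.go.
	runTestCases(t, false, "pairs", combine(pointersTo(cases), pointersTo(cases),
		func(a, b interface{}) interface{} { return system{a, b} }))
}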
diff --git a/pkg/state/wire/BUILD b/pkg/state/wire/BUILD
deleted file mode 100644
index 311b93dcb..000000000
--- a/pkg/state/wire/BUILD
+++ /dev/null
@@ -1,12 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "wire",
- srcs = ["wire.go"],
- marshal = False,
- stateify = False,
- visibility = ["//:sandbox"],
- deps = ["//pkg/gohacks"],
-)
diff --git a/pkg/sync/BUILD b/pkg/sync/BUILD
deleted file mode 100644
index 73791b456..000000000
--- a/pkg/sync/BUILD
+++ /dev/null
@@ -1,49 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(
- default_visibility = ["//:sandbox"],
- licenses = ["notice"],
-)
-
-exports_files(["LICENSE"])
-
-go_library(
- name = "sync",
- srcs = [
- "aliases.go",
- "checklocks_off_unsafe.go",
- "checklocks_on_unsafe.go",
- "gate_unsafe.go",
- "goyield_go113_unsafe.go",
- "goyield_unsafe.go",
- "mutex_unsafe.go",
- "nocopy.go",
- "norace_unsafe.go",
- "race_amd64.s",
- "race_arm64.s",
- "race_unsafe.go",
- "runtime_unsafe.go",
- "rwmutex_unsafe.go",
- "seqcount.go",
- "sync.go",
- ],
- marshal = False,
- stateify = False,
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/gohacks",
- "//pkg/goid",
- ],
-)
-
-go_test(
- name = "sync_test",
- size = "small",
- srcs = [
- "gate_test.go",
- "mutex_test.go",
- "rwmutex_test.go",
- "seqcount_test.go",
- ],
- library = ":sync",
-)
diff --git a/pkg/sync/LICENSE b/pkg/sync/LICENSE
deleted file mode 100644
index 6a66aea5e..000000000
--- a/pkg/sync/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/pkg/sync/README.md b/pkg/sync/README.md
deleted file mode 100644
index be1a01f08..000000000
--- a/pkg/sync/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# sync
-
-This package provides additional synchronization primitives not provided by the
-Go stdlib 'sync' package. It is partially derived from the upstream 'sync'
-package from go1.10.
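
A minimal usage sketch, assuming the package is imported under its canonical path and used as a drop-in for the standard library; the aliases and the TryLock/DowngradeLock extensions are exercised by the tests deleted further below, while the tryUpdate helper itself is illustrative only:

package example

import "gvisor.dev/gvisor/pkg/sync"

// tryUpdate runs update only if the mutex can be acquired without blocking.
func tryUpdate(mu *sync.Mutex, update func()) bool {
	if !mu.TryLock() {
		return false
	}
	defer mu.Unlock()
	update()
	return true
}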
diff --git a/pkg/sync/atomicptr/BUILD b/pkg/sync/atomicptr/BUILD
deleted file mode 100644
index a6a7f01ac..000000000
--- a/pkg/sync/atomicptr/BUILD
+++ /dev/null
@@ -1,36 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template(
- name = "generic_atomicptr",
- srcs = ["generic_atomicptr_unsafe.go"],
- types = [
- "Value",
- ],
- visibility = ["//:sandbox"],
-)
-
-go_template_instance(
- name = "atomicptr_int",
- out = "atomicptr_int_unsafe.go",
- package = "atomicptr",
- suffix = "Int",
- template = ":generic_atomicptr",
- types = {
- "Value": "int",
- },
-)
-
-go_library(
- name = "atomicptr",
- srcs = ["atomicptr_int_unsafe.go"],
-)
-
-go_test(
- name = "atomicptr_test",
- size = "small",
- srcs = ["atomicptr_test.go"],
- library = ":atomicptr",
-)
diff --git a/pkg/sync/atomicptr/atomicptr_test.go b/pkg/sync/atomicptr/atomicptr_test.go
deleted file mode 100644
index 8fdc5112e..000000000
--- a/pkg/sync/atomicptr/atomicptr_test.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package atomicptr
-
-import (
- "testing"
-)
-
-func newInt(val int) *int {
- return &val
-}
-
-func TestAtomicPtr(t *testing.T) {
- var p AtomicPtrInt
- if got := p.Load(); got != nil {
- t.Errorf("initial value is %p (%v), wanted nil", got, got)
- }
- want := newInt(42)
- p.Store(want)
- if got := p.Load(); got != want {
- t.Errorf("wrong value: got %p (%v), wanted %p (%v)", got, got, want, want)
- }
- want = newInt(100)
- p.Store(want)
- if got := p.Load(); got != want {
- t.Errorf("wrong value: got %p (%v), wanted %p (%v)", got, got, want, want)
- }
-}
diff --git a/pkg/sync/atomicptrmap/BUILD b/pkg/sync/atomicptrmap/BUILD
deleted file mode 100644
index b0e218c79..000000000
--- a/pkg/sync/atomicptrmap/BUILD
+++ /dev/null
@@ -1,76 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template", "go_template_instance")
-
-package(
- default_visibility = ["//visibility:private"],
- licenses = ["notice"],
-)
-
-go_template(
- name = "generic_atomicptrmap",
- srcs = ["generic_atomicptrmap_unsafe.go"],
- opt_consts = [
- "ShardOrder",
- ],
- opt_types = [
- "Hasher",
- ],
- types = [
- "Key",
- "Value",
- ],
- deps = [
- "//pkg/gohacks",
- "//pkg/sync",
- ],
-)
-
-go_template_instance(
- name = "test_atomicptrmap",
- out = "test_atomicptrmap_unsafe.go",
- package = "atomicptrmap",
- prefix = "test",
- template = ":generic_atomicptrmap",
- types = {
- "Key": "int64",
- "Value": "testValue",
- },
-)
-
-go_template_instance(
- name = "test_atomicptrmap_sharded",
- out = "test_atomicptrmap_sharded_unsafe.go",
- consts = {
- "ShardOrder": "4",
- },
- package = "atomicptrmap",
- prefix = "test",
- suffix = "Sharded",
- template = ":generic_atomicptrmap",
- types = {
- "Key": "int64",
- "Value": "testValue",
- },
-)
-
-go_library(
- name = "atomicptrmap",
- testonly = 1,
- srcs = [
- "atomicptrmap.go",
- "test_atomicptrmap_sharded_unsafe.go",
- "test_atomicptrmap_unsafe.go",
- ],
- deps = [
- "//pkg/gohacks",
- "//pkg/sync",
- ],
-)
-
-go_test(
- name = "atomicptrmap_test",
- size = "small",
- srcs = ["atomicptrmap_test.go"],
- library = ":atomicptrmap",
- deps = ["//pkg/sync"],
-)
diff --git a/pkg/sync/atomicptrmap/atomicptrmap.go b/pkg/sync/atomicptrmap/atomicptrmap.go
deleted file mode 100644
index 867821ce9..000000000
--- a/pkg/sync/atomicptrmap/atomicptrmap.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package atomicptrmap instantiates generic_atomicptrmap for testing.
-package atomicptrmap
-
-type testValue struct {
- val int
-}
diff --git a/pkg/sync/atomicptrmap/atomicptrmap_test.go b/pkg/sync/atomicptrmap/atomicptrmap_test.go
deleted file mode 100644
index 75a9997ef..000000000
--- a/pkg/sync/atomicptrmap/atomicptrmap_test.go
+++ /dev/null
@@ -1,635 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package atomicptrmap
-
-import (
- "context"
- "fmt"
- "math/rand"
- "reflect"
- "runtime"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/sync"
-)
-
-func TestConsistencyWithGoMap(t *testing.T) {
- const maxKey = 16
- var vals [4]*testValue
- for i := 1; /* leave vals[0] nil */ i < len(vals); i++ {
- vals[i] = new(testValue)
- }
- var (
- m = make(map[int64]*testValue)
- apm testAtomicPtrMap
- )
- for i := 0; i < 100000; i++ {
- // Apply a random operation to both m and apm and expect them to have
- // the same result. Bias toward CompareAndSwap, which has the most
- // cases; bias away from Range and RangeRepeatable, which are
- // relatively expensive.
- switch rand.Intn(10) {
- case 0, 1: // Load
- key := rand.Int63n(maxKey)
- want := m[key]
- got := apm.Load(key)
- t.Logf("Load(%d) = %p", key, got)
- if got != want {
- t.Fatalf("got %p, wanted %p", got, want)
- }
- case 2, 3: // Swap
- key := rand.Int63n(maxKey)
- val := vals[rand.Intn(len(vals))]
- want := m[key]
- if val != nil {
- m[key] = val
- } else {
- delete(m, key)
- }
- got := apm.Swap(key, val)
- t.Logf("Swap(%d, %p) = %p", key, val, got)
- if got != want {
- t.Fatalf("got %p, wanted %p", got, want)
- }
- case 4, 5, 6, 7: // CompareAndSwap
- key := rand.Int63n(maxKey)
- oldVal := vals[rand.Intn(len(vals))]
- newVal := vals[rand.Intn(len(vals))]
- want := m[key]
- if want == oldVal {
- if newVal != nil {
- m[key] = newVal
- } else {
- delete(m, key)
- }
- }
- got := apm.CompareAndSwap(key, oldVal, newVal)
- t.Logf("CompareAndSwap(%d, %p, %p) = %p", key, oldVal, newVal, got)
- if got != want {
- t.Fatalf("got %p, wanted %p", got, want)
- }
- case 8: // Range
- got := make(map[int64]*testValue)
- var (
- haveDup = false
- dup int64
- )
- apm.Range(func(key int64, val *testValue) bool {
- if _, ok := got[key]; ok && !haveDup {
- haveDup = true
- dup = key
- }
- got[key] = val
- return true
- })
- t.Logf("Range() = %v", got)
- if !reflect.DeepEqual(got, m) {
- t.Fatalf("got %v, wanted %v", got, m)
- }
- if haveDup {
- t.Fatalf("got duplicate key %d", dup)
- }
- case 9: // RangeRepeatable
- got := make(map[int64]*testValue)
- apm.RangeRepeatable(func(key int64, val *testValue) bool {
- got[key] = val
- return true
- })
- t.Logf("RangeRepeatable() = %v", got)
- if !reflect.DeepEqual(got, m) {
- t.Fatalf("got %v, wanted %v", got, m)
- }
- }
- }
-}
-
-func TestConcurrentHeterogeneous(t *testing.T) {
- ctx, cancel := context.WithCancel(context.Background())
- var (
- apm testAtomicPtrMap
- wg sync.WaitGroup
- )
- defer func() {
- cancel()
- wg.Wait()
- }()
-
- possibleKeyValuePairs := make(map[int64]map[*testValue]struct{})
- addKeyValuePair := func(key int64, val *testValue) {
- values := possibleKeyValuePairs[key]
- if values == nil {
- values = make(map[*testValue]struct{})
- possibleKeyValuePairs[key] = values
- }
- values[val] = struct{}{}
- }
-
- const numValuesPerKey = 4
-
- // These goroutines use keys not used by any other goroutine.
- const numPrivateKeys = 3
- for i := 0; i < numPrivateKeys; i++ {
- key := int64(i)
- var vals [numValuesPerKey]*testValue
- for i := 1; /* leave vals[0] nil */ i < len(vals); i++ {
- val := new(testValue)
- vals[i] = val
- addKeyValuePair(key, val)
- }
- wg.Add(1)
- go func() {
- defer wg.Done()
- r := rand.New(rand.NewSource(rand.Int63()))
- var stored *testValue
- for ctx.Err() == nil {
- switch r.Intn(4) {
- case 0:
- got := apm.Load(key)
- if got != stored {
- t.Errorf("Load(%d): got %p, wanted %p", key, got, stored)
- return
- }
- case 1:
- val := vals[r.Intn(len(vals))]
- want := stored
- stored = val
- got := apm.Swap(key, val)
- if got != want {
- t.Errorf("Swap(%d, %p): got %p, wanted %p", key, val, got, want)
- return
- }
- case 2, 3:
- oldVal := vals[r.Intn(len(vals))]
- newVal := vals[r.Intn(len(vals))]
- want := stored
- if stored == oldVal {
- stored = newVal
- }
- got := apm.CompareAndSwap(key, oldVal, newVal)
- if got != want {
- t.Errorf("CompareAndSwap(%d, %p, %p): got %p, wanted %p", key, oldVal, newVal, got, want)
- return
- }
- }
- }
- }()
- }
-
- // These goroutines share a small set of keys.
- const numSharedKeys = 2
- var (
- sharedKeys [numSharedKeys]int64
- sharedValues = make(map[int64][]*testValue)
- sharedValuesSet = make(map[int64]map[*testValue]struct{})
- )
- for i := range sharedKeys {
- key := int64(numPrivateKeys + i)
- sharedKeys[i] = key
- vals := make([]*testValue, numValuesPerKey)
- valsSet := make(map[*testValue]struct{})
- for j := range vals {
- val := new(testValue)
- vals[j] = val
- valsSet[val] = struct{}{}
- addKeyValuePair(key, val)
- }
- sharedValues[key] = vals
- sharedValuesSet[key] = valsSet
- }
- randSharedValue := func(r *rand.Rand, key int64) *testValue {
- vals := sharedValues[key]
- return vals[r.Intn(len(vals))]
- }
- for i := 0; i < 3; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
- r := rand.New(rand.NewSource(rand.Int63()))
- for ctx.Err() == nil {
- keyIndex := r.Intn(len(sharedKeys))
- key := sharedKeys[keyIndex]
- var (
- op string
- got *testValue
- )
- switch r.Intn(4) {
- case 0:
- op = "Load"
- got = apm.Load(key)
- case 1:
- op = "Swap"
- got = apm.Swap(key, randSharedValue(r, key))
- case 2, 3:
- op = "CompareAndSwap"
- got = apm.CompareAndSwap(key, randSharedValue(r, key), randSharedValue(r, key))
- }
- if got != nil {
- valsSet := sharedValuesSet[key]
- if _, ok := valsSet[got]; !ok {
- t.Errorf("%s: got key %d, value %p; expected value in %v", op, key, got, valsSet)
- return
- }
- }
- }
- }()
- }
-
- // This goroutine repeatedly searches for unused keys.
- wg.Add(1)
- go func() {
- defer wg.Done()
- r := rand.New(rand.NewSource(rand.Int63()))
- for ctx.Err() == nil {
- key := -1 - r.Int63()
- if got := apm.Load(key); got != nil {
- t.Errorf("Load(%d): got %p, wanted nil", key, got)
- }
- }
- }()
-
- // This goroutine repeatedly calls RangeRepeatable() and checks that each
- // key corresponds to an expected value.
- wg.Add(1)
- go func() {
- defer wg.Done()
- abort := false
- for !abort && ctx.Err() == nil {
- apm.RangeRepeatable(func(key int64, val *testValue) bool {
- values, ok := possibleKeyValuePairs[key]
- if !ok {
- t.Errorf("RangeRepeatable: got invalid key %d", key)
- abort = true
- return false
- }
- if _, ok := values[val]; !ok {
- t.Errorf("RangeRepeatable: got key %d, value %p; expected one of %v", key, val, values)
- abort = true
- return false
- }
- return true
- })
- }
- }()
-
- // Finally, the main goroutine spins for the length of the test calling
- // Range() and checking that each key that it observes is unique and
- // corresponds to an expected value.
- seenKeys := make(map[int64]struct{})
- const testDuration = 5 * time.Second
- end := time.Now().Add(testDuration)
- abort := false
- for time.Now().Before(end) {
- apm.Range(func(key int64, val *testValue) bool {
- values, ok := possibleKeyValuePairs[key]
- if !ok {
- t.Errorf("Range: got invalid key %d", key)
- abort = true
- return false
- }
- if _, ok := values[val]; !ok {
- t.Errorf("Range: got key %d, value %p; expected one of %v", key, val, values)
- abort = true
- return false
- }
- if _, ok := seenKeys[key]; ok {
- t.Errorf("Range: got duplicate key %d", key)
- abort = true
- return false
- }
- seenKeys[key] = struct{}{}
- return true
- })
- if abort {
- break
- }
- for k := range seenKeys {
- delete(seenKeys, k)
- }
- }
-}
-
-type benchmarkableMap interface {
- Load(key int64) *testValue
- Store(key int64, val *testValue)
- LoadOrStore(key int64, val *testValue) (*testValue, bool)
- Delete(key int64)
-}
-
-// rwMutexMap implements benchmarkableMap for a RWMutex-protected Go map.
-type rwMutexMap struct {
- mu sync.RWMutex
- m map[int64]*testValue
-}
-
-func (m *rwMutexMap) Load(key int64) *testValue {
- m.mu.RLock()
- defer m.mu.RUnlock()
- return m.m[key]
-}
-
-func (m *rwMutexMap) Store(key int64, val *testValue) {
- m.mu.Lock()
- defer m.mu.Unlock()
- if m.m == nil {
- m.m = make(map[int64]*testValue)
- }
- m.m[key] = val
-}
-
-func (m *rwMutexMap) LoadOrStore(key int64, val *testValue) (*testValue, bool) {
- m.mu.Lock()
- defer m.mu.Unlock()
- if m.m == nil {
- m.m = make(map[int64]*testValue)
- }
- if oldVal, ok := m.m[key]; ok {
- return oldVal, true
- }
- m.m[key] = val
- return val, false
-}
-
-func (m *rwMutexMap) Delete(key int64) {
- m.mu.Lock()
- defer m.mu.Unlock()
- delete(m.m, key)
-}
-
-// syncMap implements benchmarkableMap for a sync.Map.
-type syncMap struct {
- m sync.Map
-}
-
-func (m *syncMap) Load(key int64) *testValue {
- val, ok := m.m.Load(key)
- if !ok {
- return nil
- }
- return val.(*testValue)
-}
-
-func (m *syncMap) Store(key int64, val *testValue) {
- m.m.Store(key, val)
-}
-
-func (m *syncMap) LoadOrStore(key int64, val *testValue) (*testValue, bool) {
- actual, loaded := m.m.LoadOrStore(key, val)
- return actual.(*testValue), loaded
-}
-
-func (m *syncMap) Delete(key int64) {
- m.m.Delete(key)
-}
-
-// benchmarkableAtomicPtrMap implements benchmarkableMap for testAtomicPtrMap.
-type benchmarkableAtomicPtrMap struct {
- m testAtomicPtrMap
-}
-
-func (m *benchmarkableAtomicPtrMap) Load(key int64) *testValue {
- return m.m.Load(key)
-}
-
-func (m *benchmarkableAtomicPtrMap) Store(key int64, val *testValue) {
- m.m.Store(key, val)
-}
-
-func (m *benchmarkableAtomicPtrMap) LoadOrStore(key int64, val *testValue) (*testValue, bool) {
- if prev := m.m.CompareAndSwap(key, nil, val); prev != nil {
- return prev, true
- }
- return val, false
-}
-
-func (m *benchmarkableAtomicPtrMap) Delete(key int64) {
- m.m.Store(key, nil)
-}
-
-// benchmarkableAtomicPtrMapSharded implements benchmarkableMap for testAtomicPtrMapSharded.
-type benchmarkableAtomicPtrMapSharded struct {
- m testAtomicPtrMapSharded
-}
-
-func (m *benchmarkableAtomicPtrMapSharded) Load(key int64) *testValue {
- return m.m.Load(key)
-}
-
-func (m *benchmarkableAtomicPtrMapSharded) Store(key int64, val *testValue) {
- m.m.Store(key, val)
-}
-
-func (m *benchmarkableAtomicPtrMapSharded) LoadOrStore(key int64, val *testValue) (*testValue, bool) {
- if prev := m.m.CompareAndSwap(key, nil, val); prev != nil {
- return prev, true
- }
- return val, false
-}
-
-func (m *benchmarkableAtomicPtrMapSharded) Delete(key int64) {
- m.m.Store(key, nil)
-}
-
-var mapImpls = [...]struct {
- name string
- ctor func() benchmarkableMap
-}{
- {
- name: "RWMutexMap",
- ctor: func() benchmarkableMap {
- return new(rwMutexMap)
- },
- },
- {
- name: "SyncMap",
- ctor: func() benchmarkableMap {
- return new(syncMap)
- },
- },
- {
- name: "AtomicPtrMap",
- ctor: func() benchmarkableMap {
- return new(benchmarkableAtomicPtrMap)
- },
- },
- {
- name: "AtomicPtrMapSharded",
- ctor: func() benchmarkableMap {
- return new(benchmarkableAtomicPtrMapSharded)
- },
- },
-}
-
-func benchmarkStoreDelete(b *testing.B, mapCtor func() benchmarkableMap) {
- m := mapCtor()
- val := &testValue{}
- for i := 0; i < b.N; i++ {
- m.Store(int64(i), val)
- }
- for i := 0; i < b.N; i++ {
- m.Delete(int64(i))
- }
-}
-
-func BenchmarkStoreDelete(b *testing.B) {
- for _, mapImpl := range mapImpls {
- b.Run(mapImpl.name, func(b *testing.B) {
- benchmarkStoreDelete(b, mapImpl.ctor)
- })
- }
-}
-
-func benchmarkLoadOrStoreDelete(b *testing.B, mapCtor func() benchmarkableMap) {
- m := mapCtor()
- val := &testValue{}
- for i := 0; i < b.N; i++ {
- m.LoadOrStore(int64(i), val)
- }
- for i := 0; i < b.N; i++ {
- m.Delete(int64(i))
- }
-}
-
-func BenchmarkLoadOrStoreDelete(b *testing.B) {
- for _, mapImpl := range mapImpls {
- b.Run(mapImpl.name, func(b *testing.B) {
- benchmarkLoadOrStoreDelete(b, mapImpl.ctor)
- })
- }
-}
-
-func benchmarkLookupPositive(b *testing.B, mapCtor func() benchmarkableMap) {
- m := mapCtor()
- val := &testValue{}
- for i := 0; i < b.N; i++ {
- m.Store(int64(i), val)
- }
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- m.Load(int64(i))
- }
-}
-
-func BenchmarkLookupPositive(b *testing.B) {
- for _, mapImpl := range mapImpls {
- b.Run(mapImpl.name, func(b *testing.B) {
- benchmarkLookupPositive(b, mapImpl.ctor)
- })
- }
-}
-
-func benchmarkLookupNegative(b *testing.B, mapCtor func() benchmarkableMap) {
- m := mapCtor()
- val := &testValue{}
- for i := 0; i < b.N; i++ {
- m.Store(int64(i), val)
- }
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- m.Load(int64(-1 - i))
- }
-}
-
-func BenchmarkLookupNegative(b *testing.B) {
- for _, mapImpl := range mapImpls {
- b.Run(mapImpl.name, func(b *testing.B) {
- benchmarkLookupNegative(b, mapImpl.ctor)
- })
- }
-}
-
-type benchmarkConcurrentOptions struct {
- // loadsPerMutationPair is the number of map lookups between each
- // insertion/deletion pair.
- loadsPerMutationPair int
-
- // If changeKeys is true, the keys used by each goroutine change between
- // iterations of the test.
- changeKeys bool
-}
-
-func benchmarkConcurrent(b *testing.B, mapCtor func() benchmarkableMap, opts benchmarkConcurrentOptions) {
- var (
- started sync.WaitGroup
- workers sync.WaitGroup
- )
- started.Add(1)
-
- m := mapCtor()
- val := &testValue{}
- // Insert a large number of unused elements into the map so that used
- // elements are distributed throughout memory.
- for i := 0; i < 10000; i++ {
- m.Store(int64(-1-i), val)
- }
- // n := ceil(b.N / (opts.loadsPerMutationPair + 2))
- n := (b.N + opts.loadsPerMutationPair + 1) / (opts.loadsPerMutationPair + 2)
- for i, procs := 0, runtime.GOMAXPROCS(0); i < procs; i++ {
- workerID := i
- workers.Add(1)
- go func() {
- defer workers.Done()
- started.Wait()
- for i := 0; i < n; i++ {
- var key int64
- if opts.changeKeys {
- key = int64(workerID*n + i)
- } else {
- key = int64(workerID)
- }
- m.LoadOrStore(key, val)
- for j := 0; j < opts.loadsPerMutationPair; j++ {
- m.Load(key)
- }
- m.Delete(key)
- }
- }()
- }
-
- b.ResetTimer()
- started.Done()
- workers.Wait()
-}
-
-func BenchmarkConcurrent(b *testing.B) {
- changeKeysChoices := [...]struct {
- name string
- val bool
- }{
- {"FixedKeys", false},
- {"ChangingKeys", true},
- }
- writePcts := [...]struct {
- name string
- loadsPerMutationPair int
- }{
- {"1PercentWrites", 198},
- {"10PercentWrites", 18},
- {"50PercentWrites", 2},
- }
- for _, changeKeys := range changeKeysChoices {
- for _, writePct := range writePcts {
- for _, mapImpl := range mapImpls {
- name := fmt.Sprintf("%s_%s_%s", changeKeys.name, writePct.name, mapImpl.name)
- b.Run(name, func(b *testing.B) {
- benchmarkConcurrent(b, mapImpl.ctor, benchmarkConcurrentOptions{
- loadsPerMutationPair: writePct.loadsPerMutationPair,
- changeKeys: changeKeys.val,
- })
- })
- }
- }
- }
-}
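
The benchmark names above encode a write percentage: each worker iteration issues one LoadOrStore, loadsPerMutationPair Loads, and one Delete, so the write fraction is 2 / (loadsPerMutationPair + 2). A small helper (hypothetical, not part of the deleted file) that inverts this relationship and reproduces the table used in BenchmarkConcurrent:

// loadsPerMutationPairFor returns the number of Loads between each
// LoadOrStore/Delete pair for a target write percentage:
//   writePct = 100 * 2 / (loads + 2)  =>  loads = 200/writePct - 2
// so loadsPerMutationPairFor(1) == 198, (10) == 18, (50) == 2.
func loadsPerMutationPairFor(writePct int) int {
	return 200/writePct - 2
}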
diff --git a/pkg/sync/atomicptrmap/generic_atomicptrmap_unsafe.go b/pkg/sync/atomicptrmap/generic_atomicptrmap_unsafe.go
deleted file mode 100644
index 3e98cb309..000000000
--- a/pkg/sync/atomicptrmap/generic_atomicptrmap_unsafe.go
+++ /dev/null
@@ -1,500 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package atomicptrmap doesn't exist. This file must be instantiated using the
-// go_template_instance rule in tools/go_generics/defs.bzl.
-package atomicptrmap
-
-import (
- "sync/atomic"
- "unsafe"
-
- "gvisor.dev/gvisor/pkg/gohacks"
- "gvisor.dev/gvisor/pkg/sync"
-)
-
-// Key is a required type parameter.
-type Key struct{}
-
-// Value is a required type parameter.
-type Value struct{}
-
-const (
- // ShardOrder is an optional parameter specifying the base-2 log of the
- // number of shards per AtomicPtrMap. Higher values of ShardOrder reduce
- // unnecessary synchronization between unrelated concurrent operations,
- // improving performance for write-heavy workloads, but increase memory
- // usage for small maps.
- ShardOrder = 0
-)
-
-// Hasher is an optional type parameter. If Hasher is provided, it must define
-// the Init and Hash methods. One Hasher will be shared by all AtomicPtrMaps.
-type Hasher struct {
- defaultHasher
-}
-
-// defaultHasher is the default Hasher. This indirection exists because
-// defaultHasher must exist even if a custom Hasher is provided, to prevent the
-// Go compiler from complaining about defaultHasher's unused imports.
-type defaultHasher struct {
- fn func(unsafe.Pointer, uintptr) uintptr
- seed uintptr
-}
-
-// Init initializes the Hasher.
-func (h *defaultHasher) Init() {
- h.fn = sync.MapKeyHasher(map[Key]*Value(nil))
- h.seed = sync.RandUintptr()
-}
-
-// Hash returns the hash value for the given Key.
-func (h *defaultHasher) Hash(key Key) uintptr {
- return h.fn(gohacks.Noescape(unsafe.Pointer(&key)), h.seed)
-}
-
-var hasher Hasher
-
-func init() {
- hasher.Init()
-}
-
-// An AtomicPtrMap maps Keys to non-nil pointers to Values. AtomicPtrMaps are
-// safe for concurrent use from multiple goroutines without additional
-// synchronization.
-//
-// The zero value of AtomicPtrMap is empty (maps all Keys to nil) and ready for
-// use. AtomicPtrMaps must not be copied after first use.
-//
-// sync.Map may be faster than AtomicPtrMap if most operations on the map are
-// concurrent writes to a fixed set of keys. AtomicPtrMap is usually faster in
-// other circumstances.
-type AtomicPtrMap struct {
- // AtomicPtrMap is implemented as a hash table with the following
- // properties:
- //
- // * Collisions are resolved with quadratic probing. Of the two major
- // alternatives, Robin Hood linear probing makes it difficult for writers
- // to execute in parallel, and bucketing is less effective in Go due to
- // lack of SIMD.
- //
- // * The table is optionally divided into shards indexed by hash to further
- // reduce unnecessary synchronization.
-
- shards [1 << ShardOrder]apmShard
-}
-
-func (m *AtomicPtrMap) shard(hash uintptr) *apmShard {
- // Go defines right shifts >= width of shifted unsigned operand as 0, so
- // this is correct even if ShardOrder is 0 (although nogo complains because
- // nogo is dumb).
- const indexLSB = unsafe.Sizeof(uintptr(0))*8 - ShardOrder
- index := hash >> indexLSB
- return (*apmShard)(unsafe.Pointer(uintptr(unsafe.Pointer(&m.shards)) + (index * unsafe.Sizeof(apmShard{}))))
-}
-
-type apmShard struct {
- apmShardMutationData
- _ [apmShardMutationDataPadding]byte
- apmShardLookupData
- _ [apmShardLookupDataPadding]byte
-}
-
-type apmShardMutationData struct {
- dirtyMu sync.Mutex // serializes slot transitions out of empty
- dirty uintptr // # slots with val != nil
- count uintptr // # slots with val != nil and val != tombstone()
- rehashMu sync.Mutex // serializes rehashing
-}
-
-type apmShardLookupData struct {
- seq sync.SeqCount // allows atomic reads of slots+mask
- slots unsafe.Pointer // [mask+1]slot or nil; protected by rehashMu/seq
- mask uintptr // always (a power of 2) - 1; protected by rehashMu/seq
-}
-
-const (
- cacheLineBytes = 64
- // Cache line padding is enabled if sharding is.
- apmEnablePadding = (ShardOrder + 63) >> 6 // 0 if ShardOrder == 0, 1 otherwise
- // The -1 and +1 below are required to ensure that if unsafe.Sizeof(T) %
- // cacheLineBytes == 0, then padding is 0 (rather than cacheLineBytes).
- apmShardMutationDataRequiredPadding = cacheLineBytes - (((unsafe.Sizeof(apmShardMutationData{}) - 1) % cacheLineBytes) + 1)
- apmShardMutationDataPadding = apmEnablePadding * apmShardMutationDataRequiredPadding
- apmShardLookupDataRequiredPadding = cacheLineBytes - (((unsafe.Sizeof(apmShardLookupData{}) - 1) % cacheLineBytes) + 1)
- apmShardLookupDataPadding = apmEnablePadding * apmShardLookupDataRequiredPadding
-
- // These define fractional thresholds for when apmShard.rehash() is called
-	// (i.e. the load factor) and when it rehashes to a larger table
- // respectively. They are chosen such that the rehash threshold = the
- // expansion threshold + 1/2, so that when reuse of deleted slots is rare
- // or non-existent, rehashing occurs after the insertion of at least 1/2
- // the table's size in new entries, which is acceptably infrequent.
- apmRehashThresholdNum = 2
- apmRehashThresholdDen = 3
- apmExpansionThresholdNum = 1
- apmExpansionThresholdDen = 6
-)
-
-type apmSlot struct {
- // slot states are indicated by val:
- //
- // * Empty: val == nil; key is meaningless. May transition to full or
- // evacuated with dirtyMu locked.
- //
- // * Full: val != nil, tombstone(), or evacuated(); key is immutable. val
- // is the Value mapped to key. May transition to deleted or evacuated.
- //
- // * Deleted: val == tombstone(); key is still immutable. key is mapped to
- // no Value. May transition to full or evacuated.
- //
- // * Evacuated: val == evacuated(); key is immutable. Set by rehashing on
- // slots that have already been moved, requiring readers to wait for
- // rehashing to complete and use the new table. Terminal state.
- //
- // Note that once val is non-nil, it cannot become nil again. That is, the
- // transition from empty to non-empty is irreversible for a given slot;
- // the only way to create more empty slots is by rehashing.
- val unsafe.Pointer
- key Key
-}
-
-func apmSlotAt(slots unsafe.Pointer, pos uintptr) *apmSlot {
- return (*apmSlot)(unsafe.Pointer(uintptr(slots) + pos*unsafe.Sizeof(apmSlot{})))
-}
-
-var tombstoneObj byte
-
-func tombstone() unsafe.Pointer {
- return unsafe.Pointer(&tombstoneObj)
-}
-
-var evacuatedObj byte
-
-func evacuated() unsafe.Pointer {
- return unsafe.Pointer(&evacuatedObj)
-}
-
-// Load returns the Value stored in m for key.
-func (m *AtomicPtrMap) Load(key Key) *Value {
- hash := hasher.Hash(key)
- shard := m.shard(hash)
-
-retry:
- epoch := shard.seq.BeginRead()
- slots := atomic.LoadPointer(&shard.slots)
- mask := atomic.LoadUintptr(&shard.mask)
- if !shard.seq.ReadOk(epoch) {
- goto retry
- }
- if slots == nil {
- return nil
- }
-
- i := hash & mask
- inc := uintptr(1)
- for {
- slot := apmSlotAt(slots, i)
- slotVal := atomic.LoadPointer(&slot.val)
- if slotVal == nil {
- // Empty slot; end of probe sequence.
- return nil
- }
- if slotVal == evacuated() {
- // Racing with rehashing.
- goto retry
- }
- if slot.key == key {
- if slotVal == tombstone() {
- return nil
- }
- return (*Value)(slotVal)
- }
- i = (i + inc) & mask
- inc++
- }
-}
-
-// Store stores the Value val for key.
-func (m *AtomicPtrMap) Store(key Key, val *Value) {
- m.maybeCompareAndSwap(key, false, nil, val)
-}
-
-// Swap stores the Value val for key and returns the previously-mapped Value.
-func (m *AtomicPtrMap) Swap(key Key, val *Value) *Value {
- return m.maybeCompareAndSwap(key, false, nil, val)
-}
-
-// CompareAndSwap checks that the Value stored for key is oldVal; if it is, it
-// stores the Value newVal for key. CompareAndSwap returns the previous Value
-// stored for key, whether or not it stores newVal.
-func (m *AtomicPtrMap) CompareAndSwap(key Key, oldVal, newVal *Value) *Value {
- return m.maybeCompareAndSwap(key, true, oldVal, newVal)
-}
-
-func (m *AtomicPtrMap) maybeCompareAndSwap(key Key, compare bool, typedOldVal, typedNewVal *Value) *Value {
- hash := hasher.Hash(key)
- shard := m.shard(hash)
- oldVal := tombstone()
- if typedOldVal != nil {
- oldVal = unsafe.Pointer(typedOldVal)
- }
- newVal := tombstone()
- if typedNewVal != nil {
- newVal = unsafe.Pointer(typedNewVal)
- }
-
-retry:
- epoch := shard.seq.BeginRead()
- slots := atomic.LoadPointer(&shard.slots)
- mask := atomic.LoadUintptr(&shard.mask)
- if !shard.seq.ReadOk(epoch) {
- goto retry
- }
- if slots == nil {
- if (compare && oldVal != tombstone()) || newVal == tombstone() {
- return nil
- }
- // Need to allocate a table before insertion.
- shard.rehash(nil)
- goto retry
- }
-
- i := hash & mask
- inc := uintptr(1)
- for {
- slot := apmSlotAt(slots, i)
- slotVal := atomic.LoadPointer(&slot.val)
- if slotVal == nil {
- if (compare && oldVal != tombstone()) || newVal == tombstone() {
- return nil
- }
- // Try to grab this slot for ourselves.
- shard.dirtyMu.Lock()
- slotVal = atomic.LoadPointer(&slot.val)
- if slotVal == nil {
- // Check if we need to rehash before dirtying a slot.
- if dirty, capacity := shard.dirty+1, mask+1; dirty*apmRehashThresholdDen >= capacity*apmRehashThresholdNum {
- shard.dirtyMu.Unlock()
- shard.rehash(slots)
- goto retry
- }
- slot.key = key
- atomic.StorePointer(&slot.val, newVal) // transitions slot to full
- shard.dirty++
- atomic.AddUintptr(&shard.count, 1)
- shard.dirtyMu.Unlock()
- return nil
- }
- // Raced with another store; the slot is no longer empty. Continue
- // with the new value of slotVal since we may have raced with
- // another store of key.
- shard.dirtyMu.Unlock()
- }
- if slotVal == evacuated() {
- // Racing with rehashing.
- goto retry
- }
- if slot.key == key {
- // We're reusing an existing slot, so rehashing isn't necessary.
- for {
- if (compare && oldVal != slotVal) || newVal == slotVal {
- if slotVal == tombstone() {
- return nil
- }
- return (*Value)(slotVal)
- }
- if atomic.CompareAndSwapPointer(&slot.val, slotVal, newVal) {
- if slotVal == tombstone() {
- atomic.AddUintptr(&shard.count, 1)
- return nil
- }
- if newVal == tombstone() {
- atomic.AddUintptr(&shard.count, ^uintptr(0) /* -1 */)
- }
- return (*Value)(slotVal)
- }
- slotVal = atomic.LoadPointer(&slot.val)
- if slotVal == evacuated() {
- goto retry
- }
- }
- }
- // This produces a triangular number sequence of offsets from the
- // initially-probed position.
- i = (i + inc) & mask
- inc++
- }
-}
-
-// rehash is marked nosplit to avoid preemption during table copying.
-//go:nosplit
-func (shard *apmShard) rehash(oldSlots unsafe.Pointer) {
- shard.rehashMu.Lock()
- defer shard.rehashMu.Unlock()
-
- if shard.slots != oldSlots {
- // Raced with another call to rehash().
- return
- }
-
- // Determine the size of the new table. Constraints:
- //
- // * The size of the table must be a power of two to ensure that every slot
- // is visitable by every probe sequence under quadratic probing with
- // triangular numbers.
- //
- // * The size of the table cannot decrease because even if shard.count is
- // currently smaller than shard.dirty, concurrent stores that reuse
- // existing slots can drive shard.count back up to a maximum of
- // shard.dirty.
- newSize := uintptr(8) // arbitrary initial size
- if oldSlots != nil {
- oldSize := shard.mask + 1
- newSize = oldSize
- if count := atomic.LoadUintptr(&shard.count) + 1; count*apmExpansionThresholdDen > oldSize*apmExpansionThresholdNum {
- newSize *= 2
- }
- }
-
- // Allocate the new table.
- newSlotsSlice := make([]apmSlot, newSize)
- newSlotsHeader := (*gohacks.SliceHeader)(unsafe.Pointer(&newSlotsSlice))
- newSlots := newSlotsHeader.Data
- newMask := newSize - 1
-
- // Start a writer critical section now so that racing users of the old
- // table that observe evacuated() wait for the new table. (But lock dirtyMu
- // first since doing so may block, which we don't want to do during the
- // writer critical section.)
- shard.dirtyMu.Lock()
- shard.seq.BeginWrite()
-
- if oldSlots != nil {
- realCount := uintptr(0)
- // Copy old entries to the new table.
- oldMask := shard.mask
- for i := uintptr(0); i <= oldMask; i++ {
- oldSlot := apmSlotAt(oldSlots, i)
- val := atomic.SwapPointer(&oldSlot.val, evacuated())
- if val == nil || val == tombstone() {
- continue
- }
- hash := hasher.Hash(oldSlot.key)
- j := hash & newMask
- inc := uintptr(1)
- for {
- newSlot := apmSlotAt(newSlots, j)
- if newSlot.val == nil {
- newSlot.val = val
- newSlot.key = oldSlot.key
- break
- }
- j = (j + inc) & newMask
- inc++
- }
- realCount++
- }
- // Update dirty to reflect that tombstones were not copied to the new
- // table. Use realCount since a concurrent mutator may not have updated
- // shard.count yet.
- shard.dirty = realCount
- }
-
- // Switch to the new table.
- atomic.StorePointer(&shard.slots, newSlots)
- atomic.StoreUintptr(&shard.mask, newMask)
-
- shard.seq.EndWrite()
- shard.dirtyMu.Unlock()
-}
-
-// Range invokes f on each Key-Value pair stored in m. If any call to f returns
-// false, Range stops iteration and returns.
-//
-// Range does not necessarily correspond to any consistent snapshot of the
-// Map's contents: no Key will be visited more than once, but if the Value for
-// any Key is stored or deleted concurrently, Range may reflect any mapping for
-// that Key from any point during the Range call.
-//
-// f must not call other methods on m.
-func (m *AtomicPtrMap) Range(f func(key Key, val *Value) bool) {
- for si := 0; si < len(m.shards); si++ {
- shard := &m.shards[si]
- if !shard.doRange(f) {
- return
- }
- }
-}
-
-func (shard *apmShard) doRange(f func(key Key, val *Value) bool) bool {
- // We have to lock rehashMu because if we handled races with rehashing by
- // retrying, f could see the same key twice.
- shard.rehashMu.Lock()
- defer shard.rehashMu.Unlock()
- slots := shard.slots
- if slots == nil {
- return true
- }
- mask := shard.mask
- for i := uintptr(0); i <= mask; i++ {
- slot := apmSlotAt(slots, i)
- slotVal := atomic.LoadPointer(&slot.val)
- if slotVal == nil || slotVal == tombstone() {
- continue
- }
- if !f(slot.key, (*Value)(slotVal)) {
- return false
- }
- }
- return true
-}
-
-// RangeRepeatable is like Range, but:
-//
-// * RangeRepeatable may visit the same Key multiple times in the presence of
-// concurrent mutators, possibly passing different Values to f in different
-// calls.
-//
-// * It is safe for f to call other methods on m.
-func (m *AtomicPtrMap) RangeRepeatable(f func(key Key, val *Value) bool) {
- for si := 0; si < len(m.shards); si++ {
- shard := &m.shards[si]
-
- retry:
- epoch := shard.seq.BeginRead()
- slots := atomic.LoadPointer(&shard.slots)
- mask := atomic.LoadUintptr(&shard.mask)
- if !shard.seq.ReadOk(epoch) {
- goto retry
- }
- if slots == nil {
- continue
- }
-
- for i := uintptr(0); i <= mask; i++ {
- slot := apmSlotAt(slots, i)
- slotVal := atomic.LoadPointer(&slot.val)
- if slotVal == evacuated() {
- goto retry
- }
- if slotVal == nil || slotVal == tombstone() {
- continue
- }
- if !f(slot.key, (*Value)(slotVal)) {
- return
- }
- }
- }
-}
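
The probe loop used throughout this file advances by i = (i + inc) & mask with inc incremented on every step, so the offsets from the initially-probed position are the triangular numbers 0, 1, 3, 6, 10, ... For a power-of-two table this sequence visits every slot before repeating, which is why rehash() only ever chooses power-of-two sizes. A standalone demonstration (illustrative only; mask 7 matches the arbitrary initial table size of 8 above):

package main

import "fmt"

func main() {
	const mask = 7 // table size 8
	seen := make(map[uintptr]bool)
	i, inc := uintptr(0), uintptr(1)
	// Probe exactly table-size times and count the distinct slots touched.
	for n := 0; n <= mask; n++ {
		seen[i] = true
		i = (i + inc) & mask
		inc++
	}
	fmt.Printf("visited %d of %d slots\n", len(seen), mask+1) // visited 8 of 8 slots
}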
diff --git a/pkg/sync/gate_test.go b/pkg/sync/gate_test.go
deleted file mode 100644
index 82ce02b97..000000000
--- a/pkg/sync/gate_test.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package sync
-
-import (
- "context"
- "runtime"
- "sync/atomic"
- "testing"
- "time"
-)
-
-func TestGateBasic(t *testing.T) {
- var g Gate
-
- if !g.Enter() {
- t.Fatalf("Enter failed before Close")
- }
- g.Leave()
-
- g.Close()
- if g.Enter() {
- t.Fatalf("Enter succeeded after Close")
- }
-}
-
-func TestGateConcurrent(t *testing.T) {
- // Each call to testGateConcurrentOnce tests behavior around a single call
- // to Gate.Close, so run many short tests to increase the probability of
- // flushing out any issues.
- totalTime := 5 * time.Second
- timePerTest := 20 * time.Millisecond
- numTests := int(totalTime / timePerTest)
- for i := 0; i < numTests; i++ {
- testGateConcurrentOnce(t, timePerTest)
- }
-}
-
-func testGateConcurrentOnce(t *testing.T, d time.Duration) {
- const numGoroutines = 1000
-
- ctx, cancel := context.WithCancel(context.Background())
- var wg WaitGroup
- defer func() {
- cancel()
- wg.Wait()
- }()
-
- var g Gate
- closeState := int32(0) // set to 1 before g.Close() and 2 after it returns
-
- // Start a large number of goroutines that repeatedly attempt to enter the
- // gate and get the expected result.
- for i := 0; i < numGoroutines; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
- for ctx.Err() == nil {
- closedBeforeEnter := atomic.LoadInt32(&closeState) == 2
- if g.Enter() {
- closedBeforeLeave := atomic.LoadInt32(&closeState) == 2
- g.Leave()
- if closedBeforeEnter {
- t.Errorf("Enter succeeded after Close")
- return
- }
- if closedBeforeLeave {
- t.Errorf("Close returned before Leave")
- return
- }
- } else {
- if atomic.LoadInt32(&closeState) == 0 {
- t.Errorf("Enter failed before Close")
- return
- }
- }
- // Go does not preempt busy loops until Go 1.14.
- runtime.Gosched()
- }
- }()
- }
-
- // Allow goroutines to enter the gate successfully for half of the test's
- // duration, then close the gate and allow goroutines to fail to enter the
- // gate for the remaining half.
- time.Sleep(d / 2)
- atomic.StoreInt32(&closeState, 1)
- g.Close()
- atomic.StoreInt32(&closeState, 2)
- time.Sleep(d / 2)
-}
-
-func BenchmarkGateEnterLeave(b *testing.B) {
- var g Gate
- for i := 0; i < b.N; i++ {
- g.Enter()
- g.Leave()
- }
-}
-
-func BenchmarkGateClose(b *testing.B) {
- for i := 0; i < b.N; i++ {
- var g Gate
- g.Close()
- }
-}
-
-func BenchmarkGateEnterLeaveAsyncClose(b *testing.B) {
- for i := 0; i < b.N; i++ {
- var g Gate
- g.Enter()
- go func() {
- g.Leave()
- }()
- g.Close()
- }
-}
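
The tests above pin down the Gate contract: Enter returns true and must be paired with Leave while the gate is open, Enter returns false once Close has been called, and Close does not return while any successful Enter is still outstanding. A usage sketch inferred from those tests (the doWork and shutdown helpers are illustrative, not part of the package):

import "gvisor.dev/gvisor/pkg/sync"

// doWork runs work inside the gate; it refuses the work if the gate is closed.
func doWork(g *sync.Gate, work func()) bool {
	if !g.Enter() {
		return false
	}
	defer g.Leave()
	work()
	return true
}

// shutdown closes the gate; once Close returns, no doWork call is still
// running its work function.
func shutdown(g *sync.Gate) {
	g.Close()
}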
diff --git a/pkg/sync/mutex_test.go b/pkg/sync/mutex_test.go
deleted file mode 100644
index 9e4e3f0b2..000000000
--- a/pkg/sync/mutex_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sync
-
-import (
- "sync"
- "testing"
- "unsafe"
-)
-
-// TestStructSize verifies that syncMutex's size hasn't drifted from the
-// standard library's version.
-//
-// The correctness of this package relies on these remaining in sync.
-func TestStructSize(t *testing.T) {
- const (
- got = unsafe.Sizeof(syncMutex{})
- want = unsafe.Sizeof(sync.Mutex{})
- )
- if got != want {
- t.Errorf("got sizeof(syncMutex) = %d, want = sizeof(sync.Mutex) = %d", got, want)
- }
-}
-
-// TestFieldValues verifies that the semantics of syncMutex.state match the
-// standard library's implementation.
-//
-// The correctness of this package relies on these remaining in sync.
-func TestFieldValues(t *testing.T) {
- var m Mutex
- m.Lock()
- if got := *m.m.state(); got != mutexLocked {
- t.Errorf("got locked sync.Mutex.state = %d, want = %d", got, mutexLocked)
- }
- m.Unlock()
- if got := *m.m.state(); got != mutexUnlocked {
- t.Errorf("got unlocked sync.Mutex.state = %d, want = %d", got, mutexUnlocked)
- }
-}
-
-func TestDoubleTryLock(t *testing.T) {
- var m Mutex
- if !m.TryLock() {
-		t.Fatal("failed to acquire lock")
- }
- if m.TryLock() {
-		t.Fatal("unexpectedly succeeded in acquiring locked mutex")
- }
-}
-
-func TestTryLockAfterLock(t *testing.T) {
- var m Mutex
- m.Lock()
- if m.TryLock() {
-		t.Fatal("unexpectedly succeeded in acquiring locked mutex")
- }
-}
-
-func TestTryLockUnlock(t *testing.T) {
- var m Mutex
- if !m.TryLock() {
-		t.Fatal("failed to acquire lock")
- }
- m.Unlock() // +checklocksforce
- if !m.TryLock() {
-		t.Fatal("failed to acquire lock after unlock")
- }
-}
diff --git a/pkg/sync/rwmutex_test.go b/pkg/sync/rwmutex_test.go
deleted file mode 100644
index 56a88e712..000000000
--- a/pkg/sync/rwmutex_test.go
+++ /dev/null
@@ -1,205 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Copyright 2019 The gVisor Authors.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// GOMAXPROCS=10 go test
-
-// Copy/pasted from the standard library's sync/rwmutex_test.go, except for the
-// addition of downgradingWriter and the renaming of num_iterations to
-// numIterations to shut up Golint.
-
-package sync
-
-import (
- "fmt"
- "runtime"
- "sync/atomic"
- "testing"
-)
-
-func parallelReader(m *RWMutex, clocked, cunlock, cdone chan bool) {
- m.RLock()
- clocked <- true
- <-cunlock
- m.RUnlock()
- cdone <- true
-}
-
-func doTestParallelReaders(numReaders, gomaxprocs int) {
- runtime.GOMAXPROCS(gomaxprocs)
- var m RWMutex
- clocked := make(chan bool)
- cunlock := make(chan bool)
- cdone := make(chan bool)
- for i := 0; i < numReaders; i++ {
- go parallelReader(&m, clocked, cunlock, cdone)
- }
- // Wait for all parallel RLock()s to succeed.
- for i := 0; i < numReaders; i++ {
- <-clocked
- }
- for i := 0; i < numReaders; i++ {
- cunlock <- true
- }
- // Wait for the goroutines to finish.
- for i := 0; i < numReaders; i++ {
- <-cdone
- }
-}
-
-func TestParallelReaders(t *testing.T) {
- defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1))
- doTestParallelReaders(1, 4)
- doTestParallelReaders(3, 4)
- doTestParallelReaders(4, 2)
-}
-
-func reader(rwm *RWMutex, numIterations int, activity *int32, cdone chan bool) {
- for i := 0; i < numIterations; i++ {
- rwm.RLock()
- n := atomic.AddInt32(activity, 1)
- if n < 1 || n >= 10000 {
- panic(fmt.Sprintf("wlock(%d)\n", n))
- }
- for i := 0; i < 100; i++ {
- }
- atomic.AddInt32(activity, -1)
- rwm.RUnlock()
- }
- cdone <- true
-}
-
-func writer(rwm *RWMutex, numIterations int, activity *int32, cdone chan bool) {
- for i := 0; i < numIterations; i++ {
- rwm.Lock()
- n := atomic.AddInt32(activity, 10000)
- if n != 10000 {
- panic(fmt.Sprintf("wlock(%d)\n", n))
- }
- for i := 0; i < 100; i++ {
- }
- atomic.AddInt32(activity, -10000)
- rwm.Unlock()
- }
- cdone <- true
-}
-
-func downgradingWriter(rwm *RWMutex, numIterations int, activity *int32, cdone chan bool) {
- for i := 0; i < numIterations; i++ {
- rwm.Lock()
- n := atomic.AddInt32(activity, 10000)
- if n != 10000 {
- panic(fmt.Sprintf("wlock(%d)\n", n))
- }
- for i := 0; i < 100; i++ {
- }
- atomic.AddInt32(activity, -10000)
- rwm.DowngradeLock()
- n = atomic.AddInt32(activity, 1)
- if n < 1 || n >= 10000 {
- panic(fmt.Sprintf("wlock(%d)\n", n))
- }
- for i := 0; i < 100; i++ {
- }
- atomic.AddInt32(activity, -1)
- rwm.RUnlock()
- }
- cdone <- true
-}
-
-func HammerDowngradableRWMutex(gomaxprocs, numReaders, numIterations int) {
- runtime.GOMAXPROCS(gomaxprocs)
- // Number of active readers + 10000 * number of active writers.
- var activity int32
- var rwm RWMutex
- cdone := make(chan bool)
- go writer(&rwm, numIterations, &activity, cdone)
- go downgradingWriter(&rwm, numIterations, &activity, cdone)
- var i int
- for i = 0; i < numReaders/2; i++ {
- go reader(&rwm, numIterations, &activity, cdone)
- }
- go writer(&rwm, numIterations, &activity, cdone)
- go downgradingWriter(&rwm, numIterations, &activity, cdone)
- for ; i < numReaders; i++ {
- go reader(&rwm, numIterations, &activity, cdone)
- }
- // Wait for the 4 writers and all readers to finish.
- for i := 0; i < 4+numReaders; i++ {
- <-cdone
- }
-}
-
-func TestDowngradableRWMutex(t *testing.T) {
- defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1))
- n := 1000
- if testing.Short() {
- n = 5
- }
- HammerDowngradableRWMutex(1, 1, n)
- HammerDowngradableRWMutex(1, 3, n)
- HammerDowngradableRWMutex(1, 10, n)
- HammerDowngradableRWMutex(4, 1, n)
- HammerDowngradableRWMutex(4, 3, n)
- HammerDowngradableRWMutex(4, 10, n)
- HammerDowngradableRWMutex(10, 1, n)
- HammerDowngradableRWMutex(10, 3, n)
- HammerDowngradableRWMutex(10, 10, n)
- HammerDowngradableRWMutex(10, 5, n)
-}
-
-func TestRWDoubleTryLock(t *testing.T) {
- var rwm RWMutex
- if !rwm.TryLock() {
- t.Fatal("failed to aquire lock")
- }
- if rwm.TryLock() {
- t.Fatal("unexpectedly succeeded in aquiring locked mutex")
- }
-}
-
-func TestRWTryLockAfterLock(t *testing.T) {
- var rwm RWMutex
- rwm.Lock()
- if rwm.TryLock() {
- t.Fatal("unexpectedly succeeded in aquiring locked mutex")
- }
-}
-
-func TestRWTryLockUnlock(t *testing.T) {
- var rwm RWMutex
- if !rwm.TryLock() {
- t.Fatal("failed to aquire lock")
- }
- rwm.Unlock() // +checklocksforce
- if !rwm.TryLock() {
- t.Fatal("failed to aquire lock after unlock")
- }
-}
-
-func TestTryRLockAfterLock(t *testing.T) {
- var rwm RWMutex
- rwm.Lock()
- if rwm.TryRLock() {
- t.Fatal("unexpectedly succeeded in aquiring locked mutex")
- }
-}
-
-func TestTryLockAfterRLock(t *testing.T) {
- var rwm RWMutex
- rwm.RLock()
- if rwm.TryLock() {
- t.Fatal("unexpectedly succeeded in aquiring locked mutex")
- }
-}
-
-func TestDoubleTryRLock(t *testing.T) {
- var rwm RWMutex
- if !rwm.TryRLock() {
- t.Fatal("failed to aquire lock")
- }
- if !rwm.TryRLock() {
- t.Fatal("failed to read aquire read locked lock")
- }
-}
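As a point of reference, the sketch below is not part of this change; it shows the writer-downgrade pattern that the deleted downgradingWriter test drives: mutate state under the exclusive lock, then call DowngradeLock so blocked readers may proceed while the caller retains read access. It assumes only the methods visible in the deleted test (Lock, DowngradeLock, RUnlock).

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/sync"
)

func main() {
	var (
		mu    sync.RWMutex
		value int
	)
	mu.Lock()          // exclusive: no readers or writers
	value = 42         // mutate shared state
	mu.DowngradeLock() // atomically become a reader; waiting readers may now proceed
	fmt.Println(value) // still safe to read: we hold the read lock
	mu.RUnlock()
}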
diff --git a/pkg/sync/seqatomic/BUILD b/pkg/sync/seqatomic/BUILD
deleted file mode 100644
index 60f79ab54..000000000
--- a/pkg/sync/seqatomic/BUILD
+++ /dev/null
@@ -1,45 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template(
- name = "generic_seqatomic",
- srcs = ["generic_seqatomic_unsafe.go"],
- types = [
- "Value",
- ],
- visibility = ["//:sandbox"],
- deps = [
- ":sync",
- "//pkg/gohacks",
- ],
-)
-
-go_template_instance(
- name = "seqatomic_int",
- out = "seqatomic_int_unsafe.go",
- package = "seqatomic",
- suffix = "Int",
- template = ":generic_seqatomic",
- types = {
- "Value": "int",
- },
-)
-
-go_library(
- name = "seqatomic",
- srcs = ["seqatomic_int_unsafe.go"],
- deps = [
- "//pkg/gohacks",
- "//pkg/sync",
- ],
-)
-
-go_test(
- name = "seqatomic_test",
- size = "small",
- srcs = ["seqatomic_test.go"],
- library = ":seqatomic",
- deps = ["//pkg/sync"],
-)
diff --git a/pkg/sync/seqatomic/generic_seqatomic_unsafe.go b/pkg/sync/seqatomic/generic_seqatomic_unsafe.go
deleted file mode 100644
index 9578c9c52..000000000
--- a/pkg/sync/seqatomic/generic_seqatomic_unsafe.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package seqatomic doesn't exist. This file must be instantiated using the
-// go_template_instance rule in tools/go_generics/defs.bzl.
-package seqatomic
-
-import (
- "unsafe"
-
- "gvisor.dev/gvisor/pkg/gohacks"
- "gvisor.dev/gvisor/pkg/sync"
-)
-
-// Value is a required type parameter.
-type Value struct{}
-
-// SeqAtomicLoad returns a copy of *ptr, ensuring that the read does not race
-// with any writer critical sections in seq.
-//
-//go:nosplit
-func SeqAtomicLoad(seq *sync.SeqCount, ptr *Value) Value {
- for {
- if val, ok := SeqAtomicTryLoad(seq, seq.BeginRead(), ptr); ok {
- return val
- }
- }
-}
-
-// SeqAtomicTryLoad returns a copy of *ptr while in a reader critical section
-// in seq initiated by a call to seq.BeginRead() that returned epoch. If the
-// read would race with a writer critical section, SeqAtomicTryLoad returns
-// (unspecified, false).
-//
-//go:nosplit
-func SeqAtomicTryLoad(seq *sync.SeqCount, epoch sync.SeqCountEpoch, ptr *Value) (val Value, ok bool) {
- if sync.RaceEnabled {
- // runtime.RaceDisable() doesn't actually stop the race detector, so it
- // can't help us here. Instead, call runtime.memmove directly, which is
- // not instrumented by the race detector.
- gohacks.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
- } else {
- // This is ~40% faster for short reads than going through memmove.
- val = *ptr
- }
- ok = seq.ReadOk(epoch)
- return
-}
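For context, the sketch below is not part of this change. It shows the reader/writer protocol that the generated SeqAtomicLoad wrappers implement: writers bracket updates with BeginWrite/EndWrite, and readers retry until a read did not overlap a writer critical section. The hand-rolled seqAtomicLoadPoint and the point type are stand-ins for a go_template_instance-generated function; unlike the template above, this sketch does not special-case race-detector builds.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/sync"
)

// point is a multi-word value that cannot be loaded atomically on its own.
type point struct{ x, y int64 }

// seqAtomicLoadPoint retries until it obtains a copy of *ptr that did not
// race with a writer critical section in seq.
func seqAtomicLoadPoint(seq *sync.SeqCount, ptr *point) point {
	for {
		epoch := seq.BeginRead()
		val := *ptr // possibly torn; validated below
		if seq.ReadOk(epoch) {
			return val
		}
	}
}

func main() {
	var (
		seq sync.SeqCount
		p   point
	)
	seq.BeginWrite()
	p = point{x: 1, y: 2}
	seq.EndWrite()
	fmt.Println(seqAtomicLoadPoint(&seq, &p))
}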
diff --git a/pkg/sync/seqatomic/seqatomic_test.go b/pkg/sync/seqatomic/seqatomic_test.go
deleted file mode 100644
index 2c4568b07..000000000
--- a/pkg/sync/seqatomic/seqatomic_test.go
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package seqatomic
-
-import (
- "sync/atomic"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/sync"
-)
-
-func TestSeqAtomicLoadUncontended(t *testing.T) {
- var seq sync.SeqCount
- const want = 1
- data := want
- if got := SeqAtomicLoadInt(&seq, &data); got != want {
- t.Errorf("SeqAtomicLoadInt: got %v, wanted %v", got, want)
- }
-}
-
-func TestSeqAtomicLoadAfterWrite(t *testing.T) {
- var seq sync.SeqCount
- var data int
- const want = 1
- seq.BeginWrite()
- data = want
- seq.EndWrite()
- if got := SeqAtomicLoadInt(&seq, &data); got != want {
- t.Errorf("SeqAtomicLoadInt: got %v, wanted %v", got, want)
- }
-}
-
-func TestSeqAtomicLoadDuringWrite(t *testing.T) {
- var seq sync.SeqCount
- var data int
- const want = 1
- seq.BeginWrite()
- go func() {
- time.Sleep(time.Second)
- data = want
- seq.EndWrite()
- }()
- if got := SeqAtomicLoadInt(&seq, &data); got != want {
- t.Errorf("SeqAtomicLoadInt: got %v, wanted %v", got, want)
- }
-}
-
-func TestSeqAtomicTryLoadUncontended(t *testing.T) {
- var seq sync.SeqCount
- const want = 1
- data := want
- epoch := seq.BeginRead()
- if got, ok := SeqAtomicTryLoadInt(&seq, epoch, &data); !ok || got != want {
- t.Errorf("SeqAtomicTryLoadInt: got (%v, %v), wanted (%v, true)", got, ok, want)
- }
-}
-
-func TestSeqAtomicTryLoadDuringWrite(t *testing.T) {
- var seq sync.SeqCount
- var data int
- epoch := seq.BeginRead()
- seq.BeginWrite()
- if got, ok := SeqAtomicTryLoadInt(&seq, epoch, &data); ok {
- t.Errorf("SeqAtomicTryLoadInt: got (%v, true), wanted (_, false)", got)
- }
- seq.EndWrite()
-}
-
-func TestSeqAtomicTryLoadAfterWrite(t *testing.T) {
- var seq sync.SeqCount
- var data int
- epoch := seq.BeginRead()
- seq.BeginWrite()
- seq.EndWrite()
- if got, ok := SeqAtomicTryLoadInt(&seq, epoch, &data); ok {
- t.Errorf("SeqAtomicTryLoadInt: got (%v, true), wanted (_, false)", got)
- }
-}
-
-func BenchmarkSeqAtomicLoadIntUncontended(b *testing.B) {
- var seq sync.SeqCount
- const want = 42
- data := want
- b.RunParallel(func(pb *testing.PB) {
- for pb.Next() {
- if got := SeqAtomicLoadInt(&seq, &data); got != want {
- b.Fatalf("SeqAtomicLoadInt: got %v, wanted %v", got, want)
- }
- }
- })
-}
-
-func BenchmarkSeqAtomicTryLoadIntUncontended(b *testing.B) {
- var seq sync.SeqCount
- const want = 42
- data := want
- b.RunParallel(func(pb *testing.PB) {
- epoch := seq.BeginRead()
- for pb.Next() {
- if got, ok := SeqAtomicTryLoadInt(&seq, epoch, &data); !ok || got != want {
- b.Fatalf("SeqAtomicTryLoadInt: got (%v, %v), wanted (%v, true)", got, ok, want)
- }
- }
- })
-}
-
-// For comparison:
-func BenchmarkAtomicValueLoadIntUncontended(b *testing.B) {
- var a atomic.Value
- const want = 42
- a.Store(int(want))
- b.RunParallel(func(pb *testing.PB) {
- for pb.Next() {
- if got := a.Load().(int); got != want {
- b.Fatalf("atomic.Value.Load: got %v, wanted %v", got, want)
- }
- }
- })
-}
diff --git a/pkg/sync/seqcount_test.go b/pkg/sync/seqcount_test.go
deleted file mode 100644
index 3f5592e3e..000000000
--- a/pkg/sync/seqcount_test.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sync
-
-import (
- "testing"
- "time"
-)
-
-func TestSeqCountWriteUncontended(t *testing.T) {
- var seq SeqCount
- seq.BeginWrite()
- seq.EndWrite()
-}
-
-func TestSeqCountReadUncontended(t *testing.T) {
- var seq SeqCount
- epoch := seq.BeginRead()
- if !seq.ReadOk(epoch) {
- t.Errorf("ReadOk: got false, wanted true")
- }
-}
-
-func TestSeqCountBeginReadAfterWrite(t *testing.T) {
- var seq SeqCount
- var data int32
- const want = 1
- seq.BeginWrite()
- data = want
- seq.EndWrite()
- epoch := seq.BeginRead()
- if data != want {
- t.Errorf("Reader: got %v, wanted %v", data, want)
- }
- if !seq.ReadOk(epoch) {
- t.Errorf("ReadOk: got false, wanted true")
- }
-}
-
-func TestSeqCountBeginReadDuringWrite(t *testing.T) {
- var seq SeqCount
- var data int
- const want = 1
- seq.BeginWrite()
- go func() {
- time.Sleep(time.Second)
- data = want
- seq.EndWrite()
- }()
- epoch := seq.BeginRead()
- if data != want {
- t.Errorf("Reader: got %v, wanted %v", data, want)
- }
- if !seq.ReadOk(epoch) {
- t.Errorf("ReadOk: got false, wanted true")
- }
-}
-
-func TestSeqCountReadOkAfterWrite(t *testing.T) {
- var seq SeqCount
- epoch := seq.BeginRead()
- seq.BeginWrite()
- seq.EndWrite()
- if seq.ReadOk(epoch) {
- t.Errorf("ReadOk: got true, wanted false")
- }
-}
-
-func TestSeqCountReadOkDuringWrite(t *testing.T) {
- var seq SeqCount
- epoch := seq.BeginRead()
- seq.BeginWrite()
- if seq.ReadOk(epoch) {
- t.Errorf("ReadOk: got true, wanted false")
- }
- seq.EndWrite()
-}
-
-func BenchmarkSeqCountWriteUncontended(b *testing.B) {
- var seq SeqCount
- for i := 0; i < b.N; i++ {
- seq.BeginWrite()
- seq.EndWrite()
- }
-}
-
-func BenchmarkSeqCountReadUncontended(b *testing.B) {
- var seq SeqCount
- b.RunParallel(func(pb *testing.PB) {
- for pb.Next() {
- epoch := seq.BeginRead()
- if !seq.ReadOk(epoch) {
- b.Fatalf("ReadOk: got false, wanted true")
- }
- }
- })
-}
diff --git a/pkg/syncevent/BUILD b/pkg/syncevent/BUILD
deleted file mode 100644
index 42c553308..000000000
--- a/pkg/syncevent/BUILD
+++ /dev/null
@@ -1,35 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-licenses(["notice"])
-
-go_library(
- name = "syncevent",
- srcs = [
- "broadcaster.go",
- "receiver.go",
- "source.go",
- "syncevent.go",
- "waiter_unsafe.go",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/atomicbitops",
- "//pkg/sync",
- ],
-)
-
-go_test(
- name = "syncevent_test",
- size = "small",
- srcs = [
- "broadcaster_test.go",
- "syncevent_example_test.go",
- "waiter_test.go",
- ],
- library = ":syncevent",
- deps = [
- "//pkg/sleep",
- "//pkg/sync",
- "//pkg/waiter",
- ],
-)
diff --git a/pkg/syncevent/broadcaster.go b/pkg/syncevent/broadcaster.go
deleted file mode 100644
index dabf08895..000000000
--- a/pkg/syncevent/broadcaster.go
+++ /dev/null
@@ -1,220 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package syncevent
-
-import (
- "gvisor.dev/gvisor/pkg/sync"
-)
-
-// Broadcaster is an implementation of Source that supports any number of
-// subscribed Receivers.
-//
-// The zero value of Broadcaster is valid and has no subscribed Receivers.
-// Broadcaster is not copyable by value.
-//
-// All Broadcaster methods may be called concurrently from multiple goroutines.
-type Broadcaster struct {
- // Broadcaster is implemented as a hash table where keys are assigned by
- // the Broadcaster and returned as SubscriptionIDs, making it safe to use
- // the identity function for hashing. The hash table resolves collisions
- // using linear probing and features Robin Hood insertion and backward
- // shift deletion in order to support a relatively high load factor
- // efficiently, which matters since the cost of Broadcast is linear in the
- // size of the table.
-
- // mu protects the following fields.
- mu sync.Mutex
-
- // Invariants: len(table) is 0 or a power of 2.
- table []broadcasterSlot
-
- // load is the number of entries in table with receiver != nil.
- load int
-
- lastID SubscriptionID
-}
-
-type broadcasterSlot struct {
- // Invariants: If receiver == nil, then filter == NoEvents and id == 0.
- // Otherwise, id != 0.
- receiver *Receiver
- filter Set
- id SubscriptionID
-}
-
-const (
- broadcasterMinNonZeroTableSize = 2 // must be a power of 2 > 1
-
- broadcasterMaxLoadNum = 13
- broadcasterMaxLoadDen = 16
-)
-
-// SubscribeEvents implements Source.SubscribeEvents.
-func (b *Broadcaster) SubscribeEvents(r *Receiver, filter Set) SubscriptionID {
- b.mu.Lock()
-
- // Assign an ID for this subscription.
- b.lastID++
- id := b.lastID
-
- // Expand the table if over the maximum load factor:
- //
- // load / len(b.table) > broadcasterMaxLoadNum / broadcasterMaxLoadDen
- // load * broadcasterMaxLoadDen > broadcasterMaxLoadNum * len(b.table)
- b.load++
- if (b.load * broadcasterMaxLoadDen) > (broadcasterMaxLoadNum * len(b.table)) {
- // Double the number of slots in the new table.
- newlen := broadcasterMinNonZeroTableSize
- if len(b.table) != 0 {
- newlen = 2 * len(b.table)
- }
- if newlen <= cap(b.table) {
- // Reuse excess capacity in the current table, moving entries not
- // already in their first-probed positions to better ones.
- newtable := b.table[:newlen]
- newmask := uint64(newlen - 1)
- for i := range b.table {
- if b.table[i].receiver != nil && uint64(b.table[i].id)&newmask != uint64(i) {
- entry := b.table[i]
- b.table[i] = broadcasterSlot{}
- broadcasterTableInsert(newtable, entry.id, entry.receiver, entry.filter)
- }
- }
- b.table = newtable
- } else {
- newtable := make([]broadcasterSlot, newlen)
- // Copy existing entries to the new table.
- for i := range b.table {
- if b.table[i].receiver != nil {
- broadcasterTableInsert(newtable, b.table[i].id, b.table[i].receiver, b.table[i].filter)
- }
- }
- // Switch to the new table.
- b.table = newtable
- }
- }
-
- broadcasterTableInsert(b.table, id, r, filter)
- b.mu.Unlock()
- return id
-}
-
-// Preconditions:
-// * table must not be full.
-// * len(table) is a power of 2.
-func broadcasterTableInsert(table []broadcasterSlot, id SubscriptionID, r *Receiver, filter Set) {
- entry := broadcasterSlot{
- receiver: r,
- filter: filter,
- id: id,
- }
- mask := uint64(len(table) - 1)
- i := uint64(id) & mask
- disp := uint64(0)
- for {
- if table[i].receiver == nil {
- table[i] = entry
- return
- }
- // If we've been displaced farther from our first-probed slot than the
- // element stored in this one, swap elements and switch to inserting
- // the replaced one. (This is Robin Hood insertion.)
- slotDisp := (i - uint64(table[i].id)) & mask
- if disp > slotDisp {
- table[i], entry = entry, table[i]
- disp = slotDisp
- }
- i = (i + 1) & mask
- disp++
- }
-}
-
-// UnsubscribeEvents implements Source.UnsubscribeEvents.
-func (b *Broadcaster) UnsubscribeEvents(id SubscriptionID) {
- b.mu.Lock()
-
- mask := uint64(len(b.table) - 1)
- i := uint64(id) & mask
- for {
- if b.table[i].id == id {
- // Found the element to remove. Move all subsequent elements
- // backward until we either find an empty slot, or an element that
- // is already in its first-probed slot. (This is backward shift
- // deletion.)
- for {
- next := (i + 1) & mask
- if b.table[next].receiver == nil {
- break
- }
- if uint64(b.table[next].id)&mask == next {
- break
- }
- b.table[i] = b.table[next]
- i = next
- }
- b.table[i] = broadcasterSlot{}
- break
- }
- i = (i + 1) & mask
- }
-
- // If a table 1/4 of the current size would still be at or under the
- // maximum load factor (i.e. the current table size is at least two
- // expansions bigger than necessary), halve the size of the table to reduce
- // the cost of Broadcast. Since we are concerned with iteration time and
- // not memory usage, reuse the existing slice to reduce future allocations
- // from table re-expansion.
- b.load--
- if len(b.table) > broadcasterMinNonZeroTableSize && (b.load*(4*broadcasterMaxLoadDen)) <= (broadcasterMaxLoadNum*len(b.table)) {
- newlen := len(b.table) / 2
- newtable := b.table[:newlen]
- for i := newlen; i < len(b.table); i++ {
- if b.table[i].receiver != nil {
- broadcasterTableInsert(newtable, b.table[i].id, b.table[i].receiver, b.table[i].filter)
- b.table[i] = broadcasterSlot{}
- }
- }
- b.table = newtable
- }
-
- b.mu.Unlock()
-}
-
-// Broadcast notifies all Receivers subscribed to the Broadcaster of the subset
-// of events to which they subscribed. The order in which Receivers are
-// notified is unspecified.
-func (b *Broadcaster) Broadcast(events Set) {
- b.mu.Lock()
- for i := range b.table {
- if intersection := events & b.table[i].filter; intersection != 0 {
- // We don't need to check if broadcasterSlot.receiver is nil, since
- // if it is then broadcasterSlot.filter is 0.
- b.table[i].receiver.Notify(intersection)
- }
- }
- b.mu.Unlock()
-}
-
-// FilteredEvents returns the set of events for which Broadcast will notify at
-// least one Receiver, i.e. the union of filters for all subscribed Receivers.
-func (b *Broadcaster) FilteredEvents() Set {
- var es Set
- b.mu.Lock()
- for i := range b.table {
- es |= b.table[i].filter
- }
- b.mu.Unlock()
- return es
-}
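As a usage sketch (not part of this change), the snippet below exercises the Broadcaster API that the table machinery above supports: each subscriber registers a Receiver with an event filter, Broadcast delivers the intersection of the broadcast set and each filter, and UnsubscribeEvents stops further delivery.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/syncevent"
)

func main() {
	const (
		evRead  = syncevent.Set(1 << 0)
		evWrite = syncevent.Set(1 << 1)
	)

	var br syncevent.Broadcaster
	var r, w syncevent.Waiter
	r.Init()
	w.Init()

	br.SubscribeEvents(r.Receiver(), evRead)
	idW := br.SubscribeEvents(w.Receiver(), evWrite)

	br.Broadcast(evRead | evWrite)
	fmt.Printf("reader pending: %#x\n", r.Receiver().PendingAndAckAll()) // 0x1
	fmt.Printf("writer pending: %#x\n", w.Receiver().PendingAndAckAll()) // 0x2

	// Unsubscribe the writer; later broadcasts no longer reach it.
	br.UnsubscribeEvents(idW)
	br.Broadcast(evWrite)
	fmt.Printf("writer pending after unsubscribe: %#x\n", w.Pending()) // 0x0
}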
diff --git a/pkg/syncevent/broadcaster_test.go b/pkg/syncevent/broadcaster_test.go
deleted file mode 100644
index e88779e23..000000000
--- a/pkg/syncevent/broadcaster_test.go
+++ /dev/null
@@ -1,376 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package syncevent
-
-import (
- "fmt"
- "math/rand"
- "testing"
-
- "gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-func TestBroadcasterFilter(t *testing.T) {
- const numReceivers = 2 * MaxEvents
-
- var br Broadcaster
- ws := make([]Waiter, numReceivers)
- for i := range ws {
- ws[i].Init()
- br.SubscribeEvents(ws[i].Receiver(), 1<<(i%MaxEvents))
- }
- for ev := 0; ev < MaxEvents; ev++ {
- br.Broadcast(1 << ev)
- for i := range ws {
- want := NoEvents
- if i%MaxEvents == ev {
- want = 1 << ev
- }
- if got := ws[i].Receiver().PendingAndAckAll(); got != want {
- t.Errorf("after Broadcast of event %d: waiter %d has pending event set %#x, wanted %#x", ev, i, got, want)
- }
- }
- }
-}
-
-// TestBroadcasterManySubscriptions tests that subscriptions are not lost by
-// table expansion/compaction.
-func TestBroadcasterManySubscriptions(t *testing.T) {
- const numReceivers = 5000 // arbitrary
-
- var br Broadcaster
- ws := make([]Waiter, numReceivers)
- for i := range ws {
- ws[i].Init()
- }
-
- ids := make([]SubscriptionID, numReceivers)
- for i := 0; i < numReceivers; i++ {
- // Subscribe receiver i.
- ids[i] = br.SubscribeEvents(ws[i].Receiver(), 1)
- // Check that receivers [0, i] are subscribed.
- br.Broadcast(1)
- for j := 0; j <= i; j++ {
- if ws[j].Pending() != 1 {
- t.Errorf("receiver %d did not receive an event after subscription of receiver %d", j, i)
- }
- ws[j].Ack(1)
- }
- }
-
- // Generate a random order for unsubscriptions.
- unsub := rand.Perm(numReceivers)
- for i := 0; i < numReceivers; i++ {
- // Unsubscribe receiver unsub[i].
- br.UnsubscribeEvents(ids[unsub[i]])
- // Check that receivers unsub[0..i] are no longer subscribed, and that
- // receivers unsub[i+1..numReceivers-1] are still subscribed.
- br.Broadcast(1)
- for j := 0; j <= i; j++ {
- if ws[unsub[j]].Pending() != 0 {
- t.Errorf("unsub iteration %d: receiver %d received an event after unsubscription of receiver %d", i, unsub[j], unsub[i])
- }
- }
- for j := i + 1; j < numReceivers; j++ {
- if ws[unsub[j]].Pending() != 1 {
- t.Errorf("unsub iteration %d: receiver %d did not receive an event after unsubscription of receiver %d", i, unsub[j], unsub[i])
- }
- ws[unsub[j]].Ack(1)
- }
- }
-}
-
-var (
- receiverCountsNonZero = []int{1, 4, 16, 64}
- receiverCountsIncludingZero = append([]int{0}, receiverCountsNonZero...)
-)
-
-// BenchmarkBroadcasterX, BenchmarkMapX, and BenchmarkQueueX benchmark usage
-// pattern X (described in terms of Broadcaster) with Broadcaster, a
-// Mutex-protected map[*Receiver]Set, and waiter.Queue respectively.
-
-// BenchmarkXxxSubscribeUnsubscribe measures the cost of a Subscribe/Unsubscribe
-// cycle.
-
-func BenchmarkBroadcasterSubscribeUnsubscribe(b *testing.B) {
- var br Broadcaster
- var w Waiter
- w.Init()
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- id := br.SubscribeEvents(w.Receiver(), 1)
- br.UnsubscribeEvents(id)
- }
-}
-
-func BenchmarkMapSubscribeUnsubscribe(b *testing.B) {
- var mu sync.Mutex
- m := make(map[*Receiver]Set)
- var w Waiter
- w.Init()
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- mu.Lock()
- m[w.Receiver()] = Set(1)
- mu.Unlock()
- mu.Lock()
- delete(m, w.Receiver())
- mu.Unlock()
- }
-}
-
-func BenchmarkQueueSubscribeUnsubscribe(b *testing.B) {
- var q waiter.Queue
- e, _ := waiter.NewChannelEntry(nil)
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- q.EventRegister(&e, 1)
- q.EventUnregister(&e)
- }
-}
-
-// BenchmarkXxxSubscribeUnsubscribeBatch is similar to
-// BenchmarkXxxSubscribeUnsubscribe, but subscribes and unsubscribes a large
-// number of Receivers at a time in order to measure the amortized overhead of
-// table expansion/compaction. (Since waiter.Queue is implemented using a
-// linked list, BenchmarkQueueSubscribeUnsubscribe and
-// BenchmarkQueueSubscribeUnsubscribeBatch should produce nearly the same
-// result.)
-
-const numBatchReceivers = 1000
-
-func BenchmarkBroadcasterSubscribeUnsubscribeBatch(b *testing.B) {
- var br Broadcaster
- ws := make([]Waiter, numBatchReceivers)
- for i := range ws {
- ws[i].Init()
- }
- ids := make([]SubscriptionID, numBatchReceivers)
-
- // Generate a random order for unsubscriptions.
- unsub := rand.Perm(numBatchReceivers)
-
- b.ResetTimer()
- for i := 0; i < b.N/numBatchReceivers; i++ {
- for j := 0; j < numBatchReceivers; j++ {
- ids[j] = br.SubscribeEvents(ws[j].Receiver(), 1)
- }
- for j := 0; j < numBatchReceivers; j++ {
- br.UnsubscribeEvents(ids[unsub[j]])
- }
- }
-}
-
-func BenchmarkMapSubscribeUnsubscribeBatch(b *testing.B) {
- var mu sync.Mutex
- m := make(map[*Receiver]Set)
- ws := make([]Waiter, numBatchReceivers)
- for i := range ws {
- ws[i].Init()
- }
-
- // Generate a random order for unsubscriptions.
- unsub := rand.Perm(numBatchReceivers)
-
- b.ResetTimer()
- for i := 0; i < b.N/numBatchReceivers; i++ {
- for j := 0; j < numBatchReceivers; j++ {
- mu.Lock()
- m[ws[j].Receiver()] = Set(1)
- mu.Unlock()
- }
- for j := 0; j < numBatchReceivers; j++ {
- mu.Lock()
- delete(m, ws[unsub[j]].Receiver())
- mu.Unlock()
- }
- }
-}
-
-func BenchmarkQueueSubscribeUnsubscribeBatch(b *testing.B) {
- var q waiter.Queue
- es := make([]waiter.Entry, numBatchReceivers)
- for i := range es {
- es[i], _ = waiter.NewChannelEntry(nil)
- }
-
- // Generate a random order for unsubscriptions.
- unsub := rand.Perm(numBatchReceivers)
-
- b.ResetTimer()
- for i := 0; i < b.N/numBatchReceivers; i++ {
- for j := 0; j < numBatchReceivers; j++ {
- q.EventRegister(&es[j], 1)
- }
- for j := 0; j < numBatchReceivers; j++ {
- q.EventUnregister(&es[unsub[j]])
- }
- }
-}
-
-// BenchmarkXxxBroadcastRedundant measures how long it takes to Broadcast
-// already-pending events to multiple Receivers.
-
-func BenchmarkBroadcasterBroadcastRedundant(b *testing.B) {
- for _, n := range receiverCountsIncludingZero {
- b.Run(fmt.Sprintf("%d", n), func(b *testing.B) {
- var br Broadcaster
- ws := make([]Waiter, n)
- for i := range ws {
- ws[i].Init()
- br.SubscribeEvents(ws[i].Receiver(), 1)
- }
- br.Broadcast(1)
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- br.Broadcast(1)
- }
- })
- }
-}
-
-func BenchmarkMapBroadcastRedundant(b *testing.B) {
- for _, n := range receiverCountsIncludingZero {
- b.Run(fmt.Sprintf("%d", n), func(b *testing.B) {
- var mu sync.Mutex
- m := make(map[*Receiver]Set)
- ws := make([]Waiter, n)
- for i := range ws {
- ws[i].Init()
- m[ws[i].Receiver()] = Set(1)
- }
- mu.Lock()
- for r := range m {
- r.Notify(1)
- }
- mu.Unlock()
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- mu.Lock()
- for r := range m {
- r.Notify(1)
- }
- mu.Unlock()
- }
- })
- }
-}
-
-func BenchmarkQueueBroadcastRedundant(b *testing.B) {
- for _, n := range receiverCountsIncludingZero {
- b.Run(fmt.Sprintf("%d", n), func(b *testing.B) {
- var q waiter.Queue
- for i := 0; i < n; i++ {
- e, _ := waiter.NewChannelEntry(nil)
- q.EventRegister(&e, 1)
- }
- q.Notify(1)
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- q.Notify(1)
- }
- })
- }
-}
-
-// BenchmarkXxxBroadcastAck measures how long it takes to Broadcast events to
-// multiple Receivers, check that all Receivers have received the event, and
-// clear the event from all Receivers.
-
-func BenchmarkBroadcasterBroadcastAck(b *testing.B) {
- for _, n := range receiverCountsNonZero {
- b.Run(fmt.Sprintf("%d", n), func(b *testing.B) {
- var br Broadcaster
- ws := make([]Waiter, n)
- for i := range ws {
- ws[i].Init()
- br.SubscribeEvents(ws[i].Receiver(), 1)
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- br.Broadcast(1)
- for j := range ws {
- if got, want := ws[j].Pending(), Set(1); got != want {
- b.Fatalf("Receiver.Pending(): got %#x, wanted %#x", got, want)
- }
- ws[j].Ack(1)
- }
- }
- })
- }
-}
-
-func BenchmarkMapBroadcastAck(b *testing.B) {
- for _, n := range receiverCountsNonZero {
- b.Run(fmt.Sprintf("%d", n), func(b *testing.B) {
- var mu sync.Mutex
- m := make(map[*Receiver]Set)
- ws := make([]Waiter, n)
- for i := range ws {
- ws[i].Init()
- m[ws[i].Receiver()] = Set(1)
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- mu.Lock()
- for r := range m {
- r.Notify(1)
- }
- mu.Unlock()
- for j := range ws {
- if got, want := ws[j].Pending(), Set(1); got != want {
- b.Fatalf("Receiver.Pending(): got %#x, wanted %#x", got, want)
- }
- ws[j].Ack(1)
- }
- }
- })
- }
-}
-
-func BenchmarkQueueBroadcastAck(b *testing.B) {
- for _, n := range receiverCountsNonZero {
- b.Run(fmt.Sprintf("%d", n), func(b *testing.B) {
- var q waiter.Queue
- chs := make([]chan struct{}, n)
- for i := range chs {
- e, ch := waiter.NewChannelEntry(nil)
- q.EventRegister(&e, 1)
- chs[i] = ch
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- q.Notify(1)
- for _, ch := range chs {
- select {
- case <-ch:
- default:
- b.Fatalf("channel did not receive event")
- }
- }
- }
- })
- }
-}
diff --git a/pkg/syncevent/receiver.go b/pkg/syncevent/receiver.go
deleted file mode 100644
index 5c86e5400..000000000
--- a/pkg/syncevent/receiver.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package syncevent
-
-import (
- "sync/atomic"
-
- "gvisor.dev/gvisor/pkg/atomicbitops"
-)
-
-// Receiver is an event sink that holds pending events and invokes a callback
-// whenever new events become pending. Receiver's methods may be called
-// concurrently from multiple goroutines.
-//
-// Receiver.Init() must be called before first use.
-type Receiver struct {
- // pending is the set of pending events. pending is accessed using atomic
- // memory operations.
- pending uint64
-
- // cb is notified when new events become pending. cb is immutable after
- // Init().
- cb ReceiverCallback
-}
-
-// ReceiverCallback receives callbacks from a Receiver.
-type ReceiverCallback interface {
- // NotifyPending is called when the corresponding Receiver has new pending
- // events.
- //
- // NotifyPending is called synchronously from Receiver.Notify(), so
- // implementations must not take locks that may be held by callers of
- // Receiver.Notify(). NotifyPending may be called concurrently from
- // multiple goroutines.
- NotifyPending()
-}
-
-// Init must be called before first use of r.
-func (r *Receiver) Init(cb ReceiverCallback) {
- r.cb = cb
-}
-
-// Pending returns the set of pending events.
-func (r *Receiver) Pending() Set {
- return Set(atomic.LoadUint64(&r.pending))
-}
-
-// Notify sets the given events as pending.
-func (r *Receiver) Notify(es Set) {
- p := Set(atomic.LoadUint64(&r.pending))
- // Optimization: Skip the atomic CAS on r.pending if all events are
- // already pending.
- if p&es == es {
- return
- }
- // When this is uncontended (the common case), CAS is faster than
- // atomic-OR because the former is inlined and the latter (which we
- // implement in assembly ourselves) is not.
- if !atomic.CompareAndSwapUint64(&r.pending, uint64(p), uint64(p|es)) {
- // If the CAS fails, fall back to atomic-OR.
- atomicbitops.OrUint64(&r.pending, uint64(es))
- }
- r.cb.NotifyPending()
-}
-
-// Ack unsets the given events as pending.
-func (r *Receiver) Ack(es Set) {
- p := Set(atomic.LoadUint64(&r.pending))
- // Optimization: Skip the atomic CAS on r.pending if all events are
- // already not pending.
- if p&es == 0 {
- return
- }
- // When this is uncontended (the common case), CAS is faster than
- // atomic-AND because the former is inlined and the latter (which we
- // implement in assembly ourselves) is not.
- if !atomic.CompareAndSwapUint64(&r.pending, uint64(p), uint64(p&^es)) {
- // If the CAS fails, fall back to atomic-AND.
- atomicbitops.AndUint64(&r.pending, ^uint64(es))
- }
-}
-
-// PendingAndAckAll unsets all events as pending and returns the set of
-// previously-pending events.
-//
-// PendingAndAckAll should only be used in preference to a call to Pending
-// followed by a conditional call to Ack when the caller expects events to be
-// pending (e.g. after a call to ReceiverCallback.NotifyPending()).
-func (r *Receiver) PendingAndAckAll() Set {
- return Set(atomic.SwapUint64(&r.pending, 0))
-}
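For illustration only (not part of this change), the sketch below implements ReceiverCallback with a buffered channel. It approximates what Waiter does without goroutine parking: NotifyPending must not block or take locks held by callers of Notify, hence the non-blocking send. The chanNotifier type is a hypothetical name introduced for this sketch.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/syncevent"
)

// chanNotifier implements syncevent.ReceiverCallback by poking a channel.
type chanNotifier struct{ ch chan struct{} }

func (n *chanNotifier) NotifyPending() {
	select {
	case n.ch <- struct{}{}:
	default: // already signaled; NotifyPending must not block
	}
}

func main() {
	n := &chanNotifier{ch: make(chan struct{}, 1)}
	var r syncevent.Receiver
	r.Init(n)

	go r.Notify(syncevent.Set(1))

	<-n.ch // wait for the callback; pending events were set before it ran
	fmt.Printf("pending: %#x\n", r.PendingAndAckAll())
}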
diff --git a/pkg/syncevent/source.go b/pkg/syncevent/source.go
deleted file mode 100644
index d3d0f34c5..000000000
--- a/pkg/syncevent/source.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package syncevent
-
-// Source represents an event source.
-type Source interface {
- // SubscribeEvents causes the Source to notify the given Receiver of the
- // given subset of events.
- //
- // Preconditions:
- // * r != nil.
- // * The ReceiverCallback for r must not take locks that are ordered
- // prior to the Source; for example, it cannot call any Source
- // methods.
- SubscribeEvents(r *Receiver, filter Set) SubscriptionID
-
- // UnsubscribeEvents causes the Source to stop notifying the Receiver
- // subscribed by a previous call to SubscribeEvents that returned the given
- // SubscriptionID.
- //
- // Preconditions: UnsubscribeEvents may be called at most once for any
- // given SubscriptionID.
- UnsubscribeEvents(id SubscriptionID)
-}
-
-// SubscriptionID identifies a call to Source.SubscribeEvents.
-type SubscriptionID uint64
-
-// UnsubscribeAndAck is a convenience function that unsubscribes r from the
-// given events from src and also clears them from r.
-func UnsubscribeAndAck(src Source, r *Receiver, filter Set, id SubscriptionID) {
- src.UnsubscribeEvents(id)
- r.Ack(filter)
-}
-
-// NoopSource implements Source by never sending events to subscribed
-// Receivers.
-type NoopSource struct{}
-
-// SubscribeEvents implements Source.SubscribeEvents.
-func (NoopSource) SubscribeEvents(*Receiver, Set) SubscriptionID {
- return 0
-}
-
-// UnsubscribeEvents implements Source.UnsubscribeEvents.
-func (NoopSource) UnsubscribeEvents(SubscriptionID) {
-}
-
-// See Broadcaster for a non-noop implementation of Source.
diff --git a/pkg/syncevent/syncevent.go b/pkg/syncevent/syncevent.go
deleted file mode 100644
index 9fb6a06de..000000000
--- a/pkg/syncevent/syncevent.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package syncevent provides efficient primitives for goroutine
-// synchronization based on event bitmasks.
-package syncevent
-
-// Set is a bitmask where each bit represents a distinct user-defined event.
-// The event package does not treat any bits in Set specially.
-type Set uint64
-
-const (
- // NoEvents is a Set containing no events.
- NoEvents = Set(0)
-
- // AllEvents is a Set containing all possible events.
- AllEvents = ^Set(0)
-
- // MaxEvents is the number of distinct events that can be represented by a Set.
- MaxEvents = 64
-)
diff --git a/pkg/syncevent/syncevent_example_test.go b/pkg/syncevent/syncevent_example_test.go
deleted file mode 100644
index bfb18e2ea..000000000
--- a/pkg/syncevent/syncevent_example_test.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package syncevent
-
-import (
- "fmt"
- "sync/atomic"
- "time"
-)
-
-func Example_ioReadinessInterruptible() {
- const (
- evReady = Set(1 << iota)
- evInterrupt
- )
- errNotReady := fmt.Errorf("not ready for I/O")
-
- // State of some I/O object.
- var (
- br Broadcaster
- ready uint32
- )
- doIO := func() error {
- if atomic.LoadUint32(&ready) == 0 {
- return errNotReady
- }
- return nil
- }
- go func() {
- // The I/O object eventually becomes ready for I/O.
- time.Sleep(100 * time.Millisecond)
- // When it does, it first ensures that future calls to doIO() succeed,
- // then broadcasts the readiness event to Receivers.
- atomic.StoreUint32(&ready, 1)
- br.Broadcast(evReady)
- }()
-
- // Each user of the I/O object owns a Waiter.
- var w Waiter
- w.Init()
- // The Waiter may be asynchronously interruptible, e.g. for signal
- // handling in the sentry.
- go func() {
- time.Sleep(200 * time.Millisecond)
- w.Receiver().Notify(evInterrupt)
- }()
-
- // To use the I/O object:
- //
- // Optionally, if the I/O object is likely to be ready, attempt I/O first.
- err := doIO()
- if err == nil {
- // Success, we're done.
- return /* nil */
- }
- if err != errNotReady {
- // Failure, I/O failed for some reason other than readiness.
- return /* err */
- }
- // Subscribe for readiness events from the I/O object.
- id := br.SubscribeEvents(w.Receiver(), evReady)
- // When we are finished blocking, unsubscribe from readiness events and
- // remove readiness events from the pending event set.
- defer UnsubscribeAndAck(&br, w.Receiver(), evReady, id)
- for {
- // Attempt I/O again. This must be done after the call to SubscribeEvents,
- // since the I/O object might have become ready between the previous call
- // to doIO and the call to SubscribeEvents.
- err = doIO()
- if err == nil {
- return /* nil */
- }
- if err != errNotReady {
- return /* err */
- }
- // Block until either the I/O object indicates it is ready, or we are
- // interrupted.
- events := w.Wait()
- if events&evInterrupt != 0 {
- // In the specific case of sentry signal handling, signal delivery
- // is handled by another system, so we aren't responsible for
- // acknowledging evInterrupt.
- return /* errInterrupted */
- }
- // Note that, in a concurrent context, the I/O object might become
- // ready and then not ready again. To handle this:
- //
- // - evReady must be acknowledged before calling doIO() again (rather
- // than after), so that if the I/O object becomes ready *again* after
- // the call to doIO(), the readiness event is not lost.
- //
- // - We must loop instead of just calling doIO() once after receiving
- // evReady.
- w.Ack(evReady)
- }
-}
diff --git a/pkg/syncevent/waiter_test.go b/pkg/syncevent/waiter_test.go
deleted file mode 100644
index 3c8cbcdd8..000000000
--- a/pkg/syncevent/waiter_test.go
+++ /dev/null
@@ -1,414 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package syncevent
-
-import (
- "sync/atomic"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/sleep"
- "gvisor.dev/gvisor/pkg/sync"
-)
-
-func TestWaiterAlreadyPending(t *testing.T) {
- var w Waiter
- w.Init()
- want := Set(1)
- w.Notify(want)
- if got := w.Wait(); got != want {
- t.Errorf("Waiter.Wait: got %#x, wanted %#x", got, want)
- }
-}
-
-func TestWaiterAsyncNotify(t *testing.T) {
- var w Waiter
- w.Init()
- want := Set(1)
- go func() {
- time.Sleep(100 * time.Millisecond)
- w.Notify(want)
- }()
- if got := w.Wait(); got != want {
- t.Errorf("Waiter.Wait: got %#x, wanted %#x", got, want)
- }
-}
-
-func TestWaiterWaitFor(t *testing.T) {
- var w Waiter
- w.Init()
- evWaited := Set(1)
- evOther := Set(2)
- w.Notify(evOther)
- notifiedEvent := uint32(0)
- go func() {
- time.Sleep(100 * time.Millisecond)
- atomic.StoreUint32(&notifiedEvent, 1)
- w.Notify(evWaited)
- }()
- if got, want := w.WaitFor(evWaited), evWaited|evOther; got != want {
- t.Errorf("Waiter.WaitFor: got %#x, wanted %#x", got, want)
- }
- if atomic.LoadUint32(&notifiedEvent) == 0 {
- t.Errorf("Waiter.WaitFor returned before goroutine notified waited-for event")
- }
-}
-
-func TestWaiterWaitAndAckAll(t *testing.T) {
- var w Waiter
- w.Init()
- w.Notify(AllEvents)
- if got := w.WaitAndAckAll(); got != AllEvents {
- t.Errorf("Waiter.WaitAndAckAll: got %#x, wanted %#x", got, AllEvents)
- }
- if got := w.Pending(); got != NoEvents {
- t.Errorf("Waiter.WaitAndAckAll did not ack all events: got %#x, wanted 0", got)
- }
-}
-
-// BenchmarkWaiterX, BenchmarkSleeperX, and BenchmarkChannelX benchmark usage
-// pattern X (described in terms of Waiter) with Waiter, sleep.Sleeper, and
-// buffered chan struct{} respectively. When the maximum number of event
-// sources is relevant, we use 3 event sources because this is representative
-// of the kernel.Task.block() use case: an interrupt source, a timeout source,
-// and the actual event source being waited on.
-
-// Event set used by most benchmarks.
-const evBench Set = 1
-
-// BenchmarkXxxNotifyRedundant measures how long it takes to notify a Waiter of
-// an event that is already pending.
-
-func BenchmarkWaiterNotifyRedundant(b *testing.B) {
- var w Waiter
- w.Init()
- w.Notify(evBench)
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- w.Notify(evBench)
- }
-}
-
-func BenchmarkSleeperNotifyRedundant(b *testing.B) {
- var s sleep.Sleeper
- var w sleep.Waker
- s.AddWaker(&w, 0)
- w.Assert()
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- w.Assert()
- }
-}
-
-func BenchmarkChannelNotifyRedundant(b *testing.B) {
- ch := make(chan struct{}, 1)
- ch <- struct{}{}
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- select {
- case ch <- struct{}{}:
- default:
- }
- }
-}
-
-// BenchmarkXxxNotifyWaitAck measures how long it takes to notify a Waiter of an
-// event, return that event using a blocking check, and then unset the event as
-// pending.
-
-func BenchmarkWaiterNotifyWaitAck(b *testing.B) {
- var w Waiter
- w.Init()
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- w.Notify(evBench)
- w.Wait()
- w.Ack(evBench)
- }
-}
-
-func BenchmarkSleeperNotifyWaitAck(b *testing.B) {
- var s sleep.Sleeper
- var w sleep.Waker
- s.AddWaker(&w, 0)
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- w.Assert()
- s.Fetch(true)
- }
-}
-
-func BenchmarkChannelNotifyWaitAck(b *testing.B) {
- ch := make(chan struct{}, 1)
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- // notify
- select {
- case ch <- struct{}{}:
- default:
- }
-
- // wait + ack
- <-ch
- }
-}
-
-// BenchmarkSleeperMultiNotifyWaitAck is equivalent to
-// BenchmarkSleeperNotifyWaitAck, but also includes allocation of a
-// temporary sleep.Waker. This is necessary when multiple goroutines may wait
-// for the same event, since each sleep.Waker can wake only a single
-// sleep.Sleeper.
-//
-// The syncevent package does not require a distinct object for each
-// waiter-waker relationship, so BenchmarkWaiterNotifyWaitAck and
-// BenchmarkWaiterMultiNotifyWaitAck would be identical. The analogous state
-// for channels, runtime.sudog, is inescapably runtime-allocated, so
-// BenchmarkChannelNotifyWaitAck and BenchmarkChannelMultiNotifyWaitAck would
-// also be identical.
-
-func BenchmarkSleeperMultiNotifyWaitAck(b *testing.B) {
- var s sleep.Sleeper
- // The sleep package doesn't provide sync.Pool allocation of Wakers;
- // we do for a fairer comparison.
- wakerPool := sync.Pool{
- New: func() interface{} {
- return &sleep.Waker{}
- },
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- w := wakerPool.Get().(*sleep.Waker)
- s.AddWaker(w, 0)
- w.Assert()
- s.Fetch(true)
- s.Done()
- wakerPool.Put(w)
- }
-}
-
-// BenchmarkXxxTempNotifyWaitAck is equivalent to NotifyWaitAck, but also
-// includes allocation of a temporary Waiter. This models the case where a
-// goroutine not already associated with a Waiter needs one in order to block.
-//
-// The analogous state for channels is built into runtime.g, so
-// BenchmarkChannelNotifyWaitAck and BenchmarkChannelTempNotifyWaitAck would be
-// identical.
-
-func BenchmarkWaiterTempNotifyWaitAck(b *testing.B) {
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- w := GetWaiter()
- w.Notify(evBench)
- w.Wait()
- w.Ack(evBench)
- PutWaiter(w)
- }
-}
-
-func BenchmarkSleeperTempNotifyWaitAck(b *testing.B) {
- // The sleep package doesn't provide sync.Pool allocation of Sleepers;
- // we do for a fairer comparison.
- sleeperPool := sync.Pool{
- New: func() interface{} {
- return &sleep.Sleeper{}
- },
- }
- var w sleep.Waker
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- s := sleeperPool.Get().(*sleep.Sleeper)
- s.AddWaker(&w, 0)
- w.Assert()
- s.Fetch(true)
- s.Done()
- sleeperPool.Put(s)
- }
-}
-
-// BenchmarkXxxNotifyWaitMultiAck is equivalent to NotifyWaitAck, but allows
-// for multiple event sources.
-
-func BenchmarkWaiterNotifyWaitMultiAck(b *testing.B) {
- var w Waiter
- w.Init()
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- w.Notify(evBench)
- if e := w.Wait(); e != evBench {
- b.Fatalf("Wait: got %#x, wanted %#x", e, evBench)
- }
- w.Ack(evBench)
- }
-}
-
-func BenchmarkSleeperNotifyWaitMultiAck(b *testing.B) {
- var s sleep.Sleeper
- var ws [3]sleep.Waker
- for i := range ws {
- s.AddWaker(&ws[i], i)
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- ws[0].Assert()
- if id, _ := s.Fetch(true); id != 0 {
- b.Fatalf("Fetch: got %d, wanted 0", id)
- }
- }
-}
-
-func BenchmarkChannelNotifyWaitMultiAck(b *testing.B) {
- ch0 := make(chan struct{}, 1)
- ch1 := make(chan struct{}, 1)
- ch2 := make(chan struct{}, 1)
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- // notify
- select {
- case ch0 <- struct{}{}:
- default:
- }
-
- // wait + clear
- select {
- case <-ch0:
- // ok
- case <-ch1:
- b.Fatalf("received from ch1")
- case <-ch2:
- b.Fatalf("received from ch2")
- }
- }
-}
-
-// BenchmarkXxxNotifyAsyncWaitAck measures how long it takes to wait for an
-// event while another goroutine signals the event. This assumes that a new
-// goroutine doesn't run immediately (i.e. the creator of a new goroutine is
-// allowed to go to sleep before the new goroutine has a chance to run).
-
-func BenchmarkWaiterNotifyAsyncWaitAck(b *testing.B) {
- var w Waiter
- w.Init()
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- go func() {
- w.Notify(1)
- }()
- w.Wait()
- w.Ack(evBench)
- }
-}
-
-func BenchmarkSleeperNotifyAsyncWaitAck(b *testing.B) {
- var s sleep.Sleeper
- var w sleep.Waker
- s.AddWaker(&w, 0)
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- go func() {
- w.Assert()
- }()
- s.Fetch(true)
- }
-}
-
-func BenchmarkChannelNotifyAsyncWaitAck(b *testing.B) {
- ch := make(chan struct{}, 1)
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- go func() {
- select {
- case ch <- struct{}{}:
- default:
- }
- }()
- <-ch
- }
-}
-
-// BenchmarkXxxNotifyAsyncWaitMultiAck is equivalent to NotifyAsyncWaitAck, but
-// allows for multiple event sources.
-
-func BenchmarkWaiterNotifyAsyncWaitMultiAck(b *testing.B) {
- var w Waiter
- w.Init()
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- go func() {
- w.Notify(evBench)
- }()
- if e := w.Wait(); e != evBench {
- b.Fatalf("Wait: got %#x, wanted %#x", e, evBench)
- }
- w.Ack(evBench)
- }
-}
-
-func BenchmarkSleeperNotifyAsyncWaitMultiAck(b *testing.B) {
- var s sleep.Sleeper
- var ws [3]sleep.Waker
- for i := range ws {
- s.AddWaker(&ws[i], i)
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- go func() {
- ws[0].Assert()
- }()
- if id, _ := s.Fetch(true); id != 0 {
- b.Fatalf("Fetch: got %d, expected 0", id)
- }
- }
-}
-
-func BenchmarkChannelNotifyAsyncWaitMultiAck(b *testing.B) {
- ch0 := make(chan struct{}, 1)
- ch1 := make(chan struct{}, 1)
- ch2 := make(chan struct{}, 1)
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- go func() {
- select {
- case ch0 <- struct{}{}:
- default:
- }
- }()
-
- select {
- case <-ch0:
- // ok
- case <-ch1:
- b.Fatalf("received from ch1")
- case <-ch2:
- b.Fatalf("received from ch2")
- }
- }
-}
diff --git a/pkg/syncevent/waiter_unsafe.go b/pkg/syncevent/waiter_unsafe.go
deleted file mode 100644
index b6ed2852d..000000000
--- a/pkg/syncevent/waiter_unsafe.go
+++ /dev/null
@@ -1,197 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package syncevent
-
-import (
- "sync/atomic"
- "unsafe"
-
- "gvisor.dev/gvisor/pkg/sync"
-)
-
-// Waiter allows a goroutine to block on pending events received by a Receiver.
-//
-// Waiter.Init() must be called before first use.
-type Waiter struct {
- r Receiver
-
- // g is one of:
- //
- // - 0: No goroutine is blocking in Wait.
- //
- // - preparingG: A goroutine is in Wait preparing to sleep, but hasn't yet
- // completed waiterCommit(). Thus the wait can only be interrupted by
- // replacing the value of g with 0 (the G may not be in state Gwaiting yet,
- // so we can't call goready.)
- //
- // - Otherwise: g is a pointer to the runtime.g in state Gwaiting for the
- // goroutine blocked in Wait, which can only be woken by calling goready.
- g uintptr `state:"zerovalue"`
-}
-
-const preparingG = 1
-
-// Init must be called before first use of w.
-func (w *Waiter) Init() {
- w.r.Init(w)
-}
-
-// Receiver returns the Receiver that receives events that unblock calls to
-// w.Wait().
-func (w *Waiter) Receiver() *Receiver {
- return &w.r
-}
-
-// Pending returns the set of pending events.
-func (w *Waiter) Pending() Set {
- return w.r.Pending()
-}
-
-// Wait blocks until at least one event is pending, then returns the set of
-// pending events. It does not affect the set of pending events; callers must
-// call w.Ack() to do so, or use w.WaitAndAck() instead.
-//
-// Precondition: Only one goroutine may call any Wait* method at a time.
-func (w *Waiter) Wait() Set {
- return w.WaitFor(AllEvents)
-}
-
-// WaitFor blocks until at least one event in es is pending, then returns the
-// set of pending events (including those not in es). It does not affect the
-// set of pending events; callers must call w.Ack() to do so.
-//
-// Precondition: Only one goroutine may call any Wait* method at a time.
-func (w *Waiter) WaitFor(es Set) Set {
- for {
- // Optimization: Skip the atomic store to w.g if an event is already
- // pending.
- if p := w.r.Pending(); p&es != NoEvents {
- return p
- }
-
- // Indicate that we're preparing to go to sleep.
- atomic.StoreUintptr(&w.g, preparingG)
-
- // If an event is pending, abort the sleep.
- if p := w.r.Pending(); p&es != NoEvents {
- atomic.StoreUintptr(&w.g, 0)
- return p
- }
-
- // If w.g is still preparingG (i.e. w.NotifyPending() has not been
- // called or has not reached atomic.SwapUintptr()), go to sleep until
- // w.NotifyPending() => goready().
- sync.Gopark(waiterCommit, unsafe.Pointer(&w.g), sync.WaitReasonSelect, sync.TraceEvGoBlockSelect, 0)
- }
-}
-
-//go:norace
-//go:nosplit
-func waiterCommit(g uintptr, wg unsafe.Pointer) bool {
- // The only way this CAS can fail is if a call to Waiter.NotifyPending()
- // has replaced *wg with 0, in which case we should not sleep.
- return sync.RaceUncheckedAtomicCompareAndSwapUintptr((*uintptr)(wg), preparingG, g)
-}
-
-// Ack marks the given events as not pending.
-func (w *Waiter) Ack(es Set) {
- w.r.Ack(es)
-}
-
-// WaitAndAckAll blocks until at least one event is pending, then marks all
-// events as not pending and returns the set of previously-pending events.
-//
-// Precondition: Only one goroutine may call any Wait* method at a time.
-func (w *Waiter) WaitAndAckAll() Set {
- // Optimization: Skip the atomic store to w.g if an event is already
- // pending. Call Pending() first since, in the common case that events are
- // not yet pending, this skips an atomic swap on w.r.pending.
- if w.r.Pending() != NoEvents {
- if p := w.r.PendingAndAckAll(); p != NoEvents {
- return p
- }
- }
-
- for {
- // Indicate that we're preparing to go to sleep.
- atomic.StoreUintptr(&w.g, preparingG)
-
- // If an event is pending, abort the sleep.
- if w.r.Pending() != NoEvents {
- if p := w.r.PendingAndAckAll(); p != NoEvents {
- atomic.StoreUintptr(&w.g, 0)
- return p
- }
- }
-
- // If w.g is still preparingG (i.e. w.NotifyPending() has not been
- // called or has not reached atomic.SwapUintptr()), go to sleep until
- // w.NotifyPending() => goready().
- sync.Gopark(waiterCommit, unsafe.Pointer(&w.g), sync.WaitReasonSelect, sync.TraceEvGoBlockSelect, 0)
-
- // Check for pending events. We call PendingAndAckAll() directly now since
- // we only expect to be woken after events become pending.
- if p := w.r.PendingAndAckAll(); p != NoEvents {
- return p
- }
- }
-}
-
-// Notify marks the given events as pending, possibly unblocking concurrent
-// calls to w.Wait() or w.WaitFor().
-func (w *Waiter) Notify(es Set) {
- w.r.Notify(es)
-}
-
-// NotifyPending implements ReceiverCallback.NotifyPending. Users of Waiter
-// should not call NotifyPending.
-func (w *Waiter) NotifyPending() {
- // Optimization: Skip the atomic swap on w.g if there is no sleeping
- // goroutine. NotifyPending is called after w.r.Pending() is updated, so
- // concurrent and future calls to w.Wait() will observe pending events and
- // abort sleeping.
- if atomic.LoadUintptr(&w.g) == 0 {
- return
- }
- // Wake a sleeping G, or prevent a G that is preparing to sleep from doing
- // so. Swap is needed here to ensure that only one call to NotifyPending
- // calls goready.
- if g := atomic.SwapUintptr(&w.g, 0); g > preparingG {
- sync.Goready(g, 0)
- }
-}
-
-var waiterPool = sync.Pool{
- New: func() interface{} {
- w := &Waiter{}
- w.Init()
- return w
- },
-}
-
-// GetWaiter returns an unused Waiter. PutWaiter should be called to release
-// the Waiter once it is no longer needed.
-//
-// Where possible, users should prefer to associate each goroutine that calls
-// Waiter.Wait() with a distinct pre-allocated Waiter to avoid allocation of
-// Waiters in hot paths.
-func GetWaiter() *Waiter {
- return waiterPool.Get().(*Waiter)
-}
-
-// PutWaiter releases an unused Waiter previously returned by GetWaiter.
-func PutWaiter(w *Waiter) {
- waiterPool.Put(w)
-}
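To make the preparingG protocol above concrete, here is a small usage sketch (not part of this change) in the three-event-source shape the benchmarks mention (data, timeout, interrupt): the waiter blocks in Wait until any source calls Notify, then inspects and acknowledges the returned set.

package main

import (
	"fmt"
	"time"

	"gvisor.dev/gvisor/pkg/syncevent"
)

const (
	evData      = syncevent.Set(1 << 0)
	evTimeout   = syncevent.Set(1 << 1)
	evInterrupt = syncevent.Set(1 << 2)
)

func main() {
	var w syncevent.Waiter
	w.Init()

	// A timer source and a data source race to wake the waiter.
	go func() {
		time.Sleep(10 * time.Millisecond)
		w.Notify(evTimeout)
	}()
	go func() {
		w.Notify(evData)
	}()

	ev := w.Wait() // returns as soon as any event is pending
	switch {
	case ev&evInterrupt != 0:
		fmt.Println("interrupted")
	case ev&evData != 0:
		fmt.Println("data ready")
	case ev&evTimeout != 0:
		fmt.Println("timed out")
	}
	w.Ack(ev)
}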
diff --git a/pkg/syserr/BUILD b/pkg/syserr/BUILD
deleted file mode 100644
index ceee494fc..000000000
--- a/pkg/syserr/BUILD
+++ /dev/null
@@ -1,21 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "syserr",
- srcs = [
- "host_linux.go",
- "netstack.go",
- "syserr.go",
- ],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/abi/linux/errno",
- "//pkg/errors",
- "//pkg/errors/linuxerr",
- "//pkg/syserror",
- "//pkg/tcpip",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/syserr/syserr_linux_state_autogen.go b/pkg/syserr/syserr_linux_state_autogen.go
new file mode 100644
index 000000000..7fd5a68b8
--- /dev/null
+++ b/pkg/syserr/syserr_linux_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build linux
+
+package syserr
diff --git a/pkg/syserr/syserr_state_autogen.go b/pkg/syserr/syserr_state_autogen.go
new file mode 100644
index 000000000..712631a64
--- /dev/null
+++ b/pkg/syserr/syserr_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package syserr
diff --git a/pkg/syserror/BUILD b/pkg/syserror/BUILD
deleted file mode 100644
index 76bee5a64..000000000
--- a/pkg/syserror/BUILD
+++ /dev/null
@@ -1,10 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "syserror",
- srcs = ["syserror.go"],
- visibility = ["//visibility:public"],
- deps = ["@org_golang_x_sys//unix:go_default_library"],
-)
diff --git a/pkg/syserror/syserror_state_autogen.go b/pkg/syserror/syserror_state_autogen.go
new file mode 100644
index 000000000..456dcf093
--- /dev/null
+++ b/pkg/syserror/syserror_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package syserror
diff --git a/pkg/tcpip/BUILD b/pkg/tcpip/BUILD
deleted file mode 100644
index f00cfd0f5..000000000
--- a/pkg/tcpip/BUILD
+++ /dev/null
@@ -1,99 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools:deps.bzl", "deps_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "sock_err_list",
- out = "sock_err_list.go",
- package = "tcpip",
- prefix = "sockError",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*SockError",
- "Linker": "*SockError",
- },
-)
-
-go_library(
- name = "tcpip",
- srcs = [
- "errors.go",
- "sock_err_list.go",
- "socketops.go",
- "stdclock.go",
- "stdclock_state.go",
- "tcpip.go",
- "timer.go",
- ],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/atomicbitops",
- "//pkg/sync",
- "//pkg/tcpip/buffer",
- "//pkg/waiter",
- ],
-)
-
-deps_test(
- name = "netstack_deps_test",
- allowed = [
- # gVisor deps.
- "//pkg/atomicbitops",
- "//pkg/buffer",
- "//pkg/context",
- "//pkg/gohacks",
- "//pkg/goid",
- "//pkg/ilist",
- "//pkg/linewriter",
- "//pkg/log",
- "//pkg/rand",
- "//pkg/sleep",
- "//pkg/state",
- "//pkg/state/wire",
- "//pkg/sync",
- "//pkg/waiter",
-
- # Other deps.
- "@com_github_google_btree//:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- "@org_golang_x_time//rate:go_default_library",
- ],
- allowed_prefixes = [
- "//pkg/tcpip",
- "@org_golang_x_sys//internal/unsafeheader",
- ],
- targets = [
- "//pkg/tcpip",
- "//pkg/tcpip/header",
- "//pkg/tcpip/link/fdbased",
- "//pkg/tcpip/link/loopback",
- "//pkg/tcpip/link/packetsocket",
- "//pkg/tcpip/link/qdisc/fifo",
- "//pkg/tcpip/link/sniffer",
- "//pkg/tcpip/network/arp",
- "//pkg/tcpip/network/ipv4",
- "//pkg/tcpip/network/ipv6",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/transport/icmp",
- "//pkg/tcpip/transport/raw",
- "//pkg/tcpip/transport/tcp",
- "//pkg/tcpip/transport/udp",
- ],
-)
-
-go_test(
- name = "tcpip_test",
- size = "small",
- srcs = ["tcpip_test.go"],
- library = ":tcpip",
- deps = ["@com_github_google_go_cmp//cmp:go_default_library"],
-)
-
-go_test(
- name = "tcpip_x_test",
- size = "small",
- srcs = ["timer_test.go"],
- deps = [":tcpip"],
-)
diff --git a/pkg/tcpip/adapters/gonet/BUILD b/pkg/tcpip/adapters/gonet/BUILD
deleted file mode 100644
index a984f1712..000000000
--- a/pkg/tcpip/adapters/gonet/BUILD
+++ /dev/null
@@ -1,37 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "gonet",
- srcs = ["gonet.go"],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/sync",
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/transport/tcp",
- "//pkg/tcpip/transport/udp",
- "//pkg/waiter",
- ],
-)
-
-go_test(
- name = "gonet_test",
- size = "small",
- srcs = ["gonet_test.go"],
- library = ":gonet",
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/header",
- "//pkg/tcpip/link/loopback",
- "//pkg/tcpip/network/ipv4",
- "//pkg/tcpip/network/ipv6",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/transport/tcp",
- "//pkg/tcpip/transport/udp",
- "//pkg/waiter",
- "@org_golang_x_net//nettest:go_default_library",
- ],
-)
diff --git a/pkg/tcpip/adapters/gonet/gonet_state_autogen.go b/pkg/tcpip/adapters/gonet/gonet_state_autogen.go
new file mode 100644
index 000000000..7a5c5419e
--- /dev/null
+++ b/pkg/tcpip/adapters/gonet/gonet_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package gonet
diff --git a/pkg/tcpip/adapters/gonet/gonet_test.go b/pkg/tcpip/adapters/gonet/gonet_test.go
deleted file mode 100644
index 48b24692b..000000000
--- a/pkg/tcpip/adapters/gonet/gonet_test.go
+++ /dev/null
@@ -1,725 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package gonet
-
-import (
- "context"
- "fmt"
- "io"
- "net"
- "reflect"
- "strings"
- "testing"
- "time"
-
- "golang.org/x/net/nettest"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/link/loopback"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
- "gvisor.dev/gvisor/pkg/tcpip/transport/udp"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-const (
- NICID = 1
-)
-
-func TestTimeouts(t *testing.T) {
- nc := NewTCPConn(nil, nil)
- dlfs := []struct {
- name string
- f func(time.Time) error
- }{
- {"SetDeadline", nc.SetDeadline},
- {"SetReadDeadline", nc.SetReadDeadline},
- {"SetWriteDeadline", nc.SetWriteDeadline},
- }
-
- for _, dlf := range dlfs {
- if err := dlf.f(time.Time{}); err != nil {
- t.Errorf("got %s(time.Time{}) = %v, want = %v", dlf.name, err, nil)
- }
- }
-}
-
-func newLoopbackStack() (*stack.Stack, tcpip.Error) {
- // Create the stack and add a NIC.
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{tcp.NewProtocol, udp.NewProtocol},
- })
-
- if err := s.CreateNIC(NICID, loopback.New()); err != nil {
- return nil, err
- }
-
- // Add default route.
- s.SetRouteTable([]tcpip.Route{
- // IPv4
- {
- Destination: header.IPv4EmptySubnet,
- NIC: NICID,
- },
-
- // IPv6
- {
- Destination: header.IPv6EmptySubnet,
- NIC: NICID,
- },
- })
-
- return s, nil
-}
-
-type testConnection struct {
- wq *waiter.Queue
- e *waiter.Entry
- ch chan struct{}
- ep tcpip.Endpoint
-}
-
-func connect(s *stack.Stack, addr tcpip.FullAddress) (*testConnection, tcpip.Error) {
- wq := &waiter.Queue{}
- ep, err := s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, wq)
- if err != nil {
- return nil, err
- }
-
- entry, ch := waiter.NewChannelEntry(nil)
- wq.EventRegister(&entry, waiter.WritableEvents)
-
- err = ep.Connect(addr)
- if _, ok := err.(*tcpip.ErrConnectStarted); ok {
- <-ch
- err = ep.LastError()
- }
- if err != nil {
- return nil, err
- }
-
- wq.EventUnregister(&entry)
- wq.EventRegister(&entry, waiter.ReadableEvents)
-
- return &testConnection{wq, &entry, ch, ep}, nil
-}
-
-func (c *testConnection) close() {
- c.wq.EventUnregister(c.e)
- c.ep.Close()
-}
-
-// TestCloseReader tests that Conn.Close() causes Conn.Read() to unblock.
-func TestCloseReader(t *testing.T) {
- s, err := newLoopbackStack()
- if err != nil {
- t.Fatalf("newLoopbackStack() = %v", err)
- }
- defer func() {
- s.Close()
- s.Wait()
- }()
-
- addr := tcpip.FullAddress{NICID, tcpip.Address(net.IPv4(169, 254, 10, 1).To4()), 11211}
-
- s.AddAddress(NICID, ipv4.ProtocolNumber, addr.Addr)
-
- l, e := ListenTCP(s, addr, ipv4.ProtocolNumber)
- if e != nil {
- t.Fatalf("NewListener() = %v", e)
- }
- done := make(chan struct{})
- go func() {
- defer close(done)
- c, err := l.Accept()
- if err != nil {
- t.Errorf("l.Accept() = %v", err)
- // Cannot call Fatalf in goroutine. Just return from the goroutine.
- return
- }
-
- // Give c.Read() a chance to block before closing the connection.
- time.AfterFunc(time.Millisecond*50, func() {
- c.Close()
- })
-
- buf := make([]byte, 256)
- n, err := c.Read(buf)
- if n != 0 || err != io.EOF {
- t.Errorf("c.Read() = (%d, %v), want (0, EOF)", n, err)
- }
- }()
- sender, err := connect(s, addr)
- if err != nil {
- t.Fatalf("connect() = %v", err)
- }
-
- select {
- case <-done:
- case <-time.After(5 * time.Second):
- t.Errorf("c.Read() didn't unblock")
- }
- sender.close()
-}
-
-// TestCloseReaderWithForwarder tests that TCPConn.Close wakes TCPConn.Read when
-// using tcp.Forwarder.
-func TestCloseReaderWithForwarder(t *testing.T) {
- s, err := newLoopbackStack()
- if err != nil {
- t.Fatalf("newLoopbackStack() = %v", err)
- }
- defer func() {
- s.Close()
- s.Wait()
- }()
-
- addr := tcpip.FullAddress{NICID, tcpip.Address(net.IPv4(169, 254, 10, 1).To4()), 11211}
- s.AddAddress(NICID, ipv4.ProtocolNumber, addr.Addr)
-
- done := make(chan struct{})
-
- fwd := tcp.NewForwarder(s, 30000, 10, func(r *tcp.ForwarderRequest) {
- defer close(done)
-
- var wq waiter.Queue
- ep, err := r.CreateEndpoint(&wq)
- if err != nil {
- t.Fatalf("r.CreateEndpoint() = %v", err)
- }
- defer ep.Close()
- r.Complete(false)
-
- c := NewTCPConn(&wq, ep)
-
- // Give c.Read() a chance to block before closing the connection.
- time.AfterFunc(time.Millisecond*50, func() {
- c.Close()
- })
-
- buf := make([]byte, 256)
- n, e := c.Read(buf)
- if n != 0 || e != io.EOF {
- t.Errorf("c.Read() = (%d, %v), want (0, EOF)", n, e)
- }
- })
- s.SetTransportProtocolHandler(tcp.ProtocolNumber, fwd.HandlePacket)
-
- sender, err := connect(s, addr)
- if err != nil {
- t.Fatalf("connect() = %v", err)
- }
-
- select {
- case <-done:
- case <-time.After(5 * time.Second):
- t.Errorf("c.Read() didn't unblock")
- }
- sender.close()
-}
-
-func TestCloseRead(t *testing.T) {
- s, terr := newLoopbackStack()
- if terr != nil {
- t.Fatalf("newLoopbackStack() = %v", terr)
- }
- defer func() {
- s.Close()
- s.Wait()
- }()
-
- addr := tcpip.FullAddress{NICID, tcpip.Address(net.IPv4(169, 254, 10, 1).To4()), 11211}
- s.AddAddress(NICID, ipv4.ProtocolNumber, addr.Addr)
-
- fwd := tcp.NewForwarder(s, 30000, 10, func(r *tcp.ForwarderRequest) {
- var wq waiter.Queue
- _, err := r.CreateEndpoint(&wq)
- if err != nil {
- t.Fatalf("r.CreateEndpoint() = %v", err)
- }
- // Endpoint will be closed in deferred s.Close (above).
- })
-
- s.SetTransportProtocolHandler(tcp.ProtocolNumber, fwd.HandlePacket)
-
- tc, terr := connect(s, addr)
- if terr != nil {
- t.Fatalf("connect() = %v", terr)
- }
- c := NewTCPConn(tc.wq, tc.ep)
-
- if err := c.CloseRead(); err != nil {
- t.Errorf("c.CloseRead() = %v", err)
- }
-
- buf := make([]byte, 256)
- if n, err := c.Read(buf); err != io.EOF {
- t.Errorf("c.Read() = (%d, %v), want (0, io.EOF)", n, err)
- }
-
- if n, err := c.Write([]byte("abc123")); n != 6 || err != nil {
- t.Errorf("c.Write() = (%d, %v), want (6, nil)", n, err)
- }
-}
-
-func TestCloseWrite(t *testing.T) {
- s, terr := newLoopbackStack()
- if terr != nil {
- t.Fatalf("newLoopbackStack() = %v", terr)
- }
- defer func() {
- s.Close()
- s.Wait()
- }()
-
- addr := tcpip.FullAddress{NICID, tcpip.Address(net.IPv4(169, 254, 10, 1).To4()), 11211}
- s.AddAddress(NICID, ipv4.ProtocolNumber, addr.Addr)
-
- fwd := tcp.NewForwarder(s, 30000, 10, func(r *tcp.ForwarderRequest) {
- var wq waiter.Queue
- ep, err := r.CreateEndpoint(&wq)
- if err != nil {
- t.Fatalf("r.CreateEndpoint() = %v", err)
- }
- defer ep.Close()
- r.Complete(false)
-
- c := NewTCPConn(&wq, ep)
-
- n, e := c.Read(make([]byte, 256))
- if n != 0 || e != io.EOF {
- t.Errorf("c.Read() = (%d, %v), want (0, io.EOF)", n, e)
- }
-
- if n, e = c.Write([]byte("abc123")); n != 6 || e != nil {
- t.Errorf("c.Write() = (%d, %v), want (6, nil)", n, e)
- }
- })
-
- s.SetTransportProtocolHandler(tcp.ProtocolNumber, fwd.HandlePacket)
-
- tc, terr := connect(s, addr)
- if terr != nil {
- t.Fatalf("connect() = %v", terr)
- }
- c := NewTCPConn(tc.wq, tc.ep)
-
- if err := c.CloseWrite(); err != nil {
- t.Errorf("c.CloseWrite() = %v", err)
- }
-
- buf := make([]byte, 256)
- n, err := c.Read(buf)
- if err != nil || string(buf[:n]) != "abc123" {
- t.Fatalf("c.Read() = (%d, %v), want (6, nil)", n, err)
- }
-
- n, err = c.Write([]byte("abc123"))
- got, ok := err.(*net.OpError)
- want := "endpoint is closed for send"
- if n != 0 || !ok || got.Op != "write" || got.Err == nil || !strings.HasSuffix(got.Err.Error(), want) {
- t.Errorf("c.Write() = (%d, %v), want (0, OpError(Op: write, Err: %s))", n, err, want)
- }
-}
-
-func TestUDPForwarder(t *testing.T) {
- s, terr := newLoopbackStack()
- if terr != nil {
- t.Fatalf("newLoopbackStack() = %v", terr)
- }
- defer func() {
- s.Close()
- s.Wait()
- }()
-
- ip1 := tcpip.Address(net.IPv4(169, 254, 10, 1).To4())
- addr1 := tcpip.FullAddress{NICID, ip1, 11211}
- s.AddAddress(NICID, ipv4.ProtocolNumber, ip1)
- ip2 := tcpip.Address(net.IPv4(169, 254, 10, 2).To4())
- addr2 := tcpip.FullAddress{NICID, ip2, 11311}
- s.AddAddress(NICID, ipv4.ProtocolNumber, ip2)
-
- done := make(chan struct{})
- fwd := udp.NewForwarder(s, func(r *udp.ForwarderRequest) {
- defer close(done)
-
- var wq waiter.Queue
- ep, err := r.CreateEndpoint(&wq)
- if err != nil {
- t.Fatalf("r.CreateEndpoint() = %v", err)
- }
- defer ep.Close()
-
- c := NewTCPConn(&wq, ep)
-
- buf := make([]byte, 256)
- n, e := c.Read(buf)
- if e != nil {
- t.Errorf("c.Read() = %v", e)
- }
-
- if _, e := c.Write(buf[:n]); e != nil {
- t.Errorf("c.Write() = %v", e)
- }
- })
- s.SetTransportProtocolHandler(udp.ProtocolNumber, fwd.HandlePacket)
-
- c2, err := DialUDP(s, &addr2, nil, ipv4.ProtocolNumber)
- if err != nil {
- t.Fatal("DialUDP(bind port 5):", err)
- }
-
- sent := "abc123"
- sendAddr := fullToUDPAddr(addr1)
- if n, err := c2.WriteTo([]byte(sent), sendAddr); err != nil || n != len(sent) {
- t.Errorf("c1.WriteTo(%q, %v) = %d, %v, want = %d, %v", sent, sendAddr, n, err, len(sent), nil)
- }
-
- buf := make([]byte, 256)
- n, recvAddr, err := c2.ReadFrom(buf)
- if err != nil || recvAddr.String() != sendAddr.String() {
- t.Errorf("c1.ReadFrom() = %d, %v, %v, want = %d, %v, %v", n, recvAddr, err, len(sent), sendAddr, nil)
- }
-}
-
-// TestDeadlineChange tests that changing the deadline affects currently blocked reads.
-func TestDeadlineChange(t *testing.T) {
- s, err := newLoopbackStack()
- if err != nil {
- t.Fatalf("newLoopbackStack() = %v", err)
- }
- defer func() {
- s.Close()
- s.Wait()
- }()
-
- addr := tcpip.FullAddress{NICID, tcpip.Address(net.IPv4(169, 254, 10, 1).To4()), 11211}
-
- s.AddAddress(NICID, ipv4.ProtocolNumber, addr.Addr)
-
- l, e := ListenTCP(s, addr, ipv4.ProtocolNumber)
- if e != nil {
- t.Fatalf("NewListener() = %v", e)
- }
- done := make(chan struct{})
- go func() {
- defer close(done)
- c, err := l.Accept()
- if err != nil {
- t.Errorf("l.Accept() = %v", err)
- // Cannot call Fatalf in goroutine. Just return from the goroutine.
- return
- }
-
- c.SetDeadline(time.Now().Add(time.Minute))
- // Give c.Read() a chance to block before closing the connection.
- time.AfterFunc(time.Millisecond*50, func() {
- c.SetDeadline(time.Now().Add(time.Millisecond * 10))
- })
-
- buf := make([]byte, 256)
- n, err := c.Read(buf)
- got, ok := err.(*net.OpError)
- want := "i/o timeout"
- if n != 0 || !ok || got.Err == nil || got.Err.Error() != want {
- t.Errorf("c.Read() = (%d, %v), want (0, OpError(%s))", n, err, want)
- }
- }()
- sender, err := connect(s, addr)
- if err != nil {
- t.Fatalf("connect() = %v", err)
- }
-
- select {
- case <-done:
- case <-time.After(time.Millisecond * 500):
- t.Errorf("c.Read() didn't unblock")
- }
- sender.close()
-}
-
-func TestPacketConnTransfer(t *testing.T) {
- s, e := newLoopbackStack()
- if e != nil {
- t.Fatalf("newLoopbackStack() = %v", e)
- }
- defer func() {
- s.Close()
- s.Wait()
- }()
-
- ip1 := tcpip.Address(net.IPv4(169, 254, 10, 1).To4())
- addr1 := tcpip.FullAddress{NICID, ip1, 11211}
- s.AddAddress(NICID, ipv4.ProtocolNumber, ip1)
- ip2 := tcpip.Address(net.IPv4(169, 254, 10, 2).To4())
- addr2 := tcpip.FullAddress{NICID, ip2, 11311}
- s.AddAddress(NICID, ipv4.ProtocolNumber, ip2)
-
- c1, err := DialUDP(s, &addr1, nil, ipv4.ProtocolNumber)
- if err != nil {
- t.Fatal("DialUDP(bind port 4):", err)
- }
- c2, err := DialUDP(s, &addr2, nil, ipv4.ProtocolNumber)
- if err != nil {
- t.Fatal("DialUDP(bind port 5):", err)
- }
-
- c1.SetDeadline(time.Now().Add(time.Second))
- c2.SetDeadline(time.Now().Add(time.Second))
-
- sent := "abc123"
- sendAddr := fullToUDPAddr(addr2)
- if n, err := c1.WriteTo([]byte(sent), sendAddr); err != nil || n != len(sent) {
- t.Errorf("got c1.WriteTo(%q, %v) = %d, %v, want = %d, %v", sent, sendAddr, n, err, len(sent), nil)
- }
- recv := make([]byte, len(sent))
- n, recvAddr, err := c2.ReadFrom(recv)
- if err != nil || n != len(recv) {
- t.Errorf("got c2.ReadFrom() = %d, %v, want = %d, %v", n, err, len(recv), nil)
- }
-
- if recv := string(recv); recv != sent {
- t.Errorf("got recv = %q, want = %q", recv, sent)
- }
-
- if want := fullToUDPAddr(addr1); !reflect.DeepEqual(recvAddr, want) {
- t.Errorf("got recvAddr = %v, want = %v", recvAddr, want)
- }
-
- if err := c1.Close(); err != nil {
- t.Error("c1.Close():", err)
- }
- if err := c2.Close(); err != nil {
- t.Error("c2.Close():", err)
- }
-}
-
-func TestConnectedPacketConnTransfer(t *testing.T) {
- s, e := newLoopbackStack()
- if e != nil {
- t.Fatalf("newLoopbackStack() = %v", e)
- }
- defer func() {
- s.Close()
- s.Wait()
- }()
-
- ip := tcpip.Address(net.IPv4(169, 254, 10, 1).To4())
- addr := tcpip.FullAddress{NICID, ip, 11211}
- s.AddAddress(NICID, ipv4.ProtocolNumber, ip)
-
- c1, err := DialUDP(s, &addr, nil, ipv4.ProtocolNumber)
- if err != nil {
- t.Fatal("DialUDP(bind port 4):", err)
- }
- c2, err := DialUDP(s, nil, &addr, ipv4.ProtocolNumber)
- if err != nil {
- t.Fatal("DialUDP(bind port 5):", err)
- }
-
- c1.SetDeadline(time.Now().Add(time.Second))
- c2.SetDeadline(time.Now().Add(time.Second))
-
- sent := "abc123"
- if n, err := c2.Write([]byte(sent)); err != nil || n != len(sent) {
- t.Errorf("got c2.Write(%q) = %d, %v, want = %d, %v", sent, n, err, len(sent), nil)
- }
- recv := make([]byte, len(sent))
- n, err := c1.Read(recv)
- if err != nil || n != len(recv) {
- t.Errorf("got c1.Read() = %d, %v, want = %d, %v", n, err, len(recv), nil)
- }
-
- if recv := string(recv); recv != sent {
- t.Errorf("got recv = %q, want = %q", recv, sent)
- }
-
- if err := c1.Close(); err != nil {
- t.Error("c1.Close():", err)
- }
- if err := c2.Close(); err != nil {
- t.Error("c2.Close():", err)
- }
-}
-
-func makePipe() (c1, c2 net.Conn, stop func(), err error) {
- s, e := newLoopbackStack()
- if e != nil {
- return nil, nil, nil, fmt.Errorf("newLoopbackStack() = %v", e)
- }
-
- ip := tcpip.Address(net.IPv4(169, 254, 10, 1).To4())
- addr := tcpip.FullAddress{NICID, ip, 11211}
- s.AddAddress(NICID, ipv4.ProtocolNumber, ip)
-
- l, err := ListenTCP(s, addr, ipv4.ProtocolNumber)
- if err != nil {
- return nil, nil, nil, fmt.Errorf("NewListener: %v", err)
- }
-
- c1, err = DialTCP(s, addr, ipv4.ProtocolNumber)
- if err != nil {
- l.Close()
- return nil, nil, nil, fmt.Errorf("DialTCP: %v", err)
- }
-
- c2, err = l.Accept()
- if err != nil {
- l.Close()
- c1.Close()
- return nil, nil, nil, fmt.Errorf("l.Accept: %v", err)
- }
-
- stop = func() {
- c1.Close()
- c2.Close()
- s.Close()
- s.Wait()
- }
-
- if err := l.Close(); err != nil {
- stop()
- return nil, nil, nil, fmt.Errorf("l.Close(): %v", err)
- }
-
- return c1, c2, stop, nil
-}
-
-func TestTCPConnTransfer(t *testing.T) {
- c1, c2, _, err := makePipe()
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- if err := c1.Close(); err != nil {
- t.Error("c1.Close():", err)
- }
- if err := c2.Close(); err != nil {
- t.Error("c2.Close():", err)
- }
- }()
-
- c1.SetDeadline(time.Now().Add(time.Second))
- c2.SetDeadline(time.Now().Add(time.Second))
-
- const sent = "abc123"
-
- tests := []struct {
- name string
- c1 net.Conn
- c2 net.Conn
- }{
- {"connected to accepted", c1, c2},
- {"accepted to connected", c2, c1},
- }
-
- for _, test := range tests {
- if n, err := test.c1.Write([]byte(sent)); err != nil || n != len(sent) {
- t.Errorf("%s: got test.c1.Write(%q) = %d, %v, want = %d, %v", test.name, sent, n, err, len(sent), nil)
- continue
- }
-
- recv := make([]byte, len(sent))
- n, err := test.c2.Read(recv)
- if err != nil || n != len(recv) {
- t.Errorf("%s: got test.c2.Read() = %d, %v, want = %d, %v", test.name, n, err, len(recv), nil)
- continue
- }
-
- if recv := string(recv); recv != sent {
- t.Errorf("%s: got recv = %q, want = %q", test.name, recv, sent)
- }
- }
-}
-
-func TestTCPDialError(t *testing.T) {
- s, e := newLoopbackStack()
- if e != nil {
- t.Fatalf("newLoopbackStack() = %v", e)
- }
- defer func() {
- s.Close()
- s.Wait()
- }()
-
- ip := tcpip.Address(net.IPv4(169, 254, 10, 1).To4())
- addr := tcpip.FullAddress{NICID, ip, 11211}
-
- switch _, err := DialTCP(s, addr, ipv4.ProtocolNumber); err := err.(type) {
- case *net.OpError:
- if err.Err.Error() != (&tcpip.ErrNoRoute{}).String() {
- t.Errorf("got DialTCP() = %s, want = %s", err, &tcpip.ErrNoRoute{})
- }
- default:
- t.Errorf("got DialTCP(...) = %v, want %s", err, &tcpip.ErrNoRoute{})
- }
-}
-
-func TestDialContextTCPCanceled(t *testing.T) {
- s, err := newLoopbackStack()
- if err != nil {
- t.Fatalf("newLoopbackStack() = %v", err)
- }
- defer func() {
- s.Close()
- s.Wait()
- }()
-
- addr := tcpip.FullAddress{NICID, tcpip.Address(net.IPv4(169, 254, 10, 1).To4()), 11211}
- s.AddAddress(NICID, ipv4.ProtocolNumber, addr.Addr)
-
- ctx := context.Background()
- ctx, cancel := context.WithCancel(ctx)
- cancel()
-
- if _, err := DialContextTCP(ctx, s, addr, ipv4.ProtocolNumber); err != context.Canceled {
- t.Errorf("got DialContextTCP(...) = %v, want = %v", err, context.Canceled)
- }
-}
-
-func TestDialContextTCPTimeout(t *testing.T) {
- s, err := newLoopbackStack()
- if err != nil {
- t.Fatalf("newLoopbackStack() = %v", err)
- }
- defer func() {
- s.Close()
- s.Wait()
- }()
-
- addr := tcpip.FullAddress{NICID, tcpip.Address(net.IPv4(169, 254, 10, 1).To4()), 11211}
- s.AddAddress(NICID, ipv4.ProtocolNumber, addr.Addr)
-
- fwd := tcp.NewForwarder(s, 30000, 10, func(r *tcp.ForwarderRequest) {
- time.Sleep(time.Second)
- r.Complete(true)
- })
- s.SetTransportProtocolHandler(tcp.ProtocolNumber, fwd.HandlePacket)
-
- ctx := context.Background()
- ctx, cancel := context.WithDeadline(ctx, time.Now().Add(100*time.Millisecond))
- defer cancel()
-
- if _, err := DialContextTCP(ctx, s, addr, ipv4.ProtocolNumber); err != context.DeadlineExceeded {
- t.Errorf("got DialContextTCP(...) = %v, want = %v", err, context.DeadlineExceeded)
- }
-}
-
-func TestNetTest(t *testing.T) {
- nettest.TestConn(t, makePipe)
-}
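Editor's note: the deleted gonet tests above all follow the same pattern: build a netstack with a loopback NIC, assign an address and route, then wrap netstack endpoints in gonet's net.Listener/net.Conn adapters (ListenTCP, DialTCP, DialUDP). The condensed sketch below uses only calls that appear in the deleted tests; error handling is simplified, the address and port are arbitrary, and some stack APIs (for example AddAddress) may have changed in later netstack versions.

package main

import (
	"fmt"
	"net"

	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/adapters/gonet"
	"gvisor.dev/gvisor/pkg/tcpip/header"
	"gvisor.dev/gvisor/pkg/tcpip/link/loopback"
	"gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
	"gvisor.dev/gvisor/pkg/tcpip/stack"
	"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
)

const nicID = 1

func main() {
	// Build a minimal stack with a loopback NIC, mirroring newLoopbackStack
	// in the deleted test.
	s := stack.New(stack.Options{
		NetworkProtocols:   []stack.NetworkProtocolFactory{ipv4.NewProtocol},
		TransportProtocols: []stack.TransportProtocolFactory{tcp.NewProtocol},
	})
	if err := s.CreateNIC(nicID, loopback.New()); err != nil {
		panic(fmt.Sprint(err))
	}
	addr := tcpip.Address(net.IPv4(169, 254, 10, 1).To4())
	s.AddAddress(nicID, ipv4.ProtocolNumber, addr)
	s.SetRouteTable([]tcpip.Route{{Destination: header.IPv4EmptySubnet, NIC: nicID}})

	full := tcpip.FullAddress{NIC: nicID, Addr: addr, Port: 8080}

	// gonet exposes standard-library style Listener/Conn types over netstack.
	l, err := gonet.ListenTCP(s, full, ipv4.ProtocolNumber)
	if err != nil {
		panic(fmt.Sprint(err))
	}
	go func() {
		c, err := l.Accept()
		if err != nil {
			return
		}
		c.Write([]byte("hello"))
		c.Close()
	}()

	c, err := gonet.DialTCP(s, full, ipv4.ProtocolNumber)
	if err != nil {
		panic(fmt.Sprint(err))
	}
	buf := make([]byte, 5)
	n, _ := c.Read(buf)
	fmt.Println(string(buf[:n]))
	c.Close()
}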
diff --git a/pkg/tcpip/buffer/BUILD b/pkg/tcpip/buffer/BUILD
deleted file mode 100644
index 23aa0ad05..000000000
--- a/pkg/tcpip/buffer/BUILD
+++ /dev/null
@@ -1,25 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "buffer",
- srcs = [
- "prependable.go",
- "view.go",
- "view_unsafe.go",
- ],
- visibility = ["//visibility:public"],
-)
-
-go_test(
- name = "buffer_x_test",
- size = "small",
- srcs = [
- "view_test.go",
- ],
- deps = [
- ":buffer",
- "//pkg/tcpip",
- ],
-)
diff --git a/pkg/tcpip/buffer/buffer_state_autogen.go b/pkg/tcpip/buffer/buffer_state_autogen.go
new file mode 100644
index 000000000..51bfbff8a
--- /dev/null
+++ b/pkg/tcpip/buffer/buffer_state_autogen.go
@@ -0,0 +1,39 @@
+// automatically generated by stateify.
+
+package buffer
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (vv *VectorisedView) StateTypeName() string {
+ return "pkg/tcpip/buffer.VectorisedView"
+}
+
+func (vv *VectorisedView) StateFields() []string {
+ return []string{
+ "views",
+ "size",
+ }
+}
+
+func (vv *VectorisedView) beforeSave() {}
+
+// +checklocksignore
+func (vv *VectorisedView) StateSave(stateSinkObject state.Sink) {
+ vv.beforeSave()
+ stateSinkObject.Save(0, &vv.views)
+ stateSinkObject.Save(1, &vv.size)
+}
+
+func (vv *VectorisedView) afterLoad() {}
+
+// +checklocksignore
+func (vv *VectorisedView) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &vv.views)
+ stateSourceObject.Load(1, &vv.size)
+}
+
+func init() {
+ state.Register((*VectorisedView)(nil))
+}
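Editor's note: the generated file above shows the stateify save/restore pattern: a saveable type names itself and its fields, saves and loads each field by index through state.Sink and state.Source, and registers itself in init. The sketch below mirrors that exact pattern for a hypothetical type named counter; the type, its fields, and the package name are invented for illustration, while the state package calls are the ones visible in the generated code.

package example

import "gvisor.dev/gvisor/pkg/state"

// counter is a hypothetical saveable type used only to illustrate the pattern.
type counter struct {
	hits  uint64
	label string
}

func (c *counter) StateTypeName() string {
	return "example.counter"
}

func (c *counter) StateFields() []string {
	return []string{"hits", "label"}
}

func (c *counter) beforeSave() {}

// StateSave writes each field at a fixed index; indices must match StateLoad.
func (c *counter) StateSave(stateSinkObject state.Sink) {
	c.beforeSave()
	stateSinkObject.Save(0, &c.hits)
	stateSinkObject.Save(1, &c.label)
}

func (c *counter) afterLoad() {}

// StateLoad reads the fields back by the same indices.
func (c *counter) StateLoad(stateSourceObject state.Source) {
	stateSourceObject.Load(0, &c.hits)
	stateSourceObject.Load(1, &c.label)
}

func init() {
	state.Register((*counter)(nil))
}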
diff --git a/pkg/tcpip/buffer/buffer_unsafe_state_autogen.go b/pkg/tcpip/buffer/buffer_unsafe_state_autogen.go
new file mode 100644
index 000000000..5a5c40722
--- /dev/null
+++ b/pkg/tcpip/buffer/buffer_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package buffer
diff --git a/pkg/tcpip/buffer/view_test.go b/pkg/tcpip/buffer/view_test.go
deleted file mode 100644
index d296d9c2b..000000000
--- a/pkg/tcpip/buffer/view_test.go
+++ /dev/null
@@ -1,629 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package buffer_test contains tests for the buffer.VectorisedView type.
-package buffer_test
-
-import (
- "bytes"
- "io"
- "reflect"
- "testing"
- "unsafe"
-
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
-)
-
-// copyVV returns a deep copy of the vectorised view.
-func copyVV(vv buffer.VectorisedView) buffer.VectorisedView {
- views := make([]buffer.View, 0, len(vv.Views()))
- for _, v := range vv.Views() {
- views = append(views, append(buffer.View(nil), v...))
- }
- return buffer.NewVectorisedView(vv.Size(), views)
-}
-
-// vv is a helper to build a buffer.VectorisedView from different strings.
-func vv(size int, pieces ...string) buffer.VectorisedView {
- views := make([]buffer.View, len(pieces))
- for i, p := range pieces {
- views[i] = []byte(p)
- }
-
- return buffer.NewVectorisedView(size, views)
-}
-
-// v returns a buffer.View containing piece.
-func v(piece string) buffer.View {
- return buffer.View(piece)
-}
-
-var capLengthTestCases = []struct {
- comment string
- in buffer.VectorisedView
- length int
- want buffer.VectorisedView
-}{
- {
- comment: "Simple case",
- in: vv(2, "12"),
- length: 1,
- want: vv(1, "1"),
- },
- {
- comment: "Case spanning across two Views",
- in: vv(4, "123", "4"),
- length: 2,
- want: vv(2, "12"),
- },
- {
- comment: "Corner case with negative length",
- in: vv(1, "1"),
- length: -1,
- want: vv(0),
- },
- {
- comment: "Corner case with length = 0",
- in: vv(3, "12", "3"),
- length: 0,
- want: vv(0),
- },
- {
- comment: "Corner case with length = size",
- in: vv(1, "1"),
- length: 1,
- want: vv(1, "1"),
- },
- {
- comment: "Corner case with length > size",
- in: vv(1, "1"),
- length: 2,
- want: vv(1, "1"),
- },
-}
-
-func TestCapLength(t *testing.T) {
- for _, c := range capLengthTestCases {
- orig := copyVV(c.in)
- c.in.CapLength(c.length)
- if !reflect.DeepEqual(c.in, c.want) {
- t.Errorf("Test \"%s\" failed when calling CapLength(%d) on %v. Got %v. Want %v",
- c.comment, c.length, orig, c.in, c.want)
- }
- }
-}
-
-var trimFrontTestCases = []struct {
- comment string
- in buffer.VectorisedView
- count int
- want buffer.VectorisedView
-}{
- {
- comment: "Simple case",
- in: vv(2, "12"),
- count: 1,
- want: vv(1, "2"),
- },
- {
- comment: "Case where we trim an entire View",
- in: vv(2, "1", "2"),
- count: 1,
- want: vv(1, "2"),
- },
- {
- comment: "Case spanning across two Views",
- in: vv(3, "1", "23"),
- count: 2,
- want: vv(1, "3"),
- },
- {
- comment: "Case with one empty Views",
- in: vv(3, "1", "", "23"),
- count: 2,
- want: vv(1, "3"),
- },
- {
- comment: "Corner case with negative count",
- in: vv(1, "1"),
- count: -1,
- want: vv(1, "1"),
- },
- {
- comment: " Corner case with count = 0",
- in: vv(1, "1"),
- count: 0,
- want: vv(1, "1"),
- },
- {
- comment: "Corner case with count = size",
- in: vv(1, "1"),
- count: 1,
- want: vv(0),
- },
- {
- comment: "Corner case with count > size",
- in: vv(1, "1"),
- count: 2,
- want: vv(0),
- },
-}
-
-func TestTrimFront(t *testing.T) {
- for _, c := range trimFrontTestCases {
- orig := copyVV(c.in)
- c.in.TrimFront(c.count)
- if !reflect.DeepEqual(c.in, c.want) {
- t.Errorf("Test \"%s\" failed when calling TrimFront(%d) on %v. Got %v. Want %v",
- c.comment, c.count, orig, c.in, c.want)
- }
- }
-}
-
-var toViewCases = []struct {
- comment string
- in buffer.VectorisedView
- want buffer.View
-}{
- {
- comment: "Simple case",
- in: vv(2, "12"),
- want: []byte("12"),
- },
- {
- comment: "Case with multiple views",
- in: vv(2, "1", "2"),
- want: []byte("12"),
- },
- {
- comment: "Empty case",
- in: vv(0),
- want: []byte(""),
- },
-}
-
-func TestToView(t *testing.T) {
- for _, c := range toViewCases {
- got := c.in.ToView()
- if !reflect.DeepEqual(got, c.want) {
- t.Errorf("Test \"%s\" failed when calling ToView() on %v. Got %v. Want %v",
- c.comment, c.in, got, c.want)
- }
- }
-}
-
-var toCloneCases = []struct {
- comment string
- inView buffer.VectorisedView
- inBuffer []buffer.View
-}{
- {
- comment: "Simple case",
- inView: vv(1, "1"),
- inBuffer: make([]buffer.View, 1),
- },
- {
- comment: "Case with multiple views",
- inView: vv(2, "1", "2"),
- inBuffer: make([]buffer.View, 2),
- },
- {
- comment: "Case with buffer too small",
- inView: vv(2, "1", "2"),
- inBuffer: make([]buffer.View, 1),
- },
- {
- comment: "Case with buffer larger than needed",
- inView: vv(1, "1"),
- inBuffer: make([]buffer.View, 2),
- },
- {
- comment: "Case with nil buffer",
- inView: vv(1, "1"),
- inBuffer: nil,
- },
-}
-
-func TestToClone(t *testing.T) {
- for _, c := range toCloneCases {
- t.Run(c.comment, func(t *testing.T) {
- got := c.inView.Clone(c.inBuffer)
- if !reflect.DeepEqual(got, c.inView) {
- t.Fatalf("got (%+v).Clone(%+v) = %+v, want = %+v",
- c.inView, c.inBuffer, got, c.inView)
- }
- })
- }
-}
-
-type readToTestCases struct {
- comment string
- vv buffer.VectorisedView
- bytesToRead int
- wantBytes string
- leftVV buffer.VectorisedView
-}
-
-func createReadToTestCases() []readToTestCases {
- return []readToTestCases{
- {
- comment: "large VV, short read",
- vv: vv(30, "012345678901234567890123456789"),
- bytesToRead: 10,
- wantBytes: "0123456789",
- leftVV: vv(20, "01234567890123456789"),
- },
- {
- comment: "largeVV, multiple views, short read",
- vv: vv(13, "123", "345", "567", "8910"),
- bytesToRead: 6,
- wantBytes: "123345",
- leftVV: vv(7, "567", "8910"),
- },
- {
- comment: "smallVV (multiple views), large read",
- vv: vv(3, "1", "2", "3"),
- bytesToRead: 10,
- wantBytes: "123",
- leftVV: vv(0, ""),
- },
- {
- comment: "smallVV (single view), large read",
- vv: vv(1, "1"),
- bytesToRead: 10,
- wantBytes: "1",
- leftVV: vv(0, ""),
- },
- {
- comment: "emptyVV, large read",
- vv: vv(0, ""),
- bytesToRead: 10,
- wantBytes: "",
- leftVV: vv(0, ""),
- },
- }
-}
-
-func TestVVReadToVV(t *testing.T) {
- for _, tc := range createReadToTestCases() {
- t.Run(tc.comment, func(t *testing.T) {
- var readTo buffer.VectorisedView
- inSize := tc.vv.Size()
- copied := tc.vv.ReadToVV(&readTo, tc.bytesToRead)
- if got, want := copied, len(tc.wantBytes); got != want {
- t.Errorf("incorrect number of bytes copied returned in ReadToVV got: %d, want: %d, tc: %+v", got, want, tc)
- }
- if got, want := string(readTo.ToView()), tc.wantBytes; got != want {
- t.Errorf("unexpected content in readTo got: %s, want: %s", got, want)
- }
- if got, want := tc.vv.Size(), inSize-copied; got != want {
- t.Errorf("test VV has incorrect size after reading got: %d, want: %d, tc.vv: %+v", got, want, tc.vv)
- }
- if got, want := string(tc.vv.ToView()), string(tc.leftVV.ToView()); got != want {
- t.Errorf("unexpected data left in vv after read got: %+v, want: %+v", got, want)
- }
- })
- }
-}
-
-func TestVVReadTo(t *testing.T) {
- for _, tc := range createReadToTestCases() {
- t.Run(tc.comment, func(t *testing.T) {
- b := make([]byte, tc.bytesToRead)
- dst := tcpip.SliceWriter(b)
- origSize := tc.vv.Size()
- copied, err := tc.vv.ReadTo(&dst, false /* peek */)
- if err != nil && err != io.ErrShortWrite {
- t.Errorf("got ReadTo(&dst, false) = (_, %s); want nil or io.ErrShortWrite", err)
- }
- if got, want := copied, len(tc.wantBytes); got != want {
- t.Errorf("got ReadTo(&dst, false) = (%d, _); want %d", got, want)
- }
- if got, want := string(b[:copied]), tc.wantBytes; got != want {
- t.Errorf("got dst = %q, want %q", got, want)
- }
- if got, want := tc.vv.Size(), origSize-copied; got != want {
- t.Errorf("got after-read tc.vv.Size() = %d, want %d", got, want)
- }
- if got, want := string(tc.vv.ToView()), string(tc.leftVV.ToView()); got != want {
- t.Errorf("got after-read data in tc.vv = %q, want %q", got, want)
- }
- })
- }
-}
-
-func TestVVReadToPeek(t *testing.T) {
- for _, tc := range createReadToTestCases() {
- t.Run(tc.comment, func(t *testing.T) {
- b := make([]byte, tc.bytesToRead)
- dst := tcpip.SliceWriter(b)
- origSize := tc.vv.Size()
- origData := string(tc.vv.ToView())
- copied, err := tc.vv.ReadTo(&dst, true /* peek */)
- if err != nil && err != io.ErrShortWrite {
- t.Errorf("got ReadTo(&dst, true) = (_, %s); want nil or io.ErrShortWrite", err)
- }
- if got, want := copied, len(tc.wantBytes); got != want {
- t.Errorf("got ReadTo(&dst, true) = (%d, _); want %d", got, want)
- }
- if got, want := string(b[:copied]), tc.wantBytes; got != want {
- t.Errorf("got dst = %q, want %q", got, want)
- }
- // Expect tc.vv is unchanged.
- if got, want := tc.vv.Size(), origSize; got != want {
- t.Errorf("got after-read tc.vv.Size() = %d, want %d", got, want)
- }
- if got, want := string(tc.vv.ToView()), origData; got != want {
- t.Errorf("got after-read data in tc.vv = %q, want %q", got, want)
- }
- })
- }
-}
-
-func TestVVRead(t *testing.T) {
- testCases := []struct {
- comment string
- vv buffer.VectorisedView
- bytesToRead int
- readBytes string
- leftBytes string
- wantError bool
- }{
- {
- comment: "large VV, short read",
- vv: vv(30, "012345678901234567890123456789"),
- bytesToRead: 10,
- readBytes: "0123456789",
- leftBytes: "01234567890123456789",
- },
- {
- comment: "largeVV, multiple buffers, short read",
- vv: vv(13, "123", "345", "567", "8910"),
- bytesToRead: 6,
- readBytes: "123345",
- leftBytes: "5678910",
- },
- {
- comment: "smallVV, large read",
- vv: vv(3, "1", "2", "3"),
- bytesToRead: 10,
- readBytes: "123",
- leftBytes: "",
- },
- {
- comment: "smallVV, large read",
- vv: vv(1, "1"),
- bytesToRead: 10,
- readBytes: "1",
- leftBytes: "",
- },
- {
- comment: "emptyVV, large read",
- vv: vv(0, ""),
- bytesToRead: 10,
- readBytes: "",
- wantError: true,
- },
- }
-
- for _, tc := range testCases {
- t.Run(tc.comment, func(t *testing.T) {
- readTo := buffer.NewView(tc.bytesToRead)
- inSize := tc.vv.Size()
- copied, err := tc.vv.Read(readTo)
- if !tc.wantError && err != nil {
- t.Fatalf("unexpected error in tc.vv.Read(..) = %s", err)
- }
- readTo = readTo[:copied]
- if got, want := copied, len(tc.readBytes); got != want {
- t.Errorf("incorrect number of bytes copied returned in ReadToVV got: %d, want: %d, tc.vv: %+v", got, want, tc.vv)
- }
- if got, want := string(readTo), tc.readBytes; got != want {
- t.Errorf("unexpected data in readTo got: %s, want: %s", got, want)
- }
- if got, want := tc.vv.Size(), inSize-copied; got != want {
- t.Errorf("test VV has incorrect size after reading got: %d, want: %d, tc.vv: %+v", got, want, tc.vv)
- }
- if got, want := string(tc.vv.ToView()), tc.leftBytes; got != want {
- t.Errorf("vv has incorrect data after Read got: %s, want: %s", got, want)
- }
- })
- }
-}
-
-var pullUpTestCases = []struct {
- comment string
- in buffer.VectorisedView
- count int
- want []byte
- result buffer.VectorisedView
- ok bool
-}{
- {
- comment: "simple case",
- in: vv(2, "12"),
- count: 1,
- want: []byte("1"),
- result: vv(2, "12"),
- ok: true,
- },
- {
- comment: "entire View",
- in: vv(2, "1", "2"),
- count: 1,
- want: []byte("1"),
- result: vv(2, "1", "2"),
- ok: true,
- },
- {
- comment: "spanning across two Views",
- in: vv(3, "1", "23"),
- count: 2,
- want: []byte("12"),
- result: vv(3, "12", "3"),
- ok: true,
- },
- {
- comment: "spanning across all Views",
- in: vv(5, "1", "23", "45"),
- count: 5,
- want: []byte("12345"),
- result: vv(5, "12345"),
- ok: true,
- },
- {
- comment: "count = 0",
- in: vv(1, "1"),
- count: 0,
- want: []byte{},
- result: vv(1, "1"),
- ok: true,
- },
- {
- comment: "count = size",
- in: vv(1, "1"),
- count: 1,
- want: []byte("1"),
- result: vv(1, "1"),
- ok: true,
- },
- {
- comment: "count too large",
- in: vv(3, "1", "23"),
- count: 4,
- want: nil,
- result: vv(3, "1", "23"),
- ok: false,
- },
- {
- comment: "empty vv",
- in: vv(0, ""),
- count: 1,
- want: nil,
- result: vv(0, ""),
- ok: false,
- },
- {
- comment: "empty vv, count = 0",
- in: vv(0, ""),
- count: 0,
- want: nil,
- result: vv(0, ""),
- ok: true,
- },
- {
- comment: "empty views",
- in: vv(3, "", "1", "", "23"),
- count: 2,
- want: []byte("12"),
- result: vv(3, "12", "3"),
- ok: true,
- },
-}
-
-func TestPullUp(t *testing.T) {
- for _, c := range pullUpTestCases {
- got, ok := c.in.PullUp(c.count)
-
- // Is the return value right?
- if ok != c.ok {
- t.Errorf("Test %q failed when calling PullUp(%d) on %v. Got an ok of %t. Want %t",
- c.comment, c.count, c.in, ok, c.ok)
- }
- if bytes.Compare(got, buffer.View(c.want)) != 0 {
- t.Errorf("Test %q failed when calling PullUp(%d) on %v. Got %v. Want %v",
- c.comment, c.count, c.in, got, c.want)
- }
-
- // Is the underlying structure right?
- if !reflect.DeepEqual(c.in, c.result) {
- t.Errorf("Test %q failed when calling PullUp(%d). Got vv with structure %v. Wanted %v",
- c.comment, c.count, c.in, c.result)
- }
- }
-}
-
-func TestToVectorisedView(t *testing.T) {
- testCases := []struct {
- in buffer.View
- want buffer.VectorisedView
- }{
- {nil, buffer.VectorisedView{}},
- {buffer.View{}, buffer.VectorisedView{}},
- {buffer.View{'a'}, buffer.NewVectorisedView(1, []buffer.View{{'a'}})},
- }
- for _, tc := range testCases {
- if got, want := tc.in.ToVectorisedView(), tc.want; !reflect.DeepEqual(got, want) {
- t.Errorf("(%v).ToVectorisedView failed got: %+v, want: %+v", tc.in, got, want)
- }
- }
-}
-
-func TestAppendView(t *testing.T) {
- testCases := []struct {
- vv buffer.VectorisedView
- in buffer.View
- want buffer.VectorisedView
- }{
- {vv(0), nil, vv(0)},
- {vv(0), v(""), vv(0)},
- {vv(4, "abcd"), nil, vv(4, "abcd")},
- {vv(4, "abcd"), v(""), vv(4, "abcd")},
- {vv(4, "abcd"), v("e"), vv(5, "abcd", "e")},
- }
- for _, tc := range testCases {
- tc.vv.AppendView(tc.in)
- if got, want := tc.vv, tc.want; !reflect.DeepEqual(got, want) {
- t.Errorf("(%v).ToVectorisedView failed got: %+v, want: %+v", tc.in, got, want)
- }
- }
-}
-
-func TestAppendViews(t *testing.T) {
- testCases := []struct {
- vv buffer.VectorisedView
- in []buffer.View
- want buffer.VectorisedView
- }{
- {vv(0), nil, vv(0)},
- {vv(0), []buffer.View{}, vv(0)},
- {vv(0), []buffer.View{v("")}, vv(0, "")},
- {vv(4, "abcd"), nil, vv(4, "abcd")},
- {vv(4, "abcd"), []buffer.View{}, vv(4, "abcd")},
- {vv(4, "abcd"), []buffer.View{v("")}, vv(4, "abcd", "")},
- {vv(4, "abcd"), []buffer.View{v("")}, vv(4, "abcd", "")},
- {vv(4, "abcd"), []buffer.View{v("e")}, vv(5, "abcd", "e")},
- {vv(4, "abcd"), []buffer.View{v("e"), v("fg")}, vv(7, "abcd", "e", "fg")},
- {vv(4, "abcd"), []buffer.View{v(""), v("fg")}, vv(6, "abcd", "", "fg")},
- }
- for _, tc := range testCases {
- tc.vv.AppendViews(tc.in)
- if got, want := tc.vv, tc.want; !reflect.DeepEqual(got, want) {
- t.Errorf("(%v).ToVectorisedView failed got: %+v, want: %+v", tc.in, got, want)
- }
- }
-}
-
-func TestMemSize(t *testing.T) {
- const perViewCap = 128
- views := make([]buffer.View, 2, 32)
- views[0] = make(buffer.View, 10, perViewCap)
- views[1] = make(buffer.View, 20, perViewCap)
- vv := buffer.NewVectorisedView(30, views)
- want := int(unsafe.Sizeof(vv)) + cap(views)*int(unsafe.Sizeof(views)) + 2*perViewCap
- if got := vv.MemSize(); got != want {
- t.Errorf("vv.MemSize() = %d, want %d", got, want)
- }
-}
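Editor's note: the deleted view tests above revolve around a handful of VectorisedView operations (NewVectorisedView, PullUp, TrimFront, CapLength, ToView). The small sketch below ties those operations together; it uses only methods exercised by the deleted tests, and the sample data is invented for illustration.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip/buffer"
)

func main() {
	// Assemble a VectorisedView from two views, mirroring the vv() helper in
	// the deleted tests. The first argument is the total size in bytes.
	views := []buffer.View{buffer.View("head"), buffer.View("er|payload")}
	vv := buffer.NewVectorisedView(len("header|payload"), views)

	// PullUp makes the first n bytes contiguous; ok is false if n > size.
	if hdr, ok := vv.PullUp(6); ok {
		fmt.Printf("header: %s\n", hdr) // "header"
	}

	// TrimFront drops bytes from the front, CapLength truncates the tail.
	vv.TrimFront(7) // drop "header|"
	vv.CapLength(7) // keep at most 7 remaining bytes

	// ToView flattens the remaining views into a single contiguous View.
	fmt.Printf("payload: %s\n", vv.ToView()) // "payload"
}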
diff --git a/pkg/tcpip/checker/BUILD b/pkg/tcpip/checker/BUILD
deleted file mode 100644
index c984470e6..000000000
--- a/pkg/tcpip/checker/BUILD
+++ /dev/null
@@ -1,17 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "checker",
- testonly = 1,
- srcs = ["checker.go"],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/header",
- "//pkg/tcpip/seqnum",
- "@com_github_google_go_cmp//cmp:go_default_library",
- ],
-)
diff --git a/pkg/tcpip/checker/checker.go b/pkg/tcpip/checker/checker.go
deleted file mode 100644
index e0dfe5813..000000000
--- a/pkg/tcpip/checker/checker.go
+++ /dev/null
@@ -1,1625 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package checker provides helper functions to check networking packets for
-// validity.
-package checker
-
-import (
- "encoding/binary"
- "reflect"
- "testing"
- "time"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/seqnum"
-)
-
-// NetworkChecker is a function to check a property of a network packet.
-type NetworkChecker func(*testing.T, []header.Network)
-
-// TransportChecker is a function to check a property of a transport packet.
-type TransportChecker func(*testing.T, header.Transport)
-
-// ControlMessagesChecker is a function to check a property of ancillary data.
-type ControlMessagesChecker func(*testing.T, tcpip.ControlMessages)
-
-// IPv4 checks the validity and properties of the given IPv4 packet. It is
-// expected to be used in conjunction with other network checkers for specific
-// properties. For example, to check the source and destination address, one
-// would call:
-//
-// checker.IPv4(t, b, checker.SrcAddr(x), checker.DstAddr(y))
-func IPv4(t *testing.T, b []byte, checkers ...NetworkChecker) {
- t.Helper()
-
- ipv4 := header.IPv4(b)
-
- if !ipv4.IsValid(len(b)) {
- t.Fatalf("Not a valid IPv4 packet: %x", ipv4)
- }
-
- if !ipv4.IsChecksumValid() {
- t.Errorf("Bad checksum, got = %d", ipv4.Checksum())
- }
-
- for _, f := range checkers {
- f(t, []header.Network{ipv4})
- }
- if t.Failed() {
- t.FailNow()
- }
-}
-
-// IPv6 checks the validity and properties of the given IPv6 packet. The usage
-// is similar to IPv4.
-func IPv6(t *testing.T, b []byte, checkers ...NetworkChecker) {
- t.Helper()
-
- ipv6 := header.IPv6(b)
- if !ipv6.IsValid(len(b)) {
- t.Fatalf("Not a valid IPv6 packet: %x", ipv6)
- }
-
- for _, f := range checkers {
- f(t, []header.Network{ipv6})
- }
- if t.Failed() {
- t.FailNow()
- }
-}
-
-// SrcAddr creates a checker that checks the source address.
-func SrcAddr(addr tcpip.Address) NetworkChecker {
- return func(t *testing.T, h []header.Network) {
- t.Helper()
-
- if a := h[0].SourceAddress(); a != addr {
- t.Errorf("Bad source address, got %v, want %v", a, addr)
- }
- }
-}
-
-// DstAddr creates a checker that checks the destination address.
-func DstAddr(addr tcpip.Address) NetworkChecker {
- return func(t *testing.T, h []header.Network) {
- t.Helper()
-
- if a := h[0].DestinationAddress(); a != addr {
- t.Errorf("Bad destination address, got %v, want %v", a, addr)
- }
- }
-}
-
-// TTL creates a checker that checks the TTL (ipv4) or HopLimit (ipv6).
-func TTL(ttl uint8) NetworkChecker {
- return func(t *testing.T, h []header.Network) {
- t.Helper()
-
- var v uint8
- switch ip := h[0].(type) {
- case header.IPv4:
- v = ip.TTL()
- case header.IPv6:
- v = ip.HopLimit()
- case *ipv6HeaderWithExtHdr:
- v = ip.HopLimit()
- default:
- t.Fatalf("unrecognized header type %T for TTL evaluation", ip)
- }
- if v != ttl {
- t.Fatalf("Bad TTL, got = %d, want = %d", v, ttl)
- }
- }
-}
-
-// IPFullLength creates a checker for the full IP packet length. The
-// expected size is checked against both the Total Length in the
-// header and the number of bytes received.
-func IPFullLength(packetLength uint16) NetworkChecker {
- return func(t *testing.T, h []header.Network) {
- t.Helper()
-
- var v uint16
- var l uint16
- switch ip := h[0].(type) {
- case header.IPv4:
- v = ip.TotalLength()
- l = uint16(len(ip))
- case header.IPv6:
- v = ip.PayloadLength() + header.IPv6FixedHeaderSize
- l = uint16(len(ip))
- default:
- t.Fatalf("unexpected network header passed to checker, got = %T, want = header.IPv4 or header.IPv6", ip)
- }
- if l != packetLength {
- t.Errorf("bad packet length, got = %d, want = %d", l, packetLength)
- }
- if v != packetLength {
- t.Errorf("unexpected packet length in header, got = %d, want = %d", v, packetLength)
- }
- }
-}
-
-// IPv4HeaderLength creates a checker that checks the IPv4 Header length.
-func IPv4HeaderLength(headerLength int) NetworkChecker {
- return func(t *testing.T, h []header.Network) {
- t.Helper()
-
- switch ip := h[0].(type) {
- case header.IPv4:
- if hl := ip.HeaderLength(); hl != uint8(headerLength) {
- t.Errorf("Bad header length, got = %d, want = %d", hl, headerLength)
- }
- default:
- t.Fatalf("unexpected network header passed to checker, got = %T, want = header.IPv4", ip)
- }
- }
-}
-
-// PayloadLen creates a checker that checks the payload length.
-func PayloadLen(payloadLength int) NetworkChecker {
- return func(t *testing.T, h []header.Network) {
- t.Helper()
-
- if l := len(h[0].Payload()); l != payloadLength {
- t.Errorf("Bad payload length, got = %d, want = %d", l, payloadLength)
- }
- }
-}
-
-// IPPayload creates a checker that checks the payload.
-func IPPayload(payload []byte) NetworkChecker {
- return func(t *testing.T, h []header.Network) {
- t.Helper()
-
- got := h[0].Payload()
-
- // cmp.Diff does not consider nil slices equal to empty slices, but we do.
- if len(got) == 0 && len(payload) == 0 {
- return
- }
-
- if diff := cmp.Diff(payload, got); diff != "" {
- t.Errorf("payload mismatch (-want +got):\n%s", diff)
- }
- }
-}
-
-// IPv4Options returns a checker that checks the options in an IPv4 packet.
-func IPv4Options(want header.IPv4Options) NetworkChecker {
- return func(t *testing.T, h []header.Network) {
- t.Helper()
-
- ip, ok := h[0].(header.IPv4)
- if !ok {
- t.Fatalf("unexpected network header passed to checker, got = %T, want = header.IPv4", h[0])
- }
- options := ip.Options()
- // cmp.Diff does not consider nil slices equal to empty slices, but we do.
- if len(want) == 0 && len(options) == 0 {
- return
- }
- if diff := cmp.Diff(want, options); diff != "" {
- t.Errorf("options mismatch (-want +got):\n%s", diff)
- }
- }
-}
-
-// IPv4RouterAlert returns a checker that checks that the RouterAlert option is
-// set in an IPv4 packet.
-func IPv4RouterAlert() NetworkChecker {
- return func(t *testing.T, h []header.Network) {
- t.Helper()
- ip, ok := h[0].(header.IPv4)
- if !ok {
- t.Fatalf("unexpected network header passed to checker, got = %T, want = header.IPv4", h[0])
- }
- iterator := ip.Options().MakeIterator()
- for {
- opt, done, err := iterator.Next()
- if err != nil {
- t.Fatalf("error acquiring next IPv4 option at offset %d", err.Pointer)
- }
- if done {
- break
- }
- if opt.Type() != header.IPv4OptionRouterAlertType {
- continue
- }
- want := [header.IPv4OptionRouterAlertLength]byte{
- byte(header.IPv4OptionRouterAlertType),
- header.IPv4OptionRouterAlertLength,
- header.IPv4OptionRouterAlertValue,
- header.IPv4OptionRouterAlertValue,
- }
- if diff := cmp.Diff(want[:], opt.Contents()); diff != "" {
- t.Errorf("router alert option mismatch (-want +got):\n%s", diff)
- }
- return
- }
- t.Errorf("failed to find router alert option in %v", ip.Options())
- }
-}
-
-// FragmentOffset creates a checker that checks the FragmentOffset field.
-func FragmentOffset(offset uint16) NetworkChecker {
- return func(t *testing.T, h []header.Network) {
- t.Helper()
-
- // We only do this for IPv4 for now.
- switch ip := h[0].(type) {
- case header.IPv4:
- if v := ip.FragmentOffset(); v != offset {
- t.Errorf("Bad fragment offset, got = %d, want = %d", v, offset)
- }
- }
- }
-}
-
-// FragmentFlags creates a checker that checks the fragment flags field.
-func FragmentFlags(flags uint8) NetworkChecker {
- return func(t *testing.T, h []header.Network) {
- t.Helper()
-
- // We only do this for IPv4 for now.
- switch ip := h[0].(type) {
- case header.IPv4:
- if v := ip.Flags(); v != flags {
- t.Errorf("Bad fragment offset, got = %d, want = %d", v, flags)
- }
- }
- }
-}
-
-// ReceiveTClass creates a checker that checks the TCLASS field in
-// ControlMessages.
-func ReceiveTClass(want uint32) ControlMessagesChecker {
- return func(t *testing.T, cm tcpip.ControlMessages) {
- t.Helper()
- if !cm.HasTClass {
- t.Errorf("got cm.HasTClass = %t, want = true", cm.HasTClass)
- } else if got := cm.TClass; got != want {
- t.Errorf("got cm.TClass = %d, want %d", got, want)
- }
- }
-}
-
-// ReceiveTOS creates a checker that checks the TOS field in ControlMessages.
-func ReceiveTOS(want uint8) ControlMessagesChecker {
- return func(t *testing.T, cm tcpip.ControlMessages) {
- t.Helper()
- if !cm.HasTOS {
- t.Errorf("got cm.HasTOS = %t, want = true", cm.HasTOS)
- } else if got := cm.TOS; got != want {
- t.Errorf("got cm.TOS = %d, want %d", got, want)
- }
- }
-}
-
-// ReceiveIPPacketInfo creates a checker that checks the PacketInfo field in
-// ControlMessages.
-func ReceiveIPPacketInfo(want tcpip.IPPacketInfo) ControlMessagesChecker {
- return func(t *testing.T, cm tcpip.ControlMessages) {
- t.Helper()
- if !cm.HasIPPacketInfo {
- t.Errorf("got cm.HasIPPacketInfo = %t, want = true", cm.HasIPPacketInfo)
- } else if diff := cmp.Diff(want, cm.PacketInfo); diff != "" {
- t.Errorf("IPPacketInfo mismatch (-want +got):\n%s", diff)
- }
- }
-}
-
-// ReceiveOriginalDstAddr creates a checker that checks the OriginalDstAddress
-// field in ControlMessages.
-func ReceiveOriginalDstAddr(want tcpip.FullAddress) ControlMessagesChecker {
- return func(t *testing.T, cm tcpip.ControlMessages) {
- t.Helper()
- if !cm.HasOriginalDstAddress {
- t.Errorf("got cm.HasOriginalDstAddress = %t, want = true", cm.HasOriginalDstAddress)
- } else if diff := cmp.Diff(want, cm.OriginalDstAddress); diff != "" {
- t.Errorf("OriginalDstAddress mismatch (-want +got):\n%s", diff)
- }
- }
-}
-
-// TOS creates a checker that checks the TOS field.
-func TOS(tos uint8, label uint32) NetworkChecker {
- return func(t *testing.T, h []header.Network) {
- t.Helper()
-
- if v, l := h[0].TOS(); v != tos || l != label {
- t.Errorf("Bad TOS, got = (%d, %d), want = (%d,%d)", v, l, tos, label)
- }
- }
-}
-
-// Raw creates a checker that checks the bytes of payload.
-// The checker always checks the payload of the last network header.
-// For instance, in case of IPv6 fragments, the payload that will be checked
-// is the one containing the actual data that the packet is carrying, without
-// the bytes added by the IPv6 fragmentation.
-func Raw(want []byte) NetworkChecker {
- return func(t *testing.T, h []header.Network) {
- t.Helper()
-
- if got := h[len(h)-1].Payload(); !reflect.DeepEqual(got, want) {
- t.Errorf("Wrong payload, got %v, want %v", got, want)
- }
- }
-}
-
-// IPv6Fragment creates a checker that validates an IPv6 fragment.
-func IPv6Fragment(checkers ...NetworkChecker) NetworkChecker {
- return func(t *testing.T, h []header.Network) {
- t.Helper()
-
- if p := h[0].TransportProtocol(); p != header.IPv6FragmentHeader {
- t.Errorf("Bad protocol, got = %d, want = %d", p, header.UDPProtocolNumber)
- }
-
- ipv6Frag := header.IPv6Fragment(h[0].Payload())
- if !ipv6Frag.IsValid() {
- t.Error("Not a valid IPv6 fragment")
- }
-
- for _, f := range checkers {
- f(t, []header.Network{h[0], ipv6Frag})
- }
- if t.Failed() {
- t.FailNow()
- }
- }
-}
-
-// TCP creates a checker that checks that the transport protocol is TCP and
-// potentially additional transport header fields.
-func TCP(checkers ...TransportChecker) NetworkChecker {
- return func(t *testing.T, h []header.Network) {
- t.Helper()
-
- first := h[0]
- last := h[len(h)-1]
-
- if p := last.TransportProtocol(); p != header.TCPProtocolNumber {
- t.Errorf("Bad protocol, got = %d, want = %d", p, header.TCPProtocolNumber)
- }
-
- tcp := header.TCP(last.Payload())
- payload := tcp.Payload()
- payloadChecksum := header.Checksum(payload, 0)
- if !tcp.IsChecksumValid(first.SourceAddress(), first.DestinationAddress(), payloadChecksum, uint16(len(payload))) {
- t.Errorf("Bad checksum, got = %d", tcp.Checksum())
- }
-
- // Run the transport checkers.
- for _, f := range checkers {
- f(t, tcp)
- }
- if t.Failed() {
- t.FailNow()
- }
- }
-}
-
-// UDP creates a checker that checks that the transport protocol is UDP and
-// potentially additional transport header fields.
-func UDP(checkers ...TransportChecker) NetworkChecker {
- return func(t *testing.T, h []header.Network) {
- t.Helper()
-
- last := h[len(h)-1]
-
- if p := last.TransportProtocol(); p != header.UDPProtocolNumber {
- t.Errorf("Bad protocol, got = %d, want = %d", p, header.UDPProtocolNumber)
- }
-
- udp := header.UDP(last.Payload())
- for _, f := range checkers {
- f(t, udp)
- }
- if t.Failed() {
- t.FailNow()
- }
- }
-}
-
-// SrcPort creates a checker that checks the source port.
-func SrcPort(port uint16) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- if p := h.SourcePort(); p != port {
- t.Errorf("Bad source port, got = %d, want = %d", p, port)
- }
- }
-}
-
-// DstPort creates a checker that checks the destination port.
-func DstPort(port uint16) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- if p := h.DestinationPort(); p != port {
- t.Errorf("Bad destination port, got = %d, want = %d", p, port)
- }
- }
-}
-
-// NoChecksum creates a checker that checks if the checksum is zero.
-func NoChecksum(noChecksum bool) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- udp, ok := h.(header.UDP)
- if !ok {
- t.Fatalf("UDP header not found in h: %T", h)
- }
-
- if b := udp.Checksum() == 0; b != noChecksum {
- t.Errorf("bad checksum state, got %t, want %t", b, noChecksum)
- }
- }
-}
-
-// TCPSeqNum creates a checker that checks the sequence number.
-func TCPSeqNum(seq uint32) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- tcp, ok := h.(header.TCP)
- if !ok {
- t.Fatalf("TCP header not found in h: %T", h)
- }
-
- if s := tcp.SequenceNumber(); s != seq {
- t.Errorf("Bad sequence number, got = %d, want = %d", s, seq)
- }
- }
-}
-
-// TCPAckNum creates a checker that checks the ack number.
-func TCPAckNum(seq uint32) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- tcp, ok := h.(header.TCP)
- if !ok {
- t.Fatalf("TCP header not found in h: %T", h)
- }
-
- if s := tcp.AckNumber(); s != seq {
- t.Errorf("Bad ack number, got = %d, want = %d", s, seq)
- }
- }
-}
-
-// TCPWindow creates a checker that checks the tcp window.
-func TCPWindow(window uint16) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- tcp, ok := h.(header.TCP)
- if !ok {
- t.Fatalf("TCP header not found in hdr : %T", h)
- }
-
- if w := tcp.WindowSize(); w != window {
- t.Errorf("Bad window, got %d, want %d", w, window)
- }
- }
-}
-
-// TCPWindowGreaterThanEq creates a checker that checks that the TCP window
-// is greater than or equal to the provided value.
-func TCPWindowGreaterThanEq(window uint16) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- tcp, ok := h.(header.TCP)
- if !ok {
- t.Fatalf("TCP header not found in h: %T", h)
- }
-
- if w := tcp.WindowSize(); w < window {
- t.Errorf("Bad window, got %d, want > %d", w, window)
- }
- }
-}
-
-// TCPWindowLessThanEq creates a checker that checks that the tcp window
-// is less than or equal to the provided value.
-func TCPWindowLessThanEq(window uint16) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- tcp, ok := h.(header.TCP)
- if !ok {
- t.Fatalf("TCP header not found in h: %T", h)
- }
-
- if w := tcp.WindowSize(); w > window {
- t.Errorf("Bad window, got %d, want < %d", w, window)
- }
- }
-}
-
-// TCPFlags creates a checker that checks the tcp flags.
-func TCPFlags(flags header.TCPFlags) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- tcp, ok := h.(header.TCP)
- if !ok {
- t.Fatalf("TCP header not found in h: %T", h)
- }
-
- if got := tcp.Flags(); got != flags {
- t.Errorf("got tcp.Flags() = %s, want %s", got, flags)
- }
- }
-}
-
-// TCPFlagsMatch creates a checker that checks that the tcp flags, masked by the
-// given mask, match the supplied flags.
-func TCPFlagsMatch(flags, mask header.TCPFlags) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- tcp, ok := h.(header.TCP)
- if !ok {
- t.Fatalf("TCP header not found in h: %T", h)
- }
-
- if got := tcp.Flags(); (got & mask) != (flags & mask) {
- t.Errorf("got tcp.Flags() = %s, want %s, mask %s", got, flags, mask)
- }
- }
-}
-
-// TCPSynOptions creates a checker that checks the presence of TCP options in
-// SYN segments.
-//
-// If wantOpts.WS is negative, the window scale option must not be present.
-func TCPSynOptions(wantOpts header.TCPSynOptions) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- tcp, ok := h.(header.TCP)
- if !ok {
- return
- }
- opts := tcp.Options()
- limit := len(opts)
- foundMSS := false
- foundWS := false
- foundTS := false
- foundSACKPermitted := false
- tsVal := uint32(0)
- tsEcr := uint32(0)
- for i := 0; i < limit; {
- switch opts[i] {
- case header.TCPOptionEOL:
- i = limit
- case header.TCPOptionNOP:
- i++
- case header.TCPOptionMSS:
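- // The MSS option is 4 bytes: kind, length, then the 16-bit MSS
- // value in network byte order.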
- v := uint16(opts[i+2])<<8 | uint16(opts[i+3])
- if wantOpts.MSS != v {
- t.Errorf("Bad MSS, got = %d, want = %d", v, wantOpts.MSS)
- }
- foundMSS = true
- i += 4
- case header.TCPOptionWS:
- if wantOpts.WS < 0 {
- t.Error("WS present when it shouldn't be")
- }
- v := int(opts[i+2])
- if v != wantOpts.WS {
- t.Errorf("Bad WS, got = %d, want = %d", v, wantOpts.WS)
- }
- foundWS = true
- i += 3
- case header.TCPOptionTS:
- if i+9 >= limit {
- t.Errorf("TS option truncated, option is only %d bytes, want 10", limit-i)
- }
- if opts[i+1] != 10 {
- t.Errorf("Bad length %d for TS option, limit: %d", opts[i+1], limit)
- }
- tsVal = binary.BigEndian.Uint32(opts[i+2:])
- tsEcr = uint32(0)
- if tcp.Flags()&header.TCPFlagAck != 0 {
- // If the SYN is a SYN-ACK, read the
- // tsEcr value as well.
- tsEcr = binary.BigEndian.Uint32(opts[i+6:])
- }
- foundTS = true
- i += 10
- case header.TCPOptionSACKPermitted:
- if i+1 >= limit {
- t.Errorf("SACKPermitted option truncated, option is only %d bytes, want 2", limit-i)
- }
- if opts[i+1] != 2 {
- t.Errorf("Bad length %d for SACKPermitted option, limit: %d", opts[i+1], limit)
- }
- foundSACKPermitted = true
- i += 2
-
- default:
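- // Unknown options are skipped using their length byte; the SYN
- // option buffers under test are assumed to be well formed here.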
- i += int(opts[i+1])
- }
- }
-
- if !foundMSS {
- t.Errorf("MSS option not found. Options: %x", opts)
- }
-
- if !foundWS && wantOpts.WS >= 0 {
- t.Errorf("WS option not found. Options: %x", opts)
- }
- if wantOpts.TS && !foundTS {
- t.Errorf("TS option not found. Options: %x", opts)
- }
- if foundTS && tsVal == 0 {
- t.Error("TS option specified but the timestamp value is zero")
- }
- if foundTS && tsEcr == 0 && wantOpts.TSEcr != 0 {
- t.Errorf("TS option specified but TSEcr is incorrect, got = %d, want = %d", tsEcr, wantOpts.TSEcr)
- }
- if wantOpts.SACKPermitted && !foundSACKPermitted {
- t.Errorf("SACKPermitted option not found. Options: %x", opts)
- }
- }
-}
-
-// TCPTimestampChecker creates a checker that validates that a TCP segment has
-// a TCP Timestamp option if wantTS is true, and compares the wantTSVal and
-// wantTSEcr values with those in the TCP segment (if present).
-//
-// If wantTSVal or wantTSEcr is zero then the corresponding comparison is
-// skipped.
-func TCPTimestampChecker(wantTS bool, wantTSVal uint32, wantTSEcr uint32) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- tcp, ok := h.(header.TCP)
- if !ok {
- return
- }
- opts := tcp.Options()
- limit := len(opts)
- foundTS := false
- tsVal := uint32(0)
- tsEcr := uint32(0)
- for i := 0; i < limit; {
- switch opts[i] {
- case header.TCPOptionEOL:
- i = limit
- case header.TCPOptionNOP:
- i++
- case header.TCPOptionTS:
- if i+9 >= limit {
- t.Errorf("TS option found, but option is truncated, option length: %d, want 10 bytes", limit-i)
- }
- if opts[i+1] != 10 {
- t.Errorf("TS option found, but bad length specified: got = %d, want = 10", opts[i+1])
- }
- tsVal = binary.BigEndian.Uint32(opts[i+2:])
- tsEcr = binary.BigEndian.Uint32(opts[i+6:])
- foundTS = true
- i += 10
- default:
- // We don't recognize this option, just skip over it.
- if i+2 > limit {
- return
- }
- l := int(opts[i+1])
- if l < 2 || i+l > limit {
- return
- }
- i += l
- }
- }
-
- if wantTS != foundTS {
- t.Errorf("TS Option mismatch, got TS= %t, want TS= %t", foundTS, wantTS)
- }
- if wantTS && wantTSVal != 0 && wantTSVal != tsVal {
- t.Errorf("Timestamp value is incorrect, got = %d, want = %d", tsVal, wantTSVal)
- }
- if wantTS && wantTSEcr != 0 && tsEcr != wantTSEcr {
- t.Errorf("Timestamp Echo Reply is incorrect, got = %d, want = %d", tsEcr, wantTSEcr)
- }
- }
-}
-
-// TCPSACKBlockChecker creates a checker that verifies that the segment
-// contains the specified SACK blocks in the TCP options.
-func TCPSACKBlockChecker(sackBlocks []header.SACKBlock) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
- tcp, ok := h.(header.TCP)
- if !ok {
- return
- }
- var gotSACKBlocks []header.SACKBlock
-
- opts := tcp.Options()
- limit := len(opts)
- for i := 0; i < limit; {
- switch opts[i] {
- case header.TCPOptionEOL:
- i = limit
- case header.TCPOptionNOP:
- i++
- case header.TCPOptionSACK:
- if i+2 > limit {
- // Malformed SACK block.
- t.Errorf("malformed SACK option in options: %v", opts)
- }
- sackOptionLen := int(opts[i+1])
- if i+sackOptionLen > limit || (sackOptionLen-2)%8 != 0 {
- // Malformed SACK block.
- t.Errorf("malformed SACK option length in options: %v", opts)
- }
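- // sackOptionLen includes the 2-byte kind/length header; because
- // (sackOptionLen-2)%8 == 0, integer division by 8 still yields the
- // number of 8-byte SACK blocks.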
- numBlocks := sackOptionLen / 8
- for j := 0; j < numBlocks; j++ {
- start := binary.BigEndian.Uint32(opts[i+2+j*8:])
- end := binary.BigEndian.Uint32(opts[i+2+j*8+4:])
- gotSACKBlocks = append(gotSACKBlocks, header.SACKBlock{
- Start: seqnum.Value(start),
- End: seqnum.Value(end),
- })
- }
- i += sackOptionLen
- default:
- // We don't recognize this option, just skip over it.
- // Return on a truncated or malformed length so the loop cannot
- // spin on the same index (break here would only exit the switch).
- if i+2 > limit {
- return
- }
- l := int(opts[i+1])
- if l < 2 || i+l > limit {
- return
- }
- i += l
- }
- }
-
- if !reflect.DeepEqual(gotSACKBlocks, sackBlocks) {
- t.Errorf("SACKBlocks are not equal, got = %v, want = %v", gotSACKBlocks, sackBlocks)
- }
- }
-}
-
-// Payload creates a checker that checks the payload.
-func Payload(want []byte) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- if got := h.Payload(); !reflect.DeepEqual(got, want) {
- t.Errorf("Wrong payload, got %v, want %v", got, want)
- }
- }
-}
-
-// ICMPv4 creates a checker that checks that the transport protocol is ICMPv4
-// and potentially additional ICMPv4 header fields.
-func ICMPv4(checkers ...TransportChecker) NetworkChecker {
- return func(t *testing.T, h []header.Network) {
- t.Helper()
-
- last := h[len(h)-1]
-
- if p := last.TransportProtocol(); p != header.ICMPv4ProtocolNumber {
- t.Fatalf("Bad protocol, got %d, want %d", p, header.ICMPv4ProtocolNumber)
- }
-
- icmp := header.ICMPv4(last.Payload())
- for _, f := range checkers {
- f(t, icmp)
- }
- if t.Failed() {
- t.FailNow()
- }
- }
-}
-
-// ICMPv4Type creates a checker that checks the ICMPv4 Type field.
-func ICMPv4Type(want header.ICMPv4Type) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- icmpv4, ok := h.(header.ICMPv4)
- if !ok {
- t.Fatalf("unexpected transport header passed to checker, got = %T, want = header.ICMPv4", h)
- }
- if got := icmpv4.Type(); got != want {
- t.Fatalf("unexpected icmp type, got = %d, want = %d", got, want)
- }
- }
-}
-
-// ICMPv4Code creates a checker that checks the ICMPv4 Code field.
-func ICMPv4Code(want header.ICMPv4Code) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- icmpv4, ok := h.(header.ICMPv4)
- if !ok {
- t.Fatalf("unexpected transport header passed to checker, got = %T, want = header.ICMPv4", h)
- }
- if got := icmpv4.Code(); got != want {
- t.Fatalf("unexpected ICMP code, got = %d, want = %d", got, want)
- }
- }
-}
-
-// ICMPv4Ident creates a checker that checks the ICMPv4 echo Ident.
-func ICMPv4Ident(want uint16) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- icmpv4, ok := h.(header.ICMPv4)
- if !ok {
- t.Fatalf("unexpected transport header passed to checker, got = %T, want = header.ICMPv4", h)
- }
- if got := icmpv4.Ident(); got != want {
- t.Fatalf("unexpected ICMP ident, got = %d, want = %d", got, want)
- }
- }
-}
-
-// ICMPv4Seq creates a checker that checks the ICMPv4 echo Sequence.
-func ICMPv4Seq(want uint16) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- icmpv4, ok := h.(header.ICMPv4)
- if !ok {
- t.Fatalf("unexpected transport header passed to checker, got = %T, want = header.ICMPv4", h)
- }
- if got := icmpv4.Sequence(); got != want {
- t.Fatalf("unexpected ICMP sequence, got = %d, want = %d", got, want)
- }
- }
-}
-
-// ICMPv4Pointer creates a checker that checks the ICMPv4 Param Problem pointer.
-func ICMPv4Pointer(want uint8) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- icmpv4, ok := h.(header.ICMPv4)
- if !ok {
- t.Fatalf("unexpected transport header passed to checker, got = %T, want = header.ICMPv4", h)
- }
- if got := icmpv4.Pointer(); got != want {
- t.Fatalf("unexpected ICMP Param Problem pointer, got = %d, want = %d", got, want)
- }
- }
-}
-
-// ICMPv4Checksum creates a checker that checks the ICMPv4 Checksum.
-// This assumes that the payload exactly makes up the rest of the slice.
-func ICMPv4Checksum() TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- icmpv4, ok := h.(header.ICMPv4)
- if !ok {
- t.Fatalf("unexpected transport header passed to checker, got = %T, want = header.ICMPv4", h)
- }
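- // Recompute the checksum with the Checksum field zeroed, then
- // restore the original value before comparing.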
- heldChecksum := icmpv4.Checksum()
- icmpv4.SetChecksum(0)
- newChecksum := ^header.Checksum(icmpv4, 0)
- icmpv4.SetChecksum(heldChecksum)
- if heldChecksum != newChecksum {
- t.Errorf("unexpected ICMP checksum, got = %d, want = %d", heldChecksum, newChecksum)
- }
- }
-}
-
-// ICMPv4Payload creates a checker that checks the payload in an ICMPv4 packet.
-func ICMPv4Payload(want []byte) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- icmpv4, ok := h.(header.ICMPv4)
- if !ok {
- t.Fatalf("unexpected transport header passed to checker, got = %T, want = header.ICMPv4", h)
- }
- payload := icmpv4.Payload()
-
- // cmp.Diff does not consider nil slices equal to empty slices, but we do.
- if len(want) == 0 && len(payload) == 0 {
- return
- }
-
- if diff := cmp.Diff(want, payload); diff != "" {
- t.Errorf("ICMP payload mismatch (-want +got):\n%s", diff)
- }
- }
-}
-
-// ICMPv6 creates a checker that checks that the transport protocol is ICMPv6 and
-// potentially additional ICMPv6 header fields.
-//
-// ICMPv6 will validate the checksum field before calling checkers.
-func ICMPv6(checkers ...TransportChecker) NetworkChecker {
- return func(t *testing.T, h []header.Network) {
- t.Helper()
-
- last := h[len(h)-1]
-
- if p := last.TransportProtocol(); p != header.ICMPv6ProtocolNumber {
- t.Fatalf("Bad protocol, got %d, want %d", p, header.ICMPv6ProtocolNumber)
- }
-
- icmp := header.ICMPv6(last.Payload())
- if got, want := icmp.Checksum(), header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
- Header: icmp,
- Src: last.SourceAddress(),
- Dst: last.DestinationAddress(),
- }); got != want {
- t.Fatalf("Bad ICMPv6 checksum; got %d, want %d", got, want)
- }
-
- for _, f := range checkers {
- f(t, icmp)
- }
- if t.Failed() {
- t.FailNow()
- }
- }
-}
-
-// ICMPv6Type creates a checker that checks the ICMPv6 Type field.
-func ICMPv6Type(want header.ICMPv6Type) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- icmpv6, ok := h.(header.ICMPv6)
- if !ok {
- t.Fatalf("unexpected transport header passed to checker, got = %T, want = header.ICMPv6", h)
- }
- if got := icmpv6.Type(); got != want {
- t.Fatalf("unexpected icmp type, got = %d, want = %d", got, want)
- }
- }
-}
-
-// ICMPv6Code creates a checker that checks the ICMPv6 Code field.
-func ICMPv6Code(want header.ICMPv6Code) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- icmpv6, ok := h.(header.ICMPv6)
- if !ok {
- t.Fatalf("unexpected transport header passed to checker, got = %T, want = header.ICMPv6", h)
- }
- if got := icmpv6.Code(); got != want {
- t.Fatalf("unexpected ICMP code, got = %d, want = %d", got, want)
- }
- }
-}
-
-// ICMPv6TypeSpecific creates a checker that checks the ICMPv6 TypeSpecific
-// field.
-func ICMPv6TypeSpecific(want uint32) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- icmpv6, ok := h.(header.ICMPv6)
- if !ok {
- t.Fatalf("unexpected transport header passed to checker, got = %T, want = header.ICMPv6", h)
- }
- if got := icmpv6.TypeSpecific(); got != want {
- t.Fatalf("unexpected ICMP TypeSpecific, got = %d, want = %d", got, want)
- }
- }
-}
-
-// ICMPv6Payload creates a checker that checks the payload in an ICMPv6 packet.
-func ICMPv6Payload(want []byte) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- icmpv6, ok := h.(header.ICMPv6)
- if !ok {
- t.Fatalf("unexpected transport header passed to checker, got = %T, want = header.ICMPv6", h)
- }
- payload := icmpv6.Payload()
-
- // cmp.Diff does not consider nil slices equal to empty slices, but we do.
- if len(want) == 0 && len(payload) == 0 {
- return
- }
-
- if diff := cmp.Diff(want, payload); diff != "" {
- t.Errorf("ICMP payload mismatch (-want +got):\n%s", diff)
- }
- }
-}
-
-// MLD creates a checker that checks that the packet contains a valid MLD
-// message of type msgType, with potentially additional checks specified by
-// checkers.
-//
-// Checkers may assume that a valid ICMPv6 is passed to them containing a valid
-// MLD message as far as the size of the message (minSize) is concerned. The
-// values within the message are up to checkers to validate.
-func MLD(msgType header.ICMPv6Type, minSize int, checkers ...TransportChecker) NetworkChecker {
- return func(t *testing.T, h []header.Network) {
- t.Helper()
-
- // Check normal ICMPv6 first.
- ICMPv6(
- ICMPv6Type(msgType),
- ICMPv6Code(0))(t, h)
-
- last := h[len(h)-1]
-
- icmp := header.ICMPv6(last.Payload())
- if got := len(icmp.MessageBody()); got < minSize {
- t.Fatalf("ICMPv6 MLD (type = %d) payload size of %d is less than the minimum size of %d", msgType, got, minSize)
- }
-
- for _, f := range checkers {
- f(t, icmp)
- }
- if t.Failed() {
- t.FailNow()
- }
- }
-}
-
-// MLDMaxRespDelay creates a checker that checks the Maximum Response Delay
-// field of a MLD message.
-//
-// The returned TransportChecker assumes that a valid ICMPv6 is passed to it
-// containing a valid MLD message as far as the size is concerned.
-func MLDMaxRespDelay(want time.Duration) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- icmp := h.(header.ICMPv6)
- ns := header.MLD(icmp.MessageBody())
-
- if got := ns.MaximumResponseDelay(); got != want {
- t.Errorf("got %T.MaximumResponseDelay() = %s, want = %s", ns, got, want)
- }
- }
-}
-
-// MLDMulticastAddress creates a checker that checks the Multicast Address
-// field of a MLD message.
-//
-// The returned TransportChecker assumes that a valid ICMPv6 is passed to it
-// containing a valid MLD message as far as the size is concerned.
-func MLDMulticastAddress(want tcpip.Address) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- icmp := h.(header.ICMPv6)
- ns := header.MLD(icmp.MessageBody())
-
- if got := ns.MulticastAddress(); got != want {
- t.Errorf("got %T.MulticastAddress() = %s, want = %s", ns, got, want)
- }
- }
-}
-
-// NDP creates a checker that checks that the packet contains a valid NDP
-// message of type msgType, with potentially additional checks specified by
-// checkers.
-//
-// Checkers may assume that a valid ICMPv6 is passed to them containing a valid
-// NDP message as far as the size of the message (minSize) is concerned. The
-// values within the message are up to checkers to validate.
-func NDP(msgType header.ICMPv6Type, minSize int, checkers ...TransportChecker) NetworkChecker {
- return func(t *testing.T, h []header.Network) {
- t.Helper()
-
- // Check normal ICMPv6 first.
- ICMPv6(
- ICMPv6Type(msgType),
- ICMPv6Code(0))(t, h)
-
- last := h[len(h)-1]
-
- icmp := header.ICMPv6(last.Payload())
- if got := len(icmp.MessageBody()); got < minSize {
- t.Fatalf("ICMPv6 NDP (type = %d) payload size of %d is less than the minimum size of %d", msgType, got, minSize)
- }
-
- for _, f := range checkers {
- f(t, icmp)
- }
- if t.Failed() {
- t.FailNow()
- }
- }
-}
-
-// NDPNS creates a checker that checks that the packet contains a valid NDP
-// Neighbor Solicitation message (as per the raw wire format), with potentially
-// additional checks specified by checkers.
-//
-// Checkers may assume that a valid ICMPv6 is passed to them containing a valid
-// NDPNS message as far as the size of the message is concerned. The values
-// within the message are up to checkers to validate.
-func NDPNS(checkers ...TransportChecker) NetworkChecker {
- return NDP(header.ICMPv6NeighborSolicit, header.NDPNSMinimumSize, checkers...)
-}
-
-// NDPNSTargetAddress creates a checker that checks the Target Address field of
-// a header.NDPNeighborSolicit.
-//
-// The returned TransportChecker assumes that a valid ICMPv6 is passed to it
-// containing a valid NDPNS message as far as the size is concerned.
-func NDPNSTargetAddress(want tcpip.Address) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- icmp := h.(header.ICMPv6)
- ns := header.NDPNeighborSolicit(icmp.MessageBody())
-
- if got := ns.TargetAddress(); got != want {
- t.Errorf("got %T.TargetAddress() = %s, want = %s", ns, got, want)
- }
- }
-}
-
-// NDPNA creates a checker that checks that the packet contains a valid NDP
-// Neighbor Advertisement message (as per the raw wire format), with potentially
-// additional checks specified by checkers.
-//
-// Checkers may assume that a valid ICMPv6 is passed to them containing a valid
-// NDPNA message as far as the size of the message is concerned. The values
-// within the message are up to checkers to validate.
-func NDPNA(checkers ...TransportChecker) NetworkChecker {
- return NDP(header.ICMPv6NeighborAdvert, header.NDPNAMinimumSize, checkers...)
-}
-
-// NDPNATargetAddress creates a checker that checks the Target Address field of
-// a header.NDPNeighborAdvert.
-//
-// The returned TransportChecker assumes that a valid ICMPv6 is passed to it
-// containing a valid NDPNA message as far as the size is concerned.
-func NDPNATargetAddress(want tcpip.Address) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- icmp := h.(header.ICMPv6)
- na := header.NDPNeighborAdvert(icmp.MessageBody())
-
- if got := na.TargetAddress(); got != want {
- t.Errorf("got %T.TargetAddress() = %s, want = %s", na, got, want)
- }
- }
-}
-
-// NDPNASolicitedFlag creates a checker that checks the Solicited field of
-// a header.NDPNeighborAdvert.
-//
-// The returned TransportChecker assumes that a valid ICMPv6 is passed to it
-// containing a valid NDPNA message as far as the size is concerned.
-func NDPNASolicitedFlag(want bool) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- icmp := h.(header.ICMPv6)
- na := header.NDPNeighborAdvert(icmp.MessageBody())
-
- if got := na.SolicitedFlag(); got != want {
- t.Errorf("got %T.SolicitedFlag = %t, want = %t", na, got, want)
- }
- }
-}
-
-// ndpOptions checks that optsBuf only contains opts.
-func ndpOptions(t *testing.T, optsBuf header.NDPOptions, opts []header.NDPOption) {
- t.Helper()
-
- it, err := optsBuf.Iter(true)
- if err != nil {
- t.Errorf("optsBuf.Iter(true): %s", err)
- return
- }
-
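- // Walk the parsed options in order, comparing each one against the
- // expected option at the same index.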
- i := 0
- for {
- opt, done, err := it.Next()
- if err != nil {
- // This should never happen as Iter(true) above did not return an error.
- t.Fatalf("unexpected error when iterating over NDP options: %s", err)
- }
- if done {
- break
- }
-
- if i >= len(opts) {
- t.Errorf("got unexpected option: %s", opt)
- continue
- }
-
- switch wantOpt := opts[i].(type) {
- case header.NDPSourceLinkLayerAddressOption:
- gotOpt, ok := opt.(header.NDPSourceLinkLayerAddressOption)
- if !ok {
- t.Errorf("got type = %T at index = %d; want = %T", opt, i, wantOpt)
- } else if got, want := gotOpt.EthernetAddress(), wantOpt.EthernetAddress(); got != want {
- t.Errorf("got EthernetAddress() = %s at index %d, want = %s", got, i, want)
- }
- case header.NDPTargetLinkLayerAddressOption:
- gotOpt, ok := opt.(header.NDPTargetLinkLayerAddressOption)
- if !ok {
- t.Errorf("got type = %T at index = %d; want = %T", opt, i, wantOpt)
- } else if got, want := gotOpt.EthernetAddress(), wantOpt.EthernetAddress(); got != want {
- t.Errorf("got EthernetAddress() = %s at index %d, want = %s", got, i, want)
- }
- case header.NDPNonceOption:
- gotOpt, ok := opt.(header.NDPNonceOption)
- if !ok {
- t.Errorf("got type = %T at index = %d; want = %T", opt, i, wantOpt)
- } else if diff := cmp.Diff(wantOpt.Nonce(), gotOpt.Nonce()); diff != "" {
- t.Errorf("nonce mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatalf("checker not implemented for expected NDP option: %T", wantOpt)
- }
-
- i++
- }
-
- if missing := opts[i:]; len(missing) > 0 {
- t.Errorf("missing options: %s", missing)
- }
-}
-
-// NDPNAOptions creates a checker that checks that the packet contains the
-// provided NDP options within an NDP Neighbor Advertisement message.
-//
-// The returned TransportChecker assumes that a valid ICMPv6 is passed to it
-// containing a valid NDPNA message as far as the size is concerned.
-func NDPNAOptions(opts []header.NDPOption) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- icmp := h.(header.ICMPv6)
- na := header.NDPNeighborAdvert(icmp.MessageBody())
- ndpOptions(t, na.Options(), opts)
- }
-}
-
-// NDPNSOptions creates a checker that checks that the packet contains the
-// provided NDP options within an NDP Neighbor Solicitation message.
-//
-// The returned TransportChecker assumes that a valid ICMPv6 is passed to it
-// containing a valid NDPNS message as far as the size is concerned.
-func NDPNSOptions(opts []header.NDPOption) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- icmp := h.(header.ICMPv6)
- ns := header.NDPNeighborSolicit(icmp.MessageBody())
- ndpOptions(t, ns.Options(), opts)
- }
-}
-
-// NDPRS creates a checker that checks that the packet contains a valid NDP
-// Router Solicitation message (as per the raw wire format).
-//
-// Checkers may assume that a valid ICMPv6 is passed to them containing a valid
-// NDPRS as far as the size of the message is concerned. The values within the
-// message are up to checkers to validate.
-func NDPRS(checkers ...TransportChecker) NetworkChecker {
- return NDP(header.ICMPv6RouterSolicit, header.NDPRSMinimumSize, checkers...)
-}
-
-// NDPRSOptions creates a checker that checks that the packet contains the
-// provided NDP options within an NDP Router Solicitation message.
-//
-// The returned TransportChecker assumes that a valid ICMPv6 is passed to it
-// containing a valid NDPRS message as far as the size is concerned.
-func NDPRSOptions(opts []header.NDPOption) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- icmp := h.(header.ICMPv6)
- rs := header.NDPRouterSolicit(icmp.MessageBody())
- ndpOptions(t, rs.Options(), opts)
- }
-}
-
-// IGMP checks the validity and properties of the given IGMP packet. It is
-// expected to be used in conjunction with other IGMP transport checkers for
-// specific properties.
-func IGMP(checkers ...TransportChecker) NetworkChecker {
- return func(t *testing.T, h []header.Network) {
- t.Helper()
-
- last := h[len(h)-1]
-
- if p := last.TransportProtocol(); p != header.IGMPProtocolNumber {
- t.Fatalf("Bad protocol, got %d, want %d", p, header.IGMPProtocolNumber)
- }
-
- igmp := header.IGMP(last.Payload())
- for _, f := range checkers {
- f(t, igmp)
- }
- if t.Failed() {
- t.FailNow()
- }
- }
-}
-
-// IGMPType creates a checker that checks the IGMP Type field.
-func IGMPType(want header.IGMPType) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- igmp, ok := h.(header.IGMP)
- if !ok {
- t.Fatalf("got transport header = %T, want = header.IGMP", h)
- }
- if got := igmp.Type(); got != want {
- t.Errorf("got igmp.Type() = %d, want = %d", got, want)
- }
- }
-}
-
-// IGMPMaxRespTime creates a checker that checks the IGMP Max Resp Time field.
-func IGMPMaxRespTime(want time.Duration) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- igmp, ok := h.(header.IGMP)
- if !ok {
- t.Fatalf("got transport header = %T, want = header.IGMP", h)
- }
- if got := igmp.MaxRespTime(); got != want {
- t.Errorf("got igmp.MaxRespTime() = %s, want = %s", got, want)
- }
- }
-}
-
-// IGMPGroupAddress creates a checker that checks the IGMP Group Address field.
-func IGMPGroupAddress(want tcpip.Address) TransportChecker {
- return func(t *testing.T, h header.Transport) {
- t.Helper()
-
- igmp, ok := h.(header.IGMP)
- if !ok {
- t.Fatalf("got transport header = %T, want = header.IGMP", h)
- }
- if got := igmp.GroupAddress(); got != want {
- t.Errorf("got igmp.GroupAddress() = %s, want = %s", got, want)
- }
- }
-}
-
-// IPv6ExtHdrChecker is a function to check an extension header.
-type IPv6ExtHdrChecker func(*testing.T, header.IPv6PayloadHeader)
-
-// IPv6WithExtHdr is like IPv6 but allows IPv6 packets with extension headers.
-func IPv6WithExtHdr(t *testing.T, b []byte, checkers ...NetworkChecker) {
- t.Helper()
-
- ipv6 := header.IPv6(b)
- if !ipv6.IsValid(len(b)) {
- t.Error("not a valid IPv6 packet")
- return
- }
-
- payloadIterator := header.MakeIPv6PayloadIterator(
- header.IPv6ExtensionHeaderIdentifier(ipv6.NextHeader()),
- buffer.View(ipv6.Payload()).ToVectorisedView(),
- )
-
- var rawPayloadHeader header.IPv6RawPayloadHeader
- for {
- h, done, err := payloadIterator.Next()
- if err != nil {
- t.Errorf("payloadIterator.Next(): %s", err)
- return
- }
- if done {
- t.Errorf("got payloadIterator.Next() = (%T, %t, _), want = (_, false, _)", h, done)
- return
- }
- r, ok := h.(header.IPv6RawPayloadHeader)
- if ok {
- rawPayloadHeader = r
- break
- }
- }
-
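- // Wrap the IPv6 header so that TransportProtocol and Payload reflect
- // the first raw payload found after any extension headers.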
- networkHeader := ipv6HeaderWithExtHdr{
- IPv6: ipv6,
- transport: tcpip.TransportProtocolNumber(rawPayloadHeader.Identifier),
- payload: rawPayloadHeader.Buf.ToView(),
- }
-
- for _, checker := range checkers {
- checker(t, []header.Network{&networkHeader})
- }
-}
-
-// IPv6ExtHdr checks for the presence of extension headers.
-//
-// All the extension headers in headers will be checked exhaustively in the
-// order provided.
-func IPv6ExtHdr(headers ...IPv6ExtHdrChecker) NetworkChecker {
- return func(t *testing.T, h []header.Network) {
- t.Helper()
-
- extHdrs, ok := h[0].(*ipv6HeaderWithExtHdr)
- if !ok {
- t.Errorf("got network header = %T, want = *ipv6HeaderWithExtHdr", h[0])
- return
- }
-
- payloadIterator := header.MakeIPv6PayloadIterator(
- header.IPv6ExtensionHeaderIdentifier(extHdrs.IPv6.NextHeader()),
- buffer.View(extHdrs.IPv6.Payload()).ToVectorisedView(),
- )
-
- for _, check := range headers {
- h, done, err := payloadIterator.Next()
- if err != nil {
- t.Errorf("payloadIterator.Next(): %s", err)
- return
- }
- if done {
- t.Errorf("got payloadIterator.Next() = (%T, %t, _), want = (_, false, _)", h, done)
- return
- }
- check(t, h)
- }
- // Validate we consumed all headers.
- //
- // The next one over should be a raw payload and then iterator should
- // terminate.
- wantDone := false
- for {
- h, done, err := payloadIterator.Next()
- if err != nil {
- t.Errorf("payloadIterator.Next(): %s", err)
- return
- }
- if done != wantDone {
- t.Errorf("got payloadIterator.Next() = (%T, %t, _), want = (_, %t, _)", h, done, wantDone)
- return
- }
- if done {
- break
- }
- if _, ok := h.(header.IPv6RawPayloadHeader); !ok {
- t.Errorf("got payloadIterator.Next() = (%T, _, _), want = (header.IPv6RawPayloadHeader, _, _)", h)
- continue
- }
- wantDone = true
- }
- }
-}
-
-var _ header.Network = (*ipv6HeaderWithExtHdr)(nil)
-
-// ipv6HeaderWithExtHdr provides a header.Network implementation that takes
-// extension headers into consideration, which is not the case with vanilla
-// header.IPv6.
-type ipv6HeaderWithExtHdr struct {
- header.IPv6
- transport tcpip.TransportProtocolNumber
- payload []byte
-}
-
-// TransportProtocol implements header.Network.
-func (h *ipv6HeaderWithExtHdr) TransportProtocol() tcpip.TransportProtocolNumber {
- return h.transport
-}
-
-// Payload implements header.Network.
-func (h *ipv6HeaderWithExtHdr) Payload() []byte {
- return h.payload
-}
-
-// IPv6ExtHdrOptionChecker is a function to check an extension header option.
-type IPv6ExtHdrOptionChecker func(*testing.T, header.IPv6ExtHdrOption)
-
-// IPv6HopByHopExtensionHeader checks the extension header is a Hop by Hop
-// extension header and validates the containing options with checkers.
-//
-// checkers must exhaustively contain all the expected options.
-func IPv6HopByHopExtensionHeader(checkers ...IPv6ExtHdrOptionChecker) IPv6ExtHdrChecker {
- return func(t *testing.T, payloadHeader header.IPv6PayloadHeader) {
- t.Helper()
-
- hbh, ok := payloadHeader.(header.IPv6HopByHopOptionsExtHdr)
- if !ok {
- t.Errorf("unexpected IPv6 payload header, got = %T, want = header.IPv6HopByHopOptionsExtHdr", payloadHeader)
- return
- }
- optionsIterator := hbh.Iter()
- for _, f := range checkers {
- opt, done, err := optionsIterator.Next()
- if err != nil {
- t.Errorf("optionsIterator.Next(): %s", err)
- return
- }
- if done {
- t.Errorf("got optionsIterator.Next() = (%T, %t, _), want = (_, false, _)", opt, done)
- }
- f(t, opt)
- }
- // Validate all options were consumed.
- for {
- opt, done, err := optionsIterator.Next()
- if err != nil {
- t.Errorf("optionsIterator.Next(): %s", err)
- return
- }
- if !done {
- t.Errorf("got optionsIterator.Next() = (%T, %t, _), want = (_, true, _)", opt, done)
- }
- if done {
- break
- }
- }
- }
-}
-
-// IPv6RouterAlert validates that an extension header option is the RouterAlert
-// option and matches on its value.
-func IPv6RouterAlert(want header.IPv6RouterAlertValue) IPv6ExtHdrOptionChecker {
- return func(t *testing.T, opt header.IPv6ExtHdrOption) {
- routerAlert, ok := opt.(*header.IPv6RouterAlertOption)
- if !ok {
- t.Errorf("unexpected extension header option, got = %T, want = header.IPv6RouterAlertOption", opt)
- return
- }
- if routerAlert.Value != want {
- t.Errorf("got routerAlert.Value = %d, want = %d", routerAlert.Value, want)
- }
- }
-}
-
-// IPv6UnknownOption validates that an extension header option is the
-// unknown header option.
-func IPv6UnknownOption() IPv6ExtHdrOptionChecker {
- return func(t *testing.T, opt header.IPv6ExtHdrOption) {
- _, ok := opt.(*header.IPv6UnknownExtHdrOption)
- if !ok {
- t.Errorf("got = %T, want = header.IPv6UnknownExtHdrOption", opt)
- }
- }
-}
-
-// IgnoreCmpPath returns a cmp.Option that ignores listed field paths.
-func IgnoreCmpPath(paths ...string) cmp.Option {
- ignores := map[string]struct{}{}
- for _, path := range paths {
- ignores[path] = struct{}{}
- }
- return cmp.FilterPath(func(path cmp.Path) bool {
- _, ok := ignores[path.String()]
- return ok
- }, cmp.Ignore())
-}
diff --git a/pkg/tcpip/faketime/BUILD b/pkg/tcpip/faketime/BUILD
deleted file mode 100644
index bb9d44aff..000000000
--- a/pkg/tcpip/faketime/BUILD
+++ /dev/null
@@ -1,21 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "faketime",
- srcs = ["faketime.go"],
- visibility = ["//visibility:public"],
- deps = ["//pkg/tcpip"],
-)
-
-go_test(
- name = "faketime_test",
- size = "small",
- srcs = [
- "faketime_test.go",
- ],
- deps = [
- "//pkg/tcpip/faketime",
- ],
-)
diff --git a/pkg/tcpip/faketime/faketime_state_autogen.go b/pkg/tcpip/faketime/faketime_state_autogen.go
new file mode 100644
index 000000000..3de72f27d
--- /dev/null
+++ b/pkg/tcpip/faketime/faketime_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package faketime
diff --git a/pkg/tcpip/faketime/faketime_test.go b/pkg/tcpip/faketime/faketime_test.go
deleted file mode 100644
index fd2bb470a..000000000
--- a/pkg/tcpip/faketime/faketime_test.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package faketime_test
-
-import (
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/tcpip/faketime"
-)
-
-func TestManualClockAdvance(t *testing.T) {
- const timeout = time.Millisecond
- clock := faketime.NewManualClock()
- start := clock.NowMonotonic()
- clock.Advance(timeout)
- if got, want := clock.NowMonotonic().Sub(start), timeout; got != want {
- t.Errorf("got = %d, want = %d", got, want)
- }
-}
-
-func TestManualClockAfterFunc(t *testing.T) {
- const (
- timeout1 = time.Millisecond // timeout for counter1
- timeout2 = 2 * time.Millisecond // timeout for counter2
- )
- tests := []struct {
- name string
- advance time.Duration
- wantCounter1 int
- wantCounter2 int
- }{
- {
- name: "before timeout1",
- advance: timeout1 - 1,
- wantCounter1: 0,
- wantCounter2: 0,
- },
- {
- name: "timeout1",
- advance: timeout1,
- wantCounter1: 1,
- wantCounter2: 0,
- },
- {
- name: "timeout2",
- advance: timeout2,
- wantCounter1: 1,
- wantCounter2: 1,
- },
- {
- name: "after timeout2",
- advance: timeout2 + 1,
- wantCounter1: 1,
- wantCounter2: 1,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- clock := faketime.NewManualClock()
- counter1 := 0
- counter2 := 0
- clock.AfterFunc(timeout1, func() {
- counter1++
- })
- clock.AfterFunc(timeout2, func() {
- counter2++
- })
- start := clock.NowMonotonic()
- clock.Advance(test.advance)
- if got, want := counter1, test.wantCounter1; got != want {
- t.Errorf("got counter1 = %d, want = %d", got, want)
- }
- if got, want := counter2, test.wantCounter2; got != want {
- t.Errorf("got counter2 = %d, want = %d", got, want)
- }
- if got, want := clock.NowMonotonic().Sub(start), test.advance; got != want {
- t.Errorf("got elapsed = %d, want = %d", got, want)
- }
- })
- }
-}
diff --git a/pkg/tcpip/hash/jenkins/BUILD b/pkg/tcpip/hash/jenkins/BUILD
deleted file mode 100644
index ff2719291..000000000
--- a/pkg/tcpip/hash/jenkins/BUILD
+++ /dev/null
@@ -1,18 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "jenkins",
- srcs = ["jenkins.go"],
- visibility = ["//visibility:public"],
-)
-
-go_test(
- name = "jenkins_test",
- size = "small",
- srcs = [
- "jenkins_test.go",
- ],
- library = ":jenkins",
-)
diff --git a/pkg/tcpip/hash/jenkins/jenkins_state_autogen.go b/pkg/tcpip/hash/jenkins/jenkins_state_autogen.go
new file mode 100644
index 000000000..216cc5a2e
--- /dev/null
+++ b/pkg/tcpip/hash/jenkins/jenkins_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package jenkins
diff --git a/pkg/tcpip/hash/jenkins/jenkins_test.go b/pkg/tcpip/hash/jenkins/jenkins_test.go
deleted file mode 100644
index 4c78b5808..000000000
--- a/pkg/tcpip/hash/jenkins/jenkins_test.go
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-package jenkins
-
-import (
- "bytes"
- "encoding/binary"
- "hash"
- "hash/fnv"
- "math"
- "testing"
-)
-
-func TestGolden32(t *testing.T) {
- var golden32 = []struct {
- out []byte
- in string
- }{
- {[]byte{0x00, 0x00, 0x00, 0x00}, ""},
- {[]byte{0xca, 0x2e, 0x94, 0x42}, "a"},
- {[]byte{0x45, 0xe6, 0x1e, 0x58}, "ab"},
- {[]byte{0xed, 0x13, 0x1f, 0x5b}, "abc"},
- }
-
- hash := New32()
-
- for _, g := range golden32 {
- hash.Reset()
- n, err := hash.Write([]byte(g.in))
- if err != nil {
- t.Fatalf("write error: %s", err)
- }
- if n != len(g.in) {
- t.Fatalf("wrote only %d out of %d bytes", n, len(g.in))
- }
- if actual := hash.Sum(nil); !bytes.Equal(g.out, actual) {
- t.Errorf("hash(%q) = 0x%x want 0x%x", g.in, actual, g.out)
- }
- }
-}
-
-func TestIntegrity32(t *testing.T) {
- data := []byte{'1', '2', 3, 4, 5}
-
- h := New32()
- h.Write(data)
- sum := h.Sum(nil)
-
- if size := h.Size(); size != len(sum) {
- t.Fatalf("Size()=%d but len(Sum())=%d", size, len(sum))
- }
-
- if a := h.Sum(nil); !bytes.Equal(sum, a) {
- t.Fatalf("first Sum()=0x%x, second Sum()=0x%x", sum, a)
- }
-
- h.Reset()
- h.Write(data)
- if a := h.Sum(nil); !bytes.Equal(sum, a) {
- t.Fatalf("Sum()=0x%x, but after Reset() Sum()=0x%x", sum, a)
- }
-
- h.Reset()
- h.Write(data[:2])
- h.Write(data[2:])
- if a := h.Sum(nil); !bytes.Equal(sum, a) {
- t.Fatalf("Sum()=0x%x, but with partial writes, Sum()=0x%x", sum, a)
- }
-
- sum32 := h.(hash.Hash32).Sum32()
- if sum32 != binary.BigEndian.Uint32(sum) {
- t.Fatalf("Sum()=0x%x, but Sum32()=0x%x", sum, sum32)
- }
-}
-
-func BenchmarkJenkins32KB(b *testing.B) {
- h := New32()
-
- b.SetBytes(1024)
- data := make([]byte, 1024)
- for i := range data {
- data[i] = byte(i)
- }
- in := make([]byte, 0, h.Size())
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- h.Reset()
- h.Write(data)
- h.Sum(in)
- }
-}
-
-func BenchmarkFnv32(b *testing.B) {
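- // Hash values into 1000 buckets so that bucket counts can be compared
- // for balance after the timer is stopped.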
- arr := make([]int64, 1000)
- for i := 0; i < b.N; i++ {
- var payload [8]byte
- binary.BigEndian.PutUint32(payload[:4], uint32(i))
- binary.BigEndian.PutUint32(payload[4:], uint32(i))
-
- h := fnv.New32()
- h.Write(payload[:])
- idx := int(h.Sum32()) % len(arr)
- arr[idx]++
- }
- b.StopTimer()
- c := 0
- if b.N > 1000000 {
- for i := 0; i < len(arr)-1; i++ {
- if math.Abs(float64(arr[i]-arr[i+1]))/float64(arr[i]) > float64(0.1) {
- if c == 0 {
- b.Logf("i %d val[i] %d val[i+1] %d b.N %d\n", i, arr[i], arr[i+1], b.N)
- }
- c++
- }
- }
- if c > 0 {
- b.Logf("Unbalanced buckets: %d", c)
- }
- }
-}
-
-func BenchmarkSum32(b *testing.B) {
- arr := make([]int64, 1000)
- for i := 0; i < b.N; i++ {
- var payload [8]byte
- binary.BigEndian.PutUint32(payload[:4], uint32(i))
- binary.BigEndian.PutUint32(payload[4:], uint32(i))
- h := Sum32(0)
- h.Write(payload[:])
- idx := int(h.Sum32()) % len(arr)
- arr[idx]++
- }
- b.StopTimer()
- if b.N > 1000000 {
- for i := 0; i < len(arr)-1; i++ {
- if math.Abs(float64(arr[i]-arr[i+1]))/float64(arr[i]) > float64(0.1) {
- b.Logf("val[%3d]=%8d\tval[%3d]=%8d\tb.N=%d\n", i, arr[i], i+1, arr[i+1], b.N)
- break
- }
- }
- }
-}
-
-func BenchmarkNew32(b *testing.B) {
- arr := make([]int64, 1000)
- for i := 0; i < b.N; i++ {
- var payload [8]byte
- binary.BigEndian.PutUint32(payload[:4], uint32(i))
- binary.BigEndian.PutUint32(payload[4:], uint32(i))
- h := New32()
- h.Write(payload[:])
- idx := int(h.Sum32()) % len(arr)
- arr[idx]++
- }
- b.StopTimer()
- if b.N > 1000000 {
- for i := 0; i < len(arr)-1; i++ {
- if math.Abs(float64(arr[i]-arr[i+1]))/float64(arr[i]) > float64(0.1) {
- b.Logf("val[%3d]=%8d\tval[%3d]=%8d\tb.N=%d\n", i, arr[i], i+1, arr[i+1], b.N)
- break
- }
- }
- }
-}
diff --git a/pkg/tcpip/header/BUILD b/pkg/tcpip/header/BUILD
deleted file mode 100644
index 01240f5d0..000000000
--- a/pkg/tcpip/header/BUILD
+++ /dev/null
@@ -1,76 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "header",
- srcs = [
- "arp.go",
- "checksum.go",
- "eth.go",
- "gue.go",
- "icmpv4.go",
- "icmpv6.go",
- "igmp.go",
- "interfaces.go",
- "ipv4.go",
- "ipv6.go",
- "ipv6_extension_headers.go",
- "ipv6_fragment.go",
- "mld.go",
- "ndp_neighbor_advert.go",
- "ndp_neighbor_solicit.go",
- "ndp_options.go",
- "ndp_router_advert.go",
- "ndp_router_solicit.go",
- "ndpoptionidentifier_string.go",
- "tcp.go",
- "udp.go",
- ],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/seqnum",
- "@com_github_google_btree//:go_default_library",
- ],
-)
-
-go_test(
- name = "header_x_test",
- size = "small",
- srcs = [
- "checksum_test.go",
- "igmp_test.go",
- "ipv4_test.go",
- "ipv6_test.go",
- "ipversion_test.go",
- "tcp_test.go",
- ],
- deps = [
- ":header",
- "//pkg/rand",
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/testutil",
- "@com_github_google_go_cmp//cmp:go_default_library",
- ],
-)
-
-go_test(
- name = "header_test",
- size = "small",
- srcs = [
- "eth_test.go",
- "ipv6_extension_headers_test.go",
- "mld_test.go",
- "ndp_test.go",
- ],
- library = ":header",
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/testutil",
- "@com_github_google_go_cmp//cmp:go_default_library",
- ],
-)
diff --git a/pkg/tcpip/header/checksum_test.go b/pkg/tcpip/header/checksum_test.go
deleted file mode 100644
index 3445511f4..000000000
--- a/pkg/tcpip/header/checksum_test.go
+++ /dev/null
@@ -1,461 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package header provides the implementation of the encoding and decoding of
-// network protocol headers.
-package header_test
-
-import (
- "bytes"
- "fmt"
- "math/rand"
- "sync"
- "testing"
-
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/header"
-)
-
-func TestChecksumer(t *testing.T) {
- testCases := []struct {
- name string
- data [][]byte
- want uint16
- }{
- {
- name: "empty",
- want: 0,
- },
- {
- name: "OneOddView",
- data: [][]byte{
- []byte{1, 9, 0, 5, 4},
- },
- want: 1294,
- },
- {
- name: "TwoOddViews",
- data: [][]byte{
- []byte{1, 9, 0, 5, 4},
- []byte{4, 3, 7, 1, 2, 123},
- },
- want: 33819,
- },
- {
- name: "OneEvenView",
- data: [][]byte{
- []byte{1, 9, 0, 5},
- },
- want: 270,
- },
- {
- name: "TwoEvenViews",
- data: [][]byte{
- buffer.NewViewFromBytes([]byte{98, 1, 9, 0}),
- buffer.NewViewFromBytes([]byte{9, 0, 5, 4}),
- },
- want: 30981,
- },
- {
- name: "ThreeViews",
- data: [][]byte{
- []byte{77, 11, 33, 0, 55, 44},
- []byte{98, 1, 9, 0, 5, 4},
- []byte{4, 3, 7, 1, 2, 123, 99},
- },
- want: 34236,
- },
- }
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- var all bytes.Buffer
- var c header.Checksumer
- for _, b := range tc.data {
- c.Add(b)
- // Append to the buffer. We will check the checksum as a whole later.
- if _, err := all.Write(b); err != nil {
- t.Fatalf("all.Write(b) = _, %s; want _, nil", err)
- }
- }
- if got, want := c.Checksum(), tc.want; got != want {
- t.Errorf("c.Checksum() = %d, want %d", got, want)
- }
- if got, want := header.Checksum(all.Bytes(), 0 /* initial */), tc.want; got != want {
- t.Errorf("Checksum(flatten tc.data) = %d, want %d", got, want)
- }
- })
- }
-}
-
-func TestChecksum(t *testing.T) {
- var bufSizes = []int{0, 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128, 255, 256, 257, 1023, 1024}
- type testCase struct {
- buf []byte
- initial uint16
- csumOrig uint16
- csumNew uint16
- }
- testCases := make([]testCase, 100000)
- // Ensure same buffer generation for test consistency.
- rnd := rand.New(rand.NewSource(42))
- for i := range testCases {
- testCases[i].buf = make([]byte, bufSizes[i%len(bufSizes)])
- testCases[i].initial = uint16(rnd.Intn(65536))
- rnd.Read(testCases[i].buf)
- }
-
- for i := range testCases {
- testCases[i].csumOrig = header.ChecksumOld(testCases[i].buf, testCases[i].initial)
- testCases[i].csumNew = header.Checksum(testCases[i].buf, testCases[i].initial)
- if got, want := testCases[i].csumNew, testCases[i].csumOrig; got != want {
- t.Fatalf("new checksum for (buf = %x, initial = %d) does not match old got: %d, want: %d", testCases[i].buf, testCases[i].initial, got, want)
- }
- }
-}
-
-func BenchmarkChecksum(b *testing.B) {
- var bufSizes = []int{64, 128, 256, 512, 1024, 1500, 2048, 4096, 8192, 16384, 32767, 32768, 65535, 65536}
-
- checkSumImpls := []struct {
- fn func([]byte, uint16) uint16
- name string
- }{
- {header.ChecksumOld, "checksum_old"},
- {header.Checksum, "checksum"},
- }
-
- for _, csumImpl := range checkSumImpls {
- // Ensure same buffer generation for test consistency.
- rnd := rand.New(rand.NewSource(42))
- for _, bufSz := range bufSizes {
- b.Run(fmt.Sprintf("%s_%d", csumImpl.name, bufSz), func(b *testing.B) {
- tc := struct {
- buf []byte
- initial uint16
- csum uint16
- }{
- buf: make([]byte, bufSz),
- initial: uint16(rnd.Intn(65536)),
- }
- rnd.Read(tc.buf)
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- tc.csum = csumImpl.fn(tc.buf, tc.initial)
- }
- })
- }
- }
-}
-
-func testICMPChecksum(t *testing.T, headerChecksum func() uint16, icmpChecksum func() uint16, want uint16, pktStr string) {
- // icmpChecksum must not modify the header while calculating its
- // checksum. Call it from a few goroutines so that the race detector
- // flags any concurrent read/write accesses.
-
- const concurrency = 5
- start := make(chan int)
- ready := make(chan bool, concurrency)
- var wg sync.WaitGroup
- wg.Add(concurrency)
- defer wg.Wait()
-
- for i := 0; i < concurrency; i++ {
- go func() {
- defer wg.Done()
-
- ready <- true
- <-start
-
- if got := headerChecksum(); want != got {
- t.Errorf("new checksum for %s does not match old got: %x, want: %x", pktStr, got, want)
- }
- if got := icmpChecksum(); want != got {
- t.Errorf("new checksum for %s does not match old got: %x, want: %x", pktStr, got, want)
- }
- }()
- }
- for i := 0; i < concurrency; i++ {
- <-ready
- }
- close(start)
-}
-
-func TestICMPv4Checksum(t *testing.T) {
- rnd := rand.New(rand.NewSource(42))
-
- h := header.ICMPv4(make([]byte, header.ICMPv4MinimumSize))
- if _, err := rnd.Read(h); err != nil {
- t.Fatalf("rnd.Read failed: %v", err)
- }
- h.SetChecksum(0)
-
- buf := make([]byte, 13)
- if _, err := rnd.Read(buf); err != nil {
- t.Fatalf("rnd.Read failed: %v", err)
- }
- vv := buffer.NewVectorisedView(len(buf), []buffer.View{
- buffer.NewViewFromBytes(buf[:5]),
- buffer.NewViewFromBytes(buf[5:]),
- })
-
- want := header.Checksum(vv.ToView(), 0)
- want = ^header.Checksum(h, want)
- h.SetChecksum(want)
-
- testICMPChecksum(t, h.Checksum, func() uint16 {
- return header.ICMPv4Checksum(h, header.ChecksumVV(vv, 0))
- }, want, fmt.Sprintf("header: {% x} data {% x}", h, vv.ToView()))
-}
-
-func TestICMPv6Checksum(t *testing.T) {
- rnd := rand.New(rand.NewSource(42))
-
- h := header.ICMPv6(make([]byte, header.ICMPv6MinimumSize))
- if _, err := rnd.Read(h); err != nil {
- t.Fatalf("rnd.Read failed: %v", err)
- }
- h.SetChecksum(0)
-
- buf := make([]byte, 13)
- if _, err := rnd.Read(buf); err != nil {
- t.Fatalf("rnd.Read failed: %v", err)
- }
- vv := buffer.NewVectorisedView(len(buf), []buffer.View{
- buffer.NewViewFromBytes(buf[:7]),
- buffer.NewViewFromBytes(buf[7:10]),
- buffer.NewViewFromBytes(buf[10:]),
- })
-
- dst := header.IPv6Loopback
- src := header.IPv6Loopback
-
- want := header.PseudoHeaderChecksum(header.ICMPv6ProtocolNumber, src, dst, uint16(len(h)+vv.Size()))
- want = header.Checksum(vv.ToView(), want)
- want = ^header.Checksum(h, want)
- h.SetChecksum(want)
-
- testICMPChecksum(t, h.Checksum, func() uint16 {
- return header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
- Header: h,
- Src: src,
- Dst: dst,
- PayloadCsum: header.ChecksumVV(vv, 0),
- PayloadLen: vv.Size(),
- })
- }, want, fmt.Sprintf("header: {% x} data {% x}", h, vv.ToView()))
-}
-
-func randomAddress(size int) tcpip.Address {
- s := make([]byte, size)
- for i := 0; i < size; i++ {
- s[i] = byte(rand.Uint32())
- }
- return tcpip.Address(s)
-}
-
-func TestChecksummableNetworkUpdateAddress(t *testing.T) {
- tests := []struct {
- name string
- update func(header.IPv4, tcpip.Address)
- }{
- {
- name: "SetSourceAddressWithChecksumUpdate",
- update: header.IPv4.SetSourceAddressWithChecksumUpdate,
- },
- {
- name: "SetDestinationAddressWithChecksumUpdate",
- update: header.IPv4.SetDestinationAddressWithChecksumUpdate,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- for i := 0; i < 1000; i++ {
- var origBytes [header.IPv4MinimumSize]byte
- header.IPv4(origBytes[:]).Encode(&header.IPv4Fields{
- TOS: 1,
- TotalLength: header.IPv4MinimumSize,
- ID: 2,
- Flags: 3,
- FragmentOffset: 4,
- TTL: 5,
- Protocol: 6,
- Checksum: 0,
- SrcAddr: randomAddress(header.IPv4AddressSize),
- DstAddr: randomAddress(header.IPv4AddressSize),
- })
-
- addr := randomAddress(header.IPv4AddressSize)
-
- bytesCopy := origBytes
- h := header.IPv4(bytesCopy[:])
- origXSum := h.CalculateChecksum()
- h.SetChecksum(^origXSum)
-
- test.update(h, addr)
- got := ^h.Checksum()
- h.SetChecksum(0)
- want := h.CalculateChecksum()
- if got != want {
- t.Errorf("got h.Checksum() = 0x%x, want = 0x%x; originalBytes = 0x%x, new addr = %s", got, want, origBytes, addr)
- }
- }
- })
- }
-}
-
-func TestChecksummableTransportUpdatePort(t *testing.T) {
- // The fields in the pseudo header are not tested here, so we just use 0.
- const pseudoHeaderXSum = 0
-
- tests := []struct {
- name string
- transportHdr func(_, _ uint16) (header.ChecksummableTransport, func(uint16) uint16)
- proto tcpip.TransportProtocolNumber
- }{
- {
- name: "TCP",
- transportHdr: func(src, dst uint16) (header.ChecksummableTransport, func(uint16) uint16) {
- h := header.TCP(make([]byte, header.TCPMinimumSize))
- h.Encode(&header.TCPFields{
- SrcPort: src,
- DstPort: dst,
- SeqNum: 1,
- AckNum: 2,
- DataOffset: header.TCPMinimumSize,
- Flags: 3,
- WindowSize: 4,
- Checksum: 0,
- UrgentPointer: 5,
- })
- h.SetChecksum(^h.CalculateChecksum(pseudoHeaderXSum))
- return h, h.CalculateChecksum
- },
- proto: header.TCPProtocolNumber,
- },
- {
- name: "UDP",
- transportHdr: func(src, dst uint16) (header.ChecksummableTransport, func(uint16) uint16) {
- h := header.UDP(make([]byte, header.UDPMinimumSize))
- h.Encode(&header.UDPFields{
- SrcPort: src,
- DstPort: dst,
- Length: 0,
- Checksum: 0,
- })
- h.SetChecksum(^h.CalculateChecksum(pseudoHeaderXSum))
- return h, h.CalculateChecksum
- },
- proto: header.UDPProtocolNumber,
- },
- }
-
- for i := 0; i < 1000; i++ {
- origSrcPort := uint16(rand.Uint32())
- origDstPort := uint16(rand.Uint32())
- newPort := uint16(rand.Uint32())
-
- t.Run(fmt.Sprintf("OrigSrcPort=%d,OrigDstPort=%d,NewPort=%d", origSrcPort, origDstPort, newPort), func(*testing.T) {
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- for _, subTest := range []struct {
- name string
- update func(header.ChecksummableTransport)
- }{
- {
- name: "Source port",
- update: func(h header.ChecksummableTransport) { h.SetSourcePortWithChecksumUpdate(newPort) },
- },
- {
- name: "Destination port",
- update: func(h header.ChecksummableTransport) { h.SetDestinationPortWithChecksumUpdate(newPort) },
- },
- } {
- t.Run(subTest.name, func(t *testing.T) {
- h, calcXSum := test.transportHdr(origSrcPort, origDstPort)
- subTest.update(h)
- // TCP and UDP hold the 1s complement of the fully calculated
- // checksum.
- got := ^h.Checksum()
- h.SetChecksum(0)
-
- if want := calcXSum(pseudoHeaderXSum); got != want {
- h, _ := test.transportHdr(origSrcPort, origDstPort)
- t.Errorf("got Checksum() = 0x%x, want = 0x%x; originalBytes = %#v, new port = %d", got, want, h, newPort)
- }
- })
- }
- })
- }
- })
- }
-}
-
-func TestChecksummableTransportUpdatePseudoHeaderAddress(t *testing.T) {
- const addressSize = 6
-
- tests := []struct {
- name string
- transportHdr func() header.ChecksummableTransport
- proto tcpip.TransportProtocolNumber
- }{
- {
- name: "TCP",
- transportHdr: func() header.ChecksummableTransport { return header.TCP(make([]byte, header.TCPMinimumSize)) },
- proto: header.TCPProtocolNumber,
- },
- {
- name: "UDP",
- transportHdr: func() header.ChecksummableTransport { return header.UDP(make([]byte, header.UDPMinimumSize)) },
- proto: header.UDPProtocolNumber,
- },
- }
-
- for i := 0; i < 1000; i++ {
- permanent := randomAddress(addressSize)
- old := randomAddress(addressSize)
- new := randomAddress(addressSize)
-
- t.Run(fmt.Sprintf("Permanent=%q,Old=%q,New=%q", permanent, old, new), func(t *testing.T) {
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- for _, fullChecksum := range []bool{true, false} {
- t.Run(fmt.Sprintf("FullChecksum=%t", fullChecksum), func(t *testing.T) {
- initialXSum := header.PseudoHeaderChecksum(test.proto, permanent, old, 0)
- if fullChecksum {
- // TCP and UDP hold the 1s complement of the fully calculated
- // checksum.
- initialXSum = ^initialXSum
- }
-
- h := test.transportHdr()
- h.SetChecksum(initialXSum)
- h.UpdateChecksumPseudoHeaderAddress(old, new, fullChecksum)
-
- got := h.Checksum()
- if fullChecksum {
- got = ^got
- }
- if want := header.PseudoHeaderChecksum(test.proto, permanent, new, 0); got != want {
- t.Errorf("got Checksum() = 0x%x, want = 0x%x; h = %#v", got, want, h)
- }
- })
- }
- })
- }
- })
- }
-}
diff --git a/pkg/tcpip/header/eth_test.go b/pkg/tcpip/header/eth_test.go
deleted file mode 100644
index bf9ccbf1a..000000000
--- a/pkg/tcpip/header/eth_test.go
+++ /dev/null
@@ -1,150 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package header
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/testutil"
-)
-
-func TestIsValidUnicastEthernetAddress(t *testing.T) {
- tests := []struct {
- name string
- addr tcpip.LinkAddress
- expected bool
- }{
- {
- "Nil",
- tcpip.LinkAddress([]byte(nil)),
- false,
- },
- {
- "Empty",
- tcpip.LinkAddress(""),
- false,
- },
- {
- "InvalidLength",
- tcpip.LinkAddress("\x01\x02\x03"),
- false,
- },
- {
- "Unspecified",
- unspecifiedEthernetAddress,
- false,
- },
- {
- "Multicast",
- tcpip.LinkAddress("\x01\x02\x03\x04\x05\x06"),
- false,
- },
- {
- "Valid",
- tcpip.LinkAddress("\x02\x02\x03\x04\x05\x06"),
- true,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- if got := IsValidUnicastEthernetAddress(test.addr); got != test.expected {
- t.Fatalf("got IsValidUnicastEthernetAddress = %t, want = %t", got, test.expected)
- }
- })
- }
-}
-
-func TestIsMulticastEthernetAddress(t *testing.T) {
- tests := []struct {
- name string
- addr tcpip.LinkAddress
- expected bool
- }{
- {
- "Nil",
- tcpip.LinkAddress([]byte(nil)),
- false,
- },
- {
- "Empty",
- tcpip.LinkAddress(""),
- false,
- },
- {
- "InvalidLength",
- tcpip.LinkAddress("\x01\x02\x03"),
- false,
- },
- {
- "Unspecified",
- unspecifiedEthernetAddress,
- false,
- },
- {
- "Multicast",
- tcpip.LinkAddress("\x01\x02\x03\x04\x05\x06"),
- true,
- },
- {
- "Unicast",
- tcpip.LinkAddress("\x02\x02\x03\x04\x05\x06"),
- false,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- if got := IsMulticastEthernetAddress(test.addr); got != test.expected {
- t.Fatalf("got IsMulticastEthernetAddress = %t, want = %t", got, test.expected)
- }
- })
- }
-}
-
-func TestEthernetAddressFromMulticastIPv4Address(t *testing.T) {
- tests := []struct {
- name string
- addr tcpip.Address
- expectedLinkAddr tcpip.LinkAddress
- }{
- {
- name: "IPv4 Multicast without 24th bit set",
- addr: "\xe0\x7e\xdc\xba",
- expectedLinkAddr: "\x01\x00\x5e\x7e\xdc\xba",
- },
- {
- name: "IPv4 Multicast with 24th bit set",
- addr: "\xe0\xfe\xdc\xba",
- expectedLinkAddr: "\x01\x00\x5e\x7e\xdc\xba",
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- if got := EthernetAddressFromMulticastIPv4Address(test.addr); got != test.expectedLinkAddr {
- t.Fatalf("got EthernetAddressFromMulticastIPv4Address(%s) = %s, want = %s", test.addr, got, test.expectedLinkAddr)
- }
- })
- }
-}
-
-func TestEthernetAddressFromMulticastIPv6Address(t *testing.T) {
- addr := testutil.MustParse6("ff02:304:506:708:90a:b0c:d0e:f1a")
- if got, want := EthernetAddressFromMulticastIPv6Address(addr), tcpip.LinkAddress("\x33\x33\x0d\x0e\x0f\x1a"); got != want {
- t.Fatalf("got EthernetAddressFromMulticastIPv6Address(%s) = %s, want = %s", addr, got, want)
- }
-}
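Note: the deleted eth_test.go above checks the standard multicast address mappings, RFC 1112 for IPv4 (01:00:5e plus the low 23 bits of the group address) and RFC 2464 for IPv6 (33:33 plus the low 32 bits). The sketch below reproduces those mappings using the test's own vectors; it is standalone and not the header package's code.

package main

import "fmt"

// macFromIPv4Multicast maps an IPv4 multicast group to its Ethernet address
// per RFC 1112: 01:00:5e plus the low 23 bits of the group address.
func macFromIPv4Multicast(ip [4]byte) [6]byte {
	return [6]byte{0x01, 0x00, 0x5e, ip[1] & 0x7f, ip[2], ip[3]}
}

// macFromIPv6Multicast maps an IPv6 multicast group to its Ethernet address
// per RFC 2464: 33:33 plus the low 32 bits of the group address.
func macFromIPv6Multicast(ip [16]byte) [6]byte {
	return [6]byte{0x33, 0x33, ip[12], ip[13], ip[14], ip[15]}
}

func main() {
	v4 := [4]byte{0xe0, 0xfe, 0xdc, 0xba}
	v6 := [16]byte{0xff, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x1a}
	fmt.Printf("% x\n", macFromIPv4Multicast(v4)) // 01 00 5e 7e dc ba (24th bit masked off)
	fmt.Printf("% x\n", macFromIPv6Multicast(v6)) // 33 33 0d 0e 0f 1a
}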
diff --git a/pkg/tcpip/header/header_state_autogen.go b/pkg/tcpip/header/header_state_autogen.go
new file mode 100644
index 000000000..d6dd58874
--- /dev/null
+++ b/pkg/tcpip/header/header_state_autogen.go
@@ -0,0 +1,74 @@
+// automatically generated by stateify.
+
+package header
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (r *SACKBlock) StateTypeName() string {
+ return "pkg/tcpip/header.SACKBlock"
+}
+
+func (r *SACKBlock) StateFields() []string {
+ return []string{
+ "Start",
+ "End",
+ }
+}
+
+func (r *SACKBlock) beforeSave() {}
+
+// +checklocksignore
+func (r *SACKBlock) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.Start)
+ stateSinkObject.Save(1, &r.End)
+}
+
+func (r *SACKBlock) afterLoad() {}
+
+// +checklocksignore
+func (r *SACKBlock) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.Start)
+ stateSourceObject.Load(1, &r.End)
+}
+
+func (t *TCPOptions) StateTypeName() string {
+ return "pkg/tcpip/header.TCPOptions"
+}
+
+func (t *TCPOptions) StateFields() []string {
+ return []string{
+ "TS",
+ "TSVal",
+ "TSEcr",
+ "SACKBlocks",
+ }
+}
+
+func (t *TCPOptions) beforeSave() {}
+
+// +checklocksignore
+func (t *TCPOptions) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ stateSinkObject.Save(0, &t.TS)
+ stateSinkObject.Save(1, &t.TSVal)
+ stateSinkObject.Save(2, &t.TSEcr)
+ stateSinkObject.Save(3, &t.SACKBlocks)
+}
+
+func (t *TCPOptions) afterLoad() {}
+
+// +checklocksignore
+func (t *TCPOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.TS)
+ stateSourceObject.Load(1, &t.TSVal)
+ stateSourceObject.Load(2, &t.TSEcr)
+ stateSourceObject.Load(3, &t.SACKBlocks)
+}
+
+func init() {
+ state.Register((*SACKBlock)(nil))
+ state.Register((*TCPOptions)(nil))
+}
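Note: the generated file above follows the usual stateify pattern: each field is saved and loaded at a fixed index so checkpoint and restore stay symmetric. The sketch below shows that round trip with a hypothetical in-memory sink and source; the real gvisor.dev/gvisor/pkg/state Sink and Source types have a richer API, and sackBlock here is a stand-in type, not header.SACKBlock.

package main

import "fmt"

// sink and source are hypothetical stand-ins for state.Sink and state.Source.
type sink struct{ fields map[int]interface{} }

func (s *sink) Save(idx int, v interface{}) { s.fields[idx] = v }

type source struct{ fields map[int]interface{} }

func (s *source) Load(idx int, v *uint32) { *v = s.fields[idx].(uint32) }

type sackBlock struct{ Start, End uint32 }

// save and load mirror the generated StateSave/StateLoad index pairing.
func (b *sackBlock) save(s *sink)   { s.Save(0, b.Start); s.Save(1, b.End) }
func (b *sackBlock) load(s *source) { s.Load(0, &b.Start); s.Load(1, &b.End) }

func main() {
	in := sackBlock{Start: 100, End: 200}
	snk := &sink{fields: map[int]interface{}{}}
	in.save(snk)

	var out sackBlock
	out.load(&source{fields: snk.fields})
	fmt.Printf("%+v\n", out) // {Start:100 End:200}
}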
diff --git a/pkg/tcpip/header/igmp_test.go b/pkg/tcpip/header/igmp_test.go
deleted file mode 100644
index 575604928..000000000
--- a/pkg/tcpip/header/igmp_test.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package header_test
-
-import (
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/testutil"
-)
-
-// TestIGMPHeader tests the functions within header.igmp
-func TestIGMPHeader(t *testing.T) {
- const maxRespTimeTenthSec = 0xF0
- b := []byte{
- 0x11, // IGMP Type, Membership Query
- maxRespTimeTenthSec, // Maximum Response Time
- 0xC0, 0xC0, // Checksum
- 0x01, 0x02, 0x03, 0x04, // Group Address
- }
-
- igmpHeader := header.IGMP(b)
-
- if got, want := igmpHeader.Type(), header.IGMPMembershipQuery; got != want {
- t.Errorf("got igmpHeader.Type() = %x, want = %x", got, want)
- }
-
- if got, want := igmpHeader.MaxRespTime(), header.DecisecondToDuration(maxRespTimeTenthSec); got != want {
- t.Errorf("got igmpHeader.MaxRespTime() = %s, want = %s", got, want)
- }
-
- if got, want := igmpHeader.Checksum(), uint16(0xC0C0); got != want {
- t.Errorf("got igmpHeader.Checksum() = %x, want = %x", got, want)
- }
-
- if got, want := igmpHeader.GroupAddress(), testutil.MustParse4("1.2.3.4"); got != want {
- t.Errorf("got igmpHeader.GroupAddress() = %s, want = %s", got, want)
- }
-
- igmpType := header.IGMPv2MembershipReport
- igmpHeader.SetType(igmpType)
- if got := igmpHeader.Type(); got != igmpType {
- t.Errorf("got igmpHeader.Type() = %x, want = %x", got, igmpType)
- }
- if got := header.IGMPType(b[0]); got != igmpType {
- t.Errorf("got IGMPtype in backing buffer = %x, want %x", got, igmpType)
- }
-
- respTime := byte(0x02)
- igmpHeader.SetMaxRespTime(respTime)
- if got, want := igmpHeader.MaxRespTime(), header.DecisecondToDuration(respTime); got != want {
- t.Errorf("got igmpHeader.MaxRespTime() = %s, want = %s", got, want)
- }
-
- checksum := uint16(0x0102)
- igmpHeader.SetChecksum(checksum)
- if got := igmpHeader.Checksum(); got != checksum {
- t.Errorf("got igmpHeader.Checksum() = %x, want = %x", got, checksum)
- }
-
- groupAddress := testutil.MustParse4("4.3.2.1")
- igmpHeader.SetGroupAddress(groupAddress)
- if got := igmpHeader.GroupAddress(); got != groupAddress {
- t.Errorf("got igmpHeader.GroupAddress() = %s, want = %s", got, groupAddress)
- }
-}
-
-// TestIGMPChecksum ensures that the checksum calculator produces the expected
-// checksum.
-func TestIGMPChecksum(t *testing.T) {
- b := []byte{
- 0x11, // IGMP Type, Membership Query
- 0xF0, // Maximum Response Time
- 0xC0, 0xC0, // Checksum
- 0x01, 0x02, 0x03, 0x04, // Group Address
- }
-
- igmpHeader := header.IGMP(b)
-
- // Calculate the initial checksum after setting the checksum temporarily to 0
- // to avoid checksumming the checksum.
- initialChecksum := igmpHeader.Checksum()
- igmpHeader.SetChecksum(0)
- checksum := ^header.Checksum(b, 0)
- igmpHeader.SetChecksum(initialChecksum)
-
- if got := header.IGMPCalculateChecksum(igmpHeader); got != checksum {
- t.Errorf("got IGMPCalculateChecksum = %x, want %x", got, checksum)
- }
-}
-
-func TestDecisecondToDuration(t *testing.T) {
- const valueInDeciseconds = 5
- if got, want := header.DecisecondToDuration(valueInDeciseconds), valueInDeciseconds*time.Second/10; got != want {
- t.Fatalf("got header.DecisecondToDuration(%d) = %s, want = %s", valueInDeciseconds, got, want)
- }
-}
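Note: the deleted igmp_test.go above pins down two small calculations: deciseconds convert to a time.Duration as d * time.Second / 10, and the IGMP checksum is the one's-complement checksum of the 8-byte header with the checksum field treated as zero. The helpers below are a standalone sketch, not the header package's API.

package main

import (
	"fmt"
	"time"
)

func decisecondToDuration(d uint8) time.Duration {
	return time.Duration(d) * time.Second / 10
}

// igmpChecksum returns the one's-complement checksum of an 8-byte IGMP
// header, with the stored checksum field (bytes 2-3) taken as zero.
func igmpChecksum(h [8]byte) uint16 {
	var sum uint32
	for i := 0; i < len(h); i += 2 {
		if i == 2 { // skip the stored checksum
			continue
		}
		sum += uint32(h[i])<<8 | uint32(h[i+1])
	}
	for sum > 0xffff {
		sum = (sum >> 16) + (sum & 0xffff)
	}
	return ^uint16(sum)
}

func main() {
	fmt.Println(decisecondToDuration(5)) // 500ms
	h := [8]byte{0x11, 0xF0, 0xC0, 0xC0, 0x01, 0x02, 0x03, 0x04}
	fmt.Printf("0x%04x\n", igmpChecksum(h))
}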
diff --git a/pkg/tcpip/header/ipv4_test.go b/pkg/tcpip/header/ipv4_test.go
deleted file mode 100644
index c02fe898b..000000000
--- a/pkg/tcpip/header/ipv4_test.go
+++ /dev/null
@@ -1,254 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package header_test
-
-import (
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/header"
-)
-
-func TestIPv4OptionsSerializer(t *testing.T) {
- optCases := []struct {
- name string
- option []header.IPv4SerializableOption
- expect []byte
- }{
- {
- name: "NOP",
- option: []header.IPv4SerializableOption{
- &header.IPv4SerializableNOPOption{},
- },
- expect: []byte{1, 0, 0, 0},
- },
- {
- name: "ListEnd",
- option: []header.IPv4SerializableOption{
- &header.IPv4SerializableListEndOption{},
- },
- expect: []byte{0, 0, 0, 0},
- },
- {
- name: "RouterAlert",
- option: []header.IPv4SerializableOption{
- &header.IPv4SerializableRouterAlertOption{},
- },
- expect: []byte{148, 4, 0, 0},
- }, {
- name: "NOP and RouterAlert",
- option: []header.IPv4SerializableOption{
- &header.IPv4SerializableNOPOption{},
- &header.IPv4SerializableRouterAlertOption{},
- },
- expect: []byte{1, 148, 4, 0, 0, 0, 0, 0},
- },
- }
-
- for _, opt := range optCases {
- t.Run(opt.name, func(t *testing.T) {
- s := header.IPv4OptionsSerializer(opt.option)
- l := s.Length()
- if got := len(opt.expect); got != int(l) {
-				t.Fatalf("got s.Length() = %d, want = %d", l, got)
- }
- b := make([]byte, l)
- for i := range b {
- // Fill the buffer with full bytes to ensure padding is being set
- // correctly.
- b[i] = 0xFF
- }
- if serializedLength := s.Serialize(b); serializedLength != l {
- t.Fatalf("s.Serialize(_) = %d, want %d", serializedLength, l)
- }
- if diff := cmp.Diff(opt.expect, b); diff != "" {
- t.Errorf("mismatched serialized option (-want +got):\n%s", diff)
- }
- })
- }
-}
-
-// TestIPv4EncodeOptions checks that IPv4.Encode correctly fills out the requested
-// fields when options are supplied.
-func TestIPv4EncodeOptions(t *testing.T) {
- tests := []struct {
- name string
- numberOfNops int
- encodedOptions header.IPv4Options // reply should look like this
- wantIHL int
- }{
- {
- name: "valid no options",
- wantIHL: header.IPv4MinimumSize,
- },
- {
- name: "one byte options",
- numberOfNops: 1,
- encodedOptions: header.IPv4Options{1, 0, 0, 0},
- wantIHL: header.IPv4MinimumSize + 4,
- },
- {
- name: "two byte options",
- numberOfNops: 2,
- encodedOptions: header.IPv4Options{1, 1, 0, 0},
- wantIHL: header.IPv4MinimumSize + 4,
- },
- {
- name: "three byte options",
- numberOfNops: 3,
- encodedOptions: header.IPv4Options{1, 1, 1, 0},
- wantIHL: header.IPv4MinimumSize + 4,
- },
- {
- name: "four byte options",
- numberOfNops: 4,
- encodedOptions: header.IPv4Options{1, 1, 1, 1},
- wantIHL: header.IPv4MinimumSize + 4,
- },
- {
- name: "five byte options",
- numberOfNops: 5,
- encodedOptions: header.IPv4Options{1, 1, 1, 1, 1, 0, 0, 0},
- wantIHL: header.IPv4MinimumSize + 8,
- },
- {
- name: "thirty nine byte options",
- numberOfNops: 39,
- encodedOptions: header.IPv4Options{
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 0,
- },
- wantIHL: header.IPv4MinimumSize + 40,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- serializeOpts := header.IPv4OptionsSerializer(make([]header.IPv4SerializableOption, test.numberOfNops))
- for i := range serializeOpts {
- serializeOpts[i] = &header.IPv4SerializableNOPOption{}
- }
- paddedOptionLength := serializeOpts.Length()
- ipHeaderLength := int(header.IPv4MinimumSize + paddedOptionLength)
- if ipHeaderLength > header.IPv4MaximumHeaderSize {
- t.Fatalf("IP header length too large: got = %d, want <= %d ", ipHeaderLength, header.IPv4MaximumHeaderSize)
- }
- totalLen := uint16(ipHeaderLength)
- hdr := buffer.NewPrependable(int(totalLen))
- ip := header.IPv4(hdr.Prepend(ipHeaderLength))
- // To check the padding works, poison the last byte of the options space.
- if paddedOptionLength != serializeOpts.Length() {
- ip.SetHeaderLength(uint8(ipHeaderLength))
- ip.Options()[paddedOptionLength-1] = 0xff
- ip.SetHeaderLength(0)
- }
- ip.Encode(&header.IPv4Fields{
- Options: serializeOpts,
- })
- options := ip.Options()
- wantOptions := test.encodedOptions
- if got, want := int(ip.HeaderLength()), test.wantIHL; got != want {
- t.Errorf("got IHL of %d, want %d", got, want)
- }
-
- // cmp.Diff does not consider nil slices equal to empty slices, but we do.
- if len(wantOptions) == 0 && len(options) == 0 {
- return
- }
-
- if diff := cmp.Diff(wantOptions, options); diff != "" {
- t.Errorf("options mismatch (-want +got):\n%s", diff)
- }
- })
- }
-}
-
-func TestIsV4LinkLocalUnicastAddress(t *testing.T) {
- tests := []struct {
- name string
- addr tcpip.Address
- expected bool
- }{
- {
- name: "Valid (lowest)",
- addr: "\xa9\xfe\x00\x00",
- expected: true,
- },
- {
- name: "Valid (highest)",
- addr: "\xa9\xfe\xff\xff",
- expected: true,
- },
- {
- name: "Invalid (before subnet)",
- addr: "\xa9\xfd\xff\xff",
- expected: false,
- },
- {
- name: "Invalid (after subnet)",
- addr: "\xa9\xff\x00\x00",
- expected: false,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- if got := header.IsV4LinkLocalUnicastAddress(test.addr); got != test.expected {
- t.Errorf("got header.IsV4LinkLocalUnicastAddress(%s) = %t, want = %t", test.addr, got, test.expected)
- }
- })
- }
-}
-
-func TestIsV4LinkLocalMulticastAddress(t *testing.T) {
- tests := []struct {
- name string
- addr tcpip.Address
- expected bool
- }{
- {
- name: "Valid (lowest)",
- addr: "\xe0\x00\x00\x00",
- expected: true,
- },
- {
- name: "Valid (highest)",
- addr: "\xe0\x00\x00\xff",
- expected: true,
- },
- {
- name: "Invalid (before subnet)",
- addr: "\xdf\xff\xff\xff",
- expected: false,
- },
- {
- name: "Invalid (after subnet)",
- addr: "\xe0\x00\x01\x00",
- expected: false,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- if got := header.IsV4LinkLocalMulticastAddress(test.addr); got != test.expected {
- t.Errorf("got header.IsV4LinkLocalMulticastAddress(%s) = %t, want = %t", test.addr, got, test.expected)
- }
- })
- }
-}
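Note: the deleted ipv4_test.go above exercises the option-length arithmetic: IPv4 options are padded up to a 4-byte multiple and the header length (IHL) grows from the 20-byte minimum up to at most 60 bytes. A minimal standalone sketch of that arithmetic, with constants that mirror header.IPv4MinimumSize and header.IPv4MaximumHeaderSize:

package main

import "fmt"

const (
	ipv4MinimumSize       = 20
	ipv4MaximumHeaderSize = 60
)

// paddedOptionsLength rounds an options length up to a 4-byte boundary.
func paddedOptionsLength(n int) int {
	return (n + 3) &^ 3
}

func main() {
	for _, nops := range []int{0, 1, 4, 5, 39} {
		padded := paddedOptionsLength(nops) // each NOP option is one byte
		ihlBytes := ipv4MinimumSize + padded
		if ihlBytes > ipv4MaximumHeaderSize {
			fmt.Printf("%d NOPs: header too large\n", nops)
			continue
		}
		fmt.Printf("%d NOPs -> %d option bytes, IHL = %d bytes\n", nops, padded, ihlBytes)
	}
}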
diff --git a/pkg/tcpip/header/ipv6_extension_headers_test.go b/pkg/tcpip/header/ipv6_extension_headers_test.go
deleted file mode 100644
index 65adc6250..000000000
--- a/pkg/tcpip/header/ipv6_extension_headers_test.go
+++ /dev/null
@@ -1,1346 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package header
-
-import (
- "bytes"
- "errors"
- "io"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
-)
-
-// Equal returns true if a and b are equivalent.
-//
-// Note, Equal will return true if a and b hold the same Identifier value and
-// contain the same bytes in Buf, even if the bytes are split across views
-// differently.
-//
-// Needed to use cmp.Equal on IPv6RawPayloadHeader as it contains unexported
-// fields.
-func (a IPv6RawPayloadHeader) Equal(b IPv6RawPayloadHeader) bool {
- return a.Identifier == b.Identifier && bytes.Equal(a.Buf.ToView(), b.Buf.ToView())
-}
-
-// Equal returns true if a and b are equivalent.
-//
-// Note, Equal will return true if a and b hold equivalent ipv6OptionsExtHdrs.
-//
-// Needed to use cmp.Equal on IPv6HopByHopOptionsExtHdr as it contains unexported
-// fields.
-func (a IPv6HopByHopOptionsExtHdr) Equal(b IPv6HopByHopOptionsExtHdr) bool {
- return bytes.Equal(a.ipv6OptionsExtHdr, b.ipv6OptionsExtHdr)
-}
-
-// Equal returns true if a and b are equivalent.
-//
-// Note, Equal will return true if a and b hold equivalent ipv6OptionsExtHdrs.
-//
-// Needed to use cmp.Equal on IPv6DestinationOptionsExtHdr as it contains unexported
-// fields.
-func (a IPv6DestinationOptionsExtHdr) Equal(b IPv6DestinationOptionsExtHdr) bool {
- return bytes.Equal(a.ipv6OptionsExtHdr, b.ipv6OptionsExtHdr)
-}
-
-func TestIPv6UnknownExtHdrOption(t *testing.T) {
- tests := []struct {
- name string
- identifier IPv6ExtHdrOptionIdentifier
- expectedUnknownAction IPv6OptionUnknownAction
- }{
- {
- name: "Skip with zero LSBs",
- identifier: 0,
- expectedUnknownAction: IPv6OptionUnknownActionSkip,
- },
- {
- name: "Discard with zero LSBs",
- identifier: 64,
- expectedUnknownAction: IPv6OptionUnknownActionDiscard,
- },
- {
- name: "Discard and ICMP with zero LSBs",
- identifier: 128,
- expectedUnknownAction: IPv6OptionUnknownActionDiscardSendICMP,
- },
- {
- name: "Discard and ICMP for non multicast destination with zero LSBs",
- identifier: 192,
- expectedUnknownAction: IPv6OptionUnknownActionDiscardSendICMPNoMulticastDest,
- },
- {
- name: "Skip with non-zero LSBs",
- identifier: 63,
- expectedUnknownAction: IPv6OptionUnknownActionSkip,
- },
- {
- name: "Discard with non-zero LSBs",
- identifier: 127,
- expectedUnknownAction: IPv6OptionUnknownActionDiscard,
- },
- {
- name: "Discard and ICMP with non-zero LSBs",
- identifier: 191,
- expectedUnknownAction: IPv6OptionUnknownActionDiscardSendICMP,
- },
- {
- name: "Discard and ICMP for non multicast destination with non-zero LSBs",
- identifier: 255,
- expectedUnknownAction: IPv6OptionUnknownActionDiscardSendICMPNoMulticastDest,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- opt := &IPv6UnknownExtHdrOption{Identifier: test.identifier, Data: []byte{1, 2, 3, 4}}
- if a := opt.UnknownAction(); a != test.expectedUnknownAction {
- t.Fatalf("got UnknownAction() = %d, want = %d", a, test.expectedUnknownAction)
- }
- })
- }
-
-}
-
-func TestIPv6OptionsExtHdrIterErr(t *testing.T) {
- tests := []struct {
- name string
- bytes []byte
- err error
- }{
- {
- name: "Single unknown with zero length",
- bytes: []byte{255, 0},
- },
- {
- name: "Single unknown with non-zero length",
- bytes: []byte{255, 3, 1, 2, 3},
- },
- {
- name: "Two options",
- bytes: []byte{
- 255, 0,
- 254, 1, 1,
- },
- },
- {
- name: "Three options",
- bytes: []byte{
- 255, 0,
- 254, 1, 1,
- 253, 4, 2, 3, 4, 5,
- },
- },
- {
- name: "Single unknown only identifier",
- bytes: []byte{255},
- err: io.ErrUnexpectedEOF,
- },
- {
- name: "Single unknown too small with length = 1",
- bytes: []byte{255, 1},
- err: io.ErrUnexpectedEOF,
- },
- {
- name: "Single unknown too small with length = 2",
- bytes: []byte{255, 2, 1},
- err: io.ErrUnexpectedEOF,
- },
- {
- name: "Valid first with second unknown only identifier",
- bytes: []byte{
- 255, 0,
- 254,
- },
- err: io.ErrUnexpectedEOF,
- },
- {
- name: "Valid first with second unknown missing data",
- bytes: []byte{
- 255, 0,
- 254, 1,
- },
- err: io.ErrUnexpectedEOF,
- },
- {
- name: "Valid first with second unknown too small",
- bytes: []byte{
- 255, 0,
- 254, 2, 1,
- },
- err: io.ErrUnexpectedEOF,
- },
- {
- name: "One Pad1",
- bytes: []byte{0},
- },
- {
- name: "Multiple Pad1",
- bytes: []byte{0, 0, 0},
- },
- {
- name: "Multiple PadN",
- bytes: []byte{
- // Pad3
- 1, 1, 1,
-
- // Pad5
- 1, 3, 1, 2, 3,
- },
- },
- {
- name: "Pad5 too small middle of data buffer",
- bytes: []byte{1, 3, 1, 2},
- err: io.ErrUnexpectedEOF,
- },
- {
- name: "Pad5 no data",
- bytes: []byte{1, 3},
- err: io.ErrUnexpectedEOF,
- },
- {
- name: "Router alert without data",
- bytes: []byte{byte(ipv6RouterAlertHopByHopOptionIdentifier), 0},
- err: ErrMalformedIPv6ExtHdrOption,
- },
- {
- name: "Router alert with partial data",
- bytes: []byte{byte(ipv6RouterAlertHopByHopOptionIdentifier), 1, 1},
- err: ErrMalformedIPv6ExtHdrOption,
- },
- {
- name: "Router alert with partial data and Pad1",
- bytes: []byte{byte(ipv6RouterAlertHopByHopOptionIdentifier), 1, 1, 0},
- err: ErrMalformedIPv6ExtHdrOption,
- },
- {
- name: "Router alert with extra data",
- bytes: []byte{byte(ipv6RouterAlertHopByHopOptionIdentifier), 3, 1, 2, 3},
- err: ErrMalformedIPv6ExtHdrOption,
- },
- {
- name: "Router alert with missing data",
- bytes: []byte{byte(ipv6RouterAlertHopByHopOptionIdentifier), 1},
- err: io.ErrUnexpectedEOF,
- },
- }
-
- check := func(t *testing.T, it IPv6OptionsExtHdrOptionsIterator, expectedErr error) {
- for i := 0; ; i++ {
- _, done, err := it.Next()
- if err != nil {
-				// If we encountered a non-nil error while iterating, make sure it
-				// is the same error as expectedErr.
- if !errors.Is(err, expectedErr) {
- t.Fatalf("got %d-th Next() = %v, want = %v", i, err, expectedErr)
- }
-
- return
- }
- if done {
- // If we are done (without an error), make sure that we did not expect
- // an error.
- if expectedErr != nil {
- t.Fatalf("expected error when iterating; want = %s", expectedErr)
- }
-
- return
- }
- }
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- t.Run("Hop By Hop", func(t *testing.T) {
- extHdr := IPv6HopByHopOptionsExtHdr{ipv6OptionsExtHdr: test.bytes}
- check(t, extHdr.Iter(), test.err)
- })
-
- t.Run("Destination", func(t *testing.T) {
- extHdr := IPv6DestinationOptionsExtHdr{ipv6OptionsExtHdr: test.bytes}
- check(t, extHdr.Iter(), test.err)
- })
- })
- }
-}
-
-func TestIPv6OptionsExtHdrIter(t *testing.T) {
- tests := []struct {
- name string
- bytes []byte
- expected []IPv6ExtHdrOption
- }{
- {
- name: "Single unknown with zero length",
- bytes: []byte{255, 0},
- expected: []IPv6ExtHdrOption{
- &IPv6UnknownExtHdrOption{Identifier: 255, Data: []byte{}},
- },
- },
- {
- name: "Single unknown with non-zero length",
- bytes: []byte{255, 3, 1, 2, 3},
- expected: []IPv6ExtHdrOption{
- &IPv6UnknownExtHdrOption{Identifier: 255, Data: []byte{1, 2, 3}},
- },
- },
- {
- name: "Single Pad1",
- bytes: []byte{0},
- },
- {
- name: "Two Pad1",
- bytes: []byte{0, 0},
- },
- {
- name: "Single Pad3",
- bytes: []byte{1, 1, 1},
- },
- {
- name: "Single Pad5",
- bytes: []byte{1, 3, 1, 2, 3},
- },
- {
- name: "Multiple Pad",
- bytes: []byte{
- // Pad1
- 0,
-
- // Pad2
- 1, 0,
-
- // Pad3
- 1, 1, 1,
-
- // Pad4
- 1, 2, 1, 2,
-
- // Pad5
- 1, 3, 1, 2, 3,
- },
- },
- {
- name: "Multiple options",
- bytes: []byte{
- // Pad1
- 0,
-
- // Unknown
- 255, 0,
-
- // Pad2
- 1, 0,
-
- // Unknown
- 254, 1, 1,
-
- // Pad3
- 1, 1, 1,
-
- // Unknown
- 253, 4, 2, 3, 4, 5,
-
- // Pad4
- 1, 2, 1, 2,
- },
- expected: []IPv6ExtHdrOption{
- &IPv6UnknownExtHdrOption{Identifier: 255, Data: []byte{}},
- &IPv6UnknownExtHdrOption{Identifier: 254, Data: []byte{1}},
- &IPv6UnknownExtHdrOption{Identifier: 253, Data: []byte{2, 3, 4, 5}},
- },
- },
- }
-
- checkIter := func(t *testing.T, it IPv6OptionsExtHdrOptionsIterator, expected []IPv6ExtHdrOption) {
- for i, e := range expected {
- opt, done, err := it.Next()
- if err != nil {
- t.Errorf("(i=%d) Next(): %s", i, err)
- }
- if done {
- t.Errorf("(i=%d) unexpectedly done iterating", i)
- }
- if diff := cmp.Diff(e, opt); diff != "" {
- t.Errorf("(i=%d) got option mismatch (-want +got):\n%s", i, diff)
- }
-
- if t.Failed() {
- t.FailNow()
- }
- }
-
- opt, done, err := it.Next()
- if err != nil {
- t.Errorf("(last) Next(): %s", err)
- }
- if !done {
- t.Errorf("(last) iterator unexpectedly not done")
- }
- if opt != nil {
- t.Errorf("(last) got Next() = %T, want = nil", opt)
- }
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- t.Run("Hop By Hop", func(t *testing.T) {
- extHdr := IPv6HopByHopOptionsExtHdr{ipv6OptionsExtHdr: test.bytes}
- checkIter(t, extHdr.Iter(), test.expected)
- })
-
- t.Run("Destination", func(t *testing.T) {
- extHdr := IPv6DestinationOptionsExtHdr{ipv6OptionsExtHdr: test.bytes}
- checkIter(t, extHdr.Iter(), test.expected)
- })
- })
- }
-}
-
-func TestIPv6RoutingExtHdr(t *testing.T) {
- tests := []struct {
- name string
- bytes []byte
- segmentsLeft uint8
- }{
- {
- name: "Zeroes",
- bytes: []byte{0, 0, 0, 0, 0, 0},
- segmentsLeft: 0,
- },
- {
- name: "Ones",
- bytes: []byte{1, 1, 1, 1, 1, 1},
- segmentsLeft: 1,
- },
- {
- name: "Mixed",
- bytes: []byte{1, 2, 3, 4, 5, 6},
- segmentsLeft: 2,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- extHdr := IPv6RoutingExtHdr(test.bytes)
- if got := extHdr.SegmentsLeft(); got != test.segmentsLeft {
- t.Errorf("got SegmentsLeft() = %d, want = %d", got, test.segmentsLeft)
- }
- })
- }
-}
-
-func TestIPv6FragmentExtHdr(t *testing.T) {
- tests := []struct {
- name string
- bytes [6]byte
- fragmentOffset uint16
- more bool
- id uint32
- }{
- {
- name: "Zeroes",
- bytes: [6]byte{0, 0, 0, 0, 0, 0},
- fragmentOffset: 0,
- more: false,
- id: 0,
- },
- {
- name: "Ones",
- bytes: [6]byte{0, 9, 0, 0, 0, 1},
- fragmentOffset: 1,
- more: true,
- id: 1,
- },
- {
- name: "Mixed",
- bytes: [6]byte{68, 9, 128, 4, 2, 1},
- fragmentOffset: 2177,
- more: true,
- id: 2147746305,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- extHdr := IPv6FragmentExtHdr(test.bytes)
- if got := extHdr.FragmentOffset(); got != test.fragmentOffset {
- t.Errorf("got FragmentOffset() = %d, want = %d", got, test.fragmentOffset)
- }
- if got := extHdr.More(); got != test.more {
- t.Errorf("got More() = %t, want = %t", got, test.more)
- }
- if got := extHdr.ID(); got != test.id {
- t.Errorf("got ID() = %d, want = %d", got, test.id)
- }
- })
- }
-}
-
-func makeVectorisedViewFromByteBuffers(bs ...[]byte) buffer.VectorisedView {
- size := 0
- var vs []buffer.View
-
- for _, b := range bs {
- vs = append(vs, buffer.View(b))
- size += len(b)
- }
-
- return buffer.NewVectorisedView(size, vs)
-}
-
-func TestIPv6ExtHdrIterErr(t *testing.T) {
- tests := []struct {
- name string
- firstNextHdr IPv6ExtensionHeaderIdentifier
- payload buffer.VectorisedView
- err error
- }{
- {
- name: "Upper layer only without data",
- firstNextHdr: 255,
- },
- {
- name: "Upper layer only with data",
- firstNextHdr: 255,
- payload: makeVectorisedViewFromByteBuffers([]byte{1, 2, 3, 4}),
- },
- {
- name: "No next header",
- firstNextHdr: IPv6NoNextHeaderIdentifier,
- },
- {
- name: "No next header with data",
- firstNextHdr: IPv6NoNextHeaderIdentifier,
- payload: makeVectorisedViewFromByteBuffers([]byte{1, 2, 3, 4}),
- },
- {
- name: "Valid single hop by hop",
- firstNextHdr: IPv6HopByHopOptionsExtHdrIdentifier,
- payload: makeVectorisedViewFromByteBuffers([]byte{255, 0, 1, 4, 1, 2, 3, 4}),
- },
- {
- name: "Hop by hop too small",
- firstNextHdr: IPv6HopByHopOptionsExtHdrIdentifier,
- payload: makeVectorisedViewFromByteBuffers([]byte{255, 0, 1, 4, 1, 2, 3}),
- err: io.ErrUnexpectedEOF,
- },
- {
- name: "Valid single fragment",
- firstNextHdr: IPv6FragmentExtHdrIdentifier,
- payload: makeVectorisedViewFromByteBuffers([]byte{255, 0, 68, 9, 128, 4, 2, 1}),
- },
- {
- name: "Fragment too small",
- firstNextHdr: IPv6FragmentExtHdrIdentifier,
- payload: makeVectorisedViewFromByteBuffers([]byte{255, 0, 68, 9, 128, 4, 2}),
- err: io.ErrUnexpectedEOF,
- },
- {
- name: "Valid single destination",
- firstNextHdr: IPv6DestinationOptionsExtHdrIdentifier,
- payload: makeVectorisedViewFromByteBuffers([]byte{255, 0, 1, 4, 1, 2, 3, 4}),
- },
- {
- name: "Destination too small",
- firstNextHdr: IPv6DestinationOptionsExtHdrIdentifier,
- payload: makeVectorisedViewFromByteBuffers([]byte{255, 0, 1, 4, 1, 2, 3}),
- err: io.ErrUnexpectedEOF,
- },
- {
- name: "Valid single routing",
- firstNextHdr: IPv6RoutingExtHdrIdentifier,
- payload: makeVectorisedViewFromByteBuffers([]byte{255, 0, 1, 2, 3, 4, 5, 6}),
- },
- {
- name: "Valid single routing across views",
- firstNextHdr: IPv6RoutingExtHdrIdentifier,
- payload: makeVectorisedViewFromByteBuffers([]byte{255, 0, 1, 2}, []byte{3, 4, 5, 6}),
- },
- {
- name: "Routing too small with zero length field",
- firstNextHdr: IPv6RoutingExtHdrIdentifier,
- payload: makeVectorisedViewFromByteBuffers([]byte{255, 0, 1, 2, 3, 4, 5}),
- err: io.ErrUnexpectedEOF,
- },
- {
- name: "Valid routing with non-zero length field",
- firstNextHdr: IPv6RoutingExtHdrIdentifier,
- payload: makeVectorisedViewFromByteBuffers([]byte{255, 1, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 7, 8}),
- },
- {
- name: "Valid routing with non-zero length field across views",
- firstNextHdr: IPv6RoutingExtHdrIdentifier,
- payload: makeVectorisedViewFromByteBuffers([]byte{255, 1, 1, 2, 3, 4, 5, 6}, []byte{1, 2, 3, 4, 5, 6, 7, 8}),
- },
- {
- name: "Routing too small with non-zero length field",
- firstNextHdr: IPv6RoutingExtHdrIdentifier,
- payload: makeVectorisedViewFromByteBuffers([]byte{255, 1, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 7}),
- err: io.ErrUnexpectedEOF,
- },
- {
- name: "Routing too small with non-zero length field across views",
- firstNextHdr: IPv6RoutingExtHdrIdentifier,
- payload: makeVectorisedViewFromByteBuffers([]byte{255, 1, 1, 2, 3, 4, 5, 6}, []byte{1, 2, 3, 4, 5, 6, 7}),
- err: io.ErrUnexpectedEOF,
- },
- {
- name: "Mixed",
- firstNextHdr: IPv6HopByHopOptionsExtHdrIdentifier,
- payload: makeVectorisedViewFromByteBuffers([]byte{
- // Hop By Hop Options extension header.
- uint8(IPv6FragmentExtHdrIdentifier), 0, 1, 4, 1, 2, 3, 4,
-
- // (Atomic) Fragment extension header.
- //
- // Reserved bits are 1 which should not affect anything.
- uint8(IPv6RoutingExtHdrIdentifier), 255, 0, 6, 128, 4, 2, 1,
-
- // Routing extension header.
- uint8(IPv6DestinationOptionsExtHdrIdentifier), 0, 1, 2, 3, 4, 5, 6,
-
- // Destination Options extension header.
- 255, 0, 255, 4, 1, 2, 3, 4,
-
- // Upper layer data.
- 1, 2, 3, 4,
- }),
- },
- {
- name: "Mixed without upper layer data",
- firstNextHdr: IPv6HopByHopOptionsExtHdrIdentifier,
- payload: makeVectorisedViewFromByteBuffers([]byte{
- // Hop By Hop Options extension header.
- uint8(IPv6FragmentExtHdrIdentifier), 0, 1, 4, 1, 2, 3, 4,
-
- // (Atomic) Fragment extension header.
- //
- // Reserved bits are 1 which should not affect anything.
- uint8(IPv6RoutingExtHdrIdentifier), 255, 0, 6, 128, 4, 2, 1,
-
- // Routing extension header.
- uint8(IPv6DestinationOptionsExtHdrIdentifier), 0, 1, 2, 3, 4, 5, 6,
-
- // Destination Options extension header.
- 255, 0, 255, 4, 1, 2, 3, 4,
- }),
- },
- {
- name: "Mixed without upper layer data but last ext hdr too small",
- firstNextHdr: IPv6HopByHopOptionsExtHdrIdentifier,
- payload: makeVectorisedViewFromByteBuffers([]byte{
- // Hop By Hop Options extension header.
- uint8(IPv6FragmentExtHdrIdentifier), 0, 1, 4, 1, 2, 3, 4,
-
- // (Atomic) Fragment extension header.
- //
- // Reserved bits are 1 which should not affect anything.
- uint8(IPv6RoutingExtHdrIdentifier), 255, 0, 6, 128, 4, 2, 1,
-
- // Routing extension header.
- uint8(IPv6DestinationOptionsExtHdrIdentifier), 0, 1, 2, 3, 4, 5, 6,
-
- // Destination Options extension header.
- 255, 0, 255, 4, 1, 2, 3,
- }),
- err: io.ErrUnexpectedEOF,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- it := MakeIPv6PayloadIterator(test.firstNextHdr, test.payload)
-
- for i := 0; ; i++ {
- _, done, err := it.Next()
- if err != nil {
-					// If we encountered a non-nil error while iterating, make sure it
-					// is the same error as test.err.
- if !errors.Is(err, test.err) {
- t.Fatalf("got %d-th Next() = %v, want = %v", i, err, test.err)
- }
-
- return
- }
- if done {
- // If we are done (without an error), make sure that we did not expect
- // an error.
- if test.err != nil {
- t.Fatalf("expected error when iterating; want = %s", test.err)
- }
-
- return
- }
- }
- })
- }
-}
-
-func TestIPv6ExtHdrIter(t *testing.T) {
- routingExtHdrWithUpperLayerData := buffer.View([]byte{255, 0, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4})
- upperLayerData := buffer.View([]byte{1, 2, 3, 4})
- tests := []struct {
- name string
- firstNextHdr IPv6ExtensionHeaderIdentifier
- payload buffer.VectorisedView
- expected []IPv6PayloadHeader
- }{
- // With a non-atomic fragment that is not the first fragment, the payload
- // after the fragment will not be parsed because the payload is expected to
- // only hold upper layer data.
- {
- name: "hopbyhop - fragment (not first) - routing - upper",
- firstNextHdr: IPv6HopByHopOptionsExtHdrIdentifier,
- payload: makeVectorisedViewFromByteBuffers([]byte{
- // Hop By Hop extension header.
- uint8(IPv6FragmentExtHdrIdentifier), 0, 1, 4, 1, 2, 3, 4,
-
- // Fragment extension header.
- //
-				// More = 1, Fragment Offset = 2177, ID = 2147746305
- uint8(IPv6RoutingExtHdrIdentifier), 0, 68, 9, 128, 4, 2, 1,
-
- // Routing extension header.
- //
-				// Even though we have a routing ext header here, it should
-				// be interpreted as raw bytes, as only the first fragment is expected
- // to hold headers.
- 255, 0, 1, 2, 3, 4, 5, 6,
-
- // Upper layer data.
- 1, 2, 3, 4,
- }),
- expected: []IPv6PayloadHeader{
- IPv6HopByHopOptionsExtHdr{ipv6OptionsExtHdr: []byte{1, 4, 1, 2, 3, 4}},
- IPv6FragmentExtHdr([6]byte{68, 9, 128, 4, 2, 1}),
- IPv6RawPayloadHeader{
- Identifier: IPv6RoutingExtHdrIdentifier,
- Buf: routingExtHdrWithUpperLayerData.ToVectorisedView(),
- },
- },
- },
- {
- name: "hopbyhop - fragment (first) - routing - upper",
- firstNextHdr: IPv6HopByHopOptionsExtHdrIdentifier,
- payload: makeVectorisedViewFromByteBuffers([]byte{
- // Hop By Hop extension header.
- uint8(IPv6FragmentExtHdrIdentifier), 0, 1, 4, 1, 2, 3, 4,
-
- // Fragment extension header.
- //
- // More = 1, Fragment Offset = 0, ID = 2147746305
- uint8(IPv6RoutingExtHdrIdentifier), 0, 0, 1, 128, 4, 2, 1,
-
- // Routing extension header.
- 255, 0, 1, 2, 3, 4, 5, 6,
-
- // Upper layer data.
- 1, 2, 3, 4,
- }),
- expected: []IPv6PayloadHeader{
- IPv6HopByHopOptionsExtHdr{ipv6OptionsExtHdr: []byte{1, 4, 1, 2, 3, 4}},
- IPv6FragmentExtHdr([6]byte{0, 1, 128, 4, 2, 1}),
- IPv6RoutingExtHdr([]byte{1, 2, 3, 4, 5, 6}),
- IPv6RawPayloadHeader{
- Identifier: 255,
- Buf: upperLayerData.ToVectorisedView(),
- },
- },
- },
- {
- name: "fragment - routing - upper (across views)",
- firstNextHdr: IPv6FragmentExtHdrIdentifier,
- payload: makeVectorisedViewFromByteBuffers([]byte{
- // Fragment extension header.
- uint8(IPv6RoutingExtHdrIdentifier), 0, 68, 9, 128, 4, 2, 1,
-
- // Routing extension header.
- 255, 0, 1, 2}, []byte{3, 4, 5, 6,
-
- // Upper layer data.
- 1, 2, 3, 4,
- }),
- expected: []IPv6PayloadHeader{
- IPv6FragmentExtHdr([6]byte{68, 9, 128, 4, 2, 1}),
- IPv6RawPayloadHeader{
- Identifier: IPv6RoutingExtHdrIdentifier,
- Buf: routingExtHdrWithUpperLayerData.ToVectorisedView(),
- },
- },
- },
-
- // If we have an atomic fragment, the payload following the fragment
- // extension header should be parsed normally.
- {
- name: "atomic fragment - routing - destination - upper",
- firstNextHdr: IPv6FragmentExtHdrIdentifier,
- payload: makeVectorisedViewFromByteBuffers([]byte{
- // Fragment extension header.
- //
- // Reserved bits are 1 which should not affect anything.
- uint8(IPv6RoutingExtHdrIdentifier), 255, 0, 6, 128, 4, 2, 1,
-
- // Routing extension header.
- uint8(IPv6DestinationOptionsExtHdrIdentifier), 0, 1, 2, 3, 4, 5, 6,
-
- // Destination Options extension header.
- 255, 0, 1, 4, 1, 2, 3, 4,
-
- // Upper layer data.
- 1, 2, 3, 4,
- }),
- expected: []IPv6PayloadHeader{
- IPv6FragmentExtHdr([6]byte{0, 6, 128, 4, 2, 1}),
- IPv6RoutingExtHdr([]byte{1, 2, 3, 4, 5, 6}),
- IPv6DestinationOptionsExtHdr{ipv6OptionsExtHdr: []byte{1, 4, 1, 2, 3, 4}},
- IPv6RawPayloadHeader{
- Identifier: 255,
- Buf: upperLayerData.ToVectorisedView(),
- },
- },
- },
- {
- name: "atomic fragment - routing - upper (across views)",
- firstNextHdr: IPv6FragmentExtHdrIdentifier,
- payload: makeVectorisedViewFromByteBuffers([]byte{
- // Fragment extension header.
- //
- // Reserved bits are 1 which should not affect anything.
- uint8(IPv6RoutingExtHdrIdentifier), 255, 0, 6}, []byte{128, 4, 2, 1,
-
- // Routing extension header.
- 255, 0, 1, 2}, []byte{3, 4, 5, 6,
-
- // Upper layer data.
- 1, 2}, []byte{3, 4}),
- expected: []IPv6PayloadHeader{
- IPv6FragmentExtHdr([6]byte{0, 6, 128, 4, 2, 1}),
- IPv6RoutingExtHdr([]byte{1, 2, 3, 4, 5, 6}),
- IPv6RawPayloadHeader{
- Identifier: 255,
- Buf: makeVectorisedViewFromByteBuffers(upperLayerData[:2], upperLayerData[2:]),
- },
- },
- },
- {
- name: "atomic fragment - destination - no next header",
- firstNextHdr: IPv6FragmentExtHdrIdentifier,
- payload: makeVectorisedViewFromByteBuffers([]byte{
- // Fragment extension header.
- //
- // Res (Reserved) bits are 1 which should not affect anything.
- uint8(IPv6DestinationOptionsExtHdrIdentifier), 0, 0, 6, 128, 4, 2, 1,
-
- // Destination Options extension header.
- uint8(IPv6NoNextHeaderIdentifier), 0, 1, 4, 1, 2, 3, 4,
-
- // Random data.
- 1, 2, 3, 4,
- }),
- expected: []IPv6PayloadHeader{
- IPv6FragmentExtHdr([6]byte{0, 6, 128, 4, 2, 1}),
- IPv6DestinationOptionsExtHdr{ipv6OptionsExtHdr: []byte{1, 4, 1, 2, 3, 4}},
- },
- },
- {
- name: "routing - atomic fragment - no next header",
- firstNextHdr: IPv6RoutingExtHdrIdentifier,
- payload: makeVectorisedViewFromByteBuffers([]byte{
- // Routing extension header.
- uint8(IPv6FragmentExtHdrIdentifier), 0, 1, 2, 3, 4, 5, 6,
-
- // Fragment extension header.
- //
- // Reserved bits are 1 which should not affect anything.
- uint8(IPv6NoNextHeaderIdentifier), 0, 0, 6, 128, 4, 2, 1,
-
- // Random data.
- 1, 2, 3, 4,
- }),
- expected: []IPv6PayloadHeader{
- IPv6RoutingExtHdr([]byte{1, 2, 3, 4, 5, 6}),
- IPv6FragmentExtHdr([6]byte{0, 6, 128, 4, 2, 1}),
- },
- },
- {
- name: "routing - atomic fragment - no next header (across views)",
- firstNextHdr: IPv6RoutingExtHdrIdentifier,
- payload: makeVectorisedViewFromByteBuffers([]byte{
- // Routing extension header.
- uint8(IPv6FragmentExtHdrIdentifier), 0, 1, 2, 3, 4, 5, 6,
-
- // Fragment extension header.
- //
- // Reserved bits are 1 which should not affect anything.
- uint8(IPv6NoNextHeaderIdentifier), 255, 0, 6}, []byte{128, 4, 2, 1,
-
- // Random data.
- 1, 2, 3, 4,
- }),
- expected: []IPv6PayloadHeader{
- IPv6RoutingExtHdr([]byte{1, 2, 3, 4, 5, 6}),
- IPv6FragmentExtHdr([6]byte{0, 6, 128, 4, 2, 1}),
- },
- },
- {
- name: "hopbyhop - routing - fragment - no next header",
- firstNextHdr: IPv6HopByHopOptionsExtHdrIdentifier,
- payload: makeVectorisedViewFromByteBuffers([]byte{
- // Hop By Hop Options extension header.
- uint8(IPv6RoutingExtHdrIdentifier), 0, 1, 4, 1, 2, 3, 4,
-
- // Routing extension header.
- uint8(IPv6FragmentExtHdrIdentifier), 0, 1, 2, 3, 4, 5, 6,
-
- // Fragment extension header.
- //
- // Fragment Offset = 32; Res = 6.
- uint8(IPv6NoNextHeaderIdentifier), 0, 1, 6, 128, 4, 2, 1,
-
- // Random data.
- 1, 2, 3, 4,
- }),
- expected: []IPv6PayloadHeader{
- IPv6HopByHopOptionsExtHdr{ipv6OptionsExtHdr: []byte{1, 4, 1, 2, 3, 4}},
- IPv6RoutingExtHdr([]byte{1, 2, 3, 4, 5, 6}),
- IPv6FragmentExtHdr([6]byte{1, 6, 128, 4, 2, 1}),
- IPv6RawPayloadHeader{
- Identifier: IPv6NoNextHeaderIdentifier,
- Buf: upperLayerData.ToVectorisedView(),
- },
- },
- },
-
- // Test the raw payload for common transport layer protocol numbers.
- {
- name: "TCP raw payload",
- firstNextHdr: IPv6ExtensionHeaderIdentifier(TCPProtocolNumber),
- payload: makeVectorisedViewFromByteBuffers(upperLayerData),
- expected: []IPv6PayloadHeader{IPv6RawPayloadHeader{
- Identifier: IPv6ExtensionHeaderIdentifier(TCPProtocolNumber),
- Buf: upperLayerData.ToVectorisedView(),
- }},
- },
- {
- name: "UDP raw payload",
- firstNextHdr: IPv6ExtensionHeaderIdentifier(UDPProtocolNumber),
- payload: makeVectorisedViewFromByteBuffers(upperLayerData),
- expected: []IPv6PayloadHeader{IPv6RawPayloadHeader{
- Identifier: IPv6ExtensionHeaderIdentifier(UDPProtocolNumber),
- Buf: upperLayerData.ToVectorisedView(),
- }},
- },
- {
- name: "ICMPv4 raw payload",
- firstNextHdr: IPv6ExtensionHeaderIdentifier(ICMPv4ProtocolNumber),
- payload: makeVectorisedViewFromByteBuffers(upperLayerData),
- expected: []IPv6PayloadHeader{IPv6RawPayloadHeader{
- Identifier: IPv6ExtensionHeaderIdentifier(ICMPv4ProtocolNumber),
- Buf: upperLayerData.ToVectorisedView(),
- }},
- },
- {
- name: "ICMPv6 raw payload",
- firstNextHdr: IPv6ExtensionHeaderIdentifier(ICMPv6ProtocolNumber),
- payload: makeVectorisedViewFromByteBuffers(upperLayerData),
- expected: []IPv6PayloadHeader{IPv6RawPayloadHeader{
- Identifier: IPv6ExtensionHeaderIdentifier(ICMPv6ProtocolNumber),
- Buf: upperLayerData.ToVectorisedView(),
- }},
- },
- {
-			name:         "Unknown next header raw payload",
- firstNextHdr: 255,
- payload: makeVectorisedViewFromByteBuffers(upperLayerData),
- expected: []IPv6PayloadHeader{IPv6RawPayloadHeader{
- Identifier: 255,
- Buf: upperLayerData.ToVectorisedView(),
- }},
- },
- {
-			name:         "Unknown next header raw payload (across views)",
- firstNextHdr: 255,
- payload: makeVectorisedViewFromByteBuffers(upperLayerData[:2], upperLayerData[2:]),
- expected: []IPv6PayloadHeader{IPv6RawPayloadHeader{
- Identifier: 255,
- Buf: makeVectorisedViewFromByteBuffers(upperLayerData[:2], upperLayerData[2:]),
- }},
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- it := MakeIPv6PayloadIterator(test.firstNextHdr, test.payload)
-
- for i, e := range test.expected {
- extHdr, done, err := it.Next()
- if err != nil {
- t.Errorf("(i=%d) Next(): %s", i, err)
- }
- if done {
- t.Errorf("(i=%d) unexpectedly done iterating", i)
- }
- if diff := cmp.Diff(e, extHdr); diff != "" {
- t.Errorf("(i=%d) got ext hdr mismatch (-want +got):\n%s", i, diff)
- }
-
- if t.Failed() {
- t.FailNow()
- }
- }
-
- extHdr, done, err := it.Next()
- if err != nil {
- t.Errorf("(last) Next(): %s", err)
- }
- if !done {
- t.Errorf("(last) iterator unexpectedly not done")
- }
- if extHdr != nil {
- t.Errorf("(last) got Next() = %T, want = nil", extHdr)
- }
- })
- }
-}
-
-var _ IPv6SerializableHopByHopOption = (*dummyHbHOptionSerializer)(nil)
-
-// dummyHbHOptionSerializer provides a generic implementation of
-// IPv6SerializableHopByHopOption for use in tests.
-type dummyHbHOptionSerializer struct {
- id IPv6ExtHdrOptionIdentifier
- payload []byte
- align int
- alignOffset int
-}
-
-// identifier implements IPv6SerializableHopByHopOption.
-func (s *dummyHbHOptionSerializer) identifier() IPv6ExtHdrOptionIdentifier {
- return s.id
-}
-
-// length implements IPv6SerializableHopByHopOption.
-func (s *dummyHbHOptionSerializer) length() uint8 {
- return uint8(len(s.payload))
-}
-
-// alignment implements IPv6SerializableHopByHopOption.
-func (s *dummyHbHOptionSerializer) alignment() (int, int) {
- align := 1
- if s.align != 0 {
- align = s.align
- }
- return align, s.alignOffset
-}
-
-// serializeInto implements IPv6SerializableHopByHopOption.
-func (s *dummyHbHOptionSerializer) serializeInto(b []byte) uint8 {
- return uint8(copy(b, s.payload))
-}
-
-func TestIPv6HopByHopSerializer(t *testing.T) {
- validateDummies := func(t *testing.T, serializable IPv6SerializableHopByHopOption, deserialized IPv6ExtHdrOption) {
- t.Helper()
- dummy, ok := serializable.(*dummyHbHOptionSerializer)
- if !ok {
- t.Fatalf("got serializable = %T, want = *dummyHbHOptionSerializer", serializable)
- }
- unknown, ok := deserialized.(*IPv6UnknownExtHdrOption)
- if !ok {
- t.Fatalf("got deserialized = %T, want = %T", deserialized, &IPv6UnknownExtHdrOption{})
- }
- if dummy.id != unknown.Identifier {
- t.Errorf("got deserialized identifier = %d, want = %d", unknown.Identifier, dummy.id)
- }
- if diff := cmp.Diff(dummy.payload, unknown.Data); diff != "" {
- t.Errorf("option payload deserialization mismatch (-want +got):\n%s", diff)
- }
- }
- tests := []struct {
- name string
- nextHeader uint8
- options []IPv6SerializableHopByHopOption
- expect []byte
- validate func(*testing.T, IPv6SerializableHopByHopOption, IPv6ExtHdrOption)
- }{
- {
- name: "single option",
- nextHeader: 13,
- options: []IPv6SerializableHopByHopOption{
- &dummyHbHOptionSerializer{
- id: 15,
- payload: []byte{9, 8, 7, 6},
- },
- },
- expect: []byte{13, 0, 15, 4, 9, 8, 7, 6},
- validate: validateDummies,
- },
- {
- name: "short option padN zero",
- nextHeader: 88,
- options: []IPv6SerializableHopByHopOption{
- &dummyHbHOptionSerializer{
- id: 22,
- payload: []byte{4, 5},
- },
- },
- expect: []byte{88, 0, 22, 2, 4, 5, 1, 0},
- validate: validateDummies,
- },
- {
- name: "short option pad1",
- nextHeader: 11,
- options: []IPv6SerializableHopByHopOption{
- &dummyHbHOptionSerializer{
- id: 33,
- payload: []byte{1, 2, 3},
- },
- },
- expect: []byte{11, 0, 33, 3, 1, 2, 3, 0},
- validate: validateDummies,
- },
- {
- name: "long option padN",
- nextHeader: 55,
- options: []IPv6SerializableHopByHopOption{
- &dummyHbHOptionSerializer{
- id: 77,
- payload: []byte{1, 2, 3, 4, 5, 6, 7, 8},
- },
- },
- expect: []byte{55, 1, 77, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 0, 0},
- validate: validateDummies,
- },
- {
- name: "two options",
- nextHeader: 33,
- options: []IPv6SerializableHopByHopOption{
- &dummyHbHOptionSerializer{
- id: 11,
- payload: []byte{1, 2, 3},
- },
- &dummyHbHOptionSerializer{
- id: 22,
- payload: []byte{4, 5, 6},
- },
- },
- expect: []byte{33, 1, 11, 3, 1, 2, 3, 22, 3, 4, 5, 6, 1, 2, 0, 0},
- validate: validateDummies,
- },
- {
- name: "two options align 2n",
- nextHeader: 33,
- options: []IPv6SerializableHopByHopOption{
- &dummyHbHOptionSerializer{
- id: 11,
- payload: []byte{1, 2, 3},
- },
- &dummyHbHOptionSerializer{
- id: 22,
- payload: []byte{4, 5, 6},
- align: 2,
- },
- },
- expect: []byte{33, 1, 11, 3, 1, 2, 3, 0, 22, 3, 4, 5, 6, 1, 1, 0},
- validate: validateDummies,
- },
- {
- name: "two options align 8n+1",
- nextHeader: 33,
- options: []IPv6SerializableHopByHopOption{
- &dummyHbHOptionSerializer{
- id: 11,
- payload: []byte{1, 2},
- },
- &dummyHbHOptionSerializer{
- id: 22,
- payload: []byte{4, 5, 6},
- align: 8,
- alignOffset: 1,
- },
- },
- expect: []byte{33, 1, 11, 2, 1, 2, 1, 1, 0, 22, 3, 4, 5, 6, 1, 0},
- validate: validateDummies,
- },
- {
- name: "no options",
- nextHeader: 33,
- options: []IPv6SerializableHopByHopOption{},
- expect: []byte{33, 0, 1, 4, 0, 0, 0, 0},
- },
- {
- name: "Router Alert",
- nextHeader: 33,
- options: []IPv6SerializableHopByHopOption{&IPv6RouterAlertOption{Value: IPv6RouterAlertMLD}},
- expect: []byte{33, 0, 5, 2, 0, 0, 1, 0},
- validate: func(t *testing.T, _ IPv6SerializableHopByHopOption, deserialized IPv6ExtHdrOption) {
- t.Helper()
- routerAlert, ok := deserialized.(*IPv6RouterAlertOption)
- if !ok {
- t.Fatalf("got deserialized = %T, want = *IPv6RouterAlertOption", deserialized)
- }
- if routerAlert.Value != IPv6RouterAlertMLD {
- t.Errorf("got routerAlert.Value = %d, want = %d", routerAlert.Value, IPv6RouterAlertMLD)
- }
- },
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := IPv6SerializableHopByHopExtHdr(test.options)
- length := s.length()
- if length != len(test.expect) {
- t.Fatalf("got s.length() = %d, want = %d", length, len(test.expect))
- }
- b := make([]byte, length)
- for i := range b {
- // Fill the buffer with ones to ensure all padding is correctly set.
- b[i] = 0xFF
- }
- if got := s.serializeInto(test.nextHeader, b); got != length {
- t.Fatalf("got s.serializeInto(..) = %d, want = %d", got, length)
- }
- if diff := cmp.Diff(test.expect, b); diff != "" {
- t.Fatalf("serialization mismatch (-want +got):\n%s", diff)
- }
-
- // Deserialize the options and verify them.
- optLen := (b[ipv6HopByHopExtHdrLengthOffset] + ipv6HopByHopExtHdrUnaccountedLenWords) * ipv6ExtHdrLenBytesPerUnit
- iter := ipv6OptionsExtHdr(b[ipv6HopByHopExtHdrOptionsOffset:optLen]).Iter()
- for _, testOpt := range test.options {
- opt, done, err := iter.Next()
- if err != nil {
- t.Fatalf("iter.Next(): %s", err)
- }
- if done {
- t.Fatalf("got iter.Next() = (%T, %t, _), want = (_, false, _)", opt, done)
- }
- test.validate(t, testOpt, opt)
- }
- opt, done, err := iter.Next()
- if err != nil {
- t.Fatalf("iter.Next(): %s", err)
- }
- if !done {
- t.Fatalf("got iter.Next() = (%T, %t, _), want = (_, true, _)", opt, done)
- }
- })
- }
-}
-
-var _ IPv6SerializableExtHdr = (*dummyIPv6ExtHdrSerializer)(nil)
-
-// dummyIPv6ExtHdrSerializer provides a generic implementation of
-// IPv6SerializableExtHdr for use in tests.
-//
-// The dummy header always carries the nextHeader value in the first byte.
-type dummyIPv6ExtHdrSerializer struct {
- id IPv6ExtensionHeaderIdentifier
- headerContents []byte
-}
-
-// identifier implements IPv6SerializableExtHdr.
-func (s *dummyIPv6ExtHdrSerializer) identifier() IPv6ExtensionHeaderIdentifier {
- return s.id
-}
-
-// length implements IPv6SerializableExtHdr.
-func (s *dummyIPv6ExtHdrSerializer) length() int {
- return len(s.headerContents) + 1
-}
-
-// serializeInto implements IPv6SerializableExtHdr.
-func (s *dummyIPv6ExtHdrSerializer) serializeInto(nextHeader uint8, b []byte) int {
- b[0] = nextHeader
- return copy(b[1:], s.headerContents) + 1
-}
-
-func TestIPv6ExtHdrSerializer(t *testing.T) {
- tests := []struct {
- name string
- headers []IPv6SerializableExtHdr
- nextHeader tcpip.TransportProtocolNumber
- expectSerialized []byte
- expectNextHeader uint8
- }{
- {
- name: "one header",
- headers: []IPv6SerializableExtHdr{
- &dummyIPv6ExtHdrSerializer{
- id: 15,
- headerContents: []byte{1, 2, 3, 4},
- },
- },
- nextHeader: TCPProtocolNumber,
- expectSerialized: []byte{byte(TCPProtocolNumber), 1, 2, 3, 4},
- expectNextHeader: 15,
- },
- {
- name: "two headers",
- headers: []IPv6SerializableExtHdr{
- &dummyIPv6ExtHdrSerializer{
- id: 22,
- headerContents: []byte{1, 2, 3},
- },
- &dummyIPv6ExtHdrSerializer{
- id: 23,
- headerContents: []byte{4, 5, 6},
- },
- },
- nextHeader: ICMPv6ProtocolNumber,
- expectSerialized: []byte{
- 23, 1, 2, 3,
- byte(ICMPv6ProtocolNumber), 4, 5, 6,
- },
- expectNextHeader: 22,
- },
- {
- name: "no headers",
- headers: []IPv6SerializableExtHdr{},
- nextHeader: UDPProtocolNumber,
- expectSerialized: []byte{},
- expectNextHeader: byte(UDPProtocolNumber),
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := IPv6ExtHdrSerializer(test.headers)
- l := s.Length()
- if got, want := l, len(test.expectSerialized); got != want {
- t.Fatalf("got serialized length = %d, want = %d", got, want)
- }
- b := make([]byte, l)
- for i := range b {
- // Fill the buffer with garbage to make sure we're writing to all bytes.
- b[i] = 0xFF
- }
- nextHeader, serializedLen := s.Serialize(test.nextHeader, b)
- if serializedLen != len(test.expectSerialized) || nextHeader != test.expectNextHeader {
- t.Errorf(
- "got s.Serialize(..) = (%d, %d), want = (%d, %d)",
- nextHeader,
- serializedLen,
- test.expectNextHeader,
- len(test.expectSerialized),
- )
- }
- if diff := cmp.Diff(test.expectSerialized, b); diff != "" {
- t.Errorf("serialization mismatch (-want +got):\n%s", diff)
- }
- })
- }
-}
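Note: the deleted extension-header tests above revolve around the Pad1/PadN/TLV layout of Hop-by-Hop and Destination options. The sketch below walks that layout for plain unknown options; it is standalone, skips the router-alert and unknown-action handling the real iterator performs, and is not the header package's implementation.

package main

import (
	"fmt"
	"io"
)

type option struct {
	id   byte
	data []byte
}

// parseOptions walks a Hop-by-Hop/Destination options body: Pad1 is a single
// zero byte, everything else is type-length-value, and PadN (type 1) is
// consumed silently.
func parseOptions(b []byte) ([]option, error) {
	var opts []option
	for len(b) > 0 {
		id := b[0]
		if id == 0 { // Pad1: no length or data byte.
			b = b[1:]
			continue
		}
		if len(b) < 2 {
			return nil, io.ErrUnexpectedEOF
		}
		l := int(b[1])
		if len(b) < 2+l {
			return nil, io.ErrUnexpectedEOF
		}
		if id != 1 { // 1 is PadN; everything else is reported to the caller.
			opts = append(opts, option{id: id, data: b[2 : 2+l]})
		}
		b = b[2+l:]
	}
	return opts, nil
}

func main() {
	// Pad1, unknown option 255 (no data), PadN(2), unknown option 254 (1 byte).
	opts, err := parseOptions([]byte{0, 255, 0, 1, 0, 254, 1, 1})
	fmt.Println(opts, err) // [{255 []} {254 [1]}] <nil>
}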
diff --git a/pkg/tcpip/header/ipv6_test.go b/pkg/tcpip/header/ipv6_test.go
deleted file mode 100644
index 89be84068..000000000
--- a/pkg/tcpip/header/ipv6_test.go
+++ /dev/null
@@ -1,457 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package header_test
-
-import (
- "bytes"
- "crypto/sha256"
- "fmt"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/rand"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/testutil"
-)
-
-const linkAddr = tcpip.LinkAddress("\x02\x02\x03\x04\x05\x06")
-
-var (
- linkLocalAddr = testutil.MustParse6("fe80::1")
- linkLocalMulticastAddr = testutil.MustParse6("ff02::1")
- uniqueLocalAddr1 = testutil.MustParse6("fc00::1")
- uniqueLocalAddr2 = testutil.MustParse6("fd00::2")
- globalAddr = testutil.MustParse6("a000::1")
-)
-
-func TestEthernetAdddressToModifiedEUI64(t *testing.T) {
- expectedIID := [header.IIDSize]byte{0, 2, 3, 255, 254, 4, 5, 6}
-
- if diff := cmp.Diff(expectedIID, header.EthernetAddressToModifiedEUI64(linkAddr)); diff != "" {
- t.Errorf("EthernetAddressToModifiedEUI64(%s) mismatch (-want +got):\n%s", linkAddr, diff)
- }
-
- var buf [header.IIDSize]byte
- header.EthernetAdddressToModifiedEUI64IntoBuf(linkAddr, buf[:])
- if diff := cmp.Diff(expectedIID, buf); diff != "" {
- t.Errorf("EthernetAddressToModifiedEUI64IntoBuf(%s, _) mismatch (-want +got):\n%s", linkAddr, diff)
- }
-}
-
-func TestLinkLocalAddr(t *testing.T) {
- if got, want := header.LinkLocalAddr(linkAddr), testutil.MustParse6("fe80::2:3ff:fe04:506"); got != want {
- t.Errorf("got LinkLocalAddr(%s) = %s, want = %s", linkAddr, got, want)
- }
-}
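// Note (illustration only, not the header package's implementation): the two
// tests above follow the modified EUI-64 construction: flip the
// universal/local bit of the MAC's first byte, splice ff:fe into the middle,
// and prepend the fe80::/64 prefix for the link-local address.
package main

import "fmt"

func modifiedEUI64(mac [6]byte) [8]byte {
	return [8]byte{mac[0] ^ 0x02, mac[1], mac[2], 0xff, 0xfe, mac[3], mac[4], mac[5]}
}

func main() {
	iid := modifiedEUI64([6]byte{0x02, 0x02, 0x03, 0x04, 0x05, 0x06})
	fmt.Printf("IID: % x\n", iid) // 00 02 03 ff fe 04 05 06

	var addr [16]byte
	addr[0], addr[1] = 0xfe, 0x80 // link-local prefix fe80::/64
	copy(addr[8:], iid[:])
	fmt.Printf("link-local: % x\n", addr) // fe80::2:3ff:fe04:506
}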
-
-func TestAppendOpaqueInterfaceIdentifier(t *testing.T) {
- var secretKeyBuf [header.OpaqueIIDSecretKeyMinBytes * 2]byte
- if n, err := rand.Read(secretKeyBuf[:]); err != nil {
- t.Fatalf("rand.Read(_): %s", err)
- } else if want := header.OpaqueIIDSecretKeyMinBytes * 2; n != want {
- t.Fatalf("expected rand.Read to read %d bytes, read %d bytes", want, n)
- }
-
- tests := []struct {
- name string
- prefix tcpip.Subnet
- nicName string
- dadCounter uint8
- secretKey []byte
- }{
- {
- name: "SecretKey of minimum size",
- prefix: header.IPv6LinkLocalPrefix.Subnet(),
- nicName: "eth0",
- dadCounter: 0,
- secretKey: secretKeyBuf[:header.OpaqueIIDSecretKeyMinBytes],
- },
- {
- name: "SecretKey of less than minimum size",
- prefix: func() tcpip.Subnet {
- addrWithPrefix := tcpip.AddressWithPrefix{
- Address: "\x01\x02\x03\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
- PrefixLen: header.IIDOffsetInIPv6Address * 8,
- }
- return addrWithPrefix.Subnet()
- }(),
- nicName: "eth10",
- dadCounter: 1,
- secretKey: secretKeyBuf[:header.OpaqueIIDSecretKeyMinBytes/2],
- },
- {
- name: "SecretKey of more than minimum size",
- prefix: func() tcpip.Subnet {
- addrWithPrefix := tcpip.AddressWithPrefix{
- Address: "\x01\x02\x03\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
- PrefixLen: header.IIDOffsetInIPv6Address * 8,
- }
- return addrWithPrefix.Subnet()
- }(),
- nicName: "eth11",
- dadCounter: 2,
- secretKey: secretKeyBuf[:header.OpaqueIIDSecretKeyMinBytes*2],
- },
- {
- name: "Nil SecretKey and empty nicName",
- prefix: func() tcpip.Subnet {
- addrWithPrefix := tcpip.AddressWithPrefix{
- Address: "\x01\x02\x03\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
- PrefixLen: header.IIDOffsetInIPv6Address * 8,
- }
- return addrWithPrefix.Subnet()
- }(),
- nicName: "",
- dadCounter: 3,
- secretKey: nil,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- h := sha256.New()
- h.Write([]byte(test.prefix.ID()[:header.IIDOffsetInIPv6Address]))
- h.Write([]byte(test.nicName))
- h.Write([]byte{test.dadCounter})
- if k := test.secretKey; k != nil {
- h.Write(k)
- }
- var hashSum [sha256.Size]byte
- h.Sum(hashSum[:0])
- want := hashSum[:header.IIDSize]
-
- // Passing a nil buffer should result in a new buffer returned with the
- // IID.
- if got := header.AppendOpaqueInterfaceIdentifier(nil, test.prefix, test.nicName, test.dadCounter, test.secretKey); !bytes.Equal(got, want) {
- t.Errorf("got AppendOpaqueInterfaceIdentifier(nil, %s, %s, %d, %x) = %x, want = %x", test.prefix, test.nicName, test.dadCounter, test.secretKey, got, want)
- }
-
- // Passing a buffer with sufficient capacity for the IID should populate
- // the buffer provided.
- var iidBuf [header.IIDSize]byte
- if got := header.AppendOpaqueInterfaceIdentifier(iidBuf[:0], test.prefix, test.nicName, test.dadCounter, test.secretKey); !bytes.Equal(got, want) {
- t.Errorf("got AppendOpaqueInterfaceIdentifier(iidBuf[:0], %s, %s, %d, %x) = %x, want = %x", test.prefix, test.nicName, test.dadCounter, test.secretKey, got, want)
- }
- if got := iidBuf[:]; !bytes.Equal(got, want) {
- t.Errorf("got iidBuf = %x, want = %x", got, want)
- }
- })
- }
-}
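// Note (standalone sketch, not header.AppendOpaqueInterfaceIdentifier): the
// test above rebuilds the opaque IID by hand, hashing the prefix bytes, NIC
// name, DAD counter and secret key with SHA-256 and keeping the first 8
// bytes, in the spirit of RFC 7217. The inputs below are made-up examples.
package main

import (
	"crypto/sha256"
	"fmt"
)

func opaqueIID(prefix [8]byte, nicName string, dadCounter uint8, secretKey []byte) [8]byte {
	h := sha256.New()
	h.Write(prefix[:])
	h.Write([]byte(nicName))
	h.Write([]byte{dadCounter})
	h.Write(secretKey) // writing a nil key contributes nothing, matching the test's nil case
	var sum [sha256.Size]byte
	h.Sum(sum[:0])
	var iid [8]byte
	copy(iid[:], sum[:8])
	return iid
}

func main() {
	fmt.Printf("% x\n", opaqueIID([8]byte{0xfe, 0x80}, "eth0", 0, []byte("example key")))
}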
-
-func TestLinkLocalAddrWithOpaqueIID(t *testing.T) {
- var secretKeyBuf [header.OpaqueIIDSecretKeyMinBytes * 2]byte
- if n, err := rand.Read(secretKeyBuf[:]); err != nil {
- t.Fatalf("rand.Read(_): %s", err)
- } else if want := header.OpaqueIIDSecretKeyMinBytes * 2; n != want {
- t.Fatalf("expected rand.Read to read %d bytes, read %d bytes", want, n)
- }
-
- prefix := header.IPv6LinkLocalPrefix.Subnet()
-
- tests := []struct {
- name string
- prefix tcpip.Subnet
- nicName string
- dadCounter uint8
- secretKey []byte
- }{
- {
- name: "SecretKey of minimum size",
- nicName: "eth0",
- dadCounter: 0,
- secretKey: secretKeyBuf[:header.OpaqueIIDSecretKeyMinBytes],
- },
- {
- name: "SecretKey of less than minimum size",
- nicName: "eth10",
- dadCounter: 1,
- secretKey: secretKeyBuf[:header.OpaqueIIDSecretKeyMinBytes/2],
- },
- {
- name: "SecretKey of more than minimum size",
- nicName: "eth11",
- dadCounter: 2,
- secretKey: secretKeyBuf[:header.OpaqueIIDSecretKeyMinBytes*2],
- },
- {
- name: "Nil SecretKey and empty nicName",
- nicName: "",
- dadCounter: 3,
- secretKey: nil,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- addrBytes := [header.IPv6AddressSize]byte{
- 0: 0xFE,
- 1: 0x80,
- }
-
- want := tcpip.Address(header.AppendOpaqueInterfaceIdentifier(
- addrBytes[:header.IIDOffsetInIPv6Address],
- prefix,
- test.nicName,
- test.dadCounter,
- test.secretKey,
- ))
-
- if got := header.LinkLocalAddrWithOpaqueIID(test.nicName, test.dadCounter, test.secretKey); got != want {
- t.Errorf("got LinkLocalAddrWithOpaqueIID(%s, %d, %x) = %s, want = %s", test.nicName, test.dadCounter, test.secretKey, got, want)
- }
- })
- }
-}
-
-func TestIsV6LinkLocalMulticastAddress(t *testing.T) {
- tests := []struct {
- name string
- addr tcpip.Address
- expected bool
- }{
- {
- name: "Valid Link Local Multicast",
- addr: linkLocalMulticastAddr,
- expected: true,
- },
- {
- name: "Valid Link Local Multicast with flags",
- addr: "\xff\xf2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01",
- expected: true,
- },
- {
- name: "Link Local Unicast",
- addr: linkLocalAddr,
- expected: false,
- },
- {
- name: "IPv4 Multicast",
- addr: "\xe0\x00\x00\x01",
- expected: false,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- if got := header.IsV6LinkLocalMulticastAddress(test.addr); got != test.expected {
- t.Errorf("got header.IsV6LinkLocalMulticastAddress(%s) = %t, want = %t", test.addr, got, test.expected)
- }
- })
- }
-}
-
-func TestIsV6LinkLocalUnicastAddress(t *testing.T) {
- tests := []struct {
- name string
- addr tcpip.Address
- expected bool
- }{
- {
- name: "Valid Link Local Unicast",
- addr: linkLocalAddr,
- expected: true,
- },
- {
- name: "Link Local Multicast",
- addr: linkLocalMulticastAddr,
- expected: false,
- },
- {
- name: "Unique Local",
- addr: uniqueLocalAddr1,
- expected: false,
- },
- {
- name: "Global",
- addr: globalAddr,
- expected: false,
- },
- {
- name: "IPv4 Link Local",
- addr: "\xa9\xfe\x00\x01",
- expected: false,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- if got := header.IsV6LinkLocalUnicastAddress(test.addr); got != test.expected {
- t.Errorf("got header.IsV6LinkLocalUnicastAddress(%s) = %t, want = %t", test.addr, got, test.expected)
- }
- })
- }
-}
-
-func TestScopeForIPv6Address(t *testing.T) {
- tests := []struct {
- name string
- addr tcpip.Address
- scope header.IPv6AddressScope
- err tcpip.Error
- }{
- {
- name: "Unique Local",
- addr: uniqueLocalAddr1,
- scope: header.GlobalScope,
- err: nil,
- },
- {
- name: "Link Local Unicast",
- addr: linkLocalAddr,
- scope: header.LinkLocalScope,
- err: nil,
- },
- {
- name: "Link Local Multicast",
- addr: linkLocalMulticastAddr,
- scope: header.LinkLocalScope,
- err: nil,
- },
- {
- name: "Global",
- addr: globalAddr,
- scope: header.GlobalScope,
- err: nil,
- },
- {
- name: "IPv4",
- addr: "\x01\x02\x03\x04",
- scope: header.GlobalScope,
- err: &tcpip.ErrBadAddress{},
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- got, err := header.ScopeForIPv6Address(test.addr)
- if diff := cmp.Diff(test.err, err); diff != "" {
-				t.Errorf("unexpected error from header.ScopeForIPv6Address(%s), (-want, +got):\n%s", test.addr, diff)
- }
- if got != test.scope {
-				t.Errorf("got header.ScopeForIPv6Address(%s) = (%d, _), want = (%d, _)", test.addr, got, test.scope)
- }
- })
- }
-}
-
-func TestSolicitedNodeAddr(t *testing.T) {
- tests := []struct {
- addr tcpip.Address
- want tcpip.Address
- }{
- {
- addr: "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\xa0",
- want: "\xff\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xff\x0e\x0f\xa0",
- },
- {
- addr: "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\xdd\x0e\x0f\xa0",
- want: "\xff\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xff\x0e\x0f\xa0",
- },
- {
- addr: "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\xdd\x01\x02\x03",
- want: "\xff\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xff\x01\x02\x03",
- },
- }
-
- for _, test := range tests {
- t.Run(fmt.Sprintf("%s", test.addr), func(t *testing.T) {
- if got := header.SolicitedNodeAddr(test.addr); got != test.want {
- t.Fatalf("got header.SolicitedNodeAddr(%s) = %s, want = %s", test.addr, got, test.want)
- }
- })
- }
-}
-
-func TestV6MulticastScope(t *testing.T) {
- tests := []struct {
- addr tcpip.Address
- want header.IPv6MulticastScope
- }{
- {
- addr: "\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01",
- want: header.IPv6Reserved0MulticastScope,
- },
- {
- addr: "\xff\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01",
- want: header.IPv6InterfaceLocalMulticastScope,
- },
- {
- addr: "\xff\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01",
- want: header.IPv6LinkLocalMulticastScope,
- },
- {
- addr: "\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01",
- want: header.IPv6RealmLocalMulticastScope,
- },
- {
- addr: "\xff\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01",
- want: header.IPv6AdminLocalMulticastScope,
- },
- {
- addr: "\xff\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01",
- want: header.IPv6SiteLocalMulticastScope,
- },
- {
- addr: "\xff\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01",
- want: header.IPv6MulticastScope(6),
- },
- {
- addr: "\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01",
- want: header.IPv6MulticastScope(7),
- },
- {
- addr: "\xff\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01",
- want: header.IPv6OrganizationLocalMulticastScope,
- },
- {
- addr: "\xff\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01",
- want: header.IPv6MulticastScope(9),
- },
- {
- addr: "\xff\x0a\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01",
- want: header.IPv6MulticastScope(10),
- },
- {
- addr: "\xff\x0b\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01",
- want: header.IPv6MulticastScope(11),
- },
- {
- addr: "\xff\x0c\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01",
- want: header.IPv6MulticastScope(12),
- },
- {
- addr: "\xff\x0d\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01",
- want: header.IPv6MulticastScope(13),
- },
- {
- addr: "\xff\x0e\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01",
- want: header.IPv6GlobalMulticastScope,
- },
- {
- addr: "\xff\x0f\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01",
- want: header.IPv6ReservedFMulticastScope,
- },
- }
-
- for _, test := range tests {
- t.Run(fmt.Sprintf("%s", test.addr), func(t *testing.T) {
- if got := header.V6MulticastScope(test.addr); got != test.want {
- t.Fatalf("got header.V6MulticastScope(%s) = %d, want = %d", test.addr, got, test.want)
- }
- })
- }
-}
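
The solicited-node cases in TestSolicitedNodeAddr above follow the mapping from
RFC 4291 section 2.7.1: the low-order 24 bits of the unicast address are
appended to the ff02::1:ff00:0/104 prefix. A minimal standalone sketch of that
mapping (illustrative only; it does not use the gvisor.dev/gvisor packages):

package main

import "fmt"

// solicitedNodeAddr returns the 16-byte solicited-node multicast address for
// a 16-byte IPv6 unicast address: ff02::1:ffXX:XXXX, where XX:XXXX are the
// last three bytes of the unicast address.
func solicitedNodeAddr(addr [16]byte) [16]byte {
	out := [16]byte{0: 0xff, 1: 0x02, 11: 0x01, 12: 0xff}
	copy(out[13:], addr[13:]) // low-order 24 bits of the unicast address
	return out
}

func main() {
	addr := [16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0xa0}
	fmt.Printf("%x\n", solicitedNodeAddr(addr)) // ff0200000000000000000001ff0e0fa0
}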
diff --git a/pkg/tcpip/header/ipversion_test.go b/pkg/tcpip/header/ipversion_test.go
deleted file mode 100644
index b5540bf66..000000000
--- a/pkg/tcpip/header/ipversion_test.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package header_test
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/tcpip/header"
-)
-
-func TestIPv4(t *testing.T) {
- b := header.IPv4(make([]byte, header.IPv4MinimumSize))
- b.Encode(&header.IPv4Fields{})
-
- const want = header.IPv4Version
- if v := header.IPVersion(b); v != want {
- t.Fatalf("Bad version, want %v, got %v", want, v)
- }
-}
-
-func TestIPv6(t *testing.T) {
- b := header.IPv6(make([]byte, header.IPv6MinimumSize))
- b.Encode(&header.IPv6Fields{})
-
- const want = header.IPv6Version
- if v := header.IPVersion(b); v != want {
- t.Fatalf("Bad version, want %v, got %v", want, v)
- }
-}
-
-func TestOtherVersion(t *testing.T) {
- const want = header.IPv4Version + header.IPv6Version
- b := make([]byte, 1)
- b[0] = want << 4
-
- if v := header.IPVersion(b); v != want {
- t.Fatalf("Bad version, want %v, got %v", want, v)
- }
-}
-
-func TestTooShort(t *testing.T) {
- b := make([]byte, 1)
- b[0] = (header.IPv4Version + header.IPv6Version) << 4
-
- // Get the version of a zero-length slice.
- const want = -1
- if v := header.IPVersion(b[:0]); v != want {
- t.Fatalf("Bad version, want %v, got %v", want, v)
- }
-
- // Get the version of a nil slice.
- if v := header.IPVersion(nil); v != want {
- t.Fatalf("Bad version, want %v, got %v", want, v)
- }
-}
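
The deleted version tests above pin down only the version nibble: the high four
bits of the first header byte, with -1 returned for an empty buffer. A
standalone sketch of that logic (an illustration, not the gVisor
implementation):

package main

import "fmt"

// ipVersion returns the IP version encoded in the high nibble of the first
// byte of a packet, or -1 if the buffer is empty. The nibble position is the
// same for IPv4 and IPv6.
func ipVersion(b []byte) int {
	if len(b) == 0 {
		return -1
	}
	return int(b[0] >> 4)
}

func main() {
	fmt.Println(ipVersion([]byte{0x45})) // 4 (IPv4, IHL=5)
	fmt.Println(ipVersion([]byte{0x60})) // 6 (IPv6)
	fmt.Println(ipVersion(nil))          // -1
}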
diff --git a/pkg/tcpip/header/mld_test.go b/pkg/tcpip/header/mld_test.go
deleted file mode 100644
index 0cecf10d4..000000000
--- a/pkg/tcpip/header/mld_test.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package header
-
-import (
- "encoding/binary"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/tcpip"
-)
-
-func TestMLD(t *testing.T) {
- b := []byte{
- // Maximum Response Delay
- 0, 0,
-
- // Reserved
- 0, 0,
-
- // MulticastAddress
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6,
- }
-
- const maxRespDelay = 513
- binary.BigEndian.PutUint16(b, maxRespDelay)
-
- mld := MLD(b)
-
- if got, want := mld.MaximumResponseDelay(), maxRespDelay*time.Millisecond; got != want {
- t.Errorf("got mld.MaximumResponseDelay() = %s, want = %s", got, want)
- }
-
- const newMaxRespDelay = 1234
- mld.SetMaximumResponseDelay(newMaxRespDelay)
- if got, want := mld.MaximumResponseDelay(), newMaxRespDelay*time.Millisecond; got != want {
- t.Errorf("got mld.MaximumResponseDelay() = %s, want = %s", got, want)
- }
-
- if got, want := mld.MulticastAddress(), tcpip.Address([]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6}); got != want {
- t.Errorf("got mld.MulticastAddress() = %s, want = %s", got, want)
- }
-
- multicastAddress := tcpip.Address([]byte{15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0})
- mld.SetMulticastAddress(multicastAddress)
- if got := mld.MulticastAddress(); got != multicastAddress {
- t.Errorf("got mld.MulticastAddress() = %s, want = %s", got, multicastAddress)
- }
-}
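
The buffer layout exercised by TestMLD above is the RFC 2710 message body: a
16-bit big-endian Maximum Response Delay in milliseconds, 16 reserved bits,
then the 16-byte multicast address. A small standalone sketch of that encoding
(assumed to cover only the 20-byte MLD body, not the enclosing ICMPv6 packet):

package main

import (
	"encoding/binary"
	"fmt"
	"time"
)

// mldBody is the 20-byte MLD message body: max response delay (ms, bytes
// 0-1), reserved (bytes 2-3), multicast address (bytes 4-19).
type mldBody [20]byte

func (m *mldBody) maxResponseDelay() time.Duration {
	return time.Duration(binary.BigEndian.Uint16(m[0:2])) * time.Millisecond
}

func (m *mldBody) setMaxResponseDelay(d time.Duration) {
	binary.BigEndian.PutUint16(m[0:2], uint16(d/time.Millisecond))
}

func (m *mldBody) multicastAddress() []byte { return m[4:20] }

func main() {
	var m mldBody
	m.setMaxResponseDelay(513 * time.Millisecond)
	copy(m[4:], []byte{0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1})
	fmt.Println(m.maxResponseDelay())        // 513ms
	fmt.Printf("%x\n", m.multicastAddress()) // ff020000000000000000000000000001
}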
diff --git a/pkg/tcpip/header/ndp_test.go b/pkg/tcpip/header/ndp_test.go
deleted file mode 100644
index 2a897e938..000000000
--- a/pkg/tcpip/header/ndp_test.go
+++ /dev/null
@@ -1,1748 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package header
-
-import (
- "bytes"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "regexp"
- "strings"
- "testing"
- "time"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/testutil"
-)
-
-// TestNDPNeighborSolicit tests the functions of NDPNeighborSolicit.
-func TestNDPNeighborSolicit(t *testing.T) {
- b := []byte{
- 0, 0, 0, 0,
- 1, 2, 3, 4,
- 5, 6, 7, 8,
- 9, 10, 11, 12,
- 13, 14, 15, 16,
- }
-
- // Test getting the Target Address.
- ns := NDPNeighborSolicit(b)
- addr := testutil.MustParse6("102:304:506:708:90a:b0c:d0e:f10")
- if got := ns.TargetAddress(); got != addr {
- t.Errorf("got ns.TargetAddress = %s, want %s", got, addr)
- }
-
- // Test updating the Target Address.
- addr2 := testutil.MustParse6("1112:1314:1516:1718:191a:1b1c:1d1e:1f11")
- ns.SetTargetAddress(addr2)
- if got := ns.TargetAddress(); got != addr2 {
- t.Errorf("got ns.TargetAddress = %s, want %s", got, addr2)
- }
- // Make sure the address got updated in the backing buffer.
- if got := tcpip.Address(b[ndpNSTargetAddessOffset:][:IPv6AddressSize]); got != addr2 {
-		t.Errorf("got target address buffer = %s, want %s", got, addr2)
- }
-}
-
-func TestNDPRouteInformationOption(t *testing.T) {
- tests := []struct {
- name string
-
- length uint8
- prefixLength uint8
- prf NDPRoutePreference
- lifetimeS uint32
- prefixBytes []byte
- expectedPrefix tcpip.Subnet
-
- expectedErr error
- }{
- {
- name: "Length=1 with Prefix Length = 0",
- length: 1,
- prefixLength: 0,
- prf: MediumRoutePreference,
- lifetimeS: 1,
- prefixBytes: nil,
- expectedPrefix: IPv6EmptySubnet,
- },
- {
- name: "Length=1 but Prefix Length > 0",
- length: 1,
- prefixLength: 1,
- prf: MediumRoutePreference,
- lifetimeS: 1,
- prefixBytes: nil,
- expectedErr: ErrNDPOptMalformedBody,
- },
- {
- name: "Length=2 with Prefix Length = 0",
- length: 2,
- prefixLength: 0,
- prf: MediumRoutePreference,
- lifetimeS: 1,
- prefixBytes: nil,
- expectedPrefix: IPv6EmptySubnet,
- },
- {
- name: "Length=2 with Prefix Length in [1, 64] (1)",
- length: 2,
- prefixLength: 1,
- prf: LowRoutePreference,
- lifetimeS: 1,
- prefixBytes: nil,
- expectedPrefix: tcpip.AddressWithPrefix{
- Address: tcpip.Address(strings.Repeat("\x00", IPv6AddressSize)),
- PrefixLen: 1,
- }.Subnet(),
- },
- {
- name: "Length=2 with Prefix Length in [1, 64] (64)",
- length: 2,
- prefixLength: 64,
- prf: HighRoutePreference,
- lifetimeS: 1,
- prefixBytes: nil,
- expectedPrefix: tcpip.AddressWithPrefix{
- Address: tcpip.Address(strings.Repeat("\x00", IPv6AddressSize)),
- PrefixLen: 64,
- }.Subnet(),
- },
- {
- name: "Length=2 with Prefix Length > 64",
- length: 2,
- prefixLength: 65,
- prf: HighRoutePreference,
- lifetimeS: 1,
- prefixBytes: nil,
- expectedErr: ErrNDPOptMalformedBody,
- },
- {
- name: "Length=3 with Prefix Length = 0",
- length: 3,
- prefixLength: 0,
- prf: MediumRoutePreference,
- lifetimeS: 1,
- prefixBytes: nil,
- expectedPrefix: IPv6EmptySubnet,
- },
- {
- name: "Length=3 with Prefix Length in [1, 64] (1)",
- length: 3,
- prefixLength: 1,
- prf: LowRoutePreference,
- lifetimeS: 1,
- prefixBytes: nil,
- expectedPrefix: tcpip.AddressWithPrefix{
- Address: tcpip.Address(strings.Repeat("\x00", IPv6AddressSize)),
- PrefixLen: 1,
- }.Subnet(),
- },
- {
- name: "Length=3 with Prefix Length in [1, 64] (64)",
- length: 3,
- prefixLength: 64,
- prf: HighRoutePreference,
- lifetimeS: 1,
- prefixBytes: nil,
- expectedPrefix: tcpip.AddressWithPrefix{
- Address: tcpip.Address(strings.Repeat("\x00", IPv6AddressSize)),
- PrefixLen: 64,
- }.Subnet(),
- },
- {
- name: "Length=3 with Prefix Length in [65, 128] (65)",
- length: 3,
- prefixLength: 65,
- prf: HighRoutePreference,
- lifetimeS: 1,
- prefixBytes: nil,
- expectedPrefix: tcpip.AddressWithPrefix{
- Address: tcpip.Address(strings.Repeat("\x00", IPv6AddressSize)),
- PrefixLen: 65,
- }.Subnet(),
- },
- {
- name: "Length=3 with Prefix Length in [65, 128] (128)",
- length: 3,
- prefixLength: 128,
- prf: HighRoutePreference,
- lifetimeS: 1,
- prefixBytes: nil,
- expectedPrefix: tcpip.AddressWithPrefix{
- Address: tcpip.Address(strings.Repeat("\x00", IPv6AddressSize)),
- PrefixLen: 128,
- }.Subnet(),
- },
- {
- name: "Length=3 with (invalid) Prefix Length > 128",
- length: 3,
- prefixLength: 129,
- prf: HighRoutePreference,
- lifetimeS: 1,
- prefixBytes: nil,
- expectedErr: ErrNDPOptMalformedBody,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- expectedRouteInformationBytes := [...]byte{
- // Type, Length
- 24, test.length,
-
- // Prefix Length, Prf
- uint8(test.prefixLength), uint8(test.prf) << 3,
-
- // Route Lifetime
- 0, 0, 0, 0,
-
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- }
- binary.BigEndian.PutUint32(expectedRouteInformationBytes[4:], test.lifetimeS)
- _ = copy(expectedRouteInformationBytes[8:], test.prefixBytes)
-
- opts := NDPOptions(expectedRouteInformationBytes[:test.length*lengthByteUnits])
- it, err := opts.Iter(false)
- if err != nil {
- t.Fatalf("got Iter(false) = (_, %s), want = (_, nil)", err)
- }
- opt, done, err := it.Next()
- if !errors.Is(err, test.expectedErr) {
- t.Fatalf("got Next() = (_, _, %s), want = (_, _, %s)", err, test.expectedErr)
- }
- if want := test.expectedErr != nil; done != want {
- t.Fatalf("got Next() = (_, %t, _), want = (_, %t, _)", done, want)
- }
- if test.expectedErr != nil {
- return
- }
-
- if got := opt.kind(); got != ndpRouteInformationType {
- t.Errorf("got kind() = %d, want = %d", got, ndpRouteInformationType)
- }
-
- ri, ok := opt.(NDPRouteInformation)
- if !ok {
- t.Fatalf("got opt = %T, want = NDPRouteInformation", opt)
- }
-
- if got := ri.PrefixLength(); got != test.prefixLength {
- t.Errorf("got PrefixLength() = %d, want = %d", got, test.prefixLength)
- }
- if got := ri.RoutePreference(); got != test.prf {
- t.Errorf("got RoutePreference() = %d, want = %d", got, test.prf)
- }
- if got, want := ri.RouteLifetime(), time.Duration(test.lifetimeS)*time.Second; got != want {
- t.Errorf("got RouteLifetime() = %s, want = %s", got, want)
- }
- if got, err := ri.Prefix(); err != nil {
- t.Errorf("Prefix(): %s", err)
- } else if got != test.expectedPrefix {
- t.Errorf("got Prefix() = %s, want = %s", got, test.expectedPrefix)
- }
-
- // Iterator should not return anything else.
- {
- next, done, err := it.Next()
- if err != nil {
- t.Errorf("got Next() = (_, _, %s), want = (_, _, nil)", err)
- }
- if !done {
- t.Error("got Next() = (_, false, _), want = (_, true, _)")
- }
- if next != nil {
- t.Errorf("got Next() = (%x, _, _), want = (nil, _, _)", next)
- }
- }
- })
- }
-}
-
-// TestNDPNeighborAdvert tests the functions of NDPNeighborAdvert.
-func TestNDPNeighborAdvert(t *testing.T) {
- b := []byte{
- 160, 0, 0, 0,
- 1, 2, 3, 4,
- 5, 6, 7, 8,
- 9, 10, 11, 12,
- 13, 14, 15, 16,
- }
-
- // Test getting the Target Address.
- na := NDPNeighborAdvert(b)
- addr := testutil.MustParse6("102:304:506:708:90a:b0c:d0e:f10")
- if got := na.TargetAddress(); got != addr {
- t.Errorf("got TargetAddress = %s, want %s", got, addr)
- }
-
- // Test getting the Router Flag.
- if got := na.RouterFlag(); !got {
- t.Errorf("got RouterFlag = false, want = true")
- }
-
- // Test getting the Solicited Flag.
- if got := na.SolicitedFlag(); got {
- t.Errorf("got SolicitedFlag = true, want = false")
- }
-
- // Test getting the Override Flag.
- if got := na.OverrideFlag(); !got {
- t.Errorf("got OverrideFlag = false, want = true")
- }
-
- // Test updating the Target Address.
- addr2 := testutil.MustParse6("1112:1314:1516:1718:191a:1b1c:1d1e:1f11")
- na.SetTargetAddress(addr2)
- if got := na.TargetAddress(); got != addr2 {
- t.Errorf("got TargetAddress = %s, want %s", got, addr2)
- }
- // Make sure the address got updated in the backing buffer.
- if got := tcpip.Address(b[ndpNATargetAddressOffset:][:IPv6AddressSize]); got != addr2 {
-		t.Errorf("got target address buffer = %s, want %s", got, addr2)
- }
-
- // Test updating the Router Flag.
- na.SetRouterFlag(false)
- if got := na.RouterFlag(); got {
- t.Errorf("got RouterFlag = true, want = false")
- }
-
- // Test updating the Solicited Flag.
- na.SetSolicitedFlag(true)
- if got := na.SolicitedFlag(); !got {
- t.Errorf("got SolicitedFlag = false, want = true")
- }
-
- // Test updating the Override Flag.
- na.SetOverrideFlag(false)
- if got := na.OverrideFlag(); got {
- t.Errorf("got OverrideFlag = true, want = false")
- }
-
- // Make sure flags got updated in the backing buffer.
- if got := b[ndpNAFlagsOffset]; got != 64 {
- t.Errorf("got flags byte = %d, want = 64", got)
- }
-}
-
-func TestNDPRouterAdvert(t *testing.T) {
- tests := []struct {
- hopLimit uint8
- managedFlag, otherConfFlag bool
- prf NDPRoutePreference
- routerLifetimeS uint16
- reachableTimeMS, retransTimerMS uint32
- }{
- {
- hopLimit: 1,
- managedFlag: false,
- otherConfFlag: true,
- prf: HighRoutePreference,
- routerLifetimeS: 2,
- reachableTimeMS: 3,
- retransTimerMS: 4,
- },
- {
- hopLimit: 64,
- managedFlag: true,
- otherConfFlag: false,
- prf: LowRoutePreference,
- routerLifetimeS: 258,
- reachableTimeMS: 78492,
- retransTimerMS: 13213,
- },
- }
-
- for i, test := range tests {
- t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
- flags := uint8(0)
- if test.managedFlag {
- flags |= 1 << 7
- }
- if test.otherConfFlag {
- flags |= 1 << 6
- }
- flags |= uint8(test.prf) << 3
-
- b := []byte{
- test.hopLimit, flags, 1, 2,
- 3, 4, 5, 6,
- 7, 8, 9, 10,
- }
- binary.BigEndian.PutUint16(b[2:], test.routerLifetimeS)
- binary.BigEndian.PutUint32(b[4:], test.reachableTimeMS)
- binary.BigEndian.PutUint32(b[8:], test.retransTimerMS)
-
- ra := NDPRouterAdvert(b)
-
- if got := ra.CurrHopLimit(); got != test.hopLimit {
- t.Errorf("got ra.CurrHopLimit() = %d, want = %d", got, test.hopLimit)
- }
-
- if got := ra.ManagedAddrConfFlag(); got != test.managedFlag {
- t.Errorf("got ManagedAddrConfFlag() = %t, want = %t", got, test.managedFlag)
- }
-
- if got := ra.OtherConfFlag(); got != test.otherConfFlag {
- t.Errorf("got OtherConfFlag() = %t, want = %t", got, test.otherConfFlag)
- }
-
- if got := ra.DefaultRouterPreference(); got != test.prf {
- t.Errorf("got DefaultRouterPreference() = %d, want = %d", got, test.prf)
- }
-
- if got, want := ra.RouterLifetime(), time.Second*time.Duration(test.routerLifetimeS); got != want {
- t.Errorf("got ra.RouterLifetime() = %d, want = %d", got, want)
- }
-
- if got, want := ra.ReachableTime(), time.Millisecond*time.Duration(test.reachableTimeMS); got != want {
- t.Errorf("got ra.ReachableTime() = %d, want = %d", got, want)
- }
-
- if got, want := ra.RetransTimer(), time.Millisecond*time.Duration(test.retransTimerMS); got != want {
- t.Errorf("got ra.RetransTimer() = %d, want = %d", got, want)
- }
- })
- }
-}
-
-// TestNDPSourceLinkLayerAddressOptionEthernetAddress tests getting the
-// Ethernet address from an NDPSourceLinkLayerAddressOption.
-func TestNDPSourceLinkLayerAddressOptionEthernetAddress(t *testing.T) {
- tests := []struct {
- name string
- buf []byte
- expected tcpip.LinkAddress
- }{
- {
- "ValidMAC",
- []byte{1, 2, 3, 4, 5, 6},
- tcpip.LinkAddress("\x01\x02\x03\x04\x05\x06"),
- },
- {
- "SLLBodyTooShort",
- []byte{1, 2, 3, 4, 5},
- tcpip.LinkAddress([]byte(nil)),
- },
- {
- "SLLBodyLargerThanNeeded",
- []byte{1, 2, 3, 4, 5, 6, 7, 8},
- tcpip.LinkAddress("\x01\x02\x03\x04\x05\x06"),
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- sll := NDPSourceLinkLayerAddressOption(test.buf)
- if got := sll.EthernetAddress(); got != test.expected {
- t.Errorf("got sll.EthernetAddress = %s, want = %s", got, test.expected)
- }
- })
- }
-}
-
-// TestNDPTargetLinkLayerAddressOptionEthernetAddress tests getting the
-// Ethernet address from an NDPTargetLinkLayerAddressOption.
-func TestNDPTargetLinkLayerAddressOptionEthernetAddress(t *testing.T) {
- tests := []struct {
- name string
- buf []byte
- expected tcpip.LinkAddress
- }{
- {
- "ValidMAC",
- []byte{1, 2, 3, 4, 5, 6},
- tcpip.LinkAddress("\x01\x02\x03\x04\x05\x06"),
- },
- {
- "TLLBodyTooShort",
- []byte{1, 2, 3, 4, 5},
- tcpip.LinkAddress([]byte(nil)),
- },
- {
- "TLLBodyLargerThanNeeded",
- []byte{1, 2, 3, 4, 5, 6, 7, 8},
- tcpip.LinkAddress("\x01\x02\x03\x04\x05\x06"),
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- tll := NDPTargetLinkLayerAddressOption(test.buf)
- if got := tll.EthernetAddress(); got != test.expected {
- t.Errorf("got tll.EthernetAddress = %s, want = %s", got, test.expected)
- }
- })
- }
-}
-
-func TestOpts(t *testing.T) {
- const optionHeaderLen = 2
-
- checkNonce := func(expectedNonce []byte) func(*testing.T, NDPOption) {
- return func(t *testing.T, opt NDPOption) {
- if got := opt.kind(); got != ndpNonceOptionType {
- t.Errorf("got kind() = %d, want = %d", got, ndpNonceOptionType)
- }
- nonce, ok := opt.(NDPNonceOption)
- if !ok {
- t.Fatalf("got nonce = %T, want = NDPNonceOption", opt)
- }
- if diff := cmp.Diff(expectedNonce, nonce.Nonce()); diff != "" {
- t.Errorf("nonce mismatch (-want +got):\n%s", diff)
- }
- }
- }
-
- checkTLL := func(expectedAddr tcpip.LinkAddress) func(*testing.T, NDPOption) {
- return func(t *testing.T, opt NDPOption) {
- if got := opt.kind(); got != ndpTargetLinkLayerAddressOptionType {
- t.Errorf("got kind() = %d, want = %d", got, ndpTargetLinkLayerAddressOptionType)
- }
- tll, ok := opt.(NDPTargetLinkLayerAddressOption)
- if !ok {
- t.Fatalf("got tll = %T, want = NDPTargetLinkLayerAddressOption", opt)
- }
- if got, want := tll.EthernetAddress(), expectedAddr; got != want {
- t.Errorf("got tll.EthernetAddress = %s, want = %s", got, want)
- }
- }
- }
-
- checkSLL := func(expectedAddr tcpip.LinkAddress) func(*testing.T, NDPOption) {
- return func(t *testing.T, opt NDPOption) {
- if got := opt.kind(); got != ndpSourceLinkLayerAddressOptionType {
- t.Errorf("got kind() = %d, want = %d", got, ndpSourceLinkLayerAddressOptionType)
- }
- sll, ok := opt.(NDPSourceLinkLayerAddressOption)
- if !ok {
- t.Fatalf("got sll = %T, want = NDPSourceLinkLayerAddressOption", opt)
- }
- if got, want := sll.EthernetAddress(), expectedAddr; got != want {
- t.Errorf("got sll.EthernetAddress = %s, want = %s", got, want)
- }
- }
- }
-
- const validLifetimeSeconds = 16909060
- address := testutil.MustParse6("90a:b0c:d0e:f10:1112:1314:1516:1718")
-
- expectedRDNSSBytes := [...]byte{
- // Type, Length
- 25, 3,
-
- // Reserved
- 0, 0,
-
- // Lifetime
- 1, 2, 4, 8,
-
- // Address
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
- }
- binary.BigEndian.PutUint32(expectedRDNSSBytes[4:], validLifetimeSeconds)
- if n := copy(expectedRDNSSBytes[8:], address); n != IPv6AddressSize {
- t.Fatalf("got copy(...) = %d, want = %d", n, IPv6AddressSize)
- }
- // Update reserved fields to non zero values to make sure serializing sets
- // them to zero.
- rdnssBytes := expectedRDNSSBytes
- rdnssBytes[1] = 1
- rdnssBytes[2] = 2
-
- const searchListPaddingBytes = 3
- const domainName = "abc.abcd.e"
- expectedSearchListBytes := [...]byte{
- // Type, Length
- 31, 3,
-
- // Reserved
- 0, 0,
-
- // Lifetime
- 1, 0, 0, 0,
-
- // Domain names
- 3, 'a', 'b', 'c',
- 4, 'a', 'b', 'c', 'd',
- 1, 'e',
- 0,
- 0, 0, 0, 0,
- }
- binary.BigEndian.PutUint32(expectedSearchListBytes[4:], validLifetimeSeconds)
- // Update reserved fields to non zero values to make sure serializing sets
- // them to zero.
- searchListBytes := expectedSearchListBytes
- searchListBytes[2] = 1
- searchListBytes[3] = 2
-
- const prefixLength = 43
- const onLinkFlag = false
- const slaacFlag = true
- const preferredLifetimeSeconds = 84281096
- const onLinkFlagBit = 7
- const slaacFlagBit = 6
- boolToByte := func(v bool) byte {
- if v {
- return 1
- }
- return 0
- }
- flags := boolToByte(onLinkFlag)<<onLinkFlagBit | boolToByte(slaacFlag)<<slaacFlagBit
- expectedPrefixInformationBytes := [...]byte{
- // Type, Length
- 3, 4,
-
- prefixLength, flags,
-
- // Valid Lifetime
- 1, 2, 3, 4,
-
- // Preferred Lifetime
- 5, 6, 7, 8,
-
- // Reserved2
- 0, 0, 0, 0,
-
- // Address
- 9, 10, 11, 12,
- 13, 14, 15, 16,
- 17, 18, 19, 20,
- 21, 22, 23, 24,
- }
- binary.BigEndian.PutUint32(expectedPrefixInformationBytes[4:], validLifetimeSeconds)
- binary.BigEndian.PutUint32(expectedPrefixInformationBytes[8:], preferredLifetimeSeconds)
- if n := copy(expectedPrefixInformationBytes[16:], address); n != IPv6AddressSize {
- t.Fatalf("got copy(...) = %d, want = %d", n, IPv6AddressSize)
- }
- // Update reserved fields to non zero values to make sure serializing sets
- // them to zero.
- prefixInformationBytes := expectedPrefixInformationBytes
- prefixInformationBytes[3] |= (1 << slaacFlagBit) - 1
- binary.BigEndian.PutUint32(prefixInformationBytes[12:], validLifetimeSeconds+1)
- tests := []struct {
- name string
- buf []byte
- opt NDPOption
- expectedBuf []byte
- check func(*testing.T, NDPOption)
- }{
- {
- name: "Nonce",
- buf: make([]byte, 8),
- opt: NDPNonceOption([]byte{1, 2, 3, 4, 5, 6}),
- expectedBuf: []byte{14, 1, 1, 2, 3, 4, 5, 6},
- check: checkNonce([]byte{1, 2, 3, 4, 5, 6}),
- },
- {
- name: "Nonce with padding",
- buf: []byte{1, 1, 1, 1, 1, 1, 1, 1},
- opt: NDPNonceOption([]byte{1, 2, 3, 4, 5}),
- expectedBuf: []byte{14, 1, 1, 2, 3, 4, 5, 0},
- check: checkNonce([]byte{1, 2, 3, 4, 5, 0}),
- },
-
- {
- name: "TLL Ethernet",
- buf: make([]byte, 8),
- opt: NDPTargetLinkLayerAddressOption("\x01\x02\x03\x04\x05\x06"),
- expectedBuf: []byte{2, 1, 1, 2, 3, 4, 5, 6},
- check: checkTLL("\x01\x02\x03\x04\x05\x06"),
- },
- {
- name: "TLL Padding",
- buf: []byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
- opt: NDPTargetLinkLayerAddressOption("\x01\x02\x03\x04\x05\x06\x07\x08"),
- expectedBuf: []byte{2, 2, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0},
- check: checkTLL("\x01\x02\x03\x04\x05\x06"),
- },
- {
- name: "TLL Empty",
- buf: nil,
- opt: NDPTargetLinkLayerAddressOption(""),
- expectedBuf: nil,
- },
-
- {
- name: "SLL Ethernet",
- buf: make([]byte, 8),
- opt: NDPSourceLinkLayerAddressOption("\x01\x02\x03\x04\x05\x06"),
- expectedBuf: []byte{1, 1, 1, 2, 3, 4, 5, 6},
- check: checkSLL("\x01\x02\x03\x04\x05\x06"),
- },
- {
- name: "SLL Padding",
- buf: []byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
- opt: NDPSourceLinkLayerAddressOption("\x01\x02\x03\x04\x05\x06\x07\x08"),
- expectedBuf: []byte{1, 2, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0},
- check: checkSLL("\x01\x02\x03\x04\x05\x06"),
- },
- {
- name: "SLL Empty",
- buf: nil,
- opt: NDPSourceLinkLayerAddressOption(""),
- expectedBuf: nil,
- },
-
- {
- name: "RDNSS",
- buf: []byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
- // NDPRecursiveDNSServer holds the option after the header bytes.
- opt: NDPRecursiveDNSServer(rdnssBytes[optionHeaderLen:]),
- expectedBuf: expectedRDNSSBytes[:],
- check: func(t *testing.T, opt NDPOption) {
- if got := opt.kind(); got != ndpRecursiveDNSServerOptionType {
- t.Errorf("got kind() = %d, want = %d", got, ndpRecursiveDNSServerOptionType)
- }
- rdnss, ok := opt.(NDPRecursiveDNSServer)
- if !ok {
- t.Fatalf("got opt = %T, want = NDPRecursiveDNSServer", opt)
- }
- if got, want := rdnss.length(), len(expectedRDNSSBytes[optionHeaderLen:]); got != want {
- t.Errorf("got length() = %d, want = %d", got, want)
- }
- if got, want := rdnss.Lifetime(), validLifetimeSeconds*time.Second; got != want {
- t.Errorf("got Lifetime() = %s, want = %s", got, want)
- }
- if addrs, err := rdnss.Addresses(); err != nil {
- t.Errorf("Addresses(): %s", err)
- } else if diff := cmp.Diff([]tcpip.Address{address}, addrs); diff != "" {
- t.Errorf("mismatched addresses (-want +got):\n%s", diff)
- }
- },
- },
-
- {
- name: "Search list",
- buf: []byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
- opt: NDPDNSSearchList(searchListBytes[optionHeaderLen:]),
- expectedBuf: expectedSearchListBytes[:],
- check: func(t *testing.T, opt NDPOption) {
- if got := opt.kind(); got != ndpDNSSearchListOptionType {
- t.Errorf("got kind() = %d, want = %d", got, ndpDNSSearchListOptionType)
- }
-
- dnssl, ok := opt.(NDPDNSSearchList)
- if !ok {
- t.Fatalf("got opt = %T, want = NDPDNSSearchList", opt)
- }
-				if got, want := dnssl.length(), len(expectedSearchListBytes[optionHeaderLen:]); got != want {
- t.Errorf("got length() = %d, want = %d", got, want)
- }
- if got, want := dnssl.Lifetime(), validLifetimeSeconds*time.Second; got != want {
- t.Errorf("got Lifetime() = %s, want = %s", got, want)
- }
-
- if domainNames, err := dnssl.DomainNames(); err != nil {
- t.Errorf("DomainNames(): %s", err)
- } else if diff := cmp.Diff([]string{domainName}, domainNames); diff != "" {
- t.Errorf("domain names mismatch (-want +got):\n%s", diff)
- }
- },
- },
-
- {
- name: "Prefix Information",
- buf: []byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
- // NDPPrefixInformation holds the option after the header bytes.
- opt: NDPPrefixInformation(prefixInformationBytes[optionHeaderLen:]),
- expectedBuf: expectedPrefixInformationBytes[:],
- check: func(t *testing.T, opt NDPOption) {
- if got := opt.kind(); got != ndpPrefixInformationType {
- t.Errorf("got kind() = %d, want = %d", got, ndpPrefixInformationType)
- }
-
- pi, ok := opt.(NDPPrefixInformation)
- if !ok {
- t.Fatalf("got opt = %T, want = NDPPrefixInformation", opt)
- }
-
- if got, want := pi.length(), len(expectedPrefixInformationBytes[optionHeaderLen:]); got != want {
- t.Errorf("got length() = %d, want = %d", got, want)
- }
- if got := pi.PrefixLength(); got != prefixLength {
- t.Errorf("got PrefixLength() = %d, want = %d", got, prefixLength)
- }
- if got := pi.OnLinkFlag(); got != onLinkFlag {
- t.Errorf("got OnLinkFlag() = %t, want = %t", got, onLinkFlag)
- }
- if got := pi.AutonomousAddressConfigurationFlag(); got != slaacFlag {
- t.Errorf("got AutonomousAddressConfigurationFlag() = %t, want = %t", got, slaacFlag)
- }
- if got, want := pi.ValidLifetime(), validLifetimeSeconds*time.Second; got != want {
- t.Errorf("got ValidLifetime() = %s, want = %s", got, want)
- }
- if got, want := pi.PreferredLifetime(), preferredLifetimeSeconds*time.Second; got != want {
- t.Errorf("got PreferredLifetime() = %s, want = %s", got, want)
- }
- if got := pi.Prefix(); got != address {
- t.Errorf("got Prefix() = %s, want = %s", got, address)
- }
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- opts := NDPOptions(test.buf)
- serializer := NDPOptionsSerializer{
- test.opt,
- }
- if got, want := int(serializer.Length()), len(test.expectedBuf); got != want {
- t.Fatalf("got Length() = %d, want = %d", got, want)
- }
- opts.Serialize(serializer)
- if diff := cmp.Diff(test.expectedBuf, test.buf); diff != "" {
- t.Fatalf("serialized buffer mismatch (-want +got):\n%s", diff)
- }
-
- it, err := opts.Iter(true)
- if err != nil {
- t.Fatalf("got Iter(true) = (_, %s), want = (_, nil)", err)
- }
-
- if len(test.expectedBuf) > 0 {
- next, done, err := it.Next()
- if err != nil {
- t.Fatalf("got Next() = (_, _, %s), want = (_, _, nil)", err)
- }
- if done {
- t.Fatal("got Next() = (_, true, _), want = (_, false, _)")
- }
- test.check(t, next)
- }
-
- // Iterator should not return anything else.
- next, done, err := it.Next()
- if err != nil {
- t.Errorf("got Next() = (_, _, %s), want = (_, _, nil)", err)
- }
- if !done {
- t.Error("got Next() = (_, false, _), want = (_, true, _)")
- }
- if next != nil {
- t.Errorf("got Next() = (%x, _, _), want = (nil, _, _)", next)
- }
- })
- }
-}
-
-func TestNDPRecursiveDNSServerOption(t *testing.T) {
- tests := []struct {
- name string
- buf []byte
- lifetime time.Duration
- addrs []tcpip.Address
- }{
- {
- "Valid1Addr",
- []byte{
- 25, 3, 0, 0,
- 0, 0, 0, 0,
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
- },
- 0,
- []tcpip.Address{
- "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- },
- },
- {
- "Valid2Addr",
- []byte{
- 25, 5, 0, 0,
- 0, 0, 0, 0,
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
- 17, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16,
- },
- 0,
- []tcpip.Address{
- "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- "\x11\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x10",
- },
- },
- {
- "Valid3Addr",
- []byte{
- 25, 7, 0, 0,
- 0, 0, 0, 0,
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
- 17, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16,
- 17, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 17,
- },
- 0,
- []tcpip.Address{
- "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- "\x11\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x10",
- "\x11\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x11",
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- opts := NDPOptions(test.buf)
- it, err := opts.Iter(true)
- if err != nil {
- t.Fatalf("got Iter = (_, %s), want = (_, nil)", err)
- }
-
- // Iterator should get our option.
- next, done, err := it.Next()
- if err != nil {
- t.Fatalf("got Next = (_, _, %s), want = (_, _, nil)", err)
- }
- if done {
- t.Fatal("got Next = (_, true, _), want = (_, false, _)")
- }
- if got := next.kind(); got != ndpRecursiveDNSServerOptionType {
- t.Fatalf("got Type = %d, want = %d", got, ndpRecursiveDNSServerOptionType)
- }
-
- opt, ok := next.(NDPRecursiveDNSServer)
- if !ok {
-				t.Fatalf("next (type = %T) cannot be cast to an NDPRecursiveDNSServer", next)
- }
- if got := opt.Lifetime(); got != test.lifetime {
- t.Errorf("got Lifetime = %d, want = %d", got, test.lifetime)
- }
- addrs, err := opt.Addresses()
- if err != nil {
- t.Errorf("opt.Addresses() = %s", err)
- }
- if diff := cmp.Diff(addrs, test.addrs); diff != "" {
- t.Errorf("mismatched addresses (-want +got):\n%s", diff)
- }
-
- // Iterator should not return anything else.
- next, done, err = it.Next()
- if err != nil {
- t.Errorf("got Next = (_, _, %s), want = (_, _, nil)", err)
- }
- if !done {
- t.Error("got Next = (_, false, _), want = (_, true, _)")
- }
- if next != nil {
- t.Errorf("got Next = (%x, _, _), want = (nil, _, _)", next)
- }
- })
- }
-}
-
-// TestNDPDNSSearchListOption tests the getters of NDPDNSSearchList.
-func TestNDPDNSSearchListOption(t *testing.T) {
- tests := []struct {
- name string
- buf []byte
- lifetime time.Duration
- domainNames []string
- err error
- }{
- {
- name: "Valid1Label",
- buf: []byte{
- 0, 0,
- 0, 0, 0, 1,
- 3, 'a', 'b', 'c',
- 0,
- 0, 0, 0,
- },
- lifetime: time.Second,
- domainNames: []string{
- "abc",
- },
- err: nil,
- },
- {
- name: "Valid2Label",
- buf: []byte{
- 0, 0,
- 0, 0, 0, 5,
- 3, 'a', 'b', 'c',
- 4, 'a', 'b', 'c', 'd',
- 0,
- 0, 0, 0, 0, 0, 0,
- },
- lifetime: 5 * time.Second,
- domainNames: []string{
- "abc.abcd",
- },
- err: nil,
- },
- {
- name: "Valid3Label",
- buf: []byte{
- 0, 0,
- 1, 0, 0, 0,
- 3, 'a', 'b', 'c',
- 4, 'a', 'b', 'c', 'd',
- 1, 'e',
- 0,
- 0, 0, 0, 0,
- },
- lifetime: 16777216 * time.Second,
- domainNames: []string{
- "abc.abcd.e",
- },
- err: nil,
- },
- {
- name: "Valid2Domains",
- buf: []byte{
- 0, 0,
- 1, 2, 3, 4,
- 3, 'a', 'b', 'c',
- 0,
- 2, 'd', 'e',
- 3, 'x', 'y', 'z',
- 0,
- 0, 0, 0,
- },
- lifetime: 16909060 * time.Second,
- domainNames: []string{
- "abc",
- "de.xyz",
- },
- err: nil,
- },
- {
- name: "Valid3DomainsMixedCase",
- buf: []byte{
- 0, 0,
- 0, 0, 0, 0,
- 3, 'a', 'B', 'c',
- 0,
- 2, 'd', 'E',
- 3, 'X', 'y', 'z',
- 0,
- 1, 'J',
- 0,
- },
- lifetime: 0,
- domainNames: []string{
- "abc",
- "de.xyz",
- "j",
- },
- err: nil,
- },
- {
- name: "ValidDomainAfterNULL",
- buf: []byte{
- 0, 0,
- 0, 0, 0, 0,
- 3, 'a', 'B', 'c',
- 0, 0, 0, 0,
- 2, 'd', 'E',
- 3, 'X', 'y', 'z',
- 0,
- },
- lifetime: 0,
- domainNames: []string{
- "abc",
- "de.xyz",
- },
- err: nil,
- },
- {
- name: "Valid0Domains",
- buf: []byte{
- 0, 0,
- 0, 0, 0, 0,
- 0,
- 0, 0, 0, 0, 0, 0, 0,
- },
- lifetime: 0,
- domainNames: nil,
- err: nil,
- },
- {
- name: "NoTrailingNull",
- buf: []byte{
- 0, 0,
- 0, 0, 0, 0,
- 7, 'a', 'b', 'c', 'd', 'e', 'f', 'g',
- },
- lifetime: 0,
- domainNames: nil,
- err: io.ErrUnexpectedEOF,
- },
- {
- name: "IncorrectLength",
- buf: []byte{
- 0, 0,
- 0, 0, 0, 0,
- 8, 'a', 'b', 'c', 'd', 'e', 'f', 'g',
- },
- lifetime: 0,
- domainNames: nil,
- err: io.ErrUnexpectedEOF,
- },
- {
- name: "IncorrectLengthWithNULL",
- buf: []byte{
- 0, 0,
- 0, 0, 0, 0,
- 7, 'a', 'b', 'c', 'd', 'e', 'f',
- 0,
- },
- lifetime: 0,
- domainNames: nil,
- err: ErrNDPOptMalformedBody,
- },
- {
- name: "LabelOfLength63",
- buf: []byte{
- 0, 0,
- 0, 0, 0, 0,
- 63, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k',
- 0,
- },
- lifetime: 0,
- domainNames: []string{
- "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijk",
- },
- err: nil,
- },
- {
- name: "LabelOfLength64",
- buf: []byte{
- 0, 0,
- 0, 0, 0, 0,
- 64, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l',
- 0,
- },
- lifetime: 0,
- domainNames: nil,
- err: ErrNDPOptMalformedBody,
- },
- {
- name: "DomainNameOfLength255",
- buf: []byte{
- 0, 0,
- 0, 0, 0, 0,
- 63, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k',
- 63, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k',
- 63, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k',
- 62, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j',
- 0,
- },
- lifetime: 0,
- domainNames: []string{
- "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijk.abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijk.abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijk.abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghij",
- },
- err: nil,
- },
- {
- name: "DomainNameOfLength256",
- buf: []byte{
- 0, 0,
- 0, 0, 0, 0,
- 63, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k',
- 63, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k',
- 63, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k',
- 63, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k',
- 0,
- },
- lifetime: 0,
- domainNames: nil,
- err: ErrNDPOptMalformedBody,
- },
- {
- name: "StartingDigitForLabel",
- buf: []byte{
- 0, 0,
- 0, 0, 0, 1,
- 3, '9', 'b', 'c',
- 0,
- 0, 0, 0,
- },
- lifetime: time.Second,
- domainNames: nil,
- err: ErrNDPOptMalformedBody,
- },
- {
- name: "StartingHyphenForLabel",
- buf: []byte{
- 0, 0,
- 0, 0, 0, 1,
- 3, '-', 'b', 'c',
- 0,
- 0, 0, 0,
- },
- lifetime: time.Second,
- domainNames: nil,
- err: ErrNDPOptMalformedBody,
- },
- {
- name: "EndingHyphenForLabel",
- buf: []byte{
- 0, 0,
- 0, 0, 0, 1,
- 3, 'a', 'b', '-',
- 0,
- 0, 0, 0,
- },
- lifetime: time.Second,
- domainNames: nil,
- err: ErrNDPOptMalformedBody,
- },
- {
- name: "EndingDigitForLabel",
- buf: []byte{
- 0, 0,
- 0, 0, 0, 1,
- 3, 'a', 'b', '9',
- 0,
- 0, 0, 0,
- },
- lifetime: time.Second,
- domainNames: []string{
- "ab9",
- },
- err: nil,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- opt := NDPDNSSearchList(test.buf)
-
- if got := opt.Lifetime(); got != test.lifetime {
- t.Errorf("got Lifetime = %d, want = %d", got, test.lifetime)
- }
- domainNames, err := opt.DomainNames()
- if !errors.Is(err, test.err) {
- t.Errorf("opt.DomainNames() = %s", err)
- }
- if diff := cmp.Diff(domainNames, test.domainNames); diff != "" {
- t.Errorf("mismatched domain names (-want +got):\n%s", diff)
- }
- })
- }
-}
-
-func TestNDPSearchListOptionDomainNameLabelInvalidSymbols(t *testing.T) {
- for r := rune(0); r <= 255; r++ {
- t.Run(fmt.Sprintf("RuneVal=%d", r), func(t *testing.T) {
- buf := []byte{
- 0, 0,
- 0, 0, 0, 0,
- 3, 'a', 0 /* will be replaced */, 'c',
- 0,
- 0, 0, 0,
- }
- buf[8] = uint8(r)
- opt := NDPDNSSearchList(buf)
-
- // As per RFC 1035 section 2.3.1, the label must only include ASCII
- // letters, digits and hyphens (a-z, A-Z, 0-9, -).
- var expectedErr error
- re := regexp.MustCompile(`[a-zA-Z0-9-]`)
- if !re.Match([]byte{byte(r)}) {
- expectedErr = ErrNDPOptMalformedBody
- }
-
- if domainNames, err := opt.DomainNames(); !errors.Is(err, expectedErr) {
-				t.Errorf("got opt.DomainNames() = (%s, %v), want = (_, %v)", domainNames, err, expectedErr)
- }
- })
- }
-}
-
-// TestNDPOptionsIterCheck tests that Iter returns an error if the NDPOptions
-// it was called on is malformed.
-func TestNDPOptionsIterCheck(t *testing.T) {
- tests := []struct {
- name string
- buf []byte
- expectedErr error
- }{
- {
- name: "ZeroLengthField",
- buf: []byte{0, 0, 0, 0, 0, 0, 0, 0},
- expectedErr: ErrNDPOptMalformedHeader,
- },
- {
- name: "ValidSourceLinkLayerAddressOption",
- buf: []byte{1, 1, 1, 2, 3, 4, 5, 6},
- expectedErr: nil,
- },
- {
- name: "TooSmallSourceLinkLayerAddressOption",
- buf: []byte{1, 1, 1, 2, 3, 4, 5},
- expectedErr: io.ErrUnexpectedEOF,
- },
- {
- name: "ValidTargetLinkLayerAddressOption",
- buf: []byte{2, 1, 1, 2, 3, 4, 5, 6},
- expectedErr: nil,
- },
- {
- name: "TooSmallTargetLinkLayerAddressOption",
- buf: []byte{2, 1, 1, 2, 3, 4, 5},
- expectedErr: io.ErrUnexpectedEOF,
- },
- {
- name: "ValidPrefixInformation",
- buf: []byte{
- 3, 4, 43, 64,
- 1, 2, 3, 4,
- 5, 6, 7, 8,
- 0, 0, 0, 0,
- 9, 10, 11, 12,
- 13, 14, 15, 16,
- 17, 18, 19, 20,
- 21, 22, 23, 24,
- },
- expectedErr: nil,
- },
- {
- name: "TooSmallPrefixInformation",
- buf: []byte{
- 3, 4, 43, 64,
- 1, 2, 3, 4,
- 5, 6, 7, 8,
- 0, 0, 0, 0,
- 9, 10, 11, 12,
- 13, 14, 15, 16,
- 17, 18, 19, 20,
- 21, 22, 23,
- },
- expectedErr: io.ErrUnexpectedEOF,
- },
- {
- name: "InvalidPrefixInformationLength",
- buf: []byte{
- 3, 3, 43, 64,
- 1, 2, 3, 4,
- 5, 6, 7, 8,
- 0, 0, 0, 0,
- 9, 10, 11, 12,
- 13, 14, 15, 16,
- },
- expectedErr: ErrNDPOptMalformedBody,
- },
- {
- name: "ValidSourceAndTargetLinkLayerAddressWithPrefixInformation",
- buf: []byte{
- // Source Link-Layer Address.
- 1, 1, 1, 2, 3, 4, 5, 6,
-
- // Target Link-Layer Address.
- 2, 1, 7, 8, 9, 10, 11, 12,
-
- // Prefix information.
- 3, 4, 43, 64,
- 1, 2, 3, 4,
- 5, 6, 7, 8,
- 0, 0, 0, 0,
- 9, 10, 11, 12,
- 13, 14, 15, 16,
- 17, 18, 19, 20,
- 21, 22, 23, 24,
- },
- expectedErr: nil,
- },
- {
- name: "ValidSourceAndTargetLinkLayerAddressWithPrefixInformationWithUnrecognized",
- buf: []byte{
- // Source Link-Layer Address.
- 1, 1, 1, 2, 3, 4, 5, 6,
-
- // Target Link-Layer Address.
- 2, 1, 7, 8, 9, 10, 11, 12,
-
- // 255 is an unrecognized type. If 255 ends up
- // being the type for some recognized type,
- // update 255 to some other unrecognized value.
- 255, 2, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 7, 8,
-
- // Prefix information.
- 3, 4, 43, 64,
- 1, 2, 3, 4,
- 5, 6, 7, 8,
- 0, 0, 0, 0,
- 9, 10, 11, 12,
- 13, 14, 15, 16,
- 17, 18, 19, 20,
- 21, 22, 23, 24,
- },
- expectedErr: nil,
- },
- {
- name: "InvalidRecursiveDNSServerCutsOffAddress",
- buf: []byte{
- 25, 4, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
- 0, 1, 2, 3, 4, 5, 6, 7,
- },
- expectedErr: ErrNDPOptMalformedBody,
- },
- {
- name: "InvalidRecursiveDNSServerInvalidLengthField",
- buf: []byte{
- 25, 2, 0, 0,
- 0, 0, 0, 0,
- 0, 1, 2, 3, 4, 5, 6, 7, 8,
- },
- expectedErr: io.ErrUnexpectedEOF,
- },
- {
- name: "RecursiveDNSServerTooSmall",
- buf: []byte{
- 25, 1, 0, 0,
- 0, 0, 0,
- },
- expectedErr: io.ErrUnexpectedEOF,
- },
- {
- name: "RecursiveDNSServerMulticast",
- buf: []byte{
- 25, 3, 0, 0,
- 0, 0, 0, 0,
- 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
- },
- expectedErr: ErrNDPOptMalformedBody,
- },
- {
- name: "RecursiveDNSServerUnspecified",
- buf: []byte{
- 25, 3, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- },
- expectedErr: ErrNDPOptMalformedBody,
- },
- {
- name: "DNSSearchListLargeCompliantRFC1035",
- buf: []byte{
- 31, 33, 0, 0,
- 0, 0, 0, 0,
- 63, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k',
- 63, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k',
- 63, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k',
- 62, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j',
- 0,
- },
- expectedErr: nil,
- },
- {
- name: "DNSSearchListNonCompliantRFC1035",
- buf: []byte{
- 31, 33, 0, 0,
- 0, 0, 0, 0,
- 63, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k',
- 63, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k',
- 63, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k',
- 63, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k',
- 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- },
- expectedErr: ErrNDPOptMalformedBody,
- },
- {
- name: "DNSSearchListValidSmall",
- buf: []byte{
- 31, 2, 0, 0,
- 0, 0, 0, 0,
- 6, 'a', 'b', 'c', 'd', 'e', 'f',
- 0,
- },
- expectedErr: nil,
- },
- {
- name: "DNSSearchListTooSmall",
- buf: []byte{
- 31, 1, 0, 0,
- 0, 0, 0,
- },
- expectedErr: io.ErrUnexpectedEOF,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- opts := NDPOptions(test.buf)
-
- if _, err := opts.Iter(true); !errors.Is(err, test.expectedErr) {
- t.Fatalf("got Iter(true) = (_, %v), want = (_, %v)", err, test.expectedErr)
- }
-
-			// test.buf may be malformed, but we chose not to validate the
-			// options, so Iter must not return an error.
- if _, err := opts.Iter(false); err != nil {
- t.Fatalf("got Iter(false) = (_, %s), want = (_, nil)", err)
- }
- })
- }
-}
-
-// TestNDPOptionsIter tests that we can iterate over a valid NDPOptions. Note,
-// this test does not actually check any of the option's getters; it simply
-// checks the option Type and Body. We have other tests that test the option
-// field getters given an option body and don't need to duplicate those tests
-// here.
-func TestNDPOptionsIter(t *testing.T) {
- buf := []byte{
- // Source Link-Layer Address.
- 1, 1, 1, 2, 3, 4, 5, 6,
-
- // Target Link-Layer Address.
- 2, 1, 7, 8, 9, 10, 11, 12,
-
- // 255 is an unrecognized type. If 255 ends up being the type
- // for some recognized type, update 255 to some other
- // unrecognized value. Note, this option should be skipped when
- // iterating.
- 255, 2, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 7, 8,
-
- // Prefix information.
- 3, 4, 43, 64,
- 1, 2, 3, 4,
- 5, 6, 7, 8,
- 0, 0, 0, 0,
- 9, 10, 11, 12,
- 13, 14, 15, 16,
- 17, 18, 19, 20,
- 21, 22, 23, 24,
- }
-
- opts := NDPOptions(buf)
- it, err := opts.Iter(true)
- if err != nil {
- t.Fatalf("got Iter = (_, %s), want = (_, nil)", err)
- }
-
- // Test the first (Source Link-Layer) option.
- next, done, err := it.Next()
- if err != nil {
- t.Fatalf("got Next = (_, _, %s), want = (_, _, nil)", err)
- }
- if done {
- t.Fatal("got Next = (_, true, _), want = (_, false, _)")
- }
- if got, want := []byte(next.(NDPSourceLinkLayerAddressOption)), buf[2:][:6]; !bytes.Equal(got, want) {
- t.Errorf("got Next = (%x, _, _), want = (%x, _, _)", got, want)
- }
- if got := next.kind(); got != ndpSourceLinkLayerAddressOptionType {
- t.Errorf("got Type = %d, want = %d", got, ndpSourceLinkLayerAddressOptionType)
- }
-
- // Test the next (Target Link-Layer) option.
- next, done, err = it.Next()
- if err != nil {
- t.Fatalf("got Next = (_, _, %s), want = (_, _, nil)", err)
- }
- if done {
- t.Fatal("got Next = (_, true, _), want = (_, false, _)")
- }
- if got, want := []byte(next.(NDPTargetLinkLayerAddressOption)), buf[10:][:6]; !bytes.Equal(got, want) {
- t.Errorf("got Next = (%x, _, _), want = (%x, _, _)", got, want)
- }
- if got := next.kind(); got != ndpTargetLinkLayerAddressOptionType {
- t.Errorf("got Type = %d, want = %d", got, ndpTargetLinkLayerAddressOptionType)
- }
-
- // Test the next (Prefix Information) option.
- // Note, the unrecognized option should be skipped.
- next, done, err = it.Next()
- if err != nil {
- t.Fatalf("got Next = (_, _, %s), want = (_, _, nil)", err)
- }
- if done {
- t.Fatal("got Next = (_, true, _), want = (_, false, _)")
- }
- if got, want := next.(NDPPrefixInformation), buf[34:][:30]; !bytes.Equal(got, want) {
- t.Errorf("got Next = (%x, _, _), want = (%x, _, _)", got, want)
- }
- if got := next.kind(); got != ndpPrefixInformationType {
- t.Errorf("got Type = %d, want = %d", got, ndpPrefixInformationType)
- }
-
- // Iterator should not return anything else.
- next, done, err = it.Next()
- if err != nil {
- t.Errorf("got Next = (_, _, %s), want = (_, _, nil)", err)
- }
- if !done {
- t.Error("got Next = (_, false, _), want = (_, true, _)")
- }
- if next != nil {
- t.Errorf("got Next = (%x, _, _), want = (nil, _, _)", next)
- }
-}
-
-func TestNDPRoutePreferenceStringer(t *testing.T) {
- p := NDPRoutePreference(0)
- for {
- var wantStr string
- switch p {
- case 0b01:
- wantStr = "HighRoutePreference"
- case 0b00:
- wantStr = "MediumRoutePreference"
- case 0b11:
- wantStr = "LowRoutePreference"
- case 0b10:
- wantStr = "ReservedRoutePreference"
- default:
- wantStr = fmt.Sprintf("NDPRoutePreference(%d)", p)
- }
-
- if gotStr := p.String(); gotStr != wantStr {
- t.Errorf("got NDPRoutePreference(%d).String() = %s, want = %s", p, gotStr, wantStr)
- }
-
- p++
- if p == 0 {
- // Overflowed, we hit all values.
- break
- }
- }
-}
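
The option-walking behaviour tested throughout ndp_test.go comes from the RFC
4861 section 4.6 framing: every option is a 1-byte type, a 1-byte length
counted in 8-octet units (zero is invalid), and a body, and unrecognized types
are skipped. A minimal standalone walker over that framing (illustrative only,
with validation reduced to a single error):

package main

import (
	"errors"
	"fmt"
)

var errMalformed = errors.New("malformed NDP option")

// walkNDPOptions calls fn(type, body) for each option in buf, where buf is a
// sequence of RFC 4861 TLVs: type (1 byte), length in 8-octet units (1 byte,
// zero is invalid), body (length*8 - 2 bytes).
func walkNDPOptions(buf []byte, fn func(typ byte, body []byte)) error {
	for len(buf) > 0 {
		if len(buf) < 2 {
			return errMalformed
		}
		typ, l := buf[0], int(buf[1])*8
		if l == 0 || l > len(buf) {
			return errMalformed
		}
		fn(typ, buf[2:l])
		buf = buf[l:]
	}
	return nil
}

func main() {
	// A source link-layer address option followed by an unrecognized type 255.
	buf := []byte{
		1, 1, 1, 2, 3, 4, 5, 6,
		255, 1, 0, 0, 0, 0, 0, 0,
	}
	err := walkNDPOptions(buf, func(typ byte, body []byte) {
		fmt.Printf("type=%d body=%x\n", typ, body)
	})
	fmt.Println("err:", err)
}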
diff --git a/pkg/tcpip/header/parse/BUILD b/pkg/tcpip/header/parse/BUILD
deleted file mode 100644
index 2adee9288..000000000
--- a/pkg/tcpip/header/parse/BUILD
+++ /dev/null
@@ -1,15 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "parse",
- srcs = ["parse.go"],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/header",
- "//pkg/tcpip/stack",
- ],
-)
diff --git a/pkg/tcpip/header/parse/parse_state_autogen.go b/pkg/tcpip/header/parse/parse_state_autogen.go
new file mode 100644
index 000000000..ad047be32
--- /dev/null
+++ b/pkg/tcpip/header/parse/parse_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package parse
diff --git a/pkg/tcpip/header/tcp_test.go b/pkg/tcpip/header/tcp_test.go
deleted file mode 100644
index 96db8460f..000000000
--- a/pkg/tcpip/header/tcp_test.go
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package header_test
-
-import (
- "reflect"
- "testing"
-
- "gvisor.dev/gvisor/pkg/tcpip/header"
-)
-
-func TestEncodeSACKBlocks(t *testing.T) {
- testCases := []struct {
- sackBlocks []header.SACKBlock
- want []header.SACKBlock
- bufSize int
- }{
- {
- []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}, {42, 50}, {52, 60}, {62, 70}},
- []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}, {42, 50}},
- 40,
- },
- {
- []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}, {42, 50}, {52, 60}, {62, 70}},
- []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}},
- 30,
- },
- {
- []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}, {42, 50}, {52, 60}, {62, 70}},
- []header.SACKBlock{{10, 20}, {22, 30}},
- 20,
- },
- {
- []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}, {42, 50}, {52, 60}, {62, 70}},
- []header.SACKBlock{{10, 20}},
- 10,
- },
- {
- []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}, {42, 50}, {52, 60}, {62, 70}},
- nil,
- 8,
- },
- {
- []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}, {42, 50}, {52, 60}, {62, 70}},
- []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}, {42, 50}},
- 60,
- },
- }
- for _, tc := range testCases {
- b := make([]byte, tc.bufSize)
- t.Logf("testing: %v", tc)
- header.EncodeSACKBlocks(tc.sackBlocks, b)
- opts := header.ParseTCPOptions(b)
- if got, want := opts.SACKBlocks, tc.want; !reflect.DeepEqual(got, want) {
- t.Errorf("header.EncodeSACKBlocks(%v, %v), encoded blocks got: %v, want: %v", tc.sackBlocks, b, got, want)
- }
- }
-}
-
-func TestTCPParseOptions(t *testing.T) {
- type tsOption struct {
- tsVal uint32
- tsEcr uint32
- }
-
- generateOptions := func(tsOpt *tsOption, sackBlocks []header.SACKBlock) []byte {
- l := 0
- if tsOpt != nil {
- l += 10
- }
- if len(sackBlocks) != 0 {
- l += len(sackBlocks)*8 + 2
- }
- b := make([]byte, l)
- offset := 0
- if tsOpt != nil {
- offset = header.EncodeTSOption(tsOpt.tsVal, tsOpt.tsEcr, b)
- }
- header.EncodeSACKBlocks(sackBlocks, b[offset:])
- return b
- }
-
- testCases := []struct {
- b []byte
- want header.TCPOptions
- }{
- // Trivial cases.
- {nil, header.TCPOptions{false, 0, 0, nil}},
- {[]byte{header.TCPOptionNOP}, header.TCPOptions{false, 0, 0, nil}},
- {[]byte{header.TCPOptionNOP, header.TCPOptionNOP}, header.TCPOptions{false, 0, 0, nil}},
- {[]byte{header.TCPOptionEOL}, header.TCPOptions{false, 0, 0, nil}},
- {[]byte{header.TCPOptionNOP, header.TCPOptionEOL, header.TCPOptionTS, 10, 1, 1}, header.TCPOptions{false, 0, 0, nil}},
-
- // Test timestamp parsing.
- {[]byte{header.TCPOptionNOP, header.TCPOptionTS, 10, 0, 0, 0, 1, 0, 0, 0, 1}, header.TCPOptions{true, 1, 1, nil}},
- {[]byte{header.TCPOptionTS, 10, 0, 0, 0, 1, 0, 0, 0, 1}, header.TCPOptions{true, 1, 1, nil}},
-
- // Test malformed timestamp option.
- {[]byte{header.TCPOptionTS, 8, 1, 1}, header.TCPOptions{false, 0, 0, nil}},
- {[]byte{header.TCPOptionNOP, header.TCPOptionTS, 8, 1, 1}, header.TCPOptions{false, 0, 0, nil}},
- {[]byte{header.TCPOptionNOP, header.TCPOptionTS, 8, 0, 0, 0, 1, 0, 0, 0, 1}, header.TCPOptions{false, 0, 0, nil}},
-
- // Test SACKBlock parsing.
- {[]byte{header.TCPOptionSACK, 10, 0, 0, 0, 1, 0, 0, 0, 10}, header.TCPOptions{false, 0, 0, []header.SACKBlock{{1, 10}}}},
- {[]byte{header.TCPOptionSACK, 18, 0, 0, 0, 1, 0, 0, 0, 10, 0, 0, 0, 11, 0, 0, 0, 12}, header.TCPOptions{false, 0, 0, []header.SACKBlock{{1, 10}, {11, 12}}}},
-
- // Test malformed SACK option.
- {[]byte{header.TCPOptionSACK, 0}, header.TCPOptions{false, 0, 0, nil}},
- {[]byte{header.TCPOptionSACK, 8, 0, 0, 0, 1, 0, 0, 0, 10}, header.TCPOptions{false, 0, 0, nil}},
- {[]byte{header.TCPOptionSACK, 11, 0, 0, 0, 1, 0, 0, 0, 10, 0, 0, 0, 11, 0, 0, 0, 12}, header.TCPOptions{false, 0, 0, nil}},
- {[]byte{header.TCPOptionSACK, 17, 0, 0, 0, 1, 0, 0, 0, 10, 0, 0, 0, 11, 0, 0, 0, 12}, header.TCPOptions{false, 0, 0, nil}},
- {[]byte{header.TCPOptionSACK}, header.TCPOptions{false, 0, 0, nil}},
- {[]byte{header.TCPOptionSACK, 10}, header.TCPOptions{false, 0, 0, nil}},
- {[]byte{header.TCPOptionSACK, 10, 0, 0, 0, 1, 0, 0, 0}, header.TCPOptions{false, 0, 0, nil}},
-
- // Test Timestamp + SACK block parsing.
- {generateOptions(&tsOption{1, 1}, []header.SACKBlock{{1, 10}, {11, 12}}), header.TCPOptions{true, 1, 1, []header.SACKBlock{{1, 10}, {11, 12}}}},
- {generateOptions(&tsOption{1, 2}, []header.SACKBlock{{1, 10}, {11, 12}}), header.TCPOptions{true, 1, 2, []header.SACKBlock{{1, 10}, {11, 12}}}},
- {generateOptions(&tsOption{1, 3}, []header.SACKBlock{{1, 10}, {11, 12}, {13, 14}, {14, 15}, {15, 16}}), header.TCPOptions{true, 1, 3, []header.SACKBlock{{1, 10}, {11, 12}, {13, 14}, {14, 15}}}},
-
- // Test valid timestamp + malformed SACK block parsing.
- {[]byte{header.TCPOptionTS, 10, 0, 0, 0, 1, 0, 0, 0, 1, header.TCPOptionSACK}, header.TCPOptions{true, 1, 1, nil}},
- {[]byte{header.TCPOptionTS, 10, 0, 0, 0, 1, 0, 0, 0, 1, header.TCPOptionSACK, 10}, header.TCPOptions{true, 1, 1, nil}},
- {[]byte{header.TCPOptionTS, 10, 0, 0, 0, 1, 0, 0, 0, 1, header.TCPOptionSACK, 10, 0, 0, 0}, header.TCPOptions{true, 1, 1, nil}},
- {[]byte{header.TCPOptionTS, 10, 0, 0, 0, 1, 0, 0, 0, 1, header.TCPOptionSACK, 11, 0, 0, 0, 1, 0, 0, 0, 1}, header.TCPOptions{true, 1, 1, nil}},
- {[]byte{header.TCPOptionSACK, header.TCPOptionTS, 10, 0, 0, 0, 1, 0, 0, 0, 1}, header.TCPOptions{false, 0, 0, nil}},
- {[]byte{header.TCPOptionSACK, 10, header.TCPOptionTS, 10, 0, 0, 0, 1, 0, 0, 0, 1}, header.TCPOptions{false, 0, 0, []header.SACKBlock{{134873088, 65536}}}},
- {[]byte{header.TCPOptionSACK, 10, 0, 0, 0, header.TCPOptionTS, 10, 0, 0, 0, 1, 0, 0, 0, 1}, header.TCPOptions{false, 0, 0, []header.SACKBlock{{8, 167772160}}}},
- {[]byte{header.TCPOptionSACK, 11, 0, 0, 0, 1, 0, 0, 0, 1, header.TCPOptionTS, 10, 0, 0, 0, 1, 0, 0, 0, 1}, header.TCPOptions{false, 0, 0, nil}},
- }
- for _, tc := range testCases {
- if got, want := header.ParseTCPOptions(tc.b), tc.want; !reflect.DeepEqual(got, want) {
- t.Errorf("ParseTCPOptions(%v) = %v, want: %v", tc.b, got, tc.want)
- }
- }
-}
-
-func TestTCPFlags(t *testing.T) {
- for _, tt := range []struct {
- flags header.TCPFlags
- want string
- }{
- {header.TCPFlagFin, "F "},
- {header.TCPFlagSyn, " S "},
- {header.TCPFlagRst, " R "},
- {header.TCPFlagPsh, " P "},
- {header.TCPFlagAck, " A "},
- {header.TCPFlagUrg, " U"},
- {header.TCPFlagSyn | header.TCPFlagAck, " S A "},
- {header.TCPFlagFin | header.TCPFlagAck, "F A "},
- } {
- if got := tt.flags.String(); got != tt.want {
- t.Errorf("got TCPFlags(%#b).String() = %s, want = %s", tt.flags, got, tt.want)
- }
- }
-}
diff --git a/pkg/tcpip/link/channel/BUILD b/pkg/tcpip/link/channel/BUILD
deleted file mode 100644
index 973f06cbc..000000000
--- a/pkg/tcpip/link/channel/BUILD
+++ /dev/null
@@ -1,15 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "channel",
- srcs = ["channel.go"],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/sync",
- "//pkg/tcpip",
- "//pkg/tcpip/header",
- "//pkg/tcpip/stack",
- ],
-)
diff --git a/pkg/tcpip/link/channel/channel_state_autogen.go b/pkg/tcpip/link/channel/channel_state_autogen.go
new file mode 100644
index 000000000..7730b59b8
--- /dev/null
+++ b/pkg/tcpip/link/channel/channel_state_autogen.go
@@ -0,0 +1,36 @@
+// automatically generated by stateify.
+
+package channel
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (n *NotificationHandle) StateTypeName() string {
+ return "pkg/tcpip/link/channel.NotificationHandle"
+}
+
+func (n *NotificationHandle) StateFields() []string {
+ return []string{
+ "n",
+ }
+}
+
+func (n *NotificationHandle) beforeSave() {}
+
+// +checklocksignore
+func (n *NotificationHandle) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.n)
+}
+
+func (n *NotificationHandle) afterLoad() {}
+
+// +checklocksignore
+func (n *NotificationHandle) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.n)
+}
+
+func init() {
+ state.Register((*NotificationHandle)(nil))
+}
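The generated channel_state_autogen.go above follows the usual stateify shape: the type reports its name and field list, saves and loads each field by index through state.Sink and state.Source, and registers itself with state.Register in init(). The following hand-written sketch applies the same shape to a made-up type; the package and type names are illustrative, and only the state-package calls mirror the generated code.

package example

import "gvisor.dev/gvisor/pkg/state"

// counter is a hypothetical type used to show the save/restore hooks that
// stateify generates for NotificationHandle above.
type counter struct {
	n int
}

func (c *counter) StateTypeName() string { return "example.counter" }

func (c *counter) StateFields() []string { return []string{"n"} }

func (c *counter) beforeSave() {}

func (c *counter) StateSave(sink state.Sink) {
	c.beforeSave()
	sink.Save(0, &c.n) // Index 0 corresponds to StateFields()[0], "n".
}

func (c *counter) afterLoad() {}

func (c *counter) StateLoad(source state.Source) {
	source.Load(0, &c.n)
}

func init() {
	state.Register((*counter)(nil))
}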
diff --git a/pkg/tcpip/link/ethernet/BUILD b/pkg/tcpip/link/ethernet/BUILD
deleted file mode 100644
index 0ae0d201a..000000000
--- a/pkg/tcpip/link/ethernet/BUILD
+++ /dev/null
@@ -1,29 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "ethernet",
- srcs = ["ethernet.go"],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/header",
- "//pkg/tcpip/link/nested",
- "//pkg/tcpip/stack",
- ],
-)
-
-go_test(
- name = "ethernet_test",
- size = "small",
- srcs = ["ethernet_test.go"],
- deps = [
- ":ethernet",
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/header",
- "//pkg/tcpip/link/channel",
- "//pkg/tcpip/stack",
- ],
-)
diff --git a/pkg/tcpip/link/ethernet/ethernet_state_autogen.go b/pkg/tcpip/link/ethernet/ethernet_state_autogen.go
new file mode 100644
index 000000000..71d255c20
--- /dev/null
+++ b/pkg/tcpip/link/ethernet/ethernet_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package ethernet
diff --git a/pkg/tcpip/link/ethernet/ethernet_test.go b/pkg/tcpip/link/ethernet/ethernet_test.go
deleted file mode 100644
index 08a7f1ce1..000000000
--- a/pkg/tcpip/link/ethernet/ethernet_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ethernet_test
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/link/channel"
- "gvisor.dev/gvisor/pkg/tcpip/link/ethernet"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
-)
-
-var _ stack.NetworkDispatcher = (*testNetworkDispatcher)(nil)
-
-type testNetworkDispatcher struct {
- networkPackets int
-}
-
-func (t *testNetworkDispatcher) DeliverNetworkPacket(_, _ tcpip.LinkAddress, _ tcpip.NetworkProtocolNumber, _ *stack.PacketBuffer) {
- t.networkPackets++
-}
-
-func (*testNetworkDispatcher) DeliverOutboundPacket(_, _ tcpip.LinkAddress, _ tcpip.NetworkProtocolNumber, _ *stack.PacketBuffer) {
-}
-
-func TestDeliverNetworkPacket(t *testing.T) {
- const (
- linkAddr = tcpip.LinkAddress("\x02\x02\x03\x04\x05\x06")
- otherLinkAddr1 = tcpip.LinkAddress("\x02\x02\x03\x04\x05\x07")
- otherLinkAddr2 = tcpip.LinkAddress("\x02\x02\x03\x04\x05\x08")
- )
-
- e := ethernet.New(channel.New(0, 0, linkAddr))
- var networkDispatcher testNetworkDispatcher
- e.Attach(&networkDispatcher)
-
- if networkDispatcher.networkPackets != 0 {
- t.Fatalf("got networkDispatcher.networkPackets = %d, want = 0", networkDispatcher.networkPackets)
- }
-
- // An ethernet frame with a destination link address that is not assigned to
- // our ethernet link endpoint should still be delivered to the network
- // dispatcher since the ethernet endpoint is not expected to filter frames.
- eth := buffer.NewView(header.EthernetMinimumSize)
- header.Ethernet(eth).Encode(&header.EthernetFields{
- SrcAddr: otherLinkAddr1,
- DstAddr: otherLinkAddr2,
- Type: header.IPv4ProtocolNumber,
- })
- e.DeliverNetworkPacket("", "", 0, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: eth.ToVectorisedView(),
- }))
- if networkDispatcher.networkPackets != 1 {
- t.Fatalf("got networkDispatcher.networkPackets = %d, want = 1", networkDispatcher.networkPackets)
- }
-}
diff --git a/pkg/tcpip/link/fdbased/BUILD b/pkg/tcpip/link/fdbased/BUILD
deleted file mode 100644
index 1d0163823..000000000
--- a/pkg/tcpip/link/fdbased/BUILD
+++ /dev/null
@@ -1,40 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "fdbased",
- srcs = [
- "endpoint.go",
- "endpoint_unsafe.go",
- "mmap.go",
- "mmap_stub.go",
- "mmap_unsafe.go",
- "packet_dispatchers.go",
- ],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/sync",
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/header",
- "//pkg/tcpip/link/rawfile",
- "//pkg/tcpip/stack",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "fdbased_test",
- size = "small",
- srcs = ["endpoint_test.go"],
- library = ":fdbased",
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/header",
- "//pkg/tcpip/stack",
- "@com_github_google_go_cmp//cmp:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/tcpip/link/fdbased/endpoint_test.go b/pkg/tcpip/link/fdbased/endpoint_test.go
deleted file mode 100644
index eccd21579..000000000
--- a/pkg/tcpip/link/fdbased/endpoint_test.go
+++ /dev/null
@@ -1,624 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build linux
-// +build linux
-
-package fdbased
-
-import (
- "bytes"
- "fmt"
- "math/rand"
- "reflect"
- "testing"
- "time"
- "unsafe"
-
- "github.com/google/go-cmp/cmp"
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
-)
-
-const (
- mtu = 1500
- laddr = tcpip.LinkAddress("\x11\x22\x33\x44\x55\x66")
- raddr = tcpip.LinkAddress("\x77\x88\x99\xaa\xbb\xcc")
- proto = 10
- csumOffset = 48
- gsoMSS = 500
-)
-
-type packetInfo struct {
- Raddr tcpip.LinkAddress
- Proto tcpip.NetworkProtocolNumber
- Contents *stack.PacketBuffer
-}
-
-type packetContents struct {
- LinkHeader buffer.View
- NetworkHeader buffer.View
- TransportHeader buffer.View
- Data buffer.View
-}
-
-func checkPacketInfoEqual(t *testing.T, got, want packetInfo) {
- t.Helper()
- if diff := cmp.Diff(
- want, got,
- cmp.Transformer("ExtractPacketBuffer", func(pk *stack.PacketBuffer) *packetContents {
- if pk == nil {
- return nil
- }
- return &packetContents{
- LinkHeader: pk.LinkHeader().View(),
- NetworkHeader: pk.NetworkHeader().View(),
- TransportHeader: pk.TransportHeader().View(),
- Data: pk.Data().AsRange().ToOwnedView(),
- }
- }),
- ); diff != "" {
- t.Errorf("unexpected packetInfo (-want +got):\n%s", diff)
- }
-}
-
-type context struct {
- t *testing.T
- readFDs []int
- writeFDs []int
- ep stack.LinkEndpoint
- ch chan packetInfo
- done chan struct{}
-}
-
-func newContext(t *testing.T, opt *Options) *context {
- firstFDPair, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_SEQPACKET, 0)
- if err != nil {
- t.Fatalf("Socketpair failed: %v", err)
- }
- secondFDPair, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_SEQPACKET, 0)
- if err != nil {
- t.Fatalf("Socketpair failed: %v", err)
- }
-
- done := make(chan struct{}, 2)
- opt.ClosedFunc = func(tcpip.Error) {
- done <- struct{}{}
- }
-
- opt.FDs = []int{firstFDPair[1], secondFDPair[1]}
- ep, err := New(opt)
- if err != nil {
- t.Fatalf("Failed to create FD endpoint: %v", err)
- }
-
- c := &context{
- t: t,
- readFDs: []int{firstFDPair[0], secondFDPair[0]},
- writeFDs: opt.FDs,
- ep: ep,
- ch: make(chan packetInfo, 100),
- done: done,
- }
-
- ep.Attach(c)
-
- return c
-}
-
-func (c *context) cleanup() {
- for _, fd := range c.readFDs {
- unix.Close(fd)
- }
- <-c.done
- <-c.done
- for _, fd := range c.writeFDs {
- unix.Close(fd)
- }
-}
-
-func (c *context) DeliverNetworkPacket(remote tcpip.LinkAddress, local tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {
- c.ch <- packetInfo{remote, protocol, pkt}
-}
-
-func (c *context) DeliverOutboundPacket(remote tcpip.LinkAddress, local tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {
- panic("unimplemented")
-}
-
-func TestNoEthernetProperties(t *testing.T) {
- c := newContext(t, &Options{MTU: mtu})
- defer c.cleanup()
-
- if want, v := uint16(0), c.ep.MaxHeaderLength(); want != v {
- t.Fatalf("MaxHeaderLength() = %v, want %v", v, want)
- }
-
- if want, v := uint32(mtu), c.ep.MTU(); want != v {
- t.Fatalf("MTU() = %v, want %v", v, want)
- }
-}
-
-func TestEthernetProperties(t *testing.T) {
- c := newContext(t, &Options{EthernetHeader: true, MTU: mtu})
- defer c.cleanup()
-
- if want, v := uint16(header.EthernetMinimumSize), c.ep.MaxHeaderLength(); want != v {
- t.Fatalf("MaxHeaderLength() = %v, want %v", v, want)
- }
-
- if want, v := uint32(mtu), c.ep.MTU(); want != v {
- t.Fatalf("MTU() = %v, want %v", v, want)
- }
-}
-
-func TestAddress(t *testing.T) {
- addrs := []tcpip.LinkAddress{"", "abc", "def"}
- for _, a := range addrs {
- t.Run(fmt.Sprintf("Address: %q", a), func(t *testing.T) {
- c := newContext(t, &Options{Address: a, MTU: mtu})
- defer c.cleanup()
-
- if want, v := a, c.ep.LinkAddress(); want != v {
- t.Fatalf("LinkAddress() = %v, want %v", v, want)
- }
- })
- }
-}
-
-func testWritePacket(t *testing.T, plen int, eth bool, gsoMaxSize uint32, hash uint32) {
- c := newContext(t, &Options{Address: laddr, MTU: mtu, EthernetHeader: eth, GSOMaxSize: gsoMaxSize})
- defer c.cleanup()
-
- var r stack.RouteInfo
- r.RemoteLinkAddress = raddr
-
- // Build payload.
- payload := buffer.NewView(plen)
- if _, err := rand.Read(payload); err != nil {
- t.Fatalf("rand.Read(payload): %s", err)
- }
-
- // Build packet buffer.
- const netHdrLen = 100
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: int(c.ep.MaxHeaderLength()) + netHdrLen,
- Data: payload.ToVectorisedView(),
- })
- pkt.Hash = hash
-
- // Build header.
- b := pkt.NetworkHeader().Push(netHdrLen)
- if _, err := rand.Read(b); err != nil {
- t.Fatalf("rand.Read(b): %s", err)
- }
-
- // Write.
- want := append(append(buffer.View(nil), b...), payload...)
- const l3HdrLen = header.IPv6MinimumSize
- if gsoMaxSize != 0 {
- pkt.GSOOptions = stack.GSO{
- Type: stack.GSOTCPv6,
- NeedsCsum: true,
- CsumOffset: csumOffset,
- MSS: gsoMSS,
- L3HdrLen: l3HdrLen,
- }
- }
- if err := c.ep.WritePacket(r, proto, pkt); err != nil {
- t.Fatalf("WritePacket failed: %v", err)
- }
-
- // Read from the corresponding FD, then compare with what we wrote.
- b = make([]byte, mtu)
- fd := c.readFDs[hash%uint32(len(c.readFDs))]
- n, err := unix.Read(fd, b)
- if err != nil {
- t.Fatalf("Read failed: %v", err)
- }
- b = b[:n]
- if gsoMaxSize != 0 {
- vnetHdr := *(*virtioNetHdr)(unsafe.Pointer(&b[0]))
- if vnetHdr.flags&_VIRTIO_NET_HDR_F_NEEDS_CSUM == 0 {
- t.Fatalf("virtioNetHdr.flags %v doesn't contain %v", vnetHdr.flags, _VIRTIO_NET_HDR_F_NEEDS_CSUM)
- }
- const csumStart = header.EthernetMinimumSize + l3HdrLen
- if vnetHdr.csumStart != csumStart {
- t.Fatalf("vnetHdr.csumStart = %v, want %v", vnetHdr.csumStart, csumStart)
- }
- if vnetHdr.csumOffset != csumOffset {
- t.Fatalf("vnetHdr.csumOffset = %v, want %v", vnetHdr.csumOffset, csumOffset)
- }
- gsoType := uint8(0)
- if plen > gsoMSS {
- gsoType = _VIRTIO_NET_HDR_GSO_TCPV6
- }
- if vnetHdr.gsoType != gsoType {
- t.Fatalf("vnetHdr.gsoType = %v, want %v", vnetHdr.gsoType, gsoType)
- }
- b = b[virtioNetHdrSize:]
- }
- if eth {
- h := header.Ethernet(b)
- b = b[header.EthernetMinimumSize:]
-
- if a := h.SourceAddress(); a != laddr {
- t.Fatalf("SourceAddress() = %v, want %v", a, laddr)
- }
-
- if a := h.DestinationAddress(); a != raddr {
- t.Fatalf("DestinationAddress() = %v, want %v", a, raddr)
- }
-
- if et := h.Type(); et != proto {
- t.Fatalf("Type() = %v, want %v", et, proto)
- }
- }
- if len(b) != len(want) {
- t.Fatalf("Read returned %v bytes, want %v", len(b), len(want))
- }
- if !bytes.Equal(b, want) {
- t.Fatalf("Read returned %x, want %x", b, want)
- }
-}
-
-func TestWritePacket(t *testing.T) {
- lengths := []int{0, 100, 1000}
- eths := []bool{true, false}
- gsos := []uint32{0, 32768}
-
- for _, eth := range eths {
- for _, plen := range lengths {
- for _, gso := range gsos {
- t.Run(
- fmt.Sprintf("Eth=%v,PayloadLen=%v,GSOMaxSize=%v", eth, plen, gso),
- func(t *testing.T) {
- testWritePacket(t, plen, eth, gso, 0)
- },
- )
- }
- }
- }
-}
-
-func TestHashedWritePacket(t *testing.T) {
- lengths := []int{0, 100, 1000}
- eths := []bool{true, false}
- gsos := []uint32{0, 32768}
- hashes := []uint32{0, 1}
- for _, eth := range eths {
- for _, plen := range lengths {
- for _, gso := range gsos {
- for _, hash := range hashes {
- t.Run(
- fmt.Sprintf("Eth=%v,PayloadLen=%v,GSOMaxSize=%v,Hash=%d", eth, plen, gso, hash),
- func(t *testing.T) {
- testWritePacket(t, plen, eth, gso, hash)
- },
- )
- }
- }
- }
- }
-}
-
-func TestPreserveSrcAddress(t *testing.T) {
- baddr := tcpip.LinkAddress("\xcc\xbb\xaa\x77\x88\x99")
-
- c := newContext(t, &Options{Address: laddr, MTU: mtu, EthernetHeader: true})
- defer c.cleanup()
-
- // Set LocalLinkAddress in route to the value of the bridged address.
- var r stack.RouteInfo
- r.LocalLinkAddress = baddr
- r.RemoteLinkAddress = raddr
-
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- // WritePacket panics given a prependable with anything less than
- // the minimum size of the ethernet header.
- // TODO(b/153685824): Figure out if this should use c.ep.MaxHeaderLength().
- ReserveHeaderBytes: header.EthernetMinimumSize,
- Data: buffer.VectorisedView{},
- })
- if err := c.ep.WritePacket(r, proto, pkt); err != nil {
- t.Fatalf("WritePacket failed: %v", err)
- }
-
- // Read from the FD, then compare with what we wrote.
- b := make([]byte, mtu)
- n, err := unix.Read(c.readFDs[0], b)
- if err != nil {
- t.Fatalf("Read failed: %v", err)
- }
- b = b[:n]
- h := header.Ethernet(b)
-
- if a := h.SourceAddress(); a != baddr {
- t.Fatalf("SourceAddress() = %v, want %v", a, baddr)
- }
-}
-
-func TestDeliverPacket(t *testing.T) {
- lengths := []int{100, 1000}
- eths := []bool{true, false}
-
- for _, eth := range eths {
- for _, plen := range lengths {
- t.Run(fmt.Sprintf("Eth=%v,PayloadLen=%v", eth, plen), func(t *testing.T) {
- c := newContext(t, &Options{Address: laddr, MTU: mtu, EthernetHeader: eth})
- defer c.cleanup()
-
- // Build packet.
- all := make([]byte, plen)
- if _, err := rand.Read(all); err != nil {
- t.Fatalf("rand.Read(all): %s", err)
- }
- // Make it look like an IPv4 packet.
- all[0] = 0x40
-
- wantPkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: header.EthernetMinimumSize,
- Data: buffer.NewViewFromBytes(all).ToVectorisedView(),
- })
- if eth {
- hdr := header.Ethernet(wantPkt.LinkHeader().Push(header.EthernetMinimumSize))
- hdr.Encode(&header.EthernetFields{
- SrcAddr: raddr,
- DstAddr: laddr,
- Type: proto,
- })
- all = append(hdr, all...)
- }
-
- // Write packet via the file descriptor.
- if _, err := unix.Write(c.readFDs[0], all); err != nil {
- t.Fatalf("Write failed: %v", err)
- }
-
- // Receive packet through the endpoint.
- select {
- case pi := <-c.ch:
- want := packetInfo{
- Raddr: raddr,
- Proto: proto,
- Contents: wantPkt,
- }
- if !eth {
- want.Proto = header.IPv4ProtocolNumber
- want.Raddr = ""
- }
- checkPacketInfoEqual(t, pi, want)
- case <-time.After(10 * time.Second):
- t.Fatalf("Timed out waiting for packet")
- }
- })
- }
- }
-}
-
-func TestBufConfigMaxLength(t *testing.T) {
- got := 0
- for _, i := range BufConfig {
- got += i
- }
- want := header.MaxIPPacketSize // maximum TCP packet size
- if got < want {
- t.Errorf("total buffer size is invalid: got %d, want >= %d", got, want)
- }
-}
-
-func TestBufConfigFirst(t *testing.T) {
-	// The stack assumes that the TCP/IP header is entirely contained in the first view.
- // Therefore, the first view needs to be large enough to contain the maximum TCP/IP
- // header, which is 120 bytes (60 bytes for IP + 60 bytes for TCP).
- want := 120
- got := BufConfig[0]
- if got < want {
- t.Errorf("first view has an invalid size: got %d, want >= %d", got, want)
- }
-}
-
-var capLengthTestCases = []struct {
- comment string
- config []int
- n int
- wantUsed int
- wantLengths []int
-}{
- {
- comment: "Single slice",
- config: []int{2},
- n: 1,
- wantUsed: 1,
- wantLengths: []int{1},
- },
- {
- comment: "Multiple slices",
- config: []int{1, 2},
-		// 255 is an unrecognized type. If 255 ends up being assigned to
-		// some recognized option type, update it to another unrecognized
-		// value. Note, this option should be skipped when iterating.
- {
- comment: "Entire buffer",
- config: []int{1, 2},
- n: 3,
- wantUsed: 2,
- wantLengths: []int{1, 2},
- },
- {
- comment: "Entire buffer but not on the last slice",
- config: []int{1, 2, 3},
- n: 3,
- wantUsed: 2,
- wantLengths: []int{1, 2},
- },
-}
-
-func TestIovecBuffer(t *testing.T) {
- for _, c := range capLengthTestCases {
- t.Run(c.comment, func(t *testing.T) {
- b := newIovecBuffer(c.config, false /* skipsVnetHdr */)
-
- // Test initial allocation.
- iovecs := b.nextIovecs()
- if got, want := len(iovecs), len(c.config); got != want {
- t.Fatalf("len(iovecs) = %d, want %d", got, want)
- }
-
-			// Make a copy, as iovecs points to an internal slice. We will need this
-			// state later.
- oldIovecs := append([]unix.Iovec(nil), iovecs...)
-
- // Test the views that get pulled.
- vv := b.pullViews(c.n)
- var lengths []int
- for _, v := range vv.Views() {
- lengths = append(lengths, len(v))
- }
- if !reflect.DeepEqual(lengths, c.wantLengths) {
- t.Errorf("Pulled view lengths = %v, want %v", lengths, c.wantLengths)
- }
-
- // Test that new views get reallocated.
- for i, newIov := range b.nextIovecs() {
- if i < c.wantUsed {
- if newIov.Base == oldIovecs[i].Base {
- t.Errorf("b.views[%d] should have been reallocated", i)
- }
- } else {
- if newIov.Base != oldIovecs[i].Base {
- t.Errorf("b.views[%d] should not have been reallocated", i)
- }
- }
- }
- })
- }
-}
-
-func TestIovecBufferSkipVnetHdr(t *testing.T) {
- for _, test := range []struct {
- desc string
- readN int
- wantLen int
- }{
- {
- desc: "nothing read",
- readN: 0,
- wantLen: 0,
- },
- {
- desc: "smaller than vnet header",
- readN: virtioNetHdrSize - 1,
- wantLen: 0,
- },
- {
- desc: "header skipped",
- readN: virtioNetHdrSize + 100,
- wantLen: 100,
- },
- } {
- t.Run(test.desc, func(t *testing.T) {
- b := newIovecBuffer([]int{10, 20, 50, 50}, true)
-			// Pretend a read happened.
- b.nextIovecs()
- vv := b.pullViews(test.readN)
- if got, want := vv.Size(), test.wantLen; got != want {
-				t.Errorf("b.pullViews(%d).Size() = %d; want %d", test.readN, got, want)
- }
- if got, want := len(vv.ToOwnedView()), test.wantLen; got != want {
-				t.Errorf("b.pullViews(%d).ToOwnedView() has length %d; want %d", test.readN, got, want)
- }
- })
- }
-}
-
-// fakeNetworkDispatcher delivers packets to pkts.
-type fakeNetworkDispatcher struct {
- pkts []*stack.PacketBuffer
-}
-
-func (d *fakeNetworkDispatcher) DeliverNetworkPacket(remote, local tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {
- d.pkts = append(d.pkts, pkt)
-}
-
-func (d *fakeNetworkDispatcher) DeliverOutboundPacket(remote, local tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {
- panic("unimplemented")
-}
-
-func TestDispatchPacketFormat(t *testing.T) {
- for _, test := range []struct {
- name string
- newDispatcher func(fd int, e *endpoint) (linkDispatcher, error)
- }{
- {
- name: "readVDispatcher",
- newDispatcher: newReadVDispatcher,
- },
- {
- name: "recvMMsgDispatcher",
- newDispatcher: newRecvMMsgDispatcher,
- },
- } {
- t.Run(test.name, func(t *testing.T) {
- // Create a socket pair to send/recv.
- fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_DGRAM, 0)
- if err != nil {
- t.Fatal(err)
- }
- defer unix.Close(fds[0])
- defer unix.Close(fds[1])
-
- data := []byte{
- // Ethernet header.
- 1, 2, 3, 4, 5, 60,
- 1, 2, 3, 4, 5, 61,
- 8, 0,
- // Mock network header.
- 40, 41, 42, 43,
- }
- err = unix.Sendmsg(fds[1], data, nil, nil, 0)
- if err != nil {
- t.Fatal(err)
- }
-
- // Create and run dispatcher once.
- sink := &fakeNetworkDispatcher{}
- d, err := test.newDispatcher(fds[0], &endpoint{
- hdrSize: header.EthernetMinimumSize,
- dispatcher: sink,
- })
- if err != nil {
- t.Fatal(err)
- }
- if ok, err := d.dispatch(); !ok || err != nil {
- t.Fatalf("d.dispatch() = %v, %v", ok, err)
- }
-
- // Verify packet.
- if got, want := len(sink.pkts), 1; got != want {
- t.Fatalf("len(sink.pkts) = %d, want %d", got, want)
- }
- pkt := sink.pkts[0]
- if got, want := pkt.LinkHeader().View().Size(), header.EthernetMinimumSize; got != want {
- t.Errorf("pkt.LinkHeader().View().Size() = %d, want %d", got, want)
- }
- if got, want := pkt.Data().Size(), 4; got != want {
- t.Errorf("pkt.Data().Size() = %d, want %d", got, want)
- }
- })
- }
-}
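The deleted endpoint_test.go above drives the fdbased link endpoint directly: it fills in an Options value (FDs, MTU, EthernetHeader, Address), calls New, and attaches a stack.NetworkDispatcher. Here is a minimal sketch of that setup, assuming only the fields and methods visible in the test; the socketpair stands in for a real TAP or AF_PACKET fd, and the dispatcher just logs deliveries.

package main

import (
	"log"

	"golang.org/x/sys/unix"
	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/link/fdbased"
	"gvisor.dev/gvisor/pkg/tcpip/stack"
)

// printDispatcher is a throwaway stack.NetworkDispatcher, mirroring the test
// context above: it only logs what the endpoint delivers.
type printDispatcher struct{}

func (*printDispatcher) DeliverNetworkPacket(remote, _ tcpip.LinkAddress, proto tcpip.NetworkProtocolNumber, _ *stack.PacketBuffer) {
	log.Printf("packet from %v, protocol %d", remote, proto)
}

func (*printDispatcher) DeliverOutboundPacket(_, _ tcpip.LinkAddress, _ tcpip.NetworkProtocolNumber, _ *stack.PacketBuffer) {
}

func main() {
	// A socketpair stands in for the host-side fd, as in the deleted tests.
	fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_SEQPACKET, 0)
	if err != nil {
		log.Fatalf("Socketpair: %v", err)
	}
	ep, err := fdbased.New(&fdbased.Options{
		FDs:            []int{fds[1]},
		MTU:            1500,
		EthernetHeader: true,
		Address:        tcpip.LinkAddress("\x02\x00\x00\x00\x00\x01"),
	})
	if err != nil {
		log.Fatalf("fdbased.New: %v", err)
	}
	// Attaching starts dispatching inbound frames to the dispatcher.
	ep.Attach(&printDispatcher{})
	_ = fds[0] // The peer fd would be driven by the host side.
}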
diff --git a/pkg/tcpip/link/fdbased/fdbased_state_autogen.go b/pkg/tcpip/link/fdbased/fdbased_state_autogen.go
new file mode 100644
index 000000000..b84e8f21c
--- /dev/null
+++ b/pkg/tcpip/link/fdbased/fdbased_state_autogen.go
@@ -0,0 +1,8 @@
+// automatically generated by stateify.
+
+// +build linux
+// +build linux,amd64 linux,arm64
+// +build !linux !amd64,!arm64
+// +build linux
+
+package fdbased
diff --git a/pkg/tcpip/link/fdbased/fdbased_unsafe_state_autogen.go b/pkg/tcpip/link/fdbased/fdbased_unsafe_state_autogen.go
new file mode 100644
index 000000000..e2ed505b2
--- /dev/null
+++ b/pkg/tcpip/link/fdbased/fdbased_unsafe_state_autogen.go
@@ -0,0 +1,6 @@
+// automatically generated by stateify.
+
+// +build linux
+// +build linux,amd64 linux,arm64
+
+package fdbased
diff --git a/pkg/tcpip/link/loopback/BUILD b/pkg/tcpip/link/loopback/BUILD
deleted file mode 100644
index 6bf3805b7..000000000
--- a/pkg/tcpip/link/loopback/BUILD
+++ /dev/null
@@ -1,15 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "loopback",
- srcs = ["loopback.go"],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/header",
- "//pkg/tcpip/stack",
- ],
-)
diff --git a/pkg/tcpip/link/loopback/loopback_state_autogen.go b/pkg/tcpip/link/loopback/loopback_state_autogen.go
new file mode 100644
index 000000000..c00fd9f19
--- /dev/null
+++ b/pkg/tcpip/link/loopback/loopback_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package loopback
diff --git a/pkg/tcpip/link/muxed/BUILD b/pkg/tcpip/link/muxed/BUILD
deleted file mode 100644
index 193524525..000000000
--- a/pkg/tcpip/link/muxed/BUILD
+++ /dev/null
@@ -1,29 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "muxed",
- srcs = ["injectable.go"],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/header",
- "//pkg/tcpip/stack",
- ],
-)
-
-go_test(
- name = "muxed_test",
- size = "small",
- srcs = ["injectable_test.go"],
- library = ":muxed",
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/link/fdbased",
- "//pkg/tcpip/network/ipv4",
- "//pkg/tcpip/stack",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/tcpip/link/muxed/injectable_test.go b/pkg/tcpip/link/muxed/injectable_test.go
deleted file mode 100644
index 040e3a35b..000000000
--- a/pkg/tcpip/link/muxed/injectable_test.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package muxed
-
-import (
- "bytes"
- "net"
- "os"
- "testing"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/link/fdbased"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
-)
-
-func TestInjectableEndpointRawDispatch(t *testing.T) {
- endpoint, sock, dstIP := makeTestInjectableEndpoint(t)
-
- endpoint.InjectOutbound(dstIP, []byte{0xFA})
-
- buf := make([]byte, ipv4.MaxTotalSize)
- bytesRead, err := sock.Read(buf)
- if err != nil {
- t.Fatalf("Unable to read from socketpair: %v", err)
- }
- if got, want := buf[:bytesRead], []byte{0xFA}; !bytes.Equal(got, want) {
- t.Fatalf("Read %v from the socketpair, wanted %v", got, want)
- }
-}
-
-func TestInjectableEndpointDispatch(t *testing.T) {
- endpoint, sock, dstIP := makeTestInjectableEndpoint(t)
-
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: 1,
- Data: buffer.NewViewFromBytes([]byte{0xFB}).ToVectorisedView(),
- })
- pkt.TransportHeader().Push(1)[0] = 0xFA
- var packetRoute stack.RouteInfo
- packetRoute.RemoteAddress = dstIP
-
- endpoint.WritePacket(packetRoute, ipv4.ProtocolNumber, pkt)
-
- buf := make([]byte, 6500)
- bytesRead, err := sock.Read(buf)
- if err != nil {
- t.Fatalf("Unable to read from socketpair: %v", err)
- }
- if got, want := buf[:bytesRead], []byte{0xFA, 0xFB}; !bytes.Equal(got, want) {
- t.Fatalf("Read %v from the socketpair, wanted %v", got, want)
- }
-}
-
-func TestInjectableEndpointDispatchHdrOnly(t *testing.T) {
- endpoint, sock, dstIP := makeTestInjectableEndpoint(t)
-
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: 1,
- Data: buffer.NewView(0).ToVectorisedView(),
- })
- pkt.TransportHeader().Push(1)[0] = 0xFA
- var packetRoute stack.RouteInfo
- packetRoute.RemoteAddress = dstIP
- endpoint.WritePacket(packetRoute, ipv4.ProtocolNumber, pkt)
- buf := make([]byte, 6500)
- bytesRead, err := sock.Read(buf)
- if err != nil {
- t.Fatalf("Unable to read from socketpair: %v", err)
- }
- if got, want := buf[:bytesRead], []byte{0xFA}; !bytes.Equal(got, want) {
- t.Fatalf("Read %v from the socketpair, wanted %v", got, want)
- }
-}
-
-func makeTestInjectableEndpoint(t *testing.T) (*InjectableEndpoint, *os.File, tcpip.Address) {
- dstIP := tcpip.Address(net.ParseIP("1.2.3.4").To4())
- pair, err := unix.Socketpair(unix.AF_UNIX,
- unix.SOCK_SEQPACKET|unix.SOCK_CLOEXEC|unix.SOCK_NONBLOCK, 0)
- if err != nil {
- t.Fatal("Failed to create socket pair:", err)
- }
- underlyingEndpoint := fdbased.NewInjectable(pair[1], 6500, stack.CapabilityNone)
- routes := map[tcpip.Address]stack.InjectableLinkEndpoint{dstIP: underlyingEndpoint}
- endpoint := NewInjectableEndpoint(routes)
- return endpoint, os.NewFile(uintptr(pair[0]), "test route end"), dstIP
-}
diff --git a/pkg/tcpip/link/muxed/muxed_state_autogen.go b/pkg/tcpip/link/muxed/muxed_state_autogen.go
new file mode 100644
index 000000000..56330e2a5
--- /dev/null
+++ b/pkg/tcpip/link/muxed/muxed_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package muxed
diff --git a/pkg/tcpip/link/nested/BUILD b/pkg/tcpip/link/nested/BUILD
deleted file mode 100644
index 00b42b924..000000000
--- a/pkg/tcpip/link/nested/BUILD
+++ /dev/null
@@ -1,31 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "nested",
- srcs = [
- "nested.go",
- ],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/sync",
- "//pkg/tcpip",
- "//pkg/tcpip/header",
- "//pkg/tcpip/stack",
- ],
-)
-
-go_test(
- name = "nested_test",
- size = "small",
- srcs = [
- "nested_test.go",
- ],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/header",
- "//pkg/tcpip/link/nested",
- "//pkg/tcpip/stack",
- ],
-)
diff --git a/pkg/tcpip/link/nested/nested_state_autogen.go b/pkg/tcpip/link/nested/nested_state_autogen.go
new file mode 100644
index 000000000..9e1b5ca4e
--- /dev/null
+++ b/pkg/tcpip/link/nested/nested_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package nested
diff --git a/pkg/tcpip/link/nested/nested_test.go b/pkg/tcpip/link/nested/nested_test.go
deleted file mode 100644
index c1f9d308c..000000000
--- a/pkg/tcpip/link/nested/nested_test.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nested_test
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/link/nested"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
-)
-
-type parentEndpoint struct {
- nested.Endpoint
-}
-
-var _ stack.LinkEndpoint = (*parentEndpoint)(nil)
-var _ stack.NetworkDispatcher = (*parentEndpoint)(nil)
-
-type childEndpoint struct {
- stack.LinkEndpoint
- dispatcher stack.NetworkDispatcher
-}
-
-var _ stack.LinkEndpoint = (*childEndpoint)(nil)
-
-func (c *childEndpoint) Attach(dispatcher stack.NetworkDispatcher) {
- c.dispatcher = dispatcher
-}
-
-func (c *childEndpoint) IsAttached() bool {
- return c.dispatcher != nil
-}
-
-type counterDispatcher struct {
- count int
-}
-
-var _ stack.NetworkDispatcher = (*counterDispatcher)(nil)
-
-func (d *counterDispatcher) DeliverNetworkPacket(tcpip.LinkAddress, tcpip.LinkAddress, tcpip.NetworkProtocolNumber, *stack.PacketBuffer) {
- d.count++
-}
-
-func (d *counterDispatcher) DeliverOutboundPacket(tcpip.LinkAddress, tcpip.LinkAddress, tcpip.NetworkProtocolNumber, *stack.PacketBuffer) {
- panic("unimplemented")
-}
-
-func TestNestedLinkEndpoint(t *testing.T) {
- const emptyAddress = tcpip.LinkAddress("")
-
- var (
- childEP childEndpoint
- nestedEP parentEndpoint
- disp counterDispatcher
- )
- nestedEP.Endpoint.Init(&childEP, &nestedEP)
-
- if childEP.IsAttached() {
- t.Error("On init, childEP.IsAttached() = true, want = false")
- }
- if nestedEP.IsAttached() {
- t.Error("On init, nestedEP.IsAttached() = true, want = false")
- }
-
- nestedEP.Attach(&disp)
- if disp.count != 0 {
- t.Fatalf("After attach, got disp.count = %d, want = 0", disp.count)
- }
- if !childEP.IsAttached() {
- t.Error("After attach, childEP.IsAttached() = false, want = true")
- }
- if !nestedEP.IsAttached() {
- t.Error("After attach, nestedEP.IsAttached() = false, want = true")
- }
-
- nestedEP.DeliverNetworkPacket(emptyAddress, emptyAddress, header.IPv4ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{}))
- if disp.count != 1 {
- t.Errorf("After first packet with dispatcher attached, got disp.count = %d, want = 1", disp.count)
- }
-
- nestedEP.Attach(nil)
- if childEP.IsAttached() {
- t.Error("After detach, childEP.IsAttached() = true, want = false")
- }
- if nestedEP.IsAttached() {
- t.Error("After detach, nestedEP.IsAttached() = true, want = false")
- }
-
- disp.count = 0
- nestedEP.DeliverNetworkPacket(emptyAddress, emptyAddress, header.IPv4ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{}))
- if disp.count != 0 {
- t.Errorf("After second packet with dispatcher detached, got disp.count = %d, want = 0", disp.count)
- }
-
-}
diff --git a/pkg/tcpip/link/packetsocket/BUILD b/pkg/tcpip/link/packetsocket/BUILD
deleted file mode 100644
index 6fff160ce..000000000
--- a/pkg/tcpip/link/packetsocket/BUILD
+++ /dev/null
@@ -1,14 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "packetsocket",
- srcs = ["endpoint.go"],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/link/nested",
- "//pkg/tcpip/stack",
- ],
-)
diff --git a/pkg/tcpip/link/packetsocket/packetsocket_state_autogen.go b/pkg/tcpip/link/packetsocket/packetsocket_state_autogen.go
new file mode 100644
index 000000000..6b3221fd8
--- /dev/null
+++ b/pkg/tcpip/link/packetsocket/packetsocket_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package packetsocket
diff --git a/pkg/tcpip/link/pipe/BUILD b/pkg/tcpip/link/pipe/BUILD
deleted file mode 100644
index 9f31c1ffc..000000000
--- a/pkg/tcpip/link/pipe/BUILD
+++ /dev/null
@@ -1,15 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "pipe",
- srcs = ["pipe.go"],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/header",
- "//pkg/tcpip/stack",
- ],
-)
diff --git a/pkg/tcpip/link/pipe/pipe_state_autogen.go b/pkg/tcpip/link/pipe/pipe_state_autogen.go
new file mode 100644
index 000000000..d3b40feb4
--- /dev/null
+++ b/pkg/tcpip/link/pipe/pipe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package pipe
diff --git a/pkg/tcpip/link/qdisc/fifo/BUILD b/pkg/tcpip/link/qdisc/fifo/BUILD
deleted file mode 100644
index 5bea598eb..000000000
--- a/pkg/tcpip/link/qdisc/fifo/BUILD
+++ /dev/null
@@ -1,19 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "fifo",
- srcs = [
- "endpoint.go",
- "packet_buffer_queue.go",
- ],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/sleep",
- "//pkg/sync",
- "//pkg/tcpip",
- "//pkg/tcpip/header",
- "//pkg/tcpip/stack",
- ],
-)
diff --git a/pkg/tcpip/link/qdisc/fifo/fifo_state_autogen.go b/pkg/tcpip/link/qdisc/fifo/fifo_state_autogen.go
new file mode 100644
index 000000000..9eb52b1cb
--- /dev/null
+++ b/pkg/tcpip/link/qdisc/fifo/fifo_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package fifo
diff --git a/pkg/tcpip/link/rawfile/BUILD b/pkg/tcpip/link/rawfile/BUILD
deleted file mode 100644
index 4efd7c45e..000000000
--- a/pkg/tcpip/link/rawfile/BUILD
+++ /dev/null
@@ -1,33 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "rawfile",
- srcs = [
- "blockingpoll_amd64.s",
- "blockingpoll_arm64.s",
- "blockingpoll_noyield_unsafe.go",
- "blockingpoll_yield_unsafe.go",
- "errors.go",
- "rawfile_unsafe.go",
- ],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/tcpip",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "rawfile_test",
- srcs = [
- "errors_test.go",
- ],
- library = "rawfile",
- deps = [
- "//pkg/tcpip",
- "@com_github_google_go_cmp//cmp:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/tcpip/link/rawfile/errors_test.go b/pkg/tcpip/link/rawfile/errors_test.go
deleted file mode 100644
index 1b88c309b..000000000
--- a/pkg/tcpip/link/rawfile/errors_test.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build linux
-// +build linux
-
-package rawfile
-
-import (
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip"
-)
-
-func TestTranslateErrno(t *testing.T) {
- for _, test := range []struct {
- errno unix.Errno
- translated tcpip.Error
- }{
- {
- errno: unix.Errno(0),
- translated: &tcpip.ErrInvalidEndpointState{},
- },
- {
- errno: unix.Errno(maxErrno),
- translated: &tcpip.ErrInvalidEndpointState{},
- },
- {
- errno: unix.Errno(514),
- translated: &tcpip.ErrInvalidEndpointState{},
- },
- {
- errno: unix.EEXIST,
- translated: &tcpip.ErrDuplicateAddress{},
- },
- } {
- got := TranslateErrno(test.errno)
- if diff := cmp.Diff(test.translated, got); diff != "" {
- t.Errorf("unexpected result from TranslateErrno(%q), (-want, +got):\n%s", test.errno, diff)
- }
- }
-}
diff --git a/pkg/tcpip/link/rawfile/rawfile_state_autogen.go b/pkg/tcpip/link/rawfile/rawfile_state_autogen.go
new file mode 100644
index 000000000..338e9679b
--- /dev/null
+++ b/pkg/tcpip/link/rawfile/rawfile_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build linux
+
+package rawfile
diff --git a/pkg/tcpip/link/rawfile/rawfile_unsafe_state_autogen.go b/pkg/tcpip/link/rawfile/rawfile_unsafe_state_autogen.go
new file mode 100644
index 000000000..f5eace013
--- /dev/null
+++ b/pkg/tcpip/link/rawfile/rawfile_unsafe_state_autogen.go
@@ -0,0 +1,9 @@
+// automatically generated by stateify.
+
+// +build linux,!amd64,!arm64
+// +build linux,amd64 linux,arm64
+// +build go1.12
+// +build !go1.18
+// +build linux
+
+package rawfile
diff --git a/pkg/tcpip/link/sharedmem/BUILD b/pkg/tcpip/link/sharedmem/BUILD
deleted file mode 100644
index 4215ee852..000000000
--- a/pkg/tcpip/link/sharedmem/BUILD
+++ /dev/null
@@ -1,43 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "sharedmem",
- srcs = [
- "rx.go",
- "sharedmem.go",
- "sharedmem_unsafe.go",
- "tx.go",
- ],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/log",
- "//pkg/sync",
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/header",
- "//pkg/tcpip/link/rawfile",
- "//pkg/tcpip/link/sharedmem/queue",
- "//pkg/tcpip/stack",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "sharedmem_test",
- srcs = [
- "sharedmem_test.go",
- ],
- library = ":sharedmem",
- deps = [
- "//pkg/sync",
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/header",
- "//pkg/tcpip/link/sharedmem/pipe",
- "//pkg/tcpip/link/sharedmem/queue",
- "//pkg/tcpip/stack",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/tcpip/link/sharedmem/pipe/BUILD b/pkg/tcpip/link/sharedmem/pipe/BUILD
deleted file mode 100644
index 87020ec08..000000000
--- a/pkg/tcpip/link/sharedmem/pipe/BUILD
+++ /dev/null
@@ -1,23 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "pipe",
- srcs = [
- "pipe.go",
- "pipe_unsafe.go",
- "rx.go",
- "tx.go",
- ],
- visibility = ["//visibility:public"],
-)
-
-go_test(
- name = "pipe_test",
- srcs = [
- "pipe_test.go",
- ],
- library = ":pipe",
- deps = ["//pkg/sync"],
-)
diff --git a/pkg/tcpip/link/sharedmem/pipe/pipe_state_autogen.go b/pkg/tcpip/link/sharedmem/pipe/pipe_state_autogen.go
new file mode 100644
index 000000000..d3b40feb4
--- /dev/null
+++ b/pkg/tcpip/link/sharedmem/pipe/pipe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package pipe
diff --git a/pkg/tcpip/link/sharedmem/pipe/pipe_test.go b/pkg/tcpip/link/sharedmem/pipe/pipe_test.go
deleted file mode 100644
index 2777f1411..000000000
--- a/pkg/tcpip/link/sharedmem/pipe/pipe_test.go
+++ /dev/null
@@ -1,512 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pipe
-
-import (
- "math/rand"
- "reflect"
- "runtime"
- "testing"
-
- "gvisor.dev/gvisor/pkg/sync"
-)
-
-func TestSimpleReadWrite(t *testing.T) {
- // Check that a simple write can be properly read from the rx side.
- tr := rand.New(rand.NewSource(99))
- rr := rand.New(rand.NewSource(99))
-
- b := make([]byte, 100)
- var tx Tx
- tx.Init(b)
-
- wb := tx.Push(10)
- if wb == nil {
- t.Fatalf("Push failed on empty pipe")
- }
- for i := range wb {
- wb[i] = byte(tr.Intn(256))
- }
- tx.Flush()
-
- var rx Rx
- rx.Init(b)
- rb := rx.Pull()
- if len(rb) != 10 {
- t.Fatalf("Bad buffer size returned: got %v, want %v", len(rb), 10)
- }
-
- for i := range rb {
- if v := byte(rr.Intn(256)); v != rb[i] {
- t.Fatalf("Bad read buffer at index %v: got %v, want %v", i, rb[i], v)
- }
- }
- rx.Flush()
-}
-
-func TestEmptyRead(t *testing.T) {
- // Check that pulling from an empty pipe fails.
- b := make([]byte, 100)
- var tx Tx
- tx.Init(b)
-
- var rx Rx
- rx.Init(b)
- if rb := rx.Pull(); rb != nil {
- t.Fatalf("Pull succeeded on empty pipe")
- }
-}
-
-func TestTooLargeWrite(t *testing.T) {
- // Check that writes that are too large are properly rejected.
- b := make([]byte, 96)
- var tx Tx
- tx.Init(b)
-
- if wb := tx.Push(96); wb != nil {
- t.Fatalf("Write of 96 bytes succeeded on 96-byte pipe")
- }
-
- if wb := tx.Push(88); wb != nil {
- t.Fatalf("Write of 88 bytes succeeded on 96-byte pipe")
- }
-
- if wb := tx.Push(80); wb == nil {
- t.Fatalf("Write of 80 bytes failed on 96-byte pipe")
- }
-}
-
-func TestFullWrite(t *testing.T) {
- // Check that writes fail when the pipe is full.
- b := make([]byte, 100)
- var tx Tx
- tx.Init(b)
-
- if wb := tx.Push(80); wb == nil {
- t.Fatalf("Write of 80 bytes failed on 96-byte pipe")
- }
-
- if wb := tx.Push(1); wb != nil {
- t.Fatalf("Write succeeded on full pipe")
- }
-}
-
-func TestFullAndFlushedWrite(t *testing.T) {
- // Check that writes fail when the pipe is full and has already been
- // flushed.
- b := make([]byte, 100)
- var tx Tx
- tx.Init(b)
-
- if wb := tx.Push(80); wb == nil {
- t.Fatalf("Write of 80 bytes failed on 96-byte pipe")
- }
-
- tx.Flush()
-
- if wb := tx.Push(1); wb != nil {
- t.Fatalf("Write succeeded on full pipe")
- }
-}
-
-func TestTxFlushTwice(t *testing.T) {
- // Checks that a second consecutive tx flush is a no-op.
- b := make([]byte, 100)
- var tx Tx
- tx.Init(b)
-
- if wb := tx.Push(50); wb == nil {
- t.Fatalf("Push failed on empty pipe")
- }
- tx.Flush()
-
- // Make copy of original tx queue, flush it, then check that it didn't
- // change.
- orig := tx
- tx.Flush()
-
- if !reflect.DeepEqual(orig, tx) {
- t.Fatalf("Flush mutated tx pipe: got %v, want %v", tx, orig)
- }
-}
-
-func TestRxFlushTwice(t *testing.T) {
- // Checks that a second consecutive rx flush is a no-op.
- b := make([]byte, 100)
- var tx Tx
- tx.Init(b)
-
- if wb := tx.Push(50); wb == nil {
- t.Fatalf("Push failed on empty pipe")
- }
- tx.Flush()
-
- var rx Rx
- rx.Init(b)
- if rb := rx.Pull(); rb == nil {
- t.Fatalf("Pull failed on non-empty pipe")
- }
- rx.Flush()
-
- // Make copy of original rx queue, flush it, then check that it didn't
- // change.
- orig := rx
- rx.Flush()
-
- if !reflect.DeepEqual(orig, rx) {
- t.Fatalf("Flush mutated rx pipe: got %v, want %v", rx, orig)
- }
-}
-
-func TestWrapInMiddleOfTransaction(t *testing.T) {
- // Check that writes are not flushed when we need to wrap the buffer
- // around.
- b := make([]byte, 100)
- var tx Tx
- tx.Init(b)
-
- if wb := tx.Push(50); wb == nil {
- t.Fatalf("Push failed on empty pipe")
- }
- tx.Flush()
-
- var rx Rx
- rx.Init(b)
- if rb := rx.Pull(); rb == nil {
- t.Fatalf("Pull failed on non-empty pipe")
- }
- rx.Flush()
-
- // At this point the ring buffer is empty, but the write is at offset
- // 64 (50 + sizeOfSlotHeader + padding-for-8-byte-alignment).
- if wb := tx.Push(10); wb == nil {
- t.Fatalf("Push failed on empty pipe")
- }
-
- if wb := tx.Push(50); wb == nil {
- t.Fatalf("Push failed on non-full pipe")
- }
-
- // We haven't flushed yet, so pull must return nil.
- if rb := rx.Pull(); rb != nil {
- t.Fatalf("Pull succeeded on non-flushed pipe")
- }
-
- tx.Flush()
-
- // The two buffers must be available now.
- if rb := rx.Pull(); rb == nil {
- t.Fatalf("Pull failed on non-empty pipe")
- }
-
- if rb := rx.Pull(); rb == nil {
- t.Fatalf("Pull failed on non-empty pipe")
- }
-}
-
-func TestWriteAbort(t *testing.T) {
- // Check that a read fails on a pipe that has had data pushed to it but
- // has aborted the push.
- b := make([]byte, 100)
- var tx Tx
- tx.Init(b)
-
- if wb := tx.Push(10); wb == nil {
- t.Fatalf("Write failed on empty pipe")
- }
-
- var rx Rx
- rx.Init(b)
- if rb := rx.Pull(); rb != nil {
- t.Fatalf("Pull succeeded on empty pipe")
- }
-
- tx.Abort()
- if rb := rx.Pull(); rb != nil {
- t.Fatalf("Pull succeeded on empty pipe")
- }
-}
-
-func TestWrappedWriteAbort(t *testing.T) {
- // Check that writes are properly aborted even if the writes wrap
- // around.
- b := make([]byte, 100)
- var tx Tx
- tx.Init(b)
-
- if wb := tx.Push(50); wb == nil {
- t.Fatalf("Push failed on empty pipe")
- }
- tx.Flush()
-
- var rx Rx
- rx.Init(b)
- if rb := rx.Pull(); rb == nil {
- t.Fatalf("Pull failed on non-empty pipe")
- }
- rx.Flush()
-
- // At this point the ring buffer is empty, but the write is at offset
- // 64 (50 + sizeOfSlotHeader + padding-for-8-byte-alignment).
- if wb := tx.Push(10); wb == nil {
- t.Fatalf("Push failed on empty pipe")
- }
-
- if wb := tx.Push(50); wb == nil {
- t.Fatalf("Push failed on non-full pipe")
- }
-
- // We haven't flushed yet, so pull must return nil.
- if rb := rx.Pull(); rb != nil {
- t.Fatalf("Pull succeeded on non-flushed pipe")
- }
-
- tx.Abort()
-
- // The pushes were aborted, so no data should be readable.
- if rb := rx.Pull(); rb != nil {
- t.Fatalf("Pull succeeded on non-flushed pipe")
- }
-
- // Try the same transactions again, but flush this time.
- if wb := tx.Push(10); wb == nil {
- t.Fatalf("Push failed on empty pipe")
- }
-
- if wb := tx.Push(50); wb == nil {
- t.Fatalf("Push failed on non-full pipe")
- }
-
- tx.Flush()
-
- // The two buffers must be available now.
- if rb := rx.Pull(); rb == nil {
- t.Fatalf("Pull failed on non-empty pipe")
- }
-
- if rb := rx.Pull(); rb == nil {
- t.Fatalf("Pull failed on non-empty pipe")
- }
-}
-
-func TestEmptyReadOnNonFlushedWrite(t *testing.T) {
- // Check that a read fails on a pipe that has had data pushed to it
- // but not yet flushed.
- b := make([]byte, 100)
- var tx Tx
- tx.Init(b)
-
- if wb := tx.Push(10); wb == nil {
- t.Fatalf("Write failed on empty pipe")
- }
-
- var rx Rx
- rx.Init(b)
- if rb := rx.Pull(); rb != nil {
- t.Fatalf("Pull succeeded on empty pipe")
- }
-
- tx.Flush()
- if rb := rx.Pull(); rb == nil {
-		t.Fatalf("Pull failed on non-empty pipe")
- }
-}
-
-func TestPullAfterPullingEntirePipe(t *testing.T) {
-	// Check that Pull fails when the pipe is full and all of it has
-	// already been pulled but not yet flushed.
- b := make([]byte, 100)
- var tx Tx
- tx.Init(b)
-
- if wb := tx.Push(50); wb == nil {
- t.Fatalf("Push failed on empty pipe")
- }
- tx.Flush()
-
- var rx Rx
- rx.Init(b)
- if rb := rx.Pull(); rb == nil {
- t.Fatalf("Pull failed on non-empty pipe")
- }
- rx.Flush()
-
- // At this point the ring buffer is empty, but the write is at offset
- // 64 (50 + sizeOfSlotHeader + padding-for-8-byte-alignment). Write 3
- // buffers that will fill the pipe.
- if wb := tx.Push(10); wb == nil {
- t.Fatalf("Push failed on empty pipe")
- }
-
- if wb := tx.Push(20); wb == nil {
- t.Fatalf("Push failed on non-full pipe")
- }
-
- if wb := tx.Push(24); wb == nil {
- t.Fatalf("Push failed on non-full pipe")
- }
-
- tx.Flush()
-
- // The three buffers must be available now.
- if rb := rx.Pull(); rb == nil {
- t.Fatalf("Pull failed on non-empty pipe")
- }
-
- if rb := rx.Pull(); rb == nil {
- t.Fatalf("Pull failed on non-empty pipe")
- }
-
- if rb := rx.Pull(); rb == nil {
- t.Fatalf("Pull failed on non-empty pipe")
- }
-
- // Fourth pull must fail.
- if rb := rx.Pull(); rb != nil {
- t.Fatalf("Pull succeeded on empty pipe")
- }
-}
-
-func TestNoRoomToWrapOnPush(t *testing.T) {
- // Check that Push fails when it tries to allocate room to add a wrap
- // message.
- b := make([]byte, 100)
- var tx Tx
- tx.Init(b)
-
- if wb := tx.Push(50); wb == nil {
- t.Fatalf("Push failed on empty pipe")
- }
- tx.Flush()
-
- var rx Rx
- rx.Init(b)
- if rb := rx.Pull(); rb == nil {
- t.Fatalf("Pull failed on non-empty pipe")
- }
- rx.Flush()
-
- // At this point the ring buffer is empty, but the write is at offset
- // 64 (50 + sizeOfSlotHeader + padding-for-8-byte-alignment). Write 20,
- // which won't fit (64+20+8+padding = 96, which wouldn't leave room for
- // the padding), so it wraps around.
- if wb := tx.Push(20); wb == nil {
- t.Fatalf("Push failed on empty pipe")
- }
-
- tx.Flush()
-
- // Buffer offset is at 28. Try to write 70, which would require a wrap
- // slot which cannot be created now.
- if wb := tx.Push(70); wb != nil {
- t.Fatalf("Push succeeded on pipe with no room for wrap message")
- }
-}
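The offsets quoted in the comments above (a 50-byte push landing the write position at 64, and a 20-byte push costing 64+20+8+padding = 96) follow from the slot layout these tests assume: an 8-byte slot header plus the payload, rounded up to an 8-byte boundary. A minimal sketch of that arithmetic, using a hypothetical helper name rather than anything exported by the pipe package:

// paddedSlotSize mirrors the layout the tests above rely on: an assumed
// 8-byte slot header followed by the payload, rounded up to a multiple of 8.
func paddedSlotSize(payload uint64) uint64 {
	const sizeOfSlotHeader = 8 // assumption, implied by the "50 -> offset 64" comments
	return (payload + sizeOfSlotHeader + 7) &^ 7
}

// paddedSlotSize(50) == 64 and 64+paddedSlotSize(20) == 96, matching the
// offsets referenced in TestWrappedWriteAbort and TestNoRoomToWrapOnPush.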
-
-func TestRxImplicitFlushOfWrapMessage(t *testing.T) {
-	// Check that if the first read is of a wrapping message, it gets
-	// flushed immediately.
- b := make([]byte, 100)
- var tx Tx
- tx.Init(b)
-
- if wb := tx.Push(50); wb == nil {
- t.Fatalf("Push failed on empty pipe")
- }
- tx.Flush()
-
-	// This will cause a wrapping message to be written.
- if wb := tx.Push(60); wb != nil {
- t.Fatalf("Push succeeded when there is no room in pipe")
- }
-
- var rx Rx
- rx.Init(b)
-
- // Read the first message.
- if rb := rx.Pull(); rb == nil {
- t.Fatalf("Pull failed on non-empty pipe")
- }
- rx.Flush()
-
-	// This should fail because the wrapping message is taking up space.
- if wb := tx.Push(60); wb != nil {
- t.Fatalf("Push succeeded when there is no room in pipe")
- }
-
- // Try to read the next one. This should consume the wrapping message.
- rx.Pull()
-
- // This must now succeed.
- if wb := tx.Push(60); wb == nil {
- t.Fatalf("Push failed on empty pipe")
- }
-}
-
-func TestConcurrentReaderWriter(t *testing.T) {
- // Push a million buffers of random sizes and random contents. Check
- // that buffers read match what was written.
- tr := rand.New(rand.NewSource(99))
- rr := rand.New(rand.NewSource(99))
-
- b := make([]byte, 100)
- var tx Tx
- tx.Init(b)
-
- var rx Rx
- rx.Init(b)
-
- const count = 1000000
- var wg sync.WaitGroup
- defer wg.Wait()
- wg.Add(1)
- go func() {
- defer wg.Done()
- runtime.Gosched()
- for i := 0; i < count; i++ {
- n := 1 + tr.Intn(80)
- wb := tx.Push(uint64(n))
- for wb == nil {
- wb = tx.Push(uint64(n))
- }
-
- for j := range wb {
- wb[j] = byte(tr.Intn(256))
- }
-
- tx.Flush()
- }
- }()
-
- for i := 0; i < count; i++ {
- n := 1 + rr.Intn(80)
- rb := rx.Pull()
- for rb == nil {
- rb = rx.Pull()
- }
-
- if n != len(rb) {
- t.Fatalf("Bad %v-th buffer length: got %v, want %v", i, len(rb), n)
- }
-
- for j := range rb {
- if v := byte(rr.Intn(256)); v != rb[j] {
- t.Fatalf("Bad %v-th read buffer at index %v: got %v, want %v", i, j, rb[j], v)
- }
- }
-
- rx.Flush()
- }
-}
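TestConcurrentReaderWriter above leans on a simple trick: seeding two independent math/rand generators identically, so the reader can recompute the exact sizes and bytes the writer produced without any side channel. A standalone illustration of that property (names here are illustrative only):

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	// Two generators with the same seed yield the same stream, so a reader
	// can independently reproduce whatever a similarly seeded writer chose.
	w := rand.New(rand.NewSource(99))
	r := rand.New(rand.NewSource(99))
	for i := 0; i < 5; i++ {
		fmt.Println(w.Intn(80) == r.Intn(80)) // prints true every time
	}
}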
diff --git a/pkg/tcpip/link/sharedmem/pipe/pipe_unsafe_state_autogen.go b/pkg/tcpip/link/sharedmem/pipe/pipe_unsafe_state_autogen.go
new file mode 100644
index 000000000..d3b40feb4
--- /dev/null
+++ b/pkg/tcpip/link/sharedmem/pipe/pipe_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package pipe
diff --git a/pkg/tcpip/link/sharedmem/queue/BUILD b/pkg/tcpip/link/sharedmem/queue/BUILD
deleted file mode 100644
index 3ba06af73..000000000
--- a/pkg/tcpip/link/sharedmem/queue/BUILD
+++ /dev/null
@@ -1,27 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "queue",
- srcs = [
- "rx.go",
- "tx.go",
- ],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/log",
- "//pkg/tcpip/link/sharedmem/pipe",
- ],
-)
-
-go_test(
- name = "queue_test",
- srcs = [
- "queue_test.go",
- ],
- library = ":queue",
- deps = [
- "//pkg/tcpip/link/sharedmem/pipe",
- ],
-)
diff --git a/pkg/tcpip/link/sharedmem/queue/queue_state_autogen.go b/pkg/tcpip/link/sharedmem/queue/queue_state_autogen.go
new file mode 100644
index 000000000..563d4fbb4
--- /dev/null
+++ b/pkg/tcpip/link/sharedmem/queue/queue_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package queue
diff --git a/pkg/tcpip/link/sharedmem/queue/queue_test.go b/pkg/tcpip/link/sharedmem/queue/queue_test.go
deleted file mode 100644
index 9a0aad5d7..000000000
--- a/pkg/tcpip/link/sharedmem/queue/queue_test.go
+++ /dev/null
@@ -1,517 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package queue
-
-import (
- "encoding/binary"
- "reflect"
- "testing"
-
- "gvisor.dev/gvisor/pkg/tcpip/link/sharedmem/pipe"
-)
-
-func TestBasicTxQueue(t *testing.T) {
- // Tests that a basic transmit on a queue works, and that completion
- // gets properly reported as well.
- pb1 := make([]byte, 100)
- pb2 := make([]byte, 100)
-
- var rxp pipe.Rx
- rxp.Init(pb1)
-
- var txp pipe.Tx
- txp.Init(pb2)
-
- var q Tx
- q.Init(pb1, pb2)
-
- // Enqueue two buffers.
- b := []TxBuffer{
- {nil, 100, 60},
- {nil, 200, 40},
- }
-
- b[0].Next = &b[1]
-
- const usedID = 1002
- const usedTotalSize = 100
- if !q.Enqueue(usedID, usedTotalSize, 2, &b[0]) {
- t.Fatalf("Enqueue failed on empty queue")
- }
-
- // Check the contents of the pipe.
- d := rxp.Pull()
- if d == nil {
- t.Fatalf("Tx pipe is empty after Enqueue")
- }
-
- want := []byte{
- 234, 3, 0, 0, 0, 0, 0, 0, // id
- 100, 0, 0, 0, // total size
- 0, 0, 0, 0, // reserved
- 100, 0, 0, 0, 0, 0, 0, 0, // offset 1
- 60, 0, 0, 0, // size 1
- 200, 0, 0, 0, 0, 0, 0, 0, // offset 2
- 40, 0, 0, 0, // size 2
- }
-
- if !reflect.DeepEqual(want, d) {
- t.Fatalf("Bad posted packet: got %v, want %v", d, want)
- }
-
- rxp.Flush()
-
- // Check that there are no completions yet.
- if _, ok := q.CompletedPacket(); ok {
- t.Fatalf("Packet reported as completed too soon")
- }
-
- // Post a completion.
- d = txp.Push(8)
- if d == nil {
- t.Fatalf("Unable to push to rx pipe")
- }
- binary.LittleEndian.PutUint64(d, usedID)
- txp.Flush()
-
- // Check that completion is properly reported.
- id, ok := q.CompletedPacket()
- if !ok {
- t.Fatalf("Completion not reported")
- }
-
- if id != usedID {
- t.Fatalf("Bad completion id: got %v, want %v", id, usedID)
- }
-}
-
-func TestBasicRxQueue(t *testing.T) {
- // Tests that a basic receive on a queue works.
- pb1 := make([]byte, 100)
- pb2 := make([]byte, 100)
-
- var rxp pipe.Rx
- rxp.Init(pb1)
-
- var txp pipe.Tx
- txp.Init(pb2)
-
- var q Rx
- q.Init(pb1, pb2, nil)
-
- // Post two buffers.
- b := []RxBuffer{
- {100, 60, 1077, 0},
- {200, 40, 2123, 0},
- }
-
- if !q.PostBuffers(b) {
- t.Fatalf("PostBuffers failed on empty queue")
- }
-
- // Check the contents of the pipe.
- want := [][]byte{
- {
- 100, 0, 0, 0, 0, 0, 0, 0, // Offset1
- 60, 0, 0, 0, // Size1
- 0, 0, 0, 0, // Remaining in group 1
- 0, 0, 0, 0, 0, 0, 0, 0, // User data 1
- 53, 4, 0, 0, 0, 0, 0, 0, // ID 1
- },
- {
- 200, 0, 0, 0, 0, 0, 0, 0, // Offset2
- 40, 0, 0, 0, // Size2
- 0, 0, 0, 0, // Remaining in group 2
- 0, 0, 0, 0, 0, 0, 0, 0, // User data 2
- 75, 8, 0, 0, 0, 0, 0, 0, // ID 2
- },
- }
-
- for i := range b {
- d := rxp.Pull()
- if d == nil {
- t.Fatalf("Tx pipe is empty after PostBuffers")
- }
-
- if !reflect.DeepEqual(want[i], d) {
- t.Fatalf("Bad posted packet: got %v, want %v", d, want[i])
- }
-
- rxp.Flush()
- }
-
- // Check that there are no completions.
- if _, n := q.Dequeue(nil); n != 0 {
- t.Fatalf("Packet reported as received too soon")
- }
-
- // Post a completion.
- d := txp.Push(sizeOfConsumedPacketHeader + 2*sizeOfConsumedBuffer)
- if d == nil {
- t.Fatalf("Unable to push to rx pipe")
- }
-
- copy(d, []byte{
- 100, 0, 0, 0, // packet size
- 0, 0, 0, 0, // reserved
-
- 100, 0, 0, 0, 0, 0, 0, 0, // offset 1
- 60, 0, 0, 0, // size 1
- 0, 0, 0, 0, 0, 0, 0, 0, // user data 1
- 53, 4, 0, 0, 0, 0, 0, 0, // ID 1
-
- 200, 0, 0, 0, 0, 0, 0, 0, // offset 2
- 40, 0, 0, 0, // size 2
- 0, 0, 0, 0, 0, 0, 0, 0, // user data 2
- 75, 8, 0, 0, 0, 0, 0, 0, // ID 2
- })
-
- txp.Flush()
-
- // Check that completion is properly reported.
- bufs, n := q.Dequeue(nil)
- if n != 100 {
- t.Fatalf("Bad packet size: got %v, want %v", n, 100)
- }
-
- if !reflect.DeepEqual(bufs, b) {
- t.Fatalf("Bad returned buffers: got %v, want %v", bufs, b)
- }
-}
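The hand-written byte slices above spell out the queue descriptors in little-endian order, which is why ID 1002 appears as 234, 3 and IDs 1077 and 2123 appear as 53, 4 and 75, 8. A short, self-contained sketch of deriving those expectations with encoding/binary instead of by hand:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Expand the IDs used by the queue tests into their little-endian form:
	// 1002 -> [234 3 0 0 0 0 0 0], 1077 -> [53 4 ...], 2123 -> [75 8 ...].
	for _, id := range []uint64{1002, 1077, 2123} {
		var b [8]byte
		binary.LittleEndian.PutUint64(b[:], id)
		fmt.Println(id, b)
	}
}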
-
-func TestBadTxCompletion(t *testing.T) {
- // Check that tx completions with bad sizes are properly ignored.
- pb1 := make([]byte, 100)
- pb2 := make([]byte, 100)
-
- var rxp pipe.Rx
- rxp.Init(pb1)
-
- var txp pipe.Tx
- txp.Init(pb2)
-
- var q Tx
- q.Init(pb1, pb2)
-
- // Post a completion that is too short, and check that it is ignored.
- if d := txp.Push(7); d == nil {
- t.Fatalf("Unable to push to rx pipe")
- }
- txp.Flush()
-
- if _, ok := q.CompletedPacket(); ok {
- t.Fatalf("Bad completion not ignored")
- }
-
- // Post a completion that is too long, and check that it is ignored.
- if d := txp.Push(10); d == nil {
- t.Fatalf("Unable to push to rx pipe")
- }
- txp.Flush()
-
- if _, ok := q.CompletedPacket(); ok {
- t.Fatalf("Bad completion not ignored")
- }
-}
-
-func TestBadRxCompletion(t *testing.T) {
- // Check that bad rx completions are properly ignored.
- pb1 := make([]byte, 100)
- pb2 := make([]byte, 100)
-
- var rxp pipe.Rx
- rxp.Init(pb1)
-
- var txp pipe.Tx
- txp.Init(pb2)
-
- var q Rx
- q.Init(pb1, pb2, nil)
-
- // Post a completion that is too short, and check that it is ignored.
- if d := txp.Push(7); d == nil {
- t.Fatalf("Unable to push to rx pipe")
- }
- txp.Flush()
-
- if b, _ := q.Dequeue(nil); b != nil {
- t.Fatalf("Bad completion not ignored")
- }
-
- // Post a completion whose buffer sizes add up to less than the total
- // size.
- d := txp.Push(sizeOfConsumedPacketHeader + 2*sizeOfConsumedBuffer)
- if d == nil {
- t.Fatalf("Unable to push to rx pipe")
- }
-
- copy(d, []byte{
- 100, 0, 0, 0, // packet size
- 0, 0, 0, 0, // reserved
-
- 100, 0, 0, 0, 0, 0, 0, 0, // offset 1
- 10, 0, 0, 0, // size 1
- 0, 0, 0, 0, 0, 0, 0, 0, // user data 1
- 53, 4, 0, 0, 0, 0, 0, 0, // ID 1
-
- 200, 0, 0, 0, 0, 0, 0, 0, // offset 2
- 10, 0, 0, 0, // size 2
- 0, 0, 0, 0, 0, 0, 0, 0, // user data 2
- 75, 8, 0, 0, 0, 0, 0, 0, // ID 2
- })
-
- txp.Flush()
- if b, _ := q.Dequeue(nil); b != nil {
- t.Fatalf("Bad completion not ignored")
- }
-
-	// Post a completion whose buffer sizes will cause a 32-bit overflow
-	// but still add up to the right total.
- d = txp.Push(sizeOfConsumedPacketHeader + 2*sizeOfConsumedBuffer)
- if d == nil {
- t.Fatalf("Unable to push to rx pipe")
- }
-
- copy(d, []byte{
- 100, 0, 0, 0, // packet size
- 0, 0, 0, 0, // reserved
-
- 100, 0, 0, 0, 0, 0, 0, 0, // offset 1
- 255, 255, 255, 255, // size 1
- 0, 0, 0, 0, 0, 0, 0, 0, // user data 1
- 53, 4, 0, 0, 0, 0, 0, 0, // ID 1
-
- 200, 0, 0, 0, 0, 0, 0, 0, // offset 2
- 101, 0, 0, 0, // size 2
- 0, 0, 0, 0, 0, 0, 0, 0, // user data 2
- 75, 8, 0, 0, 0, 0, 0, 0, // ID 2
- })
-
- txp.Flush()
- if b, _ := q.Dequeue(nil); b != nil {
- t.Fatalf("Bad completion not ignored")
- }
-}
-
-func TestFillTxPipe(t *testing.T) {
- // Check that transmitting a new buffer when the buffer pipe is full
- // fails gracefully.
- pb1 := make([]byte, 104)
- pb2 := make([]byte, 104)
-
- var rxp pipe.Rx
- rxp.Init(pb1)
-
- var txp pipe.Tx
- txp.Init(pb2)
-
- var q Tx
- q.Init(pb1, pb2)
-
- // Transmit twice, which should fill the tx pipe.
- b := []TxBuffer{
- {nil, 100, 60},
- {nil, 200, 40},
- }
-
- b[0].Next = &b[1]
-
- const usedID = 1002
- const usedTotalSize = 100
- for i := uint64(0); i < 2; i++ {
- if !q.Enqueue(usedID+i, usedTotalSize, 2, &b[0]) {
- t.Fatalf("Failed to transmit buffer")
- }
- }
-
- // Transmit another packet now that the tx pipe is full.
- if q.Enqueue(usedID+2, usedTotalSize, 2, &b[0]) {
- t.Fatalf("Enqueue succeeded when tx pipe is full")
- }
-}
-
-func TestFillRxPipe(t *testing.T) {
- // Check that posting a new buffer when the buffer pipe is full fails
- // gracefully.
- pb1 := make([]byte, 100)
- pb2 := make([]byte, 100)
-
- var rxp pipe.Rx
- rxp.Init(pb1)
-
- var txp pipe.Tx
- txp.Init(pb2)
-
- var q Rx
- q.Init(pb1, pb2, nil)
-
-	// Post a buffer twice; this should fill the tx pipe.
- b := []RxBuffer{
- {100, 60, 1077, 0},
- }
-
- for i := 0; i < 2; i++ {
- if !q.PostBuffers(b) {
- t.Fatalf("PostBuffers failed on non-full queue")
- }
- }
-
- // Post another buffer now that the tx pipe is full.
- if q.PostBuffers(b) {
- t.Fatalf("PostBuffers succeeded on full queue")
- }
-}
-
-func TestLotsOfTransmissions(t *testing.T) {
- // Make sure pipes are being properly flushed when transmitting packets.
- pb1 := make([]byte, 100)
- pb2 := make([]byte, 100)
-
- var rxp pipe.Rx
- rxp.Init(pb1)
-
- var txp pipe.Tx
- txp.Init(pb2)
-
- var q Tx
- q.Init(pb1, pb2)
-
- // Prepare packet with two buffers.
- b := []TxBuffer{
- {nil, 100, 60},
- {nil, 200, 40},
- }
-
- b[0].Next = &b[1]
-
- const usedID = 1002
- const usedTotalSize = 100
-
- // Post 100000 packets and completions.
- for i := 100000; i > 0; i-- {
- if !q.Enqueue(usedID, usedTotalSize, 2, &b[0]) {
- t.Fatalf("Enqueue failed on non-full queue")
- }
-
- if d := rxp.Pull(); d == nil {
- t.Fatalf("Tx pipe is empty after Enqueue")
- }
- rxp.Flush()
-
- d := txp.Push(8)
- if d == nil {
- t.Fatalf("Unable to write to rx pipe")
- }
- binary.LittleEndian.PutUint64(d, usedID)
- txp.Flush()
- if _, ok := q.CompletedPacket(); !ok {
- t.Fatalf("Completion not returned")
- }
- }
-}
-
-func TestLotsOfReceptions(t *testing.T) {
- // Make sure pipes are being properly flushed when receiving packets.
- pb1 := make([]byte, 100)
- pb2 := make([]byte, 100)
-
- var rxp pipe.Rx
- rxp.Init(pb1)
-
- var txp pipe.Tx
- txp.Init(pb2)
-
- var q Rx
- q.Init(pb1, pb2, nil)
-
- // Prepare for posting two buffers.
- b := []RxBuffer{
- {100, 60, 1077, 0},
- {200, 40, 2123, 0},
- }
-
- // Post 100000 buffers and completions.
- for i := 100000; i > 0; i-- {
- if !q.PostBuffers(b) {
- t.Fatalf("PostBuffers failed on non-full queue")
- }
-
- if d := rxp.Pull(); d == nil {
- t.Fatalf("Tx pipe is empty after PostBuffers")
- }
- rxp.Flush()
-
- if d := rxp.Pull(); d == nil {
- t.Fatalf("Tx pipe is empty after PostBuffers")
- }
- rxp.Flush()
-
- d := txp.Push(sizeOfConsumedPacketHeader + 2*sizeOfConsumedBuffer)
- if d == nil {
- t.Fatalf("Unable to push to rx pipe")
- }
-
- copy(d, []byte{
- 100, 0, 0, 0, // packet size
- 0, 0, 0, 0, // reserved
-
- 100, 0, 0, 0, 0, 0, 0, 0, // offset 1
- 60, 0, 0, 0, // size 1
- 0, 0, 0, 0, 0, 0, 0, 0, // user data 1
- 53, 4, 0, 0, 0, 0, 0, 0, // ID 1
-
- 200, 0, 0, 0, 0, 0, 0, 0, // offset 2
- 40, 0, 0, 0, // size 2
- 0, 0, 0, 0, 0, 0, 0, 0, // user data 2
- 75, 8, 0, 0, 0, 0, 0, 0, // ID 2
- })
-
- txp.Flush()
-
- if _, n := q.Dequeue(nil); n == 0 {
- t.Fatalf("Dequeue failed when there is a completion")
- }
- }
-}
-
-func TestRxEnableNotification(t *testing.T) {
-	// Check that enabling notifications results in properly updated state.
- pb1 := make([]byte, 100)
- pb2 := make([]byte, 100)
-
- var state uint32
- var q Rx
- q.Init(pb1, pb2, &state)
-
- q.EnableNotification()
- if state != eventFDEnabled {
- t.Fatalf("Bad value in shared state: got %v, want %v", state, eventFDEnabled)
- }
-}
-
-func TestRxDisableNotification(t *testing.T) {
-	// Check that disabling notifications results in properly updated state.
- pb1 := make([]byte, 100)
- pb2 := make([]byte, 100)
-
- var state uint32
- var q Rx
- q.Init(pb1, pb2, &state)
-
- q.DisableNotification()
- if state != eventFDDisabled {
- t.Fatalf("Bad value in shared state: got %v, want %v", state, eventFDDisabled)
- }
-}
diff --git a/pkg/tcpip/link/sharedmem/sharedmem_state_autogen.go b/pkg/tcpip/link/sharedmem/sharedmem_state_autogen.go
new file mode 100644
index 000000000..bc12017b2
--- /dev/null
+++ b/pkg/tcpip/link/sharedmem/sharedmem_state_autogen.go
@@ -0,0 +1,6 @@
+// automatically generated by stateify.
+
+// +build linux
+// +build linux
+
+package sharedmem
diff --git a/pkg/tcpip/link/sharedmem/sharedmem_test.go b/pkg/tcpip/link/sharedmem/sharedmem_test.go
deleted file mode 100644
index d6d953085..000000000
--- a/pkg/tcpip/link/sharedmem/sharedmem_test.go
+++ /dev/null
@@ -1,815 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build linux
-// +build linux
-
-package sharedmem
-
-import (
- "bytes"
- "io/ioutil"
- "math/rand"
- "os"
- "strings"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/link/sharedmem/pipe"
- "gvisor.dev/gvisor/pkg/tcpip/link/sharedmem/queue"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
-)
-
-const (
- localLinkAddr = "\xde\xad\xbe\xef\x56\x78"
- remoteLinkAddr = "\xde\xad\xbe\xef\x12\x34"
-
- queueDataSize = 1024 * 1024
- queuePipeSize = 4096
-)
-
-type queueBuffers struct {
- data []byte
- rx pipe.Tx
- tx pipe.Rx
-}
-
-func initQueue(t *testing.T, q *queueBuffers, c *QueueConfig) {
- // Prepare tx pipe.
- b, err := getBuffer(c.TxPipeFD)
- if err != nil {
- t.Fatalf("getBuffer failed: %v", err)
- }
- q.tx.Init(b)
-
- // Prepare rx pipe.
- b, err = getBuffer(c.RxPipeFD)
- if err != nil {
- t.Fatalf("getBuffer failed: %v", err)
- }
- q.rx.Init(b)
-
- // Get data slice.
- q.data, err = getBuffer(c.DataFD)
- if err != nil {
- t.Fatalf("getBuffer failed: %v", err)
- }
-}
-
-func (q *queueBuffers) cleanup() {
- unix.Munmap(q.tx.Bytes())
- unix.Munmap(q.rx.Bytes())
- unix.Munmap(q.data)
-}
-
-type packetInfo struct {
- addr tcpip.LinkAddress
- proto tcpip.NetworkProtocolNumber
- data buffer.View
- linkHeader buffer.View
-}
-
-type testContext struct {
- t *testing.T
- ep *endpoint
- txCfg QueueConfig
- rxCfg QueueConfig
- txq queueBuffers
- rxq queueBuffers
-
- packetCh chan struct{}
- mu sync.Mutex
- packets []packetInfo
-}
-
-func newTestContext(t *testing.T, mtu, bufferSize uint32, addr tcpip.LinkAddress) *testContext {
- var err error
- c := &testContext{
- t: t,
- packetCh: make(chan struct{}, 1000000),
- }
- c.txCfg = createQueueFDs(t, queueSizes{
- dataSize: queueDataSize,
- txPipeSize: queuePipeSize,
- rxPipeSize: queuePipeSize,
- sharedDataSize: 4096,
- })
-
- c.rxCfg = createQueueFDs(t, queueSizes{
- dataSize: queueDataSize,
- txPipeSize: queuePipeSize,
- rxPipeSize: queuePipeSize,
- sharedDataSize: 4096,
- })
-
- initQueue(t, &c.txq, &c.txCfg)
- initQueue(t, &c.rxq, &c.rxCfg)
-
- ep, err := New(mtu, bufferSize, addr, c.txCfg, c.rxCfg)
- if err != nil {
- t.Fatalf("New failed: %v", err)
- }
-
- c.ep = ep.(*endpoint)
- c.ep.Attach(c)
-
- return c
-}
-
-func (c *testContext) DeliverNetworkPacket(remoteLinkAddr, localLinkAddr tcpip.LinkAddress, proto tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {
- c.mu.Lock()
- c.packets = append(c.packets, packetInfo{
- addr: remoteLinkAddr,
- proto: proto,
- data: pkt.Data().AsRange().ToOwnedView(),
- })
- c.mu.Unlock()
-
- c.packetCh <- struct{}{}
-}
-
-func (c *testContext) DeliverOutboundPacket(remoteLinkAddr, localLinkAddr tcpip.LinkAddress, proto tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {
- panic("unimplemented")
-}
-
-func (c *testContext) cleanup() {
- c.ep.Close()
- closeFDs(&c.txCfg)
- closeFDs(&c.rxCfg)
- c.txq.cleanup()
- c.rxq.cleanup()
-}
-
-func (c *testContext) waitForPackets(n int, to <-chan time.Time, errorStr string) {
- for i := 0; i < n; i++ {
- select {
- case <-c.packetCh:
- case <-to:
- c.t.Fatalf(errorStr)
- }
- }
-}
-
-func (c *testContext) pushRxCompletion(size uint32, bs []queue.RxBuffer) {
- b := c.rxq.rx.Push(queue.RxCompletionSize(len(bs)))
- queue.EncodeRxCompletion(b, size, 0)
- for i := range bs {
- queue.EncodeRxCompletionBuffer(b, i, queue.RxBuffer{
- Offset: bs[i].Offset,
- Size: bs[i].Size,
- ID: bs[i].ID,
- })
- }
-}
-
-func randomFill(b []byte) {
- for i := range b {
- b[i] = byte(rand.Intn(256))
- }
-}
-
-func shuffle(b []int) {
- for i := len(b) - 1; i >= 0; i-- {
- j := rand.Intn(i + 1)
- b[i], b[j] = b[j], b[i]
- }
-}
-
-func createFile(t *testing.T, size int64, initQueue bool) int {
- tmpDir, ok := os.LookupEnv("TEST_TMPDIR")
- if !ok {
- tmpDir = os.Getenv("TMPDIR")
- }
- f, err := ioutil.TempFile(tmpDir, "sharedmem_test")
- if err != nil {
- t.Fatalf("TempFile failed: %v", err)
- }
- defer f.Close()
- unix.Unlink(f.Name())
-
- if initQueue {
- // Write the "slot-free" flag in the initial queue.
- _, err := f.WriteAt([]byte{0, 0, 0, 0, 0, 0, 0, 0x80}, 0)
- if err != nil {
- t.Fatalf("WriteAt failed: %v", err)
- }
- }
-
- fd, err := unix.Dup(int(f.Fd()))
- if err != nil {
- t.Fatalf("Dup failed: %v", err)
- }
-
- if err := unix.Ftruncate(fd, size); err != nil {
- unix.Close(fd)
- t.Fatalf("Ftruncate failed: %v", err)
- }
-
- return fd
-}
-
-func closeFDs(c *QueueConfig) {
- unix.Close(c.DataFD)
- unix.Close(c.EventFD)
- unix.Close(c.TxPipeFD)
- unix.Close(c.RxPipeFD)
- unix.Close(c.SharedDataFD)
-}
-
-type queueSizes struct {
- dataSize int64
- txPipeSize int64
- rxPipeSize int64
- sharedDataSize int64
-}
-
-func createQueueFDs(t *testing.T, s queueSizes) QueueConfig {
- fd, _, err := unix.RawSyscall(unix.SYS_EVENTFD2, 0, 0, 0)
- if err != 0 {
- t.Fatalf("eventfd failed: %v", error(err))
- }
-
- return QueueConfig{
- EventFD: int(fd),
- DataFD: createFile(t, s.dataSize, false),
- TxPipeFD: createFile(t, s.txPipeSize, true),
- RxPipeFD: createFile(t, s.rxPipeSize, true),
- SharedDataFD: createFile(t, s.sharedDataSize, false),
- }
-}
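createQueueFDs above obtains the notification descriptor with a raw SYS_EVENTFD2 syscall. The same golang.org/x/sys/unix package also exposes a typed Eventfd wrapper, shown here as an alternative for clarity (treat the exact signature as an assumption; this is not what the deleted test did):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Typed wrapper for the same eventfd(2) call the test makes via RawSyscall.
	fd, err := unix.Eventfd(0, 0)
	if err != nil {
		fmt.Println("eventfd failed:", err)
		return
	}
	defer unix.Close(fd)
	fmt.Println("eventfd:", fd)
}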
-
-// TestSimpleSend sends 1000 packets with random header and payload sizes,
-// then checks that the right payload is received on the shared memory queues.
-func TestSimpleSend(t *testing.T) {
- c := newTestContext(t, 20000, 1500, localLinkAddr)
- defer c.cleanup()
-
- // Prepare route.
- var r stack.RouteInfo
- r.RemoteLinkAddress = remoteLinkAddr
-
- for iters := 1000; iters > 0; iters-- {
- func() {
- hdrLen, dataLen := rand.Intn(10000), rand.Intn(10000)
-
- // Prepare and send packet.
- hdrBuf := buffer.NewView(hdrLen)
- randomFill(hdrBuf)
-
- data := buffer.NewView(dataLen)
- randomFill(data)
-
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: hdrLen + int(c.ep.MaxHeaderLength()),
- Data: data.ToVectorisedView(),
- })
- copy(pkt.NetworkHeader().Push(hdrLen), hdrBuf)
-
- proto := tcpip.NetworkProtocolNumber(rand.Intn(0x10000))
- if err := c.ep.WritePacket(r, proto, pkt); err != nil {
- t.Fatalf("WritePacket failed: %v", err)
- }
-
- // Receive packet.
- desc := c.txq.tx.Pull()
- pi := queue.DecodeTxPacketHeader(desc)
- if pi.Reserved != 0 {
- t.Fatalf("Reserved value is non-zero: 0x%x", pi.Reserved)
- }
- contents := make([]byte, 0, pi.Size)
- for i := 0; i < pi.BufferCount; i++ {
- bi := queue.DecodeTxBufferHeader(desc, i)
- contents = append(contents, c.txq.data[bi.Offset:][:bi.Size]...)
- }
- c.txq.tx.Flush()
-
- defer func() {
- // Tell the endpoint about the completion of the write.
- b := c.txq.rx.Push(8)
- queue.EncodeTxCompletion(b, pi.ID)
- c.txq.rx.Flush()
- }()
-
- // Check the ethernet header.
- ethTemplate := make(header.Ethernet, header.EthernetMinimumSize)
- ethTemplate.Encode(&header.EthernetFields{
- SrcAddr: localLinkAddr,
- DstAddr: remoteLinkAddr,
- Type: proto,
- })
- if got := contents[:header.EthernetMinimumSize]; !bytes.Equal(got, []byte(ethTemplate)) {
- t.Fatalf("Bad ethernet header in packet: got %x, want %x", got, ethTemplate)
- }
-
- // Compare contents skipping the ethernet header added by the
- // endpoint.
- merged := append(hdrBuf, data...)
- if uint32(len(contents)) < pi.Size {
- t.Fatalf("Sum of buffers is less than packet size: %v < %v", len(contents), pi.Size)
- }
- contents = contents[:pi.Size][header.EthernetMinimumSize:]
-
- if !bytes.Equal(contents, merged) {
- t.Fatalf("Buffers are different: got %x (%v bytes), want %x (%v bytes)", contents, len(contents), merged, len(merged))
- }
- }()
- }
-}
-
-// TestPreserveSrcAddressInSend calls WritePacket once with LocalLinkAddress
-// set in Route (using much of the same code as TestSimpleSend), then checks
-// that the encoded ethernet header received includes the correct SrcAddr.
-func TestPreserveSrcAddressInSend(t *testing.T) {
- c := newTestContext(t, 20000, 1500, localLinkAddr)
- defer c.cleanup()
-
- newLocalLinkAddress := tcpip.LinkAddress(strings.Repeat("0xFE", 6))
- // Set both remote and local link address in route.
- var r stack.RouteInfo
- r.LocalLinkAddress = newLocalLinkAddress
- r.RemoteLinkAddress = remoteLinkAddr
-
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- // WritePacket panics given a prependable with anything less than
- // the minimum size of the ethernet header.
- ReserveHeaderBytes: header.EthernetMinimumSize,
- })
-
- proto := tcpip.NetworkProtocolNumber(rand.Intn(0x10000))
- if err := c.ep.WritePacket(r, proto, pkt); err != nil {
- t.Fatalf("WritePacket failed: %v", err)
- }
-
- // Receive packet.
- desc := c.txq.tx.Pull()
- pi := queue.DecodeTxPacketHeader(desc)
- if pi.Reserved != 0 {
- t.Fatalf("Reserved value is non-zero: 0x%x", pi.Reserved)
- }
- contents := make([]byte, 0, pi.Size)
- for i := 0; i < pi.BufferCount; i++ {
- bi := queue.DecodeTxBufferHeader(desc, i)
- contents = append(contents, c.txq.data[bi.Offset:][:bi.Size]...)
- }
- c.txq.tx.Flush()
-
- defer func() {
- // Tell the endpoint about the completion of the write.
- b := c.txq.rx.Push(8)
- queue.EncodeTxCompletion(b, pi.ID)
- c.txq.rx.Flush()
- }()
-
- // Check that the ethernet header contains the expected SrcAddr.
- ethTemplate := make(header.Ethernet, header.EthernetMinimumSize)
- ethTemplate.Encode(&header.EthernetFields{
- SrcAddr: newLocalLinkAddress,
- DstAddr: remoteLinkAddr,
- Type: proto,
- })
- if got := contents[:header.EthernetMinimumSize]; !bytes.Equal(got, []byte(ethTemplate)) {
- t.Fatalf("Bad ethernet header in packet: got %x, want %x", got, ethTemplate)
- }
-}
-
-// TestFillTxQueue sends packets until the queue is full.
-func TestFillTxQueue(t *testing.T) {
- c := newTestContext(t, 20000, 1500, localLinkAddr)
- defer c.cleanup()
-
- // Prepare to send a packet.
- var r stack.RouteInfo
- r.RemoteLinkAddress = remoteLinkAddr
-
- buf := buffer.NewView(100)
-
-	// Each packet uses no more than 40 bytes, so write that many packets
-	// until the tx queue is full.
- ids := make(map[uint64]struct{})
- for i := queuePipeSize / 40; i > 0; i-- {
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: int(c.ep.MaxHeaderLength()),
- Data: buf.ToVectorisedView(),
- })
-
- if err := c.ep.WritePacket(r, header.IPv4ProtocolNumber, pkt); err != nil {
- t.Fatalf("WritePacket failed unexpectedly: %v", err)
- }
-
- // Check that they have different IDs.
- desc := c.txq.tx.Pull()
- pi := queue.DecodeTxPacketHeader(desc)
- if _, ok := ids[pi.ID]; ok {
- t.Fatalf("ID (%v) reused", pi.ID)
- }
- ids[pi.ID] = struct{}{}
- }
-
- // Next attempt to write must fail.
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: int(c.ep.MaxHeaderLength()),
- Data: buf.ToVectorisedView(),
- })
- err := c.ep.WritePacket(r, header.IPv4ProtocolNumber, pkt)
- if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Fatalf("got WritePacket(...) = %v, want %s", err, &tcpip.ErrWouldBlock{})
- }
-}
-
-// TestFillTxQueueAfterBadCompletion sends a bad completion, then sends packets
-// until the queue is full.
-func TestFillTxQueueAfterBadCompletion(t *testing.T) {
- c := newTestContext(t, 20000, 1500, localLinkAddr)
- defer c.cleanup()
-
- // Send a bad completion.
- queue.EncodeTxCompletion(c.txq.rx.Push(8), 1)
- c.txq.rx.Flush()
-
- // Prepare to send a packet.
- var r stack.RouteInfo
- r.RemoteLinkAddress = remoteLinkAddr
-
- buf := buffer.NewView(100)
-
- // Send two packets so that the id slice has at least two slots.
- for i := 2; i > 0; i-- {
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: int(c.ep.MaxHeaderLength()),
- Data: buf.ToVectorisedView(),
- })
- if err := c.ep.WritePacket(r, header.IPv4ProtocolNumber, pkt); err != nil {
- t.Fatalf("WritePacket failed unexpectedly: %v", err)
- }
- }
-
- // Complete the two writes twice.
- for i := 2; i > 0; i-- {
- pi := queue.DecodeTxPacketHeader(c.txq.tx.Pull())
-
- queue.EncodeTxCompletion(c.txq.rx.Push(8), pi.ID)
- queue.EncodeTxCompletion(c.txq.rx.Push(8), pi.ID)
- c.txq.rx.Flush()
- }
- c.txq.tx.Flush()
-
-	// Each packet uses no more than 40 bytes, so write that many packets
-	// until the tx queue is full.
- ids := make(map[uint64]struct{})
- for i := queuePipeSize / 40; i > 0; i-- {
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: int(c.ep.MaxHeaderLength()),
- Data: buf.ToVectorisedView(),
- })
- if err := c.ep.WritePacket(r, header.IPv4ProtocolNumber, pkt); err != nil {
- t.Fatalf("WritePacket failed unexpectedly: %v", err)
- }
-
- // Check that they have different IDs.
- desc := c.txq.tx.Pull()
- pi := queue.DecodeTxPacketHeader(desc)
- if _, ok := ids[pi.ID]; ok {
- t.Fatalf("ID (%v) reused", pi.ID)
- }
- ids[pi.ID] = struct{}{}
- }
-
- // Next attempt to write must fail.
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: int(c.ep.MaxHeaderLength()),
- Data: buf.ToVectorisedView(),
- })
- err := c.ep.WritePacket(r, header.IPv4ProtocolNumber, pkt)
- if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Fatalf("got WritePacket(...) = %v, want %s", err, &tcpip.ErrWouldBlock{})
- }
-}
-
-// TestFillTxMemory sends packets until we run out of shared memory.
-func TestFillTxMemory(t *testing.T) {
- const bufferSize = 1500
- c := newTestContext(t, 20000, bufferSize, localLinkAddr)
- defer c.cleanup()
-
- // Prepare to send a packet.
- var r stack.RouteInfo
- r.RemoteLinkAddress = remoteLinkAddr
-
- buf := buffer.NewView(100)
-
-	// Each packet uses up one buffer, so write as many as possible until
-	// we fill the memory.
- ids := make(map[uint64]struct{})
- for i := queueDataSize / bufferSize; i > 0; i-- {
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: int(c.ep.MaxHeaderLength()),
- Data: buf.ToVectorisedView(),
- })
- if err := c.ep.WritePacket(r, header.IPv4ProtocolNumber, pkt); err != nil {
- t.Fatalf("WritePacket failed unexpectedly: %v", err)
- }
-
- // Check that they have different IDs.
- desc := c.txq.tx.Pull()
- pi := queue.DecodeTxPacketHeader(desc)
- if _, ok := ids[pi.ID]; ok {
- t.Fatalf("ID (%v) reused", pi.ID)
- }
- ids[pi.ID] = struct{}{}
- c.txq.tx.Flush()
- }
-
- // Next attempt to write must fail.
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: int(c.ep.MaxHeaderLength()),
- Data: buf.ToVectorisedView(),
- })
- err := c.ep.WritePacket(r, header.IPv4ProtocolNumber, pkt)
- if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Fatalf("got WritePacket(...) = %v, want %s", err, &tcpip.ErrWouldBlock{})
- }
-}
-
-// TestFillTxMemoryWithMultiBuffer sends packets until we run out of shared
-// memory for a 2-buffer packet, while still leaving room for a 1-buffer
-// packet.
-func TestFillTxMemoryWithMultiBuffer(t *testing.T) {
- const bufferSize = 1500
- c := newTestContext(t, 20000, bufferSize, localLinkAddr)
- defer c.cleanup()
-
- // Prepare to send a packet.
- var r stack.RouteInfo
- r.RemoteLinkAddress = remoteLinkAddr
-
- buf := buffer.NewView(100)
-
-	// Each packet uses up one buffer, so write as many as possible
-	// until there is only one buffer left.
- for i := queueDataSize/bufferSize - 1; i > 0; i-- {
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: int(c.ep.MaxHeaderLength()),
- Data: buf.ToVectorisedView(),
- })
- if err := c.ep.WritePacket(r, header.IPv4ProtocolNumber, pkt); err != nil {
- t.Fatalf("WritePacket failed unexpectedly: %v", err)
- }
-
- // Pull the posted buffer.
- c.txq.tx.Pull()
- c.txq.tx.Flush()
- }
-
- // Attempt to write a two-buffer packet. It must fail.
- {
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: int(c.ep.MaxHeaderLength()),
- Data: buffer.NewView(bufferSize).ToVectorisedView(),
- })
- err := c.ep.WritePacket(r, header.IPv4ProtocolNumber, pkt)
- if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Fatalf("got WritePacket(...) = %v, want %s", err, &tcpip.ErrWouldBlock{})
- }
- }
-
- // Attempt to write the one-buffer packet again. It must succeed.
- {
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: int(c.ep.MaxHeaderLength()),
- Data: buf.ToVectorisedView(),
- })
- if err := c.ep.WritePacket(r, header.IPv4ProtocolNumber, pkt); err != nil {
- t.Fatalf("WritePacket failed unexpectedly: %v", err)
- }
- }
-}
-
-func pollPull(t *testing.T, p *pipe.Rx, to <-chan time.Time, errStr string) []byte {
- t.Helper()
-
- for {
- b := p.Pull()
- if b != nil {
- return b
- }
-
- select {
- case <-time.After(10 * time.Millisecond):
- case <-to:
- t.Fatal(errStr)
- }
- }
-}
-
-// TestSimpleReceive completes 1000 different receives with random payload and
-// random number of buffers. It checks that the contents match the expected
-// values.
-func TestSimpleReceive(t *testing.T) {
- const bufferSize = 1500
- c := newTestContext(t, 20000, bufferSize, localLinkAddr)
- defer c.cleanup()
-
- // Check that buffers have been posted.
- limit := c.ep.rx.q.PostedBuffersLimit()
- for i := uint64(0); i < limit; i++ {
- timeout := time.After(2 * time.Second)
- bi := queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, timeout, "Timeout waiting for all buffers to be posted"))
-
- if want := i * bufferSize; want != bi.Offset {
- t.Fatalf("Bad posted offset: got %v, want %v", bi.Offset, want)
- }
-
- if want := i; want != bi.ID {
- t.Fatalf("Bad posted ID: got %v, want %v", bi.ID, want)
- }
-
- if bufferSize != bi.Size {
- t.Fatalf("Bad posted bufferSize: got %v, want %v", bi.Size, bufferSize)
- }
- }
- c.rxq.tx.Flush()
-
- // Create a slice with the indices 0..limit-1.
- idx := make([]int, limit)
- for i := range idx {
- idx[i] = i
- }
-
- // Complete random packets 1000 times.
- for iters := 1000; iters > 0; iters-- {
- timeout := time.After(2 * time.Second)
- // Prepare a random packet.
- shuffle(idx)
- n := 1 + rand.Intn(10)
- bufs := make([]queue.RxBuffer, n)
- contents := make([]byte, bufferSize*n-rand.Intn(500))
- randomFill(contents)
- for i := range bufs {
- j := idx[i]
- bufs[i].Size = bufferSize
- bufs[i].Offset = uint64(bufferSize * j)
- bufs[i].ID = uint64(j)
-
- copy(c.rxq.data[bufs[i].Offset:][:bufferSize], contents[i*bufferSize:])
- }
-
- // Push completion.
- c.pushRxCompletion(uint32(len(contents)), bufs)
- c.rxq.rx.Flush()
- unix.Write(c.rxCfg.EventFD, []byte{1, 0, 0, 0, 0, 0, 0, 0})
-
- // Wait for packet to be received, then check it.
- c.waitForPackets(1, time.After(5*time.Second), "Timeout waiting for packet")
- c.mu.Lock()
- rcvd := []byte(c.packets[0].data)
- c.packets = c.packets[:0]
- c.mu.Unlock()
-
- if contents := contents[header.EthernetMinimumSize:]; !bytes.Equal(contents, rcvd) {
- t.Fatalf("Unexpected buffer contents: got %x, want %x", rcvd, contents)
- }
-
- // Check that buffers have been reposted.
- for i := range bufs {
- bi := queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, timeout, "Timeout waiting for buffers to be reposted"))
- if bi != bufs[i] {
- t.Fatalf("Unexpected buffer reposted: got %x, want %x", bi, bufs[i])
- }
- }
- c.rxq.tx.Flush()
- }
-}
-
-// TestRxBuffersReposted tests that rx buffers get reposted after they have been
-// completed.
-func TestRxBuffersReposted(t *testing.T) {
- const bufferSize = 1500
- c := newTestContext(t, 20000, bufferSize, localLinkAddr)
- defer c.cleanup()
-
- // Receive all posted buffers.
- limit := c.ep.rx.q.PostedBuffersLimit()
- buffers := make([]queue.RxBuffer, 0, limit)
- for i := limit; i > 0; i-- {
- timeout := time.After(2 * time.Second)
- buffers = append(buffers, queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, timeout, "Timeout waiting for all buffers")))
- }
- c.rxq.tx.Flush()
-
- // Check that all buffers are reposted when individually completed.
- for i := range buffers {
- timeout := time.After(2 * time.Second)
- // Complete the buffer.
- c.pushRxCompletion(buffers[i].Size, buffers[i:][:1])
- c.rxq.rx.Flush()
- unix.Write(c.rxCfg.EventFD, []byte{1, 0, 0, 0, 0, 0, 0, 0})
-
- // Wait for it to be reposted.
- bi := queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, timeout, "Timeout waiting for buffer to be reposted"))
- if bi != buffers[i] {
- t.Fatalf("Different buffer posted: got %v, want %v", bi, buffers[i])
- }
- }
- c.rxq.tx.Flush()
-
- // Check that all buffers are reposted when completed in pairs.
- for i := 0; i < len(buffers)/2; i++ {
- timeout := time.After(2 * time.Second)
- // Complete with two buffers.
- c.pushRxCompletion(2*bufferSize, buffers[2*i:][:2])
- c.rxq.rx.Flush()
- unix.Write(c.rxCfg.EventFD, []byte{1, 0, 0, 0, 0, 0, 0, 0})
-
- // Wait for them to be reposted.
- for j := 0; j < 2; j++ {
- bi := queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, timeout, "Timeout waiting for buffer to be reposted"))
- if bi != buffers[2*i+j] {
- t.Fatalf("Different buffer posted: got %v, want %v", bi, buffers[2*i+j])
- }
- }
- }
- c.rxq.tx.Flush()
-}
-
-// TestReceivePostingIsFull checks that the endpoint will properly handle the
-// case when a received buffer cannot be immediately reposted because it hasn't
-// been pulled from the tx pipe yet.
-func TestReceivePostingIsFull(t *testing.T) {
- const bufferSize = 1500
- c := newTestContext(t, 20000, bufferSize, localLinkAddr)
- defer c.cleanup()
-
- // Complete first posted buffer before flushing it from the tx pipe.
- first := queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, time.After(time.Second), "Timeout waiting for first buffer to be posted"))
- c.pushRxCompletion(first.Size, []queue.RxBuffer{first})
- c.rxq.rx.Flush()
- unix.Write(c.rxCfg.EventFD, []byte{1, 0, 0, 0, 0, 0, 0, 0})
-
- // Check that packet is received.
- c.waitForPackets(1, time.After(time.Second), "Timeout waiting for completed packet")
-
- // Complete another buffer.
- second := queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, time.After(time.Second), "Timeout waiting for second buffer to be posted"))
- c.pushRxCompletion(second.Size, []queue.RxBuffer{second})
- c.rxq.rx.Flush()
- unix.Write(c.rxCfg.EventFD, []byte{1, 0, 0, 0, 0, 0, 0, 0})
-
- // Check that no packet is received yet, as the worker is blocked trying
- // to repost.
- select {
- case <-time.After(500 * time.Millisecond):
- case <-c.packetCh:
- t.Fatalf("Unexpected packet received")
- }
-
- // Flush tx queue, which will allow the first buffer to be reposted,
- // and the second completion to be pulled.
- c.rxq.tx.Flush()
- unix.Write(c.rxCfg.EventFD, []byte{1, 0, 0, 0, 0, 0, 0, 0})
-
- // Check that second packet completes.
- c.waitForPackets(1, time.After(time.Second), "Timeout waiting for second completed packet")
-}
-
-// TestCloseWhileWaitingToPost closes the endpoint while it is waiting to
-// repost a buffer. Make sure it backs out.
-func TestCloseWhileWaitingToPost(t *testing.T) {
- const bufferSize = 1500
- c := newTestContext(t, 20000, bufferSize, localLinkAddr)
- cleaned := false
- defer func() {
- if !cleaned {
- c.cleanup()
- }
- }()
-
- // Complete first posted buffer before flushing it from the tx pipe.
- bi := queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, time.After(time.Second), "Timeout waiting for initial buffer to be posted"))
- c.pushRxCompletion(bi.Size, []queue.RxBuffer{bi})
- c.rxq.rx.Flush()
- unix.Write(c.rxCfg.EventFD, []byte{1, 0, 0, 0, 0, 0, 0, 0})
-
- // Wait for packet to be indicated.
- c.waitForPackets(1, time.After(time.Second), "Timeout waiting for completed packet")
-
- // Cleanup and wait for worker to complete.
- c.cleanup()
- cleaned = true
- c.ep.Wait()
-}
diff --git a/pkg/tcpip/link/sharedmem/sharedmem_unsafe_state_autogen.go b/pkg/tcpip/link/sharedmem/sharedmem_unsafe_state_autogen.go
new file mode 100644
index 000000000..ac3a66520
--- /dev/null
+++ b/pkg/tcpip/link/sharedmem/sharedmem_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package sharedmem
diff --git a/pkg/tcpip/link/sniffer/BUILD b/pkg/tcpip/link/sniffer/BUILD
deleted file mode 100644
index 4aac12a8c..000000000
--- a/pkg/tcpip/link/sniffer/BUILD
+++ /dev/null
@@ -1,21 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "sniffer",
- srcs = [
- "pcap.go",
- "sniffer.go",
- ],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/log",
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/header",
- "//pkg/tcpip/header/parse",
- "//pkg/tcpip/link/nested",
- "//pkg/tcpip/stack",
- ],
-)
diff --git a/pkg/tcpip/link/sniffer/sniffer_state_autogen.go b/pkg/tcpip/link/sniffer/sniffer_state_autogen.go
new file mode 100644
index 000000000..8d79defea
--- /dev/null
+++ b/pkg/tcpip/link/sniffer/sniffer_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package sniffer
diff --git a/pkg/tcpip/link/tun/BUILD b/pkg/tcpip/link/tun/BUILD
deleted file mode 100644
index 4758a99ad..000000000
--- a/pkg/tcpip/link/tun/BUILD
+++ /dev/null
@@ -1,43 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "tun_endpoint_refs",
- out = "tun_endpoint_refs.go",
- package = "tun",
- prefix = "tunEndpoint",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "tunEndpoint",
- },
-)
-
-go_library(
- name = "tun",
- srcs = [
- "device.go",
- "protocol.go",
- "tun_endpoint_refs.go",
- "tun_unsafe.go",
- ],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/log",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/header",
- "//pkg/tcpip/link/channel",
- "//pkg/tcpip/stack",
- "//pkg/waiter",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/tcpip/link/tun/tun_endpoint_refs.go b/pkg/tcpip/link/tun/tun_endpoint_refs.go
new file mode 100644
index 000000000..a3bee1c05
--- /dev/null
+++ b/pkg/tcpip/link/tun/tun_endpoint_refs.go
@@ -0,0 +1,140 @@
+package tun
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const tunEndpointenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var tunEndpointobj *tunEndpoint
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type tunEndpointRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *tunEndpointRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *tunEndpointRefs) RefType() string {
+ return fmt.Sprintf("%T", tunEndpointobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *tunEndpointRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *tunEndpointRefs) LogRefs() bool {
+ return tunEndpointenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *tunEndpointRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *tunEndpointRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if tunEndpointenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.TryRefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *tunEndpointRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if tunEndpointenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *tunEndpointRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if tunEndpointenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *tunEndpointRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
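The generated tunEndpointRefs type above is designed to be embedded in the object whose lifetime it guards: InitRefs sets the count to one, IncRef/TryIncRef add references, and DecRef runs the supplied destructor once the count drops back to zero. A rough usage sketch (the wrapper type and its release method are hypothetical, not part of this package):

type ownedEndpoint struct {
	tunEndpointRefs // embedded generated counter

	// ... fields owned by the object ...
}

func newOwnedEndpoint() *ownedEndpoint {
	e := &ownedEndpoint{}
	e.InitRefs() // one reference; registers with the leak checker if enabled
	return e
}

func (e *ownedEndpoint) release() {
	// The destructor only runs when the last reference is dropped.
	e.DecRef(func() { /* free whatever e owns */ })
}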
diff --git a/pkg/tcpip/link/tun/tun_state_autogen.go b/pkg/tcpip/link/tun/tun_state_autogen.go
new file mode 100644
index 000000000..c5773cc11
--- /dev/null
+++ b/pkg/tcpip/link/tun/tun_state_autogen.go
@@ -0,0 +1,68 @@
+// automatically generated by stateify.
+
+package tun
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (d *Device) StateTypeName() string {
+ return "pkg/tcpip/link/tun.Device"
+}
+
+func (d *Device) StateFields() []string {
+ return []string{
+ "Queue",
+ "endpoint",
+ "notifyHandle",
+ "flags",
+ }
+}
+
+// +checklocksignore
+func (d *Device) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.Queue)
+ stateSinkObject.Save(1, &d.endpoint)
+ stateSinkObject.Save(2, &d.notifyHandle)
+ stateSinkObject.Save(3, &d.flags)
+}
+
+func (d *Device) afterLoad() {}
+
+// +checklocksignore
+func (d *Device) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.Queue)
+ stateSourceObject.Load(1, &d.endpoint)
+ stateSourceObject.Load(2, &d.notifyHandle)
+ stateSourceObject.Load(3, &d.flags)
+}
+
+func (r *tunEndpointRefs) StateTypeName() string {
+ return "pkg/tcpip/link/tun.tunEndpointRefs"
+}
+
+func (r *tunEndpointRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *tunEndpointRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *tunEndpointRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *tunEndpointRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func init() {
+ state.Register((*Device)(nil))
+ state.Register((*tunEndpointRefs)(nil))
+}
diff --git a/pkg/tcpip/link/tun/tun_unsafe_state_autogen.go b/pkg/tcpip/link/tun/tun_unsafe_state_autogen.go
new file mode 100644
index 000000000..149299ea3
--- /dev/null
+++ b/pkg/tcpip/link/tun/tun_unsafe_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build linux
+
+package tun
diff --git a/pkg/tcpip/link/waitable/BUILD b/pkg/tcpip/link/waitable/BUILD
deleted file mode 100644
index b8d417b7d..000000000
--- a/pkg/tcpip/link/waitable/BUILD
+++ /dev/null
@@ -1,30 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "waitable",
- srcs = [
- "waitable.go",
- ],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/sync",
- "//pkg/tcpip",
- "//pkg/tcpip/header",
- "//pkg/tcpip/stack",
- ],
-)
-
-go_test(
- name = "waitable_test",
- srcs = [
- "waitable_test.go",
- ],
- library = ":waitable",
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/header",
- "//pkg/tcpip/stack",
- ],
-)
diff --git a/pkg/tcpip/link/waitable/waitable_state_autogen.go b/pkg/tcpip/link/waitable/waitable_state_autogen.go
new file mode 100644
index 000000000..059424fa0
--- /dev/null
+++ b/pkg/tcpip/link/waitable/waitable_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package waitable
diff --git a/pkg/tcpip/link/waitable/waitable_test.go b/pkg/tcpip/link/waitable/waitable_test.go
deleted file mode 100644
index a71400ee9..000000000
--- a/pkg/tcpip/link/waitable/waitable_test.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package waitable
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
-)
-
-type countedEndpoint struct {
- dispatchCount int
- writeCount int
- attachCount int
-
- mtu uint32
- capabilities stack.LinkEndpointCapabilities
- hdrLen uint16
- linkAddr tcpip.LinkAddress
-
- dispatcher stack.NetworkDispatcher
-}
-
-func (e *countedEndpoint) DeliverNetworkPacket(remote, local tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {
- e.dispatchCount++
-}
-
-func (e *countedEndpoint) DeliverOutboundPacket(remote, local tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {
- panic("unimplemented")
-}
-
-func (e *countedEndpoint) Attach(dispatcher stack.NetworkDispatcher) {
- e.attachCount++
- e.dispatcher = dispatcher
-}
-
-// IsAttached implements stack.LinkEndpoint.IsAttached.
-func (e *countedEndpoint) IsAttached() bool {
- return e.dispatcher != nil
-}
-
-func (e *countedEndpoint) MTU() uint32 {
- return e.mtu
-}
-
-func (e *countedEndpoint) Capabilities() stack.LinkEndpointCapabilities {
- return e.capabilities
-}
-
-func (e *countedEndpoint) MaxHeaderLength() uint16 {
- return e.hdrLen
-}
-
-func (e *countedEndpoint) LinkAddress() tcpip.LinkAddress {
- return e.linkAddr
-}
-
-func (e *countedEndpoint) WritePacket(stack.RouteInfo, tcpip.NetworkProtocolNumber, *stack.PacketBuffer) tcpip.Error {
- e.writeCount++
- return nil
-}
-
-// WritePackets implements stack.LinkEndpoint.WritePackets.
-func (e *countedEndpoint) WritePackets(_ stack.RouteInfo, pkts stack.PacketBufferList, _ tcpip.NetworkProtocolNumber) (int, tcpip.Error) {
- e.writeCount += pkts.Len()
- return pkts.Len(), nil
-}
-
-// ARPHardwareType implements stack.LinkEndpoint.ARPHardwareType.
-func (*countedEndpoint) ARPHardwareType() header.ARPHardwareType {
- panic("unimplemented")
-}
-
-// Wait implements stack.LinkEndpoint.Wait.
-func (*countedEndpoint) Wait() {}
-
-// AddHeader implements stack.LinkEndpoint.AddHeader.
-func (e *countedEndpoint) AddHeader(local, remote tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {
- panic("unimplemented")
-}
-
-func TestWaitWrite(t *testing.T) {
- ep := &countedEndpoint{}
- wep := New(ep)
-
- // Write and check that it goes through.
- wep.WritePacket(stack.RouteInfo{}, 0, stack.NewPacketBuffer(stack.PacketBufferOptions{}))
- if want := 1; ep.writeCount != want {
- t.Fatalf("Unexpected writeCount: got=%v, want=%v", ep.writeCount, want)
- }
-
- // Wait on dispatches, then try to write. It must go through.
- wep.WaitDispatch()
- wep.WritePacket(stack.RouteInfo{}, 0, stack.NewPacketBuffer(stack.PacketBufferOptions{}))
- if want := 2; ep.writeCount != want {
- t.Fatalf("Unexpected writeCount: got=%v, want=%v", ep.writeCount, want)
- }
-
- // Wait on writes, then try to write. It must not go through.
- wep.WaitWrite()
- wep.WritePacket(stack.RouteInfo{}, 0, stack.NewPacketBuffer(stack.PacketBufferOptions{}))
- if want := 2; ep.writeCount != want {
- t.Fatalf("Unexpected writeCount: got=%v, want=%v", ep.writeCount, want)
- }
-}
-
-func TestWaitDispatch(t *testing.T) {
- ep := &countedEndpoint{}
- wep := New(ep)
-
- // Check that attach happens.
- wep.Attach(ep)
- if want := 1; ep.attachCount != want {
- t.Fatalf("Unexpected attachCount: got=%v, want=%v", ep.attachCount, want)
- }
-
- // Dispatch and check that it goes through.
- ep.dispatcher.DeliverNetworkPacket("", "", 0, stack.NewPacketBuffer(stack.PacketBufferOptions{}))
- if want := 1; ep.dispatchCount != want {
- t.Fatalf("Unexpected dispatchCount: got=%v, want=%v", ep.dispatchCount, want)
- }
-
- // Wait on writes, then try to dispatch. It must go through.
- wep.WaitWrite()
- ep.dispatcher.DeliverNetworkPacket("", "", 0, stack.NewPacketBuffer(stack.PacketBufferOptions{}))
- if want := 2; ep.dispatchCount != want {
- t.Fatalf("Unexpected dispatchCount: got=%v, want=%v", ep.dispatchCount, want)
- }
-
- // Wait on dispatches, then try to dispatch. It must not go through.
- wep.WaitDispatch()
- ep.dispatcher.DeliverNetworkPacket("", "", 0, stack.NewPacketBuffer(stack.PacketBufferOptions{}))
- if want := 2; ep.dispatchCount != want {
- t.Fatalf("Unexpected dispatchCount: got=%v, want=%v", ep.dispatchCount, want)
- }
-}
-
-func TestOtherMethods(t *testing.T) {
- const (
- mtu = 0xdead
- capabilities = 0xbeef
- hdrLen = 0x1234
- linkAddr = "test address"
- )
- ep := &countedEndpoint{
- mtu: mtu,
- capabilities: capabilities,
- hdrLen: hdrLen,
- linkAddr: linkAddr,
- }
- wep := New(ep)
-
- if v := wep.MTU(); v != mtu {
- t.Fatalf("Unexpected mtu: got=%v, want=%v", v, mtu)
- }
-
- if v := wep.Capabilities(); v != capabilities {
- t.Fatalf("Unexpected capabilities: got=%v, want=%v", v, capabilities)
- }
-
- if v := wep.MaxHeaderLength(); v != hdrLen {
- t.Fatalf("Unexpected MaxHeaderLength: got=%v, want=%v", v, hdrLen)
- }
-
- if v := wep.LinkAddress(); v != linkAddr {
- t.Fatalf("Unexpected LinkAddress: got=%q, want=%q", v, linkAddr)
- }
-}
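The deleted test above exercises a wrapper link endpoint whose outbound writes and inbound dispatches can be gated on demand (WaitWrite, WaitDispatch). A minimal sketch of that usage, assuming the waitable wrapper API the test calls (New, WaitWrite, and the WritePacket signature shown above); the import path and the gateWrites helper are illustrative, not part of this change:

import (
	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/link/waitable" // assumed path for the wrapper under test
	"gvisor.dev/gvisor/pkg/tcpip/stack"
)

// gateWrites shows the behaviour TestWaitWrite checks: writes pass through
// the wrapper until WaitWrite is called, after which they are dropped.
func gateWrites(lower stack.LinkEndpoint, proto tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {
	wep := waitable.New(lower)
	_ = wep.WritePacket(stack.RouteInfo{}, proto, pkt) // forwarded to lower
	wep.WaitWrite()                                    // gate outbound writes
	_ = wep.WritePacket(stack.RouteInfo{}, proto, pkt) // dropped; lower never sees it
}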
diff --git a/pkg/tcpip/network/BUILD b/pkg/tcpip/network/BUILD
deleted file mode 100644
index 7b1ff44f4..000000000
--- a/pkg/tcpip/network/BUILD
+++ /dev/null
@@ -1,30 +0,0 @@
-load("//tools:defs.bzl", "go_test")
-
-package(licenses = ["notice"])
-
-go_test(
- name = "ip_test",
- size = "small",
- srcs = [
- "ip_test.go",
- "multicast_group_test.go",
- ],
- deps = [
- "//pkg/sync",
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/checker",
- "//pkg/tcpip/faketime",
- "//pkg/tcpip/header",
- "//pkg/tcpip/link/channel",
- "//pkg/tcpip/link/loopback",
- "//pkg/tcpip/network/ipv4",
- "//pkg/tcpip/network/ipv6",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/testutil",
- "//pkg/tcpip/transport/icmp",
- "//pkg/tcpip/transport/tcp",
- "//pkg/tcpip/transport/udp",
- "@com_github_google_go_cmp//cmp:go_default_library",
- ],
-)
diff --git a/pkg/tcpip/network/arp/BUILD b/pkg/tcpip/network/arp/BUILD
deleted file mode 100644
index 6fa1aee18..000000000
--- a/pkg/tcpip/network/arp/BUILD
+++ /dev/null
@@ -1,53 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "arp",
- srcs = [
- "arp.go",
- "stats.go",
- ],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/sync",
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/header",
- "//pkg/tcpip/header/parse",
- "//pkg/tcpip/network/internal/ip",
- "//pkg/tcpip/stack",
- ],
-)
-
-go_test(
- name = "arp_test",
- size = "small",
- srcs = ["arp_test.go"],
- deps = [
- ":arp",
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/faketime",
- "//pkg/tcpip/header",
- "//pkg/tcpip/link/channel",
- "//pkg/tcpip/link/sniffer",
- "//pkg/tcpip/network/ipv4",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/testutil",
- "@com_github_google_go_cmp//cmp:go_default_library",
- "@com_github_google_go_cmp//cmp/cmpopts:go_default_library",
- ],
-)
-
-go_test(
- name = "stats_test",
- size = "small",
- srcs = ["stats_test.go"],
- library = ":arp",
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/testutil",
- ],
-)
diff --git a/pkg/tcpip/network/arp/arp_state_autogen.go b/pkg/tcpip/network/arp/arp_state_autogen.go
new file mode 100644
index 000000000..5cd8535e3
--- /dev/null
+++ b/pkg/tcpip/network/arp/arp_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package arp
diff --git a/pkg/tcpip/network/arp/arp_test.go b/pkg/tcpip/network/arp/arp_test.go
deleted file mode 100644
index 5fcbfeaa2..000000000
--- a/pkg/tcpip/network/arp/arp_test.go
+++ /dev/null
@@ -1,680 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package arp_test
-
-import (
- "fmt"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "github.com/google/go-cmp/cmp/cmpopts"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/faketime"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/link/channel"
- "gvisor.dev/gvisor/pkg/tcpip/link/sniffer"
- "gvisor.dev/gvisor/pkg/tcpip/network/arp"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/testutil"
-)
-
-const (
- nicID = 1
-
- stackLinkAddr = tcpip.LinkAddress("\x0a\x0a\x0b\x0b\x0c\x0c")
- remoteLinkAddr = tcpip.LinkAddress("\x01\x02\x03\x04\x05\x06")
-)
-
-var (
- stackAddr = testutil.MustParse4("10.0.0.1")
- remoteAddr = testutil.MustParse4("10.0.0.2")
- unknownAddr = testutil.MustParse4("10.0.0.3")
-)
-
-type eventType uint8
-
-const (
- entryAdded eventType = iota
- entryChanged
- entryRemoved
-)
-
-func (t eventType) String() string {
- switch t {
- case entryAdded:
- return "add"
- case entryChanged:
- return "change"
- case entryRemoved:
- return "remove"
- default:
- return fmt.Sprintf("unknown (%d)", t)
- }
-}
-
-type eventInfo struct {
- eventType eventType
- nicID tcpip.NICID
- entry stack.NeighborEntry
-}
-
-func (e eventInfo) String() string {
- return fmt.Sprintf("%s event for NIC #%d, %#v", e.eventType, e.nicID, e.entry)
-}
-
-// arpDispatcher implements NUDDispatcher to validate the dispatching of
-// events upon certain NUD state machine events.
-type arpDispatcher struct {
- // C is where events are queued
- C chan eventInfo
-}
-
-var _ stack.NUDDispatcher = (*arpDispatcher)(nil)
-
-func (d *arpDispatcher) OnNeighborAdded(nicID tcpip.NICID, entry stack.NeighborEntry) {
- e := eventInfo{
- eventType: entryAdded,
- nicID: nicID,
- entry: entry,
- }
- d.C <- e
-}
-
-func (d *arpDispatcher) OnNeighborChanged(nicID tcpip.NICID, entry stack.NeighborEntry) {
- e := eventInfo{
- eventType: entryChanged,
- nicID: nicID,
- entry: entry,
- }
- d.C <- e
-}
-
-func (d *arpDispatcher) OnNeighborRemoved(nicID tcpip.NICID, entry stack.NeighborEntry) {
- e := eventInfo{
- eventType: entryRemoved,
- nicID: nicID,
- entry: entry,
- }
- d.C <- e
-}
-
-func (d *arpDispatcher) nextEvent() (eventInfo, bool) {
- select {
- case event := <-d.C:
- return event, true
- default:
- return eventInfo{}, false
- }
-}
-
-type testContext struct {
- s *stack.Stack
- linkEP *channel.Endpoint
- nudDisp arpDispatcher
-}
-
-func makeTestContext(t *testing.T, eventDepth int, packetDepth int) testContext {
- t.Helper()
-
- tc := testContext{
- nudDisp: arpDispatcher{
- C: make(chan eventInfo, eventDepth),
- },
- }
-
- tc.s = stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, arp.NewProtocol},
- NUDDisp: &tc.nudDisp,
- Clock: &faketime.NullClock{},
- })
-
- tc.linkEP = channel.New(packetDepth, header.IPv4MinimumMTU, stackLinkAddr)
- tc.linkEP.LinkEPCapabilities |= stack.CapabilityResolutionRequired
-
- wep := stack.LinkEndpoint(tc.linkEP)
- if testing.Verbose() {
- wep = sniffer.New(wep)
- }
- if err := tc.s.CreateNIC(nicID, wep); err != nil {
- t.Fatalf("CreateNIC failed: %s", err)
- }
-
- if err := tc.s.AddAddress(nicID, ipv4.ProtocolNumber, stackAddr); err != nil {
- t.Fatalf("AddAddress for ipv4 failed: %s", err)
- }
-
- tc.s.SetRouteTable([]tcpip.Route{{
- Destination: header.IPv4EmptySubnet,
- NIC: nicID,
- }})
-
- return tc
-}
-
-func (c *testContext) cleanup() {
- c.linkEP.Close()
-}
-
-func TestMalformedPacket(t *testing.T) {
- c := makeTestContext(t, 0, 0)
- defer c.cleanup()
-
- v := make(buffer.View, header.ARPSize)
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: v.ToVectorisedView(),
- })
-
- c.linkEP.InjectInbound(arp.ProtocolNumber, pkt)
-
- if got := c.s.Stats().ARP.PacketsReceived.Value(); got != 1 {
- t.Errorf("got c.s.Stats().ARP.PacketsReceived.Value() = %d, want = 1", got)
- }
- if got := c.s.Stats().ARP.MalformedPacketsReceived.Value(); got != 1 {
- t.Errorf("got c.s.Stats().ARP.MalformedPacketsReceived.Value() = %d, want = 1", got)
- }
-}
-
-func TestDisabledEndpoint(t *testing.T) {
- c := makeTestContext(t, 0, 0)
- defer c.cleanup()
-
- ep, err := c.s.GetNetworkEndpoint(nicID, header.ARPProtocolNumber)
- if err != nil {
- t.Fatalf("GetNetworkEndpoint(%d, header.ARPProtocolNumber) failed: %s", nicID, err)
- }
- ep.Disable()
-
- v := make(buffer.View, header.ARPSize)
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: v.ToVectorisedView(),
- })
-
- c.linkEP.InjectInbound(arp.ProtocolNumber, pkt)
-
- if got := c.s.Stats().ARP.PacketsReceived.Value(); got != 1 {
- t.Errorf("got c.s.Stats().ARP.PacketsReceived.Value() = %d, want = 1", got)
- }
- if got := c.s.Stats().ARP.DisabledPacketsReceived.Value(); got != 1 {
- t.Errorf("got c.s.Stats().ARP.DisabledPacketsReceived.Value() = %d, want = 1", got)
- }
-}
-
-func TestDirectReply(t *testing.T) {
- c := makeTestContext(t, 0, 0)
- defer c.cleanup()
-
- const senderMAC = "\x01\x02\x03\x04\x05\x06"
- const senderIPv4 = "\x0a\x00\x00\x02"
-
- v := make(buffer.View, header.ARPSize)
- h := header.ARP(v)
- h.SetIPv4OverEthernet()
- h.SetOp(header.ARPReply)
-
- copy(h.HardwareAddressSender(), senderMAC)
- copy(h.ProtocolAddressSender(), senderIPv4)
- copy(h.HardwareAddressTarget(), stackLinkAddr)
- copy(h.ProtocolAddressTarget(), stackAddr)
-
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: v.ToVectorisedView(),
- })
-
- c.linkEP.InjectInbound(arp.ProtocolNumber, pkt)
-
- if got := c.s.Stats().ARP.PacketsReceived.Value(); got != 1 {
- t.Errorf("got c.s.Stats().ARP.PacketsReceived.Value() = %d, want = 1", got)
- }
- if got := c.s.Stats().ARP.RepliesReceived.Value(); got != 1 {
- t.Errorf("got c.s.Stats().ARP.PacketsReceived.Value() = %d, want = 1", got)
- }
-}
-
-func TestDirectRequest(t *testing.T) {
- c := makeTestContext(t, 1, 1)
- defer c.cleanup()
-
- tests := []struct {
- name string
- senderAddr tcpip.Address
- senderLinkAddr tcpip.LinkAddress
- targetAddr tcpip.Address
- isValid bool
- }{
- {
- name: "Loopback",
- senderAddr: stackAddr,
- senderLinkAddr: stackLinkAddr,
- targetAddr: stackAddr,
- isValid: true,
- },
- {
- name: "Remote",
- senderAddr: remoteAddr,
- senderLinkAddr: remoteLinkAddr,
- targetAddr: stackAddr,
- isValid: true,
- },
- {
- name: "RemoteInvalidTarget",
- senderAddr: remoteAddr,
- senderLinkAddr: remoteLinkAddr,
- targetAddr: unknownAddr,
- isValid: false,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- packetsRecv := c.s.Stats().ARP.PacketsReceived.Value()
- requestsRecv := c.s.Stats().ARP.RequestsReceived.Value()
- requestsRecvUnknownAddr := c.s.Stats().ARP.RequestsReceivedUnknownTargetAddress.Value()
- outgoingReplies := c.s.Stats().ARP.OutgoingRepliesSent.Value()
-
- // Inject an incoming ARP request.
- v := make(buffer.View, header.ARPSize)
- h := header.ARP(v)
- h.SetIPv4OverEthernet()
- h.SetOp(header.ARPRequest)
- copy(h.HardwareAddressSender(), test.senderLinkAddr)
- copy(h.ProtocolAddressSender(), test.senderAddr)
- copy(h.ProtocolAddressTarget(), test.targetAddr)
- c.linkEP.InjectInbound(arp.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: v.ToVectorisedView(),
- }))
-
- if got, want := c.s.Stats().ARP.PacketsReceived.Value(), packetsRecv+1; got != want {
- t.Errorf("got c.s.Stats().ARP.PacketsReceived.Value() = %d, want = %d", got, want)
- }
- if got, want := c.s.Stats().ARP.RequestsReceived.Value(), requestsRecv+1; got != want {
- t.Errorf("got c.s.Stats().ARP.PacketsReceived.Value() = %d, want = %d", got, want)
- }
-
- if !test.isValid {
- // No packets should be sent after receiving an invalid ARP request.
- // There is no need to perform a blocking read here, since packets are
- // sent in the same function that handles ARP requests.
- if pkt, ok := c.linkEP.Read(); ok {
- t.Errorf("unexpected packet sent with network protocol number %d", pkt.Proto)
- }
- if got, want := c.s.Stats().ARP.RequestsReceivedUnknownTargetAddress.Value(), requestsRecvUnknownAddr+1; got != want {
- t.Errorf("got c.s.Stats().ARP.RequestsReceivedUnknownTargetAddress.Value() = %d, want = %d", got, want)
- }
- if got, want := c.s.Stats().ARP.OutgoingRepliesSent.Value(), outgoingReplies; got != want {
- t.Errorf("got c.s.Stats().ARP.OutgoingRepliesSent.Value() = %d, want = %d", got, want)
- }
-
- return
- }
-
- if got, want := c.s.Stats().ARP.OutgoingRepliesSent.Value(), outgoingReplies+1; got != want {
- t.Errorf("got c.s.Stats().ARP.OutgoingRepliesSent.Value() = %d, want = %d", got, want)
- }
-
- // Verify an ARP response was sent.
- pi, ok := c.linkEP.Read()
- if !ok {
- t.Fatal("expected ARP response to be sent, got none")
- }
-
- if pi.Proto != arp.ProtocolNumber {
- t.Fatalf("expected ARP response, got network protocol number %d", pi.Proto)
- }
- rep := header.ARP(pi.Pkt.NetworkHeader().View())
- if !rep.IsValid() {
- t.Fatalf("invalid ARP response: len = %d; response = %x", len(rep), rep)
- }
- if got, want := tcpip.LinkAddress(rep.HardwareAddressSender()), stackLinkAddr; got != want {
- t.Errorf("got HardwareAddressSender() = %s, want = %s", got, want)
- }
- if got, want := tcpip.Address(rep.ProtocolAddressSender()), tcpip.Address(h.ProtocolAddressTarget()); got != want {
- t.Errorf("got ProtocolAddressSender() = %s, want = %s", got, want)
- }
- if got, want := tcpip.LinkAddress(rep.HardwareAddressTarget()), tcpip.LinkAddress(h.HardwareAddressSender()); got != want {
- t.Errorf("got HardwareAddressTarget() = %s, want = %s", got, want)
- }
- if got, want := tcpip.Address(rep.ProtocolAddressTarget()), tcpip.Address(h.ProtocolAddressSender()); got != want {
- t.Errorf("got ProtocolAddressTarget() = %s, want = %s", got, want)
- }
-
- // Verify the sender was saved in the neighbor cache.
- if got, ok := c.nudDisp.nextEvent(); ok {
- want := eventInfo{
- eventType: entryAdded,
- nicID: nicID,
- entry: stack.NeighborEntry{
- Addr: test.senderAddr,
- LinkAddr: test.senderLinkAddr,
- State: stack.Stale,
- },
- }
- if diff := cmp.Diff(want, got, cmp.AllowUnexported(eventInfo{}), cmpopts.IgnoreFields(stack.NeighborEntry{}, "UpdatedAt")); diff != "" {
- t.Errorf("got invalid event (-want +got):\n%s", diff)
- }
- } else {
- t.Fatal("event didn't arrive")
- }
-
- neighbors, err := c.s.Neighbors(nicID, ipv4.ProtocolNumber)
- if err != nil {
- t.Fatalf("c.s.Neighbors(%d, %d): %s", nicID, ipv4.ProtocolNumber, err)
- }
-
- neighborByAddr := make(map[tcpip.Address]stack.NeighborEntry)
- for _, n := range neighbors {
- if existing, ok := neighborByAddr[n.Addr]; ok {
- if diff := cmp.Diff(existing, n); diff != "" {
- t.Fatalf("duplicate neighbor entry found (-existing +got):\n%s", diff)
- }
- t.Fatalf("exact neighbor entry duplicate found for addr=%s", n.Addr)
- }
- neighborByAddr[n.Addr] = n
- }
-
- neigh, ok := neighborByAddr[test.senderAddr]
- if !ok {
- t.Fatalf("expected neighbor entry with Addr = %s", test.senderAddr)
- }
- if got, want := neigh.LinkAddr, test.senderLinkAddr; got != want {
- t.Errorf("got neighbor LinkAddr = %s, want = %s", got, want)
- }
- if got, want := neigh.State, stack.Stale; got != want {
- t.Errorf("got neighbor State = %s, want = %s", got, want)
- }
-
- // No more events should be dispatched
- for {
- event, ok := c.nudDisp.nextEvent()
- if !ok {
- break
- }
- t.Errorf("unexpected %s", event)
- }
- })
- }
-}
-
-var _ stack.LinkEndpoint = (*testLinkEndpoint)(nil)
-
-type testLinkEndpoint struct {
- stack.LinkEndpoint
-
- writeErr tcpip.Error
-}
-
-func (t *testLinkEndpoint) WritePacket(r stack.RouteInfo, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) tcpip.Error {
- if t.writeErr != nil {
- return t.writeErr
- }
-
- return t.LinkEndpoint.WritePacket(r, protocol, pkt)
-}
-
-func TestLinkAddressRequest(t *testing.T) {
- const nicID = 1
-
- testAddr := tcpip.Address([]byte{1, 2, 3, 4})
-
- tests := []struct {
- name string
- nicAddr tcpip.Address
- localAddr tcpip.Address
- remoteLinkAddr tcpip.LinkAddress
- linkErr tcpip.Error
- expectedErr tcpip.Error
- expectedLocalAddr tcpip.Address
- expectedRemoteLinkAddr tcpip.LinkAddress
- expectedRequestsSent uint64
- expectedRequestBadLocalAddressErrors uint64
- expectedRequestInterfaceHasNoLocalAddressErrors uint64
- expectedRequestDroppedErrors uint64
- }{
- {
- name: "Unicast",
- nicAddr: stackAddr,
- localAddr: stackAddr,
- remoteLinkAddr: remoteLinkAddr,
- expectedLocalAddr: stackAddr,
- expectedRemoteLinkAddr: remoteLinkAddr,
- expectedRequestsSent: 1,
- expectedRequestBadLocalAddressErrors: 0,
- expectedRequestInterfaceHasNoLocalAddressErrors: 0,
- expectedRequestDroppedErrors: 0,
- },
- {
- name: "Multicast",
- nicAddr: stackAddr,
- localAddr: stackAddr,
- remoteLinkAddr: "",
- expectedLocalAddr: stackAddr,
- expectedRemoteLinkAddr: header.EthernetBroadcastAddress,
- expectedRequestsSent: 1,
- expectedRequestBadLocalAddressErrors: 0,
- expectedRequestInterfaceHasNoLocalAddressErrors: 0,
- expectedRequestDroppedErrors: 0,
- },
- {
- name: "Unicast with unspecified source",
- nicAddr: stackAddr,
- localAddr: "",
- remoteLinkAddr: remoteLinkAddr,
- expectedLocalAddr: stackAddr,
- expectedRemoteLinkAddr: remoteLinkAddr,
- expectedRequestsSent: 1,
- expectedRequestBadLocalAddressErrors: 0,
- expectedRequestInterfaceHasNoLocalAddressErrors: 0,
- expectedRequestDroppedErrors: 0,
- },
- {
- name: "Multicast with unspecified source",
- nicAddr: stackAddr,
- localAddr: "",
- remoteLinkAddr: "",
- expectedLocalAddr: stackAddr,
- expectedRemoteLinkAddr: header.EthernetBroadcastAddress,
- expectedRequestsSent: 1,
- expectedRequestBadLocalAddressErrors: 0,
- expectedRequestInterfaceHasNoLocalAddressErrors: 0,
- expectedRequestDroppedErrors: 0,
- },
- {
- name: "Unicast with unassigned address",
- nicAddr: stackAddr,
- localAddr: testAddr,
- remoteLinkAddr: remoteLinkAddr,
- expectedErr: &tcpip.ErrBadLocalAddress{},
- expectedRequestsSent: 0,
- expectedRequestBadLocalAddressErrors: 1,
- expectedRequestInterfaceHasNoLocalAddressErrors: 0,
- expectedRequestDroppedErrors: 0,
- },
- {
- name: "Multicast with unassigned address",
- nicAddr: stackAddr,
- localAddr: testAddr,
- remoteLinkAddr: "",
- expectedErr: &tcpip.ErrBadLocalAddress{},
- expectedRequestsSent: 0,
- expectedRequestBadLocalAddressErrors: 1,
- expectedRequestInterfaceHasNoLocalAddressErrors: 0,
- expectedRequestDroppedErrors: 0,
- },
- {
- name: "Unicast with no local address available",
- nicAddr: "",
- localAddr: "",
- remoteLinkAddr: remoteLinkAddr,
- expectedErr: &tcpip.ErrNetworkUnreachable{},
- expectedRequestsSent: 0,
- expectedRequestBadLocalAddressErrors: 0,
- expectedRequestInterfaceHasNoLocalAddressErrors: 1,
- expectedRequestDroppedErrors: 0,
- },
- {
- name: "Multicast with no local address available",
- nicAddr: "",
- localAddr: "",
- remoteLinkAddr: "",
- expectedErr: &tcpip.ErrNetworkUnreachable{},
- expectedRequestsSent: 0,
- expectedRequestBadLocalAddressErrors: 0,
- expectedRequestInterfaceHasNoLocalAddressErrors: 1,
- expectedRequestDroppedErrors: 0,
- },
- {
- name: "Link error",
- nicAddr: stackAddr,
- localAddr: stackAddr,
- remoteLinkAddr: remoteLinkAddr,
- linkErr: &tcpip.ErrInvalidEndpointState{},
- expectedErr: &tcpip.ErrInvalidEndpointState{},
- expectedRequestsSent: 0,
- expectedRequestBadLocalAddressErrors: 0,
- expectedRequestInterfaceHasNoLocalAddressErrors: 0,
- expectedRequestDroppedErrors: 1,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{arp.NewProtocol, ipv4.NewProtocol},
- })
- linkEP := channel.New(1, header.IPv4MinimumMTU, stackLinkAddr)
- if err := s.CreateNIC(nicID, &testLinkEndpoint{LinkEndpoint: linkEP, writeErr: test.linkErr}); err != nil {
- t.Fatalf("s.CreateNIC(%d, _): %s", nicID, err)
- }
-
- ep, err := s.GetNetworkEndpoint(nicID, arp.ProtocolNumber)
- if err != nil {
- t.Fatalf("s.GetNetworkEndpoint(%d, %d): %s", nicID, arp.ProtocolNumber, err)
- }
- linkRes, ok := ep.(stack.LinkAddressResolver)
- if !ok {
- t.Fatalf("expected %T to implement stack.LinkAddressResolver", ep)
- }
-
- if len(test.nicAddr) != 0 {
- if err := s.AddAddress(nicID, ipv4.ProtocolNumber, test.nicAddr); err != nil {
- t.Fatalf("s.AddAddress(%d, %d, %s): %s", nicID, ipv4.ProtocolNumber, test.nicAddr, err)
- }
- }
-
- {
- err := linkRes.LinkAddressRequest(remoteAddr, test.localAddr, test.remoteLinkAddr)
- if diff := cmp.Diff(test.expectedErr, err); diff != "" {
- t.Fatalf("unexpected error from p.LinkAddressRequest(%s, %s, %s, _), (-want, +got):\n%s", remoteAddr, test.localAddr, test.remoteLinkAddr, diff)
- }
- }
-
- if got := s.Stats().ARP.OutgoingRequestsSent.Value(); got != test.expectedRequestsSent {
- t.Errorf("got s.Stats().ARP.OutgoingRequestsSent.Value() = %d, want = %d", got, test.expectedRequestsSent)
- }
- if got := s.Stats().ARP.OutgoingRequestInterfaceHasNoLocalAddressErrors.Value(); got != test.expectedRequestInterfaceHasNoLocalAddressErrors {
- t.Errorf("got s.Stats().ARP.OutgoingRequestInterfaceHasNoLocalAddressErrors.Value() = %d, want = %d", got, test.expectedRequestInterfaceHasNoLocalAddressErrors)
- }
- if got := s.Stats().ARP.OutgoingRequestBadLocalAddressErrors.Value(); got != test.expectedRequestBadLocalAddressErrors {
- t.Errorf("got s.Stats().ARP.OutgoingRequestBadLocalAddressErrors.Value() = %d, want = %d", got, test.expectedRequestBadLocalAddressErrors)
- }
- if got := s.Stats().ARP.OutgoingRequestsDropped.Value(); got != test.expectedRequestDroppedErrors {
- t.Errorf("got s.Stats().ARP.OutgoingRequestsDropped.Value() = %d, want = %d", got, test.expectedRequestDroppedErrors)
- }
-
- if test.expectedErr != nil {
- return
- }
-
- pkt, ok := linkEP.Read()
- if !ok {
- t.Fatal("expected to send a link address request")
- }
-
- if pkt.Route.RemoteLinkAddress != test.expectedRemoteLinkAddr {
- t.Errorf("got pkt.Route.RemoteLinkAddress = %s, want = %s", pkt.Route.RemoteLinkAddress, test.expectedRemoteLinkAddr)
- }
-
- rep := header.ARP(stack.PayloadSince(pkt.Pkt.NetworkHeader()))
- if got := rep.Op(); got != header.ARPRequest {
- t.Errorf("got Op = %d, want = %d", got, header.ARPRequest)
- }
- if got := tcpip.LinkAddress(rep.HardwareAddressSender()); got != stackLinkAddr {
- t.Errorf("got HardwareAddressSender = %s, want = %s", got, stackLinkAddr)
- }
- if got := tcpip.Address(rep.ProtocolAddressSender()); got != test.expectedLocalAddr {
- t.Errorf("got ProtocolAddressSender = %s, want = %s", got, test.expectedLocalAddr)
- }
- if got, want := tcpip.LinkAddress(rep.HardwareAddressTarget()), tcpip.LinkAddress("\x00\x00\x00\x00\x00\x00"); got != want {
- t.Errorf("got HardwareAddressTarget = %s, want = %s", got, want)
- }
- if got := tcpip.Address(rep.ProtocolAddressTarget()); got != remoteAddr {
- t.Errorf("got ProtocolAddressTarget = %s, want = %s", got, remoteAddr)
- }
- })
- }
-}
-
-func TestDADARPRequestPacket(t *testing.T) {
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{arp.NewProtocolWithOptions(arp.Options{
- DADConfigs: stack.DADConfigurations{
- DupAddrDetectTransmits: 1,
- },
- }), ipv4.NewProtocol},
- Clock: clock,
- })
- e := channel.New(1, header.IPv4MinimumMTU, stackLinkAddr)
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("s.CreateNIC(%d, _): %s", nicID, err)
- }
-
- if res, err := s.CheckDuplicateAddress(nicID, header.IPv4ProtocolNumber, remoteAddr, func(stack.DADResult) {}); err != nil {
- t.Fatalf("s.CheckDuplicateAddress(%d, %d, %s, _): %s", nicID, header.IPv4ProtocolNumber, remoteAddr, err)
- } else if res != stack.DADStarting {
- t.Fatalf("got s.CheckDuplicateAddress(%d, %d, %s, _) = %d, want = %d", nicID, header.IPv4ProtocolNumber, remoteAddr, res, stack.DADStarting)
- }
-
- clock.RunImmediatelyScheduledJobs()
- pkt, ok := e.Read()
- if !ok {
- t.Fatal("expected to send an ARP request")
- }
-
- if pkt.Route.RemoteLinkAddress != header.EthernetBroadcastAddress {
- t.Errorf("got pkt.Route.RemoteLinkAddress = %s, want = %s", pkt.Route.RemoteLinkAddress, header.EthernetBroadcastAddress)
- }
-
- req := header.ARP(stack.PayloadSince(pkt.Pkt.NetworkHeader()))
- if !req.IsValid() {
- t.Errorf("got req.IsValid() = false, want = true")
- }
- if got := req.Op(); got != header.ARPRequest {
- t.Errorf("got req.Op() = %d, want = %d", got, header.ARPRequest)
- }
- if got := tcpip.LinkAddress(req.HardwareAddressSender()); got != stackLinkAddr {
- t.Errorf("got req.HardwareAddressSender() = %s, want = %s", got, stackLinkAddr)
- }
- if got := tcpip.Address(req.ProtocolAddressSender()); got != header.IPv4Any {
- t.Errorf("got req.ProtocolAddressSender() = %s, want = %s", got, header.IPv4Any)
- }
- if got, want := tcpip.LinkAddress(req.HardwareAddressTarget()), tcpip.LinkAddress("\x00\x00\x00\x00\x00\x00"); got != want {
- t.Errorf("got req.HardwareAddressTarget() = %s, want = %s", got, want)
- }
- if got := tcpip.Address(req.ProtocolAddressTarget()); got != remoteAddr {
- t.Errorf("got req.ProtocolAddressTarget() = %s, want = %s", got, remoteAddr)
- }
-}
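For orientation, the deleted ARP tests above build packets by hand and inject them through a channel endpoint. A condensed sketch of that construction, using only the header and stack calls visible in TestDirectRequest; the function name and parameters are illustrative:

import (
	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/buffer"
	"gvisor.dev/gvisor/pkg/tcpip/header"
	"gvisor.dev/gvisor/pkg/tcpip/stack"
)

// buildARPRequest mirrors the packet construction in TestDirectRequest above.
func buildARPRequest(senderMAC tcpip.LinkAddress, senderIP, targetIP tcpip.Address) *stack.PacketBuffer {
	v := make(buffer.View, header.ARPSize)
	h := header.ARP(v)
	h.SetIPv4OverEthernet() // hardware type Ethernet, protocol type IPv4
	h.SetOp(header.ARPRequest)
	copy(h.HardwareAddressSender(), senderMAC)
	copy(h.ProtocolAddressSender(), senderIP)
	copy(h.ProtocolAddressTarget(), targetIP)
	return stack.NewPacketBuffer(stack.PacketBufferOptions{
		Data: v.ToVectorisedView(),
	})
}

Injecting the result with linkEP.InjectInbound(arp.ProtocolNumber, pkt), as the tests do, makes the stack send a reply only when targetIP is assigned to the NIC (the isValid cases above).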
diff --git a/pkg/tcpip/network/arp/stats_test.go b/pkg/tcpip/network/arp/stats_test.go
deleted file mode 100644
index 0df39ae81..000000000
--- a/pkg/tcpip/network/arp/stats_test.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package arp
-
-import (
- "reflect"
- "testing"
-
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/testutil"
-)
-
-var _ stack.NetworkInterface = (*testInterface)(nil)
-
-type testInterface struct {
- stack.NetworkInterface
- nicID tcpip.NICID
-}
-
-func (t *testInterface) ID() tcpip.NICID {
- return t.nicID
-}
-
-func TestMultiCounterStatsInitialization(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},
- })
- proto := s.NetworkProtocolInstance(ProtocolNumber).(*protocol)
- var nic testInterface
- ep := proto.NewEndpoint(&nic, nil).(*endpoint)
- // At this point, the Stack's stats and the NetworkEndpoint's stats are
- // expected to be bound by a MultiCounterStat.
- refStack := s.Stats()
- refEP := ep.stats.localStats
- if err := testutil.ValidateMultiCounterStats(reflect.ValueOf(&ep.stats.arp).Elem(), []reflect.Value{reflect.ValueOf(&refEP.ARP).Elem(), reflect.ValueOf(&refStack.ARP).Elem()}); err != nil {
- t.Error(err)
- }
-}
diff --git a/pkg/tcpip/network/hash/BUILD b/pkg/tcpip/network/hash/BUILD
deleted file mode 100644
index 872165866..000000000
--- a/pkg/tcpip/network/hash/BUILD
+++ /dev/null
@@ -1,13 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "hash",
- srcs = ["hash.go"],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/rand",
- "//pkg/tcpip/header",
- ],
-)
diff --git a/pkg/tcpip/network/hash/hash_state_autogen.go b/pkg/tcpip/network/hash/hash_state_autogen.go
new file mode 100644
index 000000000..9467fe298
--- /dev/null
+++ b/pkg/tcpip/network/hash/hash_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package hash
diff --git a/pkg/tcpip/network/internal/fragmentation/BUILD b/pkg/tcpip/network/internal/fragmentation/BUILD
deleted file mode 100644
index 274f09092..000000000
--- a/pkg/tcpip/network/internal/fragmentation/BUILD
+++ /dev/null
@@ -1,54 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "reassembler_list",
- out = "reassembler_list.go",
- package = "fragmentation",
- prefix = "reassembler",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*reassembler",
- "Linker": "*reassembler",
- },
-)
-
-go_library(
- name = "fragmentation",
- srcs = [
- "fragmentation.go",
- "reassembler.go",
- "reassembler_list.go",
- ],
- visibility = [
- "//pkg/tcpip/network/ipv4:__pkg__",
- "//pkg/tcpip/network/ipv6:__pkg__",
- ],
- deps = [
- "//pkg/log",
- "//pkg/sync",
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/header",
- "//pkg/tcpip/stack",
- ],
-)
-
-go_test(
- name = "fragmentation_test",
- size = "small",
- srcs = [
- "fragmentation_test.go",
- "reassembler_test.go",
- ],
- library = ":fragmentation",
- deps = [
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/faketime",
- "//pkg/tcpip/network/internal/testutil",
- "//pkg/tcpip/stack",
- "@com_github_google_go_cmp//cmp:go_default_library",
- ],
-)
diff --git a/pkg/tcpip/network/internal/fragmentation/fragmentation_state_autogen.go b/pkg/tcpip/network/internal/fragmentation/fragmentation_state_autogen.go
new file mode 100644
index 000000000..21c5774e9
--- /dev/null
+++ b/pkg/tcpip/network/internal/fragmentation/fragmentation_state_autogen.go
@@ -0,0 +1,68 @@
+// automatically generated by stateify.
+
+package fragmentation
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (l *reassemblerList) StateTypeName() string {
+ return "pkg/tcpip/network/internal/fragmentation.reassemblerList"
+}
+
+func (l *reassemblerList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *reassemblerList) beforeSave() {}
+
+// +checklocksignore
+func (l *reassemblerList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *reassemblerList) afterLoad() {}
+
+// +checklocksignore
+func (l *reassemblerList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *reassemblerEntry) StateTypeName() string {
+ return "pkg/tcpip/network/internal/fragmentation.reassemblerEntry"
+}
+
+func (e *reassemblerEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *reassemblerEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *reassemblerEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *reassemblerEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *reassemblerEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func init() {
+ state.Register((*reassemblerList)(nil))
+ state.Register((*reassemblerEntry)(nil))
+}
diff --git a/pkg/tcpip/network/internal/fragmentation/fragmentation_test.go b/pkg/tcpip/network/internal/fragmentation/fragmentation_test.go
deleted file mode 100644
index dadfc28cc..000000000
--- a/pkg/tcpip/network/internal/fragmentation/fragmentation_test.go
+++ /dev/null
@@ -1,648 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fragmentation
-
-import (
- "errors"
- "testing"
- "time"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/faketime"
- "gvisor.dev/gvisor/pkg/tcpip/network/internal/testutil"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
-)
-
-// reassembleTimeout is a dummy timeout used for testing, where the clock never
-// advances.
-const reassembleTimeout = 1
-
-// vv is a helper to build VectorisedView from different strings.
-func vv(size int, pieces ...string) buffer.VectorisedView {
- views := make([]buffer.View, len(pieces))
- for i, p := range pieces {
- views[i] = []byte(p)
- }
-
- return buffer.NewVectorisedView(size, views)
-}
-
-func pkt(size int, pieces ...string) *stack.PacketBuffer {
- return stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: vv(size, pieces...),
- })
-}
-
-type processInput struct {
- id FragmentID
- first uint16
- last uint16
- more bool
- proto uint8
- pkt *stack.PacketBuffer
-}
-
-type processOutput struct {
- vv buffer.VectorisedView
- proto uint8
- done bool
-}
-
-var processTestCases = []struct {
- comment string
- in []processInput
- out []processOutput
-}{
- {
- comment: "One ID",
- in: []processInput{
- {id: FragmentID{ID: 0}, first: 0, last: 1, more: true, pkt: pkt(2, "01")},
- {id: FragmentID{ID: 0}, first: 2, last: 3, more: false, pkt: pkt(2, "23")},
- },
- out: []processOutput{
- {vv: buffer.VectorisedView{}, done: false},
- {vv: vv(4, "01", "23"), done: true},
- },
- },
- {
- comment: "Next Header protocol mismatch",
- in: []processInput{
- {id: FragmentID{ID: 0}, first: 0, last: 1, more: true, proto: 6, pkt: pkt(2, "01")},
- {id: FragmentID{ID: 0}, first: 2, last: 3, more: false, proto: 17, pkt: pkt(2, "23")},
- },
- out: []processOutput{
- {vv: buffer.VectorisedView{}, done: false},
- {vv: vv(4, "01", "23"), proto: 6, done: true},
- },
- },
- {
- comment: "Two IDs",
- in: []processInput{
- {id: FragmentID{ID: 0}, first: 0, last: 1, more: true, pkt: pkt(2, "01")},
- {id: FragmentID{ID: 1}, first: 0, last: 1, more: true, pkt: pkt(2, "ab")},
- {id: FragmentID{ID: 1}, first: 2, last: 3, more: false, pkt: pkt(2, "cd")},
- {id: FragmentID{ID: 0}, first: 2, last: 3, more: false, pkt: pkt(2, "23")},
- },
- out: []processOutput{
- {vv: buffer.VectorisedView{}, done: false},
- {vv: buffer.VectorisedView{}, done: false},
- {vv: vv(4, "ab", "cd"), done: true},
- {vv: vv(4, "01", "23"), done: true},
- },
- },
-}
-
-func TestFragmentationProcess(t *testing.T) {
- for _, c := range processTestCases {
- t.Run(c.comment, func(t *testing.T) {
- f := NewFragmentation(minBlockSize, 1024, 512, reassembleTimeout, &faketime.NullClock{}, nil)
- firstFragmentProto := c.in[0].proto
- for i, in := range c.in {
- resPkt, proto, done, err := f.Process(in.id, in.first, in.last, in.more, in.proto, in.pkt)
- if err != nil {
- t.Fatalf("f.Process(%+v, %d, %d, %t, %d, %#v) failed: %s",
- in.id, in.first, in.last, in.more, in.proto, in.pkt, err)
- }
- if done != c.out[i].done {
- t.Errorf("got Process(%+v, %d, %d, %t, %d, _) = (_, _, %t, _), want = (_, _, %t, _)",
- in.id, in.first, in.last, in.more, in.proto, done, c.out[i].done)
- }
- if c.out[i].done {
- if diff := cmp.Diff(c.out[i].vv.ToOwnedView(), resPkt.Data().AsRange().ToOwnedView()); diff != "" {
- t.Errorf("got Process(%+v, %d, %d, %t, %d, %#v) result mismatch (-want, +got):\n%s",
- in.id, in.first, in.last, in.more, in.proto, in.pkt, diff)
- }
- if firstFragmentProto != proto {
- t.Errorf("got Process(%+v, %d, %d, %t, %d, _) = (_, %d, _, _), want = (_, %d, _, _)",
- in.id, in.first, in.last, in.more, in.proto, proto, firstFragmentProto)
- }
- if _, ok := f.reassemblers[in.id]; ok {
- t.Errorf("Process(%d) did not remove buffer from reassemblers", i)
- }
- for n := f.rList.Front(); n != nil; n = n.Next() {
- if n.id == in.id {
- t.Errorf("Process(%d) did not remove buffer from rList", i)
- }
- }
- }
- }
- })
- }
-}
-
-func TestReassemblingTimeout(t *testing.T) {
- const (
- reassemblyTimeout = time.Millisecond
- protocol = 0xff
- )
-
- type fragment struct {
- first uint16
- last uint16
- more bool
- data string
- }
-
- type event struct {
- // name is a nickname of this event.
- name string
-
- // clockAdvance is a duration to advance the clock. The clock advances
- // before a fragment specified in the fragment field is processed.
- clockAdvance time.Duration
-
- // fragment is a fragment to process. This can be nil if there is no
- // fragment to process.
- fragment *fragment
-
- // expectDone is true if the fragmentation instance should report the
- // reassembly is done after the fragment is processed.
- expectDone bool
-
- // memSizeAfterEvent is the expected memory size of the fragmentation
- // instance after the event.
- memSizeAfterEvent int
- }
-
- memSizeOfFrags := func(frags ...*fragment) int {
- var size int
- for _, frag := range frags {
- size += pkt(len(frag.data), frag.data).MemSize()
- }
- return size
- }
-
- half1 := &fragment{first: 0, last: 0, more: true, data: "0"}
- half2 := &fragment{first: 1, last: 1, more: false, data: "1"}
-
- tests := []struct {
- name string
- events []event
- }{
- {
- name: "half1 and half2 are reassembled successfully",
- events: []event{
- {
- name: "half1",
- fragment: half1,
- expectDone: false,
- memSizeAfterEvent: memSizeOfFrags(half1),
- },
- {
- name: "half2",
- fragment: half2,
- expectDone: true,
- memSizeAfterEvent: 0,
- },
- },
- },
- {
- name: "half1 timeout, half2 timeout",
- events: []event{
- {
- name: "half1",
- fragment: half1,
- expectDone: false,
- memSizeAfterEvent: memSizeOfFrags(half1),
- },
- {
- name: "half1 just before reassembly timeout",
- clockAdvance: reassemblyTimeout - 1,
- memSizeAfterEvent: memSizeOfFrags(half1),
- },
- {
- name: "half1 reassembly timeout",
- clockAdvance: 1,
- memSizeAfterEvent: 0,
- },
- {
- name: "half2",
- fragment: half2,
- expectDone: false,
- memSizeAfterEvent: memSizeOfFrags(half2),
- },
- {
- name: "half2 just before reassembly timeout",
- clockAdvance: reassemblyTimeout - 1,
- memSizeAfterEvent: memSizeOfFrags(half2),
- },
- {
- name: "half2 reassembly timeout",
- clockAdvance: 1,
- memSizeAfterEvent: 0,
- },
- },
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- clock := faketime.NewManualClock()
- f := NewFragmentation(minBlockSize, HighFragThreshold, LowFragThreshold, reassemblyTimeout, clock, nil)
- for _, event := range test.events {
- clock.Advance(event.clockAdvance)
- if frag := event.fragment; frag != nil {
- _, _, done, err := f.Process(FragmentID{}, frag.first, frag.last, frag.more, protocol, pkt(len(frag.data), frag.data))
- if err != nil {
- t.Fatalf("%s: f.Process failed: %s", event.name, err)
- }
- if done != event.expectDone {
- t.Fatalf("%s: got done = %t, want = %t", event.name, done, event.expectDone)
- }
- }
- if got, want := f.memSize, event.memSizeAfterEvent; got != want {
- t.Errorf("%s: got f.memSize = %d, want = %d", event.name, got, want)
- }
- }
- })
- }
-}
-
-func TestMemoryLimits(t *testing.T) {
- lowLimit := pkt(1, "0").MemSize()
- highLimit := 3 * lowLimit // Allow at most 3 such packets.
- f := NewFragmentation(minBlockSize, highLimit, lowLimit, reassembleTimeout, &faketime.NullClock{}, nil)
- // Send first fragment with id = 0.
- if _, _, _, err := f.Process(FragmentID{ID: 0}, 0, 0, true, 0xFF, pkt(1, "0")); err != nil {
- t.Fatal(err)
- }
- // Send first fragment with id = 1.
- if _, _, _, err := f.Process(FragmentID{ID: 1}, 0, 0, true, 0xFF, pkt(1, "1")); err != nil {
- t.Fatal(err)
- }
- // Send first fragment with id = 2.
- if _, _, _, err := f.Process(FragmentID{ID: 2}, 0, 0, true, 0xFF, pkt(1, "2")); err != nil {
- t.Fatal(err)
- }
-
- // Send first fragment with id = 3. This should cause id = 0 and id = 1 to be
- // evicted.
- if _, _, _, err := f.Process(FragmentID{ID: 3}, 0, 0, true, 0xFF, pkt(1, "3")); err != nil {
- t.Fatal(err)
- }
-
- if _, ok := f.reassemblers[FragmentID{ID: 0}]; ok {
- t.Errorf("Memory limits are not respected: id=0 has not been evicted.")
- }
- if _, ok := f.reassemblers[FragmentID{ID: 1}]; ok {
- t.Errorf("Memory limits are not respected: id=1 has not been evicted.")
- }
- if _, ok := f.reassemblers[FragmentID{ID: 3}]; !ok {
- t.Errorf("Implementation of memory limits is wrong: id=3 is not present.")
- }
-}
-
-func TestMemoryLimitsIgnoresDuplicates(t *testing.T) {
- memSize := pkt(1, "0").MemSize()
- f := NewFragmentation(minBlockSize, memSize, 0, reassembleTimeout, &faketime.NullClock{}, nil)
- // Send first fragment with id = 0.
- if _, _, _, err := f.Process(FragmentID{}, 0, 0, true, 0xFF, pkt(1, "0")); err != nil {
- t.Fatal(err)
- }
- // Send the same packet again.
- if _, _, _, err := f.Process(FragmentID{}, 0, 0, true, 0xFF, pkt(1, "0")); err != nil {
- t.Fatal(err)
- }
-
- if got, want := f.memSize, memSize; got != want {
- t.Errorf("Wrong size, duplicates are not handled correctly: got=%d, want=%d.", got, want)
- }
-}
-
-func TestErrors(t *testing.T) {
- tests := []struct {
- name string
- blockSize uint16
- first uint16
- last uint16
- more bool
- data string
- err error
- }{
- {
- name: "exact block size without more",
- blockSize: 2,
- first: 2,
- last: 3,
- more: false,
- data: "01",
- },
- {
- name: "exact block size with more",
- blockSize: 2,
- first: 2,
- last: 3,
- more: true,
- data: "01",
- },
- {
- name: "exact block size with more and extra data",
- blockSize: 2,
- first: 2,
- last: 3,
- more: true,
- data: "012",
- err: ErrInvalidArgs,
- },
- {
- name: "exact block size with more and too little data",
- blockSize: 2,
- first: 2,
- last: 3,
- more: true,
- data: "0",
- err: ErrInvalidArgs,
- },
- {
- name: "not exact block size with more",
- blockSize: 2,
- first: 2,
- last: 2,
- more: true,
- data: "0",
- err: ErrInvalidArgs,
- },
- {
- name: "not exact block size without more",
- blockSize: 2,
- first: 2,
- last: 2,
- more: false,
- data: "0",
- },
- {
- name: "first not a multiple of block size",
- blockSize: 2,
- first: 3,
- last: 4,
- more: true,
- data: "01",
- err: ErrInvalidArgs,
- },
- {
- name: "first more than last",
- blockSize: 2,
- first: 4,
- last: 3,
- more: true,
- data: "01",
- err: ErrInvalidArgs,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- f := NewFragmentation(test.blockSize, HighFragThreshold, LowFragThreshold, reassembleTimeout, &faketime.NullClock{}, nil)
- _, _, done, err := f.Process(FragmentID{}, test.first, test.last, test.more, 0, pkt(len(test.data), test.data))
- if !errors.Is(err, test.err) {
- t.Errorf("got Process(_, %d, %d, %t, _, %q) = (_, _, _, %v), want = (_, _, _, %v)", test.first, test.last, test.more, test.data, err, test.err)
- }
- if done {
- t.Errorf("got Process(_, %d, %d, %t, _, %q) = (_, _, true, _), want = (_, _, false, _)", test.first, test.last, test.more, test.data)
- }
- })
- }
-}
-
-type fragmentInfo struct {
- remaining int
- copied int
- offset int
- more bool
-}
-
-func TestPacketFragmenter(t *testing.T) {
- const (
- reserve = 60
- proto = 0
- )
-
- tests := []struct {
- name string
- fragmentPayloadLen uint32
- transportHeaderLen int
- payloadSize int
- wantFragments []fragmentInfo
- }{
- {
- name: "Packet exactly fits in MTU",
- fragmentPayloadLen: 1280,
- transportHeaderLen: 0,
- payloadSize: 1280,
- wantFragments: []fragmentInfo{
- {remaining: 0, copied: 1280, offset: 0, more: false},
- },
- },
- {
- name: "Packet exactly does not fit in MTU",
- fragmentPayloadLen: 1000,
- transportHeaderLen: 0,
- payloadSize: 1001,
- wantFragments: []fragmentInfo{
- {remaining: 1, copied: 1000, offset: 0, more: true},
- {remaining: 0, copied: 1, offset: 1000, more: false},
- },
- },
- {
- name: "Packet has a transport header",
- fragmentPayloadLen: 560,
- transportHeaderLen: 40,
- payloadSize: 560,
- wantFragments: []fragmentInfo{
- {remaining: 1, copied: 560, offset: 0, more: true},
- {remaining: 0, copied: 40, offset: 560, more: false},
- },
- },
- {
- name: "Packet has a huge transport header",
- fragmentPayloadLen: 500,
- transportHeaderLen: 1300,
- payloadSize: 500,
- wantFragments: []fragmentInfo{
- {remaining: 3, copied: 500, offset: 0, more: true},
- {remaining: 2, copied: 500, offset: 500, more: true},
- {remaining: 1, copied: 500, offset: 1000, more: true},
- {remaining: 0, copied: 300, offset: 1500, more: false},
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- pkt := testutil.MakeRandPkt(test.transportHeaderLen, reserve, []int{test.payloadSize}, proto)
- originalPayload := stack.PayloadSince(pkt.TransportHeader())
- var reassembledPayload buffer.VectorisedView
- pf := MakePacketFragmenter(pkt, test.fragmentPayloadLen, reserve)
- for i := 0; ; i++ {
- fragPkt, offset, copied, more := pf.BuildNextFragment()
- wantFragment := test.wantFragments[i]
- if got := pf.RemainingFragmentCount(); got != wantFragment.remaining {
- t.Errorf("(fragment #%d) got pf.RemainingFragmentCount() = %d, want = %d", i, got, wantFragment.remaining)
- }
- if copied != wantFragment.copied {
- t.Errorf("(fragment #%d) got copied = %d, want = %d", i, copied, wantFragment.copied)
- }
- if offset != wantFragment.offset {
- t.Errorf("(fragment #%d) got offset = %d, want = %d", i, offset, wantFragment.offset)
- }
- if more != wantFragment.more {
- t.Errorf("(fragment #%d) got more = %t, want = %t", i, more, wantFragment.more)
- }
- if got := uint32(fragPkt.Size()); got > test.fragmentPayloadLen {
- t.Errorf("(fragment #%d) got fragPkt.Size() = %d, want <= %d", i, got, test.fragmentPayloadLen)
- }
- if got := fragPkt.AvailableHeaderBytes(); got != reserve {
- t.Errorf("(fragment #%d) got fragPkt.AvailableHeaderBytes() = %d, want = %d", i, got, reserve)
- }
- if got := fragPkt.TransportHeader().View().Size(); got != 0 {
- t.Errorf("(fragment #%d) got fragPkt.TransportHeader().View().Size() = %d, want = 0", i, got)
- }
- reassembledPayload.AppendViews(fragPkt.Data().Views())
- if !more {
- if i != len(test.wantFragments)-1 {
- t.Errorf("got fragment count = %d, want = %d", i, len(test.wantFragments)-1)
- }
- break
- }
- }
- if diff := cmp.Diff(reassembledPayload.ToView(), originalPayload); diff != "" {
- t.Errorf("reassembledPayload mismatch (-want +got):\n%s", diff)
- }
- })
- }
-}
-
-type testTimeoutHandler struct {
- pkt *stack.PacketBuffer
-}
-
-func (h *testTimeoutHandler) OnReassemblyTimeout(pkt *stack.PacketBuffer) {
- h.pkt = pkt
-}
-
-func TestTimeoutHandler(t *testing.T) {
- const (
- proto = 99
- )
-
- pk1 := pkt(1, "1")
- pk2 := pkt(1, "2")
-
- type processParam struct {
- first uint16
- last uint16
- more bool
- pkt *stack.PacketBuffer
- }
-
- tests := []struct {
- name string
- params []processParam
- wantError bool
- wantPkt *stack.PacketBuffer
- }{
- {
- name: "onTimeout runs",
- params: []processParam{
- {
- first: 0,
- last: 0,
- more: true,
- pkt: pk1,
- },
- },
- wantError: false,
- wantPkt: pk1,
- },
- {
- name: "no first fragment",
- params: []processParam{
- {
- first: 1,
- last: 1,
- more: true,
- pkt: pk1,
- },
- },
- wantError: false,
- wantPkt: nil,
- },
- {
- name: "second pkt is ignored",
- params: []processParam{
- {
- first: 0,
- last: 0,
- more: true,
- pkt: pk1,
- },
- {
- first: 0,
- last: 0,
- more: true,
- pkt: pk2,
- },
- },
- wantError: false,
- wantPkt: pk1,
- },
- {
- name: "invalid args - first is greater than last",
- params: []processParam{
- {
- first: 1,
- last: 0,
- more: true,
- pkt: pk1,
- },
- },
- wantError: true,
- wantPkt: nil,
- },
- }
-
- id := FragmentID{ID: 0}
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- handler := &testTimeoutHandler{pkt: nil}
-
- f := NewFragmentation(minBlockSize, HighFragThreshold, LowFragThreshold, reassembleTimeout, &faketime.NullClock{}, handler)
-
- for _, p := range test.params {
- if _, _, _, err := f.Process(id, p.first, p.last, p.more, proto, p.pkt); err != nil && !test.wantError {
- t.Errorf("f.Process error = %s", err)
- }
- }
- if !test.wantError {
- r, ok := f.reassemblers[id]
- if !ok {
- t.Fatal("Reassembler not found")
- }
- f.release(r, true)
- }
- switch {
- case handler.pkt != nil && test.wantPkt == nil:
- t.Errorf("got handler.pkt = not nil (pkt.Data = %x), want = nil", handler.pkt.Data().AsRange().ToOwnedView())
- case handler.pkt == nil && test.wantPkt != nil:
- t.Errorf("got handler.pkt = nil, want = not nil (pkt.Data = %x)", test.wantPkt.Data().AsRange().ToOwnedView())
- case handler.pkt != nil && test.wantPkt != nil:
- if diff := cmp.Diff(test.wantPkt.Data().AsRange().ToOwnedView(), handler.pkt.Data().AsRange().ToOwnedView()); diff != "" {
- t.Errorf("pkt.Data mismatch (-want, +got):\n%s", diff)
- }
- }
- })
- }
-}
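As a reader's aid, the reassembly flow driven by the deleted tests above looks roughly like this (an in-package sketch using only the calls visible in TestFragmentationProcess; the fragment ID, offsets, and protocol value are illustrative, and each input packet is assumed to carry exactly two bytes so the offsets line up):

// reassembleTwo feeds two fragments of one datagram, as the test cases above
// do, and returns the reassembled packet once Process reports done.
func reassembleTwo(firstFrag, lastFrag *stack.PacketBuffer) (*stack.PacketBuffer, error) {
	f := NewFragmentation(minBlockSize, HighFragThreshold, LowFragThreshold,
		reassembleTimeout, &faketime.NullClock{}, nil /* no timeout handler */)

	// First fragment: bytes [0, 1], more fragments to come.
	if _, _, done, err := f.Process(FragmentID{ID: 1}, 0, 1, true, 17, firstFrag); err != nil || done {
		return nil, err
	}
	// Last fragment: bytes [2, 3], no more fragments; done becomes true.
	pkt, _, done, err := f.Process(FragmentID{ID: 1}, 2, 3, false, 17, lastFrag)
	if err != nil || !done {
		return nil, err
	}
	return pkt, nil
}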
diff --git a/pkg/tcpip/network/internal/fragmentation/reassembler_list.go b/pkg/tcpip/network/internal/fragmentation/reassembler_list.go
new file mode 100644
index 000000000..673bb11b0
--- /dev/null
+++ b/pkg/tcpip/network/internal/fragmentation/reassembler_list.go
@@ -0,0 +1,221 @@
+package fragmentation
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type reassemblerElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (reassemblerElementMapper) linkerFor(elem *reassembler) *reassembler { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type reassemblerList struct {
+ head *reassembler
+ tail *reassembler
+}
+
+// Reset resets list l to the empty state.
+func (l *reassemblerList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *reassemblerList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *reassemblerList) Front() *reassembler {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *reassemblerList) Back() *reassembler {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *reassemblerList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (reassemblerElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *reassemblerList) PushFront(e *reassembler) {
+ linker := reassemblerElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ reassemblerElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *reassemblerList) PushBack(e *reassembler) {
+ linker := reassemblerElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ reassemblerElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *reassemblerList) PushBackList(m *reassemblerList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ reassemblerElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ reassemblerElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *reassemblerList) InsertAfter(b, e *reassembler) {
+ bLinker := reassemblerElementMapper{}.linkerFor(b)
+ eLinker := reassemblerElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ reassemblerElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *reassemblerList) InsertBefore(a, e *reassembler) {
+ aLinker := reassemblerElementMapper{}.linkerFor(a)
+ eLinker := reassemblerElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ reassemblerElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *reassemblerList) Remove(e *reassembler) {
+ linker := reassemblerElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ reassemblerElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ reassemblerElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type reassemblerEntry struct {
+ next *reassembler
+ prev *reassembler
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *reassemblerEntry) Next() *reassembler {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *reassemblerEntry) Prev() *reassembler {
+ return e.prev
+}
+
+// SetNext assigns 'entry' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *reassemblerEntry) SetNext(elem *reassembler) {
+ e.next = elem
+}
+
+// SetPrev assigns 'entry' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *reassemblerEntry) SetPrev(elem *reassembler) {
+ e.prev = elem
+}
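The generated type above is gVisor's usual intrusive list: the element type embeds reassemblerEntry (as *reassembler does) and can then be linked in O(1) with no extra allocations. A short in-package sketch of driving it with only the methods defined above; visitAndDrop and its parameters are illustrative:

// visitAndDrop shows the push/iterate/remove pattern this package uses with
// the list; r1 and r2 are existing *reassembler values.
func visitAndDrop(r1, r2 *reassembler) int {
	var list reassemblerList // the zero value is an empty, ready-to-use list
	list.PushBack(r1)
	list.PushFront(r2) // order is now r2, r1

	count := 0
	for e := list.Front(); e != nil; e = e.Next() {
		count++ // visit each reassembler without allocating
	}

	list.Remove(r1) // O(1): unlinks r1 directly, no scan needed
	return count
}

The deleted fragmentation_test.go above walks f.rList the same way (Front/Next) to confirm a reassembler was unlinked after reassembly completed.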
diff --git a/pkg/tcpip/network/internal/fragmentation/reassembler_test.go b/pkg/tcpip/network/internal/fragmentation/reassembler_test.go
deleted file mode 100644
index cfd9f00ef..000000000
--- a/pkg/tcpip/network/internal/fragmentation/reassembler_test.go
+++ /dev/null
@@ -1,233 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fragmentation
-
-import (
- "bytes"
- "math"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/faketime"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
-)
-
-type processParams struct {
- first uint16
- last uint16
- more bool
- pkt *stack.PacketBuffer
- wantDone bool
- wantError error
-}
-
-func TestReassemblerProcess(t *testing.T) {
- const proto = 99
-
- v := func(size int) buffer.View {
- payload := buffer.NewView(size)
- for i := 1; i < size; i++ {
- payload[i] = uint8(i) * 3
- }
- return payload
- }
-
- pkt := func(sizes ...int) *stack.PacketBuffer {
- var vv buffer.VectorisedView
- for _, size := range sizes {
- vv.AppendView(v(size))
- }
- return stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: vv,
- })
- }
-
- var tests = []struct {
- name string
- params []processParams
- want []hole
- wantPkt *stack.PacketBuffer
- }{
- {
- name: "No fragments",
- params: nil,
- want: []hole{{first: 0, last: math.MaxUint16, filled: false, final: true}},
- },
- {
- name: "One fragment at beginning",
- params: []processParams{{first: 0, last: 1, more: true, pkt: pkt(2), wantDone: false, wantError: nil}},
- want: []hole{
- {first: 0, last: 1, filled: true, final: false, pkt: pkt(2)},
- {first: 2, last: math.MaxUint16, filled: false, final: true},
- },
- },
- {
- name: "One fragment in the middle",
- params: []processParams{{first: 1, last: 2, more: true, pkt: pkt(2), wantDone: false, wantError: nil}},
- want: []hole{
- {first: 1, last: 2, filled: true, final: false, pkt: pkt(2)},
- {first: 0, last: 0, filled: false, final: false},
- {first: 3, last: math.MaxUint16, filled: false, final: true},
- },
- },
- {
- name: "One fragment at the end",
- params: []processParams{{first: 1, last: 2, more: false, pkt: pkt(2), wantDone: false, wantError: nil}},
- want: []hole{
- {first: 1, last: 2, filled: true, final: true, pkt: pkt(2)},
- {first: 0, last: 0, filled: false},
- },
- },
- {
- name: "One fragment completing a packet",
- params: []processParams{{first: 0, last: 1, more: false, pkt: pkt(2), wantDone: true, wantError: nil}},
- want: []hole{
- {first: 0, last: 1, filled: true, final: true},
- },
- wantPkt: pkt(2),
- },
- {
- name: "Two fragments completing a packet",
- params: []processParams{
- {first: 0, last: 1, more: true, pkt: pkt(2), wantDone: false, wantError: nil},
- {first: 2, last: 3, more: false, pkt: pkt(2), wantDone: true, wantError: nil},
- },
- want: []hole{
- {first: 0, last: 1, filled: true, final: false},
- {first: 2, last: 3, filled: true, final: true},
- },
- wantPkt: pkt(2, 2),
- },
- {
- name: "Two fragments completing a packet with a duplicate",
- params: []processParams{
- {first: 0, last: 1, more: true, pkt: pkt(2), wantDone: false, wantError: nil},
- {first: 0, last: 1, more: true, pkt: pkt(2), wantDone: false, wantError: nil},
- {first: 2, last: 3, more: false, pkt: pkt(2), wantDone: true, wantError: nil},
- },
- want: []hole{
- {first: 0, last: 1, filled: true, final: false},
- {first: 2, last: 3, filled: true, final: true},
- },
- wantPkt: pkt(2, 2),
- },
- {
- name: "Two fragments completing a packet with a partial duplicate",
- params: []processParams{
- {first: 0, last: 3, more: true, pkt: pkt(4), wantDone: false, wantError: nil},
- {first: 1, last: 2, more: true, pkt: pkt(2), wantDone: false, wantError: nil},
- {first: 4, last: 5, more: false, pkt: pkt(2), wantDone: true, wantError: nil},
- },
- want: []hole{
- {first: 0, last: 3, filled: true, final: false},
- {first: 4, last: 5, filled: true, final: true},
- },
- wantPkt: pkt(4, 2),
- },
- {
- name: "Two overlapping fragments",
- params: []processParams{
- {first: 0, last: 10, more: true, pkt: pkt(11), wantDone: false, wantError: nil},
- {first: 5, last: 15, more: false, pkt: pkt(11), wantDone: false, wantError: ErrFragmentOverlap},
- },
- want: []hole{
- {first: 0, last: 10, filled: true, final: false, pkt: pkt(11)},
- {first: 11, last: math.MaxUint16, filled: false, final: true},
- },
- },
- {
- name: "Two final fragments with different ends",
- params: []processParams{
- {first: 10, last: 14, more: false, pkt: pkt(5), wantDone: false, wantError: nil},
- {first: 0, last: 9, more: false, pkt: pkt(10), wantDone: false, wantError: ErrFragmentConflict},
- },
- want: []hole{
- {first: 10, last: 14, filled: true, final: true, pkt: pkt(5)},
- {first: 0, last: 9, filled: false, final: false},
- },
- },
- {
- name: "Two final fragments - duplicate",
- params: []processParams{
- {first: 5, last: 14, more: false, pkt: pkt(10), wantDone: false, wantError: nil},
- {first: 10, last: 14, more: false, pkt: pkt(5), wantDone: false, wantError: nil},
- },
- want: []hole{
- {first: 5, last: 14, filled: true, final: true, pkt: pkt(10)},
- {first: 0, last: 4, filled: false, final: false},
- },
- },
- {
- name: "Two final fragments - duplicate, with different ends",
- params: []processParams{
- {first: 5, last: 14, more: false, pkt: pkt(10), wantDone: false, wantError: nil},
- {first: 10, last: 13, more: false, pkt: pkt(4), wantDone: false, wantError: ErrFragmentConflict},
- },
- want: []hole{
- {first: 5, last: 14, filled: true, final: true, pkt: pkt(10)},
- {first: 0, last: 4, filled: false, final: false},
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- r := newReassembler(FragmentID{}, &faketime.NullClock{})
- var resPkt *stack.PacketBuffer
- var isDone bool
- for _, param := range test.params {
- pkt, _, done, _, err := r.process(param.first, param.last, param.more, proto, param.pkt)
- if done != param.wantDone || err != param.wantError {
- t.Errorf("got r.process(%d, %d, %t, %d, _) = (_, _, %t, _, %v), want = (%t, %v)", param.first, param.last, param.more, proto, done, err, param.wantDone, param.wantError)
- }
- if done {
- resPkt = pkt
- isDone = true
- }
- }
-
- ignorePkt := func(a, b *stack.PacketBuffer) bool { return true }
- cmpPktData := func(a, b *stack.PacketBuffer) bool {
- if a == nil || b == nil {
- return a == b
- }
- return bytes.Equal(a.Data().AsRange().ToOwnedView(), b.Data().AsRange().ToOwnedView())
- }
-
- if isDone {
- if diff := cmp.Diff(
- test.want, r.holes,
- cmp.AllowUnexported(hole{}),
- // Do not compare pkt in hole. Data will be altered.
- cmp.Comparer(ignorePkt),
- ); diff != "" {
- t.Errorf("r.holes mismatch (-want +got):\n%s", diff)
- }
- if diff := cmp.Diff(test.wantPkt, resPkt, cmp.Comparer(cmpPktData)); diff != "" {
- t.Errorf("Reassembled pkt mismatch (-want +got):\n%s", diff)
- }
- } else {
- if diff := cmp.Diff(
- test.want, r.holes,
- cmp.AllowUnexported(hole{}),
- cmp.Comparer(cmpPktData),
- ); diff != "" {
- t.Errorf("r.holes mismatch (-want +got):\n%s", diff)
- }
- }
- })
- }
-}
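For orientation, the deleted test above drove the package-internal reassembler by feeding fragments into process until it reported completion. The following is a minimal sketch of that calling pattern, not part of this change: it assumes the unexported newReassembler/process signatures used in the test (so it could only live inside the fragmentation package), the stack and faketime imports shown above, and hypothetical firstPkt/secondPkt fragment buffers built like pkt(2) in the deleted test.

// reassembleTwoFragments shows the calling pattern from the deleted test:
// feed fragments through process until done is reported. firstPkt covers
// bytes [0, 1]; secondPkt covers bytes [2, 3] and is the final fragment.
func reassembleTwoFragments(firstPkt, secondPkt *stack.PacketBuffer) (*stack.PacketBuffer, error) {
	// NullClock suffices because the reassembly timeout never fires here.
	r := newReassembler(FragmentID{}, &faketime.NullClock{})

	// more=true: further fragments follow, so done is expected to be false.
	if _, _, done, _, err := r.process(0, 1, true /* more */, 99 /* proto */, firstPkt); err != nil || done {
		return nil, err
	}

	// Final fragment; done=true means the reassembled packet is ready.
	resPkt, _, done, _, err := r.process(2, 3, false /* more */, 99 /* proto */, secondPkt)
	if err != nil || !done {
		return nil, err
	}
	return resPkt, nil
}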
diff --git a/pkg/tcpip/network/internal/ip/BUILD b/pkg/tcpip/network/internal/ip/BUILD
deleted file mode 100644
index fd944ce99..000000000
--- a/pkg/tcpip/network/internal/ip/BUILD
+++ /dev/null
@@ -1,40 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "ip",
- srcs = [
- "duplicate_address_detection.go",
- "errors.go",
- "generic_multicast_protocol.go",
- "stats.go",
- ],
- visibility = [
- "//pkg/tcpip/network/arp:__pkg__",
- "//pkg/tcpip/network/ipv4:__pkg__",
- "//pkg/tcpip/network/ipv6:__pkg__",
- ],
- deps = [
- "//pkg/sync",
- "//pkg/tcpip",
- "//pkg/tcpip/stack",
- ],
-)
-
-go_test(
- name = "ip_x_test",
- size = "small",
- srcs = [
- "duplicate_address_detection_test.go",
- "generic_multicast_protocol_test.go",
- ],
- deps = [
- ":ip",
- "//pkg/sync",
- "//pkg/tcpip",
- "//pkg/tcpip/faketime",
- "//pkg/tcpip/stack",
- "@com_github_google_go_cmp//cmp:go_default_library",
- ],
-)
diff --git a/pkg/tcpip/network/internal/ip/duplicate_address_detection_test.go b/pkg/tcpip/network/internal/ip/duplicate_address_detection_test.go
deleted file mode 100644
index 24687cf06..000000000
--- a/pkg/tcpip/network/internal/ip/duplicate_address_detection_test.go
+++ /dev/null
@@ -1,381 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ip_test
-
-import (
- "bytes"
- "testing"
- "time"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/faketime"
- "gvisor.dev/gvisor/pkg/tcpip/network/internal/ip"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
-)
-
-type mockDADProtocol struct {
- t *testing.T
-
- mu struct {
- sync.Mutex
-
- dad ip.DAD
- sentNonces map[tcpip.Address][][]byte
- }
-}
-
-func (m *mockDADProtocol) init(t *testing.T, c stack.DADConfigurations, opts ip.DADOptions) {
- m.mu.Lock()
- defer m.mu.Unlock()
-
- m.t = t
- opts.Protocol = m
- m.mu.dad.Init(&m.mu, c, opts)
- m.initLocked()
-}
-
-func (m *mockDADProtocol) initLocked() {
- m.mu.sentNonces = make(map[tcpip.Address][][]byte)
-}
-
-func (m *mockDADProtocol) SendDADMessage(addr tcpip.Address, nonce []byte) tcpip.Error {
- m.mu.Lock()
- defer m.mu.Unlock()
- m.mu.sentNonces[addr] = append(m.mu.sentNonces[addr], nonce)
- return nil
-}
-
-func (m *mockDADProtocol) check(addrs []tcpip.Address) string {
- sentNonces := make(map[tcpip.Address][][]byte)
- for _, a := range addrs {
- sentNonces[a] = append(sentNonces[a], nil)
- }
-
- return m.checkWithNonce(sentNonces)
-}
-
-func (m *mockDADProtocol) checkWithNonce(expectedSentNonces map[tcpip.Address][][]byte) string {
- m.mu.Lock()
- defer m.mu.Unlock()
-
- diff := cmp.Diff(expectedSentNonces, m.mu.sentNonces)
- m.initLocked()
- return diff
-}
-
-func (m *mockDADProtocol) checkDuplicateAddress(addr tcpip.Address, h stack.DADCompletionHandler) stack.DADCheckAddressDisposition {
- m.mu.Lock()
- defer m.mu.Unlock()
- return m.mu.dad.CheckDuplicateAddressLocked(addr, h)
-}
-
-func (m *mockDADProtocol) stop(addr tcpip.Address, reason stack.DADResult) {
- m.mu.Lock()
- defer m.mu.Unlock()
- m.mu.dad.StopLocked(addr, reason)
-}
-
-func (m *mockDADProtocol) extendIfNonceEqual(addr tcpip.Address, nonce []byte) ip.ExtendIfNonceEqualLockedDisposition {
- m.mu.Lock()
- defer m.mu.Unlock()
- return m.mu.dad.ExtendIfNonceEqualLocked(addr, nonce)
-}
-
-func (m *mockDADProtocol) setConfigs(c stack.DADConfigurations) {
- m.mu.Lock()
- defer m.mu.Unlock()
- m.mu.dad.SetConfigsLocked(c)
-}
-
-const (
- addr1 = tcpip.Address("\x01")
- addr2 = tcpip.Address("\x02")
- addr3 = tcpip.Address("\x03")
- addr4 = tcpip.Address("\x04")
-)
-
-type dadResult struct {
- Addr tcpip.Address
- R stack.DADResult
-}
-
-func handler(ch chan<- dadResult, a tcpip.Address) func(stack.DADResult) {
- return func(r stack.DADResult) {
- ch <- dadResult{Addr: a, R: r}
- }
-}
-
-func TestDADCheckDuplicateAddress(t *testing.T) {
- var dad mockDADProtocol
- clock := faketime.NewManualClock()
- dad.init(t, stack.DADConfigurations{}, ip.DADOptions{
- Clock: clock,
- })
-
- ch := make(chan dadResult, 2)
-
- // DAD should initially be disabled.
- if res := dad.checkDuplicateAddress(addr1, handler(nil, "")); res != stack.DADDisabled {
- t.Errorf("got dad.checkDuplicateAddress(%s, _) = %d, want = %d", addr1, res, stack.DADDisabled)
- }
- // Wait for any initially fired timers to complete.
- clock.RunImmediatelyScheduledJobs()
- if diff := dad.check(nil); diff != "" {
- t.Errorf("dad check mismatch (-want +got):\n%s", diff)
- }
-
- // Enable and request DAD.
- dadConfigs1 := stack.DADConfigurations{
- DupAddrDetectTransmits: 1,
- RetransmitTimer: time.Second,
- }
- dad.setConfigs(dadConfigs1)
- if res := dad.checkDuplicateAddress(addr1, handler(ch, addr1)); res != stack.DADStarting {
- t.Errorf("got dad.checkDuplicateAddress(%s, _) = %d, want = %d", addr1, res, stack.DADStarting)
- }
- clock.RunImmediatelyScheduledJobs()
- if diff := dad.check([]tcpip.Address{addr1}); diff != "" {
- t.Errorf("dad check mismatch (-want +got):\n%s", diff)
- }
- // The second request for DAD on the same address should use the original
- // request since it has not completed yet.
- if res := dad.checkDuplicateAddress(addr1, handler(ch, addr1)); res != stack.DADAlreadyRunning {
- t.Errorf("got dad.checkDuplicateAddress(%s, _) = %d, want = %d", addr1, res, stack.DADAlreadyRunning)
- }
- clock.RunImmediatelyScheduledJobs()
- if diff := dad.check(nil); diff != "" {
- t.Errorf("dad check mismatch (-want +got):\n%s", diff)
- }
-
- dadConfigs2 := stack.DADConfigurations{
- DupAddrDetectTransmits: 2,
- RetransmitTimer: time.Second,
- }
- dad.setConfigs(dadConfigs2)
- // A new address should start a new DAD process.
- if res := dad.checkDuplicateAddress(addr2, handler(ch, addr2)); res != stack.DADStarting {
- t.Errorf("got dad.checkDuplicateAddress(%s, _) = %d, want = %d", addr2, res, stack.DADStarting)
- }
- clock.RunImmediatelyScheduledJobs()
- if diff := dad.check([]tcpip.Address{addr2}); diff != "" {
- t.Errorf("dad check mismatch (-want +got):\n%s", diff)
- }
-
- // Make sure DAD for addr1 only resolves after the expected timeout.
- const delta = time.Nanosecond
- dadConfig1Duration := time.Duration(dadConfigs1.DupAddrDetectTransmits) * dadConfigs1.RetransmitTimer
- clock.Advance(dadConfig1Duration - delta)
- select {
- case r := <-ch:
- t.Fatalf("unexpectedly got a DAD result before the expected timeout of %s; r = %#v", dadConfig1Duration, r)
- default:
- }
- clock.Advance(delta)
- for i := 0; i < 2; i++ {
- if diff := cmp.Diff(dadResult{Addr: addr1, R: &stack.DADSucceeded{}}, <-ch); diff != "" {
- t.Errorf("(i=%d) dad result mismatch (-want +got):\n%s", i, diff)
- }
- }
-
- // Make sure DAD for addr2 only resolves after the expected timeout.
- dadConfig2Duration := time.Duration(dadConfigs2.DupAddrDetectTransmits) * dadConfigs2.RetransmitTimer
- clock.Advance(dadConfig2Duration - dadConfig1Duration - delta)
- select {
- case r := <-ch:
- t.Fatalf("unexpectedly got a DAD result before the expected timeout of %s; r = %#v", dadConfig2Duration, r)
- default:
- }
- clock.Advance(delta)
- if diff := cmp.Diff(dadResult{Addr: addr2, R: &stack.DADSucceeded{}}, <-ch); diff != "" {
- t.Errorf("dad result mismatch (-want +got):\n%s", diff)
- }
-
- // Should be able to restart DAD for addr2 after it resolved.
- if res := dad.checkDuplicateAddress(addr2, handler(ch, addr2)); res != stack.DADStarting {
- t.Errorf("got dad.checkDuplicateAddress(%s, _) = %d, want = %d", addr2, res, stack.DADStarting)
- }
- clock.RunImmediatelyScheduledJobs()
- if diff := dad.check([]tcpip.Address{addr2, addr2}); diff != "" {
- t.Errorf("dad check mismatch (-want +got):\n%s", diff)
- }
- clock.Advance(dadConfig2Duration)
- if diff := cmp.Diff(dadResult{Addr: addr2, R: &stack.DADSucceeded{}}, <-ch); diff != "" {
- t.Errorf("dad result mismatch (-want +got):\n%s", diff)
- }
-
-	// Should not have any more results.
- select {
- case r := <-ch:
- t.Fatalf("unexpectedly got an extra DAD result; r = %#v", r)
- default:
- }
-}
-
-func TestDADStop(t *testing.T) {
- var dad mockDADProtocol
- clock := faketime.NewManualClock()
- dadConfigs := stack.DADConfigurations{
- DupAddrDetectTransmits: 1,
- RetransmitTimer: time.Second,
- }
- dad.init(t, dadConfigs, ip.DADOptions{
- Clock: clock,
- })
-
- ch := make(chan dadResult, 1)
-
- if res := dad.checkDuplicateAddress(addr1, handler(ch, addr1)); res != stack.DADStarting {
- t.Errorf("got dad.checkDuplicateAddress(%s, _) = %d, want = %d", addr1, res, stack.DADStarting)
- }
- if res := dad.checkDuplicateAddress(addr2, handler(ch, addr2)); res != stack.DADStarting {
- t.Errorf("got dad.checkDuplicateAddress(%s, _) = %d, want = %d", addr2, res, stack.DADStarting)
- }
- if res := dad.checkDuplicateAddress(addr3, handler(ch, addr3)); res != stack.DADStarting {
-		t.Errorf("got dad.checkDuplicateAddress(%s, _) = %d, want = %d", addr3, res, stack.DADStarting)
- }
- clock.RunImmediatelyScheduledJobs()
- if diff := dad.check([]tcpip.Address{addr1, addr2, addr3}); diff != "" {
- t.Errorf("dad check mismatch (-want +got):\n%s", diff)
- }
-
- dad.stop(addr1, &stack.DADAborted{})
- if diff := cmp.Diff(dadResult{Addr: addr1, R: &stack.DADAborted{}}, <-ch); diff != "" {
- t.Errorf("dad result mismatch (-want +got):\n%s", diff)
- }
-
- dad.stop(addr2, &stack.DADDupAddrDetected{})
- if diff := cmp.Diff(dadResult{Addr: addr2, R: &stack.DADDupAddrDetected{}}, <-ch); diff != "" {
- t.Errorf("dad result mismatch (-want +got):\n%s", diff)
- }
-
- dadResolutionDuration := time.Duration(dadConfigs.DupAddrDetectTransmits) * dadConfigs.RetransmitTimer
- clock.Advance(dadResolutionDuration)
- if diff := cmp.Diff(dadResult{Addr: addr3, R: &stack.DADSucceeded{}}, <-ch); diff != "" {
- t.Errorf("dad result mismatch (-want +got):\n%s", diff)
- }
-
- // Should be able to restart DAD for an address we stopped DAD on.
- if res := dad.checkDuplicateAddress(addr1, handler(ch, addr1)); res != stack.DADStarting {
- t.Errorf("got dad.checkDuplicateAddress(%s, _) = %d, want = %d", addr1, res, stack.DADStarting)
- }
- clock.RunImmediatelyScheduledJobs()
- if diff := dad.check([]tcpip.Address{addr1}); diff != "" {
- t.Errorf("dad check mismatch (-want +got):\n%s", diff)
- }
- clock.Advance(dadResolutionDuration)
- if diff := cmp.Diff(dadResult{Addr: addr1, R: &stack.DADSucceeded{}}, <-ch); diff != "" {
- t.Errorf("dad result mismatch (-want +got):\n%s", diff)
- }
-
-	// Should not have any more updates.
- select {
- case r := <-ch:
- t.Fatalf("unexpectedly got an extra DAD result; r = %#v", r)
- default:
- }
-}
-
-func TestNonce(t *testing.T) {
- const (
- nonceSize = 2
-
- extendRequestAttempts = 2
-
- dupAddrDetectTransmits = 2
- extendTransmits = 5
- )
-
- var secureRNGBytes [nonceSize * (dupAddrDetectTransmits + extendTransmits)]byte
- for i := range secureRNGBytes {
- secureRNGBytes[i] = byte(i)
- }
-
- tests := []struct {
- name string
- mockedReceivedNonce []byte
- expectedResults [extendRequestAttempts]ip.ExtendIfNonceEqualLockedDisposition
- expectedTransmits int
- }{
- {
- name: "not matching",
- mockedReceivedNonce: []byte{0, 0},
- expectedResults: [extendRequestAttempts]ip.ExtendIfNonceEqualLockedDisposition{ip.NonceNotEqual, ip.NonceNotEqual},
- expectedTransmits: dupAddrDetectTransmits,
- },
- {
- name: "matching nonce",
- mockedReceivedNonce: secureRNGBytes[:nonceSize],
- expectedResults: [extendRequestAttempts]ip.ExtendIfNonceEqualLockedDisposition{ip.Extended, ip.AlreadyExtended},
- expectedTransmits: dupAddrDetectTransmits + extendTransmits,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var dad mockDADProtocol
- clock := faketime.NewManualClock()
- dadConfigs := stack.DADConfigurations{
- DupAddrDetectTransmits: dupAddrDetectTransmits,
- RetransmitTimer: time.Second,
- }
-
- var secureRNG bytes.Reader
- secureRNG.Reset(secureRNGBytes[:])
- dad.init(t, dadConfigs, ip.DADOptions{
- Clock: clock,
- SecureRNG: &secureRNG,
- NonceSize: nonceSize,
- ExtendDADTransmits: extendTransmits,
- })
-
- ch := make(chan dadResult, 1)
- if res := dad.checkDuplicateAddress(addr1, handler(ch, addr1)); res != stack.DADStarting {
- t.Errorf("got dad.checkDuplicateAddress(%s, _) = %d, want = %d", addr1, res, stack.DADStarting)
- }
-
- clock.RunImmediatelyScheduledJobs()
- for i, want := range test.expectedResults {
- if got := dad.extendIfNonceEqual(addr1, test.mockedReceivedNonce); got != want {
- t.Errorf("(i=%d) got dad.extendIfNonceEqual(%s, _) = %d, want = %d", i, addr1, got, want)
- }
- }
-
- for i := 0; i < test.expectedTransmits; i++ {
- if diff := dad.checkWithNonce(map[tcpip.Address][][]byte{
- addr1: {
- secureRNGBytes[nonceSize*i:][:nonceSize],
- },
- }); diff != "" {
- t.Errorf("(i=%d) dad check mismatch (-want +got):\n%s", i, diff)
- }
-
- clock.Advance(dadConfigs.RetransmitTimer)
- }
-
- if diff := cmp.Diff(dadResult{Addr: addr1, R: &stack.DADSucceeded{}}, <-ch); diff != "" {
- t.Errorf("dad result mismatch (-want +got):\n%s", diff)
- }
-
-			// Should not have any more updates.
- select {
- case r := <-ch:
- t.Fatalf("unexpectedly got an extra DAD result; r = %#v", r)
- default:
- }
- })
- }
-}
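For orientation, the deleted DAD test wraps ip.DAD behind a protocol-owned mutex plus a SendDADMessage implementation. Below is a minimal sketch of that integration pattern; it only assumes the Init/CheckDuplicateAddressLocked signatures exercised by the deleted mock and the sync, time, tcpip, stack and ip imports already shown above, and dadUser with its start helper are hypothetical names.

// dadUser mirrors mockDADProtocol above: it owns the mutex handed to ip.DAD
// and implements SendDADMessage to put DAD probes on the wire.
type dadUser struct {
	mu  sync.Mutex
	dad ip.DAD
}

// SendDADMessage implements the protocol hook (signature taken from the
// deleted mock); a real protocol would transmit a probe carrying the nonce.
func (d *dadUser) SendDADMessage(tcpip.Address, []byte) tcpip.Error {
	return nil
}

// start initializes DAD and kicks off a check for addr. The completion
// handler fires with DADSucceeded, DADAborted, DADDupAddrDetected, etc.
func (d *dadUser) start(clock tcpip.Clock, addr tcpip.Address, done func(stack.DADResult)) stack.DADCheckAddressDisposition {
	d.mu.Lock()
	defer d.mu.Unlock()
	d.dad.Init(&d.mu, stack.DADConfigurations{
		DupAddrDetectTransmits: 1,
		RetransmitTimer:        time.Second,
	}, ip.DADOptions{
		Clock:    clock,
		Protocol: d,
	})
	return d.dad.CheckDuplicateAddressLocked(addr, done)
}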
diff --git a/pkg/tcpip/network/internal/ip/generic_multicast_protocol_test.go b/pkg/tcpip/network/internal/ip/generic_multicast_protocol_test.go
deleted file mode 100644
index 1261ad414..000000000
--- a/pkg/tcpip/network/internal/ip/generic_multicast_protocol_test.go
+++ /dev/null
@@ -1,808 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ip_test
-
-import (
- "math/rand"
- "testing"
- "time"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/faketime"
- "gvisor.dev/gvisor/pkg/tcpip/network/internal/ip"
-)
-
-const maxUnsolicitedReportDelay = time.Second
-
-var _ ip.MulticastGroupProtocol = (*mockMulticastGroupProtocol)(nil)
-
-type mockMulticastGroupProtocolProtectedFields struct {
- sync.RWMutex
-
- genericMulticastGroup ip.GenericMulticastProtocolState
- sendReportGroupAddrCount map[tcpip.Address]int
- sendLeaveGroupAddrCount map[tcpip.Address]int
- makeQueuePackets bool
- disabled bool
-}
-
-type mockMulticastGroupProtocol struct {
- t *testing.T
-
- skipProtocolAddress tcpip.Address
-
- mu mockMulticastGroupProtocolProtectedFields
-}
-
-func (m *mockMulticastGroupProtocol) init(opts ip.GenericMulticastProtocolOptions) {
- m.mu.Lock()
- defer m.mu.Unlock()
- m.initLocked()
- opts.Protocol = m
- m.mu.genericMulticastGroup.Init(&m.mu.RWMutex, opts)
-}
-
-func (m *mockMulticastGroupProtocol) initLocked() {
- m.mu.sendReportGroupAddrCount = make(map[tcpip.Address]int)
- m.mu.sendLeaveGroupAddrCount = make(map[tcpip.Address]int)
-}
-
-func (m *mockMulticastGroupProtocol) setEnabled(v bool) {
- m.mu.Lock()
- defer m.mu.Unlock()
- m.mu.disabled = !v
-}
-
-func (m *mockMulticastGroupProtocol) setQueuePackets(v bool) {
- m.mu.Lock()
- defer m.mu.Unlock()
- m.mu.makeQueuePackets = v
-}
-
-func (m *mockMulticastGroupProtocol) joinGroup(addr tcpip.Address) {
- m.mu.Lock()
- defer m.mu.Unlock()
- m.mu.genericMulticastGroup.JoinGroupLocked(addr)
-}
-
-func (m *mockMulticastGroupProtocol) leaveGroup(addr tcpip.Address) bool {
- m.mu.Lock()
- defer m.mu.Unlock()
- return m.mu.genericMulticastGroup.LeaveGroupLocked(addr)
-}
-
-func (m *mockMulticastGroupProtocol) handleReport(addr tcpip.Address) {
- m.mu.Lock()
- defer m.mu.Unlock()
- m.mu.genericMulticastGroup.HandleReportLocked(addr)
-}
-
-func (m *mockMulticastGroupProtocol) handleQuery(addr tcpip.Address, maxRespTime time.Duration) {
- m.mu.Lock()
- defer m.mu.Unlock()
- m.mu.genericMulticastGroup.HandleQueryLocked(addr, maxRespTime)
-}
-
-func (m *mockMulticastGroupProtocol) isLocallyJoined(addr tcpip.Address) bool {
- m.mu.RLock()
- defer m.mu.RUnlock()
- return m.mu.genericMulticastGroup.IsLocallyJoinedRLocked(addr)
-}
-
-func (m *mockMulticastGroupProtocol) makeAllNonMember() {
- m.mu.Lock()
- defer m.mu.Unlock()
- m.mu.genericMulticastGroup.MakeAllNonMemberLocked()
-}
-
-func (m *mockMulticastGroupProtocol) initializeGroups() {
- m.mu.Lock()
- defer m.mu.Unlock()
- m.mu.genericMulticastGroup.InitializeGroupsLocked()
-}
-
-func (m *mockMulticastGroupProtocol) sendQueuedReports() {
- m.mu.Lock()
- defer m.mu.Unlock()
- m.mu.genericMulticastGroup.SendQueuedReportsLocked()
-}
-
-// Enabled implements ip.MulticastGroupProtocol.
-//
-// Precondition: m.mu must be read locked.
-func (m *mockMulticastGroupProtocol) Enabled() bool {
- if m.mu.TryLock() {
- m.mu.Unlock() // +checklocksforce: TryLock.
- m.t.Fatal("got write lock, expected to not take the lock; generic multicast protocol must take the read or write lock before calling Enabled")
- }
-
- return !m.mu.disabled
-}
-
-// SendReport implements ip.MulticastGroupProtocol.
-//
-// Precondition: m.mu must be locked.
-func (m *mockMulticastGroupProtocol) SendReport(groupAddress tcpip.Address) (bool, tcpip.Error) {
- if m.mu.TryLock() {
- m.mu.Unlock() // +checklocksforce: TryLock.
- m.t.Fatalf("got write lock, expected to not take the lock; generic multicast protocol must take the write lock before sending report for %s", groupAddress)
- }
- if m.mu.TryRLock() {
- m.mu.RUnlock() // +checklocksforce: TryLock.
- m.t.Fatalf("got read lock, expected to not take the lock; generic multicast protocol must take the write lock before sending report for %s", groupAddress)
- }
-
- m.mu.sendReportGroupAddrCount[groupAddress]++
- return !m.mu.makeQueuePackets, nil
-}
-
-// SendLeave implements ip.MulticastGroupProtocol.
-//
-// Precondition: m.mu must be locked.
-func (m *mockMulticastGroupProtocol) SendLeave(groupAddress tcpip.Address) tcpip.Error {
- if m.mu.TryLock() {
- m.mu.Unlock() // +checklocksforce: TryLock.
- m.t.Fatalf("got write lock, expected to not take the lock; generic multicast protocol must take the write lock before sending leave for %s", groupAddress)
- }
- if m.mu.TryRLock() {
- m.mu.RUnlock() // +checklocksforce: TryLock.
- m.t.Fatalf("got read lock, expected to not take the lock; generic multicast protocol must take the write lock before sending leave for %s", groupAddress)
- }
-
- m.mu.sendLeaveGroupAddrCount[groupAddress]++
- return nil
-}
-
-// ShouldPerformProtocol implements ip.MulticastGroupProtocol.
-func (m *mockMulticastGroupProtocol) ShouldPerformProtocol(groupAddress tcpip.Address) bool {
- return groupAddress != m.skipProtocolAddress
-}
-
-func (m *mockMulticastGroupProtocol) check(sendReportGroupAddresses []tcpip.Address, sendLeaveGroupAddresses []tcpip.Address) string {
- m.mu.Lock()
- defer m.mu.Unlock()
-
- sendReportGroupAddrCount := make(map[tcpip.Address]int)
- for _, a := range sendReportGroupAddresses {
- sendReportGroupAddrCount[a] = 1
- }
-
- sendLeaveGroupAddrCount := make(map[tcpip.Address]int)
- for _, a := range sendLeaveGroupAddresses {
- sendLeaveGroupAddrCount[a] = 1
- }
-
- diff := cmp.Diff(
- &mockMulticastGroupProtocol{
- mu: mockMulticastGroupProtocolProtectedFields{
- sendReportGroupAddrCount: sendReportGroupAddrCount,
- sendLeaveGroupAddrCount: sendLeaveGroupAddrCount,
- },
- },
- m,
- cmp.AllowUnexported(mockMulticastGroupProtocol{}),
- cmp.AllowUnexported(mockMulticastGroupProtocolProtectedFields{}),
- // ignore mockMulticastGroupProtocol.mu and mockMulticastGroupProtocol.t
- cmp.FilterPath(
- func(p cmp.Path) bool {
- switch p.Last().String() {
- case ".RWMutex", ".t", ".makeQueuePackets", ".disabled", ".genericMulticastGroup", ".skipProtocolAddress":
- return true
- default:
- return false
- }
- },
- cmp.Ignore(),
- ),
- )
- m.initLocked()
- return diff
-}
-
-func TestJoinGroup(t *testing.T) {
- tests := []struct {
- name string
- addr tcpip.Address
- shouldSendReports bool
- }{
- {
- name: "Normal group",
- addr: addr1,
- shouldSendReports: true,
- },
- {
- name: "All-nodes group",
- addr: addr2,
- shouldSendReports: false,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- mgp := mockMulticastGroupProtocol{t: t, skipProtocolAddress: addr2}
- clock := faketime.NewManualClock()
-
- mgp.init(ip.GenericMulticastProtocolOptions{
- Rand: rand.New(rand.NewSource(0)),
- Clock: clock,
- MaxUnsolicitedReportDelay: maxUnsolicitedReportDelay,
- })
-
- // Joining a group should send a report immediately and another after
- // a random interval between 0 and the maximum unsolicited report delay.
- mgp.joinGroup(test.addr)
- if test.shouldSendReports {
- if diff := mgp.check([]tcpip.Address{test.addr} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Errorf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
-
- // Generic multicast protocol timers are expected to take the job mutex.
- clock.Advance(maxUnsolicitedReportDelay)
- if diff := mgp.check([]tcpip.Address{test.addr} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Errorf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
- }
-
- // Should have no more messages to send.
- clock.Advance(time.Hour)
- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Errorf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
- })
- }
-}
-
-func TestLeaveGroup(t *testing.T) {
- tests := []struct {
- name string
- addr tcpip.Address
- shouldSendMessages bool
- }{
- {
- name: "Normal group",
- addr: addr1,
- shouldSendMessages: true,
- },
- {
- name: "All-nodes group",
- addr: addr2,
- shouldSendMessages: false,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- mgp := mockMulticastGroupProtocol{t: t, skipProtocolAddress: addr2}
- clock := faketime.NewManualClock()
-
- mgp.init(ip.GenericMulticastProtocolOptions{
- Rand: rand.New(rand.NewSource(1)),
- Clock: clock,
- MaxUnsolicitedReportDelay: maxUnsolicitedReportDelay,
- })
-
- mgp.joinGroup(test.addr)
- if test.shouldSendMessages {
- if diff := mgp.check([]tcpip.Address{test.addr} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Fatalf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
- }
-
- // Leaving a group should send a leave report immediately and cancel any
- // delayed reports.
- {
-
- if !mgp.leaveGroup(test.addr) {
- t.Fatalf("got mgp.leaveGroup(%s) = false, want = true", test.addr)
- }
- }
- if test.shouldSendMessages {
- if diff := mgp.check(nil /* sendReportGroupAddresses */, []tcpip.Address{test.addr} /* sendLeaveGroupAddresses */); diff != "" {
- t.Errorf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
- }
-
- // Should have no more messages to send.
- //
- // Generic multicast protocol timers are expected to take the job mutex.
- clock.Advance(time.Hour)
- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Errorf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
- })
- }
-}
-
-func TestHandleReport(t *testing.T) {
- tests := []struct {
- name string
- reportAddr tcpip.Address
- expectReportsFor []tcpip.Address
- }{
- {
-			name:             "Unspecified empty",
- reportAddr: "",
- expectReportsFor: []tcpip.Address{addr1, addr2},
- },
- {
-			name:             "Unspecified any",
- reportAddr: "\x00",
- expectReportsFor: []tcpip.Address{addr1, addr2},
- },
- {
- name: "Specified",
- reportAddr: addr1,
- expectReportsFor: []tcpip.Address{addr2},
- },
- {
- name: "Specified all-nodes",
- reportAddr: addr3,
- expectReportsFor: []tcpip.Address{addr1, addr2},
- },
- {
- name: "Specified other",
- reportAddr: addr4,
- expectReportsFor: []tcpip.Address{addr1, addr2},
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- mgp := mockMulticastGroupProtocol{t: t, skipProtocolAddress: addr3}
- clock := faketime.NewManualClock()
-
- mgp.init(ip.GenericMulticastProtocolOptions{
- Rand: rand.New(rand.NewSource(2)),
- Clock: clock,
- MaxUnsolicitedReportDelay: maxUnsolicitedReportDelay,
- })
-
- mgp.joinGroup(addr1)
- if diff := mgp.check([]tcpip.Address{addr1} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Fatalf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
- mgp.joinGroup(addr2)
- if diff := mgp.check([]tcpip.Address{addr2} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Fatalf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
- mgp.joinGroup(addr3)
- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Fatalf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
-
- // Receiving a report for a group we have a timer scheduled for should
- // cancel our delayed report timer for the group.
- mgp.handleReport(test.reportAddr)
- if len(test.expectReportsFor) != 0 {
- // Generic multicast protocol timers are expected to take the job mutex.
- clock.Advance(maxUnsolicitedReportDelay)
- if diff := mgp.check(test.expectReportsFor /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Errorf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
- }
-
- // Should have no more messages to send.
- clock.Advance(time.Hour)
- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Errorf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
- })
- }
-}
-
-func TestHandleQuery(t *testing.T) {
- tests := []struct {
- name string
- queryAddr tcpip.Address
- maxDelay time.Duration
- expectQueriedReportsFor []tcpip.Address
- expectDelayedReportsFor []tcpip.Address
- }{
- {
-			name:                    "Unspecified empty",
- queryAddr: "",
- maxDelay: 0,
- expectQueriedReportsFor: []tcpip.Address{addr1, addr2},
- expectDelayedReportsFor: nil,
- },
- {
-			name:                    "Unspecified any",
- queryAddr: "\x00",
- maxDelay: 1,
- expectQueriedReportsFor: []tcpip.Address{addr1, addr2},
- expectDelayedReportsFor: nil,
- },
- {
- name: "Specified",
- queryAddr: addr1,
- maxDelay: 2,
- expectQueriedReportsFor: []tcpip.Address{addr1},
- expectDelayedReportsFor: []tcpip.Address{addr2},
- },
- {
- name: "Specified all-nodes",
- queryAddr: addr3,
- maxDelay: 3,
- expectQueriedReportsFor: nil,
- expectDelayedReportsFor: []tcpip.Address{addr1, addr2},
- },
- {
- name: "Specified other",
- queryAddr: addr4,
- maxDelay: 4,
- expectQueriedReportsFor: nil,
- expectDelayedReportsFor: []tcpip.Address{addr1, addr2},
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- mgp := mockMulticastGroupProtocol{t: t, skipProtocolAddress: addr3}
- clock := faketime.NewManualClock()
-
- mgp.init(ip.GenericMulticastProtocolOptions{
- Rand: rand.New(rand.NewSource(3)),
- Clock: clock,
- MaxUnsolicitedReportDelay: maxUnsolicitedReportDelay,
- })
-
- mgp.joinGroup(addr1)
- if diff := mgp.check([]tcpip.Address{addr1} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Fatalf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
- mgp.joinGroup(addr2)
- if diff := mgp.check([]tcpip.Address{addr2} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Fatalf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
- mgp.joinGroup(addr3)
- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Fatalf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
-
- // Receiving a query should make us reschedule our delayed report timer
- // to some time within the new max response delay.
- mgp.handleQuery(test.queryAddr, test.maxDelay)
- clock.Advance(test.maxDelay)
- if diff := mgp.check(test.expectQueriedReportsFor /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Errorf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
-
- // The groups that were not affected by the query should still send a
- // report after the max unsolicited report delay.
- clock.Advance(maxUnsolicitedReportDelay)
- if diff := mgp.check(test.expectDelayedReportsFor /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Errorf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
-
- // Should have no more messages to send.
- clock.Advance(time.Hour)
- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Errorf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
- })
- }
-}
-
-func TestJoinCount(t *testing.T) {
- mgp := mockMulticastGroupProtocol{t: t}
- clock := faketime.NewManualClock()
-
- mgp.init(ip.GenericMulticastProtocolOptions{
- Rand: rand.New(rand.NewSource(4)),
- Clock: clock,
- MaxUnsolicitedReportDelay: time.Second,
- })
-
- // Set the join count to 2 for a group.
- mgp.joinGroup(addr1)
- if !mgp.isLocallyJoined(addr1) {
- t.Fatalf("got mgp.isLocallyJoined(%s) = false, want = true", addr1)
- }
- // Only the first join should trigger a report to be sent.
- if diff := mgp.check([]tcpip.Address{addr1} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Fatalf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
- mgp.joinGroup(addr1)
- if !mgp.isLocallyJoined(addr1) {
- t.Errorf("got mgp.isLocallyJoined(%s) = false, want = true", addr1)
- }
- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Errorf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
- if t.Failed() {
- t.FailNow()
- }
-
- // Group should still be considered joined after leaving once.
- if !mgp.leaveGroup(addr1) {
- t.Errorf("got mgp.leaveGroup(%s) = false, want = true", addr1)
- }
- if !mgp.isLocallyJoined(addr1) {
- t.Errorf("got mgp.isLocallyJoined(%s) = false, want = true", addr1)
- }
- // A leave report should only be sent once the join count reaches 0.
- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Errorf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
- if t.Failed() {
- t.FailNow()
- }
-
- // Leaving once more should actually remove us from the group.
- if !mgp.leaveGroup(addr1) {
- t.Errorf("got mgp.leaveGroup(%s) = false, want = true", addr1)
- }
- if mgp.isLocallyJoined(addr1) {
- t.Errorf("got mgp.isLocallyJoined(%s) = true, want = false", addr1)
- }
- if diff := mgp.check(nil /* sendReportGroupAddresses */, []tcpip.Address{addr1} /* sendLeaveGroupAddresses */); diff != "" {
- t.Errorf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
- if t.Failed() {
- t.FailNow()
- }
-
- // Group should no longer be joined so we should not have anything to
- // leave.
- if mgp.leaveGroup(addr1) {
- t.Errorf("got mgp.leaveGroup(%s) = true, want = false", addr1)
- }
- if mgp.isLocallyJoined(addr1) {
- t.Errorf("got mgp.isLocallyJoined(%s) = true, want = false", addr1)
- }
- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Errorf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
-
- // Should have no more messages to send.
- //
- // Generic multicast protocol timers are expected to take the job mutex.
- clock.Advance(time.Hour)
- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Errorf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
-}
-
-func TestMakeAllNonMemberAndInitialize(t *testing.T) {
- mgp := mockMulticastGroupProtocol{t: t, skipProtocolAddress: addr3}
- clock := faketime.NewManualClock()
-
- mgp.init(ip.GenericMulticastProtocolOptions{
- Rand: rand.New(rand.NewSource(3)),
- Clock: clock,
- MaxUnsolicitedReportDelay: maxUnsolicitedReportDelay,
- })
-
- mgp.joinGroup(addr1)
- if diff := mgp.check([]tcpip.Address{addr1} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Fatalf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
- mgp.joinGroup(addr2)
- if diff := mgp.check([]tcpip.Address{addr2} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Fatalf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
- mgp.joinGroup(addr3)
- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Fatalf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
-
- // Should send the leave reports for each but still consider them locally
- // joined.
- mgp.makeAllNonMember()
- if diff := mgp.check(nil /* sendReportGroupAddresses */, []tcpip.Address{addr1, addr2} /* sendLeaveGroupAddresses */); diff != "" {
- t.Errorf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
- // Generic multicast protocol timers are expected to take the job mutex.
- clock.Advance(time.Hour)
- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Errorf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
- for _, group := range []tcpip.Address{addr1, addr2, addr3} {
- if !mgp.isLocallyJoined(group) {
- t.Fatalf("got mgp.isLocallyJoined(%s) = false, want = true", group)
- }
- }
-
-	// Should send the initial set of unsolicited reports.
- mgp.initializeGroups()
- if diff := mgp.check([]tcpip.Address{addr1, addr2} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Errorf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
- clock.Advance(maxUnsolicitedReportDelay)
- if diff := mgp.check([]tcpip.Address{addr1, addr2} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Errorf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
-
- // Should have no more messages to send.
- clock.Advance(time.Hour)
- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Errorf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
-}
-
-// TestGroupStateNonMember tests that groups do not send packets when in the
-// non-member state, but are still considered locally joined.
-func TestGroupStateNonMember(t *testing.T) {
- mgp := mockMulticastGroupProtocol{t: t}
- clock := faketime.NewManualClock()
-
- mgp.init(ip.GenericMulticastProtocolOptions{
- Rand: rand.New(rand.NewSource(3)),
- Clock: clock,
- MaxUnsolicitedReportDelay: maxUnsolicitedReportDelay,
- })
- mgp.setEnabled(false)
-
- // Joining groups should not send any reports.
- mgp.joinGroup(addr1)
- if !mgp.isLocallyJoined(addr1) {
- t.Fatalf("got mgp.isLocallyJoined(%s) = false, want = true", addr1)
- }
- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Fatalf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
- mgp.joinGroup(addr2)
-	if !mgp.isLocallyJoined(addr2) {
- t.Fatalf("got mgp.isLocallyJoined(%s) = false, want = true", addr2)
- }
- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Fatalf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
-
- // Receiving a query should not send any reports.
- mgp.handleQuery(addr1, time.Nanosecond)
- // Generic multicast protocol timers are expected to take the job mutex.
- clock.Advance(time.Nanosecond)
- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Fatalf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
-
- // Leaving groups should not send any leave messages.
- if !mgp.leaveGroup(addr1) {
-		t.Errorf("got mgp.leaveGroup(%s) = false, want = true", addr1)
- }
- if mgp.isLocallyJoined(addr1) {
-		t.Errorf("got mgp.isLocallyJoined(%s) = true, want = false", addr1)
- }
- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Fatalf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
-
- clock.Advance(time.Hour)
- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Fatalf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
-}
-
-func TestQueuedPackets(t *testing.T) {
- clock := faketime.NewManualClock()
- mgp := mockMulticastGroupProtocol{t: t}
- mgp.init(ip.GenericMulticastProtocolOptions{
- Rand: rand.New(rand.NewSource(4)),
- Clock: clock,
- MaxUnsolicitedReportDelay: maxUnsolicitedReportDelay,
- })
-
- // Joining should trigger a SendReport, but mgp should report that we did not
- // send the packet.
- mgp.setQueuePackets(true)
- mgp.joinGroup(addr1)
- if diff := mgp.check([]tcpip.Address{addr1} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Fatalf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
-
- // The delayed report timer should have been cancelled since we did not send
- // the initial report earlier.
- clock.Advance(time.Hour)
- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Fatalf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
-
- // Mock being able to successfully send the report.
- mgp.setQueuePackets(false)
- mgp.sendQueuedReports()
- if diff := mgp.check([]tcpip.Address{addr1} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Errorf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
-
- // The delayed report (sent after the initial report) should now be sent.
- clock.Advance(maxUnsolicitedReportDelay)
- if diff := mgp.check([]tcpip.Address{addr1} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Errorf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
-
- // Should not have anything else to send (we should be idle).
- mgp.sendQueuedReports()
- clock.Advance(time.Hour)
- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Fatalf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
-
- // Receive a query but mock being unable to send reports again.
- mgp.setQueuePackets(true)
- mgp.handleQuery(addr1, time.Nanosecond)
- clock.Advance(time.Nanosecond)
- if diff := mgp.check([]tcpip.Address{addr1} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Errorf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
-
- // Mock being able to send reports again - we should have a packet queued to
- // send.
- mgp.setQueuePackets(false)
- mgp.sendQueuedReports()
- if diff := mgp.check([]tcpip.Address{addr1} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Errorf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
-
- // Should not have anything else to send.
- mgp.sendQueuedReports()
- clock.Advance(time.Hour)
- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Fatalf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
-
- // Receive a query again, but mock being unable to send reports.
- mgp.setQueuePackets(true)
- mgp.handleQuery(addr1, time.Nanosecond)
- clock.Advance(time.Nanosecond)
- if diff := mgp.check([]tcpip.Address{addr1} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Errorf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
-
-	// Receiving a report should transition us into the idle member state,
-	// even if we had a packet queued. We should no longer have any packets to
-	// send.
- mgp.handleReport(addr1)
- mgp.sendQueuedReports()
- clock.Advance(time.Hour)
- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Fatalf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
-
- // When we fail to send the initial set of reports, incoming reports should
-	// not prevent a newly joined group's reports from being sent.
- mgp.setQueuePackets(true)
- mgp.joinGroup(addr2)
- if diff := mgp.check([]tcpip.Address{addr2} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Fatalf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
- mgp.handleReport(addr2)
- // Attempting to send queued reports while still unable to send reports should
- // not change the host state.
- mgp.sendQueuedReports()
- if diff := mgp.check([]tcpip.Address{addr2} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Fatalf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
- // Mock being able to successfully send the report.
- mgp.setQueuePackets(false)
- mgp.sendQueuedReports()
- if diff := mgp.check([]tcpip.Address{addr2} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Errorf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
- // The delayed report (sent after the initial report) should now be sent.
- clock.Advance(maxUnsolicitedReportDelay)
- if diff := mgp.check([]tcpip.Address{addr2} /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Errorf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
-
- // Should not have anything else to send.
- mgp.sendQueuedReports()
- clock.Advance(time.Hour)
- if diff := mgp.check(nil /* sendReportGroupAddresses */, nil /* sendLeaveGroupAddresses */); diff != "" {
- t.Fatalf("mockMulticastGroupProtocol mismatch (-want +got):\n%s", diff)
- }
-}
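For orientation, the deleted multicast test drives ip.GenericMulticastProtocolState through an implementation of ip.MulticastGroupProtocol, always with the protocol's lock held. Below is a minimal sketch of that integration, assuming only the Init/JoinGroupLocked signatures and option fields exercised above and the rand, time, sync, tcpip and ip imports from the deleted test; igmpLikeProtocol and its join helper are hypothetical names with stubbed-out sends.

// igmpLikeProtocol mirrors the deleted mock: the generic state lives behind
// the protocol's RWMutex and calls back into SendReport/SendLeave while that
// lock is held.
type igmpLikeProtocol struct {
	mu struct {
		sync.RWMutex
		genericMulticastGroup ip.GenericMulticastProtocolState
	}
}

// Enabled, SendReport, SendLeave and ShouldPerformProtocol implement
// ip.MulticastGroupProtocol; a real IGMP/MLD endpoint would emit packets here.
func (p *igmpLikeProtocol) Enabled() bool { return true }

func (p *igmpLikeProtocol) SendReport(tcpip.Address) (bool, tcpip.Error) { return true, nil }

func (p *igmpLikeProtocol) SendLeave(tcpip.Address) tcpip.Error { return nil }

func (p *igmpLikeProtocol) ShouldPerformProtocol(tcpip.Address) bool { return true }

// join initializes the generic state and joins one group: an initial report
// is sent immediately and a second one is scheduled within
// MaxUnsolicitedReportDelay, matching the behavior asserted in TestJoinGroup.
func (p *igmpLikeProtocol) join(clock tcpip.Clock, group tcpip.Address) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.mu.genericMulticastGroup.Init(&p.mu.RWMutex, ip.GenericMulticastProtocolOptions{
		Rand:                      rand.New(rand.NewSource(0)),
		Clock:                     clock,
		Protocol:                  p,
		MaxUnsolicitedReportDelay: time.Second,
	})
	p.mu.genericMulticastGroup.JoinGroupLocked(group)
}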
diff --git a/pkg/tcpip/network/internal/ip/ip_state_autogen.go b/pkg/tcpip/network/internal/ip/ip_state_autogen.go
new file mode 100644
index 000000000..360922bfe
--- /dev/null
+++ b/pkg/tcpip/network/internal/ip/ip_state_autogen.go
@@ -0,0 +1,32 @@
+// automatically generated by stateify.
+
+package ip
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (e *ErrMessageTooLong) StateTypeName() string {
+ return "pkg/tcpip/network/internal/ip.ErrMessageTooLong"
+}
+
+func (e *ErrMessageTooLong) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrMessageTooLong) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrMessageTooLong) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrMessageTooLong) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrMessageTooLong) StateLoad(stateSourceObject state.Source) {
+}
+
+func init() {
+ state.Register((*ErrMessageTooLong)(nil))
+}
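The added ip_state_autogen.go is generated output: stateify scans the package for types annotated as savable and emits the StateTypeName/StateFields/StateSave/StateLoad plumbing plus the init-time registration with pkg/state. As a sketch of the hand-written side that produces output like the ErrMessageTooLong code above, using a deliberately hypothetical type name:

// ErrExampleDrop is a hypothetical, fieldless error type. With the annotation
// below, running stateify over the package would generate
// StateTypeName/StateFields/StateSave/StateLoad methods and an init-time
// state.Register call for it, analogous to ErrMessageTooLong above.
//
// +stateify savable
type ErrExampleDrop struct{}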
diff --git a/pkg/tcpip/network/internal/testutil/BUILD b/pkg/tcpip/network/internal/testutil/BUILD
deleted file mode 100644
index a180e5c75..000000000
--- a/pkg/tcpip/network/internal/testutil/BUILD
+++ /dev/null
@@ -1,21 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "testutil",
- srcs = ["testutil.go"],
- visibility = [
- "//pkg/tcpip/network/arp:__pkg__",
- "//pkg/tcpip/network/internal/fragmentation:__pkg__",
- "//pkg/tcpip/network/ipv4:__pkg__",
- "//pkg/tcpip/network/ipv6:__pkg__",
- "//pkg/tcpip/tests/integration:__pkg__",
- ],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/header",
- "//pkg/tcpip/stack",
- ],
-)
diff --git a/pkg/tcpip/network/internal/testutil/testutil.go b/pkg/tcpip/network/internal/testutil/testutil.go
deleted file mode 100644
index 605e9ef8d..000000000
--- a/pkg/tcpip/network/internal/testutil/testutil.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package testutil defines types and functions used to test Network Layer
-// functionality such as IP fragmentation.
-package testutil
-
-import (
- "fmt"
- "math/rand"
-
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
-)
-
-// MockLinkEndpoint is an endpoint used for testing; it stores packets written
-// to it and can mock errors.
-type MockLinkEndpoint struct {
- // WrittenPackets is where packets written to the endpoint are stored.
- WrittenPackets []*stack.PacketBuffer
-
- mtu uint32
- err tcpip.Error
- allowPackets int
-}
-
-// NewMockLinkEndpoint creates a new MockLinkEndpoint.
-//
-// err is the error that will be returned once allowPackets packets are written
-// to the endpoint.
-func NewMockLinkEndpoint(mtu uint32, err tcpip.Error, allowPackets int) *MockLinkEndpoint {
- return &MockLinkEndpoint{
- mtu: mtu,
- err: err,
- allowPackets: allowPackets,
- }
-}
-
-// MTU implements LinkEndpoint.MTU.
-func (ep *MockLinkEndpoint) MTU() uint32 { return ep.mtu }
-
-// Capabilities implements LinkEndpoint.Capabilities.
-func (*MockLinkEndpoint) Capabilities() stack.LinkEndpointCapabilities { return 0 }
-
-// MaxHeaderLength implements LinkEndpoint.MaxHeaderLength.
-func (*MockLinkEndpoint) MaxHeaderLength() uint16 { return 0 }
-
-// LinkAddress implements LinkEndpoint.LinkAddress.
-func (*MockLinkEndpoint) LinkAddress() tcpip.LinkAddress { return "" }
-
-// WritePacket implements LinkEndpoint.WritePacket.
-func (ep *MockLinkEndpoint) WritePacket(_ stack.RouteInfo, _ tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) tcpip.Error {
- if ep.allowPackets == 0 {
- return ep.err
- }
- ep.allowPackets--
- ep.WrittenPackets = append(ep.WrittenPackets, pkt)
- return nil
-}
-
-// WritePackets implements LinkEndpoint.WritePackets.
-func (ep *MockLinkEndpoint) WritePackets(r stack.RouteInfo, pkts stack.PacketBufferList, protocol tcpip.NetworkProtocolNumber) (int, tcpip.Error) {
- var n int
-
- for pkt := pkts.Front(); pkt != nil; pkt = pkt.Next() {
- if err := ep.WritePacket(r, protocol, pkt); err != nil {
- return n, err
- }
- n++
- }
-
- return n, nil
-}
-
-// Attach implements LinkEndpoint.Attach.
-func (*MockLinkEndpoint) Attach(stack.NetworkDispatcher) {}
-
-// IsAttached implements LinkEndpoint.IsAttached.
-func (*MockLinkEndpoint) IsAttached() bool { return false }
-
-// Wait implements LinkEndpoint.Wait.
-func (*MockLinkEndpoint) Wait() {}
-
-// ARPHardwareType implements LinkEndpoint.ARPHardwareType.
-func (*MockLinkEndpoint) ARPHardwareType() header.ARPHardwareType { return header.ARPHardwareNone }
-
-// AddHeader implements LinkEndpoint.AddHeader.
-func (*MockLinkEndpoint) AddHeader(_, _ tcpip.LinkAddress, _ tcpip.NetworkProtocolNumber, _ *stack.PacketBuffer) {
-}
-
-// MakeRandPkt generates a randomized packet. transportHeaderLength indicates
-// how many random bytes will be copied in the Transport Header.
-// extraHeaderReserveLength indicates how much extra space will be reserved for
-// the other headers. The payload is made from Views of the sizes listed in
-// viewSizes.
-func MakeRandPkt(transportHeaderLength int, extraHeaderReserveLength int, viewSizes []int, proto tcpip.NetworkProtocolNumber) *stack.PacketBuffer {
- var views buffer.VectorisedView
-
- for _, s := range viewSizes {
- newView := buffer.NewView(s)
- if _, err := rand.Read(newView); err != nil {
- panic(fmt.Sprintf("rand.Read: %s", err))
- }
- views.AppendView(newView)
- }
-
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: transportHeaderLength + extraHeaderReserveLength,
- Data: views,
- })
- pkt.NetworkProtocolNumber = proto
- if _, err := rand.Read(pkt.TransportHeader().Push(transportHeaderLength)); err != nil {
- panic(fmt.Sprintf("rand.Read: %s", err))
- }
- return pkt
-}
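For orientation, the deleted testutil helpers were consumed by the IPv4/IPv6 fragmentation tests roughly as sketched below. This assumes only the NewMockLinkEndpoint/MakeRandPkt signatures above and imports of testutil, tcpip, header, stack and ipv6 from their gvisor.dev paths; the MTU, sizes and error value are illustrative.

// writeOneRandomPacket builds a random packet and writes it through a mock
// endpoint that accepts a single packet before returning the configured error.
func writeOneRandomPacket() []*stack.PacketBuffer {
	ep := testutil.NewMockLinkEndpoint(1280 /* mtu */, &tcpip.ErrMessageTooLong{}, 1 /* allowPackets */)

	// One 1000-byte random view as payload, a 20-byte random transport
	// header, and room reserved for an IPv6 header.
	pkt := testutil.MakeRandPkt(20, header.IPv6MinimumSize, []int{1000}, ipv6.ProtocolNumber)

	// The first write is recorded in WrittenPackets; once allowPackets is
	// exhausted, subsequent writes return the configured error instead.
	if err := ep.WritePacket(stack.RouteInfo{}, ipv6.ProtocolNumber, pkt); err != nil {
		panic(err)
	}
	return ep.WrittenPackets
}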
diff --git a/pkg/tcpip/network/ip_test.go b/pkg/tcpip/network/ip_test.go
deleted file mode 100644
index 771b9173a..000000000
--- a/pkg/tcpip/network/ip_test.go
+++ /dev/null
@@ -1,2034 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ip_test
-
-import (
- "fmt"
- "strings"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/checker"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/link/channel"
- "gvisor.dev/gvisor/pkg/tcpip/link/loopback"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/testutil"
- "gvisor.dev/gvisor/pkg/tcpip/transport/icmp"
- "gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
- "gvisor.dev/gvisor/pkg/tcpip/transport/udp"
-)
-
-const nicID = 1
-
-var (
- localIPv4Addr = testutil.MustParse4("10.0.0.1")
- remoteIPv4Addr = testutil.MustParse4("10.0.0.2")
- ipv4SubnetAddr = testutil.MustParse4("10.0.0.0")
- ipv4SubnetMask = testutil.MustParse4("255.255.255.0")
- ipv4Gateway = testutil.MustParse4("10.0.0.3")
- localIPv6Addr = testutil.MustParse6("a00::1")
- remoteIPv6Addr = testutil.MustParse6("a00::2")
- ipv6SubnetAddr = testutil.MustParse6("a00::")
- ipv6SubnetMask = testutil.MustParse6("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ff00")
- ipv6Gateway = testutil.MustParse6("a00::3")
-)
-
-var localIPv4AddrWithPrefix = tcpip.AddressWithPrefix{
- Address: localIPv4Addr,
- PrefixLen: 24,
-}
-
-var localIPv6AddrWithPrefix = tcpip.AddressWithPrefix{
- Address: localIPv6Addr,
- PrefixLen: 120,
-}
-
-type transportError struct {
- origin tcpip.SockErrOrigin
- typ uint8
- code uint8
- info uint32
- kind stack.TransportErrorKind
-}
-
-// testObject implements two interfaces: LinkEndpoint and TransportDispatcher.
-// The former is used to pretend that it's a link endpoint so that we can
-// inspect packets written by the network endpoints. The latter is used to
-// pretend that it's the network stack so that it can inspect incoming packets
-// that have been handled by the network endpoints.
-//
-// Packets are checked by comparing their fields/values against the expected
-// values stored in the test object itself.
-type testObject struct {
- t *testing.T
- protocol tcpip.TransportProtocolNumber
- contents []byte
- srcAddr tcpip.Address
- dstAddr tcpip.Address
- v4 bool
- transErr transportError
-
- dataCalls int
- controlCalls int
- rawCalls int
-}
-
-// checkValues verifies that the transport protocol, data contents, src & dst
-// addresses of a packet match what's expected. If any field doesn't match, the
-// test fails.
-func (t *testObject) checkValues(protocol tcpip.TransportProtocolNumber, v buffer.View, srcAddr, dstAddr tcpip.Address) {
- if protocol != t.protocol {
- t.t.Errorf("protocol = %v, want %v", protocol, t.protocol)
- }
-
- if srcAddr != t.srcAddr {
- t.t.Errorf("srcAddr = %v, want %v", srcAddr, t.srcAddr)
- }
-
- if dstAddr != t.dstAddr {
- t.t.Errorf("dstAddr = %v, want %v", dstAddr, t.dstAddr)
- }
-
- if len(v) != len(t.contents) {
- t.t.Fatalf("len(payload) = %v, want %v", len(v), len(t.contents))
- }
-
- for i := range t.contents {
- if t.contents[i] != v[i] {
- t.t.Fatalf("payload[%v] = %v, want %v", i, v[i], t.contents[i])
- }
- }
-}
-
-// DeliverTransportPacket is called by network endpoints after parsing incoming
-// packets. This is used by the test object to verify that the results of the
-// parsing are expected.
-func (t *testObject) DeliverTransportPacket(protocol tcpip.TransportProtocolNumber, pkt *stack.PacketBuffer) stack.TransportPacketDisposition {
- netHdr := pkt.Network()
- t.checkValues(protocol, pkt.Data().AsRange().ToOwnedView(), netHdr.SourceAddress(), netHdr.DestinationAddress())
- t.dataCalls++
- return stack.TransportPacketHandled
-}
-
-// DeliverTransportError is called by network endpoints after parsing
-// incoming control (ICMP) packets. This is used by the test object to verify
-// that the results of the parsing are expected.
-func (t *testObject) DeliverTransportError(local, remote tcpip.Address, net tcpip.NetworkProtocolNumber, trans tcpip.TransportProtocolNumber, transErr stack.TransportError, pkt *stack.PacketBuffer) {
- t.checkValues(trans, pkt.Data().AsRange().ToOwnedView(), remote, local)
- if diff := cmp.Diff(
- t.transErr,
- transportError{
- origin: transErr.Origin(),
- typ: transErr.Type(),
- code: transErr.Code(),
- info: transErr.Info(),
- kind: transErr.Kind(),
- },
- cmp.AllowUnexported(transportError{}),
- ); diff != "" {
- t.t.Errorf("transport error mismatch (-want +got):\n%s", diff)
- }
- t.controlCalls++
-}
-
-func (t *testObject) DeliverRawPacket(tcpip.TransportProtocolNumber, *stack.PacketBuffer) {
- t.rawCalls++
-}
-
-// Attach is only implemented to satisfy the LinkEndpoint interface.
-func (*testObject) Attach(stack.NetworkDispatcher) {}
-
-// IsAttached implements stack.LinkEndpoint.IsAttached.
-func (*testObject) IsAttached() bool {
- return true
-}
-
-// MTU implements stack.LinkEndpoint.MTU. It just returns a constant that
-// matches the Linux loopback MTU.
-func (*testObject) MTU() uint32 {
- return 65536
-}
-
-// Capabilities implements stack.LinkEndpoint.Capabilities.
-func (*testObject) Capabilities() stack.LinkEndpointCapabilities {
- return 0
-}
-
-// MaxHeaderLength is only implemented to satisfy the LinkEndpoint interface.
-func (*testObject) MaxHeaderLength() uint16 {
- return 0
-}
-
-// LinkAddress returns the link address of this endpoint.
-func (*testObject) LinkAddress() tcpip.LinkAddress {
- return ""
-}
-
-// Wait implements stack.LinkEndpoint.Wait.
-func (*testObject) Wait() {}
-
-// WritePacket is called by network endpoints after producing a packet and
-// writing it to the link endpoint. This is used by the test object to verify
-// that the produced packet is as expected.
-func (t *testObject) WritePacket(_ *stack.Route, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) tcpip.Error {
- var prot tcpip.TransportProtocolNumber
- var srcAddr tcpip.Address
- var dstAddr tcpip.Address
-
- if t.v4 {
- h := header.IPv4(pkt.NetworkHeader().View())
- prot = tcpip.TransportProtocolNumber(h.Protocol())
- srcAddr = h.SourceAddress()
- dstAddr = h.DestinationAddress()
-
- } else {
- h := header.IPv6(pkt.NetworkHeader().View())
- prot = tcpip.TransportProtocolNumber(h.NextHeader())
- srcAddr = h.SourceAddress()
- dstAddr = h.DestinationAddress()
- }
- t.checkValues(prot, pkt.Data().AsRange().ToOwnedView(), srcAddr, dstAddr)
- return nil
-}
-
-// WritePackets implements stack.LinkEndpoint.WritePackets.
-func (*testObject) WritePackets(_ *stack.Route, pkt stack.PacketBufferList, protocol tcpip.NetworkProtocolNumber) (int, tcpip.Error) {
- panic("not implemented")
-}
-
-// ARPHardwareType implements stack.LinkEndpoint.ARPHardwareType.
-func (*testObject) ARPHardwareType() header.ARPHardwareType {
- panic("not implemented")
-}
-
-// AddHeader implements stack.LinkEndpoint.AddHeader.
-func (*testObject) AddHeader(local, remote tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {
- panic("not implemented")
-}
-
-func buildIPv4Route(local, remote tcpip.Address) (*stack.Route, tcpip.Error) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol, tcp.NewProtocol},
- })
- s.CreateNIC(nicID, loopback.New())
- s.AddAddress(nicID, ipv4.ProtocolNumber, local)
- s.SetRouteTable([]tcpip.Route{{
- Destination: header.IPv4EmptySubnet,
- Gateway: ipv4Gateway,
- NIC: 1,
- }})
-
- return s.FindRoute(nicID, local, remote, ipv4.ProtocolNumber, false /* multicastLoop */)
-}
-
-func buildIPv6Route(local, remote tcpip.Address) (*stack.Route, tcpip.Error) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol, tcp.NewProtocol},
- })
- s.CreateNIC(nicID, loopback.New())
- s.AddAddress(nicID, ipv6.ProtocolNumber, local)
- s.SetRouteTable([]tcpip.Route{{
- Destination: header.IPv6EmptySubnet,
- Gateway: ipv6Gateway,
- NIC: 1,
- }})
-
- return s.FindRoute(nicID, local, remote, ipv6.ProtocolNumber, false /* multicastLoop */)
-}
-
-func buildDummyStackWithLinkEndpoint(t *testing.T, mtu uint32) (*stack.Stack, *channel.Endpoint) {
- t.Helper()
-
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol, tcp.NewProtocol},
- })
- e := channel.New(1, mtu, "")
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
-
- v4Addr := tcpip.ProtocolAddress{Protocol: header.IPv4ProtocolNumber, AddressWithPrefix: localIPv4AddrWithPrefix}
- if err := s.AddProtocolAddress(nicID, v4Addr); err != nil {
- t.Fatalf("AddProtocolAddress(%d, %#v) = %s", nicID, v4Addr, err)
- }
-
- v6Addr := tcpip.ProtocolAddress{Protocol: header.IPv6ProtocolNumber, AddressWithPrefix: localIPv6AddrWithPrefix}
- if err := s.AddProtocolAddress(nicID, v6Addr); err != nil {
- t.Fatalf("AddProtocolAddress(%d, %#v) = %s", nicID, v6Addr, err)
- }
-
- return s, e
-}
-
-func buildDummyStack(t *testing.T) *stack.Stack {
- t.Helper()
-
- s, _ := buildDummyStackWithLinkEndpoint(t, header.IPv6MinimumMTU)
- return s
-}
-
-var _ stack.NetworkInterface = (*testInterface)(nil)
-
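-// testInterface is a minimal stack.NetworkInterface for tests; it embeds
-// testObject and tracks an enabled/disabled state under a mutex.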
-type testInterface struct {
- testObject
-
- mu struct {
- sync.RWMutex
- disabled bool
- }
-}
-
-func (*testInterface) ID() tcpip.NICID {
- return nicID
-}
-
-func (*testInterface) IsLoopback() bool {
- return false
-}
-
-func (*testInterface) Name() string {
- return ""
-}
-
-func (t *testInterface) Enabled() bool {
- t.mu.RLock()
- defer t.mu.RUnlock()
- return !t.mu.disabled
-}
-
-func (*testInterface) Promiscuous() bool {
- return false
-}
-
-func (*testInterface) Spoofing() bool {
- return false
-}
-
-func (t *testInterface) setEnabled(v bool) {
- t.mu.Lock()
- defer t.mu.Unlock()
- t.mu.disabled = !v
-}
-
-func (*testInterface) WritePacketToRemote(tcpip.LinkAddress, tcpip.NetworkProtocolNumber, *stack.PacketBuffer) tcpip.Error {
- return &tcpip.ErrNotSupported{}
-}
-
-func (*testInterface) HandleNeighborProbe(tcpip.NetworkProtocolNumber, tcpip.Address, tcpip.LinkAddress) tcpip.Error {
- return nil
-}
-
-func (*testInterface) HandleNeighborConfirmation(tcpip.NetworkProtocolNumber, tcpip.Address, tcpip.LinkAddress, stack.ReachabilityConfirmationFlags) tcpip.Error {
- return nil
-}
-
-func (*testInterface) PrimaryAddress(tcpip.NetworkProtocolNumber) (tcpip.AddressWithPrefix, tcpip.Error) {
- return tcpip.AddressWithPrefix{}, nil
-}
-
-func (*testInterface) CheckLocalAddress(tcpip.NetworkProtocolNumber, tcpip.Address) bool {
- return false
-}
-
-func TestSourceAddressValidation(t *testing.T) {
- rxIPv4ICMP := func(e *channel.Endpoint, src tcpip.Address) {
- totalLen := header.IPv4MinimumSize + header.ICMPv4MinimumSize
- hdr := buffer.NewPrependable(totalLen)
- pkt := header.ICMPv4(hdr.Prepend(header.ICMPv4MinimumSize))
- pkt.SetType(header.ICMPv4Echo)
- pkt.SetCode(0)
- pkt.SetChecksum(0)
- pkt.SetChecksum(^header.Checksum(pkt, 0))
- ip := header.IPv4(hdr.Prepend(header.IPv4MinimumSize))
- ip.Encode(&header.IPv4Fields{
- TotalLength: uint16(totalLen),
- Protocol: uint8(icmp.ProtocolNumber4),
- TTL: ipv4.DefaultTTL,
- SrcAddr: src,
- DstAddr: localIPv4Addr,
- })
- ip.SetChecksum(^ip.CalculateChecksum())
-
- e.InjectInbound(header.IPv4ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: hdr.View().ToVectorisedView(),
- }))
- }
-
- rxIPv6ICMP := func(e *channel.Endpoint, src tcpip.Address) {
- totalLen := header.IPv6MinimumSize + header.ICMPv6MinimumSize
- hdr := buffer.NewPrependable(totalLen)
- pkt := header.ICMPv6(hdr.Prepend(header.ICMPv6MinimumSize))
- pkt.SetType(header.ICMPv6EchoRequest)
- pkt.SetCode(0)
- pkt.SetChecksum(0)
- pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
- Header: pkt,
- Src: src,
- Dst: localIPv6Addr,
- }))
- ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
- ip.Encode(&header.IPv6Fields{
- PayloadLength: header.ICMPv6MinimumSize,
- TransportProtocol: icmp.ProtocolNumber6,
- HopLimit: ipv6.DefaultTTL,
- SrcAddr: src,
- DstAddr: localIPv6Addr,
- })
- e.InjectInbound(header.IPv6ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: hdr.View().ToVectorisedView(),
- }))
- }
-
- tests := []struct {
- name string
- srcAddress tcpip.Address
- rxICMP func(*channel.Endpoint, tcpip.Address)
- valid bool
- }{
- {
- name: "IPv4 valid",
- srcAddress: "\x01\x02\x03\x04",
- rxICMP: rxIPv4ICMP,
- valid: true,
- },
- {
- name: "IPv6 valid",
- srcAddress: "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10",
- rxICMP: rxIPv6ICMP,
- valid: true,
- },
- {
- name: "IPv4 unspecified",
- srcAddress: header.IPv4Any,
- rxICMP: rxIPv4ICMP,
- valid: true,
- },
- {
- name: "IPv6 unspecified",
- srcAddress: header.IPv6Any,
- rxICMP: rxIPv6ICMP,
- valid: true,
- },
- {
- name: "IPv4 multicast",
- srcAddress: "\xe0\x00\x00\x01",
- rxICMP: rxIPv4ICMP,
- valid: false,
- },
- {
- name: "IPv6 multicast",
- srcAddress: "\xff\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01",
- rxICMP: rxIPv6ICMP,
- valid: false,
- },
- {
- name: "IPv4 broadcast",
- srcAddress: header.IPv4Broadcast,
- rxICMP: rxIPv4ICMP,
- valid: false,
- },
- {
- name: "IPv4 subnet broadcast",
- srcAddress: func() tcpip.Address {
- subnet := localIPv4AddrWithPrefix.Subnet()
- return subnet.Broadcast()
- }(),
- rxICMP: rxIPv4ICMP,
- valid: false,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s, e := buildDummyStackWithLinkEndpoint(t, header.IPv6MinimumMTU)
- test.rxICMP(e, test.srcAddress)
-
- var wantValid uint64
- if test.valid {
- wantValid = 1
- }
-
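- // Exactly one of the two counters below should be incremented: packets
- // with an invalid source address are counted as invalid and dropped,
- // while packets with a valid source address are delivered.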
- if got, want := s.Stats().IP.InvalidSourceAddressesReceived.Value(), 1-wantValid; got != want {
- t.Errorf("got s.Stats().IP.InvalidSourceAddressesReceived.Value() = %d, want = %d", got, want)
- }
- if got := s.Stats().IP.PacketsDelivered.Value(); got != wantValid {
- t.Errorf("got s.Stats().IP.PacketsDelivered.Value() = %d, want = %d", got, wantValid)
- }
- })
- }
-}
-
-func TestEnableWhenNICDisabled(t *testing.T) {
- tests := []struct {
- name string
- protocolFactory stack.NetworkProtocolFactory
- protoNum tcpip.NetworkProtocolNumber
- }{
- {
- name: "IPv4",
- protocolFactory: ipv4.NewProtocol,
- protoNum: ipv4.ProtocolNumber,
- },
- {
- name: "IPv6",
- protocolFactory: ipv6.NewProtocol,
- protoNum: ipv6.ProtocolNumber,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var nic testInterface
- nic.setEnabled(false)
-
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{test.protocolFactory},
- })
- p := s.NetworkProtocolInstance(test.protoNum)
-
- // We pass nil for all parameters except the NetworkInterface and Stack
- // since Enable only depends on these.
- ep := p.NewEndpoint(&nic, nil)
-
- // The endpoint should initially be disabled, regardless of the NIC's enabled
- // status.
- if ep.Enabled() {
- t.Fatal("got ep.Enabled() = true, want = false")
- }
- nic.setEnabled(true)
- if ep.Enabled() {
- t.Fatal("got ep.Enabled() = true, want = false")
- }
-
- // Attempting to enable the endpoint while the NIC is disabled should
- // fail.
- nic.setEnabled(false)
- err := ep.Enable()
- if _, ok := err.(*tcpip.ErrNotPermitted); !ok {
- t.Fatalf("got ep.Enable() = %s, want = %s", err, &tcpip.ErrNotPermitted{})
- }
- // ep should consider the NIC's enabled status when determining its own
- // enabled status so we "enable" the NIC to read just the endpoint's
- // enabled status.
- nic.setEnabled(true)
- if ep.Enabled() {
- t.Fatal("got ep.Enabled() = true, want = false")
- }
-
- // Enabling the interface after the NIC has been enabled should succeed.
- if err := ep.Enable(); err != nil {
- t.Fatalf("ep.Enable(): %s", err)
- }
- if !ep.Enabled() {
- t.Fatal("got ep.Enabled() = false, want = true")
- }
-
- // ep should consider the NIC's enabled status when determining its own
- // enabled status.
- nic.setEnabled(false)
- if ep.Enabled() {
- t.Fatal("got ep.Enabled() = true, want = false")
- }
-
- // Disabling the endpoint when the NIC is enabled should make the endpoint
- // disabled.
- nic.setEnabled(true)
- ep.Disable()
- if ep.Enabled() {
- t.Fatal("got ep.Enabled() = true, want = false")
- }
- })
- }
-}
-
-func TestIPv4Send(t *testing.T) {
- s := buildDummyStack(t)
- proto := s.NetworkProtocolInstance(ipv4.ProtocolNumber)
- nic := testInterface{
- testObject: testObject{
- t: t,
- v4: true,
- },
- }
- ep := proto.NewEndpoint(&nic, nil)
- defer ep.Close()
-
- // Allocate and initialize the payload view.
- payload := buffer.NewView(100)
- for i := 0; i < len(payload); i++ {
- payload[i] = uint8(i)
- }
-
- // Setup the packet buffer.
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: int(ep.MaxHeaderLength()),
- Data: payload.ToVectorisedView(),
- })
-
- // Issue the write.
- nic.testObject.protocol = 123
- nic.testObject.srcAddr = localIPv4Addr
- nic.testObject.dstAddr = remoteIPv4Addr
- nic.testObject.contents = payload
-
- r, err := buildIPv4Route(localIPv4Addr, remoteIPv4Addr)
- if err != nil {
- t.Fatalf("could not find route: %v", err)
- }
- if err := ep.WritePacket(r, stack.NetworkHeaderParams{
- Protocol: 123,
- TTL: 123,
- TOS: stack.DefaultTOS,
- }, pkt); err != nil {
- t.Fatalf("WritePacket failed: %v", err)
- }
-}
-
-func TestReceive(t *testing.T) {
- tests := []struct {
- name string
- protoFactory stack.NetworkProtocolFactory
- protoNum tcpip.NetworkProtocolNumber
- v4 bool
- epAddr tcpip.AddressWithPrefix
- handlePacket func(*testing.T, stack.NetworkEndpoint, *testInterface)
- }{
- {
- name: "IPv4",
- protoFactory: ipv4.NewProtocol,
- protoNum: ipv4.ProtocolNumber,
- v4: true,
- epAddr: localIPv4Addr.WithPrefix(),
- handlePacket: func(t *testing.T, ep stack.NetworkEndpoint, nic *testInterface) {
- const totalLen = header.IPv4MinimumSize + 30 /* payload length */
-
- view := buffer.NewView(totalLen)
- ip := header.IPv4(view)
- ip.Encode(&header.IPv4Fields{
- TotalLength: totalLen,
- TTL: ipv4.DefaultTTL,
- Protocol: 10,
- SrcAddr: remoteIPv4Addr,
- DstAddr: localIPv4Addr,
- })
- ip.SetChecksum(^ip.CalculateChecksum())
-
- // Make payload be non-zero.
- for i := header.IPv4MinimumSize; i < len(view); i++ {
- view[i] = uint8(i)
- }
-
- // Give packet to ipv4 endpoint, dispatcher will validate that it's ok.
- nic.testObject.protocol = 10
- nic.testObject.srcAddr = remoteIPv4Addr
- nic.testObject.dstAddr = localIPv4Addr
- nic.testObject.contents = view[header.IPv4MinimumSize:totalLen]
-
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: view.ToVectorisedView(),
- })
- ep.HandlePacket(pkt)
- },
- },
- {
- name: "IPv6",
- protoFactory: ipv6.NewProtocol,
- protoNum: ipv6.ProtocolNumber,
- v4: false,
- epAddr: localIPv6Addr.WithPrefix(),
- handlePacket: func(t *testing.T, ep stack.NetworkEndpoint, nic *testInterface) {
- const payloadLen = 30
- view := buffer.NewView(header.IPv6MinimumSize + payloadLen)
- ip := header.IPv6(view)
- ip.Encode(&header.IPv6Fields{
- PayloadLength: payloadLen,
- TransportProtocol: 10,
- HopLimit: ipv6.DefaultTTL,
- SrcAddr: remoteIPv6Addr,
- DstAddr: localIPv6Addr,
- })
-
- // Make payload be non-zero.
- for i := header.IPv6MinimumSize; i < len(view); i++ {
- view[i] = uint8(i)
- }
-
- // Give packet to ipv6 endpoint, dispatcher will validate that it's ok.
- nic.testObject.protocol = 10
- nic.testObject.srcAddr = remoteIPv6Addr
- nic.testObject.dstAddr = localIPv6Addr
- nic.testObject.contents = view[header.IPv6MinimumSize:][:payloadLen]
-
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: view.ToVectorisedView(),
- })
- ep.HandlePacket(pkt)
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{test.protoFactory},
- })
- nic := testInterface{
- testObject: testObject{
- t: t,
- v4: test.v4,
- },
- }
- ep := s.NetworkProtocolInstance(test.protoNum).NewEndpoint(&nic, &nic.testObject)
- defer ep.Close()
-
- if err := ep.Enable(); err != nil {
- t.Fatalf("ep.Enable(): %s", err)
- }
-
- addressableEndpoint, ok := ep.(stack.AddressableEndpoint)
- if !ok {
- t.Fatalf("expected network endpoint with number = %d to implement stack.AddressableEndpoint", test.protoNum)
- }
- if ep, err := addressableEndpoint.AddAndAcquirePermanentAddress(test.epAddr, stack.CanBePrimaryEndpoint, stack.AddressConfigStatic, false /* deprecated */); err != nil {
- t.Fatalf("addressableEndpoint.AddAndAcquirePermanentAddress(%s, CanBePrimaryEndpoint, AddressConfigStatic, false): %s", test.epAddr, err)
- } else {
- ep.DecRef()
- }
-
- stat := s.Stats().IP.PacketsReceived
- if got := stat.Value(); got != 0 {
- t.Fatalf("got s.Stats().IP.PacketsReceived.Value() = %d, want = 0", got)
- }
- test.handlePacket(t, ep, &nic)
- if nic.testObject.dataCalls != 1 {
- t.Errorf("Bad number of data calls: got %d, want 1", nic.testObject.dataCalls)
- }
- if nic.testObject.rawCalls != 1 {
- t.Errorf("Bad number of raw calls: got %d, want 1", nic.testObject.rawCalls)
- }
- if got := stat.Value(); got != 1 {
- t.Errorf("got s.Stats().IP.PacketsReceived.Value() = %d, want = 1", got)
- }
- })
- }
-}
-
-func TestIPv4ReceiveControl(t *testing.T) {
- const (
- mtu = 0xbeef - header.IPv4MinimumSize
- dataLen = 8
- )
-
- cases := []struct {
- name string
- expectedCount int
- fragmentOffset uint16
- code header.ICMPv4Code
- transErr transportError
- trunc int
- }{
- {
- name: "FragmentationNeeded",
- expectedCount: 1,
- fragmentOffset: 0,
- code: header.ICMPv4FragmentationNeeded,
- transErr: transportError{
- origin: tcpip.SockExtErrorOriginICMP,
- typ: uint8(header.ICMPv4DstUnreachable),
- code: uint8(header.ICMPv4FragmentationNeeded),
- info: mtu,
- kind: stack.PacketTooBigTransportError,
- },
- trunc: 0,
- },
- {
- name: "Truncated (missing IPv4 header)",
- expectedCount: 0,
- fragmentOffset: 0,
- code: header.ICMPv4FragmentationNeeded,
- trunc: header.IPv4MinimumSize + header.ICMPv4MinimumSize,
- },
- {
- name: "Truncated (partial offending packet's IP header)",
- expectedCount: 0,
- fragmentOffset: 0,
- code: header.ICMPv4FragmentationNeeded,
- trunc: header.IPv4MinimumSize + header.ICMPv4MinimumSize + header.IPv4MinimumSize - 1,
- },
- {
- name: "Truncated (partial offending packet's data)",
- expectedCount: 0,
- fragmentOffset: 0,
- code: header.ICMPv4FragmentationNeeded,
- trunc: header.ICMPv4MinimumSize + header.ICMPv4MinimumSize + header.IPv4MinimumSize + dataLen - 1,
- },
- {
- name: "Port unreachable",
- expectedCount: 1,
- fragmentOffset: 0,
- code: header.ICMPv4PortUnreachable,
- transErr: transportError{
- origin: tcpip.SockExtErrorOriginICMP,
- typ: uint8(header.ICMPv4DstUnreachable),
- code: uint8(header.ICMPv4PortUnreachable),
- kind: stack.DestinationPortUnreachableTransportError,
- },
- trunc: 0,
- },
- {
- name: "Non-zero fragment offset",
- expectedCount: 0,
- fragmentOffset: 100,
- code: header.ICMPv4PortUnreachable,
- trunc: 0,
- },
- {
- name: "Zero-length packet",
- expectedCount: 0,
- fragmentOffset: 100,
- code: header.ICMPv4PortUnreachable,
- trunc: 2*header.IPv4MinimumSize + header.ICMPv4MinimumSize + dataLen,
- },
- }
- for _, c := range cases {
- t.Run(c.name, func(t *testing.T) {
- s := buildDummyStack(t)
- proto := s.NetworkProtocolInstance(ipv4.ProtocolNumber)
- nic := testInterface{
- testObject: testObject{
- t: t,
- },
- }
- ep := proto.NewEndpoint(&nic, &nic.testObject)
- defer ep.Close()
-
- if err := ep.Enable(); err != nil {
- t.Fatalf("ep.Enable(): %s", err)
- }
-
- const dataOffset = header.IPv4MinimumSize*2 + header.ICMPv4MinimumSize
- view := buffer.NewView(dataOffset + dataLen)
-
- // Create the outer IPv4 header.
- ip := header.IPv4(view)
- ip.Encode(&header.IPv4Fields{
- TotalLength: uint16(len(view) - c.trunc),
- TTL: 20,
- Protocol: uint8(header.ICMPv4ProtocolNumber),
- SrcAddr: "\x0a\x00\x00\xbb",
- DstAddr: localIPv4Addr,
- })
- ip.SetChecksum(^ip.CalculateChecksum())
-
- // Create the ICMP header.
- icmp := header.ICMPv4(view[header.IPv4MinimumSize:])
- icmp.SetType(header.ICMPv4DstUnreachable)
- icmp.SetCode(c.code)
- icmp.SetIdent(0xdead)
- icmp.SetSequence(0xbeef)
-
- // Create the inner IPv4 header.
- ip = header.IPv4(view[header.IPv4MinimumSize+header.ICMPv4MinimumSize:])
- ip.Encode(&header.IPv4Fields{
- TotalLength: 100,
- TTL: 20,
- Protocol: 10,
- FragmentOffset: c.fragmentOffset,
- SrcAddr: localIPv4Addr,
- DstAddr: remoteIPv4Addr,
- })
- ip.SetChecksum(^ip.CalculateChecksum())
-
- // Make payload be non-zero.
- for i := dataOffset; i < len(view); i++ {
- view[i] = uint8(i)
- }
-
- icmp.SetChecksum(0)
- checksum := ^header.Checksum(icmp, 0 /* initial */)
- icmp.SetChecksum(checksum)
-
- // Give packet to IPv4 endpoint, dispatcher will validate that
- // it's ok.
- nic.testObject.protocol = 10
- nic.testObject.srcAddr = remoteIPv4Addr
- nic.testObject.dstAddr = localIPv4Addr
- nic.testObject.contents = view[dataOffset:]
- nic.testObject.transErr = c.transErr
-
- addressableEndpoint, ok := ep.(stack.AddressableEndpoint)
- if !ok {
- t.Fatal("expected IPv4 network endpoint to implement stack.AddressableEndpoint")
- }
- addr := localIPv4Addr.WithPrefix()
- if ep, err := addressableEndpoint.AddAndAcquirePermanentAddress(addr, stack.CanBePrimaryEndpoint, stack.AddressConfigStatic, false /* deprecated */); err != nil {
- t.Fatalf("addressableEndpoint.AddAndAcquirePermanentAddress(%s, CanBePrimaryEndpoint, AddressConfigStatic, false): %s", addr, err)
- } else {
- ep.DecRef()
- }
-
- pkt := truncatedPacket(view, c.trunc, header.IPv4MinimumSize)
- ep.HandlePacket(pkt)
- if want := c.expectedCount; nic.testObject.controlCalls != want {
- t.Fatalf("Bad number of control calls for %q case: got %v, want %v", c.name, nic.testObject.controlCalls, want)
- }
- })
- }
-}
-
-func TestIPv4FragmentationReceive(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},
- })
- proto := s.NetworkProtocolInstance(ipv4.ProtocolNumber)
- nic := testInterface{
- testObject: testObject{
- t: t,
- v4: true,
- },
- }
- ep := proto.NewEndpoint(&nic, &nic.testObject)
- defer ep.Close()
-
- if err := ep.Enable(); err != nil {
- t.Fatalf("ep.Enable(): %s", err)
- }
-
- totalLen := header.IPv4MinimumSize + 24
-
- frag1 := buffer.NewView(totalLen)
- ip1 := header.IPv4(frag1)
- ip1.Encode(&header.IPv4Fields{
- TotalLength: uint16(totalLen),
- TTL: 20,
- Protocol: 10,
- FragmentOffset: 0,
- Flags: header.IPv4FlagMoreFragments,
- SrcAddr: remoteIPv4Addr,
- DstAddr: localIPv4Addr,
- })
- ip1.SetChecksum(^ip1.CalculateChecksum())
-
- // Make payload be non-zero.
- for i := header.IPv4MinimumSize; i < totalLen; i++ {
- frag1[i] = uint8(i)
- }
-
- frag2 := buffer.NewView(totalLen)
- ip2 := header.IPv4(frag2)
- ip2.Encode(&header.IPv4Fields{
- TotalLength: uint16(totalLen),
- TTL: 20,
- Protocol: 10,
- FragmentOffset: 24,
- SrcAddr: remoteIPv4Addr,
- DstAddr: localIPv4Addr,
- })
- ip2.SetChecksum(^ip2.CalculateChecksum())
-
- // Make payload be non-zero.
- for i := header.IPv4MinimumSize; i < totalLen; i++ {
- frag2[i] = uint8(i)
- }
-
- // Give packet to ipv4 endpoint, dispatcher will validate that it's ok.
- nic.testObject.protocol = 10
- nic.testObject.srcAddr = remoteIPv4Addr
- nic.testObject.dstAddr = localIPv4Addr
- nic.testObject.contents = append(frag1[header.IPv4MinimumSize:totalLen], frag2[header.IPv4MinimumSize:totalLen]...)
-
- // Send first segment.
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: frag1.ToVectorisedView(),
- })
-
- addressableEndpoint, ok := ep.(stack.AddressableEndpoint)
- if !ok {
- t.Fatal("expected IPv4 network endpoint to implement stack.AddressableEndpoint")
- }
- addr := localIPv4Addr.WithPrefix()
- if ep, err := addressableEndpoint.AddAndAcquirePermanentAddress(addr, stack.CanBePrimaryEndpoint, stack.AddressConfigStatic, false /* deprecated */); err != nil {
- t.Fatalf("addressableEndpoint.AddAndAcquirePermanentAddress(%s, CanBePrimaryEndpoint, AddressConfigStatic, false): %s", addr, err)
- } else {
- ep.DecRef()
- }
-
- ep.HandlePacket(pkt)
- if nic.testObject.dataCalls != 0 {
- t.Fatalf("Bad number of data calls: got %d, want 0", nic.testObject.dataCalls)
- }
- if nic.testObject.rawCalls != 0 {
- t.Errorf("Bad number of raw calls: got %d, want 0", nic.testObject.rawCalls)
- }
-
- // Send second segment.
- pkt = stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: frag2.ToVectorisedView(),
- })
- ep.HandlePacket(pkt)
- if nic.testObject.dataCalls != 1 {
- t.Fatalf("Bad number of data calls: got %d, want 1", nic.testObject.dataCalls)
- }
- if nic.testObject.rawCalls != 1 {
- t.Errorf("Bad number of raw calls: got %d, want 1", nic.testObject.rawCalls)
- }
-}
-
-func TestIPv6Send(t *testing.T) {
- s := buildDummyStack(t)
- proto := s.NetworkProtocolInstance(ipv6.ProtocolNumber)
- nic := testInterface{
- testObject: testObject{
- t: t,
- },
- }
- ep := proto.NewEndpoint(&nic, nil)
- defer ep.Close()
-
- if err := ep.Enable(); err != nil {
- t.Fatalf("ep.Enable(): %s", err)
- }
-
- // Allocate and initialize the payload view.
- payload := buffer.NewView(100)
- for i := 0; i < len(payload); i++ {
- payload[i] = uint8(i)
- }
-
- // Setup the packet buffer.
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: int(ep.MaxHeaderLength()),
- Data: payload.ToVectorisedView(),
- })
-
- // Issue the write.
- nic.testObject.protocol = 123
- nic.testObject.srcAddr = localIPv6Addr
- nic.testObject.dstAddr = remoteIPv6Addr
- nic.testObject.contents = payload
-
- r, err := buildIPv6Route(localIPv6Addr, remoteIPv6Addr)
- if err != nil {
- t.Fatalf("could not find route: %v", err)
- }
- if err := ep.WritePacket(r, stack.NetworkHeaderParams{
- Protocol: 123,
- TTL: 123,
- TOS: stack.DefaultTOS,
- }, pkt); err != nil {
- t.Fatalf("WritePacket failed: %v", err)
- }
-}
-
-func TestIPv6ReceiveControl(t *testing.T) {
- const (
- mtu = 0xffff
- outerSrcAddr = "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa"
- dataLen = 8
- )
-
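- // newUint16 returns a pointer to its argument; it lets test cases express
- // an optional fragment offset.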
- newUint16 := func(v uint16) *uint16 { return &v }
-
- portUnreachableTransErr := transportError{
- origin: tcpip.SockExtErrorOriginICMP6,
- typ: uint8(header.ICMPv6DstUnreachable),
- code: uint8(header.ICMPv6PortUnreachable),
- kind: stack.DestinationPortUnreachableTransportError,
- }
-
- cases := []struct {
- name string
- expectedCount int
- fragmentOffset *uint16
- typ header.ICMPv6Type
- code header.ICMPv6Code
- transErr transportError
- trunc int
- }{
- {
- name: "PacketTooBig",
- expectedCount: 1,
- fragmentOffset: nil,
- typ: header.ICMPv6PacketTooBig,
- code: header.ICMPv6UnusedCode,
- transErr: transportError{
- origin: tcpip.SockExtErrorOriginICMP6,
- typ: uint8(header.ICMPv6PacketTooBig),
- code: uint8(header.ICMPv6UnusedCode),
- info: mtu,
- kind: stack.PacketTooBigTransportError,
- },
- trunc: 0,
- },
- {
- name: "Truncated (missing offending packet's IPv6 header)",
- expectedCount: 0,
- fragmentOffset: nil,
- typ: header.ICMPv6PacketTooBig,
- code: header.ICMPv6UnusedCode,
- trunc: header.IPv6MinimumSize + header.ICMPv6PacketTooBigMinimumSize,
- },
- {
- name: "Truncated PacketTooBig (partial offending packet's IPv6 header)",
- expectedCount: 0,
- fragmentOffset: nil,
- typ: header.ICMPv6PacketTooBig,
- code: header.ICMPv6UnusedCode,
- trunc: header.IPv6MinimumSize + header.ICMPv6PacketTooBigMinimumSize + header.IPv6MinimumSize - 1,
- },
- {
- name: "Truncated (partial offending packet's data)",
- expectedCount: 0,
- fragmentOffset: nil,
- typ: header.ICMPv6PacketTooBig,
- code: header.ICMPv6UnusedCode,
- trunc: header.IPv6MinimumSize + header.ICMPv6PacketTooBigMinimumSize + header.IPv6MinimumSize + dataLen - 1,
- },
- {
- name: "Port unreachable",
- expectedCount: 1,
- fragmentOffset: nil,
- typ: header.ICMPv6DstUnreachable,
- code: header.ICMPv6PortUnreachable,
- transErr: portUnreachableTransErr,
- trunc: 0,
- },
- {
- name: "Truncated DstPortUnreachable (partial offending packet's IP header)",
- expectedCount: 0,
- fragmentOffset: nil,
- typ: header.ICMPv6DstUnreachable,
- code: header.ICMPv6PortUnreachable,
- trunc: header.IPv6MinimumSize + header.ICMPv6DstUnreachableMinimumSize + header.IPv6MinimumSize - 1,
- },
- {
- name: "DstPortUnreachable for Fragmented, zero offset",
- expectedCount: 1,
- fragmentOffset: newUint16(0),
- typ: header.ICMPv6DstUnreachable,
- code: header.ICMPv6PortUnreachable,
- transErr: portUnreachableTransErr,
- trunc: 0,
- },
- {
- name: "DstPortUnreachable for Non-zero fragment offset",
- expectedCount: 0,
- fragmentOffset: newUint16(100),
- typ: header.ICMPv6DstUnreachable,
- code: header.ICMPv6PortUnreachable,
- transErr: portUnreachableTransErr,
- trunc: 0,
- },
- {
- name: "Zero-length packet",
- expectedCount: 0,
- fragmentOffset: nil,
- typ: header.ICMPv6DstUnreachable,
- code: header.ICMPv6PortUnreachable,
- trunc: 2*header.IPv6MinimumSize + header.ICMPv6DstUnreachableMinimumSize + dataLen,
- },
- }
- for _, c := range cases {
- t.Run(c.name, func(t *testing.T) {
- s := buildDummyStack(t)
- proto := s.NetworkProtocolInstance(ipv6.ProtocolNumber)
- nic := testInterface{
- testObject: testObject{
- t: t,
- },
- }
- ep := proto.NewEndpoint(&nic, &nic.testObject)
- defer ep.Close()
-
- if err := ep.Enable(); err != nil {
- t.Fatalf("ep.Enable(): %s", err)
- }
-
- dataOffset := header.IPv6MinimumSize*2 + header.ICMPv6MinimumSize
- if c.fragmentOffset != nil {
- dataOffset += header.IPv6FragmentHeaderSize
- }
- view := buffer.NewView(dataOffset + dataLen)
-
- // Create the outer IPv6 header.
- ip := header.IPv6(view)
- ip.Encode(&header.IPv6Fields{
- PayloadLength: uint16(len(view) - header.IPv6MinimumSize - c.trunc),
- TransportProtocol: header.ICMPv6ProtocolNumber,
- HopLimit: 20,
- SrcAddr: outerSrcAddr,
- DstAddr: localIPv6Addr,
- })
-
- // Create the ICMP header.
- icmp := header.ICMPv6(view[header.IPv6MinimumSize:])
- icmp.SetType(c.typ)
- icmp.SetCode(c.code)
- icmp.SetIdent(0xdead)
- icmp.SetSequence(0xbeef)
-
- var extHdrs header.IPv6ExtHdrSerializer
- // Build the fragmentation header if needed.
- if c.fragmentOffset != nil {
- extHdrs = append(extHdrs, &header.IPv6SerializableFragmentExtHdr{
- FragmentOffset: *c.fragmentOffset,
- M: true,
- Identification: 0x12345678,
- })
- }
-
- // Create the inner IPv6 header.
- ip = header.IPv6(view[header.IPv6MinimumSize+header.ICMPv6PayloadOffset:])
- ip.Encode(&header.IPv6Fields{
- PayloadLength: 100,
- TransportProtocol: 10,
- HopLimit: 20,
- SrcAddr: localIPv6Addr,
- DstAddr: remoteIPv6Addr,
- ExtensionHeaders: extHdrs,
- })
-
- // Make payload be non-zero.
- for i := dataOffset; i < len(view); i++ {
- view[i] = uint8(i)
- }
-
- // Give packet to IPv6 endpoint, dispatcher will validate that
- // it's ok.
- nic.testObject.protocol = 10
- nic.testObject.srcAddr = remoteIPv6Addr
- nic.testObject.dstAddr = localIPv6Addr
- nic.testObject.contents = view[dataOffset:]
- nic.testObject.transErr = c.transErr
-
- // Set ICMPv6 checksum.
- icmp.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
- Header: icmp,
- Src: outerSrcAddr,
- Dst: localIPv6Addr,
- }))
-
- addressableEndpoint, ok := ep.(stack.AddressableEndpoint)
- if !ok {
- t.Fatal("expected IPv6 network endpoint to implement stack.AddressableEndpoint")
- }
- addr := localIPv6Addr.WithPrefix()
- if ep, err := addressableEndpoint.AddAndAcquirePermanentAddress(addr, stack.CanBePrimaryEndpoint, stack.AddressConfigStatic, false /* deprecated */); err != nil {
- t.Fatalf("addressableEndpoint.AddAndAcquirePermanentAddress(%s, CanBePrimaryEndpoint, AddressConfigStatic, false): %s", addr, err)
- } else {
- ep.DecRef()
- }
- pkt := truncatedPacket(view, c.trunc, header.IPv6MinimumSize)
- ep.HandlePacket(pkt)
- if want := c.expectedCount; nic.testObject.controlCalls != want {
- t.Fatalf("Bad number of control calls for %q case: got %v, want %v", c.name, nic.testObject.controlCalls, want)
- }
- })
- }
-}
-
-// truncatedPacket returns a PacketBuffer based on a truncated view. If view,
-// after truncation, is large enough to hold a network header, it makes part of
-// view the packet's NetworkHeader and the rest its Data. Otherwise all of view
-// becomes Data.
-func truncatedPacket(view buffer.View, trunc, netHdrLen int) *stack.PacketBuffer {
- v := view[:len(view)-trunc]
- if len(v) < netHdrLen {
- return stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: v.ToVectorisedView(),
- })
- }
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: v.ToVectorisedView(),
- })
- _, _ = pkt.NetworkHeader().Consume(netHdrLen)
- return pkt
-}
-
-func TestWriteHeaderIncludedPacket(t *testing.T) {
- const (
- nicID = 1
- transportProto = 5
-
- dataLen = 4
- )
-
- dataBuf := [dataLen]byte{1, 2, 3, 4}
- data := dataBuf[:]
-
- ipv4Options := header.IPv4OptionsSerializer{
- &header.IPv4SerializableListEndOption{},
- &header.IPv4SerializableNOPOption{},
- &header.IPv4SerializableListEndOption{},
- &header.IPv4SerializableNOPOption{},
- }
-
- expectOptions := header.IPv4Options{
- byte(header.IPv4OptionListEndType),
- byte(header.IPv4OptionNOPType),
- byte(header.IPv4OptionListEndType),
- byte(header.IPv4OptionNOPType),
- }
-
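- // A raw IPv6 fragment extension header: next header, a reserved byte, two
- // bytes of fragment offset/flags, and a 4-byte identification.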
- ipv6FragmentExtHdrBuf := [header.IPv6FragmentExtHdrLength]byte{transportProto, 0, 62, 4, 1, 2, 3, 4}
- ipv6FragmentExtHdr := ipv6FragmentExtHdrBuf[:]
-
- var ipv6PayloadWithExtHdrBuf [dataLen + header.IPv6FragmentExtHdrLength]byte
- ipv6PayloadWithExtHdr := ipv6PayloadWithExtHdrBuf[:]
- if n := copy(ipv6PayloadWithExtHdr, ipv6FragmentExtHdr); n != len(ipv6FragmentExtHdr) {
- t.Fatalf("copied %d bytes, expected %d bytes", n, len(ipv6FragmentExtHdr))
- }
- if n := copy(ipv6PayloadWithExtHdr[header.IPv6FragmentExtHdrLength:], data); n != len(data) {
- t.Fatalf("copied %d bytes, expected %d bytes", n, len(data))
- }
-
- tests := []struct {
- name string
- protoFactory stack.NetworkProtocolFactory
- protoNum tcpip.NetworkProtocolNumber
- nicAddr tcpip.Address
- remoteAddr tcpip.Address
- pktGen func(*testing.T, tcpip.Address) buffer.VectorisedView
- checker func(*testing.T, *stack.PacketBuffer, tcpip.Address)
- expectedErr tcpip.Error
- }{
- {
- name: "IPv4",
- protoFactory: ipv4.NewProtocol,
- protoNum: ipv4.ProtocolNumber,
- nicAddr: localIPv4Addr,
- remoteAddr: remoteIPv4Addr,
- pktGen: func(t *testing.T, src tcpip.Address) buffer.VectorisedView {
- totalLen := header.IPv4MinimumSize + len(data)
- hdr := buffer.NewPrependable(totalLen)
- if n := copy(hdr.Prepend(len(data)), data); n != len(data) {
- t.Fatalf("copied %d bytes, expected %d bytes", n, len(data))
- }
- ip := header.IPv4(hdr.Prepend(header.IPv4MinimumSize))
- ip.Encode(&header.IPv4Fields{
- Protocol: transportProto,
- TTL: ipv4.DefaultTTL,
- SrcAddr: src,
- DstAddr: remoteIPv4Addr,
- })
- return hdr.View().ToVectorisedView()
- },
- checker: func(t *testing.T, pkt *stack.PacketBuffer, src tcpip.Address) {
- if src == header.IPv4Any {
- src = localIPv4Addr
- }
-
- netHdr := pkt.NetworkHeader()
-
- if len(netHdr.View()) != header.IPv4MinimumSize {
- t.Errorf("got len(netHdr.View()) = %d, want = %d", len(netHdr.View()), header.IPv4MinimumSize)
- }
-
- checker.IPv4(t, stack.PayloadSince(netHdr),
- checker.SrcAddr(src),
- checker.DstAddr(remoteIPv4Addr),
- checker.IPv4HeaderLength(header.IPv4MinimumSize),
- checker.IPFullLength(uint16(header.IPv4MinimumSize+len(data))),
- checker.IPPayload(data),
- )
- },
- },
- {
- name: "IPv4 with IHL too small",
- protoFactory: ipv4.NewProtocol,
- protoNum: ipv4.ProtocolNumber,
- nicAddr: localIPv4Addr,
- remoteAddr: remoteIPv4Addr,
- pktGen: func(t *testing.T, src tcpip.Address) buffer.VectorisedView {
- totalLen := header.IPv4MinimumSize + len(data)
- hdr := buffer.NewPrependable(totalLen)
- if n := copy(hdr.Prepend(len(data)), data); n != len(data) {
- t.Fatalf("copied %d bytes, expected %d bytes", n, len(data))
- }
- ip := header.IPv4(hdr.Prepend(header.IPv4MinimumSize))
- ip.Encode(&header.IPv4Fields{
- Protocol: transportProto,
- TTL: ipv4.DefaultTTL,
- SrcAddr: src,
- DstAddr: remoteIPv4Addr,
- })
- ip.SetHeaderLength(header.IPv4MinimumSize - 1)
- return hdr.View().ToVectorisedView()
- },
- expectedErr: &tcpip.ErrMalformedHeader{},
- },
- {
- name: "IPv4 too small",
- protoFactory: ipv4.NewProtocol,
- protoNum: ipv4.ProtocolNumber,
- nicAddr: localIPv4Addr,
- remoteAddr: remoteIPv4Addr,
- pktGen: func(t *testing.T, src tcpip.Address) buffer.VectorisedView {
- ip := header.IPv4(make([]byte, header.IPv4MinimumSize))
- ip.Encode(&header.IPv4Fields{
- Protocol: transportProto,
- TTL: ipv4.DefaultTTL,
- SrcAddr: src,
- DstAddr: remoteIPv4Addr,
- })
- return buffer.View(ip[:len(ip)-1]).ToVectorisedView()
- },
- expectedErr: &tcpip.ErrMalformedHeader{},
- },
- {
- name: "IPv4 minimum size",
- protoFactory: ipv4.NewProtocol,
- protoNum: ipv4.ProtocolNumber,
- nicAddr: localIPv4Addr,
- remoteAddr: remoteIPv4Addr,
- pktGen: func(t *testing.T, src tcpip.Address) buffer.VectorisedView {
- ip := header.IPv4(make([]byte, header.IPv4MinimumSize))
- ip.Encode(&header.IPv4Fields{
- Protocol: transportProto,
- TTL: ipv4.DefaultTTL,
- SrcAddr: src,
- DstAddr: remoteIPv4Addr,
- })
- return buffer.View(ip).ToVectorisedView()
- },
- checker: func(t *testing.T, pkt *stack.PacketBuffer, src tcpip.Address) {
- if src == header.IPv4Any {
- src = localIPv4Addr
- }
-
- netHdr := pkt.NetworkHeader()
-
- if len(netHdr.View()) != header.IPv4MinimumSize {
- t.Errorf("got len(netHdr.View()) = %d, want = %d", len(netHdr.View()), header.IPv4MinimumSize)
- }
-
- checker.IPv4(t, stack.PayloadSince(netHdr),
- checker.SrcAddr(src),
- checker.DstAddr(remoteIPv4Addr),
- checker.IPv4HeaderLength(header.IPv4MinimumSize),
- checker.IPFullLength(header.IPv4MinimumSize),
- checker.IPPayload(nil),
- )
- },
- },
- {
- name: "IPv4 with options",
- protoFactory: ipv4.NewProtocol,
- protoNum: ipv4.ProtocolNumber,
- nicAddr: localIPv4Addr,
- remoteAddr: remoteIPv4Addr,
- pktGen: func(t *testing.T, src tcpip.Address) buffer.VectorisedView {
- ipHdrLen := int(header.IPv4MinimumSize + ipv4Options.Length())
- totalLen := ipHdrLen + len(data)
- hdr := buffer.NewPrependable(totalLen)
- if n := copy(hdr.Prepend(len(data)), data); n != len(data) {
- t.Fatalf("copied %d bytes, expected %d bytes", n, len(data))
- }
- ip := header.IPv4(hdr.Prepend(ipHdrLen))
- ip.Encode(&header.IPv4Fields{
- Protocol: transportProto,
- TTL: ipv4.DefaultTTL,
- SrcAddr: src,
- DstAddr: remoteIPv4Addr,
- Options: ipv4Options,
- })
- return hdr.View().ToVectorisedView()
- },
- checker: func(t *testing.T, pkt *stack.PacketBuffer, src tcpip.Address) {
- if src == header.IPv4Any {
- src = localIPv4Addr
- }
-
- netHdr := pkt.NetworkHeader()
-
- hdrLen := int(header.IPv4MinimumSize + ipv4Options.Length())
- if len(netHdr.View()) != hdrLen {
- t.Errorf("got len(netHdr.View()) = %d, want = %d", len(netHdr.View()), hdrLen)
- }
-
- checker.IPv4(t, stack.PayloadSince(netHdr),
- checker.SrcAddr(src),
- checker.DstAddr(remoteIPv4Addr),
- checker.IPv4HeaderLength(hdrLen),
- checker.IPFullLength(uint16(hdrLen+len(data))),
- checker.IPv4Options(expectOptions),
- checker.IPPayload(data),
- )
- },
- },
- {
- name: "IPv4 with options and data across views",
- protoFactory: ipv4.NewProtocol,
- protoNum: ipv4.ProtocolNumber,
- nicAddr: localIPv4Addr,
- remoteAddr: remoteIPv4Addr,
- pktGen: func(t *testing.T, src tcpip.Address) buffer.VectorisedView {
- ip := header.IPv4(make([]byte, header.IPv4MinimumSize+ipv4Options.Length()))
- ip.Encode(&header.IPv4Fields{
- Protocol: transportProto,
- TTL: ipv4.DefaultTTL,
- SrcAddr: src,
- DstAddr: remoteIPv4Addr,
- Options: ipv4Options,
- })
- vv := buffer.View(ip).ToVectorisedView()
- vv.AppendView(data)
- return vv
- },
- checker: func(t *testing.T, pkt *stack.PacketBuffer, src tcpip.Address) {
- if src == header.IPv4Any {
- src = localIPv4Addr
- }
-
- netHdr := pkt.NetworkHeader()
-
- hdrLen := int(header.IPv4MinimumSize + ipv4Options.Length())
- if len(netHdr.View()) != hdrLen {
- t.Errorf("got len(netHdr.View()) = %d, want = %d", len(netHdr.View()), hdrLen)
- }
-
- checker.IPv4(t, stack.PayloadSince(netHdr),
- checker.SrcAddr(src),
- checker.DstAddr(remoteIPv4Addr),
- checker.IPv4HeaderLength(hdrLen),
- checker.IPFullLength(uint16(hdrLen+len(data))),
- checker.IPv4Options(expectOptions),
- checker.IPPayload(data),
- )
- },
- },
- {
- name: "IPv6",
- protoFactory: ipv6.NewProtocol,
- protoNum: ipv6.ProtocolNumber,
- nicAddr: localIPv6Addr,
- remoteAddr: remoteIPv6Addr,
- pktGen: func(t *testing.T, src tcpip.Address) buffer.VectorisedView {
- totalLen := header.IPv6MinimumSize + len(data)
- hdr := buffer.NewPrependable(totalLen)
- if n := copy(hdr.Prepend(len(data)), data); n != len(data) {
- t.Fatalf("copied %d bytes, expected %d bytes", n, len(data))
- }
- ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
- ip.Encode(&header.IPv6Fields{
- TransportProtocol: transportProto,
- HopLimit: ipv6.DefaultTTL,
- SrcAddr: src,
- DstAddr: remoteIPv6Addr,
- })
- return hdr.View().ToVectorisedView()
- },
- checker: func(t *testing.T, pkt *stack.PacketBuffer, src tcpip.Address) {
- if src == header.IPv6Any {
- src = localIPv6Addr
- }
-
- netHdr := pkt.NetworkHeader()
-
- if len(netHdr.View()) != header.IPv6MinimumSize {
- t.Errorf("got len(netHdr.View()) = %d, want = %d", len(netHdr.View()), header.IPv6MinimumSize)
- }
-
- checker.IPv6(t, stack.PayloadSince(netHdr),
- checker.SrcAddr(src),
- checker.DstAddr(remoteIPv6Addr),
- checker.IPFullLength(uint16(header.IPv6MinimumSize+len(data))),
- checker.IPPayload(data),
- )
- },
- },
- {
- name: "IPv6 with extension header",
- protoFactory: ipv6.NewProtocol,
- protoNum: ipv6.ProtocolNumber,
- nicAddr: localIPv6Addr,
- remoteAddr: remoteIPv6Addr,
- pktGen: func(t *testing.T, src tcpip.Address) buffer.VectorisedView {
- totalLen := header.IPv6MinimumSize + len(ipv6FragmentExtHdr) + len(data)
- hdr := buffer.NewPrependable(totalLen)
- if n := copy(hdr.Prepend(len(data)), data); n != len(data) {
- t.Fatalf("copied %d bytes, expected %d bytes", n, len(data))
- }
- if n := copy(hdr.Prepend(len(ipv6FragmentExtHdr)), ipv6FragmentExtHdr); n != len(ipv6FragmentExtHdr) {
- t.Fatalf("copied %d bytes, expected %d bytes", n, len(ipv6FragmentExtHdr))
- }
- ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
- ip.Encode(&header.IPv6Fields{
- // NB: we're lying about transport protocol here to verify the raw
- // fragment header bytes.
- TransportProtocol: tcpip.TransportProtocolNumber(header.IPv6FragmentExtHdrIdentifier),
- HopLimit: ipv6.DefaultTTL,
- SrcAddr: src,
- DstAddr: remoteIPv6Addr,
- })
- return hdr.View().ToVectorisedView()
- },
- checker: func(t *testing.T, pkt *stack.PacketBuffer, src tcpip.Address) {
- if src == header.IPv6Any {
- src = localIPv6Addr
- }
-
- netHdr := pkt.NetworkHeader()
-
- if want := header.IPv6MinimumSize + len(ipv6FragmentExtHdr); len(netHdr.View()) != want {
- t.Errorf("got len(netHdr.View()) = %d, want = %d", len(netHdr.View()), want)
- }
-
- checker.IPv6(t, stack.PayloadSince(netHdr),
- checker.SrcAddr(src),
- checker.DstAddr(remoteIPv6Addr),
- checker.IPFullLength(uint16(header.IPv6MinimumSize+len(ipv6PayloadWithExtHdr))),
- checker.IPPayload(ipv6PayloadWithExtHdr),
- )
- },
- },
- {
- name: "IPv6 minimum size",
- protoFactory: ipv6.NewProtocol,
- protoNum: ipv6.ProtocolNumber,
- nicAddr: localIPv6Addr,
- remoteAddr: remoteIPv6Addr,
- pktGen: func(t *testing.T, src tcpip.Address) buffer.VectorisedView {
- ip := header.IPv6(make([]byte, header.IPv6MinimumSize))
- ip.Encode(&header.IPv6Fields{
- TransportProtocol: transportProto,
- HopLimit: ipv6.DefaultTTL,
- SrcAddr: src,
- DstAddr: remoteIPv6Addr,
- })
- return buffer.View(ip).ToVectorisedView()
- },
- checker: func(t *testing.T, pkt *stack.PacketBuffer, src tcpip.Address) {
- if src == header.IPv6Any {
- src = localIPv6Addr
- }
-
- netHdr := pkt.NetworkHeader()
-
- if len(netHdr.View()) != header.IPv6MinimumSize {
- t.Errorf("got len(netHdr.View()) = %d, want = %d", len(netHdr.View()), header.IPv6MinimumSize)
- }
-
- checker.IPv6(t, stack.PayloadSince(netHdr),
- checker.SrcAddr(src),
- checker.DstAddr(remoteIPv6Addr),
- checker.IPFullLength(header.IPv6MinimumSize),
- checker.IPPayload(nil),
- )
- },
- },
- {
- name: "IPv6 too small",
- protoFactory: ipv6.NewProtocol,
- protoNum: ipv6.ProtocolNumber,
- nicAddr: localIPv6Addr,
- remoteAddr: remoteIPv6Addr,
- pktGen: func(t *testing.T, src tcpip.Address) buffer.VectorisedView {
- ip := header.IPv6(make([]byte, header.IPv6MinimumSize))
- ip.Encode(&header.IPv6Fields{
- TransportProtocol: transportProto,
- HopLimit: ipv6.DefaultTTL,
- SrcAddr: src,
- DstAddr: remoteIPv6Addr,
- })
- return buffer.View(ip[:len(ip)-1]).ToVectorisedView()
- },
- expectedErr: &tcpip.ErrMalformedHeader{},
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- subTests := []struct {
- name string
- srcAddr tcpip.Address
- }{
- {
- name: "unspecified source",
- srcAddr: tcpip.Address(strings.Repeat("\x00", len(test.nicAddr))),
- },
- {
- name: "random source",
- srcAddr: tcpip.Address(strings.Repeat("\xab", len(test.nicAddr))),
- },
- }
-
- for _, subTest := range subTests {
- t.Run(subTest.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{test.protoFactory},
- })
- e := channel.New(1, header.IPv6MinimumMTU, "")
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("s.CreateNIC(%d, _): %s", nicID, err)
- }
- if err := s.AddAddress(nicID, test.protoNum, test.nicAddr); err != nil {
- t.Fatalf("s.AddAddress(%d, %d, %s): %s", nicID, test.protoNum, test.nicAddr, err)
- }
-
- s.SetRouteTable([]tcpip.Route{{Destination: test.remoteAddr.WithPrefix().Subnet(), NIC: nicID}})
-
- r, err := s.FindRoute(nicID, test.nicAddr, test.remoteAddr, test.protoNum, false /* multicastLoop */)
- if err != nil {
- t.Fatalf("s.FindRoute(%d, %s, %s, %d, false): %s", nicID, test.remoteAddr, test.nicAddr, test.protoNum, err)
- }
- defer r.Release()
-
- {
- err := r.WriteHeaderIncludedPacket(stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: test.pktGen(t, subTest.srcAddr),
- }))
- if diff := cmp.Diff(test.expectedErr, err); diff != "" {
- t.Fatalf("unexpected error from r.WriteHeaderIncludedPacket(_), (-want, +got):\n%s", diff)
- }
- }
-
- if test.expectedErr != nil {
- return
- }
-
- pkt, ok := e.Read()
- if !ok {
- t.Fatal("expected a packet to be written")
- }
- test.checker(t, pkt.Pkt, subTest.srcAddr)
- })
- }
- })
- }
-}
-
-// Test that the included data in an ICMP error packet conforms to the
-// requirements of RFC 792, RFC 4443 section 2.4 and RFC 1812 Section 4.3.2.3.
-func TestICMPInclusionSize(t *testing.T) {
- const (
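- // An ICMP error reply includes the reply's own IP header, the ICMP header,
- // and at least the offending packet's IP header.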
- replyHeaderLength4 = header.IPv4MinimumSize + header.IPv4MinimumSize + header.ICMPv4MinimumSize
- replyHeaderLength6 = header.IPv6MinimumSize + header.IPv6MinimumSize + header.ICMPv6MinimumSize
- targetSize4 = header.IPv4MinimumProcessableDatagramSize
- targetSize6 = header.IPv6MinimumMTU
- // A protocol number that will cause an error response.
- reservedProtocol = 254
- )
-
- // IPv4 function to create a IP packet and send it to the stack.
- // The packet should generate an error response. We can do that by using an
- // unknown transport protocol (254).
- rxIPv4Bad := func(e *channel.Endpoint, src tcpip.Address, payload []byte) buffer.View {
- totalLen := header.IPv4MinimumSize + len(payload)
- hdr := buffer.NewPrependable(header.IPv4MinimumSize)
- ip := header.IPv4(hdr.Prepend(header.IPv4MinimumSize))
- ip.Encode(&header.IPv4Fields{
- TotalLength: uint16(totalLen),
- Protocol: reservedProtocol,
- TTL: ipv4.DefaultTTL,
- SrcAddr: src,
- DstAddr: localIPv4Addr,
- })
- ip.SetChecksum(^ip.CalculateChecksum())
- vv := hdr.View().ToVectorisedView()
- vv.AppendView(buffer.View(payload))
- // Take a copy before InjectInbound takes ownership of vv
- // as vv may be changed during the call.
- v := vv.ToView()
- e.InjectInbound(header.IPv4ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: vv,
- }))
- return v
- }
-
- // IPv6 function to create a packet and send it to the stack.
- // The packet should be errant in a way that causes the stack to send an
- // ICMP error response and have enough data to allow the testing of the
- // inclusion of the errant packet. Use `unknown next header' to generate
- // the error.
- rxIPv6Bad := func(e *channel.Endpoint, src tcpip.Address, payload []byte) buffer.View {
- hdr := buffer.NewPrependable(header.IPv6MinimumSize)
- ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
- ip.Encode(&header.IPv6Fields{
- PayloadLength: uint16(len(payload)),
- TransportProtocol: reservedProtocol,
- HopLimit: ipv6.DefaultTTL,
- SrcAddr: src,
- DstAddr: localIPv6Addr,
- })
- vv := hdr.View().ToVectorisedView()
- vv.AppendView(buffer.View(payload))
- // Take a copy before InjectInbound takes ownership of vv
- // as vv may be changed during the call.
- v := vv.ToView()
-
- e.InjectInbound(header.IPv6ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: vv,
- }))
- return v
- }
-
- v4Checker := func(t *testing.T, pkt *stack.PacketBuffer, payload buffer.View) {
- // We already know the entire packet is the right size so we can use its
- // length to calculate the right payload size to check.
- expectedPayloadLength := pkt.Size() - header.IPv4MinimumSize - header.ICMPv4MinimumSize
- checker.IPv4(t, stack.PayloadSince(pkt.NetworkHeader()),
- checker.SrcAddr(localIPv4Addr),
- checker.DstAddr(remoteIPv4Addr),
- checker.IPv4HeaderLength(header.IPv4MinimumSize),
- checker.IPFullLength(uint16(header.IPv4MinimumSize+header.ICMPv4MinimumSize+expectedPayloadLength)),
- checker.ICMPv4(
- checker.ICMPv4Checksum(),
- checker.ICMPv4Type(header.ICMPv4DstUnreachable),
- checker.ICMPv4Code(header.ICMPv4ProtoUnreachable),
- checker.ICMPv4Payload(payload[:expectedPayloadLength]),
- ),
- )
- }
-
- v6Checker := func(t *testing.T, pkt *stack.PacketBuffer, payload buffer.View) {
- // We already know the entire packet is the right size so we can use its
- // length to calculate the right payload size to check.
- expectedPayloadLength := pkt.Size() - header.IPv6MinimumSize - header.ICMPv6MinimumSize
- checker.IPv6(t, stack.PayloadSince(pkt.NetworkHeader()),
- checker.SrcAddr(localIPv6Addr),
- checker.DstAddr(remoteIPv6Addr),
- checker.IPFullLength(uint16(header.IPv6MinimumSize+header.ICMPv6MinimumSize+expectedPayloadLength)),
- checker.ICMPv6(
- checker.ICMPv6Type(header.ICMPv6ParamProblem),
- checker.ICMPv6Code(header.ICMPv6UnknownHeader),
- checker.ICMPv6Payload(payload[:expectedPayloadLength]),
- ),
- )
- }
- tests := []struct {
- name string
- srcAddress tcpip.Address
- injector func(*channel.Endpoint, tcpip.Address, []byte) buffer.View
- checker func(*testing.T, *stack.PacketBuffer, buffer.View)
- payloadLength int // Not including IP header.
- linkMTU uint32 // Largest IP packet that the link can send as payload.
- replyLength int // Total size of IP/ICMP packet expected back.
- }{
- {
- name: "IPv4 exact match",
- srcAddress: remoteIPv4Addr,
- injector: rxIPv4Bad,
- checker: v4Checker,
- payloadLength: targetSize4 - replyHeaderLength4,
- linkMTU: targetSize4,
- replyLength: targetSize4,
- },
- {
- name: "IPv4 larger MTU",
- srcAddress: remoteIPv4Addr,
- injector: rxIPv4Bad,
- checker: v4Checker,
- payloadLength: targetSize4,
- linkMTU: targetSize4 + 1000,
- replyLength: targetSize4,
- },
- {
- name: "IPv4 smaller MTU",
- srcAddress: remoteIPv4Addr,
- injector: rxIPv4Bad,
- checker: v4Checker,
- payloadLength: targetSize4,
- linkMTU: targetSize4 - 50,
- replyLength: targetSize4 - 50,
- },
- {
- name: "IPv4 payload exceeds",
- srcAddress: remoteIPv4Addr,
- injector: rxIPv4Bad,
- checker: v4Checker,
- payloadLength: targetSize4 + 10,
- linkMTU: targetSize4,
- replyLength: targetSize4,
- },
- {
- name: "IPv4 1 byte less",
- srcAddress: remoteIPv4Addr,
- injector: rxIPv4Bad,
- checker: v4Checker,
- payloadLength: targetSize4 - replyHeaderLength4 - 1,
- linkMTU: targetSize4,
- replyLength: targetSize4 - 1,
- },
- {
- name: "IPv4 No payload",
- srcAddress: remoteIPv4Addr,
- injector: rxIPv4Bad,
- checker: v4Checker,
- payloadLength: 0,
- linkMTU: targetSize4,
- replyLength: replyHeaderLength4,
- },
- {
- name: "IPv6 exact match",
- srcAddress: remoteIPv6Addr,
- injector: rxIPv6Bad,
- checker: v6Checker,
- payloadLength: targetSize6 - replyHeaderLength6,
- linkMTU: targetSize6,
- replyLength: targetSize6,
- },
- {
- name: "IPv6 larger MTU",
- srcAddress: remoteIPv6Addr,
- injector: rxIPv6Bad,
- checker: v6Checker,
- payloadLength: targetSize6,
- linkMTU: targetSize6 + 400,
- replyLength: targetSize6,
- },
- // NB. No "smaller MTU" test here as less than 1280 is not permitted
- // in IPv6.
- {
- name: "IPv6 payload exceeds",
- srcAddress: remoteIPv6Addr,
- injector: rxIPv6Bad,
- checker: v6Checker,
- payloadLength: targetSize6,
- linkMTU: targetSize6,
- replyLength: targetSize6,
- },
- {
- name: "IPv6 1 byte less",
- srcAddress: remoteIPv6Addr,
- injector: rxIPv6Bad,
- checker: v6Checker,
- payloadLength: targetSize6 - replyHeaderLength6 - 1,
- linkMTU: targetSize6,
- replyLength: targetSize6 - 1,
- },
- {
- name: "IPv6 no payload",
- srcAddress: remoteIPv6Addr,
- injector: rxIPv6Bad,
- checker: v6Checker,
- payloadLength: 0,
- linkMTU: targetSize6,
- replyLength: replyHeaderLength6,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s, e := buildDummyStackWithLinkEndpoint(t, test.linkMTU)
- // Allocate and initialize the payload view.
- payload := buffer.NewView(test.payloadLength)
- for i := 0; i < len(payload); i++ {
- payload[i] = uint8(i)
- }
- // Default routes for IPv4&6 so ICMP can find a route to the remote
- // node when attempting to send the ICMP error Reply.
- s.SetRouteTable([]tcpip.Route{
- {
- Destination: header.IPv4EmptySubnet,
- NIC: nicID,
- },
- {
- Destination: header.IPv6EmptySubnet,
- NIC: nicID,
- },
- })
- v := test.injector(e, test.srcAddress, payload)
- pkt, ok := e.Read()
- if !ok {
- t.Fatal("expected a packet to be written")
- }
- if got, want := pkt.Pkt.Size(), test.replyLength; got != want {
- t.Fatalf("got %d bytes of icmp error packet, want %d", got, want)
- }
- test.checker(t, pkt.Pkt, v)
- })
- }
-}
-
-func TestJoinLeaveAllRoutersGroup(t *testing.T) {
- const nicID = 1
-
- tests := []struct {
- name string
- netProto tcpip.NetworkProtocolNumber
- protoFactory stack.NetworkProtocolFactory
- allRoutersAddr tcpip.Address
- }{
- {
- name: "IPv4",
- netProto: ipv4.ProtocolNumber,
- protoFactory: ipv4.NewProtocol,
- allRoutersAddr: header.IPv4AllRoutersGroup,
- },
- {
- name: "IPv6 Interface Local",
- netProto: ipv6.ProtocolNumber,
- protoFactory: ipv6.NewProtocol,
- allRoutersAddr: header.IPv6AllRoutersInterfaceLocalMulticastAddress,
- },
- {
- name: "IPv6 Link Local",
- netProto: ipv6.ProtocolNumber,
- protoFactory: ipv6.NewProtocol,
- allRoutersAddr: header.IPv6AllRoutersLinkLocalMulticastAddress,
- },
- {
- name: "IPv6 Site Local",
- netProto: ipv6.ProtocolNumber,
- protoFactory: ipv6.NewProtocol,
- allRoutersAddr: header.IPv6AllRoutersSiteLocalMulticastAddress,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- for _, nicDisabled := range [...]bool{true, false} {
- t.Run(fmt.Sprintf("NIC Disabled = %t", nicDisabled), func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol, tcp.NewProtocol},
- })
- opts := stack.NICOptions{Disabled: nicDisabled}
- if err := s.CreateNICWithOptions(nicID, channel.New(0, 0, ""), opts); err != nil {
- t.Fatalf("CreateNICWithOptions(%d, _, %#v) = %s", nicID, opts, err)
- }
-
- if got, err := s.IsInGroup(nicID, test.allRoutersAddr); err != nil {
- t.Fatalf("s.IsInGroup(%d, %s): %s", nicID, test.allRoutersAddr, err)
- } else if got {
- t.Fatalf("got s.IsInGroup(%d, %s) = true, want = false", nicID, test.allRoutersAddr)
- }
-
- if err := s.SetForwardingDefaultAndAllNICs(test.netProto, true); err != nil {
- t.Fatalf("s.SetForwardingDefaultAndAllNICs(%d, true): %s", test.netProto, err)
- }
- if got, err := s.IsInGroup(nicID, test.allRoutersAddr); err != nil {
- t.Fatalf("s.IsInGroup(%d, %s): %s", nicID, test.allRoutersAddr, err)
- } else if !got {
- t.Fatalf("got s.IsInGroup(%d, %s) = false, want = true", nicID, test.allRoutersAddr)
- }
-
- if err := s.SetForwardingDefaultAndAllNICs(test.netProto, false); err != nil {
- t.Fatalf("s.SetForwardingDefaultAndAllNICs(%d, false): %s", test.netProto, err)
- }
- if got, err := s.IsInGroup(nicID, test.allRoutersAddr); err != nil {
- t.Fatalf("s.IsInGroup(%d, %s): %s", nicID, test.allRoutersAddr, err)
- } else if got {
- t.Fatalf("got s.IsInGroup(%d, %s) = true, want = false", nicID, test.allRoutersAddr)
- }
- })
- }
- })
- }
-}
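
For reference, the all-routers groups that TestJoinLeaveAllRoutersGroup joins when forwarding is enabled are the IANA-assigned addresses listed below; the snippet is standalone and not gVisor code:

    package main

    import (
        "fmt"
        "net"
    )

    func main() {
        // IANA-assigned all-routers multicast groups, one per scope above.
        allRouters := []struct{ scope, addr string }{
            {"IPv4", "224.0.0.2"},
            {"IPv6 interface-local", "ff01::2"},
            {"IPv6 link-local", "ff02::2"},
            {"IPv6 site-local", "ff05::2"},
        }
        for _, g := range allRouters {
            ip := net.ParseIP(g.addr)
            fmt.Printf("%s: %s (multicast=%t)\n", g.scope, ip, ip.IsMulticast())
        }
    }
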
diff --git a/pkg/tcpip/network/ipv4/BUILD b/pkg/tcpip/network/ipv4/BUILD
deleted file mode 100644
index c90974693..000000000
--- a/pkg/tcpip/network/ipv4/BUILD
+++ /dev/null
@@ -1,68 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "ipv4",
- srcs = [
- "icmp.go",
- "igmp.go",
- "ipv4.go",
- "stats.go",
- ],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/sync",
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/header",
- "//pkg/tcpip/header/parse",
- "//pkg/tcpip/network/hash",
- "//pkg/tcpip/network/internal/fragmentation",
- "//pkg/tcpip/network/internal/ip",
- "//pkg/tcpip/stack",
- ],
-)
-
-go_test(
- name = "ipv4_test",
- size = "small",
- srcs = [
- "igmp_test.go",
- "ipv4_test.go",
- ],
- deps = [
- "//pkg/sync",
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/checker",
- "//pkg/tcpip/faketime",
- "//pkg/tcpip/header",
- "//pkg/tcpip/link/channel",
- "//pkg/tcpip/link/loopback",
- "//pkg/tcpip/link/sniffer",
- "//pkg/tcpip/network/arp",
- "//pkg/tcpip/network/internal/testutil",
- "//pkg/tcpip/network/ipv4",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/testutil",
- "//pkg/tcpip/transport/icmp",
- "//pkg/tcpip/transport/raw",
- "//pkg/tcpip/transport/tcp",
- "//pkg/tcpip/transport/udp",
- "//pkg/waiter",
- "@com_github_google_go_cmp//cmp:go_default_library",
- ],
-)
-
-go_test(
- name = "stats_test",
- size = "small",
- srcs = ["stats_test.go"],
- library = ":ipv4",
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/testutil",
- ],
-)
diff --git a/pkg/tcpip/network/ipv4/igmp_test.go b/pkg/tcpip/network/ipv4/igmp_test.go
deleted file mode 100644
index 4bd6f462e..000000000
--- a/pkg/tcpip/network/ipv4/igmp_test.go
+++ /dev/null
@@ -1,387 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ipv4_test
-
-import (
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/checker"
- "gvisor.dev/gvisor/pkg/tcpip/faketime"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/link/channel"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/testutil"
-)
-
-const (
- linkAddr = tcpip.LinkAddress("\x02\x02\x03\x04\x05\x06")
- nicID = 1
- defaultTTL = 1
- defaultPrefixLength = 24
-)
-
-var (
- stackAddr = testutil.MustParse4("10.0.0.1")
- remoteAddr = testutil.MustParse4("10.0.0.2")
- multicastAddr = testutil.MustParse4("224.0.0.3")
-)
-
-// validateIgmpPacket checks that a passed PacketInfo is an IPv4 IGMP packet
-// sent to the provided address with the passed fields set. Raises a t.Error if
-// any field does not match.
-func validateIgmpPacket(t *testing.T, p channel.PacketInfo, igmpType header.IGMPType, maxRespTime byte, srcAddr, dstAddr, groupAddress tcpip.Address) {
- t.Helper()
-
- payload := header.IPv4(stack.PayloadSince(p.Pkt.NetworkHeader()))
- checker.IPv4(t, payload,
- checker.SrcAddr(srcAddr),
- checker.DstAddr(dstAddr),
- // TTL for an IGMP message must be 1 as per RFC 2236 section 2.
- checker.TTL(1),
- checker.IPv4RouterAlert(),
- checker.IGMP(
- checker.IGMPType(igmpType),
- checker.IGMPMaxRespTime(header.DecisecondToDuration(maxRespTime)),
- checker.IGMPGroupAddress(groupAddress),
- ),
- )
-}
-
-func createStack(t *testing.T, igmpEnabled bool) (*channel.Endpoint, *stack.Stack, *faketime.ManualClock) {
- t.Helper()
-
- // Create an endpoint of queue size 1, since no more than 1 packet is ever
- // queued in the tests in this file.
- e := channel.New(1, 1280, linkAddr)
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocolWithOptions(ipv4.Options{
- IGMP: ipv4.IGMPOptions{
- Enabled: igmpEnabled,
- },
- })},
- Clock: clock,
- })
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
- return e, s, clock
-}
-
-func createAndInjectIGMPPacket(e *channel.Endpoint, igmpType header.IGMPType, maxRespTime byte, ttl uint8, srcAddr, dstAddr, groupAddress tcpip.Address, hasRouterAlertOption bool) {
- var options header.IPv4OptionsSerializer
- if hasRouterAlertOption {
- options = header.IPv4OptionsSerializer{
- &header.IPv4SerializableRouterAlertOption{},
- }
- }
- buf := buffer.NewView(header.IPv4MinimumSize + int(options.Length()) + header.IGMPQueryMinimumSize)
-
- ip := header.IPv4(buf)
- ip.Encode(&header.IPv4Fields{
- TotalLength: uint16(len(buf)),
- TTL: ttl,
- Protocol: uint8(header.IGMPProtocolNumber),
- SrcAddr: srcAddr,
- DstAddr: dstAddr,
- Options: options,
- })
- ip.SetChecksum(^ip.CalculateChecksum())
-
- igmp := header.IGMP(ip.Payload())
- igmp.SetType(igmpType)
- igmp.SetMaxRespTime(maxRespTime)
- igmp.SetGroupAddress(groupAddress)
- igmp.SetChecksum(header.IGMPCalculateChecksum(igmp))
-
- e.InjectInbound(ipv4.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
-}
-
-// TestIGMPV1Present tests the node's ability to fall back to V1 when a V1
-// router is detected. V1 present status is expected to be reset when the NIC
-// cycles.
-func TestIGMPV1Present(t *testing.T) {
- e, s, clock := createStack(t, true)
- addr := tcpip.AddressWithPrefix{Address: stackAddr, PrefixLen: defaultPrefixLength}
- if err := s.AddAddressWithPrefix(nicID, ipv4.ProtocolNumber, addr); err != nil {
- t.Fatalf("AddAddressWithPrefix(%d, %d, %s): %s", nicID, ipv4.ProtocolNumber, addr, err)
- }
-
- if err := s.JoinGroup(ipv4.ProtocolNumber, nicID, multicastAddr); err != nil {
- t.Fatalf("JoinGroup(ipv4, nic, %s) = %s", multicastAddr, err)
- }
-
- // This NIC will send an IGMPv2 report immediately, before this test can get
- // the IGMPv1 General Membership Query in.
- {
- p, ok := e.Read()
- if !ok {
- t.Fatal("unable to Read IGMP packet, expected V2MembershipReport")
- }
- if got := s.Stats().IGMP.PacketsSent.V2MembershipReport.Value(); got != 1 {
- t.Fatalf("got V2MembershipReport messages sent = %d, want = 1", got)
- }
- validateIgmpPacket(t, p, header.IGMPv2MembershipReport, 0, stackAddr, multicastAddr, multicastAddr)
- }
- if t.Failed() {
- t.FailNow()
- }
-
- // Inject an IGMPv1 General Membership Query which is identical to a standard
- // membership query except the Max Response Time is set to 0, which will tell
- // the stack that this is a router using IGMPv1. Send it to the all systems
- // group which is the only group this host belongs to.
- createAndInjectIGMPPacket(e, header.IGMPMembershipQuery, 0, defaultTTL, remoteAddr, stackAddr, header.IPv4AllSystems, true /* hasRouterAlertOption */)
- if got := s.Stats().IGMP.PacketsReceived.MembershipQuery.Value(); got != 1 {
- t.Fatalf("got Membership Queries received = %d, want = 1", got)
- }
-
- // Before advancing the clock, verify that this host has not sent a
- // V1MembershipReport yet.
- if got := s.Stats().IGMP.PacketsSent.V1MembershipReport.Value(); got != 0 {
- t.Fatalf("got V1MembershipReport messages sent = %d, want = 0", got)
- }
-
- // Now that this NIC has seen an IGMPv1 query, it should send a solicited
- // IGMPv1 Membership Report, but only after the delayed report timer fires.
- if p, ok := e.Read(); ok {
- t.Fatalf("sent unexpected packet, expected V1MembershipReport only after advancing the clock = %+v", p.Pkt)
- }
- clock.Advance(ipv4.UnsolicitedReportIntervalMax)
- {
- p, ok := e.Read()
- if !ok {
- t.Fatal("unable to Read IGMP packet, expected V1MembershipReport")
- }
- if got := s.Stats().IGMP.PacketsSent.V1MembershipReport.Value(); got != 1 {
- t.Fatalf("got V1MembershipReport messages sent = %d, want = 1", got)
- }
- validateIgmpPacket(t, p, header.IGMPv1MembershipReport, 0, stackAddr, multicastAddr, multicastAddr)
- }
-
- // Cycling the interface should reset the V1 present flag.
- if err := s.DisableNIC(nicID); err != nil {
- t.Fatalf("s.DisableNIC(%d): %s", nicID, err)
- }
- if err := s.EnableNIC(nicID); err != nil {
- t.Fatalf("s.EnableNIC(%d): %s", nicID, err)
- }
- {
- p, ok := e.Read()
- if !ok {
- t.Fatal("unable to Read IGMP packet, expected V2MembershipReport")
- }
- if got := s.Stats().IGMP.PacketsSent.V2MembershipReport.Value(); got != 2 {
- t.Fatalf("got V2MembershipReport messages sent = %d, want = 2", got)
- }
- validateIgmpPacket(t, p, header.IGMPv2MembershipReport, 0, stackAddr, multicastAddr, multicastAddr)
- }
-}
-
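
The V1 fallback exercised by TestIGMPV1Present hinges on a single field: a Membership Query whose Max Response Time is zero identifies the querier as an IGMPv1 router, so the host answers with IGMPv1 reports until the NIC cycles. A minimal illustrative classifier (igmpQueryVersion is hypothetical, not a gVisor API; Max Response Time is in units of deciseconds):

    package main

    import "fmt"

    // igmpQueryVersion reports which IGMP version a received Membership Query
    // implies: a zero Max Response Time can only come from an IGMPv1 router.
    func igmpQueryVersion(maxRespTimeDeciseconds uint8) int {
        if maxRespTimeDeciseconds == 0 {
            return 1
        }
        return 2
    }

    func main() {
        fmt.Println(igmpQueryVersion(0))   // 1: fall back to IGMPv1 reports
        fmt.Println(igmpQueryVersion(100)) // 2: ordinary IGMPv2 query (10s max response time)
    }
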
-func TestSendQueuedIGMPReports(t *testing.T) {
- e, s, clock := createStack(t, true)
-
- // Joining a group before an address is assigned should queue IGMP packets;
- // none should be sent until an address is assigned.
- if err := s.JoinGroup(ipv4.ProtocolNumber, nicID, multicastAddr); err != nil {
- t.Fatalf("JoinGroup(%d, %d, %s): %s", ipv4.ProtocolNumber, nicID, multicastAddr, err)
- }
- reportStat := s.Stats().IGMP.PacketsSent.V2MembershipReport
- if got := reportStat.Value(); got != 0 {
- t.Errorf("got reportStat.Value() = %d, want = 0", got)
- }
- clock.Advance(time.Hour)
- if p, ok := e.Read(); ok {
- t.Fatalf("got unexpected packet = %#v", p)
- }
-
- // The initial set of IGMP reports that were queued should be sent once an
- // address is assigned.
- if err := s.AddAddress(nicID, ipv4.ProtocolNumber, stackAddr); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s): %s", nicID, ipv4.ProtocolNumber, stackAddr, err)
- }
- if got := reportStat.Value(); got != 1 {
- t.Errorf("got reportStat.Value() = %d, want = 1", got)
- }
- if p, ok := e.Read(); !ok {
- t.Error("expected to send an IGMP membership report")
- } else {
- validateIgmpPacket(t, p, header.IGMPv2MembershipReport, 0, stackAddr, multicastAddr, multicastAddr)
- }
- if t.Failed() {
- t.FailNow()
- }
- clock.Advance(ipv4.UnsolicitedReportIntervalMax)
- if got := reportStat.Value(); got != 2 {
- t.Errorf("got reportStat.Value() = %d, want = 2", got)
- }
- if p, ok := e.Read(); !ok {
- t.Error("expected to send an IGMP membership report")
- } else {
- validateIgmpPacket(t, p, header.IGMPv2MembershipReport, 0, stackAddr, multicastAddr, multicastAddr)
- }
- if t.Failed() {
- t.FailNow()
- }
-
- // Should have no more packets to send after the initial set of unsolicited
- // reports.
- clock.Advance(time.Hour)
- if p, ok := e.Read(); ok {
- t.Fatalf("got unexpected packet = %#v", p)
- }
-}
-
-func TestIGMPPacketValidation(t *testing.T) {
- tests := []struct {
- name string
- messageType header.IGMPType
- stackAddresses []tcpip.AddressWithPrefix
- srcAddr tcpip.Address
- includeRouterAlertOption bool
- ttl uint8
- expectValidIGMP bool
- getMessageTypeStatValue func(tcpip.Stats) uint64
- }{
- {
- name: "valid",
- messageType: header.IGMPLeaveGroup,
- includeRouterAlertOption: true,
- stackAddresses: []tcpip.AddressWithPrefix{{Address: stackAddr, PrefixLen: 24}},
- srcAddr: remoteAddr,
- ttl: 1,
- expectValidIGMP: true,
- getMessageTypeStatValue: func(stats tcpip.Stats) uint64 { return stats.IGMP.PacketsReceived.LeaveGroup.Value() },
- },
- {
- name: "bad ttl",
- messageType: header.IGMPv1MembershipReport,
- includeRouterAlertOption: true,
- stackAddresses: []tcpip.AddressWithPrefix{{Address: stackAddr, PrefixLen: 24}},
- srcAddr: remoteAddr,
- ttl: 2,
- expectValidIGMP: false,
- getMessageTypeStatValue: func(stats tcpip.Stats) uint64 { return stats.IGMP.PacketsReceived.V1MembershipReport.Value() },
- },
- {
- name: "missing router alert ip option",
- messageType: header.IGMPv2MembershipReport,
- includeRouterAlertOption: false,
- stackAddresses: []tcpip.AddressWithPrefix{{Address: stackAddr, PrefixLen: 24}},
- srcAddr: remoteAddr,
- ttl: 1,
- expectValidIGMP: false,
- getMessageTypeStatValue: func(stats tcpip.Stats) uint64 { return stats.IGMP.PacketsReceived.V2MembershipReport.Value() },
- },
- {
- name: "igmp leave group and src ip does not belong to nic subnet",
- messageType: header.IGMPLeaveGroup,
- includeRouterAlertOption: true,
- stackAddresses: []tcpip.AddressWithPrefix{{Address: stackAddr, PrefixLen: 24}},
- srcAddr: testutil.MustParse4("10.0.1.2"),
- ttl: 1,
- expectValidIGMP: false,
- getMessageTypeStatValue: func(stats tcpip.Stats) uint64 { return stats.IGMP.PacketsReceived.LeaveGroup.Value() },
- },
- {
- name: "igmp query and src ip does not belong to nic subnet",
- messageType: header.IGMPMembershipQuery,
- includeRouterAlertOption: true,
- stackAddresses: []tcpip.AddressWithPrefix{{Address: stackAddr, PrefixLen: 24}},
- srcAddr: testutil.MustParse4("10.0.1.2"),
- ttl: 1,
- expectValidIGMP: true,
- getMessageTypeStatValue: func(stats tcpip.Stats) uint64 { return stats.IGMP.PacketsReceived.MembershipQuery.Value() },
- },
- {
- name: "igmp report v1 and src ip does not belong to nic subnet",
- messageType: header.IGMPv1MembershipReport,
- includeRouterAlertOption: true,
- stackAddresses: []tcpip.AddressWithPrefix{{Address: stackAddr, PrefixLen: 24}},
- srcAddr: testutil.MustParse4("10.0.1.2"),
- ttl: 1,
- expectValidIGMP: false,
- getMessageTypeStatValue: func(stats tcpip.Stats) uint64 { return stats.IGMP.PacketsReceived.V1MembershipReport.Value() },
- },
- {
- name: "igmp report v2 and src ip does not belong to nic subnet",
- messageType: header.IGMPv2MembershipReport,
- includeRouterAlertOption: true,
- stackAddresses: []tcpip.AddressWithPrefix{{Address: stackAddr, PrefixLen: 24}},
- srcAddr: testutil.MustParse4("10.0.1.2"),
- ttl: 1,
- expectValidIGMP: false,
- getMessageTypeStatValue: func(stats tcpip.Stats) uint64 { return stats.IGMP.PacketsReceived.V2MembershipReport.Value() },
- },
- {
- name: "src ip belongs to the subnet of the nic's second address",
- messageType: header.IGMPv2MembershipReport,
- includeRouterAlertOption: true,
- stackAddresses: []tcpip.AddressWithPrefix{
- {Address: testutil.MustParse4("10.0.15.1"), PrefixLen: 24},
- {Address: stackAddr, PrefixLen: 24},
- },
- srcAddr: remoteAddr,
- ttl: 1,
- expectValidIGMP: true,
- getMessageTypeStatValue: func(stats tcpip.Stats) uint64 { return stats.IGMP.PacketsReceived.V2MembershipReport.Value() },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- e, s, _ := createStack(t, true)
- for _, address := range test.stackAddresses {
- if err := s.AddAddressWithPrefix(nicID, ipv4.ProtocolNumber, address); err != nil {
- t.Fatalf("AddAddressWithPrefix(%d, %d, %s): %s", nicID, ipv4.ProtocolNumber, address, err)
- }
- }
- stats := s.Stats()
- // Verify that every relevant stat is zeroed before we send a packet.
- if got := test.getMessageTypeStatValue(s.Stats()); got != 0 {
- t.Errorf("got test.getMessageTypeStatValue(s.Stats()) = %d, want = 0", got)
- }
- if got := stats.IGMP.PacketsReceived.Invalid.Value(); got != 0 {
- t.Errorf("got stats.IGMP.PacketsReceived.Invalid.Value() = %d, want = 0", got)
- }
- if got := stats.IP.PacketsDelivered.Value(); got != 0 {
- t.Fatalf("got stats.IP.PacketsDelivered.Value() = %d, want = 0", got)
- }
- createAndInjectIGMPPacket(e, test.messageType, 0, test.ttl, test.srcAddr, header.IPv4AllSystems, header.IPv4AllSystems, test.includeRouterAlertOption)
- // We always expect the packet to pass IP validation.
- if got := stats.IP.PacketsDelivered.Value(); got != 1 {
- t.Fatalf("got stats.IP.PacketsDelivered.Value() = %d, want = 1", got)
- }
- // Even when the IGMP-specific validation checks fail, we expect the
- // corresponding IGMP counter to be incremented.
- if got := test.getMessageTypeStatValue(s.Stats()); got != 1 {
- t.Errorf("got test.getMessageTypeStatValue(s.Stats()) = %d, want = 1", got)
- }
- var expectedInvalidCount uint64
- if !test.expectValidIGMP {
- expectedInvalidCount = 1
- }
- if got := stats.IGMP.PacketsReceived.Invalid.Value(); got != expectedInvalidCount {
- t.Errorf("got stats.IGMP.PacketsReceived.Invalid.Value() = %d, want = %d", got, expectedInvalidCount)
- }
- })
- }
-}
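
The acceptance rules that TestIGMPPacketValidation encodes can be summarised in a few lines. The sketch below mirrors the test table above rather than the gVisor implementation; the table never exercises a query with a bad TTL or a missing Router Alert option, so applying those two checks to queries as well is an assumption:

    package main

    import "fmt"

    // igmpAcceptable mirrors the test table: every IGMP packet must arrive with
    // TTL 1 and the Router Alert IP option, and reports and leave-group messages
    // must additionally come from a source on one of the NIC's subnets.
    func igmpAcceptable(isQuery bool, ttl uint8, hasRouterAlert, srcOnNICSubnet bool) bool {
        if ttl != 1 || !hasRouterAlert {
            return false
        }
        return isQuery || srcOnNICSubnet
    }

    func main() {
        fmt.Println(igmpAcceptable(false, 1, true, true))  // valid LeaveGroup: true
        fmt.Println(igmpAcceptable(false, 2, true, true))  // bad TTL: false
        fmt.Println(igmpAcceptable(true, 1, true, false))  // query from off-subnet source: true
        fmt.Println(igmpAcceptable(false, 1, false, true)) // missing Router Alert: false
    }
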
diff --git a/pkg/tcpip/network/ipv4/ipv4_state_autogen.go b/pkg/tcpip/network/ipv4/ipv4_state_autogen.go
new file mode 100644
index 000000000..19b672251
--- /dev/null
+++ b/pkg/tcpip/network/ipv4/ipv4_state_autogen.go
@@ -0,0 +1,113 @@
+// automatically generated by stateify.
+
+package ipv4
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (i *icmpv4DestinationUnreachableSockError) StateTypeName() string {
+ return "pkg/tcpip/network/ipv4.icmpv4DestinationUnreachableSockError"
+}
+
+func (i *icmpv4DestinationUnreachableSockError) StateFields() []string {
+ return []string{}
+}
+
+func (i *icmpv4DestinationUnreachableSockError) beforeSave() {}
+
+// +checklocksignore
+func (i *icmpv4DestinationUnreachableSockError) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+}
+
+func (i *icmpv4DestinationUnreachableSockError) afterLoad() {}
+
+// +checklocksignore
+func (i *icmpv4DestinationUnreachableSockError) StateLoad(stateSourceObject state.Source) {
+}
+
+func (i *icmpv4DestinationHostUnreachableSockError) StateTypeName() string {
+ return "pkg/tcpip/network/ipv4.icmpv4DestinationHostUnreachableSockError"
+}
+
+func (i *icmpv4DestinationHostUnreachableSockError) StateFields() []string {
+ return []string{
+ "icmpv4DestinationUnreachableSockError",
+ }
+}
+
+func (i *icmpv4DestinationHostUnreachableSockError) beforeSave() {}
+
+// +checklocksignore
+func (i *icmpv4DestinationHostUnreachableSockError) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.icmpv4DestinationUnreachableSockError)
+}
+
+func (i *icmpv4DestinationHostUnreachableSockError) afterLoad() {}
+
+// +checklocksignore
+func (i *icmpv4DestinationHostUnreachableSockError) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.icmpv4DestinationUnreachableSockError)
+}
+
+func (i *icmpv4DestinationPortUnreachableSockError) StateTypeName() string {
+ return "pkg/tcpip/network/ipv4.icmpv4DestinationPortUnreachableSockError"
+}
+
+func (i *icmpv4DestinationPortUnreachableSockError) StateFields() []string {
+ return []string{
+ "icmpv4DestinationUnreachableSockError",
+ }
+}
+
+func (i *icmpv4DestinationPortUnreachableSockError) beforeSave() {}
+
+// +checklocksignore
+func (i *icmpv4DestinationPortUnreachableSockError) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.icmpv4DestinationUnreachableSockError)
+}
+
+func (i *icmpv4DestinationPortUnreachableSockError) afterLoad() {}
+
+// +checklocksignore
+func (i *icmpv4DestinationPortUnreachableSockError) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.icmpv4DestinationUnreachableSockError)
+}
+
+func (e *icmpv4FragmentationNeededSockError) StateTypeName() string {
+ return "pkg/tcpip/network/ipv4.icmpv4FragmentationNeededSockError"
+}
+
+func (e *icmpv4FragmentationNeededSockError) StateFields() []string {
+ return []string{
+ "icmpv4DestinationUnreachableSockError",
+ "mtu",
+ }
+}
+
+func (e *icmpv4FragmentationNeededSockError) beforeSave() {}
+
+// +checklocksignore
+func (e *icmpv4FragmentationNeededSockError) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.icmpv4DestinationUnreachableSockError)
+ stateSinkObject.Save(1, &e.mtu)
+}
+
+func (e *icmpv4FragmentationNeededSockError) afterLoad() {}
+
+// +checklocksignore
+func (e *icmpv4FragmentationNeededSockError) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.icmpv4DestinationUnreachableSockError)
+ stateSourceObject.Load(1, &e.mtu)
+}
+
+func init() {
+ state.Register((*icmpv4DestinationUnreachableSockError)(nil))
+ state.Register((*icmpv4DestinationHostUnreachableSockError)(nil))
+ state.Register((*icmpv4DestinationPortUnreachableSockError)(nil))
+ state.Register((*icmpv4FragmentationNeededSockError)(nil))
+}
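
For a type outside this package, the same stateify pattern looks roughly like the sketch below; counter and examplepkg are hypothetical, and the index passed to Save/Load corresponds to the position of the field name in StateFields, as in the generated code above:

    package examplepkg

    import "gvisor.dev/gvisor/pkg/state"

    // counter is a hypothetical saveable type following the generated pattern.
    type counter struct {
        hits uint64
    }

    func (c *counter) StateTypeName() string { return "examplepkg.counter" }

    func (c *counter) StateFields() []string { return []string{"hits"} }

    func (c *counter) beforeSave() {}

    func (c *counter) StateSave(stateSinkObject state.Sink) {
        c.beforeSave()
        stateSinkObject.Save(0, &c.hits)
    }

    func (c *counter) afterLoad() {}

    func (c *counter) StateLoad(stateSourceObject state.Source) {
        stateSourceObject.Load(0, &c.hits)
    }

    func init() {
        state.Register((*counter)(nil))
    }
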
diff --git a/pkg/tcpip/network/ipv4/ipv4_test.go b/pkg/tcpip/network/ipv4/ipv4_test.go
deleted file mode 100644
index 4a4448cf9..000000000
--- a/pkg/tcpip/network/ipv4/ipv4_test.go
+++ /dev/null
@@ -1,3352 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ipv4_test
-
-import (
- "bytes"
- "encoding/hex"
- "fmt"
- "io/ioutil"
- "math"
- "net"
- "testing"
- "time"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/checker"
- "gvisor.dev/gvisor/pkg/tcpip/faketime"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/link/channel"
- "gvisor.dev/gvisor/pkg/tcpip/link/loopback"
- "gvisor.dev/gvisor/pkg/tcpip/link/sniffer"
- "gvisor.dev/gvisor/pkg/tcpip/network/arp"
- iptestutil "gvisor.dev/gvisor/pkg/tcpip/network/internal/testutil"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/testutil"
- "gvisor.dev/gvisor/pkg/tcpip/transport/icmp"
- "gvisor.dev/gvisor/pkg/tcpip/transport/raw"
- "gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
- "gvisor.dev/gvisor/pkg/tcpip/transport/udp"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-const (
- extraHeaderReserve = 50
- defaultMTU = 65536
-)
-
-func TestExcludeBroadcast(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},
- })
-
- ep := stack.LinkEndpoint(channel.New(256, defaultMTU, ""))
- if testing.Verbose() {
- ep = sniffer.New(ep)
- }
- if err := s.CreateNIC(1, ep); err != nil {
- t.Fatalf("CreateNIC failed: %v", err)
- }
-
- s.SetRouteTable([]tcpip.Route{{
- Destination: header.IPv4EmptySubnet,
- NIC: 1,
- }})
-
- randomAddr := tcpip.FullAddress{NIC: 1, Addr: "\x0a\x00\x00\x01", Port: 53}
-
- var wq waiter.Queue
- t.Run("WithoutPrimaryAddress", func(t *testing.T) {
- ep, err := s.NewEndpoint(udp.ProtocolNumber, ipv4.ProtocolNumber, &wq)
- if err != nil {
- t.Fatal(err)
- }
- defer ep.Close()
-
- // Cannot connect using a broadcast address as the source.
- {
- err := ep.Connect(randomAddr)
- if _, ok := err.(*tcpip.ErrNoRoute); !ok {
- t.Errorf("got ep.Connect(...) = %v, want = %v", err, &tcpip.ErrNoRoute{})
- }
- }
-
- // However, we can bind to a broadcast address to listen.
- if err := ep.Bind(tcpip.FullAddress{Addr: header.IPv4Broadcast, Port: 53, NIC: 1}); err != nil {
- t.Errorf("Bind failed: %v", err)
- }
- })
-
- t.Run("WithPrimaryAddress", func(t *testing.T) {
- ep, err := s.NewEndpoint(udp.ProtocolNumber, ipv4.ProtocolNumber, &wq)
- if err != nil {
- t.Fatal(err)
- }
- defer ep.Close()
-
- // Add a valid primary endpoint address; now we can connect.
- if err := s.AddAddress(1, ipv4.ProtocolNumber, "\x0a\x00\x00\x02"); err != nil {
- t.Fatalf("AddAddress failed: %v", err)
- }
- if err := ep.Connect(randomAddr); err != nil {
- t.Errorf("Connect failed: %v", err)
- }
- })
-}
-
-func TestForwarding(t *testing.T) {
- const (
- incomingNICID = 1
- outgoingNICID = 2
- randomSequence = 123
- randomIdent = 42
- randomTimeOffset = 0x10203040
- )
-
- incomingIPv4Addr := tcpip.AddressWithPrefix{
- Address: tcpip.Address(net.ParseIP("10.0.0.1").To4()),
- PrefixLen: 8,
- }
- outgoingIPv4Addr := tcpip.AddressWithPrefix{
- Address: tcpip.Address(net.ParseIP("11.0.0.1").To4()),
- PrefixLen: 8,
- }
- outgoingLinkAddr := tcpip.LinkAddress("\x02\x03\x03\x04\x05\x06")
- remoteIPv4Addr1 := testutil.MustParse4("10.0.0.2")
- remoteIPv4Addr2 := testutil.MustParse4("11.0.0.2")
- unreachableIPv4Addr := testutil.MustParse4("12.0.0.2")
- multicastIPv4Addr := testutil.MustParse4("225.0.0.0")
- linkLocalIPv4Addr := testutil.MustParse4("169.254.0.0")
-
- tests := []struct {
- name string
- TTL uint8
- sourceAddr tcpip.Address
- destAddr tcpip.Address
- expectErrorICMP bool
- ipFlags uint8
- mtu uint32
- payloadLength int
- options header.IPv4Options
- forwardedOptions header.IPv4Options
- icmpType header.ICMPv4Type
- icmpCode header.ICMPv4Code
- expectPacketUnrouteableError bool
- expectLinkLocalSourceError bool
- expectLinkLocalDestError bool
- expectPacketForwarded bool
- expectedFragmentsForwarded []fragmentInfo
- }{
- {
- name: "TTL of zero",
- TTL: 0,
- sourceAddr: remoteIPv4Addr1,
- destAddr: remoteIPv4Addr2,
- expectErrorICMP: true,
- icmpType: header.ICMPv4TimeExceeded,
- icmpCode: header.ICMPv4TTLExceeded,
- mtu: ipv4.MaxTotalSize,
- },
- {
- name: "TTL of one",
- TTL: 1,
- sourceAddr: remoteIPv4Addr1,
- destAddr: remoteIPv4Addr2,
- expectPacketForwarded: true,
- mtu: ipv4.MaxTotalSize,
- },
- {
- name: "TTL of two",
- TTL: 2,
- sourceAddr: remoteIPv4Addr1,
- destAddr: remoteIPv4Addr2,
- expectPacketForwarded: true,
- mtu: ipv4.MaxTotalSize,
- },
- {
- name: "Max TTL",
- TTL: math.MaxUint8,
- sourceAddr: remoteIPv4Addr1,
- destAddr: remoteIPv4Addr2,
- expectPacketForwarded: true,
- mtu: ipv4.MaxTotalSize,
- },
- {
- name: "four EOL options",
- TTL: 2,
- sourceAddr: remoteIPv4Addr1,
- destAddr: remoteIPv4Addr2,
- expectPacketForwarded: true,
- mtu: ipv4.MaxTotalSize,
- options: header.IPv4Options{0, 0, 0, 0},
- forwardedOptions: header.IPv4Options{0, 0, 0, 0},
- },
- {
- name: "TS type 1 full",
- TTL: 2,
- sourceAddr: remoteIPv4Addr1,
- destAddr: remoteIPv4Addr2,
- mtu: ipv4.MaxTotalSize,
- options: header.IPv4Options{
- 68, 12, 13, 0xF1,
- 192, 168, 1, 12,
- 1, 2, 3, 4,
- },
- expectErrorICMP: true,
- icmpType: header.ICMPv4ParamProblem,
- icmpCode: header.ICMPv4UnusedCode,
- },
- {
- name: "TS type 0",
- TTL: 2,
- sourceAddr: remoteIPv4Addr1,
- destAddr: remoteIPv4Addr2,
- mtu: ipv4.MaxTotalSize,
- options: header.IPv4Options{
- 68, 24, 21, 0x00,
- 1, 2, 3, 4,
- 5, 6, 7, 8,
- 9, 10, 11, 12,
- 13, 14, 15, 16,
- 0, 0, 0, 0,
- },
- forwardedOptions: header.IPv4Options{
- 68, 24, 25, 0x00,
- 1, 2, 3, 4,
- 5, 6, 7, 8,
- 9, 10, 11, 12,
- 13, 14, 15, 16,
- 0x00, 0xad, 0x1c, 0x40, // time we expect from fakeclock
- },
- expectPacketForwarded: true,
- },
- {
- name: "end of options list",
- TTL: 2,
- sourceAddr: remoteIPv4Addr1,
- destAddr: remoteIPv4Addr2,
- mtu: ipv4.MaxTotalSize,
- options: header.IPv4Options{
- 68, 12, 13, 0x11,
- 192, 168, 1, 12,
- 1, 2, 3, 4,
- 0, 10, 3, 99, // EOL followed by junk
- 1, 2, 3, 4,
- },
- forwardedOptions: header.IPv4Options{
- 68, 12, 13, 0x21,
- 192, 168, 1, 12,
- 1, 2, 3, 4,
- 0, // End of Options hides following bytes.
- 0, 0, 0, // 7 bytes unknown option removed.
- 0, 0, 0, 0,
- },
- expectPacketForwarded: true,
- },
- {
- name: "Network unreachable",
- TTL: 2,
- sourceAddr: remoteIPv4Addr1,
- destAddr: unreachableIPv4Addr,
- expectErrorICMP: true,
- mtu: ipv4.MaxTotalSize,
- icmpType: header.ICMPv4DstUnreachable,
- icmpCode: header.ICMPv4NetUnreachable,
- expectPacketUnrouteableError: true,
- },
- {
- name: "Multicast destination",
- TTL: 2,
- destAddr: multicastIPv4Addr,
- expectPacketUnrouteableError: true,
- },
- {
- name: "Link local destination",
- TTL: 2,
- sourceAddr: remoteIPv4Addr1,
- destAddr: linkLocalIPv4Addr,
- expectLinkLocalDestError: true,
- },
- {
- name: "Link local source",
- TTL: 2,
- sourceAddr: linkLocalIPv4Addr,
- destAddr: remoteIPv4Addr2,
- expectLinkLocalSourceError: true,
- },
- {
- name: "Fragmentation needed and DF set",
- TTL: 2,
- sourceAddr: remoteIPv4Addr1,
- destAddr: remoteIPv4Addr2,
- ipFlags: header.IPv4FlagDontFragment,
- // We've picked this MTU because it is:
- //
- // 1) Greater than the minimum MTU that IPv4 hosts are required to process
- // (576 bytes). As per RFC 1812, Section 4.3.2.3:
- //
- // The ICMP datagram SHOULD contain as much of the original datagram as
- // possible without the length of the ICMP datagram exceeding 576 bytes.
- //
- // Therefore, setting an MTU greater than 576 bytes ensures that we can fit a
- // complete ICMP packet on the incoming endpoint (and make assertions about
- // it).
- //
- // 2) Less than `ipv4.MaxTotalSize`, which lets us build an IPv4 packet whose
- // size exceeds the MTU.
- mtu: 1000,
- payloadLength: 1004,
- expectErrorICMP: true,
- icmpType: header.ICMPv4DstUnreachable,
- icmpCode: header.ICMPv4FragmentationNeeded,
- },
- {
- name: "Fragmentation needed and DF not set",
- TTL: 2,
- sourceAddr: remoteIPv4Addr1,
- destAddr: remoteIPv4Addr2,
- mtu: 1000,
- payloadLength: 1004,
- expectPacketForwarded: true,
- // Combined, these fragments have a total length of 1012 octets, equal to
- // the length of the payload (1004 octets) plus the length of the ICMP
- // header (8 octets).
- expectedFragmentsForwarded: []fragmentInfo{
- // The first fragment has a length of the greatest multiple of 8 that is
- // less than or equal to `mtu - header.IPv4MinimumSize`.
- {offset: 0, payloadSize: uint16(976), more: true},
- // The next fragment holds the rest of the packet.
- {offset: uint16(976), payloadSize: 36, more: false},
- },
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- clock := faketime.NewManualClock()
-
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{icmp.NewProtocol4},
- Clock: clock,
- })
-
- // Advance the clock by some unimportant amount to make
- // it give a more recognisable signature than 00,00,00,00.
- clock.Advance(time.Millisecond * randomTimeOffset)
-
- // We expect at most a single packet in response to our ICMP Echo Request.
- incomingEndpoint := channel.New(1, test.mtu, "")
- if err := s.CreateNIC(incomingNICID, incomingEndpoint); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", incomingNICID, err)
- }
- incomingIPv4ProtoAddr := tcpip.ProtocolAddress{Protocol: header.IPv4ProtocolNumber, AddressWithPrefix: incomingIPv4Addr}
- if err := s.AddProtocolAddress(incomingNICID, incomingIPv4ProtoAddr); err != nil {
- t.Fatalf("AddProtocolAddress(%d, %#v): %s", incomingNICID, incomingIPv4ProtoAddr, err)
- }
-
- expectedEmittedPacketCount := 1
- if len(test.expectedFragmentsForwarded) > expectedEmittedPacketCount {
- expectedEmittedPacketCount = len(test.expectedFragmentsForwarded)
- }
- outgoingEndpoint := channel.New(expectedEmittedPacketCount, test.mtu, outgoingLinkAddr)
- if err := s.CreateNIC(outgoingNICID, outgoingEndpoint); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", outgoingNICID, err)
- }
- outgoingIPv4ProtoAddr := tcpip.ProtocolAddress{Protocol: header.IPv4ProtocolNumber, AddressWithPrefix: outgoingIPv4Addr}
- if err := s.AddProtocolAddress(outgoingNICID, outgoingIPv4ProtoAddr); err != nil {
- t.Fatalf("AddProtocolAddress(%d, %#v): %s", outgoingNICID, outgoingIPv4ProtoAddr, err)
- }
-
- s.SetRouteTable([]tcpip.Route{
- {
- Destination: incomingIPv4Addr.Subnet(),
- NIC: incomingNICID,
- },
- {
- Destination: outgoingIPv4Addr.Subnet(),
- NIC: outgoingNICID,
- },
- })
-
- if err := s.SetForwardingDefaultAndAllNICs(header.IPv4ProtocolNumber, true); err != nil {
- t.Fatalf("SetForwardingDefaultAndAllNICs(%d, true): %s", header.IPv4ProtocolNumber, err)
- }
-
- ipHeaderLength := header.IPv4MinimumSize + len(test.options)
- if ipHeaderLength > header.IPv4MaximumHeaderSize {
- t.Fatalf("got ipHeaderLength = %d, want <= %d ", ipHeaderLength, header.IPv4MaximumHeaderSize)
- }
- icmpHeaderLength := header.ICMPv4MinimumSize
- totalLength := ipHeaderLength + icmpHeaderLength + test.payloadLength
- hdr := buffer.NewPrependable(totalLength)
- hdr.Prepend(test.payloadLength)
- icmpH := header.ICMPv4(hdr.Prepend(icmpHeaderLength))
- icmpH.SetIdent(randomIdent)
- icmpH.SetSequence(randomSequence)
- icmpH.SetType(header.ICMPv4Echo)
- icmpH.SetCode(header.ICMPv4UnusedCode)
- icmpH.SetChecksum(0)
- icmpH.SetChecksum(^header.Checksum(icmpH, 0))
- ip := header.IPv4(hdr.Prepend(ipHeaderLength))
- ip.Encode(&header.IPv4Fields{
- TotalLength: uint16(totalLength),
- Protocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: test.TTL,
- SrcAddr: test.sourceAddr,
- DstAddr: test.destAddr,
- Flags: test.ipFlags,
- })
- if len(test.options) != 0 {
- ip.SetHeaderLength(uint8(ipHeaderLength))
- // Copy options manually. We do not use Encode for options so we can
- // verify malformed options with handcrafted payloads.
- if want, got := copy(ip.Options(), test.options), len(test.options); want != got {
- t.Fatalf("got copy(ip.Options(), test.options) = %d, want = %d", got, want)
- }
- }
- ip.SetChecksum(0)
- ip.SetChecksum(^ip.CalculateChecksum())
- requestPkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: hdr.View().ToVectorisedView(),
- })
- requestPkt.NetworkProtocolNumber = header.IPv4ProtocolNumber
- incomingEndpoint.InjectInbound(header.IPv4ProtocolNumber, requestPkt)
-
- reply, ok := incomingEndpoint.Read()
-
- if test.expectErrorICMP {
- if !ok {
- t.Fatalf("expected ICMP packet type %d through incoming NIC", test.icmpType)
- }
-
- // We expect the ICMP packet to contain as much of the original packet as
- // possible up to a limit of 576 bytes, split between payload, IP header,
- // and ICMP header.
- expectedICMPPayloadLength := func() int {
- maxICMPPacketLength := header.IPv4MinimumProcessableDatagramSize
- maxICMPPayloadLength := maxICMPPacketLength - icmpHeaderLength - ipHeaderLength
- if len(hdr.View()) > maxICMPPayloadLength {
- return maxICMPPayloadLength
- }
- return len(hdr.View())
- }
-
- checker.IPv4(t, stack.PayloadSince(reply.Pkt.NetworkHeader()),
- checker.SrcAddr(incomingIPv4Addr.Address),
- checker.DstAddr(test.sourceAddr),
- checker.TTL(ipv4.DefaultTTL),
- checker.ICMPv4(
- checker.ICMPv4Checksum(),
- checker.ICMPv4Type(test.icmpType),
- checker.ICMPv4Code(test.icmpCode),
- checker.ICMPv4Payload(hdr.View()[:expectedICMPPayloadLength()]),
- ),
- )
- } else if ok {
- t.Fatalf("expected no ICMP packet through incoming NIC, instead found: %#v", reply)
- }
-
- if test.expectPacketForwarded {
- if len(test.expectedFragmentsForwarded) != 0 {
- var fragmentedPackets []*stack.PacketBuffer
- for i := 0; i < len(test.expectedFragmentsForwarded); i++ {
- reply, ok = outgoingEndpoint.Read()
- if !ok {
- t.Fatal("expected ICMP Echo fragment through outgoing NIC")
- }
- fragmentedPackets = append(fragmentedPackets, reply.Pkt)
- }
-
- // The forwarded packet's TTL will have been decremented, so decrement the
- // TTL of our copy of the request before comparing it against the fragments.
- ipHeader := header.IPv4(requestPkt.NetworkHeader().View())
- ipHeader.SetTTL(ipHeader.TTL() - 1)
-
- // Forwarded packets have available header bytes equalling the sum of the
- // maximum IP header size and the maximum size allocated for link layer
- // headers. In this case, no size is allocated for link layer headers.
- expectedAvailableHeaderBytes := header.IPv4MaximumHeaderSize
- if err := compareFragments(fragmentedPackets, requestPkt, test.mtu, test.expectedFragmentsForwarded, header.ICMPv4ProtocolNumber, true /* withIPHeader */, expectedAvailableHeaderBytes); err != nil {
- t.Error(err)
- }
- } else {
- reply, ok = outgoingEndpoint.Read()
- if !ok {
- t.Fatal("expected ICMP Echo packet through outgoing NIC")
- }
-
- checker.IPv4(t, stack.PayloadSince(reply.Pkt.NetworkHeader()),
- checker.SrcAddr(test.sourceAddr),
- checker.DstAddr(test.destAddr),
- checker.TTL(test.TTL-1),
- checker.IPv4Options(test.forwardedOptions),
- checker.ICMPv4(
- checker.ICMPv4Checksum(),
- checker.ICMPv4Type(header.ICMPv4Echo),
- checker.ICMPv4Code(header.ICMPv4UnusedCode),
- checker.ICMPv4Payload(nil),
- ),
- )
- }
- } else {
- if reply, ok = outgoingEndpoint.Read(); ok {
- t.Fatalf("expected no ICMP Echo packet through outgoing NIC, instead found: %#v", reply)
- }
- }
- boolToInt := func(val bool) uint64 {
- if val {
- return 1
- }
- return 0
- }
-
- if got, want := s.Stats().IP.Forwarding.LinkLocalSource.Value(), boolToInt(test.expectLinkLocalSourceError); got != want {
- t.Errorf("got s.Stats().IP.Forwarding.LinkLocalSource.Value() = %d, want = %d", got, want)
- }
-
- if got, want := s.Stats().IP.Forwarding.LinkLocalDestination.Value(), boolToInt(test.expectLinkLocalDestError); got != want {
- t.Errorf("got s.Stats().IP.Forwarding.LinkLocalDestination.Value() = %d, want = %d", got, want)
- }
-
- if got, want := s.Stats().IP.MalformedPacketsReceived.Value(), boolToInt(test.icmpType == header.ICMPv4ParamProblem); got != want {
- t.Errorf("got s.Stats().IP.MalformedPacketsReceived.Value() = %d, want = %d", got, want)
- }
-
- if got, want := s.Stats().IP.Forwarding.ExhaustedTTL.Value(), boolToInt(test.TTL <= 0); got != want {
- t.Errorf("got s.Stats().IP.Forwarding.ExhaustedTTL.Value() = %d, want = %d", got, want)
- }
-
- if got, want := s.Stats().IP.Forwarding.Unrouteable.Value(), boolToInt(test.expectPacketUnrouteableError); got != want {
- t.Errorf("got s.Stats().IP.Forwarding.Unrouteable.Value() = %d, want = %d", got, want)
- }
-
- if got, want := s.Stats().IP.Forwarding.Errors.Value(), boolToInt(!test.expectPacketForwarded); got != want {
- t.Errorf("got s.Stats().IP.Forwarding.Errors.Value() = %d, want = %d", got, want)
- }
-
- if got, want := s.Stats().IP.Forwarding.PacketTooBig.Value(), boolToInt(test.icmpCode == header.ICMPv4FragmentationNeeded); got != want {
- t.Errorf("got s.Stats().IP.Forwarding.PacketTooBig.Value() = %d, want = %d", got, want)
- }
- })
- }
-}
-
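
The fragment sizes asserted in the "Fragmentation needed and DF not set" case above follow from simple arithmetic: every fragment except the last must carry a payload that is a multiple of 8 bytes and fits within mtu minus the IPv4 header. A standalone worked example (fragmentPayloads is illustrative, not gVisor code):

    package main

    import "fmt"

    // fragmentPayloads splits a payload into per-fragment payload sizes: each
    // non-final fragment gets the largest multiple of 8 bytes that fits in
    // mtu - ipHeaderLen, and the final fragment carries the remainder.
    func fragmentPayloads(totalPayload, mtu, ipHeaderLen int) []int {
        maxPerFragment := (mtu - ipHeaderLen) / 8 * 8
        var sizes []int
        for totalPayload > maxPerFragment {
            sizes = append(sizes, maxPerFragment)
            totalPayload -= maxPerFragment
        }
        return append(sizes, totalPayload)
    }

    func main() {
        // 1004-byte payload plus an 8-byte ICMP header is 1012 bytes to forward
        // over a 1000-byte MTU with a 20-byte IPv4 header: fragments of 976 and 36.
        fmt.Println(fragmentPayloads(1012, 1000, 20)) // [976 36]
    }
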
-// TestIPv4Sanity sends IP/ICMP packets with various problems to the stack and
-// checks the response.
-func TestIPv4Sanity(t *testing.T) {
- const (
- ttl = 255
- nicID = 1
- randomSequence = 123
- randomIdent = 42
- // In some cases Linux sets the error pointer to the start of the option
- // (offset 0) instead of the actual wrong value, which is the length byte
- // (offset 1). For compatibility we must do the same. Use this constant
- // to indicate where this happens.
- pointerOffsetForInvalidLength = 0
- randomTimeOffset = 0x10203040
- )
- var (
- ipv4Addr = tcpip.AddressWithPrefix{
- Address: tcpip.Address(net.ParseIP("192.168.1.58").To4()),
- PrefixLen: 24,
- }
- remoteIPv4Addr = tcpip.Address(net.ParseIP("10.0.0.1").To4())
- )
-
- tests := []struct {
- name string
- headerLength uint8 // value of 0 means "use correct size"
- badHeaderChecksum bool
- maxTotalLength uint16
- transportProtocol uint8
- TTL uint8
- options header.IPv4Options
- replyOptions header.IPv4Options // reply should look like this
- shouldFail bool
- expectErrorICMP bool
- ICMPType header.ICMPv4Type
- ICMPCode header.ICMPv4Code
- paramProblemPointer uint8
- }{
- {
- name: "valid no options",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- },
- {
- name: "bad header checksum",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- badHeaderChecksum: true,
- shouldFail: true,
- },
- // The TTL tests check that we are not rejecting an incoming packet
- // with a zero or one TTL, which has been a point of confusion in the
- // past as RFC 791 says: "If this field contains the value zero, then the
- // datagram must be destroyed". However RFC 1122 section 3.2.1.7 clarifies
- // for the case of the destination host, stating as follows.
- //
- // A host MUST NOT send a datagram with a Time-to-Live (TTL)
- // value of zero.
- //
- // A host MUST NOT discard a datagram just because it was
- // received with TTL less than 2.
- {
- name: "zero TTL",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: 0,
- },
- {
- name: "one TTL",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: 1,
- },
- {
- name: "End options",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- options: header.IPv4Options{0, 0, 0, 0},
- replyOptions: header.IPv4Options{0, 0, 0, 0},
- },
- {
- name: "NOP options",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- options: header.IPv4Options{1, 1, 1, 1},
- replyOptions: header.IPv4Options{1, 1, 1, 1},
- },
- {
- name: "NOP and End options",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- options: header.IPv4Options{1, 1, 0, 0},
- replyOptions: header.IPv4Options{1, 1, 0, 0},
- },
- {
- name: "bad header length",
- headerLength: header.IPv4MinimumSize - 1,
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- shouldFail: true,
- },
- {
- name: "bad total length (0)",
- maxTotalLength: 0,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- shouldFail: true,
- },
- {
- name: "bad total length (ip - 1)",
- maxTotalLength: uint16(header.IPv4MinimumSize - 1),
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- shouldFail: true,
- },
- {
- name: "bad total length (ip + icmp - 1)",
- maxTotalLength: uint16(header.IPv4MinimumSize + header.ICMPv4MinimumSize - 1),
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- shouldFail: true,
- },
- {
- name: "bad protocol",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: 99,
- TTL: ttl,
- shouldFail: true,
- expectErrorICMP: true,
- ICMPType: header.ICMPv4DstUnreachable,
- ICMPCode: header.ICMPv4ProtoUnreachable,
- },
- {
- name: "timestamp option overflow",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- options: header.IPv4Options{
- 68, 12, 13, 0x11,
- 192, 168, 1, 12,
- 1, 2, 3, 4,
- },
- replyOptions: header.IPv4Options{
- 68, 12, 13, 0x21,
- 192, 168, 1, 12,
- 1, 2, 3, 4,
- },
- },
- {
- name: "timestamp option overflow full",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- options: header.IPv4Options{
- 68, 12, 13, 0xF1,
- // ^ Counter full (15/0xF)
- 192, 168, 1, 12,
- 1, 2, 3, 4,
- },
- shouldFail: true,
- expectErrorICMP: true,
- ICMPType: header.ICMPv4ParamProblem,
- ICMPCode: header.ICMPv4UnusedCode,
- paramProblemPointer: header.IPv4MinimumSize + 3,
- replyOptions: header.IPv4Options{},
- },
- {
- name: "unknown option",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- options: header.IPv4Options{10, 4, 9, 0},
- // ^^
- // The unknown option should be stripped out of the reply.
- replyOptions: header.IPv4Options{},
- },
- {
- name: "bad option - no length",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- options: header.IPv4Options{
- 1, 1, 1, 68,
- // ^-start of timestamp.. but no length..
- },
- shouldFail: true,
- expectErrorICMP: true,
- ICMPType: header.ICMPv4ParamProblem,
- ICMPCode: header.ICMPv4UnusedCode,
- paramProblemPointer: header.IPv4MinimumSize + 3,
- },
- {
- name: "bad option - length 0",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- options: header.IPv4Options{
- 68, 0, 9, 0,
- // ^
- 1, 2, 3, 4,
- },
- shouldFail: true,
- expectErrorICMP: true,
- ICMPType: header.ICMPv4ParamProblem,
- ICMPCode: header.ICMPv4UnusedCode,
- paramProblemPointer: header.IPv4MinimumSize + pointerOffsetForInvalidLength,
- },
- {
- name: "bad option - length 1",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- options: header.IPv4Options{
- 68, 1, 9, 0,
- // ^
- 1, 2, 3, 4,
- },
- shouldFail: true,
- expectErrorICMP: true,
- ICMPType: header.ICMPv4ParamProblem,
- ICMPCode: header.ICMPv4UnusedCode,
- paramProblemPointer: header.IPv4MinimumSize + pointerOffsetForInvalidLength,
- },
- {
- name: "bad option - length big",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- options: header.IPv4Options{
- 68, 9, 9, 0,
- // ^
- // There are only 8 bytes allocated to options so 9 bytes of timestamp
- // space is not possible. (Second byte)
- 1, 2, 3, 4,
- },
- shouldFail: true,
- expectErrorICMP: true,
- ICMPType: header.ICMPv4ParamProblem,
- ICMPCode: header.ICMPv4UnusedCode,
- paramProblemPointer: header.IPv4MinimumSize + pointerOffsetForInvalidLength,
- },
- {
- // This tests for some Linux-compatible behaviour.
- // The ICMP pointer returned is 22 for Linux but the
- // error is actually in spot 21.
- name: "bad option - length bad",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- // Timestamps are in multiples of 4 or 8 but never 7.
- // The option space should be padded out.
- options: header.IPv4Options{
- 68, 7, 5, 0,
- // ^ ^ Linux points here which is wrong.
- // | Not a multiple of 4
- 1, 2, 3, 0,
- },
- shouldFail: true,
- expectErrorICMP: true,
- ICMPType: header.ICMPv4ParamProblem,
- ICMPCode: header.ICMPv4UnusedCode,
- paramProblemPointer: header.IPv4MinimumSize + header.IPv4OptTSPointerOffset,
- },
- {
- name: "multiple type 0 with room",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- options: header.IPv4Options{
- 68, 24, 21, 0x00,
- 1, 2, 3, 4,
- 5, 6, 7, 8,
- 9, 10, 11, 12,
- 13, 14, 15, 16,
- 0, 0, 0, 0,
- },
- replyOptions: header.IPv4Options{
- 68, 24, 25, 0x00,
- 1, 2, 3, 4,
- 5, 6, 7, 8,
- 9, 10, 11, 12,
- 13, 14, 15, 16,
- 0x00, 0xad, 0x1c, 0x40, // time we expect from fakeclock
- },
- },
- {
- // The timestamp area is full so add to the overflow count.
- name: "multiple type 1 timestamps",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- options: header.IPv4Options{
- 68, 20, 21, 0x11,
- // ^
- 192, 168, 1, 12,
- 1, 2, 3, 4,
- 192, 168, 1, 13,
- 5, 6, 7, 8,
- },
- // Overflow count is the top nibble of the 4th byte.
- replyOptions: header.IPv4Options{
- 68, 20, 21, 0x21,
- // ^
- 192, 168, 1, 12,
- 1, 2, 3, 4,
- 192, 168, 1, 13,
- 5, 6, 7, 8,
- },
- },
- {
- name: "multiple type 1 timestamps with room",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- options: header.IPv4Options{
- 68, 28, 21, 0x01,
- 192, 168, 1, 12,
- 1, 2, 3, 4,
- 192, 168, 1, 13,
- 5, 6, 7, 8,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- },
- replyOptions: header.IPv4Options{
- 68, 28, 29, 0x01,
- 192, 168, 1, 12,
- 1, 2, 3, 4,
- 192, 168, 1, 13,
- 5, 6, 7, 8,
- 192, 168, 1, 58, // New IP Address.
- 0x00, 0xad, 0x1c, 0x40, // time we expect from fakeclock
- },
- },
- {
- // Timestamp pointer uses one based counting so 0 is invalid.
- name: "timestamp pointer invalid",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- options: header.IPv4Options{
- 68, 8, 0, 0x00,
- // ^ 0 instead of 5 or more.
- 0, 0, 0, 0,
- },
- shouldFail: true,
- expectErrorICMP: true,
- ICMPType: header.ICMPv4ParamProblem,
- ICMPCode: header.ICMPv4UnusedCode,
- paramProblemPointer: header.IPv4MinimumSize + 2,
- },
- {
- // Timestamp pointer cannot be less than 5. It must point past the header
- // which is 4 bytes. (1 based counting)
- name: "timestamp pointer too small by 1",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- options: header.IPv4Options{
- 68, 8, header.IPv4OptionTimestampHdrLength, 0x00,
- // ^ header is 4 bytes, so 4 should fail.
- 0, 0, 0, 0,
- },
- shouldFail: true,
- expectErrorICMP: true,
- ICMPType: header.ICMPv4ParamProblem,
- ICMPCode: header.ICMPv4UnusedCode,
- paramProblemPointer: header.IPv4MinimumSize + header.IPv4OptTSPointerOffset,
- },
- {
- name: "valid timestamp pointer",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- options: header.IPv4Options{
- 68, 8, header.IPv4OptionTimestampHdrLength + 1, 0x00,
- // ^ header is 4 bytes, so 5 should succeed.
- 0, 0, 0, 0,
- },
- replyOptions: header.IPv4Options{
- 68, 8, 9, 0x00,
- 0x00, 0xad, 0x1c, 0x40, // time we expect from fakeclock
- },
- },
- {
- // Needs 8 bytes for a type 1 timestamp but there are only 4 free.
- name: "bad timer element alignment",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- options: header.IPv4Options{
- 68, 20, 17, 0x01,
- // ^^ ^^ 20 byte area, next free spot at 17.
- 192, 168, 1, 12,
- 1, 2, 3, 4,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- },
- shouldFail: true,
- expectErrorICMP: true,
- ICMPType: header.ICMPv4ParamProblem,
- ICMPCode: header.ICMPv4UnusedCode,
- paramProblemPointer: header.IPv4MinimumSize + header.IPv4OptTSPointerOffset,
- },
- // End of option list with illegal option after it, which should be ignored.
- {
- name: "end of options list",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- options: header.IPv4Options{
- 68, 12, 13, 0x11,
- 192, 168, 1, 12,
- 1, 2, 3, 4,
- 0, 10, 3, 99, // EOL followed by junk
- },
- replyOptions: header.IPv4Options{
- 68, 12, 13, 0x21,
- 192, 168, 1, 12,
- 1, 2, 3, 4,
- 0, // End of Options hides following bytes.
- 0, 0, 0, // 3 bytes unknown option removed.
- },
- },
- {
- // Timestamp with a size much too small.
- name: "timestamp truncated",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- options: header.IPv4Options{
- 68, 1, 0, 0,
- // ^ Smallest possible is 8. Linux points at the 68.
- },
- shouldFail: true,
- expectErrorICMP: true,
- ICMPType: header.ICMPv4ParamProblem,
- ICMPCode: header.ICMPv4UnusedCode,
- paramProblemPointer: header.IPv4MinimumSize + pointerOffsetForInvalidLength,
- },
- {
- name: "single record route with room",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- options: header.IPv4Options{
- 7, 7, 4, // 3 byte header
- 0, 0, 0, 0,
- 0,
- },
- replyOptions: header.IPv4Options{
- 7, 7, 8, // 3 byte header
- 192, 168, 1, 58, // New IP Address.
- 0, // padding to multiple of 4 bytes.
- },
- },
- {
- name: "multiple record route with room",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- options: header.IPv4Options{
- 7, 23, 20, // 3 byte header
- 1, 2, 3, 4,
- 5, 6, 7, 8,
- 9, 10, 11, 12,
- 13, 14, 15, 16,
- 0, 0, 0, 0,
- 0,
- },
- replyOptions: header.IPv4Options{
- 7, 23, 24,
- 1, 2, 3, 4,
- 5, 6, 7, 8,
- 9, 10, 11, 12,
- 13, 14, 15, 16,
- 192, 168, 1, 58, // New IP Address.
- 0, // padding to multiple of 4 bytes.
- },
- },
- {
- name: "single record route with no room",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- options: header.IPv4Options{
- 7, 7, 8, // 3 byte header
- 1, 2, 3, 4,
- 0,
- },
- replyOptions: header.IPv4Options{
- 7, 7, 8, // 3 byte header
- 1, 2, 3, 4,
- 0, // padding to multiple of 4 bytes.
- },
- },
- {
- // Unlike timestamp, this should just succeed.
- name: "multiple record route with no room",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- options: header.IPv4Options{
- 7, 23, 24, // 3 byte header
- 1, 2, 3, 4,
- 5, 6, 7, 8,
- 9, 10, 11, 12,
- 13, 14, 15, 16,
- 17, 18, 19, 20,
- 0,
- },
- replyOptions: header.IPv4Options{
- 7, 23, 24,
- 1, 2, 3, 4,
- 5, 6, 7, 8,
- 9, 10, 11, 12,
- 13, 14, 15, 16,
- 17, 18, 19, 20,
- 0, // padding to multiple of 4 bytes.
- },
- },
- {
- // Pointer uses one based counting so 0 is invalid.
- name: "record route pointer zero",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- options: header.IPv4Options{
- 7, 8, 0, // 3 byte header
- 0, 0, 0, 0,
- 0,
- },
- shouldFail: true,
- expectErrorICMP: true,
- ICMPType: header.ICMPv4ParamProblem,
- ICMPCode: header.ICMPv4UnusedCode,
- paramProblemPointer: header.IPv4MinimumSize + header.IPv4OptRRPointerOffset,
- },
- {
- // Pointer must be 4 or more as it must point past the 3 byte header
- // using 1 based counting. 3 should fail.
- name: "record route pointer too small by 1",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- options: header.IPv4Options{
- 7, 8, header.IPv4OptionRecordRouteHdrLength, // 3 byte header
- 0, 0, 0, 0,
- 0,
- },
- shouldFail: true,
- expectErrorICMP: true,
- ICMPType: header.ICMPv4ParamProblem,
- ICMPCode: header.ICMPv4UnusedCode,
- paramProblemPointer: header.IPv4MinimumSize + header.IPv4OptRRPointerOffset,
- },
- {
- // Pointer must be 4 or more as it must point past the 3 byte header
- // using 1 based counting. Check 4 passes. (Duplicates "single
- // record route with room")
- name: "valid record route pointer",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- options: header.IPv4Options{
- 7, 7, header.IPv4OptionRecordRouteHdrLength + 1, // 3 byte header
- 0, 0, 0, 0,
- 0,
- },
- replyOptions: header.IPv4Options{
- 7, 7, 8, // 3 byte header
- 192, 168, 1, 58, // New IP Address.
- 0, // padding to multiple of 4 bytes.
- },
- },
- {
- // Confirm Linux bug-for-bug compatibility.
- // Linux returns slot 22 but the error is in slot 21.
- name: "multiple record route with not enough room",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- options: header.IPv4Options{
- 7, 8, 8, // 3 byte header
- // ^ ^ Linux points here. We must too.
- // | Not enough room. 1 byte free, need 4.
- 1, 2, 3, 4,
- 0,
- },
- shouldFail: true,
- expectErrorICMP: true,
- ICMPType: header.ICMPv4ParamProblem,
- ICMPCode: header.ICMPv4UnusedCode,
- paramProblemPointer: header.IPv4MinimumSize + header.IPv4OptRRPointerOffset,
- },
- {
- name: "duplicate record route",
- maxTotalLength: ipv4.MaxTotalSize,
- transportProtocol: uint8(header.ICMPv4ProtocolNumber),
- TTL: ttl,
- options: header.IPv4Options{
- 7, 7, 8, // 3 byte header
- 1, 2, 3, 4,
- 7, 7, 8, // 3 byte header
- 1, 2, 3, 4,
- 0, 0, // pad
- },
- shouldFail: true,
- expectErrorICMP: true,
- ICMPType: header.ICMPv4ParamProblem,
- ICMPCode: header.ICMPv4UnusedCode,
- paramProblemPointer: header.IPv4MinimumSize + 7,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{icmp.NewProtocol4},
- Clock: clock,
- })
- // Advance the clock by some unimportant amount to make
- // it give a more recognisable signature than 00,00,00,00.
- clock.Advance(time.Millisecond * randomTimeOffset)
-
- // We expect at most a single packet in response to our ICMP Echo Request.
- e := channel.New(1, ipv4.MaxTotalSize, "")
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID, err)
- }
- ipv4ProtoAddr := tcpip.ProtocolAddress{Protocol: header.IPv4ProtocolNumber, AddressWithPrefix: ipv4Addr}
- if err := s.AddProtocolAddress(nicID, ipv4ProtoAddr); err != nil {
- t.Fatalf("AddProtocolAddress(%d, %#v): %s", nicID, ipv4ProtoAddr, err)
- }
-
- // Default routes for IPv4 so ICMP can find a route to the remote
- // node when attempting to send the ICMP Echo Reply.
- s.SetRouteTable([]tcpip.Route{
- {
- Destination: header.IPv4EmptySubnet,
- NIC: nicID,
- },
- })
-
- if len(test.options)%4 != 0 {
- t.Fatalf("options must be aligned to 32 bits, invalid test options: %x (len=%d)", test.options, len(test.options))
- }
- ipHeaderLength := header.IPv4MinimumSize + len(test.options)
- if ipHeaderLength > header.IPv4MaximumHeaderSize {
- t.Fatalf("IP header length too large: got = %d, want <= %d ", ipHeaderLength, header.IPv4MaximumHeaderSize)
- }
- totalLen := uint16(ipHeaderLength + header.ICMPv4MinimumSize)
- hdr := buffer.NewPrependable(int(totalLen))
- icmpH := header.ICMPv4(hdr.Prepend(header.ICMPv4MinimumSize))
-
- // Specify ident/seq to make sure we get the same in the response.
- icmpH.SetIdent(randomIdent)
- icmpH.SetSequence(randomSequence)
- icmpH.SetType(header.ICMPv4Echo)
- icmpH.SetCode(header.ICMPv4UnusedCode)
- icmpH.SetChecksum(0)
- icmpH.SetChecksum(^header.Checksum(icmpH, 0))
- ip := header.IPv4(hdr.Prepend(ipHeaderLength))
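-			// If the test wants to advertise a total length shorter than the real
-			// packet, clamp the value written into the IP header.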
- if test.maxTotalLength < totalLen {
- totalLen = test.maxTotalLength
- }
- ip.Encode(&header.IPv4Fields{
- TotalLength: totalLen,
- Protocol: test.transportProtocol,
- TTL: test.TTL,
- SrcAddr: remoteIPv4Addr,
- DstAddr: ipv4Addr.Address,
- })
- if test.headerLength != 0 {
- ip.SetHeaderLength(test.headerLength)
- } else {
- // Set the calculated header length, since we may manually add options.
- ip.SetHeaderLength(uint8(ipHeaderLength))
- }
- if len(test.options) != 0 {
- // Copy options manually. We do not use Encode for options so we can
- // verify malformed options with handcrafted payloads.
- if want, got := copy(ip.Options(), test.options), len(test.options); want != got {
- t.Fatalf("got copy(ip.Options(), test.options) = %d, want = %d", got, want)
- }
- }
- ip.SetChecksum(0)
- ipHeaderChecksum := ip.CalculateChecksum()
- if test.badHeaderChecksum {
- ipHeaderChecksum += 42
- }
- ip.SetChecksum(^ipHeaderChecksum)
- requestPkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: hdr.View().ToVectorisedView(),
- })
- e.InjectInbound(header.IPv4ProtocolNumber, requestPkt)
- reply, ok := e.Read()
- if !ok {
- if test.shouldFail {
- if test.expectErrorICMP {
- t.Fatalf("ICMP error response (type %d, code %d) missing", test.ICMPType, test.ICMPCode)
- }
- return // Expected silent failure.
- }
- t.Fatal("expected ICMP echo reply missing")
- }
-
-			// If we did not expect a packet, register our surprise but carry on to
-			// provide more information about what we got.
- if test.shouldFail && !test.expectErrorICMP {
- t.Error("unexpected packet response")
- }
-
- // Check the route that brought the packet to us.
- if reply.Route.LocalAddress != ipv4Addr.Address {
- t.Errorf("got pkt.Route.LocalAddress = %s, want = %s", reply.Route.LocalAddress, ipv4Addr.Address)
- }
- if reply.Route.RemoteAddress != remoteIPv4Addr {
- t.Errorf("got pkt.Route.RemoteAddress = %s, want = %s", reply.Route.RemoteAddress, remoteIPv4Addr)
- }
-
- // Make sure it's all in one buffer for checker.
- replyIPHeader := header.IPv4(stack.PayloadSince(reply.Pkt.NetworkHeader()))
-
- // At this stage we only know it's probably an IP+ICMP header so verify
- // that much.
- checker.IPv4(t, replyIPHeader,
- checker.SrcAddr(ipv4Addr.Address),
- checker.DstAddr(remoteIPv4Addr),
- checker.ICMPv4(
- checker.ICMPv4Checksum(),
- ),
- )
-
- // Don't proceed any further if the checker found problems.
- if t.Failed() {
- t.FailNow()
- }
-
- // OK it's ICMP. We can safely look at the type now.
- replyICMPHeader := header.ICMPv4(replyIPHeader.Payload())
- switch replyICMPHeader.Type() {
- case header.ICMPv4ParamProblem:
- if !test.shouldFail {
- t.Fatalf("got Parameter Problem with pointer %d, wanted Echo Reply", replyICMPHeader.Pointer())
- }
- if !test.expectErrorICMP {
- t.Fatalf("got Parameter Problem with pointer %d, wanted no response", replyICMPHeader.Pointer())
- }
- checker.IPv4(t, replyIPHeader,
- checker.IPFullLength(uint16(header.IPv4MinimumSize+header.ICMPv4MinimumSize+requestPkt.Size())),
- checker.IPv4HeaderLength(header.IPv4MinimumSize),
- checker.ICMPv4(
- checker.ICMPv4Type(test.ICMPType),
- checker.ICMPv4Code(test.ICMPCode),
- checker.ICMPv4Pointer(test.paramProblemPointer),
- checker.ICMPv4Payload(hdr.View()),
- ),
- )
- return
- case header.ICMPv4DstUnreachable:
- if !test.shouldFail {
- t.Fatalf("got ICMP error packet type %d, code %d, wanted Echo Reply",
- header.ICMPv4DstUnreachable, replyICMPHeader.Code())
- }
- if !test.expectErrorICMP {
- t.Fatalf("got ICMP error packet type %d, code %d, wanted no response",
- header.ICMPv4DstUnreachable, replyICMPHeader.Code())
- }
- checker.IPv4(t, replyIPHeader,
- checker.IPFullLength(uint16(header.IPv4MinimumSize+header.ICMPv4MinimumSize+requestPkt.Size())),
- checker.IPv4HeaderLength(header.IPv4MinimumSize),
- checker.ICMPv4(
- checker.ICMPv4Type(test.ICMPType),
- checker.ICMPv4Code(test.ICMPCode),
- checker.ICMPv4Payload(hdr.View()),
- ),
- )
- return
- case header.ICMPv4EchoReply:
- if test.shouldFail {
- if !test.expectErrorICMP {
- t.Error("got Echo Reply packet, want no response")
- } else {
- t.Errorf("got Echo Reply, want ICMP error type %d, code %d", test.ICMPType, test.ICMPCode)
- }
- }
- // If the IP options change size then the packet will change size, so
- // some IP header fields will need to be adjusted for the checks.
- sizeChange := len(test.replyOptions) - len(test.options)
-
- checker.IPv4(t, replyIPHeader,
- checker.IPv4HeaderLength(ipHeaderLength+sizeChange),
- checker.IPv4Options(test.replyOptions),
- checker.IPFullLength(uint16(requestPkt.Size()+sizeChange)),
- checker.ICMPv4(
- checker.ICMPv4Checksum(),
- checker.ICMPv4Code(header.ICMPv4UnusedCode),
- checker.ICMPv4Seq(randomSequence),
- checker.ICMPv4Ident(randomIdent),
- ),
- )
- default:
- t.Fatalf("unexpected ICMP response, got type %d, want = %d, %d or %d",
- replyICMPHeader.Type(), header.ICMPv4EchoReply, header.ICMPv4DstUnreachable, header.ICMPv4ParamProblem)
- }
- })
- }
-}
-
-// compareFragments compares the contents of a set of fragmented packets against
-// the contents of a source packet.
-//
-// If withIPHeader is true, the fragments' IP headers are validated against the
-// source packet's IP header. If false, they are validated against the first
-// fragment's IP header.
-func compareFragments(packets []*stack.PacketBuffer, sourcePacket *stack.PacketBuffer, mtu uint32, wantFragments []fragmentInfo, proto tcpip.TransportProtocolNumber, withIPHeader bool, expectedAvailableHeaderBytes int) error {
-	// Make a complete copy of the source packet.
- var source header.IPv4
- vv := buffer.NewVectorisedView(sourcePacket.Size(), sourcePacket.Views())
-
- // If the packet to be fragmented contains an IPv4 header, use that header for
- // validating fragment headers. Else, use the header of the first fragment.
- if withIPHeader {
- source = header.IPv4(vv.ToView())
- } else {
- source = header.IPv4(packets[0].NetworkHeader().View())
- source = append(source, vv.ToView()...)
- }
-
-	// Make a copy of the IP header; some of its fields will be adjusted to
-	// build the expected header for each fragment.
- sourceCopy := header.IPv4(append(buffer.View(nil), source[:source.HeaderLength()]...))
- sourceCopy.SetChecksum(0)
- sourceCopy.SetFlagsFragmentOffset(0, 0)
- sourceCopy.SetTotalLength(0)
- // Build up an array of the bytes sent.
- var reassembledPayload buffer.VectorisedView
- for i, packet := range packets {
- // Confirm that the packet is valid.
- allBytes := buffer.NewVectorisedView(packet.Size(), packet.Views())
- fragmentIPHeader := header.IPv4(allBytes.ToView())
- if !fragmentIPHeader.IsValid(len(fragmentIPHeader)) {
- return fmt.Errorf("fragment #%d: IP packet is invalid:\n%s", i, hex.Dump(fragmentIPHeader))
- }
- if got := len(fragmentIPHeader); got > int(mtu) {
- return fmt.Errorf("fragment #%d: got len(fragmentIPHeader) = %d, want <= %d", i, got, mtu)
- }
- if got := fragmentIPHeader.TransportProtocol(); got != proto {
- return fmt.Errorf("fragment #%d: got fragmentIPHeader.TransportProtocol() = %d, want = %d", i, got, uint8(proto))
- }
- if got, want := packet.NetworkProtocolNumber, sourcePacket.NetworkProtocolNumber; got != want {
- return fmt.Errorf("fragment #%d: got fragment.NetworkProtocolNumber = %d, want = %d", i, got, want)
- }
- if got := packet.AvailableHeaderBytes(); got != expectedAvailableHeaderBytes {
- return fmt.Errorf("fragment #%d: got packet.AvailableHeaderBytes() = %d, want = %d", i, got, expectedAvailableHeaderBytes)
- }
- if got, want := fragmentIPHeader.CalculateChecksum(), uint16(0xffff); got != want {
- return fmt.Errorf("fragment #%d: got ip.CalculateChecksum() = %#x, want = %#x", i, got, want)
- }
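-		// Apply the expected flags and fragment offset for this fragment to the
-		// header copy before comparing.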
- if wantFragments[i].more {
- sourceCopy.SetFlagsFragmentOffset(sourceCopy.Flags()|header.IPv4FlagMoreFragments, wantFragments[i].offset)
- } else {
- sourceCopy.SetFlagsFragmentOffset(sourceCopy.Flags()&^header.IPv4FlagMoreFragments, wantFragments[i].offset)
- }
- reassembledPayload.AppendView(packet.TransportHeader().View())
- reassembledPayload.AppendView(packet.Data().AsRange().ToOwnedView())
-		// Set the expected total length on the copy and recompute its checksum
-		// so it can be compared against the fragment's header.
- sourceCopy.SetTotalLength(wantFragments[i].payloadSize + header.IPv4MinimumSize)
- sourceCopy.SetChecksum(0)
- sourceCopy.SetChecksum(^sourceCopy.CalculateChecksum())
-
-		// If we are validating against the original IP header, we should exclude
-		// the ID field, which will only be set for fragmented packets.
- if withIPHeader {
- fragmentIPHeader.SetID(0)
- fragmentIPHeader.SetChecksum(0)
- fragmentIPHeader.SetChecksum(^fragmentIPHeader.CalculateChecksum())
- }
- if diff := cmp.Diff(fragmentIPHeader[:fragmentIPHeader.HeaderLength()], sourceCopy[:sourceCopy.HeaderLength()]); diff != "" {
- return fmt.Errorf("fragment #%d: fragmentIPHeader mismatch (-want +got):\n%s", i, diff)
- }
- }
-
- expected := buffer.View(source[source.HeaderLength():])
- if diff := cmp.Diff(expected, reassembledPayload.ToView()); diff != "" {
- return fmt.Errorf("reassembledPayload mismatch (-want +got):\n%s", diff)
- }
-
- return nil
-}
-
-type fragmentInfo struct {
- offset uint16
- more bool
- payloadSize uint16
-}
-
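-// Fragment payload sizes below are the MTU minus the 20-byte IPv4 header,
-// rounded down to a multiple of 8 octets, except for the final fragment.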
-var fragmentationTests = []struct {
- description string
- mtu uint32
- transportHeaderLength int
- payloadSize int
- wantFragments []fragmentInfo
-}{
- {
- description: "No fragmentation",
- mtu: 1280,
- transportHeaderLength: 0,
- payloadSize: 1000,
- wantFragments: []fragmentInfo{
- {offset: 0, payloadSize: 1000, more: false},
- },
- },
- {
- description: "Fragmented",
- mtu: 1280,
- transportHeaderLength: 0,
- payloadSize: 2000,
- wantFragments: []fragmentInfo{
- {offset: 0, payloadSize: 1256, more: true},
- {offset: 1256, payloadSize: 744, more: false},
- },
- },
- {
- description: "Fragmented with the minimum mtu",
- mtu: header.IPv4MinimumMTU,
- transportHeaderLength: 0,
- payloadSize: 100,
- wantFragments: []fragmentInfo{
- {offset: 0, payloadSize: 48, more: true},
- {offset: 48, payloadSize: 48, more: true},
- {offset: 96, payloadSize: 4, more: false},
- },
- },
- {
- description: "Fragmented with mtu not a multiple of 8",
- mtu: header.IPv4MinimumMTU + 1,
- transportHeaderLength: 0,
- payloadSize: 100,
- wantFragments: []fragmentInfo{
- {offset: 0, payloadSize: 48, more: true},
- {offset: 48, payloadSize: 48, more: true},
- {offset: 96, payloadSize: 4, more: false},
- },
- },
- {
- description: "No fragmentation with big header",
- mtu: 2000,
- transportHeaderLength: 100,
- payloadSize: 1000,
- wantFragments: []fragmentInfo{
- {offset: 0, payloadSize: 1100, more: false},
- },
- },
- {
- description: "Fragmented with big header",
- mtu: 1280,
- transportHeaderLength: 100,
- payloadSize: 1200,
- wantFragments: []fragmentInfo{
- {offset: 0, payloadSize: 1256, more: true},
- {offset: 1256, payloadSize: 44, more: false},
- },
- },
- {
- description: "Fragmented with MTU smaller than header",
- mtu: 300,
- transportHeaderLength: 1000,
- payloadSize: 500,
- wantFragments: []fragmentInfo{
- {offset: 0, payloadSize: 280, more: true},
- {offset: 280, payloadSize: 280, more: true},
- {offset: 560, payloadSize: 280, more: true},
- {offset: 840, payloadSize: 280, more: true},
- {offset: 1120, payloadSize: 280, more: true},
- {offset: 1400, payloadSize: 100, more: false},
- },
- },
-}
-
-func TestFragmentationWritePacket(t *testing.T) {
- const ttl = 42
-
- for _, ft := range fragmentationTests {
- t.Run(ft.description, func(t *testing.T) {
- ep := iptestutil.NewMockLinkEndpoint(ft.mtu, nil, math.MaxInt32)
- r := buildRoute(t, ep)
- pkt := iptestutil.MakeRandPkt(ft.transportHeaderLength, extraHeaderReserve+header.IPv4MinimumSize, []int{ft.payloadSize}, header.IPv4ProtocolNumber)
- source := pkt.Clone()
- err := r.WritePacket(stack.NetworkHeaderParams{
- Protocol: tcp.ProtocolNumber,
- TTL: ttl,
- TOS: stack.DefaultTOS,
- }, pkt)
- if err != nil {
- t.Fatalf("r.WritePacket(...): %s", err)
- }
- if got := len(ep.WrittenPackets); got != len(ft.wantFragments) {
- t.Errorf("got len(ep.WrittenPackets) = %d, want = %d", got, len(ft.wantFragments))
- }
- if got := int(r.Stats().IP.PacketsSent.Value()); got != len(ft.wantFragments) {
- t.Errorf("got c.Route.Stats().IP.PacketsSent.Value() = %d, want = %d", got, len(ft.wantFragments))
- }
- if got := r.Stats().IP.OutgoingPacketErrors.Value(); got != 0 {
- t.Errorf("got r.Stats().IP.OutgoingPacketErrors.Value() = %d, want = 0", got)
- }
- if err := compareFragments(ep.WrittenPackets, source, ft.mtu, ft.wantFragments, tcp.ProtocolNumber, false /* withIPHeader */, extraHeaderReserve); err != nil {
- t.Error(err)
- }
- })
- }
-}
-
-func TestFragmentationWritePackets(t *testing.T) {
- const ttl = 42
- writePacketsTests := []struct {
- description string
- insertBefore int
- insertAfter int
- }{
- {
- description: "Single packet",
- insertBefore: 0,
- insertAfter: 0,
- },
- {
- description: "With packet before",
- insertBefore: 1,
- insertAfter: 0,
- },
- {
- description: "With packet after",
- insertBefore: 0,
- insertAfter: 1,
- },
- {
- description: "With packet before and after",
- insertBefore: 1,
- insertAfter: 1,
- },
- }
- tinyPacket := iptestutil.MakeRandPkt(header.TCPMinimumSize, extraHeaderReserve+header.IPv4MinimumSize, []int{1}, header.IPv4ProtocolNumber)
-
- for _, test := range writePacketsTests {
- t.Run(test.description, func(t *testing.T) {
- for _, ft := range fragmentationTests {
- t.Run(ft.description, func(t *testing.T) {
- var pkts stack.PacketBufferList
- for i := 0; i < test.insertBefore; i++ {
- pkts.PushBack(tinyPacket.Clone())
- }
- pkt := iptestutil.MakeRandPkt(ft.transportHeaderLength, extraHeaderReserve+header.IPv4MinimumSize, []int{ft.payloadSize}, header.IPv4ProtocolNumber)
- pkts.PushBack(pkt.Clone())
- for i := 0; i < test.insertAfter; i++ {
- pkts.PushBack(tinyPacket.Clone())
- }
-
- ep := iptestutil.NewMockLinkEndpoint(ft.mtu, nil, math.MaxInt32)
- r := buildRoute(t, ep)
-
- wantTotalPackets := len(ft.wantFragments) + test.insertBefore + test.insertAfter
- n, err := r.WritePackets(pkts, stack.NetworkHeaderParams{
- Protocol: tcp.ProtocolNumber,
- TTL: ttl,
- TOS: stack.DefaultTOS,
- })
- if err != nil {
- t.Errorf("got WritePackets(_, _, _) = (_, %s), want = (_, nil)", err)
- }
- if n != wantTotalPackets {
- t.Errorf("got WritePackets(_, _, _) = (%d, _), want = (%d, _)", n, wantTotalPackets)
- }
- if got := len(ep.WrittenPackets); got != wantTotalPackets {
- t.Errorf("got len(ep.WrittenPackets) = %d, want = %d", got, wantTotalPackets)
- }
- if got := int(r.Stats().IP.PacketsSent.Value()); got != wantTotalPackets {
- t.Errorf("got c.Route.Stats().IP.PacketsSent.Value() = %d, want = %d", got, wantTotalPackets)
- }
- if got := int(r.Stats().IP.OutgoingPacketErrors.Value()); got != 0 {
- t.Errorf("got r.Stats().IP.OutgoingPacketErrors.Value() = %d, want = 0", got)
- }
-
- if wantTotalPackets == 0 {
- return
- }
-
- fragments := ep.WrittenPackets[test.insertBefore : len(ft.wantFragments)+test.insertBefore]
- if err := compareFragments(fragments, pkt, ft.mtu, ft.wantFragments, tcp.ProtocolNumber, false /* withIPHeader */, extraHeaderReserve); err != nil {
- t.Error(err)
- }
- })
- }
- })
- }
-}
-
-// TestFragmentationErrors checks that errors are returned from WritePacket
-// correctly.
-func TestFragmentationErrors(t *testing.T) {
- const ttl = 42
-
- tests := []struct {
- description string
- mtu uint32
- transportHeaderLength int
- payloadSize int
- allowPackets int
- outgoingErrors int
- mockError tcpip.Error
- wantError tcpip.Error
- }{
- {
- description: "No frag",
- mtu: 2000,
- payloadSize: 1000,
- transportHeaderLength: 0,
- allowPackets: 0,
- outgoingErrors: 1,
- mockError: &tcpip.ErrAborted{},
- wantError: &tcpip.ErrAborted{},
- },
- {
- description: "Error on first frag",
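-			// An MTU of 500 yields 3 fragments; with allowPackets 0 all of them
-			// fail to send and are counted as outgoing errors.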
- mtu: 500,
- payloadSize: 1000,
- transportHeaderLength: 0,
- allowPackets: 0,
- outgoingErrors: 3,
- mockError: &tcpip.ErrAborted{},
- wantError: &tcpip.ErrAborted{},
- },
- {
- description: "Error on second frag",
- mtu: 500,
- payloadSize: 1000,
- transportHeaderLength: 0,
- allowPackets: 1,
- outgoingErrors: 2,
- mockError: &tcpip.ErrAborted{},
- wantError: &tcpip.ErrAborted{},
- },
- {
- description: "Error on first frag MTU smaller than header",
- mtu: 500,
- transportHeaderLength: 1000,
- payloadSize: 500,
- allowPackets: 0,
- outgoingErrors: 4,
- mockError: &tcpip.ErrAborted{},
- wantError: &tcpip.ErrAborted{},
- },
- {
- description: "Error when MTU is smaller than IPv4 minimum MTU",
- mtu: header.IPv4MinimumMTU - 1,
- transportHeaderLength: 0,
- payloadSize: 500,
- allowPackets: 0,
- outgoingErrors: 1,
- mockError: nil,
- wantError: &tcpip.ErrInvalidEndpointState{},
- },
- }
-
- for _, ft := range tests {
- t.Run(ft.description, func(t *testing.T) {
- pkt := iptestutil.MakeRandPkt(ft.transportHeaderLength, extraHeaderReserve+header.IPv4MinimumSize, []int{ft.payloadSize}, header.IPv4ProtocolNumber)
- ep := iptestutil.NewMockLinkEndpoint(ft.mtu, ft.mockError, ft.allowPackets)
- r := buildRoute(t, ep)
- err := r.WritePacket(stack.NetworkHeaderParams{
- Protocol: tcp.ProtocolNumber,
- TTL: ttl,
- TOS: stack.DefaultTOS,
- }, pkt)
- if diff := cmp.Diff(ft.wantError, err); diff != "" {
- t.Fatalf("unexpected error from r.WritePacket(_, _, _), (-want, +got):\n%s", diff)
- }
- if got := int(r.Stats().IP.PacketsSent.Value()); got != ft.allowPackets {
- t.Errorf("got r.Stats().IP.PacketsSent.Value() = %d, want = %d", got, ft.allowPackets)
- }
- if got := int(r.Stats().IP.OutgoingPacketErrors.Value()); got != ft.outgoingErrors {
- t.Errorf("got r.Stats().IP.OutgoingPacketErrors.Value() = %d, want = %d", got, ft.outgoingErrors)
- }
- })
- }
-}
-
-func TestInvalidFragments(t *testing.T) {
- const (
- nicID = 1
- linkAddr = tcpip.LinkAddress("\x0a\x0b\x0c\x0d\x0e\x0e")
- addr1 = "\x0a\x00\x00\x01"
- addr2 = "\x0a\x00\x00\x02"
- tos = 0
- ident = 1
- ttl = 48
- protocol = 6
- )
-
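-	// payloadGen returns a payload of payloadLen bytes, each set to 0x30.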
- payloadGen := func(payloadLen int) []byte {
- payload := make([]byte, payloadLen)
- for i := 0; i < len(payload); i++ {
- payload[i] = 0x30
- }
- return payload
- }
-
- type fragmentData struct {
- ipv4fields header.IPv4Fields
-		// 0 means insert the correct IHL; a non-zero value overrides it.
-		overrideIHL  int // To force an IHL of 0, use 1: the value is divided by 4 when encoded, so it becomes 0.
- payload []byte
- autoChecksum bool // If true, the Checksum field will be overwritten.
- }
-
- tests := []struct {
- name string
- fragments []fragmentData
- wantMalformedIPPackets uint64
- wantMalformedFragments uint64
- }{
- {
- name: "IHL and TotalLength zero, FragmentOffset non-zero",
- fragments: []fragmentData{
- {
- ipv4fields: header.IPv4Fields{
- TOS: tos,
- TotalLength: 0,
- ID: ident,
- Flags: header.IPv4FlagDontFragment | header.IPv4FlagMoreFragments,
- FragmentOffset: 59776,
- TTL: ttl,
- Protocol: protocol,
- SrcAddr: addr1,
- DstAddr: addr2,
- },
- overrideIHL: 1, // See note above.
- payload: payloadGen(12),
- autoChecksum: true,
- },
- },
- wantMalformedIPPackets: 1,
- wantMalformedFragments: 0,
- },
- {
- name: "IHL and TotalLength zero, FragmentOffset zero",
- fragments: []fragmentData{
- {
- ipv4fields: header.IPv4Fields{
- TOS: tos,
- TotalLength: 0,
- ID: ident,
- Flags: header.IPv4FlagMoreFragments,
- FragmentOffset: 0,
- TTL: ttl,
- Protocol: protocol,
- SrcAddr: addr1,
- DstAddr: addr2,
- },
- overrideIHL: 1, // See note above.
- payload: payloadGen(12),
- autoChecksum: true,
- },
- },
- wantMalformedIPPackets: 1,
- wantMalformedFragments: 0,
- },
- {
-			// A 17-octet payload at fragment offset 65520 makes the fragment end
-			// past 65536.
- name: "fragment ends past 65536",
- fragments: []fragmentData{
- {
- ipv4fields: header.IPv4Fields{
- TOS: tos,
- TotalLength: header.IPv4MinimumSize + 17,
- ID: ident,
- Flags: 0,
- FragmentOffset: 65520,
- TTL: ttl,
- Protocol: protocol,
- SrcAddr: addr1,
- DstAddr: addr2,
- },
- payload: payloadGen(17),
- autoChecksum: true,
- },
- },
- wantMalformedIPPackets: 1,
- wantMalformedFragments: 1,
- },
- {
-			// A 16-octet payload at fragment offset 65520 makes the fragment end
-			// exactly at 65536.
- name: "fragment ends exactly at 65536",
- fragments: []fragmentData{
- {
- ipv4fields: header.IPv4Fields{
- TOS: tos,
- TotalLength: header.IPv4MinimumSize + 16,
- ID: ident,
- Flags: 0,
- FragmentOffset: 65520,
- TTL: ttl,
- Protocol: protocol,
- SrcAddr: addr1,
- DstAddr: addr2,
- },
- payload: payloadGen(16),
- autoChecksum: true,
- },
- },
- wantMalformedIPPackets: 0,
- wantMalformedFragments: 0,
- },
- {
- name: "IHL less than IPv4 minimum size",
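-			// Both fragments claim an IHL smaller than the 20-byte minimum, so each
-			// is counted as a malformed packet rather than a malformed fragment.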
- fragments: []fragmentData{
- {
- ipv4fields: header.IPv4Fields{
- TOS: tos,
- TotalLength: header.IPv4MinimumSize + 28,
- ID: ident,
- Flags: 0,
- FragmentOffset: 1944,
- TTL: ttl,
- Protocol: protocol,
- SrcAddr: addr1,
- DstAddr: addr2,
- },
- payload: payloadGen(28),
- overrideIHL: header.IPv4MinimumSize - 12,
- autoChecksum: true,
- },
- {
- ipv4fields: header.IPv4Fields{
- TOS: tos,
- TotalLength: header.IPv4MinimumSize - 12,
- ID: ident,
- Flags: header.IPv4FlagMoreFragments,
- FragmentOffset: 0,
- TTL: ttl,
- Protocol: protocol,
- SrcAddr: addr1,
- DstAddr: addr2,
- },
- payload: payloadGen(28),
- overrideIHL: header.IPv4MinimumSize - 12,
- autoChecksum: true,
- },
- },
- wantMalformedIPPackets: 2,
- wantMalformedFragments: 0,
- },
- {
- name: "fragment with short TotalLength and extra payload",
- fragments: []fragmentData{
- {
- ipv4fields: header.IPv4Fields{
- TOS: tos,
- TotalLength: header.IPv4MinimumSize + 28,
- ID: ident,
- Flags: 0,
- FragmentOffset: 28816,
- TTL: ttl,
- Protocol: protocol,
- SrcAddr: addr1,
- DstAddr: addr2,
- },
- payload: payloadGen(28),
- overrideIHL: header.IPv4MinimumSize + 4,
- autoChecksum: true,
- },
- {
- ipv4fields: header.IPv4Fields{
- TOS: tos,
- TotalLength: header.IPv4MinimumSize + 4,
- ID: ident,
- Flags: header.IPv4FlagMoreFragments,
- FragmentOffset: 0,
- TTL: ttl,
- Protocol: protocol,
- SrcAddr: addr1,
- DstAddr: addr2,
- },
- payload: payloadGen(28),
- overrideIHL: header.IPv4MinimumSize + 4,
- autoChecksum: true,
- },
- },
- wantMalformedIPPackets: 1,
- wantMalformedFragments: 1,
- },
- {
- name: "multiple fragments with More Fragments flag set to false",
- fragments: []fragmentData{
- {
- ipv4fields: header.IPv4Fields{
- TOS: tos,
- TotalLength: header.IPv4MinimumSize + 8,
- ID: ident,
- Flags: 0,
- FragmentOffset: 128,
- TTL: ttl,
- Protocol: protocol,
- SrcAddr: addr1,
- DstAddr: addr2,
- },
- payload: payloadGen(8),
- autoChecksum: true,
- },
- {
- ipv4fields: header.IPv4Fields{
- TOS: tos,
- TotalLength: header.IPv4MinimumSize + 8,
- ID: ident,
- Flags: 0,
- FragmentOffset: 8,
- TTL: ttl,
- Protocol: protocol,
- SrcAddr: addr1,
- DstAddr: addr2,
- },
- payload: payloadGen(8),
- autoChecksum: true,
- },
- {
- ipv4fields: header.IPv4Fields{
- TOS: tos,
- TotalLength: header.IPv4MinimumSize + 8,
- ID: ident,
- Flags: header.IPv4FlagMoreFragments,
- FragmentOffset: 0,
- TTL: ttl,
- Protocol: protocol,
- SrcAddr: addr1,
- DstAddr: addr2,
- },
- payload: payloadGen(8),
- autoChecksum: true,
- },
- },
- wantMalformedIPPackets: 1,
- wantMalformedFragments: 1,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{
- ipv4.NewProtocol,
- },
- })
- e := channel.New(0, 1500, linkAddr)
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
- if err := s.AddAddress(nicID, ipv4.ProtocolNumber, addr2); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s) = %s", nicID, header.IPv4ProtocolNumber, addr2, err)
- }
-
- for _, f := range test.fragments {
- pktSize := header.IPv4MinimumSize + len(f.payload)
- hdr := buffer.NewPrependable(pktSize)
-
- ip := header.IPv4(hdr.Prepend(pktSize))
- ip.Encode(&f.ipv4fields)
- if want, got := len(f.payload), copy(ip[header.IPv4MinimumSize:], f.payload); want != got {
- t.Fatalf("copied %d bytes, expected %d bytes.", got, want)
- }
- // Encode sets this up correctly. If we want a different value for
- // testing then we need to overwrite the good value.
- if f.overrideIHL != 0 {
- ip.SetHeaderLength(uint8(f.overrideIHL))
- // If we are asked to add options (type not specified) then pad
- // with 0 (EOL). RFC 791 page 23 says "The padding is zero".
- for i := header.IPv4MinimumSize; i < f.overrideIHL; i++ {
- ip[i] = byte(header.IPv4OptionListEndType)
- }
- }
-
- if f.autoChecksum {
- ip.SetChecksum(0)
- ip.SetChecksum(^ip.CalculateChecksum())
- }
-
- vv := hdr.View().ToVectorisedView()
- e.InjectInbound(header.IPv4ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: vv,
- }))
- }
-
- if got, want := s.Stats().IP.MalformedPacketsReceived.Value(), test.wantMalformedIPPackets; got != want {
- t.Errorf("incorrect Stats.IP.MalformedPacketsReceived, got: %d, want: %d", got, want)
- }
- if got, want := s.Stats().IP.MalformedFragmentsReceived.Value(), test.wantMalformedFragments; got != want {
- t.Errorf("incorrect Stats.IP.MalformedFragmentsReceived, got: %d, want: %d", got, want)
- }
- })
- }
-}
-
-func TestFragmentReassemblyTimeout(t *testing.T) {
- const (
- nicID = 1
- linkAddr = tcpip.LinkAddress("\x0a\x0b\x0c\x0d\x0e\x0e")
- addr1 = "\x0a\x00\x00\x01"
- addr2 = "\x0a\x00\x00\x02"
- tos = 0
- ident = 1
- ttl = 48
- protocol = 99
- data = "TEST_FRAGMENT_REASSEMBLY_TIMEOUT"
- )
-
- type fragmentData struct {
- ipv4fields header.IPv4Fields
- payload []byte
- }
-
- tests := []struct {
- name string
- fragments []fragmentData
- expectICMP bool
- }{
- {
- name: "first fragment only",
- fragments: []fragmentData{
- {
- ipv4fields: header.IPv4Fields{
- TOS: tos,
- TotalLength: header.IPv4MinimumSize + 16,
- ID: ident,
- Flags: header.IPv4FlagMoreFragments,
- FragmentOffset: 0,
- TTL: ttl,
- Protocol: protocol,
- SrcAddr: addr1,
- DstAddr: addr2,
- },
- payload: []byte(data)[:16],
- },
- },
- expectICMP: true,
- },
- {
- name: "two first fragments",
- fragments: []fragmentData{
- {
- ipv4fields: header.IPv4Fields{
- TOS: tos,
- TotalLength: header.IPv4MinimumSize + 16,
- ID: ident,
- Flags: header.IPv4FlagMoreFragments,
- FragmentOffset: 0,
- TTL: ttl,
- Protocol: protocol,
- SrcAddr: addr1,
- DstAddr: addr2,
- },
- payload: []byte(data)[:16],
- },
- {
- ipv4fields: header.IPv4Fields{
- TOS: tos,
- TotalLength: header.IPv4MinimumSize + 16,
- ID: ident,
- Flags: header.IPv4FlagMoreFragments,
- FragmentOffset: 0,
- TTL: ttl,
- Protocol: protocol,
- SrcAddr: addr1,
- DstAddr: addr2,
- },
- payload: []byte(data)[:16],
- },
- },
- expectICMP: true,
- },
- {
- name: "second fragment only",
- fragments: []fragmentData{
- {
- ipv4fields: header.IPv4Fields{
- TOS: tos,
- TotalLength: uint16(header.IPv4MinimumSize + len(data) - 16),
- ID: ident,
- Flags: 0,
- FragmentOffset: 8,
- TTL: ttl,
- Protocol: protocol,
- SrcAddr: addr1,
- DstAddr: addr2,
- },
- payload: []byte(data)[16:],
- },
- },
- expectICMP: false,
- },
- {
- name: "two fragments with a gap",
- fragments: []fragmentData{
- {
- ipv4fields: header.IPv4Fields{
- TOS: tos,
- TotalLength: header.IPv4MinimumSize + 8,
- ID: ident,
- Flags: header.IPv4FlagMoreFragments,
- FragmentOffset: 0,
- TTL: ttl,
- Protocol: protocol,
- SrcAddr: addr1,
- DstAddr: addr2,
- },
- payload: []byte(data)[:8],
- },
- {
- ipv4fields: header.IPv4Fields{
- TOS: tos,
- TotalLength: uint16(header.IPv4MinimumSize + len(data) - 16),
- ID: ident,
- Flags: 0,
- FragmentOffset: 16,
- TTL: ttl,
- Protocol: protocol,
- SrcAddr: addr1,
- DstAddr: addr2,
- },
- payload: []byte(data)[16:],
- },
- },
- expectICMP: true,
- },
- {
- name: "two fragments with a gap in reverse order",
- fragments: []fragmentData{
- {
- ipv4fields: header.IPv4Fields{
- TOS: tos,
- TotalLength: uint16(header.IPv4MinimumSize + len(data) - 16),
- ID: ident,
- Flags: 0,
- FragmentOffset: 16,
- TTL: ttl,
- Protocol: protocol,
- SrcAddr: addr1,
- DstAddr: addr2,
- },
- payload: []byte(data)[16:],
- },
- {
- ipv4fields: header.IPv4Fields{
- TOS: tos,
- TotalLength: header.IPv4MinimumSize + 8,
- ID: ident,
- Flags: header.IPv4FlagMoreFragments,
- FragmentOffset: 0,
- TTL: ttl,
- Protocol: protocol,
- SrcAddr: addr1,
- DstAddr: addr2,
- },
- payload: []byte(data)[:8],
- },
- },
- expectICMP: true,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{
- ipv4.NewProtocol,
- },
- Clock: clock,
- })
- e := channel.New(1, 1500, linkAddr)
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
- if err := s.AddAddress(nicID, ipv4.ProtocolNumber, addr2); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s) = %s", nicID, header.IPv4ProtocolNumber, addr2, err)
- }
- s.SetRouteTable([]tcpip.Route{{
- Destination: header.IPv4EmptySubnet,
- NIC: nicID,
- }})
-
- var firstFragmentSent buffer.View
- for _, f := range test.fragments {
- pktSize := header.IPv4MinimumSize
- hdr := buffer.NewPrependable(pktSize)
-
- ip := header.IPv4(hdr.Prepend(pktSize))
- ip.Encode(&f.ipv4fields)
-
- ip.SetChecksum(0)
- ip.SetChecksum(^ip.CalculateChecksum())
-
- vv := hdr.View().ToVectorisedView()
- vv.AppendView(f.payload)
-
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: vv,
- })
-
- if firstFragmentSent == nil && ip.FragmentOffset() == 0 {
- firstFragmentSent = stack.PayloadSince(pkt.NetworkHeader())
- }
-
- e.InjectInbound(header.IPv4ProtocolNumber, pkt)
- }
-
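-			// Advance the clock past the reassembly timeout so that incomplete
-			// reassemblies are released and any ICMP Time Exceeded error is sent.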
- clock.Advance(ipv4.ReassembleTimeout)
-
- reply, ok := e.Read()
- if !test.expectICMP {
- if ok {
- t.Fatalf("unexpected ICMP error message received: %#v", reply)
- }
- return
- }
- if !ok {
- t.Fatal("expected ICMP error message missing")
- }
- if firstFragmentSent == nil {
- t.Fatalf("unexpected ICMP error message received: %#v", reply)
- }
-
- checker.IPv4(t, stack.PayloadSince(reply.Pkt.NetworkHeader()),
- checker.SrcAddr(addr2),
- checker.DstAddr(addr1),
- checker.IPFullLength(uint16(header.IPv4MinimumSize+header.ICMPv4MinimumSize+firstFragmentSent.Size())),
- checker.IPv4HeaderLength(header.IPv4MinimumSize),
- checker.ICMPv4(
- checker.ICMPv4Type(header.ICMPv4TimeExceeded),
- checker.ICMPv4Code(header.ICMPv4ReassemblyTimeout),
- checker.ICMPv4Checksum(),
- checker.ICMPv4Payload(firstFragmentSent),
- ),
- )
- })
- }
-}
-
-// TestReceiveFragments feeds fragments in through the incoming packet path to
-// test reassembly.
-func TestReceiveFragments(t *testing.T) {
- const (
- nicID = 1
-
-		addr1 = "\x0c\xa8\x00\x01" // 12.168.0.1
-		addr2 = "\x0c\xa8\x00\x02" // 12.168.0.2
-		addr3 = "\x0c\xa8\x00\x03" // 12.168.0.3
- )
-
- // Build and return a UDP header containing payload.
- udpGen := func(payloadLen int, multiplier uint8, src, dst tcpip.Address) buffer.View {
- payload := buffer.NewView(payloadLen)
- for i := 0; i < len(payload); i++ {
- payload[i] = uint8(i) * multiplier
- }
-
- udpLength := header.UDPMinimumSize + len(payload)
-
- hdr := buffer.NewPrependable(udpLength)
- u := header.UDP(hdr.Prepend(udpLength))
- u.Encode(&header.UDPFields{
- SrcPort: 5555,
- DstPort: 80,
- Length: uint16(udpLength),
- })
- copy(u.Payload(), payload)
- sum := header.PseudoHeaderChecksum(udp.ProtocolNumber, src, dst, uint16(udpLength))
- sum = header.Checksum(payload, sum)
- u.SetChecksum(^u.CalculateChecksum(sum))
- return hdr.View()
- }
-
-	// UDP header plus a 256-byte payload (byte values 0 through 255).
- ipv4Payload1Addr1ToAddr2 := udpGen(256, 1, addr1, addr2)
- udpPayload1Addr1ToAddr2 := ipv4Payload1Addr1ToAddr2[header.UDPMinimumSize:]
- ipv4Payload1Addr3ToAddr2 := udpGen(256, 1, addr3, addr2)
- udpPayload1Addr3ToAddr2 := ipv4Payload1Addr3ToAddr2[header.UDPMinimumSize:]
-	// UDP header plus a 128-byte payload (byte values in increments of 2).
- ipv4Payload2Addr1ToAddr2 := udpGen(128, 2, addr1, addr2)
- udpPayload2Addr1ToAddr2 := ipv4Payload2Addr1ToAddr2[header.UDPMinimumSize:]
-	// UDP header plus a 127-byte payload (byte values in increments of 3).
- // Used to test cases where the fragment blocks are not a multiple of
- // the fragment block size of 8 (RFC 791 section 3.1 page 14).
- ipv4Payload3Addr1ToAddr2 := udpGen(127, 3, addr1, addr2)
- udpPayload3Addr1ToAddr2 := ipv4Payload3Addr1ToAddr2[header.UDPMinimumSize:]
- // Used to test the max reassembled IPv4 payload length.
- ipv4Payload4Addr1ToAddr2 := udpGen(header.UDPMaximumSize-header.UDPMinimumSize, 4, addr1, addr2)
- udpPayload4Addr1ToAddr2 := ipv4Payload4Addr1ToAddr2[header.UDPMinimumSize:]
-
- type fragmentData struct {
- srcAddr tcpip.Address
- dstAddr tcpip.Address
- id uint16
- flags uint8
- fragmentOffset uint16
- payload buffer.View
- }
-
- tests := []struct {
- name string
- fragments []fragmentData
- expectedPayloads [][]byte
- }{
- {
- name: "No fragmentation",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- id: 1,
- flags: 0,
- fragmentOffset: 0,
- payload: ipv4Payload1Addr1ToAddr2,
- },
- },
- expectedPayloads: [][]byte{udpPayload1Addr1ToAddr2},
- },
- {
- name: "No fragmentation with size not a multiple of fragment block size",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- id: 1,
- flags: 0,
- fragmentOffset: 0,
- payload: ipv4Payload3Addr1ToAddr2,
- },
- },
- expectedPayloads: [][]byte{udpPayload3Addr1ToAddr2},
- },
- {
- name: "More fragments without payload",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- id: 1,
- flags: header.IPv4FlagMoreFragments,
- fragmentOffset: 0,
- payload: ipv4Payload1Addr1ToAddr2,
- },
- },
- expectedPayloads: nil,
- },
- {
- name: "Non-zero fragment offset without payload",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- id: 1,
- flags: 0,
- fragmentOffset: 8,
- payload: ipv4Payload1Addr1ToAddr2,
- },
- },
- expectedPayloads: nil,
- },
- {
- name: "Two fragments",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- id: 1,
- flags: header.IPv4FlagMoreFragments,
- fragmentOffset: 0,
- payload: ipv4Payload1Addr1ToAddr2[:64],
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- id: 1,
- flags: 0,
- fragmentOffset: 64,
- payload: ipv4Payload1Addr1ToAddr2[64:],
- },
- },
- expectedPayloads: [][]byte{udpPayload1Addr1ToAddr2},
- },
- {
- name: "Two fragments out of order",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- id: 1,
- flags: 0,
- fragmentOffset: 64,
- payload: ipv4Payload1Addr1ToAddr2[64:],
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- id: 1,
- flags: header.IPv4FlagMoreFragments,
- fragmentOffset: 0,
- payload: ipv4Payload1Addr1ToAddr2[:64],
- },
- },
- expectedPayloads: [][]byte{udpPayload1Addr1ToAddr2},
- },
- {
- name: "Two fragments with last fragment size not a multiple of fragment block size",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- id: 1,
- flags: header.IPv4FlagMoreFragments,
- fragmentOffset: 0,
- payload: ipv4Payload3Addr1ToAddr2[:64],
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- id: 1,
- flags: 0,
- fragmentOffset: 64,
- payload: ipv4Payload3Addr1ToAddr2[64:],
- },
- },
- expectedPayloads: [][]byte{udpPayload3Addr1ToAddr2},
- },
- {
- name: "Two fragments with first fragment size not a multiple of fragment block size",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- id: 1,
- flags: header.IPv4FlagMoreFragments,
- fragmentOffset: 0,
- payload: ipv4Payload3Addr1ToAddr2[:63],
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- id: 1,
- flags: 0,
- fragmentOffset: 63,
- payload: ipv4Payload3Addr1ToAddr2[63:],
- },
- },
- expectedPayloads: nil,
- },
- {
- name: "Second fragment has MoreFlags set",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- id: 1,
- flags: header.IPv4FlagMoreFragments,
- fragmentOffset: 0,
- payload: ipv4Payload1Addr1ToAddr2[:64],
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- id: 1,
- flags: header.IPv4FlagMoreFragments,
- fragmentOffset: 64,
- payload: ipv4Payload1Addr1ToAddr2[64:],
- },
- },
- expectedPayloads: nil,
- },
- {
- name: "Two fragments with different IDs",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- id: 1,
- flags: header.IPv4FlagMoreFragments,
- fragmentOffset: 0,
- payload: ipv4Payload1Addr1ToAddr2[:64],
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- id: 2,
- flags: 0,
- fragmentOffset: 64,
- payload: ipv4Payload1Addr1ToAddr2[64:],
- },
- },
- expectedPayloads: nil,
- },
- {
- name: "Two interleaved fragmented packets",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- id: 1,
- flags: header.IPv4FlagMoreFragments,
- fragmentOffset: 0,
- payload: ipv4Payload1Addr1ToAddr2[:64],
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- id: 2,
- flags: header.IPv4FlagMoreFragments,
- fragmentOffset: 0,
- payload: ipv4Payload2Addr1ToAddr2[:64],
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- id: 1,
- flags: 0,
- fragmentOffset: 64,
- payload: ipv4Payload1Addr1ToAddr2[64:],
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- id: 2,
- flags: 0,
- fragmentOffset: 64,
- payload: ipv4Payload2Addr1ToAddr2[64:],
- },
- },
- expectedPayloads: [][]byte{udpPayload1Addr1ToAddr2, udpPayload2Addr1ToAddr2},
- },
- {
- name: "Two interleaved fragmented packets from different sources but with same ID",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- id: 1,
- flags: header.IPv4FlagMoreFragments,
- fragmentOffset: 0,
- payload: ipv4Payload1Addr1ToAddr2[:64],
- },
- {
- srcAddr: addr3,
- dstAddr: addr2,
- id: 1,
- flags: header.IPv4FlagMoreFragments,
- fragmentOffset: 0,
- payload: ipv4Payload1Addr3ToAddr2[:32],
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- id: 1,
- flags: 0,
- fragmentOffset: 64,
- payload: ipv4Payload1Addr1ToAddr2[64:],
- },
- {
- srcAddr: addr3,
- dstAddr: addr2,
- id: 1,
- flags: 0,
- fragmentOffset: 32,
- payload: ipv4Payload1Addr3ToAddr2[32:],
- },
- },
- expectedPayloads: [][]byte{udpPayload1Addr1ToAddr2, udpPayload1Addr3ToAddr2},
- },
- {
- name: "Fragment without followup",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- id: 1,
- flags: header.IPv4FlagMoreFragments,
- fragmentOffset: 0,
- payload: ipv4Payload1Addr1ToAddr2[:64],
- },
- },
- expectedPayloads: nil,
- },
- {
- name: "Two fragments reassembled into a maximum UDP packet",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- id: 1,
- flags: header.IPv4FlagMoreFragments,
- fragmentOffset: 0,
- payload: ipv4Payload4Addr1ToAddr2[:65512],
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- id: 1,
- flags: 0,
- fragmentOffset: 65512,
- payload: ipv4Payload4Addr1ToAddr2[65512:],
- },
- },
- expectedPayloads: [][]byte{udpPayload4Addr1ToAddr2},
- },
- {
- name: "Two fragments with MF flag reassembled into a maximum UDP packet",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- id: 1,
- flags: header.IPv4FlagMoreFragments,
- fragmentOffset: 0,
- payload: ipv4Payload4Addr1ToAddr2[:65512],
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- id: 1,
- flags: header.IPv4FlagMoreFragments,
- fragmentOffset: 65512,
- payload: ipv4Payload4Addr1ToAddr2[65512:],
- },
- },
- expectedPayloads: nil,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
-			// Set up a stack and endpoint.
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},
- RawFactory: raw.EndpointFactory{},
- })
- e := channel.New(0, 1280, "\xf0\x00")
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
- if err := s.AddAddress(nicID, header.IPv4ProtocolNumber, addr2); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s) = %s", nicID, header.IPv4ProtocolNumber, addr2, err)
- }
-
- wq := waiter.Queue{}
- we, ch := waiter.NewChannelEntry(nil)
- wq.EventRegister(&we, waiter.ReadableEvents)
- defer wq.EventUnregister(&we)
- defer close(ch)
- ep, err := s.NewEndpoint(udp.ProtocolNumber, header.IPv4ProtocolNumber, &wq)
- if err != nil {
- t.Fatalf("NewEndpoint(%d, %d, _): %s", udp.ProtocolNumber, header.IPv4ProtocolNumber, err)
- }
- defer ep.Close()
-
- bindAddr := tcpip.FullAddress{Addr: addr2, Port: 80}
- if err := ep.Bind(bindAddr); err != nil {
- t.Fatalf("Bind(%+v): %s", bindAddr, err)
- }
-
- // Bring up a raw endpoint so we can examine network headers.
- epRaw, err := s.NewRawEndpoint(udp.ProtocolNumber, header.IPv4ProtocolNumber, &wq, true /* associated */)
- if err != nil {
- t.Fatalf("NewRawEndpoint(%d, %d, _, true): %s", udp.ProtocolNumber, header.IPv4ProtocolNumber, err)
- }
- defer epRaw.Close()
-
- // Prepare and send the fragments.
- for _, frag := range test.fragments {
- hdr := buffer.NewPrependable(header.IPv4MinimumSize)
-
- // Serialize IPv4 fixed header.
- ip := header.IPv4(hdr.Prepend(header.IPv4MinimumSize))
- ip.Encode(&header.IPv4Fields{
- TotalLength: header.IPv4MinimumSize + uint16(len(frag.payload)),
- ID: frag.id,
- Flags: frag.flags,
- FragmentOffset: frag.fragmentOffset,
- TTL: 64,
- Protocol: uint8(header.UDPProtocolNumber),
- SrcAddr: frag.srcAddr,
- DstAddr: frag.dstAddr,
- })
- ip.SetChecksum(^ip.CalculateChecksum())
-
- vv := hdr.View().ToVectorisedView()
- vv.AppendView(frag.payload)
-
- e.InjectInbound(header.IPv4ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: vv,
- }))
- }
-
- if got, want := s.Stats().UDP.PacketsReceived.Value(), uint64(len(test.expectedPayloads)); got != want {
- t.Errorf("got UDP Rx Packets = %d, want = %d", got, want)
- }
-
- for i, expectedPayload := range test.expectedPayloads {
- // Check UDP payload delivered by UDP endpoint.
- var buf bytes.Buffer
- result, err := ep.Read(&buf, tcpip.ReadOptions{})
- if err != nil {
- t.Fatalf("(i=%d) ep.Read: %s", i, err)
- }
- if diff := cmp.Diff(tcpip.ReadResult{
- Count: len(expectedPayload),
- Total: len(expectedPayload),
- }, result, checker.IgnoreCmpPath("ControlMessages")); diff != "" {
- t.Errorf("(i=%d) ep.Read: unexpected result (-want +got):\n%s", i, diff)
- }
- if diff := cmp.Diff(expectedPayload, buf.Bytes()); diff != "" {
- t.Errorf("(i=%d) ep.Read: UDP payload mismatch (-want +got):\n%s", i, diff)
- }
-
- // Check IPv4 header in packet delivered by raw endpoint.
- buf.Reset()
- result, err = epRaw.Read(&buf, tcpip.ReadOptions{})
- if err != nil {
- t.Fatalf("(i=%d) epRaw.Read: %s", i, err)
- }
-				// Reassembly does not recompute the checksum, so we use our own
-				// checks here instead of checker.IPv4.
- ip := header.IPv4(buf.Bytes())
- for _, check := range []checker.NetworkChecker{
- checker.FragmentFlags(0),
- checker.FragmentOffset(0),
- checker.IPFullLength(uint16(header.IPv4MinimumSize + header.UDPMinimumSize + len(expectedPayload))),
- } {
- check(t, []header.Network{ip})
- }
- }
-
- res, err := ep.Read(ioutil.Discard, tcpip.ReadOptions{})
- if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Fatalf("(last) got Read = (%#v, %v), want = (_, %s)", res, err, &tcpip.ErrWouldBlock{})
- }
- })
- }
-}
-
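-// TestWriteStats checks that IP write statistics (packets sent and iptables
-// Output/Postrouting drops) are updated correctly by both WritePacket and
-// WritePackets.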
-func TestWriteStats(t *testing.T) {
- const nPackets = 3
-
- tests := []struct {
- name string
- setup func(*testing.T, *stack.Stack)
- allowPackets int
- expectSent int
- expectOutputDropped int
- expectPostroutingDropped int
- expectWritten int
- }{
- {
- name: "Accept all",
- // No setup needed, tables accept everything by default.
- setup: func(*testing.T, *stack.Stack) {},
- allowPackets: math.MaxInt32,
- expectSent: nPackets,
- expectOutputDropped: 0,
- expectPostroutingDropped: 0,
- expectWritten: nPackets,
- }, {
- name: "Accept all with error",
- // No setup needed, tables accept everything by default.
- setup: func(*testing.T, *stack.Stack) {},
- allowPackets: nPackets - 1,
- expectSent: nPackets - 1,
- expectOutputDropped: 0,
- expectPostroutingDropped: 0,
- expectWritten: nPackets - 1,
- }, {
- name: "Drop all with Output chain",
- setup: func(t *testing.T, stk *stack.Stack) {
- // Install Output DROP rule.
- ipt := stk.IPTables()
- filter := ipt.GetTable(stack.FilterID, false /* ipv6 */)
- ruleIdx := filter.BuiltinChains[stack.Output]
- filter.Rules[ruleIdx].Target = &stack.DropTarget{}
- if err := ipt.ReplaceTable(stack.FilterID, filter, false /* ipv6 */); err != nil {
- t.Fatalf("failed to replace table: %s", err)
- }
- },
- allowPackets: math.MaxInt32,
- expectSent: 0,
- expectOutputDropped: nPackets,
- expectPostroutingDropped: 0,
- expectWritten: nPackets,
- }, {
- name: "Drop all with Postrouting chain",
- setup: func(t *testing.T, stk *stack.Stack) {
- ipt := stk.IPTables()
- filter := ipt.GetTable(stack.NATID, false /* ipv6 */)
- ruleIdx := filter.BuiltinChains[stack.Postrouting]
- filter.Rules[ruleIdx].Target = &stack.DropTarget{}
- if err := ipt.ReplaceTable(stack.NATID, filter, false /* ipv6 */); err != nil {
- t.Fatalf("failed to replace table: %s", err)
- }
- },
- allowPackets: math.MaxInt32,
- expectSent: 0,
- expectOutputDropped: 0,
- expectPostroutingDropped: nPackets,
- expectWritten: nPackets,
- }, {
- name: "Drop some with Output chain",
- setup: func(t *testing.T, stk *stack.Stack) {
- // Install Output DROP rule that matches only 1
- // of the 3 packets.
- ipt := stk.IPTables()
- filter := ipt.GetTable(stack.FilterID, false /* ipv6 */)
- // We'll match and DROP the last packet.
- ruleIdx := filter.BuiltinChains[stack.Output]
- filter.Rules[ruleIdx].Target = &stack.DropTarget{}
- filter.Rules[ruleIdx].Matchers = []stack.Matcher{&limitedMatcher{nPackets - 1}}
- // Make sure the next rule is ACCEPT.
- filter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}
- if err := ipt.ReplaceTable(stack.FilterID, filter, false /* ipv6 */); err != nil {
- t.Fatalf("failed to replace table: %s", err)
- }
- },
- allowPackets: math.MaxInt32,
- expectSent: nPackets - 1,
- expectOutputDropped: 1,
- expectPostroutingDropped: 0,
- expectWritten: nPackets,
- }, {
- name: "Drop some with Postrouting chain",
- setup: func(t *testing.T, stk *stack.Stack) {
- // Install Postrouting DROP rule that matches only 1
- // of the 3 packets.
- ipt := stk.IPTables()
- filter := ipt.GetTable(stack.NATID, false /* ipv6 */)
- // We'll match and DROP the last packet.
- ruleIdx := filter.BuiltinChains[stack.Postrouting]
- filter.Rules[ruleIdx].Target = &stack.DropTarget{}
- filter.Rules[ruleIdx].Matchers = []stack.Matcher{&limitedMatcher{nPackets - 1}}
- // Make sure the next rule is ACCEPT.
- filter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}
- if err := ipt.ReplaceTable(stack.NATID, filter, false /* ipv6 */); err != nil {
- t.Fatalf("failed to replace table: %s", err)
- }
- },
- allowPackets: math.MaxInt32,
- expectSent: nPackets - 1,
- expectOutputDropped: 0,
- expectPostroutingDropped: 1,
- expectWritten: nPackets,
- },
- }
-
- // Parameterize the tests to run with both WritePacket and WritePackets.
- writers := []struct {
- name string
- writePackets func(*stack.Route, stack.PacketBufferList) (int, tcpip.Error)
- }{
- {
- name: "WritePacket",
- writePackets: func(rt *stack.Route, pkts stack.PacketBufferList) (int, tcpip.Error) {
- nWritten := 0
- for pkt := pkts.Front(); pkt != nil; pkt = pkt.Next() {
- if err := rt.WritePacket(stack.NetworkHeaderParams{}, pkt); err != nil {
- return nWritten, err
- }
- nWritten++
- }
- return nWritten, nil
- },
- }, {
- name: "WritePackets",
- writePackets: func(rt *stack.Route, pkts stack.PacketBufferList) (int, tcpip.Error) {
- return rt.WritePackets(pkts, stack.NetworkHeaderParams{})
- },
- },
- }
-
- for _, writer := range writers {
- t.Run(writer.name, func(t *testing.T) {
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- ep := iptestutil.NewMockLinkEndpoint(header.IPv4MinimumMTU, &tcpip.ErrInvalidEndpointState{}, test.allowPackets)
- rt := buildRoute(t, ep)
-
- var pkts stack.PacketBufferList
- for i := 0; i < nPackets; i++ {
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: header.UDPMinimumSize + int(rt.MaxHeaderLength()),
- Data: buffer.NewView(0).ToVectorisedView(),
- })
- pkt.TransportHeader().Push(header.UDPMinimumSize)
- pkts.PushBack(pkt)
- }
-
- test.setup(t, rt.Stack())
-
- nWritten, _ := writer.writePackets(rt, pkts)
-
- if got := int(rt.Stats().IP.PacketsSent.Value()); got != test.expectSent {
- t.Errorf("got rt.Stats().IP.PacketsSent.Value() = %d, want = %d", got, test.expectSent)
- }
- if got := int(rt.Stats().IP.IPTablesOutputDropped.Value()); got != test.expectOutputDropped {
- t.Errorf("got rt.Stats().IP.IPTablesOutputDropped.Value() = %d, want = %d", got, test.expectOutputDropped)
- }
- if got := int(rt.Stats().IP.IPTablesPostroutingDropped.Value()); got != test.expectPostroutingDropped {
- t.Errorf("got rt.Stats().IP.IPTablesPostroutingDropped.Value() = %d, want = %d", got, test.expectPostroutingDropped)
- }
- if nWritten != test.expectWritten {
- t.Errorf("got nWritten = %d, want = %d", nWritten, test.expectWritten)
- }
- })
- }
- })
- }
-}
-
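-// buildRoute creates a stack with a single NIC backed by the given link
-// endpoint and returns a route from src to dst through it.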
-func buildRoute(t *testing.T, ep stack.LinkEndpoint) *stack.Route {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},
- })
- if err := s.CreateNIC(1, ep); err != nil {
- t.Fatalf("CreateNIC(1, _) failed: %s", err)
- }
- const (
- src = "\x10\x00\x00\x01"
- dst = "\x10\x00\x00\x02"
- )
- if err := s.AddAddress(1, ipv4.ProtocolNumber, src); err != nil {
- t.Fatalf("AddAddress(1, %d, %s) failed: %s", ipv4.ProtocolNumber, src, err)
- }
- {
- mask := tcpip.AddressMask(header.IPv4Broadcast)
- subnet, err := tcpip.NewSubnet(dst, mask)
- if err != nil {
- t.Fatalf("NewSubnet(%s, %s) failed: %v", dst, mask, err)
- }
- s.SetRouteTable([]tcpip.Route{{
- Destination: subnet,
- NIC: 1,
- }})
- }
- rt, err := s.FindRoute(1, src, dst, ipv4.ProtocolNumber, false /* multicastLoop */)
- if err != nil {
- t.Fatalf("FindRoute(1, %s, %s, %d, false) = %s", src, dst, ipv4.ProtocolNumber, err)
- }
- return rt
-}
-
-// limitedMatcher is an iptables matcher that matches after a certain number of
-// packets are checked against it.
-type limitedMatcher struct {
- limit int
-}
-
-// Name implements Matcher.Name.
-func (*limitedMatcher) Name() string {
- return "limitedMatcher"
-}
-
-// Match implements Matcher.Match.
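-// Match reports a match for every packet after the first lm.limit packets
-// have been checked against it.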
-func (lm *limitedMatcher) Match(stack.Hook, *stack.PacketBuffer, string, string) (bool, bool) {
- if lm.limit == 0 {
- return true, false
- }
- lm.limit--
- return false, false
-}
-
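-// TestPacketQueuing checks that outbound packets are queued while link address
-// resolution is in progress and are sent once an ARP reply is received.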
-func TestPacketQueuing(t *testing.T) {
- const nicID = 1
-
- var (
- host1NICLinkAddr = tcpip.LinkAddress("\x02\x03\x03\x04\x05\x06")
- host2NICLinkAddr = tcpip.LinkAddress("\x02\x03\x03\x04\x05\x09")
-
- host1IPv4Addr = tcpip.ProtocolAddress{
- Protocol: ipv4.ProtocolNumber,
- AddressWithPrefix: tcpip.AddressWithPrefix{
- Address: tcpip.Address(net.ParseIP("192.168.0.1").To4()),
- PrefixLen: 24,
- },
- }
- host2IPv4Addr = tcpip.ProtocolAddress{
- Protocol: ipv4.ProtocolNumber,
- AddressWithPrefix: tcpip.AddressWithPrefix{
- Address: tcpip.Address(net.ParseIP("192.168.0.2").To4()),
- PrefixLen: 8,
- },
- }
- )
-
- tests := []struct {
- name string
- rxPkt func(*channel.Endpoint)
- checkResp func(*testing.T, *channel.Endpoint)
- }{
- {
- name: "ICMP Error",
- rxPkt: func(e *channel.Endpoint) {
- hdr := buffer.NewPrependable(header.IPv4MinimumSize + header.UDPMinimumSize)
- u := header.UDP(hdr.Prepend(header.UDPMinimumSize))
- u.Encode(&header.UDPFields{
- SrcPort: 5555,
- DstPort: 80,
- Length: header.UDPMinimumSize,
- })
- sum := header.PseudoHeaderChecksum(udp.ProtocolNumber, host2IPv4Addr.AddressWithPrefix.Address, host1IPv4Addr.AddressWithPrefix.Address, header.UDPMinimumSize)
- sum = header.Checksum(nil, sum)
- u.SetChecksum(^u.CalculateChecksum(sum))
- ip := header.IPv4(hdr.Prepend(header.IPv4MinimumSize))
- ip.Encode(&header.IPv4Fields{
- TotalLength: header.IPv4MinimumSize + header.UDPMinimumSize,
- TTL: ipv4.DefaultTTL,
- Protocol: uint8(udp.ProtocolNumber),
- SrcAddr: host2IPv4Addr.AddressWithPrefix.Address,
- DstAddr: host1IPv4Addr.AddressWithPrefix.Address,
- })
- ip.SetChecksum(^ip.CalculateChecksum())
- e.InjectInbound(ipv4.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: hdr.View().ToVectorisedView(),
- }))
- },
- checkResp: func(t *testing.T, e *channel.Endpoint) {
- p, ok := e.Read()
- if !ok {
- t.Fatalf("timed out waiting for packet")
- }
- if p.Proto != header.IPv4ProtocolNumber {
- t.Errorf("got p.Proto = %d, want = %d", p.Proto, header.IPv4ProtocolNumber)
- }
- if p.Route.RemoteLinkAddress != host2NICLinkAddr {
- t.Errorf("got p.Route.RemoteLinkAddress = %s, want = %s", p.Route.RemoteLinkAddress, host2NICLinkAddr)
- }
- checker.IPv4(t, stack.PayloadSince(p.Pkt.NetworkHeader()),
- checker.SrcAddr(host1IPv4Addr.AddressWithPrefix.Address),
- checker.DstAddr(host2IPv4Addr.AddressWithPrefix.Address),
- checker.ICMPv4(
- checker.ICMPv4Type(header.ICMPv4DstUnreachable),
- checker.ICMPv4Code(header.ICMPv4PortUnreachable)))
- },
- },
-
- {
- name: "Ping",
- rxPkt: func(e *channel.Endpoint) {
- totalLen := header.IPv4MinimumSize + header.ICMPv4MinimumSize
- hdr := buffer.NewPrependable(totalLen)
- pkt := header.ICMPv4(hdr.Prepend(header.ICMPv4MinimumSize))
- pkt.SetType(header.ICMPv4Echo)
- pkt.SetCode(0)
- pkt.SetChecksum(0)
- pkt.SetChecksum(^header.Checksum(pkt, 0))
- ip := header.IPv4(hdr.Prepend(header.IPv4MinimumSize))
- ip.Encode(&header.IPv4Fields{
- TotalLength: uint16(totalLen),
- Protocol: uint8(icmp.ProtocolNumber4),
- TTL: ipv4.DefaultTTL,
- SrcAddr: host2IPv4Addr.AddressWithPrefix.Address,
- DstAddr: host1IPv4Addr.AddressWithPrefix.Address,
- })
- ip.SetChecksum(^ip.CalculateChecksum())
- e.InjectInbound(header.IPv4ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: hdr.View().ToVectorisedView(),
- }))
- },
- checkResp: func(t *testing.T, e *channel.Endpoint) {
- p, ok := e.Read()
- if !ok {
- t.Fatalf("timed out waiting for packet")
- }
- if p.Proto != header.IPv4ProtocolNumber {
- t.Errorf("got p.Proto = %d, want = %d", p.Proto, header.IPv4ProtocolNumber)
- }
- if p.Route.RemoteLinkAddress != host2NICLinkAddr {
- t.Errorf("got p.Route.RemoteLinkAddress = %s, want = %s", p.Route.RemoteLinkAddress, host2NICLinkAddr)
- }
- checker.IPv4(t, stack.PayloadSince(p.Pkt.NetworkHeader()),
- checker.SrcAddr(host1IPv4Addr.AddressWithPrefix.Address),
- checker.DstAddr(host2IPv4Addr.AddressWithPrefix.Address),
- checker.ICMPv4(
- checker.ICMPv4Type(header.ICMPv4EchoReply),
- checker.ICMPv4Code(header.ICMPv4UnusedCode)))
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- e := channel.New(1, defaultMTU, host1NICLinkAddr)
- e.LinkEPCapabilities |= stack.CapabilityResolutionRequired
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{arp.NewProtocol, ipv4.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},
- Clock: clock,
- })
-
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("s.CreateNIC(%d, _): %s", nicID, err)
- }
- if err := s.AddProtocolAddress(nicID, host1IPv4Addr); err != nil {
- t.Fatalf("s.AddProtocolAddress(%d, %#v): %s", nicID, host1IPv4Addr, err)
- }
-
- s.SetRouteTable([]tcpip.Route{
- {
- Destination: host1IPv4Addr.AddressWithPrefix.Subnet(),
- NIC: nicID,
- },
- })
-
- // Receive a packet to trigger link resolution before a response is sent.
- test.rxPkt(e)
-
-			// Wait for an ARP request since link address resolution should be
-			// performed.
- {
- clock.RunImmediatelyScheduledJobs()
- p, ok := e.Read()
- if !ok {
- t.Fatalf("timed out waiting for packet")
- }
- if p.Proto != arp.ProtocolNumber {
- t.Errorf("got p.Proto = %d, want = %d", p.Proto, arp.ProtocolNumber)
- }
- if p.Route.RemoteLinkAddress != header.EthernetBroadcastAddress {
- t.Errorf("got p.Route.RemoteLinkAddress = %s, want = %s", p.Route.RemoteLinkAddress, header.EthernetBroadcastAddress)
- }
- rep := header.ARP(p.Pkt.NetworkHeader().View())
- if got := rep.Op(); got != header.ARPRequest {
- t.Errorf("got Op() = %d, want = %d", got, header.ARPRequest)
- }
- if got := tcpip.LinkAddress(rep.HardwareAddressSender()); got != host1NICLinkAddr {
- t.Errorf("got HardwareAddressSender = %s, want = %s", got, host1NICLinkAddr)
- }
- if got := tcpip.Address(rep.ProtocolAddressSender()); got != host1IPv4Addr.AddressWithPrefix.Address {
- t.Errorf("got ProtocolAddressSender = %s, want = %s", got, host1IPv4Addr.AddressWithPrefix.Address)
- }
- if got := tcpip.Address(rep.ProtocolAddressTarget()); got != host2IPv4Addr.AddressWithPrefix.Address {
- t.Errorf("got ProtocolAddressTarget = %s, want = %s", got, host2IPv4Addr.AddressWithPrefix.Address)
- }
- }
-
- // Send an ARP reply to complete link address resolution.
- {
- hdr := buffer.View(make([]byte, header.ARPSize))
- packet := header.ARP(hdr)
- packet.SetIPv4OverEthernet()
- packet.SetOp(header.ARPReply)
- copy(packet.HardwareAddressSender(), host2NICLinkAddr)
- copy(packet.ProtocolAddressSender(), host2IPv4Addr.AddressWithPrefix.Address)
- copy(packet.HardwareAddressTarget(), host1NICLinkAddr)
- copy(packet.ProtocolAddressTarget(), host1IPv4Addr.AddressWithPrefix.Address)
- e.InjectInbound(arp.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: hdr.ToVectorisedView(),
- }))
- }
-
- // Expect the response now that the link address has resolved.
- clock.RunImmediatelyScheduledJobs()
- test.checkResp(t, e)
-
- // Since link resolution was already performed, it shouldn't be performed
- // again.
- test.rxPkt(e)
- test.checkResp(t, e)
- })
- }
-}
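
The Ping case above builds the IPv4 header and finishes it with ip.SetChecksum(^ip.CalculateChecksum()). For reference, that header checksum is just the one's-complement sum over the 20-byte header with the checksum field treated as zero; a minimal stand-alone sketch using only the standard library (not gVisor's header package):

package main

import (
	"encoding/binary"
	"fmt"
)

// ipv4HeaderChecksum returns the IPv4 header checksum: the 16-bit
// one's-complement of the one's-complement sum of the header words,
// with the checksum field itself treated as zero (RFC 791).
func ipv4HeaderChecksum(hdr []byte) uint16 {
	var sum uint32
	for i := 0; i+1 < len(hdr); i += 2 {
		if i == 10 {
			// Skip the checksum field itself.
			continue
		}
		sum += uint32(binary.BigEndian.Uint16(hdr[i : i+2]))
	}
	for sum > 0xffff {
		sum = (sum >> 16) + (sum & 0xffff)
	}
	return ^uint16(sum)
}

func main() {
	hdr := make([]byte, 20)
	hdr[0] = 0x45                              // version 4, IHL 5 (20 bytes)
	binary.BigEndian.PutUint16(hdr[2:4], 20+8) // total length: header + ICMP echo
	hdr[8] = 64                                // TTL
	hdr[9] = 1                                 // protocol: ICMP
	copy(hdr[12:16], []byte{192, 168, 0, 2})   // source
	copy(hdr[16:20], []byte{192, 168, 0, 1})   // destination
	binary.BigEndian.PutUint16(hdr[10:12], ipv4HeaderChecksum(hdr))
	fmt.Printf("header checksum: %#04x\n", binary.BigEndian.Uint16(hdr[10:12]))
}
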
-
-// TestCloseLocking tests that lock ordering is followed when closing an
-// endpoint.
-func TestCloseLocking(t *testing.T) {
- const (
- nicID1 = 1
- nicID2 = 2
-
- iterations = 1000
- )
-
- var (
- src = testutil.MustParse4("16.0.0.1")
- dst = testutil.MustParse4("16.0.0.2")
- )
-
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},
- })
-
- // Perform NAT so that the endpoint tries to search for a sibling endpoint
- // which ends up taking the protocol and endpoint lock (in that order).
- table := stack.Table{
- Rules: []stack.Rule{
- {Target: &stack.AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
- {Target: &stack.AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
- {Target: &stack.RedirectTarget{Port: 5, NetworkProtocol: header.IPv4ProtocolNumber}},
- {Target: &stack.AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
- {Target: &stack.ErrorTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
- },
- BuiltinChains: [stack.NumHooks]int{
- stack.Prerouting: 0,
- stack.Input: 1,
- stack.Forward: stack.HookUnset,
- stack.Output: 2,
- stack.Postrouting: 3,
- },
- Underflows: [stack.NumHooks]int{
- stack.Prerouting: 0,
- stack.Input: 1,
- stack.Forward: stack.HookUnset,
- stack.Output: 2,
- stack.Postrouting: 3,
- },
- }
- if err := s.IPTables().ReplaceTable(stack.NATID, table, false /* ipv6 */); err != nil {
- t.Fatalf("s.IPTables().ReplaceTable(...): %s", err)
- }
-
- e := channel.New(0, defaultMTU, "")
- if err := s.CreateNIC(nicID1, e); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID1, err)
- }
-
- if err := s.AddAddress(nicID1, ipv4.ProtocolNumber, src); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s) failed: %s", nicID1, ipv4.ProtocolNumber, src, err)
- }
-
- s.SetRouteTable([]tcpip.Route{{
- Destination: header.IPv4EmptySubnet,
- NIC: nicID1,
- }})
-
- var wq waiter.Queue
- ep, err := s.NewEndpoint(udp.ProtocolNumber, ipv4.ProtocolNumber, &wq)
- if err != nil {
- t.Fatal(err)
- }
- defer ep.Close()
-
- addr := tcpip.FullAddress{NIC: nicID1, Addr: dst, Port: 53}
- if err := ep.Connect(addr); err != nil {
- t.Errorf("ep.Connect(%#v): %s", addr, err)
- }
-
- var wg sync.WaitGroup
- defer wg.Wait()
-
- // Writing packets should trigger NAT which requires the stack to search the
- // protocol for network endpoints with the destination address.
- //
- // Creating and removing interfaces should modify the protocol and endpoint
- // which requires taking the locks of each.
- //
- // We expect the protocol > endpoint lock ordering to be followed here.
- wg.Add(2)
- go func() {
- defer wg.Done()
-
- data := []byte{1, 2, 3, 4}
-
- for i := 0; i < iterations; i++ {
- var r bytes.Reader
- r.Reset(data)
- if n, err := ep.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Errorf("ep.Write(_, _): %s", err)
- return
- } else if want := int64(len(data)); n != want {
- t.Errorf("got ep.Write(_, _) = (%d, _), want = (%d, _)", n, want)
- return
- }
- }
- }()
- go func() {
- defer wg.Done()
-
- for i := 0; i < iterations; i++ {
- if err := s.CreateNIC(nicID2, loopback.New()); err != nil {
- t.Errorf("CreateNIC(%d, _): %s", nicID2, err)
- return
- }
- if err := s.RemoveNIC(nicID2); err != nil {
- t.Errorf("RemoveNIC(%d): %s", nicID2, err)
- return
- }
- }
- }()
-}
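
TestCloseLocking above exercises the protocol-then-endpoint lock ordering by racing packet writes against NIC creation and removal. A stripped-down sketch of that stress pattern, using only the standard library (protoMu, epMu, writePath and nicPath are illustrative stand-ins, not gVisor APIs):

package main

import (
	"fmt"
	"sync"
)

// protoMu and epMu stand in for the protocol and endpoint locks; the
// invariant being exercised is that protoMu is always taken before epMu.
var (
	protoMu sync.Mutex
	epMu    sync.Mutex
)

// writePath mimics the datapath: NAT lookups take the protocol lock,
// then the endpoint lock.
func writePath() {
	protoMu.Lock()
	epMu.Lock()
	epMu.Unlock()
	protoMu.Unlock()
}

// nicPath mimics NIC creation/removal: it must follow the same
// protocol-then-endpoint order, or the two goroutines can deadlock.
func nicPath() {
	protoMu.Lock()
	epMu.Lock()
	epMu.Unlock()
	protoMu.Unlock()
}

func main() {
	const iterations = 1000
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		defer wg.Done()
		for i := 0; i < iterations; i++ {
			writePath()
		}
	}()
	go func() {
		defer wg.Done()
		for i := 0; i < iterations; i++ {
			nicPath()
		}
	}()
	wg.Wait()
	fmt.Println("no deadlock: consistent lock ordering held")
}
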
diff --git a/pkg/tcpip/network/ipv4/stats_test.go b/pkg/tcpip/network/ipv4/stats_test.go
deleted file mode 100644
index d1f9e3cf5..000000000
--- a/pkg/tcpip/network/ipv4/stats_test.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ipv4
-
-import (
- "reflect"
- "testing"
-
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/testutil"
-)
-
-var _ stack.NetworkInterface = (*testInterface)(nil)
-
-type testInterface struct {
- stack.NetworkInterface
- nicID tcpip.NICID
-}
-
-func (t *testInterface) ID() tcpip.NICID {
- return t.nicID
-}
-
-func knownNICIDs(proto *protocol) []tcpip.NICID {
- var nicIDs []tcpip.NICID
-
- for k := range proto.mu.eps {
- nicIDs = append(nicIDs, k)
- }
-
- return nicIDs
-}
-
-func TestClearEndpointFromProtocolOnClose(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},
- })
- proto := s.NetworkProtocolInstance(ProtocolNumber).(*protocol)
- nic := testInterface{nicID: 1}
- ep := proto.NewEndpoint(&nic, nil).(*endpoint)
- var nicIDs []tcpip.NICID
-
- proto.mu.Lock()
- foundEP, hasEndpointBeforeClose := proto.mu.eps[nic.ID()]
- nicIDs = knownNICIDs(proto)
- proto.mu.Unlock()
-
- if !hasEndpointBeforeClose {
- t.Fatalf("expected to find the nic id %d in the protocol's endpoint map (%v)", nic.ID(), nicIDs)
- }
- if foundEP != ep {
- t.Fatalf("found an incorrect endpoint mapped to nic id %d", nic.ID())
- }
-
- ep.Close()
-
- proto.mu.Lock()
- _, hasEP := proto.mu.eps[nic.ID()]
- nicIDs = knownNICIDs(proto)
- proto.mu.Unlock()
- if hasEP {
- t.Fatalf("unexpectedly found an endpoint mapped to the nic id %d in the protocol's known nic ids (%v)", nic.ID(), nicIDs)
- }
-}
-
-func TestMultiCounterStatsInitialization(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},
- })
- proto := s.NetworkProtocolInstance(ProtocolNumber).(*protocol)
- var nic testInterface
- ep := proto.NewEndpoint(&nic, nil).(*endpoint)
- // At this point, the Stack's stats and the NetworkEndpoint's stats are
- // expected to be bound by a MultiCounterStat.
- refStack := s.Stats()
- refEP := ep.stats.localStats
- if err := testutil.ValidateMultiCounterStats(reflect.ValueOf(&ep.stats.ip).Elem(), []reflect.Value{reflect.ValueOf(&refEP.IP).Elem(), reflect.ValueOf(&refStack.IP).Elem()}); err != nil {
- t.Error(err)
- }
- if err := testutil.ValidateMultiCounterStats(reflect.ValueOf(&ep.stats.icmp).Elem(), []reflect.Value{reflect.ValueOf(&refEP.ICMP).Elem(), reflect.ValueOf(&refStack.ICMP.V4).Elem()}); err != nil {
- t.Error(err)
- }
- if err := testutil.ValidateMultiCounterStats(reflect.ValueOf(&ep.stats.igmp).Elem(), []reflect.Value{reflect.ValueOf(&refEP.IGMP).Elem(), reflect.ValueOf(&refStack.IGMP).Elem()}); err != nil {
- t.Error(err)
- }
-}
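
TestMultiCounterStatsInitialization above checks that every endpoint-level counter is wired to both the endpoint-local and the stack-wide stats. A rough sketch of that fan-out idea with only the standard library (counter and multiCounter here are illustrative, not tcpip.StatCounter or the generated multi-counter types):

package main

import (
	"fmt"
	"sync/atomic"
)

// counter is a stand-in for a single stat counter.
type counter struct{ v uint64 }

func (c *counter) Increment()    { atomic.AddUint64(&c.v, 1) }
func (c *counter) Value() uint64 { return atomic.LoadUint64(&c.v) }

// multiCounter fans a single increment out to several underlying
// counters, e.g. an endpoint-local stat and the stack-wide stat.
type multiCounter struct{ counters []*counter }

func (m *multiCounter) Init(cs ...*counter) { m.counters = cs }
func (m *multiCounter) Increment() {
	for _, c := range m.counters {
		c.Increment()
	}
}

func main() {
	var endpointStat, stackStat counter
	var packetsReceived multiCounter
	packetsReceived.Init(&endpointStat, &stackStat)

	packetsReceived.Increment()
	fmt.Println(endpointStat.Value(), stackStat.Value()) // 1 1
}
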
diff --git a/pkg/tcpip/network/ipv6/BUILD b/pkg/tcpip/network/ipv6/BUILD
deleted file mode 100644
index f99cbf8f3..000000000
--- a/pkg/tcpip/network/ipv6/BUILD
+++ /dev/null
@@ -1,72 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "ipv6",
- srcs = [
- "dhcpv6configurationfromndpra_string.go",
- "icmp.go",
- "ipv6.go",
- "mld.go",
- "ndp.go",
- "stats.go",
- ],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/sync",
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/header",
- "//pkg/tcpip/header/parse",
- "//pkg/tcpip/network/hash",
- "//pkg/tcpip/network/internal/fragmentation",
- "//pkg/tcpip/network/internal/ip",
- "//pkg/tcpip/stack",
- ],
-)
-
-go_test(
- name = "ipv6_test",
- size = "small",
- srcs = [
- "icmp_test.go",
- "ipv6_test.go",
- "ndp_test.go",
- ],
- library = ":ipv6",
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/checker",
- "//pkg/tcpip/faketime",
- "//pkg/tcpip/header",
- "//pkg/tcpip/link/channel",
- "//pkg/tcpip/link/sniffer",
- "//pkg/tcpip/network/internal/testutil",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/testutil",
- "//pkg/tcpip/transport/icmp",
- "//pkg/tcpip/transport/tcp",
- "//pkg/tcpip/transport/udp",
- "//pkg/waiter",
- "@com_github_google_go_cmp//cmp:go_default_library",
- ],
-)
-
-go_test(
- name = "ipv6_x_test",
- size = "small",
- srcs = ["mld_test.go"],
- deps = [
- ":ipv6",
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/checker",
- "//pkg/tcpip/faketime",
- "//pkg/tcpip/header",
- "//pkg/tcpip/link/channel",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/testutil",
- ],
-)
diff --git a/pkg/tcpip/network/ipv6/icmp_test.go b/pkg/tcpip/network/ipv6/icmp_test.go
deleted file mode 100644
index 7c2a3e56b..000000000
--- a/pkg/tcpip/network/ipv6/icmp_test.go
+++ /dev/null
@@ -1,1730 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ipv6
-
-import (
- "bytes"
- "net"
- "reflect"
- "strings"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/checker"
- "gvisor.dev/gvisor/pkg/tcpip/faketime"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/link/channel"
- "gvisor.dev/gvisor/pkg/tcpip/link/sniffer"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/transport/icmp"
- "gvisor.dev/gvisor/pkg/tcpip/transport/udp"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-const (
- nicID = 1
-
- linkAddr0 = tcpip.LinkAddress("\x02\x02\x03\x04\x05\x06")
- linkAddr1 = tcpip.LinkAddress("\x0a\x0b\x0c\x0d\x0e\x0e")
- linkAddr2 = tcpip.LinkAddress("\x0a\x0b\x0c\x0d\x0e\x0f")
-
- defaultChannelSize = 1
- defaultMTU = 65536
-
- arbitraryHopLimit = 42
-)
-
-var (
- lladdr0 = header.LinkLocalAddr(linkAddr0)
- lladdr1 = header.LinkLocalAddr(linkAddr1)
-)
-
-type stubLinkEndpoint struct {
- stack.LinkEndpoint
-}
-
-func (*stubLinkEndpoint) MTU() uint32 {
- return defaultMTU
-}
-
-func (*stubLinkEndpoint) Capabilities() stack.LinkEndpointCapabilities {
- // Indicate that resolution for link layer addresses is required to send
- // packets over this link. This is needed so the NIC knows to allocate a
- // neighbor table.
- return stack.CapabilityResolutionRequired
-}
-
-func (*stubLinkEndpoint) MaxHeaderLength() uint16 {
- return 0
-}
-
-func (*stubLinkEndpoint) LinkAddress() tcpip.LinkAddress {
- return ""
-}
-
-func (*stubLinkEndpoint) WritePacket(stack.RouteInfo, tcpip.NetworkProtocolNumber, *stack.PacketBuffer) tcpip.Error {
- return nil
-}
-
-func (*stubLinkEndpoint) Attach(stack.NetworkDispatcher) {}
-
-type stubDispatcher struct {
- stack.TransportDispatcher
-}
-
-func (*stubDispatcher) DeliverTransportPacket(tcpip.TransportProtocolNumber, *stack.PacketBuffer) stack.TransportPacketDisposition {
- return stack.TransportPacketHandled
-}
-
-func (*stubDispatcher) DeliverRawPacket(tcpip.TransportProtocolNumber, *stack.PacketBuffer) {
- // No-op.
-}
-
-var _ stack.NetworkInterface = (*testInterface)(nil)
-
-type testInterface struct {
- stack.LinkEndpoint
-
- probeCount int
- confirmationCount int
-
- nicID tcpip.NICID
-}
-
-func (*testInterface) ID() tcpip.NICID {
- return nicID
-}
-
-func (*testInterface) IsLoopback() bool {
- return false
-}
-
-func (*testInterface) Name() string {
- return ""
-}
-
-func (*testInterface) Enabled() bool {
- return true
-}
-
-func (*testInterface) Promiscuous() bool {
- return false
-}
-
-func (*testInterface) Spoofing() bool {
- return false
-}
-
-func (t *testInterface) WritePacket(r *stack.Route, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) tcpip.Error {
- return t.LinkEndpoint.WritePacket(r.Fields(), protocol, pkt)
-}
-
-func (t *testInterface) WritePackets(r *stack.Route, pkts stack.PacketBufferList, protocol tcpip.NetworkProtocolNumber) (int, tcpip.Error) {
- return t.LinkEndpoint.WritePackets(r.Fields(), pkts, protocol)
-}
-
-func (t *testInterface) WritePacketToRemote(remoteLinkAddr tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) tcpip.Error {
- var r stack.RouteInfo
- r.NetProto = protocol
- r.RemoteLinkAddress = remoteLinkAddr
- return t.LinkEndpoint.WritePacket(r, protocol, pkt)
-}
-
-func (t *testInterface) HandleNeighborProbe(tcpip.NetworkProtocolNumber, tcpip.Address, tcpip.LinkAddress) tcpip.Error {
- t.probeCount++
- return nil
-}
-
-func (t *testInterface) HandleNeighborConfirmation(tcpip.NetworkProtocolNumber, tcpip.Address, tcpip.LinkAddress, stack.ReachabilityConfirmationFlags) tcpip.Error {
- t.confirmationCount++
- return nil
-}
-
-func (*testInterface) PrimaryAddress(tcpip.NetworkProtocolNumber) (tcpip.AddressWithPrefix, tcpip.Error) {
- return tcpip.AddressWithPrefix{}, nil
-}
-
-func (*testInterface) CheckLocalAddress(tcpip.NetworkProtocolNumber, tcpip.Address) bool {
- return false
-}
-
-func handleICMPInIPv6(ep stack.NetworkEndpoint, src, dst tcpip.Address, icmp header.ICMPv6, hopLimit uint8, includeRouterAlert bool) {
- var extensionHeaders header.IPv6ExtHdrSerializer
- if includeRouterAlert {
- extensionHeaders = header.IPv6ExtHdrSerializer{
- header.IPv6SerializableHopByHopExtHdr{
- &header.IPv6RouterAlertOption{Value: header.IPv6RouterAlertMLD},
- },
- }
- }
- ip := buffer.NewView(header.IPv6MinimumSize + extensionHeaders.Length())
- header.IPv6(ip).Encode(&header.IPv6Fields{
- PayloadLength: uint16(len(icmp)),
- TransportProtocol: header.ICMPv6ProtocolNumber,
- HopLimit: hopLimit,
- SrcAddr: src,
- DstAddr: dst,
- ExtensionHeaders: extensionHeaders,
- })
-
- vv := ip.ToVectorisedView()
- vv.AppendView(buffer.View(icmp))
- ep.HandlePacket(stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: vv,
- }))
-}
-
-func TestICMPCounts(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{icmp.NewProtocol6},
- })
- if err := s.CreateNIC(nicID, &stubLinkEndpoint{}); err != nil {
- t.Fatalf("CreateNIC(_, _) = %s", err)
- }
- {
- subnet, err := tcpip.NewSubnet(lladdr1, tcpip.AddressMask(strings.Repeat("\xff", len(lladdr1))))
- if err != nil {
- t.Fatal(err)
- }
- s.SetRouteTable(
- []tcpip.Route{{
- Destination: subnet,
- NIC: nicID,
- }},
- )
- }
-
- netProto := s.NetworkProtocolInstance(ProtocolNumber)
- if netProto == nil {
- t.Fatalf("cannot find protocol instance for network protocol %d", ProtocolNumber)
- }
- ep := netProto.NewEndpoint(&testInterface{}, &stubDispatcher{})
- defer ep.Close()
-
- if err := ep.Enable(); err != nil {
- t.Fatalf("ep.Enable(): %s", err)
- }
-
- addressableEndpoint, ok := ep.(stack.AddressableEndpoint)
- if !ok {
- t.Fatalf("expected network endpoint to implement stack.AddressableEndpoint")
- }
- addr := lladdr0.WithPrefix()
- if ep, err := addressableEndpoint.AddAndAcquirePermanentAddress(addr, stack.CanBePrimaryEndpoint, stack.AddressConfigStatic, false /* deprecated */); err != nil {
- t.Fatalf("addressableEndpoint.AddAndAcquirePermanentAddress(%s, CanBePrimaryEndpoint, AddressConfigStatic, false): %s", addr, err)
- } else {
- ep.DecRef()
- }
-
- var tllData [header.NDPLinkLayerAddressSize]byte
- header.NDPOptions(tllData[:]).Serialize(header.NDPOptionsSerializer{
- header.NDPTargetLinkLayerAddressOption(linkAddr1),
- })
-
- types := []struct {
- typ header.ICMPv6Type
- hopLimit uint8
- includeRouterAlert bool
- size int
- extraData []byte
- }{
- {
- typ: header.ICMPv6DstUnreachable,
- hopLimit: arbitraryHopLimit,
- size: header.ICMPv6DstUnreachableMinimumSize,
- },
- {
- typ: header.ICMPv6PacketTooBig,
- hopLimit: arbitraryHopLimit,
- size: header.ICMPv6PacketTooBigMinimumSize,
- },
- {
- typ: header.ICMPv6TimeExceeded,
- hopLimit: arbitraryHopLimit,
- size: header.ICMPv6MinimumSize,
- },
- {
- typ: header.ICMPv6ParamProblem,
- hopLimit: arbitraryHopLimit,
- size: header.ICMPv6MinimumSize,
- },
- {
- typ: header.ICMPv6EchoRequest,
- hopLimit: arbitraryHopLimit,
- size: header.ICMPv6EchoMinimumSize,
- },
- {
- typ: header.ICMPv6EchoReply,
- hopLimit: arbitraryHopLimit,
- size: header.ICMPv6EchoMinimumSize,
- },
- {
- typ: header.ICMPv6RouterSolicit,
- hopLimit: header.NDPHopLimit,
- size: header.ICMPv6MinimumSize,
- },
- {
- typ: header.ICMPv6RouterAdvert,
- hopLimit: header.NDPHopLimit,
- size: header.ICMPv6HeaderSize + header.NDPRAMinimumSize,
- },
- {
- typ: header.ICMPv6NeighborSolicit,
- hopLimit: header.NDPHopLimit,
- size: header.ICMPv6NeighborSolicitMinimumSize,
- },
- {
- typ: header.ICMPv6NeighborAdvert,
- hopLimit: header.NDPHopLimit,
- size: header.ICMPv6NeighborAdvertMinimumSize,
- extraData: tllData[:],
- },
- {
- typ: header.ICMPv6RedirectMsg,
- hopLimit: header.NDPHopLimit,
- size: header.ICMPv6MinimumSize,
- },
- {
- typ: header.ICMPv6MulticastListenerQuery,
- hopLimit: header.MLDHopLimit,
- includeRouterAlert: true,
- size: header.MLDMinimumSize + header.ICMPv6HeaderSize,
- },
- {
- typ: header.ICMPv6MulticastListenerReport,
- hopLimit: header.MLDHopLimit,
- includeRouterAlert: true,
- size: header.MLDMinimumSize + header.ICMPv6HeaderSize,
- },
- {
- typ: header.ICMPv6MulticastListenerDone,
- hopLimit: header.MLDHopLimit,
- includeRouterAlert: true,
- size: header.MLDMinimumSize + header.ICMPv6HeaderSize,
- },
- {
- typ: 255, /* Unrecognized */
- size: 50,
- },
- }
-
- for _, typ := range types {
- icmp := header.ICMPv6(buffer.NewView(typ.size + len(typ.extraData)))
- copy(icmp[typ.size:], typ.extraData)
- icmp.SetType(typ.typ)
- icmp.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
- Header: icmp[:typ.size],
- Src: lladdr0,
- Dst: lladdr1,
- PayloadCsum: header.Checksum(typ.extraData, 0 /* initial */),
- PayloadLen: len(typ.extraData),
- }))
- handleICMPInIPv6(ep, lladdr1, lladdr0, icmp, typ.hopLimit, typ.includeRouterAlert)
- }
-
- // Construct an empty ICMP packet so that
- // Stats().ICMP.ICMPv6ReceivedPacketStats.Invalid is incremented.
- handleICMPInIPv6(ep, lladdr1, lladdr0, header.ICMPv6(buffer.NewView(header.IPv6MinimumSize)), arbitraryHopLimit, false)
-
- icmpv6Stats := s.Stats().ICMP.V6.PacketsReceived
- visitStats(reflect.ValueOf(&icmpv6Stats).Elem(), func(name string, s *tcpip.StatCounter) {
- if got, want := s.Value(), uint64(1); got != want {
- t.Errorf("got %s = %d, want = %d", name, got, want)
- }
- })
- if t.Failed() {
- t.Logf("stats:\n%+v", s.Stats())
- }
-}
-
-func visitStats(v reflect.Value, f func(string, *tcpip.StatCounter)) {
- t := v.Type()
- for i := 0; i < v.NumField(); i++ {
- v := v.Field(i)
- if s, ok := v.Interface().(*tcpip.StatCounter); ok {
- f(t.Field(i).Name, s)
- } else {
- visitStats(v, f)
- }
- }
-}
-
-type testContext struct {
- s0 *stack.Stack
- s1 *stack.Stack
-
- linkEP0 *channel.Endpoint
- linkEP1 *channel.Endpoint
-
- clock *faketime.ManualClock
-}
-
-type endpointWithResolutionCapability struct {
- stack.LinkEndpoint
-}
-
-func (e endpointWithResolutionCapability) Capabilities() stack.LinkEndpointCapabilities {
- return e.LinkEndpoint.Capabilities() | stack.CapabilityResolutionRequired
-}
-
-func newTestContext(t *testing.T) *testContext {
- clock := faketime.NewManualClock()
- c := &testContext{
- s0: stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{icmp.NewProtocol6},
- Clock: clock,
- }),
- s1: stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{icmp.NewProtocol6},
- Clock: clock,
- }),
- clock: clock,
- }
-
- c.linkEP0 = channel.New(defaultChannelSize, defaultMTU, linkAddr0)
-
- wrappedEP0 := stack.LinkEndpoint(endpointWithResolutionCapability{LinkEndpoint: c.linkEP0})
- if testing.Verbose() {
- wrappedEP0 = sniffer.New(wrappedEP0)
- }
- if err := c.s0.CreateNIC(nicID, wrappedEP0); err != nil {
- t.Fatalf("CreateNIC s0: %v", err)
- }
- if err := c.s0.AddAddress(nicID, ProtocolNumber, lladdr0); err != nil {
- t.Fatalf("AddAddress lladdr0: %v", err)
- }
-
- c.linkEP1 = channel.New(defaultChannelSize, defaultMTU, linkAddr1)
- wrappedEP1 := stack.LinkEndpoint(endpointWithResolutionCapability{LinkEndpoint: c.linkEP1})
- if err := c.s1.CreateNIC(nicID, wrappedEP1); err != nil {
- t.Fatalf("CreateNIC failed: %v", err)
- }
- if err := c.s1.AddAddress(nicID, ProtocolNumber, lladdr1); err != nil {
- t.Fatalf("AddAddress lladdr1: %v", err)
- }
-
- subnet0, err := tcpip.NewSubnet(lladdr1, tcpip.AddressMask(strings.Repeat("\xff", len(lladdr1))))
- if err != nil {
- t.Fatal(err)
- }
- c.s0.SetRouteTable(
- []tcpip.Route{{
- Destination: subnet0,
- NIC: nicID,
- }},
- )
- subnet1, err := tcpip.NewSubnet(lladdr0, tcpip.AddressMask(strings.Repeat("\xff", len(lladdr0))))
- if err != nil {
- t.Fatal(err)
- }
- c.s1.SetRouteTable(
- []tcpip.Route{{
- Destination: subnet1,
- NIC: nicID,
- }},
- )
-
- t.Cleanup(func() {
- if err := c.s0.RemoveNIC(nicID); err != nil {
- t.Errorf("c.s0.RemoveNIC(%d): %s", nicID, err)
- }
- if err := c.s1.RemoveNIC(nicID); err != nil {
- t.Errorf("c.s1.RemoveNIC(%d): %s", nicID, err)
- }
-
- c.linkEP0.Close()
- c.linkEP1.Close()
- })
-
- return c
-}
-
-type routeArgs struct {
- src, dst *channel.Endpoint
- typ header.ICMPv6Type
- remoteLinkAddr tcpip.LinkAddress
-}
-
-func routeICMPv6Packet(t *testing.T, clock *faketime.ManualClock, args routeArgs, fn func(*testing.T, header.ICMPv6)) {
- t.Helper()
-
- clock.RunImmediatelyScheduledJobs()
- pi, ok := args.src.Read()
- if !ok {
- t.Fatal("packet didn't arrive")
- }
-
- {
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buffer.NewVectorisedView(pi.Pkt.Size(), pi.Pkt.Views()),
- })
- args.dst.InjectLinkAddr(pi.Proto, args.dst.LinkAddress(), pkt)
- }
-
- if pi.Proto != ProtocolNumber {
- t.Errorf("unexpected protocol number %d", pi.Proto)
- return
- }
-
- if len(args.remoteLinkAddr) != 0 && pi.Route.RemoteLinkAddress != args.remoteLinkAddr {
- t.Errorf("got remote link address = %s, want = %s", pi.Route.RemoteLinkAddress, args.remoteLinkAddr)
- }
-
-	// Pull the full payload starting at the network header; this is needed for
-	// header.IPv6 to extract its payload.
- ipv6 := header.IPv6(stack.PayloadSince(pi.Pkt.NetworkHeader()))
- transProto := tcpip.TransportProtocolNumber(ipv6.NextHeader())
- if transProto != header.ICMPv6ProtocolNumber {
- t.Errorf("unexpected transport protocol number %d", transProto)
- return
- }
- icmpv6 := header.ICMPv6(ipv6.Payload())
- if got, want := icmpv6.Type(), args.typ; got != want {
- t.Errorf("got ICMPv6 type = %d, want = %d", got, want)
- return
- }
- if fn != nil {
- fn(t, icmpv6)
- }
-}
-
-func TestLinkResolution(t *testing.T) {
- c := newTestContext(t)
-
- r, err := c.s0.FindRoute(nicID, lladdr0, lladdr1, ProtocolNumber, false /* multicastLoop */)
- if err != nil {
- t.Fatalf("FindRoute(%d, %s, %s, _, false) = (_, %s), want = (_, nil)", nicID, lladdr0, lladdr1, err)
- }
- defer r.Release()
-
- hdr := buffer.NewPrependable(int(r.MaxHeaderLength()) + header.IPv6MinimumSize + header.ICMPv6EchoMinimumSize)
- pkt := header.ICMPv6(hdr.Prepend(header.ICMPv6EchoMinimumSize))
- pkt.SetType(header.ICMPv6EchoRequest)
- pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
- Header: pkt,
- Src: r.LocalAddress(),
- Dst: r.RemoteAddress(),
- }))
-
- // We can't send our payload directly over the route because that
- // doesn't provoke NDP discovery.
- var wq waiter.Queue
- ep, err := c.s0.NewEndpoint(header.ICMPv6ProtocolNumber, ProtocolNumber, &wq)
- if err != nil {
- t.Fatalf("NewEndpoint(_) = (_, %s), want = (_, nil)", err)
- }
-
- {
- var r bytes.Reader
- r.Reset(hdr.View())
- if _, err := ep.Write(&r, tcpip.WriteOptions{To: &tcpip.FullAddress{NIC: nicID, Addr: lladdr1}}); err != nil {
- t.Fatalf("ep.Write(_): %s", err)
- }
- }
- for _, args := range []routeArgs{
- {src: c.linkEP0, dst: c.linkEP1, typ: header.ICMPv6NeighborSolicit, remoteLinkAddr: header.EthernetAddressFromMulticastIPv6Address(header.SolicitedNodeAddr(lladdr1))},
- {src: c.linkEP1, dst: c.linkEP0, typ: header.ICMPv6NeighborAdvert},
- } {
- routeICMPv6Packet(t, c.clock, args, func(t *testing.T, icmpv6 header.ICMPv6) {
- if got, want := tcpip.Address(icmpv6[8:][:16]), lladdr1; got != want {
- t.Errorf("%d: got target = %s, want = %s", icmpv6.Type(), got, want)
- }
- })
- }
-
- for _, args := range []routeArgs{
- {src: c.linkEP0, dst: c.linkEP1, typ: header.ICMPv6EchoRequest},
- {src: c.linkEP1, dst: c.linkEP0, typ: header.ICMPv6EchoReply},
- } {
- routeICMPv6Packet(t, c.clock, args, nil)
- }
-}
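
TestLinkResolution above expects the Neighbor Solicitation to go to the solicited-node multicast group for lladdr1 and to the Ethernet multicast address derived from it. A small stand-alone sketch of both derivations with only the standard library (these are not gVisor's header.SolicitedNodeAddr or header.EthernetAddressFromMulticastIPv6Address, just the textbook RFC 4291 / RFC 2464 mappings):

package main

import (
	"fmt"
	"net"
)

// solicitedNodeAddr returns the solicited-node multicast address for an
// IPv6 address: ff02::1:ff00:0 with the low 24 bits of the target
// appended (RFC 4291 section 2.7.1).
func solicitedNodeAddr(target net.IP) net.IP {
	snm := net.ParseIP("ff02::1:ff00:0")
	copy(snm[13:], target.To16()[13:]) // last 3 bytes of the target
	return snm
}

// etherMulticastForIPv6 maps an IPv6 multicast address to its Ethernet
// multicast MAC: 33:33 followed by the low 32 bits (RFC 2464 section 7).
func etherMulticastForIPv6(ip net.IP) net.HardwareAddr {
	b := ip.To16()
	return net.HardwareAddr{0x33, 0x33, b[12], b[13], b[14], b[15]}
}

func main() {
	target := net.ParseIP("fe80::1234:5678:9abc") // an example link-local address
	snm := solicitedNodeAddr(target)
	fmt.Println(snm)                        // ff02::1:ff78:9abc
	fmt.Println(etherMulticastForIPv6(snm)) // 33:33:ff:78:9a:bc
}
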
-
-func TestICMPChecksumValidationSimple(t *testing.T) {
- var tllData [header.NDPLinkLayerAddressSize]byte
- header.NDPOptions(tllData[:]).Serialize(header.NDPOptionsSerializer{
- header.NDPTargetLinkLayerAddressOption(linkAddr1),
- })
-
- types := []struct {
- name string
- typ header.ICMPv6Type
- size int
- extraData []byte
- statCounter func(tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter
- routerOnly bool
- }{
- {
- name: "DstUnreachable",
- typ: header.ICMPv6DstUnreachable,
- size: header.ICMPv6DstUnreachableMinimumSize,
- statCounter: func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
- return stats.DstUnreachable
- },
- },
- {
- name: "PacketTooBig",
- typ: header.ICMPv6PacketTooBig,
- size: header.ICMPv6PacketTooBigMinimumSize,
- statCounter: func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
- return stats.PacketTooBig
- },
- },
- {
- name: "TimeExceeded",
- typ: header.ICMPv6TimeExceeded,
- size: header.ICMPv6MinimumSize,
- statCounter: func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
- return stats.TimeExceeded
- },
- },
- {
- name: "ParamProblem",
- typ: header.ICMPv6ParamProblem,
- size: header.ICMPv6MinimumSize,
- statCounter: func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
- return stats.ParamProblem
- },
- },
- {
- name: "EchoRequest",
- typ: header.ICMPv6EchoRequest,
- size: header.ICMPv6EchoMinimumSize,
- statCounter: func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
- return stats.EchoRequest
- },
- },
- {
- name: "EchoReply",
- typ: header.ICMPv6EchoReply,
- size: header.ICMPv6EchoMinimumSize,
- statCounter: func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
- return stats.EchoReply
- },
- },
- {
- name: "RouterSolicit",
- typ: header.ICMPv6RouterSolicit,
- size: header.ICMPv6MinimumSize,
- statCounter: func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
- return stats.RouterSolicit
- },
- // Hosts MUST silently discard any received Router Solicitation messages.
- routerOnly: true,
- },
- {
- name: "RouterAdvert",
- typ: header.ICMPv6RouterAdvert,
- size: header.ICMPv6HeaderSize + header.NDPRAMinimumSize,
- statCounter: func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
- return stats.RouterAdvert
- },
- },
- {
- name: "NeighborSolicit",
- typ: header.ICMPv6NeighborSolicit,
- size: header.ICMPv6NeighborSolicitMinimumSize,
- statCounter: func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
- return stats.NeighborSolicit
- },
- },
- {
- name: "NeighborAdvert",
- typ: header.ICMPv6NeighborAdvert,
- size: header.ICMPv6NeighborAdvertMinimumSize,
- extraData: tllData[:],
- statCounter: func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
- return stats.NeighborAdvert
- },
- },
- {
- name: "RedirectMsg",
- typ: header.ICMPv6RedirectMsg,
- size: header.ICMPv6MinimumSize,
- statCounter: func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
- return stats.RedirectMsg
- },
- },
- }
-
- for _, typ := range types {
- for _, isRouter := range []bool{false, true} {
- name := typ.name
- if isRouter {
- name += " (Router)"
- }
- t.Run(name, func(t *testing.T) {
- e := channel.New(0, 1280, linkAddr0)
-
- // Indicate that resolution for link layer addresses is required to
- // send packets over this link. This is needed so the NIC knows to
- // allocate a neighbor table.
- e.LinkEPCapabilities |= stack.CapabilityResolutionRequired
-
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},
- })
- if isRouter {
- if err := s.SetForwardingDefaultAndAllNICs(ProtocolNumber, true); err != nil {
- t.Fatalf("SetForwardingDefaultAndAllNICs(%d, true): %s", ProtocolNumber, err)
- }
- }
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(_, _) = %s", err)
- }
-
- if err := s.AddAddress(nicID, ProtocolNumber, lladdr0); err != nil {
- t.Fatalf("AddAddress(_, %d, %s) = %s", ProtocolNumber, lladdr0, err)
- }
- {
- subnet, err := tcpip.NewSubnet(lladdr1, tcpip.AddressMask(strings.Repeat("\xff", len(lladdr1))))
- if err != nil {
- t.Fatal(err)
- }
- s.SetRouteTable(
- []tcpip.Route{{
- Destination: subnet,
- NIC: nicID,
- }},
- )
- }
-
- handleIPv6Payload := func(checksum bool) {
- icmp := header.ICMPv6(buffer.NewView(typ.size + len(typ.extraData)))
- copy(icmp[typ.size:], typ.extraData)
- icmp.SetType(typ.typ)
- if checksum {
- icmp.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
- Header: icmp,
- Src: lladdr1,
- Dst: lladdr0,
- }))
- }
- ip := header.IPv6(buffer.NewView(header.IPv6MinimumSize))
- ip.Encode(&header.IPv6Fields{
- PayloadLength: uint16(len(icmp)),
- TransportProtocol: header.ICMPv6ProtocolNumber,
- HopLimit: header.NDPHopLimit,
- SrcAddr: lladdr1,
- DstAddr: lladdr0,
- })
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buffer.NewVectorisedView(len(ip)+len(icmp), []buffer.View{buffer.View(ip), buffer.View(icmp)}),
- })
- e.InjectInbound(ProtocolNumber, pkt)
- }
-
- stats := s.Stats().ICMP.V6.PacketsReceived
- invalid := stats.Invalid
- routerOnly := stats.RouterOnlyPacketsDroppedByHost
- typStat := typ.statCounter(stats)
-
- // Initial stat counts should be 0.
- if got := invalid.Value(); got != 0 {
- t.Fatalf("got invalid = %d, want = 0", got)
- }
- if got := routerOnly.Value(); got != 0 {
-				t.Fatalf("got RouterOnlyPacketsDroppedByHost = %d, want = 0", got)
- }
- if got := typStat.Value(); got != 0 {
- t.Fatalf("got %s = %d, want = 0", typ.name, got)
- }
-
- // Without setting checksum, the incoming packet should
- // be invalid.
- handleIPv6Payload(false)
- if got := invalid.Value(); got != 1 {
- t.Fatalf("got invalid = %d, want = 1", got)
- }
- // Router only count should not have increased.
- if got := routerOnly.Value(); got != 0 {
-				t.Fatalf("got RouterOnlyPacketsDroppedByHost = %d, want = 0", got)
- }
- // Rx count of type typ.typ should not have increased.
- if got := typStat.Value(); got != 0 {
- t.Fatalf("got %s = %d, want = 0", typ.name, got)
- }
-
- // When checksum is set, it should be received.
- handleIPv6Payload(true)
- if got := typStat.Value(); got != 1 {
- t.Fatalf("got %s = %d, want = 1", typ.name, got)
- }
- // Invalid count should not have increased again.
- if got := invalid.Value(); got != 1 {
- t.Fatalf("got invalid = %d, want = 1", got)
- }
- if !isRouter && typ.routerOnly {
- // Router only count should have increased.
- if got := routerOnly.Value(); got != 1 {
-						t.Fatalf("got RouterOnlyPacketsDroppedByHost = %d, want = 1", got)
- }
- }
- })
- }
- }
-}
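
The cases above only vary whether header.ICMPv6Checksum was applied before injection. For reference, the ICMPv6 checksum is the Internet one's-complement sum over an IPv6 pseudo-header (source, destination, upper-layer length, next header 58) followed by the ICMPv6 message; a self-contained sketch with only the standard library (not gVisor's header package):

package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

// onesComplementSum folds data into a 16-bit one's-complement sum
// (RFC 1071 style).
func onesComplementSum(sum uint32, data []byte) uint32 {
	for len(data) >= 2 {
		sum += uint32(binary.BigEndian.Uint16(data[:2]))
		data = data[2:]
	}
	if len(data) == 1 {
		sum += uint32(data[0]) << 8
	}
	for sum > 0xffff {
		sum = (sum >> 16) + (sum & 0xffff)
	}
	return sum
}

// icmpv6Checksum computes the ICMPv6 checksum over the IPv6
// pseudo-header plus the ICMPv6 message (RFC 4443 section 2.3).
func icmpv6Checksum(src, dst net.IP, msg []byte) uint16 {
	var sum uint32
	sum = onesComplementSum(sum, src.To16())
	sum = onesComplementSum(sum, dst.To16())

	var lenAndNext [8]byte
	binary.BigEndian.PutUint32(lenAndNext[:4], uint32(len(msg))) // upper-layer length
	lenAndNext[7] = 58                                           // next header = ICMPv6
	sum = onesComplementSum(sum, lenAndNext[:])

	sum = onesComplementSum(sum, msg)
	return ^uint16(sum)
}

func main() {
	src := net.ParseIP("fe80::1")
	dst := net.ParseIP("fe80::2")
	// Minimal Echo Request: type 128, code 0, zero checksum, id/seq zero.
	msg := []byte{128, 0, 0, 0, 0, 0, 0, 0}
	csum := icmpv6Checksum(src, dst, msg)
	binary.BigEndian.PutUint16(msg[2:4], csum)
	fmt.Printf("checksum: %#04x\n", csum)
}
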
-
-func TestICMPChecksumValidationWithPayload(t *testing.T) {
- const simpleBodySize = 64
- simpleBody := func(view buffer.View) {
- for i := 0; i < simpleBodySize; i++ {
- view[i] = uint8(i)
- }
- }
-
- const errorICMPBodySize = header.IPv6MinimumSize + simpleBodySize
- errorICMPBody := func(view buffer.View) {
- ip := header.IPv6(view)
- ip.Encode(&header.IPv6Fields{
- PayloadLength: simpleBodySize,
- TransportProtocol: 10,
- HopLimit: 20,
- SrcAddr: lladdr0,
- DstAddr: lladdr1,
- })
- simpleBody(view[header.IPv6MinimumSize:])
- }
-
- types := []struct {
- name string
- typ header.ICMPv6Type
- size int
- statCounter func(tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter
- payloadSize int
- payload func(buffer.View)
- }{
- {
- "DstUnreachable",
- header.ICMPv6DstUnreachable,
- header.ICMPv6DstUnreachableMinimumSize,
- func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
- return stats.DstUnreachable
- },
- errorICMPBodySize,
- errorICMPBody,
- },
- {
- "PacketTooBig",
- header.ICMPv6PacketTooBig,
- header.ICMPv6PacketTooBigMinimumSize,
- func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
- return stats.PacketTooBig
- },
- errorICMPBodySize,
- errorICMPBody,
- },
- {
- "TimeExceeded",
- header.ICMPv6TimeExceeded,
- header.ICMPv6MinimumSize,
- func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
- return stats.TimeExceeded
- },
- errorICMPBodySize,
- errorICMPBody,
- },
- {
- "ParamProblem",
- header.ICMPv6ParamProblem,
- header.ICMPv6MinimumSize,
- func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
- return stats.ParamProblem
- },
- errorICMPBodySize,
- errorICMPBody,
- },
- {
- "EchoRequest",
- header.ICMPv6EchoRequest,
- header.ICMPv6EchoMinimumSize,
- func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
- return stats.EchoRequest
- },
- simpleBodySize,
- simpleBody,
- },
- {
- "EchoReply",
- header.ICMPv6EchoReply,
- header.ICMPv6EchoMinimumSize,
- func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
- return stats.EchoReply
- },
- simpleBodySize,
- simpleBody,
- },
- }
-
- for _, typ := range types {
- t.Run(typ.name, func(t *testing.T) {
- e := channel.New(10, 1280, linkAddr0)
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},
- })
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(_, _) = %s", err)
- }
-
- if err := s.AddAddress(nicID, ProtocolNumber, lladdr0); err != nil {
- t.Fatalf("AddAddress(_, %d, %s) = %s", ProtocolNumber, lladdr0, err)
- }
- {
- subnet, err := tcpip.NewSubnet(lladdr1, tcpip.AddressMask(strings.Repeat("\xff", len(lladdr1))))
- if err != nil {
- t.Fatal(err)
- }
- s.SetRouteTable(
- []tcpip.Route{{
- Destination: subnet,
- NIC: nicID,
- }},
- )
- }
-
- handleIPv6Payload := func(typ header.ICMPv6Type, size, payloadSize int, payloadFn func(buffer.View), checksum bool) {
- icmpSize := size + payloadSize
- hdr := buffer.NewPrependable(header.IPv6MinimumSize + icmpSize)
- icmpHdr := header.ICMPv6(hdr.Prepend(icmpSize))
- icmpHdr.SetType(typ)
- payloadFn(icmpHdr.Payload())
-
- if checksum {
- icmpHdr.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
- Header: icmpHdr,
- Src: lladdr1,
- Dst: lladdr0,
- }))
- }
-
- ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
- ip.Encode(&header.IPv6Fields{
- PayloadLength: uint16(icmpSize),
- TransportProtocol: header.ICMPv6ProtocolNumber,
- HopLimit: header.NDPHopLimit,
- SrcAddr: lladdr1,
- DstAddr: lladdr0,
- })
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: hdr.View().ToVectorisedView(),
- })
- e.InjectInbound(ProtocolNumber, pkt)
- }
-
- stats := s.Stats().ICMP.V6.PacketsReceived
- invalid := stats.Invalid
- typStat := typ.statCounter(stats)
-
- // Initial stat counts should be 0.
- if got := invalid.Value(); got != 0 {
- t.Fatalf("got invalid = %d, want = 0", got)
- }
- if got := typStat.Value(); got != 0 {
- t.Fatalf("got = %d, want = 0", got)
- }
-
- // Without setting checksum, the incoming packet should
- // be invalid.
- handleIPv6Payload(typ.typ, typ.size, typ.payloadSize, typ.payload, false)
- if got := invalid.Value(); got != 1 {
- t.Fatalf("got invalid = %d, want = 1", got)
- }
- // Rx count of type typ.typ should not have increased.
- if got := typStat.Value(); got != 0 {
- t.Fatalf("got = %d, want = 0", got)
- }
-
- // When checksum is set, it should be received.
- handleIPv6Payload(typ.typ, typ.size, typ.payloadSize, typ.payload, true)
- if got := typStat.Value(); got != 1 {
-				t.Fatalf("got = %d, want = 1", got)
- }
- // Invalid count should not have increased again.
- if got := invalid.Value(); got != 1 {
- t.Fatalf("got invalid = %d, want = 1", got)
- }
- })
- }
-}
-
-func TestICMPChecksumValidationWithPayloadMultipleViews(t *testing.T) {
- const simpleBodySize = 64
- simpleBody := func(view buffer.View) {
- for i := 0; i < simpleBodySize; i++ {
- view[i] = uint8(i)
- }
- }
-
- const errorICMPBodySize = header.IPv6MinimumSize + simpleBodySize
- errorICMPBody := func(view buffer.View) {
- ip := header.IPv6(view)
- ip.Encode(&header.IPv6Fields{
- PayloadLength: simpleBodySize,
- TransportProtocol: 10,
- HopLimit: 20,
- SrcAddr: lladdr0,
- DstAddr: lladdr1,
- })
- simpleBody(view[header.IPv6MinimumSize:])
- }
-
- types := []struct {
- name string
- typ header.ICMPv6Type
- size int
- statCounter func(tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter
- payloadSize int
- payload func(buffer.View)
- }{
- {
- "DstUnreachable",
- header.ICMPv6DstUnreachable,
- header.ICMPv6DstUnreachableMinimumSize,
- func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
- return stats.DstUnreachable
- },
- errorICMPBodySize,
- errorICMPBody,
- },
- {
- "PacketTooBig",
- header.ICMPv6PacketTooBig,
- header.ICMPv6PacketTooBigMinimumSize,
- func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
- return stats.PacketTooBig
- },
- errorICMPBodySize,
- errorICMPBody,
- },
- {
- "TimeExceeded",
- header.ICMPv6TimeExceeded,
- header.ICMPv6MinimumSize,
- func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
- return stats.TimeExceeded
- },
- errorICMPBodySize,
- errorICMPBody,
- },
- {
- "ParamProblem",
- header.ICMPv6ParamProblem,
- header.ICMPv6MinimumSize,
- func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
- return stats.ParamProblem
- },
- errorICMPBodySize,
- errorICMPBody,
- },
- {
- "EchoRequest",
- header.ICMPv6EchoRequest,
- header.ICMPv6EchoMinimumSize,
- func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
- return stats.EchoRequest
- },
- simpleBodySize,
- simpleBody,
- },
- {
- "EchoReply",
- header.ICMPv6EchoReply,
- header.ICMPv6EchoMinimumSize,
- func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
- return stats.EchoReply
- },
- simpleBodySize,
- simpleBody,
- },
- }
-
- for _, typ := range types {
- t.Run(typ.name, func(t *testing.T) {
- e := channel.New(10, 1280, linkAddr0)
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},
- })
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
-
- if err := s.AddAddress(nicID, ProtocolNumber, lladdr0); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s) = %s", nicID, ProtocolNumber, lladdr0, err)
- }
- {
- subnet, err := tcpip.NewSubnet(lladdr1, tcpip.AddressMask(strings.Repeat("\xff", len(lladdr1))))
- if err != nil {
- t.Fatal(err)
- }
- s.SetRouteTable(
- []tcpip.Route{{
- Destination: subnet,
- NIC: nicID,
- }},
- )
- }
-
- handleIPv6Payload := func(typ header.ICMPv6Type, size, payloadSize int, payloadFn func(buffer.View), checksum bool) {
- hdr := buffer.NewPrependable(header.IPv6MinimumSize + size)
- icmpHdr := header.ICMPv6(hdr.Prepend(size))
- icmpHdr.SetType(typ)
-
- payload := buffer.NewView(payloadSize)
- payloadFn(payload)
-
- if checksum {
- icmpHdr.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
- Header: icmpHdr,
- Src: lladdr1,
- Dst: lladdr0,
- PayloadCsum: header.Checksum(payload, 0 /* initial */),
- PayloadLen: len(payload),
- }))
- }
-
- ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
- ip.Encode(&header.IPv6Fields{
- PayloadLength: uint16(size + payloadSize),
- TransportProtocol: header.ICMPv6ProtocolNumber,
- HopLimit: header.NDPHopLimit,
- SrcAddr: lladdr1,
- DstAddr: lladdr0,
- })
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buffer.NewVectorisedView(header.IPv6MinimumSize+size+payloadSize, []buffer.View{hdr.View(), payload}),
- })
- e.InjectInbound(ProtocolNumber, pkt)
- }
-
- stats := s.Stats().ICMP.V6.PacketsReceived
- invalid := stats.Invalid
- typStat := typ.statCounter(stats)
-
- // Initial stat counts should be 0.
- if got := invalid.Value(); got != 0 {
- t.Fatalf("got invalid = %d, want = 0", got)
- }
- if got := typStat.Value(); got != 0 {
- t.Fatalf("got = %d, want = 0", got)
- }
-
- // Without setting checksum, the incoming packet should
- // be invalid.
- handleIPv6Payload(typ.typ, typ.size, typ.payloadSize, typ.payload, false)
- if got := invalid.Value(); got != 1 {
- t.Fatalf("got invalid = %d, want = 1", got)
- }
- // Rx count of type typ.typ should not have increased.
- if got := typStat.Value(); got != 0 {
- t.Fatalf("got = %d, want = 0", got)
- }
-
- // When checksum is set, it should be received.
- handleIPv6Payload(typ.typ, typ.size, typ.payloadSize, typ.payload, true)
- if got := typStat.Value(); got != 1 {
-				t.Fatalf("got = %d, want = 1", got)
- }
- // Invalid count should not have increased again.
- if got := invalid.Value(); got != 1 {
- t.Fatalf("got invalid = %d, want = 1", got)
- }
- })
- }
-}
-
-func TestLinkAddressRequest(t *testing.T) {
- const nicID = 1
-
- snaddr := header.SolicitedNodeAddr(lladdr0)
- mcaddr := header.EthernetAddressFromMulticastIPv6Address(snaddr)
-
- tests := []struct {
- name string
- nicAddr tcpip.Address
- localAddr tcpip.Address
- remoteLinkAddr tcpip.LinkAddress
-
- expectedErr tcpip.Error
- expectedRemoteAddr tcpip.Address
- expectedRemoteLinkAddr tcpip.LinkAddress
- }{
- {
- name: "Unicast",
- nicAddr: lladdr1,
- localAddr: lladdr1,
- remoteLinkAddr: linkAddr1,
- expectedRemoteAddr: lladdr0,
- expectedRemoteLinkAddr: linkAddr1,
- },
- {
- name: "Multicast",
- nicAddr: lladdr1,
- localAddr: lladdr1,
- remoteLinkAddr: "",
- expectedRemoteAddr: snaddr,
- expectedRemoteLinkAddr: mcaddr,
- },
- {
- name: "Unicast with unspecified source",
- nicAddr: lladdr1,
- remoteLinkAddr: linkAddr1,
- expectedRemoteAddr: lladdr0,
- expectedRemoteLinkAddr: linkAddr1,
- },
- {
- name: "Multicast with unspecified source",
- nicAddr: lladdr1,
- remoteLinkAddr: "",
- expectedRemoteAddr: snaddr,
- expectedRemoteLinkAddr: mcaddr,
- },
- {
- name: "Unicast with unassigned address",
- localAddr: lladdr1,
- remoteLinkAddr: linkAddr1,
- expectedErr: &tcpip.ErrBadLocalAddress{},
- },
- {
- name: "Multicast with unassigned address",
- localAddr: lladdr1,
- remoteLinkAddr: "",
- expectedErr: &tcpip.ErrBadLocalAddress{},
- },
- {
- name: "Unicast with no local address available",
- remoteLinkAddr: linkAddr1,
- expectedErr: &tcpip.ErrNetworkUnreachable{},
- },
- {
- name: "Multicast with no local address available",
- remoteLinkAddr: "",
- expectedErr: &tcpip.ErrNetworkUnreachable{},
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},
- })
-
- linkEP := channel.New(defaultChannelSize, defaultMTU, linkAddr0)
- if err := s.CreateNIC(nicID, linkEP); err != nil {
- t.Fatalf("s.CreateNIC(%d, _): %s", nicID, err)
- }
-
- ep, err := s.GetNetworkEndpoint(nicID, ProtocolNumber)
- if err != nil {
- t.Fatalf("s.GetNetworkEndpoint(%d, %d): %s", nicID, ProtocolNumber, err)
- }
- linkRes, ok := ep.(stack.LinkAddressResolver)
- if !ok {
- t.Fatalf("expected %T to implement stack.LinkAddressResolver", ep)
- }
-
- if len(test.nicAddr) != 0 {
- if err := s.AddAddress(nicID, ProtocolNumber, test.nicAddr); err != nil {
- t.Fatalf("s.AddAddress(%d, %d, %s): %s", nicID, ProtocolNumber, test.nicAddr, err)
- }
- }
-
- {
- err := linkRes.LinkAddressRequest(lladdr0, test.localAddr, test.remoteLinkAddr)
- if diff := cmp.Diff(test.expectedErr, err); diff != "" {
-					t.Fatalf("unexpected error from linkRes.LinkAddressRequest(%s, %s, %s) (-want, +got):\n%s", lladdr0, test.localAddr, test.remoteLinkAddr, diff)
- }
- }
-
- if test.expectedErr != nil {
- return
- }
-
- pkt, ok := linkEP.Read()
- if !ok {
- t.Fatal("expected to send a link address request")
- }
-
- var want stack.RouteInfo
- want.NetProto = ProtocolNumber
- want.RemoteLinkAddress = test.expectedRemoteLinkAddr
- if diff := cmp.Diff(want, pkt.Route, cmp.AllowUnexported(want)); diff != "" {
- t.Errorf("route info mismatch (-want +got):\n%s", diff)
- }
- checker.IPv6(t, stack.PayloadSince(pkt.Pkt.NetworkHeader()),
- checker.SrcAddr(lladdr1),
- checker.DstAddr(test.expectedRemoteAddr),
- checker.TTL(header.NDPHopLimit),
- checker.NDPNS(
- checker.NDPNSTargetAddress(lladdr0),
- checker.NDPNSOptions([]header.NDPOption{header.NDPSourceLinkLayerAddressOption(linkAddr0)}),
- ))
- })
- }
-}
-
-func TestPacketQueing(t *testing.T) {
- const nicID = 1
-
- var (
- host1NICLinkAddr = tcpip.LinkAddress("\x02\x03\x03\x04\x05\x06")
- host2NICLinkAddr = tcpip.LinkAddress("\x02\x03\x03\x04\x05\x09")
-
- host1IPv6Addr = tcpip.ProtocolAddress{
- Protocol: ProtocolNumber,
- AddressWithPrefix: tcpip.AddressWithPrefix{
- Address: tcpip.Address(net.ParseIP("a::1").To16()),
- PrefixLen: 64,
- },
- }
- host2IPv6Addr = tcpip.ProtocolAddress{
- Protocol: ProtocolNumber,
- AddressWithPrefix: tcpip.AddressWithPrefix{
- Address: tcpip.Address(net.ParseIP("a::2").To16()),
- PrefixLen: 64,
- },
- }
- )
-
- tests := []struct {
- name string
- rxPkt func(*channel.Endpoint)
- checkResp func(*testing.T, *channel.Endpoint)
- }{
- {
- name: "ICMP Error",
- rxPkt: func(e *channel.Endpoint) {
- hdr := buffer.NewPrependable(header.IPv6MinimumSize + header.UDPMinimumSize)
- u := header.UDP(hdr.Prepend(header.UDPMinimumSize))
- u.Encode(&header.UDPFields{
- SrcPort: 5555,
- DstPort: 80,
- Length: header.UDPMinimumSize,
- })
- sum := header.PseudoHeaderChecksum(udp.ProtocolNumber, host2IPv6Addr.AddressWithPrefix.Address, host1IPv6Addr.AddressWithPrefix.Address, header.UDPMinimumSize)
- sum = header.Checksum(nil, sum)
- u.SetChecksum(^u.CalculateChecksum(sum))
- payloadLength := hdr.UsedLength()
- ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
- ip.Encode(&header.IPv6Fields{
- PayloadLength: uint16(payloadLength),
- TransportProtocol: udp.ProtocolNumber,
- HopLimit: DefaultTTL,
- SrcAddr: host2IPv6Addr.AddressWithPrefix.Address,
- DstAddr: host1IPv6Addr.AddressWithPrefix.Address,
- })
- e.InjectInbound(ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: hdr.View().ToVectorisedView(),
- }))
- },
- checkResp: func(t *testing.T, e *channel.Endpoint) {
- p, ok := e.Read()
- if !ok {
- t.Fatalf("timed out waiting for packet")
- }
- if p.Proto != ProtocolNumber {
- t.Errorf("got p.Proto = %d, want = %d", p.Proto, ProtocolNumber)
- }
- if p.Route.RemoteLinkAddress != host2NICLinkAddr {
- t.Errorf("got p.Route.RemoteLinkAddress = %s, want = %s", p.Route.RemoteLinkAddress, host2NICLinkAddr)
- }
- checker.IPv6(t, stack.PayloadSince(p.Pkt.NetworkHeader()),
- checker.SrcAddr(host1IPv6Addr.AddressWithPrefix.Address),
- checker.DstAddr(host2IPv6Addr.AddressWithPrefix.Address),
- checker.ICMPv6(
- checker.ICMPv6Type(header.ICMPv6DstUnreachable),
- checker.ICMPv6Code(header.ICMPv6PortUnreachable)))
- },
- },
-
- {
- name: "Ping",
- rxPkt: func(e *channel.Endpoint) {
- totalLen := header.IPv6MinimumSize + header.ICMPv6MinimumSize
- hdr := buffer.NewPrependable(totalLen)
- pkt := header.ICMPv6(hdr.Prepend(header.ICMPv6MinimumSize))
- pkt.SetType(header.ICMPv6EchoRequest)
- pkt.SetCode(0)
- pkt.SetChecksum(0)
- pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
- Header: pkt,
- Src: host2IPv6Addr.AddressWithPrefix.Address,
- Dst: host1IPv6Addr.AddressWithPrefix.Address,
- }))
- ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
- ip.Encode(&header.IPv6Fields{
- PayloadLength: header.ICMPv6MinimumSize,
- TransportProtocol: icmp.ProtocolNumber6,
- HopLimit: DefaultTTL,
- SrcAddr: host2IPv6Addr.AddressWithPrefix.Address,
- DstAddr: host1IPv6Addr.AddressWithPrefix.Address,
- })
- e.InjectInbound(header.IPv6ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: hdr.View().ToVectorisedView(),
- }))
- },
- checkResp: func(t *testing.T, e *channel.Endpoint) {
- p, ok := e.Read()
- if !ok {
- t.Fatalf("timed out waiting for packet")
- }
- if p.Proto != ProtocolNumber {
- t.Errorf("got p.Proto = %d, want = %d", p.Proto, ProtocolNumber)
- }
- if p.Route.RemoteLinkAddress != host2NICLinkAddr {
- t.Errorf("got p.Route.RemoteLinkAddress = %s, want = %s", p.Route.RemoteLinkAddress, host2NICLinkAddr)
- }
- checker.IPv6(t, stack.PayloadSince(p.Pkt.NetworkHeader()),
- checker.SrcAddr(host1IPv6Addr.AddressWithPrefix.Address),
- checker.DstAddr(host2IPv6Addr.AddressWithPrefix.Address),
- checker.ICMPv6(
- checker.ICMPv6Type(header.ICMPv6EchoReply),
- checker.ICMPv6Code(header.ICMPv6UnusedCode)))
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
-
- e := channel.New(1, header.IPv6MinimumMTU, host1NICLinkAddr)
- e.LinkEPCapabilities |= stack.CapabilityResolutionRequired
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},
- Clock: clock,
- })
-
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("s.CreateNIC(%d, _): %s", nicID, err)
- }
- if err := s.AddProtocolAddress(nicID, host1IPv6Addr); err != nil {
- t.Fatalf("s.AddProtocolAddress(%d, %#v): %s", nicID, host1IPv6Addr, err)
- }
-
- s.SetRouteTable([]tcpip.Route{
- {
- Destination: host1IPv6Addr.AddressWithPrefix.Subnet(),
- NIC: nicID,
- },
- })
-
- // Receive a packet to trigger link resolution before a response is sent.
- test.rxPkt(e)
-
- // Wait for a neighbor solicitation since link address resolution should
- // be performed.
- {
- clock.RunImmediatelyScheduledJobs()
- p, ok := e.Read()
- if !ok {
- t.Fatalf("timed out waiting for packet")
- }
- if p.Proto != ProtocolNumber {
- t.Errorf("got Proto = %d, want = %d", p.Proto, ProtocolNumber)
- }
- snmc := header.SolicitedNodeAddr(host2IPv6Addr.AddressWithPrefix.Address)
- if want := header.EthernetAddressFromMulticastIPv6Address(snmc); p.Route.RemoteLinkAddress != want {
- t.Errorf("got p.Route.RemoteLinkAddress = %s, want = %s", p.Route.RemoteLinkAddress, want)
- }
- checker.IPv6(t, stack.PayloadSince(p.Pkt.NetworkHeader()),
- checker.SrcAddr(host1IPv6Addr.AddressWithPrefix.Address),
- checker.DstAddr(snmc),
- checker.TTL(header.NDPHopLimit),
- checker.NDPNS(
- checker.NDPNSTargetAddress(host2IPv6Addr.AddressWithPrefix.Address),
- checker.NDPNSOptions([]header.NDPOption{header.NDPSourceLinkLayerAddressOption(host1NICLinkAddr)}),
- ))
- }
-
- // Send a neighbor advertisement to complete link address resolution.
- {
- naSize := header.ICMPv6NeighborAdvertMinimumSize + header.NDPLinkLayerAddressSize
- hdr := buffer.NewPrependable(header.IPv6MinimumSize + naSize)
- pkt := header.ICMPv6(hdr.Prepend(naSize))
- pkt.SetType(header.ICMPv6NeighborAdvert)
- na := header.NDPNeighborAdvert(pkt.MessageBody())
- na.SetSolicitedFlag(true)
- na.SetOverrideFlag(true)
- na.SetTargetAddress(host2IPv6Addr.AddressWithPrefix.Address)
- na.Options().Serialize(header.NDPOptionsSerializer{
- header.NDPTargetLinkLayerAddressOption(host2NICLinkAddr),
- })
- pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
- Header: pkt,
- Src: host2IPv6Addr.AddressWithPrefix.Address,
- Dst: host1IPv6Addr.AddressWithPrefix.Address,
- }))
- payloadLength := hdr.UsedLength()
- ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
- ip.Encode(&header.IPv6Fields{
- PayloadLength: uint16(payloadLength),
- TransportProtocol: icmp.ProtocolNumber6,
- HopLimit: header.NDPHopLimit,
- SrcAddr: host2IPv6Addr.AddressWithPrefix.Address,
- DstAddr: host1IPv6Addr.AddressWithPrefix.Address,
- })
- e.InjectInbound(ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: hdr.View().ToVectorisedView(),
- }))
- }
-
- // Expect the response now that the link address has resolved.
- clock.RunImmediatelyScheduledJobs()
- test.checkResp(t, e)
-
- // Since link resolution was already performed, it shouldn't be performed
- // again.
- test.rxPkt(e)
- test.checkResp(t, e)
- })
- }
-}
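
TestPacketQueing above relies on the stack parking the reply until the next hop's link address resolves and then flushing it. A toy sketch of that pending-packet pattern (the pendingQueue type and its methods are illustrative, not the stack's internal neighbor or packet queues):

package main

import "fmt"

// packet and linkAddr are simplified stand-ins for stack.PacketBuffer
// and tcpip.LinkAddress.
type packet struct{ payload string }
type linkAddr string

// pendingQueue holds packets destined to a next hop whose link address
// is still being resolved.
type pendingQueue struct {
	resolved map[string]linkAddr // next-hop IP -> link address
	pending  map[string][]packet // next-hop IP -> queued packets
}

func newPendingQueue() *pendingQueue {
	return &pendingQueue{
		resolved: make(map[string]linkAddr),
		pending:  make(map[string][]packet),
	}
}

// send either transmits immediately or queues the packet; in the real
// stack, queuing would also kick off ARP/NDP resolution for nextHop.
func (q *pendingQueue) send(nextHop string, p packet) {
	if addr, ok := q.resolved[nextHop]; ok {
		fmt.Printf("tx %q -> %s\n", p.payload, addr)
		return
	}
	q.pending[nextHop] = append(q.pending[nextHop], p)
	fmt.Printf("queued %q, resolving %s\n", p.payload, nextHop)
}

// resolve records the neighbor's link address (e.g. from an ARP reply
// or a neighbor advertisement) and flushes anything that was waiting.
func (q *pendingQueue) resolve(nextHop string, addr linkAddr) {
	q.resolved[nextHop] = addr
	for _, p := range q.pending[nextHop] {
		fmt.Printf("tx %q -> %s\n", p.payload, addr)
	}
	delete(q.pending, nextHop)
}

func main() {
	q := newPendingQueue()
	q.send("a::2", packet{payload: "echo reply"})   // queued
	q.resolve("a::2", "02:03:03:04:05:09")          // flushes the reply
	q.send("a::2", packet{payload: "second reply"}) // sent immediately
}
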
-
-func TestCallsToNeighborCache(t *testing.T) {
- tests := []struct {
- name string
- createPacket func() header.ICMPv6
- multicast bool
- source tcpip.Address
- destination tcpip.Address
- wantProbeCount int
- wantConfirmationCount int
- }{
- {
- name: "Unicast Neighbor Solicitation without source link-layer address option",
- createPacket: func() header.ICMPv6 {
- nsSize := header.ICMPv6NeighborSolicitMinimumSize + header.NDPLinkLayerAddressSize
- icmp := header.ICMPv6(buffer.NewView(nsSize))
- icmp.SetType(header.ICMPv6NeighborSolicit)
- ns := header.NDPNeighborSolicit(icmp.MessageBody())
- ns.SetTargetAddress(lladdr0)
- return icmp
- },
- source: lladdr1,
- destination: lladdr0,
- // "The source link-layer address option SHOULD be included in unicast
- // solicitations." - RFC 4861 section 4.3
- //
- // A Neighbor Advertisement needs to be sent in response, but the
- // Neighbor Cache shouldn't be updated since we have no useful
- // information about the sender.
- wantProbeCount: 0,
- },
- {
- name: "Unicast Neighbor Solicitation with source link-layer address option",
- createPacket: func() header.ICMPv6 {
- nsSize := header.ICMPv6NeighborSolicitMinimumSize + header.NDPLinkLayerAddressSize
- icmp := header.ICMPv6(buffer.NewView(nsSize))
- icmp.SetType(header.ICMPv6NeighborSolicit)
- ns := header.NDPNeighborSolicit(icmp.MessageBody())
- ns.SetTargetAddress(lladdr0)
- ns.Options().Serialize(header.NDPOptionsSerializer{
- header.NDPSourceLinkLayerAddressOption(linkAddr1),
- })
- return icmp
- },
- source: lladdr1,
- destination: lladdr0,
- wantProbeCount: 1,
- },
- {
- name: "Multicast Neighbor Solicitation without source link-layer address option",
- createPacket: func() header.ICMPv6 {
- nsSize := header.ICMPv6NeighborSolicitMinimumSize + header.NDPLinkLayerAddressSize
- icmp := header.ICMPv6(buffer.NewView(nsSize))
- icmp.SetType(header.ICMPv6NeighborSolicit)
- ns := header.NDPNeighborSolicit(icmp.MessageBody())
- ns.SetTargetAddress(lladdr0)
- return icmp
- },
- source: lladdr1,
- destination: header.SolicitedNodeAddr(lladdr0),
- // "The source link-layer address option MUST be included in multicast
- // solicitations." - RFC 4861 section 4.3
- wantProbeCount: 0,
- },
- {
- name: "Multicast Neighbor Solicitation with source link-layer address option",
- createPacket: func() header.ICMPv6 {
- nsSize := header.ICMPv6NeighborSolicitMinimumSize + header.NDPLinkLayerAddressSize
- icmp := header.ICMPv6(buffer.NewView(nsSize))
- icmp.SetType(header.ICMPv6NeighborSolicit)
- ns := header.NDPNeighborSolicit(icmp.MessageBody())
- ns.SetTargetAddress(lladdr0)
- ns.Options().Serialize(header.NDPOptionsSerializer{
- header.NDPSourceLinkLayerAddressOption(linkAddr1),
- })
- return icmp
- },
- source: lladdr1,
- destination: header.SolicitedNodeAddr(lladdr0),
- wantProbeCount: 1,
- },
- {
- name: "Unicast Neighbor Advertisement without target link-layer address option",
- createPacket: func() header.ICMPv6 {
- naSize := header.ICMPv6NeighborAdvertMinimumSize
- icmp := header.ICMPv6(buffer.NewView(naSize))
- icmp.SetType(header.ICMPv6NeighborAdvert)
- na := header.NDPNeighborAdvert(icmp.MessageBody())
- na.SetSolicitedFlag(true)
- na.SetOverrideFlag(false)
- na.SetTargetAddress(lladdr1)
- return icmp
- },
- source: lladdr1,
- destination: lladdr0,
- // "When responding to unicast solicitations, the target link-layer
- // address option can be omitted since the sender of the solicitation has
- // the correct link-layer address; otherwise, it would not be able to
- // send the unicast solicitation in the first place."
- // - RFC 4861 section 4.4
- wantConfirmationCount: 1,
- },
- {
- name: "Unicast Neighbor Advertisement with target link-layer address option",
- createPacket: func() header.ICMPv6 {
- naSize := header.ICMPv6NeighborAdvertMinimumSize + header.NDPLinkLayerAddressSize
- icmp := header.ICMPv6(buffer.NewView(naSize))
- icmp.SetType(header.ICMPv6NeighborAdvert)
- na := header.NDPNeighborAdvert(icmp.MessageBody())
- na.SetSolicitedFlag(true)
- na.SetOverrideFlag(false)
- na.SetTargetAddress(lladdr1)
- na.Options().Serialize(header.NDPOptionsSerializer{
- header.NDPTargetLinkLayerAddressOption(linkAddr1),
- })
- return icmp
- },
- source: lladdr1,
- destination: lladdr0,
- wantConfirmationCount: 1,
- },
- {
- name: "Multicast Neighbor Advertisement without target link-layer address option",
- createPacket: func() header.ICMPv6 {
- naSize := header.ICMPv6NeighborAdvertMinimumSize + header.NDPLinkLayerAddressSize
- icmp := header.ICMPv6(buffer.NewView(naSize))
- icmp.SetType(header.ICMPv6NeighborAdvert)
- na := header.NDPNeighborAdvert(icmp.MessageBody())
- na.SetSolicitedFlag(false)
- na.SetOverrideFlag(false)
- na.SetTargetAddress(lladdr1)
- return icmp
- },
- source: lladdr1,
- destination: header.IPv6AllNodesMulticastAddress,
- // "Target link-layer address MUST be included for multicast solicitations
- // in order to avoid infinite Neighbor Solicitation "recursion" when the
- // peer node does not have a cache entry to return a Neighbor
- // Advertisements message." - RFC 4861 section 4.4
- wantConfirmationCount: 0,
- },
- {
- name: "Multicast Neighbor Advertisement with target link-layer address option",
- createPacket: func() header.ICMPv6 {
- naSize := header.ICMPv6NeighborAdvertMinimumSize + header.NDPLinkLayerAddressSize
- icmp := header.ICMPv6(buffer.NewView(naSize))
- icmp.SetType(header.ICMPv6NeighborAdvert)
- na := header.NDPNeighborAdvert(icmp.MessageBody())
- na.SetSolicitedFlag(false)
- na.SetOverrideFlag(false)
- na.SetTargetAddress(lladdr1)
- na.Options().Serialize(header.NDPOptionsSerializer{
- header.NDPTargetLinkLayerAddressOption(linkAddr1),
- })
- return icmp
- },
- source: lladdr1,
- destination: header.IPv6AllNodesMulticastAddress,
- wantConfirmationCount: 1,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{icmp.NewProtocol6},
- })
- {
- if err := s.CreateNIC(nicID, &stubLinkEndpoint{}); err != nil {
- t.Fatalf("CreateNIC(_, _) = %s", err)
- }
- if err := s.AddAddress(nicID, ProtocolNumber, lladdr0); err != nil {
- t.Fatalf("AddAddress(_, %d, %s) = %s", ProtocolNumber, lladdr0, err)
- }
- }
- {
- subnet, err := tcpip.NewSubnet(lladdr1, tcpip.AddressMask(strings.Repeat("\xff", len(lladdr1))))
- if err != nil {
- t.Fatal(err)
- }
- s.SetRouteTable(
- []tcpip.Route{{
- Destination: subnet,
- NIC: nicID,
- }},
- )
- }
-
- netProto := s.NetworkProtocolInstance(ProtocolNumber)
- if netProto == nil {
- t.Fatalf("cannot find protocol instance for network protocol %d", ProtocolNumber)
- }
-
- testInterface := testInterface{LinkEndpoint: channel.New(0, header.IPv6MinimumMTU, linkAddr0)}
- ep := netProto.NewEndpoint(&testInterface, &stubDispatcher{})
- defer ep.Close()
-
- if err := ep.Enable(); err != nil {
- t.Fatalf("ep.Enable(): %s", err)
- }
-
- addressableEndpoint, ok := ep.(stack.AddressableEndpoint)
- if !ok {
- t.Fatalf("expected network endpoint to implement stack.AddressableEndpoint")
- }
- addr := lladdr0.WithPrefix()
- if ep, err := addressableEndpoint.AddAndAcquirePermanentAddress(addr, stack.CanBePrimaryEndpoint, stack.AddressConfigStatic, false /* deprecated */); err != nil {
- t.Fatalf("addressableEndpoint.AddAndAcquirePermanentAddress(%s, CanBePrimaryEndpoint, AddressConfigStatic, false): %s", addr, err)
- } else {
- ep.DecRef()
- }
-
- icmp := test.createPacket()
- icmp.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
- Header: icmp,
- Src: test.source,
- Dst: test.destination,
- }))
- handleICMPInIPv6(ep, test.source, test.destination, icmp, header.NDPHopLimit, false)
-
- // Confirm the endpoint calls the correct NUDHandler method.
- if testInterface.probeCount != test.wantProbeCount {
- t.Errorf("got testInterface.probeCount = %d, want = %d", testInterface.probeCount, test.wantProbeCount)
- }
- if testInterface.confirmationCount != test.wantConfirmationCount {
- t.Errorf("got testInterface.confirmationCount = %d, want = %d", testInterface.confirmationCount, test.wantConfirmationCount)
- }
- })
- }
-}
diff --git a/pkg/tcpip/network/ipv6/ipv6_state_autogen.go b/pkg/tcpip/network/ipv6/ipv6_state_autogen.go
new file mode 100644
index 000000000..13d427822
--- /dev/null
+++ b/pkg/tcpip/network/ipv6/ipv6_state_autogen.go
@@ -0,0 +1,136 @@
+// automatically generated by stateify.
+
+package ipv6
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (i *icmpv6DestinationUnreachableSockError) StateTypeName() string {
+ return "pkg/tcpip/network/ipv6.icmpv6DestinationUnreachableSockError"
+}
+
+func (i *icmpv6DestinationUnreachableSockError) StateFields() []string {
+ return []string{}
+}
+
+func (i *icmpv6DestinationUnreachableSockError) beforeSave() {}
+
+// +checklocksignore
+func (i *icmpv6DestinationUnreachableSockError) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+}
+
+func (i *icmpv6DestinationUnreachableSockError) afterLoad() {}
+
+// +checklocksignore
+func (i *icmpv6DestinationUnreachableSockError) StateLoad(stateSourceObject state.Source) {
+}
+
+func (i *icmpv6DestinationNetworkUnreachableSockError) StateTypeName() string {
+ return "pkg/tcpip/network/ipv6.icmpv6DestinationNetworkUnreachableSockError"
+}
+
+func (i *icmpv6DestinationNetworkUnreachableSockError) StateFields() []string {
+ return []string{
+ "icmpv6DestinationUnreachableSockError",
+ }
+}
+
+func (i *icmpv6DestinationNetworkUnreachableSockError) beforeSave() {}
+
+// +checklocksignore
+func (i *icmpv6DestinationNetworkUnreachableSockError) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.icmpv6DestinationUnreachableSockError)
+}
+
+func (i *icmpv6DestinationNetworkUnreachableSockError) afterLoad() {}
+
+// +checklocksignore
+func (i *icmpv6DestinationNetworkUnreachableSockError) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.icmpv6DestinationUnreachableSockError)
+}
+
+func (i *icmpv6DestinationPortUnreachableSockError) StateTypeName() string {
+ return "pkg/tcpip/network/ipv6.icmpv6DestinationPortUnreachableSockError"
+}
+
+func (i *icmpv6DestinationPortUnreachableSockError) StateFields() []string {
+ return []string{
+ "icmpv6DestinationUnreachableSockError",
+ }
+}
+
+func (i *icmpv6DestinationPortUnreachableSockError) beforeSave() {}
+
+// +checklocksignore
+func (i *icmpv6DestinationPortUnreachableSockError) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.icmpv6DestinationUnreachableSockError)
+}
+
+func (i *icmpv6DestinationPortUnreachableSockError) afterLoad() {}
+
+// +checklocksignore
+func (i *icmpv6DestinationPortUnreachableSockError) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.icmpv6DestinationUnreachableSockError)
+}
+
+func (i *icmpv6DestinationAddressUnreachableSockError) StateTypeName() string {
+ return "pkg/tcpip/network/ipv6.icmpv6DestinationAddressUnreachableSockError"
+}
+
+func (i *icmpv6DestinationAddressUnreachableSockError) StateFields() []string {
+ return []string{
+ "icmpv6DestinationUnreachableSockError",
+ }
+}
+
+func (i *icmpv6DestinationAddressUnreachableSockError) beforeSave() {}
+
+// +checklocksignore
+func (i *icmpv6DestinationAddressUnreachableSockError) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.icmpv6DestinationUnreachableSockError)
+}
+
+func (i *icmpv6DestinationAddressUnreachableSockError) afterLoad() {}
+
+// +checklocksignore
+func (i *icmpv6DestinationAddressUnreachableSockError) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.icmpv6DestinationUnreachableSockError)
+}
+
+func (e *icmpv6PacketTooBigSockError) StateTypeName() string {
+ return "pkg/tcpip/network/ipv6.icmpv6PacketTooBigSockError"
+}
+
+func (e *icmpv6PacketTooBigSockError) StateFields() []string {
+ return []string{
+ "mtu",
+ }
+}
+
+func (e *icmpv6PacketTooBigSockError) beforeSave() {}
+
+// +checklocksignore
+func (e *icmpv6PacketTooBigSockError) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.mtu)
+}
+
+func (e *icmpv6PacketTooBigSockError) afterLoad() {}
+
+// +checklocksignore
+func (e *icmpv6PacketTooBigSockError) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.mtu)
+}
+
+func init() {
+ state.Register((*icmpv6DestinationUnreachableSockError)(nil))
+ state.Register((*icmpv6DestinationNetworkUnreachableSockError)(nil))
+ state.Register((*icmpv6DestinationPortUnreachableSockError)(nil))
+ state.Register((*icmpv6DestinationAddressUnreachableSockError)(nil))
+ state.Register((*icmpv6PacketTooBigSockError)(nil))
+}
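
The generated file above follows the stateify save/restore pattern: each type reports its name and the list of saved fields, writes and reads those fields by index through a state.Sink/state.Source pair, and registers itself in init() so the state package can reconstruct it by name. A rough hand-written sketch of the same shape for a hypothetical type (exampleSockError and its package path are illustrative only, not part of gVisor):

package example

import "gvisor.dev/gvisor/pkg/state"

// exampleSockError mirrors the shape of the generated savers above: a single
// saved field ("mtu") addressed by its index in StateFields.
type exampleSockError struct {
	mtu uint32
}

func (e *exampleSockError) StateTypeName() string { return "pkg/example.exampleSockError" }

func (e *exampleSockError) StateFields() []string { return []string{"mtu"} }

func (e *exampleSockError) beforeSave() {}

// StateSave writes each field at the index it occupies in StateFields.
func (e *exampleSockError) StateSave(stateSinkObject state.Sink) {
	e.beforeSave()
	stateSinkObject.Save(0, &e.mtu)
}

func (e *exampleSockError) afterLoad() {}

// StateLoad reads the fields back by the same indices.
func (e *exampleSockError) StateLoad(stateSourceObject state.Source) {
	stateSourceObject.Load(0, &e.mtu)
}

func init() {
	// Registration makes the type discoverable by its StateTypeName.
	state.Register((*exampleSockError)(nil))
}
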
diff --git a/pkg/tcpip/network/ipv6/ipv6_test.go b/pkg/tcpip/network/ipv6/ipv6_test.go
deleted file mode 100644
index d2a23fd4f..000000000
--- a/pkg/tcpip/network/ipv6/ipv6_test.go
+++ /dev/null
@@ -1,3491 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ipv6
-
-import (
- "bytes"
- "encoding/hex"
- "fmt"
- "io/ioutil"
- "math"
- "net"
- "reflect"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/checker"
- "gvisor.dev/gvisor/pkg/tcpip/faketime"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/link/channel"
- iptestutil "gvisor.dev/gvisor/pkg/tcpip/network/internal/testutil"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/testutil"
- "gvisor.dev/gvisor/pkg/tcpip/transport/icmp"
- "gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
- "gvisor.dev/gvisor/pkg/tcpip/transport/udp"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-const (
- addr1 = "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01"
- addr2 = "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02"
- // The least significant 3 bytes are the same as addr2 so both addr2 and
- // addr3 will have the same solicited-node address.
- addr3 = "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x02"
- addr4 = "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x03"
-
- // Tests use the extension header identifier values as uint8 instead of
- // header.IPv6ExtensionHeaderIdentifier.
- hopByHopExtHdrID = uint8(header.IPv6HopByHopOptionsExtHdrIdentifier)
- routingExtHdrID = uint8(header.IPv6RoutingExtHdrIdentifier)
- fragmentExtHdrID = uint8(header.IPv6FragmentExtHdrIdentifier)
- destinationExtHdrID = uint8(header.IPv6DestinationOptionsExtHdrIdentifier)
- noNextHdrID = uint8(header.IPv6NoNextHeaderIdentifier)
- unknownHdrID = uint8(header.IPv6UnknownExtHdrIdentifier)
-
- extraHeaderReserve = 50
-)
-
-// testReceiveICMP tests receiving an ICMP packet from src to dst. want is the
-// expected Neighbor Advertisement received count after receiving the packet.
-func testReceiveICMP(t *testing.T, s *stack.Stack, e *channel.Endpoint, src, dst tcpip.Address, want uint64) {
- t.Helper()
-
- // Receive ICMP packet.
- hdr := buffer.NewPrependable(header.IPv6MinimumSize + header.ICMPv6NeighborAdvertMinimumSize)
- pkt := header.ICMPv6(hdr.Prepend(header.ICMPv6NeighborAdvertMinimumSize))
- pkt.SetType(header.ICMPv6NeighborAdvert)
- pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
- Header: pkt,
- Src: src,
- Dst: dst,
- }))
- payloadLength := hdr.UsedLength()
- ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
- ip.Encode(&header.IPv6Fields{
- PayloadLength: uint16(payloadLength),
- TransportProtocol: header.ICMPv6ProtocolNumber,
- HopLimit: 255,
- SrcAddr: src,
- DstAddr: dst,
- })
-
- e.InjectInbound(ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: hdr.View().ToVectorisedView(),
- }))
-
- stats := s.Stats().ICMP.V6.PacketsReceived
-
- if got := stats.NeighborAdvert.Value(); got != want {
- t.Fatalf("got NeighborAdvert = %d, want = %d", got, want)
- }
-}
-
-// testReceiveUDP tests receiving a UDP packet from src to dst. want is the
-// expected UDP received count after receiving the packet.
-func testReceiveUDP(t *testing.T, s *stack.Stack, e *channel.Endpoint, src, dst tcpip.Address, want uint64) {
- t.Helper()
-
- wq := waiter.Queue{}
- we, ch := waiter.NewChannelEntry(nil)
- wq.EventRegister(&we, waiter.ReadableEvents)
- defer wq.EventUnregister(&we)
- defer close(ch)
-
- ep, err := s.NewEndpoint(udp.ProtocolNumber, ProtocolNumber, &wq)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %v", err)
- }
- defer ep.Close()
-
- if err := ep.Bind(tcpip.FullAddress{Addr: dst, Port: 80}); err != nil {
- t.Fatalf("ep.Bind(...) failed: %v", err)
- }
-
- // Receive UDP Packet.
- hdr := buffer.NewPrependable(header.IPv6MinimumSize + header.UDPMinimumSize)
- u := header.UDP(hdr.Prepend(header.UDPMinimumSize))
- u.Encode(&header.UDPFields{
- SrcPort: 5555,
- DstPort: 80,
- Length: header.UDPMinimumSize,
- })
-
- // UDP pseudo-header checksum.
- sum := header.PseudoHeaderChecksum(udp.ProtocolNumber, src, dst, header.UDPMinimumSize)
-
- // UDP checksum
- sum = header.Checksum(nil, sum)
- u.SetChecksum(^u.CalculateChecksum(sum))
-
- payloadLength := hdr.UsedLength()
- ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
- ip.Encode(&header.IPv6Fields{
- PayloadLength: uint16(payloadLength),
- TransportProtocol: udp.ProtocolNumber,
- HopLimit: 255,
- SrcAddr: src,
- DstAddr: dst,
- })
-
- e.InjectInbound(ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: hdr.View().ToVectorisedView(),
- }))
-
- stat := s.Stats().UDP.PacketsReceived
-
- if got := stat.Value(); got != want {
- t.Fatalf("got UDPPacketsReceived = %d, want = %d", got, want)
- }
-}
-
-func compareFragments(packets []*stack.PacketBuffer, sourcePacket *stack.PacketBuffer, mtu uint32, wantFragments []fragmentInfo, proto tcpip.TransportProtocolNumber) error {
- // sourcePacket does not have its IP Header populated. Let's copy the one
- // from the first fragment.
- source := header.IPv6(packets[0].NetworkHeader().View())
- sourceIPHeadersLen := len(source)
- vv := buffer.NewVectorisedView(sourcePacket.Size(), sourcePacket.Views())
- source = append(source, vv.ToView()...)
-
- var reassembledPayload buffer.VectorisedView
- for i, fragment := range packets {
- // Confirm that the packet is valid.
- allBytes := buffer.NewVectorisedView(fragment.Size(), fragment.Views())
- fragmentIPHeaders := header.IPv6(allBytes.ToView())
- if !fragmentIPHeaders.IsValid(len(fragmentIPHeaders)) {
- return fmt.Errorf("fragment #%d: IP packet is invalid:\n%s", i, hex.Dump(fragmentIPHeaders))
- }
-
- fragmentIPHeadersLength := fragment.NetworkHeader().View().Size()
- if fragmentIPHeadersLength != sourceIPHeadersLen {
- return fmt.Errorf("fragment #%d: got fragmentIPHeadersLength = %d, want = %d", i, fragmentIPHeadersLength, sourceIPHeadersLen)
- }
-
- if got := len(fragmentIPHeaders); got > int(mtu) {
- return fmt.Errorf("fragment #%d: got len(fragmentIPHeaders) = %d, want <= %d", i, got, mtu)
- }
-
- sourceIPHeader := source[:header.IPv6MinimumSize]
- fragmentIPHeader := fragmentIPHeaders[:header.IPv6MinimumSize]
-
- if got := fragmentIPHeaders.PayloadLength(); got != wantFragments[i].payloadSize {
- return fmt.Errorf("fragment #%d: got fragmentIPHeaders.PayloadLength() = %d, want = %d", i, got, wantFragments[i].payloadSize)
- }
-
- // We expect the IPv6 Header to be similar across each fragment, besides the
- // payload length.
- sourceIPHeader.SetPayloadLength(0)
- fragmentIPHeader.SetPayloadLength(0)
- if diff := cmp.Diff(fragmentIPHeader, sourceIPHeader); diff != "" {
- return fmt.Errorf("fragment #%d: fragmentIPHeader mismatch (-want +got):\n%s", i, diff)
- }
-
- if got := fragment.AvailableHeaderBytes(); got != extraHeaderReserve {
- return fmt.Errorf("fragment #%d: got packet.AvailableHeaderBytes() = %d, want = %d", i, got, extraHeaderReserve)
- }
- if fragment.NetworkProtocolNumber != sourcePacket.NetworkProtocolNumber {
- return fmt.Errorf("fragment #%d: got fragment.NetworkProtocolNumber = %d, want = %d", i, fragment.NetworkProtocolNumber, sourcePacket.NetworkProtocolNumber)
- }
-
- if len(packets) > 1 {
- // If the source packet was big enough that it needed fragmentation, let's
- // inspect the fragment header. Because no other extension headers are
- // supported, it will always be the last extension header.
- fragmentHeader := header.IPv6Fragment(fragmentIPHeaders[fragmentIPHeadersLength-header.IPv6FragmentHeaderSize : fragmentIPHeadersLength])
-
- if got := fragmentHeader.More(); got != wantFragments[i].more {
- return fmt.Errorf("fragment #%d: got fragmentHeader.More() = %t, want = %t", i, got, wantFragments[i].more)
- }
- if got := fragmentHeader.FragmentOffset(); got != wantFragments[i].offset {
- return fmt.Errorf("fragment #%d: got fragmentHeader.FragmentOffset() = %d, want = %d", i, got, wantFragments[i].offset)
- }
- if got := fragmentHeader.NextHeader(); got != uint8(proto) {
- return fmt.Errorf("fragment #%d: got fragmentHeader.NextHeader() = %d, want = %d", i, got, uint8(proto))
- }
- }
-
- // Store the reassembled payload as we parse each fragment. The payload
- // includes the Transport header and everything after.
- reassembledPayload.AppendView(fragment.TransportHeader().View())
- reassembledPayload.AppendView(fragment.Data().AsRange().ToOwnedView())
- }
-
- if diff := cmp.Diff(buffer.View(source[sourceIPHeadersLen:]), reassembledPayload.ToView()); diff != "" {
- return fmt.Errorf("reassembledPayload mismatch (-want +got):\n%s", diff)
- }
-
- return nil
-}
-
-// TestReceiveOnAllNodesMulticastAddr tests that IPv6 endpoints receive ICMP and
-// UDP packets destined to the IPv6 link-local all-nodes multicast address.
-func TestReceiveOnAllNodesMulticastAddr(t *testing.T) {
- tests := []struct {
- name string
- protocolFactory stack.TransportProtocolFactory
- rxf func(t *testing.T, s *stack.Stack, e *channel.Endpoint, src, dst tcpip.Address, want uint64)
- }{
- {"ICMP", icmp.NewProtocol6, testReceiveICMP},
- {"UDP", udp.NewProtocol, testReceiveUDP},
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{test.protocolFactory},
- })
- e := channel.New(10, header.IPv6MinimumMTU, linkAddr1)
- if err := s.CreateNIC(1, e); err != nil {
- t.Fatalf("CreateNIC(_) = %s", err)
- }
-
- // Should receive a packet destined to the all-nodes
- // multicast address.
- test.rxf(t, s, e, addr1, header.IPv6AllNodesMulticastAddress, 1)
- })
- }
-}
-
-// TestReceiveOnSolicitedNodeAddr tests that IPv6 endpoints receive ICMP and UDP
-// packets destined to the IPv6 solicited-node address of an assigned IPv6
-// address.
-func TestReceiveOnSolicitedNodeAddr(t *testing.T) {
- tests := []struct {
- name string
- protocolFactory stack.TransportProtocolFactory
- rxf func(t *testing.T, s *stack.Stack, e *channel.Endpoint, src, dst tcpip.Address, want uint64)
- }{
- {"ICMP", icmp.NewProtocol6, testReceiveICMP},
- {"UDP", udp.NewProtocol, testReceiveUDP},
- }
-
- snmc := header.SolicitedNodeAddr(addr2)
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{test.protocolFactory},
- })
- e := channel.New(1, header.IPv6MinimumMTU, linkAddr1)
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
-
- s.SetRouteTable([]tcpip.Route{
- {
- Destination: header.IPv6EmptySubnet,
- NIC: nicID,
- },
- })
-
- // Should not receive a packet destined to the solicited node address of
- // addr2/addr3 yet as we haven't added those addresses.
- test.rxf(t, s, e, addr1, snmc, 0)
-
- if err := s.AddAddress(nicID, ProtocolNumber, addr2); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s) = %s", nicID, ProtocolNumber, addr2, err)
- }
-
- // Should receive a packet destined to the solicited node address of
- // addr2/addr3 now that we have added addr2.
- test.rxf(t, s, e, addr1, snmc, 1)
-
- if err := s.AddAddress(nicID, ProtocolNumber, addr3); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s) = %s", nicID, ProtocolNumber, addr3, err)
- }
-
- // Should still receive a packet destined to the solicited node address of
- // addr2/addr3 now that we have added addr3.
- test.rxf(t, s, e, addr1, snmc, 2)
-
- if err := s.RemoveAddress(nicID, addr2); err != nil {
- t.Fatalf("RemoveAddress(%d, %s) = %s", nicID, addr2, err)
- }
-
- // Should still receive a packet destined to the solicited node address of
- // addr2/addr3 now that we have removed addr2.
- test.rxf(t, s, e, addr1, snmc, 3)
-
- // Make sure addr3's endpoint does not get removed from the NIC by
- // incrementing its reference count with a route.
- r, err := s.FindRoute(nicID, addr3, addr4, ProtocolNumber, false)
- if err != nil {
- t.Fatalf("FindRoute(%d, %s, %s, %d, false): %s", nicID, addr3, addr4, ProtocolNumber, err)
- }
- defer r.Release()
-
- if err := s.RemoveAddress(nicID, addr3); err != nil {
- t.Fatalf("RemoveAddress(%d, %s) = %s", nicID, addr3, err)
- }
-
- // Should no longer receive a packet destined to the solicited node address
- // of addr2/addr3 as both of them have been removed, even though a route
- // using addr3 exists.
- test.rxf(t, s, e, addr1, snmc, 3)
- })
- }
-}
-
-// TestAddIpv6Address tests adding IPv6 addresses.
-func TestAddIpv6Address(t *testing.T) {
- const nicID = 1
-
- tests := []struct {
- name string
- addr tcpip.Address
- }{
- // This test is in response to b/140943433.
- {
- "Nil",
- tcpip.Address([]byte(nil)),
- },
- {
- "ValidUnicast",
- addr1,
- },
- {
- "ValidLinkLocalUnicast",
- lladdr0,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},
- })
- if err := s.CreateNIC(nicID, &stubLinkEndpoint{}); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
-
- if err := s.AddAddress(nicID, ProtocolNumber, test.addr); err != nil {
- t.Fatalf("AddAddress(%d, %d, nil) = %s", nicID, ProtocolNumber, err)
- }
-
- if addr, err := s.GetMainNICAddress(nicID, ProtocolNumber); err != nil {
- t.Fatalf("stack.GetMainNICAddress(%d, %d): %s", nicID, ProtocolNumber, err)
- } else if addr.Address != test.addr {
- t.Fatalf("got stack.GetMainNICAddress(%d, %d) = %s, want = %s", nicID, ProtocolNumber, addr.Address, test.addr)
- }
- })
- }
-}
-
-func TestReceiveIPv6ExtHdrs(t *testing.T) {
- tests := []struct {
- name string
- extHdr func(nextHdr uint8) ([]byte, uint8)
- shouldAccept bool
- countersToBeIncremented func(*tcpip.Stats) []*tcpip.StatCounter
- // Should we expect an ICMP response and if so, with what contents?
- expectICMP bool
- ICMPType header.ICMPv6Type
- ICMPCode header.ICMPv6Code
- pointer uint32
- multicast bool
- }{
- {
- name: "None",
- extHdr: func(nextHdr uint8) ([]byte, uint8) { return nil, nextHdr },
- shouldAccept: true,
- expectICMP: false,
- },
- {
- name: "hopbyhop with router alert option",
- extHdr: func(nextHdr uint8) ([]byte, uint8) {
- return []byte{
- nextHdr, 0,
-
- // Router Alert option.
- 5, 2, 0, 0, 0, 0,
- }, hopByHopExtHdrID
- },
- shouldAccept: true,
- countersToBeIncremented: func(stats *tcpip.Stats) []*tcpip.StatCounter {
- return []*tcpip.StatCounter{stats.IP.OptionRouterAlertReceived}
- },
- },
- {
- name: "hopbyhop with two router alert options",
- extHdr: func(nextHdr uint8) ([]byte, uint8) {
- return []byte{
- nextHdr, 1,
-
- // Router Alert option.
- 5, 2, 0, 0, 0, 0,
-
- // Router Alert option.
- 5, 2, 0, 0, 0, 0, 0, 0,
- }, hopByHopExtHdrID
- },
- shouldAccept: false,
- countersToBeIncremented: func(stats *tcpip.Stats) []*tcpip.StatCounter {
- return []*tcpip.StatCounter{
- stats.IP.OptionRouterAlertReceived,
- stats.IP.MalformedPacketsReceived,
- }
- },
- },
- {
- name: "hopbyhop with unknown option skippable action",
- extHdr: func(nextHdr uint8) ([]byte, uint8) {
- return []byte{
- nextHdr, 1,
-
- // Skippable unknown.
- 63, 4, 1, 2, 3, 4,
-
- // Skippable unknown.
- 62, 6, 1, 2, 3, 4, 5, 6,
- }, hopByHopExtHdrID
- },
- shouldAccept: true,
- },
- {
- name: "hopbyhop with unknown option discard action",
- extHdr: func(nextHdr uint8) ([]byte, uint8) {
- return []byte{
- nextHdr, 1,
-
- // Skippable unknown.
- 63, 4, 1, 2, 3, 4,
-
- // Discard unknown.
- 127, 6, 1, 2, 3, 4, 5, 6,
- }, hopByHopExtHdrID
- },
- shouldAccept: false,
- expectICMP: false,
- },
- {
- name: "hopbyhop with unknown option discard and send icmp action (unicast)",
- extHdr: func(nextHdr uint8) ([]byte, uint8) {
- return []byte{
- nextHdr, 1,
-
- // Skippable unknown.
- 63, 4, 1, 2, 3, 4,
-
- // Discard & send ICMP if option is unknown.
- 191, 6, 1, 2, 3, 4, 5, 6,
- //^ Unknown option.
- }, hopByHopExtHdrID
- },
- shouldAccept: false,
- expectICMP: true,
- ICMPType: header.ICMPv6ParamProblem,
- ICMPCode: header.ICMPv6UnknownOption,
- pointer: header.IPv6FixedHeaderSize + 8,
- },
- {
- name: "hopbyhop with unknown option discard and send icmp action (multicast)",
- extHdr: func(nextHdr uint8) ([]byte, uint8) {
- return []byte{
- nextHdr, 1,
-
- // Skippable unknown.
- 63, 4, 1, 2, 3, 4,
-
- // Discard & send ICMP if option is unknown.
- 191, 6, 1, 2, 3, 4, 5, 6,
- //^ Unknown option.
- }, hopByHopExtHdrID
- },
- multicast: true,
- shouldAccept: false,
- expectICMP: true,
- ICMPType: header.ICMPv6ParamProblem,
- ICMPCode: header.ICMPv6UnknownOption,
- pointer: header.IPv6FixedHeaderSize + 8,
- },
- {
- name: "hopbyhop with unknown option discard and send icmp action unless multicast dest (unicast)",
- extHdr: func(nextHdr uint8) ([]byte, uint8) {
- return []byte{
- nextHdr, 1,
-
- // Skippable unknown.
- 63, 4, 1, 2, 3, 4,
-
- // Discard & send ICMP unless packet is for multicast destination if
- // option is unknown.
- 255, 6, 1, 2, 3, 4, 5, 6,
- //^ Unknown option.
- }, hopByHopExtHdrID
- },
- expectICMP: true,
- ICMPType: header.ICMPv6ParamProblem,
- ICMPCode: header.ICMPv6UnknownOption,
- pointer: header.IPv6FixedHeaderSize + 8,
- },
- {
- name: "hopbyhop with unknown option discard and send icmp action unless multicast dest (multicast)",
- extHdr: func(nextHdr uint8) ([]byte, uint8) {
- return []byte{
- nextHdr, 1,
-
- // Skippable unknown.
- 63, 4, 1, 2, 3, 4,
-
- // Discard & send ICMP unless packet is for multicast destination if
- // option is unknown.
- 255, 6, 1, 2, 3, 4, 5, 6,
- //^ Unknown option.
- }, hopByHopExtHdrID
- },
- multicast: true,
- shouldAccept: false,
- expectICMP: false,
- },
- {
- name: "routing with zero segments left",
- extHdr: func(nextHdr uint8) ([]byte, uint8) {
- return []byte{
- nextHdr, 0,
- 1, 0, 2, 3, 4, 5,
- }, routingExtHdrID
- },
- shouldAccept: true,
- },
- {
- name: "routing with non-zero segments left",
- extHdr: func(nextHdr uint8) ([]byte, uint8) {
- return []byte{
- nextHdr, 0,
- 1, 1, 2, 3, 4, 5,
- }, routingExtHdrID
- },
- shouldAccept: false,
- expectICMP: true,
- ICMPType: header.ICMPv6ParamProblem,
- ICMPCode: header.ICMPv6ErroneousHeader,
- pointer: header.IPv6FixedHeaderSize + 2,
- },
- {
- name: "atomic fragment with zero ID",
- extHdr: func(nextHdr uint8) ([]byte, uint8) {
- return []byte{
- nextHdr, 0,
- 0, 0, 0, 0, 0, 0,
- }, fragmentExtHdrID
- },
- shouldAccept: true,
- },
- {
- name: "atomic fragment with non-zero ID",
- extHdr: func(nextHdr uint8) ([]byte, uint8) {
- return []byte{
- nextHdr, 0,
- 0, 0, 1, 2, 3, 4,
- }, fragmentExtHdrID
- },
- shouldAccept: true,
- expectICMP: false,
- },
- {
- name: "fragment",
- extHdr: func(nextHdr uint8) ([]byte, uint8) {
- return []byte{
- nextHdr, 0,
- 1, 0, 1, 2, 3, 4,
- }, fragmentExtHdrID
- },
- shouldAccept: false,
- expectICMP: false,
- },
- {
- name: "No next header",
- extHdr: func(nextHdr uint8) ([]byte, uint8) {
- return nil, noNextHdrID
- },
- shouldAccept: false,
- expectICMP: false,
- },
- {
- name: "unknown next header (first)",
- extHdr: func(nextHdr uint8) ([]byte, uint8) {
- return []byte{
- nextHdr, 0, 63, 4, 1, 2, 3, 4,
- }, unknownHdrID
- },
- shouldAccept: false,
- expectICMP: true,
- ICMPType: header.ICMPv6ParamProblem,
- ICMPCode: header.ICMPv6UnknownHeader,
- pointer: header.IPv6NextHeaderOffset,
- },
- {
- name: "unknown next header (not first)",
- extHdr: func(nextHdr uint8) ([]byte, uint8) {
- return []byte{
- unknownHdrID, 0,
- 63, 4, 1, 2, 3, 4,
- }, hopByHopExtHdrID
- },
- shouldAccept: false,
- expectICMP: true,
- ICMPType: header.ICMPv6ParamProblem,
- ICMPCode: header.ICMPv6UnknownHeader,
- pointer: header.IPv6FixedHeaderSize,
- },
- {
- name: "destination with unknown option skippable action",
- extHdr: func(nextHdr uint8) ([]byte, uint8) {
- return []byte{
- nextHdr, 1,
-
- // Skippable unknown.
- 63, 4, 1, 2, 3, 4,
-
- // Skippable unknown.
- 62, 6, 1, 2, 3, 4, 5, 6,
- }, destinationExtHdrID
- },
- shouldAccept: true,
- expectICMP: false,
- },
- {
- name: "destination with unknown option discard action",
- extHdr: func(nextHdr uint8) ([]byte, uint8) {
- return []byte{
- nextHdr, 1,
-
- // Skippable unknown.
- 63, 4, 1, 2, 3, 4,
-
- // Discard unknown.
- 127, 6, 1, 2, 3, 4, 5, 6,
- }, destinationExtHdrID
- },
- shouldAccept: false,
- expectICMP: false,
- },
- {
- name: "destination with unknown option discard and send icmp action (unicast)",
- extHdr: func(nextHdr uint8) ([]byte, uint8) {
- return []byte{
- nextHdr, 1,
-
- // Skippable unknown.
- 63, 4, 1, 2, 3, 4,
-
- // Discard & send ICMP if option is unknown.
- 191, 6, 1, 2, 3, 4, 5, 6,
- //^ 191 is an unknown option.
- }, destinationExtHdrID
- },
- shouldAccept: false,
- expectICMP: true,
- ICMPType: header.ICMPv6ParamProblem,
- ICMPCode: header.ICMPv6UnknownOption,
- pointer: header.IPv6FixedHeaderSize + 8,
- },
- {
- name: "destination with unknown option discard and send icmp action (muilticast)",
- extHdr: func(nextHdr uint8) ([]byte, uint8) {
- return []byte{
- nextHdr, 1,
-
- // Skippable unknown.
- 63, 4, 1, 2, 3, 4,
-
- // Discard & send ICMP if option is unknown.
- 191, 6, 1, 2, 3, 4, 5, 6,
- //^ 191 is an unknown option.
- }, destinationExtHdrID
- },
- multicast: true,
- shouldAccept: false,
- expectICMP: true,
- ICMPType: header.ICMPv6ParamProblem,
- ICMPCode: header.ICMPv6UnknownOption,
- pointer: header.IPv6FixedHeaderSize + 8,
- },
- {
- name: "destination with unknown option discard and send icmp action unless multicast dest (unicast)",
- extHdr: func(nextHdr uint8) ([]byte, uint8) {
- return []byte{
- nextHdr, 1,
-
- // Skippable unknown.
- 63, 4, 1, 2, 3, 4,
-
- // Discard & send ICMP unless packet is for multicast destination if
- // option is unknown.
- 255, 6, 1, 2, 3, 4, 5, 6,
- //^ 255 is unknown.
- }, destinationExtHdrID
- },
- shouldAccept: false,
- expectICMP: true,
- ICMPType: header.ICMPv6ParamProblem,
- ICMPCode: header.ICMPv6UnknownOption,
- pointer: header.IPv6FixedHeaderSize + 8,
- },
- {
- name: "destination with unknown option discard and send icmp action unless multicast dest (multicast)",
- extHdr: func(nextHdr uint8) ([]byte, uint8) {
- return []byte{
- nextHdr, 1,
-
- // Skippable unknown.
- 63, 4, 1, 2, 3, 4,
-
- // Discard & send ICMP unless packet is for multicast destination if
- // option is unknown.
- 255, 6, 1, 2, 3, 4, 5, 6,
- //^ 255 is unknown.
- }, destinationExtHdrID
- },
- shouldAccept: false,
- expectICMP: false,
- multicast: true,
- },
- {
- name: "atomic fragment - routing",
- extHdr: func(nextHdr uint8) ([]byte, uint8) {
- return []byte{
- // Fragment extension header.
- routingExtHdrID, 0, 0, 0, 1, 2, 3, 4,
-
- // Routing extension header.
- nextHdr, 0, 1, 0, 2, 3, 4, 5,
- }, fragmentExtHdrID
- },
- shouldAccept: true,
- },
- {
- name: "hop by hop (with skippable unknown) - routing",
- extHdr: func(nextHdr uint8) ([]byte, uint8) {
- return []byte{
- // Hop By Hop extension header with skippable unknown option.
- routingExtHdrID, 0, 62, 4, 1, 2, 3, 4,
-
- // Routing extension header.
- nextHdr, 0, 1, 0, 2, 3, 4, 5,
- }, hopByHopExtHdrID
- },
- shouldAccept: true,
- },
- {
- name: "routing - hop by hop (with skippable unknown)",
- extHdr: func(nextHdr uint8) ([]byte, uint8) {
- return []byte{
- // Routing extension header.
- hopByHopExtHdrID, 0, 1, 0, 2, 3, 4, 5,
- // ^^^ The HopByHop extension header may not appear after the first
- // extension header.
-
- // Hop By Hop extension header with skippable unknown option.
- nextHdr, 0, 62, 4, 1, 2, 3, 4,
- }, routingExtHdrID
- },
- shouldAccept: false,
- expectICMP: true,
- ICMPType: header.ICMPv6ParamProblem,
- ICMPCode: header.ICMPv6UnknownHeader,
- pointer: header.IPv6FixedHeaderSize,
- },
- {
- name: "routing - hop by hop (with send icmp unknown)",
- extHdr: func(nextHdr uint8) ([]byte, uint8) {
- return []byte{
- // Routing extension header.
- hopByHopExtHdrID, 0, 1, 0, 2, 3, 4, 5,
- // ^^^ The HopByHop extension header may not appear after the first
- // extension header.
-
- nextHdr, 1,
-
- // Skippable unknown.
- 63, 4, 1, 2, 3, 4,
-
- // Skippable unknown.
- 191, 6, 1, 2, 3, 4, 5, 6,
- }, routingExtHdrID
- },
- shouldAccept: false,
- expectICMP: true,
- ICMPType: header.ICMPv6ParamProblem,
- ICMPCode: header.ICMPv6UnknownHeader,
- pointer: header.IPv6FixedHeaderSize,
- },
- {
- name: "hopbyhop (with skippable unknown) - routing - atomic fragment - destination (with skippable unknown)",
- extHdr: func(nextHdr uint8) ([]byte, uint8) {
- return []byte{
- // Hop By Hop extension header with skippable unknown option.
- routingExtHdrID, 0, 62, 4, 1, 2, 3, 4,
-
- // Routing extension header.
- fragmentExtHdrID, 0, 1, 0, 2, 3, 4, 5,
-
- // Fragment extension header.
- destinationExtHdrID, 0, 0, 0, 1, 2, 3, 4,
-
- // Destination extension header with skippable unknown option.
- nextHdr, 0, 63, 4, 1, 2, 3, 4,
- }, hopByHopExtHdrID
- },
- shouldAccept: true,
- },
- {
- name: "hopbyhop (with discard unknown) - routing - atomic fragment - destination (with skippable unknown)",
- extHdr: func(nextHdr uint8) ([]byte, uint8) {
- return []byte{
- // Hop By Hop extension header with discard action for unknown option.
- routingExtHdrID, 0, 65, 4, 1, 2, 3, 4,
-
- // Routing extension header.
- fragmentExtHdrID, 0, 1, 0, 2, 3, 4, 5,
-
- // Fragment extension header.
- destinationExtHdrID, 0, 0, 0, 1, 2, 3, 4,
-
- // Destination extension header with skippable unknown option.
- nextHdr, 0, 63, 4, 1, 2, 3, 4,
- }, hopByHopExtHdrID
- },
- shouldAccept: false,
- expectICMP: false,
- },
- {
- name: "hopbyhop (with skippable unknown) - routing - atomic fragment - destination (with discard unknown)",
- extHdr: func(nextHdr uint8) ([]byte, uint8) {
- return []byte{
- // Hop By Hop extension header with skippable unknown option.
- routingExtHdrID, 0, 62, 4, 1, 2, 3, 4,
-
- // Routing extension header.
- fragmentExtHdrID, 0, 1, 0, 2, 3, 4, 5,
-
- // Fragment extension header.
- destinationExtHdrID, 0, 0, 0, 1, 2, 3, 4,
-
- // Destination extension header with discard action for unknown
- // option.
- nextHdr, 0, 65, 4, 1, 2, 3, 4,
- }, hopByHopExtHdrID
- },
- shouldAccept: false,
- expectICMP: false,
- },
- }
-
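
The option-type values used in the hop-by-hop and destination-options cases above (62, 63, 65, 127, 191, 255) are chosen for their two highest-order bits, which tell a receiver what to do with an option it does not recognize (RFC 8200 section 4.2). A small illustrative helper (optionAction is a hypothetical name, not part of this file) makes the decomposition explicit:

// optionAction returns the unrecognized-option action encoded in the two
// highest-order bits of an IPv6 option type (RFC 8200 section 4.2):
//
//	0 (e.g. 62, 63):  skip over the option and keep processing
//	1 (e.g. 65, 127): discard the packet silently
//	2 (e.g. 191):     discard and send an ICMP Parameter Problem
//	3 (e.g. 255):     discard and send an ICMP Parameter Problem only if the
//	                  destination address was not a multicast address
func optionAction(optType byte) byte {
	return optType >> 6
}
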
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},
- })
- e := channel.New(1, header.IPv6MinimumMTU, linkAddr1)
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
- if err := s.AddAddress(nicID, ProtocolNumber, addr2); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s) = %s", nicID, ProtocolNumber, addr2, err)
- }
-
- // Add a default route so that a return packet knows where to go.
- s.SetRouteTable([]tcpip.Route{
- {
- Destination: header.IPv6EmptySubnet,
- NIC: nicID,
- },
- })
-
- wq := waiter.Queue{}
- we, ch := waiter.NewChannelEntry(nil)
- wq.EventRegister(&we, waiter.WritableEvents)
- defer wq.EventUnregister(&we)
- defer close(ch)
- ep, err := s.NewEndpoint(udp.ProtocolNumber, ProtocolNumber, &wq)
- if err != nil {
- t.Fatalf("NewEndpoint(%d, %d, _): %s", udp.ProtocolNumber, ProtocolNumber, err)
- }
- defer ep.Close()
-
- bindAddr := tcpip.FullAddress{Addr: addr2, Port: 80}
- if err := ep.Bind(bindAddr); err != nil {
- t.Fatalf("Bind(%+v): %s", bindAddr, err)
- }
-
- udpPayload := []byte{1, 2, 3, 4, 5, 6, 7, 8}
- udpLength := header.UDPMinimumSize + len(udpPayload)
- extHdrBytes, ipv6NextHdr := test.extHdr(uint8(header.UDPProtocolNumber))
- extHdrLen := len(extHdrBytes)
- hdr := buffer.NewPrependable(header.IPv6MinimumSize + extHdrLen + udpLength)
-
- // Serialize UDP message.
- u := header.UDP(hdr.Prepend(udpLength))
- u.Encode(&header.UDPFields{
- SrcPort: 5555,
- DstPort: 80,
- Length: uint16(udpLength),
- })
- copy(u.Payload(), udpPayload)
-
- dstAddr := tcpip.Address(addr2)
- if test.multicast {
- dstAddr = header.IPv6AllNodesMulticastAddress
- }
-
- sum := header.PseudoHeaderChecksum(udp.ProtocolNumber, addr1, dstAddr, uint16(udpLength))
- sum = header.Checksum(udpPayload, sum)
- u.SetChecksum(^u.CalculateChecksum(sum))
-
- // Copy extension header bytes between the UDP message and the IPv6
- // fixed header.
- copy(hdr.Prepend(extHdrLen), extHdrBytes)
-
- // Serialize IPv6 fixed header.
- payloadLength := hdr.UsedLength()
- ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
- ip.Encode(&header.IPv6Fields{
- PayloadLength: uint16(payloadLength),
- // We're lying about transport protocol here to be able to generate
- // raw extension headers from the test definitions.
- TransportProtocol: tcpip.TransportProtocolNumber(ipv6NextHdr),
- HopLimit: 255,
- SrcAddr: addr1,
- DstAddr: dstAddr,
- })
-
- stats := s.Stats()
- var counters []*tcpip.StatCounter
- // Make sure that the counters we expect to be incremented are initially
- // set to zero.
- if fn := test.countersToBeIncremented; fn != nil {
- counters = fn(&stats)
- }
- for i := range counters {
- if got := counters[i].Value(); got != 0 {
- t.Errorf("before writing packet: got test.countersToBeIncremented(&stats)[%d].Value() = %d, want = 0", i, got)
- }
- }
-
- e.InjectInbound(ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: hdr.View().ToVectorisedView(),
- }))
-
- for i := range counters {
- if got := counters[i].Value(); got != 1 {
- t.Errorf("after writing packet: got test.countersToBeIncremented(&stats)[%d].Value() = %d, want = 1", i, got)
- }
- }
-
- udpReceiveStat := stats.UDP.PacketsReceived
- if !test.shouldAccept {
- if got := udpReceiveStat.Value(); got != 0 {
- t.Errorf("got UDP Rx Packets = %d, want = 0", got)
- }
-
- if !test.expectICMP {
- if p, ok := e.Read(); ok {
- t.Fatalf("unexpected packet received: %#v", p)
- }
- return
- }
-
- // ICMP required.
- p, ok := e.Read()
- if !ok {
- t.Fatalf("expected packet wasn't written out")
- }
-
- // Pack the output packet into a single buffer.View as the checkers
- // assume that.
- vv := buffer.NewVectorisedView(p.Pkt.Size(), p.Pkt.Views())
- pkt := vv.ToView()
- if got, want := len(pkt), header.IPv6FixedHeaderSize+header.ICMPv6MinimumSize+hdr.UsedLength(); got != want {
- t.Fatalf("got an ICMP packet of size = %d, want = %d", got, want)
- }
-
- ipHdr := header.IPv6(pkt)
- checker.IPv6(t, ipHdr, checker.ICMPv6(
- checker.ICMPv6Type(test.ICMPType),
- checker.ICMPv6Code(test.ICMPCode)))
-
- // We know the error ICMP packets we read back will not contain any
- // extension headers.
- icmpPkt := header.ICMPv6(ipHdr.Payload())
- // We know we sent small packets that won't be truncated when reflected
- // back to us.
- originalPacket := icmpPkt.Payload()
- if got, want := icmpPkt.TypeSpecific(), test.pointer; got != want {
- t.Errorf("unexpected ICMPv6 pointer, got = %d, want = %d\n", got, want)
- }
- if diff := cmp.Diff(hdr.View(), buffer.View(originalPacket)); diff != "" {
- t.Errorf("ICMPv6 payload mismatch (-want +got):\n%s", diff)
- }
- return
- }
-
- // Expect a UDP packet.
- if got := udpReceiveStat.Value(); got != 1 {
- t.Errorf("got UDP Rx Packets = %d, want = 1", got)
- }
- var buf bytes.Buffer
- result, err := ep.Read(&buf, tcpip.ReadOptions{})
- if err != nil {
- t.Fatalf("Read: %s", err)
- }
- if diff := cmp.Diff(tcpip.ReadResult{
- Count: len(udpPayload),
- Total: len(udpPayload),
- }, result, checker.IgnoreCmpPath("ControlMessages")); diff != "" {
- t.Errorf("Read: unexpected result (-want +got):\n%s", diff)
- }
- if diff := cmp.Diff(udpPayload, buf.Bytes()); diff != "" {
- t.Errorf("got UDP payload mismatch (-want +got):\n%s", diff)
- }
-
- // Should not have any more UDP packets.
- res, err := ep.Read(ioutil.Discard, tcpip.ReadOptions{})
- if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Fatalf("got Read = (%v, %v), want = (_, %s)", res, err, &tcpip.ErrWouldBlock{})
- }
- })
- }
-}
-
-// fragmentData holds the IPv6 payload for a fragmented IPv6 packet.
-type fragmentData struct {
- srcAddr tcpip.Address
- dstAddr tcpip.Address
- nextHdr uint8
- data buffer.VectorisedView
-}
-
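
The raw fragment extension headers in the test vectors below pack the 13-bit fragment offset (counted in 8-octet units) and the More Fragments (M) flag into the two bytes that follow the Next Header and Reserved fields, which is why a comment such as "Fragment offset = 8, More = false, ID = 1" pairs with the byte values 0 and 64. A minimal sketch of that encoding (fragmentOffsetField is a hypothetical helper, not part of this file):

// fragmentOffsetField packs a fragment offset (given in bytes; it must be a
// multiple of 8) and the More Fragments flag into the two-byte field used by
// the IPv6 Fragment extension header.
func fragmentOffsetField(offsetInBytes uint16, more bool) (hi, lo byte) {
	v := (offsetInBytes / 8) << 3 // 13-bit offset counted in 8-octet blocks
	if more {
		v |= 1 // the M (More Fragments) flag is the least significant bit
	}
	return byte(v >> 8), byte(v)
}

// fragmentOffsetField(0, true) returns (0, 1): first fragment, more follow.
// fragmentOffsetField(64, false) returns (0, 64): offset 64 bytes, final fragment.
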
-func TestReceiveIPv6Fragments(t *testing.T) {
- const (
- udpPayload1Length = 256
- udpPayload2Length = 128
- // Used to test cases where the fragment payload size is not a multiple of
- // the fragment block size of 8 (RFC 8200 section 4.5).
- udpPayload3Length = 127
- udpPayload4Length = header.IPv6MaximumPayloadSize - header.UDPMinimumSize
- udpMaximumSizeMinus15 = header.UDPMaximumSize - 15
- fragmentExtHdrLen = 8
- // Note that not all routing extension headers are 8 bytes, but this test
- // uses 8-byte routing extension headers for most sub tests.
- routingExtHdrLen = 8
- )
-
- udpGen := func(payload []byte, multiplier uint8, src, dst tcpip.Address) buffer.View {
- payloadLen := len(payload)
- for i := 0; i < payloadLen; i++ {
- payload[i] = uint8(i) * multiplier
- }
-
- udpLength := header.UDPMinimumSize + payloadLen
-
- hdr := buffer.NewPrependable(udpLength)
- u := header.UDP(hdr.Prepend(udpLength))
- u.Encode(&header.UDPFields{
- SrcPort: 5555,
- DstPort: 80,
- Length: uint16(udpLength),
- })
- copy(u.Payload(), payload)
- sum := header.PseudoHeaderChecksum(udp.ProtocolNumber, src, dst, uint16(udpLength))
- sum = header.Checksum(payload, sum)
- u.SetChecksum(^u.CalculateChecksum(sum))
- return hdr.View()
- }
-
- var udpPayload1Addr1ToAddr2Buf [udpPayload1Length]byte
- udpPayload1Addr1ToAddr2 := udpPayload1Addr1ToAddr2Buf[:]
- ipv6Payload1Addr1ToAddr2 := udpGen(udpPayload1Addr1ToAddr2, 1, addr1, addr2)
-
- var udpPayload1Addr3ToAddr2Buf [udpPayload1Length]byte
- udpPayload1Addr3ToAddr2 := udpPayload1Addr3ToAddr2Buf[:]
- ipv6Payload1Addr3ToAddr2 := udpGen(udpPayload1Addr3ToAddr2, 4, addr3, addr2)
-
- var udpPayload2Addr1ToAddr2Buf [udpPayload2Length]byte
- udpPayload2Addr1ToAddr2 := udpPayload2Addr1ToAddr2Buf[:]
- ipv6Payload2Addr1ToAddr2 := udpGen(udpPayload2Addr1ToAddr2, 2, addr1, addr2)
-
- var udpPayload3Addr1ToAddr2Buf [udpPayload3Length]byte
- udpPayload3Addr1ToAddr2 := udpPayload3Addr1ToAddr2Buf[:]
- ipv6Payload3Addr1ToAddr2 := udpGen(udpPayload3Addr1ToAddr2, 3, addr1, addr2)
-
- var udpPayload4Addr1ToAddr2Buf [udpPayload4Length]byte
- udpPayload4Addr1ToAddr2 := udpPayload4Addr1ToAddr2Buf[:]
- ipv6Payload4Addr1ToAddr2 := udpGen(udpPayload4Addr1ToAddr2, 4, addr1, addr2)
-
- tests := []struct {
- name string
- expectedPayload []byte
- fragments []fragmentData
- expectedPayloads [][]byte
- }{
- {
- name: "No fragmentation",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: uint8(header.UDPProtocolNumber),
- data: ipv6Payload1Addr1ToAddr2.ToVectorisedView(),
- },
- },
- expectedPayloads: [][]byte{udpPayload1Addr1ToAddr2},
- },
- {
- name: "Atomic fragment",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+len(ipv6Payload1Addr1ToAddr2),
- []buffer.View{
- // Fragment extension header.
- []byte{uint8(header.UDPProtocolNumber), 0, 0, 0, 0, 0, 0, 0},
-
- ipv6Payload1Addr1ToAddr2,
- },
- ),
- },
- },
- expectedPayloads: [][]byte{udpPayload1Addr1ToAddr2},
- },
- {
- name: "Atomic fragment with size not a multiple of fragment block size",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+len(ipv6Payload3Addr1ToAddr2),
- []buffer.View{
- // Fragment extension header.
- []byte{uint8(header.UDPProtocolNumber), 0, 0, 0, 0, 0, 0, 0},
-
- ipv6Payload3Addr1ToAddr2,
- },
- ),
- },
- },
- expectedPayloads: [][]byte{udpPayload3Addr1ToAddr2},
- },
- {
- name: "Two fragments",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+64,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 0, More = true, ID = 1
- []byte{uint8(header.UDPProtocolNumber), 0, 0, 1, 0, 0, 0, 1},
-
- ipv6Payload1Addr1ToAddr2[:64],
- },
- ),
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+len(ipv6Payload1Addr1ToAddr2)-64,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 8, More = false, ID = 1
- []byte{uint8(header.UDPProtocolNumber), 0, 0, 64, 0, 0, 0, 1},
-
- ipv6Payload1Addr1ToAddr2[64:],
- },
- ),
- },
- },
- expectedPayloads: [][]byte{udpPayload1Addr1ToAddr2},
- },
- {
- name: "Two fragments out of order",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+len(ipv6Payload1Addr1ToAddr2)-64,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 8, More = false, ID = 1
- []byte{uint8(header.UDPProtocolNumber), 0, 0, 64, 0, 0, 0, 1},
-
- ipv6Payload1Addr1ToAddr2[64:],
- },
- ),
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+64,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 0, More = true, ID = 1
- []byte{uint8(header.UDPProtocolNumber), 0, 0, 1, 0, 0, 0, 1},
-
- ipv6Payload1Addr1ToAddr2[:64],
- },
- ),
- },
- },
- expectedPayloads: [][]byte{udpPayload1Addr1ToAddr2},
- },
- {
- name: "Two fragments with different Next Header values",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+64,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 0, More = true, ID = 1
- []byte{uint8(header.UDPProtocolNumber), 0, 0, 1, 0, 0, 0, 1},
-
- ipv6Payload1Addr1ToAddr2[:64],
- },
- ),
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+len(ipv6Payload1Addr1ToAddr2)-64,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 8, More = false, ID = 1
- // NextHeader value is different than the one in the first fragment, so
- // this NextHeader should be ignored.
- []byte{uint8(header.IPv6NoNextHeaderIdentifier), 0, 0, 64, 0, 0, 0, 1},
-
- ipv6Payload1Addr1ToAddr2[64:],
- },
- ),
- },
- },
- expectedPayloads: [][]byte{udpPayload1Addr1ToAddr2},
- },
- {
- name: "Two fragments with last fragment size not a multiple of fragment block size",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+64,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 0, More = true, ID = 1
- []byte{uint8(header.UDPProtocolNumber), 0, 0, 1, 0, 0, 0, 1},
-
- ipv6Payload3Addr1ToAddr2[:64],
- },
- ),
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+len(ipv6Payload3Addr1ToAddr2)-64,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 8, More = false, ID = 1
- []byte{uint8(header.UDPProtocolNumber), 0, 0, 64, 0, 0, 0, 1},
-
- ipv6Payload3Addr1ToAddr2[64:],
- },
- ),
- },
- },
- expectedPayloads: [][]byte{udpPayload3Addr1ToAddr2},
- },
- {
- name: "Two fragments with first fragment size not a multiple of fragment block size",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+63,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 0, More = true, ID = 1
- []byte{uint8(header.UDPProtocolNumber), 0, 0, 1, 0, 0, 0, 1},
-
- ipv6Payload3Addr1ToAddr2[:63],
- },
- ),
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+len(ipv6Payload3Addr1ToAddr2)-63,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 8, More = false, ID = 1
- []byte{uint8(header.UDPProtocolNumber), 0, 0, 64, 0, 0, 0, 1},
-
- ipv6Payload3Addr1ToAddr2[63:],
- },
- ),
- },
- },
- expectedPayloads: nil,
- },
- {
- name: "Two fragments with different IDs",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+64,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 0, More = true, ID = 1
- []byte{uint8(header.UDPProtocolNumber), 0, 0, 1, 0, 0, 0, 1},
-
- ipv6Payload1Addr1ToAddr2[:64],
- },
- ),
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+len(ipv6Payload1Addr1ToAddr2)-64,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 8, More = false, ID = 2
- []byte{uint8(header.UDPProtocolNumber), 0, 0, 64, 0, 0, 0, 2},
-
- ipv6Payload1Addr1ToAddr2[64:],
- },
- ),
- },
- },
- expectedPayloads: nil,
- },
- {
- name: "Two fragments reassembled into a maximum UDP packet",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+udpMaximumSizeMinus15,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 0, More = true, ID = 1
- []byte{uint8(header.UDPProtocolNumber), 0, 0, 1, 0, 0, 0, 1},
-
- ipv6Payload4Addr1ToAddr2[:udpMaximumSizeMinus15],
- },
- ),
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+len(ipv6Payload4Addr1ToAddr2)-udpMaximumSizeMinus15,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = udpMaximumSizeMinus15/8, More = false, ID = 1
- []byte{uint8(header.UDPProtocolNumber), 0,
- udpMaximumSizeMinus15 >> 8,
- udpMaximumSizeMinus15 & 0xff,
- 0, 0, 0, 1},
-
- ipv6Payload4Addr1ToAddr2[udpMaximumSizeMinus15:],
- },
- ),
- },
- },
- expectedPayloads: [][]byte{udpPayload4Addr1ToAddr2},
- },
- {
- name: "Two fragments with MF flag reassembled into a maximum UDP packet",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+udpMaximumSizeMinus15,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 0, More = true, ID = 1
- []byte{uint8(header.UDPProtocolNumber), 0, 0, 1, 0, 0, 0, 1},
-
- ipv6Payload4Addr1ToAddr2[:udpMaximumSizeMinus15],
- },
- ),
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+len(ipv6Payload4Addr1ToAddr2)-udpMaximumSizeMinus15,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = udpMaximumSizeMinus15/8, More = true, ID = 1
- []byte{uint8(header.UDPProtocolNumber), 0,
- udpMaximumSizeMinus15 >> 8,
- (udpMaximumSizeMinus15 & 0xff) + 1,
- 0, 0, 0, 1},
-
- ipv6Payload4Addr1ToAddr2[udpMaximumSizeMinus15:],
- },
- ),
- },
- },
- expectedPayloads: nil,
- },
- {
- name: "Two fragments with per-fragment routing header with zero segments left",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: routingExtHdrID,
- data: buffer.NewVectorisedView(
- routingExtHdrLen+fragmentExtHdrLen+64,
- []buffer.View{
- // Routing extension header.
- //
- // Segments left = 0.
- []byte{fragmentExtHdrID, 0, 1, 0, 2, 3, 4, 5},
-
- // Fragment extension header.
- //
- // Fragment offset = 0, More = true, ID = 1
- []byte{uint8(header.UDPProtocolNumber), 0, 0, 1, 0, 0, 0, 1},
-
- ipv6Payload1Addr1ToAddr2[:64],
- },
- ),
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: routingExtHdrID,
- data: buffer.NewVectorisedView(
- routingExtHdrLen+fragmentExtHdrLen+len(ipv6Payload1Addr1ToAddr2)-64,
- []buffer.View{
- // Routing extension header.
- //
- // Segments left = 0.
- []byte{fragmentExtHdrID, 0, 1, 0, 2, 3, 4, 5},
-
- // Fragment extension header.
- //
- // Fragment offset = 8, More = false, ID = 1
- []byte{uint8(header.UDPProtocolNumber), 0, 0, 64, 0, 0, 0, 1},
-
- ipv6Payload1Addr1ToAddr2[64:],
- },
- ),
- },
- },
- expectedPayloads: [][]byte{udpPayload1Addr1ToAddr2},
- },
- {
- name: "Two fragments with per-fragment routing header with non-zero segments left",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: routingExtHdrID,
- data: buffer.NewVectorisedView(
- routingExtHdrLen+fragmentExtHdrLen+64,
- []buffer.View{
- // Routing extension header.
- //
- // Segments left = 1.
- []byte{fragmentExtHdrID, 0, 1, 1, 2, 3, 4, 5},
-
- // Fragment extension header.
- //
- // Fragment offset = 0, More = true, ID = 1
- []byte{uint8(header.UDPProtocolNumber), 0, 0, 1, 0, 0, 0, 1},
-
- ipv6Payload1Addr1ToAddr2[:64],
- },
- ),
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: routingExtHdrID,
- data: buffer.NewVectorisedView(
- routingExtHdrLen+fragmentExtHdrLen+len(ipv6Payload1Addr1ToAddr2)-64,
- []buffer.View{
- // Routing extension header.
- //
- // Segments left = 1.
- []byte{fragmentExtHdrID, 0, 1, 1, 2, 3, 4, 5},
-
- // Fragment extension header.
- //
- // Fragment offset = 9, More = false, ID = 1
- []byte{uint8(header.UDPProtocolNumber), 0, 0, 72, 0, 0, 0, 1},
-
- ipv6Payload1Addr1ToAddr2[64:],
- },
- ),
- },
- },
- expectedPayloads: nil,
- },
- {
- name: "Two fragments with routing header with zero segments left",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- routingExtHdrLen+fragmentExtHdrLen+64,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 0, More = true, ID = 1
- []byte{routingExtHdrID, 0, 0, 1, 0, 0, 0, 1},
-
- // Routing extension header.
- //
- // Segments left = 0.
- []byte{uint8(header.UDPProtocolNumber), 0, 1, 0, 2, 3, 4, 5},
-
- ipv6Payload1Addr1ToAddr2[:64],
- },
- ),
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+len(ipv6Payload1Addr1ToAddr2)-64,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 9, More = false, ID = 1
- []byte{routingExtHdrID, 0, 0, 72, 0, 0, 0, 1},
-
- ipv6Payload1Addr1ToAddr2[64:],
- },
- ),
- },
- },
- expectedPayloads: [][]byte{udpPayload1Addr1ToAddr2},
- },
- {
- name: "Two fragments with routing header with non-zero segments left",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- routingExtHdrLen+fragmentExtHdrLen+64,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 0, More = true, ID = 1
- []byte{routingExtHdrID, 0, 0, 1, 0, 0, 0, 1},
-
- // Routing extension header.
- //
- // Segments left = 1.
- []byte{uint8(header.UDPProtocolNumber), 0, 1, 1, 2, 3, 4, 5},
-
- ipv6Payload1Addr1ToAddr2[:64],
- },
- ),
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+len(ipv6Payload1Addr1ToAddr2)-64,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 9, More = false, ID = 1
- []byte{routingExtHdrID, 0, 0, 72, 0, 0, 0, 1},
-
- ipv6Payload1Addr1ToAddr2[64:],
- },
- ),
- },
- },
- expectedPayloads: nil,
- },
- {
- name: "Two fragments with routing header with zero segments left across fragments",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- // The length of this payload is fragmentExtHdrLen+8 because the
- // first 8 bytes of the 16 byte routing extension header are in
- // this fragment.
- fragmentExtHdrLen+8,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 0, More = true, ID = 1
- []byte{routingExtHdrID, 0, 0, 1, 0, 0, 0, 1},
-
- // Routing extension header (part 1)
- //
- // Segments left = 0.
- []byte{uint8(header.UDPProtocolNumber), 1, 1, 0, 2, 3, 4, 5},
- },
- ),
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- // The length of this payload is
- // fragmentExtHdrLen+8+len(ipv6Payload1Addr1ToAddr2) because the last 8 bytes of
-						// the 16-byte routing extension header are in this fragment.
- fragmentExtHdrLen+8+len(ipv6Payload1Addr1ToAddr2),
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 1, More = false, ID = 1
- []byte{routingExtHdrID, 0, 0, 8, 0, 0, 0, 1},
-
- // Routing extension header (part 2)
- []byte{6, 7, 8, 9, 10, 11, 12, 13},
-
- ipv6Payload1Addr1ToAddr2,
- },
- ),
- },
- },
- expectedPayloads: nil,
- },
- {
- name: "Two fragments with routing header with non-zero segments left across fragments",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- // The length of this payload is fragmentExtHdrLen+8 because the
-						// first 8 bytes of the 16-byte routing extension header are in
- // this fragment.
- fragmentExtHdrLen+8,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 0, More = true, ID = 1
- []byte{routingExtHdrID, 0, 0, 1, 0, 0, 0, 1},
-
- // Routing extension header (part 1)
- //
- // Segments left = 1.
- []byte{uint8(header.UDPProtocolNumber), 1, 1, 1, 2, 3, 4, 5},
- },
- ),
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- // The length of this payload is
- // fragmentExtHdrLen+8+len(ipv6Payload1Addr1ToAddr2) because the last 8 bytes of
-						// the 16-byte routing extension header are in this fragment.
- fragmentExtHdrLen+8+len(ipv6Payload1Addr1ToAddr2),
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 1, More = false, ID = 1
- []byte{routingExtHdrID, 0, 0, 8, 0, 0, 0, 1},
-
- // Routing extension header (part 2)
- []byte{6, 7, 8, 9, 10, 11, 12, 13},
-
- ipv6Payload1Addr1ToAddr2,
- },
- ),
- },
- },
- expectedPayloads: nil,
- },
- // As per RFC 6946, IPv6 atomic fragments MUST NOT interfere with "normal"
- // fragmented traffic.
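-		// An atomic fragment is a packet whose Fragment header has both the
-		// Fragment Offset field and the M flag set to zero, i.e. it is not
-		// actually fragmented.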
- {
- name: "Two fragments with atomic",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+64,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 0, More = true, ID = 1
- []byte{uint8(header.UDPProtocolNumber), 0, 0, 1, 0, 0, 0, 1},
-
- ipv6Payload1Addr1ToAddr2[:64],
- },
- ),
- },
- // This fragment has the same ID as the other fragments but is an atomic
- // fragment. It should not interfere with the other fragments.
- {
- srcAddr: addr1,
- dstAddr: addr2,
-				t.Fatalf("WritePacket(_, _, _): %s", err)
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+len(ipv6Payload2Addr1ToAddr2),
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 0, More = false, ID = 1
- []byte{uint8(header.UDPProtocolNumber), 0, 0, 0, 0, 0, 0, 1},
-
- ipv6Payload2Addr1ToAddr2,
- },
- ),
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+len(ipv6Payload1Addr1ToAddr2)-64,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 8, More = false, ID = 1
- []byte{uint8(header.UDPProtocolNumber), 0, 0, 64, 0, 0, 0, 1},
-
- ipv6Payload1Addr1ToAddr2[64:],
- },
- ),
- },
- },
- expectedPayloads: [][]byte{udpPayload2Addr1ToAddr2, udpPayload1Addr1ToAddr2},
- },
- {
- name: "Two interleaved fragmented packets",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+64,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 0, More = true, ID = 1
- []byte{uint8(header.UDPProtocolNumber), 0, 0, 1, 0, 0, 0, 1},
-
- ipv6Payload1Addr1ToAddr2[:64],
- },
- ),
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+32,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 0, More = true, ID = 2
- []byte{uint8(header.UDPProtocolNumber), 0, 0, 1, 0, 0, 0, 2},
-
- ipv6Payload2Addr1ToAddr2[:32],
- },
- ),
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+len(ipv6Payload1Addr1ToAddr2)-64,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 8, More = false, ID = 1
- []byte{uint8(header.UDPProtocolNumber), 0, 0, 64, 0, 0, 0, 1},
-
- ipv6Payload1Addr1ToAddr2[64:],
- },
- ),
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+len(ipv6Payload2Addr1ToAddr2)-32,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 4, More = false, ID = 2
- []byte{uint8(header.UDPProtocolNumber), 0, 0, 32, 0, 0, 0, 2},
-
- ipv6Payload2Addr1ToAddr2[32:],
- },
- ),
- },
- },
- expectedPayloads: [][]byte{udpPayload1Addr1ToAddr2, udpPayload2Addr1ToAddr2},
- },
- {
- name: "Two interleaved fragmented packets from different sources but with same ID",
- fragments: []fragmentData{
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+64,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 0, More = true, ID = 1
- []byte{uint8(header.UDPProtocolNumber), 0, 0, 1, 0, 0, 0, 1},
-
- ipv6Payload1Addr1ToAddr2[:64],
- },
- ),
- },
- {
- srcAddr: addr3,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+32,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 0, More = true, ID = 1
- []byte{uint8(header.UDPProtocolNumber), 0, 0, 1, 0, 0, 0, 1},
-
- ipv6Payload1Addr3ToAddr2[:32],
- },
- ),
- },
- {
- srcAddr: addr1,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+len(ipv6Payload1Addr1ToAddr2)-64,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 8, More = false, ID = 1
- []byte{uint8(header.UDPProtocolNumber), 0, 0, 64, 0, 0, 0, 1},
-
- ipv6Payload1Addr1ToAddr2[64:],
- },
- ),
- },
- {
- srcAddr: addr3,
- dstAddr: addr2,
- nextHdr: fragmentExtHdrID,
- data: buffer.NewVectorisedView(
- fragmentExtHdrLen+len(ipv6Payload1Addr1ToAddr2)-32,
- []buffer.View{
- // Fragment extension header.
- //
- // Fragment offset = 4, More = false, ID = 1
- []byte{uint8(header.UDPProtocolNumber), 0, 0, 32, 0, 0, 0, 1},
-
- ipv6Payload1Addr3ToAddr2[32:],
- },
- ),
- },
- },
- expectedPayloads: [][]byte{udpPayload1Addr1ToAddr2, udpPayload1Addr3ToAddr2},
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},
- })
- e := channel.New(0, header.IPv6MinimumMTU, linkAddr1)
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
- if err := s.AddAddress(nicID, ProtocolNumber, addr2); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s) = %s", nicID, ProtocolNumber, addr2, err)
- }
-
- wq := waiter.Queue{}
- we, ch := waiter.NewChannelEntry(nil)
- wq.EventRegister(&we, waiter.ReadableEvents)
- defer wq.EventUnregister(&we)
- defer close(ch)
- ep, err := s.NewEndpoint(udp.ProtocolNumber, ProtocolNumber, &wq)
- if err != nil {
- t.Fatalf("NewEndpoint(%d, %d, _): %s", udp.ProtocolNumber, ProtocolNumber, err)
- }
- defer ep.Close()
-
- bindAddr := tcpip.FullAddress{Addr: addr2, Port: 80}
- if err := ep.Bind(bindAddr); err != nil {
- t.Fatalf("Bind(%+v): %s", bindAddr, err)
- }
-
- for _, f := range test.fragments {
- hdr := buffer.NewPrependable(header.IPv6MinimumSize)
-
- // Serialize IPv6 fixed header.
- ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
- ip.Encode(&header.IPv6Fields{
- PayloadLength: uint16(f.data.Size()),
- // We're lying about transport protocol here so that we can generate
- // raw extension headers for the tests.
- TransportProtocol: tcpip.TransportProtocolNumber(f.nextHdr),
- HopLimit: 255,
- SrcAddr: f.srcAddr,
- DstAddr: f.dstAddr,
- })
-
- vv := hdr.View().ToVectorisedView()
- vv.Append(f.data)
-
- e.InjectInbound(ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: vv,
- }))
- }
-
- if got, want := s.Stats().UDP.PacketsReceived.Value(), uint64(len(test.expectedPayloads)); got != want {
- t.Errorf("got UDP Rx Packets = %d, want = %d", got, want)
- }
-
- for i, p := range test.expectedPayloads {
- var buf bytes.Buffer
- _, err := ep.Read(&buf, tcpip.ReadOptions{})
- if err != nil {
- t.Fatalf("(i=%d) Read: %s", i, err)
- }
- if diff := cmp.Diff(p, buf.Bytes()); diff != "" {
- t.Errorf("(i=%d) got UDP payload mismatch (-want +got):\n%s", i, diff)
- }
- }
-
- res, err := ep.Read(ioutil.Discard, tcpip.ReadOptions{})
- if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Fatalf("(last) got Read = (%v, %v), want = (_, %s)", res, err, &tcpip.ErrWouldBlock{})
- }
- })
- }
-}
-
-func TestInvalidIPv6Fragments(t *testing.T) {
- const (
- addr1 = "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01"
- addr2 = "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02"
- linkAddr1 = tcpip.LinkAddress("\x0a\x0b\x0c\x0d\x0e\x0e")
- nicID = 1
- hoplimit = 255
- ident = 1
- data = "TEST_INVALID_IPV6_FRAGMENTS"
- )
-
- type fragmentData struct {
- ipv6Fields header.IPv6Fields
- ipv6FragmentFields header.IPv6SerializableFragmentExtHdr
- payload []byte
- }
-
- tests := []struct {
- name string
- fragments []fragmentData
- wantMalformedIPPackets uint64
- wantMalformedFragments uint64
- expectICMP bool
- expectICMPType header.ICMPv6Type
- expectICMPCode header.ICMPv6Code
- expectICMPTypeSpecific uint32
- }{
- {
- name: "fragment size is not a multiple of 8 and the M flag is true",
- fragments: []fragmentData{
- {
- ipv6Fields: header.IPv6Fields{
- PayloadLength: header.IPv6FragmentHeaderSize + 9,
- TransportProtocol: header.UDPProtocolNumber,
- HopLimit: hoplimit,
- SrcAddr: addr1,
- DstAddr: addr2,
- },
- ipv6FragmentFields: header.IPv6SerializableFragmentExtHdr{
- FragmentOffset: 0 >> 3,
- M: true,
- Identification: ident,
- },
- payload: []byte(data)[:9],
- },
- },
- wantMalformedIPPackets: 1,
- wantMalformedFragments: 1,
- expectICMP: true,
- expectICMPType: header.ICMPv6ParamProblem,
- expectICMPCode: header.ICMPv6ErroneousHeader,
- expectICMPTypeSpecific: header.IPv6PayloadLenOffset,
- },
- {
- name: "fragments reassembled into a payload exceeding the max IPv6 payload size",
- fragments: []fragmentData{
- {
- ipv6Fields: header.IPv6Fields{
- PayloadLength: header.IPv6FragmentHeaderSize + 16,
- TransportProtocol: header.UDPProtocolNumber,
- HopLimit: hoplimit,
- SrcAddr: addr1,
- DstAddr: addr2,
- },
- ipv6FragmentFields: header.IPv6SerializableFragmentExtHdr{
- FragmentOffset: ((header.IPv6MaximumPayloadSize + 1) - 16) >> 3,
- M: false,
- Identification: ident,
- },
- payload: []byte(data)[:16],
- },
- },
- wantMalformedIPPackets: 1,
- wantMalformedFragments: 1,
- expectICMP: true,
- expectICMPType: header.ICMPv6ParamProblem,
- expectICMPCode: header.ICMPv6ErroneousHeader,
- expectICMPTypeSpecific: header.IPv6MinimumSize + 2, /* offset for 'Fragment Offset' in the fragment header */
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{
- NewProtocol,
- },
- })
- e := channel.New(1, 1500, linkAddr1)
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
- if err := s.AddAddress(nicID, ProtocolNumber, addr2); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s) = %s", nicID, ProtocolNumber, addr2, err)
- }
- s.SetRouteTable([]tcpip.Route{{
- Destination: header.IPv6EmptySubnet,
- NIC: nicID,
- }})
-
- var expectICMPPayload buffer.View
- for _, f := range test.fragments {
- hdr := buffer.NewPrependable(header.IPv6MinimumSize + header.IPv6FragmentHeaderSize)
-
- ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize + header.IPv6FragmentHeaderSize))
- encodeArgs := f.ipv6Fields
- encodeArgs.ExtensionHeaders = append(encodeArgs.ExtensionHeaders, &f.ipv6FragmentFields)
- ip.Encode(&encodeArgs)
-
- vv := hdr.View().ToVectorisedView()
- vv.AppendView(f.payload)
-
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: vv,
- })
-
- if test.expectICMP {
- expectICMPPayload = stack.PayloadSince(pkt.NetworkHeader())
- }
-
- e.InjectInbound(ProtocolNumber, pkt)
- }
-
- if got, want := s.Stats().IP.MalformedPacketsReceived.Value(), test.wantMalformedIPPackets; got != want {
- t.Errorf("got Stats.IP.MalformedPacketsReceived = %d, want = %d", got, want)
- }
- if got, want := s.Stats().IP.MalformedFragmentsReceived.Value(), test.wantMalformedFragments; got != want {
- t.Errorf("got Stats.IP.MalformedFragmentsReceived = %d, want = %d", got, want)
- }
-
- reply, ok := e.Read()
- if !test.expectICMP {
- if ok {
- t.Fatalf("unexpected ICMP error message received: %#v", reply)
- }
- return
- }
- if !ok {
- t.Fatal("expected ICMP error message missing")
- }
-
- checker.IPv6(t, stack.PayloadSince(reply.Pkt.NetworkHeader()),
- checker.SrcAddr(addr2),
- checker.DstAddr(addr1),
- checker.IPFullLength(uint16(header.IPv6MinimumSize+header.ICMPv6MinimumSize+expectICMPPayload.Size())),
- checker.ICMPv6(
- checker.ICMPv6Type(test.expectICMPType),
- checker.ICMPv6Code(test.expectICMPCode),
- checker.ICMPv6TypeSpecific(test.expectICMPTypeSpecific),
- checker.ICMPv6Payload(expectICMPPayload),
- ),
- )
- })
- }
-}
-
-func TestFragmentReassemblyTimeout(t *testing.T) {
- const (
- addr1 = "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01"
- addr2 = "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02"
- linkAddr1 = tcpip.LinkAddress("\x0a\x0b\x0c\x0d\x0e\x0e")
- nicID = 1
- hoplimit = 255
- ident = 1
- data = "TEST_FRAGMENT_REASSEMBLY_TIMEOUT"
- )
-
- type fragmentData struct {
- ipv6Fields header.IPv6Fields
- ipv6FragmentFields header.IPv6SerializableFragmentExtHdr
- payload []byte
- }
-
- tests := []struct {
- name string
- fragments []fragmentData
- expectICMP bool
- }{
- {
- name: "first fragment only",
- fragments: []fragmentData{
- {
- ipv6Fields: header.IPv6Fields{
- PayloadLength: header.IPv6FragmentHeaderSize + 16,
- TransportProtocol: header.UDPProtocolNumber,
- HopLimit: hoplimit,
- SrcAddr: addr1,
- DstAddr: addr2,
- },
- ipv6FragmentFields: header.IPv6SerializableFragmentExtHdr{
- FragmentOffset: 0,
- M: true,
- Identification: ident,
- },
- payload: []byte(data)[:16],
- },
- },
- expectICMP: true,
- },
- {
- name: "two first fragments",
- fragments: []fragmentData{
- {
- ipv6Fields: header.IPv6Fields{
- PayloadLength: header.IPv6FragmentHeaderSize + 16,
- TransportProtocol: header.UDPProtocolNumber,
- HopLimit: hoplimit,
- SrcAddr: addr1,
- DstAddr: addr2,
- },
- ipv6FragmentFields: header.IPv6SerializableFragmentExtHdr{
- FragmentOffset: 0,
- M: true,
- Identification: ident,
- },
- payload: []byte(data)[:16],
- },
- {
- ipv6Fields: header.IPv6Fields{
- PayloadLength: header.IPv6FragmentHeaderSize + 16,
- TransportProtocol: header.UDPProtocolNumber,
- HopLimit: hoplimit,
- SrcAddr: addr1,
- DstAddr: addr2,
- },
- ipv6FragmentFields: header.IPv6SerializableFragmentExtHdr{
- FragmentOffset: 0,
- M: true,
- Identification: ident,
- },
- payload: []byte(data)[:16],
- },
- },
- expectICMP: true,
- },
- {
- name: "second fragment only",
- fragments: []fragmentData{
- {
- ipv6Fields: header.IPv6Fields{
- PayloadLength: uint16(header.IPv6FragmentHeaderSize + len(data) - 16),
- TransportProtocol: header.UDPProtocolNumber,
- HopLimit: hoplimit,
- SrcAddr: addr1,
- DstAddr: addr2,
- },
- ipv6FragmentFields: header.IPv6SerializableFragmentExtHdr{
- FragmentOffset: 8,
- M: false,
- Identification: ident,
- },
- payload: []byte(data)[16:],
- },
- },
- expectICMP: false,
- },
- {
- name: "two fragments with a gap",
- fragments: []fragmentData{
- {
- ipv6Fields: header.IPv6Fields{
- PayloadLength: header.IPv6FragmentHeaderSize + 16,
- TransportProtocol: header.UDPProtocolNumber,
- HopLimit: hoplimit,
- SrcAddr: addr1,
- DstAddr: addr2,
- },
- ipv6FragmentFields: header.IPv6SerializableFragmentExtHdr{
- FragmentOffset: 0,
- M: true,
- Identification: ident,
- },
- payload: []byte(data)[:16],
- },
- {
- ipv6Fields: header.IPv6Fields{
- PayloadLength: uint16(header.IPv6FragmentHeaderSize + len(data) - 16),
- TransportProtocol: header.UDPProtocolNumber,
- HopLimit: hoplimit,
- SrcAddr: addr1,
- DstAddr: addr2,
- },
- ipv6FragmentFields: header.IPv6SerializableFragmentExtHdr{
- FragmentOffset: 8,
- M: false,
- Identification: ident,
- },
- payload: []byte(data)[16:],
- },
- },
- expectICMP: true,
- },
- {
- name: "two fragments with a gap in reverse order",
- fragments: []fragmentData{
- {
- ipv6Fields: header.IPv6Fields{
- PayloadLength: uint16(header.IPv6FragmentHeaderSize + len(data) - 16),
- TransportProtocol: header.UDPProtocolNumber,
- HopLimit: hoplimit,
- SrcAddr: addr1,
- DstAddr: addr2,
- },
- ipv6FragmentFields: header.IPv6SerializableFragmentExtHdr{
- FragmentOffset: 8,
- M: false,
- Identification: ident,
- },
- payload: []byte(data)[16:],
- },
- {
- ipv6Fields: header.IPv6Fields{
- PayloadLength: header.IPv6FragmentHeaderSize + 16,
- TransportProtocol: header.UDPProtocolNumber,
- HopLimit: hoplimit,
- SrcAddr: addr1,
- DstAddr: addr2,
- },
- ipv6FragmentFields: header.IPv6SerializableFragmentExtHdr{
- FragmentOffset: 0,
- M: true,
- Identification: ident,
- },
- payload: []byte(data)[:16],
- },
- },
- expectICMP: true,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{
- NewProtocol,
- },
- Clock: clock,
- })
-
- e := channel.New(1, 1500, linkAddr1)
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
- if err := s.AddAddress(nicID, ProtocolNumber, addr2); err != nil {
-				t.Fatalf("AddAddress(%d, %d, %s) = %s", nicID, ProtocolNumber, addr2, err)
- }
- s.SetRouteTable([]tcpip.Route{{
- Destination: header.IPv6EmptySubnet,
- NIC: nicID,
- }})
-
- var firstFragmentSent buffer.View
- for _, f := range test.fragments {
- hdr := buffer.NewPrependable(header.IPv6MinimumSize + header.IPv6FragmentHeaderSize)
-
- ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize + header.IPv6FragmentHeaderSize))
- encodeArgs := f.ipv6Fields
- encodeArgs.ExtensionHeaders = append(encodeArgs.ExtensionHeaders, &f.ipv6FragmentFields)
- ip.Encode(&encodeArgs)
-
- fragHDR := header.IPv6Fragment(hdr.View()[header.IPv6MinimumSize:])
-
- vv := hdr.View().ToVectorisedView()
- vv.AppendView(f.payload)
-
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: vv,
- })
-
- if firstFragmentSent == nil && fragHDR.FragmentOffset() == 0 {
- firstFragmentSent = stack.PayloadSince(pkt.NetworkHeader())
- }
-
- e.InjectInbound(ProtocolNumber, pkt)
- }
-
- clock.Advance(ReassembleTimeout)
-
- reply, ok := e.Read()
- if !test.expectICMP {
- if ok {
- t.Fatalf("unexpected ICMP error message received: %#v", reply)
- }
- return
- }
- if !ok {
- t.Fatal("expected ICMP error message missing")
- }
- if firstFragmentSent == nil {
-				t.Fatalf("received an ICMP error message without sending a first fragment: %#v", reply)
- }
-
- checker.IPv6(t, stack.PayloadSince(reply.Pkt.NetworkHeader()),
- checker.SrcAddr(addr2),
- checker.DstAddr(addr1),
- checker.IPFullLength(uint16(header.IPv6MinimumSize+header.ICMPv6MinimumSize+firstFragmentSent.Size())),
- checker.ICMPv6(
- checker.ICMPv6Type(header.ICMPv6TimeExceeded),
- checker.ICMPv6Code(header.ICMPv6ReassemblyTimeout),
- checker.ICMPv6Payload(firstFragmentSent),
- ),
- )
- })
- }
-}
-
-func TestWriteStats(t *testing.T) {
- const nPackets = 3
- tests := []struct {
- name string
- setup func(*testing.T, *stack.Stack)
- allowPackets int
- expectSent int
- expectOutputDropped int
- expectPostroutingDropped int
- expectWritten int
- }{
- {
- name: "Accept all",
- // No setup needed, tables accept everything by default.
- setup: func(*testing.T, *stack.Stack) {},
- allowPackets: math.MaxInt32,
- expectSent: nPackets,
- expectOutputDropped: 0,
- expectPostroutingDropped: 0,
- expectWritten: nPackets,
- }, {
- name: "Accept all with error",
- // No setup needed, tables accept everything by default.
- setup: func(*testing.T, *stack.Stack) {},
- allowPackets: nPackets - 1,
- expectSent: nPackets - 1,
- expectOutputDropped: 0,
- expectPostroutingDropped: 0,
- expectWritten: nPackets - 1,
- }, {
- name: "Drop all with Output chain",
- setup: func(t *testing.T, stk *stack.Stack) {
- // Install Output DROP rule.
- ipt := stk.IPTables()
- filter := ipt.GetTable(stack.FilterID, true /* ipv6 */)
- ruleIdx := filter.BuiltinChains[stack.Output]
- filter.Rules[ruleIdx].Target = &stack.DropTarget{}
- if err := ipt.ReplaceTable(stack.FilterID, filter, true /* ipv6 */); err != nil {
- t.Fatalf("failed to replace table: %v", err)
- }
- },
- allowPackets: math.MaxInt32,
- expectSent: 0,
- expectOutputDropped: nPackets,
- expectPostroutingDropped: 0,
- expectWritten: nPackets,
- }, {
- name: "Drop all with Postrouting chain",
- setup: func(t *testing.T, stk *stack.Stack) {
-				// Install Postrouting DROP rule.
- ipt := stk.IPTables()
- filter := ipt.GetTable(stack.NATID, true /* ipv6 */)
- ruleIdx := filter.BuiltinChains[stack.Postrouting]
- filter.Rules[ruleIdx].Target = &stack.DropTarget{}
- if err := ipt.ReplaceTable(stack.NATID, filter, true /* ipv6 */); err != nil {
- t.Fatalf("failed to replace table: %v", err)
- }
- },
- allowPackets: math.MaxInt32,
- expectSent: 0,
- expectOutputDropped: 0,
- expectPostroutingDropped: nPackets,
- expectWritten: nPackets,
- }, {
- name: "Drop some with Output chain",
- setup: func(t *testing.T, stk *stack.Stack) {
- // Install Output DROP rule that matches only 1
- // of the 3 packets.
- ipt := stk.IPTables()
- filter := ipt.GetTable(stack.FilterID, true /* ipv6 */)
- // We'll match and DROP the last packet.
- ruleIdx := filter.BuiltinChains[stack.Output]
- filter.Rules[ruleIdx].Target = &stack.DropTarget{}
- filter.Rules[ruleIdx].Matchers = []stack.Matcher{&limitedMatcher{nPackets - 1}}
- // Make sure the next rule is ACCEPT.
- filter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}
- if err := ipt.ReplaceTable(stack.FilterID, filter, true /* ipv6 */); err != nil {
- t.Fatalf("failed to replace table: %v", err)
- }
- },
- allowPackets: math.MaxInt32,
- expectSent: nPackets - 1,
- expectOutputDropped: 1,
- expectPostroutingDropped: 0,
- expectWritten: nPackets,
- }, {
- name: "Drop some with Postrouting chain",
- setup: func(t *testing.T, stk *stack.Stack) {
- // Install Postrouting DROP rule that matches only 1
- // of the 3 packets.
- ipt := stk.IPTables()
- filter := ipt.GetTable(stack.NATID, true /* ipv6 */)
- // We'll match and DROP the last packet.
- ruleIdx := filter.BuiltinChains[stack.Postrouting]
- filter.Rules[ruleIdx].Target = &stack.DropTarget{}
- filter.Rules[ruleIdx].Matchers = []stack.Matcher{&limitedMatcher{nPackets - 1}}
- // Make sure the next rule is ACCEPT.
- filter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}
- if err := ipt.ReplaceTable(stack.NATID, filter, true /* ipv6 */); err != nil {
- t.Fatalf("failed to replace table: %v", err)
- }
- },
- allowPackets: math.MaxInt32,
- expectSent: nPackets - 1,
- expectOutputDropped: 0,
- expectPostroutingDropped: 1,
- expectWritten: nPackets,
- },
- }
-
- writers := []struct {
- name string
- writePackets func(*stack.Route, stack.PacketBufferList) (int, tcpip.Error)
- }{
- {
- name: "WritePacket",
- writePackets: func(rt *stack.Route, pkts stack.PacketBufferList) (int, tcpip.Error) {
- nWritten := 0
- for pkt := pkts.Front(); pkt != nil; pkt = pkt.Next() {
- if err := rt.WritePacket(stack.NetworkHeaderParams{}, pkt); err != nil {
- return nWritten, err
- }
- nWritten++
- }
- return nWritten, nil
- },
- }, {
- name: "WritePackets",
- writePackets: func(rt *stack.Route, pkts stack.PacketBufferList) (int, tcpip.Error) {
- return rt.WritePackets(pkts, stack.NetworkHeaderParams{})
- },
- },
- }
-
- for _, writer := range writers {
- t.Run(writer.name, func(t *testing.T) {
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- ep := iptestutil.NewMockLinkEndpoint(header.IPv6MinimumMTU, &tcpip.ErrInvalidEndpointState{}, test.allowPackets)
- rt := buildRoute(t, ep)
- var pkts stack.PacketBufferList
- for i := 0; i < nPackets; i++ {
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: header.UDPMinimumSize + int(rt.MaxHeaderLength()),
- Data: buffer.NewView(0).ToVectorisedView(),
- })
- pkt.TransportHeader().Push(header.UDPMinimumSize)
- pkts.PushBack(pkt)
- }
-
- test.setup(t, rt.Stack())
-
- nWritten, _ := writer.writePackets(rt, pkts)
-
- if got := int(rt.Stats().IP.PacketsSent.Value()); got != test.expectSent {
- t.Errorf("got rt.Stats().IP.PacketsSent.Value() = %d, want = %d", got, test.expectSent)
- }
- if got := int(rt.Stats().IP.IPTablesOutputDropped.Value()); got != test.expectOutputDropped {
- t.Errorf("got rt.Stats().IP.IPTablesOutputDropped.Value() = %d, want = %d", got, test.expectOutputDropped)
- }
- if got := int(rt.Stats().IP.IPTablesPostroutingDropped.Value()); got != test.expectPostroutingDropped {
-						t.Errorf("got rt.Stats().IP.IPTablesPostroutingDropped.Value() = %d, want = %d", got, test.expectPostroutingDropped)
- }
- if nWritten != test.expectWritten {
- t.Errorf("got nWritten = %d, want = %d", nWritten, test.expectWritten)
- }
- })
- }
- })
- }
-}
-
-func buildRoute(t *testing.T, ep stack.LinkEndpoint) *stack.Route {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},
- })
- if err := s.CreateNIC(1, ep); err != nil {
- t.Fatalf("CreateNIC(1, _) failed: %s", err)
- }
- const (
- src = "\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01"
- dst = "\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02"
- )
- if err := s.AddAddress(1, ProtocolNumber, src); err != nil {
- t.Fatalf("AddAddress(1, %d, %s) failed: %s", ProtocolNumber, src, err)
- }
- {
- mask := tcpip.AddressMask("\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff")
- subnet, err := tcpip.NewSubnet(dst, mask)
- if err != nil {
- t.Fatalf("NewSubnet(%s, %s) failed: %v", dst, mask, err)
- }
- s.SetRouteTable([]tcpip.Route{{
- Destination: subnet,
- NIC: 1,
- }})
- }
- rt, err := s.FindRoute(1, src, dst, ProtocolNumber, false /* multicastLoop */)
- if err != nil {
- t.Fatalf("FindRoute(1, %s, %s, %d, false) = %s, want = nil", src, dst, ProtocolNumber, err)
- }
- return rt
-}
-
-// limitedMatcher is an iptables matcher that matches after a certain number of
-// packets are checked against it.
-type limitedMatcher struct {
- limit int
-}
-
-// Name implements Matcher.Name.
-func (*limitedMatcher) Name() string {
- return "limitedMatcher"
-}
-
-// Match implements Matcher.Match.
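-// It reports a match only after `limit` packets have been checked against it;
-// from then on every packet matches. The second return value (the iptables
-// "hotdrop" signal) is always false.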
-func (lm *limitedMatcher) Match(stack.Hook, *stack.PacketBuffer, string, string) (bool, bool) {
- if lm.limit == 0 {
- return true, false
- }
- lm.limit--
- return false, false
-}
-
-func knownNICIDs(proto *protocol) []tcpip.NICID {
- var nicIDs []tcpip.NICID
-
- for k := range proto.mu.eps {
- nicIDs = append(nicIDs, k)
- }
-
- return nicIDs
-}
-
-func TestClearEndpointFromProtocolOnClose(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},
- })
- proto := s.NetworkProtocolInstance(ProtocolNumber).(*protocol)
- var nic testInterface
- ep := proto.NewEndpoint(&nic, nil).(*endpoint)
- var nicIDs []tcpip.NICID
-
- proto.mu.Lock()
- foundEP, hasEndpointBeforeClose := proto.mu.eps[nic.ID()]
- nicIDs = knownNICIDs(proto)
- proto.mu.Unlock()
- if !hasEndpointBeforeClose {
- t.Fatalf("expected to find the nic id %d in the protocol's known nic ids (%v)", nic.ID(), nicIDs)
- }
- if foundEP != ep {
- t.Fatalf("found an incorrect endpoint mapped to nic id %d", nic.ID())
- }
-
- ep.Close()
-
- proto.mu.Lock()
- _, hasEndpointAfterClose := proto.mu.eps[nic.ID()]
- nicIDs = knownNICIDs(proto)
- proto.mu.Unlock()
- if hasEndpointAfterClose {
- t.Fatalf("unexpectedly found an endpoint mapped to the nic id %d in the protocol's known nic ids (%v)", nic.ID(), nicIDs)
- }
-}
-
-type fragmentInfo struct {
- offset uint16
- more bool
- payloadSize uint16
-}
-
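-// In fragmentationTests, offset is the Fragment Offset carried in the Fragment
-// extension header (in 8-octet units) and payloadSize is the IPv6 payload
-// length expected for each emitted packet, including the Fragment extension
-// header when the packet is actually fragmented.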
-var fragmentationTests = []struct {
- description string
- mtu uint32
- transHdrLen int
- payloadSize int
- wantFragments []fragmentInfo
-}{
- {
- description: "No fragmentation",
- mtu: header.IPv6MinimumMTU,
- transHdrLen: 0,
- payloadSize: 1000,
- wantFragments: []fragmentInfo{
- {offset: 0, payloadSize: 1000, more: false},
- },
- },
- {
- description: "Fragmented",
- mtu: header.IPv6MinimumMTU,
- transHdrLen: 0,
- payloadSize: 2000,
- wantFragments: []fragmentInfo{
- {offset: 0, payloadSize: 1240, more: true},
- {offset: 154, payloadSize: 776, more: false},
- },
- },
- {
- description: "Fragmented with mtu not a multiple of 8",
- mtu: header.IPv6MinimumMTU + 1,
- transHdrLen: 0,
- payloadSize: 2000,
- wantFragments: []fragmentInfo{
- {offset: 0, payloadSize: 1240, more: true},
- {offset: 154, payloadSize: 776, more: false},
- },
- },
- {
- description: "No fragmentation with big header",
- mtu: 2000,
- transHdrLen: 100,
- payloadSize: 1000,
- wantFragments: []fragmentInfo{
- {offset: 0, payloadSize: 1100, more: false},
- },
- },
- {
- description: "Fragmented with big header",
- mtu: header.IPv6MinimumMTU,
- transHdrLen: 100,
- payloadSize: 1200,
- wantFragments: []fragmentInfo{
- {offset: 0, payloadSize: 1240, more: true},
- {offset: 154, payloadSize: 76, more: false},
- },
- },
-}
-
-func TestFragmentationWritePacket(t *testing.T) {
- const ttl = 42
-
- for _, ft := range fragmentationTests {
- t.Run(ft.description, func(t *testing.T) {
- pkt := iptestutil.MakeRandPkt(ft.transHdrLen, extraHeaderReserve+header.IPv6MinimumSize, []int{ft.payloadSize}, header.IPv6ProtocolNumber)
- source := pkt.Clone()
- ep := iptestutil.NewMockLinkEndpoint(ft.mtu, nil, math.MaxInt32)
- r := buildRoute(t, ep)
- err := r.WritePacket(stack.NetworkHeaderParams{
- Protocol: tcp.ProtocolNumber,
- TTL: ttl,
- TOS: stack.DefaultTOS,
- }, pkt)
- if err != nil {
- t.Fatalf("WritePacket(_, _, _): = %s", err)
- }
- if got := len(ep.WrittenPackets); got != len(ft.wantFragments) {
- t.Errorf("got len(ep.WrittenPackets) = %d, want = %d", got, len(ft.wantFragments))
- }
- if got := int(r.Stats().IP.PacketsSent.Value()); got != len(ft.wantFragments) {
-				t.Errorf("got r.Stats().IP.PacketsSent.Value() = %d, want = %d", got, len(ft.wantFragments))
- }
- if got := r.Stats().IP.OutgoingPacketErrors.Value(); got != 0 {
- t.Errorf("got r.Stats().IP.OutgoingPacketErrors.Value() = %d, want = 0", got)
- }
- if err := compareFragments(ep.WrittenPackets, source, ft.mtu, ft.wantFragments, tcp.ProtocolNumber); err != nil {
- t.Error(err)
- }
- })
- }
-}
-
-func TestFragmentationWritePackets(t *testing.T) {
- const ttl = 42
- tests := []struct {
- description string
- insertBefore int
- insertAfter int
- }{
- {
- description: "Single packet",
- insertBefore: 0,
- insertAfter: 0,
- },
- {
- description: "With packet before",
- insertBefore: 1,
- insertAfter: 0,
- },
- {
- description: "With packet after",
- insertBefore: 0,
- insertAfter: 1,
- },
- {
- description: "With packet before and after",
- insertBefore: 1,
- insertAfter: 1,
- },
- }
- tinyPacket := iptestutil.MakeRandPkt(header.TCPMinimumSize, extraHeaderReserve+header.IPv6MinimumSize, []int{1}, header.IPv6ProtocolNumber)
-
- for _, test := range tests {
- t.Run(test.description, func(t *testing.T) {
- for _, ft := range fragmentationTests {
- t.Run(ft.description, func(t *testing.T) {
- var pkts stack.PacketBufferList
- for i := 0; i < test.insertBefore; i++ {
- pkts.PushBack(tinyPacket.Clone())
- }
- pkt := iptestutil.MakeRandPkt(ft.transHdrLen, extraHeaderReserve+header.IPv6MinimumSize, []int{ft.payloadSize}, header.IPv6ProtocolNumber)
- source := pkt
- pkts.PushBack(pkt.Clone())
- for i := 0; i < test.insertAfter; i++ {
- pkts.PushBack(tinyPacket.Clone())
- }
-
- ep := iptestutil.NewMockLinkEndpoint(ft.mtu, nil, math.MaxInt32)
- r := buildRoute(t, ep)
-
- wantTotalPackets := len(ft.wantFragments) + test.insertBefore + test.insertAfter
- n, err := r.WritePackets(pkts, stack.NetworkHeaderParams{
- Protocol: tcp.ProtocolNumber,
- TTL: ttl,
- TOS: stack.DefaultTOS,
- })
- if n != wantTotalPackets || err != nil {
- t.Errorf("got WritePackets(_, _, _) = (%d, %s), want = (%d, nil)", n, err, wantTotalPackets)
- }
- if got := len(ep.WrittenPackets); got != wantTotalPackets {
- t.Errorf("got len(ep.WrittenPackets) = %d, want = %d", got, wantTotalPackets)
- }
- if got := int(r.Stats().IP.PacketsSent.Value()); got != wantTotalPackets {
-						t.Errorf("got r.Stats().IP.PacketsSent.Value() = %d, want = %d", got, wantTotalPackets)
- }
- if got := r.Stats().IP.OutgoingPacketErrors.Value(); got != 0 {
- t.Errorf("got r.Stats().IP.OutgoingPacketErrors.Value() = %d, want = 0", got)
- }
-
- if wantTotalPackets == 0 {
- return
- }
-
- fragments := ep.WrittenPackets[test.insertBefore : len(ft.wantFragments)+test.insertBefore]
- if err := compareFragments(fragments, source, ft.mtu, ft.wantFragments, tcp.ProtocolNumber); err != nil {
- t.Error(err)
- }
- })
- }
- })
- }
-}
-
-// TestFragmentationErrors checks that errors are returned from WritePacket
-// correctly.
-func TestFragmentationErrors(t *testing.T) {
- const ttl = 42
-
- tests := []struct {
- description string
- mtu uint32
- transHdrLen int
- payloadSize int
- allowPackets int
- outgoingErrors int
- mockError tcpip.Error
- wantError tcpip.Error
- }{
- {
- description: "No frag",
- mtu: 2000,
- payloadSize: 1000,
- transHdrLen: 0,
- allowPackets: 0,
- outgoingErrors: 1,
- mockError: &tcpip.ErrAborted{},
- wantError: &tcpip.ErrAborted{},
- },
- {
- description: "Error on first frag",
- mtu: 1300,
- payloadSize: 3000,
- transHdrLen: 0,
- allowPackets: 0,
- outgoingErrors: 3,
- mockError: &tcpip.ErrAborted{},
- wantError: &tcpip.ErrAborted{},
- },
- {
- description: "Error on second frag",
- mtu: 1500,
- payloadSize: 4000,
- transHdrLen: 0,
- allowPackets: 1,
- outgoingErrors: 2,
- mockError: &tcpip.ErrAborted{},
- wantError: &tcpip.ErrAborted{},
- },
- {
- description: "Error when MTU is smaller than transport header",
- mtu: header.IPv6MinimumMTU,
- transHdrLen: 1500,
- payloadSize: 500,
- allowPackets: 0,
- outgoingErrors: 1,
- mockError: nil,
- wantError: &tcpip.ErrMessageTooLong{},
- },
- {
- description: "Error when MTU is smaller than IPv6 minimum MTU",
- mtu: header.IPv6MinimumMTU - 1,
- transHdrLen: 0,
- payloadSize: 500,
- allowPackets: 0,
- outgoingErrors: 1,
- mockError: nil,
- wantError: &tcpip.ErrInvalidEndpointState{},
- },
- }
-
- for _, ft := range tests {
- t.Run(ft.description, func(t *testing.T) {
- pkt := iptestutil.MakeRandPkt(ft.transHdrLen, extraHeaderReserve+header.IPv6MinimumSize, []int{ft.payloadSize}, header.IPv6ProtocolNumber)
- ep := iptestutil.NewMockLinkEndpoint(ft.mtu, ft.mockError, ft.allowPackets)
- r := buildRoute(t, ep)
- err := r.WritePacket(stack.NetworkHeaderParams{
- Protocol: tcp.ProtocolNumber,
- TTL: ttl,
- TOS: stack.DefaultTOS,
- }, pkt)
- if diff := cmp.Diff(ft.wantError, err); diff != "" {
- t.Errorf("unexpected error from WritePacket(_, _, _), (-want, +got):\n%s", diff)
- }
- if got := int(r.Stats().IP.PacketsSent.Value()); got != ft.allowPackets {
- t.Errorf("got r.Stats().IP.PacketsSent.Value() = %d, want = %d", got, ft.allowPackets)
- }
- if got := int(r.Stats().IP.OutgoingPacketErrors.Value()); got != ft.outgoingErrors {
- t.Errorf("got r.Stats().IP.OutgoingPacketErrors.Value() = %d, want = %d", got, ft.outgoingErrors)
- }
- })
- }
-}
-
-func TestForwarding(t *testing.T) {
- const (
- incomingNICID = 1
- outgoingNICID = 2
- randomSequence = 123
- randomIdent = 42
- )
-
- incomingIPv6Addr := tcpip.AddressWithPrefix{
- Address: tcpip.Address(net.ParseIP("10::1").To16()),
- PrefixLen: 64,
- }
- outgoingIPv6Addr := tcpip.AddressWithPrefix{
- Address: tcpip.Address(net.ParseIP("11::1").To16()),
- PrefixLen: 64,
- }
- multicastIPv6Addr := tcpip.AddressWithPrefix{
- Address: tcpip.Address(net.ParseIP("ff00::").To16()),
- PrefixLen: 64,
- }
-
- remoteIPv6Addr1 := tcpip.Address(net.ParseIP("10::2").To16())
- remoteIPv6Addr2 := tcpip.Address(net.ParseIP("11::2").To16())
- unreachableIPv6Addr := tcpip.Address(net.ParseIP("12::2").To16())
- linkLocalIPv6Addr := tcpip.Address(net.ParseIP("fe80::").To16())
-
- tests := []struct {
- name string
- extHdr func(nextHdr uint8) ([]byte, uint8, checker.NetworkChecker)
- TTL uint8
- expectErrorICMP bool
- expectPacketForwarded bool
- payloadLength int
- countUnrouteablePackets uint64
- sourceAddr tcpip.Address
- destAddr tcpip.Address
- icmpType header.ICMPv6Type
- icmpCode header.ICMPv6Code
- expectPacketUnrouteableError bool
- expectLinkLocalSourceError bool
- expectLinkLocalDestError bool
- expectExtensionHeaderError bool
- }{
- {
- name: "TTL of zero",
- TTL: 0,
- expectErrorICMP: true,
- sourceAddr: remoteIPv6Addr1,
- destAddr: remoteIPv6Addr2,
- icmpType: header.ICMPv6TimeExceeded,
- icmpCode: header.ICMPv6HopLimitExceeded,
- },
- {
- name: "TTL of one",
- TTL: 1,
- expectErrorICMP: true,
- sourceAddr: remoteIPv6Addr1,
- destAddr: remoteIPv6Addr2,
- icmpType: header.ICMPv6TimeExceeded,
- icmpCode: header.ICMPv6HopLimitExceeded,
- },
- {
- name: "TTL of two",
- TTL: 2,
- expectPacketForwarded: true,
- sourceAddr: remoteIPv6Addr1,
- destAddr: remoteIPv6Addr2,
- },
- {
- name: "TTL of three",
- TTL: 3,
- expectPacketForwarded: true,
- sourceAddr: remoteIPv6Addr1,
- destAddr: remoteIPv6Addr2,
- },
- {
- name: "Max TTL",
- TTL: math.MaxUint8,
- expectPacketForwarded: true,
- sourceAddr: remoteIPv6Addr1,
- destAddr: remoteIPv6Addr2,
- },
- {
- name: "Network unreachable",
- TTL: 2,
- expectErrorICMP: true,
- sourceAddr: remoteIPv6Addr1,
- destAddr: unreachableIPv6Addr,
- icmpType: header.ICMPv6DstUnreachable,
- icmpCode: header.ICMPv6NetworkUnreachable,
- expectPacketUnrouteableError: true,
- },
- {
- name: "Multicast destination",
- TTL: 2,
- countUnrouteablePackets: 1,
- sourceAddr: remoteIPv6Addr1,
- destAddr: multicastIPv6Addr.Address,
- expectPacketForwarded: true,
- },
- {
- name: "Link local destination",
- TTL: 2,
- sourceAddr: remoteIPv6Addr1,
- destAddr: linkLocalIPv6Addr,
- expectLinkLocalDestError: true,
- },
- {
- name: "Link local source",
- TTL: 2,
- sourceAddr: linkLocalIPv6Addr,
- destAddr: remoteIPv6Addr2,
- expectLinkLocalSourceError: true,
- },
- {
- name: "Hopbyhop with unknown option skippable action",
- TTL: 2,
- sourceAddr: remoteIPv6Addr1,
- destAddr: remoteIPv6Addr2,
- extHdr: func(nextHdr uint8) ([]byte, uint8, checker.NetworkChecker) {
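-				// The two high-order bits of an option type encode the action to
-				// take when the option is not recognized: 00 = skip, 01 = discard,
-				// 10 = discard and send an ICMP Parameter Problem, 11 = discard
-				// and send an ICMP Parameter Problem only if the destination is
-				// not a multicast address.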
- return []byte{
- nextHdr, 1,
-
- // Skippable unknown.
- 63, 4, 1, 2, 3, 4,
-
- // Skippable unknown.
- 62, 6, 1, 2, 3, 4, 5, 6,
- }, hopByHopExtHdrID, checker.IPv6ExtHdr(checker.IPv6HopByHopExtensionHeader(checker.IPv6UnknownOption(), checker.IPv6UnknownOption()))
- },
- expectPacketForwarded: true,
- },
- {
- name: "Hopbyhop with unknown option discard action",
- TTL: 2,
- sourceAddr: remoteIPv6Addr1,
- destAddr: remoteIPv6Addr2,
- extHdr: func(nextHdr uint8) ([]byte, uint8, checker.NetworkChecker) {
- return []byte{
- nextHdr, 1,
-
- // Skippable unknown.
- 63, 4, 1, 2, 3, 4,
-
- // Discard unknown.
- 127, 6, 1, 2, 3, 4, 5, 6,
- }, hopByHopExtHdrID, nil
- },
- expectExtensionHeaderError: true,
- },
- {
- name: "Hopbyhop with unknown option discard and send icmp action (unicast)",
- TTL: 2,
- sourceAddr: remoteIPv6Addr1,
- destAddr: remoteIPv6Addr2,
- extHdr: func(nextHdr uint8) ([]byte, uint8, checker.NetworkChecker) {
- return []byte{
- nextHdr, 1,
-
- // Skippable unknown.
- 63, 4, 1, 2, 3, 4,
-
- // Discard & send ICMP if option is unknown.
- 191, 6, 1, 2, 3, 4, 5, 6,
- }, hopByHopExtHdrID, nil
- },
- expectErrorICMP: true,
- icmpType: header.ICMPv6ParamProblem,
- icmpCode: header.ICMPv6UnknownOption,
- expectExtensionHeaderError: true,
- },
- {
- name: "Hopbyhop with unknown option discard and send icmp action (multicast)",
- TTL: 2,
- sourceAddr: remoteIPv6Addr1,
- destAddr: multicastIPv6Addr.Address,
- extHdr: func(nextHdr uint8) ([]byte, uint8, checker.NetworkChecker) {
- return []byte{
- nextHdr, 1,
-
- // Skippable unknown.
- 63, 4, 1, 2, 3, 4,
-
- // Discard & send ICMP if option is unknown.
- 191, 6, 1, 2, 3, 4, 5, 6,
- }, hopByHopExtHdrID, nil
- },
- expectErrorICMP: true,
- icmpType: header.ICMPv6ParamProblem,
- icmpCode: header.ICMPv6UnknownOption,
- expectExtensionHeaderError: true,
- },
- {
- name: "Hopbyhop with unknown option discard and send icmp action unless multicast dest (unicast)",
- TTL: 2,
- sourceAddr: remoteIPv6Addr1,
- destAddr: remoteIPv6Addr2,
- extHdr: func(nextHdr uint8) ([]byte, uint8, checker.NetworkChecker) {
- return []byte{
- nextHdr, 1,
-
- // Skippable unknown.
- 63, 4, 1, 2, 3, 4,
-
- // Discard & send ICMP unless packet is for multicast destination if
- // option is unknown.
- 255, 6, 1, 2, 3, 4, 5, 6,
- }, hopByHopExtHdrID, nil
- },
- expectErrorICMP: true,
- icmpType: header.ICMPv6ParamProblem,
- icmpCode: header.ICMPv6UnknownOption,
- expectExtensionHeaderError: true,
- },
- {
- name: "Hopbyhop with unknown option discard and send icmp action unless multicast dest (multicast)",
- TTL: 2,
- sourceAddr: remoteIPv6Addr1,
- destAddr: multicastIPv6Addr.Address,
- extHdr: func(nextHdr uint8) ([]byte, uint8, checker.NetworkChecker) {
- return []byte{
- nextHdr, 1,
-
- // Skippable unknown.
- 63, 4, 1, 2, 3, 4,
-
- // Discard & send ICMP unless packet is for multicast destination if
- // option is unknown.
- 255, 6, 1, 2, 3, 4, 5, 6,
- }, hopByHopExtHdrID, nil
- },
- expectExtensionHeaderError: true,
- },
- {
- name: "Hopbyhop with router alert option",
- TTL: 2,
- sourceAddr: remoteIPv6Addr1,
- destAddr: remoteIPv6Addr2,
- extHdr: func(nextHdr uint8) ([]byte, uint8, checker.NetworkChecker) {
- return []byte{
- nextHdr, 0,
-
- // Router Alert option.
- 5, 2, 0, 0, 0, 0,
- }, hopByHopExtHdrID, checker.IPv6ExtHdr(checker.IPv6HopByHopExtensionHeader(checker.IPv6RouterAlert(header.IPv6RouterAlertMLD)))
- },
- expectPacketForwarded: true,
- },
- {
- name: "Hopbyhop with two router alert options",
- TTL: 2,
- sourceAddr: remoteIPv6Addr1,
- destAddr: remoteIPv6Addr2,
- extHdr: func(nextHdr uint8) ([]byte, uint8, checker.NetworkChecker) {
- return []byte{
- nextHdr, 1,
-
- // Router Alert option.
- 5, 2, 0, 0, 0, 0,
-
- // Router Alert option.
- 5, 2, 0, 0, 0, 0,
- }, hopByHopExtHdrID, nil
- },
- expectExtensionHeaderError: true,
- },
- {
- name: "Can't fragment",
- TTL: 2,
- payloadLength: header.IPv6MinimumMTU + 1,
- expectErrorICMP: true,
- sourceAddr: remoteIPv6Addr1,
- destAddr: remoteIPv6Addr2,
- icmpType: header.ICMPv6PacketTooBig,
- icmpCode: header.ICMPv6UnusedCode,
- },
- {
- name: "Can't fragment multicast",
- TTL: 2,
- payloadLength: header.IPv6MinimumMTU + 1,
- sourceAddr: remoteIPv6Addr1,
- destAddr: multicastIPv6Addr.Address,
- expectErrorICMP: true,
- icmpType: header.ICMPv6PacketTooBig,
- icmpCode: header.ICMPv6UnusedCode,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{icmp.NewProtocol6},
- })
- // We expect at most a single packet in response to our ICMP Echo Request.
- incomingEndpoint := channel.New(1, header.IPv6MinimumMTU, "")
- if err := s.CreateNIC(incomingNICID, incomingEndpoint); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", incomingNICID, err)
- }
- incomingIPv6ProtoAddr := tcpip.ProtocolAddress{Protocol: ProtocolNumber, AddressWithPrefix: incomingIPv6Addr}
- if err := s.AddProtocolAddress(incomingNICID, incomingIPv6ProtoAddr); err != nil {
- t.Fatalf("AddProtocolAddress(%d, %#v): %s", incomingNICID, incomingIPv6ProtoAddr, err)
- }
-
- outgoingEndpoint := channel.New(1, header.IPv6MinimumMTU, "")
- if err := s.CreateNIC(outgoingNICID, outgoingEndpoint); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", outgoingNICID, err)
- }
- outgoingIPv6ProtoAddr := tcpip.ProtocolAddress{Protocol: ProtocolNumber, AddressWithPrefix: outgoingIPv6Addr}
- if err := s.AddProtocolAddress(outgoingNICID, outgoingIPv6ProtoAddr); err != nil {
- t.Fatalf("AddProtocolAddress(%d, %#v): %s", outgoingNICID, outgoingIPv6ProtoAddr, err)
- }
-
- s.SetRouteTable([]tcpip.Route{
- {
- Destination: incomingIPv6Addr.Subnet(),
- NIC: incomingNICID,
- },
- {
- Destination: outgoingIPv6Addr.Subnet(),
- NIC: outgoingNICID,
- },
- {
- Destination: multicastIPv6Addr.Subnet(),
- NIC: outgoingNICID,
- },
- })
-
- if err := s.SetForwardingDefaultAndAllNICs(ProtocolNumber, true); err != nil {
- t.Fatalf("SetForwardingDefaultAndAllNICs(%d, true): %s", ProtocolNumber, err)
- }
-
- transportProtocol := header.ICMPv6ProtocolNumber
- var extHdrBytes []byte
- extHdrChecker := checker.IPv6ExtHdr()
- if test.extHdr != nil {
- nextHdrID := hopByHopExtHdrID
- extHdrBytes, nextHdrID, extHdrChecker = test.extHdr(uint8(header.ICMPv6ProtocolNumber))
- transportProtocol = tcpip.TransportProtocolNumber(nextHdrID)
- }
- extHdrLen := len(extHdrBytes)
-
- ipHeaderLength := header.IPv6MinimumSize
- icmpHeaderLength := header.ICMPv6MinimumSize
- totalLength := ipHeaderLength + icmpHeaderLength + test.payloadLength + extHdrLen
- hdr := buffer.NewPrependable(totalLength)
- hdr.Prepend(test.payloadLength)
- icmpH := header.ICMPv6(hdr.Prepend(icmpHeaderLength))
-
- icmpH.SetIdent(randomIdent)
- icmpH.SetSequence(randomSequence)
- icmpH.SetType(header.ICMPv6EchoRequest)
- icmpH.SetCode(header.ICMPv6UnusedCode)
- icmpH.SetChecksum(0)
- icmpH.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
- Header: icmpH,
- Src: test.sourceAddr,
- Dst: test.destAddr,
- }))
- copy(hdr.Prepend(extHdrLen), extHdrBytes)
- ip := header.IPv6(hdr.Prepend(ipHeaderLength))
- ip.Encode(&header.IPv6Fields{
-				PayloadLength:     uint16(extHdrLen + header.ICMPv6MinimumSize + test.payloadLength),
- TransportProtocol: transportProtocol,
- HopLimit: test.TTL,
- SrcAddr: test.sourceAddr,
- DstAddr: test.destAddr,
- })
- requestPkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: hdr.View().ToVectorisedView(),
- })
- incomingEndpoint.InjectInbound(ProtocolNumber, requestPkt)
-
- reply, ok := incomingEndpoint.Read()
-
- if test.expectErrorICMP {
- if !ok {
- t.Fatalf("expected ICMP packet type %d through incoming NIC", test.icmpType)
- }
-
- // As per RFC 4443, page 9:
- //
- // The returned ICMP packet will contain as much of invoking packet
- // as possible without the ICMPv6 packet exceeding the minimum IPv6
- // MTU.
- expectedICMPPayloadLength := func() int {
- maxICMPPayloadLength := header.IPv6MinimumMTU - ipHeaderLength - icmpHeaderLength
- if len(hdr.View()) > maxICMPPayloadLength {
- return maxICMPPayloadLength
- }
- return len(hdr.View())
- }
-
- checker.IPv6(t, stack.PayloadSince(reply.Pkt.NetworkHeader()),
- checker.SrcAddr(incomingIPv6Addr.Address),
- checker.DstAddr(test.sourceAddr),
- checker.TTL(DefaultTTL),
- checker.ICMPv6(
- checker.ICMPv6Type(test.icmpType),
- checker.ICMPv6Code(test.icmpCode),
- checker.ICMPv6Payload(hdr.View()[:expectedICMPPayloadLength()]),
- ),
- )
-
- if n := outgoingEndpoint.Drain(); n != 0 {
-					t.Fatalf("got outgoingEndpoint.Drain() = %d, want = 0", n)
- }
- } else if ok {
- t.Fatalf("expected no ICMP packet through incoming NIC, instead found: %#v", reply)
- }
-
- reply, ok = outgoingEndpoint.Read()
- if test.expectPacketForwarded {
- if !ok {
- t.Fatal("expected ICMP Echo Request packet through outgoing NIC")
- }
-
- checker.IPv6WithExtHdr(t, stack.PayloadSince(reply.Pkt.NetworkHeader()),
- checker.SrcAddr(test.sourceAddr),
- checker.DstAddr(test.destAddr),
- checker.TTL(test.TTL-1),
- extHdrChecker,
- checker.ICMPv6(
- checker.ICMPv6Type(header.ICMPv6EchoRequest),
- checker.ICMPv6Code(header.ICMPv6UnusedCode),
- checker.ICMPv6Payload(nil),
- ),
- )
-
- if n := incomingEndpoint.Drain(); n != 0 {
-					t.Fatalf("got incomingEndpoint.Drain() = %d, want = 0", n)
- }
- } else if ok {
- t.Fatalf("expected no ICMP Echo packet through outgoing NIC, instead found: %#v", reply)
- }
-
- boolToInt := func(val bool) uint64 {
- if val {
- return 1
- }
- return 0
- }
-
- if got, want := s.Stats().IP.Forwarding.LinkLocalSource.Value(), boolToInt(test.expectLinkLocalSourceError); got != want {
- t.Errorf("got s.Stats().IP.Forwarding.LinkLocalSource.Value() = %d, want = %d", got, want)
- }
-
- if got, want := s.Stats().IP.Forwarding.LinkLocalDestination.Value(), boolToInt(test.expectLinkLocalDestError); got != want {
- t.Errorf("got s.Stats().IP.Forwarding.LinkLocalDestination.Value() = %d, want = %d", got, want)
- }
-
- if got, want := s.Stats().IP.Forwarding.ExhaustedTTL.Value(), boolToInt(test.TTL <= 1); got != want {
-				t.Errorf("got s.Stats().IP.Forwarding.ExhaustedTTL.Value() = %d, want = %d", got, want)
- }
-
- if got, want := s.Stats().IP.Forwarding.Unrouteable.Value(), boolToInt(test.expectPacketUnrouteableError); got != want {
- t.Errorf("got s.Stats().IP.Forwarding.Unrouteable.Value() = %d, want = %d", got, want)
- }
-
- if got, want := s.Stats().IP.Forwarding.Errors.Value(), boolToInt(!test.expectPacketForwarded); got != want {
- t.Errorf("got s.Stats().IP.Forwarding.Errors.Value() = %d, want = %d", got, want)
- }
-
- if got, want := s.Stats().IP.Forwarding.ExtensionHeaderProblem.Value(), boolToInt(test.expectExtensionHeaderError); got != want {
- t.Errorf("got s.Stats().IP.Forwarding.ExtensionHeaderProblem.Value() = %d, want = %d", got, want)
- }
-
- if got, want := s.Stats().IP.Forwarding.PacketTooBig.Value(), boolToInt(test.icmpType == header.ICMPv6PacketTooBig); got != want {
- t.Errorf("got s.Stats().IP.Forwarding.PacketTooBig.Value() = %d, want = %d", got, want)
- }
- })
- }
-}
-
-func TestMultiCounterStatsInitialization(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},
- })
- proto := s.NetworkProtocolInstance(ProtocolNumber).(*protocol)
- var nic testInterface
- ep := proto.NewEndpoint(&nic, nil).(*endpoint)
- // At this point, the Stack's stats and the NetworkEndpoint's stats are
- // supposed to be bound.
- refStack := s.Stats()
- refEP := ep.stats.localStats
- if err := testutil.ValidateMultiCounterStats(reflect.ValueOf(&ep.stats.ip).Elem(), []reflect.Value{reflect.ValueOf(&refStack.IP).Elem(), reflect.ValueOf(&refEP.IP).Elem()}); err != nil {
- t.Error(err)
- }
- if err := testutil.ValidateMultiCounterStats(reflect.ValueOf(&ep.stats.icmp).Elem(), []reflect.Value{reflect.ValueOf(&refStack.ICMP.V6).Elem(), reflect.ValueOf(&refEP.ICMP).Elem()}); err != nil {
- t.Error(err)
- }
-}
diff --git a/pkg/tcpip/network/ipv6/mld_test.go b/pkg/tcpip/network/ipv6/mld_test.go
deleted file mode 100644
index bc9cf6999..000000000
--- a/pkg/tcpip/network/ipv6/mld_test.go
+++ /dev/null
@@ -1,603 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ipv6_test
-
-import (
- "bytes"
- "math/rand"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/checker"
- "gvisor.dev/gvisor/pkg/tcpip/faketime"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/link/channel"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/testutil"
-)
-
-var (
- linkLocalAddr = testutil.MustParse6("fe80::1")
- globalAddr = testutil.MustParse6("a80::1")
- globalMulticastAddr = testutil.MustParse6("ff05:100::2")
-
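-	// Solicited-node multicast addresses are formed by appending the low-order
-	// 24 bits of the unicast address to the prefix ff02::1:ff00:0/104.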
- linkLocalAddrSNMC = header.SolicitedNodeAddr(linkLocalAddr)
- globalAddrSNMC = header.SolicitedNodeAddr(globalAddr)
-)
-
-func validateMLDPacket(t *testing.T, p buffer.View, localAddress, remoteAddress tcpip.Address, mldType header.ICMPv6Type, groupAddress tcpip.Address) {
- t.Helper()
-
- checker.IPv6WithExtHdr(t, p,
- checker.IPv6ExtHdr(
- checker.IPv6HopByHopExtensionHeader(checker.IPv6RouterAlert(header.IPv6RouterAlertMLD)),
- ),
- checker.SrcAddr(localAddress),
- checker.DstAddr(remoteAddress),
- checker.TTL(header.MLDHopLimit),
- checker.MLD(mldType, header.MLDMinimumSize,
- checker.MLDMaxRespDelay(0),
- checker.MLDMulticastAddress(groupAddress),
- ),
- )
-}
-
-func TestIPv6JoinLeaveSolicitedNodeAddressPerformsMLD(t *testing.T) {
- const nicID = 1
-
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- MLD: ipv6.MLDOptions{
- Enabled: true,
- },
- })},
- })
- e := channel.New(1, header.IPv6MinimumMTU, "")
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID, err)
- }
-
- // The stack will join an address's solicited node multicast address when
- // an address is added. An MLD report message should be sent for the
- // solicited-node group.
- if err := s.AddAddress(nicID, ipv6.ProtocolNumber, linkLocalAddr); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s) = %s", nicID, ipv6.ProtocolNumber, linkLocalAddr, err)
- }
- if p, ok := e.Read(); !ok {
- t.Fatal("expected a report message to be sent")
- } else {
- validateMLDPacket(t, stack.PayloadSince(p.Pkt.NetworkHeader()), linkLocalAddr, linkLocalAddrSNMC, header.ICMPv6MulticastListenerReport, linkLocalAddrSNMC)
- }
-
- // The stack will leave an address's solicited node multicast address when
- // an address is removed. An MLD done message should be sent for the
- // solicited-node group.
- if err := s.RemoveAddress(nicID, linkLocalAddr); err != nil {
- t.Fatalf("RemoveAddress(%d, %s) = %s", nicID, linkLocalAddr, err)
- }
- if p, ok := e.Read(); !ok {
- t.Fatal("expected a done message to be sent")
- } else {
- validateMLDPacket(t, stack.PayloadSince(p.Pkt.NetworkHeader()), header.IPv6Any, header.IPv6AllRoutersLinkLocalMulticastAddress, header.ICMPv6MulticastListenerDone, linkLocalAddrSNMC)
- }
-}
-
-func TestSendQueuedMLDReports(t *testing.T) {
- const (
- nicID = 1
- maxReports = 2
- )
-
- tests := []struct {
- name string
- dadTransmits uint8
- retransmitTimer time.Duration
- }{
- {
- name: "DAD Disabled",
- dadTransmits: 0,
- retransmitTimer: 0,
- },
- {
- name: "DAD Enabled",
- dadTransmits: 1,
- retransmitTimer: time.Second,
- },
- }
-
- nonce := [...]byte{
- 1, 2, 3, 4, 5, 6,
- }
-
- const maxNSMessages = 2
- secureRNGBytes := make([]byte, len(nonce)*maxNSMessages)
- for b := secureRNGBytes[:]; len(b) > 0; b = b[len(nonce):] {
- if n := copy(b, nonce[:]); n != len(nonce) {
- t.Fatalf("got copy(...) = %d, want = %d", n, len(nonce))
- }
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- dadResolutionTime := test.retransmitTimer * time.Duration(test.dadTransmits)
- clock := faketime.NewManualClock()
- var secureRNG bytes.Reader
- secureRNG.Reset(secureRNGBytes[:])
- s := stack.New(stack.Options{
- SecureRNG: &secureRNG,
- RandSource: rand.NewSource(time.Now().UnixNano()),
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- DADConfigs: stack.DADConfigurations{
- DupAddrDetectTransmits: test.dadTransmits,
- RetransmitTimer: test.retransmitTimer,
- },
- MLD: ipv6.MLDOptions{
- Enabled: true,
- },
- })},
- Clock: clock,
- })
-
- // Allow space for an extra packet so we can observe packets that were
- // unexpectedly sent.
- e := channel.New(maxReports+int(test.dadTransmits)+1 /* extra */, header.IPv6MinimumMTU, "")
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID, err)
- }
-
- resolveDAD := func(addr, snmc tcpip.Address) {
- clock.Advance(dadResolutionTime)
- if p, ok := e.Read(); !ok {
- t.Fatal("expected DAD packet")
- } else {
- checker.IPv6(t, stack.PayloadSince(p.Pkt.NetworkHeader()),
- checker.SrcAddr(header.IPv6Any),
- checker.DstAddr(snmc),
- checker.TTL(header.NDPHopLimit),
- checker.NDPNS(
- checker.NDPNSTargetAddress(addr),
- checker.NDPNSOptions([]header.NDPOption{header.NDPNonceOption(nonce[:])}),
- ))
- }
- }
-
- var reportCounter uint64
- reportStat := s.Stats().ICMP.V6.PacketsSent.MulticastListenerReport
- if got := reportStat.Value(); got != reportCounter {
- t.Errorf("got reportStat.Value() = %d, want = %d", got, reportCounter)
- }
- var doneCounter uint64
- doneStat := s.Stats().ICMP.V6.PacketsSent.MulticastListenerDone
- if got := doneStat.Value(); got != doneCounter {
- t.Errorf("got doneStat.Value() = %d, want = %d", got, doneCounter)
- }
-
- // Joining a group without an assigned address should send an MLD report
- // with the unspecified address.
- if err := s.JoinGroup(ipv6.ProtocolNumber, nicID, globalMulticastAddr); err != nil {
- t.Fatalf("JoinGroup(%d, %d, %s): %s", ipv6.ProtocolNumber, nicID, globalMulticastAddr, err)
- }
- reportCounter++
- if got := reportStat.Value(); got != reportCounter {
- t.Errorf("got reportStat.Value() = %d, want = %d", got, reportCounter)
- }
- if p, ok := e.Read(); !ok {
- t.Errorf("expected MLD report for %s", globalMulticastAddr)
- } else {
- validateMLDPacket(t, stack.PayloadSince(p.Pkt.NetworkHeader()), header.IPv6Any, globalMulticastAddr, header.ICMPv6MulticastListenerReport, globalMulticastAddr)
- }
- clock.Advance(time.Hour)
- if p, ok := e.Read(); ok {
- t.Errorf("got unexpected packet = %#v", p)
- }
- if t.Failed() {
- t.FailNow()
- }
-
- // Adding a global address should not send reports for the already joined
- // group since we should only send queued reports when a link-local
- // address is assigned.
- //
-			// Note that we still expect a report to be sent for the global address's
-			// solicited node address from the unspecified address, as per RFC 3590
-			// section 4.
- if err := s.AddAddressWithOptions(nicID, ipv6.ProtocolNumber, globalAddr, stack.FirstPrimaryEndpoint); err != nil {
- t.Fatalf("AddAddressWithOptions(%d, %d, %s, %d): %s", nicID, ipv6.ProtocolNumber, globalAddr, stack.FirstPrimaryEndpoint, err)
- }
- reportCounter++
- if got := reportStat.Value(); got != reportCounter {
- t.Errorf("got reportStat.Value() = %d, want = %d", got, reportCounter)
- }
- if p, ok := e.Read(); !ok {
- t.Errorf("expected MLD report for %s", globalAddrSNMC)
- } else {
- validateMLDPacket(t, stack.PayloadSince(p.Pkt.NetworkHeader()), header.IPv6Any, globalAddrSNMC, header.ICMPv6MulticastListenerReport, globalAddrSNMC)
- }
- if dadResolutionTime != 0 {
- // Reports should not be sent when the address resolves.
- resolveDAD(globalAddr, globalAddrSNMC)
- if got := reportStat.Value(); got != reportCounter {
- t.Errorf("got reportStat.Value() = %d, want = %d", got, reportCounter)
- }
- }
- // Leave the group since we don't care about the global address's
- // solicited node multicast group membership.
- if err := s.LeaveGroup(ipv6.ProtocolNumber, nicID, globalAddrSNMC); err != nil {
- t.Fatalf("LeaveGroup(%d, %d, %s): %s", ipv6.ProtocolNumber, nicID, globalAddrSNMC, err)
- }
- if got := doneStat.Value(); got != doneCounter {
- t.Errorf("got doneStat.Value() = %d, want = %d", got, doneCounter)
- }
- if p, ok := e.Read(); ok {
- t.Errorf("got unexpected packet = %#v", p)
- }
- if t.Failed() {
- t.FailNow()
- }
-
- // Adding a link-local address should send a report for its solicited node
- // address and globalMulticastAddr.
- if err := s.AddAddressWithOptions(nicID, ipv6.ProtocolNumber, linkLocalAddr, stack.CanBePrimaryEndpoint); err != nil {
- t.Fatalf("AddAddressWithOptions(%d, %d, %s, %d): %s", nicID, ipv6.ProtocolNumber, linkLocalAddr, stack.CanBePrimaryEndpoint, err)
- }
- if dadResolutionTime != 0 {
- reportCounter++
- if got := reportStat.Value(); got != reportCounter {
- t.Errorf("got reportStat.Value() = %d, want = %d", got, reportCounter)
- }
- if p, ok := e.Read(); !ok {
- t.Errorf("expected MLD report for %s", linkLocalAddrSNMC)
- } else {
- validateMLDPacket(t, stack.PayloadSince(p.Pkt.NetworkHeader()), header.IPv6Any, linkLocalAddrSNMC, header.ICMPv6MulticastListenerReport, linkLocalAddrSNMC)
- }
- resolveDAD(linkLocalAddr, linkLocalAddrSNMC)
- }
-
-			// We expect two batches of reports to be sent (one batch when the
-			// link-local address is assigned, and another after the maximum
-			// unsolicited report interval).
- for i := 0; i < 2; i++ {
- // We expect reports to be sent (one for globalMulticastAddr and another
- // for linkLocalAddrSNMC).
- reportCounter += maxReports
- if got := reportStat.Value(); got != reportCounter {
- t.Errorf("got reportStat.Value() = %d, want = %d", got, reportCounter)
- }
-
- addrs := map[tcpip.Address]bool{
- globalMulticastAddr: false,
- linkLocalAddrSNMC: false,
- }
- for range addrs {
- p, ok := e.Read()
- if !ok {
- t.Fatalf("expected MLD report for %s and %s; addrs = %#v", globalMulticastAddr, linkLocalAddrSNMC, addrs)
- }
-
- addr := header.IPv6(stack.PayloadSince(p.Pkt.NetworkHeader())).DestinationAddress()
- if seen, ok := addrs[addr]; !ok {
- t.Fatalf("got unexpected packet destined to %s", addr)
- } else if seen {
- t.Fatalf("got another packet destined to %s", addr)
- }
-
- addrs[addr] = true
- validateMLDPacket(t, stack.PayloadSince(p.Pkt.NetworkHeader()), linkLocalAddr, addr, header.ICMPv6MulticastListenerReport, addr)
-
- clock.Advance(ipv6.UnsolicitedReportIntervalMax)
- }
- }
-
- // Should not send any more reports.
- clock.Advance(time.Hour)
- if p, ok := e.Read(); ok {
- t.Errorf("got unexpected packet = %#v", p)
- }
- })
- }
-}
-
-// createAndInjectMLDPacket creates and injects an MLD packet with the
-// specified fields.
-func createAndInjectMLDPacket(e *channel.Endpoint, mldType header.ICMPv6Type, hopLimit uint8, srcAddress tcpip.Address, withRouterAlertOption bool, routerAlertValue header.IPv6RouterAlertValue) {
- var extensionHeaders header.IPv6ExtHdrSerializer
- if withRouterAlertOption {
- extensionHeaders = header.IPv6ExtHdrSerializer{
- header.IPv6SerializableHopByHopExtHdr{
- &header.IPv6RouterAlertOption{Value: routerAlertValue},
- },
- }
- }
-
- extensionHeadersLength := extensionHeaders.Length()
- payloadLength := extensionHeadersLength + header.ICMPv6HeaderSize + header.MLDMinimumSize
- buf := buffer.NewView(header.IPv6MinimumSize + payloadLength)
-
- ip := header.IPv6(buf)
- ip.Encode(&header.IPv6Fields{
- PayloadLength: uint16(payloadLength),
- HopLimit: hopLimit,
- TransportProtocol: header.ICMPv6ProtocolNumber,
- SrcAddr: srcAddress,
- DstAddr: header.IPv6AllNodesMulticastAddress,
- ExtensionHeaders: extensionHeaders,
- })
-
- icmp := header.ICMPv6(ip.Payload()[extensionHeadersLength:])
- icmp.SetType(mldType)
- mld := header.MLD(icmp.MessageBody())
- mld.SetMaximumResponseDelay(0)
- mld.SetMulticastAddress(header.IPv6Any)
- icmp.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
- Header: icmp,
- Src: srcAddress,
- Dst: header.IPv6AllNodesMulticastAddress,
- }))
-
- e.InjectInbound(ipv6.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
-}
-
-func TestMLDPacketValidation(t *testing.T) {
- const nicID = 1
- linkLocalAddr2 := testutil.MustParse6("fe80::2")
-
- tests := []struct {
- name string
- messageType header.ICMPv6Type
- srcAddr tcpip.Address
- includeRouterAlertOption bool
- routerAlertValue header.IPv6RouterAlertValue
- hopLimit uint8
- expectValidMLD bool
- getMessageTypeStatValue func(tcpip.Stats) uint64
- }{
- {
- name: "valid",
- messageType: header.ICMPv6MulticastListenerQuery,
- includeRouterAlertOption: true,
- routerAlertValue: header.IPv6RouterAlertMLD,
- srcAddr: linkLocalAddr2,
- hopLimit: header.MLDHopLimit,
- expectValidMLD: true,
- getMessageTypeStatValue: func(stats tcpip.Stats) uint64 { return stats.ICMP.V6.PacketsReceived.MulticastListenerQuery.Value() },
- },
- {
- name: "bad hop limit",
- messageType: header.ICMPv6MulticastListenerReport,
- includeRouterAlertOption: true,
- routerAlertValue: header.IPv6RouterAlertMLD,
- srcAddr: linkLocalAddr2,
- hopLimit: header.MLDHopLimit + 1,
- expectValidMLD: false,
- getMessageTypeStatValue: func(stats tcpip.Stats) uint64 { return stats.ICMP.V6.PacketsReceived.MulticastListenerReport.Value() },
- },
- {
- name: "src ip not link local",
- messageType: header.ICMPv6MulticastListenerReport,
- includeRouterAlertOption: true,
- routerAlertValue: header.IPv6RouterAlertMLD,
- srcAddr: globalAddr,
- hopLimit: header.MLDHopLimit,
- expectValidMLD: false,
- getMessageTypeStatValue: func(stats tcpip.Stats) uint64 { return stats.ICMP.V6.PacketsReceived.MulticastListenerReport.Value() },
- },
- {
- name: "missing router alert ip option",
- messageType: header.ICMPv6MulticastListenerDone,
- includeRouterAlertOption: false,
- srcAddr: linkLocalAddr2,
- hopLimit: header.MLDHopLimit,
- expectValidMLD: false,
- getMessageTypeStatValue: func(stats tcpip.Stats) uint64 { return stats.ICMP.V6.PacketsReceived.MulticastListenerDone.Value() },
- },
- {
- name: "incorrect router alert value",
- messageType: header.ICMPv6MulticastListenerDone,
- includeRouterAlertOption: true,
- routerAlertValue: header.IPv6RouterAlertRSVP,
- srcAddr: linkLocalAddr2,
- hopLimit: header.MLDHopLimit,
- expectValidMLD: false,
- getMessageTypeStatValue: func(stats tcpip.Stats) uint64 { return stats.ICMP.V6.PacketsReceived.MulticastListenerDone.Value() },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- MLD: ipv6.MLDOptions{
- Enabled: true,
- },
- })},
- })
- e := channel.New(nicID, header.IPv6MinimumMTU, "")
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID, err)
- }
- stats := s.Stats()
-			// Verify that every relevant stat is zeroed before we send a packet.
- if got := test.getMessageTypeStatValue(s.Stats()); got != 0 {
- t.Errorf("got test.getMessageTypeStatValue(s.Stats()) = %d, want = 0", got)
- }
- if got := stats.ICMP.V6.PacketsReceived.Invalid.Value(); got != 0 {
- t.Errorf("got stats.ICMP.V6.PacketsReceived.Invalid.Value() = %d, want = 0", got)
- }
- if got := stats.IP.PacketsDelivered.Value(); got != 0 {
- t.Fatalf("got stats.IP.PacketsDelivered.Value() = %d, want = 0", got)
- }
- createAndInjectMLDPacket(e, test.messageType, test.hopLimit, test.srcAddr, test.includeRouterAlertOption, test.routerAlertValue)
- // We always expect the packet to pass IP validation.
- if got := stats.IP.PacketsDelivered.Value(); got != 1 {
- t.Fatalf("got stats.IP.PacketsDelivered.Value() = %d, want = 1", got)
- }
- // Even when the MLD-specific validation checks fail, we expect the
- // corresponding MLD counter to be incremented.
- if got := test.getMessageTypeStatValue(s.Stats()); got != 1 {
- t.Errorf("got test.getMessageTypeStatValue(s.Stats()) = %d, want = 1", got)
- }
- var expectedInvalidCount uint64
- if !test.expectValidMLD {
- expectedInvalidCount = 1
- }
- if got := stats.ICMP.V6.PacketsReceived.Invalid.Value(); got != expectedInvalidCount {
- t.Errorf("got stats.ICMP.V6.PacketsReceived.Invalid.Value() = %d, want = %d", got, expectedInvalidCount)
- }
- })
- }
-}
-
-func TestMLDSkipProtocol(t *testing.T) {
- const nicID = 1
-
- tests := []struct {
- name string
- group tcpip.Address
- expectReport bool
- }{
- {
-			name:         "Reserved0",
- group: "\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11",
- expectReport: false,
- },
- {
- name: "Interface Local",
- group: "\xff\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11",
- expectReport: false,
- },
- {
- name: "Link Local",
- group: "\xff\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11",
- expectReport: true,
- },
- {
- name: "Realm Local",
- group: "\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11",
- expectReport: true,
- },
- {
- name: "Admin Local",
- group: "\xff\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11",
- expectReport: true,
- },
- {
- name: "Site Local",
- group: "\xff\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11",
- expectReport: true,
- },
- {
- name: "Unassigned(6)",
- group: "\xff\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11",
- expectReport: true,
- },
- {
- name: "Unassigned(7)",
- group: "\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11",
- expectReport: true,
- },
- {
- name: "Organization Local",
- group: "\xff\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11",
- expectReport: true,
- },
- {
- name: "Unassigned(9)",
- group: "\xff\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11",
- expectReport: true,
- },
- {
- name: "Unassigned(A)",
- group: "\xff\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11",
- expectReport: true,
- },
- {
- name: "Unassigned(B)",
- group: "\xff\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11",
- expectReport: true,
- },
- {
- name: "Unassigned(C)",
- group: "\xff\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11",
- expectReport: true,
- },
- {
- name: "Unassigned(D)",
- group: "\xff\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11",
- expectReport: true,
- },
- {
- name: "Global",
- group: "\xff\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11",
- expectReport: true,
- },
- {
- name: "ReservedF",
- group: "\xff\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11",
- expectReport: true,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- MLD: ipv6.MLDOptions{
- Enabled: true,
- },
- })},
- })
- e := channel.New(1, header.IPv6MinimumMTU, "")
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID, err)
- }
- if err := s.AddAddress(nicID, ipv6.ProtocolNumber, linkLocalAddr); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s) = %s", nicID, ipv6.ProtocolNumber, linkLocalAddr, err)
- }
- if p, ok := e.Read(); !ok {
- t.Fatal("expected a report message to be sent")
- } else {
- validateMLDPacket(t, stack.PayloadSince(p.Pkt.NetworkHeader()), linkLocalAddr, linkLocalAddrSNMC, header.ICMPv6MulticastListenerReport, linkLocalAddrSNMC)
- }
-
- if err := s.JoinGroup(ipv6.ProtocolNumber, nicID, test.group); err != nil {
- t.Fatalf("s.JoinGroup(%d, %d, %s): %s", ipv6.ProtocolNumber, nicID, test.group, err)
- }
- if isInGroup, err := s.IsInGroup(nicID, test.group); err != nil {
- t.Fatalf("IsInGroup(%d, %s): %s", nicID, test.group, err)
- } else if !isInGroup {
- t.Fatalf("got IsInGroup(%d, %s) = false, want = true", nicID, test.group)
- }
-
- if !test.expectReport {
- if p, ok := e.Read(); ok {
- t.Fatalf("got e.Read() = (%#v, true), want = (_, false)", p)
- }
-
- return
- }
-
- if p, ok := e.Read(); !ok {
- t.Fatal("expected a report message to be sent")
- } else {
- validateMLDPacket(t, stack.PayloadSince(p.Pkt.NetworkHeader()), linkLocalAddr, test.group, header.ICMPv6MulticastListenerReport, test.group)
- }
- })
- }
-}
diff --git a/pkg/tcpip/network/ipv6/ndp_test.go b/pkg/tcpip/network/ipv6/ndp_test.go
deleted file mode 100644
index f0186c64e..000000000
--- a/pkg/tcpip/network/ipv6/ndp_test.go
+++ /dev/null
@@ -1,1341 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ipv6
-
-import (
- "bytes"
- "math/rand"
- "strings"
- "testing"
- "time"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/checker"
- "gvisor.dev/gvisor/pkg/tcpip/faketime"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/link/channel"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/transport/icmp"
-)
-
-var _ NDPDispatcher = (*testNDPDispatcher)(nil)
-
-// testNDPDispatcher is an NDPDispatcher that only allows default router discovery.
-type testNDPDispatcher struct {
- addr tcpip.Address
-}
-
-func (*testNDPDispatcher) OnDuplicateAddressDetectionResult(tcpip.NICID, tcpip.Address, stack.DADResult) {
-}
-
-func (t *testNDPDispatcher) OnOffLinkRouteUpdated(_ tcpip.NICID, _ tcpip.Subnet, addr tcpip.Address, _ header.NDPRoutePreference) {
- t.addr = addr
-}
-
-func (t *testNDPDispatcher) OnOffLinkRouteInvalidated(_ tcpip.NICID, _ tcpip.Subnet, addr tcpip.Address) {
- t.addr = addr
-}
-
-func (*testNDPDispatcher) OnOnLinkPrefixDiscovered(tcpip.NICID, tcpip.Subnet) {
-}
-
-func (*testNDPDispatcher) OnOnLinkPrefixInvalidated(tcpip.NICID, tcpip.Subnet) {
-}
-
-func (*testNDPDispatcher) OnAutoGenAddress(tcpip.NICID, tcpip.AddressWithPrefix) {
-}
-
-func (*testNDPDispatcher) OnAutoGenAddressDeprecated(tcpip.NICID, tcpip.AddressWithPrefix) {
-}
-
-func (*testNDPDispatcher) OnAutoGenAddressInvalidated(tcpip.NICID, tcpip.AddressWithPrefix) {
-}
-
-func (*testNDPDispatcher) OnRecursiveDNSServerOption(tcpip.NICID, []tcpip.Address, time.Duration) {
-}
-
-func (*testNDPDispatcher) OnDNSSearchListOption(tcpip.NICID, []string, time.Duration) {
-}
-
-func (*testNDPDispatcher) OnDHCPv6Configuration(tcpip.NICID, DHCPv6ConfigurationFromNDPRA) {
-}
-
-func TestStackNDPEndpointInvalidateDefaultRouter(t *testing.T) {
- var ndpDisp testNDPDispatcher
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocolWithOptions(Options{
- NDPDisp: &ndpDisp,
- })},
- })
-
- if err := s.CreateNIC(nicID, &stubLinkEndpoint{}); err != nil {
- t.Fatalf("s.CreateNIC(%d, _): %s", nicID, err)
- }
-
- ep, err := s.GetNetworkEndpoint(nicID, ProtocolNumber)
- if err != nil {
- t.Fatalf("s.GetNetworkEndpoint(%d, %d): %s", nicID, ProtocolNumber, err)
- }
-
- ipv6EP := ep.(*endpoint)
- ipv6EP.mu.Lock()
- ipv6EP.mu.ndp.handleOffLinkRouteDiscovery(offLinkRoute{dest: header.IPv6EmptySubnet, router: lladdr1}, time.Hour, header.MediumRoutePreference)
- ipv6EP.mu.Unlock()
-
- if ndpDisp.addr != lladdr1 {
- t.Fatalf("got ndpDisp.addr = %s, want = %s", ndpDisp.addr, lladdr1)
- }
-
- ndpDisp.addr = ""
- ndpEP := ep.(stack.NDPEndpoint)
- ndpEP.InvalidateDefaultRouter(lladdr1)
- if ndpDisp.addr != lladdr1 {
- t.Fatalf("got ndpDisp.addr = %s, want = %s", ndpDisp.addr, lladdr1)
- }
-}
-
-// TestNeighborSolicitationWithSourceLinkLayerOption tests that receiving a
-// valid NDP NS message with the Source Link Layer Address option results in a
-// new entry in the link address cache for the sender of the message.
-func TestNeighborSolicitationWithSourceLinkLayerOption(t *testing.T) {
- const nicID = 1
-
- tests := []struct {
- name string
- optsBuf []byte
- expectedLinkAddr tcpip.LinkAddress
- }{
- {
- name: "Valid",
- optsBuf: []byte{1, 1, 2, 3, 4, 5, 6, 7},
- expectedLinkAddr: "\x02\x03\x04\x05\x06\x07",
- },
- {
- name: "Too Small",
- optsBuf: []byte{1, 1, 2, 3, 4, 5, 6},
- },
- {
- name: "Invalid Length",
- optsBuf: []byte{1, 2, 2, 3, 4, 5, 6, 7},
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},
- })
- e := channel.New(0, 1280, linkAddr0)
- e.LinkEPCapabilities |= stack.CapabilityResolutionRequired
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
- if err := s.AddAddress(nicID, ProtocolNumber, lladdr0); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s) = %s", nicID, ProtocolNumber, lladdr0, err)
- }
-
- ndpNSSize := header.ICMPv6NeighborSolicitMinimumSize + len(test.optsBuf)
- hdr := buffer.NewPrependable(header.IPv6MinimumSize + ndpNSSize)
- pkt := header.ICMPv6(hdr.Prepend(ndpNSSize))
- pkt.SetType(header.ICMPv6NeighborSolicit)
- ns := header.NDPNeighborSolicit(pkt.MessageBody())
- ns.SetTargetAddress(lladdr0)
- opts := ns.Options()
- copy(opts, test.optsBuf)
- pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
- Header: pkt,
- Src: lladdr1,
- Dst: lladdr0,
- }))
- payloadLength := hdr.UsedLength()
- ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
- ip.Encode(&header.IPv6Fields{
- PayloadLength: uint16(payloadLength),
- TransportProtocol: header.ICMPv6ProtocolNumber,
- HopLimit: 255,
- SrcAddr: lladdr1,
- DstAddr: lladdr0,
- })
-
- invalid := s.Stats().ICMP.V6.PacketsReceived.Invalid
-
- // Invalid count should initially be 0.
- if got := invalid.Value(); got != 0 {
- t.Fatalf("got invalid = %d, want = 0", got)
- }
-
- e.InjectInbound(ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: hdr.View().ToVectorisedView(),
- }))
-
- neighbors, err := s.Neighbors(nicID, ProtocolNumber)
- if err != nil {
- t.Fatalf("s.Neighbors(%d, %d): %s", nicID, ProtocolNumber, err)
- }
-
- neighborByAddr := make(map[tcpip.Address]stack.NeighborEntry)
- for _, n := range neighbors {
- if existing, ok := neighborByAddr[n.Addr]; ok {
- if diff := cmp.Diff(existing, n); diff != "" {
- t.Fatalf("s.Neighbors(%d, %d) returned unexpected duplicate neighbor entry (-existing +got):\n%s", nicID, ProtocolNumber, diff)
- }
- t.Fatalf("s.Neighbors(%d, %d) returned unexpected duplicate neighbor entry: %#v", nicID, ProtocolNumber, existing)
- }
- neighborByAddr[n.Addr] = n
- }
-
- if neigh, ok := neighborByAddr[lladdr1]; len(test.expectedLinkAddr) != 0 {
- // Invalid count should not have increased.
- if got := invalid.Value(); got != 0 {
- t.Errorf("got invalid = %d, want = 0", got)
- }
-
- if !ok {
- t.Fatalf("expected a neighbor entry for %q", lladdr1)
- }
- if neigh.LinkAddr != test.expectedLinkAddr {
- t.Errorf("got link address = %s, want = %s", neigh.LinkAddr, test.expectedLinkAddr)
- }
- if neigh.State != stack.Stale {
- t.Errorf("got NUD state = %s, want = %s", neigh.State, stack.Stale)
- }
- } else {
- // Invalid count should have increased.
- if got := invalid.Value(); got != 1 {
- t.Errorf("got invalid = %d, want = 1", got)
- }
-
- if ok {
- t.Fatalf("unexpectedly got neighbor entry: %#v", neigh)
- }
- }
- })
- }
-}
-
-func TestNeighborSolicitationResponse(t *testing.T) {
- const nicID = 1
- nicAddr := lladdr0
- remoteAddr := lladdr1
- nicAddrSNMC := header.SolicitedNodeAddr(nicAddr)
- nicLinkAddr := linkAddr0
- remoteLinkAddr0 := linkAddr1
- remoteLinkAddr1 := linkAddr2
-
- tests := []struct {
- name string
- nsOpts header.NDPOptionsSerializer
- nsSrcLinkAddr tcpip.LinkAddress
- nsSrc tcpip.Address
- nsDst tcpip.Address
- nsInvalid bool
- naDstLinkAddr tcpip.LinkAddress
- naSolicited bool
- naSrc tcpip.Address
- naDst tcpip.Address
- performsLinkResolution bool
- }{
- {
- name: "Unspecified source to solicited-node multicast destination",
- nsOpts: nil,
- nsSrcLinkAddr: remoteLinkAddr0,
- nsSrc: header.IPv6Any,
- nsDst: nicAddrSNMC,
- nsInvalid: false,
- naDstLinkAddr: header.EthernetAddressFromMulticastIPv6Address(header.IPv6AllNodesMulticastAddress),
- naSolicited: false,
- naSrc: nicAddr,
- naDst: header.IPv6AllNodesMulticastAddress,
- },
- {
- name: "Unspecified source with source ll option to multicast destination",
- nsOpts: header.NDPOptionsSerializer{
- header.NDPSourceLinkLayerAddressOption(remoteLinkAddr0[:]),
- },
- nsSrcLinkAddr: remoteLinkAddr0,
- nsSrc: header.IPv6Any,
- nsDst: nicAddrSNMC,
- nsInvalid: true,
- },
- {
- name: "Unspecified source to unicast destination",
- nsOpts: nil,
- nsSrcLinkAddr: remoteLinkAddr0,
- nsSrc: header.IPv6Any,
- nsDst: nicAddr,
- nsInvalid: true,
- },
- {
- name: "Unspecified source with source ll option to unicast destination",
- nsOpts: header.NDPOptionsSerializer{
- header.NDPSourceLinkLayerAddressOption(remoteLinkAddr0[:]),
- },
- nsSrcLinkAddr: remoteLinkAddr0,
- nsSrc: header.IPv6Any,
- nsDst: nicAddr,
- nsInvalid: true,
- },
- {
- name: "Specified source with 1 source ll to multicast destination",
- nsOpts: header.NDPOptionsSerializer{
- header.NDPSourceLinkLayerAddressOption(remoteLinkAddr0[:]),
- },
- nsSrcLinkAddr: remoteLinkAddr0,
- nsSrc: remoteAddr,
- nsDst: nicAddrSNMC,
- nsInvalid: false,
- naDstLinkAddr: remoteLinkAddr0,
- naSolicited: true,
- naSrc: nicAddr,
- naDst: remoteAddr,
- },
- {
- name: "Specified source with 1 source ll different from route to multicast destination",
- nsOpts: header.NDPOptionsSerializer{
- header.NDPSourceLinkLayerAddressOption(remoteLinkAddr1[:]),
- },
- nsSrcLinkAddr: remoteLinkAddr0,
- nsSrc: remoteAddr,
- nsDst: nicAddrSNMC,
- nsInvalid: false,
- naDstLinkAddr: remoteLinkAddr1,
- naSolicited: true,
- naSrc: nicAddr,
- naDst: remoteAddr,
- },
- {
- name: "Specified source to multicast destination",
- nsOpts: nil,
- nsSrcLinkAddr: remoteLinkAddr0,
- nsSrc: remoteAddr,
- nsDst: nicAddrSNMC,
- nsInvalid: true,
- },
- {
- name: "Specified source with 2 source ll to multicast destination",
- nsOpts: header.NDPOptionsSerializer{
- header.NDPSourceLinkLayerAddressOption(remoteLinkAddr0[:]),
- header.NDPSourceLinkLayerAddressOption(remoteLinkAddr1[:]),
- },
- nsSrcLinkAddr: remoteLinkAddr0,
- nsSrc: remoteAddr,
- nsDst: nicAddrSNMC,
- nsInvalid: true,
- },
-
- {
- name: "Specified source to unicast destination",
- nsOpts: nil,
- nsSrcLinkAddr: remoteLinkAddr0,
- nsSrc: remoteAddr,
- nsDst: nicAddr,
- nsInvalid: false,
- naDstLinkAddr: remoteLinkAddr0,
- naSolicited: true,
- naSrc: nicAddr,
- naDst: remoteAddr,
-			// Since we send a unicast solicitation to a node without an entry for
- // the remote, the node needs to perform neighbor discovery to get the
- // remote's link address to send the advertisement response.
- performsLinkResolution: true,
- },
- {
- name: "Specified source with 1 source ll to unicast destination",
- nsOpts: header.NDPOptionsSerializer{
- header.NDPSourceLinkLayerAddressOption(remoteLinkAddr0[:]),
- },
- nsSrcLinkAddr: remoteLinkAddr0,
- nsSrc: remoteAddr,
- nsDst: nicAddr,
- nsInvalid: false,
- naDstLinkAddr: remoteLinkAddr0,
- naSolicited: true,
- naSrc: nicAddr,
- naDst: remoteAddr,
- },
- {
- name: "Specified source with 1 source ll different from route to unicast destination",
- nsOpts: header.NDPOptionsSerializer{
- header.NDPSourceLinkLayerAddressOption(remoteLinkAddr1[:]),
- },
- nsSrcLinkAddr: remoteLinkAddr0,
- nsSrc: remoteAddr,
- nsDst: nicAddr,
- nsInvalid: false,
- naDstLinkAddr: remoteLinkAddr1,
- naSolicited: true,
- naSrc: nicAddr,
- naDst: remoteAddr,
- },
- {
- name: "Specified source with 2 source ll to unicast destination",
- nsOpts: header.NDPOptionsSerializer{
- header.NDPSourceLinkLayerAddressOption(remoteLinkAddr0[:]),
- header.NDPSourceLinkLayerAddressOption(remoteLinkAddr1[:]),
- },
- nsSrcLinkAddr: remoteLinkAddr0,
- nsSrc: remoteAddr,
- nsDst: nicAddr,
- nsInvalid: true,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},
- Clock: clock,
- })
- e := channel.New(1, 1280, nicLinkAddr)
- e.LinkEPCapabilities |= stack.CapabilityResolutionRequired
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
- if err := s.AddAddress(nicID, ProtocolNumber, nicAddr); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s) = %s", nicID, ProtocolNumber, nicAddr, err)
- }
-
- s.SetRouteTable([]tcpip.Route{
- {
- Destination: header.IPv6EmptySubnet,
- NIC: 1,
- },
- })
-
- ndpNSSize := header.ICMPv6NeighborSolicitMinimumSize + test.nsOpts.Length()
- hdr := buffer.NewPrependable(header.IPv6MinimumSize + ndpNSSize)
- pkt := header.ICMPv6(hdr.Prepend(ndpNSSize))
- pkt.SetType(header.ICMPv6NeighborSolicit)
- ns := header.NDPNeighborSolicit(pkt.MessageBody())
- ns.SetTargetAddress(nicAddr)
- opts := ns.Options()
- opts.Serialize(test.nsOpts)
- pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
- Header: pkt,
- Src: test.nsSrc,
- Dst: test.nsDst,
- }))
- payloadLength := hdr.UsedLength()
- ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
- ip.Encode(&header.IPv6Fields{
- PayloadLength: uint16(payloadLength),
- TransportProtocol: header.ICMPv6ProtocolNumber,
- HopLimit: 255,
- SrcAddr: test.nsSrc,
- DstAddr: test.nsDst,
- })
-
- invalid := s.Stats().ICMP.V6.PacketsReceived.Invalid
-
- // Invalid count should initially be 0.
- if got := invalid.Value(); got != 0 {
- t.Fatalf("got invalid = %d, want = 0", got)
- }
-
- e.InjectLinkAddr(ProtocolNumber, test.nsSrcLinkAddr, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: hdr.View().ToVectorisedView(),
- }))
-
- if test.nsInvalid {
- if got := invalid.Value(); got != 1 {
- t.Fatalf("got invalid = %d, want = 1", got)
- }
-
- if p, got := e.Read(); got {
- t.Fatalf("unexpected response to an invalid NS = %+v", p.Pkt)
- }
-
- // If we expected the NS to be invalid, we have nothing else to check.
- return
- }
-
- if got := invalid.Value(); got != 0 {
- t.Fatalf("got invalid = %d, want = 0", got)
- }
-
- if test.performsLinkResolution {
- clock.RunImmediatelyScheduledJobs()
- p, got := e.Read()
- if !got {
- t.Fatal("expected an NDP NS response")
- }
-
- respNSDst := header.SolicitedNodeAddr(test.nsSrc)
- var want stack.RouteInfo
- want.NetProto = ProtocolNumber
- want.RemoteLinkAddress = header.EthernetAddressFromMulticastIPv6Address(respNSDst)
- if diff := cmp.Diff(want, p.Route, cmp.AllowUnexported(want)); diff != "" {
- t.Errorf("route info mismatch (-want +got):\n%s", diff)
- }
-
- checker.IPv6(t, stack.PayloadSince(p.Pkt.NetworkHeader()),
- checker.SrcAddr(nicAddr),
- checker.DstAddr(respNSDst),
- checker.TTL(header.NDPHopLimit),
- checker.NDPNS(
- checker.NDPNSTargetAddress(test.nsSrc),
- checker.NDPNSOptions([]header.NDPOption{
- header.NDPSourceLinkLayerAddressOption(nicLinkAddr),
- }),
- ))
-
- ser := header.NDPOptionsSerializer{
- header.NDPTargetLinkLayerAddressOption(linkAddr1),
- }
- ndpNASize := header.ICMPv6NeighborAdvertMinimumSize + ser.Length()
- hdr := buffer.NewPrependable(header.IPv6MinimumSize + ndpNASize)
- pkt := header.ICMPv6(hdr.Prepend(ndpNASize))
- pkt.SetType(header.ICMPv6NeighborAdvert)
- na := header.NDPNeighborAdvert(pkt.MessageBody())
- na.SetSolicitedFlag(true)
- na.SetOverrideFlag(true)
- na.SetTargetAddress(test.nsSrc)
- na.Options().Serialize(ser)
- pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
- Header: pkt,
- Src: test.nsSrc,
- Dst: nicAddr,
- }))
- payloadLength := hdr.UsedLength()
- ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
- ip.Encode(&header.IPv6Fields{
- PayloadLength: uint16(payloadLength),
- TransportProtocol: header.ICMPv6ProtocolNumber,
- HopLimit: header.NDPHopLimit,
- SrcAddr: test.nsSrc,
- DstAddr: nicAddr,
- })
- e.InjectLinkAddr(ProtocolNumber, "", stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: hdr.View().ToVectorisedView(),
- }))
- }
-
- clock.RunImmediatelyScheduledJobs()
- p, got := e.Read()
- if !got {
- t.Fatal("expected an NDP NA response")
- }
-
- if p.Route.LocalAddress != test.naSrc {
- t.Errorf("got p.Route.LocalAddress = %s, want = %s", p.Route.LocalAddress, test.naSrc)
- }
- if p.Route.LocalLinkAddress != nicLinkAddr {
- t.Errorf("p.Route.LocalLinkAddress = %s, want = %s", p.Route.LocalLinkAddress, nicLinkAddr)
- }
- if p.Route.RemoteAddress != test.naDst {
- t.Errorf("got p.Route.RemoteAddress = %s, want = %s", p.Route.RemoteAddress, test.naDst)
- }
- if p.Route.RemoteLinkAddress != test.naDstLinkAddr {
- t.Errorf("got p.Route.RemoteLinkAddress = %s, want = %s", p.Route.RemoteLinkAddress, test.naDstLinkAddr)
- }
-
- checker.IPv6(t, stack.PayloadSince(p.Pkt.NetworkHeader()),
- checker.SrcAddr(test.naSrc),
- checker.DstAddr(test.naDst),
- checker.TTL(header.NDPHopLimit),
- checker.NDPNA(
- checker.NDPNASolicitedFlag(test.naSolicited),
- checker.NDPNATargetAddress(nicAddr),
- checker.NDPNAOptions([]header.NDPOption{
- header.NDPTargetLinkLayerAddressOption(nicLinkAddr[:]),
- }),
- ))
- })
- }
-}
-
-// TestNeighborAdvertisementWithTargetLinkLayerOption tests that receiving a
-// valid NDP NA message with the Target Link Layer Address option does not
-// result in a new entry in the neighbor cache for the target of the message.
-func TestNeighborAdvertisementWithTargetLinkLayerOption(t *testing.T) {
- const nicID = 1
-
- tests := []struct {
- name string
- optsBuf []byte
- isValid bool
- }{
- {
- name: "Valid",
- optsBuf: []byte{2, 1, 2, 3, 4, 5, 6, 7},
- isValid: true,
- },
- {
- name: "Too Small",
- optsBuf: []byte{2, 1, 2, 3, 4, 5, 6},
- },
- {
- name: "Invalid Length",
- optsBuf: []byte{2, 2, 2, 3, 4, 5, 6, 7},
- },
- {
- name: "Multiple",
- optsBuf: []byte{
- 2, 1, 2, 3, 4, 5, 6, 7,
- 2, 1, 2, 3, 4, 5, 6, 8,
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},
- })
- e := channel.New(0, 1280, linkAddr0)
- e.LinkEPCapabilities |= stack.CapabilityResolutionRequired
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
- if err := s.AddAddress(nicID, ProtocolNumber, lladdr0); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s) = %s", nicID, ProtocolNumber, lladdr0, err)
- }
-
- ndpNASize := header.ICMPv6NeighborAdvertMinimumSize + len(test.optsBuf)
- hdr := buffer.NewPrependable(header.IPv6MinimumSize + ndpNASize)
- pkt := header.ICMPv6(hdr.Prepend(ndpNASize))
- pkt.SetType(header.ICMPv6NeighborAdvert)
- ns := header.NDPNeighborAdvert(pkt.MessageBody())
- ns.SetTargetAddress(lladdr1)
- opts := ns.Options()
- copy(opts, test.optsBuf)
- pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
- Header: pkt,
- Src: lladdr1,
- Dst: lladdr0,
- }))
- payloadLength := hdr.UsedLength()
- ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
- ip.Encode(&header.IPv6Fields{
- PayloadLength: uint16(payloadLength),
- TransportProtocol: header.ICMPv6ProtocolNumber,
- HopLimit: 255,
- SrcAddr: lladdr1,
- DstAddr: lladdr0,
- })
-
- invalid := s.Stats().ICMP.V6.PacketsReceived.Invalid
-
- // Invalid count should initially be 0.
- if got := invalid.Value(); got != 0 {
- t.Fatalf("got invalid = %d, want = 0", got)
- }
-
- e.InjectInbound(ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: hdr.View().ToVectorisedView(),
- }))
-
- neighbors, err := s.Neighbors(nicID, ProtocolNumber)
- if err != nil {
- t.Fatalf("s.Neighbors(%d, %d): %s", nicID, ProtocolNumber, err)
- }
-
- neighborByAddr := make(map[tcpip.Address]stack.NeighborEntry)
- for _, n := range neighbors {
- if existing, ok := neighborByAddr[n.Addr]; ok {
- if diff := cmp.Diff(existing, n); diff != "" {
- t.Fatalf("s.Neighbors(%d, %d) returned unexpected duplicate neighbor entry (-existing +got):\n%s", nicID, ProtocolNumber, diff)
- }
- t.Fatalf("s.Neighbors(%d, %d) returned unexpected duplicate neighbor entry: %#v", nicID, ProtocolNumber, existing)
- }
- neighborByAddr[n.Addr] = n
- }
-
- if neigh, ok := neighborByAddr[lladdr1]; ok {
- t.Fatalf("unexpectedly got neighbor entry: %#v", neigh)
- }
-
- if test.isValid {
- // Invalid count should not have increased.
- if got := invalid.Value(); got != 0 {
- t.Errorf("got invalid = %d, want = 0", got)
- }
- } else {
- // Invalid count should have increased.
- if got := invalid.Value(); got != 1 {
- t.Errorf("got invalid = %d, want = 1", got)
- }
- }
- })
- }
-}
-
-func TestNDPValidation(t *testing.T) {
- const nicID = 1
-
- handleIPv6Payload := func(payload buffer.View, hopLimit uint8, atomicFragment bool, ep stack.NetworkEndpoint) {
- var extHdrs header.IPv6ExtHdrSerializer
- if atomicFragment {
- extHdrs = append(extHdrs, &header.IPv6SerializableFragmentExtHdr{})
- }
- extHdrsLen := extHdrs.Length()
-
- ip := buffer.NewView(header.IPv6MinimumSize + extHdrsLen)
- header.IPv6(ip).Encode(&header.IPv6Fields{
- PayloadLength: uint16(len(payload) + extHdrsLen),
- TransportProtocol: header.ICMPv6ProtocolNumber,
- HopLimit: hopLimit,
- SrcAddr: lladdr1,
- DstAddr: lladdr0,
- ExtensionHeaders: extHdrs,
- })
- vv := ip.ToVectorisedView()
- vv.AppendView(payload)
- ep.HandlePacket(stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: vv,
- }))
- }
-
- var tllData [header.NDPLinkLayerAddressSize]byte
- header.NDPOptions(tllData[:]).Serialize(header.NDPOptionsSerializer{
- header.NDPTargetLinkLayerAddressOption(linkAddr1),
- })
-
- var sllData [header.NDPLinkLayerAddressSize]byte
- header.NDPOptions(sllData[:]).Serialize(header.NDPOptionsSerializer{
- header.NDPSourceLinkLayerAddressOption(linkAddr1),
- })
-
- types := []struct {
- name string
- typ header.ICMPv6Type
- size int
- extraData []byte
- statCounter func(tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter
- routerOnly bool
- }{
- {
- name: "RouterSolicit",
- typ: header.ICMPv6RouterSolicit,
- size: header.ICMPv6MinimumSize,
- statCounter: func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
- return stats.RouterSolicit
- },
- routerOnly: true,
- },
- {
- name: "RouterAdvert",
- typ: header.ICMPv6RouterAdvert,
- size: header.ICMPv6HeaderSize + header.NDPRAMinimumSize,
- statCounter: func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
- return stats.RouterAdvert
- },
- },
- {
- name: "NeighborSolicit",
- typ: header.ICMPv6NeighborSolicit,
- size: header.ICMPv6NeighborSolicitMinimumSize,
- extraData: sllData[:],
- statCounter: func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
- return stats.NeighborSolicit
- },
- },
- {
- name: "NeighborAdvert",
- typ: header.ICMPv6NeighborAdvert,
- size: header.ICMPv6NeighborAdvertMinimumSize,
- extraData: tllData[:],
- statCounter: func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
- return stats.NeighborAdvert
- },
- },
- {
- name: "RedirectMsg",
- typ: header.ICMPv6RedirectMsg,
- size: header.ICMPv6MinimumSize,
- statCounter: func(stats tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
- return stats.RedirectMsg
- },
- },
- }
-
- subTests := []struct {
- name string
- atomicFragment bool
- hopLimit uint8
- code header.ICMPv6Code
- valid bool
- }{
- {
- name: "Valid",
- atomicFragment: false,
- hopLimit: header.NDPHopLimit,
- code: 0,
- valid: true,
- },
- {
- name: "Fragmented",
- atomicFragment: true,
- hopLimit: header.NDPHopLimit,
- code: 0,
- valid: false,
- },
- {
- name: "Invalid hop limit",
- atomicFragment: false,
- hopLimit: header.NDPHopLimit - 1,
- code: 0,
- valid: false,
- },
- {
- name: "Invalid ICMPv6 code",
- atomicFragment: false,
- hopLimit: header.NDPHopLimit,
- code: 1,
- valid: false,
- },
- }
-
- subnet, err := tcpip.NewSubnet(lladdr1, tcpip.AddressMask(strings.Repeat("\xff", len(lladdr0))))
- if err != nil {
- t.Fatal(err)
- }
-
- for _, typ := range types {
- for _, isRouter := range []bool{false, true} {
- name := typ.name
- if isRouter {
- name += " (Router)"
- }
-
- t.Run(name, func(t *testing.T) {
- for _, test := range subTests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{icmp.NewProtocol6},
- })
-
- if isRouter {
- if err := s.SetForwardingDefaultAndAllNICs(ProtocolNumber, true); err != nil {
- t.Fatalf("SetForwardingDefaultAndAllNICs(%d, true): %s", ProtocolNumber, err)
- }
- }
-
- if err := s.CreateNIC(nicID, &stubLinkEndpoint{}); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID, err)
- }
-
- if err := s.AddAddress(nicID, ProtocolNumber, lladdr0); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s): %s", nicID, ProtocolNumber, lladdr0, err)
- }
-
- ep, err := s.GetNetworkEndpoint(nicID, ProtocolNumber)
- if err != nil {
- t.Fatal("cannot find network endpoint instance for IPv6")
- }
-
- s.SetRouteTable([]tcpip.Route{{
- Destination: subnet,
- NIC: nicID,
- }})
-
- stats := s.Stats().ICMP.V6.PacketsReceived
- invalid := stats.Invalid
- routerOnly := stats.RouterOnlyPacketsDroppedByHost
- typStat := typ.statCounter(stats)
-
- icmpH := header.ICMPv6(buffer.NewView(typ.size + len(typ.extraData)))
- copy(icmpH[typ.size:], typ.extraData)
- icmpH.SetType(typ.typ)
- icmpH.SetCode(test.code)
- icmpH.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
- Header: icmpH[:typ.size],
- Src: lladdr0,
- Dst: lladdr1,
- PayloadCsum: header.Checksum(typ.extraData /* initial */, 0),
- PayloadLen: len(typ.extraData),
- }))
-
- // Rx count of the NDP message should initially be 0.
- if got := typStat.Value(); got != 0 {
- t.Errorf("got %s = %d, want = 0", typ.name, got)
- }
-
- // Invalid count should initially be 0.
- if got := invalid.Value(); got != 0 {
- t.Errorf("got invalid.Value() = %d, want = 0", got)
- }
-
- // Should initially not have dropped any packets.
- if got := routerOnly.Value(); got != 0 {
- t.Errorf("got routerOnly.Value() = %d, want = 0", got)
- }
-
- if t.Failed() {
- t.FailNow()
- }
-
- handleIPv6Payload(buffer.View(icmpH), test.hopLimit, test.atomicFragment, ep)
-
- // Rx count of the NDP packet should have increased.
- if got := typStat.Value(); got != 1 {
- t.Errorf("got %s = %d, want = 1", typ.name, got)
- }
-
- want := uint64(0)
- if !test.valid {
- // Invalid count should have increased.
- want = 1
- }
- if got := invalid.Value(); got != want {
- t.Errorf("got invalid.Value() = %d, want = %d", got, want)
- }
-
- want = 0
- if test.valid && !isRouter && typ.routerOnly {
- // Router only packets are expected to be dropped when operating
- // as a host.
- want = 1
- }
- if got := routerOnly.Value(); got != want {
- t.Errorf("got routerOnly.Value() = %d, want = %d", got, want)
- }
- })
- }
- })
- }
- }
-}
-
-// TestNeighborAdvertisementValidation tests that the NIC validates received
-// Neighbor Advertisements.
-//
-// In particular, if the IP Destination Address is a multicast address, and the
-// Solicited flag is not zero, the Neighbor Advertisement is invalid and should
-// be discarded.
-func TestNeighborAdvertisementValidation(t *testing.T) {
- tests := []struct {
- name string
- ipDstAddr tcpip.Address
- solicitedFlag bool
- valid bool
- }{
- {
- name: "Multicast IP destination address with Solicited flag set",
- ipDstAddr: header.IPv6AllNodesMulticastAddress,
- solicitedFlag: true,
- valid: false,
- },
- {
- name: "Multicast IP destination address with Solicited flag unset",
- ipDstAddr: header.IPv6AllNodesMulticastAddress,
- solicitedFlag: false,
- valid: true,
- },
- {
- name: "Unicast IP destination address with Solicited flag set",
- ipDstAddr: lladdr0,
- solicitedFlag: true,
- valid: true,
- },
- {
- name: "Unicast IP destination address with Solicited flag unset",
- ipDstAddr: lladdr0,
- solicitedFlag: false,
- valid: true,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},
- })
- e := channel.New(0, header.IPv6MinimumMTU, linkAddr0)
- e.LinkEPCapabilities |= stack.CapabilityResolutionRequired
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
- if err := s.AddAddress(nicID, ProtocolNumber, lladdr0); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s) = %s", nicID, ProtocolNumber, lladdr0, err)
- }
-
- ndpNASize := header.ICMPv6NeighborAdvertMinimumSize
- hdr := buffer.NewPrependable(header.IPv6MinimumSize + ndpNASize)
- pkt := header.ICMPv6(hdr.Prepend(ndpNASize))
- pkt.SetType(header.ICMPv6NeighborAdvert)
- na := header.NDPNeighborAdvert(pkt.MessageBody())
- na.SetTargetAddress(lladdr1)
- na.SetSolicitedFlag(test.solicitedFlag)
- pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
- Header: pkt,
- Src: lladdr1,
- Dst: test.ipDstAddr,
- }))
- payloadLength := hdr.UsedLength()
- ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
- ip.Encode(&header.IPv6Fields{
- PayloadLength: uint16(payloadLength),
- TransportProtocol: header.ICMPv6ProtocolNumber,
- HopLimit: 255,
- SrcAddr: lladdr1,
- DstAddr: test.ipDstAddr,
- })
-
- stats := s.Stats().ICMP.V6.PacketsReceived
- invalid := stats.Invalid
- rxNA := stats.NeighborAdvert
-
- if got := rxNA.Value(); got != 0 {
- t.Fatalf("got rxNA = %d, want = 0", got)
- }
- if got := invalid.Value(); got != 0 {
- t.Fatalf("got invalid = %d, want = 0", got)
- }
-
- e.InjectInbound(header.IPv6ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: hdr.View().ToVectorisedView(),
- }))
-
- if got := rxNA.Value(); got != 1 {
- t.Fatalf("got rxNA = %d, want = 1", got)
- }
- var wantInvalid uint64 = 1
- if test.valid {
- wantInvalid = 0
- }
- if got := invalid.Value(); got != wantInvalid {
- t.Fatalf("got invalid = %d, want = %d", got, wantInvalid)
- }
- // As per RFC 4861 section 7.2.5:
- // When a valid Neighbor Advertisement is received ...
- // If no entry exists, the advertisement SHOULD be silently discarded.
- // There is no need to create an entry if none exists, since the
- // recipient has apparently not initiated any communication with the
- // target.
- if neighbors, err := s.Neighbors(nicID, ProtocolNumber); err != nil {
- t.Fatalf("s.Neighbors(%d, %d): %s", nicID, ProtocolNumber, err)
- } else if len(neighbors) != 0 {
- t.Fatalf("got len(neighbors) = %d, want = 0; neighbors = %#v", len(neighbors), neighbors)
- }
- })
- }
-}
-
-// TestRouterAdvertValidation tests that when the NIC is configured to handle
-// NDP Router Advertisement packets, it validates the Router Advertisement
-// properly before handling them.
-func TestRouterAdvertValidation(t *testing.T) {
- tests := []struct {
- name string
- src tcpip.Address
- hopLimit uint8
- code header.ICMPv6Code
- ndpPayload []byte
- expectedSuccess bool
- }{
- {
- "OK",
- lladdr0,
- 255,
- 0,
- []byte{
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- },
- true,
- },
- {
- "NonLinkLocalSourceAddr",
- addr1,
- 255,
- 0,
- []byte{
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- },
- false,
- },
- {
- "HopLimitNot255",
- lladdr0,
- 254,
- 0,
- []byte{
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- },
- false,
- },
- {
- "NonZeroCode",
- lladdr0,
- 255,
- 1,
- []byte{
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- },
- false,
- },
- {
- "NDPPayloadTooSmall",
- lladdr0,
- 255,
- 0,
- []byte{
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0,
- },
- false,
- },
- {
- "OKWithOptions",
- lladdr0,
- 255,
- 0,
- []byte{
- // RA payload
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
-
- // Option #1 (TargetLinkLayerAddress)
- 2, 1, 0, 0, 0, 0, 0, 0,
-
- // Option #2 (unrecognized)
- 255, 1, 0, 0, 0, 0, 0, 0,
-
- // Option #3 (PrefixInformation)
- 3, 4, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- },
- true,
- },
- {
- "OptionWithZeroLength",
- lladdr0,
- 255,
- 0,
- []byte{
- // RA payload
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
-
- // Option #1 (TargetLinkLayerAddress)
- // Invalid as it has 0 length.
- 2, 0, 0, 0, 0, 0, 0, 0,
-
- // Option #2 (unrecognized)
- 255, 1, 0, 0, 0, 0, 0, 0,
-
- // Option #3 (PrefixInformation)
- 3, 4, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- },
- false,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- e := channel.New(10, 1280, linkAddr1)
- e.LinkEPCapabilities |= stack.CapabilityResolutionRequired
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},
- })
-
- if err := s.CreateNIC(1, e); err != nil {
- t.Fatalf("CreateNIC(_) = %s", err)
- }
-
- icmpSize := header.ICMPv6HeaderSize + len(test.ndpPayload)
- hdr := buffer.NewPrependable(header.IPv6MinimumSize + icmpSize)
- pkt := header.ICMPv6(hdr.Prepend(icmpSize))
- pkt.SetType(header.ICMPv6RouterAdvert)
- pkt.SetCode(test.code)
- copy(pkt.MessageBody(), test.ndpPayload)
- payloadLength := hdr.UsedLength()
- pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
- Header: pkt,
- Src: test.src,
- Dst: header.IPv6AllNodesMulticastAddress,
- }))
- ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
- ip.Encode(&header.IPv6Fields{
- PayloadLength: uint16(payloadLength),
- TransportProtocol: icmp.ProtocolNumber6,
- HopLimit: test.hopLimit,
- SrcAddr: test.src,
- DstAddr: header.IPv6AllNodesMulticastAddress,
- })
-
- stats := s.Stats().ICMP.V6.PacketsReceived
- invalid := stats.Invalid
- rxRA := stats.RouterAdvert
-
- if got := invalid.Value(); got != 0 {
- t.Fatalf("got invalid = %d, want = 0", got)
- }
- if got := rxRA.Value(); got != 0 {
- t.Fatalf("got rxRA = %d, want = 0", got)
- }
-
- e.InjectInbound(header.IPv6ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: hdr.View().ToVectorisedView(),
- }))
-
- if got := rxRA.Value(); got != 1 {
- t.Fatalf("got rxRA = %d, want = 1", got)
- }
-
- if test.expectedSuccess {
- if got := invalid.Value(); got != 0 {
- t.Fatalf("got invalid = %d, want = 0", got)
- }
- } else {
- if got := invalid.Value(); got != 1 {
- t.Fatalf("got invalid = %d, want = 1", got)
- }
- }
- })
- }
-}
-
-// TestCheckDuplicateAddress checks that calls to CheckDuplicateAddress and DAD
-// performed when adding new addresses do not interfere with each other.
-func TestCheckDuplicateAddress(t *testing.T) {
- const nicID = 1
-
- clock := faketime.NewManualClock()
- dadConfigs := stack.DADConfigurations{
- DupAddrDetectTransmits: 1,
- RetransmitTimer: time.Second,
- }
-
- nonces := [...][]byte{
- {1, 2, 3, 4, 5, 6},
- {7, 8, 9, 10, 11, 12},
- }
-
- var secureRNGBytes []byte
- for _, n := range nonces {
- secureRNGBytes = append(secureRNGBytes, n...)
- }
- var secureRNG bytes.Reader
- secureRNG.Reset(secureRNGBytes[:])
- s := stack.New(stack.Options{
- Clock: clock,
- RandSource: rand.NewSource(time.Now().UnixNano()),
- SecureRNG: &secureRNG,
- NetworkProtocols: []stack.NetworkProtocolFactory{NewProtocolWithOptions(Options{
- DADConfigs: dadConfigs,
- })},
- })
-	// This test is expected to send at most 2 DAD messages. We allow an extra
- // packet to be stored to catch unexpected packets.
- e := channel.New(3, header.IPv6MinimumMTU, linkAddr0)
- e.LinkEPCapabilities |= stack.CapabilityResolutionRequired
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
-
- dadPacketsSent := 0
- snmc := header.SolicitedNodeAddr(lladdr0)
- remoteLinkAddr := header.EthernetAddressFromMulticastIPv6Address(snmc)
- checkDADMsg := func() {
- clock.RunImmediatelyScheduledJobs()
- p, ok := e.Read()
- if !ok {
- t.Fatalf("expected %d-th DAD message", dadPacketsSent)
- }
-
- if p.Proto != header.IPv6ProtocolNumber {
- t.Errorf("(i=%d) got p.Proto = %d, want = %d", dadPacketsSent, p.Proto, header.IPv6ProtocolNumber)
- }
-
- if p.Route.RemoteLinkAddress != remoteLinkAddr {
- t.Errorf("(i=%d) got p.Route.RemoteLinkAddress = %s, want = %s", dadPacketsSent, p.Route.RemoteLinkAddress, remoteLinkAddr)
- }
-
- checker.IPv6(t, stack.PayloadSince(p.Pkt.NetworkHeader()),
- checker.SrcAddr(header.IPv6Any),
- checker.DstAddr(snmc),
- checker.TTL(header.NDPHopLimit),
- checker.NDPNS(
- checker.NDPNSTargetAddress(lladdr0),
- checker.NDPNSOptions([]header.NDPOption{header.NDPNonceOption(nonces[dadPacketsSent])}),
- ))
- }
- if err := s.AddAddress(nicID, ProtocolNumber, lladdr0); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s) = %s", nicID, ProtocolNumber, lladdr0, err)
- }
- checkDADMsg()
-
- // Start DAD for the address we just added.
- //
- // Even though the stack will perform DAD before the added address transitions
- // from tentative to assigned, this DAD request should be independent of that.
- ch := make(chan stack.DADResult, 3)
- dadRequestsMade := 1
- dadPacketsSent++
- if res, err := s.CheckDuplicateAddress(nicID, ProtocolNumber, lladdr0, func(r stack.DADResult) {
- ch <- r
- }); err != nil {
- t.Fatalf("s.CheckDuplicateAddress(%d, %d, %s, _): %s", nicID, ProtocolNumber, lladdr0, err)
- } else if res != stack.DADStarting {
- t.Fatalf("got s.CheckDuplicateAddress(%d, %d, %s, _) = %d, want = %d", nicID, ProtocolNumber, lladdr0, res, stack.DADStarting)
- }
- checkDADMsg()
-
- // Remove the address and make sure our DAD request was not stopped.
- if err := s.RemoveAddress(nicID, lladdr0); err != nil {
- t.Fatalf("RemoveAddress(%d, %s): %s", nicID, lladdr0, err)
- }
-	// Should not restart DAD since we already requested DAD above; the handler
-	// should be called when the original request completes, so we should not
-	// send an extra DAD message here.
- dadRequestsMade++
- if res, err := s.CheckDuplicateAddress(nicID, ProtocolNumber, lladdr0, func(r stack.DADResult) {
- ch <- r
- }); err != nil {
- t.Fatalf("s.CheckDuplicateAddress(%d, %d, %s, _): %s", nicID, ProtocolNumber, lladdr0, err)
- } else if res != stack.DADAlreadyRunning {
- t.Fatalf("got s.CheckDuplicateAddress(%d, %d, %s, _) = %d, want = %d", nicID, ProtocolNumber, lladdr0, res, stack.DADAlreadyRunning)
- }
-
- // Wait for DAD to complete.
- clock.Advance(time.Duration(dadConfigs.DupAddrDetectTransmits) * dadConfigs.RetransmitTimer)
- for i := 0; i < dadRequestsMade; i++ {
- if diff := cmp.Diff(&stack.DADSucceeded{}, <-ch); diff != "" {
- t.Errorf("(i=%d) DAD result mismatch (-want +got):\n%s", i, diff)
- }
- }
- // Should have no more results.
- select {
- case r := <-ch:
- t.Errorf("unexpectedly got an extra DAD result; r = %#v", r)
- default:
- }
-
- // Should have no more packets.
- if p, ok := e.Read(); ok {
- t.Errorf("got unexpected packet = %#v", p)
- }
-}
diff --git a/pkg/tcpip/network/multicast_group_test.go b/pkg/tcpip/network/multicast_group_test.go
deleted file mode 100644
index 1b96b1fb8..000000000
--- a/pkg/tcpip/network/multicast_group_test.go
+++ /dev/null
@@ -1,1278 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ip_test
-
-import (
- "fmt"
- "strings"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/checker"
- "gvisor.dev/gvisor/pkg/tcpip/faketime"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/link/channel"
- "gvisor.dev/gvisor/pkg/tcpip/link/loopback"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/testutil"
-)
-
-const (
- linkAddr = tcpip.LinkAddress("\x02\x02\x03\x04\x05\x06")
-
- defaultIPv4PrefixLength = 24
-
- igmpMembershipQuery = uint8(header.IGMPMembershipQuery)
- igmpv1MembershipReport = uint8(header.IGMPv1MembershipReport)
- igmpv2MembershipReport = uint8(header.IGMPv2MembershipReport)
- igmpLeaveGroup = uint8(header.IGMPLeaveGroup)
- mldQuery = uint8(header.ICMPv6MulticastListenerQuery)
- mldReport = uint8(header.ICMPv6MulticastListenerReport)
- mldDone = uint8(header.ICMPv6MulticastListenerDone)
-
- maxUnsolicitedReports = 2
-)
-
-var (
- stackIPv4Addr = testutil.MustParse4("10.0.0.1")
- linkLocalIPv6Addr1 = testutil.MustParse6("fe80::1")
- linkLocalIPv6Addr2 = testutil.MustParse6("fe80::2")
-
- ipv4MulticastAddr1 = testutil.MustParse4("224.0.0.3")
- ipv4MulticastAddr2 = testutil.MustParse4("224.0.0.4")
- ipv4MulticastAddr3 = testutil.MustParse4("224.0.0.5")
- ipv6MulticastAddr1 = testutil.MustParse6("ff02::3")
- ipv6MulticastAddr2 = testutil.MustParse6("ff02::4")
- ipv6MulticastAddr3 = testutil.MustParse6("ff02::5")
-)
-
-var (
- // unsolicitedIGMPReportIntervalMaxTenthSec is the maximum amount of time the
- // NIC will wait before sending an unsolicited report after joining a
- // multicast group, in deciseconds.
- unsolicitedIGMPReportIntervalMaxTenthSec = func() uint8 {
- const decisecond = time.Second / 10
- if ipv4.UnsolicitedReportIntervalMax%decisecond != 0 {
- panic(fmt.Sprintf("UnsolicitedReportIntervalMax of %d is a lossy conversion to deciseconds", ipv4.UnsolicitedReportIntervalMax))
- }
- return uint8(ipv4.UnsolicitedReportIntervalMax / decisecond)
- }()
-
- ipv6AddrSNMC = header.SolicitedNodeAddr(linkLocalIPv6Addr1)
-)
-
-// validateMLDPacket checks that a passed PacketInfo is an IPv6 MLD packet
-// sent to the provided address with the passed fields set.
-func validateMLDPacket(t *testing.T, p channel.PacketInfo, remoteAddress tcpip.Address, mldType uint8, maxRespTime byte, groupAddress tcpip.Address) {
- t.Helper()
-
- payload := header.IPv6(stack.PayloadSince(p.Pkt.NetworkHeader()))
- checker.IPv6WithExtHdr(t, payload,
- checker.IPv6ExtHdr(
- checker.IPv6HopByHopExtensionHeader(checker.IPv6RouterAlert(header.IPv6RouterAlertMLD)),
- ),
- checker.SrcAddr(linkLocalIPv6Addr1),
- checker.DstAddr(remoteAddress),
- // Hop Limit for an MLD message must be 1 as per RFC 2710 section 3.
- checker.TTL(1),
- checker.MLD(header.ICMPv6Type(mldType), header.MLDMinimumSize,
- checker.MLDMaxRespDelay(time.Duration(maxRespTime)*time.Millisecond),
- checker.MLDMulticastAddress(groupAddress),
- ),
- )
-}
-
-// validateIGMPPacket checks that a passed PacketInfo is an IPv4 IGMP packet
-// sent to the provided address with the passed fields set.
-func validateIGMPPacket(t *testing.T, p channel.PacketInfo, remoteAddress tcpip.Address, igmpType uint8, maxRespTime byte, groupAddress tcpip.Address) {
- t.Helper()
-
- payload := header.IPv4(stack.PayloadSince(p.Pkt.NetworkHeader()))
- checker.IPv4(t, payload,
- checker.SrcAddr(stackIPv4Addr),
- checker.DstAddr(remoteAddress),
- // TTL for an IGMP message must be 1 as per RFC 2236 section 2.
- checker.TTL(1),
- checker.IPv4RouterAlert(),
- checker.IGMP(
- checker.IGMPType(header.IGMPType(igmpType)),
- checker.IGMPMaxRespTime(header.DecisecondToDuration(maxRespTime)),
- checker.IGMPGroupAddress(groupAddress),
- ),
- )
-}
-
-func createStack(t *testing.T, v4, mgpEnabled bool) (*channel.Endpoint, *stack.Stack, *faketime.ManualClock) {
- t.Helper()
-
- e := channel.New(maxUnsolicitedReports, header.IPv6MinimumMTU, linkAddr)
- s, clock := createStackWithLinkEndpoint(t, v4, mgpEnabled, e)
- return e, s, clock
-}
-
-func createStackWithLinkEndpoint(t *testing.T, v4, mgpEnabled bool, e stack.LinkEndpoint) (*stack.Stack, *faketime.ManualClock) {
- t.Helper()
-
- igmpEnabled := v4 && mgpEnabled
- mldEnabled := !v4 && mgpEnabled
-
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{
- ipv4.NewProtocolWithOptions(ipv4.Options{
- IGMP: ipv4.IGMPOptions{
- Enabled: igmpEnabled,
- },
- }),
- ipv6.NewProtocolWithOptions(ipv6.Options{
- MLD: ipv6.MLDOptions{
- Enabled: mldEnabled,
- },
- }),
- },
- Clock: clock,
- })
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
- addr := tcpip.AddressWithPrefix{
- Address: stackIPv4Addr,
- PrefixLen: defaultIPv4PrefixLength,
- }
- if err := s.AddAddressWithPrefix(nicID, ipv4.ProtocolNumber, addr); err != nil {
- t.Fatalf("AddAddressWithPrefix(%d, %d, %s): %s", nicID, ipv4.ProtocolNumber, addr, err)
- }
- if err := s.AddAddress(nicID, ipv6.ProtocolNumber, linkLocalIPv6Addr1); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s): %s", nicID, ipv6.ProtocolNumber, linkLocalIPv6Addr1, err)
- }
-
- return s, clock
-}
-
-// checkInitialIPv6Groups checks the initial IPv6 groups that a NIC will join
-// when it is created with an IPv6 address.
-//
-// To not interfere with tests, checkInitialIPv6Groups will leave the added
-// address's solicited node multicast group so that the tests can all assume
-// the NIC has not joined any IPv6 groups.
-func checkInitialIPv6Groups(t *testing.T, e *channel.Endpoint, s *stack.Stack, clock *faketime.ManualClock) (reportCounter uint64, leaveCounter uint64) {
- t.Helper()
-
- stats := s.Stats().ICMP.V6.PacketsSent
-
- reportCounter++
- if got := stats.MulticastListenerReport.Value(); got != reportCounter {
- t.Errorf("got stats.MulticastListenerReport.Value() = %d, want = %d", got, reportCounter)
- }
- if p, ok := e.Read(); !ok {
- t.Fatal("expected a report message to be sent")
- } else {
- validateMLDPacket(t, p, ipv6AddrSNMC, mldReport, 0, ipv6AddrSNMC)
- }
-
- // Leave the group to not affect the tests. This is fine since we are not
- // testing DAD or the solicited node address specifically.
- if err := s.LeaveGroup(ipv6.ProtocolNumber, nicID, ipv6AddrSNMC); err != nil {
- t.Fatalf("LeaveGroup(%d, %d, %s): %s", ipv6.ProtocolNumber, nicID, ipv6AddrSNMC, err)
- }
- leaveCounter++
- if got := stats.MulticastListenerDone.Value(); got != leaveCounter {
- t.Errorf("got stats.MulticastListenerDone.Value() = %d, want = %d", got, leaveCounter)
- }
- if p, ok := e.Read(); !ok {
-		t.Fatal("expected a done message to be sent")
- } else {
- validateMLDPacket(t, p, header.IPv6AllRoutersLinkLocalMulticastAddress, mldDone, 0, ipv6AddrSNMC)
- }
-
- // Should not send any more packets.
- clock.Advance(time.Hour)
- if p, ok := e.Read(); ok {
- t.Fatalf("sent unexpected packet = %#v", p)
- }
-
- return reportCounter, leaveCounter
-}
-
-// createAndInjectIGMPPacket creates and injects an IGMP packet with the
-// specified fields.
-func createAndInjectIGMPPacket(e *channel.Endpoint, igmpType byte, maxRespTime byte, groupAddress tcpip.Address) {
- options := header.IPv4OptionsSerializer{
- &header.IPv4SerializableRouterAlertOption{},
- }
- buf := buffer.NewView(header.IPv4MinimumSize + int(options.Length()) + header.IGMPQueryMinimumSize)
- ip := header.IPv4(buf)
- ip.Encode(&header.IPv4Fields{
- TotalLength: uint16(len(buf)),
- TTL: header.IGMPTTL,
- Protocol: uint8(header.IGMPProtocolNumber),
- SrcAddr: remoteIPv4Addr,
- DstAddr: header.IPv4AllSystems,
- Options: options,
- })
- ip.SetChecksum(^ip.CalculateChecksum())
-
- igmp := header.IGMP(ip.Payload())
- igmp.SetType(header.IGMPType(igmpType))
- igmp.SetMaxRespTime(maxRespTime)
- igmp.SetGroupAddress(groupAddress)
- igmp.SetChecksum(header.IGMPCalculateChecksum(igmp))
-
- e.InjectInbound(ipv4.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
-}
-
-// createAndInjectMLDPacket creates and injects an MLD packet with the
-// specified fields.
-func createAndInjectMLDPacket(e *channel.Endpoint, mldType uint8, maxRespDelay byte, groupAddress tcpip.Address) {
- extensionHeaders := header.IPv6ExtHdrSerializer{
- header.IPv6SerializableHopByHopExtHdr{
- &header.IPv6RouterAlertOption{Value: header.IPv6RouterAlertMLD},
- },
- }
-
- extensionHeadersLength := extensionHeaders.Length()
- payloadLength := extensionHeadersLength + header.ICMPv6HeaderSize + header.MLDMinimumSize
- buf := buffer.NewView(header.IPv6MinimumSize + payloadLength)
-
- ip := header.IPv6(buf)
- ip.Encode(&header.IPv6Fields{
- PayloadLength: uint16(payloadLength),
- HopLimit: header.MLDHopLimit,
- TransportProtocol: header.ICMPv6ProtocolNumber,
- SrcAddr: linkLocalIPv6Addr2,
- DstAddr: header.IPv6AllNodesMulticastAddress,
- ExtensionHeaders: extensionHeaders,
- })
-
- icmp := header.ICMPv6(ip.Payload()[extensionHeadersLength:])
- icmp.SetType(header.ICMPv6Type(mldType))
- mld := header.MLD(icmp.MessageBody())
- mld.SetMaximumResponseDelay(uint16(maxRespDelay))
- mld.SetMulticastAddress(groupAddress)
- icmp.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
- Header: icmp,
- Src: linkLocalIPv6Addr2,
- Dst: header.IPv6AllNodesMulticastAddress,
- }))
-
- e.InjectInbound(ipv6.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
-}
-
-// TestMGPDisabled tests that the multicast group protocol is not enabled by
-// default.
-func TestMGPDisabled(t *testing.T) {
- tests := []struct {
- name string
- protoNum tcpip.NetworkProtocolNumber
- multicastAddr tcpip.Address
- sentReportStat func(*stack.Stack) *tcpip.StatCounter
- receivedQueryStat func(*stack.Stack) *tcpip.StatCounter
- rxQuery func(*channel.Endpoint)
- }{
- {
- name: "IGMP",
- protoNum: ipv4.ProtocolNumber,
- multicastAddr: ipv4MulticastAddr1,
- sentReportStat: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().IGMP.PacketsSent.V2MembershipReport
- },
- receivedQueryStat: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().IGMP.PacketsReceived.MembershipQuery
- },
- rxQuery: func(e *channel.Endpoint) {
- createAndInjectIGMPPacket(e, igmpMembershipQuery, unsolicitedIGMPReportIntervalMaxTenthSec, header.IPv4Any)
- },
- },
- {
- name: "MLD",
- protoNum: ipv6.ProtocolNumber,
- multicastAddr: ipv6MulticastAddr1,
- sentReportStat: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().ICMP.V6.PacketsSent.MulticastListenerReport
- },
- receivedQueryStat: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().ICMP.V6.PacketsReceived.MulticastListenerQuery
- },
- rxQuery: func(e *channel.Endpoint) {
- createAndInjectMLDPacket(e, mldQuery, 0, header.IPv6Any)
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- e, s, clock := createStack(t, test.protoNum == ipv4.ProtocolNumber /* v4 */, false /* mgpEnabled */)
-
- // This NIC may join multicast groups when it is enabled but since MGP is
- // disabled, no reports should be sent.
- sentReportStat := test.sentReportStat(s)
- if got := sentReportStat.Value(); got != 0 {
- t.Fatalf("got sentReportStat.Value() = %d, want = 0", got)
- }
- clock.Advance(time.Hour)
- if p, ok := e.Read(); ok {
- t.Fatalf("sent unexpected packet, stack with disabled MGP sent packet = %#v", p.Pkt)
- }
-
- // Test joining a specific group explicitly and verify that no reports are
- // sent.
- if err := s.JoinGroup(test.protoNum, nicID, test.multicastAddr); err != nil {
- t.Fatalf("JoinGroup(%d, %d, %s): %s", test.protoNum, nicID, test.multicastAddr, err)
- }
- if got := sentReportStat.Value(); got != 0 {
- t.Fatalf("got sentReportStat.Value() = %d, want = 0", got)
- }
- clock.Advance(time.Hour)
- if p, ok := e.Read(); ok {
-				t.Fatalf("sent unexpected packet, stack with disabled MGP sent packet = %#v", p.Pkt)
- }
-
- // Inject a general query message. This should only trigger a report to be
- // sent if the MGP was enabled.
- test.rxQuery(e)
- if got := test.receivedQueryStat(s).Value(); got != 1 {
- t.Fatalf("got receivedQueryStat(_).Value() = %d, want = 1", got)
- }
- clock.Advance(time.Hour)
- if p, ok := e.Read(); ok {
-				t.Fatalf("sent unexpected packet, stack with disabled MGP sent packet = %+v", p.Pkt)
- }
- })
- }
-}
-
-func TestMGPReceiveCounters(t *testing.T) {
- tests := []struct {
- name string
- headerType uint8
- maxRespTime byte
- groupAddress tcpip.Address
- statCounter func(*stack.Stack) *tcpip.StatCounter
- rxMGPkt func(*channel.Endpoint, byte, byte, tcpip.Address)
- }{
- {
- name: "IGMP Membership Query",
- headerType: igmpMembershipQuery,
- maxRespTime: unsolicitedIGMPReportIntervalMaxTenthSec,
- groupAddress: header.IPv4Any,
- statCounter: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().IGMP.PacketsReceived.MembershipQuery
- },
- rxMGPkt: createAndInjectIGMPPacket,
- },
- {
- name: "IGMPv1 Membership Report",
- headerType: igmpv1MembershipReport,
- maxRespTime: 0,
- groupAddress: header.IPv4AllSystems,
- statCounter: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().IGMP.PacketsReceived.V1MembershipReport
- },
- rxMGPkt: createAndInjectIGMPPacket,
- },
- {
- name: "IGMPv2 Membership Report",
- headerType: igmpv2MembershipReport,
- maxRespTime: 0,
- groupAddress: header.IPv4AllSystems,
- statCounter: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().IGMP.PacketsReceived.V2MembershipReport
- },
- rxMGPkt: createAndInjectIGMPPacket,
- },
- {
- name: "IGMP Leave Group",
- headerType: igmpLeaveGroup,
- maxRespTime: 0,
- groupAddress: header.IPv4AllRoutersGroup,
- statCounter: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().IGMP.PacketsReceived.LeaveGroup
- },
- rxMGPkt: createAndInjectIGMPPacket,
- },
- {
- name: "MLD Query",
- headerType: mldQuery,
- maxRespTime: 0,
- groupAddress: header.IPv6Any,
- statCounter: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().ICMP.V6.PacketsReceived.MulticastListenerQuery
- },
- rxMGPkt: createAndInjectMLDPacket,
- },
- {
- name: "MLD Report",
- headerType: mldReport,
- maxRespTime: 0,
- groupAddress: header.IPv6Any,
- statCounter: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().ICMP.V6.PacketsReceived.MulticastListenerReport
- },
- rxMGPkt: createAndInjectMLDPacket,
- },
- {
- name: "MLD Done",
- headerType: mldDone,
- maxRespTime: 0,
- groupAddress: header.IPv6Any,
- statCounter: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().ICMP.V6.PacketsReceived.MulticastListenerDone
- },
- rxMGPkt: createAndInjectMLDPacket,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- e, s, _ := createStack(t, len(test.groupAddress) == header.IPv4AddressSize /* v4 */, true /* mgpEnabled */)
-
- test.rxMGPkt(e, test.headerType, test.maxRespTime, test.groupAddress)
- if got := test.statCounter(s).Value(); got != 1 {
- t.Fatalf("got %s received = %d, want = 1", test.name, got)
- }
- })
- }
-}
-
-// TestMGPJoinGroup tests that when explicitly joining a multicast group, the
-// stack schedules and sends correct Membership Reports.
-func TestMGPJoinGroup(t *testing.T) {
- tests := []struct {
- name string
- protoNum tcpip.NetworkProtocolNumber
- multicastAddr tcpip.Address
- maxUnsolicitedResponseDelay time.Duration
- sentReportStat func(*stack.Stack) *tcpip.StatCounter
- receivedQueryStat func(*stack.Stack) *tcpip.StatCounter
- validateReport func(*testing.T, channel.PacketInfo)
- checkInitialGroups func(*testing.T, *channel.Endpoint, *stack.Stack, *faketime.ManualClock) (uint64, uint64)
- }{
- {
- name: "IGMP",
- protoNum: ipv4.ProtocolNumber,
- multicastAddr: ipv4MulticastAddr1,
- maxUnsolicitedResponseDelay: ipv4.UnsolicitedReportIntervalMax,
- sentReportStat: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().IGMP.PacketsSent.V2MembershipReport
- },
- receivedQueryStat: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().IGMP.PacketsReceived.MembershipQuery
- },
- validateReport: func(t *testing.T, p channel.PacketInfo) {
- t.Helper()
-
- validateIGMPPacket(t, p, ipv4MulticastAddr1, igmpv2MembershipReport, 0, ipv4MulticastAddr1)
- },
- },
- {
- name: "MLD",
- protoNum: ipv6.ProtocolNumber,
- multicastAddr: ipv6MulticastAddr1,
- maxUnsolicitedResponseDelay: ipv6.UnsolicitedReportIntervalMax,
- sentReportStat: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().ICMP.V6.PacketsSent.MulticastListenerReport
- },
- receivedQueryStat: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().ICMP.V6.PacketsReceived.MulticastListenerQuery
- },
- validateReport: func(t *testing.T, p channel.PacketInfo) {
- t.Helper()
-
- validateMLDPacket(t, p, ipv6MulticastAddr1, mldReport, 0, ipv6MulticastAddr1)
- },
- checkInitialGroups: checkInitialIPv6Groups,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- e, s, clock := createStack(t, test.protoNum == ipv4.ProtocolNumber /* v4 */, true /* mgpEnabled */)
-
- var reportCounter uint64
- if test.checkInitialGroups != nil {
- reportCounter, _ = test.checkInitialGroups(t, e, s, clock)
- }
-
- // Test joining a specific address explicitly and verify a Report is sent
- // immediately.
- if err := s.JoinGroup(test.protoNum, nicID, test.multicastAddr); err != nil {
- t.Fatalf("JoinGroup(%d, %d, %s): %s", test.protoNum, nicID, test.multicastAddr, err)
- }
- reportCounter++
- sentReportStat := test.sentReportStat(s)
- if got := sentReportStat.Value(); got != reportCounter {
- t.Errorf("got sentReportStat.Value() = %d, want = %d", got, reportCounter)
- }
- if p, ok := e.Read(); !ok {
- t.Fatal("expected a report message to be sent")
- } else {
- test.validateReport(t, p)
- }
- if t.Failed() {
- t.FailNow()
- }
-
- // Verify the second report is sent by the maximum unsolicited response
- // interval.
- p, ok := e.Read()
- if ok {
- t.Fatalf("sent unexpected packet, expected report only after advancing the clock = %#v", p.Pkt)
- }
- clock.Advance(test.maxUnsolicitedResponseDelay)
- reportCounter++
- if got := sentReportStat.Value(); got != reportCounter {
- t.Errorf("got sentReportStat.Value() = %d, want = %d", got, reportCounter)
- }
- if p, ok := e.Read(); !ok {
- t.Fatal("expected a report message to be sent")
- } else {
- test.validateReport(t, p)
- }
-
- // Should not send any more packets.
- clock.Advance(time.Hour)
- if p, ok := e.Read(); ok {
- t.Fatalf("sent unexpected packet = %#v", p)
- }
- })
- }
-}
-
-// TestMGPLeaveGroup tests that when leaving a previously joined multicast
-// group the stack sends a leave/done message.
-func TestMGPLeaveGroup(t *testing.T) {
- tests := []struct {
- name string
- protoNum tcpip.NetworkProtocolNumber
- multicastAddr tcpip.Address
- sentReportStat func(*stack.Stack) *tcpip.StatCounter
- sentLeaveStat func(*stack.Stack) *tcpip.StatCounter
- validateReport func(*testing.T, channel.PacketInfo)
- validateLeave func(*testing.T, channel.PacketInfo)
- checkInitialGroups func(*testing.T, *channel.Endpoint, *stack.Stack, *faketime.ManualClock) (uint64, uint64)
- }{
- {
- name: "IGMP",
- protoNum: ipv4.ProtocolNumber,
- multicastAddr: ipv4MulticastAddr1,
- sentReportStat: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().IGMP.PacketsSent.V2MembershipReport
- },
- sentLeaveStat: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().IGMP.PacketsSent.LeaveGroup
- },
- validateReport: func(t *testing.T, p channel.PacketInfo) {
- t.Helper()
-
- validateIGMPPacket(t, p, ipv4MulticastAddr1, igmpv2MembershipReport, 0, ipv4MulticastAddr1)
- },
- validateLeave: func(t *testing.T, p channel.PacketInfo) {
- t.Helper()
-
- validateIGMPPacket(t, p, header.IPv4AllRoutersGroup, igmpLeaveGroup, 0, ipv4MulticastAddr1)
- },
- },
- {
- name: "MLD",
- protoNum: ipv6.ProtocolNumber,
- multicastAddr: ipv6MulticastAddr1,
- sentReportStat: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().ICMP.V6.PacketsSent.MulticastListenerReport
- },
- sentLeaveStat: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().ICMP.V6.PacketsSent.MulticastListenerDone
- },
- validateReport: func(t *testing.T, p channel.PacketInfo) {
- t.Helper()
-
- validateMLDPacket(t, p, ipv6MulticastAddr1, mldReport, 0, ipv6MulticastAddr1)
- },
- validateLeave: func(t *testing.T, p channel.PacketInfo) {
- t.Helper()
-
- validateMLDPacket(t, p, header.IPv6AllRoutersLinkLocalMulticastAddress, mldDone, 0, ipv6MulticastAddr1)
- },
- checkInitialGroups: checkInitialIPv6Groups,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- e, s, clock := createStack(t, test.protoNum == ipv4.ProtocolNumber /* v4 */, true /* mgpEnabled */)
-
- var reportCounter uint64
- var leaveCounter uint64
- if test.checkInitialGroups != nil {
- reportCounter, leaveCounter = test.checkInitialGroups(t, e, s, clock)
- }
-
- if err := s.JoinGroup(test.protoNum, nicID, test.multicastAddr); err != nil {
- t.Fatalf("JoinGroup(%d, %d, %s): %s", test.protoNum, nicID, test.multicastAddr, err)
- }
- reportCounter++
- if got := test.sentReportStat(s).Value(); got != reportCounter {
- t.Errorf("got sentReportStat(_).Value() = %d, want = %d", got, reportCounter)
- }
- if p, ok := e.Read(); !ok {
- t.Fatal("expected a report message to be sent")
- } else {
- test.validateReport(t, p)
- }
- if t.Failed() {
- t.FailNow()
- }
-
-			// Leaving the group should trigger a leave/done message to be sent.
- if err := s.LeaveGroup(test.protoNum, nicID, test.multicastAddr); err != nil {
- t.Fatalf("LeaveGroup(%d, nic, %s): %s", test.protoNum, test.multicastAddr, err)
- }
- leaveCounter++
- if got := test.sentLeaveStat(s).Value(); got != leaveCounter {
- t.Fatalf("got sentLeaveStat(_).Value() = %d, want = %d", got, leaveCounter)
- }
- if p, ok := e.Read(); !ok {
- t.Fatal("expected a leave message to be sent")
- } else {
- test.validateLeave(t, p)
- }
-
- // Should not send any more packets.
- clock.Advance(time.Hour)
- if p, ok := e.Read(); ok {
- t.Fatalf("sent unexpected packet = %#v", p)
- }
- })
- }
-}
-
-// TestMGPQueryMessages tests that a report is sent in response to query
-// messages.
-func TestMGPQueryMessages(t *testing.T) {
- tests := []struct {
- name string
- protoNum tcpip.NetworkProtocolNumber
- multicastAddr tcpip.Address
- maxUnsolicitedResponseDelay time.Duration
- sentReportStat func(*stack.Stack) *tcpip.StatCounter
- receivedQueryStat func(*stack.Stack) *tcpip.StatCounter
- rxQuery func(*channel.Endpoint, uint8, tcpip.Address)
- validateReport func(*testing.T, channel.PacketInfo)
- maxRespTimeToDuration func(uint8) time.Duration
- checkInitialGroups func(*testing.T, *channel.Endpoint, *stack.Stack, *faketime.ManualClock) (uint64, uint64)
- }{
- {
- name: "IGMP",
- protoNum: ipv4.ProtocolNumber,
- multicastAddr: ipv4MulticastAddr1,
- maxUnsolicitedResponseDelay: ipv4.UnsolicitedReportIntervalMax,
- sentReportStat: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().IGMP.PacketsSent.V2MembershipReport
- },
- receivedQueryStat: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().IGMP.PacketsReceived.MembershipQuery
- },
- rxQuery: func(e *channel.Endpoint, maxRespTime uint8, groupAddress tcpip.Address) {
- createAndInjectIGMPPacket(e, igmpMembershipQuery, maxRespTime, groupAddress)
- },
- validateReport: func(t *testing.T, p channel.PacketInfo) {
- t.Helper()
-
- validateIGMPPacket(t, p, ipv4MulticastAddr1, igmpv2MembershipReport, 0, ipv4MulticastAddr1)
- },
- maxRespTimeToDuration: header.DecisecondToDuration,
- },
- {
- name: "MLD",
- protoNum: ipv6.ProtocolNumber,
- multicastAddr: ipv6MulticastAddr1,
- maxUnsolicitedResponseDelay: ipv6.UnsolicitedReportIntervalMax,
- sentReportStat: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().ICMP.V6.PacketsSent.MulticastListenerReport
- },
- receivedQueryStat: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().ICMP.V6.PacketsReceived.MulticastListenerQuery
- },
- rxQuery: func(e *channel.Endpoint, maxRespTime uint8, groupAddress tcpip.Address) {
- createAndInjectMLDPacket(e, mldQuery, maxRespTime, groupAddress)
- },
- validateReport: func(t *testing.T, p channel.PacketInfo) {
- t.Helper()
-
- validateMLDPacket(t, p, ipv6MulticastAddr1, mldReport, 0, ipv6MulticastAddr1)
- },
- maxRespTimeToDuration: func(d uint8) time.Duration {
- return time.Duration(d) * time.Millisecond
- },
- checkInitialGroups: checkInitialIPv6Groups,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- subTests := []struct {
- name string
- multicastAddr tcpip.Address
- expectReport bool
- }{
- {
- name: "Unspecified",
- multicastAddr: tcpip.Address(strings.Repeat("\x00", len(test.multicastAddr))),
- expectReport: true,
- },
- {
- name: "Specified",
- multicastAddr: test.multicastAddr,
- expectReport: true,
- },
- {
- name: "Specified other address",
- multicastAddr: func() tcpip.Address {
- addrBytes := []byte(test.multicastAddr)
- addrBytes[len(addrBytes)-1]++
- return tcpip.Address(addrBytes)
- }(),
- expectReport: false,
- },
- }
-
- for _, subTest := range subTests {
- t.Run(subTest.name, func(t *testing.T) {
- e, s, clock := createStack(t, test.protoNum == ipv4.ProtocolNumber /* v4 */, true /* mgpEnabled */)
-
- var reportCounter uint64
- if test.checkInitialGroups != nil {
- reportCounter, _ = test.checkInitialGroups(t, e, s, clock)
- }
-
- if err := s.JoinGroup(test.protoNum, nicID, test.multicastAddr); err != nil {
- t.Fatalf("JoinGroup(%d, %d, %s): %s", test.protoNum, nicID, test.multicastAddr, err)
- }
- sentReportStat := test.sentReportStat(s)
- for i := 0; i < maxUnsolicitedReports; i++ {
- sentReportStat := test.sentReportStat(s)
- reportCounter++
- if got := sentReportStat.Value(); got != reportCounter {
- t.Errorf("(i=%d) got sentReportStat.Value() = %d, want = %d", i, got, reportCounter)
- }
- if p, ok := e.Read(); !ok {
- t.Fatalf("expected %d-th report message to be sent", i)
- } else {
- test.validateReport(t, p)
- }
- clock.Advance(test.maxUnsolicitedResponseDelay)
- }
- if t.Failed() {
- t.FailNow()
- }
-
- // Should not send any more packets until a query.
- clock.Advance(time.Hour)
- if p, ok := e.Read(); ok {
- t.Fatalf("sent unexpected packet = %#v", p)
- }
-
-					// Receive a query message. This should trigger a report to be
-					// sent at some time before the maximum response time, but only
-					// if the query is unspecified or targets a group the host joined.
- const maxRespTime = 100
- test.rxQuery(e, maxRespTime, subTest.multicastAddr)
- if p, ok := e.Read(); ok {
- t.Fatalf("sent unexpected packet = %#v", p.Pkt)
- }
-
- if subTest.expectReport {
- clock.Advance(test.maxRespTimeToDuration(maxRespTime))
- reportCounter++
- if got := sentReportStat.Value(); got != reportCounter {
- t.Errorf("got sentReportStat.Value() = %d, want = %d", got, reportCounter)
- }
- if p, ok := e.Read(); !ok {
- t.Fatal("expected a report message to be sent")
- } else {
- test.validateReport(t, p)
- }
- }
-
- // Should not send any more packets.
- clock.Advance(time.Hour)
- if p, ok := e.Read(); ok {
- t.Fatalf("sent unexpected packet = %#v", p)
- }
- })
- }
- })
- }
-}
-
-// TestMGPReportMessages tests that no further reports or leave/done messages
-// are sent after receiving a report.
-func TestMGPReportMessages(t *testing.T) {
- tests := []struct {
- name string
- protoNum tcpip.NetworkProtocolNumber
- multicastAddr tcpip.Address
- sentReportStat func(*stack.Stack) *tcpip.StatCounter
- sentLeaveStat func(*stack.Stack) *tcpip.StatCounter
- rxReport func(*channel.Endpoint)
- validateReport func(*testing.T, channel.PacketInfo)
- maxRespTimeToDuration func(uint8) time.Duration
- checkInitialGroups func(*testing.T, *channel.Endpoint, *stack.Stack, *faketime.ManualClock) (uint64, uint64)
- }{
- {
- name: "IGMP",
- protoNum: ipv4.ProtocolNumber,
- multicastAddr: ipv4MulticastAddr1,
- sentReportStat: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().IGMP.PacketsSent.V2MembershipReport
- },
- sentLeaveStat: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().IGMP.PacketsSent.LeaveGroup
- },
- rxReport: func(e *channel.Endpoint) {
- createAndInjectIGMPPacket(e, igmpv2MembershipReport, 0, ipv4MulticastAddr1)
- },
- validateReport: func(t *testing.T, p channel.PacketInfo) {
- t.Helper()
-
- validateIGMPPacket(t, p, ipv4MulticastAddr1, igmpv2MembershipReport, 0, ipv4MulticastAddr1)
- },
- maxRespTimeToDuration: header.DecisecondToDuration,
- },
- {
- name: "MLD",
- protoNum: ipv6.ProtocolNumber,
- multicastAddr: ipv6MulticastAddr1,
- sentReportStat: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().ICMP.V6.PacketsSent.MulticastListenerReport
- },
- sentLeaveStat: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().ICMP.V6.PacketsSent.MulticastListenerDone
- },
- rxReport: func(e *channel.Endpoint) {
- createAndInjectMLDPacket(e, mldReport, 0, ipv6MulticastAddr1)
- },
- validateReport: func(t *testing.T, p channel.PacketInfo) {
- t.Helper()
-
- validateMLDPacket(t, p, ipv6MulticastAddr1, mldReport, 0, ipv6MulticastAddr1)
- },
- maxRespTimeToDuration: func(d uint8) time.Duration {
- return time.Duration(d) * time.Millisecond
- },
- checkInitialGroups: checkInitialIPv6Groups,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- e, s, clock := createStack(t, test.protoNum == ipv4.ProtocolNumber /* v4 */, true /* mgpEnabled */)
-
- var reportCounter uint64
- var leaveCounter uint64
- if test.checkInitialGroups != nil {
- reportCounter, leaveCounter = test.checkInitialGroups(t, e, s, clock)
- }
-
- if err := s.JoinGroup(test.protoNum, nicID, test.multicastAddr); err != nil {
- t.Fatalf("JoinGroup(%d, %d, %s): %s", test.protoNum, nicID, test.multicastAddr, err)
- }
- sentReportStat := test.sentReportStat(s)
- reportCounter++
- if got := sentReportStat.Value(); got != reportCounter {
- t.Errorf("got sentReportStat.Value() = %d, want = %d", got, reportCounter)
- }
- if p, ok := e.Read(); !ok {
- t.Fatal("expected a report message to be sent")
- } else {
- test.validateReport(t, p)
- }
- if t.Failed() {
- t.FailNow()
- }
-
- // Receiving a report for a group we joined should cancel any further
- // reports.
- test.rxReport(e)
- clock.Advance(time.Hour)
- if got := sentReportStat.Value(); got != reportCounter {
- t.Errorf("got sentReportStat.Value() = %d, want = %d", got, reportCounter)
- }
- if p, ok := e.Read(); ok {
- t.Errorf("sent unexpected packet = %#v", p)
- }
- if t.Failed() {
- t.FailNow()
- }
-
- // Leaving a group after getting a report should not send a leave/done
- // message.
- if err := s.LeaveGroup(test.protoNum, nicID, test.multicastAddr); err != nil {
- t.Fatalf("LeaveGroup(%d, nic, %s): %s", test.protoNum, test.multicastAddr, err)
- }
- clock.Advance(time.Hour)
- if got := test.sentLeaveStat(s).Value(); got != leaveCounter {
- t.Fatalf("got sentLeaveStat(_).Value() = %d, want = %d", got, leaveCounter)
- }
-
- // Should not send any more packets.
- clock.Advance(time.Hour)
- if p, ok := e.Read(); ok {
- t.Fatalf("sent unexpected packet = %#v", p)
- }
- })
- }
-}
-
-func TestMGPWithNICLifecycle(t *testing.T) {
- tests := []struct {
- name string
- protoNum tcpip.NetworkProtocolNumber
- multicastAddrs []tcpip.Address
- finalMulticastAddr tcpip.Address
- maxUnsolicitedResponseDelay time.Duration
- sentReportStat func(*stack.Stack) *tcpip.StatCounter
- sentLeaveStat func(*stack.Stack) *tcpip.StatCounter
- validateReport func(*testing.T, channel.PacketInfo, tcpip.Address)
- validateLeave func(*testing.T, channel.PacketInfo, tcpip.Address)
- getAndCheckGroupAddress func(*testing.T, map[tcpip.Address]bool, channel.PacketInfo) tcpip.Address
- checkInitialGroups func(*testing.T, *channel.Endpoint, *stack.Stack, *faketime.ManualClock) (uint64, uint64)
- }{
- {
- name: "IGMP",
- protoNum: ipv4.ProtocolNumber,
- multicastAddrs: []tcpip.Address{ipv4MulticastAddr1, ipv4MulticastAddr2},
- finalMulticastAddr: ipv4MulticastAddr3,
- maxUnsolicitedResponseDelay: ipv4.UnsolicitedReportIntervalMax,
- sentReportStat: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().IGMP.PacketsSent.V2MembershipReport
- },
- sentLeaveStat: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().IGMP.PacketsSent.LeaveGroup
- },
- validateReport: func(t *testing.T, p channel.PacketInfo, addr tcpip.Address) {
- t.Helper()
-
- validateIGMPPacket(t, p, addr, igmpv2MembershipReport, 0, addr)
- },
- validateLeave: func(t *testing.T, p channel.PacketInfo, addr tcpip.Address) {
- t.Helper()
-
- validateIGMPPacket(t, p, header.IPv4AllRoutersGroup, igmpLeaveGroup, 0, addr)
- },
- getAndCheckGroupAddress: func(t *testing.T, seen map[tcpip.Address]bool, p channel.PacketInfo) tcpip.Address {
- t.Helper()
-
- ipv4 := header.IPv4(stack.PayloadSince(p.Pkt.NetworkHeader()))
- if got := tcpip.TransportProtocolNumber(ipv4.Protocol()); got != header.IGMPProtocolNumber {
- t.Fatalf("got ipv4.Protocol() = %d, want = %d", got, header.IGMPProtocolNumber)
- }
- addr := header.IGMP(ipv4.Payload()).GroupAddress()
- s, ok := seen[addr]
- if !ok {
- t.Fatalf("unexpectedly got a packet for group %s", addr)
- }
- if s {
- t.Fatalf("already saw packet for group %s", addr)
- }
- seen[addr] = true
- return addr
- },
- },
- {
- name: "MLD",
- protoNum: ipv6.ProtocolNumber,
- multicastAddrs: []tcpip.Address{ipv6MulticastAddr1, ipv6MulticastAddr2},
- finalMulticastAddr: ipv6MulticastAddr3,
- maxUnsolicitedResponseDelay: ipv6.UnsolicitedReportIntervalMax,
- sentReportStat: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().ICMP.V6.PacketsSent.MulticastListenerReport
- },
- sentLeaveStat: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().ICMP.V6.PacketsSent.MulticastListenerDone
- },
- validateReport: func(t *testing.T, p channel.PacketInfo, addr tcpip.Address) {
- t.Helper()
-
- validateMLDPacket(t, p, addr, mldReport, 0, addr)
- },
- validateLeave: func(t *testing.T, p channel.PacketInfo, addr tcpip.Address) {
- t.Helper()
-
- validateMLDPacket(t, p, header.IPv6AllRoutersLinkLocalMulticastAddress, mldDone, 0, addr)
- },
- getAndCheckGroupAddress: func(t *testing.T, seen map[tcpip.Address]bool, p channel.PacketInfo) tcpip.Address {
- t.Helper()
-
- ipv6 := header.IPv6(stack.PayloadSince(p.Pkt.NetworkHeader()))
-
- ipv6HeaderIter := header.MakeIPv6PayloadIterator(
- header.IPv6ExtensionHeaderIdentifier(ipv6.NextHeader()),
- buffer.View(ipv6.Payload()).ToVectorisedView(),
- )
-
- var transport header.IPv6RawPayloadHeader
- for {
- h, done, err := ipv6HeaderIter.Next()
- if err != nil {
- t.Fatalf("ipv6HeaderIter.Next(): %s", err)
- }
- if done {
- t.Fatalf("ipv6HeaderIter.Next() = (%T, %t, _), want = (_, false, _)", h, done)
- }
- if t, ok := h.(header.IPv6RawPayloadHeader); ok {
- transport = t
- break
- }
- }
-
- if got := tcpip.TransportProtocolNumber(transport.Identifier); got != header.ICMPv6ProtocolNumber {
- t.Fatalf("got ipv6.NextHeader() = %d, want = %d", got, header.ICMPv6ProtocolNumber)
- }
- icmpv6 := header.ICMPv6(transport.Buf.ToView())
- if got := icmpv6.Type(); got != header.ICMPv6MulticastListenerReport && got != header.ICMPv6MulticastListenerDone {
- t.Fatalf("got icmpv6.Type() = %d, want = %d or %d", got, header.ICMPv6MulticastListenerReport, header.ICMPv6MulticastListenerDone)
- }
- addr := header.MLD(icmpv6.MessageBody()).MulticastAddress()
- s, ok := seen[addr]
- if !ok {
- t.Fatalf("unexpectedly got a packet for group %s", addr)
- }
- if s {
- t.Fatalf("already saw packet for group %s", addr)
- }
- seen[addr] = true
- return addr
- },
- checkInitialGroups: checkInitialIPv6Groups,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- e, s, clock := createStack(t, test.protoNum == ipv4.ProtocolNumber /* v4 */, true /* mgpEnabled */)
-
- var reportCounter uint64
- var leaveCounter uint64
- if test.checkInitialGroups != nil {
- reportCounter, leaveCounter = test.checkInitialGroups(t, e, s, clock)
- }
-
- sentReportStat := test.sentReportStat(s)
- for _, a := range test.multicastAddrs {
- if err := s.JoinGroup(test.protoNum, nicID, a); err != nil {
- t.Fatalf("JoinGroup(%d, %d, %s): %s", test.protoNum, nicID, a, err)
- }
- reportCounter++
- if got := sentReportStat.Value(); got != reportCounter {
- t.Errorf("got sentReportStat.Value() = %d, want = %d", got, reportCounter)
- }
- if p, ok := e.Read(); !ok {
- t.Fatalf("expected a report message to be sent for %s", a)
- } else {
- test.validateReport(t, p, a)
- }
- }
- if t.Failed() {
- t.FailNow()
- }
-
- // Leave messages should be sent for the joined groups when the NIC is
- // disabled.
- if err := s.DisableNIC(nicID); err != nil {
- t.Fatalf("DisableNIC(%d): %s", nicID, err)
- }
- sentLeaveStat := test.sentLeaveStat(s)
- leaveCounter += uint64(len(test.multicastAddrs))
- if got := sentLeaveStat.Value(); got != leaveCounter {
- t.Errorf("got sentLeaveStat.Value() = %d, want = %d", got, leaveCounter)
- }
- {
- seen := make(map[tcpip.Address]bool)
- for _, a := range test.multicastAddrs {
- seen[a] = false
- }
-
- for i := range test.multicastAddrs {
- p, ok := e.Read()
- if !ok {
- t.Fatalf("expected (%d-th) leave message to be sent", i)
- }
-
- test.validateLeave(t, p, test.getAndCheckGroupAddress(t, seen, p))
- }
- }
- if t.Failed() {
- t.FailNow()
- }
-
- // Reports should be sent for the joined groups when the NIC is enabled.
- if err := s.EnableNIC(nicID); err != nil {
- t.Fatalf("EnableNIC(%d): %s", nicID, err)
- }
- reportCounter += uint64(len(test.multicastAddrs))
- if got := sentReportStat.Value(); got != reportCounter {
- t.Errorf("got sentReportStat.Value() = %d, want = %d", got, reportCounter)
- }
- {
- seen := make(map[tcpip.Address]bool)
- for _, a := range test.multicastAddrs {
- seen[a] = false
- }
-
- for i := range test.multicastAddrs {
- p, ok := e.Read()
- if !ok {
- t.Fatalf("expected (%d-th) report message to be sent", i)
- }
-
- test.validateReport(t, p, test.getAndCheckGroupAddress(t, seen, p))
- }
- }
- if t.Failed() {
- t.FailNow()
- }
-
- // Joining/leaving a group while disabled should not send any messages.
- if err := s.DisableNIC(nicID); err != nil {
- t.Fatalf("DisableNIC(%d): %s", nicID, err)
- }
- leaveCounter += uint64(len(test.multicastAddrs))
- if got := sentLeaveStat.Value(); got != leaveCounter {
- t.Errorf("got sentLeaveStat.Value() = %d, want = %d", got, leaveCounter)
- }
- for i := range test.multicastAddrs {
- if _, ok := e.Read(); !ok {
- t.Fatalf("expected (%d-th) leave message to be sent", i)
- }
- }
- for _, a := range test.multicastAddrs {
- if err := s.LeaveGroup(test.protoNum, nicID, a); err != nil {
- t.Fatalf("LeaveGroup(%d, nic, %s): %s", test.protoNum, a, err)
- }
- if got := sentLeaveStat.Value(); got != leaveCounter {
- t.Errorf("got sentLeaveStat.Value() = %d, want = %d", got, leaveCounter)
- }
- if p, ok := e.Read(); ok {
- t.Fatalf("leaving group %s on disabled NIC sent unexpected packet = %#v", a, p.Pkt)
- }
- }
- if err := s.JoinGroup(test.protoNum, nicID, test.finalMulticastAddr); err != nil {
- t.Fatalf("JoinGroup(%d, %d, %s): %s", test.protoNum, nicID, test.finalMulticastAddr, err)
- }
- if got := sentReportStat.Value(); got != reportCounter {
- t.Errorf("got sentReportStat.Value() = %d, want = %d", got, reportCounter)
- }
- if p, ok := e.Read(); ok {
- t.Fatalf("joining group %s on disabled NIC sent unexpected packet = %#v", test.finalMulticastAddr, p.Pkt)
- }
-
- // A report should only be sent for the group we last joined after
- // enabling the NIC since the original groups were all left.
- if err := s.EnableNIC(nicID); err != nil {
- t.Fatalf("EnableNIC(%d): %s", nicID, err)
- }
- reportCounter++
- if got := sentReportStat.Value(); got != reportCounter {
- t.Errorf("got sentReportStat.Value() = %d, want = %d", got, reportCounter)
- }
- if p, ok := e.Read(); !ok {
- t.Fatal("expected a report message to be sent")
- } else {
- test.validateReport(t, p, test.finalMulticastAddr)
- }
-
- clock.Advance(test.maxUnsolicitedResponseDelay)
- reportCounter++
- if got := sentReportStat.Value(); got != reportCounter {
- t.Errorf("got sentReportStat.Value() = %d, want = %d", got, reportCounter)
- }
- if p, ok := e.Read(); !ok {
- t.Fatal("expected a report message to be sent")
- } else {
- test.validateReport(t, p, test.finalMulticastAddr)
- }
-
- // Should not send any more packets.
- clock.Advance(time.Hour)
- if p, ok := e.Read(); ok {
- t.Fatalf("sent unexpected packet = %#v", p)
- }
- })
- }
-}
-
-// TestMGPDisabledOnLoopback tests that the multicast group protocol is not
-// performed on loopback interfaces since they have no neighbours.
-func TestMGPDisabledOnLoopback(t *testing.T) {
- tests := []struct {
- name string
- protoNum tcpip.NetworkProtocolNumber
- multicastAddr tcpip.Address
- sentReportStat func(*stack.Stack) *tcpip.StatCounter
- }{
- {
- name: "IGMP",
- protoNum: ipv4.ProtocolNumber,
- multicastAddr: ipv4MulticastAddr1,
- sentReportStat: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().IGMP.PacketsSent.V2MembershipReport
- },
- },
- {
- name: "MLD",
- protoNum: ipv6.ProtocolNumber,
- multicastAddr: ipv6MulticastAddr1,
- sentReportStat: func(s *stack.Stack) *tcpip.StatCounter {
- return s.Stats().ICMP.V6.PacketsSent.MulticastListenerReport
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s, clock := createStackWithLinkEndpoint(t, test.protoNum == ipv4.ProtocolNumber /* v4 */, true /* mgpEnabled */, loopback.New())
-
- sentReportStat := test.sentReportStat(s)
- if got := sentReportStat.Value(); got != 0 {
- t.Fatalf("got sentReportStat.Value() = %d, want = 0", got)
- }
- clock.Advance(time.Hour)
- if got := sentReportStat.Value(); got != 0 {
- t.Fatalf("got sentReportStat.Value() = %d, want = 0", got)
- }
-
- // Test joining a specific group explicitly and verify that no reports are
- // sent.
- if err := s.JoinGroup(test.protoNum, nicID, test.multicastAddr); err != nil {
- t.Fatalf("JoinGroup(%d, %d, %s): %s", test.protoNum, nicID, test.multicastAddr, err)
- }
- if got := sentReportStat.Value(); got != 0 {
- t.Fatalf("got sentReportStat.Value() = %d, want = 0", got)
- }
- clock.Advance(time.Hour)
- if got := sentReportStat.Value(); got != 0 {
- t.Fatalf("got sentReportStat.Value() = %d, want = 0", got)
- }
- })
- }
-}
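
The tests removed above all build on one scaffold: a channel link endpoint plus a stack created with IGMP/MLD explicitly enabled, driven by a faketime manual clock so that report timers fire deterministically. The following is a minimal sketch of that scaffold in Go, using only calls that appear in the deleted file; the NIC ID, queue size, link address, and IP addresses are illustrative values rather than anything prescribed by this change.

package main

import (
	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/faketime"
	"gvisor.dev/gvisor/pkg/tcpip/header"
	"gvisor.dev/gvisor/pkg/tcpip/link/channel"
	"gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
	"gvisor.dev/gvisor/pkg/tcpip/stack"
	"gvisor.dev/gvisor/pkg/tcpip/testutil"
)

func main() {
	const nicID = 1

	// Manual clock: protocol timers only fire when the caller advances it.
	clock := faketime.NewManualClock()
	// Channel endpoint: queues outgoing packets so they can be read back.
	e := channel.New(2 /* queue size */, header.IPv6MinimumMTU, tcpip.LinkAddress("\x02\x02\x03\x04\x05\x06"))

	s := stack.New(stack.Options{
		NetworkProtocols: []stack.NetworkProtocolFactory{
			ipv4.NewProtocolWithOptions(ipv4.Options{
				IGMP: ipv4.IGMPOptions{Enabled: true},
			}),
		},
		Clock: clock,
	})
	if err := s.CreateNIC(nicID, e); err != nil {
		panic(err)
	}
	addr := tcpip.AddressWithPrefix{
		Address:   testutil.MustParse4("10.0.0.1"),
		PrefixLen: 24,
	}
	if err := s.AddAddressWithPrefix(nicID, ipv4.ProtocolNumber, addr); err != nil {
		panic(err)
	}

	// Joining a group sends an unsolicited IGMPv2 Membership Report right
	// away; advancing the clock fires the scheduled retransmission.
	group := testutil.MustParse4("224.0.0.3")
	if err := s.JoinGroup(ipv4.ProtocolNumber, nicID, group); err != nil {
		panic(err)
	}
	clock.Advance(ipv4.UnsolicitedReportIntervalMax)
	if p, ok := e.Read(); ok {
		_ = p // the deleted tests check this packet with validateIGMPPacket.
	}
}

In the deleted tests the packets read back from the channel endpoint are validated field by field (source, destination, TTL/hop limit, router alert, group address) rather than discarded as above.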
diff --git a/pkg/tcpip/ports/BUILD b/pkg/tcpip/ports/BUILD
deleted file mode 100644
index fe98a52af..000000000
--- a/pkg/tcpip/ports/BUILD
+++ /dev/null
@@ -1,28 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "ports",
- srcs = [
- "flags.go",
- "ports.go",
- ],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/sync",
- "//pkg/tcpip",
- "//pkg/tcpip/header",
- ],
-)
-
-go_test(
- name = "ports_test",
- srcs = ["ports_test.go"],
- library = ":ports",
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/testutil",
- "@com_github_google_go_cmp//cmp:go_default_library",
- ],
-)
diff --git a/pkg/tcpip/ports/ports_state_autogen.go b/pkg/tcpip/ports/ports_state_autogen.go
new file mode 100644
index 000000000..2719f6c41
--- /dev/null
+++ b/pkg/tcpip/ports/ports_state_autogen.go
@@ -0,0 +1,42 @@
+// automatically generated by stateify.
+
+package ports
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (f *Flags) StateTypeName() string {
+ return "pkg/tcpip/ports.Flags"
+}
+
+func (f *Flags) StateFields() []string {
+ return []string{
+ "MostRecent",
+ "LoadBalanced",
+ "TupleOnly",
+ }
+}
+
+func (f *Flags) beforeSave() {}
+
+// +checklocksignore
+func (f *Flags) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.MostRecent)
+ stateSinkObject.Save(1, &f.LoadBalanced)
+ stateSinkObject.Save(2, &f.TupleOnly)
+}
+
+func (f *Flags) afterLoad() {}
+
+// +checklocksignore
+func (f *Flags) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.MostRecent)
+ stateSourceObject.Load(1, &f.LoadBalanced)
+ stateSourceObject.Load(2, &f.TupleOnly)
+}
+
+func init() {
+ state.Register((*Flags)(nil))
+}
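
The generated file above follows the usual stateify shape: a stable type name, an ordered field list, save/load hooks over a state.Sink/state.Source, and an init-time registration. A small sketch of how that surface looks from outside the package, exercising only the metadata methods shown in the diff (a full save/restore round trip through pkg/state is not shown here):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip/ports"
)

func main() {
	// Flags is the saved type; the generated methods expose its identity
	// and field order to the state machinery.
	f := ports.Flags{LoadBalanced: true}
	fmt.Println(f.StateTypeName()) // pkg/tcpip/ports.Flags
	fmt.Println(f.StateFields())   // [MostRecent LoadBalanced TupleOnly]
}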
diff --git a/pkg/tcpip/ports/ports_test.go b/pkg/tcpip/ports/ports_test.go
deleted file mode 100644
index a91b130df..000000000
--- a/pkg/tcpip/ports/ports_test.go
+++ /dev/null
@@ -1,525 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ports
-
-import (
- "math"
- "math/rand"
- "testing"
- "time"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/testutil"
-)
-
-const (
- fakeTransNumber tcpip.TransportProtocolNumber = 1
- fakeNetworkNumber tcpip.NetworkProtocolNumber = 2
-)
-
-var (
- fakeIPAddress = testutil.MustParse4("8.8.8.8")
- fakeIPAddress1 = testutil.MustParse4("8.8.8.9")
-)
-
-type portReserveTestAction struct {
- port uint16
- ip tcpip.Address
- want tcpip.Error
- flags Flags
- release bool
- device tcpip.NICID
- dest tcpip.FullAddress
-}
-
-func TestPortReservation(t *testing.T) {
- for _, test := range []struct {
- tname string
- actions []portReserveTestAction
- }{
- {
- tname: "bind to ip",
- actions: []portReserveTestAction{
- {port: 80, ip: fakeIPAddress, want: nil},
- {port: 80, ip: fakeIPAddress1, want: nil},
- /* N.B. Order of tests matters! */
- {port: 80, ip: anyIPAddress, want: &tcpip.ErrPortInUse{}},
- {port: 80, ip: fakeIPAddress, want: &tcpip.ErrPortInUse{}, flags: Flags{LoadBalanced: true}},
- },
- },
- {
- tname: "bind to inaddr any",
- actions: []portReserveTestAction{
- {port: 22, ip: anyIPAddress, want: nil},
- {port: 22, ip: fakeIPAddress, want: &tcpip.ErrPortInUse{}},
- /* release fakeIPAddress, but anyIPAddress is still inuse */
- {port: 22, ip: fakeIPAddress, release: true},
- {port: 22, ip: fakeIPAddress, want: &tcpip.ErrPortInUse{}},
- {port: 22, ip: fakeIPAddress, want: &tcpip.ErrPortInUse{}, flags: Flags{LoadBalanced: true}},
- /* Release port 22 from any IP address, then try to reserve fake IP address on 22 */
- {port: 22, ip: anyIPAddress, want: nil, release: true},
- {port: 22, ip: fakeIPAddress, want: nil},
- },
- }, {
- tname: "bind to zero port",
- actions: []portReserveTestAction{
- {port: 00, ip: fakeIPAddress, want: nil},
- {port: 00, ip: fakeIPAddress, want: nil},
- {port: 00, ip: fakeIPAddress, flags: Flags{LoadBalanced: true}, want: nil},
- },
- }, {
- tname: "bind to ip with reuseport",
- actions: []portReserveTestAction{
- {port: 25, ip: fakeIPAddress, flags: Flags{LoadBalanced: true}, want: nil},
- {port: 25, ip: fakeIPAddress, flags: Flags{LoadBalanced: true}, want: nil},
-
- {port: 25, ip: fakeIPAddress, flags: Flags{}, want: &tcpip.ErrPortInUse{}},
- {port: 25, ip: anyIPAddress, flags: Flags{}, want: &tcpip.ErrPortInUse{}},
-
- {port: 25, ip: anyIPAddress, flags: Flags{LoadBalanced: true}, want: nil},
- },
- }, {
- tname: "bind to inaddr any with reuseport",
- actions: []portReserveTestAction{
- {port: 24, ip: anyIPAddress, flags: Flags{LoadBalanced: true}, want: nil},
- {port: 24, ip: anyIPAddress, flags: Flags{LoadBalanced: true}, want: nil},
-
- {port: 24, ip: anyIPAddress, flags: Flags{}, want: &tcpip.ErrPortInUse{}},
- {port: 24, ip: fakeIPAddress, flags: Flags{}, want: &tcpip.ErrPortInUse{}},
-
- {port: 24, ip: fakeIPAddress, flags: Flags{LoadBalanced: true}, want: nil},
- {port: 24, ip: fakeIPAddress, flags: Flags{LoadBalanced: true}, release: true, want: nil},
-
- {port: 24, ip: anyIPAddress, flags: Flags{LoadBalanced: true}, release: true},
- {port: 24, ip: anyIPAddress, flags: Flags{}, want: &tcpip.ErrPortInUse{}},
-
- {port: 24, ip: anyIPAddress, flags: Flags{LoadBalanced: true}, release: true},
- {port: 24, ip: anyIPAddress, flags: Flags{}, want: nil},
- },
- }, {
- tname: "bind twice with device fails",
- actions: []portReserveTestAction{
- {port: 24, ip: fakeIPAddress, device: 3, want: nil},
- {port: 24, ip: fakeIPAddress, device: 3, want: &tcpip.ErrPortInUse{}},
- },
- }, {
- tname: "bind to device",
- actions: []portReserveTestAction{
- {port: 24, ip: fakeIPAddress, device: 1, want: nil},
- {port: 24, ip: fakeIPAddress, device: 2, want: nil},
- },
- }, {
- tname: "bind to device and then without device",
- actions: []portReserveTestAction{
- {port: 24, ip: fakeIPAddress, device: 123, want: nil},
- {port: 24, ip: fakeIPAddress, device: 0, want: &tcpip.ErrPortInUse{}},
- },
- }, {
- tname: "bind without device",
- actions: []portReserveTestAction{
- {port: 24, ip: fakeIPAddress, want: nil},
- {port: 24, ip: fakeIPAddress, device: 123, want: &tcpip.ErrPortInUse{}},
- {port: 24, ip: fakeIPAddress, device: 123, flags: Flags{LoadBalanced: true}, want: &tcpip.ErrPortInUse{}},
- {port: 24, ip: fakeIPAddress, want: &tcpip.ErrPortInUse{}},
- {port: 24, ip: fakeIPAddress, flags: Flags{LoadBalanced: true}, want: &tcpip.ErrPortInUse{}},
- },
- }, {
- tname: "bind with device",
- actions: []portReserveTestAction{
- {port: 24, ip: fakeIPAddress, device: 123, want: nil},
- {port: 24, ip: fakeIPAddress, device: 123, want: &tcpip.ErrPortInUse{}},
- {port: 24, ip: fakeIPAddress, device: 123, flags: Flags{LoadBalanced: true}, want: &tcpip.ErrPortInUse{}},
- {port: 24, ip: fakeIPAddress, device: 0, want: &tcpip.ErrPortInUse{}},
- {port: 24, ip: fakeIPAddress, device: 0, flags: Flags{LoadBalanced: true}, want: &tcpip.ErrPortInUse{}},
- {port: 24, ip: fakeIPAddress, device: 456, flags: Flags{LoadBalanced: true}, want: nil},
- {port: 24, ip: fakeIPAddress, device: 789, want: nil},
- {port: 24, ip: fakeIPAddress, want: &tcpip.ErrPortInUse{}},
- {port: 24, ip: fakeIPAddress, flags: Flags{LoadBalanced: true}, want: &tcpip.ErrPortInUse{}},
- },
- }, {
- tname: "bind with reuseport",
- actions: []portReserveTestAction{
- {port: 24, ip: fakeIPAddress, flags: Flags{LoadBalanced: true}, want: nil},
- {port: 24, ip: fakeIPAddress, device: 123, want: &tcpip.ErrPortInUse{}},
- {port: 24, ip: fakeIPAddress, device: 123, flags: Flags{LoadBalanced: true}, want: nil},
- {port: 24, ip: fakeIPAddress, device: 0, want: &tcpip.ErrPortInUse{}},
- {port: 24, ip: fakeIPAddress, device: 0, flags: Flags{LoadBalanced: true}, want: nil},
- },
- }, {
- tname: "binding with reuseport and device",
- actions: []portReserveTestAction{
- {port: 24, ip: fakeIPAddress, device: 123, flags: Flags{LoadBalanced: true}, want: nil},
- {port: 24, ip: fakeIPAddress, device: 123, want: &tcpip.ErrPortInUse{}},
- {port: 24, ip: fakeIPAddress, device: 123, flags: Flags{LoadBalanced: true}, want: nil},
- {port: 24, ip: fakeIPAddress, device: 0, want: &tcpip.ErrPortInUse{}},
- {port: 24, ip: fakeIPAddress, device: 456, flags: Flags{LoadBalanced: true}, want: nil},
- {port: 24, ip: fakeIPAddress, device: 0, flags: Flags{LoadBalanced: true}, want: nil},
- {port: 24, ip: fakeIPAddress, device: 789, flags: Flags{LoadBalanced: true}, want: nil},
- {port: 24, ip: fakeIPAddress, device: 999, want: &tcpip.ErrPortInUse{}},
- },
- }, {
- tname: "mixing reuseport and not reuseport by binding to device",
- actions: []portReserveTestAction{
- {port: 24, ip: fakeIPAddress, device: 123, flags: Flags{LoadBalanced: true}, want: nil},
- {port: 24, ip: fakeIPAddress, device: 456, want: nil},
- {port: 24, ip: fakeIPAddress, device: 789, flags: Flags{LoadBalanced: true}, want: nil},
- {port: 24, ip: fakeIPAddress, device: 999, want: nil},
- },
- }, {
- tname: "can't bind to 0 after mixing reuseport and not reuseport",
- actions: []portReserveTestAction{
- {port: 24, ip: fakeIPAddress, device: 123, flags: Flags{LoadBalanced: true}, want: nil},
- {port: 24, ip: fakeIPAddress, device: 456, want: nil},
- {port: 24, ip: fakeIPAddress, device: 0, flags: Flags{LoadBalanced: true}, want: &tcpip.ErrPortInUse{}},
- },
- }, {
- tname: "bind and release",
- actions: []portReserveTestAction{
- {port: 24, ip: fakeIPAddress, device: 123, flags: Flags{LoadBalanced: true}, want: nil},
- {port: 24, ip: fakeIPAddress, device: 0, flags: Flags{LoadBalanced: true}, want: nil},
- {port: 24, ip: fakeIPAddress, device: 345, flags: Flags{}, want: &tcpip.ErrPortInUse{}},
- {port: 24, ip: fakeIPAddress, device: 789, flags: Flags{LoadBalanced: true}, want: nil},
-
- // Release the bind to device 0 and try again.
- {port: 24, ip: fakeIPAddress, device: 0, flags: Flags{LoadBalanced: true}, want: nil, release: true},
- {port: 24, ip: fakeIPAddress, device: 345, flags: Flags{}, want: nil},
- },
- }, {
- tname: "bind twice with reuseport once",
- actions: []portReserveTestAction{
- {port: 24, ip: fakeIPAddress, device: 123, flags: Flags{}, want: nil},
- {port: 24, ip: fakeIPAddress, device: 0, flags: Flags{LoadBalanced: true}, want: &tcpip.ErrPortInUse{}},
- },
- }, {
- tname: "release an unreserved device",
- actions: []portReserveTestAction{
- {port: 24, ip: fakeIPAddress, device: 123, flags: Flags{}, want: nil},
- {port: 24, ip: fakeIPAddress, device: 456, flags: Flags{}, want: nil},
- // The below don't exist.
- {port: 24, ip: fakeIPAddress, device: 345, flags: Flags{}, want: nil, release: true},
- {port: 9999, ip: fakeIPAddress, device: 123, flags: Flags{}, want: nil, release: true},
- // Release all.
- {port: 24, ip: fakeIPAddress, device: 123, flags: Flags{}, want: nil, release: true},
- {port: 24, ip: fakeIPAddress, device: 456, flags: Flags{}, want: nil, release: true},
- },
- }, {
- tname: "bind with reuseaddr",
- actions: []portReserveTestAction{
- {port: 24, ip: fakeIPAddress, flags: Flags{MostRecent: true}, want: nil},
- {port: 24, ip: fakeIPAddress, device: 123, want: &tcpip.ErrPortInUse{}},
- {port: 24, ip: fakeIPAddress, device: 123, flags: Flags{MostRecent: true}, want: nil},
- {port: 24, ip: fakeIPAddress, device: 0, want: &tcpip.ErrPortInUse{}},
- {port: 24, ip: fakeIPAddress, device: 0, flags: Flags{MostRecent: true}, want: nil},
- },
- }, {
- tname: "bind twice with reuseaddr once",
- actions: []portReserveTestAction{
- {port: 24, ip: fakeIPAddress, device: 123, flags: Flags{}, want: nil},
- {port: 24, ip: fakeIPAddress, device: 0, flags: Flags{MostRecent: true}, want: &tcpip.ErrPortInUse{}},
- },
- }, {
- tname: "bind with reuseaddr and reuseport",
- actions: []portReserveTestAction{
- {port: 24, ip: fakeIPAddress, flags: Flags{MostRecent: true, LoadBalanced: true}, want: nil},
- {port: 24, ip: fakeIPAddress, flags: Flags{MostRecent: true, LoadBalanced: true}, want: nil},
- {port: 24, ip: fakeIPAddress, flags: Flags{MostRecent: true, LoadBalanced: true}, want: nil},
- },
- }, {
- tname: "bind with reuseaddr and reuseport, and then reuseaddr",
- actions: []portReserveTestAction{
- {port: 24, ip: fakeIPAddress, flags: Flags{MostRecent: true, LoadBalanced: true}, want: nil},
- {port: 24, ip: fakeIPAddress, flags: Flags{MostRecent: true}, want: nil},
- {port: 24, ip: fakeIPAddress, flags: Flags{LoadBalanced: true}, want: &tcpip.ErrPortInUse{}},
- },
- }, {
- tname: "bind with reuseaddr and reuseport, and then reuseport",
- actions: []portReserveTestAction{
- {port: 24, ip: fakeIPAddress, flags: Flags{MostRecent: true, LoadBalanced: true}, want: nil},
- {port: 24, ip: fakeIPAddress, flags: Flags{LoadBalanced: true}, want: nil},
- {port: 24, ip: fakeIPAddress, flags: Flags{MostRecent: true}, want: &tcpip.ErrPortInUse{}},
- },
- }, {
- tname: "bind with reuseaddr and reuseport twice, and then reuseaddr",
- actions: []portReserveTestAction{
- {port: 24, ip: fakeIPAddress, flags: Flags{MostRecent: true, LoadBalanced: true}, want: nil},
- {port: 24, ip: fakeIPAddress, flags: Flags{MostRecent: true, LoadBalanced: true}, want: nil},
- {port: 24, ip: fakeIPAddress, flags: Flags{MostRecent: true}, want: nil},
- },
- }, {
- tname: "bind with reuseaddr and reuseport twice, and then reuseport",
- actions: []portReserveTestAction{
- {port: 24, ip: fakeIPAddress, flags: Flags{MostRecent: true, LoadBalanced: true}, want: nil},
- {port: 24, ip: fakeIPAddress, flags: Flags{MostRecent: true, LoadBalanced: true}, want: nil},
- {port: 24, ip: fakeIPAddress, flags: Flags{LoadBalanced: true}, want: nil},
- },
- }, {
- tname: "bind with reuseaddr, and then reuseaddr and reuseport",
- actions: []portReserveTestAction{
- {port: 24, ip: fakeIPAddress, flags: Flags{MostRecent: true}, want: nil},
- {port: 24, ip: fakeIPAddress, flags: Flags{MostRecent: true, LoadBalanced: true}, want: nil},
- {port: 24, ip: fakeIPAddress, flags: Flags{LoadBalanced: true}, want: &tcpip.ErrPortInUse{}},
- },
- }, {
- tname: "bind with reuseport, and then reuseaddr and reuseport",
- actions: []portReserveTestAction{
- {port: 24, ip: fakeIPAddress, flags: Flags{LoadBalanced: true}, want: nil},
- {port: 24, ip: fakeIPAddress, flags: Flags{MostRecent: true, LoadBalanced: true}, want: nil},
- {port: 24, ip: fakeIPAddress, flags: Flags{MostRecent: true}, want: &tcpip.ErrPortInUse{}},
- },
- }, {
- tname: "bind tuple with reuseaddr, and then wildcard with reuseaddr",
- actions: []portReserveTestAction{
- {port: 24, ip: fakeIPAddress, flags: Flags{TupleOnly: true}, dest: tcpip.FullAddress{Addr: fakeIPAddress, Port: 24}, want: nil},
- {port: 24, ip: fakeIPAddress, flags: Flags{TupleOnly: true}, dest: tcpip.FullAddress{}, want: nil},
- },
- }, {
- tname: "bind tuple with reuseaddr, and then wildcard",
- actions: []portReserveTestAction{
- {port: 24, ip: fakeIPAddress, flags: Flags{TupleOnly: true}, dest: tcpip.FullAddress{Addr: fakeIPAddress, Port: 24}, want: nil},
- {port: 24, ip: fakeIPAddress, want: &tcpip.ErrPortInUse{}},
- },
- }, {
- tname: "bind wildcard with reuseaddr, and then tuple with reuseaddr",
- actions: []portReserveTestAction{
- {port: 24, ip: fakeIPAddress, flags: Flags{TupleOnly: true}, dest: tcpip.FullAddress{}, want: nil},
- {port: 24, ip: fakeIPAddress, flags: Flags{TupleOnly: true}, dest: tcpip.FullAddress{Addr: fakeIPAddress, Port: 24}, want: nil},
- },
- }, {
- tname: "bind tuple with reuseaddr, and then wildcard",
- actions: []portReserveTestAction{
- {port: 24, ip: fakeIPAddress, want: nil},
- {port: 24, ip: fakeIPAddress, flags: Flags{TupleOnly: true}, dest: tcpip.FullAddress{Addr: fakeIPAddress, Port: 24}, want: &tcpip.ErrPortInUse{}},
- },
- }, {
- tname: "bind two tuples with reuseaddr",
- actions: []portReserveTestAction{
- {port: 24, ip: fakeIPAddress, flags: Flags{TupleOnly: true}, dest: tcpip.FullAddress{Addr: fakeIPAddress, Port: 24}, want: nil},
- {port: 24, ip: fakeIPAddress, flags: Flags{TupleOnly: true}, dest: tcpip.FullAddress{Addr: fakeIPAddress, Port: 25}, want: nil},
- },
- }, {
- tname: "bind two tuples",
- actions: []portReserveTestAction{
- {port: 24, ip: fakeIPAddress, dest: tcpip.FullAddress{Addr: fakeIPAddress, Port: 24}, want: nil},
- {port: 24, ip: fakeIPAddress, dest: tcpip.FullAddress{Addr: fakeIPAddress, Port: 25}, want: nil},
- },
- }, {
- tname: "bind wildcard, and then tuple with reuseaddr",
- actions: []portReserveTestAction{
- {port: 24, ip: fakeIPAddress, dest: tcpip.FullAddress{}, want: nil},
- {port: 24, ip: fakeIPAddress, flags: Flags{TupleOnly: true}, dest: tcpip.FullAddress{Addr: fakeIPAddress, Port: 24}, want: &tcpip.ErrPortInUse{}},
- },
- }, {
- tname: "bind wildcard twice with reuseaddr",
- actions: []portReserveTestAction{
- {port: 24, ip: anyIPAddress, flags: Flags{TupleOnly: true}, want: nil},
- {port: 24, ip: anyIPAddress, flags: Flags{TupleOnly: true}, want: nil},
- },
- },
- } {
- t.Run(test.tname, func(t *testing.T) {
- pm := NewPortManager()
- net := []tcpip.NetworkProtocolNumber{fakeNetworkNumber}
- rng := rand.New(rand.NewSource(time.Now().UnixNano()))
-
- for _, test := range test.actions {
- first, _ := pm.PortRange()
- if test.release {
- portRes := Reservation{
- Networks: net,
- Transport: fakeTransNumber,
- Addr: test.ip,
- Port: test.port,
- Flags: test.flags,
- BindToDevice: test.device,
- Dest: test.dest,
- }
- pm.ReleasePort(portRes)
- continue
- }
- portRes := Reservation{
- Networks: net,
- Transport: fakeTransNumber,
- Addr: test.ip,
- Port: test.port,
- Flags: test.flags,
- BindToDevice: test.device,
- Dest: test.dest,
- }
- gotPort, err := pm.ReservePort(rng, portRes, nil /* testPort */)
- if diff := cmp.Diff(test.want, err); diff != "" {
- t.Fatalf("unexpected error from ReservePort(%+v, _), (-want, +got):\n%s", portRes, diff)
- }
- if test.port == 0 && (gotPort == 0 || gotPort < first) {
- t.Fatalf("ReservePort(%+v, _) = %d, want port number >= %d to be picked", portRes, gotPort, first)
- }
- }
- })
- }
-}
-
-func TestPickEphemeralPort(t *testing.T) {
- const (
- firstEphemeral = 32000
- numEphemeralPorts = 1000
- )
-
- for _, test := range []struct {
- name string
- f func(port uint16) (bool, tcpip.Error)
- wantErr tcpip.Error
- wantPort uint16
- }{
- {
- name: "no-port-available",
- f: func(port uint16) (bool, tcpip.Error) {
- return false, nil
- },
- wantErr: &tcpip.ErrNoPortAvailable{},
- },
- {
- name: "port-tester-error",
- f: func(port uint16) (bool, tcpip.Error) {
- return false, &tcpip.ErrBadBuffer{}
- },
- wantErr: &tcpip.ErrBadBuffer{},
- },
- {
-			name: "only-port-32042-available",
- f: func(port uint16) (bool, tcpip.Error) {
- if port == firstEphemeral+42 {
- return true, nil
- }
- return false, nil
- },
- wantPort: firstEphemeral + 42,
- },
- {
-			name: "only-port-under-32000-available",
- f: func(port uint16) (bool, tcpip.Error) {
- if port < firstEphemeral {
- return true, nil
- }
- return false, nil
- },
- wantErr: &tcpip.ErrNoPortAvailable{},
- },
- } {
- t.Run(test.name, func(t *testing.T) {
- pm := NewPortManager()
- rng := rand.New(rand.NewSource(time.Now().UnixNano()))
- if err := pm.SetPortRange(firstEphemeral, firstEphemeral+numEphemeralPorts); err != nil {
- t.Fatalf("failed to set ephemeral port range: %s", err)
- }
- port, err := pm.PickEphemeralPort(rng, test.f)
- if diff := cmp.Diff(test.wantErr, err); diff != "" {
- t.Fatalf("unexpected error from PickEphemeralPort(..), (-want, +got):\n%s", diff)
- }
- if port != test.wantPort {
- t.Errorf("got PickEphemeralPort(..) = (%d, nil); want (%d, nil)", port, test.wantPort)
- }
- })
- }
-}
-
-func TestPickEphemeralPortStable(t *testing.T) {
- const (
- firstEphemeral = 32000
- numEphemeralPorts = 1000
- )
-
- for _, test := range []struct {
- name string
- f func(port uint16) (bool, tcpip.Error)
- wantErr tcpip.Error
- wantPort uint16
- }{
- {
- name: "no-port-available",
- f: func(port uint16) (bool, tcpip.Error) {
- return false, nil
- },
- wantErr: &tcpip.ErrNoPortAvailable{},
- },
- {
- name: "port-tester-error",
- f: func(port uint16) (bool, tcpip.Error) {
- return false, &tcpip.ErrBadBuffer{}
- },
- wantErr: &tcpip.ErrBadBuffer{},
- },
- {
-			name: "only-port-32042-available",
- f: func(port uint16) (bool, tcpip.Error) {
- if port == firstEphemeral+42 {
- return true, nil
- }
- return false, nil
- },
- wantPort: firstEphemeral + 42,
- },
- {
-			name: "only-port-under-32000-available",
- f: func(port uint16) (bool, tcpip.Error) {
- if port < firstEphemeral {
- return true, nil
- }
- return false, nil
- },
- wantErr: &tcpip.ErrNoPortAvailable{},
- },
- } {
- t.Run(test.name, func(t *testing.T) {
- pm := NewPortManager()
- if err := pm.SetPortRange(firstEphemeral, firstEphemeral+numEphemeralPorts); err != nil {
- t.Fatalf("failed to set ephemeral port range: %s", err)
- }
- portOffset := uint32(rand.Int31n(int32(numEphemeralPorts)))
- port, err := pm.PickEphemeralPortStable(portOffset, test.f)
- if diff := cmp.Diff(test.wantErr, err); diff != "" {
-				t.Fatalf("unexpected error from PickEphemeralPortStable(..), (-want, +got):\n%s", diff)
- }
- if port != test.wantPort {
-				t.Errorf("got PickEphemeralPortStable(..) = (%d, nil); want (%d, nil)", port, test.wantPort)
- }
- })
- }
-}
-
-// TestOverflow addresses b/183593432, wherein an overflowing uint16 causes a
-// port allocation failure.
-func TestOverflow(t *testing.T) {
- // Use a small range and start at offsets that will cause an overflow.
- count := uint16(50)
- for offset := uint32(math.MaxUint16 - count); offset < math.MaxUint16; offset++ {
- reservedPorts := make(map[uint16]struct{})
- // Ensure we can reserve everything in the allowed range.
- for i := uint16(0); i < count; i++ {
- port, err := pickEphemeralPort(offset, firstEphemeral, count, func(port uint16) (bool, tcpip.Error) {
- if _, ok := reservedPorts[port]; !ok {
- reservedPorts[port] = struct{}{}
- return true, nil
- }
- return false, nil
- })
- if err != nil {
- t.Fatalf("port picking failed at iteration %d, for offset %d, len(reserved): %+v", i, offset, len(reservedPorts))
- }
-			if port < firstEphemeral || port >= firstEphemeral+count {
-				t.Fatalf("reserved port %d, which is not in range [%d, %d]", port, firstEphemeral, firstEphemeral+count-1)
- }
- }
- }
-}
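
The TestOverflow comment above captures the technique worth keeping in mind: when the starting offset sits near math.MaxUint16, a naive "first + offset + i" computed in 16 bits wraps around and escapes the configured range. The following standalone Go sketch (illustrative only; the function name, signature, and error value are assumptions, not gVisor's pickEphemeralPort) shows one wrap-safe way to walk a small port range: reduce the candidate index modulo count in 32-bit space before adding it to the first port, so the result always stays in [first, first+count-1].

package main

import (
	"errors"
	"fmt"
	"math"
)

var errNoPortAvailable = errors.New("no port available")

// pickPort tries each of the count ports starting at first, beginning at
// an arbitrary 32-bit offset, and returns the first port accepted by
// testPort. The modulo is taken in uint32 space before converting back to
// uint16, so offsets near math.MaxUint16 cannot overflow the result.
func pickPort(offset uint32, first, count uint16, testPort func(port uint16) (bool, error)) (uint16, error) {
	for i := uint32(0); i < uint32(count); i++ {
		port := first + uint16((offset+i)%uint32(count))
		ok, err := testPort(port)
		if err != nil {
			return 0, err
		}
		if ok {
			return port, nil
		}
	}
	return 0, errNoPortAvailable
}

func main() {
	taken := map[uint16]bool{}
	// Start just below the uint16 boundary, mirroring the offsets the
	// overflow test above iterates over.
	port, err := pickPort(math.MaxUint16-3, 32000, 50, func(p uint16) (bool, error) {
		if taken[p] {
			return false, nil
		}
		taken[p] = true
		return true, nil
	})
	fmt.Println(port, err)
}
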
diff --git a/pkg/tcpip/sample/tun_tcp_connect/BUILD b/pkg/tcpip/sample/tun_tcp_connect/BUILD
deleted file mode 100644
index db9b91815..000000000
--- a/pkg/tcpip/sample/tun_tcp_connect/BUILD
+++ /dev/null
@@ -1,21 +0,0 @@
-load("//tools:defs.bzl", "go_binary")
-
-package(licenses = ["notice"])
-
-go_binary(
- name = "tun_tcp_connect",
- srcs = ["main.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/header",
- "//pkg/tcpip/link/fdbased",
- "//pkg/tcpip/link/rawfile",
- "//pkg/tcpip/link/sniffer",
- "//pkg/tcpip/link/tun",
- "//pkg/tcpip/network/ipv4",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/transport/tcp",
- "//pkg/waiter",
- ],
-)
diff --git a/pkg/tcpip/sample/tun_tcp_connect/main.go b/pkg/tcpip/sample/tun_tcp_connect/main.go
deleted file mode 100644
index 009cab643..000000000
--- a/pkg/tcpip/sample/tun_tcp_connect/main.go
+++ /dev/null
@@ -1,220 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build linux
-// +build linux
-
-// This sample creates a stack with TCP and IPv4 protocols on top of a TUN
-// device, and connects to a peer. Similar to "nc <address> <port>". While the
-// sample is running, attempts to connect to its IPv4 address will result in
-// a RST segment.
-//
-// As an example of how to run it, a TUN device can be created and enabled on
-// a linux host as follows (this only needs to be done once per boot):
-//
-// [sudo] ip tuntap add user <username> mode tun <device-name>
-// [sudo] ip link set <device-name> up
-// [sudo] ip addr add <ipv4-address>/<mask-length> dev <device-name>
-//
-// A concrete example:
-//
-// $ sudo ip tuntap add user wedsonaf mode tun tun0
-// $ sudo ip link set tun0 up
-// $ sudo ip addr add 192.168.1.1/24 dev tun0
-//
-// Then one can run tun_tcp_connect as follows:
-//
-// $ ./tun/tun_tcp_connect tun0 192.168.1.2 0 192.168.1.1 1234
-//
-// This will attempt to connect to the linux host's stack. One can run nc in
-// listen mode to accept a connect from tun_tcp_connect and exchange data.
-package main
-
-import (
- "bytes"
- "fmt"
- "log"
- "math/rand"
- "net"
- "os"
- "strconv"
- "time"
-
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/link/fdbased"
- "gvisor.dev/gvisor/pkg/tcpip/link/rawfile"
- "gvisor.dev/gvisor/pkg/tcpip/link/sniffer"
- "gvisor.dev/gvisor/pkg/tcpip/link/tun"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-// writer reads from standard input and writes to the endpoint until standard
-// input is closed. It signals that it's done by closing the provided channel.
-func writer(ch chan struct{}, ep tcpip.Endpoint) {
- defer func() {
- ep.Shutdown(tcpip.ShutdownWrite)
- close(ch)
- }()
-
- var b bytes.Buffer
- if err := func() error {
- for {
- if _, err := b.ReadFrom(os.Stdin); err != nil {
- return fmt.Errorf("b.ReadFrom failed: %w", err)
- }
-
- for b.Len() != 0 {
- if _, err := ep.Write(&b, tcpip.WriteOptions{Atomic: true}); err != nil {
- return fmt.Errorf("ep.Write failed: %s", err)
- }
- }
- }
- }(); err != nil {
- fmt.Println(err)
- }
-}
-
-func main() {
- if len(os.Args) != 6 {
- log.Fatal("Usage: ", os.Args[0], " <tun-device> <local-ipv4-address> <local-port> <remote-ipv4-address> <remote-port>")
- }
-
- tunName := os.Args[1]
- addrName := os.Args[2]
- portName := os.Args[3]
- remoteAddrName := os.Args[4]
- remotePortName := os.Args[5]
-
- rand.Seed(time.Now().UnixNano())
-
- addr := tcpip.Address(net.ParseIP(addrName).To4())
- remote := tcpip.FullAddress{
- NIC: 1,
- Addr: tcpip.Address(net.ParseIP(remoteAddrName).To4()),
- }
-
- var localPort uint16
- if v, err := strconv.Atoi(portName); err != nil {
- log.Fatalf("Unable to convert port %v: %v", portName, err)
- } else {
- localPort = uint16(v)
- }
-
- if v, err := strconv.Atoi(remotePortName); err != nil {
- log.Fatalf("Unable to convert port %v: %v", remotePortName, err)
- } else {
- remote.Port = uint16(v)
- }
-
- // Create the stack with ipv4 and tcp protocols, then add a tun-based
- // NIC and ipv4 address.
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{tcp.NewProtocol},
- })
-
- mtu, err := rawfile.GetMTU(tunName)
- if err != nil {
- log.Fatal(err)
- }
-
- fd, err := tun.Open(tunName)
- if err != nil {
- log.Fatal(err)
- }
-
- linkEP, err := fdbased.New(&fdbased.Options{FDs: []int{fd}, MTU: mtu})
- if err != nil {
- log.Fatal(err)
- }
- if err := s.CreateNIC(1, sniffer.New(linkEP)); err != nil {
- log.Fatal(err)
- }
-
- if err := s.AddAddress(1, ipv4.ProtocolNumber, addr); err != nil {
- log.Fatal(err)
- }
-
- // Add default route.
- s.SetRouteTable([]tcpip.Route{
- {
- Destination: header.IPv4EmptySubnet,
- NIC: 1,
- },
- })
-
- // Create TCP endpoint.
- var wq waiter.Queue
- ep, e := s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &wq)
- if e != nil {
- log.Fatal(e)
- }
-
- // Bind if a port is specified.
- if localPort != 0 {
- if err := ep.Bind(tcpip.FullAddress{0, "", localPort}); err != nil {
- log.Fatal("Bind failed: ", err)
- }
- }
-
- // Issue connect request and wait for it to complete.
- waitEntry, notifyCh := waiter.NewChannelEntry(nil)
- wq.EventRegister(&waitEntry, waiter.WritableEvents)
- terr := ep.Connect(remote)
- if _, ok := terr.(*tcpip.ErrConnectStarted); ok {
- fmt.Println("Connect is pending...")
- <-notifyCh
- terr = ep.LastError()
- }
- wq.EventUnregister(&waitEntry)
-
- if terr != nil {
- log.Fatal("Unable to connect: ", terr)
- }
-
- fmt.Println("Connected")
-
- // Start the writer in its own goroutine.
- writerCompletedCh := make(chan struct{})
- go writer(writerCompletedCh, ep) // S/R-SAFE: sample code.
-
- // Read data and write to standard output until the peer closes the
- // connection from its side.
- wq.EventRegister(&waitEntry, waiter.ReadableEvents)
- for {
- _, err := ep.Read(os.Stdout, tcpip.ReadOptions{})
- if err != nil {
- if _, ok := err.(*tcpip.ErrClosedForReceive); ok {
- break
- }
-
- if _, ok := err.(*tcpip.ErrWouldBlock); ok {
- <-notifyCh
- continue
- }
-
- log.Fatal("Read() failed:", err)
- }
- }
- wq.EventUnregister(&waitEntry)
-
- // The reader has completed. Now wait for the writer as well.
- <-writerCompletedCh
-
- ep.Close()
-}
diff --git a/pkg/tcpip/sample/tun_tcp_echo/BUILD b/pkg/tcpip/sample/tun_tcp_echo/BUILD
deleted file mode 100644
index 43264b76d..000000000
--- a/pkg/tcpip/sample/tun_tcp_echo/BUILD
+++ /dev/null
@@ -1,21 +0,0 @@
-load("//tools:defs.bzl", "go_binary")
-
-package(licenses = ["notice"])
-
-go_binary(
- name = "tun_tcp_echo",
- srcs = ["main.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/link/fdbased",
- "//pkg/tcpip/link/rawfile",
- "//pkg/tcpip/link/tun",
- "//pkg/tcpip/network/arp",
- "//pkg/tcpip/network/ipv4",
- "//pkg/tcpip/network/ipv6",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/transport/tcp",
- "//pkg/waiter",
- ],
-)
diff --git a/pkg/tcpip/sample/tun_tcp_echo/main.go b/pkg/tcpip/sample/tun_tcp_echo/main.go
deleted file mode 100644
index c10b19aa0..000000000
--- a/pkg/tcpip/sample/tun_tcp_echo/main.go
+++ /dev/null
@@ -1,231 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build linux
-// +build linux
-
-// This sample creates a stack with TCP and IPv4 protocols on top of a TUN
-// device, and listens on a port. Data received by the server in the accepted
-// connections is echoed back to the clients.
-package main
-
-import (
- "bytes"
- "flag"
- "io"
- "log"
- "math/rand"
- "net"
- "os"
- "strconv"
- "strings"
- "time"
-
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/link/fdbased"
- "gvisor.dev/gvisor/pkg/tcpip/link/rawfile"
- "gvisor.dev/gvisor/pkg/tcpip/link/tun"
- "gvisor.dev/gvisor/pkg/tcpip/network/arp"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-var tap = flag.Bool("tap", false, "use tap instead of tun")
-var mac = flag.String("mac", "aa:00:01:01:01:01", "mac address to use in tap device")
-
-type endpointWriter struct {
- ep tcpip.Endpoint
-}
-
-type tcpipError struct {
- inner tcpip.Error
-}
-
-func (e *tcpipError) Error() string {
- return e.inner.String()
-}
-
-func (e *endpointWriter) Write(p []byte) (int, error) {
- var r bytes.Reader
- r.Reset(p)
- n, err := e.ep.Write(&r, tcpip.WriteOptions{})
- if err != nil {
- return int(n), &tcpipError{
- inner: err,
- }
- }
- if n != int64(len(p)) {
- return int(n), io.ErrShortWrite
- }
- return int(n), nil
-}
-
-func echo(wq *waiter.Queue, ep tcpip.Endpoint) {
- defer ep.Close()
-
- // Create wait queue entry that notifies a channel.
- waitEntry, notifyCh := waiter.NewChannelEntry(nil)
-
- wq.EventRegister(&waitEntry, waiter.ReadableEvents)
- defer wq.EventUnregister(&waitEntry)
-
- w := endpointWriter{
- ep: ep,
- }
-
- for {
- _, err := ep.Read(&w, tcpip.ReadOptions{})
- if err != nil {
- if _, ok := err.(*tcpip.ErrWouldBlock); ok {
- <-notifyCh
- continue
- }
-
- return
- }
- }
-}
-
-func main() {
- flag.Parse()
- if len(flag.Args()) != 3 {
- log.Fatal("Usage: ", os.Args[0], " <tun-device> <local-address> <local-port>")
- }
-
- tunName := flag.Arg(0)
- addrName := flag.Arg(1)
- portName := flag.Arg(2)
-
- rand.Seed(time.Now().UnixNano())
-
- // Parse the mac address.
- maddr, err := net.ParseMAC(*mac)
- if err != nil {
- log.Fatalf("Bad MAC address: %v", *mac)
- }
-
- // Parse the IP address. Support both ipv4 and ipv6.
- parsedAddr := net.ParseIP(addrName)
- if parsedAddr == nil {
- log.Fatalf("Bad IP address: %v", addrName)
- }
-
- var addr tcpip.Address
- var proto tcpip.NetworkProtocolNumber
- if parsedAddr.To4() != nil {
- addr = tcpip.Address(parsedAddr.To4())
- proto = ipv4.ProtocolNumber
- } else if parsedAddr.To16() != nil {
- addr = tcpip.Address(parsedAddr.To16())
- proto = ipv6.ProtocolNumber
- } else {
- log.Fatalf("Unknown IP type: %v", addrName)
- }
-
- localPort, err := strconv.Atoi(portName)
- if err != nil {
- log.Fatalf("Unable to convert port %v: %v", portName, err)
- }
-
- // Create the stack with ip and tcp protocols, then add a tun-based
- // NIC and address.
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol, arp.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{tcp.NewProtocol},
- })
-
- mtu, err := rawfile.GetMTU(tunName)
- if err != nil {
- log.Fatal(err)
- }
-
- var fd int
- if *tap {
- fd, err = tun.OpenTAP(tunName)
- } else {
- fd, err = tun.Open(tunName)
- }
- if err != nil {
- log.Fatal(err)
- }
-
- linkEP, err := fdbased.New(&fdbased.Options{
- FDs: []int{fd},
- MTU: mtu,
- EthernetHeader: *tap,
- Address: tcpip.LinkAddress(maddr),
- })
- if err != nil {
- log.Fatal(err)
- }
- if err := s.CreateNIC(1, linkEP); err != nil {
- log.Fatal(err)
- }
-
- if err := s.AddAddress(1, proto, addr); err != nil {
- log.Fatal(err)
- }
-
- subnet, err := tcpip.NewSubnet(tcpip.Address(strings.Repeat("\x00", len(addr))), tcpip.AddressMask(strings.Repeat("\x00", len(addr))))
- if err != nil {
- log.Fatal(err)
- }
-
- // Add default route.
- s.SetRouteTable([]tcpip.Route{
- {
- Destination: subnet,
- NIC: 1,
- },
- })
-
- // Create TCP endpoint, bind it, then start listening.
- var wq waiter.Queue
- ep, e := s.NewEndpoint(tcp.ProtocolNumber, proto, &wq)
- if e != nil {
- log.Fatal(e)
- }
-
- defer ep.Close()
-
- if err := ep.Bind(tcpip.FullAddress{0, "", uint16(localPort)}); err != nil {
- log.Fatal("Bind failed: ", err)
- }
-
- if err := ep.Listen(10); err != nil {
- log.Fatal("Listen failed: ", err)
- }
-
- // Wait for connections to appear.
- waitEntry, notifyCh := waiter.NewChannelEntry(nil)
- wq.EventRegister(&waitEntry, waiter.ReadableEvents)
- defer wq.EventUnregister(&waitEntry)
-
- for {
- n, wq, err := ep.Accept(nil)
- if err != nil {
- if _, ok := err.(*tcpip.ErrWouldBlock); ok {
- <-notifyCh
- continue
- }
-
- log.Fatal("Accept() failed:", err)
- }
-
- go echo(wq, n) // S/R-SAFE: sample code.
- }
-}
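
endpointWriter above is a small adapter: tcpip.Endpoint.Write consumes an io.Reader and reports an int64 count plus a tcpip.Error, while the echo loop wants a plain io.Writer to hand to ep.Read. A self-contained sketch of the same adapter pattern, using a stand-in sink type rather than a real endpoint (all names here are illustrative, not gVisor API):

package main

import (
	"bytes"
	"fmt"
	"io"
)

// sink stands in for the endpoint: its Write consumes an io.Reader and
// reports how many bytes it accepted as an int64.
type sink struct{ buf bytes.Buffer }

func (s *sink) Write(r io.Reader) (int64, error) { return s.buf.ReadFrom(r) }

// sinkWriter adapts sink to io.Writer, mirroring endpointWriter: wrap the
// byte slice in a bytes.Reader, forward it, and translate a short count
// into io.ErrShortWrite.
type sinkWriter struct{ s *sink }

func (w *sinkWriter) Write(p []byte) (int, error) {
	var r bytes.Reader
	r.Reset(p)
	n, err := w.s.Write(&r)
	if err != nil {
		return int(n), err
	}
	if n != int64(len(p)) {
		return int(n), io.ErrShortWrite
	}
	return int(n), nil
}

func main() {
	s := &sink{}
	// Any io.Writer consumer now works against the endpoint-like sink.
	fmt.Fprintln(&sinkWriter{s: s}, "hello")
	fmt.Print(s.buf.String())
}

The short-write translation matters because io.Writer's contract requires an error whenever fewer than len(p) bytes were written.
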
diff --git a/pkg/tcpip/seqnum/BUILD b/pkg/tcpip/seqnum/BUILD
deleted file mode 100644
index 45f503845..000000000
--- a/pkg/tcpip/seqnum/BUILD
+++ /dev/null
@@ -1,9 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "seqnum",
- srcs = ["seqnum.go"],
- visibility = ["//visibility:public"],
-)
diff --git a/pkg/tcpip/seqnum/seqnum_state_autogen.go b/pkg/tcpip/seqnum/seqnum_state_autogen.go
new file mode 100644
index 000000000..23e79811d
--- /dev/null
+++ b/pkg/tcpip/seqnum/seqnum_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package seqnum
diff --git a/pkg/tcpip/sock_err_list.go b/pkg/tcpip/sock_err_list.go
new file mode 100644
index 000000000..0be1993af
--- /dev/null
+++ b/pkg/tcpip/sock_err_list.go
@@ -0,0 +1,221 @@
+package tcpip
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type sockErrorElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (sockErrorElementMapper) linkerFor(elem *SockError) *SockError { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type sockErrorList struct {
+ head *SockError
+ tail *SockError
+}
+
+// Reset resets list l to the empty state.
+func (l *sockErrorList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *sockErrorList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *sockErrorList) Front() *SockError {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *sockErrorList) Back() *SockError {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *sockErrorList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (sockErrorElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *sockErrorList) PushFront(e *SockError) {
+ linker := sockErrorElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ sockErrorElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *sockErrorList) PushBack(e *SockError) {
+ linker := sockErrorElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ sockErrorElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *sockErrorList) PushBackList(m *sockErrorList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ sockErrorElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ sockErrorElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *sockErrorList) InsertAfter(b, e *SockError) {
+ bLinker := sockErrorElementMapper{}.linkerFor(b)
+ eLinker := sockErrorElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ sockErrorElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *sockErrorList) InsertBefore(a, e *SockError) {
+ aLinker := sockErrorElementMapper{}.linkerFor(a)
+ eLinker := sockErrorElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ sockErrorElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *sockErrorList) Remove(e *SockError) {
+ linker := sockErrorElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ sockErrorElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ sockErrorElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type sockErrorEntry struct {
+ next *SockError
+ prev *SockError
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *sockErrorEntry) Next() *SockError {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *sockErrorEntry) Prev() *SockError {
+ return e.prev
+}
+
+// SetNext assigns 'entry' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *sockErrorEntry) SetNext(elem *SockError) {
+ e.next = elem
+}
+
+// SetPrev assigns 'entry' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *sockErrorEntry) SetPrev(elem *SockError) {
+ e.prev = elem
+}
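
The generated sockErrorList above is an intrusive doubly linked list: the element (SockError, via an embedded sockErrorEntry) carries its own next/prev links, so pushes and removals are O(1) and allocation-free, at the cost of an element belonging to at most one such list at a time. A minimal sketch of the embedding pattern with plain types (simplified; it omits the ElementMapper indirection and most operations):

package main

import "fmt"

// links plays the role of sockErrorEntry: the element embeds it, so the
// list pointers live inside the element itself.
type links struct {
	next, prev *node
}

type node struct {
	links        // embedded intrusive links
	value string
}

// list plays the role of sockErrorList: just head and tail pointers.
type list struct {
	head, tail *node
}

// pushBack appends n in O(1) with no allocation, the same link surgery
// that PushBack performs above.
func (l *list) pushBack(n *node) {
	n.prev = l.tail
	n.next = nil
	if l.tail != nil {
		l.tail.next = n
	} else {
		l.head = n
	}
	l.tail = n
}

func main() {
	var l list
	l.pushBack(&node{value: "first"})
	l.pushBack(&node{value: "second"})
	for n := l.head; n != nil; n = n.next {
		fmt.Println(n.value)
	}
}
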
diff --git a/pkg/tcpip/stack/BUILD b/pkg/tcpip/stack/BUILD
deleted file mode 100644
index e0847e58a..000000000
--- a/pkg/tcpip/stack/BUILD
+++ /dev/null
@@ -1,152 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test", "most_shards")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "neighbor_entry_list",
- out = "neighbor_entry_list.go",
- package = "stack",
- prefix = "neighborEntry",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*neighborEntry",
- "Linker": "*neighborEntry",
- },
-)
-
-go_template_instance(
- name = "packet_buffer_list",
- out = "packet_buffer_list.go",
- package = "stack",
- prefix = "PacketBuffer",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*PacketBuffer",
- "Linker": "*PacketBuffer",
- },
-)
-
-go_template_instance(
- name = "tuple_list",
- out = "tuple_list.go",
- package = "stack",
- prefix = "tuple",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*tuple",
- "Linker": "*tuple",
- },
-)
-
-go_library(
- name = "stack",
- srcs = [
- "addressable_endpoint_state.go",
- "conntrack.go",
- "headertype_string.go",
- "hook_string.go",
- "icmp_rate_limit.go",
- "iptables.go",
- "iptables_state.go",
- "iptables_targets.go",
- "iptables_types.go",
- "neighbor_cache.go",
- "neighbor_entry.go",
- "neighbor_entry_list.go",
- "neighborstate_string.go",
- "nic.go",
- "nic_stats.go",
- "nud.go",
- "packet_buffer.go",
- "packet_buffer_list.go",
- "packet_buffer_unsafe.go",
- "pending_packets.go",
- "rand.go",
- "registration.go",
- "route.go",
- "stack.go",
- "stack_global_state.go",
- "stack_options.go",
- "tcp.go",
- "transport_demuxer.go",
- "tuple_list.go",
- ],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/atomicbitops",
- "//pkg/buffer",
- "//pkg/ilist",
- "//pkg/log",
- "//pkg/rand",
- "//pkg/sleep",
- "//pkg/sync",
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/hash/jenkins",
- "//pkg/tcpip/header",
- "//pkg/tcpip/ports",
- "//pkg/tcpip/seqnum",
- "//pkg/tcpip/transport/tcpconntrack",
- "//pkg/waiter",
- "@org_golang_x_time//rate:go_default_library",
- ],
-)
-
-go_test(
- name = "stack_x_test",
- size = "small",
- srcs = [
- "addressable_endpoint_state_test.go",
- "ndp_test.go",
- "nud_test.go",
- "stack_test.go",
- "transport_demuxer_test.go",
- "transport_test.go",
- ],
- shard_count = most_shards,
- deps = [
- ":stack",
- "//pkg/rand",
- "//pkg/sync",
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/checker",
- "//pkg/tcpip/faketime",
- "//pkg/tcpip/header",
- "//pkg/tcpip/link/channel",
- "//pkg/tcpip/link/loopback",
- "//pkg/tcpip/network/arp",
- "//pkg/tcpip/network/ipv4",
- "//pkg/tcpip/network/ipv6",
- "//pkg/tcpip/ports",
- "//pkg/tcpip/testutil",
- "//pkg/tcpip/transport/icmp",
- "//pkg/tcpip/transport/udp",
- "//pkg/waiter",
- "@com_github_google_go_cmp//cmp:go_default_library",
- ],
-)
-
-go_test(
- name = "stack_test",
- size = "small",
- srcs = [
- "forwarding_test.go",
- "neighbor_cache_test.go",
- "neighbor_entry_test.go",
- "nic_test.go",
- "packet_buffer_test.go",
- ],
- library = ":stack",
- deps = [
- "//pkg/sync",
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/faketime",
- "//pkg/tcpip/header",
- "//pkg/tcpip/testutil",
- "@com_github_google_go_cmp//cmp:go_default_library",
- "@com_github_google_go_cmp//cmp/cmpopts:go_default_library",
- ],
-)
diff --git a/pkg/tcpip/stack/addressable_endpoint_state_test.go b/pkg/tcpip/stack/addressable_endpoint_state_test.go
deleted file mode 100644
index 140f146f6..000000000
--- a/pkg/tcpip/stack/addressable_endpoint_state_test.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package stack_test
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
-)
-
-// TestAddressableEndpointStateCleanup tests that cleaning up an addressable
-// endpoint state removes permanent addresses and leaves groups.
-func TestAddressableEndpointStateCleanup(t *testing.T) {
- var ep fakeNetworkEndpoint
- if err := ep.Enable(); err != nil {
- t.Fatalf("ep.Enable(): %s", err)
- }
-
- var s stack.AddressableEndpointState
- s.Init(&ep)
-
- addr := tcpip.AddressWithPrefix{
- Address: "\x01",
- PrefixLen: 8,
- }
-
- {
- ep, err := s.AddAndAcquirePermanentAddress(addr, stack.NeverPrimaryEndpoint, stack.AddressConfigStatic, false /* deprecated */)
- if err != nil {
- t.Fatalf("s.AddAndAcquirePermanentAddress(%s, %d, %d, false): %s", addr, stack.NeverPrimaryEndpoint, stack.AddressConfigStatic, err)
- }
- // We don't need the address endpoint.
- ep.DecRef()
- }
- {
- ep := s.AcquireAssignedAddress(addr.Address, false /* allowTemp */, stack.NeverPrimaryEndpoint)
- if ep == nil {
- t.Fatalf("got s.AcquireAssignedAddress(%s, false, NeverPrimaryEndpoint) = nil, want = non-nil", addr.Address)
- }
- ep.DecRef()
- }
-
- s.Cleanup()
- if ep := s.AcquireAssignedAddress(addr.Address, false /* allowTemp */, stack.NeverPrimaryEndpoint); ep != nil {
- ep.DecRef()
- t.Fatalf("got s.AcquireAssignedAddress(%s, false, NeverPrimaryEndpoint) = %s, want = nil", addr.Address, ep.AddressWithPrefix())
- }
-}
diff --git a/pkg/tcpip/stack/forwarding_test.go b/pkg/tcpip/stack/forwarding_test.go
deleted file mode 100644
index 72f66441f..000000000
--- a/pkg/tcpip/stack/forwarding_test.go
+++ /dev/null
@@ -1,790 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package stack
-
-import (
- "encoding/binary"
- "math"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/faketime"
- "gvisor.dev/gvisor/pkg/tcpip/header"
-)
-
-const (
- fwdTestNetNumber tcpip.NetworkProtocolNumber = math.MaxUint32
- fwdTestNetHeaderLen = 12
- fwdTestNetDefaultPrefixLen = 8
-
- // fwdTestNetDefaultMTU is the MTU, in bytes, used throughout the tests,
- // except where another value is explicitly used. It is chosen to match
- // the MTU of loopback interfaces on linux systems.
- fwdTestNetDefaultMTU = 65536
-
- dstAddrOffset = 0
- srcAddrOffset = 1
- protocolNumberOffset = 2
-)
-
-var _ LinkAddressResolver = (*fwdTestNetworkEndpoint)(nil)
-var _ NetworkEndpoint = (*fwdTestNetworkEndpoint)(nil)
-
-// fwdTestNetworkEndpoint is a network-layer protocol endpoint.
-// Headers of this protocol are fwdTestNetHeaderLen bytes, but we currently only
-// use the first three: destination address, source address, and transport
-// protocol. They're all one byte fields to simplify parsing.
-type fwdTestNetworkEndpoint struct {
- AddressableEndpointState
-
- nic NetworkInterface
- proto *fwdTestNetworkProtocol
- dispatcher TransportDispatcher
-
- mu struct {
- sync.RWMutex
- forwarding bool
- }
-}
-
-func (*fwdTestNetworkEndpoint) Enable() tcpip.Error {
- return nil
-}
-
-func (*fwdTestNetworkEndpoint) Enabled() bool {
- return true
-}
-
-func (*fwdTestNetworkEndpoint) Disable() {}
-
-func (f *fwdTestNetworkEndpoint) MTU() uint32 {
- return f.nic.MTU() - uint32(f.MaxHeaderLength())
-}
-
-func (*fwdTestNetworkEndpoint) DefaultTTL() uint8 {
- return 123
-}
-
-func (f *fwdTestNetworkEndpoint) HandlePacket(pkt *PacketBuffer) {
- if _, _, ok := f.proto.Parse(pkt); !ok {
- return
- }
-
- netHdr := pkt.NetworkHeader().View()
- _, dst := f.proto.ParseAddresses(netHdr)
-
- addressEndpoint := f.AcquireAssignedAddress(dst, f.nic.Promiscuous(), CanBePrimaryEndpoint)
- if addressEndpoint != nil {
- addressEndpoint.DecRef()
- // Dispatch the packet to the transport protocol.
- f.dispatcher.DeliverTransportPacket(tcpip.TransportProtocolNumber(netHdr[protocolNumberOffset]), pkt)
- return
- }
-
- r, err := f.proto.stack.FindRoute(0, "", dst, fwdTestNetNumber, false /* multicastLoop */)
- if err != nil {
- return
- }
- defer r.Release()
-
- vv := buffer.NewVectorisedView(pkt.Size(), pkt.Views())
- pkt = NewPacketBuffer(PacketBufferOptions{
- ReserveHeaderBytes: int(r.MaxHeaderLength()),
- Data: vv.ToView().ToVectorisedView(),
- })
- // TODO(gvisor.dev/issue/1085) Decrease the TTL field in forwarded packets.
- _ = r.WriteHeaderIncludedPacket(pkt)
-}
-
-func (f *fwdTestNetworkEndpoint) MaxHeaderLength() uint16 {
- return f.nic.MaxHeaderLength() + fwdTestNetHeaderLen
-}
-
-func (f *fwdTestNetworkEndpoint) NetworkProtocolNumber() tcpip.NetworkProtocolNumber {
- return f.proto.Number()
-}
-
-func (f *fwdTestNetworkEndpoint) WritePacket(r *Route, params NetworkHeaderParams, pkt *PacketBuffer) tcpip.Error {
- // Add the protocol's header to the packet and send it to the link
- // endpoint.
- b := pkt.NetworkHeader().Push(fwdTestNetHeaderLen)
- b[dstAddrOffset] = r.RemoteAddress()[0]
- b[srcAddrOffset] = r.LocalAddress()[0]
- b[protocolNumberOffset] = byte(params.Protocol)
-
- return f.nic.WritePacket(r, fwdTestNetNumber, pkt)
-}
-
-// WritePackets implements LinkEndpoint.WritePackets.
-func (*fwdTestNetworkEndpoint) WritePackets(*Route, PacketBufferList, NetworkHeaderParams) (int, tcpip.Error) {
- panic("not implemented")
-}
-
-func (f *fwdTestNetworkEndpoint) WriteHeaderIncludedPacket(r *Route, pkt *PacketBuffer) tcpip.Error {
- // The network header should not already be populated.
- if _, ok := pkt.NetworkHeader().Consume(fwdTestNetHeaderLen); !ok {
- return &tcpip.ErrMalformedHeader{}
- }
-
- return f.nic.WritePacket(r, fwdTestNetNumber, pkt)
-}
-
-func (f *fwdTestNetworkEndpoint) Close() {
- f.AddressableEndpointState.Cleanup()
-}
-
-// Stats implements stack.NetworkEndpoint.
-func (*fwdTestNetworkEndpoint) Stats() NetworkEndpointStats {
- return &fwdTestNetworkEndpointStats{}
-}
-
-var _ NetworkEndpointStats = (*fwdTestNetworkEndpointStats)(nil)
-
-type fwdTestNetworkEndpointStats struct{}
-
-// IsNetworkEndpointStats implements stack.NetworkEndpointStats.
-func (*fwdTestNetworkEndpointStats) IsNetworkEndpointStats() {}
-
-var _ NetworkProtocol = (*fwdTestNetworkProtocol)(nil)
-
-// fwdTestNetworkProtocol is a network-layer protocol that implements address
-// resolution.
-type fwdTestNetworkProtocol struct {
- stack *Stack
-
- neigh *neighborCache
- addrResolveDelay time.Duration
- onLinkAddressResolved func(*neighborCache, tcpip.Address, tcpip.LinkAddress)
- onResolveStaticAddress func(tcpip.Address) (tcpip.LinkAddress, bool)
-}
-
-func (*fwdTestNetworkProtocol) Number() tcpip.NetworkProtocolNumber {
- return fwdTestNetNumber
-}
-
-func (*fwdTestNetworkProtocol) MinimumPacketSize() int {
- return fwdTestNetHeaderLen
-}
-
-func (*fwdTestNetworkProtocol) DefaultPrefixLen() int {
- return fwdTestNetDefaultPrefixLen
-}
-
-func (*fwdTestNetworkProtocol) ParseAddresses(v buffer.View) (src, dst tcpip.Address) {
- return tcpip.Address(v[srcAddrOffset : srcAddrOffset+1]), tcpip.Address(v[dstAddrOffset : dstAddrOffset+1])
-}
-
-func (*fwdTestNetworkProtocol) Parse(pkt *PacketBuffer) (tcpip.TransportProtocolNumber, bool, bool) {
- netHeader, ok := pkt.NetworkHeader().Consume(fwdTestNetHeaderLen)
- if !ok {
- return 0, false, false
- }
- return tcpip.TransportProtocolNumber(netHeader[protocolNumberOffset]), true, true
-}
-
-func (f *fwdTestNetworkProtocol) NewEndpoint(nic NetworkInterface, dispatcher TransportDispatcher) NetworkEndpoint {
- e := &fwdTestNetworkEndpoint{
- nic: nic,
- proto: f,
- dispatcher: dispatcher,
- }
- e.AddressableEndpointState.Init(e)
- return e
-}
-
-func (*fwdTestNetworkProtocol) SetOption(tcpip.SettableNetworkProtocolOption) tcpip.Error {
- return &tcpip.ErrUnknownProtocolOption{}
-}
-
-func (*fwdTestNetworkProtocol) Option(tcpip.GettableNetworkProtocolOption) tcpip.Error {
- return &tcpip.ErrUnknownProtocolOption{}
-}
-
-func (*fwdTestNetworkProtocol) Close() {}
-
-func (*fwdTestNetworkProtocol) Wait() {}
-
-func (f *fwdTestNetworkEndpoint) LinkAddressRequest(addr, _ tcpip.Address, remoteLinkAddr tcpip.LinkAddress) tcpip.Error {
- if fn := f.proto.onLinkAddressResolved; fn != nil {
- f.proto.stack.clock.AfterFunc(f.proto.addrResolveDelay, func() {
- fn(f.proto.neigh, addr, remoteLinkAddr)
- })
- }
- return nil
-}
-
-func (f *fwdTestNetworkEndpoint) ResolveStaticAddress(addr tcpip.Address) (tcpip.LinkAddress, bool) {
- if fn := f.proto.onResolveStaticAddress; fn != nil {
- return fn(addr)
- }
- return "", false
-}
-
-func (*fwdTestNetworkEndpoint) LinkAddressProtocol() tcpip.NetworkProtocolNumber {
- return fwdTestNetNumber
-}
-
-// Forwarding implements stack.ForwardingNetworkEndpoint.
-func (f *fwdTestNetworkEndpoint) Forwarding() bool {
- f.mu.RLock()
- defer f.mu.RUnlock()
- return f.mu.forwarding
-
-}
-
-// SetForwarding implements stack.ForwardingNetworkEndpoint.
-func (f *fwdTestNetworkEndpoint) SetForwarding(v bool) {
- f.mu.Lock()
- defer f.mu.Unlock()
- f.mu.forwarding = v
-}
-
-// fwdTestPacketInfo holds all the information about an outbound packet.
-type fwdTestPacketInfo struct {
- RemoteLinkAddress tcpip.LinkAddress
- LocalLinkAddress tcpip.LinkAddress
- Pkt *PacketBuffer
-}
-
-var _ LinkEndpoint = (*fwdTestLinkEndpoint)(nil)
-
-type fwdTestLinkEndpoint struct {
- dispatcher NetworkDispatcher
- mtu uint32
- linkAddr tcpip.LinkAddress
-
- // C is where outbound packets are queued.
- C chan fwdTestPacketInfo
-}
-
-// InjectInbound injects an inbound packet.
-func (e *fwdTestLinkEndpoint) InjectInbound(protocol tcpip.NetworkProtocolNumber, pkt *PacketBuffer) {
- e.InjectLinkAddr(protocol, "", pkt)
-}
-
-// InjectLinkAddr injects an inbound packet with a remote link address.
-func (e *fwdTestLinkEndpoint) InjectLinkAddr(protocol tcpip.NetworkProtocolNumber, remote tcpip.LinkAddress, pkt *PacketBuffer) {
- e.dispatcher.DeliverNetworkPacket(remote, "" /* local */, protocol, pkt)
-}
-
-// Attach saves the stack network-layer dispatcher for use later when packets
-// are injected.
-func (e *fwdTestLinkEndpoint) Attach(dispatcher NetworkDispatcher) {
- e.dispatcher = dispatcher
-}
-
-// IsAttached implements stack.LinkEndpoint.IsAttached.
-func (e *fwdTestLinkEndpoint) IsAttached() bool {
- return e.dispatcher != nil
-}
-
-// MTU implements stack.LinkEndpoint.MTU. It returns the value initialized
-// during construction.
-func (e *fwdTestLinkEndpoint) MTU() uint32 {
- return e.mtu
-}
-
-// Capabilities implements stack.LinkEndpoint.Capabilities.
-func (e fwdTestLinkEndpoint) Capabilities() LinkEndpointCapabilities {
- caps := LinkEndpointCapabilities(0)
- return caps | CapabilityResolutionRequired
-}
-
-// MaxHeaderLength returns the maximum size of the link layer header. Given it
-// doesn't have a header, it just returns 0.
-func (*fwdTestLinkEndpoint) MaxHeaderLength() uint16 {
- return 0
-}
-
-// LinkAddress returns the link address of this endpoint.
-func (e *fwdTestLinkEndpoint) LinkAddress() tcpip.LinkAddress {
- return e.linkAddr
-}
-
-func (e fwdTestLinkEndpoint) WritePacket(r RouteInfo, _ tcpip.NetworkProtocolNumber, pkt *PacketBuffer) tcpip.Error {
- p := fwdTestPacketInfo{
- RemoteLinkAddress: r.RemoteLinkAddress,
- LocalLinkAddress: r.LocalLinkAddress,
- Pkt: pkt,
- }
-
- select {
- case e.C <- p:
- default:
- }
-
- return nil
-}
-
-// WritePackets stores outbound packets into the channel.
-func (e *fwdTestLinkEndpoint) WritePackets(r RouteInfo, pkts PacketBufferList, protocol tcpip.NetworkProtocolNumber) (int, tcpip.Error) {
- n := 0
- for pkt := pkts.Front(); pkt != nil; pkt = pkt.Next() {
- e.WritePacket(r, protocol, pkt)
- n++
- }
-
- return n, nil
-}
-
-// Wait implements stack.LinkEndpoint.Wait.
-func (*fwdTestLinkEndpoint) Wait() {}
-
-// ARPHardwareType implements stack.LinkEndpoint.ARPHardwareType.
-func (*fwdTestLinkEndpoint) ARPHardwareType() header.ARPHardwareType {
- panic("not implemented")
-}
-
-// AddHeader implements stack.LinkEndpoint.AddHeader.
-func (e *fwdTestLinkEndpoint) AddHeader(tcpip.LinkAddress, tcpip.LinkAddress, tcpip.NetworkProtocolNumber, *PacketBuffer) {
- panic("not implemented")
-}
-
-func fwdTestNetFactory(t *testing.T, proto *fwdTestNetworkProtocol) (*faketime.ManualClock, *fwdTestLinkEndpoint, *fwdTestLinkEndpoint) {
- clock := faketime.NewManualClock()
- // Create a stack with the network protocol and two NICs.
- s := New(Options{
- NetworkProtocols: []NetworkProtocolFactory{func(s *Stack) NetworkProtocol {
- proto.stack = s
- return proto
- }},
- Clock: clock,
- })
-
- protoNum := proto.Number()
- if err := s.SetForwardingDefaultAndAllNICs(protoNum, true); err != nil {
- t.Fatalf("SetForwardingDefaultAndAllNICs(%d, true): %s", protoNum, err)
- }
-
- // NIC 1 has the link address "a", and added the network address 1.
- ep1 := &fwdTestLinkEndpoint{
- C: make(chan fwdTestPacketInfo, 300),
- mtu: fwdTestNetDefaultMTU,
- linkAddr: "a",
- }
- if err := s.CreateNIC(1, ep1); err != nil {
- t.Fatal("CreateNIC #1 failed:", err)
- }
- if err := s.AddAddress(1, fwdTestNetNumber, "\x01"); err != nil {
- t.Fatal("AddAddress #1 failed:", err)
- }
-
- // NIC 2 has the link address "b", and added the network address 2.
- ep2 := &fwdTestLinkEndpoint{
- C: make(chan fwdTestPacketInfo, 300),
- mtu: fwdTestNetDefaultMTU,
- linkAddr: "b",
- }
- if err := s.CreateNIC(2, ep2); err != nil {
- t.Fatal("CreateNIC #2 failed:", err)
- }
- if err := s.AddAddress(2, fwdTestNetNumber, "\x02"); err != nil {
- t.Fatal("AddAddress #2 failed:", err)
- }
-
- nic, ok := s.nics[2]
- if !ok {
- t.Fatal("NIC 2 does not exist")
- }
-
- if l, ok := nic.linkAddrResolvers[fwdTestNetNumber]; ok {
- proto.neigh = &l.neigh
- }
-
- // Route all packets to NIC 2.
- {
- subnet, err := tcpip.NewSubnet("\x00", "\x00")
- if err != nil {
- t.Fatal(err)
- }
- s.SetRouteTable([]tcpip.Route{{Destination: subnet, NIC: 2}})
- }
-
- return clock, ep1, ep2
-}
-
-func TestForwardingWithStaticResolver(t *testing.T) {
- // Create a network protocol with a static resolver.
- proto := &fwdTestNetworkProtocol{
- onResolveStaticAddress:
- // The network address 3 is resolved to the link address "c".
- func(addr tcpip.Address) (tcpip.LinkAddress, bool) {
- if addr == "\x03" {
- return "c", true
- }
- return "", false
- },
- }
-
- clock, ep1, ep2 := fwdTestNetFactory(t, proto)
-
- // Inject an inbound packet to address 3 on NIC 1, and see if it is
- // forwarded to NIC 2.
- buf := buffer.NewView(30)
- buf[dstAddrOffset] = 3
- ep1.InjectInbound(fwdTestNetNumber, NewPacketBuffer(PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
-
- var p fwdTestPacketInfo
-
- clock.Advance(proto.addrResolveDelay)
- select {
- case p = <-ep2.C:
- default:
- t.Fatal("packet not forwarded")
- }
-
- // Test that the static address resolution happened correctly.
- if p.RemoteLinkAddress != "c" {
- t.Fatalf("got p.RemoteLinkAddress = %s, want = c", p.RemoteLinkAddress)
- }
- if p.LocalLinkAddress != "b" {
- t.Fatalf("got p.LocalLinkAddress = %s, want = b", p.LocalLinkAddress)
- }
-}
-
-func TestForwardingWithFakeResolver(t *testing.T) {
- proto := fwdTestNetworkProtocol{
- addrResolveDelay: 500 * time.Millisecond,
- onLinkAddressResolved: func(neigh *neighborCache, addr tcpip.Address, linkAddr tcpip.LinkAddress) {
- t.Helper()
- if len(linkAddr) != 0 {
- t.Fatalf("got linkAddr=%q, want unspecified", linkAddr)
- }
- // Any address will be resolved to the link address "c".
- neigh.handleConfirmation(addr, "c", ReachabilityConfirmationFlags{
- Solicited: true,
- Override: false,
- IsRouter: false,
- })
- },
- }
- clock, ep1, ep2 := fwdTestNetFactory(t, &proto)
-
- // Inject an inbound packet to address 3 on NIC 1, and see if it is
- // forwarded to NIC 2.
- buf := buffer.NewView(30)
- buf[dstAddrOffset] = 3
- ep1.InjectInbound(fwdTestNetNumber, NewPacketBuffer(PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
-
- var p fwdTestPacketInfo
-
- clock.Advance(proto.addrResolveDelay)
- select {
- case p = <-ep2.C:
- default:
- t.Fatal("packet not forwarded")
- }
-
- // Test that the address resolution happened correctly.
- if p.RemoteLinkAddress != "c" {
- t.Fatalf("got p.RemoteLinkAddress = %s, want = c", p.RemoteLinkAddress)
- }
- if p.LocalLinkAddress != "b" {
- t.Fatalf("got p.LocalLinkAddress = %s, want = b", p.LocalLinkAddress)
- }
-}
-
-func TestForwardingWithNoResolver(t *testing.T) {
- // Create a network protocol without a resolver.
- proto := &fwdTestNetworkProtocol{}
-
- // Whether or not we use the neighbor cache here does not matter since
- // neither linkAddrCache nor neighborCache will be used.
- clock, ep1, ep2 := fwdTestNetFactory(t, proto)
-
-	// Inject an inbound packet to address 3 on NIC 1, and see if it is
- // forwarded to NIC 2.
- buf := buffer.NewView(30)
- buf[dstAddrOffset] = 3
- ep1.InjectInbound(fwdTestNetNumber, NewPacketBuffer(PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
-
- clock.Advance(proto.addrResolveDelay)
- select {
- case <-ep2.C:
- t.Fatal("Packet should not be forwarded")
- default:
- }
-}
-
-func TestForwardingResolutionFailsForQueuedPackets(t *testing.T) {
- proto := &fwdTestNetworkProtocol{
- addrResolveDelay: 50 * time.Millisecond,
- onLinkAddressResolved: func(*neighborCache, tcpip.Address, tcpip.LinkAddress) {
- // Don't resolve the link address.
- },
- }
-
- clock, ep1, ep2 := fwdTestNetFactory(t, proto)
-
- const numPackets int = 5
- // These packets will all be enqueued in the packet queue to wait for link
- // address resolution.
- for i := 0; i < numPackets; i++ {
- buf := buffer.NewView(30)
- buf[dstAddrOffset] = 3
- ep1.InjectInbound(fwdTestNetNumber, NewPacketBuffer(PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
- }
-
- // All packets should fail resolution.
- for i := 0; i < numPackets; i++ {
- clock.Advance(proto.addrResolveDelay)
- select {
- case got := <-ep2.C:
- t.Fatalf("got %#v; packets should have failed resolution and not been forwarded", got)
- default:
- }
- }
-}
-
-func TestForwardingWithFakeResolverPartialTimeout(t *testing.T) {
- proto := fwdTestNetworkProtocol{
- addrResolveDelay: 500 * time.Millisecond,
- onLinkAddressResolved: func(neigh *neighborCache, addr tcpip.Address, linkAddr tcpip.LinkAddress) {
- t.Helper()
- if len(linkAddr) != 0 {
- t.Fatalf("got linkAddr=%q, want unspecified", linkAddr)
- }
- // Only packets to address 3 will be resolved to the
- // link address "c".
- if addr == "\x03" {
- neigh.handleConfirmation(addr, "c", ReachabilityConfirmationFlags{
- Solicited: true,
- Override: false,
- IsRouter: false,
- })
- }
- },
- }
- clock, ep1, ep2 := fwdTestNetFactory(t, &proto)
-
- // Inject an inbound packet to address 4 on NIC 1. This packet should
- // not be forwarded.
- buf := buffer.NewView(30)
- buf[dstAddrOffset] = 4
- ep1.InjectInbound(fwdTestNetNumber, NewPacketBuffer(PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
-
- // Inject an inbound packet to address 3 on NIC 1, and see if it is
- // forwarded to NIC 2.
- buf = buffer.NewView(30)
- buf[dstAddrOffset] = 3
- ep1.InjectInbound(fwdTestNetNumber, NewPacketBuffer(PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
-
- var p fwdTestPacketInfo
-
- clock.Advance(proto.addrResolveDelay)
- select {
- case p = <-ep2.C:
- default:
- t.Fatal("packet not forwarded")
- }
-
- if nh := PayloadSince(p.Pkt.NetworkHeader()); nh[dstAddrOffset] != 3 {
- t.Fatalf("got p.Pkt.NetworkHeader[dstAddrOffset] = %d, want = 3", nh[dstAddrOffset])
- }
-
- // Test that the address resolution happened correctly.
- if p.RemoteLinkAddress != "c" {
- t.Fatalf("got p.RemoteLinkAddress = %s, want = c", p.RemoteLinkAddress)
- }
- if p.LocalLinkAddress != "b" {
- t.Fatalf("got p.LocalLinkAddress = %s, want = b", p.LocalLinkAddress)
- }
-}
-
-func TestForwardingWithFakeResolverTwoPackets(t *testing.T) {
- proto := fwdTestNetworkProtocol{
- addrResolveDelay: 500 * time.Millisecond,
- onLinkAddressResolved: func(neigh *neighborCache, addr tcpip.Address, linkAddr tcpip.LinkAddress) {
- t.Helper()
- if len(linkAddr) != 0 {
- t.Fatalf("got linkAddr=%q, want unspecified", linkAddr)
- }
-			// All packets will be resolved to the link address "c".
- neigh.handleConfirmation(addr, "c", ReachabilityConfirmationFlags{
- Solicited: true,
- Override: false,
- IsRouter: false,
- })
- },
- }
- clock, ep1, ep2 := fwdTestNetFactory(t, &proto)
-
- // Inject two inbound packets to address 3 on NIC 1.
- for i := 0; i < 2; i++ {
- buf := buffer.NewView(30)
- buf[dstAddrOffset] = 3
- ep1.InjectInbound(fwdTestNetNumber, NewPacketBuffer(PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
- }
-
- for i := 0; i < 2; i++ {
- var p fwdTestPacketInfo
-
- clock.Advance(proto.addrResolveDelay)
- select {
- case p = <-ep2.C:
- default:
- t.Fatal("packet not forwarded")
- }
-
- if nh := PayloadSince(p.Pkt.NetworkHeader()); nh[dstAddrOffset] != 3 {
- t.Fatalf("got p.Pkt.NetworkHeader[dstAddrOffset] = %d, want = 3", nh[dstAddrOffset])
- }
-
- // Test that the address resolution happened correctly.
- if p.RemoteLinkAddress != "c" {
- t.Fatalf("got p.RemoteLinkAddress = %s, want = c", p.RemoteLinkAddress)
- }
- if p.LocalLinkAddress != "b" {
- t.Fatalf("got p.LocalLinkAddress = %s, want = b", p.LocalLinkAddress)
- }
- }
-}
-
-func TestForwardingWithFakeResolverManyPackets(t *testing.T) {
- proto := fwdTestNetworkProtocol{
- addrResolveDelay: 500 * time.Millisecond,
- onLinkAddressResolved: func(neigh *neighborCache, addr tcpip.Address, linkAddr tcpip.LinkAddress) {
- t.Helper()
- if len(linkAddr) != 0 {
- t.Fatalf("got linkAddr=%q, want unspecified", linkAddr)
- }
-			// All packets will be resolved to the link address "c".
- neigh.handleConfirmation(addr, "c", ReachabilityConfirmationFlags{
- Solicited: true,
- Override: false,
- IsRouter: false,
- })
- },
- }
- clock, ep1, ep2 := fwdTestNetFactory(t, &proto)
-
- for i := 0; i < maxPendingPacketsPerResolution+5; i++ {
- // Inject inbound 'maxPendingPacketsPerResolution + 5' packets on NIC 1.
- buf := buffer.NewView(30)
- buf[dstAddrOffset] = 3
- // Set the packet sequence number.
- binary.BigEndian.PutUint16(buf[fwdTestNetHeaderLen:], uint16(i))
- ep1.InjectInbound(fwdTestNetNumber, NewPacketBuffer(PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
- }
-
- for i := 0; i < maxPendingPacketsPerResolution; i++ {
- var p fwdTestPacketInfo
-
- clock.Advance(proto.addrResolveDelay)
- select {
- case p = <-ep2.C:
- default:
- t.Fatal("packet not forwarded")
- }
-
- b := PayloadSince(p.Pkt.NetworkHeader())
- if b[dstAddrOffset] != 3 {
- t.Fatalf("got b[dstAddrOffset] = %d, want = 3", b[dstAddrOffset])
- }
- if len(b) < fwdTestNetHeaderLen+2 {
- t.Fatalf("packet is too short to hold a sequence number: len(b) = %d", b)
- }
- seqNumBuf := b[fwdTestNetHeaderLen:]
-
- // The first 5 packets should not be forwarded so the sequence number should
- // start with 5.
- want := uint16(i + 5)
- if n := binary.BigEndian.Uint16(seqNumBuf); n != want {
- t.Fatalf("got the packet #%d, want = #%d", n, want)
- }
-
- // Test that the address resolution happened correctly.
- if p.RemoteLinkAddress != "c" {
- t.Fatalf("got p.RemoteLinkAddress = %s, want = c", p.RemoteLinkAddress)
- }
- if p.LocalLinkAddress != "b" {
- t.Fatalf("got p.LocalLinkAddress = %s, want = b", p.LocalLinkAddress)
- }
- }
-}
-
-func TestForwardingWithFakeResolverManyResolutions(t *testing.T) {
- proto := fwdTestNetworkProtocol{
- addrResolveDelay: 500 * time.Millisecond,
- onLinkAddressResolved: func(neigh *neighborCache, addr tcpip.Address, linkAddr tcpip.LinkAddress) {
- t.Helper()
- if len(linkAddr) != 0 {
- t.Fatalf("got linkAddr=%q, want unspecified", linkAddr)
- }
-			// All packets will be resolved to the link address "c".
- neigh.handleConfirmation(addr, "c", ReachabilityConfirmationFlags{
- Solicited: true,
- Override: false,
- IsRouter: false,
- })
- },
- }
- clock, ep1, ep2 := fwdTestNetFactory(t, &proto)
-
- for i := 0; i < maxPendingResolutions+5; i++ {
- // Inject inbound 'maxPendingResolutions + 5' packets on NIC 1.
- // Each packet has a different destination address (3 to
- // maxPendingResolutions + 7).
- buf := buffer.NewView(30)
- buf[dstAddrOffset] = byte(3 + i)
- ep1.InjectInbound(fwdTestNetNumber, NewPacketBuffer(PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
- }
-
- for i := 0; i < maxPendingResolutions; i++ {
- var p fwdTestPacketInfo
-
- clock.Advance(proto.addrResolveDelay)
- select {
- case p = <-ep2.C:
- default:
- t.Fatal("packet not forwarded")
- }
-
- // The first 5 packets (address 3 to 7) should not be forwarded
- // because their address resolutions are interrupted.
- if nh := PayloadSince(p.Pkt.NetworkHeader()); nh[dstAddrOffset] < 8 {
- t.Fatalf("got p.Pkt.NetworkHeader[dstAddrOffset] = %d, want p.Pkt.NetworkHeader[dstAddrOffset] >= 8", nh[dstAddrOffset])
- }
-
- // Test that the address resolution happened correctly.
- if p.RemoteLinkAddress != "c" {
- t.Fatalf("got p.RemoteLinkAddress = %s, want = c", p.RemoteLinkAddress)
- }
- if p.LocalLinkAddress != "b" {
- t.Fatalf("got p.LocalLinkAddress = %s, want = b", p.LocalLinkAddress)
- }
- }
-}
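
The forwarding tests above revolve around a deliberately tiny network header: fwdTestNetHeaderLen (12) bytes, of which only byte 0 (destination address), byte 1 (source address), and byte 2 (transport protocol number) are ever read. A standalone sketch of building and parsing that toy header (constants copied from the deleted file; the helper names are made up for illustration):

package main

import "fmt"

const (
	fwdTestNetHeaderLen  = 12
	dstAddrOffset        = 0
	srcAddrOffset        = 1
	protocolNumberOffset = 2
)

// buildHeader returns a buffer whose first fwdTestNetHeaderLen bytes form
// the toy header, followed by the payload.
func buildHeader(dst, src, proto byte, payload []byte) []byte {
	b := make([]byte, fwdTestNetHeaderLen+len(payload))
	b[dstAddrOffset] = dst
	b[srcAddrOffset] = src
	b[protocolNumberOffset] = proto
	copy(b[fwdTestNetHeaderLen:], payload)
	return b
}

// parseHeader mirrors what fwdTestNetworkProtocol.Parse consumes: it
// reports the three one-byte fields and whether the buffer is long enough
// to hold the header at all.
func parseHeader(b []byte) (dst, src, proto byte, ok bool) {
	if len(b) < fwdTestNetHeaderLen {
		return 0, 0, 0, false
	}
	return b[dstAddrOffset], b[srcAddrOffset], b[protocolNumberOffset], true
}

func main() {
	pkt := buildHeader(3, 1, 6, []byte("payload"))
	dst, src, proto, ok := parseHeader(pkt)
	fmt.Println(dst, src, proto, ok)
}
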
diff --git a/pkg/tcpip/stack/ndp_test.go b/pkg/tcpip/stack/ndp_test.go
deleted file mode 100644
index 4d5431da1..000000000
--- a/pkg/tcpip/stack/ndp_test.go
+++ /dev/null
@@ -1,5569 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package stack_test
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "math/rand"
- "testing"
- "time"
-
- "github.com/google/go-cmp/cmp"
- cryptorand "gvisor.dev/gvisor/pkg/rand"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/checker"
- "gvisor.dev/gvisor/pkg/tcpip/faketime"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/link/channel"
- "gvisor.dev/gvisor/pkg/tcpip/link/loopback"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/testutil"
- "gvisor.dev/gvisor/pkg/tcpip/transport/icmp"
- "gvisor.dev/gvisor/pkg/tcpip/transport/udp"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-var (
- addr1 = testutil.MustParse6("a00::1")
- addr2 = testutil.MustParse6("a00::2")
- addr3 = testutil.MustParse6("a00::3")
-)
-
-const (
- linkAddr1 = tcpip.LinkAddress("\x02\x02\x03\x04\x05\x06")
- linkAddr2 = tcpip.LinkAddress("\x02\x02\x03\x04\x05\x07")
- linkAddr3 = tcpip.LinkAddress("\x02\x02\x03\x04\x05\x08")
- linkAddr4 = tcpip.LinkAddress("\x02\x02\x03\x04\x05\x09")
-
- defaultPrefixLen = 128
-)
-
-var (
- llAddr1 = header.LinkLocalAddr(linkAddr1)
- llAddr2 = header.LinkLocalAddr(linkAddr2)
- llAddr3 = header.LinkLocalAddr(linkAddr3)
- llAddr4 = header.LinkLocalAddr(linkAddr4)
- dstAddr = tcpip.FullAddress{
- Addr: "\x0a\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01",
- Port: 25,
- }
-)
-
-func addrForSubnet(subnet tcpip.Subnet, linkAddr tcpip.LinkAddress) tcpip.AddressWithPrefix {
- if !header.IsValidUnicastEthernetAddress(linkAddr) {
- return tcpip.AddressWithPrefix{}
- }
-
- addrBytes := []byte(subnet.ID())
- header.EthernetAdddressToModifiedEUI64IntoBuf(linkAddr, addrBytes[header.IIDOffsetInIPv6Address:])
- return tcpip.AddressWithPrefix{
- Address: tcpip.Address(addrBytes),
- PrefixLen: 64,
- }
-}
-
-// prefixSubnetAddr returns a prefix (Address + Length), the prefix's equivalent
-// tcpip.Subnet, and an address where the lower half of the address is composed
-// of the EUI-64 of linkAddr if it is a valid unicast ethernet address.
-func prefixSubnetAddr(offset uint8, linkAddr tcpip.LinkAddress) (tcpip.AddressWithPrefix, tcpip.Subnet, tcpip.AddressWithPrefix) {
- prefixBytes := []byte{1, 2, 3, 4, 5, 6, 7, 8 + offset, 0, 0, 0, 0, 0, 0, 0, 0}
- prefix := tcpip.AddressWithPrefix{
- Address: tcpip.Address(prefixBytes),
- PrefixLen: 64,
- }
-
- subnet := prefix.Subnet()
-
- return prefix, subnet, addrForSubnet(subnet, linkAddr)
-}
-
-// ndpDADEvent is a set of parameters that was passed to
-// ndpDispatcher.OnDuplicateAddressDetectionResult.
-type ndpDADEvent struct {
- nicID tcpip.NICID
- addr tcpip.Address
- res stack.DADResult
-}
-
-type ndpOffLinkRouteEvent struct {
- nicID tcpip.NICID
- subnet tcpip.Subnet
- router tcpip.Address
- prf header.NDPRoutePreference
- // true if route was updated, false if invalidated.
- updated bool
-}
-
-type ndpPrefixEvent struct {
- nicID tcpip.NICID
- prefix tcpip.Subnet
- // true if prefix was discovered, false if invalidated.
- discovered bool
-}
-
-type ndpAutoGenAddrEventType int
-
-const (
- newAddr ndpAutoGenAddrEventType = iota
- deprecatedAddr
- invalidatedAddr
-)
-
-type ndpAutoGenAddrEvent struct {
- nicID tcpip.NICID
- addr tcpip.AddressWithPrefix
- eventType ndpAutoGenAddrEventType
-}
-
-func (e ndpAutoGenAddrEvent) String() string {
- return fmt.Sprintf("%T{nicID=%d addr=%s eventType=%d}", e, e.nicID, e.addr, e.eventType)
-}
-
-type ndpRDNSS struct {
- addrs []tcpip.Address
- lifetime time.Duration
-}
-
-type ndpRDNSSEvent struct {
- nicID tcpip.NICID
- rdnss ndpRDNSS
-}
-
-type ndpDNSSLEvent struct {
- nicID tcpip.NICID
- domainNames []string
- lifetime time.Duration
-}
-
-type ndpDHCPv6Event struct {
- nicID tcpip.NICID
- configuration ipv6.DHCPv6ConfigurationFromNDPRA
-}
-
-var _ ipv6.NDPDispatcher = (*ndpDispatcher)(nil)
-
-// ndpDispatcher implements ipv6.NDPDispatcher so tests can observe when
-// various NDP-related events happen.
-type ndpDispatcher struct {
- dadC chan ndpDADEvent
- offLinkRouteC chan ndpOffLinkRouteEvent
- prefixC chan ndpPrefixEvent
- autoGenAddrC chan ndpAutoGenAddrEvent
- rdnssC chan ndpRDNSSEvent
- dnsslC chan ndpDNSSLEvent
- routeTable []tcpip.Route
- dhcpv6ConfigurationC chan ndpDHCPv6Event
-}
-
-// Implements ipv6.NDPDispatcher.OnDuplicateAddressDetectionResult.
-func (n *ndpDispatcher) OnDuplicateAddressDetectionResult(nicID tcpip.NICID, addr tcpip.Address, res stack.DADResult) {
- if n.dadC != nil {
- n.dadC <- ndpDADEvent{
- nicID,
- addr,
- res,
- }
- }
-}
-
-// Implements ipv6.NDPDispatcher.OnOffLinkRouteUpdated.
-func (n *ndpDispatcher) OnOffLinkRouteUpdated(nicID tcpip.NICID, subnet tcpip.Subnet, router tcpip.Address, prf header.NDPRoutePreference) {
- if c := n.offLinkRouteC; c != nil {
- c <- ndpOffLinkRouteEvent{
- nicID,
- subnet,
- router,
- prf,
- true,
- }
- }
-}
-
-// Implements ipv6.NDPDispatcher.OnOffLinkRouteInvalidated.
-func (n *ndpDispatcher) OnOffLinkRouteInvalidated(nicID tcpip.NICID, subnet tcpip.Subnet, router tcpip.Address) {
- if c := n.offLinkRouteC; c != nil {
- var prf header.NDPRoutePreference
- c <- ndpOffLinkRouteEvent{
- nicID,
- subnet,
- router,
- prf,
- false,
- }
- }
-}
-
-// Implements ipv6.NDPDispatcher.OnOnLinkPrefixDiscovered.
-func (n *ndpDispatcher) OnOnLinkPrefixDiscovered(nicID tcpip.NICID, prefix tcpip.Subnet) {
- if c := n.prefixC; c != nil {
- c <- ndpPrefixEvent{
- nicID,
- prefix,
- true,
- }
- }
-}
-
-// Implements ipv6.NDPDispatcher.OnOnLinkPrefixInvalidated.
-func (n *ndpDispatcher) OnOnLinkPrefixInvalidated(nicID tcpip.NICID, prefix tcpip.Subnet) {
- if c := n.prefixC; c != nil {
- c <- ndpPrefixEvent{
- nicID,
- prefix,
- false,
- }
- }
-}
-
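-// Implements ipv6.NDPDispatcher.OnAutoGenAddress.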
-func (n *ndpDispatcher) OnAutoGenAddress(nicID tcpip.NICID, addr tcpip.AddressWithPrefix) {
- if c := n.autoGenAddrC; c != nil {
- c <- ndpAutoGenAddrEvent{
- nicID,
- addr,
- newAddr,
- }
- }
-}
-
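-// Implements ipv6.NDPDispatcher.OnAutoGenAddressDeprecated.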
-func (n *ndpDispatcher) OnAutoGenAddressDeprecated(nicID tcpip.NICID, addr tcpip.AddressWithPrefix) {
- if c := n.autoGenAddrC; c != nil {
- c <- ndpAutoGenAddrEvent{
- nicID,
- addr,
- deprecatedAddr,
- }
- }
-}
-
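-// Implements ipv6.NDPDispatcher.OnAutoGenAddressInvalidated.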
-func (n *ndpDispatcher) OnAutoGenAddressInvalidated(nicID tcpip.NICID, addr tcpip.AddressWithPrefix) {
- if c := n.autoGenAddrC; c != nil {
- c <- ndpAutoGenAddrEvent{
- nicID,
- addr,
- invalidatedAddr,
- }
- }
-}
-
-// Implements ipv6.NDPDispatcher.OnRecursiveDNSServerOption.
-func (n *ndpDispatcher) OnRecursiveDNSServerOption(nicID tcpip.NICID, addrs []tcpip.Address, lifetime time.Duration) {
- if c := n.rdnssC; c != nil {
- c <- ndpRDNSSEvent{
- nicID,
- ndpRDNSS{
- addrs,
- lifetime,
- },
- }
- }
-}
-
-// Implements ipv6.NDPDispatcher.OnDNSSearchListOption.
-func (n *ndpDispatcher) OnDNSSearchListOption(nicID tcpip.NICID, domainNames []string, lifetime time.Duration) {
- if n.dnsslC != nil {
- n.dnsslC <- ndpDNSSLEvent{
- nicID,
- domainNames,
- lifetime,
- }
- }
-}
-
-// Implements ipv6.NDPDispatcher.OnDHCPv6Configuration.
-func (n *ndpDispatcher) OnDHCPv6Configuration(nicID tcpip.NICID, configuration ipv6.DHCPv6ConfigurationFromNDPRA) {
- if c := n.dhcpv6ConfigurationC; c != nil {
- c <- ndpDHCPv6Event{
- nicID,
- configuration,
- }
- }
-}
-
-// channelLinkWithHeaderLength is a channel.Endpoint with a configurable
-// header length.
-type channelLinkWithHeaderLength struct {
- *channel.Endpoint
- headerLength uint16
-}
-
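-// MaxHeaderLength returns the configured header length instead of the
-// underlying channel.Endpoint's value.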
-func (l *channelLinkWithHeaderLength) MaxHeaderLength() uint16 {
- return l.headerLength
-}
-
-// checkDADEvent returns a non-empty diff if e does not match a DAD event for
-// addr on the NIC with ID nicID that completed with result res.
-func checkDADEvent(e ndpDADEvent, nicID tcpip.NICID, addr tcpip.Address, res stack.DADResult) string {
- return cmp.Diff(ndpDADEvent{nicID: nicID, addr: addr, res: res}, e, cmp.AllowUnexported(e))
-}
-
-// TestDADDisabled tests that an address successfully resolves immediately
-// when DAD is not enabled (the default for an empty stack.Options).
-func TestDADDisabled(t *testing.T) {
- const nicID = 1
- ndpDisp := ndpDispatcher{
- dadC: make(chan ndpDADEvent, 1),
- }
- e := channel.New(0, 1280, linkAddr1)
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- NDPDisp: &ndpDisp,
- })},
- })
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
-
- addrWithPrefix := tcpip.AddressWithPrefix{
- Address: addr1,
- PrefixLen: defaultPrefixLen,
- }
- if err := s.AddAddressWithPrefix(nicID, header.IPv6ProtocolNumber, addrWithPrefix); err != nil {
- t.Fatalf("AddAddressWithPrefix(%d, %d, %s) = %s", nicID, header.IPv6ProtocolNumber, addrWithPrefix, err)
- }
-
- // Should get the address immediately since we should not have performed
- // DAD on it.
- select {
- case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, addr1, &stack.DADSucceeded{}); diff != "" {
- t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("expected DAD event")
- }
- if err := checkGetMainNICAddress(s, nicID, header.IPv6ProtocolNumber, addrWithPrefix); err != nil {
- t.Fatal(err)
- }
-
- // We should not have sent any NDP NS messages.
- if got := s.Stats().ICMP.V6.PacketsSent.NeighborSolicit.Value(); got != 0 {
- t.Fatalf("got NeighborSolicit = %d, want = 0", got)
- }
-}
-
-func TestDADResolveLoopback(t *testing.T) {
- const nicID = 1
- ndpDisp := ndpDispatcher{
- dadC: make(chan ndpDADEvent, 1),
- }
-
- dadConfigs := stack.DADConfigurations{
- RetransmitTimer: time.Second,
- DupAddrDetectTransmits: 1,
- }
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- Clock: clock,
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- NDPDisp: &ndpDisp,
- DADConfigs: dadConfigs,
- })},
- })
- if err := s.CreateNIC(nicID, loopback.New()); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
-
- addrWithPrefix := tcpip.AddressWithPrefix{
- Address: addr1,
- PrefixLen: defaultPrefixLen,
- }
- if err := s.AddAddressWithPrefix(nicID, header.IPv6ProtocolNumber, addrWithPrefix); err != nil {
- t.Fatalf("AddAddressWithPrefix(%d, %d, %s) = %s", nicID, header.IPv6ProtocolNumber, addrWithPrefix, err)
- }
-
- // Address should not be considered bound to the NIC yet (DAD ongoing).
- if err := checkGetMainNICAddress(s, nicID, header.IPv6ProtocolNumber, tcpip.AddressWithPrefix{}); err != nil {
- t.Fatal(err)
- }
-
- // DAD should not resolve after the normal resolution time since our DAD
- // message was looped back - we should extend our DAD process.
- dadResolutionTime := time.Duration(dadConfigs.DupAddrDetectTransmits) * dadConfigs.RetransmitTimer
- clock.Advance(dadResolutionTime)
- if err := checkGetMainNICAddress(s, nicID, header.IPv6ProtocolNumber, tcpip.AddressWithPrefix{}); err != nil {
- t.Error(err)
- }
-
- // Make sure the address does not resolve before the extended resolution time
- // has passed.
- const delta = time.Nanosecond
- // DAD will send extra NS probes if an NS message is looped back.
- const extraTransmits = 3
- clock.Advance(dadResolutionTime*extraTransmits - delta)
- if err := checkGetMainNICAddress(s, nicID, header.IPv6ProtocolNumber, tcpip.AddressWithPrefix{}); err != nil {
- t.Error(err)
- }
-
- // DAD should now resolve.
- clock.Advance(delta)
- if diff := checkDADEvent(<-ndpDisp.dadC, nicID, addr1, &stack.DADSucceeded{}); diff != "" {
- t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
- }
-}
-
-// TestDADResolve tests that an address successfully resolves after performing
-// DAD for various values of DupAddrDetectTransmits and RetransmitTimer.
-// The subtests include a case verifying that an invalid RetransmitTimer value
-// (<1ms) gets fixed to the default RetransmitTimer of 1s.
-// This test also validates the NDP NS packets that are transmitted.
-func TestDADResolve(t *testing.T) {
- const nicID = 1
-
- tests := []struct {
- name string
- linkHeaderLen uint16
- dupAddrDetectTransmits uint8
- retransTimer time.Duration
- expectedRetransmitTimer time.Duration
- }{
- {
- name: "1:1s:1s",
- dupAddrDetectTransmits: 1,
- retransTimer: time.Second,
- expectedRetransmitTimer: time.Second,
- },
- {
- name: "2:1s:1s",
- linkHeaderLen: 1,
- dupAddrDetectTransmits: 2,
- retransTimer: time.Second,
- expectedRetransmitTimer: time.Second,
- },
- {
- name: "1:2s:2s",
- linkHeaderLen: 2,
- dupAddrDetectTransmits: 1,
- retransTimer: 2 * time.Second,
- expectedRetransmitTimer: 2 * time.Second,
- },
-		// 0s is an invalid RetransmitTimer value and will be fixed to
-		// the default RetransmitTimer value of 1s.
- {
- name: "1:0s:1s",
- linkHeaderLen: 3,
- dupAddrDetectTransmits: 1,
- retransTimer: 0,
- expectedRetransmitTimer: time.Second,
- },
- }
-
- nonces := [][]byte{
- {1, 2, 3, 4, 5, 6},
- {7, 8, 9, 10, 11, 12},
- }
-
- var secureRNGBytes []byte
- for _, n := range nonces {
- secureRNGBytes = append(secureRNGBytes, n...)
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- ndpDisp := ndpDispatcher{
- dadC: make(chan ndpDADEvent, 1),
- }
-
- e := channelLinkWithHeaderLength{
- Endpoint: channel.New(int(test.dupAddrDetectTransmits), 1280, linkAddr1),
- headerLength: test.linkHeaderLen,
- }
- e.Endpoint.LinkEPCapabilities |= stack.CapabilityResolutionRequired
-
- var secureRNG bytes.Reader
- secureRNG.Reset(secureRNGBytes)
-
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- Clock: clock,
- RandSource: rand.NewSource(time.Now().UnixNano()),
- SecureRNG: &secureRNG,
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- NDPDisp: &ndpDisp,
- DADConfigs: stack.DADConfigurations{
- RetransmitTimer: test.retransTimer,
- DupAddrDetectTransmits: test.dupAddrDetectTransmits,
- },
- })},
- })
- if err := s.CreateNIC(nicID, &e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
-
- // We add a default route so the call to FindRoute below will succeed
- // once we have an assigned address.
- s.SetRouteTable([]tcpip.Route{{
- Destination: header.IPv6EmptySubnet,
- Gateway: addr3,
- NIC: nicID,
- }})
-
- addrWithPrefix := tcpip.AddressWithPrefix{
- Address: addr1,
- PrefixLen: defaultPrefixLen,
- }
- if err := s.AddAddressWithPrefix(nicID, header.IPv6ProtocolNumber, addrWithPrefix); err != nil {
- t.Fatalf("AddAddressWithPrefix(%d, %d, %s) = %s", nicID, header.IPv6ProtocolNumber, addrWithPrefix, err)
- }
-
- // Make sure the address does not resolve before the resolution time has
- // passed.
- const delta = time.Nanosecond
- clock.Advance(test.expectedRetransmitTimer*time.Duration(test.dupAddrDetectTransmits) - delta)
- if err := checkGetMainNICAddress(s, nicID, header.IPv6ProtocolNumber, tcpip.AddressWithPrefix{}); err != nil {
- t.Error(err)
- }
-			// Should not get a route even if we specify the tentative address as
-			// the local address.
- {
- r, err := s.FindRoute(nicID, "", addr2, header.IPv6ProtocolNumber, false)
- if _, ok := err.(*tcpip.ErrNoRoute); !ok {
- t.Errorf("got FindRoute(%d, '', %s, %d, false) = (%+v, %v), want = (_, %s)", nicID, addr2, header.IPv6ProtocolNumber, r, err, &tcpip.ErrNoRoute{})
- }
- if r != nil {
- r.Release()
- }
- }
- {
- r, err := s.FindRoute(nicID, addr1, addr2, header.IPv6ProtocolNumber, false)
- if _, ok := err.(*tcpip.ErrNoRoute); !ok {
- t.Errorf("got FindRoute(%d, %s, %s, %d, false) = (%+v, %v), want = (_, %s)", nicID, addr1, addr2, header.IPv6ProtocolNumber, r, err, &tcpip.ErrNoRoute{})
- }
- if r != nil {
- r.Release()
- }
- }
-
- if t.Failed() {
- t.FailNow()
- }
-
- // Wait for DAD to resolve.
- clock.Advance(delta)
- select {
- case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, addr1, &stack.DADSucceeded{}); diff != "" {
- t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatalf("expected DAD event for %s on NIC(%d)", addr1, nicID)
- }
- if err := checkGetMainNICAddress(s, nicID, header.IPv6ProtocolNumber, addrWithPrefix); err != nil {
- t.Error(err)
- }
- // Should get a route using the address now that it is resolved.
- {
- r, err := s.FindRoute(nicID, "", addr2, header.IPv6ProtocolNumber, false)
- if err != nil {
- t.Errorf("got FindRoute(%d, '', %s, %d, false): %s", nicID, addr2, header.IPv6ProtocolNumber, err)
- } else if r.LocalAddress() != addr1 {
- t.Errorf("got r.LocalAddress() = %s, want = %s", r.LocalAddress(), addr1)
- }
- r.Release()
- }
- {
- r, err := s.FindRoute(nicID, addr1, addr2, header.IPv6ProtocolNumber, false)
- if err != nil {
- t.Errorf("got FindRoute(%d, %s, %s, %d, false): %s", nicID, addr1, addr2, header.IPv6ProtocolNumber, err)
- } else if r.LocalAddress() != addr1 {
- t.Errorf("got r.LocalAddress() = %s, want = %s", r.LocalAddress(), addr1)
- }
- if r != nil {
- r.Release()
- }
- }
-
- if t.Failed() {
- t.FailNow()
- }
-
- // Should not have sent any more NS messages.
- if got := s.Stats().ICMP.V6.PacketsSent.NeighborSolicit.Value(); got != uint64(test.dupAddrDetectTransmits) {
- t.Fatalf("got NeighborSolicit = %d, want = %d", got, test.dupAddrDetectTransmits)
- }
-
- // Validate the sent Neighbor Solicitation messages.
- for i := uint8(0); i < test.dupAddrDetectTransmits; i++ {
- p, ok := e.Read()
- if !ok {
- t.Fatal("packet didn't arrive")
- }
-
-				// Make sure it's an IPv6 packet.
- if p.Proto != header.IPv6ProtocolNumber {
- t.Fatalf("got Proto = %d, want = %d", p.Proto, header.IPv6ProtocolNumber)
- }
-
- // Make sure the right remote link address is used.
- snmc := header.SolicitedNodeAddr(addr1)
- if want := header.EthernetAddressFromMulticastIPv6Address(snmc); p.Route.RemoteLinkAddress != want {
- t.Errorf("got remote link address = %s, want = %s", p.Route.RemoteLinkAddress, want)
- }
-
- // Check NDP NS packet.
- //
-				// As per RFC 4861 section 4.3, a possible option is the Source
-				// Link-Layer Address option, but this option MUST NOT be included
-				// when the source address of the packet is the unspecified address.
- checker.IPv6(t, stack.PayloadSince(p.Pkt.NetworkHeader()),
- checker.SrcAddr(header.IPv6Any),
- checker.DstAddr(snmc),
- checker.TTL(header.NDPHopLimit),
- checker.NDPNS(
- checker.NDPNSTargetAddress(addr1),
- checker.NDPNSOptions([]header.NDPOption{header.NDPNonceOption(nonces[i])}),
- ))
-
- if l, want := p.Pkt.AvailableHeaderBytes(), int(test.linkHeaderLen); l != want {
- t.Errorf("got p.Pkt.AvailableHeaderBytes() = %d; want = %d", l, want)
- }
- }
- })
- }
-}
-
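-// rxNDPSolicit injects an NDP Neighbor Solicitation for tgt into e, sent from
-// the unspecified address to tgt's solicited-node multicast address, as a
-// node performing DAD for tgt would send it.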
-func rxNDPSolicit(e *channel.Endpoint, tgt tcpip.Address) {
- hdr := buffer.NewPrependable(header.IPv6MinimumSize + header.ICMPv6NeighborSolicitMinimumSize)
- pkt := header.ICMPv6(hdr.Prepend(header.ICMPv6NeighborSolicitMinimumSize))
- pkt.SetType(header.ICMPv6NeighborSolicit)
- ns := header.NDPNeighborSolicit(pkt.MessageBody())
- ns.SetTargetAddress(tgt)
- snmc := header.SolicitedNodeAddr(tgt)
- pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
- Header: pkt,
- Src: header.IPv6Any,
- Dst: snmc,
- }))
- payloadLength := hdr.UsedLength()
- ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
- ip.Encode(&header.IPv6Fields{
- PayloadLength: uint16(payloadLength),
- TransportProtocol: icmp.ProtocolNumber6,
- HopLimit: 255,
- SrcAddr: header.IPv6Any,
- DstAddr: snmc,
- })
- e.InjectInbound(header.IPv6ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{Data: hdr.View().ToVectorisedView()}))
-}
-
-// TestDADFail tests that the DAD process fails if another node is detected to
-// be performing DAD on the same address (we receive an NS message from a node
-// doing DAD for the same address), or if another node is detected to already
-// own the address (we receive an NA message for the tentative address).
-func TestDADFail(t *testing.T) {
- const nicID = 1
-
- tests := []struct {
- name string
- rxPkt func(e *channel.Endpoint, tgt tcpip.Address)
- getStat func(s tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter
- expectedHolderLinkAddress tcpip.LinkAddress
- }{
- {
- name: "RxSolicit",
- rxPkt: rxNDPSolicit,
- getStat: func(s tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
- return s.NeighborSolicit
- },
- expectedHolderLinkAddress: "",
- },
- {
- name: "RxAdvert",
- rxPkt: func(e *channel.Endpoint, tgt tcpip.Address) {
- naSize := header.ICMPv6NeighborAdvertMinimumSize + header.NDPLinkLayerAddressSize
- hdr := buffer.NewPrependable(header.IPv6MinimumSize + naSize)
- pkt := header.ICMPv6(hdr.Prepend(naSize))
- pkt.SetType(header.ICMPv6NeighborAdvert)
- na := header.NDPNeighborAdvert(pkt.MessageBody())
- na.SetSolicitedFlag(true)
- na.SetOverrideFlag(true)
- na.SetTargetAddress(tgt)
- na.Options().Serialize(header.NDPOptionsSerializer{
- header.NDPTargetLinkLayerAddressOption(linkAddr1),
- })
- pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
- Header: pkt,
- Src: tgt,
- Dst: header.IPv6AllNodesMulticastAddress,
- }))
- payloadLength := hdr.UsedLength()
- ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
- ip.Encode(&header.IPv6Fields{
- PayloadLength: uint16(payloadLength),
- TransportProtocol: icmp.ProtocolNumber6,
- HopLimit: 255,
- SrcAddr: tgt,
- DstAddr: header.IPv6AllNodesMulticastAddress,
- })
- e.InjectInbound(header.IPv6ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{Data: hdr.View().ToVectorisedView()}))
- },
- getStat: func(s tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
- return s.NeighborAdvert
- },
- expectedHolderLinkAddress: linkAddr1,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- ndpDisp := ndpDispatcher{
- dadC: make(chan ndpDADEvent, 1),
- }
- dadConfigs := stack.DefaultDADConfigurations()
- dadConfigs.RetransmitTimer = time.Second * 2
-
- e := channel.New(0, 1280, linkAddr1)
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- NDPDisp: &ndpDisp,
- DADConfigs: dadConfigs,
- })},
- Clock: clock,
- })
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
-
- if err := s.AddAddress(nicID, header.IPv6ProtocolNumber, addr1); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s) = %s", nicID, header.IPv6ProtocolNumber, addr1, err)
- }
-
- // Address should not be considered bound to the NIC yet
- // (DAD ongoing).
- if err := checkGetMainNICAddress(s, nicID, header.IPv6ProtocolNumber, tcpip.AddressWithPrefix{}); err != nil {
- t.Fatal(err)
- }
-
- // Receive a packet to simulate an address conflict.
- test.rxPkt(e, addr1)
-
- stat := test.getStat(s.Stats().ICMP.V6.PacketsReceived)
- if got := stat.Value(); got != 1 {
- t.Fatalf("got stat = %d, want = 1", got)
- }
-
- // Wait for DAD to fail and make sure the address did
- // not get resolved.
- clock.Advance(time.Duration(dadConfigs.DupAddrDetectTransmits) * dadConfigs.RetransmitTimer)
- select {
- case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, addr1, &stack.DADDupAddrDetected{HolderLinkAddress: test.expectedHolderLinkAddress}); diff != "" {
- t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
- }
- default:
-				// If we don't get a failure event after advancing the
-				// clock by the expected resolution time, something is
-				// wrong.
- t.Fatal("timed out waiting for DAD failure")
- }
- if err := checkGetMainNICAddress(s, nicID, header.IPv6ProtocolNumber, tcpip.AddressWithPrefix{}); err != nil {
- t.Fatal(err)
- }
-
- // Attempting to add the address again should not fail if the address's
- // state was cleaned up when DAD failed.
- if err := s.AddAddress(nicID, header.IPv6ProtocolNumber, addr1); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s) = %s", nicID, header.IPv6ProtocolNumber, addr1, err)
- }
- })
- }
-}
-
-func TestDADStop(t *testing.T) {
- const nicID = 1
-
- tests := []struct {
- name string
- stopFn func(t *testing.T, s *stack.Stack)
- skipFinalAddrCheck bool
- }{
- // Tests to make sure that DAD stops when an address is removed.
- {
- name: "Remove address",
- stopFn: func(t *testing.T, s *stack.Stack) {
- if err := s.RemoveAddress(nicID, addr1); err != nil {
- t.Fatalf("RemoveAddress(%d, %s): %s", nicID, addr1, err)
- }
- },
- },
-
- // Tests to make sure that DAD stops when the NIC is disabled.
- {
- name: "Disable NIC",
- stopFn: func(t *testing.T, s *stack.Stack) {
- if err := s.DisableNIC(nicID); err != nil {
- t.Fatalf("DisableNIC(%d): %s", nicID, err)
- }
- },
- },
-
- // Tests to make sure that DAD stops when the NIC is removed.
- {
- name: "Remove NIC",
- stopFn: func(t *testing.T, s *stack.Stack) {
- if err := s.RemoveNIC(nicID); err != nil {
- t.Fatalf("RemoveNIC(%d): %s", nicID, err)
- }
- },
- // The NIC is removed so we can't check its addresses after calling
- // stopFn.
- skipFinalAddrCheck: true,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- ndpDisp := ndpDispatcher{
- dadC: make(chan ndpDADEvent, 1),
- }
-
- dadConfigs := stack.DADConfigurations{
- RetransmitTimer: time.Second,
- DupAddrDetectTransmits: 2,
- }
-
- e := channel.New(0, 1280, linkAddr1)
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- NDPDisp: &ndpDisp,
- DADConfigs: dadConfigs,
- })},
- Clock: clock,
- })
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID, err)
- }
-
- if err := s.AddAddress(nicID, header.IPv6ProtocolNumber, addr1); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s): %s", nicID, header.IPv6ProtocolNumber, addr1, err)
- }
-
- // Address should not be considered bound to the NIC yet (DAD ongoing).
- if err := checkGetMainNICAddress(s, nicID, header.IPv6ProtocolNumber, tcpip.AddressWithPrefix{}); err != nil {
- t.Fatal(err)
- }
-
- test.stopFn(t, s)
-
- // Wait for DAD to fail (since the address was removed during DAD).
- clock.Advance(time.Duration(dadConfigs.DupAddrDetectTransmits) * dadConfigs.RetransmitTimer)
- select {
- case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, addr1, &stack.DADAborted{}); diff != "" {
- t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
- }
- default:
-				// If we don't get a failure event after advancing the clock by
-				// the expected resolution time, something is wrong.
- t.Fatal("timed out waiting for DAD failure")
- }
-
- if !test.skipFinalAddrCheck {
- if err := checkGetMainNICAddress(s, nicID, header.IPv6ProtocolNumber, tcpip.AddressWithPrefix{}); err != nil {
- t.Fatal(err)
- }
- }
-
- // Should not have sent more than 1 NS message.
- if got := s.Stats().ICMP.V6.PacketsSent.NeighborSolicit.Value(); got > 1 {
- t.Errorf("got NeighborSolicit = %d, want <= 1", got)
- }
- })
- }
-}
-
-// TestSetNDPConfigurations tests that we can update and use per-interface NDP
-// configurations without affecting the default NDP configurations or other
-// interfaces' configurations.
-func TestSetNDPConfigurations(t *testing.T) {
- const nicID1 = 1
- const nicID2 = 2
- const nicID3 = 3
-
- tests := []struct {
- name string
- dupAddrDetectTransmits uint8
- retransmitTimer time.Duration
- expectedRetransmitTimer time.Duration
- }{
- {
- "OK",
- 1,
- time.Second,
- time.Second,
- },
- {
- "Invalid Retransmit Timer",
- 1,
- 0,
- time.Second,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- ndpDisp := ndpDispatcher{
- dadC: make(chan ndpDADEvent, 1),
- }
- e := channel.New(0, 1280, linkAddr1)
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- NDPDisp: &ndpDisp,
- })},
- Clock: clock,
- })
-
- expectDADEvent := func(nicID tcpip.NICID, addr tcpip.Address) {
- select {
- case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, addr, &stack.DADSucceeded{}); diff != "" {
- t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatalf("expected DAD event for %s", addr)
- }
- }
-
-			// NIC(1)'s NDP configurations will be updated to be different from
-			// the default.
- if err := s.CreateNIC(nicID1, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID1, err)
- }
-
- // Created before updating NIC(1)'s NDP configurations
- // but updating NIC(1)'s NDP configurations should not
- // affect other existing NICs.
- if err := s.CreateNIC(nicID2, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID2, err)
- }
-
- // Update the configurations on NIC(1) to use DAD.
- if ipv6Ep, err := s.GetNetworkEndpoint(nicID1, header.IPv6ProtocolNumber); err != nil {
- t.Fatalf("s.GetNetworkEndpoint(%d, %d): %s", nicID1, header.IPv6ProtocolNumber, err)
- } else {
- dad := ipv6Ep.(stack.DuplicateAddressDetector)
- dad.SetDADConfigurations(stack.DADConfigurations{
- DupAddrDetectTransmits: test.dupAddrDetectTransmits,
- RetransmitTimer: test.retransmitTimer,
- })
- }
-
- // Created after updating NIC(1)'s NDP configurations
- // but the stack's default NDP configurations should not
- // have been updated.
- if err := s.CreateNIC(nicID3, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID3, err)
- }
-
- // Add addresses for each NIC.
- addrWithPrefix1 := tcpip.AddressWithPrefix{Address: addr1, PrefixLen: defaultPrefixLen}
- if err := s.AddAddressWithPrefix(nicID1, header.IPv6ProtocolNumber, addrWithPrefix1); err != nil {
- t.Fatalf("AddAddressWithPrefix(%d, %d, %s) = %s", nicID1, header.IPv6ProtocolNumber, addrWithPrefix1, err)
- }
- addrWithPrefix2 := tcpip.AddressWithPrefix{Address: addr2, PrefixLen: defaultPrefixLen}
- if err := s.AddAddressWithPrefix(nicID2, header.IPv6ProtocolNumber, addrWithPrefix2); err != nil {
- t.Fatalf("AddAddressWithPrefix(%d, %d, %s) = %s", nicID2, header.IPv6ProtocolNumber, addrWithPrefix2, err)
- }
- expectDADEvent(nicID2, addr2)
- addrWithPrefix3 := tcpip.AddressWithPrefix{Address: addr3, PrefixLen: defaultPrefixLen}
- if err := s.AddAddressWithPrefix(nicID3, header.IPv6ProtocolNumber, addrWithPrefix3); err != nil {
- t.Fatalf("AddAddressWithPrefix(%d, %d, %s) = %s", nicID3, header.IPv6ProtocolNumber, addrWithPrefix3, err)
- }
- expectDADEvent(nicID3, addr3)
-
- // Address should not be considered bound to NIC(1) yet
- // (DAD ongoing).
- if err := checkGetMainNICAddress(s, nicID1, header.IPv6ProtocolNumber, tcpip.AddressWithPrefix{}); err != nil {
- t.Fatal(err)
- }
-
- // Should get the address on NIC(2) and NIC(3)
- // immediately since we should not have performed DAD on
- // it as the stack was configured to not do DAD by
- // default and we only updated the NDP configurations on
- // NIC(1).
- if err := checkGetMainNICAddress(s, nicID2, header.IPv6ProtocolNumber, addrWithPrefix2); err != nil {
- t.Fatal(err)
- }
- if err := checkGetMainNICAddress(s, nicID3, header.IPv6ProtocolNumber, addrWithPrefix3); err != nil {
- t.Fatal(err)
- }
-
-			// Advance the clock to just before resolution to make sure the
-			// address didn't resolve on NIC(1) yet.
- const delta = 1
- clock.Advance(time.Duration(test.dupAddrDetectTransmits)*test.expectedRetransmitTimer - delta)
- if err := checkGetMainNICAddress(s, nicID1, header.IPv6ProtocolNumber, tcpip.AddressWithPrefix{}); err != nil {
- t.Fatal(err)
- }
-
- // Wait for DAD to resolve.
- clock.Advance(delta)
- select {
- case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID1, addr1, &stack.DADSucceeded{}); diff != "" {
- t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("timed out waiting for DAD resolution")
- }
- if err := checkGetMainNICAddress(s, nicID1, header.IPv6ProtocolNumber, addrWithPrefix1); err != nil {
- t.Fatal(err)
- }
- })
- }
-}
-
-// raBuf returns a valid NDP Router Advertisement with options, router
-// preference and DHCPv6 configurations specified.
-func raBuf(ip tcpip.Address, rl uint16, managedAddress, otherConfigurations bool, prf header.NDPRoutePreference, optSer header.NDPOptionsSerializer) *stack.PacketBuffer {
- const flagsByte = 1
- const routerLifetimeOffset = 2
-
- icmpSize := header.ICMPv6HeaderSize + header.NDPRAMinimumSize + optSer.Length()
- hdr := buffer.NewPrependable(header.IPv6MinimumSize + icmpSize)
- pkt := header.ICMPv6(hdr.Prepend(icmpSize))
- pkt.SetType(header.ICMPv6RouterAdvert)
- pkt.SetCode(0)
- raPayload := pkt.MessageBody()
- ra := header.NDPRouterAdvert(raPayload)
- // Populate the Router Lifetime.
- binary.BigEndian.PutUint16(raPayload[routerLifetimeOffset:], rl)
- // Populate the Managed Address flag field.
- if managedAddress {
- // The Managed Addresses flag field is the 7th bit of the flags byte.
- raPayload[flagsByte] |= 1 << 7
- }
- // Populate the Other Configurations flag field.
- if otherConfigurations {
- // The Other Configurations flag field is the 6th bit of the flags byte.
- raPayload[flagsByte] |= 1 << 6
- }
- // The Prf field is held in the flags byte.
- raPayload[flagsByte] |= byte(prf) << 3
- opts := ra.Options()
- opts.Serialize(optSer)
- pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
- Header: pkt,
- Src: ip,
- Dst: header.IPv6AllNodesMulticastAddress,
- }))
- payloadLength := hdr.UsedLength()
- iph := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
- iph.Encode(&header.IPv6Fields{
- PayloadLength: uint16(payloadLength),
- TransportProtocol: icmp.ProtocolNumber6,
- HopLimit: header.NDPHopLimit,
- SrcAddr: ip,
- DstAddr: header.IPv6AllNodesMulticastAddress,
- })
-
- return stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: hdr.View().ToVectorisedView(),
- })
-}
-
-// raBufWithOpts returns a valid NDP Router Advertisement with options.
-//
-// Note, raBufWithOpts does not populate any of the RA fields other than the
-// Router Lifetime.
-func raBufWithOpts(ip tcpip.Address, rl uint16, optSer header.NDPOptionsSerializer) *stack.PacketBuffer {
- return raBuf(ip, rl, false /* managedAddress */, false /* otherConfigurations */, 0 /* prf */, optSer)
-}
-
-// raBufWithDHCPv6 returns a valid NDP Router Advertisement with DHCPv6 related
-// fields set.
-//
-// Note, raBufWithDHCPv6 does not populate any of the RA fields other than the
-// DHCPv6 related ones.
-func raBufWithDHCPv6(ip tcpip.Address, managedAddresses, otherConfigurations bool) *stack.PacketBuffer {
- return raBuf(ip, 0, managedAddresses, otherConfigurations, 0 /* prf */, header.NDPOptionsSerializer{})
-}
-
-// raBufSimple returns a valid NDP Router Advertisement with no options.
-//
-// Note, raBufSimple does not populate any of the RA fields other than the
-// Router Lifetime.
-func raBufSimple(ip tcpip.Address, rl uint16) *stack.PacketBuffer {
- return raBufWithOpts(ip, rl, header.NDPOptionsSerializer{})
-}
-
-// raBufWithPrf returns a valid NDP Router Advertisement with a preference.
-//
-// Note, raBufWithPrf does not populate any of the RA fields other than the
-// Router Lifetime and Default Router Preference fields.
-func raBufWithPrf(ip tcpip.Address, rl uint16, prf header.NDPRoutePreference) *stack.PacketBuffer {
- return raBuf(ip, rl, false /* managedAddress */, false /* otherConfigurations */, prf, header.NDPOptionsSerializer{})
-}
-
-// raBufWithPI returns a valid NDP Router Advertisement with a single Prefix
-// Information option.
-//
-// Note, raBufWithPI does not populate any of the RA fields other than the
-// Router Lifetime.
-func raBufWithPI(ip tcpip.Address, rl uint16, prefix tcpip.AddressWithPrefix, onLink, auto bool, vl, pl uint32) *stack.PacketBuffer {
- flags := uint8(0)
- if onLink {
- // The OnLink flag is the 7th bit in the flags byte.
- flags |= 1 << 7
- }
- if auto {
- // The Address Auto-Configuration flag is the 6th bit in the
- // flags byte.
- flags |= 1 << 6
- }
-
- // A valid header.NDPPrefixInformation must be 30 bytes.
- buf := [30]byte{}
- // The first byte in a header.NDPPrefixInformation is the Prefix Length
- // field.
- buf[0] = uint8(prefix.PrefixLen)
- // The 2nd byte within a header.NDPPrefixInformation is the Flags field.
- buf[1] = flags
- // The Valid Lifetime field starts after the 2nd byte within a
- // header.NDPPrefixInformation.
- binary.BigEndian.PutUint32(buf[2:], vl)
- // The Preferred Lifetime field starts after the 6th byte within a
- // header.NDPPrefixInformation.
- binary.BigEndian.PutUint32(buf[6:], pl)
- // The Prefix Address field starts after the 14th byte within a
- // header.NDPPrefixInformation.
- copy(buf[14:], prefix.Address)
- return raBufWithOpts(ip, rl, header.NDPOptionsSerializer{
- header.NDPPrefixInformation(buf[:]),
- })
-}
-
-// raBufWithRIO returns a valid NDP Router Advertisement with a single Route
-// Information option.
-//
-// All fields in the RA will be zero except the RIO option.
-func raBufWithRIO(t *testing.T, ip tcpip.Address, prefix tcpip.AddressWithPrefix, lifetimeSeconds uint32, prf header.NDPRoutePreference) *stack.PacketBuffer {
- // buf will hold the route information option after the Type and Length
- // fields.
- //
- // 2.3. Route Information Option
- //
- // 0 1 2 3
- // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
- // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- // | Type | Length | Prefix Length |Resvd|Prf|Resvd|
- // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- // | Route Lifetime |
- // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- // | Prefix (Variable Length) |
- // . .
- // . .
- // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- var buf [22]byte
- buf[0] = uint8(prefix.PrefixLen)
- buf[1] = byte(prf) << 3
- binary.BigEndian.PutUint32(buf[2:], lifetimeSeconds)
- if n := copy(buf[6:], prefix.Address); n != len(prefix.Address) {
- t.Fatalf("got copy(...) = %d, want = %d", n, len(prefix.Address))
- }
- return raBufWithOpts(ip, 0 /* router lifetime */, header.NDPOptionsSerializer{
- header.NDPRouteInformation(buf[:]),
- })
-}
-
-func TestDynamicConfigurationsDisabled(t *testing.T) {
- const (
- nicID = 1
- maxRtrSolicitDelay = time.Second
- )
-
- prefix := tcpip.AddressWithPrefix{
- Address: testutil.MustParse6("102:304:506:708::"),
- PrefixLen: 64,
- }
-
- tests := []struct {
- name string
- config func(bool) ipv6.NDPConfigurations
- ra *stack.PacketBuffer
- }{
- {
- name: "No Router Discovery",
- config: func(enable bool) ipv6.NDPConfigurations {
- return ipv6.NDPConfigurations{DiscoverDefaultRouters: enable}
- },
- ra: raBufSimple(llAddr2, 1000),
- },
- {
- name: "No Prefix Discovery",
- config: func(enable bool) ipv6.NDPConfigurations {
- return ipv6.NDPConfigurations{DiscoverOnLinkPrefixes: enable}
- },
- ra: raBufWithPI(llAddr2, 0, prefix, true, false, 10, 0),
- },
- {
- name: "No Autogenerate Addresses",
- config: func(enable bool) ipv6.NDPConfigurations {
- return ipv6.NDPConfigurations{AutoGenGlobalAddresses: enable}
- },
- ra: raBufWithPI(llAddr2, 0, prefix, false, true, 10, 0),
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- // Being configured to discover routers/prefixes or auto-generate
- // addresses means RAs must be handled, and router/prefix discovery or
- // SLAAC must be enabled.
- //
- // This tests all possible combinations of the configurations where
- // router/prefix discovery or SLAAC are disabled.
- for i := 0; i < 7; i++ {
- handle := ipv6.HandlingRAsDisabled
- if i&1 != 0 {
- handle = ipv6.HandlingRAsEnabledWhenForwardingDisabled
- }
- enable := i&2 != 0
- forwarding := i&4 == 0
-
- t.Run(fmt.Sprintf("HandleRAs(%s), Forwarding(%t), Enabled(%t)", handle, forwarding, enable), func(t *testing.T) {
- ndpDisp := ndpDispatcher{
- offLinkRouteC: make(chan ndpOffLinkRouteEvent, 1),
- prefixC: make(chan ndpPrefixEvent, 1),
- autoGenAddrC: make(chan ndpAutoGenAddrEvent, 1),
- }
- ndpConfigs := test.config(enable)
- ndpConfigs.HandleRAs = handle
- ndpConfigs.MaxRtrSolicitations = 1
- ndpConfigs.RtrSolicitationInterval = maxRtrSolicitDelay
- ndpConfigs.MaxRtrSolicitationDelay = maxRtrSolicitDelay
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- Clock: clock,
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- NDPConfigs: ndpConfigs,
- NDPDisp: &ndpDisp,
- })},
- })
- if err := s.SetForwardingDefaultAndAllNICs(ipv6.ProtocolNumber, forwarding); err != nil {
- t.Fatalf("SetForwardingDefaultAndAllNICs(%d, %t): %s", ipv6.ProtocolNumber, forwarding, err)
- }
-
- e := channel.New(1, 1280, linkAddr1)
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID, err)
- }
-
- handleRAsDisabled := handle == ipv6.HandlingRAsDisabled || forwarding
- ep, err := s.GetNetworkEndpoint(nicID, ipv6.ProtocolNumber)
- if err != nil {
- t.Fatalf("s.GetNetworkEndpoint(%d, %d): %s", nicID, ipv6.ProtocolNumber, err)
- }
- stats := ep.Stats()
- v6Stats, ok := stats.(*ipv6.Stats)
- if !ok {
- t.Fatalf("got v6Stats = %T, expected = %T", stats, v6Stats)
- }
-
- // Make sure that when handling RAs are enabled, we solicit routers.
- clock.Advance(maxRtrSolicitDelay)
- if got, want := v6Stats.ICMP.PacketsSent.RouterSolicit.Value(), boolToUint64(!handleRAsDisabled); got != want {
- t.Errorf("got v6Stats.ICMP.PacketsSent.RouterSolicit.Value() = %d, want = %d", got, want)
- }
- if handleRAsDisabled {
- if p, ok := e.Read(); ok {
- t.Errorf("unexpectedly got a packet = %#v", p)
- }
- } else if p, ok := e.Read(); !ok {
- t.Error("expected router solicitation packet")
- } else if p.Proto != header.IPv6ProtocolNumber {
- t.Errorf("got Proto = %d, want = %d", p.Proto, header.IPv6ProtocolNumber)
- } else {
- if want := header.EthernetAddressFromMulticastIPv6Address(header.IPv6AllRoutersLinkLocalMulticastAddress); p.Route.RemoteLinkAddress != want {
- t.Errorf("got remote link address = %s, want = %s", p.Route.RemoteLinkAddress, want)
- }
-
- checker.IPv6(t, stack.PayloadSince(p.Pkt.NetworkHeader()),
- checker.SrcAddr(header.IPv6Any),
- checker.DstAddr(header.IPv6AllRoutersLinkLocalMulticastAddress),
- checker.TTL(header.NDPHopLimit),
- checker.NDPRS(checker.NDPRSOptions(nil)),
- )
- }
-
- // Make sure we do not discover any routers or prefixes, or perform
- // SLAAC on reception of an RA.
- e.InjectInbound(header.IPv6ProtocolNumber, test.ra.Clone())
- // Make sure that the unhandled RA stat is only incremented when
- // handling RAs is disabled.
- if got, want := v6Stats.UnhandledRouterAdvertisements.Value(), boolToUint64(handleRAsDisabled); got != want {
- t.Errorf("got v6Stats.UnhandledRouterAdvertisements.Value() = %d, want = %d", got, want)
- }
- select {
- case e := <-ndpDisp.offLinkRouteC:
- t.Errorf("unexpectedly updated an off-link route when configured not to: %#v", e)
- default:
- }
- select {
- case e := <-ndpDisp.prefixC:
- t.Errorf("unexpectedly discovered a prefix when configured not to: %#v", e)
- default:
- }
- select {
- case e := <-ndpDisp.autoGenAddrC:
- t.Errorf("unexpectedly auto-generated an address when configured not to: %#v", e)
- default:
- }
- })
- }
- })
- }
-}
-
-func boolToUint64(v bool) uint64 {
- if v {
- return 1
- }
- return 0
-}
-
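-// checkOffLinkRouteEvent returns a non-empty diff if e does not match the
-// off-link route event described by the remaining arguments.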
-func checkOffLinkRouteEvent(e ndpOffLinkRouteEvent, nicID tcpip.NICID, subnet tcpip.Subnet, router tcpip.Address, prf header.NDPRoutePreference, updated bool) string {
- return cmp.Diff(ndpOffLinkRouteEvent{nicID: nicID, subnet: subnet, router: router, prf: prf, updated: updated}, e, cmp.AllowUnexported(e))
-}
-
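-// testWithRAs runs f with each configuration under which received Router
-// Advertisements are handled, with and without forwarding enabled.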
-func testWithRAs(t *testing.T, f func(*testing.T, ipv6.HandleRAsConfiguration, bool)) {
- tests := [...]struct {
- name string
- handleRAs ipv6.HandleRAsConfiguration
- forwarding bool
- }{
- {
- name: "Handle RAs when forwarding disabled",
- handleRAs: ipv6.HandlingRAsEnabledWhenForwardingDisabled,
- forwarding: false,
- },
- {
- name: "Always Handle RAs with forwarding disabled",
- handleRAs: ipv6.HandlingRAsAlwaysEnabled,
- forwarding: false,
- },
- {
- name: "Always Handle RAs with forwarding enabled",
- handleRAs: ipv6.HandlingRAsAlwaysEnabled,
- forwarding: true,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- f(t, test.handleRAs, test.forwarding)
- })
- }
-}
-
-func TestOffLinkRouteDiscovery(t *testing.T) {
- const nicID = 1
-
- moreSpecificPrefix := tcpip.AddressWithPrefix{Address: testutil.MustParse6("a00::"), PrefixLen: 16}
- tests := []struct {
- name string
-
- discoverDefaultRouters bool
- discoverMoreSpecificRoutes bool
-
- dest tcpip.Subnet
- ra func(*testing.T, tcpip.Address, uint16, header.NDPRoutePreference) *stack.PacketBuffer
- }{
- {
- name: "Default router discovery",
- discoverDefaultRouters: true,
- discoverMoreSpecificRoutes: false,
- dest: header.IPv6EmptySubnet,
- ra: func(_ *testing.T, router tcpip.Address, lifetimeSeconds uint16, prf header.NDPRoutePreference) *stack.PacketBuffer {
- return raBufWithPrf(router, lifetimeSeconds, prf)
- },
- },
- {
- name: "More-specific route discovery",
- discoverDefaultRouters: false,
- discoverMoreSpecificRoutes: true,
- dest: moreSpecificPrefix.Subnet(),
- ra: func(t *testing.T, router tcpip.Address, lifetimeSeconds uint16, prf header.NDPRoutePreference) *stack.PacketBuffer {
- return raBufWithRIO(t, router, moreSpecificPrefix, uint32(lifetimeSeconds), prf)
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- testWithRAs(t, func(t *testing.T, handleRAs ipv6.HandleRAsConfiguration, forwarding bool) {
- ndpDisp := ndpDispatcher{
- offLinkRouteC: make(chan ndpOffLinkRouteEvent, 1),
- }
- e := channel.New(0, 1280, linkAddr1)
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- NDPConfigs: ipv6.NDPConfigurations{
- HandleRAs: handleRAs,
- DiscoverDefaultRouters: test.discoverDefaultRouters,
- DiscoverMoreSpecificRoutes: test.discoverMoreSpecificRoutes,
- },
- NDPDisp: &ndpDisp,
- })},
- Clock: clock,
- })
-
- expectOffLinkRouteEvent := func(addr tcpip.Address, prf header.NDPRoutePreference, updated bool) {
- t.Helper()
-
- select {
- case e := <-ndpDisp.offLinkRouteC:
- if diff := checkOffLinkRouteEvent(e, nicID, test.dest, addr, prf, updated); diff != "" {
- t.Errorf("off-link route event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("expected router discovery event")
- }
- }
-
- expectAsyncOffLinkRouteInvalidationEvent := func(addr tcpip.Address, timeout time.Duration) {
- t.Helper()
-
- clock.Advance(timeout)
- select {
- case e := <-ndpDisp.offLinkRouteC:
- var prf header.NDPRoutePreference
- if diff := checkOffLinkRouteEvent(e, nicID, test.dest, addr, prf, false); diff != "" {
- t.Errorf("off-link route event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("timed out waiting for router discovery event")
- }
- }
-
- if err := s.SetForwardingDefaultAndAllNICs(ipv6.ProtocolNumber, forwarding); err != nil {
- t.Fatalf("SetForwardingDefaultAndAllNICs(%d, %t): %s", ipv6.ProtocolNumber, forwarding, err)
- }
-
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID, err)
- }
-
- // Rx an RA from lladdr2 with zero lifetime. It should not be
- // remembered.
- e.InjectInbound(header.IPv6ProtocolNumber, test.ra(t, llAddr2, 0, header.MediumRoutePreference))
- select {
- case <-ndpDisp.offLinkRouteC:
- t.Fatal("unexpectedly updated an off-link route with 0 lifetime")
- default:
- }
-
- // Discover an off-link route through llAddr2.
- e.InjectInbound(header.IPv6ProtocolNumber, test.ra(t, llAddr2, 1000, header.ReservedRoutePreference))
- if test.discoverMoreSpecificRoutes {
- // The reserved value is considered invalid with more-specific route
- // discovery so we inject the same packet but with the default
- // (medium) preference value.
- select {
- case <-ndpDisp.offLinkRouteC:
- t.Fatal("unexpectedly updated an off-link route with a reserved preference value")
- default:
- }
- e.InjectInbound(header.IPv6ProtocolNumber, test.ra(t, llAddr2, 1000, header.MediumRoutePreference))
- }
- expectOffLinkRouteEvent(llAddr2, header.MediumRoutePreference, true)
-
- // Rx an RA from another router (lladdr3) with non-zero lifetime and
- // non-default preference value.
- const l3LifetimeSeconds = 6
- e.InjectInbound(header.IPv6ProtocolNumber, test.ra(t, llAddr3, l3LifetimeSeconds, header.HighRoutePreference))
- expectOffLinkRouteEvent(llAddr3, header.HighRoutePreference, true)
-
- // Rx an RA from lladdr2 with lesser lifetime and default (medium)
- // preference value.
- const l2LifetimeSeconds = 2
- e.InjectInbound(header.IPv6ProtocolNumber, test.ra(t, llAddr2, l2LifetimeSeconds, header.MediumRoutePreference))
- select {
- case <-ndpDisp.offLinkRouteC:
- t.Fatal("should not receive a off-link route event when updating lifetimes for known routers")
- default:
- }
-
- // Rx an RA from lladdr2 with a different preference.
- e.InjectInbound(header.IPv6ProtocolNumber, test.ra(t, llAddr2, l2LifetimeSeconds, header.LowRoutePreference))
- expectOffLinkRouteEvent(llAddr2, header.LowRoutePreference, true)
-
-				// Wait for lladdr2's router invalidation job to execute. The router's
-				// lifetime should have been updated to the most recent (smaller)
-				// lifetime, so if we don't get an invalidation event after advancing
-				// the clock by that lifetime, something is wrong.
- expectAsyncOffLinkRouteInvalidationEvent(llAddr2, l2LifetimeSeconds*time.Second)
-
- // Rx an RA from lladdr2 with huge lifetime.
- e.InjectInbound(header.IPv6ProtocolNumber, test.ra(t, llAddr2, 1000, header.MediumRoutePreference))
- expectOffLinkRouteEvent(llAddr2, header.MediumRoutePreference, true)
-
- // Rx an RA from lladdr2 with zero lifetime. It should be invalidated.
- e.InjectInbound(header.IPv6ProtocolNumber, test.ra(t, llAddr2, 0, header.MediumRoutePreference))
- expectOffLinkRouteEvent(llAddr2, header.MediumRoutePreference, false)
-
-				// Wait for lladdr3's router invalidation job to execute. If we don't
-				// get an invalidation event after advancing the clock by lladdr3's
-				// router lifetime, something is wrong.
- expectAsyncOffLinkRouteInvalidationEvent(llAddr3, l3LifetimeSeconds*time.Second)
- })
- })
- }
-}
-
-// TestRouterDiscoveryMaxRouters tests that only
-// ipv6.MaxDiscoveredOffLinkRoutes discovered routers are remembered.
-func TestRouterDiscoveryMaxRouters(t *testing.T) {
- const nicID = 1
-
- ndpDisp := ndpDispatcher{
- offLinkRouteC: make(chan ndpOffLinkRouteEvent, 1),
- }
- e := channel.New(0, 1280, linkAddr1)
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- NDPConfigs: ipv6.NDPConfigurations{
- HandleRAs: ipv6.HandlingRAsEnabledWhenForwardingDisabled,
- DiscoverDefaultRouters: true,
- },
- NDPDisp: &ndpDisp,
- })},
- })
-
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID, err)
- }
-
-	// Receive RAs from 2 more routers than the max number of discovered routers.
- for i := 1; i <= ipv6.MaxDiscoveredOffLinkRoutes+2; i++ {
- linkAddr := []byte{2, 2, 3, 4, 5, 0}
- linkAddr[5] = byte(i)
- llAddr := header.LinkLocalAddr(tcpip.LinkAddress(linkAddr))
-
- e.InjectInbound(header.IPv6ProtocolNumber, raBufSimple(llAddr, 5))
-
- if i <= ipv6.MaxDiscoveredOffLinkRoutes {
- select {
- case e := <-ndpDisp.offLinkRouteC:
- if diff := checkOffLinkRouteEvent(e, nicID, header.IPv6EmptySubnet, llAddr, header.MediumRoutePreference, true); diff != "" {
- t.Errorf("off-link route event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("expected router discovery event")
- }
-
- } else {
- select {
- case <-ndpDisp.offLinkRouteC:
- t.Fatal("should not have discovered a new router after we already discovered the max number of routers")
- default:
- }
- }
- }
-}
-
-// Check e to make sure that the event is for prefix on nic with ID 1, and the
-// discovered flag set to discovered.
-func checkPrefixEvent(e ndpPrefixEvent, prefix tcpip.Subnet, discovered bool) string {
- return cmp.Diff(ndpPrefixEvent{nicID: 1, prefix: prefix, discovered: discovered}, e, cmp.AllowUnexported(e))
-}
-
-func TestPrefixDiscovery(t *testing.T) {
- prefix1, subnet1, _ := prefixSubnetAddr(0, "")
- prefix2, subnet2, _ := prefixSubnetAddr(1, "")
- prefix3, subnet3, _ := prefixSubnetAddr(2, "")
-
- testWithRAs(t, func(t *testing.T, handleRAs ipv6.HandleRAsConfiguration, forwarding bool) {
- ndpDisp := ndpDispatcher{
- prefixC: make(chan ndpPrefixEvent, 1),
- }
- e := channel.New(0, 1280, linkAddr1)
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- NDPConfigs: ipv6.NDPConfigurations{
- HandleRAs: handleRAs,
- DiscoverOnLinkPrefixes: true,
- },
- NDPDisp: &ndpDisp,
- })},
- Clock: clock,
- })
-
- if err := s.CreateNIC(1, e); err != nil {
- t.Fatalf("CreateNIC(1) = %s", err)
- }
-
- expectPrefixEvent := func(prefix tcpip.Subnet, discovered bool) {
- t.Helper()
-
- select {
- case e := <-ndpDisp.prefixC:
- if diff := checkPrefixEvent(e, prefix, discovered); diff != "" {
- t.Errorf("prefix event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("expected prefix discovery event")
- }
- }
-
- if err := s.SetForwardingDefaultAndAllNICs(ipv6.ProtocolNumber, forwarding); err != nil {
- t.Fatalf("SetForwardingDefaultAndAllNICs(%d, %t): %s", ipv6.ProtocolNumber, forwarding, err)
- }
-
- // Receive an RA with prefix1 in an NDP Prefix Information option (PI)
- // with zero valid lifetime.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix1, true, false, 0, 0))
- select {
- case <-ndpDisp.prefixC:
- t.Fatal("unexpectedly discovered a prefix with 0 lifetime")
- default:
- }
-
- // Receive an RA with prefix1 in an NDP Prefix Information option (PI)
- // with non-zero lifetime.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix1, true, false, 100, 0))
- expectPrefixEvent(subnet1, true)
-
- // Receive an RA with prefix2 in a PI.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix2, true, false, 100, 0))
- expectPrefixEvent(subnet2, true)
-
- // Receive an RA with prefix3 in a PI.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix3, true, false, 100, 0))
- expectPrefixEvent(subnet3, true)
-
- // Receive an RA with prefix1 in a PI with lifetime = 0.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix1, true, false, 0, 0))
- expectPrefixEvent(subnet1, false)
-
- // Receive an RA with prefix2 in a PI with lesser lifetime.
- lifetime := uint32(2)
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix2, true, false, lifetime, 0))
- select {
- case <-ndpDisp.prefixC:
- t.Fatal("unexpectedly received prefix event when updating lifetime")
- default:
- }
-
- // Wait for prefix2's most recent invalidation job plus some buffer to
- // expire.
- clock.Advance(time.Duration(lifetime) * time.Second)
- select {
- case e := <-ndpDisp.prefixC:
- if diff := checkPrefixEvent(e, subnet2, false); diff != "" {
- t.Errorf("prefix event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("timed out waiting for prefix discovery event")
- }
-
- // Receive RA to invalidate prefix3.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix3, true, false, 0, 0))
- expectPrefixEvent(subnet3, false)
- })
-}
-
-func TestPrefixDiscoveryWithInfiniteLifetime(t *testing.T) {
- prefix := tcpip.AddressWithPrefix{
- Address: testutil.MustParse6("102:304:506:708::"),
- PrefixLen: 64,
- }
- subnet := prefix.Subnet()
-
- ndpDisp := ndpDispatcher{
- prefixC: make(chan ndpPrefixEvent, 1),
- }
- e := channel.New(0, 1280, linkAddr1)
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- NDPConfigs: ipv6.NDPConfigurations{
- HandleRAs: ipv6.HandlingRAsEnabledWhenForwardingDisabled,
- DiscoverOnLinkPrefixes: true,
- },
- NDPDisp: &ndpDisp,
- })},
- Clock: clock,
- })
-
- if err := s.CreateNIC(1, e); err != nil {
- t.Fatalf("CreateNIC(1) = %s", err)
- }
-
- expectPrefixEvent := func(prefix tcpip.Subnet, discovered bool) {
- t.Helper()
-
- select {
- case e := <-ndpDisp.prefixC:
- if diff := checkPrefixEvent(e, prefix, discovered); diff != "" {
- t.Errorf("prefix event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("expected prefix discovery event")
- }
- }
-
- // Receive an RA with prefix in an NDP Prefix Information option (PI)
- // with infinite valid lifetime which should not get invalidated.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix, true, false, infiniteLifetimeSeconds, 0))
- expectPrefixEvent(subnet, true)
- clock.Advance(header.NDPInfiniteLifetime)
- select {
- case <-ndpDisp.prefixC:
- t.Fatal("unexpectedly invalidated a prefix with infinite lifetime")
- default:
- }
-
- // Receive an RA with finite lifetime.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix, true, false, infiniteLifetimeSeconds-1, 0))
- clock.Advance(header.NDPInfiniteLifetime - time.Second)
- select {
- case e := <-ndpDisp.prefixC:
- if diff := checkPrefixEvent(e, subnet, false); diff != "" {
- t.Errorf("prefix event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("timed out waiting for prefix discovery event")
- }
-
- // Receive an RA with finite lifetime.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix, true, false, infiniteLifetimeSeconds-1, 0))
- expectPrefixEvent(subnet, true)
-
- // Receive an RA with prefix with an infinite lifetime.
- // The prefix should not be invalidated.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix, true, false, infiniteLifetimeSeconds, 0))
- clock.Advance(header.NDPInfiniteLifetime)
- select {
- case <-ndpDisp.prefixC:
- t.Fatal("unexpectedly invalidated a prefix with infinite lifetime")
- default:
- }
-
- // Receive an RA with 0 lifetime.
- // The prefix should get invalidated.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix, true, false, 0, 0))
- expectPrefixEvent(subnet, false)
-}
-
-// TestPrefixDiscoveryMaxOnLinkPrefixes tests that only
-// ipv6.MaxDiscoveredOnLinkPrefixes discovered on-link prefixes are remembered.
-func TestPrefixDiscoveryMaxOnLinkPrefixes(t *testing.T) {
- ndpDisp := ndpDispatcher{
- prefixC: make(chan ndpPrefixEvent, ipv6.MaxDiscoveredOnLinkPrefixes+3),
- }
- e := channel.New(0, 1280, linkAddr1)
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- NDPConfigs: ipv6.NDPConfigurations{
- HandleRAs: ipv6.HandlingRAsEnabledWhenForwardingDisabled,
- DiscoverDefaultRouters: false,
- DiscoverOnLinkPrefixes: true,
- },
- NDPDisp: &ndpDisp,
- })},
- })
-
- if err := s.CreateNIC(1, e); err != nil {
- t.Fatalf("CreateNIC(1) = %s", err)
- }
-
- optSer := make(header.NDPOptionsSerializer, ipv6.MaxDiscoveredOnLinkPrefixes+2)
- prefixes := [ipv6.MaxDiscoveredOnLinkPrefixes + 2]tcpip.Subnet{}
-
- // Receive an RA with 2 more than the max number of discovered on-link
- // prefixes.
- for i := 0; i < ipv6.MaxDiscoveredOnLinkPrefixes+2; i++ {
- prefixAddr := [16]byte{1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0}
- prefixAddr[7] = byte(i)
- prefix := tcpip.AddressWithPrefix{
- Address: tcpip.Address(prefixAddr[:]),
- PrefixLen: 64,
- }
- prefixes[i] = prefix.Subnet()
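- // Manually build the body of an NDP Prefix Information option: byte 0 is
- // the prefix length, byte 1 sets the on-link (L) flag, bytes 2-5 carry a
- // 10 second valid lifetime, and bytes 14-29 hold the prefix itself.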
- buf := [30]byte{}
- buf[0] = uint8(prefix.PrefixLen)
- buf[1] = 128
- binary.BigEndian.PutUint32(buf[2:], 10)
- copy(buf[14:], prefix.Address)
-
- optSer[i] = header.NDPPrefixInformation(buf[:])
- }
-
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithOpts(llAddr1, 0, optSer))
- for i := 0; i < ipv6.MaxDiscoveredOnLinkPrefixes+2; i++ {
- if i < ipv6.MaxDiscoveredOnLinkPrefixes {
- select {
- case e := <-ndpDisp.prefixC:
- if diff := checkPrefixEvent(e, prefixes[i], true); diff != "" {
- t.Errorf("prefix event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("expected prefix discovery event")
- }
- } else {
- select {
- case <-ndpDisp.prefixC:
- t.Fatal("should not have discovered a new prefix after we already discovered the max number of prefixes")
- default:
- }
- }
- }
-}
-
-// containsV6Addr returns true if list contains the IPv6 address item.
-func containsV6Addr(list []tcpip.ProtocolAddress, item tcpip.AddressWithPrefix) bool {
- protocolAddress := tcpip.ProtocolAddress{
- Protocol: header.IPv6ProtocolNumber,
- AddressWithPrefix: item,
- }
-
- return containsAddr(list, protocolAddress)
-}
-
-// checkAutoGenAddrEvent checks that the event e is for addr on the NIC with
-// ID 1 and that its event type is eventType.
-func checkAutoGenAddrEvent(e ndpAutoGenAddrEvent, addr tcpip.AddressWithPrefix, eventType ndpAutoGenAddrEventType) string {
- return cmp.Diff(ndpAutoGenAddrEvent{nicID: 1, addr: addr, eventType: eventType}, e, cmp.AllowUnexported(e))
-}
-
-const minVLSeconds = uint32(ipv6.MinPrefixInformationValidLifetimeForUpdate / time.Second)
-const infiniteLifetimeSeconds = uint32(header.NDPInfiniteLifetime / time.Second)
-
-// TestAutoGenAddr tests that an address is properly generated and invalidated
-// when configured to do so.
-func TestAutoGenAddr(t *testing.T) {
- prefix1, _, addr1 := prefixSubnetAddr(0, linkAddr1)
- prefix2, _, addr2 := prefixSubnetAddr(1, linkAddr1)
-
- testWithRAs(t, func(t *testing.T, handleRAs ipv6.HandleRAsConfiguration, forwarding bool) {
- ndpDisp := ndpDispatcher{
- autoGenAddrC: make(chan ndpAutoGenAddrEvent, 1),
- }
- e := channel.New(0, 1280, linkAddr1)
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- NDPConfigs: ipv6.NDPConfigurations{
- HandleRAs: handleRAs,
- AutoGenGlobalAddresses: true,
- },
- NDPDisp: &ndpDisp,
- })},
- Clock: clock,
- })
-
- if err := s.SetForwardingDefaultAndAllNICs(ipv6.ProtocolNumber, forwarding); err != nil {
- t.Fatalf("SetForwardingDefaultAndAllNICs(%d, %t): %s", ipv6.ProtocolNumber, forwarding, err)
- }
-
- if err := s.CreateNIC(1, e); err != nil {
- t.Fatalf("CreateNIC(1) = %s", err)
- }
-
- expectAutoGenAddrEvent := func(addr tcpip.AddressWithPrefix, eventType ndpAutoGenAddrEventType) {
- t.Helper()
-
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, addr, eventType); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("expected addr auto gen event")
- }
- }
-
- // Receive an RA with prefix1 in an NDP Prefix Information option (PI)
- // with zero valid lifetime.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix1, true, true, 0, 0))
- select {
- case <-ndpDisp.autoGenAddrC:
- t.Fatal("unexpectedly auto-generated an address with 0 lifetime")
- default:
- }
-
- // Receive an RA with prefix1 in an NDP Prefix Information option (PI)
- // with non-zero lifetime.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix1, true, true, 100, 0))
- expectAutoGenAddrEvent(addr1, newAddr)
- if !containsV6Addr(s.NICInfo()[1].ProtocolAddresses, addr1) {
- t.Fatalf("Should have %s in the list of addresses", addr1)
- }
-
- // Receive an RA with prefix1 in an NDP Prefix Information option (PI)
- // with preferred lifetime > valid lifetime. No address should be
- // generated from such a PI.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix1, true, true, 5, 6))
- select {
- case <-ndpDisp.autoGenAddrC:
- t.Fatal("unexpectedly auto-generated an address with preferred lifetime > valid lifetime")
- default:
- }
-
- // Receive an RA with prefix2 in a PI with a valid lifetime that exceeds
- // the minimum.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix2, true, true, minVLSeconds+1, 0))
- expectAutoGenAddrEvent(addr2, newAddr)
- if !containsV6Addr(s.NICInfo()[1].ProtocolAddresses, addr1) {
- t.Fatalf("Should have %s in the list of addresses", addr1)
- }
- if !containsV6Addr(s.NICInfo()[1].ProtocolAddresses, addr2) {
- t.Fatalf("Should have %s in the list of addresses", addr2)
- }
-
- // Refresh valid lifetime for addr of prefix1.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix1, true, true, 100, 0))
- select {
- case <-ndpDisp.autoGenAddrC:
- t.Fatal("unexpectedly auto-generated an address when we already have an address for a prefix")
- default:
- }
-
- // Wait for addr of prefix1 to be invalidated.
- clock.Advance(ipv6.MinPrefixInformationValidLifetimeForUpdate)
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, addr1, invalidatedAddr); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("timed out waiting for addr auto gen event")
- }
- if containsV6Addr(s.NICInfo()[1].ProtocolAddresses, addr1) {
- t.Fatalf("Should not have %s in the list of addresses", addr1)
- }
- if !containsV6Addr(s.NICInfo()[1].ProtocolAddresses, addr2) {
- t.Fatalf("Should have %s in the list of addresses", addr2)
- }
- })
-}
-
-func addressCheck(addrs []tcpip.ProtocolAddress, containList, notContainList []tcpip.AddressWithPrefix) string {
- ret := ""
- for _, c := range containList {
- if !containsV6Addr(addrs, c) {
- ret += fmt.Sprintf("should have %s in the list of addresses\n", c)
- }
- }
- for _, c := range notContainList {
- if containsV6Addr(addrs, c) {
- ret += fmt.Sprintf("should not have %s in the list of addresses\n", c)
- }
- }
- return ret
-}
-
-// TestAutoGenTempAddr tests that temporary SLAAC addresses are generated when
-// configured to do so as part of IPv6 Privacy Extensions.
-func TestAutoGenTempAddr(t *testing.T) {
- const nicID = 1
-
- prefix1, _, addr1 := prefixSubnetAddr(0, linkAddr1)
- prefix2, _, addr2 := prefixSubnetAddr(1, linkAddr1)
-
- tests := []struct {
- name string
- dupAddrTransmits uint8
- retransmitTimer time.Duration
- }{
- {
- name: "DAD disabled",
- },
- {
- name: "DAD enabled",
- dupAddrTransmits: 1,
- retransmitTimer: time.Second,
- },
- }
-
- for i, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- seed := []byte{uint8(i)}
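- // Seed a local copy of the temporary IID history with the same seed the
- // stack is given (via TempIIDSeed below) so the test can compute the
- // temporary addresses it expects the stack to generate.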
- var tempIIDHistory [header.IIDSize]byte
- header.InitialTempIID(tempIIDHistory[:], seed, nicID)
- newTempAddr := func(stableAddr tcpip.Address) tcpip.AddressWithPrefix {
- return header.GenerateTempIPv6SLAACAddr(tempIIDHistory[:], stableAddr)
- }
-
- ndpDisp := ndpDispatcher{
- dadC: make(chan ndpDADEvent, 2),
- autoGenAddrC: make(chan ndpAutoGenAddrEvent, 2),
- }
- e := channel.New(0, 1280, linkAddr1)
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- DADConfigs: stack.DADConfigurations{
- DupAddrDetectTransmits: test.dupAddrTransmits,
- RetransmitTimer: test.retransmitTimer,
- },
- NDPConfigs: ipv6.NDPConfigurations{
- HandleRAs: ipv6.HandlingRAsEnabledWhenForwardingDisabled,
- AutoGenGlobalAddresses: true,
- AutoGenTempGlobalAddresses: true,
- MaxTempAddrValidLifetime: 2 * ipv6.MinPrefixInformationValidLifetimeForUpdate,
- MaxTempAddrPreferredLifetime: 2 * ipv6.MinPrefixInformationValidLifetimeForUpdate,
- },
- NDPDisp: &ndpDisp,
- TempIIDSeed: seed,
- })},
- Clock: clock,
- })
-
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
-
- expectAutoGenAddrEvent := func(addr tcpip.AddressWithPrefix, eventType ndpAutoGenAddrEventType) {
- t.Helper()
-
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, addr, eventType); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("expected addr auto gen event")
- }
- }
-
- expectAutoGenAddrEventAsync := func(addr tcpip.AddressWithPrefix, eventType ndpAutoGenAddrEventType) {
- t.Helper()
-
- clock.RunImmediatelyScheduledJobs()
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, addr, eventType); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("timed out waiting for addr auto gen event")
- }
- }
-
- expectDADEventAsync := func(addr tcpip.Address) {
- t.Helper()
-
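- // Advance the clock far enough for all DAD probes to be sent and DAD
- // to resolve.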
- clock.Advance(time.Duration(test.dupAddrTransmits) * test.retransmitTimer)
- select {
- case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, addr, &stack.DADSucceeded{}); diff != "" {
- t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("timed out waiting for DAD event")
- }
- }
-
- // Receive an RA with prefix1 in an NDP Prefix Information option (PI)
- // with zero valid lifetime.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix1, true, true, 0, 0))
- select {
- case e := <-ndpDisp.autoGenAddrC:
- t.Fatalf("unexpectedly auto-generated an address with 0 lifetime; event = %+v", e)
- default:
- }
-
- // Receive an RA with prefix1 in an NDP Prefix Information option (PI)
- // with non-zero valid lifetime.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix1, true, true, 100, 0))
- expectAutoGenAddrEvent(addr1, newAddr)
- expectDADEventAsync(addr1.Address)
- select {
- case e := <-ndpDisp.autoGenAddrC:
- t.Fatalf("unexpectedly got an auto gen addr event = %+v", e)
- default:
- }
- if mismatch := addressCheck(s.NICInfo()[nicID].ProtocolAddresses, []tcpip.AddressWithPrefix{addr1}, nil); mismatch != "" {
- t.Fatal(mismatch)
- }
-
- // Receive an RA with prefix1 in an NDP Prefix Information option (PI)
- // with non-zero valid & preferred lifetimes.
- tempAddr1 := newTempAddr(addr1.Address)
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix1, true, true, 100, 100))
- expectAutoGenAddrEvent(tempAddr1, newAddr)
- expectDADEventAsync(tempAddr1.Address)
- if mismatch := addressCheck(s.NICInfo()[1].ProtocolAddresses, []tcpip.AddressWithPrefix{addr1, tempAddr1}, nil); mismatch != "" {
- t.Fatal(mismatch)
- }
-
- // Receive an RA with prefix1 in an NDP Prefix Information option (PI)
- // with preferred lifetime > valid lifetime. No address should be
- // generated from such a PI.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix1, true, true, 5, 6))
- select {
- case e := <-ndpDisp.autoGenAddrC:
- t.Fatalf("unexpectedly auto-generated an address with preferred lifetime > valid lifetime; event = %+v", e)
- default:
- }
- if mismatch := addressCheck(s.NICInfo()[nicID].ProtocolAddresses, []tcpip.AddressWithPrefix{addr1, tempAddr1}, nil); mismatch != "" {
- t.Fatal(mismatch)
- }
-
- // Receive an RA with prefix2 in a PI with a valid lifetime that exceeds
- // the minimum and won't be reached in this test.
- tempAddr2 := newTempAddr(addr2.Address)
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix2, true, true, 2*minVLSeconds, 2*minVLSeconds))
- expectAutoGenAddrEvent(addr2, newAddr)
- expectDADEventAsync(addr2.Address)
- expectAutoGenAddrEventAsync(tempAddr2, newAddr)
- expectDADEventAsync(tempAddr2.Address)
- if mismatch := addressCheck(s.NICInfo()[nicID].ProtocolAddresses, []tcpip.AddressWithPrefix{addr1, tempAddr1, addr2, tempAddr2}, nil); mismatch != "" {
- t.Fatal(mismatch)
- }
-
- // Deprecate prefix1.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix1, true, true, 100, 0))
- expectAutoGenAddrEvent(addr1, deprecatedAddr)
- expectAutoGenAddrEvent(tempAddr1, deprecatedAddr)
- if mismatch := addressCheck(s.NICInfo()[nicID].ProtocolAddresses, []tcpip.AddressWithPrefix{addr1, tempAddr1, addr2, tempAddr2}, nil); mismatch != "" {
- t.Fatal(mismatch)
- }
-
- // Refresh lifetimes for prefix1.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix1, true, true, 100, 100))
- if mismatch := addressCheck(s.NICInfo()[nicID].ProtocolAddresses, []tcpip.AddressWithPrefix{addr1, tempAddr1, addr2, tempAddr2}, nil); mismatch != "" {
- t.Fatal(mismatch)
- }
-
- // Reduce valid lifetime and deprecate addresses of prefix1.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix1, true, true, minVLSeconds, 0))
- expectAutoGenAddrEvent(addr1, deprecatedAddr)
- expectAutoGenAddrEvent(tempAddr1, deprecatedAddr)
- if mismatch := addressCheck(s.NICInfo()[nicID].ProtocolAddresses, []tcpip.AddressWithPrefix{addr1, tempAddr1, addr2, tempAddr2}, nil); mismatch != "" {
- t.Fatal(mismatch)
- }
-
- // Wait for addrs of prefix1 to be invalidated. They should be
- // invalidated at the same time.
- clock.Advance(ipv6.MinPrefixInformationValidLifetimeForUpdate)
- select {
- case e := <-ndpDisp.autoGenAddrC:
- var nextAddr tcpip.AddressWithPrefix
- if e.addr == addr1 {
- if diff := checkAutoGenAddrEvent(e, addr1, invalidatedAddr); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- nextAddr = tempAddr1
- } else {
- if diff := checkAutoGenAddrEvent(e, tempAddr1, invalidatedAddr); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- nextAddr = addr1
- }
-
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, nextAddr, invalidatedAddr); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("timed out waiting for addr auto gen event")
- }
- default:
- t.Fatal("timed out waiting for addr auto gen event")
- }
- if mismatch := addressCheck(s.NICInfo()[nicID].ProtocolAddresses, []tcpip.AddressWithPrefix{addr2, tempAddr2}, []tcpip.AddressWithPrefix{addr1, tempAddr1}); mismatch != "" {
- t.Fatal(mismatch)
- }
-
- // Receive an RA with prefix2 in a PI w/ 0 lifetimes.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix2, true, true, 0, 0))
- expectAutoGenAddrEvent(addr2, deprecatedAddr)
- expectAutoGenAddrEvent(tempAddr2, deprecatedAddr)
- select {
- case e := <-ndpDisp.autoGenAddrC:
- t.Errorf("got unexpected auto gen addr event = %+v", e)
- default:
- }
- if mismatch := addressCheck(s.NICInfo()[nicID].ProtocolAddresses, []tcpip.AddressWithPrefix{addr2, tempAddr2}, []tcpip.AddressWithPrefix{addr1, tempAddr1}); mismatch != "" {
- t.Fatal(mismatch)
- }
- })
- }
-}
-
-// TestNoAutoGenTempAddrForLinkLocal tests that temporary SLAAC addresses are
-// not generated for auto-generated link-local addresses.
-func TestNoAutoGenTempAddrForLinkLocal(t *testing.T) {
- const nicID = 1
-
- tests := []struct {
- name string
- dupAddrTransmits uint8
- retransmitTimer time.Duration
- }{
- {
- name: "DAD disabled",
- },
- {
- name: "DAD enabled",
- dupAddrTransmits: 1,
- retransmitTimer: time.Second,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- ndpDisp := ndpDispatcher{
- dadC: make(chan ndpDADEvent, 1),
- autoGenAddrC: make(chan ndpAutoGenAddrEvent, 1),
- }
- e := channel.New(0, 1280, linkAddr1)
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- NDPConfigs: ipv6.NDPConfigurations{
- AutoGenTempGlobalAddresses: true,
- },
- NDPDisp: &ndpDisp,
- AutoGenLinkLocal: true,
- })},
- Clock: clock,
- })
-
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
-
- // The stable link-local address should auto-generate and resolve DAD.
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, tcpip.AddressWithPrefix{Address: llAddr1, PrefixLen: header.IIDOffsetInIPv6Address * 8}, newAddr); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("expected addr auto gen event")
- }
- clock.Advance(time.Duration(test.dupAddrTransmits) * test.retransmitTimer)
- select {
- case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, llAddr1, &stack.DADSucceeded{}); diff != "" {
- t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("timed out waiting for DAD event")
- }
-
- // No new addresses should be generated.
- select {
- case e := <-ndpDisp.autoGenAddrC:
- t.Errorf("got unxpected auto gen addr event = %+v", e)
- default:
- }
- })
- }
-}
-
-// TestNoAutoGenTempAddrWithoutStableAddr tests that a temporary SLAAC address
-// will not be generated until after DAD completes, even if a new Router
-// Advertisement is received to refresh lifetimes.
-func TestNoAutoGenTempAddrWithoutStableAddr(t *testing.T) {
- const (
- nicID = 1
- dadTransmits = 1
- retransmitTimer = 2 * time.Second
- )
-
- prefix, _, addr := prefixSubnetAddr(0, linkAddr1)
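- // Pre-compute the temporary address expected for the prefix's stable
- // address. Neither the test nor the stack below sets a temporary IID seed,
- // so both derive the same IID history.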
- var tempIIDHistory [header.IIDSize]byte
- header.InitialTempIID(tempIIDHistory[:], nil, nicID)
- tempAddr := header.GenerateTempIPv6SLAACAddr(tempIIDHistory[:], addr.Address)
-
- ndpDisp := ndpDispatcher{
- dadC: make(chan ndpDADEvent, 1),
- autoGenAddrC: make(chan ndpAutoGenAddrEvent, 1),
- }
- e := channel.New(0, 1280, linkAddr1)
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- DADConfigs: stack.DADConfigurations{
- DupAddrDetectTransmits: dadTransmits,
- RetransmitTimer: retransmitTimer,
- },
- NDPConfigs: ipv6.NDPConfigurations{
- HandleRAs: ipv6.HandlingRAsEnabledWhenForwardingDisabled,
- AutoGenGlobalAddresses: true,
- AutoGenTempGlobalAddresses: true,
- },
- NDPDisp: &ndpDisp,
- })},
- Clock: clock,
- })
-
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
-
- // Receive an RA to trigger SLAAC for prefix.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix, true, true, 100, 100))
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, addr, newAddr); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("expected addr auto gen event")
- }
-
- // DAD on the stable address for prefix has not yet completed. Receiving a new
- // RA that would refresh lifetimes should not generate a temporary SLAAC
- // address for the prefix.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix, true, true, 100, 100))
- select {
- case e := <-ndpDisp.autoGenAddrC:
- t.Fatalf("unexpected auto gen addr event = %+v", e)
- default:
- }
-
- // Wait for DAD to complete for the stable address then expect the temporary
- // address to be generated.
- clock.Advance(dadTransmits * retransmitTimer)
- select {
- case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, addr.Address, &stack.DADSucceeded{}); diff != "" {
- t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("timed out waiting for DAD event")
- }
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, tempAddr, newAddr); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("timed out waiting for addr auto gen event")
- }
-}
-
-// TestAutoGenTempAddrRegen tests that temporary SLAAC addresses are
-// regenerated.
-func TestAutoGenTempAddrRegen(t *testing.T) {
- const (
- nicID = 1
- regenAdv = 2 * time.Second
-
- numTempAddrs = 3
- maxTempAddrValidLifetime = numTempAddrs * ipv6.MinPrefixInformationValidLifetimeForUpdate
- )
-
- prefix, _, addr := prefixSubnetAddr(0, linkAddr1)
- var tempIIDHistory [header.IIDSize]byte
- header.InitialTempIID(tempIIDHistory[:], nil, nicID)
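- // Pre-compute the sequence of temporary addresses the stack is expected to
- // generate for the prefix's stable address.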
- var tempAddrs [numTempAddrs]tcpip.AddressWithPrefix
- for i := 0; i < len(tempAddrs); i++ {
- tempAddrs[i] = header.GenerateTempIPv6SLAACAddr(tempIIDHistory[:], addr.Address)
- }
-
- ndpDisp := ndpDispatcher{
- autoGenAddrC: make(chan ndpAutoGenAddrEvent, 2),
- }
- e := channel.New(0, 1280, linkAddr1)
- ndpConfigs := ipv6.NDPConfigurations{
- HandleRAs: ipv6.HandlingRAsEnabledWhenForwardingDisabled,
- AutoGenGlobalAddresses: true,
- AutoGenTempGlobalAddresses: true,
- RegenAdvanceDuration: regenAdv,
- MaxTempAddrValidLifetime: maxTempAddrValidLifetime,
- MaxTempAddrPreferredLifetime: ipv6.MinPrefixInformationValidLifetimeForUpdate,
- }
- clock := faketime.NewManualClock()
- randSource := savingRandSource{
- s: rand.NewSource(time.Now().UnixNano()),
- }
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- NDPConfigs: ndpConfigs,
- NDPDisp: &ndpDisp,
- })},
- Clock: clock,
- RandSource: &randSource,
- })
-
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
-
- expectAutoGenAddrEvent := func(addr tcpip.AddressWithPrefix, eventType ndpAutoGenAddrEventType) {
- t.Helper()
-
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, addr, eventType); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("expected addr auto gen event")
- }
- }
-
- expectAutoGenAddrEventAsync := func(addr tcpip.AddressWithPrefix, eventType ndpAutoGenAddrEventType, timeout time.Duration) {
- t.Helper()
-
- clock.Advance(timeout)
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, addr, eventType); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("timed out waiting for addr auto gen event")
- }
- }
-
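- // Recover the desync factor from the random value recorded by
- // savingRandSource so the regeneration times expected below line up with
- // the schedule the stack chose.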
- tempDesyncFactor := time.Duration(randSource.lastInt63) % ipv6.MaxDesyncFactor
- effectiveMaxTempAddrPL := ipv6.MinPrefixInformationValidLifetimeForUpdate - tempDesyncFactor
- // The time since the last regeneration before a new temporary address is
- // generated.
- tempAddrRegenenerationTime := effectiveMaxTempAddrPL - regenAdv
-
- // Receive an RA with the prefix in an NDP Prefix Information option (PI)
- // with non-zero valid & preferred lifetimes.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix, true, true, minVLSeconds, minVLSeconds))
- expectAutoGenAddrEvent(addr, newAddr)
- expectAutoGenAddrEvent(tempAddrs[0], newAddr)
- if mismatch := addressCheck(s.NICInfo()[nicID].ProtocolAddresses, []tcpip.AddressWithPrefix{addr, tempAddrs[0]}, nil); mismatch != "" {
- t.Fatal(mismatch)
- }
-
- // Wait for regeneration
- expectAutoGenAddrEventAsync(tempAddrs[1], newAddr, tempAddrRegenenerationTime)
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix, true, true, minVLSeconds, minVLSeconds))
- if mismatch := addressCheck(s.NICInfo()[nicID].ProtocolAddresses, []tcpip.AddressWithPrefix{addr, tempAddrs[0], tempAddrs[1]}, nil); mismatch != "" {
- t.Fatal(mismatch)
- }
- expectAutoGenAddrEventAsync(tempAddrs[0], deprecatedAddr, regenAdv)
-
- // Wait for regeneration
- expectAutoGenAddrEventAsync(tempAddrs[2], newAddr, tempAddrRegenenerationTime-regenAdv)
- expectAutoGenAddrEventAsync(tempAddrs[1], deprecatedAddr, regenAdv)
-
- // Stop generating temporary addresses
- ndpConfigs.AutoGenTempGlobalAddresses = false
- if ipv6Ep, err := s.GetNetworkEndpoint(nicID, header.IPv6ProtocolNumber); err != nil {
- t.Fatalf("s.GetNetworkEndpoint(%d, %d): %s", nicID, header.IPv6ProtocolNumber, err)
- } else {
- ndpEP := ipv6Ep.(ipv6.NDPEndpoint)
- ndpEP.SetNDPConfigurations(ndpConfigs)
- }
-
- // Refresh lifetimes and wait for the last temporary address to be deprecated.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix, true, true, minVLSeconds, minVLSeconds))
- expectAutoGenAddrEventAsync(tempAddrs[2], deprecatedAddr, effectiveMaxTempAddrPL-regenAdv)
-
- // Refresh lifetimes such that the prefix is valid and preferred forever.
- //
- // This should not affect the lifetimes of temporary addresses because they
- // are capped by the maximum valid and preferred lifetimes for temporary
- // addresses.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix, true, true, infiniteLifetimeSeconds, infiniteLifetimeSeconds))
-
- // Wait for all the temporary addresses to get invalidated.
- invalidateAfter := maxTempAddrValidLifetime - clock.NowMonotonic().Sub(tcpip.MonotonicTime{})
- for _, addr := range tempAddrs {
- expectAutoGenAddrEventAsync(addr, invalidatedAddr, invalidateAfter)
- invalidateAfter = tempAddrRegenenerationTime
- }
- if mismatch := addressCheck(s.NICInfo()[nicID].ProtocolAddresses, []tcpip.AddressWithPrefix{addr}, tempAddrs[:]); mismatch != "" {
- t.Fatal(mismatch)
- }
-}
-
-// TestAutoGenTempAddrRegenJobUpdates tests that a temporary address's
-// regeneration job gets updated when refreshing the address's lifetimes.
-func TestAutoGenTempAddrRegenJobUpdates(t *testing.T) {
- const (
- nicID = 1
- regenAdv = 2 * time.Second
-
- numTempAddrs = 3
- maxTempAddrPreferredLifetime = ipv6.MinPrefixInformationValidLifetimeForUpdate
- maxTempAddrPreferredLifetimeSeconds = uint32(maxTempAddrPreferredLifetime / time.Second)
- )
-
- prefix, _, addr := prefixSubnetAddr(0, linkAddr1)
- var tempIIDHistory [header.IIDSize]byte
- header.InitialTempIID(tempIIDHistory[:], nil, nicID)
- var tempAddrs [numTempAddrs]tcpip.AddressWithPrefix
- for i := 0; i < len(tempAddrs); i++ {
- tempAddrs[i] = header.GenerateTempIPv6SLAACAddr(tempIIDHistory[:], addr.Address)
- }
-
- ndpDisp := ndpDispatcher{
- autoGenAddrC: make(chan ndpAutoGenAddrEvent, 2),
- }
- e := channel.New(0, 1280, linkAddr1)
- ndpConfigs := ipv6.NDPConfigurations{
- HandleRAs: ipv6.HandlingRAsEnabledWhenForwardingDisabled,
- AutoGenGlobalAddresses: true,
- AutoGenTempGlobalAddresses: true,
- RegenAdvanceDuration: regenAdv,
- MaxTempAddrPreferredLifetime: maxTempAddrPreferredLifetime,
- MaxTempAddrValidLifetime: maxTempAddrPreferredLifetime * 2,
- }
- clock := faketime.NewManualClock()
- initialTime := clock.NowMonotonic()
- randSource := savingRandSource{
- s: rand.NewSource(time.Now().UnixNano()),
- }
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- NDPConfigs: ndpConfigs,
- NDPDisp: &ndpDisp,
- })},
- Clock: clock,
- RandSource: &randSource,
- })
-
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
-
- tempDesyncFactor := time.Duration(randSource.lastInt63) % ipv6.MaxDesyncFactor
-
- expectAutoGenAddrEvent := func(addr tcpip.AddressWithPrefix, eventType ndpAutoGenAddrEventType) {
- t.Helper()
-
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, addr, eventType); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("expected addr auto gen event")
- }
- }
-
- expectAutoGenAddrEventAsync := func(addr tcpip.AddressWithPrefix, eventType ndpAutoGenAddrEventType, timeout time.Duration) {
- t.Helper()
-
- clock.Advance(timeout)
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, addr, eventType); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("timed out waiting for addr auto gen event")
- }
- }
-
- // Receive an RA with the prefix in an NDP Prefix Information option (PI)
- // with non-zero valid & preferred lifetimes.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix, true, true, maxTempAddrPreferredLifetimeSeconds, maxTempAddrPreferredLifetimeSeconds))
- expectAutoGenAddrEvent(addr, newAddr)
- expectAutoGenAddrEvent(tempAddrs[0], newAddr)
- if mismatch := addressCheck(s.NICInfo()[nicID].ProtocolAddresses, []tcpip.AddressWithPrefix{addr, tempAddrs[0]}, nil); mismatch != "" {
- t.Fatal(mismatch)
- }
-
- // Deprecate the prefix.
- //
- // No new temporary address should be generated while the prefix is
- // deprecated, even once the regeneration time has passed.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix, true, true, maxTempAddrPreferredLifetimeSeconds, 0))
- expectAutoGenAddrEvent(addr, deprecatedAddr)
- expectAutoGenAddrEvent(tempAddrs[0], deprecatedAddr)
- select {
- case e := <-ndpDisp.autoGenAddrC:
- t.Fatalf("unexpected auto gen addr event = %#v", e)
- default:
- }
-
- effectiveMaxTempAddrPL := maxTempAddrPreferredLifetime - tempDesyncFactor
- // The duration after a temporary address is generated before the next one
- // is regenerated.
- tempAddrRegenenerationTime := effectiveMaxTempAddrPL - regenAdv
-
- // Advance the clock by the regeneration time but don't expect a new temporary
- // address as the prefix is deprecated.
- clock.Advance(tempAddrRegenenerationTime)
- select {
- case e := <-ndpDisp.autoGenAddrC:
- t.Fatalf("unexpected auto gen addr event = %#v", e)
- default:
- }
-
- // Prefer the prefix again.
- //
- // A new temporary address should immediately be generated since the
- // regeneration time has already passed since the last address was generated
- // - this regeneration does not depend on a job.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix, true, true, maxTempAddrPreferredLifetimeSeconds, maxTempAddrPreferredLifetimeSeconds))
- expectAutoGenAddrEvent(tempAddrs[1], newAddr)
- // Wait for the first temporary address to be deprecated.
- expectAutoGenAddrEventAsync(tempAddrs[0], deprecatedAddr, regenAdv)
- select {
- case e := <-ndpDisp.autoGenAddrC:
- t.Fatalf("unexpected auto gen addr event = %s", e)
- default:
- }
-
- // Increase the maximum lifetimes for temporary addresses to large values
- // then refresh the lifetimes of the prefix.
- //
- // A new address should not be generated after the regeneration time that was
- // expected for the previous check. This is because the preferred lifetime for
- // the temporary addresses has increased, so it will take more time to
- // regenerate a new temporary address. Note, new addresses are only
- // regenerated after the preferred lifetime minus the regeneration advance
- // duration has passed.
- const largeLifetimeSeconds = minVLSeconds * 2
- const largeLifetime = time.Duration(largeLifetimeSeconds) * time.Second
- ndpConfigs.MaxTempAddrValidLifetime = 2 * largeLifetime
- ndpConfigs.MaxTempAddrPreferredLifetime = largeLifetime
- ipv6Ep, err := s.GetNetworkEndpoint(nicID, header.IPv6ProtocolNumber)
- if err != nil {
- t.Fatalf("s.GetNetworkEndpoint(%d, %d): %s", nicID, header.IPv6ProtocolNumber, err)
- }
- ndpEP := ipv6Ep.(ipv6.NDPEndpoint)
- ndpEP.SetNDPConfigurations(ndpConfigs)
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix, true, true, largeLifetimeSeconds, largeLifetimeSeconds))
- timeSinceInitialTime := clock.NowMonotonic().Sub(initialTime)
- clock.Advance(largeLifetime - timeSinceInitialTime)
- expectAutoGenAddrEvent(tempAddrs[0], deprecatedAddr)
- // Offset the advancement of time to test the first temporary address's
- // deprecation after the second was generated.
- advLess := regenAdv
- expectAutoGenAddrEventAsync(tempAddrs[2], newAddr, timeSinceInitialTime-advLess-(tempDesyncFactor+regenAdv))
- expectAutoGenAddrEventAsync(tempAddrs[1], deprecatedAddr, regenAdv)
- select {
- case e := <-ndpDisp.autoGenAddrC:
- t.Fatalf("unexpected auto gen addr event = %+v", e)
- default:
- }
-}
-
-// TestMixedSLAACAddrConflictRegen tests SLAAC address regeneration in response
-// to a mix of DAD conflicts and NIC-local conflicts.
-func TestMixedSLAACAddrConflictRegen(t *testing.T) {
- const (
- nicID = 1
- nicName = "nic"
- lifetimeSeconds = 9999
- // From stack.maxSLAACAddrLocalRegenAttempts
- maxSLAACAddrLocalRegenAttempts = 10
- // We use 2 more addresses than the maximum local regeneration attempts
- // because we want to also trigger regeneration in response to a DAD
- // conflict in this test.
- maxAddrs = maxSLAACAddrLocalRegenAttempts + 2
- dupAddrTransmits = 1
- retransmitTimer = time.Second
- )
-
- var tempIIDHistoryWithModifiedEUI64 [header.IIDSize]byte
- header.InitialTempIID(tempIIDHistoryWithModifiedEUI64[:], nil, nicID)
-
- var tempIIDHistoryWithOpaqueIID [header.IIDSize]byte
- header.InitialTempIID(tempIIDHistoryWithOpaqueIID[:], nil, nicID)
-
- prefix, subnet, stableAddrWithModifiedEUI64 := prefixSubnetAddr(0, linkAddr1)
- var stableAddrsWithOpaqueIID [maxAddrs]tcpip.AddressWithPrefix
- var tempAddrsWithOpaqueIID [maxAddrs]tcpip.AddressWithPrefix
- var tempAddrsWithModifiedEUI64 [maxAddrs]tcpip.AddressWithPrefix
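- // Pre-compute the stable addresses the stack would generate for the prefix
- // using opaque IIDs, one per regeneration attempt (the attempt index is
- // used as the DAD counter), along with the temporary addresses derived
- // from them.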
- addrBytes := []byte(subnet.ID())
- for i := 0; i < maxAddrs; i++ {
- stableAddrsWithOpaqueIID[i] = tcpip.AddressWithPrefix{
- Address: tcpip.Address(header.AppendOpaqueInterfaceIdentifier(addrBytes[:header.IIDOffsetInIPv6Address], subnet, nicName, uint8(i), nil)),
- PrefixLen: header.IIDOffsetInIPv6Address * 8,
- }
- // When generating temporary addresses, the resolved stable address for the
- // SLAAC prefix will be the first stable address generated for the prefix,
- // as we do not simulate address conflicts for the stable addresses in tests
- // involving temporary addresses. Address conflicts for stable addresses are
- // covered by their own tests.
- tempAddrsWithOpaqueIID[i] = header.GenerateTempIPv6SLAACAddr(tempIIDHistoryWithOpaqueIID[:], stableAddrsWithOpaqueIID[0].Address)
- tempAddrsWithModifiedEUI64[i] = header.GenerateTempIPv6SLAACAddr(tempIIDHistoryWithModifiedEUI64[:], stableAddrWithModifiedEUI64.Address)
- }
-
- tests := []struct {
- name string
- addrs []tcpip.AddressWithPrefix
- tempAddrs bool
- initialExpect tcpip.AddressWithPrefix
- maxAddrs int
- nicNameFromID func(tcpip.NICID, string) string
- }{
- {
- name: "Stable addresses with opaque IIDs",
- addrs: stableAddrsWithOpaqueIID[:],
- maxAddrs: 1,
- nicNameFromID: func(tcpip.NICID, string) string {
- return nicName
- },
- },
- {
- name: "Temporary addresses with opaque IIDs",
- addrs: tempAddrsWithOpaqueIID[:],
- tempAddrs: true,
- initialExpect: stableAddrsWithOpaqueIID[0],
- maxAddrs: 1 /* initial (stable) address */ + maxSLAACAddrLocalRegenAttempts,
- nicNameFromID: func(tcpip.NICID, string) string {
- return nicName
- },
- },
- {
- name: "Temporary addresses with modified EUI64",
- addrs: tempAddrsWithModifiedEUI64[:],
- tempAddrs: true,
- maxAddrs: 1 /* initial (stable) address */ + maxSLAACAddrLocalRegenAttempts,
- initialExpect: stableAddrWithModifiedEUI64,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- ndpDisp := ndpDispatcher{
- // We may receive a deprecated and invalidated event for each SLAAC
- // address that is assigned.
- autoGenAddrC: make(chan ndpAutoGenAddrEvent, test.maxAddrs*2),
- }
- e := channel.New(0, 1280, linkAddr1)
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- Clock: clock,
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- NDPConfigs: ipv6.NDPConfigurations{
- HandleRAs: ipv6.HandlingRAsEnabledWhenForwardingDisabled,
- AutoGenGlobalAddresses: true,
- AutoGenTempGlobalAddresses: test.tempAddrs,
- AutoGenAddressConflictRetries: 1,
- },
- NDPDisp: &ndpDisp,
- OpaqueIIDOpts: ipv6.OpaqueInterfaceIdentifierOptions{
- NICNameFromID: test.nicNameFromID,
- },
- })},
- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},
- })
-
- s.SetRouteTable([]tcpip.Route{{
- Destination: header.IPv6EmptySubnet,
- Gateway: llAddr2,
- NIC: nicID,
- }})
-
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
-
- manuallyAssignedAddresses := make(map[tcpip.Address]struct{})
- for j := 0; j < len(test.addrs)-1; j++ {
- // The NIC will not attempt to generate an address in response to a
- // NIC-local conflict after some maximum number of attempts. We skip
- // creating a conflict for the address that would be generated as part
- // of the last attempt so we can simulate a DAD conflict for this
- // address and restart the NIC-local generation process.
- if j == maxSLAACAddrLocalRegenAttempts-1 {
- continue
- }
-
- if err := s.AddAddress(nicID, ipv6.ProtocolNumber, test.addrs[j].Address); err != nil {
- t.Fatalf("s.AddAddress(%d, %d, %s): %s", nicID, ipv6.ProtocolNumber, test.addrs[j].Address, err)
- }
-
- manuallyAssignedAddresses[test.addrs[j].Address] = struct{}{}
- }
-
- expectAutoGenAddrEvent := func(addr tcpip.AddressWithPrefix, eventType ndpAutoGenAddrEventType) {
- t.Helper()
-
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, addr, eventType); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("expected addr auto gen event")
- }
- }
-
- expectAutoGenAddrAsyncEvent := func(addr tcpip.AddressWithPrefix, eventType ndpAutoGenAddrEventType) {
- t.Helper()
-
- if diff := checkAutoGenAddrEvent(<-ndpDisp.autoGenAddrC, addr, eventType); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- }
-
- expectDADEventAsync := func(addr tcpip.Address) {
- t.Helper()
-
- clock.Advance(dupAddrTransmits * retransmitTimer)
- if diff := checkDADEvent(<-ndpDisp.dadC, nicID, addr, &stack.DADSucceeded{}); diff != "" {
- t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
- }
- }
-
- // Enable DAD.
- ndpDisp.dadC = make(chan ndpDADEvent, 2)
- if ipv6Ep, err := s.GetNetworkEndpoint(nicID, header.IPv6ProtocolNumber); err != nil {
- t.Fatalf("s.GetNetworkEndpoint(%d, %d): %s", nicID, header.IPv6ProtocolNumber, err)
- } else {
- ndpEP := ipv6Ep.(stack.DuplicateAddressDetector)
- ndpEP.SetDADConfigurations(stack.DADConfigurations{
- DupAddrDetectTransmits: dupAddrTransmits,
- RetransmitTimer: retransmitTimer,
- })
- }
-
- // Do SLAAC for prefix.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix, true, true, lifetimeSeconds, lifetimeSeconds))
- if test.initialExpect != (tcpip.AddressWithPrefix{}) {
- expectAutoGenAddrEvent(test.initialExpect, newAddr)
- expectDADEventAsync(test.initialExpect.Address)
- }
-
- // The last local generation attempt should succeed, but we introduce a
- // DAD failure to restart the local generation process.
- addr := test.addrs[maxSLAACAddrLocalRegenAttempts-1]
- expectAutoGenAddrAsyncEvent(addr, newAddr)
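- // Receiving a neighbor solicitation that targets the tentative address
- // simulates another node performing DAD for the same address, which the
- // stack treats as a duplicate.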
- rxNDPSolicit(e, addr.Address)
- select {
- case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, addr.Address, &stack.DADDupAddrDetected{}); diff != "" {
- t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("expected DAD event")
- }
- expectAutoGenAddrEvent(addr, invalidatedAddr)
-
- // The last address generated should resolve DAD.
- addr = test.addrs[len(test.addrs)-1]
- expectAutoGenAddrAsyncEvent(addr, newAddr)
- expectDADEventAsync(addr.Address)
-
- select {
- case e := <-ndpDisp.autoGenAddrC:
- t.Fatalf("unexpected auto gen addr event = %+v", e)
- default:
- }
-
- // Wait for all the SLAAC addresses to be invalidated.
- clock.Advance(lifetimeSeconds * time.Second)
- gotAddresses := make(map[tcpip.Address]struct{})
- for _, a := range s.NICInfo()[nicID].ProtocolAddresses {
- gotAddresses[a.AddressWithPrefix.Address] = struct{}{}
- }
- if diff := cmp.Diff(manuallyAssignedAddresses, gotAddresses); diff != "" {
- t.Fatalf("assigned addresses mismatch (-want +got):\n%s", diff)
- }
- })
- }
-}
-
-// stackAndNdpDispatcherWithDefaultRoute returns an ndpDispatcher,
-// channel.Endpoint, stack.Stack and faketime.ManualClock.
-//
-// The stack.Stack will have a default route through the router (llAddr3)
-// installed and a static neighbor entry (linkAddr3) added for the router.
-func stackAndNdpDispatcherWithDefaultRoute(t *testing.T, nicID tcpip.NICID) (*ndpDispatcher, *channel.Endpoint, *stack.Stack, *faketime.ManualClock) {
- t.Helper()
- ndpDisp := &ndpDispatcher{
- autoGenAddrC: make(chan ndpAutoGenAddrEvent, 1),
- }
- e := channel.New(0, 1280, linkAddr1)
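- // Require link address resolution on this endpoint so that the default
- // route through llAddr3 resolves via the static neighbor entry added below.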
- e.LinkEPCapabilities |= stack.CapabilityResolutionRequired
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- NDPConfigs: ipv6.NDPConfigurations{
- HandleRAs: ipv6.HandlingRAsEnabledWhenForwardingDisabled,
- AutoGenGlobalAddresses: true,
- },
- NDPDisp: ndpDisp,
- })},
- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},
- Clock: clock,
- })
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
- s.SetRouteTable([]tcpip.Route{{
- Destination: header.IPv6EmptySubnet,
- Gateway: llAddr3,
- NIC: nicID,
- }})
-
- if err := s.AddStaticNeighbor(nicID, ipv6.ProtocolNumber, llAddr3, linkAddr3); err != nil {
- t.Fatalf("s.AddStaticNeighbor(%d, %d, %s, %s): %s", nicID, ipv6.ProtocolNumber, llAddr3, linkAddr3, err)
- }
- return ndpDisp, e, s, clock
-}
-
-// addrForNewConnectionTo returns the local address used when creating a new
-// connection to addr.
-func addrForNewConnectionTo(t *testing.T, s *stack.Stack, addr tcpip.FullAddress) tcpip.Address {
- t.Helper()
-
- wq := waiter.Queue{}
- we, ch := waiter.NewChannelEntry(nil)
- wq.EventRegister(&we, waiter.ReadableEvents)
- defer wq.EventUnregister(&we)
- defer close(ch)
- ep, err := s.NewEndpoint(header.UDPProtocolNumber, header.IPv6ProtocolNumber, &wq)
- if err != nil {
- t.Fatalf("s.NewEndpoint(%d, %d, _): %s", header.UDPProtocolNumber, header.IPv6ProtocolNumber, err)
- }
- defer ep.Close()
- ep.SocketOptions().SetV6Only(true)
- if err := ep.Connect(addr); err != nil {
- t.Fatalf("ep.Connect(%+v): %s", addr, err)
- }
- got, err := ep.GetLocalAddress()
- if err != nil {
- t.Fatalf("ep.GetLocalAddress(): %s", err)
- }
- return got.Addr
-}
-
-// addrForNewConnection returns the local address used when creating a new
-// connection.
-func addrForNewConnection(t *testing.T, s *stack.Stack) tcpip.Address {
- t.Helper()
-
- return addrForNewConnectionTo(t, s, dstAddr)
-}
-
-// addrForNewConnectionWithAddr returns the local address used when creating a
-// new connection with a specific local address.
-func addrForNewConnectionWithAddr(t *testing.T, s *stack.Stack, addr tcpip.FullAddress) tcpip.Address {
- t.Helper()
-
- wq := waiter.Queue{}
- we, ch := waiter.NewChannelEntry(nil)
- wq.EventRegister(&we, waiter.ReadableEvents)
- defer wq.EventUnregister(&we)
- defer close(ch)
- ep, err := s.NewEndpoint(header.UDPProtocolNumber, header.IPv6ProtocolNumber, &wq)
- if err != nil {
- t.Fatalf("s.NewEndpoint(%d, %d, _): %s", header.UDPProtocolNumber, header.IPv6ProtocolNumber, err)
- }
- defer ep.Close()
- ep.SocketOptions().SetV6Only(true)
- if err := ep.Bind(addr); err != nil {
- t.Fatalf("ep.Bind(%+v): %s", addr, err)
- }
- if err := ep.Connect(dstAddr); err != nil {
- t.Fatalf("ep.Connect(%+v): %s", dstAddr, err)
- }
- got, err := ep.GetLocalAddress()
- if err != nil {
- t.Fatalf("ep.GetLocalAddress(): %s", err)
- }
- return got.Addr
-}
-
-// TestAutoGenAddrDeprecateFromPI tests deprecating a SLAAC address when
-// receiving a PI with 0 preferred lifetime.
-func TestAutoGenAddrDeprecateFromPI(t *testing.T) {
- const nicID = 1
-
- prefix1, _, addr1 := prefixSubnetAddr(0, linkAddr1)
- prefix2, _, addr2 := prefixSubnetAddr(1, linkAddr1)
-
- ndpDisp, e, s, _ := stackAndNdpDispatcherWithDefaultRoute(t, nicID)
-
- expectAutoGenAddrEvent := func(addr tcpip.AddressWithPrefix, eventType ndpAutoGenAddrEventType) {
- t.Helper()
-
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, addr, eventType); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("expected addr auto gen event")
- }
- }
-
- expectPrimaryAddr := func(addr tcpip.AddressWithPrefix) {
- t.Helper()
-
- if err := checkGetMainNICAddress(s, nicID, header.IPv6ProtocolNumber, addr); err != nil {
- t.Fatal(err)
- }
-
- if got := addrForNewConnection(t, s); got != addr.Address {
- t.Errorf("got addrForNewConnection = %s, want = %s", got, addr.Address)
- }
- }
-
- // Receive PI for prefix1.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix1, true, true, 100, 100))
- expectAutoGenAddrEvent(addr1, newAddr)
- if !containsV6Addr(s.NICInfo()[nicID].ProtocolAddresses, addr1) {
- t.Fatalf("should have %s in the list of addresses", addr1)
- }
- expectPrimaryAddr(addr1)
-
- // Deprecate addr for prefix1 immediately.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix1, true, true, 100, 0))
- expectAutoGenAddrEvent(addr1, deprecatedAddr)
- if !containsV6Addr(s.NICInfo()[nicID].ProtocolAddresses, addr1) {
- t.Fatalf("should have %s in the list of addresses", addr1)
- }
- // addr should still be the primary endpoint as there are no other addresses.
- expectPrimaryAddr(addr1)
-
- // Refresh lifetimes of addr generated from prefix1.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix1, true, true, 100, 100))
- select {
- case <-ndpDisp.autoGenAddrC:
- t.Fatal("unexpectedly got an auto-generated event")
- default:
- }
- expectPrimaryAddr(addr1)
-
- // Receive PI for prefix2.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix2, true, true, 100, 100))
- expectAutoGenAddrEvent(addr2, newAddr)
- if !containsV6Addr(s.NICInfo()[nicID].ProtocolAddresses, addr2) {
- t.Fatalf("should have %s in the list of addresses", addr2)
- }
- expectPrimaryAddr(addr2)
-
- // Deprecate addr for prefix2 immediately.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix2, true, true, 100, 0))
- expectAutoGenAddrEvent(addr2, deprecatedAddr)
- if !containsV6Addr(s.NICInfo()[nicID].ProtocolAddresses, addr2) {
- t.Fatalf("should have %s in the list of addresses", addr2)
- }
- // addr1 should be the primary endpoint now since addr2 is deprecated but
- // addr1 is not.
- expectPrimaryAddr(addr1)
- // addr2 is deprecated but if explicitly requested, it should be used.
- fullAddr2 := tcpip.FullAddress{Addr: addr2.Address, NIC: nicID}
- if got := addrForNewConnectionWithAddr(t, s, fullAddr2); got != addr2.Address {
- t.Errorf("got addrForNewConnectionWithAddr(_, _, %+v) = %s, want = %s", fullAddr2, got, addr2.Address)
- }
-
- // Another PI w/ 0 preferred lifetime should not result in a deprecation
- // event.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix2, true, true, 100, 0))
- select {
- case <-ndpDisp.autoGenAddrC:
- t.Fatal("unexpectedly got an auto-generated event")
- default:
- }
- expectPrimaryAddr(addr1)
- if got := addrForNewConnectionWithAddr(t, s, fullAddr2); got != addr2.Address {
- t.Errorf("got addrForNewConnectionWithAddr(_, _, %+v) = %s, want = %s", fullAddr2, got, addr2.Address)
- }
-
- // Refresh lifetimes of addr generated from prefix2.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix2, true, true, 100, 100))
- select {
- case <-ndpDisp.autoGenAddrC:
- t.Fatal("unexpectedly got an auto-generated event")
- default:
- }
- expectPrimaryAddr(addr2)
-}
-
-// TestAutoGenAddrJobDeprecation tests that an address is properly deprecated
-// when its preferred lifetime expires.
-func TestAutoGenAddrJobDeprecation(t *testing.T) {
- const nicID = 1
-
- prefix1, _, addr1 := prefixSubnetAddr(0, linkAddr1)
- prefix2, _, addr2 := prefixSubnetAddr(1, linkAddr1)
-
- ndpDisp, e, s, clock := stackAndNdpDispatcherWithDefaultRoute(t, nicID)
-
- expectAutoGenAddrEvent := func(addr tcpip.AddressWithPrefix, eventType ndpAutoGenAddrEventType) {
- t.Helper()
-
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, addr, eventType); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("expected addr auto gen event")
- }
- }
-
- expectAutoGenAddrEventAfter := func(addr tcpip.AddressWithPrefix, eventType ndpAutoGenAddrEventType, timeout time.Duration) {
- t.Helper()
-
- clock.Advance(timeout)
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, addr, eventType); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("timed out waiting for addr auto gen event")
- }
- }
-
- expectPrimaryAddr := func(addr tcpip.AddressWithPrefix) {
- t.Helper()
-
- if err := checkGetMainNICAddress(s, nicID, header.IPv6ProtocolNumber, addr); err != nil {
- t.Fatal(err)
- }
-
- if got := addrForNewConnection(t, s); got != addr.Address {
- t.Errorf("got addrForNewConnection = %s, want = %s", got, addr.Address)
- }
- }
-
- // Receive PI for prefix2.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix2, true, true, infiniteLifetimeSeconds, infiniteLifetimeSeconds))
- expectAutoGenAddrEvent(addr2, newAddr)
- if !containsV6Addr(s.NICInfo()[nicID].ProtocolAddresses, addr2) {
- t.Fatalf("should have %s in the list of addresses", addr2)
- }
- expectPrimaryAddr(addr2)
-
- // Receive a PI for prefix1.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix1, true, true, 100, 90))
- expectAutoGenAddrEvent(addr1, newAddr)
- if !containsV6Addr(s.NICInfo()[nicID].ProtocolAddresses, addr1) {
- t.Fatalf("should have %s in the list of addresses", addr1)
- }
- if !containsV6Addr(s.NICInfo()[nicID].ProtocolAddresses, addr2) {
- t.Fatalf("should have %s in the list of addresses", addr2)
- }
- expectPrimaryAddr(addr1)
-
- // Refresh lifetime for addr of prefix1.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix1, true, true, minVLSeconds, minVLSeconds-1))
- select {
- case <-ndpDisp.autoGenAddrC:
- t.Fatal("unexpectedly got an auto-generated event")
- default:
- }
- expectPrimaryAddr(addr1)
-
- // Wait for addr of prefix1 to be deprecated.
- expectAutoGenAddrEventAfter(addr1, deprecatedAddr, ipv6.MinPrefixInformationValidLifetimeForUpdate-time.Second)
- if !containsV6Addr(s.NICInfo()[nicID].ProtocolAddresses, addr1) {
- t.Fatalf("should not have %s in the list of addresses", addr1)
- }
- if !containsV6Addr(s.NICInfo()[nicID].ProtocolAddresses, addr2) {
- t.Fatalf("should have %s in the list of addresses", addr2)
- }
- // addr2 should be the primary endpoint now since addr1 is deprecated but
- // addr2 is not.
- expectPrimaryAddr(addr2)
-
- // addr1 is deprecated but if explicitly requested, it should be used.
- fullAddr1 := tcpip.FullAddress{Addr: addr1.Address, NIC: nicID}
- if got := addrForNewConnectionWithAddr(t, s, fullAddr1); got != addr1.Address {
- t.Errorf("got addrForNewConnectionWithAddr(_, _, %+v) = %s, want = %s", fullAddr1, got, addr1.Address)
- }
-
- // Refresh valid lifetime for addr of prefix1, w/ 0 preferred lifetime to make
- // sure we do not get a deprecation event again.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix1, true, true, minVLSeconds, 0))
- select {
- case <-ndpDisp.autoGenAddrC:
- t.Fatal("unexpectedly got an auto-generated event")
- default:
- }
- expectPrimaryAddr(addr2)
- if got := addrForNewConnectionWithAddr(t, s, fullAddr1); got != addr1.Address {
- t.Errorf("got addrForNewConnectionWithAddr(_, _, %+v) = %s, want = %s", fullAddr1, got, addr1.Address)
- }
-
- // Refresh lifetimes for addr of prefix1.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix1, true, true, minVLSeconds, minVLSeconds-1))
- select {
- case <-ndpDisp.autoGenAddrC:
- t.Fatal("unexpectedly got an auto-generated event")
- default:
- }
- // addr1 is the primary endpoint again since it is non-deprecated now.
- expectPrimaryAddr(addr1)
-
- // Wait for addr of prefix1 to be deprecated.
- expectAutoGenAddrEventAfter(addr1, deprecatedAddr, ipv6.MinPrefixInformationValidLifetimeForUpdate-time.Second)
- if !containsV6Addr(s.NICInfo()[nicID].ProtocolAddresses, addr1) {
- t.Fatalf("should not have %s in the list of addresses", addr1)
- }
- if !containsV6Addr(s.NICInfo()[nicID].ProtocolAddresses, addr2) {
- t.Fatalf("should have %s in the list of addresses", addr2)
- }
- // addr2 should be the primary endpoint now since it is not deprecated.
- expectPrimaryAddr(addr2)
- if got := addrForNewConnectionWithAddr(t, s, fullAddr1); got != addr1.Address {
- t.Errorf("got addrForNewConnectionWithAddr(_, _, %+v) = %s, want = %s", fullAddr1, got, addr1.Address)
- }
-
- // Wait for addr of prefix1 to be invalidated.
- expectAutoGenAddrEventAfter(addr1, invalidatedAddr, time.Second)
- if containsV6Addr(s.NICInfo()[nicID].ProtocolAddresses, addr1) {
- t.Fatalf("should not have %s in the list of addresses", addr1)
- }
- if !containsV6Addr(s.NICInfo()[nicID].ProtocolAddresses, addr2) {
- t.Fatalf("should have %s in the list of addresses", addr2)
- }
- expectPrimaryAddr(addr2)
-
- // Refresh both lifetimes for addr of prefix2 to the same value.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix2, true, true, minVLSeconds, minVLSeconds))
- select {
- case <-ndpDisp.autoGenAddrC:
- t.Fatal("unexpectedly got an auto-generated event")
- default:
- }
-
- // Wait for a deprecation event followed by an invalidation event, or just
- // an invalidation event. We need to cover both cases but cannot
- // deterministically hit both because the deprecation and invalidation jobs
- // may run in either order: deprecation then invalidation, or invalidation
- // then deprecation (in which case the deprecation is cancelled by the
- // invalidation handler).
- //
- // Since we're about to cause both events to fire, we need the dispatcher
- // channel to be able to hold both.
- if got, want := len(ndpDisp.autoGenAddrC), 0; got != want {
- t.Fatalf("got len(ndpDisp.autoGenAddrC) = %d, want %d", got, want)
- }
- if got, want := cap(ndpDisp.autoGenAddrC), 1; got != want {
- t.Fatalf("got cap(ndpDisp.autoGenAddrC) = %d, want %d", got, want)
- }
- ndpDisp.autoGenAddrC = make(chan ndpAutoGenAddrEvent, 2)
- clock.Advance(ipv6.MinPrefixInformationValidLifetimeForUpdate)
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, addr2, deprecatedAddr); diff == "" {
- // If we get a deprecation event first, we should get an invalidation
- // event almost immediately after.
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, addr2, invalidatedAddr); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("timed out waiting for addr auto gen event")
- }
- } else if diff := checkAutoGenAddrEvent(e, addr2, invalidatedAddr); diff == "" {
- // If we get an invalidation event first, we should not get a deprecation
- // event after.
- select {
- case <-ndpDisp.autoGenAddrC:
- t.Fatal("unexpectedly got an auto-generated event")
- default:
- }
- } else {
- t.Fatalf("got unexpected auto-generated event")
- }
- default:
- t.Fatal("timed out waiting for addr auto gen event")
- }
- if containsV6Addr(s.NICInfo()[nicID].ProtocolAddresses, addr1) {
- t.Fatalf("should not have %s in the list of addresses", addr1)
- }
- if containsV6Addr(s.NICInfo()[nicID].ProtocolAddresses, addr2) {
- t.Fatalf("should not have %s in the list of addresses", addr2)
- }
- // Should not have any primary endpoints.
- if err := checkGetMainNICAddress(s, nicID, header.IPv6ProtocolNumber, tcpip.AddressWithPrefix{}); err != nil {
- t.Fatal(err)
- }
- wq := waiter.Queue{}
- we, ch := waiter.NewChannelEntry(nil)
- wq.EventRegister(&we, waiter.ReadableEvents)
- defer wq.EventUnregister(&we)
- defer close(ch)
- ep, err := s.NewEndpoint(header.UDPProtocolNumber, header.IPv6ProtocolNumber, &wq)
- if err != nil {
- t.Fatalf("s.NewEndpoint(%d, %d, _): %s", header.UDPProtocolNumber, header.IPv6ProtocolNumber, err)
- }
- defer ep.Close()
- ep.SocketOptions().SetV6Only(true)
-
- {
- err := ep.Connect(dstAddr)
- if _, ok := err.(*tcpip.ErrNoRoute); !ok {
- t.Errorf("got ep.Connect(%+v) = %s, want = %s", dstAddr, err, &tcpip.ErrNoRoute{})
- }
- }
-}
-
-// Tests transitioning a SLAAC address's valid lifetime between finite and
-// infinite values.
-func TestAutoGenAddrFiniteToInfiniteToFiniteVL(t *testing.T) {
- const infiniteVLSeconds = 2
-
- prefix, _, addr := prefixSubnetAddr(0, linkAddr1)
-
- tests := []struct {
- name string
- infiniteVL uint32
- }{
- {
- name: "EqualToInfiniteVL",
- infiniteVL: infiniteVLSeconds,
- },
- // Our implementation supports changing header.NDPInfiniteLifetime for tests
- // such that a packet can be received where the lifetime field has a value
- // greater than header.NDPInfiniteLifetime. Because of this, we test to make
- // sure that receiving a value greater than header.NDPInfiniteLifetime is
- // handled the same as when receiving a value equal to
- // header.NDPInfiniteLifetime.
- {
- name: "MoreThanInfiniteVL",
- infiniteVL: infiniteVLSeconds + 1,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- ndpDisp := ndpDispatcher{
- autoGenAddrC: make(chan ndpAutoGenAddrEvent, 1),
- }
- e := channel.New(0, 1280, linkAddr1)
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- NDPConfigs: ipv6.NDPConfigurations{
- HandleRAs: ipv6.HandlingRAsEnabledWhenForwardingDisabled,
- AutoGenGlobalAddresses: true,
- },
- NDPDisp: &ndpDisp,
- })},
- Clock: clock,
- })
-
- if err := s.CreateNIC(1, e); err != nil {
- t.Fatalf("CreateNIC(1) = %s", err)
- }
-
- // Receive an RA with finite prefix.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix, true, true, minVLSeconds, 0))
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, addr, newAddr); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
-
- default:
- t.Fatal("expected addr auto gen event")
- }
-
- // Receive a new RA for the prefix with an infinite VL.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix, true, true, test.infiniteVL, 0))
-
- // Receive a new RA for the prefix with a finite VL.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix, true, true, minVLSeconds, 0))
-
- clock.Advance(ipv6.MinPrefixInformationValidLifetimeForUpdate)
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, addr, invalidatedAddr); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
-
- default:
- t.Fatal("timeout waiting for addr auto gen event")
- }
- })
- }
-}
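-
- // Both cases above behave identically because any advertised lifetime at or
- // above header.NDPInfiniteLifetime is treated as infinite, i.e. no expiration
- // is scheduled for the address. The helper below is a minimal sketch of that
- // saturation for illustration only; vlToDeadlineSketch is a made-up name and
- // not part of the stack.
- func vlToDeadlineSketch(vl, infinite time.Duration, now time.Time) (time.Time, bool) {
- if vl >= infinite {
- // Saturate: the address never expires on its own.
- return time.Time{}, false
- }
- return now.Add(vl), true
-}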
-
-// TestAutoGenAddrValidLifetimeUpdates tests that the valid lifetime of an
-// auto-generated address only gets updated when required to, as specified in
-// RFC 4862 section 5.5.3.e.
-func TestAutoGenAddrValidLifetimeUpdates(t *testing.T) {
- const infiniteVL = 4294967295
-
- prefix, _, addr := prefixSubnetAddr(0, linkAddr1)
-
- tests := []struct {
- name string
- ovl uint32
- nvl uint32
- evl uint32
- }{
- // Should update the VL to the minimum VL for updating if the
- // new VL is less than minVLSeconds but the original VL was
- // greater than it.
- {
- "LargeVLToVLLessThanMinVLForUpdate",
- 9999,
- 1,
- minVLSeconds,
- },
- {
- "LargeVLTo0",
- 9999,
- 0,
- minVLSeconds,
- },
- {
- "InfiniteVLToVLLessThanMinVLForUpdate",
- infiniteVL,
- 1,
- minVLSeconds,
- },
- {
- "InfiniteVLTo0",
- infiniteVL,
- 0,
- minVLSeconds,
- },
-
- // Should not update VL if original VL was less than minVLSeconds
- // and the new VL is also less than minVLSeconds.
- {
- "ShouldNotUpdateWhenBothOldAndNewAreLessThanMinVLForUpdate",
- minVLSeconds - 1,
- minVLSeconds - 3,
- minVLSeconds - 1,
- },
-
- // Should take the new VL if the new VL is greater than the
- // remaining time or is greater than minVLSeconds.
- {
- "MorethanMinVLToLesserButStillMoreThanMinVLForUpdate",
- minVLSeconds + 5,
- minVLSeconds + 3,
- minVLSeconds + 3,
- },
- {
- "SmallVLToGreaterVLButStillLessThanMinVLForUpdate",
- minVLSeconds - 3,
- minVLSeconds - 1,
- minVLSeconds - 1,
- },
- {
- "SmallVLToGreaterVLThatIsMoreThaMinVLForUpdate",
- minVLSeconds - 3,
- minVLSeconds + 1,
- minVLSeconds + 1,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- ndpDisp := ndpDispatcher{
- autoGenAddrC: make(chan ndpAutoGenAddrEvent, 10),
- }
- e := channel.New(10, 1280, linkAddr1)
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- NDPConfigs: ipv6.NDPConfigurations{
- HandleRAs: ipv6.HandlingRAsEnabledWhenForwardingDisabled,
- AutoGenGlobalAddresses: true,
- },
- NDPDisp: &ndpDisp,
- })},
- Clock: clock,
- })
-
- if err := s.CreateNIC(1, e); err != nil {
- t.Fatalf("CreateNIC(1) = %s", err)
- }
-
- // Receive an RA with prefix with initial VL,
- // test.ovl.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix, true, true, test.ovl, 0))
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, addr, newAddr); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("expected addr auto gen event")
- }
-
- // Receive a new RA for the prefix with the new VL,
- // test.nvl.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix, true, true, test.nvl, 0))
-
- //
- // Validate that the VL for the address got set
- // to test.evl.
- //
-
- // The address should not be invalidated until the effective valid
- // lifetime has passed.
- const delta = 1
- clock.Advance(time.Duration(test.evl)*time.Second - delta)
- select {
- case <-ndpDisp.autoGenAddrC:
- t.Fatal("unexpectedly received an auto gen addr event")
- default:
- }
-
- // Wait for the invalidation event.
- clock.Advance(delta)
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, addr, invalidatedAddr); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("timeout waiting for addr auto gen event")
- }
- })
- }
-}
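-
- // The expected lifetimes in the table above follow the RFC 4862 section
- // 5.5.3.e rule, with the stack's minimum-update threshold
- // (MinPrefixInformationValidLifetimeForUpdate, minVLSeconds here) standing in
- // for the RFC's two-hour constant. The helper below is a minimal sketch of
- // that decision for illustration only; effectiveVLSketch is a made-up name
- // and not the stack's implementation. Lifetimes are in seconds.
- func effectiveVLSketch(newVL, remainingVL, minVL uint32) uint32 {
- switch {
- case newVL > minVL || newVL > remainingVL:
- // Honor the advertised lifetime.
- return newVL
- case remainingVL <= minVL:
- // Ignore the advertisement; keep the remaining lifetime.
- return remainingVL
- default:
- // Clamp to the minimum threshold for updates.
- return minVL
- }
-}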
-
- // TestAutoGenAddrRemoval tests that when an auto-generated address is removed
- // by the user, its resources are cleaned up and an invalidation event is sent
- // to the integrator.
-func TestAutoGenAddrRemoval(t *testing.T) {
- prefix, _, addr := prefixSubnetAddr(0, linkAddr1)
-
- ndpDisp := ndpDispatcher{
- autoGenAddrC: make(chan ndpAutoGenAddrEvent, 1),
- }
- e := channel.New(0, 1280, linkAddr1)
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- NDPConfigs: ipv6.NDPConfigurations{
- HandleRAs: ipv6.HandlingRAsEnabledWhenForwardingDisabled,
- AutoGenGlobalAddresses: true,
- },
- NDPDisp: &ndpDisp,
- })},
- Clock: clock,
- })
-
- if err := s.CreateNIC(1, e); err != nil {
- t.Fatalf("CreateNIC(1) = %s", err)
- }
-
- expectAutoGenAddrEvent := func(addr tcpip.AddressWithPrefix, eventType ndpAutoGenAddrEventType) {
- t.Helper()
-
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, addr, eventType); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("expected addr auto gen event")
- }
- }
-
- // Receive a PI to auto-generate an address.
- const lifetimeSeconds = 1
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix, true, true, lifetimeSeconds, 0))
- expectAutoGenAddrEvent(addr, newAddr)
-
- // Removing the address should result in an invalidation event
- // immediately.
- if err := s.RemoveAddress(1, addr.Address); err != nil {
- t.Fatalf("RemoveAddress(_, %s) = %s", addr.Address, err)
- }
- expectAutoGenAddrEvent(addr, invalidatedAddr)
-
- // Wait for the original valid lifetime to make sure the original job got
- // cancelled/cleaned up.
- clock.Advance(lifetimeSeconds * time.Second)
- select {
- case <-ndpDisp.autoGenAddrC:
- t.Fatal("unexpectedly received an auto gen addr event")
- default:
- }
-}
-
-// TestAutoGenAddrAfterRemoval tests adding a SLAAC address that was previously
-// assigned to the NIC but is in the permanentExpired state.
-func TestAutoGenAddrAfterRemoval(t *testing.T) {
- const nicID = 1
-
- prefix1, _, addr1 := prefixSubnetAddr(0, linkAddr1)
- prefix2, _, addr2 := prefixSubnetAddr(1, linkAddr1)
- ndpDisp, e, s, _ := stackAndNdpDispatcherWithDefaultRoute(t, nicID)
-
- expectAutoGenAddrEvent := func(addr tcpip.AddressWithPrefix, eventType ndpAutoGenAddrEventType) {
- t.Helper()
-
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, addr, eventType); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("expected addr auto gen event")
- }
- }
-
- expectPrimaryAddr := func(addr tcpip.AddressWithPrefix) {
- t.Helper()
-
- if err := checkGetMainNICAddress(s, nicID, header.IPv6ProtocolNumber, addr); err != nil {
- t.Fatal(err)
- }
-
- if got := addrForNewConnection(t, s); got != addr.Address {
- t.Errorf("got addrForNewConnection = %s, want = %s", got, addr.Address)
- }
- }
-
- // Receive a PI to auto-generate addr1 with a large valid and preferred
- // lifetime.
- const largeLifetimeSeconds = 999
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr3, 0, prefix1, true, true, largeLifetimeSeconds, largeLifetimeSeconds))
- expectAutoGenAddrEvent(addr1, newAddr)
- expectPrimaryAddr(addr1)
-
- // Add addr2 as a static address.
- protoAddr2 := tcpip.ProtocolAddress{
- Protocol: header.IPv6ProtocolNumber,
- AddressWithPrefix: addr2,
- }
- if err := s.AddProtocolAddressWithOptions(nicID, protoAddr2, stack.FirstPrimaryEndpoint); err != nil {
- t.Fatalf("AddProtocolAddressWithOptions(%d, %+v, %d) = %s", nicID, protoAddr2, stack.FirstPrimaryEndpoint, err)
- }
- // addr2 should be more preferred now since it is at the front of the primary
- // list.
- expectPrimaryAddr(addr2)
-
- // Get a route using addr2 to increment its reference count then remove it
- // to leave it in the permanentExpired state.
- r, err := s.FindRoute(nicID, addr2.Address, addr3, header.IPv6ProtocolNumber, false)
- if err != nil {
- t.Fatalf("FindRoute(%d, %s, %s, %d, false): %s", nicID, addr2.Address, addr3, header.IPv6ProtocolNumber, err)
- }
- defer r.Release()
- if err := s.RemoveAddress(nicID, addr2.Address); err != nil {
- t.Fatalf("s.RemoveAddress(%d, %s): %s", nicID, addr2.Address, err)
- }
- // addr1 should be preferred again since addr2 is in the expired state.
- expectPrimaryAddr(addr1)
-
- // Receive a PI to auto-generate addr2 as valid and preferred.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr3, 0, prefix2, true, true, largeLifetimeSeconds, largeLifetimeSeconds))
- expectAutoGenAddrEvent(addr2, newAddr)
- // addr2 should be more preferred now that it is closer to the front of the
- // primary list and not deprecated.
- expectPrimaryAddr(addr2)
-
- // Removing the address should result in an invalidation event immediately.
- // It should still be in the permanentExpired state because r is still held.
- //
- // We remove addr2 here to make sure addr2 was marked as a SLAAC address
- // (it was previously marked as a static address).
- if err := s.RemoveAddress(1, addr2.Address); err != nil {
- t.Fatalf("RemoveAddress(_, %s) = %s", addr2.Address, err)
- }
- expectAutoGenAddrEvent(addr2, invalidatedAddr)
- // addr1 should be more preferred since addr2 is in the expired state.
- expectPrimaryAddr(addr1)
-
- // Receive a PI to auto-generate addr2 as valid and deprecated.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr3, 0, prefix2, true, true, largeLifetimeSeconds, 0))
- expectAutoGenAddrEvent(addr2, newAddr)
- // addr1 should still be more preferred since addr2 is deprecated, even though
- // it is closer to the front of the primary list.
- expectPrimaryAddr(addr1)
-
- // Receive a PI to refresh addr2's preferred lifetime.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr3, 0, prefix2, true, true, largeLifetimeSeconds, largeLifetimeSeconds))
- select {
- case <-ndpDisp.autoGenAddrC:
- t.Fatal("unexpectedly got an auto gen addr event")
- default:
- }
- // addr2 should be more preferred now that it is not deprecated.
- expectPrimaryAddr(addr2)
-
- if err := s.RemoveAddress(1, addr2.Address); err != nil {
- t.Fatalf("RemoveAddress(_, %s) = %s", addr2.Address, err)
- }
- expectAutoGenAddrEvent(addr2, invalidatedAddr)
- expectPrimaryAddr(addr1)
-}
-
-// TestAutoGenAddrStaticConflict tests that if SLAAC generates an address that
-// is already assigned to the NIC, the static address remains.
-func TestAutoGenAddrStaticConflict(t *testing.T) {
- prefix, _, addr := prefixSubnetAddr(0, linkAddr1)
-
- ndpDisp := ndpDispatcher{
- autoGenAddrC: make(chan ndpAutoGenAddrEvent, 1),
- }
- e := channel.New(0, 1280, linkAddr1)
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- NDPConfigs: ipv6.NDPConfigurations{
- HandleRAs: ipv6.HandlingRAsEnabledWhenForwardingDisabled,
- AutoGenGlobalAddresses: true,
- },
- NDPDisp: &ndpDisp,
- })},
- Clock: clock,
- })
-
- if err := s.CreateNIC(1, e); err != nil {
- t.Fatalf("CreateNIC(1) = %s", err)
- }
-
- // Add the address as a static address before SLAAC tries to add it.
- if err := s.AddProtocolAddress(1, tcpip.ProtocolAddress{Protocol: header.IPv6ProtocolNumber, AddressWithPrefix: addr}); err != nil {
- t.Fatalf("AddAddress(_, %d, %s) = %s", header.IPv6ProtocolNumber, addr.Address, err)
- }
- if !containsV6Addr(s.NICInfo()[1].ProtocolAddresses, addr) {
- t.Fatalf("Should have %s in the list of addresses", addr1)
- }
-
- // Receive a PI where the generated address will be the same as the one
- // that we already have assigned statically.
- const lifetimeSeconds = 1
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix, true, true, lifetimeSeconds, 0))
- select {
- case <-ndpDisp.autoGenAddrC:
- t.Fatal("unexpectedly received an auto gen addr event for an address we already have statically")
- default:
- }
- if !containsV6Addr(s.NICInfo()[1].ProtocolAddresses, addr) {
- t.Fatalf("Should have %s in the list of addresses", addr1)
- }
-
- // Should not get an invalidation event after the PI's invalidation
- // time.
- clock.Advance(lifetimeSeconds * time.Second)
- select {
- case <-ndpDisp.autoGenAddrC:
- t.Fatal("unexpectedly received an auto gen addr event")
- default:
- }
- if !containsV6Addr(s.NICInfo()[1].ProtocolAddresses, addr) {
- t.Fatalf("Should have %s in the list of addresses", addr1)
- }
-}
-
-func makeSecretKey(t *testing.T) []byte {
- secretKey := make([]byte, header.OpaqueIIDSecretKeyMinBytes)
- n, err := cryptorand.Read(secretKey)
- if err != nil {
- t.Fatalf("cryptorand.Read(_): %s", err)
- }
- if l := len(secretKey); n != l {
- t.Fatalf("got cryptorand.Read(_) = (%d, nil), want = (%d, nil)", n, l)
- }
- return secretKey
-}
-
-// TestAutoGenAddrWithOpaqueIID tests that SLAAC generated addresses will use
-// opaque interface identifiers when configured to do so.
-func TestAutoGenAddrWithOpaqueIID(t *testing.T) {
- const nicID = 1
- const nicName = "nic1"
-
- secretKey := makeSecretKey(t)
-
- prefix1, subnet1, _ := prefixSubnetAddr(0, linkAddr1)
- prefix2, subnet2, _ := prefixSubnetAddr(1, linkAddr1)
- // addr1 and addr2 are the addresses that are expected to be generated when
- // stack.Stack is configured to generate opaque interface identifiers as
- // defined by RFC 7217.
- addrBytes := []byte(subnet1.ID())
- addr1 := tcpip.AddressWithPrefix{
- Address: tcpip.Address(header.AppendOpaqueInterfaceIdentifier(addrBytes[:header.IIDOffsetInIPv6Address], subnet1, nicName, 0, secretKey)),
- PrefixLen: 64,
- }
- addrBytes = []byte(subnet2.ID())
- addr2 := tcpip.AddressWithPrefix{
- Address: tcpip.Address(header.AppendOpaqueInterfaceIdentifier(addrBytes[:header.IIDOffsetInIPv6Address], subnet2, nicName, 0, secretKey)),
- PrefixLen: 64,
- }
-
- ndpDisp := ndpDispatcher{
- autoGenAddrC: make(chan ndpAutoGenAddrEvent, 1),
- }
- e := channel.New(0, 1280, linkAddr1)
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- NDPConfigs: ipv6.NDPConfigurations{
- HandleRAs: ipv6.HandlingRAsEnabledWhenForwardingDisabled,
- AutoGenGlobalAddresses: true,
- },
- NDPDisp: &ndpDisp,
- OpaqueIIDOpts: ipv6.OpaqueInterfaceIdentifierOptions{
- NICNameFromID: func(_ tcpip.NICID, nicName string) string {
- return nicName
- },
- SecretKey: secretKey,
- },
- })},
- Clock: clock,
- })
- opts := stack.NICOptions{Name: nicName}
- if err := s.CreateNICWithOptions(nicID, e, opts); err != nil {
- t.Fatalf("CreateNICWithOptions(%d, _, %+v, _) = %s", nicID, opts, err)
- }
-
- expectAutoGenAddrEvent := func(addr tcpip.AddressWithPrefix, eventType ndpAutoGenAddrEventType) {
- t.Helper()
-
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, addr, eventType); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("expected addr auto gen event")
- }
- }
-
- // Receive an RA with prefix1 in a PI.
- const validLifetimeSecondPrefix1 = 1
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix1, true, true, validLifetimeSecondPrefix1, 0))
- expectAutoGenAddrEvent(addr1, newAddr)
- if !containsV6Addr(s.NICInfo()[nicID].ProtocolAddresses, addr1) {
- t.Fatalf("should have %s in the list of addresses", addr1)
- }
-
- // Receive an RA with prefix2 in a PI with a large valid lifetime.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix2, true, true, 100, 0))
- expectAutoGenAddrEvent(addr2, newAddr)
- if !containsV6Addr(s.NICInfo()[nicID].ProtocolAddresses, addr1) {
- t.Fatalf("should have %s in the list of addresses", addr1)
- }
- if !containsV6Addr(s.NICInfo()[nicID].ProtocolAddresses, addr2) {
- t.Fatalf("should have %s in the list of addresses", addr2)
- }
-
- // Wait for addr of prefix1 to be invalidated.
- clock.Advance(validLifetimeSecondPrefix1 * time.Second)
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, addr1, invalidatedAddr); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("timed out waiting for addr auto gen event")
- }
- if containsV6Addr(s.NICInfo()[nicID].ProtocolAddresses, addr1) {
- t.Fatalf("should not have %s in the list of addresses", addr1)
- }
- if !containsV6Addr(s.NICInfo()[nicID].ProtocolAddresses, addr2) {
- t.Fatalf("should have %s in the list of addresses", addr2)
- }
-}
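-
- // Opaque IIDs (RFC 7217) are derived as F(prefix, NIC name, DAD counter,
- // secret key) for some pseudo-random function F, so the generated address is
- // stable for a given NIC and prefix without embedding the link address. The
- // helper below is a minimal sketch of that shape using SHA-256 as F, for
- // illustration only: opaqueIIDSketch is a made-up name, it does not reproduce
- // the output of header.AppendOpaqueInterfaceIdentifier used above, and it
- // assumes "crypto/sha256" is imported.
- func opaqueIIDSketch(prefix tcpip.Subnet, nicName string, dadCounter uint8, secretKey []byte) [8]byte {
- h := sha256.New()
- h.Write([]byte(prefix.ID()))
- h.Write([]byte(nicName))
- h.Write([]byte{dadCounter})
- h.Write(secretKey)
- var iid [8]byte
- // Take the leftmost 64 bits of the digest as the interface identifier.
- copy(iid[:], h.Sum(nil))
- return iid
-}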
-
-func TestAutoGenAddrInResponseToDADConflicts(t *testing.T) {
- const nicID = 1
- const nicName = "nic"
- const dadTransmits = 1
- const retransmitTimer = time.Second
- const maxMaxRetries = 3
- const lifetimeSeconds = 10
-
- secretKey := makeSecretKey(t)
-
- prefix, subnet, _ := prefixSubnetAddr(0, linkAddr1)
-
- addrForSubnet := func(subnet tcpip.Subnet, dadCounter uint8) tcpip.AddressWithPrefix {
- addrBytes := []byte(subnet.ID())
- return tcpip.AddressWithPrefix{
- Address: tcpip.Address(header.AppendOpaqueInterfaceIdentifier(addrBytes[:header.IIDOffsetInIPv6Address], subnet, nicName, dadCounter, secretKey)),
- PrefixLen: 64,
- }
- }
-
- expectAutoGenAddrEvent := func(t *testing.T, ndpDisp *ndpDispatcher, addr tcpip.AddressWithPrefix, eventType ndpAutoGenAddrEventType) {
- t.Helper()
-
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, addr, eventType); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("expected addr auto gen event")
- }
- }
-
- expectAutoGenAddrEventAsync := func(t *testing.T, clock *faketime.ManualClock, ndpDisp *ndpDispatcher, addr tcpip.AddressWithPrefix, eventType ndpAutoGenAddrEventType) {
- t.Helper()
-
- clock.RunImmediatelyScheduledJobs()
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, addr, eventType); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("timed out waiting for addr auto gen event")
- }
- }
-
- expectDADEvent := func(t *testing.T, clock *faketime.ManualClock, ndpDisp *ndpDispatcher, addr tcpip.Address, res stack.DADResult) {
- t.Helper()
-
- clock.RunImmediatelyScheduledJobs()
- select {
- case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, addr, res); diff != "" {
- t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("expected DAD event")
- }
- }
-
- expectDADEventAsync := func(t *testing.T, clock *faketime.ManualClock, ndpDisp *ndpDispatcher, addr tcpip.Address, res stack.DADResult) {
- t.Helper()
-
- clock.Advance(dadTransmits * retransmitTimer)
- select {
- case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, addr, res); diff != "" {
- t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("timed out waiting for DAD event")
- }
- }
-
- stableAddrForTempAddrTest := addrForSubnet(subnet, 0)
-
- addrTypes := []struct {
- name string
- ndpConfigs ipv6.NDPConfigurations
- autoGenLinkLocal bool
- prepareFn func(t *testing.T, clock *faketime.ManualClock, ndpDisp *ndpDispatcher, e *channel.Endpoint, tempIIDHistory []byte) []tcpip.AddressWithPrefix
- addrGenFn func(dadCounter uint8, tempIIDHistory []byte) tcpip.AddressWithPrefix
- }{
- {
- name: "Global address",
- ndpConfigs: ipv6.NDPConfigurations{
- HandleRAs: ipv6.HandlingRAsEnabledWhenForwardingDisabled,
- AutoGenGlobalAddresses: true,
- },
- prepareFn: func(_ *testing.T, _ *faketime.ManualClock, _ *ndpDispatcher, e *channel.Endpoint, _ []byte) []tcpip.AddressWithPrefix {
- // Receive an RA with prefix in a PI.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix, true, true, lifetimeSeconds, lifetimeSeconds))
- return nil
-
- },
- addrGenFn: func(dadCounter uint8, _ []byte) tcpip.AddressWithPrefix {
- return addrForSubnet(subnet, dadCounter)
- },
- },
- {
- name: "LinkLocal address",
- ndpConfigs: ipv6.NDPConfigurations{},
- autoGenLinkLocal: true,
- prepareFn: func(*testing.T, *faketime.ManualClock, *ndpDispatcher, *channel.Endpoint, []byte) []tcpip.AddressWithPrefix {
- return nil
- },
- addrGenFn: func(dadCounter uint8, _ []byte) tcpip.AddressWithPrefix {
- return addrForSubnet(header.IPv6LinkLocalPrefix.Subnet(), dadCounter)
- },
- },
- {
- name: "Temporary address",
- ndpConfigs: ipv6.NDPConfigurations{
- HandleRAs: ipv6.HandlingRAsEnabledWhenForwardingDisabled,
- AutoGenGlobalAddresses: true,
- AutoGenTempGlobalAddresses: true,
- },
- prepareFn: func(t *testing.T, clock *faketime.ManualClock, ndpDisp *ndpDispatcher, e *channel.Endpoint, tempIIDHistory []byte) []tcpip.AddressWithPrefix {
- header.InitialTempIID(tempIIDHistory, nil, nicID)
-
- // Generate a stable SLAAC address so temporary addresses will be
- // generated.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix, true, true, 100, 100))
- expectAutoGenAddrEvent(t, ndpDisp, stableAddrForTempAddrTest, newAddr)
- expectDADEventAsync(t, clock, ndpDisp, stableAddrForTempAddrTest.Address, &stack.DADSucceeded{})
-
- // The stable address will be assigned throughout the test.
- return []tcpip.AddressWithPrefix{stableAddrForTempAddrTest}
- },
- addrGenFn: func(_ uint8, tempIIDHistory []byte) tcpip.AddressWithPrefix {
- return header.GenerateTempIPv6SLAACAddr(tempIIDHistory, stableAddrForTempAddrTest.Address)
- },
- },
- }
-
- for _, addrType := range addrTypes {
- t.Run(addrType.name, func(t *testing.T) {
- for maxRetries := uint8(0); maxRetries <= maxMaxRetries; maxRetries++ {
- for numFailures := uint8(0); numFailures <= maxRetries+1; numFailures++ {
- maxRetries := maxRetries
- numFailures := numFailures
- addrType := addrType
-
- t.Run(fmt.Sprintf("%d max retries and %d failures", maxRetries, numFailures), func(t *testing.T) {
- ndpDisp := ndpDispatcher{
- dadC: make(chan ndpDADEvent, 1),
- autoGenAddrC: make(chan ndpAutoGenAddrEvent, 2),
- }
- e := channel.New(0, 1280, linkAddr1)
- ndpConfigs := addrType.ndpConfigs
- ndpConfigs.AutoGenAddressConflictRetries = maxRetries
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- AutoGenLinkLocal: addrType.autoGenLinkLocal,
- DADConfigs: stack.DADConfigurations{
- DupAddrDetectTransmits: dadTransmits,
- RetransmitTimer: retransmitTimer,
- },
- NDPConfigs: ndpConfigs,
- NDPDisp: &ndpDisp,
- OpaqueIIDOpts: ipv6.OpaqueInterfaceIdentifierOptions{
- NICNameFromID: func(_ tcpip.NICID, nicName string) string {
- return nicName
- },
- SecretKey: secretKey,
- },
- })},
- Clock: clock,
- })
- opts := stack.NICOptions{Name: nicName}
- if err := s.CreateNICWithOptions(nicID, e, opts); err != nil {
- t.Fatalf("CreateNICWithOptions(%d, _, %+v) = %s", nicID, opts, err)
- }
-
- var tempIIDHistory [header.IIDSize]byte
- stableAddrs := addrType.prepareFn(t, clock, &ndpDisp, e, tempIIDHistory[:])
-
- // Simulate DAD conflicts so the address is regenerated.
- for i := uint8(0); i < numFailures; i++ {
- addr := addrType.addrGenFn(i, tempIIDHistory[:])
- expectAutoGenAddrEventAsync(t, clock, &ndpDisp, addr, newAddr)
-
- // Should not have any new addresses assigned to the NIC.
- if mismatch := addressCheck(s.NICInfo()[nicID].ProtocolAddresses, stableAddrs, nil); mismatch != "" {
- t.Fatal(mismatch)
- }
-
- // Simulate a DAD conflict.
- rxNDPSolicit(e, addr.Address)
- expectAutoGenAddrEvent(t, &ndpDisp, addr, invalidatedAddr)
- expectDADEvent(t, clock, &ndpDisp, addr.Address, &stack.DADDupAddrDetected{})
-
- // Attempting to add the address manually should not fail if the
- // address's state was cleaned up when DAD failed.
- if err := s.AddAddress(nicID, header.IPv6ProtocolNumber, addr.Address); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s) = %s", nicID, header.IPv6ProtocolNumber, addr.Address, err)
- }
- if err := s.RemoveAddress(nicID, addr.Address); err != nil {
- t.Fatalf("RemoveAddress(%d, %s) = %s", nicID, addr.Address, err)
- }
- expectDADEvent(t, clock, &ndpDisp, addr.Address, &stack.DADAborted{})
- }
-
- // Should not have any new addresses assigned to the NIC.
- if mismatch := addressCheck(s.NICInfo()[nicID].ProtocolAddresses, stableAddrs, nil); mismatch != "" {
- t.Fatal(mismatch)
- }
-
- // If we had fewer failures than generation attempts, we should have
- // an address after DAD resolves.
- if maxRetries+1 > numFailures {
- addr := addrType.addrGenFn(numFailures, tempIIDHistory[:])
- expectAutoGenAddrEventAsync(t, clock, &ndpDisp, addr, newAddr)
- expectDADEventAsync(t, clock, &ndpDisp, addr.Address, &stack.DADSucceeded{})
- if mismatch := addressCheck(s.NICInfo()[nicID].ProtocolAddresses, append(stableAddrs, addr), nil); mismatch != "" {
- t.Fatal(mismatch)
- }
- }
-
- // Should not attempt address generation again.
- select {
- case e := <-ndpDisp.autoGenAddrC:
- t.Fatalf("unexpectedly got an auto-generated address event = %+v", e)
- default:
- }
- })
- }
- }
- })
- }
-}
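-
- // Regeneration above is bounded by AutoGenAddressConflictRetries: every DAD
- // conflict bumps the DAD counter fed into the opaque IID and a new candidate
- // address is attempted, so at most maxRetries+1 candidates are tried per
- // prefix. The helper below is a rough sketch of that bound for illustration
- // only; regenerateSketch and its dadFails parameter are made-up names, not
- // stack APIs.
- func regenerateSketch(maxRetries uint8, dadFails func(dadCounter uint8) bool) (uint8, bool) {
- for i := 0; i <= int(maxRetries); i++ {
- if !dadFails(uint8(i)) {
- // This candidate passed DAD; it becomes the assigned address.
- return uint8(i), true
- }
- }
- // Every candidate conflicted; give up without an address.
- return 0, false
-}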
-
-// TestAutoGenAddrWithEUI64IIDNoDADRetries tests that a regeneration attempt is
-// not made for SLAAC addresses generated with an IID based on the NIC's link
-// address.
-func TestAutoGenAddrWithEUI64IIDNoDADRetries(t *testing.T) {
- const nicID = 1
- const dadTransmits = 1
- const retransmitTimer = time.Second
- const maxRetries = 3
- const lifetimeSeconds = 10
-
- prefix, subnet, _ := prefixSubnetAddr(0, linkAddr1)
-
- addrTypes := []struct {
- name string
- ndpConfigs ipv6.NDPConfigurations
- autoGenLinkLocal bool
- subnet tcpip.Subnet
- triggerSLAACFn func(e *channel.Endpoint)
- }{
- {
- name: "Global address",
- ndpConfigs: ipv6.NDPConfigurations{
- HandleRAs: ipv6.HandlingRAsEnabledWhenForwardingDisabled,
- AutoGenGlobalAddresses: true,
- AutoGenAddressConflictRetries: maxRetries,
- },
- subnet: subnet,
- triggerSLAACFn: func(e *channel.Endpoint) {
- // Receive an RA with prefix in a PI.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix, true, true, lifetimeSeconds, lifetimeSeconds))
-
- },
- },
- {
- name: "LinkLocal address",
- ndpConfigs: ipv6.NDPConfigurations{
- AutoGenAddressConflictRetries: maxRetries,
- },
- autoGenLinkLocal: true,
- subnet: header.IPv6LinkLocalPrefix.Subnet(),
- triggerSLAACFn: func(e *channel.Endpoint) {},
- },
- }
-
- for _, addrType := range addrTypes {
- addrType := addrType
-
- t.Run(addrType.name, func(t *testing.T) {
- ndpDisp := ndpDispatcher{
- dadC: make(chan ndpDADEvent, 1),
- autoGenAddrC: make(chan ndpAutoGenAddrEvent, 2),
- }
- e := channel.New(0, 1280, linkAddr1)
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- AutoGenLinkLocal: addrType.autoGenLinkLocal,
- NDPConfigs: addrType.ndpConfigs,
- NDPDisp: &ndpDisp,
- DADConfigs: stack.DADConfigurations{
- DupAddrDetectTransmits: dadTransmits,
- RetransmitTimer: retransmitTimer,
- },
- })},
- Clock: clock,
- })
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
-
- expectAutoGenAddrEvent := func(addr tcpip.AddressWithPrefix, eventType ndpAutoGenAddrEventType) {
- t.Helper()
-
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, addr, eventType); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("expected addr auto gen event")
- }
- }
-
- addrType.triggerSLAACFn(e)
-
- addrBytes := []byte(addrType.subnet.ID())
- header.EthernetAdddressToModifiedEUI64IntoBuf(linkAddr1, addrBytes[header.IIDOffsetInIPv6Address:])
- addr := tcpip.AddressWithPrefix{
- Address: tcpip.Address(addrBytes),
- PrefixLen: 64,
- }
- expectAutoGenAddrEvent(addr, newAddr)
-
- // Simulate a DAD conflict.
- rxNDPSolicit(e, addr.Address)
- expectAutoGenAddrEvent(addr, invalidatedAddr)
- select {
- case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, addr.Address, &stack.DADDupAddrDetected{}); diff != "" {
- t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("expected DAD event")
- }
-
- // Should not attempt address regeneration.
- select {
- case e := <-ndpDisp.autoGenAddrC:
- t.Fatalf("unexpectedly got an auto-generated address event = %+v", e)
- default:
- }
- })
- }
-}
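-
- // The EUI-64 based IID above is derived directly from the NIC's link address,
- // which is why there is no retry: the transform is deterministic and has no
- // counter input, so regeneration would only produce the same address. The
- // helper below is a minimal sketch of the modified EUI-64 transform
- // (RFC 4291 appendix A) for illustration only; modifiedEUI64Sketch is a
- // made-up name and the test relies on
- // header.EthernetAdddressToModifiedEUI64IntoBuf instead.
- func modifiedEUI64Sketch(mac [6]byte) [8]byte {
- return [8]byte{
- // Flip the universal/local bit of the first octet.
- mac[0] ^ 0x02,
- mac[1],
- mac[2],
- // Insert 0xff, 0xfe between the two halves of the MAC.
- 0xff,
- 0xfe,
- mac[3],
- mac[4],
- mac[5],
- }
-}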
-
-// TestAutoGenAddrContinuesLifetimesAfterRetry tests that retrying address
-// generation in response to DAD conflicts does not refresh the lifetimes.
-func TestAutoGenAddrContinuesLifetimesAfterRetry(t *testing.T) {
- const nicID = 1
- const nicName = "nic"
- const dadTransmits = 1
- const retransmitTimer = 2 * time.Second
- const failureTimer = time.Second
- const maxRetries = 1
- const lifetimeSeconds = 5
-
- secretKey := makeSecretKey(t)
-
- prefix, subnet, _ := prefixSubnetAddr(0, linkAddr1)
-
- ndpDisp := ndpDispatcher{
- dadC: make(chan ndpDADEvent, 1),
- autoGenAddrC: make(chan ndpAutoGenAddrEvent, 2),
- }
- e := channel.New(0, 1280, linkAddr1)
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- DADConfigs: stack.DADConfigurations{
- DupAddrDetectTransmits: dadTransmits,
- RetransmitTimer: retransmitTimer,
- },
- NDPConfigs: ipv6.NDPConfigurations{
- HandleRAs: ipv6.HandlingRAsEnabledWhenForwardingDisabled,
- AutoGenGlobalAddresses: true,
- AutoGenAddressConflictRetries: maxRetries,
- },
- NDPDisp: &ndpDisp,
- OpaqueIIDOpts: ipv6.OpaqueInterfaceIdentifierOptions{
- NICNameFromID: func(_ tcpip.NICID, nicName string) string {
- return nicName
- },
- SecretKey: secretKey,
- },
- })},
- Clock: clock,
- })
- opts := stack.NICOptions{Name: nicName}
- if err := s.CreateNICWithOptions(nicID, e, opts); err != nil {
- t.Fatalf("CreateNICWithOptions(%d, _, %+v) = %s", nicID, opts, err)
- }
-
- expectAutoGenAddrEvent := func(addr tcpip.AddressWithPrefix, eventType ndpAutoGenAddrEventType) {
- t.Helper()
-
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, addr, eventType); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("expected addr auto gen event")
- }
- }
-
- // Receive an RA with prefix in a PI.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix, true, true, lifetimeSeconds, lifetimeSeconds))
-
- addrBytes := []byte(subnet.ID())
- addr := tcpip.AddressWithPrefix{
- Address: tcpip.Address(header.AppendOpaqueInterfaceIdentifier(addrBytes[:header.IIDOffsetInIPv6Address], subnet, nicName, 0, secretKey)),
- PrefixLen: 64,
- }
- expectAutoGenAddrEvent(addr, newAddr)
-
- // Simulate a DAD conflict after some time has passed.
- clock.Advance(failureTimer)
- rxNDPSolicit(e, addr.Address)
- expectAutoGenAddrEvent(addr, invalidatedAddr)
- select {
- case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, addr.Address, &stack.DADDupAddrDetected{}); diff != "" {
- t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("expected DAD event")
- }
-
- // Let the next address resolve.
- addr.Address = tcpip.Address(header.AppendOpaqueInterfaceIdentifier(addrBytes[:header.IIDOffsetInIPv6Address], subnet, nicName, 1, secretKey))
- expectAutoGenAddrEvent(addr, newAddr)
- clock.Advance(dadTransmits * retransmitTimer)
- select {
- case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, addr.Address, &stack.DADSucceeded{}); diff != "" {
- t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("timed out waiting for DAD event")
- }
-
- // Address should be deprecated/invalidated after the lifetime expires.
- //
- // Note, the remaining lifetime is calculated from when the PI was first
- // processed. Since we wait for some time before simulating a DAD conflict
- // and more time for the new address to resolve, the new address is only
- // expected to be valid for the remaining time. The DAD conflict should
- // not have reset the lifetimes.
- //
- // We expect either just the invalidation event or the deprecation event
- // followed by the invalidation event.
- clock.Advance(lifetimeSeconds*time.Second - failureTimer - dadTransmits*retransmitTimer)
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if e.eventType == deprecatedAddr {
- if diff := checkAutoGenAddrEvent(e, addr, deprecatedAddr); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
-
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, addr, invalidatedAddr); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("timed out waiting for invalidated auto gen addr event after deprecation")
- }
- } else {
- if diff := checkAutoGenAddrEvent(e, addr, invalidatedAddr); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- }
- default:
- t.Fatal("timed out waiting for auto gen addr event")
- }
-}
-
-// TestNDPRecursiveDNSServerDispatch tests that we properly dispatch an event
-// to the integrator when an RA is received with the NDP Recursive DNS Server
-// option with at least one valid address.
-func TestNDPRecursiveDNSServerDispatch(t *testing.T) {
- tests := []struct {
- name string
- opt header.NDPRecursiveDNSServer
- expected *ndpRDNSS
- }{
- {
- "Unspecified",
- header.NDPRecursiveDNSServer([]byte{
- 0, 0,
- 0, 0, 0, 2,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- }),
- nil,
- },
- {
- "Multicast",
- header.NDPRecursiveDNSServer([]byte{
- 0, 0,
- 0, 0, 0, 2,
- 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
- }),
- nil,
- },
- {
- "OptionTooSmall",
- header.NDPRecursiveDNSServer([]byte{
- 0, 0,
- 0, 0, 0, 2,
- 1, 2, 3, 4, 5, 6, 7, 8,
- }),
- nil,
- },
- {
- "0Addresses",
- header.NDPRecursiveDNSServer([]byte{
- 0, 0,
- 0, 0, 0, 2,
- }),
- nil,
- },
- {
- "Valid1Address",
- header.NDPRecursiveDNSServer([]byte{
- 0, 0,
- 0, 0, 0, 2,
- 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 1,
- }),
- &ndpRDNSS{
- []tcpip.Address{
- "\x01\x02\x03\x04\x05\x06\x07\x08\x00\x00\x00\x00\x00\x00\x00\x01",
- },
- 2 * time.Second,
- },
- },
- {
- "Valid2Addresses",
- header.NDPRecursiveDNSServer([]byte{
- 0, 0,
- 0, 0, 0, 1,
- 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 1,
- 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 2,
- }),
- &ndpRDNSS{
- []tcpip.Address{
- "\x01\x02\x03\x04\x05\x06\x07\x08\x00\x00\x00\x00\x00\x00\x00\x01",
- "\x01\x02\x03\x04\x05\x06\x07\x08\x00\x00\x00\x00\x00\x00\x00\x02",
- },
- time.Second,
- },
- },
- {
- "Valid3Addresses",
- header.NDPRecursiveDNSServer([]byte{
- 0, 0,
- 0, 0, 0, 0,
- 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 1,
- 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 2,
- 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 3,
- }),
- &ndpRDNSS{
- []tcpip.Address{
- "\x01\x02\x03\x04\x05\x06\x07\x08\x00\x00\x00\x00\x00\x00\x00\x01",
- "\x01\x02\x03\x04\x05\x06\x07\x08\x00\x00\x00\x00\x00\x00\x00\x02",
- "\x01\x02\x03\x04\x05\x06\x07\x08\x00\x00\x00\x00\x00\x00\x00\x03",
- },
- 0,
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- ndpDisp := ndpDispatcher{
- // We do not expect more than a single RDNSS
- // event at any time for this test.
- rdnssC: make(chan ndpRDNSSEvent, 1),
- }
- e := channel.New(0, 1280, linkAddr1)
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- NDPConfigs: ipv6.NDPConfigurations{
- HandleRAs: ipv6.HandlingRAsEnabledWhenForwardingDisabled,
- },
- NDPDisp: &ndpDisp,
- })},
- })
- if err := s.CreateNIC(1, e); err != nil {
- t.Fatalf("CreateNIC(1) = %s", err)
- }
-
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithOpts(llAddr1, 0, header.NDPOptionsSerializer{test.opt}))
-
- if test.expected != nil {
- select {
- case e := <-ndpDisp.rdnssC:
- if e.nicID != 1 {
- t.Errorf("got rdnss nicID = %d, want = 1", e.nicID)
- }
- if diff := cmp.Diff(e.rdnss.addrs, test.expected.addrs); diff != "" {
- t.Errorf("rdnss addrs mismatch (-want +got):\n%s", diff)
- }
- if e.rdnss.lifetime != test.expected.lifetime {
- t.Errorf("got rdnss lifetime = %s, want = %s", e.rdnss.lifetime, test.expected.lifetime)
- }
- default:
- t.Fatal("expected an RDNSS option event")
- }
- }
-
- // Should have no more RDNSS options.
- select {
- case e := <-ndpDisp.rdnssC:
- t.Fatalf("unexpectedly got a new RDNSS option event: %+v", e)
- default:
- }
- })
- }
-}
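-
- // The raw option bodies above follow the RDNSS layout from RFC 8106: two
- // reserved bytes, a 4-byte lifetime in seconds, then one or more 16-byte IPv6
- // addresses (the type and length octets precede this body on the wire). The
- // annotated value below mirrors the "Valid1Address" case and exists purely
- // for illustration; rdnssBodySketch is a made-up name.
- var rdnssBodySketch = header.NDPRecursiveDNSServer([]byte{
- 0, 0, // Reserved.
- 0, 0, 0, 2, // Lifetime: 2 seconds.
- // One recursive DNS server address (16 bytes).
- 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 1,
- })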
-
-// TestNDPDNSSearchListDispatch tests that the integrator is informed when an
-// NDP DNS Search List option is received with at least one domain name in the
-// search list.
-func TestNDPDNSSearchListDispatch(t *testing.T) {
- const nicID = 1
-
- ndpDisp := ndpDispatcher{
- dnsslC: make(chan ndpDNSSLEvent, 3),
- }
- e := channel.New(0, 1280, linkAddr1)
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- NDPConfigs: ipv6.NDPConfigurations{
- HandleRAs: ipv6.HandlingRAsEnabledWhenForwardingDisabled,
- },
- NDPDisp: &ndpDisp,
- })},
- })
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
-
- optSer := header.NDPOptionsSerializer{
- header.NDPDNSSearchList([]byte{
- 0, 0,
- 0, 0, 0, 0,
- 2, 'h', 'i',
- 0,
- }),
- header.NDPDNSSearchList([]byte{
- 0, 0,
- 0, 0, 0, 1,
- 1, 'i',
- 0,
- 2, 'a', 'm',
- 2, 'm', 'e',
- 0,
- }),
- header.NDPDNSSearchList([]byte{
- 0, 0,
- 0, 0, 1, 0,
- 3, 'x', 'y', 'z',
- 0,
- 5, 'h', 'e', 'l', 'l', 'o',
- 5, 'w', 'o', 'r', 'l', 'd',
- 0,
- 4, 't', 'h', 'i', 's',
- 2, 'i', 's',
- 1, 'a',
- 4, 't', 'e', 's', 't',
- 0,
- }),
- }
- expected := []struct {
- domainNames []string
- lifetime time.Duration
- }{
- {
- domainNames: []string{
- "hi",
- },
- lifetime: 0,
- },
- {
- domainNames: []string{
- "i",
- "am.me",
- },
- lifetime: time.Second,
- },
- {
- domainNames: []string{
- "xyz",
- "hello.world",
- "this.is.a.test",
- },
- lifetime: 256 * time.Second,
- },
- }
-
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithOpts(llAddr1, 0, optSer))
-
- for i, expected := range expected {
- select {
- case dnssl := <-ndpDisp.dnsslC:
- if dnssl.nicID != nicID {
- t.Errorf("got %d-th dnssl nicID = %d, want = %d", i, dnssl.nicID, nicID)
- }
- if diff := cmp.Diff(dnssl.domainNames, expected.domainNames); diff != "" {
- t.Errorf("%d-th dnssl domain names mismatch (-want +got):\n%s", i, diff)
- }
- if dnssl.lifetime != expected.lifetime {
- t.Errorf("got %d-th dnssl lifetime = %s, want = %s", i, dnssl.lifetime, expected.lifetime)
- }
- default:
- t.Fatal("expected a DNSSL event")
- }
- }
-
- // Should have no more DNSSL options.
- select {
- case <-ndpDisp.dnsslC:
- t.Fatal("unexpectedly got a DNSSL event")
- default:
- }
-}
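-
- // Domain names in a DNSSL option (RFC 8106) are encoded after the reserved
- // and lifetime fields as DNS-style labels: a length byte followed by that
- // many characters, with a zero byte terminating each name, so
- // 5 'h' 'e' 'l' 'l' 'o' 5 'w' 'o' 'r' 'l' 'd' 0 decodes to "hello.world".
- // The helper below is a minimal decoder sketch for illustration only;
- // decodeDNSNameSketch is a made-up name and performs no validation of
- // malformed or compressed names.
- func decodeDNSNameSketch(b []byte) (string, []byte) {
- name := ""
- for len(b) > 0 && b[0] != 0 {
- l := int(b[0])
- if name != "" {
- name += "."
- }
- name += string(b[1 : 1+l])
- b = b[1+l:]
- }
- if len(b) > 0 {
- // Consume the zero byte that terminates the name.
- b = b[1:]
- }
- return name, b
-}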
-
-func TestNoCleanupNDPStateWhenForwardingEnabled(t *testing.T) {
- const (
- lifetimeSeconds = 999
- nicID = 1
- )
-
- ndpDisp := ndpDispatcher{
- offLinkRouteC: make(chan ndpOffLinkRouteEvent, 1),
- prefixC: make(chan ndpPrefixEvent, 1),
- autoGenAddrC: make(chan ndpAutoGenAddrEvent, 1),
- }
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- AutoGenLinkLocal: true,
- NDPConfigs: ipv6.NDPConfigurations{
- HandleRAs: ipv6.HandlingRAsEnabledWhenForwardingDisabled,
- DiscoverDefaultRouters: true,
- DiscoverOnLinkPrefixes: true,
- AutoGenGlobalAddresses: true,
- },
- NDPDisp: &ndpDisp,
- })},
- })
-
- e1 := channel.New(0, header.IPv6MinimumMTU, linkAddr1)
- if err := s.CreateNIC(nicID, e1); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
- llAddr := tcpip.AddressWithPrefix{Address: llAddr1, PrefixLen: header.IPv6LinkLocalPrefix.PrefixLen}
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, llAddr, newAddr); diff != "" {
- t.Errorf("auto-gen addr mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Errorf("expected auto-gen addr event for %s on NIC(%d)", llAddr, nicID)
- }
-
- prefix, subnet, addr := prefixSubnetAddr(0, linkAddr1)
- e1.InjectInbound(
- header.IPv6ProtocolNumber,
- raBufWithPI(
- llAddr3,
- lifetimeSeconds,
- prefix,
- true, /* onLink */
- true, /* auto */
- lifetimeSeconds,
- lifetimeSeconds,
- ),
- )
- select {
- case e := <-ndpDisp.offLinkRouteC:
- if diff := checkOffLinkRouteEvent(e, nicID, header.IPv6EmptySubnet, llAddr3, header.MediumRoutePreference, true /* discovered */); diff != "" {
- t.Errorf("off-link route event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Errorf("expected off-link route event for %s on NIC(%d)", llAddr3, nicID)
- }
- select {
- case e := <-ndpDisp.prefixC:
- if diff := checkPrefixEvent(e, subnet, true /* discovered */); diff != "" {
- t.Errorf("off-link route event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Errorf("expected prefix event for %s on NIC(%d)", prefix, nicID)
- }
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, addr, newAddr); diff != "" {
- t.Errorf("auto-gen addr mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Errorf("expected auto-gen addr event for %s on NIC(%d)", addr, nicID)
- }
-
- // Enabling or disabling forwarding should not invalidate discovered prefixes
- // or routers, or auto-generated addresses.
- for _, forwarding := range [...]bool{true, false} {
- t.Run(fmt.Sprintf("Transition forwarding to %t", forwarding), func(t *testing.T) {
- if err := s.SetForwardingDefaultAndAllNICs(ipv6.ProtocolNumber, forwarding); err != nil {
- t.Fatalf("SetForwardingDefaultAndAllNICs(%d, %t): %s", ipv6.ProtocolNumber, forwarding, err)
- }
- select {
- case e := <-ndpDisp.offLinkRouteC:
- t.Errorf("unexpected off-link route event = %#v", e)
- default:
- }
- select {
- case e := <-ndpDisp.prefixC:
- t.Errorf("unexpected prefix event = %#v", e)
- default:
- }
- select {
- case e := <-ndpDisp.autoGenAddrC:
- t.Errorf("unexpected auto-gen addr event = %#v", e)
- default:
- }
- })
- }
-}
-
-func TestCleanupNDPState(t *testing.T) {
- const (
- lifetimeSeconds = 5
- maxRouterAndPrefixEvents = 4
- nicID1 = 1
- nicID2 = 2
- )
-
- prefix1, subnet1, e1Addr1 := prefixSubnetAddr(0, linkAddr1)
- prefix2, subnet2, e1Addr2 := prefixSubnetAddr(1, linkAddr1)
- e2Addr1 := addrForSubnet(subnet1, linkAddr2)
- e2Addr2 := addrForSubnet(subnet2, linkAddr2)
- llAddrWithPrefix1 := tcpip.AddressWithPrefix{
- Address: llAddr1,
- PrefixLen: 64,
- }
- llAddrWithPrefix2 := tcpip.AddressWithPrefix{
- Address: llAddr2,
- PrefixLen: 64,
- }
-
- tests := []struct {
- name string
- cleanupFn func(t *testing.T, s *stack.Stack)
- keepAutoGenLinkLocal bool
- maxAutoGenAddrEvents int
- skipFinalAddrCheck bool
- }{
- // A NIC should cleanup all NDP state when it is disabled.
- {
- name: "Disable NIC",
- cleanupFn: func(t *testing.T, s *stack.Stack) {
- t.Helper()
-
- if err := s.DisableNIC(nicID1); err != nil {
- t.Fatalf("s.DisableNIC(%d): %s", nicID1, err)
- }
- if err := s.DisableNIC(nicID2); err != nil {
- t.Fatalf("s.DisableNIC(%d): %s", nicID2, err)
- }
- },
- keepAutoGenLinkLocal: false,
- maxAutoGenAddrEvents: 6,
- },
-
- // A NIC should cleanup all NDP state when it is removed.
- {
- name: "Remove NIC",
- cleanupFn: func(t *testing.T, s *stack.Stack) {
- t.Helper()
-
- if err := s.RemoveNIC(nicID1); err != nil {
- t.Fatalf("s.RemoveNIC(%d): %s", nicID1, err)
- }
- if err := s.RemoveNIC(nicID2); err != nil {
- t.Fatalf("s.RemoveNIC(%d): %s", nicID2, err)
- }
- },
- keepAutoGenLinkLocal: false,
- maxAutoGenAddrEvents: 6,
- // The NICs are removed so we can't check their addresses after calling
- // cleanupFn.
- skipFinalAddrCheck: true,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- ndpDisp := ndpDispatcher{
- offLinkRouteC: make(chan ndpOffLinkRouteEvent, maxRouterAndPrefixEvents),
- prefixC: make(chan ndpPrefixEvent, maxRouterAndPrefixEvents),
- autoGenAddrC: make(chan ndpAutoGenAddrEvent, test.maxAutoGenAddrEvents),
- }
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- AutoGenLinkLocal: true,
- NDPConfigs: ipv6.NDPConfigurations{
- HandleRAs: ipv6.HandlingRAsEnabledWhenForwardingDisabled,
- DiscoverDefaultRouters: true,
- DiscoverOnLinkPrefixes: true,
- AutoGenGlobalAddresses: true,
- },
- NDPDisp: &ndpDisp,
- })},
- Clock: clock,
- })
-
- expectOffLinkRouteEvent := func() (bool, ndpOffLinkRouteEvent) {
- select {
- case e := <-ndpDisp.offLinkRouteC:
- return true, e
- default:
- }
-
- return false, ndpOffLinkRouteEvent{}
- }
-
- expectPrefixEvent := func() (bool, ndpPrefixEvent) {
- select {
- case e := <-ndpDisp.prefixC:
- return true, e
- default:
- }
-
- return false, ndpPrefixEvent{}
- }
-
- expectAutoGenAddrEvent := func() (bool, ndpAutoGenAddrEvent) {
- select {
- case e := <-ndpDisp.autoGenAddrC:
- return true, e
- default:
- }
-
- return false, ndpAutoGenAddrEvent{}
- }
-
- e1 := channel.New(0, 1280, linkAddr1)
- if err := s.CreateNIC(nicID1, e1); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID1, err)
- }
- // We have other tests that make sure we receive the *correct* events
- // on normal discovery of routers/prefixes, and auto-generated
- // addresses. Here we just make sure we get an event and let other tests
- // handle the correctness check.
- expectAutoGenAddrEvent()
-
- e2 := channel.New(0, 1280, linkAddr2)
- if err := s.CreateNIC(nicID2, e2); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID2, err)
- }
- expectAutoGenAddrEvent()
-
- // Receive RAs on NIC(1) and NIC(2) from default routers (llAddr3 and
- // llAddr4) w/ PI (for prefix1 in RA from llAddr3 and prefix2 in RA from
- // llAddr4) to discover multiple routers and prefixes, and auto-gen
- // multiple addresses.
-
- e1.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr3, lifetimeSeconds, prefix1, true, true, lifetimeSeconds, lifetimeSeconds))
- if ok, _ := expectOffLinkRouteEvent(); !ok {
- t.Errorf("expected off-link route event for %s on NIC(%d)", llAddr3, nicID1)
- }
- if ok, _ := expectPrefixEvent(); !ok {
- t.Errorf("expected prefix event for %s on NIC(%d)", prefix1, nicID1)
- }
- if ok, _ := expectAutoGenAddrEvent(); !ok {
- t.Errorf("expected auto-gen addr event for %s on NIC(%d)", e1Addr1, nicID1)
- }
-
- e1.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr4, lifetimeSeconds, prefix2, true, true, lifetimeSeconds, lifetimeSeconds))
- if ok, _ := expectOffLinkRouteEvent(); !ok {
- t.Errorf("expected off-link route event for %s on NIC(%d)", llAddr4, nicID1)
- }
- if ok, _ := expectPrefixEvent(); !ok {
- t.Errorf("expected prefix event for %s on NIC(%d)", prefix2, nicID1)
- }
- if ok, _ := expectAutoGenAddrEvent(); !ok {
- t.Errorf("expected auto-gen addr event for %s on NIC(%d)", e1Addr2, nicID1)
- }
-
- e2.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr3, lifetimeSeconds, prefix1, true, true, lifetimeSeconds, lifetimeSeconds))
- if ok, _ := expectOffLinkRouteEvent(); !ok {
- t.Errorf("expected off-link route event for %s on NIC(%d)", llAddr3, nicID2)
- }
- if ok, _ := expectPrefixEvent(); !ok {
- t.Errorf("expected prefix event for %s on NIC(%d)", prefix1, nicID2)
- }
- if ok, _ := expectAutoGenAddrEvent(); !ok {
- t.Errorf("expected auto-gen addr event for %s on NIC(%d)", e1Addr2, nicID2)
- }
-
- e2.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr4, lifetimeSeconds, prefix2, true, true, lifetimeSeconds, lifetimeSeconds))
- if ok, _ := expectOffLinkRouteEvent(); !ok {
- t.Errorf("expected off-link route event for %s on NIC(%d)", llAddr4, nicID2)
- }
- if ok, _ := expectPrefixEvent(); !ok {
- t.Errorf("expected prefix event for %s on NIC(%d)", prefix2, nicID2)
- }
- if ok, _ := expectAutoGenAddrEvent(); !ok {
- t.Errorf("expected auto-gen addr event for %s on NIC(%d)", e2Addr2, nicID2)
- }
-
- // We should have the auto-generated addresses added.
- nicinfo := s.NICInfo()
- nic1Addrs := nicinfo[nicID1].ProtocolAddresses
- nic2Addrs := nicinfo[nicID2].ProtocolAddresses
- if !containsV6Addr(nic1Addrs, llAddrWithPrefix1) {
- t.Errorf("missing %s from the list of addresses for NIC(%d): %+v", llAddrWithPrefix1, nicID1, nic1Addrs)
- }
- if !containsV6Addr(nic1Addrs, e1Addr1) {
- t.Errorf("missing %s from the list of addresses for NIC(%d): %+v", e1Addr1, nicID1, nic1Addrs)
- }
- if !containsV6Addr(nic1Addrs, e1Addr2) {
- t.Errorf("missing %s from the list of addresses for NIC(%d): %+v", e1Addr2, nicID1, nic1Addrs)
- }
- if !containsV6Addr(nic2Addrs, llAddrWithPrefix2) {
- t.Errorf("missing %s from the list of addresses for NIC(%d): %+v", llAddrWithPrefix2, nicID2, nic2Addrs)
- }
- if !containsV6Addr(nic2Addrs, e2Addr1) {
- t.Errorf("missing %s from the list of addresses for NIC(%d): %+v", e2Addr1, nicID2, nic2Addrs)
- }
- if !containsV6Addr(nic2Addrs, e2Addr2) {
- t.Errorf("missing %s from the list of addresses for NIC(%d): %+v", e2Addr2, nicID2, nic2Addrs)
- }
-
- // We can't proceed any further if we already failed the test (missing
- // some discovery/auto-generated address events or addresses).
- if t.Failed() {
- t.FailNow()
- }
-
- test.cleanupFn(t, s)
-
- // Collect invalidation events after having NDP state cleaned up.
- gotOffLinkRouteEvents := make(map[ndpOffLinkRouteEvent]int)
- for i := 0; i < maxRouterAndPrefixEvents; i++ {
- ok, e := expectOffLinkRouteEvent()
- if !ok {
- t.Errorf("expected %d off-link route events after becoming a router; got = %d", maxRouterAndPrefixEvents, i)
- break
- }
- gotOffLinkRouteEvents[e]++
- }
- gotPrefixEvents := make(map[ndpPrefixEvent]int)
- for i := 0; i < maxRouterAndPrefixEvents; i++ {
- ok, e := expectPrefixEvent()
- if !ok {
- t.Errorf("expected %d prefix events after becoming a router; got = %d", maxRouterAndPrefixEvents, i)
- break
- }
- gotPrefixEvents[e]++
- }
- gotAutoGenAddrEvents := make(map[ndpAutoGenAddrEvent]int)
- for i := 0; i < test.maxAutoGenAddrEvents; i++ {
- ok, e := expectAutoGenAddrEvent()
- if !ok {
- t.Errorf("expected %d auto-generated address events after becoming a router; got = %d", test.maxAutoGenAddrEvents, i)
- break
- }
- gotAutoGenAddrEvents[e]++
- }
-
- // No need to proceed any further if we already failed the test (missing
- // some invalidation events).
- if t.Failed() {
- t.FailNow()
- }
-
- expectedOffLinkRouteEvents := map[ndpOffLinkRouteEvent]int{
- {nicID: nicID1, subnet: header.IPv6EmptySubnet, router: llAddr3, updated: false}: 1,
- {nicID: nicID1, subnet: header.IPv6EmptySubnet, router: llAddr4, updated: false}: 1,
- {nicID: nicID2, subnet: header.IPv6EmptySubnet, router: llAddr3, updated: false}: 1,
- {nicID: nicID2, subnet: header.IPv6EmptySubnet, router: llAddr4, updated: false}: 1,
- }
- if diff := cmp.Diff(expectedOffLinkRouteEvents, gotOffLinkRouteEvents); diff != "" {
- t.Errorf("off-link route events mismatch (-want +got):\n%s", diff)
- }
- expectedPrefixEvents := map[ndpPrefixEvent]int{
- {nicID: nicID1, prefix: subnet1, discovered: false}: 1,
- {nicID: nicID1, prefix: subnet2, discovered: false}: 1,
- {nicID: nicID2, prefix: subnet1, discovered: false}: 1,
- {nicID: nicID2, prefix: subnet2, discovered: false}: 1,
- }
- if diff := cmp.Diff(expectedPrefixEvents, gotPrefixEvents); diff != "" {
- t.Errorf("prefix events mismatch (-want +got):\n%s", diff)
- }
- expectedAutoGenAddrEvents := map[ndpAutoGenAddrEvent]int{
- {nicID: nicID1, addr: e1Addr1, eventType: invalidatedAddr}: 1,
- {nicID: nicID1, addr: e1Addr2, eventType: invalidatedAddr}: 1,
- {nicID: nicID2, addr: e2Addr1, eventType: invalidatedAddr}: 1,
- {nicID: nicID2, addr: e2Addr2, eventType: invalidatedAddr}: 1,
- }
-
- if !test.keepAutoGenLinkLocal {
- expectedAutoGenAddrEvents[ndpAutoGenAddrEvent{nicID: nicID1, addr: llAddrWithPrefix1, eventType: invalidatedAddr}] = 1
- expectedAutoGenAddrEvents[ndpAutoGenAddrEvent{nicID: nicID2, addr: llAddrWithPrefix2, eventType: invalidatedAddr}] = 1
- }
-
- if diff := cmp.Diff(expectedAutoGenAddrEvents, gotAutoGenAddrEvents); diff != "" {
- t.Errorf("auto-generated address events mismatch (-want +got):\n%s", diff)
- }
-
- if !test.skipFinalAddrCheck {
- // Make sure the auto-generated addresses got removed.
- nicinfo = s.NICInfo()
- nic1Addrs = nicinfo[nicID1].ProtocolAddresses
- nic2Addrs = nicinfo[nicID2].ProtocolAddresses
- if containsV6Addr(nic1Addrs, llAddrWithPrefix1) != test.keepAutoGenLinkLocal {
- if test.keepAutoGenLinkLocal {
- t.Errorf("missing %s from the list of addresses for NIC(%d): %+v", llAddrWithPrefix1, nicID1, nic1Addrs)
- } else {
- t.Errorf("still have %s in the list of addresses for NIC(%d): %+v", llAddrWithPrefix1, nicID1, nic1Addrs)
- }
- }
- if containsV6Addr(nic1Addrs, e1Addr1) {
- t.Errorf("still have %s in the list of addresses for NIC(%d): %+v", e1Addr1, nicID1, nic1Addrs)
- }
- if containsV6Addr(nic1Addrs, e1Addr2) {
- t.Errorf("still have %s in the list of addresses for NIC(%d): %+v", e1Addr2, nicID1, nic1Addrs)
- }
- if containsV6Addr(nic2Addrs, llAddrWithPrefix2) != test.keepAutoGenLinkLocal {
- if test.keepAutoGenLinkLocal {
- t.Errorf("missing %s from the list of addresses for NIC(%d): %+v", llAddrWithPrefix2, nicID2, nic2Addrs)
- } else {
- t.Errorf("still have %s in the list of addresses for NIC(%d): %+v", llAddrWithPrefix2, nicID2, nic2Addrs)
- }
- }
- if containsV6Addr(nic2Addrs, e2Addr1) {
- t.Errorf("still have %s in the list of addresses for NIC(%d): %+v", e2Addr1, nicID2, nic2Addrs)
- }
- if containsV6Addr(nic2Addrs, e2Addr2) {
- t.Errorf("still have %s in the list of addresses for NIC(%d): %+v", e2Addr2, nicID2, nic2Addrs)
- }
- }
-
- // Should not get any more events (invalidation timers should have been
- // cancelled when the NDP state was cleaned up).
- clock.Advance(lifetimeSeconds * time.Second)
- select {
- case <-ndpDisp.offLinkRouteC:
- t.Error("unexpected off-link route event")
- default:
- }
- select {
- case <-ndpDisp.prefixC:
- t.Error("unexpected prefix event")
- default:
- }
- select {
- case <-ndpDisp.autoGenAddrC:
- t.Error("unexpected auto-generated address event")
- default:
- }
- })
- }
-}
-
- // TestDHCPv6ConfigurationFromNDPDA tests that the NDPDispatcher is properly
- // informed when an RA updates which configurations are available via DHCPv6.
-func TestDHCPv6ConfigurationFromNDPDA(t *testing.T) {
- const nicID = 1
-
- ndpDisp := ndpDispatcher{
- dhcpv6ConfigurationC: make(chan ndpDHCPv6Event, 1),
- }
- e := channel.New(0, 1280, linkAddr1)
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- NDPConfigs: ipv6.NDPConfigurations{
- HandleRAs: ipv6.HandlingRAsEnabledWhenForwardingDisabled,
- },
- NDPDisp: &ndpDisp,
- })},
- })
-
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
-
- expectDHCPv6Event := func(configuration ipv6.DHCPv6ConfigurationFromNDPRA) {
- t.Helper()
- select {
- case e := <-ndpDisp.dhcpv6ConfigurationC:
- if diff := cmp.Diff(ndpDHCPv6Event{nicID: nicID, configuration: configuration}, e, cmp.AllowUnexported(e)); diff != "" {
- t.Errorf("dhcpv6 event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("expected DHCPv6 configuration event")
- }
- }
-
- expectNoDHCPv6Event := func() {
- t.Helper()
- select {
- case <-ndpDisp.dhcpv6ConfigurationC:
- t.Fatal("unexpected DHCPv6 configuration event")
- default:
- }
- }
-
- // Even if the first RA reports no DHCPv6 configurations are available, the
- // dispatcher should get an event.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithDHCPv6(llAddr2, false, false))
- expectDHCPv6Event(ipv6.DHCPv6NoConfiguration)
- // Receiving the same update again should not result in an event to the
- // dispatcher.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithDHCPv6(llAddr2, false, false))
- expectNoDHCPv6Event()
-
- // Receive an RA that updates the DHCPv6 configuration to Other
- // Configurations.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithDHCPv6(llAddr2, false, true))
- expectDHCPv6Event(ipv6.DHCPv6OtherConfigurations)
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithDHCPv6(llAddr2, false, true))
- expectNoDHCPv6Event()
-
- // Receive an RA that updates the DHCPv6 configuration to Managed Address.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithDHCPv6(llAddr2, true, false))
- expectDHCPv6Event(ipv6.DHCPv6ManagedAddress)
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithDHCPv6(llAddr2, true, false))
- expectNoDHCPv6Event()
-
- // Receive an RA that updates the DHCPv6 configuration to none.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithDHCPv6(llAddr2, false, false))
- expectDHCPv6Event(ipv6.DHCPv6NoConfiguration)
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithDHCPv6(llAddr2, false, false))
- expectNoDHCPv6Event()
-
- // Receive an RA that updates the DHCPv6 configuration to Managed Address.
- //
- // Note, when the M flag is set, the O flag is redundant.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithDHCPv6(llAddr2, true, true))
- expectDHCPv6Event(ipv6.DHCPv6ManagedAddress)
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithDHCPv6(llAddr2, true, true))
- expectNoDHCPv6Event()
- // Even though the DHCPv6 flags are different, the effective configuration is
- // the same so we should not receive a new event.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithDHCPv6(llAddr2, true, false))
- expectNoDHCPv6Event()
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithDHCPv6(llAddr2, true, true))
- expectNoDHCPv6Event()
-
- // Receive an RA that updates the DHCPv6 configuration to Other
- // Configurations.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithDHCPv6(llAddr2, false, true))
- expectDHCPv6Event(ipv6.DHCPv6OtherConfigurations)
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithDHCPv6(llAddr2, false, true))
- expectNoDHCPv6Event()
-
- // Cycling the NIC should cause the last DHCPv6 configuration to be cleared.
- if err := s.DisableNIC(nicID); err != nil {
- t.Fatalf("s.DisableNIC(%d): %s", nicID, err)
- }
- if err := s.EnableNIC(nicID); err != nil {
- t.Fatalf("s.EnableNIC(%d): %s", nicID, err)
- }
-
- // Receive an RA that updates the DHCPv6 configuration to Other
- // Configurations.
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithDHCPv6(llAddr2, false, true))
- expectDHCPv6Event(ipv6.DHCPv6OtherConfigurations)
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithDHCPv6(llAddr2, false, true))
- expectNoDHCPv6Event()
-}
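The sequence above boils down to a simple precedence rule over the RA's M and O flags: M wins, then O, then none. A minimal sketch of that mapping, assuming a hypothetical helper name (effectiveDHCPv6Config is not part of the ipv6 package API):

func effectiveDHCPv6Config(managed, other bool) ipv6.DHCPv6ConfigurationFromNDPRA {
	switch {
	case managed:
		// When the M flag is set, the O flag is redundant.
		return ipv6.DHCPv6ManagedAddress
	case other:
		return ipv6.DHCPv6OtherConfigurations
	default:
		return ipv6.DHCPv6NoConfiguration
	}
}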
-
-var _ rand.Source = (*savingRandSource)(nil)
-
-type savingRandSource struct {
- s rand.Source
-
- lastInt63 int64
-}
-
-func (d *savingRandSource) Int63() int64 {
- i := d.s.Int63()
- d.lastInt63 = i
- return i
-}
-func (d *savingRandSource) Seed(seed int64) {
- d.s.Seed(seed)
-}
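Because the stack draws its randomized solicitation delay from this source, a test can replay the same reduction on the captured value to recover the delay that was chosen. A minimal sketch, mirroring the computation in TestRouterSolicitation below (recoverDelay is an illustrative name, not an exported helper):

func recoverDelay(src *savingRandSource, maxDelay time.Duration) time.Duration {
	if maxDelay == 0 {
		return 0
	}
	// One Int63 was consumed to pick a delay in [0, maxDelay); repeating the
	// modulo reduction on the saved value yields the same delay.
	return time.Duration(src.lastInt63) % maxDelay
}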
-
-// TestRouterSolicitation tests the initial Router Solicitations that are sent
-// when a NIC newly becomes enabled.
-func TestRouterSolicitation(t *testing.T) {
- const nicID = 1
-
- tests := []struct {
- name string
- linkHeaderLen uint16
- linkAddr tcpip.LinkAddress
- nicAddr tcpip.Address
- expectedSrcAddr tcpip.Address
- expectedNDPOpts []header.NDPOption
- maxRtrSolicit uint8
- rtrSolicitInt time.Duration
- effectiveRtrSolicitInt time.Duration
- maxRtrSolicitDelay time.Duration
- effectiveMaxRtrSolicitDelay time.Duration
- }{
- {
- name: "Single RS with 2s delay and interval",
- expectedSrcAddr: header.IPv6Any,
- maxRtrSolicit: 1,
- rtrSolicitInt: 2 * time.Second,
- effectiveRtrSolicitInt: 2 * time.Second,
- maxRtrSolicitDelay: 2 * time.Second,
- effectiveMaxRtrSolicitDelay: 2 * time.Second,
- },
- {
- name: "Single RS with 4s delay and interval",
- expectedSrcAddr: header.IPv6Any,
- maxRtrSolicit: 1,
- rtrSolicitInt: 4 * time.Second,
- effectiveRtrSolicitInt: 4 * time.Second,
- maxRtrSolicitDelay: 4 * time.Second,
- effectiveMaxRtrSolicitDelay: 4 * time.Second,
- },
- {
- name: "Two RS with delay",
- linkHeaderLen: 1,
- nicAddr: llAddr1,
- expectedSrcAddr: llAddr1,
- maxRtrSolicit: 2,
- rtrSolicitInt: 2 * time.Second,
- effectiveRtrSolicitInt: 2 * time.Second,
- maxRtrSolicitDelay: 500 * time.Millisecond,
- effectiveMaxRtrSolicitDelay: 500 * time.Millisecond,
- },
- {
- name: "Single RS without delay",
- linkHeaderLen: 2,
- linkAddr: linkAddr1,
- nicAddr: llAddr1,
- expectedSrcAddr: llAddr1,
- expectedNDPOpts: []header.NDPOption{
- header.NDPSourceLinkLayerAddressOption(linkAddr1),
- },
- maxRtrSolicit: 1,
- rtrSolicitInt: 2 * time.Second,
- effectiveRtrSolicitInt: 2 * time.Second,
- maxRtrSolicitDelay: 0,
- effectiveMaxRtrSolicitDelay: 0,
- },
- {
- name: "Two RS without delay and invalid zero interval",
- linkHeaderLen: 3,
- linkAddr: linkAddr1,
- expectedSrcAddr: header.IPv6Any,
- maxRtrSolicit: 2,
- rtrSolicitInt: 0,
- effectiveRtrSolicitInt: 4 * time.Second,
- maxRtrSolicitDelay: 0,
- effectiveMaxRtrSolicitDelay: 0,
- },
- {
- name: "Three RS without delay",
- linkAddr: linkAddr1,
- expectedSrcAddr: header.IPv6Any,
- maxRtrSolicit: 3,
- rtrSolicitInt: 500 * time.Millisecond,
- effectiveRtrSolicitInt: 500 * time.Millisecond,
- maxRtrSolicitDelay: 0,
- effectiveMaxRtrSolicitDelay: 0,
- },
- {
- name: "Two RS with invalid negative delay",
- linkAddr: linkAddr1,
- expectedSrcAddr: header.IPv6Any,
- maxRtrSolicit: 2,
- rtrSolicitInt: time.Second,
- effectiveRtrSolicitInt: time.Second,
- maxRtrSolicitDelay: -3 * time.Second,
- effectiveMaxRtrSolicitDelay: time.Second,
- },
- }
-
- subTests := []struct {
- name string
- handleRAs ipv6.HandleRAsConfiguration
- afterFirstRS func(*testing.T, *stack.Stack)
- }{
- {
- name: "Handle RAs when forwarding disabled",
- handleRAs: ipv6.HandlingRAsEnabledWhenForwardingDisabled,
- afterFirstRS: func(*testing.T, *stack.Stack) {},
- },
-
- // Enabling forwarding when RAs are always configured to be handled
- // should not stop router solicitations.
- {
- name: "Handle RAs always",
- handleRAs: ipv6.HandlingRAsAlwaysEnabled,
- afterFirstRS: func(t *testing.T, s *stack.Stack) {
- if err := s.SetForwardingDefaultAndAllNICs(ipv6.ProtocolNumber, true); err != nil {
- t.Fatalf("SetForwardingDefaultAndAllNICs(%d, true): %s", ipv6.ProtocolNumber, err)
- }
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- for _, subTest := range subTests {
- t.Run(subTest.name, func(t *testing.T) {
- clock := faketime.NewManualClock()
- e := channelLinkWithHeaderLength{
- Endpoint: channel.New(int(test.maxRtrSolicit), 1280, test.linkAddr),
- headerLength: test.linkHeaderLen,
- }
- e.Endpoint.LinkEPCapabilities |= stack.CapabilityResolutionRequired
- waitForPkt := func(timeout time.Duration) {
- t.Helper()
-
- clock.Advance(timeout)
- p, ok := e.Read()
- if !ok {
- t.Fatal("expected router solicitation packet")
- }
-
- if p.Proto != header.IPv6ProtocolNumber {
- t.Fatalf("got Proto = %d, want = %d", p.Proto, header.IPv6ProtocolNumber)
- }
-
- // Make sure the right remote link address is used.
- if want := header.EthernetAddressFromMulticastIPv6Address(header.IPv6AllRoutersLinkLocalMulticastAddress); p.Route.RemoteLinkAddress != want {
- t.Errorf("got remote link address = %s, want = %s", p.Route.RemoteLinkAddress, want)
- }
-
- checker.IPv6(t, stack.PayloadSince(p.Pkt.NetworkHeader()),
- checker.SrcAddr(test.expectedSrcAddr),
- checker.DstAddr(header.IPv6AllRoutersLinkLocalMulticastAddress),
- checker.TTL(header.NDPHopLimit),
- checker.NDPRS(checker.NDPRSOptions(test.expectedNDPOpts)),
- )
-
- if l, want := p.Pkt.AvailableHeaderBytes(), int(test.linkHeaderLen); l != want {
- t.Errorf("got p.Pkt.AvailableHeaderBytes() = %d; want = %d", l, want)
- }
- }
- waitForNothing := func(timeout time.Duration) {
- t.Helper()
-
- clock.Advance(timeout)
- if p, ok := e.Read(); ok {
- t.Fatalf("unexpectedly got a packet = %#v", p)
- }
- }
- randSource := savingRandSource{
- s: rand.NewSource(time.Now().UnixNano()),
- }
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- NDPConfigs: ipv6.NDPConfigurations{
- HandleRAs: subTest.handleRAs,
- MaxRtrSolicitations: test.maxRtrSolicit,
- RtrSolicitationInterval: test.rtrSolicitInt,
- MaxRtrSolicitationDelay: test.maxRtrSolicitDelay,
- },
- })},
- Clock: clock,
- RandSource: &randSource,
- })
-
- opts := stack.NICOptions{Disabled: true}
- if err := s.CreateNICWithOptions(nicID, &e, opts); err != nil {
- t.Fatalf("CreateNICWithOptions(%d, _, %#v) = %s", nicID, opts, err)
- }
-
- if addr := test.nicAddr; addr != "" {
- if err := s.AddAddress(nicID, header.IPv6ProtocolNumber, addr); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s) = %s", nicID, header.IPv6ProtocolNumber, addr, err)
- }
- }
-
- if err := s.EnableNIC(nicID); err != nil {
- t.Fatalf("EnableNIC(%d): %s", nicID, err)
- }
-
- // Make sure each RS is sent at the right time.
- remaining := test.maxRtrSolicit
- if remaining != 0 {
- maxRtrSolicitDelay := test.maxRtrSolicitDelay
- if maxRtrSolicitDelay < 0 {
- maxRtrSolicitDelay = ipv6.DefaultNDPConfigurations().MaxRtrSolicitationDelay
- }
- var actualRtrSolicitDelay time.Duration
- if maxRtrSolicitDelay != 0 {
- actualRtrSolicitDelay = time.Duration(randSource.lastInt63) % maxRtrSolicitDelay
- }
- waitForPkt(actualRtrSolicitDelay)
- remaining--
- }
-
- subTest.afterFirstRS(t, s)
-
- for ; remaining != 0; remaining-- {
- if test.effectiveRtrSolicitInt != 0 {
- waitForNothing(test.effectiveRtrSolicitInt - time.Nanosecond)
- waitForPkt(time.Nanosecond)
- } else {
- waitForPkt(0)
- }
- }
-
- // Make sure no more RS.
- if test.effectiveRtrSolicitInt > test.effectiveMaxRtrSolicitDelay {
- waitForNothing(test.effectiveRtrSolicitInt)
- } else {
- waitForNothing(test.effectiveMaxRtrSolicitDelay)
- }
-
- if got, want := s.Stats().ICMP.V6.PacketsSent.RouterSolicit.Value(), uint64(test.maxRtrSolicit); got != want {
- t.Fatalf("got sent RouterSolicit = %d, want = %d", got, want)
- }
- })
- }
- })
- }
-}
-
-func TestStopStartSolicitingRouters(t *testing.T) {
- const nicID = 1
- const delay = 0
- const interval = 500 * time.Millisecond
- const maxRtrSolicitations = 3
-
- tests := []struct {
- name string
- startFn func(t *testing.T, s *stack.Stack)
- // first is used to tell stopFn that it is being called for the first time
- // after router solicitations were last enabled.
- stopFn func(t *testing.T, s *stack.Stack, first bool)
- }{
- // Tests that when forwarding is enabled or disabled, router solicitations
- // are stopped or started, respectively.
- {
- name: "Enable and disable forwarding",
- startFn: func(t *testing.T, s *stack.Stack) {
- t.Helper()
-
- if err := s.SetForwardingDefaultAndAllNICs(ipv6.ProtocolNumber, false); err != nil {
- t.Fatalf("SetForwardingDefaultAndAllNICs(%d, false): %s", ipv6.ProtocolNumber, err)
- }
- },
- stopFn: func(t *testing.T, s *stack.Stack, _ bool) {
- t.Helper()
-
- if err := s.SetForwardingDefaultAndAllNICs(ipv6.ProtocolNumber, true); err != nil {
- t.Fatalf("SetForwardingDefaultAndAllNICs(%d, true): %s", ipv6.ProtocolNumber, err)
- }
- },
- },
-
- // Tests that when a NIC is enabled or disabled, router solicitations
- // are started or stopped, respectively.
- {
- name: "Enable and disable NIC",
- startFn: func(t *testing.T, s *stack.Stack) {
- t.Helper()
-
- if err := s.EnableNIC(nicID); err != nil {
- t.Fatalf("s.EnableNIC(%d): %s", nicID, err)
- }
- },
- stopFn: func(t *testing.T, s *stack.Stack, _ bool) {
- t.Helper()
-
- if err := s.DisableNIC(nicID); err != nil {
- t.Fatalf("s.DisableNIC(%d): %s", nicID, err)
- }
- },
- },
-
-		// Tests that when a NIC is removed, router solicitations are stopped. We
-		// cannot start router solicitations on a removed NIC.
- {
- name: "Remove NIC",
- stopFn: func(t *testing.T, s *stack.Stack, first bool) {
- t.Helper()
-
- // Only try to remove the NIC the first time stopFn is called since it's
- // impossible to remove an already removed NIC.
- if !first {
- return
- }
-
- if err := s.RemoveNIC(nicID); err != nil {
- t.Fatalf("s.RemoveNIC(%d): %s", nicID, err)
- }
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- e := channel.New(maxRtrSolicitations, 1280, linkAddr1)
- waitForPkt := func(clock *faketime.ManualClock, timeout time.Duration) {
- t.Helper()
-
- clock.Advance(timeout)
- p, ok := e.Read()
- if !ok {
- t.Fatal("timed out waiting for packet")
- }
-
- if p.Proto != header.IPv6ProtocolNumber {
- t.Fatalf("got Proto = %d, want = %d", p.Proto, header.IPv6ProtocolNumber)
- }
- checker.IPv6(t, stack.PayloadSince(p.Pkt.NetworkHeader()),
- checker.SrcAddr(header.IPv6Any),
- checker.DstAddr(header.IPv6AllRoutersLinkLocalMulticastAddress),
- checker.TTL(header.NDPHopLimit),
- checker.NDPRS())
- }
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- NDPConfigs: ipv6.NDPConfigurations{
- HandleRAs: ipv6.HandlingRAsEnabledWhenForwardingDisabled,
- MaxRtrSolicitations: maxRtrSolicitations,
- RtrSolicitationInterval: interval,
- MaxRtrSolicitationDelay: delay,
- },
- })},
- Clock: clock,
- })
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
-
- // Stop soliciting routers.
- test.stopFn(t, s, true /* first */)
- clock.Advance(delay)
- if _, ok := e.Read(); ok {
- // A single RS may have been sent before solicitations were stopped.
- clock.Advance(interval)
- if _, ok = e.Read(); ok {
- t.Fatal("should not have sent more than one RS message")
- }
- }
-
-			// Stopping router solicitations after they have already been stopped
-			// should do nothing.
- test.stopFn(t, s, false /* first */)
- clock.Advance(delay)
- if _, ok := e.Read(); ok {
-				t.Fatal("unexpectedly got a packet after router solicitation has been stopped")
- }
-
-			// If test.startFn is nil, there is no way to restart router solicitations.
- if test.startFn == nil {
- return
- }
-
- // Start soliciting routers.
- test.startFn(t, s)
- waitForPkt(clock, delay)
- waitForPkt(clock, interval)
- waitForPkt(clock, interval)
- clock.Advance(interval)
- if _, ok := e.Read(); ok {
- t.Fatal("unexpectedly got an extra packet after sending out the expected RSs")
- }
-
-			// Starting router solicitations after they have already completed should
-			// do nothing.
- test.startFn(t, s)
- clock.Advance(interval)
- if _, ok := e.Read(); ok {
- t.Fatal("unexpectedly got a packet after finishing router solicitations")
- }
- })
- }
-}
diff --git a/pkg/tcpip/stack/neighbor_cache_test.go b/pkg/tcpip/stack/neighbor_cache_test.go
deleted file mode 100644
index 7de25fe37..000000000
--- a/pkg/tcpip/stack/neighbor_cache_test.go
+++ /dev/null
@@ -1,1584 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package stack
-
-import (
- "fmt"
- "math"
- "math/rand"
- "strings"
- "sync"
- "sync/atomic"
- "testing"
- "time"
-
- "github.com/google/go-cmp/cmp"
- "github.com/google/go-cmp/cmp/cmpopts"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/faketime"
-)
-
-const (
- // entryStoreSize is the default number of entries that will be generated and
- // added to the entry store. This number needs to be larger than the size of
- // the neighbor cache to give ample opportunity for verifying behavior during
- // cache overflows. Four times the size of the neighbor cache allows for
- // three complete cache overflows.
- entryStoreSize = 4 * neighborCacheSize
-
- // typicalLatency is the typical latency for an ARP or NDP packet to travel
- // to a router and back.
- typicalLatency = time.Millisecond
-
- // testEntryBroadcastAddr is a special address that indicates a packet should
- // be sent to all nodes.
- testEntryBroadcastAddr = tcpip.Address("broadcast")
-
- // testEntryBroadcastLinkAddr is a special link address sent back to
- // multicast neighbor probes.
- testEntryBroadcastLinkAddr = tcpip.LinkAddress("mac_broadcast")
-
- // infiniteDuration indicates that a task will not occur in our lifetime.
- infiniteDuration = time.Duration(math.MaxInt64)
-)
-
-// unorderedEventsDiffOpts returns options passed to cmp.Diff to sort slices of
-// events for cases where ordering must be ignored.
-func unorderedEventsDiffOpts() []cmp.Option {
- return []cmp.Option{
- cmpopts.SortSlices(func(a, b testEntryEventInfo) bool {
- return strings.Compare(string(a.Entry.Addr), string(b.Entry.Addr)) < 0
- }),
- }
-}
-
-// unorderedEntriesDiffOpts returns options passed to cmp.Diff to sort slices of
-// entries for cases where ordering must be ignored.
-func unorderedEntriesDiffOpts() []cmp.Option {
- return []cmp.Option{
- cmpopts.SortSlices(func(a, b NeighborEntry) bool {
- return strings.Compare(string(a.Addr), string(b.Addr)) < 0
- }),
- }
-}
-
-func newTestNeighborResolver(nudDisp NUDDispatcher, config NUDConfigurations, clock tcpip.Clock) *testNeighborResolver {
- config.resetInvalidFields()
- rng := rand.New(rand.NewSource(time.Now().UnixNano()))
- linkRes := &testNeighborResolver{
- clock: clock,
- entries: newTestEntryStore(),
- delay: typicalLatency,
- }
- linkRes.neigh.init(&nic{
- stack: &Stack{
- clock: clock,
- nudDisp: nudDisp,
- nudConfigs: config,
- randomGenerator: rng,
- },
- id: 1,
- stats: makeNICStats(tcpip.NICStats{}.FillIn()),
- }, linkRes)
- return linkRes
-}
-
-// testEntryStore contains a set of IP to NeighborEntry mappings.
-type testEntryStore struct {
- mu sync.RWMutex
- entriesMap map[tcpip.Address]NeighborEntry
-}
-
-func toAddress(i uint16) tcpip.Address {
- return tcpip.Address([]byte{
- 1,
- 0,
- byte(i >> 8),
- byte(i),
- })
-}
-
-func toLinkAddress(i uint16) tcpip.LinkAddress {
- return tcpip.LinkAddress([]byte{
- 1,
- 0,
- 0,
- 0,
- byte(i >> 8),
- byte(i),
- })
-}
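As a quick worked example of the encoding above, index 260 (0x0104) splits into high byte 1 and low byte 4, so the helpers produce:

	toAddress(260)     // tcpip.Address([]byte{1, 0, 1, 4})
	toLinkAddress(260) // tcpip.LinkAddress([]byte{1, 0, 0, 0, 1, 4})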
-
-// newTestEntryStore returns a testEntryStore pre-populated with entries.
-func newTestEntryStore() *testEntryStore {
- store := &testEntryStore{
- entriesMap: make(map[tcpip.Address]NeighborEntry),
- }
- for i := uint16(0); i < entryStoreSize; i++ {
- addr := toAddress(i)
- linkAddr := toLinkAddress(i)
-
- store.entriesMap[addr] = NeighborEntry{
- Addr: addr,
- LinkAddr: linkAddr,
- }
- }
- return store
-}
-
-// size returns the number of entries in the store.
-func (s *testEntryStore) size() uint16 {
- s.mu.RLock()
- defer s.mu.RUnlock()
- return uint16(len(s.entriesMap))
-}
-
-// entry returns the entry at index i. Returns an empty entry and false if i is
-// out of bounds.
-func (s *testEntryStore) entry(i uint16) (NeighborEntry, bool) {
- return s.entryByAddr(toAddress(i))
-}
-
-// entryByAddr returns the entry matching addr for situations when the index is
-// not available. Returns an empty entry and false if no entries match addr.
-func (s *testEntryStore) entryByAddr(addr tcpip.Address) (NeighborEntry, bool) {
- s.mu.RLock()
- defer s.mu.RUnlock()
- entry, ok := s.entriesMap[addr]
- return entry, ok
-}
-
-// entries returns all entries in the store.
-func (s *testEntryStore) entries() []NeighborEntry {
- entries := make([]NeighborEntry, 0, len(s.entriesMap))
- s.mu.RLock()
- defer s.mu.RUnlock()
- for i := uint16(0); i < entryStoreSize; i++ {
- addr := toAddress(i)
- if entry, ok := s.entriesMap[addr]; ok {
- entries = append(entries, entry)
- }
- }
- return entries
-}
-
-// set modifies the link addresses of an entry.
-func (s *testEntryStore) set(i uint16, linkAddr tcpip.LinkAddress) {
- addr := toAddress(i)
- s.mu.Lock()
- defer s.mu.Unlock()
- if entry, ok := s.entriesMap[addr]; ok {
- entry.LinkAddr = linkAddr
- s.entriesMap[addr] = entry
- }
-}
-
-// testNeighborResolver implements LinkAddressResolver to emulate sending a
-// neighbor probe.
-type testNeighborResolver struct {
- clock tcpip.Clock
- neigh neighborCache
- entries *testEntryStore
- delay time.Duration
- onLinkAddressRequest func()
- dropReplies bool
-}
-
-var _ LinkAddressResolver = (*testNeighborResolver)(nil)
-
-func (r *testNeighborResolver) LinkAddressRequest(targetAddr, _ tcpip.Address, _ tcpip.LinkAddress) tcpip.Error {
- if !r.dropReplies {
- // Delay handling the request to emulate network latency.
- r.clock.AfterFunc(r.delay, func() {
- r.fakeRequest(targetAddr)
- })
- }
-
- // Execute post address resolution action, if available.
- if f := r.onLinkAddressRequest; f != nil {
- f()
- }
- return nil
-}
-
-// fakeRequest emulates handling the reply to a link address request.
-func (r *testNeighborResolver) fakeRequest(addr tcpip.Address) {
- if entry, ok := r.entries.entryByAddr(addr); ok {
- r.neigh.handleConfirmation(addr, entry.LinkAddr, ReachabilityConfirmationFlags{
- Solicited: true,
- Override: false,
- IsRouter: false,
- })
- }
-}
-
-func (*testNeighborResolver) ResolveStaticAddress(addr tcpip.Address) (tcpip.LinkAddress, bool) {
- if addr == testEntryBroadcastAddr {
- return testEntryBroadcastLinkAddr, true
- }
- return "", false
-}
-
-func (*testNeighborResolver) LinkAddressProtocol() tcpip.NetworkProtocolNumber {
- return 0
-}
-
-func TestNeighborCacheGetConfig(t *testing.T) {
- nudDisp := testNUDDispatcher{}
- c := DefaultNUDConfigurations()
- clock := faketime.NewManualClock()
- linkRes := newTestNeighborResolver(&nudDisp, c, clock)
-
- if got, want := linkRes.neigh.config(), c; got != want {
- t.Errorf("got linkRes.neigh.config() = %+v, want = %+v", got, want)
- }
-
- // No events should have been dispatched.
- nudDisp.mu.Lock()
- defer nudDisp.mu.Unlock()
- if diff := cmp.Diff([]testEntryEventInfo(nil), nudDisp.mu.events); diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
-}
-
-func TestNeighborCacheSetConfig(t *testing.T) {
- nudDisp := testNUDDispatcher{}
- c := DefaultNUDConfigurations()
- clock := faketime.NewManualClock()
- linkRes := newTestNeighborResolver(&nudDisp, c, clock)
-
- c.MinRandomFactor = 1
- c.MaxRandomFactor = 1
- linkRes.neigh.setConfig(c)
-
- if got, want := linkRes.neigh.config(), c; got != want {
- t.Errorf("got linkRes.neigh.config() = %+v, want = %+v", got, want)
- }
-
- // No events should have been dispatched.
- nudDisp.mu.Lock()
- defer nudDisp.mu.Unlock()
- if diff := cmp.Diff([]testEntryEventInfo(nil), nudDisp.mu.events); diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
-}
-
-func addReachableEntryWithRemoved(nudDisp *testNUDDispatcher, clock *faketime.ManualClock, linkRes *testNeighborResolver, entry NeighborEntry, removed []NeighborEntry) error {
- var gotLinkResolutionResult LinkResolutionResult
-
- _, ch, err := linkRes.neigh.entry(entry.Addr, "", func(r LinkResolutionResult) {
- gotLinkResolutionResult = r
- })
- if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- return fmt.Errorf("got linkRes.neigh.entry(%s, '', _) = %v, want = %s", entry.Addr, err, &tcpip.ErrWouldBlock{})
- }
-
- {
- var wantEvents []testEntryEventInfo
-
- for _, removedEntry := range removed {
- wantEvents = append(wantEvents, testEntryEventInfo{
- EventType: entryTestRemoved,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: removedEntry.Addr,
- LinkAddr: removedEntry.LinkAddr,
- State: Reachable,
- UpdatedAt: clock.Now(),
- },
- })
- }
-
- wantEvents = append(wantEvents, testEntryEventInfo{
- EventType: entryTestAdded,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: "",
- State: Incomplete,
- UpdatedAt: clock.Now(),
- },
- })
-
- nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, nudDisp.mu.events)
- nudDisp.mu.events = nil
- nudDisp.mu.Unlock()
- if diff != "" {
- return fmt.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- }
-
- clock.Advance(typicalLatency)
-
- select {
- case <-ch:
- default:
- return fmt.Errorf("expected notification from done channel returned by linkRes.neigh.entry(%s, '', _)", entry.Addr)
- }
- wantLinkResolutionResult := LinkResolutionResult{LinkAddress: entry.LinkAddr, Err: nil}
- if diff := cmp.Diff(wantLinkResolutionResult, gotLinkResolutionResult); diff != "" {
- return fmt.Errorf("got link resolution result mismatch (-want +got):\n%s", diff)
- }
-
- {
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestChanged,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
- UpdatedAt: clock.Now(),
- },
- },
- }
- nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, nudDisp.mu.events)
- nudDisp.mu.events = nil
- nudDisp.mu.Unlock()
- if diff != "" {
- return fmt.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- }
-
- return nil
-}
-
-func addReachableEntry(nudDisp *testNUDDispatcher, clock *faketime.ManualClock, linkRes *testNeighborResolver, entry NeighborEntry) error {
- return addReachableEntryWithRemoved(nudDisp, clock, linkRes, entry, nil /* removed */)
-}
-
-func TestNeighborCacheEntry(t *testing.T) {
- c := DefaultNUDConfigurations()
- nudDisp := testNUDDispatcher{}
- clock := faketime.NewManualClock()
- linkRes := newTestNeighborResolver(&nudDisp, c, clock)
-
- entry, ok := linkRes.entries.entry(0)
- if !ok {
- t.Fatal("got linkRes.entries.entry(0) = _, false, want = true ")
- }
- if err := addReachableEntry(&nudDisp, clock, linkRes, entry); err != nil {
- t.Fatalf("addReachableEntry(...) = %s", err)
- }
-
- if _, _, err := linkRes.neigh.entry(entry.Addr, "", nil); err != nil {
- t.Fatalf("unexpected error from linkRes.neigh.entry(%s, '', nil): %s", entry.Addr, err)
- }
-
- // No more events should have been dispatched.
- nudDisp.mu.Lock()
- defer nudDisp.mu.Unlock()
- if diff := cmp.Diff([]testEntryEventInfo(nil), nudDisp.mu.events); diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
-}
-
-func TestNeighborCacheRemoveEntry(t *testing.T) {
- config := DefaultNUDConfigurations()
-
- nudDisp := testNUDDispatcher{}
- clock := faketime.NewManualClock()
- linkRes := newTestNeighborResolver(&nudDisp, config, clock)
-
- entry, ok := linkRes.entries.entry(0)
- if !ok {
- t.Fatal("got linkRes.entries.entry(0) = _, false, want = true ")
- }
- if err := addReachableEntry(&nudDisp, clock, linkRes, entry); err != nil {
- t.Fatalf("addReachableEntry(...) = %s", err)
- }
-
- linkRes.neigh.removeEntry(entry.Addr)
-
- {
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestRemoved,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
- UpdatedAt: clock.Now(),
- },
- },
- }
- nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, nudDisp.mu.events)
- nudDisp.mu.Unlock()
- if diff != "" {
- t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- }
-
- {
- _, _, err := linkRes.neigh.entry(entry.Addr, "", nil)
- if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Errorf("got linkRes.neigh.entry(%s, '', nil) = %v, want = %s", entry.Addr, err, &tcpip.ErrWouldBlock{})
- }
- }
-}
-
-type testContext struct {
- clock *faketime.ManualClock
- linkRes *testNeighborResolver
- nudDisp *testNUDDispatcher
-}
-
-func newTestContext(c NUDConfigurations) testContext {
- nudDisp := &testNUDDispatcher{}
- clock := faketime.NewManualClock()
- linkRes := newTestNeighborResolver(nudDisp, c, clock)
-
- return testContext{
- clock: clock,
- linkRes: linkRes,
- nudDisp: nudDisp,
- }
-}
-
-type overflowOptions struct {
- startAtEntryIndex uint16
- wantStaticEntries []NeighborEntry
-}
-
-func (c *testContext) overflowCache(opts overflowOptions) error {
- // Fill the neighbor cache to capacity to verify the LRU eviction strategy is
- // working properly after the entry removal.
- for i := opts.startAtEntryIndex; i < c.linkRes.entries.size(); i++ {
- var removedEntries []NeighborEntry
-
- // When beyond the full capacity, the cache will evict an entry as per the
- // LRU eviction strategy. Note that the number of static entries should not
- // affect the total number of dynamic entries that can be added.
- if i >= neighborCacheSize+opts.startAtEntryIndex {
- removedEntry, ok := c.linkRes.entries.entry(i - neighborCacheSize)
- if !ok {
- return fmt.Errorf("got linkRes.entries.entry(%d) = _, false, want = true", i-neighborCacheSize)
- }
- removedEntries = append(removedEntries, removedEntry)
- }
-
- entry, ok := c.linkRes.entries.entry(i)
- if !ok {
- return fmt.Errorf("got c.linkRes.entries.entry(%d) = _, false, want = true", i)
- }
- if err := addReachableEntryWithRemoved(c.nudDisp, c.clock, c.linkRes, entry, removedEntries); err != nil {
- return fmt.Errorf("addReachableEntryWithRemoved(...) = %s", err)
- }
- }
-
- // Expect to find only the most recent entries. The order of entries reported
- // by entries() is nondeterministic, so entries have to be sorted before
- // comparison.
- wantUnorderedEntries := opts.wantStaticEntries
- for i := c.linkRes.entries.size() - neighborCacheSize; i < c.linkRes.entries.size(); i++ {
- entry, ok := c.linkRes.entries.entry(i)
- if !ok {
- return fmt.Errorf("got c.linkRes.entries.entry(%d) = _, false, want = true", i)
- }
- durationReachableNanos := time.Duration(c.linkRes.entries.size()-i-1) * typicalLatency
- wantEntry := NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
- UpdatedAt: c.clock.Now().Add(-durationReachableNanos),
- }
- wantUnorderedEntries = append(wantUnorderedEntries, wantEntry)
- }
-
- if diff := cmp.Diff(wantUnorderedEntries, c.linkRes.neigh.entries(), unorderedEntriesDiffOpts()...); diff != "" {
- return fmt.Errorf("neighbor entries mismatch (-want, +got):\n%s", diff)
- }
-
- // No more events should have been dispatched.
- c.nudDisp.mu.Lock()
- defer c.nudDisp.mu.Unlock()
- if diff := cmp.Diff([]testEntryEventInfo(nil), c.nudDisp.mu.events); diff != "" {
- return fmt.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
-
- return nil
-}
-
-// TestNeighborCacheOverflow verifies that the LRU cache eviction strategy
-// respects the dynamic entry count.
-func TestNeighborCacheOverflow(t *testing.T) {
- config := DefaultNUDConfigurations()
- // Stay in Reachable so the cache can overflow
- config.BaseReachableTime = infiniteDuration
- config.MinRandomFactor = 1
- config.MaxRandomFactor = 1
-
- c := newTestContext(config)
- opts := overflowOptions{
- startAtEntryIndex: 0,
- }
- if err := c.overflowCache(opts); err != nil {
- t.Errorf("c.overflowCache(%+v): %s", opts, err)
- }
-}
-
-// TestNeighborCacheRemoveEntryThenOverflow verifies that the LRU cache
-// eviction strategy respects the dynamic entry count when an entry is removed.
-func TestNeighborCacheRemoveEntryThenOverflow(t *testing.T) {
- config := DefaultNUDConfigurations()
- // Stay in Reachable so the cache can overflow
- config.BaseReachableTime = infiniteDuration
- config.MinRandomFactor = 1
- config.MaxRandomFactor = 1
-
- c := newTestContext(config)
-
- // Add a dynamic entry
- entry, ok := c.linkRes.entries.entry(0)
- if !ok {
- t.Fatal("got c.linkRes.entries.entry(0) = _, false, want = true ")
- }
- if err := addReachableEntry(c.nudDisp, c.clock, c.linkRes, entry); err != nil {
- t.Fatalf("addReachableEntry(...) = %s", err)
- }
-
- // Remove the entry
- c.linkRes.neigh.removeEntry(entry.Addr)
-
- {
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestRemoved,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
- UpdatedAt: c.clock.Now(),
- },
- },
- }
- c.nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, c.nudDisp.mu.events)
- c.nudDisp.mu.events = nil
- c.nudDisp.mu.Unlock()
- if diff != "" {
- t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- }
-
- opts := overflowOptions{
- startAtEntryIndex: 0,
- }
- if err := c.overflowCache(opts); err != nil {
- t.Errorf("c.overflowCache(%+v): %s", opts, err)
- }
-}
-
-// TestNeighborCacheDuplicateStaticEntryWithSameLinkAddress verifies that
-// adding a duplicate static entry with the same link address does not dispatch
-// any events.
-func TestNeighborCacheDuplicateStaticEntryWithSameLinkAddress(t *testing.T) {
- config := DefaultNUDConfigurations()
- c := newTestContext(config)
-
- // Add a static entry
- entry, ok := c.linkRes.entries.entry(0)
- if !ok {
- t.Fatal("got c.linkRes.entries.entry(0) = _, false, want = true ")
- }
- staticLinkAddr := entry.LinkAddr + "static"
- c.linkRes.neigh.addStaticEntry(entry.Addr, staticLinkAddr)
-
- {
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestAdded,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: staticLinkAddr,
- State: Static,
- UpdatedAt: c.clock.Now(),
- },
- },
- }
- c.nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, c.nudDisp.mu.events)
- c.nudDisp.mu.events = nil
- c.nudDisp.mu.Unlock()
- if diff != "" {
- t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- }
-
- // Add a duplicate static entry with the same link address.
- c.linkRes.neigh.addStaticEntry(entry.Addr, staticLinkAddr)
-
- c.nudDisp.mu.Lock()
- defer c.nudDisp.mu.Unlock()
- if diff := cmp.Diff([]testEntryEventInfo(nil), c.nudDisp.mu.events); diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
-}
-
-// TestNeighborCacheDuplicateStaticEntryWithDifferentLinkAddress verifies that
-// adding a duplicate static entry with a different link address dispatches a
-// change event.
-func TestNeighborCacheDuplicateStaticEntryWithDifferentLinkAddress(t *testing.T) {
- config := DefaultNUDConfigurations()
- c := newTestContext(config)
-
- // Add a static entry
- entry, ok := c.linkRes.entries.entry(0)
- if !ok {
- t.Fatal("got c.linkRes.entries.entry(0) = _, false, want = true ")
- }
- staticLinkAddr := entry.LinkAddr + "static"
- c.linkRes.neigh.addStaticEntry(entry.Addr, staticLinkAddr)
-
- {
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestAdded,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: staticLinkAddr,
- State: Static,
- UpdatedAt: c.clock.Now(),
- },
- },
- }
- c.nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, c.nudDisp.mu.events)
- c.nudDisp.mu.events = nil
- c.nudDisp.mu.Unlock()
- if diff != "" {
- t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- }
-
- // Add a duplicate entry with a different link address
- staticLinkAddr += "duplicate"
- c.linkRes.neigh.addStaticEntry(entry.Addr, staticLinkAddr)
-
- {
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestChanged,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: staticLinkAddr,
- State: Static,
- UpdatedAt: c.clock.Now(),
- },
- },
- }
- c.nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, c.nudDisp.mu.events)
- c.nudDisp.mu.events = nil
- c.nudDisp.mu.Unlock()
- if diff != "" {
- t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- }
-}
-
-// TestNeighborCacheRemoveStaticEntryThenOverflow verifies that the LRU cache
-// eviction strategy respects the dynamic entry count when a static entry is
-// added then removed. In this case, the dynamic entry count shouldn't have
-// been touched.
-func TestNeighborCacheRemoveStaticEntryThenOverflow(t *testing.T) {
- config := DefaultNUDConfigurations()
- // Stay in Reachable so the cache can overflow
- config.BaseReachableTime = infiniteDuration
- config.MinRandomFactor = 1
- config.MaxRandomFactor = 1
-
- c := newTestContext(config)
-
- // Add a static entry
- entry, ok := c.linkRes.entries.entry(0)
- if !ok {
- t.Fatal("got c.linkRes.entries.entry(0) = _, false, want = true ")
- }
- staticLinkAddr := entry.LinkAddr + "static"
- c.linkRes.neigh.addStaticEntry(entry.Addr, staticLinkAddr)
-
- {
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestAdded,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: staticLinkAddr,
- State: Static,
- UpdatedAt: c.clock.Now(),
- },
- },
- }
- c.nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, c.nudDisp.mu.events)
- c.nudDisp.mu.events = nil
- c.nudDisp.mu.Unlock()
- if diff != "" {
- t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- }
-
- // Remove the static entry that was just added
- c.linkRes.neigh.removeEntry(entry.Addr)
-
- {
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestRemoved,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: staticLinkAddr,
- State: Static,
- UpdatedAt: c.clock.Now(),
- },
- },
- }
- c.nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, c.nudDisp.mu.events)
- c.nudDisp.mu.events = nil
- c.nudDisp.mu.Unlock()
- if diff != "" {
- t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- }
-
- opts := overflowOptions{
- startAtEntryIndex: 0,
- }
- if err := c.overflowCache(opts); err != nil {
- t.Errorf("c.overflowCache(%+v): %s", opts, err)
- }
-}
-
-// TestNeighborCacheOverwriteWithStaticEntryThenOverflow verifies that the LRU
-// cache eviction strategy correctly tracks the dynamic entry count when an
-// entry is overwritten by a static entry. Static entries should not count
-// towards the size of the LRU cache.
-func TestNeighborCacheOverwriteWithStaticEntryThenOverflow(t *testing.T) {
- config := DefaultNUDConfigurations()
- // Stay in Reachable so the cache can overflow
- config.BaseReachableTime = infiniteDuration
- config.MinRandomFactor = 1
- config.MaxRandomFactor = 1
-
- c := newTestContext(config)
-
- // Add a dynamic entry
- entry, ok := c.linkRes.entries.entry(0)
- if !ok {
- t.Fatal("got c.linkRes.entries.entry(0) = _, false, want = true ")
- }
- if err := addReachableEntry(c.nudDisp, c.clock, c.linkRes, entry); err != nil {
- t.Fatalf("addReachableEntry(...) = %s", err)
- }
-
- // Override the entry with a static one using the same address
- staticLinkAddr := entry.LinkAddr + "static"
- c.linkRes.neigh.addStaticEntry(entry.Addr, staticLinkAddr)
-
- {
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestRemoved,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
- UpdatedAt: c.clock.Now(),
- },
- },
- {
- EventType: entryTestAdded,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: staticLinkAddr,
- State: Static,
- UpdatedAt: c.clock.Now(),
- },
- },
- }
- c.nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, c.nudDisp.mu.events)
- c.nudDisp.mu.events = nil
- c.nudDisp.mu.Unlock()
- if diff != "" {
- t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- }
-
- opts := overflowOptions{
- startAtEntryIndex: 1,
- wantStaticEntries: []NeighborEntry{
- {
- Addr: entry.Addr,
- LinkAddr: staticLinkAddr,
- State: Static,
- UpdatedAt: c.clock.Now(),
- },
- },
- }
- if err := c.overflowCache(opts); err != nil {
- t.Errorf("c.overflowCache(%+v): %s", opts, err)
- }
-}
-
-func TestNeighborCacheAddStaticEntryThenOverflow(t *testing.T) {
- config := DefaultNUDConfigurations()
- // Stay in Reachable so the cache can overflow
- config.BaseReachableTime = infiniteDuration
- config.MinRandomFactor = 1
- config.MaxRandomFactor = 1
-
- c := newTestContext(config)
-
- entry, ok := c.linkRes.entries.entry(0)
- if !ok {
- t.Fatal("got c.linkRes.entries.entry(0) = _, false, want = true ")
- }
- c.linkRes.neigh.addStaticEntry(entry.Addr, entry.LinkAddr)
- e, _, err := c.linkRes.neigh.entry(entry.Addr, "", nil)
- if err != nil {
- t.Errorf("unexpected error from c.linkRes.neigh.entry(%s, \"\", nil): %s", entry.Addr, err)
- }
- want := NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Static,
- UpdatedAt: c.clock.Now(),
- }
- if diff := cmp.Diff(want, e); diff != "" {
- t.Errorf("c.linkRes.neigh.entry(%s, \"\", nil) mismatch (-want, +got):\n%s", entry.Addr, diff)
- }
-
- {
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestAdded,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Static,
- UpdatedAt: c.clock.Now(),
- },
- },
- }
- c.nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, c.nudDisp.mu.events)
- c.nudDisp.mu.events = nil
- c.nudDisp.mu.Unlock()
- if diff != "" {
- t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- }
-
- opts := overflowOptions{
- startAtEntryIndex: 1,
- wantStaticEntries: []NeighborEntry{
- {
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Static,
- UpdatedAt: c.clock.Now(),
- },
- },
- }
- if err := c.overflowCache(opts); err != nil {
- t.Errorf("c.overflowCache(%+v): %s", opts, err)
- }
-}
-
-func TestNeighborCacheClear(t *testing.T) {
- config := DefaultNUDConfigurations()
-
- nudDisp := testNUDDispatcher{}
- clock := faketime.NewManualClock()
- linkRes := newTestNeighborResolver(&nudDisp, config, clock)
-
- // Add a dynamic entry.
- entry, ok := linkRes.entries.entry(0)
- if !ok {
- t.Fatal("got linkRes.entries.entry(0) = _, false, want = true ")
- }
- if err := addReachableEntry(&nudDisp, clock, linkRes, entry); err != nil {
- t.Fatalf("addReachableEntry(...) = %s", err)
- }
-
- // Add a static entry.
- linkRes.neigh.addStaticEntry(entryTestAddr1, entryTestLinkAddr1)
-
- {
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestAdded,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: entryTestLinkAddr1,
- State: Static,
- UpdatedAt: clock.Now(),
- },
- },
- }
- nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, nudDisp.mu.events)
- nudDisp.mu.events = nil
- nudDisp.mu.Unlock()
- if diff != "" {
- t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- }
-
- // Clear should remove both dynamic and static entries.
- linkRes.neigh.clear()
-
- // Remove events dispatched from clear() have no deterministic order so they
- // need to be sorted before comparison.
- wantUnorderedEvents := []testEntryEventInfo{
- {
- EventType: entryTestRemoved,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
- UpdatedAt: clock.Now(),
- },
- },
- {
- EventType: entryTestRemoved,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: entryTestLinkAddr1,
- State: Static,
- UpdatedAt: clock.Now(),
- },
- },
- }
- nudDisp.mu.Lock()
- defer nudDisp.mu.Unlock()
- if diff := cmp.Diff(wantUnorderedEvents, nudDisp.mu.events, unorderedEventsDiffOpts()...); diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
-}
-
-// TestNeighborCacheClearThenOverflow verifies that the LRU cache eviction
-// strategy correctly tracks the dynamic entry count when all entries are
-// cleared.
-func TestNeighborCacheClearThenOverflow(t *testing.T) {
- config := DefaultNUDConfigurations()
- // Stay in Reachable so the cache can overflow
- config.BaseReachableTime = infiniteDuration
- config.MinRandomFactor = 1
- config.MaxRandomFactor = 1
-
- c := newTestContext(config)
-
- // Add a dynamic entry
- entry, ok := c.linkRes.entries.entry(0)
- if !ok {
- t.Fatal("got c.linkRes.entries.entry(0) = _, false, want = true ")
- }
- if err := addReachableEntry(c.nudDisp, c.clock, c.linkRes, entry); err != nil {
- t.Fatalf("addReachableEntry(...) = %s", err)
- }
-
- // Clear the cache.
- c.linkRes.neigh.clear()
-
- {
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestRemoved,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
- UpdatedAt: c.clock.Now(),
- },
- },
- }
- c.nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, c.nudDisp.mu.events)
- c.nudDisp.mu.events = nil
- c.nudDisp.mu.Unlock()
- if diff != "" {
- t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- }
-
- opts := overflowOptions{
- startAtEntryIndex: 0,
- }
- if err := c.overflowCache(opts); err != nil {
- t.Errorf("c.overflowCache(%+v): %s", opts, err)
- }
-}
-
-func TestNeighborCacheKeepFrequentlyUsed(t *testing.T) {
- config := DefaultNUDConfigurations()
- // Stay in Reachable so the cache can overflow
- config.BaseReachableTime = infiniteDuration
- config.MinRandomFactor = 1
- config.MaxRandomFactor = 1
-
- nudDisp := testNUDDispatcher{}
- clock := faketime.NewManualClock()
- linkRes := newTestNeighborResolver(&nudDisp, config, clock)
-
- startedAt := clock.Now()
-
- // The following logic is very similar to overflowCache, but
- // periodically refreshes the frequently used entry.
-
- // Fill the neighbor cache to capacity
- for i := uint16(0); i < neighborCacheSize; i++ {
- entry, ok := linkRes.entries.entry(i)
- if !ok {
- t.Fatalf("got linkRes.entries.entry(%d) = _, false, want = true", i)
- }
- if err := addReachableEntry(&nudDisp, clock, linkRes, entry); err != nil {
- t.Fatalf("addReachableEntry(...) = %s", err)
- }
- }
-
- frequentlyUsedEntry, ok := linkRes.entries.entry(0)
- if !ok {
- t.Fatal("got linkRes.entries.entry(0) = _, false, want = true ")
- }
-
- // Keep adding more entries
- for i := uint16(neighborCacheSize); i < linkRes.entries.size(); i++ {
- // Periodically refresh the frequently used entry
- if i%(neighborCacheSize/2) == 0 {
- if _, _, err := linkRes.neigh.entry(frequentlyUsedEntry.Addr, "", nil); err != nil {
- t.Errorf("unexpected error from linkRes.neigh.entry(%s, '', nil): %s", frequentlyUsedEntry.Addr, err)
- }
- }
-
- entry, ok := linkRes.entries.entry(i)
- if !ok {
- t.Fatalf("got linkRes.entries.entry(%d) = _, false, want = true", i)
- }
-
- // An entry should have been removed, as per the LRU eviction strategy
- removedEntry, ok := linkRes.entries.entry(i - neighborCacheSize + 1)
- if !ok {
- t.Fatalf("got linkRes.entries.entry(%d) = _, false, want = true", i-neighborCacheSize+1)
- }
-
- if err := addReachableEntryWithRemoved(&nudDisp, clock, linkRes, entry, []NeighborEntry{removedEntry}); err != nil {
- t.Fatalf("addReachableEntryWithRemoved(...) = %s", err)
- }
- }
-
- // Expect to find only the frequently used entry and the most recent entries.
- // The order of entries reported by entries() is nondeterministic, so entries
- // have to be sorted before comparison.
- wantUnsortedEntries := []NeighborEntry{
- {
- Addr: frequentlyUsedEntry.Addr,
- LinkAddr: frequentlyUsedEntry.LinkAddr,
- State: Reachable,
- // Can be inferred since the frequently used entry is the first to
- // be created and transitioned to Reachable.
- UpdatedAt: startedAt.Add(typicalLatency),
- },
- }
-
- for i := linkRes.entries.size() - neighborCacheSize + 1; i < linkRes.entries.size(); i++ {
- entry, ok := linkRes.entries.entry(i)
- if !ok {
- t.Fatalf("got linkRes.entries.entry(%d) = _, false, want = true", i)
- }
- durationReachableNanos := time.Duration(linkRes.entries.size()-i-1) * typicalLatency
- wantUnsortedEntries = append(wantUnsortedEntries, NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
- UpdatedAt: clock.Now().Add(-durationReachableNanos),
- })
- }
-
- if diff := cmp.Diff(wantUnsortedEntries, linkRes.neigh.entries(), unorderedEntriesDiffOpts()...); diff != "" {
- t.Errorf("neighbor entries mismatch (-want, +got):\n%s", diff)
- }
-
- // No more events should have been dispatched.
- nudDisp.mu.Lock()
- defer nudDisp.mu.Unlock()
- if diff := cmp.Diff([]testEntryEventInfo(nil), nudDisp.mu.events); diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
-}
-
-func TestNeighborCacheConcurrent(t *testing.T) {
- const concurrentProcesses = 16
-
- config := DefaultNUDConfigurations()
-
- nudDisp := testNUDDispatcher{}
- clock := faketime.NewManualClock()
- linkRes := newTestNeighborResolver(&nudDisp, config, clock)
-
- storeEntries := linkRes.entries.entries()
- for _, entry := range storeEntries {
- var wg sync.WaitGroup
- for r := 0; r < concurrentProcesses; r++ {
- wg.Add(1)
- go func(entry NeighborEntry) {
- defer wg.Done()
- switch e, _, err := linkRes.neigh.entry(entry.Addr, "", nil); err.(type) {
- case nil, *tcpip.ErrWouldBlock:
- default:
- t.Errorf("got linkRes.neigh.entry(%s, '', nil) = (%+v, _, %s), want (_, _, nil) or (_, _, %s)", entry.Addr, e, err, &tcpip.ErrWouldBlock{})
- }
- }(entry)
- }
-
- // Wait for all goroutines to send a request
- wg.Wait()
-
- // Process all the requests for a single entry concurrently
- clock.Advance(typicalLatency)
- }
-
- // All goroutines add in the same order and add more values than can fit in
- // the cache. Our eviction strategy requires that the last entries are
- // present, up to the size of the neighbor cache, and the rest are missing.
- // The order of entries reported by entries() is nondeterministic, so entries
- // have to be sorted before comparison.
- var wantUnsortedEntries []NeighborEntry
- for i := linkRes.entries.size() - neighborCacheSize; i < linkRes.entries.size(); i++ {
- entry, ok := linkRes.entries.entry(i)
- if !ok {
- t.Errorf("got linkRes.entries.entry(%d) = _, false, want = true", i)
- }
- durationReachableNanos := time.Duration(linkRes.entries.size()-i-1) * typicalLatency
- wantUnsortedEntries = append(wantUnsortedEntries, NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
- UpdatedAt: clock.Now().Add(-durationReachableNanos),
- })
- }
-
- if diff := cmp.Diff(wantUnsortedEntries, linkRes.neigh.entries(), unorderedEntriesDiffOpts()...); diff != "" {
- t.Errorf("neighbor entries mismatch (-want, +got):\n%s", diff)
- }
-}
-
-func TestNeighborCacheReplace(t *testing.T) {
- config := DefaultNUDConfigurations()
-
- nudDisp := testNUDDispatcher{}
- clock := faketime.NewManualClock()
- linkRes := newTestNeighborResolver(&nudDisp, config, clock)
-
- entry, ok := linkRes.entries.entry(0)
- if !ok {
- t.Fatal("got linkRes.entries.entry(0) = _, false, want = true ")
- }
- if err := addReachableEntry(&nudDisp, clock, linkRes, entry); err != nil {
- t.Fatalf("addReachableEntry(...) = %s", err)
- }
-
- // Notify of a link address change
- var updatedLinkAddr tcpip.LinkAddress
- {
- entry, ok := linkRes.entries.entry(1)
- if !ok {
- t.Fatal("got linkRes.entries.entry(1) = _, false, want = true")
- }
- updatedLinkAddr = entry.LinkAddr
- }
- linkRes.entries.set(0, updatedLinkAddr)
- linkRes.neigh.handleConfirmation(entry.Addr, updatedLinkAddr, ReachabilityConfirmationFlags{
- Solicited: false,
- Override: true,
- IsRouter: false,
- })
-
- // Requesting the entry again should start neighbor reachability confirmation.
- //
- // Verify the entry's new link address and the new state.
- {
- e, _, err := linkRes.neigh.entry(entry.Addr, "", nil)
- if err != nil {
- t.Fatalf("linkRes.neigh.entry(%s, '', nil): %s", entry.Addr, err)
- }
- want := NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: updatedLinkAddr,
- State: Delay,
- UpdatedAt: clock.Now(),
- }
- if diff := cmp.Diff(want, e); diff != "" {
- t.Errorf("linkRes.neigh.entry(%s, '', nil) mismatch (-want, +got):\n%s", entry.Addr, diff)
- }
- }
-
- clock.Advance(config.DelayFirstProbeTime + typicalLatency)
-
- // Verify that the neighbor is now reachable.
- {
- e, _, err := linkRes.neigh.entry(entry.Addr, "", nil)
- if err != nil {
- t.Errorf("unexpected error from linkRes.neigh.entry(%s, '', nil): %s", entry.Addr, err)
- }
- want := NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: updatedLinkAddr,
- State: Reachable,
- UpdatedAt: clock.Now(),
- }
- if diff := cmp.Diff(want, e); diff != "" {
- t.Errorf("linkRes.neigh.entry(%s, '', nil) mismatch (-want, +got):\n%s", entry.Addr, diff)
- }
- }
-}
-
-func TestNeighborCacheResolutionFailed(t *testing.T) {
- config := DefaultNUDConfigurations()
-
- nudDisp := testNUDDispatcher{}
- clock := faketime.NewManualClock()
- linkRes := newTestNeighborResolver(&nudDisp, config, clock)
-
- var requestCount uint32
- linkRes.onLinkAddressRequest = func() {
- atomic.AddUint32(&requestCount, 1)
- }
-
- entry, ok := linkRes.entries.entry(0)
- if !ok {
- t.Fatal("got linkRes.entries.entry(0) = _, false, want = true ")
- }
-
- // First, sanity check that resolution is working
- if err := addReachableEntry(&nudDisp, clock, linkRes, entry); err != nil {
- t.Fatalf("addReachableEntry(...) = %s", err)
- }
-
- got, _, err := linkRes.neigh.entry(entry.Addr, "", nil)
- if err != nil {
- t.Fatalf("unexpected error from linkRes.neigh.entry(%s, '', nil): %s", entry.Addr, err)
- }
- want := NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
- UpdatedAt: clock.Now(),
- }
- if diff := cmp.Diff(want, got); diff != "" {
- t.Errorf("linkRes.neigh.entry(%s, '', nil) mismatch (-want, +got):\n%s", entry.Addr, diff)
- }
-
- // Verify address resolution fails for an unknown address.
- before := atomic.LoadUint32(&requestCount)
-
- entry.Addr += "2"
- {
- _, ch, err := linkRes.neigh.entry(entry.Addr, "", func(r LinkResolutionResult) {
- if diff := cmp.Diff(LinkResolutionResult{Err: &tcpip.ErrTimeout{}}, r); diff != "" {
- t.Fatalf("got link resolution result mismatch (-want +got):\n%s", diff)
- }
- })
- if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Fatalf("got linkRes.neigh.entry(%s, '', _) = %v, want = %s", entry.Addr, err, &tcpip.ErrWouldBlock{})
- }
- waitFor := config.DelayFirstProbeTime + typicalLatency*time.Duration(config.MaxMulticastProbes)
- clock.Advance(waitFor)
- select {
- case <-ch:
- default:
- t.Fatalf("expected notification from done channel returned by linkRes.neigh.entry(%s, '', _)", entry.Addr)
- }
- }
-
- maxAttempts := linkRes.neigh.config().MaxUnicastProbes
- if got, want := atomic.LoadUint32(&requestCount)-before, maxAttempts; got != want {
- t.Errorf("got link address request count = %d, want = %d", got, want)
- }
-}
-
-// TestNeighborCacheResolutionTimeout simulates sending MaxMulticastProbes
-// probes without receiving a confirmation within the duration defined by
-// MaxMulticastProbes * RetransmitTimer.
-func TestNeighborCacheResolutionTimeout(t *testing.T) {
- config := DefaultNUDConfigurations()
- config.RetransmitTimer = time.Millisecond // small enough to cause timeout
-
- clock := faketime.NewManualClock()
- linkRes := newTestNeighborResolver(nil, config, clock)
- // large enough to cause timeout
- linkRes.delay = time.Minute
-
- entry, ok := linkRes.entries.entry(0)
- if !ok {
- t.Fatal("got linkRes.entries.entry(0) = _, false, want = true ")
- }
-
- _, ch, err := linkRes.neigh.entry(entry.Addr, "", func(r LinkResolutionResult) {
- if diff := cmp.Diff(LinkResolutionResult{Err: &tcpip.ErrTimeout{}}, r); diff != "" {
- t.Fatalf("got link resolution result mismatch (-want +got):\n%s", diff)
- }
- })
- if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Fatalf("got linkRes.neigh.entry(%s, '', _) = %v, want = %s", entry.Addr, err, &tcpip.ErrWouldBlock{})
- }
- waitFor := config.RetransmitTimer * time.Duration(config.MaxMulticastProbes)
- clock.Advance(waitFor)
-
- select {
- case <-ch:
- default:
- t.Fatalf("expected notification from done channel returned by linkRes.neigh.entry(%s, '', _)", entry.Addr)
- }
-}
-
-// TestNeighborCacheRetryResolution simulates retrying communication after
-// failing to perform address resolution.
-func TestNeighborCacheRetryResolution(t *testing.T) {
- config := DefaultNUDConfigurations()
- nudDisp := testNUDDispatcher{}
- clock := faketime.NewManualClock()
- linkRes := newTestNeighborResolver(&nudDisp, config, clock)
- // Simulate a faulty link.
- linkRes.dropReplies = true
-
- entry, ok := linkRes.entries.entry(0)
- if !ok {
- t.Fatal("got linkRes.entries.entry(0) = _, false, want = true ")
- }
-
- // Perform address resolution with a faulty link, which will fail.
- {
- _, ch, err := linkRes.neigh.entry(entry.Addr, "", func(r LinkResolutionResult) {
- if diff := cmp.Diff(LinkResolutionResult{Err: &tcpip.ErrTimeout{}}, r); diff != "" {
- t.Fatalf("got link resolution result mismatch (-want +got):\n%s", diff)
- }
- })
- if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Fatalf("got linkRes.neigh.entry(%s, '', _) = %v, want = %s", entry.Addr, err, &tcpip.ErrWouldBlock{})
- }
-
- {
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestAdded,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: "",
- State: Incomplete,
- UpdatedAt: clock.Now(),
- },
- },
- }
- nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, nudDisp.mu.events)
- nudDisp.mu.events = nil
- nudDisp.mu.Unlock()
- if diff != "" {
- t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- }
-
- waitFor := config.RetransmitTimer * time.Duration(config.MaxMulticastProbes)
- clock.Advance(waitFor)
-
- select {
- case <-ch:
- default:
- t.Fatalf("expected notification from done channel returned by linkRes.neigh.entry(%s, '', _)", entry.Addr)
- }
-
- {
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestChanged,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: "",
- State: Unreachable,
- UpdatedAt: clock.Now(),
- },
- },
- }
- nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, nudDisp.mu.events)
- nudDisp.mu.events = nil
- nudDisp.mu.Unlock()
- if diff != "" {
- t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- }
-
- {
- wantEntries := []NeighborEntry{
- {
- Addr: entry.Addr,
- LinkAddr: "",
- State: Unreachable,
- UpdatedAt: clock.Now(),
- },
- }
- if diff := cmp.Diff(linkRes.neigh.entries(), wantEntries, unorderedEntriesDiffOpts()...); diff != "" {
- t.Fatalf("neighbor entries mismatch (-got, +want):\n%s", diff)
- }
- }
- }
-
- // Retry address resolution with a working link.
- linkRes.dropReplies = false
- {
- incompleteEntry, ch, err := linkRes.neigh.entry(entry.Addr, "", func(r LinkResolutionResult) {
- if diff := cmp.Diff(LinkResolutionResult{LinkAddress: entry.LinkAddr, Err: nil}, r); diff != "" {
- t.Fatalf("got link resolution result mismatch (-want +got):\n%s", diff)
- }
- })
- if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Fatalf("got linkRes.neigh.entry(%s, '', _) = %v, want = %s", entry.Addr, err, &tcpip.ErrWouldBlock{})
- }
- if incompleteEntry.State != Incomplete {
- t.Fatalf("got entry.State = %s, want = %s", incompleteEntry.State, Incomplete)
- }
-
- {
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestChanged,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: "",
- State: Incomplete,
- UpdatedAt: clock.Now(),
- },
- },
- }
- nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, nudDisp.mu.events)
- nudDisp.mu.events = nil
- nudDisp.mu.Unlock()
- if diff != "" {
- t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- }
-
- clock.Advance(typicalLatency)
-
- select {
- case <-ch:
- default:
- t.Fatalf("expected notification from done channel returned by linkRes.neigh.entry(%s, '', _)", entry.Addr)
- }
-
- {
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestChanged,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
- UpdatedAt: clock.Now(),
- },
- },
- }
- nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, nudDisp.mu.events)
- nudDisp.mu.events = nil
- nudDisp.mu.Unlock()
- if diff != "" {
- t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- }
-
- {
- gotEntry, _, err := linkRes.neigh.entry(entry.Addr, "", nil)
- if err != nil {
- t.Fatalf("linkRes.neigh.entry(%s, '', _): %s", entry.Addr, err)
- }
-
- wantEntry := NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
- UpdatedAt: clock.Now(),
- }
- if diff := cmp.Diff(gotEntry, wantEntry); diff != "" {
- t.Fatalf("neighbor entry mismatch (-got, +want):\n%s", diff)
- }
- }
- }
-}
-
-func BenchmarkCacheClear(b *testing.B) {
- b.StopTimer()
- config := DefaultNUDConfigurations()
- clock := tcpip.NewStdClock()
- linkRes := newTestNeighborResolver(nil, config, clock)
- linkRes.delay = 0
-
- // Clear for every possible size of the cache
- for cacheSize := uint16(0); cacheSize < neighborCacheSize; cacheSize++ {
- // Fill the neighbor cache to capacity.
- for i := uint16(0); i < cacheSize; i++ {
- entry, ok := linkRes.entries.entry(i)
- if !ok {
- b.Fatalf("got linkRes.entries.entry(%d) = _, false, want = true", i)
- }
-
- _, ch, err := linkRes.neigh.entry(entry.Addr, "", func(r LinkResolutionResult) {
- if diff := cmp.Diff(LinkResolutionResult{LinkAddress: entry.LinkAddr, Err: nil}, r); diff != "" {
- b.Fatalf("got link resolution result mismatch (-want +got):\n%s", diff)
- }
- })
- if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- b.Fatalf("got linkRes.neigh.entry(%s, '', _) = %v, want = %s", entry.Addr, err, &tcpip.ErrWouldBlock{})
- }
-
- select {
- case <-ch:
- default:
- b.Fatalf("expected notification from done channel returned by linkRes.neigh.entry(%s, '', _)", entry.Addr)
- }
- }
-
- b.StartTimer()
- linkRes.neigh.clear()
- b.StopTimer()
- }
-}
diff --git a/pkg/tcpip/stack/neighbor_entry_list.go b/pkg/tcpip/stack/neighbor_entry_list.go
new file mode 100644
index 000000000..d78430080
--- /dev/null
+++ b/pkg/tcpip/stack/neighbor_entry_list.go
@@ -0,0 +1,221 @@
+package stack
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type neighborEntryElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (neighborEntryElementMapper) linkerFor(elem *neighborEntry) *neighborEntry { return elem }
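As a reading aid for the ElementMapper comment above, the following purely hypothetical sketch shows a non-identity mapper: if the element did not itself carry the Linker methods, linkerFor would return a dedicated linker field instead. The wrappedEntry type and its mapper are illustrations only and are not part of this package.

	// wrappedEntry is a hypothetical element whose intrusive links live in a
	// separate field rather than on the element itself.
	type wrappedEntry struct {
		links neighborEntryEntry
		// payload fields would go here
	}

	type wrappedEntryMapper struct{}

	// linkerFor returns the element's linker field instead of the element itself.
	func (wrappedEntryMapper) linkerFor(e *wrappedEntry) *neighborEntryEntry { return &e.links }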
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type neighborEntryList struct {
+ head *neighborEntry
+ tail *neighborEntry
+}
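A minimal usage sketch, assuming e1 and e2 are *neighborEntry values obtained elsewhere (the list only links existing entries and never allocates):

	var l neighborEntryList
	l.PushBack(e1)
	l.PushFront(e2) // list order is now e2, e1
	for e := l.Front(); e != nil; e = e.Next() {
		// visit e in order
	}
	l.Remove(e1) // unlinks e1 in O(1)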
+
+// Reset resets list l to the empty state.
+func (l *neighborEntryList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *neighborEntryList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *neighborEntryList) Front() *neighborEntry {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *neighborEntryList) Back() *neighborEntry {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *neighborEntryList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (neighborEntryElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *neighborEntryList) PushFront(e *neighborEntry) {
+ linker := neighborEntryElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ neighborEntryElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *neighborEntryList) PushBack(e *neighborEntry) {
+ linker := neighborEntryElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ neighborEntryElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *neighborEntryList) PushBackList(m *neighborEntryList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ neighborEntryElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ neighborEntryElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *neighborEntryList) InsertAfter(b, e *neighborEntry) {
+ bLinker := neighborEntryElementMapper{}.linkerFor(b)
+ eLinker := neighborEntryElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ neighborEntryElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *neighborEntryList) InsertBefore(a, e *neighborEntry) {
+ aLinker := neighborEntryElementMapper{}.linkerFor(a)
+ eLinker := neighborEntryElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ neighborEntryElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *neighborEntryList) Remove(e *neighborEntry) {
+ linker := neighborEntryElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ neighborEntryElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ neighborEntryElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type neighborEntryEntry struct {
+ next *neighborEntry
+ prev *neighborEntry
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *neighborEntryEntry) Next() *neighborEntry {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *neighborEntryEntry) Prev() *neighborEntry {
+ return e.prev
+}
+
+// SetNext assigns 'elem' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *neighborEntryEntry) SetNext(elem *neighborEntry) {
+ e.next = elem
+}
+
+// SetPrev assigns 'elem' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *neighborEntryEntry) SetPrev(elem *neighborEntry) {
+ e.prev = elem
+}
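Because linkerFor above treats *neighborEntry itself as the Linker, neighborEntry (defined in neighbor_entry.go, outside this file) must provide the Next/Prev/SetNext/SetPrev methods, which it typically does by embedding neighborEntryEntry anonymously. A hedged sketch of that pattern, with illustrative naming:

	// elementSketch shows the embedding pattern; the real element is
	// neighborEntry, and its payload fields are omitted here.
	type elementSketch struct {
		neighborEntryEntry // anonymous field promotes Next/Prev/SetNext/SetPrev
	}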
diff --git a/pkg/tcpip/stack/neighbor_entry_test.go b/pkg/tcpip/stack/neighbor_entry_test.go
deleted file mode 100644
index 59d86d6d4..000000000
--- a/pkg/tcpip/stack/neighbor_entry_test.go
+++ /dev/null
@@ -1,2269 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package stack
-
-import (
- "fmt"
- "math"
- "math/rand"
- "sync"
- "testing"
- "time"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/faketime"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/testutil"
-)
-
-const (
- entryTestNetNumber tcpip.NetworkProtocolNumber = math.MaxUint32
-
- entryTestNICID tcpip.NICID = 1
-
- entryTestLinkAddr1 = tcpip.LinkAddress("\x0a\x00\x00\x00\x00\x01")
- entryTestLinkAddr2 = tcpip.LinkAddress("\x0a\x00\x00\x00\x00\x02")
-)
-
-var (
- entryTestAddr1 = testutil.MustParse6("a::1")
- entryTestAddr2 = testutil.MustParse6("a::2")
-)
-
-// runImmediatelyScheduledJobs runs all jobs scheduled to run at the current
-// time.
-func runImmediatelyScheduledJobs(clock *faketime.ManualClock) {
- clock.Advance(immediateDuration)
-}
-
-// The following unit tests exercise every state transition and verify its
-// behavior with RFC 4861 and RFC 7048.
-//
-// | From | To | Cause | Update | Action | Event |
-// | =========== | =========== | ========================================== | ======== | ===========| ======= |
-// | Unknown | Unknown | Confirmation w/ unknown address | | | Added |
-// | Unknown | Incomplete | Packet queued to unknown address | | Send probe | Added |
-// | Unknown | Stale | Probe | | | Added |
-// | Incomplete | Incomplete | Retransmit timer expired | | Send probe | Changed |
-// | Incomplete | Reachable | Solicited confirmation | LinkAddr | Notify | Changed |
-// | Incomplete | Stale | Unsolicited confirmation | LinkAddr | Notify | Changed |
-// | Incomplete | Stale | Probe | LinkAddr | Notify | Changed |
-// | Incomplete | Unreachable | Max probes sent without reply | | Notify | Changed |
-// | Reachable | Reachable | Confirmation w/ different isRouter flag | IsRouter | | |
-// | Reachable | Stale | Reachable timer expired | | | Changed |
-// | Reachable | Stale | Probe or confirmation w/ different address | | | Changed |
-// | Stale | Reachable | Solicited override confirmation | LinkAddr | | Changed |
-// | Stale | Reachable | Solicited confirmation w/o address | | Notify | Changed |
-// | Stale | Stale | Override confirmation | LinkAddr | | Changed |
-// | Stale | Stale | Probe w/ different address | LinkAddr | | Changed |
-// | Stale | Delay | Packet sent | | | Changed |
-// | Delay | Reachable | Upper-layer confirmation | | | Changed |
-// | Delay | Reachable | Solicited override confirmation | LinkAddr | | Changed |
-// | Delay | Reachable | Solicited confirmation w/o address | | Notify | Changed |
-// | Delay | Stale | Probe or confirmation w/ different address | | | Changed |
-// | Delay | Probe | Delay timer expired | | Send probe | Changed |
-// | Probe | Reachable | Solicited override confirmation | LinkAddr | | Changed |
-// | Probe | Reachable | Solicited confirmation w/ same address | | Notify | Changed |
-// | Probe | Reachable | Solicited confirmation w/o address | | Notify | Changed |
-// | Probe | Stale | Probe or confirmation w/ different address | | | Changed |
-// | Probe | Probe | Retransmit timer expired | | | Changed |
-// | Probe | Unreachable | Max probes sent without reply | | Notify | Changed |
-// | Unreachable | Incomplete | Packet queued | | Send probe | Changed |
-// | Unreachable | Stale | Probe w/ different address | LinkAddr | | Changed |
-
-type testEntryEventType uint8
-
-const (
- entryTestAdded testEntryEventType = iota
- entryTestChanged
- entryTestRemoved
-)
-
-func (t testEntryEventType) String() string {
- switch t {
- case entryTestAdded:
- return "add"
- case entryTestChanged:
- return "change"
- case entryTestRemoved:
- return "remove"
- default:
- return fmt.Sprintf("unknown (%d)", t)
- }
-}
-
-// Fields are exported for use with cmp.Diff.
-type testEntryEventInfo struct {
- EventType testEntryEventType
- NICID tcpip.NICID
- Entry NeighborEntry
-}
-
-func (e testEntryEventInfo) String() string {
- return fmt.Sprintf("%s event for NIC #%d, %#v", e.EventType, e.NICID, e.Entry)
-}
-
-// testNUDDispatcher implements NUDDispatcher to validate the dispatching of
-// events upon certain NUD state machine events.
-type testNUDDispatcher struct {
- mu struct {
- sync.Mutex
- events []testEntryEventInfo
- }
-}
-
-var _ NUDDispatcher = (*testNUDDispatcher)(nil)
-
-func (d *testNUDDispatcher) queueEvent(e testEntryEventInfo) {
- d.mu.Lock()
- defer d.mu.Unlock()
- d.mu.events = append(d.mu.events, e)
-}
-
-func (d *testNUDDispatcher) OnNeighborAdded(nicID tcpip.NICID, entry NeighborEntry) {
- d.queueEvent(testEntryEventInfo{
- EventType: entryTestAdded,
- NICID: nicID,
- Entry: entry,
- })
-}
-
-func (d *testNUDDispatcher) OnNeighborChanged(nicID tcpip.NICID, entry NeighborEntry) {
- d.queueEvent(testEntryEventInfo{
- EventType: entryTestChanged,
- NICID: nicID,
- Entry: entry,
- })
-}
-
-func (d *testNUDDispatcher) OnNeighborRemoved(nicID tcpip.NICID, entry NeighborEntry) {
- d.queueEvent(testEntryEventInfo{
- EventType: entryTestRemoved,
- NICID: nicID,
- Entry: entry,
- })
-}
-
-type entryTestLinkResolver struct {
- mu struct {
- sync.Mutex
- probes []entryTestProbeInfo
- }
-}
-
-var _ LinkAddressResolver = (*entryTestLinkResolver)(nil)
-
-type entryTestProbeInfo struct {
- RemoteAddress tcpip.Address
- RemoteLinkAddress tcpip.LinkAddress
- LocalAddress tcpip.Address
-}
-
-func (p entryTestProbeInfo) String() string {
- return fmt.Sprintf("probe with RemoteAddress=%q, RemoteLinkAddress=%q, LocalAddress=%q", p.RemoteAddress, p.RemoteLinkAddress, p.LocalAddress)
-}
-
-// LinkAddressRequest sends a request for the link address of targetAddr. It
-// broadcasts to the local network if linkAddr is the zero value.
-func (r *entryTestLinkResolver) LinkAddressRequest(targetAddr, localAddr tcpip.Address, linkAddr tcpip.LinkAddress) tcpip.Error {
- r.mu.Lock()
- defer r.mu.Unlock()
- r.mu.probes = append(r.mu.probes, entryTestProbeInfo{
- RemoteAddress: targetAddr,
- RemoteLinkAddress: linkAddr,
- LocalAddress: localAddr,
- })
- return nil
-}
-
-// ResolveStaticAddress attempts to resolve an address without sending
-// requests. It either resolves the address immediately or returns the empty
-// LinkAddress.
-func (*entryTestLinkResolver) ResolveStaticAddress(tcpip.Address) (tcpip.LinkAddress, bool) {
- return "", false
-}
-
-// LinkAddressProtocol returns the network protocol of the addresses this
-// resolver can resolve.
-func (*entryTestLinkResolver) LinkAddressProtocol() tcpip.NetworkProtocolNumber {
- return entryTestNetNumber
-}
-
-func entryTestSetup(c NUDConfigurations) (*neighborEntry, *testNUDDispatcher, *entryTestLinkResolver, *faketime.ManualClock) {
- clock := faketime.NewManualClock()
- disp := testNUDDispatcher{}
- nic := nic{
- LinkEndpoint: nil, // entryTestLinkResolver doesn't use a LinkEndpoint
-
- id: entryTestNICID,
- stack: &Stack{
- clock: clock,
- nudDisp: &disp,
- nudConfigs: c,
- randomGenerator: rand.New(rand.NewSource(time.Now().UnixNano())),
- },
- stats: makeNICStats(tcpip.NICStats{}.FillIn()),
- }
- netEP := (&testIPv6Protocol{}).NewEndpoint(&nic, nil)
- nic.networkEndpoints = map[tcpip.NetworkProtocolNumber]NetworkEndpoint{
- header.IPv6ProtocolNumber: netEP,
- }
-
- var linkRes entryTestLinkResolver
- // Stub out the neighbor cache to verify deletion from the cache.
- l := &linkResolver{
- resolver: &linkRes,
- }
- l.neigh.init(&nic, &linkRes)
-
- entry := newNeighborEntry(&l.neigh, entryTestAddr1 /* remoteAddr */, l.neigh.state)
- l.neigh.mu.Lock()
- l.neigh.mu.cache[entryTestAddr1] = entry
- l.neigh.mu.Unlock()
- nic.linkAddrResolvers = map[tcpip.NetworkProtocolNumber]*linkResolver{
- header.IPv6ProtocolNumber: l,
- }
-
- return entry, &disp, &linkRes, clock
-}
-
-// TestEntryInitiallyUnknown verifies that the state of a newly created
-// neighborEntry is Unknown.
-func TestEntryInitiallyUnknown(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- e.mu.Lock()
- if e.mu.neigh.State != Unknown {
- t.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Unknown)
- }
- e.mu.Unlock()
-
- clock.Advance(c.RetransmitTimer)
-
- // No probes should have been sent.
- linkRes.mu.Lock()
- diff := cmp.Diff([]entryTestProbeInfo(nil), linkRes.mu.probes)
- linkRes.mu.Unlock()
- if diff != "" {
- t.Fatalf("link address resolver probes mismatch (-want, +got):\n%s", diff)
- }
-
- // No events should have been dispatched.
- nudDisp.mu.Lock()
- if diff := cmp.Diff([]testEntryEventInfo(nil), nudDisp.mu.events); diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- nudDisp.mu.Unlock()
-}
-
-func TestEntryUnknownToUnknownWhenConfirmationWithUnknownAddress(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- e.mu.Lock()
- e.handleConfirmationLocked(entryTestLinkAddr1, ReachabilityConfirmationFlags{
- Solicited: false,
- Override: false,
- IsRouter: false,
- })
- if e.mu.neigh.State != Unknown {
- t.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Unknown)
- }
- e.mu.Unlock()
-
- clock.Advance(time.Hour)
-
- // No probes should have been sent.
- linkRes.mu.Lock()
- diff := cmp.Diff([]entryTestProbeInfo(nil), linkRes.mu.probes)
- linkRes.mu.Unlock()
- if diff != "" {
- t.Fatalf("link address resolver probes mismatch (-want, +got):\n%s", diff)
- }
-
- // No events should have been dispatched.
- nudDisp.mu.Lock()
- if diff := cmp.Diff([]testEntryEventInfo(nil), nudDisp.mu.events); diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- nudDisp.mu.Unlock()
-}
-
-func TestEntryUnknownToIncomplete(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToIncomplete(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToIncomplete(...) = %s", err)
- }
-}
-
-func unknownToIncomplete(e *neighborEntry, nudDisp *testNUDDispatcher, linkRes *entryTestLinkResolver, clock *faketime.ManualClock) error {
- if err := func() error {
- e.mu.Lock()
- defer e.mu.Unlock()
-
- if e.mu.neigh.State != Unknown {
- return fmt.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Unknown)
- }
- e.handlePacketQueuedLocked(entryTestAddr2)
- if e.mu.neigh.State != Incomplete {
- return fmt.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Incomplete)
- }
- return nil
- }(); err != nil {
- return err
- }
-
- runImmediatelyScheduledJobs(clock)
- wantProbes := []entryTestProbeInfo{
- {
- RemoteAddress: entryTestAddr1,
- RemoteLinkAddress: tcpip.LinkAddress(""),
- LocalAddress: entryTestAddr2,
- },
- }
- linkRes.mu.Lock()
- diff := cmp.Diff(wantProbes, linkRes.mu.probes)
- linkRes.mu.probes = nil
- linkRes.mu.Unlock()
- if diff != "" {
- return fmt.Errorf("link address resolver probes mismatch (-want, +got):\n%s", diff)
- }
-
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestAdded,
- NICID: entryTestNICID,
- Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: tcpip.LinkAddress(""),
- State: Incomplete,
- UpdatedAt: clock.Now(),
- },
- },
- }
- {
- nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, nudDisp.mu.events)
- nudDisp.mu.events = nil
- nudDisp.mu.Unlock()
- if diff != "" {
- return fmt.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- }
- return nil
-}
-
-func TestEntryUnknownToStale(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToStale(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToStale(...) = %s", err)
- }
-}
-
-func unknownToStale(e *neighborEntry, nudDisp *testNUDDispatcher, linkRes *entryTestLinkResolver, clock *faketime.ManualClock) error {
- if err := func() error {
- e.mu.Lock()
- defer e.mu.Unlock()
-
- if e.mu.neigh.State != Unknown {
- return fmt.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Unknown)
- }
- e.handleProbeLocked(entryTestLinkAddr1)
- if e.mu.neigh.State != Stale {
- return fmt.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Stale)
- }
- return nil
- }(); err != nil {
- return err
- }
-
- // No probes should have been sent.
- runImmediatelyScheduledJobs(clock)
- {
- linkRes.mu.Lock()
- diff := cmp.Diff([]entryTestProbeInfo(nil), linkRes.mu.probes)
- linkRes.mu.Unlock()
- if diff != "" {
- return fmt.Errorf("link address resolver probes mismatch (-want, +got):\n%s", diff)
- }
- }
-
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestAdded,
- NICID: entryTestNICID,
- Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: entryTestLinkAddr1,
- State: Stale,
- UpdatedAt: clock.Now(),
- },
- },
- }
- {
- nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, nudDisp.mu.events)
- nudDisp.mu.events = nil
- nudDisp.mu.Unlock()
- if diff != "" {
- return fmt.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- }
-
- return nil
-}
-
-func TestEntryIncompleteToIncompleteDoesNotChangeUpdatedAt(t *testing.T) {
- c := DefaultNUDConfigurations()
- c.MaxMulticastProbes = 3
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToIncomplete(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToIncomplete(...) = %s", err)
- }
-
- // UpdatedAt should remain the same during address resolution.
- e.mu.Lock()
- startedAt := e.mu.neigh.UpdatedAt
- e.mu.Unlock()
-
- // Wait for the rest of the reachability probe transmissions, signifying
- // Incomplete to Incomplete transitions.
- for i := uint32(1); i < c.MaxMulticastProbes; i++ {
- clock.Advance(c.RetransmitTimer)
-
- wantProbes := []entryTestProbeInfo{
- {
- RemoteAddress: entryTestAddr1,
- RemoteLinkAddress: tcpip.LinkAddress(""),
- LocalAddress: entryTestAddr2,
- },
- }
- linkRes.mu.Lock()
- diff := cmp.Diff(wantProbes, linkRes.mu.probes)
- linkRes.mu.probes = nil
- linkRes.mu.Unlock()
- if diff != "" {
- t.Fatalf("link address resolver probes mismatch (-want, +got):\n%s", diff)
- }
-
- e.mu.Lock()
- if got, want := e.mu.neigh.UpdatedAt, startedAt; got != want {
- t.Errorf("got e.mu.neigh.UpdatedAt = %q, want = %q", got, want)
- }
- e.mu.Unlock()
- }
-
- // UpdatedAt should change after failing address resolution. Timing out after
- // sending the last probe transitions the entry to Unreachable.
- clock.Advance(c.RetransmitTimer)
-
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestChanged,
- NICID: entryTestNICID,
- Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: tcpip.LinkAddress(""),
- State: Unreachable,
- UpdatedAt: clock.Now(),
- },
- },
- }
- nudDisp.mu.Lock()
- if diff := cmp.Diff(wantEvents, nudDisp.mu.events); diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- nudDisp.mu.Unlock()
-}
-
-func TestEntryIncompleteToReachable(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToIncomplete(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToIncomplete(...) = %s", err)
- }
- if err := incompleteToReachable(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("incompleteToReachable(...) = %s", err)
- }
-}
-
-func incompleteToReachableWithFlags(e *neighborEntry, nudDisp *testNUDDispatcher, linkRes *entryTestLinkResolver, clock *faketime.ManualClock, flags ReachabilityConfirmationFlags) error {
- if err := func() error {
- e.mu.Lock()
- defer e.mu.Unlock()
-
- if e.mu.neigh.State != Incomplete {
- return fmt.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Incomplete)
- }
- e.handleConfirmationLocked(entryTestLinkAddr1, flags)
- if e.mu.neigh.State != Reachable {
- return fmt.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Reachable)
- }
- if e.mu.neigh.LinkAddr != entryTestLinkAddr1 {
- return fmt.Errorf("got e.mu.neigh.LinkAddr = %q, want = %q", e.mu.neigh.LinkAddr, entryTestLinkAddr1)
- }
- return nil
- }(); err != nil {
- return err
- }
-
- // No probes should have been sent.
- runImmediatelyScheduledJobs(clock)
- {
- linkRes.mu.Lock()
- diff := cmp.Diff([]entryTestProbeInfo(nil), linkRes.mu.probes)
- linkRes.mu.Unlock()
- if diff != "" {
- return fmt.Errorf("link address resolver probes mismatch (-want, +got):\n%s", diff)
- }
- }
-
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestChanged,
- NICID: entryTestNICID,
- Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: entryTestLinkAddr1,
- State: Reachable,
- UpdatedAt: clock.Now(),
- },
- },
- }
- {
- nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, nudDisp.mu.events)
- nudDisp.mu.events = nil
- nudDisp.mu.Unlock()
- if diff != "" {
- return fmt.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- }
-
- return nil
-}
-
-func incompleteToReachable(e *neighborEntry, nudDisp *testNUDDispatcher, linkRes *entryTestLinkResolver, clock *faketime.ManualClock) error {
- if err := incompleteToReachableWithFlags(e, nudDisp, linkRes, clock, ReachabilityConfirmationFlags{
- Solicited: true,
- Override: false,
- IsRouter: false,
- }); err != nil {
- return err
- }
-
- e.mu.Lock()
- isRouter := e.mu.isRouter
- e.mu.Unlock()
- if isRouter {
- return fmt.Errorf("got e.mu.isRouter = %t, want = false", isRouter)
- }
-
- return nil
-}
-
-func TestEntryIncompleteToReachableWithRouterFlag(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToIncomplete(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToIncomplete(...) = %s", err)
- }
- if err := incompleteToReachableWithRouterFlag(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("incompleteToReachableWithRouterFlag(...) = %s", err)
- }
-}
-
-func incompleteToReachableWithRouterFlag(e *neighborEntry, nudDisp *testNUDDispatcher, linkRes *entryTestLinkResolver, clock *faketime.ManualClock) error {
- if err := incompleteToReachableWithFlags(e, nudDisp, linkRes, clock, ReachabilityConfirmationFlags{
- Solicited: true,
- Override: false,
- IsRouter: true,
- }); err != nil {
- return err
- }
-
- e.mu.Lock()
- isRouter := e.mu.isRouter
- e.mu.Unlock()
- if !isRouter {
- return fmt.Errorf("got e.mu.isRouter = %t, want = true", isRouter)
- }
-
- return nil
-}
-
-func TestEntryIncompleteToStaleWhenUnsolicitedConfirmation(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToIncomplete(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToIncomplete(...) = %s", err)
- }
-
- e.mu.Lock()
- e.handleConfirmationLocked(entryTestLinkAddr1, ReachabilityConfirmationFlags{
- Solicited: false,
- Override: false,
- IsRouter: false,
- })
- if e.mu.neigh.State != Stale {
- t.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Stale)
- }
- if e.mu.isRouter {
- t.Errorf("got e.mu.isRouter = %t, want = false", e.mu.isRouter)
- }
- e.mu.Unlock()
-
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestChanged,
- NICID: entryTestNICID,
- Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: entryTestLinkAddr1,
- State: Stale,
- UpdatedAt: clock.Now(),
- },
- },
- }
- nudDisp.mu.Lock()
- if diff := cmp.Diff(wantEvents, nudDisp.mu.events); diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- nudDisp.mu.Unlock()
-}
-
-func TestEntryIncompleteToStaleWhenProbe(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToIncomplete(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToIncomplete(...) = %s", err)
- }
-
- e.mu.Lock()
- e.handleProbeLocked(entryTestLinkAddr1)
- if e.mu.neigh.State != Stale {
- t.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Stale)
- }
- e.mu.Unlock()
-
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestChanged,
- NICID: entryTestNICID,
- Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: entryTestLinkAddr1,
- State: Stale,
- UpdatedAt: clock.Now(),
- },
- },
- }
- nudDisp.mu.Lock()
- if diff := cmp.Diff(wantEvents, nudDisp.mu.events); diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- nudDisp.mu.Unlock()
-}
-
-func TestEntryIncompleteToUnreachable(t *testing.T) {
- c := DefaultNUDConfigurations()
- c.MaxMulticastProbes = 3
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToIncomplete(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToIncomplete(...) = %s", err)
- }
- if err := incompleteToUnreachable(c, e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("incompleteToUnreachable(...) = %s", err)
- }
-}
-
-func incompleteToUnreachable(c NUDConfigurations, e *neighborEntry, nudDisp *testNUDDispatcher, linkRes *entryTestLinkResolver, clock *faketime.ManualClock) error {
- {
- e.mu.Lock()
- state := e.mu.neigh.State
- e.mu.Unlock()
- if state != Incomplete {
- return fmt.Errorf("got e.mu.neigh.State = %q, want = %q", state, Incomplete)
- }
- }
-
- // The first probe was sent in the transition from Unknown to Incomplete.
- clock.Advance(c.RetransmitTimer)
-
- // Observe each subsequent multicast probe transmitted.
- for i := uint32(1); i < c.MaxMulticastProbes; i++ {
- wantProbes := []entryTestProbeInfo{{
- RemoteAddress: entryTestAddr1,
- RemoteLinkAddress: "",
- LocalAddress: entryTestAddr2,
- }}
- linkRes.mu.Lock()
- diff := cmp.Diff(wantProbes, linkRes.mu.probes)
- linkRes.mu.probes = nil
- linkRes.mu.Unlock()
- if diff != "" {
- return fmt.Errorf("link address resolver probe #%d mismatch (-want, +got):\n%s", i+1, diff)
- }
-
- e.mu.Lock()
- state := e.mu.neigh.State
- e.mu.Unlock()
- if state != Incomplete {
- return fmt.Errorf("got e.mu.neigh.State = %q, want = %q", state, Incomplete)
- }
-
- clock.Advance(c.RetransmitTimer)
- }
-
- {
- e.mu.Lock()
- state := e.mu.neigh.State
- e.mu.Unlock()
- if state != Unreachable {
- return fmt.Errorf("got e.mu.neigh.State = %q, want = %q", state, Unreachable)
- }
- }
-
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestChanged,
- NICID: entryTestNICID,
- Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: tcpip.LinkAddress(""),
- State: Unreachable,
- UpdatedAt: clock.Now(),
- },
- },
- }
- nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, nudDisp.mu.events)
- nudDisp.mu.events = nil
- nudDisp.mu.Unlock()
- if diff != "" {
- return fmt.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
-
- return nil
-}
-
-type testLocker struct{}
-
-var _ sync.Locker = (*testLocker)(nil)
-
-func (*testLocker) Lock() {}
-func (*testLocker) Unlock() {}
-
-func TestEntryReachableToReachableClearsRouterWhenConfirmationWithoutRouter(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToIncomplete(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToIncomplete(...) = %s", err)
- }
- if err := incompleteToReachableWithRouterFlag(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("incompleteToReachableWithRouterFlag(...) = %s", err)
- }
-
- e.mu.Lock()
- e.handleConfirmationLocked(entryTestLinkAddr1, ReachabilityConfirmationFlags{
- Solicited: false,
- Override: false,
- IsRouter: false,
- })
- if e.mu.neigh.State != Reachable {
- t.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Reachable)
- }
- if got, want := e.mu.isRouter, false; got != want {
- t.Errorf("got e.mu.isRouter = %t, want = %t", got, want)
- }
- ipv6EP := e.cache.nic.networkEndpoints[header.IPv6ProtocolNumber].(*testIPv6Endpoint)
- if ipv6EP.invalidatedRtr != e.mu.neigh.Addr {
- t.Errorf("got ipv6EP.invalidatedRtr = %s, want = %s", ipv6EP.invalidatedRtr, e.mu.neigh.Addr)
- }
- e.mu.Unlock()
-
- // No probes should have been sent.
- runImmediatelyScheduledJobs(clock)
- {
- linkRes.mu.Lock()
- diff := cmp.Diff([]entryTestProbeInfo(nil), linkRes.mu.probes)
- linkRes.mu.Unlock()
- if diff != "" {
- t.Errorf("link address resolver probes mismatch (-want, +got):\n%s", diff)
- }
- }
-
- // No events should have been dispatched.
- nudDisp.mu.Lock()
- diff := cmp.Diff([]testEntryEventInfo(nil), nudDisp.mu.events)
- nudDisp.mu.Unlock()
- if diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
-}
-
-func TestEntryReachableToReachableWhenProbeWithSameAddress(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToIncomplete(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToIncomplete(...) = %s", err)
- }
- if err := incompleteToReachable(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("incompleteToReachable(...) = %s", err)
- }
-
- e.mu.Lock()
- e.handleProbeLocked(entryTestLinkAddr1)
- if e.mu.neigh.State != Reachable {
- t.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Reachable)
- }
- if e.mu.neigh.LinkAddr != entryTestLinkAddr1 {
- t.Errorf("got e.mu.neigh.LinkAddr = %q, want = %q", e.mu.neigh.LinkAddr, entryTestLinkAddr1)
- }
- e.mu.Unlock()
-
- // No probes should have been sent.
- runImmediatelyScheduledJobs(clock)
- {
- linkRes.mu.Lock()
- diff := cmp.Diff([]entryTestProbeInfo(nil), linkRes.mu.probes)
- linkRes.mu.Unlock()
- if diff != "" {
- t.Errorf("link address resolver probes mismatch (-want, +got):\n%s", diff)
- }
- }
-
- // No events should have been dispatched.
- nudDisp.mu.Lock()
- diff := cmp.Diff([]testEntryEventInfo(nil), nudDisp.mu.events)
- nudDisp.mu.Unlock()
- if diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
-}
-
-func TestEntryReachableToStaleWhenTimeout(t *testing.T) {
- c := DefaultNUDConfigurations()
-	// Eliminate random factors from ReachableTime computation so the transition
-	// from Reachable to Stale will take exactly BaseReachableTime duration.
- c.MinRandomFactor = 1
- c.MaxRandomFactor = 1
-
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToIncomplete(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToIncomplete(...) = %s", err)
- }
- if err := incompleteToReachable(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("incompleteToReachable(...) = %s", err)
- }
- if err := reachableToStale(c, e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("reachableToStale(...) = %s", err)
- }
-}
-
-// reachableToStale transitions a neighborEntry in Reachable state to Stale
-// state. Depends on the elimination of random factors in the ReachableTime
-// computation.
-//
-// c.MinRandomFactor = 1
-// c.MaxRandomFactor = 1
-func reachableToStale(c NUDConfigurations, e *neighborEntry, nudDisp *testNUDDispatcher, linkRes *entryTestLinkResolver, clock *faketime.ManualClock) error {
- // Ensure there are no random factors in the ReachableTime computation.
- if c.MinRandomFactor != 1 {
- return fmt.Errorf("got c.MinRandomFactor = %f, want = 1", c.MinRandomFactor)
- }
- if c.MaxRandomFactor != 1 {
- return fmt.Errorf("got c.MaxRandomFactor = %f, want = 1", c.MaxRandomFactor)
- }
-
- {
- e.mu.Lock()
- state := e.mu.neigh.State
- e.mu.Unlock()
- if state != Reachable {
- return fmt.Errorf("got e.mu.neigh.State = %q, want = %q", state, Reachable)
- }
- }
-
- clock.Advance(c.BaseReachableTime)
-
- {
- e.mu.Lock()
- state := e.mu.neigh.State
- e.mu.Unlock()
- if state != Stale {
- return fmt.Errorf("got e.mu.neigh.State = %q, want = %q", state, Stale)
- }
- }
-
- // No probes should have been sent.
- runImmediatelyScheduledJobs(clock)
- {
- linkRes.mu.Lock()
- diff := cmp.Diff([]entryTestProbeInfo(nil), linkRes.mu.probes)
- linkRes.mu.Unlock()
- if diff != "" {
- return fmt.Errorf("link address resolver probes mismatch (-want, +got):\n%s", diff)
- }
- }
-
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestChanged,
- NICID: entryTestNICID,
- Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: entryTestLinkAddr1,
- State: Stale,
- UpdatedAt: clock.Now(),
- },
- },
- }
- {
- nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, nudDisp.mu.events)
- nudDisp.mu.events = nil
- nudDisp.mu.Unlock()
- if diff != "" {
- return fmt.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- }
-
- return nil
-}
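The random-factor checks at the top of reachableToStale matter because the stack scales BaseReachableTime by a random factor in [MinRandomFactor, MaxRandomFactor] when computing ReachableTime; pinning both to 1 makes the Reachable timeout exactly BaseReachableTime. A hedged sketch of that computation, modeled on RFC 4861 (the helper name and exact rounding are illustrative, not the stack's implementation):

	// reachableTimeSketch draws a duration uniformly from
	// [MinRandomFactor*BaseReachableTime, MaxRandomFactor*BaseReachableTime].
	func reachableTimeSketch(c NUDConfigurations, rng *rand.Rand) time.Duration {
		lo := float64(c.BaseReachableTime) * float64(c.MinRandomFactor)
		hi := float64(c.BaseReachableTime) * float64(c.MaxRandomFactor)
		return time.Duration(lo + rng.Float64()*(hi-lo))
	}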
-
-func TestEntryReachableToStaleWhenProbeWithDifferentAddress(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToIncomplete(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToIncomplete(...) = %s", err)
- }
- if err := incompleteToReachable(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("incompleteToReachable(...) = %s", err)
- }
-
- e.mu.Lock()
- e.handleProbeLocked(entryTestLinkAddr2)
- if e.mu.neigh.State != Stale {
- t.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Stale)
- }
- e.mu.Unlock()
-
- // No probes should have been sent.
- runImmediatelyScheduledJobs(clock)
- {
- linkRes.mu.Lock()
- diff := cmp.Diff([]entryTestProbeInfo(nil), linkRes.mu.probes)
- linkRes.mu.Unlock()
- if diff != "" {
- t.Errorf("link address resolver probes mismatch (-want, +got):\n%s", diff)
- }
- }
-
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestChanged,
- NICID: entryTestNICID,
- Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: entryTestLinkAddr2,
- State: Stale,
- UpdatedAt: clock.Now(),
- },
- },
- }
- nudDisp.mu.Lock()
- if diff := cmp.Diff(wantEvents, nudDisp.mu.events); diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- nudDisp.mu.Unlock()
-}
-
-func TestEntryReachableToStaleWhenConfirmationWithDifferentAddress(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToIncomplete(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToIncomplete(...) = %s", err)
- }
- if err := incompleteToReachable(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("incompleteToReachable(...) = %s", err)
- }
-
- e.mu.Lock()
- e.handleConfirmationLocked(entryTestLinkAddr2, ReachabilityConfirmationFlags{
- Solicited: false,
- Override: false,
- IsRouter: false,
- })
- if e.mu.neigh.State != Stale {
- t.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Stale)
- }
- e.mu.Unlock()
-
- // No probes should have been sent.
- runImmediatelyScheduledJobs(clock)
- {
- linkRes.mu.Lock()
- diff := cmp.Diff([]entryTestProbeInfo(nil), linkRes.mu.probes)
- linkRes.mu.Unlock()
- if diff != "" {
- t.Errorf("link address resolver probes mismatch (-want, +got):\n%s", diff)
- }
- }
-
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestChanged,
- NICID: entryTestNICID,
- Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: entryTestLinkAddr1,
- State: Stale,
- UpdatedAt: clock.Now(),
- },
- },
- }
- nudDisp.mu.Lock()
- if diff := cmp.Diff(wantEvents, nudDisp.mu.events); diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- nudDisp.mu.Unlock()
-}
-
-func TestEntryReachableToStaleWhenConfirmationWithDifferentAddressAndOverride(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToIncomplete(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToIncomplete(...) = %s", err)
- }
- if err := incompleteToReachable(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("incompleteToReachable(...) = %s", err)
- }
-
- e.mu.Lock()
- e.handleConfirmationLocked(entryTestLinkAddr2, ReachabilityConfirmationFlags{
- Solicited: false,
- Override: true,
- IsRouter: false,
- })
- if e.mu.neigh.State != Stale {
- t.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Stale)
- }
- e.mu.Unlock()
-
- // No probes should have been sent.
- runImmediatelyScheduledJobs(clock)
- {
- linkRes.mu.Lock()
- diff := cmp.Diff([]entryTestProbeInfo(nil), linkRes.mu.probes)
- linkRes.mu.Unlock()
- if diff != "" {
- t.Errorf("link address resolver probes mismatch (-want, +got):\n%s", diff)
- }
- }
-
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestChanged,
- NICID: entryTestNICID,
- Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: entryTestLinkAddr2,
- State: Stale,
- UpdatedAt: clock.Now(),
- },
- },
- }
- nudDisp.mu.Lock()
- if diff := cmp.Diff(wantEvents, nudDisp.mu.events); diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- nudDisp.mu.Unlock()
-}
-
-func TestEntryStaleToStaleWhenProbeWithSameAddress(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToStale(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToStale(...) = %s", err)
- }
-
- e.mu.Lock()
- e.handleProbeLocked(entryTestLinkAddr1)
- if e.mu.neigh.State != Stale {
- t.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Stale)
- }
- if e.mu.neigh.LinkAddr != entryTestLinkAddr1 {
- t.Errorf("got e.mu.neigh.LinkAddr = %q, want = %q", e.mu.neigh.LinkAddr, entryTestLinkAddr1)
- }
- e.mu.Unlock()
-
- // No probes should have been sent.
- runImmediatelyScheduledJobs(clock)
- {
- linkRes.mu.Lock()
- diff := cmp.Diff([]entryTestProbeInfo(nil), linkRes.mu.probes)
- linkRes.mu.Unlock()
- if diff != "" {
- t.Errorf("link address resolver probes mismatch (-want, +got):\n%s", diff)
- }
- }
-
- // No events should have been dispatched.
- nudDisp.mu.Lock()
- if diff := cmp.Diff([]testEntryEventInfo(nil), nudDisp.mu.events); diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- nudDisp.mu.Unlock()
-}
-
-func TestEntryStaleToReachableWhenSolicitedOverrideConfirmation(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToStale(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToStale(...) = %s", err)
- }
-
- e.mu.Lock()
- e.handleConfirmationLocked(entryTestLinkAddr2, ReachabilityConfirmationFlags{
- Solicited: true,
- Override: true,
- IsRouter: false,
- })
- if e.mu.neigh.State != Reachable {
- t.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Reachable)
- }
- if e.mu.neigh.LinkAddr != entryTestLinkAddr2 {
- t.Errorf("got e.mu.neigh.LinkAddr = %q, want = %q", e.mu.neigh.LinkAddr, entryTestLinkAddr2)
- }
- e.mu.Unlock()
-
- // No probes should have been sent.
- runImmediatelyScheduledJobs(clock)
- {
- linkRes.mu.Lock()
- diff := cmp.Diff([]entryTestProbeInfo(nil), linkRes.mu.probes)
- linkRes.mu.Unlock()
- if diff != "" {
- t.Errorf("link address resolver probes mismatch (-want, +got):\n%s", diff)
- }
- }
-
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestChanged,
- NICID: entryTestNICID,
- Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: entryTestLinkAddr2,
- State: Reachable,
- UpdatedAt: clock.Now(),
- },
- },
- }
- nudDisp.mu.Lock()
- if diff := cmp.Diff(wantEvents, nudDisp.mu.events); diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- nudDisp.mu.Unlock()
-}
-
-func TestEntryStaleToReachableWhenSolicitedConfirmationWithoutAddress(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToStale(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToStale(...) = %s", err)
- }
-
- e.mu.Lock()
- e.handleConfirmationLocked("" /* linkAddr */, ReachabilityConfirmationFlags{
- Solicited: true,
- Override: false,
- IsRouter: false,
- })
- if e.mu.neigh.State != Reachable {
- t.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Reachable)
- }
- if e.mu.neigh.LinkAddr != entryTestLinkAddr1 {
- t.Errorf("got e.mu.neigh.LinkAddr = %q, want = %q", e.mu.neigh.LinkAddr, entryTestLinkAddr1)
- }
- e.mu.Unlock()
-
- // No probes should have been sent.
- runImmediatelyScheduledJobs(clock)
- {
- linkRes.mu.Lock()
- diff := cmp.Diff([]entryTestProbeInfo(nil), linkRes.mu.probes)
- linkRes.mu.Unlock()
- if diff != "" {
- t.Errorf("link address resolver probes mismatch (-want, +got):\n%s", diff)
- }
- }
-
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestChanged,
- NICID: entryTestNICID,
- Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: entryTestLinkAddr1,
- State: Reachable,
- UpdatedAt: clock.Now(),
- },
- },
- }
- nudDisp.mu.Lock()
- if diff := cmp.Diff(wantEvents, nudDisp.mu.events); diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- nudDisp.mu.Unlock()
-}
-
-func TestEntryStaleToStaleWhenOverrideConfirmation(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToStale(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToStale(...) = %s", err)
- }
-
- e.mu.Lock()
- e.handleConfirmationLocked(entryTestLinkAddr2, ReachabilityConfirmationFlags{
- Solicited: false,
- Override: true,
- IsRouter: false,
- })
- if e.mu.neigh.State != Stale {
- t.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Stale)
- }
- if e.mu.neigh.LinkAddr != entryTestLinkAddr2 {
- t.Errorf("got e.mu.neigh.LinkAddr = %q, want = %q", e.mu.neigh.LinkAddr, entryTestLinkAddr2)
- }
- e.mu.Unlock()
-
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestChanged,
- NICID: entryTestNICID,
- Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: entryTestLinkAddr2,
- State: Stale,
- UpdatedAt: clock.Now(),
- },
- },
- }
- nudDisp.mu.Lock()
- if diff := cmp.Diff(wantEvents, nudDisp.mu.events); diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- nudDisp.mu.Unlock()
-}
-
-func TestEntryStaleToStaleWhenProbeUpdateAddress(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToStale(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToStale(...) = %s", err)
- }
-
- e.mu.Lock()
- e.handleProbeLocked(entryTestLinkAddr2)
- if e.mu.neigh.State != Stale {
- t.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Stale)
- }
- if e.mu.neigh.LinkAddr != entryTestLinkAddr2 {
- t.Errorf("got e.mu.neigh.LinkAddr = %q, want = %q", e.mu.neigh.LinkAddr, entryTestLinkAddr2)
- }
- e.mu.Unlock()
-
- // No probes should have been sent.
- runImmediatelyScheduledJobs(clock)
- {
- linkRes.mu.Lock()
- diff := cmp.Diff([]entryTestProbeInfo(nil), linkRes.mu.probes)
- linkRes.mu.Unlock()
- if diff != "" {
- t.Errorf("link address resolver probes mismatch (-want, +got):\n%s", diff)
- }
- }
-
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestChanged,
- NICID: entryTestNICID,
- Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: entryTestLinkAddr2,
- State: Stale,
- UpdatedAt: clock.Now(),
- },
- },
- }
- nudDisp.mu.Lock()
- if diff := cmp.Diff(wantEvents, nudDisp.mu.events); diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- nudDisp.mu.Unlock()
-}
-
-func TestEntryStaleToDelay(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToStale(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToStale(...) = %s", err)
- }
- if err := staleToDelay(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("staleToDelay(...) = %s", err)
- }
-}
-
-func staleToDelay(e *neighborEntry, nudDisp *testNUDDispatcher, linkRes *entryTestLinkResolver, clock *faketime.ManualClock) error {
- if err := func() error {
- e.mu.Lock()
- defer e.mu.Unlock()
-
- if e.mu.neigh.State != Stale {
- return fmt.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Stale)
- }
- e.handlePacketQueuedLocked(entryTestAddr2)
- if e.mu.neigh.State != Delay {
- return fmt.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Delay)
- }
- return nil
- }(); err != nil {
- return err
- }
-
- // No probes should have been sent.
- runImmediatelyScheduledJobs(clock)
- {
- linkRes.mu.Lock()
- diff := cmp.Diff([]entryTestProbeInfo(nil), linkRes.mu.probes)
- linkRes.mu.Unlock()
- if diff != "" {
- return fmt.Errorf("link address resolver probes mismatch (-want, +got):\n%s", diff)
- }
- }
-
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestChanged,
- NICID: entryTestNICID,
- Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: entryTestLinkAddr1,
- State: Delay,
- UpdatedAt: clock.Now(),
- },
- },
- }
- nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, nudDisp.mu.events)
- nudDisp.mu.events = nil
- nudDisp.mu.Unlock()
- if diff != "" {
- return fmt.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
-
- return nil
-}
-
-func TestEntryDelayToReachableWhenUpperLevelConfirmation(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToStale(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToStale(...) = %s", err)
- }
- if err := staleToDelay(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("staleToDelay(...) = %s", err)
- }
-
- e.mu.Lock()
- e.handleUpperLevelConfirmationLocked()
- if e.mu.neigh.State != Reachable {
- t.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Reachable)
- }
- e.mu.Unlock()
-
- // No probes should have been sent.
- runImmediatelyScheduledJobs(clock)
- {
- linkRes.mu.Lock()
- diff := cmp.Diff([]entryTestProbeInfo(nil), linkRes.mu.probes)
- linkRes.mu.Unlock()
- if diff != "" {
- t.Errorf("link address resolver probes mismatch (-want, +got):\n%s", diff)
- }
- }
-
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestChanged,
- NICID: entryTestNICID,
- Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: entryTestLinkAddr1,
- State: Reachable,
- UpdatedAt: clock.Now(),
- },
- },
- }
- nudDisp.mu.Lock()
- if diff := cmp.Diff(wantEvents, nudDisp.mu.events); diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- nudDisp.mu.Unlock()
-}
-
-func TestEntryDelayToReachableWhenSolicitedOverrideConfirmation(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToStale(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToStale(...) = %s", err)
- }
- if err := staleToDelay(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("staleToDelay(...) = %s", err)
- }
-
- e.mu.Lock()
- e.handleConfirmationLocked(entryTestLinkAddr2, ReachabilityConfirmationFlags{
- Solicited: true,
- Override: true,
- IsRouter: false,
- })
- if e.mu.neigh.State != Reachable {
- t.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Reachable)
- }
- if e.mu.neigh.LinkAddr != entryTestLinkAddr2 {
- t.Errorf("got e.mu.neigh.LinkAddr = %q, want = %q", e.mu.neigh.LinkAddr, entryTestLinkAddr2)
- }
- e.mu.Unlock()
-
- // No probes should have been sent.
- runImmediatelyScheduledJobs(clock)
- {
- linkRes.mu.Lock()
- diff := cmp.Diff([]entryTestProbeInfo(nil), linkRes.mu.probes)
- linkRes.mu.Unlock()
- if diff != "" {
- t.Errorf("link address resolver probes mismatch (-want, +got):\n%s", diff)
- }
- }
-
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestChanged,
- NICID: entryTestNICID,
- Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: entryTestLinkAddr2,
- State: Reachable,
- UpdatedAt: clock.Now(),
- },
- },
- }
- nudDisp.mu.Lock()
- if diff := cmp.Diff(wantEvents, nudDisp.mu.events); diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- nudDisp.mu.Unlock()
-}
-
-func TestEntryDelayToReachableWhenSolicitedConfirmationWithoutAddress(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToStale(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToStale(...) = %s", err)
- }
- if err := staleToDelay(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("staleToDelay(...) = %s", err)
- }
-
- e.mu.Lock()
- e.handleConfirmationLocked("" /* linkAddr */, ReachabilityConfirmationFlags{
- Solicited: true,
- Override: false,
- IsRouter: false,
- })
- if e.mu.neigh.State != Reachable {
- t.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Reachable)
- }
- if e.mu.neigh.LinkAddr != entryTestLinkAddr1 {
- t.Errorf("got e.mu.neigh.LinkAddr = %q, want = %q", e.mu.neigh.LinkAddr, entryTestLinkAddr1)
- }
- e.mu.Unlock()
-
- // No probes should have been sent.
- runImmediatelyScheduledJobs(clock)
- {
- linkRes.mu.Lock()
- diff := cmp.Diff([]entryTestProbeInfo(nil), linkRes.mu.probes)
- linkRes.mu.Unlock()
- if diff != "" {
- t.Errorf("link address resolver probes mismatch (-want, +got):\n%s", diff)
- }
- }
-
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestChanged,
- NICID: entryTestNICID,
- Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: entryTestLinkAddr1,
- State: Reachable,
- UpdatedAt: clock.Now(),
- },
- },
- }
- nudDisp.mu.Lock()
- if diff := cmp.Diff(wantEvents, nudDisp.mu.events); diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- nudDisp.mu.Unlock()
-}
-
-func TestEntryDelayToDelayWhenOverrideConfirmationWithSameAddress(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToStale(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToStale(...) = %s", err)
- }
- if err := staleToDelay(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("staleToDelay(...) = %s", err)
- }
-
- e.mu.Lock()
- e.handleConfirmationLocked(entryTestLinkAddr1, ReachabilityConfirmationFlags{
- Solicited: false,
- Override: true,
- IsRouter: false,
- })
- if e.mu.neigh.State != Delay {
- t.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Delay)
- }
- if e.mu.neigh.LinkAddr != entryTestLinkAddr1 {
- t.Errorf("got e.mu.neigh.LinkAddr = %q, want = %q", e.mu.neigh.LinkAddr, entryTestLinkAddr1)
- }
- e.mu.Unlock()
-
- // No probes should have been sent.
- runImmediatelyScheduledJobs(clock)
- {
- linkRes.mu.Lock()
- diff := cmp.Diff([]entryTestProbeInfo(nil), linkRes.mu.probes)
- linkRes.mu.Unlock()
- if diff != "" {
- t.Errorf("link address resolver probes mismatch (-want, +got):\n%s", diff)
- }
- }
-
- // No events should have been dispatched.
- nudDisp.mu.Lock()
- if diff := cmp.Diff([]testEntryEventInfo(nil), nudDisp.mu.events); diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- nudDisp.mu.Unlock()
-}
-
-func TestEntryDelayToStaleWhenProbeWithDifferentAddress(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToStale(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToStale(...) = %s", err)
- }
- if err := staleToDelay(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("staleToDelay(...) = %s", err)
- }
-
- e.mu.Lock()
- e.handleProbeLocked(entryTestLinkAddr2)
- if e.mu.neigh.State != Stale {
- t.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Stale)
- }
- e.mu.Unlock()
-
- // No probes should have been sent.
- runImmediatelyScheduledJobs(clock)
- {
- linkRes.mu.Lock()
- diff := cmp.Diff([]entryTestProbeInfo(nil), linkRes.mu.probes)
- linkRes.mu.Unlock()
- if diff != "" {
- t.Errorf("link address resolver probes mismatch (-want, +got):\n%s", diff)
- }
- }
-
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestChanged,
- NICID: entryTestNICID,
- Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: entryTestLinkAddr2,
- State: Stale,
- UpdatedAt: clock.Now(),
- },
- },
- }
- nudDisp.mu.Lock()
- if diff := cmp.Diff(wantEvents, nudDisp.mu.events); diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- nudDisp.mu.Unlock()
-}
-
-func TestEntryDelayToStaleWhenConfirmationWithDifferentAddress(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToStale(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToStale(...) = %s", err)
- }
- if err := staleToDelay(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("staleToDelay(...) = %s", err)
- }
-
- e.mu.Lock()
- e.handleConfirmationLocked(entryTestLinkAddr2, ReachabilityConfirmationFlags{
- Solicited: false,
- Override: true,
- IsRouter: false,
- })
- if e.mu.neigh.State != Stale {
- t.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Stale)
- }
- e.mu.Unlock()
-
- // No probes should have been sent.
- runImmediatelyScheduledJobs(clock)
- {
- linkRes.mu.Lock()
- diff := cmp.Diff([]entryTestProbeInfo(nil), linkRes.mu.probes)
- linkRes.mu.Unlock()
- if diff != "" {
- t.Errorf("link address resolver probes mismatch (-want, +got):\n%s", diff)
- }
- }
-
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestChanged,
- NICID: entryTestNICID,
- Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: entryTestLinkAddr2,
- State: Stale,
- UpdatedAt: clock.Now(),
- },
- },
- }
- nudDisp.mu.Lock()
- if diff := cmp.Diff(wantEvents, nudDisp.mu.events); diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- nudDisp.mu.Unlock()
-}
-
-func TestEntryDelayToProbe(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToStale(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToStale(...) = %s", err)
- }
- if err := staleToDelay(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("staleToDelay(...) = %s", err)
- }
- if err := delayToProbe(c, e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("delayToProbe(...) = %s", err)
- }
-}
-
-func delayToProbe(c NUDConfigurations, e *neighborEntry, nudDisp *testNUDDispatcher, linkRes *entryTestLinkResolver, clock *faketime.ManualClock) error {
- {
- e.mu.Lock()
- state := e.mu.neigh.State
- e.mu.Unlock()
- if state != Delay {
- return fmt.Errorf("got e.mu.neigh.State = %q, want = %q", state, Delay)
- }
- }
-
- // Wait for the first unicast probe to be transmitted, marking the
- // transition from Delay to Probe.
- clock.Advance(c.DelayFirstProbeTime)
-
- {
- e.mu.Lock()
- state := e.mu.neigh.State
- e.mu.Unlock()
- if state != Probe {
- return fmt.Errorf("got e.mu.neigh.State = %q, want = %q", state, Probe)
- }
- }
-
- wantProbes := []entryTestProbeInfo{
- {
- RemoteAddress: entryTestAddr1,
- RemoteLinkAddress: entryTestLinkAddr1,
- },
- }
- {
- linkRes.mu.Lock()
- diff := cmp.Diff(wantProbes, linkRes.mu.probes)
- linkRes.mu.probes = nil
- linkRes.mu.Unlock()
- if diff != "" {
- return fmt.Errorf("link address resolver probes mismatch (-want, +got):\n%s", diff)
- }
- }
-
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestChanged,
- NICID: entryTestNICID,
- Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: entryTestLinkAddr1,
- State: Probe,
- UpdatedAt: clock.Now(),
- },
- },
- }
- {
- nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, nudDisp.mu.events)
- nudDisp.mu.events = nil
- nudDisp.mu.Unlock()
- if diff != "" {
- return fmt.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- }
-
- return nil
-}
-
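The Delay-to-Probe helper above is the last building block the tests below keep chaining together. As a minimal sketch, reusing only helpers already defined in this file (an illustration, not additional test coverage), the Unknown -> Stale -> Delay -> Probe walk those tests repeat could be expressed as one table-driven sequence:

func driveEntryToProbe(t *testing.T) (*neighborEntry, *testNUDDispatcher, *entryTestLinkResolver, *faketime.ManualClock) {
	t.Helper()
	c := DefaultNUDConfigurations()
	e, nudDisp, linkRes, clock := entryTestSetup(c)

	// Walk the entry through the same transitions the tests below exercise.
	for _, step := range []struct {
		name string
		fn   func() error
	}{
		{"unknownToStale", func() error { return unknownToStale(e, nudDisp, linkRes, clock) }},
		{"staleToDelay", func() error { return staleToDelay(e, nudDisp, linkRes, clock) }},
		{"delayToProbe", func() error { return delayToProbe(c, e, nudDisp, linkRes, clock) }},
	} {
		if err := step.fn(); err != nil {
			t.Fatalf("%s(...) = %s", step.name, err)
		}
	}
	return e, nudDisp, linkRes, clock
}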
-func TestEntryProbeToStaleWhenProbeWithDifferentAddress(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToStale(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToStale(...) = %s", err)
- }
- if err := staleToDelay(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("staleToDelay(...) = %s", err)
- }
- if err := delayToProbe(c, e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("delayToProbe(...) = %s", err)
- }
-
- e.mu.Lock()
- e.handleProbeLocked(entryTestLinkAddr2)
- if e.mu.neigh.State != Stale {
- t.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Stale)
- }
- e.mu.Unlock()
-
- // No probes should have been sent.
- runImmediatelyScheduledJobs(clock)
- {
- linkRes.mu.Lock()
- diff := cmp.Diff([]entryTestProbeInfo(nil), linkRes.mu.probes)
- linkRes.mu.Unlock()
- if diff != "" {
- t.Errorf("link address resolver probes mismatch (-want, +got):\n%s", diff)
- }
- }
-
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestChanged,
- NICID: entryTestNICID,
- Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: entryTestLinkAddr2,
- State: Stale,
- UpdatedAt: clock.Now(),
- },
- },
- }
- nudDisp.mu.Lock()
- if diff := cmp.Diff(wantEvents, nudDisp.mu.events); diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- nudDisp.mu.Unlock()
-}
-
-func TestEntryProbeToStaleWhenConfirmationWithDifferentAddress(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToStale(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToStale(...) = %s", err)
- }
- if err := staleToDelay(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("staleToDelay(...) = %s", err)
- }
- if err := delayToProbe(c, e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("delayToProbe(...) = %s", err)
- }
-
- e.mu.Lock()
- e.handleConfirmationLocked(entryTestLinkAddr2, ReachabilityConfirmationFlags{
- Solicited: false,
- Override: true,
- IsRouter: false,
- })
- if e.mu.neigh.State != Stale {
- t.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Stale)
- }
- e.mu.Unlock()
-
- // No probes should have been sent.
- runImmediatelyScheduledJobs(clock)
- {
- linkRes.mu.Lock()
- diff := cmp.Diff([]entryTestProbeInfo(nil), linkRes.mu.probes)
- linkRes.mu.Unlock()
- if diff != "" {
- t.Errorf("link address resolver probes mismatch (-want, +got):\n%s", diff)
- }
- }
-
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestChanged,
- NICID: entryTestNICID,
- Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: entryTestLinkAddr2,
- State: Stale,
- UpdatedAt: clock.Now(),
- },
- },
- }
- nudDisp.mu.Lock()
- if diff := cmp.Diff(wantEvents, nudDisp.mu.events); diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- nudDisp.mu.Unlock()
-}
-
-func TestEntryProbeToProbeWhenOverrideConfirmationWithSameAddress(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToStale(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToStale(...) = %s", err)
- }
- if err := staleToDelay(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("staleToDelay(...) = %s", err)
- }
- if err := delayToProbe(c, e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("delayToProbe(...) = %s", err)
- }
-
- e.mu.Lock()
- e.handleConfirmationLocked(entryTestLinkAddr1, ReachabilityConfirmationFlags{
- Solicited: false,
- Override: true,
- IsRouter: false,
- })
- if e.mu.neigh.State != Probe {
- t.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Probe)
- }
- if got, want := e.mu.neigh.LinkAddr, entryTestLinkAddr1; got != want {
- t.Errorf("got e.mu.neigh.LinkAddr = %q, want = %q", got, want)
- }
- e.mu.Unlock()
-
- // No probes should have been sent.
- runImmediatelyScheduledJobs(clock)
- {
- linkRes.mu.Lock()
- diff := cmp.Diff([]entryTestProbeInfo(nil), linkRes.mu.probes)
- linkRes.mu.Unlock()
- if diff != "" {
- t.Errorf("link address resolver probes mismatch (-want, +got):\n%s", diff)
- }
- }
-
- // No events should have been dispatched.
- nudDisp.mu.Lock()
- diff := cmp.Diff([]testEntryEventInfo(nil), nudDisp.mu.events)
- nudDisp.mu.Unlock()
- if diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
-}
-
-func TestEntryProbeToReachableWhenSolicitedOverrideConfirmation(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToStale(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToStale(...) = %s", err)
- }
- if err := staleToDelay(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("staleToDelay(...) = %s", err)
- }
- if err := delayToProbe(c, e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("delayToProbe(...) = %s", err)
- }
- if err := probeToReachableWithOverride(e, nudDisp, linkRes, clock, entryTestLinkAddr2); err != nil {
- t.Fatalf("probeToReachableWithOverride(...) = %s", err)
- }
-}
-
-func probeToReachableWithFlags(e *neighborEntry, nudDisp *testNUDDispatcher, linkRes *entryTestLinkResolver, clock *faketime.ManualClock, linkAddr tcpip.LinkAddress, flags ReachabilityConfirmationFlags) error {
- if err := func() error {
- e.mu.Lock()
- defer e.mu.Unlock()
-
- prevLinkAddr := e.mu.neigh.LinkAddr
- if e.mu.neigh.State != Probe {
- return fmt.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Probe)
- }
- e.handleConfirmationLocked(linkAddr, flags)
-
- if e.mu.neigh.State != Reachable {
- return fmt.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Reachable)
- }
- if linkAddr == "" {
- linkAddr = prevLinkAddr
- }
- if e.mu.neigh.LinkAddr != linkAddr {
- return fmt.Errorf("got e.mu.neigh.LinkAddr = %q, want = %q", e.mu.neigh.LinkAddr, linkAddr)
- }
- return nil
- }(); err != nil {
- return err
- }
-
- // No probes should have been sent.
- runImmediatelyScheduledJobs(clock)
- {
- linkRes.mu.Lock()
- diff := cmp.Diff([]entryTestProbeInfo(nil), linkRes.mu.probes)
- linkRes.mu.Unlock()
- if diff != "" {
- return fmt.Errorf("link address resolver probes mismatch (-want, +got):\n%s", diff)
- }
- }
-
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestChanged,
- NICID: entryTestNICID,
- Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: linkAddr,
- State: Reachable,
- UpdatedAt: clock.Now(),
- },
- },
- }
- {
- nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, nudDisp.mu.events)
- nudDisp.mu.events = nil
- nudDisp.mu.Unlock()
- if diff != "" {
- return fmt.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- }
-
- return nil
-}
-
-func probeToReachableWithOverride(e *neighborEntry, nudDisp *testNUDDispatcher, linkRes *entryTestLinkResolver, clock *faketime.ManualClock, linkAddr tcpip.LinkAddress) error {
- return probeToReachableWithFlags(e, nudDisp, linkRes, clock, linkAddr, ReachabilityConfirmationFlags{
- Solicited: true,
- Override: true,
- IsRouter: false,
- })
-}
-
-func TestEntryProbeToReachableWhenSolicitedConfirmationWithSameAddress(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToStale(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToStale(...) = %s", err)
- }
- if err := staleToDelay(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("staleToDelay(...) = %s", err)
- }
- if err := delayToProbe(c, e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("delayToProbe(...) = %s", err)
- }
- if err := probeToReachable(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("probeToReachable(...) = %s", err)
- }
-}
-
-func probeToReachable(e *neighborEntry, nudDisp *testNUDDispatcher, linkRes *entryTestLinkResolver, clock *faketime.ManualClock) error {
- return probeToReachableWithFlags(e, nudDisp, linkRes, clock, entryTestLinkAddr1, ReachabilityConfirmationFlags{
- Solicited: true,
- Override: false,
- IsRouter: false,
- })
-}
-
-func TestEntryProbeToReachableWhenSolicitedConfirmationWithoutAddress(t *testing.T) {
- c := DefaultNUDConfigurations()
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToStale(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToStale(...) = %s", err)
- }
- if err := staleToDelay(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("staleToDelay(...) = %s", err)
- }
- if err := delayToProbe(c, e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("delayToProbe(...) = %s", err)
- }
- if err := probeToReachableWithoutAddress(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("probeToReachableWithoutAddress(...) = %s", err)
- }
-}
-
-func probeToReachableWithoutAddress(e *neighborEntry, nudDisp *testNUDDispatcher, linkRes *entryTestLinkResolver, clock *faketime.ManualClock) error {
- return probeToReachableWithFlags(e, nudDisp, linkRes, clock, "" /* linkAddr */, ReachabilityConfirmationFlags{
- Solicited: true,
- Override: false,
- IsRouter: false,
- })
-}
-
-func TestEntryProbeToUnreachable(t *testing.T) {
- c := DefaultNUDConfigurations()
- c.MaxMulticastProbes = 3
- c.MaxUnicastProbes = 3
- c.DelayFirstProbeTime = c.RetransmitTimer
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToStale(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToStale(...) = %s", err)
- }
- if err := staleToDelay(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("staleToDelay(...) = %s", err)
- }
- if err := delayToProbe(c, e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("delayToProbe(...) = %s", err)
- }
- if err := probeToUnreachable(c, e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("probeToUnreachable(...) = %s", err)
- }
-}
-
-func probeToUnreachable(c NUDConfigurations, e *neighborEntry, nudDisp *testNUDDispatcher, linkRes *entryTestLinkResolver, clock *faketime.ManualClock) error {
- {
- e.mu.Lock()
- state := e.mu.neigh.State
- e.mu.Unlock()
- if state != Probe {
- return fmt.Errorf("got e.mu.neigh.State = %q, want = %q", state, Probe)
- }
- }
-
- // The first probe was sent in the transition from Delay to Probe.
- clock.Advance(c.RetransmitTimer)
-
- // Observe each subsequent unicast probe transmitted.
- for i := uint32(1); i < c.MaxUnicastProbes; i++ {
- wantProbes := []entryTestProbeInfo{{
- RemoteAddress: entryTestAddr1,
- RemoteLinkAddress: entryTestLinkAddr1,
- }}
- linkRes.mu.Lock()
- diff := cmp.Diff(wantProbes, linkRes.mu.probes)
- linkRes.mu.probes = nil
- linkRes.mu.Unlock()
- if diff != "" {
- return fmt.Errorf("link address resolver probe #%d mismatch (-want, +got):\n%s", i+1, diff)
- }
-
- e.mu.Lock()
- state := e.mu.neigh.State
- e.mu.Unlock()
- if state != Probe {
- return fmt.Errorf("got e.mu.neigh.State = %q, want = %q", state, Probe)
- }
-
- clock.Advance(c.RetransmitTimer)
- }
-
- {
- e.mu.Lock()
- state := e.mu.neigh.State
- e.mu.Unlock()
- if state != Unreachable {
- return fmt.Errorf("got e.mu.neigh.State = %q, want = %q", state, Unreachable)
- }
- }
-
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestChanged,
- NICID: entryTestNICID,
- Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: entryTestLinkAddr1,
- State: Unreachable,
- UpdatedAt: clock.Now(),
- },
- },
- }
- nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, nudDisp.mu.events)
- nudDisp.mu.events = nil
- nudDisp.mu.Unlock()
- if diff != "" {
- return fmt.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
-
- return nil
-}
-
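Rough timing implied by the loop above, assuming the entry only becomes Unreachable after MaxUnicastProbes unanswered probes spaced RetransmitTimer apart (a sketch, not part of the original file):

// timeSpentInProbe estimates how long an entry lingers in Probe before the
// Unreachable transition: the first probe is sent on entering Probe, and
// each of the MaxUnicastProbes probes is given RetransmitTimer to be
// answered. With the defaults declared later in this diff (3 probes, 1s
// retransmit timer) this comes to roughly 3 seconds.
func timeSpentInProbe(c NUDConfigurations) time.Duration {
	return time.Duration(c.MaxUnicastProbes) * c.RetransmitTimer
}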
-func TestEntryUnreachableToIncomplete(t *testing.T) {
- c := DefaultNUDConfigurations()
- c.MaxMulticastProbes = 3
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToIncomplete(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToIncomplete(...) = %s", err)
- }
- if err := incompleteToUnreachable(c, e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("incompleteToUnreachable(...) = %s", err)
- }
- if err := unreachableToIncomplete(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unreachableToIncomplete(...) = %s", err)
- }
-}
-
-func unreachableToIncomplete(e *neighborEntry, nudDisp *testNUDDispatcher, linkRes *entryTestLinkResolver, clock *faketime.ManualClock) error {
- if err := func() error {
- e.mu.Lock()
- defer e.mu.Unlock()
-
- if e.mu.neigh.State != Unreachable {
- return fmt.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Unreachable)
- }
- e.handlePacketQueuedLocked(entryTestAddr2)
- if e.mu.neigh.State != Incomplete {
- return fmt.Errorf("got e.mu.neigh.State = %q, want = %q", e.mu.neigh.State, Incomplete)
- }
- return nil
- }(); err != nil {
- return err
- }
-
- runImmediatelyScheduledJobs(clock)
- wantProbes := []entryTestProbeInfo{
- {
- RemoteAddress: entryTestAddr1,
- RemoteLinkAddress: tcpip.LinkAddress(""),
- LocalAddress: entryTestAddr2,
- },
- }
- linkRes.mu.Lock()
- diff := cmp.Diff(wantProbes, linkRes.mu.probes)
- linkRes.mu.probes = nil
- linkRes.mu.Unlock()
- if diff != "" {
- return fmt.Errorf("link address resolver probes mismatch (-want, +got):\n%s", diff)
- }
-
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestChanged,
- NICID: entryTestNICID,
- Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: tcpip.LinkAddress(""),
- State: Incomplete,
- UpdatedAt: clock.Now(),
- },
- },
- }
- {
- nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, nudDisp.mu.events)
- nudDisp.mu.events = nil
- nudDisp.mu.Unlock()
- if diff != "" {
- return fmt.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
- }
- }
- return nil
-}
-
-func TestEntryUnreachableToStale(t *testing.T) {
- c := DefaultNUDConfigurations()
- c.MaxMulticastProbes = 3
-	// Eliminate random factors from the ReachableTime computation so the
-	// Reachable to Stale transition takes exactly BaseReachableTime.
- c.MinRandomFactor = 1
- c.MaxRandomFactor = 1
-
- e, nudDisp, linkRes, clock := entryTestSetup(c)
-
- if err := unknownToIncomplete(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unknownToIncomplete(...) = %s", err)
- }
- if err := incompleteToUnreachable(c, e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("incompleteToUnreachable(...) = %s", err)
- }
- if err := unreachableToIncomplete(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("unreachableToIncomplete(...) = %s", err)
- }
- if err := incompleteToReachable(e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("incompleteToReachable(...) = %s", err)
- }
- if err := reachableToStale(c, e, nudDisp, linkRes, clock); err != nil {
- t.Fatalf("reachableToStale(...) = %s", err)
- }
-}
diff --git a/pkg/tcpip/stack/nic_test.go b/pkg/tcpip/stack/nic_test.go
deleted file mode 100644
index 5cb342f78..000000000
--- a/pkg/tcpip/stack/nic_test.go
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package stack
-
-import (
- "reflect"
- "testing"
-
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/testutil"
-)
-
-var _ AddressableEndpoint = (*testIPv6Endpoint)(nil)
-var _ NetworkEndpoint = (*testIPv6Endpoint)(nil)
-var _ NDPEndpoint = (*testIPv6Endpoint)(nil)
-
-// An IPv6 NetworkEndpoint that throws away outgoing packets.
-//
-// We use this instead of ipv6.endpoint because the ipv6 package depends on
-// the stack package which this test lives in, causing a cyclic dependency.
-type testIPv6Endpoint struct {
- AddressableEndpointState
-
- nic NetworkInterface
- protocol *testIPv6Protocol
-
- invalidatedRtr tcpip.Address
-}
-
-func (*testIPv6Endpoint) Enable() tcpip.Error {
- return nil
-}
-
-func (*testIPv6Endpoint) Enabled() bool {
- return true
-}
-
-func (*testIPv6Endpoint) Disable() {}
-
-// DefaultTTL implements NetworkEndpoint.DefaultTTL.
-func (*testIPv6Endpoint) DefaultTTL() uint8 {
- return 0
-}
-
-// MTU implements NetworkEndpoint.MTU.
-func (e *testIPv6Endpoint) MTU() uint32 {
- return e.nic.MTU() - header.IPv6MinimumSize
-}
-
-// MaxHeaderLength implements NetworkEndpoint.MaxHeaderLength.
-func (e *testIPv6Endpoint) MaxHeaderLength() uint16 {
- return e.nic.MaxHeaderLength() + header.IPv6MinimumSize
-}
-
-// WritePacket implements NetworkEndpoint.WritePacket.
-func (*testIPv6Endpoint) WritePacket(*Route, NetworkHeaderParams, *PacketBuffer) tcpip.Error {
- return nil
-}
-
-// WritePackets implements NetworkEndpoint.WritePackets.
-func (*testIPv6Endpoint) WritePackets(*Route, PacketBufferList, NetworkHeaderParams) (int, tcpip.Error) {
- // Our tests don't use this so we don't support it.
- return 0, &tcpip.ErrNotSupported{}
-}
-
-// WriteHeaderIncludedPacket implements
-// NetworkEndpoint.WriteHeaderIncludedPacket.
-func (*testIPv6Endpoint) WriteHeaderIncludedPacket(*Route, *PacketBuffer) tcpip.Error {
- // Our tests don't use this so we don't support it.
- return &tcpip.ErrNotSupported{}
-}
-
-// HandlePacket implements NetworkEndpoint.HandlePacket.
-func (*testIPv6Endpoint) HandlePacket(*PacketBuffer) {}
-
-// Close implements NetworkEndpoint.Close.
-func (e *testIPv6Endpoint) Close() {
- e.AddressableEndpointState.Cleanup()
-}
-
-// NetworkProtocolNumber implements NetworkEndpoint.NetworkProtocolNumber.
-func (*testIPv6Endpoint) NetworkProtocolNumber() tcpip.NetworkProtocolNumber {
- return header.IPv6ProtocolNumber
-}
-
-func (e *testIPv6Endpoint) InvalidateDefaultRouter(rtr tcpip.Address) {
- e.invalidatedRtr = rtr
-}
-
-// Stats implements NetworkEndpoint.
-func (*testIPv6Endpoint) Stats() NetworkEndpointStats {
- return &testIPv6EndpointStats{}
-}
-
-var _ NetworkEndpointStats = (*testIPv6EndpointStats)(nil)
-
-type testIPv6EndpointStats struct{}
-
-// IsNetworkEndpointStats implements stack.NetworkEndpointStats.
-func (*testIPv6EndpointStats) IsNetworkEndpointStats() {}
-
-// We use this instead of ipv6.protocol because the ipv6 package depends on
-// the stack package which this test lives in, causing a cyclic dependency.
-type testIPv6Protocol struct{}
-
-// Number implements NetworkProtocol.Number.
-func (*testIPv6Protocol) Number() tcpip.NetworkProtocolNumber {
- return header.IPv6ProtocolNumber
-}
-
-// MinimumPacketSize implements NetworkProtocol.MinimumPacketSize.
-func (*testIPv6Protocol) MinimumPacketSize() int {
- return header.IPv6MinimumSize
-}
-
-// DefaultPrefixLen implements NetworkProtocol.DefaultPrefixLen.
-func (*testIPv6Protocol) DefaultPrefixLen() int {
- return header.IPv6AddressSize * 8
-}
-
-// ParseAddresses implements NetworkProtocol.ParseAddresses.
-func (*testIPv6Protocol) ParseAddresses(v buffer.View) (src, dst tcpip.Address) {
- h := header.IPv6(v)
- return h.SourceAddress(), h.DestinationAddress()
-}
-
-// NewEndpoint implements NetworkProtocol.NewEndpoint.
-func (p *testIPv6Protocol) NewEndpoint(nic NetworkInterface, _ TransportDispatcher) NetworkEndpoint {
- e := &testIPv6Endpoint{
- nic: nic,
- protocol: p,
- }
- e.AddressableEndpointState.Init(e)
- return e
-}
-
-// SetOption implements NetworkProtocol.SetOption.
-func (*testIPv6Protocol) SetOption(tcpip.SettableNetworkProtocolOption) tcpip.Error {
- return nil
-}
-
-// Option implements NetworkProtocol.Option.
-func (*testIPv6Protocol) Option(tcpip.GettableNetworkProtocolOption) tcpip.Error {
- return nil
-}
-
-// Close implements NetworkProtocol.Close.
-func (*testIPv6Protocol) Close() {}
-
-// Wait implements NetworkProtocol.Wait.
-func (*testIPv6Protocol) Wait() {}
-
-// Parse implements NetworkProtocol.Parse.
-func (*testIPv6Protocol) Parse(*PacketBuffer) (tcpip.TransportProtocolNumber, bool, bool) {
- return 0, false, false
-}
-
-func TestDisabledRxStatsWhenNICDisabled(t *testing.T) {
- // When the NIC is disabled, the only field that matters is the stats field.
- // This test is limited to stats counter checks.
- nic := nic{
- stats: makeNICStats(tcpip.NICStats{}.FillIn()),
- }
-
- if got := nic.stats.local.DisabledRx.Packets.Value(); got != 0 {
- t.Errorf("got DisabledRx.Packets = %d, want = 0", got)
- }
- if got := nic.stats.local.DisabledRx.Bytes.Value(); got != 0 {
- t.Errorf("got DisabledRx.Bytes = %d, want = 0", got)
- }
- if got := nic.stats.local.Rx.Packets.Value(); got != 0 {
- t.Errorf("got Rx.Packets = %d, want = 0", got)
- }
- if got := nic.stats.local.Rx.Bytes.Value(); got != 0 {
- t.Errorf("got Rx.Bytes = %d, want = 0", got)
- }
-
- if t.Failed() {
- t.FailNow()
- }
-
- nic.DeliverNetworkPacket("", "", 0, NewPacketBuffer(PacketBufferOptions{
- Data: buffer.View([]byte{1, 2, 3, 4}).ToVectorisedView(),
- }))
-
- if got := nic.stats.local.DisabledRx.Packets.Value(); got != 1 {
- t.Errorf("got DisabledRx.Packets = %d, want = 1", got)
- }
- if got := nic.stats.local.DisabledRx.Bytes.Value(); got != 4 {
- t.Errorf("got DisabledRx.Bytes = %d, want = 4", got)
- }
- if got := nic.stats.local.Rx.Packets.Value(); got != 0 {
- t.Errorf("got Rx.Packets = %d, want = 0", got)
- }
- if got := nic.stats.local.Rx.Bytes.Value(); got != 0 {
- t.Errorf("got Rx.Bytes = %d, want = 0", got)
- }
-}
-
-func TestMultiCounterStatsInitialization(t *testing.T) {
- global := tcpip.NICStats{}.FillIn()
- nic := nic{
- stats: makeNICStats(global),
- }
- multi := nic.stats.multiCounterNICStats
- local := nic.stats.local
- if err := testutil.ValidateMultiCounterStats(reflect.ValueOf(&multi).Elem(), []reflect.Value{reflect.ValueOf(&local).Elem(), reflect.ValueOf(&global).Elem()}); err != nil {
- t.Error(err)
- }
-}
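The var _ Interface = (*Type)(nil) lines near the top of this file are compile-time assertions: if the test stub ever stops satisfying one of the stack interfaces, the build breaks immediately instead of a test failing later. The same idiom in isolation, with made-up names for illustration:

type greeter interface{ Greet() string }

type stubGreeter struct{}

func (stubGreeter) Greet() string { return "hello" }

// Fails to compile if stubGreeter ever stops satisfying greeter;
// costs nothing at runtime or in binary size.
var _ greeter = (*stubGreeter)(nil)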
diff --git a/pkg/tcpip/stack/nud_test.go b/pkg/tcpip/stack/nud_test.go
deleted file mode 100644
index 1aeb2f8a5..000000000
--- a/pkg/tcpip/stack/nud_test.go
+++ /dev/null
@@ -1,816 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package stack_test
-
-import (
- "math"
- "math/rand"
- "testing"
- "time"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/faketime"
- "gvisor.dev/gvisor/pkg/tcpip/link/channel"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
-)
-
-const (
- defaultBaseReachableTime = 30 * time.Second
- minimumBaseReachableTime = time.Millisecond
- defaultMinRandomFactor = 0.5
- defaultMaxRandomFactor = 1.5
- defaultRetransmitTimer = time.Second
- minimumRetransmitTimer = time.Millisecond
- defaultDelayFirstProbeTime = 5 * time.Second
- defaultMaxMulticastProbes = 3
- defaultMaxUnicastProbes = 3
-
- defaultFakeRandomNum = 0.5
-)
-
-// fakeRand is a deterministic random number generator.
-type fakeRand struct {
- num float32
-}
-
-var _ rand.Source = (*fakeRand)(nil)
-
-func (f *fakeRand) Int63() int64 {
- return int64(f.num * float32(1<<63))
-}
-
-func (*fakeRand) Seed(int64) {}
-
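All of math/rand's derived methods (Float32, Float64, Intn, ...) ultimately draw from the Source's Int63, so pinning Int63 as fakeRand does makes the whole generator deterministic. A small usage sketch, under that assumption:

func exampleFakeRandUsage() {
	// With num = 0.5, Int63 returns 1<<62, so Float64 comes out as ~0.5 on
	// every call; the NUD tests below rely on this determinism.
	rng := rand.New(&fakeRand{num: defaultFakeRandomNum})
	_ = rng.Float64() // always ~0.5
}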
-func TestNUDFunctions(t *testing.T) {
- const nicID = 1
-
- tests := []struct {
- name string
- nicID tcpip.NICID
- netProtoFactory []stack.NetworkProtocolFactory
- extraLinkCapabilities stack.LinkEndpointCapabilities
- expectedErr tcpip.Error
- }{
- {
- name: "Invalid NICID",
- nicID: nicID + 1,
- netProtoFactory: []stack.NetworkProtocolFactory{ipv6.NewProtocol},
- extraLinkCapabilities: stack.CapabilityResolutionRequired,
- expectedErr: &tcpip.ErrUnknownNICID{},
- },
- {
- name: "No network protocol",
- nicID: nicID,
- expectedErr: &tcpip.ErrNotSupported{},
- },
- {
- name: "With IPv6",
- nicID: nicID,
- netProtoFactory: []stack.NetworkProtocolFactory{ipv6.NewProtocol},
- expectedErr: &tcpip.ErrNotSupported{},
- },
- {
- name: "With resolution capability",
- nicID: nicID,
- extraLinkCapabilities: stack.CapabilityResolutionRequired,
- expectedErr: &tcpip.ErrNotSupported{},
- },
- {
- name: "With IPv6 and resolution capability",
- nicID: nicID,
- netProtoFactory: []stack.NetworkProtocolFactory{ipv6.NewProtocol},
- extraLinkCapabilities: stack.CapabilityResolutionRequired,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NUDConfigs: stack.DefaultNUDConfigurations(),
- NetworkProtocols: test.netProtoFactory,
- Clock: clock,
- })
-
- e := channel.New(0, 0, linkAddr1)
- e.LinkEPCapabilities &^= stack.CapabilityResolutionRequired
- e.LinkEPCapabilities |= test.extraLinkCapabilities
-
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
-
- configs := stack.DefaultNUDConfigurations()
- configs.BaseReachableTime = time.Hour
-
- {
- err := s.SetNUDConfigurations(test.nicID, ipv6.ProtocolNumber, configs)
- if diff := cmp.Diff(test.expectedErr, err); diff != "" {
- t.Errorf("s.SetNUDConfigurations(%d, %d, _) error mismatch (-want +got):\n%s", test.nicID, ipv6.ProtocolNumber, diff)
- }
- }
-
- {
- gotConfigs, err := s.NUDConfigurations(test.nicID, ipv6.ProtocolNumber)
- if diff := cmp.Diff(test.expectedErr, err); diff != "" {
- t.Errorf("s.NUDConfigurations(%d, %d) error mismatch (-want +got):\n%s", test.nicID, ipv6.ProtocolNumber, diff)
- } else if test.expectedErr == nil {
- if diff := cmp.Diff(configs, gotConfigs); diff != "" {
- t.Errorf("got configs mismatch (-want +got):\n%s", diff)
- }
- }
- }
-
- for _, addr := range []tcpip.Address{llAddr1, llAddr2} {
- {
- err := s.AddStaticNeighbor(test.nicID, ipv6.ProtocolNumber, addr, linkAddr1)
- if diff := cmp.Diff(test.expectedErr, err); diff != "" {
- t.Errorf("s.AddStaticNeighbor(%d, %d, %s, %s) error mismatch (-want +got):\n%s", test.nicID, ipv6.ProtocolNumber, addr, linkAddr1, diff)
- }
- }
- }
-
- {
- wantErr := test.expectedErr
- for i := 0; i < 2; i++ {
- {
- err := s.RemoveNeighbor(test.nicID, ipv6.ProtocolNumber, llAddr1)
- if diff := cmp.Diff(wantErr, err); diff != "" {
-							t.Errorf("s.RemoveNeighbor(%d, %d, %s) error mismatch (-want +got):\n%s", test.nicID, ipv6.ProtocolNumber, llAddr1, diff)
- }
- }
-
- if test.expectedErr != nil {
- break
- }
-
- // Removing a neighbor that does not exist should give us a bad address
- // error.
- wantErr = &tcpip.ErrBadAddress{}
- }
- }
-
- {
- neighbors, err := s.Neighbors(test.nicID, ipv6.ProtocolNumber)
- if diff := cmp.Diff(test.expectedErr, err); diff != "" {
-					t.Errorf("s.Neighbors(%d, %d) error mismatch (-want +got):\n%s", test.nicID, ipv6.ProtocolNumber, diff)
- } else if test.expectedErr == nil {
- if diff := cmp.Diff(
- []stack.NeighborEntry{{Addr: llAddr2, LinkAddr: linkAddr1, State: stack.Static, UpdatedAt: clock.Now()}},
- neighbors,
- ); diff != "" {
- t.Errorf("neighbors mismatch (-want +got):\n%s", diff)
- }
- }
- }
-
- {
- err := s.ClearNeighbors(test.nicID, ipv6.ProtocolNumber)
- if diff := cmp.Diff(test.expectedErr, err); diff != "" {
-					t.Errorf("s.ClearNeighbors(%d, %d) error mismatch (-want +got):\n%s", test.nicID, ipv6.ProtocolNumber, diff)
- } else if test.expectedErr == nil {
- if neighbors, err := s.Neighbors(test.nicID, ipv6.ProtocolNumber); err != nil {
- t.Errorf("s.Neighbors(%d, %d): %s", test.nicID, ipv6.ProtocolNumber, err)
- } else if len(neighbors) != 0 {
- t.Errorf("got len(neighbors) = %d, want = 0; neighbors = %#v", len(neighbors), neighbors)
- }
- }
- }
- })
- }
-}
-
-func TestDefaultNUDConfigurations(t *testing.T) {
- const nicID = 1
-
- e := channel.New(0, 1280, linkAddr1)
- e.LinkEPCapabilities |= stack.CapabilityResolutionRequired
-
- s := stack.New(stack.Options{
- // A neighbor cache is required to store NUDConfigurations. The networking
- // stack will only allocate neighbor caches if a protocol providing link
- // address resolution is specified (e.g. ARP or IPv6).
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocol},
- NUDConfigs: stack.DefaultNUDConfigurations(),
- })
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
- c, err := s.NUDConfigurations(nicID, ipv6.ProtocolNumber)
- if err != nil {
- t.Fatalf("got stack.NUDConfigurations(%d, %d) = %s", nicID, ipv6.ProtocolNumber, err)
- }
- if got, want := c, stack.DefaultNUDConfigurations(); got != want {
- t.Errorf("got stack.NUDConfigurations(%d, %d) = %+v, want = %+v", nicID, ipv6.ProtocolNumber, got, want)
- }
-}
-
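Each of the configuration tests that follow repeats the same skeleton: build a stack with one candidate value and read back what was actually stored. A hypothetical helper (not in the original file) that captures the shared boilerplate:

func storedNUDConfigurations(t *testing.T, c stack.NUDConfigurations) stack.NUDConfigurations {
	t.Helper()
	const nicID = 1

	e := channel.New(0, 1280, linkAddr1)
	e.LinkEPCapabilities |= stack.CapabilityResolutionRequired

	// A neighbor cache (and therefore a resolution-capable protocol such as
	// IPv6) is required for the stack to retain NUDConfigurations.
	s := stack.New(stack.Options{
		NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocol},
		NUDConfigs:       c,
	})
	if err := s.CreateNIC(nicID, e); err != nil {
		t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
	}
	sc, err := s.NUDConfigurations(nicID, ipv6.ProtocolNumber)
	if err != nil {
		t.Fatalf("NUDConfigurations(%d, %d) = %s", nicID, ipv6.ProtocolNumber, err)
	}
	return sc
}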
-func TestNUDConfigurationsBaseReachableTime(t *testing.T) {
- tests := []struct {
- name string
- baseReachableTime time.Duration
- want time.Duration
- }{
- // Invalid cases
- {
- name: "EqualToZero",
- baseReachableTime: 0,
- want: defaultBaseReachableTime,
- },
- // Valid cases
- {
- name: "MoreThanZero",
- baseReachableTime: time.Millisecond,
- want: time.Millisecond,
- },
- {
- name: "MoreThanDefaultBaseReachableTime",
- baseReachableTime: 2 * defaultBaseReachableTime,
- want: 2 * defaultBaseReachableTime,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- const nicID = 1
-
- c := stack.DefaultNUDConfigurations()
- c.BaseReachableTime = test.baseReachableTime
-
- e := channel.New(0, 1280, linkAddr1)
- e.LinkEPCapabilities |= stack.CapabilityResolutionRequired
-
- s := stack.New(stack.Options{
- // A neighbor cache is required to store NUDConfigurations. The
- // networking stack will only allocate neighbor caches if a protocol
- // providing link address resolution is specified (e.g. ARP or IPv6).
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocol},
- NUDConfigs: c,
- })
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
- sc, err := s.NUDConfigurations(nicID, ipv6.ProtocolNumber)
- if err != nil {
- t.Fatalf("got stack.NUDConfigurations(%d, %d) = %s", nicID, ipv6.ProtocolNumber, err)
- }
- if got := sc.BaseReachableTime; got != test.want {
- t.Errorf("got BaseReachableTime = %q, want = %q", got, test.want)
- }
- })
- }
-}
-
-func TestNUDConfigurationsMinRandomFactor(t *testing.T) {
- tests := []struct {
- name string
- minRandomFactor float32
- want float32
- }{
- // Invalid cases
- {
- name: "LessThanZero",
- minRandomFactor: -1,
- want: defaultMinRandomFactor,
- },
- {
- name: "EqualToZero",
- minRandomFactor: 0,
- want: defaultMinRandomFactor,
- },
- // Valid cases
- {
- name: "MoreThanZero",
- minRandomFactor: 1,
- want: 1,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- const nicID = 1
-
- c := stack.DefaultNUDConfigurations()
- c.MinRandomFactor = test.minRandomFactor
-
- e := channel.New(0, 1280, linkAddr1)
- e.LinkEPCapabilities |= stack.CapabilityResolutionRequired
-
- s := stack.New(stack.Options{
- // A neighbor cache is required to store NUDConfigurations. The
- // networking stack will only allocate neighbor caches if a protocol
- // providing link address resolution is specified (e.g. ARP or IPv6).
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocol},
- NUDConfigs: c,
- })
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
- sc, err := s.NUDConfigurations(nicID, ipv6.ProtocolNumber)
- if err != nil {
- t.Fatalf("got stack.NUDConfigurations(%d, %d) = %s", nicID, ipv6.ProtocolNumber, err)
- }
- if got := sc.MinRandomFactor; got != test.want {
- t.Errorf("got MinRandomFactor = %f, want = %f", got, test.want)
- }
- })
- }
-}
-
-func TestNUDConfigurationsMaxRandomFactor(t *testing.T) {
- tests := []struct {
- name string
- minRandomFactor float32
- maxRandomFactor float32
- want float32
- }{
- // Invalid cases
- {
- name: "LessThanZero",
- minRandomFactor: defaultMinRandomFactor,
- maxRandomFactor: -1,
- want: defaultMaxRandomFactor,
- },
- {
- name: "EqualToZero",
- minRandomFactor: defaultMinRandomFactor,
- maxRandomFactor: 0,
- want: defaultMaxRandomFactor,
- },
- {
- name: "LessThanMinRandomFactor",
- minRandomFactor: defaultMinRandomFactor,
- maxRandomFactor: defaultMinRandomFactor * 0.99,
- want: defaultMaxRandomFactor,
- },
- {
- name: "MoreThanMinRandomFactorWhenMinRandomFactorIsLargerThanMaxRandomFactorDefault",
- minRandomFactor: defaultMaxRandomFactor * 2,
- maxRandomFactor: defaultMaxRandomFactor,
- want: defaultMaxRandomFactor * 6,
- },
- // Valid cases
- {
- name: "EqualToMinRandomFactor",
- minRandomFactor: defaultMinRandomFactor,
- maxRandomFactor: defaultMinRandomFactor,
- want: defaultMinRandomFactor,
- },
- {
- name: "MoreThanMinRandomFactor",
- minRandomFactor: defaultMinRandomFactor,
- maxRandomFactor: defaultMinRandomFactor * 1.1,
- want: defaultMinRandomFactor * 1.1,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- const nicID = 1
-
- c := stack.DefaultNUDConfigurations()
- c.MinRandomFactor = test.minRandomFactor
- c.MaxRandomFactor = test.maxRandomFactor
-
- e := channel.New(0, 1280, linkAddr1)
- e.LinkEPCapabilities |= stack.CapabilityResolutionRequired
-
- s := stack.New(stack.Options{
- // A neighbor cache is required to store NUDConfigurations. The
- // networking stack will only allocate neighbor caches if a protocol
- // providing link address resolution is specified (e.g. ARP or IPv6).
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocol},
- NUDConfigs: c,
- })
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
- sc, err := s.NUDConfigurations(nicID, ipv6.ProtocolNumber)
- if err != nil {
- t.Fatalf("got stack.NUDConfigurations(%d, %d) = %s", nicID, ipv6.ProtocolNumber, err)
- }
- if got := sc.MaxRandomFactor; got != test.want {
- t.Errorf("got MaxRandomFactor = %f, want = %f", got, test.want)
- }
- })
- }
-}
-
-func TestNUDConfigurationsRetransmitTimer(t *testing.T) {
- tests := []struct {
- name string
- retransmitTimer time.Duration
- want time.Duration
- }{
- // Invalid cases
- {
- name: "EqualToZero",
- retransmitTimer: 0,
- want: defaultRetransmitTimer,
- },
- {
- name: "LessThanMinimumRetransmitTimer",
- retransmitTimer: minimumRetransmitTimer - time.Nanosecond,
- want: defaultRetransmitTimer,
- },
- // Valid cases
- {
- name: "EqualToMinimumRetransmitTimer",
- retransmitTimer: minimumRetransmitTimer,
- want: minimumBaseReachableTime,
-			want:            minimumRetransmitTimer,
-		},
-		{
-			name:            "LargerThanMinimumRetransmitTimer",
-			retransmitTimer: 2 * minimumRetransmitTimer,
-			want:            2 * minimumRetransmitTimer,
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- const nicID = 1
-
- c := stack.DefaultNUDConfigurations()
- c.RetransmitTimer = test.retransmitTimer
-
- e := channel.New(0, 1280, linkAddr1)
- e.LinkEPCapabilities |= stack.CapabilityResolutionRequired
-
- s := stack.New(stack.Options{
- // A neighbor cache is required to store NUDConfigurations. The
- // networking stack will only allocate neighbor caches if a protocol
- // providing link address resolution is specified (e.g. ARP or IPv6).
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocol},
- NUDConfigs: c,
- })
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
- sc, err := s.NUDConfigurations(nicID, ipv6.ProtocolNumber)
- if err != nil {
- t.Fatalf("got stack.NUDConfigurations(%d, %d) = %s", nicID, ipv6.ProtocolNumber, err)
- }
- if got := sc.RetransmitTimer; got != test.want {
- t.Errorf("got RetransmitTimer = %q, want = %q", got, test.want)
- }
- })
- }
-}
-
-func TestNUDConfigurationsDelayFirstProbeTime(t *testing.T) {
- tests := []struct {
- name string
- delayFirstProbeTime time.Duration
- want time.Duration
- }{
- // Invalid cases
- {
- name: "EqualToZero",
- delayFirstProbeTime: 0,
- want: defaultDelayFirstProbeTime,
- },
- // Valid cases
- {
- name: "MoreThanZero",
- delayFirstProbeTime: time.Millisecond,
- want: time.Millisecond,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- const nicID = 1
-
- c := stack.DefaultNUDConfigurations()
- c.DelayFirstProbeTime = test.delayFirstProbeTime
-
- e := channel.New(0, 1280, linkAddr1)
- e.LinkEPCapabilities |= stack.CapabilityResolutionRequired
-
- s := stack.New(stack.Options{
- // A neighbor cache is required to store NUDConfigurations. The
- // networking stack will only allocate neighbor caches if a protocol
- // providing link address resolution is specified (e.g. ARP or IPv6).
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocol},
- NUDConfigs: c,
- })
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
- sc, err := s.NUDConfigurations(nicID, ipv6.ProtocolNumber)
- if err != nil {
- t.Fatalf("got stack.NUDConfigurations(%d, %d) = %s", nicID, ipv6.ProtocolNumber, err)
- }
- if got := sc.DelayFirstProbeTime; got != test.want {
- t.Errorf("got DelayFirstProbeTime = %q, want = %q", got, test.want)
- }
- })
- }
-}
-
-func TestNUDConfigurationsMaxMulticastProbes(t *testing.T) {
- tests := []struct {
- name string
- maxMulticastProbes uint32
- want uint32
- }{
- // Invalid cases
- {
- name: "EqualToZero",
- maxMulticastProbes: 0,
- want: defaultMaxMulticastProbes,
- },
- // Valid cases
- {
- name: "MoreThanZero",
- maxMulticastProbes: 1,
- want: 1,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- const nicID = 1
-
- c := stack.DefaultNUDConfigurations()
- c.MaxMulticastProbes = test.maxMulticastProbes
-
- e := channel.New(0, 1280, linkAddr1)
- e.LinkEPCapabilities |= stack.CapabilityResolutionRequired
-
- s := stack.New(stack.Options{
- // A neighbor cache is required to store NUDConfigurations. The
- // networking stack will only allocate neighbor caches if a protocol
- // providing link address resolution is specified (e.g. ARP or IPv6).
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocol},
- NUDConfigs: c,
- })
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
- sc, err := s.NUDConfigurations(nicID, ipv6.ProtocolNumber)
- if err != nil {
- t.Fatalf("got stack.NUDConfigurations(%d, %d) = %s", nicID, ipv6.ProtocolNumber, err)
- }
- if got := sc.MaxMulticastProbes; got != test.want {
- t.Errorf("got MaxMulticastProbes = %q, want = %q", got, test.want)
- }
- })
- }
-}
-
-func TestNUDConfigurationsMaxUnicastProbes(t *testing.T) {
- tests := []struct {
- name string
- maxUnicastProbes uint32
- want uint32
- }{
- // Invalid cases
- {
- name: "EqualToZero",
- maxUnicastProbes: 0,
- want: defaultMaxUnicastProbes,
- },
- // Valid cases
- {
- name: "MoreThanZero",
- maxUnicastProbes: 1,
- want: 1,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- const nicID = 1
-
- c := stack.DefaultNUDConfigurations()
- c.MaxUnicastProbes = test.maxUnicastProbes
-
- e := channel.New(0, 1280, linkAddr1)
- e.LinkEPCapabilities |= stack.CapabilityResolutionRequired
-
- s := stack.New(stack.Options{
- // A neighbor cache is required to store NUDConfigurations. The
- // networking stack will only allocate neighbor caches if a protocol
- // providing link address resolution is specified (e.g. ARP or IPv6).
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocol},
- NUDConfigs: c,
- })
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
- sc, err := s.NUDConfigurations(nicID, ipv6.ProtocolNumber)
- if err != nil {
- t.Fatalf("got stack.NUDConfigurations(%d, %d) = %s", nicID, ipv6.ProtocolNumber, err)
- }
- if got := sc.MaxUnicastProbes; got != test.want {
- t.Errorf("got MaxUnicastProbes = %q, want = %q", got, test.want)
- }
- })
- }
-}
-
-// TestNUDStateReachableTime verifies the correctness of the ReachableTime
-// computation.
-func TestNUDStateReachableTime(t *testing.T) {
- tests := []struct {
- name string
- baseReachableTime time.Duration
- minRandomFactor float32
- maxRandomFactor float32
- want time.Duration
- }{
- {
- name: "AllZeros",
- baseReachableTime: 0,
- minRandomFactor: 0,
- maxRandomFactor: 0,
- want: 0,
- },
- {
- name: "ZeroMaxRandomFactor",
- baseReachableTime: time.Second,
- minRandomFactor: 0,
- maxRandomFactor: 0,
- want: 0,
- },
- {
- name: "ZeroMinRandomFactor",
- baseReachableTime: time.Second,
- minRandomFactor: 0,
- maxRandomFactor: 1,
- want: time.Duration(defaultFakeRandomNum * float32(time.Second)),
- },
- {
- name: "FractionalRandomFactor",
- baseReachableTime: time.Duration(math.MaxInt64),
- minRandomFactor: 0.001,
- maxRandomFactor: 0.002,
- want: time.Duration((0.001 + (0.001 * defaultFakeRandomNum)) * float32(math.MaxInt64)),
- },
- {
- name: "MinAndMaxRandomFactorsEqual",
- baseReachableTime: time.Second,
- minRandomFactor: 1,
- maxRandomFactor: 1,
- want: time.Second,
- },
- {
- name: "MinAndMaxRandomFactorsDifferent",
- baseReachableTime: time.Second,
- minRandomFactor: 1,
- maxRandomFactor: 2,
- want: time.Duration((1.0 + defaultFakeRandomNum) * float32(time.Second)),
- },
- {
- name: "MaxInt64",
- baseReachableTime: time.Duration(math.MaxInt64),
- minRandomFactor: 1,
- maxRandomFactor: 1,
- want: time.Duration(math.MaxInt64),
- },
- {
- name: "Overflow",
- baseReachableTime: time.Duration(math.MaxInt64),
- minRandomFactor: 1.5,
- maxRandomFactor: 1.5,
- want: time.Duration(math.MaxInt64),
- },
- {
- name: "DoubleOverflow",
- baseReachableTime: time.Duration(math.MaxInt64),
- minRandomFactor: 2.5,
- maxRandomFactor: 2.5,
- want: time.Duration(math.MaxInt64),
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- c := stack.NUDConfigurations{
- BaseReachableTime: test.baseReachableTime,
- MinRandomFactor: test.minRandomFactor,
- MaxRandomFactor: test.maxRandomFactor,
- }
- // A fake random number generator is used to ensure deterministic
- // results.
- rng := fakeRand{
- num: defaultFakeRandomNum,
- }
- var clock faketime.NullClock
- s := stack.NewNUDState(c, &clock, rand.New(&rng))
- if got, want := s.ReachableTime(), test.want; got != want {
- t.Errorf("got ReachableTime = %q, want = %q", got, want)
- }
- })
- }
-}
-
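The expected values in the table above are consistent with the RFC 4861 ReachableTime rule: a uniform random factor between MinRandomFactor and MaxRandomFactor applied to BaseReachableTime, saturating at the maximum duration. With the fake random number fixed at 0.5, the MinAndMaxRandomFactorsDifferent row works out to 1s * (1 + (2-1)*0.5) = 1.5s. A sketch of that rule (an illustration, not the stack's implementation):

func expectedReachableTime(base time.Duration, minFactor, maxFactor, rnd float32) time.Duration {
	// Uniform draw between minFactor and maxFactor, scaled by the base;
	// clamp to MaxInt64 when the product would overflow a time.Duration.
	factor := minFactor + (maxFactor-minFactor)*rnd
	if t := factor * float32(base); t < float32(math.MaxInt64) {
		return time.Duration(t)
	}
	return time.Duration(math.MaxInt64)
}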
-// TestNUDStateRecomputeReachableTime exercises the ReachableTime function
-// twice to verify recomputation of reachable time when the min random factor,
-// max random factor, or base reachable time changes.
-func TestNUDStateRecomputeReachableTime(t *testing.T) {
- const defaultBase = time.Second
- const defaultMin = 2.0 * defaultMaxRandomFactor
- const defaultMax = 3.0 * defaultMaxRandomFactor
-
- tests := []struct {
- name string
- baseReachableTime time.Duration
- minRandomFactor float32
- maxRandomFactor float32
- want time.Duration
- }{
- {
- name: "BaseReachableTime",
- baseReachableTime: 2 * defaultBase,
- minRandomFactor: defaultMin,
- maxRandomFactor: defaultMax,
- want: time.Duration((defaultMin + (defaultMax-defaultMin)*defaultFakeRandomNum) * float32(2*defaultBase)),
- },
- {
- name: "MinRandomFactor",
- baseReachableTime: defaultBase,
- minRandomFactor: defaultMax,
- maxRandomFactor: defaultMax,
- want: time.Duration(defaultMax * float32(defaultBase)),
- },
- {
- name: "MaxRandomFactor",
- baseReachableTime: defaultBase,
- minRandomFactor: defaultMin,
- maxRandomFactor: defaultMin,
- want: time.Duration(defaultMin * float32(defaultBase)),
- },
- {
- name: "BothRandomFactor",
- baseReachableTime: defaultBase,
- minRandomFactor: 2 * defaultMin,
- maxRandomFactor: 2 * defaultMax,
- want: time.Duration((2*defaultMin + (2*defaultMax-2*defaultMin)*defaultFakeRandomNum) * float32(defaultBase)),
- },
- {
- name: "BaseReachableTimeAndBothRandomFactors",
- baseReachableTime: 2 * defaultBase,
- minRandomFactor: 2 * defaultMin,
- maxRandomFactor: 2 * defaultMax,
- want: time.Duration((2*defaultMin + (2*defaultMax-2*defaultMin)*defaultFakeRandomNum) * float32(2*defaultBase)),
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- c := stack.DefaultNUDConfigurations()
- c.BaseReachableTime = defaultBase
- c.MinRandomFactor = defaultMin
- c.MaxRandomFactor = defaultMax
-
- // A fake random number generator is used to ensure deterministic
- // results.
- rng := fakeRand{
- num: defaultFakeRandomNum,
- }
- var clock faketime.NullClock
- s := stack.NewNUDState(c, &clock, rand.New(&rng))
- old := s.ReachableTime()
-
- if got, want := s.ReachableTime(), old; got != want {
- t.Errorf("got ReachableTime = %q, want = %q", got, want)
- }
-
- // Check for recomputation when changing the min random factor, the max
-			// random factor, the base reachable time, or any combination of those
-			// three options.
- c.BaseReachableTime = test.baseReachableTime
- c.MinRandomFactor = test.minRandomFactor
- c.MaxRandomFactor = test.maxRandomFactor
- s.SetConfig(c)
-
- if got, want := s.ReachableTime(), test.want; got != want {
- t.Errorf("got ReachableTime = %q, want = %q", got, want)
- }
-
- // Verify that ReachableTime isn't recomputed when none of the
- // configuration options change. The random factor is changed so that if
-			// a recomputation were to occur, ReachableTime would change.
- rng.num = defaultFakeRandomNum / 2.0
- if got, want := s.ReachableTime(), test.want; got != want {
- t.Errorf("got ReachableTime = %q, want = %q", got, want)
- }
- })
- }
-}
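The last check above pins down a caching property: ReachableTime is drawn once and only redrawn after SetConfig changes one of the relevant fields. An illustrative sketch of that pattern (not the stack's implementation):

type reachableTimeCache struct {
	value time.Duration
	valid bool
}

func (c *reachableTimeCache) get(draw func() time.Duration) time.Duration {
	if !c.valid {
		c.value = draw()
		c.valid = true
	}
	return c.value
}

func (c *reachableTimeCache) invalidate() {
	// Called when BaseReachableTime, MinRandomFactor, or MaxRandomFactor
	// changes, forcing a fresh draw on the next get.
	c.valid = false
}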
diff --git a/pkg/tcpip/stack/packet_buffer_list.go b/pkg/tcpip/stack/packet_buffer_list.go
new file mode 100644
index 000000000..ce7057d6b
--- /dev/null
+++ b/pkg/tcpip/stack/packet_buffer_list.go
@@ -0,0 +1,221 @@
+package stack
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type PacketBufferElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (PacketBufferElementMapper) linkerFor(elem *PacketBuffer) *PacketBuffer { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type PacketBufferList struct {
+ head *PacketBuffer
+ tail *PacketBuffer
+}
+
+// Reset resets list l to the empty state.
+func (l *PacketBufferList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *PacketBufferList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *PacketBufferList) Front() *PacketBuffer {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *PacketBufferList) Back() *PacketBuffer {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *PacketBufferList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (PacketBufferElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *PacketBufferList) PushFront(e *PacketBuffer) {
+ linker := PacketBufferElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ PacketBufferElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *PacketBufferList) PushBack(e *PacketBuffer) {
+ linker := PacketBufferElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ PacketBufferElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *PacketBufferList) PushBackList(m *PacketBufferList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ PacketBufferElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ PacketBufferElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *PacketBufferList) InsertAfter(b, e *PacketBuffer) {
+ bLinker := PacketBufferElementMapper{}.linkerFor(b)
+ eLinker := PacketBufferElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ PacketBufferElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *PacketBufferList) InsertBefore(a, e *PacketBuffer) {
+ aLinker := PacketBufferElementMapper{}.linkerFor(a)
+ eLinker := PacketBufferElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ PacketBufferElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *PacketBufferList) Remove(e *PacketBuffer) {
+ linker := PacketBufferElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ PacketBufferElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ PacketBufferElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type PacketBufferEntry struct {
+ next *PacketBuffer
+ prev *PacketBuffer
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *PacketBufferEntry) Next() *PacketBuffer {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *PacketBufferEntry) Prev() *PacketBuffer {
+ return e.prev
+}
+
+// SetNext assigns 'elem' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *PacketBufferEntry) SetNext(elem *PacketBuffer) {
+ e.next = elem
+}
+
+// SetPrev assigns 'elem' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *PacketBufferEntry) SetPrev(elem *PacketBuffer) {
+ e.prev = elem
+}
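A short usage sketch of the generated list, assuming PacketBuffer embeds PacketBufferEntry so that it supplies the Next/Prev/SetNext/SetPrev methods the identity ElementMapper relies on:

func examplePacketBufferListUsage() {
	var q PacketBufferList // the zero value is an empty, ready-to-use list

	pkt := NewPacketBuffer(PacketBufferOptions{})
	q.PushBack(pkt) // O(1); the links live inside the buffer itself

	for pb := q.Front(); pb != nil; pb = pb.Next() {
		_ = pb // visit each queued buffer
	}

	q.Remove(pkt) // also O(1); unlinks in place
}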
diff --git a/pkg/tcpip/stack/packet_buffer_test.go b/pkg/tcpip/stack/packet_buffer_test.go
deleted file mode 100644
index a8da34992..000000000
--- a/pkg/tcpip/stack/packet_buffer_test.go
+++ /dev/null
@@ -1,649 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package stack
-
-import (
- "bytes"
- "fmt"
- "testing"
-
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/header"
-)
-
-func TestPacketHeaderPush(t *testing.T) {
- for _, test := range []struct {
- name string
- reserved int
- link []byte
- network []byte
- transport []byte
- data []byte
- }{
- {
- name: "construct empty packet",
- },
- {
- name: "construct link header only packet",
- reserved: 60,
- link: makeView(10),
- },
- {
- name: "construct link and network header only packet",
- reserved: 60,
- link: makeView(10),
- network: makeView(20),
- },
- {
- name: "construct header only packet",
- reserved: 60,
- link: makeView(10),
- network: makeView(20),
- transport: makeView(30),
- },
- {
- name: "construct data only packet",
- data: makeView(40),
- },
- {
- name: "construct L3 packet",
- reserved: 60,
- network: makeView(20),
- transport: makeView(30),
- data: makeView(40),
- },
- {
- name: "construct L2 packet",
- reserved: 60,
- link: makeView(10),
- network: makeView(20),
- transport: makeView(30),
- data: makeView(40),
- },
- } {
- t.Run(test.name, func(t *testing.T) {
- pk := NewPacketBuffer(PacketBufferOptions{
- ReserveHeaderBytes: test.reserved,
- // Make a copy of data to make sure our truth data won't be tainted by
- // PacketBuffer.
- Data: buffer.NewViewFromBytes(test.data).ToVectorisedView(),
- })
-
- allHdrSize := len(test.link) + len(test.network) + len(test.transport)
-
- // Check the initial values for packet.
- checkInitialPacketBuffer(t, pk, PacketBufferOptions{
- ReserveHeaderBytes: test.reserved,
- Data: buffer.View(test.data).ToVectorisedView(),
- })
-
- // Push headers.
- if v := test.transport; len(v) > 0 {
- copy(pk.TransportHeader().Push(len(v)), v)
- }
- if v := test.network; len(v) > 0 {
- copy(pk.NetworkHeader().Push(len(v)), v)
- }
- if v := test.link; len(v) > 0 {
- copy(pk.LinkHeader().Push(len(v)), v)
- }
-
- // Check the after values for packet.
- if got, want := pk.ReservedHeaderBytes(), test.reserved; got != want {
- t.Errorf("After pk.ReservedHeaderBytes() = %d, want %d", got, want)
- }
- if got, want := pk.AvailableHeaderBytes(), test.reserved-allHdrSize; got != want {
- t.Errorf("After pk.AvailableHeaderBytes() = %d, want %d", got, want)
- }
- if got, want := pk.HeaderSize(), allHdrSize; got != want {
- t.Errorf("After pk.HeaderSize() = %d, want %d", got, want)
- }
- if got, want := pk.Size(), allHdrSize+len(test.data); got != want {
- t.Errorf("After pk.Size() = %d, want %d", got, want)
- }
- // Check the after state.
- checkPacketContents(t, "After ", pk, packetContents{
- link: test.link,
- network: test.network,
- transport: test.transport,
- data: test.data,
- })
- })
- }
-}
-
-func TestPacketHeaderConsume(t *testing.T) {
- for _, test := range []struct {
- name string
- data []byte
- link int
- network int
- transport int
- }{
- {
- name: "parse L2 packet",
- data: concatViews(makeView(10), makeView(20), makeView(30), makeView(40)),
- link: 10,
- network: 20,
- transport: 30,
- },
- {
- name: "parse L3 packet",
- data: concatViews(makeView(20), makeView(30), makeView(40)),
- network: 20,
- transport: 30,
- },
- } {
- t.Run(test.name, func(t *testing.T) {
- pk := NewPacketBuffer(PacketBufferOptions{
- // Make a copy of data to make sure our truth data won't be tainted by
- // PacketBuffer.
- Data: buffer.NewViewFromBytes(test.data).ToVectorisedView(),
- })
-
- // Check the initial values for packet.
- checkInitialPacketBuffer(t, pk, PacketBufferOptions{
- Data: buffer.View(test.data).ToVectorisedView(),
- })
-
- // Consume headers.
- if size := test.link; size > 0 {
- if _, ok := pk.LinkHeader().Consume(size); !ok {
- t.Fatalf("pk.LinkHeader().Consume() = false, want true")
- }
- }
- if size := test.network; size > 0 {
- if _, ok := pk.NetworkHeader().Consume(size); !ok {
- t.Fatalf("pk.NetworkHeader().Consume() = false, want true")
- }
- }
- if size := test.transport; size > 0 {
- if _, ok := pk.TransportHeader().Consume(size); !ok {
- t.Fatalf("pk.TransportHeader().Consume() = false, want true")
- }
- }
-
- allHdrSize := test.link + test.network + test.transport
-
- // Check the after values for packet.
- if got, want := pk.ReservedHeaderBytes(), 0; got != want {
- t.Errorf("After pk.ReservedHeaderBytes() = %d, want %d", got, want)
- }
- if got, want := pk.AvailableHeaderBytes(), 0; got != want {
- t.Errorf("After pk.AvailableHeaderBytes() = %d, want %d", got, want)
- }
- if got, want := pk.HeaderSize(), allHdrSize; got != want {
- t.Errorf("After pk.HeaderSize() = %d, want %d", got, want)
- }
- if got, want := pk.Size(), len(test.data); got != want {
- t.Errorf("After pk.Size() = %d, want %d", got, want)
- }
- // Check the after state of pk.
- checkPacketContents(t, "After ", pk, packetContents{
- link: test.data[:test.link],
- network: test.data[test.link:][:test.network],
- transport: test.data[test.link+test.network:][:test.transport],
- data: test.data[allHdrSize:],
- })
- })
- }
-}
-
-func TestPacketHeaderConsumeDataTooShort(t *testing.T) {
- data := makeView(10)
-
- pk := NewPacketBuffer(PacketBufferOptions{
- // Make a copy of data to make sure our truth data won't be tainted by
- // PacketBuffer.
- Data: buffer.NewViewFromBytes(data).ToVectorisedView(),
- })
-
- // Consume should fail if pkt.Data is too short.
- if _, ok := pk.LinkHeader().Consume(11); ok {
- t.Fatalf("pk.LinkHeader().Consume() = _, true; want _, false")
- }
- if _, ok := pk.NetworkHeader().Consume(11); ok {
- t.Fatalf("pk.NetworkHeader().Consume() = _, true; want _, false")
- }
- if _, ok := pk.TransportHeader().Consume(11); ok {
- t.Fatalf("pk.TransportHeader().Consume() = _, true; want _, false")
- }
-
- // Check packet should look the same as initial packet.
- checkInitialPacketBuffer(t, pk, PacketBufferOptions{
- Data: buffer.View(data).ToVectorisedView(),
- })
-}
-
-// This is a very obscure use-case seen in the code that verifies packets
-// before sending them out: it parses the headers back in order to verify them.
-// PacketHeader was initially not designed to mix Push() and Consume(), but it
-// works and has been relied upon. Include a test here.
-func TestPacketHeaderPushConsumeMixed(t *testing.T) {
- link := makeView(10)
- network := makeView(20)
- data := makeView(30)
-
- initData := append([]byte(nil), network...)
- initData = append(initData, data...)
- pk := NewPacketBuffer(PacketBufferOptions{
- ReserveHeaderBytes: len(link),
- Data: buffer.NewViewFromBytes(initData).ToVectorisedView(),
- })
-
- // 1. Consume network header
- gotNetwork, ok := pk.NetworkHeader().Consume(len(network))
- if !ok {
- t.Fatalf("pk.NetworkHeader().Consume(%d) = _, false; want _, true", len(network))
- }
- checkViewEqual(t, "gotNetwork", gotNetwork, network)
-
- // 2. Push link header
- copy(pk.LinkHeader().Push(len(link)), link)
-
- checkPacketContents(t, "" /* prefix */, pk, packetContents{
- link: link,
- network: network,
- data: data,
- })
-}
-
-func TestPacketHeaderPushConsumeMixedTooLong(t *testing.T) {
- link := makeView(10)
- network := makeView(20)
- data := makeView(30)
-
- initData := concatViews(network, data)
- pk := NewPacketBuffer(PacketBufferOptions{
- ReserveHeaderBytes: len(link),
- Data: buffer.NewViewFromBytes(initData).ToVectorisedView(),
- })
-
- // 1. Push link header
- copy(pk.LinkHeader().Push(len(link)), link)
-
- checkPacketContents(t, "" /* prefix */, pk, packetContents{
- link: link,
- data: initData,
- })
-
- // 2. Consume network header, with a number of bytes too large.
- gotNetwork, ok := pk.NetworkHeader().Consume(len(initData) + 1)
- if ok {
- t.Fatalf("pk.NetworkHeader().Consume(%d) = %q, true; want _, false", len(initData)+1, gotNetwork)
- }
-
- checkPacketContents(t, "" /* prefix */, pk, packetContents{
- link: link,
- data: initData,
- })
-}
-
-func TestPacketHeaderPushCalledAtMostOnce(t *testing.T) {
- const headerSize = 10
-
- pk := NewPacketBuffer(PacketBufferOptions{
- ReserveHeaderBytes: headerSize * int(numHeaderType),
- })
-
- for _, h := range []PacketHeader{
- pk.TransportHeader(),
- pk.NetworkHeader(),
- pk.LinkHeader(),
- } {
- t.Run("PushedTwice/"+h.typ.String(), func(t *testing.T) {
- h.Push(headerSize)
-
- defer func() { recover() }()
- h.Push(headerSize)
- t.Fatal("Second push should have panicked")
- })
- }
-}
-
-func TestPacketHeaderConsumeCalledAtMostOnce(t *testing.T) {
- const headerSize = 10
-
- pk := NewPacketBuffer(PacketBufferOptions{
- Data: makeView(headerSize * int(numHeaderType)).ToVectorisedView(),
- })
-
- for _, h := range []PacketHeader{
- pk.LinkHeader(),
- pk.NetworkHeader(),
- pk.TransportHeader(),
- } {
- t.Run("ConsumedTwice/"+h.typ.String(), func(t *testing.T) {
- if _, ok := h.Consume(headerSize); !ok {
- t.Fatal("First consume should succeed")
- }
-
- defer func() { recover() }()
- h.Consume(headerSize)
- t.Fatal("Second consume should have panicked")
- })
- }
-}
-
-func TestPacketHeaderPushThenConsumePanics(t *testing.T) {
- const headerSize = 10
-
- pk := NewPacketBuffer(PacketBufferOptions{
- ReserveHeaderBytes: headerSize * int(numHeaderType),
- })
-
- for _, h := range []PacketHeader{
- pk.TransportHeader(),
- pk.NetworkHeader(),
- pk.LinkHeader(),
- } {
- t.Run(h.typ.String(), func(t *testing.T) {
- h.Push(headerSize)
-
- defer func() { recover() }()
- h.Consume(headerSize)
- t.Fatal("Consume should have panicked")
- })
- }
-}
-
-func TestPacketHeaderConsumeThenPushPanics(t *testing.T) {
- const headerSize = 10
-
- pk := NewPacketBuffer(PacketBufferOptions{
- Data: makeView(headerSize * int(numHeaderType)).ToVectorisedView(),
- })
-
- for _, h := range []PacketHeader{
- pk.LinkHeader(),
- pk.NetworkHeader(),
- pk.TransportHeader(),
- } {
- t.Run(h.typ.String(), func(t *testing.T) {
- h.Consume(headerSize)
-
- defer func() { recover() }()
- h.Push(headerSize)
- t.Fatal("Push should have panicked")
- })
- }
-}
-
-func TestPacketBufferData(t *testing.T) {
- for _, tc := range []struct {
- name string
- makePkt func(*testing.T) *PacketBuffer
- data string
- }{
- {
- name: "inbound packet",
- makePkt: func(*testing.T) *PacketBuffer {
- pkt := NewPacketBuffer(PacketBufferOptions{
- Data: vv("aabbbbccccccDATA"),
- })
- pkt.LinkHeader().Consume(2)
- pkt.NetworkHeader().Consume(4)
- pkt.TransportHeader().Consume(6)
- return pkt
- },
- data: "DATA",
- },
- {
- name: "outbound packet",
- makePkt: func(*testing.T) *PacketBuffer {
- pkt := NewPacketBuffer(PacketBufferOptions{
- ReserveHeaderBytes: 12,
- Data: vv("DATA"),
- })
- copy(pkt.TransportHeader().Push(6), []byte("cccccc"))
- copy(pkt.NetworkHeader().Push(4), []byte("bbbb"))
- copy(pkt.LinkHeader().Push(2), []byte("aa"))
- return pkt
- },
- data: "DATA",
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- // PullUp
- for _, n := range []int{1, len(tc.data)} {
- t.Run(fmt.Sprintf("PullUp%d", n), func(t *testing.T) {
- pkt := tc.makePkt(t)
- v, ok := pkt.Data().PullUp(n)
- wantV := []byte(tc.data)[:n]
- if !ok || !bytes.Equal(v, wantV) {
- t.Errorf("pkt.Data().PullUp(%d) = %q, %t; want %q, true", n, v, ok, wantV)
- }
- })
- }
- t.Run("PullUpOutOfBounds", func(t *testing.T) {
- n := len(tc.data) + 1
- pkt := tc.makePkt(t)
- v, ok := pkt.Data().PullUp(n)
- if ok || v != nil {
- t.Errorf("pkt.Data().PullUp(%d) = %q, %t; want nil, false", n, v, ok)
- }
- })
-
- // DeleteFront
- for _, n := range []int{1, len(tc.data)} {
- t.Run(fmt.Sprintf("DeleteFront%d", n), func(t *testing.T) {
- pkt := tc.makePkt(t)
- pkt.Data().DeleteFront(n)
-
- checkData(t, pkt, []byte(tc.data)[n:])
- })
- }
-
- // CapLength
- for _, n := range []int{0, 1, len(tc.data)} {
- t.Run(fmt.Sprintf("CapLength%d", n), func(t *testing.T) {
- pkt := tc.makePkt(t)
- pkt.Data().CapLength(n)
-
- want := []byte(tc.data)
- if n < len(want) {
- want = want[:n]
- }
- checkData(t, pkt, want)
- })
- }
-
- // Views
- t.Run("Views", func(t *testing.T) {
- pkt := tc.makePkt(t)
- checkData(t, pkt, []byte(tc.data))
- })
-
- // AppendView
- t.Run("AppendView", func(t *testing.T) {
- s := "APPEND"
-
- pkt := tc.makePkt(t)
- pkt.Data().AppendView(buffer.View(s))
-
- checkData(t, pkt, []byte(tc.data+s))
- })
-
- // ReadFromVV
- for _, n := range []int{0, 1, 2, 7, 10, 14, 20} {
- t.Run(fmt.Sprintf("ReadFromVV%d", n), func(t *testing.T) {
- s := "TO READ"
- srcVV := vv(s, s)
- s += s
-
- pkt := tc.makePkt(t)
- pkt.Data().ReadFromVV(&srcVV, n)
-
- if n < len(s) {
- s = s[:n]
- }
- checkData(t, pkt, []byte(tc.data+s))
- })
- }
-
- // ExtractVV
- t.Run("ExtractVV", func(t *testing.T) {
- pkt := tc.makePkt(t)
- extractedVV := pkt.Data().ExtractVV()
-
- got := extractedVV.ToOwnedView()
- want := []byte(tc.data)
- if !bytes.Equal(got, want) {
- t.Errorf("pkt.Data().ExtractVV().ToOwnedView() = %q, want %q", got, want)
- }
- })
- })
- }
-}
-
-type packetContents struct {
- link buffer.View
- network buffer.View
- transport buffer.View
- data buffer.View
-}
-
-func checkPacketContents(t *testing.T, prefix string, pk *PacketBuffer, want packetContents) {
- t.Helper()
- // Headers.
- checkPacketHeader(t, prefix+"pk.LinkHeader", pk.LinkHeader(), want.link)
- checkPacketHeader(t, prefix+"pk.NetworkHeader", pk.NetworkHeader(), want.network)
- checkPacketHeader(t, prefix+"pk.TransportHeader", pk.TransportHeader(), want.transport)
- // Data.
- checkData(t, pk, want.data)
- // Whole packet.
- checkViewEqual(t, prefix+"pk.Views()",
- concatViews(pk.Views()...),
- concatViews(want.link, want.network, want.transport, want.data))
- // PayloadSince.
- checkViewEqual(t, prefix+"PayloadSince(LinkHeader)",
- PayloadSince(pk.LinkHeader()),
- concatViews(want.link, want.network, want.transport, want.data))
- checkViewEqual(t, prefix+"PayloadSince(NetworkHeader)",
- PayloadSince(pk.NetworkHeader()),
- concatViews(want.network, want.transport, want.data))
- checkViewEqual(t, prefix+"PayloadSince(TransportHeader)",
- PayloadSince(pk.TransportHeader()),
- concatViews(want.transport, want.data))
-}
-
-func checkInitialPacketBuffer(t *testing.T, pk *PacketBuffer, opts PacketBufferOptions) {
- t.Helper()
- reserved := opts.ReserveHeaderBytes
- if got, want := pk.ReservedHeaderBytes(), reserved; got != want {
- t.Errorf("Initial pk.ReservedHeaderBytes() = %d, want %d", got, want)
- }
- if got, want := pk.AvailableHeaderBytes(), reserved; got != want {
- t.Errorf("Initial pk.AvailableHeaderBytes() = %d, want %d", got, want)
- }
- if got, want := pk.HeaderSize(), 0; got != want {
- t.Errorf("Initial pk.HeaderSize() = %d, want %d", got, want)
- }
- data := opts.Data.ToView()
- if got, want := pk.Size(), len(data); got != want {
- t.Errorf("Initial pk.Size() = %d, want %d", got, want)
- }
- checkPacketContents(t, "Initial ", pk, packetContents{
- data: data,
- })
-}
-
-func checkPacketHeader(t *testing.T, name string, h PacketHeader, want []byte) {
- t.Helper()
- checkViewEqual(t, name+".View()", h.View(), want)
-}
-
-func checkViewEqual(t *testing.T, what string, got, want buffer.View) {
- t.Helper()
- if !bytes.Equal(got, want) {
- t.Errorf("%s = %x, want %x", what, got, want)
- }
-}
-
-func checkData(t *testing.T, pkt *PacketBuffer, want []byte) {
- t.Helper()
- if got := concatViews(pkt.Data().Views()...); !bytes.Equal(got, want) {
- t.Errorf("pkt.Data().Views() = 0x%x, want 0x%x", got, want)
- }
- if got := pkt.Data().Size(); got != len(want) {
- t.Errorf("pkt.Data().Size() = %d, want %d", got, len(want))
- }
-
- t.Run("AsRange", func(t *testing.T) {
- // Full range
- checkRange(t, pkt.Data().AsRange(), want)
-
- // SubRange
- for _, off := range []int{0, 1, len(want), len(want) + 1} {
- t.Run(fmt.Sprintf("SubRange%d", off), func(t *testing.T) {
- // Empty when off is greater than the size of range.
- var sub []byte
- if off < len(want) {
- sub = want[off:]
- }
- checkRange(t, pkt.Data().AsRange().SubRange(off), sub)
- })
- }
-
- // Capped
- for _, n := range []int{0, 1, len(want), len(want) + 1} {
- t.Run(fmt.Sprintf("Capped%d", n), func(t *testing.T) {
- sub := want
- if n < len(sub) {
- sub = sub[:n]
- }
- checkRange(t, pkt.Data().AsRange().Capped(n), sub)
- })
- }
- })
-}
-
-func checkRange(t *testing.T, r Range, data []byte) {
- if got, want := r.Size(), len(data); got != want {
- t.Errorf("r.Size() = %d, want %d", got, want)
- }
- if got := r.AsView(); !bytes.Equal(got, data) {
- t.Errorf("r.AsView() = %x, want %x", got, data)
- }
- if got := r.ToOwnedView(); !bytes.Equal(got, data) {
- t.Errorf("r.ToOwnedView() = %x, want %x", got, data)
- }
- if got, want := r.Checksum(), header.Checksum(data, 0 /* initial */); got != want {
- t.Errorf("r.Checksum() = %x, want %x", got, want)
- }
-}
-
-func vv(pieces ...string) buffer.VectorisedView {
- var views []buffer.View
- var size int
- for _, p := range pieces {
- v := buffer.View([]byte(p))
- size += len(v)
- views = append(views, v)
- }
- return buffer.NewVectorisedView(size, views)
-}
-
-func makeView(size int) buffer.View {
- b := byte(size)
- return bytes.Repeat([]byte{b}, size)
-}
-
-func concatViews(views ...buffer.View) buffer.View {
- var all buffer.View
- for _, v := range views {
- all = append(all, v...)
- }
- return all
-}
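
The deleted tests above exercised PacketBuffer's two construction paths: outbound packets reserve header space and Push headers innermost-first (transport, then network, then link), while inbound packets arrive entirely as Data and Consume headers outermost-first. The sketch below condenses both flows using only calls that appear in the deleted tests (NewPacketBuffer, PacketBufferOptions, Push, Consume, Size); it assumes the stack and buffer packages as they exist at this revision and is an illustration, not a replacement for the removed coverage.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip/buffer"
	"gvisor.dev/gvisor/pkg/tcpip/stack"
)

func main() {
	// Outbound: reserve room up front, then push headers innermost-first.
	out := stack.NewPacketBuffer(stack.PacketBufferOptions{
		ReserveHeaderBytes: 12,
		Data:               buffer.NewViewFromBytes([]byte("DATA")).ToVectorisedView(),
	})
	copy(out.TransportHeader().Push(6), []byte("cccccc"))
	copy(out.NetworkHeader().Push(4), []byte("bbbb"))
	copy(out.LinkHeader().Push(2), []byte("aa"))
	fmt.Println(out.Size()) // 12 header bytes + 4 payload bytes = 16

	// Inbound: the whole frame is Data; consume headers outermost-first.
	in := stack.NewPacketBuffer(stack.PacketBufferOptions{
		Data: buffer.NewViewFromBytes([]byte("aabbbbccccccDATA")).ToVectorisedView(),
	})
	if _, ok := in.LinkHeader().Consume(2); !ok {
		panic("packet too short")
	}
	in.NetworkHeader().Consume(4)
	in.TransportHeader().Consume(6)
	fmt.Println(in.Data().Size()) // 4 payload bytes remain
}

As the removed TestPacketHeaderPushConsumeMixed noted, the two directions can even be mixed on one packet (consume a network header, then push a link header), although the API was not originally designed for that.
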
diff --git a/pkg/tcpip/stack/stack_state_autogen.go b/pkg/tcpip/stack/stack_state_autogen.go
new file mode 100644
index 000000000..47c7e386e
--- /dev/null
+++ b/pkg/tcpip/stack/stack_state_autogen.go
@@ -0,0 +1,1284 @@
+// automatically generated by stateify.
+
+package stack
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (t *tuple) StateTypeName() string {
+ return "pkg/tcpip/stack.tuple"
+}
+
+func (t *tuple) StateFields() []string {
+ return []string{
+ "tupleEntry",
+ "tupleID",
+ "conn",
+ "direction",
+ }
+}
+
+func (t *tuple) beforeSave() {}
+
+// +checklocksignore
+func (t *tuple) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ stateSinkObject.Save(0, &t.tupleEntry)
+ stateSinkObject.Save(1, &t.tupleID)
+ stateSinkObject.Save(2, &t.conn)
+ stateSinkObject.Save(3, &t.direction)
+}
+
+func (t *tuple) afterLoad() {}
+
+// +checklocksignore
+func (t *tuple) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.tupleEntry)
+ stateSourceObject.Load(1, &t.tupleID)
+ stateSourceObject.Load(2, &t.conn)
+ stateSourceObject.Load(3, &t.direction)
+}
+
+func (ti *tupleID) StateTypeName() string {
+ return "pkg/tcpip/stack.tupleID"
+}
+
+func (ti *tupleID) StateFields() []string {
+ return []string{
+ "srcAddr",
+ "srcPort",
+ "dstAddr",
+ "dstPort",
+ "transProto",
+ "netProto",
+ }
+}
+
+func (ti *tupleID) beforeSave() {}
+
+// +checklocksignore
+func (ti *tupleID) StateSave(stateSinkObject state.Sink) {
+ ti.beforeSave()
+ stateSinkObject.Save(0, &ti.srcAddr)
+ stateSinkObject.Save(1, &ti.srcPort)
+ stateSinkObject.Save(2, &ti.dstAddr)
+ stateSinkObject.Save(3, &ti.dstPort)
+ stateSinkObject.Save(4, &ti.transProto)
+ stateSinkObject.Save(5, &ti.netProto)
+}
+
+func (ti *tupleID) afterLoad() {}
+
+// +checklocksignore
+func (ti *tupleID) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &ti.srcAddr)
+ stateSourceObject.Load(1, &ti.srcPort)
+ stateSourceObject.Load(2, &ti.dstAddr)
+ stateSourceObject.Load(3, &ti.dstPort)
+ stateSourceObject.Load(4, &ti.transProto)
+ stateSourceObject.Load(5, &ti.netProto)
+}
+
+func (cn *conn) StateTypeName() string {
+ return "pkg/tcpip/stack.conn"
+}
+
+func (cn *conn) StateFields() []string {
+ return []string{
+ "original",
+ "reply",
+ "manip",
+ "tcbHook",
+ "tcb",
+ "lastUsed",
+ }
+}
+
+func (cn *conn) beforeSave() {}
+
+// +checklocksignore
+func (cn *conn) StateSave(stateSinkObject state.Sink) {
+ cn.beforeSave()
+ var lastUsedValue unixTime = cn.saveLastUsed()
+ stateSinkObject.SaveValue(5, lastUsedValue)
+ stateSinkObject.Save(0, &cn.original)
+ stateSinkObject.Save(1, &cn.reply)
+ stateSinkObject.Save(2, &cn.manip)
+ stateSinkObject.Save(3, &cn.tcbHook)
+ stateSinkObject.Save(4, &cn.tcb)
+}
+
+func (cn *conn) afterLoad() {}
+
+// +checklocksignore
+func (cn *conn) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &cn.original)
+ stateSourceObject.Load(1, &cn.reply)
+ stateSourceObject.Load(2, &cn.manip)
+ stateSourceObject.Load(3, &cn.tcbHook)
+ stateSourceObject.Load(4, &cn.tcb)
+ stateSourceObject.LoadValue(5, new(unixTime), func(y interface{}) { cn.loadLastUsed(y.(unixTime)) })
+}
+
+func (ct *ConnTrack) StateTypeName() string {
+ return "pkg/tcpip/stack.ConnTrack"
+}
+
+func (ct *ConnTrack) StateFields() []string {
+ return []string{
+ "seed",
+ "buckets",
+ }
+}
+
+// +checklocksignore
+func (ct *ConnTrack) StateSave(stateSinkObject state.Sink) {
+ ct.beforeSave()
+ stateSinkObject.Save(0, &ct.seed)
+ stateSinkObject.Save(1, &ct.buckets)
+}
+
+func (ct *ConnTrack) afterLoad() {}
+
+// +checklocksignore
+func (ct *ConnTrack) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &ct.seed)
+ stateSourceObject.Load(1, &ct.buckets)
+}
+
+func (b *bucket) StateTypeName() string {
+ return "pkg/tcpip/stack.bucket"
+}
+
+func (b *bucket) StateFields() []string {
+ return []string{
+ "tuples",
+ }
+}
+
+func (b *bucket) beforeSave() {}
+
+// +checklocksignore
+func (b *bucket) StateSave(stateSinkObject state.Sink) {
+ b.beforeSave()
+ stateSinkObject.Save(0, &b.tuples)
+}
+
+func (b *bucket) afterLoad() {}
+
+// +checklocksignore
+func (b *bucket) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &b.tuples)
+}
+
+func (u *unixTime) StateTypeName() string {
+ return "pkg/tcpip/stack.unixTime"
+}
+
+func (u *unixTime) StateFields() []string {
+ return []string{
+ "second",
+ "nano",
+ }
+}
+
+func (u *unixTime) beforeSave() {}
+
+// +checklocksignore
+func (u *unixTime) StateSave(stateSinkObject state.Sink) {
+ u.beforeSave()
+ stateSinkObject.Save(0, &u.second)
+ stateSinkObject.Save(1, &u.nano)
+}
+
+func (u *unixTime) afterLoad() {}
+
+// +checklocksignore
+func (u *unixTime) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &u.second)
+ stateSourceObject.Load(1, &u.nano)
+}
+
+func (it *IPTables) StateTypeName() string {
+ return "pkg/tcpip/stack.IPTables"
+}
+
+func (it *IPTables) StateFields() []string {
+ return []string{
+ "mu",
+ "v4Tables",
+ "v6Tables",
+ "modified",
+ "priorities",
+ "connections",
+ "reaperDone",
+ }
+}
+
+// +checklocksignore
+func (it *IPTables) StateSave(stateSinkObject state.Sink) {
+ it.beforeSave()
+ stateSinkObject.Save(0, &it.mu)
+ stateSinkObject.Save(1, &it.v4Tables)
+ stateSinkObject.Save(2, &it.v6Tables)
+ stateSinkObject.Save(3, &it.modified)
+ stateSinkObject.Save(4, &it.priorities)
+ stateSinkObject.Save(5, &it.connections)
+ stateSinkObject.Save(6, &it.reaperDone)
+}
+
+// +checklocksignore
+func (it *IPTables) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &it.mu)
+ stateSourceObject.Load(1, &it.v4Tables)
+ stateSourceObject.Load(2, &it.v6Tables)
+ stateSourceObject.Load(3, &it.modified)
+ stateSourceObject.Load(4, &it.priorities)
+ stateSourceObject.Load(5, &it.connections)
+ stateSourceObject.Load(6, &it.reaperDone)
+ stateSourceObject.AfterLoad(it.afterLoad)
+}
+
+func (table *Table) StateTypeName() string {
+ return "pkg/tcpip/stack.Table"
+}
+
+func (table *Table) StateFields() []string {
+ return []string{
+ "Rules",
+ "BuiltinChains",
+ "Underflows",
+ }
+}
+
+func (table *Table) beforeSave() {}
+
+// +checklocksignore
+func (table *Table) StateSave(stateSinkObject state.Sink) {
+ table.beforeSave()
+ stateSinkObject.Save(0, &table.Rules)
+ stateSinkObject.Save(1, &table.BuiltinChains)
+ stateSinkObject.Save(2, &table.Underflows)
+}
+
+func (table *Table) afterLoad() {}
+
+// +checklocksignore
+func (table *Table) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &table.Rules)
+ stateSourceObject.Load(1, &table.BuiltinChains)
+ stateSourceObject.Load(2, &table.Underflows)
+}
+
+func (r *Rule) StateTypeName() string {
+ return "pkg/tcpip/stack.Rule"
+}
+
+func (r *Rule) StateFields() []string {
+ return []string{
+ "Filter",
+ "Matchers",
+ "Target",
+ }
+}
+
+func (r *Rule) beforeSave() {}
+
+// +checklocksignore
+func (r *Rule) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.Filter)
+ stateSinkObject.Save(1, &r.Matchers)
+ stateSinkObject.Save(2, &r.Target)
+}
+
+func (r *Rule) afterLoad() {}
+
+// +checklocksignore
+func (r *Rule) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.Filter)
+ stateSourceObject.Load(1, &r.Matchers)
+ stateSourceObject.Load(2, &r.Target)
+}
+
+func (fl *IPHeaderFilter) StateTypeName() string {
+ return "pkg/tcpip/stack.IPHeaderFilter"
+}
+
+func (fl *IPHeaderFilter) StateFields() []string {
+ return []string{
+ "Protocol",
+ "CheckProtocol",
+ "Dst",
+ "DstMask",
+ "DstInvert",
+ "Src",
+ "SrcMask",
+ "SrcInvert",
+ "InputInterface",
+ "InputInterfaceMask",
+ "InputInterfaceInvert",
+ "OutputInterface",
+ "OutputInterfaceMask",
+ "OutputInterfaceInvert",
+ }
+}
+
+func (fl *IPHeaderFilter) beforeSave() {}
+
+// +checklocksignore
+func (fl *IPHeaderFilter) StateSave(stateSinkObject state.Sink) {
+ fl.beforeSave()
+ stateSinkObject.Save(0, &fl.Protocol)
+ stateSinkObject.Save(1, &fl.CheckProtocol)
+ stateSinkObject.Save(2, &fl.Dst)
+ stateSinkObject.Save(3, &fl.DstMask)
+ stateSinkObject.Save(4, &fl.DstInvert)
+ stateSinkObject.Save(5, &fl.Src)
+ stateSinkObject.Save(6, &fl.SrcMask)
+ stateSinkObject.Save(7, &fl.SrcInvert)
+ stateSinkObject.Save(8, &fl.InputInterface)
+ stateSinkObject.Save(9, &fl.InputInterfaceMask)
+ stateSinkObject.Save(10, &fl.InputInterfaceInvert)
+ stateSinkObject.Save(11, &fl.OutputInterface)
+ stateSinkObject.Save(12, &fl.OutputInterfaceMask)
+ stateSinkObject.Save(13, &fl.OutputInterfaceInvert)
+}
+
+func (fl *IPHeaderFilter) afterLoad() {}
+
+// +checklocksignore
+func (fl *IPHeaderFilter) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fl.Protocol)
+ stateSourceObject.Load(1, &fl.CheckProtocol)
+ stateSourceObject.Load(2, &fl.Dst)
+ stateSourceObject.Load(3, &fl.DstMask)
+ stateSourceObject.Load(4, &fl.DstInvert)
+ stateSourceObject.Load(5, &fl.Src)
+ stateSourceObject.Load(6, &fl.SrcMask)
+ stateSourceObject.Load(7, &fl.SrcInvert)
+ stateSourceObject.Load(8, &fl.InputInterface)
+ stateSourceObject.Load(9, &fl.InputInterfaceMask)
+ stateSourceObject.Load(10, &fl.InputInterfaceInvert)
+ stateSourceObject.Load(11, &fl.OutputInterface)
+ stateSourceObject.Load(12, &fl.OutputInterfaceMask)
+ stateSourceObject.Load(13, &fl.OutputInterfaceInvert)
+}
+
+func (l *neighborEntryList) StateTypeName() string {
+ return "pkg/tcpip/stack.neighborEntryList"
+}
+
+func (l *neighborEntryList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *neighborEntryList) beforeSave() {}
+
+// +checklocksignore
+func (l *neighborEntryList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *neighborEntryList) afterLoad() {}
+
+// +checklocksignore
+func (l *neighborEntryList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *neighborEntryEntry) StateTypeName() string {
+ return "pkg/tcpip/stack.neighborEntryEntry"
+}
+
+func (e *neighborEntryEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *neighborEntryEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *neighborEntryEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *neighborEntryEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *neighborEntryEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func (p *PacketBufferList) StateTypeName() string {
+ return "pkg/tcpip/stack.PacketBufferList"
+}
+
+func (p *PacketBufferList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (p *PacketBufferList) beforeSave() {}
+
+// +checklocksignore
+func (p *PacketBufferList) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+ stateSinkObject.Save(0, &p.head)
+ stateSinkObject.Save(1, &p.tail)
+}
+
+func (p *PacketBufferList) afterLoad() {}
+
+// +checklocksignore
+func (p *PacketBufferList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &p.head)
+ stateSourceObject.Load(1, &p.tail)
+}
+
+func (e *PacketBufferEntry) StateTypeName() string {
+ return "pkg/tcpip/stack.PacketBufferEntry"
+}
+
+func (e *PacketBufferEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *PacketBufferEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *PacketBufferEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *PacketBufferEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *PacketBufferEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func (t *TransportEndpointID) StateTypeName() string {
+ return "pkg/tcpip/stack.TransportEndpointID"
+}
+
+func (t *TransportEndpointID) StateFields() []string {
+ return []string{
+ "LocalPort",
+ "LocalAddress",
+ "RemotePort",
+ "RemoteAddress",
+ }
+}
+
+func (t *TransportEndpointID) beforeSave() {}
+
+// +checklocksignore
+func (t *TransportEndpointID) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ stateSinkObject.Save(0, &t.LocalPort)
+ stateSinkObject.Save(1, &t.LocalAddress)
+ stateSinkObject.Save(2, &t.RemotePort)
+ stateSinkObject.Save(3, &t.RemoteAddress)
+}
+
+func (t *TransportEndpointID) afterLoad() {}
+
+// +checklocksignore
+func (t *TransportEndpointID) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.LocalPort)
+ stateSourceObject.Load(1, &t.LocalAddress)
+ stateSourceObject.Load(2, &t.RemotePort)
+ stateSourceObject.Load(3, &t.RemoteAddress)
+}
+
+func (g *GSOType) StateTypeName() string {
+ return "pkg/tcpip/stack.GSOType"
+}
+
+func (g *GSOType) StateFields() []string {
+ return nil
+}
+
+func (g *GSO) StateTypeName() string {
+ return "pkg/tcpip/stack.GSO"
+}
+
+func (g *GSO) StateFields() []string {
+ return []string{
+ "Type",
+ "NeedsCsum",
+ "CsumOffset",
+ "MSS",
+ "L3HdrLen",
+ "MaxSize",
+ }
+}
+
+func (g *GSO) beforeSave() {}
+
+// +checklocksignore
+func (g *GSO) StateSave(stateSinkObject state.Sink) {
+ g.beforeSave()
+ stateSinkObject.Save(0, &g.Type)
+ stateSinkObject.Save(1, &g.NeedsCsum)
+ stateSinkObject.Save(2, &g.CsumOffset)
+ stateSinkObject.Save(3, &g.MSS)
+ stateSinkObject.Save(4, &g.L3HdrLen)
+ stateSinkObject.Save(5, &g.MaxSize)
+}
+
+func (g *GSO) afterLoad() {}
+
+// +checklocksignore
+func (g *GSO) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &g.Type)
+ stateSourceObject.Load(1, &g.NeedsCsum)
+ stateSourceObject.Load(2, &g.CsumOffset)
+ stateSourceObject.Load(3, &g.MSS)
+ stateSourceObject.Load(4, &g.L3HdrLen)
+ stateSourceObject.Load(5, &g.MaxSize)
+}
+
+func (t *TransportEndpointInfo) StateTypeName() string {
+ return "pkg/tcpip/stack.TransportEndpointInfo"
+}
+
+func (t *TransportEndpointInfo) StateFields() []string {
+ return []string{
+ "NetProto",
+ "TransProto",
+ "ID",
+ "BindNICID",
+ "BindAddr",
+ "RegisterNICID",
+ }
+}
+
+func (t *TransportEndpointInfo) beforeSave() {}
+
+// +checklocksignore
+func (t *TransportEndpointInfo) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ stateSinkObject.Save(0, &t.NetProto)
+ stateSinkObject.Save(1, &t.TransProto)
+ stateSinkObject.Save(2, &t.ID)
+ stateSinkObject.Save(3, &t.BindNICID)
+ stateSinkObject.Save(4, &t.BindAddr)
+ stateSinkObject.Save(5, &t.RegisterNICID)
+}
+
+func (t *TransportEndpointInfo) afterLoad() {}
+
+// +checklocksignore
+func (t *TransportEndpointInfo) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.NetProto)
+ stateSourceObject.Load(1, &t.TransProto)
+ stateSourceObject.Load(2, &t.ID)
+ stateSourceObject.Load(3, &t.BindNICID)
+ stateSourceObject.Load(4, &t.BindAddr)
+ stateSourceObject.Load(5, &t.RegisterNICID)
+}
+
+func (t *TCPCubicState) StateTypeName() string {
+ return "pkg/tcpip/stack.TCPCubicState"
+}
+
+func (t *TCPCubicState) StateFields() []string {
+ return []string{
+ "WLastMax",
+ "WMax",
+ "T",
+ "TimeSinceLastCongestion",
+ "C",
+ "K",
+ "Beta",
+ "WC",
+ "WEst",
+ }
+}
+
+func (t *TCPCubicState) beforeSave() {}
+
+// +checklocksignore
+func (t *TCPCubicState) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ stateSinkObject.Save(0, &t.WLastMax)
+ stateSinkObject.Save(1, &t.WMax)
+ stateSinkObject.Save(2, &t.T)
+ stateSinkObject.Save(3, &t.TimeSinceLastCongestion)
+ stateSinkObject.Save(4, &t.C)
+ stateSinkObject.Save(5, &t.K)
+ stateSinkObject.Save(6, &t.Beta)
+ stateSinkObject.Save(7, &t.WC)
+ stateSinkObject.Save(8, &t.WEst)
+}
+
+func (t *TCPCubicState) afterLoad() {}
+
+// +checklocksignore
+func (t *TCPCubicState) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.WLastMax)
+ stateSourceObject.Load(1, &t.WMax)
+ stateSourceObject.Load(2, &t.T)
+ stateSourceObject.Load(3, &t.TimeSinceLastCongestion)
+ stateSourceObject.Load(4, &t.C)
+ stateSourceObject.Load(5, &t.K)
+ stateSourceObject.Load(6, &t.Beta)
+ stateSourceObject.Load(7, &t.WC)
+ stateSourceObject.Load(8, &t.WEst)
+}
+
+func (t *TCPRACKState) StateTypeName() string {
+ return "pkg/tcpip/stack.TCPRACKState"
+}
+
+func (t *TCPRACKState) StateFields() []string {
+ return []string{
+ "XmitTime",
+ "EndSequence",
+ "FACK",
+ "RTT",
+ "Reord",
+ "DSACKSeen",
+ "ReoWnd",
+ "ReoWndIncr",
+ "ReoWndPersist",
+ "RTTSeq",
+ }
+}
+
+func (t *TCPRACKState) beforeSave() {}
+
+// +checklocksignore
+func (t *TCPRACKState) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ stateSinkObject.Save(0, &t.XmitTime)
+ stateSinkObject.Save(1, &t.EndSequence)
+ stateSinkObject.Save(2, &t.FACK)
+ stateSinkObject.Save(3, &t.RTT)
+ stateSinkObject.Save(4, &t.Reord)
+ stateSinkObject.Save(5, &t.DSACKSeen)
+ stateSinkObject.Save(6, &t.ReoWnd)
+ stateSinkObject.Save(7, &t.ReoWndIncr)
+ stateSinkObject.Save(8, &t.ReoWndPersist)
+ stateSinkObject.Save(9, &t.RTTSeq)
+}
+
+func (t *TCPRACKState) afterLoad() {}
+
+// +checklocksignore
+func (t *TCPRACKState) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.XmitTime)
+ stateSourceObject.Load(1, &t.EndSequence)
+ stateSourceObject.Load(2, &t.FACK)
+ stateSourceObject.Load(3, &t.RTT)
+ stateSourceObject.Load(4, &t.Reord)
+ stateSourceObject.Load(5, &t.DSACKSeen)
+ stateSourceObject.Load(6, &t.ReoWnd)
+ stateSourceObject.Load(7, &t.ReoWndIncr)
+ stateSourceObject.Load(8, &t.ReoWndPersist)
+ stateSourceObject.Load(9, &t.RTTSeq)
+}
+
+func (t *TCPEndpointID) StateTypeName() string {
+ return "pkg/tcpip/stack.TCPEndpointID"
+}
+
+func (t *TCPEndpointID) StateFields() []string {
+ return []string{
+ "LocalPort",
+ "LocalAddress",
+ "RemotePort",
+ "RemoteAddress",
+ }
+}
+
+func (t *TCPEndpointID) beforeSave() {}
+
+// +checklocksignore
+func (t *TCPEndpointID) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ stateSinkObject.Save(0, &t.LocalPort)
+ stateSinkObject.Save(1, &t.LocalAddress)
+ stateSinkObject.Save(2, &t.RemotePort)
+ stateSinkObject.Save(3, &t.RemoteAddress)
+}
+
+func (t *TCPEndpointID) afterLoad() {}
+
+// +checklocksignore
+func (t *TCPEndpointID) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.LocalPort)
+ stateSourceObject.Load(1, &t.LocalAddress)
+ stateSourceObject.Load(2, &t.RemotePort)
+ stateSourceObject.Load(3, &t.RemoteAddress)
+}
+
+func (t *TCPFastRecoveryState) StateTypeName() string {
+ return "pkg/tcpip/stack.TCPFastRecoveryState"
+}
+
+func (t *TCPFastRecoveryState) StateFields() []string {
+ return []string{
+ "Active",
+ "First",
+ "Last",
+ "MaxCwnd",
+ "HighRxt",
+ "RescueRxt",
+ }
+}
+
+func (t *TCPFastRecoveryState) beforeSave() {}
+
+// +checklocksignore
+func (t *TCPFastRecoveryState) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ stateSinkObject.Save(0, &t.Active)
+ stateSinkObject.Save(1, &t.First)
+ stateSinkObject.Save(2, &t.Last)
+ stateSinkObject.Save(3, &t.MaxCwnd)
+ stateSinkObject.Save(4, &t.HighRxt)
+ stateSinkObject.Save(5, &t.RescueRxt)
+}
+
+func (t *TCPFastRecoveryState) afterLoad() {}
+
+// +checklocksignore
+func (t *TCPFastRecoveryState) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.Active)
+ stateSourceObject.Load(1, &t.First)
+ stateSourceObject.Load(2, &t.Last)
+ stateSourceObject.Load(3, &t.MaxCwnd)
+ stateSourceObject.Load(4, &t.HighRxt)
+ stateSourceObject.Load(5, &t.RescueRxt)
+}
+
+func (t *TCPReceiverState) StateTypeName() string {
+ return "pkg/tcpip/stack.TCPReceiverState"
+}
+
+func (t *TCPReceiverState) StateFields() []string {
+ return []string{
+ "RcvNxt",
+ "RcvAcc",
+ "RcvWndScale",
+ "PendingBufUsed",
+ }
+}
+
+func (t *TCPReceiverState) beforeSave() {}
+
+// +checklocksignore
+func (t *TCPReceiverState) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ stateSinkObject.Save(0, &t.RcvNxt)
+ stateSinkObject.Save(1, &t.RcvAcc)
+ stateSinkObject.Save(2, &t.RcvWndScale)
+ stateSinkObject.Save(3, &t.PendingBufUsed)
+}
+
+func (t *TCPReceiverState) afterLoad() {}
+
+// +checklocksignore
+func (t *TCPReceiverState) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.RcvNxt)
+ stateSourceObject.Load(1, &t.RcvAcc)
+ stateSourceObject.Load(2, &t.RcvWndScale)
+ stateSourceObject.Load(3, &t.PendingBufUsed)
+}
+
+func (t *TCPRTTState) StateTypeName() string {
+ return "pkg/tcpip/stack.TCPRTTState"
+}
+
+func (t *TCPRTTState) StateFields() []string {
+ return []string{
+ "SRTT",
+ "RTTVar",
+ "SRTTInited",
+ }
+}
+
+func (t *TCPRTTState) beforeSave() {}
+
+// +checklocksignore
+func (t *TCPRTTState) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ stateSinkObject.Save(0, &t.SRTT)
+ stateSinkObject.Save(1, &t.RTTVar)
+ stateSinkObject.Save(2, &t.SRTTInited)
+}
+
+func (t *TCPRTTState) afterLoad() {}
+
+// +checklocksignore
+func (t *TCPRTTState) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.SRTT)
+ stateSourceObject.Load(1, &t.RTTVar)
+ stateSourceObject.Load(2, &t.SRTTInited)
+}
+
+func (t *TCPSenderState) StateTypeName() string {
+ return "pkg/tcpip/stack.TCPSenderState"
+}
+
+func (t *TCPSenderState) StateFields() []string {
+ return []string{
+ "LastSendTime",
+ "DupAckCount",
+ "SndCwnd",
+ "Ssthresh",
+ "SndCAAckCount",
+ "Outstanding",
+ "SackedOut",
+ "SndWnd",
+ "SndUna",
+ "SndNxt",
+ "RTTMeasureSeqNum",
+ "RTTMeasureTime",
+ "Closed",
+ "RTO",
+ "RTTState",
+ "MaxPayloadSize",
+ "SndWndScale",
+ "MaxSentAck",
+ "FastRecovery",
+ "Cubic",
+ "RACKState",
+ }
+}
+
+func (t *TCPSenderState) beforeSave() {}
+
+// +checklocksignore
+func (t *TCPSenderState) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ stateSinkObject.Save(0, &t.LastSendTime)
+ stateSinkObject.Save(1, &t.DupAckCount)
+ stateSinkObject.Save(2, &t.SndCwnd)
+ stateSinkObject.Save(3, &t.Ssthresh)
+ stateSinkObject.Save(4, &t.SndCAAckCount)
+ stateSinkObject.Save(5, &t.Outstanding)
+ stateSinkObject.Save(6, &t.SackedOut)
+ stateSinkObject.Save(7, &t.SndWnd)
+ stateSinkObject.Save(8, &t.SndUna)
+ stateSinkObject.Save(9, &t.SndNxt)
+ stateSinkObject.Save(10, &t.RTTMeasureSeqNum)
+ stateSinkObject.Save(11, &t.RTTMeasureTime)
+ stateSinkObject.Save(12, &t.Closed)
+ stateSinkObject.Save(13, &t.RTO)
+ stateSinkObject.Save(14, &t.RTTState)
+ stateSinkObject.Save(15, &t.MaxPayloadSize)
+ stateSinkObject.Save(16, &t.SndWndScale)
+ stateSinkObject.Save(17, &t.MaxSentAck)
+ stateSinkObject.Save(18, &t.FastRecovery)
+ stateSinkObject.Save(19, &t.Cubic)
+ stateSinkObject.Save(20, &t.RACKState)
+}
+
+func (t *TCPSenderState) afterLoad() {}
+
+// +checklocksignore
+func (t *TCPSenderState) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.LastSendTime)
+ stateSourceObject.Load(1, &t.DupAckCount)
+ stateSourceObject.Load(2, &t.SndCwnd)
+ stateSourceObject.Load(3, &t.Ssthresh)
+ stateSourceObject.Load(4, &t.SndCAAckCount)
+ stateSourceObject.Load(5, &t.Outstanding)
+ stateSourceObject.Load(6, &t.SackedOut)
+ stateSourceObject.Load(7, &t.SndWnd)
+ stateSourceObject.Load(8, &t.SndUna)
+ stateSourceObject.Load(9, &t.SndNxt)
+ stateSourceObject.Load(10, &t.RTTMeasureSeqNum)
+ stateSourceObject.Load(11, &t.RTTMeasureTime)
+ stateSourceObject.Load(12, &t.Closed)
+ stateSourceObject.Load(13, &t.RTO)
+ stateSourceObject.Load(14, &t.RTTState)
+ stateSourceObject.Load(15, &t.MaxPayloadSize)
+ stateSourceObject.Load(16, &t.SndWndScale)
+ stateSourceObject.Load(17, &t.MaxSentAck)
+ stateSourceObject.Load(18, &t.FastRecovery)
+ stateSourceObject.Load(19, &t.Cubic)
+ stateSourceObject.Load(20, &t.RACKState)
+}
+
+func (t *TCPSACKInfo) StateTypeName() string {
+ return "pkg/tcpip/stack.TCPSACKInfo"
+}
+
+func (t *TCPSACKInfo) StateFields() []string {
+ return []string{
+ "Blocks",
+ "ReceivedBlocks",
+ "MaxSACKED",
+ }
+}
+
+func (t *TCPSACKInfo) beforeSave() {}
+
+// +checklocksignore
+func (t *TCPSACKInfo) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ stateSinkObject.Save(0, &t.Blocks)
+ stateSinkObject.Save(1, &t.ReceivedBlocks)
+ stateSinkObject.Save(2, &t.MaxSACKED)
+}
+
+func (t *TCPSACKInfo) afterLoad() {}
+
+// +checklocksignore
+func (t *TCPSACKInfo) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.Blocks)
+ stateSourceObject.Load(1, &t.ReceivedBlocks)
+ stateSourceObject.Load(2, &t.MaxSACKED)
+}
+
+func (r *RcvBufAutoTuneParams) StateTypeName() string {
+ return "pkg/tcpip/stack.RcvBufAutoTuneParams"
+}
+
+func (r *RcvBufAutoTuneParams) StateFields() []string {
+ return []string{
+ "MeasureTime",
+ "CopiedBytes",
+ "PrevCopiedBytes",
+ "RcvBufSize",
+ "RTT",
+ "RTTVar",
+ "RTTMeasureSeqNumber",
+ "RTTMeasureTime",
+ "Disabled",
+ }
+}
+
+func (r *RcvBufAutoTuneParams) beforeSave() {}
+
+// +checklocksignore
+func (r *RcvBufAutoTuneParams) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.MeasureTime)
+ stateSinkObject.Save(1, &r.CopiedBytes)
+ stateSinkObject.Save(2, &r.PrevCopiedBytes)
+ stateSinkObject.Save(3, &r.RcvBufSize)
+ stateSinkObject.Save(4, &r.RTT)
+ stateSinkObject.Save(5, &r.RTTVar)
+ stateSinkObject.Save(6, &r.RTTMeasureSeqNumber)
+ stateSinkObject.Save(7, &r.RTTMeasureTime)
+ stateSinkObject.Save(8, &r.Disabled)
+}
+
+func (r *RcvBufAutoTuneParams) afterLoad() {}
+
+// +checklocksignore
+func (r *RcvBufAutoTuneParams) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.MeasureTime)
+ stateSourceObject.Load(1, &r.CopiedBytes)
+ stateSourceObject.Load(2, &r.PrevCopiedBytes)
+ stateSourceObject.Load(3, &r.RcvBufSize)
+ stateSourceObject.Load(4, &r.RTT)
+ stateSourceObject.Load(5, &r.RTTVar)
+ stateSourceObject.Load(6, &r.RTTMeasureSeqNumber)
+ stateSourceObject.Load(7, &r.RTTMeasureTime)
+ stateSourceObject.Load(8, &r.Disabled)
+}
+
+func (t *TCPRcvBufState) StateTypeName() string {
+ return "pkg/tcpip/stack.TCPRcvBufState"
+}
+
+func (t *TCPRcvBufState) StateFields() []string {
+ return []string{
+ "RcvBufUsed",
+ "RcvAutoParams",
+ "RcvClosed",
+ }
+}
+
+func (t *TCPRcvBufState) beforeSave() {}
+
+// +checklocksignore
+func (t *TCPRcvBufState) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ stateSinkObject.Save(0, &t.RcvBufUsed)
+ stateSinkObject.Save(1, &t.RcvAutoParams)
+ stateSinkObject.Save(2, &t.RcvClosed)
+}
+
+func (t *TCPRcvBufState) afterLoad() {}
+
+// +checklocksignore
+func (t *TCPRcvBufState) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.RcvBufUsed)
+ stateSourceObject.Load(1, &t.RcvAutoParams)
+ stateSourceObject.Load(2, &t.RcvClosed)
+}
+
+func (t *TCPSndBufState) StateTypeName() string {
+ return "pkg/tcpip/stack.TCPSndBufState"
+}
+
+func (t *TCPSndBufState) StateFields() []string {
+ return []string{
+ "SndBufSize",
+ "SndBufUsed",
+ "SndClosed",
+ "PacketTooBigCount",
+ "SndMTU",
+ }
+}
+
+func (t *TCPSndBufState) beforeSave() {}
+
+// +checklocksignore
+func (t *TCPSndBufState) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ stateSinkObject.Save(0, &t.SndBufSize)
+ stateSinkObject.Save(1, &t.SndBufUsed)
+ stateSinkObject.Save(2, &t.SndClosed)
+ stateSinkObject.Save(3, &t.PacketTooBigCount)
+ stateSinkObject.Save(4, &t.SndMTU)
+}
+
+func (t *TCPSndBufState) afterLoad() {}
+
+// +checklocksignore
+func (t *TCPSndBufState) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.SndBufSize)
+ stateSourceObject.Load(1, &t.SndBufUsed)
+ stateSourceObject.Load(2, &t.SndClosed)
+ stateSourceObject.Load(3, &t.PacketTooBigCount)
+ stateSourceObject.Load(4, &t.SndMTU)
+}
+
+func (t *TCPEndpointStateInner) StateTypeName() string {
+ return "pkg/tcpip/stack.TCPEndpointStateInner"
+}
+
+func (t *TCPEndpointStateInner) StateFields() []string {
+ return []string{
+ "TSOffset",
+ "SACKPermitted",
+ "SendTSOk",
+ "RecentTS",
+ }
+}
+
+func (t *TCPEndpointStateInner) beforeSave() {}
+
+// +checklocksignore
+func (t *TCPEndpointStateInner) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ stateSinkObject.Save(0, &t.TSOffset)
+ stateSinkObject.Save(1, &t.SACKPermitted)
+ stateSinkObject.Save(2, &t.SendTSOk)
+ stateSinkObject.Save(3, &t.RecentTS)
+}
+
+func (t *TCPEndpointStateInner) afterLoad() {}
+
+// +checklocksignore
+func (t *TCPEndpointStateInner) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.TSOffset)
+ stateSourceObject.Load(1, &t.SACKPermitted)
+ stateSourceObject.Load(2, &t.SendTSOk)
+ stateSourceObject.Load(3, &t.RecentTS)
+}
+
+func (t *TCPEndpointState) StateTypeName() string {
+ return "pkg/tcpip/stack.TCPEndpointState"
+}
+
+func (t *TCPEndpointState) StateFields() []string {
+ return []string{
+ "TCPEndpointStateInner",
+ "ID",
+ "SegTime",
+ "RcvBufState",
+ "SndBufState",
+ "SACK",
+ "Receiver",
+ "Sender",
+ }
+}
+
+func (t *TCPEndpointState) beforeSave() {}
+
+// +checklocksignore
+func (t *TCPEndpointState) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ stateSinkObject.Save(0, &t.TCPEndpointStateInner)
+ stateSinkObject.Save(1, &t.ID)
+ stateSinkObject.Save(2, &t.SegTime)
+ stateSinkObject.Save(3, &t.RcvBufState)
+ stateSinkObject.Save(4, &t.SndBufState)
+ stateSinkObject.Save(5, &t.SACK)
+ stateSinkObject.Save(6, &t.Receiver)
+ stateSinkObject.Save(7, &t.Sender)
+}
+
+func (t *TCPEndpointState) afterLoad() {}
+
+// +checklocksignore
+func (t *TCPEndpointState) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.TCPEndpointStateInner)
+ stateSourceObject.Load(1, &t.ID)
+ stateSourceObject.Load(2, &t.SegTime)
+ stateSourceObject.Load(3, &t.RcvBufState)
+ stateSourceObject.Load(4, &t.SndBufState)
+ stateSourceObject.Load(5, &t.SACK)
+ stateSourceObject.Load(6, &t.Receiver)
+ stateSourceObject.Load(7, &t.Sender)
+}
+
+func (ep *multiPortEndpoint) StateTypeName() string {
+ return "pkg/tcpip/stack.multiPortEndpoint"
+}
+
+func (ep *multiPortEndpoint) StateFields() []string {
+ return []string{
+ "demux",
+ "netProto",
+ "transProto",
+ "endpoints",
+ "flags",
+ }
+}
+
+func (ep *multiPortEndpoint) beforeSave() {}
+
+// +checklocksignore
+func (ep *multiPortEndpoint) StateSave(stateSinkObject state.Sink) {
+ ep.beforeSave()
+ stateSinkObject.Save(0, &ep.demux)
+ stateSinkObject.Save(1, &ep.netProto)
+ stateSinkObject.Save(2, &ep.transProto)
+ stateSinkObject.Save(3, &ep.endpoints)
+ stateSinkObject.Save(4, &ep.flags)
+}
+
+func (ep *multiPortEndpoint) afterLoad() {}
+
+// +checklocksignore
+func (ep *multiPortEndpoint) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &ep.demux)
+ stateSourceObject.Load(1, &ep.netProto)
+ stateSourceObject.Load(2, &ep.transProto)
+ stateSourceObject.Load(3, &ep.endpoints)
+ stateSourceObject.Load(4, &ep.flags)
+}
+
+func (l *tupleList) StateTypeName() string {
+ return "pkg/tcpip/stack.tupleList"
+}
+
+func (l *tupleList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *tupleList) beforeSave() {}
+
+// +checklocksignore
+func (l *tupleList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *tupleList) afterLoad() {}
+
+// +checklocksignore
+func (l *tupleList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *tupleEntry) StateTypeName() string {
+ return "pkg/tcpip/stack.tupleEntry"
+}
+
+func (e *tupleEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *tupleEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *tupleEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *tupleEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *tupleEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func init() {
+ state.Register((*tuple)(nil))
+ state.Register((*tupleID)(nil))
+ state.Register((*conn)(nil))
+ state.Register((*ConnTrack)(nil))
+ state.Register((*bucket)(nil))
+ state.Register((*unixTime)(nil))
+ state.Register((*IPTables)(nil))
+ state.Register((*Table)(nil))
+ state.Register((*Rule)(nil))
+ state.Register((*IPHeaderFilter)(nil))
+ state.Register((*neighborEntryList)(nil))
+ state.Register((*neighborEntryEntry)(nil))
+ state.Register((*PacketBufferList)(nil))
+ state.Register((*PacketBufferEntry)(nil))
+ state.Register((*TransportEndpointID)(nil))
+ state.Register((*GSOType)(nil))
+ state.Register((*GSO)(nil))
+ state.Register((*TransportEndpointInfo)(nil))
+ state.Register((*TCPCubicState)(nil))
+ state.Register((*TCPRACKState)(nil))
+ state.Register((*TCPEndpointID)(nil))
+ state.Register((*TCPFastRecoveryState)(nil))
+ state.Register((*TCPReceiverState)(nil))
+ state.Register((*TCPRTTState)(nil))
+ state.Register((*TCPSenderState)(nil))
+ state.Register((*TCPSACKInfo)(nil))
+ state.Register((*RcvBufAutoTuneParams)(nil))
+ state.Register((*TCPRcvBufState)(nil))
+ state.Register((*TCPSndBufState)(nil))
+ state.Register((*TCPEndpointStateInner)(nil))
+ state.Register((*TCPEndpointState)(nil))
+ state.Register((*multiPortEndpoint)(nil))
+ state.Register((*tupleList)(nil))
+ state.Register((*tupleEntry)(nil))
+}
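
The generated file above wires each stack type into gVisor's save/restore machinery: StateTypeName and StateFields describe the type, StateSave/StateLoad move every field through a numbered slot on the Sink/Source, and init registers each type with the state package. The hand-written sketch below shows the same contract for a hypothetical type in a hypothetical package (it is not part of the stack package) and uses only the Sink/Source/Register calls visible in the generated code above.

package mypkg

import "gvisor.dev/gvisor/pkg/state"

// counter is a hypothetical savable type, used only to illustrate the
// interface that stateify generates for the stack types above.
type counter struct {
	hits  uint64
	label string
}

func (c *counter) StateTypeName() string { return "mypkg.counter" }

func (c *counter) StateFields() []string { return []string{"hits", "label"} }

func (c *counter) beforeSave() {}

// StateSave writes each field into its numbered slot, mirroring the
// generated StateSave methods above.
func (c *counter) StateSave(sink state.Sink) {
	c.beforeSave()
	sink.Save(0, &c.hits)
	sink.Save(1, &c.label)
}

func (c *counter) afterLoad() {}

// StateLoad reads the fields back by slot number.
func (c *counter) StateLoad(source state.Source) {
	source.Load(0, &c.hits)
	source.Load(1, &c.label)
}

func init() {
	state.Register((*counter)(nil))
}

In the generated code, a type with non-trivial restore logic (IPTables, for example) schedules it by calling source.AfterLoad with its afterLoad method rather than leaving afterLoad empty.
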
diff --git a/pkg/tcpip/stack/stack_test.go b/pkg/tcpip/stack/stack_test.go
deleted file mode 100644
index 21951d05a..000000000
--- a/pkg/tcpip/stack/stack_test.go
+++ /dev/null
@@ -1,4543 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package stack_test contains tests for the stack. It is in its own package so
-// that the tests can also validate that all definitions needed to implement
-// transport and network protocols are properly exported by the stack package.
-package stack_test
-
-import (
- "bytes"
- "fmt"
- "math"
- "net"
- "sort"
- "testing"
- "time"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/rand"
- "gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/faketime"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/link/channel"
- "gvisor.dev/gvisor/pkg/tcpip/link/loopback"
- "gvisor.dev/gvisor/pkg/tcpip/network/arp"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/testutil"
- "gvisor.dev/gvisor/pkg/tcpip/transport/udp"
-)
-
-const (
- fakeNetNumber tcpip.NetworkProtocolNumber = math.MaxUint32
- fakeNetHeaderLen = 12
- fakeDefaultPrefixLen = 8
-
- // fakeControlProtocol is used for control packets that represent
- // destination port unreachable.
- fakeControlProtocol tcpip.TransportProtocolNumber = 2
-
- // defaultMTU is the MTU, in bytes, used throughout the tests, except
- // where another value is explicitly used. It is chosen to match the MTU
- // of loopback interfaces on Linux systems.
- defaultMTU = 65536
-
- dstAddrOffset = 0
- srcAddrOffset = 1
- protocolNumberOffset = 2
-)
-
-func checkGetMainNICAddress(s *stack.Stack, nicID tcpip.NICID, proto tcpip.NetworkProtocolNumber, want tcpip.AddressWithPrefix) error {
- if addr, err := s.GetMainNICAddress(nicID, proto); err != nil {
- return fmt.Errorf("stack.GetMainNICAddress(%d, %d): %s", nicID, proto, err)
- } else if addr != want {
- return fmt.Errorf("got stack.GetMainNICAddress(%d, %d) = %s, want = %s", nicID, proto, addr, want)
- }
- return nil
-}
-
-// fakeNetworkEndpoint is a network-layer protocol endpoint. It counts sent and
-// received packets; the counts of all endpoints are aggregated in the protocol
-// descriptor.
-//
-// Headers of this protocol are fakeNetHeaderLen bytes, but we currently only
-// use the first three: destination address, source address, and transport
-// protocol. They're all one byte fields to simplify parsing.
-type fakeNetworkEndpoint struct {
- stack.AddressableEndpointState
-
- mu struct {
- sync.RWMutex
-
- enabled bool
- forwarding bool
- }
-
- nic stack.NetworkInterface
- proto *fakeNetworkProtocol
- dispatcher stack.TransportDispatcher
-}
-
-func (f *fakeNetworkEndpoint) Enable() tcpip.Error {
- f.mu.Lock()
- defer f.mu.Unlock()
- f.mu.enabled = true
- return nil
-}
-
-func (f *fakeNetworkEndpoint) Enabled() bool {
- f.mu.RLock()
- defer f.mu.RUnlock()
- return f.mu.enabled
-}
-
-func (f *fakeNetworkEndpoint) Disable() {
- f.mu.Lock()
- defer f.mu.Unlock()
- f.mu.enabled = false
-}
-
-func (f *fakeNetworkEndpoint) MTU() uint32 {
- return f.nic.MTU() - uint32(f.MaxHeaderLength())
-}
-
-func (*fakeNetworkEndpoint) DefaultTTL() uint8 {
- return 123
-}
-
-func (f *fakeNetworkEndpoint) HandlePacket(pkt *stack.PacketBuffer) {
- if _, _, ok := f.proto.Parse(pkt); !ok {
- return
- }
-
- // Increment the received packet count in the protocol descriptor.
- netHdr := pkt.NetworkHeader().View()
-
- dst := tcpip.Address(netHdr[dstAddrOffset:][:1])
- addressEndpoint := f.AcquireAssignedAddress(dst, f.nic.Promiscuous(), stack.CanBePrimaryEndpoint)
- if addressEndpoint == nil {
- return
- }
- addressEndpoint.DecRef()
-
- f.proto.packetCount[int(dst[0])%len(f.proto.packetCount)]++
-
- // Handle control packets.
- if netHdr[protocolNumberOffset] == uint8(fakeControlProtocol) {
- hdr, ok := pkt.Data().PullUp(fakeNetHeaderLen)
- if !ok {
- return
- }
- // DeleteFront invalidates slices. Make a copy before trimming.
- nb := append([]byte(nil), hdr...)
- pkt.Data().DeleteFront(fakeNetHeaderLen)
- f.dispatcher.DeliverTransportError(
- tcpip.Address(nb[srcAddrOffset:srcAddrOffset+1]),
- tcpip.Address(nb[dstAddrOffset:dstAddrOffset+1]),
- fakeNetNumber,
- tcpip.TransportProtocolNumber(nb[protocolNumberOffset]),
- // Nothing checks the error.
- nil, /* transport error */
- pkt,
- )
- return
- }
-
- // Dispatch the packet to the transport protocol.
- f.dispatcher.DeliverTransportPacket(tcpip.TransportProtocolNumber(pkt.NetworkHeader().View()[protocolNumberOffset]), pkt)
-}
-
-func (f *fakeNetworkEndpoint) MaxHeaderLength() uint16 {
- return f.nic.MaxHeaderLength() + fakeNetHeaderLen
-}
-
-func (f *fakeNetworkEndpoint) NetworkProtocolNumber() tcpip.NetworkProtocolNumber {
- return f.proto.Number()
-}
-
-func (f *fakeNetworkEndpoint) WritePacket(r *stack.Route, params stack.NetworkHeaderParams, pkt *stack.PacketBuffer) tcpip.Error {
- // Increment the sent packet count in the protocol descriptor.
- f.proto.sendPacketCount[int(r.RemoteAddress()[0])%len(f.proto.sendPacketCount)]++
-
- // Add the protocol's header to the packet and send it to the link
- // endpoint.
- hdr := pkt.NetworkHeader().Push(fakeNetHeaderLen)
- pkt.NetworkProtocolNumber = fakeNetNumber
- hdr[dstAddrOffset] = r.RemoteAddress()[0]
- hdr[srcAddrOffset] = r.LocalAddress()[0]
- hdr[protocolNumberOffset] = byte(params.Protocol)
-
- if r.Loop()&stack.PacketLoop != 0 {
- f.HandlePacket(pkt.Clone())
- }
- if r.Loop()&stack.PacketOut == 0 {
- return nil
- }
-
- return f.nic.WritePacket(r, fakeNetNumber, pkt)
-}
-
-// WritePackets implements stack.LinkEndpoint.WritePackets.
-func (*fakeNetworkEndpoint) WritePackets(*stack.Route, stack.PacketBufferList, stack.NetworkHeaderParams) (int, tcpip.Error) {
- panic("not implemented")
-}
-
-func (*fakeNetworkEndpoint) WriteHeaderIncludedPacket(*stack.Route, *stack.PacketBuffer) tcpip.Error {
- return &tcpip.ErrNotSupported{}
-}
-
-func (f *fakeNetworkEndpoint) Close() {
- f.AddressableEndpointState.Cleanup()
-}
-
-// Stats implements NetworkEndpoint.
-func (*fakeNetworkEndpoint) Stats() stack.NetworkEndpointStats {
- return &fakeNetworkEndpointStats{}
-}
-
-var _ stack.NetworkEndpointStats = (*fakeNetworkEndpointStats)(nil)
-
-type fakeNetworkEndpointStats struct{}
-
-// IsNetworkEndpointStats implements stack.NetworkEndpointStats.
-func (*fakeNetworkEndpointStats) IsNetworkEndpointStats() {}
-
-// fakeNetworkProtocol is a network-layer protocol descriptor. It aggregates the
-// number of packets sent and received via endpoints of this protocol. The index
-// where packets are added is given by the packet's destination address MOD 10.
-type fakeNetworkProtocol struct {
- packetCount [10]int
- sendPacketCount [10]int
- defaultTTL uint8
-}
-
-func (*fakeNetworkProtocol) Number() tcpip.NetworkProtocolNumber {
- return fakeNetNumber
-}
-
-func (*fakeNetworkProtocol) MinimumPacketSize() int {
- return fakeNetHeaderLen
-}
-
-func (*fakeNetworkProtocol) DefaultPrefixLen() int {
- return fakeDefaultPrefixLen
-}
-
-func (f *fakeNetworkProtocol) PacketCount(intfAddr byte) int {
- return f.packetCount[int(intfAddr)%len(f.packetCount)]
-}
-
-func (*fakeNetworkProtocol) ParseAddresses(v buffer.View) (src, dst tcpip.Address) {
- return tcpip.Address(v[srcAddrOffset : srcAddrOffset+1]), tcpip.Address(v[dstAddrOffset : dstAddrOffset+1])
-}
-
-func (f *fakeNetworkProtocol) NewEndpoint(nic stack.NetworkInterface, dispatcher stack.TransportDispatcher) stack.NetworkEndpoint {
- e := &fakeNetworkEndpoint{
- nic: nic,
- proto: f,
- dispatcher: dispatcher,
- }
- e.AddressableEndpointState.Init(e)
- return e
-}
-
-func (f *fakeNetworkProtocol) SetOption(option tcpip.SettableNetworkProtocolOption) tcpip.Error {
- switch v := option.(type) {
- case *tcpip.DefaultTTLOption:
- f.defaultTTL = uint8(*v)
- return nil
- default:
- return &tcpip.ErrUnknownProtocolOption{}
- }
-}
-
-func (f *fakeNetworkProtocol) Option(option tcpip.GettableNetworkProtocolOption) tcpip.Error {
- switch v := option.(type) {
- case *tcpip.DefaultTTLOption:
- *v = tcpip.DefaultTTLOption(f.defaultTTL)
- return nil
- default:
- return &tcpip.ErrUnknownProtocolOption{}
- }
-}
-
-// Close implements NetworkProtocol.Close.
-func (*fakeNetworkProtocol) Close() {}
-
-// Wait implements NetworkProtocol.Wait.
-func (*fakeNetworkProtocol) Wait() {}
-
-// Parse implements NetworkProtocol.Parse.
-func (*fakeNetworkProtocol) Parse(pkt *stack.PacketBuffer) (tcpip.TransportProtocolNumber, bool, bool) {
- hdr, ok := pkt.NetworkHeader().Consume(fakeNetHeaderLen)
- if !ok {
- return 0, false, false
- }
- pkt.NetworkProtocolNumber = fakeNetNumber
- return tcpip.TransportProtocolNumber(hdr[protocolNumberOffset]), true, true
-}
-
-// Forwarding implements stack.ForwardingNetworkEndpoint.
-func (f *fakeNetworkEndpoint) Forwarding() bool {
- f.mu.RLock()
- defer f.mu.RUnlock()
- return f.mu.forwarding
-}
-
-// SetForwarding implements stack.ForwardingNetworkEndpoint.
-func (f *fakeNetworkEndpoint) SetForwarding(v bool) {
- f.mu.Lock()
- defer f.mu.Unlock()
- f.mu.forwarding = v
-}
-
-func fakeNetFactory(*stack.Stack) stack.NetworkProtocol {
- return &fakeNetworkProtocol{}
-}
-
-// linkEPWithMockedAttach is a stack.LinkEndpoint that tests can use to verify
-// that LinkEndpoint.Attach was called.
-type linkEPWithMockedAttach struct {
- stack.LinkEndpoint
- attached bool
-}
-
-// Attach implements stack.LinkEndpoint.Attach.
-func (l *linkEPWithMockedAttach) Attach(d stack.NetworkDispatcher) {
- l.LinkEndpoint.Attach(d)
- l.attached = d != nil
-}
-
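-// isAttached reports whether the endpoint is currently attached to a
-// dispatcher (Attach with a nil dispatcher is recorded as a detach).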
-func (l *linkEPWithMockedAttach) isAttached() bool {
- return l.attached
-}
-
-// containsAddr reports whether list contains item.
-func containsAddr(list []tcpip.ProtocolAddress, item tcpip.ProtocolAddress) bool {
- for _, i := range list {
- if i == item {
- return true
- }
- }
-
- return false
-}
-
-func TestNetworkReceive(t *testing.T) {
- // Create a stack with the fake network protocol, one nic, and two
- // addresses attached to it: 1 & 2.
- ep := channel.New(10, defaultMTU, "")
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- })
- if err := s.CreateNIC(1, ep); err != nil {
- t.Fatal("CreateNIC failed:", err)
- }
-
- if err := s.AddAddress(1, fakeNetNumber, "\x01"); err != nil {
- t.Fatal("AddAddress failed:", err)
- }
-
- if err := s.AddAddress(1, fakeNetNumber, "\x02"); err != nil {
- t.Fatal("AddAddress failed:", err)
- }
-
- fakeNet := s.NetworkProtocolInstance(fakeNetNumber).(*fakeNetworkProtocol)
-
- buf := buffer.NewView(30)
-
- // Make sure packet with wrong address is not delivered.
- buf[dstAddrOffset] = 3
- ep.InjectInbound(fakeNetNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
- if fakeNet.packetCount[1] != 0 {
- t.Errorf("packetCount[1] = %d, want %d", fakeNet.packetCount[1], 0)
- }
- if fakeNet.packetCount[2] != 0 {
- t.Errorf("packetCount[2] = %d, want %d", fakeNet.packetCount[2], 0)
- }
-
- // Make sure packet is delivered to first endpoint.
- buf[dstAddrOffset] = 1
- ep.InjectInbound(fakeNetNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
- if fakeNet.packetCount[1] != 1 {
- t.Errorf("packetCount[1] = %d, want %d", fakeNet.packetCount[1], 1)
- }
- if fakeNet.packetCount[2] != 0 {
- t.Errorf("packetCount[2] = %d, want %d", fakeNet.packetCount[2], 0)
- }
-
- // Make sure packet is delivered to second endpoint.
- buf[dstAddrOffset] = 2
- ep.InjectInbound(fakeNetNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
- if fakeNet.packetCount[1] != 1 {
- t.Errorf("packetCount[1] = %d, want %d", fakeNet.packetCount[1], 1)
- }
- if fakeNet.packetCount[2] != 1 {
- t.Errorf("packetCount[2] = %d, want %d", fakeNet.packetCount[2], 1)
- }
-
- // Make sure packet is not delivered if protocol number is wrong.
- ep.InjectInbound(fakeNetNumber-1, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
- if fakeNet.packetCount[1] != 1 {
- t.Errorf("packetCount[1] = %d, want %d", fakeNet.packetCount[1], 1)
- }
- if fakeNet.packetCount[2] != 1 {
- t.Errorf("packetCount[2] = %d, want %d", fakeNet.packetCount[2], 1)
- }
-
- // Make sure packet that is too small is dropped.
- buf.CapLength(2)
- ep.InjectInbound(fakeNetNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
- if fakeNet.packetCount[1] != 1 {
- t.Errorf("packetCount[1] = %d, want %d", fakeNet.packetCount[1], 1)
- }
- if fakeNet.packetCount[2] != 1 {
- t.Errorf("packetCount[2] = %d, want %d", fakeNet.packetCount[2], 1)
- }
-}
-
-func sendTo(s *stack.Stack, addr tcpip.Address, payload buffer.View) tcpip.Error {
- r, err := s.FindRoute(0, "", addr, fakeNetNumber, false /* multicastLoop */)
- if err != nil {
- return err
- }
- defer r.Release()
- return send(r, payload)
-}
-
-func send(r *stack.Route, payload buffer.View) tcpip.Error {
- return r.WritePacket(stack.NetworkHeaderParams{Protocol: fakeTransNumber, TTL: 123, TOS: stack.DefaultTOS}, stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: int(r.MaxHeaderLength()),
- Data: payload.ToVectorisedView(),
- }))
-}
-
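-// testSendTo drains any packets already queued on the link endpoint, performs
-// the send, and then checks via Drain's packet count that exactly one packet
-// was written out.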
-func testSendTo(t *testing.T, s *stack.Stack, addr tcpip.Address, ep *channel.Endpoint, payload buffer.View) {
- t.Helper()
- ep.Drain()
- if err := sendTo(s, addr, payload); err != nil {
- t.Error("sendTo failed:", err)
- }
- if got, want := ep.Drain(), 1; got != want {
- t.Errorf("sendTo packet count: got = %d, want %d", got, want)
- }
-}
-
-func testSend(t *testing.T, r *stack.Route, ep *channel.Endpoint, payload buffer.View) {
- t.Helper()
- ep.Drain()
- if err := send(r, payload); err != nil {
- t.Error("send failed:", err)
- }
- if got, want := ep.Drain(), 1; got != want {
- t.Errorf("send packet count: got = %d, want %d", got, want)
- }
-}
-
-func testFailingSend(t *testing.T, r *stack.Route, payload buffer.View, wantErr tcpip.Error) {
- t.Helper()
- if gotErr := send(r, payload); gotErr != wantErr {
- t.Errorf("send failed: got = %s, want = %s ", gotErr, wantErr)
- }
-}
-
-func testFailingSendTo(t *testing.T, s *stack.Stack, addr tcpip.Address, payload buffer.View, wantErr tcpip.Error) {
- t.Helper()
- if gotErr := sendTo(s, addr, payload); gotErr != wantErr {
- t.Errorf("sendto failed: got = %s, want = %s ", gotErr, wantErr)
- }
-}
-
-func testRecv(t *testing.T, fakeNet *fakeNetworkProtocol, localAddrByte byte, ep *channel.Endpoint, buf buffer.View) {
- t.Helper()
- // testRecvInternal injects one packet, and we expect to receive it.
- want := fakeNet.PacketCount(localAddrByte) + 1
- testRecvInternal(t, fakeNet, localAddrByte, ep, buf, want)
-}
-
-func testFailingRecv(t *testing.T, fakeNet *fakeNetworkProtocol, localAddrByte byte, ep *channel.Endpoint, buf buffer.View) {
- t.Helper()
- // testRecvInternal injects one packet, and we do NOT expect to receive it.
- want := fakeNet.PacketCount(localAddrByte)
- testRecvInternal(t, fakeNet, localAddrByte, ep, buf, want)
-}
-
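-// testRecvInternal injects one packet destined to localAddrByte and checks
-// that the protocol's receive count for that address equals want.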
-func testRecvInternal(t *testing.T, fakeNet *fakeNetworkProtocol, localAddrByte byte, ep *channel.Endpoint, buf buffer.View, want int) {
- t.Helper()
- ep.InjectInbound(fakeNetNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
- if got := fakeNet.PacketCount(localAddrByte); got != want {
- t.Errorf("receive packet count: got = %d, want %d", got, want)
- }
-}
-
-func TestNetworkSend(t *testing.T) {
- // Create a stack with the fake network protocol, one nic, and one
- // address: 1. The route table sends all packets through the only
- // existing nic.
- ep := channel.New(10, defaultMTU, "")
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- })
- if err := s.CreateNIC(1, ep); err != nil {
- t.Fatal("NewNIC failed:", err)
- }
-
- {
- subnet, err := tcpip.NewSubnet("\x00", "\x00")
- if err != nil {
- t.Fatal(err)
- }
- s.SetRouteTable([]tcpip.Route{{Destination: subnet, Gateway: "\x00", NIC: 1}})
- }
-
- if err := s.AddAddress(1, fakeNetNumber, "\x01"); err != nil {
- t.Fatal("AddAddress failed:", err)
- }
-
- // Make sure that the link-layer endpoint received the outbound packet.
- testSendTo(t, s, "\x03", ep, nil)
-}
-
-func TestNetworkSendMultiRoute(t *testing.T) {
- // Create a stack with the fake network protocol, two nics, and two
- // addresses per nic; the first nic has odd addresses and the second
- // one has even addresses.
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- })
-
- ep1 := channel.New(10, defaultMTU, "")
- if err := s.CreateNIC(1, ep1); err != nil {
- t.Fatal("CreateNIC failed:", err)
- }
-
- if err := s.AddAddress(1, fakeNetNumber, "\x01"); err != nil {
- t.Fatal("AddAddress failed:", err)
- }
-
- if err := s.AddAddress(1, fakeNetNumber, "\x03"); err != nil {
- t.Fatal("AddAddress failed:", err)
- }
-
- ep2 := channel.New(10, defaultMTU, "")
- if err := s.CreateNIC(2, ep2); err != nil {
- t.Fatal("CreateNIC failed:", err)
- }
-
- if err := s.AddAddress(2, fakeNetNumber, "\x02"); err != nil {
- t.Fatal("AddAddress failed:", err)
- }
-
- if err := s.AddAddress(2, fakeNetNumber, "\x04"); err != nil {
- t.Fatal("AddAddress failed:", err)
- }
-
- // Set a route table that sends all packets with odd destination
- // addresses through the first NIC, and all even destination
- // addresses through the second one.
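- // With one-byte addresses and mask \x01, only the least significant
- // bit takes part in the match: subnet1 (\x01) selects odd destinations
- // and subnet0 (\x00) selects even ones.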
- {
- subnet0, err := tcpip.NewSubnet("\x00", "\x01")
- if err != nil {
- t.Fatal(err)
- }
- subnet1, err := tcpip.NewSubnet("\x01", "\x01")
- if err != nil {
- t.Fatal(err)
- }
- s.SetRouteTable([]tcpip.Route{
- {Destination: subnet1, Gateway: "\x00", NIC: 1},
- {Destination: subnet0, Gateway: "\x00", NIC: 2},
- })
- }
-
- // Send a packet to an odd destination.
- testSendTo(t, s, "\x05", ep1, nil)
-
- // Send a packet to an even destination.
- testSendTo(t, s, "\x06", ep2, nil)
-}
-
-func testRoute(t *testing.T, s *stack.Stack, nic tcpip.NICID, srcAddr, dstAddr, expectedSrcAddr tcpip.Address) {
- r, err := s.FindRoute(nic, srcAddr, dstAddr, fakeNetNumber, false /* multicastLoop */)
- if err != nil {
- t.Fatal("FindRoute failed:", err)
- }
-
- defer r.Release()
-
- if r.LocalAddress() != expectedSrcAddr {
- t.Fatalf("got Route.LocalAddress() = %s, want = %s", expectedSrcAddr, r.LocalAddress())
- }
-
- if r.RemoteAddress() != dstAddr {
- t.Fatalf("got Route.RemoteAddress() = %s, want = %s", dstAddr, r.RemoteAddress())
- }
-}
-
-func testNoRoute(t *testing.T, s *stack.Stack, nic tcpip.NICID, srcAddr, dstAddr tcpip.Address) {
- _, err := s.FindRoute(nic, srcAddr, dstAddr, fakeNetNumber, false /* multicastLoop */)
- if _, ok := err.(*tcpip.ErrNoRoute); !ok {
- t.Fatalf("FindRoute returned unexpected error, got = %v, want = %s", err, &tcpip.ErrNoRoute{})
- }
-}
-
-// TestAttachToLinkEndpointImmediately tests that a LinkEndpoint is attached to
-// a NetworkDispatcher when the NIC is created.
-func TestAttachToLinkEndpointImmediately(t *testing.T) {
- const nicID = 1
-
- tests := []struct {
- name string
- nicOpts stack.NICOptions
- }{
- {
- name: "Create enabled NIC",
- nicOpts: stack.NICOptions{Disabled: false},
- },
- {
- name: "Create disabled NIC",
- nicOpts: stack.NICOptions{Disabled: true},
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- })
-
- e := linkEPWithMockedAttach{
- LinkEndpoint: loopback.New(),
- }
-
- if err := s.CreateNICWithOptions(nicID, &e, test.nicOpts); err != nil {
- t.Fatalf("CreateNICWithOptions(%d, _, %+v) = %s", nicID, test.nicOpts, err)
- }
- if !e.isAttached() {
- t.Fatal("link endpoint not attached to a network dispatcher")
- }
- })
- }
-}
-
-func TestDisableUnknownNIC(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- })
-
- err := s.DisableNIC(1)
- if _, ok := err.(*tcpip.ErrUnknownNICID); !ok {
- t.Fatalf("got s.DisableNIC(1) = %v, want = %s", err, &tcpip.ErrUnknownNICID{})
- }
-}
-
-func TestDisabledNICsNICInfoAndCheckNIC(t *testing.T) {
- const nicID = 1
-
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- })
-
- e := loopback.New()
- nicOpts := stack.NICOptions{Disabled: true}
- if err := s.CreateNICWithOptions(nicID, e, nicOpts); err != nil {
- t.Fatalf("CreateNICWithOptions(%d, _, %+v) = %s", nicID, nicOpts, err)
- }
-
- checkNIC := func(enabled bool) {
- t.Helper()
-
- allNICInfo := s.NICInfo()
- nicInfo, ok := allNICInfo[nicID]
- if !ok {
- t.Errorf("entry for %d missing from allNICInfo = %+v", nicID, allNICInfo)
- } else if nicInfo.Flags.Running != enabled {
- t.Errorf("got nicInfo.Flags.Running = %t, want = %t", nicInfo.Flags.Running, enabled)
- }
-
- if got := s.CheckNIC(nicID); got != enabled {
- t.Errorf("got s.CheckNIC(%d) = %t, want = %t", nicID, got, enabled)
- }
- }
-
- // NIC should initially report itself as disabled.
- checkNIC(false)
-
- if err := s.EnableNIC(nicID); err != nil {
- t.Fatalf("s.EnableNIC(%d): %s", nicID, err)
- }
- checkNIC(true)
-
- // If the NIC is not reporting a correct enabled status, we cannot trust the
- // next check so end the test here.
- if t.Failed() {
- t.FailNow()
- }
-
- if err := s.DisableNIC(nicID); err != nil {
- t.Fatalf("s.DisableNIC(%d): %s", nicID, err)
- }
- checkNIC(false)
-}
-
-func TestRemoveUnknownNIC(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- })
-
- err := s.RemoveNIC(1)
- if _, ok := err.(*tcpip.ErrUnknownNICID); !ok {
- t.Fatalf("got s.RemoveNIC(1) = %v, want = %s", err, &tcpip.ErrUnknownNICID{})
- }
-}
-
-func TestRemoveNIC(t *testing.T) {
- const nicID = 1
-
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- })
-
- e := linkEPWithMockedAttach{
- LinkEndpoint: loopback.New(),
- }
- if err := s.CreateNIC(nicID, &e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
-
- // NIC should be present in NICInfo and attached to a NetworkDispatcher.
- allNICInfo := s.NICInfo()
- if _, ok := allNICInfo[nicID]; !ok {
- t.Errorf("entry for %d missing from allNICInfo = %+v", nicID, allNICInfo)
- }
- if !e.isAttached() {
- t.Fatal("link endpoint not attached to a network dispatcher")
- }
-
- // Removing a NIC should remove it from NICInfo and e should be detached from
- // the NetworkDispatcher.
- if err := s.RemoveNIC(nicID); err != nil {
- t.Fatalf("s.RemoveNIC(%d): %s", nicID, err)
- }
- if nicInfo, ok := s.NICInfo()[nicID]; ok {
- t.Errorf("got unexpected NICInfo entry for deleted NIC %d = %+v", nicID, nicInfo)
- }
- if e.isAttached() {
- t.Error("link endpoint for removed NIC still attached to a network dispatcher")
- }
-}
-
-func TestRouteWithDownNIC(t *testing.T) {
- tests := []struct {
- name string
- downFn func(s *stack.Stack, nicID tcpip.NICID) tcpip.Error
- upFn func(s *stack.Stack, nicID tcpip.NICID) tcpip.Error
- }{
- {
- name: "Disabled NIC",
- downFn: (*stack.Stack).DisableNIC,
- upFn: (*stack.Stack).EnableNIC,
- },
-
- // Once a NIC is removed, it cannot be brought up.
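- // upFn is left nil, so the bring-up checks below are skipped for this
- // case.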
- {
- name: "Removed NIC",
- downFn: (*stack.Stack).RemoveNIC,
- },
- }
-
- const unspecifiedNIC = 0
- const nicID1 = 1
- const nicID2 = 2
- const addr1 = tcpip.Address("\x01")
- const addr2 = tcpip.Address("\x02")
- const nic1Dst = tcpip.Address("\x05")
- const nic2Dst = tcpip.Address("\x06")
-
- setup := func(t *testing.T) (*stack.Stack, *channel.Endpoint, *channel.Endpoint) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- })
-
- ep1 := channel.New(1, defaultMTU, "")
- if err := s.CreateNIC(nicID1, ep1); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID1, err)
- }
-
- if err := s.AddAddress(nicID1, fakeNetNumber, addr1); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s): %s", nicID1, fakeNetNumber, addr1, err)
- }
-
- ep2 := channel.New(1, defaultMTU, "")
- if err := s.CreateNIC(nicID2, ep2); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID2, err)
- }
-
- if err := s.AddAddress(nicID2, fakeNetNumber, addr2); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s): %s", nicID2, fakeNetNumber, addr2, err)
- }
-
- // Set a route table that sends all packets with odd destination
- // addresses through the first NIC, and all even destination
- // addresses through the second one.
- {
- subnet0, err := tcpip.NewSubnet("\x00", "\x01")
- if err != nil {
- t.Fatal(err)
- }
- subnet1, err := tcpip.NewSubnet("\x01", "\x01")
- if err != nil {
- t.Fatal(err)
- }
- s.SetRouteTable([]tcpip.Route{
- {Destination: subnet1, Gateway: "\x00", NIC: nicID1},
- {Destination: subnet0, Gateway: "\x00", NIC: nicID2},
- })
- }
-
- return s, ep1, ep2
- }
-
- // Tests that routes through a down NIC are not used when looking up a route
- // for a destination.
- t.Run("Find", func(t *testing.T) {
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s, _, _ := setup(t)
-
- // Test routes to odd address.
- testRoute(t, s, unspecifiedNIC, "", "\x05", addr1)
- testRoute(t, s, unspecifiedNIC, addr1, "\x05", addr1)
- testRoute(t, s, nicID1, addr1, "\x05", addr1)
-
- // Test routes to even address.
- testRoute(t, s, unspecifiedNIC, "", "\x06", addr2)
- testRoute(t, s, unspecifiedNIC, addr2, "\x06", addr2)
- testRoute(t, s, nicID2, addr2, "\x06", addr2)
-
- // Bringing NIC1 down should result in no routes to odd addresses. Routes to
- // even addresses should continue to be available as NIC2 is still up.
- if err := test.downFn(s, nicID1); err != nil {
- t.Fatalf("test.downFn(_, %d): %s", nicID1, err)
- }
- testNoRoute(t, s, unspecifiedNIC, "", nic1Dst)
- testNoRoute(t, s, unspecifiedNIC, addr1, nic1Dst)
- testNoRoute(t, s, nicID1, addr1, nic1Dst)
- testRoute(t, s, unspecifiedNIC, "", nic2Dst, addr2)
- testRoute(t, s, unspecifiedNIC, addr2, nic2Dst, addr2)
- testRoute(t, s, nicID2, addr2, nic2Dst, addr2)
-
- // Bringing NIC2 down should result in no routes to even addresses. No
- // route should be available to any address as routes to odd addresses
- // were made unavailable by bringing NIC1 down above.
- if err := test.downFn(s, nicID2); err != nil {
- t.Fatalf("test.downFn(_, %d): %s", nicID2, err)
- }
- testNoRoute(t, s, unspecifiedNIC, "", nic1Dst)
- testNoRoute(t, s, unspecifiedNIC, addr1, nic1Dst)
- testNoRoute(t, s, nicID1, addr1, nic1Dst)
- testNoRoute(t, s, unspecifiedNIC, "", nic2Dst)
- testNoRoute(t, s, unspecifiedNIC, addr2, nic2Dst)
- testNoRoute(t, s, nicID2, addr2, nic2Dst)
-
- if upFn := test.upFn; upFn != nil {
- // Bringing NIC1 up should make routes to odd addresses available
- // again. Routes to even addresses should continue to be unavailable
- // as NIC2 is still down.
- if err := upFn(s, nicID1); err != nil {
- t.Fatalf("test.upFn(_, %d): %s", nicID1, err)
- }
- testRoute(t, s, unspecifiedNIC, "", nic1Dst, addr1)
- testRoute(t, s, unspecifiedNIC, addr1, nic1Dst, addr1)
- testRoute(t, s, nicID1, addr1, nic1Dst, addr1)
- testNoRoute(t, s, unspecifiedNIC, "", nic2Dst)
- testNoRoute(t, s, unspecifiedNIC, addr2, nic2Dst)
- testNoRoute(t, s, nicID2, addr2, nic2Dst)
- }
- })
- }
- })
-
- // Tests that writing a packet using a Route through a down NIC fails.
- t.Run("WritePacket", func(t *testing.T) {
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s, ep1, ep2 := setup(t)
-
- r1, err := s.FindRoute(nicID1, addr1, nic1Dst, fakeNetNumber, false /* multicastLoop */)
- if err != nil {
- t.Errorf("FindRoute(%d, %s, %s, %d, false): %s", nicID1, addr1, nic1Dst, fakeNetNumber, err)
- }
- defer r1.Release()
-
- r2, err := s.FindRoute(nicID2, addr2, nic2Dst, fakeNetNumber, false /* multicastLoop */)
- if err != nil {
- t.Errorf("FindRoute(%d, %s, %s, %d, false): %s", nicID2, addr2, nic2Dst, fakeNetNumber, err)
- }
- defer r2.Release()
-
- // If we failed to get routes r1 or r2, we cannot proceed with the test.
- if t.Failed() {
- t.FailNow()
- }
-
- buf := buffer.View([]byte{1})
- testSend(t, r1, ep1, buf)
- testSend(t, r2, ep2, buf)
-
- // Writes with Routes that use NIC1 should fail after NIC1 is brought down.
- if err := test.downFn(s, nicID1); err != nil {
- t.Fatalf("test.downFn(_, %d): %s", nicID1, err)
- }
- testFailingSend(t, r1, buf, &tcpip.ErrInvalidEndpointState{})
- testSend(t, r2, ep2, buf)
-
- // Writes with Routes that use NIC2 should fail after NIC2 is brought down.
- if err := test.downFn(s, nicID2); err != nil {
- t.Fatalf("test.downFn(_, %d): %s", nicID2, err)
- }
- testFailingSend(t, r1, buf, &tcpip.ErrInvalidEndpointState{})
- testFailingSend(t, r2, buf, &tcpip.ErrInvalidEndpointState{})
-
- if upFn := test.upFn; upFn != nil {
- // Writes with Routes that use NIC1 should succeed after NIC1 is
- // brought back up.
- //
- // TODO(gvisor.dev/issue/1491): Should we instead completely
- // invalidate all Routes that were bound to a NIC that was brought
- // down at some point?
- if err := upFn(s, nicID1); err != nil {
- t.Fatalf("test.upFn(_, %d): %s", nicID1, err)
- }
- testSend(t, r1, ep1, buf)
- testFailingSend(t, r2, buf, &tcpip.ErrInvalidEndpointState{})
- }
- })
- }
- })
-}
-
-func TestRoutes(t *testing.T) {
- // Create a stack with the fake network protocol, two nics, and two
- // addresses per nic; the first nic has odd addresses and the second
- // one has even addresses.
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- })
-
- ep1 := channel.New(10, defaultMTU, "")
- if err := s.CreateNIC(1, ep1); err != nil {
- t.Fatal("CreateNIC failed:", err)
- }
-
- if err := s.AddAddress(1, fakeNetNumber, "\x01"); err != nil {
- t.Fatal("AddAddress failed:", err)
- }
-
- if err := s.AddAddress(1, fakeNetNumber, "\x03"); err != nil {
- t.Fatal("AddAddress failed:", err)
- }
-
- ep2 := channel.New(10, defaultMTU, "")
- if err := s.CreateNIC(2, ep2); err != nil {
- t.Fatal("CreateNIC failed:", err)
- }
-
- if err := s.AddAddress(2, fakeNetNumber, "\x02"); err != nil {
- t.Fatal("AddAddress failed:", err)
- }
-
- if err := s.AddAddress(2, fakeNetNumber, "\x04"); err != nil {
- t.Fatal("AddAddress failed:", err)
- }
-
- // Set a route table that sends all packets with odd destination
- // addresses through the first NIC, and all even destination
- // addresses through the second one.
- {
- subnet0, err := tcpip.NewSubnet("\x00", "\x01")
- if err != nil {
- t.Fatal(err)
- }
- subnet1, err := tcpip.NewSubnet("\x01", "\x01")
- if err != nil {
- t.Fatal(err)
- }
- s.SetRouteTable([]tcpip.Route{
- {Destination: subnet1, Gateway: "\x00", NIC: 1},
- {Destination: subnet0, Gateway: "\x00", NIC: 2},
- })
- }
-
- // Test routes to odd address.
- testRoute(t, s, 0, "", "\x05", "\x01")
- testRoute(t, s, 0, "\x01", "\x05", "\x01")
- testRoute(t, s, 1, "\x01", "\x05", "\x01")
- testRoute(t, s, 0, "\x03", "\x05", "\x03")
- testRoute(t, s, 1, "\x03", "\x05", "\x03")
-
- // Test routes to even address.
- testRoute(t, s, 0, "", "\x06", "\x02")
- testRoute(t, s, 0, "\x02", "\x06", "\x02")
- testRoute(t, s, 2, "\x02", "\x06", "\x02")
- testRoute(t, s, 0, "\x04", "\x06", "\x04")
- testRoute(t, s, 2, "\x04", "\x06", "\x04")
-
- // Try to send to odd-numbered addresses from even-numbered ones, then
- // vice-versa.
- testNoRoute(t, s, 0, "\x02", "\x05")
- testNoRoute(t, s, 2, "\x02", "\x05")
- testNoRoute(t, s, 0, "\x04", "\x05")
- testNoRoute(t, s, 2, "\x04", "\x05")
-
- testNoRoute(t, s, 0, "\x01", "\x06")
- testNoRoute(t, s, 1, "\x01", "\x06")
- testNoRoute(t, s, 0, "\x03", "\x06")
- testNoRoute(t, s, 1, "\x03", "\x06")
-}
-
-func TestAddressRemoval(t *testing.T) {
- const localAddrByte byte = 0x01
- localAddr := tcpip.Address([]byte{localAddrByte})
- remoteAddr := tcpip.Address("\x02")
-
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- })
-
- ep := channel.New(10, defaultMTU, "")
- if err := s.CreateNIC(1, ep); err != nil {
- t.Fatal("CreateNIC failed:", err)
- }
-
- if err := s.AddAddress(1, fakeNetNumber, localAddr); err != nil {
- t.Fatal("AddAddress failed:", err)
- }
- {
- subnet, err := tcpip.NewSubnet("\x00", "\x00")
- if err != nil {
- t.Fatal(err)
- }
- s.SetRouteTable([]tcpip.Route{{Destination: subnet, Gateway: "\x00", NIC: 1}})
- }
-
- fakeNet := s.NetworkProtocolInstance(fakeNetNumber).(*fakeNetworkProtocol)
-
- buf := buffer.NewView(30)
-
- // Send and receive packets, and verify they are received.
- buf[dstAddrOffset] = localAddrByte
- testRecv(t, fakeNet, localAddrByte, ep, buf)
- testSendTo(t, s, remoteAddr, ep, nil)
-
- // Remove the address, then check that send/receive doesn't work anymore.
- if err := s.RemoveAddress(1, localAddr); err != nil {
- t.Fatal("RemoveAddress failed:", err)
- }
- testFailingRecv(t, fakeNet, localAddrByte, ep, buf)
- testFailingSendTo(t, s, remoteAddr, nil, &tcpip.ErrNoRoute{})
-
- // Check that removing the same address fails.
- err := s.RemoveAddress(1, localAddr)
- if _, ok := err.(*tcpip.ErrBadLocalAddress); !ok {
- t.Fatalf("RemoveAddress returned unexpected error, got = %v, want = %s", err, &tcpip.ErrBadLocalAddress{})
- }
-}
-
-func TestAddressRemovalWithRouteHeld(t *testing.T) {
- const localAddrByte byte = 0x01
- localAddr := tcpip.Address([]byte{localAddrByte})
- remoteAddr := tcpip.Address("\x02")
-
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- })
-
- ep := channel.New(10, defaultMTU, "")
- if err := s.CreateNIC(1, ep); err != nil {
- t.Fatalf("CreateNIC failed: %v", err)
- }
- fakeNet := s.NetworkProtocolInstance(fakeNetNumber).(*fakeNetworkProtocol)
- buf := buffer.NewView(30)
-
- if err := s.AddAddress(1, fakeNetNumber, localAddr); err != nil {
- t.Fatal("AddAddress failed:", err)
- }
- {
- subnet, err := tcpip.NewSubnet("\x00", "\x00")
- if err != nil {
- t.Fatal(err)
- }
- s.SetRouteTable([]tcpip.Route{{Destination: subnet, Gateway: "\x00", NIC: 1}})
- }
-
- r, err := s.FindRoute(0, "", remoteAddr, fakeNetNumber, false /* multicastLoop */)
- if err != nil {
- t.Fatal("FindRoute failed:", err)
- }
-
- // Send and receive packets, and verify they are received.
- buf[dstAddrOffset] = localAddrByte
- testRecv(t, fakeNet, localAddrByte, ep, buf)
- testSend(t, r, ep, nil)
- testSendTo(t, s, remoteAddr, ep, nil)
-
- // Remove the address, then check that send/receive doesn't work anymore.
- if err := s.RemoveAddress(1, localAddr); err != nil {
- t.Fatal("RemoveAddress failed:", err)
- }
- testFailingRecv(t, fakeNet, localAddrByte, ep, buf)
- testFailingSend(t, r, nil, &tcpip.ErrInvalidEndpointState{})
- testFailingSendTo(t, s, remoteAddr, nil, &tcpip.ErrNoRoute{})
-
- // Check that removing the same address fails.
- {
- err := s.RemoveAddress(1, localAddr)
- if _, ok := err.(*tcpip.ErrBadLocalAddress); !ok {
- t.Fatalf("RemoveAddress returned unexpected error, got = %v, want = %s", err, &tcpip.ErrBadLocalAddress{})
- }
- }
-}
-
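-// verifyAddress checks that addr is the only fakeNetNumber address assigned to
-// nicID, or that no fakeNetNumber address is assigned when addr is empty.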
-func verifyAddress(t *testing.T, s *stack.Stack, nicID tcpip.NICID, addr tcpip.Address) {
- t.Helper()
- info, ok := s.NICInfo()[nicID]
- if !ok {
- t.Fatalf("NICInfo() failed to find nicID=%d", nicID)
- }
- if len(addr) == 0 {
- // No address given, verify that there is no address assigned to the NIC.
- for _, a := range info.ProtocolAddresses {
- if a.Protocol == fakeNetNumber && a.AddressWithPrefix != (tcpip.AddressWithPrefix{}) {
- t.Errorf("verify no-address: got = %s, want = %s", a.AddressWithPrefix, tcpip.AddressWithPrefix{})
- }
- }
- return
- }
- // Address given, verify the address is assigned to the NIC and no other
- // address is.
- found := false
- for _, a := range info.ProtocolAddresses {
- if a.Protocol == fakeNetNumber {
- if a.AddressWithPrefix.Address == addr {
- found = true
- } else {
- t.Errorf("verify address: got = %s, want = %s", a.AddressWithPrefix.Address, addr)
- }
- }
- }
- if !found {
- t.Errorf("verify address: couldn't find %s on the NIC", addr)
- }
-}
-
-func TestEndpointExpiration(t *testing.T) {
- const (
- localAddrByte byte = 0x01
- remoteAddr tcpip.Address = "\x03"
- noAddr tcpip.Address = ""
- nicID tcpip.NICID = 1
- )
- localAddr := tcpip.Address([]byte{localAddrByte})
-
- for _, promiscuous := range []bool{true, false} {
- for _, spoofing := range []bool{true, false} {
- t.Run(fmt.Sprintf("promiscuous=%t spoofing=%t", promiscuous, spoofing), func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- })
-
- ep := channel.New(10, defaultMTU, "")
- if err := s.CreateNIC(nicID, ep); err != nil {
- t.Fatal("CreateNIC failed:", err)
- }
-
- {
- subnet, err := tcpip.NewSubnet("\x00", "\x00")
- if err != nil {
- t.Fatal(err)
- }
- s.SetRouteTable([]tcpip.Route{{Destination: subnet, Gateway: "\x00", NIC: 1}})
- }
-
- fakeNet := s.NetworkProtocolInstance(fakeNetNumber).(*fakeNetworkProtocol)
- buf := buffer.NewView(30)
- buf[dstAddrOffset] = localAddrByte
-
- if promiscuous {
- if err := s.SetPromiscuousMode(nicID, true); err != nil {
- t.Fatal("SetPromiscuousMode failed:", err)
- }
- }
-
- if spoofing {
- if err := s.SetSpoofing(nicID, true); err != nil {
- t.Fatal("SetSpoofing failed:", err)
- }
- }
-
- // 1. No Address yet, send should only work for spoofing, receive for
- // promiscuous mode.
- //-----------------------
- verifyAddress(t, s, nicID, noAddr)
- if promiscuous {
- testRecv(t, fakeNet, localAddrByte, ep, buf)
- } else {
- testFailingRecv(t, fakeNet, localAddrByte, ep, buf)
- }
- if spoofing {
- // FIXME(b/139841518): Spoofing doesn't work if there is no primary address.
- // testSendTo(t, s, remoteAddr, ep, nil)
- } else {
- testFailingSendTo(t, s, remoteAddr, nil, &tcpip.ErrNoRoute{})
- }
-
- // 2. Add Address, everything should work.
- //-----------------------
- if err := s.AddAddress(nicID, fakeNetNumber, localAddr); err != nil {
- t.Fatal("AddAddress failed:", err)
- }
- verifyAddress(t, s, nicID, localAddr)
- testRecv(t, fakeNet, localAddrByte, ep, buf)
- testSendTo(t, s, remoteAddr, ep, nil)
-
- // 3. Remove the address, send should only work for spoofing, receive
- // for promiscuous mode.
- //-----------------------
- if err := s.RemoveAddress(nicID, localAddr); err != nil {
- t.Fatal("RemoveAddress failed:", err)
- }
- verifyAddress(t, s, nicID, noAddr)
- if promiscuous {
- testRecv(t, fakeNet, localAddrByte, ep, buf)
- } else {
- testFailingRecv(t, fakeNet, localAddrByte, ep, buf)
- }
- if spoofing {
- // FIXME(b/139841518): Spoofing doesn't work if there is no primary address.
- // testSendTo(t, s, remoteAddr, ep, nil)
- } else {
- testFailingSendTo(t, s, remoteAddr, nil, &tcpip.ErrNoRoute{})
- }
-
- // 4. Add Address back, everything should work again.
- //-----------------------
- if err := s.AddAddress(nicID, fakeNetNumber, localAddr); err != nil {
- t.Fatal("AddAddress failed:", err)
- }
- verifyAddress(t, s, nicID, localAddr)
- testRecv(t, fakeNet, localAddrByte, ep, buf)
- testSendTo(t, s, remoteAddr, ep, nil)
-
- // 5. Take a reference to the endpoint by getting a route. Verify that
- // we can still send/receive, including sending using the route.
- //-----------------------
- r, err := s.FindRoute(0, "", remoteAddr, fakeNetNumber, false /* multicastLoop */)
- if err != nil {
- t.Fatal("FindRoute failed:", err)
- }
- testRecv(t, fakeNet, localAddrByte, ep, buf)
- testSendTo(t, s, remoteAddr, ep, nil)
- testSend(t, r, ep, nil)
-
- // 6. Remove the address. Send should only work for spoofing, receive
- // for promiscuous mode.
- //-----------------------
- if err := s.RemoveAddress(nicID, localAddr); err != nil {
- t.Fatal("RemoveAddress failed:", err)
- }
- verifyAddress(t, s, nicID, noAddr)
- if promiscuous {
- testRecv(t, fakeNet, localAddrByte, ep, buf)
- } else {
- testFailingRecv(t, fakeNet, localAddrByte, ep, buf)
- }
- if spoofing {
- testSend(t, r, ep, nil)
- testSendTo(t, s, remoteAddr, ep, nil)
- } else {
- testFailingSend(t, r, nil, &tcpip.ErrInvalidEndpointState{})
- testFailingSendTo(t, s, remoteAddr, nil, &tcpip.ErrNoRoute{})
- }
-
- // 7. Add Address back, everything should work again.
- //-----------------------
- if err := s.AddAddress(nicID, fakeNetNumber, localAddr); err != nil {
- t.Fatal("AddAddress failed:", err)
- }
- verifyAddress(t, s, nicID, localAddr)
- testRecv(t, fakeNet, localAddrByte, ep, buf)
- testSendTo(t, s, remoteAddr, ep, nil)
- testSend(t, r, ep, nil)
-
- // 8. Release the route; sendTo/recv should still work.
- //-----------------------
- r.Release()
- verifyAddress(t, s, nicID, localAddr)
- testRecv(t, fakeNet, localAddrByte, ep, buf)
- testSendTo(t, s, remoteAddr, ep, nil)
-
- // 9. Remove the address. Send should only work for spoofing, receive
- // for promiscuous mode.
- //-----------------------
- if err := s.RemoveAddress(nicID, localAddr); err != nil {
- t.Fatal("RemoveAddress failed:", err)
- }
- verifyAddress(t, s, nicID, noAddr)
- if promiscuous {
- testRecv(t, fakeNet, localAddrByte, ep, buf)
- } else {
- testFailingRecv(t, fakeNet, localAddrByte, ep, buf)
- }
- if spoofing {
- // FIXME(b/139841518): Spoofing doesn't work if there is no primary address.
- // testSendTo(t, s, remoteAddr, ep, nil)
- } else {
- testFailingSendTo(t, s, remoteAddr, nil, &tcpip.ErrNoRoute{})
- }
- })
- }
- }
-}
-
-func TestPromiscuousMode(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- })
-
- ep := channel.New(10, defaultMTU, "")
- if err := s.CreateNIC(1, ep); err != nil {
- t.Fatal("CreateNIC failed:", err)
- }
-
- {
- subnet, err := tcpip.NewSubnet("\x00", "\x00")
- if err != nil {
- t.Fatal(err)
- }
- s.SetRouteTable([]tcpip.Route{{Destination: subnet, Gateway: "\x00", NIC: 1}})
- }
-
- fakeNet := s.NetworkProtocolInstance(fakeNetNumber).(*fakeNetworkProtocol)
-
- buf := buffer.NewView(30)
-
- // Write a packet, and check that it doesn't get delivered as we don't
- // have a matching endpoint.
- const localAddrByte byte = 0x01
- buf[dstAddrOffset] = localAddrByte
- testFailingRecv(t, fakeNet, localAddrByte, ep, buf)
-
- // Set promiscuous mode, then check that packet is delivered.
- if err := s.SetPromiscuousMode(1, true); err != nil {
- t.Fatal("SetPromiscuousMode failed:", err)
- }
- testRecv(t, fakeNet, localAddrByte, ep, buf)
-
- // Check that we can't get a route as there is no local address.
- _, err := s.FindRoute(0, "", "\x02", fakeNetNumber, false /* multicastLoop */)
- if _, ok := err.(*tcpip.ErrNoRoute); !ok {
- t.Fatalf("FindRoute returned unexpected error: got = %v, want = %s", err, &tcpip.ErrNoRoute{})
- }
-
- // Set promiscuous mode to false, then check that packet can't be
- // delivered anymore.
- if err := s.SetPromiscuousMode(1, false); err != nil {
- t.Fatal("SetPromiscuousMode failed:", err)
- }
- testFailingRecv(t, fakeNet, localAddrByte, ep, buf)
-}
-
-// TestExternalSendWithHandleLocal tests that the stack creates a non-local
-// route when spoofing or promiscuous mode is enabled.
-//
-// This test makes sure that packets are transmitted from the stack.
-func TestExternalSendWithHandleLocal(t *testing.T) {
- const (
- unspecifiedNICID = 0
- nicID = 1
-
- localAddr = tcpip.Address("\x01")
- dstAddr = tcpip.Address("\x03")
- )
-
- subnet, err := tcpip.NewSubnet("\x00", "\x00")
- if err != nil {
- t.Fatal(err)
- }
-
- tests := []struct {
- name string
- configureStack func(*testing.T, *stack.Stack)
- }{
- {
- name: "Default",
- configureStack: func(*testing.T, *stack.Stack) {},
- },
- {
- name: "Spoofing",
- configureStack: func(t *testing.T, s *stack.Stack) {
- if err := s.SetSpoofing(nicID, true); err != nil {
- t.Fatalf("s.SetSpoofing(%d, true): %s", nicID, err)
- }
- },
- },
- {
- name: "Promiscuous",
- configureStack: func(t *testing.T, s *stack.Stack) {
- if err := s.SetPromiscuousMode(nicID, true); err != nil {
- t.Fatalf("s.SetPromiscuousMode(%d, true): %s", nicID, err)
- }
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- for _, handleLocal := range []bool{true, false} {
- t.Run(fmt.Sprintf("HandleLocal=%t", handleLocal), func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- HandleLocal: handleLocal,
- })
-
- ep := channel.New(1, defaultMTU, "")
- if err := s.CreateNIC(nicID, ep); err != nil {
- t.Fatalf("s.CreateNIC(%d, _): %s", nicID, err)
- }
- if err := s.AddAddress(nicID, fakeNetNumber, localAddr); err != nil {
- t.Fatalf("s.AddAddress(%d, %d, %s): %s", nicID, fakeNetNumber, localAddr, err)
- }
-
- s.SetRouteTable([]tcpip.Route{{Destination: subnet, NIC: nicID}})
-
- test.configureStack(t, s)
-
- r, err := s.FindRoute(unspecifiedNICID, localAddr, dstAddr, fakeNetNumber, false /* multicastLoop */)
- if err != nil {
- t.Fatalf("s.FindRoute(%d, %s, %s, %d, false): %s", unspecifiedNICID, localAddr, dstAddr, fakeNetNumber, err)
- }
- defer r.Release()
-
- if r.LocalAddress() != localAddr {
- t.Errorf("got r.LocalAddress() = %s, want = %s", r.LocalAddress(), localAddr)
- }
- if r.RemoteAddress() != dstAddr {
- t.Errorf("got r.RemoteAddress() = %s, want = %s", r.RemoteAddress(), dstAddr)
- }
-
- if n := ep.Drain(); n != 0 {
- t.Fatalf("got ep.Drain() = %d, want = 0", n)
- }
- if err := r.WritePacket(stack.NetworkHeaderParams{
- Protocol: fakeTransNumber,
- TTL: 123,
- TOS: stack.DefaultTOS,
- }, stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: int(r.MaxHeaderLength()),
- Data: buffer.NewView(10).ToVectorisedView(),
- })); err != nil {
- t.Fatalf("r.WritePacket(nil, _, _): %s", err)
- }
- if n := ep.Drain(); n != 1 {
- t.Fatalf("got ep.Drain() = %d, want = 1", n)
- }
- })
- }
- })
- }
-}
-
-func TestSpoofingWithAddress(t *testing.T) {
- localAddr := tcpip.Address("\x01")
- nonExistentLocalAddr := tcpip.Address("\x02")
- dstAddr := tcpip.Address("\x03")
-
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- })
-
- ep := channel.New(10, defaultMTU, "")
- if err := s.CreateNIC(1, ep); err != nil {
- t.Fatal("CreateNIC failed:", err)
- }
-
- if err := s.AddAddress(1, fakeNetNumber, localAddr); err != nil {
- t.Fatal("AddAddress failed:", err)
- }
-
- {
- subnet, err := tcpip.NewSubnet("\x00", "\x00")
- if err != nil {
- t.Fatal(err)
- }
- s.SetRouteTable([]tcpip.Route{{Destination: subnet, Gateway: "\x00", NIC: 1}})
- }
-
- // With address spoofing disabled, FindRoute does not permit an address
- // that was not added to the NIC to be used as the source.
- r, err := s.FindRoute(0, nonExistentLocalAddr, dstAddr, fakeNetNumber, false /* multicastLoop */)
- if err == nil {
- t.Errorf("FindRoute succeeded with route %+v when it should have failed", r)
- }
-
- // With address spoofing enabled, FindRoute permits any address to be used
- // as the source.
- if err := s.SetSpoofing(1, true); err != nil {
- t.Fatal("SetSpoofing failed:", err)
- }
- r, err = s.FindRoute(0, nonExistentLocalAddr, dstAddr, fakeNetNumber, false /* multicastLoop */)
- if err != nil {
- t.Fatal("FindRoute failed:", err)
- }
- if r.LocalAddress() != nonExistentLocalAddr {
- t.Errorf("got Route.LocalAddress() = %s, want = %s", r.LocalAddress(), nonExistentLocalAddr)
- }
- if r.RemoteAddress() != dstAddr {
- t.Errorf("got Route.RemoteAddress() = %s, want = %s", r.RemoteAddress(), dstAddr)
- }
- // Sending a packet works.
- testSendTo(t, s, dstAddr, ep, nil)
- testSend(t, r, ep, nil)
-
- // FindRoute should also work with a local address that exists on the NIC.
- r, err = s.FindRoute(0, localAddr, dstAddr, fakeNetNumber, false /* multicastLoop */)
- if err != nil {
- t.Fatal("FindRoute failed:", err)
- }
- if r.LocalAddress() != localAddr {
- t.Errorf("got Route.LocalAddress() = %s, want = %s", r.LocalAddress(), nonExistentLocalAddr)
- }
- if r.RemoteAddress() != dstAddr {
- t.Errorf("got Route.RemoteAddress() = %s, want = %s", r.RemoteAddress(), dstAddr)
- }
- // Sending a packet using the route works.
- testSend(t, r, ep, nil)
-}
-
-func TestSpoofingNoAddress(t *testing.T) {
- nonExistentLocalAddr := tcpip.Address("\x01")
- dstAddr := tcpip.Address("\x02")
-
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- })
-
- ep := channel.New(10, defaultMTU, "")
- if err := s.CreateNIC(1, ep); err != nil {
- t.Fatal("CreateNIC failed:", err)
- }
-
- {
- subnet, err := tcpip.NewSubnet("\x00", "\x00")
- if err != nil {
- t.Fatal(err)
- }
- s.SetRouteTable([]tcpip.Route{{Destination: subnet, Gateway: "\x00", NIC: 1}})
- }
-
- // With address spoofing disabled, FindRoute does not permit an address
- // that was not added to the NIC to be used as the source.
- r, err := s.FindRoute(0, nonExistentLocalAddr, dstAddr, fakeNetNumber, false /* multicastLoop */)
- if err == nil {
- t.Errorf("FindRoute succeeded with route %+v when it should have failed", r)
- }
- // Sending a packet fails.
- testFailingSendTo(t, s, dstAddr, nil, &tcpip.ErrNoRoute{})
-
- // With address spoofing enabled, FindRoute permits any address to be used
- // as the source.
- if err := s.SetSpoofing(1, true); err != nil {
- t.Fatal("SetSpoofing failed:", err)
- }
- r, err = s.FindRoute(0, nonExistentLocalAddr, dstAddr, fakeNetNumber, false /* multicastLoop */)
- if err != nil {
- t.Fatal("FindRoute failed:", err)
- }
- if r.LocalAddress() != nonExistentLocalAddr {
- t.Errorf("got Route.LocalAddress() = %s, want = %s", r.LocalAddress(), nonExistentLocalAddr)
- }
- if r.RemoteAddress() != dstAddr {
- t.Errorf("got Route.RemoteAddress() = %s, want = %s", r.RemoteAddress(), dstAddr)
- }
- // Sending a packet works.
- // FIXME(b/139841518): Spoofing doesn't work if there is no primary address.
- // testSendTo(t, s, dstAddr, ep, nil)
-}
-
-func TestOutgoingBroadcastWithEmptyRouteTable(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- })
-
- ep := channel.New(10, defaultMTU, "")
- if err := s.CreateNIC(1, ep); err != nil {
- t.Fatal("CreateNIC failed:", err)
- }
- s.SetRouteTable([]tcpip.Route{})
-
- // If no address is assigned to the NIC, it won't work.
- {
- _, err := s.FindRoute(1, header.IPv4Any, header.IPv4Broadcast, fakeNetNumber, false /* multicastLoop */)
- if _, ok := err.(*tcpip.ErrNetworkUnreachable); !ok {
- t.Fatalf("got FindRoute(1, %s, %s, %d) = %s, want = %s", header.IPv4Any, header.IPv4Broadcast, fakeNetNumber, err, &tcpip.ErrNetworkUnreachable{})
- }
- }
-
- protoAddr := tcpip.ProtocolAddress{Protocol: fakeNetNumber, AddressWithPrefix: tcpip.AddressWithPrefix{Address: header.IPv4Any}}
- if err := s.AddProtocolAddress(1, protoAddr); err != nil {
- t.Fatalf("AddProtocolAddress(1, %v) failed: %v", protoAddr, err)
- }
- r, err := s.FindRoute(1, header.IPv4Any, header.IPv4Broadcast, fakeNetNumber, false /* multicastLoop */)
- if err != nil {
- t.Fatalf("FindRoute(1, %v, %v, %d) failed: %v", header.IPv4Any, header.IPv4Broadcast, fakeNetNumber, err)
- }
- if r.LocalAddress() != header.IPv4Any {
- t.Errorf("got Route.LocalAddress() = %s, want = %s", r.LocalAddress(), header.IPv4Any)
- }
-
- if r.RemoteAddress() != header.IPv4Broadcast {
- t.Errorf("got Route.RemoteAddress() = %s, want = %s", r.RemoteAddress(), header.IPv4Broadcast)
- }
-
- // If the NIC doesn't exist, it won't work.
- {
- _, err := s.FindRoute(2, header.IPv4Any, header.IPv4Broadcast, fakeNetNumber, false /* multicastLoop */)
- if _, ok := err.(*tcpip.ErrNetworkUnreachable); !ok {
- t.Fatalf("got FindRoute(2, %v, %v, %d) = %v want = %v", header.IPv4Any, header.IPv4Broadcast, fakeNetNumber, err, &tcpip.ErrNetworkUnreachable{})
- }
- }
-}
-
-func TestOutgoingBroadcastWithRouteTable(t *testing.T) {
- defaultAddr := tcpip.AddressWithPrefix{Address: header.IPv4Any}
- // Local subnet on NIC1: 192.168.1.58/24, gateway 192.168.1.1.
- nic1Addr := tcpip.AddressWithPrefix{Address: "\xc0\xa8\x01\x3a", PrefixLen: 24}
- nic1Gateway := testutil.MustParse4("192.168.1.1")
- // Local subnet on NIC2: 10.10.10.5/24, gateway 10.10.10.1.
- nic2Addr := tcpip.AddressWithPrefix{Address: "\x0a\x0a\x0a\x05", PrefixLen: 24}
- nic2Gateway := testutil.MustParse4("10.10.10.1")
-
- // Create a new stack with two NICs.
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- })
- ep := channel.New(10, defaultMTU, "")
- if err := s.CreateNIC(1, ep); err != nil {
- t.Fatalf("CreateNIC failed: %s", err)
- }
- if err := s.CreateNIC(2, ep); err != nil {
- t.Fatalf("CreateNIC failed: %s", err)
- }
- nic1ProtoAddr := tcpip.ProtocolAddress{Protocol: fakeNetNumber, AddressWithPrefix: nic1Addr}
- if err := s.AddProtocolAddress(1, nic1ProtoAddr); err != nil {
- t.Fatalf("AddProtocolAddress(1, %v) failed: %v", nic1ProtoAddr, err)
- }
-
- nic2ProtoAddr := tcpip.ProtocolAddress{Protocol: fakeNetNumber, AddressWithPrefix: nic2Addr}
- if err := s.AddProtocolAddress(2, nic2ProtoAddr); err != nil {
- t.Fatalf("AddAddress(2, %v) failed: %v", nic2ProtoAddr, err)
- }
-
- // Set the initial route table.
- rt := []tcpip.Route{
- {Destination: nic1Addr.Subnet(), NIC: 1},
- {Destination: nic2Addr.Subnet(), NIC: 2},
- {Destination: defaultAddr.Subnet(), Gateway: nic2Gateway, NIC: 2},
- {Destination: defaultAddr.Subnet(), Gateway: nic1Gateway, NIC: 1},
- }
- s.SetRouteTable(rt)
-
- // When an interface is given, the route for a broadcast goes through it.
- r, err := s.FindRoute(1, nic1Addr.Address, header.IPv4Broadcast, fakeNetNumber, false /* multicastLoop */)
- if err != nil {
- t.Fatalf("FindRoute(1, %v, %v, %d) failed: %v", nic1Addr.Address, header.IPv4Broadcast, fakeNetNumber, err)
- }
- if r.LocalAddress() != nic1Addr.Address {
- t.Errorf("got Route.LocalAddress() = %s, want = %s", r.LocalAddress(), nic1Addr.Address)
- }
-
- if r.RemoteAddress() != header.IPv4Broadcast {
- t.Errorf("got Route.RemoteAddress() = %s, want = %s", r.RemoteAddress(), header.IPv4Broadcast)
- }
-
- // When an interface is not given, it consults the route table.
- // Case 1: Using the default route.
- r, err = s.FindRoute(0, "", header.IPv4Broadcast, fakeNetNumber, false /* multicastLoop */)
- if err != nil {
- t.Fatalf("FindRoute(0, \"\", %s, %d) failed: %s", header.IPv4Broadcast, fakeNetNumber, err)
- }
- if r.LocalAddress() != nic2Addr.Address {
- t.Errorf("got Route.LocalAddress() = %s, want = %s", r.LocalAddress(), nic2Addr.Address)
- }
-
- if r.RemoteAddress() != header.IPv4Broadcast {
- t.Errorf("got Route.RemoteAddress() = %s, want = %s", r.RemoteAddress(), header.IPv4Broadcast)
- }
-
- // Case 2: An explicit route for broadcast is selected over the default.
- rt = append(
- []tcpip.Route{
- {Destination: tcpip.AddressWithPrefix{Address: header.IPv4Broadcast, PrefixLen: 8 * header.IPv4AddressSize}.Subnet(), NIC: 1},
- },
- rt...,
- )
- s.SetRouteTable(rt)
- r, err = s.FindRoute(0, "", header.IPv4Broadcast, fakeNetNumber, false /* multicastLoop */)
- if err != nil {
- t.Fatalf("FindRoute(0, \"\", %s, %d) failed: %s", header.IPv4Broadcast, fakeNetNumber, err)
- }
- if r.LocalAddress() != nic1Addr.Address {
- t.Errorf("got Route.LocalAddress() = %s, want = %s", r.LocalAddress(), nic1Addr.Address)
- }
-
- if r.RemoteAddress() != header.IPv4Broadcast {
- t.Errorf("got Route.RemoteAddress() = %s, want = %s", r.RemoteAddress(), header.IPv4Broadcast)
- }
-}
-
-func TestMulticastOrIPv6LinkLocalNeedsNoRoute(t *testing.T) {
- for _, tc := range []struct {
- name string
- routeNeeded bool
- address tcpip.Address
- }{
- // IPv4 multicast address range: 224.0.0.0 - 239.255.255.255
- // <=> 0xe0.0x00.0x00.0x00 - 0xef.0xff.0xff.0xff
- {"IPv4 Multicast 1", false, "\xe0\x00\x00\x00"},
- {"IPv4 Multicast 2", false, "\xef\xff\xff\xff"},
- {"IPv4 Unicast 1", true, "\xdf\xff\xff\xff"},
- {"IPv4 Unicast 2", true, "\xf0\x00\x00\x00"},
- {"IPv4 Unicast 3", true, "\x00\x00\x00\x00"},
-
- // IPv6 multicast address is 0xff[8] + flags[4] + scope[4] + groupId[112]
- {"IPv6 Multicast 1", false, "\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"},
- {"IPv6 Multicast 2", false, "\xff\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"},
- {"IPv6 Multicast 3", false, "\xff\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"},
-
- // IPv6 link-local address starts with fe80::/10.
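- // The top ten bits are 1111 1110 10, so the range covers fe80:: through
- // febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff, which is why the \xfe\xbf
- // cases below are still link-local.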
- {"IPv6 Unicast Link-Local 1", false, "\xfe\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"},
- {"IPv6 Unicast Link-Local 2", false, "\xfe\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01"},
- {"IPv6 Unicast Link-Local 3", false, "\xfe\x80\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff"},
- {"IPv6 Unicast Link-Local 4", false, "\xfe\xbf\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"},
- {"IPv6 Unicast Link-Local 5", false, "\xfe\xbf\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"},
-
- // IPv6 addresses that are neither multicast nor link-local.
- {"IPv6 Unicast Not Link-Local 1", true, "\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"},
- {"IPv6 Unicast Not Link-Local 2", true, "\xf0\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"},
- {"IPv6 Unicast Not Link-local 3", true, "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"},
- {"IPv6 Unicast Not Link-Local 4", true, "\xfe\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"},
- {"IPv6 Unicast Not Link-Local 5", true, "\xfe\xdf\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"},
- {"IPv6 Unicast Not Link-Local 6", true, "\xfd\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"},
- {"IPv6 Unicast Not Link-Local 7", true, "\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"},
- } {
- t.Run(tc.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- })
-
- ep := channel.New(10, defaultMTU, "")
- if err := s.CreateNIC(1, ep); err != nil {
- t.Fatal("CreateNIC failed:", err)
- }
-
- s.SetRouteTable([]tcpip.Route{})
-
- var anyAddr tcpip.Address
- if len(tc.address) == header.IPv4AddressSize {
- anyAddr = header.IPv4Any
- } else {
- anyAddr = header.IPv6Any
- }
-
- var want tcpip.Error = &tcpip.ErrNetworkUnreachable{}
- if tc.routeNeeded {
- want = &tcpip.ErrNoRoute{}
- }
-
- // If no address is assigned to the NIC, it won't work.
- if _, err := s.FindRoute(1, anyAddr, tc.address, fakeNetNumber, false /* multicastLoop */); err != want {
- t.Fatalf("got FindRoute(1, %v, %v, %v) = %v, want = %v", anyAddr, tc.address, fakeNetNumber, err, want)
- }
-
- if err := s.AddAddress(1, fakeNetNumber, anyAddr); err != nil {
- t.Fatalf("AddAddress(%v, %v) failed: %v", fakeNetNumber, anyAddr, err)
- }
-
- if r, err := s.FindRoute(1, anyAddr, tc.address, fakeNetNumber, false /* multicastLoop */); tc.routeNeeded {
- // Route table is empty but we need a route; this should cause an error.
- if _, ok := err.(*tcpip.ErrNoRoute); !ok {
- t.Fatalf("got FindRoute(1, %v, %v, %v) = %v, want = %v", anyAddr, tc.address, fakeNetNumber, err, &tcpip.ErrNoRoute{})
- }
- } else {
- if err != nil {
- t.Fatalf("FindRoute(1, %v, %v, %v) failed: %v", anyAddr, tc.address, fakeNetNumber, err)
- }
- if r.LocalAddress() != anyAddr {
- t.Errorf("Bad local address: got %v, want = %v", r.LocalAddress(), anyAddr)
- }
- if r.RemoteAddress() != tc.address {
- t.Errorf("Bad remote address: got %v, want = %v", r.RemoteAddress(), tc.address)
- }
- }
- // If the NIC doesn't exist, it won't work.
- if _, err := s.FindRoute(2, anyAddr, tc.address, fakeNetNumber, false /* multicastLoop */); err != want {
- t.Fatalf("got FindRoute(2, %v, %v, %v) = %v want = %v", anyAddr, tc.address, fakeNetNumber, err, want)
- }
- })
- }
-}
-
-func TestNetworkOption(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- TransportProtocols: []stack.TransportProtocolFactory{},
- })
-
- opt := tcpip.DefaultTTLOption(5)
- if err := s.SetNetworkProtocolOption(fakeNetNumber, &opt); err != nil {
- t.Fatalf("s.SetNetworkProtocolOption(%d, &%T(%d)): %s", fakeNetNumber, opt, opt, err)
- }
-
- var optGot tcpip.DefaultTTLOption
- if err := s.NetworkProtocolOption(fakeNetNumber, &optGot); err != nil {
- t.Fatalf("s.NetworkProtocolOption(%d, &%T): %s", fakeNetNumber, optGot, err)
- }
-
- if opt != optGot {
- t.Errorf("got optGot = %d, want = %d", optGot, opt)
- }
-}
-
-func TestGetMainNICAddressAddPrimaryNonPrimary(t *testing.T) {
- const nicID = 1
-
- for _, addrLen := range []int{4, 16} {
- t.Run(fmt.Sprintf("addrLen=%d", addrLen), func(t *testing.T) {
- for canBe := 0; canBe < 3; canBe++ {
- t.Run(fmt.Sprintf("canBe=%d", canBe), func(t *testing.T) {
- for never := 0; never < 3; never++ {
- t.Run(fmt.Sprintf("never=%d", never), func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- })
- ep := channel.New(10, defaultMTU, "")
- if err := s.CreateNIC(nicID, ep); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID, err)
- }
- // Insert <canBe> primary and <never> never-primary addresses.
- // Each one adds an address endpoint to the NIC.
- primaryAddrAdded := make(map[tcpip.AddressWithPrefix]struct{})
- for i := 0; i < canBe+never; i++ {
- var behavior stack.PrimaryEndpointBehavior
- if i < canBe {
- behavior = stack.CanBePrimaryEndpoint
- } else {
- behavior = stack.NeverPrimaryEndpoint
- }
- // Add an address and, in the case of a primary one, include a
- // prefixLen.
- address := tcpip.Address(bytes.Repeat([]byte{byte(i)}, addrLen))
- if behavior == stack.CanBePrimaryEndpoint {
- protocolAddress := tcpip.ProtocolAddress{
- Protocol: fakeNetNumber,
- AddressWithPrefix: tcpip.AddressWithPrefix{
- Address: address,
- PrefixLen: addrLen * 8,
- },
- }
- if err := s.AddProtocolAddressWithOptions(nicID, protocolAddress, behavior); err != nil {
- t.Fatalf("AddProtocolAddressWithOptions(%d, %#v, %d): %s", nicID, protocolAddress, behavior, err)
- }
- // Remember the address/prefix.
- primaryAddrAdded[protocolAddress.AddressWithPrefix] = struct{}{}
- } else {
- if err := s.AddAddressWithOptions(nicID, fakeNetNumber, address, behavior); err != nil {
- t.Fatalf("AddAddressWithOptions(%d, %d, %s, %d): %s:", nicID, fakeNetNumber, address, behavior, err)
- }
- }
- }
- // Check that GetMainNICAddress returns an address if at least
- // one primary address was added. In that case make sure the
- // address/prefixLen matches what we added.
- gotAddr, err := s.GetMainNICAddress(nicID, fakeNetNumber)
- if err != nil {
- t.Fatalf("GetMainNICAddress(%d, %d): %s", nicID, fakeNetNumber, err)
- }
- if len(primaryAddrAdded) == 0 {
- // No primary addresses present.
- if wantAddr := (tcpip.AddressWithPrefix{}); gotAddr != wantAddr {
- t.Fatalf("got GetMainNICAddress(%d, %d) = %s, want = %s", nicID, fakeNetNumber, gotAddr, wantAddr)
- }
- } else {
- // At least one primary address was added, verify the returned
- // address is in the list of primary addresses we added.
- if _, ok := primaryAddrAdded[gotAddr]; !ok {
- t.Fatalf("got GetMainNICAddress(%d, %d) = %s, want = %s", nicID, fakeNetNumber, gotAddr, primaryAddrAdded)
- }
- }
- })
- }
- })
- }
- })
- }
-}
-
-func TestGetMainNICAddressErrors(t *testing.T) {
- const nicID = 1
-
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, arp.NewProtocol},
- })
- if err := s.CreateNIC(nicID, loopback.New()); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID, err)
- }
-
- // Sanity check with a successful call.
- if addr, err := s.GetMainNICAddress(nicID, ipv4.ProtocolNumber); err != nil {
- t.Errorf("s.GetMainNICAddress(%d, %d): %s", nicID, ipv4.ProtocolNumber, err)
- } else if want := (tcpip.AddressWithPrefix{}); addr != want {
- t.Errorf("got s.GetMainNICAddress(%d, %d) = %s, want = %s", nicID, ipv4.ProtocolNumber, addr, want)
- }
-
- const unknownNICID = nicID + 1
- switch addr, err := s.GetMainNICAddress(unknownNICID, ipv4.ProtocolNumber); err.(type) {
- case *tcpip.ErrUnknownNICID:
- default:
- t.Errorf("got s.GetMainNICAddress(%d, %d) = (%s, %T), want = (_, tcpip.ErrUnknownNICID)", unknownNICID, ipv4.ProtocolNumber, addr, err)
- }
-
- // ARP is not an addressable network endpoint.
- switch addr, err := s.GetMainNICAddress(nicID, arp.ProtocolNumber); err.(type) {
- case *tcpip.ErrNotSupported:
- default:
- t.Errorf("got s.GetMainNICAddress(%d, %d) = (%s, %T), want = (_, tcpip.ErrNotSupported)", nicID, arp.ProtocolNumber, addr, err)
- }
-
- const unknownProtocolNumber = 1234
- switch addr, err := s.GetMainNICAddress(nicID, unknownProtocolNumber); err.(type) {
- case *tcpip.ErrUnknownProtocol:
- default:
- t.Errorf("got s.GetMainNICAddress(%d, %d) = (%s, %T), want = (_, tcpip.ErrUnknownProtocol)", nicID, unknownProtocolNumber, addr, err)
- }
-}
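TestGetMainNICAddressErrors above matches failures by concrete error type rather than by message text. A minimal standalone sketch of that pattern, assuming the gvisor.dev/gvisor import paths used in this tree and an arbitrary NIC ID, might look like the following; the same type switch works for *tcpip.ErrNotSupported and *tcpip.ErrUnknownProtocol, as the test shows.

    package main

    import (
        "fmt"

        "gvisor.dev/gvisor/pkg/tcpip"
        "gvisor.dev/gvisor/pkg/tcpip/link/loopback"
        "gvisor.dev/gvisor/pkg/tcpip/network/arp"
        "gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
        "gvisor.dev/gvisor/pkg/tcpip/stack"
    )

    func main() {
        const nicID = 1

        s := stack.New(stack.Options{
            NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, arp.NewProtocol},
        })
        if err := s.CreateNIC(nicID, loopback.New()); err != nil {
            panic(fmt.Sprintf("CreateNIC(%d, _): %s", nicID, err))
        }

        // Asking for the main address of a NIC that does not exist yields a
        // typed error, which callers match with a type switch.
        switch _, err := s.GetMainNICAddress(nicID+1, ipv4.ProtocolNumber); err.(type) {
        case *tcpip.ErrUnknownNICID:
            fmt.Println("unknown NIC, as expected")
        default:
            fmt.Printf("unexpected result: %v\n", err)
        }
    }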
-
-func TestGetMainNICAddressAddRemove(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- })
- ep := channel.New(10, defaultMTU, "")
- if err := s.CreateNIC(1, ep); err != nil {
- t.Fatal("CreateNIC failed:", err)
- }
-
- for _, tc := range []struct {
- name string
- address tcpip.Address
- prefixLen int
- }{
- {"IPv4", "\x01\x01\x01\x01", 24},
- {"IPv6", "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01", 116},
- } {
- t.Run(tc.name, func(t *testing.T) {
- protocolAddress := tcpip.ProtocolAddress{
- Protocol: fakeNetNumber,
- AddressWithPrefix: tcpip.AddressWithPrefix{
- Address: tc.address,
- PrefixLen: tc.prefixLen,
- },
- }
- if err := s.AddProtocolAddress(1, protocolAddress); err != nil {
- t.Fatal("AddProtocolAddress failed:", err)
- }
-
- // Check that we get the right initial address and prefix length.
- if err := checkGetMainNICAddress(s, 1, fakeNetNumber, protocolAddress.AddressWithPrefix); err != nil {
- t.Fatal(err)
- }
-
- if err := s.RemoveAddress(1, protocolAddress.AddressWithPrefix.Address); err != nil {
- t.Fatal("RemoveAddress failed:", err)
- }
-
- // Check that we get no address after removal.
- if err := checkGetMainNICAddress(s, 1, fakeNetNumber, tcpip.AddressWithPrefix{}); err != nil {
- t.Fatal(err)
- }
- })
- }
-}
-
-// Simple network address generator. Good for 255 addresses.
-type addressGenerator struct{ cnt byte }
-
-func (g *addressGenerator) next(addrLen int) tcpip.Address {
- g.cnt++
- return tcpip.Address(bytes.Repeat([]byte{g.cnt}, addrLen))
-}
-
-func verifyAddresses(t *testing.T, expectedAddresses, gotAddresses []tcpip.ProtocolAddress) {
- t.Helper()
-
- if len(gotAddresses) != len(expectedAddresses) {
- t.Fatalf("got len(addresses) = %d, want = %d", len(gotAddresses), len(expectedAddresses))
- }
-
- sort.Slice(gotAddresses, func(i, j int) bool {
- return gotAddresses[i].AddressWithPrefix.Address < gotAddresses[j].AddressWithPrefix.Address
- })
- sort.Slice(expectedAddresses, func(i, j int) bool {
- return expectedAddresses[i].AddressWithPrefix.Address < expectedAddresses[j].AddressWithPrefix.Address
- })
-
- for i, gotAddr := range gotAddresses {
- expectedAddr := expectedAddresses[i]
- if gotAddr != expectedAddr {
- t.Errorf("got address = %+v, wanted = %+v", gotAddr, expectedAddr)
- }
- }
-}
-
-func TestAddAddress(t *testing.T) {
- const nicID = 1
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- })
- ep := channel.New(10, defaultMTU, "")
- if err := s.CreateNIC(nicID, ep); err != nil {
- t.Fatal("CreateNIC failed:", err)
- }
-
- var addrGen addressGenerator
- expectedAddresses := make([]tcpip.ProtocolAddress, 0, 2)
- for _, addrLen := range []int{4, 16} {
- address := addrGen.next(addrLen)
- if err := s.AddAddress(nicID, fakeNetNumber, address); err != nil {
- t.Fatalf("AddAddress(address=%s) failed: %s", address, err)
- }
- expectedAddresses = append(expectedAddresses, tcpip.ProtocolAddress{
- Protocol: fakeNetNumber,
- AddressWithPrefix: tcpip.AddressWithPrefix{Address: address, PrefixLen: fakeDefaultPrefixLen},
- })
- }
-
- gotAddresses := s.AllAddresses()[nicID]
- verifyAddresses(t, expectedAddresses, gotAddresses)
-}
-
-func TestAddProtocolAddress(t *testing.T) {
- const nicID = 1
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- })
- ep := channel.New(10, defaultMTU, "")
- if err := s.CreateNIC(nicID, ep); err != nil {
- t.Fatal("CreateNIC failed:", err)
- }
-
- var addrGen addressGenerator
- addrLenRange := []int{4, 16}
- prefixLenRange := []int{8, 13, 20, 32}
- expectedAddresses := make([]tcpip.ProtocolAddress, 0, len(addrLenRange)*len(prefixLenRange))
- for _, addrLen := range addrLenRange {
- for _, prefixLen := range prefixLenRange {
- protocolAddress := tcpip.ProtocolAddress{
- Protocol: fakeNetNumber,
- AddressWithPrefix: tcpip.AddressWithPrefix{
- Address: addrGen.next(addrLen),
- PrefixLen: prefixLen,
- },
- }
- if err := s.AddProtocolAddress(nicID, protocolAddress); err != nil {
- t.Errorf("AddProtocolAddress(%+v) failed: %s", protocolAddress, err)
- }
- expectedAddresses = append(expectedAddresses, protocolAddress)
- }
- }
-
- gotAddresses := s.AllAddresses()[nicID]
- verifyAddresses(t, expectedAddresses, gotAddresses)
-}
-
-func TestAddAddressWithOptions(t *testing.T) {
- const nicID = 1
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- })
- ep := channel.New(10, defaultMTU, "")
- if err := s.CreateNIC(nicID, ep); err != nil {
- t.Fatal("CreateNIC failed:", err)
- }
-
- addrLenRange := []int{4, 16}
- behaviorRange := []stack.PrimaryEndpointBehavior{stack.CanBePrimaryEndpoint, stack.FirstPrimaryEndpoint, stack.NeverPrimaryEndpoint}
- expectedAddresses := make([]tcpip.ProtocolAddress, 0, len(addrLenRange)*len(behaviorRange))
- var addrGen addressGenerator
- for _, addrLen := range addrLenRange {
- for _, behavior := range behaviorRange {
- address := addrGen.next(addrLen)
- if err := s.AddAddressWithOptions(nicID, fakeNetNumber, address, behavior); err != nil {
- t.Fatalf("AddAddressWithOptions(address=%s, behavior=%d) failed: %s", address, behavior, err)
- }
- expectedAddresses = append(expectedAddresses, tcpip.ProtocolAddress{
- Protocol: fakeNetNumber,
- AddressWithPrefix: tcpip.AddressWithPrefix{Address: address, PrefixLen: fakeDefaultPrefixLen},
- })
- }
- }
-
- gotAddresses := s.AllAddresses()[nicID]
- verifyAddresses(t, expectedAddresses, gotAddresses)
-}
-
-func TestAddProtocolAddressWithOptions(t *testing.T) {
- const nicID = 1
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- })
- ep := channel.New(10, defaultMTU, "")
- if err := s.CreateNIC(nicID, ep); err != nil {
- t.Fatal("CreateNIC failed:", err)
- }
-
- addrLenRange := []int{4, 16}
- prefixLenRange := []int{8, 13, 20, 32}
- behaviorRange := []stack.PrimaryEndpointBehavior{stack.CanBePrimaryEndpoint, stack.FirstPrimaryEndpoint, stack.NeverPrimaryEndpoint}
- expectedAddresses := make([]tcpip.ProtocolAddress, 0, len(addrLenRange)*len(prefixLenRange)*len(behaviorRange))
- var addrGen addressGenerator
- for _, addrLen := range addrLenRange {
- for _, prefixLen := range prefixLenRange {
- for _, behavior := range behaviorRange {
- protocolAddress := tcpip.ProtocolAddress{
- Protocol: fakeNetNumber,
- AddressWithPrefix: tcpip.AddressWithPrefix{
- Address: addrGen.next(addrLen),
- PrefixLen: prefixLen,
- },
- }
- if err := s.AddProtocolAddressWithOptions(nicID, protocolAddress, behavior); err != nil {
- t.Fatalf("AddProtocolAddressWithOptions(%+v, %d) failed: %s", protocolAddress, behavior, err)
- }
- expectedAddresses = append(expectedAddresses, protocolAddress)
- }
- }
- }
-
- gotAddresses := s.AllAddresses()[nicID]
- verifyAddresses(t, expectedAddresses, gotAddresses)
-}
-
-func TestCreateNICWithOptions(t *testing.T) {
- type callArgsAndExpect struct {
- nicID tcpip.NICID
- opts stack.NICOptions
- err tcpip.Error
- }
-
- tests := []struct {
- desc string
- calls []callArgsAndExpect
- }{
- {
- desc: "DuplicateNICID",
- calls: []callArgsAndExpect{
- {
- nicID: tcpip.NICID(1),
- opts: stack.NICOptions{Name: "eth1"},
- err: nil,
- },
- {
- nicID: tcpip.NICID(1),
- opts: stack.NICOptions{Name: "eth2"},
- err: &tcpip.ErrDuplicateNICID{},
- },
- },
- },
- {
- desc: "DuplicateName",
- calls: []callArgsAndExpect{
- {
- nicID: tcpip.NICID(1),
- opts: stack.NICOptions{Name: "lo"},
- err: nil,
- },
- {
- nicID: tcpip.NICID(2),
- opts: stack.NICOptions{Name: "lo"},
- err: &tcpip.ErrDuplicateNICID{},
- },
- },
- },
- {
- desc: "Unnamed",
- calls: []callArgsAndExpect{
- {
- nicID: tcpip.NICID(1),
- opts: stack.NICOptions{},
- err: nil,
- },
- {
- nicID: tcpip.NICID(2),
- opts: stack.NICOptions{},
- err: nil,
- },
- },
- },
- {
- desc: "UnnamedDuplicateNICID",
- calls: []callArgsAndExpect{
- {
- nicID: tcpip.NICID(1),
- opts: stack.NICOptions{},
- err: nil,
- },
- {
- nicID: tcpip.NICID(1),
- opts: stack.NICOptions{},
- err: &tcpip.ErrDuplicateNICID{},
- },
- },
- },
- }
- for _, test := range tests {
- t.Run(test.desc, func(t *testing.T) {
- s := stack.New(stack.Options{})
- ep := channel.New(0, 0, "\x00\x00\x00\x00\x00\x00")
- for _, call := range test.calls {
- if got, want := s.CreateNICWithOptions(call.nicID, ep, call.opts), call.err; got != want {
- t.Fatalf("CreateNICWithOptions(%v, _, %+v) = %v, want %v", call.nicID, call.opts, got, want)
- }
- }
- })
- }
-}
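The table in TestCreateNICWithOptions boils down to two rules: a NIC ID may be used at most once, and a non-empty name may be used at most once. A hedged sketch of the same calls outside the test harness (the zero queue size, zero MTU, and all-zero link address passed to channel.New simply mirror the test and are otherwise placeholders) could be:

    package main

    import (
        "fmt"

        "gvisor.dev/gvisor/pkg/tcpip"
        "gvisor.dev/gvisor/pkg/tcpip/link/channel"
        "gvisor.dev/gvisor/pkg/tcpip/stack"
    )

    func main() {
        s := stack.New(stack.Options{})
        ep := channel.New(0, 0, "\x00\x00\x00\x00\x00\x00")

        // Named NICs must be unique by both ID and name.
        if err := s.CreateNICWithOptions(tcpip.NICID(1), ep, stack.NICOptions{Name: "eth1"}); err != nil {
            panic(fmt.Sprintf("CreateNICWithOptions: %s", err))
        }

        // Reusing the ID is rejected with a typed error.
        err := s.CreateNICWithOptions(tcpip.NICID(1), ep, stack.NICOptions{Name: "eth2"})
        if _, ok := err.(*tcpip.ErrDuplicateNICID); !ok {
            panic(fmt.Sprintf("expected *tcpip.ErrDuplicateNICID, got %v", err))
        }
        fmt.Println("duplicate NIC ID rejected, as expected")
    }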
-
-func TestNICStats(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- })
-
- nics := []struct {
- addr tcpip.Address
- txByteCount int
- rxByteCount int
- }{
- {
- addr: "\x01",
- txByteCount: 30,
- rxByteCount: 10,
- },
- {
- addr: "\x02",
- txByteCount: 50,
- rxByteCount: 20,
- },
- }
-
- var txBytesTotal, rxBytesTotal, txPacketsTotal, rxPacketsTotal int
- for i, nic := range nics {
- nicid := tcpip.NICID(i)
- ep := channel.New(1, defaultMTU, "")
- if err := s.CreateNIC(nicid, ep); err != nil {
- t.Fatal("CreateNIC failed: ", err)
- }
- if err := s.AddAddress(nicid, fakeNetNumber, nic.addr); err != nil {
- t.Fatal("AddAddress failed:", err)
- }
-
- {
- subnet, err := tcpip.NewSubnet(nic.addr, "\xff")
- if err != nil {
- t.Fatal(err)
- }
- s.SetRouteTable([]tcpip.Route{{Destination: subnet, Gateway: "\x00", NIC: nicid}})
- }
-
- nicStats := s.NICInfo()[nicid].Stats
-
- // Inbound packet.
- rxBuffer := buffer.NewView(nic.rxByteCount)
- ep.InjectInbound(fakeNetNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: rxBuffer.ToVectorisedView(),
- }))
- if got, want := nicStats.Rx.Packets.Value(), uint64(1); got != want {
- t.Errorf("got Rx.Packets.Value() = %d, want = %d", got, want)
- }
- if got, want := nicStats.Rx.Bytes.Value(), uint64(nic.rxByteCount); got != want {
- t.Errorf("got Rx.Bytes.Value() = %d, want = %d", got, want)
- }
- rxPacketsTotal++
- rxBytesTotal += nic.rxByteCount
-
- // Outbound packet.
- txBuffer := buffer.NewView(nic.txByteCount)
- actualTxLength := nic.txByteCount + fakeNetHeaderLen
- if err := sendTo(s, nic.addr, txBuffer); err != nil {
- t.Fatal("sendTo failed: ", err)
- }
- want := ep.Drain()
- if got := nicStats.Tx.Packets.Value(); got != uint64(want) {
- t.Errorf("got Tx.Packets.Value() = %d, ep.Drain() = %d", got, want)
- }
- if got, want := nicStats.Tx.Bytes.Value(), uint64(actualTxLength); got != want {
- t.Errorf("got Tx.Bytes.Value() = %d, want = %d", got, want)
- }
- txPacketsTotal += want
- txBytesTotal += actualTxLength
- }
-
-	// Now verify that each NIC's stats were correctly aggregated at the stack level.
-	if got, want := s.Stats().NICs.Rx.Packets.Value(), uint64(rxPacketsTotal); got != want {
-		t.Errorf("got s.Stats().NICs.Rx.Packets.Value() = %d, want = %d", got, want)
-	}
-	if got, want := s.Stats().NICs.Rx.Bytes.Value(), uint64(rxBytesTotal); got != want {
-		t.Errorf("got s.Stats().NICs.Rx.Bytes.Value() = %d, want = %d", got, want)
-	}
-	if got, want := s.Stats().NICs.Tx.Packets.Value(), uint64(txPacketsTotal); got != want {
-		t.Errorf("got s.Stats().NICs.Tx.Packets.Value() = %d, want = %d", got, want)
-	}
-	if got, want := s.Stats().NICs.Tx.Bytes.Value(), uint64(txBytesTotal); got != want {
-		t.Errorf("got s.Stats().NICs.Tx.Bytes.Value() = %d, want = %d", got, want)
-	}
-}
-
-// TestNICContextPreservation tests that the Context data passed to
-// stack.CreateNICWithOptions via stack.NICOptions.Context can later be read
-// back through stack.NICInfo.
-func TestNICContextPreservation(t *testing.T) {
- var ctx *int
- tests := []struct {
- name string
- opts stack.NICOptions
- want stack.NICContext
- }{
- {
- "context_set",
- stack.NICOptions{Context: ctx},
- ctx,
- },
- {
- "context_not_set",
- stack.NICOptions{},
- nil,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{})
- id := tcpip.NICID(1)
- ep := channel.New(0, 0, "\x00\x00\x00\x00\x00\x00")
- if err := s.CreateNICWithOptions(id, ep, test.opts); err != nil {
- t.Fatalf("got stack.CreateNICWithOptions(%d, %+v, %+v) = %s, want nil", id, ep, test.opts, err)
- }
- nicinfos := s.NICInfo()
- nicinfo, ok := nicinfos[id]
- if !ok {
- t.Fatalf("got nicinfos[%d] = _, %t, want _, true; nicinfos = %+v", id, ok, nicinfos)
- }
- if got, want := nicinfo.Context == test.want, true; got != want {
- t.Fatalf("got nicinfo.Context == ctx = %t, want %t; nicinfo.Context = %p, ctx = %p", got, want, nicinfo.Context, test.want)
- }
- })
- }
-}
-
-// TestNICAutoGenLinkLocalAddr tests the auto-generation of IPv6 link-local
-// addresses.
-func TestNICAutoGenLinkLocalAddr(t *testing.T) {
- const nicID = 1
-
- var secretKey [header.OpaqueIIDSecretKeyMinBytes]byte
- n, err := rand.Read(secretKey[:])
- if err != nil {
- t.Fatalf("rand.Read(_): %s", err)
- }
- if n != header.OpaqueIIDSecretKeyMinBytes {
- t.Fatalf("expected rand.Read to read %d bytes, read %d bytes", header.OpaqueIIDSecretKeyMinBytes, n)
- }
-
- nicNameFunc := func(_ tcpip.NICID, name string) string {
- return name
- }
-
- tests := []struct {
- name string
- nicName string
- autoGen bool
- linkAddr tcpip.LinkAddress
- iidOpts ipv6.OpaqueInterfaceIdentifierOptions
- shouldGen bool
- expectedAddr tcpip.Address
- }{
- {
- name: "Disabled",
- nicName: "nic1",
- autoGen: false,
- linkAddr: linkAddr1,
- shouldGen: false,
- },
- {
-			name:      "Disabled with OIID options",
- nicName: "nic1",
- autoGen: false,
- linkAddr: linkAddr1,
- iidOpts: ipv6.OpaqueInterfaceIdentifierOptions{
- NICNameFromID: nicNameFunc,
- SecretKey: secretKey[:],
- },
- shouldGen: false,
- },
-
- // Tests for EUI64 based addresses.
- {
- name: "EUI64 Enabled",
- autoGen: true,
- linkAddr: linkAddr1,
- shouldGen: true,
- expectedAddr: header.LinkLocalAddr(linkAddr1),
- },
- {
- name: "EUI64 Empty MAC",
- autoGen: true,
- shouldGen: false,
- },
- {
- name: "EUI64 Invalid MAC",
- autoGen: true,
- linkAddr: "\x01\x02\x03",
- shouldGen: false,
- },
- {
- name: "EUI64 Multicast MAC",
- autoGen: true,
- linkAddr: "\x01\x02\x03\x04\x05\x06",
- shouldGen: false,
- },
- {
- name: "EUI64 Unspecified MAC",
- autoGen: true,
- linkAddr: "\x00\x00\x00\x00\x00\x00",
- shouldGen: false,
- },
-
- // Tests for Opaque IID based addresses.
- {
- name: "OIID Enabled",
- nicName: "nic1",
- autoGen: true,
- linkAddr: linkAddr1,
- iidOpts: ipv6.OpaqueInterfaceIdentifierOptions{
- NICNameFromID: nicNameFunc,
- SecretKey: secretKey[:],
- },
- shouldGen: true,
- expectedAddr: header.LinkLocalAddrWithOpaqueIID("nic1", 0, secretKey[:]),
- },
- // These are all cases where we would not have generated a
- // link-local address if opaque IIDs were disabled.
- {
- name: "OIID Empty MAC and empty nicName",
- autoGen: true,
- iidOpts: ipv6.OpaqueInterfaceIdentifierOptions{
- NICNameFromID: nicNameFunc,
- SecretKey: secretKey[:1],
- },
- shouldGen: true,
- expectedAddr: header.LinkLocalAddrWithOpaqueIID("", 0, secretKey[:1]),
- },
- {
- name: "OIID Invalid MAC",
- nicName: "test",
- autoGen: true,
- linkAddr: "\x01\x02\x03",
- iidOpts: ipv6.OpaqueInterfaceIdentifierOptions{
- NICNameFromID: nicNameFunc,
- SecretKey: secretKey[:2],
- },
- shouldGen: true,
- expectedAddr: header.LinkLocalAddrWithOpaqueIID("test", 0, secretKey[:2]),
- },
- {
- name: "OIID Multicast MAC",
- nicName: "test2",
- autoGen: true,
- linkAddr: "\x01\x02\x03\x04\x05\x06",
- iidOpts: ipv6.OpaqueInterfaceIdentifierOptions{
- NICNameFromID: nicNameFunc,
- SecretKey: secretKey[:3],
- },
- shouldGen: true,
- expectedAddr: header.LinkLocalAddrWithOpaqueIID("test2", 0, secretKey[:3]),
- },
- {
- name: "OIID Unspecified MAC and nil SecretKey",
- nicName: "test3",
- autoGen: true,
- linkAddr: "\x00\x00\x00\x00\x00\x00",
- iidOpts: ipv6.OpaqueInterfaceIdentifierOptions{
- NICNameFromID: nicNameFunc,
- },
- shouldGen: true,
- expectedAddr: header.LinkLocalAddrWithOpaqueIID("test3", 0, nil),
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- ndpDisp := ndpDispatcher{
- autoGenAddrC: make(chan ndpAutoGenAddrEvent, 1),
- }
- opts := stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- AutoGenLinkLocal: test.autoGen,
- NDPDisp: &ndpDisp,
- OpaqueIIDOpts: test.iidOpts,
- })},
- }
-
- e := channel.New(0, 1280, test.linkAddr)
- s := stack.New(opts)
- nicOpts := stack.NICOptions{Name: test.nicName, Disabled: true}
- if err := s.CreateNICWithOptions(nicID, e, nicOpts); err != nil {
-				t.Fatalf("CreateNICWithOptions(%d, _, %+v) = %s", nicID, nicOpts, err)
- }
-
- // A new disabled NIC should not have any address, even if auto generation
- // was enabled.
- allStackAddrs := s.AllAddresses()
- allNICAddrs, ok := allStackAddrs[nicID]
- if !ok {
- t.Fatalf("entry for %d missing from allStackAddrs = %+v", nicID, allStackAddrs)
- }
- if l := len(allNICAddrs); l != 0 {
- t.Fatalf("got len(allNICAddrs) = %d, want = 0", l)
- }
-
- // Enabling the NIC should attempt auto-generation of a link-local
- // address.
- if err := s.EnableNIC(nicID); err != nil {
- t.Fatalf("s.EnableNIC(%d): %s", nicID, err)
- }
-
- var expectedMainAddr tcpip.AddressWithPrefix
- if test.shouldGen {
- expectedMainAddr = tcpip.AddressWithPrefix{
- Address: test.expectedAddr,
- PrefixLen: header.IPv6LinkLocalPrefix.PrefixLen,
- }
-
- // Should have auto-generated an address and resolved immediately (DAD
- // is disabled).
- select {
- case e := <-ndpDisp.autoGenAddrC:
- if diff := checkAutoGenAddrEvent(e, expectedMainAddr, newAddr); diff != "" {
- t.Errorf("auto-gen addr event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("expected addr auto gen event")
- }
- } else {
- // Should not have auto-generated an address.
- select {
- case <-ndpDisp.autoGenAddrC:
- t.Fatal("unexpectedly auto-generated an address")
- default:
- }
- }
-
- if err := checkGetMainNICAddress(s, nicID, header.IPv6ProtocolNumber, expectedMainAddr); err != nil {
- t.Fatal(err)
- }
-
- // Disabling the NIC should remove the auto-generated address.
- if err := s.DisableNIC(nicID); err != nil {
- t.Fatalf("s.DisableNIC(%d): %s", nicID, err)
- }
- if err := checkGetMainNICAddress(s, nicID, header.IPv6ProtocolNumber, tcpip.AddressWithPrefix{}); err != nil {
- t.Fatal(err)
- }
- })
- }
-}
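As a rough companion to the table above, the following sketch enables link-local auto-generation with opaque IIDs (RFC 7217 style), mirroring the calls in the removed test. It assumes crypto/rand for the secret key and leaves DADConfigs at its zero value so that, as in the test, the address is usable as soon as the NIC exists and is enabled.

    package main

    import (
        "crypto/rand"
        "fmt"

        "gvisor.dev/gvisor/pkg/tcpip"
        "gvisor.dev/gvisor/pkg/tcpip/header"
        "gvisor.dev/gvisor/pkg/tcpip/link/channel"
        "gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
        "gvisor.dev/gvisor/pkg/tcpip/stack"
    )

    func main() {
        const nicID = 1

        // Opaque IIDs derive the interface identifier from the NIC name and a
        // secret key instead of the MAC address.
        var secretKey [header.OpaqueIIDSecretKeyMinBytes]byte
        if _, err := rand.Read(secretKey[:]); err != nil {
            panic(err)
        }

        s := stack.New(stack.Options{
            NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
                AutoGenLinkLocal: true,
                OpaqueIIDOpts: ipv6.OpaqueInterfaceIdentifierOptions{
                    NICNameFromID: func(_ tcpip.NICID, name string) string { return name },
                    SecretKey:     secretKey[:],
                },
            })},
        })

        e := channel.New(0, 1280, "")
        if err := s.CreateNICWithOptions(nicID, e, stack.NICOptions{Name: "nic1"}); err != nil {
            panic(fmt.Sprintf("CreateNICWithOptions: %s", err))
        }

        // With no DAD configured, the link-local address is assigned immediately.
        addr, err := s.GetMainNICAddress(nicID, header.IPv6ProtocolNumber)
        if err != nil {
            panic(fmt.Sprintf("GetMainNICAddress: %s", err))
        }
        fmt.Println("auto-generated link-local address:", addr)
    }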
-
-// TestNoLinkLocalAutoGenForLoopbackNIC tests that IPv6 link-local addresses are
-// not auto-generated for loopback NICs.
-func TestNoLinkLocalAutoGenForLoopbackNIC(t *testing.T) {
- const nicID = 1
- const nicName = "nicName"
-
- tests := []struct {
- name string
- opaqueIIDOpts ipv6.OpaqueInterfaceIdentifierOptions
- }{
- {
- name: "IID From MAC",
- opaqueIIDOpts: ipv6.OpaqueInterfaceIdentifierOptions{},
- },
- {
- name: "Opaque IID",
- opaqueIIDOpts: ipv6.OpaqueInterfaceIdentifierOptions{
- NICNameFromID: func(_ tcpip.NICID, nicName string) string {
- return nicName
- },
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- opts := stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- AutoGenLinkLocal: true,
- OpaqueIIDOpts: test.opaqueIIDOpts,
- })},
- }
-
- e := loopback.New()
- s := stack.New(opts)
- nicOpts := stack.NICOptions{Name: nicName}
- if err := s.CreateNICWithOptions(nicID, e, nicOpts); err != nil {
- t.Fatalf("CreateNICWithOptions(%d, _, %+v) = %s", nicID, nicOpts, err)
- }
-
- if err := checkGetMainNICAddress(s, 1, header.IPv6ProtocolNumber, tcpip.AddressWithPrefix{}); err != nil {
- t.Fatal(err)
- }
- })
- }
-}
-
-// TestNICAutoGenAddrDoesDAD tests that an auto-generated IPv6 link-local
-// address is only assigned to the NIC after the DAD process resolves.
-func TestNICAutoGenAddrDoesDAD(t *testing.T) {
- const nicID = 1
-
- ndpDisp := ndpDispatcher{
- dadC: make(chan ndpDADEvent, 1),
- }
- dadConfigs := stack.DefaultDADConfigurations()
- clock := faketime.NewManualClock()
- opts := stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- AutoGenLinkLocal: true,
- NDPDisp: &ndpDisp,
- DADConfigs: dadConfigs,
- })},
- Clock: clock,
- }
-
- e := channel.New(int(dadConfigs.DupAddrDetectTransmits), 1280, linkAddr1)
- s := stack.New(opts)
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
-
- // Address should not be considered bound to the
- // NIC yet (DAD ongoing).
- if err := checkGetMainNICAddress(s, nicID, header.IPv6ProtocolNumber, tcpip.AddressWithPrefix{}); err != nil {
- t.Fatal(err)
- }
-
- linkLocalAddr := header.LinkLocalAddr(linkAddr1)
-
- // Wait for DAD to resolve.
- clock.Advance(time.Duration(dadConfigs.DupAddrDetectTransmits) * dadConfigs.RetransmitTimer)
- select {
- case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, linkLocalAddr, &stack.DADSucceeded{}); diff != "" {
- t.Errorf("dad event mismatch (-want +got):\n%s", diff)
- }
- default:
-		// A DAD resolution event should have been dispatched as soon as the
-		// manual clock advanced past the configured DAD resolution time; its
-		// absence here means something is wrong.
- t.Fatal("timed out waiting for DAD resolution")
- }
- if err := checkGetMainNICAddress(s, nicID, header.IPv6ProtocolNumber, tcpip.AddressWithPrefix{Address: linkLocalAddr, PrefixLen: header.IPv6LinkLocalPrefix.PrefixLen}); err != nil {
- t.Fatal(err)
- }
-}
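The manual-clock pattern above is the usual way to exercise DAD deterministically. A sketch under the same assumptions (faketime package path as used in this tree, one DAD transmit, one-second retransmit timer, placeholder link address) might be:

    package main

    import (
        "fmt"
        "time"

        "gvisor.dev/gvisor/pkg/tcpip"
        "gvisor.dev/gvisor/pkg/tcpip/faketime"
        "gvisor.dev/gvisor/pkg/tcpip/header"
        "gvisor.dev/gvisor/pkg/tcpip/link/channel"
        "gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
        "gvisor.dev/gvisor/pkg/tcpip/stack"
    )

    func main() {
        const nicID = 1
        const linkAddr = tcpip.LinkAddress("\x02\x03\x03\x04\x05\x06")

        clock := faketime.NewManualClock()
        dadConfigs := stack.DADConfigurations{
            DupAddrDetectTransmits: 1,
            RetransmitTimer:        time.Second,
        }

        s := stack.New(stack.Options{
            NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
                AutoGenLinkLocal: true,
                DADConfigs:       dadConfigs,
            })},
            Clock: clock,
        })
        e := channel.New(int(dadConfigs.DupAddrDetectTransmits), 1280, linkAddr)
        if err := s.CreateNIC(nicID, e); err != nil {
            panic(fmt.Sprintf("CreateNIC: %s", err))
        }

        // While DAD is in progress the address is tentative and GetMainNICAddress
        // reports no address. Advancing the fake clock past the DAD window lets
        // the address become permanent.
        clock.Advance(time.Duration(dadConfigs.DupAddrDetectTransmits) * dadConfigs.RetransmitTimer)

        addr, err := s.GetMainNICAddress(nicID, header.IPv6ProtocolNumber)
        if err != nil {
            panic(fmt.Sprintf("GetMainNICAddress: %s", err))
        }
        fmt.Println("link-local address after DAD:", addr)
    }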
-
-// TestNewPEBOnPromotionToPermanent tests that a new PrimaryEndpointBehavior
-// value (peb) is respected when an address's kind gets "promoted" to permanent
-// from permanentExpired.
-func TestNewPEBOnPromotionToPermanent(t *testing.T) {
- const nicID = 1
-
- pebs := []stack.PrimaryEndpointBehavior{
- stack.NeverPrimaryEndpoint,
- stack.CanBePrimaryEndpoint,
- stack.FirstPrimaryEndpoint,
- }
-
- for _, pi := range pebs {
- for _, ps := range pebs {
- t.Run(fmt.Sprintf("%d-to-%d", pi, ps), func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- })
- ep1 := channel.New(10, defaultMTU, "")
- if err := s.CreateNIC(nicID, ep1); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID, err)
- }
-
- // Add a permanent address with initial
- // PrimaryEndpointBehavior (peb), pi. If pi is
- // NeverPrimaryEndpoint, the address should not
- // be returned by a call to GetMainNICAddress;
- // else, it should.
- const address1 = tcpip.Address("\x01")
- if err := s.AddAddressWithOptions(nicID, fakeNetNumber, address1, pi); err != nil {
- t.Fatalf("AddAddressWithOptions(%d, %d, %s, %d): %s", nicID, fakeNetNumber, address1, pi, err)
- }
- addr, err := s.GetMainNICAddress(nicID, fakeNetNumber)
- if err != nil {
- t.Fatalf("GetMainNICAddress(%d, %d): %s", nicID, fakeNetNumber, err)
- }
- if pi == stack.NeverPrimaryEndpoint {
- if want := (tcpip.AddressWithPrefix{}); addr != want {
-						t.Fatalf("got GetMainNICAddress(%d, %d) = %s, want = %s", nicID, fakeNetNumber, addr, want)
-					}
- } else if addr.Address != address1 {
- t.Fatalf("got GetMainNICAddress(%d, %d) = %s, want = %s", nicID, fakeNetNumber, addr.Address, address1)
- }
-
- {
- subnet, err := tcpip.NewSubnet("\x00", "\x00")
- if err != nil {
- t.Fatalf("NewSubnet failed: %v", err)
- }
- s.SetRouteTable([]tcpip.Route{{Destination: subnet, Gateway: "\x00", NIC: 1}})
- }
-
- // Take a route through the address so its ref
- // count gets incremented and does not actually
- // get deleted when RemoveAddress is called
- // below. This is because we want to test that a
- // new peb is respected when an address gets
- // "promoted" to permanent from a
- // permanentExpired kind.
- const address2 = tcpip.Address("\x02")
- r, err := s.FindRoute(nicID, address1, address2, fakeNetNumber, false)
- if err != nil {
- t.Fatalf("FindRoute(%d, %s, %s, %d, false): %s", nicID, address1, address2, fakeNetNumber, err)
- }
- defer r.Release()
- if err := s.RemoveAddress(nicID, address1); err != nil {
- t.Fatalf("RemoveAddress(%d, %s): %s", nicID, address1, err)
- }
-
- //
- // At this point, the address should still be
- // known by the NIC, but have its
- // kind = permanentExpired.
- //
-
- // Add some other address with peb set to
- // FirstPrimaryEndpoint.
- const address3 = tcpip.Address("\x03")
- if err := s.AddAddressWithOptions(nicID, fakeNetNumber, address3, stack.FirstPrimaryEndpoint); err != nil {
-					t.Fatalf("AddAddressWithOptions(%d, %d, %s, %d): %s", nicID, fakeNetNumber, address3, stack.FirstPrimaryEndpoint, err)
-				}
-
- // Add back the address we removed earlier and
- // make sure the new peb was respected.
- // (The address should just be promoted now).
- if err := s.AddAddressWithOptions(nicID, fakeNetNumber, address1, ps); err != nil {
-					t.Fatalf("AddAddressWithOptions(%d, %d, %s, %d): %s", nicID, fakeNetNumber, address1, ps, err)
- }
- var primaryAddrs []tcpip.Address
- for _, pa := range s.NICInfo()[nicID].ProtocolAddresses {
- primaryAddrs = append(primaryAddrs, pa.AddressWithPrefix.Address)
- }
- var expectedList []tcpip.Address
- switch ps {
- case stack.FirstPrimaryEndpoint:
- expectedList = []tcpip.Address{
- "\x01",
- "\x03",
- }
- case stack.CanBePrimaryEndpoint:
- expectedList = []tcpip.Address{
- "\x03",
- "\x01",
- }
- case stack.NeverPrimaryEndpoint:
- expectedList = []tcpip.Address{
- "\x03",
- }
- }
- if !cmp.Equal(primaryAddrs, expectedList) {
- t.Fatalf("got NIC's primary addresses = %v, want = %v", primaryAddrs, expectedList)
- }
-
- // Once we remove the other address, if the new
- // peb, ps, was NeverPrimaryEndpoint, no address
- // should be returned by a call to
- // GetMainNICAddress; else, our original address
- // should be returned.
- if err := s.RemoveAddress(nicID, address3); err != nil {
- t.Fatalf("RemoveAddress(%d, %s): %s", nicID, address3, err)
- }
- addr, err = s.GetMainNICAddress(nicID, fakeNetNumber)
- if err != nil {
- t.Fatalf("GetMainNICAddress(%d, %d): %s", nicID, fakeNetNumber, err)
- }
- if ps == stack.NeverPrimaryEndpoint {
- if want := (tcpip.AddressWithPrefix{}); addr != want {
- t.Fatalf("got GetMainNICAddress(%d, %d) = %s, want = %s", nicID, fakeNetNumber, addr, want)
- }
- } else {
- if addr.Address != address1 {
- t.Fatalf("got GetMainNICAddress(%d, %d) = %s, want = %s", nicID, fakeNetNumber, addr.Address, address1)
- }
- }
- })
- }
- }
-}
-
-func TestIPv6SourceAddressSelectionScopeAndSameAddress(t *testing.T) {
- const (
- nicID = 1
- lifetimeSeconds = 9999
- )
-
- var (
- linkLocalAddr1 = testutil.MustParse6("fe80::1")
- linkLocalAddr2 = testutil.MustParse6("fe80::2")
- linkLocalMulticastAddr = testutil.MustParse6("ff02::1")
- uniqueLocalAddr1 = testutil.MustParse6("fc00::1")
- uniqueLocalAddr2 = testutil.MustParse6("fd00::2")
- globalAddr1 = testutil.MustParse6("a000::1")
- globalAddr2 = testutil.MustParse6("a000::2")
- globalAddr3 = testutil.MustParse6("a000::3")
- ipv4MappedIPv6Addr1 = testutil.MustParse6("::ffff:0.0.0.1")
- ipv4MappedIPv6Addr2 = testutil.MustParse6("::ffff:0.0.0.2")
- toredoAddr1 = testutil.MustParse6("2001::1")
- toredoAddr2 = testutil.MustParse6("2001::2")
- ipv6ToIPv4Addr1 = testutil.MustParse6("2002::1")
- ipv6ToIPv4Addr2 = testutil.MustParse6("2002::2")
- )
-
- prefix1, _, stableGlobalAddr1 := prefixSubnetAddr(0, linkAddr1)
- prefix2, _, stableGlobalAddr2 := prefixSubnetAddr(1, linkAddr1)
-
- var tempIIDHistory [header.IIDSize]byte
- header.InitialTempIID(tempIIDHistory[:], nil, nicID)
- tempGlobalAddr1 := header.GenerateTempIPv6SLAACAddr(tempIIDHistory[:], stableGlobalAddr1.Address).Address
- tempGlobalAddr2 := header.GenerateTempIPv6SLAACAddr(tempIIDHistory[:], stableGlobalAddr2.Address).Address
-
- // Rule 3 is not tested here, and is instead tested by NDP's AutoGenAddr test.
- tests := []struct {
- name string
- slaacPrefixForTempAddrBeforeNICAddrAdd tcpip.AddressWithPrefix
- nicAddrs []tcpip.Address
- slaacPrefixForTempAddrAfterNICAddrAdd tcpip.AddressWithPrefix
- remoteAddr tcpip.Address
- expectedLocalAddr tcpip.Address
- }{
- // Test Rule 1 of RFC 6724 section 5 (prefer same address).
- {
- name: "Same Global most preferred (last address)",
- nicAddrs: []tcpip.Address{linkLocalAddr1, globalAddr1},
- remoteAddr: globalAddr1,
- expectedLocalAddr: globalAddr1,
- },
- {
- name: "Same Global most preferred (first address)",
- nicAddrs: []tcpip.Address{globalAddr1, uniqueLocalAddr1},
- remoteAddr: globalAddr1,
- expectedLocalAddr: globalAddr1,
- },
- {
- name: "Same Link Local most preferred (last address)",
- nicAddrs: []tcpip.Address{globalAddr1, linkLocalAddr1},
- remoteAddr: linkLocalAddr1,
- expectedLocalAddr: linkLocalAddr1,
- },
- {
- name: "Same Link Local most preferred (first address)",
- nicAddrs: []tcpip.Address{linkLocalAddr1, globalAddr1},
- remoteAddr: linkLocalAddr1,
- expectedLocalAddr: linkLocalAddr1,
- },
- {
- name: "Same Unique Local most preferred (last address)",
- nicAddrs: []tcpip.Address{uniqueLocalAddr1, globalAddr1},
- remoteAddr: uniqueLocalAddr1,
- expectedLocalAddr: uniqueLocalAddr1,
- },
- {
- name: "Same Unique Local most preferred (first address)",
- nicAddrs: []tcpip.Address{globalAddr1, uniqueLocalAddr1},
- remoteAddr: uniqueLocalAddr1,
- expectedLocalAddr: uniqueLocalAddr1,
- },
-
- // Test Rule 2 of RFC 6724 section 5 (prefer appropriate scope).
- {
- name: "Global most preferred (last address)",
- nicAddrs: []tcpip.Address{linkLocalAddr1, globalAddr1},
- remoteAddr: globalAddr2,
- expectedLocalAddr: globalAddr1,
- },
- {
- name: "Global most preferred (first address)",
- nicAddrs: []tcpip.Address{globalAddr1, linkLocalAddr1},
- remoteAddr: globalAddr2,
- expectedLocalAddr: globalAddr1,
- },
- {
- name: "Link Local most preferred (last address)",
- nicAddrs: []tcpip.Address{globalAddr1, linkLocalAddr1},
- remoteAddr: linkLocalAddr2,
- expectedLocalAddr: linkLocalAddr1,
- },
- {
- name: "Link Local most preferred (first address)",
- nicAddrs: []tcpip.Address{linkLocalAddr1, globalAddr1},
- remoteAddr: linkLocalAddr2,
- expectedLocalAddr: linkLocalAddr1,
- },
- {
- name: "Link Local most preferred for link local multicast (last address)",
- nicAddrs: []tcpip.Address{globalAddr1, linkLocalAddr1},
- remoteAddr: linkLocalMulticastAddr,
- expectedLocalAddr: linkLocalAddr1,
- },
- {
- name: "Link Local most preferred for link local multicast (first address)",
- nicAddrs: []tcpip.Address{linkLocalAddr1, globalAddr1},
- remoteAddr: linkLocalMulticastAddr,
- expectedLocalAddr: linkLocalAddr1,
- },
-
-		// Test Rule 6 of RFC 6724 section 5 (prefer matching label).
- {
- name: "Unique Local most preferred (last address)",
- nicAddrs: []tcpip.Address{uniqueLocalAddr1, globalAddr1, ipv4MappedIPv6Addr1, toredoAddr1, ipv6ToIPv4Addr1},
- remoteAddr: uniqueLocalAddr2,
- expectedLocalAddr: uniqueLocalAddr1,
- },
- {
- name: "Unique Local most preferred (first address)",
- nicAddrs: []tcpip.Address{globalAddr1, ipv4MappedIPv6Addr1, toredoAddr1, ipv6ToIPv4Addr1, uniqueLocalAddr1},
- remoteAddr: uniqueLocalAddr2,
- expectedLocalAddr: uniqueLocalAddr1,
- },
- {
- name: "Toredo most preferred (first address)",
- nicAddrs: []tcpip.Address{toredoAddr1, uniqueLocalAddr1, globalAddr1, ipv4MappedIPv6Addr1, ipv6ToIPv4Addr1},
- remoteAddr: toredoAddr2,
- expectedLocalAddr: toredoAddr1,
- },
- {
- name: "Toredo most preferred (last address)",
- nicAddrs: []tcpip.Address{globalAddr1, ipv4MappedIPv6Addr1, ipv6ToIPv4Addr1, uniqueLocalAddr1, toredoAddr1},
- remoteAddr: toredoAddr2,
- expectedLocalAddr: toredoAddr1,
- },
- {
- name: "6To4 most preferred (first address)",
- nicAddrs: []tcpip.Address{ipv6ToIPv4Addr1, toredoAddr1, uniqueLocalAddr1, globalAddr1, ipv4MappedIPv6Addr1},
- remoteAddr: ipv6ToIPv4Addr2,
- expectedLocalAddr: ipv6ToIPv4Addr1,
- },
- {
- name: "6To4 most preferred (last address)",
- nicAddrs: []tcpip.Address{globalAddr1, ipv4MappedIPv6Addr1, uniqueLocalAddr1, toredoAddr1, ipv6ToIPv4Addr1},
- remoteAddr: ipv6ToIPv4Addr2,
- expectedLocalAddr: ipv6ToIPv4Addr1,
- },
- {
- name: "IPv4 mapped IPv6 most preferred (first address)",
- nicAddrs: []tcpip.Address{ipv4MappedIPv6Addr1, ipv6ToIPv4Addr1, toredoAddr1, uniqueLocalAddr1, globalAddr1},
- remoteAddr: ipv4MappedIPv6Addr2,
- expectedLocalAddr: ipv4MappedIPv6Addr1,
- },
- {
- name: "IPv4 mapped IPv6 most preferred (last address)",
- nicAddrs: []tcpip.Address{globalAddr1, ipv6ToIPv4Addr1, uniqueLocalAddr1, toredoAddr1, ipv4MappedIPv6Addr1},
- remoteAddr: ipv4MappedIPv6Addr2,
- expectedLocalAddr: ipv4MappedIPv6Addr1,
- },
-
- // Test Rule 7 of RFC 6724 section 5 (prefer temporary addresses).
- {
- name: "Temp Global most preferred (last address)",
- slaacPrefixForTempAddrBeforeNICAddrAdd: prefix1,
- nicAddrs: []tcpip.Address{linkLocalAddr1, uniqueLocalAddr1, globalAddr1},
- remoteAddr: globalAddr2,
- expectedLocalAddr: tempGlobalAddr1,
- },
- {
- name: "Temp Global most preferred (first address)",
- nicAddrs: []tcpip.Address{linkLocalAddr1, uniqueLocalAddr1, globalAddr1},
- slaacPrefixForTempAddrAfterNICAddrAdd: prefix1,
- remoteAddr: globalAddr2,
- expectedLocalAddr: tempGlobalAddr1,
- },
-
- // Test Rule 8 of RFC 6724 section 5 (use longest matching prefix).
- {
- name: "Longest prefix matched most preferred (first address)",
- nicAddrs: []tcpip.Address{globalAddr2, globalAddr1},
- remoteAddr: globalAddr3,
- expectedLocalAddr: globalAddr2,
- },
- {
- name: "Longest prefix matched most preferred (last address)",
- nicAddrs: []tcpip.Address{globalAddr1, globalAddr2},
- remoteAddr: globalAddr3,
- expectedLocalAddr: globalAddr2,
- },
-
- // Test returning the endpoint that is closest to the front when
- // candidate addresses are "equal" from the perspective of RFC 6724
- // section 5.
- {
- name: "Unique Local for Global",
- nicAddrs: []tcpip.Address{linkLocalAddr1, uniqueLocalAddr1, uniqueLocalAddr2},
- remoteAddr: globalAddr2,
- expectedLocalAddr: uniqueLocalAddr1,
- },
- {
- name: "Link Local for Global",
- nicAddrs: []tcpip.Address{linkLocalAddr1, linkLocalAddr2},
- remoteAddr: globalAddr2,
- expectedLocalAddr: linkLocalAddr1,
- },
- {
- name: "Link Local for Unique Local",
- nicAddrs: []tcpip.Address{linkLocalAddr1, linkLocalAddr2},
- remoteAddr: uniqueLocalAddr2,
- expectedLocalAddr: linkLocalAddr1,
- },
- {
- name: "Temp Global for Global",
- slaacPrefixForTempAddrBeforeNICAddrAdd: prefix1,
- slaacPrefixForTempAddrAfterNICAddrAdd: prefix2,
- remoteAddr: globalAddr1,
- expectedLocalAddr: tempGlobalAddr2,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- e := channel.New(0, 1280, linkAddr1)
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- NDPConfigs: ipv6.NDPConfigurations{
- HandleRAs: ipv6.HandlingRAsEnabledWhenForwardingDisabled,
- AutoGenGlobalAddresses: true,
- AutoGenTempGlobalAddresses: true,
- },
- NDPDisp: &ndpDispatcher{},
- })},
- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},
- })
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
-
- if test.slaacPrefixForTempAddrBeforeNICAddrAdd != (tcpip.AddressWithPrefix{}) {
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr3, 0, test.slaacPrefixForTempAddrBeforeNICAddrAdd, true, true, lifetimeSeconds, lifetimeSeconds))
- }
-
- for _, a := range test.nicAddrs {
- if err := s.AddAddress(nicID, ipv6.ProtocolNumber, a); err != nil {
- t.Errorf("s.AddAddress(%d, %d, %s): %s", nicID, ipv6.ProtocolNumber, a, err)
- }
- }
-
- if test.slaacPrefixForTempAddrAfterNICAddrAdd != (tcpip.AddressWithPrefix{}) {
- e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr3, 0, test.slaacPrefixForTempAddrAfterNICAddrAdd, true, true, lifetimeSeconds, lifetimeSeconds))
- }
-
- if t.Failed() {
- t.FailNow()
- }
-
- netEP, err := s.GetNetworkEndpoint(nicID, header.IPv6ProtocolNumber)
- if err != nil {
- t.Fatalf("s.GetNetworkEndpoint(%d, %d): %s", nicID, header.IPv6ProtocolNumber, err)
- }
-
- addressableEndpoint, ok := netEP.(stack.AddressableEndpoint)
- if !ok {
- t.Fatal("network endpoint is not addressable")
- }
-
- addressEP := addressableEndpoint.AcquireOutgoingPrimaryAddress(test.remoteAddr, false /* allowExpired */)
- if addressEP == nil {
- t.Fatal("expected a non-nil address endpoint")
- }
- defer addressEP.DecRef()
-
- if got := addressEP.AddressWithPrefix().Address; got != test.expectedLocalAddr {
- t.Errorf("got local address = %s, want = %s", got, test.expectedLocalAddr)
- }
- })
- }
-}
-
-func TestAddRemoveIPv4BroadcastAddressOnNICEnableDisable(t *testing.T) {
- const nicID = 1
- broadcastAddr := tcpip.ProtocolAddress{
- Protocol: header.IPv4ProtocolNumber,
- AddressWithPrefix: tcpip.AddressWithPrefix{
- Address: header.IPv4Broadcast,
- PrefixLen: 32,
- },
- }
-
- e := loopback.New()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},
- })
- nicOpts := stack.NICOptions{Disabled: true}
- if err := s.CreateNICWithOptions(nicID, e, nicOpts); err != nil {
- t.Fatalf("CreateNIC(%d, _, %+v) = %s", nicID, nicOpts, err)
- }
-
- {
- allStackAddrs := s.AllAddresses()
- if allNICAddrs, ok := allStackAddrs[nicID]; !ok {
- t.Fatalf("entry for %d missing from allStackAddrs = %+v", nicID, allStackAddrs)
- } else if containsAddr(allNICAddrs, broadcastAddr) {
- t.Fatalf("got allNICAddrs = %+v, don't want = %+v", allNICAddrs, broadcastAddr)
- }
- }
-
- // Enabling the NIC should add the IPv4 broadcast address.
- if err := s.EnableNIC(nicID); err != nil {
- t.Fatalf("s.EnableNIC(%d): %s", nicID, err)
- }
-
- {
- allStackAddrs := s.AllAddresses()
- if allNICAddrs, ok := allStackAddrs[nicID]; !ok {
- t.Fatalf("entry for %d missing from allStackAddrs = %+v", nicID, allStackAddrs)
- } else if !containsAddr(allNICAddrs, broadcastAddr) {
- t.Fatalf("got allNICAddrs = %+v, want = %+v", allNICAddrs, broadcastAddr)
- }
- }
-
- // Disabling the NIC should remove the IPv4 broadcast address.
- if err := s.DisableNIC(nicID); err != nil {
- t.Fatalf("s.DisableNIC(%d): %s", nicID, err)
- }
-
- {
- allStackAddrs := s.AllAddresses()
- if allNICAddrs, ok := allStackAddrs[nicID]; !ok {
- t.Fatalf("entry for %d missing from allStackAddrs = %+v", nicID, allStackAddrs)
- } else if containsAddr(allNICAddrs, broadcastAddr) {
- t.Fatalf("got allNICAddrs = %+v, don't want = %+v", allNICAddrs, broadcastAddr)
- }
- }
-}
-
-// TestLeaveIPv6SolicitedNodeAddrBeforeAddrRemoval tests that removing an IPv6
-// address after leaving its solicited node multicast address does not result in
-// an error.
-func TestLeaveIPv6SolicitedNodeAddrBeforeAddrRemoval(t *testing.T) {
- const nicID = 1
-
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocol},
- })
- e := channel.New(10, 1280, linkAddr1)
- if err := s.CreateNIC(1, e); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID, err)
- }
-
- if err := s.AddAddress(nicID, ipv6.ProtocolNumber, addr1); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s): %s", nicID, ipv6.ProtocolNumber, addr1, err)
- }
-
- // The NIC should have joined addr1's solicited node multicast address.
- snmc := header.SolicitedNodeAddr(addr1)
- in, err := s.IsInGroup(nicID, snmc)
- if err != nil {
- t.Fatalf("IsInGroup(%d, %s): %s", nicID, snmc, err)
- }
- if !in {
- t.Fatalf("got IsInGroup(%d, %s) = false, want = true", nicID, snmc)
- }
-
- if err := s.LeaveGroup(ipv6.ProtocolNumber, nicID, snmc); err != nil {
- t.Fatalf("LeaveGroup(%d, %d, %s): %s", ipv6.ProtocolNumber, nicID, snmc, err)
- }
- in, err = s.IsInGroup(nicID, snmc)
- if err != nil {
- t.Fatalf("IsInGroup(%d, %s): %s", nicID, snmc, err)
- }
- if in {
- t.Fatalf("got IsInGroup(%d, %s) = true, want = false", nicID, snmc)
- }
-
- if err := s.RemoveAddress(nicID, addr1); err != nil {
- t.Fatalf("RemoveAddress(%d, %s) = %s", nicID, addr1, err)
- }
-}
-
-func TestJoinLeaveMulticastOnNICEnableDisable(t *testing.T) {
- const nicID = 1
-
- tests := []struct {
- name string
- proto tcpip.NetworkProtocolNumber
- addr tcpip.Address
- }{
- {
- name: "IPv6 All-Nodes",
- proto: header.IPv6ProtocolNumber,
- addr: header.IPv6AllNodesMulticastAddress,
- },
- {
- name: "IPv4 All-Systems",
- proto: header.IPv4ProtocolNumber,
- addr: header.IPv4AllSystems,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- e := loopback.New()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},
- })
- nicOpts := stack.NICOptions{Disabled: true}
- if err := s.CreateNICWithOptions(nicID, e, nicOpts); err != nil {
- t.Fatalf("CreateNIC(%d, _, %+v) = %s", nicID, nicOpts, err)
- }
-
- // Should not be in the multicast group yet because the NIC has not been
- // enabled yet.
- if isInGroup, err := s.IsInGroup(nicID, test.addr); err != nil {
- t.Fatalf("IsInGroup(%d, %s): %s", nicID, test.addr, err)
- } else if isInGroup {
- t.Fatalf("got IsInGroup(%d, %s) = true, want = false", nicID, test.addr)
- }
-
- // The all-nodes multicast group should be joined when the NIC is enabled.
- if err := s.EnableNIC(nicID); err != nil {
- t.Fatalf("s.EnableNIC(%d): %s", nicID, err)
- }
-
- if isInGroup, err := s.IsInGroup(nicID, test.addr); err != nil {
- t.Fatalf("IsInGroup(%d, %s): %s", nicID, test.addr, err)
- } else if !isInGroup {
- t.Fatalf("got IsInGroup(%d, %s) = false, want = true", nicID, test.addr)
- }
-
- // The multicast group should be left when the NIC is disabled.
- if err := s.DisableNIC(nicID); err != nil {
- t.Fatalf("s.DisableNIC(%d): %s", nicID, err)
- }
-
- if isInGroup, err := s.IsInGroup(nicID, test.addr); err != nil {
- t.Fatalf("IsInGroup(%d, %s): %s", nicID, test.addr, err)
- } else if isInGroup {
- t.Fatalf("got IsInGroup(%d, %s) = true, want = false", nicID, test.addr)
- }
-
- // The all-nodes multicast group should be joined when the NIC is enabled.
- if err := s.EnableNIC(nicID); err != nil {
- t.Fatalf("s.EnableNIC(%d): %s", nicID, err)
- }
-
- if isInGroup, err := s.IsInGroup(nicID, test.addr); err != nil {
- t.Fatalf("IsInGroup(%d, %s): %s", nicID, test.addr, err)
- } else if !isInGroup {
- t.Fatalf("got IsInGroup(%d, %s) = false, want = true", nicID, test.addr)
- }
-
- // Leaving the group before disabling the NIC should not cause an error.
- if err := s.LeaveGroup(test.proto, nicID, test.addr); err != nil {
- t.Fatalf("s.LeaveGroup(%d, %d, %s): %s", test.proto, nicID, test.addr, err)
- }
-
- if err := s.DisableNIC(nicID); err != nil {
- t.Fatalf("s.DisableNIC(%d): %s", nicID, err)
- }
-
- if isInGroup, err := s.IsInGroup(nicID, test.addr); err != nil {
- t.Fatalf("IsInGroup(%d, %s): %s", nicID, test.addr, err)
- } else if isInGroup {
- t.Fatalf("got IsInGroup(%d, %s) = true, want = false", nicID, test.addr)
- }
- })
- }
-}
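A condensed sketch of the enable/disable behavior exercised above, using the IPv6 all-nodes group and a loopback endpoint; the report helper and its output are illustrative only:

    package main

    import (
        "fmt"

        "gvisor.dev/gvisor/pkg/tcpip/header"
        "gvisor.dev/gvisor/pkg/tcpip/link/loopback"
        "gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
        "gvisor.dev/gvisor/pkg/tcpip/stack"
    )

    func main() {
        const nicID = 1

        s := stack.New(stack.Options{
            NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocol},
        })
        // Start with the NIC disabled; group membership follows the NIC state.
        if err := s.CreateNICWithOptions(nicID, loopback.New(), stack.NICOptions{Disabled: true}); err != nil {
            panic(fmt.Sprintf("CreateNICWithOptions: %s", err))
        }

        report := func(when string) {
            in, err := s.IsInGroup(nicID, header.IPv6AllNodesMulticastAddress)
            if err != nil {
                panic(fmt.Sprintf("IsInGroup: %s", err))
            }
            fmt.Printf("%s: in all-nodes group = %t\n", when, in)
        }

        report("disabled")
        if err := s.EnableNIC(nicID); err != nil {
            panic(fmt.Sprintf("EnableNIC: %s", err))
        }
        report("enabled") // joined automatically
        if err := s.DisableNIC(nicID); err != nil {
            panic(fmt.Sprintf("DisableNIC: %s", err))
        }
        report("disabled again") // left automatically
    }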
-
-// TestDoDADWhenNICEnabled tests that IPv6 endpoints that were added while a NIC
-// was disabled have DAD performed on them when the NIC is enabled.
-func TestDoDADWhenNICEnabled(t *testing.T) {
- const dadTransmits = 1
- const retransmitTimer = time.Second
- const nicID = 1
-
- ndpDisp := ndpDispatcher{
- dadC: make(chan ndpDADEvent, 1),
- }
- clock := faketime.NewManualClock()
- opts := stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{
- DADConfigs: stack.DADConfigurations{
- DupAddrDetectTransmits: dadTransmits,
- RetransmitTimer: retransmitTimer,
- },
- NDPDisp: &ndpDisp,
- })},
- Clock: clock,
- }
-
- e := channel.New(dadTransmits, 1280, linkAddr1)
- s := stack.New(opts)
- nicOpts := stack.NICOptions{Disabled: true}
- if err := s.CreateNICWithOptions(nicID, e, nicOpts); err != nil {
- t.Fatalf("CreateNIC(%d, _, %+v) = %s", nicID, nicOpts, err)
- }
-
- addr := tcpip.ProtocolAddress{
- Protocol: header.IPv6ProtocolNumber,
- AddressWithPrefix: tcpip.AddressWithPrefix{
- Address: llAddr1,
- PrefixLen: 128,
- },
- }
- if err := s.AddProtocolAddress(nicID, addr); err != nil {
- t.Fatalf("AddProtocolAddress(%d, %+v): %s", nicID, addr, err)
- }
-
- // Address should be in the list of all addresses.
- if addrs := s.AllAddresses()[nicID]; !containsV6Addr(addrs, addr.AddressWithPrefix) {
- t.Fatalf("got s.AllAddresses()[%d] = %+v, want = %+v", nicID, addrs, addr)
- }
-
- // Address should be tentative so it should not be a main address.
- if err := checkGetMainNICAddress(s, nicID, header.IPv6ProtocolNumber, tcpip.AddressWithPrefix{}); err != nil {
- t.Fatal(err)
- }
-
- // Enabling the NIC should start DAD for the address.
- if err := s.EnableNIC(nicID); err != nil {
- t.Fatalf("s.EnableNIC(%d): %s", nicID, err)
- }
- if addrs := s.AllAddresses()[nicID]; !containsV6Addr(addrs, addr.AddressWithPrefix) {
- t.Fatalf("got s.AllAddresses()[%d] = %+v, want = %+v", nicID, addrs, addr)
- }
-
- // Address should not be considered bound to the NIC yet (DAD ongoing).
- if err := checkGetMainNICAddress(s, nicID, header.IPv6ProtocolNumber, tcpip.AddressWithPrefix{}); err != nil {
- t.Fatal(err)
- }
-
- // Wait for DAD to resolve.
- clock.Advance(dadTransmits * retransmitTimer)
- select {
- case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, addr.AddressWithPrefix.Address, &stack.DADSucceeded{}); diff != "" {
- t.Errorf("dad event mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("timed out waiting for DAD resolution")
- }
- if addrs := s.AllAddresses()[nicID]; !containsV6Addr(addrs, addr.AddressWithPrefix) {
- t.Fatalf("got s.AllAddresses()[%d] = %+v, want = %+v", nicID, addrs, addr)
- }
- if err := checkGetMainNICAddress(s, nicID, header.IPv6ProtocolNumber, addr.AddressWithPrefix); err != nil {
- t.Fatal(err)
- }
-
- // Enabling the NIC again should be a no-op.
- if err := s.EnableNIC(nicID); err != nil {
- t.Fatalf("s.EnableNIC(%d): %s", nicID, err)
- }
- if addrs := s.AllAddresses()[nicID]; !containsV6Addr(addrs, addr.AddressWithPrefix) {
- t.Fatalf("got s.AllAddresses()[%d] = %+v, want = %+v", nicID, addrs, addr)
- }
- if err := checkGetMainNICAddress(s, nicID, header.IPv6ProtocolNumber, addr.AddressWithPrefix); err != nil {
- t.Fatal(err)
- }
-}
-
-func TestStackReceiveBufferSizeOption(t *testing.T) {
- const sMin = stack.MinBufferSize
- testCases := []struct {
- name string
- rs tcpip.ReceiveBufferSizeOption
- err tcpip.Error
- }{
- // Invalid configurations.
- {"min_below_zero", tcpip.ReceiveBufferSizeOption{Min: -1, Default: sMin, Max: sMin}, &tcpip.ErrInvalidOptionValue{}},
- {"min_zero", tcpip.ReceiveBufferSizeOption{Min: 0, Default: sMin, Max: sMin}, &tcpip.ErrInvalidOptionValue{}},
- {"default_below_min", tcpip.ReceiveBufferSizeOption{Min: sMin, Default: sMin - 1, Max: sMin - 1}, &tcpip.ErrInvalidOptionValue{}},
- {"default_above_max", tcpip.ReceiveBufferSizeOption{Min: sMin, Default: sMin + 1, Max: sMin}, &tcpip.ErrInvalidOptionValue{}},
- {"max_below_min", tcpip.ReceiveBufferSizeOption{Min: sMin, Default: sMin + 1, Max: sMin - 1}, &tcpip.ErrInvalidOptionValue{}},
-
-		// Valid configurations.
- {"in_ascending_order", tcpip.ReceiveBufferSizeOption{Min: sMin, Default: sMin + 1, Max: sMin + 2}, nil},
- {"all_equal", tcpip.ReceiveBufferSizeOption{Min: sMin, Default: sMin, Max: sMin}, nil},
- {"min_default_equal", tcpip.ReceiveBufferSizeOption{Min: sMin, Default: sMin, Max: sMin + 1}, nil},
- {"default_max_equal", tcpip.ReceiveBufferSizeOption{Min: sMin, Default: sMin + 1, Max: sMin + 1}, nil},
- }
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- s := stack.New(stack.Options{})
- defer s.Close()
- if err := s.SetOption(tc.rs); err != tc.err {
- t.Fatalf("s.SetOption(%#v) = %v, want: %v", tc.rs, err, tc.err)
- }
- var rs tcpip.ReceiveBufferSizeOption
- if tc.err == nil {
- if err := s.Option(&rs); err != nil {
- t.Fatalf("s.Option(%#v) = %v, want: nil", rs, err)
- }
- if got, want := rs, tc.rs; got != want {
- t.Fatalf("s.Option(..) returned unexpected value got: %#v, want: %#v", got, want)
- }
- }
- })
- }
-}
-
-func TestStackSendBufferSizeOption(t *testing.T) {
- const sMin = stack.MinBufferSize
- testCases := []struct {
- name string
- ss tcpip.SendBufferSizeOption
- err tcpip.Error
- }{
- // Invalid configurations.
- {"min_below_zero", tcpip.SendBufferSizeOption{Min: -1, Default: sMin, Max: sMin}, &tcpip.ErrInvalidOptionValue{}},
- {"min_zero", tcpip.SendBufferSizeOption{Min: 0, Default: sMin, Max: sMin}, &tcpip.ErrInvalidOptionValue{}},
- {"default_below_min", tcpip.SendBufferSizeOption{Min: 0, Default: sMin - 1, Max: sMin - 1}, &tcpip.ErrInvalidOptionValue{}},
- {"default_above_max", tcpip.SendBufferSizeOption{Min: 0, Default: sMin + 1, Max: sMin}, &tcpip.ErrInvalidOptionValue{}},
- {"max_below_min", tcpip.SendBufferSizeOption{Min: sMin, Default: sMin + 1, Max: sMin - 1}, &tcpip.ErrInvalidOptionValue{}},
-
-		// Valid configurations.
- {"in_ascending_order", tcpip.SendBufferSizeOption{Min: sMin, Default: sMin + 1, Max: sMin + 2}, nil},
- {"all_equal", tcpip.SendBufferSizeOption{Min: sMin, Default: sMin, Max: sMin}, nil},
- {"min_default_equal", tcpip.SendBufferSizeOption{Min: sMin, Default: sMin, Max: sMin + 1}, nil},
- {"default_max_equal", tcpip.SendBufferSizeOption{Min: sMin, Default: sMin + 1, Max: sMin + 1}, nil},
- }
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- s := stack.New(stack.Options{})
- defer s.Close()
- err := s.SetOption(tc.ss)
- if diff := cmp.Diff(tc.err, err); diff != "" {
- t.Fatalf("unexpected error from s.SetOption(%+v), (-want, +got):\n%s", tc.ss, diff)
- }
- if tc.err == nil {
- var ss tcpip.SendBufferSizeOption
- if err := s.Option(&ss); err != nil {
- t.Fatalf("s.Option(%+v) = %v, want: nil", ss, err)
- }
- if got, want := ss, tc.ss; got != want {
- t.Fatalf("s.Option(..) returned unexpected value got: %#v, want: %#v", got, want)
- }
- }
- })
- }
-}
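Both buffer-size tests follow the same SetOption/Option round trip. A hedged standalone sketch for the send-buffer case, reusing stack.MinBufferSize as the baseline:

    package main

    import (
        "fmt"

        "gvisor.dev/gvisor/pkg/tcpip"
        "gvisor.dev/gvisor/pkg/tcpip/stack"
    )

    func main() {
        s := stack.New(stack.Options{})
        defer s.Close()

        // Min <= Default <= Max is required; otherwise SetOption returns
        // *tcpip.ErrInvalidOptionValue.
        opt := tcpip.SendBufferSizeOption{
            Min:     stack.MinBufferSize,
            Default: stack.MinBufferSize + 1,
            Max:     stack.MinBufferSize + 2,
        }
        if err := s.SetOption(opt); err != nil {
            panic(fmt.Sprintf("SetOption(%#v): %s", opt, err))
        }

        // Read the value back through the matching Option call.
        var got tcpip.SendBufferSizeOption
        if err := s.Option(&got); err != nil {
            panic(fmt.Sprintf("Option: %s", err))
        }
        fmt.Printf("send buffer sizes: %+v\n", got)
    }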
-
-func TestOutgoingSubnetBroadcast(t *testing.T) {
- const (
- unspecifiedNICID = 0
- nicID1 = 1
- )
-
- defaultAddr := tcpip.AddressWithPrefix{
- Address: header.IPv4Any,
- PrefixLen: 0,
- }
- defaultSubnet := defaultAddr.Subnet()
- ipv4Addr := tcpip.AddressWithPrefix{
- Address: "\xc0\xa8\x01\x3a",
- PrefixLen: 24,
- }
- ipv4Subnet := ipv4Addr.Subnet()
- ipv4SubnetBcast := ipv4Subnet.Broadcast()
- ipv4Gateway := testutil.MustParse4("192.168.1.1")
- ipv4AddrPrefix31 := tcpip.AddressWithPrefix{
- Address: "\xc0\xa8\x01\x3a",
- PrefixLen: 31,
- }
- ipv4Subnet31 := ipv4AddrPrefix31.Subnet()
- ipv4Subnet31Bcast := ipv4Subnet31.Broadcast()
- ipv4AddrPrefix32 := tcpip.AddressWithPrefix{
- Address: "\xc0\xa8\x01\x3a",
- PrefixLen: 32,
- }
- ipv4Subnet32 := ipv4AddrPrefix32.Subnet()
- ipv4Subnet32Bcast := ipv4Subnet32.Broadcast()
- ipv6Addr := tcpip.AddressWithPrefix{
- Address: "\x20\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01",
- PrefixLen: 64,
- }
- ipv6Subnet := ipv6Addr.Subnet()
- ipv6SubnetBcast := ipv6Subnet.Broadcast()
- remNetAddr := tcpip.AddressWithPrefix{
- Address: "\x64\x0a\x7b\x18",
- PrefixLen: 24,
- }
- remNetSubnet := remNetAddr.Subnet()
- remNetSubnetBcast := remNetSubnet.Broadcast()
-
- tests := []struct {
- name string
- nicAddr tcpip.ProtocolAddress
- routes []tcpip.Route
- remoteAddr tcpip.Address
- expectedLocalAddress tcpip.Address
- expectedRemoteAddress tcpip.Address
- expectedRemoteLinkAddress tcpip.LinkAddress
- expectedNextHop tcpip.Address
- expectedNetProto tcpip.NetworkProtocolNumber
- expectedLoop stack.PacketLooping
- }{
- // Broadcast to a locally attached subnet populates the broadcast MAC.
- {
- name: "IPv4 Broadcast to local subnet",
- nicAddr: tcpip.ProtocolAddress{
- Protocol: header.IPv4ProtocolNumber,
- AddressWithPrefix: ipv4Addr,
- },
- routes: []tcpip.Route{
- {
- Destination: ipv4Subnet,
- NIC: nicID1,
- },
- },
- remoteAddr: ipv4SubnetBcast,
- expectedLocalAddress: ipv4Addr.Address,
- expectedRemoteAddress: ipv4SubnetBcast,
- expectedRemoteLinkAddress: header.EthernetBroadcastAddress,
- expectedNetProto: header.IPv4ProtocolNumber,
- expectedLoop: stack.PacketOut | stack.PacketLoop,
- },
- // Broadcast to a locally attached /31 subnet does not populate the
- // broadcast MAC.
- {
- name: "IPv4 Broadcast to local /31 subnet",
- nicAddr: tcpip.ProtocolAddress{
- Protocol: header.IPv4ProtocolNumber,
- AddressWithPrefix: ipv4AddrPrefix31,
- },
- routes: []tcpip.Route{
- {
- Destination: ipv4Subnet31,
- NIC: nicID1,
- },
- },
- remoteAddr: ipv4Subnet31Bcast,
- expectedLocalAddress: ipv4AddrPrefix31.Address,
- expectedRemoteAddress: ipv4Subnet31Bcast,
- expectedNetProto: header.IPv4ProtocolNumber,
- expectedLoop: stack.PacketOut,
- },
- // Broadcast to a locally attached /32 subnet does not populate the
- // broadcast MAC.
- {
- name: "IPv4 Broadcast to local /32 subnet",
- nicAddr: tcpip.ProtocolAddress{
- Protocol: header.IPv4ProtocolNumber,
- AddressWithPrefix: ipv4AddrPrefix32,
- },
- routes: []tcpip.Route{
- {
- Destination: ipv4Subnet32,
- NIC: nicID1,
- },
- },
- remoteAddr: ipv4Subnet32Bcast,
- expectedLocalAddress: ipv4AddrPrefix32.Address,
- expectedRemoteAddress: ipv4Subnet32Bcast,
- expectedNetProto: header.IPv4ProtocolNumber,
- expectedLoop: stack.PacketOut,
- },
- // IPv6 has no notion of a broadcast.
- {
- name: "IPv6 'Broadcast' to local subnet",
- nicAddr: tcpip.ProtocolAddress{
- Protocol: header.IPv6ProtocolNumber,
- AddressWithPrefix: ipv6Addr,
- },
- routes: []tcpip.Route{
- {
- Destination: ipv6Subnet,
- NIC: nicID1,
- },
- },
- remoteAddr: ipv6SubnetBcast,
- expectedLocalAddress: ipv6Addr.Address,
- expectedRemoteAddress: ipv6SubnetBcast,
- expectedNetProto: header.IPv6ProtocolNumber,
- expectedLoop: stack.PacketOut,
- },
-		// Broadcast to a remote subnet in the route table is sent to the next-hop
- // gateway.
- {
- name: "IPv4 Broadcast to remote subnet",
- nicAddr: tcpip.ProtocolAddress{
- Protocol: header.IPv4ProtocolNumber,
- AddressWithPrefix: ipv4Addr,
- },
- routes: []tcpip.Route{
- {
- Destination: remNetSubnet,
- Gateway: ipv4Gateway,
- NIC: nicID1,
- },
- },
- remoteAddr: remNetSubnetBcast,
- expectedLocalAddress: ipv4Addr.Address,
- expectedRemoteAddress: remNetSubnetBcast,
- expectedNextHop: ipv4Gateway,
- expectedNetProto: header.IPv4ProtocolNumber,
- expectedLoop: stack.PacketOut,
- },
-		// Broadcast to an unknown subnet follows the default route. Note that this
-		// is essentially just routing an unknown destination IP, because without
-		// any subnet prefix information a subnet broadcast address is just a
-		// normal IP address.
- {
- name: "IPv4 Broadcast to unknown subnet",
- nicAddr: tcpip.ProtocolAddress{
- Protocol: header.IPv4ProtocolNumber,
- AddressWithPrefix: ipv4Addr,
- },
- routes: []tcpip.Route{
- {
- Destination: defaultSubnet,
- Gateway: ipv4Gateway,
- NIC: nicID1,
- },
- },
- remoteAddr: remNetSubnetBcast,
- expectedLocalAddress: ipv4Addr.Address,
- expectedRemoteAddress: remNetSubnetBcast,
- expectedNextHop: ipv4Gateway,
- expectedNetProto: header.IPv4ProtocolNumber,
- expectedLoop: stack.PacketOut,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{arp.NewProtocol, ipv4.NewProtocol, ipv6.NewProtocol},
- })
- ep := channel.New(0, defaultMTU, "")
- ep.LinkEPCapabilities |= stack.CapabilityResolutionRequired
- if err := s.CreateNIC(nicID1, ep); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID1, err)
- }
- if err := s.AddProtocolAddress(nicID1, test.nicAddr); err != nil {
- t.Fatalf("AddProtocolAddress(%d, %+v): %s", nicID1, test.nicAddr, err)
- }
-
- s.SetRouteTable(test.routes)
-
- var netProto tcpip.NetworkProtocolNumber
- switch l := len(test.remoteAddr); l {
- case header.IPv4AddressSize:
- netProto = header.IPv4ProtocolNumber
- case header.IPv6AddressSize:
- netProto = header.IPv6ProtocolNumber
- default:
- t.Fatalf("got unexpected address length = %d bytes", l)
- }
-
- r, err := s.FindRoute(unspecifiedNICID, "" /* localAddr */, test.remoteAddr, netProto, false /* multicastLoop */)
- if err != nil {
- t.Fatalf("FindRoute(%d, '', %s, %d): %s", unspecifiedNICID, test.remoteAddr, netProto, err)
- }
- if r.LocalAddress() != test.expectedLocalAddress {
- t.Errorf("got r.LocalAddress() = %s, want = %s", r.LocalAddress(), test.expectedLocalAddress)
- }
- if r.RemoteAddress() != test.expectedRemoteAddress {
- t.Errorf("got r.RemoteAddress = %s, want = %s", r.RemoteAddress(), test.expectedRemoteAddress)
- }
- if got := r.RemoteLinkAddress(); got != test.expectedRemoteLinkAddress {
- t.Errorf("got r.RemoteLinkAddress() = %s, want = %s", got, test.expectedRemoteLinkAddress)
- }
- if r.NextHop() != test.expectedNextHop {
- t.Errorf("got r.NextHop() = %s, want = %s", r.NextHop(), test.expectedNextHop)
- }
- if r.NetProto() != test.expectedNetProto {
- t.Errorf("got r.NetProto() = %d, want = %d", r.NetProto(), test.expectedNetProto)
- }
- if r.Loop() != test.expectedLoop {
- t.Errorf("got r.Loop() = %x, want = %x", r.Loop(), test.expectedLoop)
- }
- })
- }
-}
-
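-// TestResolveWith checks that a route which initially requires link resolution
-// no longer reports IsResolutionRequired after ResolveWith is called with a
-// link address.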
-func TestResolveWith(t *testing.T) {
- const (
- unspecifiedNICID = 0
- nicID = 1
- )
-
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, arp.NewProtocol},
- })
- ep := channel.New(0, defaultMTU, "")
- ep.LinkEPCapabilities |= stack.CapabilityResolutionRequired
- if err := s.CreateNIC(nicID, ep); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID, err)
- }
- addr := tcpip.ProtocolAddress{
- Protocol: header.IPv4ProtocolNumber,
- AddressWithPrefix: tcpip.AddressWithPrefix{
- Address: tcpip.Address([]byte{192, 168, 1, 58}),
- PrefixLen: 24,
- },
- }
- if err := s.AddProtocolAddress(nicID, addr); err != nil {
- t.Fatalf("AddProtocolAddress(%d, %#v): %s", nicID, addr, err)
- }
-
- s.SetRouteTable([]tcpip.Route{{Destination: header.IPv4EmptySubnet, NIC: nicID}})
-
- remoteAddr := tcpip.Address([]byte{192, 168, 1, 59})
- r, err := s.FindRoute(unspecifiedNICID, "" /* localAddr */, remoteAddr, header.IPv4ProtocolNumber, false /* multicastLoop */)
- if err != nil {
- t.Fatalf("FindRoute(%d, '', %s, %d): %s", unspecifiedNICID, remoteAddr, header.IPv4ProtocolNumber, err)
- }
- defer r.Release()
-
- // Should initially require resolution.
- if !r.IsResolutionRequired() {
- t.Fatal("got r.IsResolutionRequired() = false, want = true")
- }
-
- // Manually resolving the route should no longer require resolution.
- r.ResolveWith("\x01")
- if r.IsResolutionRequired() {
- t.Fatal("got r.IsResolutionRequired() = true, want = false")
- }
-}
-
-// TestRouteReleaseAfterAddrRemoval tests that releasing a Route after its
-// associated address is removed does not cause a panic.
-func TestRouteReleaseAfterAddrRemoval(t *testing.T) {
- const (
- nicID = 1
- localAddr = tcpip.Address("\x01")
- remoteAddr = tcpip.Address("\x02")
- )
-
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- })
-
- ep := channel.New(0, defaultMTU, "")
- if err := s.CreateNIC(nicID, ep); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID, err)
- }
- if err := s.AddAddress(nicID, fakeNetNumber, localAddr); err != nil {
- t.Fatalf("s.AddAddress(%d, %d, %s): %s", nicID, fakeNetNumber, localAddr, err)
- }
- {
- subnet, err := tcpip.NewSubnet("\x00", "\x00")
- if err != nil {
- t.Fatal(err)
- }
- s.SetRouteTable([]tcpip.Route{{Destination: subnet, Gateway: "\x00", NIC: 1}})
- }
-
- r, err := s.FindRoute(nicID, localAddr, remoteAddr, fakeNetNumber, false /* multicastLoop */)
- if err != nil {
- t.Fatalf("s.FindRoute(%d, %s, %s, %d, false): %s", nicID, localAddr, remoteAddr, fakeNetNumber, err)
- }
- // Should not panic.
- defer r.Release()
-
-	// Remove the address; releasing the route afterwards must not panic.
- if err := s.RemoveAddress(nicID, localAddr); err != nil {
- t.Fatalf("s.RemoveAddress(%d, %s): %s", nicID, localAddr, err)
- }
-}
-
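-// TestGetNetworkEndpoint checks that GetNetworkEndpoint returns the endpoint
-// registered for the requested network protocol.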
-func TestGetNetworkEndpoint(t *testing.T) {
- const nicID = 1
-
- tests := []struct {
- name string
- protoFactory stack.NetworkProtocolFactory
- protoNum tcpip.NetworkProtocolNumber
- }{
- {
- name: "IPv4",
- protoFactory: ipv4.NewProtocol,
- protoNum: ipv4.ProtocolNumber,
- },
- {
- name: "IPv6",
- protoFactory: ipv6.NewProtocol,
- protoNum: ipv6.ProtocolNumber,
- },
- }
-
- factories := make([]stack.NetworkProtocolFactory, 0, len(tests))
- for _, test := range tests {
- factories = append(factories, test.protoFactory)
- }
-
- s := stack.New(stack.Options{
- NetworkProtocols: factories,
- })
-
- if err := s.CreateNIC(nicID, channel.New(0, defaultMTU, "")); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID, err)
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- ep, err := s.GetNetworkEndpoint(nicID, test.protoNum)
- if err != nil {
- t.Fatalf("s.GetNetworkEndpoint(%d, %d): %s", nicID, test.protoNum, err)
- }
-
- if got := ep.NetworkProtocolNumber(); got != test.protoNum {
- t.Fatalf("got ep.NetworkProtocolNumber() = %d, want = %d", got, test.protoNum)
- }
- })
- }
-}
-
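-// TestGetMainNICAddressWhenNICDisabled checks that a NIC's main address is
-// still returned after the NIC is disabled.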
-func TestGetMainNICAddressWhenNICDisabled(t *testing.T) {
- const nicID = 1
-
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- })
-
- if err := s.CreateNIC(nicID, channel.New(0, defaultMTU, "")); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID, err)
- }
-
- protocolAddress := tcpip.ProtocolAddress{
- Protocol: fakeNetNumber,
- AddressWithPrefix: tcpip.AddressWithPrefix{
- Address: "\x01",
- PrefixLen: 8,
- },
- }
- if err := s.AddProtocolAddress(nicID, protocolAddress); err != nil {
- t.Fatalf("AddProtocolAddress(%d, %#v): %s", nicID, protocolAddress, err)
- }
-
- // Check that we get the right initial address and prefix length.
- if err := checkGetMainNICAddress(s, nicID, fakeNetNumber, protocolAddress.AddressWithPrefix); err != nil {
- t.Fatal(err)
- }
-
-	// Should still get the address when the NIC is disabled.
- if err := s.DisableNIC(nicID); err != nil {
- t.Fatalf("DisableNIC(%d): %s", nicID, err)
- }
- if err := checkGetMainNICAddress(s, nicID, fakeNetNumber, protocolAddress.AddressWithPrefix); err != nil {
- t.Fatal(err)
- }
-}
-
-// TestAddRoute tests Stack.AddRoute
-func TestAddRoute(t *testing.T) {
- s := stack.New(stack.Options{})
-
- subnet1, err := tcpip.NewSubnet("\x00", "\x00")
- if err != nil {
- t.Fatal(err)
- }
-
- subnet2, err := tcpip.NewSubnet("\x01", "\x01")
- if err != nil {
- t.Fatal(err)
- }
-
- expected := []tcpip.Route{
- {Destination: subnet1, Gateway: "\x00", NIC: 1},
- {Destination: subnet2, Gateway: "\x00", NIC: 1},
- }
-
- // Initialize the route table with one route.
- s.SetRouteTable([]tcpip.Route{expected[0]})
-
- // Add another route.
- s.AddRoute(expected[1])
-
- rt := s.GetRouteTable()
- if got, want := len(rt), len(expected); got != want {
- t.Fatalf("Unexpected route table length got = %d, want = %d", got, want)
- }
- for i, route := range rt {
- if got, want := route, expected[i]; got != want {
- t.Fatalf("Unexpected route got = %#v, want = %#v", got, want)
- }
- }
-}
-
-// TestRemoveRoutes tests Stack.RemoveRoutes
-func TestRemoveRoutes(t *testing.T) {
- s := stack.New(stack.Options{})
-
- addressToRemove := tcpip.Address("\x01")
- subnet1, err := tcpip.NewSubnet(addressToRemove, "\x01")
- if err != nil {
- t.Fatal(err)
- }
-
- subnet2, err := tcpip.NewSubnet(addressToRemove, "\x01")
- if err != nil {
- t.Fatal(err)
- }
-
- subnet3, err := tcpip.NewSubnet("\x02", "\x02")
- if err != nil {
- t.Fatal(err)
- }
-
- // Initialize the route table with three routes.
- s.SetRouteTable([]tcpip.Route{
- {Destination: subnet1, Gateway: "\x00", NIC: 1},
- {Destination: subnet2, Gateway: "\x00", NIC: 1},
- {Destination: subnet3, Gateway: "\x00", NIC: 1},
- })
-
- // Remove routes with the specific address.
- s.RemoveRoutes(func(r tcpip.Route) bool {
- return r.Destination.ID() == addressToRemove
- })
-
- expected := []tcpip.Route{{Destination: subnet3, Gateway: "\x00", NIC: 1}}
- rt := s.GetRouteTable()
- if got, want := len(rt), len(expected); got != want {
- t.Fatalf("Unexpected route table length got = %d, want = %d", got, want)
- }
- for i, route := range rt {
- if got, want := route, expected[i]; got != want {
- t.Fatalf("Unexpected route got = %#v, want = %#v", got, want)
- }
- }
-}
-
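-// TestFindRouteWithForwarding exercises FindRoute across two NICs with and
-// without forwarding enabled, and checks that routes which depend on
-// forwarding stop working once forwarding is disabled again.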
-func TestFindRouteWithForwarding(t *testing.T) {
- const (
- nicID1 = 1
- nicID2 = 2
-
- nic1Addr = tcpip.Address("\x01")
- nic2Addr = tcpip.Address("\x02")
- remoteAddr = tcpip.Address("\x03")
- )
-
- type netCfg struct {
- proto tcpip.NetworkProtocolNumber
- factory stack.NetworkProtocolFactory
- nic1Addr tcpip.Address
- nic2Addr tcpip.Address
- remoteAddr tcpip.Address
- }
-
- fakeNetCfg := netCfg{
- proto: fakeNetNumber,
- factory: fakeNetFactory,
- nic1Addr: nic1Addr,
- nic2Addr: nic2Addr,
- remoteAddr: remoteAddr,
- }
-
- globalIPv6Addr1 := tcpip.Address(net.ParseIP("a::1").To16())
- globalIPv6Addr2 := tcpip.Address(net.ParseIP("a::2").To16())
-
- ipv6LinkLocalNIC1WithGlobalRemote := netCfg{
- proto: ipv6.ProtocolNumber,
- factory: ipv6.NewProtocol,
- nic1Addr: llAddr1,
- nic2Addr: globalIPv6Addr2,
- remoteAddr: globalIPv6Addr1,
- }
- ipv6GlobalNIC1WithLinkLocalRemote := netCfg{
- proto: ipv6.ProtocolNumber,
- factory: ipv6.NewProtocol,
- nic1Addr: globalIPv6Addr1,
- nic2Addr: llAddr1,
- remoteAddr: llAddr2,
- }
- ipv6GlobalNIC1WithLinkLocalMulticastRemote := netCfg{
- proto: ipv6.ProtocolNumber,
- factory: ipv6.NewProtocol,
- nic1Addr: globalIPv6Addr1,
- nic2Addr: globalIPv6Addr2,
- remoteAddr: "\xff\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01",
- }
-
- tests := []struct {
- name string
-
- netCfg netCfg
- forwardingEnabled bool
-
- addrNIC tcpip.NICID
- localAddr tcpip.Address
-
- findRouteErr tcpip.Error
- dependentOnForwarding bool
- }{
- {
- name: "forwarding disabled and localAddr not on specified NIC but route from different NIC",
- netCfg: fakeNetCfg,
- forwardingEnabled: false,
- addrNIC: nicID1,
- localAddr: fakeNetCfg.nic2Addr,
- findRouteErr: &tcpip.ErrNoRoute{},
- dependentOnForwarding: false,
- },
- {
- name: "forwarding enabled and localAddr not on specified NIC but route from different NIC",
- netCfg: fakeNetCfg,
- forwardingEnabled: true,
- addrNIC: nicID1,
- localAddr: fakeNetCfg.nic2Addr,
- findRouteErr: &tcpip.ErrNoRoute{},
- dependentOnForwarding: false,
- },
- {
- name: "forwarding disabled and localAddr on specified NIC but route from different NIC",
- netCfg: fakeNetCfg,
- forwardingEnabled: false,
- addrNIC: nicID1,
- localAddr: fakeNetCfg.nic1Addr,
- findRouteErr: &tcpip.ErrNoRoute{},
- dependentOnForwarding: false,
- },
- {
- name: "forwarding enabled and localAddr on specified NIC but route from different NIC",
- netCfg: fakeNetCfg,
- forwardingEnabled: true,
- addrNIC: nicID1,
- localAddr: fakeNetCfg.nic1Addr,
- findRouteErr: nil,
- dependentOnForwarding: true,
- },
- {
- name: "forwarding disabled and localAddr on specified NIC and route from same NIC",
- netCfg: fakeNetCfg,
- forwardingEnabled: false,
- addrNIC: nicID2,
- localAddr: fakeNetCfg.nic2Addr,
- findRouteErr: nil,
- dependentOnForwarding: false,
- },
- {
- name: "forwarding enabled and localAddr on specified NIC and route from same NIC",
- netCfg: fakeNetCfg,
- forwardingEnabled: true,
- addrNIC: nicID2,
- localAddr: fakeNetCfg.nic2Addr,
- findRouteErr: nil,
- dependentOnForwarding: false,
- },
- {
- name: "forwarding disabled and localAddr not on specified NIC but route from same NIC",
- netCfg: fakeNetCfg,
- forwardingEnabled: false,
- addrNIC: nicID2,
- localAddr: fakeNetCfg.nic1Addr,
- findRouteErr: &tcpip.ErrNoRoute{},
- dependentOnForwarding: false,
- },
- {
- name: "forwarding enabled and localAddr not on specified NIC but route from same NIC",
- netCfg: fakeNetCfg,
- forwardingEnabled: true,
- addrNIC: nicID2,
- localAddr: fakeNetCfg.nic1Addr,
- findRouteErr: &tcpip.ErrNoRoute{},
- dependentOnForwarding: false,
- },
- {
- name: "forwarding disabled and localAddr on same NIC as route",
- netCfg: fakeNetCfg,
- forwardingEnabled: false,
- localAddr: fakeNetCfg.nic2Addr,
- findRouteErr: nil,
- dependentOnForwarding: false,
- },
- {
- name: "forwarding enabled and localAddr on same NIC as route",
- netCfg: fakeNetCfg,
-			forwardingEnabled:     true,
- localAddr: fakeNetCfg.nic2Addr,
- findRouteErr: nil,
- dependentOnForwarding: false,
- },
- {
- name: "forwarding disabled and localAddr on different NIC as route",
- netCfg: fakeNetCfg,
- forwardingEnabled: false,
- localAddr: fakeNetCfg.nic1Addr,
- findRouteErr: &tcpip.ErrNoRoute{},
- dependentOnForwarding: false,
- },
- {
- name: "forwarding enabled and localAddr on different NIC as route",
- netCfg: fakeNetCfg,
- forwardingEnabled: true,
- localAddr: fakeNetCfg.nic1Addr,
- findRouteErr: nil,
- dependentOnForwarding: true,
- },
- {
- name: "forwarding disabled and specified NIC only has link-local addr with route on different NIC",
- netCfg: ipv6LinkLocalNIC1WithGlobalRemote,
- forwardingEnabled: false,
- addrNIC: nicID1,
- findRouteErr: &tcpip.ErrNoRoute{},
- dependentOnForwarding: false,
- },
- {
- name: "forwarding enabled and specified NIC only has link-local addr with route on different NIC",
- netCfg: ipv6LinkLocalNIC1WithGlobalRemote,
- forwardingEnabled: true,
- addrNIC: nicID1,
- findRouteErr: &tcpip.ErrNoRoute{},
- dependentOnForwarding: false,
- },
- {
- name: "forwarding disabled and link-local local addr with route on different NIC",
- netCfg: ipv6LinkLocalNIC1WithGlobalRemote,
- forwardingEnabled: false,
- localAddr: ipv6LinkLocalNIC1WithGlobalRemote.nic1Addr,
- findRouteErr: &tcpip.ErrNoRoute{},
- dependentOnForwarding: false,
- },
- {
-			name:                  "forwarding enabled and link-local local addr with route on different NIC",
- netCfg: ipv6LinkLocalNIC1WithGlobalRemote,
- forwardingEnabled: true,
- localAddr: ipv6LinkLocalNIC1WithGlobalRemote.nic1Addr,
- findRouteErr: &tcpip.ErrNoRoute{},
- dependentOnForwarding: false,
- },
- {
- name: "forwarding disabled and global local addr with route on same NIC",
- netCfg: ipv6LinkLocalNIC1WithGlobalRemote,
-			forwardingEnabled:     false,
- localAddr: ipv6LinkLocalNIC1WithGlobalRemote.nic2Addr,
- findRouteErr: nil,
- dependentOnForwarding: false,
- },
- {
- name: "forwarding disabled and link-local local addr with route on same NIC",
- netCfg: ipv6GlobalNIC1WithLinkLocalRemote,
- forwardingEnabled: false,
- localAddr: ipv6GlobalNIC1WithLinkLocalRemote.nic2Addr,
- findRouteErr: nil,
- dependentOnForwarding: false,
- },
- {
- name: "forwarding enabled and link-local local addr with route on same NIC",
- netCfg: ipv6GlobalNIC1WithLinkLocalRemote,
- forwardingEnabled: true,
- localAddr: ipv6GlobalNIC1WithLinkLocalRemote.nic2Addr,
- findRouteErr: nil,
- dependentOnForwarding: false,
- },
- {
- name: "forwarding disabled and global local addr with link-local remote on different NIC",
- netCfg: ipv6GlobalNIC1WithLinkLocalRemote,
- forwardingEnabled: false,
- localAddr: ipv6GlobalNIC1WithLinkLocalRemote.nic1Addr,
- findRouteErr: &tcpip.ErrNetworkUnreachable{},
- dependentOnForwarding: false,
- },
- {
- name: "forwarding enabled and global local addr with link-local remote on different NIC",
- netCfg: ipv6GlobalNIC1WithLinkLocalRemote,
- forwardingEnabled: true,
- localAddr: ipv6GlobalNIC1WithLinkLocalRemote.nic1Addr,
- findRouteErr: &tcpip.ErrNetworkUnreachable{},
- dependentOnForwarding: false,
- },
- {
- name: "forwarding disabled and global local addr with link-local multicast remote on different NIC",
- netCfg: ipv6GlobalNIC1WithLinkLocalMulticastRemote,
- forwardingEnabled: false,
- localAddr: ipv6GlobalNIC1WithLinkLocalMulticastRemote.nic1Addr,
- findRouteErr: &tcpip.ErrNetworkUnreachable{},
- dependentOnForwarding: false,
- },
- {
- name: "forwarding enabled and global local addr with link-local multicast remote on different NIC",
- netCfg: ipv6GlobalNIC1WithLinkLocalMulticastRemote,
- forwardingEnabled: true,
- localAddr: ipv6GlobalNIC1WithLinkLocalMulticastRemote.nic1Addr,
- findRouteErr: &tcpip.ErrNetworkUnreachable{},
- dependentOnForwarding: false,
- },
- {
- name: "forwarding disabled and global local addr with link-local multicast remote on same NIC",
- netCfg: ipv6GlobalNIC1WithLinkLocalMulticastRemote,
- forwardingEnabled: false,
- localAddr: ipv6GlobalNIC1WithLinkLocalMulticastRemote.nic2Addr,
- findRouteErr: nil,
- dependentOnForwarding: false,
- },
- {
- name: "forwarding enabled and global local addr with link-local multicast remote on same NIC",
- netCfg: ipv6GlobalNIC1WithLinkLocalMulticastRemote,
- forwardingEnabled: true,
- localAddr: ipv6GlobalNIC1WithLinkLocalMulticastRemote.nic2Addr,
- findRouteErr: nil,
- dependentOnForwarding: false,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{test.netCfg.factory},
- })
-
- ep1 := channel.New(1, defaultMTU, "")
- if err := s.CreateNIC(nicID1, ep1); err != nil {
-				t.Fatalf("CreateNIC(%d, _): %s", nicID1, err)
- }
-
- ep2 := channel.New(1, defaultMTU, "")
- if err := s.CreateNIC(nicID2, ep2); err != nil {
-				t.Fatalf("CreateNIC(%d, _): %s", nicID2, err)
- }
-
- if err := s.AddAddress(nicID1, test.netCfg.proto, test.netCfg.nic1Addr); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s): %s", nicID1, test.netCfg.proto, test.netCfg.nic1Addr, err)
- }
-
- if err := s.AddAddress(nicID2, test.netCfg.proto, test.netCfg.nic2Addr); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s): %s", nicID2, test.netCfg.proto, test.netCfg.nic2Addr, err)
- }
-
- if err := s.SetForwardingDefaultAndAllNICs(test.netCfg.proto, test.forwardingEnabled); err != nil {
- t.Fatalf("SetForwardingDefaultAndAllNICs(%d, %t): %s", test.netCfg.proto, test.forwardingEnabled, err)
- }
-
- s.SetRouteTable([]tcpip.Route{{Destination: test.netCfg.remoteAddr.WithPrefix().Subnet(), NIC: nicID2}})
-
- r, err := s.FindRoute(test.addrNIC, test.localAddr, test.netCfg.remoteAddr, test.netCfg.proto, false /* multicastLoop */)
- if err == nil {
- defer r.Release()
- }
- if diff := cmp.Diff(test.findRouteErr, err); diff != "" {
- t.Fatalf("unexpected error from FindRoute(%d, %s, %s, %d, false), (-want, +got):\n%s", test.addrNIC, test.localAddr, test.netCfg.remoteAddr, test.netCfg.proto, diff)
- }
-
- if test.findRouteErr != nil {
- return
- }
-
- if r.LocalAddress() != test.localAddr {
- t.Errorf("got r.LocalAddress() = %s, want = %s", r.LocalAddress(), test.localAddr)
- }
- if r.RemoteAddress() != test.netCfg.remoteAddr {
- t.Errorf("got r.RemoteAddress() = %s, want = %s", r.RemoteAddress(), test.netCfg.remoteAddr)
- }
-
- if t.Failed() {
- t.FailNow()
- }
-
- // Sending a packet should always go through NIC2 since we only install a
- // route to test.netCfg.remoteAddr through NIC2.
- data := buffer.View([]byte{1, 2, 3, 4})
- if err := send(r, data); err != nil {
- t.Fatalf("send(_, _): %s", err)
- }
- if n := ep1.Drain(); n != 0 {
- t.Errorf("got %d unexpected packets from ep1", n)
- }
- pkt, ok := ep2.Read()
- if !ok {
- t.Fatal("packet not sent through ep2")
- }
- if pkt.Route.LocalAddress != test.localAddr {
- t.Errorf("got pkt.Route.LocalAddress = %s, want = %s", pkt.Route.LocalAddress, test.localAddr)
- }
- if pkt.Route.RemoteAddress != test.netCfg.remoteAddr {
- t.Errorf("got pkt.Route.RemoteAddress = %s, want = %s", pkt.Route.RemoteAddress, test.netCfg.remoteAddr)
- }
-
- if !test.forwardingEnabled || !test.dependentOnForwarding {
- return
- }
-
- // Disabling forwarding when the route is dependent on forwarding being
- // enabled should make the route invalid.
- if err := s.SetForwardingDefaultAndAllNICs(test.netCfg.proto, false); err != nil {
- t.Fatalf("SetForwardingDefaultAndAllNICs(%d, false): %s", test.netCfg.proto, err)
- }
- {
- err := send(r, data)
- if _, ok := err.(*tcpip.ErrInvalidEndpointState); !ok {
- t.Fatalf("got send(_, _) = %s, want = %s", err, &tcpip.ErrInvalidEndpointState{})
- }
- }
- if n := ep1.Drain(); n != 0 {
- t.Errorf("got %d unexpected packets from ep1", n)
- }
- if n := ep2.Drain(); n != 0 {
- t.Errorf("got %d unexpected packets from ep2", n)
- }
- })
- }
-}
-
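-// TestWritePacketToRemote checks that WritePacketToRemote delivers a payload
-// to the specified link address, and fails with ErrUnknownDevice for an
-// unknown NIC.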
-func TestWritePacketToRemote(t *testing.T) {
- const nicID = 1
- const MTU = 1280
- e := channel.New(1, MTU, linkAddr1)
- s := stack.New(stack.Options{})
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
- if err := s.EnableNIC(nicID); err != nil {
-		t.Fatalf("EnableNIC(%d) = %s", nicID, err)
- }
- tests := []struct {
- name string
- protocol tcpip.NetworkProtocolNumber
- payload []byte
- }{
- {
- name: "SuccessIPv4",
- protocol: header.IPv4ProtocolNumber,
- payload: []byte{1, 2, 3, 4},
- },
- {
- name: "SuccessIPv6",
- protocol: header.IPv6ProtocolNumber,
- payload: []byte{5, 6, 7, 8},
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- if err := s.WritePacketToRemote(nicID, linkAddr2, test.protocol, buffer.View(test.payload).ToVectorisedView()); err != nil {
- t.Fatalf("s.WritePacketToRemote(_, _, _, _) = %s", err)
- }
-
- pkt, ok := e.Read()
- if got, want := ok, true; got != want {
- t.Fatalf("e.Read() = %t, want %t", got, want)
- }
- if got, want := pkt.Proto, test.protocol; got != want {
- t.Fatalf("pkt.Proto = %d, want %d", got, want)
- }
- if pkt.Route.RemoteLinkAddress != linkAddr2 {
-				t.Fatalf("pkt.Route.RemoteLinkAddress = %s, want %s", pkt.Route.RemoteLinkAddress, linkAddr2)
- }
-			if diff := cmp.Diff(buffer.View(test.payload), pkt.Pkt.Data().AsRange().ToOwnedView()); diff != "" {
- t.Errorf("pkt.Pkt.Data mismatch (-want +got):\n%s", diff)
- }
- })
- }
-
- t.Run("InvalidNICID", func(t *testing.T) {
- err := s.WritePacketToRemote(234, linkAddr2, header.IPv4ProtocolNumber, buffer.View([]byte{1}).ToVectorisedView())
- if _, ok := err.(*tcpip.ErrUnknownDevice); !ok {
- t.Fatalf("s.WritePacketToRemote(_, _, _, _) = %s, want = %s", err, &tcpip.ErrUnknownDevice{})
- }
- pkt, ok := e.Read()
- if got, want := ok, false; got != want {
- t.Fatalf("e.Read() = %t, %v; want %t", got, pkt, want)
- }
- })
-}
-
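-// TestClearNeighborCacheOnNICDisable checks that disabling a NIC clears its
-// neighbor cache and that re-enabling the NIC starts with an empty cache.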
-func TestClearNeighborCacheOnNICDisable(t *testing.T) {
- const (
- nicID = 1
- linkAddr = tcpip.LinkAddress("\x02\x02\x03\x04\x05\x06")
- )
-
- var (
- ipv4Addr = testutil.MustParse4("1.2.3.4")
- ipv6Addr = testutil.MustParse6("102:304:102:304:102:304:102:304")
- )
-
- clock := faketime.NewManualClock()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{arp.NewProtocol, ipv4.NewProtocol, ipv6.NewProtocol},
- Clock: clock,
- })
- e := channel.New(0, 0, "")
- e.LinkEPCapabilities |= stack.CapabilityResolutionRequired
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
-
- addrs := []struct {
- proto tcpip.NetworkProtocolNumber
- addr tcpip.Address
- }{
- {
- proto: ipv4.ProtocolNumber,
- addr: ipv4Addr,
- },
- {
- proto: ipv6.ProtocolNumber,
- addr: ipv6Addr,
- },
- }
- for _, addr := range addrs {
- if err := s.AddStaticNeighbor(nicID, addr.proto, addr.addr, linkAddr); err != nil {
- t.Fatalf("s.AddStaticNeighbor(%d, %d, %s, %s): %s", nicID, addr.proto, addr.addr, linkAddr, err)
- }
-
- if neighbors, err := s.Neighbors(nicID, addr.proto); err != nil {
- t.Fatalf("s.Neighbors(%d, %d): %s", nicID, addr.proto, err)
- } else if diff := cmp.Diff(
- []stack.NeighborEntry{{Addr: addr.addr, LinkAddr: linkAddr, State: stack.Static, UpdatedAt: clock.Now()}},
- neighbors,
- ); diff != "" {
- t.Fatalf("proto=%d neighbors mismatch (-want +got):\n%s", addr.proto, diff)
- }
- }
-
- // Disabling the NIC should clear the neighbor table.
- if err := s.DisableNIC(nicID); err != nil {
- t.Fatalf("s.DisableNIC(%d): %s", nicID, err)
- }
- for _, addr := range addrs {
- if neighbors, err := s.Neighbors(nicID, addr.proto); err != nil {
- t.Fatalf("s.Neighbors(%d, %d): %s", nicID, addr.proto, err)
- } else if len(neighbors) != 0 {
- t.Fatalf("got proto=%d len(neighbors) = %d, want = 0; neighbors = %#v", addr.proto, len(neighbors), neighbors)
- }
- }
-
- // Enabling the NIC should have an empty neighbor table.
- if err := s.EnableNIC(nicID); err != nil {
- t.Fatalf("s.EnableNIC(%d): %s", nicID, err)
- }
- for _, addr := range addrs {
- if neighbors, err := s.Neighbors(nicID, addr.proto); err != nil {
- t.Fatalf("s.Neighbors(%d, %d): %s", nicID, addr.proto, err)
- } else if len(neighbors) != 0 {
- t.Fatalf("got proto=%d len(neighbors) = %d, want = 0; neighbors = %#v", addr.proto, len(neighbors), neighbors)
- }
- }
-}
-
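-// TestGetLinkAddressErrors checks the errors returned by GetLinkAddress for an
-// unknown NIC and for a network protocol without link resolution support.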
-func TestGetLinkAddressErrors(t *testing.T) {
- const (
- nicID = 1
- unknownNICID = nicID + 1
- )
-
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},
- })
- if err := s.CreateNIC(nicID, channel.New(0, 0, "")); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
-
- {
- err := s.GetLinkAddress(unknownNICID, "", "", ipv4.ProtocolNumber, nil)
- if _, ok := err.(*tcpip.ErrUnknownNICID); !ok {
- t.Errorf("got s.GetLinkAddress(%d, '', '', %d, nil) = %s, want = %s", unknownNICID, ipv4.ProtocolNumber, err, &tcpip.ErrUnknownNICID{})
- }
- }
- {
- err := s.GetLinkAddress(nicID, "", "", ipv4.ProtocolNumber, nil)
- if _, ok := err.(*tcpip.ErrNotSupported); !ok {
-			t.Errorf("got s.GetLinkAddress(%d, '', '', %d, nil) = %s, want = %s", nicID, ipv4.ProtocolNumber, err, &tcpip.ErrNotSupported{})
- }
- }
-}
-
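-// TestStaticGetLinkAddress checks that GetLinkAddress resolves statically
-// mapped destinations (the IPv4 broadcast address and IPv6 multicast
-// addresses) to their well-known link addresses.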
-func TestStaticGetLinkAddress(t *testing.T) {
- const (
- nicID = 1
- )
-
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{arp.NewProtocol, ipv4.NewProtocol, ipv6.NewProtocol},
- })
- e := channel.New(0, 0, "")
- e.LinkEPCapabilities |= stack.CapabilityResolutionRequired
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
-
- tests := []struct {
- name string
- proto tcpip.NetworkProtocolNumber
- addr tcpip.Address
- expectedLinkAddr tcpip.LinkAddress
- }{
- {
- name: "IPv4",
- proto: ipv4.ProtocolNumber,
- addr: header.IPv4Broadcast,
- expectedLinkAddr: header.EthernetBroadcastAddress,
- },
- {
- name: "IPv6",
- proto: ipv6.ProtocolNumber,
- addr: header.IPv6AllNodesMulticastAddress,
- expectedLinkAddr: header.EthernetAddressFromMulticastIPv6Address(header.IPv6AllNodesMulticastAddress),
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- ch := make(chan stack.LinkResolutionResult, 1)
- if err := s.GetLinkAddress(nicID, test.addr, "", test.proto, func(r stack.LinkResolutionResult) {
- ch <- r
- }); err != nil {
- t.Fatalf("s.GetLinkAddress(%d, %s, '', %d, _): %s", nicID, test.addr, test.proto, err)
- }
-
- if diff := cmp.Diff(stack.LinkResolutionResult{LinkAddress: test.expectedLinkAddr, Err: nil}, <-ch); diff != "" {
- t.Fatalf("link resolution result mismatch (-want +got):\n%s", diff)
- }
- })
- }
-}
diff --git a/pkg/tcpip/stack/stack_unsafe_state_autogen.go b/pkg/tcpip/stack/stack_unsafe_state_autogen.go
new file mode 100644
index 000000000..758ab3457
--- /dev/null
+++ b/pkg/tcpip/stack/stack_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package stack
diff --git a/pkg/tcpip/stack/transport_demuxer_test.go b/pkg/tcpip/stack/transport_demuxer_test.go
deleted file mode 100644
index 45b09110d..000000000
--- a/pkg/tcpip/stack/transport_demuxer_test.go
+++ /dev/null
@@ -1,446 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package stack_test
-
-import (
- "io/ioutil"
- "math"
- "math/rand"
- "strconv"
- "testing"
-
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/link/channel"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
- "gvisor.dev/gvisor/pkg/tcpip/ports"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/transport/udp"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-const (
- testSrcAddrV6 = "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01"
- testDstAddrV6 = "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02"
-
- testSrcAddrV4 = "\x0a\x00\x00\x01"
- testDstAddrV4 = "\x0a\x00\x00\x02"
-
- testDstPort = 1234
- testSrcPort = 4096
-)
-
-type testContext struct {
- linkEps map[tcpip.NICID]*channel.Endpoint
- s *stack.Stack
- wq waiter.Queue
-}
-
-// newDualTestContextMultiNIC creates the testing context and a NIC for each ID
-// in linkEpIDs.
-func newDualTestContextMultiNIC(t *testing.T, mtu uint32, linkEpIDs []tcpip.NICID) *testContext {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},
- })
- linkEps := make(map[tcpip.NICID]*channel.Endpoint)
- for _, linkEpID := range linkEpIDs {
- channelEp := channel.New(256, mtu, "")
- if err := s.CreateNIC(linkEpID, channelEp); err != nil {
- t.Fatalf("CreateNIC failed: %s", err)
- }
- linkEps[linkEpID] = channelEp
-
- if err := s.AddAddress(linkEpID, ipv4.ProtocolNumber, testDstAddrV4); err != nil {
- t.Fatalf("AddAddress IPv4 failed: %s", err)
- }
-
- if err := s.AddAddress(linkEpID, ipv6.ProtocolNumber, testDstAddrV6); err != nil {
- t.Fatalf("AddAddress IPv6 failed: %s", err)
- }
- }
-
- s.SetRouteTable([]tcpip.Route{
- {Destination: header.IPv4EmptySubnet, NIC: 1},
- {Destination: header.IPv6EmptySubnet, NIC: 1},
- })
-
- return &testContext{
- s: s,
- linkEps: linkEps,
- }
-}
-
-type headers struct {
- srcPort uint16
- dstPort uint16
-}
-
-func newPayload() []byte {
- b := make([]byte, 30+rand.Intn(100))
- for i := range b {
- b[i] = byte(rand.Intn(256))
- }
- return b
-}
-
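-// sendV4Packet builds a UDP-in-IPv4 packet carrying payload with the given
-// ports and injects it on the NIC identified by linkEpID.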
-func (c *testContext) sendV4Packet(payload []byte, h *headers, linkEpID tcpip.NICID) {
- buf := buffer.NewView(header.UDPMinimumSize + header.IPv4MinimumSize + len(payload))
- payloadStart := len(buf) - len(payload)
- copy(buf[payloadStart:], payload)
-
- // Initialize the IP header.
- ip := header.IPv4(buf)
- ip.Encode(&header.IPv4Fields{
- TOS: 0x80,
- TotalLength: uint16(len(buf)),
- TTL: 65,
- Protocol: uint8(udp.ProtocolNumber),
- SrcAddr: testSrcAddrV4,
- DstAddr: testDstAddrV4,
- })
- ip.SetChecksum(^ip.CalculateChecksum())
-
- // Initialize the UDP header.
- u := header.UDP(buf[header.IPv4MinimumSize:])
- u.Encode(&header.UDPFields{
- SrcPort: h.srcPort,
- DstPort: h.dstPort,
- Length: uint16(header.UDPMinimumSize + len(payload)),
- })
-
- // Calculate the UDP pseudo-header checksum.
- xsum := header.PseudoHeaderChecksum(udp.ProtocolNumber, testSrcAddrV4, testDstAddrV4, uint16(len(u)))
-
- // Calculate the UDP checksum and set it.
- xsum = header.Checksum(payload, xsum)
- u.SetChecksum(^u.CalculateChecksum(xsum))
-
- // Inject packet.
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- })
- c.linkEps[linkEpID].InjectInbound(ipv4.ProtocolNumber, pkt)
-}
-
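-// sendV6Packet builds a UDP-in-IPv6 packet carrying payload with the given
-// ports and injects it on the NIC identified by linkEpID.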
-func (c *testContext) sendV6Packet(payload []byte, h *headers, linkEpID tcpip.NICID) {
- // Allocate a buffer for data and headers.
- buf := buffer.NewView(header.UDPMinimumSize + header.IPv6MinimumSize + len(payload))
- copy(buf[len(buf)-len(payload):], payload)
-
- // Initialize the IP header.
- ip := header.IPv6(buf)
- ip.Encode(&header.IPv6Fields{
- PayloadLength: uint16(header.UDPMinimumSize + len(payload)),
- TransportProtocol: udp.ProtocolNumber,
- HopLimit: 65,
- SrcAddr: testSrcAddrV6,
- DstAddr: testDstAddrV6,
- })
-
- // Initialize the UDP header.
- u := header.UDP(buf[header.IPv6MinimumSize:])
- u.Encode(&header.UDPFields{
- SrcPort: h.srcPort,
- DstPort: h.dstPort,
- Length: uint16(header.UDPMinimumSize + len(payload)),
- })
-
- // Calculate the UDP pseudo-header checksum.
- xsum := header.PseudoHeaderChecksum(udp.ProtocolNumber, testSrcAddrV6, testDstAddrV6, uint16(len(u)))
-
- // Calculate the UDP checksum and set it.
- xsum = header.Checksum(payload, xsum)
- u.SetChecksum(^u.CalculateChecksum(xsum))
-
- // Inject packet.
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- })
- c.linkEps[linkEpID].InjectInbound(ipv6.ProtocolNumber, pkt)
-}
-
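-// TestTransportDemuxerRegister checks that RegisterTransportEndpoint fails for
-// a network protocol that is not enabled in the stack and succeeds for one
-// that is.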
-func TestTransportDemuxerRegister(t *testing.T) {
- for _, test := range []struct {
- name string
- proto tcpip.NetworkProtocolNumber
- want tcpip.Error
- }{
- {"failure", ipv6.ProtocolNumber, &tcpip.ErrUnknownProtocol{}},
- {"success", ipv4.ProtocolNumber, nil},
- } {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},
- })
- var wq waiter.Queue
- ep, err := s.NewEndpoint(udp.ProtocolNumber, ipv4.ProtocolNumber, &wq)
- if err != nil {
- t.Fatal(err)
- }
- tEP, ok := ep.(stack.TransportEndpoint)
- if !ok {
- t.Fatalf("%T does not implement stack.TransportEndpoint", ep)
- }
- if got, want := s.RegisterTransportEndpoint([]tcpip.NetworkProtocolNumber{test.proto}, udp.ProtocolNumber, stack.TransportEndpointID{}, tEP, ports.Flags{}, 0), test.want; got != want {
- t.Fatalf("s.RegisterTransportEndpoint(...) = %s, want %s", got, want)
- }
- })
- }
-}
-
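-// TestTransportDemuxerRegisterMultiple checks that multiple endpoints may
-// share a TransportEndpointID only when they are registered with multibind
-// flags.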
-func TestTransportDemuxerRegisterMultiple(t *testing.T) {
- type test struct {
- flags ports.Flags
- want tcpip.Error
- }
- for _, subtest := range []struct {
- name string
- tests []test
- }{
- {"zeroFlags", []test{
- {ports.Flags{}, nil},
- {ports.Flags{}, &tcpip.ErrPortInUse{}},
- }},
- {"multibindFlags", []test{
-			// Allow multiple registrations of the same TransportEndpointID with
-			// multibind flags.
- {ports.Flags{LoadBalanced: true, MostRecent: true}, nil},
- {ports.Flags{LoadBalanced: true, MostRecent: true}, nil},
-			// Disallow registration with the same ID when a non-multibind flag is set.
- {ports.Flags{TupleOnly: true}, &tcpip.ErrPortInUse{}},
- }},
- } {
- t.Run(subtest.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},
- })
- var eps []tcpip.Endpoint
- for idx, test := range subtest.tests {
- var wq waiter.Queue
- ep, err := s.NewEndpoint(udp.ProtocolNumber, ipv4.ProtocolNumber, &wq)
- if err != nil {
- t.Fatal(err)
- }
- eps = append(eps, ep)
- tEP, ok := ep.(stack.TransportEndpoint)
- if !ok {
- t.Fatalf("%T does not implement stack.TransportEndpoint", ep)
- }
- id := stack.TransportEndpointID{LocalPort: 1}
- if got, want := s.RegisterTransportEndpoint([]tcpip.NetworkProtocolNumber{ipv4.ProtocolNumber}, udp.ProtocolNumber, id, tEP, test.flags, 0), test.want; got != want {
- t.Fatalf("test index: %d, s.RegisterTransportEndpoint(ipv4.ProtocolNumber, udp.ProtocolNumber, _, _, %+v, 0) = %s, want %s", idx, test.flags, got, want)
- }
- }
- for _, ep := range eps {
- ep.Close()
- }
- })
- }
-}
-
-// TestBindToDeviceDistribution injects varied packets on input devices and checks that
-// the distribution of packets received matches expectations.
-func TestBindToDeviceDistribution(t *testing.T) {
- type endpointSockopts struct {
- reuse bool
- bindToDevice tcpip.NICID
- }
- tcs := []struct {
- name string
-		// endpoints will receive the injected packets.
- endpoints []endpointSockopts
-		// wantDistributions is the wanted ratio of packets received on each
-		// endpoint for each NIC on which packets are injected.
- wantDistributions map[tcpip.NICID][]float64
- }{
- {
- name: "BindPortReuse",
- // 5 endpoints that all have reuse set.
- endpoints: []endpointSockopts{
- {reuse: true, bindToDevice: 0},
- {reuse: true, bindToDevice: 0},
- {reuse: true, bindToDevice: 0},
- {reuse: true, bindToDevice: 0},
- {reuse: true, bindToDevice: 0},
- },
- wantDistributions: map[tcpip.NICID][]float64{
-				// Packets injected on NIC 1 get distributed evenly across all endpoints.
- 1: {0.2, 0.2, 0.2, 0.2, 0.2},
- },
- },
- {
- name: "BindToDevice",
- // 3 endpoints with various bindings.
- endpoints: []endpointSockopts{
- {reuse: false, bindToDevice: 1},
- {reuse: false, bindToDevice: 2},
- {reuse: false, bindToDevice: 3},
- },
- wantDistributions: map[tcpip.NICID][]float64{
-				// Packets injected on NIC 1 go only to the endpoint bound to device 1.
-				1: {1, 0, 0},
-				// Packets injected on NIC 2 go only to the endpoint bound to device 2.
-				2: {0, 1, 0},
-				// Packets injected on NIC 3 go only to the endpoint bound to device 3.
-				3: {0, 0, 1},
- },
- },
- {
- name: "ReuseAndBindToDevice",
- // 6 endpoints with various bindings.
- endpoints: []endpointSockopts{
- {reuse: true, bindToDevice: 1},
- {reuse: true, bindToDevice: 1},
- {reuse: true, bindToDevice: 2},
- {reuse: true, bindToDevice: 2},
- {reuse: true, bindToDevice: 2},
- {reuse: true, bindToDevice: 0},
- },
- wantDistributions: map[tcpip.NICID][]float64{
-				// Packets injected on NIC 1 are split between the two endpoints bound
-				// to device 1.
-				1: {0.5, 0.5, 0, 0, 0, 0},
-				// Packets injected on NIC 2 are split among the three endpoints bound
-				// to device 2; the unbound endpoint receives none of them.
-				2: {0, 0, 1. / 3, 1. / 3, 1. / 3, 0},
-				// Packets injected on NIC 1000 go only to the unbound endpoint.
-				1000: {0, 0, 0, 0, 0, 1},
- },
- },
- }
- protos := map[string]tcpip.NetworkProtocolNumber{
- "IPv4": ipv4.ProtocolNumber,
- "IPv6": ipv6.ProtocolNumber,
- }
-
- for _, test := range tcs {
- for protoName, protoNum := range protos {
- for device, wantDistribution := range test.wantDistributions {
- t.Run(test.name+protoName+"-"+strconv.Itoa(int(device)), func(t *testing.T) {
- // Create the NICs.
- var devices []tcpip.NICID
- for d := range test.wantDistributions {
- devices = append(devices, d)
- }
- c := newDualTestContextMultiNIC(t, defaultMTU, devices)
-
- // Create endpoints and bind each to a NIC, sometimes reusing ports.
- eps := make(map[tcpip.Endpoint]int)
- pollChannel := make(chan tcpip.Endpoint)
- for i, endpoint := range test.endpoints {
- // Try to receive the data.
- wq := waiter.Queue{}
- we, ch := waiter.NewChannelEntry(nil)
- wq.EventRegister(&we, waiter.ReadableEvents)
- t.Cleanup(func() {
- wq.EventUnregister(&we)
- close(ch)
- })
-
- var err tcpip.Error
- ep, err := c.s.NewEndpoint(udp.ProtocolNumber, protoNum, &wq)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
- t.Cleanup(ep.Close)
- eps[ep] = i
-
- go func(ep tcpip.Endpoint) {
- for range ch {
- pollChannel <- ep
- }
- }(ep)
-
- ep.SocketOptions().SetReusePort(endpoint.reuse)
- if err := ep.SocketOptions().SetBindToDevice(int32(endpoint.bindToDevice)); err != nil {
-							t.Fatalf("SetBindToDevice(%d) on endpoint %d failed: %s", endpoint.bindToDevice, i, err)
- }
-
- var dstAddr tcpip.Address
- switch protoNum {
- case ipv4.ProtocolNumber:
- dstAddr = testDstAddrV4
- case ipv6.ProtocolNumber:
- dstAddr = testDstAddrV6
- default:
- t.Fatalf("unexpected protocol number: %d", protoNum)
- }
- if err := ep.Bind(tcpip.FullAddress{Addr: dstAddr, Port: testDstPort}); err != nil {
- t.Fatalf("ep.Bind(...) on endpoint %d failed: %s", i, err)
- }
- }
-
- // Send packets across a range of ports, checking that packets from
- // the same source port are always demultiplexed to the same
- // destination endpoint.
- npackets := 10_000
- nports := 1_000
- if got, want := len(test.endpoints), len(wantDistribution); got != want {
- t.Fatalf("got len(test.endpoints) = %d, want %d", got, want)
- }
- endpoints := make(map[uint16]tcpip.Endpoint)
- stats := make(map[tcpip.Endpoint]int)
- for i := 0; i < npackets; i++ {
- // Send a packet.
- port := uint16(i % nports)
- payload := newPayload()
- hdrs := &headers{
- srcPort: testSrcPort + port,
- dstPort: testDstPort,
- }
- switch protoNum {
- case ipv4.ProtocolNumber:
- c.sendV4Packet(payload, hdrs, device)
- case ipv6.ProtocolNumber:
- c.sendV6Packet(payload, hdrs, device)
- default:
- t.Fatalf("unexpected protocol number: %d", protoNum)
- }
-
- ep := <-pollChannel
- if _, err := ep.Read(ioutil.Discard, tcpip.ReadOptions{}); err != nil {
- t.Fatalf("Read on endpoint %d failed: %s", eps[ep], err)
- }
- stats[ep]++
- if i < nports {
- endpoints[uint16(i)] = ep
- } else {
- // Check that all packets from one client are handled by the same
- // socket.
- if want, got := endpoints[port], ep; want != got {
- t.Fatalf("Packet sent on port %d expected on endpoint %d but received on endpoint %d", port, eps[want], eps[got])
- }
- }
- }
-
- // Check that a packet distribution is as expected.
- for ep, i := range eps {
- wantRatio := wantDistribution[i]
- wantRecv := wantRatio * float64(npackets)
- actualRecv := stats[ep]
- actualRatio := float64(stats[ep]) / float64(npackets)
-						// Allow the observed ratio to deviate from the expected ratio by
-						// at most 5 percentage points.
- if math.Abs(actualRatio-wantRatio) > 0.05 {
- t.Errorf("want about %.0f%% (%.0f of %d) packets to arrive on endpoint %d, got %.0f%% (%d of %d)", wantRatio*100, wantRecv, npackets, i, actualRatio*100, actualRecv, npackets)
- }
- }
- })
- }
- }
- }
-}
diff --git a/pkg/tcpip/stack/transport_test.go b/pkg/tcpip/stack/transport_test.go
deleted file mode 100644
index 839178809..000000000
--- a/pkg/tcpip/stack/transport_test.go
+++ /dev/null
@@ -1,555 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package stack_test
-
-import (
- "bytes"
- "io"
- "testing"
-
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/link/channel"
- "gvisor.dev/gvisor/pkg/tcpip/ports"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-const (
- fakeTransNumber tcpip.TransportProtocolNumber = 1
- fakeTransHeaderLen int = 3
-)
-
-// fakeTransportEndpoint is a transport-layer protocol endpoint. It counts
-// received packets; the counts of all endpoints are aggregated in the protocol
-// descriptor.
-//
-// Headers of this protocol are fakeTransHeaderLen bytes, but we currently don't
-// use them.
-type fakeTransportEndpoint struct {
- stack.TransportEndpointInfo
- tcpip.DefaultSocketOptionsHandler
-
- proto *fakeTransportProtocol
- peerAddr tcpip.Address
- route *stack.Route
- uniqueID uint64
-
- // acceptQueue is non-nil iff bound.
- acceptQueue []*fakeTransportEndpoint
-
- // ops is used to set and get socket options.
- ops tcpip.SocketOptions
-}
-
-func (f *fakeTransportEndpoint) Info() tcpip.EndpointInfo {
- return &f.TransportEndpointInfo
-}
-
-func (*fakeTransportEndpoint) Stats() tcpip.EndpointStats {
- return nil
-}
-
-func (*fakeTransportEndpoint) SetOwner(owner tcpip.PacketOwner) {}
-
-func (f *fakeTransportEndpoint) SocketOptions() *tcpip.SocketOptions {
- return &f.ops
-}
-
-func newFakeTransportEndpoint(proto *fakeTransportProtocol, netProto tcpip.NetworkProtocolNumber, s *stack.Stack) tcpip.Endpoint {
- ep := &fakeTransportEndpoint{TransportEndpointInfo: stack.TransportEndpointInfo{NetProto: netProto}, proto: proto, uniqueID: s.UniqueID()}
- ep.ops.InitHandler(ep, s, tcpip.GetStackSendBufferLimits, tcpip.GetStackReceiveBufferLimits)
- return ep
-}
-
-func (f *fakeTransportEndpoint) Abort() {
- f.Close()
-}
-
-func (f *fakeTransportEndpoint) Close() {
- // TODO(gvisor.dev/issue/5153): Consider retaining the route.
- f.route.Release()
-}
-
-func (*fakeTransportEndpoint) Readiness(mask waiter.EventMask) waiter.EventMask {
- return mask
-}
-
-func (*fakeTransportEndpoint) Read(io.Writer, tcpip.ReadOptions) (tcpip.ReadResult, tcpip.Error) {
- return tcpip.ReadResult{}, nil
-}
-
-func (f *fakeTransportEndpoint) Write(p tcpip.Payloader, opts tcpip.WriteOptions) (int64, tcpip.Error) {
- if len(f.route.RemoteAddress()) == 0 {
- return 0, &tcpip.ErrNoRoute{}
- }
-
- v := make([]byte, p.Len())
- if _, err := io.ReadFull(p, v); err != nil {
- return 0, &tcpip.ErrBadBuffer{}
- }
-
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: int(f.route.MaxHeaderLength()) + fakeTransHeaderLen,
- Data: buffer.View(v).ToVectorisedView(),
- })
- _ = pkt.TransportHeader().Push(fakeTransHeaderLen)
- if err := f.route.WritePacket(stack.NetworkHeaderParams{Protocol: fakeTransNumber, TTL: 123, TOS: stack.DefaultTOS}, pkt); err != nil {
- return 0, err
- }
-
- return int64(len(v)), nil
-}
-
-// SetSockOpt sets a socket option. Currently not supported.
-func (*fakeTransportEndpoint) SetSockOpt(tcpip.SettableSocketOption) tcpip.Error {
- return &tcpip.ErrInvalidEndpointState{}
-}
-
-// SetSockOptInt sets a socket option. Currently not supported.
-func (*fakeTransportEndpoint) SetSockOptInt(tcpip.SockOptInt, int) tcpip.Error {
- return &tcpip.ErrInvalidEndpointState{}
-}
-
-// GetSockOptInt implements tcpip.Endpoint.GetSockOptInt.
-func (*fakeTransportEndpoint) GetSockOptInt(opt tcpip.SockOptInt) (int, tcpip.Error) {
- return -1, &tcpip.ErrUnknownProtocolOption{}
-}
-
-// GetSockOpt implements tcpip.Endpoint.GetSockOpt.
-func (*fakeTransportEndpoint) GetSockOpt(tcpip.GettableSocketOption) tcpip.Error {
- return &tcpip.ErrInvalidEndpointState{}
-}
-
-// Disconnect implements tcpip.Endpoint.Disconnect.
-func (*fakeTransportEndpoint) Disconnect() tcpip.Error {
- return &tcpip.ErrNotSupported{}
-}
-
-func (f *fakeTransportEndpoint) Connect(addr tcpip.FullAddress) tcpip.Error {
- f.peerAddr = addr.Addr
-
- // Find the route.
- r, err := f.proto.stack.FindRoute(addr.NIC, "", addr.Addr, fakeNetNumber, false /* multicastLoop */)
- if err != nil {
- return &tcpip.ErrNoRoute{}
- }
-
- // Try to register so that we can start receiving packets.
- f.ID.RemoteAddress = addr.Addr
- err = f.proto.stack.RegisterTransportEndpoint([]tcpip.NetworkProtocolNumber{fakeNetNumber}, fakeTransNumber, f.ID, f, ports.Flags{}, 0 /* bindToDevice */)
- if err != nil {
- r.Release()
- return err
- }
-
- f.route = r
-
- return nil
-}
-
-func (f *fakeTransportEndpoint) UniqueID() uint64 {
- return f.uniqueID
-}
-
-func (*fakeTransportEndpoint) ConnectEndpoint(e tcpip.Endpoint) tcpip.Error {
- return nil
-}
-
-func (*fakeTransportEndpoint) Shutdown(tcpip.ShutdownFlags) tcpip.Error {
- return nil
-}
-
-func (*fakeTransportEndpoint) Reset() {
-}
-
-func (*fakeTransportEndpoint) Listen(int) tcpip.Error {
- return nil
-}
-
-func (f *fakeTransportEndpoint) Accept(*tcpip.FullAddress) (tcpip.Endpoint, *waiter.Queue, tcpip.Error) {
- if len(f.acceptQueue) == 0 {
- return nil, nil, nil
- }
- a := f.acceptQueue[0]
- f.acceptQueue = f.acceptQueue[1:]
- return a, nil, nil
-}
-
-func (f *fakeTransportEndpoint) Bind(a tcpip.FullAddress) tcpip.Error {
- if err := f.proto.stack.RegisterTransportEndpoint(
- []tcpip.NetworkProtocolNumber{fakeNetNumber},
- fakeTransNumber,
- stack.TransportEndpointID{LocalAddress: a.Addr},
- f,
- ports.Flags{},
-		0, /* bindToDevice */
- ); err != nil {
- return err
- }
- f.acceptQueue = []*fakeTransportEndpoint{}
- return nil
-}
-
-func (*fakeTransportEndpoint) GetLocalAddress() (tcpip.FullAddress, tcpip.Error) {
- return tcpip.FullAddress{}, nil
-}
-
-func (*fakeTransportEndpoint) GetRemoteAddress() (tcpip.FullAddress, tcpip.Error) {
- return tcpip.FullAddress{}, nil
-}
-
-func (f *fakeTransportEndpoint) HandlePacket(id stack.TransportEndpointID, pkt *stack.PacketBuffer) {
- // Increment the number of received packets.
- f.proto.packetCount++
- if f.acceptQueue == nil {
- return
- }
-
- netHdr := pkt.NetworkHeader().View()
- route, err := f.proto.stack.FindRoute(pkt.NICID, tcpip.Address(netHdr[dstAddrOffset]), tcpip.Address(netHdr[srcAddrOffset]), pkt.NetworkProtocolNumber, false /* multicastLoop */)
- if err != nil {
- return
- }
-
- ep := &fakeTransportEndpoint{
- TransportEndpointInfo: stack.TransportEndpointInfo{
- ID: f.ID,
- NetProto: f.NetProto,
- },
- proto: f.proto,
- peerAddr: route.RemoteAddress(),
- route: route,
- }
- ep.ops.InitHandler(ep, f.proto.stack, tcpip.GetStackSendBufferLimits, tcpip.GetStackReceiveBufferLimits)
- f.acceptQueue = append(f.acceptQueue, ep)
-}
-
-func (f *fakeTransportEndpoint) HandleError(stack.TransportError, *stack.PacketBuffer) {
- // Increment the number of received control packets.
- f.proto.controlCount++
-}
-
-func (*fakeTransportEndpoint) State() uint32 {
- return 0
-}
-
-func (*fakeTransportEndpoint) ModerateRecvBuf(copied int) {}
-
-func (*fakeTransportEndpoint) Resume(*stack.Stack) {}
-
-func (*fakeTransportEndpoint) Wait() {}
-
-func (*fakeTransportEndpoint) LastError() tcpip.Error {
- return nil
-}
-
-type fakeTransportGoodOption bool
-
-type fakeTransportBadOption bool
-
-type fakeTransportInvalidValueOption int
-
-type fakeTransportProtocolOptions struct {
- good bool
-}
-
-// fakeTransportProtocol is a transport-layer protocol descriptor. It
-// aggregates the number of packets received via endpoints of this protocol.
-type fakeTransportProtocol struct {
- stack *stack.Stack
-
- packetCount int
- controlCount int
- opts fakeTransportProtocolOptions
-}
-
-func (*fakeTransportProtocol) Number() tcpip.TransportProtocolNumber {
- return fakeTransNumber
-}
-
-func (f *fakeTransportProtocol) NewEndpoint(netProto tcpip.NetworkProtocolNumber, _ *waiter.Queue) (tcpip.Endpoint, tcpip.Error) {
- return newFakeTransportEndpoint(f, netProto, f.stack), nil
-}
-
-func (*fakeTransportProtocol) NewRawEndpoint(tcpip.NetworkProtocolNumber, *waiter.Queue) (tcpip.Endpoint, tcpip.Error) {
- return nil, &tcpip.ErrUnknownProtocol{}
-}
-
-func (*fakeTransportProtocol) MinimumPacketSize() int {
- return fakeTransHeaderLen
-}
-
-func (*fakeTransportProtocol) ParsePorts(buffer.View) (src, dst uint16, err tcpip.Error) {
- return 0, 0, nil
-}
-
-func (*fakeTransportProtocol) HandleUnknownDestinationPacket(stack.TransportEndpointID, *stack.PacketBuffer) stack.UnknownDestinationPacketDisposition {
- return stack.UnknownDestinationPacketHandled
-}
-
-func (f *fakeTransportProtocol) SetOption(option tcpip.SettableTransportProtocolOption) tcpip.Error {
- switch v := option.(type) {
- case *tcpip.TCPModerateReceiveBufferOption:
- f.opts.good = bool(*v)
- return nil
- default:
- return &tcpip.ErrUnknownProtocolOption{}
- }
-}
-
-func (f *fakeTransportProtocol) Option(option tcpip.GettableTransportProtocolOption) tcpip.Error {
- switch v := option.(type) {
- case *tcpip.TCPModerateReceiveBufferOption:
- *v = tcpip.TCPModerateReceiveBufferOption(f.opts.good)
- return nil
- default:
- return &tcpip.ErrUnknownProtocolOption{}
- }
-}
-
-// Abort implements TransportProtocol.Abort.
-func (*fakeTransportProtocol) Abort() {}
-
-// Close implements tcpip.Endpoint.Close.
-func (*fakeTransportProtocol) Close() {}
-
-// Wait implements TransportProtocol.Wait.
-func (*fakeTransportProtocol) Wait() {}
-
-// Parse implements TransportProtocol.Parse.
-func (*fakeTransportProtocol) Parse(pkt *stack.PacketBuffer) bool {
- _, ok := pkt.TransportHeader().Consume(fakeTransHeaderLen)
- return ok
-}
-
-func fakeTransFactory(s *stack.Stack) stack.TransportProtocol {
- return &fakeTransportProtocol{stack: s}
-}
-
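-// TestTransportReceive checks that inbound packets are delivered to the fake
-// transport endpoint only when both the transport protocol number and the
-// source address match the connected peer.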
-func TestTransportReceive(t *testing.T) {
- linkEP := channel.New(10, defaultMTU, "")
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- TransportProtocols: []stack.TransportProtocolFactory{fakeTransFactory},
- })
- if err := s.CreateNIC(1, linkEP); err != nil {
- t.Fatalf("CreateNIC failed: %v", err)
- }
-
- {
- subnet, err := tcpip.NewSubnet("\x00", "\x00")
- if err != nil {
- t.Fatal(err)
- }
- s.SetRouteTable([]tcpip.Route{{Destination: subnet, Gateway: "\x00", NIC: 1}})
- }
-
- if err := s.AddAddress(1, fakeNetNumber, "\x01"); err != nil {
- t.Fatalf("AddAddress failed: %v", err)
- }
-
- // Create endpoint and connect to remote address.
- wq := waiter.Queue{}
- ep, err := s.NewEndpoint(fakeTransNumber, fakeNetNumber, &wq)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %v", err)
- }
-
- if err := ep.Connect(tcpip.FullAddress{0, "\x02", 0}); err != nil {
- t.Fatalf("Connect failed: %v", err)
- }
-
- fakeTrans := s.TransportProtocolInstance(fakeTransNumber).(*fakeTransportProtocol)
-
- // Create buffer that will hold the packet.
- buf := buffer.NewView(30)
-
- // Make sure packet with wrong protocol is not delivered.
- buf[0] = 1
- buf[2] = 0
- linkEP.InjectInbound(fakeNetNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
- if fakeTrans.packetCount != 0 {
- t.Errorf("packetCount = %d, want %d", fakeTrans.packetCount, 0)
- }
-
- // Make sure packet from the wrong source is not delivered.
- buf[0] = 1
- buf[1] = 3
- buf[2] = byte(fakeTransNumber)
- linkEP.InjectInbound(fakeNetNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
- if fakeTrans.packetCount != 0 {
- t.Errorf("packetCount = %d, want %d", fakeTrans.packetCount, 0)
- }
-
- // Make sure packet is delivered.
- buf[0] = 1
- buf[1] = 2
- buf[2] = byte(fakeTransNumber)
- linkEP.InjectInbound(fakeNetNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
- if fakeTrans.packetCount != 1 {
- t.Errorf("packetCount = %d, want %d", fakeTrans.packetCount, 1)
- }
-}
-
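-// TestTransportControlReceive checks that control packets are delivered to the
-// fake transport endpoint only when the embedded packet's addresses and
-// transport protocol match the connected endpoint.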
-func TestTransportControlReceive(t *testing.T) {
- linkEP := channel.New(10, defaultMTU, "")
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- TransportProtocols: []stack.TransportProtocolFactory{fakeTransFactory},
- })
- if err := s.CreateNIC(1, linkEP); err != nil {
- t.Fatalf("CreateNIC failed: %v", err)
- }
-
- {
- subnet, err := tcpip.NewSubnet("\x00", "\x00")
- if err != nil {
- t.Fatal(err)
- }
- s.SetRouteTable([]tcpip.Route{{Destination: subnet, Gateway: "\x00", NIC: 1}})
- }
-
- if err := s.AddAddress(1, fakeNetNumber, "\x01"); err != nil {
- t.Fatalf("AddAddress failed: %v", err)
- }
-
- // Create endpoint and connect to remote address.
- wq := waiter.Queue{}
- ep, err := s.NewEndpoint(fakeTransNumber, fakeNetNumber, &wq)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %v", err)
- }
-
- if err := ep.Connect(tcpip.FullAddress{0, "\x02", 0}); err != nil {
- t.Fatalf("Connect failed: %v", err)
- }
-
- fakeTrans := s.TransportProtocolInstance(fakeTransNumber).(*fakeTransportProtocol)
-
- // Create buffer that will hold the control packet.
- buf := buffer.NewView(2*fakeNetHeaderLen + 30)
-
- // Outer packet contains the control protocol number.
- buf[0] = 1
- buf[1] = 0xfe
- buf[2] = uint8(fakeControlProtocol)
-
- // Make sure packet with wrong protocol is not delivered.
- buf[fakeNetHeaderLen+0] = 0
- buf[fakeNetHeaderLen+1] = 1
- buf[fakeNetHeaderLen+2] = 0
- linkEP.InjectInbound(fakeNetNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
- if fakeTrans.controlCount != 0 {
- t.Errorf("controlCount = %d, want %d", fakeTrans.controlCount, 0)
- }
-
- // Make sure packet from the wrong source is not delivered.
- buf[fakeNetHeaderLen+0] = 3
- buf[fakeNetHeaderLen+1] = 1
- buf[fakeNetHeaderLen+2] = byte(fakeTransNumber)
- linkEP.InjectInbound(fakeNetNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
- if fakeTrans.controlCount != 0 {
- t.Errorf("controlCount = %d, want %d", fakeTrans.controlCount, 0)
- }
-
- // Make sure packet is delivered.
- buf[fakeNetHeaderLen+0] = 2
- buf[fakeNetHeaderLen+1] = 1
- buf[fakeNetHeaderLen+2] = byte(fakeTransNumber)
- linkEP.InjectInbound(fakeNetNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
- if fakeTrans.controlCount != 1 {
- t.Errorf("controlCount = %d, want %d", fakeTrans.controlCount, 1)
- }
-}
-
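-// TestTransportSend checks that data written to a connected fake transport
-// endpoint is passed down to the network protocol for transmission.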
-func TestTransportSend(t *testing.T) {
- linkEP := channel.New(10, defaultMTU, "")
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- TransportProtocols: []stack.TransportProtocolFactory{fakeTransFactory},
- })
- if err := s.CreateNIC(1, linkEP); err != nil {
- t.Fatalf("CreateNIC failed: %v", err)
- }
-
- if err := s.AddAddress(1, fakeNetNumber, "\x01"); err != nil {
- t.Fatalf("AddAddress failed: %v", err)
- }
-
- {
- subnet, err := tcpip.NewSubnet("\x00", "\x00")
- if err != nil {
- t.Fatal(err)
- }
- s.SetRouteTable([]tcpip.Route{{Destination: subnet, Gateway: "\x00", NIC: 1}})
- }
-
- // Create endpoint and bind it.
- wq := waiter.Queue{}
- ep, err := s.NewEndpoint(fakeTransNumber, fakeNetNumber, &wq)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %v", err)
- }
-
- if err := ep.Connect(tcpip.FullAddress{0, "\x02", 0}); err != nil {
- t.Fatalf("Connect failed: %v", err)
- }
-
- // Create buffer that will hold the payload.
- b := make([]byte, 30)
- var r bytes.Reader
- r.Reset(b)
- if _, err := ep.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("write failed: %v", err)
- }
-
- fakeNet := s.NetworkProtocolInstance(fakeNetNumber).(*fakeNetworkProtocol)
-
- if fakeNet.sendPacketCount[2] != 1 {
- t.Errorf("sendPacketCount = %d, want %d", fakeNet.sendPacketCount[2], 1)
- }
-}
-
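-// TestTransportOptions checks that a transport protocol option set through the
-// stack can be read back with the same value.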
-func TestTransportOptions(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},
- TransportProtocols: []stack.TransportProtocolFactory{fakeTransFactory},
- })
-
- v := tcpip.TCPModerateReceiveBufferOption(true)
- if err := s.SetTransportProtocolOption(fakeTransNumber, &v); err != nil {
- t.Errorf("s.SetTransportProtocolOption(fakeTrans, &%T(%t)): %s", v, v, err)
- }
- v = false
- if err := s.TransportProtocolOption(fakeTransNumber, &v); err != nil {
- t.Fatalf("s.TransportProtocolOption(fakeTransNumber, &%T): %s", v, err)
- }
- if !v {
- t.Fatalf("got tcpip.TCPModerateReceiveBufferOption = false, want = true")
- }
-}
diff --git a/pkg/tcpip/stack/tuple_list.go b/pkg/tcpip/stack/tuple_list.go
new file mode 100644
index 000000000..31d0feefa
--- /dev/null
+++ b/pkg/tcpip/stack/tuple_list.go
@@ -0,0 +1,221 @@
+package stack
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type tupleElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (tupleElementMapper) linkerFor(elem *tuple) *tuple { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type tupleList struct {
+ head *tuple
+ tail *tuple
+}
+
+// Reset resets list l to the empty state.
+func (l *tupleList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *tupleList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *tupleList) Front() *tuple {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *tupleList) Back() *tuple {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *tupleList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (tupleElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *tupleList) PushFront(e *tuple) {
+ linker := tupleElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ tupleElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *tupleList) PushBack(e *tuple) {
+ linker := tupleElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ tupleElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *tupleList) PushBackList(m *tupleList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ tupleElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ tupleElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *tupleList) InsertAfter(b, e *tuple) {
+ bLinker := tupleElementMapper{}.linkerFor(b)
+ eLinker := tupleElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ tupleElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *tupleList) InsertBefore(a, e *tuple) {
+ aLinker := tupleElementMapper{}.linkerFor(a)
+ eLinker := tupleElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ tupleElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *tupleList) Remove(e *tuple) {
+ linker := tupleElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ tupleElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ tupleElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type tupleEntry struct {
+ next *tuple
+ prev *tuple
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *tupleEntry) Next() *tuple {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *tupleEntry) Prev() *tuple {
+ return e.prev
+}
+
+// SetNext assigns 'entry' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *tupleEntry) SetNext(elem *tuple) {
+ e.next = elem
+}
+
+// SetPrev assigns 'entry' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *tupleEntry) SetPrev(elem *tuple) {
+ e.prev = elem
+}
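The generated tupleList above is gVisor's usual intrusive doubly-linked list, specialized here for the stack package's tuple type: elements carry their own next/prev links (via the embedded tupleEntry), so pushes and removals are O(1) with no per-node allocation, and the zero value is immediately usable. The standalone sketch below re-implements the same pattern for a hypothetical item type purely to illustrate the shape of the generated code; it is not the generated API and does not depend on the stack package.

```go
// Minimal sketch of the intrusive-list pattern behind tupleList.
// "item" and "itemList" are hypothetical stand-ins for tuple/tupleList.
package main

import "fmt"

// item embeds its own links, so the list needs no per-node allocation.
type item struct {
	value int
	next  *item
	prev  *item
}

// itemList mirrors tupleList: head/tail pointers, zero value ready to use.
type itemList struct {
	head *item
	tail *item
}

// pushBack appends e in O(1), like tupleList.PushBack.
func (l *itemList) pushBack(e *item) {
	e.next = nil
	e.prev = l.tail
	if l.tail != nil {
		l.tail.next = e
	} else {
		l.head = e
	}
	l.tail = e
}

// remove unlinks e in O(1) given only the element itself, like tupleList.Remove.
func (l *itemList) remove(e *item) {
	if e.prev != nil {
		e.prev.next = e.next
	} else if l.head == e {
		l.head = e.next
	}
	if e.next != nil {
		e.next.prev = e.prev
	} else if l.tail == e {
		l.tail = e.prev
	}
	e.next, e.prev = nil, nil
}

func main() {
	var l itemList // zero value is an empty, usable list
	a, b := &item{value: 1}, &item{value: 2}
	l.pushBack(a)
	l.pushBack(b)
	l.remove(a)
	for e := l.head; e != nil; e = e.next {
		fmt.Println(e.value) // prints 2
	}
}
```

Because the links live inside the element, removal never needs to search the list, which is why the generated Remove takes only the element pointer.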
diff --git a/pkg/tcpip/tcpip_state_autogen.go b/pkg/tcpip/tcpip_state_autogen.go
new file mode 100644
index 000000000..9c34de9ef
--- /dev/null
+++ b/pkg/tcpip/tcpip_state_autogen.go
@@ -0,0 +1,1323 @@
+// automatically generated by stateify.
+
+package tcpip
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (e *ErrAborted) StateTypeName() string {
+ return "pkg/tcpip.ErrAborted"
+}
+
+func (e *ErrAborted) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrAborted) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrAborted) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrAborted) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrAborted) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrAddressFamilyNotSupported) StateTypeName() string {
+ return "pkg/tcpip.ErrAddressFamilyNotSupported"
+}
+
+func (e *ErrAddressFamilyNotSupported) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrAddressFamilyNotSupported) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrAddressFamilyNotSupported) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrAddressFamilyNotSupported) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrAddressFamilyNotSupported) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrAlreadyBound) StateTypeName() string {
+ return "pkg/tcpip.ErrAlreadyBound"
+}
+
+func (e *ErrAlreadyBound) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrAlreadyBound) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrAlreadyBound) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrAlreadyBound) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrAlreadyBound) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrAlreadyConnected) StateTypeName() string {
+ return "pkg/tcpip.ErrAlreadyConnected"
+}
+
+func (e *ErrAlreadyConnected) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrAlreadyConnected) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrAlreadyConnected) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrAlreadyConnected) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrAlreadyConnected) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrAlreadyConnecting) StateTypeName() string {
+ return "pkg/tcpip.ErrAlreadyConnecting"
+}
+
+func (e *ErrAlreadyConnecting) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrAlreadyConnecting) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrAlreadyConnecting) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrAlreadyConnecting) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrAlreadyConnecting) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrBadAddress) StateTypeName() string {
+ return "pkg/tcpip.ErrBadAddress"
+}
+
+func (e *ErrBadAddress) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrBadAddress) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrBadAddress) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrBadAddress) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrBadAddress) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrBadBuffer) StateTypeName() string {
+ return "pkg/tcpip.ErrBadBuffer"
+}
+
+func (e *ErrBadBuffer) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrBadBuffer) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrBadBuffer) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrBadBuffer) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrBadBuffer) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrBadLocalAddress) StateTypeName() string {
+ return "pkg/tcpip.ErrBadLocalAddress"
+}
+
+func (e *ErrBadLocalAddress) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrBadLocalAddress) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrBadLocalAddress) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrBadLocalAddress) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrBadLocalAddress) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrBroadcastDisabled) StateTypeName() string {
+ return "pkg/tcpip.ErrBroadcastDisabled"
+}
+
+func (e *ErrBroadcastDisabled) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrBroadcastDisabled) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrBroadcastDisabled) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrBroadcastDisabled) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrBroadcastDisabled) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrClosedForReceive) StateTypeName() string {
+ return "pkg/tcpip.ErrClosedForReceive"
+}
+
+func (e *ErrClosedForReceive) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrClosedForReceive) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrClosedForReceive) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrClosedForReceive) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrClosedForReceive) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrClosedForSend) StateTypeName() string {
+ return "pkg/tcpip.ErrClosedForSend"
+}
+
+func (e *ErrClosedForSend) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrClosedForSend) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrClosedForSend) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrClosedForSend) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrClosedForSend) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrConnectStarted) StateTypeName() string {
+ return "pkg/tcpip.ErrConnectStarted"
+}
+
+func (e *ErrConnectStarted) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrConnectStarted) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrConnectStarted) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrConnectStarted) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrConnectStarted) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrConnectionAborted) StateTypeName() string {
+ return "pkg/tcpip.ErrConnectionAborted"
+}
+
+func (e *ErrConnectionAborted) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrConnectionAborted) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrConnectionAborted) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrConnectionAborted) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrConnectionAborted) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrConnectionRefused) StateTypeName() string {
+ return "pkg/tcpip.ErrConnectionRefused"
+}
+
+func (e *ErrConnectionRefused) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrConnectionRefused) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrConnectionRefused) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrConnectionRefused) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrConnectionRefused) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrConnectionReset) StateTypeName() string {
+ return "pkg/tcpip.ErrConnectionReset"
+}
+
+func (e *ErrConnectionReset) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrConnectionReset) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrConnectionReset) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrConnectionReset) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrConnectionReset) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrDestinationRequired) StateTypeName() string {
+ return "pkg/tcpip.ErrDestinationRequired"
+}
+
+func (e *ErrDestinationRequired) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrDestinationRequired) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrDestinationRequired) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrDestinationRequired) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrDestinationRequired) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrDuplicateAddress) StateTypeName() string {
+ return "pkg/tcpip.ErrDuplicateAddress"
+}
+
+func (e *ErrDuplicateAddress) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrDuplicateAddress) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrDuplicateAddress) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrDuplicateAddress) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrDuplicateAddress) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrDuplicateNICID) StateTypeName() string {
+ return "pkg/tcpip.ErrDuplicateNICID"
+}
+
+func (e *ErrDuplicateNICID) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrDuplicateNICID) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrDuplicateNICID) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrDuplicateNICID) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrDuplicateNICID) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrInvalidEndpointState) StateTypeName() string {
+ return "pkg/tcpip.ErrInvalidEndpointState"
+}
+
+func (e *ErrInvalidEndpointState) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrInvalidEndpointState) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrInvalidEndpointState) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrInvalidEndpointState) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrInvalidEndpointState) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrInvalidOptionValue) StateTypeName() string {
+ return "pkg/tcpip.ErrInvalidOptionValue"
+}
+
+func (e *ErrInvalidOptionValue) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrInvalidOptionValue) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrInvalidOptionValue) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrInvalidOptionValue) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrInvalidOptionValue) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrInvalidPortRange) StateTypeName() string {
+ return "pkg/tcpip.ErrInvalidPortRange"
+}
+
+func (e *ErrInvalidPortRange) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrInvalidPortRange) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrInvalidPortRange) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrInvalidPortRange) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrInvalidPortRange) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrMalformedHeader) StateTypeName() string {
+ return "pkg/tcpip.ErrMalformedHeader"
+}
+
+func (e *ErrMalformedHeader) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrMalformedHeader) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrMalformedHeader) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrMalformedHeader) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrMalformedHeader) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrMessageTooLong) StateTypeName() string {
+ return "pkg/tcpip.ErrMessageTooLong"
+}
+
+func (e *ErrMessageTooLong) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrMessageTooLong) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrMessageTooLong) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrMessageTooLong) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrMessageTooLong) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrNetworkUnreachable) StateTypeName() string {
+ return "pkg/tcpip.ErrNetworkUnreachable"
+}
+
+func (e *ErrNetworkUnreachable) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrNetworkUnreachable) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrNetworkUnreachable) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrNetworkUnreachable) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrNetworkUnreachable) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrNoBufferSpace) StateTypeName() string {
+ return "pkg/tcpip.ErrNoBufferSpace"
+}
+
+func (e *ErrNoBufferSpace) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrNoBufferSpace) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrNoBufferSpace) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrNoBufferSpace) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrNoBufferSpace) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrNoPortAvailable) StateTypeName() string {
+ return "pkg/tcpip.ErrNoPortAvailable"
+}
+
+func (e *ErrNoPortAvailable) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrNoPortAvailable) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrNoPortAvailable) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrNoPortAvailable) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrNoPortAvailable) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrNoRoute) StateTypeName() string {
+ return "pkg/tcpip.ErrNoRoute"
+}
+
+func (e *ErrNoRoute) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrNoRoute) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrNoRoute) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrNoRoute) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrNoRoute) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrNoSuchFile) StateTypeName() string {
+ return "pkg/tcpip.ErrNoSuchFile"
+}
+
+func (e *ErrNoSuchFile) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrNoSuchFile) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrNoSuchFile) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrNoSuchFile) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrNoSuchFile) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrNotConnected) StateTypeName() string {
+ return "pkg/tcpip.ErrNotConnected"
+}
+
+func (e *ErrNotConnected) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrNotConnected) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrNotConnected) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrNotConnected) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrNotConnected) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrNotPermitted) StateTypeName() string {
+ return "pkg/tcpip.ErrNotPermitted"
+}
+
+func (e *ErrNotPermitted) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrNotPermitted) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrNotPermitted) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrNotPermitted) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrNotPermitted) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrNotSupported) StateTypeName() string {
+ return "pkg/tcpip.ErrNotSupported"
+}
+
+func (e *ErrNotSupported) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrNotSupported) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrNotSupported) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrNotSupported) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrNotSupported) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrPortInUse) StateTypeName() string {
+ return "pkg/tcpip.ErrPortInUse"
+}
+
+func (e *ErrPortInUse) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrPortInUse) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrPortInUse) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrPortInUse) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrPortInUse) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrQueueSizeNotSupported) StateTypeName() string {
+ return "pkg/tcpip.ErrQueueSizeNotSupported"
+}
+
+func (e *ErrQueueSizeNotSupported) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrQueueSizeNotSupported) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrQueueSizeNotSupported) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrQueueSizeNotSupported) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrQueueSizeNotSupported) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrTimeout) StateTypeName() string {
+ return "pkg/tcpip.ErrTimeout"
+}
+
+func (e *ErrTimeout) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrTimeout) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrTimeout) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrTimeout) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrTimeout) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrUnknownDevice) StateTypeName() string {
+ return "pkg/tcpip.ErrUnknownDevice"
+}
+
+func (e *ErrUnknownDevice) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrUnknownDevice) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrUnknownDevice) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrUnknownDevice) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrUnknownDevice) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrUnknownNICID) StateTypeName() string {
+ return "pkg/tcpip.ErrUnknownNICID"
+}
+
+func (e *ErrUnknownNICID) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrUnknownNICID) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrUnknownNICID) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrUnknownNICID) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrUnknownNICID) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrUnknownProtocol) StateTypeName() string {
+ return "pkg/tcpip.ErrUnknownProtocol"
+}
+
+func (e *ErrUnknownProtocol) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrUnknownProtocol) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrUnknownProtocol) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrUnknownProtocol) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrUnknownProtocol) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrUnknownProtocolOption) StateTypeName() string {
+ return "pkg/tcpip.ErrUnknownProtocolOption"
+}
+
+func (e *ErrUnknownProtocolOption) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrUnknownProtocolOption) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrUnknownProtocolOption) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrUnknownProtocolOption) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrUnknownProtocolOption) StateLoad(stateSourceObject state.Source) {
+}
+
+func (e *ErrWouldBlock) StateTypeName() string {
+ return "pkg/tcpip.ErrWouldBlock"
+}
+
+func (e *ErrWouldBlock) StateFields() []string {
+ return []string{}
+}
+
+func (e *ErrWouldBlock) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrWouldBlock) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+}
+
+func (e *ErrWouldBlock) afterLoad() {}
+
+// +checklocksignore
+func (e *ErrWouldBlock) StateLoad(stateSourceObject state.Source) {
+}
+
+func (l *sockErrorList) StateTypeName() string {
+ return "pkg/tcpip.sockErrorList"
+}
+
+func (l *sockErrorList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *sockErrorList) beforeSave() {}
+
+// +checklocksignore
+func (l *sockErrorList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *sockErrorList) afterLoad() {}
+
+// +checklocksignore
+func (l *sockErrorList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *sockErrorEntry) StateTypeName() string {
+ return "pkg/tcpip.sockErrorEntry"
+}
+
+func (e *sockErrorEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *sockErrorEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *sockErrorEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *sockErrorEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *sockErrorEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func (so *SocketOptions) StateTypeName() string {
+ return "pkg/tcpip.SocketOptions"
+}
+
+func (so *SocketOptions) StateFields() []string {
+ return []string{
+ "handler",
+ "broadcastEnabled",
+ "passCredEnabled",
+ "noChecksumEnabled",
+ "reuseAddressEnabled",
+ "reusePortEnabled",
+ "keepAliveEnabled",
+ "multicastLoopEnabled",
+ "receiveTOSEnabled",
+ "receiveTClassEnabled",
+ "receivePacketInfoEnabled",
+ "hdrIncludedEnabled",
+ "v6OnlyEnabled",
+ "quickAckEnabled",
+ "delayOptionEnabled",
+ "corkOptionEnabled",
+ "receiveOriginalDstAddress",
+ "recvErrEnabled",
+ "errQueue",
+ "bindToDevice",
+ "sendBufferSize",
+ "receiveBufferSize",
+ "linger",
+ }
+}
+
+func (so *SocketOptions) beforeSave() {}
+
+// +checklocksignore
+func (so *SocketOptions) StateSave(stateSinkObject state.Sink) {
+ so.beforeSave()
+ stateSinkObject.Save(0, &so.handler)
+ stateSinkObject.Save(1, &so.broadcastEnabled)
+ stateSinkObject.Save(2, &so.passCredEnabled)
+ stateSinkObject.Save(3, &so.noChecksumEnabled)
+ stateSinkObject.Save(4, &so.reuseAddressEnabled)
+ stateSinkObject.Save(5, &so.reusePortEnabled)
+ stateSinkObject.Save(6, &so.keepAliveEnabled)
+ stateSinkObject.Save(7, &so.multicastLoopEnabled)
+ stateSinkObject.Save(8, &so.receiveTOSEnabled)
+ stateSinkObject.Save(9, &so.receiveTClassEnabled)
+ stateSinkObject.Save(10, &so.receivePacketInfoEnabled)
+ stateSinkObject.Save(11, &so.hdrIncludedEnabled)
+ stateSinkObject.Save(12, &so.v6OnlyEnabled)
+ stateSinkObject.Save(13, &so.quickAckEnabled)
+ stateSinkObject.Save(14, &so.delayOptionEnabled)
+ stateSinkObject.Save(15, &so.corkOptionEnabled)
+ stateSinkObject.Save(16, &so.receiveOriginalDstAddress)
+ stateSinkObject.Save(17, &so.recvErrEnabled)
+ stateSinkObject.Save(18, &so.errQueue)
+ stateSinkObject.Save(19, &so.bindToDevice)
+ stateSinkObject.Save(20, &so.sendBufferSize)
+ stateSinkObject.Save(21, &so.receiveBufferSize)
+ stateSinkObject.Save(22, &so.linger)
+}
+
+func (so *SocketOptions) afterLoad() {}
+
+// +checklocksignore
+func (so *SocketOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &so.handler)
+ stateSourceObject.Load(1, &so.broadcastEnabled)
+ stateSourceObject.Load(2, &so.passCredEnabled)
+ stateSourceObject.Load(3, &so.noChecksumEnabled)
+ stateSourceObject.Load(4, &so.reuseAddressEnabled)
+ stateSourceObject.Load(5, &so.reusePortEnabled)
+ stateSourceObject.Load(6, &so.keepAliveEnabled)
+ stateSourceObject.Load(7, &so.multicastLoopEnabled)
+ stateSourceObject.Load(8, &so.receiveTOSEnabled)
+ stateSourceObject.Load(9, &so.receiveTClassEnabled)
+ stateSourceObject.Load(10, &so.receivePacketInfoEnabled)
+ stateSourceObject.Load(11, &so.hdrIncludedEnabled)
+ stateSourceObject.Load(12, &so.v6OnlyEnabled)
+ stateSourceObject.Load(13, &so.quickAckEnabled)
+ stateSourceObject.Load(14, &so.delayOptionEnabled)
+ stateSourceObject.Load(15, &so.corkOptionEnabled)
+ stateSourceObject.Load(16, &so.receiveOriginalDstAddress)
+ stateSourceObject.Load(17, &so.recvErrEnabled)
+ stateSourceObject.Load(18, &so.errQueue)
+ stateSourceObject.Load(19, &so.bindToDevice)
+ stateSourceObject.Load(20, &so.sendBufferSize)
+ stateSourceObject.Load(21, &so.receiveBufferSize)
+ stateSourceObject.Load(22, &so.linger)
+}
+
+func (l *LocalSockError) StateTypeName() string {
+ return "pkg/tcpip.LocalSockError"
+}
+
+func (l *LocalSockError) StateFields() []string {
+ return []string{
+ "info",
+ }
+}
+
+func (l *LocalSockError) beforeSave() {}
+
+// +checklocksignore
+func (l *LocalSockError) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.info)
+}
+
+func (l *LocalSockError) afterLoad() {}
+
+// +checklocksignore
+func (l *LocalSockError) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.info)
+}
+
+func (s *SockError) StateTypeName() string {
+ return "pkg/tcpip.SockError"
+}
+
+func (s *SockError) StateFields() []string {
+ return []string{
+ "sockErrorEntry",
+ "Err",
+ "Cause",
+ "Payload",
+ "Dst",
+ "Offender",
+ "NetProto",
+ }
+}
+
+func (s *SockError) beforeSave() {}
+
+// +checklocksignore
+func (s *SockError) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.sockErrorEntry)
+ stateSinkObject.Save(1, &s.Err)
+ stateSinkObject.Save(2, &s.Cause)
+ stateSinkObject.Save(3, &s.Payload)
+ stateSinkObject.Save(4, &s.Dst)
+ stateSinkObject.Save(5, &s.Offender)
+ stateSinkObject.Save(6, &s.NetProto)
+}
+
+func (s *SockError) afterLoad() {}
+
+// +checklocksignore
+func (s *SockError) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.sockErrorEntry)
+ stateSourceObject.Load(1, &s.Err)
+ stateSourceObject.Load(2, &s.Cause)
+ stateSourceObject.Load(3, &s.Payload)
+ stateSourceObject.Load(4, &s.Dst)
+ stateSourceObject.Load(5, &s.Offender)
+ stateSourceObject.Load(6, &s.NetProto)
+}
+
+func (s *stdClock) StateTypeName() string {
+ return "pkg/tcpip.stdClock"
+}
+
+func (s *stdClock) StateFields() []string {
+ return []string{
+ "maxMonotonic",
+ }
+}
+
+func (s *stdClock) beforeSave() {}
+
+// +checklocksignore
+func (s *stdClock) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.maxMonotonic)
+}
+
+// +checklocksignore
+func (s *stdClock) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.maxMonotonic)
+ stateSourceObject.AfterLoad(s.afterLoad)
+}
+
+func (mt *MonotonicTime) StateTypeName() string {
+ return "pkg/tcpip.MonotonicTime"
+}
+
+func (mt *MonotonicTime) StateFields() []string {
+ return []string{
+ "nanoseconds",
+ }
+}
+
+func (mt *MonotonicTime) beforeSave() {}
+
+// +checklocksignore
+func (mt *MonotonicTime) StateSave(stateSinkObject state.Sink) {
+ mt.beforeSave()
+ stateSinkObject.Save(0, &mt.nanoseconds)
+}
+
+func (mt *MonotonicTime) afterLoad() {}
+
+// +checklocksignore
+func (mt *MonotonicTime) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &mt.nanoseconds)
+}
+
+func (f *FullAddress) StateTypeName() string {
+ return "pkg/tcpip.FullAddress"
+}
+
+func (f *FullAddress) StateFields() []string {
+ return []string{
+ "NIC",
+ "Addr",
+ "Port",
+ }
+}
+
+func (f *FullAddress) beforeSave() {}
+
+// +checklocksignore
+func (f *FullAddress) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.NIC)
+ stateSinkObject.Save(1, &f.Addr)
+ stateSinkObject.Save(2, &f.Port)
+}
+
+func (f *FullAddress) afterLoad() {}
+
+// +checklocksignore
+func (f *FullAddress) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.NIC)
+ stateSourceObject.Load(1, &f.Addr)
+ stateSourceObject.Load(2, &f.Port)
+}
+
+func (c *ControlMessages) StateTypeName() string {
+ return "pkg/tcpip.ControlMessages"
+}
+
+func (c *ControlMessages) StateFields() []string {
+ return []string{
+ "HasTimestamp",
+ "Timestamp",
+ "HasInq",
+ "Inq",
+ "HasTOS",
+ "TOS",
+ "HasTClass",
+ "TClass",
+ "HasIPPacketInfo",
+ "PacketInfo",
+ "HasOriginalDstAddress",
+ "OriginalDstAddress",
+ "SockErr",
+ }
+}
+
+func (c *ControlMessages) beforeSave() {}
+
+// +checklocksignore
+func (c *ControlMessages) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.HasTimestamp)
+ stateSinkObject.Save(1, &c.Timestamp)
+ stateSinkObject.Save(2, &c.HasInq)
+ stateSinkObject.Save(3, &c.Inq)
+ stateSinkObject.Save(4, &c.HasTOS)
+ stateSinkObject.Save(5, &c.TOS)
+ stateSinkObject.Save(6, &c.HasTClass)
+ stateSinkObject.Save(7, &c.TClass)
+ stateSinkObject.Save(8, &c.HasIPPacketInfo)
+ stateSinkObject.Save(9, &c.PacketInfo)
+ stateSinkObject.Save(10, &c.HasOriginalDstAddress)
+ stateSinkObject.Save(11, &c.OriginalDstAddress)
+ stateSinkObject.Save(12, &c.SockErr)
+}
+
+func (c *ControlMessages) afterLoad() {}
+
+// +checklocksignore
+func (c *ControlMessages) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.HasTimestamp)
+ stateSourceObject.Load(1, &c.Timestamp)
+ stateSourceObject.Load(2, &c.HasInq)
+ stateSourceObject.Load(3, &c.Inq)
+ stateSourceObject.Load(4, &c.HasTOS)
+ stateSourceObject.Load(5, &c.TOS)
+ stateSourceObject.Load(6, &c.HasTClass)
+ stateSourceObject.Load(7, &c.TClass)
+ stateSourceObject.Load(8, &c.HasIPPacketInfo)
+ stateSourceObject.Load(9, &c.PacketInfo)
+ stateSourceObject.Load(10, &c.HasOriginalDstAddress)
+ stateSourceObject.Load(11, &c.OriginalDstAddress)
+ stateSourceObject.Load(12, &c.SockErr)
+}
+
+func (l *LinkPacketInfo) StateTypeName() string {
+ return "pkg/tcpip.LinkPacketInfo"
+}
+
+func (l *LinkPacketInfo) StateFields() []string {
+ return []string{
+ "Protocol",
+ "PktType",
+ }
+}
+
+func (l *LinkPacketInfo) beforeSave() {}
+
+// +checklocksignore
+func (l *LinkPacketInfo) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.Protocol)
+ stateSinkObject.Save(1, &l.PktType)
+}
+
+func (l *LinkPacketInfo) afterLoad() {}
+
+// +checklocksignore
+func (l *LinkPacketInfo) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.Protocol)
+ stateSourceObject.Load(1, &l.PktType)
+}
+
+func (l *LingerOption) StateTypeName() string {
+ return "pkg/tcpip.LingerOption"
+}
+
+func (l *LingerOption) StateFields() []string {
+ return []string{
+ "Enabled",
+ "Timeout",
+ }
+}
+
+func (l *LingerOption) beforeSave() {}
+
+// +checklocksignore
+func (l *LingerOption) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.Enabled)
+ stateSinkObject.Save(1, &l.Timeout)
+}
+
+func (l *LingerOption) afterLoad() {}
+
+// +checklocksignore
+func (l *LingerOption) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.Enabled)
+ stateSourceObject.Load(1, &l.Timeout)
+}
+
+func (i *IPPacketInfo) StateTypeName() string {
+ return "pkg/tcpip.IPPacketInfo"
+}
+
+func (i *IPPacketInfo) StateFields() []string {
+ return []string{
+ "NIC",
+ "LocalAddr",
+ "DestinationAddr",
+ }
+}
+
+func (i *IPPacketInfo) beforeSave() {}
+
+// +checklocksignore
+func (i *IPPacketInfo) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.NIC)
+ stateSinkObject.Save(1, &i.LocalAddr)
+ stateSinkObject.Save(2, &i.DestinationAddr)
+}
+
+func (i *IPPacketInfo) afterLoad() {}
+
+// +checklocksignore
+func (i *IPPacketInfo) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.NIC)
+ stateSourceObject.Load(1, &i.LocalAddr)
+ stateSourceObject.Load(2, &i.DestinationAddr)
+}
+
+func init() {
+ state.Register((*ErrAborted)(nil))
+ state.Register((*ErrAddressFamilyNotSupported)(nil))
+ state.Register((*ErrAlreadyBound)(nil))
+ state.Register((*ErrAlreadyConnected)(nil))
+ state.Register((*ErrAlreadyConnecting)(nil))
+ state.Register((*ErrBadAddress)(nil))
+ state.Register((*ErrBadBuffer)(nil))
+ state.Register((*ErrBadLocalAddress)(nil))
+ state.Register((*ErrBroadcastDisabled)(nil))
+ state.Register((*ErrClosedForReceive)(nil))
+ state.Register((*ErrClosedForSend)(nil))
+ state.Register((*ErrConnectStarted)(nil))
+ state.Register((*ErrConnectionAborted)(nil))
+ state.Register((*ErrConnectionRefused)(nil))
+ state.Register((*ErrConnectionReset)(nil))
+ state.Register((*ErrDestinationRequired)(nil))
+ state.Register((*ErrDuplicateAddress)(nil))
+ state.Register((*ErrDuplicateNICID)(nil))
+ state.Register((*ErrInvalidEndpointState)(nil))
+ state.Register((*ErrInvalidOptionValue)(nil))
+ state.Register((*ErrInvalidPortRange)(nil))
+ state.Register((*ErrMalformedHeader)(nil))
+ state.Register((*ErrMessageTooLong)(nil))
+ state.Register((*ErrNetworkUnreachable)(nil))
+ state.Register((*ErrNoBufferSpace)(nil))
+ state.Register((*ErrNoPortAvailable)(nil))
+ state.Register((*ErrNoRoute)(nil))
+ state.Register((*ErrNoSuchFile)(nil))
+ state.Register((*ErrNotConnected)(nil))
+ state.Register((*ErrNotPermitted)(nil))
+ state.Register((*ErrNotSupported)(nil))
+ state.Register((*ErrPortInUse)(nil))
+ state.Register((*ErrQueueSizeNotSupported)(nil))
+ state.Register((*ErrTimeout)(nil))
+ state.Register((*ErrUnknownDevice)(nil))
+ state.Register((*ErrUnknownNICID)(nil))
+ state.Register((*ErrUnknownProtocol)(nil))
+ state.Register((*ErrUnknownProtocolOption)(nil))
+ state.Register((*ErrWouldBlock)(nil))
+ state.Register((*sockErrorList)(nil))
+ state.Register((*sockErrorEntry)(nil))
+ state.Register((*SocketOptions)(nil))
+ state.Register((*LocalSockError)(nil))
+ state.Register((*SockError)(nil))
+ state.Register((*stdClock)(nil))
+ state.Register((*MonotonicTime)(nil))
+ state.Register((*FullAddress)(nil))
+ state.Register((*ControlMessages)(nil))
+ state.Register((*LinkPacketInfo)(nil))
+ state.Register((*LingerOption)(nil))
+ state.Register((*IPPacketInfo)(nil))
+}
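Every type registered above follows the same stateify contract: StateTypeName and StateFields describe the type, StateSave writes each field into a numbered slot on a state.Sink, and StateLoad reads the slots back from a state.Source (deferring work via AfterLoad where needed, as stdClock does). The sketch below mimics that contract with hand-rolled sink/source stand-ins so the round trip can be seen in isolation; the interfaces and the toggleOption type here are simplified assumptions, not the real pkg/state API.

```go
// Hand-rolled illustration of the save/restore contract the generated
// code above satisfies. sink/source are simplified stand-ins for
// state.Sink and state.Source.
package main

import "fmt"

type sink interface{ Save(slot int, v interface{}) }
type source interface{ Load(slot int, v interface{}) }

// mapSink records saved field values by slot index.
type mapSink map[int]interface{}

func (m mapSink) Save(slot int, v interface{}) {
	if p, ok := v.(*bool); ok {
		m[slot] = *p
	}
}

// mapSource restores previously saved values by slot index.
type mapSource map[int]interface{}

func (m mapSource) Load(slot int, v interface{}) {
	if p, ok := v.(*bool); ok {
		*p = m[slot].(bool)
	}
}

// toggleOption is a hypothetical one-field type shaped like the
// generated savers/loaders above.
type toggleOption struct {
	Enabled bool
}

func (t *toggleOption) StateTypeName() string { return "example.toggleOption" }
func (t *toggleOption) StateFields() []string { return []string{"Enabled"} }
func (t *toggleOption) StateSave(s sink)      { s.Save(0, &t.Enabled) }
func (t *toggleOption) StateLoad(s source)    { s.Load(0, &t.Enabled) }

func main() {
	saved := mapSink{}
	(&toggleOption{Enabled: true}).StateSave(saved)

	var restored toggleOption
	restored.StateLoad(mapSource(saved))
	fmt.Println(restored.Enabled) // true
}
```

The positional slots are what let the generated StateSave and StateLoad stay in lockstep without reflection at restore time.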
diff --git a/pkg/tcpip/tcpip_test.go b/pkg/tcpip/tcpip_test.go
deleted file mode 100644
index c96ae2f02..000000000
--- a/pkg/tcpip/tcpip_test.go
+++ /dev/null
@@ -1,325 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcpip
-
-import (
- "bytes"
- "fmt"
- "io"
- "net"
- "testing"
-
- "github.com/google/go-cmp/cmp"
-)
-
-func TestLimitedWriter_Write(t *testing.T) {
- var b bytes.Buffer
- l := LimitedWriter{
- W: &b,
- N: 5,
- }
- if n, err := l.Write([]byte{0, 1, 2}); err != nil {
- t.Errorf("got l.Write(3/5) = (_, %s), want nil", err)
- } else if n != 3 {
- t.Errorf("got l.Write(3/5) = (%d, _), want 3", n)
- }
- if n, err := l.Write([]byte{3, 4, 5}); err != io.ErrShortWrite {
- t.Errorf("got l.Write(3/2) = (_, %s), want io.ErrShortWrite", err)
- } else if n != 2 {
- t.Errorf("got l.Write(3/2) = (%d, _), want 2", n)
- }
- if l.N != 0 {
- t.Errorf("got l.N = %d, want 0", l.N)
- }
- l.N = 1
- if n, err := l.Write([]byte{5}); err != nil {
- t.Errorf("got l.Write(1/1) = (_, %s), want nil", err)
- } else if n != 1 {
- t.Errorf("got l.Write(1/1) = (%d, _), want 1", n)
- }
- if diff := cmp.Diff(b.Bytes(), []byte{0, 1, 2, 3, 4, 5}); diff != "" {
- t.Errorf("%T wrote incorrect data: (-want +got):\n%s", l, diff)
- }
-}
-
-func TestSubnetContains(t *testing.T) {
- tests := []struct {
- s Address
- m AddressMask
- a Address
- want bool
- }{
- {"\xa0", "\xf0", "\x90", false},
- {"\xa0", "\xf0", "\xa0", true},
- {"\xa0", "\xf0", "\xa5", true},
- {"\xa0", "\xf0", "\xaf", true},
- {"\xa0", "\xf0", "\xb0", false},
- {"\xa0", "\xf0", "", false},
- {"\xa0", "\xf0", "\xa0\x00", false},
- {"\xc2\x80", "\xff\xf0", "\xc2\x80", true},
- {"\xc2\x80", "\xff\xf0", "\xc2\x00", false},
- {"\xc2\x00", "\xff\xf0", "\xc2\x00", true},
- {"\xc2\x00", "\xff\xf0", "\xc2\x80", false},
- }
- for _, tt := range tests {
- s, err := NewSubnet(tt.s, tt.m)
- if err != nil {
- t.Errorf("NewSubnet(%v, %v) = %v", tt.s, tt.m, err)
- continue
- }
- if got := s.Contains(tt.a); got != tt.want {
- t.Errorf("Subnet(%v).Contains(%v) = %v, want %v", s, tt.a, got, tt.want)
- }
- }
-}
-
-func TestSubnetBits(t *testing.T) {
- tests := []struct {
- a AddressMask
- want1 int
- want0 int
- }{
- {"\x00", 0, 8},
- {"\x00\x00", 0, 16},
- {"\x36", 0, 8},
- {"\x5c", 0, 8},
- {"\x5c\x5c", 0, 16},
- {"\x5c\x36", 0, 16},
- {"\x36\x5c", 0, 16},
- {"\x36\x36", 0, 16},
- {"\xff", 8, 0},
- {"\xff\xff", 16, 0},
- }
- for _, tt := range tests {
- s := &Subnet{mask: tt.a}
- got1, got0 := s.Bits()
- if got1 != tt.want1 || got0 != tt.want0 {
- t.Errorf("Subnet{mask: %x}.Bits() = %d, %d, want %d, %d", tt.a, got1, got0, tt.want1, tt.want0)
- }
- }
-}
-
-func TestSubnetPrefix(t *testing.T) {
- tests := []struct {
- a AddressMask
- want int
- }{
- {"\x00", 0},
- {"\x00\x00", 0},
- {"\x36", 0},
- {"\x86", 1},
- {"\xc5", 2},
- {"\xff\x00", 8},
- {"\xff\x36", 8},
- {"\xff\x8c", 9},
- {"\xff\xc8", 10},
- {"\xff", 8},
- {"\xff\xff", 16},
- }
- for _, tt := range tests {
- s := &Subnet{mask: tt.a}
- got := s.Prefix()
- if got != tt.want {
- 			t.Errorf("Subnet{mask: %x}.Prefix() = %d, want %d", tt.a, got, tt.want)
- }
- }
-}
-
-func TestSubnetCreation(t *testing.T) {
- tests := []struct {
- a Address
- m AddressMask
- want error
- }{
- {"\xa0", "\xf0", nil},
- {"\xa0\xa0", "\xf0", errSubnetLengthMismatch},
- {"\xaa", "\xf0", errSubnetAddressMasked},
- {"", "", nil},
- }
- for _, tt := range tests {
- if _, err := NewSubnet(tt.a, tt.m); err != tt.want {
- t.Errorf("NewSubnet(%v, %v) = %v, want %v", tt.a, tt.m, err, tt.want)
- }
- }
-}
-
-func TestAddressString(t *testing.T) {
- for _, want := range []string{
- // Taken from stdlib.
- "2001:db8::123:12:1",
- "2001:db8::1",
- "2001:db8:0:1:0:1:0:1",
- "2001:db8:1:0:1:0:1:0",
- "2001::1:0:0:1",
- "2001:db8:0:0:1::",
- "2001:db8::1:0:0:1",
- "2001:db8::a:b:c:d",
-
- // Leading zeros.
- "::1",
- // Trailing zeros.
- "8::",
- // No zeros.
- "1:1:1:1:1:1:1:1",
- // Longer sequence is after other zeros, but not at the end.
- "1:0:0:1::1",
- // Longer sequence is at the beginning, shorter sequence is at
- // the end.
- "::1:1:1:0:0",
- // Longer sequence is not at the beginning, shorter sequence is
- // at the end.
- "1::1:1:0:0",
- // Longer sequence is at the beginning, shorter sequence is not
- // at the end.
- "::1:1:0:0:1",
- // Neither sequence is at an end, longer is after shorter.
- "1:0:0:1::1",
- // Shorter sequence is at the beginning, longer sequence is not
- // at the end.
- "0:0:1:1::1",
- // Shorter sequence is at the beginning, longer sequence is at
- // the end.
- "0:0:1:1:1::",
- // Short sequences at both ends, longer one in the middle.
- "0:1:1::1:1:0",
- // Short sequences at both ends, longer one in the middle.
- "0:1::1:0:0",
- // Short sequences at both ends, longer one in the middle.
- "0:0:1::1:0",
- // Longer sequence surrounded by shorter sequences, but none at
- // the end.
- "1:0:1::1:0:1",
- } {
- addr := Address(net.ParseIP(want))
- if got := addr.String(); got != want {
- t.Errorf("Address(%x).String() = '%s', want = '%s'", addr, got, want)
- }
- }
-}
-
-func TestAddressWithPrefixSubnet(t *testing.T) {
- tests := []struct {
- addr Address
- prefixLen int
- subnetAddr Address
- subnetMask AddressMask
- }{
- {"\xaa\x55\x33\x42", -1, "\x00\x00\x00\x00", "\x00\x00\x00\x00"},
- {"\xaa\x55\x33\x42", 0, "\x00\x00\x00\x00", "\x00\x00\x00\x00"},
- {"\xaa\x55\x33\x42", 1, "\x80\x00\x00\x00", "\x80\x00\x00\x00"},
- {"\xaa\x55\x33\x42", 7, "\xaa\x00\x00\x00", "\xfe\x00\x00\x00"},
- {"\xaa\x55\x33\x42", 8, "\xaa\x00\x00\x00", "\xff\x00\x00\x00"},
- {"\xaa\x55\x33\x42", 24, "\xaa\x55\x33\x00", "\xff\xff\xff\x00"},
- {"\xaa\x55\x33\x42", 31, "\xaa\x55\x33\x42", "\xff\xff\xff\xfe"},
- {"\xaa\x55\x33\x42", 32, "\xaa\x55\x33\x42", "\xff\xff\xff\xff"},
- {"\xaa\x55\x33\x42", 33, "\xaa\x55\x33\x42", "\xff\xff\xff\xff"},
- }
- for _, tt := range tests {
- ap := AddressWithPrefix{Address: tt.addr, PrefixLen: tt.prefixLen}
- gotSubnet := ap.Subnet()
- wantSubnet, err := NewSubnet(tt.subnetAddr, tt.subnetMask)
- if err != nil {
- t.Errorf("NewSubnet(%q, %q) failed: %s", tt.subnetAddr, tt.subnetMask, err)
- continue
- }
- if gotSubnet != wantSubnet {
- t.Errorf("got subnet = %q, want = %q", gotSubnet, wantSubnet)
- }
- }
-}
-
-func TestAddressUnspecified(t *testing.T) {
- tests := []struct {
- addr Address
- unspecified bool
- }{
- {
- addr: "",
- unspecified: true,
- },
- {
- addr: "\x00",
- unspecified: true,
- },
- {
- addr: "\x01",
- unspecified: false,
- },
- {
- addr: "\x00\x00",
- unspecified: true,
- },
- {
- addr: "\x01\x00",
- unspecified: false,
- },
- {
- addr: "\x00\x01",
- unspecified: false,
- },
- {
- addr: "\x01\x01",
- unspecified: false,
- },
- }
-
- for _, test := range tests {
- t.Run(fmt.Sprintf("addr=%s", test.addr), func(t *testing.T) {
- if got := test.addr.Unspecified(); got != test.unspecified {
- t.Fatalf("got addr.Unspecified() = %t, want = %t", got, test.unspecified)
- }
- })
- }
-}
-
-func TestAddressMatchingPrefix(t *testing.T) {
- tests := []struct {
- addrA Address
- addrB Address
- prefix uint8
- }{
- {
- addrA: "\x01\x01",
- addrB: "\x01\x01",
- prefix: 16,
- },
- {
- addrA: "\x01\x01",
- addrB: "\x01\x00",
- prefix: 15,
- },
- {
- addrA: "\x01\x01",
- addrB: "\x81\x00",
- prefix: 0,
- },
- {
- addrA: "\x01\x01",
- addrB: "\x01\x80",
- prefix: 8,
- },
- {
- addrA: "\x01\x01",
- addrB: "\x02\x80",
- prefix: 6,
- },
- }
-
- for _, test := range tests {
- if got := test.addrA.MatchingPrefix(test.addrB); got != test.prefix {
- t.Errorf("got (%s).MatchingPrefix(%s) = %d, want = %d", test.addrA, test.addrB, got, test.prefix)
- }
- }
-}
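The deleted tcpip_test.go above writes addresses and masks as raw byte strings (tcpip.Address behaves as a string of address bytes at this point in the tree), which is why cases read "\xaa\x55\x33\x42" rather than dotted quads. A tiny standalone sketch of that convention, using only the standard library and no tcpip types:

```go
// Illustrates the raw-byte address convention used by the deleted tests.
// dotted is a hypothetical helper for display only.
package main

import "fmt"

func dotted(raw string) string {
	b := []byte(raw)
	return fmt.Sprintf("%d.%d.%d.%d", b[0], b[1], b[2], b[3])
}

func main() {
	fmt.Println(dotted("\xaa\x55\x33\x42")) // 170.85.51.66
	fmt.Println(dotted("\xff\xff\xff\x00")) // 255.255.255.0, i.e. a /24 mask
}
```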
diff --git a/pkg/tcpip/tests/integration/BUILD b/pkg/tcpip/tests/integration/BUILD
deleted file mode 100644
index 181ef799e..000000000
--- a/pkg/tcpip/tests/integration/BUILD
+++ /dev/null
@@ -1,141 +0,0 @@
-load("//tools:defs.bzl", "go_test")
-
-package(licenses = ["notice"])
-
-go_test(
- name = "forward_test",
- size = "small",
- srcs = ["forward_test.go"],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/checker",
- "//pkg/tcpip/header",
- "//pkg/tcpip/link/channel",
- "//pkg/tcpip/network/arp",
- "//pkg/tcpip/network/ipv4",
- "//pkg/tcpip/network/ipv6",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/tests/utils",
- "//pkg/tcpip/testutil",
- "//pkg/tcpip/transport/tcp",
- "//pkg/tcpip/transport/udp",
- "//pkg/waiter",
- "@com_github_google_go_cmp//cmp:go_default_library",
- ],
-)
-
-go_test(
- name = "iptables_test",
- size = "small",
- srcs = ["iptables_test.go"],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/checker",
- "//pkg/tcpip/header",
- "//pkg/tcpip/link/channel",
- "//pkg/tcpip/network/ipv4",
- "//pkg/tcpip/network/ipv6",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/tests/utils",
- "//pkg/tcpip/testutil",
- "//pkg/tcpip/transport/udp",
- ],
-)
-
-go_test(
- name = "link_resolution_test",
- size = "small",
- srcs = ["link_resolution_test.go"],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/checker",
- "//pkg/tcpip/faketime",
- "//pkg/tcpip/header",
- "//pkg/tcpip/link/channel",
- "//pkg/tcpip/link/pipe",
- "//pkg/tcpip/network/arp",
- "//pkg/tcpip/network/ipv4",
- "//pkg/tcpip/network/ipv6",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/tests/utils",
- "//pkg/tcpip/testutil",
- "//pkg/tcpip/transport/icmp",
- "//pkg/tcpip/transport/tcp",
- "//pkg/tcpip/transport/udp",
- "//pkg/waiter",
- "@com_github_google_go_cmp//cmp:go_default_library",
- "@com_github_google_go_cmp//cmp/cmpopts:go_default_library",
- ],
-)
-
-go_test(
- name = "loopback_test",
- size = "small",
- srcs = ["loopback_test.go"],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/checker",
- "//pkg/tcpip/header",
- "//pkg/tcpip/link/channel",
- "//pkg/tcpip/link/loopback",
- "//pkg/tcpip/network/ipv4",
- "//pkg/tcpip/network/ipv6",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/tests/utils",
- "//pkg/tcpip/testutil",
- "//pkg/tcpip/transport/icmp",
- "//pkg/tcpip/transport/tcp",
- "//pkg/tcpip/transport/udp",
- "//pkg/waiter",
- "@com_github_google_go_cmp//cmp:go_default_library",
- ],
-)
-
-go_test(
- name = "multicast_broadcast_test",
- size = "small",
- srcs = ["multicast_broadcast_test.go"],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/checker",
- "//pkg/tcpip/header",
- "//pkg/tcpip/link/channel",
- "//pkg/tcpip/link/loopback",
- "//pkg/tcpip/network/ipv4",
- "//pkg/tcpip/network/ipv6",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/tests/utils",
- "//pkg/tcpip/testutil",
- "//pkg/tcpip/transport/icmp",
- "//pkg/tcpip/transport/udp",
- "//pkg/waiter",
- "@com_github_google_go_cmp//cmp:go_default_library",
- ],
-)
-
-go_test(
- name = "route_test",
- size = "small",
- srcs = ["route_test.go"],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/checker",
- "//pkg/tcpip/header",
- "//pkg/tcpip/link/channel",
- "//pkg/tcpip/link/loopback",
- "//pkg/tcpip/network/ipv4",
- "//pkg/tcpip/network/ipv6",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/tests/utils",
- "//pkg/tcpip/testutil",
- "//pkg/tcpip/transport/icmp",
- "//pkg/tcpip/transport/udp",
- "//pkg/waiter",
- "@com_github_google_go_cmp//cmp:go_default_library",
- ],
-)
diff --git a/pkg/tcpip/tests/integration/forward_test.go b/pkg/tcpip/tests/integration/forward_test.go
deleted file mode 100644
index 92fa6257d..000000000
--- a/pkg/tcpip/tests/integration/forward_test.go
+++ /dev/null
@@ -1,690 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package forward_test
-
-import (
- "bytes"
- "fmt"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/checker"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/link/channel"
- "gvisor.dev/gvisor/pkg/tcpip/network/arp"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/tests/utils"
- "gvisor.dev/gvisor/pkg/tcpip/testutil"
- "gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
- "gvisor.dev/gvisor/pkg/tcpip/transport/udp"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-const ttl = 64
-
-var (
- ipv4GlobalMulticastAddr = testutil.MustParse4("224.0.1.10")
- ipv6GlobalMulticastAddr = testutil.MustParse6("ff0e::a")
-)
-
-func rxICMPv4EchoRequest(e *channel.Endpoint, src, dst tcpip.Address) {
- utils.RxICMPv4EchoRequest(e, src, dst, ttl)
-}
-
-func rxICMPv6EchoRequest(e *channel.Endpoint, src, dst tcpip.Address) {
- utils.RxICMPv6EchoRequest(e, src, dst, ttl)
-}
-
-func forwardedICMPv4EchoRequestChecker(t *testing.T, b []byte, src, dst tcpip.Address) {
- checker.IPv4(t, b,
- checker.SrcAddr(src),
- checker.DstAddr(dst),
- checker.TTL(ttl-1),
- checker.ICMPv4(
- checker.ICMPv4Type(header.ICMPv4Echo)))
-}
-
-func forwardedICMPv6EchoRequestChecker(t *testing.T, b []byte, src, dst tcpip.Address) {
- checker.IPv6(t, b,
- checker.SrcAddr(src),
- checker.DstAddr(dst),
- checker.TTL(ttl-1),
- checker.ICMPv6(
- checker.ICMPv6Type(header.ICMPv6EchoRequest)))
-}
-
-func TestForwarding(t *testing.T) {
- const listenPort = 8080
-
- type endpointAndAddresses struct {
- serverEP tcpip.Endpoint
- serverAddr tcpip.Address
- serverReadableCH chan struct{}
-
- clientEP tcpip.Endpoint
- clientAddr tcpip.Address
- clientReadableCH chan struct{}
- }
-
- newEP := func(t *testing.T, s *stack.Stack, transProto tcpip.TransportProtocolNumber, netProto tcpip.NetworkProtocolNumber) (tcpip.Endpoint, chan struct{}) {
- t.Helper()
- var wq waiter.Queue
- we, ch := waiter.NewChannelEntry(nil)
- wq.EventRegister(&we, waiter.ReadableEvents)
- ep, err := s.NewEndpoint(transProto, netProto, &wq)
- if err != nil {
- t.Fatalf("s.NewEndpoint(%d, %d, _): %s", transProto, netProto, err)
- }
-
- t.Cleanup(func() {
- wq.EventUnregister(&we)
- })
-
- return ep, ch
- }
-
- tests := []struct {
- name string
- epAndAddrs func(t *testing.T, host1Stack, routerStack, host2Stack *stack.Stack, proto tcpip.TransportProtocolNumber) endpointAndAddresses
- }{
- {
- name: "IPv4 host1 server with host2 client",
- epAndAddrs: func(t *testing.T, host1Stack, routerStack, host2Stack *stack.Stack, proto tcpip.TransportProtocolNumber) endpointAndAddresses {
- ep1, ep1WECH := newEP(t, host1Stack, proto, ipv4.ProtocolNumber)
- ep2, ep2WECH := newEP(t, host2Stack, proto, ipv4.ProtocolNumber)
- return endpointAndAddresses{
- serverEP: ep1,
- serverAddr: utils.Host1IPv4Addr.AddressWithPrefix.Address,
- serverReadableCH: ep1WECH,
-
- clientEP: ep2,
- clientAddr: utils.Host2IPv4Addr.AddressWithPrefix.Address,
- clientReadableCH: ep2WECH,
- }
- },
- },
- {
- name: "IPv6 host2 server with host1 client",
- epAndAddrs: func(t *testing.T, host1Stack, routerStack, host2Stack *stack.Stack, proto tcpip.TransportProtocolNumber) endpointAndAddresses {
- ep1, ep1WECH := newEP(t, host2Stack, proto, ipv6.ProtocolNumber)
- ep2, ep2WECH := newEP(t, host1Stack, proto, ipv6.ProtocolNumber)
- return endpointAndAddresses{
- serverEP: ep1,
- serverAddr: utils.Host2IPv6Addr.AddressWithPrefix.Address,
- serverReadableCH: ep1WECH,
-
- clientEP: ep2,
- clientAddr: utils.Host1IPv6Addr.AddressWithPrefix.Address,
- clientReadableCH: ep2WECH,
- }
- },
- },
- {
- name: "IPv4 host2 server with routerNIC1 client",
- epAndAddrs: func(t *testing.T, host1Stack, routerStack, host2Stack *stack.Stack, proto tcpip.TransportProtocolNumber) endpointAndAddresses {
- ep1, ep1WECH := newEP(t, host2Stack, proto, ipv4.ProtocolNumber)
- ep2, ep2WECH := newEP(t, routerStack, proto, ipv4.ProtocolNumber)
- return endpointAndAddresses{
- serverEP: ep1,
- serverAddr: utils.Host2IPv4Addr.AddressWithPrefix.Address,
- serverReadableCH: ep1WECH,
-
- clientEP: ep2,
- clientAddr: utils.RouterNIC1IPv4Addr.AddressWithPrefix.Address,
- clientReadableCH: ep2WECH,
- }
- },
- },
- {
- name: "IPv6 routerNIC2 server with host1 client",
- epAndAddrs: func(t *testing.T, host1Stack, routerStack, host2Stack *stack.Stack, proto tcpip.TransportProtocolNumber) endpointAndAddresses {
- ep1, ep1WECH := newEP(t, routerStack, proto, ipv6.ProtocolNumber)
- ep2, ep2WECH := newEP(t, host1Stack, proto, ipv6.ProtocolNumber)
- return endpointAndAddresses{
- serverEP: ep1,
- serverAddr: utils.RouterNIC2IPv6Addr.AddressWithPrefix.Address,
- serverReadableCH: ep1WECH,
-
- clientEP: ep2,
- clientAddr: utils.Host1IPv6Addr.AddressWithPrefix.Address,
- clientReadableCH: ep2WECH,
- }
- },
- },
- }
-
- subTests := []struct {
- name string
- proto tcpip.TransportProtocolNumber
- expectedConnectErr tcpip.Error
- setupServer func(t *testing.T, ep tcpip.Endpoint)
- setupServerConn func(t *testing.T, ep tcpip.Endpoint, ch <-chan struct{}, clientAddr tcpip.FullAddress) (tcpip.Endpoint, chan struct{})
- needRemoteAddr bool
- }{
- {
- name: "UDP",
- proto: udp.ProtocolNumber,
- expectedConnectErr: nil,
- setupServerConn: func(t *testing.T, ep tcpip.Endpoint, _ <-chan struct{}, clientAddr tcpip.FullAddress) (tcpip.Endpoint, chan struct{}) {
- t.Helper()
-
- if err := ep.Connect(clientAddr); err != nil {
- t.Fatalf("ep.Connect(%#v): %s", clientAddr, err)
- }
- return nil, nil
- },
- needRemoteAddr: true,
- },
- {
- name: "TCP",
- proto: tcp.ProtocolNumber,
- expectedConnectErr: &tcpip.ErrConnectStarted{},
- setupServer: func(t *testing.T, ep tcpip.Endpoint) {
- t.Helper()
-
- if err := ep.Listen(1); err != nil {
- t.Fatalf("ep.Listen(1): %s", err)
- }
- },
- setupServerConn: func(t *testing.T, ep tcpip.Endpoint, ch <-chan struct{}, clientAddr tcpip.FullAddress) (tcpip.Endpoint, chan struct{}) {
- t.Helper()
-
- var addr tcpip.FullAddress
- for {
- newEP, wq, err := ep.Accept(&addr)
- if _, ok := err.(*tcpip.ErrWouldBlock); ok {
- <-ch
- continue
- }
- if err != nil {
- t.Fatalf("ep.Accept(_): %s", err)
- }
- if diff := cmp.Diff(clientAddr, addr, checker.IgnoreCmpPath(
- "NIC",
- )); diff != "" {
- t.Errorf("accepted address mismatch (-want +got):\n%s", diff)
- }
-
- we, newCH := waiter.NewChannelEntry(nil)
- wq.EventRegister(&we, waiter.ReadableEvents)
- return newEP, newCH
- }
- },
- needRemoteAddr: false,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- for _, subTest := range subTests {
- t.Run(subTest.name, func(t *testing.T) {
- stackOpts := stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{arp.NewProtocol, ipv4.NewProtocol, ipv6.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol, tcp.NewProtocol},
- }
-
- host1Stack := stack.New(stackOpts)
- routerStack := stack.New(stackOpts)
- host2Stack := stack.New(stackOpts)
- utils.SetupRoutedStacks(t, host1Stack, routerStack, host2Stack)
-
- epsAndAddrs := test.epAndAddrs(t, host1Stack, routerStack, host2Stack, subTest.proto)
- defer epsAndAddrs.serverEP.Close()
- defer epsAndAddrs.clientEP.Close()
-
- serverAddr := tcpip.FullAddress{Addr: epsAndAddrs.serverAddr, Port: listenPort}
- if err := epsAndAddrs.serverEP.Bind(serverAddr); err != nil {
- t.Fatalf("epsAndAddrs.serverEP.Bind(%#v): %s", serverAddr, err)
- }
- clientAddr := tcpip.FullAddress{Addr: epsAndAddrs.clientAddr}
- if err := epsAndAddrs.clientEP.Bind(clientAddr); err != nil {
- t.Fatalf("epsAndAddrs.clientEP.Bind(%#v): %s", clientAddr, err)
- }
-
- if subTest.setupServer != nil {
- subTest.setupServer(t, epsAndAddrs.serverEP)
- }
- {
- err := epsAndAddrs.clientEP.Connect(serverAddr)
- if diff := cmp.Diff(subTest.expectedConnectErr, err); diff != "" {
- t.Fatalf("unexpected error from epsAndAddrs.clientEP.Connect(%#v), (-want, +got):\n%s", serverAddr, diff)
- }
- }
- if addr, err := epsAndAddrs.clientEP.GetLocalAddress(); err != nil {
- t.Fatalf("epsAndAddrs.clientEP.GetLocalAddress(): %s", err)
- } else {
- clientAddr = addr
- clientAddr.NIC = 0
- }
-
- serverEP := epsAndAddrs.serverEP
- serverCH := epsAndAddrs.serverReadableCH
- if ep, ch := subTest.setupServerConn(t, serverEP, serverCH, clientAddr); ep != nil {
- defer ep.Close()
- serverEP = ep
- serverCH = ch
- }
-
- write := func(ep tcpip.Endpoint, data []byte) {
- t.Helper()
-
- var r bytes.Reader
- r.Reset(data)
- var wOpts tcpip.WriteOptions
- n, err := ep.Write(&r, wOpts)
- if err != nil {
- t.Fatalf("ep.Write(_, %#v): %s", wOpts, err)
- }
- if want := int64(len(data)); n != want {
- t.Fatalf("got ep.Write(_, %#v) = (%d, _), want = (%d, _)", wOpts, n, want)
- }
- }
-
- data := []byte{1, 2, 3, 4}
- write(epsAndAddrs.clientEP, data)
-
- read := func(ch chan struct{}, ep tcpip.Endpoint, data []byte, expectedFrom tcpip.FullAddress) {
- t.Helper()
-
- var buf bytes.Buffer
- var res tcpip.ReadResult
- for {
- var err tcpip.Error
- opts := tcpip.ReadOptions{NeedRemoteAddr: subTest.needRemoteAddr}
- res, err = ep.Read(&buf, opts)
- if _, ok := err.(*tcpip.ErrWouldBlock); ok {
- <-ch
- continue
- }
- if err != nil {
- t.Fatalf("ep.Read(_, %#v): %s", opts, err)
- }
- break
- }
-
- readResult := tcpip.ReadResult{
- Count: len(data),
- Total: len(data),
- }
- if subTest.needRemoteAddr {
- readResult.RemoteAddr = expectedFrom
- }
- if diff := cmp.Diff(readResult, res, checker.IgnoreCmpPath(
- "ControlMessages",
- "RemoteAddr.NIC",
- )); diff != "" {
- t.Errorf("ep.Read: unexpected result (-want +got):\n%s", diff)
- }
- if diff := cmp.Diff(buf.Bytes(), data); diff != "" {
- t.Errorf("received data mismatch (-want +got):\n%s", diff)
- }
-
- if t.Failed() {
- t.FailNow()
- }
- }
-
- read(serverCH, serverEP, data, clientAddr)
-
- data = []byte{5, 6, 7, 8, 9, 10, 11, 12}
- write(serverEP, data)
- read(epsAndAddrs.clientReadableCH, epsAndAddrs.clientEP, data, serverAddr)
- })
- }
- })
- }
-}
-
-func TestMulticastForwarding(t *testing.T) {
- const (
- nicID1 = 1
- nicID2 = 2
- )
-
- var (
- ipv4LinkLocalUnicastAddr = testutil.MustParse4("169.254.0.10")
- ipv4LinkLocalMulticastAddr = testutil.MustParse4("224.0.0.10")
-
- ipv6LinkLocalUnicastAddr = testutil.MustParse6("fe80::a")
- ipv6LinkLocalMulticastAddr = testutil.MustParse6("ff02::a")
- )
-
- tests := []struct {
- name string
- srcAddr, dstAddr tcpip.Address
- rx func(*channel.Endpoint, tcpip.Address, tcpip.Address)
- expectForward bool
- checker func(*testing.T, []byte)
- }{
- {
- name: "IPv4 link-local multicast destination",
- srcAddr: utils.RemoteIPv4Addr,
- dstAddr: ipv4LinkLocalMulticastAddr,
- rx: rxICMPv4EchoRequest,
- expectForward: false,
- },
- {
- name: "IPv4 link-local source",
- srcAddr: ipv4LinkLocalUnicastAddr,
- dstAddr: utils.RemoteIPv4Addr,
- rx: rxICMPv4EchoRequest,
- expectForward: false,
- },
- {
- name: "IPv4 link-local destination",
- srcAddr: utils.RemoteIPv4Addr,
- dstAddr: ipv4LinkLocalUnicastAddr,
- rx: rxICMPv4EchoRequest,
- expectForward: false,
- },
- {
- name: "IPv4 non-link-local unicast",
- srcAddr: utils.RemoteIPv4Addr,
- dstAddr: utils.Ipv4Addr2.AddressWithPrefix.Address,
- rx: rxICMPv4EchoRequest,
- expectForward: true,
- checker: func(t *testing.T, b []byte) {
- forwardedICMPv4EchoRequestChecker(t, b, utils.RemoteIPv4Addr, utils.Ipv4Addr2.AddressWithPrefix.Address)
- },
- },
- {
- name: "IPv4 non-link-local multicast",
- srcAddr: utils.RemoteIPv4Addr,
- dstAddr: ipv4GlobalMulticastAddr,
- rx: rxICMPv4EchoRequest,
- expectForward: true,
- checker: func(t *testing.T, b []byte) {
- forwardedICMPv4EchoRequestChecker(t, b, utils.RemoteIPv4Addr, ipv4GlobalMulticastAddr)
- },
- },
-
- {
- name: "IPv6 link-local multicast destination",
- srcAddr: utils.RemoteIPv6Addr,
- dstAddr: ipv6LinkLocalMulticastAddr,
- rx: rxICMPv6EchoRequest,
- expectForward: false,
- },
- {
- name: "IPv6 link-local source",
- srcAddr: ipv6LinkLocalUnicastAddr,
- dstAddr: utils.RemoteIPv6Addr,
- rx: rxICMPv6EchoRequest,
- expectForward: false,
- },
- {
- name: "IPv6 link-local destination",
- srcAddr: utils.RemoteIPv6Addr,
- dstAddr: ipv6LinkLocalUnicastAddr,
- rx: rxICMPv6EchoRequest,
- expectForward: false,
- },
- {
- name: "IPv6 non-link-local unicast",
- srcAddr: utils.RemoteIPv6Addr,
- dstAddr: utils.Ipv6Addr2.AddressWithPrefix.Address,
- rx: rxICMPv6EchoRequest,
- expectForward: true,
- checker: func(t *testing.T, b []byte) {
- forwardedICMPv6EchoRequestChecker(t, b, utils.RemoteIPv6Addr, utils.Ipv6Addr2.AddressWithPrefix.Address)
- },
- },
- {
- name: "IPv6 non-link-local multicast",
- srcAddr: utils.RemoteIPv6Addr,
- dstAddr: ipv6GlobalMulticastAddr,
- rx: rxICMPv6EchoRequest,
- expectForward: true,
- checker: func(t *testing.T, b []byte) {
- forwardedICMPv6EchoRequestChecker(t, b, utils.RemoteIPv6Addr, ipv6GlobalMulticastAddr)
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},
- })
-
- e1 := channel.New(1, header.IPv6MinimumMTU, "")
- if err := s.CreateNIC(nicID1, e1); err != nil {
- t.Fatalf("s.CreateNIC(%d, _): %s", nicID1, err)
- }
-
- e2 := channel.New(1, header.IPv6MinimumMTU, "")
- if err := s.CreateNIC(nicID2, e2); err != nil {
- t.Fatalf("s.CreateNIC(%d, _): %s", nicID2, err)
- }
-
- if err := s.AddAddress(nicID2, ipv4.ProtocolNumber, utils.Ipv4Addr.Address); err != nil {
- t.Fatalf("s.AddAddress(%d, %d, %s): %s", nicID2, ipv4.ProtocolNumber, utils.Ipv4Addr.Address, err)
- }
- if err := s.AddAddress(nicID2, ipv6.ProtocolNumber, utils.Ipv6Addr.Address); err != nil {
- t.Fatalf("s.AddAddress(%d, %d, %s): %s", nicID2, ipv6.ProtocolNumber, utils.Ipv6Addr.Address, err)
- }
-
- if err := s.SetForwardingDefaultAndAllNICs(ipv4.ProtocolNumber, true); err != nil {
- t.Fatalf("s.SetForwardingDefaultAndAllNICs(%d, true): %s", ipv4.ProtocolNumber, err)
- }
- if err := s.SetForwardingDefaultAndAllNICs(ipv6.ProtocolNumber, true); err != nil {
- t.Fatalf("s.SetForwardingDefaultAndAllNICs(%d, true): %s", ipv6.ProtocolNumber, err)
- }
-
- s.SetRouteTable([]tcpip.Route{
- {
- Destination: header.IPv4EmptySubnet,
- NIC: nicID2,
- },
- {
- Destination: header.IPv6EmptySubnet,
- NIC: nicID2,
- },
- })
-
- test.rx(e1, test.srcAddr, test.dstAddr)
-
- p, ok := e2.Read()
- if ok != test.expectForward {
- t.Fatalf("got e2.Read() = (%#v, %t), want = (_, %t)", p, ok, test.expectForward)
- }
-
- if test.expectForward {
- test.checker(t, stack.PayloadSince(p.Pkt.NetworkHeader()))
- }
- })
- }
-}
-
-func TestPerInterfaceForwarding(t *testing.T) {
- const (
- nicID1 = 1
- nicID2 = 2
- )
-
- tests := []struct {
- name string
- srcAddr, dstAddr tcpip.Address
- rx func(*channel.Endpoint, tcpip.Address, tcpip.Address)
- checker func(*testing.T, []byte)
- }{
- {
- name: "IPv4 unicast",
- srcAddr: utils.RemoteIPv4Addr,
- dstAddr: utils.Ipv4Addr2.AddressWithPrefix.Address,
- rx: rxICMPv4EchoRequest,
- checker: func(t *testing.T, b []byte) {
- forwardedICMPv4EchoRequestChecker(t, b, utils.RemoteIPv4Addr, utils.Ipv4Addr2.AddressWithPrefix.Address)
- },
- },
- {
- name: "IPv4 multicast",
- srcAddr: utils.RemoteIPv4Addr,
- dstAddr: ipv4GlobalMulticastAddr,
- rx: rxICMPv4EchoRequest,
- checker: func(t *testing.T, b []byte) {
- forwardedICMPv4EchoRequestChecker(t, b, utils.RemoteIPv4Addr, ipv4GlobalMulticastAddr)
- },
- },
-
- {
- name: "IPv6 unicast",
- srcAddr: utils.RemoteIPv6Addr,
- dstAddr: utils.Ipv6Addr2.AddressWithPrefix.Address,
- rx: rxICMPv6EchoRequest,
- checker: func(t *testing.T, b []byte) {
- forwardedICMPv6EchoRequestChecker(t, b, utils.RemoteIPv6Addr, utils.Ipv6Addr2.AddressWithPrefix.Address)
- },
- },
- {
- name: "IPv6 multicast",
- srcAddr: utils.RemoteIPv6Addr,
- dstAddr: ipv6GlobalMulticastAddr,
- rx: rxICMPv6EchoRequest,
- checker: func(t *testing.T, b []byte) {
- forwardedICMPv6EchoRequestChecker(t, b, utils.RemoteIPv6Addr, ipv6GlobalMulticastAddr)
- },
- },
- }
-
- netProtos := [...]tcpip.NetworkProtocolNumber{ipv4.ProtocolNumber, ipv6.ProtocolNumber}
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{
- // ARP is not used in this test but it is a network protocol that does
- // not support forwarding. We install the protocol to make sure that
- // forwarding information for a NIC is only reported for network
- // protocols that support forwarding.
- arp.NewProtocol,
-
- ipv4.NewProtocol,
- ipv6.NewProtocol,
- },
- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},
- })
-
- e1 := channel.New(1, header.IPv6MinimumMTU, "")
- if err := s.CreateNIC(nicID1, e1); err != nil {
- t.Fatalf("s.CreateNIC(%d, _): %s", nicID1, err)
- }
-
- e2 := channel.New(1, header.IPv6MinimumMTU, "")
- if err := s.CreateNIC(nicID2, e2); err != nil {
- t.Fatalf("s.CreateNIC(%d, _): %s", nicID2, err)
- }
-
- for _, add := range [...]struct {
- nicID tcpip.NICID
- addr tcpip.ProtocolAddress
- }{
- {
- nicID: nicID1,
- addr: utils.RouterNIC1IPv4Addr,
- },
- {
- nicID: nicID1,
- addr: utils.RouterNIC1IPv6Addr,
- },
- {
- nicID: nicID2,
- addr: utils.RouterNIC2IPv4Addr,
- },
- {
- nicID: nicID2,
- addr: utils.RouterNIC2IPv6Addr,
- },
- } {
- if err := s.AddProtocolAddress(add.nicID, add.addr); err != nil {
- t.Fatalf("s.AddProtocolAddress(%d, %#v): %s", add.nicID, add.addr, err)
- }
- }
-
- // Only enable forwarding on NIC1 and make sure that only packets arriving
- // on NIC1 are forwarded.
- for _, netProto := range netProtos {
- if err := s.SetNICForwarding(nicID1, netProto, true); err != nil {
- t.Fatalf("s.SetNICForwarding(%d, %d, true): %s", nicID1, netProto, err)
- }
- }
-
- nicsInfo := s.NICInfo()
- for _, subTest := range [...]struct {
- nicID tcpip.NICID
- nicEP *channel.Endpoint
- otherNICID tcpip.NICID
- otherNICEP *channel.Endpoint
- expectForwarding bool
- }{
- {
- nicID: nicID1,
- nicEP: e1,
- otherNICID: nicID2,
- otherNICEP: e2,
- expectForwarding: true,
- },
- {
- nicID: nicID2,
- nicEP: e2,
- otherNICID: nicID1,
- otherNICEP: e1,
- expectForwarding: false,
- },
- } {
- t.Run(fmt.Sprintf("Packet arriving at NIC%d", subTest.nicID), func(t *testing.T) {
- nicInfo, ok := nicsInfo[subTest.nicID]
- if !ok {
- t.Errorf("expected NIC info for NIC %d; got = %#v", subTest.nicID, nicsInfo)
- } else {
- forwarding := make(map[tcpip.NetworkProtocolNumber]bool)
- for _, netProto := range netProtos {
- forwarding[netProto] = subTest.expectForwarding
- }
-
- if diff := cmp.Diff(forwarding, nicInfo.Forwarding); diff != "" {
- t.Errorf("nicsInfo[%d].Forwarding mismatch (-want +got):\n%s", subTest.nicID, diff)
- }
- }
-
- s.SetRouteTable([]tcpip.Route{
- {
- Destination: header.IPv4EmptySubnet,
- NIC: subTest.otherNICID,
- },
- {
- Destination: header.IPv6EmptySubnet,
- NIC: subTest.otherNICID,
- },
- })
-
- test.rx(subTest.nicEP, test.srcAddr, test.dstAddr)
- if p, ok := subTest.nicEP.Read(); ok {
- t.Errorf("unexpectedly got a response from the interface the packet arrived on: %#v", p)
- }
- if p, ok := subTest.otherNICEP.Read(); ok != subTest.expectForwarding {
- t.Errorf("got otherNICEP.Read() = (%#v, %t), want = (_, %t)", p, ok, subTest.expectForwarding)
- } else if subTest.expectForwarding {
- test.checker(t, stack.PayloadSince(p.Pkt.NetworkHeader()))
- }
- })
- }
- })
- }
-}
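
The two tests above exercise the stack's forwarding knobs at different granularities: TestMulticastForwarding flips forwarding for a whole protocol with SetForwardingDefaultAndAllNICs, while TestPerInterfaceForwarding enables it for a single NIC with SetNICForwarding and then checks the per-NIC state via NICInfo. A minimal sketch of those calls outside the test harness, using only the stack APIs that appear above (the NIC IDs, single-protocol setup, and log-based error handling are illustrative, not part of the deleted tests):

    package main

    import (
        "log"

        "gvisor.dev/gvisor/pkg/tcpip/header"
        "gvisor.dev/gvisor/pkg/tcpip/link/channel"
        "gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
        "gvisor.dev/gvisor/pkg/tcpip/stack"
    )

    func main() {
        s := stack.New(stack.Options{
            NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},
        })

        // Two NICs backed by channel endpoints, as in the tests above.
        if err := s.CreateNIC(1, channel.New(1, header.IPv6MinimumMTU, "")); err != nil {
            log.Fatalf("CreateNIC(1, _): %s", err)
        }
        if err := s.CreateNIC(2, channel.New(1, header.IPv6MinimumMTU, "")); err != nil {
            log.Fatalf("CreateNIC(2, _): %s", err)
        }

        // Stack-wide (TestMulticastForwarding): enable IPv4 forwarding on all
        // existing NICs and by default on NICs created later.
        //   s.SetForwardingDefaultAndAllNICs(ipv4.ProtocolNumber, true)

        // Per-interface (TestPerInterfaceForwarding): enable forwarding only
        // for packets arriving on NIC 1; NIC 2 keeps forwarding disabled.
        if err := s.SetNICForwarding(1, ipv4.ProtocolNumber, true); err != nil {
            log.Fatalf("SetNICForwarding(1, %d, true): %s", ipv4.ProtocolNumber, err)
        }

        // NICInfo exposes the per-NIC, per-protocol forwarding state that
        // TestPerInterfaceForwarding asserts on.
        log.Printf("NIC 1 forwards IPv4: %t", s.NICInfo()[1].Forwarding[ipv4.ProtocolNumber])
    }
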
diff --git a/pkg/tcpip/tests/integration/iptables_test.go b/pkg/tcpip/tests/integration/iptables_test.go
deleted file mode 100644
index f9ab7d0af..000000000
--- a/pkg/tcpip/tests/integration/iptables_test.go
+++ /dev/null
@@ -1,1134 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package iptables_test
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/checker"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/link/channel"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/tests/utils"
- "gvisor.dev/gvisor/pkg/tcpip/testutil"
- "gvisor.dev/gvisor/pkg/tcpip/transport/udp"
-)
-
-type inputIfNameMatcher struct {
- name string
-}
-
-var _ stack.Matcher = (*inputIfNameMatcher)(nil)
-
-func (*inputIfNameMatcher) Name() string {
- return "inputIfNameMatcher"
-}
-
-func (im *inputIfNameMatcher) Match(hook stack.Hook, _ *stack.PacketBuffer, inNicName, _ string) (bool, bool) {
- return (hook == stack.Input && im.name != "" && im.name == inNicName), false
-}
-
-const (
- nicID = 1
- nicName = "nic1"
- anotherNicName = "nic2"
- linkAddr = tcpip.LinkAddress("\x0a\x0b\x0c\x0d\x0e\x0e")
- srcAddrV4 = "\x0a\x00\x00\x01"
- dstAddrV4 = "\x0a\x00\x00\x02"
- srcAddrV6 = "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01"
- dstAddrV6 = "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02"
- payloadSize = 20
-)
-
-func genStackV6(t *testing.T) (*stack.Stack, *channel.Endpoint) {
- t.Helper()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocol},
- })
- e := channel.New(0, header.IPv6MinimumMTU, linkAddr)
- nicOpts := stack.NICOptions{Name: nicName}
- if err := s.CreateNICWithOptions(nicID, e, nicOpts); err != nil {
- t.Fatalf("CreateNICWithOptions(%d, _, %#v) = %s", nicID, nicOpts, err)
- }
- if err := s.AddAddress(nicID, header.IPv6ProtocolNumber, dstAddrV6); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s) = %s", nicID, header.IPv6ProtocolNumber, dstAddrV6, err)
- }
- return s, e
-}
-
-func genStackV4(t *testing.T) (*stack.Stack, *channel.Endpoint) {
- t.Helper()
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},
- })
- e := channel.New(0, header.IPv4MinimumMTU, linkAddr)
- nicOpts := stack.NICOptions{Name: nicName}
- if err := s.CreateNICWithOptions(nicID, e, nicOpts); err != nil {
- t.Fatalf("CreateNICWithOptions(%d, _, %#v) = %s", nicID, nicOpts, err)
- }
- if err := s.AddAddress(nicID, header.IPv4ProtocolNumber, dstAddrV4); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s) = %s", nicID, header.IPv4ProtocolNumber, dstAddrV4, err)
- }
- return s, e
-}
-
-func genPacketV6() *stack.PacketBuffer {
- pktSize := header.IPv6MinimumSize + payloadSize
- hdr := buffer.NewPrependable(pktSize)
- ip := header.IPv6(hdr.Prepend(pktSize))
- ip.Encode(&header.IPv6Fields{
- PayloadLength: payloadSize,
- TransportProtocol: 99,
- HopLimit: 255,
- SrcAddr: srcAddrV6,
- DstAddr: dstAddrV6,
- })
- vv := hdr.View().ToVectorisedView()
- return stack.NewPacketBuffer(stack.PacketBufferOptions{Data: vv})
-}
-
-func genPacketV4() *stack.PacketBuffer {
- pktSize := header.IPv4MinimumSize + payloadSize
- hdr := buffer.NewPrependable(pktSize)
- ip := header.IPv4(hdr.Prepend(pktSize))
- ip.Encode(&header.IPv4Fields{
- TOS: 0,
- TotalLength: uint16(pktSize),
- ID: 1,
- Flags: 0,
- FragmentOffset: 16,
- TTL: 48,
- Protocol: 99,
- SrcAddr: srcAddrV4,
- DstAddr: dstAddrV4,
- })
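// Standard IPv4 header checksum: clear the checksum field first, compute the
// checksum over the header, then store its one's-complement.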
- ip.SetChecksum(0)
- ip.SetChecksum(^ip.CalculateChecksum())
- vv := hdr.View().ToVectorisedView()
- return stack.NewPacketBuffer(stack.PacketBufferOptions{Data: vv})
-}
-
-func TestIPTablesStatsForInput(t *testing.T) {
- tests := []struct {
- name string
- setupStack func(*testing.T) (*stack.Stack, *channel.Endpoint)
- setupFilter func(*testing.T, *stack.Stack)
- genPacket func() *stack.PacketBuffer
- proto tcpip.NetworkProtocolNumber
- expectReceived int
- expectInputDropped int
- }{
- {
- name: "IPv6 Accept",
- setupStack: genStackV6,
- setupFilter: func(*testing.T, *stack.Stack) { /* no filter */ },
- genPacket: genPacketV6,
- proto: header.IPv6ProtocolNumber,
- expectReceived: 1,
- expectInputDropped: 0,
- },
- {
- name: "IPv4 Accept",
- setupStack: genStackV4,
- setupFilter: func(*testing.T, *stack.Stack) { /* no filter */ },
- genPacket: genPacketV4,
- proto: header.IPv4ProtocolNumber,
- expectReceived: 1,
- expectInputDropped: 0,
- },
- {
- name: "IPv6 Drop (input interface matches)",
- setupStack: genStackV6,
- setupFilter: func(t *testing.T, s *stack.Stack) {
- t.Helper()
- ipt := s.IPTables()
- filter := ipt.GetTable(stack.FilterID, true /* ipv6 */)
- ruleIdx := filter.BuiltinChains[stack.Input]
- filter.Rules[ruleIdx].Filter = stack.IPHeaderFilter{InputInterface: nicName}
- filter.Rules[ruleIdx].Target = &stack.DropTarget{}
- filter.Rules[ruleIdx].Matchers = []stack.Matcher{&inputIfNameMatcher{nicName}}
- // Make sure the packet is not dropped by the next rule.
- filter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}
- if err := ipt.ReplaceTable(stack.FilterID, filter, true /* ipv6 */); err != nil {
- t.Fatalf("ipt.ReplaceTable(%d, _, %t): %s", stack.FilterID, true, err)
- }
- },
- genPacket: genPacketV6,
- proto: header.IPv6ProtocolNumber,
- expectReceived: 1,
- expectInputDropped: 1,
- },
- {
- name: "IPv4 Drop (input interface matches)",
- setupStack: genStackV4,
- setupFilter: func(t *testing.T, s *stack.Stack) {
- t.Helper()
- ipt := s.IPTables()
- filter := ipt.GetTable(stack.FilterID, false /* ipv6 */)
- ruleIdx := filter.BuiltinChains[stack.Input]
- filter.Rules[ruleIdx].Filter = stack.IPHeaderFilter{InputInterface: nicName}
- filter.Rules[ruleIdx].Target = &stack.DropTarget{}
- filter.Rules[ruleIdx].Matchers = []stack.Matcher{&inputIfNameMatcher{nicName}}
- filter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}
- if err := ipt.ReplaceTable(stack.FilterID, filter, false /* ipv6 */); err != nil {
- t.Fatalf("ipt.ReplaceTable(%d, _, %t): %s", stack.FilterID, false, err)
- }
- },
- genPacket: genPacketV4,
- proto: header.IPv4ProtocolNumber,
- expectReceived: 1,
- expectInputDropped: 1,
- },
- {
- name: "IPv6 Accept (input interface does not match)",
- setupStack: genStackV6,
- setupFilter: func(t *testing.T, s *stack.Stack) {
- t.Helper()
- ipt := s.IPTables()
- filter := ipt.GetTable(stack.FilterID, true /* ipv6 */)
- ruleIdx := filter.BuiltinChains[stack.Input]
- filter.Rules[ruleIdx].Filter = stack.IPHeaderFilter{InputInterface: anotherNicName}
- filter.Rules[ruleIdx].Target = &stack.DropTarget{}
- filter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}
- if err := ipt.ReplaceTable(stack.FilterID, filter, true /* ipv6 */); err != nil {
- t.Fatalf("ipt.ReplaceTable(%d, _, %t): %s", stack.FilterID, true, err)
- }
- },
- genPacket: genPacketV6,
- proto: header.IPv6ProtocolNumber,
- expectReceived: 1,
- expectInputDropped: 0,
- },
- {
- name: "IPv4 Accept (input interface does not match)",
- setupStack: genStackV4,
- setupFilter: func(t *testing.T, s *stack.Stack) {
- t.Helper()
- ipt := s.IPTables()
- filter := ipt.GetTable(stack.FilterID, false /* ipv6 */)
- ruleIdx := filter.BuiltinChains[stack.Input]
- filter.Rules[ruleIdx].Filter = stack.IPHeaderFilter{InputInterface: anotherNicName}
- filter.Rules[ruleIdx].Target = &stack.DropTarget{}
- filter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}
- if err := ipt.ReplaceTable(stack.FilterID, filter, false /* ipv6 */); err != nil {
- t.Fatalf("ipt.ReplaceTable(%d, _, %t): %s", stack.FilterID, false, err)
- }
- },
- genPacket: genPacketV4,
- proto: header.IPv4ProtocolNumber,
- expectReceived: 1,
- expectInputDropped: 0,
- },
- {
- name: "IPv6 Drop (input interface does not match but invert is true)",
- setupStack: genStackV6,
- setupFilter: func(t *testing.T, s *stack.Stack) {
- t.Helper()
- ipt := s.IPTables()
- filter := ipt.GetTable(stack.FilterID, true /* ipv6 */)
- ruleIdx := filter.BuiltinChains[stack.Input]
- filter.Rules[ruleIdx].Filter = stack.IPHeaderFilter{
- InputInterface: anotherNicName,
- InputInterfaceInvert: true,
- }
- filter.Rules[ruleIdx].Target = &stack.DropTarget{}
- filter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}
- if err := ipt.ReplaceTable(stack.FilterID, filter, true /* ipv6 */); err != nil {
- t.Fatalf("ipt.ReplaceTable(%d, _, %t): %s", stack.FilterID, true, err)
- }
- },
- genPacket: genPacketV6,
- proto: header.IPv6ProtocolNumber,
- expectReceived: 1,
- expectInputDropped: 1,
- },
- {
- name: "IPv4 Drop (input interface does not match but invert is true)",
- setupStack: genStackV4,
- setupFilter: func(t *testing.T, s *stack.Stack) {
- t.Helper()
- ipt := s.IPTables()
- filter := ipt.GetTable(stack.FilterID, false /* ipv6 */)
- ruleIdx := filter.BuiltinChains[stack.Input]
- filter.Rules[ruleIdx].Filter = stack.IPHeaderFilter{
- InputInterface: anotherNicName,
- InputInterfaceInvert: true,
- }
- filter.Rules[ruleIdx].Target = &stack.DropTarget{}
- filter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}
- if err := ipt.ReplaceTable(stack.FilterID, filter, false /* ipv6 */); err != nil {
- t.Fatalf("ipt.ReplaceTable(%d, _, %t): %s", stack.FilterID, false, err)
- }
- },
- genPacket: genPacketV4,
- proto: header.IPv4ProtocolNumber,
- expectReceived: 1,
- expectInputDropped: 1,
- },
- {
- name: "IPv6 Accept (input interface does not match using a matcher)",
- setupStack: genStackV6,
- setupFilter: func(t *testing.T, s *stack.Stack) {
- t.Helper()
- ipt := s.IPTables()
- filter := ipt.GetTable(stack.FilterID, true /* ipv6 */)
- ruleIdx := filter.BuiltinChains[stack.Input]
- filter.Rules[ruleIdx].Target = &stack.DropTarget{}
- filter.Rules[ruleIdx].Matchers = []stack.Matcher{&inputIfNameMatcher{anotherNicName}}
- filter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}
- if err := ipt.ReplaceTable(stack.FilterID, filter, true /* ipv6 */); err != nil {
- t.Fatalf("ipt.ReplaceTable(%d, _, %t): %s", stack.FilterID, true, err)
- }
- },
- genPacket: genPacketV6,
- proto: header.IPv6ProtocolNumber,
- expectReceived: 1,
- expectInputDropped: 0,
- },
- {
- name: "IPv4 Accept (input interface does not match using a matcher)",
- setupStack: genStackV4,
- setupFilter: func(t *testing.T, s *stack.Stack) {
- t.Helper()
- ipt := s.IPTables()
- filter := ipt.GetTable(stack.FilterID, false /* ipv6 */)
- ruleIdx := filter.BuiltinChains[stack.Input]
- filter.Rules[ruleIdx].Target = &stack.DropTarget{}
- filter.Rules[ruleIdx].Matchers = []stack.Matcher{&inputIfNameMatcher{anotherNicName}}
- filter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}
- if err := ipt.ReplaceTable(stack.FilterID, filter, false /* ipv6 */); err != nil {
- t.Fatalf("ipt.ReplaceTable(%d, _, %t): %s", stack.FilterID, false, err)
- }
- },
- genPacket: genPacketV4,
- proto: header.IPv4ProtocolNumber,
- expectReceived: 1,
- expectInputDropped: 0,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s, e := test.setupStack(t)
- test.setupFilter(t, s)
- e.InjectInbound(test.proto, test.genPacket())
-
- if got := int(s.Stats().IP.PacketsReceived.Value()); got != test.expectReceived {
- t.Errorf("got PacketReceived = %d, want = %d", got, test.expectReceived)
- }
- if got := int(s.Stats().IP.IPTablesInputDropped.Value()); got != test.expectInputDropped {
- t.Errorf("got IPTablesInputDropped = %d, want = %d", got, test.expectInputDropped)
- }
- })
- }
-}
-
-var _ stack.LinkEndpoint = (*channelEndpointWithoutWritePacket)(nil)
-
-// channelEndpointWithoutWritePacket is a channel endpoint that does not support
-// stack.LinkEndpoint.WritePacket.
-type channelEndpointWithoutWritePacket struct {
- *channel.Endpoint
-
- t *testing.T
-}
-
-func (c *channelEndpointWithoutWritePacket) WritePacket(stack.RouteInfo, tcpip.NetworkProtocolNumber, *stack.PacketBuffer) tcpip.Error {
- c.t.Error("unexpectedly called WritePacket; all writes should go through WritePackets")
- return &tcpip.ErrNotSupported{}
-}
-
-var _ stack.Matcher = (*udpSourcePortMatcher)(nil)
-
-type udpSourcePortMatcher struct {
- port uint16
-}
-
-func (*udpSourcePortMatcher) Name() string {
- return "udpSourcePortMatcher"
-}
-
-func (m *udpSourcePortMatcher) Match(_ stack.Hook, pkt *stack.PacketBuffer, _, _ string) (matches, hotdrop bool) {
- udp := header.UDP(pkt.TransportHeader().View())
- if len(udp) < header.UDPMinimumSize {
- // Drop immediately as the packet is invalid.
- return false, true
- }
-
- return udp.SourcePort() == m.port, false
-}
-
-func TestIPTableWritePackets(t *testing.T) {
- const (
- nicID = 1
-
- dropLocalPort = utils.LocalPort - 1
- acceptPackets = 2
- dropPackets = 3
- )
-
- udpHdr := func(hdr buffer.View, srcAddr, dstAddr tcpip.Address, srcPort, dstPort uint16) {
- u := header.UDP(hdr)
- u.Encode(&header.UDPFields{
- SrcPort: srcPort,
- DstPort: dstPort,
- Length: header.UDPMinimumSize,
- })
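// The UDP checksum covers a pseudo-header (source and destination IP
// addresses, protocol number, and UDP length) in addition to the UDP header,
// which is what PseudoHeaderChecksum folds in below.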
- sum := header.PseudoHeaderChecksum(udp.ProtocolNumber, srcAddr, dstAddr, header.UDPMinimumSize)
- sum = header.Checksum(hdr, sum)
- u.SetChecksum(^u.CalculateChecksum(sum))
- }
-
- tests := []struct {
- name string
- setupFilter func(*testing.T, *stack.Stack)
- genPacket func(*stack.Route) stack.PacketBufferList
- proto tcpip.NetworkProtocolNumber
- remoteAddr tcpip.Address
- expectSent uint64
- expectOutputDropped uint64
- }{
- {
- name: "IPv4 Accept",
- setupFilter: func(*testing.T, *stack.Stack) { /* no filter */ },
- genPacket: func(r *stack.Route) stack.PacketBufferList {
- var pkts stack.PacketBufferList
-
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: int(r.MaxHeaderLength() + header.UDPMinimumSize),
- })
- hdr := pkt.TransportHeader().Push(header.UDPMinimumSize)
- udpHdr(hdr, r.LocalAddress(), r.RemoteAddress(), utils.LocalPort, utils.RemotePort)
- pkts.PushFront(pkt)
-
- return pkts
- },
- proto: header.IPv4ProtocolNumber,
- remoteAddr: dstAddrV4,
- expectSent: 1,
- expectOutputDropped: 0,
- },
- {
- name: "IPv4 Drop Other Port",
- setupFilter: func(t *testing.T, s *stack.Stack) {
- t.Helper()
-
- table := stack.Table{
- Rules: []stack.Rule{
- {
- Target: &stack.AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber},
- },
- {
- Target: &stack.AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber},
- },
- {
- Matchers: []stack.Matcher{&udpSourcePortMatcher{port: dropLocalPort}},
- Target: &stack.DropTarget{NetworkProtocol: header.IPv4ProtocolNumber},
- },
- {
- Target: &stack.AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber},
- },
- {
- Target: &stack.ErrorTarget{NetworkProtocol: header.IPv4ProtocolNumber},
- },
- },
- BuiltinChains: [stack.NumHooks]int{
- stack.Prerouting: stack.HookUnset,
- stack.Input: 0,
- stack.Forward: 1,
- stack.Output: 2,
- stack.Postrouting: stack.HookUnset,
- },
- Underflows: [stack.NumHooks]int{
- stack.Prerouting: stack.HookUnset,
- stack.Input: 0,
- stack.Forward: 1,
- stack.Output: 2,
- stack.Postrouting: stack.HookUnset,
- },
- }
-
- if err := s.IPTables().ReplaceTable(stack.FilterID, table, false /* ipv6 */); err != nil {
- t.Fatalf("ReplaceTable(%d, _, false): %s", stack.FilterID, err)
- }
- },
- genPacket: func(r *stack.Route) stack.PacketBufferList {
- var pkts stack.PacketBufferList
-
- for i := 0; i < acceptPackets; i++ {
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: int(r.MaxHeaderLength() + header.UDPMinimumSize),
- })
- hdr := pkt.TransportHeader().Push(header.UDPMinimumSize)
- udpHdr(hdr, r.LocalAddress(), r.RemoteAddress(), utils.LocalPort, utils.RemotePort)
- pkts.PushFront(pkt)
- }
- for i := 0; i < dropPackets; i++ {
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: int(r.MaxHeaderLength() + header.UDPMinimumSize),
- })
- hdr := pkt.TransportHeader().Push(header.UDPMinimumSize)
- udpHdr(hdr, r.LocalAddress(), r.RemoteAddress(), dropLocalPort, utils.RemotePort)
- pkts.PushFront(pkt)
- }
-
- return pkts
- },
- proto: header.IPv4ProtocolNumber,
- remoteAddr: dstAddrV4,
- expectSent: acceptPackets,
- expectOutputDropped: dropPackets,
- },
- {
- name: "IPv6 Accept",
- setupFilter: func(*testing.T, *stack.Stack) { /* no filter */ },
- genPacket: func(r *stack.Route) stack.PacketBufferList {
- var pkts stack.PacketBufferList
-
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: int(r.MaxHeaderLength() + header.UDPMinimumSize),
- })
- hdr := pkt.TransportHeader().Push(header.UDPMinimumSize)
- udpHdr(hdr, r.LocalAddress(), r.RemoteAddress(), utils.LocalPort, utils.RemotePort)
- pkts.PushFront(pkt)
-
- return pkts
- },
- proto: header.IPv6ProtocolNumber,
- remoteAddr: dstAddrV6,
- expectSent: 1,
- expectOutputDropped: 0,
- },
- {
- name: "IPv6 Drop Other Port",
- setupFilter: func(t *testing.T, s *stack.Stack) {
- t.Helper()
-
- table := stack.Table{
- Rules: []stack.Rule{
- {
- Target: &stack.AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber},
- },
- {
- Target: &stack.AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber},
- },
- {
- Matchers: []stack.Matcher{&udpSourcePortMatcher{port: dropLocalPort}},
- Target: &stack.DropTarget{NetworkProtocol: header.IPv6ProtocolNumber},
- },
- {
- Target: &stack.AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber},
- },
- {
- Target: &stack.ErrorTarget{NetworkProtocol: header.IPv6ProtocolNumber},
- },
- },
- BuiltinChains: [stack.NumHooks]int{
- stack.Prerouting: stack.HookUnset,
- stack.Input: 0,
- stack.Forward: 1,
- stack.Output: 2,
- stack.Postrouting: stack.HookUnset,
- },
- Underflows: [stack.NumHooks]int{
- stack.Prerouting: stack.HookUnset,
- stack.Input: 0,
- stack.Forward: 1,
- stack.Output: 2,
- stack.Postrouting: stack.HookUnset,
- },
- }
-
- if err := s.IPTables().ReplaceTable(stack.FilterID, table, true /* ipv6 */); err != nil {
- t.Fatalf("ReplaceTable(%d, _, true): %s", stack.FilterID, err)
- }
- },
- genPacket: func(r *stack.Route) stack.PacketBufferList {
- var pkts stack.PacketBufferList
-
- for i := 0; i < acceptPackets; i++ {
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: int(r.MaxHeaderLength() + header.UDPMinimumSize),
- })
- hdr := pkt.TransportHeader().Push(header.UDPMinimumSize)
- udpHdr(hdr, r.LocalAddress(), r.RemoteAddress(), utils.LocalPort, utils.RemotePort)
- pkts.PushFront(pkt)
- }
- for i := 0; i < dropPackets; i++ {
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: int(r.MaxHeaderLength() + header.UDPMinimumSize),
- })
- hdr := pkt.TransportHeader().Push(header.UDPMinimumSize)
- udpHdr(hdr, r.LocalAddress(), r.RemoteAddress(), dropLocalPort, utils.RemotePort)
- pkts.PushFront(pkt)
- }
-
- return pkts
- },
- proto: header.IPv6ProtocolNumber,
- remoteAddr: dstAddrV6,
- expectSent: acceptPackets,
- expectOutputDropped: dropPackets,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},
- })
- e := channelEndpointWithoutWritePacket{
- Endpoint: channel.New(4, header.IPv6MinimumMTU, linkAddr),
- t: t,
- }
- if err := s.CreateNIC(nicID, &e); err != nil {
- t.Fatalf("CreateNIC(%d, _) = %s", nicID, err)
- }
- if err := s.AddAddress(nicID, header.IPv6ProtocolNumber, srcAddrV6); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s) = %s", nicID, header.IPv6ProtocolNumber, srcAddrV6, err)
- }
- if err := s.AddAddress(nicID, header.IPv4ProtocolNumber, srcAddrV4); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s) = %s", nicID, header.IPv4ProtocolNumber, srcAddrV4, err)
- }
-
- s.SetRouteTable([]tcpip.Route{
- {
- Destination: header.IPv4EmptySubnet,
- NIC: nicID,
- },
- {
- Destination: header.IPv6EmptySubnet,
- NIC: nicID,
- },
- })
-
- test.setupFilter(t, s)
-
- r, err := s.FindRoute(nicID, "", test.remoteAddr, test.proto, false)
- if err != nil {
- t.Fatalf("FindRoute(%d, '', %s, %d, false): %s", nicID, test.remoteAddr, test.proto, err)
- }
- defer r.Release()
-
- pkts := test.genPacket(r)
- pktsLen := pkts.Len()
- if n, err := r.WritePackets(pkts, stack.NetworkHeaderParams{
- Protocol: header.UDPProtocolNumber,
- TTL: 64,
- }); err != nil {
- t.Fatalf("WritePackets(...): %s", err)
- } else if n != pktsLen {
- t.Fatalf("got WritePackets(...) = %d, want = %d", n, pktsLen)
- }
-
- if got := s.Stats().IP.PacketsSent.Value(); got != test.expectSent {
- t.Errorf("got PacketSent = %d, want = %d", got, test.expectSent)
- }
- if got := s.Stats().IP.IPTablesOutputDropped.Value(); got != test.expectOutputDropped {
- t.Errorf("got IPTablesOutputDropped = %d, want = %d", got, test.expectOutputDropped)
- }
- })
- }
-}
-
-const ttl = 64
-
-var (
- ipv4GlobalMulticastAddr = testutil.MustParse4("224.0.1.10")
- ipv6GlobalMulticastAddr = testutil.MustParse6("ff0e::a")
-)
-
-func rxICMPv4EchoReply(e *channel.Endpoint, src, dst tcpip.Address) {
- utils.RxICMPv4EchoReply(e, src, dst, ttl)
-}
-
-func rxICMPv6EchoReply(e *channel.Endpoint, src, dst tcpip.Address) {
- utils.RxICMPv6EchoReply(e, src, dst, ttl)
-}
-
-func forwardedICMPv4EchoReplyChecker(t *testing.T, b []byte, src, dst tcpip.Address) {
- checker.IPv4(t, b,
- checker.SrcAddr(src),
- checker.DstAddr(dst),
- checker.TTL(ttl-1),
- checker.ICMPv4(
- checker.ICMPv4Type(header.ICMPv4EchoReply)))
-}
-
-func forwardedICMPv6EchoReplyChecker(t *testing.T, b []byte, src, dst tcpip.Address) {
- checker.IPv6(t, b,
- checker.SrcAddr(src),
- checker.DstAddr(dst),
- checker.TTL(ttl-1),
- checker.ICMPv6(
- checker.ICMPv6Type(header.ICMPv6EchoReply)))
-}
-
-func boolToInt(v bool) uint64 {
- if v {
- return 1
- }
- return 0
-}
-
-func setupDropFilter(hook stack.Hook, f stack.IPHeaderFilter) func(*testing.T, *stack.Stack, tcpip.NetworkProtocolNumber) {
- return func(t *testing.T, s *stack.Stack, netProto tcpip.NetworkProtocolNumber) {
- t.Helper()
-
- ipv6 := netProto == ipv6.ProtocolNumber
-
- ipt := s.IPTables()
- filter := ipt.GetTable(stack.FilterID, ipv6)
- ruleIdx := filter.BuiltinChains[hook]
- filter.Rules[ruleIdx].Filter = f
- filter.Rules[ruleIdx].Target = &stack.DropTarget{NetworkProtocol: netProto}
- // Make sure the packet is not dropped by the next rule.
- filter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{NetworkProtocol: netProto}
- if err := ipt.ReplaceTable(stack.FilterID, filter, ipv6); err != nil {
- t.Fatalf("ipt.ReplaceTable(%d, _, %t): %s", stack.FilterID, ipv6, err)
- }
- }
-}
-
-func TestForwardingHook(t *testing.T) {
- const (
- nicID1 = 1
- nicID2 = 2
-
- nic1Name = "nic1"
- nic2Name = "nic2"
-
- otherNICName = "otherNIC"
- )
-
- tests := []struct {
- name string
- netProto tcpip.NetworkProtocolNumber
- local bool
- srcAddr, dstAddr tcpip.Address
- rx func(*channel.Endpoint, tcpip.Address, tcpip.Address)
- checker func(*testing.T, []byte)
- }{
- {
- name: "IPv4 remote",
- netProto: ipv4.ProtocolNumber,
- local: false,
- srcAddr: utils.RemoteIPv4Addr,
- dstAddr: utils.Ipv4Addr2.AddressWithPrefix.Address,
- rx: rxICMPv4EchoReply,
- checker: func(t *testing.T, b []byte) {
- forwardedICMPv4EchoReplyChecker(t, b, utils.RemoteIPv4Addr, utils.Ipv4Addr2.AddressWithPrefix.Address)
- },
- },
- {
- name: "IPv4 local",
- netProto: ipv4.ProtocolNumber,
- local: true,
- srcAddr: utils.RemoteIPv4Addr,
- dstAddr: utils.Ipv4Addr.Address,
- rx: rxICMPv4EchoReply,
- },
- {
- name: "IPv6 remote",
- netProto: ipv6.ProtocolNumber,
- local: false,
- srcAddr: utils.RemoteIPv6Addr,
- dstAddr: utils.Ipv6Addr2.AddressWithPrefix.Address,
- rx: rxICMPv6EchoReply,
- checker: func(t *testing.T, b []byte) {
- forwardedICMPv6EchoReplyChecker(t, b, utils.RemoteIPv6Addr, utils.Ipv6Addr2.AddressWithPrefix.Address)
- },
- },
- {
- name: "IPv6 local",
- netProto: ipv6.ProtocolNumber,
- local: true,
- srcAddr: utils.RemoteIPv6Addr,
- dstAddr: utils.Ipv6Addr.Address,
- rx: rxICMPv6EchoReply,
- },
- }
-
- subTests := []struct {
- name string
- setupFilter func(*testing.T, *stack.Stack, tcpip.NetworkProtocolNumber)
- expectForward bool
- }{
- {
- name: "Accept",
- setupFilter: func(*testing.T, *stack.Stack, tcpip.NetworkProtocolNumber) { /* no filter */ },
- expectForward: true,
- },
-
- {
- name: "Drop",
- setupFilter: setupDropFilter(stack.Forward, stack.IPHeaderFilter{}),
- expectForward: false,
- },
- {
- name: "Drop with input NIC filtering",
- setupFilter: setupDropFilter(stack.Forward, stack.IPHeaderFilter{InputInterface: nic1Name}),
- expectForward: false,
- },
- {
- name: "Drop with output NIC filtering",
- setupFilter: setupDropFilter(stack.Forward, stack.IPHeaderFilter{OutputInterface: nic2Name}),
- expectForward: false,
- },
- {
- name: "Drop with input and output NIC filtering",
- setupFilter: setupDropFilter(stack.Forward, stack.IPHeaderFilter{InputInterface: nic1Name, OutputInterface: nic2Name}),
- expectForward: false,
- },
-
- {
- name: "Drop with other input NIC filtering",
- setupFilter: setupDropFilter(stack.Forward, stack.IPHeaderFilter{InputInterface: otherNICName}),
- expectForward: true,
- },
- {
- name: "Drop with other output NIC filtering",
- setupFilter: setupDropFilter(stack.Forward, stack.IPHeaderFilter{OutputInterface: otherNICName}),
- expectForward: true,
- },
- {
- name: "Drop with other input and output NIC filtering",
- setupFilter: setupDropFilter(stack.Forward, stack.IPHeaderFilter{InputInterface: otherNICName, OutputInterface: nic2Name}),
- expectForward: true,
- },
- {
- name: "Drop with input and other output NIC filtering",
- setupFilter: setupDropFilter(stack.Forward, stack.IPHeaderFilter{InputInterface: nic1Name, OutputInterface: otherNICName}),
- expectForward: true,
- },
- {
- name: "Drop with other input and other output NIC filtering",
- setupFilter: setupDropFilter(stack.Forward, stack.IPHeaderFilter{InputInterface: otherNICName, OutputInterface: otherNICName}),
- expectForward: true,
- },
-
- {
- name: "Drop with inverted input NIC filtering",
- setupFilter: setupDropFilter(stack.Forward, stack.IPHeaderFilter{InputInterface: nic1Name, InputInterfaceInvert: true}),
- expectForward: true,
- },
- {
- name: "Drop with inverted output NIC filtering",
- setupFilter: setupDropFilter(stack.Forward, stack.IPHeaderFilter{OutputInterface: nic2Name, OutputInterfaceInvert: true}),
- expectForward: true,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- for _, subTest := range subTests {
- t.Run(subTest.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},
- })
-
- subTest.setupFilter(t, s, test.netProto)
-
- e1 := channel.New(1, header.IPv6MinimumMTU, "")
- if err := s.CreateNICWithOptions(nicID1, e1, stack.NICOptions{Name: nic1Name}); err != nil {
- t.Fatalf("s.CreateNICWithOptions(%d, _, _): %s", nicID1, err)
- }
-
- e2 := channel.New(1, header.IPv6MinimumMTU, "")
- if err := s.CreateNICWithOptions(nicID2, e2, stack.NICOptions{Name: nic2Name}); err != nil {
- t.Fatalf("s.CreateNICWithOptions(%d, _, _): %s", nicID2, err)
- }
-
- if err := s.AddAddress(nicID2, ipv4.ProtocolNumber, utils.Ipv4Addr.Address); err != nil {
- t.Fatalf("s.AddAddress(%d, %d, %s): %s", nicID2, ipv4.ProtocolNumber, utils.Ipv4Addr.Address, err)
- }
- if err := s.AddAddress(nicID2, ipv6.ProtocolNumber, utils.Ipv6Addr.Address); err != nil {
- t.Fatalf("s.AddAddress(%d, %d, %s): %s", nicID2, ipv6.ProtocolNumber, utils.Ipv6Addr.Address, err)
- }
-
- if err := s.SetForwardingDefaultAndAllNICs(ipv4.ProtocolNumber, true); err != nil {
- t.Fatalf("s.SetForwardingDefaultAndAllNICs(%d, true): %s", ipv4.ProtocolNumber, err)
- }
- if err := s.SetForwardingDefaultAndAllNICs(ipv6.ProtocolNumber, true); err != nil {
- t.Fatalf("s.SetForwardingDefaultAndAllNICs(%d, true): %s", ipv6.ProtocolNumber, err)
- }
-
- s.SetRouteTable([]tcpip.Route{
- {
- Destination: header.IPv4EmptySubnet,
- NIC: nicID2,
- },
- {
- Destination: header.IPv6EmptySubnet,
- NIC: nicID2,
- },
- })
-
- test.rx(e1, test.srcAddr, test.dstAddr)
-
- expectTransmitPacket := subTest.expectForward && !test.local
-
- ep1, err := s.GetNetworkEndpoint(nicID1, test.netProto)
- if err != nil {
- t.Fatalf("s.GetNetworkEndpoint(%d, %d): %s", nicID1, test.netProto, err)
- }
- ep1Stats := ep1.Stats()
- ipEP1Stats, ok := ep1Stats.(stack.IPNetworkEndpointStats)
- if !ok {
- t.Fatalf("got ep1Stats = %T, want = stack.IPNetworkEndpointStats", ep1Stats)
- }
- ip1Stats := ipEP1Stats.IPStats()
-
- if got := ip1Stats.PacketsReceived.Value(); got != 1 {
- t.Errorf("got ip1Stats.PacketsReceived.Value() = %d, want = 1", got)
- }
- if got := ip1Stats.ValidPacketsReceived.Value(); got != 1 {
- t.Errorf("got ip1Stats.ValidPacketsReceived.Value() = %d, want = 1", got)
- }
- if got, want := ip1Stats.IPTablesForwardDropped.Value(), boolToInt(!subTest.expectForward); got != want {
- t.Errorf("got ip1Stats.IPTablesForwardDropped.Value() = %d, want = %d", got, want)
- }
- if got := ip1Stats.PacketsSent.Value(); got != 0 {
- t.Errorf("got ip1Stats.PacketsSent.Value() = %d, want = 0", got)
- }
-
- ep2, err := s.GetNetworkEndpoint(nicID2, test.netProto)
- if err != nil {
- t.Fatalf("s.GetNetworkEndpoint(%d, %d): %s", nicID2, test.netProto, err)
- }
- ep2Stats := ep2.Stats()
- ipEP2Stats, ok := ep2Stats.(stack.IPNetworkEndpointStats)
- if !ok {
- t.Fatalf("got ep2Stats = %T, want = stack.IPNetworkEndpointStats", ep2Stats)
- }
- ip2Stats := ipEP2Stats.IPStats()
- if got := ip2Stats.PacketsReceived.Value(); got != 0 {
- t.Errorf("got ip2Stats.PacketsReceived.Value() = %d, want = 0", got)
- }
- if got, want := ip2Stats.ValidPacketsReceived.Value(), boolToInt(subTest.expectForward && test.local); got != want {
- t.Errorf("got ip2Stats.ValidPacketsReceived.Value() = %d, want = %d", got, want)
- }
- if got, want := ip2Stats.PacketsSent.Value(), boolToInt(expectTransmitPacket); got != want {
- t.Errorf("got ip2Stats.PacketsSent.Value() = %d, want = %d", got, want)
- }
-
- p, ok := e2.Read()
- if ok != expectTransmitPacket {
- t.Fatalf("got e2.Read() = (%#v, %t), want = (_, %t)", p, ok, expectTransmitPacket)
- }
- if expectTransmitPacket {
- test.checker(t, stack.PayloadSince(p.Pkt.NetworkHeader()))
- }
- })
- }
- })
- }
-}
-
-func TestInputHookWithLocalForwarding(t *testing.T) {
- const (
- nicID1 = 1
- nicID2 = 2
-
- nic1Name = "nic1"
- nic2Name = "nic2"
-
- otherNICName = "otherNIC"
- )
-
- tests := []struct {
- name string
- netProto tcpip.NetworkProtocolNumber
- rx func(*channel.Endpoint)
- checker func(*testing.T, []byte)
- }{
- {
- name: "IPv4",
- netProto: ipv4.ProtocolNumber,
- rx: func(e *channel.Endpoint) {
- utils.RxICMPv4EchoRequest(e, utils.RemoteIPv4Addr, utils.Ipv4Addr2.AddressWithPrefix.Address, ttl)
- },
- checker: func(t *testing.T, b []byte) {
- checker.IPv4(t, b,
- checker.SrcAddr(utils.Ipv4Addr2.AddressWithPrefix.Address),
- checker.DstAddr(utils.RemoteIPv4Addr),
- checker.ICMPv4(
- checker.ICMPv4Type(header.ICMPv4EchoReply)))
- },
- },
- {
- name: "IPv6",
- netProto: ipv6.ProtocolNumber,
- rx: func(e *channel.Endpoint) {
- utils.RxICMPv6EchoRequest(e, utils.RemoteIPv6Addr, utils.Ipv6Addr2.AddressWithPrefix.Address, ttl)
- },
- checker: func(t *testing.T, b []byte) {
- checker.IPv6(t, b,
- checker.SrcAddr(utils.Ipv6Addr2.AddressWithPrefix.Address),
- checker.DstAddr(utils.RemoteIPv6Addr),
- checker.ICMPv6(
- checker.ICMPv6Type(header.ICMPv6EchoReply)))
- },
- },
- }
-
- subTests := []struct {
- name string
- setupFilter func(*testing.T, *stack.Stack, tcpip.NetworkProtocolNumber)
- expectDrop bool
- }{
- {
- name: "Accept",
- setupFilter: func(*testing.T, *stack.Stack, tcpip.NetworkProtocolNumber) { /* no filter */ },
- expectDrop: false,
- },
-
- {
- name: "Drop",
- setupFilter: setupDropFilter(stack.Input, stack.IPHeaderFilter{}),
- expectDrop: true,
- },
- {
- name: "Drop with input NIC filtering on arrival NIC",
- setupFilter: setupDropFilter(stack.Input, stack.IPHeaderFilter{InputInterface: nic1Name}),
- expectDrop: true,
- },
- {
- name: "Drop with input NIC filtering on delivered NIC",
- setupFilter: setupDropFilter(stack.Input, stack.IPHeaderFilter{InputInterface: nic2Name}),
- expectDrop: false,
- },
-
- {
- name: "Drop with input NIC filtering on other NIC",
- setupFilter: setupDropFilter(stack.Input, stack.IPHeaderFilter{InputInterface: otherNICName}),
- expectDrop: false,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- for _, subTest := range subTests {
- t.Run(subTest.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},
- })
-
- subTest.setupFilter(t, s, test.netProto)
-
- e1 := channel.New(1, header.IPv6MinimumMTU, "")
- if err := s.CreateNICWithOptions(nicID1, e1, stack.NICOptions{Name: nic1Name}); err != nil {
- t.Fatalf("s.CreateNICWithOptions(%d, _, _): %s", nicID1, err)
- }
- if err := s.AddProtocolAddress(nicID1, utils.Ipv4Addr1); err != nil {
- t.Fatalf("s.AddProtocolAddress(%d, %#v): %s", nicID1, utils.Ipv4Addr1, err)
- }
- if err := s.AddProtocolAddress(nicID1, utils.Ipv6Addr1); err != nil {
- t.Fatalf("s.AddProtocolAddress(%d, %#v): %s", nicID1, utils.Ipv6Addr1, err)
- }
-
- e2 := channel.New(1, header.IPv6MinimumMTU, "")
- if err := s.CreateNICWithOptions(nicID2, e2, stack.NICOptions{Name: nic2Name}); err != nil {
- t.Fatalf("s.CreateNICWithOptions(%d, _, _): %s", nicID2, err)
- }
- if err := s.AddProtocolAddress(nicID2, utils.Ipv4Addr2); err != nil {
- t.Fatalf("s.AddProtocolAddress(%d, %#v): %s", nicID2, utils.Ipv4Addr2, err)
- }
- if err := s.AddProtocolAddress(nicID2, utils.Ipv6Addr2); err != nil {
- t.Fatalf("s.AddProtocolAddress(%d, %#v): %s", nicID2, utils.Ipv6Addr2, err)
- }
-
- if err := s.SetForwardingDefaultAndAllNICs(ipv4.ProtocolNumber, true); err != nil {
- t.Fatalf("s.SetForwardingDefaultAndAllNICs(%d, true): %s", ipv4.ProtocolNumber, err)
- }
- if err := s.SetForwardingDefaultAndAllNICs(ipv6.ProtocolNumber, true); err != nil {
- t.Fatalf("s.SetForwardingDefaultAndAllNICs(%d, true): %s", ipv6.ProtocolNumber, err)
- }
-
- s.SetRouteTable([]tcpip.Route{
- {
- Destination: header.IPv4EmptySubnet,
- NIC: nicID1,
- },
- {
- Destination: header.IPv6EmptySubnet,
- NIC: nicID1,
- },
- })
-
- test.rx(e1)
-
- ep1, err := s.GetNetworkEndpoint(nicID1, test.netProto)
- if err != nil {
- t.Fatalf("s.GetNetworkEndpoint(%d, %d): %s", nicID1, test.netProto, err)
- }
- ep1Stats := ep1.Stats()
- ipEP1Stats, ok := ep1Stats.(stack.IPNetworkEndpointStats)
- if !ok {
- t.Fatalf("got ep1Stats = %T, want = stack.IPNetworkEndpointStats", ep1Stats)
- }
- ip1Stats := ipEP1Stats.IPStats()
-
- if got := ip1Stats.PacketsReceived.Value(); got != 1 {
- t.Errorf("got ip1Stats.PacketsReceived.Value() = %d, want = 1", got)
- }
- if got := ip1Stats.ValidPacketsReceived.Value(); got != 1 {
- t.Errorf("got ip1Stats.ValidPacketsReceived.Value() = %d, want = 1", got)
- }
- if got, want := ip1Stats.PacketsSent.Value(), boolToInt(!subTest.expectDrop); got != want {
- t.Errorf("got ip1Stats.PacketsSent.Value() = %d, want = %d", got, want)
- }
-
- ep2, err := s.GetNetworkEndpoint(nicID2, test.netProto)
- if err != nil {
- t.Fatalf("s.GetNetworkEndpoint(%d, %d): %s", nicID2, test.netProto, err)
- }
- ep2Stats := ep2.Stats()
- ipEP2Stats, ok := ep2Stats.(stack.IPNetworkEndpointStats)
- if !ok {
- t.Fatalf("got ep2Stats = %T, want = stack.IPNetworkEndpointStats", ep2Stats)
- }
- ip2Stats := ipEP2Stats.IPStats()
- if got := ip2Stats.PacketsReceived.Value(); got != 0 {
- t.Errorf("got ip2Stats.PacketsReceived.Value() = %d, want = 0", got)
- }
- if got := ip2Stats.ValidPacketsReceived.Value(); got != 1 {
- t.Errorf("got ip2Stats.ValidPacketsReceived.Value() = %d, want = 1", got)
- }
- if got, want := ip2Stats.IPTablesInputDropped.Value(), boolToInt(subTest.expectDrop); got != want {
- t.Errorf("got ip2Stats.IPTablesInputDropped.Value() = %d, want = %d", got, want)
- }
- if got := ip2Stats.PacketsSent.Value(); got != 0 {
- t.Errorf("got ip2Stats.PacketsSent.Value() = %d, want = 0", got)
- }
-
- if p, ok := e1.Read(); ok == subTest.expectDrop {
- t.Errorf("got e1.Read() = (%#v, %t), want = (_, %t)", p, ok, !subTest.expectDrop)
- } else if !subTest.expectDrop {
- test.checker(t, stack.PayloadSince(p.Pkt.NetworkHeader()))
- }
- if p, ok := e2.Read(); ok {
- t.Errorf("got e2.Read() = (%#v, true), want = (_, false)", p)
- }
- })
- }
- })
- }
-}
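
Each setupFilter helper in this file follows the same filter-table update sequence: fetch the table with GetTable, retarget the rule at the hook's builtin-chain index, keep the next rule as an explicit accept so packets are not dropped further down the chain, and install the result with ReplaceTable. A condensed sketch of that sequence for an IPv4 Input-hook drop (the package, function name, and panic-based error handling are illustrative, not part of the deleted file):

    package iptablesexample

    import (
        "gvisor.dev/gvisor/pkg/tcpip/header"
        "gvisor.dev/gvisor/pkg/tcpip/stack"
    )

    // installInputDrop rewrites the IPv4 filter table so the Input hook drops
    // packets, mirroring setupDropFilter and the per-test setupFilter closures.
    func installInputDrop(s *stack.Stack) {
        ipt := s.IPTables()
        filter := ipt.GetTable(stack.FilterID, false /* ipv6 */)
        ruleIdx := filter.BuiltinChains[stack.Input]
        filter.Rules[ruleIdx].Target = &stack.DropTarget{NetworkProtocol: header.IPv4ProtocolNumber}
        // Make sure the packet is not dropped by the next rule.
        filter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}
        if err := ipt.ReplaceTable(stack.FilterID, filter, false /* ipv6 */); err != nil {
            panic(err)
        }
    }
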
diff --git a/pkg/tcpip/tests/integration/link_resolution_test.go b/pkg/tcpip/tests/integration/link_resolution_test.go
deleted file mode 100644
index 27caa0c28..000000000
--- a/pkg/tcpip/tests/integration/link_resolution_test.go
+++ /dev/null
@@ -1,1640 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package link_resolution_test
-
-import (
- "bytes"
- "fmt"
- "net"
- "runtime"
- "testing"
- "time"
-
- "github.com/google/go-cmp/cmp"
- "github.com/google/go-cmp/cmp/cmpopts"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/checker"
- "gvisor.dev/gvisor/pkg/tcpip/faketime"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/link/channel"
- "gvisor.dev/gvisor/pkg/tcpip/link/pipe"
- "gvisor.dev/gvisor/pkg/tcpip/network/arp"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/tests/utils"
- tcptestutil "gvisor.dev/gvisor/pkg/tcpip/testutil"
- "gvisor.dev/gvisor/pkg/tcpip/transport/icmp"
- "gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
- "gvisor.dev/gvisor/pkg/tcpip/transport/udp"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-func setupStack(t *testing.T, stackOpts stack.Options, host1NICID, host2NICID tcpip.NICID) (*stack.Stack, *stack.Stack) {
- host1Stack := stack.New(stackOpts)
- host2Stack := stack.New(stackOpts)
-
- host1NIC, host2NIC := pipe.New(utils.LinkAddr1, utils.LinkAddr2)
-
- if err := host1Stack.CreateNIC(host1NICID, utils.NewEthernetEndpoint(host1NIC)); err != nil {
- t.Fatalf("host1Stack.CreateNIC(%d, _): %s", host1NICID, err)
- }
- if err := host2Stack.CreateNIC(host2NICID, utils.NewEthernetEndpoint(host2NIC)); err != nil {
- t.Fatalf("host2Stack.CreateNIC(%d, _): %s", host2NICID, err)
- }
-
- if err := host1Stack.AddProtocolAddress(host1NICID, utils.Ipv4Addr1); err != nil {
- t.Fatalf("host1Stack.AddProtocolAddress(%d, %#v): %s", host1NICID, utils.Ipv4Addr1, err)
- }
- if err := host2Stack.AddProtocolAddress(host2NICID, utils.Ipv4Addr2); err != nil {
- t.Fatalf("host2Stack.AddProtocolAddress(%d, %#v): %s", host2NICID, utils.Ipv4Addr2, err)
- }
- if err := host1Stack.AddProtocolAddress(host1NICID, utils.Ipv6Addr1); err != nil {
- t.Fatalf("host1Stack.AddProtocolAddress(%d, %#v): %s", host1NICID, utils.Ipv6Addr1, err)
- }
- if err := host2Stack.AddProtocolAddress(host2NICID, utils.Ipv6Addr2); err != nil {
- t.Fatalf("host2Stack.AddProtocolAddress(%d, %#v): %s", host2NICID, utils.Ipv6Addr2, err)
- }
-
- host1Stack.SetRouteTable([]tcpip.Route{
- {
- Destination: utils.Ipv4Addr1.AddressWithPrefix.Subnet(),
- NIC: host1NICID,
- },
- {
- Destination: utils.Ipv6Addr1.AddressWithPrefix.Subnet(),
- NIC: host1NICID,
- },
- })
- host2Stack.SetRouteTable([]tcpip.Route{
- {
- Destination: utils.Ipv4Addr2.AddressWithPrefix.Subnet(),
- NIC: host2NICID,
- },
- {
- Destination: utils.Ipv6Addr2.AddressWithPrefix.Subnet(),
- NIC: host2NICID,
- },
- })
-
- return host1Stack, host2Stack
-}
-
-// TestPing tests that two hosts can ping each other when link resolution is
-// enabled.
-func TestPing(t *testing.T) {
- const (
- host1NICID = 1
- host2NICID = 4
-
- // icmpDataOffset is the offset to the data in both ICMPv4 and ICMPv6 echo
- // request/reply packets.
- icmpDataOffset = 8
- )
-
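// For reference, both ICMPv4 and ICMPv6 echo messages begin with type (1
// byte), code (1), checksum (2), identifier (2), and sequence number (2),
// i.e. 1+1+2+2+2 = 8 bytes, which is where icmpDataOffset comes from.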
- tests := []struct {
- name string
- transProto tcpip.TransportProtocolNumber
- netProto tcpip.NetworkProtocolNumber
- remoteAddr tcpip.Address
- icmpBuf func(*testing.T) []byte
- }{
- {
- name: "IPv4 Ping",
- transProto: icmp.ProtocolNumber4,
- netProto: ipv4.ProtocolNumber,
- remoteAddr: utils.Ipv4Addr2.AddressWithPrefix.Address,
- icmpBuf: func(t *testing.T) []byte {
- data := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
- hdr := header.ICMPv4(make([]byte, header.ICMPv4MinimumSize+len(data)))
- hdr.SetType(header.ICMPv4Echo)
- if n := copy(hdr.Payload(), data[:]); n != len(data) {
- t.Fatalf("copied %d bytes but expected to copy %d bytes", n, len(data))
- }
- return hdr
- },
- },
- {
- name: "IPv6 Ping",
- transProto: icmp.ProtocolNumber6,
- netProto: ipv6.ProtocolNumber,
- remoteAddr: utils.Ipv6Addr2.AddressWithPrefix.Address,
- icmpBuf: func(t *testing.T) []byte {
- data := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
- hdr := header.ICMPv6(make([]byte, header.ICMPv6MinimumSize+len(data)))
- hdr.SetType(header.ICMPv6EchoRequest)
- if n := copy(hdr.Payload(), data[:]); n != len(data) {
- t.Fatalf("copied %d bytes but expected to copy %d bytes", n, len(data))
- }
- return hdr
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- stackOpts := stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{arp.NewProtocol, ipv4.NewProtocol, ipv6.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{icmp.NewProtocol4, icmp.NewProtocol6},
- }
-
- host1Stack, _ := setupStack(t, stackOpts, host1NICID, host2NICID)
-
- var wq waiter.Queue
- we, waiterCH := waiter.NewChannelEntry(nil)
- wq.EventRegister(&we, waiter.ReadableEvents)
- ep, err := host1Stack.NewEndpoint(test.transProto, test.netProto, &wq)
- if err != nil {
- t.Fatalf("host1Stack.NewEndpoint(%d, %d, _): %s", test.transProto, test.netProto, err)
- }
- defer ep.Close()
-
- icmpBuf := test.icmpBuf(t)
- var r bytes.Reader
- r.Reset(icmpBuf)
- wOpts := tcpip.WriteOptions{To: &tcpip.FullAddress{Addr: test.remoteAddr}}
- if n, err := ep.Write(&r, wOpts); err != nil {
- t.Fatalf("ep.Write(_, _): %s", err)
- } else if want := int64(len(icmpBuf)); n != want {
- t.Fatalf("got ep.Write(_, _) = (%d, _), want = (%d, _)", n, want)
- }
-
- // Wait for the endpoint to be readable.
- <-waiterCH
-
- var buf bytes.Buffer
- opts := tcpip.ReadOptions{NeedRemoteAddr: true}
- res, err := ep.Read(&buf, opts)
- if err != nil {
-				t.Fatalf("ep.Read(_, %#v): %s", opts, err)
- }
- if diff := cmp.Diff(tcpip.ReadResult{
- Count: buf.Len(),
- Total: buf.Len(),
- RemoteAddr: tcpip.FullAddress{Addr: test.remoteAddr},
- }, res, checker.IgnoreCmpPath(
- "ControlMessages",
- "RemoteAddr.NIC",
- "RemoteAddr.Port",
- )); diff != "" {
- t.Errorf("ep.Read: unexpected result (-want +got):\n%s", diff)
- }
-			if diff := cmp.Diff(icmpBuf[icmpDataOffset:], buf.Bytes()[icmpDataOffset:]); diff != "" {
- t.Errorf("received data mismatch (-want +got):\n%s", diff)
- }
- })
- }
-}
-
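-// transportError holds the values reported by a stack.TransportError so that
-// tests can compare a received transport error against an expected one.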
-type transportError struct {
- origin tcpip.SockErrOrigin
- typ uint8
- code uint8
- info uint32
- kind stack.TransportErrorKind
-}
-
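-// TestTCPLinkResolutionFailure tests that TCP connections to resolvable
-// remotes succeed, and that connections to unresolvable remotes fail with the
-// expected write and socket errors.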
-func TestTCPLinkResolutionFailure(t *testing.T) {
- const (
- host1NICID = 1
- host2NICID = 4
- )
-
- tests := []struct {
- name string
- netProto tcpip.NetworkProtocolNumber
- remoteAddr tcpip.Address
- expectedWriteErr tcpip.Error
- sockError tcpip.SockError
- transErr transportError
- }{
- {
- name: "IPv4 with resolvable remote",
- netProto: ipv4.ProtocolNumber,
- remoteAddr: utils.Ipv4Addr2.AddressWithPrefix.Address,
- expectedWriteErr: nil,
- },
- {
- name: "IPv6 with resolvable remote",
- netProto: ipv6.ProtocolNumber,
- remoteAddr: utils.Ipv6Addr2.AddressWithPrefix.Address,
- expectedWriteErr: nil,
- },
- {
- name: "IPv4 without resolvable remote",
- netProto: ipv4.ProtocolNumber,
- remoteAddr: utils.Ipv4Addr3.AddressWithPrefix.Address,
- expectedWriteErr: &tcpip.ErrNoRoute{},
- sockError: tcpip.SockError{
- Err: &tcpip.ErrNoRoute{},
- Dst: tcpip.FullAddress{
- NIC: host1NICID,
- Addr: utils.Ipv4Addr3.AddressWithPrefix.Address,
- Port: 1234,
- },
- Offender: tcpip.FullAddress{
- NIC: host1NICID,
- Addr: utils.Ipv4Addr1.AddressWithPrefix.Address,
- },
- NetProto: ipv4.ProtocolNumber,
- },
- transErr: transportError{
- origin: tcpip.SockExtErrorOriginICMP,
- typ: uint8(header.ICMPv4DstUnreachable),
- code: uint8(header.ICMPv4HostUnreachable),
- kind: stack.DestinationHostUnreachableTransportError,
- },
- },
- {
- name: "IPv6 without resolvable remote",
- netProto: ipv6.ProtocolNumber,
- remoteAddr: utils.Ipv6Addr3.AddressWithPrefix.Address,
- expectedWriteErr: &tcpip.ErrNoRoute{},
- sockError: tcpip.SockError{
- Err: &tcpip.ErrNoRoute{},
- Dst: tcpip.FullAddress{
- NIC: host1NICID,
- Addr: utils.Ipv6Addr3.AddressWithPrefix.Address,
- Port: 1234,
- },
- Offender: tcpip.FullAddress{
- NIC: host1NICID,
- Addr: utils.Ipv6Addr1.AddressWithPrefix.Address,
- },
- NetProto: ipv6.ProtocolNumber,
- },
- transErr: transportError{
- origin: tcpip.SockExtErrorOriginICMP6,
- typ: uint8(header.ICMPv6DstUnreachable),
- code: uint8(header.ICMPv6AddressUnreachable),
- kind: stack.DestinationHostUnreachableTransportError,
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- clock := faketime.NewManualClock()
- stackOpts := stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{arp.NewProtocol, ipv4.NewProtocol, ipv6.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{tcp.NewProtocol},
- Clock: clock,
- }
-
- host1Stack, host2Stack := setupStack(t, stackOpts, host1NICID, host2NICID)
-
- var listenerWQ waiter.Queue
- listenerEP, err := host2Stack.NewEndpoint(tcp.ProtocolNumber, test.netProto, &listenerWQ)
- if err != nil {
- t.Fatalf("host2Stack.NewEndpoint(%d, %d, _): %s", tcp.ProtocolNumber, test.netProto, err)
- }
- defer listenerEP.Close()
-
- listenerAddr := tcpip.FullAddress{Port: 1234}
- if err := listenerEP.Bind(listenerAddr); err != nil {
- t.Fatalf("listenerEP.Bind(%#v): %s", listenerAddr, err)
- }
-
- if err := listenerEP.Listen(1); err != nil {
- t.Fatalf("listenerEP.Listen(1): %s", err)
- }
-
- var clientWQ waiter.Queue
- we, ch := waiter.NewChannelEntry(nil)
- clientWQ.EventRegister(&we, waiter.WritableEvents|waiter.EventErr)
- clientEP, err := host1Stack.NewEndpoint(tcp.ProtocolNumber, test.netProto, &clientWQ)
- if err != nil {
- t.Fatalf("host1Stack.NewEndpoint(%d, %d, _): %s", tcp.ProtocolNumber, test.netProto, err)
- }
- defer clientEP.Close()
-
- sockOpts := clientEP.SocketOptions()
- sockOpts.SetRecvError(true)
-
- remoteAddr := listenerAddr
- remoteAddr.Addr = test.remoteAddr
- {
- err := clientEP.Connect(remoteAddr)
- if _, ok := err.(*tcpip.ErrConnectStarted); !ok {
- t.Fatalf("got clientEP.Connect(%#v) = %s, want = %s", remoteAddr, err, &tcpip.ErrConnectStarted{})
- }
- }
-
- // Wait for an error due to link resolution failing, or the endpoint to be
- // writable.
- if test.expectedWriteErr != nil {
- nudConfigs, err := host1Stack.NUDConfigurations(host1NICID, test.netProto)
- if err != nil {
- t.Fatalf("host1Stack.NUDConfigurations(%d, %d): %s", host1NICID, test.netProto, err)
- }
- clock.Advance(time.Duration(nudConfigs.MaxMulticastProbes) * nudConfigs.RetransmitTimer)
- } else {
- clock.RunImmediatelyScheduledJobs()
- }
- <-ch
-
- {
- var r bytes.Reader
- r.Reset([]byte{0})
- var wOpts tcpip.WriteOptions
- _, err := clientEP.Write(&r, wOpts)
- if diff := cmp.Diff(test.expectedWriteErr, err); diff != "" {
- t.Errorf("unexpected error from clientEP.Write(_, %#v), (-want, +got):\n%s", wOpts, diff)
- }
- }
-
- if test.expectedWriteErr == nil {
- return
- }
-
- sockErr := sockOpts.DequeueErr()
- if sockErr == nil {
- t.Fatalf("got sockOpts.DequeueErr() = nil, want = non-nil")
- }
-
- sockErrCmpOpts := []cmp.Option{
- cmpopts.IgnoreUnexported(tcpip.SockError{}),
- cmp.Comparer(func(a, b tcpip.Error) bool {
-					// tcpip.Error holds an unexported field but the errors netstack uses
-					// are predefined so we can simply compare pointers.
- return a == b
- }),
- checker.IgnoreCmpPath(
- // Ignore the payload since we do not know the TCP seq/ack numbers.
- "Payload",
-					// Ignore the cause since we will compare its properties separately;
-					// the concrete type of the cause is unknown.
- "Cause",
- ),
- }
-
- if addr, err := clientEP.GetLocalAddress(); err != nil {
- t.Fatalf("clientEP.GetLocalAddress(): %s", err)
- } else {
- test.sockError.Offender.Port = addr.Port
- }
- if diff := cmp.Diff(&test.sockError, sockErr, sockErrCmpOpts...); diff != "" {
- t.Errorf("socket error mismatch (-want +got):\n%s", diff)
- }
-
- transErr, ok := sockErr.Cause.(stack.TransportError)
- if !ok {
- t.Fatalf("socket error cause is not a transport error; cause = %#v", sockErr.Cause)
- }
- if diff := cmp.Diff(
- test.transErr,
- transportError{
- origin: transErr.Origin(),
- typ: transErr.Type(),
- code: transErr.Code(),
- info: transErr.Info(),
- kind: transErr.Kind(),
- },
- cmp.AllowUnexported(transportError{}),
- ); diff != "" {
- t.Errorf("socket error mismatch (-want +got):\n%s", diff)
- }
- })
- }
-}
-
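-// TestForwardingWithLinkResolutionFailure tests that a forwarding stack
-// replies with an ICMP host/address unreachable error when link resolution
-// fails for a forwarded packet's destination.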
-func TestForwardingWithLinkResolutionFailure(t *testing.T) {
- const (
- incomingNICID = 1
- outgoingNICID = 2
- ttl = 2
- expectedHostUnreachableErrorCount = 1
- )
- outgoingLinkAddr := tcptestutil.MustParseLink("02:03:03:04:05:06")
-
- rxICMPv4EchoRequest := func(e *channel.Endpoint, src, dst tcpip.Address) {
- utils.RxICMPv4EchoRequest(e, src, dst, ttl)
- }
-
- rxICMPv6EchoRequest := func(e *channel.Endpoint, src, dst tcpip.Address) {
- utils.RxICMPv6EchoRequest(e, src, dst, ttl)
- }
-
- arpChecker := func(t *testing.T, request channel.PacketInfo, src, dst tcpip.Address) {
- if request.Proto != arp.ProtocolNumber {
- t.Errorf("got request.Proto = %d, want = %d", request.Proto, arp.ProtocolNumber)
- }
- if request.Route.RemoteLinkAddress != header.EthernetBroadcastAddress {
- t.Errorf("got request.Route.RemoteLinkAddress = %s, want = %s", request.Route.RemoteLinkAddress, header.EthernetBroadcastAddress)
- }
- rep := header.ARP(request.Pkt.NetworkHeader().View())
- if got := rep.Op(); got != header.ARPRequest {
- t.Errorf("got Op() = %d, want = %d", got, header.ARPRequest)
- }
- if got := tcpip.LinkAddress(rep.HardwareAddressSender()); got != outgoingLinkAddr {
- t.Errorf("got HardwareAddressSender = %s, want = %s", got, outgoingLinkAddr)
- }
- if got := tcpip.Address(rep.ProtocolAddressSender()); got != src {
- t.Errorf("got ProtocolAddressSender = %s, want = %s", got, src)
- }
- if got := tcpip.Address(rep.ProtocolAddressTarget()); got != dst {
- t.Errorf("got ProtocolAddressTarget = %s, want = %s", got, dst)
- }
- }
-
- ndpChecker := func(t *testing.T, request channel.PacketInfo, src, dst tcpip.Address) {
- if request.Proto != header.IPv6ProtocolNumber {
- t.Fatalf("got Proto = %d, want = %d", request.Proto, header.IPv6ProtocolNumber)
- }
-
- snmc := header.SolicitedNodeAddr(dst)
- if want := header.EthernetAddressFromMulticastIPv6Address(snmc); request.Route.RemoteLinkAddress != want {
- t.Errorf("got remote link address = %s, want = %s", request.Route.RemoteLinkAddress, want)
- }
-
- checker.IPv6(t, stack.PayloadSince(request.Pkt.NetworkHeader()),
- checker.SrcAddr(src),
- checker.DstAddr(snmc),
- checker.TTL(header.NDPHopLimit),
- checker.NDPNS(
- checker.NDPNSTargetAddress(dst),
- ))
- }
-
- icmpv4Checker := func(t *testing.T, b []byte, src, dst tcpip.Address) {
- checker.IPv4(t, b,
- checker.SrcAddr(src),
- checker.DstAddr(dst),
- checker.TTL(ipv4.DefaultTTL),
- checker.ICMPv4(
- checker.ICMPv4Checksum(),
- checker.ICMPv4Type(header.ICMPv4DstUnreachable),
- checker.ICMPv4Code(header.ICMPv4HostUnreachable),
- ),
- )
- }
-
- icmpv6Checker := func(t *testing.T, b []byte, src, dst tcpip.Address) {
- checker.IPv6(t, b,
- checker.SrcAddr(src),
- checker.DstAddr(dst),
- checker.TTL(ipv6.DefaultTTL),
- checker.ICMPv6(
- checker.ICMPv6Type(header.ICMPv6DstUnreachable),
- checker.ICMPv6Code(header.ICMPv6AddressUnreachable),
- ),
- )
- }
-
- tests := []struct {
- name string
- networkProtocolFactory []stack.NetworkProtocolFactory
- networkProtocolNumber tcpip.NetworkProtocolNumber
- sourceAddr tcpip.Address
- destAddr tcpip.Address
- incomingAddr tcpip.AddressWithPrefix
- outgoingAddr tcpip.AddressWithPrefix
- transportProtocol func(*stack.Stack) stack.TransportProtocol
- rx func(*channel.Endpoint, tcpip.Address, tcpip.Address)
- linkResolutionRequestChecker func(*testing.T, channel.PacketInfo, tcpip.Address, tcpip.Address)
- icmpReplyChecker func(*testing.T, []byte, tcpip.Address, tcpip.Address)
- mtu uint32
- }{
- {
- name: "IPv4 Host unreachable",
- networkProtocolFactory: []stack.NetworkProtocolFactory{arp.NewProtocol, ipv4.NewProtocol},
- networkProtocolNumber: header.IPv4ProtocolNumber,
- sourceAddr: tcptestutil.MustParse4("10.0.0.2"),
- destAddr: tcptestutil.MustParse4("11.0.0.2"),
- incomingAddr: tcpip.AddressWithPrefix{
- Address: tcpip.Address(net.ParseIP("10.0.0.1").To4()),
- PrefixLen: 8,
- },
- outgoingAddr: tcpip.AddressWithPrefix{
- Address: tcpip.Address(net.ParseIP("11.0.0.1").To4()),
- PrefixLen: 8,
- },
- transportProtocol: icmp.NewProtocol4,
- linkResolutionRequestChecker: arpChecker,
- icmpReplyChecker: icmpv4Checker,
- rx: rxICMPv4EchoRequest,
- mtu: ipv4.MaxTotalSize,
- },
- {
- name: "IPv6 Host unreachable",
- networkProtocolFactory: []stack.NetworkProtocolFactory{ipv6.NewProtocol},
- networkProtocolNumber: header.IPv6ProtocolNumber,
- sourceAddr: tcptestutil.MustParse6("10::2"),
- destAddr: tcptestutil.MustParse6("11::2"),
- incomingAddr: tcpip.AddressWithPrefix{
- Address: tcpip.Address(net.ParseIP("10::1").To16()),
- PrefixLen: 64,
- },
- outgoingAddr: tcpip.AddressWithPrefix{
- Address: tcpip.Address(net.ParseIP("11::1").To16()),
- PrefixLen: 64,
- },
- transportProtocol: icmp.NewProtocol6,
- linkResolutionRequestChecker: ndpChecker,
- icmpReplyChecker: icmpv6Checker,
- rx: rxICMPv6EchoRequest,
- mtu: header.IPv6MinimumMTU,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- clock := faketime.NewManualClock()
-
- s := stack.New(stack.Options{
- NetworkProtocols: test.networkProtocolFactory,
- TransportProtocols: []stack.TransportProtocolFactory{test.transportProtocol},
- Clock: clock,
- })
-
- // Set up endpoint through which we will receive packets.
- incomingEndpoint := channel.New(1, test.mtu, "")
- if err := s.CreateNIC(incomingNICID, incomingEndpoint); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", incomingNICID, err)
- }
- incomingProtoAddr := tcpip.ProtocolAddress{
- Protocol: test.networkProtocolNumber,
- AddressWithPrefix: test.incomingAddr,
- }
- if err := s.AddProtocolAddress(incomingNICID, incomingProtoAddr); err != nil {
- t.Fatalf("AddProtocolAddress(%d, %#v): %s", incomingNICID, incomingProtoAddr, err)
- }
-
- // Set up endpoint through which we will attempt to forward packets.
- outgoingEndpoint := channel.New(1, test.mtu, outgoingLinkAddr)
- outgoingEndpoint.LinkEPCapabilities |= stack.CapabilityResolutionRequired
- if err := s.CreateNIC(outgoingNICID, outgoingEndpoint); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", outgoingNICID, err)
- }
- outgoingProtoAddr := tcpip.ProtocolAddress{
- Protocol: test.networkProtocolNumber,
- AddressWithPrefix: test.outgoingAddr,
- }
- if err := s.AddProtocolAddress(outgoingNICID, outgoingProtoAddr); err != nil {
- t.Fatalf("AddProtocolAddress(%d, %#v): %s", outgoingNICID, outgoingProtoAddr, err)
- }
-
- s.SetRouteTable([]tcpip.Route{
- {
- Destination: test.incomingAddr.Subnet(),
- NIC: incomingNICID,
- },
- {
- Destination: test.outgoingAddr.Subnet(),
- NIC: outgoingNICID,
- },
- })
-
- if err := s.SetForwardingDefaultAndAllNICs(test.networkProtocolNumber, true); err != nil {
- t.Fatalf("SetForwardingDefaultAndAllNICs(%d, true): %s", test.networkProtocolNumber, err)
- }
-
- test.rx(incomingEndpoint, test.sourceAddr, test.destAddr)
-
- nudConfigs, err := s.NUDConfigurations(outgoingNICID, test.networkProtocolNumber)
- if err != nil {
- t.Fatalf("s.NUDConfigurations(%d, %d): %s", outgoingNICID, test.networkProtocolNumber, err)
- }
- // Trigger the first packet on the endpoint.
- clock.RunImmediatelyScheduledJobs()
-
- for i := 0; i < int(nudConfigs.MaxMulticastProbes); i++ {
- request, ok := outgoingEndpoint.Read()
- if !ok {
- t.Fatal("expected ARP packet through outgoing NIC")
- }
-
- test.linkResolutionRequestChecker(t, request, test.outgoingAddr.Address, test.destAddr)
-
- // Advance the clock the span of one request timeout.
- clock.Advance(nudConfigs.RetransmitTimer)
- }
-
- // Next, we make a blocking read to retrieve the error packet. This is
- // necessary because outgoing packets are dequeued asynchronously when
- // link resolution fails, and this dequeue is what triggers the ICMP
- // error.
- reply, ok := incomingEndpoint.Read()
- if !ok {
- t.Fatal("expected ICMP packet through incoming NIC")
- }
-
- test.icmpReplyChecker(t, stack.PayloadSince(reply.Pkt.NetworkHeader()), test.incomingAddr.Address, test.sourceAddr)
-
- // Since link resolution failed, we don't expect the packet to be
- // forwarded.
- forwardedPacket, ok := outgoingEndpoint.Read()
- if ok {
- t.Fatalf("expected no ICMP Echo packet through outgoing NIC, instead found: %#v", forwardedPacket)
- }
-
- if got, want := s.Stats().IP.Forwarding.HostUnreachable.Value(), expectedHostUnreachableErrorCount; int(got) != want {
- t.Errorf("got rt.Stats().IP.Forwarding.HostUnreachable.Value() = %d, want = %d", got, want)
- }
- })
- }
-}
-
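-// TestGetLinkAddress tests link address resolution through
-// Stack.GetLinkAddress, covering resolvable, unresolvable and bad local
-// address cases.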
-func TestGetLinkAddress(t *testing.T) {
- const (
- host1NICID = 1
- host2NICID = 4
- )
-
- tests := []struct {
- name string
- netProto tcpip.NetworkProtocolNumber
- remoteAddr, localAddr tcpip.Address
- expectedErr tcpip.Error
- }{
- {
- name: "IPv4 resolvable",
- netProto: ipv4.ProtocolNumber,
- remoteAddr: utils.Ipv4Addr2.AddressWithPrefix.Address,
- expectedErr: nil,
- },
- {
- name: "IPv6 resolvable",
- netProto: ipv6.ProtocolNumber,
- remoteAddr: utils.Ipv6Addr2.AddressWithPrefix.Address,
- expectedErr: nil,
- },
- {
- name: "IPv4 not resolvable",
- netProto: ipv4.ProtocolNumber,
- remoteAddr: utils.Ipv4Addr3.AddressWithPrefix.Address,
- expectedErr: &tcpip.ErrTimeout{},
- },
- {
- name: "IPv6 not resolvable",
- netProto: ipv6.ProtocolNumber,
- remoteAddr: utils.Ipv6Addr3.AddressWithPrefix.Address,
- expectedErr: &tcpip.ErrTimeout{},
- },
- {
- name: "IPv4 bad local address",
- netProto: ipv4.ProtocolNumber,
- remoteAddr: utils.Ipv4Addr2.AddressWithPrefix.Address,
- localAddr: utils.Ipv4Addr2.AddressWithPrefix.Address,
- expectedErr: &tcpip.ErrBadLocalAddress{},
- },
- {
- name: "IPv6 bad local address",
- netProto: ipv6.ProtocolNumber,
- remoteAddr: utils.Ipv6Addr2.AddressWithPrefix.Address,
- localAddr: utils.Ipv6Addr2.AddressWithPrefix.Address,
- expectedErr: &tcpip.ErrBadLocalAddress{},
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- clock := faketime.NewManualClock()
- stackOpts := stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{arp.NewProtocol, ipv4.NewProtocol, ipv6.NewProtocol},
- Clock: clock,
- }
-
- host1Stack, _ := setupStack(t, stackOpts, host1NICID, host2NICID)
-
- ch := make(chan stack.LinkResolutionResult, 1)
- err := host1Stack.GetLinkAddress(host1NICID, test.remoteAddr, test.localAddr, test.netProto, func(r stack.LinkResolutionResult) {
- ch <- r
- })
-			if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
-				t.Fatalf("got host1Stack.GetLinkAddress(%d, %s, %s, %d, _) = %s, want = %s", host1NICID, test.remoteAddr, test.localAddr, test.netProto, err, &tcpip.ErrWouldBlock{})
-			}
- wantRes := stack.LinkResolutionResult{Err: test.expectedErr}
- if test.expectedErr == nil {
- wantRes.LinkAddress = utils.LinkAddr2
- }
-
- nudConfigs, err := host1Stack.NUDConfigurations(host1NICID, test.netProto)
- if err != nil {
- t.Fatalf("host1Stack.NUDConfigurations(%d, %d): %s", host1NICID, test.netProto, err)
- }
-
- clock.Advance(time.Duration(nudConfigs.MaxMulticastProbes) * nudConfigs.RetransmitTimer)
- select {
- case got := <-ch:
- if diff := cmp.Diff(wantRes, got); diff != "" {
- t.Fatalf("link resolution result mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("event didn't arrive")
- }
- })
- }
-}
-
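-// TestRouteResolvedFields tests that a route's resolved fields (including the
-// remote link address) are delivered once link resolution completes, or an
-// error is delivered when resolution fails.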
-func TestRouteResolvedFields(t *testing.T) {
- const (
- host1NICID = 1
- host2NICID = 4
- )
-
- tests := []struct {
- name string
- netProto tcpip.NetworkProtocolNumber
- localAddr tcpip.Address
- remoteAddr tcpip.Address
- immediatelyResolvable bool
- expectedErr tcpip.Error
- expectedLinkAddr tcpip.LinkAddress
- }{
- {
- name: "IPv4 immediately resolvable",
- netProto: ipv4.ProtocolNumber,
- localAddr: utils.Ipv4Addr1.AddressWithPrefix.Address,
- remoteAddr: header.IPv4AllSystems,
- immediatelyResolvable: true,
- expectedErr: nil,
- expectedLinkAddr: header.EthernetAddressFromMulticastIPv4Address(header.IPv4AllSystems),
- },
- {
- name: "IPv6 immediately resolvable",
- netProto: ipv6.ProtocolNumber,
- localAddr: utils.Ipv6Addr1.AddressWithPrefix.Address,
- remoteAddr: header.IPv6AllNodesMulticastAddress,
- immediatelyResolvable: true,
- expectedErr: nil,
- expectedLinkAddr: header.EthernetAddressFromMulticastIPv6Address(header.IPv6AllNodesMulticastAddress),
- },
- {
- name: "IPv4 resolvable",
- netProto: ipv4.ProtocolNumber,
- localAddr: utils.Ipv4Addr1.AddressWithPrefix.Address,
- remoteAddr: utils.Ipv4Addr2.AddressWithPrefix.Address,
- immediatelyResolvable: false,
- expectedErr: nil,
- expectedLinkAddr: utils.LinkAddr2,
- },
- {
- name: "IPv6 resolvable",
- netProto: ipv6.ProtocolNumber,
- localAddr: utils.Ipv6Addr1.AddressWithPrefix.Address,
- remoteAddr: utils.Ipv6Addr2.AddressWithPrefix.Address,
- immediatelyResolvable: false,
- expectedErr: nil,
- expectedLinkAddr: utils.LinkAddr2,
- },
- {
- name: "IPv4 not resolvable",
- netProto: ipv4.ProtocolNumber,
- localAddr: utils.Ipv4Addr1.AddressWithPrefix.Address,
- remoteAddr: utils.Ipv4Addr3.AddressWithPrefix.Address,
- immediatelyResolvable: false,
- expectedErr: &tcpip.ErrTimeout{},
- },
- {
- name: "IPv6 not resolvable",
- netProto: ipv6.ProtocolNumber,
- localAddr: utils.Ipv6Addr1.AddressWithPrefix.Address,
- remoteAddr: utils.Ipv6Addr3.AddressWithPrefix.Address,
- immediatelyResolvable: false,
- expectedErr: &tcpip.ErrTimeout{},
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- clock := faketime.NewManualClock()
- stackOpts := stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{arp.NewProtocol, ipv4.NewProtocol, ipv6.NewProtocol},
- Clock: clock,
- }
-
- host1Stack, _ := setupStack(t, stackOpts, host1NICID, host2NICID)
- r, err := host1Stack.FindRoute(host1NICID, test.localAddr, test.remoteAddr, test.netProto, false /* multicastLoop */)
- if err != nil {
- t.Fatalf("host1Stack.FindRoute(%d, %s, %s, %d, false): %s", host1NICID, test.localAddr, test.remoteAddr, test.netProto, err)
- }
- defer r.Release()
-
- var wantRouteInfo stack.RouteInfo
- wantRouteInfo.LocalLinkAddress = utils.LinkAddr1
- wantRouteInfo.LocalAddress = test.localAddr
- wantRouteInfo.RemoteAddress = test.remoteAddr
- wantRouteInfo.NetProto = test.netProto
- wantRouteInfo.Loop = stack.PacketOut
- wantRouteInfo.RemoteLinkAddress = test.expectedLinkAddr
-
- ch := make(chan stack.ResolvedFieldsResult, 1)
-
- if !test.immediatelyResolvable {
- wantUnresolvedRouteInfo := wantRouteInfo
- wantUnresolvedRouteInfo.RemoteLinkAddress = ""
-
- err := r.ResolvedFields(func(r stack.ResolvedFieldsResult) {
- ch <- r
- })
- if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Errorf("got r.ResolvedFields(_) = %s, want = %s", err, &tcpip.ErrWouldBlock{})
- }
-
- nudConfigs, err := host1Stack.NUDConfigurations(host1NICID, test.netProto)
- if err != nil {
- t.Fatalf("host1Stack.NUDConfigurations(%d, %d): %s", host1NICID, test.netProto, err)
- }
- clock.Advance(time.Duration(nudConfigs.MaxMulticastProbes) * nudConfigs.RetransmitTimer)
-
- select {
- case got := <-ch:
- if diff := cmp.Diff(stack.ResolvedFieldsResult{RouteInfo: wantRouteInfo, Err: test.expectedErr}, got, cmp.AllowUnexported(stack.RouteInfo{})); diff != "" {
- t.Errorf("route resolve result mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatalf("event didn't arrive")
- }
-
- if test.expectedErr != nil {
- return
- }
-
- // At this point the neighbor table should be populated so the route
- // should be immediately resolvable.
- }
-
- if err := r.ResolvedFields(func(r stack.ResolvedFieldsResult) {
- ch <- r
- }); err != nil {
- t.Errorf("r.ResolvedFields(_): %s", err)
- }
- select {
- case routeResolveRes := <-ch:
- if diff := cmp.Diff(stack.ResolvedFieldsResult{RouteInfo: wantRouteInfo, Err: nil}, routeResolveRes, cmp.AllowUnexported(stack.RouteInfo{})); diff != "" {
- t.Errorf("route resolve result from resolved route mismatch (-want +got):\n%s", diff)
- }
- default:
- t.Fatal("expected route to be immediately resolvable")
- }
- })
- }
-}
-
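-// TestWritePacketsLinkResolution tests that UDP packets written with
-// Route.WritePackets are delivered to the remote once link resolution
-// completes.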
-func TestWritePacketsLinkResolution(t *testing.T) {
- const (
- host1NICID = 1
- host2NICID = 4
- )
-
- tests := []struct {
- name string
- netProto tcpip.NetworkProtocolNumber
- remoteAddr tcpip.Address
- expectedWriteErr tcpip.Error
- }{
- {
- name: "IPv4",
- netProto: ipv4.ProtocolNumber,
- remoteAddr: utils.Ipv4Addr2.AddressWithPrefix.Address,
- expectedWriteErr: nil,
- },
- {
- name: "IPv6",
- netProto: ipv6.ProtocolNumber,
- remoteAddr: utils.Ipv6Addr2.AddressWithPrefix.Address,
- expectedWriteErr: nil,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- stackOpts := stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{arp.NewProtocol, ipv4.NewProtocol, ipv6.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},
- }
-
- host1Stack, host2Stack := setupStack(t, stackOpts, host1NICID, host2NICID)
-
- var serverWQ waiter.Queue
- serverWE, serverCH := waiter.NewChannelEntry(nil)
- serverWQ.EventRegister(&serverWE, waiter.ReadableEvents)
- serverEP, err := host2Stack.NewEndpoint(udp.ProtocolNumber, test.netProto, &serverWQ)
- if err != nil {
- t.Fatalf("host2Stack.NewEndpoint(%d, %d, _): %s", udp.ProtocolNumber, test.netProto, err)
- }
- defer serverEP.Close()
-
- serverAddr := tcpip.FullAddress{Port: 1234}
- if err := serverEP.Bind(serverAddr); err != nil {
- t.Fatalf("serverEP.Bind(%#v): %s", serverAddr, err)
- }
-
- r, err := host1Stack.FindRoute(host1NICID, "", test.remoteAddr, test.netProto, false /* multicastLoop */)
- if err != nil {
- t.Fatalf("host1Stack.FindRoute(%d, '', %s, %d, false): %s", host1NICID, test.remoteAddr, test.netProto, err)
- }
- defer r.Release()
-
- data := []byte{1, 2}
- var pkts stack.PacketBufferList
- for _, d := range data {
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: header.UDPMinimumSize + int(r.MaxHeaderLength()),
- Data: buffer.View([]byte{d}).ToVectorisedView(),
- })
- pkt.TransportProtocolNumber = udp.ProtocolNumber
- length := uint16(pkt.Size())
- udpHdr := header.UDP(pkt.TransportHeader().Push(header.UDPMinimumSize))
- udpHdr.Encode(&header.UDPFields{
- SrcPort: 5555,
- DstPort: serverAddr.Port,
- Length: length,
- })
- xsum := r.PseudoHeaderChecksum(udp.ProtocolNumber, length)
- xsum = header.ChecksumCombine(xsum, pkt.Data().AsRange().Checksum())
- udpHdr.SetChecksum(^udpHdr.CalculateChecksum(xsum))
-
- pkts.PushBack(pkt)
- }
-
- params := stack.NetworkHeaderParams{
- Protocol: udp.ProtocolNumber,
- TTL: 64,
- TOS: stack.DefaultTOS,
- }
-
- if n, err := r.WritePackets(pkts, params); err != nil {
- t.Fatalf("r.WritePackets(_, %#v): %s", params, err)
- } else if want := pkts.Len(); want != n {
- t.Fatalf("got r.WritePackets(_, %#v) = %d, want = %d", params, n, want)
- }
-
- var writer bytes.Buffer
- count := 0
- for {
- var rOpts tcpip.ReadOptions
- res, err := serverEP.Read(&writer, rOpts)
- if err != nil {
- if _, ok := err.(*tcpip.ErrWouldBlock); ok {
-						// Should not have any more bytes to read after we read the sent
-						// number of bytes.
- if count == len(data) {
- break
- }
-
- <-serverCH
- continue
- }
-
- t.Fatalf("serverEP.Read(_, %#v): %s", rOpts, err)
- }
- count += res.Count
- }
-
- if got, want := host2Stack.Stats().UDP.PacketsReceived.Value(), uint64(len(data)); got != want {
- t.Errorf("got host2Stack.Stats().UDP.PacketsReceived.Value() = %d, want = %d", got, want)
- }
- if diff := cmp.Diff(data, writer.Bytes()); diff != "" {
- t.Errorf("read bytes mismatch (-want +got):\n%s", diff)
- }
- })
- }
-}
-
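-// eventType identifies the kind of neighbor table event delivered to a
-// nudDispatcher.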
-type eventType int
-
-const (
- entryAdded eventType = iota
- entryChanged
- entryRemoved
-)
-
-func (t eventType) String() string {
- switch t {
- case entryAdded:
- return "add"
- case entryChanged:
- return "change"
- case entryRemoved:
- return "remove"
- default:
- return fmt.Sprintf("unknown (%d)", t)
- }
-}
-
-type eventInfo struct {
- eventType eventType
- nicID tcpip.NICID
- entry stack.NeighborEntry
-}
-
-func (e eventInfo) String() string {
- return fmt.Sprintf("%s event for NIC #%d, %#v", e.eventType, e.nicID, e.entry)
-}
-
-var _ stack.NUDDispatcher = (*nudDispatcher)(nil)
-
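-// nudDispatcher implements stack.NUDDispatcher and forwards neighbor table
-// events to a channel so tests can assert on them.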
-type nudDispatcher struct {
- c chan eventInfo
-}
-
-func (d *nudDispatcher) OnNeighborAdded(nicID tcpip.NICID, entry stack.NeighborEntry) {
- e := eventInfo{
- eventType: entryAdded,
- nicID: nicID,
- entry: entry,
- }
- d.c <- e
-}
-
-func (d *nudDispatcher) OnNeighborChanged(nicID tcpip.NICID, entry stack.NeighborEntry) {
- e := eventInfo{
- eventType: entryChanged,
- nicID: nicID,
- entry: entry,
- }
- d.c <- e
-}
-
-func (d *nudDispatcher) OnNeighborRemoved(nicID tcpip.NICID, entry stack.NeighborEntry) {
- e := eventInfo{
- eventType: entryRemoved,
- nicID: nicID,
- entry: entry,
- }
- d.c <- e
-}
-
-func (d *nudDispatcher) expectEvent(want eventInfo) error {
- select {
- case got := <-d.c:
- if diff := cmp.Diff(want, got, cmp.AllowUnexported(eventInfo{}), cmpopts.IgnoreFields(stack.NeighborEntry{}, "UpdatedAt")); diff != "" {
- return fmt.Errorf("got invalid event (-want +got):\n%s", diff)
- }
- return nil
- default:
- return fmt.Errorf("event didn't arrive")
- }
-}
-
-// TestTCPConfirmNeighborReachability tests that TCP informs layers beneath it
-// that the neighbor used for a route is reachable.
-func TestTCPConfirmNeighborReachability(t *testing.T) {
- tests := []struct {
- name string
- netProto tcpip.NetworkProtocolNumber
- remoteAddr tcpip.Address
- neighborAddr tcpip.Address
- getEndpoints func(*testing.T, *stack.Stack, *stack.Stack, *stack.Stack) (tcpip.Endpoint, <-chan struct{}, tcpip.Endpoint, <-chan struct{})
- isHost1Listener bool
- }{
- {
- name: "IPv4 active connection through neighbor",
- netProto: ipv4.ProtocolNumber,
- remoteAddr: utils.Host2IPv4Addr.AddressWithPrefix.Address,
- neighborAddr: utils.RouterNIC1IPv4Addr.AddressWithPrefix.Address,
- getEndpoints: func(t *testing.T, host1Stack, _, host2Stack *stack.Stack) (tcpip.Endpoint, <-chan struct{}, tcpip.Endpoint, <-chan struct{}) {
- var listenerWQ waiter.Queue
- listenerWE, listenerCH := waiter.NewChannelEntry(nil)
- listenerWQ.EventRegister(&listenerWE, waiter.EventIn)
- listenerEP, err := host2Stack.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &listenerWQ)
- if err != nil {
- t.Fatalf("host2Stack.NewEndpoint(%d, %d, _): %s", tcp.ProtocolNumber, ipv4.ProtocolNumber, err)
- }
- t.Cleanup(listenerEP.Close)
-
- var clientWQ waiter.Queue
- clientWE, clientCH := waiter.NewChannelEntry(nil)
- clientWQ.EventRegister(&clientWE, waiter.ReadableEvents|waiter.WritableEvents)
- clientEP, err := host1Stack.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &clientWQ)
- if err != nil {
- t.Fatalf("host1Stack.NewEndpoint(%d, %d, _): %s", tcp.ProtocolNumber, ipv4.ProtocolNumber, err)
- }
-
- return listenerEP, listenerCH, clientEP, clientCH
- },
- },
- {
- name: "IPv6 active connection through neighbor",
- netProto: ipv6.ProtocolNumber,
- remoteAddr: utils.Host2IPv6Addr.AddressWithPrefix.Address,
- neighborAddr: utils.RouterNIC1IPv6Addr.AddressWithPrefix.Address,
- getEndpoints: func(t *testing.T, host1Stack, _, host2Stack *stack.Stack) (tcpip.Endpoint, <-chan struct{}, tcpip.Endpoint, <-chan struct{}) {
- var listenerWQ waiter.Queue
- listenerWE, listenerCH := waiter.NewChannelEntry(nil)
- listenerWQ.EventRegister(&listenerWE, waiter.EventIn)
- listenerEP, err := host2Stack.NewEndpoint(tcp.ProtocolNumber, ipv6.ProtocolNumber, &listenerWQ)
- if err != nil {
- t.Fatalf("host2Stack.NewEndpoint(%d, %d, _): %s", tcp.ProtocolNumber, ipv6.ProtocolNumber, err)
- }
- t.Cleanup(listenerEP.Close)
-
- var clientWQ waiter.Queue
- clientWE, clientCH := waiter.NewChannelEntry(nil)
- clientWQ.EventRegister(&clientWE, waiter.ReadableEvents|waiter.WritableEvents)
- clientEP, err := host1Stack.NewEndpoint(tcp.ProtocolNumber, ipv6.ProtocolNumber, &clientWQ)
- if err != nil {
- t.Fatalf("host1Stack.NewEndpoint(%d, %d, _): %s", tcp.ProtocolNumber, ipv6.ProtocolNumber, err)
- }
-
- return listenerEP, listenerCH, clientEP, clientCH
- },
- },
- {
- name: "IPv4 active connection to neighbor",
- netProto: ipv4.ProtocolNumber,
- remoteAddr: utils.RouterNIC1IPv4Addr.AddressWithPrefix.Address,
- neighborAddr: utils.RouterNIC1IPv4Addr.AddressWithPrefix.Address,
- getEndpoints: func(t *testing.T, host1Stack, routerStack, _ *stack.Stack) (tcpip.Endpoint, <-chan struct{}, tcpip.Endpoint, <-chan struct{}) {
- var listenerWQ waiter.Queue
- listenerWE, listenerCH := waiter.NewChannelEntry(nil)
- listenerWQ.EventRegister(&listenerWE, waiter.EventIn)
- listenerEP, err := routerStack.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &listenerWQ)
- if err != nil {
- t.Fatalf("routerStack.NewEndpoint(%d, %d, _): %s", tcp.ProtocolNumber, ipv4.ProtocolNumber, err)
- }
- t.Cleanup(listenerEP.Close)
-
- var clientWQ waiter.Queue
- clientWE, clientCH := waiter.NewChannelEntry(nil)
- clientWQ.EventRegister(&clientWE, waiter.ReadableEvents|waiter.WritableEvents)
- clientEP, err := host1Stack.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &clientWQ)
- if err != nil {
- t.Fatalf("host1Stack.NewEndpoint(%d, %d, _): %s", tcp.ProtocolNumber, ipv4.ProtocolNumber, err)
- }
-
- return listenerEP, listenerCH, clientEP, clientCH
- },
- },
- {
- name: "IPv6 active connection to neighbor",
- netProto: ipv6.ProtocolNumber,
- remoteAddr: utils.RouterNIC1IPv6Addr.AddressWithPrefix.Address,
- neighborAddr: utils.RouterNIC1IPv6Addr.AddressWithPrefix.Address,
- getEndpoints: func(t *testing.T, host1Stack, routerStack, _ *stack.Stack) (tcpip.Endpoint, <-chan struct{}, tcpip.Endpoint, <-chan struct{}) {
- var listenerWQ waiter.Queue
- listenerWE, listenerCH := waiter.NewChannelEntry(nil)
- listenerWQ.EventRegister(&listenerWE, waiter.EventIn)
- listenerEP, err := routerStack.NewEndpoint(tcp.ProtocolNumber, ipv6.ProtocolNumber, &listenerWQ)
- if err != nil {
- t.Fatalf("routerStack.NewEndpoint(%d, %d, _): %s", tcp.ProtocolNumber, ipv6.ProtocolNumber, err)
- }
- t.Cleanup(listenerEP.Close)
-
- var clientWQ waiter.Queue
- clientWE, clientCH := waiter.NewChannelEntry(nil)
- clientWQ.EventRegister(&clientWE, waiter.ReadableEvents|waiter.WritableEvents)
- clientEP, err := host1Stack.NewEndpoint(tcp.ProtocolNumber, ipv6.ProtocolNumber, &clientWQ)
- if err != nil {
- t.Fatalf("host1Stack.NewEndpoint(%d, %d, _): %s", tcp.ProtocolNumber, ipv6.ProtocolNumber, err)
- }
-
- return listenerEP, listenerCH, clientEP, clientCH
- },
- },
- {
- name: "IPv4 passive connection to neighbor",
- netProto: ipv4.ProtocolNumber,
- remoteAddr: utils.Host1IPv4Addr.AddressWithPrefix.Address,
- neighborAddr: utils.RouterNIC1IPv4Addr.AddressWithPrefix.Address,
- getEndpoints: func(t *testing.T, host1Stack, routerStack, _ *stack.Stack) (tcpip.Endpoint, <-chan struct{}, tcpip.Endpoint, <-chan struct{}) {
- var listenerWQ waiter.Queue
- listenerWE, listenerCH := waiter.NewChannelEntry(nil)
- listenerWQ.EventRegister(&listenerWE, waiter.EventIn)
- listenerEP, err := host1Stack.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &listenerWQ)
- if err != nil {
- t.Fatalf("host1Stack.NewEndpoint(%d, %d, _): %s", tcp.ProtocolNumber, ipv4.ProtocolNumber, err)
- }
- t.Cleanup(listenerEP.Close)
-
- var clientWQ waiter.Queue
- clientWE, clientCH := waiter.NewChannelEntry(nil)
- clientWQ.EventRegister(&clientWE, waiter.ReadableEvents|waiter.WritableEvents)
- clientEP, err := routerStack.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &clientWQ)
- if err != nil {
- t.Fatalf("routerStack.NewEndpoint(%d, %d, _): %s", tcp.ProtocolNumber, ipv4.ProtocolNumber, err)
- }
-
- return listenerEP, listenerCH, clientEP, clientCH
- },
- isHost1Listener: true,
- },
- {
- name: "IPv6 passive connection to neighbor",
- netProto: ipv6.ProtocolNumber,
- remoteAddr: utils.Host1IPv6Addr.AddressWithPrefix.Address,
- neighborAddr: utils.RouterNIC1IPv6Addr.AddressWithPrefix.Address,
- getEndpoints: func(t *testing.T, host1Stack, routerStack, _ *stack.Stack) (tcpip.Endpoint, <-chan struct{}, tcpip.Endpoint, <-chan struct{}) {
- var listenerWQ waiter.Queue
- listenerWE, listenerCH := waiter.NewChannelEntry(nil)
- listenerWQ.EventRegister(&listenerWE, waiter.EventIn)
- listenerEP, err := host1Stack.NewEndpoint(tcp.ProtocolNumber, ipv6.ProtocolNumber, &listenerWQ)
- if err != nil {
- t.Fatalf("host1Stack.NewEndpoint(%d, %d, _): %s", tcp.ProtocolNumber, ipv6.ProtocolNumber, err)
- }
- t.Cleanup(listenerEP.Close)
-
- var clientWQ waiter.Queue
- clientWE, clientCH := waiter.NewChannelEntry(nil)
- clientWQ.EventRegister(&clientWE, waiter.ReadableEvents|waiter.WritableEvents)
- clientEP, err := routerStack.NewEndpoint(tcp.ProtocolNumber, ipv6.ProtocolNumber, &clientWQ)
- if err != nil {
- t.Fatalf("routerStack.NewEndpoint(%d, %d, _): %s", tcp.ProtocolNumber, ipv6.ProtocolNumber, err)
- }
-
- return listenerEP, listenerCH, clientEP, clientCH
- },
- isHost1Listener: true,
- },
- {
- name: "IPv4 passive connection through neighbor",
- netProto: ipv4.ProtocolNumber,
- remoteAddr: utils.Host1IPv4Addr.AddressWithPrefix.Address,
- neighborAddr: utils.RouterNIC1IPv4Addr.AddressWithPrefix.Address,
- getEndpoints: func(t *testing.T, host1Stack, _, host2Stack *stack.Stack) (tcpip.Endpoint, <-chan struct{}, tcpip.Endpoint, <-chan struct{}) {
- var listenerWQ waiter.Queue
- listenerWE, listenerCH := waiter.NewChannelEntry(nil)
- listenerWQ.EventRegister(&listenerWE, waiter.EventIn)
- listenerEP, err := host1Stack.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &listenerWQ)
- if err != nil {
- t.Fatalf("host1Stack.NewEndpoint(%d, %d, _): %s", tcp.ProtocolNumber, ipv4.ProtocolNumber, err)
- }
- t.Cleanup(listenerEP.Close)
-
- var clientWQ waiter.Queue
- clientWE, clientCH := waiter.NewChannelEntry(nil)
- clientWQ.EventRegister(&clientWE, waiter.ReadableEvents|waiter.WritableEvents)
- clientEP, err := host2Stack.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &clientWQ)
- if err != nil {
- t.Fatalf("host2Stack.NewEndpoint(%d, %d, _): %s", tcp.ProtocolNumber, ipv4.ProtocolNumber, err)
- }
-
- return listenerEP, listenerCH, clientEP, clientCH
- },
- isHost1Listener: true,
- },
- {
- name: "IPv6 passive connection through neighbor",
- netProto: ipv6.ProtocolNumber,
- remoteAddr: utils.Host1IPv6Addr.AddressWithPrefix.Address,
- neighborAddr: utils.RouterNIC1IPv6Addr.AddressWithPrefix.Address,
- getEndpoints: func(t *testing.T, host1Stack, _, host2Stack *stack.Stack) (tcpip.Endpoint, <-chan struct{}, tcpip.Endpoint, <-chan struct{}) {
- var listenerWQ waiter.Queue
- listenerWE, listenerCH := waiter.NewChannelEntry(nil)
- listenerWQ.EventRegister(&listenerWE, waiter.EventIn)
- listenerEP, err := host1Stack.NewEndpoint(tcp.ProtocolNumber, ipv6.ProtocolNumber, &listenerWQ)
- if err != nil {
- t.Fatalf("host1Stack.NewEndpoint(%d, %d, _): %s", tcp.ProtocolNumber, ipv6.ProtocolNumber, err)
- }
- t.Cleanup(listenerEP.Close)
-
- var clientWQ waiter.Queue
- clientWE, clientCH := waiter.NewChannelEntry(nil)
- clientWQ.EventRegister(&clientWE, waiter.ReadableEvents|waiter.WritableEvents)
- clientEP, err := host2Stack.NewEndpoint(tcp.ProtocolNumber, ipv6.ProtocolNumber, &clientWQ)
- if err != nil {
- t.Fatalf("host2Stack.NewEndpoint(%d, %d, _): %s", tcp.ProtocolNumber, ipv6.ProtocolNumber, err)
- }
-
- return listenerEP, listenerCH, clientEP, clientCH
- },
- isHost1Listener: true,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- clock := faketime.NewManualClock()
- nudDisp := nudDispatcher{
- c: make(chan eventInfo, 3),
- }
- stackOpts := stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{arp.NewProtocol, ipv4.NewProtocol, ipv6.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{tcp.NewProtocol},
- Clock: clock,
- }
- host1StackOpts := stackOpts
- host1StackOpts.NUDDisp = &nudDisp
-
- host1Stack := stack.New(host1StackOpts)
- routerStack := stack.New(stackOpts)
- host2Stack := stack.New(stackOpts)
- utils.SetupRoutedStacks(t, host1Stack, routerStack, host2Stack)
-
- // Add a reachable dynamic entry to our neighbor table for the remote.
- {
- ch := make(chan stack.LinkResolutionResult, 1)
- err := host1Stack.GetLinkAddress(utils.Host1NICID, test.neighborAddr, "", test.netProto, func(r stack.LinkResolutionResult) {
- ch <- r
- })
- if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Fatalf("got host1Stack.GetLinkAddress(%d, %s, '', %d, _) = %s, want = %s", utils.Host1NICID, test.neighborAddr, test.netProto, err, &tcpip.ErrWouldBlock{})
- }
- if diff := cmp.Diff(stack.LinkResolutionResult{LinkAddress: utils.LinkAddr2, Err: nil}, <-ch); diff != "" {
- t.Fatalf("link resolution mismatch (-want +got):\n%s", diff)
- }
- }
- if err := nudDisp.expectEvent(eventInfo{
- eventType: entryAdded,
- nicID: utils.Host1NICID,
- entry: stack.NeighborEntry{State: stack.Incomplete, Addr: test.neighborAddr},
- }); err != nil {
- t.Fatalf("error waiting for initial NUD event: %s", err)
- }
- if err := nudDisp.expectEvent(eventInfo{
- eventType: entryChanged,
- nicID: utils.Host1NICID,
- entry: stack.NeighborEntry{State: stack.Reachable, Addr: test.neighborAddr, LinkAddr: utils.LinkAddr2},
- }); err != nil {
- t.Fatalf("error waiting for reachable NUD event: %s", err)
- }
-
- // Wait for the remote's neighbor entry to be stale before creating a
- // TCP connection from host1 to some remote.
- nudConfigs, err := host1Stack.NUDConfigurations(utils.Host1NICID, test.netProto)
- if err != nil {
- t.Fatalf("host1Stack.NUDConfigurations(%d, %d): %s", utils.Host1NICID, test.netProto, err)
- }
-			// The maximum reachable time for a neighbor is the base reachable time
-			// scaled by the maximum random factor.
- //
- // See NUDConfigurations.BaseReachableTime for more information.
- maxReachableTime := time.Duration(float32(nudConfigs.BaseReachableTime) * nudConfigs.MaxRandomFactor)
- clock.Advance(maxReachableTime)
- if err := nudDisp.expectEvent(eventInfo{
- eventType: entryChanged,
- nicID: utils.Host1NICID,
- entry: stack.NeighborEntry{State: stack.Stale, Addr: test.neighborAddr, LinkAddr: utils.LinkAddr2},
- }); err != nil {
- t.Fatalf("error waiting for stale NUD event: %s", err)
- }
-
- listenerEP, listenerCH, clientEP, clientCH := test.getEndpoints(t, host1Stack, routerStack, host2Stack)
- defer clientEP.Close()
- listenerAddr := tcpip.FullAddress{Addr: test.remoteAddr, Port: 1234}
- if err := listenerEP.Bind(listenerAddr); err != nil {
- t.Fatalf("listenerEP.Bind(%#v): %s", listenerAddr, err)
- }
- if err := listenerEP.Listen(1); err != nil {
- t.Fatalf("listenerEP.Listen(1): %s", err)
- }
- {
- err := clientEP.Connect(listenerAddr)
- if _, ok := err.(*tcpip.ErrConnectStarted); !ok {
- t.Fatalf("got clientEP.Connect(%#v) = %s, want = %s", listenerAddr, err, &tcpip.ErrConnectStarted{})
- }
- }
-
-			// Wait for the TCP handshake to complete, then make sure the neighbor is
-			// reachable without entering the probe state, as TCP should provide NUD
-			// with confirmation that the neighbor is reachable (indicated by the
-			// successful 3-way handshake).
- <-clientCH
- if err := nudDisp.expectEvent(eventInfo{
- eventType: entryChanged,
- nicID: utils.Host1NICID,
- entry: stack.NeighborEntry{State: stack.Delay, Addr: test.neighborAddr, LinkAddr: utils.LinkAddr2},
- }); err != nil {
- t.Fatalf("error waiting for delay NUD event: %s", err)
- }
- <-listenerCH
- if err := nudDisp.expectEvent(eventInfo{
- eventType: entryChanged,
- nicID: utils.Host1NICID,
- entry: stack.NeighborEntry{State: stack.Reachable, Addr: test.neighborAddr, LinkAddr: utils.LinkAddr2},
- }); err != nil {
- t.Fatalf("error waiting for reachable NUD event: %s", err)
- }
-
- peerEP, peerWQ, err := listenerEP.Accept(nil)
- if err != nil {
- t.Fatalf("listenerEP.Accept(): %s", err)
- }
- defer peerEP.Close()
- peerWE, peerCH := waiter.NewChannelEntry(nil)
- peerWQ.EventRegister(&peerWE, waiter.ReadableEvents)
-
- // Wait for the neighbor to be stale again then send data to the remote.
- //
-			// On successful transmission, the neighbor should become reachable
-			// without being probed, as the TCP ACK received in response indicates
-			// that the neighbor is reachable.
- clock.Advance(maxReachableTime)
- if err := nudDisp.expectEvent(eventInfo{
- eventType: entryChanged,
- nicID: utils.Host1NICID,
- entry: stack.NeighborEntry{State: stack.Stale, Addr: test.neighborAddr, LinkAddr: utils.LinkAddr2},
- }); err != nil {
- t.Fatalf("error waiting for stale NUD event: %s", err)
- }
- {
- var r bytes.Reader
- r.Reset([]byte{0})
- var wOpts tcpip.WriteOptions
- if _, err := clientEP.Write(&r, wOpts); err != nil {
- t.Errorf("clientEP.Write(_, %#v): %s", wOpts, err)
- }
- }
- // Heads up, there is a race here.
- //
- // Incoming TCP segments are handled in
- // tcp.(*endpoint).handleSegmentLocked:
- //
- // - tcp.(*endpoint).rcv.handleRcvdSegment puts the segment on the
- // segment queue and notifies waiting readers (such as this channel)
- //
- // - tcp.(*endpoint).snd.handleRcvdSegment sends an ACK for the segment
- // and notifies the NUD machinery that the peer is reachable
- //
- // Thus we must permit a delay between the readable signal and the
- // expected NUD event.
- //
- // At the time of writing, this race is reliably hit with gotsan.
- <-peerCH
- for len(nudDisp.c) == 0 {
- runtime.Gosched()
- }
- if err := nudDisp.expectEvent(eventInfo{
- eventType: entryChanged,
- nicID: utils.Host1NICID,
- entry: stack.NeighborEntry{State: stack.Delay, Addr: test.neighborAddr, LinkAddr: utils.LinkAddr2},
- }); err != nil {
- t.Fatalf("error waiting for delay NUD event: %s", err)
- }
- if test.isHost1Listener {
-				// Since host1 is the listener, it does not send any data, so TCP has
-				// no way to know it is making forward progress. Because of this, TCP
-				// should not mark the route reachable, and NUD should go through the
-				// probe state.
- clock.Advance(nudConfigs.DelayFirstProbeTime)
- if err := nudDisp.expectEvent(eventInfo{
- eventType: entryChanged,
- nicID: utils.Host1NICID,
- entry: stack.NeighborEntry{State: stack.Probe, Addr: test.neighborAddr, LinkAddr: utils.LinkAddr2},
- }); err != nil {
- t.Fatalf("error waiting for probe NUD event: %s", err)
- }
- }
- {
- var r bytes.Reader
- r.Reset([]byte{0})
- var wOpts tcpip.WriteOptions
- if _, err := peerEP.Write(&r, wOpts); err != nil {
- t.Errorf("peerEP.Write(_, %#v): %s", wOpts, err)
- }
- }
- <-clientCH
- if err := nudDisp.expectEvent(eventInfo{
- eventType: entryChanged,
- nicID: utils.Host1NICID,
- entry: stack.NeighborEntry{State: stack.Reachable, Addr: test.neighborAddr, LinkAddr: utils.LinkAddr2},
- }); err != nil {
- t.Fatalf("error waiting for reachable NUD event: %s", err)
- }
- })
- }
-}
-
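-// TestDAD tests duplicate address detection for IPv4 (via ARP) and IPv6 (via
-// NDP) for both duplicated and non-duplicated addresses.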
-func TestDAD(t *testing.T) {
- dadConfigs := stack.DADConfigurations{
- DupAddrDetectTransmits: 1,
- RetransmitTimer: time.Second,
- }
-
- tests := []struct {
- name string
- netProto tcpip.NetworkProtocolNumber
- dadNetProto tcpip.NetworkProtocolNumber
- remoteAddr tcpip.Address
- expectedResult stack.DADResult
- }{
- {
- name: "IPv4 own address",
- netProto: ipv4.ProtocolNumber,
- dadNetProto: arp.ProtocolNumber,
- remoteAddr: utils.Ipv4Addr1.AddressWithPrefix.Address,
- expectedResult: &stack.DADSucceeded{},
- },
- {
- name: "IPv6 own address",
- netProto: ipv6.ProtocolNumber,
- dadNetProto: ipv6.ProtocolNumber,
- remoteAddr: utils.Ipv6Addr1.AddressWithPrefix.Address,
- expectedResult: &stack.DADSucceeded{},
- },
- {
- name: "IPv4 duplicate address",
- netProto: ipv4.ProtocolNumber,
- dadNetProto: arp.ProtocolNumber,
- remoteAddr: utils.Ipv4Addr2.AddressWithPrefix.Address,
- expectedResult: &stack.DADDupAddrDetected{HolderLinkAddress: utils.LinkAddr2},
- },
- {
- name: "IPv6 duplicate address",
- netProto: ipv6.ProtocolNumber,
- dadNetProto: ipv6.ProtocolNumber,
- remoteAddr: utils.Ipv6Addr2.AddressWithPrefix.Address,
- expectedResult: &stack.DADDupAddrDetected{HolderLinkAddress: utils.LinkAddr2},
- },
- {
- name: "IPv4 no duplicate address",
- netProto: ipv4.ProtocolNumber,
- dadNetProto: arp.ProtocolNumber,
- remoteAddr: utils.Ipv4Addr3.AddressWithPrefix.Address,
- expectedResult: &stack.DADSucceeded{},
- },
- {
- name: "IPv6 no duplicate address",
- netProto: ipv6.ProtocolNumber,
- dadNetProto: ipv6.ProtocolNumber,
- remoteAddr: utils.Ipv6Addr3.AddressWithPrefix.Address,
- expectedResult: &stack.DADSucceeded{},
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- clock := faketime.NewManualClock()
- stackOpts := stack.Options{
- Clock: clock,
- NetworkProtocols: []stack.NetworkProtocolFactory{
- arp.NewProtocol,
- ipv4.NewProtocol,
- ipv6.NewProtocol,
- },
- }
-
- host1Stack, _ := setupStack(t, stackOpts, utils.Host1NICID, utils.Host2NICID)
-
- // DAD should be disabled by default.
- if res, err := host1Stack.CheckDuplicateAddress(utils.Host1NICID, test.netProto, test.remoteAddr, func(r stack.DADResult) {
- t.Errorf("unexpectedly called DAD completion handler when DAD was supposed to be disabled")
- }); err != nil {
- t.Fatalf("host1Stack.CheckDuplicateAddress(%d, %d, %s, _): %s", utils.Host1NICID, test.netProto, test.remoteAddr, err)
- } else if res != stack.DADDisabled {
- t.Errorf("got host1Stack.CheckDuplicateAddress(%d, %d, %s, _) = %d, want = %d", utils.Host1NICID, test.netProto, test.remoteAddr, res, stack.DADDisabled)
- }
-
- // Enable DAD then attempt to check if an address is duplicated.
- netEP, err := host1Stack.GetNetworkEndpoint(utils.Host1NICID, test.dadNetProto)
- if err != nil {
- t.Fatalf("host1Stack.GetNetworkEndpoint(%d, %d): %s", utils.Host1NICID, test.dadNetProto, err)
- }
- dad, ok := netEP.(stack.DuplicateAddressDetector)
- if !ok {
- t.Fatalf("expected %T to implement stack.DuplicateAddressDetector", netEP)
- }
- dad.SetDADConfigurations(dadConfigs)
- ch := make(chan stack.DADResult, 3)
- if res, err := host1Stack.CheckDuplicateAddress(utils.Host1NICID, test.netProto, test.remoteAddr, func(r stack.DADResult) {
- ch <- r
- }); err != nil {
- t.Fatalf("host1Stack.CheckDuplicateAddress(%d, %d, %s, _): %s", utils.Host1NICID, test.netProto, test.remoteAddr, err)
- } else if res != stack.DADStarting {
- t.Errorf("got host1Stack.CheckDuplicateAddress(%d, %d, %s, _) = %d, want = %d", utils.Host1NICID, test.netProto, test.remoteAddr, res, stack.DADStarting)
- }
-
- expectResults := 1
- if _, ok := test.expectedResult.(*stack.DADSucceeded); ok {
- const delta = time.Nanosecond
- clock.Advance(time.Duration(dadConfigs.DupAddrDetectTransmits)*dadConfigs.RetransmitTimer - delta)
- select {
- case r := <-ch:
- t.Fatalf("unexpectedly got DAD result before the DAD timeout; r = %#v", r)
- default:
- }
-
-				// If we expect DAD to succeed, try requesting DAD again on the same
-				// address. The handler for the new request should be called once the
-				// original DAD request completes.
- expectResults = 2
- if res, err := host1Stack.CheckDuplicateAddress(utils.Host1NICID, test.netProto, test.remoteAddr, func(r stack.DADResult) {
- ch <- r
- }); err != nil {
- t.Fatalf("host1Stack.CheckDuplicateAddress(%d, %d, %s, _): %s", utils.Host1NICID, test.netProto, test.remoteAddr, err)
- } else if res != stack.DADAlreadyRunning {
- t.Errorf("got host1Stack.CheckDuplicateAddress(%d, %d, %s, _) = %d, want = %d", utils.Host1NICID, test.netProto, test.remoteAddr, res, stack.DADAlreadyRunning)
- }
-
- clock.Advance(delta)
- }
-
- for i := 0; i < expectResults; i++ {
- if diff := cmp.Diff(test.expectedResult, <-ch); diff != "" {
- t.Errorf("(i=%d) DAD result mismatch (-want +got):\n%s", i, diff)
- }
- }
-
- // Should have no more results.
- select {
- case r := <-ch:
- t.Errorf("unexpectedly got an extra DAD result; r = %#v", r)
- default:
- }
- })
- }
-}
diff --git a/pkg/tcpip/tests/integration/loopback_test.go b/pkg/tcpip/tests/integration/loopback_test.go
deleted file mode 100644
index b2008f0b2..000000000
--- a/pkg/tcpip/tests/integration/loopback_test.go
+++ /dev/null
@@ -1,763 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package loopback_test
-
-import (
- "bytes"
- "testing"
- "time"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/checker"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/link/channel"
- "gvisor.dev/gvisor/pkg/tcpip/link/loopback"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/tests/utils"
- "gvisor.dev/gvisor/pkg/tcpip/testutil"
- "gvisor.dev/gvisor/pkg/tcpip/transport/icmp"
- "gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
- "gvisor.dev/gvisor/pkg/tcpip/transport/udp"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-var _ ipv6.NDPDispatcher = (*ndpDispatcher)(nil)
-
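-// ndpDispatcher is a no-op implementation of ipv6.NDPDispatcher.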
-type ndpDispatcher struct{}
-
-func (*ndpDispatcher) OnDuplicateAddressDetectionResult(tcpip.NICID, tcpip.Address, stack.DADResult) {
-}
-
-func (*ndpDispatcher) OnOffLinkRouteUpdated(tcpip.NICID, tcpip.Subnet, tcpip.Address, header.NDPRoutePreference) {
-}
-
-func (*ndpDispatcher) OnOffLinkRouteInvalidated(tcpip.NICID, tcpip.Subnet, tcpip.Address) {}
-
-func (*ndpDispatcher) OnOnLinkPrefixDiscovered(tcpip.NICID, tcpip.Subnet) {
-}
-
-func (*ndpDispatcher) OnOnLinkPrefixInvalidated(tcpip.NICID, tcpip.Subnet) {}
-
-func (*ndpDispatcher) OnAutoGenAddress(tcpip.NICID, tcpip.AddressWithPrefix) {
-}
-
-func (*ndpDispatcher) OnAutoGenAddressDeprecated(tcpip.NICID, tcpip.AddressWithPrefix) {}
-
-func (*ndpDispatcher) OnAutoGenAddressInvalidated(tcpip.NICID, tcpip.AddressWithPrefix) {}
-
-func (*ndpDispatcher) OnRecursiveDNSServerOption(tcpip.NICID, []tcpip.Address, time.Duration) {}
-
-func (*ndpDispatcher) OnDNSSearchListOption(tcpip.NICID, []string, time.Duration) {}
-
-func (*ndpDispatcher) OnDHCPv6Configuration(tcpip.NICID, ipv6.DHCPv6ConfigurationFromNDPRA) {}
-
-// TestInitialLoopbackAddresses tests that the loopback interface does not
-// auto-generate a link-local address when it is brought up.
-func TestInitialLoopbackAddresses(t *testing.T) {
- const nicID = 1
-
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocolWithOptions(ipv6.Options{
- NDPDisp: &ndpDispatcher{},
- AutoGenLinkLocal: true,
- OpaqueIIDOpts: ipv6.OpaqueInterfaceIdentifierOptions{
- NICNameFromID: func(nicID tcpip.NICID, nicName string) string {
- t.Fatalf("should not attempt to get name for NIC with ID = %d; nicName = %s", nicID, nicName)
- return ""
- },
- },
- })},
- })
-
- if err := s.CreateNIC(nicID, loopback.New()); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID, err)
- }
-
- nicsInfo := s.NICInfo()
- if nicInfo, ok := nicsInfo[nicID]; !ok {
- t.Fatalf("did not find NIC with ID = %d in s.NICInfo() = %#v", nicID, nicsInfo)
- } else if got := len(nicInfo.ProtocolAddresses); got != 0 {
- t.Fatalf("got len(nicInfo.ProtocolAddresses) = %d, want = 0; nicInfo.ProtocolAddresses = %#v", got, nicInfo.ProtocolAddresses)
- }
-}
-
-// TestLoopbackAcceptAllInSubnetUDP tests that a loopback interface considers
-// itself bound to all addresses in the subnet of an assigned address, and that
-// UDP traffic is sent and received correctly.
-func TestLoopbackAcceptAllInSubnetUDP(t *testing.T) {
- const (
- nicID = 1
- localPort = 80
- )
-
- data := []byte{1, 2, 3, 4}
-
- ipv4ProtocolAddress := tcpip.ProtocolAddress{
- Protocol: header.IPv4ProtocolNumber,
- AddressWithPrefix: utils.Ipv4Addr,
- }
- ipv4Bytes := []byte(ipv4ProtocolAddress.AddressWithPrefix.Address)
- ipv4Bytes[len(ipv4Bytes)-1]++
- otherIPv4Address := tcpip.Address(ipv4Bytes)
-
- ipv6ProtocolAddress := tcpip.ProtocolAddress{
- Protocol: header.IPv6ProtocolNumber,
- AddressWithPrefix: utils.Ipv6Addr,
- }
- ipv6Bytes := []byte(utils.Ipv6Addr.Address)
- ipv6Bytes[len(ipv6Bytes)-1]++
- otherIPv6Address := tcpip.Address(ipv6Bytes)
-
- tests := []struct {
- name string
- addAddress tcpip.ProtocolAddress
- bindAddr tcpip.Address
- dstAddr tcpip.Address
- expectRx bool
- }{
- {
- name: "IPv4 bind to wildcard and send to assigned address",
- addAddress: ipv4ProtocolAddress,
- dstAddr: ipv4ProtocolAddress.AddressWithPrefix.Address,
- expectRx: true,
- },
- {
- name: "IPv4 bind to wildcard and send to other subnet-local address",
- addAddress: ipv4ProtocolAddress,
- dstAddr: otherIPv4Address,
- expectRx: true,
- },
- {
- name: "IPv4 bind to wildcard send to other address",
- addAddress: ipv4ProtocolAddress,
- dstAddr: utils.RemoteIPv4Addr,
- expectRx: false,
- },
- {
- name: "IPv4 bind to other subnet-local address and send to assigned address",
- addAddress: ipv4ProtocolAddress,
- bindAddr: otherIPv4Address,
- dstAddr: ipv4ProtocolAddress.AddressWithPrefix.Address,
- expectRx: false,
- },
- {
- name: "IPv4 bind and send to other subnet-local address",
- addAddress: ipv4ProtocolAddress,
- bindAddr: otherIPv4Address,
- dstAddr: otherIPv4Address,
- expectRx: true,
- },
- {
- name: "IPv4 bind to assigned address and send to other subnet-local address",
- addAddress: ipv4ProtocolAddress,
- bindAddr: ipv4ProtocolAddress.AddressWithPrefix.Address,
- dstAddr: otherIPv4Address,
- expectRx: false,
- },
-
- {
- name: "IPv6 bind and send to assigned address",
- addAddress: ipv6ProtocolAddress,
- bindAddr: utils.Ipv6Addr.Address,
- dstAddr: utils.Ipv6Addr.Address,
- expectRx: true,
- },
- {
- name: "IPv6 bind to wildcard and send to other subnet-local address",
- addAddress: ipv6ProtocolAddress,
- dstAddr: otherIPv6Address,
- expectRx: false,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},
- })
- if err := s.CreateNIC(nicID, loopback.New()); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID, err)
- }
- if err := s.AddProtocolAddress(nicID, test.addAddress); err != nil {
- t.Fatalf("AddProtocolAddress(%d, %+v): %s", nicID, test.addAddress, err)
- }
- s.SetRouteTable([]tcpip.Route{
- {
- Destination: header.IPv4EmptySubnet,
- NIC: nicID,
- },
- {
- Destination: header.IPv6EmptySubnet,
- NIC: nicID,
- },
- })
-
- var wq waiter.Queue
- rep, err := s.NewEndpoint(udp.ProtocolNumber, test.addAddress.Protocol, &wq)
- if err != nil {
- t.Fatalf("NewEndpoint(%d, %d, _): %s", udp.ProtocolNumber, test.addAddress.Protocol, err)
- }
- defer rep.Close()
-
- bindAddr := tcpip.FullAddress{Addr: test.bindAddr, Port: localPort}
- if err := rep.Bind(bindAddr); err != nil {
- t.Fatalf("rep.Bind(%+v): %s", bindAddr, err)
- }
-
- sep, err := s.NewEndpoint(udp.ProtocolNumber, test.addAddress.Protocol, &wq)
- if err != nil {
- t.Fatalf("NewEndpoint(%d, %d, _): %s", udp.ProtocolNumber, test.addAddress.Protocol, err)
- }
- defer sep.Close()
-
- wopts := tcpip.WriteOptions{
- To: &tcpip.FullAddress{
- Addr: test.dstAddr,
- Port: localPort,
- },
- }
- var r bytes.Reader
- r.Reset(data)
- n, err := sep.Write(&r, wopts)
- if err != nil {
- t.Fatalf("sep.Write(_, _): %s", err)
- }
- if want := int64(len(data)); n != want {
- t.Fatalf("got sep.Write(_, _) = (%d, nil), want = (%d, nil)", n, want)
- }
-
- var buf bytes.Buffer
- opts := tcpip.ReadOptions{NeedRemoteAddr: true}
- if res, err := rep.Read(&buf, opts); test.expectRx {
- if err != nil {
- t.Fatalf("rep.Read(_, %#v): %s", opts, err)
- }
- if diff := cmp.Diff(tcpip.ReadResult{
- Count: buf.Len(),
- Total: buf.Len(),
- RemoteAddr: tcpip.FullAddress{
- Addr: test.addAddress.AddressWithPrefix.Address,
- },
- }, res,
- checker.IgnoreCmpPath("ControlMessages", "RemoteAddr.NIC", "RemoteAddr.Port"),
- ); diff != "" {
- t.Errorf("rep.Read: unexpected result (-want +got):\n%s", diff)
- }
- if diff := cmp.Diff(data, buf.Bytes()); diff != "" {
- t.Errorf("got UDP payload mismatch (-want +got):\n%s", diff)
- }
- } else if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Fatalf("got rep.Read = (%v, %s) [with data %x], want = (_, %s)", res, err, buf.Bytes(), &tcpip.ErrWouldBlock{})
- }
- })
- }
-}
-
-// TestLoopbackSubnetLifetimeBoundToAddr tests that the lifetime of an address
-// in a loopback interface's associated subnet is bound to the permanently bound
-// address.
-func TestLoopbackSubnetLifetimeBoundToAddr(t *testing.T) {
- const nicID = 1
-
- protoAddr := tcpip.ProtocolAddress{
- Protocol: ipv4.ProtocolNumber,
- AddressWithPrefix: utils.Ipv4Addr,
- }
- addrBytes := []byte(utils.Ipv4Addr.Address)
- addrBytes[len(addrBytes)-1]++
- otherAddr := tcpip.Address(addrBytes)
-
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},
- })
- if err := s.CreateNIC(nicID, loopback.New()); err != nil {
- t.Fatalf("s.CreateNIC(%d, _): %s", nicID, err)
- }
- if err := s.AddProtocolAddress(nicID, protoAddr); err != nil {
- t.Fatalf("s.AddProtocolAddress(%d, %#v): %s", nicID, protoAddr, err)
- }
- s.SetRouteTable([]tcpip.Route{
- {
- Destination: header.IPv4EmptySubnet,
- NIC: nicID,
- },
- })
-
- r, err := s.FindRoute(nicID, otherAddr, utils.RemoteIPv4Addr, ipv4.ProtocolNumber, false /* multicastLoop */)
- if err != nil {
- t.Fatalf("s.FindRoute(%d, %s, %s, %d, false): %s", nicID, otherAddr, utils.RemoteIPv4Addr, ipv4.ProtocolNumber, err)
- }
- defer r.Release()
-
- params := stack.NetworkHeaderParams{
- Protocol: 111,
- TTL: 64,
- TOS: stack.DefaultTOS,
- }
- data := buffer.View([]byte{1, 2, 3, 4})
- if err := r.WritePacket(params, stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: int(r.MaxHeaderLength()),
- Data: data.ToVectorisedView(),
- })); err != nil {
- t.Fatalf("r.WritePacket(%#v, _): %s", params, err)
- }
-
- // Removing the address should make the endpoint invalid.
- if err := s.RemoveAddress(nicID, protoAddr.AddressWithPrefix.Address); err != nil {
- t.Fatalf("s.RemoveAddress(%d, %s): %s", nicID, protoAddr.AddressWithPrefix.Address, err)
- }
- {
- err := r.WritePacket(params, stack.NewPacketBuffer(stack.PacketBufferOptions{
- ReserveHeaderBytes: int(r.MaxHeaderLength()),
- Data: data.ToVectorisedView(),
- }))
- if _, ok := err.(*tcpip.ErrInvalidEndpointState); !ok {
- t.Fatalf("got r.WritePacket(%#v, _) = %s, want = %s", params, err, &tcpip.ErrInvalidEndpointState{})
- }
- }
-}
-
-// TestLoopbackAcceptAllInSubnetTCP tests that a loopback interface considers
-// itself bound to all addresses in the subnet of an assigned address and that
-// TCP traffic is sent/received correctly.
-func TestLoopbackAcceptAllInSubnetTCP(t *testing.T) {
- const (
- nicID = 1
- localPort = 80
- )
-
- ipv4ProtocolAddress := tcpip.ProtocolAddress{
- Protocol: header.IPv4ProtocolNumber,
- AddressWithPrefix: utils.Ipv4Addr,
- }
- ipv4ProtocolAddress.AddressWithPrefix.PrefixLen = 8
- ipv4Bytes := []byte(ipv4ProtocolAddress.AddressWithPrefix.Address)
- ipv4Bytes[len(ipv4Bytes)-1]++
- otherIPv4Address := tcpip.Address(ipv4Bytes)
-
- ipv6ProtocolAddress := tcpip.ProtocolAddress{
- Protocol: header.IPv6ProtocolNumber,
- AddressWithPrefix: utils.Ipv6Addr,
- }
- ipv6Bytes := []byte(utils.Ipv6Addr.Address)
- ipv6Bytes[len(ipv6Bytes)-1]++
- otherIPv6Address := tcpip.Address(ipv6Bytes)
-
- tests := []struct {
- name string
- addAddress tcpip.ProtocolAddress
- bindAddr tcpip.Address
- dstAddr tcpip.Address
- expectAccept bool
- }{
- {
- name: "IPv4 bind to wildcard and send to assigned address",
- addAddress: ipv4ProtocolAddress,
- dstAddr: ipv4ProtocolAddress.AddressWithPrefix.Address,
- expectAccept: true,
- },
- {
- name: "IPv4 bind to wildcard and send to other subnet-local address",
- addAddress: ipv4ProtocolAddress,
- dstAddr: otherIPv4Address,
- expectAccept: true,
- },
- {
- name:         "IPv4 bind to wildcard and send to other address",
- addAddress: ipv4ProtocolAddress,
- dstAddr: utils.RemoteIPv4Addr,
- expectAccept: false,
- },
- {
- name: "IPv4 bind to other subnet-local address and send to assigned address",
- addAddress: ipv4ProtocolAddress,
- bindAddr: otherIPv4Address,
- dstAddr: ipv4ProtocolAddress.AddressWithPrefix.Address,
- expectAccept: false,
- },
- {
- name: "IPv4 bind and send to other subnet-local address",
- addAddress: ipv4ProtocolAddress,
- bindAddr: otherIPv4Address,
- dstAddr: otherIPv4Address,
- expectAccept: true,
- },
- {
- name: "IPv4 bind to assigned address and send to other subnet-local address",
- addAddress: ipv4ProtocolAddress,
- bindAddr: ipv4ProtocolAddress.AddressWithPrefix.Address,
- dstAddr: otherIPv4Address,
- expectAccept: false,
- },
-
- {
- name: "IPv6 bind and send to assigned address",
- addAddress: ipv6ProtocolAddress,
- bindAddr: utils.Ipv6Addr.Address,
- dstAddr: utils.Ipv6Addr.Address,
- expectAccept: true,
- },
- {
- name: "IPv6 bind to wildcard and send to other subnet-local address",
- addAddress: ipv6ProtocolAddress,
- dstAddr: otherIPv6Address,
- expectAccept: false,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{tcp.NewProtocol},
- })
- if err := s.CreateNIC(nicID, loopback.New()); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID, err)
- }
- if err := s.AddProtocolAddress(nicID, test.addAddress); err != nil {
- t.Fatalf("AddProtocolAddress(%d, %#v): %s", nicID, test.addAddress, err)
- }
- s.SetRouteTable([]tcpip.Route{
- {
- Destination: header.IPv4EmptySubnet,
- NIC: nicID,
- },
- {
- Destination: header.IPv6EmptySubnet,
- NIC: nicID,
- },
- })
-
- var wq waiter.Queue
- we, ch := waiter.NewChannelEntry(nil)
- wq.EventRegister(&we, waiter.ReadableEvents)
- defer wq.EventUnregister(&we)
- listeningEndpoint, err := s.NewEndpoint(tcp.ProtocolNumber, test.addAddress.Protocol, &wq)
- if err != nil {
- t.Fatalf("NewEndpoint(%d, %d, _): %s", udp.ProtocolNumber, test.addAddress.Protocol, err)
- }
- defer listeningEndpoint.Close()
-
- bindAddr := tcpip.FullAddress{Addr: test.bindAddr, Port: localPort}
- if err := listeningEndpoint.Bind(bindAddr); err != nil {
- t.Fatalf("listeningEndpoint.Bind(%#v): %s", bindAddr, err)
- }
-
- if err := listeningEndpoint.Listen(1); err != nil {
- t.Fatalf("listeningEndpoint.Listen(1): %s", err)
- }
-
- connectingEndpoint, err := s.NewEndpoint(tcp.ProtocolNumber, test.addAddress.Protocol, &wq)
- if err != nil {
- t.Fatalf("s.NewEndpoint(%d, %d, _): %s", udp.ProtocolNumber, test.addAddress.Protocol, err)
- }
- defer connectingEndpoint.Close()
-
- connectAddr := tcpip.FullAddress{
- Addr: test.dstAddr,
- Port: localPort,
- }
- {
- err := connectingEndpoint.Connect(connectAddr)
- if _, ok := err.(*tcpip.ErrConnectStarted); !ok {
- t.Fatalf("connectingEndpoint.Connect(%#v): %s", connectAddr, err)
- }
- }
-
- if !test.expectAccept {
- _, _, err := listeningEndpoint.Accept(nil)
- if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Fatalf("got listeningEndpoint.Accept(nil) = %s, want = %s", err, &tcpip.ErrWouldBlock{})
- }
- return
- }
-
- // Wait for the listening endpoint to be "readable". That is, wait for a
- // new connection.
- <-ch
- var addr tcpip.FullAddress
- if _, _, err := listeningEndpoint.Accept(&addr); err != nil {
- t.Fatalf("listeningEndpoint.Accept(nil): %s", err)
- }
- if addr.Addr != test.addAddress.AddressWithPrefix.Address {
- t.Errorf("got addr.Addr = %s, want = %s", addr.Addr, test.addAddress.AddressWithPrefix.Address)
- }
- })
- }
-}
-
-func TestExternalLoopbackTraffic(t *testing.T) {
- const (
- nicID1 = 1
- nicID2 = 2
-
- numPackets = 1
- ttl = 64
- )
- ipv4Loopback := testutil.MustParse4("127.0.0.1")
-
- loopbackSourcedICMPv4 := func(e *channel.Endpoint) {
- utils.RxICMPv4EchoRequest(e, ipv4Loopback, utils.Ipv4Addr.Address, ttl)
- }
-
- loopbackSourcedICMPv6 := func(e *channel.Endpoint) {
- utils.RxICMPv6EchoRequest(e, header.IPv6Loopback, utils.Ipv6Addr.Address, ttl)
- }
-
- loopbackDestinedICMPv4 := func(e *channel.Endpoint) {
- utils.RxICMPv4EchoRequest(e, utils.RemoteIPv4Addr, ipv4Loopback, ttl)
- }
-
- loopbackDestinedICMPv6 := func(e *channel.Endpoint) {
- utils.RxICMPv6EchoRequest(e, utils.RemoteIPv6Addr, header.IPv6Loopback, ttl)
- }
-
- invalidSrcAddrStat := func(s tcpip.IPStats) *tcpip.StatCounter {
- return s.InvalidSourceAddressesReceived
- }
-
- invalidDestAddrStat := func(s tcpip.IPStats) *tcpip.StatCounter {
- return s.InvalidDestinationAddressesReceived
- }
-
- tests := []struct {
- name string
- allowExternalLoopback bool
- forwarding bool
- rxICMP func(*channel.Endpoint)
- invalidAddressStat func(tcpip.IPStats) *tcpip.StatCounter
- shouldAccept bool
- }{
- {
- name: "IPv4 external loopback sourced traffic without forwarding and drop external loopback disabled",
- allowExternalLoopback: true,
- forwarding: false,
- rxICMP: loopbackSourcedICMPv4,
- invalidAddressStat: invalidSrcAddrStat,
- shouldAccept: true,
- },
- {
- name: "IPv4 external loopback sourced traffic without forwarding and drop external loopback enabled",
- allowExternalLoopback: false,
- forwarding: false,
- rxICMP: loopbackSourcedICMPv4,
- invalidAddressStat: invalidSrcAddrStat,
- shouldAccept: false,
- },
- {
- name: "IPv4 external loopback sourced traffic with forwarding and drop external loopback disabled",
- allowExternalLoopback: true,
- forwarding: true,
- rxICMP: loopbackSourcedICMPv4,
- invalidAddressStat: invalidSrcAddrStat,
- shouldAccept: true,
- },
- {
- name: "IPv4 external loopback sourced traffic with forwarding and drop external loopback enabled",
- allowExternalLoopback: false,
- forwarding: true,
- rxICMP: loopbackSourcedICMPv4,
- invalidAddressStat: invalidSrcAddrStat,
- shouldAccept: false,
- },
- {
- name: "IPv4 external loopback destined traffic without forwarding and drop external loopback disabled",
- allowExternalLoopback: true,
- forwarding: false,
- rxICMP: loopbackDestinedICMPv4,
- invalidAddressStat: invalidDestAddrStat,
- shouldAccept: false,
- },
- {
- name: "IPv4 external loopback destined traffic without forwarding and drop external loopback enabled",
- allowExternalLoopback: false,
- forwarding: false,
- rxICMP: loopbackDestinedICMPv4,
- invalidAddressStat: invalidDestAddrStat,
- shouldAccept: false,
- },
- {
- name: "IPv4 external loopback destined traffic with forwarding and drop external loopback disabled",
- allowExternalLoopback: true,
- forwarding: true,
- rxICMP: loopbackDestinedICMPv4,
- invalidAddressStat: invalidDestAddrStat,
- shouldAccept: true,
- },
- {
- name: "IPv4 external loopback destined traffic with forwarding and drop external loopback enabled",
- allowExternalLoopback: false,
- forwarding: true,
- rxICMP: loopbackDestinedICMPv4,
- invalidAddressStat: invalidDestAddrStat,
- shouldAccept: false,
- },
-
- {
- name: "IPv6 external loopback sourced traffic without forwarding and drop external loopback disabled",
- allowExternalLoopback: true,
- forwarding: false,
- rxICMP: loopbackSourcedICMPv6,
- invalidAddressStat: invalidSrcAddrStat,
- shouldAccept: true,
- },
- {
- name: "IPv6 external loopback sourced traffic without forwarding and drop external loopback enabled",
- allowExternalLoopback: false,
- forwarding: false,
- rxICMP: loopbackSourcedICMPv6,
- invalidAddressStat: invalidSrcAddrStat,
- shouldAccept: false,
- },
- {
- name: "IPv6 external loopback sourced traffic with forwarding and drop external loopback disabled",
- allowExternalLoopback: true,
- forwarding: true,
- rxICMP: loopbackSourcedICMPv6,
- invalidAddressStat: invalidSrcAddrStat,
- shouldAccept: true,
- },
- {
- name: "IPv6 external loopback sourced traffic with forwarding and drop external loopback enabled",
- allowExternalLoopback: false,
- forwarding: true,
- rxICMP: loopbackSourcedICMPv6,
- invalidAddressStat: invalidSrcAddrStat,
- shouldAccept: false,
- },
- {
- name: "IPv6 external loopback destined traffic without forwarding and drop external loopback disabled",
- allowExternalLoopback: true,
- forwarding: false,
- rxICMP: loopbackDestinedICMPv6,
- invalidAddressStat: invalidDestAddrStat,
- shouldAccept: false,
- },
- {
- name: "IPv6 external loopback destined traffic without forwarding and drop external loopback enabled",
- allowExternalLoopback: false,
- forwarding: false,
- rxICMP: loopbackDestinedICMPv6,
- invalidAddressStat: invalidDestAddrStat,
- shouldAccept: false,
- },
- {
- name: "IPv6 external loopback destined traffic with forwarding and drop external loopback disabled",
- allowExternalLoopback: true,
- forwarding: true,
- rxICMP: loopbackDestinedICMPv6,
- invalidAddressStat: invalidDestAddrStat,
- shouldAccept: true,
- },
- {
- name: "IPv6 external loopback destined traffic with forwarding and drop external loopback enabled",
- allowExternalLoopback: false,
- forwarding: true,
- rxICMP: loopbackDestinedICMPv6,
- invalidAddressStat: invalidDestAddrStat,
- shouldAccept: false,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{
- ipv4.NewProtocolWithOptions(ipv4.Options{
- AllowExternalLoopbackTraffic: test.allowExternalLoopback,
- }),
- ipv6.NewProtocolWithOptions(ipv6.Options{
- AllowExternalLoopbackTraffic: test.allowExternalLoopback,
- }),
- },
- TransportProtocols: []stack.TransportProtocolFactory{icmp.NewProtocol4, icmp.NewProtocol6},
- })
- e := channel.New(1, header.IPv6MinimumMTU, "")
- if err := s.CreateNIC(nicID1, e); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID1, err)
- }
- if err := s.AddAddressWithPrefix(nicID1, ipv4.ProtocolNumber, utils.Ipv4Addr); err != nil {
- t.Fatalf("AddAddressWithPrefix(%d, %d, %s): %s", nicID1, ipv4.ProtocolNumber, utils.Ipv4Addr, err)
- }
- if err := s.AddAddressWithPrefix(nicID1, ipv6.ProtocolNumber, utils.Ipv6Addr); err != nil {
- t.Fatalf("AddAddressWithPrefix(%d, %d, %s): %s", nicID1, ipv6.ProtocolNumber, utils.Ipv6Addr, err)
- }
-
- if err := s.CreateNIC(nicID2, loopback.New()); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID2, err)
- }
- if err := s.AddAddress(nicID2, ipv4.ProtocolNumber, ipv4Loopback); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s): %s", nicID2, ipv4.ProtocolNumber, ipv4Loopback, err)
- }
- if err := s.AddAddress(nicID2, ipv6.ProtocolNumber, header.IPv6Loopback); err != nil {
- t.Fatalf("AddAddress(%d, %d, %s): %s", nicID2, ipv6.ProtocolNumber, header.IPv6Loopback, err)
- }
-
- if test.forwarding {
- if err := s.SetForwardingDefaultAndAllNICs(ipv4.ProtocolNumber, true); err != nil {
- t.Fatalf("SetForwardingDefaultAndAllNICs(%d, true): %s", ipv4.ProtocolNumber, err)
- }
- if err := s.SetForwardingDefaultAndAllNICs(ipv6.ProtocolNumber, true); err != nil {
- t.Fatalf("SetForwardingDefaultAndAllNICs(%d, true): %s", ipv6.ProtocolNumber, err)
- }
- }
-
- s.SetRouteTable([]tcpip.Route{
- tcpip.Route{
- Destination: header.IPv4EmptySubnet,
- NIC: nicID1,
- },
- tcpip.Route{
- Destination: header.IPv6EmptySubnet,
- NIC: nicID1,
- },
- tcpip.Route{
- Destination: ipv4Loopback.WithPrefix().Subnet(),
- NIC: nicID2,
- },
- tcpip.Route{
- Destination: header.IPv6Loopback.WithPrefix().Subnet(),
- NIC: nicID2,
- },
- })
-
- stats := s.Stats().IP
- invalidAddressStat := test.invalidAddressStat(stats)
- deliveredPacketsStat := stats.PacketsDelivered
- if got := invalidAddressStat.Value(); got != 0 {
- t.Fatalf("got invalidAddressStat.Value() = %d, want = 0", got)
- }
- if got := deliveredPacketsStat.Value(); got != 0 {
- t.Fatalf("got deliveredPacketsStat.Value() = %d, want = 0", got)
- }
- test.rxICMP(e)
- var expectedInvalidPackets uint64
- if !test.shouldAccept {
- expectedInvalidPackets = numPackets
- }
- if got := invalidAddressStat.Value(); got != expectedInvalidPackets {
- t.Fatalf("got invalidAddressStat.Value() = %d, want = %d", got, expectedInvalidPackets)
- }
- if got, want := deliveredPacketsStat.Value(), numPackets-expectedInvalidPackets; got != want {
- t.Fatalf("got deliveredPacketsStat.Value() = %d, want = %d", got, want)
- }
- })
- }
-}
diff --git a/pkg/tcpip/tests/integration/multicast_broadcast_test.go b/pkg/tcpip/tests/integration/multicast_broadcast_test.go
deleted file mode 100644
index 2d0a6e6a7..000000000
--- a/pkg/tcpip/tests/integration/multicast_broadcast_test.go
+++ /dev/null
@@ -1,723 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package multicast_broadcast_test
-
-import (
- "bytes"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/checker"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/link/channel"
- "gvisor.dev/gvisor/pkg/tcpip/link/loopback"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/tests/utils"
- "gvisor.dev/gvisor/pkg/tcpip/testutil"
- "gvisor.dev/gvisor/pkg/tcpip/transport/icmp"
- "gvisor.dev/gvisor/pkg/tcpip/transport/udp"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-const (
- defaultMTU = 1280
- ttl = 255
-)
-
-// TestPingMulticastBroadcast tests that responding to an Echo Request destined
-// to a multicast or broadcast address uses a unicast source address for the
-// reply.
-func TestPingMulticastBroadcast(t *testing.T) {
- const (
- nicID = 1
- ttl = 64
- )
-
- tests := []struct {
- name string
- protoNum tcpip.NetworkProtocolNumber
- rxICMP func(*channel.Endpoint, tcpip.Address, tcpip.Address, uint8)
- srcAddr tcpip.Address
- dstAddr tcpip.Address
- expectedSrc tcpip.Address
- }{
- {
- name: "IPv4 unicast",
- protoNum: header.IPv4ProtocolNumber,
- dstAddr: utils.Ipv4Addr.Address,
- srcAddr: utils.RemoteIPv4Addr,
- rxICMP: utils.RxICMPv4EchoRequest,
- expectedSrc: utils.Ipv4Addr.Address,
- },
- {
- name: "IPv4 directed broadcast",
- protoNum: header.IPv4ProtocolNumber,
- rxICMP: utils.RxICMPv4EchoRequest,
- srcAddr: utils.RemoteIPv4Addr,
- dstAddr: utils.Ipv4SubnetBcast,
- expectedSrc: utils.Ipv4Addr.Address,
- },
- {
- name: "IPv4 broadcast",
- protoNum: header.IPv4ProtocolNumber,
- rxICMP: utils.RxICMPv4EchoRequest,
- srcAddr: utils.RemoteIPv4Addr,
- dstAddr: header.IPv4Broadcast,
- expectedSrc: utils.Ipv4Addr.Address,
- },
- {
- name: "IPv4 all-systems multicast",
- protoNum: header.IPv4ProtocolNumber,
- rxICMP: utils.RxICMPv4EchoRequest,
- srcAddr: utils.RemoteIPv4Addr,
- dstAddr: header.IPv4AllSystems,
- expectedSrc: utils.Ipv4Addr.Address,
- },
- {
- name: "IPv6 unicast",
- protoNum: header.IPv6ProtocolNumber,
- rxICMP: utils.RxICMPv6EchoRequest,
- srcAddr: utils.RemoteIPv6Addr,
- dstAddr: utils.Ipv6Addr.Address,
- expectedSrc: utils.Ipv6Addr.Address,
- },
- {
- name: "IPv6 all-nodes multicast",
- protoNum: header.IPv6ProtocolNumber,
- rxICMP: utils.RxICMPv6EchoRequest,
- srcAddr: utils.RemoteIPv6Addr,
- dstAddr: header.IPv6AllNodesMulticastAddress,
- expectedSrc: utils.Ipv6Addr.Address,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{icmp.NewProtocol4, icmp.NewProtocol6},
- })
- // We only expect a single packet in response to our ICMP Echo Request.
- e := channel.New(1, defaultMTU, "")
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID, err)
- }
- ipv4ProtoAddr := tcpip.ProtocolAddress{Protocol: header.IPv4ProtocolNumber, AddressWithPrefix: utils.Ipv4Addr}
- if err := s.AddProtocolAddress(nicID, ipv4ProtoAddr); err != nil {
- t.Fatalf("AddProtocolAddress(%d, %#v): %s", nicID, ipv4ProtoAddr, err)
- }
- ipv6ProtoAddr := tcpip.ProtocolAddress{Protocol: header.IPv6ProtocolNumber, AddressWithPrefix: utils.Ipv6Addr}
- if err := s.AddProtocolAddress(nicID, ipv6ProtoAddr); err != nil {
- t.Fatalf("AddProtocolAddress(%d, %#v): %s", nicID, ipv6ProtoAddr, err)
- }
-
- // Default routes for IPv4 and IPv6 so ICMP can find a route to the remote
- // node when attempting to send the ICMP Echo Reply.
- s.SetRouteTable([]tcpip.Route{
- {
- Destination: header.IPv6EmptySubnet,
- NIC: nicID,
- },
- {
- Destination: header.IPv4EmptySubnet,
- NIC: nicID,
- },
- })
-
- test.rxICMP(e, test.srcAddr, test.dstAddr, ttl)
- pkt, ok := e.Read()
- if !ok {
- t.Fatal("expected ICMP response")
- }
-
- if pkt.Route.LocalAddress != test.expectedSrc {
- t.Errorf("got pkt.Route.LocalAddress = %s, want = %s", pkt.Route.LocalAddress, test.expectedSrc)
- }
- // The destination of the response packet should be the source of the
- // original packet.
- if pkt.Route.RemoteAddress != test.srcAddr {
- t.Errorf("got pkt.Route.RemoteAddress = %s, want = %s", pkt.Route.RemoteAddress, test.srcAddr)
- }
-
- src, dst := s.NetworkProtocolInstance(test.protoNum).ParseAddresses(stack.PayloadSince(pkt.Pkt.NetworkHeader()))
- if src != test.expectedSrc {
- t.Errorf("got pkt source = %s, want = %s", src, test.expectedSrc)
- }
- // The destination of the response packet should be the source of the
- // original packet.
- if dst != test.srcAddr {
- t.Errorf("got pkt destination = %s, want = %s", dst, test.srcAddr)
- }
- })
- }
-
-}
-
-func rxIPv4UDP(e *channel.Endpoint, src, dst tcpip.Address, data []byte) {
- payloadLen := header.UDPMinimumSize + len(data)
- totalLen := header.IPv4MinimumSize + payloadLen
- hdr := buffer.NewPrependable(totalLen)
- u := header.UDP(hdr.Prepend(payloadLen))
- u.Encode(&header.UDPFields{
- SrcPort: utils.RemotePort,
- DstPort: utils.LocalPort,
- Length: uint16(payloadLen),
- })
- copy(u.Payload(), data)
- sum := header.PseudoHeaderChecksum(udp.ProtocolNumber, src, dst, uint16(payloadLen))
- sum = header.Checksum(data, sum)
- u.SetChecksum(^u.CalculateChecksum(sum))
-
- ip := header.IPv4(hdr.Prepend(header.IPv4MinimumSize))
- ip.Encode(&header.IPv4Fields{
- TotalLength: uint16(totalLen),
- Protocol: uint8(udp.ProtocolNumber),
- TTL: ttl,
- SrcAddr: src,
- DstAddr: dst,
- })
- ip.SetChecksum(^ip.CalculateChecksum())
-
- e.InjectInbound(header.IPv4ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: hdr.View().ToVectorisedView(),
- }))
-}
-
-func rxIPv6UDP(e *channel.Endpoint, src, dst tcpip.Address, data []byte) {
- payloadLen := header.UDPMinimumSize + len(data)
- hdr := buffer.NewPrependable(header.IPv6MinimumSize + payloadLen)
- u := header.UDP(hdr.Prepend(payloadLen))
- u.Encode(&header.UDPFields{
- SrcPort: utils.RemotePort,
- DstPort: utils.LocalPort,
- Length: uint16(payloadLen),
- })
- copy(u.Payload(), data)
- sum := header.PseudoHeaderChecksum(udp.ProtocolNumber, src, dst, uint16(payloadLen))
- sum = header.Checksum(data, sum)
- u.SetChecksum(^u.CalculateChecksum(sum))
-
- ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
- ip.Encode(&header.IPv6Fields{
- PayloadLength: uint16(payloadLen),
- TransportProtocol: udp.ProtocolNumber,
- HopLimit: ttl,
- SrcAddr: src,
- DstAddr: dst,
- })
-
- e.InjectInbound(header.IPv6ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: hdr.View().ToVectorisedView(),
- }))
-}
-
-// TestIncomingMulticastAndBroadcast tests receiving a packet destined to some
-// multicast or broadcast address.
-func TestIncomingMulticastAndBroadcast(t *testing.T) {
- const nicID = 1
-
- data := []byte{1, 2, 3, 4}
-
- tests := []struct {
- name string
- proto tcpip.NetworkProtocolNumber
- remoteAddr tcpip.Address
- localAddr tcpip.AddressWithPrefix
- rxUDP func(*channel.Endpoint, tcpip.Address, tcpip.Address, []byte)
- bindAddr tcpip.Address
- dstAddr tcpip.Address
- expectRx bool
- }{
- {
- name: "IPv4 unicast binding to unicast",
- proto: header.IPv4ProtocolNumber,
- remoteAddr: utils.RemoteIPv4Addr,
- localAddr: utils.Ipv4Addr,
- rxUDP: rxIPv4UDP,
- bindAddr: utils.Ipv4Addr.Address,
- dstAddr: utils.Ipv4Addr.Address,
- expectRx: true,
- },
- {
- name: "IPv4 unicast binding to broadcast",
- proto: header.IPv4ProtocolNumber,
- remoteAddr: utils.RemoteIPv4Addr,
- localAddr: utils.Ipv4Addr,
- rxUDP: rxIPv4UDP,
- bindAddr: header.IPv4Broadcast,
- dstAddr: utils.Ipv4Addr.Address,
- expectRx: false,
- },
- {
- name: "IPv4 unicast binding to wildcard",
- proto: header.IPv4ProtocolNumber,
- remoteAddr: utils.RemoteIPv4Addr,
- localAddr: utils.Ipv4Addr,
- rxUDP: rxIPv4UDP,
- dstAddr: utils.Ipv4Addr.Address,
- expectRx: true,
- },
-
- {
- name: "IPv4 directed broadcast binding to subnet broadcast",
- proto: header.IPv4ProtocolNumber,
- remoteAddr: utils.RemoteIPv4Addr,
- localAddr: utils.Ipv4Addr,
- rxUDP: rxIPv4UDP,
- bindAddr: utils.Ipv4SubnetBcast,
- dstAddr: utils.Ipv4SubnetBcast,
- expectRx: true,
- },
- {
- name: "IPv4 directed broadcast binding to broadcast",
- proto: header.IPv4ProtocolNumber,
- remoteAddr: utils.RemoteIPv4Addr,
- localAddr: utils.Ipv4Addr,
- rxUDP: rxIPv4UDP,
- bindAddr: header.IPv4Broadcast,
- dstAddr: utils.Ipv4SubnetBcast,
- expectRx: false,
- },
- {
- name: "IPv4 directed broadcast binding to wildcard",
- proto: header.IPv4ProtocolNumber,
- remoteAddr: utils.RemoteIPv4Addr,
- localAddr: utils.Ipv4Addr,
- rxUDP: rxIPv4UDP,
- dstAddr: utils.Ipv4SubnetBcast,
- expectRx: true,
- },
-
- {
- name: "IPv4 broadcast binding to broadcast",
- proto: header.IPv4ProtocolNumber,
- remoteAddr: utils.RemoteIPv4Addr,
- localAddr: utils.Ipv4Addr,
- rxUDP: rxIPv4UDP,
- bindAddr: header.IPv4Broadcast,
- dstAddr: header.IPv4Broadcast,
- expectRx: true,
- },
- {
- name: "IPv4 broadcast binding to subnet broadcast",
- proto: header.IPv4ProtocolNumber,
- remoteAddr: utils.RemoteIPv4Addr,
- localAddr: utils.Ipv4Addr,
- rxUDP: rxIPv4UDP,
- bindAddr: utils.Ipv4SubnetBcast,
- dstAddr: header.IPv4Broadcast,
- expectRx: false,
- },
- {
- name: "IPv4 broadcast binding to wildcard",
- proto: header.IPv4ProtocolNumber,
- remoteAddr: utils.RemoteIPv4Addr,
- localAddr: utils.Ipv4Addr,
- rxUDP: rxIPv4UDP,
- dstAddr: utils.Ipv4SubnetBcast,
- expectRx: true,
- },
-
- {
- name: "IPv4 all-systems multicast binding to all-systems multicast",
- proto: header.IPv4ProtocolNumber,
- remoteAddr: utils.RemoteIPv4Addr,
- localAddr: utils.Ipv4Addr,
- rxUDP: rxIPv4UDP,
- bindAddr: header.IPv4AllSystems,
- dstAddr: header.IPv4AllSystems,
- expectRx: true,
- },
- {
- name: "IPv4 all-systems multicast binding to wildcard",
- proto: header.IPv4ProtocolNumber,
- remoteAddr: utils.RemoteIPv4Addr,
- localAddr: utils.Ipv4Addr,
- rxUDP: rxIPv4UDP,
- dstAddr: header.IPv4AllSystems,
- expectRx: true,
- },
- {
- name: "IPv4 all-systems multicast binding to unicast",
- proto: header.IPv4ProtocolNumber,
- remoteAddr: utils.RemoteIPv4Addr,
- localAddr: utils.Ipv4Addr,
- rxUDP: rxIPv4UDP,
- bindAddr: utils.Ipv4Addr.Address,
- dstAddr: header.IPv4AllSystems,
- expectRx: false,
- },
-
- // IPv6 has no notion of a broadcast.
- {
- name: "IPv6 unicast binding to wildcard",
- dstAddr: utils.Ipv6Addr.Address,
- proto: header.IPv6ProtocolNumber,
- remoteAddr: utils.RemoteIPv6Addr,
- localAddr: utils.Ipv6Addr,
- rxUDP: rxIPv6UDP,
- expectRx: true,
- },
- {
- name: "IPv6 broadcast-like address binding to wildcard",
- dstAddr: utils.Ipv6SubnetBcast,
- proto: header.IPv6ProtocolNumber,
- remoteAddr: utils.RemoteIPv6Addr,
- localAddr: utils.Ipv6Addr,
- rxUDP: rxIPv6UDP,
- expectRx: false,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},
- })
- e := channel.New(0, defaultMTU, "")
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID, err)
- }
- protoAddr := tcpip.ProtocolAddress{Protocol: test.proto, AddressWithPrefix: test.localAddr}
- if err := s.AddProtocolAddress(nicID, protoAddr); err != nil {
- t.Fatalf("AddProtocolAddress(%d, %#v): %s", nicID, protoAddr, err)
- }
-
- var wq waiter.Queue
- ep, err := s.NewEndpoint(udp.ProtocolNumber, test.proto, &wq)
- if err != nil {
- t.Fatalf("NewEndpoint(%d, %d, _): %s", udp.ProtocolNumber, test.proto, err)
- }
- defer ep.Close()
-
- bindAddr := tcpip.FullAddress{Addr: test.bindAddr, Port: utils.LocalPort}
- if err := ep.Bind(bindAddr); err != nil {
- t.Fatalf("ep.Bind(%#v): %s", bindAddr, err)
- }
-
- test.rxUDP(e, test.remoteAddr, test.dstAddr, data)
- var buf bytes.Buffer
- var opts tcpip.ReadOptions
- if res, err := ep.Read(&buf, opts); test.expectRx {
- if err != nil {
- t.Fatalf("ep.Read(_, %#v): %s", opts, err)
- }
- if diff := cmp.Diff(tcpip.ReadResult{
- Count: buf.Len(),
- Total: buf.Len(),
- }, res, checker.IgnoreCmpPath("ControlMessages")); diff != "" {
- t.Errorf("ep.Read: unexpected result (-want +got):\n%s", diff)
- }
- if diff := cmp.Diff(data, buf.Bytes()); diff != "" {
- t.Errorf("got UDP payload mismatch (-want +got):\n%s", diff)
- }
- } else if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Fatalf("got Read = (%v, %s) [with data %x], want = (_, %s)", res, err, buf.Bytes(), &tcpip.ErrWouldBlock{})
- }
- })
- }
-}
-
-// TestReuseAddrAndBroadcast makes sure broadcast packets are received by all
-// interested endpoints.
-func TestReuseAddrAndBroadcast(t *testing.T) {
- const (
- nicID = 1
- localPort = 9000
- )
- loopbackBroadcast := testutil.MustParse4("127.255.255.255")
-
- tests := []struct {
- name string
- broadcastAddr tcpip.Address
- }{
- {
- name: "Subnet directed broadcast",
- broadcastAddr: loopbackBroadcast,
- },
- {
- name: "IPv4 broadcast",
- broadcastAddr: header.IPv4Broadcast,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},
- })
- if err := s.CreateNIC(nicID, loopback.New()); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID, err)
- }
- protoAddr := tcpip.ProtocolAddress{
- Protocol: header.IPv4ProtocolNumber,
- AddressWithPrefix: tcpip.AddressWithPrefix{
- Address: "\x7f\x00\x00\x01",
- PrefixLen: 8,
- },
- }
- if err := s.AddProtocolAddress(nicID, protoAddr); err != nil {
- t.Fatalf("AddProtocolAddress(%d, %#v): %s", nicID, protoAddr, err)
- }
-
- s.SetRouteTable([]tcpip.Route{
- {
- // We use the empty subnet instead of just the loopback subnet so we
- // also have a route to the IPv4 Broadcast address.
- Destination: header.IPv4EmptySubnet,
- NIC: nicID,
- },
- })
-
- type endpointAndWaiter struct {
- ep tcpip.Endpoint
- ch chan struct{}
- }
- var eps []endpointAndWaiter
- // We create endpoints that bind to both the wildcard address and the
- // broadcast address to make sure both of these types of "broadcast
- // interested" endpoints receive broadcast packets.
- for _, bindWildcard := range []bool{false, true} {
- // Create multiple endpoints for each type of "broadcast interested"
- // endpoint so we can test that all endpoints receive the broadcast
- // packet.
- for i := 0; i < 2; i++ {
- var wq waiter.Queue
- we, ch := waiter.NewChannelEntry(nil)
- wq.EventRegister(&we, waiter.ReadableEvents)
- ep, err := s.NewEndpoint(udp.ProtocolNumber, ipv4.ProtocolNumber, &wq)
- if err != nil {
- t.Fatalf("(eps[%d]) NewEndpoint(%d, %d, _): %s", len(eps), udp.ProtocolNumber, ipv4.ProtocolNumber, err)
- }
- defer ep.Close()
-
- ep.SocketOptions().SetReuseAddress(true)
- ep.SocketOptions().SetBroadcast(true)
-
- bindAddr := tcpip.FullAddress{Port: localPort}
- if bindWildcard {
- if err := ep.Bind(bindAddr); err != nil {
- t.Fatalf("eps[%d].Bind(%#v): %s", len(eps), bindAddr, err)
- }
- } else {
- bindAddr.Addr = test.broadcastAddr
- if err := ep.Bind(bindAddr); err != nil {
- t.Fatalf("eps[%d].Bind(%#v): %s", len(eps), bindAddr, err)
- }
- }
-
- eps = append(eps, endpointAndWaiter{ep: ep, ch: ch})
- }
- }
-
- for i, wep := range eps {
- writeOpts := tcpip.WriteOptions{
- To: &tcpip.FullAddress{
- Addr: test.broadcastAddr,
- Port: localPort,
- },
- }
- data := []byte{byte(i), 2, 3, 4}
- var r bytes.Reader
- r.Reset(data)
- if n, err := wep.ep.Write(&r, writeOpts); err != nil {
- t.Fatalf("eps[%d].Write(_, _): %s", i, err)
- } else if want := int64(len(data)); n != want {
- t.Fatalf("got eps[%d].Write(_, _) = (%d, nil), want = (%d, nil)", i, n, want)
- }
-
- for j, rep := range eps {
- // Wait for the endpoint to become readable.
- <-rep.ch
-
- var buf bytes.Buffer
- result, err := rep.ep.Read(&buf, tcpip.ReadOptions{})
- if err != nil {
- t.Errorf("(eps[%d] write) eps[%d].Read: %s", i, j, err)
- continue
- }
- if diff := cmp.Diff(tcpip.ReadResult{
- Count: buf.Len(),
- Total: buf.Len(),
- }, result, checker.IgnoreCmpPath("ControlMessages")); diff != "" {
- t.Errorf("(eps[%d] write) eps[%d].Read: unexpected result (-want +got):\n%s", i, j, diff)
- }
- if diff := cmp.Diff([]byte(data), buf.Bytes()); diff != "" {
- t.Errorf("(eps[%d] write) got UDP payload from eps[%d] mismatch (-want +got):\n%s", i, j, diff)
- }
- }
- }
- })
- }
-}
-
-func TestUDPAddRemoveMembershipSocketOption(t *testing.T) {
- const (
- nicID = 1
- )
-
- data := []byte{1, 2, 3, 4}
-
- tests := []struct {
- name string
- proto tcpip.NetworkProtocolNumber
- remoteAddr tcpip.Address
- localAddr tcpip.AddressWithPrefix
- rxUDP func(*channel.Endpoint, tcpip.Address, tcpip.Address, []byte)
- multicastAddr tcpip.Address
- }{
- {
- name: "IPv4 unicast binding to unicast",
- multicastAddr: "\xe0\x01\x02\x03",
- proto: header.IPv4ProtocolNumber,
- remoteAddr: utils.RemoteIPv4Addr,
- localAddr: utils.Ipv4Addr,
- rxUDP: rxIPv4UDP,
- },
- {
- name: "IPv6 broadcast-like address binding to wildcard",
- multicastAddr: "\xff\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x03\x04",
- proto: header.IPv6ProtocolNumber,
- remoteAddr: utils.RemoteIPv6Addr,
- localAddr: utils.Ipv6Addr,
- rxUDP: rxIPv6UDP,
- },
- }
-
- subTests := []struct {
- name string
- specifyNICID bool
- specifyNICAddr bool
- }{
- {
- name: "Specify NIC ID and NIC address",
- specifyNICID: true,
- specifyNICAddr: true,
- },
- {
- name: "Don't specify NIC ID or NIC address",
- specifyNICID: false,
- specifyNICAddr: false,
- },
- {
- name: "Specify NIC ID but don't specify NIC address",
- specifyNICID: true,
- specifyNICAddr: false,
- },
- {
- name: "Don't specify NIC ID but specify NIC address",
- specifyNICID: false,
- specifyNICAddr: true,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- for _, subTest := range subTests {
- t.Run(subTest.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},
- })
- e := channel.New(0, defaultMTU, "")
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID, err)
- }
- protoAddr := tcpip.ProtocolAddress{Protocol: test.proto, AddressWithPrefix: test.localAddr}
- if err := s.AddProtocolAddress(nicID, protoAddr); err != nil {
- t.Fatalf("AddProtocolAddress(%d, %#v): %s", nicID, protoAddr, err)
- }
-
- // Set the route table so that UDP can find a NIC with a route to the
- // multicast address when the NIC isn't specified.
- if !subTest.specifyNICID && !subTest.specifyNICAddr {
- s.SetRouteTable([]tcpip.Route{
- {
- Destination: header.IPv6EmptySubnet,
- NIC: nicID,
- },
- {
- Destination: header.IPv4EmptySubnet,
- NIC: nicID,
- },
- })
- }
-
- var wq waiter.Queue
- ep, err := s.NewEndpoint(udp.ProtocolNumber, test.proto, &wq)
- if err != nil {
- t.Fatalf("NewEndpoint(%d, %d, _): %s", udp.ProtocolNumber, test.proto, err)
- }
- defer ep.Close()
-
- bindAddr := tcpip.FullAddress{Port: utils.LocalPort}
- if err := ep.Bind(bindAddr); err != nil {
- t.Fatalf("ep.Bind(%#v): %s", bindAddr, err)
- }
-
- memOpt := tcpip.MembershipOption{MulticastAddr: test.multicastAddr}
- if subTest.specifyNICID {
- memOpt.NIC = nicID
- }
- if subTest.specifyNICAddr {
- memOpt.InterfaceAddr = test.localAddr.Address
- }
-
- // We should receive UDP packets to the group once we join the
- // multicast group.
- addOpt := tcpip.AddMembershipOption(memOpt)
- if err := ep.SetSockOpt(&addOpt); err != nil {
- t.Fatalf("ep.SetSockOpt(&%#v): %s", addOpt, err)
- }
- test.rxUDP(e, test.remoteAddr, test.multicastAddr, data)
- var buf bytes.Buffer
- result, err := ep.Read(&buf, tcpip.ReadOptions{})
- if err != nil {
- t.Fatalf("ep.Read: %s", err)
- } else {
- if diff := cmp.Diff(tcpip.ReadResult{
- Count: buf.Len(),
- Total: buf.Len(),
- }, result, checker.IgnoreCmpPath("ControlMessages")); diff != "" {
- t.Errorf("ep.Read: unexpected result (-want +got):\n%s", diff)
- }
- if diff := cmp.Diff(data, buf.Bytes()); diff != "" {
- t.Errorf("got UDP payload mismatch (-want +got):\n%s", diff)
- }
- }
-
- // We should not receive UDP packets to the group once we leave
- // the multicast group.
- removeOpt := tcpip.RemoveMembershipOption(memOpt)
- if err := ep.SetSockOpt(&removeOpt); err != nil {
- t.Fatalf("ep.SetSockOpt(&%#v): %s", removeOpt, err)
- }
- {
- _, err := ep.Read(&buf, tcpip.ReadOptions{})
- if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Fatalf("got ep.Read = (_, %s), want = (_, %s)", err, &tcpip.ErrWouldBlock{})
- }
- }
- })
- }
- })
- }
-}
diff --git a/pkg/tcpip/tests/integration/route_test.go b/pkg/tcpip/tests/integration/route_test.go
deleted file mode 100644
index ac3c703d4..000000000
--- a/pkg/tcpip/tests/integration/route_test.go
+++ /dev/null
@@ -1,433 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package route_test
-
-import (
- "bytes"
- "fmt"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/checker"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/link/channel"
- "gvisor.dev/gvisor/pkg/tcpip/link/loopback"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/tests/utils"
- "gvisor.dev/gvisor/pkg/tcpip/testutil"
- "gvisor.dev/gvisor/pkg/tcpip/transport/icmp"
- "gvisor.dev/gvisor/pkg/tcpip/transport/udp"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-// TestLocalPing tests pinging a remote that is local to the stack.
-//
-// This tests that a local route is created and packets do not leave the stack.
-func TestLocalPing(t *testing.T) {
- const (
- nicID = 1
-
- // icmpDataOffset is the offset to the data in both ICMPv4 and ICMPv6 echo
- // request/reply packets.
- icmpDataOffset = 8
- )
- ipv4Loopback := testutil.MustParse4("127.0.0.1")
-
- channelEP := func() stack.LinkEndpoint { return channel.New(1, header.IPv6MinimumMTU, "") }
- channelEPCheck := func(t *testing.T, e stack.LinkEndpoint) {
- channelEP := e.(*channel.Endpoint)
- if n := channelEP.Drain(); n != 0 {
- t.Fatalf("got channelEP.Drain() = %d, want = 0", n)
- }
- }
-
- ipv4ICMPBuf := func(t *testing.T) buffer.View {
- data := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
- hdr := header.ICMPv4(make([]byte, header.ICMPv4MinimumSize+len(data)))
- hdr.SetType(header.ICMPv4Echo)
- if n := copy(hdr.Payload(), data[:]); n != len(data) {
- t.Fatalf("copied %d bytes but expected to copy %d bytes", n, len(data))
- }
- return buffer.View(hdr)
- }
-
- ipv6ICMPBuf := func(t *testing.T) buffer.View {
- data := [8]byte{1, 2, 3, 4, 5, 6, 7, 9}
- hdr := header.ICMPv6(make([]byte, header.ICMPv6MinimumSize+len(data)))
- hdr.SetType(header.ICMPv6EchoRequest)
- if n := copy(hdr.Payload(), data[:]); n != len(data) {
- t.Fatalf("copied %d bytes but expected to copy %d bytes", n, len(data))
- }
- return buffer.View(hdr)
- }
-
- tests := []struct {
- name string
- transProto tcpip.TransportProtocolNumber
- netProto tcpip.NetworkProtocolNumber
- linkEndpoint func() stack.LinkEndpoint
- localAddr tcpip.Address
- icmpBuf func(*testing.T) buffer.View
- expectedConnectErr tcpip.Error
- checkLinkEndpoint func(t *testing.T, e stack.LinkEndpoint)
- }{
- {
- name: "IPv4 loopback",
- transProto: icmp.ProtocolNumber4,
- netProto: ipv4.ProtocolNumber,
- linkEndpoint: loopback.New,
- localAddr: ipv4Loopback,
- icmpBuf: ipv4ICMPBuf,
- checkLinkEndpoint: func(*testing.T, stack.LinkEndpoint) {},
- },
- {
- name: "IPv6 loopback",
- transProto: icmp.ProtocolNumber6,
- netProto: ipv6.ProtocolNumber,
- linkEndpoint: loopback.New,
- localAddr: header.IPv6Loopback,
- icmpBuf: ipv6ICMPBuf,
- checkLinkEndpoint: func(*testing.T, stack.LinkEndpoint) {},
- },
- {
- name: "IPv4 non-loopback",
- transProto: icmp.ProtocolNumber4,
- netProto: ipv4.ProtocolNumber,
- linkEndpoint: channelEP,
- localAddr: utils.Ipv4Addr.Address,
- icmpBuf: ipv4ICMPBuf,
- checkLinkEndpoint: channelEPCheck,
- },
- {
- name: "IPv6 non-loopback",
- transProto: icmp.ProtocolNumber6,
- netProto: ipv6.ProtocolNumber,
- linkEndpoint: channelEP,
- localAddr: utils.Ipv6Addr.Address,
- icmpBuf: ipv6ICMPBuf,
- checkLinkEndpoint: channelEPCheck,
- },
- {
- name: "IPv4 loopback without local address",
- transProto: icmp.ProtocolNumber4,
- netProto: ipv4.ProtocolNumber,
- linkEndpoint: loopback.New,
- icmpBuf: ipv4ICMPBuf,
- expectedConnectErr: &tcpip.ErrNoRoute{},
- checkLinkEndpoint: func(*testing.T, stack.LinkEndpoint) {},
- },
- {
- name: "IPv6 loopback without local address",
- transProto: icmp.ProtocolNumber6,
- netProto: ipv6.ProtocolNumber,
- linkEndpoint: loopback.New,
- icmpBuf: ipv6ICMPBuf,
- expectedConnectErr: &tcpip.ErrNoRoute{},
- checkLinkEndpoint: func(*testing.T, stack.LinkEndpoint) {},
- },
- {
- name: "IPv4 non-loopback without local address",
- transProto: icmp.ProtocolNumber4,
- netProto: ipv4.ProtocolNumber,
- linkEndpoint: channelEP,
- icmpBuf: ipv4ICMPBuf,
- expectedConnectErr: &tcpip.ErrNoRoute{},
- checkLinkEndpoint: channelEPCheck,
- },
- {
- name: "IPv6 non-loopback without local address",
- transProto: icmp.ProtocolNumber6,
- netProto: ipv6.ProtocolNumber,
- linkEndpoint: channelEP,
- icmpBuf: ipv6ICMPBuf,
- expectedConnectErr: &tcpip.ErrNoRoute{},
- checkLinkEndpoint: channelEPCheck,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- for _, allowExternalLoopback := range []bool{true, false} {
- t.Run(fmt.Sprintf("AllowExternalLoopback=%t", allowExternalLoopback), func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{
- ipv4.NewProtocolWithOptions(ipv4.Options{
- AllowExternalLoopbackTraffic: allowExternalLoopback,
- }),
- ipv6.NewProtocolWithOptions(ipv6.Options{
- AllowExternalLoopbackTraffic: allowExternalLoopback,
- }),
- },
- TransportProtocols: []stack.TransportProtocolFactory{icmp.NewProtocol4, icmp.NewProtocol6},
- HandleLocal: true,
- })
- e := test.linkEndpoint()
- if err := s.CreateNIC(nicID, e); err != nil {
- t.Fatalf("s.CreateNIC(%d, _): %s", nicID, err)
- }
-
- if len(test.localAddr) != 0 {
- if err := s.AddAddress(nicID, test.netProto, test.localAddr); err != nil {
- t.Fatalf("s.AddAddress(%d, %d, %s): %s", nicID, test.netProto, test.localAddr, err)
- }
- }
-
- var wq waiter.Queue
- we, ch := waiter.NewChannelEntry(nil)
- wq.EventRegister(&we, waiter.ReadableEvents)
- ep, err := s.NewEndpoint(test.transProto, test.netProto, &wq)
- if err != nil {
- t.Fatalf("s.NewEndpoint(%d, %d, _): %s", test.transProto, test.netProto, err)
- }
- defer ep.Close()
-
- connAddr := tcpip.FullAddress{Addr: test.localAddr}
- if err := ep.Connect(connAddr); err != test.expectedConnectErr {
- t.Fatalf("got ep.Connect(%#v) = %s, want = %s", connAddr, err, test.expectedConnectErr)
- }
-
- if test.expectedConnectErr != nil {
- return
- }
-
- var r bytes.Reader
- payload := test.icmpBuf(t)
- r.Reset(payload)
- var wOpts tcpip.WriteOptions
- if n, err := ep.Write(&r, wOpts); err != nil {
- t.Fatalf("ep.Write(%#v, %#v): %s", payload, wOpts, err)
- } else if n != int64(len(payload)) {
- t.Fatalf("got ep.Write(%#v, %#v) = (%d, _, nil), want = (%d, _, nil)", payload, wOpts, n, len(payload))
- }
-
- // Wait for the endpoint to become readable.
- <-ch
-
- var w bytes.Buffer
- rr, err := ep.Read(&w, tcpip.ReadOptions{
- NeedRemoteAddr: true,
- })
- if err != nil {
- t.Fatalf("ep.Read(...): %s", err)
- }
- if diff := cmp.Diff(buffer.View(w.Bytes()[icmpDataOffset:]), payload[icmpDataOffset:]); diff != "" {
- t.Errorf("received data mismatch (-want +got):\n%s", diff)
- }
- if rr.RemoteAddr.Addr != test.localAddr {
- t.Errorf("got addr.Addr = %s, want = %s", rr.RemoteAddr.Addr, test.localAddr)
- }
-
- test.checkLinkEndpoint(t, e)
- })
- }
- })
- }
-}
-
-// TestLocalUDP tests sending UDP packets between two endpoints that are local
-// to the stack.
-//
-// This tests that packets never leave the stack and verifies the addresses
-// used when sending a packet.
-func TestLocalUDP(t *testing.T) {
- const (
- nicID = 1
- )
-
- tests := []struct {
- name string
- canBePrimaryAddr tcpip.ProtocolAddress
- firstPrimaryAddr tcpip.ProtocolAddress
- }{
- {
- name: "IPv4",
- canBePrimaryAddr: utils.Ipv4Addr1,
- firstPrimaryAddr: utils.Ipv4Addr2,
- },
- {
- name: "IPv6",
- canBePrimaryAddr: utils.Ipv6Addr1,
- firstPrimaryAddr: utils.Ipv6Addr2,
- },
- }
-
- subTests := []struct {
- name string
- addAddress bool
- expectedWriteErr tcpip.Error
- }{
- {
- name: "Unassigned local address",
- addAddress: false,
- expectedWriteErr: &tcpip.ErrNoRoute{},
- },
- {
- name: "Assigned local address",
- addAddress: true,
- expectedWriteErr: nil,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- for _, subTest := range subTests {
- t.Run(subTest.name, func(t *testing.T) {
- stackOpts := stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},
- HandleLocal: true,
- }
-
- s := stack.New(stackOpts)
- ep := channel.New(1, header.IPv6MinimumMTU, "")
-
- if err := s.CreateNIC(nicID, ep); err != nil {
- t.Fatalf("s.CreateNIC(%d, _): %s", nicID, err)
- }
-
- if subTest.addAddress {
- if err := s.AddProtocolAddressWithOptions(nicID, test.canBePrimaryAddr, stack.CanBePrimaryEndpoint); err != nil {
- t.Fatalf("s.AddProtocolAddressWithOptions(%d, %#v, %d): %s", nicID, test.canBePrimaryAddr, stack.FirstPrimaryEndpoint, err)
- }
- if err := s.AddProtocolAddressWithOptions(nicID, test.firstPrimaryAddr, stack.FirstPrimaryEndpoint); err != nil {
- t.Fatalf("s.AddProtocolAddressWithOptions(%d, %#v, %d): %s", nicID, test.firstPrimaryAddr, stack.FirstPrimaryEndpoint, err)
- }
- }
-
- var serverWQ waiter.Queue
- serverWE, serverCH := waiter.NewChannelEntry(nil)
- serverWQ.EventRegister(&serverWE, waiter.ReadableEvents)
- server, err := s.NewEndpoint(udp.ProtocolNumber, test.firstPrimaryAddr.Protocol, &serverWQ)
- if err != nil {
- t.Fatalf("s.NewEndpoint(%d, %d): %s", udp.ProtocolNumber, test.firstPrimaryAddr.Protocol, err)
- }
- defer server.Close()
-
- bindAddr := tcpip.FullAddress{Port: 80}
- if err := server.Bind(bindAddr); err != nil {
- t.Fatalf("server.Bind(%#v): %s", bindAddr, err)
- }
-
- var clientWQ waiter.Queue
- clientWE, clientCH := waiter.NewChannelEntry(nil)
- clientWQ.EventRegister(&clientWE, waiter.ReadableEvents)
- client, err := s.NewEndpoint(udp.ProtocolNumber, test.firstPrimaryAddr.Protocol, &clientWQ)
- if err != nil {
- t.Fatalf("s.NewEndpoint(%d, %d): %s", udp.ProtocolNumber, test.firstPrimaryAddr.Protocol, err)
- }
- defer client.Close()
-
- serverAddr := tcpip.FullAddress{
- Addr: test.canBePrimaryAddr.AddressWithPrefix.Address,
- Port: 80,
- }
-
- clientPayload := []byte{1, 2, 3, 4}
- {
- var r bytes.Reader
- r.Reset(clientPayload)
- wOpts := tcpip.WriteOptions{
- To: &serverAddr,
- }
- if n, err := client.Write(&r, wOpts); err != subTest.expectedWriteErr {
- t.Fatalf("got client.Write(%#v, %#v) = (%d, %s), want = (_, %s)", clientPayload, wOpts, n, err, subTest.expectedWriteErr)
- } else if subTest.expectedWriteErr != nil {
- // Nothing else to test if we expected not to be able to send the
- // UDP packet.
- return
- } else if n != int64(len(clientPayload)) {
- t.Fatalf("got client.Write(%#v, %#v) = (%d, nil), want = (%d, nil)", clientPayload, wOpts, n, len(clientPayload))
- }
- }
-
- // Wait for the server endpoint to become readable.
- <-serverCH
-
- var clientAddr tcpip.FullAddress
- var readBuf bytes.Buffer
- if read, err := server.Read(&readBuf, tcpip.ReadOptions{NeedRemoteAddr: true}); err != nil {
- t.Fatalf("server.Read(_): %s", err)
- } else {
- clientAddr = read.RemoteAddr
-
- if diff := cmp.Diff(tcpip.ReadResult{
- Count: readBuf.Len(),
- Total: readBuf.Len(),
- RemoteAddr: tcpip.FullAddress{
- Addr: test.canBePrimaryAddr.AddressWithPrefix.Address,
- },
- }, read, checker.IgnoreCmpPath(
- "ControlMessages",
- "RemoteAddr.NIC",
- "RemoteAddr.Port",
- )); diff != "" {
- t.Errorf("server.Read: unexpected result (-want +got):\n%s", diff)
- }
- if diff := cmp.Diff(buffer.View(clientPayload), buffer.View(readBuf.Bytes())); diff != "" {
- t.Errorf("server read clientPayload mismatch (-want +got):\n%s", diff)
- }
- if t.Failed() {
- t.FailNow()
- }
- }
-
- serverPayload := []byte{1, 2, 3, 4}
- {
- var r bytes.Reader
- r.Reset(serverPayload)
- wOpts := tcpip.WriteOptions{
- To: &clientAddr,
- }
- if n, err := server.Write(&r, wOpts); err != nil {
- t.Fatalf("server.Write(%#v, %#v): %s", serverPayload, wOpts, err)
- } else if n != int64(len(serverPayload)) {
- t.Fatalf("got server.Write(%#v, %#v) = (%d, nil), want = (%d, nil)", serverPayload, wOpts, n, len(serverPayload))
- }
- }
-
- // Wait for the client endpoint to become readable.
- <-clientCH
-
- readBuf.Reset()
- if read, err := client.Read(&readBuf, tcpip.ReadOptions{NeedRemoteAddr: true}); err != nil {
- t.Fatalf("client.Read(_): %s", err)
- } else {
- if diff := cmp.Diff(tcpip.ReadResult{
- Count: readBuf.Len(),
- Total: readBuf.Len(),
- RemoteAddr: tcpip.FullAddress{Addr: serverAddr.Addr},
- }, read, checker.IgnoreCmpPath(
- "ControlMessages",
- "RemoteAddr.NIC",
- "RemoteAddr.Port",
- )); diff != "" {
- t.Errorf("client.Read: unexpected result (-want +got):\n%s", diff)
- }
- if diff := cmp.Diff(buffer.View(serverPayload), buffer.View(readBuf.Bytes())); diff != "" {
- t.Errorf("client read serverPayload mismatch (-want +got):\n%s", diff)
- }
- if t.Failed() {
- t.FailNow()
- }
- }
- })
- }
- })
- }
-}
diff --git a/pkg/tcpip/tests/utils/BUILD b/pkg/tcpip/tests/utils/BUILD
deleted file mode 100644
index a9699a367..000000000
--- a/pkg/tcpip/tests/utils/BUILD
+++ /dev/null
@@ -1,22 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "utils",
- srcs = ["utils.go"],
- visibility = ["//pkg/tcpip/tests:__subpackages__"],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/header",
- "//pkg/tcpip/link/channel",
- "//pkg/tcpip/link/ethernet",
- "//pkg/tcpip/link/nested",
- "//pkg/tcpip/link/pipe",
- "//pkg/tcpip/network/ipv4",
- "//pkg/tcpip/network/ipv6",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/transport/icmp",
- ],
-)
diff --git a/pkg/tcpip/tests/utils/utils.go b/pkg/tcpip/tests/utils/utils.go
deleted file mode 100644
index 2e6ae55ea..000000000
--- a/pkg/tcpip/tests/utils/utils.go
+++ /dev/null
@@ -1,390 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package utils holds common testing utilities for tcpip.
-package utils
-
-import (
- "net"
- "testing"
-
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/link/channel"
- "gvisor.dev/gvisor/pkg/tcpip/link/ethernet"
- "gvisor.dev/gvisor/pkg/tcpip/link/nested"
- "gvisor.dev/gvisor/pkg/tcpip/link/pipe"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/transport/icmp"
-)
-
-// Common NIC IDs used by tests.
-const (
- Host1NICID = 1
- RouterNICID1 = 2
- RouterNICID2 = 3
- Host2NICID = 4
-)
-
-// Common link addresses used by tests.
-const (
- LinkAddr1 = tcpip.LinkAddress("\x02\x03\x03\x04\x05\x06")
- LinkAddr2 = tcpip.LinkAddress("\x02\x03\x03\x04\x05\x07")
- LinkAddr3 = tcpip.LinkAddress("\x02\x03\x03\x04\x05\x08")
- LinkAddr4 = tcpip.LinkAddress("\x02\x03\x03\x04\x05\x09")
-)
-
-// Common IP addresses used by tests.
-var (
- Ipv4Addr = tcpip.AddressWithPrefix{
- Address: tcpip.Address(net.ParseIP("192.168.1.58").To4()),
- PrefixLen: 24,
- }
- Ipv4Subnet = Ipv4Addr.Subnet()
- Ipv4SubnetBcast = Ipv4Subnet.Broadcast()
-
- Ipv6Addr = tcpip.AddressWithPrefix{
- Address: tcpip.Address(net.ParseIP("200a::1").To16()),
- PrefixLen: 64,
- }
- Ipv6Subnet = Ipv6Addr.Subnet()
- Ipv6SubnetBcast = Ipv6Subnet.Broadcast()
-
- Ipv4Addr1 = tcpip.ProtocolAddress{
- Protocol: ipv4.ProtocolNumber,
- AddressWithPrefix: tcpip.AddressWithPrefix{
- Address: tcpip.Address(net.ParseIP("192.168.0.1").To4()),
- PrefixLen: 24,
- },
- }
- Ipv4Addr2 = tcpip.ProtocolAddress{
- Protocol: ipv4.ProtocolNumber,
- AddressWithPrefix: tcpip.AddressWithPrefix{
- Address: tcpip.Address(net.ParseIP("192.168.0.2").To4()),
- PrefixLen: 8,
- },
- }
- Ipv4Addr3 = tcpip.ProtocolAddress{
- Protocol: ipv4.ProtocolNumber,
- AddressWithPrefix: tcpip.AddressWithPrefix{
- Address: tcpip.Address(net.ParseIP("192.168.0.3").To4()),
- PrefixLen: 8,
- },
- }
- Ipv6Addr1 = tcpip.ProtocolAddress{
- Protocol: ipv6.ProtocolNumber,
- AddressWithPrefix: tcpip.AddressWithPrefix{
- Address: tcpip.Address(net.ParseIP("a::1").To16()),
- PrefixLen: 64,
- },
- }
- Ipv6Addr2 = tcpip.ProtocolAddress{
- Protocol: ipv6.ProtocolNumber,
- AddressWithPrefix: tcpip.AddressWithPrefix{
- Address: tcpip.Address(net.ParseIP("a::2").To16()),
- PrefixLen: 64,
- },
- }
- Ipv6Addr3 = tcpip.ProtocolAddress{
- Protocol: ipv6.ProtocolNumber,
- AddressWithPrefix: tcpip.AddressWithPrefix{
- Address: tcpip.Address(net.ParseIP("a::3").To16()),
- PrefixLen: 64,
- },
- }
-
- // Remote addrs.
- RemoteIPv4Addr = tcpip.Address(net.ParseIP("10.0.0.1").To4())
- RemoteIPv6Addr = tcpip.Address(net.ParseIP("200b::1").To16())
-)
-
-// Common ports for testing.
-const (
- RemotePort = 5555
- LocalPort = 80
-)
-
-// Addresses for the host1 <-> router <-> host2 topology configured by SetupRoutedStacks.
-var (
- Host1IPv4Addr = tcpip.ProtocolAddress{
- Protocol: ipv4.ProtocolNumber,
- AddressWithPrefix: tcpip.AddressWithPrefix{
- Address: tcpip.Address(net.ParseIP("192.168.0.2").To4()),
- PrefixLen: 24,
- },
- }
- RouterNIC1IPv4Addr = tcpip.ProtocolAddress{
- Protocol: ipv4.ProtocolNumber,
- AddressWithPrefix: tcpip.AddressWithPrefix{
- Address: tcpip.Address(net.ParseIP("192.168.0.1").To4()),
- PrefixLen: 24,
- },
- }
- RouterNIC2IPv4Addr = tcpip.ProtocolAddress{
- Protocol: ipv4.ProtocolNumber,
- AddressWithPrefix: tcpip.AddressWithPrefix{
- Address: tcpip.Address(net.ParseIP("10.0.0.1").To4()),
- PrefixLen: 8,
- },
- }
- Host2IPv4Addr = tcpip.ProtocolAddress{
- Protocol: ipv4.ProtocolNumber,
- AddressWithPrefix: tcpip.AddressWithPrefix{
- Address: tcpip.Address(net.ParseIP("10.0.0.2").To4()),
- PrefixLen: 8,
- },
- }
- Host1IPv6Addr = tcpip.ProtocolAddress{
- Protocol: ipv6.ProtocolNumber,
- AddressWithPrefix: tcpip.AddressWithPrefix{
- Address: tcpip.Address(net.ParseIP("a::2").To16()),
- PrefixLen: 64,
- },
- }
- RouterNIC1IPv6Addr = tcpip.ProtocolAddress{
- Protocol: ipv6.ProtocolNumber,
- AddressWithPrefix: tcpip.AddressWithPrefix{
- Address: tcpip.Address(net.ParseIP("a::1").To16()),
- PrefixLen: 64,
- },
- }
- RouterNIC2IPv6Addr = tcpip.ProtocolAddress{
- Protocol: ipv6.ProtocolNumber,
- AddressWithPrefix: tcpip.AddressWithPrefix{
- Address: tcpip.Address(net.ParseIP("b::1").To16()),
- PrefixLen: 64,
- },
- }
- Host2IPv6Addr = tcpip.ProtocolAddress{
- Protocol: ipv6.ProtocolNumber,
- AddressWithPrefix: tcpip.AddressWithPrefix{
- Address: tcpip.Address(net.ParseIP("b::2").To16()),
- PrefixLen: 64,
- },
- }
-)
-
-// NewEthernetEndpoint returns an ethernet link endpoint that wraps an inner
-// link endpoint and checks the destination link address before delivering
-// network packets to the network dispatcher.
-//
-// See ethernet.Endpoint for more details.
-func NewEthernetEndpoint(ep stack.LinkEndpoint) *EndpointWithDestinationCheck {
- var e EndpointWithDestinationCheck
- e.Endpoint.Init(ethernet.New(ep), &e)
- return &e
-}
-
-// EndpointWithDestinationCheck is a link endpoint that checks the destination
-// link address before delivering network packets to the network dispatcher.
-type EndpointWithDestinationCheck struct {
- nested.Endpoint
-}
-
-var _ stack.NetworkDispatcher = (*EndpointWithDestinationCheck)(nil)
-var _ stack.LinkEndpoint = (*EndpointWithDestinationCheck)(nil)
-
-// DeliverNetworkPacket implements stack.NetworkDispatcher.
-func (e *EndpointWithDestinationCheck) DeliverNetworkPacket(src, dst tcpip.LinkAddress, proto tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {
- if dst == e.Endpoint.LinkAddress() || dst == header.EthernetBroadcastAddress || header.IsMulticastEthernetAddress(dst) {
- e.Endpoint.DeliverNetworkPacket(src, dst, proto, pkt)
- }
-}
-
-// SetupRoutedStacks creates the NICs, sets forwarding, adds addresses and sets
-// the route tables for the passed stacks.
-func SetupRoutedStacks(t *testing.T, host1Stack, routerStack, host2Stack *stack.Stack) {
- host1NIC, routerNIC1 := pipe.New(LinkAddr1, LinkAddr2)
- routerNIC2, host2NIC := pipe.New(LinkAddr3, LinkAddr4)
-
- if err := host1Stack.CreateNIC(Host1NICID, NewEthernetEndpoint(host1NIC)); err != nil {
- t.Fatalf("host1Stack.CreateNIC(%d, _): %s", Host1NICID, err)
- }
- if err := routerStack.CreateNIC(RouterNICID1, NewEthernetEndpoint(routerNIC1)); err != nil {
- t.Fatalf("routerStack.CreateNIC(%d, _): %s", RouterNICID1, err)
- }
- if err := routerStack.CreateNIC(RouterNICID2, NewEthernetEndpoint(routerNIC2)); err != nil {
- t.Fatalf("routerStack.CreateNIC(%d, _): %s", RouterNICID2, err)
- }
- if err := host2Stack.CreateNIC(Host2NICID, NewEthernetEndpoint(host2NIC)); err != nil {
- t.Fatalf("host2Stack.CreateNIC(%d, _): %s", Host2NICID, err)
- }
-
- if err := routerStack.SetForwardingDefaultAndAllNICs(ipv4.ProtocolNumber, true); err != nil {
- t.Fatalf("routerStack.SetForwardingDefaultAndAllNICs(%d): %s", ipv4.ProtocolNumber, err)
- }
- if err := routerStack.SetForwardingDefaultAndAllNICs(ipv6.ProtocolNumber, true); err != nil {
- t.Fatalf("routerStack.SetForwardingDefaultAndAllNICs(%d): %s", ipv6.ProtocolNumber, err)
- }
-
- if err := host1Stack.AddProtocolAddress(Host1NICID, Host1IPv4Addr); err != nil {
- t.Fatalf("host1Stack.AddProtocolAddress(%d, %#v): %s", Host1NICID, Host1IPv4Addr, err)
- }
- if err := routerStack.AddProtocolAddress(RouterNICID1, RouterNIC1IPv4Addr); err != nil {
- t.Fatalf("routerStack.AddProtocolAddress(%d, %#v): %s", RouterNICID1, RouterNIC1IPv4Addr, err)
- }
- if err := routerStack.AddProtocolAddress(RouterNICID2, RouterNIC2IPv4Addr); err != nil {
- t.Fatalf("routerStack.AddProtocolAddress(%d, %#v): %s", RouterNICID2, RouterNIC2IPv4Addr, err)
- }
- if err := host2Stack.AddProtocolAddress(Host2NICID, Host2IPv4Addr); err != nil {
- t.Fatalf("host2Stack.AddProtocolAddress(%d, %#v): %s", Host2NICID, Host2IPv4Addr, err)
- }
- if err := host1Stack.AddProtocolAddress(Host1NICID, Host1IPv6Addr); err != nil {
- t.Fatalf("host1Stack.AddProtocolAddress(%d, %#v): %s", Host1NICID, Host1IPv6Addr, err)
- }
- if err := routerStack.AddProtocolAddress(RouterNICID1, RouterNIC1IPv6Addr); err != nil {
- t.Fatalf("routerStack.AddProtocolAddress(%d, %#v): %s", RouterNICID1, RouterNIC1IPv6Addr, err)
- }
- if err := routerStack.AddProtocolAddress(RouterNICID2, RouterNIC2IPv6Addr); err != nil {
- t.Fatalf("routerStack.AddProtocolAddress(%d, %#v): %s", RouterNICID2, RouterNIC2IPv6Addr, err)
- }
- if err := host2Stack.AddProtocolAddress(Host2NICID, Host2IPv6Addr); err != nil {
- t.Fatalf("host2Stack.AddProtocolAddress(%d, %#v): %s", Host2NICID, Host2IPv6Addr, err)
- }
-
- host1Stack.SetRouteTable([]tcpip.Route{
- {
- Destination: Host1IPv4Addr.AddressWithPrefix.Subnet(),
- NIC: Host1NICID,
- },
- {
- Destination: Host1IPv6Addr.AddressWithPrefix.Subnet(),
- NIC: Host1NICID,
- },
- {
- Destination: Host2IPv4Addr.AddressWithPrefix.Subnet(),
- Gateway: RouterNIC1IPv4Addr.AddressWithPrefix.Address,
- NIC: Host1NICID,
- },
- {
- Destination: Host2IPv6Addr.AddressWithPrefix.Subnet(),
- Gateway: RouterNIC1IPv6Addr.AddressWithPrefix.Address,
- NIC: Host1NICID,
- },
- })
- routerStack.SetRouteTable([]tcpip.Route{
- {
- Destination: RouterNIC1IPv4Addr.AddressWithPrefix.Subnet(),
- NIC: RouterNICID1,
- },
- {
- Destination: RouterNIC1IPv6Addr.AddressWithPrefix.Subnet(),
- NIC: RouterNICID1,
- },
- {
- Destination: RouterNIC2IPv4Addr.AddressWithPrefix.Subnet(),
- NIC: RouterNICID2,
- },
- {
- Destination: RouterNIC2IPv6Addr.AddressWithPrefix.Subnet(),
- NIC: RouterNICID2,
- },
- })
- host2Stack.SetRouteTable([]tcpip.Route{
- {
- Destination: Host2IPv4Addr.AddressWithPrefix.Subnet(),
- NIC: Host2NICID,
- },
- {
- Destination: Host2IPv6Addr.AddressWithPrefix.Subnet(),
- NIC: Host2NICID,
- },
- {
- Destination: Host1IPv4Addr.AddressWithPrefix.Subnet(),
- Gateway: RouterNIC2IPv4Addr.AddressWithPrefix.Address,
- NIC: Host2NICID,
- },
- {
- Destination: Host1IPv6Addr.AddressWithPrefix.Subnet(),
- Gateway: RouterNIC2IPv6Addr.AddressWithPrefix.Address,
- NIC: Host2NICID,
- },
- })
-}
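For illustration, a minimal sketch of how a test might construct the three stacks before handing them to the helper above; the protocol factories registered here are an assumption about what such a test needs, not code taken from this change.

func newRoutedStacks(t *testing.T) (host1, router, host2 *stack.Stack) {
	opts := stack.Options{
		NetworkProtocols:   []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},
		TransportProtocols: []stack.TransportProtocolFactory{icmp.NewProtocol4, icmp.NewProtocol6},
	}
	host1, router, host2 = stack.New(opts), stack.New(opts), stack.New(opts)
	// Wires host1 <-> router <-> host2 over pipes, enables forwarding on the
	// router, and installs the addresses and routes declared in this file.
	SetupRoutedStacks(t, host1, router, host2)
	return host1, router, host2
}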
-
-func rxICMPv4Echo(e *channel.Endpoint, src, dst tcpip.Address, ttl uint8, ty header.ICMPv4Type) {
- totalLen := header.IPv4MinimumSize + header.ICMPv4MinimumSize
- hdr := buffer.NewPrependable(totalLen)
- pkt := header.ICMPv4(hdr.Prepend(header.ICMPv4MinimumSize))
- pkt.SetType(ty)
- pkt.SetCode(header.ICMPv4UnusedCode)
- pkt.SetChecksum(0)
- pkt.SetChecksum(^header.Checksum(pkt, 0))
- ip := header.IPv4(hdr.Prepend(header.IPv4MinimumSize))
- ip.Encode(&header.IPv4Fields{
- TotalLength: uint16(totalLen),
- Protocol: uint8(icmp.ProtocolNumber4),
- TTL: ttl,
- SrcAddr: src,
- DstAddr: dst,
- })
- ip.SetChecksum(^ip.CalculateChecksum())
-
- e.InjectInbound(header.IPv4ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: hdr.View().ToVectorisedView(),
- }))
-}
-
-// RxICMPv4EchoRequest constructs and injects an ICMPv4 echo request packet on
-// the provided endpoint.
-func RxICMPv4EchoRequest(e *channel.Endpoint, src, dst tcpip.Address, ttl uint8) {
- rxICMPv4Echo(e, src, dst, ttl, header.ICMPv4Echo)
-}
-
-// RxICMPv4EchoReply constructs and injects an ICMPv4 echo reply packet on
-// the provided endpoint.
-func RxICMPv4EchoReply(e *channel.Endpoint, src, dst tcpip.Address, ttl uint8) {
- rxICMPv4Echo(e, src, dst, ttl, header.ICMPv4EchoReply)
-}
-
-func rxICMPv6Echo(e *channel.Endpoint, src, dst tcpip.Address, ttl uint8, ty header.ICMPv6Type) {
- totalLen := header.IPv6MinimumSize + header.ICMPv6MinimumSize
- hdr := buffer.NewPrependable(totalLen)
- pkt := header.ICMPv6(hdr.Prepend(header.ICMPv6MinimumSize))
- pkt.SetType(ty)
- pkt.SetCode(header.ICMPv6UnusedCode)
- pkt.SetChecksum(0)
- pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
- Header: pkt,
- Src: src,
- Dst: dst,
- }))
- ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
- ip.Encode(&header.IPv6Fields{
- PayloadLength: header.ICMPv6MinimumSize,
- TransportProtocol: icmp.ProtocolNumber6,
- HopLimit: ttl,
- SrcAddr: src,
- DstAddr: dst,
- })
-
- e.InjectInbound(header.IPv6ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: hdr.View().ToVectorisedView(),
- }))
-}
-
-// RxICMPv6EchoRequest constructs and injects an ICMPv6 echo request packet on
-// the provided endpoint.
-func RxICMPv6EchoRequest(e *channel.Endpoint, src, dst tcpip.Address, ttl uint8) {
- rxICMPv6Echo(e, src, dst, ttl, header.ICMPv6EchoRequest)
-}
-
-// RxICMPv6EchoReply constructs and injects an ICMPv6 echo reply packet on
-// the provided endpoint.
-func RxICMPv6EchoReply(e *channel.Endpoint, src, dst tcpip.Address, ttl uint8) {
- rxICMPv6Echo(e, src, dst, ttl, header.ICMPv6EchoReply)
-}
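A short, self-contained sketch of how the injection helpers above are driven; the queue size, NIC wiring, and TTL are illustrative values rather than code from this file, and the imports mirror the ones listed at the top of utils.go.

func injectEchoRequest(t *testing.T, s *stack.Stack, nicID tcpip.NICID) {
	t.Helper()
	ep := channel.New(1 /* size */, header.IPv4MinimumMTU, "" /* linkAddr */)
	if err := s.CreateNIC(nicID, ep); err != nil {
		t.Fatalf("CreateNIC(%d, _): %s", nicID, err)
	}
	// Deliver an ICMPv4 echo request from the canonical remote address to the
	// canonical local address defined above, using a typical TTL.
	RxICMPv4EchoRequest(ep, RemoteIPv4Addr, Ipv4Addr.Address, 64 /* ttl */)
}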
diff --git a/pkg/tcpip/testutil/BUILD b/pkg/tcpip/testutil/BUILD
deleted file mode 100644
index 02ee86ff1..000000000
--- a/pkg/tcpip/testutil/BUILD
+++ /dev/null
@@ -1,21 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "testutil",
- testonly = True,
- srcs = [
- "testutil.go",
- "testutil_unsafe.go",
- ],
- visibility = ["//visibility:public"],
- deps = ["//pkg/tcpip"],
-)
-
-go_test(
- name = "testutil_test",
- srcs = ["testutil_test.go"],
- library = ":testutil",
- deps = ["//pkg/tcpip"],
-)
diff --git a/pkg/tcpip/testutil/testutil.go b/pkg/tcpip/testutil/testutil.go
deleted file mode 100644
index 94b580a70..000000000
--- a/pkg/tcpip/testutil/testutil.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package testutil provides helper functions for netstack unit tests.
-package testutil
-
-import (
- "fmt"
- "net"
- "reflect"
- "strings"
-
- "gvisor.dev/gvisor/pkg/tcpip"
-)
-
-// MustParse4 parses an IPv4 string (e.g. "192.168.1.1") into a tcpip.Address.
-// Passing an IPv4-mapped IPv6 address will yield only the 4 IPv4 bytes.
-func MustParse4(addr string) tcpip.Address {
- ip := net.ParseIP(addr).To4()
- if ip == nil {
- panic(fmt.Sprintf("MustParse4 expects IPv4 addresses, but was passed %q", addr))
- }
- return tcpip.Address(ip)
-}
-
-// MustParse6 parses an IPv6 string (e.g. "fe80::1") into a tcpip.Address. Passing
-// an IPv4 address will yield an IPv4-mapped IPv6 address.
-func MustParse6(addr string) tcpip.Address {
- ip := net.ParseIP(addr).To16()
- if ip == nil {
- panic(fmt.Sprintf("MustParse6 was passed malformed address %q", addr))
- }
- return tcpip.Address(ip)
-}
-
-func checkFieldCounts(ref, multi reflect.Value) error {
- refTypeName := ref.Type().Name()
- multiTypeName := multi.Type().Name()
- refNumField := ref.NumField()
- multiNumField := multi.NumField()
-
- if refNumField != multiNumField {
- return fmt.Errorf("type %s has an incorrect number of fields: got = %d, want = %d (same as type %s)", multiTypeName, multiNumField, refNumField, refTypeName)
- }
-
- return nil
-}
-
-func validateField(ref reflect.Value, refName string, m tcpip.MultiCounterStat, multiName string) error {
- s, ok := ref.Addr().Interface().(**tcpip.StatCounter)
- if !ok {
- return fmt.Errorf("expected ref's type to be *StatCounter, but its type is %s", ref.Type().Elem().Name())
- }
-
- // The field names are expected to match (case insensitive).
- if !strings.EqualFold(refName, multiName) {
- return fmt.Errorf("wrong field name: got = %s, want = %s", multiName, refName)
- }
-
- base := (*s).Value()
- m.Increment()
- if (*s).Value() != base+1 {
- return fmt.Errorf("updates to the '%s MultiCounterStat' counters are not reflected in the '%s StatCounter'", multiName, refName)
- }
-
- return nil
-}
-
-// ValidateMultiCounterStats verifies that every counter stored in multi is
-// correctly tracking its counterpart in the given counters.
-func ValidateMultiCounterStats(multi reflect.Value, counters []reflect.Value) error {
- for _, c := range counters {
- if err := checkFieldCounts(c, multi); err != nil {
- return err
- }
- }
-
- for i := 0; i < multi.NumField(); i++ {
- multiName := multi.Type().Field(i).Name
- multiUnsafe := unsafeExposeUnexportedFields(multi.Field(i))
-
- if m, ok := multiUnsafe.Addr().Interface().(*tcpip.MultiCounterStat); ok {
- for _, c := range counters {
- if err := validateField(unsafeExposeUnexportedFields(c.Field(i)), c.Type().Field(i).Name, *m, multiName); err != nil {
- return err
- }
- }
- } else {
- var countersNextField []reflect.Value
- for _, c := range counters {
- countersNextField = append(countersNextField, c.Field(i))
- }
- if err := ValidateMultiCounterStats(multi.Field(i), countersNextField); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
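To make the reflection contract concrete, a hypothetical, self-contained example with mirrored one-field structs; the MultiCounterStat.Init call that wires two StatCounters together is an assumption about this package's API rather than something shown in this file.

type singleStats struct {
	PacketsSent *tcpip.StatCounter
}

type multiStats struct {
	PacketsSent tcpip.MultiCounterStat
}

func checkMirroredStats() error {
	a := singleStats{PacketsSent: new(tcpip.StatCounter)}
	b := singleStats{PacketsSent: new(tcpip.StatCounter)}
	var multi multiStats
	multi.PacketsSent.Init(a.PacketsSent, b.PacketsSent) // assumed API
	// Field names and counts must match, and every increment of the
	// aggregated counter must show up in both underlying counters.
	return ValidateMultiCounterStats(
		reflect.ValueOf(&multi).Elem(),
		[]reflect.Value{reflect.ValueOf(&a).Elem(), reflect.ValueOf(&b).Elem()},
	)
}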
-
-// MustParseLink parses a Link string into a tcpip.LinkAddress, panicking on
-// error.
-//
-// The string must be in the format aa:bb:cc:dd:ee:ff or aa-bb-cc-dd-ee-ff.
-func MustParseLink(addr string) tcpip.LinkAddress {
- parsed, err := tcpip.ParseMACAddress(addr)
- if err != nil {
- panic(fmt.Sprintf("tcpip.ParseMACAddress(%s): %s", addr, err))
- }
- return parsed
-}
diff --git a/pkg/tcpip/testutil/testutil_test.go b/pkg/tcpip/testutil/testutil_test.go
deleted file mode 100644
index 6aad9585d..000000000
--- a/pkg/tcpip/testutil/testutil_test.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package testutil
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/tcpip"
-)
-
-// Who tests the testutils?
-
-func TestMustParse4(t *testing.T) {
- tcs := []struct {
- str string
- addr tcpip.Address
- shouldPanic bool
- }{
- {
- str: "127.0.0.1",
- addr: "\x7f\x00\x00\x01",
- }, {
- str: "",
- shouldPanic: true,
- }, {
- str: "fe80::1",
- shouldPanic: true,
- }, {
- // In an ideal world this panics too, but net.IP
- // doesn't distinguish between IPv4 and IPv4-mapped
- // addresses.
- str: "::ffff:0.0.0.1",
- addr: "\x00\x00\x00\x01",
- },
- }
-
- for _, tc := range tcs {
- t.Run(tc.str, func(t *testing.T) {
- if tc.shouldPanic {
- defer func() {
- if r := recover(); r == nil {
- t.Errorf("panic expected, but did not occur")
- }
- }()
- }
- if got := MustParse4(tc.str); got != tc.addr {
- t.Errorf("got MustParse4(%s) = %s, want = %s", tc.str, got, tc.addr)
- }
- })
- }
-}
-
-func TestMustParse6(t *testing.T) {
- tcs := []struct {
- str string
- addr tcpip.Address
- shouldPanic bool
- }{
- {
- // In an ideal world this panics too, but net.IP
- // doesn't distinguish between IPv4 and IPv4-mapped
- // addresses.
- str: "127.0.0.1",
- addr: "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x7f\x00\x00\x01",
- }, {
- str: "",
- shouldPanic: true,
- }, {
- str: "fe80::1",
- addr: "\xfe\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01",
- }, {
- str: "::ffff:0.0.0.1",
- addr: "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x00\x00\x00\x01",
- },
- }
-
- for _, tc := range tcs {
- t.Run(tc.str, func(t *testing.T) {
- if tc.shouldPanic {
- defer func() {
- if r := recover(); r == nil {
- t.Errorf("panic expected, but did not occur")
- }
- }()
- }
- if got := MustParse6(tc.str); got != tc.addr {
- t.Errorf("got MustParse6(%s) = %s, want = %s", tc.str, got, tc.addr)
- }
- })
- }
-}
diff --git a/pkg/tcpip/testutil/testutil_unsafe.go b/pkg/tcpip/testutil/testutil_unsafe.go
deleted file mode 100644
index 5ff764800..000000000
--- a/pkg/tcpip/testutil/testutil_unsafe.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package testutil
-
-import (
- "reflect"
- "unsafe"
-)
-
-// unsafeExposeUnexportedFields takes a Value and returns a version of it in
-// which even unexported fields can be read and written.
-func unsafeExposeUnexportedFields(a reflect.Value) reflect.Value {
- return reflect.NewAt(a.Type(), unsafe.Pointer(a.UnsafeAddr())).Elem()
-}
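A minimal illustration of what the helper above enables, using a hypothetical type: a field Value reached through a pointer is addressable, so rebuilding it with reflect.NewAt yields a view of the same storage that is readable and settable even though the field is unexported.

type sample struct {
	hidden int
}

func bumpHidden(s *sample) {
	f := reflect.ValueOf(s).Elem().FieldByName("hidden") // f.CanSet() == false
	w := unsafeExposeUnexportedFields(f)                 // same storage, now settable
	w.SetInt(w.Int() + 1)
}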
diff --git a/pkg/tcpip/timer_test.go b/pkg/tcpip/timer_test.go
deleted file mode 100644
index ed1ed8ac6..000000000
--- a/pkg/tcpip/timer_test.go
+++ /dev/null
@@ -1,353 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcpip_test
-
-import (
- "math"
- "sync"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/tcpip"
-)
-
-func TestMonotonicTimeBefore(t *testing.T) {
- var mt tcpip.MonotonicTime
- if mt.Before(mt) {
- t.Errorf("%#v.Before(%#v)", mt, mt)
- }
-
- one := mt.Add(1)
- if one.Before(mt) {
- t.Errorf("%#v.Before(%#v)", one, mt)
- }
- if !mt.Before(one) {
- t.Errorf("!%#v.Before(%#v)", mt, one)
- }
-}
-
-func TestMonotonicTimeAfter(t *testing.T) {
- var mt tcpip.MonotonicTime
- if mt.After(mt) {
- t.Errorf("%#v.After(%#v)", mt, mt)
- }
-
- one := mt.Add(1)
- if mt.After(one) {
- t.Errorf("%#v.After(%#v)", mt, one)
- }
- if !one.After(mt) {
- t.Errorf("!%#v.After(%#v)", one, mt)
- }
-}
-
-func TestMonotonicTimeAddSub(t *testing.T) {
- var mt tcpip.MonotonicTime
- if one, two := mt.Add(2), mt.Add(1).Add(1); one != two {
- t.Errorf("mt.Add(2) != mt.Add(1).Add(1) (%#v != %#v)", one, two)
- }
-
- min := mt.Add(math.MinInt64)
- max := mt.Add(math.MaxInt64)
-
- if overflow := mt.Add(1).Add(math.MaxInt64); overflow != max {
- t.Errorf("mt.Add(math.MaxInt64) != mt.Add(1).Add(math.MaxInt64) (%#v != %#v)", max, overflow)
- }
- if underflow := mt.Add(-1).Add(math.MinInt64); underflow != min {
- t.Errorf("mt.Add(math.MinInt64) != mt.Add(-1).Add(math.MinInt64) (%#v != %#v)", min, underflow)
- }
-
- if got, want := min.Sub(min), time.Duration(0); want != got {
- t.Errorf("got min.Sub(min) = %d, want %d", got, want)
- }
- if got, want := max.Sub(max), time.Duration(0); want != got {
- t.Errorf("got max.Sub(max) = %d, want %d", got, want)
- }
-
- if overflow, want := max.Sub(min), time.Duration(math.MaxInt64); overflow != want {
- t.Errorf("mt.Add(math.MaxInt64).Sub(mt.Add(math.MinInt64)) != %s (%#v)", want, overflow)
- }
- if underflow, want := min.Sub(max), time.Duration(math.MinInt64); underflow != want {
- t.Errorf("mt.Add(math.MinInt64).Sub(mt.Add(math.MaxInt64)) != %s (%#v)", want, underflow)
- }
-}
-
-func TestMonotonicTimeSub(t *testing.T) {
- var mt tcpip.MonotonicTime
-
- if one, two := mt.Add(2), mt.Add(1).Add(1); one != two {
- t.Errorf("mt.Add(2) != mt.Add(1).Add(1) (%#v != %#v)", one, two)
- }
-
- if max, overflow := mt.Add(math.MaxInt64), mt.Add(1).Add(math.MaxInt64); max != overflow {
- t.Errorf("mt.Add(math.MaxInt64) != mt.Add(1).Add(math.MaxInt64) (%#v != %#v)", max, overflow)
- }
- if min, underflow := mt.Add(math.MinInt64), mt.Add(-1).Add(math.MinInt64); min != underflow {
- t.Errorf("mt.Add(math.MinInt64) != mt.Add(-1).Add(math.MinInt64) (%#v != %#v)", min, underflow)
- }
-}
-
-const (
- shortDuration = 1 * time.Nanosecond
- middleDuration = 100 * time.Millisecond
-)
-
-func TestJobReschedule(t *testing.T) {
- clock := tcpip.NewStdClock()
- var wg sync.WaitGroup
- var lock sync.Mutex
-
- for i := 0; i < 2; i++ {
- wg.Add(1)
-
- go func() {
- lock.Lock()
- // Assigning a new timer value updates the timer's locker and function.
- // This test makes sure there is no data race when reassigning a timer
- // that has an active (possibly already stopped) timer: a stopped timer
- // may still be blocked on the lock, waiting to check whether it was
- // stopped, while another goroutine holds that lock.
- job := tcpip.NewJob(clock, &lock, func() {
- wg.Done()
- })
- job.Schedule(shortDuration)
- lock.Unlock()
- }()
- }
- wg.Wait()
-}
-
-func stdClockWithAfter() (tcpip.Clock, func(time.Duration) <-chan time.Time) {
- return tcpip.NewStdClock(), time.After
-}
-
-func TestJobExecution(t *testing.T) {
- t.Parallel()
-
- clock, after := stdClockWithAfter()
- var lock sync.Mutex
- ch := make(chan struct{})
-
- job := tcpip.NewJob(clock, &lock, func() {
- ch <- struct{}{}
- })
- job.Schedule(shortDuration)
-
- // Wait for timer to fire.
- select {
- case <-ch:
- case <-after(middleDuration):
- t.Fatal("timed out waiting for timer to fire")
- }
-
- // The timer should have fired only once.
- select {
- case <-ch:
- t.Fatal("no other timers should have fired")
- case <-after(middleDuration):
- }
-}
-
-func TestCancellableTimerResetFromLongDuration(t *testing.T) {
- t.Parallel()
-
- clock, after := stdClockWithAfter()
- var lock sync.Mutex
- ch := make(chan struct{})
-
- job := tcpip.NewJob(clock, &lock, func() { ch <- struct{}{} })
- job.Schedule(middleDuration)
-
- lock.Lock()
- job.Cancel()
- lock.Unlock()
-
- job.Schedule(shortDuration)
-
- // Wait for timer to fire.
- select {
- case <-ch:
- case <-after(middleDuration):
- t.Fatal("timed out waiting for timer to fire")
- }
-
- // The timer should have fired only once.
- select {
- case <-ch:
- t.Fatal("no other timers should have fired")
- case <-after(middleDuration):
- }
-}
-
-func TestJobRescheduleFromShortDuration(t *testing.T) {
- t.Parallel()
-
- clock, after := stdClockWithAfter()
- var lock sync.Mutex
- ch := make(chan struct{})
-
- lock.Lock()
- job := tcpip.NewJob(clock, &lock, func() { ch <- struct{}{} })
- job.Schedule(shortDuration)
- job.Cancel()
- lock.Unlock()
-
- // Wait for timer to fire if it wasn't correctly stopped.
- select {
- case <-ch:
- t.Fatal("timer fired after being stopped")
- case <-after(middleDuration):
- }
-
- job.Schedule(shortDuration)
-
- // Wait for timer to fire.
- select {
- case <-ch:
- case <-after(middleDuration):
- t.Fatal("timed out waiting for timer to fire")
- }
-
- // The timer should have fired only once.
- select {
- case <-ch:
- t.Fatal("no other timers should have fired")
- case <-after(middleDuration):
- }
-}
-
-func TestJobImmediatelyCancel(t *testing.T) {
- t.Parallel()
-
- clock, after := stdClockWithAfter()
- var lock sync.Mutex
- ch := make(chan struct{})
-
- for i := 0; i < 1000; i++ {
- lock.Lock()
- job := tcpip.NewJob(clock, &lock, func() { ch <- struct{}{} })
- job.Schedule(shortDuration)
- job.Cancel()
- lock.Unlock()
- }
-
- // Wait for timer to fire if it wasn't correctly stopped.
- select {
- case <-ch:
- t.Fatal("timer fired after being stopped")
- case <-after(middleDuration):
- }
-}
-
-func stdClockWithAfterAndSleep() (tcpip.Clock, func(time.Duration) <-chan time.Time, func(time.Duration)) {
- clock, after := stdClockWithAfter()
- return clock, after, time.Sleep
-}
-
-func TestJobCancelledRescheduleWithoutLock(t *testing.T) {
- t.Parallel()
-
- clock, after, sleep := stdClockWithAfterAndSleep()
- var lock sync.Mutex
- ch := make(chan struct{})
-
- lock.Lock()
- job := tcpip.NewJob(clock, &lock, func() { ch <- struct{}{} })
- job.Schedule(shortDuration)
- job.Cancel()
- lock.Unlock()
-
- for i := 0; i < 10; i++ {
- job.Schedule(middleDuration)
-
- lock.Lock()
- // Sleep until the timer fires and gets blocked trying to take the lock.
- sleep(middleDuration * 2)
- job.Cancel()
- lock.Unlock()
- }
-
- // Wait for double the duration so timers that weren't correctly stopped can
- // fire.
- select {
- case <-ch:
- t.Fatal("timer fired after being stopped")
- case <-after(middleDuration * 2):
- }
-}
-
-func TestManyCancellableTimerResetAfterBlockedOnLock(t *testing.T) {
- t.Parallel()
-
- clock, after, sleep := stdClockWithAfterAndSleep()
- var lock sync.Mutex
- ch := make(chan struct{})
-
- lock.Lock()
- job := tcpip.NewJob(clock, &lock, func() { ch <- struct{}{} })
- job.Schedule(shortDuration)
- for i := 0; i < 10; i++ {
- // Sleep until the timer fires and gets blocked trying to take the lock.
- sleep(middleDuration)
- job.Cancel()
- job.Schedule(shortDuration)
- }
- lock.Unlock()
-
- // Wait long enough for the last timer to fire.
- select {
- case <-ch:
- case <-after(middleDuration):
- t.Fatal("timed out waiting for timer to fire")
- }
-
- // The timer should have fired only once.
- select {
- case <-ch:
- t.Fatal("no other timers should have fired")
- case <-after(middleDuration):
- }
-}
-
-func TestManyJobReschedulesUnderLock(t *testing.T) {
- t.Parallel()
-
- clock, after := stdClockWithAfter()
- var lock sync.Mutex
- ch := make(chan struct{})
-
- lock.Lock()
- job := tcpip.NewJob(clock, &lock, func() { ch <- struct{}{} })
- job.Schedule(shortDuration)
- for i := 0; i < 10; i++ {
- job.Cancel()
- job.Schedule(shortDuration)
- }
- lock.Unlock()
-
- // Wait long enough for the last timer to fire.
- select {
- case <-ch:
- case <-after(middleDuration):
- t.Fatal("timed out waiting for timer to fire")
- }
-
- // The timer should have fired only once.
- select {
- case <-ch:
- t.Fatal("no other timers should have fired")
- case <-after(middleDuration):
- }
-}
diff --git a/pkg/tcpip/transport/icmp/BUILD b/pkg/tcpip/transport/icmp/BUILD
deleted file mode 100644
index bbc0e3ecc..000000000
--- a/pkg/tcpip/transport/icmp/BUILD
+++ /dev/null
@@ -1,59 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "icmp_packet_list",
- out = "icmp_packet_list.go",
- package = "icmp",
- prefix = "icmpPacket",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*icmpPacket",
- "Linker": "*icmpPacket",
- },
-)
-
-go_library(
- name = "icmp",
- srcs = [
- "endpoint.go",
- "endpoint_state.go",
- "icmp_packet_list.go",
- "protocol.go",
- ],
- imports = ["gvisor.dev/gvisor/pkg/tcpip/buffer"],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/sleep",
- "//pkg/sync",
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/header",
- "//pkg/tcpip/ports",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/transport/raw",
- "//pkg/tcpip/transport/tcp",
- "//pkg/waiter",
- ],
-)
-
-go_test(
- name = "icmp_x_test",
- size = "small",
- srcs = ["icmp_test.go"],
- deps = [
- ":icmp",
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/checker",
- "//pkg/tcpip/header",
- "//pkg/tcpip/link/channel",
- "//pkg/tcpip/link/sniffer",
- "//pkg/tcpip/network/ipv4",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/testutil",
- "//pkg/waiter",
- ],
-)
diff --git a/pkg/tcpip/transport/icmp/icmp_packet_list.go b/pkg/tcpip/transport/icmp/icmp_packet_list.go
new file mode 100644
index 000000000..0aacdad3f
--- /dev/null
+++ b/pkg/tcpip/transport/icmp/icmp_packet_list.go
@@ -0,0 +1,221 @@
+package icmp
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type icmpPacketElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (icmpPacketElementMapper) linkerFor(elem *icmpPacket) *icmpPacket { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type icmpPacketList struct {
+ head *icmpPacket
+ tail *icmpPacket
+}
+
+// Reset resets list l to the empty state.
+func (l *icmpPacketList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *icmpPacketList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *icmpPacketList) Front() *icmpPacket {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *icmpPacketList) Back() *icmpPacket {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *icmpPacketList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (icmpPacketElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *icmpPacketList) PushFront(e *icmpPacket) {
+ linker := icmpPacketElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ icmpPacketElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *icmpPacketList) PushBack(e *icmpPacket) {
+ linker := icmpPacketElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ icmpPacketElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *icmpPacketList) PushBackList(m *icmpPacketList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ icmpPacketElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ icmpPacketElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *icmpPacketList) InsertAfter(b, e *icmpPacket) {
+ bLinker := icmpPacketElementMapper{}.linkerFor(b)
+ eLinker := icmpPacketElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ icmpPacketElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *icmpPacketList) InsertBefore(a, e *icmpPacket) {
+ aLinker := icmpPacketElementMapper{}.linkerFor(a)
+ eLinker := icmpPacketElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ icmpPacketElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *icmpPacketList) Remove(e *icmpPacket) {
+ linker := icmpPacketElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ icmpPacketElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ icmpPacketElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type icmpPacketEntry struct {
+ next *icmpPacket
+ prev *icmpPacket
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *icmpPacketEntry) Next() *icmpPacket {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *icmpPacketEntry) Prev() *icmpPacket {
+ return e.prev
+}
+
+// SetNext assigns 'entry' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *icmpPacketEntry) SetNext(elem *icmpPacket) {
+ e.next = elem
+}
+
+// SetPrev assigns 'entry' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *icmpPacketEntry) SetPrev(elem *icmpPacket) {
+ e.prev = elem
+}
diff --git a/pkg/tcpip/transport/icmp/icmp_state_autogen.go b/pkg/tcpip/transport/icmp/icmp_state_autogen.go
new file mode 100644
index 000000000..acfc8ff5f
--- /dev/null
+++ b/pkg/tcpip/transport/icmp/icmp_state_autogen.go
@@ -0,0 +1,168 @@
+// automatically generated by stateify.
+
+package icmp
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+ "gvisor.dev/gvisor/pkg/tcpip/buffer"
+)
+
+func (p *icmpPacket) StateTypeName() string {
+ return "pkg/tcpip/transport/icmp.icmpPacket"
+}
+
+func (p *icmpPacket) StateFields() []string {
+ return []string{
+ "icmpPacketEntry",
+ "senderAddress",
+ "data",
+ "receivedAt",
+ }
+}
+
+func (p *icmpPacket) beforeSave() {}
+
+// +checklocksignore
+func (p *icmpPacket) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+ var dataValue buffer.VectorisedView = p.saveData()
+ stateSinkObject.SaveValue(2, dataValue)
+ var receivedAtValue int64 = p.saveReceivedAt()
+ stateSinkObject.SaveValue(3, receivedAtValue)
+ stateSinkObject.Save(0, &p.icmpPacketEntry)
+ stateSinkObject.Save(1, &p.senderAddress)
+}
+
+func (p *icmpPacket) afterLoad() {}
+
+// +checklocksignore
+func (p *icmpPacket) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &p.icmpPacketEntry)
+ stateSourceObject.Load(1, &p.senderAddress)
+ stateSourceObject.LoadValue(2, new(buffer.VectorisedView), func(y interface{}) { p.loadData(y.(buffer.VectorisedView)) })
+ stateSourceObject.LoadValue(3, new(int64), func(y interface{}) { p.loadReceivedAt(y.(int64)) })
+}
+
+func (e *endpoint) StateTypeName() string {
+ return "pkg/tcpip/transport/icmp.endpoint"
+}
+
+func (e *endpoint) StateFields() []string {
+ return []string{
+ "TransportEndpointInfo",
+ "DefaultSocketOptionsHandler",
+ "waiterQueue",
+ "uniqueID",
+ "rcvReady",
+ "rcvList",
+ "rcvBufSize",
+ "rcvClosed",
+ "shutdownFlags",
+ "state",
+ "ttl",
+ "owner",
+ "ops",
+ "frozen",
+ }
+}
+
+// +checklocksignore
+func (e *endpoint) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.TransportEndpointInfo)
+ stateSinkObject.Save(1, &e.DefaultSocketOptionsHandler)
+ stateSinkObject.Save(2, &e.waiterQueue)
+ stateSinkObject.Save(3, &e.uniqueID)
+ stateSinkObject.Save(4, &e.rcvReady)
+ stateSinkObject.Save(5, &e.rcvList)
+ stateSinkObject.Save(6, &e.rcvBufSize)
+ stateSinkObject.Save(7, &e.rcvClosed)
+ stateSinkObject.Save(8, &e.shutdownFlags)
+ stateSinkObject.Save(9, &e.state)
+ stateSinkObject.Save(10, &e.ttl)
+ stateSinkObject.Save(11, &e.owner)
+ stateSinkObject.Save(12, &e.ops)
+ stateSinkObject.Save(13, &e.frozen)
+}
+
+// +checklocksignore
+func (e *endpoint) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.TransportEndpointInfo)
+ stateSourceObject.Load(1, &e.DefaultSocketOptionsHandler)
+ stateSourceObject.Load(2, &e.waiterQueue)
+ stateSourceObject.Load(3, &e.uniqueID)
+ stateSourceObject.Load(4, &e.rcvReady)
+ stateSourceObject.Load(5, &e.rcvList)
+ stateSourceObject.Load(6, &e.rcvBufSize)
+ stateSourceObject.Load(7, &e.rcvClosed)
+ stateSourceObject.Load(8, &e.shutdownFlags)
+ stateSourceObject.Load(9, &e.state)
+ stateSourceObject.Load(10, &e.ttl)
+ stateSourceObject.Load(11, &e.owner)
+ stateSourceObject.Load(12, &e.ops)
+ stateSourceObject.Load(13, &e.frozen)
+ stateSourceObject.AfterLoad(e.afterLoad)
+}
+
+func (l *icmpPacketList) StateTypeName() string {
+ return "pkg/tcpip/transport/icmp.icmpPacketList"
+}
+
+func (l *icmpPacketList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *icmpPacketList) beforeSave() {}
+
+// +checklocksignore
+func (l *icmpPacketList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *icmpPacketList) afterLoad() {}
+
+// +checklocksignore
+func (l *icmpPacketList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *icmpPacketEntry) StateTypeName() string {
+ return "pkg/tcpip/transport/icmp.icmpPacketEntry"
+}
+
+func (e *icmpPacketEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *icmpPacketEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *icmpPacketEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *icmpPacketEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *icmpPacketEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func init() {
+ state.Register((*icmpPacket)(nil))
+ state.Register((*endpoint)(nil))
+ state.Register((*icmpPacketList)(nil))
+ state.Register((*icmpPacketEntry)(nil))
+}
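The generated file above follows the state package's save/restore pattern; as a condensed sketch, a hypothetical two-field type would implement the same hooks, with the indices passed to Save and Load matching the order returned by StateFields (the type and the name "example.pair" are invented for illustration).

type pair struct {
	first  int
	second string
}

func (p *pair) StateTypeName() string { return "example.pair" }
func (p *pair) StateFields() []string { return []string{"first", "second"} }
func (p *pair) beforeSave()           {}
func (p *pair) afterLoad()            {}

func (p *pair) StateSave(sink state.Sink) {
	p.beforeSave()
	sink.Save(0, &p.first)
	sink.Save(1, &p.second)
}

func (p *pair) StateLoad(source state.Source) {
	source.Load(0, &p.first)
	source.Load(1, &p.second)
}

func init() {
	state.Register((*pair)(nil)) // makes the type discoverable during restore
}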
diff --git a/pkg/tcpip/transport/icmp/icmp_test.go b/pkg/tcpip/transport/icmp/icmp_test.go
deleted file mode 100644
index cc950cbde..000000000
--- a/pkg/tcpip/transport/icmp/icmp_test.go
+++ /dev/null
@@ -1,235 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package icmp_test
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/checker"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/link/channel"
- "gvisor.dev/gvisor/pkg/tcpip/link/sniffer"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/testutil"
- "gvisor.dev/gvisor/pkg/tcpip/transport/icmp"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-// TODO(https://gvisor.dev/issues/5623): Finish unit testing the icmp package.
-// See the issue for remaining areas of work.
-
-var (
- localV4Addr1 = testutil.MustParse4("10.0.0.1")
- localV4Addr2 = testutil.MustParse4("10.0.0.2")
- remoteV4Addr = testutil.MustParse4("10.0.0.3")
-)
-
-func addNICWithDefaultRoute(t *testing.T, s *stack.Stack, id tcpip.NICID, name string, addrV4 tcpip.Address) *channel.Endpoint {
- t.Helper()
-
- ep := channel.New(1 /* size */, header.IPv4MinimumMTU, "" /* linkAddr */)
- t.Cleanup(ep.Close)
-
- wep := stack.LinkEndpoint(ep)
- if testing.Verbose() {
- wep = sniffer.New(ep)
- }
-
- opts := stack.NICOptions{Name: name}
- if err := s.CreateNICWithOptions(id, wep, opts); err != nil {
- t.Fatalf("s.CreateNIC(%d, _) = %s", id, err)
- }
-
- if err := s.AddAddress(id, ipv4.ProtocolNumber, addrV4); err != nil {
- t.Fatalf("s.AddAddress(%d, %d, %s) = %s", id, ipv4.ProtocolNumber, addrV4, err)
- }
-
- s.AddRoute(tcpip.Route{
- Destination: header.IPv4EmptySubnet,
- NIC: id,
- })
-
- return ep
-}
-
-func writePayload(buf []byte) {
- for i := range buf {
- buf[i] = byte(i)
- }
-}
-
-func newICMPv4EchoRequest(payloadSize uint32) buffer.View {
- buf := buffer.NewView(header.ICMPv4MinimumSize + int(payloadSize))
- writePayload(buf[header.ICMPv4MinimumSize:])
-
- icmp := header.ICMPv4(buf)
- icmp.SetType(header.ICMPv4Echo)
- // No need to set the checksum; it is reset by the socket before the packet
- // is sent.
-
- return buf
-}
-
-// TestWriteUnboundWithBindToDevice exercises writing to an unbound ICMP socket
-// when SO_BINDTODEVICE is set to the non-default NIC for that subnet.
-//
-// Only IPv4 is tested. The logic to determine which NIC to use is agnostic to
-// the version of IP.
-func TestWriteUnboundWithBindToDevice(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{icmp.NewProtocol4},
- HandleLocal: true,
- })
-
- // Add two NICs, both with default routes on the same subnet. The first NIC
- // added will be the default NIC for that subnet.
- defaultEP := addNICWithDefaultRoute(t, s, 1, "default", localV4Addr1)
- alternateEP := addNICWithDefaultRoute(t, s, 2, "alternate", localV4Addr2)
-
- socket, err := s.NewEndpoint(icmp.ProtocolNumber4, ipv4.ProtocolNumber, &waiter.Queue{})
- if err != nil {
- t.Fatalf("s.NewEndpoint(%d, %d, _) = %s", icmp.ProtocolNumber4, ipv4.ProtocolNumber, err)
- }
- defer socket.Close()
-
- echoPayloadSize := defaultEP.MTU() - header.IPv4MinimumSize - header.ICMPv4MinimumSize
-
- // Send a packet without SO_BINDTODEVICE. This verifies that the first NIC
- // to be added is the default NIC to send packets when not explicitly bound.
- {
- buf := newICMPv4EchoRequest(echoPayloadSize)
- r := buf.Reader()
- n, err := socket.Write(&r, tcpip.WriteOptions{
- To: &tcpip.FullAddress{Addr: remoteV4Addr},
- })
- if err != nil {
- t.Fatalf("socket.Write(_, {To:%s}) = %s", remoteV4Addr, err)
- }
- if n != int64(len(buf)) {
- t.Fatalf("got n = %d, want n = %d", n, len(buf))
- }
-
- // Verify the packet was sent out the default NIC.
- p, ok := defaultEP.Read()
- if !ok {
- t.Fatalf("got defaultEP.Read(_) = _, false; want = _, true (packet wasn't written out)")
- }
-
- vv := buffer.NewVectorisedView(p.Pkt.Size(), p.Pkt.Views())
- b := vv.ToView()
-
- checker.IPv4(t, b, []checker.NetworkChecker{
- checker.SrcAddr(localV4Addr1),
- checker.DstAddr(remoteV4Addr),
- checker.ICMPv4(
- checker.ICMPv4Type(header.ICMPv4Echo),
- checker.ICMPv4Payload(buf[header.ICMPv4MinimumSize:]),
- ),
- }...)
-
- // Verify the packet was not sent out the alternate NIC.
- if p, ok := alternateEP.Read(); ok {
- t.Fatalf("got alternateEP.Read(_) = %+v, true; want = _, false", p)
- }
- }
-
- // Send a packet with SO_BINDTODEVICE. This exercises reliance on
- // SO_BINDTODEVICE to route the packet to the alternate NIC.
- {
- // Use SO_BINDTODEVICE to send over the alternate NIC by default.
- socket.SocketOptions().SetBindToDevice(2)
-
- buf := newICMPv4EchoRequest(echoPayloadSize)
- r := buf.Reader()
- n, err := socket.Write(&r, tcpip.WriteOptions{
- To: &tcpip.FullAddress{Addr: remoteV4Addr},
- })
- if err != nil {
- t.Fatalf("socket.Write(_, {To:%s}) = %s", tcpip.Address(remoteV4Addr), err)
- }
- if n != int64(len(buf)) {
- t.Fatalf("got n = %d, want n = %d", n, len(buf))
- }
-
- // Verify the packet was not sent out the default NIC.
- if p, ok := defaultEP.Read(); ok {
- t.Fatalf("got defaultEP.Read(_) = %+v, true; want = _, false", p)
- }
-
- // Verify the packet was sent out the alternate NIC.
- p, ok := alternateEP.Read()
- if !ok {
- t.Fatalf("got alternateEP.Read(_) = _, false; want = _, true (packet wasn't written out)")
- }
-
- vv := buffer.NewVectorisedView(p.Pkt.Size(), p.Pkt.Views())
- b := vv.ToView()
-
- checker.IPv4(t, b, []checker.NetworkChecker{
- checker.SrcAddr(localV4Addr2),
- checker.DstAddr(remoteV4Addr),
- checker.ICMPv4(
- checker.ICMPv4Type(header.ICMPv4Echo),
- checker.ICMPv4Payload(buf[header.ICMPv4MinimumSize:]),
- ),
- }...)
- }
-
- // Send a packet with SO_BINDTODEVICE cleared. This verifies that once the
- // device binding is cleared, writes fall back to the default NIC.
- {
- socket.SocketOptions().SetBindToDevice(0)
-
- buf := newICMPv4EchoRequest(echoPayloadSize)
- r := buf.Reader()
- n, err := socket.Write(&r, tcpip.WriteOptions{
- To: &tcpip.FullAddress{Addr: remoteV4Addr},
- })
- if err != nil {
- t.Fatalf("socket.Write(_, {To:%s}) = %s", tcpip.Address(remoteV4Addr), err)
- }
- if n != int64(len(buf)) {
- t.Fatalf("got n = %d, want n = %d", n, len(buf))
- }
-
- // Verify the packet was sent out the default NIC.
- p, ok := defaultEP.Read()
- if !ok {
- t.Fatalf("got defaultEP.Read(_) = _, false; want = _, true (packet wasn't written out)")
- }
-
- vv := buffer.NewVectorisedView(p.Pkt.Size(), p.Pkt.Views())
- b := vv.ToView()
-
- checker.IPv4(t, b, []checker.NetworkChecker{
- checker.SrcAddr(localV4Addr1),
- checker.DstAddr(remoteV4Addr),
- checker.ICMPv4(
- checker.ICMPv4Type(header.ICMPv4Echo),
- checker.ICMPv4Payload(buf[header.ICMPv4MinimumSize:]),
- ),
- }...)
-
- // Verify the packet was not sent out the alternate NIC.
- if p, ok := alternateEP.Read(); ok {
- t.Fatalf("got alternateEP.Read(_) = %+v, true; want = _, false", p)
- }
- }
-}
diff --git a/pkg/tcpip/transport/packet/BUILD b/pkg/tcpip/transport/packet/BUILD
deleted file mode 100644
index b989b1209..000000000
--- a/pkg/tcpip/transport/packet/BUILD
+++ /dev/null
@@ -1,37 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "packet_list",
- out = "packet_list.go",
- package = "packet",
- prefix = "packet",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*packet",
- "Linker": "*packet",
- },
-)
-
-go_library(
- name = "packet",
- srcs = [
- "endpoint.go",
- "endpoint_state.go",
- "packet_list.go",
- ],
- imports = ["gvisor.dev/gvisor/pkg/tcpip/buffer"],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/log",
- "//pkg/sleep",
- "//pkg/sync",
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/header",
- "//pkg/tcpip/stack",
- "//pkg/waiter",
- ],
-)
diff --git a/pkg/tcpip/transport/packet/packet_list.go b/pkg/tcpip/transport/packet/packet_list.go
new file mode 100644
index 000000000..2c983aad0
--- /dev/null
+++ b/pkg/tcpip/transport/packet/packet_list.go
@@ -0,0 +1,221 @@
+package packet
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type packetElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (packetElementMapper) linkerFor(elem *packet) *packet { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type packetList struct {
+ head *packet
+ tail *packet
+}
+
+// Reset resets list l to the empty state.
+func (l *packetList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *packetList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *packetList) Front() *packet {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *packetList) Back() *packet {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *packetList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (packetElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *packetList) PushFront(e *packet) {
+ linker := packetElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ packetElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *packetList) PushBack(e *packet) {
+ linker := packetElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ packetElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *packetList) PushBackList(m *packetList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ packetElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ packetElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *packetList) InsertAfter(b, e *packet) {
+ bLinker := packetElementMapper{}.linkerFor(b)
+ eLinker := packetElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ packetElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *packetList) InsertBefore(a, e *packet) {
+ aLinker := packetElementMapper{}.linkerFor(a)
+ eLinker := packetElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ packetElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *packetList) Remove(e *packet) {
+ linker := packetElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ packetElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ packetElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type packetEntry struct {
+ next *packet
+ prev *packet
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *packetEntry) Next() *packet {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *packetEntry) Prev() *packet {
+ return e.prev
+}
+
+// SetNext assigns 'entry' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *packetEntry) SetNext(elem *packet) {
+ e.next = elem
+}
+
+// SetPrev assigns 'entry' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *packetEntry) SetPrev(elem *packet) {
+ e.prev = elem
+}
diff --git a/pkg/tcpip/transport/packet/packet_state_autogen.go b/pkg/tcpip/transport/packet/packet_state_autogen.go
new file mode 100644
index 000000000..304f67c12
--- /dev/null
+++ b/pkg/tcpip/transport/packet/packet_state_autogen.go
@@ -0,0 +1,171 @@
+// automatically generated by stateify.
+
+package packet
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+ "gvisor.dev/gvisor/pkg/tcpip/buffer"
+)
+
+func (p *packet) StateTypeName() string {
+ return "pkg/tcpip/transport/packet.packet"
+}
+
+func (p *packet) StateFields() []string {
+ return []string{
+ "packetEntry",
+ "data",
+ "receivedAt",
+ "senderAddr",
+ "packetInfo",
+ }
+}
+
+func (p *packet) beforeSave() {}
+
+// +checklocksignore
+func (p *packet) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+ var dataValue buffer.VectorisedView = p.saveData()
+ stateSinkObject.SaveValue(1, dataValue)
+ var receivedAtValue int64 = p.saveReceivedAt()
+ stateSinkObject.SaveValue(2, receivedAtValue)
+ stateSinkObject.Save(0, &p.packetEntry)
+ stateSinkObject.Save(3, &p.senderAddr)
+ stateSinkObject.Save(4, &p.packetInfo)
+}
+
+func (p *packet) afterLoad() {}
+
+// +checklocksignore
+func (p *packet) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &p.packetEntry)
+ stateSourceObject.Load(3, &p.senderAddr)
+ stateSourceObject.Load(4, &p.packetInfo)
+ stateSourceObject.LoadValue(1, new(buffer.VectorisedView), func(y interface{}) { p.loadData(y.(buffer.VectorisedView)) })
+ stateSourceObject.LoadValue(2, new(int64), func(y interface{}) { p.loadReceivedAt(y.(int64)) })
+}
+
+func (ep *endpoint) StateTypeName() string {
+ return "pkg/tcpip/transport/packet.endpoint"
+}
+
+func (ep *endpoint) StateFields() []string {
+ return []string{
+ "TransportEndpointInfo",
+ "DefaultSocketOptionsHandler",
+ "netProto",
+ "waiterQueue",
+ "cooked",
+ "rcvList",
+ "rcvBufSize",
+ "rcvClosed",
+ "closed",
+ "bound",
+ "boundNIC",
+ "lastError",
+ "ops",
+ "frozen",
+ }
+}
+
+// +checklocksignore
+func (ep *endpoint) StateSave(stateSinkObject state.Sink) {
+ ep.beforeSave()
+ stateSinkObject.Save(0, &ep.TransportEndpointInfo)
+ stateSinkObject.Save(1, &ep.DefaultSocketOptionsHandler)
+ stateSinkObject.Save(2, &ep.netProto)
+ stateSinkObject.Save(3, &ep.waiterQueue)
+ stateSinkObject.Save(4, &ep.cooked)
+ stateSinkObject.Save(5, &ep.rcvList)
+ stateSinkObject.Save(6, &ep.rcvBufSize)
+ stateSinkObject.Save(7, &ep.rcvClosed)
+ stateSinkObject.Save(8, &ep.closed)
+ stateSinkObject.Save(9, &ep.bound)
+ stateSinkObject.Save(10, &ep.boundNIC)
+ stateSinkObject.Save(11, &ep.lastError)
+ stateSinkObject.Save(12, &ep.ops)
+ stateSinkObject.Save(13, &ep.frozen)
+}
+
+// +checklocksignore
+func (ep *endpoint) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &ep.TransportEndpointInfo)
+ stateSourceObject.Load(1, &ep.DefaultSocketOptionsHandler)
+ stateSourceObject.Load(2, &ep.netProto)
+ stateSourceObject.Load(3, &ep.waiterQueue)
+ stateSourceObject.Load(4, &ep.cooked)
+ stateSourceObject.Load(5, &ep.rcvList)
+ stateSourceObject.Load(6, &ep.rcvBufSize)
+ stateSourceObject.Load(7, &ep.rcvClosed)
+ stateSourceObject.Load(8, &ep.closed)
+ stateSourceObject.Load(9, &ep.bound)
+ stateSourceObject.Load(10, &ep.boundNIC)
+ stateSourceObject.Load(11, &ep.lastError)
+ stateSourceObject.Load(12, &ep.ops)
+ stateSourceObject.Load(13, &ep.frozen)
+ stateSourceObject.AfterLoad(ep.afterLoad)
+}
+
+func (l *packetList) StateTypeName() string {
+ return "pkg/tcpip/transport/packet.packetList"
+}
+
+func (l *packetList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *packetList) beforeSave() {}
+
+// +checklocksignore
+func (l *packetList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *packetList) afterLoad() {}
+
+// +checklocksignore
+func (l *packetList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *packetEntry) StateTypeName() string {
+ return "pkg/tcpip/transport/packet.packetEntry"
+}
+
+func (e *packetEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *packetEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *packetEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *packetEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *packetEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func init() {
+ state.Register((*packet)(nil))
+ state.Register((*endpoint)(nil))
+ state.Register((*packetList)(nil))
+ state.Register((*packetEntry)(nil))
+}
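The generated file above follows stateify's field-index pattern: StateFields fixes an ordering, and StateSave/StateLoad address every field by that index. Below is a minimal, self-contained sketch of the same pattern; mockSink and mockSource are hypothetical stand-ins invented for this illustration and are not the real Sink/Source types from gvisor.dev/gvisor/pkg/state.

// Hypothetical sketch of the field-index save/load pattern. mockSink and
// mockSource are invented for illustration; they are NOT gVisor's state API.
package main

import "fmt"

type mockSink struct{ fields map[int]interface{} }

func (s *mockSink) Save(idx int, v interface{}) { s.fields[idx] = v }

type mockSource struct{ fields map[int]interface{} }

func (s *mockSource) Load(idx int) interface{} { return s.fields[idx] }

// pair mirrors the generated pattern: every field listed by StateFields is
// saved and restored by its positional index.
type pair struct {
	head string // index 0
	tail string // index 1
}

func (p *pair) StateFields() []string { return []string{"head", "tail"} }

func (p *pair) StateSave(sink *mockSink) {
	sink.Save(0, p.head)
	sink.Save(1, p.tail)
}

func (p *pair) StateLoad(src *mockSource) {
	p.head = src.Load(0).(string)
	p.tail = src.Load(1).(string)
}

func main() {
	sink := &mockSink{fields: map[int]interface{}{}}
	(&pair{head: "a", tail: "z"}).StateSave(sink)

	var restored pair
	restored.StateLoad(&mockSource{fields: sink.fields})
	fmt.Printf("%+v\n", restored) // prints {head:a tail:z}
}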
diff --git a/pkg/tcpip/transport/raw/BUILD b/pkg/tcpip/transport/raw/BUILD
deleted file mode 100644
index 2eab09088..000000000
--- a/pkg/tcpip/transport/raw/BUILD
+++ /dev/null
@@ -1,39 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "raw_packet_list",
- out = "raw_packet_list.go",
- package = "raw",
- prefix = "rawPacket",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*rawPacket",
- "Linker": "*rawPacket",
- },
-)
-
-go_library(
- name = "raw",
- srcs = [
- "endpoint.go",
- "endpoint_state.go",
- "protocol.go",
- "raw_packet_list.go",
- ],
- imports = ["gvisor.dev/gvisor/pkg/tcpip/buffer"],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/log",
- "//pkg/sleep",
- "//pkg/sync",
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/header",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/transport/packet",
- "//pkg/waiter",
- ],
-)
diff --git a/pkg/tcpip/transport/raw/raw_packet_list.go b/pkg/tcpip/transport/raw/raw_packet_list.go
new file mode 100644
index 000000000..48804ff1b
--- /dev/null
+++ b/pkg/tcpip/transport/raw/raw_packet_list.go
@@ -0,0 +1,221 @@
+package raw
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type rawPacketElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (rawPacketElementMapper) linkerFor(elem *rawPacket) *rawPacket { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type rawPacketList struct {
+ head *rawPacket
+ tail *rawPacket
+}
+
+// Reset resets list l to the empty state.
+func (l *rawPacketList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *rawPacketList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *rawPacketList) Front() *rawPacket {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *rawPacketList) Back() *rawPacket {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *rawPacketList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (rawPacketElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *rawPacketList) PushFront(e *rawPacket) {
+ linker := rawPacketElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ rawPacketElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *rawPacketList) PushBack(e *rawPacket) {
+ linker := rawPacketElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ rawPacketElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *rawPacketList) PushBackList(m *rawPacketList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ rawPacketElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ rawPacketElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *rawPacketList) InsertAfter(b, e *rawPacket) {
+ bLinker := rawPacketElementMapper{}.linkerFor(b)
+ eLinker := rawPacketElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ rawPacketElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *rawPacketList) InsertBefore(a, e *rawPacket) {
+ aLinker := rawPacketElementMapper{}.linkerFor(a)
+ eLinker := rawPacketElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ rawPacketElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *rawPacketList) Remove(e *rawPacket) {
+ linker := rawPacketElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ rawPacketElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ rawPacketElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type rawPacketEntry struct {
+ next *rawPacket
+ prev *rawPacket
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *rawPacketEntry) Next() *rawPacket {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *rawPacketEntry) Prev() *rawPacket {
+ return e.prev
+}
+
+// SetNext assigns 'elem' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *rawPacketEntry) SetNext(elem *rawPacket) {
+ e.next = elem
+}
+
+// SetPrev assigns 'elem' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *rawPacketEntry) SetPrev(elem *rawPacket) {
+ e.prev = elem
+}
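rawPacketList above is an instantiation of the generic intrusive-list template, so elements carry their own links and the zero value is an empty list. Because the type is unexported, a usage sketch only makes sense as code imagined inside package raw; drainExample below is a hypothetical helper written under that assumption (rawPacket gets Next/Prev from its embedded rawPacketEntry) and is not part of gVisor.

// Hypothetical helper, assumed to live inside package raw next to the
// generated list. It moves every element of src onto a local list in O(1),
// then drains that list, grabbing Next before Remove so removal during
// iteration stays safe.
func drainExample(src *rawPacketList) int {
	var q rawPacketList // the zero value is an empty, ready-to-use list
	q.PushBackList(src) // src is emptied; q now owns the elements

	n := 0
	for p := q.Front(); p != nil; {
		next := p.Next() // capture before unlinking
		q.Remove(p)
		n++
		p = next
	}
	return n
}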
diff --git a/pkg/tcpip/transport/raw/raw_state_autogen.go b/pkg/tcpip/transport/raw/raw_state_autogen.go
new file mode 100644
index 000000000..aaeed9775
--- /dev/null
+++ b/pkg/tcpip/transport/raw/raw_state_autogen.go
@@ -0,0 +1,165 @@
+// automatically generated by stateify.
+
+package raw
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+ "gvisor.dev/gvisor/pkg/tcpip/buffer"
+)
+
+func (p *rawPacket) StateTypeName() string {
+ return "pkg/tcpip/transport/raw.rawPacket"
+}
+
+func (p *rawPacket) StateFields() []string {
+ return []string{
+ "rawPacketEntry",
+ "data",
+ "receivedAt",
+ "senderAddr",
+ }
+}
+
+func (p *rawPacket) beforeSave() {}
+
+// +checklocksignore
+func (p *rawPacket) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+ var dataValue buffer.VectorisedView = p.saveData()
+ stateSinkObject.SaveValue(1, dataValue)
+ var receivedAtValue int64 = p.saveReceivedAt()
+ stateSinkObject.SaveValue(2, receivedAtValue)
+ stateSinkObject.Save(0, &p.rawPacketEntry)
+ stateSinkObject.Save(3, &p.senderAddr)
+}
+
+func (p *rawPacket) afterLoad() {}
+
+// +checklocksignore
+func (p *rawPacket) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &p.rawPacketEntry)
+ stateSourceObject.Load(3, &p.senderAddr)
+ stateSourceObject.LoadValue(1, new(buffer.VectorisedView), func(y interface{}) { p.loadData(y.(buffer.VectorisedView)) })
+ stateSourceObject.LoadValue(2, new(int64), func(y interface{}) { p.loadReceivedAt(y.(int64)) })
+}
+
+func (e *endpoint) StateTypeName() string {
+ return "pkg/tcpip/transport/raw.endpoint"
+}
+
+func (e *endpoint) StateFields() []string {
+ return []string{
+ "TransportEndpointInfo",
+ "DefaultSocketOptionsHandler",
+ "waiterQueue",
+ "associated",
+ "rcvList",
+ "rcvBufSize",
+ "rcvClosed",
+ "closed",
+ "connected",
+ "bound",
+ "owner",
+ "ops",
+ "frozen",
+ }
+}
+
+// +checklocksignore
+func (e *endpoint) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.TransportEndpointInfo)
+ stateSinkObject.Save(1, &e.DefaultSocketOptionsHandler)
+ stateSinkObject.Save(2, &e.waiterQueue)
+ stateSinkObject.Save(3, &e.associated)
+ stateSinkObject.Save(4, &e.rcvList)
+ stateSinkObject.Save(5, &e.rcvBufSize)
+ stateSinkObject.Save(6, &e.rcvClosed)
+ stateSinkObject.Save(7, &e.closed)
+ stateSinkObject.Save(8, &e.connected)
+ stateSinkObject.Save(9, &e.bound)
+ stateSinkObject.Save(10, &e.owner)
+ stateSinkObject.Save(11, &e.ops)
+ stateSinkObject.Save(12, &e.frozen)
+}
+
+// +checklocksignore
+func (e *endpoint) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.TransportEndpointInfo)
+ stateSourceObject.Load(1, &e.DefaultSocketOptionsHandler)
+ stateSourceObject.Load(2, &e.waiterQueue)
+ stateSourceObject.Load(3, &e.associated)
+ stateSourceObject.Load(4, &e.rcvList)
+ stateSourceObject.Load(5, &e.rcvBufSize)
+ stateSourceObject.Load(6, &e.rcvClosed)
+ stateSourceObject.Load(7, &e.closed)
+ stateSourceObject.Load(8, &e.connected)
+ stateSourceObject.Load(9, &e.bound)
+ stateSourceObject.Load(10, &e.owner)
+ stateSourceObject.Load(11, &e.ops)
+ stateSourceObject.Load(12, &e.frozen)
+ stateSourceObject.AfterLoad(e.afterLoad)
+}
+
+func (l *rawPacketList) StateTypeName() string {
+ return "pkg/tcpip/transport/raw.rawPacketList"
+}
+
+func (l *rawPacketList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *rawPacketList) beforeSave() {}
+
+// +checklocksignore
+func (l *rawPacketList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *rawPacketList) afterLoad() {}
+
+// +checklocksignore
+func (l *rawPacketList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *rawPacketEntry) StateTypeName() string {
+ return "pkg/tcpip/transport/raw.rawPacketEntry"
+}
+
+func (e *rawPacketEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *rawPacketEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *rawPacketEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *rawPacketEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *rawPacketEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func init() {
+ state.Register((*rawPacket)(nil))
+ state.Register((*endpoint)(nil))
+ state.Register((*rawPacketList)(nil))
+ state.Register((*rawPacketEntry)(nil))
+}
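Unlike the plainly saved fields, data and receivedAt are routed through SaveValue/LoadValue and the hand-written saveData/loadData and saveReceivedAt/loadReceivedAt hooks defined elsewhere in the package, because their in-memory form is not what gets serialized. The snippet below is a hedged sketch of what such a hook pair can look like for a timestamp field; the pkt type and the hook bodies are assumptions made for illustration, not a copy of gVisor's implementation.

// Illustrative sketch of the saveX/loadX hook pattern used for fields that
// need a custom serialized representation. The shapes below are assumed.
package main

import (
	"fmt"
	"time"
)

type pkt struct {
	receivedAt time.Time
}

// saveReceivedAt converts the timestamp to a savable int64.
func (p *pkt) saveReceivedAt() int64 { return p.receivedAt.UnixNano() }

// loadReceivedAt restores the timestamp from its saved form.
func (p *pkt) loadReceivedAt(nsec int64) { p.receivedAt = time.Unix(0, nsec) }

func main() {
	orig := pkt{receivedAt: time.Now()}
	saved := orig.saveReceivedAt()

	var restored pkt
	restored.loadReceivedAt(saved)
	fmt.Println(orig.receivedAt.Equal(restored.receivedAt)) // true
}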
diff --git a/pkg/tcpip/transport/tcp/BUILD b/pkg/tcpip/transport/tcp/BUILD
deleted file mode 100644
index 8436d2cf0..000000000
--- a/pkg/tcpip/transport/tcp/BUILD
+++ /dev/null
@@ -1,139 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test", "more_shards")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "tcp_segment_list",
- out = "tcp_segment_list.go",
- package = "tcp",
- prefix = "segment",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*segment",
- "Linker": "*segment",
- },
-)
-
-go_template_instance(
- name = "tcp_endpoint_list",
- out = "tcp_endpoint_list.go",
- package = "tcp",
- prefix = "endpoint",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*endpoint",
- "Linker": "*endpoint",
- },
-)
-
-go_library(
- name = "tcp",
- srcs = [
- "accept.go",
- "connect.go",
- "connect_unsafe.go",
- "cubic.go",
- "dispatcher.go",
- "endpoint.go",
- "endpoint_state.go",
- "forwarder.go",
- "protocol.go",
- "rack.go",
- "rcv.go",
- "reno.go",
- "reno_recovery.go",
- "sack.go",
- "sack_recovery.go",
- "sack_scoreboard.go",
- "segment.go",
- "segment_heap.go",
- "segment_queue.go",
- "segment_state.go",
- "segment_unsafe.go",
- "snd.go",
- "tcp_endpoint_list.go",
- "tcp_segment_list.go",
- "timer.go",
- ],
- imports = ["gvisor.dev/gvisor/pkg/tcpip/buffer"],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/log",
- "//pkg/rand",
- "//pkg/sleep",
- "//pkg/sync",
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/hash/jenkins",
- "//pkg/tcpip/header",
- "//pkg/tcpip/header/parse",
- "//pkg/tcpip/ports",
- "//pkg/tcpip/seqnum",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/transport/raw",
- "//pkg/waiter",
- "@com_github_google_btree//:go_default_library",
- ],
-)
-
-go_test(
- name = "tcp_x_test",
- size = "medium",
- srcs = [
- "dual_stack_test.go",
- "sack_scoreboard_test.go",
- "tcp_noracedetector_test.go",
- "tcp_rack_test.go",
- "tcp_sack_test.go",
- "tcp_test.go",
- "tcp_timestamp_test.go",
- ],
- shard_count = more_shards,
- deps = [
- ":tcp",
- "//pkg/rand",
- "//pkg/sync",
- "//pkg/tcpip",
- "//pkg/tcpip/checker",
- "//pkg/tcpip/header",
- "//pkg/tcpip/link/loopback",
- "//pkg/tcpip/link/sniffer",
- "//pkg/tcpip/network/ipv4",
- "//pkg/tcpip/network/ipv6",
- "//pkg/tcpip/seqnum",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/testutil",
- "//pkg/tcpip/transport/tcp/testing/context",
- "//pkg/test/testutil",
- "//pkg/waiter",
- "@com_github_google_go_cmp//cmp:go_default_library",
- ],
-)
-
-go_test(
- name = "rcv_test",
- size = "small",
- srcs = ["rcv_test.go"],
- deps = [
- "//pkg/tcpip/header",
- "//pkg/tcpip/seqnum",
- ],
-)
-
-go_test(
- name = "tcp_test",
- size = "small",
- srcs = [
- "segment_test.go",
- "timer_test.go",
- ],
- library = ":tcp",
- deps = [
- "//pkg/sleep",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/faketime",
- "//pkg/tcpip/stack",
- "@com_github_google_go_cmp//cmp:go_default_library",
- ],
-)
diff --git a/pkg/tcpip/transport/tcp/dual_stack_test.go b/pkg/tcpip/transport/tcp/dual_stack_test.go
deleted file mode 100644
index 5342aacfd..000000000
--- a/pkg/tcpip/transport/tcp/dual_stack_test.go
+++ /dev/null
@@ -1,650 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_test
-
-import (
- "strings"
- "testing"
- "time"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/checker"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
- "gvisor.dev/gvisor/pkg/tcpip/seqnum"
- "gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
- "gvisor.dev/gvisor/pkg/tcpip/transport/tcp/testing/context"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-func TestV4MappedConnectOnV6Only(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateV6Endpoint(true)
-
- // Start connection attempt, it must fail.
- err := c.EP.Connect(tcpip.FullAddress{Addr: context.TestV4MappedAddr, Port: context.TestPort})
- if d := cmp.Diff(&tcpip.ErrNoRoute{}, err); d != "" {
- t.Fatalf("c.EP.Connect(...) mismatch (-want +got):\n%s", d)
- }
-}
-
-func testV4Connect(t *testing.T, c *context.Context, checkers ...checker.NetworkChecker) {
- // Start connection attempt.
- we, ch := waiter.NewChannelEntry(nil)
- c.WQ.EventRegister(&we, waiter.WritableEvents)
- defer c.WQ.EventUnregister(&we)
-
- err := c.EP.Connect(tcpip.FullAddress{Addr: context.TestV4MappedAddr, Port: context.TestPort})
- if d := cmp.Diff(&tcpip.ErrConnectStarted{}, err); d != "" {
- t.Fatalf("c.EP.Connect(...) mismatch (-want +got):\n%s", d)
- }
-
- // Receive SYN packet.
- b := c.GetPacket()
- synCheckers := append(checkers, checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagSyn),
- ))
- checker.IPv4(t, b, synCheckers...)
-
- tcp := header.TCP(header.IPv4(b).Payload())
- c.IRS = seqnum.Value(tcp.SequenceNumber())
-
- iss := seqnum.Value(789)
- c.SendPacket(nil, &context.Headers{
- SrcPort: tcp.DestinationPort(),
- DstPort: tcp.SourcePort(),
- Flags: header.TCPFlagSyn | header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(1),
- RcvWnd: 30000,
- })
-
- // Receive ACK packet.
- ackCheckers := append(checkers, checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagAck),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)+1),
- ))
- checker.IPv4(t, c.GetPacket(), ackCheckers...)
-
- // Wait for connection to be established.
- select {
- case <-ch:
- if err := c.EP.LastError(); err != nil {
- t.Fatalf("Unexpected error when connecting: %v", err)
- }
- case <-time.After(1 * time.Second):
- t.Fatalf("Timed out waiting for connection")
- }
-}
-
-func TestV4MappedConnect(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateV6Endpoint(false)
-
- // Test the connection request.
- testV4Connect(t, c)
-}
-
-func TestV4ConnectWhenBoundToWildcard(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateV6Endpoint(false)
-
- // Bind to wildcard.
- if err := c.EP.Bind(tcpip.FullAddress{}); err != nil {
- t.Fatalf("Bind failed: %v", err)
- }
-
- // Test the connection request.
- testV4Connect(t, c)
-}
-
-func TestV4ConnectWhenBoundToV4MappedWildcard(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateV6Endpoint(false)
-
- // Bind to v4 mapped wildcard.
- if err := c.EP.Bind(tcpip.FullAddress{Addr: context.V4MappedWildcardAddr}); err != nil {
- t.Fatalf("Bind failed: %v", err)
- }
-
- // Test the connection request.
- testV4Connect(t, c)
-}
-
-func TestV4ConnectWhenBoundToV4Mapped(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateV6Endpoint(false)
-
- // Bind to v4 mapped address.
- if err := c.EP.Bind(tcpip.FullAddress{Addr: context.StackV4MappedAddr}); err != nil {
- t.Fatalf("Bind failed: %v", err)
- }
-
- // Test the connection request.
- testV4Connect(t, c)
-}
-
-func testV6Connect(t *testing.T, c *context.Context, checkers ...checker.NetworkChecker) {
- // Start connection attempt to IPv6 address.
- we, ch := waiter.NewChannelEntry(nil)
- c.WQ.EventRegister(&we, waiter.WritableEvents)
- defer c.WQ.EventUnregister(&we)
-
- err := c.EP.Connect(tcpip.FullAddress{Addr: context.TestV6Addr, Port: context.TestPort})
- if d := cmp.Diff(&tcpip.ErrConnectStarted{}, err); d != "" {
- t.Fatalf("Connect(...) mismatch (-want +got):\n%s", d)
- }
-
- // Receive SYN packet.
- b := c.GetV6Packet()
- synCheckers := append(checkers, checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagSyn),
- ))
- checker.IPv6(t, b, synCheckers...)
-
- tcp := header.TCP(header.IPv6(b).Payload())
- c.IRS = seqnum.Value(tcp.SequenceNumber())
-
- iss := seqnum.Value(789)
- c.SendV6Packet(nil, &context.Headers{
- SrcPort: tcp.DestinationPort(),
- DstPort: tcp.SourcePort(),
- Flags: header.TCPFlagSyn | header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(1),
- RcvWnd: 30000,
- })
-
- // Receive ACK packet.
- ackCheckers := append(checkers, checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagAck),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)+1),
- ))
- checker.IPv6(t, c.GetV6Packet(), ackCheckers...)
-
- // Wait for connection to be established.
- select {
- case <-ch:
- if err := c.EP.LastError(); err != nil {
- t.Fatalf("Unexpected error when connecting: %v", err)
- }
- case <-time.After(1 * time.Second):
- t.Fatalf("Timed out waiting for connection")
- }
-}
-
-func TestV6Connect(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateV6Endpoint(false)
-
- // Test the connection request.
- testV6Connect(t, c)
-}
-
-func TestV6ConnectV6Only(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateV6Endpoint(true)
-
- // Test the connection request.
- testV6Connect(t, c)
-}
-
-func TestV6ConnectWhenBoundToWildcard(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateV6Endpoint(false)
-
- // Bind to wildcard.
- if err := c.EP.Bind(tcpip.FullAddress{}); err != nil {
- t.Fatalf("Bind failed: %v", err)
- }
-
- // Test the connection request.
- testV6Connect(t, c)
-}
-
-func TestStackV6OnlyConnectWhenBoundToWildcard(t *testing.T) {
- c := context.NewWithOpts(t, context.Options{
- EnableV6: true,
- MTU: defaultMTU,
- })
- defer c.Cleanup()
-
- // Create a v6 endpoint but don't set the v6-only TCP option.
- c.CreateV6Endpoint(false)
-
- // Bind to wildcard.
- if err := c.EP.Bind(tcpip.FullAddress{}); err != nil {
- t.Fatalf("Bind failed: %v", err)
- }
-
- // Test the connection request.
- testV6Connect(t, c)
-}
-
-func TestV6ConnectWhenBoundToLocalAddress(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateV6Endpoint(false)
-
- // Bind to local address.
- if err := c.EP.Bind(tcpip.FullAddress{Addr: context.StackV6Addr}); err != nil {
- t.Fatalf("Bind failed: %v", err)
- }
-
- // Test the connection request.
- testV6Connect(t, c)
-}
-
-func TestV4RefuseOnV6Only(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateV6Endpoint(true)
-
- // Bind to wildcard.
- if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %v", err)
- }
-
- // Start listening.
- if err := c.EP.Listen(10); err != nil {
- t.Fatalf("Listen failed: %v", err)
- }
-
- // Send a SYN request.
- irs := seqnum.Value(789)
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: irs,
- RcvWnd: 30000,
- })
-
- // Receive the RST reply.
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagRst|header.TCPFlagAck),
- checker.TCPAckNum(uint32(irs)+1),
- ),
- )
-}
-
-func TestV6RefuseOnBoundToV4Mapped(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateV6Endpoint(false)
-
- // Bind and listen.
- if err := c.EP.Bind(tcpip.FullAddress{Addr: context.V4MappedWildcardAddr, Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %v", err)
- }
-
- if err := c.EP.Listen(10); err != nil {
- t.Fatalf("Listen failed: %v", err)
- }
-
- // Send a SYN request.
- irs := seqnum.Value(789)
- c.SendV6Packet(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: irs,
- RcvWnd: 30000,
- })
-
- // Receive the RST reply.
- checker.IPv6(t, c.GetV6Packet(),
- checker.TCP(
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagRst|header.TCPFlagAck),
- checker.TCPAckNum(uint32(irs)+1),
- ),
- )
-}
-
-func testV4Accept(t *testing.T, c *context.Context) {
- c.SetGSOEnabled(true)
- defer c.SetGSOEnabled(false)
-
- // Start listening.
- if err := c.EP.Listen(10); err != nil {
- t.Fatalf("Listen failed: %v", err)
- }
-
- // Send a SYN request.
- irs := seqnum.Value(789)
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: irs,
- RcvWnd: 30000,
- })
-
- // Receive the SYN-ACK reply.
- b := c.GetPacket()
- tcp := header.TCP(header.IPv4(b).Payload())
- iss := seqnum.Value(tcp.SequenceNumber())
- checker.IPv4(t, b,
- checker.TCP(
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagAck|header.TCPFlagSyn),
- checker.TCPAckNum(uint32(irs)+1),
- ),
- )
-
- // Send ACK.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck,
- SeqNum: irs + 1,
- AckNum: iss + 1,
- RcvWnd: 30000,
- })
-
- // Try to accept the connection.
- we, ch := waiter.NewChannelEntry(nil)
- c.WQ.EventRegister(&we, waiter.ReadableEvents)
- defer c.WQ.EventUnregister(&we)
-
- nep, _, err := c.EP.Accept(nil)
- if cmp.Equal(&tcpip.ErrWouldBlock{}, err) {
- // Wait for connection to be established.
- select {
- case <-ch:
- nep, _, err = c.EP.Accept(nil)
- if err != nil {
- t.Fatalf("Accept failed: %v", err)
- }
-
- case <-time.After(1 * time.Second):
- t.Fatalf("Timed out waiting for accept")
- }
- }
-
- // Check the peer address.
- addr, err := nep.GetRemoteAddress()
- if err != nil {
- t.Fatalf("GetRemoteAddress failed failed: %v", err)
- }
-
- if addr.Addr != context.TestAddr {
- t.Fatalf("Unexpected remote address: got %v, want %v", addr.Addr, context.TestAddr)
- }
-
- var r strings.Reader
- data := "Don't panic"
- r.Reset(data)
- nep.Write(&r, tcpip.WriteOptions{})
- b = c.GetPacket()
- tcp = header.IPv4(b).Payload()
- if string(tcp.Payload()) != data {
- t.Fatalf("Unexpected data: got %v, want %v", string(tcp.Payload()), data)
- }
-}
-
-func TestV4AcceptOnV6(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateV6Endpoint(false)
-
- // Bind to wildcard.
- if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %v", err)
- }
-
- // Test acceptance.
- testV4Accept(t, c)
-}
-
-func TestV4AcceptOnBoundToV4MappedWildcard(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateV6Endpoint(false)
-
- // Bind to v4 mapped wildcard.
- if err := c.EP.Bind(tcpip.FullAddress{Addr: context.V4MappedWildcardAddr, Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %v", err)
- }
-
- // Test acceptance.
- testV4Accept(t, c)
-}
-
-func TestV4AcceptOnBoundToV4Mapped(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateV6Endpoint(false)
-
- // Bind and listen.
- if err := c.EP.Bind(tcpip.FullAddress{Addr: context.StackV4MappedAddr, Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %v", err)
- }
-
- // Test acceptance.
- testV4Accept(t, c)
-}
-
-func TestV6AcceptOnV6(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateV6Endpoint(false)
-
- // Bind and listen.
- if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %v", err)
- }
-
- if err := c.EP.Listen(10); err != nil {
- t.Fatalf("Listen failed: %v", err)
- }
-
- // Send a SYN request.
- irs := seqnum.Value(789)
- c.SendV6Packet(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: irs,
- RcvWnd: 30000,
- })
-
- // Receive the SYN-ACK reply.
- b := c.GetV6Packet()
- tcp := header.TCP(header.IPv6(b).Payload())
- iss := seqnum.Value(tcp.SequenceNumber())
- checker.IPv6(t, b,
- checker.TCP(
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagAck|header.TCPFlagSyn),
- checker.TCPAckNum(uint32(irs)+1),
- ),
- )
-
- // Send ACK.
- c.SendV6Packet(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck,
- SeqNum: irs + 1,
- AckNum: iss + 1,
- RcvWnd: 30000,
- })
-
- // Try to accept the connection.
- we, ch := waiter.NewChannelEntry(nil)
- c.WQ.EventRegister(&we, waiter.ReadableEvents)
- defer c.WQ.EventUnregister(&we)
- var addr tcpip.FullAddress
- _, _, err := c.EP.Accept(&addr)
- if cmp.Equal(&tcpip.ErrWouldBlock{}, err) {
- // Wait for connection to be established.
- select {
- case <-ch:
- _, _, err = c.EP.Accept(&addr)
- if err != nil {
- t.Fatalf("Accept failed: %v", err)
- }
-
- case <-time.After(1 * time.Second):
- t.Fatalf("Timed out waiting for accept")
- }
- }
-
- if addr.Addr != context.TestV6Addr {
- t.Errorf("Unexpected remote address: got %s, want %s", addr.Addr, context.TestV6Addr)
- }
-}
-
-func TestV4AcceptOnV4(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- // Create TCP endpoint.
- var err tcpip.Error
- c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &c.WQ)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %v", err)
- }
-
- // Bind to wildcard.
- if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %v", err)
- }
-
- // Test acceptance.
- testV4Accept(t, c)
-}
-
-func testV4ListenClose(t *testing.T, c *context.Context) {
- opt := tcpip.TCPAlwaysUseSynCookies(true)
- if err := c.Stack().SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- t.Fatalf("SetTransportProtocolOption(%d, &%T(%t)): %s", tcp.ProtocolNumber, opt, opt, err)
- }
-
- const n = 32
-
- // Start listening.
- if err := c.EP.Listen(n); err != nil {
- t.Fatalf("Listen failed: %v", err)
- }
-
- irs := seqnum.Value(789)
- for i := uint16(0); i < n; i++ {
- // Send a SYN request.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort + i,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: irs,
- RcvWnd: 30000,
- })
- }
-
- // Each of these ACKs will cause a syn-cookie based connection to be
- // accepted and delivered to the listening endpoint.
- for i := 0; i < n; i++ {
- b := c.GetPacket()
- tcp := header.TCP(header.IPv4(b).Payload())
- iss := seqnum.Value(tcp.SequenceNumber())
- // Send ACK.
- c.SendPacket(nil, &context.Headers{
- SrcPort: tcp.DestinationPort(),
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck,
- SeqNum: irs + 1,
- AckNum: iss + 1,
- RcvWnd: 30000,
- })
- }
-
- // Try to accept the connection.
- we, ch := waiter.NewChannelEntry(nil)
- c.WQ.EventRegister(&we, waiter.ReadableEvents)
- defer c.WQ.EventUnregister(&we)
- nep, _, err := c.EP.Accept(nil)
- if cmp.Equal(&tcpip.ErrWouldBlock{}, err) {
- // Wait for connection to be established.
- select {
- case <-ch:
- nep, _, err = c.EP.Accept(nil)
- if err != nil {
- t.Fatalf("Accept failed: %v", err)
- }
-
- case <-time.After(10 * time.Second):
- t.Fatalf("Timed out waiting for accept")
- }
- }
- nep.Close()
- c.EP.Close()
-}
-
-func TestV4ListenCloseOnV4(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- // Create TCP endpoint.
- var err tcpip.Error
- c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &c.WQ)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %v", err)
- }
-
- // Bind to wildcard.
- if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %v", err)
- }
-
- // Test acceptance.
- testV4ListenClose(t, c)
-}
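The handshake bookkeeping that the deleted dual-stack tests assert boils down to one rule: a SYN consumes one sequence number, so each side's first ACK acknowledges the other side's initial sequence number plus one. The snippet below works that arithmetic with plain uint32 values standing in for seqnum.Value; 789 matches the ISS used in the tests above, while 65000 is a made-up peer ISS.

// Worked handshake arithmetic: each side's ACK number is the peer's initial
// sequence number (ISS) plus one, because the SYN consumes one sequence number.
package main

import "fmt"

func main() {
	const issA uint32 = 789   // side A's ISS (value used in the tests above)
	const issB uint32 = 65000 // side B's ISS (hypothetical)

	// A -> B: SYN      seq=issA
	// B -> A: SYN-ACK  seq=issB, ack=issA+1
	// A -> B: ACK      seq=issA+1, ack=issB+1
	fmt.Println("SYN-ACK ack:", issA+1)         // 790
	fmt.Println("final ACK:  ", issA+1, issB+1) // 790 65001
}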
diff --git a/pkg/tcpip/transport/tcp/rcv_test.go b/pkg/tcpip/transport/tcp/rcv_test.go
deleted file mode 100644
index 8a026ec46..000000000
--- a/pkg/tcpip/transport/tcp/rcv_test.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rcv_test
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/seqnum"
-)
-
-func TestAcceptable(t *testing.T) {
- for _, tt := range []struct {
- segSeq seqnum.Value
- segLen seqnum.Size
- rcvNxt, rcvAcc seqnum.Value
- want bool
- }{
- // The segment is smaller than the window.
- {105, 2, 100, 104, false},
- {105, 2, 101, 105, true},
- {105, 2, 102, 106, true},
- {105, 2, 103, 107, true},
- {105, 2, 104, 108, true},
- {105, 2, 105, 109, true},
- {105, 2, 106, 110, true},
- {105, 2, 107, 111, false},
-
- // The segment is larger than the window.
- {105, 4, 103, 105, true},
- {105, 4, 104, 106, true},
- {105, 4, 105, 107, true},
- {105, 4, 106, 108, true},
- {105, 4, 107, 109, true},
- {105, 4, 108, 110, true},
- {105, 4, 109, 111, false},
- {105, 4, 110, 112, false},
-
- // The segment has no width.
- {105, 0, 100, 102, false},
- {105, 0, 101, 103, false},
- {105, 0, 102, 104, false},
- {105, 0, 103, 105, true},
- {105, 0, 104, 106, true},
- {105, 0, 105, 107, true},
- {105, 0, 106, 108, false},
- {105, 0, 107, 109, false},
-
- // The receive window has no width.
- {105, 2, 103, 103, false},
- {105, 2, 104, 104, false},
- {105, 2, 105, 105, false},
- {105, 2, 106, 106, false},
- {105, 2, 107, 107, false},
- {105, 2, 108, 108, false},
- {105, 2, 109, 109, false},
- } {
- if got := header.Acceptable(tt.segSeq, tt.segLen, tt.rcvNxt, tt.rcvAcc); got != tt.want {
- t.Errorf("header.Acceptable(%d, %d, %d, %d) = %t, want %t", tt.segSeq, tt.segLen, tt.rcvNxt, tt.rcvAcc, got, tt.want)
- }
- }
-}
diff --git a/pkg/tcpip/transport/tcp/sack_scoreboard_test.go b/pkg/tcpip/transport/tcp/sack_scoreboard_test.go
deleted file mode 100644
index b4e5ba0df..000000000
--- a/pkg/tcpip/transport/tcp/sack_scoreboard_test.go
+++ /dev/null
@@ -1,249 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_test
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/seqnum"
- "gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
-)
-
-const smss = 1500
-
-func initScoreboard(blocks []header.SACKBlock, iss seqnum.Value) *tcp.SACKScoreboard {
- s := tcp.NewSACKScoreboard(smss, iss)
- for _, blk := range blocks {
- s.Insert(blk)
- }
- return s
-}
-
-func TestSACKScoreboardIsSACKED(t *testing.T) {
- type blockTest struct {
- block header.SACKBlock
- sacked bool
- }
- testCases := []struct {
- comment string
- scoreboardBlocks []header.SACKBlock
- blockTests []blockTest
- iss seqnum.Value
- }{
- {
- "Test holes and unsacked SACK blocks in SACKed ranges and insertion of overlapping SACK blocks",
- []header.SACKBlock{{10, 20}, {10, 30}, {30, 40}, {41, 50}, {5, 10}, {1, 50}, {111, 120}, {101, 110}, {52, 120}},
- []blockTest{
- {header.SACKBlock{15, 21}, true},
- {header.SACKBlock{200, 201}, false},
- {header.SACKBlock{50, 51}, false},
- {header.SACKBlock{53, 120}, true},
- },
- 0,
- },
- {
- "Test disjoint SACKBlocks",
- []header.SACKBlock{{2288624809, 2288810057}, {2288811477, 2288838565}},
- []blockTest{
- {header.SACKBlock{2288624809, 2288810057}, true},
- {header.SACKBlock{2288811477, 2288838565}, true},
- {header.SACKBlock{2288810057, 2288811477}, false},
- },
- 2288624809,
- },
- {
- "Test sequence number wrap around",
- []header.SACKBlock{{4294254144, 225652}, {5340409, 5350509}},
- []blockTest{
- {header.SACKBlock{4294254144, 4294254145}, true},
- {header.SACKBlock{4294254143, 4294254144}, false},
- {header.SACKBlock{4294254144, 1}, true},
- {header.SACKBlock{225652, 5350509}, false},
- {header.SACKBlock{5340409, 5350509}, true},
- {header.SACKBlock{5350509, 5350609}, false},
- },
- 4294254144,
- },
- {
- "Test disjoint SACKBlocks out of order",
- []header.SACKBlock{{827450276, 827454536}, {827426028, 827428868}},
- []blockTest{
- {header.SACKBlock{827426028, 827428867}, true},
- {header.SACKBlock{827450168, 827450275}, false},
- },
- 827426000,
- },
- }
- for _, tc := range testCases {
- sb := initScoreboard(tc.scoreboardBlocks, tc.iss)
- for _, blkTest := range tc.blockTests {
- if want, got := blkTest.sacked, sb.IsSACKED(blkTest.block); got != want {
- t.Errorf("%s: s.IsSACKED(%v) = %v, want %v", tc.comment, blkTest.block, got, want)
- }
- }
- }
-}
-
-func TestSACKScoreboardIsRangeLost(t *testing.T) {
- s := tcp.NewSACKScoreboard(10, 0)
- s.Insert(header.SACKBlock{1, 25})
- s.Insert(header.SACKBlock{25, 50})
- s.Insert(header.SACKBlock{51, 100})
- s.Insert(header.SACKBlock{111, 120})
- s.Insert(header.SACKBlock{101, 110})
- s.Insert(header.SACKBlock{121, 141})
- s.Insert(header.SACKBlock{145, 146})
- s.Insert(header.SACKBlock{147, 148})
- s.Insert(header.SACKBlock{149, 150})
- s.Insert(header.SACKBlock{153, 154})
- s.Insert(header.SACKBlock{155, 156})
- testCases := []struct {
- block header.SACKBlock
- lost bool
- }{
- // Block not covered by SACK block and has more than
- // nDupAckThreshold discontiguous SACK blocks after it as well
- // as (nDupAckThreshold -1) * 10 (smss) bytes that have been
- // SACKED above the sequence number covered by this block.
- {block: header.SACKBlock{0, 1}, lost: true},
-
- // These blocks have all been SACKed and should not be
- // considered lost.
- {block: header.SACKBlock{1, 2}, lost: false},
- {block: header.SACKBlock{25, 26}, lost: false},
- {block: header.SACKBlock{1, 45}, lost: false},
-
- // Same as the first case above.
- {block: header.SACKBlock{50, 51}, lost: true},
-
- // This block has been SACKed and should not be considered lost.
- {block: header.SACKBlock{119, 120}, lost: false},
-
- // This one should return true because there are >
- // (nDupAckThreshold - 1) * 10 (smss) bytes that have been
- // sacked above this sequence number.
- {block: header.SACKBlock{120, 121}, lost: true},
-
- // This block has been SACKed and should not be considered lost.
- {block: header.SACKBlock{125, 126}, lost: false},
-
- // This block has not been SACKed and there are nDupAckThreshold
- // number of SACKed blocks after it.
- {block: header.SACKBlock{141, 145}, lost: true},
-
- // This block has not been SACKed and there are less than
- // nDupAckThreshold SACKed sequences after it.
- {block: header.SACKBlock{151, 152}, lost: false},
- }
- for _, tc := range testCases {
- if want, got := tc.lost, s.IsRangeLost(tc.block); got != want {
- t.Errorf("s.IsRangeLost(%v) = %v, want %v", tc.block, got, want)
- }
- }
-}
-
-func TestSACKScoreboardIsLost(t *testing.T) {
- s := tcp.NewSACKScoreboard(10, 0)
- s.Insert(header.SACKBlock{1, 25})
- s.Insert(header.SACKBlock{25, 50})
- s.Insert(header.SACKBlock{51, 100})
- s.Insert(header.SACKBlock{111, 120})
- s.Insert(header.SACKBlock{101, 110})
- s.Insert(header.SACKBlock{121, 141})
- s.Insert(header.SACKBlock{121, 141})
- s.Insert(header.SACKBlock{145, 146})
- s.Insert(header.SACKBlock{147, 148})
- s.Insert(header.SACKBlock{149, 150})
- s.Insert(header.SACKBlock{153, 154})
- s.Insert(header.SACKBlock{155, 156})
- testCases := []struct {
- seq seqnum.Value
- lost bool
- }{
- // Sequence number not covered by SACK block and has more than
- // nDupAckThreshold discontiguous SACK blocks after it as well
- // as (nDupAckThreshold -1) * 10 (smss) bytes that have been
- // SACKED above the sequence number.
- {seq: 0, lost: true},
-
- // These sequence numbers have all been SACKed and should not be
- // considered lost.
- {seq: 1, lost: false},
- {seq: 25, lost: false},
- {seq: 45, lost: false},
-
- // Same as first case above.
- {seq: 50, lost: true},
-
- // This block has been SACKed and should not be considered lost.
- {seq: 119, lost: false},
-
- // This one should return true because there are >
- // (nDupAckThreshold - 1) * 10 (smss) bytes that have been
- // sacked above this sequence number.
- {seq: 120, lost: true},
-
- // This sequence number has been SACKed and should not be
- // considered lost.
- {seq: 125, lost: false},
-
- // This sequence number has not been SACKed and there are
- // nDupAckThreshold number of SACKed blocks after it.
- {seq: 141, lost: true},
-
- // This sequence number has not been SACKed and there are less
- // than nDupAckThreshold SACKed sequences after it.
- {seq: 151, lost: false},
- }
- for _, tc := range testCases {
- if want, got := tc.lost, s.IsLost(tc.seq); got != want {
- t.Errorf("s.IsLost(%v) = %v, want %v", tc.seq, got, want)
- }
- }
-}
-
-func TestSACKScoreboardDelete(t *testing.T) {
- blocks := []header.SACKBlock{{4294254144, 225652}, {5340409, 5350509}}
- s := initScoreboard(blocks, 4294254143)
- s.Delete(5340408)
- if s.Empty() {
- t.Fatalf("s.Empty() = true, want false")
- }
- if got, want := s.Sacked(), blocks[1].Start.Size(blocks[1].End); got != want {
- t.Fatalf("incorrect sacked bytes in scoreboard got: %v, want: %v", got, want)
- }
- s.Delete(5340410)
- if s.Empty() {
- t.Fatal("s.Empty() = true, want false")
- }
- newSB := header.SACKBlock{5340410, 5350509}
- if !s.IsSACKED(newSB) {
- t.Fatalf("s.IsSACKED(%v) = false, want true, scoreboard: %v", newSB, s)
- }
- s.Delete(5350509)
- lastOctet := header.SACKBlock{5350508, 5350509}
- if s.IsSACKED(lastOctet) {
- t.Fatalf("s.IsSACKED(%v) = false, want true", lastOctet)
- }
-
- s.Delete(5350510)
- if !s.Empty() {
- t.Fatal("s.Empty() = false, want true")
- }
- if got, want := s.Sacked(), seqnum.Size(0); got != want {
- t.Fatalf("incorrect sacked bytes in scoreboard got: %v, want: %v", got, want)
- }
-}
diff --git a/pkg/tcpip/transport/tcp/segment_test.go b/pkg/tcpip/transport/tcp/segment_test.go
deleted file mode 100644
index 2e6ea06f5..000000000
--- a/pkg/tcpip/transport/tcp/segment_test.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp
-
-import (
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/faketime"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
-)
-
-type segmentSizeWants struct {
- DataSize int
- SegMemSize int
-}
-
-func checkSegmentSize(t *testing.T, name string, seg *segment, want segmentSizeWants) {
- t.Helper()
- got := segmentSizeWants{
- DataSize: seg.data.Size(),
- SegMemSize: seg.segMemSize(),
- }
- if diff := cmp.Diff(got, want); diff != "" {
- t.Errorf("%s differs (-want +got):\n%s", name, diff)
- }
-}
-
-func TestSegmentMerge(t *testing.T) {
- var clock faketime.NullClock
- id := stack.TransportEndpointID{}
- seg1 := newOutgoingSegment(id, &clock, buffer.NewView(10))
- defer seg1.decRef()
- seg2 := newOutgoingSegment(id, &clock, buffer.NewView(20))
- defer seg2.decRef()
-
- checkSegmentSize(t, "seg1", seg1, segmentSizeWants{
- DataSize: 10,
- SegMemSize: SegSize + 10,
- })
- checkSegmentSize(t, "seg2", seg2, segmentSizeWants{
- DataSize: 20,
- SegMemSize: SegSize + 20,
- })
-
- seg1.merge(seg2)
-
- checkSegmentSize(t, "seg1", seg1, segmentSizeWants{
- DataSize: 30,
- SegMemSize: SegSize + 30,
- })
- checkSegmentSize(t, "seg2", seg2, segmentSizeWants{
- DataSize: 0,
- SegMemSize: SegSize,
- })
-}
diff --git a/pkg/tcpip/transport/tcp/tcp_endpoint_list.go b/pkg/tcpip/transport/tcp/tcp_endpoint_list.go
new file mode 100644
index 000000000..a7dc5df81
--- /dev/null
+++ b/pkg/tcpip/transport/tcp/tcp_endpoint_list.go
@@ -0,0 +1,221 @@
+package tcp
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type endpointElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (endpointElementMapper) linkerFor(elem *endpoint) *endpoint { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type endpointList struct {
+ head *endpoint
+ tail *endpoint
+}
+
+// Reset resets list l to the empty state.
+func (l *endpointList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *endpointList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *endpointList) Front() *endpoint {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *endpointList) Back() *endpoint {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *endpointList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (endpointElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *endpointList) PushFront(e *endpoint) {
+ linker := endpointElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ endpointElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *endpointList) PushBack(e *endpoint) {
+ linker := endpointElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ endpointElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *endpointList) PushBackList(m *endpointList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ endpointElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ endpointElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *endpointList) InsertAfter(b, e *endpoint) {
+ bLinker := endpointElementMapper{}.linkerFor(b)
+ eLinker := endpointElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ endpointElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *endpointList) InsertBefore(a, e *endpoint) {
+ aLinker := endpointElementMapper{}.linkerFor(a)
+ eLinker := endpointElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ endpointElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *endpointList) Remove(e *endpoint) {
+ linker := endpointElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ endpointElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ endpointElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type endpointEntry struct {
+ next *endpoint
+ prev *endpoint
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *endpointEntry) Next() *endpoint {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *endpointEntry) Prev() *endpoint {
+ return e.prev
+}
+
+// SetNext assigns 'elem' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *endpointEntry) SetNext(elem *endpoint) {
+ e.next = elem
+}
+
+// SetPrev assigns 'elem' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *endpointEntry) SetPrev(elem *endpoint) {
+ e.prev = elem
+}
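tcp_endpoint_list.go is stamped from the same generic_list template as the packet and raw lists above, so the iteration and removal patterns already shown apply unchanged; the only operations not illustrated yet are the positional inserts. The sketch below assumes it compiles inside package tcp (endpoint and endpointList are unexported, and *endpoint must already provide the Next/Prev/SetNext/SetPrev linker methods for the generated file to build); insertSortedExample and its less callback are hypothetical names used only for illustration.

// Hypothetical helper, assumed to live inside package tcp. It keeps the list
// ordered by inserting e before the first element that should follow it,
// falling back to PushBack when e belongs at the end.
func insertSortedExample(l *endpointList, e *endpoint, less func(a, b *endpoint) bool) {
	for cur := l.Front(); cur != nil; cur = cur.Next() {
		if less(e, cur) {
			l.InsertBefore(cur, e) // insert e before cur
			return
		}
	}
	l.PushBack(e)
}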
diff --git a/pkg/tcpip/transport/tcp/tcp_noracedetector_test.go b/pkg/tcpip/transport/tcp/tcp_noracedetector_test.go
deleted file mode 100644
index 84fb1c416..000000000
--- a/pkg/tcpip/transport/tcp/tcp_noracedetector_test.go
+++ /dev/null
@@ -1,559 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// These tests are flaky when run under the go race detector due to some
-// iterations taking long enough that the retransmit timer can kick in, causing
-// the congestion window measurements to fail due to extra packets, etc.
-//
-//go:build !race
-// +build !race
-
-package tcp_test
-
-import (
- "bytes"
- "fmt"
- "math"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
- "gvisor.dev/gvisor/pkg/tcpip/transport/tcp/testing/context"
- "gvisor.dev/gvisor/pkg/test/testutil"
-)
-
-func TestFastRecovery(t *testing.T) {
- maxPayload := 32
- c := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxPayload))
- defer c.Cleanup()
-
- c.CreateConnected(789, 30000, -1 /* epRcvBuf */)
-
- const iterations = 3
- data := make([]byte, 2*maxPayload*(tcp.InitialCwnd<<(iterations+1)))
- for i := range data {
- data[i] = byte(i)
- }
-
- // Write all the data in one shot. Packets will only be written at the
- // MTU size though.
- var r bytes.Reader
- r.Reset(data)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- // Do slow start for a few iterations.
- expected := tcp.InitialCwnd
- bytesRead := 0
- for i := 0; i < iterations; i++ {
- expected = tcp.InitialCwnd << uint(i)
- if i > 0 {
- // Acknowledge all the data received so far if not on
- // first iteration.
- c.SendAck(790, bytesRead)
- }
-
- // Read all packets expected on this iteration. Don't
- // acknowledge any of them just yet, so that we can measure the
- // congestion window.
- for j := 0; j < expected; j++ {
- c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)
- bytesRead += maxPayload
- }
-
- // Check we don't receive any more packets on this iteration.
- // The timeout can't be too high or we'll trigger a timeout.
- c.CheckNoPacketTimeout("More packets received than expected for this cwnd.", 50*time.Millisecond)
- }
-
- // Send 3 duplicate acks. This should force an immediate retransmit of
- // the pending packet and put the sender into fast recovery.
- rtxOffset := bytesRead - maxPayload*expected
- for i := 0; i < 3; i++ {
- c.SendAck(790, rtxOffset)
- }
-
- // Receive the retransmitted packet.
- c.ReceiveAndCheckPacket(data, rtxOffset, maxPayload)
-
- // Wait before checking metrics.
- metricPollFn := func() error {
- if got, want := c.Stack().Stats().TCP.FastRetransmit.Value(), uint64(1); got != want {
- return fmt.Errorf("got stats.TCP.FastRetransmit.Value = %d, want = %d", got, want)
- }
- if got, want := c.Stack().Stats().TCP.Retransmits.Value(), uint64(1); got != want {
- return fmt.Errorf("got stats.TCP.Retransmit.Value = %d, want = %d", got, want)
- }
-
- if got, want := c.Stack().Stats().TCP.FastRecovery.Value(), uint64(1); got != want {
- return fmt.Errorf("got stats.TCP.FastRecovery.Value = %d, want = %d", got, want)
- }
- return nil
- }
-
- if err := testutil.Poll(metricPollFn, 1*time.Second); err != nil {
- t.Error(err)
- }
-
- // Now send 7 more duplicate acks. Each of these should cause a window
- // inflation by 1 and cause the sender to send an extra packet.
- for i := 0; i < 7; i++ {
- c.SendAck(790, rtxOffset)
- }
-
- recover := bytesRead
-
- // Ensure no new packets arrive.
- c.CheckNoPacketTimeout("More packets received than expected during recovery after dupacks for this cwnd.",
- 50*time.Millisecond)
-
- // Acknowledge half of the pending data.
- rtxOffset = bytesRead - expected*maxPayload/2
- c.SendAck(790, rtxOffset)
-
- // Receive the retransmit due to partial ack.
- c.ReceiveAndCheckPacket(data, rtxOffset, maxPayload)
-
- // Wait before checking metrics.
- metricPollFn = func() error {
- if got, want := c.Stack().Stats().TCP.FastRetransmit.Value(), uint64(2); got != want {
- return fmt.Errorf("got stats.TCP.FastRetransmit.Value = %d, want = %d", got, want)
- }
- if got, want := c.Stack().Stats().TCP.Retransmits.Value(), uint64(2); got != want {
- return fmt.Errorf("got stats.TCP.Retransmit.Value = %d, want = %d", got, want)
- }
- return nil
- }
- if err := testutil.Poll(metricPollFn, 1*time.Second); err != nil {
- t.Error(err)
- }
-
- // Receive the 10 extra packets that should have been released due to
- // the congestion window inflation in recovery.
- for i := 0; i < 10; i++ {
- c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)
- bytesRead += maxPayload
- }
-
- // A partial ACK during recovery should reduce the congestion window by the
- // number acked. Since we had "expected" packets outstanding before sending
- // the partial ack and we acked expected/2, the cwnd and outstanding should
- // be expected/2 + 10 (7 dupacks + 3 for the original 3 dupacks that triggered
- // fast recovery), which means the sender should not send any more packets
- // until we ack this one.
- c.CheckNoPacketTimeout("More packets received than expected during recovery after partial ack for this cwnd.",
- 50*time.Millisecond)
-
- // Acknowledge all pending data to recover point.
- c.SendAck(790, recover)
-
- // At this point, the cwnd should reset to expected/2 and there are 10
- // packets outstanding.
- //
- // NOTE: Technically netstack is incorrect in that we adjust the cwnd on
- // the same segment that takes us out of recovery. But because of that
- // the actual cwnd at exit of recovery will be expected/2 + 1 as we
- // acked a cwnd worth of packets which will increase the cwnd further by
- // 1 in congestion avoidance.
- //
- // Now, in the first iteration, since there are 10 packets outstanding, we
- // would expect to get expected/2 + 1 - 10 packets. But subsequent
- // iterations will send us expected/2 + 1 + 1 (per iteration).
- expected = expected/2 + 1 - 10
- for i := 0; i < iterations; i++ {
- // Read all packets expected on this iteration. Don't
- // acknowledge any of them just yet, so that we can measure the
- // congestion window.
- for j := 0; j < expected; j++ {
- c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)
- bytesRead += maxPayload
- }
-
- // Check we don't receive any more packets on this iteration.
- // The timeout can't be too high or we'll trigger the sender's RTO.
- c.CheckNoPacketTimeout(fmt.Sprintf("More packets received(after deflation) than expected %d for this cwnd.", expected), 50*time.Millisecond)
-
- // Acknowledge all the data received so far.
- c.SendAck(790, bytesRead)
-
- // In congestion avoidance, the packet trains increase by 1 in
- // each iteration.
- if i == 0 {
- // After the first iteration we expect to get the full
- // congestion window worth of packets in every
- // iteration.
- expected += 10
- }
- expected++
- }
-}
-
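The arithmetic in the comments above is the standard NewReno-style fast-recovery accounting (RFC 5681/6582): on the third duplicate ACK the window collapses to ssthresh and is then inflated by one segment per additional dup ACK, and it deflates back to ssthresh when an ACK covers the recovery point. A minimal sketch of that accounting, with made-up numbers rather than netstack code:

package main

import "fmt"

func main() {
	expected := 20 // segments in flight when the loss is detected.
	ssthresh := expected / 2

	// Three duplicate ACKs trigger fast retransmit/fast recovery:
	// cwnd becomes ssthresh inflated by the three dup ACKs.
	cwnd := ssthresh + 3

	// Seven more duplicate ACKs inflate the window further, releasing
	// one extra segment each, as the test above expects.
	cwnd += 7
	fmt.Println("cwnd during recovery:", cwnd) // ssthresh + 10

	// An ACK that covers the recovery point deflates the window back
	// to ssthresh and exits fast recovery.
	cwnd = ssthresh
	fmt.Println("cwnd after recovery:", cwnd)
}
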
-func TestExponentialIncreaseDuringSlowStart(t *testing.T) {
- maxPayload := 32
- c := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxPayload))
- defer c.Cleanup()
-
- c.CreateConnected(789, 30000, -1 /* epRcvBuf */)
-
- const iterations = 3
- data := make([]byte, maxPayload*(tcp.InitialCwnd<<(iterations+1)))
- for i := range data {
- data[i] = byte(i)
- }
-
- // Write all the data in one shot. Packets will only be written at the
- // MTU size though.
- var r bytes.Reader
- r.Reset(data)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- expected := tcp.InitialCwnd
- bytesRead := 0
- for i := 0; i < iterations; i++ {
- // Read all packets expected on this iteration. Don't
- // acknowledge any of them just yet, so that we can measure the
- // congestion window.
- for j := 0; j < expected; j++ {
- c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)
- bytesRead += maxPayload
- }
-
- // Check we don't receive any more packets on this iteration.
- // The timeout can't be too high or we'll trigger the sender's RTO.
- c.CheckNoPacketTimeout("More packets received than expected for this cwnd.", 50*time.Millisecond)
-
- // Acknowledge all the data received so far.
- c.SendAck(790, bytesRead)
-
- // Double the number of expected packets for the next iteration.
- expected *= 2
- }
-}
-
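The doubling the loop above checks follows from the slow-start rule that cwnd grows by one segment for every segment acknowledged, so acknowledging a full window each round trip doubles it. A tiny illustration (the starting window is arbitrary, not tcp.InitialCwnd):

package main

import "fmt"

func main() {
	cwnd := 10 // arbitrary starting window, in segments.
	for rtt := 1; rtt <= 3; rtt++ {
		acked := cwnd // the receiver ACKs the whole outstanding window.
		cwnd += acked // slow start: +1 segment per ACKed segment.
		fmt.Printf("after round trip %d: cwnd = %d\n", rtt, cwnd)
	}
	// Prints 20, 40, 80: the window doubles every round trip.
}
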
-func TestCongestionAvoidance(t *testing.T) {
- maxPayload := 32
- c := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxPayload))
- defer c.Cleanup()
-
- c.CreateConnected(789, 30000, -1 /* epRcvBuf */)
-
- const iterations = 3
- data := make([]byte, 2*maxPayload*(tcp.InitialCwnd<<(iterations+1)))
- for i := range data {
- data[i] = byte(i)
- }
-
- // Write all the data in one shot. Packets will only be written at the
- // MTU size though.
- var r bytes.Reader
- r.Reset(data)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- // Do slow start for a few iterations.
- expected := tcp.InitialCwnd
- bytesRead := 0
- for i := 0; i < iterations; i++ {
- expected = tcp.InitialCwnd << uint(i)
- if i > 0 {
- // Acknowledge all the data received so far if not on
- // first iteration.
- c.SendAck(790, bytesRead)
- }
-
- // Read all packets expected on this iteration. Don't
- // acknowledge any of them just yet, so that we can measure the
- // congestion window.
- for j := 0; j < expected; j++ {
- c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)
- bytesRead += maxPayload
- }
-
- // Check we don't receive any more packets on this iteration.
- // The timeout can't be too high or we'll trigger the sender's RTO.
- c.CheckNoPacketTimeout("More packets received than expected for this cwnd (slow start phase).", 50*time.Millisecond)
- }
-
- // Don't acknowledge the first packet of the last packet train. Let's
- // wait for them to time out, which will trigger a restart of slow
- // start, and initialization of ssthresh to cwnd/2.
- rtxOffset := bytesRead - maxPayload*expected
- c.ReceiveAndCheckPacket(data, rtxOffset, maxPayload)
-
- // Acknowledge all the data received so far.
- c.SendAck(790, bytesRead)
-
- // This part is tricky: when the timeout happened, we had "expected"
- // packets pending, cwnd reset to 1, and ssthresh set to expected/2.
- // By acknowledging "expected" packets, the slow-start part will
- // increase cwnd to expected/2 (which "consumes" expected/2-1 of the
- // acknowledgements), then the congestion avoidance part will consume
- // an extra expected/2 acks to take cwnd to expected/2 + 1. One ack
- // remains in the "ack count" (which will cause cwnd to be incremented
- // once it reaches cwnd acks).
- //
- // So we're straight into congestion avoidance with cwnd set to
- // expected/2 + 1.
- //
- // Check that packet trains of cwnd packets are sent, and that cwnd is
- // incremented by 1 after we acknowledge each packet.
- expected = expected/2 + 1
- for i := 0; i < iterations; i++ {
- // Read all packets expected on this iteration. Don't
- // acknowledge any of them just yet, so that we can measure the
- // congestion window.
- for j := 0; j < expected; j++ {
- c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)
- bytesRead += maxPayload
- }
-
- // Check we don't receive any more packets on this iteration.
- // The timeout can't be too high or we'll trigger the sender's RTO.
- c.CheckNoPacketTimeout("More packets received than expected for this cwnd (congestion avoidance phase).", 50*time.Millisecond)
-
- // Acknowledge all the data received so far.
- c.SendAck(790, bytesRead)
-
- // In congestion avoidance, the packet trains increase by 1 in
- // each iteration.
- expected++
- }
-}
-
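The "tricky" ack accounting described above can be replayed with a small simulation of the Reno rules the test assumes: slow start adds one segment per ACK until cwnd reaches ssthresh, after which congestion avoidance adds one segment per full window of ACKs. This is an illustration of the arithmetic only, not netstack's sender:

package main

import "fmt"

func main() {
	expected := 40 // segments outstanding when the timeout fired (any even value works).

	// After the timeout: cwnd restarts at 1, ssthresh is half the old window.
	cwnd, ssthresh, ackCount := 1, expected/2, 0

	// The receiver then acknowledges all "expected" outstanding segments.
	for i := 0; i < expected; i++ {
		if cwnd < ssthresh {
			cwnd++ // slow start: one segment per ACK.
			continue
		}
		// Congestion avoidance: one segment per full window of ACKs.
		ackCount++
		if ackCount >= cwnd {
			ackCount -= cwnd
			cwnd++
		}
	}
	fmt.Println(cwnd, ackCount) // expected/2 + 1, with one ACK left over.
}
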
- // cubicCwnd returns an estimate of a cubic window given the origCwnd,
- // wMax, the last congestion event time and the sRTT.
-func cubicCwnd(origCwnd int, wMax int, congEventTime time.Time, sRTT time.Duration) int {
- cwnd := float64(origCwnd)
- // We wait 50ms between each iteration so sRTT as computed by cubic
- // should be close to 50ms.
- elapsed := (time.Since(congEventTime) + sRTT).Seconds()
- k := math.Cbrt(float64(wMax) * 0.3 / 0.7)
- wtRTT := 0.4*math.Pow(elapsed-k, 3) + float64(wMax)
- cwnd += (wtRTT - cwnd) / cwnd
- return int(cwnd)
-}
-
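For reference, cubicCwnd above tracks the CUBIC growth function. In RFC 8312 notation, with growth constant C and multiplicative-decrease factor beta_cubic (the helper's hard-coded 0.3, 0.7 and 0.4 are assumed to mirror netstack's implementation rather than being derived from the RFC here):

W_{cubic}(t) = C\,(t - K)^3 + W_{max}, \qquad K = \sqrt[3]{W_{max}\,(1 - \beta_{cubic}) / C}
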
-func TestCubicCongestionAvoidance(t *testing.T) {
- maxPayload := 32
- c := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxPayload))
- defer c.Cleanup()
-
- enableCUBIC(t, c)
-
- c.CreateConnected(789, 30000, -1 /* epRcvBuf */)
-
- const iterations = 3
- data := make([]byte, 2*maxPayload*(tcp.InitialCwnd<<(iterations+1)))
- for i := range data {
- data[i] = byte(i)
- }
-
- // Write all the data in one shot. Packets will only be written at the
- // MTU size though.
- var r bytes.Reader
- r.Reset(data)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- // Do slow start for a few iterations.
- expected := tcp.InitialCwnd
- bytesRead := 0
- for i := 0; i < iterations; i++ {
- expected = tcp.InitialCwnd << uint(i)
- if i > 0 {
- // Acknowledge all the data received so far if not on
- // first iteration.
- c.SendAck(790, bytesRead)
- }
-
- // Read all packets expected on this iteration. Don't
- // acknowledge any of them just yet, so that we can measure the
- // congestion window.
- for j := 0; j < expected; j++ {
- c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)
- bytesRead += maxPayload
- }
-
- // Check we don't receive any more packets on this iteration.
- // The timeout can't be too high or we'll trigger the sender's RTO.
- c.CheckNoPacketTimeout("More packets received than expected for this cwnd (during slow-start phase).", 50*time.Millisecond)
- }
-
- // Don't acknowledge the first packet of the last packet train. Let's
- // wait for them to time out, which will trigger a restart of slow
- // start, and initialization of ssthresh to cwnd * 0.7.
- rtxOffset := bytesRead - maxPayload*expected
- c.ReceiveAndCheckPacket(data, rtxOffset, maxPayload)
-
- // Acknowledge all pending data.
- c.SendAck(790, bytesRead)
-
- // Store away the time we sent the ACK. Assuming a 200ms RTO, we
- // estimate that the sender's RTO will fire about 200ms from now and
- // it will go back into slow start.
- packetDropTime := time.Now().Add(200 * time.Millisecond)
-
- // This part is tricky: when the timeout happened, we had "expected"
- // packets pending, cwnd reset to 1, and ssthresh set to expected * 0.7.
- // By acknowledging "expected" packets, the slow-start part will
- // increase cwnd up to ssthresh, essentially putting the connection
- // straight into congestion avoidance.
- wMax := expected
- // Lower expected as per cubic spec after a congestion event.
- expected = int(float64(expected) * 0.7)
- cwnd := expected
- for i := 0; i < iterations; i++ {
- // Cubic grows window independent of ACKs. Cubic Window growth
- // is a function of time elapsed since last congestion event.
- // As a result the congestion window does not grow
- // deterministically in response to ACKs.
- //
- // We need to roughly estimate what the cwnd of the sender is
- // based on when we sent the dupacks.
- cwnd := cubicCwnd(cwnd, wMax, packetDropTime, 50*time.Millisecond)
-
- packetsExpected := cwnd
- for j := 0; j < packetsExpected; j++ {
- c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)
- bytesRead += maxPayload
- }
- t.Logf("expected packets received, next trying to receive any extra packets that may come")
-
- // If our estimate was correct there should be no more pending packets.
- // We attempt to read a packet a few times with a short sleep in between
- // to ensure that we don't see the sender send any unexpected packets.
- unexpectedPackets := 0
- for {
- gotPacket := c.ReceiveNonBlockingAndCheckPacket(data, bytesRead, maxPayload)
- if !gotPacket {
- break
- }
- bytesRead += maxPayload
- unexpectedPackets++
- time.Sleep(1 * time.Millisecond)
- }
- if unexpectedPackets != 0 {
- t.Fatalf("received %d unexpected packets for iteration %d", unexpectedPackets, i)
- }
- // Check we don't receive any more packets on this iteration.
- // The timeout can't be too high or we'll trigger the sender's RTO.
- c.CheckNoPacketTimeout("More packets received than expected for this cwnd(congestion avoidance)", 5*time.Millisecond)
-
- // Acknowledge all the data received so far.
- c.SendAck(790, bytesRead)
- }
-}
-
-func TestRetransmit(t *testing.T) {
- maxPayload := 32
- c := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxPayload))
- defer c.Cleanup()
-
- c.CreateConnected(789, 30000, -1 /* epRcvBuf */)
-
- const iterations = 3
- data := make([]byte, maxPayload*(tcp.InitialCwnd<<(iterations+1)))
- for i := range data {
- data[i] = byte(i)
- }
-
- // Write all the data in two shots. Packets will only be written at the
- // MTU size though.
- var r bytes.Reader
- r.Reset(data[:len(data)/2])
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
- r.Reset(data[len(data)/2:])
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- // Do slow start for a few iterations.
- expected := tcp.InitialCwnd
- bytesRead := 0
- for i := 0; i < iterations; i++ {
- expected = tcp.InitialCwnd << uint(i)
- if i > 0 {
- // Acknowledge all the data received so far if not on
- // first iteration.
- c.SendAck(790, bytesRead)
- }
-
- // Read all packets expected on this iteration. Don't
- // acknowledge any of them just yet, so that we can measure the
- // congestion window.
- for j := 0; j < expected; j++ {
- c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)
- bytesRead += maxPayload
- }
-
- // Check we don't receive any more packets on this iteration.
- // The timeout can't be too high or we'll trigger the sender's RTO.
- c.CheckNoPacketTimeout("More packets received than expected for this cwnd.", 50*time.Millisecond)
- }
-
- // Wait for a timeout and retransmit.
- rtxOffset := bytesRead - maxPayload*expected
- c.ReceiveAndCheckPacket(data, rtxOffset, maxPayload)
-
- metricPollFn := func() error {
- if got, want := c.Stack().Stats().TCP.Timeouts.Value(), uint64(1); got != want {
- return fmt.Errorf("got stats.TCP.Timeouts.Value = %d, want = %d", got, want)
- }
-
- if got, want := c.Stack().Stats().TCP.Retransmits.Value(), uint64(1); got != want {
- return fmt.Errorf("got stats.TCP.Retransmits.Value = %d, want = %d", got, want)
- }
-
- if got, want := c.EP.Stats().(*tcp.Stats).SendErrors.Timeouts.Value(), uint64(1); got != want {
- return fmt.Errorf("got EP SendErrors.Timeouts.Value = %d, want = %d", got, want)
- }
-
- if got, want := c.EP.Stats().(*tcp.Stats).SendErrors.Retransmits.Value(), uint64(1); got != want {
- return fmt.Errorf("got EP stats SendErrors.Retransmits.Value = %d, want = %d", got, want)
- }
-
- if got, want := c.Stack().Stats().TCP.SlowStartRetransmits.Value(), uint64(1); got != want {
- return fmt.Errorf("got stats.TCP.SlowStartRetransmits.Value = %d, want = %d", got, want)
- }
-
- return nil
- }
-
- // Poll when checking metrics.
- if err := testutil.Poll(metricPollFn, 1*time.Second); err != nil {
- t.Error(err)
- }
-
- // Acknowledge half of the pending data.
- rtxOffset = bytesRead - expected*maxPayload/2
- c.SendAck(790, rtxOffset)
-
- // Receive the remaining data, making sure that acknowledged data is not
- // retransmitted.
- for offset := rtxOffset; offset < len(data); offset += maxPayload {
- c.ReceiveAndCheckPacket(data, offset, maxPayload)
- c.SendAck(790, offset+maxPayload)
- }
-
- c.CheckNoPacketTimeout("More packets received than expected for this cwnd.", 50*time.Millisecond)
-}
diff --git a/pkg/tcpip/transport/tcp/tcp_rack_test.go b/pkg/tcpip/transport/tcp/tcp_rack_test.go
deleted file mode 100644
index 89e9fb886..000000000
--- a/pkg/tcpip/transport/tcp/tcp_rack_test.go
+++ /dev/null
@@ -1,1102 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_test
-
-import (
- "bytes"
- "fmt"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/seqnum"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/transport/tcp/testing/context"
- "gvisor.dev/gvisor/pkg/test/testutil"
-)
-
-const (
- maxPayload = 10
- tsOptionSize = 12
- maxTCPOptionSize = 40
- mtu = header.TCPMinimumSize + header.IPv4MinimumSize + maxTCPOptionSize + maxPayload
- latency = 5 * time.Millisecond
-)
-
-func setStackTCPRecovery(t *testing.T, c *context.Context, recovery int) {
- t.Helper()
- opt := tcpip.TCPRecovery(recovery)
- if err := c.Stack().SetTransportProtocolOption(header.TCPProtocolNumber, &opt); err != nil {
- t.Fatalf("c.s.SetTransportProtocolOption(%d, &%v(%v)): %s", header.TCPProtocolNumber, opt, opt, err)
- }
-}
-
- // TestRACKUpdate tests that the RACK-related fields are updated when an ACK
- // is received on a SACK-enabled connection.
-func TestRACKUpdate(t *testing.T) {
- c := context.New(t, uint32(mtu))
- defer c.Cleanup()
-
- var xmitTime tcpip.MonotonicTime
- probeDone := make(chan struct{})
- c.Stack().AddTCPProbe(func(state stack.TCPEndpointState) {
- // Validate that the endpoint Sender.RACKState is what we expect.
- if state.Sender.RACKState.XmitTime.Before(xmitTime) {
- t.Fatalf("RACK transmit time failed to update when an ACK is received")
- }
-
- gotSeq := state.Sender.RACKState.EndSequence
- wantSeq := state.Sender.SndNxt
- if !gotSeq.LessThanEq(wantSeq) || gotSeq.LessThan(wantSeq) {
- t.Fatalf("RACK sequence number failed to update, got: %v, but want: %v", gotSeq, wantSeq)
- }
-
- if state.Sender.RACKState.RTT == 0 {
- t.Fatalf("RACK RTT failed to update when an ACK is received, got RACKState.RTT == 0 want != 0")
- }
- close(probeDone)
- })
- setStackSACKPermitted(t, c, true)
- createConnectedWithSACKAndTS(c)
-
- data := make([]byte, maxPayload)
- for i := range data {
- data[i] = byte(i)
- }
-
- // Write the data.
- xmitTime = c.Stack().Clock().NowMonotonic()
- var r bytes.Reader
- r.Reset(data)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- bytesRead := 0
- c.ReceiveAndCheckPacketWithOptions(data, bytesRead, maxPayload, tsOptionSize)
- bytesRead += maxPayload
- c.SendAck(seqnum.Value(context.TestInitialSequenceNumber).Add(1), bytesRead)
-
- // Wait for the probe function to finish processing the ACK before the
- // test completes.
- <-probeDone
-}
-
-// TestRACKDetectReorder tests that RACK detects packet reordering.
-func TestRACKDetectReorder(t *testing.T) {
- c := context.New(t, uint32(mtu))
- defer c.Cleanup()
-
- t.Skipf("Skipping this test as reorder detection does not consider DSACK.")
-
- var n int
- const ackNumToVerify = 2
- probeDone := make(chan struct{})
- c.Stack().AddTCPProbe(func(state stack.TCPEndpointState) {
- gotSeq := state.Sender.RACKState.FACK
- wantSeq := state.Sender.SndNxt
- // FACK should be updated to the highest ending sequence number of the
- // segment acknowledged most recently.
- if !gotSeq.LessThanEq(wantSeq) || gotSeq.LessThan(wantSeq) {
- t.Fatalf("RACK FACK failed to update, got: %v, but want: %v", gotSeq, wantSeq)
- }
-
- n++
- if n < ackNumToVerify {
- if state.Sender.RACKState.Reord {
- t.Fatalf("RACK reorder detected when there is no reordering")
- }
- return
- }
-
- if state.Sender.RACKState.Reord == false {
- t.Fatalf("RACK reorder detection failed")
- }
- close(probeDone)
- })
- setStackSACKPermitted(t, c, true)
- createConnectedWithSACKAndTS(c)
- data := make([]byte, ackNumToVerify*maxPayload)
- for i := range data {
- data[i] = byte(i)
- }
-
- // Write the data.
- var r bytes.Reader
- r.Reset(data)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- bytesRead := 0
- for i := 0; i < ackNumToVerify; i++ {
- c.ReceiveAndCheckPacketWithOptions(data, bytesRead, maxPayload, tsOptionSize)
- bytesRead += maxPayload
- }
-
- start := c.IRS.Add(maxPayload + 1)
- end := start.Add(maxPayload)
- seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- c.SendAckWithSACK(seq, 0, []header.SACKBlock{{start, end}})
- c.SendAck(seq, bytesRead)
-
- // Wait for the probe function to finish processing the ACK before the
- // test completes.
- <-probeDone
-}
-
-func sendAndReceiveWithSACK(t *testing.T, c *context.Context, numPackets int, enableRACK bool) []byte {
- setStackSACKPermitted(t, c, true)
- if !enableRACK {
- setStackTCPRecovery(t, c, 0)
- }
- createConnectedWithSACKAndTS(c)
-
- data := make([]byte, numPackets*maxPayload)
- for i := range data {
- data[i] = byte(i)
- }
-
- // Write the data.
- var r bytes.Reader
- r.Reset(data)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- bytesRead := 0
- for i := 0; i < numPackets; i++ {
- c.ReceiveAndCheckPacketWithOptions(data, bytesRead, maxPayload, tsOptionSize)
- bytesRead += maxPayload
- // This delay is added to increase the RTT, as a low RTT can
- // cause a TLP to be sent before we send the ACK.
- time.Sleep(latency)
- }
-
- return data
-}
-
-const (
- validDSACKDetected = 1
- failedToDetectDSACK = 2
- invalidDSACKDetected = 3
-)
-
-func addDSACKSeenCheckerProbe(t *testing.T, c *context.Context, numACK int, probeDone chan int) {
- var n int
- c.Stack().AddTCPProbe(func(state stack.TCPEndpointState) {
- // Validate that RACK detects DSACK.
- n++
- if n < numACK {
- if state.Sender.RACKState.DSACKSeen {
- probeDone <- invalidDSACKDetected
- }
- return
- }
-
- if !state.Sender.RACKState.DSACKSeen {
- probeDone <- failedToDetectDSACK
- return
- }
- probeDone <- validDSACKDetected
- })
-}
-
- // TestRACKTLPRecovery tests that RACK sends a tail loss probe (TLP) in the
- // case of a tail loss. This simulates a situation where the TLP elicits a
- // SACK that exposes the holes and the sender is able to retransmit the rest.
-func TestRACKTLPRecovery(t *testing.T) {
- c := context.New(t, uint32(mtu))
- defer c.Cleanup()
-
- // Send 8 packets.
- numPackets := 8
- data := sendAndReceiveWithSACK(t, c, numPackets, true /* enableRACK */)
-
- // Packets [6-8] are lost. Send cumulative ACK for [1-5].
- seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- bytesRead := 5 * maxPayload
- c.SendAck(seq, bytesRead)
-
- // PTO should fire and send #8 packet as a TLP.
- c.ReceiveAndCheckPacketWithOptions(data, 7*maxPayload, maxPayload, tsOptionSize)
- var info tcpip.TCPInfoOption
- if err := c.EP.GetSockOpt(&info); err != nil {
- t.Fatalf("GetSockOpt failed: %v", err)
- }
-
- // Send the SACK after RTT because RACK RFC states that if the ACK for a
- // retransmission arrives before the smoothed RTT then the sender should not
- // update RACK state as it could be a spurious inference.
- time.Sleep(info.RTT)
-
- // Okay, let the sender know we got #8 using a SACK block.
- eighthPStart := c.IRS.Add(1 + seqnum.Size(7*maxPayload))
- eighthPEnd := eighthPStart.Add(maxPayload)
- c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{eighthPStart, eighthPEnd}})
-
- // The sender should be entering RACK based loss-recovery and sending #6 and
- // #7 one after another.
- c.ReceiveAndCheckPacketWithOptions(data, bytesRead, maxPayload, tsOptionSize)
- bytesRead += maxPayload
- c.ReceiveAndCheckPacketWithOptions(data, bytesRead, maxPayload, tsOptionSize)
- bytesRead += 2 * maxPayload
- c.SendAck(seq, bytesRead)
-
- metricPollFn := func() error {
- tcpStats := c.Stack().Stats().TCP
- stats := []struct {
- stat *tcpip.StatCounter
- name string
- want uint64
- }{
- // One fast retransmit after the SACK.
- {tcpStats.FastRetransmit, "stats.TCP.FastRetransmit", 1},
- // Recovery should be SACK recovery.
- {tcpStats.SACKRecovery, "stats.TCP.SACKRecovery", 1},
- // Packets 6, 7 and 8 were retransmitted.
- {tcpStats.Retransmits, "stats.TCP.Retransmits", 3},
- // TLP recovery should have been detected.
- {tcpStats.TLPRecovery, "stats.TCP.TLPRecovery", 1},
- // No RTOs should have occurred.
- {tcpStats.Timeouts, "stats.TCP.Timeouts", 0},
- }
- for _, s := range stats {
- if got, want := s.stat.Value(), s.want; got != want {
- return fmt.Errorf("got %s.Value() = %d, want = %d", s.name, got, want)
- }
- }
- return nil
- }
- if err := testutil.Poll(metricPollFn, 1*time.Second); err != nil {
- t.Error(err)
- }
-}
-
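The "PTO should fire" steps above rely on RACK-TLP's probe timeout. Roughly, per RFC 8985, the PTO is about two smoothed RTTs, padded by a worst-case delayed-ACK allowance when only one segment is in flight, and it is never armed later than the RTO. The sketch below is an assumption-laden illustration; probeTimeout and its constants are invented here and netstack's exact rules may differ:

package main

import (
	"fmt"
	"time"
)

// probeTimeout roughly follows the RFC 8985 guidance for arming a tail
// loss probe. Illustrative only.
func probeTimeout(srtt, rto time.Duration, flightSize int) time.Duration {
	const wcDelAckT = 200 * time.Millisecond // worst-case delayed ACK allowance.
	pto := 2 * srtt
	if flightSize == 1 {
		// Leave room for the receiver's delayed ACK when only one
		// segment is outstanding.
		pto += wcDelAckT
	}
	if pto > rto {
		pto = rto // never probe later than the retransmission timeout.
	}
	return pto
}

func main() {
	fmt.Println(probeTimeout(50*time.Millisecond, time.Second, 3)) // 100ms
}
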
-// TestRACKTLPFallbackRTO tests that RACK sends a tail loss probe (TLP) in the
-// case of a tail loss. This simulates a situation where either the TLP or its
-// ACK is lost. The sender should retransmit when RTO fires.
-func TestRACKTLPFallbackRTO(t *testing.T) {
- c := context.New(t, uint32(mtu))
- defer c.Cleanup()
-
- // Send 8 packets.
- numPackets := 8
- data := sendAndReceiveWithSACK(t, c, numPackets, true /* enableRACK */)
-
- // Packets [6-8] are lost. Send cumulative ACK for [1-5].
- seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- bytesRead := 5 * maxPayload
- c.SendAck(seq, bytesRead)
-
- // PTO should fire and send #8 packet as a TLP.
- c.ReceiveAndCheckPacketWithOptions(data, 7*maxPayload, maxPayload, tsOptionSize)
-
- // Either the TLP or the ACK the receiver sent with SACK blocks was lost.
-
- // Confirm that RTO fires and retransmits packet #6.
- c.ReceiveAndCheckPacketWithOptions(data, bytesRead, maxPayload, tsOptionSize)
-
- metricPollFn := func() error {
- tcpStats := c.Stack().Stats().TCP
- stats := []struct {
- stat *tcpip.StatCounter
- name string
- want uint64
- }{
- // No fast retransmits happened.
- {tcpStats.FastRetransmit, "stats.TCP.FastRetransmit", 0},
- // No SACK recovery happened.
- {tcpStats.SACKRecovery, "stats.TCP.SACKRecovery", 0},
- // TLP was unsuccessful.
- {tcpStats.TLPRecovery, "stats.TCP.TLPRecovery", 0},
- // RTO should have fired.
- {tcpStats.Timeouts, "stats.TCP.Timeouts", 1},
- }
- for _, s := range stats {
- if got, want := s.stat.Value(), s.want; got != want {
- return fmt.Errorf("got %s.Value() = %d, want = %d", s.name, got, want)
- }
- }
- return nil
- }
- if err := testutil.Poll(metricPollFn, 1*time.Second); err != nil {
- t.Error(err)
- }
-}
-
-// TestNoTLPRecoveryOnDSACK tests the scenario where the sender speculates a
-// tail loss and sends a TLP. Everything is received and acked. The probe
-// segment is DSACKed. No fast recovery should be triggered in this case.
-func TestNoTLPRecoveryOnDSACK(t *testing.T) {
- c := context.New(t, uint32(mtu))
- defer c.Cleanup()
-
- // Send 8 packets.
- numPackets := 8
- data := sendAndReceiveWithSACK(t, c, numPackets, true /* enableRACK */)
-
- // Packets [1-5] are received first. [6-8] took a detour and will take a
- // while to arrive. Ack [1-5].
- seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- bytesRead := 5 * maxPayload
- c.SendAck(seq, bytesRead)
-
- // The tail loss probe (#8 packet) is received.
- c.ReceiveAndCheckPacketWithOptions(data, 7*maxPayload, maxPayload, tsOptionSize)
-
- // Now that all 8 packets are received + duplicate 8th packet, send ack.
- bytesRead += 3 * maxPayload
- eighthPStart := c.IRS.Add(1 + seqnum.Size(7*maxPayload))
- eighthPEnd := eighthPStart.Add(maxPayload)
- c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{eighthPStart, eighthPEnd}})
-
- // Wait for RTO and make sure that nothing else is received.
- var info tcpip.TCPInfoOption
- if err := c.EP.GetSockOpt(&info); err != nil {
- t.Fatalf("GetSockOpt failed: %v", err)
- }
- if p := c.GetPacketWithTimeout(info.RTO); p != nil {
- t.Errorf("received an unexpected packet: %v", p)
- }
-
- metricPollFn := func() error {
- tcpStats := c.Stack().Stats().TCP
- stats := []struct {
- stat *tcpip.StatCounter
- name string
- want uint64
- }{
- // Make sure no recovery was entered.
- {tcpStats.FastRetransmit, "stats.TCP.FastRetransmit", 0},
- {tcpStats.SACKRecovery, "stats.TCP.SACKRecovery", 0},
- {tcpStats.TLPRecovery, "stats.TCP.TLPRecovery", 0},
- // RTO should not have fired.
- {tcpStats.Timeouts, "stats.TCP.Timeouts", 0},
- // Only #8 was retransmitted.
- {tcpStats.Retransmits, "stats.TCP.Retransmits", 1},
- }
- for _, s := range stats {
- if got, want := s.stat.Value(), s.want; got != want {
- return fmt.Errorf("got %s.Value() = %d, want = %d", s.name, got, want)
- }
- }
- return nil
- }
- if err := testutil.Poll(metricPollFn, 1*time.Second); err != nil {
- t.Error(err)
- }
-}
-
-// TestNoTLPOnSACK tests the scenario where there is not exactly a tail loss
-// due to the presence of multiple SACK holes. In such a scenario, TLP should
-// not be sent.
-func TestNoTLPOnSACK(t *testing.T) {
- c := context.New(t, uint32(mtu))
- defer c.Cleanup()
-
- // Send 8 packets.
- numPackets := 8
- data := sendAndReceiveWithSACK(t, c, numPackets, true /* enableRACK */)
-
- // Packets [1-5] and #7 were received. #6 and #8 were dropped.
- seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- bytesRead := 5 * maxPayload
- seventhStart := c.IRS.Add(1 + seqnum.Size(6*maxPayload))
- seventhEnd := seventhStart.Add(maxPayload)
- c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{seventhStart, seventhEnd}})
-
- // The sender should retransmit #6. If the sender sends a TLP instead, #8
- // will be received and the test will fail.
- c.ReceiveAndCheckPacketWithOptions(data, 5*maxPayload, maxPayload, tsOptionSize)
-
- metricPollFn := func() error {
- tcpStats := c.Stack().Stats().TCP
- stats := []struct {
- stat *tcpip.StatCounter
- name string
- want uint64
- }{
- // #6 was retransmitted due to SACK recovery.
- {tcpStats.FastRetransmit, "stats.TCP.FastRetransmit", 1},
- {tcpStats.SACKRecovery, "stats.TCP.SACKRecovery", 1},
- {tcpStats.TLPRecovery, "stats.TCP.TLPRecovery", 0},
- // RTO should not have fired.
- {tcpStats.Timeouts, "stats.TCP.Timeouts", 0},
- // Only #6 was retransmitted.
- {tcpStats.Retransmits, "stats.TCP.Retransmits", 1},
- }
- for _, s := range stats {
- if got, want := s.stat.Value(), s.want; got != want {
- return fmt.Errorf("got %s.Value() = %d, want = %d", s.name, got, want)
- }
- }
- return nil
- }
- if err := testutil.Poll(metricPollFn, 1*time.Second); err != nil {
- t.Error(err)
- }
-}
-
- // TestRACKOnePacketTailLoss tests the trivial case of a tail loss of only one
- // packet. The probe itself should repair the loss instead of having to go
- // into any recovery.
-func TestRACKOnePacketTailLoss(t *testing.T) {
- c := context.New(t, uint32(mtu))
- defer c.Cleanup()
-
- // Send 3 packets.
- numPackets := 3
- data := sendAndReceiveWithSACK(t, c, numPackets, true /* enableRACK */)
-
- // Packets [1-2] are received. #3 is lost.
- seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- bytesRead := 2 * maxPayload
- c.SendAck(seq, bytesRead)
-
- // PTO should fire and send #3 packet as a TLP.
- c.ReceiveAndCheckPacketWithOptions(data, 2*maxPayload, maxPayload, tsOptionSize)
- bytesRead += maxPayload
- c.SendAck(seq, bytesRead)
-
- metricPollFn := func() error {
- tcpStats := c.Stack().Stats().TCP
- stats := []struct {
- stat *tcpip.StatCounter
- name string
- want uint64
- }{
- // #3 was retransmitted as TLP.
- {tcpStats.FastRetransmit, "stats.TCP.FastRetransmit", 0},
- {tcpStats.SACKRecovery, "stats.TCP.SACKRecovery", 1},
- {tcpStats.TLPRecovery, "stats.TCP.TLPRecovery", 0},
- // RTO should not have fired.
- {tcpStats.Timeouts, "stats.TCP.Timeouts", 0},
- // Only #3 was retransmitted.
- {tcpStats.Retransmits, "stats.TCP.Retransmits", 1},
- }
- for _, s := range stats {
- if got, want := s.stat.Value(), s.want; got != want {
- return fmt.Errorf("got %s.Value() = %d, want = %d", s.name, got, want)
- }
- }
- return nil
- }
- if err := testutil.Poll(metricPollFn, 1*time.Second); err != nil {
- t.Error(err)
- }
-}
-
-// TestRACKDetectDSACK tests that RACK detects DSACK with duplicate segments.
-// See: https://tools.ietf.org/html/rfc2883#section-4.1.1.
-func TestRACKDetectDSACK(t *testing.T) {
- c := context.New(t, uint32(mtu))
- defer c.Cleanup()
-
- probeDone := make(chan int)
- const ackNumToVerify = 2
- addDSACKSeenCheckerProbe(t, c, ackNumToVerify, probeDone)
-
- numPackets := 8
- data := sendAndReceiveWithSACK(t, c, numPackets, true /* enableRACK */)
-
- // Cumulative ACK for [1-5] packets and SACK #8 packet (to prevent TLP).
- seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- bytesRead := 5 * maxPayload
- eighthPStart := c.IRS.Add(1 + seqnum.Size(7*maxPayload))
- eighthPEnd := eighthPStart.Add(maxPayload)
- c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{eighthPStart, eighthPEnd}})
-
- // Expect retransmission of #6 packet after RTO expires.
- c.ReceiveAndCheckPacketWithOptions(data, bytesRead, maxPayload, tsOptionSize)
-
- // Send DSACK block for #6 packet indicating both
- // initial and retransmitted packet are received and
- // packets [1-8] are received.
- start := c.IRS.Add(1 + seqnum.Size(bytesRead))
- end := start.Add(maxPayload)
- bytesRead += 3 * maxPayload
- c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{start, end}})
-
- // Wait for the probe function to finish processing the
- // ACK before the test completes.
- err := <-probeDone
- switch err {
- case failedToDetectDSACK:
- t.Fatalf("RACK DSACK detection failed")
- case invalidDSACKDetected:
- t.Fatalf("RACK DSACK detected when there is no duplicate SACK")
- }
-
- metricPollFn := func() error {
- tcpStats := c.Stack().Stats().TCP
- stats := []struct {
- stat *tcpip.StatCounter
- name string
- want uint64
- }{
- // Check DSACK was received for one segment.
- {tcpStats.SegmentsAckedWithDSACK, "stats.TCP.SegmentsAckedWithDSACK", 1},
- }
- for _, s := range stats {
- if got, want := s.stat.Value(), s.want; got != want {
- return fmt.Errorf("got %s.Value() = %d, want = %d", s.name, got, want)
- }
- }
- return nil
- }
-
- if err := testutil.Poll(metricPollFn, 1*time.Second); err != nil {
- t.Error(err)
- }
-}
-
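These DSACK tests exercise the RFC 2883 detection rule: only the first SACK block of an ACK can be a duplicate SACK, and it is one when it lies at or below the cumulative ACK or is fully contained in the second SACK block. A simplified sketch of that check (sackBlock and isDSACK are illustrative, not netstack types, and sequence-number wraparound is ignored):

package main

import "fmt"

type sackBlock struct{ start, end uint32 }

// isDSACK reports whether the first SACK block is a duplicate SACK per
// RFC 2883. Illustrative only.
func isDSACK(cumAck uint32, blocks []sackBlock) bool {
	if len(blocks) == 0 {
		return false
	}
	first := blocks[0]
	if first.end <= cumAck {
		return true // the block is already covered by the cumulative ACK.
	}
	if len(blocks) > 1 {
		second := blocks[1]
		return first.start >= second.start && first.end <= second.end
	}
	return false
}

func main() {
	// First block below the cumulative ACK: a DSACK for retransmitted data.
	fmt.Println(isDSACK(1000, []sackBlock{{500, 600}})) // true
	// First block beyond the cumulative ACK and not nested: a plain SACK.
	fmt.Println(isDSACK(1000, []sackBlock{{1200, 1300}})) // false
}
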
-// TestRACKDetectDSACKWithOutOfOrder tests that RACK detects DSACK with out of
-// order segments.
-// See: https://tools.ietf.org/html/rfc2883#section-4.1.2.
-func TestRACKDetectDSACKWithOutOfOrder(t *testing.T) {
- c := context.New(t, uint32(mtu))
- defer c.Cleanup()
-
- probeDone := make(chan int)
- const ackNumToVerify = 2
- addDSACKSeenCheckerProbe(t, c, ackNumToVerify, probeDone)
-
- numPackets := 10
- data := sendAndReceiveWithSACK(t, c, numPackets, true /* enableRACK */)
-
- // Cumulative ACK for [1-5] packets and SACK for #7 packet (to prevent TLP).
- seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- bytesRead := 5 * maxPayload
- seventhPStart := c.IRS.Add(1 + seqnum.Size(6*maxPayload))
- seventhPEnd := seventhPStart.Add(maxPayload)
- c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{seventhPStart, seventhPEnd}})
-
- // Expect retransmission of #6 packet.
- c.ReceiveAndCheckPacketWithOptions(data, bytesRead, maxPayload, tsOptionSize)
-
- // Send DSACK block for #6 packet indicating both
- // initial and retransmitted packet are received and
- // packets [1-7] are received.
- start := c.IRS.Add(1 + seqnum.Size(bytesRead))
- end := start.Add(maxPayload)
- bytesRead += 2 * maxPayload
- // Send DSACK block for #6 along with SACK for out of
- // order #9 packet.
- start1 := c.IRS.Add(1 + seqnum.Size(bytesRead) + maxPayload)
- end1 := start1.Add(maxPayload)
- c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{start, end}, {start1, end1}})
-
- // Wait for the probe function to finish processing the
- // ACK before the test completes.
- err := <-probeDone
- switch err {
- case failedToDetectDSACK:
- t.Fatalf("RACK DSACK detection failed")
- case invalidDSACKDetected:
- t.Fatalf("RACK DSACK detected when there is no duplicate SACK")
- }
-}
-
- // TestRACKDetectDSACKWithOutOfOrderDup tests that DSACK is detected on a
- // duplicate of an out-of-order packet.
-// See: https://tools.ietf.org/html/rfc2883#section-4.1.3
-func TestRACKDetectDSACKWithOutOfOrderDup(t *testing.T) {
- c := context.New(t, uint32(mtu))
- defer c.Cleanup()
-
- probeDone := make(chan int)
- const ackNumToVerify = 4
- addDSACKSeenCheckerProbe(t, c, ackNumToVerify, probeDone)
-
- numPackets := 10
- sendAndReceiveWithSACK(t, c, numPackets, true /* enableRACK */)
-
- // ACK [1-5] packets.
- seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- bytesRead := 5 * maxPayload
- c.SendAck(seq, bytesRead)
-
- // Send SACK indicating that #6 packet is missing and #7 packet was received.
- offset := seqnum.Size(bytesRead + maxPayload)
- start := c.IRS.Add(1 + offset)
- end := start.Add(maxPayload)
- c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{start, end}})
-
- // Send SACK indicating that #6 packet is still missing and [7-8] packets were received.
- end = start.Add(2 * maxPayload)
- c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{start, end}})
-
- // Consider #8 packet is duplicated on the network and send DSACK.
- dsackStart := c.IRS.Add(1 + offset + maxPayload)
- dsackEnd := dsackStart.Add(maxPayload)
- c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{dsackStart, dsackEnd}, {start, end}})
-
- // Wait for the probe function to finish processing the ACK before the
- // test completes.
- err := <-probeDone
- switch err {
- case failedToDetectDSACK:
- t.Fatalf("RACK DSACK detection failed")
- case invalidDSACKDetected:
- t.Fatalf("RACK DSACK detected when there is no duplicate SACK")
- }
-}
-
-// TestRACKDetectDSACKSingleDup tests DSACK for a single duplicate subsegment.
-// See: https://tools.ietf.org/html/rfc2883#section-4.2.1.
-func TestRACKDetectDSACKSingleDup(t *testing.T) {
- c := context.New(t, uint32(mtu))
- defer c.Cleanup()
-
- probeDone := make(chan int)
- const ackNumToVerify = 4
- addDSACKSeenCheckerProbe(t, c, ackNumToVerify, probeDone)
-
- numPackets := 4
- data := sendAndReceiveWithSACK(t, c, numPackets, true /* enableRACK */)
-
- // Send ACK for #1 packet.
- bytesRead := maxPayload
- seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- c.SendAck(seq, bytesRead)
-
- // Missing [2-3] packets and received #4 packet.
- seq = seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- start := c.IRS.Add(1 + seqnum.Size(3*maxPayload))
- end := start.Add(seqnum.Size(maxPayload))
- c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{start, end}})
-
- // Expect retransmission of #2 packet.
- c.ReceiveAndCheckPacketWithOptions(data, bytesRead, maxPayload, tsOptionSize)
-
- // ACK for retransmitted #2 packet.
- bytesRead += maxPayload
- c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{start, end}})
-
- // Simulate receiving delayed subsegment of #2 packet and delayed #3 packet by
- // sending DSACK block for the subsegment.
- dsackStart := c.IRS.Add(1 + seqnum.Size(bytesRead))
- dsackEnd := dsackStart.Add(seqnum.Size(maxPayload / 2))
- c.SendAckWithSACK(seq, numPackets*maxPayload, []header.SACKBlock{{dsackStart, dsackEnd}})
-
- // Wait for the probe function to finish processing the ACK before the
- // test completes.
- err := <-probeDone
- switch err {
- case failedToDetectDSACK:
- t.Fatalf("RACK DSACK detection failed")
- case invalidDSACKDetected:
- t.Fatalf("RACK DSACK detected when there is no duplicate SACK")
- }
-
- metricPollFn := func() error {
- tcpStats := c.Stack().Stats().TCP
- stats := []struct {
- stat *tcpip.StatCounter
- name string
- want uint64
- }{
- // Check DSACK was received for a subsegment.
- {tcpStats.SegmentsAckedWithDSACK, "stats.TCP.SegmentsAckedWithDSACK", 1},
- }
- for _, s := range stats {
- if got, want := s.stat.Value(), s.want; got != want {
- return fmt.Errorf("got %s.Value() = %d, want = %d", s.name, got, want)
- }
- }
- return nil
- }
-
- if err := testutil.Poll(metricPollFn, 1*time.Second); err != nil {
- t.Error(err)
- }
-}
-
-// TestRACKDetectDSACKDupWithCumulativeACK tests DSACK for two non-contiguous
-// duplicate subsegments covered by the cumulative acknowledgement.
-// See: https://tools.ietf.org/html/rfc2883#section-4.2.2.
-func TestRACKDetectDSACKDupWithCumulativeACK(t *testing.T) {
- c := context.New(t, uint32(mtu))
- defer c.Cleanup()
-
- probeDone := make(chan int)
- const ackNumToVerify = 5
- addDSACKSeenCheckerProbe(t, c, ackNumToVerify, probeDone)
-
- numPackets := 6
- data := sendAndReceiveWithSACK(t, c, numPackets, true /* enableRACK */)
-
- // Send ACK for #1 packet.
- bytesRead := maxPayload
- seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- c.SendAck(seq, bytesRead)
-
- // Missing [2-5] packets and received #6 packet.
- seq = seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- start := c.IRS.Add(1 + seqnum.Size(5*maxPayload))
- end := start.Add(seqnum.Size(maxPayload))
- c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{start, end}})
-
- // Expect retransmission of #2 packet.
- c.ReceiveAndCheckPacketWithOptions(data, bytesRead, maxPayload, tsOptionSize)
-
- // Received delayed #2 packet.
- bytesRead += maxPayload
- c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{start, end}})
-
- // Received delayed #4 packet.
- start1 := c.IRS.Add(1 + seqnum.Size(3*maxPayload))
- end1 := start1.Add(seqnum.Size(maxPayload))
- c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{start1, end1}, {start, end}})
-
- // Simulate receiving retransmitted subsegment for #2 packet and delayed #3
- // packet by sending DSACK block for #2 packet.
- dsackStart := c.IRS.Add(1 + seqnum.Size(maxPayload))
- dsackEnd := dsackStart.Add(seqnum.Size(maxPayload / 2))
- c.SendAckWithSACK(seq, 4*maxPayload, []header.SACKBlock{{dsackStart, dsackEnd}, {start, end}})
-
- // Wait for the probe function to finish processing the ACK before the
- // test completes.
- err := <-probeDone
- switch err {
- case failedToDetectDSACK:
- t.Fatalf("RACK DSACK detection failed")
- case invalidDSACKDetected:
- t.Fatalf("RACK DSACK detected when there is no duplicate SACK")
- }
-}
-
-// TestRACKDetectDSACKDup tests two non-contiguous duplicate subsegments not
-// covered by the cumulative acknowledgement.
-// See: https://tools.ietf.org/html/rfc2883#section-4.2.3.
-func TestRACKDetectDSACKDup(t *testing.T) {
- c := context.New(t, uint32(mtu))
- defer c.Cleanup()
-
- probeDone := make(chan int)
- const ackNumToVerify = 5
- addDSACKSeenCheckerProbe(t, c, ackNumToVerify, probeDone)
-
- numPackets := 7
- data := sendAndReceiveWithSACK(t, c, numPackets, true /* enableRACK */)
-
- // Send ACK for #1 packet.
- bytesRead := maxPayload
- seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- c.SendAck(seq, bytesRead)
-
- // Missing [2-6] packets and SACK #7 packet.
- seq = seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- start := c.IRS.Add(1 + seqnum.Size(6*maxPayload))
- end := start.Add(seqnum.Size(maxPayload))
- c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{start, end}})
-
- // Received delayed #3 packet.
- start1 := c.IRS.Add(1 + seqnum.Size(2*maxPayload))
- end1 := start1.Add(seqnum.Size(maxPayload))
- c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{start1, end1}, {start, end}})
-
- // Expect retransmission of #2 packet.
- c.ReceiveAndCheckPacketWithOptions(data, bytesRead, maxPayload, tsOptionSize)
-
- // Consider #2 packet has been dropped and SACK #4 packet.
- start2 := c.IRS.Add(1 + seqnum.Size(3*maxPayload))
- end2 := start2.Add(seqnum.Size(maxPayload))
- c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{start2, end2}, {start1, end1}, {start, end}})
-
- // Simulate receiving retransmitted subsegment for #3 packet and delayed #5
- // packet by sending DSACK block for the subsegment.
- dsackStart := c.IRS.Add(1 + seqnum.Size(2*maxPayload))
- dsackEnd := dsackStart.Add(seqnum.Size(maxPayload / 2))
- end1 = end1.Add(seqnum.Size(2 * maxPayload))
- c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{dsackStart, dsackEnd}, {start1, end1}})
-
- // Wait for the probe function to finish processing the ACK before the
- // test completes.
- err := <-probeDone
- switch err {
- case failedToDetectDSACK:
- t.Fatalf("RACK DSACK detection failed")
- case invalidDSACKDetected:
- t.Fatalf("RACK DSACK detected when there is no duplicate SACK")
- }
-}
-
-// TestRACKWithInvalidDSACKBlock tests that DSACK is not detected when DSACK
-// is not the first SACK block.
-func TestRACKWithInvalidDSACKBlock(t *testing.T) {
- c := context.New(t, uint32(mtu))
- defer c.Cleanup()
-
- probeDone := make(chan struct{})
- const ackNumToVerify = 2
- var n int
- c.Stack().AddTCPProbe(func(state stack.TCPEndpointState) {
- // Validate that RACK does not detect DSACK when DSACK block is
- // not the first SACK block.
- n++
- t.Helper()
- if state.Sender.RACKState.DSACKSeen {
- t.Fatalf("RACK DSACK detected when there is no duplicate SACK")
- }
-
- if n == ackNumToVerify {
- close(probeDone)
- }
- })
-
- numPackets := 10
- data := sendAndReceiveWithSACK(t, c, numPackets, true /* enableRACK */)
-
- // Cumulative ACK for [1-5] packets and SACK for #7 packet (to prevent TLP).
- seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- bytesRead := 5 * maxPayload
- seventhPStart := c.IRS.Add(1 + seqnum.Size(6*maxPayload))
- seventhPEnd := seventhPStart.Add(maxPayload)
- c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{seventhPStart, seventhPEnd}})
-
- // Expect retransmission of #6 packet.
- c.ReceiveAndCheckPacketWithOptions(data, bytesRead, maxPayload, tsOptionSize)
-
- // Send DSACK block for #6 packet indicating both
- // initial and retransmitted packet are received and
- // packets [1-7] are received.
- start := c.IRS.Add(1 + seqnum.Size(bytesRead))
- end := start.Add(maxPayload)
- bytesRead += 2 * maxPayload
-
- // Send DSACK block as second block. The first block is a SACK for #9 packet.
- start1 := c.IRS.Add(1 + seqnum.Size(bytesRead) + maxPayload)
- end1 := start1.Add(maxPayload)
- c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{start1, end1}, {start, end}})
-
- // Wait for the probe function to finish processing the
- // ACK before the test completes.
- <-probeDone
-}
-
-func addReorderWindowCheckerProbe(c *context.Context, numACK int, probeDone chan error) {
- var n int
- c.Stack().AddTCPProbe(func(state stack.TCPEndpointState) {
- // Validate that the RACK reorder window is updated as expected.
- n++
- if n < numACK {
- return
- }
-
- if state.Sender.RACKState.ReoWnd == 0 || state.Sender.RACKState.ReoWnd > state.Sender.RTTState.SRTT {
- probeDone <- fmt.Errorf("got RACKState.ReoWnd: %d, expected it to be greater than 0 and less than %d", state.Sender.RACKState.ReoWnd, state.Sender.RTTState.SRTT)
- return
- }
-
- if state.Sender.RACKState.ReoWndIncr != 1 {
- probeDone <- fmt.Errorf("got RACKState.ReoWndIncr: %v, want: 1", state.Sender.RACKState.ReoWndIncr)
- return
- }
-
- if state.Sender.RACKState.ReoWndPersist > 0 {
- probeDone <- fmt.Errorf("got RACKState.ReoWndPersist: %v, want: greater than 0", state.Sender.RACKState.ReoWndPersist)
- return
- }
- probeDone <- nil
- })
-}
-
-func TestRACKCheckReorderWindow(t *testing.T) {
- c := context.New(t, uint32(mtu))
- defer c.Cleanup()
-
- probeDone := make(chan error)
- const ackNumToVerify = 3
- addReorderWindowCheckerProbe(c, ackNumToVerify, probeDone)
-
- const numPackets = 7
- sendAndReceiveWithSACK(t, c, numPackets, true /* enableRACK */)
-
- // Send ACK for #1 packet.
- bytesRead := maxPayload
- seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- c.SendAck(seq, bytesRead)
-
- // Missing [2-6] packets and SACK #7 packet.
- start := c.IRS.Add(1 + seqnum.Size(6*maxPayload))
- end := start.Add(seqnum.Size(maxPayload))
- c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{start, end}})
-
- // Received delayed packets [2-6] which indicates there is reordering
- // in the connection.
- bytesRead += 6 * maxPayload
- c.SendAck(seq, bytesRead)
-
- // Wait for the probe function to finish processing the ACK before the
- // test completes.
- if err := <-probeDone; err != nil {
- t.Fatalf("unexpected values for RACK variables: %v", err)
- }
-}
-
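The reorder-window checks above are loosely tied to RFC 8985: RACK's reordering window starts around a quarter of the minimum RTT, is scaled up by a multiplier each time a DSACK suggests a spurious retransmission, and is capped at the smoothed RTT. The sketch below illustrates that shape only; netstack's exact rules (including when the window is forced to zero) are more involved:

package main

import (
	"fmt"
	"time"
)

// reorderWindow is a rough sketch of RACK's adaptive reordering window.
// Illustrative only.
func reorderWindow(minRTT, srtt time.Duration, dsackRounds int) time.Duration {
	mult := 1 + dsackRounds
	reoWnd := time.Duration(mult) * minRTT / 4
	if reoWnd > srtt {
		reoWnd = srtt
	}
	return reoWnd
}

func main() {
	fmt.Println(reorderWindow(20*time.Millisecond, 30*time.Millisecond, 0)) // 5ms
}
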
-func TestRACKWithDuplicateACK(t *testing.T) {
- c := context.New(t, uint32(mtu))
- defer c.Cleanup()
-
- const numPackets = 4
- data := sendAndReceiveWithSACK(t, c, numPackets, true /* enableRACK */)
-
- // Send three duplicate ACKs to trigger fast recovery. The first
- // segment is considered as lost and will be retransmitted after
- // receiving the duplicate ACKs.
- seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- start := c.IRS.Add(1 + seqnum.Size(maxPayload))
- end := start.Add(seqnum.Size(maxPayload))
- for i := 0; i < 3; i++ {
- c.SendAckWithSACK(seq, 0, []header.SACKBlock{{start, end}})
- end = end.Add(seqnum.Size(maxPayload))
- }
-
- // Receive the retransmitted packet.
- c.ReceiveAndCheckPacketWithOptions(data, 0, maxPayload, tsOptionSize)
-
- metricPollFn := func() error {
- tcpStats := c.Stack().Stats().TCP
- stats := []struct {
- stat *tcpip.StatCounter
- name string
- want uint64
- }{
- {tcpStats.FastRetransmit, "stats.TCP.FastRetransmit", 1},
- {tcpStats.SACKRecovery, "stats.TCP.SACKRecovery", 1},
- {tcpStats.FastRecovery, "stats.TCP.FastRecovery", 0},
- }
- for _, s := range stats {
- if got, want := s.stat.Value(), s.want; got != want {
- return fmt.Errorf("got %s.Value() = %d, want = %d", s.name, got, want)
- }
- }
- return nil
- }
-
- if err := testutil.Poll(metricPollFn, 1*time.Second); err != nil {
- t.Error(err)
- }
-}
-
- // TestRACKUpdateSackedOut tests that the SackedOut field is updated when a
- // SACK is received.
-func TestRACKUpdateSackedOut(t *testing.T) {
- c := context.New(t, uint32(mtu))
- defer c.Cleanup()
-
- probeDone := make(chan struct{})
- ackNum := 0
- c.Stack().AddTCPProbe(func(state stack.TCPEndpointState) {
- // Validate that the endpoint Sender.SackedOut is what we expect.
- if state.Sender.SackedOut != 2 && ackNum == 0 {
- t.Fatalf("SackedOut got updated to wrong value got: %v want: 2", state.Sender.SackedOut)
- }
-
- if state.Sender.SackedOut != 0 && ackNum == 1 {
- t.Fatalf("SackedOut got updated to wrong value got: %v want: 0", state.Sender.SackedOut)
- }
- if ackNum > 0 {
- close(probeDone)
- }
- ackNum++
- })
-
- sendAndReceiveWithSACK(t, c, 8, true /* enableRACK */)
-
- // ACK for [3-5] packets.
- seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- start := c.IRS.Add(seqnum.Size(1 + 3*maxPayload))
- bytesRead := 2 * maxPayload
- end := start.Add(seqnum.Size(bytesRead))
- c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{start, end}})
-
- bytesRead += 3 * maxPayload
- c.SendAck(seq, bytesRead)
-
- // Wait for the probe function to finish processing the ACK before the
- // test completes.
- <-probeDone
-}
-
-// TestRACKWithWindowFull tests that RACK honors the receive window size.
-func TestRACKWithWindowFull(t *testing.T) {
- c := context.New(t, uint32(mtu))
- defer c.Cleanup()
-
- setStackSACKPermitted(t, c, true)
- createConnectedWithSACKAndTS(c)
-
- seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- const numPkts = 10
- data := make([]byte, numPkts*maxPayload)
- for i := range data {
- data[i] = byte(i)
- }
-
- // Write the data.
- var r bytes.Reader
- r.Reset(data)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- bytesRead := 0
- for i := 0; i < numPkts; i++ {
- c.ReceiveAndCheckPacketWithOptions(data, bytesRead, maxPayload, tsOptionSize)
- bytesRead += maxPayload
- if i == 0 {
- // Send ACK for the first packet to establish RTT.
- c.SendAck(seq, maxPayload)
- }
- }
-
- // SACK for #10 packet.
- start := c.IRS.Add(seqnum.Size(1 + (numPkts-1)*maxPayload))
- end := start.Add(seqnum.Size(maxPayload))
- c.SendAckWithSACK(seq, 2*maxPayload, []header.SACKBlock{{start, end}})
-
- var info tcpip.TCPInfoOption
- if err := c.EP.GetSockOpt(&info); err != nil {
- t.Fatalf("GetSockOpt failed: %v", err)
- }
- // Wait for RTT to trigger recovery.
- time.Sleep(info.RTT)
-
- // Expect retransmission of #2 packet.
- c.ReceiveAndCheckPacketWithOptions(data, 2*maxPayload, maxPayload, tsOptionSize)
-
- // Send ACK for #2 packet.
- c.SendAck(seq, 3*maxPayload)
-
- // Expect retransmission of #3 packet.
- c.ReceiveAndCheckPacketWithOptions(data, 3*maxPayload, maxPayload, tsOptionSize)
-
- // Send ACK with zero window size.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: seq,
- AckNum: c.IRS.Add(1 + 4*maxPayload),
- RcvWnd: 0,
- })
-
- // No packet should be received as the receive window size is zero.
- c.CheckNoPacket("unexpected packet received after userTimeout has expired")
-}
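The final check above relies on the sender honoring the advertised receive window: the data it may have in flight is bounded by the smaller of the congestion window and the peer's receive window, so a zero window stalls transmission entirely. A trivial illustration (not netstack's send path):

package main

import "fmt"

// usableWindow returns how many more bytes a sender may transmit given the
// congestion window, the advertised receive window and the bytes already in
// flight. Illustrative only.
func usableWindow(cwnd, rwnd, inFlight int) int {
	limit := cwnd
	if rwnd < limit {
		limit = rwnd
	}
	if limit <= inFlight {
		return 0
	}
	return limit - inFlight
}

func main() {
	fmt.Println(usableWindow(10000, 0, 4000)) // 0: a zero window stops the sender.
}
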
diff --git a/pkg/tcpip/transport/tcp/tcp_sack_test.go b/pkg/tcpip/transport/tcp/tcp_sack_test.go
deleted file mode 100644
index 83e0653b9..000000000
--- a/pkg/tcpip/transport/tcp/tcp_sack_test.go
+++ /dev/null
@@ -1,704 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_test
-
-import (
- "bytes"
- "fmt"
- "log"
- "reflect"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/seqnum"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
- "gvisor.dev/gvisor/pkg/tcpip/transport/tcp/testing/context"
- "gvisor.dev/gvisor/pkg/test/testutil"
-)
-
-// createConnectedWithSACKPermittedOption creates and connects c.ep with the
- // SACKPermitted option enabled if the stack in the context has SACK support
- // enabled.
-func createConnectedWithSACKPermittedOption(c *context.Context) *context.RawEndpoint {
- return c.CreateConnectedWithOptions(header.TCPSynOptions{SACKPermitted: c.SACKEnabled()})
-}
-
-// createConnectedWithSACKAndTS creates and connects c.ep with the SACK & TS
-// option enabled if the stack in the context has SACK and TS enabled.
-func createConnectedWithSACKAndTS(c *context.Context) *context.RawEndpoint {
- return c.CreateConnectedWithOptions(header.TCPSynOptions{SACKPermitted: c.SACKEnabled(), TS: true})
-}
-
-func setStackSACKPermitted(t *testing.T, c *context.Context, enable bool) {
- t.Helper()
- opt := tcpip.TCPSACKEnabled(enable)
- if err := c.Stack().SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- t.Fatalf("c.s.SetTransportProtocolOption(%d, &%T(%t)): %s", tcp.ProtocolNumber, opt, opt, err)
- }
-}
-
-// TestSackPermittedConnect establishes a connection with the SACK option
-// enabled.
-func TestSackPermittedConnect(t *testing.T) {
- for _, sackEnabled := range []bool{false, true} {
- t.Run(fmt.Sprintf("stack.sackEnabled: %v", sackEnabled), func(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- setStackSACKPermitted(t, c, sackEnabled)
- setStackTCPRecovery(t, c, 0)
- rep := createConnectedWithSACKPermittedOption(c)
- data := []byte{1, 2, 3}
-
- rep.SendPacket(data, nil)
- savedSeqNum := rep.NextSeqNum
- rep.VerifyACKNoSACK()
-
- // Make an out of order packet and send it.
- rep.NextSeqNum += 3
- sackBlocks := []header.SACKBlock{
- {rep.NextSeqNum, rep.NextSeqNum.Add(seqnum.Size(len(data)))},
- }
- rep.SendPacket(data, nil)
-
- // Restore the saved sequence number so that the
- // VerifyXXX calls use the right sequence number for
- // checking ACK numbers.
- rep.NextSeqNum = savedSeqNum
- if sackEnabled {
- rep.VerifyACKHasSACK(sackBlocks)
- } else {
- rep.VerifyACKNoSACK()
- }
-
- // Send the missing segment.
- rep.SendPacket(data, nil)
- // The ACK should contain the cumulative ACK for all 9
- // bytes sent and no SACK blocks.
- rep.NextSeqNum += 3
- // Check that no SACK block is returned in the ACK.
- rep.VerifyACKNoSACK()
- })
- }
-}
-
-// TestSackDisabledConnect establishes a connection with the SACK option
-// disabled and verifies that no SACKs are sent for out of order segments.
-func TestSackDisabledConnect(t *testing.T) {
- for _, sackEnabled := range []bool{false, true} {
- t.Run(fmt.Sprintf("sackEnabled: %v", sackEnabled), func(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- setStackSACKPermitted(t, c, sackEnabled)
- setStackTCPRecovery(t, c, 0)
-
- rep := c.CreateConnectedWithOptions(header.TCPSynOptions{})
-
- data := []byte{1, 2, 3}
-
- rep.SendPacket(data, nil)
- savedSeqNum := rep.NextSeqNum
- rep.VerifyACKNoSACK()
-
- // Make an out of order packet and send it.
- rep.NextSeqNum += 3
- rep.SendPacket(data, nil)
-
- // The ACK should contain the older sequence number and
- // no SACK blocks.
- rep.NextSeqNum = savedSeqNum
- rep.VerifyACKNoSACK()
-
- // Send the missing segment.
- rep.SendPacket(data, nil)
- // The ACK should contain the cumulative ACK for all 9
- // bytes sent and no SACK blocks.
- rep.NextSeqNum += 3
- // Check that no SACK block is returned in the ACK.
- rep.VerifyACKNoSACK()
- })
- }
-}
-
-// TestSackPermittedAccept accepts and establishes a connection with the
-// SACKPermitted option enabled if the connection request specifies the
- // SACKPermitted option. In the case of SYN cookies, SACK should be disabled as we
-// don't encode the SACK information in the cookie.
-func TestSackPermittedAccept(t *testing.T) {
- type testCase struct {
- cookieEnabled bool
- sackPermitted bool
- wndScale int
- wndSize uint16
- }
-
- testCases := []testCase{
- {true, false, -1, 0xffff}, // When cookie is used window scaling is disabled.
- {false, true, 5, 0x8000}, // 0x8000 * 2^5 = 1<<20 = 1MB window (the default).
- }
-
- for _, tc := range testCases {
- t.Run(fmt.Sprintf("test: %#v", tc), func(t *testing.T) {
- for _, sackEnabled := range []bool{false, true} {
- t.Run(fmt.Sprintf("test stack.sackEnabled: %v", sackEnabled), func(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- if tc.cookieEnabled {
- opt := tcpip.TCPAlwaysUseSynCookies(true)
- if err := c.Stack().SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- t.Fatalf("SetTransportProtocolOption(%d, &%T(%t)): %s", tcp.ProtocolNumber, opt, opt, err)
- }
- }
- setStackSACKPermitted(t, c, sackEnabled)
- setStackTCPRecovery(t, c, 0)
-
- rep := c.AcceptWithOptions(tc.wndScale, header.TCPSynOptions{MSS: defaultIPv4MSS, SACKPermitted: tc.sackPermitted})
- // Now verify no SACK blocks are
- // received when sack is disabled.
- data := []byte{1, 2, 3}
- rep.SendPacket(data, nil)
- rep.VerifyACKNoSACK()
-
- savedSeqNum := rep.NextSeqNum
-
- // Make an out of order packet and send
- // it.
- rep.NextSeqNum += 3
- sackBlocks := []header.SACKBlock{
- {rep.NextSeqNum, rep.NextSeqNum.Add(seqnum.Size(len(data)))},
- }
- rep.SendPacket(data, nil)
-
- // The ACK should contain the older
- // sequence number.
- rep.NextSeqNum = savedSeqNum
- if sackEnabled && tc.sackPermitted {
- rep.VerifyACKHasSACK(sackBlocks)
- } else {
- rep.VerifyACKNoSACK()
- }
-
- // Send the missing segment.
- rep.SendPacket(data, nil)
- // The ACK should contain the cumulative
- // ACK for all 9 bytes sent and no SACK
- // blocks.
- rep.NextSeqNum += 3
- // Check that no SACK block is returned
- // in the ACK.
- rep.VerifyACKNoSACK()
- })
- }
- })
- }
-}
-
-// TestSackDisabledAccept accepts and establishes a connection with
-// the SACKPermitted option disabled and verifies that no SACKs are
-// sent for out of order packets.
-func TestSackDisabledAccept(t *testing.T) {
- type testCase struct {
- cookieEnabled bool
- wndScale int
- wndSize uint16
- }
-
- testCases := []testCase{
- {true, -1, 0xffff}, // When cookie is used window scaling is disabled.
- {false, 5, 0x8000}, // 0x8000 * 2^5 = 1<<20 = 1MB window (the default).
- }
-
- for _, tc := range testCases {
- t.Run(fmt.Sprintf("test: %#v", tc), func(t *testing.T) {
- for _, sackEnabled := range []bool{false, true} {
- t.Run(fmt.Sprintf("test: sackEnabled: %v", sackEnabled), func(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- if tc.cookieEnabled {
- opt := tcpip.TCPAlwaysUseSynCookies(true)
- if err := c.Stack().SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- t.Fatalf("SetTransportProtocolOption(%d, &%T(%t)): %s", tcp.ProtocolNumber, opt, opt, err)
- }
- }
-
- setStackSACKPermitted(t, c, sackEnabled)
- setStackTCPRecovery(t, c, 0)
-
- rep := c.AcceptWithOptions(tc.wndScale, header.TCPSynOptions{MSS: defaultIPv4MSS})
-
- // Now verify no SACK blocks are
- // received when sack is disabled.
- data := []byte{1, 2, 3}
- rep.SendPacket(data, nil)
- rep.VerifyACKNoSACK()
- savedSeqNum := rep.NextSeqNum
-
- // Make an out of order packet and send
- // it.
- rep.NextSeqNum += 3
- rep.SendPacket(data, nil)
-
- // The ACK should contain the older
- // sequence number and no SACK blocks.
- rep.NextSeqNum = savedSeqNum
- rep.VerifyACKNoSACK()
-
- // Send the missing segment.
- rep.SendPacket(data, nil)
- // The ACK should contain the cumulative
- // ACK for all 9 bytes sent and no SACK
- // blocks.
- rep.NextSeqNum += 3
- // Check that no SACK block is returned
- // in the ACK.
- rep.VerifyACKNoSACK()
- })
- }
- })
- }
-}
-
-func TestUpdateSACKBlocks(t *testing.T) {
- testCases := []struct {
- segStart seqnum.Value
- segEnd seqnum.Value
- rcvNxt seqnum.Value
- sackBlocks []header.SACKBlock
- updated []header.SACKBlock
- }{
- // Trivial cases where current SACK block list is empty and we
- // have an out of order delivery.
- {10, 11, 2, []header.SACKBlock{}, []header.SACKBlock{{10, 11}}},
- {10, 12, 2, []header.SACKBlock{}, []header.SACKBlock{{10, 12}}},
- {10, 20, 2, []header.SACKBlock{}, []header.SACKBlock{{10, 20}}},
-
- // Cases where current SACK block list is not empty and we have
- // an out of order delivery. Tests that the updated SACK block
- // list has the first block as the one that contains the new
- // SACK block representing the segment that was just delivered.
- {10, 11, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{10, 11}, {12, 20}}},
- {24, 30, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{24, 30}, {12, 20}}},
- {24, 30, 9, []header.SACKBlock{{12, 20}, {32, 40}}, []header.SACKBlock{{24, 30}, {12, 20}, {32, 40}}},
-
- // Ensure that we only retain header.MaxSACKBlocks and drop the
- // oldest one if adding a new block exceeds
- // header.MaxSACKBlocks.
- {24, 30, 9,
- []header.SACKBlock{{12, 20}, {32, 40}, {42, 50}, {52, 60}, {62, 70}, {72, 80}},
- []header.SACKBlock{{24, 30}, {12, 20}, {32, 40}, {42, 50}, {52, 60}, {62, 70}}},
-
- // Cases where segment extends an existing SACK block.
- {10, 12, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{10, 20}}},
- {10, 22, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{10, 22}}},
- {10, 22, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{10, 22}}},
- {15, 22, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{12, 22}}},
- {15, 25, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{12, 25}}},
- {11, 25, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{11, 25}}},
- {10, 12, 9, []header.SACKBlock{{12, 20}, {32, 40}}, []header.SACKBlock{{10, 20}, {32, 40}}},
- {10, 22, 9, []header.SACKBlock{{12, 20}, {32, 40}}, []header.SACKBlock{{10, 22}, {32, 40}}},
- {10, 22, 9, []header.SACKBlock{{12, 20}, {32, 40}}, []header.SACKBlock{{10, 22}, {32, 40}}},
- {15, 22, 9, []header.SACKBlock{{12, 20}, {32, 40}}, []header.SACKBlock{{12, 22}, {32, 40}}},
- {15, 25, 9, []header.SACKBlock{{12, 20}, {32, 40}}, []header.SACKBlock{{12, 25}, {32, 40}}},
- {11, 25, 9, []header.SACKBlock{{12, 20}, {32, 40}}, []header.SACKBlock{{11, 25}, {32, 40}}},
-
- // Cases where segment contains rcvNxt.
- {10, 20, 15, []header.SACKBlock{{20, 30}, {40, 50}}, []header.SACKBlock{{40, 50}}},
- }
-
- for _, tc := range testCases {
- var sack tcp.SACKInfo
- copy(sack.Blocks[:], tc.sackBlocks)
- sack.NumBlocks = len(tc.sackBlocks)
- tcp.UpdateSACKBlocks(&sack, tc.segStart, tc.segEnd, tc.rcvNxt)
- if got, want := sack.Blocks[:sack.NumBlocks], tc.updated; !reflect.DeepEqual(got, want) {
- t.Errorf("UpdateSACKBlocks(%v, %v, %v, %v), got: %v, want: %v", tc.sackBlocks, tc.segStart, tc.segEnd, tc.rcvNxt, got, want)
- }
-
- }
-}
-
-func TestTrimSackBlockList(t *testing.T) {
- testCases := []struct {
- rcvNxt seqnum.Value
- sackBlocks []header.SACKBlock
- trimmed []header.SACKBlock
- }{
- // Simple cases where we trim whole entries.
- {2, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}},
- {21, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{{22, 30}, {32, 40}}},
- {31, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{{32, 40}}},
- {40, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{}},
- // Cases where we need to update a block.
- {12, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{{12, 20}, {22, 30}, {32, 40}}},
- {23, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{{23, 30}, {32, 40}}},
- {33, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{{33, 40}}},
- {41, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{}},
- }
- for _, tc := range testCases {
- var sack tcp.SACKInfo
- copy(sack.Blocks[:], tc.sackBlocks)
- sack.NumBlocks = len(tc.sackBlocks)
- tcp.TrimSACKBlockList(&sack, tc.rcvNxt)
- if got, want := sack.Blocks[:sack.NumBlocks], tc.trimmed; !reflect.DeepEqual(got, want) {
- t.Errorf("TrimSackBlockList(%v, %v), got: %v, want: %v", tc.sackBlocks, tc.rcvNxt, got, want)
- }
- }
-}
-
-func TestSACKRecovery(t *testing.T) {
- const maxPayload = 10
- // See: tcp.makeOptions for why tsOptionSize is set to 12 here.
- const tsOptionSize = 12
- // Enabling SACK means the payload size is reduced to account
- // for the extra space required for the TCP options.
- //
- // We increase the MTU by 40 bytes to account for SACK and Timestamp
- // options.
- const maxTCPOptionSize = 40
-
- c := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxTCPOptionSize+maxPayload))
- defer c.Cleanup()
-
- c.Stack().AddTCPProbe(func(s stack.TCPEndpointState) {
- // We use log.Printf instead of t.Logf here because this probe
- // can fire even when the test function has finished. This is
- // because closing the endpoint in cleanup() does not mean the
- // actual worker loop terminates immediately as it still has to
- // do a full TCP shutdown. But this test can finish running
- // before the shutdown is done. Using t.Logf in such a case
- // causes the test to panic due to logging after the test has finished.
- log.Printf("state: %+v\n", s)
- })
- setStackSACKPermitted(t, c, true)
- setStackTCPRecovery(t, c, 0)
- createConnectedWithSACKAndTS(c)
-
- const iterations = 3
- data := make([]byte, 2*maxPayload*(tcp.InitialCwnd<<(iterations+1)))
- for i := range data {
- data[i] = byte(i)
- }
-
- // Write all the data in one shot. Packets will only be written at the
- // MTU size though.
- var r bytes.Reader
- r.Reset(data)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- // Do slow start for a few iterations.
- seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- expected := tcp.InitialCwnd
- bytesRead := 0
- for i := 0; i < iterations; i++ {
- expected = tcp.InitialCwnd << uint(i)
- if i > 0 {
- // Acknowledge all the data received so far if not on
- // first iteration.
- c.SendAck(seq, bytesRead)
- }
-
- // Read all packets expected on this iteration. Don't
- // acknowledge any of them just yet, so that we can measure the
- // congestion window.
- for j := 0; j < expected; j++ {
- c.ReceiveAndCheckPacketWithOptions(data, bytesRead, maxPayload, tsOptionSize)
- bytesRead += maxPayload
- }
-
- // Check we don't receive any more packets on this iteration.
- // The timeout can't be too high or we may trip the retransmission timer.
- c.CheckNoPacketTimeout("More packets received than expected for this cwnd.", 50*time.Millisecond)
- }
-
- // Send 3 duplicate acks. This should force an immediate retransmit of
- // the pending packet and put the sender into fast recovery.
- rtxOffset := bytesRead - maxPayload*expected
- start := c.IRS.Add(seqnum.Size(rtxOffset) + 30 + 1)
- end := start.Add(10)
- for i := 0; i < 3; i++ {
- c.SendAckWithSACK(seq, rtxOffset, []header.SACKBlock{{start, end}})
- end = end.Add(10)
- }
-
- // Receive the retransmitted packet.
- c.ReceiveAndCheckPacketWithOptions(data, rtxOffset, maxPayload, tsOptionSize)
-
- metricPollFn := func() error {
- tcpStats := c.Stack().Stats().TCP
- stats := []struct {
- stat *tcpip.StatCounter
- name string
- want uint64
- }{
- {tcpStats.FastRetransmit, "stats.TCP.FastRetransmit", 1},
- {tcpStats.Retransmits, "stats.TCP.Retransmits", 1},
- {tcpStats.SACKRecovery, "stats.TCP.SACKRecovery", 1},
- {tcpStats.FastRecovery, "stats.TCP.FastRecovery", 0},
- }
- for _, s := range stats {
- if got, want := s.stat.Value(), s.want; got != want {
- return fmt.Errorf("got %s.Value() = %d, want = %d", s.name, got, want)
- }
- }
- return nil
- }
-
- if err := testutil.Poll(metricPollFn, 1*time.Second); err != nil {
- t.Error(err)
- }
-
- // Now send 7 more duplicate ACKs. In SACK TCP dupAcks do not cause
- // window inflation and sending of packets is completely handled by the
- // SACK Recovery algorithm. We should see no packets being released, as
- // the cwnd at this point after entering recovery should be half of the
- // outstanding number of packets in flight.
- for i := 0; i < 7; i++ {
- c.SendAckWithSACK(seq, rtxOffset, []header.SACKBlock{{start, end}})
- end = end.Add(10)
- }
-
- recover := bytesRead
-
- // Ensure no new packets arrive.
- c.CheckNoPacketTimeout("More packets received than expected during recovery after dupacks for this cwnd.",
- 50*time.Millisecond)
-
- // Acknowledge half of the pending data. This, along with the 10 SACKed
- // segments above, should reduce the number of outstanding packets below
- // the current congestion window, allowing the sender to transmit data.
- rtxOffset = bytesRead - expected*maxPayload/2
-
- // Now send a partial ACK w/ a SACK block that indicates that the next 3
- // segments are lost and we have received 6 segments after the lost
- // segments. This should cause the sender to immediately transmit all 3
- // segments in response to this ACK unlike in FastRecovery where only 1
- // segment is retransmitted per ACK.
- start = c.IRS.Add(seqnum.Size(rtxOffset) + 30 + 1)
- end = start.Add(60)
- c.SendAckWithSACK(seq, rtxOffset, []header.SACKBlock{{start, end}})
-
- // At this point, we acked expected/2 packets and we SACKED 6 packets and
- // 3 segments were considered lost due to the SACK block we sent.
- //
- // So total packets outstanding can be calculated as follows after 7
- // iterations of slow start -> 10/20/40/80/160/320/640. So expected
- // should be 640 at start, then we went to recover at which point the
- // cwnd should be set to 320 + 3 (for the 3 dupAcks which have left the
- // network).
- // Outstanding at this point after acking half the window
- // (320 packets) will be:
- // outstanding = 640-320-6(due to SACK block)-3 = 311
- //
- // The last 3 is due to the fact that the first 3 packets after
- // rtxOffset will be considered lost due to the SACK blocks sent.
- // Receive the retransmit due to partial ack.
-
- c.ReceiveAndCheckPacketWithOptions(data, rtxOffset, maxPayload, tsOptionSize)
- // Receive the 2 extra packets that should have been retransmitted as
- // those should be considered lost and immediately retransmitted based
- // on the SACK information in the previous ACK sent above.
- for i := 0; i < 2; i++ {
- c.ReceiveAndCheckPacketWithOptions(data, rtxOffset+maxPayload*(i+1), maxPayload, tsOptionSize)
- }
-
- // Now we should get 9 more new unsent packets as the cwnd is 323 and
- // outstanding is 311.
- for i := 0; i < 9; i++ {
- c.ReceiveAndCheckPacketWithOptions(data, bytesRead, maxPayload, tsOptionSize)
- bytesRead += maxPayload
- }
-
- metricPollFn = func() error {
- // In SACK recovery only the first segment is fast retransmitted when
- // entering recovery.
- if got, want := c.Stack().Stats().TCP.FastRetransmit.Value(), uint64(1); got != want {
- return fmt.Errorf("got stats.TCP.FastRetransmit.Value = %d, want = %d", got, want)
- }
-
- if got, want := c.EP.Stats().(*tcp.Stats).SendErrors.FastRetransmit.Value(), uint64(1); got != want {
- return fmt.Errorf("got EP stats SendErrors.FastRetransmit = %d, want = %d", got, want)
- }
-
- if got, want := c.Stack().Stats().TCP.Retransmits.Value(), uint64(4); got != want {
- return fmt.Errorf("got stats.TCP.Retransmits.Value = %d, want = %d", got, want)
- }
-
- if got, want := c.EP.Stats().(*tcp.Stats).SendErrors.Retransmits.Value(), uint64(4); got != want {
- return fmt.Errorf("got EP stats Stats.SendErrors.Retransmits = %d, want = %d", got, want)
- }
- return nil
- }
- if err := testutil.Poll(metricPollFn, 1*time.Second); err != nil {
- t.Error(err)
- }
-
- c.CheckNoPacketTimeout("More packets received than expected during recovery after partial ack for this cwnd.", 50*time.Millisecond)
-
- // Acknowledge all pending data to recover point.
- c.SendAck(seq, recover)
-
- // At this point, the cwnd should reset to expected/2 and there are 9
- // packets outstanding.
- //
- // In the first iteration, since there are 9 packets outstanding,
- // we would expect to get expected/2 - 9 packets. But subsequent
- // iterations will send us expected/2 + 1 (per iteration).
- expected = expected/2 - 9
- for i := 0; i < iterations; i++ {
- // Read all packets expected on this iteration. Don't
- // acknowledge any of them just yet, so that we can measure the
- // congestion window.
- for j := 0; j < expected; j++ {
- c.ReceiveAndCheckPacketWithOptions(data, bytesRead, maxPayload, tsOptionSize)
- bytesRead += maxPayload
- }
- // Check we don't receive any more packets on this iteration.
- // The timeout can't be too high or we may trip the retransmission timer.
- c.CheckNoPacketTimeout(fmt.Sprintf("More packets received(after deflation) than expected %d for this cwnd and iteration: %d.", expected, i), 50*time.Millisecond)
-
- // Acknowledge all the data received so far.
- c.SendAck(seq, bytesRead)
-
- // In congestion avoidance, the packet train increases by 1 in
- // each iteration.
- if i == 0 {
- // After the first iteration we expect to get the full
- // congestion window worth of packets in every
- // iteration.
- expected += 9
- }
- expected++
- }
-}
-
-// TestRecoveryEntry tests the following two properties of entering recovery:
-// - Fast SACK recovery is entered when SND.UNA is considered lost by the SACK
-// scoreboard but dupack count is still below threshold.
-// - Only enter recovery when at least one more byte of data beyond the highest
-// byte that was outstanding when fast retransmit was last entered is acked.
-func TestRecoveryEntry(t *testing.T) {
- c := context.New(t, uint32(mtu))
- defer c.Cleanup()
-
- numPackets := 5
- data := sendAndReceiveWithSACK(t, c, numPackets, false /* enableRACK */)
-
- // Ack #1 packet.
- seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- c.SendAck(seq, maxPayload)
-
- // Now SACK #3, #4 and #5 packets. This will simulate a situation where
- // SND.UNA should be considered lost and the sender should enter fast recovery
- // (even though dupack count is still below threshold).
- p3Start := c.IRS.Add(1 + seqnum.Size(2*maxPayload))
- p3End := p3Start.Add(maxPayload)
- p4Start := p3End
- p4End := p4Start.Add(maxPayload)
- p5Start := p4End
- p5End := p5Start.Add(maxPayload)
- c.SendAckWithSACK(seq, maxPayload, []header.SACKBlock{{p3Start, p3End}, {p4Start, p4End}, {p5Start, p5End}})
-
- // Expect #2 to be retransmitted.
- c.ReceiveAndCheckPacketWithOptions(data, maxPayload, maxPayload, tsOptionSize)
-
- metricPollFn := func() error {
- tcpStats := c.Stack().Stats().TCP
- stats := []struct {
- stat *tcpip.StatCounter
- name string
- want uint64
- }{
- // SACK recovery must have happened.
- {tcpStats.FastRetransmit, "stats.TCP.FastRetransmit", 1},
- {tcpStats.SACKRecovery, "stats.TCP.SACKRecovery", 1},
- // #2 was retransmitted.
- {tcpStats.Retransmits, "stats.TCP.Retransmits", 1},
- // No RTOs should have fired yet.
- {tcpStats.Timeouts, "stats.TCP.Timeouts", 0},
- }
- for _, s := range stats {
- if got, want := s.stat.Value(), s.want; got != want {
- return fmt.Errorf("got %s.Value() = %d, want = %d", s.name, got, want)
- }
- }
- return nil
- }
- if err := testutil.Poll(metricPollFn, 1*time.Second); err != nil {
- t.Error(err)
- }
-
- // Send 4 more packets.
- var r bytes.Reader
- data = append(data, data...)
- r.Reset(data[5*maxPayload : 9*maxPayload])
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- var sackBlocks []header.SACKBlock
- bytesRead := numPackets * maxPayload
- for i := 0; i < 4; i++ {
- c.ReceiveAndCheckPacketWithOptions(data, bytesRead, maxPayload, tsOptionSize)
- if i > 0 {
- pStart := c.IRS.Add(1 + seqnum.Size(bytesRead))
- sackBlocks = append(sackBlocks, header.SACKBlock{pStart, pStart.Add(maxPayload)})
- c.SendAckWithSACK(seq, 5*maxPayload, sackBlocks)
- }
- bytesRead += maxPayload
- }
-
- // #6 should be retransmitted after RTO. The sender should NOT enter fast
- // recovery because the highest byte that was outstanding when fast recovery
- // was last entered is #5 packet's end. And the sender requires at least one
- // more byte beyond that (#6 packet start) to be acked to enter recovery.
- c.ReceiveAndCheckPacketWithOptions(data, 5*maxPayload, maxPayload, tsOptionSize)
- c.SendAck(seq, 9*maxPayload)
-
- metricPollFn = func() error {
- tcpStats := c.Stack().Stats().TCP
- stats := []struct {
- stat *tcpip.StatCounter
- name string
- want uint64
- }{
- // Only 1 SACK recovery must have happened.
- {tcpStats.FastRetransmit, "stats.TCP.FastRetransmit", 1},
- {tcpStats.SACKRecovery, "stats.TCP.SACKRecovery", 1},
- // #2 and #6 were retransmitted.
- {tcpStats.Retransmits, "stats.TCP.Retransmits", 2},
- // RTO should have fired once.
- {tcpStats.Timeouts, "stats.TCP.Timeouts", 1},
- }
- for _, s := range stats {
- if got, want := s.stat.Value(), s.want; got != want {
- return fmt.Errorf("got %s.Value() = %d, want = %d", s.name, got, want)
- }
- }
- return nil
- }
- if err := testutil.Poll(metricPollFn, 1*time.Second); err != nil {
- t.Error(err)
- }
-}
diff --git a/pkg/tcpip/transport/tcp/tcp_segment_list.go b/pkg/tcpip/transport/tcp/tcp_segment_list.go
new file mode 100644
index 000000000..a14cff27e
--- /dev/null
+++ b/pkg/tcpip/transport/tcp/tcp_segment_list.go
@@ -0,0 +1,221 @@
+package tcp
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type segmentElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (segmentElementMapper) linkerFor(elem *segment) *segment { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type segmentList struct {
+ head *segment
+ tail *segment
+}
+
+// Reset resets list l to the empty state.
+func (l *segmentList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *segmentList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *segmentList) Front() *segment {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *segmentList) Back() *segment {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *segmentList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (segmentElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *segmentList) PushFront(e *segment) {
+ linker := segmentElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ segmentElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *segmentList) PushBack(e *segment) {
+ linker := segmentElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ segmentElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *segmentList) PushBackList(m *segmentList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ segmentElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ segmentElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *segmentList) InsertAfter(b, e *segment) {
+ bLinker := segmentElementMapper{}.linkerFor(b)
+ eLinker := segmentElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ segmentElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *segmentList) InsertBefore(a, e *segment) {
+ aLinker := segmentElementMapper{}.linkerFor(a)
+ eLinker := segmentElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ segmentElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *segmentList) Remove(e *segment) {
+ linker := segmentElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ segmentElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ segmentElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type segmentEntry struct {
+ next *segment
+ prev *segment
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *segmentEntry) Next() *segment {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *segmentEntry) Prev() *segment {
+ return e.prev
+}
+
+// SetNext assigns 'elem' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *segmentEntry) SetNext(elem *segment) {
+ e.next = elem
+}
+
+// SetPrev assigns 'elem' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *segmentEntry) SetPrev(elem *segment) {
+ e.prev = elem
+}
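The generated segmentList above is an intrusive doubly linked list: the prev/next links live inside each segment (via the embedded segmentEntry), so insertion and removal are O(1) with no per-node allocation. Below is a minimal, self-contained sketch of the same pattern for illustration only; the node, nodeEntry, and nodeList names are hypothetical stand-ins and are not part of the package in the diff.

package main

import "fmt"

// nodeEntry mirrors segmentEntry: the prev/next links are embedded in the
// element itself, so no separate list node is allocated.
type nodeEntry struct {
	next *node
	prev *node
}

// node is a hypothetical element type; embedding nodeEntry gives it the links.
type node struct {
	nodeEntry
	value int
}

// nodeList mirrors segmentList: just head and tail pointers.
type nodeList struct {
	head *node
	tail *node
}

// PushBack appends e in O(1) by rewiring the embedded links.
func (l *nodeList) PushBack(e *node) {
	e.next = nil
	e.prev = l.tail
	if l.tail != nil {
		l.tail.next = e
	} else {
		l.head = e
	}
	l.tail = e
}

// Remove unlinks e in O(1); no search is needed because the links are
// stored in e itself.
func (l *nodeList) Remove(e *node) {
	if e.prev != nil {
		e.prev.next = e.next
	} else if l.head == e {
		l.head = e.next
	}
	if e.next != nil {
		e.next.prev = e.prev
	} else if l.tail == e {
		l.tail = e.prev
	}
	e.next, e.prev = nil, nil
}

func main() {
	var l nodeList
	a, b, c := &node{value: 1}, &node{value: 2}, &node{value: 3}
	l.PushBack(a)
	l.PushBack(b)
	l.PushBack(c)
	l.Remove(b)
	for e := l.head; e != nil; e = e.next {
		fmt.Println(e.value) // prints 1, then 3
	}
}

One consequence of this pattern, mirrored by the generated code above, is that an element can belong to only one such list at a time, which is why each element type gets its own entry/list pair.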
diff --git a/pkg/tcpip/transport/tcp/tcp_state_autogen.go b/pkg/tcpip/transport/tcp/tcp_state_autogen.go
new file mode 100644
index 000000000..9c46d31d7
--- /dev/null
+++ b/pkg/tcpip/transport/tcp/tcp_state_autogen.go
@@ -0,0 +1,897 @@
+// automatically generated by stateify.
+
+package tcp
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+ "gvisor.dev/gvisor/pkg/tcpip/buffer"
+)
+
+func (c *cubicState) StateTypeName() string {
+ return "pkg/tcpip/transport/tcp.cubicState"
+}
+
+func (c *cubicState) StateFields() []string {
+ return []string{
+ "TCPCubicState",
+ "numCongestionEvents",
+ "s",
+ }
+}
+
+func (c *cubicState) beforeSave() {}
+
+// +checklocksignore
+func (c *cubicState) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.TCPCubicState)
+ stateSinkObject.Save(1, &c.numCongestionEvents)
+ stateSinkObject.Save(2, &c.s)
+}
+
+func (c *cubicState) afterLoad() {}
+
+// +checklocksignore
+func (c *cubicState) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.TCPCubicState)
+ stateSourceObject.Load(1, &c.numCongestionEvents)
+ stateSourceObject.Load(2, &c.s)
+}
+
+func (s *SACKInfo) StateTypeName() string {
+ return "pkg/tcpip/transport/tcp.SACKInfo"
+}
+
+func (s *SACKInfo) StateFields() []string {
+ return []string{
+ "Blocks",
+ "NumBlocks",
+ }
+}
+
+func (s *SACKInfo) beforeSave() {}
+
+// +checklocksignore
+func (s *SACKInfo) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.Blocks)
+ stateSinkObject.Save(1, &s.NumBlocks)
+}
+
+func (s *SACKInfo) afterLoad() {}
+
+// +checklocksignore
+func (s *SACKInfo) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.Blocks)
+ stateSourceObject.Load(1, &s.NumBlocks)
+}
+
+func (s *sndQueueInfo) StateTypeName() string {
+ return "pkg/tcpip/transport/tcp.sndQueueInfo"
+}
+
+func (s *sndQueueInfo) StateFields() []string {
+ return []string{
+ "TCPSndBufState",
+ }
+}
+
+func (s *sndQueueInfo) beforeSave() {}
+
+// +checklocksignore
+func (s *sndQueueInfo) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.TCPSndBufState)
+}
+
+func (s *sndQueueInfo) afterLoad() {}
+
+// +checklocksignore
+func (s *sndQueueInfo) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.TCPSndBufState)
+}
+
+func (r *rcvQueueInfo) StateTypeName() string {
+ return "pkg/tcpip/transport/tcp.rcvQueueInfo"
+}
+
+func (r *rcvQueueInfo) StateFields() []string {
+ return []string{
+ "TCPRcvBufState",
+ "rcvQueue",
+ }
+}
+
+func (r *rcvQueueInfo) beforeSave() {}
+
+// +checklocksignore
+func (r *rcvQueueInfo) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.TCPRcvBufState)
+ stateSinkObject.Save(1, &r.rcvQueue)
+}
+
+func (r *rcvQueueInfo) afterLoad() {}
+
+// +checklocksignore
+func (r *rcvQueueInfo) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.TCPRcvBufState)
+ stateSourceObject.LoadWait(1, &r.rcvQueue)
+}
+
+func (a *accepted) StateTypeName() string {
+ return "pkg/tcpip/transport/tcp.accepted"
+}
+
+func (a *accepted) StateFields() []string {
+ return []string{
+ "endpoints",
+ "cap",
+ }
+}
+
+func (a *accepted) beforeSave() {}
+
+// +checklocksignore
+func (a *accepted) StateSave(stateSinkObject state.Sink) {
+ a.beforeSave()
+ var endpointsValue []*endpoint = a.saveEndpoints()
+ stateSinkObject.SaveValue(0, endpointsValue)
+ stateSinkObject.Save(1, &a.cap)
+}
+
+func (a *accepted) afterLoad() {}
+
+// +checklocksignore
+func (a *accepted) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(1, &a.cap)
+ stateSourceObject.LoadValue(0, new([]*endpoint), func(y interface{}) { a.loadEndpoints(y.([]*endpoint)) })
+}
+
+func (e *endpoint) StateTypeName() string {
+ return "pkg/tcpip/transport/tcp.endpoint"
+}
+
+func (e *endpoint) StateFields() []string {
+ return []string{
+ "TCPEndpointStateInner",
+ "TransportEndpointInfo",
+ "DefaultSocketOptionsHandler",
+ "waiterQueue",
+ "uniqueID",
+ "hardError",
+ "lastError",
+ "rcvQueueInfo",
+ "rcvMemUsed",
+ "ownedByUser",
+ "state",
+ "boundNICID",
+ "ttl",
+ "isConnectNotified",
+ "portFlags",
+ "boundBindToDevice",
+ "boundPortFlags",
+ "boundDest",
+ "effectiveNetProtos",
+ "workerRunning",
+ "workerCleanup",
+ "recentTSTime",
+ "shutdownFlags",
+ "tcpRecovery",
+ "sack",
+ "delay",
+ "scoreboard",
+ "segmentQueue",
+ "synRcvdCount",
+ "userMSS",
+ "maxSynRetries",
+ "windowClamp",
+ "sndQueueInfo",
+ "cc",
+ "keepalive",
+ "userTimeout",
+ "deferAccept",
+ "accepted",
+ "rcv",
+ "snd",
+ "connectingAddress",
+ "amss",
+ "sendTOS",
+ "gso",
+ "tcpLingerTimeout",
+ "closed",
+ "txHash",
+ "owner",
+ "ops",
+ "lastOutOfWindowAckTime",
+ }
+}
+
+// +checklocksignore
+func (e *endpoint) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ var stateValue EndpointState = e.saveState()
+ stateSinkObject.SaveValue(10, stateValue)
+ stateSinkObject.Save(0, &e.TCPEndpointStateInner)
+ stateSinkObject.Save(1, &e.TransportEndpointInfo)
+ stateSinkObject.Save(2, &e.DefaultSocketOptionsHandler)
+ stateSinkObject.Save(3, &e.waiterQueue)
+ stateSinkObject.Save(4, &e.uniqueID)
+ stateSinkObject.Save(5, &e.hardError)
+ stateSinkObject.Save(6, &e.lastError)
+ stateSinkObject.Save(7, &e.rcvQueueInfo)
+ stateSinkObject.Save(8, &e.rcvMemUsed)
+ stateSinkObject.Save(9, &e.ownedByUser)
+ stateSinkObject.Save(11, &e.boundNICID)
+ stateSinkObject.Save(12, &e.ttl)
+ stateSinkObject.Save(13, &e.isConnectNotified)
+ stateSinkObject.Save(14, &e.portFlags)
+ stateSinkObject.Save(15, &e.boundBindToDevice)
+ stateSinkObject.Save(16, &e.boundPortFlags)
+ stateSinkObject.Save(17, &e.boundDest)
+ stateSinkObject.Save(18, &e.effectiveNetProtos)
+ stateSinkObject.Save(19, &e.workerRunning)
+ stateSinkObject.Save(20, &e.workerCleanup)
+ stateSinkObject.Save(21, &e.recentTSTime)
+ stateSinkObject.Save(22, &e.shutdownFlags)
+ stateSinkObject.Save(23, &e.tcpRecovery)
+ stateSinkObject.Save(24, &e.sack)
+ stateSinkObject.Save(25, &e.delay)
+ stateSinkObject.Save(26, &e.scoreboard)
+ stateSinkObject.Save(27, &e.segmentQueue)
+ stateSinkObject.Save(28, &e.synRcvdCount)
+ stateSinkObject.Save(29, &e.userMSS)
+ stateSinkObject.Save(30, &e.maxSynRetries)
+ stateSinkObject.Save(31, &e.windowClamp)
+ stateSinkObject.Save(32, &e.sndQueueInfo)
+ stateSinkObject.Save(33, &e.cc)
+ stateSinkObject.Save(34, &e.keepalive)
+ stateSinkObject.Save(35, &e.userTimeout)
+ stateSinkObject.Save(36, &e.deferAccept)
+ stateSinkObject.Save(37, &e.accepted)
+ stateSinkObject.Save(38, &e.rcv)
+ stateSinkObject.Save(39, &e.snd)
+ stateSinkObject.Save(40, &e.connectingAddress)
+ stateSinkObject.Save(41, &e.amss)
+ stateSinkObject.Save(42, &e.sendTOS)
+ stateSinkObject.Save(43, &e.gso)
+ stateSinkObject.Save(44, &e.tcpLingerTimeout)
+ stateSinkObject.Save(45, &e.closed)
+ stateSinkObject.Save(46, &e.txHash)
+ stateSinkObject.Save(47, &e.owner)
+ stateSinkObject.Save(48, &e.ops)
+ stateSinkObject.Save(49, &e.lastOutOfWindowAckTime)
+}
+
+// +checklocksignore
+func (e *endpoint) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.TCPEndpointStateInner)
+ stateSourceObject.Load(1, &e.TransportEndpointInfo)
+ stateSourceObject.Load(2, &e.DefaultSocketOptionsHandler)
+ stateSourceObject.LoadWait(3, &e.waiterQueue)
+ stateSourceObject.Load(4, &e.uniqueID)
+ stateSourceObject.Load(5, &e.hardError)
+ stateSourceObject.Load(6, &e.lastError)
+ stateSourceObject.Load(7, &e.rcvQueueInfo)
+ stateSourceObject.Load(8, &e.rcvMemUsed)
+ stateSourceObject.Load(9, &e.ownedByUser)
+ stateSourceObject.Load(11, &e.boundNICID)
+ stateSourceObject.Load(12, &e.ttl)
+ stateSourceObject.Load(13, &e.isConnectNotified)
+ stateSourceObject.Load(14, &e.portFlags)
+ stateSourceObject.Load(15, &e.boundBindToDevice)
+ stateSourceObject.Load(16, &e.boundPortFlags)
+ stateSourceObject.Load(17, &e.boundDest)
+ stateSourceObject.Load(18, &e.effectiveNetProtos)
+ stateSourceObject.Load(19, &e.workerRunning)
+ stateSourceObject.Load(20, &e.workerCleanup)
+ stateSourceObject.Load(21, &e.recentTSTime)
+ stateSourceObject.Load(22, &e.shutdownFlags)
+ stateSourceObject.Load(23, &e.tcpRecovery)
+ stateSourceObject.Load(24, &e.sack)
+ stateSourceObject.Load(25, &e.delay)
+ stateSourceObject.Load(26, &e.scoreboard)
+ stateSourceObject.LoadWait(27, &e.segmentQueue)
+ stateSourceObject.Load(28, &e.synRcvdCount)
+ stateSourceObject.Load(29, &e.userMSS)
+ stateSourceObject.Load(30, &e.maxSynRetries)
+ stateSourceObject.Load(31, &e.windowClamp)
+ stateSourceObject.Load(32, &e.sndQueueInfo)
+ stateSourceObject.Load(33, &e.cc)
+ stateSourceObject.Load(34, &e.keepalive)
+ stateSourceObject.Load(35, &e.userTimeout)
+ stateSourceObject.Load(36, &e.deferAccept)
+ stateSourceObject.Load(37, &e.accepted)
+ stateSourceObject.LoadWait(38, &e.rcv)
+ stateSourceObject.LoadWait(39, &e.snd)
+ stateSourceObject.Load(40, &e.connectingAddress)
+ stateSourceObject.Load(41, &e.amss)
+ stateSourceObject.Load(42, &e.sendTOS)
+ stateSourceObject.Load(43, &e.gso)
+ stateSourceObject.Load(44, &e.tcpLingerTimeout)
+ stateSourceObject.Load(45, &e.closed)
+ stateSourceObject.Load(46, &e.txHash)
+ stateSourceObject.Load(47, &e.owner)
+ stateSourceObject.Load(48, &e.ops)
+ stateSourceObject.Load(49, &e.lastOutOfWindowAckTime)
+ stateSourceObject.LoadValue(10, new(EndpointState), func(y interface{}) { e.loadState(y.(EndpointState)) })
+ stateSourceObject.AfterLoad(e.afterLoad)
+}
+
+func (k *keepalive) StateTypeName() string {
+ return "pkg/tcpip/transport/tcp.keepalive"
+}
+
+func (k *keepalive) StateFields() []string {
+ return []string{
+ "idle",
+ "interval",
+ "count",
+ "unacked",
+ }
+}
+
+func (k *keepalive) beforeSave() {}
+
+// +checklocksignore
+func (k *keepalive) StateSave(stateSinkObject state.Sink) {
+ k.beforeSave()
+ stateSinkObject.Save(0, &k.idle)
+ stateSinkObject.Save(1, &k.interval)
+ stateSinkObject.Save(2, &k.count)
+ stateSinkObject.Save(3, &k.unacked)
+}
+
+func (k *keepalive) afterLoad() {}
+
+// +checklocksignore
+func (k *keepalive) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &k.idle)
+ stateSourceObject.Load(1, &k.interval)
+ stateSourceObject.Load(2, &k.count)
+ stateSourceObject.Load(3, &k.unacked)
+}
+
+func (rc *rackControl) StateTypeName() string {
+ return "pkg/tcpip/transport/tcp.rackControl"
+}
+
+func (rc *rackControl) StateFields() []string {
+ return []string{
+ "TCPRACKState",
+ "exitedRecovery",
+ "minRTT",
+ "tlpRxtOut",
+ "tlpHighRxt",
+ "snd",
+ }
+}
+
+func (rc *rackControl) beforeSave() {}
+
+// +checklocksignore
+func (rc *rackControl) StateSave(stateSinkObject state.Sink) {
+ rc.beforeSave()
+ stateSinkObject.Save(0, &rc.TCPRACKState)
+ stateSinkObject.Save(1, &rc.exitedRecovery)
+ stateSinkObject.Save(2, &rc.minRTT)
+ stateSinkObject.Save(3, &rc.tlpRxtOut)
+ stateSinkObject.Save(4, &rc.tlpHighRxt)
+ stateSinkObject.Save(5, &rc.snd)
+}
+
+func (rc *rackControl) afterLoad() {}
+
+// +checklocksignore
+func (rc *rackControl) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &rc.TCPRACKState)
+ stateSourceObject.Load(1, &rc.exitedRecovery)
+ stateSourceObject.Load(2, &rc.minRTT)
+ stateSourceObject.Load(3, &rc.tlpRxtOut)
+ stateSourceObject.Load(4, &rc.tlpHighRxt)
+ stateSourceObject.Load(5, &rc.snd)
+}
+
+func (r *receiver) StateTypeName() string {
+ return "pkg/tcpip/transport/tcp.receiver"
+}
+
+func (r *receiver) StateFields() []string {
+ return []string{
+ "TCPReceiverState",
+ "ep",
+ "rcvWnd",
+ "rcvWUP",
+ "prevBufUsed",
+ "closed",
+ "pendingRcvdSegments",
+ "lastRcvdAckTime",
+ }
+}
+
+func (r *receiver) beforeSave() {}
+
+// +checklocksignore
+func (r *receiver) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.TCPReceiverState)
+ stateSinkObject.Save(1, &r.ep)
+ stateSinkObject.Save(2, &r.rcvWnd)
+ stateSinkObject.Save(3, &r.rcvWUP)
+ stateSinkObject.Save(4, &r.prevBufUsed)
+ stateSinkObject.Save(5, &r.closed)
+ stateSinkObject.Save(6, &r.pendingRcvdSegments)
+ stateSinkObject.Save(7, &r.lastRcvdAckTime)
+}
+
+func (r *receiver) afterLoad() {}
+
+// +checklocksignore
+func (r *receiver) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.TCPReceiverState)
+ stateSourceObject.Load(1, &r.ep)
+ stateSourceObject.Load(2, &r.rcvWnd)
+ stateSourceObject.Load(3, &r.rcvWUP)
+ stateSourceObject.Load(4, &r.prevBufUsed)
+ stateSourceObject.Load(5, &r.closed)
+ stateSourceObject.Load(6, &r.pendingRcvdSegments)
+ stateSourceObject.Load(7, &r.lastRcvdAckTime)
+}
+
+func (r *renoState) StateTypeName() string {
+ return "pkg/tcpip/transport/tcp.renoState"
+}
+
+func (r *renoState) StateFields() []string {
+ return []string{
+ "s",
+ }
+}
+
+func (r *renoState) beforeSave() {}
+
+// +checklocksignore
+func (r *renoState) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.s)
+}
+
+func (r *renoState) afterLoad() {}
+
+// +checklocksignore
+func (r *renoState) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.s)
+}
+
+func (rr *renoRecovery) StateTypeName() string {
+ return "pkg/tcpip/transport/tcp.renoRecovery"
+}
+
+func (rr *renoRecovery) StateFields() []string {
+ return []string{
+ "s",
+ }
+}
+
+func (rr *renoRecovery) beforeSave() {}
+
+// +checklocksignore
+func (rr *renoRecovery) StateSave(stateSinkObject state.Sink) {
+ rr.beforeSave()
+ stateSinkObject.Save(0, &rr.s)
+}
+
+func (rr *renoRecovery) afterLoad() {}
+
+// +checklocksignore
+func (rr *renoRecovery) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &rr.s)
+}
+
+func (sr *sackRecovery) StateTypeName() string {
+ return "pkg/tcpip/transport/tcp.sackRecovery"
+}
+
+func (sr *sackRecovery) StateFields() []string {
+ return []string{
+ "s",
+ }
+}
+
+func (sr *sackRecovery) beforeSave() {}
+
+// +checklocksignore
+func (sr *sackRecovery) StateSave(stateSinkObject state.Sink) {
+ sr.beforeSave()
+ stateSinkObject.Save(0, &sr.s)
+}
+
+func (sr *sackRecovery) afterLoad() {}
+
+// +checklocksignore
+func (sr *sackRecovery) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &sr.s)
+}
+
+func (s *SACKScoreboard) StateTypeName() string {
+ return "pkg/tcpip/transport/tcp.SACKScoreboard"
+}
+
+func (s *SACKScoreboard) StateFields() []string {
+ return []string{
+ "smss",
+ "maxSACKED",
+ }
+}
+
+func (s *SACKScoreboard) beforeSave() {}
+
+// +checklocksignore
+func (s *SACKScoreboard) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.smss)
+ stateSinkObject.Save(1, &s.maxSACKED)
+}
+
+func (s *SACKScoreboard) afterLoad() {}
+
+// +checklocksignore
+func (s *SACKScoreboard) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.smss)
+ stateSourceObject.Load(1, &s.maxSACKED)
+}
+
+func (s *segment) StateTypeName() string {
+ return "pkg/tcpip/transport/tcp.segment"
+}
+
+func (s *segment) StateFields() []string {
+ return []string{
+ "segmentEntry",
+ "refCnt",
+ "ep",
+ "qFlags",
+ "srcAddr",
+ "dstAddr",
+ "netProto",
+ "nicID",
+ "data",
+ "hdr",
+ "sequenceNumber",
+ "ackNumber",
+ "flags",
+ "window",
+ "csum",
+ "csumValid",
+ "parsedOptions",
+ "options",
+ "hasNewSACKInfo",
+ "rcvdTime",
+ "xmitTime",
+ "xmitCount",
+ "acked",
+ "dataMemSize",
+ "lost",
+ }
+}
+
+func (s *segment) beforeSave() {}
+
+// +checklocksignore
+func (s *segment) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ var dataValue buffer.VectorisedView = s.saveData()
+ stateSinkObject.SaveValue(8, dataValue)
+ var optionsValue []byte = s.saveOptions()
+ stateSinkObject.SaveValue(17, optionsValue)
+ stateSinkObject.Save(0, &s.segmentEntry)
+ stateSinkObject.Save(1, &s.refCnt)
+ stateSinkObject.Save(2, &s.ep)
+ stateSinkObject.Save(3, &s.qFlags)
+ stateSinkObject.Save(4, &s.srcAddr)
+ stateSinkObject.Save(5, &s.dstAddr)
+ stateSinkObject.Save(6, &s.netProto)
+ stateSinkObject.Save(7, &s.nicID)
+ stateSinkObject.Save(9, &s.hdr)
+ stateSinkObject.Save(10, &s.sequenceNumber)
+ stateSinkObject.Save(11, &s.ackNumber)
+ stateSinkObject.Save(12, &s.flags)
+ stateSinkObject.Save(13, &s.window)
+ stateSinkObject.Save(14, &s.csum)
+ stateSinkObject.Save(15, &s.csumValid)
+ stateSinkObject.Save(16, &s.parsedOptions)
+ stateSinkObject.Save(18, &s.hasNewSACKInfo)
+ stateSinkObject.Save(19, &s.rcvdTime)
+ stateSinkObject.Save(20, &s.xmitTime)
+ stateSinkObject.Save(21, &s.xmitCount)
+ stateSinkObject.Save(22, &s.acked)
+ stateSinkObject.Save(23, &s.dataMemSize)
+ stateSinkObject.Save(24, &s.lost)
+}
+
+func (s *segment) afterLoad() {}
+
+// +checklocksignore
+func (s *segment) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.segmentEntry)
+ stateSourceObject.Load(1, &s.refCnt)
+ stateSourceObject.Load(2, &s.ep)
+ stateSourceObject.Load(3, &s.qFlags)
+ stateSourceObject.Load(4, &s.srcAddr)
+ stateSourceObject.Load(5, &s.dstAddr)
+ stateSourceObject.Load(6, &s.netProto)
+ stateSourceObject.Load(7, &s.nicID)
+ stateSourceObject.Load(9, &s.hdr)
+ stateSourceObject.Load(10, &s.sequenceNumber)
+ stateSourceObject.Load(11, &s.ackNumber)
+ stateSourceObject.Load(12, &s.flags)
+ stateSourceObject.Load(13, &s.window)
+ stateSourceObject.Load(14, &s.csum)
+ stateSourceObject.Load(15, &s.csumValid)
+ stateSourceObject.Load(16, &s.parsedOptions)
+ stateSourceObject.Load(18, &s.hasNewSACKInfo)
+ stateSourceObject.Load(19, &s.rcvdTime)
+ stateSourceObject.Load(20, &s.xmitTime)
+ stateSourceObject.Load(21, &s.xmitCount)
+ stateSourceObject.Load(22, &s.acked)
+ stateSourceObject.Load(23, &s.dataMemSize)
+ stateSourceObject.Load(24, &s.lost)
+ stateSourceObject.LoadValue(8, new(buffer.VectorisedView), func(y interface{}) { s.loadData(y.(buffer.VectorisedView)) })
+ stateSourceObject.LoadValue(17, new([]byte), func(y interface{}) { s.loadOptions(y.([]byte)) })
+}
+
+func (q *segmentQueue) StateTypeName() string {
+ return "pkg/tcpip/transport/tcp.segmentQueue"
+}
+
+func (q *segmentQueue) StateFields() []string {
+ return []string{
+ "list",
+ "ep",
+ "frozen",
+ }
+}
+
+func (q *segmentQueue) beforeSave() {}
+
+// +checklocksignore
+func (q *segmentQueue) StateSave(stateSinkObject state.Sink) {
+ q.beforeSave()
+ stateSinkObject.Save(0, &q.list)
+ stateSinkObject.Save(1, &q.ep)
+ stateSinkObject.Save(2, &q.frozen)
+}
+
+func (q *segmentQueue) afterLoad() {}
+
+// +checklocksignore
+func (q *segmentQueue) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.LoadWait(0, &q.list)
+ stateSourceObject.Load(1, &q.ep)
+ stateSourceObject.Load(2, &q.frozen)
+}
+
+func (s *sender) StateTypeName() string {
+ return "pkg/tcpip/transport/tcp.sender"
+}
+
+func (s *sender) StateFields() []string {
+ return []string{
+ "TCPSenderState",
+ "ep",
+ "lr",
+ "firstRetransmittedSegXmitTime",
+ "writeNext",
+ "writeList",
+ "rtt",
+ "minRTO",
+ "maxRTO",
+ "maxRetries",
+ "gso",
+ "state",
+ "cc",
+ "rc",
+ }
+}
+
+func (s *sender) beforeSave() {}
+
+// +checklocksignore
+func (s *sender) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.TCPSenderState)
+ stateSinkObject.Save(1, &s.ep)
+ stateSinkObject.Save(2, &s.lr)
+ stateSinkObject.Save(3, &s.firstRetransmittedSegXmitTime)
+ stateSinkObject.Save(4, &s.writeNext)
+ stateSinkObject.Save(5, &s.writeList)
+ stateSinkObject.Save(6, &s.rtt)
+ stateSinkObject.Save(7, &s.minRTO)
+ stateSinkObject.Save(8, &s.maxRTO)
+ stateSinkObject.Save(9, &s.maxRetries)
+ stateSinkObject.Save(10, &s.gso)
+ stateSinkObject.Save(11, &s.state)
+ stateSinkObject.Save(12, &s.cc)
+ stateSinkObject.Save(13, &s.rc)
+}
+
+func (s *sender) afterLoad() {}
+
+// +checklocksignore
+func (s *sender) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.TCPSenderState)
+ stateSourceObject.Load(1, &s.ep)
+ stateSourceObject.Load(2, &s.lr)
+ stateSourceObject.Load(3, &s.firstRetransmittedSegXmitTime)
+ stateSourceObject.Load(4, &s.writeNext)
+ stateSourceObject.Load(5, &s.writeList)
+ stateSourceObject.Load(6, &s.rtt)
+ stateSourceObject.Load(7, &s.minRTO)
+ stateSourceObject.Load(8, &s.maxRTO)
+ stateSourceObject.Load(9, &s.maxRetries)
+ stateSourceObject.Load(10, &s.gso)
+ stateSourceObject.Load(11, &s.state)
+ stateSourceObject.Load(12, &s.cc)
+ stateSourceObject.Load(13, &s.rc)
+}
+
+func (r *rtt) StateTypeName() string {
+ return "pkg/tcpip/transport/tcp.rtt"
+}
+
+func (r *rtt) StateFields() []string {
+ return []string{
+ "TCPRTTState",
+ }
+}
+
+func (r *rtt) beforeSave() {}
+
+// +checklocksignore
+func (r *rtt) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.TCPRTTState)
+}
+
+func (r *rtt) afterLoad() {}
+
+// +checklocksignore
+func (r *rtt) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.TCPRTTState)
+}
+
+func (l *endpointList) StateTypeName() string {
+ return "pkg/tcpip/transport/tcp.endpointList"
+}
+
+func (l *endpointList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *endpointList) beforeSave() {}
+
+// +checklocksignore
+func (l *endpointList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *endpointList) afterLoad() {}
+
+// +checklocksignore
+func (l *endpointList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *endpointEntry) StateTypeName() string {
+ return "pkg/tcpip/transport/tcp.endpointEntry"
+}
+
+func (e *endpointEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *endpointEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *endpointEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *endpointEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *endpointEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func (l *segmentList) StateTypeName() string {
+ return "pkg/tcpip/transport/tcp.segmentList"
+}
+
+func (l *segmentList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *segmentList) beforeSave() {}
+
+// +checklocksignore
+func (l *segmentList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *segmentList) afterLoad() {}
+
+// +checklocksignore
+func (l *segmentList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *segmentEntry) StateTypeName() string {
+ return "pkg/tcpip/transport/tcp.segmentEntry"
+}
+
+func (e *segmentEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *segmentEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *segmentEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *segmentEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *segmentEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func init() {
+ state.Register((*cubicState)(nil))
+ state.Register((*SACKInfo)(nil))
+ state.Register((*sndQueueInfo)(nil))
+ state.Register((*rcvQueueInfo)(nil))
+ state.Register((*accepted)(nil))
+ state.Register((*endpoint)(nil))
+ state.Register((*keepalive)(nil))
+ state.Register((*rackControl)(nil))
+ state.Register((*receiver)(nil))
+ state.Register((*renoState)(nil))
+ state.Register((*renoRecovery)(nil))
+ state.Register((*sackRecovery)(nil))
+ state.Register((*SACKScoreboard)(nil))
+ state.Register((*segment)(nil))
+ state.Register((*segmentQueue)(nil))
+ state.Register((*sender)(nil))
+ state.Register((*rtt)(nil))
+ state.Register((*endpointList)(nil))
+ state.Register((*endpointEntry)(nil))
+ state.Register((*segmentList)(nil))
+ state.Register((*segmentEntry)(nil))
+}
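For orientation, every type in the generated file above follows one fixed pattern: StateFields lists the saved fields, StateSave writes each field under a stable index, and StateLoad restores it by the same index (SaveValue/LoadValue and AfterLoad are used for fields that need custom handling). The following is a minimal hand-written sketch of that pattern for a hypothetical counter type, assuming the same state.Sink/state.Source API imported above; it is not part of the package.

package tcp

import "gvisor.dev/gvisor/pkg/state"

// counter is a hypothetical type used only to illustrate the generated pattern.
type counter struct {
	hits  uint64
	label string
}

// StateTypeName returns the fully qualified type name recorded in saved state.
func (c *counter) StateTypeName() string {
	return "pkg/tcpip/transport/tcp.counter"
}

// StateFields lists the saved fields; their order fixes the save/load indices.
func (c *counter) StateFields() []string {
	return []string{"hits", "label"}
}

func (c *counter) beforeSave() {}

// StateSave writes each field under its index from StateFields.
func (c *counter) StateSave(stateSinkObject state.Sink) {
	c.beforeSave()
	stateSinkObject.Save(0, &c.hits)
	stateSinkObject.Save(1, &c.label)
}

func (c *counter) afterLoad() {}

// StateLoad restores each field by the same index it was saved under.
func (c *counter) StateLoad(stateSourceObject state.Source) {
	stateSourceObject.Load(0, &c.hits)
	stateSourceObject.Load(1, &c.label)
}

Such a type would also be registered with state.Register((*counter)(nil)) in init, mirroring the block above.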
diff --git a/pkg/tcpip/transport/tcp/tcp_test.go b/pkg/tcpip/transport/tcp/tcp_test.go
deleted file mode 100644
index 031f01357..000000000
--- a/pkg/tcpip/transport/tcp/tcp_test.go
+++ /dev/null
@@ -1,7966 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_test
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "math"
- "strings"
- "testing"
- "time"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/rand"
- "gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/checker"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/link/loopback"
- "gvisor.dev/gvisor/pkg/tcpip/link/sniffer"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
- "gvisor.dev/gvisor/pkg/tcpip/seqnum"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- tcpiptestutil "gvisor.dev/gvisor/pkg/tcpip/testutil"
- "gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
- "gvisor.dev/gvisor/pkg/tcpip/transport/tcp/testing/context"
- "gvisor.dev/gvisor/pkg/test/testutil"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-// endpointTester provides helper functions to test a tcpip.Endpoint.
-type endpointTester struct {
- ep tcpip.Endpoint
-}
-
-// CheckReadError issues a read to the endpoint and checks for an error.
-func (e *endpointTester) CheckReadError(t *testing.T, want tcpip.Error) {
- t.Helper()
- res, got := e.ep.Read(ioutil.Discard, tcpip.ReadOptions{})
- if got != want {
- t.Fatalf("ep.Read = %s, want %s", got, want)
- }
- if diff := cmp.Diff(tcpip.ReadResult{}, res); diff != "" {
- t.Errorf("ep.Read: unexpected non-zero result (-want +got):\n%s", diff)
- }
-}
-
-// CheckRead issues a read to the endpoint, checks for success, and returns the
-// data read.
-func (e *endpointTester) CheckRead(t *testing.T) []byte {
- t.Helper()
- var buf bytes.Buffer
- res, err := e.ep.Read(&buf, tcpip.ReadOptions{})
- if err != nil {
- t.Fatalf("ep.Read = _, %s; want _, nil", err)
- }
- if diff := cmp.Diff(tcpip.ReadResult{
- Count: buf.Len(),
- Total: buf.Len(),
- }, res, checker.IgnoreCmpPath("ControlMessages")); diff != "" {
- t.Errorf("ep.Read: unexpected result (-want +got):\n%s", diff)
- }
- return buf.Bytes()
-}
-
-// CheckReadFull reads from the endpoint for exactly count bytes.
-func (e *endpointTester) CheckReadFull(t *testing.T, count int, notifyRead <-chan struct{}, timeout time.Duration) []byte {
- t.Helper()
- var buf bytes.Buffer
- w := tcpip.LimitedWriter{
- W: &buf,
- N: int64(count),
- }
- for w.N != 0 {
- _, err := e.ep.Read(&w, tcpip.ReadOptions{})
- if cmp.Equal(&tcpip.ErrWouldBlock{}, err) {
- // Wait for receive to be notified.
- select {
- case <-notifyRead:
- case <-time.After(timeout):
- t.Fatalf("Timed out waiting for data to arrive")
- }
- continue
- } else if err != nil {
- t.Fatalf("ep.Read = _, %s; want _, nil", err)
- }
- }
- return buf.Bytes()
-}
-
-const (
- // defaultMTU is the MTU, in bytes, used throughout the tests, except
- // where another value is explicitly used. It is chosen to match the MTU
- // of loopback interfaces on Linux systems.
- defaultMTU = 65535
-
- // defaultIPv4MSS is the MSS sent by the network stack in SYN/SYN-ACK for an
- // IPv4 endpoint when the MTU is set to defaultMTU in the test.
- defaultIPv4MSS = defaultMTU - header.IPv4MinimumSize - header.TCPMinimumSize
-)
-
-func TestGiveUpConnect(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- var wq waiter.Queue
- ep, err := c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &wq)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
-
- // Register for notification, then start connection attempt.
- waitEntry, notifyCh := waiter.NewChannelEntry(nil)
- wq.EventRegister(&waitEntry, waiter.EventHUp)
- defer wq.EventUnregister(&waitEntry)
-
- {
- err := ep.Connect(tcpip.FullAddress{Addr: context.TestAddr, Port: context.TestPort})
- if d := cmp.Diff(&tcpip.ErrConnectStarted{}, err); d != "" {
- t.Fatalf("ep.Connect(...) mismatch (-want +got):\n%s", d)
- }
- }
-
- // Close the connection, wait for completion.
- ep.Close()
-
- // Wait for ep to become writable.
- <-notifyCh
-
- // Call Connect again to retrieve the handshake failure status
- // and stats updates.
- {
- err := ep.Connect(tcpip.FullAddress{Addr: context.TestAddr, Port: context.TestPort})
- if d := cmp.Diff(&tcpip.ErrAborted{}, err); d != "" {
- t.Fatalf("ep.Connect(...) mismatch (-want +got):\n%s", d)
- }
- }
-
- if got := c.Stack().Stats().TCP.FailedConnectionAttempts.Value(); got != 1 {
- t.Errorf("got stats.TCP.FailedConnectionAttempts.Value() = %d, want = 1", got)
- }
-
- if got := c.Stack().Stats().TCP.CurrentEstablished.Value(); got != 0 {
- t.Errorf("got stats.TCP.CurrentEstablished.Value() = %d, want = 0", got)
- }
-}
-
-// Test for ICMP error handling without completing handshake.
-func TestConnectICMPError(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- var wq waiter.Queue
- ep, err := c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &wq)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
-
- waitEntry, notifyCh := waiter.NewChannelEntry(nil)
- wq.EventRegister(&waitEntry, waiter.EventHUp)
- defer wq.EventUnregister(&waitEntry)
-
- {
- err := ep.Connect(tcpip.FullAddress{Addr: context.TestAddr, Port: context.TestPort})
- if d := cmp.Diff(&tcpip.ErrConnectStarted{}, err); d != "" {
- t.Fatalf("ep.Connect(...) mismatch (-want +got):\n%s", d)
- }
- }
-
- syn := c.GetPacket()
- checker.IPv4(t, syn, checker.TCP(checker.TCPFlags(header.TCPFlagSyn)))
-
- wep := ep.(interface {
- StopWork()
- ResumeWork()
- LastErrorLocked() tcpip.Error
- })
-
- // Stop the protocol loop, ensure that the ICMP error is processed and
- // the last ICMP error is read before the loop is resumed. This
- // sanity-checks the handshake completion logic on ICMP errors.
- wep.StopWork()
-
- c.SendICMPPacket(header.ICMPv4DstUnreachable, header.ICMPv4HostUnreachable, nil, syn, defaultMTU)
-
- for {
- if err := wep.LastErrorLocked(); err != nil {
- if d := cmp.Diff(&tcpip.ErrNoRoute{}, err); d != "" {
- t.Errorf("ep.LastErrorLocked() mismatch (-want +got):\n%s", d)
- }
- break
- }
- time.Sleep(time.Millisecond)
- }
-
- wep.ResumeWork()
-
- <-notifyCh
-
- // The stack would have unregistered the endpoint because of the ICMP error.
- // Expect a RST for any subsequent packets sent to the endpoint.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck,
- SeqNum: seqnum.Value(context.TestInitialSequenceNumber) + 1,
- AckNum: c.IRS + 1,
- })
-
- checker.IPv4(t, c.GetPacket(), checker.TCP(
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS+1)),
- checker.TCPAckNum(0),
- checker.TCPFlags(header.TCPFlagRst)))
-}
-
-func TestConnectIncrementActiveConnection(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- stats := c.Stack().Stats()
- want := stats.TCP.ActiveConnectionOpenings.Value() + 1
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
- if got := stats.TCP.ActiveConnectionOpenings.Value(); got != want {
- t.Errorf("got stats.TCP.ActtiveConnectionOpenings.Value() = %d, want = %d", got, want)
- }
-}
-
-func TestConnectDoesNotIncrementFailedConnectionAttempts(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- stats := c.Stack().Stats()
- want := stats.TCP.FailedConnectionAttempts.Value()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
- if got := stats.TCP.FailedConnectionAttempts.Value(); got != want {
- t.Errorf("got stats.TCP.FailedConnectionAttempts.Value() = %d, want = %d", got, want)
- }
- if got := c.EP.Stats().(*tcp.Stats).FailedConnectionAttempts.Value(); got != want {
- t.Errorf("got EP stats.FailedConnectionAttempts = %d, want = %d", got, want)
- }
-}
-
-func TestActiveFailedConnectionAttemptIncrement(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- stats := c.Stack().Stats()
- ep, err := c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &c.WQ)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
- c.EP = ep
- want := stats.TCP.FailedConnectionAttempts.Value() + 1
-
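-	// Connecting via NIC 2 has no matching route in this test context, so the
-	// attempt should fail immediately with ErrNoRoute.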
- {
- err := c.EP.Connect(tcpip.FullAddress{NIC: 2, Addr: context.TestAddr, Port: context.TestPort})
- if d := cmp.Diff(&tcpip.ErrNoRoute{}, err); d != "" {
- t.Errorf("c.EP.Connect(...) mismatch (-want +got):\n%s", d)
- }
- }
-
- if got := stats.TCP.FailedConnectionAttempts.Value(); got != want {
- t.Errorf("got stats.TCP.FailedConnectionAttempts.Value() = %d, want = %d", got, want)
- }
- if got := c.EP.Stats().(*tcp.Stats).FailedConnectionAttempts.Value(); got != want {
- t.Errorf("got EP stats FailedConnectionAttempts = %d, want = %d", got, want)
- }
-}
-
-func TestCloseWithoutConnect(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- // Create TCP endpoint.
- var err tcpip.Error
- c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &c.WQ)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
-
- c.EP.Close()
-
- if got := c.Stack().Stats().TCP.CurrentConnected.Value(); got != 0 {
- t.Errorf("got stats.TCP.CurrentConnected.Value() = %d, want = 0", got)
- }
-}
-
-func TestTCPSegmentsSentIncrement(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- stats := c.Stack().Stats()
- // SYN and ACK
- want := stats.TCP.SegmentsSent.Value() + 2
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
-
- if got := stats.TCP.SegmentsSent.Value(); got != want {
- t.Errorf("got stats.TCP.SegmentsSent.Value() = %d, want = %d", got, want)
- }
- if got := c.EP.Stats().(*tcp.Stats).SegmentsSent.Value(); got != want {
- t.Errorf("got EP stats SegmentsSent.Value() = %d, want = %d", got, want)
- }
-}
-
-func TestTCPResetsSentIncrement(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
- stats := c.Stack().Stats()
- wq := &waiter.Queue{}
- ep, err := c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, wq)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
- want := stats.TCP.SegmentsSent.Value() + 1
-
- if err := ep.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
-
- if err := ep.Listen(10); err != nil {
- t.Fatalf("Listen failed: %s", err)
- }
-
- // Send a SYN request.
- iss := seqnum.Value(context.TestInitialSequenceNumber)
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: iss,
- })
-
- // Receive the SYN-ACK reply.
- b := c.GetPacket()
- tcpHdr := header.TCP(header.IPv4(b).Payload())
- c.IRS = seqnum.Value(tcpHdr.SequenceNumber())
-
- ackHeaders := &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck,
- SeqNum: iss + 1,
- // If the AckNum is not the increment of the last sequence number, a RST
- // segment is sent back in response.
- AckNum: c.IRS + 2,
- }
-
- // Send ACK.
- c.SendPacket(nil, ackHeaders)
-
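-	// Drain the RST generated in response to the ACK with the unexpected
-	// acknowledgement number.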
- c.GetPacket()
-
- metricPollFn := func() error {
- if got := stats.TCP.ResetsSent.Value(); got != want {
- return fmt.Errorf("got stats.TCP.ResetsSent.Value() = %d, want = %d", got, want)
- }
- return nil
- }
- if err := testutil.Poll(metricPollFn, 1*time.Second); err != nil {
- t.Error(err)
- }
-}
-
-// TestTCPResetsSentNoICMP confirms that we don't get an ICMP
-// DstUnreachable packet when we try to send a packet which is not part
-// of an active session.
-func TestTCPResetsSentNoICMP(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
- stats := c.Stack().Stats()
-
- // Send a SYN request for a closed port. This should elicit an RST
- // but NOT an ICMPv4 DstUnreachable packet.
- iss := seqnum.Value(context.TestInitialSequenceNumber)
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: iss,
- })
-
- // Receive whatever comes back.
- b := c.GetPacket()
- ipHdr := header.IPv4(b)
- if got, want := ipHdr.Protocol(), uint8(header.TCPProtocolNumber); got != want {
- t.Errorf("unexpected protocol, got = %d, want = %d", got, want)
- }
-
- // Read outgoing ICMP stats and check no ICMP DstUnreachable was recorded.
- sent := stats.ICMP.V4.PacketsSent
- if got, want := sent.DstUnreachable.Value(), uint64(0); got != want {
- t.Errorf("got ICMP DstUnreachable.Value() = %d, want = %d", got, want)
- }
-}
-
-// TestTCPResetSentForACKWhenNotUsingSynCookies checks that the stack generates
-// a RST if an ACK is received on the listening socket for which there is no
-// active handshake in progress and we are not using SYN cookies.
-func TestTCPResetSentForACKWhenNotUsingSynCookies(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
-	// Create a listening endpoint.
- wq := &waiter.Queue{}
- ep, err := c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, wq)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
- if err := ep.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
-
- if err := ep.Listen(10); err != nil {
- t.Fatalf("Listen failed: %s", err)
- }
-
- // Send a SYN request.
- iss := seqnum.Value(context.TestInitialSequenceNumber)
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: iss,
- })
-
- // Receive the SYN-ACK reply.
- b := c.GetPacket()
- tcpHdr := header.TCP(header.IPv4(b).Payload())
- c.IRS = seqnum.Value(tcpHdr.SequenceNumber())
-
- ackHeaders := &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck,
- SeqNum: iss + 1,
- AckNum: c.IRS + 1,
- }
-
- // Send ACK.
- c.SendPacket(nil, ackHeaders)
-
- // Try to accept the connection.
- we, ch := waiter.NewChannelEntry(nil)
- wq.EventRegister(&we, waiter.ReadableEvents)
- defer wq.EventUnregister(&we)
-
- c.EP, _, err = ep.Accept(nil)
- if cmp.Equal(&tcpip.ErrWouldBlock{}, err) {
- // Wait for connection to be established.
- select {
- case <-ch:
- c.EP, _, err = ep.Accept(nil)
- if err != nil {
- t.Fatalf("Accept failed: %s", err)
- }
-
- case <-time.After(1 * time.Second):
- t.Fatalf("Timed out waiting for accept")
- }
- }
-
- // Lower stackwide TIME_WAIT timeout so that the reservations
- // are released instantly on Close.
- tcpTW := tcpip.TCPTimeWaitTimeoutOption(1 * time.Millisecond)
- if err := c.Stack().SetTransportProtocolOption(tcp.ProtocolNumber, &tcpTW); err != nil {
- t.Fatalf("SetTransportProtocolOption(%d, &%T(%d)): %s", tcp.ProtocolNumber, tcpTW, tcpTW, err)
- }
-
- c.EP.Close()
- checker.IPv4(t, c.GetPacket(), checker.TCP(
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS+1)),
- checker.TCPAckNum(uint32(iss)+1),
- checker.TCPFlags(header.TCPFlagFin|header.TCPFlagAck)))
- finHeaders := &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck | header.TCPFlagFin,
- SeqNum: iss + 1,
- AckNum: c.IRS + 2,
- }
-
- c.SendPacket(nil, finHeaders)
-
- // Get the ACK to the FIN we just sent.
- c.GetPacket()
-
-	// Since an active close was done we need to wait for a little more than
-	// the TIME-WAIT timeout for the port reservations to be released and the
-	// socket to move to a CLOSED state.
- time.Sleep(20 * time.Millisecond)
-
-	// Now resend the same ACK. This ACK should generate a RST as there
-	// should be no endpoint in SYN-RCVD state and we are not using
-	// SYN cookies yet. The reason we send the same ACK is that we need a
-	// valid cookie (IRS) generated by netstack, without which the ACK will
-	// be rejected.
- c.SendPacket(nil, ackHeaders)
-
- checker.IPv4(t, c.GetPacket(), checker.TCP(
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS+1)),
- checker.TCPAckNum(0),
- checker.TCPFlags(header.TCPFlagRst)))
-}
-
-func TestTCPResetsReceivedIncrement(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- stats := c.Stack().Stats()
- want := stats.TCP.ResetsReceived.Value() + 1
- iss := seqnum.Value(context.TestInitialSequenceNumber)
- rcvWnd := seqnum.Size(30000)
- c.CreateConnected(iss, rcvWnd, -1 /* epRcvBuf */)
-
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- SeqNum: iss.Add(1),
- AckNum: c.IRS.Add(1),
- RcvWnd: rcvWnd,
- Flags: header.TCPFlagRst,
- })
-
- if got := stats.TCP.ResetsReceived.Value(); got != want {
- t.Errorf("got stats.TCP.ResetsReceived.Value() = %d, want = %d", got, want)
- }
-}
-
-func TestTCPResetsDoNotGenerateResets(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- stats := c.Stack().Stats()
- want := stats.TCP.ResetsReceived.Value() + 1
- iss := seqnum.Value(context.TestInitialSequenceNumber)
- rcvWnd := seqnum.Size(30000)
- c.CreateConnected(iss, rcvWnd, -1 /* epRcvBuf */)
-
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- SeqNum: iss.Add(1),
- AckNum: c.IRS.Add(1),
- RcvWnd: rcvWnd,
- Flags: header.TCPFlagRst,
- })
-
- if got := stats.TCP.ResetsReceived.Value(); got != want {
- t.Errorf("got stats.TCP.ResetsReceived.Value() = %d, want = %d", got, want)
- }
- c.CheckNoPacketTimeout("got an unexpected packet", 100*time.Millisecond)
-}
-
-func TestActiveHandshake(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
-}
-
-func TestNonBlockingClose(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
- ep := c.EP
- c.EP = nil
-
- // Close the endpoint and measure how long it takes.
- t0 := time.Now()
- ep.Close()
-	if diff := time.Since(t0); diff > 3*time.Second {
- t.Fatalf("Took too long to close: %s", diff)
- }
-}
-
-func TestConnectResetAfterClose(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
-	// Set TCPLingerTimeout to 3 seconds so that sockets are marked closed
-	// after 3 seconds in the FIN_WAIT2 state.
- tcpLingerTimeout := 3 * time.Second
- opt := tcpip.TCPLingerTimeoutOption(tcpLingerTimeout)
- if err := c.Stack().SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- t.Fatalf("SetTransportProtocolOption(%d, &%T(%d)): %s", tcp.ProtocolNumber, opt, opt, err)
- }
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
- ep := c.EP
- c.EP = nil
-
- // Close the endpoint, make sure we get a FIN segment, then acknowledge
- // to complete closure of sender, but don't send our own FIN.
- ep.Close()
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlags(header.TCPFlagAck|header.TCPFlagFin),
- ),
- )
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(2),
- RcvWnd: 30000,
- })
-
- // Wait for the ep to give up waiting for a FIN.
- time.Sleep(tcpLingerTimeout + 1*time.Second)
-
- // Now send an ACK and it should trigger a RST as the endpoint should
- // not exist anymore.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(2),
- RcvWnd: 30000,
- })
-
- for {
- b := c.GetPacket()
- tcpHdr := header.TCP(header.IPv4(b).Payload())
- if tcpHdr.Flags() == header.TCPFlagAck|header.TCPFlagFin {
- // This is a retransmit of the FIN, ignore it.
- continue
- }
-
- checker.IPv4(t, b,
- checker.TCP(
- checker.DstPort(context.TestPort),
-				// The RST is always generated with sndNxt, which, if the FIN
-				// has been sent, will be 1 higher than the sequence number
-				// of the FIN itself.
- checker.TCPSeqNum(uint32(c.IRS)+2),
- checker.TCPAckNum(0),
- checker.TCPFlags(header.TCPFlagRst),
- ),
- )
- break
- }
-}
-
-// TestCurrentConnectedIncrement tests increment of the current
-// established and connected counters.
-func TestCurrentConnectedIncrement(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
-	// Set TCPTimeWaitTimeout to 1 second so that sockets are marked closed
- // after 1 second in TIME_WAIT state.
- tcpTimeWaitTimeout := 1 * time.Second
- opt := tcpip.TCPTimeWaitTimeoutOption(tcpTimeWaitTimeout)
- if err := c.Stack().SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- t.Fatalf("SetTransportProtocolOption(%d, &%T(%d)): %s", tcp.ProtocolNumber, opt, opt, err)
- }
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
- ep := c.EP
- c.EP = nil
-
- if got := c.Stack().Stats().TCP.CurrentEstablished.Value(); got != 1 {
- t.Errorf("got stats.TCP.CurrentEstablished.Value() = %d, want = 1", got)
- }
- gotConnected := c.Stack().Stats().TCP.CurrentConnected.Value()
- if gotConnected != 1 {
- t.Errorf("got stats.TCP.CurrentConnected.Value() = %d, want = 1", gotConnected)
- }
-
- ep.Close()
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlags(header.TCPFlagAck|header.TCPFlagFin),
- ),
- )
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(2),
- RcvWnd: 30000,
- })
-
- if got := c.Stack().Stats().TCP.CurrentEstablished.Value(); got != 0 {
- t.Errorf("got stats.TCP.CurrentEstablished.Value() = %d, want = 0", got)
- }
- if got := c.Stack().Stats().TCP.CurrentConnected.Value(); got != gotConnected {
- t.Errorf("got stats.TCP.CurrentConnected.Value() = %d, want = %d", got, gotConnected)
- }
-
- // Ack and send FIN as well.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck | header.TCPFlagFin,
- SeqNum: iss,
- AckNum: c.IRS.Add(2),
- RcvWnd: 30000,
- })
-
- // Check that the stack acks the FIN.
- checker.IPv4(t, c.GetPacket(),
- checker.PayloadLen(header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+2),
- checker.TCPAckNum(uint32(iss)+1),
- checker.TCPFlags(header.TCPFlagAck),
- ),
- )
-
- // Wait for a little more than the TIME-WAIT duration for the socket to
- // transition to CLOSED state.
- time.Sleep(1200 * time.Millisecond)
-
- if got := c.Stack().Stats().TCP.CurrentEstablished.Value(); got != 0 {
- t.Errorf("got stats.TCP.CurrentEstablished.Value() = %d, want = 0", got)
- }
- if got := c.Stack().Stats().TCP.CurrentConnected.Value(); got != 0 {
- t.Errorf("got stats.TCP.CurrentConnected.Value() = %d, want = 0", got)
- }
-}
-
-// TestClosingWithEnqueuedSegments tests handling of still enqueued segments
-// when the endpoint transitions to StateClose. The in-flight segments would be
-// re-enqueued to any listening endpoint.
-func TestClosingWithEnqueuedSegments(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
- ep := c.EP
- c.EP = nil
-
- if got, want := tcp.EndpointState(ep.State()), tcp.StateEstablished; got != want {
- t.Errorf("unexpected endpoint state: want %d, got %d", want, got)
- }
-
-	// Send a FIN for ESTABLISHED --> CLOSE_WAIT
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagFin | header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(1),
- RcvWnd: 30000,
- })
-
- // Get the ACK for the FIN we sent.
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)+1),
- checker.TCPFlags(header.TCPFlagAck),
- ),
- )
-
- // Give the stack a few ms to transition the endpoint out of ESTABLISHED
- // state.
- time.Sleep(10 * time.Millisecond)
-
- if got, want := tcp.EndpointState(ep.State()), tcp.StateCloseWait; got != want {
- t.Errorf("unexpected endpoint state: want %d, got %d", want, got)
- }
-
- // Close the application endpoint for CLOSE_WAIT --> LAST_ACK
- ep.Close()
-
- // Get the FIN
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)+1),
- checker.TCPFlags(header.TCPFlagAck|header.TCPFlagFin),
- ),
- )
-
- if got, want := tcp.EndpointState(ep.State()), tcp.StateLastAck; got != want {
- t.Errorf("unexpected endpoint state: want %s, got %s", want, got)
- }
-
-	// Pause the endpoint's protocolMainLoop.
- ep.(interface{ StopWork() }).StopWork()
-
-	// Enqueue the last ACK followed by an ACK matching the endpoint.
-	//
-	// Send the last ACK for LAST_ACK --> CLOSED.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss.Add(1),
- AckNum: c.IRS.Add(2),
- RcvWnd: 30000,
- })
-
-	// Send a packet with ACK set; this would generate a RST when
-	// not using SYN cookies, as in this test.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck | header.TCPFlagFin,
- SeqNum: iss.Add(2),
- AckNum: c.IRS.Add(2),
- RcvWnd: 30000,
- })
-
-	// Unpause the endpoint's protocolMainLoop.
- ep.(interface{ ResumeWork() }).ResumeWork()
-
- // Wait for the protocolMainLoop to resume and update state.
- time.Sleep(10 * time.Millisecond)
-
- // Expect the endpoint to be closed.
- if got, want := tcp.EndpointState(ep.State()), tcp.StateClose; got != want {
- t.Errorf("unexpected endpoint state: want %s, got %s", want, got)
- }
-
- if got := c.Stack().Stats().TCP.EstablishedClosed.Value(); got != 1 {
- t.Errorf("got c.Stack().Stats().TCP.EstablishedClosed = %d, want = 1", got)
- }
-
- if got := c.Stack().Stats().TCP.CurrentEstablished.Value(); got != 0 {
- t.Errorf("got stats.TCP.CurrentEstablished.Value() = %d, want = 0", got)
- }
-
-	// Check that the endpoint was moved to CLOSED and that netstack sent a reset
-	// in response to the ACK packet that we sent after the last ACK.
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+2),
- checker.TCPAckNum(0),
- checker.TCPFlags(header.TCPFlagRst),
- ),
- )
-}
-
-func TestSimpleReceive(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
-
- we, ch := waiter.NewChannelEntry(nil)
- c.WQ.EventRegister(&we, waiter.ReadableEvents)
- defer c.WQ.EventUnregister(&we)
-
- ept := endpointTester{c.EP}
-
- data := []byte{1, 2, 3}
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- c.SendPacket(data, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(1),
- RcvWnd: 30000,
- })
-
- // Wait for receive to be notified.
- select {
- case <-ch:
- case <-time.After(1 * time.Second):
- t.Fatalf("Timed out waiting for data to arrive")
- }
-
- // Receive data.
- v := ept.CheckRead(t)
- if !bytes.Equal(data, v) {
- t.Fatalf("got data = %v, want = %v", v, data)
- }
-
- // Check that ACK is received.
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)+uint32(len(data))),
- checker.TCPFlags(header.TCPFlagAck),
- ),
- )
-}
-
-// TestUserSuppliedMSSOnConnect tests that the user supplied MSS is used when
-// creating a new active TCP socket. It should be present in the sent TCP
-// SYN segment.
-func TestUserSuppliedMSSOnConnect(t *testing.T) {
- const mtu = 5000
-
- ips := []struct {
- name string
- createEP func(*context.Context)
- connectAddr tcpip.Address
- checker func(*testing.T, *context.Context, uint16, int)
- maxMSS uint16
- }{
- {
- name: "IPv4",
- createEP: func(c *context.Context) {
- c.Create(-1)
- },
- connectAddr: context.TestAddr,
- checker: func(t *testing.T, c *context.Context, mss uint16, ws int) {
- checker.IPv4(t, c.GetPacket(), checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagSyn),
- checker.TCPSynOptions(header.TCPSynOptions{MSS: mss, WS: ws})))
- },
- maxMSS: mtu - header.IPv4MinimumSize - header.TCPMinimumSize,
- },
- {
- name: "IPv6",
- createEP: func(c *context.Context) {
- c.CreateV6Endpoint(true)
- },
- connectAddr: context.TestV6Addr,
- checker: func(t *testing.T, c *context.Context, mss uint16, ws int) {
- checker.IPv6(t, c.GetV6Packet(), checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagSyn),
- checker.TCPSynOptions(header.TCPSynOptions{MSS: mss, WS: ws})))
- },
- maxMSS: mtu - header.IPv6MinimumSize - header.TCPMinimumSize,
- },
- }
-
- for _, ip := range ips {
- t.Run(ip.name, func(t *testing.T) {
- tests := []struct {
- name string
- setMSS uint16
- expMSS uint16
- }{
- {
- name: "EqualToMaxMSS",
- setMSS: ip.maxMSS,
- expMSS: ip.maxMSS,
- },
- {
- name: "LessThanMaxMSS",
- setMSS: ip.maxMSS - 1,
- expMSS: ip.maxMSS - 1,
- },
- {
- name: "GreaterThanMaxMSS",
- setMSS: ip.maxMSS + 1,
- expMSS: ip.maxMSS,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- c := context.New(t, mtu)
- defer c.Cleanup()
-
- ip.createEP(c)
-
- // Set the MSS socket option.
- if err := c.EP.SetSockOptInt(tcpip.MaxSegOption, int(test.setMSS)); err != nil {
- t.Fatalf("SetSockOptInt(MaxSegOption, %d): %s", test.setMSS, err)
- }
-
- // Get expected window size.
- rcvBufSize := c.EP.SocketOptions().GetReceiveBufferSize()
- ws := tcp.FindWndScale(seqnum.Size(rcvBufSize))
-
- connectAddr := tcpip.FullAddress{Addr: ip.connectAddr, Port: context.TestPort}
- {
- err := c.EP.Connect(connectAddr)
- if d := cmp.Diff(&tcpip.ErrConnectStarted{}, err); d != "" {
- t.Fatalf("Connect(%+v) mismatch (-want +got):\n%s", connectAddr, d)
- }
- }
-
- // Receive SYN packet with our user supplied MSS.
- ip.checker(t, c, test.expMSS, ws)
- })
- }
- })
- }
-}
-
-// TestUserSuppliedMSSOnListenAccept tests that the user supplied MSS is used
-// when completing the handshake for a new TCP connection from a TCP
-// listening socket. It should be present in the sent TCP SYN-ACK segment.
-func TestUserSuppliedMSSOnListenAccept(t *testing.T) {
- const mtu = 5000
-
- ips := []struct {
- name string
- createEP func(*context.Context)
- sendPkt func(*context.Context, *context.Headers)
- checker func(*testing.T, *context.Context, uint16, uint16)
- maxMSS uint16
- }{
- {
- name: "IPv4",
- createEP: func(c *context.Context) {
- c.Create(-1)
- },
- sendPkt: func(c *context.Context, h *context.Headers) {
- c.SendPacket(nil, h)
- },
- checker: func(t *testing.T, c *context.Context, srcPort, mss uint16) {
- checker.IPv4(t, c.GetPacket(), checker.TCP(
- checker.DstPort(srcPort),
- checker.TCPFlags(header.TCPFlagSyn|header.TCPFlagAck),
- checker.TCPSynOptions(header.TCPSynOptions{MSS: mss, WS: -1})))
- },
- maxMSS: mtu - header.IPv4MinimumSize - header.TCPMinimumSize,
- },
- {
- name: "IPv6",
- createEP: func(c *context.Context) {
- c.CreateV6Endpoint(false)
- },
- sendPkt: func(c *context.Context, h *context.Headers) {
- c.SendV6Packet(nil, h)
- },
- checker: func(t *testing.T, c *context.Context, srcPort, mss uint16) {
- checker.IPv6(t, c.GetV6Packet(), checker.TCP(
- checker.DstPort(srcPort),
- checker.TCPFlags(header.TCPFlagSyn|header.TCPFlagAck),
- checker.TCPSynOptions(header.TCPSynOptions{MSS: mss, WS: -1})))
- },
- maxMSS: mtu - header.IPv6MinimumSize - header.TCPMinimumSize,
- },
- }
-
- for _, ip := range ips {
- t.Run(ip.name, func(t *testing.T) {
- tests := []struct {
- name string
- setMSS uint16
- expMSS uint16
- }{
- {
- name: "EqualToMaxMSS",
- setMSS: ip.maxMSS,
- expMSS: ip.maxMSS,
- },
- {
- name: "LessThanMaxMSS",
- setMSS: ip.maxMSS - 1,
- expMSS: ip.maxMSS - 1,
- },
- {
- name: "GreaterThanMaxMSS",
- setMSS: ip.maxMSS + 1,
- expMSS: ip.maxMSS,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- c := context.New(t, mtu)
- defer c.Cleanup()
-
- ip.createEP(c)
-
- if err := c.EP.SetSockOptInt(tcpip.MaxSegOption, int(test.setMSS)); err != nil {
- t.Fatalf("SetSockOptInt(MaxSegOption, %d): %s", test.setMSS, err)
- }
-
- bindAddr := tcpip.FullAddress{Port: context.StackPort}
- if err := c.EP.Bind(bindAddr); err != nil {
- t.Fatalf("Bind(%+v): %s:", bindAddr, err)
- }
-
- backlog := 5
-				// Keep the number of client requests at twice the backlog
-				// so that half of the connections do not use SYN cookies
-				// and the other half do.
- clientConnects := backlog * 2
-
- if err := c.EP.Listen(backlog); err != nil {
- t.Fatalf("Listen(%d): %s:", backlog, err)
- }
-
- for i := 0; i < clientConnects; i++ {
-					// Send a SYN request.
- iss := seqnum.Value(i)
- srcPort := context.TestPort + uint16(i)
- ip.sendPkt(c, &context.Headers{
- SrcPort: srcPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: iss,
- })
-
- // Receive the SYN-ACK reply.
- ip.checker(t, c, srcPort, test.expMSS)
- }
- })
- }
- })
- }
-}
-
-func TestSendRstOnListenerRxSynAckV4(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.Create(-1)
-
- if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatal("Bind failed:", err)
- }
-
- if err := c.EP.Listen(10); err != nil {
- t.Fatal("Listen failed:", err)
- }
-
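-	// Send a SYN-ACK segment to the listening endpoint; a listener has no
-	// matching connection for it, so a RST is expected.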
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn | header.TCPFlagAck,
- SeqNum: 100,
- AckNum: 200,
- })
-
- checker.IPv4(t, c.GetPacket(), checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagRst),
- checker.TCPSeqNum(200)))
-}
-
-func TestSendRstOnListenerRxSynAckV6(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateV6Endpoint(true)
-
- if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatal("Bind failed:", err)
- }
-
- if err := c.EP.Listen(10); err != nil {
- t.Fatal("Listen failed:", err)
- }
-
- c.SendV6Packet(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn | header.TCPFlagAck,
- SeqNum: 100,
- AckNum: 200,
- })
-
- checker.IPv6(t, c.GetV6Packet(), checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagRst),
- checker.TCPSeqNum(200)))
-}
-
-// TestTCPAckBeforeAcceptV4 tests that once the 3-way handshake is complete,
-// peers can send data and expect a response within a reasonable amount of time
-// without calling Accept on the listening endpoint first.
-//
-// This test uses IPv4.
-func TestTCPAckBeforeAcceptV4(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.Create(-1)
-
- if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatal("Bind failed:", err)
- }
-
- if err := c.EP.Listen(10); err != nil {
- t.Fatal("Listen failed:", err)
- }
-
- irs, iss := executeHandshake(t, c, context.TestPort, false /* synCookiesInUse */)
-
- // Send data before accepting the connection.
- c.SendPacket([]byte{1, 2, 3, 4}, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck,
- SeqNum: irs + 1,
- AckNum: iss + 1,
- })
-
- // Receive ACK for the data we sent.
- checker.IPv4(t, c.GetPacket(), checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagAck),
- checker.TCPSeqNum(uint32(iss+1)),
- checker.TCPAckNum(uint32(irs+5))))
-}
-
-// TestTCPAckBeforeAcceptV6 tests that once the 3-way handshake is complete,
-// peers can send data and expect a response within a reasonable amount of time
-// without calling Accept on the listening endpoint first.
-//
-// This test uses IPv6.
-func TestTCPAckBeforeAcceptV6(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateV6Endpoint(true)
-
- if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatal("Bind failed:", err)
- }
-
- if err := c.EP.Listen(10); err != nil {
- t.Fatal("Listen failed:", err)
- }
-
- irs, iss := executeV6Handshake(t, c, context.TestPort, false /* synCookiesInUse */)
-
- // Send data before accepting the connection.
- c.SendV6Packet([]byte{1, 2, 3, 4}, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck,
- SeqNum: irs + 1,
- AckNum: iss + 1,
- })
-
- // Receive ACK for the data we sent.
- checker.IPv6(t, c.GetV6Packet(), checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagAck),
- checker.TCPSeqNum(uint32(iss+1)),
- checker.TCPAckNum(uint32(irs+5))))
-}
-
-func TestSendRstOnListenerRxAckV4(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.Create(-1 /* epRcvBuf */)
-
- if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatal("Bind failed:", err)
- }
-
- if err := c.EP.Listen(10 /* backlog */); err != nil {
- t.Fatal("Listen failed:", err)
- }
-
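-	// Send a FIN-ACK segment to the listening endpoint; it has no matching
-	// connection, so a RST is expected.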
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagFin | header.TCPFlagAck,
- SeqNum: 100,
- AckNum: 200,
- })
-
- checker.IPv4(t, c.GetPacket(), checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagRst),
- checker.TCPSeqNum(200)))
-}
-
-func TestSendRstOnListenerRxAckV6(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateV6Endpoint(true /* v6Only */)
-
- if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatal("Bind failed:", err)
- }
-
- if err := c.EP.Listen(10 /* backlog */); err != nil {
- t.Fatal("Listen failed:", err)
- }
-
- c.SendV6Packet(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagFin | header.TCPFlagAck,
- SeqNum: 100,
- AckNum: 200,
- })
-
- checker.IPv6(t, c.GetV6Packet(), checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagRst),
- checker.TCPSeqNum(200)))
-}
-
-// TestListenShutdown tests that the listening endpoint replies with a RST
-// on read shutdown.
-func TestListenShutdown(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.Create(-1 /* epRcvBuf */)
-
- if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatal("Bind failed:", err)
- }
-
- if err := c.EP.Listen(1 /* backlog */); err != nil {
- t.Fatal("Listen failed:", err)
- }
-
- if err := c.EP.Shutdown(tcpip.ShutdownRead); err != nil {
- t.Fatal("Shutdown failed:", err)
- }
-
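-	// Send a SYN to the listener after its read side has been shut down.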
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: 100,
- AckNum: 200,
- })
-
- // Expect the listening endpoint to reset the connection.
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagAck|header.TCPFlagRst),
- ))
-}
-
-var _ waiter.EntryCallback = (callback)(nil)
-
-type callback func(*waiter.Entry, waiter.EventMask)
-
-func (cb callback) Callback(entry *waiter.Entry, mask waiter.EventMask) {
- cb(entry, mask)
-}
-
-func TestListenerReadinessOnEvent(t *testing.T) {
- s := stack.New(stack.Options{
- TransportProtocols: []stack.TransportProtocolFactory{tcp.NewProtocol},
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},
- })
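-	// Set up a loopback NIC with the stack address and a default IPv4 route.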
- {
- ep := loopback.New()
- if testing.Verbose() {
- ep = sniffer.New(ep)
- }
- const id = 1
- if err := s.CreateNIC(id, ep); err != nil {
- t.Fatalf("CreateNIC(%d, %T): %s", id, ep, err)
- }
- if err := s.AddAddress(id, ipv4.ProtocolNumber, context.StackAddr); err != nil {
- t.Fatalf("AddAddress(%d, ipv4.ProtocolNumber, %s): %s", id, context.StackAddr, err)
- }
- s.SetRouteTable([]tcpip.Route{
- {Destination: header.IPv4EmptySubnet, NIC: id},
- })
- }
-
- var wq waiter.Queue
- ep, err := s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &wq)
- if err != nil {
- t.Fatalf("NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, _): %s", err)
- }
- defer ep.Close()
-
- if err := ep.Bind(tcpip.FullAddress{Addr: context.StackAddr}); err != nil {
- t.Fatalf("Bind(%s): %s", context.StackAddr, err)
- }
- const backlog = 1
- if err := ep.Listen(backlog); err != nil {
- t.Fatalf("Listen(%d): %s", backlog, err)
- }
-
- address, err := ep.GetLocalAddress()
- if err != nil {
- t.Fatalf("GetLocalAddress(): %s", err)
- }
-
- conn, err := s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &wq)
- if err != nil {
- t.Fatalf("NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, _): %s", err)
- }
- defer conn.Close()
-
- events := make(chan waiter.EventMask)
- // Scope `entry` to allow a binding of the same name below.
- {
- entry := waiter.Entry{Callback: callback(func(_ *waiter.Entry, mask waiter.EventMask) {
- events <- ep.Readiness(mask)
- })}
- wq.EventRegister(&entry, waiter.EventIn)
- defer wq.EventUnregister(&entry)
- }
-
- entry, ch := waiter.NewChannelEntry(nil)
- wq.EventRegister(&entry, waiter.EventOut)
- defer wq.EventUnregister(&entry)
-
- switch err := conn.Connect(address).(type) {
- case *tcpip.ErrConnectStarted:
- default:
- t.Fatalf("Connect(%#v): %v", address, err)
- }
-
- // Read at least one event.
- got := <-events
- for {
- select {
- case event := <-events:
- got |= event
- continue
- case <-ch:
- if want := waiter.ReadableEvents; got != want {
- t.Errorf("observed events = %b, want %b", got, want)
- }
- }
- break
- }
-}
-
-// TestListenCloseWhileConnect tests that the listening endpoint drains
-// its accept queue when closed. This should reset all of the
-// pending connections that are waiting to be accepted.
-func TestListenCloseWhileConnect(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.Create(-1 /* epRcvBuf */)
-
- if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatal("Bind failed:", err)
- }
-
- if err := c.EP.Listen(1 /* backlog */); err != nil {
- t.Fatal("Listen failed:", err)
- }
-
- waitEntry, notifyCh := waiter.NewChannelEntry(nil)
- c.WQ.EventRegister(&waitEntry, waiter.ReadableEvents)
- defer c.WQ.EventUnregister(&waitEntry)
-
- executeHandshake(t, c, context.TestPort, false /* synCookiesInUse */)
- // Wait for the new endpoint created because of handshake to be delivered
- // to the listening endpoint's accept queue.
- <-notifyCh
-
- // Close the listening endpoint.
- c.EP.Close()
-
- // Expect the listening endpoint to reset the connection.
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagAck|header.TCPFlagRst),
- ))
-}
-
-func TestTOSV4(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- ep, err := c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &c.WQ)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
- c.EP = ep
-
- const tos = 0xC0
- if err := c.EP.SetSockOptInt(tcpip.IPv4TOSOption, tos); err != nil {
- t.Errorf("SetSockOptInt(IPv4TOSOption, %d) failed: %s", tos, err)
- }
-
- v, err := c.EP.GetSockOptInt(tcpip.IPv4TOSOption)
- if err != nil {
- t.Errorf("GetSockoptInt(IPv4TOSOption) failed: %s", err)
- }
-
- if v != tos {
- t.Errorf("got GetSockOptInt(IPv4TOSOption) = %d, want = %d", v, tos)
- }
-
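-	// Establish the connection; the TOS checker verifies that outgoing
-	// segments carry the configured TOS.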
- testV4Connect(t, c, checker.TOS(tos, 0))
-
- data := []byte{1, 2, 3}
- var r bytes.Reader
- r.Reset(data)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- // Check that data is received.
- b := c.GetPacket()
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- checker.IPv4(t, b,
- checker.PayloadLen(len(data)+header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)), // Acknum is initial sequence number + 1
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- checker.TOS(tos, 0),
- )
-
- if p := b[header.IPv4MinimumSize+header.TCPMinimumSize:]; !bytes.Equal(data, p) {
- t.Errorf("got data = %x, want = %x", p, data)
- }
-}
-
-func TestTrafficClassV6(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateV6Endpoint(false)
-
- const tos = 0xC0
- if err := c.EP.SetSockOptInt(tcpip.IPv6TrafficClassOption, tos); err != nil {
- t.Errorf("SetSockOpInt(IPv6TrafficClassOption, %d) failed: %s", tos, err)
- }
-
- v, err := c.EP.GetSockOptInt(tcpip.IPv6TrafficClassOption)
- if err != nil {
- t.Fatalf("GetSockoptInt(IPv6TrafficClassOption) failed: %s", err)
- }
-
- if v != tos {
- t.Errorf("got GetSockOptInt(IPv6TrafficClassOption) = %d, want = %d", v, tos)
- }
-
- // Test the connection request.
- testV6Connect(t, c, checker.TOS(tos, 0))
-
- data := []byte{1, 2, 3}
- var r bytes.Reader
- r.Reset(data)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- // Check that data is received.
- b := c.GetV6Packet()
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- checker.IPv6(t, b,
- checker.PayloadLen(len(data)+header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- checker.TOS(tos, 0),
- )
-
- if p := b[header.IPv6MinimumSize+header.TCPMinimumSize:]; !bytes.Equal(data, p) {
- t.Errorf("got data = %x, want = %x", p, data)
- }
-}
-
-func TestConnectBindToDevice(t *testing.T) {
- for _, test := range []struct {
- name string
- device tcpip.NICID
- want tcp.EndpointState
- }{
- {"RightDevice", 1, tcp.StateEstablished},
- {"WrongDevice", 2, tcp.StateSynSent},
- {"AnyDevice", 0, tcp.StateEstablished},
- } {
- t.Run(test.name, func(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.Create(-1)
- if err := c.EP.SocketOptions().SetBindToDevice(int32(test.device)); err != nil {
- t.Fatalf("c.EP.SetSockOpt(&%T(%d)): %s", test.device, test.device, err)
- }
- // Start connection attempt.
- waitEntry, _ := waiter.NewChannelEntry(nil)
- c.WQ.EventRegister(&waitEntry, waiter.WritableEvents)
- defer c.WQ.EventUnregister(&waitEntry)
-
- err := c.EP.Connect(tcpip.FullAddress{Addr: context.TestAddr, Port: context.TestPort})
- if d := cmp.Diff(&tcpip.ErrConnectStarted{}, err); d != "" {
- t.Fatalf("c.EP.Connect(...) mismatch (-want +got):\n%s", d)
- }
-
- // Receive SYN packet.
- b := c.GetPacket()
- checker.IPv4(t, b,
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagSyn),
- ),
- )
- if got, want := tcp.EndpointState(c.EP.State()), tcp.StateSynSent; got != want {
- t.Fatalf("unexpected endpoint state: want %s, got %s", want, got)
- }
- tcpHdr := header.TCP(header.IPv4(b).Payload())
- c.IRS = seqnum.Value(tcpHdr.SequenceNumber())
-
- iss := seqnum.Value(context.TestInitialSequenceNumber)
- rcvWnd := seqnum.Size(30000)
- c.SendPacket(nil, &context.Headers{
- SrcPort: tcpHdr.DestinationPort(),
- DstPort: tcpHdr.SourcePort(),
- Flags: header.TCPFlagSyn | header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(1),
- RcvWnd: rcvWnd,
- TCPOpts: nil,
- })
-
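-			// Drain the next outgoing packet before checking the endpoint state.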
- c.GetPacket()
- if got, want := tcp.EndpointState(c.EP.State()), test.want; got != want {
- t.Fatalf("unexpected endpoint state: want %s, got %s", want, got)
- }
- })
- }
-}
-
-func TestSynSent(t *testing.T) {
- for _, test := range []struct {
- name string
- reset bool
- }{
- {"RstOnSynSent", true},
- {"CloseOnSynSent", false},
- } {
- t.Run(test.name, func(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
-			// Create an endpoint but do not complete the handshake, because we want
-			// to interfere with the handshake process.
- c.Create(-1)
-
- // Start connection attempt.
- waitEntry, ch := waiter.NewChannelEntry(nil)
- c.WQ.EventRegister(&waitEntry, waiter.EventHUp)
- defer c.WQ.EventUnregister(&waitEntry)
-
- addr := tcpip.FullAddress{Addr: context.TestAddr, Port: context.TestPort}
- err := c.EP.Connect(addr)
- if d := cmp.Diff(err, &tcpip.ErrConnectStarted{}); d != "" {
- t.Fatalf("Connect(...) mismatch (-want +got):\n%s", d)
- }
-
- // Receive SYN packet.
- b := c.GetPacket()
- checker.IPv4(t, b,
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagSyn),
- ),
- )
-
- if got, want := tcp.EndpointState(c.EP.State()), tcp.StateSynSent; got != want {
- t.Fatalf("got State() = %s, want %s", got, want)
- }
- tcpHdr := header.TCP(header.IPv4(b).Payload())
- c.IRS = seqnum.Value(tcpHdr.SequenceNumber())
-
- if test.reset {
- // Send a packet with a proper ACK and a RST flag to cause the socket
- // to error and close out.
- iss := seqnum.Value(context.TestInitialSequenceNumber)
- rcvWnd := seqnum.Size(30000)
- c.SendPacket(nil, &context.Headers{
- SrcPort: tcpHdr.DestinationPort(),
- DstPort: tcpHdr.SourcePort(),
- Flags: header.TCPFlagRst | header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(1),
- RcvWnd: rcvWnd,
- TCPOpts: nil,
- })
- } else {
- c.EP.Close()
- }
-
- // Wait for receive to be notified.
- select {
- case <-ch:
- case <-time.After(3 * time.Second):
- t.Fatal("timed out waiting for packet to arrive")
- }
-
- ept := endpointTester{c.EP}
- if test.reset {
- ept.CheckReadError(t, &tcpip.ErrConnectionRefused{})
- } else {
- ept.CheckReadError(t, &tcpip.ErrAborted{})
- }
-
- if got := c.Stack().Stats().TCP.CurrentConnected.Value(); got != 0 {
- t.Errorf("got stats.TCP.CurrentConnected.Value() = %d, want = 0", got)
- }
-
-			// Whether due to the RST or the Close, the endpoint should now be in an error state.
- if got, want := tcp.EndpointState(c.EP.State()), tcp.StateError; got != want {
- t.Fatalf("got State() = %s, want %s", got, want)
- }
- })
- }
-}
-
-func TestOutOfOrderReceive(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
-
- we, ch := waiter.NewChannelEntry(nil)
- c.WQ.EventRegister(&we, waiter.ReadableEvents)
- defer c.WQ.EventUnregister(&we)
-
- ept := endpointTester{c.EP}
- ept.CheckReadError(t, &tcpip.ErrWouldBlock{})
-
- // Send second half of data first, with seqnum 3 ahead of expected.
- data := []byte{1, 2, 3, 4, 5, 6}
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- c.SendPacket(data[3:], &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss.Add(3),
- AckNum: c.IRS.Add(1),
- RcvWnd: 30000,
- })
-
- // Check that we get an ACK specifying which seqnum is expected.
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlags(header.TCPFlagAck),
- ),
- )
-
- // Wait 200ms and check that no data has been received.
- time.Sleep(200 * time.Millisecond)
- ept.CheckReadError(t, &tcpip.ErrWouldBlock{})
-
- // Send the first 3 bytes now.
- c.SendPacket(data[:3], &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(1),
- RcvWnd: 30000,
- })
-
- // Receive data.
- read := ept.CheckReadFull(t, 6, ch, 5*time.Second)
-
- // Check that we received the data in proper order.
- if !bytes.Equal(data, read) {
- t.Fatalf("got data = %v, want = %v", read, data)
- }
-
- // Check that the whole data is acknowledged.
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)+uint32(len(data))),
- checker.TCPFlags(header.TCPFlagAck),
- ),
- )
-}
-
-func TestOutOfOrderFlood(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- rcvBufSz := math.MaxUint16
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, rcvBufSz)
-
- ept := endpointTester{c.EP}
- ept.CheckReadError(t, &tcpip.ErrWouldBlock{})
-
- // Send 100 packets before the actual one that is expected.
- data := []byte{1, 2, 3, 4, 5, 6}
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- for i := 0; i < 100; i++ {
- c.SendPacket(data[3:], &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss.Add(6),
- AckNum: c.IRS.Add(1),
- RcvWnd: 30000,
- })
-
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlags(header.TCPFlagAck),
- ),
- )
- }
-
- // Send packet with seqnum as initial + 3. It must be discarded because the
- // out-of-order buffer was filled by the previous packets.
- c.SendPacket(data[3:], &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss.Add(3),
- AckNum: c.IRS.Add(1),
- RcvWnd: 30000,
- })
-
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlags(header.TCPFlagAck),
- ),
- )
-
- // Now send the expected packet with initial sequence number.
- c.SendPacket(data[:3], &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(1),
- RcvWnd: 30000,
- })
-
- // Check that only packet with initial sequence number is acknowledged.
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)+3),
- checker.TCPFlags(header.TCPFlagAck),
- ),
- )
-}
-
-func TestRstOnCloseWithUnreadData(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
-
- we, ch := waiter.NewChannelEntry(nil)
- c.WQ.EventRegister(&we, waiter.ReadableEvents)
- defer c.WQ.EventUnregister(&we)
-
- ept := endpointTester{c.EP}
- ept.CheckReadError(t, &tcpip.ErrWouldBlock{})
-
- data := []byte{1, 2, 3}
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- c.SendPacket(data, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(1),
- RcvWnd: 30000,
- })
-
- // Wait for receive to be notified.
- select {
- case <-ch:
- case <-time.After(3 * time.Second):
- t.Fatalf("Timed out waiting for data to arrive")
- }
-
- // Check that ACK is received, this happens regardless of the read.
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)+uint32(len(data))),
- checker.TCPFlags(header.TCPFlagAck),
- ),
- )
-
- // Now that we know we have unread data, let's just close the connection
- // and verify that netstack sends an RST rather than a FIN.
- c.EP.Close()
-
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagAck|header.TCPFlagRst),
- // We shouldn't consume a sequence number on RST.
- checker.TCPSeqNum(uint32(c.IRS)+1),
- ))
- // The RST puts the endpoint into an error state.
- if got, want := tcp.EndpointState(c.EP.State()), tcp.StateError; got != want {
- t.Errorf("unexpected endpoint state: want %s, got %s", want, got)
- }
-
- // This final ACK should be ignored because an ACK on a reset doesn't mean
- // anything.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss.Add(seqnum.Size(len(data))),
- AckNum: c.IRS.Add(seqnum.Size(2)),
- RcvWnd: 30000,
- })
-}
-
-func TestRstOnCloseWithUnreadDataFinConvertRst(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
-
- we, ch := waiter.NewChannelEntry(nil)
- c.WQ.EventRegister(&we, waiter.ReadableEvents)
- defer c.WQ.EventUnregister(&we)
-
- ept := endpointTester{c.EP}
- ept.CheckReadError(t, &tcpip.ErrWouldBlock{})
-
- data := []byte{1, 2, 3}
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- c.SendPacket(data, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(1),
- RcvWnd: 30000,
- })
-
- // Wait for receive to be notified.
- select {
- case <-ch:
- case <-time.After(3 * time.Second):
- t.Fatalf("Timed out waiting for data to arrive")
- }
-
- // Check that ACK is received, this happens regardless of the read.
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)+uint32(len(data))),
- checker.TCPFlags(header.TCPFlagAck),
- ),
- )
-
- // Cause a FIN to be generated.
- c.EP.Shutdown(tcpip.ShutdownWrite)
-
-	// Make sure we get the FIN but don't ACK it.
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagAck|header.TCPFlagFin),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- ))
-
- if got, want := tcp.EndpointState(c.EP.State()), tcp.StateFinWait1; got != want {
- t.Errorf("unexpected endpoint state: want %s, got %s", want, got)
- }
-
- // Cause a RST to be generated by closing the read end now since we have
- // unread data.
- c.EP.Shutdown(tcpip.ShutdownRead)
-
- // Make sure we get the RST
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagAck|header.TCPFlagRst),
-			// The RST is always generated with sndNxt, which, if the FIN
-			// has been sent, will be 1 higher than the sequence
-			// number of the FIN itself.
- checker.TCPSeqNum(uint32(c.IRS)+2),
- ))
- // The RST puts the endpoint into an error state.
- if got, want := tcp.EndpointState(c.EP.State()), tcp.StateError; got != want {
- t.Errorf("unexpected endpoint state: want %s, got %s", want, got)
- }
-
- // The ACK to the FIN should now be rejected since the connection has been
- // closed by a RST.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss.Add(seqnum.Size(len(data))),
- AckNum: c.IRS.Add(seqnum.Size(2)),
- RcvWnd: 30000,
- })
-}
-
-func TestShutdownRead(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
-
- ept := endpointTester{c.EP}
- ept.CheckReadError(t, &tcpip.ErrWouldBlock{})
-
- if err := c.EP.Shutdown(tcpip.ShutdownRead); err != nil {
- t.Fatalf("Shutdown failed: %s", err)
- }
-
- ept.CheckReadError(t, &tcpip.ErrClosedForReceive{})
- var want uint64 = 1
- if got := c.EP.Stats().(*tcp.Stats).ReadErrors.ReadClosed.Value(); got != want {
- t.Fatalf("got EP stats Stats.ReadErrors.ReadClosed got %d want %d", got, want)
- }
-}
-
-func TestFullWindowReceive(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- const rcvBufSz = 10
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, rcvBufSz)
-
- we, ch := waiter.NewChannelEntry(nil)
- c.WQ.EventRegister(&we, waiter.ReadableEvents)
- defer c.WQ.EventUnregister(&we)
-
- ept := endpointTester{c.EP}
- ept.CheckReadError(t, &tcpip.ErrWouldBlock{})
-
-	// Fill up the window with tcp.SegOverheadFactor*rcvBufSz as netstack multiplies
- // the provided buffer value by tcp.SegOverheadFactor to calculate the actual
- // receive buffer size.
- data := make([]byte, tcp.SegOverheadFactor*rcvBufSz)
- for i := range data {
- data[i] = byte(i % 255)
- }
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- c.SendPacket(data, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(1),
- RcvWnd: 30000,
- })
-
- // Wait for receive to be notified.
- select {
- case <-ch:
- case <-time.After(5 * time.Second):
- t.Fatalf("Timed out waiting for data to arrive")
- }
-
- // Check that data is acknowledged, and window goes to zero.
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)+uint32(len(data))),
- checker.TCPFlags(header.TCPFlagAck),
- checker.TCPWindow(0),
- ),
- )
-
- // Receive data and check it.
- v := ept.CheckRead(t)
- if !bytes.Equal(data, v) {
- t.Fatalf("got data = %v, want = %v", v, data)
- }
-
- var want uint64 = 1
- if got := c.EP.Stats().(*tcp.Stats).ReceiveErrors.ZeroRcvWindowState.Value(); got != want {
- t.Fatalf("got EP stats ReceiveErrors.ZeroRcvWindowState got %d want %d", got, want)
- }
-
- // Check that we get an ACK for the newly non-zero window.
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)+uint32(len(data))),
- checker.TCPFlags(header.TCPFlagAck),
- checker.TCPWindow(10),
- ),
- )
-}
-
-// Test the stack receive window advertisement on receiving segments smaller than
-// segment overhead. It tests that the right edge of the window does not grow when
-// the endpoint is not being read from.
-func TestSmallSegReceiveWindowAdvertisement(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- opt := tcpip.TCPReceiveBufferSizeRangeOption{
- Min: 1,
- Default: tcp.DefaultReceiveBufferSize,
- Max: tcp.DefaultReceiveBufferSize << tcp.FindWndScale(seqnum.Size(tcp.DefaultReceiveBufferSize)),
- }
- if err := c.Stack().SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- t.Fatalf("SetTransportProtocolOption(%d, &%#v): %s", tcp.ProtocolNumber, opt, err)
- }
-
- c.AcceptWithOptions(tcp.FindWndScale(seqnum.Size(opt.Default)), header.TCPSynOptions{MSS: defaultIPv4MSS})
-
- // Bump up the receive buffer size such that, when the receive window grows,
- // the scaled window exceeds maxUint16.
- c.EP.SocketOptions().SetReceiveBufferSize(int64(opt.Max)*2, true /* notify */)
-
- // Keep the payload size < segment overhead and such that it is a multiple
- // of the window scaled value. This enables the test to perform equality
- // checks on the incoming receive window.
- payloadSize := 1 << c.RcvdWindowScale
- if payloadSize >= tcp.SegSize {
- t.Fatalf("payload size of %d is not less than the segment overhead of %d", payloadSize, tcp.SegSize)
- }
- payload := generateRandomPayload(t, payloadSize)
- payloadLen := seqnum.Size(len(payload))
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
-
- // Send payload to the endpoint and return the advertised receive window
- // from the endpoint.
- getIncomingRcvWnd := func() uint32 {
- c.SendPacket(payload, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- SeqNum: iss,
- AckNum: c.IRS.Add(1),
- Flags: header.TCPFlagAck,
- RcvWnd: 30000,
- })
- iss = iss.Add(payloadLen)
-
- pkt := c.GetPacket()
- return uint32(header.TCP(header.IPv4(pkt).Payload()).WindowSize()) << c.RcvdWindowScale
- }
-
- // Read the advertised receive window with the ACK for payload.
- rcvWnd := getIncomingRcvWnd()
-
- // Check if the subsequent ACK to our send has not grown the right edge of
- // the window.
- if got, want := getIncomingRcvWnd(), rcvWnd-uint32(len(payload)); got != want {
- t.Fatalf("got incomingRcvwnd %d want %d", got, want)
- }
-
- // Read the data so that the subsequent ACK from the endpoint
- // grows the right edge of the window.
- var buf bytes.Buffer
- if _, err := c.EP.Read(&buf, tcpip.ReadOptions{}); err != nil {
- t.Fatalf("c.EP.Read: %s", err)
- }
-
-	// Check that we have received max uint16 as our advertised
-	// scaled window after the read above.
- maxRcv := uint32(math.MaxUint16 << c.RcvdWindowScale)
- if got, want := getIncomingRcvWnd(), maxRcv; got != want {
- t.Fatalf("got incomingRcvwnd %d want %d", got, want)
- }
-
- // Check if the subsequent ACK to our send has not grown the right edge of
- // the window.
- if got, want := getIncomingRcvWnd(), maxRcv-uint32(len(payload)); got != want {
- t.Fatalf("got incomingRcvwnd %d want %d", got, want)
- }
-}
-
-func TestNoWindowShrinking(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- // Start off with a certain receive buffer then cut it in half and verify that
- // the right edge of the window does not shrink.
- // NOTE: Netstack doubles the value specified here.
- rcvBufSize := 65536
- // Enable window scaling with a scale of zero from our end.
- c.CreateConnectedWithRawOptions(context.TestInitialSequenceNumber, 30000, rcvBufSize, []byte{
- header.TCPOptionWS, 3, 0, header.TCPOptionNOP,
- })
-
- we, ch := waiter.NewChannelEntry(nil)
- c.WQ.EventRegister(&we, waiter.ReadableEvents)
- defer c.WQ.EventUnregister(&we)
-
- ept := endpointTester{c.EP}
- ept.CheckReadError(t, &tcpip.ErrWouldBlock{})
-
- // Send a 1 byte payload so that we can record the current receive window.
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- payload := []byte{1}
- c.SendPacket(payload, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(1),
- RcvWnd: 30000,
- })
-
- // Wait for receive to be notified.
- select {
- case <-ch:
- case <-time.After(5 * time.Second):
- t.Fatalf("Timed out waiting for data to arrive")
- }
-
- // Read the 1 byte payload we just sent.
- if got, want := payload, ept.CheckRead(t); !bytes.Equal(got, want) {
- t.Fatalf("got data: %v, want: %v", got, want)
- }
-
- // Verify that the ACK does not shrink the window.
- pkt := c.GetPacket()
- iss = iss.Add(1)
- checker.IPv4(t, pkt,
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlags(header.TCPFlagAck),
- ),
- )
- // Stash the initial window.
- initialWnd := header.TCP(header.IPv4(pkt).Payload()).WindowSize() << c.RcvdWindowScale
- initialLastAcceptableSeq := iss.Add(seqnum.Size(initialWnd))
- // Now shrink the receive buffer to half its original size.
- c.EP.SocketOptions().SetReceiveBufferSize(int64(rcvBufSize), true /* notify */)
-
- data := generateRandomPayload(t, rcvBufSize)
- // Send a payload of half the size of rcvBufSize.
- c.SendPacket(data[:rcvBufSize/2], &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(1),
- RcvWnd: 30000,
- })
- iss = iss.Add(seqnum.Size(rcvBufSize / 2))
-
- // Verify that the ACK does not shrink the window.
- pkt = c.GetPacket()
- checker.IPv4(t, pkt,
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlags(header.TCPFlagAck),
- ),
- )
- newWnd := header.TCP(header.IPv4(pkt).Payload()).WindowSize() << c.RcvdWindowScale
- newLastAcceptableSeq := iss.Add(seqnum.Size(newWnd))
- if newLastAcceptableSeq.LessThan(initialLastAcceptableSeq) {
- t.Fatalf("receive window shrunk unexpectedly got: %d, want >= %d", newLastAcceptableSeq, initialLastAcceptableSeq)
- }
-
- // Send another payload of half the size of rcvBufSize. This should fill up the
- // socket receive buffer and we should see a zero window.
- c.SendPacket(data[rcvBufSize/2:], &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(1),
- RcvWnd: 30000,
- })
- iss = iss.Add(seqnum.Size(rcvBufSize / 2))
-
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlags(header.TCPFlagAck),
- checker.TCPWindow(0),
- ),
- )
-
- // Receive data and check it.
- read := ept.CheckReadFull(t, len(data), ch, 5*time.Second)
- if !bytes.Equal(data, read) {
- t.Fatalf("got data = %v, want = %v", read, data)
- }
-
- // Check that we get an ACK for the newly non-zero window, which is the new
- // receive buffer size we set after the connection was established.
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlags(header.TCPFlagAck),
- checker.TCPWindow(uint16(rcvBufSize/2)>>c.RcvdWindowScale),
- ),
- )
-}
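The no-shrink property checked above reduces to a single comparison on the announced right edge. A tiny sketch with illustrative numbers; lastAcceptable is not a helper from this file:

package main

import "fmt"

// lastAcceptable returns the right edge announced by an ACK: the ACK number
// plus the (already unscaled) advertised window.
func lastAcceptable(ack, wnd uint32) uint32 { return ack + wnd }

func main() {
	initial := lastAcceptable(100, 65536)   // advertisement before the buffer is halved
	updated := lastAcceptable(32868, 32768) // advertisement after more data is acked
	fmt.Println(updated >= initial)         // must hold, otherwise the window shrank
}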
-
-func TestSimpleSend(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
-
- data := []byte{1, 2, 3}
- var r bytes.Reader
- r.Reset(data)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- // Check that data is received.
- b := c.GetPacket()
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- checker.IPv4(t, b,
- checker.PayloadLen(len(data)+header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
-
- if p := b[header.IPv4MinimumSize+header.TCPMinimumSize:]; !bytes.Equal(data, p) {
- t.Fatalf("got data = %v, want = %v", p, data)
- }
-
- // Acknowledge the data.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(1 + seqnum.Size(len(data))),
- RcvWnd: 30000,
- })
-}
-
-func TestZeroWindowSend(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 0 /* rcvWnd */, -1 /* epRcvBuf */)
-
- data := []byte{1, 2, 3}
- var r bytes.Reader
- r.Reset(data)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- // Check if we got a zero-window probe.
- b := c.GetPacket()
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- checker.IPv4(t, b,
- checker.PayloadLen(header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
-
- // Open up the window. Data should be received now.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(1),
- RcvWnd: 30000,
- })
-
- // Check that data is received.
- b = c.GetPacket()
- checker.IPv4(t, b,
- checker.PayloadLen(len(data)+header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
-
- if p := b[header.IPv4MinimumSize+header.TCPMinimumSize:]; !bytes.Equal(data, p) {
- t.Fatalf("got data = %v, want = %v", p, data)
- }
-
- // Acknowledge the data.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(1 + seqnum.Size(len(data))),
- RcvWnd: 30000,
- })
-}
-
-func TestScaledWindowConnect(t *testing.T) {
- // This test ensures that window scaling is used when the peer advertises it
- // and the connection is established with Connect().
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- // Set the window size greater than the maximum non-scaled window.
- c.CreateConnectedWithRawOptions(context.TestInitialSequenceNumber, 30000, 65535*3, []byte{
- header.TCPOptionWS, 3, 0, header.TCPOptionNOP,
- })
-
- data := []byte{1, 2, 3}
- var r bytes.Reader
- r.Reset(data)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- // Check that data is received and that the advertised window is 0x5fff,
- // that is, that it is scaled.
- b := c.GetPacket()
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- checker.IPv4(t, b,
- checker.PayloadLen(len(data)+header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPWindow(0x5fff),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
-}
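For reference, the 0x5fff expected above follows directly from the buffer size and the window scale; a standalone arithmetic check (the buffer value is copied from the test, the program itself is illustrative):

package main

import "fmt"

func main() {
	const rcvBuf = 65535 * 3 // epRcvBuf passed to CreateConnectedWithRawOptions
	const ws = 3             // receive-window scale implied by the 0x5fff check
	fmt.Printf("%#x\n", rcvBuf>>ws) // 0x5fff
}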
-
-func TestNonScaledWindowConnect(t *testing.T) {
- // This test ensures that window scaling is not used when the peer
- // doesn't advertise it and the connection is established with Connect().
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- // Set the window size greater than the maximum non-scaled window.
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, 65535*3)
-
- data := []byte{1, 2, 3}
- var r bytes.Reader
- r.Reset(data)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- // Check that data is received and that the advertised window is 0xffff,
- // that is, that it is not scaled.
- b := c.GetPacket()
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- checker.IPv4(t, b,
- checker.PayloadLen(len(data)+header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPWindow(0xffff),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
-}
-
-func TestScaledWindowAccept(t *testing.T) {
- // This test ensures that window scaling is used when the peer advertises it
- // and the connection is established with Accept().
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- // Create EP and start listening.
- wq := &waiter.Queue{}
- ep, err := c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, wq)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
- defer ep.Close()
-
- // Set the window size greater than the maximum non-scaled window.
- ep.SocketOptions().SetReceiveBufferSize(65535*6, true /* notify */)
-
- if err := ep.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
-
- if err := ep.Listen(10); err != nil {
- t.Fatalf("Listen failed: %s", err)
- }
-
- // Do 3-way handshake.
- // The expected wndScale is 3, as 65535*3*2 < 65535*2^3 but 65535*3*2 > 65535*2^2.
- c.PassiveConnectWithOptions(100, 3 /* wndScale */, header.TCPSynOptions{MSS: defaultIPv4MSS})
-
- // Try to accept the connection.
- we, ch := waiter.NewChannelEntry(nil)
- wq.EventRegister(&we, waiter.ReadableEvents)
- defer wq.EventUnregister(&we)
-
- c.EP, _, err = ep.Accept(nil)
- if cmp.Equal(&tcpip.ErrWouldBlock{}, err) {
- // Wait for connection to be established.
- select {
- case <-ch:
- c.EP, _, err = ep.Accept(nil)
- if err != nil {
- t.Fatalf("Accept failed: %s", err)
- }
-
- case <-time.After(1 * time.Second):
- t.Fatalf("Timed out waiting for accept")
- }
- }
-
- data := []byte{1, 2, 3}
- var r bytes.Reader
- r.Reset(data)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- // Check that data is received and that the advertised window is 0x5fff,
- // that is, that it is scaled.
- b := c.GetPacket()
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- checker.IPv4(t, b,
- checker.PayloadLen(len(data)+header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPWindow(0x5fff),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
-}
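A sketch of where the expected window scale of 3 comes from: pick the smallest scale at which the 16-bit window field can still cover the receive buffer. This mirrors the reasoning in the handshake comment above; findWndScale here is an illustrative stand-in, not necessarily the stack's FindWndScale implementation:

package main

import "fmt"

// findWndScale returns the smallest scale such that a 16-bit window shifted
// left by it covers rcvBuf, capped at the protocol maximum of 14.
func findWndScale(rcvBuf int) int {
	ws := 0
	for ws < 14 && 65535<<ws < rcvBuf {
		ws++
	}
	return ws
}

func main() {
	fmt.Println(findWndScale(65535 * 6)) // 3: 65535*2^2 < 65535*6 <= 65535*2^3
}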
-
-func TestNonScaledWindowAccept(t *testing.T) {
- // This test ensures that window scaling is not used when the peer
- // doesn't advertise it and the connection is established with Accept().
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- // Create EP and start listening.
- wq := &waiter.Queue{}
- ep, err := c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, wq)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
- defer ep.Close()
-
- // Set the window size greater than the maximum non-scaled window.
- ep.SocketOptions().SetReceiveBufferSize(65535*6, true /* notify */)
-
- if err := ep.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
-
- if err := ep.Listen(10); err != nil {
- t.Fatalf("Listen failed: %s", err)
- }
-
- // Do 3-way handshake w/ window scaling disabled. The SYN-ACK to the SYN
- // should not carry the window scaling option.
- c.PassiveConnect(100, -1, header.TCPSynOptions{MSS: defaultIPv4MSS})
-
- // Try to accept the connection.
- we, ch := waiter.NewChannelEntry(nil)
- wq.EventRegister(&we, waiter.ReadableEvents)
- defer wq.EventUnregister(&we)
-
- c.EP, _, err = ep.Accept(nil)
- if cmp.Equal(&tcpip.ErrWouldBlock{}, err) {
- // Wait for connection to be established.
- select {
- case <-ch:
- c.EP, _, err = ep.Accept(nil)
- if err != nil {
- t.Fatalf("Accept failed: %s", err)
- }
-
- case <-time.After(1 * time.Second):
- t.Fatalf("Timed out waiting for accept")
- }
- }
-
- data := []byte{1, 2, 3}
- var r bytes.Reader
- r.Reset(data)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- // Check that data is received and that the advertised window is 0xffff,
- // that is, that it is not scaled.
- b := c.GetPacket()
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- checker.IPv4(t, b,
- checker.PayloadLen(len(data)+header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPWindow(0xffff),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
-}
-
-func TestZeroScaledWindowReceive(t *testing.T) {
- // This test ensures that the endpoint sends a non-zero window size
- // advertisement when the scaled window transitions from 0 to non-zero,
- // even though the actual (unscaled) window never reached zero.
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- // Set the buffer size such that a window scale of 5 will be used.
- const bufSz = 65535 * 10
- const ws = uint32(5)
- c.CreateConnectedWithRawOptions(context.TestInitialSequenceNumber, 30000, bufSz, []byte{
- header.TCPOptionWS, 3, 0, header.TCPOptionNOP,
- })
-
- // Write chunks of 50000 bytes.
- remain := 0
- sent := 0
- data := make([]byte, 50000)
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- // Keep writing till the window drops below len(data).
- for {
- c.SendPacket(data, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss.Add(seqnum.Size(sent)),
- AckNum: c.IRS.Add(1),
- RcvWnd: 30000,
- })
- sent += len(data)
- pkt := c.GetPacket()
- checker.IPv4(t, pkt,
- checker.PayloadLen(header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)+uint32(sent)),
- checker.TCPFlags(header.TCPFlagAck),
- ),
- )
- // Don't reduce window to zero here.
- if wnd := int(header.TCP(header.IPv4(pkt).Payload()).WindowSize()); wnd<<ws < len(data) {
- remain = wnd << ws
- break
- }
- }
-
- // Make the window non-zero, but the scaled window zero.
- for remain >= 16 {
- data = data[:remain-15]
- c.SendPacket(data, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss.Add(seqnum.Size(sent)),
- AckNum: c.IRS.Add(1),
- RcvWnd: 30000,
- })
- sent += len(data)
- pkt := c.GetPacket()
- checker.IPv4(t, pkt,
- checker.PayloadLen(header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)+uint32(sent)),
- checker.TCPFlags(header.TCPFlagAck),
- ),
- )
- // Since the receive buffer is split between the window advertisement and the
- // application data buffer, the window does not always reflect the space
- // available; the actual space available can be a bit more than what is
- // advertised in the window.
- wnd := int(header.TCP(header.IPv4(pkt).Payload()).WindowSize())
- if wnd == 0 {
- break
- }
- remain = wnd << ws
- }
-
- // Read at least 2 MSS of data; an ACK should be sent in response. Since buffer
- // space is now split in half between the window and application data, we need
- // to read more than 1 MSS (65536) of data for a non-zero window update to be
- // sent. For 1 MSS worth of window to be available we need to read at least
- // 128KB. Since our segments above were 50KB each, this means reading at least
- // 3 packets.
- w := tcpip.LimitedWriter{
- W: ioutil.Discard,
- N: defaultMTU * 2,
- }
- for w.N != 0 {
- res, err := c.EP.Read(&w, tcpip.ReadOptions{})
- t.Logf("err=%v res=%#v", err, res)
- if err != nil {
- t.Fatalf("Read failed: %s", err)
- }
- }
-
- checker.IPv4(t, c.GetPacket(),
- checker.PayloadLen(header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)+uint32(sent)),
- checker.TCPWindowGreaterThanEq(uint16(defaultMTU>>ws)),
- checker.TCPFlags(header.TCPFlagAck),
- ),
- )
-}
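The situation the test above engineers, a non-zero window whose scaled advertisement is zero, comes down to the remaining space being smaller than 1<<ws. A minimal sketch with an illustrative value for the remaining space:

package main

import "fmt"

func main() {
	const ws = 5            // window scale used by the test above
	remaining := uint32(20) // illustrative amount of free receive buffer, < 1<<ws
	advertised := remaining >> ws
	fmt.Println(remaining > 0, advertised == 0) // true true: non-zero window, zero advertisement
}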
-
-func TestSegmentMerging(t *testing.T) {
- tests := []struct {
- name string
- stop func(tcpip.Endpoint)
- resume func(tcpip.Endpoint)
- }{
- {
- "stop work",
- func(ep tcpip.Endpoint) {
- ep.(interface{ StopWork() }).StopWork()
- },
- func(ep tcpip.Endpoint) {
- ep.(interface{ ResumeWork() }).ResumeWork()
- },
- },
- {
- "cork",
- func(ep tcpip.Endpoint) {
- ep.SocketOptions().SetCorkOption(true)
- },
- func(ep tcpip.Endpoint) {
- ep.SocketOptions().SetCorkOption(false)
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
-
- // Send tcp.InitialCwnd number of segments to fill up
- // InitialWindow but don't ACK. That should prevent
- // any more packets from going out.
- var r bytes.Reader
- for i := 0; i < tcp.InitialCwnd; i++ {
- r.Reset([]byte{0})
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write #%d failed: %s", i+1, err)
- }
- }
-
- // Now send the segments that should get merged as the congestion
- // window is full and we won't be able to send any more packets.
- var allData []byte
- for i, data := range [][]byte{{1, 2, 3, 4}, {5, 6, 7}, {8, 9}, {10}, {11}} {
- allData = append(allData, data...)
- r.Reset(data)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write #%d failed: %s", i+1, err)
- }
- }
-
- // Check that we get tcp.InitialCwnd packets.
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- for i := 0; i < tcp.InitialCwnd; i++ {
- b := c.GetPacket()
- checker.IPv4(t, b,
- checker.PayloadLen(header.TCPMinimumSize+1),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+uint32(i)+1),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
- }
-
- // Acknowledge the data.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(1 + 10), // 10 for the 10 bytes of payload.
- RcvWnd: 30000,
- })
-
- // Check that data is received.
- b := c.GetPacket()
- checker.IPv4(t, b,
- checker.PayloadLen(len(allData)+header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+11),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
-
- if got := b[header.IPv4MinimumSize+header.TCPMinimumSize:]; !bytes.Equal(got, allData) {
- t.Fatalf("got data = %v, want = %v", got, allData)
- }
-
- // Acknowledge the data.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(11 + seqnum.Size(len(allData))),
- RcvWnd: 30000,
- })
- })
- }
-}
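For the sequence-number bookkeeping in the checks above, a short sketch may help: the "+11" offsets follow from tcp.InitialCwnd one-byte segments in flight (10, per the test's own "1 + 10" arithmetic) plus the SYN, with the merged write going out as one segment right after them. The numbers below are illustrative only:

package main

import "fmt"

func main() {
	const initialCwnd = 10 // matches the "1 + 10" / "+11" arithmetic in the test
	irs := uint32(1000)    // illustrative initial send sequence number
	firstMergedSeq := irs + 1 + initialCwnd // SYN consumes 1, then 10 one-byte segments
	mergedLen := uint32(4 + 3 + 2 + 1 + 1)  // the five small writes that get coalesced
	fmt.Println(firstMergedSeq, firstMergedSeq+mergedLen) // 1011 1022
}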
-
-func TestDelay(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
-
- c.EP.SocketOptions().SetDelayOption(true)
-
- var allData []byte
- for i, data := range [][]byte{{0}, {1, 2, 3, 4}, {5, 6, 7}, {8, 9}, {10}, {11}} {
- allData = append(allData, data...)
- var r bytes.Reader
- r.Reset(data)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write #%d failed: %s", i+1, err)
- }
- }
-
- seq := c.IRS.Add(1)
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- for _, want := range [][]byte{allData[:1], allData[1:]} {
- // Check that data is received.
- b := c.GetPacket()
- checker.IPv4(t, b,
- checker.PayloadLen(len(want)+header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(seq)),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
-
- if got := b[header.IPv4MinimumSize+header.TCPMinimumSize:]; !bytes.Equal(got, want) {
- t.Fatalf("got data = %v, want = %v", got, want)
- }
-
- seq = seq.Add(seqnum.Size(len(want)))
- // Acknowledge the data.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: seq,
- RcvWnd: 30000,
- })
- }
-}
-
-func TestUndelay(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
-
- c.EP.SocketOptions().SetDelayOption(true)
-
- allData := [][]byte{{0}, {1, 2, 3}}
- for i, data := range allData {
- var r bytes.Reader
- r.Reset(data)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write #%d failed: %s", i+1, err)
- }
- }
-
- seq := c.IRS.Add(1)
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- // Check that data is received.
- first := c.GetPacket()
- checker.IPv4(t, first,
- checker.PayloadLen(len(allData[0])+header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(seq)),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
-
- if got, want := first[header.IPv4MinimumSize+header.TCPMinimumSize:], allData[0]; !bytes.Equal(got, want) {
- t.Fatalf("got first packet's data = %v, want = %v", got, want)
- }
-
- seq = seq.Add(seqnum.Size(len(allData[0])))
-
- // Check that we don't get the second packet yet.
- c.CheckNoPacketTimeout("delayed second packet transmitted", 100*time.Millisecond)
-
- c.EP.SocketOptions().SetDelayOption(false)
-
- // Check that data is received.
- second := c.GetPacket()
- checker.IPv4(t, second,
- checker.PayloadLen(len(allData[1])+header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(seq)),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
-
- if got, want := second[header.IPv4MinimumSize+header.TCPMinimumSize:], allData[1]; !bytes.Equal(got, want) {
- t.Fatalf("got second packet's data = %v, want = %v", got, want)
- }
-
- seq = seq.Add(seqnum.Size(len(allData[1])))
-
- // Acknowledge the data.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: seq,
- RcvWnd: 30000,
- })
-}
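A simplified, illustrative view of the delay behaviour exercised by TestDelay and TestUndelay (roughly Nagle's algorithm): a sub-MSS segment is held back while unacknowledged data is in flight, and is released once the delay option is cleared or the outstanding data is acknowledged. shouldDelay below is a sketch under that assumption, not the endpoint's actual sender logic:

package main

import "fmt"

// shouldDelay reports whether a small segment would be held back.
func shouldDelay(delayEnabled bool, segLen, mss, outstanding int) bool {
	return delayEnabled && segLen < mss && outstanding > 0
}

func main() {
	// Second write in TestUndelay: a tiny segment while the first byte is unacked.
	fmt.Println(shouldDelay(true, 3, 1460, 1))  // true: held back
	fmt.Println(shouldDelay(false, 3, 1460, 1)) // false: flushed once delay is off
}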
-
-func TestMSSNotDelayed(t *testing.T) {
- tests := []struct {
- name string
- fn func(tcpip.Endpoint)
- }{
- {"no-op", func(tcpip.Endpoint) {}},
- {"delay", func(ep tcpip.Endpoint) { ep.SocketOptions().SetDelayOption(true) }},
- {"cork", func(ep tcpip.Endpoint) { ep.SocketOptions().SetCorkOption(true) }},
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- const maxPayload = 100
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateConnectedWithRawOptions(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */, []byte{
- header.TCPOptionMSS, 4, byte(maxPayload / 256), byte(maxPayload % 256),
- })
-
- test.fn(c.EP)
-
- allData := [][]byte{{0}, make([]byte, maxPayload), make([]byte, maxPayload)}
- for i, data := range allData {
- var r bytes.Reader
- r.Reset(data)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write #%d failed: %s", i+1, err)
- }
- }
-
- seq := c.IRS.Add(1)
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- for i, data := range allData {
- // Check that data is received.
- packet := c.GetPacket()
- checker.IPv4(t, packet,
- checker.PayloadLen(len(data)+header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(seq)),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
-
- if got, want := packet[header.IPv4MinimumSize+header.TCPMinimumSize:], data; !bytes.Equal(got, want) {
- t.Fatalf("got packet #%d's data = %v, want = %v", i+1, got, want)
- }
-
- seq = seq.Add(seqnum.Size(len(data)))
- }
-
- // Acknowledge the data.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: seq,
- RcvWnd: 30000,
- })
- })
- }
-}
-
-func testBrokenUpWrite(t *testing.T, c *context.Context, maxPayload int) {
- payloadMultiplier := 10
- dataLen := payloadMultiplier * maxPayload
- data := make([]byte, dataLen)
- for i := range data {
- data[i] = byte(i)
- }
-
- var r bytes.Reader
- r.Reset(data)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- // Check that data is received in chunks.
- bytesReceived := 0
- numPackets := 0
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- for bytesReceived != dataLen {
- b := c.GetPacket()
- numPackets++
- tcpHdr := header.TCP(header.IPv4(b).Payload())
- payloadLen := len(tcpHdr.Payload())
- checker.IPv4(t, b,
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1+uint32(bytesReceived)),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
-
- pdata := data[bytesReceived : bytesReceived+payloadLen]
- if p := tcpHdr.Payload(); !bytes.Equal(pdata, p) {
- t.Fatalf("got data = %v, want = %v", p, pdata)
- }
- bytesReceived += payloadLen
- var options []byte
- if c.TimeStampEnabled {
- // If timestamp option is enabled, echo back the timestamp and increment
- // the TSEcr value included in the packet and send that back as the TSVal.
- parsedOpts := tcpHdr.ParsedOptions()
- tsOpt := [12]byte{header.TCPOptionNOP, header.TCPOptionNOP}
- header.EncodeTSOption(parsedOpts.TSEcr+1, parsedOpts.TSVal, tsOpt[2:])
- options = tsOpt[:]
- }
- // Acknowledge the data.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(1 + seqnum.Size(bytesReceived)),
- RcvWnd: 30000,
- TCPOpts: options,
- })
- }
- if numPackets == 1 {
- t.Fatalf("expected write to be broken up into multiple packets, but got 1 packet")
- }
-}
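The expectation in testBrokenUpWrite that a single packet is a failure follows from simple segmentation arithmetic; a standalone sketch using the helper's own constants (maxPayload here matches the MSS used by most callers below):

package main

import "fmt"

func main() {
	const maxPayload = 100       // MSS negotiated by most callers of testBrokenUpWrite
	const payloadMultiplier = 10 // as in testBrokenUpWrite
	dataLen := payloadMultiplier * maxPayload
	minPackets := (dataLen + maxPayload - 1) / maxPayload // ceil(dataLen / maxPayload)
	fmt.Println(minPackets) // 10, so observing exactly one packet indicates a bug
}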
-
-func TestSendGreaterThanMTU(t *testing.T) {
- const maxPayload = 100
- c := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxPayload))
- defer c.Cleanup()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
- testBrokenUpWrite(t, c, maxPayload)
-}
-
-func TestSetTTL(t *testing.T) {
- for _, wantTTL := range []uint8{1, 2, 50, 64, 128, 254, 255} {
- t.Run(fmt.Sprintf("TTL:%d", wantTTL), func(t *testing.T) {
- c := context.New(t, 65535)
- defer c.Cleanup()
-
- var err tcpip.Error
- c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &waiter.Queue{})
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
-
- if err := c.EP.SetSockOptInt(tcpip.TTLOption, int(wantTTL)); err != nil {
- t.Fatalf("SetSockOptInt(TTLOption, %d) failed: %s", wantTTL, err)
- }
-
- {
- err := c.EP.Connect(tcpip.FullAddress{Addr: context.TestAddr, Port: context.TestPort})
- if d := cmp.Diff(&tcpip.ErrConnectStarted{}, err); d != "" {
- t.Fatalf("c.EP.Connect(...) mismatch (-want +got):\n%s", d)
- }
- }
-
- // Receive SYN packet.
- b := c.GetPacket()
-
- checker.IPv4(t, b, checker.TTL(wantTTL))
- })
- }
-}
-
-func TestActiveSendMSSLessThanMTU(t *testing.T) {
- const maxPayload = 100
- c := context.New(t, 65535)
- defer c.Cleanup()
-
- c.CreateConnectedWithRawOptions(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */, []byte{
- header.TCPOptionMSS, 4, byte(maxPayload / 256), byte(maxPayload % 256),
- })
- testBrokenUpWrite(t, c, maxPayload)
-}
-
-func TestPassiveSendMSSLessThanMTU(t *testing.T) {
- const maxPayload = 100
- const mtu = 1200
- c := context.New(t, mtu)
- defer c.Cleanup()
-
- // Create EP and start listening.
- wq := &waiter.Queue{}
- ep, err := c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, wq)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
- defer ep.Close()
-
- // Set the buffer size to a deterministic size so that we can check the
- // window scaling option.
- const rcvBufferSize = 0x20000
- ep.SocketOptions().SetReceiveBufferSize(rcvBufferSize*2, true /* notify */)
-
- if err := ep.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
-
- if err := ep.Listen(10); err != nil {
- t.Fatalf("Listen failed: %s", err)
- }
-
- // Do 3-way handshake.
- c.PassiveConnect(maxPayload, -1, header.TCPSynOptions{MSS: mtu - header.IPv4MinimumSize - header.TCPMinimumSize})
-
- // Try to accept the connection.
- we, ch := waiter.NewChannelEntry(nil)
- wq.EventRegister(&we, waiter.ReadableEvents)
- defer wq.EventUnregister(&we)
-
- c.EP, _, err = ep.Accept(nil)
- if cmp.Equal(&tcpip.ErrWouldBlock{}, err) {
- // Wait for connection to be established.
- select {
- case <-ch:
- c.EP, _, err = ep.Accept(nil)
- if err != nil {
- t.Fatalf("Accept failed: %s", err)
- }
-
- case <-time.After(1 * time.Second):
- t.Fatalf("Timed out waiting for accept")
- }
- }
-
- // Check that data gets properly segmented.
- testBrokenUpWrite(t, c, maxPayload)
-}
-
-func TestSynCookiePassiveSendMSSLessThanMTU(t *testing.T) {
- const maxPayload = 536
- const mtu = 2000
- c := context.New(t, mtu)
- defer c.Cleanup()
-
- opt := tcpip.TCPAlwaysUseSynCookies(true)
- if err := c.Stack().SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- t.Fatalf("SetTransportProtocolOption(%d, &%T(%t)): %s", tcp.ProtocolNumber, opt, opt, err)
- }
-
- // Create EP and start listening.
- wq := &waiter.Queue{}
- ep, err := c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, wq)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
- defer ep.Close()
-
- if err := ep.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
-
- if err := ep.Listen(10); err != nil {
- t.Fatalf("Listen failed: %s", err)
- }
-
- // Do 3-way handshake.
- c.PassiveConnect(maxPayload, -1, header.TCPSynOptions{MSS: mtu - header.IPv4MinimumSize - header.TCPMinimumSize})
-
- // Try to accept the connection.
- we, ch := waiter.NewChannelEntry(nil)
- wq.EventRegister(&we, waiter.ReadableEvents)
- defer wq.EventUnregister(&we)
-
- c.EP, _, err = ep.Accept(nil)
- if cmp.Equal(&tcpip.ErrWouldBlock{}, err) {
- // Wait for connection to be established.
- select {
- case <-ch:
- c.EP, _, err = ep.Accept(nil)
- if err != nil {
- t.Fatalf("Accept failed: %s", err)
- }
-
- case <-time.After(1 * time.Second):
- t.Fatalf("Timed out waiting for accept")
- }
- }
-
- // Check that data gets properly segmented.
- testBrokenUpWrite(t, c, maxPayload)
-}
-
-func TestForwarderSendMSSLessThanMTU(t *testing.T) {
- const maxPayload = 100
- const mtu = 1200
- c := context.New(t, mtu)
- defer c.Cleanup()
-
- s := c.Stack()
- ch := make(chan tcpip.Error, 1)
- f := tcp.NewForwarder(s, 65536, 10, func(r *tcp.ForwarderRequest) {
- var err tcpip.Error
- c.EP, err = r.CreateEndpoint(&c.WQ)
- ch <- err
- })
- s.SetTransportProtocolHandler(tcp.ProtocolNumber, f.HandlePacket)
-
- // Do 3-way handshake.
- c.PassiveConnect(maxPayload, -1, header.TCPSynOptions{MSS: mtu - header.IPv4MinimumSize - header.TCPMinimumSize})
-
- // Wait for connection to be available.
- select {
- case err := <-ch:
- if err != nil {
- t.Fatalf("Error creating endpoint: %s", err)
- }
- case <-time.After(2 * time.Second):
- t.Fatalf("Timed out waiting for connection")
- }
-
- // Check that data gets properly segmented.
- testBrokenUpWrite(t, c, maxPayload)
-}
-
-func TestSynOptionsOnActiveConnect(t *testing.T) {
- const mtu = 1400
- c := context.New(t, mtu)
- defer c.Cleanup()
-
- // Create TCP endpoint.
- var err tcpip.Error
- c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &c.WQ)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
-
- // Set the buffer size to a deterministic size so that we can check the
- // window scaling option.
- const rcvBufferSize = 0x20000
- const wndScale = 3
- c.EP.SocketOptions().SetReceiveBufferSize(rcvBufferSize*2, true /* notify */)
-
- // Start connection attempt.
- we, ch := waiter.NewChannelEntry(nil)
- c.WQ.EventRegister(&we, waiter.WritableEvents)
- defer c.WQ.EventUnregister(&we)
-
- {
- err := c.EP.Connect(tcpip.FullAddress{Addr: context.TestAddr, Port: context.TestPort})
- if d := cmp.Diff(&tcpip.ErrConnectStarted{}, err); d != "" {
- t.Fatalf("c.EP.Connect(...) mismatch (-want +got):\n%s", d)
- }
- }
-
- // Receive SYN packet.
- b := c.GetPacket()
- mss := uint16(mtu - header.IPv4MinimumSize - header.TCPMinimumSize)
- checker.IPv4(t, b,
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagSyn),
- checker.TCPSynOptions(header.TCPSynOptions{MSS: mss, WS: wndScale}),
- ),
- )
-
- tcpHdr := header.TCP(header.IPv4(b).Payload())
- c.IRS = seqnum.Value(tcpHdr.SequenceNumber())
-
- // Wait for retransmit.
- time.Sleep(1 * time.Second)
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagSyn),
- checker.SrcPort(tcpHdr.SourcePort()),
- checker.TCPSeqNum(tcpHdr.SequenceNumber()),
- checker.TCPSynOptions(header.TCPSynOptions{MSS: mss, WS: wndScale}),
- ),
- )
-
- // Send SYN-ACK.
- iss := seqnum.Value(context.TestInitialSequenceNumber)
- c.SendPacket(nil, &context.Headers{
- SrcPort: tcpHdr.DestinationPort(),
- DstPort: tcpHdr.SourcePort(),
- Flags: header.TCPFlagSyn | header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(1),
- RcvWnd: 30000,
- })
-
- // Receive ACK packet.
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagAck),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)+1),
- ),
- )
-
- // Wait for connection to be established.
- select {
- case <-ch:
- if err := c.EP.LastError(); err != nil {
- t.Fatalf("Connect failed: %s", err)
- }
- case <-time.After(1 * time.Second):
- t.Fatalf("Timed out waiting for connection")
- }
-}
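The MSS asserted in the SYN options above is just the MTU minus the minimum IPv4 and TCP header sizes; spelled out as a quick check:

package main

import "fmt"

func main() {
	const mtu = 1400
	const ipv4MinimumSize = 20 // minimum IPv4 header, no options
	const tcpMinimumSize = 20  // minimum TCP header, no options
	fmt.Println(mtu - ipv4MinimumSize - tcpMinimumSize) // 1360
}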
-
-func TestCloseListener(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- // Create listener.
- var wq waiter.Queue
- ep, err := c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &wq)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
-
- if err := ep.Bind(tcpip.FullAddress{}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
-
- if err := ep.Listen(10); err != nil {
- t.Fatalf("Listen failed: %s", err)
- }
-
- // Close the listener and measure how long it takes.
- t0 := time.Now()
- ep.Close()
- if diff := time.Since(t0); diff > 3*time.Second {
- t.Fatalf("Took too long to close: %s", diff)
- }
-}
-
-func TestReceiveOnResetConnection(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
-
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- // Send RST segment.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagRst,
- SeqNum: iss,
- RcvWnd: 30000,
- })
-
- // Try to read.
- we, ch := waiter.NewChannelEntry(nil)
- c.WQ.EventRegister(&we, waiter.ReadableEvents)
- defer c.WQ.EventUnregister(&we)
-
-loop:
- for {
- switch _, err := c.EP.Read(ioutil.Discard, tcpip.ReadOptions{}); err.(type) {
- case *tcpip.ErrWouldBlock:
- <-ch
- // Expect the state to be StateError and subsequent Reads to fail with HardError.
- _, err := c.EP.Read(ioutil.Discard, tcpip.ReadOptions{})
- if d := cmp.Diff(&tcpip.ErrConnectionReset{}, err); d != "" {
- t.Fatalf("c.EP.Read() mismatch (-want +got):\n%s", d)
- }
- break loop
- case *tcpip.ErrConnectionReset:
- break loop
- default:
- t.Fatalf("got c.EP.Read(nil) = %v, want = %s", err, &tcpip.ErrConnectionReset{})
- }
- }
-
- if got := tcp.EndpointState(c.EP.State()); got != tcp.StateError {
- t.Fatalf("got EP state = %v, want = %v", got, tcp.StateError)
- }
-
- checkValid := func() []error {
- var errors []error
- if got := c.Stack().Stats().TCP.EstablishedResets.Value(); got != 1 {
- errors = append(errors, fmt.Errorf("got stats.TCP.EstablishedResets.Value() = %d, want = 1", got))
- }
- if got := c.Stack().Stats().TCP.CurrentEstablished.Value(); got != 0 {
- errors = append(errors, fmt.Errorf("got stats.TCP.CurrentEstablished.Value() = %d, want = 0", got))
- }
- if got := c.Stack().Stats().TCP.CurrentConnected.Value(); got != 0 {
- errors = append(errors, fmt.Errorf("got stats.TCP.CurrentConnected.Value() = %d, want = 0", got))
- }
- return errors
- }
-
- start := time.Now()
- for time.Since(start) < time.Minute && len(checkValid()) > 0 {
- time.Sleep(50 * time.Millisecond)
- }
- for _, err := range checkValid() {
- t.Error(err)
- }
-}
-
-func TestSendOnResetConnection(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
-
- // Send RST segment.
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagRst,
- SeqNum: iss,
- RcvWnd: 30000,
- })
-
- // Wait for the RST to be received.
- time.Sleep(1 * time.Second)
-
- // Try to write.
- var r bytes.Reader
- r.Reset(make([]byte, 10))
- _, err := c.EP.Write(&r, tcpip.WriteOptions{})
- if d := cmp.Diff(&tcpip.ErrConnectionReset{}, err); d != "" {
- t.Fatalf("c.EP.Write(...) mismatch (-want +got):\n%s", d)
- }
-}
-
- // TestMaxRetransmitsTimeout tests that the connection times out after
- // a segment has been retransmitted MaxRetries times.
-func TestMaxRetransmitsTimeout(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- const numRetries = 2
- opt := tcpip.TCPMaxRetriesOption(numRetries)
- if err := c.Stack().SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- t.Fatalf("SetTransportProtocolOption(%d, &%T(%d)): %s", tcp.ProtocolNumber, opt, opt, err)
- }
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000 /* rcvWnd */, -1 /* epRcvBuf */)
-
- waitEntry, notifyCh := waiter.NewChannelEntry(nil)
- c.WQ.EventRegister(&waitEntry, waiter.EventHUp)
- defer c.WQ.EventUnregister(&waitEntry)
-
- var r bytes.Reader
- r.Reset(make([]byte, 1))
- _, err := c.EP.Write(&r, tcpip.WriteOptions{})
- if err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- // Expect first transmit and MaxRetries retransmits.
- for i := 0; i < numRetries+1; i++ {
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagAck|header.TCPFlagPsh),
- ),
- )
- }
- // Wait for the connection to timeout after MaxRetries retransmits.
- initRTO := 1 * time.Second
- select {
- case <-notifyCh:
- case <-time.After((2 << numRetries) * initRTO):
- t.Fatalf("connection still alive after maximum retransmits.\n")
- }
-
- // Send an ACK and expect a RST as the connection would have been closed.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- })
-
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagRst),
- ),
- )
-
- if got := c.Stack().Stats().TCP.EstablishedTimedout.Value(); got != 1 {
- t.Errorf("got c.Stack().Stats().TCP.EstablishedTimedout.Value() = %d, want = 1", got)
- }
- if got := c.Stack().Stats().TCP.CurrentConnected.Value(); got != 0 {
- t.Errorf("got stats.TCP.CurrentConnected.Value() = %d, want = 0", got)
- }
-}
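The deadline used above, (2 << numRetries) * initRTO, is a safe upper bound on the whole retransmission sequence under exponential backoff starting at initRTO; a quick check of that arithmetic (a sketch of the bound only, not the stack's timer logic):

package main

import (
	"fmt"
	"time"
)

func main() {
	const numRetries = 2
	initRTO := 1 * time.Second
	var total time.Duration
	for i := 0; i <= numRetries; i++ { // first transmit plus numRetries retransmits
		total += initRTO * time.Duration(1<<i) // exponential backoff: 1s, 2s, 4s
	}
	deadline := (2 << numRetries) * initRTO
	fmt.Println(total, deadline, total < deadline) // 7s 8s true
}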
-
- // TestMaxRTO tests that the retransmission interval is capped at MaxRTO.
-func TestMaxRTO(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- rto := 1 * time.Second
- opt := tcpip.TCPMaxRTOOption(rto)
- if err := c.Stack().SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- t.Fatalf("SetTransportProtocolOption(%d, &%T(%d)): %s", tcp.ProtocolNumber, opt, opt, err)
- }
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000 /* rcvWnd */, -1 /* epRcvBuf */)
-
- var r bytes.Reader
- r.Reset(make([]byte, 1))
- _, err := c.EP.Write(&r, tcpip.WriteOptions{})
- if err != nil {
- t.Fatalf("Write failed: %s", err)
- }
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
- const numRetransmits = 2
- for i := 0; i < numRetransmits; i++ {
- start := time.Now()
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
- if time.Since(start).Round(time.Second).Seconds() != rto.Seconds() {
- t.Errorf("Retransmit interval not capped to MaxRTO.\n")
- }
- }
-}
-
- // TestZeroSizedWriteRetransmit tests that a zero-sized write does not
- // result in a panic on an RTO, as no segment should have been queued for
- // a zero-sized write.
-func TestZeroSizedWriteRetransmit(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000 /* rcvWnd */, -1 /* epRcvBuf */)
-
- var r bytes.Reader
- _, err := c.EP.Write(&r, tcpip.WriteOptions{})
- if err != nil {
- t.Fatalf("Write failed: %s", err)
- }
- // Now do a non-zero sized write to trigger actual sending of data.
- r.Reset(make([]byte, 1))
- _, err = c.EP.Write(&r, tcpip.WriteOptions{})
- if err != nil {
- t.Fatalf("Write failed: %s", err)
- }
- // Do not ACK the packet and expect an original transmit and a
- // retransmit. This should not cause a panic.
- for i := 0; i < 2; i++ {
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
- }
-}
-
-// TestRetransmitIPv4IDUniqueness tests that the IPv4 Identification field is
-// unique on retransmits.
-func TestRetransmitIPv4IDUniqueness(t *testing.T) {
- for _, tc := range []struct {
- name string
- size int
- }{
- {"1Byte", 1},
- {"512Bytes", 512},
- } {
- t.Run(tc.name, func(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000 /* rcvWnd */, -1 /* epRcvBuf */)
-
- // Disabling PMTU discovery causes all packets sent from this socket to
- // have DF=0. This needs to be done because the IPv4 ID uniqueness
- // applies only to non-atomic IPv4 datagrams as defined in RFC 6864
- // Section 4, and datagrams with DF=0 are non-atomic.
- if err := c.EP.SetSockOptInt(tcpip.MTUDiscoverOption, tcpip.PMTUDiscoveryDont); err != nil {
- t.Fatalf("disabling PMTU discovery via sockopt to force DF=0 failed: %s", err)
- }
-
- var r bytes.Reader
- r.Reset(make([]byte, tc.size))
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
- pkt := c.GetPacket()
- checker.IPv4(t, pkt,
- checker.FragmentFlags(0),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
- idSet := map[uint16]struct{}{header.IPv4(pkt).ID(): {}}
- // Expect retransmitted packets, and check that all packets received have
- // unique IPv4 ID values.
- for i := 0; i <= 2; i++ {
- pkt := c.GetPacket()
- checker.IPv4(t, pkt,
- checker.FragmentFlags(0),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
- id := header.IPv4(pkt).ID()
- if _, exists := idSet[id]; exists {
- t.Fatalf("duplicate IPv4 ID=%d found in retransmitted packet", id)
- }
- idSet[id] = struct{}{}
- }
- })
- }
-}
-
-func TestFinImmediately(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
-
- // Shutdown immediately, check that we get a FIN.
- if err := c.EP.Shutdown(tcpip.ShutdownWrite); err != nil {
- t.Fatalf("Shutdown failed: %s", err)
- }
-
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- checker.IPv4(t, c.GetPacket(),
- checker.PayloadLen(header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlags(header.TCPFlagAck|header.TCPFlagFin),
- ),
- )
-
- // Ack and send FIN as well.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck | header.TCPFlagFin,
- SeqNum: iss,
- AckNum: c.IRS.Add(2),
- RcvWnd: 30000,
- })
-
- // Check that the stack acks the FIN.
- checker.IPv4(t, c.GetPacket(),
- checker.PayloadLen(header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+2),
- checker.TCPAckNum(uint32(iss)+1),
- checker.TCPFlags(header.TCPFlagAck),
- ),
- )
-}
-
-func TestFinRetransmit(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
-
- // Shutdown immediately, check that we get a FIN.
- if err := c.EP.Shutdown(tcpip.ShutdownWrite); err != nil {
- t.Fatalf("Shutdown failed: %s", err)
- }
-
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- checker.IPv4(t, c.GetPacket(),
- checker.PayloadLen(header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlags(header.TCPFlagAck|header.TCPFlagFin),
- ),
- )
-
- // Don't acknowledge yet. We should get a retransmit of the FIN.
- checker.IPv4(t, c.GetPacket(),
- checker.PayloadLen(header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlags(header.TCPFlagAck|header.TCPFlagFin),
- ),
- )
-
- // Ack and send FIN as well.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck | header.TCPFlagFin,
- SeqNum: iss,
- AckNum: c.IRS.Add(2),
- RcvWnd: 30000,
- })
-
- // Check that the stack acks the FIN.
- checker.IPv4(t, c.GetPacket(),
- checker.PayloadLen(header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+2),
- checker.TCPAckNum(uint32(iss)+1),
- checker.TCPFlags(header.TCPFlagAck),
- ),
- )
-}
-
-func TestFinWithNoPendingData(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
-
- // Write something out, and have it acknowledged.
- view := make([]byte, 10)
- var r bytes.Reader
- r.Reset(view)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- next := uint32(c.IRS) + 1
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- checker.IPv4(t, c.GetPacket(),
- checker.PayloadLen(len(view)+header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(next),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
- next += uint32(len(view))
-
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: seqnum.Value(next),
- RcvWnd: 30000,
- })
-
- // Shutdown, check that we get a FIN.
- if err := c.EP.Shutdown(tcpip.ShutdownWrite); err != nil {
- t.Fatalf("Shutdown failed: %s", err)
- }
-
- checker.IPv4(t, c.GetPacket(),
- checker.PayloadLen(header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(next),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlags(header.TCPFlagAck|header.TCPFlagFin),
- ),
- )
- next++
-
- // Ack and send FIN as well.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck | header.TCPFlagFin,
- SeqNum: iss,
- AckNum: seqnum.Value(next),
- RcvWnd: 30000,
- })
-
- // Check that the stack acks the FIN.
- checker.IPv4(t, c.GetPacket(),
- checker.PayloadLen(header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(next),
- checker.TCPAckNum(uint32(iss)+1),
- checker.TCPFlags(header.TCPFlagAck),
- ),
- )
-}
-
-func TestFinWithPendingDataCwndFull(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
-
- // Write enough segments to fill the congestion window before ACK'ing
- // any of them.
- view := make([]byte, 10)
- var r bytes.Reader
- for i := tcp.InitialCwnd; i > 0; i-- {
- r.Reset(view)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
- }
-
- next := uint32(c.IRS) + 1
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- for i := tcp.InitialCwnd; i > 0; i-- {
- checker.IPv4(t, c.GetPacket(),
- checker.PayloadLen(len(view)+header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(next),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
- next += uint32(len(view))
- }
-
- // Shutdown the connection, check that the FIN segment isn't sent
- // because the congestion window doesn't allow it. Wait until a
- // retransmit is received.
- if err := c.EP.Shutdown(tcpip.ShutdownWrite); err != nil {
- t.Fatalf("Shutdown failed: %s", err)
- }
-
- checker.IPv4(t, c.GetPacket(),
- checker.PayloadLen(len(view)+header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
-
- // Send the ACK that will allow the FIN to be sent as well.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: seqnum.Value(next),
- RcvWnd: 30000,
- })
-
- checker.IPv4(t, c.GetPacket(),
- checker.PayloadLen(header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(next),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlags(header.TCPFlagAck|header.TCPFlagFin),
- ),
- )
- next++
-
- // Send a FIN that acknowledges everything. Get an ACK back.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck | header.TCPFlagFin,
- SeqNum: iss,
- AckNum: seqnum.Value(next),
- RcvWnd: 30000,
- })
-
- checker.IPv4(t, c.GetPacket(),
- checker.PayloadLen(header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(next),
- checker.TCPAckNum(uint32(iss)+1),
- checker.TCPFlags(header.TCPFlagAck),
- ),
- )
-}
-
-func TestFinWithPendingData(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
-
- // Write something out, and acknowledge it to get cwnd to 2.
- view := make([]byte, 10)
- var r bytes.Reader
- r.Reset(view)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- next := uint32(c.IRS) + 1
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- checker.IPv4(t, c.GetPacket(),
- checker.PayloadLen(len(view)+header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(next),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
- next += uint32(len(view))
-
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: seqnum.Value(next),
- RcvWnd: 30000,
- })
-
- // Write new data, but don't acknowledge it.
- r.Reset(view)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- checker.IPv4(t, c.GetPacket(),
- checker.PayloadLen(len(view)+header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(next),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
- next += uint32(len(view))
-
- // Shutdown the connection, check that we do get a FIN.
- if err := c.EP.Shutdown(tcpip.ShutdownWrite); err != nil {
- t.Fatalf("Shutdown failed: %s", err)
- }
-
- checker.IPv4(t, c.GetPacket(),
- checker.PayloadLen(header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(next),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlags(header.TCPFlagAck|header.TCPFlagFin),
- ),
- )
- next++
-
- // Send a FIN that acknowledges everything. Get an ACK back.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck | header.TCPFlagFin,
- SeqNum: iss,
- AckNum: seqnum.Value(next),
- RcvWnd: 30000,
- })
-
- checker.IPv4(t, c.GetPacket(),
- checker.PayloadLen(header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(next),
- checker.TCPAckNum(uint32(iss)+1),
- checker.TCPFlags(header.TCPFlagAck),
- ),
- )
-}
-
-func TestFinWithPartialAck(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
-
- // Write something out, and acknowledge it to get cwnd to 2. Also send
- // FIN from the test side.
- view := make([]byte, 10)
- var r bytes.Reader
- r.Reset(view)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- next := uint32(c.IRS) + 1
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- checker.IPv4(t, c.GetPacket(),
- checker.PayloadLen(len(view)+header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(next),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
- next += uint32(len(view))
-
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck | header.TCPFlagFin,
- SeqNum: iss,
- AckNum: seqnum.Value(next),
- RcvWnd: 30000,
- })
-
- // Check that we get an ACK for the FIN.
- checker.IPv4(t, c.GetPacket(),
- checker.PayloadLen(header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(next),
- checker.TCPAckNum(uint32(iss)+1),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
-
- // Write new data, but don't acknowledge it.
- r.Reset(view)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- checker.IPv4(t, c.GetPacket(),
- checker.PayloadLen(len(view)+header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(next),
- checker.TCPAckNum(uint32(iss)+1),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
- next += uint32(len(view))
-
- // Shutdown the connection, check that we do get a FIN.
- if err := c.EP.Shutdown(tcpip.ShutdownWrite); err != nil {
- t.Fatalf("Shutdown failed: %s", err)
- }
-
- checker.IPv4(t, c.GetPacket(),
- checker.PayloadLen(header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(next),
- checker.TCPAckNum(uint32(iss)+1),
- checker.TCPFlags(header.TCPFlagAck|header.TCPFlagFin),
- ),
- )
- next++
-
- // Send an ACK for the data, but not for the FIN yet.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss.Add(1),
- AckNum: seqnum.Value(next - 1),
- RcvWnd: 30000,
- })
-
- // Check that we don't get a retransmit of the FIN.
- c.CheckNoPacketTimeout("FIN retransmitted when data was ack'd", 100*time.Millisecond)
-
- // Ack the FIN.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck | header.TCPFlagFin,
- SeqNum: iss.Add(1),
- AckNum: seqnum.Value(next),
- RcvWnd: 30000,
- })
-}
-
-func TestUpdateListenBacklog(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- // Create listener.
- var wq waiter.Queue
- ep, err := c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &wq)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
-
- if err := ep.Bind(tcpip.FullAddress{}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
-
- if err := ep.Listen(10); err != nil {
- t.Fatalf("Listen failed: %s", err)
- }
-
- // Update the backlog with another Listen() on the same endpoint.
- if err := ep.Listen(20); err != nil {
- t.Fatalf("Listen failed to update backlog: %s", err)
- }
-
- ep.Close()
-}
-
-func scaledSendWindow(t *testing.T, scale uint8) {
- // This test ensures that the endpoint is using the right scaling by
- // sending a buffer that is larger than the window size, and ensuring
- // that the endpoint doesn't send more than allowed.
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- maxPayload := defaultMTU - header.IPv4MinimumSize - header.TCPMinimumSize
- c.CreateConnectedWithRawOptions(context.TestInitialSequenceNumber, 0, -1 /* epRcvBuf */, []byte{
- header.TCPOptionMSS, 4, byte(maxPayload / 256), byte(maxPayload % 256),
- header.TCPOptionWS, 3, scale, header.TCPOptionNOP,
- })
-
- // Open up the window with a scaled value.
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(1),
- RcvWnd: 1,
- })
-
- // Send some data. Check that it's capped by the window size.
- view := make([]byte, 65535)
- var r bytes.Reader
- r.Reset(view)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- // Check that only data that fits in the scaled window is sent.
- checker.IPv4(t, c.GetPacket(),
- checker.PayloadLen((1<<scale)+header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
-
- // Reset the connection to free resources.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagRst,
- SeqNum: iss,
- })
-}
-
-func TestScaledSendWindow(t *testing.T) {
- for scale := uint8(0); scale <= 14; scale++ {
- scaledSendWindow(t, scale)
- }
-}
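-
-// scaledWindow is a minimal sketch (a hypothetical helper, not from the
-// original file) of the arithmetic behind scaledSendWindow above: the
-// advertised 16-bit window is shifted left by the negotiated window-scale
-// option, so an advertised window of 1 with scale 3 permits 1<<3 = 8 bytes.
-func scaledWindow(rcvWnd uint16, scale uint8) uint32 {
-	return uint32(rcvWnd) << scale
-}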
-
-func TestReceivedValidSegmentCountIncrement(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
- stats := c.Stack().Stats()
- want := stats.TCP.ValidSegmentsReceived.Value() + 1
-
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(1),
- RcvWnd: 30000,
- })
-
- if got := stats.TCP.ValidSegmentsReceived.Value(); got != want {
- t.Errorf("got stats.TCP.ValidSegmentsReceived.Value() = %d, want = %d", got, want)
- }
- if got := c.EP.Stats().(*tcp.Stats).SegmentsReceived.Value(); got != want {
- t.Errorf("got EP stats Stats.SegmentsReceived = %d, want = %d", got, want)
- }
- // Ensure there were no errors during handshake. If these stats have
- // incremented, then the connection should not have been established.
- if got := c.EP.Stats().(*tcp.Stats).SendErrors.NoRoute.Value(); got != 0 {
- t.Errorf("got EP stats Stats.SendErrors.NoRoute = %d, want = %d", got, 0)
- }
-}
-
-func TestReceivedInvalidSegmentCountIncrement(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
- stats := c.Stack().Stats()
- want := stats.TCP.InvalidSegmentsReceived.Value() + 1
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- vv := c.BuildSegment(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(1),
- RcvWnd: 30000,
- })
- tcpbuf := vv.ToView()[header.IPv4MinimumSize:]
- tcpbuf[header.TCPDataOffset] = ((header.TCPMinimumSize - 1) / 4) << 4
-
- c.SendSegment(vv)
-
- if got := stats.TCP.InvalidSegmentsReceived.Value(); got != want {
- t.Errorf("got stats.TCP.InvalidSegmentsReceived.Value() = %d, want = %d", got, want)
- }
- if got := c.EP.Stats().(*tcp.Stats).ReceiveErrors.MalformedPacketsReceived.Value(); got != want {
- t.Errorf("got EP Stats.ReceiveErrors.MalformedPacketsReceived stats = %d, want = %d", got, want)
- }
-}
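-
-// tcpHeaderLength is an illustrative sketch (not part of the original file)
-// of the encoding the test above corrupts: the upper four bits of the TCP
-// data-offset byte hold the header length in 32-bit words, so any value
-// below 5 (i.e. fewer than 20 bytes) marks the segment as malformed.
-func tcpHeaderLength(dataOffsetByte byte) int {
-	return int(dataOffsetByte>>4) * 4
-}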
-
-func TestReceivedIncorrectChecksumIncrement(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
- stats := c.Stack().Stats()
- want := stats.TCP.ChecksumErrors.Value() + 1
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- vv := c.BuildSegment([]byte{0x1, 0x2, 0x3}, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(1),
- RcvWnd: 30000,
- })
- tcpbuf := vv.ToView()[header.IPv4MinimumSize:]
- // Overwrite a byte in the payload which should cause checksum
- // verification to fail.
- tcpbuf[(tcpbuf[header.TCPDataOffset]>>4)*4] = 0x4
-
- c.SendSegment(vv)
-
- if got := stats.TCP.ChecksumErrors.Value(); got != want {
- t.Errorf("got stats.TCP.ChecksumErrors.Value() = %d, want = %d", got, want)
- }
- if got := c.EP.Stats().(*tcp.Stats).ReceiveErrors.ChecksumErrors.Value(); got != want {
- t.Errorf("got EP stats Stats.ReceiveErrors.ChecksumErrors = %d, want = %d", got, want)
- }
-}
-
-func TestReceivedSegmentQueuing(t *testing.T) {
- // This test sends 200 segments containing a few bytes each to an
- // endpoint and checks that they're all received and acknowledged by
- // the endpoint, that is, that none of the segments are dropped by
- // internal queues.
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
-
- // Send 200 segments.
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- data := []byte{1, 2, 3}
- for i := 0; i < 200; i++ {
- c.SendPacket(data, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss.Add(seqnum.Size(i * len(data))),
- AckNum: c.IRS.Add(1),
- RcvWnd: 30000,
- })
- }
-
- // Receive ACKs for all segments.
- last := iss.Add(seqnum.Size(200 * len(data)))
- for {
- b := c.GetPacket()
- checker.IPv4(t, b,
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPFlags(header.TCPFlagAck),
- ),
- )
- tcpHdr := header.TCP(header.IPv4(b).Payload())
- ack := seqnum.Value(tcpHdr.AckNumber())
- if ack == last {
- break
- }
-
- if last.LessThan(ack) {
- t.Fatalf("Acknowledge (%v) beyond the expected (%v)", ack, last)
- }
- }
-}
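-
-// expectedCumulativeAck is a hypothetical helper (a sketch, not in the
-// original file) making the arithmetic in TestReceivedSegmentQueuing above
-// explicit: after n in-order segments of segLen bytes starting at iss, the
-// peer's cumulative ACK should equal iss advanced by n*segLen.
-func expectedCumulativeAck(iss seqnum.Value, n, segLen int) seqnum.Value {
-	return iss.Add(seqnum.Size(n * segLen))
-}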
-
-func TestReadAfterClosedState(t *testing.T) {
-	// This test ensures that calling Read() or Peek() after the endpoint
-	// has transitioned to StateClose still works if there is pending
-	// data. To transition to StateClose without calling Close(), we must
-	// shut down the send path and the peer must send its own FIN.
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
-	// Set TCPTimeWaitTimeout to 1 second so that sockets are marked closed
-	// after 1 second in TIME_WAIT state.
- tcpTimeWaitTimeout := 1 * time.Second
- opt := tcpip.TCPTimeWaitTimeoutOption(tcpTimeWaitTimeout)
- if err := c.Stack().SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- t.Fatalf("SetTransportProtocolOption(%d, &%T(%d)): %s", tcp.ProtocolNumber, opt, opt, err)
- }
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
-
- we, ch := waiter.NewChannelEntry(nil)
- c.WQ.EventRegister(&we, waiter.ReadableEvents)
- defer c.WQ.EventUnregister(&we)
-
- ept := endpointTester{c.EP}
- ept.CheckReadError(t, &tcpip.ErrWouldBlock{})
-
- // Shutdown immediately for write, check that we get a FIN.
- if err := c.EP.Shutdown(tcpip.ShutdownWrite); err != nil {
- t.Fatalf("Shutdown failed: %s", err)
- }
-
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- checker.IPv4(t, c.GetPacket(),
- checker.PayloadLen(header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlags(header.TCPFlagAck|header.TCPFlagFin),
- ),
- )
-
- if got, want := tcp.EndpointState(c.EP.State()), tcp.StateFinWait1; got != want {
- t.Errorf("unexpected endpoint state: want %s, got %s", want, got)
- }
-
- // Send some data and acknowledge the FIN.
- data := []byte{1, 2, 3}
- c.SendPacket(data, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck | header.TCPFlagFin,
- SeqNum: iss,
- AckNum: c.IRS.Add(2),
- RcvWnd: 30000,
- })
-
- // Check that ACK is received.
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+2),
- checker.TCPAckNum(uint32(iss)+uint32(len(data))+1),
- checker.TCPFlags(header.TCPFlagAck),
- ),
- )
-
- // Give the stack the chance to transition to closed state from
- // TIME_WAIT.
- time.Sleep(tcpTimeWaitTimeout * 2)
-
- if got, want := tcp.EndpointState(c.EP.State()), tcp.StateClose; got != want {
- t.Errorf("unexpected endpoint state: want %s, got %s", want, got)
- }
-
- // Wait for receive to be notified.
- select {
- case <-ch:
- case <-time.After(1 * time.Second):
- t.Fatalf("Timed out waiting for data to arrive")
- }
-
- // Check that peek works.
- var peekBuf bytes.Buffer
- res, err := c.EP.Read(&peekBuf, tcpip.ReadOptions{Peek: true})
- if err != nil {
- t.Fatalf("Peek failed: %s", err)
- }
-
- if got, want := res.Count, len(data); got != want {
- t.Fatalf("res.Count = %d, want %d", got, want)
- }
- if !bytes.Equal(data, peekBuf.Bytes()) {
- t.Fatalf("got data = %v, want = %v", peekBuf.Bytes(), data)
- }
-
- // Receive data.
- v := ept.CheckRead(t)
- if !bytes.Equal(data, v) {
- t.Fatalf("got data = %v, want = %v", v, data)
- }
-
- // Now that we drained the queue, check that functions fail with the
- // right error code.
- ept.CheckReadError(t, &tcpip.ErrClosedForReceive{})
- var buf bytes.Buffer
- {
- _, err := c.EP.Read(&buf, tcpip.ReadOptions{Peek: true})
- if d := cmp.Diff(&tcpip.ErrClosedForReceive{}, err); d != "" {
- t.Fatalf("c.EP.Read(_, {Peek: true}) mismatch (-want +got):\n%s", d)
- }
- }
-}
-
-func TestReusePort(t *testing.T) {
- // This test ensures that ports are immediately available for reuse
- // after Close on the endpoints using them returns.
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- // First case, just an endpoint that was bound.
- var err tcpip.Error
- c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &waiter.Queue{})
- if err != nil {
- t.Fatalf("NewEndpoint failed; %s", err)
- }
- c.EP.SocketOptions().SetReuseAddress(true)
- if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
-
- c.EP.Close()
- c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &waiter.Queue{})
- if err != nil {
- t.Fatalf("NewEndpoint failed; %s", err)
- }
- c.EP.SocketOptions().SetReuseAddress(true)
- if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
- c.EP.Close()
-
-	// Second case, an endpoint that was bound and is connecting.
- c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &waiter.Queue{})
- if err != nil {
- t.Fatalf("NewEndpoint failed; %s", err)
- }
- c.EP.SocketOptions().SetReuseAddress(true)
- if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
- {
- err := c.EP.Connect(tcpip.FullAddress{Addr: context.TestAddr, Port: context.TestPort})
- if d := cmp.Diff(&tcpip.ErrConnectStarted{}, err); d != "" {
- t.Fatalf("c.EP.Connect(...) mismatch (-want +got):\n%s", d)
- }
- }
- c.EP.Close()
-
- c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &waiter.Queue{})
- if err != nil {
- t.Fatalf("NewEndpoint failed; %s", err)
- }
- c.EP.SocketOptions().SetReuseAddress(true)
- if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
- c.EP.Close()
-
- // Third case, an endpoint that was bound and is listening.
- c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &waiter.Queue{})
- if err != nil {
- t.Fatalf("NewEndpoint failed; %s", err)
- }
- c.EP.SocketOptions().SetReuseAddress(true)
- if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
- if err := c.EP.Listen(10); err != nil {
- t.Fatalf("Listen failed: %s", err)
- }
- c.EP.Close()
-
- c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &waiter.Queue{})
- if err != nil {
- t.Fatalf("NewEndpoint failed; %s", err)
- }
- c.EP.SocketOptions().SetReuseAddress(true)
- if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
- if err := c.EP.Listen(10); err != nil {
- t.Fatalf("Listen failed: %s", err)
- }
-}
-
-func checkRecvBufferSize(t *testing.T, ep tcpip.Endpoint, v int) {
- t.Helper()
-
- s := ep.SocketOptions().GetReceiveBufferSize()
- if int(s) != v {
- t.Fatalf("got receive buffer size = %d, want = %d", s, v)
- }
-}
-
-func checkSendBufferSize(t *testing.T, ep tcpip.Endpoint, v int) {
- t.Helper()
-
- if s := ep.SocketOptions().GetSendBufferSize(); int(s) != v {
- t.Fatalf("got send buffer size = %d, want = %d", s, v)
- }
-}
-
-func TestDefaultBufferSizes(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{tcp.NewProtocol},
- })
-
- // Check the default values.
- ep, err := s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &waiter.Queue{})
- if err != nil {
- t.Fatalf("NewEndpoint failed; %s", err)
- }
- defer func() {
- if ep != nil {
- ep.Close()
- }
- }()
-
- checkSendBufferSize(t, ep, tcp.DefaultSendBufferSize)
- checkRecvBufferSize(t, ep, tcp.DefaultReceiveBufferSize)
-
- // Change the default send buffer size.
- {
- opt := tcpip.TCPSendBufferSizeRangeOption{
- Min: 1,
- Default: tcp.DefaultSendBufferSize * 2,
- Max: tcp.DefaultSendBufferSize * 20,
- }
- if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- t.Fatalf("SetTransportProtocolOption(%d, &%#v): %s", tcp.ProtocolNumber, opt, err)
- }
- }
-
- ep.Close()
- ep, err = s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &waiter.Queue{})
- if err != nil {
- t.Fatalf("NewEndpoint failed; %s", err)
- }
-
- checkSendBufferSize(t, ep, tcp.DefaultSendBufferSize*2)
- checkRecvBufferSize(t, ep, tcp.DefaultReceiveBufferSize)
-
- // Change the default receive buffer size.
- {
- opt := tcpip.TCPReceiveBufferSizeRangeOption{
- Min: 1,
- Default: tcp.DefaultReceiveBufferSize * 3,
- Max: tcp.DefaultReceiveBufferSize * 30,
- }
- if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- t.Fatalf("SetTransportProtocolOption(%d, &%#v): %s", tcp.ProtocolNumber, opt, err)
- }
- }
-
- ep.Close()
- ep, err = s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &waiter.Queue{})
- if err != nil {
- t.Fatalf("NewEndpoint failed; %s", err)
- }
-
- checkSendBufferSize(t, ep, tcp.DefaultSendBufferSize*2)
- checkRecvBufferSize(t, ep, tcp.DefaultReceiveBufferSize*3)
-}
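-
-// clampBufferSize is a hedged sketch (a hypothetical helper, not part of the
-// original file) of how a Min/Default/Max triple such as
-// tcpip.TCPSendBufferSizeRangeOption is typically interpreted: it assumes
-// requested values are clamped into the [min, max] range.
-func clampBufferSize(requested, min, max int) int {
-	if requested < min {
-		return min
-	}
-	if requested > max {
-		return max
-	}
-	return requested
-}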
-
-func TestBindToDeviceOption(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{tcp.NewProtocol}})
-
- ep, err := s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &waiter.Queue{})
- if err != nil {
- t.Fatalf("NewEndpoint failed; %s", err)
- }
- defer ep.Close()
-
- if err := s.CreateNIC(321, loopback.New()); err != nil {
- t.Errorf("CreateNIC failed: %s", err)
- }
-
- // nicIDPtr is used instead of taking the address of NICID literals, which is
- // a compiler error.
- nicIDPtr := func(s tcpip.NICID) *tcpip.NICID {
- return &s
- }
-
- testActions := []struct {
- name string
- setBindToDevice *tcpip.NICID
- setBindToDeviceError tcpip.Error
- getBindToDevice int32
- }{
- {"GetDefaultValue", nil, nil, 0},
- {"BindToNonExistent", nicIDPtr(999), &tcpip.ErrUnknownDevice{}, 0},
- {"BindToExistent", nicIDPtr(321), nil, 321},
- {"UnbindToDevice", nicIDPtr(0), nil, 0},
- }
- for _, testAction := range testActions {
- t.Run(testAction.name, func(t *testing.T) {
- if testAction.setBindToDevice != nil {
- bindToDevice := int32(*testAction.setBindToDevice)
- if gotErr, wantErr := ep.SocketOptions().SetBindToDevice(bindToDevice), testAction.setBindToDeviceError; gotErr != wantErr {
- t.Errorf("got SetSockOpt(&%T(%d)) = %s, want = %s", bindToDevice, bindToDevice, gotErr, wantErr)
- }
- }
- bindToDevice := ep.SocketOptions().GetBindToDevice()
- if bindToDevice != testAction.getBindToDevice {
- t.Errorf("got bindToDevice = %d, want %d", bindToDevice, testAction.getBindToDevice)
- }
- })
- }
-}
-
-func makeStack() (*stack.Stack, tcpip.Error) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{
- ipv4.NewProtocol,
- ipv6.NewProtocol,
- },
- TransportProtocols: []stack.TransportProtocolFactory{tcp.NewProtocol},
- })
-
- id := loopback.New()
- if testing.Verbose() {
- id = sniffer.New(id)
- }
-
- if err := s.CreateNIC(1, id); err != nil {
- return nil, err
- }
-
- for _, ct := range []struct {
- number tcpip.NetworkProtocolNumber
- address tcpip.Address
- }{
- {ipv4.ProtocolNumber, context.StackAddr},
- {ipv6.ProtocolNumber, context.StackV6Addr},
- } {
- if err := s.AddAddress(1, ct.number, ct.address); err != nil {
- return nil, err
- }
- }
-
- s.SetRouteTable([]tcpip.Route{
- {
- Destination: header.IPv4EmptySubnet,
- NIC: 1,
- },
- {
- Destination: header.IPv6EmptySubnet,
- NIC: 1,
- },
- })
-
- return s, nil
-}
-
-func TestSelfConnect(t *testing.T) {
-	// This test ensures that intentional self-connects work. In particular,
-	// it checks that if an endpoint binds to, say, 127.0.0.1:1000 and then
-	// connects to 127.0.0.1:1000, it will be connected to itself and will
-	// be able to send and receive data through the same endpoint.
- s, err := makeStack()
- if err != nil {
- t.Fatal(err)
- }
-
- var wq waiter.Queue
- ep, err := s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &wq)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
- defer ep.Close()
-
- if err := ep.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
-
- // Register for notification, then start connection attempt.
- waitEntry, notifyCh := waiter.NewChannelEntry(nil)
- wq.EventRegister(&waitEntry, waiter.WritableEvents)
- defer wq.EventUnregister(&waitEntry)
-
- {
- err := ep.Connect(tcpip.FullAddress{Addr: context.StackAddr, Port: context.StackPort})
- if d := cmp.Diff(&tcpip.ErrConnectStarted{}, err); d != "" {
- t.Fatalf("ep.Connect(...) mismatch (-want +got):\n%s", d)
- }
- }
-
- <-notifyCh
- if err := ep.LastError(); err != nil {
- t.Fatalf("Connect failed: %s", err)
- }
-
- // Write something.
- data := []byte{1, 2, 3}
- var r bytes.Reader
- r.Reset(data)
- if _, err := ep.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- // Read back what was written.
- wq.EventUnregister(&waitEntry)
- wq.EventRegister(&waitEntry, waiter.ReadableEvents)
- ept := endpointTester{ep}
- rd := ept.CheckReadFull(t, len(data), notifyCh, 5*time.Second)
-
- if !bytes.Equal(data, rd) {
- t.Fatalf("got data = %v, want = %v", rd, data)
- }
-}
-
-func TestConnectAvoidsBoundPorts(t *testing.T) {
- addressTypes := func(t *testing.T, network string) []string {
- switch network {
- case "ipv4":
- return []string{"v4"}
- case "ipv6":
- return []string{"v6"}
- case "dual":
- return []string{"v6", "mapped"}
- default:
- t.Fatalf("unknown network: '%s'", network)
- }
-
- panic("unreachable")
- }
-
- address := func(t *testing.T, addressType string, isAny bool) tcpip.Address {
- switch addressType {
- case "v4":
- if isAny {
- return ""
- }
- return context.StackAddr
- case "v6":
- if isAny {
- return ""
- }
- return context.StackV6Addr
- case "mapped":
- if isAny {
- return context.V4MappedWildcardAddr
- }
- return context.StackV4MappedAddr
- default:
- t.Fatalf("unknown address type: '%s'", addressType)
- }
-
- panic("unreachable")
- }
- // This test ensures that Endpoint.Connect doesn't select already-bound ports.
- networks := []string{"ipv4", "ipv6", "dual"}
- for _, exhaustedNetwork := range networks {
- t.Run(fmt.Sprintf("exhaustedNetwork=%s", exhaustedNetwork), func(t *testing.T) {
- for _, exhaustedAddressType := range addressTypes(t, exhaustedNetwork) {
- t.Run(fmt.Sprintf("exhaustedAddressType=%s", exhaustedAddressType), func(t *testing.T) {
- for _, isAny := range []bool{false, true} {
- t.Run(fmt.Sprintf("isAny=%t", isAny), func(t *testing.T) {
- for _, candidateNetwork := range networks {
- t.Run(fmt.Sprintf("candidateNetwork=%s", candidateNetwork), func(t *testing.T) {
- for _, candidateAddressType := range addressTypes(t, candidateNetwork) {
- t.Run(fmt.Sprintf("candidateAddressType=%s", candidateAddressType), func(t *testing.T) {
- s, err := makeStack()
- if err != nil {
- t.Fatal(err)
- }
-
- var wq waiter.Queue
- var eps []tcpip.Endpoint
- defer func() {
- for _, ep := range eps {
- ep.Close()
- }
- }()
- makeEP := func(network string) tcpip.Endpoint {
- var networkProtocolNumber tcpip.NetworkProtocolNumber
- switch network {
- case "ipv4":
- networkProtocolNumber = ipv4.ProtocolNumber
- case "ipv6", "dual":
- networkProtocolNumber = ipv6.ProtocolNumber
- default:
- t.Fatalf("unknown network: '%s'", network)
- }
- ep, err := s.NewEndpoint(tcp.ProtocolNumber, networkProtocolNumber, &wq)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
- eps = append(eps, ep)
- switch network {
- case "ipv4":
- case "ipv6":
- ep.SocketOptions().SetV6Only(true)
- case "dual":
- ep.SocketOptions().SetV6Only(false)
- default:
- t.Fatalf("unknown network: '%s'", network)
- }
- return ep
- }
-
- var v4reserved, v6reserved bool
- switch exhaustedAddressType {
- case "v4", "mapped":
- v4reserved = true
- case "v6":
- v6reserved = true
-	// Dual-stack sockets bound to the v6 any address reserve the port
-	// on v4 as well.
- if isAny {
- switch exhaustedNetwork {
- case "ipv6":
- case "dual":
- v4reserved = true
- default:
-	t.Fatalf("unknown network: '%s'", exhaustedNetwork)
- }
- }
- default:
- t.Fatalf("unknown address type: '%s'", exhaustedAddressType)
- }
- var collides bool
- switch candidateAddressType {
- case "v4", "mapped":
- collides = v4reserved
- case "v6":
- collides = v6reserved
- default:
- t.Fatalf("unknown address type: '%s'", candidateAddressType)
- }
-
- const (
- start = 16000
- end = 16050
- )
- if err := s.SetPortRange(start, end); err != nil {
- t.Fatalf("got s.SetPortRange(%d, %d) = %s, want = nil", start, end, err)
- }
- for i := start; i <= end; i++ {
-	if err := makeEP(exhaustedNetwork).Bind(tcpip.FullAddress{Addr: address(t, exhaustedAddressType, isAny), Port: uint16(i)}); err != nil {
- t.Fatalf("Bind(%d) failed: %s", i, err)
- }
- }
- var want tcpip.Error = &tcpip.ErrConnectStarted{}
- if collides {
- want = &tcpip.ErrNoPortAvailable{}
- }
- if err := makeEP(candidateNetwork).Connect(tcpip.FullAddress{Addr: address(t, candidateAddressType, false), Port: 31337}); err != want {
- t.Fatalf("got ep.Connect(..) = %s, want = %s", err, want)
- }
- })
- }
- })
- }
- })
- }
- })
- }
- })
- }
-}
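-
-// portsInRange is a hypothetical helper (a sketch, not from the original
-// file) making explicit the setup of TestConnectAvoidsBoundPorts above:
-// binding every port in the inclusive range [start, end] reserves
-// end-start+1 ports, so a colliding Connect has no ephemeral port left and
-// is expected to fail with ErrNoPortAvailable.
-func portsInRange(start, end uint16) int {
-	return int(end) - int(start) + 1
-}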
-
-func TestPathMTUDiscovery(t *testing.T) {
- // This test verifies the stack retransmits packets after it receives an
- // ICMP packet indicating that the path MTU has been exceeded.
- c := context.New(t, 1500)
- defer c.Cleanup()
-
- // Create new connection with MSS of 1460.
- const maxPayload = 1500 - header.TCPMinimumSize - header.IPv4MinimumSize
- c.CreateConnectedWithRawOptions(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */, []byte{
- header.TCPOptionMSS, 4, byte(maxPayload / 256), byte(maxPayload % 256),
- })
-
- // Send 3200 bytes of data.
- const writeSize = 3200
- data := make([]byte, writeSize)
- for i := range data {
- data[i] = byte(i)
- }
- var r bytes.Reader
- r.Reset(data)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- receivePackets := func(c *context.Context, sizes []int, which int, seqNum uint32) []byte {
- var ret []byte
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- for i, size := range sizes {
- p := c.GetPacket()
- if i == which {
- ret = p
- }
- checker.IPv4(t, p,
- checker.PayloadLen(size+header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(seqNum),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
- seqNum += uint32(size)
- }
- return ret
- }
-
- // Receive three packets.
- sizes := []int{maxPayload, maxPayload, writeSize - 2*maxPayload}
- first := receivePackets(c, sizes, 0, uint32(c.IRS)+1)
-
- // Send "packet too big" messages back to netstack.
- const newMTU = 1200
- const newMaxPayload = newMTU - header.IPv4MinimumSize - header.TCPMinimumSize
- mtu := []byte{0, 0, newMTU / 256, newMTU % 256}
- c.SendICMPPacket(header.ICMPv4DstUnreachable, header.ICMPv4FragmentationNeeded, mtu, first, newMTU)
-
- // See retransmitted packets. None exceeding the new max.
- sizes = []int{newMaxPayload, maxPayload - newMaxPayload, newMaxPayload, maxPayload - newMaxPayload, writeSize - 2*maxPayload}
- receivePackets(c, sizes, -1, uint32(c.IRS)+1)
-}
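-
-// tcpPayloadForMTU is an illustrative sketch (assuming only the header
-// package this file already imports) of the arithmetic used in
-// TestPathMTUDiscovery above: the largest TCP payload that fits in a given
-// IPv4 MTU once the minimum IP and TCP headers are subtracted, e.g.
-// 1200 - 20 - 20 = 1160 bytes after the ICMP "fragmentation needed" message.
-func tcpPayloadForMTU(mtu int) int {
-	return mtu - header.IPv4MinimumSize - header.TCPMinimumSize
-}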
-
-func TestTCPEndpointProbe(t *testing.T) {
- c := context.New(t, 1500)
- defer c.Cleanup()
-
- invoked := make(chan struct{})
- c.Stack().AddTCPProbe(func(state stack.TCPEndpointState) {
- // Validate that the endpoint ID is what we expect.
- //
- // We don't do an extensive validation of every field but a
- // basic sanity test.
- if got, want := state.ID.LocalAddress, tcpip.Address(context.StackAddr); got != want {
- t.Fatalf("got LocalAddress: %q, want: %q", got, want)
- }
- if got, want := state.ID.LocalPort, c.Port; got != want {
- t.Fatalf("got LocalPort: %d, want: %d", got, want)
- }
- if got, want := state.ID.RemoteAddress, tcpip.Address(context.TestAddr); got != want {
- t.Fatalf("got RemoteAddress: %q, want: %q", got, want)
- }
- if got, want := state.ID.RemotePort, uint16(context.TestPort); got != want {
- t.Fatalf("got RemotePort: %d, want: %d", got, want)
- }
-
- invoked <- struct{}{}
- })
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
-
- data := []byte{1, 2, 3}
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- c.SendPacket(data, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(1),
- RcvWnd: 30000,
- })
-
- select {
- case <-invoked:
- case <-time.After(100 * time.Millisecond):
- t.Fatalf("TCP Probe function was not called")
- }
-}
-
-func TestStackSetCongestionControl(t *testing.T) {
- testCases := []struct {
- cc tcpip.CongestionControlOption
- err tcpip.Error
- }{
- {"reno", nil},
- {"cubic", nil},
- {"blahblah", &tcpip.ErrNoSuchFile{}},
- }
-
- for _, tc := range testCases {
- t.Run(fmt.Sprintf("SetTransportProtocolOption(.., %v)", tc.cc), func(t *testing.T) {
- c := context.New(t, 1500)
- defer c.Cleanup()
-
- s := c.Stack()
-
- var oldCC tcpip.CongestionControlOption
- if err := s.TransportProtocolOption(tcp.ProtocolNumber, &oldCC); err != nil {
- t.Fatalf("s.TransportProtocolOption(%v, %v) = %s", tcp.ProtocolNumber, &oldCC, err)
- }
-
- if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &tc.cc); err != tc.err {
- t.Fatalf("s.SetTransportProtocolOption(%d, &%T(%s)) = %s, want = %s", tcp.ProtocolNumber, tc.cc, tc.cc, err, tc.err)
- }
-
- var cc tcpip.CongestionControlOption
- if err := s.TransportProtocolOption(tcp.ProtocolNumber, &cc); err != nil {
- t.Fatalf("s.TransportProtocolOption(%v, %v) = %v", tcp.ProtocolNumber, &cc, err)
- }
-
- got, want := cc, oldCC
- // If SetTransportProtocolOption is expected to succeed
- // then the returned value for congestion control should
- // match the one specified in the
- // SetTransportProtocolOption call above, else it should
- // be what it was before the call to
- // SetTransportProtocolOption.
- if tc.err == nil {
- want = tc.cc
- }
- if got != want {
- t.Fatalf("got congestion control: %v, want: %v", got, want)
- }
- })
- }
-}
-
-func TestStackAvailableCongestionControl(t *testing.T) {
- c := context.New(t, 1500)
- defer c.Cleanup()
-
- s := c.Stack()
-
- // Query permitted congestion control algorithms.
- var aCC tcpip.TCPAvailableCongestionControlOption
- if err := s.TransportProtocolOption(tcp.ProtocolNumber, &aCC); err != nil {
- t.Fatalf("s.TransportProtocolOption(%v, %v) = %v", tcp.ProtocolNumber, &aCC, err)
- }
- if got, want := aCC, tcpip.TCPAvailableCongestionControlOption("reno cubic"); got != want {
- t.Fatalf("got tcpip.TCPAvailableCongestionControlOption: %v, want: %v", got, want)
- }
-}
-
-func TestStackSetAvailableCongestionControl(t *testing.T) {
- c := context.New(t, 1500)
- defer c.Cleanup()
-
- s := c.Stack()
-
- // Setting AvailableCongestionControlOption should fail.
- aCC := tcpip.TCPAvailableCongestionControlOption("xyz")
- if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &aCC); err == nil {
- t.Fatalf("s.SetTransportProtocolOption(%d, &%T(%s)) = nil, want non-nil", tcp.ProtocolNumber, aCC, aCC)
- }
-
- // Verify that we still get the expected list of congestion control options.
- var cc tcpip.TCPAvailableCongestionControlOption
- if err := s.TransportProtocolOption(tcp.ProtocolNumber, &cc); err != nil {
-	t.Fatalf("s.TransportProtocolOption(%d, &%T(%s)): %s", tcp.ProtocolNumber, cc, cc, err)
- }
- if got, want := cc, tcpip.TCPAvailableCongestionControlOption("reno cubic"); got != want {
- t.Fatalf("got tcpip.TCPAvailableCongestionControlOption = %s, want = %s", got, want)
- }
-}
-
-func TestEndpointSetCongestionControl(t *testing.T) {
- testCases := []struct {
- cc tcpip.CongestionControlOption
- err tcpip.Error
- }{
- {"reno", nil},
- {"cubic", nil},
- {"blahblah", &tcpip.ErrNoSuchFile{}},
- }
-
- for _, connected := range []bool{false, true} {
- for _, tc := range testCases {
- t.Run(fmt.Sprintf("SetSockOpt(.., %v) w/ connected = %v", tc.cc, connected), func(t *testing.T) {
- c := context.New(t, 1500)
- defer c.Cleanup()
-
- // Create TCP endpoint.
- var err tcpip.Error
- c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &c.WQ)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
-
- var oldCC tcpip.CongestionControlOption
- if err := c.EP.GetSockOpt(&oldCC); err != nil {
- t.Fatalf("c.EP.GetSockOpt(&%T) = %s", oldCC, err)
- }
-
- if connected {
- c.Connect(context.TestInitialSequenceNumber, 32768 /* rcvWnd */, nil)
- }
-
- if err := c.EP.SetSockOpt(&tc.cc); err != tc.err {
- t.Fatalf("got c.EP.SetSockOpt(&%#v) = %s, want %s", tc.cc, err, tc.err)
- }
-
- var cc tcpip.CongestionControlOption
- if err := c.EP.GetSockOpt(&cc); err != nil {
- t.Fatalf("c.EP.GetSockOpt(&%T): %s", cc, err)
- }
-
- got, want := cc, oldCC
- // If SetSockOpt is expected to succeed then the
- // returned value for congestion control should match
- // the one specified in the SetSockOpt above, else it
- // should be what it was before the call to SetSockOpt.
- if tc.err == nil {
- want = tc.cc
- }
- if got != want {
- t.Fatalf("got congestion control = %+v, want = %+v", got, want)
- }
- })
- }
- }
-}
-
-func enableCUBIC(t *testing.T, c *context.Context) {
- t.Helper()
- opt := tcpip.CongestionControlOption("cubic")
- if err := c.Stack().SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- t.Fatalf("SetTransportProtocolOption(%d, &%T(%s)) %s", tcp.ProtocolNumber, opt, opt, err)
- }
-}
-
-func TestKeepalive(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
-
- const keepAliveIdle = 100 * time.Millisecond
- const keepAliveInterval = 3 * time.Second
- keepAliveIdleOpt := tcpip.KeepaliveIdleOption(keepAliveIdle)
- if err := c.EP.SetSockOpt(&keepAliveIdleOpt); err != nil {
- t.Fatalf("c.EP.SetSockOpt(&%T(%s)): %s", keepAliveIdleOpt, keepAliveIdle, err)
- }
- keepAliveIntervalOpt := tcpip.KeepaliveIntervalOption(keepAliveInterval)
- if err := c.EP.SetSockOpt(&keepAliveIntervalOpt); err != nil {
- t.Fatalf("c.EP.SetSockOpt(&%T(%s)): %s", keepAliveIntervalOpt, keepAliveInterval, err)
- }
-	if err := c.EP.SetSockOptInt(tcpip.KeepaliveCountOption, 5); err != nil {
-		t.Fatalf("c.EP.SetSockOptInt(tcpip.KeepaliveCountOption, 5): %s", err)
-	}
- c.EP.SocketOptions().SetKeepAlive(true)
-
-	// The endpoint is configured to allow 5 unacked keepalives before the
-	// connection is reset. ACK each probe as it arrives and check that the
-	// connection stays alive past 5 probes when every probe is acknowledged.
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- for i := 0; i < 10; i++ {
- b := c.GetPacket()
- checker.IPv4(t, b,
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlags(header.TCPFlagAck),
- ),
- )
-
- // Acknowledge the keepalive.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS,
- RcvWnd: 30000,
- })
- }
-
- // Check that the connection is still alive.
- ept := endpointTester{c.EP}
- ept.CheckReadError(t, &tcpip.ErrWouldBlock{})
-
- // Send some data and wait before ACKing it. Keepalives should be disabled
- // during this period.
- view := make([]byte, 3)
- var r bytes.Reader
- r.Reset(view)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- next := uint32(c.IRS) + 1
- checker.IPv4(t, c.GetPacket(),
- checker.PayloadLen(len(view)+header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(next),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
-
- // Wait for the packet to be retransmitted. Verify that no keepalives
- // were sent.
- checker.IPv4(t, c.GetPacket(),
- checker.PayloadLen(len(view)+header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(next),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlags(header.TCPFlagAck|header.TCPFlagPsh),
- ),
- )
- c.CheckNoPacket("Keepalive packet received while unACKed data is pending")
-
- next += uint32(len(view))
-
- // Send ACK. Keepalives should start sending again.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: seqnum.Value(next),
- RcvWnd: 30000,
- })
-
- // Now receive 5 keepalives, but don't ACK them. The connection
- // should be reset after 5.
- for i := 0; i < 5; i++ {
- b := c.GetPacket()
- checker.IPv4(t, b,
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(next-1),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlags(header.TCPFlagAck),
- ),
- )
- }
-
-	// Sleep for a little over the keepalive interval to make sure the
-	// timer has time to fire after the last ACK and close the socket.
- time.Sleep(keepAliveInterval + keepAliveInterval/2)
-
- // The connection should be terminated after 5 unacked keepalives.
- // Send an ACK to trigger a RST from the stack as the endpoint should
- // be dead.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: seqnum.Value(next),
- RcvWnd: 30000,
- })
-
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(checker.DstPort(context.TestPort), checker.TCPSeqNum(next), checker.TCPAckNum(uint32(0)), checker.TCPFlags(header.TCPFlagRst)),
- )
-
- if got := c.Stack().Stats().TCP.EstablishedTimedout.Value(); got != 1 {
- t.Errorf("got c.Stack().Stats().TCP.EstablishedTimedout.Value() = %d, want = 1", got)
- }
-
- ept.CheckReadError(t, &tcpip.ErrTimeout{})
-
- if got := c.Stack().Stats().TCP.CurrentEstablished.Value(); got != 0 {
- t.Errorf("got stats.TCP.CurrentEstablished.Value() = %d, want = 0", got)
- }
- if got := c.Stack().Stats().TCP.CurrentConnected.Value(); got != 0 {
- t.Errorf("got stats.TCP.CurrentConnected.Value() = %d, want = 0", got)
- }
-}
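-
-// keepaliveDeadline is a hedged sketch (a hypothetical helper, not in the
-// original file) of the timing budget TestKeepalive above exercises: with no
-// traffic and no ACKs from the peer, the connection is torn down roughly
-// after the idle period plus count probe intervals.
-func keepaliveDeadline(idle, interval time.Duration, count int) time.Duration {
-	return idle + time.Duration(count)*interval
-}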
-
-func executeHandshake(t *testing.T, c *context.Context, srcPort uint16, synCookieInUse bool) (irs, iss seqnum.Value) {
- t.Helper()
- // Send a SYN request.
- irs = seqnum.Value(context.TestInitialSequenceNumber)
- c.SendPacket(nil, &context.Headers{
- SrcPort: srcPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: irs,
- RcvWnd: 30000,
- })
-
- // Receive the SYN-ACK reply.
- b := c.GetPacket()
- tcp := header.TCP(header.IPv4(b).Payload())
- iss = seqnum.Value(tcp.SequenceNumber())
- tcpCheckers := []checker.TransportChecker{
- checker.SrcPort(context.StackPort),
- checker.DstPort(srcPort),
- checker.TCPFlags(header.TCPFlagAck | header.TCPFlagSyn),
- checker.TCPAckNum(uint32(irs) + 1),
- }
-
- if synCookieInUse {
-	// When cookies are in use, window scaling is disabled.
- tcpCheckers = append(tcpCheckers, checker.TCPSynOptions(header.TCPSynOptions{
- WS: -1,
- MSS: c.MSSWithoutOptions(),
- }))
- }
-
- checker.IPv4(t, b, checker.TCP(tcpCheckers...))
-
- // Send ACK.
- c.SendPacket(nil, &context.Headers{
- SrcPort: srcPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck,
- SeqNum: irs + 1,
- AckNum: iss + 1,
- RcvWnd: 30000,
- })
- return irs, iss
-}
-
-func executeV6Handshake(t *testing.T, c *context.Context, srcPort uint16, synCookieInUse bool) (irs, iss seqnum.Value) {
- t.Helper()
- // Send a SYN request.
- irs = seqnum.Value(context.TestInitialSequenceNumber)
- c.SendV6Packet(nil, &context.Headers{
- SrcPort: srcPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: irs,
- RcvWnd: 30000,
- })
-
- // Receive the SYN-ACK reply.
- b := c.GetV6Packet()
- tcp := header.TCP(header.IPv6(b).Payload())
- iss = seqnum.Value(tcp.SequenceNumber())
- tcpCheckers := []checker.TransportChecker{
- checker.SrcPort(context.StackPort),
- checker.DstPort(srcPort),
- checker.TCPFlags(header.TCPFlagAck | header.TCPFlagSyn),
- checker.TCPAckNum(uint32(irs) + 1),
- }
-
- if synCookieInUse {
-	// When cookies are in use, window scaling is disabled.
- tcpCheckers = append(tcpCheckers, checker.TCPSynOptions(header.TCPSynOptions{
- WS: -1,
- MSS: c.MSSWithoutOptionsV6(),
- }))
- }
-
- checker.IPv6(t, b, checker.TCP(tcpCheckers...))
-
- // Send ACK.
- c.SendV6Packet(nil, &context.Headers{
- SrcPort: srcPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck,
- SeqNum: irs + 1,
- AckNum: iss + 1,
- RcvWnd: 30000,
- })
- return irs, iss
-}
-
-// TestListenBacklogFull tests that netstack does not complete handshakes if the
-// listen backlog for the endpoint is full.
-func TestListenBacklogFull(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- // Create TCP endpoint.
- var err tcpip.Error
- c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &c.WQ)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
-
- // Bind to wildcard.
- if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
-
-	// Test acceptance: start listening.
- listenBacklog := 10
- if err := c.EP.Listen(listenBacklog); err != nil {
- t.Fatalf("Listen failed: %s", err)
- }
-
- lastPortOffset := uint16(0)
- for ; int(lastPortOffset) < listenBacklog; lastPortOffset++ {
- executeHandshake(t, c, context.TestPort+lastPortOffset, false /*synCookieInUse */)
- }
-
- time.Sleep(50 * time.Millisecond)
-
-	// Now send one more SYN. The stack should not respond as the backlog
-	// is full at this point.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort + lastPortOffset,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: seqnum.Value(context.TestInitialSequenceNumber),
- RcvWnd: 30000,
- })
- c.CheckNoPacketTimeout("unexpected packet received", 50*time.Millisecond)
-
- // Try to accept the connections in the backlog.
- we, ch := waiter.NewChannelEntry(nil)
- c.WQ.EventRegister(&we, waiter.ReadableEvents)
- defer c.WQ.EventUnregister(&we)
-
- for i := 0; i < listenBacklog; i++ {
- _, _, err = c.EP.Accept(nil)
- if cmp.Equal(&tcpip.ErrWouldBlock{}, err) {
- // Wait for connection to be established.
- select {
- case <-ch:
- _, _, err = c.EP.Accept(nil)
- if err != nil {
- t.Fatalf("Accept failed: %s", err)
- }
-
- case <-time.After(1 * time.Second):
- t.Fatalf("Timed out waiting for accept")
- }
- }
- }
-
- // Now verify that there are no more connections that can be accepted.
- _, _, err = c.EP.Accept(nil)
- if !cmp.Equal(&tcpip.ErrWouldBlock{}, err) {
- select {
- case <-ch:
- t.Fatalf("unexpected endpoint delivered on Accept: %+v", c.EP)
- case <-time.After(1 * time.Second):
- }
- }
-
- // Now a new handshake must succeed.
- executeHandshake(t, c, context.TestPort+lastPortOffset, false /*synCookieInUse */)
-
- newEP, _, err := c.EP.Accept(nil)
- if cmp.Equal(&tcpip.ErrWouldBlock{}, err) {
- // Wait for connection to be established.
- select {
- case <-ch:
- newEP, _, err = c.EP.Accept(nil)
- if err != nil {
- t.Fatalf("Accept failed: %s", err)
- }
-
- case <-time.After(1 * time.Second):
- t.Fatalf("Timed out waiting for accept")
- }
- }
-
- // Now verify that the TCP socket is usable and in a connected state.
- data := "Don't panic"
- var r strings.Reader
- r.Reset(data)
- newEP.Write(&r, tcpip.WriteOptions{})
- b := c.GetPacket()
- tcp := header.TCP(header.IPv4(b).Payload())
- if string(tcp.Payload()) != data {
- t.Fatalf("unexpected data: got %s, want %s", string(tcp.Payload()), data)
- }
-}
-
-// TestListenNoAcceptNonUnicastV4 makes sure that TCP segments with a
-// non-unicast IPv4 address are not accepted.
-func TestListenNoAcceptNonUnicastV4(t *testing.T) {
- multicastAddr := tcpiptestutil.MustParse4("224.0.1.2")
- otherMulticastAddr := tcpiptestutil.MustParse4("224.0.1.3")
- subnet := context.StackAddrWithPrefix.Subnet()
- subnetBroadcastAddr := subnet.Broadcast()
-
- tests := []struct {
- name string
- srcAddr tcpip.Address
- dstAddr tcpip.Address
- }{
- {
- name: "SourceUnspecified",
- srcAddr: header.IPv4Any,
- dstAddr: context.StackAddr,
- },
- {
- name: "SourceBroadcast",
- srcAddr: header.IPv4Broadcast,
- dstAddr: context.StackAddr,
- },
- {
- name: "SourceOurMulticast",
- srcAddr: multicastAddr,
- dstAddr: context.StackAddr,
- },
- {
- name: "SourceOtherMulticast",
- srcAddr: otherMulticastAddr,
- dstAddr: context.StackAddr,
- },
- {
- name: "DestUnspecified",
- srcAddr: context.TestAddr,
- dstAddr: header.IPv4Any,
- },
- {
- name: "DestBroadcast",
- srcAddr: context.TestAddr,
- dstAddr: header.IPv4Broadcast,
- },
- {
- name: "DestOurMulticast",
- srcAddr: context.TestAddr,
- dstAddr: multicastAddr,
- },
- {
- name: "DestOtherMulticast",
- srcAddr: context.TestAddr,
- dstAddr: otherMulticastAddr,
- },
- {
- name: "SrcSubnetBroadcast",
- srcAddr: subnetBroadcastAddr,
- dstAddr: context.StackAddr,
- },
- {
- name: "DestSubnetBroadcast",
- srcAddr: context.TestAddr,
- dstAddr: subnetBroadcastAddr,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.Create(-1)
-
- if err := c.Stack().JoinGroup(header.IPv4ProtocolNumber, 1, multicastAddr); err != nil {
- t.Fatalf("JoinGroup failed: %s", err)
- }
-
- if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
-
- if err := c.EP.Listen(1); err != nil {
- t.Fatalf("Listen failed: %s", err)
- }
-
- irs := seqnum.Value(context.TestInitialSequenceNumber)
- c.SendPacketWithAddrs(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: irs,
- RcvWnd: 30000,
- }, test.srcAddr, test.dstAddr)
- c.CheckNoPacket("Should not have received a response")
-
- // Handle normal packet.
- c.SendPacketWithAddrs(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: irs,
- RcvWnd: 30000,
- }, context.TestAddr, context.StackAddr)
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagAck|header.TCPFlagSyn),
- checker.TCPAckNum(uint32(irs)+1)))
- })
- }
-}
-
-// TestListenNoAcceptNonUnicastV6 makes sure that TCP segments with a
-// non-unicast IPv6 address are not accepted.
-func TestListenNoAcceptNonUnicastV6(t *testing.T) {
- multicastAddr := tcpiptestutil.MustParse6("ff0e::101")
- otherMulticastAddr := tcpiptestutil.MustParse6("ff0e::102")
-
- tests := []struct {
- name string
- srcAddr tcpip.Address
- dstAddr tcpip.Address
- }{
- {
- "SourceUnspecified",
- header.IPv6Any,
- context.StackV6Addr,
- },
- {
- "SourceAllNodes",
- header.IPv6AllNodesMulticastAddress,
- context.StackV6Addr,
- },
- {
- "SourceOurMulticast",
- multicastAddr,
- context.StackV6Addr,
- },
- {
- "SourceOtherMulticast",
- otherMulticastAddr,
- context.StackV6Addr,
- },
- {
- "DestUnspecified",
- context.TestV6Addr,
- header.IPv6Any,
- },
- {
- "DestAllNodes",
- context.TestV6Addr,
- header.IPv6AllNodesMulticastAddress,
- },
- {
- "DestOurMulticast",
- context.TestV6Addr,
- multicastAddr,
- },
- {
- "DestOtherMulticast",
- context.TestV6Addr,
- otherMulticastAddr,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateV6Endpoint(true)
-
- if err := c.Stack().JoinGroup(header.IPv6ProtocolNumber, 1, multicastAddr); err != nil {
- t.Fatalf("JoinGroup failed: %s", err)
- }
-
- if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
-
- if err := c.EP.Listen(1); err != nil {
- t.Fatalf("Listen failed: %s", err)
- }
-
- irs := seqnum.Value(context.TestInitialSequenceNumber)
- c.SendV6PacketWithAddrs(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: irs,
- RcvWnd: 30000,
- }, test.srcAddr, test.dstAddr)
- c.CheckNoPacket("Should not have received a response")
-
- // Handle normal packet.
- c.SendV6PacketWithAddrs(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: irs,
- RcvWnd: 30000,
- }, context.TestV6Addr, context.StackV6Addr)
- checker.IPv6(t, c.GetV6Packet(),
- checker.TCP(
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagAck|header.TCPFlagSyn),
- checker.TCPAckNum(uint32(irs)+1)))
- })
- }
-}
-
-func TestListenSynRcvdQueueFull(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- // Create TCP endpoint.
- var err tcpip.Error
- c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &c.WQ)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
-
- // Bind to wildcard.
- if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
-
-	// Test acceptance: start listening with a backlog of 1.
- if err := c.EP.Listen(1); err != nil {
- t.Fatalf("Listen failed: %s", err)
- }
-
-	// Send two SYNs. The first one should get a SYN-ACK; the second one
-	// should not get any response and is dropped as the accept queue
-	// is full.
- irs := seqnum.Value(context.TestInitialSequenceNumber)
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: irs,
- RcvWnd: 30000,
- })
-
- // Receive the SYN-ACK reply.
- b := c.GetPacket()
- tcp := header.TCP(header.IPv4(b).Payload())
- iss := seqnum.Value(tcp.SequenceNumber())
- tcpCheckers := []checker.TransportChecker{
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagAck | header.TCPFlagSyn),
- checker.TCPAckNum(uint32(irs) + 1),
- }
- checker.IPv4(t, b, checker.TCP(tcpCheckers...))
-
- // Now complete the previous connection.
- // Send ACK.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck,
- SeqNum: irs + 1,
- AckNum: iss + 1,
- RcvWnd: 30000,
- })
-
-	// Verify that it is delivered to the accept queue.
- we, ch := waiter.NewChannelEntry(nil)
- c.WQ.EventRegister(&we, waiter.ReadableEvents)
- defer c.WQ.EventUnregister(&we)
- <-ch
-
-	// Now send one more SYN. The stack should not respond as the backlog
-	// is full at this point.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort + 1,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: seqnum.Value(889),
- RcvWnd: 30000,
- })
- c.CheckNoPacketTimeout("unexpected packet received", 50*time.Millisecond)
-
- // Try to accept the connections in the backlog.
- newEP, _, err := c.EP.Accept(nil)
- if cmp.Equal(&tcpip.ErrWouldBlock{}, err) {
- // Wait for connection to be established.
- select {
- case <-ch:
- newEP, _, err = c.EP.Accept(nil)
- if err != nil {
- t.Fatalf("Accept failed: %s", err)
- }
-
- case <-time.After(1 * time.Second):
- t.Fatalf("Timed out waiting for accept")
- }
- }
-
- // Now verify that the TCP socket is usable and in a connected state.
- data := "Don't panic"
- var r strings.Reader
- r.Reset(data)
- newEP.Write(&r, tcpip.WriteOptions{})
- pkt := c.GetPacket()
- tcp = header.IPv4(pkt).Payload()
- if string(tcp.Payload()) != data {
- t.Fatalf("unexpected data: got %s, want %s", string(tcp.Payload()), data)
- }
-}
-
-func TestListenBacklogFullSynCookieInUse(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- // Create TCP endpoint.
- var err tcpip.Error
- c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &c.WQ)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
-
- // Bind to wildcard.
- if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
-
- // Test for SynCookies usage after filling up the backlog.
- if err := c.EP.Listen(1); err != nil {
- t.Fatalf("Listen failed: %s", err)
- }
-
- executeHandshake(t, c, context.TestPort, false)
-
- // Wait for this to be delivered to the accept queue.
- time.Sleep(50 * time.Millisecond)
-
- // Send a SYN request.
- irs := seqnum.Value(context.TestInitialSequenceNumber)
- c.SendPacket(nil, &context.Headers{
-	// Pick a different src port for the new SYN.
- SrcPort: context.TestPort + 1,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: irs,
- RcvWnd: 30000,
- })
-	// The SYN should be dropped as the endpoint's backlog is full.
- c.CheckNoPacketTimeout("unexpected packet received", 50*time.Millisecond)
-
- // Verify that there is only one acceptable connection at this point.
- we, ch := waiter.NewChannelEntry(nil)
- c.WQ.EventRegister(&we, waiter.ReadableEvents)
- defer c.WQ.EventUnregister(&we)
-
- _, _, err = c.EP.Accept(nil)
- if cmp.Equal(&tcpip.ErrWouldBlock{}, err) {
- // Wait for connection to be established.
- select {
- case <-ch:
- _, _, err = c.EP.Accept(nil)
- if err != nil {
- t.Fatalf("Accept failed: %s", err)
- }
-
- case <-time.After(1 * time.Second):
- t.Fatalf("Timed out waiting for accept")
- }
- }
-
- // Now verify that there are no more connections that can be accepted.
- _, _, err = c.EP.Accept(nil)
- if !cmp.Equal(&tcpip.ErrWouldBlock{}, err) {
- select {
- case <-ch:
- t.Fatalf("unexpected endpoint delivered on Accept: %+v", c.EP)
- case <-time.After(1 * time.Second):
- }
- }
-}
-
-func TestSYNRetransmit(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- // Create TCP endpoint.
- var err tcpip.Error
- c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &c.WQ)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
-
- // Bind to wildcard.
- if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
-
- // Start listening.
- if err := c.EP.Listen(10); err != nil {
- t.Fatalf("Listen failed: %s", err)
- }
-
- // Send the same SYN packet multiple times. We should still get a valid SYN-ACK
- // reply.
- irs := seqnum.Value(context.TestInitialSequenceNumber)
- for i := 0; i < 5; i++ {
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: irs,
- RcvWnd: 30000,
- })
- }
-
- // Receive the SYN-ACK reply.
- tcpCheckers := []checker.TransportChecker{
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagAck | header.TCPFlagSyn),
- checker.TCPAckNum(uint32(irs) + 1),
- }
- checker.IPv4(t, c.GetPacket(), checker.TCP(tcpCheckers...))
-}
-
-func TestSynRcvdBadSeqNumber(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- // Create TCP endpoint.
- var err tcpip.Error
- c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &c.WQ)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
-
- // Bind to wildcard.
- if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
-
- // Start listening.
- if err := c.EP.Listen(10); err != nil {
- t.Fatalf("Listen failed: %s", err)
- }
-
- // Send a SYN to get a SYN-ACK. This should put the ep into SYN-RCVD state
- irs := seqnum.Value(context.TestInitialSequenceNumber)
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: irs,
- RcvWnd: 30000,
- })
-
- // Receive the SYN-ACK reply.
- b := c.GetPacket()
- tcpHdr := header.TCP(header.IPv4(b).Payload())
- iss := seqnum.Value(tcpHdr.SequenceNumber())
- tcpCheckers := []checker.TransportChecker{
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagAck | header.TCPFlagSyn),
- checker.TCPAckNum(uint32(irs) + 1),
- }
- checker.IPv4(t, b, checker.TCP(tcpCheckers...))
-
- // Now send a packet with an out-of-window sequence number
- largeSeqnum := irs + seqnum.Value(tcpHdr.WindowSize()) + 1
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck,
- SeqNum: largeSeqnum,
- AckNum: iss + 1,
- RcvWnd: 30000,
- })
-
- // Should receive an ACK with the expected SEQ number
- b = c.GetPacket()
- tcpCheckers = []checker.TransportChecker{
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagAck),
- checker.TCPAckNum(uint32(irs) + 1),
- checker.TCPSeqNum(uint32(iss + 1)),
- }
- checker.IPv4(t, b, checker.TCP(tcpCheckers...))
-
- // Now that the socket replied appropriately with the ACK,
- // complete the connection to test that the large SEQ num
- // did not change the state from SYN-RCVD.
-
- // Get setup to be notified about connection establishment.
- we, ch := waiter.NewChannelEntry(nil)
- c.WQ.EventRegister(&we, waiter.ReadableEvents)
- defer c.WQ.EventUnregister(&we)
-
- // Send ACK to move to ESTABLISHED state.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck,
- SeqNum: irs + 1,
- AckNum: iss + 1,
- RcvWnd: 30000,
- })
-
- <-ch
- newEP, _, err := c.EP.Accept(nil)
- if err != nil {
- t.Fatalf("Accept failed: %s", err)
- }
-
- // Now verify that the TCP socket is usable and in a connected state.
- data := "Don't panic"
- var r strings.Reader
- r.Reset(data)
- if _, err := newEP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- pkt := c.GetPacket()
- tcpHdr = header.IPv4(pkt).Payload()
- if string(tcpHdr.Payload()) != data {
- t.Fatalf("unexpected data: got %s, want %s", string(tcpHdr.Payload()), data)
- }
-}
-
-func TestPassiveConnectionAttemptIncrement(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- ep, err := c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &c.WQ)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
- c.EP = ep
- if err := ep.Bind(tcpip.FullAddress{Addr: context.StackAddr, Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
- if got, want := tcp.EndpointState(ep.State()), tcp.StateBound; got != want {
- t.Errorf("unexpected endpoint state: want %s, got %s", want, got)
- }
- if err := c.EP.Listen(1); err != nil {
- t.Fatalf("Listen failed: %s", err)
- }
- if got, want := tcp.EndpointState(c.EP.State()), tcp.StateListen; got != want {
- t.Errorf("unexpected endpoint state: want %s, got %s", want, got)
- }
-
- stats := c.Stack().Stats()
- want := stats.TCP.PassiveConnectionOpenings.Value() + 1
-
- srcPort := uint16(context.TestPort)
- executeHandshake(t, c, srcPort+1, false)
-
- we, ch := waiter.NewChannelEntry(nil)
- c.WQ.EventRegister(&we, waiter.ReadableEvents)
- defer c.WQ.EventUnregister(&we)
-
- // Verify that there is only one acceptable connection at this point.
- _, _, err = c.EP.Accept(nil)
- if cmp.Equal(&tcpip.ErrWouldBlock{}, err) {
- // Wait for connection to be established.
- select {
- case <-ch:
- _, _, err = c.EP.Accept(nil)
- if err != nil {
- t.Fatalf("Accept failed: %s", err)
- }
-
- case <-time.After(1 * time.Second):
- t.Fatalf("Timed out waiting for accept")
- }
- }
-
- if got := stats.TCP.PassiveConnectionOpenings.Value(); got != want {
- t.Errorf("got stats.TCP.PassiveConnectionOpenings.Value() = %d, want = %d", got, want)
- }
-}
-
-func TestPassiveFailedConnectionAttemptIncrement(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- stats := c.Stack().Stats()
- ep, err := c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &c.WQ)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
- c.EP = ep
- if err := c.EP.Bind(tcpip.FullAddress{Addr: context.StackAddr, Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
- if err := c.EP.Listen(1); err != nil {
- t.Fatalf("Listen failed: %s", err)
- }
-
- srcPort := uint16(context.TestPort)
-	// Now attempt a handshake; it will fill up the accept backlog.
- executeHandshake(t, c, srcPort, false)
-
-	// Give time for the final ACK to be processed, as otherwise the next handshake could
- // get accepted before the previous one based on goroutine scheduling.
- time.Sleep(50 * time.Millisecond)
-
- want := stats.TCP.ListenOverflowSynDrop.Value() + 1
-
-	// Now send one more SYN request; this one should get dropped as the
-	// accept backlog is already full.
- c.SendPacket(nil, &context.Headers{
- SrcPort: srcPort + 2,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: seqnum.Value(context.TestInitialSequenceNumber),
- RcvWnd: 30000,
- })
-
- checkValid := func() []error {
- var errors []error
- if got := stats.TCP.ListenOverflowSynDrop.Value(); got != want {
- errors = append(errors, fmt.Errorf("got stats.TCP.ListenOverflowSynDrop.Value() = %d, want = %d", got, want))
- }
- if got := c.EP.Stats().(*tcp.Stats).ReceiveErrors.ListenOverflowSynDrop.Value(); got != want {
- errors = append(errors, fmt.Errorf("got EP stats Stats.ReceiveErrors.ListenOverflowSynDrop = %d, want = %d", got, want))
- }
- return errors
- }
-
- start := time.Now()
- for time.Since(start) < time.Minute && len(checkValid()) > 0 {
- time.Sleep(50 * time.Millisecond)
- }
- for _, err := range checkValid() {
- t.Error(err)
- }
- if t.Failed() {
- t.FailNow()
- }
-
- we, ch := waiter.NewChannelEntry(nil)
- c.WQ.EventRegister(&we, waiter.ReadableEvents)
- defer c.WQ.EventUnregister(&we)
-
-	// Now check that there is one acceptable connection.
- _, _, err = c.EP.Accept(nil)
- if cmp.Equal(&tcpip.ErrWouldBlock{}, err) {
- // Wait for connection to be established.
- <-ch
- _, _, err = c.EP.Accept(nil)
- if err != nil {
- t.Fatalf("Accept failed: %s", err)
- }
- }
-}
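// Editor's sketch: the ListenOverflowSynDrop counters checked above track a
// listener decision of roughly this shape. Illustrative only; the real
// listener also has SYN-cookie handling that is not modelled here.
func shouldDropIncomingSyn(pendingConnections, backlog int) bool {
	// Once the queue of connections that have not yet been Accept()ed is at
	// the configured backlog, further SYNs are dropped and the overflow
	// counters are incremented.
	return pendingConnections >= backlog
}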
-
-func TestListenDropIncrement(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- stats := c.Stack().Stats()
- c.Create(-1 /*epRcvBuf*/)
-
- if err := c.EP.Bind(tcpip.FullAddress{Addr: context.StackAddr, Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
- if err := c.EP.Listen(1 /*backlog*/); err != nil {
- t.Fatalf("Listen failed: %s", err)
- }
-
- initialDropped := stats.DroppedPackets.Value()
-
- // Send RST, FIN segments, that are expected to be dropped by the listener.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagRst,
- })
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagFin,
- })
-
- // To ensure that the RST, FIN sent earlier are indeed received and ignored
- // by the listener, send a SYN and wait for the SYN to be ACKd.
- irs := seqnum.Value(context.TestInitialSequenceNumber)
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: irs,
- })
- checker.IPv4(t, c.GetPacket(), checker.TCP(checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagAck|header.TCPFlagSyn),
- checker.TCPAckNum(uint32(irs)+1),
- ))
-
- if got, want := stats.DroppedPackets.Value(), initialDropped+2; got != want {
- t.Fatalf("got stats.DroppedPackets.Value() = %d, want = %d", got, want)
- }
-}
-
-func TestEndpointBindListenAcceptState(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
- wq := &waiter.Queue{}
- ep, err := c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, wq)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
-
- if err := ep.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
- if got, want := tcp.EndpointState(ep.State()), tcp.StateBound; got != want {
- t.Errorf("unexpected endpoint state: want %s, got %s", want, got)
- }
-
- ept := endpointTester{ep}
- ept.CheckReadError(t, &tcpip.ErrNotConnected{})
- if got := ep.Stats().(*tcp.Stats).ReadErrors.NotConnected.Value(); got != 1 {
-		t.Errorf("got EP stats Stats.ReadErrors.NotConnected = %d, want = %d", got, 1)
- }
-
- if err := ep.Listen(10); err != nil {
- t.Fatalf("Listen failed: %s", err)
- }
- if got, want := tcp.EndpointState(ep.State()), tcp.StateListen; got != want {
- t.Errorf("unexpected endpoint state: want %s, got %s", want, got)
- }
-
- c.PassiveConnectWithOptions(100, 5, header.TCPSynOptions{MSS: defaultIPv4MSS})
-
- // Try to accept the connection.
- we, ch := waiter.NewChannelEntry(nil)
- wq.EventRegister(&we, waiter.ReadableEvents)
- defer wq.EventUnregister(&we)
-
- aep, _, err := ep.Accept(nil)
- if cmp.Equal(&tcpip.ErrWouldBlock{}, err) {
- // Wait for connection to be established.
- select {
- case <-ch:
- aep, _, err = ep.Accept(nil)
- if err != nil {
- t.Fatalf("Accept failed: %s", err)
- }
-
- case <-time.After(1 * time.Second):
- t.Fatalf("Timed out waiting for accept")
- }
- }
- if got, want := tcp.EndpointState(aep.State()), tcp.StateEstablished; got != want {
- t.Errorf("unexpected endpoint state: want %s, got %s", want, got)
- }
- {
- err := aep.Connect(tcpip.FullAddress{Addr: context.TestAddr, Port: context.TestPort})
- if d := cmp.Diff(&tcpip.ErrAlreadyConnected{}, err); d != "" {
- t.Errorf("Connect(...) mismatch (-want +got):\n%s", d)
- }
- }
- // Listening endpoint remains in listen state.
- if got, want := tcp.EndpointState(ep.State()), tcp.StateListen; got != want {
- t.Errorf("unexpected endpoint state: want %s, got %s", want, got)
- }
-
- ep.Close()
- // Give worker goroutines time to receive the close notification.
- time.Sleep(1 * time.Second)
- if got, want := tcp.EndpointState(ep.State()), tcp.StateClose; got != want {
- t.Errorf("unexpected endpoint state: want %s, got %s", want, got)
- }
- // Accepted endpoint remains open when the listen endpoint is closed.
- if got, want := tcp.EndpointState(aep.State()), tcp.StateEstablished; got != want {
- t.Errorf("unexpected endpoint state: want %s, got %s", want, got)
- }
-
-}
-
-// This test verifies that the auto tuning does not grow the receive buffer if
-// the application is not reading the data actively.
-func TestReceiveBufferAutoTuningApplicationLimited(t *testing.T) {
- const mtu = 1500
- const mss = mtu - header.IPv4MinimumSize - header.TCPMinimumSize
-
- c := context.New(t, mtu)
- defer c.Cleanup()
-
- stk := c.Stack()
-	// Set lower limits for auto-tuning tests. This is required because the
-	// test stops the worker, which can cause packets to be dropped once the
-	// segment queue holding unprocessed packets (limited to 500) fills up.
- const receiveBufferSize = 80 << 10 // 80KB.
- const maxReceiveBufferSize = receiveBufferSize * 10
- {
- opt := tcpip.TCPReceiveBufferSizeRangeOption{Min: 1, Default: receiveBufferSize, Max: maxReceiveBufferSize}
- if err := stk.SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- t.Fatalf("SetTransportProtocolOption(%d, &%#v): %s", tcp.ProtocolNumber, opt, err)
- }
- }
-
- // Enable auto-tuning.
- {
- opt := tcpip.TCPModerateReceiveBufferOption(true)
- if err := stk.SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- t.Fatalf("SetTransportProtocolOption(%d, &%T(%t)): %s", tcp.ProtocolNumber, opt, opt, err)
- }
- }
- // Change the expected window scale to match the value needed for the
- // maximum buffer size defined above.
- c.WindowScale = uint8(tcp.FindWndScale(maxReceiveBufferSize))
-
- rawEP := c.CreateConnectedWithOptions(header.TCPSynOptions{TS: true, WS: 4})
-
- // NOTE: The timestamp values in the sent packets are meaningless to the
- // peer so we just increment the timestamp value by 1 every batch as we
- // are not really using them for anything. Send a single byte to verify
- // the advertised window.
- tsVal := rawEP.TSVal + 1
-
- // Introduce a 25ms latency by delaying the first byte.
- latency := 25 * time.Millisecond
- time.Sleep(latency)
-	// Send an initial payload with at least segment overhead size. The receive
- // window would not grow for smaller segments.
- rawEP.SendPacketWithTS(make([]byte, tcp.SegSize), tsVal)
-
- pkt := rawEP.VerifyAndReturnACKWithTS(tsVal)
- rcvWnd := header.TCP(header.IPv4(pkt).Payload()).WindowSize()
-
- time.Sleep(25 * time.Millisecond)
-
- // Allocate a large enough payload for the test.
- payloadSize := receiveBufferSize * 2
- b := make([]byte, payloadSize)
-
- worker := (c.EP).(interface {
- StopWork()
- ResumeWork()
- })
- tsVal++
-
- // Stop the worker goroutine.
- worker.StopWork()
- start := 0
- end := payloadSize / 2
- packetsSent := 0
- for ; start < end; start += mss {
- packetEnd := start + mss
- if start+mss > end {
- packetEnd = end
- }
- rawEP.SendPacketWithTS(b[start:packetEnd], tsVal)
- packetsSent++
- }
-
- // Resume the worker so that it only sees the packets once all of them
- // are waiting to be read.
- worker.ResumeWork()
-
- // Since we sent almost the full receive buffer worth of data (some may have
- // been dropped due to segment overheads), we should get a zero window back.
- pkt = c.GetPacket()
- tcpHdr := header.TCP(header.IPv4(pkt).Payload())
- gotRcvWnd := tcpHdr.WindowSize()
- wantAckNum := tcpHdr.AckNumber()
- if got, want := int(gotRcvWnd), 0; got != want {
- t.Fatalf("got rcvWnd: %d, want: %d", got, want)
- }
-
- time.Sleep(25 * time.Millisecond)
-	// Send more data while the receive buffer is exhausted.
- rawEP.SendPacketWithTS(b[start:start+mss], tsVal)
-
- // Now read all the data from the endpoint and verify that advertised
- // window increases to the full available buffer size.
- for {
- _, err := c.EP.Read(ioutil.Discard, tcpip.ReadOptions{})
- if cmp.Equal(&tcpip.ErrWouldBlock{}, err) {
- break
- }
- }
-
-	// Verify that we receive a non-zero window update ACK. When running
-	// under the thread sanitizer this test can end up sending more than one
-	// ACK, one of which is for the non-zero window.
- p := c.GetPacket()
- checker.IPv4(t, p, checker.TCP(
- checker.TCPAckNum(wantAckNum),
- func(t *testing.T, h header.Transport) {
- tcp, ok := h.(header.TCP)
- if !ok {
- return
- }
-			// We allow a 10% upward error margin here because the initial window we
-			// got was measured after 1 segment was already in the receive buffer queue.
- tolerance := 1.1
- if w := tcp.WindowSize(); w == 0 || w > uint16(float64(rcvWnd)*tolerance) {
- t.Errorf("expected a non-zero window: got %d, want <= %d", w, uint16(float64(rcvWnd)*tolerance))
- }
- },
- ))
-}
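// Editor's sketch: the zero-window expectation above follows from computing
// the advertised window out of the free receive-buffer space, roughly as
// below. This is an illustration under simplifying assumptions (no SWS
// avoidance, scaling applied directly); the endpoint's real computation is
// more involved and the names here are made up.
func advertisedWindow(rcvBufSize, unreadBytes int, wndScale uint8) uint16 {
	free := rcvBufSize - unreadBytes
	if free < 0 {
		free = 0
	}
	// The on-the-wire window field is the free space shifted down by the
	// negotiated window scale, clamped to the 16-bit field.
	wnd := free >> int(wndScale)
	if wnd > 0xffff {
		wnd = 0xffff
	}
	return uint16(wnd)
}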
-
-// This test verifies that the advertised window is auto-tuned up as the
-// application is reading the data that is being received.
-func TestReceiveBufferAutoTuning(t *testing.T) {
- const mtu = 1500
- const mss = mtu - header.IPv4MinimumSize - header.TCPMinimumSize
-
- c := context.New(t, mtu)
- defer c.Cleanup()
-
- // Enable Auto-tuning.
- stk := c.Stack()
- // Disable out of window rate limiting for this test by setting it to 0 as we
- // use out of window ACKs to measure the advertised window.
- var tcpInvalidRateLimit stack.TCPInvalidRateLimitOption
- if err := stk.SetOption(tcpInvalidRateLimit); err != nil {
-		t.Fatalf("stk.SetOption(%#v) = %s", tcpInvalidRateLimit, err)
- }
-
- const receiveBufferSize = 80 << 10 // 80KB.
- const maxReceiveBufferSize = receiveBufferSize * 10
- {
- opt := tcpip.TCPReceiveBufferSizeRangeOption{Min: 1, Default: receiveBufferSize, Max: maxReceiveBufferSize}
- if err := stk.SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- t.Fatalf("SetTransportProtocolOption(%d, &%#v): %s", tcp.ProtocolNumber, opt, err)
- }
- }
-
- // Enable auto-tuning.
- {
- opt := tcpip.TCPModerateReceiveBufferOption(true)
- if err := stk.SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- t.Fatalf("SetTransportProtocolOption(%d, &%T(%t)): %s", tcp.ProtocolNumber, opt, opt, err)
- }
- }
- // Change the expected window scale to match the value needed for the
-	// maximum buffer size used by the stack.
- c.WindowScale = uint8(tcp.FindWndScale(maxReceiveBufferSize))
-
- rawEP := c.CreateConnectedWithOptions(header.TCPSynOptions{TS: true, WS: 4})
- tsVal := rawEP.TSVal
- rawEP.NextSeqNum--
- rawEP.SendPacketWithTS(nil, tsVal)
- rawEP.NextSeqNum++
- pkt := rawEP.VerifyAndReturnACKWithTS(tsVal)
- curRcvWnd := int(header.TCP(header.IPv4(pkt).Payload()).WindowSize()) << c.WindowScale
- scaleRcvWnd := func(rcvWnd int) uint16 {
- return uint16(rcvWnd >> c.WindowScale)
- }
- // Allocate a large array to send to the endpoint.
- b := make([]byte, receiveBufferSize*48)
-
-	// In every iteration we will send double the number of bytes sent in
-	// the previous iteration and read the same amount from the app. The
-	// receive window should grow by at least 2x the bytes read by the app
-	// in every RTT.
- offset := 0
- payloadSize := receiveBufferSize / 8
- worker := (c.EP).(interface {
- StopWork()
- ResumeWork()
- })
- latency := 1 * time.Millisecond
- for i := 0; i < 5; i++ {
- tsVal++
-
- // Stop the worker goroutine.
- worker.StopWork()
- start := offset
- end := offset + payloadSize
- totalSent := 0
- packetsSent := 0
- for ; start < end; start += mss {
- rawEP.SendPacketWithTS(b[start:start+mss], tsVal)
- totalSent += mss
- packetsSent++
- }
-
- // Resume it so that it only sees the packets once all of them
- // are waiting to be read.
- worker.ResumeWork()
-
- // Give 1ms for the worker to process the packets.
- time.Sleep(1 * time.Millisecond)
-
- lastACK := c.GetPacket()
- // Discard any intermediate ACKs and only check the last ACK we get in a
-		// short time period of a few ms.
- for {
- time.Sleep(1 * time.Millisecond)
- pkt := c.GetPacketNonBlocking()
- if pkt == nil {
- break
- }
- lastACK = pkt
- }
- if got, want := int(header.TCP(header.IPv4(lastACK).Payload()).WindowSize()), int(scaleRcvWnd(curRcvWnd)); got > want {
- t.Fatalf("advertised window got: %d, want <= %d", got, want)
- }
-
- // Now read all the data from the endpoint and invoke the
- // moderation API to allow for receive buffer auto-tuning
- // to happen before we measure the new window.
- totalCopied := 0
- for {
- res, err := c.EP.Read(ioutil.Discard, tcpip.ReadOptions{})
- if cmp.Equal(&tcpip.ErrWouldBlock{}, err) {
- break
- }
- totalCopied += res.Count
- }
-
- // Invoke the moderation API. This is required for auto-tuning
- // to happen. This method is normally expected to be invoked
- // from a higher layer than tcpip.Endpoint. So we simulate
- // copying to userspace by invoking it explicitly here.
- c.EP.ModerateRecvBuf(totalCopied)
-
- // Now send a keep-alive packet to trigger an ACK so that we can
- // measure the new window.
- rawEP.NextSeqNum--
- rawEP.SendPacketWithTS(nil, tsVal)
- rawEP.NextSeqNum++
-
- if i == 0 {
-			// In the first iteration the receiver-based RTT is not yet
-			// known; as a result the moderation code should not
- // increase the advertised window.
- rawEP.VerifyACKRcvWnd(scaleRcvWnd(curRcvWnd))
- } else {
-			// The read loop above could generate an ACK if the window had dropped to
-			// zero and the read then opened it up.
- lastACK := c.GetPacket()
- // Discard any intermediate ACKs and only check the last ACK we get in a
-			// short time period of a few ms.
- for {
- time.Sleep(1 * time.Millisecond)
- pkt := c.GetPacketNonBlocking()
- if pkt == nil {
- break
- }
- lastACK = pkt
- }
- curRcvWnd = int(header.TCP(header.IPv4(lastACK).Payload()).WindowSize()) << c.WindowScale
-			// If the new current window is close to maxReceiveBufferSize then terminate
- // the loop. This can happen before all iterations are done due to timing
- // differences when running the test.
- if int(float64(curRcvWnd)*1.1) > maxReceiveBufferSize/2 {
- break
- }
-			// Increase the latency after the first two iterations to
-			// establish a low RTT value in the receiver, since it
-			// only tracks the lowest value. This ensures that when
-			// ModerateRecvBuf is called the elapsed time is always >
-			// rtt. Without this the test is flaky due to
-			// scheduling/wakeup delays.
- latency += 50 * time.Millisecond
- }
- time.Sleep(latency)
- offset += payloadSize
- payloadSize *= 2
- }
- // Check that at the end of our iterations the receive window grew close to the maximum
- // permissible size of maxReceiveBufferSize/2
- if got, want := int(float64(curRcvWnd)*1.1), maxReceiveBufferSize/2; got < want {
- t.Fatalf("unexpected rcvWnd got: %d, want > %d", got, want)
- }
-
-}
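// Editor's sketch of the receive-buffer moderation idea that both auto-tuning
// tests above exercise. This is a rough illustration, not ModerateRecvBuf
// itself: grow the receive buffer (and hence the window that can be
// advertised) when the application drains close to a full buffer within one
// receiver-side RTT, never exceeding the configured maximum. Assumes the
// standard library "time" package, which this file already imports.
type rcvBufModerator struct {
	rtt         time.Duration
	lastAdjust  time.Time
	copiedInRTT int
	bufSize     int
	maxBufSize  int
}

// onCopied is called with the number of bytes the application just copied out.
func (m *rcvBufModerator) onCopied(n int, now time.Time) {
	m.copiedInRTT += n
	if now.Sub(m.lastAdjust) < m.rtt {
		return
	}
	// Allow roughly twice the bytes drained in the last RTT so the sender can
	// keep the pipe full while the application keeps reading.
	if want := 2 * m.copiedInRTT; want > m.bufSize {
		m.bufSize = want
		if m.bufSize > m.maxBufSize {
			m.bufSize = m.maxBufSize
		}
	}
	m.copiedInRTT = 0
	m.lastAdjust = now
}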
-
-func TestDelayEnabled(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
- checkDelayOption(t, c, false, false) // Delay is disabled by default.
-
- for _, delayEnabled := range []bool{false, true} {
- t.Run(fmt.Sprintf("delayEnabled=%t", delayEnabled), func(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
- opt := tcpip.TCPDelayEnabled(delayEnabled)
- if err := c.Stack().SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- t.Fatalf("SetTransportProtocolOption(%d, &%T(%t)): %s", tcp.ProtocolNumber, opt, delayEnabled, err)
- }
- checkDelayOption(t, c, opt, delayEnabled)
- })
- }
-}
-
-func checkDelayOption(t *testing.T, c *context.Context, wantDelayEnabled tcpip.TCPDelayEnabled, wantDelayOption bool) {
- t.Helper()
-
- var gotDelayEnabled tcpip.TCPDelayEnabled
- if err := c.Stack().TransportProtocolOption(tcp.ProtocolNumber, &gotDelayEnabled); err != nil {
- t.Fatalf("TransportProtocolOption(tcp, &gotDelayEnabled) failed: %s", err)
- }
- if gotDelayEnabled != wantDelayEnabled {
- t.Errorf("TransportProtocolOption(tcp, &gotDelayEnabled) got %t, want %t", gotDelayEnabled, wantDelayEnabled)
- }
-
- ep, err := c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, new(waiter.Queue))
- if err != nil {
- t.Fatalf("NewEndPoint(tcp, ipv4, new(waiter.Queue)) failed: %s", err)
- }
- gotDelayOption := ep.SocketOptions().GetDelayOption()
- if gotDelayOption != wantDelayOption {
- t.Errorf("ep.GetSockOptBool(tcpip.DelayOption) got: %t, want: %t", gotDelayOption, wantDelayOption)
- }
-}
-
-func TestTCPLingerTimeout(t *testing.T) {
- c := context.New(t, 1500 /* mtu */)
- defer c.Cleanup()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
-
- testCases := []struct {
- name string
- tcpLingerTimeout time.Duration
- want time.Duration
- }{
- {"NegativeLingerTimeout", -123123, -1},
-		// Zero is treated the same as the stack's default TCP_LINGER2 timeout.
- {"ZeroLingerTimeout", 0, tcp.DefaultTCPLingerTimeout},
- {"InRangeLingerTimeout", 10 * time.Second, 10 * time.Second},
-		// Values > the stack's TCPLingerTimeout are capped to the stack's
-		// value. Defaults to tcp.DefaultTCPLingerTimeout (60 seconds).
- {"AboveMaxLingerTimeout", tcp.MaxTCPLingerTimeout + 5*time.Second, tcp.MaxTCPLingerTimeout},
- }
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- v := tcpip.TCPLingerTimeoutOption(tc.tcpLingerTimeout)
- if err := c.EP.SetSockOpt(&v); err != nil {
- t.Fatalf("SetSockOpt(&%T(%s)) = %s", v, tc.tcpLingerTimeout, err)
- }
-
- v = 0
- if err := c.EP.GetSockOpt(&v); err != nil {
- t.Fatalf("GetSockOpt(&%T) = %s", v, err)
- }
- if got, want := time.Duration(v), tc.want; got != want {
- t.Fatalf("got linger timeout = %s, want = %s", got, want)
- }
- })
- }
-}
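// Editor's sketch of the clamping the table above expects when the linger
// timeout option is set (illustrative; the parameter names are made up and the
// real handling lives in the endpoint's SetSockOpt): negative values are
// stored as -1, zero selects the stack default, and values above the maximum
// are capped to it.
func clampLingerTimeout(v, stackDefault, stackMax time.Duration) time.Duration {
	switch {
	case v < 0:
		return -1
	case v == 0:
		return stackDefault
	case v > stackMax:
		return stackMax
	default:
		return v
	}
}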
-
-func TestTCPTimeWaitRSTIgnored(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- wq := &waiter.Queue{}
- ep, err := c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, wq)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
- if err := ep.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
-
- if err := ep.Listen(10); err != nil {
- t.Fatalf("Listen failed: %s", err)
- }
-
- // Send a SYN request.
- iss := seqnum.Value(context.TestInitialSequenceNumber)
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: iss,
- RcvWnd: 30000,
- })
-
- // Receive the SYN-ACK reply.
- b := c.GetPacket()
- tcpHdr := header.TCP(header.IPv4(b).Payload())
- c.IRS = seqnum.Value(tcpHdr.SequenceNumber())
-
- ackHeaders := &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck,
- SeqNum: iss + 1,
- AckNum: c.IRS + 1,
- }
-
- // Send ACK.
- c.SendPacket(nil, ackHeaders)
-
- // Try to accept the connection.
- we, ch := waiter.NewChannelEntry(nil)
- wq.EventRegister(&we, waiter.ReadableEvents)
- defer wq.EventUnregister(&we)
-
- c.EP, _, err = ep.Accept(nil)
- if cmp.Equal(&tcpip.ErrWouldBlock{}, err) {
- // Wait for connection to be established.
- select {
- case <-ch:
- c.EP, _, err = ep.Accept(nil)
- if err != nil {
- t.Fatalf("Accept failed: %s", err)
- }
-
- case <-time.After(1 * time.Second):
- t.Fatalf("Timed out waiting for accept")
- }
- }
-
- c.EP.Close()
- checker.IPv4(t, c.GetPacket(), checker.TCP(
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS+1)),
- checker.TCPAckNum(uint32(iss)+1),
- checker.TCPFlags(header.TCPFlagFin|header.TCPFlagAck)))
-
- finHeaders := &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck | header.TCPFlagFin,
- SeqNum: iss + 1,
- AckNum: c.IRS + 2,
- }
-
- c.SendPacket(nil, finHeaders)
-
- // Get the ACK to the FIN we just sent.
- checker.IPv4(t, c.GetPacket(), checker.TCP(
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS+2)),
- checker.TCPAckNum(uint32(iss)+2),
- checker.TCPFlags(header.TCPFlagAck)))
-
- // Now send a RST and this should be ignored and not
- // generate an ACK.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagRst,
- SeqNum: iss + 1,
- AckNum: c.IRS + 2,
- })
-
- c.CheckNoPacketTimeout("unexpected packet received in TIME_WAIT state", 1*time.Second)
-
- // Out of order ACK should generate an immediate ACK in
- // TIME_WAIT.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck,
- SeqNum: iss + 1,
- AckNum: c.IRS + 3,
- })
-
- checker.IPv4(t, c.GetPacket(), checker.TCP(
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS+2)),
- checker.TCPAckNum(uint32(iss)+2),
- checker.TCPFlags(header.TCPFlagAck)))
-}
-
-func TestTCPTimeWaitOutOfOrder(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- wq := &waiter.Queue{}
- ep, err := c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, wq)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
- if err := ep.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
-
- if err := ep.Listen(10); err != nil {
- t.Fatalf("Listen failed: %s", err)
- }
-
- // Send a SYN request.
- iss := seqnum.Value(context.TestInitialSequenceNumber)
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: iss,
- RcvWnd: 30000,
- })
-
- // Receive the SYN-ACK reply.
- b := c.GetPacket()
- tcpHdr := header.TCP(header.IPv4(b).Payload())
- c.IRS = seqnum.Value(tcpHdr.SequenceNumber())
-
- ackHeaders := &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck,
- SeqNum: iss + 1,
- AckNum: c.IRS + 1,
- }
-
- // Send ACK.
- c.SendPacket(nil, ackHeaders)
-
- // Try to accept the connection.
- we, ch := waiter.NewChannelEntry(nil)
- wq.EventRegister(&we, waiter.ReadableEvents)
- defer wq.EventUnregister(&we)
-
- c.EP, _, err = ep.Accept(nil)
- if cmp.Equal(&tcpip.ErrWouldBlock{}, err) {
- // Wait for connection to be established.
- select {
- case <-ch:
- c.EP, _, err = ep.Accept(nil)
- if err != nil {
- t.Fatalf("Accept failed: %s", err)
- }
-
- case <-time.After(1 * time.Second):
- t.Fatalf("Timed out waiting for accept")
- }
- }
-
- c.EP.Close()
- checker.IPv4(t, c.GetPacket(), checker.TCP(
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS+1)),
- checker.TCPAckNum(uint32(iss)+1),
- checker.TCPFlags(header.TCPFlagFin|header.TCPFlagAck)))
-
- finHeaders := &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck | header.TCPFlagFin,
- SeqNum: iss + 1,
- AckNum: c.IRS + 2,
- }
-
- c.SendPacket(nil, finHeaders)
-
- // Get the ACK to the FIN we just sent.
- checker.IPv4(t, c.GetPacket(), checker.TCP(
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS+2)),
- checker.TCPAckNum(uint32(iss)+2),
- checker.TCPFlags(header.TCPFlagAck)))
-
- // Out of order ACK should generate an immediate ACK in
- // TIME_WAIT.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck,
- SeqNum: iss + 1,
- AckNum: c.IRS + 3,
- })
-
- checker.IPv4(t, c.GetPacket(), checker.TCP(
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS+2)),
- checker.TCPAckNum(uint32(iss)+2),
- checker.TCPFlags(header.TCPFlagAck)))
-}
-
-func TestTCPTimeWaitNewSyn(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- wq := &waiter.Queue{}
- ep, err := c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, wq)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
- if err := ep.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
-
- if err := ep.Listen(10); err != nil {
- t.Fatalf("Listen failed: %s", err)
- }
-
- // Send a SYN request.
- iss := seqnum.Value(context.TestInitialSequenceNumber)
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: iss,
- RcvWnd: 30000,
- })
-
- // Receive the SYN-ACK reply.
- b := c.GetPacket()
- tcpHdr := header.TCP(header.IPv4(b).Payload())
- c.IRS = seqnum.Value(tcpHdr.SequenceNumber())
-
- ackHeaders := &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck,
- SeqNum: iss + 1,
- AckNum: c.IRS + 1,
- }
-
- // Send ACK.
- c.SendPacket(nil, ackHeaders)
-
- // Try to accept the connection.
- we, ch := waiter.NewChannelEntry(nil)
- wq.EventRegister(&we, waiter.ReadableEvents)
- defer wq.EventUnregister(&we)
-
- c.EP, _, err = ep.Accept(nil)
- if cmp.Equal(&tcpip.ErrWouldBlock{}, err) {
- // Wait for connection to be established.
- select {
- case <-ch:
- c.EP, _, err = ep.Accept(nil)
- if err != nil {
- t.Fatalf("Accept failed: %s", err)
- }
-
- case <-time.After(1 * time.Second):
- t.Fatalf("Timed out waiting for accept")
- }
- }
-
- c.EP.Close()
- checker.IPv4(t, c.GetPacket(), checker.TCP(
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS+1)),
- checker.TCPAckNum(uint32(iss)+1),
- checker.TCPFlags(header.TCPFlagFin|header.TCPFlagAck)))
-
- finHeaders := &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck | header.TCPFlagFin,
- SeqNum: iss + 1,
- AckNum: c.IRS + 2,
- }
-
- c.SendPacket(nil, finHeaders)
-
- // Get the ACK to the FIN we just sent.
- checker.IPv4(t, c.GetPacket(), checker.TCP(
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS+2)),
- checker.TCPAckNum(uint32(iss)+2),
- checker.TCPFlags(header.TCPFlagAck)))
-
- // Send a SYN request w/ sequence number lower than
- // the highest sequence number sent. We just reuse
- // the same number.
- iss = seqnum.Value(context.TestInitialSequenceNumber)
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: iss,
- RcvWnd: 30000,
- })
-
- c.CheckNoPacketTimeout("unexpected packet received in response to SYN", 1*time.Second)
-
-	// Drain any older notifications from the notification channel before attempting
-	// the 2nd connection.
- select {
- case <-ch:
- default:
- }
-
- // Send a SYN request w/ sequence number higher than
- // the highest sequence number sent.
- iss = iss.Add(3)
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: iss,
- RcvWnd: 30000,
- })
-
- // Receive the SYN-ACK reply.
- b = c.GetPacket()
-	tcpHdr = header.TCP(header.IPv4(b).Payload())
- c.IRS = seqnum.Value(tcpHdr.SequenceNumber())
-
- ackHeaders = &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck,
- SeqNum: iss + 1,
- AckNum: c.IRS + 1,
- }
-
- // Send ACK.
- c.SendPacket(nil, ackHeaders)
-
- // Try to accept the connection.
- c.EP, _, err = ep.Accept(nil)
- if cmp.Equal(&tcpip.ErrWouldBlock{}, err) {
- // Wait for connection to be established.
- select {
- case <-ch:
- c.EP, _, err = ep.Accept(nil)
- if err != nil {
- t.Fatalf("Accept failed: %s", err)
- }
-
- case <-time.After(1 * time.Second):
- t.Fatalf("Timed out waiting for accept")
- }
- }
-}
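// Editor's sketch: the two SYNs above probe the classic TIME_WAIT rule,
// restated here for reference (illustrative only, not the netstack code). A
// SYN for a 4-tuple that is sitting in TIME_WAIT may start a fresh incarnation
// of the connection only if its sequence number is beyond the highest sequence
// number seen on the old connection; otherwise it is silently ignored.
func seqGreaterThan(a, b uint32) bool {
	// Modular "greater than": the unsigned distance from b to a is non-zero
	// and less than half the 32-bit sequence space.
	d := a - b
	return d != 0 && d < 1<<31
}

func synAcceptableInTimeWait(synSeq, lastSeqSeen uint32) bool {
	return seqGreaterThan(synSeq, lastSeqSeen)
}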
-
-func TestTCPTimeWaitDuplicateFINExtendsTimeWait(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- // Set TCPTimeWaitTimeout to 5 seconds so that sockets are marked closed
- // after 5 seconds in TIME_WAIT state.
- tcpTimeWaitTimeout := 5 * time.Second
- opt := tcpip.TCPTimeWaitTimeoutOption(tcpTimeWaitTimeout)
- if err := c.Stack().SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- t.Fatalf("SetTransportProtocolOption(%d, &%T(%s)): %s", tcp.ProtocolNumber, opt, tcpTimeWaitTimeout, err)
- }
-
- want := c.Stack().Stats().TCP.EstablishedClosed.Value() + 1
-
- wq := &waiter.Queue{}
- ep, err := c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, wq)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
- if err := ep.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
-
- if err := ep.Listen(10); err != nil {
- t.Fatalf("Listen failed: %s", err)
- }
-
- // Send a SYN request.
- iss := seqnum.Value(context.TestInitialSequenceNumber)
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: iss,
- RcvWnd: 30000,
- })
-
- // Receive the SYN-ACK reply.
- b := c.GetPacket()
- tcpHdr := header.TCP(header.IPv4(b).Payload())
- c.IRS = seqnum.Value(tcpHdr.SequenceNumber())
-
- ackHeaders := &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck,
- SeqNum: iss + 1,
- AckNum: c.IRS + 1,
- }
-
- // Send ACK.
- c.SendPacket(nil, ackHeaders)
-
- // Try to accept the connection.
- we, ch := waiter.NewChannelEntry(nil)
- wq.EventRegister(&we, waiter.ReadableEvents)
- defer wq.EventUnregister(&we)
-
- c.EP, _, err = ep.Accept(nil)
- if cmp.Equal(&tcpip.ErrWouldBlock{}, err) {
- // Wait for connection to be established.
- select {
- case <-ch:
- c.EP, _, err = ep.Accept(nil)
- if err != nil {
- t.Fatalf("Accept failed: %s", err)
- }
-
- case <-time.After(1 * time.Second):
- t.Fatalf("Timed out waiting for accept")
- }
- }
-
- c.EP.Close()
- checker.IPv4(t, c.GetPacket(), checker.TCP(
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS+1)),
- checker.TCPAckNum(uint32(iss)+1),
- checker.TCPFlags(header.TCPFlagFin|header.TCPFlagAck)))
-
- finHeaders := &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck | header.TCPFlagFin,
- SeqNum: iss + 1,
- AckNum: c.IRS + 2,
- }
-
- c.SendPacket(nil, finHeaders)
-
- // Get the ACK to the FIN we just sent.
- checker.IPv4(t, c.GetPacket(), checker.TCP(
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS+2)),
- checker.TCPAckNum(uint32(iss)+2),
- checker.TCPFlags(header.TCPFlagAck)))
-
- time.Sleep(2 * time.Second)
-
- // Now send a duplicate FIN. This should cause the TIME_WAIT to extend
-	// by another 5 seconds and also send us a duplicate ACK, since it
-	// indicates that our final ACK was potentially lost.
- c.SendPacket(nil, finHeaders)
-
- // Get the ACK to the FIN we just sent.
- checker.IPv4(t, c.GetPacket(), checker.TCP(
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS+2)),
- checker.TCPAckNum(uint32(iss)+2),
- checker.TCPFlags(header.TCPFlagAck)))
-
-	// Sleep for 4 seconds so that at this point we are 1 second past the
-	// original tcpTimeWaitTimeout of 5 seconds.
- time.Sleep(4 * time.Second)
-
- // Send an ACK and it should not generate any packet as the socket
-	// should still be in TIME_WAIT for another 5 seconds due
- // to the duplicate FIN we sent earlier.
- *ackHeaders = *finHeaders
- ackHeaders.SeqNum = ackHeaders.SeqNum + 1
- ackHeaders.Flags = header.TCPFlagAck
- c.SendPacket(nil, ackHeaders)
-
- c.CheckNoPacketTimeout("unexpected packet received from endpoint in TIME_WAIT", 1*time.Second)
- // Now sleep for another 2 seconds so that we are past the
- // extended TIME_WAIT of 7 seconds (2 + 5).
- time.Sleep(2 * time.Second)
-
- // Resend the same ACK.
- c.SendPacket(nil, ackHeaders)
-
- // Receive the RST that should be generated as there is no valid
- // endpoint.
- checker.IPv4(t, c.GetPacket(), checker.TCP(
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(ackHeaders.AckNum)),
- checker.TCPAckNum(0),
- checker.TCPFlags(header.TCPFlagRst)))
-
- if got := c.Stack().Stats().TCP.EstablishedClosed.Value(); got != want {
- t.Errorf("got c.Stack().Stats().TCP.EstablishedClosed = %d, want = %d", got, want)
- }
- if got := c.Stack().Stats().TCP.CurrentEstablished.Value(); got != 0 {
- t.Errorf("got stats.TCP.CurrentEstablished.Value() = %d, want = 0", got)
- }
-}
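// Editor's sketch of the TIME_WAIT behaviour asserted above (illustrative, not
// the netstack state machine; "time" is the standard library package). A
// retransmitted FIN implies that the final ACK may have been lost, so the
// endpoint re-ACKs it and restarts the TIME_WAIT timer; once the timer finally
// fires the endpoint is released and later segments draw a RST.
type timeWait struct {
	expiry  time.Time
	timeout time.Duration
}

// onDuplicateFIN re-arms the timer and reports that an ACK should be sent.
func (tw *timeWait) onDuplicateFIN(now time.Time) (sendACK bool) {
	tw.expiry = now.Add(tw.timeout)
	return true
}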
-
-func TestTCPCloseWithData(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- // Set TCPTimeWaitTimeout to 5 seconds so that sockets are marked closed
- // after 5 seconds in TIME_WAIT state.
- tcpTimeWaitTimeout := 5 * time.Second
- opt := tcpip.TCPTimeWaitTimeoutOption(tcpTimeWaitTimeout)
- if err := c.Stack().SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- t.Fatalf("SetTransportProtocolOption(%d, &%T(%s)): %s", tcp.ProtocolNumber, opt, tcpTimeWaitTimeout, err)
- }
-
- wq := &waiter.Queue{}
- ep, err := c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, wq)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
- if err := ep.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
-
- if err := ep.Listen(10); err != nil {
- t.Fatalf("Listen failed: %s", err)
- }
-
- // Send a SYN request.
- iss := seqnum.Value(context.TestInitialSequenceNumber)
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: iss,
- RcvWnd: 30000,
- })
-
- // Receive the SYN-ACK reply.
- b := c.GetPacket()
- tcpHdr := header.TCP(header.IPv4(b).Payload())
- c.IRS = seqnum.Value(tcpHdr.SequenceNumber())
-
- ackHeaders := &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck,
- SeqNum: iss + 1,
- AckNum: c.IRS + 1,
- RcvWnd: 30000,
- }
-
- // Send ACK.
- c.SendPacket(nil, ackHeaders)
-
- // Try to accept the connection.
- we, ch := waiter.NewChannelEntry(nil)
- wq.EventRegister(&we, waiter.ReadableEvents)
- defer wq.EventUnregister(&we)
-
- c.EP, _, err = ep.Accept(nil)
- if cmp.Equal(&tcpip.ErrWouldBlock{}, err) {
- // Wait for connection to be established.
- select {
- case <-ch:
- c.EP, _, err = ep.Accept(nil)
- if err != nil {
- t.Fatalf("Accept failed: %s", err)
- }
-
- case <-time.After(1 * time.Second):
- t.Fatalf("Timed out waiting for accept")
- }
- }
-
- // Now trigger a passive close by sending a FIN.
- finHeaders := &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck | header.TCPFlagFin,
- SeqNum: iss + 1,
- AckNum: c.IRS + 2,
- RcvWnd: 30000,
- }
-
- c.SendPacket(nil, finHeaders)
-
- // Get the ACK to the FIN we just sent.
- checker.IPv4(t, c.GetPacket(), checker.TCP(
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS+1)),
- checker.TCPAckNum(uint32(iss)+2),
- checker.TCPFlags(header.TCPFlagAck)))
-
- // Now write a few bytes and then close the endpoint.
- data := []byte{1, 2, 3}
-
- var r bytes.Reader
- r.Reset(data)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- // Check that data is received.
- b = c.GetPacket()
- checker.IPv4(t, b,
- checker.PayloadLen(len(data)+header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
-			checker.TCPAckNum(uint32(iss)+2), // AckNum is the peer's iss + 2: the SYN and the FIN each consumed one sequence number.
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
-
- if p := b[header.IPv4MinimumSize+header.TCPMinimumSize:]; !bytes.Equal(data, p) {
- t.Errorf("got data = %x, want = %x", p, data)
- }
-
- c.EP.Close()
- // Check the FIN.
- checker.IPv4(t, c.GetPacket(), checker.TCP(
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS+1)+uint32(len(data))),
- checker.TCPAckNum(uint32(iss+2)),
- checker.TCPFlags(header.TCPFlagFin|header.TCPFlagAck)))
-
- // First send a partial ACK.
- ackHeaders = &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck,
- SeqNum: iss + 2,
- AckNum: c.IRS + 1 + seqnum.Value(len(data)-1),
- RcvWnd: 30000,
- }
- c.SendPacket(nil, ackHeaders)
-
- // Now send a full ACK.
- ackHeaders = &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck,
- SeqNum: iss + 2,
- AckNum: c.IRS + 1 + seqnum.Value(len(data)),
- RcvWnd: 30000,
- }
- c.SendPacket(nil, ackHeaders)
-
- // Now ACK the FIN.
- ackHeaders.AckNum++
- c.SendPacket(nil, ackHeaders)
-
- // Now send an ACK and we should get a RST back as the endpoint should
- // be in CLOSED state.
- ackHeaders = &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck,
- SeqNum: iss + 2,
- AckNum: c.IRS + 1 + seqnum.Value(len(data)),
- RcvWnd: 30000,
- }
- c.SendPacket(nil, ackHeaders)
-
- // Check the RST.
- checker.IPv4(t, c.GetPacket(), checker.TCP(
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(ackHeaders.AckNum)),
- checker.TCPAckNum(0),
- checker.TCPFlags(header.TCPFlagRst)))
-}
-
-func TestTCPUserTimeout(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
-
- waitEntry, notifyCh := waiter.NewChannelEntry(nil)
- c.WQ.EventRegister(&waitEntry, waiter.EventHUp)
- defer c.WQ.EventUnregister(&waitEntry)
-
- origEstablishedTimedout := c.Stack().Stats().TCP.EstablishedTimedout.Value()
-
- // Ensure that on the next retransmit timer fire, the user timeout has
- // expired.
- initRTO := 1 * time.Second
- userTimeout := initRTO / 2
- v := tcpip.TCPUserTimeoutOption(userTimeout)
- if err := c.EP.SetSockOpt(&v); err != nil {
-		t.Fatalf("c.EP.SetSockOpt(&%T(%s)): %s", v, userTimeout, err)
- }
-
- // Send some data and wait before ACKing it.
- view := make([]byte, 3)
- var r bytes.Reader
- r.Reset(view)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Write failed: %s", err)
- }
-
- next := uint32(c.IRS) + 1
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- checker.IPv4(t, c.GetPacket(),
- checker.PayloadLen(len(view)+header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(next),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
-
- // Wait for the retransmit timer to be fired and the user timeout to cause
- // close of the connection.
- select {
- case <-notifyCh:
- case <-time.After(2 * initRTO):
- t.Fatalf("connection still alive after %s, should have been closed after %s", 2*initRTO, userTimeout)
- }
-
- // No packet should be received as the connection should be silently
- // closed due to timeout.
- c.CheckNoPacket("unexpected packet received after userTimeout has expired")
-
- next += uint32(len(view))
-
- // The connection should be terminated after userTimeout has expired.
- // Send an ACK to trigger a RST from the stack as the endpoint should
- // be dead.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: seqnum.Value(next),
- RcvWnd: 30000,
- })
-
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(next),
- checker.TCPAckNum(uint32(0)),
- checker.TCPFlags(header.TCPFlagRst),
- ),
- )
-
- ept := endpointTester{c.EP}
- ept.CheckReadError(t, &tcpip.ErrTimeout{})
-
- if got, want := c.Stack().Stats().TCP.EstablishedTimedout.Value(), origEstablishedTimedout+1; got != want {
- t.Errorf("got c.Stack().Stats().TCP.EstablishedTimedout = %d, want = %d", got, want)
- }
- if got := c.Stack().Stats().TCP.CurrentConnected.Value(); got != 0 {
- t.Errorf("got stats.TCP.CurrentConnected.Value() = %d, want = 0", got)
- }
-}
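// Editor's sketch of how a TCP_USER_TIMEOUT style check can hang off the
// retransmit timer, which is what the test above relies on (an assumed shape,
// not netstack's timer code; "time" is the standard library): when the timer
// fires with data still unacknowledged, the connection is aborted if the
// oldest unacked data has been outstanding longer than the user timeout.
func abortOnRetransmitTimeout(now, oldestUnackedSentAt time.Time, userTimeout time.Duration) bool {
	return userTimeout != 0 && now.Sub(oldestUnackedSentAt) >= userTimeout
}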
-
-func TestKeepaliveWithUserTimeout(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRcvBuf */)
-
- origEstablishedTimedout := c.Stack().Stats().TCP.EstablishedTimedout.Value()
-
- const keepAliveIdle = 100 * time.Millisecond
- const keepAliveInterval = 3 * time.Second
- keepAliveIdleOption := tcpip.KeepaliveIdleOption(keepAliveIdle)
- if err := c.EP.SetSockOpt(&keepAliveIdleOption); err != nil {
- t.Fatalf("c.EP.SetSockOpt(&%T(%s)): %s", keepAliveIdleOption, keepAliveIdle, err)
- }
- keepAliveIntervalOption := tcpip.KeepaliveIntervalOption(keepAliveInterval)
- if err := c.EP.SetSockOpt(&keepAliveIntervalOption); err != nil {
- t.Fatalf("c.EP.SetSockOpt(&%T(%s)): %s", keepAliveIntervalOption, keepAliveInterval, err)
- }
- if err := c.EP.SetSockOptInt(tcpip.KeepaliveCountOption, 10); err != nil {
- t.Fatalf("c.EP.SetSockOptInt(tcpip.KeepaliveCountOption, 10): %s", err)
- }
- c.EP.SocketOptions().SetKeepAlive(true)
-
-	// Set userTimeout to the duration of 1 keepalive
-	// probe interval, which means that after the first probe is sent
-	// the second one should cause the connection to be
-	// closed due to userTimeout being hit.
- userTimeout := tcpip.TCPUserTimeoutOption(keepAliveInterval)
- if err := c.EP.SetSockOpt(&userTimeout); err != nil {
- t.Fatalf("c.EP.SetSockOpt(&%T(%s)): %s", userTimeout, keepAliveInterval, err)
- }
-
- // Check that the connection is still alive.
- ept := endpointTester{c.EP}
- ept.CheckReadError(t, &tcpip.ErrWouldBlock{})
-
-	// Now receive 1 keepalive, but don't ACK it.
- b := c.GetPacket()
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- checker.IPv4(t, b,
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)),
- checker.TCPAckNum(uint32(iss)),
- checker.TCPFlags(header.TCPFlagAck),
- ),
- )
-
-	// Sleep for a little over the keepalive interval to make sure
-	// the timer has time to fire after the last ACK and close
-	// the socket.
- time.Sleep(keepAliveInterval + keepAliveInterval/2)
-
- // The connection should be closed with a timeout.
- // Send an ACK to trigger a RST from the stack as the endpoint should
- // be dead.
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS + 1,
- RcvWnd: 30000,
- })
-
- checker.IPv4(t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS+1)),
- checker.TCPAckNum(uint32(0)),
- checker.TCPFlags(header.TCPFlagRst),
- ),
- )
-
- ept.CheckReadError(t, &tcpip.ErrTimeout{})
- if got, want := c.Stack().Stats().TCP.EstablishedTimedout.Value(), origEstablishedTimedout+1; got != want {
- t.Errorf("got c.Stack().Stats().TCP.EstablishedTimedout = %d, want = %d", got, want)
- }
- if got := c.Stack().Stats().TCP.CurrentConnected.Value(); got != 0 {
- t.Errorf("got stats.TCP.CurrentConnected.Value() = %d, want = 0", got)
- }
-}
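// Editor's sketch of how a user timeout interacts with keepalive probing, per
// the expectations above (illustrative only; parameter names are made up).
// With a user timeout set it takes precedence: the connection is torn down
// once it has been idle for at least the user timeout, even if fewer than the
// configured number of probes have gone unanswered.
func keepaliveShouldAbort(unackedProbes, maxProbes int, idle, userTimeout time.Duration) bool {
	if userTimeout != 0 {
		return idle >= userTimeout
	}
	return unackedProbes >= maxProbes
}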
-
-func TestIncreaseWindowOnRead(t *testing.T) {
-	// This test ensures that the endpoint sends an ACK
- // after read() when the window grows by more than 1 MSS.
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- const rcvBuf = 65535 * 10
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, rcvBuf)
-
- // Write chunks of ~30000 bytes. It's important that two
-	// payloads together are equal to or longer than the MSS.
- remain := rcvBuf * 2
- sent := 0
- data := make([]byte, defaultMTU/2)
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- for remain > len(data) {
- c.SendPacket(data, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss.Add(seqnum.Size(sent)),
- AckNum: c.IRS.Add(1),
- RcvWnd: 30000,
- })
- sent += len(data)
- remain -= len(data)
- pkt := c.GetPacket()
- checker.IPv4(t, pkt,
- checker.PayloadLen(header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)+uint32(sent)),
- checker.TCPFlags(header.TCPFlagAck),
- ),
- )
- // Break once the window drops below defaultMTU/2
- if wnd := header.TCP(header.IPv4(pkt).Payload()).WindowSize(); wnd < defaultMTU/2 {
- break
- }
- }
-
-	// We now have < 1 MSS of space in the receive buffer. Read more than 2 MSS
-	// worth of data to free up receive buffer space.
- w := tcpip.LimitedWriter{
- W: ioutil.Discard,
- // defaultMTU is a good enough estimate for the MSS used for this
- // connection.
- N: defaultMTU * 2,
- }
- for w.N != 0 {
- _, err := c.EP.Read(&w, tcpip.ReadOptions{})
- if err != nil {
- t.Fatalf("Read failed: %s", err)
- }
- }
-
-	// After reading > 1 MSS worth of data, the window has surely grown by more than 1 MSS. Check the ACK:
- checker.IPv4(t, c.GetPacket(),
- checker.PayloadLen(header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)+uint32(sent)),
- checker.TCPWindow(uint16(0xffff)),
- checker.TCPFlags(header.TCPFlagAck),
- ),
- )
-}
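// Editor's sketch of the receiver-side window-update rule that this test (and
// the buffer-resize variant that follows) depends on. Illustrative only: a
// pure window-update ACK is worth sending once the window that could be
// advertised exceeds the previously advertised one by at least one MSS, which
// is the classic receiver-side silly-window-syndrome avoidance threshold.
func shouldSendWindowUpdate(advertisableWnd, lastAdvertisedWnd, mss int) bool {
	return advertisableWnd-lastAdvertisedWnd >= mss
}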
-
-func TestIncreaseWindowOnBufferResize(t *testing.T) {
-	// This test ensures that the endpoint sends an ACK after the
-	// available receive buffer grows to more than 1 MSS.
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- const rcvBuf = 65535 * 10
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, rcvBuf)
-
- // Write chunks of ~30000 bytes. It's important that two
-	// payloads together are equal to or longer than the MSS.
- remain := rcvBuf
- sent := 0
- data := make([]byte, defaultMTU/2)
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- for remain > len(data) {
- c.SendPacket(data, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss.Add(seqnum.Size(sent)),
- AckNum: c.IRS.Add(1),
- RcvWnd: 30000,
- })
- sent += len(data)
- remain -= len(data)
- checker.IPv4(t, c.GetPacket(),
- checker.PayloadLen(header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)+uint32(sent)),
- checker.TCPWindowLessThanEq(0xffff),
- checker.TCPFlags(header.TCPFlagAck),
- ),
- )
- }
-
-	// Increasing the buffer should generate an ACK, since the
-	// window grew from a small value to one >= the MSS.
- c.EP.SocketOptions().SetReceiveBufferSize(rcvBuf*4, true /* notify */)
- checker.IPv4(t, c.GetPacket(),
- checker.PayloadLen(header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)+uint32(sent)),
- checker.TCPWindow(uint16(0xffff)),
- checker.TCPFlags(header.TCPFlagAck),
- ),
- )
-}
-
-func TestTCPDeferAccept(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.Create(-1)
-
- if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatal("Bind failed:", err)
- }
-
- if err := c.EP.Listen(10); err != nil {
- t.Fatal("Listen failed:", err)
- }
-
- const tcpDeferAccept = 1 * time.Second
- tcpDeferAcceptOption := tcpip.TCPDeferAcceptOption(tcpDeferAccept)
- if err := c.EP.SetSockOpt(&tcpDeferAcceptOption); err != nil {
- t.Fatalf("c.EP.SetSockOpt(&%T(%s)): %s", tcpDeferAcceptOption, tcpDeferAccept, err)
- }
-
- irs, iss := executeHandshake(t, c, context.TestPort, false /* synCookiesInUse */)
-
- _, _, err := c.EP.Accept(nil)
- if d := cmp.Diff(&tcpip.ErrWouldBlock{}, err); d != "" {
- t.Fatalf("c.EP.Accept(nil) mismatch (-want +got):\n%s", d)
- }
-
-	// Send data. This should make the endpoint available to Accept.
- c.SendPacket([]byte{1, 2, 3, 4}, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck,
- SeqNum: irs + 1,
- AckNum: iss + 1,
- })
-
- // Receive ACK for the data we sent.
- checker.IPv4(t, c.GetPacket(), checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagAck),
- checker.TCPSeqNum(uint32(iss+1)),
- checker.TCPAckNum(uint32(irs+5))))
-
- // Give a bit of time for the socket to be delivered to the accept queue.
- time.Sleep(50 * time.Millisecond)
- aep, _, err := c.EP.Accept(nil)
- if err != nil {
- t.Fatalf("got c.EP.Accept(nil) = %s, want: nil", err)
- }
-
- aep.Close()
- // Closing aep without reading the data should trigger a RST.
- checker.IPv4(t, c.GetPacket(), checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagRst|header.TCPFlagAck),
- checker.TCPSeqNum(uint32(iss+1)),
- checker.TCPAckNum(uint32(irs+5))))
-}
-
-func TestTCPDeferAcceptTimeout(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.Create(-1)
-
- if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil {
- t.Fatal("Bind failed:", err)
- }
-
- if err := c.EP.Listen(10); err != nil {
- t.Fatal("Listen failed:", err)
- }
-
- const tcpDeferAccept = 1 * time.Second
- tcpDeferAcceptOpt := tcpip.TCPDeferAcceptOption(tcpDeferAccept)
- if err := c.EP.SetSockOpt(&tcpDeferAcceptOpt); err != nil {
- t.Fatalf("c.EP.SetSockOpt(&%T(%s)) failed: %s", tcpDeferAcceptOpt, tcpDeferAccept, err)
- }
-
- irs, iss := executeHandshake(t, c, context.TestPort, false /* synCookiesInUse */)
-
- _, _, err := c.EP.Accept(nil)
- if d := cmp.Diff(&tcpip.ErrWouldBlock{}, err); d != "" {
- t.Fatalf("c.EP.Accept(nil) mismatch (-want +got):\n%s", d)
- }
-
-	// Sleep for a little over the tcpDeferAccept timeout.
- time.Sleep(tcpDeferAccept + 100*time.Millisecond)
-
- // On timeout expiry we should get a SYN-ACK retransmission.
- checker.IPv4(t, c.GetPacket(), checker.TCP(
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagAck|header.TCPFlagSyn),
- checker.TCPAckNum(uint32(irs)+1)))
-
-	// Send data. This should make the endpoint available to Accept.
- c.SendPacket([]byte{1, 2, 3, 4}, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: context.StackPort,
- Flags: header.TCPFlagAck,
- SeqNum: irs + 1,
- AckNum: iss + 1,
- })
-
- // Receive ACK for the data we sent.
- checker.IPv4(t, c.GetPacket(), checker.TCP(
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagAck),
- checker.TCPSeqNum(uint32(iss+1)),
- checker.TCPAckNum(uint32(irs+5))))
-
-	// Give some time for the endpoint to be delivered to the accept queue.
- time.Sleep(50 * time.Millisecond)
- aep, _, err := c.EP.Accept(nil)
- if err != nil {
- t.Fatalf("got c.EP.Accept(nil) = %s, want: nil", err)
- }
-
- aep.Close()
- // Closing aep without reading the data should trigger a RST.
- checker.IPv4(t, c.GetPacket(), checker.TCP(
- checker.SrcPort(context.StackPort),
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagRst|header.TCPFlagAck),
- checker.TCPSeqNum(uint32(iss+1)),
- checker.TCPAckNum(uint32(irs+5))))
-}
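// Editor's sketch of the TCP_DEFER_ACCEPT behaviour the two tests above walk
// through (an assumed shape, not the listener implementation; "time" is the
// standard library package). A completed handshake is withheld from the accept
// queue until the peer sends data; if the defer period expires first, the
// SYN-ACK is retransmitted while the endpoint keeps waiting for data.
func deferAcceptState(dataArrived bool, handshakeDoneAt, now time.Time, deferPeriod time.Duration) (deliverToAcceptQueue, retransmitSynAck bool) {
	deliverToAcceptQueue = dataArrived
	retransmitSynAck = !dataArrived && now.Sub(handshakeDoneAt) >= deferPeriod
	return deliverToAcceptQueue, retransmitSynAck
}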
-
-func TestResetDuringClose(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateConnected(context.TestInitialSequenceNumber, 30000, -1 /* epRecvBuf */)
- // Send some data to make sure there is some unread
- // data to trigger a reset on c.Close.
- irs := c.IRS
- iss := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
- c.SendPacket([]byte{1, 2, 3, 4}, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: iss,
- AckNum: irs.Add(1),
- RcvWnd: 30000,
- })
-
- // Receive ACK for the data we sent.
- checker.IPv4(t, c.GetPacket(), checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPFlags(header.TCPFlagAck),
- checker.TCPSeqNum(uint32(irs.Add(1))),
- checker.TCPAckNum(uint32(iss)+4)))
-
- // Close in a separate goroutine so that we can trigger
- // a race with the RST we send below. This should not
-	// panic due to the route being released depending on
- // whether Close() sends an active RST or the RST sent
- // below is processed by the worker first.
- var wg sync.WaitGroup
-
- wg.Add(1)
- go func() {
- defer wg.Done()
- c.SendPacket(nil, &context.Headers{
- SrcPort: context.TestPort,
- DstPort: c.Port,
- SeqNum: iss.Add(4),
- AckNum: c.IRS.Add(5),
- RcvWnd: 30000,
- Flags: header.TCPFlagRst,
- })
- }()
-
- wg.Add(1)
- go func() {
- defer wg.Done()
- c.EP.Close()
- }()
-
- wg.Wait()
-}
-
-func TestStackTimeWaitReuse(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- s := c.Stack()
- var twReuse tcpip.TCPTimeWaitReuseOption
- if err := s.TransportProtocolOption(tcp.ProtocolNumber, &twReuse); err != nil {
- t.Fatalf("s.TransportProtocolOption(%v, %v) = %v", tcp.ProtocolNumber, &twReuse, err)
- }
- if got, want := twReuse, tcpip.TCPTimeWaitReuseLoopbackOnly; got != want {
- t.Fatalf("got tcpip.TCPTimeWaitReuseOption: %v, want: %v", got, want)
- }
-}
-
-func TestSetStackTimeWaitReuse(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- s := c.Stack()
- testCases := []struct {
- v int
- err tcpip.Error
- }{
- {int(tcpip.TCPTimeWaitReuseDisabled), nil},
- {int(tcpip.TCPTimeWaitReuseGlobal), nil},
- {int(tcpip.TCPTimeWaitReuseLoopbackOnly), nil},
- {int(tcpip.TCPTimeWaitReuseLoopbackOnly) + 1, &tcpip.ErrInvalidOptionValue{}},
- {int(tcpip.TCPTimeWaitReuseDisabled) - 1, &tcpip.ErrInvalidOptionValue{}},
- }
-
- for _, tc := range testCases {
- opt := tcpip.TCPTimeWaitReuseOption(tc.v)
- err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &opt)
- if got, want := err, tc.err; got != want {
- t.Fatalf("s.SetTransportProtocolOption(%d, &%T(%d)) = %s, want = %s", tcp.ProtocolNumber, tc.v, tc.v, err, tc.err)
- }
- if tc.err != nil {
- continue
- }
-
- var twReuse tcpip.TCPTimeWaitReuseOption
- if err := s.TransportProtocolOption(tcp.ProtocolNumber, &twReuse); err != nil {
- t.Fatalf("s.TransportProtocolOption(%v, %v) = %v, want nil", tcp.ProtocolNumber, &twReuse, err)
- }
-
- if got, want := twReuse, tcpip.TCPTimeWaitReuseOption(tc.v); got != want {
- t.Fatalf("got tcpip.TCPTimeWaitReuseOption: %v, want: %v", got, want)
- }
- }
-}
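// Editor's sketch of the validation the table above implies for
// TCPTimeWaitReuseOption: only the three defined values are accepted, and
// anything outside the range draws ErrInvalidOptionValue. The ordering of the
// constants (Disabled lowest, LoopbackOnly highest) is inferred from the
// invalid cases in the table; illustrative only.
func validTimeWaitReuse(v tcpip.TCPTimeWaitReuseOption) bool {
	return v >= tcpip.TCPTimeWaitReuseDisabled && v <= tcpip.TCPTimeWaitReuseLoopbackOnly
}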
-
-// generateRandomPayload generates a random byte slice of the specified length
-// causing a fatal test failure if it is unable to do so.
-func generateRandomPayload(t *testing.T, n int) []byte {
- t.Helper()
- buf := make([]byte, n)
- if _, err := rand.Read(buf); err != nil {
- t.Fatalf("rand.Read(buf) failed: %s", err)
- }
- return buf
-}
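As a usage note, payloads from generateRandomPayload are typically written to the endpoint under test through a bytes.Reader, mirroring the Write pattern used elsewhere in these tests. A minimal sketch, assuming a connected test context c and testing.T t as set up by the tests above:

payload := generateRandomPayload(t, 1<<12) // 4 KiB of random data.
var r bytes.Reader
r.Reset(payload)
if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
	t.Fatalf("Write failed: %s", err)
}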
diff --git a/pkg/tcpip/transport/tcp/tcp_timestamp_test.go b/pkg/tcpip/transport/tcp/tcp_timestamp_test.go
deleted file mode 100644
index 1deb1fe4d..000000000
--- a/pkg/tcpip/transport/tcp/tcp_timestamp_test.go
+++ /dev/null
@@ -1,311 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_test
-
-import (
- "bytes"
- "math/rand"
- "testing"
- "time"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/checker"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
- "gvisor.dev/gvisor/pkg/tcpip/transport/tcp/testing/context"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-// createConnectedWithTimestampOption creates and connects c.ep with the
-// timestamp option enabled.
-func createConnectedWithTimestampOption(c *context.Context) *context.RawEndpoint {
- return c.CreateConnectedWithOptions(header.TCPSynOptions{TS: true, TSVal: 1})
-}
-
-// TestTimeStampEnabledConnect tests that netstack sends the timestamp option on
-// an active connect and sets the TS Echo Reply fields correctly when the
-// SYN-ACK also indicates support for the TS option and provides a TSVal.
-func TestTimeStampEnabledConnect(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- rep := createConnectedWithTimestampOption(c)
-
- // Register for read and validate that we have data to read.
- we, ch := waiter.NewChannelEntry(nil)
- c.WQ.EventRegister(&we, waiter.ReadableEvents)
- defer c.WQ.EventUnregister(&we)
-
-	// The following tests ensure that the TS option, once enabled, behaves
- // correctly as described in
- // https://tools.ietf.org/html/rfc7323#section-4.3.
- //
- // We are not testing delayed ACKs here, but we do test out of order
- // packet delivery and filling the sequence number hole created due to
- // the out of order packet.
- //
- // The test also verifies that the sequence numbers and timestamps are
- // as expected.
- data := []byte{1, 2, 3}
-
- // First we increment tsVal by a small amount.
- tsVal := rep.TSVal + 100
- rep.SendPacketWithTS(data, tsVal)
- rep.VerifyACKWithTS(tsVal)
-
- // Next we send an out of order packet.
- rep.NextSeqNum += 3
- tsVal += 200
- rep.SendPacketWithTS(data, tsVal)
-
-	// The ACK should contain the original sequence number and an older TS.
- rep.NextSeqNum -= 6
- rep.VerifyACKWithTS(tsVal - 200)
-
- // Next we fill the hole and the returned ACK should contain the
- // cumulative sequence number acking all data sent till now and have the
- // latest timestamp sent below in its TSEcr field.
- tsVal -= 100
- rep.SendPacketWithTS(data, tsVal)
- rep.NextSeqNum += 3
- rep.VerifyACKWithTS(tsVal)
-
- // Increment tsVal by a large value that doesn't result in a wrap around.
- tsVal += 0x7fffffff
- rep.SendPacketWithTS(data, tsVal)
- rep.VerifyACKWithTS(tsVal)
-
- // Increment tsVal again by a large value which should cause the
- // timestamp value to wrap around. The returned ACK should contain the
- // wrapped around timestamp in its tsEcr field and not the tsVal from
- // the previous packet sent above.
- tsVal += 0x7fffffff
- rep.SendPacketWithTS(data, tsVal)
- rep.VerifyACKWithTS(tsVal)
-
- select {
- case <-ch:
- case <-time.After(1 * time.Second):
- t.Fatalf("Timed out waiting for data to arrive")
- }
-
- // There should be 5 views to read and each of them should
- // contain the same data.
- for i := 0; i < 5; i++ {
- buf := make([]byte, len(data))
- w := tcpip.SliceWriter(buf)
- result, err := c.EP.Read(&w, tcpip.ReadOptions{})
- if err != nil {
- t.Fatalf("Unexpected error from Read: %v", err)
- }
- if diff := cmp.Diff(tcpip.ReadResult{
- Count: len(buf),
- Total: len(buf),
- }, result, checker.IgnoreCmpPath("ControlMessages")); diff != "" {
- t.Errorf("Read: unexpected result (-want +got):\n%s", diff)
- }
- if got, want := buf, data; bytes.Compare(got, want) != 0 {
- t.Fatalf("Data is different: got: %v, want: %v", got, want)
- }
- }
-}
-
-// TestTimeStampDisabledConnect tests that netstack sends the timestamp option on
-// an active connect, but that if the SYN-ACK doesn't specify the TS option then
-// the timestamp option is not enabled and future packets do not contain a
-// timestamp.
-func TestTimeStampDisabledConnect(t *testing.T) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- c.CreateConnectedWithOptions(header.TCPSynOptions{})
-}
-
-func timeStampEnabledAccept(t *testing.T, cookieEnabled bool, wndScale int, wndSize uint16) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- if cookieEnabled {
- opt := tcpip.TCPAlwaysUseSynCookies(true)
- if err := c.Stack().SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- t.Fatalf("SetTransportProtocolOption(%d, &%T(%t)): %s", tcp.ProtocolNumber, opt, opt, err)
- }
- }
-
- t.Logf("Test w/ CookieEnabled = %v", cookieEnabled)
- tsVal := rand.Uint32()
- c.AcceptWithOptions(wndScale, header.TCPSynOptions{MSS: defaultIPv4MSS, TS: true, TSVal: tsVal})
-
- // Now send some data and validate that timestamp is echoed correctly in the ACK.
- data := []byte{1, 2, 3}
-
- var r bytes.Reader
- r.Reset(data)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Unexpected error from Write: %s", err)
- }
-
- // Check that data is received and that the timestamp option TSEcr field
- // matches the expected value.
- b := c.GetPacket()
- checker.IPv4(t, b,
-		// Add 12 bytes for the 10-byte timestamp option plus 2 NOPs that
-		// align it at a 4-byte boundary.
- checker.PayloadLen(len(data)+header.TCPMinimumSize+12),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(790),
- checker.TCPWindow(wndSize),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- checker.TCPTimestampChecker(true, 0, tsVal+1),
- ),
- )
-}
-
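The 12 bytes referenced above follow the layout used throughout these tests: two NOP bytes padding the 10-byte Timestamps option to a 4-byte multiple. A minimal sketch of building such an option block, mirroring RawEndpoint.SendPacketWithTS in the test context below (tsVal and tsEcr are illustrative uint32 values):

// Two NOPs pad the 10-byte TS option out to 12 bytes (a 4-byte multiple).
tsOpt := [12]byte{header.TCPOptionNOP, header.TCPOptionNOP}
header.EncodeTSOption(tsVal, tsEcr, tsOpt[2:])
// tsOpt[:] can then be carried in Headers.TCPOpts when building a segment.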
-// TestTimeStampEnabledAccept tests that if the SYN on a passive connect
-// specifies the Timestamp option then the Timestamp option is sent on a SYN-ACK
-// and echoes the TSVal field of the original SYN in the TSEcr field of the
-// SYN-ACK. We cover the cases where SYN cookies are enabled/disabled and verify
-// that Timestamp option is enabled in both cases if requested in the original
-// SYN.
-func TestTimeStampEnabledAccept(t *testing.T) {
- testCases := []struct {
- cookieEnabled bool
- wndScale int
- wndSize uint16
- }{
- {true, -1, 0xffff}, // When cookie is used window scaling is disabled.
- // DefaultReceiveBufferSize is 1MB >> 5. Advertised window will be 1/2 of that.
- {false, 5, 0x4000},
- }
- for _, tc := range testCases {
- timeStampEnabledAccept(t, tc.cookieEnabled, tc.wndScale, tc.wndSize)
- }
-}
-
-func timeStampDisabledAccept(t *testing.T, cookieEnabled bool, wndScale int, wndSize uint16) {
- c := context.New(t, defaultMTU)
- defer c.Cleanup()
-
- if cookieEnabled {
- opt := tcpip.TCPAlwaysUseSynCookies(true)
- if err := c.Stack().SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- t.Fatalf("SetTransportProtocolOption(%d, &%T(%t)): %s", tcp.ProtocolNumber, opt, opt, err)
- }
- }
-
- t.Logf("Test w/ CookieEnabled = %v", cookieEnabled)
- c.AcceptWithOptions(wndScale, header.TCPSynOptions{MSS: defaultIPv4MSS})
-
- // Now send some data with the accepted connection endpoint and validate
- // that no timestamp option is sent in the TCP segment.
- data := []byte{1, 2, 3}
-
- var r bytes.Reader
- r.Reset(data)
- if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
- t.Fatalf("Unexpected error from Write: %s", err)
- }
-
-	// Check that data is received and that the timestamp option is not
-	// present, regardless of whether SYN cookies are enabled or disabled.
- b := c.GetPacket()
- checker.IPv4(t, b,
- checker.PayloadLen(len(data)+header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(context.TestPort),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(790),
- checker.TCPWindow(wndSize),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- checker.TCPTimestampChecker(false, 0, 0),
- ),
- )
-}
-
-// TestTimeStampDisabledAccept tests that the Timestamp option is not used when
-// the peer doesn't advertise it and the connection is established with Accept().
-func TestTimeStampDisabledAccept(t *testing.T) {
- testCases := []struct {
- cookieEnabled bool
- wndScale int
- wndSize uint16
- }{
- {true, -1, 0xffff}, // When cookie is used window scaling is disabled.
- // DefaultReceiveBufferSize is 1MB >> 5. Advertised window will be half of
- // that.
- {false, 5, 0x4000},
- }
- for _, tc := range testCases {
- timeStampDisabledAccept(t, tc.cookieEnabled, tc.wndScale, tc.wndSize)
- }
-}
-
-func TestSendGreaterThanMTUWithOptions(t *testing.T) {
- const maxPayload = 100
- c := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxPayload))
- defer c.Cleanup()
-
- createConnectedWithTimestampOption(c)
- testBrokenUpWrite(t, c, maxPayload)
-}
-
-func TestSegmentNotDroppedWhenTimestampMissing(t *testing.T) {
- const maxPayload = 100
- c := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxPayload))
- defer c.Cleanup()
-
- rep := createConnectedWithTimestampOption(c)
-
- // Register for read.
- we, ch := waiter.NewChannelEntry(nil)
- c.WQ.EventRegister(&we, waiter.ReadableEvents)
- defer c.WQ.EventUnregister(&we)
-
- droppedPacketsStat := c.Stack().Stats().DroppedPackets
- droppedPackets := droppedPacketsStat.Value()
- data := []byte{1, 2, 3}
- // Send a packet with no TCP options/timestamp.
- rep.SendPacket(data, nil)
-
- select {
- case <-ch:
- case <-time.After(1 * time.Second):
- t.Fatalf("Timed out waiting for data to arrive")
- }
-
- // Assert that DroppedPackets was not incremented.
- if got, want := droppedPacketsStat.Value(), droppedPackets; got != want {
- t.Fatalf("incorrect number of dropped packets, got: %v, want: %v", got, want)
- }
-
-	// Issue a read and we should get the data.
- var buf bytes.Buffer
- result, err := c.EP.Read(&buf, tcpip.ReadOptions{})
- if err != nil {
- t.Fatalf("Unexpected error from Read: %v", err)
- }
- if diff := cmp.Diff(tcpip.ReadResult{
- Count: buf.Len(),
- Total: buf.Len(),
- }, result, checker.IgnoreCmpPath("ControlMessages")); diff != "" {
- t.Errorf("Read: unexpected result (-want +got):\n%s", diff)
- }
- if got, want := buf.Bytes(), data; bytes.Compare(got, want) != 0 {
- t.Fatalf("Data is different: got: %v, want: %v", got, want)
- }
-}
diff --git a/pkg/tcpip/transport/tcp/tcp_unsafe_state_autogen.go b/pkg/tcpip/transport/tcp/tcp_unsafe_state_autogen.go
new file mode 100644
index 000000000..4cb82fcc9
--- /dev/null
+++ b/pkg/tcpip/transport/tcp/tcp_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package tcp
diff --git a/pkg/tcpip/transport/tcp/testing/context/BUILD b/pkg/tcpip/transport/tcp/testing/context/BUILD
deleted file mode 100644
index ce6a2c31d..000000000
--- a/pkg/tcpip/transport/tcp/testing/context/BUILD
+++ /dev/null
@@ -1,26 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "context",
- testonly = 1,
- srcs = ["context.go"],
- visibility = [
- "//visibility:public",
- ],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/checker",
- "//pkg/tcpip/header",
- "//pkg/tcpip/link/channel",
- "//pkg/tcpip/link/sniffer",
- "//pkg/tcpip/network/ipv4",
- "//pkg/tcpip/network/ipv6",
- "//pkg/tcpip/seqnum",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/transport/tcp",
- "//pkg/waiter",
- ],
-)
diff --git a/pkg/tcpip/transport/tcp/testing/context/context.go b/pkg/tcpip/transport/tcp/testing/context/context.go
deleted file mode 100644
index 96e4849d2..000000000
--- a/pkg/tcpip/transport/tcp/testing/context/context.go
+++ /dev/null
@@ -1,1233 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package context provides a test context for use in tcp tests. It also
-// provides helper methods to assert/check certain behaviours.
-package context
-
-import (
- "bytes"
- "context"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/checker"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/link/channel"
- "gvisor.dev/gvisor/pkg/tcpip/link/sniffer"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
- "gvisor.dev/gvisor/pkg/tcpip/seqnum"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-const (
- // StackAddr is the IPv4 address assigned to the stack.
- StackAddr = "\x0a\x00\x00\x01"
-
- // StackPort is used as the listening port in tests for passive
- // connects.
- StackPort = 1234
-
- // TestAddr is the source address for packets sent to the stack via the
- // link layer endpoint.
- TestAddr = "\x0a\x00\x00\x02"
-
- // TestPort is the TCP port used for packets sent to the stack
- // via the link layer endpoint.
- TestPort = 4096
-
- // StackV6Addr is the IPv6 address assigned to the stack.
- StackV6Addr = "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01"
-
- // TestV6Addr is the source address for packets sent to the stack via
- // the link layer endpoint.
- TestV6Addr = "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02"
-
- // StackV4MappedAddr is StackAddr as a mapped v6 address.
- StackV4MappedAddr = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff" + StackAddr
-
- // TestV4MappedAddr is TestAddr as a mapped v6 address.
- TestV4MappedAddr = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff" + TestAddr
-
- // V4MappedWildcardAddr is the mapped v6 representation of 0.0.0.0.
- V4MappedWildcardAddr = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x00\x00\x00\x00"
-
-	// TestInitialSequenceNumber is the initial sequence number used in packets
-	// sent in response to a SYN, or in the initial SYN sent to the stack.
- TestInitialSequenceNumber = 789
-)
-
-// StackAddrWithPrefix is StackAddr with its associated prefix length.
-var StackAddrWithPrefix = tcpip.AddressWithPrefix{
- Address: StackAddr,
- PrefixLen: 24,
-}
-
-// StackV6AddrWithPrefix is StackV6Addr with its associated prefix length.
-var StackV6AddrWithPrefix = tcpip.AddressWithPrefix{
- Address: StackV6Addr,
- PrefixLen: header.IIDOffsetInIPv6Address * 8,
-}
-
-// Headers is used to represent the TCP header fields when building a
-// new packet.
-type Headers struct {
- // SrcPort holds the src port value to be used in the packet.
- SrcPort uint16
-
- // DstPort holds the destination port value to be used in the packet.
- DstPort uint16
-
- // SeqNum is the value of the sequence number field in the TCP header.
- SeqNum seqnum.Value
-
- // AckNum represents the acknowledgement number field in the TCP header.
- AckNum seqnum.Value
-
- // Flags are the TCP flags in the TCP header.
- Flags header.TCPFlags
-
- // RcvWnd is the window to be advertised in the ReceiveWindow field of
- // the TCP header.
- RcvWnd seqnum.Size
-
- // TCPOpts holds the options to be sent in the option field of the TCP
- // header.
- TCPOpts []byte
-}
-
-// Options contains options for creating a new test context.
-type Options struct {
- // EnableV4 indicates whether IPv4 should be enabled.
- EnableV4 bool
-
-	// EnableV6 indicates whether IPv6 should be enabled.
- EnableV6 bool
-
- // MTU indicates the maximum transmission unit on the link layer.
- MTU uint32
-}
-
-// Context provides an initialized Network stack and a link layer endpoint
-// for use in TCP tests.
-type Context struct {
- t *testing.T
- linkEP *channel.Endpoint
- s *stack.Stack
-
- // IRS holds the initial sequence number in the SYN sent by endpoint in
- // case of an active connect or the sequence number sent by the endpoint
- // in the SYN-ACK sent in response to a SYN when listening in passive
- // mode.
- IRS seqnum.Value
-
- // Port holds the port bound by EP below in case of an active connect or
- // the listening port number in case of a passive connect.
- Port uint16
-
-	// EP is the test endpoint in the stack owned by this context. This endpoint
-	// is used in various tests either to initiate an active connect or as a
-	// passive listening endpoint to accept inbound connections.
- EP tcpip.Endpoint
-
-	// WQ is the wait queue associated with EP and is used to block for events
- // on EP.
- WQ waiter.Queue
-
- // TimeStampEnabled is true if ep is connected with the timestamp option
- // enabled.
- TimeStampEnabled bool
-
- // WindowScale is the expected window scale in SYN packets sent by
- // the stack.
- WindowScale uint8
-
- // RcvdWindowScale is the actual window scale sent by the stack in
- // SYN/SYN-ACK.
- RcvdWindowScale uint8
-}
-
-// New allocates and initializes a test context containing a new
-// stack and a link-layer endpoint.
-func New(t *testing.T, mtu uint32) *Context {
- return NewWithOpts(t, Options{
- EnableV4: true,
- EnableV6: true,
- MTU: mtu,
- })
-}
-
-// NewWithOpts allocates and initializes a test context containing a new
-// stack and a link-layer endpoint with specific options.
-func NewWithOpts(t *testing.T, opts Options) *Context {
- if opts.MTU == 0 {
- panic("MTU must be greater than 0")
- }
-
- stackOpts := stack.Options{
- TransportProtocols: []stack.TransportProtocolFactory{tcp.NewProtocol},
- }
- if opts.EnableV4 {
- stackOpts.NetworkProtocols = append(stackOpts.NetworkProtocols, ipv4.NewProtocol)
- }
- if opts.EnableV6 {
- stackOpts.NetworkProtocols = append(stackOpts.NetworkProtocols, ipv6.NewProtocol)
- }
- s := stack.New(stackOpts)
-
- const sendBufferSize = 1 << 20 // 1 MiB
- const recvBufferSize = 1 << 20 // 1 MiB
- // Allow minimum send/receive buffer sizes to be 1 during tests.
- sendBufOpt := tcpip.TCPSendBufferSizeRangeOption{Min: 1, Default: sendBufferSize, Max: 10 * sendBufferSize}
- if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &sendBufOpt); err != nil {
- t.Fatalf("SetTransportProtocolOption(%d, &%#v) failed: %s", tcp.ProtocolNumber, sendBufOpt, err)
- }
-
- rcvBufOpt := tcpip.TCPReceiveBufferSizeRangeOption{Min: 1, Default: recvBufferSize, Max: 10 * recvBufferSize}
- if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &rcvBufOpt); err != nil {
- t.Fatalf("SetTransportProtocolOption(%d, &%#v) failed: %s", tcp.ProtocolNumber, rcvBufOpt, err)
- }
-
- // Increase minimum RTO in tests to avoid test flakes due to early
- // retransmit in case the test executors are overloaded and cause timers
- // to fire earlier than expected.
- minRTOOpt := tcpip.TCPMinRTOOption(3 * time.Second)
- if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &minRTOOpt); err != nil {
- t.Fatalf("s.SetTransportProtocolOption(%d, &%T(%d)): %s", tcp.ProtocolNumber, minRTOOpt, minRTOOpt, err)
- }
-
-	// Some of the congestion control tests send up to 640 packets, so we
- // set the channel size to 1000.
- ep := channel.New(1000, opts.MTU, "")
- wep := stack.LinkEndpoint(ep)
- if testing.Verbose() {
- wep = sniffer.New(ep)
- }
- nicOpts := stack.NICOptions{Name: "nic1"}
- if err := s.CreateNICWithOptions(1, wep, nicOpts); err != nil {
- t.Fatalf("CreateNICWithOptions(_, _, %+v) failed: %v", opts, err)
- }
- wep2 := stack.LinkEndpoint(channel.New(1000, opts.MTU, ""))
- if testing.Verbose() {
- wep2 = sniffer.New(channel.New(1000, opts.MTU, ""))
- }
- opts2 := stack.NICOptions{Name: "nic2"}
- if err := s.CreateNICWithOptions(2, wep2, opts2); err != nil {
- t.Fatalf("CreateNICWithOptions(_, _, %+v) failed: %v", opts2, err)
- }
-
- var routeTable []tcpip.Route
-
- if opts.EnableV4 {
- v4ProtocolAddr := tcpip.ProtocolAddress{
- Protocol: ipv4.ProtocolNumber,
- AddressWithPrefix: StackAddrWithPrefix,
- }
- if err := s.AddProtocolAddress(1, v4ProtocolAddr); err != nil {
- t.Fatalf("AddProtocolAddress(1, %#v): %s", v4ProtocolAddr, err)
- }
- routeTable = append(routeTable, tcpip.Route{
- Destination: header.IPv4EmptySubnet,
- NIC: 1,
- })
- }
-
- if opts.EnableV6 {
- v6ProtocolAddr := tcpip.ProtocolAddress{
- Protocol: ipv6.ProtocolNumber,
- AddressWithPrefix: StackV6AddrWithPrefix,
- }
- if err := s.AddProtocolAddress(1, v6ProtocolAddr); err != nil {
- t.Fatalf("AddProtocolAddress(1, %#v): %s", v6ProtocolAddr, err)
- }
- routeTable = append(routeTable, tcpip.Route{
- Destination: header.IPv6EmptySubnet,
- NIC: 1,
- })
- }
-
- s.SetRouteTable(routeTable)
-
- return &Context{
- t: t,
- s: s,
- linkEP: ep,
- WindowScale: uint8(tcp.FindWndScale(recvBufferSize)),
- }
-}
-
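As a usage sketch from a calling test's perspective, a caller that wants an IPv6-only context can pass Options directly (defaultMTU here stands in for whatever MTU constant the calling test package defines):

c := context.NewWithOpts(t, context.Options{
	EnableV6: true,
	MTU:      defaultMTU, // Supplied by the calling test package.
})
defer c.Cleanup()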
-// Cleanup closes the context endpoint if required.
-func (c *Context) Cleanup() {
- if c.EP != nil {
- c.EP.Close()
- }
- c.Stack().Close()
-}
-
-// Stack returns a reference to the stack in the Context.
-func (c *Context) Stack() *stack.Stack {
- return c.s
-}
-
-// CheckNoPacketTimeout verifies that no packet is received during the time
-// specified by wait.
-func (c *Context) CheckNoPacketTimeout(errMsg string, wait time.Duration) {
- c.t.Helper()
-
- ctx, cancel := context.WithTimeout(context.Background(), wait)
- defer cancel()
- if _, ok := c.linkEP.ReadContext(ctx); ok {
- c.t.Fatal(errMsg)
- }
-}
-
-// CheckNoPacket verifies that no packet is received for 1 second.
-func (c *Context) CheckNoPacket(errMsg string) {
- c.CheckNoPacketTimeout(errMsg, 1*time.Second)
-}
-
-// GetPacketWithTimeout reads a packet from the link layer endpoint and verifies
-// that it is an IPv4 packet with the expected source and destination
-// addresses. If no packet is received in the specified timeout it will return
-// nil.
-func (c *Context) GetPacketWithTimeout(timeout time.Duration) []byte {
- c.t.Helper()
-
- ctx, cancel := context.WithTimeout(context.Background(), timeout)
- defer cancel()
- p, ok := c.linkEP.ReadContext(ctx)
- if !ok {
- return nil
- }
-
- if p.Proto != ipv4.ProtocolNumber {
- c.t.Fatalf("Bad network protocol: got %v, wanted %v", p.Proto, ipv4.ProtocolNumber)
- }
-
- // Just check that the stack set the transport protocol number for outbound
- // TCP messages.
- // TODO(gvisor.dev/issues/3810): Remove when protocol numbers are part
- // of the headerinfo.
- if p.Pkt.TransportProtocolNumber != tcp.ProtocolNumber {
- c.t.Fatalf("got p.Pkt.TransportProtocolNumber = %d, want = %d", p.Pkt.TransportProtocolNumber, tcp.ProtocolNumber)
- }
-
- vv := buffer.NewVectorisedView(p.Pkt.Size(), p.Pkt.Views())
- b := vv.ToView()
-
- if p.Pkt.GSOOptions.Type != stack.GSONone && p.Pkt.GSOOptions.L3HdrLen != header.IPv4MinimumSize {
- c.t.Errorf("got L3HdrLen = %d, want = %d", p.Pkt.GSOOptions.L3HdrLen, header.IPv4MinimumSize)
- }
-
- checker.IPv4(c.t, b, checker.SrcAddr(StackAddr), checker.DstAddr(TestAddr))
- return b
-}
-
-// GetPacket reads a packet from the link layer endpoint and verifies
-// that it is an IPv4 packet with the expected source and destination
-// addresses.
-func (c *Context) GetPacket() []byte {
- c.t.Helper()
-
- p := c.GetPacketWithTimeout(5 * time.Second)
- if p == nil {
- c.t.Fatalf("Packet wasn't written out")
- return nil
- }
-
- return p
-}
-
-// GetPacketNonBlocking reads a packet from the link layer endpoint
-// and verifies that it is an IPv4 packet with the expected source
-// and destination address. If no packet is available it will return
-// nil immediately.
-func (c *Context) GetPacketNonBlocking() []byte {
- c.t.Helper()
-
- p, ok := c.linkEP.Read()
- if !ok {
- return nil
- }
-
- if p.Proto != ipv4.ProtocolNumber {
- c.t.Fatalf("Bad network protocol: got %v, wanted %v", p.Proto, ipv4.ProtocolNumber)
- }
-
- // Just check that the stack set the transport protocol number for outbound
- // TCP messages.
- // TODO(gvisor.dev/issues/3810): Remove when protocol numbers are part
- // of the headerinfo.
- if p.Pkt.TransportProtocolNumber != tcp.ProtocolNumber {
- c.t.Fatalf("got p.Pkt.TransportProtocolNumber = %d, want = %d", p.Pkt.TransportProtocolNumber, tcp.ProtocolNumber)
- }
-
- vv := buffer.NewVectorisedView(p.Pkt.Size(), p.Pkt.Views())
- b := vv.ToView()
-
- checker.IPv4(c.t, b, checker.SrcAddr(StackAddr), checker.DstAddr(TestAddr))
- return b
-}
-
-// SendICMPPacket builds and sends an ICMPv4 packet via the link layer endpoint.
-func (c *Context) SendICMPPacket(typ header.ICMPv4Type, code header.ICMPv4Code, p1, p2 []byte, maxTotalSize int) {
-	// Allocate a buffer for data and headers.
- buf := buffer.NewView(header.IPv4MinimumSize + header.ICMPv4PayloadOffset + len(p2))
- if len(buf) > maxTotalSize {
- buf = buf[:maxTotalSize]
- }
-
- ip := header.IPv4(buf)
- ip.Encode(&header.IPv4Fields{
- TotalLength: uint16(len(buf)),
- TTL: 65,
- Protocol: uint8(header.ICMPv4ProtocolNumber),
- SrcAddr: TestAddr,
- DstAddr: StackAddr,
- })
- ip.SetChecksum(^ip.CalculateChecksum())
-
- icmp := header.ICMPv4(buf[header.IPv4MinimumSize:])
- icmp.SetType(typ)
- icmp.SetCode(code)
- const icmpv4VariableHeaderOffset = 4
- copy(icmp[icmpv4VariableHeaderOffset:], p1)
- copy(icmp[header.ICMPv4PayloadOffset:], p2)
- icmp.SetChecksum(0)
- checksum := ^header.Checksum(icmp, 0 /* initial */)
- icmp.SetChecksum(checksum)
-
- // Inject packet.
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- })
- c.linkEP.InjectInbound(ipv4.ProtocolNumber, pkt)
-}
-
-// BuildSegment builds a TCP segment based on the given Headers and payload.
-func (c *Context) BuildSegment(payload []byte, h *Headers) buffer.VectorisedView {
- return c.BuildSegmentWithAddrs(payload, h, TestAddr, StackAddr)
-}
-
-// BuildSegmentWithAddrs builds a TCP segment based on the given Headers,
-// payload and source and destination IPv4 addresses.
-func (c *Context) BuildSegmentWithAddrs(payload []byte, h *Headers, src, dst tcpip.Address) buffer.VectorisedView {
- // Allocate a buffer for data and headers.
- buf := buffer.NewView(header.TCPMinimumSize + header.IPv4MinimumSize + len(h.TCPOpts) + len(payload))
- copy(buf[len(buf)-len(payload):], payload)
- copy(buf[len(buf)-len(payload)-len(h.TCPOpts):], h.TCPOpts)
-
- // Initialize the IP header.
- ip := header.IPv4(buf)
- ip.Encode(&header.IPv4Fields{
- TotalLength: uint16(len(buf)),
- TTL: 65,
- Protocol: uint8(tcp.ProtocolNumber),
- SrcAddr: src,
- DstAddr: dst,
- })
- ip.SetChecksum(^ip.CalculateChecksum())
-
- // Initialize the TCP header.
- t := header.TCP(buf[header.IPv4MinimumSize:])
- t.Encode(&header.TCPFields{
- SrcPort: h.SrcPort,
- DstPort: h.DstPort,
- SeqNum: uint32(h.SeqNum),
- AckNum: uint32(h.AckNum),
- DataOffset: uint8(header.TCPMinimumSize + len(h.TCPOpts)),
- Flags: h.Flags,
- WindowSize: uint16(h.RcvWnd),
- })
-
- // Calculate the TCP pseudo-header checksum.
- xsum := header.PseudoHeaderChecksum(tcp.ProtocolNumber, src, dst, uint16(len(t)))
-
- // Calculate the TCP checksum and set it.
- xsum = header.Checksum(payload, xsum)
- t.SetChecksum(^t.CalculateChecksum(xsum))
-
-	// Return the built segment so the caller can inject it.
- return buf.ToVectorisedView()
-}
-
-// SendSegment sends a TCP segment that has already been built and written to a
-// buffer.VectorisedView.
-func (c *Context) SendSegment(s buffer.VectorisedView) {
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: s,
- })
- c.linkEP.InjectInbound(ipv4.ProtocolNumber, pkt)
-}
-
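BuildSegment and SendSegment are designed to compose: a caller can build a segment once, optionally inspect or modify it, and then inject it. A minimal sketch, assuming payload and h (*Headers) are prepared by the caller:

seg := c.BuildSegment(payload, h) // h is a *Headers filled in by the caller.
c.SendSegment(seg)                // Inject the prebuilt segment inbound.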
-// SendPacket builds and sends a TCP segment (with the provided payload & TCP
-// headers) in an IPv4 packet via the link layer endpoint.
-func (c *Context) SendPacket(payload []byte, h *Headers) {
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: c.BuildSegment(payload, h),
- })
- c.linkEP.InjectInbound(ipv4.ProtocolNumber, pkt)
-}
-
-// SendPacketWithAddrs builds and sends a TCP segment (with the provided payload
-// & TCP headers) in an IPv4 packet via the link layer endpoint using the
-// provided source and destination IPv4 addresses.
-func (c *Context) SendPacketWithAddrs(payload []byte, h *Headers, src, dst tcpip.Address) {
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: c.BuildSegmentWithAddrs(payload, h, src, dst),
- })
- c.linkEP.InjectInbound(ipv4.ProtocolNumber, pkt)
-}
-
-// SendAck sends an ACK packet.
-func (c *Context) SendAck(seq seqnum.Value, bytesReceived int) {
- c.SendAckWithSACK(seq, bytesReceived, nil)
-}
-
-// SendAckWithSACK sends an ACK packet which includes the sackBlocks specified.
-func (c *Context) SendAckWithSACK(seq seqnum.Value, bytesReceived int, sackBlocks []header.SACKBlock) {
- options := make([]byte, 40)
- offset := 0
- if len(sackBlocks) > 0 {
- offset += header.EncodeNOP(options[offset:])
- offset += header.EncodeNOP(options[offset:])
- offset += header.EncodeSACKBlocks(sackBlocks, options[offset:])
- }
-
- c.SendPacket(nil, &Headers{
- SrcPort: TestPort,
- DstPort: c.Port,
- Flags: header.TCPFlagAck,
- SeqNum: seq,
- AckNum: c.IRS.Add(1 + seqnum.Size(bytesReceived)),
- RcvWnd: 30000,
- TCPOpts: options[:offset],
- })
-}
-
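For example, a test that has received some of the stack's data out of order can ACK the in-order bytes while SACKing the later block. A sketch with illustrative offsets; the SACKBlock field names (Start, End) are assumed from the header package, and the block is expressed in the stack's sequence space (c.IRS):

iss := seqnum.Value(TestInitialSequenceNumber)
c.SendAckWithSACK(iss+1, 0, []header.SACKBlock{
	{Start: c.IRS.Add(5), End: c.IRS.Add(9)}, // SACK a 4-byte out-of-order block.
})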
-// ReceiveAndCheckPacket reads a packet from the link layer endpoint and
-// verifies that the packet payload matches the slice of data indicated by
-// offset & size.
-func (c *Context) ReceiveAndCheckPacket(data []byte, offset, size int) {
- c.t.Helper()
-
- c.ReceiveAndCheckPacketWithOptions(data, offset, size, 0)
-}
-
-// ReceiveAndCheckPacketWithOptions reads a packet from the link layer endpoint
-// and verifies that the packet payload matches the slice of data indicated by
-// offset & size, skipping optlen bytes in addition to the IP and TCP headers
-// when comparing the data.
-func (c *Context) ReceiveAndCheckPacketWithOptions(data []byte, offset, size, optlen int) {
- c.t.Helper()
-
- b := c.GetPacket()
- checker.IPv4(c.t, b,
- checker.PayloadLen(size+header.TCPMinimumSize+optlen),
- checker.TCP(
- checker.DstPort(TestPort),
- checker.TCPSeqNum(uint32(c.IRS.Add(seqnum.Size(1+offset)))),
- checker.TCPAckNum(uint32(seqnum.Value(TestInitialSequenceNumber).Add(1))),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
-
- pdata := data[offset:][:size]
- if p := b[header.IPv4MinimumSize+header.TCPMinimumSize+optlen:]; bytes.Compare(pdata, p) != 0 {
- c.t.Fatalf("Data is different: expected %v, got %v", pdata, p)
- }
-}
-
-// ReceiveNonBlockingAndCheckPacket reads a packet from the link layer endpoint
-// and verifies that the packet payload matches the slice of data indicated by
-// offset & size. It returns true if a packet was received and
-// processed.
-func (c *Context) ReceiveNonBlockingAndCheckPacket(data []byte, offset, size int) bool {
- c.t.Helper()
-
- b := c.GetPacketNonBlocking()
- if b == nil {
- return false
- }
- checker.IPv4(c.t, b,
- checker.PayloadLen(size+header.TCPMinimumSize),
- checker.TCP(
- checker.DstPort(TestPort),
- checker.TCPSeqNum(uint32(c.IRS.Add(seqnum.Size(1+offset)))),
- checker.TCPAckNum(uint32(seqnum.Value(TestInitialSequenceNumber).Add(1))),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
- ),
- )
-
- pdata := data[offset:][:size]
- if p := b[header.IPv4MinimumSize+header.TCPMinimumSize:]; bytes.Compare(pdata, p) != 0 {
- c.t.Fatalf("Data is different: expected %v, got %v", pdata, p)
- }
- return true
-}
-
-// CreateV6Endpoint creates and initializes c.ep as an IPv6 endpoint. If v6only
-// is true then it sets the IP_V6ONLY option on the socket to make it an
-// IPv6-only endpoint instead of a default dual-stack socket.
-func (c *Context) CreateV6Endpoint(v6only bool) {
- var err tcpip.Error
- c.EP, err = c.s.NewEndpoint(tcp.ProtocolNumber, ipv6.ProtocolNumber, &c.WQ)
- if err != nil {
- c.t.Fatalf("NewEndpoint failed: %v", err)
- }
-
- c.EP.SocketOptions().SetV6Only(v6only)
-}
-
-// GetV6Packet reads a single packet from the link layer endpoint of the context
-// and asserts that it is an IPv6 Packet with the expected src/dest addresses.
-func (c *Context) GetV6Packet() []byte {
- c.t.Helper()
-
- ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
- defer cancel()
- p, ok := c.linkEP.ReadContext(ctx)
- if !ok {
- c.t.Fatalf("Packet wasn't written out")
- return nil
- }
-
- if p.Proto != ipv6.ProtocolNumber {
- c.t.Fatalf("Bad network protocol: got %v, wanted %v", p.Proto, ipv6.ProtocolNumber)
- }
- vv := buffer.NewVectorisedView(p.Pkt.Size(), p.Pkt.Views())
- b := vv.ToView()
-
- checker.IPv6(c.t, b, checker.SrcAddr(StackV6Addr), checker.DstAddr(TestV6Addr))
- return b
-}
-
-// SendV6Packet builds and sends an IPv6 Packet via the link layer endpoint of
-// the context.
-func (c *Context) SendV6Packet(payload []byte, h *Headers) {
- c.SendV6PacketWithAddrs(payload, h, TestV6Addr, StackV6Addr)
-}
-
-// SendV6PacketWithAddrs builds and sends an IPv6 Packet via the link layer
-// endpoint of the context using the provided source and destination IPv6
-// addresses.
-func (c *Context) SendV6PacketWithAddrs(payload []byte, h *Headers, src, dst tcpip.Address) {
- // Allocate a buffer for data and headers.
- buf := buffer.NewView(header.TCPMinimumSize + header.IPv6MinimumSize + len(payload))
- copy(buf[len(buf)-len(payload):], payload)
-
- // Initialize the IP header.
- ip := header.IPv6(buf)
- ip.Encode(&header.IPv6Fields{
- PayloadLength: uint16(header.TCPMinimumSize + len(payload)),
- TransportProtocol: tcp.ProtocolNumber,
- HopLimit: 65,
- SrcAddr: src,
- DstAddr: dst,
- })
-
- // Initialize the TCP header.
- t := header.TCP(buf[header.IPv6MinimumSize:])
- t.Encode(&header.TCPFields{
- SrcPort: h.SrcPort,
- DstPort: h.DstPort,
- SeqNum: uint32(h.SeqNum),
- AckNum: uint32(h.AckNum),
- DataOffset: header.TCPMinimumSize,
- Flags: h.Flags,
- WindowSize: uint16(h.RcvWnd),
- })
-
- // Calculate the TCP pseudo-header checksum.
- xsum := header.PseudoHeaderChecksum(tcp.ProtocolNumber, src, dst, uint16(len(t)))
-
- // Calculate the TCP checksum and set it.
- xsum = header.Checksum(payload, xsum)
- t.SetChecksum(^t.CalculateChecksum(xsum))
-
- // Inject packet.
- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- })
- c.linkEP.InjectInbound(ipv6.ProtocolNumber, pkt)
-}
-
-// CreateConnected creates a connected TCP endpoint.
-func (c *Context) CreateConnected(iss seqnum.Value, rcvWnd seqnum.Size, epRcvBuf int) {
- c.CreateConnectedWithRawOptions(iss, rcvWnd, epRcvBuf, nil)
-}
-
-// Connect performs the 3-way handshake for c.EP with the provided initial
-// sequence number (iss), receive window (rcvWnd), and any options if
-// specified.
-//
-// PreCondition: c.EP must already be created.
-func (c *Context) Connect(iss seqnum.Value, rcvWnd seqnum.Size, options []byte) {
- c.t.Helper()
-
- // Start connection attempt.
- waitEntry, notifyCh := waiter.NewChannelEntry(nil)
- c.WQ.EventRegister(&waitEntry, waiter.WritableEvents)
- defer c.WQ.EventUnregister(&waitEntry)
-
- err := c.EP.Connect(tcpip.FullAddress{Addr: TestAddr, Port: TestPort})
- if _, ok := err.(*tcpip.ErrConnectStarted); !ok {
- c.t.Fatalf("Unexpected return value from Connect: %v", err)
- }
-
- // Receive SYN packet.
- b := c.GetPacket()
- checker.IPv4(c.t, b,
- checker.TCP(
- checker.DstPort(TestPort),
- checker.TCPFlags(header.TCPFlagSyn),
- ),
- )
- if got, want := tcp.EndpointState(c.EP.State()), tcp.StateSynSent; got != want {
- c.t.Fatalf("Unexpected endpoint state: want %v, got %v", want, got)
- }
-
- tcpHdr := header.TCP(header.IPv4(b).Payload())
- synOpts := header.ParseSynOptions(tcpHdr.Options(), false /* isAck */)
- c.IRS = seqnum.Value(tcpHdr.SequenceNumber())
-
- c.SendPacket(nil, &Headers{
- SrcPort: tcpHdr.DestinationPort(),
- DstPort: tcpHdr.SourcePort(),
- Flags: header.TCPFlagSyn | header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(1),
- RcvWnd: rcvWnd,
- TCPOpts: options,
- })
-
- // Receive ACK packet.
- checker.IPv4(c.t, c.GetPacket(),
- checker.TCP(
- checker.DstPort(TestPort),
- checker.TCPFlags(header.TCPFlagAck),
- checker.TCPSeqNum(uint32(c.IRS)+1),
- checker.TCPAckNum(uint32(iss)+1),
- ),
- )
-
- // Wait for connection to be established.
- select {
- case <-notifyCh:
- if err := c.EP.LastError(); err != nil {
- c.t.Fatalf("Unexpected error when connecting: %v", err)
- }
- case <-time.After(1 * time.Second):
- c.t.Fatalf("Timed out waiting for connection")
- }
- if got, want := tcp.EndpointState(c.EP.State()), tcp.StateEstablished; got != want {
- c.t.Fatalf("Unexpected endpoint state: want %v, got %v", want, got)
- }
-
- c.RcvdWindowScale = uint8(synOpts.WS)
- c.Port = tcpHdr.SourcePort()
-}
-
-// Create creates a TCP endpoint.
-func (c *Context) Create(epRcvBuf int) {
- // Create TCP endpoint.
- var err tcpip.Error
- c.EP, err = c.s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &c.WQ)
- if err != nil {
- c.t.Fatalf("NewEndpoint failed: %v", err)
- }
-
- if epRcvBuf != -1 {
- c.EP.SocketOptions().SetReceiveBufferSize(int64(epRcvBuf)*2, true /* notify */)
- }
-}
-
-// CreateConnectedWithRawOptions creates a connected TCP endpoint and sends
-// the specified option bytes as the Option field in the initial SYN packet.
-//
-// It also sets the receive buffer for the endpoint to the specified
-// value in epRcvBuf.
-func (c *Context) CreateConnectedWithRawOptions(iss seqnum.Value, rcvWnd seqnum.Size, epRcvBuf int, options []byte) {
- c.Create(epRcvBuf)
- c.Connect(iss, rcvWnd, options)
-}
-
-// RawEndpoint is just a small wrapper around a TCP endpoint's state to make
-// sending data and ACK packets easy while being able to manipulate the sequence
-// numbers and timestamp values as needed.
-type RawEndpoint struct {
- C *Context
- SrcPort uint16
- DstPort uint16
- Flags header.TCPFlags
- NextSeqNum seqnum.Value
- AckNum seqnum.Value
- WndSize seqnum.Size
- RecentTS uint32 // Stores the latest timestamp to echo back.
- TSVal uint32 // TSVal stores the last timestamp sent by this endpoint.
-
-	// SACKPermitted is true if the SACK-permitted option was negotiated for this endpoint.
- SACKPermitted bool
-}
-
-// SendPacketWithTS embeds the provided tsVal in the Timestamp option
-// for the packet to be sent out.
-func (r *RawEndpoint) SendPacketWithTS(payload []byte, tsVal uint32) {
- r.TSVal = tsVal
- tsOpt := [12]byte{header.TCPOptionNOP, header.TCPOptionNOP}
- header.EncodeTSOption(r.TSVal, r.RecentTS, tsOpt[2:])
- r.SendPacket(payload, tsOpt[:])
-}
-
-// SendPacket is a small wrapper function to build and send packets.
-func (r *RawEndpoint) SendPacket(payload []byte, opts []byte) {
- packetHeaders := &Headers{
- SrcPort: r.SrcPort,
- DstPort: r.DstPort,
- Flags: r.Flags,
- SeqNum: r.NextSeqNum,
- AckNum: r.AckNum,
- RcvWnd: r.WndSize,
- TCPOpts: opts,
- }
- r.C.SendPacket(payload, packetHeaders)
- r.NextSeqNum = r.NextSeqNum.Add(seqnum.Size(len(payload)))
-}
-
-// VerifyAndReturnACKWithTS verifies that the TSEcr field in the ACK matches
-// the provided tsVal and returns the original packet.
-func (r *RawEndpoint) VerifyAndReturnACKWithTS(tsVal uint32) []byte {
- r.C.t.Helper()
-	// Read the ACK and verify that its TSEcr matches the provided tsVal.
- ackPacket := r.C.GetPacket()
- checker.IPv4(r.C.t, ackPacket,
- checker.TCP(
- checker.DstPort(r.SrcPort),
- checker.TCPFlags(header.TCPFlagAck),
- checker.TCPSeqNum(uint32(r.AckNum)),
- checker.TCPAckNum(uint32(r.NextSeqNum)),
- checker.TCPTimestampChecker(true, 0, tsVal),
- ),
- )
- // Store the parsed TSVal from the ack as recentTS.
- tcpSeg := header.TCP(header.IPv4(ackPacket).Payload())
- opts := tcpSeg.ParsedOptions()
- r.RecentTS = opts.TSVal
- return ackPacket
-}
-
-// VerifyACKWithTS verifies that the tsEcr field in the ack matches the provided
-// tsVal.
-func (r *RawEndpoint) VerifyACKWithTS(tsVal uint32) {
- r.C.t.Helper()
- _ = r.VerifyAndReturnACKWithTS(tsVal)
-}
-
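Together with SendPacketWithTS above, this gives the round trip exercised by the timestamp tests earlier in this change; a sketch, assuming rep is a *RawEndpoint returned by one of the connect helpers and the tsVal increment is illustrative:

data := []byte{1, 2, 3}
tsVal := rep.TSVal + 100
rep.SendPacketWithTS(data, tsVal) // Send data carrying the new TSVal.
rep.VerifyACKWithTS(tsVal)        // The ACK must echo it in its TSEcr field.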
-// VerifyACKRcvWnd verifies that the window advertised by the incoming ACK
-// matches the provided rcvWnd.
-func (r *RawEndpoint) VerifyACKRcvWnd(rcvWnd uint16) {
- r.C.t.Helper()
- ackPacket := r.C.GetPacket()
- checker.IPv4(r.C.t, ackPacket,
- checker.TCP(
- checker.DstPort(r.SrcPort),
- checker.TCPFlags(header.TCPFlagAck),
- checker.TCPSeqNum(uint32(r.AckNum)),
- checker.TCPAckNum(uint32(r.NextSeqNum)),
- checker.TCPWindow(rcvWnd),
- ),
- )
-}
-
-// VerifyACKNoSACK verifies that the ACK does not contain a SACK block.
-func (r *RawEndpoint) VerifyACKNoSACK() {
- r.VerifyACKHasSACK(nil)
-}
-
-// VerifyACKHasSACK verifies that the ACK contains the specified SACKBlocks.
-func (r *RawEndpoint) VerifyACKHasSACK(sackBlocks []header.SACKBlock) {
-	// Read the ACK and verify that the TCP options in the segment contain
-	// the expected SACK blocks.
- ackPacket := r.C.GetPacket()
- checker.IPv4(r.C.t, ackPacket,
- checker.TCP(
- checker.DstPort(r.SrcPort),
- checker.TCPFlags(header.TCPFlagAck),
- checker.TCPSeqNum(uint32(r.AckNum)),
- checker.TCPAckNum(uint32(r.NextSeqNum)),
- checker.TCPSACKBlockChecker(sackBlocks),
- ),
- )
-}
-
-// CreateConnectedWithOptions creates and connects c.ep with the specified TCP
-// options enabled and returns a RawEndpoint which represents the other end of
-// the connection.
-//
-// It also verifies, where required (e.g. Timestamp), that the ACK to the SYN-ACK
-// does not carry an option that was not requested.
-func (c *Context) CreateConnectedWithOptions(wantOptions header.TCPSynOptions) *RawEndpoint {
- var err tcpip.Error
- c.EP, err = c.s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &c.WQ)
- if err != nil {
- c.t.Fatalf("c.s.NewEndpoint(tcp, ipv4...) = %v", err)
- }
- if got, want := tcp.EndpointState(c.EP.State()), tcp.StateInitial; got != want {
- c.t.Fatalf("Unexpected endpoint state: want %v, got %v", want, got)
- }
-
- // Start connection attempt.
- waitEntry, notifyCh := waiter.NewChannelEntry(nil)
- c.WQ.EventRegister(&waitEntry, waiter.WritableEvents)
- defer c.WQ.EventUnregister(&waitEntry)
-
- testFullAddr := tcpip.FullAddress{Addr: TestAddr, Port: TestPort}
- err = c.EP.Connect(testFullAddr)
- if _, ok := err.(*tcpip.ErrConnectStarted); !ok {
- c.t.Fatalf("c.ep.Connect(%v) = %v", testFullAddr, err)
- }
- // Receive SYN packet.
- b := c.GetPacket()
- // Validate that the syn has the timestamp option and a valid
- // TS value.
- mss := uint16(c.linkEP.MTU() - header.IPv4MinimumSize - header.TCPMinimumSize)
-
- checker.IPv4(c.t, b,
- checker.TCP(
- checker.DstPort(TestPort),
- checker.TCPFlags(header.TCPFlagSyn),
- checker.TCPSynOptions(header.TCPSynOptions{
- MSS: mss,
- TS: true,
- WS: int(c.WindowScale),
- SACKPermitted: c.SACKEnabled(),
- }),
- ),
- )
- if got, want := tcp.EndpointState(c.EP.State()), tcp.StateSynSent; got != want {
- c.t.Fatalf("Unexpected endpoint state: want %v, got %v", want, got)
- }
-
- tcpSeg := header.TCP(header.IPv4(b).Payload())
- synOptions := header.ParseSynOptions(tcpSeg.Options(), false)
-
- // Build options w/ tsVal to be sent in the SYN-ACK.
- synAckOptions := make([]byte, header.TCPOptionsMaximumSize)
- offset := 0
- if wantOptions.WS != -1 {
- offset += header.EncodeWSOption(wantOptions.WS, synAckOptions[offset:])
- }
- if wantOptions.TS {
- offset += header.EncodeTSOption(wantOptions.TSVal, synOptions.TSVal, synAckOptions[offset:])
- }
- if wantOptions.SACKPermitted {
- offset += header.EncodeSACKPermittedOption(synAckOptions[offset:])
- }
-
- offset += header.AddTCPOptionPadding(synAckOptions, offset)
-
- // Build SYN-ACK.
- c.IRS = seqnum.Value(tcpSeg.SequenceNumber())
- iss := seqnum.Value(TestInitialSequenceNumber)
- c.SendPacket(nil, &Headers{
- SrcPort: tcpSeg.DestinationPort(),
- DstPort: tcpSeg.SourcePort(),
- Flags: header.TCPFlagSyn | header.TCPFlagAck,
- SeqNum: iss,
- AckNum: c.IRS.Add(1),
- RcvWnd: 30000,
- TCPOpts: synAckOptions[:offset],
- })
-
- // Read ACK.
- ackPacket := c.GetPacket()
-
- // Verify TCP header fields.
- tcpCheckers := []checker.TransportChecker{
- checker.DstPort(TestPort),
- checker.TCPFlags(header.TCPFlagAck),
- checker.TCPSeqNum(uint32(c.IRS) + 1),
- checker.TCPAckNum(uint32(iss) + 1),
- }
-
- // Verify that tsEcr of ACK packet is wantOptions.TSVal if the
- // timestamp option was enabled, if not then we verify that
- // there is no timestamp in the ACK packet.
- if wantOptions.TS {
- tcpCheckers = append(tcpCheckers, checker.TCPTimestampChecker(true, 0, wantOptions.TSVal))
- } else {
- tcpCheckers = append(tcpCheckers, checker.TCPTimestampChecker(false, 0, 0))
- }
-
- checker.IPv4(c.t, ackPacket, checker.TCP(tcpCheckers...))
-
- ackSeg := header.TCP(header.IPv4(ackPacket).Payload())
- ackOptions := ackSeg.ParsedOptions()
-
- // Wait for connection to be established.
- select {
- case <-notifyCh:
- if err := c.EP.LastError(); err != nil {
- c.t.Fatalf("Unexpected error when connecting: %v", err)
- }
- case <-time.After(1 * time.Second):
- c.t.Fatalf("Timed out waiting for connection")
- }
- if got, want := tcp.EndpointState(c.EP.State()), tcp.StateEstablished; got != want {
- c.t.Fatalf("Unexpected endpoint state: want %v, got %v", want, got)
- }
-
- // Store the source port in use by the endpoint.
- c.Port = tcpSeg.SourcePort()
-
- // Mark in context that timestamp option is enabled for this endpoint.
- c.TimeStampEnabled = true
- c.RcvdWindowScale = uint8(synOptions.WS)
- return &RawEndpoint{
- C: c,
- SrcPort: tcpSeg.DestinationPort(),
- DstPort: tcpSeg.SourcePort(),
- Flags: header.TCPFlagAck | header.TCPFlagPsh,
- NextSeqNum: iss + 1,
- AckNum: c.IRS.Add(1),
- WndSize: 30000,
- RecentTS: ackOptions.TSVal,
- TSVal: wantOptions.TSVal,
- SACKPermitted: wantOptions.SACKPermitted,
- }
-}
-
-// AcceptWithOptions initializes a listening endpoint and connects to it with the
-// provided options enabled. It also verifies that the SYN-ACK has the expected
-// values for the provided options.
-//
-// The function returns a RawEndpoint representing the other end of the accepted
-// endpoint.
-func (c *Context) AcceptWithOptions(wndScale int, synOptions header.TCPSynOptions) *RawEndpoint {
- // Create EP and start listening.
- wq := &waiter.Queue{}
- ep, err := c.s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, wq)
- if err != nil {
- c.t.Fatalf("NewEndpoint failed: %v", err)
- }
- defer ep.Close()
-
- if err := ep.Bind(tcpip.FullAddress{Port: StackPort}); err != nil {
- c.t.Fatalf("Bind failed: %v", err)
- }
- if got, want := tcp.EndpointState(ep.State()), tcp.StateBound; got != want {
- c.t.Errorf("Unexpected endpoint state: want %v, got %v", want, got)
- }
-
- if err := ep.Listen(10); err != nil {
- c.t.Fatalf("Listen failed: %v", err)
- }
- if got, want := tcp.EndpointState(ep.State()), tcp.StateListen; got != want {
- c.t.Errorf("Unexpected endpoint state: want %v, got %v", want, got)
- }
-
- rep := c.PassiveConnectWithOptions(100, wndScale, synOptions)
-
- // Try to accept the connection.
- we, ch := waiter.NewChannelEntry(nil)
- wq.EventRegister(&we, waiter.ReadableEvents)
- defer wq.EventUnregister(&we)
-
- c.EP, _, err = ep.Accept(nil)
- if _, ok := err.(*tcpip.ErrWouldBlock); ok {
- // Wait for connection to be established.
- select {
- case <-ch:
- c.EP, _, err = ep.Accept(nil)
- if err != nil {
- c.t.Fatalf("Accept failed: %v", err)
- }
-
- case <-time.After(1 * time.Second):
- c.t.Fatalf("Timed out waiting for accept")
- }
- }
- if got, want := tcp.EndpointState(c.EP.State()), tcp.StateEstablished; got != want {
- c.t.Errorf("Unexpected endpoint state: want %v, got %v", want, got)
- }
-
- return rep
-}
-
-// PassiveConnect just disables WindowScaling and delegates the call to
-// PassiveConnectWithOptions.
-func (c *Context) PassiveConnect(maxPayload, wndScale int, synOptions header.TCPSynOptions) {
- synOptions.WS = -1
- c.PassiveConnectWithOptions(maxPayload, wndScale, synOptions)
-}
-
-// PassiveConnectWithOptions initiates a new connection (with the specified TCP
-// options enabled) to the port on which the Context.ep is listening for new
-// connections. It also validates that the SYN-ACK has the expected values for
-// the enabled options.
-//
-// NOTE: MSS is not a negotiated option and it can be asymmetric
-// in each direction. This function uses the maxPayload to set the MSS to be
-// sent to the peer on a connect and validates that the MSS in the SYN-ACK
-// response is equal to the MTU - (tcphdr len + iphdr len).
-//
-// wndScale is the expected window scale in the SYN-ACK and synOptions.WS is the
-// value of the window scaling option to be sent in the SYN. If synOptions.WS >=
-// 0 then we send the WindowScale option.
-func (c *Context) PassiveConnectWithOptions(maxPayload, wndScale int, synOptions header.TCPSynOptions) *RawEndpoint {
- c.t.Helper()
- opts := make([]byte, header.TCPOptionsMaximumSize)
- offset := 0
- offset += header.EncodeMSSOption(uint32(maxPayload), opts)
-
- if synOptions.WS >= 0 {
- offset += header.EncodeWSOption(3, opts[offset:])
- }
- if synOptions.TS {
- offset += header.EncodeTSOption(synOptions.TSVal, synOptions.TSEcr, opts[offset:])
- }
-
- if synOptions.SACKPermitted {
- offset += header.EncodeSACKPermittedOption(opts[offset:])
- }
-
- paddingToAdd := 4 - offset%4
- // Now add any padding bytes that might be required to quad align the
- // options.
- for i := offset; i < offset+paddingToAdd; i++ {
- opts[i] = header.TCPOptionNOP
- }
- offset += paddingToAdd
-
- // Send a SYN request.
- iss := seqnum.Value(TestInitialSequenceNumber)
- c.SendPacket(nil, &Headers{
- SrcPort: TestPort,
- DstPort: StackPort,
- Flags: header.TCPFlagSyn,
- SeqNum: iss,
- RcvWnd: 30000,
- TCPOpts: opts[:offset],
- })
-
- // Receive the SYN-ACK reply. Make sure MSS and other expected options
- // are present.
- b := c.GetPacket()
- tcp := header.TCP(header.IPv4(b).Payload())
- rcvdSynOptions := header.ParseSynOptions(tcp.Options(), true /* isAck */)
- c.IRS = seqnum.Value(tcp.SequenceNumber())
-
- tcpCheckers := []checker.TransportChecker{
- checker.SrcPort(StackPort),
- checker.DstPort(TestPort),
- checker.TCPFlags(header.TCPFlagAck | header.TCPFlagSyn),
- checker.TCPAckNum(uint32(iss) + 1),
- checker.TCPSynOptions(header.TCPSynOptions{MSS: synOptions.MSS, WS: wndScale, SACKPermitted: synOptions.SACKPermitted && c.SACKEnabled()}),
- }
-
- // If TS option was enabled in the original SYN then add a checker to
- // validate the Timestamp option in the SYN-ACK.
- if synOptions.TS {
- tcpCheckers = append(tcpCheckers, checker.TCPTimestampChecker(synOptions.TS, 0, synOptions.TSVal))
- } else {
- tcpCheckers = append(tcpCheckers, checker.TCPTimestampChecker(false, 0, 0))
- }
-
- checker.IPv4(c.t, b, checker.TCP(tcpCheckers...))
- rcvWnd := seqnum.Size(30000)
- ackHeaders := &Headers{
- SrcPort: TestPort,
- DstPort: StackPort,
- Flags: header.TCPFlagAck,
- SeqNum: iss + 1,
- AckNum: c.IRS + 1,
- RcvWnd: rcvWnd,
- }
-
- // If WS was expected to be in effect then scale the advertised window
- // correspondingly.
- if synOptions.WS > 0 {
- ackHeaders.RcvWnd = rcvWnd >> byte(synOptions.WS)
- }
-
- parsedOpts := tcp.ParsedOptions()
- if synOptions.TS {
-		// Send a TSVal incremented by 1 from the value sent in the SYN,
-		// and echo the TSVal received in the SYN-ACK back to the peer in
-		// the TSEcr field of the timestamp option.
- opts := [12]byte{header.TCPOptionNOP, header.TCPOptionNOP}
- header.EncodeTSOption(synOptions.TSVal+1, parsedOpts.TSVal, opts[2:])
- ackHeaders.TCPOpts = opts[:]
- }
-
- // Send ACK.
- c.SendPacket(nil, ackHeaders)
-
- c.RcvdWindowScale = uint8(rcvdSynOptions.WS)
- c.Port = StackPort
-
- return &RawEndpoint{
- C: c,
- SrcPort: TestPort,
- DstPort: StackPort,
- Flags: header.TCPFlagPsh | header.TCPFlagAck,
- NextSeqNum: iss + 1,
- AckNum: c.IRS + 1,
- WndSize: rcvWnd,
- SACKPermitted: synOptions.SACKPermitted && c.SACKEnabled(),
- RecentTS: parsedOpts.TSVal,
- TSVal: synOptions.TSVal + 1,
- }
-}
-
-// SACKEnabled returns true if the TCP protocol option TCPSACKEnabled is set to
-// true for the Stack in the context.
-func (c *Context) SACKEnabled() bool {
- var v tcpip.TCPSACKEnabled
- if err := c.Stack().TransportProtocolOption(tcp.ProtocolNumber, &v); err != nil {
- // Stack doesn't support SACK. So just return.
- return false
- }
- return bool(v)
-}
-
-// SetGSOEnabled enables or disables generic segmentation offload.
-func (c *Context) SetGSOEnabled(enable bool) {
- if enable {
- c.linkEP.SupportedGSOKind = stack.HWGSOSupported
- } else {
- c.linkEP.SupportedGSOKind = stack.GSONotSupported
- }
-}
-
-// MSSWithoutOptions returns the value for the MSS used by the stack when no
-// options are in use.
-func (c *Context) MSSWithoutOptions() uint16 {
- return uint16(c.linkEP.MTU() - header.IPv4MinimumSize - header.TCPMinimumSize)
-}
-
-// MSSWithoutOptionsV6 returns the value for the MSS used by the stack when no
-// options are in use for IPv6 packets.
-func (c *Context) MSSWithoutOptionsV6() uint16 {
- return uint16(c.linkEP.MTU() - header.IPv6MinimumSize - header.TCPMinimumSize)
-}
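These helpers encode the usual MSS arithmetic the tests rely on when sizing the link MTU. A short sketch from a calling test's perspective, with maxPayload standing in for whatever per-segment payload size a test wants to allow:

// MSS = MTU - IPv4 header - TCP header (no options), so choosing the MTU
// this way yields exactly maxPayload bytes of TCP payload per segment.
mtu := uint32(header.TCPMinimumSize + header.IPv4MinimumSize + maxPayload)
c := context.New(t, mtu)
defer c.Cleanup()
if got, want := c.MSSWithoutOptions(), uint16(maxPayload); got != want {
	t.Fatalf("MSSWithoutOptions() = %d, want %d", got, want)
}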
diff --git a/pkg/tcpip/transport/tcp/timer_test.go b/pkg/tcpip/transport/tcp/timer_test.go
deleted file mode 100644
index 479752de7..000000000
--- a/pkg/tcpip/transport/tcp/timer_test.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp
-
-import (
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/sleep"
- "gvisor.dev/gvisor/pkg/tcpip/faketime"
-)
-
-func TestCleanup(t *testing.T) {
- const (
- timerDurationSeconds = 2
- isAssertedTimeoutSeconds = timerDurationSeconds + 1
- )
-
- clock := faketime.NewManualClock()
-
- tmr := timer{}
- w := sleep.Waker{}
- tmr.init(clock, &w)
- tmr.enable(timerDurationSeconds * time.Second)
- tmr.cleanup()
-
- if want := (timer{}); tmr != want {
- t.Errorf("got tmr = %+v, want = %+v", tmr, want)
- }
-
- // The waker should not be asserted.
- for i := 0; i < isAssertedTimeoutSeconds; i++ {
- clock.Advance(time.Second)
- if w.IsAsserted() {
- t.Fatalf("waker asserted unexpectedly")
- }
- }
-}
diff --git a/pkg/tcpip/transport/tcpconntrack/BUILD b/pkg/tcpip/transport/tcpconntrack/BUILD
deleted file mode 100644
index 3ad6994a7..000000000
--- a/pkg/tcpip/transport/tcpconntrack/BUILD
+++ /dev/null
@@ -1,23 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "tcpconntrack",
- srcs = ["tcp_conntrack.go"],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/tcpip/header",
- "//pkg/tcpip/seqnum",
- ],
-)
-
-go_test(
- name = "tcpconntrack_test",
- size = "small",
- srcs = ["tcp_conntrack_test.go"],
- deps = [
- ":tcpconntrack",
- "//pkg/tcpip/header",
- ],
-)
diff --git a/pkg/tcpip/transport/tcpconntrack/tcp_conntrack_test.go b/pkg/tcpip/transport/tcpconntrack/tcp_conntrack_test.go
deleted file mode 100644
index 6c5ddc3c7..000000000
--- a/pkg/tcpip/transport/tcpconntrack/tcp_conntrack_test.go
+++ /dev/null
@@ -1,511 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcpconntrack_test
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/transport/tcpconntrack"
-)
-
-// connected creates a connection tracker TCB and sets it to a connected state
-// by performing a 3-way handshake.
-func connected(t *testing.T, iss, irs uint32, isw, irw uint16) *tcpconntrack.TCB {
- // Send SYN.
- tcp := make(header.TCP, header.TCPMinimumSize)
- tcp.Encode(&header.TCPFields{
- SeqNum: iss,
- AckNum: 0,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagSyn,
- WindowSize: irw,
- })
-
- tcb := tcpconntrack.TCB{}
- tcb.Init(tcp)
-
- // Receive SYN-ACK.
- tcp.Encode(&header.TCPFields{
- SeqNum: irs,
- AckNum: iss + 1,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagSyn | header.TCPFlagAck,
- WindowSize: isw,
- })
-
- if r := tcb.UpdateStateInbound(tcp); r != tcpconntrack.ResultAlive {
- t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive)
- }
-
- // Send ACK.
- tcp.Encode(&header.TCPFields{
- SeqNum: iss + 1,
- AckNum: irs + 1,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagAck,
- WindowSize: irw,
- })
-
- if r := tcb.UpdateStateOutbound(tcp); r != tcpconntrack.ResultAlive {
- t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive)
- }
-
- return &tcb
-}
-
-func TestConnectionRefused(t *testing.T) {
- // Send SYN.
- tcp := make(header.TCP, header.TCPMinimumSize)
- tcp.Encode(&header.TCPFields{
- SeqNum: 1234,
- AckNum: 0,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagSyn,
- WindowSize: 30000,
- })
-
- tcb := tcpconntrack.TCB{}
- tcb.Init(tcp)
-
- // Receive RST.
- tcp.Encode(&header.TCPFields{
- SeqNum: 789,
- AckNum: 1235,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagRst | header.TCPFlagAck,
- WindowSize: 50000,
- })
-
- if r := tcb.UpdateStateInbound(tcp); r != tcpconntrack.ResultReset {
- t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultReset)
- }
-}
-
-func TestConnectionRefusedInSynRcvd(t *testing.T) {
- // Send SYN.
- tcp := make(header.TCP, header.TCPMinimumSize)
- tcp.Encode(&header.TCPFields{
- SeqNum: 1234,
- AckNum: 0,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagSyn,
- WindowSize: 30000,
- })
-
- tcb := tcpconntrack.TCB{}
- tcb.Init(tcp)
-
- // Receive SYN.
- tcp.Encode(&header.TCPFields{
- SeqNum: 789,
- AckNum: 0,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagSyn,
- WindowSize: 50000,
- })
-
- if r := tcb.UpdateStateInbound(tcp); r != tcpconntrack.ResultAlive {
- t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive)
- }
-
- // Receive RST with no ACK.
- tcp.Encode(&header.TCPFields{
- SeqNum: 790,
- AckNum: 0,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagRst,
- WindowSize: 50000,
- })
-
- if r := tcb.UpdateStateInbound(tcp); r != tcpconntrack.ResultReset {
- t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultReset)
- }
-}
-
-func TestConnectionResetInSynRcvd(t *testing.T) {
- // Send SYN.
- tcp := make(header.TCP, header.TCPMinimumSize)
- tcp.Encode(&header.TCPFields{
- SeqNum: 1234,
- AckNum: 0,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagSyn,
- WindowSize: 30000,
- })
-
- tcb := tcpconntrack.TCB{}
- tcb.Init(tcp)
-
- // Receive SYN.
- tcp.Encode(&header.TCPFields{
- SeqNum: 789,
- AckNum: 0,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagSyn,
- WindowSize: 50000,
- })
-
- if r := tcb.UpdateStateInbound(tcp); r != tcpconntrack.ResultAlive {
- t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive)
- }
-
- // Send RST with no ACK.
- tcp.Encode(&header.TCPFields{
- SeqNum: 1235,
- AckNum: 0,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagRst,
- })
-
- if r := tcb.UpdateStateOutbound(tcp); r != tcpconntrack.ResultReset {
- t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultReset)
- }
-}
-
-func TestRetransmitOnSynSent(t *testing.T) {
- // Send initial SYN.
- tcp := make(header.TCP, header.TCPMinimumSize)
- tcp.Encode(&header.TCPFields{
- SeqNum: 1234,
- AckNum: 0,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagSyn,
- WindowSize: 30000,
- })
-
- tcb := tcpconntrack.TCB{}
- tcb.Init(tcp)
-
- // Retransmit the same SYN.
- if r := tcb.UpdateStateOutbound(tcp); r != tcpconntrack.ResultConnecting {
- t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultConnecting)
- }
-}
-
-func TestRetransmitOnSynRcvd(t *testing.T) {
- // Send initial SYN.
- tcp := make(header.TCP, header.TCPMinimumSize)
- tcp.Encode(&header.TCPFields{
- SeqNum: 1234,
- AckNum: 0,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagSyn,
- WindowSize: 30000,
- })
-
- tcb := tcpconntrack.TCB{}
- tcb.Init(tcp)
-
- // Receive SYN. This will cause the state to go to SYN-RCVD.
- tcp.Encode(&header.TCPFields{
- SeqNum: 789,
- AckNum: 0,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagSyn,
- WindowSize: 50000,
- })
-
- if r := tcb.UpdateStateInbound(tcp); r != tcpconntrack.ResultAlive {
- t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive)
- }
-
- // Retransmit the original SYN.
- tcp.Encode(&header.TCPFields{
- SeqNum: 1234,
- AckNum: 0,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagSyn,
- WindowSize: 30000,
- })
-
- if r := tcb.UpdateStateOutbound(tcp); r != tcpconntrack.ResultAlive {
- t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive)
- }
-
- // Transmit a SYN-ACK.
- tcp.Encode(&header.TCPFields{
- SeqNum: 1234,
- AckNum: 790,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagSyn | header.TCPFlagAck,
- WindowSize: 30000,
- })
-
- if r := tcb.UpdateStateOutbound(tcp); r != tcpconntrack.ResultAlive {
- t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive)
- }
-}
-
-func TestClosedBySelf(t *testing.T) {
- tcb := connected(t, 1234, 789, 30000, 50000)
-
- // Send FIN.
- tcp := make(header.TCP, header.TCPMinimumSize)
- tcp.Encode(&header.TCPFields{
- SeqNum: 1235,
- AckNum: 790,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagAck | header.TCPFlagFin,
- WindowSize: 30000,
- })
-
- if r := tcb.UpdateStateOutbound(tcp); r != tcpconntrack.ResultAlive {
- t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive)
- }
-
- // Receive FIN/ACK.
- tcp.Encode(&header.TCPFields{
- SeqNum: 790,
- AckNum: 1236,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagAck | header.TCPFlagFin,
- WindowSize: 50000,
- })
-
- if r := tcb.UpdateStateInbound(tcp); r != tcpconntrack.ResultAlive {
- t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive)
- }
-
- // Send ACK.
- tcp.Encode(&header.TCPFields{
- SeqNum: 1236,
- AckNum: 791,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagAck,
- WindowSize: 30000,
- })
-
- if r := tcb.UpdateStateOutbound(tcp); r != tcpconntrack.ResultClosedBySelf {
- t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultClosedBySelf)
- }
-}
-
-func TestClosedByPeer(t *testing.T) {
- tcb := connected(t, 1234, 789, 30000, 50000)
-
- // Receive FIN.
- tcp := make(header.TCP, header.TCPMinimumSize)
- tcp.Encode(&header.TCPFields{
- SeqNum: 790,
- AckNum: 1235,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagAck | header.TCPFlagFin,
- WindowSize: 50000,
- })
-
- if r := tcb.UpdateStateInbound(tcp); r != tcpconntrack.ResultAlive {
- t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive)
- }
-
- // Send FIN/ACK.
- tcp.Encode(&header.TCPFields{
- SeqNum: 1235,
- AckNum: 791,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagAck | header.TCPFlagFin,
- WindowSize: 30000,
- })
-
- if r := tcb.UpdateStateOutbound(tcp); r != tcpconntrack.ResultAlive {
- t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive)
- }
-
- // Receive ACK.
- tcp.Encode(&header.TCPFields{
- SeqNum: 791,
- AckNum: 1236,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagAck,
- WindowSize: 50000,
- })
-
- if r := tcb.UpdateStateInbound(tcp); r != tcpconntrack.ResultClosedByPeer {
- t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultClosedByPeer)
- }
-}
-
-func TestSendAndReceiveDataClosedBySelf(t *testing.T) {
- sseq := uint32(1234)
- rseq := uint32(789)
- tcb := connected(t, sseq, rseq, 30000, 50000)
- sseq++
- rseq++
-
- // Send some data.
- tcp := make(header.TCP, header.TCPMinimumSize+1024)
-
- for i := uint32(0); i < 10; i++ {
- // Send some data.
- tcp.Encode(&header.TCPFields{
- SeqNum: sseq,
- AckNum: rseq,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagAck,
- WindowSize: 30000,
- })
- sseq += uint32(len(tcp)) - header.TCPMinimumSize
-
- if r := tcb.UpdateStateOutbound(tcp); r != tcpconntrack.ResultAlive {
- t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive)
- }
-
- // Receive ack for data.
- tcp.Encode(&header.TCPFields{
- SeqNum: rseq,
- AckNum: sseq,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagAck,
- WindowSize: 50000,
- })
-
- if r := tcb.UpdateStateInbound(tcp[:header.TCPMinimumSize]); r != tcpconntrack.ResultAlive {
- t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive)
- }
- }
-
- for i := uint32(0); i < 10; i++ {
- // Receive some data.
- tcp.Encode(&header.TCPFields{
- SeqNum: rseq,
- AckNum: sseq,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagAck,
- WindowSize: 50000,
- })
- rseq += uint32(len(tcp)) - header.TCPMinimumSize
-
- if r := tcb.UpdateStateInbound(tcp); r != tcpconntrack.ResultAlive {
- t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive)
- }
-
- // Send ack for data.
- tcp.Encode(&header.TCPFields{
- SeqNum: sseq,
- AckNum: rseq,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagAck,
- WindowSize: 30000,
- })
-
- if r := tcb.UpdateStateOutbound(tcp[:header.TCPMinimumSize]); r != tcpconntrack.ResultAlive {
- t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive)
- }
- }
-
- // Send FIN.
- tcp = tcp[:header.TCPMinimumSize]
- tcp.Encode(&header.TCPFields{
- SeqNum: sseq,
- AckNum: rseq,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagAck | header.TCPFlagFin,
- WindowSize: 30000,
- })
- sseq++
-
- if r := tcb.UpdateStateOutbound(tcp); r != tcpconntrack.ResultAlive {
- t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive)
- }
-
- // Receive FIN/ACK.
- tcp.Encode(&header.TCPFields{
- SeqNum: rseq,
- AckNum: sseq,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagAck | header.TCPFlagFin,
- WindowSize: 50000,
- })
- rseq++
-
- if r := tcb.UpdateStateInbound(tcp); r != tcpconntrack.ResultAlive {
- t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive)
- }
-
- // Send ACK.
- tcp.Encode(&header.TCPFields{
- SeqNum: sseq,
- AckNum: rseq,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagAck,
- WindowSize: 30000,
- })
-
- if r := tcb.UpdateStateOutbound(tcp); r != tcpconntrack.ResultClosedBySelf {
- t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultClosedBySelf)
- }
-}
-
-func TestIgnoreBadResetOnSynSent(t *testing.T) {
- // Send SYN.
- tcp := make(header.TCP, header.TCPMinimumSize)
- tcp.Encode(&header.TCPFields{
- SeqNum: 1234,
- AckNum: 0,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagSyn,
- WindowSize: 30000,
- })
-
- tcb := tcpconntrack.TCB{}
- tcb.Init(tcp)
-
- // Receive a RST with a bad ACK; it should not cause the connection to
- // be reset.
- acks := []uint32{1234, 1236, 1000, 5000}
- flags := []header.TCPFlags{header.TCPFlagRst, header.TCPFlagRst | header.TCPFlagAck}
- for _, a := range acks {
- for _, f := range flags {
- tcp.Encode(&header.TCPFields{
- SeqNum: 789,
- AckNum: a,
- DataOffset: header.TCPMinimumSize,
- Flags: f,
- WindowSize: 50000,
- })
-
- if r := tcb.UpdateStateInbound(tcp); r != tcpconntrack.ResultConnecting {
- t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive)
- }
- }
- }
-
- // Complete the handshake.
- // Receive SYN-ACK.
- tcp.Encode(&header.TCPFields{
- SeqNum: 789,
- AckNum: 1235,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagSyn | header.TCPFlagAck,
- WindowSize: 50000,
- })
-
- if r := tcb.UpdateStateInbound(tcp); r != tcpconntrack.ResultAlive {
- t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive)
- }
-
- // Send ACK.
- tcp.Encode(&header.TCPFields{
- SeqNum: 1235,
- AckNum: 790,
- DataOffset: header.TCPMinimumSize,
- Flags: header.TCPFlagAck,
- WindowSize: 30000,
- })
-
- if r := tcb.UpdateStateOutbound(tcp); r != tcpconntrack.ResultAlive {
- t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive)
- }
-}
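[Editor's note, not part of the diff: the deleted test above walks a tcpconntrack.TCB through its whole lifecycle (Init, UpdateStateInbound/Outbound, and the Result* values). A minimal sketch of how a caller might drive the same API over a bidirectional stream of already-parsed TCP headers is given below; the track helper, its outbound flag, and the package name are hypothetical and use only identifiers that appear in the test.]

package tcpconntrack_example

import (
	"gvisor.dev/gvisor/pkg/tcpip/header"
	"gvisor.dev/gvisor/pkg/tcpip/transport/tcpconntrack"
)

// track feeds one parsed TCP header into the TCB in the given direction and
// reports whether tracking can stop. Illustrative only.
func track(tcb *tcpconntrack.TCB, outbound bool, hdr header.TCP) (done bool) {
	update := tcb.UpdateStateInbound
	if outbound {
		update = tcb.UpdateStateOutbound
	}
	switch update(hdr) {
	case tcpconntrack.ResultClosedBySelf, tcpconntrack.ResultClosedByPeer, tcpconntrack.ResultReset:
		// The connection has ended; the TCB can be discarded.
		return true
	default:
		// ResultAlive or ResultConnecting: keep tracking.
		return false
	}
}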
diff --git a/pkg/tcpip/transport/tcpconntrack/tcpconntrack_state_autogen.go b/pkg/tcpip/transport/tcpconntrack/tcpconntrack_state_autogen.go
new file mode 100644
index 000000000..ff53204da
--- /dev/null
+++ b/pkg/tcpip/transport/tcpconntrack/tcpconntrack_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package tcpconntrack
diff --git a/pkg/tcpip/transport/udp/BUILD b/pkg/tcpip/transport/udp/BUILD
deleted file mode 100644
index cdc344ab7..000000000
--- a/pkg/tcpip/transport/udp/BUILD
+++ /dev/null
@@ -1,65 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "udp_packet_list",
- out = "udp_packet_list.go",
- package = "udp",
- prefix = "udpPacket",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*udpPacket",
- "Linker": "*udpPacket",
- },
-)
-
-go_library(
- name = "udp",
- srcs = [
- "endpoint.go",
- "endpoint_state.go",
- "forwarder.go",
- "protocol.go",
- "udp_packet_list.go",
- ],
- imports = ["gvisor.dev/gvisor/pkg/tcpip/buffer"],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/sleep",
- "//pkg/sync",
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/header",
- "//pkg/tcpip/header/parse",
- "//pkg/tcpip/ports",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/transport/raw",
- "//pkg/waiter",
- ],
-)
-
-go_test(
- name = "udp_x_test",
- size = "small",
- srcs = ["udp_test.go"],
- deps = [
- ":udp",
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/checker",
- "//pkg/tcpip/faketime",
- "//pkg/tcpip/header",
- "//pkg/tcpip/link/channel",
- "//pkg/tcpip/link/loopback",
- "//pkg/tcpip/link/sniffer",
- "//pkg/tcpip/network/ipv4",
- "//pkg/tcpip/network/ipv6",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/testutil",
- "//pkg/tcpip/transport/icmp",
- "//pkg/waiter",
- "@com_github_google_go_cmp//cmp:go_default_library",
- ],
-)
diff --git a/pkg/tcpip/transport/udp/udp_packet_list.go b/pkg/tcpip/transport/udp/udp_packet_list.go
new file mode 100644
index 000000000..c396f77c9
--- /dev/null
+++ b/pkg/tcpip/transport/udp/udp_packet_list.go
@@ -0,0 +1,221 @@
+package udp
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type udpPacketElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (udpPacketElementMapper) linkerFor(elem *udpPacket) *udpPacket { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type udpPacketList struct {
+ head *udpPacket
+ tail *udpPacket
+}
+
+// Reset resets list l to the empty state.
+func (l *udpPacketList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *udpPacketList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *udpPacketList) Front() *udpPacket {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *udpPacketList) Back() *udpPacket {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *udpPacketList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (udpPacketElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *udpPacketList) PushFront(e *udpPacket) {
+ linker := udpPacketElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ udpPacketElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *udpPacketList) PushBack(e *udpPacket) {
+ linker := udpPacketElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ udpPacketElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *udpPacketList) PushBackList(m *udpPacketList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ udpPacketElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ udpPacketElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *udpPacketList) InsertAfter(b, e *udpPacket) {
+ bLinker := udpPacketElementMapper{}.linkerFor(b)
+ eLinker := udpPacketElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ udpPacketElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *udpPacketList) InsertBefore(a, e *udpPacket) {
+ aLinker := udpPacketElementMapper{}.linkerFor(a)
+ eLinker := udpPacketElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ udpPacketElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *udpPacketList) Remove(e *udpPacket) {
+ linker := udpPacketElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ udpPacketElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ udpPacketElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type udpPacketEntry struct {
+ next *udpPacket
+ prev *udpPacket
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *udpPacketEntry) Next() *udpPacket {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *udpPacketEntry) Prev() *udpPacket {
+ return e.prev
+}
+
+// SetNext assigns 'elem' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *udpPacketEntry) SetNext(elem *udpPacket) {
+ e.next = elem
+}
+
+// SetPrev assigns 'elem' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *udpPacketEntry) SetPrev(elem *udpPacket) {
+ e.prev = elem
+}
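[Editor's note, not part of the diff: the file above is the go_template_instance expansion of gVisor's intrusive list for *udpPacket; the element participates by embedding udpPacketEntry, which supplies the Next/Prev/SetNext/SetPrev methods that the identity ElementMapper relies on. The helper below is hypothetical, uses only the generated API, and assumes it sits in the same udp package as the generated code.]

package udp

// drainPacketList is an illustrative helper: front removal is O(1) and
// allocation-free, which is the point of the intrusive list. (PushBackList
// similarly splices one list onto another in O(1).)
func drainPacketList(l *udpPacketList) int {
	n := 0
	for !l.Empty() {
		pkt := l.Front()
		l.Remove(pkt) // unlinks pkt and clears its next/prev pointers
		n++
	}
	return n
}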
diff --git a/pkg/tcpip/transport/udp/udp_state_autogen.go b/pkg/tcpip/transport/udp/udp_state_autogen.go
new file mode 100644
index 000000000..6a3da67e2
--- /dev/null
+++ b/pkg/tcpip/transport/udp/udp_state_autogen.go
@@ -0,0 +1,239 @@
+// automatically generated by stateify.
+
+package udp
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+ "gvisor.dev/gvisor/pkg/tcpip/buffer"
+)
+
+func (p *udpPacket) StateTypeName() string {
+ return "pkg/tcpip/transport/udp.udpPacket"
+}
+
+func (p *udpPacket) StateFields() []string {
+ return []string{
+ "udpPacketEntry",
+ "senderAddress",
+ "destinationAddress",
+ "packetInfo",
+ "data",
+ "receivedAt",
+ "tos",
+ }
+}
+
+func (p *udpPacket) beforeSave() {}
+
+// +checklocksignore
+func (p *udpPacket) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+ var dataValue buffer.VectorisedView = p.saveData()
+ stateSinkObject.SaveValue(4, dataValue)
+ var receivedAtValue int64 = p.saveReceivedAt()
+ stateSinkObject.SaveValue(5, receivedAtValue)
+ stateSinkObject.Save(0, &p.udpPacketEntry)
+ stateSinkObject.Save(1, &p.senderAddress)
+ stateSinkObject.Save(2, &p.destinationAddress)
+ stateSinkObject.Save(3, &p.packetInfo)
+ stateSinkObject.Save(6, &p.tos)
+}
+
+func (p *udpPacket) afterLoad() {}
+
+// +checklocksignore
+func (p *udpPacket) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &p.udpPacketEntry)
+ stateSourceObject.Load(1, &p.senderAddress)
+ stateSourceObject.Load(2, &p.destinationAddress)
+ stateSourceObject.Load(3, &p.packetInfo)
+ stateSourceObject.Load(6, &p.tos)
+ stateSourceObject.LoadValue(4, new(buffer.VectorisedView), func(y interface{}) { p.loadData(y.(buffer.VectorisedView)) })
+ stateSourceObject.LoadValue(5, new(int64), func(y interface{}) { p.loadReceivedAt(y.(int64)) })
+}
+
+func (e *endpoint) StateTypeName() string {
+ return "pkg/tcpip/transport/udp.endpoint"
+}
+
+func (e *endpoint) StateFields() []string {
+ return []string{
+ "TransportEndpointInfo",
+ "DefaultSocketOptionsHandler",
+ "waiterQueue",
+ "uniqueID",
+ "rcvReady",
+ "rcvList",
+ "rcvBufSize",
+ "rcvClosed",
+ "state",
+ "dstPort",
+ "ttl",
+ "multicastTTL",
+ "multicastAddr",
+ "multicastNICID",
+ "portFlags",
+ "lastError",
+ "boundBindToDevice",
+ "boundPortFlags",
+ "sendTOS",
+ "shutdownFlags",
+ "multicastMemberships",
+ "effectiveNetProtos",
+ "owner",
+ "ops",
+ "frozen",
+ }
+}
+
+// +checklocksignore
+func (e *endpoint) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.TransportEndpointInfo)
+ stateSinkObject.Save(1, &e.DefaultSocketOptionsHandler)
+ stateSinkObject.Save(2, &e.waiterQueue)
+ stateSinkObject.Save(3, &e.uniqueID)
+ stateSinkObject.Save(4, &e.rcvReady)
+ stateSinkObject.Save(5, &e.rcvList)
+ stateSinkObject.Save(6, &e.rcvBufSize)
+ stateSinkObject.Save(7, &e.rcvClosed)
+ stateSinkObject.Save(8, &e.state)
+ stateSinkObject.Save(9, &e.dstPort)
+ stateSinkObject.Save(10, &e.ttl)
+ stateSinkObject.Save(11, &e.multicastTTL)
+ stateSinkObject.Save(12, &e.multicastAddr)
+ stateSinkObject.Save(13, &e.multicastNICID)
+ stateSinkObject.Save(14, &e.portFlags)
+ stateSinkObject.Save(15, &e.lastError)
+ stateSinkObject.Save(16, &e.boundBindToDevice)
+ stateSinkObject.Save(17, &e.boundPortFlags)
+ stateSinkObject.Save(18, &e.sendTOS)
+ stateSinkObject.Save(19, &e.shutdownFlags)
+ stateSinkObject.Save(20, &e.multicastMemberships)
+ stateSinkObject.Save(21, &e.effectiveNetProtos)
+ stateSinkObject.Save(22, &e.owner)
+ stateSinkObject.Save(23, &e.ops)
+ stateSinkObject.Save(24, &e.frozen)
+}
+
+// +checklocksignore
+func (e *endpoint) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.TransportEndpointInfo)
+ stateSourceObject.Load(1, &e.DefaultSocketOptionsHandler)
+ stateSourceObject.Load(2, &e.waiterQueue)
+ stateSourceObject.Load(3, &e.uniqueID)
+ stateSourceObject.Load(4, &e.rcvReady)
+ stateSourceObject.Load(5, &e.rcvList)
+ stateSourceObject.Load(6, &e.rcvBufSize)
+ stateSourceObject.Load(7, &e.rcvClosed)
+ stateSourceObject.Load(8, &e.state)
+ stateSourceObject.Load(9, &e.dstPort)
+ stateSourceObject.Load(10, &e.ttl)
+ stateSourceObject.Load(11, &e.multicastTTL)
+ stateSourceObject.Load(12, &e.multicastAddr)
+ stateSourceObject.Load(13, &e.multicastNICID)
+ stateSourceObject.Load(14, &e.portFlags)
+ stateSourceObject.Load(15, &e.lastError)
+ stateSourceObject.Load(16, &e.boundBindToDevice)
+ stateSourceObject.Load(17, &e.boundPortFlags)
+ stateSourceObject.Load(18, &e.sendTOS)
+ stateSourceObject.Load(19, &e.shutdownFlags)
+ stateSourceObject.Load(20, &e.multicastMemberships)
+ stateSourceObject.Load(21, &e.effectiveNetProtos)
+ stateSourceObject.Load(22, &e.owner)
+ stateSourceObject.Load(23, &e.ops)
+ stateSourceObject.Load(24, &e.frozen)
+ stateSourceObject.AfterLoad(e.afterLoad)
+}
+
+func (m *multicastMembership) StateTypeName() string {
+ return "pkg/tcpip/transport/udp.multicastMembership"
+}
+
+func (m *multicastMembership) StateFields() []string {
+ return []string{
+ "nicID",
+ "multicastAddr",
+ }
+}
+
+func (m *multicastMembership) beforeSave() {}
+
+// +checklocksignore
+func (m *multicastMembership) StateSave(stateSinkObject state.Sink) {
+ m.beforeSave()
+ stateSinkObject.Save(0, &m.nicID)
+ stateSinkObject.Save(1, &m.multicastAddr)
+}
+
+func (m *multicastMembership) afterLoad() {}
+
+// +checklocksignore
+func (m *multicastMembership) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &m.nicID)
+ stateSourceObject.Load(1, &m.multicastAddr)
+}
+
+func (l *udpPacketList) StateTypeName() string {
+ return "pkg/tcpip/transport/udp.udpPacketList"
+}
+
+func (l *udpPacketList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *udpPacketList) beforeSave() {}
+
+// +checklocksignore
+func (l *udpPacketList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *udpPacketList) afterLoad() {}
+
+// +checklocksignore
+func (l *udpPacketList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *udpPacketEntry) StateTypeName() string {
+ return "pkg/tcpip/transport/udp.udpPacketEntry"
+}
+
+func (e *udpPacketEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *udpPacketEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *udpPacketEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *udpPacketEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *udpPacketEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func init() {
+ state.Register((*udpPacket)(nil))
+ state.Register((*endpoint)(nil))
+ state.Register((*multicastMembership)(nil))
+ state.Register((*udpPacketList)(nil))
+ state.Register((*udpPacketEntry)(nil))
+}
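[Editor's note, not part of the diff: the file above is what stateify generates to make the udp types save/restorable. The hand-written sketch below repeats the same pattern for a made-up counter type so the field indices and the AfterLoad hook are easier to follow; it is illustrative only.]

package statesketch

import "gvisor.dev/gvisor/pkg/state"

// counter is a made-up type; its single field gets index 0, matching
// StateFields()[0], exactly as in the generated code above.
type counter struct {
	hits int64
}

func (c *counter) StateTypeName() string { return "statesketch.counter" }

func (c *counter) StateFields() []string { return []string{"hits"} }

func (c *counter) beforeSave() {}

func (c *counter) StateSave(sink state.Sink) {
	c.beforeSave()
	sink.Save(0, &c.hits)
}

func (c *counter) afterLoad() {}

func (c *counter) StateLoad(source state.Source) {
	source.Load(0, &c.hits)
	source.AfterLoad(c.afterLoad) // runs after all fields are loaded
}

func init() {
	state.Register((*counter)(nil))
}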
diff --git a/pkg/tcpip/transport/udp/udp_test.go b/pkg/tcpip/transport/udp/udp_test.go
deleted file mode 100644
index 4008cacf2..000000000
--- a/pkg/tcpip/transport/udp/udp_test.go
+++ /dev/null
@@ -1,2559 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package udp_test
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "math/rand"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/checker"
- "gvisor.dev/gvisor/pkg/tcpip/faketime"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/link/channel"
- "gvisor.dev/gvisor/pkg/tcpip/link/loopback"
- "gvisor.dev/gvisor/pkg/tcpip/link/sniffer"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/testutil"
- "gvisor.dev/gvisor/pkg/tcpip/transport/icmp"
- "gvisor.dev/gvisor/pkg/tcpip/transport/udp"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-// Addresses and ports used for testing. It is recommended that tests stick to
-// using these addresses as it allows using the testFlow helper.
-// Naming rules: 'stack*' denotes local addresses and ports, while 'test*'
-// represents the remote endpoint.
-const (
- v4MappedAddrPrefix = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff"
- stackV6Addr = "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01"
- testV6Addr = "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02"
- stackV4MappedAddr = v4MappedAddrPrefix + stackAddr
- testV4MappedAddr = v4MappedAddrPrefix + testAddr
- multicastV4MappedAddr = v4MappedAddrPrefix + multicastAddr
- broadcastV4MappedAddr = v4MappedAddrPrefix + broadcastAddr
- v4MappedWildcardAddr = v4MappedAddrPrefix + "\x00\x00\x00\x00"
-
- stackAddr = "\x0a\x00\x00\x01"
- stackPort = 1234
- testAddr = "\x0a\x00\x00\x02"
- testPort = 4096
- invalidPort = 8192
- multicastAddr = "\xe8\x2b\xd3\xea"
- multicastV6Addr = "\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
- broadcastAddr = header.IPv4Broadcast
- testTOS = 0x80
-
- // defaultMTU is the MTU, in bytes, used throughout the tests, except
- // where another value is explicitly used. It is chosen to match the MTU
- // of loopback interfaces on linux systems.
- defaultMTU = 65536
-)
-
-// header4Tuple stores the 4-tuple {src-IP, src-port, dst-IP, dst-port} used in
-// a packet header. These values are used to populate a header or verify one.
-// Note that because they are used in packet headers, the addresses are never in
-// a V4-mapped format.
-type header4Tuple struct {
- srcAddr tcpip.FullAddress
- dstAddr tcpip.FullAddress
-}
-
-// testFlow implements a helper type used for sending and receiving test
-// packets. A given test flow value defines 1) the socket endpoint used for the
-// test and 2) the type of packet sent or received on the endpoint. E.g., a
-// multicastV6Only flow is a V6 multicast packet passing through a V6-only
-// endpoint. The type provides helper methods to characterize the flow (e.g.,
-// isV4) as well as return a proper header4Tuple for it.
-type testFlow int
-
-const (
- unicastV4 testFlow = iota // V4 unicast on a V4 socket
- unicastV4in6 // V4-mapped unicast on a V6-dual socket
- unicastV6 // V6 unicast on a V6 socket
- unicastV6Only // V6 unicast on a V6-only socket
- multicastV4 // V4 multicast on a V4 socket
- multicastV4in6 // V4-mapped multicast on a V6-dual socket
- multicastV6 // V6 multicast on a V6 socket
- multicastV6Only // V6 multicast on a V6-only socket
- broadcast // V4 broadcast on a V4 socket
- broadcastIn6 // V4-mapped broadcast on a V6-dual socket
- reverseMulticast4 // V4 multicast src. Must fail.
- reverseMulticast6 // V6 multicast src. Must fail.
-)
-
-func (flow testFlow) String() string {
- switch flow {
- case unicastV4:
- return "unicastV4"
- case unicastV6:
- return "unicastV6"
- case unicastV6Only:
- return "unicastV6Only"
- case unicastV4in6:
- return "unicastV4in6"
- case multicastV4:
- return "multicastV4"
- case multicastV6:
- return "multicastV6"
- case multicastV6Only:
- return "multicastV6Only"
- case multicastV4in6:
- return "multicastV4in6"
- case broadcast:
- return "broadcast"
- case broadcastIn6:
- return "broadcastIn6"
- case reverseMulticast4:
- return "reverseMulticast4"
- case reverseMulticast6:
- return "reverseMulticast6"
- default:
- return "unknown"
- }
-}
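[Editor's note, not part of the diff: the flow helpers above were consumed by table-driven subtests further down in this deleted file. The condensed sketch below mirrors that pattern using only helpers defined elsewhere in the file (newDualTestContext, createEndpointForFlow, testRead); TestFlowsSketch itself never existed in the file.]

// TestFlowsSketch is illustrative only; it compresses the shape of tests such
// as TestV4ReadOnV4 and TestV6ReadOnV6 below into one table-driven loop.
func TestFlowsSketch(t *testing.T) {
	for _, flow := range []testFlow{unicastV4, unicastV4in6, unicastV6} {
		t.Run(flow.String(), func(t *testing.T) {
			c := newDualTestContext(t, defaultMTU)
			defer c.cleanup()

			c.createEndpointForFlow(flow)
			// Bind to the wildcard address on the well-known test port.
			if err := c.ep.Bind(tcpip.FullAddress{Port: stackPort}); err != nil {
				t.Fatalf("Bind failed: %s", err)
			}
			// Inject a packet for this flow and read it back from the endpoint.
			testRead(c, flow)
		})
	}
}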
-
-// packetDirection indicates whether a flow is incoming (read) or outgoing (write).
-type packetDirection int
-
-const (
- incoming packetDirection = iota
- outgoing
-)
-
-// header4Tuple returns the header4Tuple for the given flow and direction. Note
-// that the tuple contains no mapped addresses as those only exist at the socket
-// level but not at the packet header level.
-func (flow testFlow) header4Tuple(d packetDirection) header4Tuple {
- var h header4Tuple
- if flow.isV4() {
- if d == outgoing {
- h = header4Tuple{
- srcAddr: tcpip.FullAddress{Addr: stackAddr, Port: stackPort},
- dstAddr: tcpip.FullAddress{Addr: testAddr, Port: testPort},
- }
- } else {
- h = header4Tuple{
- srcAddr: tcpip.FullAddress{Addr: testAddr, Port: testPort},
- dstAddr: tcpip.FullAddress{Addr: stackAddr, Port: stackPort},
- }
- }
- if flow.isMulticast() {
- h.dstAddr.Addr = multicastAddr
- } else if flow.isBroadcast() {
- h.dstAddr.Addr = broadcastAddr
- }
- } else { // IPv6
- if d == outgoing {
- h = header4Tuple{
- srcAddr: tcpip.FullAddress{Addr: stackV6Addr, Port: stackPort},
- dstAddr: tcpip.FullAddress{Addr: testV6Addr, Port: testPort},
- }
- } else {
- h = header4Tuple{
- srcAddr: tcpip.FullAddress{Addr: testV6Addr, Port: testPort},
- dstAddr: tcpip.FullAddress{Addr: stackV6Addr, Port: stackPort},
- }
- }
- if flow.isMulticast() {
- h.dstAddr.Addr = multicastV6Addr
- }
- }
- if flow.isReverseMulticast() {
- h.srcAddr.Addr = flow.getMcastAddr()
- }
- return h
-}
-
-func (flow testFlow) getMcastAddr() tcpip.Address {
- if flow.isV4() {
- return multicastAddr
- }
- return multicastV6Addr
-}
-
-// mapAddrIfApplicable converts the given V4 address into its V4-mapped version
-// if it is applicable to the flow.
-func (flow testFlow) mapAddrIfApplicable(v4Addr tcpip.Address) tcpip.Address {
- if flow.isMapped() {
- return v4MappedAddrPrefix + v4Addr
- }
- return v4Addr
-}
-
-// netProto returns the protocol number used for the network packet.
-func (flow testFlow) netProto() tcpip.NetworkProtocolNumber {
- if flow.isV4() {
- return ipv4.ProtocolNumber
- }
- return ipv6.ProtocolNumber
-}
-
-// sockProto returns the protocol number used when creating the socket
-// endpoint for this flow.
-func (flow testFlow) sockProto() tcpip.NetworkProtocolNumber {
- switch flow {
- case unicastV4in6, unicastV6, unicastV6Only, multicastV4in6, multicastV6, multicastV6Only, broadcastIn6, reverseMulticast6:
- return ipv6.ProtocolNumber
- case unicastV4, multicastV4, broadcast, reverseMulticast4:
- return ipv4.ProtocolNumber
- default:
- panic(fmt.Sprintf("invalid testFlow given: %d", flow))
- }
-}
-
-func (flow testFlow) checkerFn() func(*testing.T, []byte, ...checker.NetworkChecker) {
- if flow.isV4() {
- return checker.IPv4
- }
- return checker.IPv6
-}
-
-func (flow testFlow) isV6() bool { return !flow.isV4() }
-func (flow testFlow) isV4() bool {
- return flow.sockProto() == ipv4.ProtocolNumber || flow.isMapped()
-}
-
-func (flow testFlow) isV6Only() bool {
- switch flow {
- case unicastV6Only, multicastV6Only:
- return true
- case unicastV4, unicastV4in6, unicastV6, multicastV4, multicastV4in6, multicastV6, broadcast, broadcastIn6, reverseMulticast4, reverseMulticast6:
- return false
- default:
- panic(fmt.Sprintf("invalid testFlow given: %d", flow))
- }
-}
-
-func (flow testFlow) isMulticast() bool {
- switch flow {
- case multicastV4, multicastV4in6, multicastV6, multicastV6Only:
- return true
- case unicastV4, unicastV4in6, unicastV6, unicastV6Only, broadcast, broadcastIn6, reverseMulticast4, reverseMulticast6:
- return false
- default:
- panic(fmt.Sprintf("invalid testFlow given: %d", flow))
- }
-}
-
-func (flow testFlow) isBroadcast() bool {
- switch flow {
- case broadcast, broadcastIn6:
- return true
- case unicastV4, unicastV4in6, unicastV6, unicastV6Only, multicastV4, multicastV4in6, multicastV6, multicastV6Only, reverseMulticast4, reverseMulticast6:
- return false
- default:
- panic(fmt.Sprintf("invalid testFlow given: %d", flow))
- }
-}
-
-func (flow testFlow) isMapped() bool {
- switch flow {
- case unicastV4in6, multicastV4in6, broadcastIn6:
- return true
- case unicastV4, unicastV6, unicastV6Only, multicastV4, multicastV6, multicastV6Only, broadcast, reverseMulticast4, reverseMulticast6:
- return false
- default:
- panic(fmt.Sprintf("invalid testFlow given: %d", flow))
- }
-}
-
-func (flow testFlow) isReverseMulticast() bool {
- switch flow {
- case reverseMulticast4, reverseMulticast6:
- return true
- default:
- return false
- }
-}
-
-type testContext struct {
- t *testing.T
- linkEP *channel.Endpoint
- s *stack.Stack
-
- ep tcpip.Endpoint
- wq waiter.Queue
-}
-
-func newDualTestContext(t *testing.T, mtu uint32) *testContext {
- t.Helper()
- return newDualTestContextWithHandleLocal(t, mtu, true)
-}
-
-func newDualTestContextWithHandleLocal(t *testing.T, mtu uint32, handleLocal bool) *testContext {
- t.Helper()
-
- options := stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol, icmp.NewProtocol6, icmp.NewProtocol4},
- HandleLocal: handleLocal,
- Clock: &faketime.NullClock{},
- }
- s := stack.New(options)
- ep := channel.New(256, mtu, "")
- wep := stack.LinkEndpoint(ep)
-
- if testing.Verbose() {
- wep = sniffer.New(ep)
- }
- if err := s.CreateNIC(1, wep); err != nil {
- t.Fatalf("CreateNIC failed: %s", err)
- }
-
- if err := s.AddAddress(1, ipv4.ProtocolNumber, stackAddr); err != nil {
- t.Fatalf("AddAddress failed: %s", err)
- }
-
- if err := s.AddAddress(1, ipv6.ProtocolNumber, stackV6Addr); err != nil {
- t.Fatalf("AddAddress failed: %s", err)
- }
-
- s.SetRouteTable([]tcpip.Route{
- {
- Destination: header.IPv4EmptySubnet,
- NIC: 1,
- },
- {
- Destination: header.IPv6EmptySubnet,
- NIC: 1,
- },
- })
-
- return &testContext{
- t: t,
- s: s,
- linkEP: ep,
- }
-}
-
-func (c *testContext) cleanup() {
- if c.ep != nil {
- c.ep.Close()
- }
-}
-
-func (c *testContext) createEndpoint(proto tcpip.NetworkProtocolNumber) {
- c.t.Helper()
-
- var err tcpip.Error
- c.ep, err = c.s.NewEndpoint(udp.ProtocolNumber, proto, &c.wq)
- if err != nil {
- c.t.Fatal("NewEndpoint failed: ", err)
- }
-}
-
-func (c *testContext) createEndpointForFlow(flow testFlow) {
- c.t.Helper()
-
- c.createEndpoint(flow.sockProto())
- if flow.isV6Only() {
- c.ep.SocketOptions().SetV6Only(true)
- } else if flow.isBroadcast() {
- c.ep.SocketOptions().SetBroadcast(true)
- }
-}
-
-// getPacketAndVerify reads a packet from the link endpoint and verifies the
-// header against expected values from the given test flow. In addition, it
-// calls any extra checker functions provided.
-func (c *testContext) getPacketAndVerify(flow testFlow, checkers ...checker.NetworkChecker) []byte {
- c.t.Helper()
-
- p, ok := c.linkEP.Read()
- if !ok {
- c.t.Fatalf("Packet wasn't written out")
- return nil
- }
-
- if p.Proto != flow.netProto() {
- c.t.Fatalf("Bad network protocol: got %v, wanted %v", p.Proto, flow.netProto())
- }
-
- if got, want := p.Pkt.TransportProtocolNumber, header.UDPProtocolNumber; got != want {
- c.t.Errorf("got p.Pkt.TransportProtocolNumber = %d, want = %d", got, want)
- }
-
- vv := buffer.NewVectorisedView(p.Pkt.Size(), p.Pkt.Views())
- b := vv.ToView()
-
- h := flow.header4Tuple(outgoing)
- checkers = append(
- checkers,
- checker.SrcAddr(h.srcAddr.Addr),
- checker.DstAddr(h.dstAddr.Addr),
- checker.UDP(checker.DstPort(h.dstAddr.Port)),
- )
- flow.checkerFn()(c.t, b, checkers...)
- return b
-}
-
-// injectPacket creates a packet for the given flow with the given payload and
-// injects it into the link endpoint. If badChecksum is true, the packet has
-// a bad checksum in the UDP header.
-func (c *testContext) injectPacket(flow testFlow, payload []byte, badChecksum bool) {
- c.t.Helper()
-
- h := flow.header4Tuple(incoming)
- if flow.isV4() {
- buf := c.buildV4Packet(payload, &h)
- if badChecksum {
- // Invalidate the UDP header checksum field, taking care to avoid
- // overflow to zero, which would disable checksum validation.
- for u := header.UDP(buf[header.IPv4MinimumSize:]); ; {
- u.SetChecksum(u.Checksum() + 1)
- if u.Checksum() != 0 {
- break
- }
- }
- }
- c.linkEP.InjectInbound(ipv4.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
- } else {
- buf := c.buildV6Packet(payload, &h)
- if badChecksum {
- // Invalidate the UDP header checksum field (unlike IPv4, zero is
- // a valid checksum value for IPv6, so there is no need to avoid it).
- u := header.UDP(buf[header.IPv6MinimumSize:])
- u.SetChecksum(u.Checksum() + 1)
- }
- c.linkEP.InjectInbound(ipv6.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
- }
-}
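[Editor's note, not part of the diff: the IPv4 branch above loops while corrupting the checksum because of a wrap-around edge case. The sketch below spells out the arithmetic; corruptChecksum is hypothetical.]

// corruptChecksum shows why the loop is needed: incrementing 0xffff wraps the
// uint16 to 0x0000, and a zero UDP checksum over IPv4 means "no checksum
// present", which would skip validation instead of making it fail.
func corruptChecksum(sum uint16) uint16 {
	for {
		sum++
		if sum != 0 {
			return sum
		}
	}
}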
-
-// buildV6Packet creates a V6 test packet with the given payload and header
-// values in a buffer.
-func (c *testContext) buildV6Packet(payload []byte, h *header4Tuple) buffer.View {
- // Allocate a buffer for data and headers.
- buf := buffer.NewView(header.UDPMinimumSize + header.IPv6MinimumSize + len(payload))
- payloadStart := len(buf) - len(payload)
- copy(buf[payloadStart:], payload)
-
- // Initialize the IP header.
- ip := header.IPv6(buf)
- ip.Encode(&header.IPv6Fields{
- TrafficClass: testTOS,
- PayloadLength: uint16(header.UDPMinimumSize + len(payload)),
- TransportProtocol: udp.ProtocolNumber,
- HopLimit: 65,
- SrcAddr: h.srcAddr.Addr,
- DstAddr: h.dstAddr.Addr,
- })
-
- // Initialize the UDP header.
- u := header.UDP(buf[header.IPv6MinimumSize:])
- u.Encode(&header.UDPFields{
- SrcPort: h.srcAddr.Port,
- DstPort: h.dstAddr.Port,
- Length: uint16(header.UDPMinimumSize + len(payload)),
- })
-
- // Calculate the UDP pseudo-header checksum.
- xsum := header.PseudoHeaderChecksum(udp.ProtocolNumber, h.srcAddr.Addr, h.dstAddr.Addr, uint16(len(u)))
-
- // Calculate the UDP checksum and set it.
- xsum = header.Checksum(payload, xsum)
- u.SetChecksum(^u.CalculateChecksum(xsum))
-
- return buf
-}
-
-// buildV4Packet creates a V4 test packet with the given payload and header
-// values in a buffer.
-func (c *testContext) buildV4Packet(payload []byte, h *header4Tuple) buffer.View {
- // Allocate a buffer for data and headers.
- buf := buffer.NewView(header.UDPMinimumSize + header.IPv4MinimumSize + len(payload))
- payloadStart := len(buf) - len(payload)
- copy(buf[payloadStart:], payload)
-
- // Initialize the IP header.
- ip := header.IPv4(buf)
- ip.Encode(&header.IPv4Fields{
- TOS: testTOS,
- TotalLength: uint16(len(buf)),
- TTL: 65,
- Protocol: uint8(udp.ProtocolNumber),
- SrcAddr: h.srcAddr.Addr,
- DstAddr: h.dstAddr.Addr,
- })
- ip.SetChecksum(^ip.CalculateChecksum())
-
- // Initialize the UDP header.
- u := header.UDP(buf[header.IPv4MinimumSize:])
- u.Encode(&header.UDPFields{
- SrcPort: h.srcAddr.Port,
- DstPort: h.dstAddr.Port,
- Length: uint16(header.UDPMinimumSize + len(payload)),
- })
-
- // Calculate the UDP pseudo-header checksum.
- xsum := header.PseudoHeaderChecksum(udp.ProtocolNumber, h.srcAddr.Addr, h.dstAddr.Addr, uint16(len(u)))
-
- // Calculate the UDP checksum and set it.
- xsum = header.Checksum(payload, xsum)
- u.SetChecksum(^u.CalculateChecksum(xsum))
-
- return buf
-}
-
-func newPayload() []byte {
- return newMinPayload(30)
-}
-
-func newMinPayload(minSize int) []byte {
- b := make([]byte, minSize+rand.Intn(100))
- for i := range b {
- b[i] = byte(rand.Intn(256))
- }
- return b
-}
-
-func TestBindToDeviceOption(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},
- Clock: &faketime.NullClock{},
- })
-
- ep, err := s.NewEndpoint(udp.ProtocolNumber, ipv4.ProtocolNumber, &waiter.Queue{})
- if err != nil {
- t.Fatalf("NewEndpoint failed; %s", err)
- }
- defer ep.Close()
-
- opts := stack.NICOptions{Name: "my_device"}
- if err := s.CreateNICWithOptions(321, loopback.New(), opts); err != nil {
- t.Errorf("CreateNICWithOptions(_, _, %+v) failed: %s", opts, err)
- }
-
- // nicIDPtr is used instead of taking the address of NICID literals, which is
- // a compiler error.
- nicIDPtr := func(s tcpip.NICID) *tcpip.NICID {
- return &s
- }
-
- testActions := []struct {
- name string
- setBindToDevice *tcpip.NICID
- setBindToDeviceError tcpip.Error
- getBindToDevice int32
- }{
- {"GetDefaultValue", nil, nil, 0},
- {"BindToNonExistent", nicIDPtr(999), &tcpip.ErrUnknownDevice{}, 0},
- {"BindToExistent", nicIDPtr(321), nil, 321},
- {"UnbindToDevice", nicIDPtr(0), nil, 0},
- }
- for _, testAction := range testActions {
- t.Run(testAction.name, func(t *testing.T) {
- if testAction.setBindToDevice != nil {
- bindToDevice := int32(*testAction.setBindToDevice)
- if gotErr, wantErr := ep.SocketOptions().SetBindToDevice(bindToDevice), testAction.setBindToDeviceError; gotErr != wantErr {
- t.Errorf("got SetSockOpt(&%T(%d)) = %s, want = %s", bindToDevice, bindToDevice, gotErr, wantErr)
- }
- }
- bindToDevice := ep.SocketOptions().GetBindToDevice()
- if bindToDevice != testAction.getBindToDevice {
- t.Errorf("got bindToDevice = %d, want = %d", bindToDevice, testAction.getBindToDevice)
- }
- })
- }
-}
-
-// testReadInternal sends a packet of the given test flow into the stack by
-// injecting it into the link endpoint. It then attempts to read it from the
-// UDP endpoint and, depending on whether the read is expected to succeed,
-// verifies its correctness, including any additional checker functions provided.
-func testReadInternal(c *testContext, flow testFlow, packetShouldBeDropped, expectReadError bool, checkers ...checker.ControlMessagesChecker) {
- c.t.Helper()
-
- payload := newPayload()
- c.injectPacket(flow, payload, false)
-
- // Try to receive the data.
- we, ch := waiter.NewChannelEntry(nil)
- c.wq.EventRegister(&we, waiter.ReadableEvents)
- defer c.wq.EventUnregister(&we)
-
- // Take a snapshot of the stats to validate them at the end of the test.
- epstats := c.ep.Stats().(*tcpip.TransportEndpointStats).Clone()
-
- var buf bytes.Buffer
- res, err := c.ep.Read(&buf, tcpip.ReadOptions{NeedRemoteAddr: true})
- if _, ok := err.(*tcpip.ErrWouldBlock); ok {
- // Wait for data to become available.
- select {
- case <-ch:
- res, err = c.ep.Read(&buf, tcpip.ReadOptions{NeedRemoteAddr: true})
-
- default:
- if packetShouldBeDropped {
- return // expected to time out
- }
- c.t.Fatal("timed out waiting for data")
- }
- }
-
- if expectReadError && err != nil {
- c.checkEndpointReadStats(1, epstats, err)
- return
- }
-
- if err != nil {
- c.t.Fatal("Read failed:", err)
- }
-
- if packetShouldBeDropped {
- c.t.Fatalf("Read unexpectedly received data from %s", res.RemoteAddr.Addr)
- }
-
- // Check the read result.
- h := flow.header4Tuple(incoming)
- if diff := cmp.Diff(tcpip.ReadResult{
- Count: buf.Len(),
- Total: buf.Len(),
- RemoteAddr: tcpip.FullAddress{Addr: h.srcAddr.Addr},
- }, res, checker.IgnoreCmpPath(
- "ControlMessages", // ControlMessages will be checked later.
- "RemoteAddr.NIC",
- "RemoteAddr.Port",
- )); diff != "" {
- c.t.Fatalf("Read: unexpected result (-want +got):\n%s", diff)
- }
-
- // Check the payload.
- v := buf.Bytes()
- if !bytes.Equal(payload, v) {
- c.t.Fatalf("got payload = %x, want = %x", v, payload)
- }
-
- // Run any checkers against the ControlMessages.
- for _, f := range checkers {
- f(c.t, res.ControlMessages)
- }
-
- c.checkEndpointReadStats(1, epstats, err)
-}
-
-// testRead sends a packet of the given test flow into the stack by injecting it
-// into the link endpoint. It then reads it from the UDP endpoint and verifies
-// its correctness including any additional checker functions provided.
-func testRead(c *testContext, flow testFlow, checkers ...checker.ControlMessagesChecker) {
- c.t.Helper()
- testReadInternal(c, flow, false /* packetShouldBeDropped */, false /* expectReadError */, checkers...)
-}
-
-// testFailingRead sends a packet of the given test flow into the stack by
-// injecting it into the link endpoint. It then tries to read it from the UDP
-// endpoint and expects this to fail.
-func testFailingRead(c *testContext, flow testFlow, expectReadError bool) {
- c.t.Helper()
- testReadInternal(c, flow, true /* packetShouldBeDropped */, expectReadError)
-}
-
-func TestBindEphemeralPort(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpoint(ipv6.ProtocolNumber)
-
- if err := c.ep.Bind(tcpip.FullAddress{}); err != nil {
- t.Fatalf("ep.Bind(...) failed: %s", err)
- }
-}
-
-func TestBindReservedPort(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpoint(ipv6.ProtocolNumber)
-
- if err := c.ep.Connect(tcpip.FullAddress{Addr: testV6Addr, Port: testPort}); err != nil {
- c.t.Fatalf("Connect failed: %s", err)
- }
-
- addr, err := c.ep.GetLocalAddress()
- if err != nil {
- t.Fatalf("GetLocalAddress failed: %s", err)
- }
-
- // We can't bind the address reserved by the connected endpoint above.
- {
- ep, err := c.s.NewEndpoint(udp.ProtocolNumber, ipv6.ProtocolNumber, &c.wq)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
- defer ep.Close()
- {
- err := ep.Bind(addr)
- if _, ok := err.(*tcpip.ErrPortInUse); !ok {
- t.Fatalf("got ep.Bind(...) = %s, want = %s", err, &tcpip.ErrPortInUse{})
- }
- }
- }
-
- func() {
- ep, err := c.s.NewEndpoint(udp.ProtocolNumber, ipv4.ProtocolNumber, &c.wq)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
- defer ep.Close()
- // We can't bind ipv4-any on the port reserved by the connected endpoint
- // above, since the endpoint is dual-stack.
- {
- err := ep.Bind(tcpip.FullAddress{Port: addr.Port})
- if _, ok := err.(*tcpip.ErrPortInUse); !ok {
- t.Fatalf("got ep.Bind(...) = %s, want = %s", err, &tcpip.ErrPortInUse{})
- }
- }
- // We can bind an ipv4 address on this port, though.
- if err := ep.Bind(tcpip.FullAddress{Addr: stackAddr, Port: addr.Port}); err != nil {
- t.Fatalf("ep.Bind(...) failed: %s", err)
- }
- }()
-
- // Once the connected endpoint releases its port reservation, we are able to
- // bind ipv4-any once again.
- c.ep.Close()
- func() {
- ep, err := c.s.NewEndpoint(udp.ProtocolNumber, ipv4.ProtocolNumber, &c.wq)
- if err != nil {
- t.Fatalf("NewEndpoint failed: %s", err)
- }
- defer ep.Close()
- if err := ep.Bind(tcpip.FullAddress{Port: addr.Port}); err != nil {
- t.Fatalf("ep.Bind(...) failed: %s", err)
- }
- }()
-}
-
-func TestV4ReadOnV6(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpointForFlow(unicastV4in6)
-
- // Bind to wildcard.
- if err := c.ep.Bind(tcpip.FullAddress{Port: stackPort}); err != nil {
- c.t.Fatalf("Bind failed: %s", err)
- }
-
- // Test acceptance.
- testRead(c, unicastV4in6)
-}
-
-func TestV4ReadOnBoundToV4MappedWildcard(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpointForFlow(unicastV4in6)
-
- // Bind to v4 mapped wildcard.
- if err := c.ep.Bind(tcpip.FullAddress{Addr: v4MappedWildcardAddr, Port: stackPort}); err != nil {
- c.t.Fatalf("Bind failed: %s", err)
- }
-
- // Test acceptance.
- testRead(c, unicastV4in6)
-}
-
-func TestV4ReadOnBoundToV4Mapped(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpointForFlow(unicastV4in6)
-
- // Bind to local address.
- if err := c.ep.Bind(tcpip.FullAddress{Addr: stackV4MappedAddr, Port: stackPort}); err != nil {
- c.t.Fatalf("Bind failed: %s", err)
- }
-
- // Test acceptance.
- testRead(c, unicastV4in6)
-}
-
-func TestV6ReadOnV6(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpointForFlow(unicastV6)
-
- // Bind to wildcard.
- if err := c.ep.Bind(tcpip.FullAddress{Port: stackPort}); err != nil {
- c.t.Fatalf("Bind failed: %s", err)
- }
-
- // Test acceptance.
- testRead(c, unicastV6)
-}
-
-// TestV4ReadSelfSource checks that packets coming from a local IP address are
-// correctly dropped when handleLocal is true and not otherwise.
-func TestV4ReadSelfSource(t *testing.T) {
- for _, tt := range []struct {
- name string
- handleLocal bool
- wantErr tcpip.Error
- wantInvalidSource uint64
- }{
- {"HandleLocal", false, nil, 0},
- {"NoHandleLocal", true, &tcpip.ErrWouldBlock{}, 1},
- } {
- t.Run(tt.name, func(t *testing.T) {
- c := newDualTestContextWithHandleLocal(t, defaultMTU, tt.handleLocal)
- defer c.cleanup()
-
- c.createEndpointForFlow(unicastV4)
-
- if err := c.ep.Bind(tcpip.FullAddress{Port: stackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
-
- payload := newPayload()
- h := unicastV4.header4Tuple(incoming)
- h.srcAddr = h.dstAddr
-
- buf := c.buildV4Packet(payload, &h)
- c.linkEP.InjectInbound(ipv4.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
-
- if got := c.s.Stats().IP.InvalidSourceAddressesReceived.Value(); got != tt.wantInvalidSource {
- t.Errorf("c.s.Stats().IP.InvalidSourceAddressesReceived got %d, want %d", got, tt.wantInvalidSource)
- }
-
- if _, err := c.ep.Read(ioutil.Discard, tcpip.ReadOptions{}); err != tt.wantErr {
- t.Errorf("got c.ep.Read = %s, want = %s", err, tt.wantErr)
- }
- })
- }
-}
-
-func TestV4ReadOnV4(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpointForFlow(unicastV4)
-
- // Bind to wildcard.
- if err := c.ep.Bind(tcpip.FullAddress{Port: stackPort}); err != nil {
- c.t.Fatalf("Bind failed: %s", err)
- }
-
- // Test acceptance.
- testRead(c, unicastV4)
-}
-
-// TestReadOnBoundToMulticast checks that an endpoint can bind to a multicast
-// address and receive data sent to that address.
-func TestReadOnBoundToMulticast(t *testing.T) {
- // FIXME(b/128189410): multicastV4in6 currently doesn't work as
- // AddMembershipOption doesn't handle V4in6 addresses.
- for _, flow := range []testFlow{multicastV4, multicastV6, multicastV6Only} {
- t.Run(fmt.Sprintf("flow:%s", flow), func(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpointForFlow(flow)
-
- // Bind to multicast address.
- mcastAddr := flow.mapAddrIfApplicable(flow.getMcastAddr())
- if err := c.ep.Bind(tcpip.FullAddress{Addr: mcastAddr, Port: stackPort}); err != nil {
- c.t.Fatal("Bind failed:", err)
- }
-
- // Join multicast group.
- ifoptSet := tcpip.AddMembershipOption{NIC: 1, MulticastAddr: mcastAddr}
- if err := c.ep.SetSockOpt(&ifoptSet); err != nil {
- c.t.Fatalf("SetSockOpt(&%#v): %s", ifoptSet, err)
- }
-
- // Check that we receive multicast packets but not unicast or broadcast
- // ones.
- testRead(c, flow)
- testFailingRead(c, broadcast, false /* expectReadError */)
- testFailingRead(c, unicastV4, false /* expectReadError */)
- })
- }
-}
-
-// TestV4ReadOnBoundToBroadcast checks that an endpoint can bind to a broadcast
-// address and can receive only broadcast data.
-func TestV4ReadOnBoundToBroadcast(t *testing.T) {
- for _, flow := range []testFlow{broadcast, broadcastIn6} {
- t.Run(fmt.Sprintf("flow:%s", flow), func(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpointForFlow(flow)
-
- // Bind to broadcast address.
- bcastAddr := flow.mapAddrIfApplicable(broadcastAddr)
- if err := c.ep.Bind(tcpip.FullAddress{Addr: bcastAddr, Port: stackPort}); err != nil {
- c.t.Fatalf("Bind failed: %s", err)
- }
-
- // Check that we receive broadcast packets but not unicast ones.
- testRead(c, flow)
- testFailingRead(c, unicastV4, false /* expectReadError */)
- })
- }
-}
-
-// TestReadFromMulticast checks that an endpoint will NOT receive a packet
-// that was sent with a multicast SOURCE address.
-func TestReadFromMulticast(t *testing.T) {
- for _, flow := range []testFlow{reverseMulticast4, reverseMulticast6} {
- t.Run(fmt.Sprintf("flow:%s", flow), func(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpointForFlow(flow)
-
- if err := c.ep.Bind(tcpip.FullAddress{Port: stackPort}); err != nil {
- t.Fatalf("Bind failed: %s", err)
- }
- testFailingRead(c, flow, false /* expectReadError */)
- })
- }
-}
-
-// TestV4ReadBroadcastOnBoundToWildcard checks that an endpoint can bind to ANY
-// and receive broadcast and unicast data.
-func TestV4ReadBroadcastOnBoundToWildcard(t *testing.T) {
- for _, flow := range []testFlow{broadcast, broadcastIn6} {
- t.Run(fmt.Sprintf("flow:%s", flow), func(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpointForFlow(flow)
-
- // Bind to wildcard.
- if err := c.ep.Bind(tcpip.FullAddress{Port: stackPort}); err != nil {
- c.t.Fatalf("Bind failed: %s (", err)
- }
-
- // Check that we receive both broadcast and unicast packets.
- testRead(c, flow)
- testRead(c, unicastV4)
- })
- }
-}
-
-// testFailingWrite sends a packet of the given test flow into the UDP endpoint
-// and verifies it fails with the provided error code.
-func testFailingWrite(c *testContext, flow testFlow, wantErr tcpip.Error) {
- c.t.Helper()
- // Take a snapshot of the stats to validate them at the end of the test.
- epstats := c.ep.Stats().(*tcpip.TransportEndpointStats).Clone()
- h := flow.header4Tuple(outgoing)
- writeDstAddr := flow.mapAddrIfApplicable(h.dstAddr.Addr)
-
- var r bytes.Reader
- r.Reset(newPayload())
- _, gotErr := c.ep.Write(&r, tcpip.WriteOptions{
- To: &tcpip.FullAddress{Addr: writeDstAddr, Port: h.dstAddr.Port},
- })
- c.checkEndpointWriteStats(1, epstats, gotErr)
- if gotErr != wantErr {
- c.t.Fatalf("Write returned unexpected error: got %v, want %v", gotErr, wantErr)
- }
-}
-
-// testWrite sends a packet of the given test flow from the UDP endpoint to the
-// flow's destination address:port. It then receives it from the link endpoint
-// and verifies its correctness including any additional checker functions
-// provided.
-func testWrite(c *testContext, flow testFlow, checkers ...checker.NetworkChecker) uint16 {
- c.t.Helper()
- return testWriteAndVerifyInternal(c, flow, true, checkers...)
-}
-
-// testWriteWithoutDestination sends a packet of the given test flow from the
-// UDP endpoint without giving a destination address:port. It then receives it
-// from the link endpoint and verifies its correctness including any additional
-// checker functions provided.
-func testWriteWithoutDestination(c *testContext, flow testFlow, checkers ...checker.NetworkChecker) uint16 {
- c.t.Helper()
- return testWriteAndVerifyInternal(c, flow, false, checkers...)
-}
-
-func testWriteNoVerify(c *testContext, flow testFlow, setDest bool) buffer.View {
- c.t.Helper()
- // Take a snapshot of the stats to validate them at the end of the test.
- epstats := c.ep.Stats().(*tcpip.TransportEndpointStats).Clone()
-
- writeOpts := tcpip.WriteOptions{}
- if setDest {
- h := flow.header4Tuple(outgoing)
- writeDstAddr := flow.mapAddrIfApplicable(h.dstAddr.Addr)
- writeOpts = tcpip.WriteOptions{
- To: &tcpip.FullAddress{Addr: writeDstAddr, Port: h.dstAddr.Port},
- }
- }
- var r bytes.Reader
- payload := newPayload()
- r.Reset(payload)
- n, err := c.ep.Write(&r, writeOpts)
- if err != nil {
- c.t.Fatalf("Write failed: %s", err)
- }
- if n != int64(len(payload)) {
- c.t.Fatalf("Bad number of bytes written: got %v, want %v", n, len(payload))
- }
- c.checkEndpointWriteStats(1, epstats, err)
- return payload
-}
-
-func testWriteAndVerifyInternal(c *testContext, flow testFlow, setDest bool, checkers ...checker.NetworkChecker) uint16 {
- c.t.Helper()
- payload := testWriteNoVerify(c, flow, setDest)
-	// Receive the packet and check the payload.
- b := c.getPacketAndVerify(flow, checkers...)
- var udpH header.UDP
- if flow.isV4() {
- udpH = header.IPv4(b).Payload()
- } else {
- udpH = header.IPv6(b).Payload()
- }
- if !bytes.Equal(payload, udpH.Payload()) {
- c.t.Fatalf("Bad payload: got %x, want %x", udpH.Payload(), payload)
- }
-
- return udpH.SourcePort()
-}
-
-func testDualWrite(c *testContext) uint16 {
- c.t.Helper()
-
- v4Port := testWrite(c, unicastV4in6)
- v6Port := testWrite(c, unicastV6)
- if v4Port != v6Port {
- c.t.Fatalf("expected v4 and v6 ports to be equal: got v4Port = %d, v6Port = %d", v4Port, v6Port)
- }
-
- return v4Port
-}
-
-func TestDualWriteUnbound(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpoint(ipv6.ProtocolNumber)
-
- testDualWrite(c)
-}
-
-func TestDualWriteBoundToWildcard(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpoint(ipv6.ProtocolNumber)
-
- // Bind to wildcard.
- if err := c.ep.Bind(tcpip.FullAddress{Port: stackPort}); err != nil {
- c.t.Fatalf("Bind failed: %s", err)
- }
-
- p := testDualWrite(c)
- if p != stackPort {
- c.t.Fatalf("Bad port: got %v, want %v", p, stackPort)
- }
-}
-
-func TestDualWriteConnectedToV6(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpoint(ipv6.ProtocolNumber)
-
- // Connect to v6 address.
- if err := c.ep.Connect(tcpip.FullAddress{Addr: testV6Addr, Port: testPort}); err != nil {
-		c.t.Fatalf("Connect failed: %s", err)
- }
-
- testWrite(c, unicastV6)
-
- // Write to V4 mapped address.
- testFailingWrite(c, unicastV4in6, &tcpip.ErrNetworkUnreachable{})
- const want = 1
- if got := c.ep.Stats().(*tcpip.TransportEndpointStats).SendErrors.NoRoute.Value(); got != want {
- c.t.Fatalf("Endpoint stat not updated. got %d want %d", got, want)
- }
-}
-
-func TestDualWriteConnectedToV4Mapped(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpoint(ipv6.ProtocolNumber)
-
- // Connect to v4 mapped address.
- if err := c.ep.Connect(tcpip.FullAddress{Addr: testV4MappedAddr, Port: testPort}); err != nil {
-		c.t.Fatalf("Connect failed: %s", err)
- }
-
- testWrite(c, unicastV4in6)
-
- // Write to v6 address.
- testFailingWrite(c, unicastV6, &tcpip.ErrInvalidEndpointState{})
-}
-
-func TestV4WriteOnV6Only(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpointForFlow(unicastV6Only)
-
- // Write to V4 mapped address.
- testFailingWrite(c, unicastV4in6, &tcpip.ErrNoRoute{})
-}
-
-func TestV6WriteOnBoundToV4Mapped(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpoint(ipv6.ProtocolNumber)
-
- // Bind to v4 mapped address.
- if err := c.ep.Bind(tcpip.FullAddress{Addr: stackV4MappedAddr, Port: stackPort}); err != nil {
- c.t.Fatalf("Bind failed: %s", err)
- }
-
- // Write to v6 address.
- testFailingWrite(c, unicastV6, &tcpip.ErrInvalidEndpointState{})
-}
-
-func TestV6WriteOnConnected(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpoint(ipv6.ProtocolNumber)
-
- // Connect to v6 address.
- if err := c.ep.Connect(tcpip.FullAddress{Addr: testV6Addr, Port: testPort}); err != nil {
- c.t.Fatalf("Connect failed: %s", err)
- }
-
- testWriteWithoutDestination(c, unicastV6)
-}
-
-func TestV4WriteOnConnected(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpoint(ipv6.ProtocolNumber)
-
- // Connect to v4 mapped address.
- if err := c.ep.Connect(tcpip.FullAddress{Addr: testV4MappedAddr, Port: testPort}); err != nil {
- c.t.Fatalf("Connect failed: %s", err)
- }
-
- testWriteWithoutDestination(c, unicastV4)
-}
-
-func TestWriteOnConnectedInvalidPort(t *testing.T) {
- protocols := map[string]tcpip.NetworkProtocolNumber{
- "ipv4": ipv4.ProtocolNumber,
- "ipv6": ipv6.ProtocolNumber,
- }
- for name, pn := range protocols {
- t.Run(name, func(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpoint(pn)
- if err := c.ep.Connect(tcpip.FullAddress{Addr: stackAddr, Port: invalidPort}); err != nil {
- c.t.Fatalf("Connect failed: %s", err)
- }
- writeOpts := tcpip.WriteOptions{
- To: &tcpip.FullAddress{Addr: stackAddr, Port: invalidPort},
- }
- var r bytes.Reader
- payload := newPayload()
- r.Reset(payload)
- n, err := c.ep.Write(&r, writeOpts)
- if err != nil {
- c.t.Fatalf("c.ep.Write(...) = %s, want nil", err)
- }
- if got, want := n, int64(len(payload)); got != want {
- c.t.Fatalf("c.ep.Write(...) wrote %d bytes, want %d bytes", got, want)
- }
-
- {
- err := c.ep.LastError()
- if _, ok := err.(*tcpip.ErrConnectionRefused); !ok {
- c.t.Fatalf("expected c.ep.LastError() == ErrConnectionRefused, got: %+v", err)
- }
- }
- })
- }
-}
-
-// TestWriteOnBoundToV4Multicast checks that we can send packets out of a socket
-// that is bound to a V4 multicast address.
-func TestWriteOnBoundToV4Multicast(t *testing.T) {
- for _, flow := range []testFlow{unicastV4, multicastV4, broadcast} {
- t.Run(fmt.Sprintf("%s", flow), func(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpointForFlow(flow)
-
- // Bind to V4 mcast address.
- if err := c.ep.Bind(tcpip.FullAddress{Addr: multicastAddr, Port: stackPort}); err != nil {
- c.t.Fatal("Bind failed:", err)
- }
-
- testWrite(c, flow)
- })
- }
-}
-
-// TestWriteOnBoundToV4MappedMulticast checks that we can send packets out of a
-// socket that is bound to a V4-mapped multicast address.
-func TestWriteOnBoundToV4MappedMulticast(t *testing.T) {
- for _, flow := range []testFlow{unicastV4in6, multicastV4in6, broadcastIn6} {
- t.Run(fmt.Sprintf("%s", flow), func(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpointForFlow(flow)
-
- // Bind to V4Mapped mcast address.
- if err := c.ep.Bind(tcpip.FullAddress{Addr: multicastV4MappedAddr, Port: stackPort}); err != nil {
- c.t.Fatalf("Bind failed: %s", err)
- }
-
- testWrite(c, flow)
- })
- }
-}
-
-// TestWriteOnBoundToV6Multicast checks that we can send packets out of a
-// socket that is bound to a V6 multicast address.
-func TestWriteOnBoundToV6Multicast(t *testing.T) {
- for _, flow := range []testFlow{unicastV6, multicastV6} {
- t.Run(fmt.Sprintf("%s", flow), func(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpointForFlow(flow)
-
- // Bind to V6 mcast address.
- if err := c.ep.Bind(tcpip.FullAddress{Addr: multicastV6Addr, Port: stackPort}); err != nil {
- c.t.Fatalf("Bind failed: %s", err)
- }
-
- testWrite(c, flow)
- })
- }
-}
-
-// TestWriteOnBoundToV6OnlyMulticast checks that we can send packets out of a
-// V6-only socket that is bound to a V6 multicast address.
-func TestWriteOnBoundToV6OnlyMulticast(t *testing.T) {
- for _, flow := range []testFlow{unicastV6Only, multicastV6Only} {
- t.Run(fmt.Sprintf("%s", flow), func(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpointForFlow(flow)
-
- // Bind to V6 mcast address.
- if err := c.ep.Bind(tcpip.FullAddress{Addr: multicastV6Addr, Port: stackPort}); err != nil {
- c.t.Fatalf("Bind failed: %s", err)
- }
-
- testWrite(c, flow)
- })
- }
-}
-
-// TestWriteOnBoundToBroadcast checks that we can send packets out of a
-// socket that is bound to the broadcast address.
-func TestWriteOnBoundToBroadcast(t *testing.T) {
- for _, flow := range []testFlow{unicastV4, multicastV4, broadcast} {
- t.Run(fmt.Sprintf("%s", flow), func(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpointForFlow(flow)
-
- // Bind to V4 broadcast address.
- if err := c.ep.Bind(tcpip.FullAddress{Addr: broadcastAddr, Port: stackPort}); err != nil {
- c.t.Fatal("Bind failed:", err)
- }
-
- testWrite(c, flow)
- })
- }
-}
-
-// TestWriteOnBoundToV4MappedBroadcast checks that we can send packets out of a
-// socket that is bound to the V4-mapped broadcast address.
-func TestWriteOnBoundToV4MappedBroadcast(t *testing.T) {
- for _, flow := range []testFlow{unicastV4in6, multicastV4in6, broadcastIn6} {
- t.Run(fmt.Sprintf("%s", flow), func(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpointForFlow(flow)
-
-			// Bind to V4-mapped broadcast address.
- if err := c.ep.Bind(tcpip.FullAddress{Addr: broadcastV4MappedAddr, Port: stackPort}); err != nil {
- c.t.Fatalf("Bind failed: %s", err)
- }
-
- testWrite(c, flow)
- })
- }
-}
-
-func TestReadIncrementsPacketsReceived(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
-	// Create a dual-stack UDP endpoint (IPv6 with support for IPv4-mapped traffic).
- c.createEndpoint(ipv6.ProtocolNumber)
-
- // Bind to wildcard.
- if err := c.ep.Bind(tcpip.FullAddress{Port: stackPort}); err != nil {
- c.t.Fatalf("Bind failed: %s", err)
- }
-
- testRead(c, unicastV4)
-
- var want uint64 = 1
- if got := c.s.Stats().UDP.PacketsReceived.Value(); got != want {
- c.t.Fatalf("Read did not increment PacketsReceived: got %v, want %v", got, want)
- }
-}
-
-func TestReadIPPacketInfo(t *testing.T) {
- tests := []struct {
- name string
- proto tcpip.NetworkProtocolNumber
- flow testFlow
- expectedLocalAddr tcpip.Address
- expectedDestAddr tcpip.Address
- }{
- {
- name: "IPv4 unicast",
- proto: header.IPv4ProtocolNumber,
- flow: unicastV4,
- expectedLocalAddr: stackAddr,
- expectedDestAddr: stackAddr,
- },
- {
- name: "IPv4 multicast",
- proto: header.IPv4ProtocolNumber,
- flow: multicastV4,
- // This should actually be a unicast address assigned to the interface.
- //
- // TODO(gvisor.dev/issue/3556): This check is validating incorrect
- // behaviour. We still include the test so that once the bug is
- // resolved, this test will start to fail and the individual tasked
- // with fixing this bug knows to also fix this test :).
- expectedLocalAddr: multicastAddr,
- expectedDestAddr: multicastAddr,
- },
- {
- name: "IPv4 broadcast",
- proto: header.IPv4ProtocolNumber,
- flow: broadcast,
- // This should actually be a unicast address assigned to the interface.
- //
- // TODO(gvisor.dev/issue/3556): This check is validating incorrect
- // behaviour. We still include the test so that once the bug is
- // resolved, this test will start to fail and the individual tasked
- // with fixing this bug knows to also fix this test :).
- expectedLocalAddr: broadcastAddr,
- expectedDestAddr: broadcastAddr,
- },
- {
- name: "IPv6 unicast",
- proto: header.IPv6ProtocolNumber,
- flow: unicastV6,
- expectedLocalAddr: stackV6Addr,
- expectedDestAddr: stackV6Addr,
- },
- {
- name: "IPv6 multicast",
- proto: header.IPv6ProtocolNumber,
- flow: multicastV6,
- // This should actually be a unicast address assigned to the interface.
- //
- // TODO(gvisor.dev/issue/3556): This check is validating incorrect
- // behaviour. We still include the test so that once the bug is
- // resolved, this test will start to fail and the individual tasked
- // with fixing this bug knows to also fix this test :).
- expectedLocalAddr: multicastV6Addr,
- expectedDestAddr: multicastV6Addr,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpoint(test.proto)
-
- bindAddr := tcpip.FullAddress{Port: stackPort}
- if err := c.ep.Bind(bindAddr); err != nil {
- t.Fatalf("Bind(%+v): %s", bindAddr, err)
- }
-
- if test.flow.isMulticast() {
- ifoptSet := tcpip.AddMembershipOption{NIC: 1, MulticastAddr: test.flow.getMcastAddr()}
- if err := c.ep.SetSockOpt(&ifoptSet); err != nil {
-					c.t.Fatalf("SetSockOpt(&%#v): %s", ifoptSet, err)
- }
- }
-
- c.ep.SocketOptions().SetReceivePacketInfo(true)
-
- testRead(c, test.flow, checker.ReceiveIPPacketInfo(tcpip.IPPacketInfo{
- NIC: 1,
- LocalAddr: test.expectedLocalAddr,
- DestinationAddr: test.expectedDestAddr,
- }))
-
- if got := c.s.Stats().UDP.PacketsReceived.Value(); got != 1 {
- t.Fatalf("Read did not increment PacketsReceived: got = %d, want = 1", got)
- }
- })
- }
-}
-
-func TestReadRecvOriginalDstAddr(t *testing.T) {
- tests := []struct {
- name string
- proto tcpip.NetworkProtocolNumber
- flow testFlow
- expectedOriginalDstAddr tcpip.FullAddress
- }{
- {
- name: "IPv4 unicast",
- proto: header.IPv4ProtocolNumber,
- flow: unicastV4,
- expectedOriginalDstAddr: tcpip.FullAddress{NIC: 1, Addr: stackAddr, Port: stackPort},
- },
- {
- name: "IPv4 multicast",
- proto: header.IPv4ProtocolNumber,
- flow: multicastV4,
- // This should actually be a unicast address assigned to the interface.
- //
- // TODO(gvisor.dev/issue/3556): This check is validating incorrect
- // behaviour. We still include the test so that once the bug is
- // resolved, this test will start to fail and the individual tasked
- // with fixing this bug knows to also fix this test :).
- expectedOriginalDstAddr: tcpip.FullAddress{NIC: 1, Addr: multicastAddr, Port: stackPort},
- },
- {
- name: "IPv4 broadcast",
- proto: header.IPv4ProtocolNumber,
- flow: broadcast,
- // This should actually be a unicast address assigned to the interface.
- //
- // TODO(gvisor.dev/issue/3556): This check is validating incorrect
- // behaviour. We still include the test so that once the bug is
- // resolved, this test will start to fail and the individual tasked
- // with fixing this bug knows to also fix this test :).
- expectedOriginalDstAddr: tcpip.FullAddress{NIC: 1, Addr: broadcastAddr, Port: stackPort},
- },
- {
- name: "IPv6 unicast",
- proto: header.IPv6ProtocolNumber,
- flow: unicastV6,
- expectedOriginalDstAddr: tcpip.FullAddress{NIC: 1, Addr: stackV6Addr, Port: stackPort},
- },
- {
- name: "IPv6 multicast",
- proto: header.IPv6ProtocolNumber,
- flow: multicastV6,
- // This should actually be a unicast address assigned to the interface.
- //
- // TODO(gvisor.dev/issue/3556): This check is validating incorrect
- // behaviour. We still include the test so that once the bug is
- // resolved, this test will start to fail and the individual tasked
- // with fixing this bug knows to also fix this test :).
- expectedOriginalDstAddr: tcpip.FullAddress{NIC: 1, Addr: multicastV6Addr, Port: stackPort},
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpoint(test.proto)
-
- bindAddr := tcpip.FullAddress{Port: stackPort}
- if err := c.ep.Bind(bindAddr); err != nil {
- t.Fatalf("Bind(%#v): %s", bindAddr, err)
- }
-
- if test.flow.isMulticast() {
- ifoptSet := tcpip.AddMembershipOption{NIC: 1, MulticastAddr: test.flow.getMcastAddr()}
- if err := c.ep.SetSockOpt(&ifoptSet); err != nil {
-					c.t.Fatalf("SetSockOpt(&%#v): %s", ifoptSet, err)
- }
- }
-
- c.ep.SocketOptions().SetReceiveOriginalDstAddress(true)
-
- testRead(c, test.flow, checker.ReceiveOriginalDstAddr(test.expectedOriginalDstAddr))
-
- if got := c.s.Stats().UDP.PacketsReceived.Value(); got != 1 {
- t.Fatalf("Read did not increment PacketsReceived: got = %d, want = 1", got)
- }
- })
- }
-}
-
-func TestWriteIncrementsPacketsSent(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpoint(ipv6.ProtocolNumber)
-
- testDualWrite(c)
-
- var want uint64 = 2
- if got := c.s.Stats().UDP.PacketsSent.Value(); got != want {
- c.t.Fatalf("Write did not increment PacketsSent: got %v, want %v", got, want)
- }
-}
-
-func TestNoChecksum(t *testing.T) {
- for _, flow := range []testFlow{unicastV4, unicastV6} {
- t.Run(fmt.Sprintf("flow:%s", flow), func(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpointForFlow(flow)
-
- // Disable the checksum generation.
- c.ep.SocketOptions().SetNoChecksum(true)
- // This option is effective on IPv4 only.
- testWrite(c, flow, checker.UDP(checker.NoChecksum(flow.isV4())))
-
- // Enable the checksum generation.
- c.ep.SocketOptions().SetNoChecksum(false)
- testWrite(c, flow, checker.UDP(checker.NoChecksum(false)))
- })
- }
-}
-
-var _ stack.NetworkInterface = (*testInterface)(nil)
-
-type testInterface struct {
- stack.NetworkInterface
-}
-
-func (*testInterface) ID() tcpip.NICID {
- return 0
-}
-
-func (*testInterface) Enabled() bool {
- return true
-}
-
-func TestTTL(t *testing.T) {
- for _, flow := range []testFlow{unicastV4, unicastV4in6, unicastV6, unicastV6Only, multicastV4, multicastV4in6, multicastV6, broadcast, broadcastIn6} {
- t.Run(fmt.Sprintf("flow:%s", flow), func(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpointForFlow(flow)
-
- const multicastTTL = 42
- if err := c.ep.SetSockOptInt(tcpip.MulticastTTLOption, multicastTTL); err != nil {
- c.t.Fatalf("SetSockOptInt failed: %s", err)
- }
-
- var wantTTL uint8
- if flow.isMulticast() {
- wantTTL = multicastTTL
- } else {
- var p stack.NetworkProtocolFactory
- var n tcpip.NetworkProtocolNumber
- if flow.isV4() {
- p = ipv4.NewProtocol
- n = ipv4.ProtocolNumber
- } else {
- p = ipv6.NewProtocol
- n = ipv6.ProtocolNumber
- }
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{p},
- Clock: &faketime.NullClock{},
- })
- ep := s.NetworkProtocolInstance(n).NewEndpoint(&testInterface{}, nil)
- wantTTL = ep.DefaultTTL()
- ep.Close()
- }
-
- testWrite(c, flow, checker.TTL(wantTTL))
- })
- }
-}
-
-func TestSetTTL(t *testing.T) {
- for _, flow := range []testFlow{unicastV4, unicastV4in6, unicastV6, unicastV6Only, broadcast, broadcastIn6} {
- t.Run(fmt.Sprintf("flow:%s", flow), func(t *testing.T) {
- for _, wantTTL := range []uint8{1, 2, 50, 64, 128, 254, 255} {
- t.Run(fmt.Sprintf("TTL:%d", wantTTL), func(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpointForFlow(flow)
-
- if err := c.ep.SetSockOptInt(tcpip.TTLOption, int(wantTTL)); err != nil {
- c.t.Fatalf("SetSockOptInt(TTLOption, %d) failed: %s", wantTTL, err)
- }
-
- testWrite(c, flow, checker.TTL(wantTTL))
- })
- }
- })
- }
-}
-
-func TestSetTOS(t *testing.T) {
- for _, flow := range []testFlow{unicastV4, multicastV4, broadcast} {
- t.Run(fmt.Sprintf("flow:%s", flow), func(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpointForFlow(flow)
-
- const tos = testTOS
- v, err := c.ep.GetSockOptInt(tcpip.IPv4TOSOption)
- if err != nil {
- c.t.Errorf("GetSockOptInt(IPv4TOSOption) failed: %s", err)
- }
- // Test for expected default value.
- if v != 0 {
- c.t.Errorf("got GetSockOptInt(IPv4TOSOption) = 0x%x, want = 0x%x", v, 0)
- }
-
- if err := c.ep.SetSockOptInt(tcpip.IPv4TOSOption, tos); err != nil {
- c.t.Errorf("SetSockOptInt(IPv4TOSOption, 0x%x) failed: %s", tos, err)
- }
-
- v, err = c.ep.GetSockOptInt(tcpip.IPv4TOSOption)
- if err != nil {
- c.t.Errorf("GetSockOptInt(IPv4TOSOption) failed: %s", err)
- }
-
- if v != tos {
- c.t.Errorf("got GetSockOptInt(IPv4TOSOption) = 0x%x, want = 0x%x", v, tos)
- }
-
- testWrite(c, flow, checker.TOS(tos, 0))
- })
- }
-}
-
-func TestSetTClass(t *testing.T) {
- for _, flow := range []testFlow{unicastV4in6, unicastV6, unicastV6Only, multicastV4in6, multicastV6, broadcastIn6} {
- t.Run(fmt.Sprintf("flow:%s", flow), func(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpointForFlow(flow)
-
- const tClass = testTOS
- v, err := c.ep.GetSockOptInt(tcpip.IPv6TrafficClassOption)
- if err != nil {
- c.t.Errorf("GetSockOptInt(IPv6TrafficClassOption) failed: %s", err)
- }
- // Test for expected default value.
- if v != 0 {
- c.t.Errorf("got GetSockOptInt(IPv6TrafficClassOption) = 0x%x, want = 0x%x", v, 0)
- }
-
- if err := c.ep.SetSockOptInt(tcpip.IPv6TrafficClassOption, tClass); err != nil {
- c.t.Errorf("SetSockOptInt(IPv6TrafficClassOption, 0x%x) failed: %s", tClass, err)
- }
-
- v, err = c.ep.GetSockOptInt(tcpip.IPv6TrafficClassOption)
- if err != nil {
- c.t.Errorf("GetSockOptInt(IPv6TrafficClassOption) failed: %s", err)
- }
-
- if v != tClass {
- c.t.Errorf("got GetSockOptInt(IPv6TrafficClassOption) = 0x%x, want = 0x%x", v, tClass)
- }
-
- // The header getter for TClass is called TOS, so use that checker.
- testWrite(c, flow, checker.TOS(tClass, 0))
- })
- }
-}
-
-func TestReceiveTosTClass(t *testing.T) {
- const RcvTOSOpt = "ReceiveTosOption"
- const RcvTClassOpt = "ReceiveTClassOption"
-
- testCases := []struct {
- name string
- tests []testFlow
- }{
- {RcvTOSOpt, []testFlow{unicastV4, broadcast}},
- {RcvTClassOpt, []testFlow{unicastV4in6, unicastV6, unicastV6Only, broadcastIn6}},
- }
- for _, testCase := range testCases {
- for _, flow := range testCase.tests {
- t.Run(fmt.Sprintf("%s:flow:%s", testCase.name, flow), func(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpointForFlow(flow)
- name := testCase.name
-
- var optionGetter func() bool
- var optionSetter func(bool)
- switch name {
- case RcvTOSOpt:
- optionGetter = c.ep.SocketOptions().GetReceiveTOS
- optionSetter = c.ep.SocketOptions().SetReceiveTOS
- case RcvTClassOpt:
- optionGetter = c.ep.SocketOptions().GetReceiveTClass
- optionSetter = c.ep.SocketOptions().SetReceiveTClass
- default:
-					t.Fatalf("unknown test variant: %s", name)
- }
-
- // Verify that setting and reading the option works.
- v := optionGetter()
- // Test for expected default value.
- if v != false {
- c.t.Errorf("got GetSockOptBool(%s) = %t, want = %t", name, v, false)
- }
-
- const want = true
- optionSetter(want)
-
- got := optionGetter()
- if got != want {
- c.t.Errorf("got GetSockOptBool(%s) = %t, want = %t", name, got, want)
- }
-
- // Verify that the correct received TOS or TClass is handed through as
- // ancillary data to the ControlMessages struct.
- if err := c.ep.Bind(tcpip.FullAddress{Port: stackPort}); err != nil {
- c.t.Fatalf("Bind failed: %s", err)
- }
- switch name {
- case RcvTClassOpt:
- testRead(c, flow, checker.ReceiveTClass(testTOS))
- case RcvTOSOpt:
- testRead(c, flow, checker.ReceiveTOS(testTOS))
- default:
- t.Fatalf("unknown test variant: %s", name)
- }
- })
- }
- }
-}
-
-func TestMulticastInterfaceOption(t *testing.T) {
- for _, flow := range []testFlow{multicastV4, multicastV4in6, multicastV6, multicastV6Only} {
- t.Run(fmt.Sprintf("flow:%s", flow), func(t *testing.T) {
- for _, bindTyp := range []string{"bound", "unbound"} {
- t.Run(bindTyp, func(t *testing.T) {
- for _, optTyp := range []string{"use local-addr", "use NICID", "use local-addr and NIC"} {
- t.Run(optTyp, func(t *testing.T) {
- h := flow.header4Tuple(outgoing)
- mcastAddr := h.dstAddr.Addr
- localIfAddr := h.srcAddr.Addr
-
- var ifoptSet tcpip.MulticastInterfaceOption
- switch optTyp {
- case "use local-addr":
- ifoptSet.InterfaceAddr = localIfAddr
- case "use NICID":
- ifoptSet.NIC = 1
- case "use local-addr and NIC":
- ifoptSet.InterfaceAddr = localIfAddr
- ifoptSet.NIC = 1
- default:
- t.Fatal("unknown test variant")
- }
-
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpoint(flow.sockProto())
-
- if bindTyp == "bound" {
- // Bind the socket by connecting to the multicast address.
- // This may have an influence on how the multicast interface
- // is set.
- addr := tcpip.FullAddress{
- Addr: flow.mapAddrIfApplicable(mcastAddr),
- Port: stackPort,
- }
- if err := c.ep.Connect(addr); err != nil {
- c.t.Fatalf("Connect failed: %s", err)
- }
- }
-
- if err := c.ep.SetSockOpt(&ifoptSet); err != nil {
- c.t.Fatalf("SetSockOpt(&%#v): %s", ifoptSet, err)
- }
-
- // Verify multicast interface addr and NIC were set correctly.
- // Note that NIC must be 1 since this is our outgoing interface.
- var ifoptGot tcpip.MulticastInterfaceOption
- if err := c.ep.GetSockOpt(&ifoptGot); err != nil {
- c.t.Fatalf("GetSockOpt(&%T): %s", ifoptGot, err)
- } else if ifoptWant := (tcpip.MulticastInterfaceOption{NIC: 1, InterfaceAddr: ifoptSet.InterfaceAddr}); ifoptGot != ifoptWant {
- c.t.Errorf("got multicast interface option = %#v, want = %#v", ifoptGot, ifoptWant)
- }
- })
- }
- })
- }
- })
- }
-}
-
-// TestV4UnknownDestination verifies that we generate an ICMPv4 Destination
-// Unreachable message when a UDP datagram is received on ports for which there
-// is no bound UDP socket.
-func TestV4UnknownDestination(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- testCases := []struct {
- flow testFlow
- icmpRequired bool
- // largePayload if true, will result in a payload large enough
- // so that the final generated IPv4 packet is larger than
- // header.IPv4MinimumProcessableDatagramSize.
- largePayload bool
- // badChecksum if true, will set an invalid checksum in the
- // header.
- badChecksum bool
- }{
- {unicastV4, true, false, false},
- {unicastV4, true, true, false},
- {unicastV4, false, false, true},
- {unicastV4, false, true, true},
- {multicastV4, false, false, false},
- {multicastV4, false, true, false},
- {broadcast, false, false, false},
- {broadcast, false, true, false},
- }
- checksumErrors := uint64(0)
- for _, tc := range testCases {
- t.Run(fmt.Sprintf("flow:%s icmpRequired:%t largePayload:%t badChecksum:%t", tc.flow, tc.icmpRequired, tc.largePayload, tc.badChecksum), func(t *testing.T) {
- payload := newPayload()
- if tc.largePayload {
- payload = newMinPayload(576)
- }
- c.injectPacket(tc.flow, payload, tc.badChecksum)
- if tc.badChecksum {
- checksumErrors++
- if got, want := c.s.Stats().UDP.ChecksumErrors.Value(), checksumErrors; got != want {
- t.Fatalf("got stats.UDP.ChecksumErrors.Value() = %d, want = %d", got, want)
- }
- }
- if !tc.icmpRequired {
- if p, ok := c.linkEP.Read(); ok {
- t.Fatalf("unexpected packet received: %+v", p)
- }
- return
- }
-
- // ICMP required.
- p, ok := c.linkEP.Read()
- if !ok {
- t.Fatalf("packet wasn't written out")
- return
- }
-
- vv := buffer.NewVectorisedView(p.Pkt.Size(), p.Pkt.Views())
- pkt := vv.ToView()
- if got, want := len(pkt), header.IPv4MinimumProcessableDatagramSize; got > want {
- t.Fatalf("got an ICMP packet of size: %d, want: sz <= %d", got, want)
- }
-
- hdr := header.IPv4(pkt)
- checker.IPv4(t, hdr, checker.ICMPv4(
- checker.ICMPv4Type(header.ICMPv4DstUnreachable),
- checker.ICMPv4Code(header.ICMPv4PortUnreachable)))
-
- // We need to compare the included data part of the UDP packet that is in
- // the ICMP packet with the matching original data.
- icmpPkt := header.ICMPv4(hdr.Payload())
- payloadIPHeader := header.IPv4(icmpPkt.Payload())
- incomingHeaderLength := header.IPv4MinimumSize + header.UDPMinimumSize
- wantLen := len(payload)
- if tc.largePayload {
- // To work out the data size we need to simulate what the sender would
- // have done. The wanted size is the total available minus the sum of
- // the headers in the UDP AND ICMP packets, given that we know the test
- // had only a minimal IP header but the ICMP sender will have allowed
- // for a maximally sized packet header.
- wantLen = header.IPv4MinimumProcessableDatagramSize - header.IPv4MaximumHeaderSize - header.ICMPv4MinimumSize - incomingHeaderLength
- }
-
- // In the case of large payloads the IP packet may be truncated. Update
- // the length field before retrieving the udp datagram payload.
- // Add back the two headers within the payload.
- payloadIPHeader.SetTotalLength(uint16(wantLen + incomingHeaderLength))
-
- origDgram := header.UDP(payloadIPHeader.Payload())
- if got, want := len(origDgram.Payload()), wantLen; got != want {
- t.Fatalf("unexpected payload length got: %d, want: %d", got, want)
- }
- if got, want := origDgram.Payload(), payload[:wantLen]; !bytes.Equal(got, want) {
- t.Fatalf("unexpected payload got: %d, want: %d", got, want)
- }
- })
- }
-}
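
The wantLen arithmetic above can be checked independently. The following is a minimal, standalone sketch (not part of the test) that assumes the standard sizes: a 20-byte minimal and 60-byte maximal IPv4 header, a 40-byte IPv6 header, 8-byte ICMPv4/ICMPv6 and UDP headers, the 576-byte IPv4 minimum processable datagram size, and the 1280-byte IPv6 minimum MTU. The second computation corresponds to the IPv6 variant in TestV6UnknownDestination below.

package main

import "fmt"

func main() {
	// IPv4: space left in a 576-byte ICMP error for the quoted UDP payload.
	const (
		ipv4MinProcessableDatagram = 576 // header.IPv4MinimumProcessableDatagramSize
		ipv4MaxHeader              = 60  // header.IPv4MaximumHeaderSize
		icmpv4Min                  = 8   // header.ICMPv4MinimumSize
		ipv4Min                    = 20  // header.IPv4MinimumSize
		udpMin                     = 8   // header.UDPMinimumSize
	)
	v4WantLen := ipv4MinProcessableDatagram - ipv4MaxHeader - icmpv4Min - (ipv4Min + udpMin)
	fmt.Println("IPv4 wantLen:", v4WantLen) // 480

	// IPv6: space left in a 1280-byte ICMP error for the quoted UDP payload.
	const (
		ipv6MinMTU = 1280 // header.IPv6MinimumMTU
		ipv6Min    = 40   // header.IPv6MinimumSize
		icmpv6Min  = 8    // header.ICMPv6MinimumSize
	)
	v6WantLen := ipv6MinMTU - 2*ipv6Min - icmpv6Min - udpMin
	fmt.Println("IPv6 wantLen:", v6WantLen) // 1184
}
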
-
-// TestV6UnknownDestination verifies that we generate an ICMPv6 Destination
-// Unreachable message when a UDP datagram is received on ports for which there
-// is no bound UDP socket.
-func TestV6UnknownDestination(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- testCases := []struct {
- flow testFlow
- icmpRequired bool
- // largePayload if true will result in a payload large enough to
- // create an IPv6 packet > header.IPv6MinimumMTU bytes.
- largePayload bool
- // badChecksum if true, will set an invalid checksum in the
- // header.
- badChecksum bool
- }{
- {unicastV6, true, false, false},
- {unicastV6, true, true, false},
- {unicastV6, false, false, true},
- {unicastV6, false, true, true},
- {multicastV6, false, false, false},
- {multicastV6, false, true, false},
- }
- checksumErrors := uint64(0)
- for _, tc := range testCases {
- t.Run(fmt.Sprintf("flow:%s icmpRequired:%t largePayload:%t badChecksum:%t", tc.flow, tc.icmpRequired, tc.largePayload, tc.badChecksum), func(t *testing.T) {
- payload := newPayload()
- if tc.largePayload {
- payload = newMinPayload(1280)
- }
- c.injectPacket(tc.flow, payload, tc.badChecksum)
- if tc.badChecksum {
- checksumErrors++
- if got, want := c.s.Stats().UDP.ChecksumErrors.Value(), checksumErrors; got != want {
- t.Fatalf("got stats.UDP.ChecksumErrors.Value() = %d, want = %d", got, want)
- }
- }
- if !tc.icmpRequired {
- if p, ok := c.linkEP.Read(); ok {
- t.Fatalf("unexpected packet received: %+v", p)
- }
- return
- }
-
- // ICMP required.
- p, ok := c.linkEP.Read()
- if !ok {
- t.Fatalf("packet wasn't written out")
- return
- }
-
- vv := buffer.NewVectorisedView(p.Pkt.Size(), p.Pkt.Views())
- pkt := vv.ToView()
- if got, want := len(pkt), header.IPv6MinimumMTU; got > want {
- t.Fatalf("got an ICMP packet of size: %d, want: sz <= %d", got, want)
- }
-
- hdr := header.IPv6(pkt)
- checker.IPv6(t, hdr, checker.ICMPv6(
- checker.ICMPv6Type(header.ICMPv6DstUnreachable),
- checker.ICMPv6Code(header.ICMPv6PortUnreachable)))
-
- icmpPkt := header.ICMPv6(hdr.Payload())
- payloadIPHeader := header.IPv6(icmpPkt.Payload())
- wantLen := len(payload)
- if tc.largePayload {
- wantLen = header.IPv6MinimumMTU - header.IPv6MinimumSize*2 - header.ICMPv6MinimumSize - header.UDPMinimumSize
- }
- // In case of large payloads the IP packet may be truncated. Update
- // the length field before retrieving the udp datagram payload.
- payloadIPHeader.SetPayloadLength(uint16(wantLen + header.UDPMinimumSize))
-
- origDgram := header.UDP(payloadIPHeader.Payload())
- if got, want := len(origDgram.Payload()), wantLen; got != want {
- t.Fatalf("unexpected payload length got: %d, want: %d", got, want)
- }
- if got, want := origDgram.Payload(), payload[:wantLen]; !bytes.Equal(got, want) {
- t.Fatalf("unexpected payload got: %v, want: %v", got, want)
- }
- })
- }
-}
-
-// TestIncrementMalformedPacketsReceived verifies that the global and endpoint
-// malformed-packets-received stats are incremented.
-func TestIncrementMalformedPacketsReceived(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpoint(ipv6.ProtocolNumber)
- // Bind to wildcard.
- if err := c.ep.Bind(tcpip.FullAddress{Port: stackPort}); err != nil {
- c.t.Fatalf("Bind failed: %s", err)
- }
-
- payload := newPayload()
- h := unicastV6.header4Tuple(incoming)
- buf := c.buildV6Packet(payload, &h)
-
- // Invalidate the UDP header length field.
- u := header.UDP(buf[header.IPv6MinimumSize:])
- u.SetLength(u.Length() + 1)
-
- c.linkEP.InjectInbound(ipv6.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
-
- const want = 1
- if got := c.s.Stats().UDP.MalformedPacketsReceived.Value(); got != want {
- t.Errorf("got stats.UDP.MalformedPacketsReceived.Value() = %d, want = %d", got, want)
- }
- if got := c.ep.Stats().(*tcpip.TransportEndpointStats).ReceiveErrors.MalformedPacketsReceived.Value(); got != want {
- t.Errorf("got EP Stats.ReceiveErrors.MalformedPacketsReceived stats = %d, want = %d", got, want)
- }
-}
-
-// TestShortHeader verifies that when a packet with a too-short UDP header is
-// received, the malformed received global stat gets incremented.
-func TestShortHeader(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpoint(ipv6.ProtocolNumber)
- // Bind to wildcard.
- if err := c.ep.Bind(tcpip.FullAddress{Port: stackPort}); err != nil {
- c.t.Fatalf("Bind failed: %s", err)
- }
-
- h := unicastV6.header4Tuple(incoming)
-
- // Allocate a buffer for an IPv6 and too-short UDP header.
- const udpSize = header.UDPMinimumSize - 1
- buf := buffer.NewView(header.IPv6MinimumSize + udpSize)
- // Initialize the IP header.
- ip := header.IPv6(buf)
- ip.Encode(&header.IPv6Fields{
- TrafficClass: testTOS,
- PayloadLength: uint16(udpSize),
- TransportProtocol: udp.ProtocolNumber,
- HopLimit: 65,
- SrcAddr: h.srcAddr.Addr,
- DstAddr: h.dstAddr.Addr,
- })
-
- // Initialize the UDP header.
- udpHdr := header.UDP(buffer.NewView(header.UDPMinimumSize))
- udpHdr.Encode(&header.UDPFields{
- SrcPort: h.srcAddr.Port,
- DstPort: h.dstAddr.Port,
- Length: header.UDPMinimumSize,
- })
- // Calculate the UDP pseudo-header checksum.
- xsum := header.PseudoHeaderChecksum(udp.ProtocolNumber, h.srcAddr.Addr, h.dstAddr.Addr, uint16(len(udpHdr)))
- udpHdr.SetChecksum(^udpHdr.CalculateChecksum(xsum))
- // Copy all but the last byte of the UDP header into the packet.
- copy(buf[header.IPv6MinimumSize:], udpHdr)
-
- // Inject packet.
- c.linkEP.InjectInbound(ipv6.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
-
- if got, want := c.s.Stats().NICs.MalformedL4RcvdPackets.Value(), uint64(1); got != want {
-		t.Errorf("got c.s.Stats().NICs.MalformedL4RcvdPackets.Value() = %d, want = %d", got, want)
- }
-}
-
-// TestBadChecksumErrors verifies that when a checksum error is detected, the
-// global and endpoint stats are incremented.
-func TestBadChecksumErrors(t *testing.T) {
- for _, flow := range []testFlow{unicastV4, unicastV6} {
- t.Run(flow.String(), func(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpoint(flow.sockProto())
- // Bind to wildcard.
- if err := c.ep.Bind(tcpip.FullAddress{Port: stackPort}); err != nil {
- c.t.Fatalf("Bind failed: %s", err)
- }
-
- payload := newPayload()
- c.injectPacket(flow, payload, true /* badChecksum */)
-
- const want = 1
- if got := c.s.Stats().UDP.ChecksumErrors.Value(); got != want {
- t.Errorf("got stats.UDP.ChecksumErrors.Value() = %d, want = %d", got, want)
- }
- if got := c.ep.Stats().(*tcpip.TransportEndpointStats).ReceiveErrors.ChecksumErrors.Value(); got != want {
- t.Errorf("got EP Stats.ReceiveErrors.ChecksumErrors stats = %d, want = %d", got, want)
- }
- })
- }
-}
-
-// TestPayloadModifiedV4 verifies that when the payload is modified (causing a
-// checksum error), the global and endpoint stats are incremented.
-func TestPayloadModifiedV4(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpoint(ipv4.ProtocolNumber)
- // Bind to wildcard.
- if err := c.ep.Bind(tcpip.FullAddress{Port: stackPort}); err != nil {
- c.t.Fatalf("Bind failed: %s", err)
- }
-
- payload := newPayload()
- h := unicastV4.header4Tuple(incoming)
- buf := c.buildV4Packet(payload, &h)
- // Modify the payload so that the checksum value in the UDP header will be
- // incorrect.
- buf[len(buf)-1]++
- c.linkEP.InjectInbound(ipv4.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
-
- const want = 1
- if got := c.s.Stats().UDP.ChecksumErrors.Value(); got != want {
- t.Errorf("got stats.UDP.ChecksumErrors.Value() = %d, want = %d", got, want)
- }
- if got := c.ep.Stats().(*tcpip.TransportEndpointStats).ReceiveErrors.ChecksumErrors.Value(); got != want {
- t.Errorf("got EP Stats.ReceiveErrors.ChecksumErrors stats = %d, want = %d", got, want)
- }
-}
-
-// TestPayloadModifiedV6 verifies that when the payload is modified (causing a
-// checksum error), the global and endpoint stats are incremented.
-func TestPayloadModifiedV6(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpoint(ipv6.ProtocolNumber)
- // Bind to wildcard.
- if err := c.ep.Bind(tcpip.FullAddress{Port: stackPort}); err != nil {
- c.t.Fatalf("Bind failed: %s", err)
- }
-
- payload := newPayload()
- h := unicastV6.header4Tuple(incoming)
- buf := c.buildV6Packet(payload, &h)
- // Modify the payload so that the checksum value in the UDP header will be
- // incorrect.
- buf[len(buf)-1]++
- c.linkEP.InjectInbound(ipv6.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
-
- const want = 1
- if got := c.s.Stats().UDP.ChecksumErrors.Value(); got != want {
- t.Errorf("got stats.UDP.ChecksumErrors.Value() = %d, want = %d", got, want)
- }
- if got := c.ep.Stats().(*tcpip.TransportEndpointStats).ReceiveErrors.ChecksumErrors.Value(); got != want {
- t.Errorf("got EP Stats.ReceiveErrors.ChecksumErrors stats = %d, want = %d", got, want)
- }
-}
-
-// TestChecksumZeroV4 verifies that when the checksum value is zero, the global
-// and endpoint stats are *not* incremented (the UDP checksum is optional on IPv4).
-func TestChecksumZeroV4(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpoint(ipv4.ProtocolNumber)
- // Bind to wildcard.
- if err := c.ep.Bind(tcpip.FullAddress{Port: stackPort}); err != nil {
- c.t.Fatalf("Bind failed: %s", err)
- }
-
- payload := newPayload()
- h := unicastV4.header4Tuple(incoming)
- buf := c.buildV4Packet(payload, &h)
- // Set the checksum field in the UDP header to zero.
- u := header.UDP(buf[header.IPv4MinimumSize:])
- u.SetChecksum(0)
- c.linkEP.InjectInbound(ipv4.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
-
- const want = 0
- if got := c.s.Stats().UDP.ChecksumErrors.Value(); got != want {
- t.Errorf("got stats.UDP.ChecksumErrors.Value() = %d, want = %d", got, want)
- }
- if got := c.ep.Stats().(*tcpip.TransportEndpointStats).ReceiveErrors.ChecksumErrors.Value(); got != want {
- t.Errorf("got EP Stats.ReceiveErrors.ChecksumErrors stats = %d, want = %d", got, want)
- }
-}
-
-// TestChecksumZeroV6 verifies that when the checksum value is zero, the global
-// and endpoint stats are incremented (the UDP checksum is *not* optional on IPv6).
-func TestChecksumZeroV6(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpoint(ipv6.ProtocolNumber)
- // Bind to wildcard.
- if err := c.ep.Bind(tcpip.FullAddress{Port: stackPort}); err != nil {
- c.t.Fatalf("Bind failed: %s", err)
- }
-
- payload := newPayload()
- h := unicastV6.header4Tuple(incoming)
- buf := c.buildV6Packet(payload, &h)
- // Set the checksum field in the UDP header to zero.
- u := header.UDP(buf[header.IPv6MinimumSize:])
- u.SetChecksum(0)
- c.linkEP.InjectInbound(ipv6.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
- Data: buf.ToVectorisedView(),
- }))
-
- const want = 1
- if got := c.s.Stats().UDP.ChecksumErrors.Value(); got != want {
- t.Errorf("got stats.UDP.ChecksumErrors.Value() = %d, want = %d", got, want)
- }
- if got := c.ep.Stats().(*tcpip.TransportEndpointStats).ReceiveErrors.ChecksumErrors.Value(); got != want {
- t.Errorf("got EP Stats.ReceiveErrors.ChecksumErrors stats = %d, want = %d", got, want)
- }
-}
-
-// TestShutdownRead verifies endpoint read shutdown and error
-// stats increment on packet receive.
-func TestShutdownRead(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpoint(ipv6.ProtocolNumber)
-
- // Bind to wildcard.
- if err := c.ep.Bind(tcpip.FullAddress{Port: stackPort}); err != nil {
- c.t.Fatalf("Bind failed: %s", err)
- }
-
- if err := c.ep.Connect(tcpip.FullAddress{Addr: testV6Addr, Port: testPort}); err != nil {
- c.t.Fatalf("Connect failed: %s", err)
- }
-
- if err := c.ep.Shutdown(tcpip.ShutdownRead); err != nil {
- t.Fatalf("Shutdown failed: %s", err)
- }
-
- testFailingRead(c, unicastV6, true /* expectReadError */)
-
- var want uint64 = 1
- if got := c.s.Stats().UDP.ReceiveBufferErrors.Value(); got != want {
- t.Errorf("got stats.UDP.ReceiveBufferErrors.Value() = %v, want = %v", got, want)
- }
- if got := c.ep.Stats().(*tcpip.TransportEndpointStats).ReceiveErrors.ClosedReceiver.Value(); got != want {
- t.Errorf("got EP Stats.ReceiveErrors.ClosedReceiver stats = %v, want = %v", got, want)
- }
-}
-
-// TestShutdownWrite verifies endpoint write shutdown and error
-// stats increment on packet write.
-func TestShutdownWrite(t *testing.T) {
- c := newDualTestContext(t, defaultMTU)
- defer c.cleanup()
-
- c.createEndpoint(ipv6.ProtocolNumber)
-
- if err := c.ep.Connect(tcpip.FullAddress{Addr: testV6Addr, Port: testPort}); err != nil {
- c.t.Fatalf("Connect failed: %s", err)
- }
-
- if err := c.ep.Shutdown(tcpip.ShutdownWrite); err != nil {
- t.Fatalf("Shutdown failed: %s", err)
- }
-
- testFailingWrite(c, unicastV6, &tcpip.ErrClosedForSend{})
-}
-
-func (c *testContext) checkEndpointWriteStats(incr uint64, want tcpip.TransportEndpointStats, err tcpip.Error) {
- got := c.ep.Stats().(*tcpip.TransportEndpointStats).Clone()
- switch err.(type) {
- case nil:
- want.PacketsSent.IncrementBy(incr)
- case *tcpip.ErrMessageTooLong, *tcpip.ErrInvalidOptionValue:
- want.WriteErrors.InvalidArgs.IncrementBy(incr)
- case *tcpip.ErrClosedForSend:
- want.WriteErrors.WriteClosed.IncrementBy(incr)
- case *tcpip.ErrInvalidEndpointState:
- want.WriteErrors.InvalidEndpointState.IncrementBy(incr)
- case *tcpip.ErrNoRoute, *tcpip.ErrBroadcastDisabled, *tcpip.ErrNetworkUnreachable:
- want.SendErrors.NoRoute.IncrementBy(incr)
- default:
- want.SendErrors.SendToNetworkFailed.IncrementBy(incr)
- }
- if got != want {
- c.t.Errorf("Endpoint stats not matching for error %s got %+v want %+v", err, got, want)
- }
-}
-
-func (c *testContext) checkEndpointReadStats(incr uint64, want tcpip.TransportEndpointStats, err tcpip.Error) {
- got := c.ep.Stats().(*tcpip.TransportEndpointStats).Clone()
- switch err.(type) {
- case nil, *tcpip.ErrWouldBlock:
- case *tcpip.ErrClosedForReceive:
- want.ReadErrors.ReadClosed.IncrementBy(incr)
- default:
- c.t.Errorf("Endpoint error missing stats update err %v", err)
- }
- if got != want {
- c.t.Errorf("Endpoint stats not matching for error %s got %+v want %+v", err, got, want)
- }
-}
-
-func TestOutgoingSubnetBroadcast(t *testing.T) {
- const nicID1 = 1
-
- ipv4Addr := tcpip.AddressWithPrefix{
- Address: "\xc0\xa8\x01\x3a",
- PrefixLen: 24,
- }
- ipv4Subnet := ipv4Addr.Subnet()
- ipv4SubnetBcast := ipv4Subnet.Broadcast()
- ipv4Gateway := testutil.MustParse4("192.168.1.1")
- ipv4AddrPrefix31 := tcpip.AddressWithPrefix{
- Address: "\xc0\xa8\x01\x3a",
- PrefixLen: 31,
- }
- ipv4Subnet31 := ipv4AddrPrefix31.Subnet()
- ipv4Subnet31Bcast := ipv4Subnet31.Broadcast()
- ipv4AddrPrefix32 := tcpip.AddressWithPrefix{
- Address: "\xc0\xa8\x01\x3a",
- PrefixLen: 32,
- }
- ipv4Subnet32 := ipv4AddrPrefix32.Subnet()
- ipv4Subnet32Bcast := ipv4Subnet32.Broadcast()
- ipv6Addr := tcpip.AddressWithPrefix{
- Address: "\x20\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01",
- PrefixLen: 64,
- }
- ipv6Subnet := ipv6Addr.Subnet()
- ipv6SubnetBcast := ipv6Subnet.Broadcast()
- remNetAddr := tcpip.AddressWithPrefix{
- Address: "\x64\x0a\x7b\x18",
- PrefixLen: 24,
- }
- remNetSubnet := remNetAddr.Subnet()
- remNetSubnetBcast := remNetSubnet.Broadcast()
-
- tests := []struct {
- name string
- nicAddr tcpip.ProtocolAddress
- routes []tcpip.Route
- remoteAddr tcpip.Address
- requiresBroadcastOpt bool
- }{
- {
- name: "IPv4 Broadcast to local subnet",
- nicAddr: tcpip.ProtocolAddress{
- Protocol: header.IPv4ProtocolNumber,
- AddressWithPrefix: ipv4Addr,
- },
- routes: []tcpip.Route{
- {
- Destination: ipv4Subnet,
- NIC: nicID1,
- },
- },
- remoteAddr: ipv4SubnetBcast,
- requiresBroadcastOpt: true,
- },
- {
- name: "IPv4 Broadcast to local /31 subnet",
- nicAddr: tcpip.ProtocolAddress{
- Protocol: header.IPv4ProtocolNumber,
- AddressWithPrefix: ipv4AddrPrefix31,
- },
- routes: []tcpip.Route{
- {
- Destination: ipv4Subnet31,
- NIC: nicID1,
- },
- },
- remoteAddr: ipv4Subnet31Bcast,
- requiresBroadcastOpt: false,
- },
- {
- name: "IPv4 Broadcast to local /32 subnet",
- nicAddr: tcpip.ProtocolAddress{
- Protocol: header.IPv4ProtocolNumber,
- AddressWithPrefix: ipv4AddrPrefix32,
- },
- routes: []tcpip.Route{
- {
- Destination: ipv4Subnet32,
- NIC: nicID1,
- },
- },
- remoteAddr: ipv4Subnet32Bcast,
- requiresBroadcastOpt: false,
- },
- // IPv6 has no notion of a broadcast.
- {
- name: "IPv6 'Broadcast' to local subnet",
- nicAddr: tcpip.ProtocolAddress{
- Protocol: header.IPv6ProtocolNumber,
- AddressWithPrefix: ipv6Addr,
- },
- routes: []tcpip.Route{
- {
- Destination: ipv6Subnet,
- NIC: nicID1,
- },
- },
- remoteAddr: ipv6SubnetBcast,
- requiresBroadcastOpt: false,
- },
- {
- name: "IPv4 Broadcast to remote subnet",
- nicAddr: tcpip.ProtocolAddress{
- Protocol: header.IPv4ProtocolNumber,
- AddressWithPrefix: ipv4Addr,
- },
- routes: []tcpip.Route{
- {
- Destination: remNetSubnet,
- Gateway: ipv4Gateway,
- NIC: nicID1,
- },
- },
- remoteAddr: remNetSubnetBcast,
- // TODO(gvisor.dev/issue/3938): Once we support marking a route as
- // broadcast, this test should require the broadcast option to be set.
- requiresBroadcastOpt: false,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- s := stack.New(stack.Options{
- NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},
- TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},
- Clock: &faketime.NullClock{},
- })
- e := channel.New(0, defaultMTU, "")
- if err := s.CreateNIC(nicID1, e); err != nil {
- t.Fatalf("CreateNIC(%d, _): %s", nicID1, err)
- }
- if err := s.AddProtocolAddress(nicID1, test.nicAddr); err != nil {
- t.Fatalf("AddProtocolAddress(%d, %+v): %s", nicID1, test.nicAddr, err)
- }
-
- s.SetRouteTable(test.routes)
-
- var netProto tcpip.NetworkProtocolNumber
- switch l := len(test.remoteAddr); l {
- case header.IPv4AddressSize:
- netProto = header.IPv4ProtocolNumber
- case header.IPv6AddressSize:
- netProto = header.IPv6ProtocolNumber
- default:
- t.Fatalf("got unexpected address length = %d bytes", l)
- }
-
- wq := waiter.Queue{}
- ep, err := s.NewEndpoint(udp.ProtocolNumber, netProto, &wq)
- if err != nil {
- t.Fatalf("NewEndpoint(%d, %d, _): %s", udp.ProtocolNumber, netProto, err)
- }
- defer ep.Close()
-
- var r bytes.Reader
- data := []byte{1, 2, 3, 4}
- to := tcpip.FullAddress{
- Addr: test.remoteAddr,
- Port: 80,
- }
- opts := tcpip.WriteOptions{To: &to}
- expectedErrWithoutBcastOpt := func(err tcpip.Error) tcpip.Error {
- if _, ok := err.(*tcpip.ErrBroadcastDisabled); ok {
- return nil
- }
- return &tcpip.ErrBroadcastDisabled{}
- }
- if !test.requiresBroadcastOpt {
- expectedErrWithoutBcastOpt = nil
- }
-
- r.Reset(data)
- {
- n, err := ep.Write(&r, opts)
- if expectedErrWithoutBcastOpt != nil {
- if want := expectedErrWithoutBcastOpt(err); want != nil {
- t.Fatalf("got ep.Write(_, %#v) = (%d, %s), want = (_, %s)", opts, n, err, want)
- }
- } else if err != nil {
- t.Fatalf("got ep.Write(_, %#v) = (%d, %s), want = (_, nil)", opts, n, err)
- }
- }
-
- ep.SocketOptions().SetBroadcast(true)
-
- r.Reset(data)
- if n, err := ep.Write(&r, opts); err != nil {
- t.Fatalf("got ep.Write(_, %#v) = (%d, %s), want = (_, nil)", opts, n, err)
- }
-
- ep.SocketOptions().SetBroadcast(false)
-
- r.Reset(data)
- {
- n, err := ep.Write(&r, opts)
- if expectedErrWithoutBcastOpt != nil {
- if want := expectedErrWithoutBcastOpt(err); want != nil {
- t.Fatalf("got ep.Write(_, %#v) = (%d, %s), want = (_, %s)", opts, n, err, want)
- }
- } else if err != nil {
- t.Fatalf("got ep.Write(_, %#v) = (%d, %s), want = (_, nil)", opts, n, err)
- }
- }
- })
- }
-}
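
For reference, the subnet-broadcast addresses driving the cases above can be computed directly. Below is a small, hypothetical helper (not part of the test) showing why the /24 prefix yields 192.168.1.255 while the /31 and /32 prefixes collapse to (or next to) the interface address itself, which is presumably why those flows do not require the broadcast option.

package main

import (
	"fmt"
	"net"
)

// subnetBroadcast returns the IPv4 subnet broadcast address for the given
// address and prefix length by OR-ing the host bits to all ones.
func subnetBroadcast(ip net.IP, prefixLen int) net.IP {
	ip4 := ip.To4()
	mask := net.CIDRMask(prefixLen, 32)
	bcast := make(net.IP, len(ip4))
	for i := range ip4 {
		bcast[i] = ip4[i] | ^mask[i]
	}
	return bcast
}

func main() {
	for _, prefix := range []int{24, 31, 32} {
		// 192.168.1.58 is the NIC address used in the test above.
		fmt.Printf("/%d -> %s\n", prefix, subnetBroadcast(net.ParseIP("192.168.1.58"), prefix))
	}
	// Output:
	// /24 -> 192.168.1.255
	// /31 -> 192.168.1.59
	// /32 -> 192.168.1.58
}
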
diff --git a/pkg/test/criutil/BUILD b/pkg/test/criutil/BUILD
deleted file mode 100644
index a7b082cee..000000000
--- a/pkg/test/criutil/BUILD
+++ /dev/null
@@ -1,14 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "criutil",
- testonly = 1,
- srcs = ["criutil.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/test/dockerutil",
- "//pkg/test/testutil",
- ],
-)
diff --git a/pkg/test/criutil/criutil.go b/pkg/test/criutil/criutil.go
deleted file mode 100644
index 3b41a2824..000000000
--- a/pkg/test/criutil/criutil.go
+++ /dev/null
@@ -1,372 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package criutil contains utility functions for interacting with the
-// Container Runtime Interface (CRI), principally via the crictl command line
-// tool. This requires critools to be installed on the local system.
-package criutil
-
-import (
- "encoding/json"
- "fmt"
- "os"
- "os/exec"
- "path"
- "regexp"
- "strconv"
- "strings"
- "time"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/pkg/test/testutil"
-)
-
-// Crictl contains information required to run the crictl utility.
-type Crictl struct {
- logger testutil.Logger
- endpoint string
- cleanup []func()
-}
-
-// ResolvePath attempts to find the path of the given executable. It may return
-// an invalid path, in which case execution will fail with a sensible error.
-func ResolvePath(executable string) string {
- runtime, err := dockerutil.RuntimePath()
- if err == nil {
- // Check first the directory of the runtime itself.
- if dir := path.Dir(runtime); dir != "" && dir != "." {
- guess := path.Join(dir, executable)
- if fi, err := os.Stat(guess); err == nil && (fi.Mode()&0111) != 0 {
- return guess
- }
- }
- }
-
- // Favor /usr/local/bin, if it exists.
- localBin := fmt.Sprintf("/usr/local/bin/%s", executable)
- if _, err := os.Stat(localBin); err == nil {
- return localBin
- }
-
-	// Try to find it via PATH.
-	if guess, err := exec.LookPath(executable); err == nil {
-		return guess
-	}
-
- // Return a bare path; this generates a suitable error.
- return executable
-}
-
-// NewCrictl returns a Crictl configured with an endpoint over which it will
-// talk to containerd.
-func NewCrictl(logger testutil.Logger, endpoint string) *Crictl {
- // Attempt to find the executable, but don't bother propagating the
- // error at this point. The first command executed will return with a
- // binary not found error.
- return &Crictl{
- logger: logger,
- endpoint: endpoint,
- }
-}
-
-// CleanUp executes cleanup functions.
-func (cc *Crictl) CleanUp() {
- for _, c := range cc.cleanup {
- c()
- }
- cc.cleanup = nil
-}
-
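
A rough usage sketch for this helper, using only methods defined in this file: the containerd socket path, runtime name, image, and spec strings are placeholders, and it assumes *testing.T satisfies the testutil.Logger interface (as the tests that consume this package appear to rely on).

package criutil_test

import (
	"testing"

	"gvisor.dev/gvisor/pkg/test/criutil"
)

// TestHelloWorldSketch is a hypothetical test showing the typical lifecycle:
// start a pod and container, exec into it, then tear everything down.
func TestHelloWorldSketch(t *testing.T) {
	// Placeholder endpoint; real tests point this at a running containerd.
	cc := criutil.NewCrictl(t, "/run/containerd/containerd.sock")
	defer cc.CleanUp()

	const (
		sbSpec   = `{"metadata": {"name": "sandbox"}}`   // placeholder sandbox spec
		contSpec = `{"metadata": {"name": "container"}}` // placeholder container spec
	)

	podID, contID, err := cc.StartPodAndContainer("runsc", "basic/alpine", sbSpec, contSpec)
	if err != nil {
		t.Fatalf("StartPodAndContainer failed: %v", err)
	}

	if out, err := cc.Exec(contID, "echo", "hello"); err != nil {
		t.Fatalf("Exec failed: %v", err)
	} else {
		t.Logf("output: %s", out)
	}

	if err := cc.StopContainer(contID); err != nil {
		t.Fatalf("StopContainer failed: %v", err)
	}
	if err := cc.StopPod(podID); err != nil {
		t.Fatalf("StopPod failed: %v", err)
	}
	if err := cc.RmPod(podID); err != nil {
		t.Fatalf("RmPod failed: %v", err)
	}
}
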
-// RunPod creates a sandbox. It corresponds to `crictl runp`.
-func (cc *Crictl) RunPod(runtime, sbSpecFile string) (string, error) {
- podID, err := cc.run("runp", "--runtime", runtime, sbSpecFile)
- if err != nil {
- return "", fmt.Errorf("runp failed: %v", err)
- }
- // Strip the trailing newline from crictl output.
- return strings.TrimSpace(podID), nil
-}
-
-// Create creates a container within a sandbox. It corresponds to `crictl
-// create`.
-func (cc *Crictl) Create(podID, contSpecFile, sbSpecFile string) (string, error) {
-	// Starting with version 1.16.0, crictl annoyingly attempts to pull the
-	// container image, even if it is already available locally. We therefore
-	// need to parse the version and add an appropriate --no-pull argument,
-	// since the image has already been loaded locally.
- out, err := cc.run("-v")
- if err != nil {
- return "", err
- }
-	r := regexp.MustCompile("crictl version ([0-9]+)\\.([0-9]+)\\.([0-9]+)")
- vs := r.FindStringSubmatch(out)
- if len(vs) != 4 {
- return "", fmt.Errorf("crictl -v had unexpected output: %s", out)
- }
- major, err := strconv.ParseUint(vs[1], 10, 64)
- if err != nil {
- return "", fmt.Errorf("crictl had invalid version: %v (%s)", err, out)
- }
- minor, err := strconv.ParseUint(vs[2], 10, 64)
- if err != nil {
- return "", fmt.Errorf("crictl had invalid version: %v (%s)", err, out)
- }
-
- args := []string{"create"}
- if (major == 1 && minor >= 16) || major > 1 {
- args = append(args, "--no-pull")
- }
- args = append(args, podID)
- args = append(args, contSpecFile)
- args = append(args, sbSpecFile)
-
- podID, err = cc.run(args...)
- if err != nil {
- time.Sleep(10 * time.Minute) // XXX
- return "", fmt.Errorf("create failed: %v", err)
- }
-
- // Strip the trailing newline from crictl output.
- return strings.TrimSpace(podID), nil
-}
-
-// Start starts a container. It corresponds to `crictl start`.
-func (cc *Crictl) Start(contID string) (string, error) {
- output, err := cc.run("start", contID)
- if err != nil {
- return "", fmt.Errorf("start failed: %v", err)
- }
- return output, nil
-}
-
-// Stop stops a container. It corresponds to `crictl stop`.
-func (cc *Crictl) Stop(contID string) error {
- _, err := cc.run("stop", contID)
- return err
-}
-
-// Exec execs a program inside a container. It corresponds to `crictl exec`.
-func (cc *Crictl) Exec(contID string, args ...string) (string, error) {
- a := []string{"exec", contID}
- a = append(a, args...)
- output, err := cc.run(a...)
- if err != nil {
- return "", fmt.Errorf("exec failed: %v", err)
- }
- return output, nil
-}
-
-// Logs retrieves the container logs. It corresponds to `crictl logs`.
-func (cc *Crictl) Logs(contID string, args ...string) (string, error) {
- a := []string{"logs", contID}
- a = append(a, args...)
- output, err := cc.run(a...)
- if err != nil {
- return "", fmt.Errorf("logs failed: %v", err)
- }
- return output, nil
-}
-
-// Rm removes a container. It corresponds to `crictl rm`.
-func (cc *Crictl) Rm(contID string) error {
- _, err := cc.run("rm", contID)
- return err
-}
-
-// StopPod stops a pod. It corresponds to `crictl stopp`.
-func (cc *Crictl) StopPod(podID string) error {
- _, err := cc.run("stopp", podID)
- return err
-}
-
-// containerConfig is a minimal copy of
-// https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/apis/cri/runtime/v1alpha2/api.proto
-// It only contains fields needed for testing.
-type containerConfig struct {
- Status containerStatus
-}
-
-type containerStatus struct {
- Network containerNetwork
-}
-
-type containerNetwork struct {
- IP string
-}
-
-// PodIP returns a pod's IP address.
-func (cc *Crictl) PodIP(podID string) (string, error) {
- output, err := cc.run("inspectp", podID)
- if err != nil {
- return "", err
- }
- conf := &containerConfig{}
- if err := json.Unmarshal([]byte(output), conf); err != nil {
- return "", fmt.Errorf("failed to unmarshal JSON: %v, %s", err, output)
- }
- if conf.Status.Network.IP == "" {
- return "", fmt.Errorf("no IP found in config: %s", output)
- }
- return conf.Status.Network.IP, nil
-}
-
-// RmPod removes a pod. It corresponds to `crictl rmp`.
-func (cc *Crictl) RmPod(podID string) error {
- _, err := cc.run("rmp", podID)
- return err
-}
-
-// Import imports the given container from the local Docker instance.
-func (cc *Crictl) Import(image string) error {
-	// Note that importing the image may push a lot of bytes, so no overall
-	// deadline is set on the import itself; only the connect timeout below is
-	// set explicitly (it is not inherited from the Crictl instance).
- cmd := testutil.Command(cc.logger,
- ResolvePath("ctr"),
- fmt.Sprintf("--connect-timeout=%s", 30*time.Second),
- fmt.Sprintf("--address=%s", cc.endpoint),
- "-n", "k8s.io", "images", "import", "-")
- cmd.Stderr = os.Stderr // Pass through errors.
-
- // Create a pipe and start the program.
- w, err := cmd.StdinPipe()
- if err != nil {
- return err
- }
- if err := cmd.Start(); err != nil {
- return err
- }
-
- // Save the image on the other end.
- if err := dockerutil.Save(cc.logger, image, w); err != nil {
- cmd.Wait()
- return err
- }
-
- // Close our pipe reference & see if it was loaded.
-	if err := w.Close(); err != nil {
-		return err
-	}
-
- return cmd.Wait()
-}
-
-// StartContainer pulls the given image and starts the container in the
-// sandbox with the given podID.
-//
-// Note that the image will always be imported from the local docker daemon.
-func (cc *Crictl) StartContainer(podID, image, sbSpec, contSpec string) (string, error) {
- if err := cc.Import(image); err != nil {
- return "", err
- }
-
- // Write the specs to files that can be read by crictl.
- sbSpecFile, cleanup, err := testutil.WriteTmpFile("sbSpec", sbSpec)
- if err != nil {
- return "", fmt.Errorf("failed to write sandbox spec: %v", err)
- }
- cc.cleanup = append(cc.cleanup, cleanup)
- contSpecFile, cleanup, err := testutil.WriteTmpFile("contSpec", contSpec)
- if err != nil {
- return "", fmt.Errorf("failed to write container spec: %v", err)
- }
- cc.cleanup = append(cc.cleanup, cleanup)
-
- return cc.startContainer(podID, image, sbSpecFile, contSpecFile)
-}
-
-func (cc *Crictl) startContainer(podID, image, sbSpecFile, contSpecFile string) (string, error) {
- contID, err := cc.Create(podID, contSpecFile, sbSpecFile)
- if err != nil {
- return "", fmt.Errorf("failed to create container in pod %q: %v", podID, err)
- }
-
- if _, err := cc.Start(contID); err != nil {
- return "", fmt.Errorf("failed to start container %q in pod %q: %v", contID, podID, err)
- }
-
- return contID, nil
-}
-
-// StopContainer stops and deletes the container with the given container ID.
-func (cc *Crictl) StopContainer(contID string) error {
- if err := cc.Stop(contID); err != nil {
- return fmt.Errorf("failed to stop container %q: %v", contID, err)
- }
-
- if err := cc.Rm(contID); err != nil {
- return fmt.Errorf("failed to remove container %q: %v", contID, err)
- }
-
- return nil
-}
-
-// StartPodAndContainer starts a sandbox and container in that sandbox. It
-// returns the pod ID and container ID.
-func (cc *Crictl) StartPodAndContainer(runtime, image, sbSpec, contSpec string) (string, string, error) {
- if err := cc.Import(image); err != nil {
- return "", "", err
- }
-
- // Write the specs to files that can be read by crictl.
- sbSpecFile, cleanup, err := testutil.WriteTmpFile("sbSpec", sbSpec)
- if err != nil {
- return "", "", fmt.Errorf("failed to write sandbox spec: %v", err)
- }
- cc.cleanup = append(cc.cleanup, cleanup)
- contSpecFile, cleanup, err := testutil.WriteTmpFile("contSpec", contSpec)
- if err != nil {
- return "", "", fmt.Errorf("failed to write container spec: %v", err)
- }
- cc.cleanup = append(cc.cleanup, cleanup)
-
- podID, err := cc.RunPod(runtime, sbSpecFile)
- if err != nil {
- return "", "", err
- }
-
- contID, err := cc.startContainer(podID, image, sbSpecFile, contSpecFile)
-
- return podID, contID, err
-}
-
-// StopPodAndContainer stops a container and pod.
-func (cc *Crictl) StopPodAndContainer(podID, contID string) error {
- if err := cc.StopContainer(contID); err != nil {
- return fmt.Errorf("failed to stop container %q in pod %q: %v", contID, podID, err)
- }
-
- if err := cc.StopPod(podID); err != nil {
- return fmt.Errorf("failed to stop pod %q: %v", podID, err)
- }
-
- if err := cc.RmPod(podID); err != nil {
- return fmt.Errorf("failed to remove pod %q: %v", podID, err)
- }
-
- return nil
-}
-
-// run runs crictl with the given args.
-func (cc *Crictl) run(args ...string) (string, error) {
- defaultArgs := []string{
- ResolvePath("crictl"),
- "--image-endpoint", fmt.Sprintf("unix://%s", cc.endpoint),
- "--runtime-endpoint", fmt.Sprintf("unix://%s", cc.endpoint),
- }
- fullArgs := append(defaultArgs, args...)
- out, err := testutil.Command(cc.logger, fullArgs...).CombinedOutput()
- return string(out), err
-}
diff --git a/pkg/test/dockerutil/BUILD b/pkg/test/dockerutil/BUILD
deleted file mode 100644
index 366f068e3..000000000
--- a/pkg/test/dockerutil/BUILD
+++ /dev/null
@@ -1,43 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "dockerutil",
- testonly = 1,
- srcs = [
- "container.go",
- "dockerutil.go",
- "exec.go",
- "network.go",
- "profile.go",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/test/testutil",
- "@com_github_docker_docker//api/types:go_default_library",
- "@com_github_docker_docker//api/types/container:go_default_library",
- "@com_github_docker_docker//api/types/mount:go_default_library",
- "@com_github_docker_docker//api/types/network:go_default_library",
- "@com_github_docker_docker//client:go_default_library",
- "@com_github_docker_docker//pkg/stdcopy:go_default_library",
- "@com_github_docker_go_connections//nat:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "profile_test",
- size = "large",
- srcs = [
- "profile_test.go",
- ],
- library = ":dockerutil",
- tags = [
- # Requires docker and runsc to be configured before test runs.
- # Also requires the test to be run as root.
- "local",
- "manual",
- ],
- visibility = ["//:sandbox"],
-)
diff --git a/pkg/test/dockerutil/README.md b/pkg/test/dockerutil/README.md
deleted file mode 100644
index 870292096..000000000
--- a/pkg/test/dockerutil/README.md
+++ /dev/null
@@ -1,86 +0,0 @@
-# dockerutil
-
-This package provides utilities for creating and controlling Docker containers
-when testing runsc, gVisor's container runtime for Docker and Kubernetes. A
-simple test may look like:
-
-```
-func TestSuperCool(t *testing.T) {
-	ctx := context.Background()
-	c := dockerutil.MakeContainer(ctx, t)
-	defer c.CleanUp(ctx)
-
-	got, err := c.Run(ctx, dockerutil.RunOpts{
-		Image: "basic/alpine",
-	}, "echo", "super cool")
-	if err != nil {
-		t.Fatalf("err was not nil: %v", err)
-	}
-
-	want := "super cool"
-	if !strings.Contains(got, want) {
-		t.Fatalf("want: %s, got: %s", want, got)
-	}
-}
-```
-
-For further examples, see many of our end-to-end tests elsewhere in the repo,
-such as those in //test/e2e or the benchmarks in //test/benchmarks.
-
-dockerutil uses the official Docker Go API, which is
-[very powerful](https://godoc.org/github.com/docker/docker/client). dockerutil
-is a thin wrapper around this API, making it easy to implement new use cases
-as they come up.
-
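-For instance, a minimal sketch of a test that keeps a container running and
-then execs a command inside it (the test name and commands here are
-illustrative, not from the test suite) might look like:
-
-```
-func TestExecExample(t *testing.T) {
-	ctx := context.Background()
-	c := dockerutil.MakeContainer(ctx, t)
-	defer c.CleanUp(ctx)
-
-	// Keep a container running in the background.
-	if err := c.Spawn(ctx, dockerutil.RunOpts{
-		Image: "basic/alpine",
-	}, "sleep", "1000"); err != nil {
-		t.Fatalf("spawn failed: %v", err)
-	}
-
-	// Run a command inside the running container and check its output.
-	out, err := c.Exec(ctx, dockerutil.ExecOpts{}, "echo", "hello")
-	if err != nil {
-		t.Fatalf("exec failed: %v", err)
-	}
-	if !strings.Contains(out, "hello") {
-		t.Fatalf("unexpected output: %q", out)
-	}
-}
-```
-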
-## Profiling
-
-dockerutil is capable of generating profiles. Currently, the only option is to
-use pprof profiles generated by `runsc debug`. The profiler can generate block,
-CPU, heap, and mutex profiles. To generate profiles:
-
-*   Install runsc with the `--profile` flag: `make configure RUNTIME=myrunsc
-    ARGS="--profile"`. Other flags can be added via ARGS as well, e.g.
-    `--platform=kvm` or `--vfs2`.
-*   Restart docker: `sudo service docker restart`
-
-For example, to run a test or benchmark and generate CPU profiles:
-
-```
-make sudo TARGETS=//path/to:target \
- ARGS="--runtime=myrunsc -test.v -test.bench=. --pprof-cpu" OPTIONS="-c opt"
-```
-
-Profiles will be written to `/tmp/profile/myrunsc/CONTAINERNAME/cpu.pprof`.
-
-In most gVisor tests and benchmarks, the container name is the test name
-followed by some random characters, for example:
-`BenchmarkABSL-CleanCache-JF2J2ZYF3U7SL47QAA727CSJI3C4ZAW2`
-
-Profiling requires root, as `runsc debug` inspects running containers in
-`/var/run`, among other things.
-
-### Writing for Profiling
-
-The example below shows how to use profiling with dockerutil.
-
-```
-func TestSuperCool(t *testing.T) {
-	ctx := context.Background()
-	// Profiled, using the runtime from the dockerutil.runtime flag.
-	profiled := MakeContainer(ctx, t)
-	defer profiled.CleanUp(ctx)
-
-	// Not profiled, using the system default runtime (typically runc).
-	native := MakeNativeContainer(ctx, t)
-	defer native.CleanUp(ctx)
-
-	if err := profiled.Spawn(ctx, RunOpts{
-		Image: "some/image",
-	}, "sleep", "100000"); err != nil {
-		t.Fatalf("spawn failed: %v", err)
-	}
-	// Profiling has begun here.
-	//
-	// ... expensive setup that should not be profiled ...
-	//
-	profiled.RestartProfiles()
-	// Profiled activity goes here.
-}
-```
-
-In the above example, `profiled` would be profiled and `native` would not. The
-call to `RestartProfiles()` restarts the clock on profiling. This is useful if
-the main activity being tested is done with `docker exec` or `container.Spawn()`
-followed by one or more `container.Exec()` calls.
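-
-For instance, a sketch of that `Spawn`-then-`Exec` pattern might look like the
-following (the setup and benchmark commands are placeholders, and this assumes
-the `RestartProfiles()` helper described above):
-
-```
-func TestProfiledExec(t *testing.T) {
-	ctx := context.Background()
-	profiled := MakeContainer(ctx, t)
-	defer profiled.CleanUp(ctx)
-
-	if err := profiled.Spawn(ctx, RunOpts{
-		Image: "some/image",
-	}, "sleep", "100000"); err != nil {
-		t.Fatalf("spawn failed: %v", err)
-	}
-
-	// Expensive setup that should not be part of the profile.
-	if _, err := profiled.Exec(ctx, ExecOpts{}, "sh", "-c", "expensive-setup"); err != nil {
-		t.Fatalf("setup failed: %v", err)
-	}
-
-	// Reset the profiling clock, then run the activity under test.
-	profiled.RestartProfiles()
-	if _, err := profiled.Exec(ctx, ExecOpts{}, "run-benchmark"); err != nil {
-		t.Fatalf("benchmark failed: %v", err)
-	}
-}
-```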
diff --git a/pkg/test/dockerutil/container.go b/pkg/test/dockerutil/container.go
deleted file mode 100644
index 06152a444..000000000
--- a/pkg/test/dockerutil/container.go
+++ /dev/null
@@ -1,548 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package dockerutil
-
-import (
- "bytes"
- "context"
- "errors"
- "fmt"
- "io/ioutil"
- "net"
- "os"
- "path"
- "path/filepath"
- "regexp"
- "strconv"
- "strings"
- "time"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/container"
- "github.com/docker/docker/api/types/mount"
- "github.com/docker/docker/api/types/network"
- "github.com/docker/docker/client"
- "github.com/docker/docker/pkg/stdcopy"
- "github.com/docker/go-connections/nat"
- "gvisor.dev/gvisor/pkg/test/testutil"
-)
-
-// Container represents a Docker container, allowing the user to configure and
-// control it as one would with the 'docker' client. Container is backed by the
-// official Docker Go API.
-// See: https://pkg.go.dev/github.com/docker/docker.
-type Container struct {
- Name string
- runtime string
-
- logger testutil.Logger
- client *client.Client
- id string
- mounts []mount.Mount
- links []string
- copyErr error
- cleanups []func()
-
- // profile is the profiling hook associated with this container.
- profile *profile
-}
-
-// RunOpts are options for running a container.
-type RunOpts struct {
- // Image is the image relative to images/. This will be mangled
- // appropriately, to ensure that only first-party images are used.
- Image string
-
- // Memory is the memory limit in bytes.
- Memory int
-
-	// CpusetCpus is the set of CPUs in which to allow execution (e.g. "0", "1", "0-2").
- CpusetCpus string
-
- // Ports are the ports to be allocated.
- Ports []int
-
- // WorkDir sets the working directory.
- WorkDir string
-
- // ReadOnly sets the read-only flag.
- ReadOnly bool
-
- // Env are additional environment variables.
- Env []string
-
- // User is the user to use.
- User string
-
- // Privileged enables privileged mode.
- Privileged bool
-
- // CapAdd are the extra set of capabilities to add.
- CapAdd []string
-
- // CapDrop are the extra set of capabilities to drop.
- CapDrop []string
-
- // Mounts is the list of directories/files to be mounted inside the container.
- Mounts []mount.Mount
-
- // Links is the list of containers to be connected to the container.
- Links []string
-}
-
-func makeContainer(ctx context.Context, logger testutil.Logger, runtime string) *Container {
- // Slashes are not allowed in container names.
- name := testutil.RandomID(logger.Name())
- name = strings.ReplaceAll(name, "/", "-")
- client, err := client.NewClientWithOpts(client.FromEnv)
- if err != nil {
- return nil
- }
- client.NegotiateAPIVersion(ctx)
- return &Container{
- logger: logger,
- Name: name,
- runtime: runtime,
- client: client,
- }
-}
-
-// MakeContainer constructs a suitable Container object.
-//
-// The runtime used is determined by the runtime flag.
-//
-// Containers will check flags for profiling requests.
-func MakeContainer(ctx context.Context, logger testutil.Logger) *Container {
- return makeContainer(ctx, logger, *runtime)
-}
-
-// MakeNativeContainer constructs a suitable Container object.
-//
-// The runtime used will be the system default.
-//
-// Native containers aren't profiled.
-func MakeNativeContainer(ctx context.Context, logger testutil.Logger) *Container {
- return makeContainer(ctx, logger, "" /*runtime*/)
-}
-
-// Spawn is analogous to 'docker run -d'.
-func (c *Container) Spawn(ctx context.Context, r RunOpts, args ...string) error {
- if err := c.create(ctx, r.Image, c.config(r, args), c.hostConfig(r), nil); err != nil {
- return err
- }
- return c.Start(ctx)
-}
-
-// SpawnProcess is analogous to 'docker run -it'. It returns a Process that
-// represents the container's root process.
-func (c *Container) SpawnProcess(ctx context.Context, r RunOpts, args ...string) (Process, error) {
- config, hostconf, netconf := c.ConfigsFrom(r, args...)
- config.Tty = true
- config.OpenStdin = true
-
- if err := c.CreateFrom(ctx, r.Image, config, hostconf, netconf); err != nil {
- return Process{}, err
- }
-
- // Open a connection to the container for parsing logs and for TTY.
- stream, err := c.client.ContainerAttach(ctx, c.id,
- types.ContainerAttachOptions{
- Stream: true,
- Stdin: true,
- Stdout: true,
- Stderr: true,
- })
- if err != nil {
- return Process{}, fmt.Errorf("connect failed container id %s: %v", c.id, err)
- }
-
- c.cleanups = append(c.cleanups, func() { stream.Close() })
-
- if err := c.Start(ctx); err != nil {
- return Process{}, err
- }
-
- return Process{container: c, conn: stream}, nil
-}
-
-// Run is analogous to 'docker run'.
-func (c *Container) Run(ctx context.Context, r RunOpts, args ...string) (string, error) {
- if err := c.create(ctx, r.Image, c.config(r, args), c.hostConfig(r), nil); err != nil {
- return "", err
- }
-
- if err := c.Start(ctx); err != nil {
- return "", err
- }
-
- if err := c.Wait(ctx); err != nil {
- return "", err
- }
-
- return c.Logs(ctx)
-}
-
-// ConfigsFrom returns container configs from RunOpts and args. The caller should call 'CreateFrom'
-// and Start.
-func (c *Container) ConfigsFrom(r RunOpts, args ...string) (*container.Config, *container.HostConfig, *network.NetworkingConfig) {
- return c.config(r, args), c.hostConfig(r), &network.NetworkingConfig{}
-}
-
-// MakeLink formats a link to add to a RunOpts.
-func (c *Container) MakeLink(target string) string {
- return fmt.Sprintf("%s:%s", c.Name, target)
-}
-
-// CreateFrom creates a container from the given configs.
-func (c *Container) CreateFrom(ctx context.Context, profileImage string, conf *container.Config, hostconf *container.HostConfig, netconf *network.NetworkingConfig) error {
- return c.create(ctx, profileImage, conf, hostconf, netconf)
-}
-
-// Create is analogous to 'docker create'.
-func (c *Container) Create(ctx context.Context, r RunOpts, args ...string) error {
- return c.create(ctx, r.Image, c.config(r, args), c.hostConfig(r), nil)
-}
-
-func (c *Container) create(ctx context.Context, profileImage string, conf *container.Config, hostconf *container.HostConfig, netconf *network.NetworkingConfig) error {
- if c.runtime != "" && c.runtime != "runc" {
-		// Use the image name as provided here, which normally represents the
-		// unmodified "basic/alpine" image name. This should be easy to grok.
- c.profileInit(profileImage)
- }
- cont, err := c.client.ContainerCreate(ctx, conf, hostconf, nil, c.Name)
- if err != nil {
- return err
- }
- c.id = cont.ID
- return nil
-}
-
-func (c *Container) config(r RunOpts, args []string) *container.Config {
- ports := nat.PortSet{}
- for _, p := range r.Ports {
- port := nat.Port(fmt.Sprintf("%d", p))
- ports[port] = struct{}{}
- }
- env := append(r.Env, fmt.Sprintf("RUNSC_TEST_NAME=%s", c.Name))
-
- return &container.Config{
- Image: testutil.ImageByName(r.Image),
- Cmd: args,
- ExposedPorts: ports,
- Env: env,
- WorkingDir: r.WorkDir,
- User: r.User,
- }
-}
-
-func (c *Container) hostConfig(r RunOpts) *container.HostConfig {
- c.mounts = append(c.mounts, r.Mounts...)
-
- return &container.HostConfig{
- Runtime: c.runtime,
- Mounts: c.mounts,
- PublishAllPorts: true,
- Links: r.Links,
- CapAdd: r.CapAdd,
- CapDrop: r.CapDrop,
- Privileged: r.Privileged,
- ReadonlyRootfs: r.ReadOnly,
- Resources: container.Resources{
- Memory: int64(r.Memory), // In bytes.
- CpusetCpus: r.CpusetCpus,
- },
- }
-}
-
-// Start is analogous to 'docker start'.
-func (c *Container) Start(ctx context.Context) error {
- if err := c.client.ContainerStart(ctx, c.id, types.ContainerStartOptions{}); err != nil {
- return fmt.Errorf("ContainerStart failed: %v", err)
- }
-
- if c.profile != nil {
- if err := c.profile.Start(c); err != nil {
- c.logger.Logf("profile.Start failed: %v", err)
- }
- }
-
- return nil
-}
-
-// Stop is analogous to 'docker stop'.
-func (c *Container) Stop(ctx context.Context) error {
- return c.client.ContainerStop(ctx, c.id, nil)
-}
-
-// Pause is analogous to 'docker pause'.
-func (c *Container) Pause(ctx context.Context) error {
- return c.client.ContainerPause(ctx, c.id)
-}
-
-// Unpause is analogous to 'docker unpause'.
-func (c *Container) Unpause(ctx context.Context) error {
- return c.client.ContainerUnpause(ctx, c.id)
-}
-
-// Checkpoint is analogous to 'docker checkpoint'.
-func (c *Container) Checkpoint(ctx context.Context, name string) error {
- return c.client.CheckpointCreate(ctx, c.Name, types.CheckpointCreateOptions{CheckpointID: name, Exit: true})
-}
-
-// Restore is analogous to 'docker start --checkpoint [name]'.
-func (c *Container) Restore(ctx context.Context, name string) error {
- return c.client.ContainerStart(ctx, c.id, types.ContainerStartOptions{CheckpointID: name})
-}
-
-// Logs is analogous to 'docker logs'.
-func (c *Container) Logs(ctx context.Context) (string, error) {
- var out bytes.Buffer
- err := c.logs(ctx, &out, &out)
- return out.String(), err
-}
-
-func (c *Container) logs(ctx context.Context, stdout, stderr *bytes.Buffer) error {
- opts := types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true}
- writer, err := c.client.ContainerLogs(ctx, c.id, opts)
- if err != nil {
- return err
- }
- defer writer.Close()
- _, err = stdcopy.StdCopy(stdout, stderr, writer)
-
- return err
-}
-
-// ID returns the container id.
-func (c *Container) ID() string {
- return c.id
-}
-
-// SandboxPid returns the container's pid.
-func (c *Container) SandboxPid(ctx context.Context) (int, error) {
- resp, err := c.client.ContainerInspect(ctx, c.id)
- if err != nil {
- return -1, err
- }
- return resp.ContainerJSONBase.State.Pid, nil
-}
-
-// ErrNoIP indicates that no IP address is available.
-var ErrNoIP = errors.New("no IP available")
-
-// FindIP returns the IP address of the container.
-func (c *Container) FindIP(ctx context.Context, ipv6 bool) (net.IP, error) {
- resp, err := c.client.ContainerInspect(ctx, c.id)
- if err != nil {
- return nil, err
- }
-
- var ip net.IP
- if ipv6 {
- ip = net.ParseIP(resp.NetworkSettings.DefaultNetworkSettings.GlobalIPv6Address)
- } else {
- ip = net.ParseIP(resp.NetworkSettings.DefaultNetworkSettings.IPAddress)
- }
- if ip == nil {
- return net.IP{}, ErrNoIP
- }
- return ip, nil
-}
-
-// FindPort returns the host port that is mapped to 'sandboxPort'.
-func (c *Container) FindPort(ctx context.Context, sandboxPort int) (int, error) {
- desc, err := c.client.ContainerInspect(ctx, c.id)
- if err != nil {
- return -1, fmt.Errorf("error retrieving port: %v", err)
- }
-
- format := fmt.Sprintf("%d/tcp", sandboxPort)
- ports, ok := desc.NetworkSettings.Ports[nat.Port(format)]
-	if !ok {
-		return -1, fmt.Errorf("port %d/tcp is not mapped", sandboxPort)
-	}
-
-	port, err := strconv.Atoi(ports[0].HostPort)
-	if err != nil {
-		return -1, fmt.Errorf("error parsing port %q: %v", ports[0].HostPort, err)
- }
- return port, nil
-}
-
-// CopyFiles stages the given files in a temporary directory and mounts that
-// directory into the container at the given target path.
-func (c *Container) CopyFiles(opts *RunOpts, target string, sources ...string) {
- dir, err := ioutil.TempDir("", c.Name)
- if err != nil {
- c.copyErr = fmt.Errorf("ioutil.TempDir failed: %v", err)
- return
- }
- c.cleanups = append(c.cleanups, func() { os.RemoveAll(dir) })
- if err := os.Chmod(dir, 0755); err != nil {
- c.copyErr = fmt.Errorf("os.Chmod(%q, 0755) failed: %v", dir, err)
- return
- }
- for _, name := range sources {
- src := name
- if !filepath.IsAbs(src) {
- src, err = testutil.FindFile(name)
- if err != nil {
- c.copyErr = fmt.Errorf("testutil.FindFile(%q) failed: %w", name, err)
- return
- }
- }
- dst := path.Join(dir, path.Base(name))
- if err := testutil.Copy(src, dst); err != nil {
- c.copyErr = fmt.Errorf("testutil.Copy(%q, %q) failed: %v", src, dst, err)
- return
- }
- c.logger.Logf("copy: %s -> %s", src, dst)
- }
- opts.Mounts = append(opts.Mounts, mount.Mount{
- Type: mount.TypeBind,
- Source: dir,
- Target: target,
- ReadOnly: false,
- })
-}
-
-// Status inspects the container and returns its status.
-func (c *Container) Status(ctx context.Context) (types.ContainerState, error) {
- resp, err := c.client.ContainerInspect(ctx, c.id)
- if err != nil {
- return types.ContainerState{}, err
- }
- return *resp.State, err
-}
-
-// Wait waits for the container to exit.
-func (c *Container) Wait(ctx context.Context) error {
- defer c.stopProfiling()
- statusChan, errChan := c.client.ContainerWait(ctx, c.id, container.WaitConditionNotRunning)
- select {
- case err := <-errChan:
- return err
- case res := <-statusChan:
- if res.StatusCode != 0 {
- var msg string
- if res.Error != nil {
- msg = res.Error.Message
- }
- return fmt.Errorf("container returned non-zero status: %d, msg: %q", res.StatusCode, msg)
- }
- return nil
- }
-}
-
-// WaitTimeout waits for the container to exit with a timeout.
-func (c *Container) WaitTimeout(ctx context.Context, timeout time.Duration) error {
- ctx, cancel := context.WithTimeout(ctx, timeout)
- defer cancel()
- statusChan, errChan := c.client.ContainerWait(ctx, c.id, container.WaitConditionNotRunning)
- select {
- case <-ctx.Done():
- if ctx.Err() == context.DeadlineExceeded {
- return fmt.Errorf("container %s timed out after %v seconds", c.Name, timeout.Seconds())
- }
- return nil
- case err := <-errChan:
- return err
- case <-statusChan:
- return nil
- }
-}
-
-// WaitForOutput searches the container logs for the given pattern and returns
-// the first match, or times out.
-func (c *Container) WaitForOutput(ctx context.Context, pattern string, timeout time.Duration) (string, error) {
- matches, err := c.WaitForOutputSubmatch(ctx, pattern, timeout)
- if err != nil {
- return "", err
- }
- if len(matches) == 0 {
-		return "", fmt.Errorf("didn't find pattern %s in logs", pattern)
- }
- return matches[0], nil
-}
-
-// WaitForOutputSubmatch searches container logs for the given
-// pattern or times out. It returns any regexp submatches as well.
-func (c *Container) WaitForOutputSubmatch(ctx context.Context, pattern string, timeout time.Duration) ([]string, error) {
- ctx, cancel := context.WithTimeout(ctx, timeout)
- defer cancel()
- re := regexp.MustCompile(pattern)
- for {
- logs, err := c.Logs(ctx)
- if err != nil {
- return nil, fmt.Errorf("failed to get logs: %v logs: %s", err, logs)
- }
- if matches := re.FindStringSubmatch(logs); matches != nil {
- return matches, nil
- }
- time.Sleep(50 * time.Millisecond)
- }
-}
-
-// stopProfiling stops profiling.
-func (c *Container) stopProfiling() {
- if c.profile != nil {
- if err := c.profile.Stop(c); err != nil {
-			// This most likely means that the container ran for too short a
-			// time to connect and actually collect a profile.
- c.logger.Logf("warning: profile.Stop failed: %v", err)
- }
- }
-}
-
-// Kill kills the container.
-func (c *Container) Kill(ctx context.Context) error {
- defer c.stopProfiling()
- return c.client.ContainerKill(ctx, c.id, "")
-}
-
-// Remove is analogous to 'docker rm'.
-func (c *Container) Remove(ctx context.Context) error {
-	// Remove the container.
- remove := types.ContainerRemoveOptions{
- RemoveVolumes: c.mounts != nil,
- RemoveLinks: c.links != nil,
- Force: true,
- }
- return c.client.ContainerRemove(ctx, c.Name, remove)
-}
-
-// CleanUp kills and deletes the container (best effort).
-func (c *Container) CleanUp(ctx context.Context) {
-	// Execute all cleanups. We execute cleanups here to close any open
-	// connections to the container before killing and removing it. Open
-	// connections can cause Kill and Remove to hang.
- for _, c := range c.cleanups {
- c()
- }
- c.cleanups = nil
-
- // Kill the container.
- if err := c.Kill(ctx); err != nil && !strings.Contains(err.Error(), "is not running") {
- // Just log; can't do anything here.
- c.logger.Logf("error killing container %q: %v", c.Name, err)
- }
-
-	// Remove the container.
- if err := c.Remove(ctx); err != nil {
- c.logger.Logf("error removing container %q: %v", c.Name, err)
- }
-
- // Forget all mounts.
- c.mounts = nil
-}
diff --git a/pkg/test/dockerutil/dockerutil.go b/pkg/test/dockerutil/dockerutil.go
deleted file mode 100644
index a40005799..000000000
--- a/pkg/test/dockerutil/dockerutil.go
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package dockerutil is a collection of utility functions.
-package dockerutil
-
-import (
- "encoding/json"
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "os/exec"
- "regexp"
- "strconv"
- "time"
-
- "gvisor.dev/gvisor/pkg/test/testutil"
-)
-
-var (
- // runtime is the runtime to use for tests. This will be applied to all
- // containers. Note that the default here ("runsc") corresponds to the
- // default used by the installations. This is important, because the
- // default installer for vm_tests (in tools/installers:head, invoked
- // via tools/vm:defs.bzl) will install with this name. So without
- // changing anything, tests should have a runsc runtime available to
- // them. Otherwise installers should update the existing runtime
- // instead of installing a new one.
- runtime = flag.String("runtime", "runsc", "specify which runtime to use")
-
- // config is the default Docker daemon configuration path.
- config = flag.String("config_path", "/etc/docker/daemon.json", "configuration file for reading paths")
-
- // The following flags are for the "pprof" profiler tool.
-
- // pprofBaseDir allows the user to change the directory to which profiles are
- // written. By default, profiles will appear under:
- // /tmp/profile/RUNTIME/CONTAINER_NAME/*.pprof.
-	pprofBaseDir  = flag.String("pprof-dir", "/tmp/profile", "base directory in: BASEDIR/RUNTIME/CONTAINER_NAME/FILENAME (e.g. /tmp/profile/runtime/mycontainer/cpu.pprof)")
- pprofDuration = flag.Duration("pprof-duration", time.Hour, "profiling duration (automatically stopped at container exit)")
-
- // The below flags enable each type of profile. Multiple profiles can be
- // enabled for each run. The profile will be collected from the start.
- pprofBlock = flag.Bool("pprof-block", false, "enables block profiling with runsc debug")
- pprofCPU = flag.Bool("pprof-cpu", false, "enables CPU profiling with runsc debug")
- pprofHeap = flag.Bool("pprof-heap", false, "enables heap profiling with runsc debug")
- pprofMutex = flag.Bool("pprof-mutex", false, "enables mutex profiling with runsc debug")
-)
-
-// EnsureSupportedDockerVersion checks if correct docker is installed.
-//
-// This logs directly to stderr, as it is typically called from a Main wrapper.
-func EnsureSupportedDockerVersion() {
- cmd := exec.Command("docker", "version")
- out, err := cmd.CombinedOutput()
- if err != nil {
- log.Fatalf("error running %q: %v", "docker version", err)
- }
- re := regexp.MustCompile(`Version:\s+(\d+)\.(\d+)\.\d.*`)
- matches := re.FindStringSubmatch(string(out))
- if len(matches) != 3 {
- log.Fatalf("Invalid docker output: %s", out)
- }
- major, _ := strconv.Atoi(matches[1])
- minor, _ := strconv.Atoi(matches[2])
- if major < 17 || (major == 17 && minor < 9) {
- log.Fatalf("Docker version 17.09.0 or greater is required, found: %02d.%02d", major, minor)
- }
-}
-
-// RuntimePath returns the binary path for the current runtime.
-func RuntimePath() (string, error) {
- rs, err := runtimeMap()
- if err != nil {
- return "", err
- }
-
- p, ok := rs["path"].(string)
- if !ok {
- // The runtime does not declare a path.
- return "", fmt.Errorf("runtime does not declare a path: %v", rs)
- }
- return p, nil
-}
-
-// UsingVFS2 returns true if the 'runtime' has the vfs2 flag set.
-// TODO(gvisor.dev/issue/1624): Remove.
-func UsingVFS2() (bool, error) {
- rMap, err := runtimeMap()
- if err != nil {
- return false, err
- }
-
- list, ok := rMap["runtimeArgs"].([]interface{})
- if !ok {
- return false, fmt.Errorf("unexpected format: %v", rMap)
- }
-
- for _, element := range list {
- if element == "--vfs2" {
- return true, nil
- }
- }
- return false, nil
-}
-
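-// runtimeMap returns the declared configuration for the runtime under test,
-// as read from the Docker daemon configuration file (see the config_path
-// flag). For illustration, a matching daemon.json entry (the path and
-// arguments here are examples only) looks roughly like:
-//
-//	{
-//	  "runtimes": {
-//	    "runsc": {
-//	      "path": "/usr/local/bin/runsc",
-//	      "runtimeArgs": ["--vfs2"]
-//	    }
-//	  }
-//	}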
-func runtimeMap() (map[string]interface{}, error) {
- // Read the configuration data; the file must exist.
- configBytes, err := ioutil.ReadFile(*config)
- if err != nil {
- return nil, err
- }
-
- // Unmarshal the configuration.
- c := make(map[string]interface{})
- if err := json.Unmarshal(configBytes, &c); err != nil {
- return nil, err
- }
-
- // Decode the expected configuration.
- r, ok := c["runtimes"]
- if !ok {
- return nil, fmt.Errorf("no runtimes declared: %v", c)
- }
- rs, ok := r.(map[string]interface{})
- if !ok {
- // The runtimes are not a map.
-		return nil, fmt.Errorf("unexpected format: %v", r)
- }
- r, ok = rs[*runtime]
- if !ok {
- // The expected runtime is not declared.
- return nil, fmt.Errorf("runtime %q not found: %v", *runtime, rs)
- }
- rs, ok = r.(map[string]interface{})
- if !ok {
- // The runtime is not a map.
- return nil, fmt.Errorf("unexpected format: %v", r)
- }
- return rs, nil
-}
-
-// Save exports a container image to the given Writer.
-//
-// Note that the writer should be actively consuming the output, otherwise it
-// is not guaranteed that the Save will make any progress and the call may
-// stall indefinitely.
-//
-// This is called by criutil in order to import images.
-func Save(logger testutil.Logger, image string, w io.Writer) error {
- cmd := testutil.Command(logger, "docker", "save", testutil.ImageByName(image))
- cmd.Stdout = w // Send directly to the writer.
- return cmd.Run()
-}
-
-// Runtime returns the value of the flag runtime.
-func Runtime() string {
- return *runtime
-}
diff --git a/pkg/test/dockerutil/exec.go b/pkg/test/dockerutil/exec.go
deleted file mode 100644
index bf968acec..000000000
--- a/pkg/test/dockerutil/exec.go
+++ /dev/null
@@ -1,188 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package dockerutil
-
-import (
- "bytes"
- "context"
- "fmt"
- "time"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/pkg/stdcopy"
-)
-
-// ExecOpts holds arguments for Exec calls.
-type ExecOpts struct {
- // Env are additional environment variables.
- Env []string
-
- // Privileged enables privileged mode.
- Privileged bool
-
- // User is the user to use.
- User string
-
- // Enables Tty and stdin for the created process.
- UseTTY bool
-
- // WorkDir is the working directory of the process.
- WorkDir string
-}
-
-// Exec creates a process inside the container.
-func (c *Container) Exec(ctx context.Context, opts ExecOpts, args ...string) (string, error) {
- p, err := c.doExec(ctx, opts, args)
- if err != nil {
- return "", err
- }
-
- if exitStatus, err := p.WaitExitStatus(ctx); err != nil {
- return "", err
- } else if exitStatus != 0 {
- out, _ := p.Logs()
- return out, fmt.Errorf("process terminated with status: %d", exitStatus)
- }
-
- return p.Logs()
-}
-
-// ExecProcess creates a process inside the container and returns a process struct
-// for the caller to use.
-func (c *Container) ExecProcess(ctx context.Context, opts ExecOpts, args ...string) (Process, error) {
- return c.doExec(ctx, opts, args)
-}
-
-func (c *Container) doExec(ctx context.Context, r ExecOpts, args []string) (Process, error) {
- config := c.execConfig(r, args)
- resp, err := c.client.ContainerExecCreate(ctx, c.id, config)
- if err != nil {
- return Process{}, fmt.Errorf("exec create failed with err: %v", err)
- }
-
- hijack, err := c.client.ContainerExecAttach(ctx, resp.ID, types.ExecStartCheck{})
- if err != nil {
- return Process{}, fmt.Errorf("exec attach failed with err: %v", err)
- }
-
- return Process{
- container: c,
- execid: resp.ID,
- conn: hijack,
- }, nil
-}
-
-func (c *Container) execConfig(r ExecOpts, cmd []string) types.ExecConfig {
- env := append(r.Env, fmt.Sprintf("RUNSC_TEST_NAME=%s", c.Name))
- return types.ExecConfig{
- AttachStdin: r.UseTTY,
- AttachStderr: true,
- AttachStdout: true,
- Cmd: cmd,
- Privileged: r.Privileged,
- WorkingDir: r.WorkDir,
- Env: env,
- Tty: r.UseTTY,
- User: r.User,
- }
-}
-
-// Process represents a containerized process.
-type Process struct {
- container *Container
- execid string
- conn types.HijackedResponse
-}
-
-// Write writes buf to the process's stdin.
-func (p *Process) Write(timeout time.Duration, buf []byte) (int, error) {
- p.conn.Conn.SetDeadline(time.Now().Add(timeout))
- return p.conn.Conn.Write(buf)
-}
-
-// Read returns process's stdout and stderr.
-func (p *Process) Read() (string, string, error) {
- var stdout, stderr bytes.Buffer
- if err := p.read(&stdout, &stderr); err != nil {
- return "", "", err
- }
- return stdout.String(), stderr.String(), nil
-}
-
-// Logs returns combined stdout/stderr from the process.
-func (p *Process) Logs() (string, error) {
- var out bytes.Buffer
- if err := p.read(&out, &out); err != nil {
- return "", err
- }
- return out.String(), nil
-}
-
-func (p *Process) read(stdout, stderr *bytes.Buffer) error {
- _, err := stdcopy.StdCopy(stdout, stderr, p.conn.Reader)
- return err
-}
-
-// ExitCode returns the process's exit code.
-func (p *Process) ExitCode(ctx context.Context) (int, error) {
- _, exitCode, err := p.runningExitCode(ctx)
- return exitCode, err
-}
-
-// IsRunning checks if the process is running.
-func (p *Process) IsRunning(ctx context.Context) (bool, error) {
- running, _, err := p.runningExitCode(ctx)
- return running, err
-}
-
-// WaitExitStatus until process completes and returns exit status.
-func (p *Process) WaitExitStatus(ctx context.Context) (int, error) {
-	waitChan := make(chan int)
-	errChan := make(chan error)
-
-	go func() {
-		for {
-			running, exitcode, err := p.runningExitCode(ctx)
-			if err != nil {
-				errChan <- fmt.Errorf("error waiting for process %s in container %s: %v", p.execid, p.container.Name, err)
-				return
-			}
-			if !running {
-				waitChan <- exitcode
-				return
-			}
-			time.Sleep(time.Millisecond * 500)
-		}
-	}()
-
- select {
- case ws := <-waitChan:
- return ws, nil
- case err := <-errChan:
- return -1, err
- }
-}
-
-// runningExitCode collects if the process is running and the exit code.
-// The exit code is only valid if the process has exited.
-func (p *Process) runningExitCode(ctx context.Context) (bool, int, error) {
-	// If execid is not empty, this is an execed process.
- if p.execid != "" {
- status, err := p.container.client.ContainerExecInspect(ctx, p.execid)
- return status.Running, status.ExitCode, err
- }
-	// Otherwise this is the root process.
- status, err := p.container.Status(ctx)
- return status.Running, status.ExitCode, err
-}
diff --git a/pkg/test/dockerutil/network.go b/pkg/test/dockerutil/network.go
deleted file mode 100644
index dbe17fa5e..000000000
--- a/pkg/test/dockerutil/network.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package dockerutil
-
-import (
- "context"
- "net"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/network"
- "github.com/docker/docker/client"
- "gvisor.dev/gvisor/pkg/test/testutil"
-)
-
-// Network is a docker network.
-type Network struct {
- client *client.Client
- id string
- logger testutil.Logger
- Name string
- containers []*Container
- Subnet *net.IPNet
-}
-
-// NewNetwork sets up the struct for a Docker network. Names of networks
-// will be unique.
-func NewNetwork(ctx context.Context, logger testutil.Logger) *Network {
- client, err := client.NewClientWithOpts(client.FromEnv)
- if err != nil {
- logger.Logf("create client failed with: %v", err)
- return nil
- }
- client.NegotiateAPIVersion(ctx)
-
- return &Network{
- logger: logger,
- Name: testutil.RandomID(logger.Name()),
- client: client,
- }
-}
-
-func (n *Network) networkCreate() types.NetworkCreate {
-	var subnet string
- if n.Subnet != nil {
- subnet = n.Subnet.String()
- }
-
- ipam := network.IPAM{
- Config: []network.IPAMConfig{{
- Subnet: subnet,
- }},
- }
-
- return types.NetworkCreate{
- CheckDuplicate: true,
- IPAM: &ipam,
- }
-}
-
-// Create is analogous to 'docker network create'.
-func (n *Network) Create(ctx context.Context) error {
-	opts := n.networkCreate()
- resp, err := n.client.NetworkCreate(ctx, n.Name, opts)
- if err != nil {
- return err
- }
- n.id = resp.ID
- return nil
-}
-
-// Connect is analogous to 'docker network connect' with the arguments provided.
-func (n *Network) Connect(ctx context.Context, container *Container, ipv4, ipv6 string) error {
- settings := network.EndpointSettings{
- IPAMConfig: &network.EndpointIPAMConfig{
- IPv4Address: ipv4,
- IPv6Address: ipv6,
- },
- }
- err := n.client.NetworkConnect(ctx, n.id, container.id, &settings)
- if err == nil {
- n.containers = append(n.containers, container)
- }
- return err
-}
-
-// Inspect returns this network's info.
-func (n *Network) Inspect(ctx context.Context) (types.NetworkResource, error) {
- return n.client.NetworkInspect(ctx, n.id, types.NetworkInspectOptions{Verbose: true})
-}
-
-// Cleanup cleans up the docker network.
-func (n *Network) Cleanup(ctx context.Context) error {
- n.containers = nil
-
- return n.client.NetworkRemove(ctx, n.id)
-}
diff --git a/pkg/test/dockerutil/profile.go b/pkg/test/dockerutil/profile.go
deleted file mode 100644
index 12fe98b16..000000000
--- a/pkg/test/dockerutil/profile.go
+++ /dev/null
@@ -1,150 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package dockerutil
-
-import (
- "context"
- "fmt"
- "os"
- "os/exec"
- "path/filepath"
- "time"
-
- "golang.org/x/sys/unix"
-)
-
-// profile represents profile-like operations on a container.
-//
-// It is meant to be added to containers such that the container type calls
-// the profile during its lifecycle. Standard implementations are below.
-
-// profile is for running profiles with 'runsc debug'.
-type profile struct {
- BasePath string
- Types []string
- Duration time.Duration
- cmd *exec.Cmd
-}
-
-// profileInit initializes a profile object, if required.
-//
-// N.B. The profiling filename initialized here will use the *image*
-// name, and not the unique container name. This is intentional. Most
-// of the time, profiling will be used for benchmarks. Benchmarks will
-// be run iteratively until a sufficiently large N is reached. It is
-// useful in this context to overwrite previous runs, and generate a
-// single profile result for the final test.
-func (c *Container) profileInit(image string) {
- if !*pprofBlock && !*pprofCPU && !*pprofMutex && !*pprofHeap {
- return // Nothing to do.
- }
- c.profile = &profile{
- BasePath: filepath.Join(*pprofBaseDir, c.runtime, c.logger.Name(), image),
- Duration: *pprofDuration,
- }
- if *pprofCPU {
- c.profile.Types = append(c.profile.Types, "cpu")
- }
- if *pprofHeap {
- c.profile.Types = append(c.profile.Types, "heap")
- }
- if *pprofMutex {
- c.profile.Types = append(c.profile.Types, "mutex")
- }
- if *pprofBlock {
- c.profile.Types = append(c.profile.Types, "block")
- }
-}
-
-// createProcess creates the collection process.
-func (p *profile) createProcess(c *Container) error {
- // Ensure our directory exists.
- if err := os.MkdirAll(p.BasePath, 0755); err != nil {
- return err
- }
-
- // Find the runtime to invoke.
- path, err := RuntimePath()
- if err != nil {
- return fmt.Errorf("failed to get runtime path: %v", err)
- }
-
- // The root directory of this container's runtime.
- rootDir := fmt.Sprintf("/var/run/docker/runtime-%s/moby", c.runtime)
- if _, err := os.Stat(rootDir); os.IsNotExist(err) {
- // In docker v20+, due to https://github.com/moby/moby/issues/42345 the
- // rootDir seems to always be the following.
- rootDir = "/var/run/docker/runtime-runc/moby"
- }
-
- // Format is `runsc --root=rootDir debug --profile-*=file --duration=24h containerID`.
- args := []string{fmt.Sprintf("--root=%s", rootDir), "debug"}
- for _, profileArg := range p.Types {
- outputPath := filepath.Join(p.BasePath, fmt.Sprintf("%s.pprof", profileArg))
- args = append(args, fmt.Sprintf("--profile-%s=%s", profileArg, outputPath))
- }
- args = append(args, fmt.Sprintf("--duration=%s", p.Duration)) // Or until container exits.
- args = append(args, fmt.Sprintf("--delay=%s", p.Duration)) // Ditto.
- args = append(args, c.ID())
-
- // Best effort wait until container is running.
- for now := time.Now(); time.Since(now) < 5*time.Second; {
- if status, err := c.Status(context.Background()); err != nil {
- return fmt.Errorf("failed to get status with: %v", err)
- } else if status.Running {
- break
- }
- time.Sleep(100 * time.Millisecond)
- }
- p.cmd = exec.Command(path, args...)
- p.cmd.Stderr = os.Stderr // Pass through errors.
- if err := p.cmd.Start(); err != nil {
- return fmt.Errorf("start process failed: %v", err)
- }
-
- return nil
-}
-
-// killProcess kills the process, if running.
-func (p *profile) killProcess() error {
- if p.cmd != nil && p.cmd.Process != nil {
- return p.cmd.Process.Signal(unix.SIGTERM)
- }
- return nil
-}
-
-// waitProcess waits for the process, if running.
-func (p *profile) waitProcess() error {
- defer func() { p.cmd = nil }()
- if p.cmd != nil {
- return p.cmd.Wait()
- }
- return nil
-}
-
-// Start is called when profiling is started.
-func (p *profile) Start(c *Container) error {
- return p.createProcess(c)
-}
-
-// Stop is called when profiling is stopped.
-func (p *profile) Stop(c *Container) error {
- killErr := p.killProcess()
- waitErr := p.waitProcess()
- if waitErr != nil && killErr != nil {
- return killErr
- }
-	return waitErr // If the wait succeeded, ignore any kill error.
-}
diff --git a/pkg/test/dockerutil/profile_test.go b/pkg/test/dockerutil/profile_test.go
deleted file mode 100644
index 4fe9ce15c..000000000
--- a/pkg/test/dockerutil/profile_test.go
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package dockerutil
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "testing"
- "time"
-)
-
-type testCase struct {
- name string
- profile profile
- expectedFiles []string
-}
-
-func TestProfile(t *testing.T) {
- // Basepath and expected file names for each type of profile.
- tmpDir, err := ioutil.TempDir("", "")
- if err != nil {
- t.Fatalf("unable to create temporary directory: %v", err)
- }
- defer os.RemoveAll(tmpDir)
-
- // All expected names.
- basePath := tmpDir
- block := "block.pprof"
- cpu := "cpu.pprof"
- heap := "heap.pprof"
- mutex := "mutex.pprof"
-
- testCases := []testCase{
- {
- name: "One",
- profile: profile{
- BasePath: basePath,
- Types: []string{"cpu"},
- Duration: 2 * time.Second,
- },
- expectedFiles: []string{cpu},
- },
- {
- name: "All",
- profile: profile{
- BasePath: basePath,
- Types: []string{"block", "cpu", "heap", "mutex"},
- Duration: 2 * time.Second,
- },
- expectedFiles: []string{block, cpu, heap, mutex},
- },
- }
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- ctx := context.Background()
- c := MakeContainer(ctx, t)
-
-			// Set basepath to include the test case name so there are no conflicts.
- localProfile := tc.profile // Copy it.
- localProfile.BasePath = filepath.Join(localProfile.BasePath, tc.name)
-
- // Set directly on the container, to avoid flags.
- c.profile = &localProfile
-
- func() {
- defer c.CleanUp(ctx)
-
- // Start a container.
- if err := c.Spawn(ctx, RunOpts{
- Image: "basic/alpine",
- }, "sleep", "1000"); err != nil {
- t.Fatalf("run failed with: %v", err)
- }
-
- if status, err := c.Status(context.Background()); !status.Running {
- t.Fatalf("container is not yet running: %+v err: %v", status, err)
- }
-
- // End early if the expected files exist and have data.
- for start := time.Now(); time.Since(start) < localProfile.Duration; time.Sleep(100 * time.Millisecond) {
- if err := checkFiles(localProfile.BasePath, tc.expectedFiles); err == nil {
- break
- }
- }
- }()
-
- // Check all expected files exist and have data.
- if err := checkFiles(localProfile.BasePath, tc.expectedFiles); err != nil {
-				t.Fatal(err)
- }
- })
- }
-}
-
-func checkFiles(basePath string, expectedFiles []string) error {
- for _, file := range expectedFiles {
- stat, err := os.Stat(filepath.Join(basePath, file))
- if err != nil {
- return fmt.Errorf("stat failed with: %v", err)
- } else if stat.Size() < 1 {
- return fmt.Errorf("file not written to: %+v", stat)
- }
- }
- return nil
-}
-
-func TestMain(m *testing.M) {
- EnsureSupportedDockerVersion()
- os.Exit(m.Run())
-}
diff --git a/pkg/test/testutil/BUILD b/pkg/test/testutil/BUILD
deleted file mode 100644
index 7ff13cf12..000000000
--- a/pkg/test/testutil/BUILD
+++ /dev/null
@@ -1,24 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "testutil",
- testonly = 1,
- srcs = [
- "sh.go",
- "testutil.go",
- "testutil_runfiles.go",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/sentry/watchdog",
- "//pkg/sync",
- "//runsc/config",
- "//runsc/specutils",
- "@com_github_cenkalti_backoff//:go_default_library",
- "@com_github_kr_pty//:go_default_library",
- "@com_github_opencontainers_runtime_spec//specs-go:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/test/testutil/sh.go b/pkg/test/testutil/sh.go
deleted file mode 100644
index cd5b0557a..000000000
--- a/pkg/test/testutil/sh.go
+++ /dev/null
@@ -1,515 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package testutil
-
-import (
- "bytes"
- "context"
- "fmt"
- "io"
- "os"
- "os/exec"
- "strings"
- "time"
-
- "github.com/kr/pty"
- "golang.org/x/sys/unix"
-)
-
-// Prompt is used as the shell prompt.
-// It is meant to be unique enough to not be seen in command outputs.
-const Prompt = "PROMPT> "
-
-// Simplistic shell string escape.
-func shellEscape(s string) string {
- // specialChars is used to determine whether s needs quoting at all.
- const specialChars = "\\'\"`${[|&;<>()*?! \t\n"
- // If s needs quoting, escapedChars is the set of characters that are
- // escaped with a backslash.
- const escapedChars = "\\\"$`"
- if len(s) == 0 {
- return "''"
- }
- if !strings.ContainsAny(s, specialChars) {
- return s
- }
- var b bytes.Buffer
- b.WriteString("\"")
- for _, c := range s {
- if strings.ContainsAny(string(c), escapedChars) {
- b.WriteString("\\")
- }
- b.WriteRune(c)
- }
- b.WriteString("\"")
- return b.String()
-}
-
-type byteOrError struct {
- b byte
- err error
-}
-
-// Shell manages a /bin/sh invocation with convenience functions to handle I/O.
-// The shell is run in its own interactive TTY and should present its prompt.
-type Shell struct {
- // cmd is a reference to the underlying sh process.
- cmd *exec.Cmd
- // cmdFinished is closed when cmd exits.
- cmdFinished chan struct{}
-
- // echo is whether the shell will echo input back to us.
-	// echo is whether the shell will echo input back to us.
-	// This helps set expectations about getting feedback for written bytes.
- // Control characters we expect to see in the shell.
- controlCharIntr string
- controlCharEOF string
-
- // ptyMaster and ptyReplica are the TTY pair associated with the shell.
- ptyMaster *os.File
- ptyReplica *os.File
- // readCh is a channel where everything read from ptyMaster is written.
- readCh chan byteOrError
-
- // logger is used for logging. It may be nil.
- logger Logger
-}
-
-// cleanup kills the shell process and closes the TTY.
-// Users of this library get a reference to this function with NewShell.
-func (s *Shell) cleanup() {
- s.logf("cleanup", "Shell cleanup started.")
- if s.cmd.ProcessState == nil {
- if err := s.cmd.Process.Kill(); err != nil {
- s.logf("cleanup", "cannot kill shell process: %v", err)
- }
- // We don't log the error returned by Wait because the monitorExit
- // goroutine will already do so.
- s.cmd.Wait()
- }
- s.ptyReplica.Close()
- s.ptyMaster.Close()
- // Wait for monitorExit goroutine to write exit status to the debug log.
- <-s.cmdFinished
- // Empty out everything in the readCh, but don't wait too long for it.
- var extraBytes bytes.Buffer
- unreadTimeout := time.After(100 * time.Millisecond)
-unreadLoop:
- for {
- select {
- case r, ok := <-s.readCh:
- if !ok {
- break unreadLoop
- } else if r.err == nil {
- extraBytes.WriteByte(r.b)
- }
- case <-unreadTimeout:
- break unreadLoop
- }
- }
- if extraBytes.Len() > 0 {
- s.logIO("unread", extraBytes.Bytes(), nil)
- }
- s.logf("cleanup", "Shell cleanup complete.")
-}
-
-// logIO logs byte I/O to both standard logging and the test log, if provided.
-func (s *Shell) logIO(prefix string, b []byte, err error) {
- var sb strings.Builder
- if len(b) > 0 {
- sb.WriteString(fmt.Sprintf("%q", b))
- } else {
- sb.WriteString("(nothing)")
- }
- if err != nil {
- sb.WriteString(fmt.Sprintf(" [error: %v]", err))
- }
- s.logf(prefix, "%s", sb.String())
-}
-
-// logf logs something to both standard logging and the test log, if provided.
-func (s *Shell) logf(prefix, format string, values ...interface{}) {
- if s.logger != nil {
- s.logger.Logf("[%s] %s", prefix, fmt.Sprintf(format, values...))
- }
-}
-
-// monitorExit waits for the shell process to exit and logs the exit result.
-func (s *Shell) monitorExit() {
- if err := s.cmd.Wait(); err != nil {
- s.logf("cmd", "shell process terminated: %v", err)
- } else {
- s.logf("cmd", "shell process terminated successfully")
- }
- close(s.cmdFinished)
-}
-
-// reader continuously reads the shell output and populates readCh.
-func (s *Shell) reader(ctx context.Context) {
- b := make([]byte, 4096)
- defer close(s.readCh)
- for {
- select {
- case <-s.cmdFinished:
- // Shell process terminated; stop trying to read.
- return
- case <-ctx.Done():
- // Shell process will also have terminated in this case;
- // stop trying to read.
- // We don't print an error here because doing so would print this in the
- // normal case where the context passed to NewShell is canceled at the
- // end of a successful test.
- return
- default:
- // Shell still running, try reading.
- }
- if got, err := s.ptyMaster.Read(b); err != nil {
- s.readCh <- byteOrError{err: err}
- if err == io.EOF {
- return
- }
- } else {
- for i := 0; i < got; i++ {
- s.readCh <- byteOrError{b: b[i]}
- }
- }
- }
-}
-
-// readByte reads a single byte, respecting the context.
-func (s *Shell) readByte(ctx context.Context) (byte, error) {
- select {
- case <-ctx.Done():
- return 0, ctx.Err()
- case r := <-s.readCh:
- return r.b, r.err
- }
-}
-
-// readLoop reads as many bytes as possible until the context expires, b is
-// full, or a short time passes. It returns how many bytes it has successfully
-// read.
-func (s *Shell) readLoop(ctx context.Context, b []byte) (int, error) {
- soonCtx, soonCancel := context.WithTimeout(ctx, 5*time.Second)
- defer soonCancel()
- var i int
- for i = 0; i < len(b) && soonCtx.Err() == nil; i++ {
- next, err := s.readByte(soonCtx)
- if err != nil {
- if i > 0 {
-				s.logIO("read", b[:i], err)
- } else {
- s.logIO("read", nil, err)
- }
- return i, err
- }
- b[i] = next
- }
- s.logIO("read", b[:i], soonCtx.Err())
- return i, soonCtx.Err()
-}
-
-// readLine reads a single line. Strips out all \r characters for convenience.
-// Upon error, it will still return what it has read so far.
-// It will also exit quickly if the line content it has read so far (without a
-// line break) matches `prompt`.
-func (s *Shell) readLine(ctx context.Context, prompt string) ([]byte, error) {
- soonCtx, soonCancel := context.WithTimeout(ctx, 5*time.Second)
- defer soonCancel()
- var lineData bytes.Buffer
- var b byte
- var err error
- for soonCtx.Err() == nil && b != '\n' {
- b, err = s.readByte(soonCtx)
- if err != nil {
- data := lineData.Bytes()
- s.logIO("read", data, err)
- return data, err
- }
- if b != '\r' {
- lineData.WriteByte(b)
- }
- if bytes.Equal(lineData.Bytes(), []byte(prompt)) {
- // Assume that there will not be any further output if we get the prompt.
- // This avoids waiting for the read deadline just to read the prompt.
- break
- }
- }
- data := lineData.Bytes()
- s.logIO("read", data, soonCtx.Err())
- return data, soonCtx.Err()
-}
-
-// Expect verifies that the next `len(want)` bytes we read match `want`.
-func (s *Shell) Expect(ctx context.Context, want []byte) error {
- errPrefix := fmt.Sprintf("want(%q)", want)
- b := make([]byte, len(want))
- got, err := s.readLoop(ctx, b)
- if err != nil {
- if ctx.Err() != nil {
- return fmt.Errorf("%s: context done (%w), got: %q", errPrefix, err, b[:got])
- }
- return fmt.Errorf("%s: %w", errPrefix, err)
- }
- if got < len(want) {
- return fmt.Errorf("%s: short read (read %d bytes, expected %d): %q", errPrefix, got, len(want), b[:got])
- }
- if !bytes.Equal(b, want) {
- return fmt.Errorf("got %q want %q", b, want)
- }
- return nil
-}
-
-// ExpectString verifies that the next `len(want)` bytes we read match `want`.
-func (s *Shell) ExpectString(ctx context.Context, want string) error {
- return s.Expect(ctx, []byte(want))
-}
-
-// ExpectPrompt verifies that the next few bytes we read are the shell prompt.
-func (s *Shell) ExpectPrompt(ctx context.Context) error {
- return s.ExpectString(ctx, Prompt)
-}
-
-// ExpectEmptyLine verifies that the next few bytes we read are an empty line,
-// as defined by any number of carriage return or line break characters.
-func (s *Shell) ExpectEmptyLine(ctx context.Context) error {
- line, err := s.readLine(ctx, Prompt)
- if err != nil {
- return fmt.Errorf("cannot read line: %w", err)
- }
- if strings.Trim(string(line), "\r\n") != "" {
- return fmt.Errorf("line was not empty: %q", line)
- }
- return nil
-}
-
-// ExpectLine verifies that the next `len(want)` bytes we read match `want`,
-// followed by carriage returns or newline characters.
-func (s *Shell) ExpectLine(ctx context.Context, want string) error {
- if err := s.ExpectString(ctx, want); err != nil {
- return err
- }
- if err := s.ExpectEmptyLine(ctx); err != nil {
- return fmt.Errorf("ExpectLine(%q): no line break: %w", want, err)
- }
- return nil
-}
-
-// Write writes `b` to the shell and verifies that all of them get written.
-func (s *Shell) Write(b []byte) error {
- written, err := s.ptyMaster.Write(b)
- s.logIO("write", b[:written], err)
- if err != nil {
- return fmt.Errorf("write(%q): %w", b, err)
- }
- if written != len(b) {
- return fmt.Errorf("write(%q): wrote %d of %d bytes (%q)", b, written, len(b), b[:written])
- }
- return nil
-}
-
-// WriteLine writes `line` (to which \n will be appended) to the shell.
-// If the shell is in `echo` mode, it will also check that these bytes are
-// echoed back.
-func (s *Shell) WriteLine(ctx context.Context, line string) error {
- if err := s.Write([]byte(line + "\n")); err != nil {
- return err
- }
- if s.echo {
- // We expect to see everything we've typed.
- if err := s.ExpectLine(ctx, line); err != nil {
- return fmt.Errorf("echo: %w", err)
- }
- }
- return nil
-}
-
-// StartCommand is a convenience wrapper for WriteLine that mimics entering a
-// command line and pressing Enter. It does some basic shell argument escaping.
-func (s *Shell) StartCommand(ctx context.Context, cmd ...string) error {
- escaped := make([]string, len(cmd))
- for i, arg := range cmd {
- escaped[i] = shellEscape(arg)
- }
- return s.WriteLine(ctx, strings.Join(escaped, " "))
-}
-
-// GetCommandOutput gets all following bytes until the prompt is encountered.
-// This is useful for matching the output of a command.
-// All \r are removed for ease of matching.
-func (s *Shell) GetCommandOutput(ctx context.Context) ([]byte, error) {
- return s.ReadUntil(ctx, Prompt)
-}
-
-// ReadUntil gets all following bytes until a certain line is encountered.
-// This final line is not returned as part of the output, but everything before
-// it (including the \n) is included.
-// This is useful for matching the output of a command.
-// All \r are removed for ease of matching.
-func (s *Shell) ReadUntil(ctx context.Context, finalLine string) ([]byte, error) {
- var output bytes.Buffer
- for ctx.Err() == nil {
- line, err := s.readLine(ctx, finalLine)
- if err != nil {
- return nil, err
- }
- if bytes.Equal(line, []byte(finalLine)) {
- break
- }
- // readLine ensures that `line` either matches `finalLine` or contains \n.
- // Thus we can be confident that `line` has a \n here.
- output.Write(line)
- }
- return output.Bytes(), ctx.Err()
-}
-
-// RunCommand is a convenience wrapper for StartCommand + GetCommandOutput.
-func (s *Shell) RunCommand(ctx context.Context, cmd ...string) ([]byte, error) {
- if err := s.StartCommand(ctx, cmd...); err != nil {
- return nil, err
- }
- return s.GetCommandOutput(ctx)
-}
-
-// RefreshSTTY interprets the output of `stty -a` to determine whether echo
-// mode is enabled and to refresh other terminal settings.
-// It will assume that any line matching `expectPrompt` means the end of
-// the `stty -a` output.
-// Why do this rather than using `tcgets`? Because this function can be used in
-// conjunction with sub-shell processes that can allocate their own TTYs.
-func (s *Shell) RefreshSTTY(ctx context.Context, expectPrompt string) error {
- // Temporarily assume we will not get any output.
- // If echo is actually on, we'll get the "stty -a" line as if it was command
- // output. This is OK because we parse the output generously.
- s.echo = false
- if err := s.WriteLine(ctx, "stty -a"); err != nil {
- return fmt.Errorf("could not run `stty -a`: %w", err)
- }
- sttyOutput, err := s.ReadUntil(ctx, expectPrompt)
- if err != nil {
- return fmt.Errorf("cannot get `stty -a` output: %w", err)
- }
-
- // Set default control characters in case we can't see them in the output.
- s.controlCharIntr = "^C"
- s.controlCharEOF = "^D"
- // stty output has two general notations:
- // `a = b;` (for control characters), and `option` vs `-option` (for boolean
- // options). We parse both kinds here.
- // For `a = b;`, `controlChar` contains `a`, and `previousToken` is used to
- // set `controlChar` to `previousToken` when we see an "=" token.
- var previousToken, controlChar string
- for _, token := range strings.Fields(string(sttyOutput)) {
- if controlChar != "" {
- value := strings.TrimSuffix(token, ";")
- switch controlChar {
- case "intr":
- s.controlCharIntr = value
- case "eof":
- s.controlCharEOF = value
- }
- controlChar = ""
- } else {
- switch token {
- case "=":
- controlChar = previousToken
- case "-echo":
- s.echo = false
- case "echo":
- s.echo = true
- }
- }
- previousToken = token
- }
- s.logf("stty", "refreshed settings: echo=%v, intr=%q, eof=%q", s.echo, s.controlCharIntr, s.controlCharEOF)
- return nil
-}
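For illustration (not part of the original file), here is how the token loop above walks a representative fragment of `stty -a` output:

	// Input tokens: "intr" "=" "^C;" "eof" "=" "^D;" "-echo"
	//
	//   token    previousToken  controlChar  effect
	//   intr     ""             ""           previousToken = "intr"
	//   =        "intr"         ""           controlChar = "intr"
	//   ^C;      "="            "intr"       s.controlCharIntr = "^C"
	//   eof      "^C;"          ""           previousToken = "eof"
	//   =        "eof"          ""           controlChar = "eof"
	//   ^D;      "="            "eof"        s.controlCharEOF = "^D"
	//   -echo    "^D;"          ""           s.echo = false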
-
-// sendControlCode sends `code` to the shell and expects to see `repr`.
-// If `expectLinebreak` is true, it also expects to see a linebreak.
-func (s *Shell) sendControlCode(ctx context.Context, code byte, repr string, expectLinebreak bool) error {
- if err := s.Write([]byte{code}); err != nil {
- return fmt.Errorf("cannot send %q: %w", code, err)
- }
- if err := s.ExpectString(ctx, repr); err != nil {
- return fmt.Errorf("did not see %s: %w", repr, err)
- }
- if expectLinebreak {
- if err := s.ExpectEmptyLine(ctx); err != nil {
- return fmt.Errorf("linebreak after %s: %v", repr, err)
- }
- }
- return nil
-}
-
-// SendInterrupt sends the \x03 (Ctrl+C) control character to the shell.
-func (s *Shell) SendInterrupt(ctx context.Context, expectLinebreak bool) error {
- return s.sendControlCode(ctx, 0x03, s.controlCharIntr, expectLinebreak)
-}
-
-// SendEOF sends the \x04 (Ctrl+D) control character to the shell.
-func (s *Shell) SendEOF(ctx context.Context, expectLinebreak bool) error {
- return s.sendControlCode(ctx, 0x04, s.controlCharEOF, expectLinebreak)
-}
-
-// NewShell returns a new managed sh process along with a cleanup function.
-// The caller is expected to call this function once it no longer needs the
-// shell.
-// The optional passed-in logger will be used for logging.
-func NewShell(ctx context.Context, logger Logger) (*Shell, func(), error) {
- ptyMaster, ptyReplica, err := pty.Open()
- if err != nil {
- return nil, nil, fmt.Errorf("cannot create PTY: %w", err)
- }
- cmd := exec.CommandContext(ctx, "/bin/sh", "--noprofile", "--norc", "-i")
- cmd.Stdin = ptyReplica
- cmd.Stdout = ptyReplica
- cmd.Stderr = ptyReplica
- cmd.SysProcAttr = &unix.SysProcAttr{
- Setsid: true,
- Setctty: true,
- Ctty: 0,
- }
- cmd.Env = append(cmd.Env, fmt.Sprintf("PS1=%s", Prompt))
- if err := cmd.Start(); err != nil {
- return nil, nil, fmt.Errorf("cannot start shell: %w", err)
- }
- s := &Shell{
- cmd: cmd,
- cmdFinished: make(chan struct{}),
- ptyMaster: ptyMaster,
- ptyReplica: ptyReplica,
- readCh: make(chan byteOrError, 1<<20),
- logger: logger,
- }
- s.logf("creation", "Shell spawned.")
- go s.monitorExit()
- go s.reader(ctx)
- setupCtx, setupCancel := context.WithTimeout(ctx, 5*time.Second)
- defer setupCancel()
- // We expect to see the prompt immediately on startup,
- // since the shell is started in interactive mode.
- if err := s.ExpectPrompt(setupCtx); err != nil {
- s.cleanup()
- return nil, nil, fmt.Errorf("did not get initial prompt: %w", err)
- }
- s.logf("creation", "Initial prompt observed.")
- // Get initial TTY settings.
- if err := s.RefreshSTTY(setupCtx, Prompt); err != nil {
- s.cleanup()
- return nil, nil, fmt.Errorf("cannot get initial STTY settings: %w", err)
- }
- return s, s.cleanup, nil
-}
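A minimal usage sketch of the Shell helper defined above, as it might appear in a test in the same package (the test name and command are hypothetical; *testing.T satisfies the Logger interface via its Name and Logf methods):

func TestShellEcho(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	s, cleanup, err := NewShell(ctx, t)
	if err != nil {
		t.Fatalf("cannot create shell: %v", err)
	}
	defer cleanup()
	out, err := s.RunCommand(ctx, "echo", "hello")
	if err != nil {
		t.Fatalf("RunCommand failed: %v", err)
	}
	if !strings.Contains(string(out), "hello") {
		t.Errorf("unexpected output: %q", out)
	}
}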
diff --git a/pkg/test/testutil/testutil.go b/pkg/test/testutil/testutil.go
deleted file mode 100644
index f6a3e34c7..000000000
--- a/pkg/test/testutil/testutil.go
+++ /dev/null
@@ -1,599 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package testutil contains utility functions for runsc tests.
-package testutil
-
-import (
- "bufio"
- "context"
- "debug/elf"
- "encoding/base32"
- "encoding/json"
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "math"
- "math/rand"
- "net/http"
- "os"
- "os/exec"
- "os/signal"
- "path"
- "path/filepath"
- "strconv"
- "strings"
- "testing"
- "time"
-
- "github.com/cenkalti/backoff"
- specs "github.com/opencontainers/runtime-spec/specs-go"
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/sentry/watchdog"
- "gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/runsc/config"
- "gvisor.dev/gvisor/runsc/specutils"
-)
-
-var (
- checkpoint = flag.Bool("checkpoint", true, "control checkpoint/restore support")
- partition = flag.Int("partition", 1, "partition number, this is 1-indexed")
- totalPartitions = flag.Int("total_partitions", 1, "total number of partitions")
- isRunningWithHostNet = flag.Bool("hostnet", false, "whether test is running with hostnet")
-)
-
-// IsCheckpointSupported returns the relevant command line flag.
-func IsCheckpointSupported() bool {
- return *checkpoint
-}
-
-// IsRunningWithHostNet returns the relevant command line flag.
-func IsRunningWithHostNet() bool {
- return *isRunningWithHostNet
-}
-
-// ImageByName mangles the image name used locally. This depends on the image
-// build infrastructure in images/ and tools/vm.
-func ImageByName(name string) string {
- return fmt.Sprintf("gvisor.dev/images/%s", name)
-}
-
-// ConfigureExePath configures the executable for runsc in the test environment.
-func ConfigureExePath() error {
- path, err := FindFile("runsc/runsc")
- if err != nil {
- return err
- }
- specutils.ExePath = path
- return nil
-}
-
-// TmpDir returns the absolute path to a writable directory that can be used as
-// scratch by the test.
-func TmpDir() string {
- if dir, ok := os.LookupEnv("TEST_TMPDIR"); ok {
- return dir
- }
- return "/tmp"
-}
-
-// Logger is a simple logging wrapper.
-//
-// This is designed to be implemented by *testing.T.
-type Logger interface {
- Name() string
- Logf(fmt string, args ...interface{})
-}
-
-// DefaultLogger logs using the log package.
-type DefaultLogger string
-
-// Name implements Logger.Name.
-func (d DefaultLogger) Name() string {
- return string(d)
-}
-
-// Logf implements Logger.Logf.
-func (d DefaultLogger) Logf(fmt string, args ...interface{}) {
- log.Printf(fmt, args...)
-}
-
-// multiLogger logs to multiple Loggers.
-type multiLogger []Logger
-
-// Name implements Logger.Name.
-func (m multiLogger) Name() string {
- names := make([]string, len(m))
- for i, l := range m {
- names[i] = l.Name()
- }
- return strings.Join(names, "+")
-}
-
-// Logf implements Logger.Logf.
-func (m multiLogger) Logf(fmt string, args ...interface{}) {
- for _, l := range m {
- l.Logf(fmt, args...)
- }
-}
-
-// NewMultiLogger returns a new Logger that logs on multiple Loggers.
-func NewMultiLogger(loggers ...Logger) Logger {
- return multiLogger(loggers)
-}
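Illustrative use of the loggers above inside a test (the "setup" label is arbitrary):

// Log both to the test's log and to the standard log package.
logger := NewMultiLogger(t, DefaultLogger("setup"))
logger.Logf("starting container %s", "test-container")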
-
-// Cmd is a simple wrapper.
-type Cmd struct {
- logger Logger
- *exec.Cmd
-}
-
-// CombinedOutput runs the command and returns its combined output, logging
-// both the output and any error.
-func (c *Cmd) CombinedOutput() ([]byte, error) {
- out, err := c.Cmd.CombinedOutput()
- if len(out) > 0 {
- c.logger.Logf("output: %s", string(out))
- }
- if err != nil {
- c.logger.Logf("error: %v", err)
- }
- return out, err
-}
-
-// Command is a simple wrapper around exec.Command, that logs.
-func Command(logger Logger, args ...string) *Cmd {
- logger.Logf("command: %s", strings.Join(args, " "))
- return &Cmd{
- logger: logger,
- Cmd: exec.Command(args[0], args[1:]...),
- }
-}
-
-// TestConfig returns the default configuration to use in tests. Note that
-// 'RootDir' must be set by the caller if required.
-func TestConfig(t *testing.T) *config.Config {
- logDir := os.TempDir()
- if dir, ok := os.LookupEnv("TEST_UNDECLARED_OUTPUTS_DIR"); ok {
- logDir = dir + "/"
- }
-
- // Only register flags if config is being used. Otherwise anyone that uses
- // testutil will get flags registered and they may conflict.
- config.RegisterFlags()
-
- conf, err := config.NewFromFlags()
- if err != nil {
- panic(err)
- }
- // Change test defaults.
- conf.Debug = true
- conf.DebugLog = path.Join(logDir, "runsc.log."+t.Name()+".%TIMESTAMP%.%COMMAND%")
- conf.LogPackets = true
- conf.Network = config.NetworkNone
- conf.Strace = true
- conf.TestOnlyAllowRunAsCurrentUserWithoutChroot = true
- conf.WatchdogAction = watchdog.Panic
- return conf
-}
-
-// NewSpecWithArgs creates a simple spec with the given args suitable for use
-// in tests.
-func NewSpecWithArgs(args ...string) *specs.Spec {
- return &specs.Spec{
- // The host filesystem root is the container root.
- Root: &specs.Root{
- Path: "/",
- Readonly: true,
- },
- Process: &specs.Process{
- Args: args,
- Env: []string{
- "PATH=" + os.Getenv("PATH"),
- },
- Capabilities: specutils.AllCapabilities(),
- },
- Mounts: []specs.Mount{
- // Hide the host /etc to avoid any side-effects.
- // For example, bash reads /etc/passwd and if it is
- // very big, tests can fail by timeout.
- {
- Type: "tmpfs",
- Destination: "/etc",
- },
- // Root is readonly, but many tests want to write to tmpdir.
- // This creates a writable mount inside the root. Also, when tmpdir points
- // to "/tmp", it makes the the actual /tmp to be mounted and not a tmpfs
- // inside the sentry.
- {
- Type: "bind",
- Destination: TmpDir(),
- Source: TmpDir(),
- },
- },
- Hostname: "runsc-test-hostname",
- }
-}
-
-// SetupRootDir creates a root directory for containers.
-func SetupRootDir() (string, func(), error) {
- rootDir, err := ioutil.TempDir(TmpDir(), "containers")
- if err != nil {
- return "", nil, fmt.Errorf("error creating root dir: %v", err)
- }
- return rootDir, func() { os.RemoveAll(rootDir) }, nil
-}
-
-// SetupContainer creates a bundle and root dir for the container, generates a
-// test config, and writes the spec to config.json in the bundle dir.
-func SetupContainer(spec *specs.Spec, conf *config.Config) (rootDir, bundleDir string, cleanup func(), err error) {
- rootDir, rootCleanup, err := SetupRootDir()
- if err != nil {
- return "", "", nil, err
- }
- conf.RootDir = rootDir
- bundleDir, bundleCleanup, err := SetupBundleDir(spec)
- if err != nil {
- rootCleanup()
- return "", "", nil, err
- }
- return rootDir, bundleDir, func() {
- bundleCleanup()
- rootCleanup()
- }, err
-}
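A sketch of how these helpers are typically combined in a test (the command is hypothetical):

func setupExample(t *testing.T) {
	spec := NewSpecWithArgs("sleep", "100")
	conf := TestConfig(t)
	rootDir, bundleDir, cleanup, err := SetupContainer(spec, conf)
	if err != nil {
		t.Fatalf("SetupContainer failed: %v", err)
	}
	defer cleanup()
	// conf.RootDir now points at rootDir, and bundleDir contains the
	// spec written out as config.json.
	t.Logf("rootDir=%s bundleDir=%s", rootDir, bundleDir)
}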
-
-// SetupBundleDir creates a bundle dir and writes the spec to config.json.
-func SetupBundleDir(spec *specs.Spec) (string, func(), error) {
- bundleDir, err := ioutil.TempDir(TmpDir(), "bundle")
- if err != nil {
- return "", nil, fmt.Errorf("error creating bundle dir: %v", err)
- }
- cleanup := func() { os.RemoveAll(bundleDir) }
- if err := writeSpec(bundleDir, spec); err != nil {
- cleanup()
- return "", nil, fmt.Errorf("error writing spec: %v", err)
- }
- return bundleDir, cleanup, nil
-}
-
-// writeSpec writes the spec to disk in the given directory.
-func writeSpec(dir string, spec *specs.Spec) error {
- b, err := json.Marshal(spec)
- if err != nil {
- return err
- }
- return ioutil.WriteFile(filepath.Join(dir, "config.json"), b, 0755)
-}
-
-// idRandomSrc is a pseudo-random generator used in RandomID.
-var idRandomSrc = rand.New(rand.NewSource(time.Now().UnixNano()))
-
-// idRandomSrcMtx is the mutex protecting idRandomSrc.Read from being used
-// concurrently in different goroutines.
-var idRandomSrcMtx sync.Mutex
-
-// RandomID returns 20 random bytes following the given prefix.
-func RandomID(prefix string) string {
- // Read 20 random bytes.
- b := make([]byte, 20)
- // Rand.Read is not safe for concurrent use. Packetimpact tests can be run in
- // parallel now, so we have to protect the Read with a mutex. Otherwise we'll
- // run into name conflicts.
- // https://golang.org/pkg/math/rand/#Rand.Read
- idRandomSrcMtx.Lock()
- // "[Read] always returns len(p) and a nil error." --godoc
- if _, err := idRandomSrc.Read(b); err != nil {
- idRandomSrcMtx.Unlock()
- panic("rand.Read failed: " + err.Error())
- }
- idRandomSrcMtx.Unlock()
- if prefix != "" {
- prefix = prefix + "-"
- }
- return fmt.Sprintf("%s%s", prefix, base32.StdEncoding.EncodeToString(b))
-}
-
-// RandomContainerID generates a random container id for each test.
-//
-// The container id is used to create an abstract unix domain socket, which
-// must be unique. While creating two containers with the same name is
-// forbidden, sometimes between test runs the socket does not get cleaned up
-// quickly enough, causing container creation to fail.
-func RandomContainerID() string {
- return RandomID("test-container")
-}
-
-// Copy copies file from src to dst.
-func Copy(src, dst string) error {
- in, err := os.Open(src)
- if err != nil {
- return err
- }
- defer in.Close()
-
- st, err := in.Stat()
- if err != nil {
- return err
- }
-
- out, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, st.Mode().Perm())
- if err != nil {
- return err
- }
- defer out.Close()
-
- // Mirror the local user's permissions across all users. This is
- // because as we inject things into the container, the UID/GID will
- // change. Also, the build system may generate artifacts with different
- // modes. At the top-level (volume mapping) we have a big read-only
- // knob that can be applied to prevent modifications.
- //
- // Note that this must be done via a separate Chmod call, otherwise the
- // current process's umask will get in the way.
- var mode os.FileMode
- if st.Mode()&0100 != 0 {
- mode |= 0111
- }
- if st.Mode()&0200 != 0 {
- mode |= 0222
- }
- if st.Mode()&0400 != 0 {
- mode |= 0444
- }
- if err := os.Chmod(dst, mode); err != nil {
- return err
- }
-
- _, err = io.Copy(out, in)
- return err
-}
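A worked example of the permission mirroring above (illustrative): a 0750 source (owner rwx, group r-x) yields 0111|0222|0444 = 0777 on dst, while a 0640 source (owner rw, group r) yields 0222|0444 = 0666.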
-
-// Poll is a shorthand function to poll for something with given timeout.
-func Poll(cb func() error, timeout time.Duration) error {
- ctx, cancel := context.WithTimeout(context.Background(), timeout)
- defer cancel()
- return PollContext(ctx, cb)
-}
-
-// PollContext is like Poll, but takes a context instead of a timeout.
-func PollContext(ctx context.Context, cb func() error) error {
- b := backoff.WithContext(backoff.NewConstantBackOff(100*time.Millisecond), ctx)
- return backoff.Retry(cb, b)
-}
-
-// WaitForHTTP tries GET requests on a port until the call succeeds or timeout.
-func WaitForHTTP(ip string, port int, timeout time.Duration) error {
- cb := func() error {
- c := &http.Client{
-			// Calculate the timeout so that a minimum of 5 attempts can be made.
- Timeout: timeout / 5,
- }
- url := fmt.Sprintf("http://%s:%d/", ip, port)
- resp, err := c.Get(url)
- if err != nil {
- log.Printf("Waiting %s: %v", url, err)
- return err
- }
- resp.Body.Close()
- return nil
- }
- return Poll(cb, timeout)
-}
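Illustrative usage of Poll and WaitForHTTP inside a test (the address, port, and file path are hypothetical):

// Wait up to 30 seconds for a test HTTP server to come up.
if err := WaitForHTTP("192.168.0.2", 8080, 30*time.Second); err != nil {
	t.Fatalf("server never became reachable: %v", err)
}

// Poll works the same way for arbitrary conditions.
if err := Poll(func() error {
	_, err := os.Stat("/tmp/server-ready")
	return err
}, 10*time.Second); err != nil {
	t.Fatalf("server never signaled readiness: %v", err)
}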
-
-// Reaper reaps child processes.
-type Reaper struct {
- // mu protects ch, which will be nil if the reaper is not running.
- mu sync.Mutex
- ch chan os.Signal
-}
-
-// Start starts reaping child processes.
-func (r *Reaper) Start() {
- r.mu.Lock()
- defer r.mu.Unlock()
-
- if r.ch != nil {
- panic("reaper.Start called on a running reaper")
- }
-
- r.ch = make(chan os.Signal, 1)
- signal.Notify(r.ch, unix.SIGCHLD)
-
- go func() {
- for {
- r.mu.Lock()
- ch := r.ch
- r.mu.Unlock()
- if ch == nil {
- return
- }
-
- _, ok := <-ch
- if !ok {
- // Channel closed.
- return
- }
- for {
- cpid, _ := unix.Wait4(-1, nil, unix.WNOHANG, nil)
- if cpid < 1 {
- break
- }
- }
- }
- }()
-}
-
-// Stop stops reaping child processes.
-func (r *Reaper) Stop() {
- r.mu.Lock()
- defer r.mu.Unlock()
-
- if r.ch == nil {
- panic("reaper.Stop called on a stopped reaper")
- }
-
- signal.Stop(r.ch)
- close(r.ch)
- r.ch = nil
-}
-
-// StartReaper is a helper that starts a new Reaper and returns a function to
-// stop it.
-func StartReaper() func() {
- r := &Reaper{}
- r.Start()
- return r.Stop
-}
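Typical usage in tests that fork subprocesses (illustrative):

func TestSomethingThatForks(t *testing.T) {
	defer StartReaper()() // reap any children the test leaves behind
	// ... spawn and exercise subprocesses ...
}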
-
-// WaitUntilRead reads from the given reader until the wanted string is found
-// or the timeout expires.
-func WaitUntilRead(r io.Reader, want string, timeout time.Duration) error {
- sc := bufio.NewScanner(r)
-	// doneCh carries the scan result: true if the wanted string was found,
-	// false if the reader was exhausted first. Closing it (deferred below)
-	// signals the scanning goroutine to stop.
- doneCh := make(chan bool)
- defer close(doneCh)
- go func() {
- for sc.Scan() {
- t := sc.Text()
- if strings.Contains(t, want) {
- doneCh <- true
- return
- }
- select {
- case <-doneCh:
- return
- default:
- }
- }
- doneCh <- false
- }()
-
- select {
- case <-time.After(timeout):
- return fmt.Errorf("timeout waiting to read %q", want)
- case res := <-doneCh:
- if !res {
- return fmt.Errorf("reader closed while waiting to read %q", want)
- }
- return nil
- }
-}
-
-// KillCommand kills the process running cmd, if it has been started. It
-// returns an error if it cannot kill the process, unless the failure is
-// because the process has already exited.
-//
-// KillCommand will also reap the process.
-func KillCommand(cmd *exec.Cmd) error {
- if cmd.Process == nil {
- return nil
- }
- if err := cmd.Process.Kill(); err != nil {
- if !strings.Contains(err.Error(), "process already finished") {
- return fmt.Errorf("failed to kill process %v: %v", cmd, err)
- }
- }
- return cmd.Wait()
-}
-
-// WriteTmpFile writes text to a temporary file, closes the file, and returns
-// the name of the file. A cleanup function is also returned.
-func WriteTmpFile(pattern, text string) (string, func(), error) {
- file, err := ioutil.TempFile(TmpDir(), pattern)
- if err != nil {
- return "", nil, err
- }
- defer file.Close()
- if _, err := file.Write([]byte(text)); err != nil {
- return "", nil, err
- }
- return file.Name(), func() { os.RemoveAll(file.Name()) }, nil
-}
-
-// IsStatic returns true iff the given file is a static binary.
-func IsStatic(filename string) (bool, error) {
- f, err := elf.Open(filename)
- if err != nil {
- return false, err
- }
- for _, prog := range f.Progs {
- if prog.Type == elf.PT_INTERP {
- return false, nil // Has interpreter.
- }
- }
- return true, nil
-}
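Illustrative usage inside a test, combining IsStatic with FindFile (defined in testutil_runfiles.go, deleted below):

path, err := FindFile("runsc/runsc")
if err != nil {
	t.Fatalf("cannot find runsc: %v", err)
}
static, err := IsStatic(path)
if err != nil {
	t.Fatalf("IsStatic(%q) failed: %v", path, err)
}
t.Logf("runsc statically linked: %v", static)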
-
-// TouchShardStatusFile indicates to Bazel that the test runner supports
-// sharding by creating or updating the last modified date of the file
-// specified by TEST_SHARD_STATUS_FILE.
-//
-// See https://docs.bazel.build/versions/master/test-encyclopedia.html#role-of-the-test-runner.
-func TouchShardStatusFile() error {
- if statusFile, ok := os.LookupEnv("TEST_SHARD_STATUS_FILE"); ok {
- cmd := exec.Command("touch", statusFile)
- if b, err := cmd.CombinedOutput(); err != nil {
- return fmt.Errorf("touch %q failed:\n output: %s\n error: %s", statusFile, string(b), err.Error())
- }
- }
- return nil
-}
-
-// TestIndicesForShard returns indices for this test shard based on the
-// TEST_SHARD_INDEX and TEST_TOTAL_SHARDS environment vars, as well as
-// the passed partition flags.
-//
-// If either of the env vars is not present, then the function returns
-// indices for all tests. If there are more shards than there are tests, then
-// the returned list may be empty.
-func TestIndicesForShard(numTests int) ([]int, error) {
- var (
- shardIndex = 0
- shardTotal = 1
- )
-
- indexStr, indexOk := os.LookupEnv("TEST_SHARD_INDEX")
- totalStr, totalOk := os.LookupEnv("TEST_TOTAL_SHARDS")
- if indexOk && totalOk {
- // Parse index and total to ints.
- var err error
- shardIndex, err = strconv.Atoi(indexStr)
- if err != nil {
- return nil, fmt.Errorf("invalid TEST_SHARD_INDEX %q: %v", indexStr, err)
- }
- shardTotal, err = strconv.Atoi(totalStr)
- if err != nil {
- return nil, fmt.Errorf("invalid TEST_TOTAL_SHARDS %q: %v", totalStr, err)
- }
- }
-
- // Combine with the partitions.
- partitionSize := shardTotal
- shardTotal = (*totalPartitions) * shardTotal
- shardIndex = partitionSize*(*partition-1) + shardIndex
-
- // Calculate!
- var indices []int
- numBlocks := int(math.Ceil(float64(numTests) / float64(shardTotal)))
- for i := 0; i < numBlocks; i++ {
- pick := i*shardTotal + shardIndex
- if pick < numTests {
- indices = append(indices, pick)
- }
- }
- return indices, nil
-}
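A worked example of the sharding arithmetic above:

// TEST_SHARD_INDEX=1, TEST_TOTAL_SHARDS=2, --total_partitions=2, --partition=2:
//   partitionSize = 2
//   shardTotal    = 2 * 2       = 4
//   shardIndex    = 2*(2-1) + 1 = 3
// With numTests = 10: numBlocks = ceil(10/4) = 3, candidate picks are
// 3, 7, 11; 11 is out of range, so the returned indices are [3 7].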
diff --git a/pkg/test/testutil/testutil_runfiles.go b/pkg/test/testutil/testutil_runfiles.go
deleted file mode 100644
index 1dbd48a47..000000000
--- a/pkg/test/testutil/testutil_runfiles.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.1
-// +build go1.1
-
-package testutil
-
-import (
- "fmt"
- "os"
- "path/filepath"
-)
-
-// FindFile searches for a file inside the test run environment. It returns the
-// full path to the file. It fails if none or more than one file is found.
-func FindFile(path string) (string, error) {
- wd, err := os.Getwd()
- if err != nil {
- return "", err
- }
-
- // The test root is demarcated by a path element called "__main__". Search for
- // it backwards from the working directory.
- root := wd
- for {
- dir, name := filepath.Split(root)
- if name == "__main__" {
- break
- }
- if len(dir) == 0 {
- return "", fmt.Errorf("directory __main__ not found in %q", wd)
- }
- // Remove ending slash to loop around.
- root = dir[:len(dir)-1]
- }
-
- // Annoyingly, bazel adds the build type to the directory path for go
- // binaries, but not for c++ binaries. We use two different patterns to
-	// find our file.
- patterns := []string{
- // Try the obvious path first.
- filepath.Join(root, path),
- // If it was a go binary, use a wildcard to match the build
- // type. The pattern is: /test-path/__main__/directories/*/file.
- filepath.Join(root, filepath.Dir(path), "*", filepath.Base(path)),
- }
-
- for _, p := range patterns {
- matches, err := filepath.Glob(p)
- if err != nil {
- // "The only possible returned error is ErrBadPattern,
- // when pattern is malformed." -godoc
- return "", fmt.Errorf("error globbing %q: %v", p, err)
- }
- switch len(matches) {
- case 0:
- // Try the next pattern.
- case 1:
- // We found it.
- return matches[0], nil
- default:
- return "", fmt.Errorf("more than one match found for %q: %s", path, matches)
- }
- }
- return "", fmt.Errorf("file %q not found", path)
-}
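Illustrative usage inside a test (this mirrors ConfigureExePath in testutil.go above, which calls FindFile("runsc/runsc")):

bin, err := FindFile("runsc/runsc")
if err != nil {
	t.Fatalf("cannot locate runsc binary: %v", err)
}
t.Logf("found binary at %s", bin)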
diff --git a/pkg/unet/BUILD b/pkg/unet/BUILD
deleted file mode 100644
index 234125c38..000000000
--- a/pkg/unet/BUILD
+++ /dev/null
@@ -1,29 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "unet",
- srcs = [
- "unet.go",
- "unet_unsafe.go",
- ],
- visibility = ["//visibility:public"],
- deps = [
- "//pkg/sync",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "unet_test",
- size = "small",
- srcs = [
- "unet_test.go",
- ],
- library = ":unet",
- deps = [
- "//pkg/sync",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/unet/unet_state_autogen.go b/pkg/unet/unet_state_autogen.go
new file mode 100644
index 000000000..9bbf31d35
--- /dev/null
+++ b/pkg/unet/unet_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package unet
diff --git a/pkg/unet/unet_test.go b/pkg/unet/unet_test.go
deleted file mode 100644
index 9875a3cda..000000000
--- a/pkg/unet/unet_test.go
+++ /dev/null
@@ -1,739 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package unet
-
-import (
- "io/ioutil"
- "os"
- "path/filepath"
- "reflect"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/sync"
-)
-
-func randomFilename() (string, error) {
-	// Return the name of a randomly generated file in the test dir.
- f, err := ioutil.TempFile("", "unet-test")
- if err != nil {
- return "", err
- }
- file := f.Name()
- os.Remove(file)
- f.Close()
-
- cwd, err := os.Getwd()
- if err != nil {
- return "", err
- }
-
-	// NOTE(b/26918832): We try to use a relative path if possible. This
-	// helps us stay within the unix socket path length limit.
- if rel, err := filepath.Rel(cwd, file); err == nil {
- return rel, nil
- }
-
- return file, nil
-}
-
-func TestConnectFailure(t *testing.T) {
- name, err := randomFilename()
- if err != nil {
- t.Fatalf("Unable to generate file, got err %v expected nil", err)
- }
-
- if _, err := Connect(name, false); err == nil {
- t.Fatalf("Connect was successful, expected err")
- }
-}
-
-func TestBindFailure(t *testing.T) {
- name, err := randomFilename()
- if err != nil {
- t.Fatalf("Unable to generate file, got err %v expected nil", err)
- }
-
- ss, err := BindAndListen(name, false)
- if err != nil {
- t.Fatalf("First bind failed, got err %v expected nil", err)
- }
- defer ss.Close()
-
- if _, err = BindAndListen(name, false); err == nil {
- t.Fatalf("Second bind succeeded, expected non-nil err")
- }
-}
-
-func TestMultipleAccept(t *testing.T) {
- name, err := randomFilename()
- if err != nil {
- t.Fatalf("Unable to generate file, got err %v expected nil", err)
- }
-
- ss, err := BindAndListen(name, false)
- if err != nil {
- t.Fatalf("First bind failed, got err %v expected nil", err)
- }
- defer ss.Close()
-
- // Connect backlog times asynchronously.
- var wg sync.WaitGroup
- defer wg.Wait()
- for i := 0; i < backlog; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
- s, err := Connect(name, false)
- if err != nil {
- t.Errorf("Connect failed, got err %v expected nil", err)
- return
- }
- s.Close()
- }()
- }
-
- // Accept backlog times.
- for i := 0; i < backlog; i++ {
- s, err := ss.Accept()
- if err != nil {
- t.Errorf("Accept failed, got err %v expected nil", err)
- continue
- }
- s.Close()
- }
-}
-
-func TestServerClose(t *testing.T) {
- name, err := randomFilename()
- if err != nil {
- t.Fatalf("Unable to generate file, got err %v expected nil", err)
- }
-
- ss, err := BindAndListen(name, false)
- if err != nil {
- t.Fatalf("First bind failed, got err %v expected nil", err)
- }
-
- // Make sure the first close succeeds.
- if err := ss.Close(); err != nil {
- t.Fatalf("First close failed, got err %v expected nil", err)
- }
-
- // The second one should fail.
- if err := ss.Close(); err == nil {
- t.Fatalf("Second close succeeded, expected non-nil err")
- }
-}
-
-func socketPair(t *testing.T, packet bool) (*Socket, *Socket) {
- name, err := randomFilename()
- if err != nil {
- t.Fatalf("Unable to generate file, got err %v expected nil", err)
- }
-
- // Bind a server.
- ss, err := BindAndListen(name, packet)
- if err != nil {
- t.Fatalf("Error binding, got %v expected nil", err)
- }
- defer ss.Close()
-
- // Accept a client.
- acceptSocket := make(chan *Socket)
- acceptErr := make(chan error)
- go func() {
- server, err := ss.Accept()
- if err != nil {
- acceptErr <- err
- }
- acceptSocket <- server
- }()
-
- // Connect the client.
- client, err := Connect(name, packet)
- if err != nil {
- t.Fatalf("Error connecting, got %v expected nil", err)
- }
-
- // Grab the server handle.
- select {
- case server := <-acceptSocket:
- return server, client
- case err := <-acceptErr:
- t.Fatalf("Accept error: %v", err)
- }
- panic("unreachable")
-}
-
-func TestSendRecv(t *testing.T) {
- server, client := socketPair(t, false)
- defer server.Close()
- defer client.Close()
-
- // Write on the client.
- w := client.Writer(true)
- if n, err := w.WriteVec([][]byte{{'a'}}); n != 1 || err != nil {
- t.Fatalf("For client write, got n=%d err=%v, expected n=1 err=nil", n, err)
- }
-
- // Read on the server.
- b := [][]byte{{'b'}}
- r := server.Reader(true)
- if n, err := r.ReadVec(b); n != 1 || err != nil {
- t.Fatalf("For server read, got n=%d err=%v, expected n=1 err=nil", n, err)
- }
- if b[0][0] != 'a' {
- t.Fatalf("Got bad read data, got %c, expected a", b[0][0])
- }
-}
-
-// TestSymmetric exists to assert that the two sockets received from socketPair
-// are interchangeable. They should be; this just provides a basic sanity check
-// by running TestSendRecv "backwards".
-func TestSymmetric(t *testing.T) {
- server, client := socketPair(t, false)
- defer server.Close()
- defer client.Close()
-
- // Write on the server.
- w := server.Writer(true)
- if n, err := w.WriteVec([][]byte{{'a'}}); n != 1 || err != nil {
- t.Fatalf("For server write, got n=%d err=%v, expected n=1 err=nil", n, err)
- }
-
- // Read on the client.
- b := [][]byte{{'b'}}
- r := client.Reader(true)
- if n, err := r.ReadVec(b); n != 1 || err != nil {
- t.Fatalf("For client read, got n=%d err=%v, expected n=1 err=nil", n, err)
- }
- if b[0][0] != 'a' {
- t.Fatalf("Got bad read data, got %c, expected a", b[0][0])
- }
-}
-
-func TestPacket(t *testing.T) {
- server, client := socketPair(t, true)
- defer server.Close()
- defer client.Close()
-
- // Write on the client.
- w := client.Writer(true)
- if n, err := w.WriteVec([][]byte{{'a'}}); n != 1 || err != nil {
- t.Fatalf("For client write, got n=%d err=%v, expected n=1 err=nil", n, err)
- }
-
- // Write on the client again.
- w = client.Writer(true)
- if n, err := w.WriteVec([][]byte{{'a'}}); n != 1 || err != nil {
- t.Fatalf("For client write, got n=%d err=%v, expected n=1 err=nil", n, err)
- }
-
- // Read on the server.
- //
- // This should only get back a single byte, despite the buffer
- // being size two. This is because it's a _packet_ buffer.
- b := [][]byte{{'b', 'b'}}
- r := server.Reader(true)
- if n, err := r.ReadVec(b); n != 1 || err != nil {
- t.Fatalf("For server read, got n=%d err=%v, expected n=1 err=nil", n, err)
- }
- if b[0][0] != 'a' {
- t.Fatalf("Got bad read data, got %c, expected a", b[0][0])
- }
-
- // Do it again.
- r = server.Reader(true)
- if n, err := r.ReadVec(b); n != 1 || err != nil {
- t.Fatalf("For server read, got n=%d err=%v, expected n=1 err=nil", n, err)
- }
- if b[0][0] != 'a' {
- t.Fatalf("Got bad read data, got %c, expected a", b[0][0])
- }
-}
-
-func TestClose(t *testing.T) {
- server, client := socketPair(t, false)
- defer server.Close()
-
- // Make sure the first close succeeds.
- if err := client.Close(); err != nil {
- t.Fatalf("First close failed, got err %v expected nil", err)
- }
-
- // The second one should fail.
- if err := client.Close(); err == nil {
- t.Fatalf("Second close succeeded, expected non-nil err")
- }
-}
-
-func TestNonBlockingSend(t *testing.T) {
- server, client := socketPair(t, false)
- defer server.Close()
- defer client.Close()
-
- // Try up to 1000 writes, of 1000 bytes.
- blockCount := 0
- for i := 0; i < 1000; i++ {
- w := client.Writer(false)
- if n, err := w.WriteVec([][]byte{make([]byte, 1000)}); n != 1000 || err != nil {
- if err == unix.EWOULDBLOCK || err == unix.EAGAIN {
- // We're good. That's what we wanted.
- blockCount++
- } else {
- t.Fatalf("For client write, got n=%d err=%v, expected n=1000 err=nil", n, err)
- }
- }
- }
-
- if blockCount == 1000 {
- // Shouldn't have _always_ blocked.
- t.Fatalf("Socket always blocked!")
- } else if blockCount == 0 {
- // Should have started blocking eventually.
- t.Fatalf("Socket never blocked!")
- }
-}
-
-func TestNonBlockingRecv(t *testing.T) {
- server, client := socketPair(t, false)
- defer server.Close()
- defer client.Close()
-
- b := [][]byte{{'b'}}
- r := client.Reader(false)
-
- // Expected to block immediately.
- _, err := r.ReadVec(b)
- if err != unix.EWOULDBLOCK && err != unix.EAGAIN {
- t.Fatalf("Read didn't block, got err %v expected blocking err", err)
- }
-
- // Put some data in the pipe.
- w := server.Writer(false)
- if n, err := w.WriteVec(b); n != 1 || err != nil {
- t.Fatalf("Write failed with n=%d err=%v, expected n=1 err=nil", n, err)
- }
-
- // Expect it not to block.
- if n, err := r.ReadVec(b); n != 1 || err != nil {
- t.Fatalf("Read failed with n=%d err=%v, expected n=1 err=nil", n, err)
- }
-
- // Expect it to return a block error again.
- r = client.Reader(false)
- _, err = r.ReadVec(b)
- if err != unix.EWOULDBLOCK && err != unix.EAGAIN {
- t.Fatalf("Read didn't block, got err %v expected blocking err", err)
- }
-}
-
-func TestRecvVectors(t *testing.T) {
- server, client := socketPair(t, false)
- defer server.Close()
- defer client.Close()
-
- // Write on the client.
- w := client.Writer(true)
- if n, err := w.WriteVec([][]byte{{'a', 'b'}}); n != 2 || err != nil {
- t.Fatalf("For client write, got n=%d err=%v, expected n=2 err=nil", n, err)
- }
-
- // Read on the server.
- b := [][]byte{{'c'}, {'c'}}
- r := server.Reader(true)
- if n, err := r.ReadVec(b); n != 2 || err != nil {
- t.Fatalf("For server read, got n=%d err=%v, expected n=2 err=nil", n, err)
- }
- if b[0][0] != 'a' || b[1][0] != 'b' {
- t.Fatalf("Got bad read data, got %c,%c, expected a,b", b[0][0], b[1][0])
- }
-}
-
-func TestSendVectors(t *testing.T) {
- server, client := socketPair(t, false)
- defer server.Close()
- defer client.Close()
-
- // Write on the client.
- w := client.Writer(true)
- if n, err := w.WriteVec([][]byte{{'a'}, {'b'}}); n != 2 || err != nil {
- t.Fatalf("For client write, got n=%d err=%v, expected n=2 err=nil", n, err)
- }
-
- // Read on the server.
- b := [][]byte{{'c', 'c'}}
- r := server.Reader(true)
- if n, err := r.ReadVec(b); n != 2 || err != nil {
- t.Fatalf("For server read, got n=%d err=%v, expected n=2 err=nil", n, err)
- }
- if b[0][0] != 'a' || b[0][1] != 'b' {
- t.Fatalf("Got bad read data, got %c,%c, expected a,b", b[0][0], b[0][1])
- }
-}
-
-func TestSendFDsNotEnabled(t *testing.T) {
- server, client := socketPair(t, false)
- defer server.Close()
- defer client.Close()
-
- // Write on the server.
- w := server.Writer(true)
- w.PackFDs(0, 1, 2)
- if n, err := w.WriteVec([][]byte{{'a'}}); n != 1 || err != nil {
- t.Fatalf("For server write, got n=%d err=%v, expected n=1 err=nil", n, err)
- }
-
- // Read on the client, without enabling FDs.
- b := [][]byte{{'b'}}
- r := client.Reader(true)
- if n, err := r.ReadVec(b); n != 1 || err != nil {
- t.Fatalf("For client read, got n=%d err=%v, expected n=1 err=nil", n, err)
- }
- if b[0][0] != 'a' {
- t.Fatalf("Got bad read data, got %c, expected a", b[0][0])
- }
-
- // Make sure the FDs are not received.
- fds, err := r.ExtractFDs()
- if len(fds) != 0 || err != nil {
- t.Fatalf("Got fds=%v err=%v, expected len(fds)=0 err=nil", fds, err)
- }
-}
-
-func sendFDs(t *testing.T, s *Socket, fds []int) {
- w := s.Writer(true)
- w.PackFDs(fds...)
- if n, err := w.WriteVec([][]byte{{'a'}}); n != 1 || err != nil {
- t.Fatalf("For write, got n=%d err=%v, expected n=1 err=nil", n, err)
- }
-}
-
-func recvFDs(t *testing.T, s *Socket, enableSize int, origFDs []int) {
- expected := len(origFDs)
-
- // Count the number of FDs.
- preEntries, err := ioutil.ReadDir("/proc/self/fd")
- if err != nil {
- t.Fatalf("Can't readdir, got err %v expected nil", err)
- }
-
- // Read on the client.
- b := [][]byte{{'b'}}
- r := s.Reader(true)
- if enableSize >= 0 {
- r.EnableFDs(enableSize)
- }
- if n, err := r.ReadVec(b); n != 1 || err != nil {
- t.Fatalf("For client read, got n=%d err=%v, expected n=1 err=nil", n, err)
- }
- if b[0][0] != 'a' {
- t.Fatalf("Got bad read data, got %c, expected a", b[0][0])
- }
-
- // Count the new number of FDs.
- postEntries, err := ioutil.ReadDir("/proc/self/fd")
- if err != nil {
- t.Fatalf("Can't readdir, got err %v expected nil", err)
- }
- if len(preEntries)+expected != len(postEntries) {
- t.Errorf("Process fd count isn't right, expected %d got %d", len(preEntries)+expected, len(postEntries))
- }
-
- // Make sure the FDs are there.
- fds, err := r.ExtractFDs()
- if len(fds) != expected || err != nil {
- t.Fatalf("Got fds=%v err=%v, expected len(fds)=%d err=nil", fds, err, expected)
- }
-
- // Make sure they are different from the originals.
- for i := 0; i < len(fds); i++ {
- if fds[i] == origFDs[i] {
- t.Errorf("Got original fd for index %d, expected different", i)
- }
- }
-
- // Make sure they can be accessed as expected.
- for i := 0; i < len(fds); i++ {
- var st unix.Stat_t
- if err := unix.Fstat(fds[i], &st); err != nil {
- t.Errorf("fds[%d] can't be stated, got err %v expected nil", i, err)
- }
- }
-
- // Close them off.
- r.CloseFDs()
-
- // Make sure the count is back to normal.
- finalEntries, err := ioutil.ReadDir("/proc/self/fd")
- if err != nil {
- t.Fatalf("Can't readdir, got err %v expected nil", err)
- }
- if len(finalEntries) != len(preEntries) {
- t.Errorf("Process fd count isn't right, expected %d got %d", len(preEntries), len(finalEntries))
- }
-}
-
-func TestFDsSingle(t *testing.T) {
- server, client := socketPair(t, false)
- defer server.Close()
- defer client.Close()
-
- sendFDs(t, server, []int{0})
- recvFDs(t, client, 1, []int{0})
-}
-
-func TestFDsMultiple(t *testing.T) {
- server, client := socketPair(t, false)
- defer server.Close()
- defer client.Close()
-
- // Basic case, multiple FDs.
- sendFDs(t, server, []int{0, 1, 2})
- recvFDs(t, client, 3, []int{0, 1, 2})
-}
-
-// See TestSymmetric above.
-func TestFDsSymmetric(t *testing.T) {
- server, client := socketPair(t, false)
- defer server.Close()
- defer client.Close()
-
- sendFDs(t, server, []int{0, 1, 2})
- recvFDs(t, client, 3, []int{0, 1, 2})
-}
-
-func TestFDsReceiveLargeBuffer(t *testing.T) {
- server, client := socketPair(t, false)
- defer server.Close()
- defer client.Close()
-
- sendFDs(t, server, []int{0})
- recvFDs(t, client, 3, []int{0})
-}
-
-func TestFDsReceiveSmallBuffer(t *testing.T) {
- server, client := socketPair(t, false)
- defer server.Close()
- defer client.Close()
-
- sendFDs(t, server, []int{0, 1, 2})
-
- // Per the spec, we may still receive more than the buffer. In fact,
- // it'll be rounded up and we can receive two with a size one buffer.
- recvFDs(t, client, 1, []int{0, 1})
-}
-
-func TestFDsReceiveNotEnabled(t *testing.T) {
- server, client := socketPair(t, false)
- defer server.Close()
- defer client.Close()
-
- sendFDs(t, server, []int{0})
- recvFDs(t, client, -1, []int{})
-}
-
-func TestFDsReceiveSizeZero(t *testing.T) {
- server, client := socketPair(t, false)
- defer server.Close()
- defer client.Close()
-
- sendFDs(t, server, []int{0})
- recvFDs(t, client, 0, []int{})
-}
-
-func TestGetPeerCred(t *testing.T) {
- server, client := socketPair(t, false)
- defer server.Close()
- defer client.Close()
-
- want := &unix.Ucred{
- Pid: int32(os.Getpid()),
- Uid: uint32(os.Getuid()),
- Gid: uint32(os.Getgid()),
- }
-
- if got, err := client.GetPeerCred(); err != nil || !reflect.DeepEqual(got, want) {
- t.Errorf("GetPeerCred() = %v, %v, want = %+v, %+v", got, err, want, nil)
- }
-}
-
-func newClosedSocket() (*Socket, error) {
- fd, err := unix.Socket(unix.AF_UNIX, unix.SOCK_STREAM, 0)
- if err != nil {
- return nil, err
- }
-
- s, err := NewSocket(fd)
- if err != nil {
- unix.Close(fd)
- return nil, err
- }
-
- return s, s.Close()
-}
-
-func TestGetPeerCredFailure(t *testing.T) {
- s, err := newClosedSocket()
- if err != nil {
- t.Fatalf("newClosedSocket got error %v want nil", err)
- }
-
- want := "bad file descriptor"
- if _, err := s.GetPeerCred(); err == nil || err.Error() != want {
- t.Errorf("s.GetPeerCred() = %v, want = %s", err, want)
- }
-}
-
-func TestAcceptClosed(t *testing.T) {
- name, err := randomFilename()
- if err != nil {
- t.Fatalf("Unable to generate file, got err %v expected nil", err)
- }
-
- ss, err := BindAndListen(name, false)
- if err != nil {
- t.Fatalf("Bind failed, got err %v expected nil", err)
- }
-
- if err := ss.Close(); err != nil {
- t.Fatalf("Close failed, got err %v expected nil", err)
- }
-
- if _, err := ss.Accept(); err == nil {
- t.Errorf("Accept on closed SocketServer, got err %v, want != nil", err)
- }
-}
-
-func TestCloseAfterAcceptStart(t *testing.T) {
- name, err := randomFilename()
- if err != nil {
- t.Fatalf("Unable to generate file, got err %v expected nil", err)
- }
-
- ss, err := BindAndListen(name, false)
- if err != nil {
- t.Fatalf("Bind failed, got err %v expected nil", err)
- }
-
- wg := sync.WaitGroup{}
- wg.Add(1)
- go func() {
- defer wg.Done()
- time.Sleep(50 * time.Millisecond)
- if err := ss.Close(); err != nil {
- t.Errorf("Close failed, got err %v expected nil", err)
- }
- }()
-
- if _, err := ss.Accept(); err == nil {
- t.Errorf("Accept on closed SocketServer, got err %v, want != nil", err)
- }
-
- wg.Wait()
-}
-
-func TestReleaseAfterAcceptStart(t *testing.T) {
- name, err := randomFilename()
- if err != nil {
- t.Fatalf("Unable to generate file, got err %v expected nil", err)
- }
-
- ss, err := BindAndListen(name, false)
- if err != nil {
- t.Fatalf("Bind failed, got err %v expected nil", err)
- }
-
- wg := sync.WaitGroup{}
- wg.Add(1)
- go func() {
- defer wg.Done()
- time.Sleep(50 * time.Millisecond)
- fd, err := ss.Release()
- if err != nil {
- t.Errorf("Release failed, got err %v expected nil", err)
- }
- unix.Close(fd)
- }()
-
- if _, err := ss.Accept(); err == nil {
- t.Errorf("Accept on closed SocketServer, got err %v, want != nil", err)
- }
-
- wg.Wait()
-}
-
-func TestControlMessage(t *testing.T) {
- for i := 0; i <= 10; i++ {
- var want []int
- for j := 0; j < i; j++ {
- want = append(want, i+j+1)
- }
-
- var cm ControlMessage
- cm.EnableFDs(i)
- cm.PackFDs(want...)
- got, err := cm.ExtractFDs()
- if err != nil || !reflect.DeepEqual(got, want) {
- t.Errorf("cm.ExtractFDs() = %v, %v, want = %v, %v", got, err, want, nil)
- }
- }
-}
-
-func benchmarkSendRecv(b *testing.B, packet bool) {
- server, client, err := SocketPair(packet)
- if err != nil {
- b.Fatalf("SocketPair: got %v, wanted nil", err)
- }
- defer server.Close()
- defer client.Close()
- go func() {
- buf := make([]byte, 1)
- for i := 0; i < b.N; i++ {
- n, err := server.Read(buf)
- if n != 1 || err != nil {
- b.Errorf("server.Read: got (%d, %v), wanted (1, nil)", n, err)
- return
- }
- n, err = server.Write(buf)
- if n != 1 || err != nil {
- b.Errorf("server.Write: got (%d, %v), wanted (1, nil)", n, err)
- return
- }
- }
- }()
- buf := make([]byte, 1)
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- n, err := client.Write(buf)
- if n != 1 || err != nil {
- b.Fatalf("client.Write: got (%d, %v), wanted (1, nil)", n, err)
- }
- n, err = client.Read(buf)
- if n != 1 || err != nil {
- b.Fatalf("client.Read: got (%d, %v), wanted (1, nil)", n, err)
- }
- }
-}
-
-func BenchmarkSendRecvStream(b *testing.B) {
- benchmarkSendRecv(b, false)
-}
-
-func BenchmarkSendRecvPacket(b *testing.B) {
- benchmarkSendRecv(b, true)
-}
diff --git a/pkg/unet/unet_unsafe_state_autogen.go b/pkg/unet/unet_unsafe_state_autogen.go
new file mode 100644
index 000000000..9bbf31d35
--- /dev/null
+++ b/pkg/unet/unet_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package unet
diff --git a/pkg/urpc/BUILD b/pkg/urpc/BUILD
deleted file mode 100644
index 850c34ed0..000000000
--- a/pkg/urpc/BUILD
+++ /dev/null
@@ -1,23 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "urpc",
- srcs = ["urpc.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/fd",
- "//pkg/log",
- "//pkg/sync",
- "//pkg/unet",
- ],
-)
-
-go_test(
- name = "urpc_test",
- size = "small",
- srcs = ["urpc_test.go"],
- library = ":urpc",
- deps = ["//pkg/unet"],
-)
diff --git a/pkg/urpc/urpc_state_autogen.go b/pkg/urpc/urpc_state_autogen.go
new file mode 100644
index 000000000..5fdca6717
--- /dev/null
+++ b/pkg/urpc/urpc_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package urpc
diff --git a/pkg/urpc/urpc_test.go b/pkg/urpc/urpc_test.go
deleted file mode 100644
index c6c7ce9d4..000000000
--- a/pkg/urpc/urpc_test.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package urpc
-
-import (
- "errors"
- "os"
- "testing"
-
- "gvisor.dev/gvisor/pkg/unet"
-)
-
-type test struct {
-}
-
-type testArg struct {
- StringArg string
- IntArg int
- FilePayload
-}
-
-type testResult struct {
- StringResult string
- IntResult int
- FilePayload
-}
-
-func (t test) Func(a *testArg, r *testResult) error {
- r.StringResult = a.StringArg
- r.IntResult = a.IntArg
- return nil
-}
-
-func (t test) Err(a *testArg, r *testResult) error {
- return errors.New("test error")
-}
-
-func (t test) FailNoFile(a *testArg, r *testResult) error {
- if a.Files == nil {
- return errors.New("no file found")
- }
-
- return nil
-}
-
-func (t test) SendFile(a *testArg, r *testResult) error {
- r.Files = []*os.File{os.Stdin, os.Stdout, os.Stderr}
- return nil
-}
-
-func (t test) TooManyFiles(a *testArg, r *testResult) error {
- for i := 0; i <= maxFiles; i++ {
- r.Files = append(r.Files, os.Stdin)
- }
- return nil
-}
-
-func startServer(socket *unet.Socket) {
- s := NewServer()
- s.Register(test{})
- s.StartHandling(socket)
-}
-
-func testClient() (*Client, error) {
- serverSock, clientSock, err := unet.SocketPair(false)
- if err != nil {
- return nil, err
- }
- startServer(serverSock)
-
- return NewClient(clientSock), nil
-}
-
-func TestCall(t *testing.T) {
- c, err := testClient()
- if err != nil {
- t.Fatalf("error creating test client: %v", err)
- }
- defer c.Close()
-
- var r testResult
- if err := c.Call("test.Func", &testArg{}, &r); err != nil {
- t.Errorf("basic call failed: %v", err)
- } else if r.StringResult != "" || r.IntResult != 0 {
- t.Errorf("unexpected result, got %v expected zero value", r)
- }
- if err := c.Call("test.Func", &testArg{StringArg: "hello"}, &r); err != nil {
- t.Errorf("basic call failed: %v", err)
- } else if r.StringResult != "hello" {
- t.Errorf("unexpected result, got %v expected hello", r.StringResult)
- }
- if err := c.Call("test.Func", &testArg{IntArg: 1}, &r); err != nil {
- t.Errorf("basic call failed: %v", err)
- } else if r.IntResult != 1 {
- t.Errorf("unexpected result, got %v expected 1", r.IntResult)
- }
-}
-
-func TestUnknownMethod(t *testing.T) {
- c, err := testClient()
- if err != nil {
- t.Fatalf("error creating test client: %v", err)
- }
- defer c.Close()
-
- var r testResult
- if err := c.Call("test.Unknown", &testArg{}, &r); err == nil {
- t.Errorf("expected non-nil err, got nil")
- } else if err.Error() != ErrUnknownMethod.Error() {
- t.Errorf("expected test error, got %v", err)
- }
-}
-
-func TestErr(t *testing.T) {
- c, err := testClient()
- if err != nil {
- t.Fatalf("error creating test client: %v", err)
- }
- defer c.Close()
-
- var r testResult
- if err := c.Call("test.Err", &testArg{}, &r); err == nil {
- t.Errorf("expected non-nil err, got nil")
- } else if err.Error() != "test error" {
- t.Errorf("expected test error, got %v", err)
- }
-}
-
-func TestSendFile(t *testing.T) {
- c, err := testClient()
- if err != nil {
- t.Fatalf("error creating test client: %v", err)
- }
- defer c.Close()
-
- var r testResult
- if err := c.Call("test.FailNoFile", &testArg{}, &r); err == nil {
- t.Errorf("expected non-nil err, got nil")
- }
- if err := c.Call("test.FailNoFile", &testArg{FilePayload: FilePayload{Files: []*os.File{os.Stdin, os.Stdout, os.Stdin}}}, &r); err != nil {
- t.Errorf("expected nil err, got %v", err)
- }
-}
-
-func TestRecvFile(t *testing.T) {
- c, err := testClient()
- if err != nil {
- t.Fatalf("error creating test client: %v", err)
- }
- defer c.Close()
-
- var r testResult
- if err := c.Call("test.SendFile", &testArg{}, &r); err != nil {
- t.Errorf("expected nil err, got %v", err)
- }
- if r.Files == nil {
- t.Errorf("expected file, got nil")
- }
-}
-
-func TestShutdown(t *testing.T) {
- serverSock, clientSock, err := unet.SocketPair(false)
- if err != nil {
- t.Fatalf("error creating test client: %v", err)
- }
- clientSock.Close()
-
- s := NewServer()
- if err := s.Handle(serverSock); err == nil {
- t.Errorf("expected non-nil err, got nil")
- }
-}
-
-func TestTooManyFiles(t *testing.T) {
- c, err := testClient()
- if err != nil {
- t.Fatalf("error creating test client: %v", err)
- }
- defer c.Close()
-
- var r testResult
- var a testArg
- for i := 0; i <= maxFiles; i++ {
- a.Files = append(a.Files, os.Stdin)
- }
-
- // Client-side error.
- if err := c.Call("test.Func", &a, &r); err != ErrTooManyFiles {
- t.Errorf("expected ErrTooManyFiles, got %v", err)
- }
-
- // Server-side error.
- if err := c.Call("test.TooManyFiles", &testArg{}, &r); err == nil {
- t.Errorf("expected non-nil err, got nil")
- } else if err.Error() != "too many files" {
- t.Errorf("expected too many files, got %v", err.Error())
- }
-}
diff --git a/pkg/usermem/BUILD b/pkg/usermem/BUILD
deleted file mode 100644
index 9c37a9626..000000000
--- a/pkg/usermem/BUILD
+++ /dev/null
@@ -1,37 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "usermem",
- srcs = [
- "bytes_io.go",
- "bytes_io_unsafe.go",
- "marshal.go",
- "usermem.go",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/atomicbitops",
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/gohacks",
- "//pkg/hostarch",
- "//pkg/safemem",
- ],
-)
-
-go_test(
- name = "usermem_test",
- size = "small",
- srcs = [
- "usermem_test.go",
- ],
- library = ":usermem",
- deps = [
- "//pkg/context",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/safemem",
- ],
-)
diff --git a/pkg/usermem/README.md b/pkg/usermem/README.md
deleted file mode 100644
index f6d2137eb..000000000
--- a/pkg/usermem/README.md
+++ /dev/null
@@ -1,31 +0,0 @@
-This package defines primitives for sentry access to application memory.
-
-Major types:
-
-- The `IO` interface represents a virtual address space and provides I/O
- methods on that address space. `IO` is the lowest-level primitive. The
- primary implementation of the `IO` interface is `mm.MemoryManager`.
-
-- `IOSequence` represents a collection of individually-contiguous address
-    ranges in an `IO` that is operated on sequentially, analogous to Linux's
- `struct iov_iter`.
-
-Major usage patterns:
-
-- Access to a task's virtual memory, subject to the application's memory
- protections and while running on that task's goroutine, from a context that
- is at or above the level of the `kernel` package (e.g. most syscall
- implementations in `syscalls/linux`); use the `kernel.Task.Copy*` wrappers
- defined in `kernel/task_usermem.go`.
-
-- Access to a task's virtual memory, from a context that is at or above the
- level of the `kernel` package, but where any of the above constraints does
- not hold (e.g. `PTRACE_POKEDATA`, which ignores application memory
- protections); obtain the task's `mm.MemoryManager` by calling
- `kernel.Task.MemoryManager`, and call its `IO` methods directly.
-
-- Access to a task's virtual memory, from a context that is below the level of
- the `kernel` package (e.g. filesystem I/O); clients must pass I/O arguments
- from higher layers, usually in the form of an `IOSequence`. The
- `kernel.Task.SingleIOSequence` and `kernel.Task.IovecsIOSequence` functions
- in `kernel/task_usermem.go` are convenience functions for doing so.
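For illustration only: a minimal sketch of how the IOSequence primitive described in the README above is driven, using just the API exercised by the usermem tests removed later in this diff (BytesIOSequence, CopyOut, DropFirst, NumBytes). The standalone main wrapper is an assumption for the example and is not part of this change.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/context"
	"gvisor.dev/gvisor/pkg/usermem"
)

func main() {
	ctx := context.Background()
	buf := []byte("ABCD")
	// Wrap an in-memory buffer in an IOSequence, so code written against
	// IOSequence can be exercised without an mm.MemoryManager.
	s := usermem.BytesIOSequence(buf)

	// Copy "fo" out to the "application memory"; the write lands in buf.
	if _, err := s.CopyOut(ctx, []byte("fo")); err != nil {
		panic(err)
	}
	// Drop the consumed bytes and check how much of the sequence remains.
	s = s.DropFirst(2)
	fmt.Printf("buf=%q remaining=%d\n", buf, s.NumBytes()) // buf="foCD" remaining=2
}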
diff --git a/pkg/usermem/usermem_state_autogen.go b/pkg/usermem/usermem_state_autogen.go
new file mode 100644
index 000000000..62f8af4c9
--- /dev/null
+++ b/pkg/usermem/usermem_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package usermem
diff --git a/pkg/usermem/usermem_test.go b/pkg/usermem/usermem_test.go
deleted file mode 100644
index a5e2fe69e..000000000
--- a/pkg/usermem/usermem_test.go
+++ /dev/null
@@ -1,407 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package usermem
-
-import (
- "bytes"
- "fmt"
- "reflect"
- "strings"
- "testing"
-
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/errors/linuxerr"
- "gvisor.dev/gvisor/pkg/hostarch"
- "gvisor.dev/gvisor/pkg/safemem"
-)
-
-// newContext returns a context.Context that we can use in these tests (we
-// can't use contexttest because it depends on usermem).
-func newContext() context.Context {
- return context.Background()
-}
-
-func newBytesIOString(s string) *BytesIO {
- return &BytesIO{[]byte(s)}
-}
-
-func TestBytesIOCopyOutSuccess(t *testing.T) {
- b := newBytesIOString("ABCDE")
- n, err := b.CopyOut(newContext(), 1, []byte("foo"), IOOpts{})
- if wantN := 3; n != wantN || err != nil {
- t.Errorf("CopyOut: got (%v, %v), wanted (%v, nil)", n, err, wantN)
- }
- if got, want := b.Bytes, []byte("AfooE"); !bytes.Equal(got, want) {
- t.Errorf("Bytes: got %q, wanted %q", got, want)
- }
-}
-
-func TestBytesIOCopyOutFailure(t *testing.T) {
- b := newBytesIOString("ABC")
- n, err := b.CopyOut(newContext(), 1, []byte("foo"), IOOpts{})
- if wantN, wantErr := 2, linuxerr.EFAULT; n != wantN || err != wantErr {
- t.Errorf("CopyOut: got (%v, %v), wanted (%v, %v)", n, err, wantN, wantErr)
- }
- if got, want := b.Bytes, []byte("Afo"); !bytes.Equal(got, want) {
- t.Errorf("Bytes: got %q, wanted %q", got, want)
- }
-}
-
-func TestBytesIOCopyInSuccess(t *testing.T) {
- b := newBytesIOString("AfooE")
- var dst [3]byte
- n, err := b.CopyIn(newContext(), 1, dst[:], IOOpts{})
- if wantN := 3; n != wantN || err != nil {
- t.Errorf("CopyIn: got (%v, %v), wanted (%v, nil)", n, err, wantN)
- }
- if got, want := dst[:], []byte("foo"); !bytes.Equal(got, want) {
- t.Errorf("dst: got %q, wanted %q", got, want)
- }
-}
-
-func TestBytesIOCopyInFailure(t *testing.T) {
- b := newBytesIOString("Afo")
- var dst [3]byte
- n, err := b.CopyIn(newContext(), 1, dst[:], IOOpts{})
- if wantN, wantErr := 2, linuxerr.EFAULT; n != wantN || err != wantErr {
- t.Errorf("CopyIn: got (%v, %v), wanted (%v, %v)", n, err, wantN, wantErr)
- }
- if got, want := dst[:], []byte("fo\x00"); !bytes.Equal(got, want) {
- t.Errorf("dst: got %q, wanted %q", got, want)
- }
-}
-
-func TestBytesIOZeroOutSuccess(t *testing.T) {
- b := newBytesIOString("ABCD")
- n, err := b.ZeroOut(newContext(), 1, 2, IOOpts{})
- if wantN := int64(2); n != wantN || err != nil {
- t.Errorf("ZeroOut: got (%v, %v), wanted (%v, nil)", n, err, wantN)
- }
- if got, want := b.Bytes, []byte("A\x00\x00D"); !bytes.Equal(got, want) {
- t.Errorf("Bytes: got %q, wanted %q", got, want)
- }
-}
-
-func TestBytesIOZeroOutFailure(t *testing.T) {
- b := newBytesIOString("ABC")
- n, err := b.ZeroOut(newContext(), 1, 3, IOOpts{})
- if wantN, wantErr := int64(2), linuxerr.EFAULT; n != wantN || err != wantErr {
- t.Errorf("ZeroOut: got (%v, %v), wanted (%v, %v)", n, err, wantN, wantErr)
- }
- if got, want := b.Bytes, []byte("A\x00\x00"); !bytes.Equal(got, want) {
- t.Errorf("Bytes: got %q, wanted %q", got, want)
- }
-}
-
-func TestBytesIOCopyOutFromSuccess(t *testing.T) {
- b := newBytesIOString("ABCDEFGH")
- n, err := b.CopyOutFrom(newContext(), hostarch.AddrRangeSeqFromSlice([]hostarch.AddrRange{
- {Start: 4, End: 7},
- {Start: 1, End: 4},
- }), safemem.FromIOReader{bytes.NewBufferString("barfoo")}, IOOpts{})
- if wantN := int64(6); n != wantN || err != nil {
- t.Errorf("CopyOutFrom: got (%v, %v), wanted (%v, nil)", n, err, wantN)
- }
- if got, want := b.Bytes, []byte("AfoobarH"); !bytes.Equal(got, want) {
- t.Errorf("Bytes: got %q, wanted %q", got, want)
- }
-}
-
-func TestBytesIOCopyOutFromFailure(t *testing.T) {
- b := newBytesIOString("ABCDE")
- n, err := b.CopyOutFrom(newContext(), hostarch.AddrRangeSeqFromSlice([]hostarch.AddrRange{
- {Start: 1, End: 4},
- {Start: 4, End: 7},
- }), safemem.FromIOReader{bytes.NewBufferString("foobar")}, IOOpts{})
- if wantN, wantErr := int64(4), linuxerr.EFAULT; n != wantN || err != wantErr {
- t.Errorf("CopyOutFrom: got (%v, %v), wanted (%v, %v)", n, err, wantN, wantErr)
- }
- if got, want := b.Bytes, []byte("Afoob"); !bytes.Equal(got, want) {
- t.Errorf("Bytes: got %q, wanted %q", got, want)
- }
-}
-
-func TestBytesIOCopyInToSuccess(t *testing.T) {
- b := newBytesIOString("AfoobarH")
- var dst bytes.Buffer
- n, err := b.CopyInTo(newContext(), hostarch.AddrRangeSeqFromSlice([]hostarch.AddrRange{
- {Start: 4, End: 7},
- {Start: 1, End: 4},
- }), safemem.FromIOWriter{&dst}, IOOpts{})
- if wantN := int64(6); n != wantN || err != nil {
- t.Errorf("CopyInTo: got (%v, %v), wanted (%v, nil)", n, err, wantN)
- }
- if got, want := dst.Bytes(), []byte("barfoo"); !bytes.Equal(got, want) {
- t.Errorf("dst.Bytes(): got %q, wanted %q", got, want)
- }
-}
-
-func TestBytesIOCopyInToFailure(t *testing.T) {
- b := newBytesIOString("Afoob")
- var dst bytes.Buffer
- n, err := b.CopyInTo(newContext(), hostarch.AddrRangeSeqFromSlice([]hostarch.AddrRange{
- {Start: 1, End: 4},
- {Start: 4, End: 7},
- }), safemem.FromIOWriter{&dst}, IOOpts{})
- if wantN, wantErr := int64(4), linuxerr.EFAULT; n != wantN || err != wantErr {
- t.Errorf("CopyInTo: got (%v, %v), wanted (%v, %v)", n, err, wantN, wantErr)
- }
- if got, want := dst.Bytes(), []byte("foob"); !bytes.Equal(got, want) {
- t.Errorf("dst.Bytes(): got %q, wanted %q", got, want)
- }
-}
-
-type testStruct struct {
- Int8 int8
- Uint8 uint8
- Int16 int16
- Uint16 uint16
- Int32 int32
- Uint32 uint32
- Int64 int64
- Uint64 uint64
-}
-
-func TestCopyStringInShort(t *testing.T) {
- // Tests for string length <= copyStringIncrement.
- want := strings.Repeat("A", copyStringIncrement-2)
- mem := want + "\x00"
- if got, err := CopyStringIn(newContext(), newBytesIOString(mem), 0, 2*copyStringIncrement, IOOpts{}); got != want || err != nil {
- t.Errorf("CopyStringIn: got (%q, %v), wanted (%q, nil)", got, err, want)
- }
-}
-
-func TestCopyStringInLong(t *testing.T) {
- // Tests for copyStringIncrement < string length <= copyStringMaxInitBufLen
- // (requiring multiple calls to IO.CopyIn()).
- want := strings.Repeat("A", copyStringIncrement*3/4) + strings.Repeat("B", copyStringIncrement*3/4)
- mem := want + "\x00"
- if got, err := CopyStringIn(newContext(), newBytesIOString(mem), 0, 2*copyStringIncrement, IOOpts{}); got != want || err != nil {
- t.Errorf("CopyStringIn: got (%q, %v), wanted (%q, nil)", got, err, want)
- }
-}
-
-func TestCopyStringInVeryLong(t *testing.T) {
- // Tests for string length > copyStringMaxInitBufLen (requiring buffer
- // reallocation).
- want := strings.Repeat("A", copyStringMaxInitBufLen*3/4) + strings.Repeat("B", copyStringMaxInitBufLen*3/4)
- mem := want + "\x00"
- if got, err := CopyStringIn(newContext(), newBytesIOString(mem), 0, 2*copyStringMaxInitBufLen, IOOpts{}); got != want || err != nil {
- t.Errorf("CopyStringIn: got (%q, %v), wanted (%q, nil)", got, err, want)
- }
-}
-
-func TestCopyStringInNoTerminatingZeroByte(t *testing.T) {
- want := strings.Repeat("A", copyStringIncrement-1)
- got, err := CopyStringIn(newContext(), newBytesIOString(want), 0, 2*copyStringIncrement, IOOpts{})
- if wantErr := linuxerr.EFAULT; got != want || err != wantErr {
- t.Errorf("CopyStringIn: got (%q, %v), wanted (%q, %v)", got, err, want, wantErr)
- }
-}
-
-func TestCopyStringInTruncatedByMaxlen(t *testing.T) {
- got, err := CopyStringIn(newContext(), newBytesIOString(strings.Repeat("A", 10)), 0, 5, IOOpts{})
- if want, wantErr := strings.Repeat("A", 5), linuxerr.ENAMETOOLONG; got != want || err != wantErr {
- t.Errorf("CopyStringIn: got (%q, %v), wanted (%q, %v)", got, err, want, wantErr)
- }
-}
-
-func TestCopyInt32StringsInVec(t *testing.T) {
- for _, test := range []struct {
- str string
- n int
- initial []int32
- final []int32
- }{
- {
- str: "100 200",
- n: len("100 200"),
- initial: []int32{1, 2},
- final: []int32{100, 200},
- },
- {
- // Fewer values ok
- str: "100",
- n: len("100"),
- initial: []int32{1, 2},
- final: []int32{100, 2},
- },
- {
- // Extra values ok
- str: "100 200 300",
- n: len("100 200 "),
- initial: []int32{1, 2},
- final: []int32{100, 200},
- },
- {
- // Leading and trailing whitespace ok
- str: " 100\t200\n",
- n: len(" 100\t200\n"),
- initial: []int32{1, 2},
- final: []int32{100, 200},
- },
- } {
- t.Run(fmt.Sprintf("%q", test.str), func(t *testing.T) {
- src := BytesIOSequence([]byte(test.str))
- dsts := append([]int32(nil), test.initial...)
- if n, err := CopyInt32StringsInVec(newContext(), src.IO, src.Addrs, dsts, src.Opts); n != int64(test.n) || err != nil {
- t.Errorf("CopyInt32StringsInVec: got (%d, %v), wanted (%d, nil)", n, err, test.n)
- }
- if !reflect.DeepEqual(dsts, test.final) {
- t.Errorf("dsts: got %v, wanted %v", dsts, test.final)
- }
- })
- }
-}
-
-func TestCopyInt32StringsInVecRequiresOneValidValue(t *testing.T) {
- for _, s := range []string{"", "\n", "a123"} {
- t.Run(fmt.Sprintf("%q", s), func(t *testing.T) {
- src := BytesIOSequence([]byte(s))
- initial := []int32{1, 2}
- dsts := append([]int32(nil), initial...)
- if n, err := CopyInt32StringsInVec(newContext(), src.IO, src.Addrs, dsts, src.Opts); !linuxerr.Equals(linuxerr.EINVAL, err) {
- t.Errorf("CopyInt32StringsInVec: got (%d, %v), wanted (_, %v)", n, err, linuxerr.EINVAL)
- }
- if !reflect.DeepEqual(dsts, initial) {
- t.Errorf("dsts: got %v, wanted %v", dsts, initial)
- }
- })
- }
-}
-
-func TestIOSequenceCopyOut(t *testing.T) {
- buf := []byte("ABCD")
- s := BytesIOSequence(buf)
-
- // CopyOut limited by len(src).
- n, err := s.CopyOut(newContext(), []byte("fo"))
- if wantN := 2; n != wantN || err != nil {
- t.Errorf("CopyOut: got (%v, %v), wanted (%v, nil)", n, err, wantN)
- }
- if want := []byte("foCD"); !bytes.Equal(buf, want) {
- t.Errorf("buf: got %q, wanted %q", buf, want)
- }
- s = s.DropFirst(2)
- if got, want := s.NumBytes(), int64(2); got != want {
- t.Errorf("NumBytes: got %v, wanted %v", got, want)
- }
-
- // CopyOut limited by s.NumBytes().
- n, err = s.CopyOut(newContext(), []byte("obar"))
- if wantN := 2; n != wantN || err != nil {
- t.Errorf("CopyOut: got (%v, %v), wanted (%v, nil)", n, err, wantN)
- }
- if want := []byte("foob"); !bytes.Equal(buf, want) {
- t.Errorf("buf: got %q, wanted %q", buf, want)
- }
- s = s.DropFirst(2)
- if got, want := s.NumBytes(), int64(0); got != want {
- t.Errorf("NumBytes: got %v, wanted %v", got, want)
- }
-}
-
-func TestIOSequenceCopyIn(t *testing.T) {
- s := BytesIOSequence([]byte("foob"))
- dst := []byte("ABCDEF")
-
- // CopyIn limited by len(dst).
- n, err := s.CopyIn(newContext(), dst[:2])
- if wantN := 2; n != wantN || err != nil {
- t.Errorf("CopyIn: got (%v, %v), wanted (%v, nil)", n, err, wantN)
- }
- if want := []byte("foCDEF"); !bytes.Equal(dst, want) {
- t.Errorf("dst: got %q, wanted %q", dst, want)
- }
- s = s.DropFirst(2)
- if got, want := s.NumBytes(), int64(2); got != want {
- t.Errorf("NumBytes: got %v, wanted %v", got, want)
- }
-
- // CopyIn limited by s.Remaining().
- n, err = s.CopyIn(newContext(), dst[2:])
- if wantN := 2; n != wantN || err != nil {
- t.Errorf("CopyIn: got (%v, %v), wanted (%v, nil)", n, err, wantN)
- }
- if want := []byte("foobEF"); !bytes.Equal(dst, want) {
- t.Errorf("dst: got %q, wanted %q", dst, want)
- }
- s = s.DropFirst(2)
- if got, want := s.NumBytes(), int64(0); got != want {
- t.Errorf("NumBytes: got %v, wanted %v", got, want)
- }
-}
-
-func TestIOSequenceZeroOut(t *testing.T) {
- buf := []byte("ABCD")
- s := BytesIOSequence(buf)
-
- // ZeroOut limited by toZero.
- n, err := s.ZeroOut(newContext(), 2)
- if wantN := int64(2); n != wantN || err != nil {
- t.Errorf("ZeroOut: got (%v, %v), wanted (%v, nil)", n, err, wantN)
- }
- if want := []byte("\x00\x00CD"); !bytes.Equal(buf, want) {
- t.Errorf("buf: got %q, wanted %q", buf, want)
- }
- s = s.DropFirst(2)
- if got, want := s.NumBytes(), int64(2); got != want {
- t.Errorf("NumBytes: got %v, wanted %v", got, want)
- }
-
- // ZeroOut limited by s.NumBytes().
- n, err = s.ZeroOut(newContext(), 4)
- if wantN := int64(2); n != wantN || err != nil {
- t.Errorf("ZeroOut: got (%v, %v), wanted (%v, nil)", n, err, wantN)
- }
- if want := []byte("\x00\x00\x00\x00"); !bytes.Equal(buf, want) {
- t.Errorf("buf: got %q, wanted %q", buf, want)
- }
- s = s.DropFirst(2)
- if got, want := s.NumBytes(), int64(0); got != want {
- t.Errorf("NumBytes: got %v, wanted %v", got, want)
- }
-}
-
-func TestIOSequenceTakeFirst(t *testing.T) {
- s := BytesIOSequence([]byte("foobar"))
- if got, want := s.NumBytes(), int64(6); got != want {
- t.Errorf("NumBytes: got %v, wanted %v", got, want)
- }
-
- s = s.TakeFirst(3)
- if got, want := s.NumBytes(), int64(3); got != want {
- t.Errorf("NumBytes: got %v, wanted %v", got, want)
- }
-
- // TakeFirst(n) where n > s.NumBytes() is a no-op.
- s = s.TakeFirst(9)
- if got, want := s.NumBytes(), int64(3); got != want {
- t.Errorf("NumBytes: got %v, wanted %v", got, want)
- }
-
- var dst [3]byte
- n, err := s.CopyIn(newContext(), dst[:])
- if wantN := 3; n != wantN || err != nil {
- t.Errorf("CopyIn: got (%v, %v), wanted (%v, nil)", n, err, wantN)
- }
- if got, want := dst[:], []byte("foo"); !bytes.Equal(got, want) {
- t.Errorf("dst: got %q, wanted %q", got, want)
- }
- s = s.DropFirst(3)
- if got, want := s.NumBytes(), int64(0); got != want {
- t.Errorf("NumBytes: got %v, wanted %v", got, want)
- }
-}
diff --git a/pkg/usermem/usermem_unsafe_state_autogen.go b/pkg/usermem/usermem_unsafe_state_autogen.go
new file mode 100644
index 000000000..62f8af4c9
--- /dev/null
+++ b/pkg/usermem/usermem_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package usermem
diff --git a/pkg/waiter/BUILD b/pkg/waiter/BUILD
deleted file mode 100644
index 852480a09..000000000
--- a/pkg/waiter/BUILD
+++ /dev/null
@@ -1,35 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "waiter_list",
- out = "waiter_list.go",
- package = "waiter",
- prefix = "waiter",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*Entry",
- "Linker": "*Entry",
- },
-)
-
-go_library(
- name = "waiter",
- srcs = [
- "waiter.go",
- "waiter_list.go",
- ],
- visibility = ["//visibility:public"],
- deps = ["//pkg/sync"],
-)
-
-go_test(
- name = "waiter_test",
- size = "small",
- srcs = [
- "waiter_test.go",
- ],
- library = ":waiter",
-)
diff --git a/pkg/waiter/waiter_list.go b/pkg/waiter/waiter_list.go
new file mode 100644
index 000000000..f83aeb49d
--- /dev/null
+++ b/pkg/waiter/waiter_list.go
@@ -0,0 +1,221 @@
+package waiter
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type waiterElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (waiterElementMapper) linkerFor(elem *Entry) *Entry { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type waiterList struct {
+ head *Entry
+ tail *Entry
+}
+
+// Reset resets list l to the empty state.
+func (l *waiterList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *waiterList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *waiterList) Front() *Entry {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *waiterList) Back() *Entry {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *waiterList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (waiterElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *waiterList) PushFront(e *Entry) {
+ linker := waiterElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ waiterElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *waiterList) PushBack(e *Entry) {
+ linker := waiterElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ waiterElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *waiterList) PushBackList(m *waiterList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ waiterElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ waiterElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *waiterList) InsertAfter(b, e *Entry) {
+ bLinker := waiterElementMapper{}.linkerFor(b)
+ eLinker := waiterElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ waiterElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *waiterList) InsertBefore(a, e *Entry) {
+ aLinker := waiterElementMapper{}.linkerFor(a)
+ eLinker := waiterElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ waiterElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *waiterList) Remove(e *Entry) {
+ linker := waiterElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ waiterElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ waiterElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type waiterEntry struct {
+ next *Entry
+ prev *Entry
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *waiterEntry) Next() *Entry {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *waiterEntry) Prev() *Entry {
+ return e.prev
+}
+
+// SetNext assigns 'entry' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *waiterEntry) SetNext(elem *Entry) {
+ e.next = elem
+}
+
+// SetPrev assigns 'entry' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *waiterEntry) SetPrev(elem *Entry) {
+ e.prev = elem
+}
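The generated waiterList above is an instance of the generic intrusive list template (//pkg/ilist:generic_list, per the BUILD rule removed earlier in this diff): the links live inside the element itself, so insertion and removal are O(1) and never allocate. The following standalone sketch shows only that general pattern; the node and intrusiveList names are invented for the example and are not part of the waiter package.

package main

import "fmt"

type node struct {
	value      int
	next, prev *node // links are embedded in the element, no separate list node
}

type intrusiveList struct {
	head, tail *node
}

// PushBack appends n in O(1) with no allocation.
func (l *intrusiveList) PushBack(n *node) {
	n.prev, n.next = l.tail, nil
	if l.tail != nil {
		l.tail.next = n
	} else {
		l.head = n
	}
	l.tail = n
}

// Remove unlinks n in O(1).
func (l *intrusiveList) Remove(n *node) {
	if n.prev != nil {
		n.prev.next = n.next
	} else if l.head == n {
		l.head = n.next
	}
	if n.next != nil {
		n.next.prev = n.prev
	} else if l.tail == n {
		l.tail = n.prev
	}
	n.next, n.prev = nil, nil
}

func main() {
	var l intrusiveList
	a, b := &node{value: 1}, &node{value: 2}
	l.PushBack(a)
	l.PushBack(b)
	l.Remove(a)
	for n := l.head; n != nil; n = n.next {
		fmt.Println(n.value) // prints 2
	}
}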
diff --git a/pkg/waiter/waiter_state_autogen.go b/pkg/waiter/waiter_state_autogen.go
new file mode 100644
index 000000000..498b7e79e
--- /dev/null
+++ b/pkg/waiter/waiter_state_autogen.go
@@ -0,0 +1,126 @@
+// automatically generated by stateify.
+
+package waiter
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (e *Entry) StateTypeName() string {
+ return "pkg/waiter.Entry"
+}
+
+func (e *Entry) StateFields() []string {
+ return []string{
+ "Callback",
+ "mask",
+ "waiterEntry",
+ }
+}
+
+func (e *Entry) beforeSave() {}
+
+// +checklocksignore
+func (e *Entry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.Callback)
+ stateSinkObject.Save(1, &e.mask)
+ stateSinkObject.Save(2, &e.waiterEntry)
+}
+
+func (e *Entry) afterLoad() {}
+
+// +checklocksignore
+func (e *Entry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.Callback)
+ stateSourceObject.Load(1, &e.mask)
+ stateSourceObject.Load(2, &e.waiterEntry)
+}
+
+func (q *Queue) StateTypeName() string {
+ return "pkg/waiter.Queue"
+}
+
+func (q *Queue) StateFields() []string {
+ return []string{
+ "list",
+ }
+}
+
+func (q *Queue) beforeSave() {}
+
+// +checklocksignore
+func (q *Queue) StateSave(stateSinkObject state.Sink) {
+ q.beforeSave()
+ stateSinkObject.Save(0, &q.list)
+}
+
+func (q *Queue) afterLoad() {}
+
+// +checklocksignore
+func (q *Queue) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &q.list)
+}
+
+func (l *waiterList) StateTypeName() string {
+ return "pkg/waiter.waiterList"
+}
+
+func (l *waiterList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *waiterList) beforeSave() {}
+
+// +checklocksignore
+func (l *waiterList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *waiterList) afterLoad() {}
+
+// +checklocksignore
+func (l *waiterList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *waiterEntry) StateTypeName() string {
+ return "pkg/waiter.waiterEntry"
+}
+
+func (e *waiterEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *waiterEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *waiterEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *waiterEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *waiterEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func init() {
+ state.Register((*Entry)(nil))
+ state.Register((*Queue)(nil))
+ state.Register((*waiterList)(nil))
+ state.Register((*waiterEntry)(nil))
+}
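The generated file above shows stateify's save/restore pattern: a savable type names its fields, saves and loads each one by index through state.Sink and state.Source, declares beforeSave/afterLoad hooks, and registers itself in init. A hand-written equivalent for a hypothetical type would look like the sketch below; the counter type is illustrative only, and in practice the struct just carries a "// +stateify savable" comment and the generator emits these methods.

package example

import "gvisor.dev/gvisor/pkg/state"

// +stateify savable
type counter struct {
	hits uint64
}

func (c *counter) StateTypeName() string { return "example.counter" }

func (c *counter) StateFields() []string { return []string{"hits"} }

func (c *counter) beforeSave() {}

// StateSave writes each field to the sink by index.
func (c *counter) StateSave(sink state.Sink) {
	c.beforeSave()
	sink.Save(0, &c.hits)
}

func (c *counter) afterLoad() {}

// StateLoad reads the fields back in the same order.
func (c *counter) StateLoad(source state.Source) {
	source.Load(0, &c.hits)
}

func init() {
	state.Register((*counter)(nil))
}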
diff --git a/pkg/waiter/waiter_test.go b/pkg/waiter/waiter_test.go
deleted file mode 100644
index 6928f28b4..000000000
--- a/pkg/waiter/waiter_test.go
+++ /dev/null
@@ -1,198 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package waiter
-
-import (
- "sync/atomic"
- "testing"
-)
-
-type callbackStub struct {
- f func(e *Entry, m EventMask)
-}
-
-// Callback implements EntryCallback.Callback.
-func (c *callbackStub) Callback(e *Entry, m EventMask) {
- c.f(e, m)
-}
-
-func TestEmptyQueue(t *testing.T) {
- var q Queue
-
- // Notify the zero-value of a queue.
- q.Notify(EventIn)
-
- // Register then unregister a waiter, then notify the queue.
- cnt := 0
- e := Entry{Callback: &callbackStub{func(*Entry, EventMask) { cnt++ }}}
- q.EventRegister(&e, EventIn)
- q.EventUnregister(&e)
- q.Notify(EventIn)
- if cnt != 0 {
- t.Errorf("Callback was called when it shouldn't have been")
- }
-}
-
-func TestMask(t *testing.T) {
- // Register a waiter.
- var q Queue
- var cnt int
- e := Entry{Callback: &callbackStub{func(*Entry, EventMask) { cnt++ }}}
- q.EventRegister(&e, EventIn|EventErr)
-
- // Notify with an overlapping mask.
- cnt = 0
- q.Notify(EventIn | EventOut)
- if cnt != 1 {
- t.Errorf("Callback wasn't called when it should have been")
- }
-
- // Notify with a subset mask.
- cnt = 0
- q.Notify(EventIn)
- if cnt != 1 {
- t.Errorf("Callback wasn't called when it should have been")
- }
-
- // Notify with a superset mask.
- cnt = 0
- q.Notify(EventIn | EventErr | EventOut)
- if cnt != 1 {
- t.Errorf("Callback wasn't called when it should have been")
- }
-
- // Notify with the exact same mask.
- cnt = 0
- q.Notify(EventIn | EventErr)
- if cnt != 1 {
- t.Errorf("Callback wasn't called when it should have been")
- }
-
- // Notify with a disjoint mask.
- cnt = 0
- q.Notify(EventOut | EventHUp)
- if cnt != 0 {
- t.Errorf("Callback was called when it shouldn't have been")
- }
-}
-
-func TestConcurrentRegistration(t *testing.T) {
- var q Queue
- var cnt int
- const concurrency = 1000
-
- ch1 := make(chan struct{})
- ch2 := make(chan struct{})
- ch3 := make(chan struct{})
-
- // Create goroutines that will all register/unregister concurrently.
- for i := 0; i < concurrency; i++ {
- go func() {
- var e Entry
- e.Callback = &callbackStub{func(entry *Entry, mask EventMask) {
- cnt++
- if entry != &e {
- t.Errorf("entry = %p, want %p", entry, &e)
- }
- if mask != EventIn {
- t.Errorf("mask = %#x want %#x", mask, EventIn)
- }
- }}
-
- // Wait for notification, then register.
- <-ch1
- q.EventRegister(&e, EventIn|EventErr)
-
- // Tell main goroutine that we're done registering.
- ch2 <- struct{}{}
-
- // Wait for notification, then unregister.
- <-ch3
- q.EventUnregister(&e)
-
- // Tell main goroutine that we're done unregistering.
- ch2 <- struct{}{}
- }()
- }
-
- // Let the goroutines register.
- close(ch1)
- for i := 0; i < concurrency; i++ {
- <-ch2
- }
-
- // Issue a notification.
- q.Notify(EventIn)
- if cnt != concurrency {
- t.Errorf("cnt = %d, want %d", cnt, concurrency)
- }
-
- // Let the goroutine unregister.
- close(ch3)
- for i := 0; i < concurrency; i++ {
- <-ch2
- }
-
- // Issue a notification.
- q.Notify(EventIn)
- if cnt != concurrency {
- t.Errorf("cnt = %d, want %d", cnt, concurrency)
- }
-}
-
-func TestConcurrentNotification(t *testing.T) {
- var q Queue
- var cnt int32
- const concurrency = 1000
- const waiterCount = 1000
-
- // Register waiters.
- for i := 0; i < waiterCount; i++ {
- var e Entry
- e.Callback = &callbackStub{func(entry *Entry, mask EventMask) {
- atomic.AddInt32(&cnt, 1)
- if entry != &e {
- t.Errorf("entry = %p, want %p", entry, &e)
- }
- if mask != EventIn {
- t.Errorf("mask = %#x want %#x", mask, EventIn)
- }
- }}
-
- q.EventRegister(&e, EventIn|EventErr)
- }
-
- // Launch notifiers.
- ch1 := make(chan struct{})
- ch2 := make(chan struct{})
- for i := 0; i < concurrency; i++ {
- go func() {
- <-ch1
- q.Notify(EventIn)
- ch2 <- struct{}{}
- }()
- }
-
- // Let notifiers go.
- close(ch1)
- for i := 0; i < concurrency; i++ {
- <-ch2
- }
-
- // Check the count.
- if cnt != concurrency*waiterCount {
- t.Errorf("cnt = %d, want %d", cnt, concurrency*waiterCount)
- }
-}
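The removed tests above exercise the public waiter API: an Entry carrying an EntryCallback is registered on a Queue with an event mask, and Queue.Notify invokes the callback whenever the notified mask overlaps the registered one. Distilled into a minimal, hypothetical program (the printCallback type and the main wrapper are illustrative, not part of this change):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/waiter"
)

type printCallback struct{}

// Callback implements waiter.EntryCallback.Callback.
func (*printCallback) Callback(e *waiter.Entry, mask waiter.EventMask) {
	fmt.Printf("ready: %#x\n", mask)
}

func main() {
	var q waiter.Queue
	e := waiter.Entry{Callback: &printCallback{}}
	q.EventRegister(&e, waiter.EventIn|waiter.EventErr)

	// Overlapping mask: the callback fires.
	q.Notify(waiter.EventIn)

	// Disjoint mask: no callback.
	q.Notify(waiter.EventOut)

	q.EventUnregister(&e)
}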
diff --git a/runsc/BUILD b/runsc/BUILD
deleted file mode 100644
index 7a7dcc8d5..000000000
--- a/runsc/BUILD
+++ /dev/null
@@ -1,53 +0,0 @@
-load("//tools:defs.bzl", "go_binary")
-
-package(licenses = ["notice"])
-
-go_binary(
- name = "runsc",
- srcs = [
- "main.go",
- "version.go",
- ],
- pure = True,
- tags = ["staging"],
- visibility = [
- "//visibility:public",
- ],
- x_defs = {"main.version": "{STABLE_VERSION}"},
- deps = ["//runsc/cli"],
-)
-
-# The runsc-race target is a race-compatible BUILD target. This must be built
-# via: bazel build --features=race :runsc-race
-#
- # This is necessary because the race feature must apply to all dependencies
- # due to a bug in gazelle file selection. The pure attribute must be off because
-# the race detector requires linking with non-Go components, although we still
-# require a static binary.
-#
-# Note that in the future this might be convertible to a compatible target by
-# using the pure and static attributes within a select function, but select is
-# not currently compatible with string attributes [1].
-#
-# [1] https://github.com/bazelbuild/bazel/issues/1698
-go_binary(
- name = "runsc-race",
- srcs = [
- "main.go",
- "version.go",
- ],
- static = True,
- visibility = [
- "//visibility:public",
- ],
- x_defs = {"main.version": "{STABLE_VERSION}"},
- deps = ["//runsc/cli"],
-)
-
-sh_test(
- name = "version_test",
- size = "small",
- srcs = ["version_test.sh"],
- args = ["$(location :runsc)"],
- data = [":runsc"],
-)
diff --git a/runsc/boot/BUILD b/runsc/boot/BUILD
deleted file mode 100644
index c9d2b3eff..000000000
--- a/runsc/boot/BUILD
+++ /dev/null
@@ -1,150 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "boot",
- srcs = [
- "compat.go",
- "compat_amd64.go",
- "compat_arm64.go",
- "controller.go",
- "debug.go",
- "events.go",
- "fs.go",
- "limits.go",
- "loader.go",
- "network.go",
- "strace.go",
- "vfs.go",
- ],
- visibility = [
- "//pkg/test:__subpackages__",
- "//runsc:__subpackages__",
- "//test:__subpackages__",
- ],
- deps = [
- "//pkg/abi",
- "//pkg/abi/linux",
- "//pkg/bpf",
- "//pkg/cleanup",
- "//pkg/context",
- "//pkg/control/server",
- "//pkg/coverage",
- "//pkg/cpuid",
- "//pkg/errors/linuxerr",
- "//pkg/eventchannel",
- "//pkg/fd",
- "//pkg/flipcall",
- "//pkg/fspath",
- "//pkg/log",
- "//pkg/memutil",
- "//pkg/rand",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/sentry/arch",
- "//pkg/sentry/arch:registers_go_proto",
- "//pkg/sentry/control",
- "//pkg/sentry/devices/memdev",
- "//pkg/sentry/devices/ttydev",
- "//pkg/sentry/devices/tundev",
- "//pkg/sentry/fdimport",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/dev",
- "//pkg/sentry/fs/gofer",
- "//pkg/sentry/fs/host",
- "//pkg/sentry/fs/proc",
- "//pkg/sentry/fs/ramfs",
- "//pkg/sentry/fs/sys",
- "//pkg/sentry/fs/tmpfs",
- "//pkg/sentry/fs/tty",
- "//pkg/sentry/fs/user",
- "//pkg/sentry/fsimpl/cgroupfs",
- "//pkg/sentry/fsimpl/devpts",
- "//pkg/sentry/fsimpl/devtmpfs",
- "//pkg/sentry/fsimpl/fuse",
- "//pkg/sentry/fsimpl/gofer",
- "//pkg/sentry/fsimpl/host",
- "//pkg/sentry/fsimpl/overlay",
- "//pkg/sentry/fsimpl/proc",
- "//pkg/sentry/fsimpl/sys",
- "//pkg/sentry/fsimpl/tmpfs",
- "//pkg/sentry/fsimpl/verity",
- "//pkg/sentry/inet",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel:uncaught_signal_go_proto",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/limits",
- "//pkg/sentry/loader",
- "//pkg/sentry/pgalloc",
- "//pkg/sentry/platform",
- "//pkg/sentry/sighandling",
- "//pkg/sentry/socket/hostinet",
- "//pkg/sentry/socket/netfilter",
- "//pkg/sentry/socket/netlink",
- "//pkg/sentry/socket/netlink/route",
- "//pkg/sentry/socket/netlink/uevent",
- "//pkg/sentry/socket/netstack",
- "//pkg/sentry/socket/unix",
- "//pkg/sentry/state",
- "//pkg/sentry/strace",
- "//pkg/sentry/syscalls/linux/vfs2",
- "//pkg/sentry/time",
- "//pkg/sentry/unimpl:unimplemented_syscall_go_proto",
- "//pkg/sentry/usage",
- "//pkg/sentry/vfs",
- "//pkg/sentry/watchdog",
- "//pkg/sync",
- "//pkg/tcpip",
- "//pkg/tcpip/link/fdbased",
- "//pkg/tcpip/link/loopback",
- "//pkg/tcpip/link/packetsocket",
- "//pkg/tcpip/link/qdisc/fifo",
- "//pkg/tcpip/link/sniffer",
- "//pkg/tcpip/network/arp",
- "//pkg/tcpip/network/ipv4",
- "//pkg/tcpip/network/ipv6",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/transport/icmp",
- "//pkg/tcpip/transport/raw",
- "//pkg/tcpip/transport/tcp",
- "//pkg/tcpip/transport/udp",
- "//pkg/urpc",
- "//runsc/boot/filter",
- "//runsc/boot/platforms",
- "//runsc/boot/pprof",
- "//runsc/config",
- "//runsc/specutils",
- "//runsc/specutils/seccomp",
- "@com_github_opencontainers_runtime_spec//specs-go:go_default_library",
- "@org_golang_google_protobuf//proto:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "boot_test",
- size = "small",
- srcs = [
- "compat_test.go",
- "fs_test.go",
- "loader_test.go",
- ],
- library = ":boot",
- deps = [
- "//pkg/control/server",
- "//pkg/fd",
- "//pkg/fspath",
- "//pkg/log",
- "//pkg/p9",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/fs",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/unet",
- "//runsc/config",
- "//runsc/fsgofer",
- "@com_github_opencontainers_runtime_spec//specs-go:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/runsc/boot/boot_amd64_state_autogen.go b/runsc/boot/boot_amd64_state_autogen.go
new file mode 100644
index 000000000..23dd4b7b3
--- /dev/null
+++ b/runsc/boot/boot_amd64_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package boot
diff --git a/runsc/boot/boot_arm64_state_autogen.go b/runsc/boot/boot_arm64_state_autogen.go
new file mode 100644
index 000000000..23dd4b7b3
--- /dev/null
+++ b/runsc/boot/boot_arm64_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package boot
diff --git a/runsc/boot/boot_state_autogen.go b/runsc/boot/boot_state_autogen.go
new file mode 100644
index 000000000..0f6746d1f
--- /dev/null
+++ b/runsc/boot/boot_state_autogen.go
@@ -0,0 +1,39 @@
+// automatically generated by stateify.
+
+package boot
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (f *sandboxNetstackCreator) StateTypeName() string {
+ return "runsc/boot.sandboxNetstackCreator"
+}
+
+func (f *sandboxNetstackCreator) StateFields() []string {
+ return []string{
+ "clock",
+ "uniqueID",
+ }
+}
+
+func (f *sandboxNetstackCreator) beforeSave() {}
+
+// +checklocksignore
+func (f *sandboxNetstackCreator) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.clock)
+ stateSinkObject.Save(1, &f.uniqueID)
+}
+
+func (f *sandboxNetstackCreator) afterLoad() {}
+
+// +checklocksignore
+func (f *sandboxNetstackCreator) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.clock)
+ stateSourceObject.Load(1, &f.uniqueID)
+}
+
+func init() {
+ state.Register((*sandboxNetstackCreator)(nil))
+}
diff --git a/runsc/boot/compat_test.go b/runsc/boot/compat_test.go
deleted file mode 100644
index 839c5303b..000000000
--- a/runsc/boot/compat_test.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package boot
-
-import (
- "testing"
-)
-
-func TestOnceTracker(t *testing.T) {
- o := onceTracker{}
- if !o.shouldReport(nil) {
- t.Error("first call to checkAndMark, got: false, want: true")
- }
- o.onReported(nil)
- for i := 0; i < 2; i++ {
- if o.shouldReport(nil) {
- t.Error("after first call to checkAndMark, got: true, want: false")
- }
- }
-}
-
-func TestArgsTracker(t *testing.T) {
- for _, tc := range []struct {
- name string
- idx []int
- arg1_1 uint64
- arg1_2 uint64
- arg2_1 uint64
- arg2_2 uint64
- want bool
- }{
- {name: "same arg1", idx: []int{0}, arg1_1: 123, arg1_2: 123, want: false},
- {name: "same arg2", idx: []int{1}, arg2_1: 123, arg2_2: 123, want: false},
- {name: "diff arg1", idx: []int{0}, arg1_1: 123, arg1_2: 321, want: true},
- {name: "diff arg2", idx: []int{1}, arg2_1: 123, arg2_2: 321, want: true},
- {name: "cmd is uint32", idx: []int{0}, arg2_1: 0xdead00000123, arg2_2: 0xbeef00000123, want: false},
- {name: "same 2 args", idx: []int{0, 1}, arg2_1: 123, arg1_1: 321, arg2_2: 123, arg1_2: 321, want: false},
- {name: "diff 2 args", idx: []int{0, 1}, arg2_1: 123, arg1_1: 321, arg2_2: 789, arg1_2: 987, want: true},
- } {
- t.Run(tc.name, func(t *testing.T) {
- c := newArgsTracker(tc.idx...)
- regs := newRegs()
- setArgVal(0, tc.arg1_1, regs)
- setArgVal(1, tc.arg2_1, regs)
- if !c.shouldReport(regs) {
- t.Error("first call to shouldReport, got: false, want: true")
- }
- c.onReported(regs)
-
- setArgVal(0, tc.arg1_2, regs)
- setArgVal(1, tc.arg2_2, regs)
- if got := c.shouldReport(regs); tc.want != got {
- t.Errorf("second call to shouldReport, got: %t, want: %t", got, tc.want)
- }
- })
- }
-}
-
-func TestArgsTrackerLimit(t *testing.T) {
- c := newArgsTracker(0, 1)
- for i := 0; i < reportLimit; i++ {
- regs := newRegs()
- setArgVal(0, 123, regs)
- setArgVal(1, uint64(i), regs)
- if !c.shouldReport(regs) {
- t.Error("shouldReport before limit was reached, got: false, want: true")
- }
- c.onReported(regs)
- }
-
- // Should hit the count limit now.
- regs := newRegs()
- setArgVal(0, 123, regs)
- setArgVal(1, 123456, regs)
- if c.shouldReport(regs) {
- t.Error("shouldReport after limit was reached, got: true, want: false")
- }
-}
diff --git a/runsc/boot/filter/BUILD b/runsc/boot/filter/BUILD
deleted file mode 100644
index ed18f0047..000000000
--- a/runsc/boot/filter/BUILD
+++ /dev/null
@@ -1,28 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "filter",
- srcs = [
- "config.go",
- "config_amd64.go",
- "config_arm64.go",
- "config_profile.go",
- "extra_filters.go",
- "extra_filters_msan.go",
- "extra_filters_race.go",
- "filter.go",
- ],
- visibility = [
- "//runsc/boot:__subpackages__",
- ],
- deps = [
- "//pkg/abi/linux",
- "//pkg/log",
- "//pkg/seccomp",
- "//pkg/sentry/platform",
- "//pkg/tcpip/link/fdbased",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/runsc/boot/filter/filter_amd64_state_autogen.go b/runsc/boot/filter/filter_amd64_state_autogen.go
new file mode 100644
index 000000000..0f27e5568
--- /dev/null
+++ b/runsc/boot/filter/filter_amd64_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build amd64
+
+package filter
diff --git a/runsc/boot/filter/filter_arm64_state_autogen.go b/runsc/boot/filter/filter_arm64_state_autogen.go
new file mode 100644
index 000000000..e87cf5af7
--- /dev/null
+++ b/runsc/boot/filter/filter_arm64_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build arm64
+
+package filter
diff --git a/runsc/boot/filter/filter_race_state_autogen.go b/runsc/boot/filter/filter_race_state_autogen.go
new file mode 100644
index 000000000..c4a858e80
--- /dev/null
+++ b/runsc/boot/filter/filter_race_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build race
+
+package filter
diff --git a/runsc/boot/filter/filter_state_autogen.go b/runsc/boot/filter/filter_state_autogen.go
new file mode 100644
index 000000000..0d245ca8a
--- /dev/null
+++ b/runsc/boot/filter/filter_state_autogen.go
@@ -0,0 +1,7 @@
+// automatically generated by stateify.
+
+// +build go1.1
+// +build !msan,!race
+// +build msan
+
+package filter
diff --git a/runsc/boot/fs_test.go b/runsc/boot/fs_test.go
deleted file mode 100644
index 09ffda628..000000000
--- a/runsc/boot/fs_test.go
+++ /dev/null
@@ -1,252 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package boot
-
-import (
- "reflect"
- "strings"
- "testing"
-
- specs "github.com/opencontainers/runtime-spec/specs-go"
- "gvisor.dev/gvisor/runsc/config"
-)
-
-func TestPodMountHintsHappy(t *testing.T) {
- spec := &specs.Spec{
- Annotations: map[string]string{
- MountPrefix + "mount1.source": "foo",
- MountPrefix + "mount1.type": "tmpfs",
- MountPrefix + "mount1.share": "pod",
-
- MountPrefix + "mount2.source": "bar",
- MountPrefix + "mount2.type": "bind",
- MountPrefix + "mount2.share": "container",
- MountPrefix + "mount2.options": "rw,private",
- },
- }
- podHints, err := newPodMountHints(spec)
- if err != nil {
- t.Fatalf("newPodMountHints failed: %v", err)
- }
-
- // Check that fields were set correctly.
- mount1 := podHints.mounts["mount1"]
- if want := "mount1"; want != mount1.name {
- t.Errorf("mount1 name, want: %q, got: %q", want, mount1.name)
- }
- if want := "foo"; want != mount1.mount.Source {
- t.Errorf("mount1 source, want: %q, got: %q", want, mount1.mount.Source)
- }
- if want := "tmpfs"; want != mount1.mount.Type {
- t.Errorf("mount1 type, want: %q, got: %q", want, mount1.mount.Type)
- }
- if want := pod; want != mount1.share {
- t.Errorf("mount1 type, want: %q, got: %q", want, mount1.share)
- }
- if want := []string(nil); !reflect.DeepEqual(want, mount1.mount.Options) {
- t.Errorf("mount1 type, want: %q, got: %q", want, mount1.mount.Options)
- }
-
- mount2 := podHints.mounts["mount2"]
- if want := "mount2"; want != mount2.name {
- t.Errorf("mount2 name, want: %q, got: %q", want, mount2.name)
- }
- if want := "bar"; want != mount2.mount.Source {
- t.Errorf("mount2 source, want: %q, got: %q", want, mount2.mount.Source)
- }
- if want := "bind"; want != mount2.mount.Type {
- t.Errorf("mount2 type, want: %q, got: %q", want, mount2.mount.Type)
- }
- if want := container; want != mount2.share {
- t.Errorf("mount2 type, want: %q, got: %q", want, mount2.share)
- }
- if want := []string{"private", "rw"}; !reflect.DeepEqual(want, mount2.mount.Options) {
- t.Errorf("mount2 type, want: %q, got: %q", want, mount2.mount.Options)
- }
-}
-
-func TestPodMountHintsErrors(t *testing.T) {
- for _, tst := range []struct {
- name string
- annotations map[string]string
- error string
- }{
- {
- name: "too short",
- annotations: map[string]string{
- MountPrefix + "mount1": "foo",
- },
- error: "invalid mount annotation",
- },
- {
- name: "no name",
- annotations: map[string]string{
- MountPrefix + ".source": "foo",
- },
- error: "invalid mount name",
- },
- {
- name: "missing source",
- annotations: map[string]string{
- MountPrefix + "mount1.type": "tmpfs",
- MountPrefix + "mount1.share": "pod",
- },
- error: "source field",
- },
- {
- name: "missing type",
- annotations: map[string]string{
- MountPrefix + "mount1.source": "foo",
- MountPrefix + "mount1.share": "pod",
- },
- error: "type field",
- },
- {
- name: "missing share",
- annotations: map[string]string{
- MountPrefix + "mount1.source": "foo",
- MountPrefix + "mount1.type": "tmpfs",
- },
- error: "share field",
- },
- {
- name: "invalid field name",
- annotations: map[string]string{
- MountPrefix + "mount1.invalid": "foo",
- },
- error: "invalid mount annotation",
- },
- {
- name: "invalid source",
- annotations: map[string]string{
- MountPrefix + "mount1.source": "",
- MountPrefix + "mount1.type": "tmpfs",
- MountPrefix + "mount1.share": "pod",
- },
- error: "source cannot be empty",
- },
- {
- name: "invalid type",
- annotations: map[string]string{
- MountPrefix + "mount1.source": "foo",
- MountPrefix + "mount1.type": "invalid-type",
- MountPrefix + "mount1.share": "pod",
- },
- error: "invalid type",
- },
- {
- name: "invalid share",
- annotations: map[string]string{
- MountPrefix + "mount1.source": "foo",
- MountPrefix + "mount1.type": "tmpfs",
- MountPrefix + "mount1.share": "invalid-share",
- },
- error: "invalid share",
- },
- {
- name: "invalid options",
- annotations: map[string]string{
- MountPrefix + "mount1.source": "foo",
- MountPrefix + "mount1.type": "tmpfs",
- MountPrefix + "mount1.share": "pod",
- MountPrefix + "mount1.options": "invalid-option",
- },
- error: "unknown mount option",
- },
- {
- name: "duplicate source",
- annotations: map[string]string{
- MountPrefix + "mount1.source": "foo",
- MountPrefix + "mount1.type": "tmpfs",
- MountPrefix + "mount1.share": "pod",
-
- MountPrefix + "mount2.source": "foo",
- MountPrefix + "mount2.type": "bind",
- MountPrefix + "mount2.share": "container",
- },
- error: "have the same mount source",
- },
- } {
- t.Run(tst.name, func(t *testing.T) {
- spec := &specs.Spec{Annotations: tst.annotations}
- podHints, err := newPodMountHints(spec)
- if err == nil || !strings.Contains(err.Error(), tst.error) {
- t.Errorf("newPodMountHints invalid error, want: .*%s.*, got: %v", tst.error, err)
- }
- if podHints != nil {
- t.Errorf("newPodMountHints must return nil on failure: %+v", podHints)
- }
- })
- }
-}
-
-func TestGetMountAccessType(t *testing.T) {
- const source = "foo"
- for _, tst := range []struct {
- name string
- annotations map[string]string
- want config.FileAccessType
- }{
- {
- name: "container=exclusive",
- annotations: map[string]string{
- MountPrefix + "mount1.source": source,
- MountPrefix + "mount1.type": "bind",
- MountPrefix + "mount1.share": "container",
- },
- want: config.FileAccessExclusive,
- },
- {
- name: "pod=shared",
- annotations: map[string]string{
- MountPrefix + "mount1.source": source,
- MountPrefix + "mount1.type": "bind",
- MountPrefix + "mount1.share": "pod",
- },
- want: config.FileAccessShared,
- },
- {
- name: "shared=shared",
- annotations: map[string]string{
- MountPrefix + "mount1.source": source,
- MountPrefix + "mount1.type": "bind",
- MountPrefix + "mount1.share": "shared",
- },
- want: config.FileAccessShared,
- },
- {
- name: "default=shared",
- annotations: map[string]string{
- MountPrefix + "mount1.source": source + "mismatch",
- MountPrefix + "mount1.type": "bind",
- MountPrefix + "mount1.share": "container",
- },
- want: config.FileAccessShared,
- },
- } {
- t.Run(tst.name, func(t *testing.T) {
- spec := &specs.Spec{Annotations: tst.annotations}
- podHints, err := newPodMountHints(spec)
- if err != nil {
- t.Fatalf("newPodMountHints failed: %v", err)
- }
- mounter := containerMounter{hints: podHints}
- conf := &config.Config{FileAccessMounts: config.FileAccessShared}
- if got := mounter.getMountAccessType(conf, &specs.Mount{Source: source}); got != tst.want {
- t.Errorf("getMountAccessType(), want: %v, got: %v", tst.want, got)
- }
- })
- }
-}
diff --git a/runsc/boot/loader_test.go b/runsc/boot/loader_test.go
deleted file mode 100644
index ac6c26d25..000000000
--- a/runsc/boot/loader_test.go
+++ /dev/null
@@ -1,731 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package boot
-
-import (
- "fmt"
- "math/rand"
- "os"
- "reflect"
- "testing"
- "time"
-
- specs "github.com/opencontainers/runtime-spec/specs-go"
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/control/server"
- "gvisor.dev/gvisor/pkg/fd"
- "gvisor.dev/gvisor/pkg/fspath"
- "gvisor.dev/gvisor/pkg/log"
- "gvisor.dev/gvisor/pkg/p9"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/fs"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/unet"
- "gvisor.dev/gvisor/runsc/config"
- "gvisor.dev/gvisor/runsc/fsgofer"
-)
-
-func init() {
- log.SetLevel(log.Debug)
- rand.Seed(time.Now().UnixNano())
- if err := fsgofer.OpenProcSelfFD(); err != nil {
- panic(err)
- }
- config.RegisterFlags()
-}
-
-func testConfig() *config.Config {
- conf, err := config.NewFromFlags()
- if err != nil {
- panic(err)
- }
- // Change test defaults.
- conf.RootDir = "unused_root_dir"
- conf.Network = config.NetworkNone
- conf.DisableSeccomp = true
- return conf
-}
-
-// testSpec returns a simple spec that can be used in tests.
-func testSpec() *specs.Spec {
- return &specs.Spec{
- // The host filesystem root is the sandbox root.
- Root: &specs.Root{
- Path: "/",
- Readonly: true,
- },
- Process: &specs.Process{
- Args: []string{"/bin/true"},
- },
- }
-}
-
-// startGofer starts a new gofer routine serving 'root' path. It returns the
-// sandbox side of the connection, and a function that when called will stop the
-// gofer.
-func startGofer(root string) (int, func(), error) {
- fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_STREAM|unix.SOCK_CLOEXEC, 0)
- if err != nil {
- return 0, nil, err
- }
- sandboxEnd, goferEnd := fds[0], fds[1]
-
- socket, err := unet.NewSocket(goferEnd)
- if err != nil {
- unix.Close(sandboxEnd)
- unix.Close(goferEnd)
- return 0, nil, fmt.Errorf("error creating server on FD %d: %v", goferEnd, err)
- }
- at, err := fsgofer.NewAttachPoint(root, fsgofer.Config{ROMount: true})
- if err != nil {
- return 0, nil, err
- }
- go func() {
- s := p9.NewServer(at)
- if err := s.Handle(socket); err != nil {
- log.Infof("Gofer is stopping. FD: %d, err: %v\n", goferEnd, err)
- }
- }()
- // Closing the gofer socket will stop the gofer and exit goroutine above.
- cleanup := func() {
- if err := socket.Close(); err != nil {
- log.Warningf("Error closing gofer socket: %v", err)
- }
- }
- return sandboxEnd, cleanup, nil
-}
-
-func createLoader(vfsEnabled bool, spec *specs.Spec) (*Loader, func(), error) {
- fd, err := server.CreateSocket(ControlSocketAddr(fmt.Sprintf("%010d", rand.Int())[:10]))
- if err != nil {
- return nil, nil, err
- }
- conf := testConfig()
- conf.VFS2 = vfsEnabled
-
- sandEnd, cleanup, err := startGofer(spec.Root.Path)
- if err != nil {
- return nil, nil, err
- }
-
- // Loader takes ownership of stdio.
- var stdio []int
- for _, f := range []*os.File{os.Stdin, os.Stdout, os.Stderr} {
- newFd, err := unix.Dup(int(f.Fd()))
- if err != nil {
- return nil, nil, err
- }
- stdio = append(stdio, newFd)
- }
-
- args := Args{
- ID: "foo",
- Spec: spec,
- Conf: conf,
- ControllerFD: fd,
- GoferFDs: []int{sandEnd},
- StdioFDs: stdio,
- }
- l, err := New(args)
- if err != nil {
- cleanup()
- return nil, nil, err
- }
- return l, cleanup, nil
-}
-
-// TestRun runs a simple application in a sandbox and checks that it succeeds.
-func TestRun(t *testing.T) {
- doRun(t, false)
-}
-
-// TestRunVFS2 runs TestRun in VFSv2.
-func TestRunVFS2(t *testing.T) {
- doRun(t, true)
-}
-
-func doRun(t *testing.T, vfsEnabled bool) {
- l, cleanup, err := createLoader(vfsEnabled, testSpec())
- if err != nil {
- t.Fatalf("error creating loader: %v", err)
- }
-
- defer l.Destroy()
- defer cleanup()
-
- // Start a goroutine to read the start chan result, otherwise Run will
- // block forever.
- var resultChanErr error
- var wg sync.WaitGroup
- wg.Add(1)
- go func() {
- resultChanErr = <-l.ctrl.manager.startResultChan
- wg.Done()
- }()
-
- // Run the container.
- if err := l.Run(); err != nil {
- t.Errorf("error running container: %v", err)
- }
-
- // We should have not gotten an error on the startResultChan.
- wg.Wait()
- if resultChanErr != nil {
- t.Errorf("error on startResultChan: %v", resultChanErr)
- }
-
- // Wait for the application to exit. It should succeed.
- if status := l.WaitExit(); !status.Exited() || status.ExitStatus() != 0 {
- t.Errorf("application exited with %s, want exit status 0", status)
- }
-}
-
-// TestStartSignal tests that the controller Start message will cause
-// WaitForStartSignal to return.
-func TestStartSignal(t *testing.T) {
- doStartSignal(t, false)
-}
-
-// TestStartSignalVFS2 does TestStartSignal with VFS2.
-func TestStartSignalVFS2(t *testing.T) {
- doStartSignal(t, true)
-}
-
-func doStartSignal(t *testing.T, vfsEnabled bool) {
- l, cleanup, err := createLoader(vfsEnabled, testSpec())
- if err != nil {
- t.Fatalf("error creating loader: %v", err)
- }
- defer l.Destroy()
- defer cleanup()
-
- // We aren't going to wait on this application, so the control server
- // needs to be shut down manually.
- defer l.ctrl.srv.Stop(time.Hour)
-
- // Start a goroutine that calls WaitForStartSignal and writes to a
- // channel when it returns.
- waitFinished := make(chan struct{})
- go func() {
- l.WaitForStartSignal()
- // Pretend that Run() executed and returned no error.
- l.ctrl.manager.startResultChan <- nil
- waitFinished <- struct{}{}
- }()
-
- // Nothing has been written to the channel, so waitFinished should not
- // return. Give it a little bit of time to make sure the goroutine has
- // started.
- select {
- case <-waitFinished:
- t.Errorf("WaitForStartSignal completed but it should not have")
- case <-time.After(50 * time.Millisecond):
- // OK.
- }
-
- // Trigger the control server StartRoot method.
- cid := "foo"
- if err := l.ctrl.manager.StartRoot(&cid, nil); err != nil {
- t.Errorf("error calling StartRoot: %v", err)
- }
-
- // Now WaitForStartSignal should return (within a short amount of
- // time).
- select {
- case <-waitFinished:
- // OK.
- case <-time.After(50 * time.Millisecond):
- t.Errorf("WaitForStartSignal did not complete but it should have")
- }
-
-}
-
-type CreateMountTestcase struct {
- name string
- // Spec that will be used to create the mount manager. Note
- // that we can't mount procfs without a kernel, so each spec
- // MUST contain something other than procfs mounted at /proc.
- spec specs.Spec
- // Paths that are expected to exist in the resulting fs.
- expectedPaths []string
-}
-
-func createMountTestcases() []*CreateMountTestcase {
- testCases := []*CreateMountTestcase{
- {
- // Only proc.
- name: "only proc mount",
- spec: specs.Spec{
- Root: &specs.Root{
- Path: os.TempDir(),
- Readonly: true,
- },
- Mounts: []specs.Mount{
- {
- Destination: "/proc",
- Type: "tmpfs",
- },
- },
- },
- // /proc, /dev, and /sys should always be mounted.
- expectedPaths: []string{"/proc", "/dev", "/sys"},
- },
- {
- // Mount at a deep path, with many components that do
- // not exist in the root.
- name: "deep mount path",
- spec: specs.Spec{
- Root: &specs.Root{
- Path: os.TempDir(),
- Readonly: true,
- },
- Mounts: []specs.Mount{
- {
- Destination: "/some/very/very/deep/path",
- Type: "tmpfs",
- },
- {
- Destination: "/proc",
- Type: "tmpfs",
- },
- },
- },
- // /some/deep/path should be mounted, along with /proc, /dev, and /sys.
- expectedPaths: []string{"/some/very/very/deep/path", "/proc", "/dev", "/sys"},
- },
- {
- // Mounts are nested inside each other.
- name: "nested mounts",
- spec: specs.Spec{
- Root: &specs.Root{
- Path: os.TempDir(),
- Readonly: true,
- },
- Mounts: []specs.Mount{
- {
- Destination: "/proc",
- Type: "tmpfs",
- },
- {
- Destination: "/foo",
- Type: "tmpfs",
- },
- {
- Destination: "/foo/qux",
- Type: "tmpfs",
- },
- {
- // File mounts with the same prefix.
- Destination: "/foo/qux-quz",
- Type: "tmpfs",
- },
- {
- Destination: "/foo/bar",
- Type: "tmpfs",
- },
- {
- Destination: "/foo/bar/baz",
- Type: "tmpfs",
- },
- {
- // A deep path that is in foo but not the other mounts.
- Destination: "/foo/some/very/very/deep/path",
- Type: "tmpfs",
- },
- },
- },
- expectedPaths: []string{"/foo", "/foo/bar", "/foo/bar/baz", "/foo/qux",
- "/foo/qux-quz", "/foo/some/very/very/deep/path", "/proc", "/dev", "/sys"},
- },
- {
- name: "mount inside /dev",
- spec: specs.Spec{
- Root: &specs.Root{
- Path: os.TempDir(),
- Readonly: true,
- },
- Mounts: []specs.Mount{
- {
- Destination: "/proc",
- Type: "tmpfs",
- },
- {
- Destination: "/dev",
- Type: "tmpfs",
- },
- {
- // Mounted by runsc by default.
- Destination: "/dev/fd",
- Type: "tmpfs",
- },
- {
- // Mount with the same prefix.
- Destination: "/dev/fd-foo",
- Type: "tmpfs",
- },
- {
- // Unsupported fs type.
- Destination: "/dev/mqueue",
- Type: "mqueue",
- },
- {
- Destination: "/dev/foo",
- Type: "tmpfs",
- },
- {
- Destination: "/dev/bar",
- Type: "tmpfs",
- },
- },
- },
- expectedPaths: []string{"/proc", "/dev", "/dev/fd-foo", "/dev/foo", "/dev/bar", "/sys"},
- },
- {
- name: "mounts inside mandatory mounts",
- spec: specs.Spec{
- Root: &specs.Root{
- Path: os.TempDir(),
- Readonly: true,
- },
- Mounts: []specs.Mount{
- {
- Destination: "/proc",
- Type: "tmpfs",
- },
- {
- Destination: "/sys/bar",
- Type: "tmpfs",
- },
- {
- Destination: "/tmp/baz",
- Type: "tmpfs",
- },
- {
- Destination: "/dev/goo",
- Type: "tmpfs",
- },
- },
- },
- expectedPaths: []string{"/proc", "/sys", "/sys/bar", "/tmp", "/tmp/baz", "/dev/goo"},
- },
- }
-
- return testCases
-}
-
-// Test that MountNamespace can be created with various specs.
-func TestCreateMountNamespace(t *testing.T) {
- for _, tc := range createMountTestcases() {
- t.Run(tc.name, func(t *testing.T) {
- conf := testConfig()
- ctx := contexttest.Context(t)
-
- sandEnd, cleanup, err := startGofer(tc.spec.Root.Path)
- if err != nil {
- t.Fatalf("failed to create gofer: %v", err)
- }
- defer cleanup()
-
- info := containerInfo{
- conf: conf,
- spec: &tc.spec,
- goferFDs: []*fd.FD{fd.New(sandEnd)},
- }
-
- mntr := newContainerMounter(&info, nil, &podMountHints{}, false /* vfs2Enabled */)
- mns, err := mntr.createMountNamespace(ctx, conf)
- if err != nil {
- t.Fatalf("failed to create mount namespace: %v", err)
- }
- ctx = fs.WithRoot(ctx, mns.Root())
- if err := mntr.mountSubmounts(ctx, conf, mns); err != nil {
- t.Fatalf("failed to create mount namespace: %v", err)
- }
-
- root := mns.Root()
- defer root.DecRef(ctx)
- for _, p := range tc.expectedPaths {
- maxTraversals := uint(0)
- if d, err := mns.FindInode(ctx, root, root, p, &maxTraversals); err != nil {
- t.Errorf("expected path %v to exist with spec %v, but got error %v", p, tc.spec, err)
- } else {
- d.DecRef(ctx)
- }
- }
- })
- }
-}
-
-// Test that MountNamespace can be created with various specs using VFS2.
-func TestCreateMountNamespaceVFS2(t *testing.T) {
- for _, tc := range createMountTestcases() {
- t.Run(tc.name, func(t *testing.T) {
- spec := testSpec()
- spec.Mounts = tc.spec.Mounts
- spec.Root = tc.spec.Root
-
- t.Logf("Using root: %q", spec.Root.Path)
- l, loaderCleanup, err := createLoader(true /* VFS2 Enabled */, spec)
- if err != nil {
- t.Fatalf("failed to create loader: %v", err)
- }
- defer l.Destroy()
- defer loaderCleanup()
-
- mntr := newContainerMounter(&l.root, l.k, l.mountHints, true /* vfs2Enabled */)
- if err := mntr.processHints(l.root.conf, l.root.procArgs.Credentials); err != nil {
- t.Fatalf("failed process hints: %v", err)
- }
-
- ctx := l.k.SupervisorContext()
- mns, err := mntr.mountAll(l.root.conf, &l.root.procArgs)
- if err != nil {
- t.Fatalf("mountAll: %v", err)
- }
-
- root := mns.Root()
- root.IncRef()
- defer root.DecRef(ctx)
- for _, p := range tc.expectedPaths {
- target := &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(p),
- }
-
- if d, err := l.k.VFS().GetDentryAt(ctx, l.root.procArgs.Credentials, target, &vfs.GetDentryOptions{}); err != nil {
- t.Errorf("expected path %v to exist with spec %v, but got error %v", p, tc.spec, err)
- } else {
- d.DecRef(ctx)
- }
- }
- })
- }
-}
-
-// TestRestoreEnvironment tests that the correct mounts are collected from the spec and config
-// in order to build the environment for restoring.
-func TestRestoreEnvironment(t *testing.T) {
- testCases := []struct {
- name string
- spec *specs.Spec
- ioFDs []int
- errorExpected bool
- expectedRenv fs.RestoreEnvironment
- }{
- {
- name: "basic spec test",
- spec: &specs.Spec{
- Root: &specs.Root{
- Path: os.TempDir(),
- Readonly: true,
- },
- Mounts: []specs.Mount{
- {
- Destination: "/some/very/very/deep/path",
- Type: "tmpfs",
- },
- {
- Destination: "/proc",
- Type: "tmpfs",
- },
- },
- },
- ioFDs: []int{0},
- errorExpected: false,
- expectedRenv: fs.RestoreEnvironment{
- MountSources: map[string][]fs.MountArgs{
- "9p": {
- {
- Dev: "9pfs-/",
- Flags: fs.MountSourceFlags{ReadOnly: true},
- DataString: "trans=fd,rfdno=0,wfdno=0,privateunixsocket=true",
- },
- },
- "tmpfs": {
- {
- Dev: "none",
- },
- {
- Dev: "none",
- },
- {
- Dev: "none",
- },
- },
- "devtmpfs": {
- {
- Dev: "none",
- },
- },
- "devpts": {
- {
- Dev: "none",
- },
- },
- "sysfs": {
- {
- Dev: "none",
- },
- },
- },
- },
- },
- {
- name: "bind type test",
- spec: &specs.Spec{
- Root: &specs.Root{
- Path: os.TempDir(),
- Readonly: true,
- },
- Mounts: []specs.Mount{
- {
- Destination: "/dev/fd-foo",
- Type: "bind",
- },
- },
- },
- ioFDs: []int{0, 1},
- errorExpected: false,
- expectedRenv: fs.RestoreEnvironment{
- MountSources: map[string][]fs.MountArgs{
- "9p": {
- {
- Dev: "9pfs-/",
- Flags: fs.MountSourceFlags{ReadOnly: true},
- DataString: "trans=fd,rfdno=0,wfdno=0,privateunixsocket=true",
- },
- {
- Dev: "9pfs-/dev/fd-foo",
- DataString: "trans=fd,rfdno=1,wfdno=1,privateunixsocket=true,cache=remote_revalidating",
- },
- },
- "tmpfs": {
- {
- Dev: "none",
- },
- },
- "devtmpfs": {
- {
- Dev: "none",
- },
- },
- "devpts": {
- {
- Dev: "none",
- },
- },
- "proc": {
- {
- Dev: "none",
- },
- },
- "sysfs": {
- {
- Dev: "none",
- },
- },
- },
- },
- },
- {
- name: "options test",
- spec: &specs.Spec{
- Root: &specs.Root{
- Path: os.TempDir(),
- Readonly: true,
- },
- Mounts: []specs.Mount{
- {
- Destination: "/dev/fd-foo",
- Type: "tmpfs",
- Options: []string{"uid=1022", "noatime"},
- },
- },
- },
- ioFDs: []int{0},
- errorExpected: false,
- expectedRenv: fs.RestoreEnvironment{
- MountSources: map[string][]fs.MountArgs{
- "9p": {
- {
- Dev: "9pfs-/",
- Flags: fs.MountSourceFlags{ReadOnly: true},
- DataString: "trans=fd,rfdno=0,wfdno=0,privateunixsocket=true",
- },
- },
- "tmpfs": {
- {
- Dev: "none",
- Flags: fs.MountSourceFlags{NoAtime: true},
- DataString: "uid=1022",
- },
- {
- Dev: "none",
- },
- },
- "devtmpfs": {
- {
- Dev: "none",
- },
- },
- "devpts": {
- {
- Dev: "none",
- },
- },
- "proc": {
- {
- Dev: "none",
- },
- },
- "sysfs": {
- {
- Dev: "none",
- },
- },
- },
- },
- },
- }
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- conf := testConfig()
- var ioFDs []*fd.FD
- for _, ioFD := range tc.ioFDs {
- ioFDs = append(ioFDs, fd.New(ioFD))
- }
- info := containerInfo{
- conf: conf,
- spec: tc.spec,
- goferFDs: ioFDs,
- }
- mntr := newContainerMounter(&info, nil, &podMountHints{}, false /* vfs2Enabled */)
- actualRenv, err := mntr.createRestoreEnvironment(conf)
- if !tc.errorExpected && err != nil {
- t.Fatalf("could not create restore environment for test:%s", tc.name)
- } else if tc.errorExpected {
- if err == nil {
- t.Errorf("expected an error, but no error occurred.")
- }
- } else {
- if !reflect.DeepEqual(*actualRenv, tc.expectedRenv) {
- t.Errorf("restore environments did not match for test:%s\ngot:%+v\nwant:%+v\n", tc.name, *actualRenv, tc.expectedRenv)
- }
- }
- })
- }
-}
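Aside: the deleted TestStartSignal above relies on a standard Go pattern for asserting that a call blocks until it is signalled: run the call in a goroutine, report completion on a channel, and race that channel against time.After in a select. The sketch below is a minimal, self-contained illustration of that pattern; the waiter type is hypothetical and only stands in for the loader.

package main

import (
	"fmt"
	"time"
)

// waiter blocks in wait() until start() is called. Hypothetical stand-in
// for a component like the loader's WaitForStartSignal.
type waiter struct{ ch chan struct{} }

func newWaiter() *waiter { return &waiter{ch: make(chan struct{})} }
func (w *waiter) wait()  { <-w.ch }
func (w *waiter) start() { close(w.ch) }

func main() {
	w := newWaiter()

	// Report on a channel when wait() returns.
	done := make(chan struct{})
	go func() {
		w.wait()
		close(done)
	}()

	// Before start(), wait() must still be blocked: the timeout should win.
	select {
	case <-done:
		fmt.Println("unexpected: wait() returned early")
	case <-time.After(50 * time.Millisecond):
		fmt.Println("ok: wait() is still blocked")
	}

	// After start(), wait() should return promptly: done should win.
	w.start()
	select {
	case <-done:
		fmt.Println("ok: wait() returned after start()")
	case <-time.After(50 * time.Millisecond):
		fmt.Println("unexpected: wait() did not return")
	}
}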
diff --git a/runsc/boot/platforms/BUILD b/runsc/boot/platforms/BUILD
deleted file mode 100644
index 77774f43c..000000000
--- a/runsc/boot/platforms/BUILD
+++ /dev/null
@@ -1,15 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "platforms",
- srcs = ["platforms.go"],
- visibility = [
- "//runsc:__subpackages__",
- ],
- deps = [
- "//pkg/sentry/platform/kvm",
- "//pkg/sentry/platform/ptrace",
- ],
-)
diff --git a/runsc/boot/platforms/platforms_state_autogen.go b/runsc/boot/platforms/platforms_state_autogen.go
new file mode 100644
index 000000000..8676d25c1
--- /dev/null
+++ b/runsc/boot/platforms/platforms_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package platforms
diff --git a/runsc/boot/pprof/BUILD b/runsc/boot/pprof/BUILD
deleted file mode 100644
index 29cb42b2f..000000000
--- a/runsc/boot/pprof/BUILD
+++ /dev/null
@@ -1,11 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "pprof",
- srcs = ["pprof.go"],
- visibility = [
- "//runsc:__subpackages__",
- ],
-)
diff --git a/runsc/boot/pprof/pprof_state_autogen.go b/runsc/boot/pprof/pprof_state_autogen.go
new file mode 100644
index 000000000..64bdd7b52
--- /dev/null
+++ b/runsc/boot/pprof/pprof_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build go1.1
+
+package pprof
diff --git a/runsc/cgroup/BUILD b/runsc/cgroup/BUILD
deleted file mode 100644
index f7e892584..000000000
--- a/runsc/cgroup/BUILD
+++ /dev/null
@@ -1,28 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "cgroup",
- srcs = ["cgroup.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/cleanup",
- "//pkg/log",
- "@com_github_cenkalti_backoff//:go_default_library",
- "@com_github_opencontainers_runtime_spec//specs-go:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "cgroup_test",
- size = "small",
- srcs = ["cgroup_test.go"],
- library = ":cgroup",
- tags = ["local"],
- deps = [
- "//pkg/test/testutil",
- "@com_github_opencontainers_runtime_spec//specs-go:go_default_library",
- ],
-)
diff --git a/runsc/cgroup/cgroup_state_autogen.go b/runsc/cgroup/cgroup_state_autogen.go
new file mode 100644
index 000000000..934ed169b
--- /dev/null
+++ b/runsc/cgroup/cgroup_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package cgroup
diff --git a/runsc/cgroup/cgroup_test.go b/runsc/cgroup/cgroup_test.go
deleted file mode 100644
index 1431b4e8f..000000000
--- a/runsc/cgroup/cgroup_test.go
+++ /dev/null
@@ -1,865 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cgroup
-
-import (
- "io/ioutil"
- "os"
- "path/filepath"
- "strings"
- "testing"
-
- specs "github.com/opencontainers/runtime-spec/specs-go"
- "gvisor.dev/gvisor/pkg/test/testutil"
-)
-
-var debianMountinfo = `
-35 24 0:30 / /sys/fs/cgroup ro shared:9 - tmpfs tmpfs ro
-36 35 0:31 / /sys/fs/cgroup/unified rw shared:10 - cgroup2 cgroup2 rw
-37 35 0:32 / /sys/fs/cgroup/systemd rw - cgroup cgroup rw,name=systemd
-41 35 0:36 / /sys/fs/cgroup/cpu,cpuacct rw shared:16 - cgroup cgroup rw,cpu,cpuacct
-42 35 0:37 / /sys/fs/cgroup/freezer rw shared:17 - cgroup cgroup rw,freezer
-43 35 0:38 / /sys/fs/cgroup/hugetlb rw shared:18 - cgroup cgroup rw,hugetlb
-44 35 0:39 / /sys/fs/cgroup/cpuset rw shared:19 - cgroup cgroup rw,cpuset
-45 35 0:40 / /sys/fs/cgroup/net_cls,net_prio rw shared:20 - cgroup cgroup rw,net_cls,net_prio
-46 35 0:41 / /sys/fs/cgroup/pids rw shared:21 - cgroup cgroup rw,pids
-47 35 0:42 / /sys/fs/cgroup/perf_event rw shared:22 - cgroup cgroup rw,perf_event
-48 35 0:43 / /sys/fs/cgroup/memory rw shared:23 - cgroup cgroup rw,memory
-49 35 0:44 / /sys/fs/cgroup/blkio rw shared:24 - cgroup cgroup rw,blkio
-50 35 0:45 / /sys/fs/cgroup/devices rw shared:25 - cgroup cgroup rw,devices
-51 35 0:46 / /sys/fs/cgroup/rdma rw shared:26 - cgroup cgroup rw,rdma
-`
-
-var dindMountinfo = `
-05 04 0:64 / /sys/fs/cgroup rw - tmpfs tmpfs rw,mode=755
-06 05 0:32 /docker/136 /sys/fs/cgroup/systemd ro master:11 - cgroup cgroup rw,xattr,name=systemd
-07 05 0:36 /docker/136 /sys/fs/cgroup/cpu,cpuacct ro master:16 - cgroup cgroup rw,cpu,cpuacct
-08 05 0:37 /docker/136 /sys/fs/cgroup/freezer ro master:17 - cgroup cgroup rw,freezer
-09 05 0:38 /docker/136 /sys/fs/cgroup/hugetlb ro master:18 - cgroup cgroup rw,hugetlb
-10 05 0:39 /docker/136 /sys/fs/cgroup/cpuset ro master:19 - cgroup cgroup rw,cpuset
-11 05 0:40 /docker/136 /sys/fs/cgroup/net_cls,net_prio ro master:20 - cgroup cgroup rw,net_cls,net_prio
-12 05 0:41 /docker/136 /sys/fs/cgroup/pids ro master:21 - cgroup cgroup rw,pids
-13 05 0:42 /docker/136 /sys/fs/cgroup/perf_event ro master:22 - cgroup cgroup rw,perf_event
-14 05 0:43 /docker/136 /sys/fs/cgroup/memory ro master:23 - cgroup cgroup rw,memory
-16 05 0:44 /docker/136 /sys/fs/cgroup/blkio ro master:24 - cgroup cgroup rw,blkio
-17 05 0:45 /docker/136 /sys/fs/cgroup/devices ro master:25 - cgroup cgroup rw,devices
-18 05 0:46 / /sys/fs/cgroup/rdma ro master:26 - cgroup cgroup rw,rdma
-`
-
-func TestUninstallEnoent(t *testing.T) {
- c := Cgroup{
- // Use a non-existent name.
- Name: "runsc-test-uninstall-656e6f656e740a",
- Own: make(map[string]bool),
- }
- for key := range controllers {
- c.Own[key] = true
- }
- if err := c.Uninstall(); err != nil {
- t.Errorf("Uninstall() failed: %v", err)
- }
-}
-
-func TestCountCpuset(t *testing.T) {
- for _, tc := range []struct {
- str string
- want int
- error bool
- }{
- {str: "0", want: 1},
- {str: "0,1,2,8,9,10", want: 6},
- {str: "0-1", want: 2},
- {str: "0-7", want: 8},
- {str: "0-7,16,32-39,64,65", want: 19},
- {str: "a", error: true},
- {str: "5-a", error: true},
- {str: "a-5", error: true},
- {str: "-10", error: true},
- {str: "15-", error: true},
- {str: "-", error: true},
- {str: "--", error: true},
- } {
- t.Run(tc.str, func(t *testing.T) {
- got, err := countCpuset(tc.str)
- if tc.error {
- if err == nil {
- t.Errorf("countCpuset(%q) should have failed", tc.str)
- }
- } else {
- if err != nil {
- t.Errorf("countCpuset(%q) failed: %v", tc.str, err)
- }
- if tc.want != got {
- t.Errorf("countCpuset(%q) want: %d, got: %d", tc.str, tc.want, got)
- }
- }
- })
- }
-}
-
-func uint16Ptr(v uint16) *uint16 {
- return &v
-}
-
-func uint32Ptr(v uint32) *uint32 {
- return &v
-}
-
-func int64Ptr(v int64) *int64 {
- return &v
-}
-
-func uint64Ptr(v uint64) *uint64 {
- return &v
-}
-
-func boolPtr(v bool) *bool {
- return &v
-}
-
-func checkDir(t *testing.T, dir string, contents map[string]string) {
- all, err := ioutil.ReadDir(dir)
- if err != nil {
- t.Fatalf("ReadDir(%q): %v", dir, err)
- }
- fileCount := 0
- for _, file := range all {
- if file.IsDir() {
- // Only want to compare files.
- continue
- }
- fileCount++
-
- want, ok := contents[file.Name()]
- if !ok {
- t.Errorf("file not expected: %q", file.Name())
- continue
- }
- gotBytes, err := ioutil.ReadFile(filepath.Join(dir, file.Name()))
- if err != nil {
- t.Fatal(err.Error())
- }
- got := strings.TrimSuffix(string(gotBytes), "\n")
- if got != want {
- t.Errorf("wrong file content, file: %q, want: %q, got: %q", file.Name(), want, got)
- }
- }
- if fileCount != len(contents) {
- t.Errorf("file is missing, want: %v, got: %v", contents, all)
- }
-}
-
-func makeLinuxWeightDevice(major, minor int64, weight, leafWeight *uint16) specs.LinuxWeightDevice {
- rv := specs.LinuxWeightDevice{
- Weight: weight,
- LeafWeight: leafWeight,
- }
- rv.Major = major
- rv.Minor = minor
- return rv
-}
-
-func makeLinuxThrottleDevice(major, minor int64, rate uint64) specs.LinuxThrottleDevice {
- rv := specs.LinuxThrottleDevice{
- Rate: rate,
- }
- rv.Major = major
- rv.Minor = minor
- return rv
-}
-
-func TestBlockIO(t *testing.T) {
- for _, tc := range []struct {
- name string
- spec *specs.LinuxBlockIO
- wants map[string]string
- }{
- {
- name: "simple",
- spec: &specs.LinuxBlockIO{
- Weight: uint16Ptr(1),
- LeafWeight: uint16Ptr(2),
- },
- wants: map[string]string{
- "blkio.weight": "1",
- "blkio.leaf_weight": "2",
- },
- },
- {
- name: "weight_device",
- spec: &specs.LinuxBlockIO{
- WeightDevice: []specs.LinuxWeightDevice{
- makeLinuxWeightDevice(1, 2, uint16Ptr(3), uint16Ptr(4)),
- },
- },
- wants: map[string]string{
- "blkio.weight_device": "1:2 3",
- "blkio.leaf_weight_device": "1:2 4",
- },
- },
- {
- name: "weight_device_nil_values",
- spec: &specs.LinuxBlockIO{
- WeightDevice: []specs.LinuxWeightDevice{
- makeLinuxWeightDevice(1, 2, nil, nil),
- },
- },
- },
- {
- name: "throttle",
- spec: &specs.LinuxBlockIO{
- ThrottleReadBpsDevice: []specs.LinuxThrottleDevice{
- makeLinuxThrottleDevice(1, 2, 3),
- },
- ThrottleReadIOPSDevice: []specs.LinuxThrottleDevice{
- makeLinuxThrottleDevice(4, 5, 6),
- },
- ThrottleWriteBpsDevice: []specs.LinuxThrottleDevice{
- makeLinuxThrottleDevice(7, 8, 9),
- },
- ThrottleWriteIOPSDevice: []specs.LinuxThrottleDevice{
- makeLinuxThrottleDevice(10, 11, 12),
- },
- },
- wants: map[string]string{
- "blkio.throttle.read_bps_device": "1:2 3",
- "blkio.throttle.read_iops_device": "4:5 6",
- "blkio.throttle.write_bps_device": "7:8 9",
- "blkio.throttle.write_iops_device": "10:11 12",
- },
- },
- {
- name: "nil_values",
- spec: &specs.LinuxBlockIO{},
- },
- {
- name: "nil",
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- dir, err := ioutil.TempDir(testutil.TmpDir(), "cgroup")
- if err != nil {
- t.Fatalf("error creating temporary directory: %v", err)
- }
- defer os.RemoveAll(dir)
-
- spec := &specs.LinuxResources{
- BlockIO: tc.spec,
- }
- ctrlr := blockIO{}
- if err := ctrlr.set(spec, dir); err != nil {
- t.Fatalf("ctrlr.set(): %v", err)
- }
- checkDir(t, dir, tc.wants)
- })
- }
-}
-
-func TestCPU(t *testing.T) {
- for _, tc := range []struct {
- name string
- spec *specs.LinuxCPU
- wants map[string]string
- }{
- {
- name: "all",
- spec: &specs.LinuxCPU{
- Shares: uint64Ptr(1),
- Quota: int64Ptr(2),
- Period: uint64Ptr(3),
- RealtimeRuntime: int64Ptr(4),
- RealtimePeriod: uint64Ptr(5),
- },
- wants: map[string]string{
- "cpu.shares": "1",
- "cpu.cfs_quota_us": "2",
- "cpu.cfs_period_us": "3",
- "cpu.rt_runtime_us": "4",
- "cpu.rt_period_us": "5",
- },
- },
- {
- name: "nil_values",
- spec: &specs.LinuxCPU{},
- },
- {
- name: "nil",
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- dir, err := ioutil.TempDir(testutil.TmpDir(), "cgroup")
- if err != nil {
- t.Fatalf("error creating temporary directory: %v", err)
- }
- defer os.RemoveAll(dir)
-
- spec := &specs.LinuxResources{
- CPU: tc.spec,
- }
- ctrlr := cpu{}
- if err := ctrlr.set(spec, dir); err != nil {
- t.Fatalf("ctrlr.set(): %v", err)
- }
- checkDir(t, dir, tc.wants)
- })
- }
-}
-
-func TestCPUSet(t *testing.T) {
- for _, tc := range []struct {
- name string
- spec *specs.LinuxCPU
- wants map[string]string
- }{
- {
- name: "all",
- spec: &specs.LinuxCPU{
- Cpus: "foo",
- Mems: "bar",
- },
- wants: map[string]string{
- "cpuset.cpus": "foo",
- "cpuset.mems": "bar",
- },
- },
- // Don't test nil values because they are copied from the parent.
- // See TestCPUSetAncestor().
- } {
- t.Run(tc.name, func(t *testing.T) {
- dir, err := ioutil.TempDir(testutil.TmpDir(), "cgroup")
- if err != nil {
- t.Fatalf("error creating temporary directory: %v", err)
- }
- defer os.RemoveAll(dir)
-
- spec := &specs.LinuxResources{
- CPU: tc.spec,
- }
- ctrlr := cpuSet{}
- if err := ctrlr.set(spec, dir); err != nil {
- t.Fatalf("ctrlr.set(): %v", err)
- }
- checkDir(t, dir, tc.wants)
- })
- }
-}
-
-// TestCPUSetAncestor checks that, when values are not available, they are
-// read from the parent directory.
-func TestCPUSetAncestor(t *testing.T) {
- // Prepare master directory with cgroup files that will be propagated to
- // children.
- grandpa, err := ioutil.TempDir(testutil.TmpDir(), "cgroup")
- if err != nil {
- t.Fatalf("error creating temporary directory: %v", err)
- }
- defer os.RemoveAll(grandpa)
-
- if err := ioutil.WriteFile(filepath.Join(grandpa, "cpuset.cpus"), []byte("parent-cpus"), 0666); err != nil {
- t.Fatalf("ioutil.WriteFile(): %v", err)
- }
- if err := ioutil.WriteFile(filepath.Join(grandpa, "cpuset.mems"), []byte("parent-mems"), 0666); err != nil {
- t.Fatalf("ioutil.WriteFile(): %v", err)
- }
-
- for _, tc := range []struct {
- name string
- spec *specs.LinuxCPU
- }{
- {
- name: "nil_values",
- spec: &specs.LinuxCPU{},
- },
- {
- name: "nil",
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- // Create empty files in intermediate directory. They should be ignored
- // when reading, and then populated from parent.
- parent, err := ioutil.TempDir(grandpa, "parent")
- if err != nil {
- t.Fatalf("error creating temporary directory: %v", err)
- }
- defer os.RemoveAll(parent)
- if _, err := os.Create(filepath.Join(parent, "cpuset.cpus")); err != nil {
- t.Fatalf("os.Create(): %v", err)
- }
- if _, err := os.Create(filepath.Join(parent, "cpuset.mems")); err != nil {
- t.Fatalf("os.Create(): %v", err)
- }
-
-			// cgroup files must exist.
- dir, err := ioutil.TempDir(parent, "child")
- if err != nil {
- t.Fatalf("error creating temporary directory: %v", err)
- }
- if _, err := os.Create(filepath.Join(dir, "cpuset.cpus")); err != nil {
- t.Fatalf("os.Create(): %v", err)
- }
- if _, err := os.Create(filepath.Join(dir, "cpuset.mems")); err != nil {
- t.Fatalf("os.Create(): %v", err)
- }
-
- spec := &specs.LinuxResources{
- CPU: tc.spec,
- }
- ctrlr := cpuSet{}
- if err := ctrlr.set(spec, dir); err != nil {
- t.Fatalf("ctrlr.set(): %v", err)
- }
- want := map[string]string{
- "cpuset.cpus": "parent-cpus",
- "cpuset.mems": "parent-mems",
- }
-			// Both parent and dir must have been populated from grandpa.
- checkDir(t, parent, want)
- checkDir(t, dir, want)
- })
- }
-}
-
-func TestHugeTlb(t *testing.T) {
- for _, tc := range []struct {
- name string
- spec []specs.LinuxHugepageLimit
- wants map[string]string
- }{
- {
- name: "single",
- spec: []specs.LinuxHugepageLimit{
- {
- Pagesize: "1G",
- Limit: 123,
- },
- },
- wants: map[string]string{
- "hugetlb.1G.limit_in_bytes": "123",
- },
- },
- {
- name: "multiple",
- spec: []specs.LinuxHugepageLimit{
- {
- Pagesize: "1G",
- Limit: 123,
- },
- {
- Pagesize: "2G",
- Limit: 456,
- },
- {
- Pagesize: "1P",
- Limit: 789,
- },
- },
- wants: map[string]string{
- "hugetlb.1G.limit_in_bytes": "123",
- "hugetlb.2G.limit_in_bytes": "456",
- "hugetlb.1P.limit_in_bytes": "789",
- },
- },
- {
- name: "nil",
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- dir, err := ioutil.TempDir(testutil.TmpDir(), "cgroup")
- if err != nil {
- t.Fatalf("error creating temporary directory: %v", err)
- }
- defer os.RemoveAll(dir)
-
- spec := &specs.LinuxResources{
- HugepageLimits: tc.spec,
- }
- ctrlr := hugeTLB{}
- if err := ctrlr.set(spec, dir); err != nil {
- t.Fatalf("ctrlr.set(): %v", err)
- }
- checkDir(t, dir, tc.wants)
- })
- }
-}
-
-func TestMemory(t *testing.T) {
- for _, tc := range []struct {
- name string
- spec *specs.LinuxMemory
- wants map[string]string
- }{
- {
- name: "all",
- spec: &specs.LinuxMemory{
- Limit: int64Ptr(1),
- Reservation: int64Ptr(2),
- Swap: int64Ptr(3),
- Kernel: int64Ptr(4),
- KernelTCP: int64Ptr(5),
- Swappiness: uint64Ptr(6),
- DisableOOMKiller: boolPtr(true),
- },
- wants: map[string]string{
- "memory.limit_in_bytes": "1",
- "memory.soft_limit_in_bytes": "2",
- "memory.memsw.limit_in_bytes": "3",
- "memory.kmem.limit_in_bytes": "4",
- "memory.kmem.tcp.limit_in_bytes": "5",
- "memory.swappiness": "6",
- "memory.oom_control": "1",
- },
- },
- {
-			// DisableOOMKiller should only be written when set to true.
- name: "oomkiller",
- spec: &specs.LinuxMemory{
- DisableOOMKiller: boolPtr(false),
- },
- },
- {
- name: "nil_values",
- spec: &specs.LinuxMemory{},
- },
- {
- name: "nil",
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- dir, err := ioutil.TempDir(testutil.TmpDir(), "cgroup")
- if err != nil {
- t.Fatalf("error creating temporary directory: %v", err)
- }
- defer os.RemoveAll(dir)
-
- spec := &specs.LinuxResources{
- Memory: tc.spec,
- }
- ctrlr := memory{}
- if err := ctrlr.set(spec, dir); err != nil {
- t.Fatalf("ctrlr.set(): %v", err)
- }
- checkDir(t, dir, tc.wants)
- })
- }
-}
-
-func TestNetworkClass(t *testing.T) {
- for _, tc := range []struct {
- name string
- spec *specs.LinuxNetwork
- wants map[string]string
- }{
- {
- name: "all",
- spec: &specs.LinuxNetwork{
- ClassID: uint32Ptr(1),
- },
- wants: map[string]string{
- "net_cls.classid": "1",
- },
- },
- {
- name: "nil_values",
- spec: &specs.LinuxNetwork{},
- },
- {
- name: "nil",
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- dir, err := ioutil.TempDir(testutil.TmpDir(), "cgroup")
- if err != nil {
- t.Fatalf("error creating temporary directory: %v", err)
- }
- defer os.RemoveAll(dir)
-
- spec := &specs.LinuxResources{
- Network: tc.spec,
- }
- ctrlr := networkClass{}
- if err := ctrlr.set(spec, dir); err != nil {
- t.Fatalf("ctrlr.set(): %v", err)
- }
- checkDir(t, dir, tc.wants)
- })
- }
-}
-
-func TestNetworkPriority(t *testing.T) {
- for _, tc := range []struct {
- name string
- spec *specs.LinuxNetwork
- wants map[string]string
- }{
- {
- name: "all",
- spec: &specs.LinuxNetwork{
- Priorities: []specs.LinuxInterfacePriority{
- {
- Name: "foo",
- Priority: 1,
- },
- },
- },
- wants: map[string]string{
- "net_prio.ifpriomap": "foo 1",
- },
- },
- {
- name: "nil_values",
- spec: &specs.LinuxNetwork{},
- },
- {
- name: "nil",
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- dir, err := ioutil.TempDir(testutil.TmpDir(), "cgroup")
- if err != nil {
- t.Fatalf("error creating temporary directory: %v", err)
- }
- defer os.RemoveAll(dir)
-
- spec := &specs.LinuxResources{
- Network: tc.spec,
- }
- ctrlr := networkPrio{}
- if err := ctrlr.set(spec, dir); err != nil {
- t.Fatalf("ctrlr.set(): %v", err)
- }
- checkDir(t, dir, tc.wants)
- })
- }
-}
-
-func TestPids(t *testing.T) {
- for _, tc := range []struct {
- name string
- spec *specs.LinuxPids
- wants map[string]string
- }{
- {
- name: "all",
- spec: &specs.LinuxPids{Limit: 1},
- wants: map[string]string{
- "pids.max": "1",
- },
- },
- {
- name: "nil_values",
- spec: &specs.LinuxPids{},
- },
- {
- name: "nil",
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- dir, err := ioutil.TempDir(testutil.TmpDir(), "cgroup")
- if err != nil {
- t.Fatalf("error creating temporary directory: %v", err)
- }
- defer os.RemoveAll(dir)
-
- spec := &specs.LinuxResources{
- Pids: tc.spec,
- }
- ctrlr := pids{}
- if err := ctrlr.set(spec, dir); err != nil {
- t.Fatalf("ctrlr.set(): %v", err)
- }
- checkDir(t, dir, tc.wants)
- })
- }
-}
-
-func TestLoadPaths(t *testing.T) {
- for _, tc := range []struct {
- name string
- cgroups string
- mountinfo string
- want map[string]string
- err string
- }{
- {
- name: "empty",
- mountinfo: debianMountinfo,
- },
- {
- name: "abs-path",
- cgroups: "0:cpu:/path",
- mountinfo: debianMountinfo,
- want: map[string]string{"cpu": "/path"},
- },
- {
- name: "rel-path",
- cgroups: "0:cpu:rel-path",
- mountinfo: debianMountinfo,
- want: map[string]string{"cpu": "rel-path"},
- },
- {
- name: "non-controller",
- cgroups: "0:name=systemd:/path",
- mountinfo: debianMountinfo,
- want: map[string]string{"systemd": "/path"},
- },
- {
- name: "unknown-controller",
- cgroups: "0:ctr:/path",
- mountinfo: debianMountinfo,
- want: map[string]string{},
- },
- {
- name: "multiple",
- cgroups: "0:cpu:/path0\n" +
- "1:memory:/path1\n" +
- "2::/empty\n",
- mountinfo: debianMountinfo,
- want: map[string]string{
- "cpu": "/path0",
- "memory": "/path1",
- },
- },
- {
- name: "missing-field",
- cgroups: "0:nopath\n",
- mountinfo: debianMountinfo,
- err: "invalid cgroups file",
- },
- {
- name: "too-many-fields",
- cgroups: "0:ctr:/path:extra\n",
- mountinfo: debianMountinfo,
- err: "invalid cgroups file",
- },
- {
- name: "multiple-malformed",
- cgroups: "0:ctr0:/path0\n" +
- "1:ctr1:/path1\n" +
- "2:\n",
- mountinfo: debianMountinfo,
- err: "invalid cgroups file",
- },
- {
- name: "nested-cgroup",
- cgroups: "9:memory:/docker/136\n" +
- "2:cpu,cpuacct:/docker/136\n" +
- "1:name=systemd:/docker/136\n" +
- "0::/system.slice/containerd.service\n",
- mountinfo: dindMountinfo,
-			// We want the path relative to /sys/fs/cgroup inside the nested
-			// container. A subgroup inside the container will be created at
-			// /sys/fs/cgroup/cpu, which corresponds to
-			// /sys/fs/cgroup/cpu/docker/136/CGROUP_NAME outside the container.
- want: map[string]string{
- "memory": ".",
- "cpu": ".",
- "cpuacct": ".",
- "systemd": ".",
- },
- },
- {
- name: "nested-cgroup-submount",
- cgroups: "9:memory:/docker/136/test",
- mountinfo: dindMountinfo,
- want: map[string]string{
- "memory": "test",
- },
- },
- {
- name: "invalid-mount-info",
- cgroups: "0:memory:/path",
- mountinfo: "41 35 0:36 / /sys/fs/cgroup/memory rw shared:16 - invalid",
- want: map[string]string{
- "memory": "/path",
- },
- },
- {
- name: "invalid-rel-path-in-proc-cgroup",
- cgroups: "9:memory:invalid",
- mountinfo: dindMountinfo,
- err: "can't make invalid relative to /docker/136",
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- r := strings.NewReader(tc.cgroups)
- mountinfo := strings.NewReader(tc.mountinfo)
- got, err := loadPathsHelper(r, mountinfo)
- if len(tc.err) == 0 {
- if err != nil {
- t.Fatalf("Unexpected error: %v", err)
- }
- } else if err == nil || !strings.Contains(err.Error(), tc.err) {
- t.Fatalf("Wrong error message, want: *%s*, got: %v", tc.err, err)
- }
- for key, vWant := range tc.want {
- vGot, ok := got[key]
- if !ok {
- t.Errorf("Missing controller %q", key)
- }
- if vWant != vGot {
- t.Errorf("Wrong controller %q value, want: %q, got: %q", key, vWant, vGot)
- }
- delete(got, key)
- }
- for k, v := range got {
- t.Errorf("Unexpected controller %q: %q", k, v)
- }
- })
- }
-}
-
-func TestOptional(t *testing.T) {
- for _, tc := range []struct {
- name string
- ctrlr controller
- spec *specs.LinuxResources
- err string
- }{
- {
- name: "net-cls",
- ctrlr: &networkClass{},
- spec: &specs.LinuxResources{Network: &specs.LinuxNetwork{ClassID: uint32Ptr(1)}},
- err: "Network.ClassID set but net_cls cgroup controller not found",
- },
- {
- name: "net-prio",
- ctrlr: &networkPrio{},
- spec: &specs.LinuxResources{Network: &specs.LinuxNetwork{
- Priorities: []specs.LinuxInterfacePriority{
- {Name: "foo", Priority: 1},
- },
- }},
- err: "Network.Priorities set but net_prio cgroup controller not found",
- },
- {
- name: "hugetlb",
- ctrlr: &hugeTLB{},
- spec: &specs.LinuxResources{HugepageLimits: []specs.LinuxHugepageLimit{
- {Pagesize: "1", Limit: 2},
- }},
- err: "HugepageLimits set but hugetlb cgroup controller not found",
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- err := tc.ctrlr.skip(tc.spec)
- if err == nil {
- t.Fatalf("ctrlr.skip() didn't fail")
- }
- if !strings.Contains(err.Error(), tc.err) {
- t.Errorf("ctrlr.skip() want: *%s*, got: %q", tc.err, err)
- }
- })
- }
-}
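Aside: TestCountCpuset above exercises parsing of cpuset list strings such as "0-7,16,32-39,64,65". The sketch below is not the runsc implementation; it is an illustrative reconstruction that satisfies the accepted and rejected inputs in the table above.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// countCpuset counts CPUs in a cpuset list string such as "0-7,16,32-39".
// Illustrative sketch only; the real runsc implementation may differ.
func countCpuset(s string) (int, error) {
	count := 0
	for _, part := range strings.Split(s, ",") {
		switch bounds := strings.Split(part, "-"); len(bounds) {
		case 1:
			// Single CPU number, e.g. "16".
			if _, err := strconv.Atoi(bounds[0]); err != nil {
				return 0, fmt.Errorf("invalid cpuset entry %q: %v", part, err)
			}
			count++
		case 2:
			// Inclusive range, e.g. "0-7".
			lo, err := strconv.Atoi(bounds[0])
			if err != nil {
				return 0, fmt.Errorf("invalid cpuset range %q: %v", part, err)
			}
			hi, err := strconv.Atoi(bounds[1])
			if err != nil {
				return 0, fmt.Errorf("invalid cpuset range %q: %v", part, err)
			}
			if lo > hi {
				return 0, fmt.Errorf("invalid cpuset range %q: start > end", part)
			}
			count += hi - lo + 1
		default:
			return 0, fmt.Errorf("invalid cpuset entry %q", part)
		}
	}
	return count, nil
}

func main() {
	n, err := countCpuset("0-7,16,32-39,64,65")
	fmt.Println(n, err) // 19 <nil>
}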
diff --git a/runsc/cli/BUILD b/runsc/cli/BUILD
deleted file mode 100644
index 360e3cea6..000000000
--- a/runsc/cli/BUILD
+++ /dev/null
@@ -1,25 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "cli",
- srcs = ["main.go"],
- visibility = [
- "//:__pkg__",
- "//runsc:__pkg__",
- ],
- deps = [
- "//pkg/coverage",
- "//pkg/log",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/sentry/platform",
- "//runsc/cmd",
- "//runsc/config",
- "//runsc/flag",
- "//runsc/specutils",
- "@com_github_google_subcommands//:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/runsc/cli/cli_state_autogen.go b/runsc/cli/cli_state_autogen.go
new file mode 100644
index 000000000..e81991e0b
--- /dev/null
+++ b/runsc/cli/cli_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package cli
diff --git a/runsc/cmd/BUILD b/runsc/cmd/BUILD
deleted file mode 100644
index 39c8ff603..000000000
--- a/runsc/cmd/BUILD
+++ /dev/null
@@ -1,105 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "cmd",
- srcs = [
- "boot.go",
- "capability.go",
- "checkpoint.go",
- "chroot.go",
- "cmd.go",
- "create.go",
- "debug.go",
- "delete.go",
- "do.go",
- "error.go",
- "events.go",
- "exec.go",
- "gofer.go",
- "help.go",
- "install.go",
- "kill.go",
- "list.go",
- "mitigate.go",
- "mitigate_extras.go",
- "path.go",
- "pause.go",
- "ps.go",
- "restore.go",
- "resume.go",
- "run.go",
- "spec.go",
- "start.go",
- "state.go",
- "statefile.go",
- "symbolize.go",
- "syscalls.go",
- "verity_prepare.go",
- "wait.go",
- ],
- visibility = [
- "//runsc:__subpackages__",
- ],
- deps = [
- "//pkg/coverage",
- "//pkg/log",
- "//pkg/p9",
- "//pkg/sentry/control",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/platform",
- "//pkg/state/pretty",
- "//pkg/state/statefile",
- "//pkg/sync",
- "//pkg/unet",
- "//pkg/urpc",
- "//runsc/boot",
- "//runsc/config",
- "//runsc/console",
- "//runsc/container",
- "//runsc/flag",
- "//runsc/fsgofer",
- "//runsc/fsgofer/filter",
- "//runsc/mitigate",
- "//runsc/specutils",
- "@com_github_google_subcommands//:go_default_library",
- "@com_github_opencontainers_runtime_spec//specs-go:go_default_library",
- "@com_github_syndtr_gocapability//capability:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "cmd_test",
- size = "small",
- srcs = [
- "capability_test.go",
- "delete_test.go",
- "exec_test.go",
- "gofer_test.go",
- "mitigate_test.go",
- ],
- data = [
- "//runsc",
- ],
- library = ":cmd",
- deps = [
- "//pkg/abi/linux",
- "//pkg/log",
- "//pkg/sentry/control",
- "//pkg/sentry/kernel/auth",
- "//pkg/test/testutil",
- "//pkg/urpc",
- "//runsc/config",
- "//runsc/container",
- "//runsc/mitigate",
- "//runsc/mitigate/mock",
- "//runsc/specutils",
- "@com_github_google_go_cmp//cmp:go_default_library",
- "@com_github_google_go_cmp//cmp/cmpopts:go_default_library",
- "@com_github_opencontainers_runtime_spec//specs-go:go_default_library",
- "@com_github_syndtr_gocapability//capability:go_default_library",
- ],
-)
diff --git a/runsc/cmd/capability_test.go b/runsc/cmd/capability_test.go
deleted file mode 100644
index 99075d82d..000000000
--- a/runsc/cmd/capability_test.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cmd
-
-import (
- "flag"
- "fmt"
- "os"
- "testing"
-
- specs "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/syndtr/gocapability/capability"
- "gvisor.dev/gvisor/pkg/log"
- "gvisor.dev/gvisor/pkg/test/testutil"
- "gvisor.dev/gvisor/runsc/config"
- "gvisor.dev/gvisor/runsc/container"
- "gvisor.dev/gvisor/runsc/specutils"
-)
-
-func init() {
- log.SetLevel(log.Debug)
- if err := testutil.ConfigureExePath(); err != nil {
- panic(err.Error())
- }
-}
-
-func checkProcessCaps(pid int, wantCaps *specs.LinuxCapabilities) error {
- curCaps, err := capability.NewPid2(pid)
- if err != nil {
- return fmt.Errorf("capability.NewPid2(%d) failed: %v", pid, err)
- }
- if err := curCaps.Load(); err != nil {
- return fmt.Errorf("unable to load capabilities: %v", err)
- }
- fmt.Printf("Capabilities (PID: %d): %v\n", pid, curCaps)
-
- for _, c := range allCapTypes {
- if err := checkCaps(c, curCaps, wantCaps); err != nil {
- return err
- }
- }
- return nil
-}
-
-func checkCaps(which capability.CapType, curCaps capability.Capabilities, wantCaps *specs.LinuxCapabilities) error {
- wantNames := getCaps(which, wantCaps)
- for name, c := range capFromName {
- want := specutils.ContainsStr(wantNames, name)
- got := curCaps.Get(which, c)
- if want != got {
- if want {
- return fmt.Errorf("capability %v:%s should be set", which, name)
- }
- return fmt.Errorf("capability %v:%s should NOT be set", which, name)
- }
- }
- return nil
-}
-
-func TestCapabilities(t *testing.T) {
- stop := testutil.StartReaper()
- defer stop()
-
- spec := testutil.NewSpecWithArgs("/bin/sleep", "10000")
- caps := []string{
- "CAP_CHOWN",
- "CAP_SYS_PTRACE", // ptrace is added due to the platform choice.
- }
- spec.Process.Capabilities = &specs.LinuxCapabilities{
- Permitted: caps,
- Bounding: caps,
- Effective: caps,
- Inheritable: caps,
- }
-
- conf := testutil.TestConfig(t)
-
- // Use --network=host to make sandbox use spec's capabilities.
- conf.Network = config.NetworkHost
-
- _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- // Create and start the container.
- args := container.Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- }
- c, err := container.New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- defer c.Destroy()
- if err := c.Start(conf); err != nil {
- t.Fatalf("error starting container: %v", err)
- }
-
- // Check that sandbox and gofer have the proper capabilities.
- if err := checkProcessCaps(c.Sandbox.Pid, spec.Process.Capabilities); err != nil {
- t.Error(err)
- }
- if err := checkProcessCaps(c.GoferPid, goferCaps); err != nil {
- t.Error(err)
- }
-}
-
-func TestMain(m *testing.M) {
- flag.Parse()
- if err := specutils.MaybeRunAsRoot(); err != nil {
- fmt.Fprintf(os.Stderr, "Error running as root: %v", err)
- os.Exit(123)
- }
- os.Exit(m.Run())
-}
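Aside: the deleted capability_test.go reads a process's capability sets through github.com/syndtr/gocapability using the NewPid2, Load, and Get calls seen above. A standalone sketch of just that read path; the choice of CAP_CHOWN in the effective set is only an arbitrary example.

package main

import (
	"fmt"
	"os"

	"github.com/syndtr/gocapability/capability"
)

func main() {
	// Load the capability sets of the current process.
	caps, err := capability.NewPid2(os.Getpid())
	if err != nil {
		fmt.Fprintf(os.Stderr, "NewPid2: %v\n", err)
		os.Exit(1)
	}
	if err := caps.Load(); err != nil {
		fmt.Fprintf(os.Stderr, "Load: %v\n", err)
		os.Exit(1)
	}

	// Query a single capability in the effective set, e.g. CAP_CHOWN.
	fmt.Printf("effective CAP_CHOWN: %v\n", caps.Get(capability.EFFECTIVE, capability.CAP_CHOWN))
	fmt.Printf("all sets: %v\n", caps)
}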
diff --git a/runsc/cmd/cmd_state_autogen.go b/runsc/cmd/cmd_state_autogen.go
new file mode 100644
index 000000000..f65d1508d
--- /dev/null
+++ b/runsc/cmd/cmd_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build go1.1
+
+package cmd
diff --git a/runsc/cmd/delete_test.go b/runsc/cmd/delete_test.go
deleted file mode 100644
index e2d994a05..000000000
--- a/runsc/cmd/delete_test.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cmd
-
-import (
- "io/ioutil"
- "testing"
-
- "gvisor.dev/gvisor/runsc/config"
-)
-
-func TestNotFound(t *testing.T) {
- ids := []string{"123"}
- dir, err := ioutil.TempDir("", "metadata")
- if err != nil {
- t.Fatalf("error creating dir: %v", err)
- }
- conf := &config.Config{RootDir: dir}
-
- d := Delete{}
- if err := d.execute(ids, conf); err == nil {
- t.Error("Deleting non-existent container should have failed")
- }
-
- d = Delete{force: true}
- if err := d.execute(ids, conf); err != nil {
- t.Errorf("Deleting non-existent container with --force should NOT have failed: %v", err)
- }
-}
diff --git a/runsc/cmd/exec_test.go b/runsc/cmd/exec_test.go
deleted file mode 100644
index a1e980d08..000000000
--- a/runsc/cmd/exec_test.go
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cmd
-
-import (
- "os"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "github.com/google/go-cmp/cmp/cmpopts"
- specs "github.com/opencontainers/runtime-spec/specs-go"
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/sentry/control"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/urpc"
-)
-
-func TestUser(t *testing.T) {
- testCases := []struct {
- input string
- want user
- wantErr bool
- }{
- {input: "0", want: user{kuid: 0, kgid: 0}},
- {input: "7", want: user{kuid: 7, kgid: 0}},
- {input: "49:343", want: user{kuid: 49, kgid: 343}},
- {input: "0:2401", want: user{kuid: 0, kgid: 2401}},
- {input: "", wantErr: true},
- {input: "foo", wantErr: true},
- {input: ":123", wantErr: true},
- {input: "1:2:3", wantErr: true},
- }
-
- for _, tc := range testCases {
- var u user
- if err := u.Set(tc.input); err != nil && tc.wantErr {
- // We got an error and wanted one.
- continue
- } else if err == nil && tc.wantErr {
- t.Errorf("user.Set(%s): got no error, but wanted one", tc.input)
- } else if err != nil && !tc.wantErr {
- t.Errorf("user.Set(%s): got error %v, but wanted none", tc.input, err)
- } else if u != tc.want {
- t.Errorf("user.Set(%s): got %+v, but wanted %+v", tc.input, u, tc.want)
- }
- }
-}
-
-func TestCLIArgs(t *testing.T) {
- testCases := []struct {
- ex Exec
- argv []string
- expected control.ExecArgs
- }{
- {
- ex: Exec{
- cwd: "/foo/bar",
- user: user{kuid: 0, kgid: 0},
- extraKGIDs: []string{"1", "2", "3"},
- caps: []string{"CAP_DAC_OVERRIDE"},
- processPath: "",
- },
- argv: []string{"ls", "/"},
- expected: control.ExecArgs{
- Argv: []string{"ls", "/"},
- WorkingDirectory: "/foo/bar",
- FilePayload: urpc.FilePayload{Files: []*os.File{os.Stdin, os.Stdout, os.Stderr}},
- KUID: 0,
- KGID: 0,
- ExtraKGIDs: []auth.KGID{1, 2, 3},
- Capabilities: &auth.TaskCapabilities{
- BoundingCaps: auth.CapabilitySetOf(linux.CAP_DAC_OVERRIDE),
- EffectiveCaps: auth.CapabilitySetOf(linux.CAP_DAC_OVERRIDE),
- InheritableCaps: auth.CapabilitySetOf(linux.CAP_DAC_OVERRIDE),
- PermittedCaps: auth.CapabilitySetOf(linux.CAP_DAC_OVERRIDE),
- },
- },
- },
- }
-
- for _, tc := range testCases {
- e, err := tc.ex.argsFromCLI(tc.argv, true)
- if err != nil {
- t.Errorf("argsFromCLI(%+v): got error: %+v", tc.ex, err)
- } else if !cmp.Equal(*e, tc.expected, cmpopts.IgnoreUnexported(os.File{})) {
- t.Errorf("argsFromCLI(%+v): got %+v, but expected %+v", tc.ex, *e, tc.expected)
- }
- }
-}
-
-func TestJSONArgs(t *testing.T) {
- testCases := []struct {
- // ex is provided to make sure it is overridden by p.
- ex Exec
- p specs.Process
- expected control.ExecArgs
- }{
- {
- ex: Exec{
- cwd: "/baz/quux",
- user: user{kuid: 1, kgid: 1},
- extraKGIDs: []string{"4", "5", "6"},
- caps: []string{"CAP_SETGID"},
- processPath: "/bin/foo",
- },
- p: specs.Process{
- User: specs.User{UID: 0, GID: 0, AdditionalGids: []uint32{1, 2, 3}},
- Args: []string{"ls", "/"},
- Cwd: "/foo/bar",
- Capabilities: &specs.LinuxCapabilities{
- Bounding: []string{"CAP_DAC_OVERRIDE"},
- Effective: []string{"CAP_DAC_OVERRIDE"},
- Inheritable: []string{"CAP_DAC_OVERRIDE"},
- Permitted: []string{"CAP_DAC_OVERRIDE"},
- },
- },
- expected: control.ExecArgs{
- Argv: []string{"ls", "/"},
- WorkingDirectory: "/foo/bar",
- FilePayload: urpc.FilePayload{Files: []*os.File{os.Stdin, os.Stdout, os.Stderr}},
- KUID: 0,
- KGID: 0,
- ExtraKGIDs: []auth.KGID{1, 2, 3},
- Capabilities: &auth.TaskCapabilities{
- BoundingCaps: auth.CapabilitySetOf(linux.CAP_DAC_OVERRIDE),
- EffectiveCaps: auth.CapabilitySetOf(linux.CAP_DAC_OVERRIDE),
- InheritableCaps: auth.CapabilitySetOf(linux.CAP_DAC_OVERRIDE),
- PermittedCaps: auth.CapabilitySetOf(linux.CAP_DAC_OVERRIDE),
- },
- },
- },
- }
-
- for _, tc := range testCases {
- e, err := argsFromProcess(&tc.p, true)
- if err != nil {
- t.Errorf("argsFromProcess(%+v): got error: %+v", tc.p, err)
- } else if !cmp.Equal(*e, tc.expected, cmpopts.IgnoreUnexported(os.File{})) {
- t.Errorf("argsFromProcess(%+v): got %+v, but expected %+v", tc.p, *e, tc.expected)
- }
- }
-}
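Aside: exec_test.go above compares control.ExecArgs values with go-cmp while ignoring the unexported fields of os.File. A minimal sketch of that comparison idiom, using a hypothetical args struct in place of the runsc types:

package main

import (
	"fmt"
	"os"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

// args is a hypothetical stand-in for a struct such as control.ExecArgs
// that carries *os.File values, which have unexported fields.
type args struct {
	Argv  []string
	Files []*os.File
}

func main() {
	got := args{Argv: []string{"ls", "/"}, Files: []*os.File{os.Stdin}}
	want := args{Argv: []string{"ls", "/"}, Files: []*os.File{os.Stdin}}

	// IgnoreUnexported(os.File{}) keeps cmp from panicking on os.File's
	// unexported fields; only the exported structure is compared.
	if diff := cmp.Diff(want, got, cmpopts.IgnoreUnexported(os.File{})); diff != "" {
		fmt.Printf("mismatch (-want +got):\n%s", diff)
	} else {
		fmt.Println("equal")
	}
}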
diff --git a/runsc/cmd/gofer_test.go b/runsc/cmd/gofer_test.go
deleted file mode 100644
index fea62a4f4..000000000
--- a/runsc/cmd/gofer_test.go
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cmd
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "path"
- "path/filepath"
- "testing"
-)
-
-func tmpDir() string {
- if dir, ok := os.LookupEnv("TEST_TMPDIR"); ok {
- return dir
- }
- return "/tmp"
-}
-
-type dir struct {
- rel string
- link string
-}
-
-func construct(root string, dirs []dir) error {
- for _, d := range dirs {
- p := path.Join(root, d.rel)
- if d.link == "" {
- if err := os.MkdirAll(p, 0755); err != nil {
- return fmt.Errorf("error creating dir: %v", err)
- }
- } else {
- if err := os.MkdirAll(path.Dir(p), 0755); err != nil {
- return fmt.Errorf("error creating dir: %v", err)
- }
- if err := os.Symlink(d.link, p); err != nil {
- return fmt.Errorf("error creating symlink: %v", err)
- }
- }
- }
- return nil
-}
-
-func TestResolveSymlinks(t *testing.T) {
- root, err := ioutil.TempDir(tmpDir(), "root")
- if err != nil {
- t.Fatal("ioutil.TempDir() failed:", err)
- }
- dirs := []dir{
- {"dir1/dir11/dir111/dir1111", ""}, // Just a boring dir
- {"dir1/lnk12", "dir11"}, // Link to sibling
- {"dir1/lnk13", "./dir11"}, // Link to sibling through self
- {"dir1/lnk14", "../dir1/dir11"}, // Link to sibling through parent
- {"dir1/dir15/lnk151", ".."}, // Link to parent
- {"dir1/lnk16", "dir11/dir111"}, // Link to child
- {"dir1/lnk17", "."}, // Link to self
- {"dir1/lnk18", "lnk13"}, // Link to link
- {"lnk2", "dir1/lnk13"}, // Link to link to link
- {"dir3/dir21/lnk211", "../.."}, // Link to root relative
- {"dir3/lnk22", "/"}, // Link to root absolute
- {"dir3/lnk23", "/dir1"}, // Link to dir absolute
- {"dir3/lnk24", "/dir1/lnk12"}, // Link to link absolute
- {"lnk5", "../../.."}, // Link outside root
- }
- if err := construct(root, dirs); err != nil {
- t.Fatal("construct failed:", err)
- }
-
- tests := []struct {
- name string
- rel string
- want string
- compareHost bool
- }{
- {name: "root", rel: "/", want: "/", compareHost: true},
- {name: "basic dir", rel: "/dir1/dir11/dir111", want: "/dir1/dir11/dir111", compareHost: true},
- {name: "dot 1", rel: "/dir1/dir11/./dir111", want: "/dir1/dir11/dir111", compareHost: true},
- {name: "dot 2", rel: "/dir1/././dir11/./././././dir111/.", want: "/dir1/dir11/dir111", compareHost: true},
- {name: "dotdot 1", rel: "/dir1/dir11/../dir15", want: "/dir1/dir15", compareHost: true},
- {name: "dotdot 2", rel: "/dir1/dir11/dir1111/../..", want: "/dir1", compareHost: true},
-
- {name: "link sibling", rel: "/dir1/lnk12", want: "/dir1/dir11", compareHost: true},
- {name: "link sibling + dir", rel: "/dir1/lnk12/dir111", want: "/dir1/dir11/dir111", compareHost: true},
- {name: "link sibling through self", rel: "/dir1/lnk13", want: "/dir1/dir11", compareHost: true},
- {name: "link sibling through parent", rel: "/dir1/lnk14", want: "/dir1/dir11", compareHost: true},
-
- {name: "link parent", rel: "/dir1/dir15/lnk151", want: "/dir1", compareHost: true},
- {name: "link parent + dir", rel: "/dir1/dir15/lnk151/dir11", want: "/dir1/dir11", compareHost: true},
- {name: "link child", rel: "/dir1/lnk16", want: "/dir1/dir11/dir111", compareHost: true},
- {name: "link child + dir", rel: "/dir1/lnk16/dir1111", want: "/dir1/dir11/dir111/dir1111", compareHost: true},
- {name: "link self", rel: "/dir1/lnk17", want: "/dir1", compareHost: true},
- {name: "link self + dir", rel: "/dir1/lnk17/dir11", want: "/dir1/dir11", compareHost: true},
-
- {name: "link^2", rel: "/dir1/lnk18", want: "/dir1/dir11", compareHost: true},
- {name: "link^2 + dir", rel: "/dir1/lnk18/dir111", want: "/dir1/dir11/dir111", compareHost: true},
- {name: "link^3", rel: "/lnk2", want: "/dir1/dir11", compareHost: true},
- {name: "link^3 + dir", rel: "/lnk2/dir111", want: "/dir1/dir11/dir111", compareHost: true},
-
- {name: "link abs", rel: "/dir3/lnk23", want: "/dir1"},
- {name: "link abs + dir", rel: "/dir3/lnk23/dir11", want: "/dir1/dir11"},
- {name: "link^2 abs", rel: "/dir3/lnk24", want: "/dir1/dir11"},
- {name: "link^2 abs + dir", rel: "/dir3/lnk24/dir111", want: "/dir1/dir11/dir111"},
-
- {name: "root link rel", rel: "/dir3/dir21/lnk211", want: "/", compareHost: true},
- {name: "root link abs", rel: "/dir3/lnk22", want: "/"},
- {name: "root contain link", rel: "/lnk5/dir1", want: "/dir1"},
- {name: "root contain dotdot", rel: "/dir1/dir11/../../../../../../../..", want: "/"},
-
- {name: "crazy", rel: "/dir3/dir21/lnk211/dir3/lnk22/dir1/dir11/../../lnk5/dir3/../dir3/lnk24/dir111/dir1111/..", want: "/dir1/dir11/dir111"},
- }
- for _, tst := range tests {
- t.Run(tst.name, func(t *testing.T) {
- got, err := resolveSymlinks(root, tst.rel)
- if err != nil {
- t.Errorf("resolveSymlinks(root, %q) failed: %v", tst.rel, err)
- }
- want := path.Join(root, tst.want)
- if got != want {
- t.Errorf("resolveSymlinks(root, %q) got: %q, want: %q", tst.rel, got, want)
- }
- if tst.compareHost {
-				// Check that the host resolves to the same end result.
- host, err := filepath.EvalSymlinks(path.Join(root, tst.rel))
- if err != nil {
- t.Errorf("path.EvalSymlinks(root, %q) failed: %v", tst.rel, err)
- }
- if host != got {
- t.Errorf("resolveSymlinks(root, %q) got: %q, want: %q", tst.rel, host, got)
- }
- }
- })
- }
-}
-
-func TestResolveSymlinksLoop(t *testing.T) {
- root, err := ioutil.TempDir(tmpDir(), "root")
- if err != nil {
- t.Fatal("ioutil.TempDir() failed:", err)
- }
- dirs := []dir{
- {"loop1", "loop2"},
- {"loop2", "loop1"},
- }
- if err := construct(root, dirs); err != nil {
- t.Fatal("construct failed:", err)
- }
- if _, err := resolveSymlinks(root, "loop1"); err == nil {
- t.Errorf("resolveSymlinks() should have failed")
- }
-}
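Aside: gofer_test.go above validates a chroot-confined symlink resolver against filepath.EvalSymlinks and expects it to reject loops. The sketch below is a simplified illustration of bounded, root-confined symlink resolution; it is not the actual runsc resolveSymlinks, which is more involved.

package main

import (
	"fmt"
	"os"
	"path"
	"strings"
)

// resolveInRoot resolves rel inside root, following symlinks but never
// escaping root, and giving up after maxLinks hops to avoid loops.
func resolveInRoot(root, rel string, maxLinks int) (string, error) {
	cur := "/"
	todo := strings.Split(path.Clean("/"+rel), "/")
	for len(todo) > 0 {
		p := todo[0]
		todo = todo[1:]
		switch p {
		case "", ".":
			continue
		case "..":
			cur = path.Dir(cur) // path.Dir("/") == "/", so ".." cannot escape root.
			continue
		}
		next := path.Join(cur, p)
		fi, err := os.Lstat(path.Join(root, next))
		if err != nil {
			return "", err
		}
		if fi.Mode()&os.ModeSymlink == 0 {
			cur = next
			continue
		}
		maxLinks--
		if maxLinks < 0 {
			return "", fmt.Errorf("too many symlinks resolving %q", rel)
		}
		target, err := os.Readlink(path.Join(root, next))
		if err != nil {
			return "", err
		}
		if path.IsAbs(target) {
			// Absolute targets are re-anchored at the chroot root.
			cur = "/"
			target = strings.TrimPrefix(target, "/")
		}
		// Re-queue the link target's components ahead of what is left.
		todo = append(strings.Split(target, "/"), todo...)
	}
	return cur, nil
}

func main() {
	got, err := resolveInRoot(os.TempDir(), "/", 255)
	fmt.Println(got, err) // "/" <nil>
}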
diff --git a/runsc/cmd/mitigate_test.go b/runsc/cmd/mitigate_test.go
deleted file mode 100644
index 51755d9f3..000000000
--- a/runsc/cmd/mitigate_test.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build amd64
-// +build amd64
-
-package cmd
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "strings"
- "testing"
-
- "gvisor.dev/gvisor/runsc/mitigate/mock"
-)
-
-type executeTestCase struct {
- name string
- mitigateData string
- mitigateError error
- mitigateCPU int
- reverseData string
- reverseError error
- reverseCPU int
-}
-
-func TestExecute(t *testing.T) {
-
- partial := `processor : 1
-vendor_id : AuthenticAMD
-cpu family : 23
-model : 49
-model name : AMD EPYC 7B12
-physical id : 0
-bugs : sysret_ss_attrs spectre_v1 spectre_v2 spec_store_bypass
-power management:
-`
-
- for _, tc := range []executeTestCase{
- {
- name: "CascadeLake4",
- mitigateData: mock.CascadeLake4.MakeCPUString(),
- mitigateCPU: 2,
- reverseData: mock.CascadeLake4.MakeSysPossibleString(),
- reverseCPU: 4,
- },
- {
- name: "Empty",
- mitigateData: "",
- mitigateError: fmt.Errorf(`mitigate operation failed: no cpus found for: ""`),
- reverseData: "",
- reverseError: fmt.Errorf(`reverse operation failed: mismatch regex from possible: ""`),
- },
- {
- name: "Partial",
- mitigateData: `processor : 0
-vendor_id : AuthenticAMD
-cpu family : 23
-model : 49
-model name : AMD EPYC 7B12
-physical id : 0
-core id : 0
-cpu cores : 1
-bugs : sysret_ss_attrs spectre_v1 spectre_v2 spec_store_bypass
-power management::84
-
-` + partial,
- mitigateError: fmt.Errorf(`mitigate operation failed: failed to match key "core id": %q`, partial),
- reverseData: "1-",
- reverseError: fmt.Errorf(`reverse operation failed: mismatch regex from possible: %q`, "1-"),
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- m := &Mitigate{
- dryRun: true,
- }
- m.doExecuteTest(t, "Mitigate", tc.mitigateData, tc.mitigateCPU, tc.mitigateError)
-
- m.reverse = true
- m.doExecuteTest(t, "Reverse", tc.reverseData, tc.reverseCPU, tc.reverseError)
- })
- }
-}
-
-func TestExecuteSmoke(t *testing.T) {
- smokeMitigate, err := ioutil.ReadFile(cpuInfo)
- if err != nil {
- t.Fatalf("Failed to read %s: %v", cpuInfo, err)
- }
-
- m := &Mitigate{
- dryRun: true,
- }
-
- m.doExecuteTest(t, "Mitigate", string(smokeMitigate), 0, nil)
-
- smokeReverse, err := ioutil.ReadFile(allPossibleCPUs)
- if err != nil {
- t.Fatalf("Failed to read %s: %v", allPossibleCPUs, err)
- }
-
- m.reverse = true
- m.doExecuteTest(t, "Reverse", string(smokeReverse), 0, nil)
-}
-
-// doExecuteTest runs doExecute for one operation (mitigate or reverse) and checks its result.
-func (m *Mitigate) doExecuteTest(t *testing.T, name, data string, want int, wantErr error) {
- t.Run(name, func(t *testing.T) {
- file, err := ioutil.TempFile("", "outfile.txt")
- if err != nil {
- t.Fatalf("Failed to create tmpfile: %v", err)
- }
- defer os.Remove(file.Name())
-
- if _, err := file.WriteString(data); err != nil {
- t.Fatalf("Failed to write to file: %v", err)
- }
-
-		// Point the mitigate path at the temp file to keep the test hermetic.
- m.path = file.Name()
-
- set, err := m.doExecute()
- if err = checkErr(wantErr, err); err != nil {
- t.Fatalf("Mitigate error mismatch: %v", err)
- }
-
-		// Case where the test should end in an error, or where we don't care
-		// how many CPUs are returned.
- if wantErr != nil || want < 1 {
- return
- }
- got := len(set.GetRemainingList())
- if want != got {
- t.Fatalf("Failed wrong number of remaining CPUs: want %d, got %d", want, got)
- }
-
- })
-}
-
-// checkErr checks error for equality.
-func checkErr(want, got error) error {
- switch {
- case want == nil && got == nil:
- case want == nil || got == nil || want.Error() != strings.Trim(got.Error(), " "):
- return fmt.Errorf("got: %v want: %v", got, want)
- }
- return nil
-}
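Aside: the checkErr helper above compares errors by message, treating two nils as equal and trimming surrounding spaces from the observed message. A brief standalone usage sketch, with the helper copied verbatim:

package main

import (
	"fmt"
	"strings"
)

// checkErr checks error for equality (copied from the deleted mitigate_test.go).
func checkErr(want, got error) error {
	switch {
	case want == nil && got == nil:
	case want == nil || got == nil || want.Error() != strings.Trim(got.Error(), " "):
		return fmt.Errorf("got: %v want: %v", got, want)
	}
	return nil
}

func main() {
	want := fmt.Errorf("no cpus found")
	got := fmt.Errorf(" no cpus found ")
	fmt.Println(checkErr(want, got)) // <nil>: surrounding spaces are ignored.
	fmt.Println(checkErr(want, nil)) // non-nil: messages differ.
}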
diff --git a/runsc/config/BUILD b/runsc/config/BUILD
deleted file mode 100644
index b1672bb9d..000000000
--- a/runsc/config/BUILD
+++ /dev/null
@@ -1,28 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "config",
- srcs = [
- "config.go",
- "flags.go",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/refs",
- "//pkg/sentry/watchdog",
- "//pkg/sync",
- "//runsc/flag",
- ],
-)
-
-go_test(
- name = "config_test",
- size = "small",
- srcs = [
- "config_test.go",
- ],
- library = ":config",
- deps = ["//runsc/flag"],
-)
diff --git a/runsc/config/config_state_autogen.go b/runsc/config/config_state_autogen.go
new file mode 100644
index 000000000..92fa90265
--- /dev/null
+++ b/runsc/config/config_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package config
diff --git a/runsc/config/config_test.go b/runsc/config/config_test.go
deleted file mode 100644
index 80ff2c0a6..000000000
--- a/runsc/config/config_test.go
+++ /dev/null
@@ -1,288 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package config
-
-import (
- "strings"
- "testing"
-
- "gvisor.dev/gvisor/runsc/flag"
-)
-
-func init() {
- RegisterFlags()
-}
-
-func TestDefault(t *testing.T) {
- c, err := NewFromFlags()
- if err != nil {
- t.Fatal(err)
- }
- // "--root" is always set to something different than the default. Reset it
- // to make it easier to test that default values do not generate flags.
- c.RootDir = ""
-
- // All defaults doesn't require setting flags.
- flags := c.ToFlags()
- if len(flags) > 0 {
- t.Errorf("default flags not set correctly for: %s", flags)
- }
-}
-
-func setDefault(name string) error {
- fl := flag.CommandLine.Lookup(name)
- return fl.Value.Set(fl.DefValue)
-}
-
-func TestFromFlags(t *testing.T) {
- if err := flag.CommandLine.Lookup("root").Value.Set("some-path"); err != nil {
- t.Errorf("Flag set: %v", err)
- }
- if err := flag.CommandLine.Lookup("debug").Value.Set("true"); err != nil {
- t.Errorf("Flag set: %v", err)
- }
- if err := flag.CommandLine.Lookup("num-network-channels").Value.Set("123"); err != nil {
- t.Errorf("Flag set: %v", err)
- }
- if err := flag.CommandLine.Lookup("network").Value.Set("none"); err != nil {
- t.Errorf("Flag set: %v", err)
- }
- defer func() {
- if err := setDefault("root"); err != nil {
- t.Errorf("Flag set: %v", err)
- }
- if err := setDefault("debug"); err != nil {
- t.Errorf("Flag set: %v", err)
- }
- if err := setDefault("num-network-channels"); err != nil {
- t.Errorf("Flag set: %v", err)
- }
- if err := setDefault("network"); err != nil {
- t.Errorf("Flag set: %v", err)
- }
- }()
-
- c, err := NewFromFlags()
- if err != nil {
- t.Fatal(err)
- }
- if want := "some-path"; c.RootDir != want {
- t.Errorf("RootDir=%v, want: %v", c.RootDir, want)
- }
- if want := true; c.Debug != want {
- t.Errorf("Debug=%v, want: %v", c.Debug, want)
- }
- if want := 123; c.NumNetworkChannels != want {
- t.Errorf("NumNetworkChannels=%v, want: %v", c.NumNetworkChannels, want)
- }
- if want := NetworkNone; c.Network != want {
- t.Errorf("Network=%v, want: %v", c.Network, want)
- }
-}
-
-func TestToFlags(t *testing.T) {
- c, err := NewFromFlags()
- if err != nil {
- t.Fatal(err)
- }
- c.RootDir = "some-path"
- c.Debug = true
- c.NumNetworkChannels = 123
- c.Network = NetworkNone
-
- flags := c.ToFlags()
- if len(flags) != 4 {
- t.Errorf("wrong number of flags set, want: 4, got: %d: %s", len(flags), flags)
- }
- t.Logf("Flags: %s", flags)
- fm := map[string]string{}
- for _, f := range flags {
- kv := strings.Split(f, "=")
- fm[kv[0]] = kv[1]
- }
- for name, want := range map[string]string{
- "--root": "some-path",
- "--debug": "true",
- "--num-network-channels": "123",
- "--network": "none",
- } {
- if got, ok := fm[name]; ok {
- if got != want {
- t.Errorf("flag %q, want: %q, got: %q", name, want, got)
- }
- } else {
- t.Errorf("flag %q not set", name)
- }
- }
-}
-
-// TestInvalidFlags checks that enum flags fail when the value is not in the enum set.
-func TestInvalidFlags(t *testing.T) {
- for _, tc := range []struct {
- name string
- error string
- }{
- {
- name: "file-access",
- error: "invalid file access type",
- },
- {
- name: "network",
- error: "invalid network type",
- },
- {
- name: "qdisc",
- error: "invalid qdisc",
- },
- {
- name: "watchdog-action",
- error: "invalid watchdog action",
- },
- {
- name: "ref-leak-mode",
- error: "invalid ref leak mode",
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- defer setDefault(tc.name)
- if err := flag.CommandLine.Lookup(tc.name).Value.Set("invalid"); err == nil || !strings.Contains(err.Error(), tc.error) {
- t.Errorf("flag.Value.Set(invalid) wrong error reported: %v", err)
- }
- })
- }
-}
-
-func TestValidationFail(t *testing.T) {
- for _, tc := range []struct {
- name string
- flags map[string]string
- error string
- }{
- {
- name: "shared+overlay",
- flags: map[string]string{
- "file-access": "shared",
- "overlay": "true",
- },
- error: "overlay flag is incompatible",
- },
- {
- name: "network-channels",
- flags: map[string]string{
- "num-network-channels": "-1",
- },
- error: "num_network_channels must be > 0",
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- for name, val := range tc.flags {
- defer setDefault(name)
- if err := flag.CommandLine.Lookup(name).Value.Set(val); err != nil {
- t.Errorf("%s=%q: %v", name, val, err)
- }
- }
- if _, err := NewFromFlags(); err == nil || !strings.Contains(err.Error(), tc.error) {
- t.Errorf("NewFromFlags() wrong error reported: %v", err)
- }
- })
- }
-}
-
-func TestOverride(t *testing.T) {
- c, err := NewFromFlags()
- if err != nil {
- t.Fatal(err)
- }
- c.AllowFlagOverride = true
-
- t.Run("string", func(t *testing.T) {
- c.RootDir = "foobar"
- if err := c.Override("root", "bar"); err != nil {
- t.Fatalf("Override(root, bar) failed: %v", err)
- }
- defer setDefault("root")
- if c.RootDir != "bar" {
- t.Errorf("Override(root, bar) didn't work: %+v", c)
- }
- })
-
- t.Run("bool", func(t *testing.T) {
- c.Debug = true
- if err := c.Override("debug", "false"); err != nil {
- t.Fatalf("Override(debug, false) failed: %v", err)
- }
- defer setDefault("debug")
- if c.Debug {
- t.Errorf("Override(debug, false) didn't work: %+v", c)
- }
- })
-
- t.Run("enum", func(t *testing.T) {
- c.FileAccess = FileAccessShared
- if err := c.Override("file-access", "exclusive"); err != nil {
- t.Fatalf("Override(file-access, exclusive) failed: %v", err)
- }
- defer setDefault("file-access")
- if c.FileAccess != FileAccessExclusive {
- t.Errorf("Override(file-access, exclusive) didn't work: %+v", c)
- }
- })
-}
-
-func TestOverrideDisabled(t *testing.T) {
- c, err := NewFromFlags()
- if err != nil {
- t.Fatal(err)
- }
- const errMsg = "flag override disabled"
- if err := c.Override("root", "path"); err == nil || !strings.Contains(err.Error(), errMsg) {
- t.Errorf("Override() wrong error: %v", err)
- }
-}
-
-func TestOverrideError(t *testing.T) {
- c, err := NewFromFlags()
- if err != nil {
- t.Fatal(err)
- }
- c.AllowFlagOverride = true
- for _, tc := range []struct {
- name string
- value string
- error string
- }{
- {
- name: "invalid",
- value: "valid",
- error: `flag "invalid" not found`,
- },
- {
- name: "debug",
- value: "invalid",
- error: "error setting flag debug",
- },
- {
- name: "file-access",
- value: "invalid",
- error: "invalid file access type",
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- if err := c.Override(tc.name, tc.value); err == nil || !strings.Contains(err.Error(), tc.error) {
- t.Errorf("Override(%q, %q) wrong error: %v", tc.name, tc.value, err)
- }
- })
- }
-}
diff --git a/runsc/console/BUILD b/runsc/console/BUILD
deleted file mode 100644
index 06924bccd..000000000
--- a/runsc/console/BUILD
+++ /dev/null
@@ -1,17 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "console",
- srcs = [
- "console.go",
- ],
- visibility = [
- "//runsc:__subpackages__",
- ],
- deps = [
- "@com_github_kr_pty//:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/runsc/console/console_state_autogen.go b/runsc/console/console_state_autogen.go
new file mode 100644
index 000000000..80521cdb7
--- /dev/null
+++ b/runsc/console/console_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package console
diff --git a/runsc/container/BUILD b/runsc/container/BUILD
deleted file mode 100644
index 5314549d6..000000000
--- a/runsc/container/BUILD
+++ /dev/null
@@ -1,76 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test", "more_shards")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "container",
- srcs = [
- "container.go",
- "hook.go",
- "state_file.go",
- "status.go",
- ],
- visibility = [
- "//runsc:__subpackages__",
- "//test:__subpackages__",
- ],
- deps = [
- "//pkg/abi/linux",
- "//pkg/cleanup",
- "//pkg/log",
- "//pkg/sentry/control",
- "//pkg/sentry/sighandling",
- "//pkg/sync",
- "//runsc/boot",
- "//runsc/cgroup",
- "//runsc/config",
- "//runsc/console",
- "//runsc/sandbox",
- "//runsc/specutils",
- "@com_github_cenkalti_backoff//:go_default_library",
- "@com_github_gofrs_flock//:go_default_library",
- "@com_github_opencontainers_runtime_spec//specs-go:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "container_test",
- size = "large",
- srcs = [
- "console_test.go",
- "container_norace_test.go",
- "container_race_test.go",
- "container_test.go",
- "multi_container_test.go",
- "shared_volume_test.go",
- ],
- data = [
- "//runsc",
- "//test/cmd/test_app",
- ],
- library = ":container",
- shard_count = more_shards,
- tags = ["requires-kvm"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/bits",
- "//pkg/cleanup",
- "//pkg/log",
- "//pkg/sentry/control",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sync",
- "//pkg/test/testutil",
- "//pkg/unet",
- "//pkg/urpc",
- "//runsc/boot",
- "//runsc/boot/platforms",
- "//runsc/config",
- "//runsc/specutils",
- "@com_github_cenkalti_backoff//:go_default_library",
- "@com_github_kr_pty//:go_default_library",
- "@com_github_opencontainers_runtime_spec//specs-go:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/runsc/container/console_test.go b/runsc/container/console_test.go
deleted file mode 100644
index 9d36086c3..000000000
--- a/runsc/container/console_test.go
+++ /dev/null
@@ -1,667 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package container
-
-import (
- "bytes"
- "fmt"
- "io"
- "math/rand"
- "os"
- "path/filepath"
- "testing"
- "time"
-
- "github.com/kr/pty"
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/sentry/control"
- "gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/test/testutil"
- "gvisor.dev/gvisor/pkg/unet"
- "gvisor.dev/gvisor/pkg/urpc"
-)
-
-// socketPath creates a path inside bundleDir and ensures that the returned
-// path is under 108 characters (the unix socket path length limit),
-// relativizing the path if necessary.
-func socketPath(bundleDir string) (string, error) {
- num := rand.Intn(10000)
- path := filepath.Join(bundleDir, fmt.Sprintf("socket-%4d", num))
- const maxPathLen = 108
- if len(path) <= maxPathLen {
- return path, nil
- }
-
- // Path is too long; try to make it shorter.
- cwd, err := os.Getwd()
- if err != nil {
- return "", fmt.Errorf("error getting cwd: %v", err)
- }
- path, err = filepath.Rel(cwd, path)
- if err != nil {
- return "", fmt.Errorf("error getting relative path for %q from cwd %q: %v", path, cwd, err)
- }
- if len(path) > maxPathLen {
- return "", fmt.Errorf("could not get socket path under length limit %d: %s", maxPathLen, path)
- }
- return path, nil
-}
-
-// createConsoleSocket creates a socket at the given path that will receive a
-// console fd from the sandbox. If an error occurs, t.Fatalf will be called.
-// The returned cleanup function should be deferred.
-func createConsoleSocket(t *testing.T, path string) (*unet.ServerSocket, func()) {
- t.Helper()
- srv, err := unet.BindAndListen(path, false)
- if err != nil {
- t.Fatalf("error binding and listening to socket %q: %v", path, err)
- }
-
- cleanup := func() {
- // Log errors; nothing can be done.
- if err := srv.Close(); err != nil {
- t.Logf("error closing socket %q: %v", path, err)
- }
- if err := os.Remove(path); err != nil {
- t.Logf("error removing socket %q: %v", path, err)
- }
- }
-
- return srv, cleanup
-}
-
-// receiveConsolePTY accepts a connection on the server socket and reads fds.
-// It fails if more than one FD is received, or if the FD is not a PTY. It
-// returns the PTY master file.
-func receiveConsolePTY(srv *unet.ServerSocket) (*os.File, error) {
- sock, err := srv.Accept()
- if err != nil {
- return nil, fmt.Errorf("error accepting socket connection: %v", err)
- }
-
- // Allow 1 FD to be received; that is all we expect.
- r := sock.Reader(true /* blocking */)
- r.EnableFDs(1)
-
- // The socket is closed right after sending the FD, so EOF is
- // an allowed error.
- b := [][]byte{{}}
- if _, err := r.ReadVec(b); err != nil && err != io.EOF {
- return nil, fmt.Errorf("error reading from socket connection: %v", err)
- }
-
- // We should have gotten a control message.
- fds, err := r.ExtractFDs()
- if err != nil {
- return nil, fmt.Errorf("error extracting fds from socket connection: %v", err)
- }
- if len(fds) != 1 {
- return nil, fmt.Errorf("got %d fds from socket, wanted 1", len(fds))
- }
-
- // Verify that the fd is a terminal.
- if _, err := unix.IoctlGetTermios(fds[0], unix.TCGETS); err != nil {
- return nil, fmt.Errorf("fd is not a terminal (ioctl TGGETS got %v)", err)
- }
-
- return os.NewFile(uintptr(fds[0]), "pty_master"), nil
-}
-
-// Test that a pty FD is sent over the console socket if one is provided.
-func TestConsoleSocket(t *testing.T) {
- for name, conf := range configs(t, all...) {
- t.Run(name, func(t *testing.T) {
- spec := testutil.NewSpecWithArgs("true")
- spec.Process.Terminal = true
- _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- sock, err := socketPath(bundleDir)
- if err != nil {
- t.Fatalf("error getting socket path: %v", err)
- }
- srv, cleanup := createConsoleSocket(t, sock)
- defer cleanup()
-
- // Create the container and pass the socket name.
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- ConsoleSocket: sock,
- }
- c, err := New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- defer c.Destroy()
-
- // Make sure we get a console PTY.
- ptyMaster, err := receiveConsolePTY(srv)
- if err != nil {
- t.Fatalf("error receiving console FD: %v", err)
- }
- ptyMaster.Close()
- })
- }
-}
-
-// Test that a pty FD is sent over the console socket if one is provided.
-func TestMultiContainerConsoleSocket(t *testing.T) {
- for name, conf := range configs(t, all...) {
- t.Run(name, func(t *testing.T) {
- rootDir, cleanup, err := testutil.SetupRootDir()
- if err != nil {
- t.Fatalf("error creating root dir: %v", err)
- }
- defer cleanup()
- conf.RootDir = rootDir
-
- // Set up the containers.
- sleep := []string{"sleep", "100"}
- tru := []string{"true"}
- testSpecs, ids := createSpecs(sleep, tru)
- testSpecs[1].Process.Terminal = true
-
- bundleDir, cleanup, err := testutil.SetupBundleDir(testSpecs[0])
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- args := Args{
- ID: ids[0],
- Spec: testSpecs[0],
- BundleDir: bundleDir,
- }
- rootCont, err := New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- defer rootCont.Destroy()
- if err := rootCont.Start(conf); err != nil {
- t.Fatalf("error starting container: %v", err)
- }
-
- bundleDir, cleanup, err = testutil.SetupBundleDir(testSpecs[0])
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- sock, err := socketPath(bundleDir)
- if err != nil {
- t.Fatalf("error getting socket path: %v", err)
- }
- srv, cleanup := createConsoleSocket(t, sock)
- defer cleanup()
-
- // Create the container and pass the socket name.
- args = Args{
- ID: ids[1],
- Spec: testSpecs[1],
- BundleDir: bundleDir,
- ConsoleSocket: sock,
- }
- cont, err := New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- defer cont.Destroy()
- if err := cont.Start(conf); err != nil {
- t.Fatalf("error starting container: %v", err)
- }
-
- // Make sure we get a console PTY.
- ptyMaster, err := receiveConsolePTY(srv)
- if err != nil {
- t.Fatalf("error receiving console FD: %v", err)
- }
- ptyMaster.Close()
- })
- }
-}
-
-// Test that job control signals work on a console created with "exec -ti".
-func TestJobControlSignalExec(t *testing.T) {
- spec := testutil.NewSpecWithArgs("/bin/sleep", "10000")
- conf := testutil.TestConfig(t)
-
- _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- // Create and start the container.
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- }
- c, err := New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- defer c.Destroy()
- if err := c.Start(conf); err != nil {
- t.Fatalf("error starting container: %v", err)
- }
-
- // Create a pty master/replica. The replica will be passed to the exec
- // process.
- ptyMaster, ptyReplica, err := pty.Open()
- if err != nil {
- t.Fatalf("error opening pty: %v", err)
- }
- defer ptyMaster.Close()
- defer ptyReplica.Close()
-
- // Exec bash and attach a terminal. Note that occasionally /bin/sh
- // may be a different shell or have a different configuration (such
- // as disabling interactive mode and job control). Since we want to
- // explicitly test interactive mode, use /bin/bash. See b/116981926.
- execArgs := &control.ExecArgs{
- Filename: "/bin/bash",
- // Don't let bash execute from profile or rc files, otherwise
- // our PID counts get messed up.
- Argv: []string{"/bin/bash", "--noprofile", "--norc"},
- // Pass the pty replica as FD 0, 1, and 2.
- FilePayload: urpc.FilePayload{
- Files: []*os.File{ptyReplica, ptyReplica, ptyReplica},
- },
- StdioIsPty: true,
- }
-
- pid, err := c.Execute(conf, execArgs)
- if err != nil {
- t.Fatalf("error executing: %v", err)
- }
- if pid != 2 {
- t.Fatalf("exec got pid %d, wanted %d", pid, 2)
- }
-
- // Make sure all the processes are running.
- expectedPL := []*control.Process{
- // Root container process.
- newProcessBuilder().Cmd("sleep").Process(),
- // Bash from exec process.
- newProcessBuilder().PID(2).Cmd("bash").Process(),
- }
- if err := waitForProcessList(c, expectedPL); err != nil {
- t.Error(err)
- }
-
- // Execute sleep.
- if _, err := ptyMaster.Write([]byte("sleep 100\n")); err != nil {
- t.Fatalf("ptyMaster.Write: %v", err)
- }
-
- // Wait for it to start. Sleep's PPID is bash's PID.
- expectedPL = append(expectedPL, newProcessBuilder().PID(3).PPID(2).Cmd("sleep").Process())
- if err := waitForProcessList(c, expectedPL); err != nil {
- t.Error(err)
- }
-
- // Send a SIGTERM to the foreground process for the exec PID. Note that
- // although we pass in the PID of "bash", it should actually terminate
- // "sleep", since that is the foreground process.
- if err := c.Sandbox.SignalProcess(c.ID, pid, unix.SIGTERM, true /* fgProcess */); err != nil {
- t.Fatalf("error signaling container: %v", err)
- }
-
- // Sleep process should be gone.
- expectedPL = expectedPL[:len(expectedPL)-1]
- if err := waitForProcessList(c, expectedPL); err != nil {
- t.Error(err)
- }
-
- // Sleep is dead, but it may take more time for bash to notice and
- // change the foreground process back to itself. We know it is done
- // when bash writes "Terminated" to the pty.
- if err := testutil.WaitUntilRead(ptyMaster, "Terminated", 5*time.Second); err != nil {
- t.Fatalf("bash did not take over pty: %v", err)
- }
-
- // Send a SIGKILL to the foreground process again. This time "bash"
- // should be killed. We use SIGKILL instead of SIGTERM or SIGINT
- // because bash ignores those.
- if err := c.Sandbox.SignalProcess(c.ID, pid, unix.SIGKILL, true /* fgProcess */); err != nil {
- t.Fatalf("error signaling container: %v", err)
- }
- expectedPL = expectedPL[:1]
- if err := waitForProcessList(c, expectedPL); err != nil {
- t.Error(err)
- }
-
- // Make sure the process indicates it was killed by a SIGKILL.
- ws, err := c.WaitPID(pid)
- if err != nil {
- t.Errorf("waiting on container failed: %v", err)
- }
- if !ws.Signaled() {
- t.Error("ws.Signaled() got false, want true")
- }
- if got, want := ws.Signal(), unix.SIGKILL; got != want {
- t.Errorf("ws.Signal() got %v, want %v", got, want)
- }
-}
-
-// Test that job control signals work on a console created with "run -ti".
-func TestJobControlSignalRootContainer(t *testing.T) {
- conf := testutil.TestConfig(t)
- // Don't let bash execute from profile or rc files, otherwise our PID
- // counts get messed up.
- spec := testutil.NewSpecWithArgs("/bin/bash", "--noprofile", "--norc")
- spec.Process.Terminal = true
-
- _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- sock, err := socketPath(bundleDir)
- if err != nil {
- t.Fatalf("error getting socket path: %v", err)
- }
- srv, cleanup := createConsoleSocket(t, sock)
- defer cleanup()
-
- // Create the container and pass the socket name.
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- ConsoleSocket: sock,
- }
- c, err := New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- defer c.Destroy()
-
- // Get the PTY master.
- ptyMaster, err := receiveConsolePTY(srv)
- if err != nil {
- t.Fatalf("error receiving console FD: %v", err)
- }
- defer ptyMaster.Close()
-
- // Bash output as well as sandbox output will be written to the PTY
- // file. Writes after a certain point will block unless we drain the
- // PTY, so we must continually copy from it.
- //
- // We log the output to stderr for debuggability, and also to a buffer,
- // since we wait on particular output from bash below. We use a custom
- // blockingBuffer which is thread-safe and also blocks on Read calls,
- // which makes this a suitable Reader for WaitUntilRead.
- ptyBuf := newBlockingBuffer()
- tee := io.TeeReader(ptyMaster, ptyBuf)
- go func() {
- _, _ = io.Copy(os.Stderr, tee)
- }()
-
- // Start the container.
- if err := c.Start(conf); err != nil {
- t.Fatalf("error starting container: %v", err)
- }
-
- // Start waiting for the container to exit in a goroutine. We do this
- // very early, otherwise it might exit before we have a chance to call
- // Wait.
- var (
- ws unix.WaitStatus
- wg sync.WaitGroup
- )
- wg.Add(1)
- go func() {
- var err error
- ws, err = c.Wait()
- if err != nil {
- t.Errorf("error waiting on container: %v", err)
- }
- wg.Done()
- }()
-
- // Wait for bash to start.
- expectedPL := []*control.Process{
- newProcessBuilder().PID(1).Cmd("bash").Process(),
- }
- if err := waitForProcessList(c, expectedPL); err != nil {
- t.Fatalf("error waiting for processes: %v", err)
- }
-
- // Execute sleep via the terminal.
- if _, err := ptyMaster.Write([]byte("sleep 100\n")); err != nil {
- t.Fatalf("ptyMaster.Write(): %v", err)
- }
-
- // Wait for sleep to start.
- expectedPL = append(expectedPL, newProcessBuilder().PID(2).PPID(1).Cmd("sleep").Process())
- if err := waitForProcessList(c, expectedPL); err != nil {
- t.Fatalf("error waiting for processes: %v", err)
- }
-
- // Reset the pty buffer, so there is less output for us to scan later.
- ptyBuf.Reset()
-
- // Send a SIGTERM to the foreground process. We pass PID=0, indicating
- // that the root process should be killed. However, by setting
- // fgProcess=true, the signal should actually be sent to sleep.
- if err := c.Sandbox.SignalProcess(c.ID, 0 /* PID */, unix.SIGTERM, true /* fgProcess */); err != nil {
- t.Fatalf("error signaling container: %v", err)
- }
-
- // Sleep process should be gone.
- expectedPL = expectedPL[:len(expectedPL)-1]
- if err := waitForProcessList(c, expectedPL); err != nil {
- t.Error(err)
- }
-
- // Sleep is dead, but it may take more time for bash to notice and
- // change the foreground process back to itself. We know it is done
- // when bash writes "Terminated" to the pty.
- if err := testutil.WaitUntilRead(ptyBuf, "Terminated", 5*time.Second); err != nil {
- t.Fatalf("bash did not take over pty: %v", err)
- }
-
- // Send a SIGKILL to the foreground process again. This time "bash"
- // should be killed. We use SIGKILL instead of SIGTERM or SIGINT
- // because bash ignores those.
- if err := c.Sandbox.SignalProcess(c.ID, 0 /* PID */, unix.SIGKILL, true /* fgProcess */); err != nil {
- t.Fatalf("error signaling container: %v", err)
- }
-
- // Wait for the sandbox to exit. It should exit with a SIGKILL status.
- wg.Wait()
- if !ws.Signaled() {
- t.Error("ws.Signaled() got false, want true")
- }
- if got, want := ws.Signal(), unix.SIGKILL; got != want {
- t.Errorf("ws.Signal() got %v, want %v", got, want)
- }
-}
-
-// Test that the terminal works with root and sub-containers.
-func TestMultiContainerTerminal(t *testing.T) {
- for name, conf := range configs(t, all...) {
- t.Run(name, func(t *testing.T) {
- rootDir, cleanup, err := testutil.SetupRootDir()
- if err != nil {
- t.Fatalf("error creating root dir: %v", err)
- }
- defer cleanup()
- conf.RootDir = rootDir
-
- // Don't let bash execute from profile or rc files, otherwise our PID
- // counts get messed up.
- bash := []string{"/bin/bash", "--noprofile", "--norc"}
- testSpecs, ids := createSpecs(bash, bash)
-
- type termContainer struct {
- container *Container
- master *os.File
- }
- var containers []termContainer
- for i, spec := range testSpecs {
- bundleDir, cleanup, err := testutil.SetupBundleDir(spec)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- spec.Process.Terminal = true
- sock, err := socketPath(bundleDir)
- if err != nil {
- t.Fatalf("error getting socket path: %v", err)
- }
- srv, cleanup := createConsoleSocket(t, sock)
- defer cleanup()
-
- // Create the container and pass the socket name.
- args := Args{
- ID: ids[i],
- Spec: spec,
- BundleDir: bundleDir,
- ConsoleSocket: sock,
- }
- cont, err := New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- defer cont.Destroy()
-
- if err := cont.Start(conf); err != nil {
- t.Fatalf("error starting container: %v", err)
- }
-
- // Make sure we get a console PTY.
- ptyMaster, err := receiveConsolePTY(srv)
- if err != nil {
- t.Fatalf("error receiving console FD: %v", err)
- }
- defer ptyMaster.Close()
-
- containers = append(containers, termContainer{
- container: cont,
- master: ptyMaster,
- })
- }
-
- for _, tc := range containers {
- // Bash output as well as sandbox output will be written to the PTY
- // file. Writes after a certain point will block unless we drain the
- // PTY, so we must continually copy from it.
- //
- // We log the output to stderr for debuggability, and also to a buffer,
- // since we wait on particular output from bash below. We use a custom
- // blockingBuffer which is thread-safe and also blocks on Read calls,
- // which makes this a suitable Reader for WaitUntilRead.
- ptyBuf := newBlockingBuffer()
- tee := io.TeeReader(tc.master, ptyBuf)
- go func() {
- _, _ = io.Copy(os.Stderr, tee)
- }()
-
- // Wait for bash to start.
- expectedPL := []*control.Process{
- newProcessBuilder().Cmd("bash").Process(),
- }
- if err := waitForProcessList(tc.container, expectedPL); err != nil {
- t.Fatalf("error waiting for processes: %v", err)
- }
-
- // Execute an echo command and check that it was executed correctly. Use
- // a variable so the check matches the command's output rather than the
- // echoed command line itself.
- if _, err := tc.master.Write([]byte("echo foo-${PWD}-123\n")); err != nil {
- t.Fatalf("master.Write(): %v", err)
- }
- if err := testutil.WaitUntilRead(ptyBuf, "foo-/-123", 5*time.Second); err != nil {
- t.Fatalf("echo didn't execute: %v", err)
- }
- }
- })
- }
-}
-
-// blockingBuffer is a thread-safe buffer that blocks when reading if the
-// buffer is empty. It implements io.ReadWriter.
-type blockingBuffer struct {
- // A send to readCh indicates that a previously empty buffer now has
- // data for reading.
- readCh chan struct{}
-
- // mu protects buf.
- mu sync.Mutex
- buf bytes.Buffer
-}
-
-func newBlockingBuffer() *blockingBuffer {
- return &blockingBuffer{
- readCh: make(chan struct{}, 1),
- }
-}
-
-// Write implements Writer.Write.
-func (bb *blockingBuffer) Write(p []byte) (int, error) {
- bb.mu.Lock()
- defer bb.mu.Unlock()
- l := bb.buf.Len()
- n, err := bb.buf.Write(p)
- if l == 0 && n > 0 {
- // New data!
- bb.readCh <- struct{}{}
- }
- return n, err
-}
-
-// Read implements Reader.Read. It will block until data is available.
-func (bb *blockingBuffer) Read(p []byte) (int, error) {
- for {
- bb.mu.Lock()
- n, err := bb.buf.Read(p)
- if n > 0 || err != io.EOF {
- if bb.buf.Len() == 0 {
- // Reset the readCh.
- select {
- case <-bb.readCh:
- default:
- }
- }
- bb.mu.Unlock()
- return n, err
- }
- bb.mu.Unlock()
-
- // Wait for new data.
- <-bb.readCh
- }
-}
-
-// Reset resets the buffer.
-func (bb *blockingBuffer) Reset() {
- bb.mu.Lock()
- defer bb.mu.Unlock()
- bb.buf.Reset()
- // Reset the readCh.
- select {
- case <-bb.readCh:
- default:
- }
-}
diff --git a/runsc/container/container_norace_test.go b/runsc/container/container_norace_test.go
deleted file mode 100644
index a4daf16ed..000000000
--- a/runsc/container/container_norace_test.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !race
-// +build !race
-
-package container
-
-// Allow both kvm and ptrace for non-race builds.
-var platformOptions = []configOption{ptrace, kvm}
diff --git a/runsc/container/container_race_test.go b/runsc/container/container_race_test.go
deleted file mode 100644
index 86a57145c..000000000
--- a/runsc/container/container_race_test.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build race
-// +build race
-
-package container
-
-// Only enable ptrace for race builds.
-var platformOptions = []configOption{ptrace}
diff --git a/runsc/container/container_state_autogen.go b/runsc/container/container_state_autogen.go
new file mode 100644
index 000000000..5bc1c1aff
--- /dev/null
+++ b/runsc/container/container_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package container
diff --git a/runsc/container/container_test.go b/runsc/container/container_test.go
deleted file mode 100644
index 5fb4a3672..000000000
--- a/runsc/container/container_test.go
+++ /dev/null
@@ -1,2599 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package container
-
-import (
- "bytes"
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "math"
- "os"
- "path"
- "path/filepath"
- "reflect"
- "strconv"
- "strings"
- "testing"
- "time"
-
- "github.com/cenkalti/backoff"
- specs "github.com/opencontainers/runtime-spec/specs-go"
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/bits"
- "gvisor.dev/gvisor/pkg/log"
- "gvisor.dev/gvisor/pkg/sentry/control"
- "gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/test/testutil"
- "gvisor.dev/gvisor/pkg/urpc"
- "gvisor.dev/gvisor/runsc/boot/platforms"
- "gvisor.dev/gvisor/runsc/config"
- "gvisor.dev/gvisor/runsc/specutils"
-)
-
-func TestMain(m *testing.M) {
- log.SetLevel(log.Debug)
- flag.Parse()
- if err := testutil.ConfigureExePath(); err != nil {
- panic(err.Error())
- }
- if err := specutils.MaybeRunAsRoot(); err != nil {
- fmt.Fprintf(os.Stderr, "Error running as root: %v", err)
- os.Exit(123)
- }
- os.Exit(m.Run())
-}
-
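-// execute synchronously runs the given command inside the container and
-// returns its wait status.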
-func execute(conf *config.Config, cont *Container, name string, arg ...string) (unix.WaitStatus, error) {
- args := &control.ExecArgs{
- Filename: name,
- Argv: append([]string{name}, arg...),
- }
- return cont.executeSync(conf, args)
-}
-
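-// executeCombinedOutput synchronously runs the command inside the container
-// and returns its combined stdout and stderr output.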
-func executeCombinedOutput(conf *config.Config, cont *Container, name string, arg ...string) ([]byte, error) {
- r, w, err := os.Pipe()
- if err != nil {
- return nil, err
- }
- defer r.Close()
-
- args := &control.ExecArgs{
- Filename: name,
- Argv: append([]string{name}, arg...),
- FilePayload: urpc.FilePayload{Files: []*os.File{os.Stdin, w, w}},
- }
- ws, err := cont.executeSync(conf, args)
- w.Close()
- if err != nil {
- return nil, err
- }
- if ws != 0 {
- return nil, fmt.Errorf("exec failed, status: %v", ws)
- }
-
- out, err := ioutil.ReadAll(r)
- return out, err
-}
-
-// executeSync synchronously executes a new process.
-func (c *Container) executeSync(conf *config.Config, args *control.ExecArgs) (unix.WaitStatus, error) {
- pid, err := c.Execute(conf, args)
- if err != nil {
- return 0, fmt.Errorf("error executing: %v", err)
- }
- ws, err := c.WaitPID(pid)
- if err != nil {
- return 0, fmt.Errorf("error waiting: %v", err)
- }
- return ws, nil
-}
-
-// waitForProcessList waits for the given process list to show up in the container.
-func waitForProcessList(cont *Container, want []*control.Process) error {
- cb := func() error {
- got, err := cont.Processes()
- if err != nil {
- err = fmt.Errorf("error getting process data from container: %w", err)
- return &backoff.PermanentError{Err: err}
- }
- if !procListsEqual(got, want) {
- return fmt.Errorf("container got process list: %s, want: %s", procListToString(got), procListToString(want))
- }
- return nil
- }
- // Give plenty of time, as tests can run slowly under --race.
- return testutil.Poll(cb, 30*time.Second)
-}
-
-// waitForProcess waits for the given process to show up in the container.
-func waitForProcess(cont *Container, want *control.Process) error {
- cb := func() error {
- gots, err := cont.Processes()
- if err != nil {
- err = fmt.Errorf("error getting process data from container: %w", err)
- return &backoff.PermanentError{Err: err}
- }
- for _, got := range gots {
- if procEqual(got, want) {
- return nil
- }
- }
- return fmt.Errorf("container got process list: %s, want: %+v", procListToString(gots), want)
- }
- // Give plenty of time, as tests can run slowly under --race.
- return testutil.Poll(cb, 30*time.Second)
-}
-
-func waitForProcessCount(cont *Container, want int) error {
- cb := func() error {
- pss, err := cont.Processes()
- if err != nil {
- err = fmt.Errorf("error getting process data from container: %w", err)
- return &backoff.PermanentError{Err: err}
- }
- if got := len(pss); got != want {
- log.Infof("Waiting for process count to reach %d. Current: %d", want, got)
- return fmt.Errorf("wrong process count, got: %d, want: %d", got, want)
- }
- return nil
- }
- // Give plenty of time, as tests can run slowly under --race.
- return testutil.Poll(cb, 30*time.Second)
-}
-
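-// blockUntilWaitable blocks until the process with the given PID has exited.
-// WNOWAIT leaves the exit status available for a later wait call.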
-func blockUntilWaitable(pid int) error {
- _, _, err := specutils.RetryEintr(func() (uintptr, uintptr, error) {
- var err error
- _, _, err1 := unix.Syscall6(unix.SYS_WAITID, 1, uintptr(pid), 0, unix.WEXITED|unix.WNOWAIT, 0, 0)
- if err1 != 0 {
- err = err1
- }
- return 0, 0, err
- })
- return err
-}
-
-// execPS executes `ps` inside the container and returns the processes.
-func execPS(conf *config.Config, c *Container) ([]*control.Process, error) {
- out, err := executeCombinedOutput(conf, c, "/bin/ps", "-e")
- if err != nil {
- return nil, err
- }
- lines := strings.Split(string(out), "\n")
- if len(lines) < 1 {
- return nil, fmt.Errorf("missing header: %q", lines)
- }
- procs := make([]*control.Process, 0, len(lines)-1)
- for _, line := range lines[1:] {
- if len(line) == 0 {
- continue
- }
- fields := strings.Fields(line)
- if len(fields) != 4 {
- return nil, fmt.Errorf("malformed line: %s", line)
- }
- pid, err := strconv.Atoi(fields[0])
- if err != nil {
- return nil, err
- }
- cmd := fields[3]
- // Fill only the fields we need thus far.
- procs = append(procs, &control.Process{
- PID: kernel.ThreadID(pid),
- Cmd: cmd,
- })
- }
- return procs, nil
-}
-
-// procListsEqual checks whether two Process lists are equal. Fields
-// set to -1 in wants are ignored. Timestamp and threads fields are always
-// ignored.
-func procListsEqual(gots, wants []*control.Process) bool {
- if len(gots) != len(wants) {
- return false
- }
- for i := range gots {
- if !procEqual(gots[i], wants[i]) {
- return false
- }
- }
- return true
-}
-
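-// procEqual reports whether got matches want, ignoring fields in want left at
-// their sentinel values (UID=MaxUint32, PID/PPID=-1, empty TTY and Cmd).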
-func procEqual(got, want *control.Process) bool {
- if want.UID != math.MaxUint32 && want.UID != got.UID {
- return false
- }
- if want.PID != -1 && want.PID != got.PID {
- return false
- }
- if want.PPID != -1 && want.PPID != got.PPID {
- return false
- }
- if len(want.TTY) != 0 && want.TTY != got.TTY {
- return false
- }
- if len(want.Cmd) != 0 && want.Cmd != got.Cmd {
- return false
- }
- return true
-}
-
-type processBuilder struct {
- process control.Process
-}
-
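-// newProcessBuilder returns a builder whose UID, PID, and PPID default to the
-// sentinel values that procEqual ignores.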
-func newProcessBuilder() *processBuilder {
- return &processBuilder{
- process: control.Process{
- UID: math.MaxUint32,
- PID: -1,
- PPID: -1,
- },
- }
-}
-
-func (p *processBuilder) Cmd(cmd string) *processBuilder {
- p.process.Cmd = cmd
- return p
-}
-
-func (p *processBuilder) PID(pid kernel.ThreadID) *processBuilder {
- p.process.PID = pid
- return p
-}
-
-func (p *processBuilder) PPID(ppid kernel.ThreadID) *processBuilder {
- p.process.PPID = ppid
- return p
-}
-
-func (p *processBuilder) UID(uid auth.KUID) *processBuilder {
- p.process.UID = uid
- return p
-}
-
-func (p *processBuilder) Process() *control.Process {
- return &p.process
-}
-
-func procListToString(pl []*control.Process) string {
- strs := make([]string, 0, len(pl))
- for _, p := range pl {
- strs = append(strs, fmt.Sprintf("%+v", p))
- }
- return fmt.Sprintf("[%s]", strings.Join(strs, ","))
-}
-
-// createWriteableOutputFile creates an output file that can be read and
-// written to in the sandbox.
-func createWriteableOutputFile(path string) (*os.File, error) {
- outputFile, err := os.OpenFile(path, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666)
- if err != nil {
- return nil, fmt.Errorf("error creating file: %q, %v", path, err)
- }
-
- // Chmod to allow writing after umask.
- if err := outputFile.Chmod(0666); err != nil {
- return nil, fmt.Errorf("error chmoding file: %q, %v", path, err)
- }
- return outputFile, nil
-}
-
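-// waitForFileNotEmpty polls until the file has a non-zero size, for up to 30
-// seconds.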
-func waitForFileNotEmpty(f *os.File) error {
- op := func() error {
- fi, err := f.Stat()
- if err != nil {
- return err
- }
- if fi.Size() == 0 {
- return fmt.Errorf("file %q is empty", f.Name())
- }
- return nil
- }
-
- return testutil.Poll(op, 30*time.Second)
-}
-
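-// waitForFileExist polls until the file at path exists, for up to 30 seconds.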
-func waitForFileExist(path string) error {
- op := func() error {
- if _, err := os.Stat(path); os.IsNotExist(err) {
- return err
- }
- return nil
- }
-
- return testutil.Poll(op, 30*time.Second)
-}
-
-// readOutputNum reads the file at the given filepath and returns the int at the
-// requested position.
-func readOutputNum(file string, position int) (int, error) {
- f, err := os.Open(file)
- if err != nil {
- return 0, fmt.Errorf("error opening file: %q, %v", file, err)
- }
-
- // Ensure that there is content in output file.
- if err := waitForFileNotEmpty(f); err != nil {
- return 0, fmt.Errorf("error waiting for output file: %v", err)
- }
-
- b, err := ioutil.ReadAll(f)
- if err != nil {
- return 0, fmt.Errorf("error reading file: %v", err)
- }
- if len(b) == 0 {
- return 0, fmt.Errorf("error no content was read")
- }
-
- // Strip leading null bytes caused by file offset not being 0 upon restore.
- b = bytes.Trim(b, "\x00")
- nums := strings.Split(string(b), "\n")
-
- if position >= len(nums) {
- return 0, fmt.Errorf("position %v is not within the length of content %v", position, nums)
- }
- if position == -1 {
- // The trailing newline leaves an empty final entry; use the one before it.
- position = len(nums) - 2
- }
- num, err := strconv.Atoi(nums[position])
- if err != nil {
- return 0, fmt.Errorf("error getting number from file: %v", err)
- }
- return num, nil
-}
-
-// run starts the sandbox and waits for it to exit, checking that the
-// application succeeded.
-func run(spec *specs.Spec, conf *config.Config) error {
- _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
- if err != nil {
- return fmt.Errorf("error setting up container: %v", err)
- }
- defer cleanup()
-
- // Create, start and wait for the container.
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- Attached: true,
- }
- ws, err := Run(conf, args)
- if err != nil {
- return fmt.Errorf("running container: %v", err)
- }
- if !ws.Exited() || ws.ExitStatus() != 0 {
- return fmt.Errorf("container failed, waitStatus: %v", ws)
- }
- return nil
-}
-
-type configOption int
-
-const (
- overlay configOption = iota
- ptrace
- kvm
- nonExclusiveFS
-)
-
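-// noOverlay holds the platform options plus shared (non-exclusive) file
-// access; all additionally includes the overlay option.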
-var (
- noOverlay = append(platformOptions, nonExclusiveFS)
- all = append(noOverlay, overlay)
-)
-
-func configsHelper(t *testing.T, opts ...configOption) map[string]*config.Config {
- // Always load the default config.
- cs := make(map[string]*config.Config)
- testutil.TestConfig(t)
- for _, o := range opts {
- c := testutil.TestConfig(t)
- switch o {
- case overlay:
- c.Overlay = true
- cs["overlay"] = c
- case ptrace:
- c.Platform = platforms.Ptrace
- cs["ptrace"] = c
- case kvm:
- c.Platform = platforms.KVM
- cs["kvm"] = c
- case nonExclusiveFS:
- c.FileAccess = config.FileAccessShared
- cs["non-exclusive"] = c
- default:
- panic(fmt.Sprintf("unknown config option %v", o))
- }
- }
- return cs
-}
-
-// configs generates different configurations to run tests.
-//
-// TODO(gvisor.dev/issue/1624): Remove VFS1 dimension.
-func configs(t *testing.T, opts ...configOption) map[string]*config.Config {
- all := configsHelper(t, opts...)
- for key, value := range configsHelper(t, opts...) {
- value.VFS2 = true
- all[key+"VFS2"] = value
- }
- return all
-}
-
-// TestLifecycle tests the basic Create/Start/Signal/Destroy container lifecycle.
-// It verifies after each step that the container can be loaded from disk, and
-// has the correct status.
-func TestLifecycle(t *testing.T) {
- // Start the child reaper.
- childReaper := &testutil.Reaper{}
- childReaper.Start()
- defer childReaper.Stop()
-
- for name, conf := range configs(t, all...) {
- t.Run(name, func(t *testing.T) {
- // The container will just sleep for a long time. We will kill it before
- // it finishes sleeping.
- spec := testutil.NewSpecWithArgs("sleep", "100")
-
- rootDir, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- // expectedPL lists the expected process state of the container.
- expectedPL := []*control.Process{
- newProcessBuilder().Cmd("sleep").Process(),
- }
- // Create the container.
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- }
- c, err := New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- defer c.Destroy()
-
- // Load the container from disk and check the status.
- c, err = Load(rootDir, FullID{ContainerID: args.ID}, LoadOpts{})
- if err != nil {
- t.Fatalf("error loading container: %v", err)
- }
- if got, want := c.Status, Created; got != want {
- t.Errorf("container status got %v, want %v", got, want)
- }
-
- // List should return the container id.
- ids, err := List(rootDir)
- if err != nil {
- t.Fatalf("error listing containers: %v", err)
- }
- fullID := FullID{
- SandboxID: args.ID,
- ContainerID: args.ID,
- }
- if got, want := ids, []FullID{fullID}; !reflect.DeepEqual(got, want) {
- t.Errorf("container list got %v, want %v", got, want)
- }
-
- // Start the container.
- if err := c.Start(conf); err != nil {
- t.Fatalf("error starting container: %v", err)
- }
-
- // Load the container from disk and check the status.
- c, err = Load(rootDir, fullID, LoadOpts{Exact: true})
- if err != nil {
- t.Fatalf("error loading container: %v", err)
- }
- if got, want := c.Status, Running; got != want {
- t.Errorf("container status got %v, want %v", got, want)
- }
-
- // Verify that "sleep 100" is running.
- if err := waitForProcessList(c, expectedPL); err != nil {
- t.Error(err)
- }
-
- // Wait on the container.
- ch := make(chan error)
- go func() {
- ws, err := c.Wait()
- if err != nil {
- ch <- err
- return
- }
- if got, want := ws.Signal(), unix.SIGTERM; got != want {
- ch <- fmt.Errorf("got signal %v, want %v", got, want)
- return
- }
- ch <- nil
- }()
-
- // Wait a bit to ensure that we've started waiting on
- // the container before we signal.
- time.Sleep(time.Second)
-
- // Send the container a SIGTERM which will cause it to stop.
- if err := c.SignalContainer(unix.SIGTERM, false); err != nil {
- t.Fatalf("error sending signal %v to container: %v", unix.SIGTERM, err)
- }
-
- // Wait for it to die.
- if err := <-ch; err != nil {
- t.Fatalf("error waiting for container: %v", err)
- }
-
- // Load the container from disk and check the status.
- c, err = Load(rootDir, fullID, LoadOpts{Exact: true})
- if err != nil {
- t.Fatalf("error loading container: %v", err)
- }
- if got, want := c.Status, Stopped; got != want {
- t.Errorf("container status got %v, want %v", got, want)
- }
-
- // Destroy the container.
- if err := c.Destroy(); err != nil {
- t.Fatalf("error destroying container: %v", err)
- }
-
- // List should not return the container id.
- ids, err = List(rootDir)
- if err != nil {
- t.Fatalf("error listing containers: %v", err)
- }
- if len(ids) != 0 {
- t.Errorf("expected container list to be empty, but got %v", ids)
- }
-
- // Loading the container by id should fail.
- if _, err = Load(rootDir, fullID, LoadOpts{Exact: true}); err == nil {
- t.Errorf("expected loading destroyed container to fail, but it did not")
- }
- })
- }
-}
-
-// Test that we can execute the application with different path formats.
-func TestExePath(t *testing.T) {
- // Create two directories that will be prepended to PATH.
- firstPath, err := ioutil.TempDir(testutil.TmpDir(), "first")
- if err != nil {
- t.Fatalf("error creating temporary directory: %v", err)
- }
- defer os.RemoveAll(firstPath)
- secondPath, err := ioutil.TempDir(testutil.TmpDir(), "second")
- if err != nil {
- t.Fatalf("error creating temporary directory: %v", err)
- }
- defer os.RemoveAll(secondPath)
-
- // Create three minimal executables in the second path, two of which
- // will be masked by files in the first path.
- for _, p := range []string{"unmasked", "masked1", "masked2"} {
- path := filepath.Join(secondPath, p)
- f, err := os.OpenFile(path, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0777)
- if err != nil {
- t.Fatalf("error opening path: %v", err)
- }
- defer f.Close()
- if _, err := io.WriteString(f, "#!/bin/true\n"); err != nil {
- t.Fatalf("error writing contents: %v", err)
- }
- }
-
- // Create a non-executable file in the first path which masks a healthy
- // executable in the second.
- nonExecutable := filepath.Join(firstPath, "masked1")
- f2, err := os.OpenFile(nonExecutable, os.O_CREATE|os.O_EXCL, 0666)
- if err != nil {
- t.Fatalf("error opening file: %v", err)
- }
- f2.Close()
-
- // Create a non-regular file in the first path which masks a healthy
- // executable in the second.
- nonRegular := filepath.Join(firstPath, "masked2")
- if err := os.Mkdir(nonRegular, 0777); err != nil {
- t.Fatalf("error making directory: %v", err)
- }
-
- for name, conf := range configs(t, all...) {
- t.Run(name, func(t *testing.T) {
- for _, test := range []struct {
- path string
- success bool
- }{
- {path: "true", success: true},
- {path: "bin/true", success: true},
- {path: "/bin/true", success: true},
- {path: "thisfiledoesntexit", success: false},
- {path: "bin/thisfiledoesntexit", success: false},
- {path: "/bin/thisfiledoesntexit", success: false},
-
- {path: "unmasked", success: true},
- {path: filepath.Join(firstPath, "unmasked"), success: false},
- {path: filepath.Join(secondPath, "unmasked"), success: true},
-
- {path: "masked1", success: true},
- {path: filepath.Join(firstPath, "masked1"), success: false},
- {path: filepath.Join(secondPath, "masked1"), success: true},
-
- {path: "masked2", success: true},
- {path: filepath.Join(firstPath, "masked2"), success: false},
- {path: filepath.Join(secondPath, "masked2"), success: true},
- } {
- t.Run(fmt.Sprintf("path=%s,success=%t", test.path, test.success), func(t *testing.T) {
- spec := testutil.NewSpecWithArgs(test.path)
- spec.Process.Env = []string{
- fmt.Sprintf("PATH=%s:%s:%s", firstPath, secondPath, os.Getenv("PATH")),
- }
-
- _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
- if err != nil {
- t.Fatalf("exec: error setting up container: %v", err)
- }
- defer cleanup()
-
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- Attached: true,
- }
- ws, err := Run(conf, args)
-
- if test.success {
- if err != nil {
- t.Errorf("exec: error running container: %v", err)
- }
- if ws.ExitStatus() != 0 {
- t.Errorf("exec: got exit status %v want %v", ws.ExitStatus(), 0)
- }
- } else {
- if err == nil {
- t.Errorf("exec: got: no error, want: error")
- }
- }
- })
- }
- })
- }
-}
-
-// Test that we can retrieve the application exit status from the container.
-func TestAppExitStatus(t *testing.T) {
- doAppExitStatus(t, false)
-}
-
-// This is TestAppExitStatus for VFSv2.
-func TestAppExitStatusVFS2(t *testing.T) {
- doAppExitStatus(t, true)
-}
-
-func doAppExitStatus(t *testing.T, vfs2 bool) {
- // First container will succeed.
- succSpec := testutil.NewSpecWithArgs("true")
- conf := testutil.TestConfig(t)
- conf.VFS2 = vfs2
- _, bundleDir, cleanup, err := testutil.SetupContainer(succSpec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: succSpec,
- BundleDir: bundleDir,
- Attached: true,
- }
- ws, err := Run(conf, args)
- if err != nil {
- t.Fatalf("error running container: %v", err)
- }
- if ws.ExitStatus() != 0 {
- t.Errorf("got exit status %v want %v", ws.ExitStatus(), 0)
- }
-
- // Second container exits with non-zero status.
- wantStatus := 123
- errSpec := testutil.NewSpecWithArgs("bash", "-c", fmt.Sprintf("exit %d", wantStatus))
-
- _, bundleDir2, cleanup2, err := testutil.SetupContainer(errSpec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup2()
-
- args2 := Args{
- ID: testutil.RandomContainerID(),
- Spec: errSpec,
- BundleDir: bundleDir2,
- Attached: true,
- }
- ws, err = Run(conf, args2)
- if err != nil {
- t.Fatalf("error running container: %v", err)
- }
- if ws.ExitStatus() != wantStatus {
- t.Errorf("got exit status %v want %v", ws.ExitStatus(), wantStatus)
- }
-}
-
-// TestExec verifies that a container can exec a new program.
-func TestExec(t *testing.T) {
- for name, conf := range configs(t, all...) {
- t.Run(name, func(t *testing.T) {
- dir, err := ioutil.TempDir(testutil.TmpDir(), "exec-test")
- if err != nil {
- t.Fatalf("error creating temporary directory: %v", err)
- }
- // Note that some shells may exec the final command in a sequence as
- // an optimization. We avoid this here by adding the exit 0.
- cmd := fmt.Sprintf("ln -s /bin/true %q/symlink && sleep 100 && exit 0", dir)
- spec := testutil.NewSpecWithArgs("sh", "-c", cmd)
-
- _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- // Create and start the container.
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- }
- cont, err := New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- defer cont.Destroy()
- if err := cont.Start(conf); err != nil {
- t.Fatalf("error starting container: %v", err)
- }
-
- // Wait until sleep is running to ensure the symlink was created.
- expectedPL := []*control.Process{
- newProcessBuilder().Cmd("sh").Process(),
- newProcessBuilder().Cmd("sleep").Process(),
- }
- if err := waitForProcessList(cont, expectedPL); err != nil {
- t.Fatalf("waitForProcessList: %v", err)
- }
-
- for _, tc := range []struct {
- name string
- args control.ExecArgs
- }{
- {
- name: "complete",
- args: control.ExecArgs{
- Filename: "/bin/true",
- Argv: []string{"/bin/true"},
- },
- },
- {
- name: "filename",
- args: control.ExecArgs{
- Filename: "/bin/true",
- },
- },
- {
- name: "argv",
- args: control.ExecArgs{
- Argv: []string{"/bin/true"},
- },
- },
- {
- name: "filename resolution",
- args: control.ExecArgs{
- Filename: "true",
- Envv: []string{"PATH=/bin"},
- },
- },
- {
- name: "argv resolution",
- args: control.ExecArgs{
- Argv: []string{"true"},
- Envv: []string{"PATH=/bin"},
- },
- },
- {
- name: "argv symlink",
- args: control.ExecArgs{
- Argv: []string{filepath.Join(dir, "symlink")},
- },
- },
- {
- name: "working dir",
- args: control.ExecArgs{
- Argv: []string{"/bin/sh", "-c", `if [[ "${PWD}" != "/tmp" ]]; then exit 1; fi`},
- WorkingDirectory: "/tmp",
- },
- },
- {
- name: "user",
- args: control.ExecArgs{
- Argv: []string{"/bin/sh", "-c", `if [[ "$(id -u)" != "343" ]]; then exit 1; fi`},
- KUID: 343,
- },
- },
- {
- name: "group",
- args: control.ExecArgs{
- Argv: []string{"/bin/sh", "-c", `if [[ "$(id -g)" != "343" ]]; then exit 1; fi`},
- KGID: 343,
- },
- },
- {
- name: "env",
- args: control.ExecArgs{
- Argv: []string{"/bin/sh", "-c", `if [[ "${FOO}" != "123" ]]; then exit 1; fi`},
- Envv: []string{"FOO=123"},
- },
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- // t.Parallel()
- if ws, err := cont.executeSync(conf, &tc.args); err != nil {
- t.Fatalf("executeAsync(%+v): %v", tc.args, err)
- } else if ws != 0 {
- t.Fatalf("executeAsync(%+v) failed with exit: %v", tc.args, ws)
- }
- })
- }
-
- // Test for exec failure with a non-existent file.
- t.Run("nonexist", func(t *testing.T) {
- // b/179114837: a bug found by Syzkaller that caused a nil-pointer panic when
- // trying to dec-ref a unix socket FD.
- fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_STREAM, 0)
- if err != nil {
- t.Fatal(err)
- }
- defer unix.Close(fds[0])
-
- _, err = cont.executeSync(conf, &control.ExecArgs{
- Argv: []string{"/nonexist"},
- FilePayload: urpc.FilePayload{
- Files: []*os.File{os.NewFile(uintptr(fds[1]), "sock")},
- },
- })
- want := "failed to load /nonexist"
- if err == nil || !strings.Contains(err.Error(), want) {
- t.Errorf("executeSync: want err containing %q; got err = %q", want, err)
- }
- })
- })
- }
-}
-
-// TestExecProcList verifies that a container can exec a new program and it
-// shows up correctly in the process list.
-func TestExecProcList(t *testing.T) {
- for name, conf := range configs(t, all...) {
- t.Run(name, func(t *testing.T) {
- const uid = 343
- spec := testutil.NewSpecWithArgs("sleep", "100")
-
- _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- // Create and start the container.
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- }
- cont, err := New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- defer cont.Destroy()
- if err := cont.Start(conf); err != nil {
- t.Fatalf("error starting container: %v", err)
- }
-
- execArgs := &control.ExecArgs{
- Filename: "/bin/sleep",
- Argv: []string{"/bin/sleep", "5"},
- WorkingDirectory: "/",
- KUID: uid,
- }
-
- // Verify that "sleep 100" and "sleep 5" are running after exec. First,
- // start running exec (which blocks).
- ch := make(chan error)
- go func() {
- exitStatus, err := cont.executeSync(conf, execArgs)
- if err != nil {
- ch <- err
- } else if exitStatus != 0 {
- ch <- fmt.Errorf("failed with exit status: %v", exitStatus)
- } else {
- ch <- nil
- }
- }()
-
- // expectedPL lists the expected process state of the container.
- expectedPL := []*control.Process{
- newProcessBuilder().PID(1).PPID(0).Cmd("sleep").UID(0).Process(),
- newProcessBuilder().PID(2).PPID(0).Cmd("sleep").UID(uid).Process(),
- }
- if err := waitForProcessList(cont, expectedPL); err != nil {
- t.Fatalf("error waiting for processes: %v", err)
- }
-
- // Ensure that exec finished without error.
- select {
- case <-time.After(10 * time.Second):
- t.Fatalf("container timed out waiting for exec to finish.")
- case err := <-ch:
- if err != nil {
- t.Errorf("container failed to exec %v: %v", args, err)
- }
- }
- })
- }
-}
-
-// TestKillPid verifies that we can signal individual exec'd processes.
-func TestKillPid(t *testing.T) {
- for name, conf := range configs(t, all...) {
- t.Run(name, func(t *testing.T) {
- app, err := testutil.FindFile("test/cmd/test_app/test_app")
- if err != nil {
- t.Fatal("error finding test_app:", err)
- }
-
- const nProcs = 4
- spec := testutil.NewSpecWithArgs(app, "task-tree", "--depth", strconv.Itoa(nProcs-1), "--width=1", "--pause=true")
- _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- // Create and start the container.
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- }
- cont, err := New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- defer cont.Destroy()
- if err := cont.Start(conf); err != nil {
- t.Fatalf("error starting container: %v", err)
- }
-
- // Verify that all processes are running.
- if err := waitForProcessCount(cont, nProcs); err != nil {
- t.Fatalf("timed out waiting for processes to start: %v", err)
- }
-
- // Kill the child process with the largest PID.
- procs, err := cont.Processes()
- if err != nil {
- t.Fatalf("failed to get process list: %v", err)
- }
- var pid int32
- for _, p := range procs {
- if pid < int32(p.PID) {
- pid = int32(p.PID)
- }
- }
- if err := cont.SignalProcess(unix.SIGKILL, pid); err != nil {
- t.Fatalf("failed to signal process %d: %v", pid, err)
- }
-
- // Verify that one process is gone.
- if err := waitForProcessCount(cont, nProcs-1); err != nil {
- t.Fatalf("error waiting for processes: %v", err)
- }
-
- procs, err = cont.Processes()
- if err != nil {
- t.Fatalf("failed to get process list: %v", err)
- }
- for _, p := range procs {
- if pid == int32(p.PID) {
- t.Fatalf("pid %d is still alive, which should be killed", pid)
- }
- }
- })
- }
-}
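
// Editorial sketch (not from the deleted file above): the "kill the deepest
// child" step from TestKillPid, pulled out for readability. It picks the
// largest PID reported by Processes() and sends SIGKILL to just that process.
// The helper name killLargestPID is ours, for illustration only.
func killLargestPID(c *Container) error {
	procs, err := c.Processes()
	if err != nil {
		return fmt.Errorf("failed to get process list: %v", err)
	}
	var pid int32
	for _, p := range procs {
		if int32(p.PID) > pid {
			pid = int32(p.PID)
		}
	}
	return c.SignalProcess(unix.SIGKILL, pid)
}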
-
-// TestCheckpointRestore creates a container that continuously writes successive
-// integers to a file. To test checkpoint and restore functionality, the
-// container is checkpointed and the last number printed to the file is
-// recorded. Then, it is restored in two new containers and the first number
-// printed from these containers is checked. Both should be the next consecutive
-// number after the last number from the checkpointed container.
-func TestCheckpointRestore(t *testing.T) {
- // Skip overlay because test requires writing to host file.
- for name, conf := range configs(t, noOverlay...) {
- t.Run(name, func(t *testing.T) {
- dir, err := ioutil.TempDir(testutil.TmpDir(), "checkpoint-test")
- if err != nil {
- t.Fatalf("ioutil.TempDir failed: %v", err)
- }
- defer os.RemoveAll(dir)
- if err := os.Chmod(dir, 0777); err != nil {
- t.Fatalf("error chmoding file: %q, %v", dir, err)
- }
-
- outputPath := filepath.Join(dir, "output")
- outputFile, err := createWriteableOutputFile(outputPath)
- if err != nil {
- t.Fatalf("error creating output file: %v", err)
- }
- defer outputFile.Close()
-
- script := fmt.Sprintf("for ((i=0; ;i++)); do echo $i >> %q; sleep 1; done", outputPath)
- spec := testutil.NewSpecWithArgs("bash", "-c", script)
- _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- // Create and start the container.
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- }
- cont, err := New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- defer cont.Destroy()
- if err := cont.Start(conf); err != nil {
- t.Fatalf("error starting container: %v", err)
- }
-
- // Set the image path, which is where the checkpoint image will be saved.
- imagePath := filepath.Join(dir, "test-image-file")
-
- // Create the image file and open for writing.
- file, err := os.OpenFile(imagePath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0644)
- if err != nil {
- t.Fatalf("error opening new file at imagePath: %v", err)
- }
- defer file.Close()
-
- // Wait until the application has run.
- if err := waitForFileNotEmpty(outputFile); err != nil {
- t.Fatalf("Failed to wait for output file: %v", err)
- }
-
- // Checkpoint running container; save state into new file.
- if err := cont.Checkpoint(file); err != nil {
- t.Fatalf("error checkpointing container to empty file: %v", err)
- }
- defer os.RemoveAll(imagePath)
-
- lastNum, err := readOutputNum(outputPath, -1)
- if err != nil {
- t.Fatalf("error with outputFile: %v", err)
- }
-
- // Delete and recreate file before restoring.
- if err := os.Remove(outputPath); err != nil {
- t.Fatalf("error removing file")
- }
- outputFile2, err := createWriteableOutputFile(outputPath)
- if err != nil {
- t.Fatalf("error creating output file: %v", err)
- }
- defer outputFile2.Close()
-
- // Restore into a new container.
- args2 := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- }
- cont2, err := New(conf, args2)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- defer cont2.Destroy()
-
- if err := cont2.Restore(spec, conf, imagePath); err != nil {
- t.Fatalf("error restoring container: %v", err)
- }
-
- // Wait until the application has run.
- if err := waitForFileNotEmpty(outputFile2); err != nil {
- t.Fatalf("Failed to wait for output file: %v", err)
- }
-
- firstNum, err := readOutputNum(outputPath, 0)
- if err != nil {
- t.Fatalf("error with outputFile: %v", err)
- }
-
- // Check that lastNum is one less than firstNum and that the container picks
- // up from where it left off.
- if lastNum+1 != firstNum {
- t.Errorf("error numbers not in order, previous: %d, next: %d", lastNum, firstNum)
- }
- cont2.Destroy()
-
- // Restore into another container!
- // Delete and recreate file before restoring.
- if err := os.Remove(outputPath); err != nil {
- t.Fatalf("error removing file")
- }
- outputFile3, err := createWriteableOutputFile(outputPath)
- if err != nil {
- t.Fatalf("error creating output file: %v", err)
- }
- defer outputFile3.Close()
-
- // Restore into a new container.
- args3 := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- }
- cont3, err := New(conf, args3)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- defer cont3.Destroy()
-
- if err := cont3.Restore(spec, conf, imagePath); err != nil {
- t.Fatalf("error restoring container: %v", err)
- }
-
- // Wait until the application has run.
- if err := waitForFileNotEmpty(outputFile3); err != nil {
- t.Fatalf("Failed to wait for output file: %v", err)
- }
-
- firstNum2, err := readOutputNum(outputPath, 0)
- if err != nil {
- t.Fatalf("error with outputFile: %v", err)
- }
-
- // Check that lastNum is one less than firstNum and that the container picks
- // up from where it left off.
- if lastNum+1 != firstNum2 {
- t.Errorf("error numbers not in order, previous: %d, next: %d", lastNum, firstNum2)
- }
- cont3.Destroy()
- })
- }
-}
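
// Editorial sketch (not from the deleted file above): the essential
// checkpoint-then-restore call sequence that TestCheckpointRestore exercises
// twice. It assumes a started *Container, its config, and the original
// spec/bundle; imagePath names the file the state is saved to. The helper name
// checkpointThenRestore is ours, for illustration only.
func checkpointThenRestore(conf *config.Config, cont *Container, spec *specs.Spec, bundleDir, imagePath string) (*Container, error) {
	// Save the running container's state into the image file.
	img, err := os.OpenFile(imagePath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0644)
	if err != nil {
		return nil, fmt.Errorf("creating image file: %v", err)
	}
	defer img.Close()
	if err := cont.Checkpoint(img); err != nil {
		return nil, fmt.Errorf("checkpointing container: %v", err)
	}

	// Create a fresh container and restore it from the saved image.
	restored, err := New(conf, Args{
		ID:        testutil.RandomContainerID(),
		Spec:      spec,
		BundleDir: bundleDir,
	})
	if err != nil {
		return nil, fmt.Errorf("creating container: %v", err)
	}
	if err := restored.Restore(spec, conf, imagePath); err != nil {
		restored.Destroy()
		return nil, fmt.Errorf("restoring container: %v", err)
	}
	return restored, nil
}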
-
-// TestUnixDomainSockets checks that Checkpoint/Restore works in cases
-// with filesystem Unix Domain Socket use.
-func TestUnixDomainSockets(t *testing.T) {
- // Skip overlay because test requires writing to host file.
- for name, conf := range configs(t, noOverlay...) {
- t.Run(name, func(t *testing.T) {
- // UDS path is limited to 108 chars for compatibility with older systems.
- // Use '/tmp' (instead of testutil.TmpDir) to ensure the size limit is
- // not exceeded. Assumes '/tmp' exists in the system.
- dir, err := ioutil.TempDir("/tmp", "uds-test")
- if err != nil {
- t.Fatalf("ioutil.TempDir failed: %v", err)
- }
- defer os.RemoveAll(dir)
-
- outputPath := filepath.Join(dir, "uds_output")
- outputFile, err := os.OpenFile(outputPath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666)
- if err != nil {
- t.Fatalf("error creating output file: %v", err)
- }
- defer outputFile.Close()
-
- app, err := testutil.FindFile("test/cmd/test_app/test_app")
- if err != nil {
- t.Fatal("error finding test_app:", err)
- }
-
- socketPath := filepath.Join(dir, "uds_socket")
- defer os.Remove(socketPath)
-
- spec := testutil.NewSpecWithArgs(app, "uds", "--file", outputPath, "--socket", socketPath)
- spec.Process.User = specs.User{
- UID: uint32(os.Getuid()),
- GID: uint32(os.Getgid()),
- }
- spec.Mounts = []specs.Mount{{
- Type: "bind",
- Destination: dir,
- Source: dir,
- }}
-
- _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- // Create and start the container.
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- }
- cont, err := New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- defer cont.Destroy()
- if err := cont.Start(conf); err != nil {
- t.Fatalf("error starting container: %v", err)
- }
-
- // Set the image path, the location where the checkpoint image will be saved.
- imagePath := filepath.Join(dir, "test-image-file")
-
- // Create the image file and open for writing.
- file, err := os.OpenFile(imagePath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0644)
- if err != nil {
- t.Fatalf("error opening new file at imagePath: %v", err)
- }
- defer file.Close()
- defer os.RemoveAll(imagePath)
-
- // Wait until the application has run.
- if err := waitForFileNotEmpty(outputFile); err != nil {
- t.Fatalf("Failed to wait for output file: %v", err)
- }
-
- // Checkpoint running container; save state into new file.
- if err := cont.Checkpoint(file); err != nil {
- t.Fatalf("error checkpointing container to empty file: %v", err)
- }
-
- // Read last number outputted before checkpoint.
- lastNum, err := readOutputNum(outputPath, -1)
- if err != nil {
- t.Fatalf("error with outputFile: %v", err)
- }
-
- // Delete and recreate file before restoring.
- if err := os.Remove(outputPath); err != nil {
- t.Fatalf("error removing file")
- }
- outputFile2, err := os.OpenFile(outputPath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666)
- if err != nil {
- t.Fatalf("error creating output file: %v", err)
- }
- defer outputFile2.Close()
-
- // Restore into a new container.
- argsRestore := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- }
- contRestore, err := New(conf, argsRestore)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- defer contRestore.Destroy()
-
- if err := contRestore.Restore(spec, conf, imagePath); err != nil {
- t.Fatalf("error restoring container: %v", err)
- }
-
- // Wait until the application has run.
- if err := waitForFileNotEmpty(outputFile2); err != nil {
- t.Fatalf("Failed to wait for output file: %v", err)
- }
-
- // Read first number outputted after restore.
- firstNum, err := readOutputNum(outputPath, 0)
- if err != nil {
- t.Fatalf("error with outputFile: %v", err)
- }
-
- // Check that lastNum is one less than firstNum.
- if lastNum+1 != firstNum {
- t.Errorf("error numbers not consecutive, previous: %d, next: %d", lastNum, firstNum)
- }
- contRestore.Destroy()
- })
- }
-}
-
-// TestPauseResume tests that we can successfully pause and resume a container.
-// The container will keep touching a file to indicate it's running. The test
-// pauses the container, removes the file, and checks that it doesn't get
-// recreated. Then it resumes the container and verifies that the file gets
-// created again.
-func TestPauseResume(t *testing.T) {
- for name, conf := range configs(t, noOverlay...) {
- t.Run(name, func(t *testing.T) {
- tmpDir, err := ioutil.TempDir(testutil.TmpDir(), "lock")
- if err != nil {
- t.Fatalf("error creating temp dir: %v", err)
- }
- defer os.RemoveAll(tmpDir)
-
- running := path.Join(tmpDir, "running")
- script := fmt.Sprintf("while [[ true ]]; do touch %q; sleep 0.1; done", running)
- spec := testutil.NewSpecWithArgs("/bin/bash", "-c", script)
-
- _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- // Create and start the container.
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- }
- cont, err := New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- defer cont.Destroy()
- if err := cont.Start(conf); err != nil {
- t.Fatalf("error starting container: %v", err)
- }
-
- // Wait until container starts running, observed by the existence of running
- // file.
- if err := waitForFileExist(running); err != nil {
- t.Errorf("error waiting for container to start: %v", err)
- }
-
- // Pause the running container.
- if err := cont.Pause(); err != nil {
- t.Errorf("error pausing container: %v", err)
- }
- if got, want := cont.Status, Paused; got != want {
- t.Errorf("container status got %v, want %v", got, want)
- }
-
- if err := os.Remove(running); err != nil {
- t.Fatalf("os.Remove(%q) failed: %v", running, err)
- }
- // Script touches the file every 100ms. Give it a bit of time to run, to
- // catch the case where pause didn't work.
- time.Sleep(200 * time.Millisecond)
- if _, err := os.Stat(running); !os.IsNotExist(err) {
- t.Fatalf("container did not pause: file exist check: %v", err)
- }
-
- // Resume the running container.
- if err := cont.Resume(); err != nil {
- t.Errorf("error pausing container: %v", err)
- }
- if got, want := cont.Status, Running; got != want {
- t.Errorf("container status got %v, want %v", got, want)
- }
-
- // Verify that the file is once again created by container.
- if err := waitForFileExist(running); err != nil {
- t.Fatalf("error resuming container: file exist check: %v", err)
- }
- })
- }
-}
-
-// TestPauseResumeStatus makes sure that the statuses are set correctly
-// with calls to pause and resume and that pausing and resuming only
-// occurs given the correct state.
-func TestPauseResumeStatus(t *testing.T) {
- spec := testutil.NewSpecWithArgs("sleep", "20")
- conf := testutil.TestConfig(t)
- _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- // Create and start the container.
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- }
- cont, err := New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- defer cont.Destroy()
- if err := cont.Start(conf); err != nil {
- t.Fatalf("error starting container: %v", err)
- }
-
- // Pause the running container.
- if err := cont.Pause(); err != nil {
- t.Errorf("error pausing container: %v", err)
- }
- if got, want := cont.Status, Paused; got != want {
- t.Errorf("container status got %v, want %v", got, want)
- }
-
- // Pausing again should fail because the container is already paused.
- if err := cont.Pause(); err == nil {
- t.Errorf("expected error pausing an already-paused container, got nil")
- }
- if got, want := cont.Status, Paused; got != want {
- t.Errorf("container status got %v, want %v", got, want)
- }
-
- // Resume the running container.
- if err := cont.Resume(); err != nil {
- t.Errorf("error resuming container: %v", err)
- }
- if got, want := cont.Status, Running; got != want {
- t.Errorf("container status got %v, want %v", got, want)
- }
-
- // Resuming again should fail because the container is already running.
- if err := cont.Resume(); err == nil {
- t.Errorf("expected error resuming an already-running container, got nil")
- }
- if got, want := cont.Status, Running; got != want {
- t.Errorf("container status got %v, want %v", got, want)
- }
-}
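
// Editorial sketch (not from the deleted file above): the status transitions
// asserted by TestPauseResume and TestPauseResumeStatus, written as a small
// guard. Running and Paused are the only states those tests move between; the
// helper name togglePause is ours, for illustration only.
func togglePause(c *Container) error {
	switch c.Status {
	case Running:
		// Running -> Paused is the only pause the tests expect to succeed.
		return c.Pause()
	case Paused:
		// Paused -> Running is the only resume the tests expect to succeed.
		return c.Resume()
	default:
		return fmt.Errorf("container %q is %v; pause/resume not attempted", c.ID, c.Status)
	}
}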
-
-// TestCapabilities verifies that:
-// - Running exec as non-root UID and GID will result in an error (because the
-// executable file can't be read).
-// - Running exec as non-root with CAP_DAC_OVERRIDE succeeds because it skips
-// this check.
-func TestCapabilities(t *testing.T) {
- // Pick uid/gid different than ours.
- uid := auth.KUID(os.Getuid() + 1)
- gid := auth.KGID(os.Getgid() + 1)
-
- for name, conf := range configs(t, all...) {
- t.Run(name, func(t *testing.T) {
- spec := testutil.NewSpecWithArgs("sleep", "100")
- rootDir, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- // Create and start the container.
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- }
- cont, err := New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- defer cont.Destroy()
- if err := cont.Start(conf); err != nil {
- t.Fatalf("error starting container: %v", err)
- }
-
- // expectedPL lists the expected process state of the container.
- expectedPL := []*control.Process{
- newProcessBuilder().Cmd("sleep").Process(),
- }
- if err := waitForProcessList(cont, expectedPL); err != nil {
- t.Fatalf("Failed to wait for sleep to start, err: %v", err)
- }
-
- // Create an executable that can't be run with the specified UID:GID.
- // This shouldn't be callable within the container until we add the
- // CAP_DAC_OVERRIDE capability to skip the access check.
- exePath := filepath.Join(rootDir, "exe")
- if err := ioutil.WriteFile(exePath, []byte("#!/bin/sh\necho hello"), 0770); err != nil {
- t.Fatalf("couldn't create executable: %v", err)
- }
- defer os.Remove(exePath)
-
- // Need to traverse the intermediate directory.
- if err := os.Chmod(rootDir, 0755); err != nil {
- t.Fatal(err)
- }
-
- execArgs := &control.ExecArgs{
- Filename: exePath,
- Argv: []string{exePath},
- WorkingDirectory: "/",
- KUID: uid,
- KGID: gid,
- Capabilities: &auth.TaskCapabilities{},
- }
-
- // "exe" should fail because we don't have the necessary permissions.
- if _, err := cont.executeSync(conf, execArgs); err == nil {
- t.Fatalf("container executed without error, but an error was expected")
- }
-
- // Now we run with the capability enabled and should succeed.
- execArgs.Capabilities = &auth.TaskCapabilities{
- EffectiveCaps: auth.CapabilitySetOf(linux.CAP_DAC_OVERRIDE),
- }
- // "exe" should not fail this time.
- if _, err := cont.executeSync(conf, execArgs); err != nil {
- t.Fatalf("container failed to exec %v: %v", args, err)
- }
- })
- }
-}
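
// Editorial sketch (not from the deleted file above): how TestCapabilities
// grants a single capability to an exec'd process. Only EffectiveCaps is set,
// matching the test above; exePath, uid and gid are whatever the caller picked
// (in the test, a script the given uid/gid cannot read). The helper name
// execArgsWithDACOverride is ours, for illustration only.
func execArgsWithDACOverride(exePath string, uid auth.KUID, gid auth.KGID) *control.ExecArgs {
	return &control.ExecArgs{
		Filename:         exePath,
		Argv:             []string{exePath},
		WorkingDirectory: "/",
		KUID:             uid,
		KGID:             gid,
		Capabilities: &auth.TaskCapabilities{
			// CAP_DAC_OVERRIDE lets the exec'd process bypass the file
			// permission check that would otherwise reject exePath.
			EffectiveCaps: auth.CapabilitySetOf(linux.CAP_DAC_OVERRIDE),
		},
	}
}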
-
-// TestRunNonRoot checks that sandbox can be configured when running as
-// non-privileged user.
-func TestRunNonRoot(t *testing.T) {
- for name, conf := range configs(t, noOverlay...) {
- t.Run(name, func(t *testing.T) {
- spec := testutil.NewSpecWithArgs("/bin/true")
-
- // Set a random user/group with no access to "blocked" dir.
- spec.Process.User.UID = 343
- spec.Process.User.GID = 2401
- spec.Process.Capabilities = nil
-
- // User running inside container can't list '$TMP/blocked' and would fail to
- // mount it.
- dir, err := ioutil.TempDir(testutil.TmpDir(), "blocked")
- if err != nil {
- t.Fatalf("ioutil.TempDir() failed: %v", err)
- }
- if err := os.Chmod(dir, 0700); err != nil {
- t.Fatalf("os.MkDir(%q) failed: %v", dir, err)
- }
- dir = path.Join(dir, "test")
- if err := os.Mkdir(dir, 0755); err != nil {
- t.Fatalf("os.MkDir(%q) failed: %v", dir, err)
- }
-
- src, err := ioutil.TempDir(testutil.TmpDir(), "src")
- if err != nil {
- t.Fatalf("ioutil.TempDir() failed: %v", err)
- }
-
- spec.Mounts = append(spec.Mounts, specs.Mount{
- Destination: dir,
- Source: src,
- Type: "bind",
- })
-
- if err := run(spec, conf); err != nil {
- t.Fatalf("error running sandbox: %v", err)
- }
- })
- }
-}
-
-// TestMountNewDir checks that runsc will create the destination directory if
-// it doesn't exist.
-func TestMountNewDir(t *testing.T) {
- for name, conf := range configs(t, all...) {
- t.Run(name, func(t *testing.T) {
- root, err := ioutil.TempDir(testutil.TmpDir(), "root")
- if err != nil {
- t.Fatal("ioutil.TempDir() failed:", err)
- }
-
- srcDir := path.Join(root, "src", "dir", "anotherdir")
- if err := os.MkdirAll(srcDir, 0755); err != nil {
- t.Fatalf("os.MkDir(%q) failed: %v", srcDir, err)
- }
-
- mountDir := path.Join(root, "dir", "anotherdir")
-
- spec := testutil.NewSpecWithArgs("/bin/ls", mountDir)
- spec.Mounts = append(spec.Mounts, specs.Mount{
- Destination: mountDir,
- Source: srcDir,
- Type: "bind",
- })
- // Extra points for creating the mount with a readonly root.
- spec.Root.Readonly = true
-
- if err := run(spec, conf); err != nil {
- t.Fatalf("error running sandbox: %v", err)
- }
- })
- }
-}
-
-func TestReadonlyRoot(t *testing.T) {
- for name, conf := range configs(t, all...) {
- t.Run(name, func(t *testing.T) {
- spec := testutil.NewSpecWithArgs("sleep", "100")
- spec.Root.Readonly = true
-
- _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- }
- c, err := New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- defer c.Destroy()
- if err := c.Start(conf); err != nil {
- t.Fatalf("error starting container: %v", err)
- }
-
- // Read mounts to check that root is readonly.
- out, err := executeCombinedOutput(conf, c, "/bin/sh", "-c", "mount | grep ' / ' | grep -o -e '(.*)'")
- if err != nil {
- t.Fatalf("exec failed: %v", err)
- }
- t.Logf("root mount options: %q", out)
- if !strings.Contains(string(out), "ro") {
- t.Errorf("root not mounted readonly: %q", out)
- }
-
- // Check that file cannot be created.
- ws, err := execute(conf, c, "/bin/touch", "/foo")
- if err != nil {
- t.Fatalf("touch file in ro mount: %v", err)
- }
- if !ws.Exited() || unix.Errno(ws.ExitStatus()) != unix.EPERM {
- t.Fatalf("wrong waitStatus: %v", ws)
- }
- })
- }
-}
-
-func TestReadonlyMount(t *testing.T) {
- for name, conf := range configs(t, all...) {
- t.Run(name, func(t *testing.T) {
- dir, err := ioutil.TempDir(testutil.TmpDir(), "ro-mount")
- if err != nil {
- t.Fatalf("ioutil.TempDir() failed: %v", err)
- }
- spec := testutil.NewSpecWithArgs("sleep", "100")
- spec.Mounts = append(spec.Mounts, specs.Mount{
- Destination: dir,
- Source: dir,
- Type: "bind",
- Options: []string{"ro"},
- })
- spec.Root.Readonly = false
-
- _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- }
- c, err := New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- defer c.Destroy()
- if err := c.Start(conf); err != nil {
- t.Fatalf("error starting container: %v", err)
- }
-
- // Read mounts to check that volume is readonly.
- cmd := fmt.Sprintf("mount | grep ' %s ' | grep -o -e '(.*)'", dir)
- out, err := executeCombinedOutput(conf, c, "/bin/sh", "-c", cmd)
- if err != nil {
- t.Fatalf("exec failed, err: %v", err)
- }
- t.Logf("mount options: %q", out)
- if !strings.Contains(string(out), "ro") {
- t.Errorf("volume not mounted readonly: %q", out)
- }
-
- // Check that file cannot be created.
- ws, err := execute(conf, c, "/bin/touch", path.Join(dir, "file"))
- if err != nil {
- t.Fatalf("touch file in ro mount: %v", err)
- }
- if !ws.Exited() || unix.Errno(ws.ExitStatus()) != unix.EPERM {
- t.Fatalf("wrong WaitStatus: %v", ws)
- }
- })
- }
-}
-
-func TestUIDMap(t *testing.T) {
- for name, conf := range configs(t, noOverlay...) {
- t.Run(name, func(t *testing.T) {
- testDir, err := ioutil.TempDir(testutil.TmpDir(), "test-mount")
- if err != nil {
- t.Fatalf("ioutil.TempDir() failed: %v", err)
- }
- defer os.RemoveAll(testDir)
- testFile := path.Join(testDir, "testfile")
-
- spec := testutil.NewSpecWithArgs("touch", "/tmp/testfile")
- uid := os.Getuid()
- gid := os.Getgid()
- spec.Linux = &specs.Linux{
- Namespaces: []specs.LinuxNamespace{
- {Type: specs.UserNamespace},
- {Type: specs.PIDNamespace},
- {Type: specs.MountNamespace},
- },
- UIDMappings: []specs.LinuxIDMapping{
- {
- ContainerID: 0,
- HostID: uint32(uid),
- Size: 1,
- },
- },
- GIDMappings: []specs.LinuxIDMapping{
- {
- ContainerID: 0,
- HostID: uint32(gid),
- Size: 1,
- },
- },
- }
-
- spec.Mounts = append(spec.Mounts, specs.Mount{
- Destination: "/tmp",
- Source: testDir,
- Type: "bind",
- })
-
- _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- // Create, start and wait for the container.
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- }
- c, err := New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- defer c.Destroy()
- if err := c.Start(conf); err != nil {
- t.Fatalf("error starting container: %v", err)
- }
-
- ws, err := c.Wait()
- if err != nil {
- t.Fatalf("error waiting on container: %v", err)
- }
- if !ws.Exited() || ws.ExitStatus() != 0 {
- t.Fatalf("container failed, waitStatus: %v", ws)
- }
- st := unix.Stat_t{}
- if err := unix.Stat(testFile, &st); err != nil {
- t.Fatalf("error stat /testfile: %v", err)
- }
-
- if st.Uid != uint32(uid) || st.Gid != uint32(gid) {
- t.Fatalf("UID: %d (%d) GID: %d (%d)", st.Uid, uid, st.Gid, gid)
- }
- })
- }
-}
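
// Editorial sketch (not from the deleted file above): the single-entry user
// namespace mapping TestUIDMap installs, mapping container root (UID/GID 0) to
// the unprivileged host IDs the test runs as. The helper name
// singleUserNSMapping is ours, for illustration only.
func singleUserNSMapping(hostUID, hostGID uint32) *specs.Linux {
	return &specs.Linux{
		Namespaces: []specs.LinuxNamespace{
			{Type: specs.UserNamespace},
			{Type: specs.PIDNamespace},
			{Type: specs.MountNamespace},
		},
		// Map exactly one ID: container 0 <-> the given host UID/GID.
		UIDMappings: []specs.LinuxIDMapping{{ContainerID: 0, HostID: hostUID, Size: 1}},
		GIDMappings: []specs.LinuxIDMapping{{ContainerID: 0, HostID: hostGID, Size: 1}},
	}
}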
-
-// TestAbbreviatedIDs checks that runsc supports using abbreviated container
-// IDs in place of full IDs.
-func TestAbbreviatedIDs(t *testing.T) {
- doAbbreviatedIDsTest(t, false)
-}
-
-func TestAbbreviatedIDsVFS2(t *testing.T) {
- doAbbreviatedIDsTest(t, true)
-}
-
-func doAbbreviatedIDsTest(t *testing.T, vfs2 bool) {
- rootDir, cleanup, err := testutil.SetupRootDir()
- if err != nil {
- t.Fatalf("error creating root dir: %v", err)
- }
- defer cleanup()
-
- conf := testutil.TestConfig(t)
- conf.RootDir = rootDir
- conf.VFS2 = vfs2
-
- cids := []string{
- "foo-" + testutil.RandomContainerID(),
- "bar-" + testutil.RandomContainerID(),
- "baz-" + testutil.RandomContainerID(),
- }
- for _, cid := range cids {
- spec := testutil.NewSpecWithArgs("sleep", "100")
- bundleDir, cleanup, err := testutil.SetupBundleDir(spec)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- // Create and start the container.
- args := Args{
- ID: cid,
- Spec: spec,
- BundleDir: bundleDir,
- }
- cont, err := New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- defer cont.Destroy()
- }
-
- // These should all be unambiguous.
- unambiguous := map[string]string{
- "f": cids[0],
- cids[0]: cids[0],
- "bar": cids[1],
- cids[1]: cids[1],
- "baz": cids[2],
- cids[2]: cids[2],
- }
- for shortid, longid := range unambiguous {
- if _, err := Load(rootDir, FullID{ContainerID: shortid}, LoadOpts{}); err != nil {
- t.Errorf("%q should resolve to %q: %v", shortid, longid, err)
- }
- }
-
- // These should be ambiguous.
- ambiguous := []string{
- "b",
- "ba",
- }
- for _, shortid := range ambiguous {
- if s, err := Load(rootDir, FullID{ContainerID: shortid}, LoadOpts{}); err == nil {
- t.Errorf("%q should be ambiguous, but resolved to %q", shortid, s.ID)
- }
- }
-}
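
// Editorial sketch (not from the deleted file above): resolving an abbreviated
// container ID the way doAbbreviatedIDsTest does. A unique prefix resolves to
// the full container; an ambiguous or unknown prefix surfaces as an error from
// Load. The helper name loadByPrefix is ours, for illustration only.
func loadByPrefix(rootDir, prefix string) (*Container, error) {
	c, err := Load(rootDir, FullID{ContainerID: prefix}, LoadOpts{})
	if err != nil {
		return nil, fmt.Errorf("resolving container ID prefix %q: %v", prefix, err)
	}
	return c, nil
}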
-
-func TestGoferExits(t *testing.T) {
- doGoferExitTest(t, false)
-}
-
-func TestGoferExitsVFS2(t *testing.T) {
- doGoferExitTest(t, true)
-}
-
-func doGoferExitTest(t *testing.T, vfs2 bool) {
- spec := testutil.NewSpecWithArgs("/bin/sleep", "10000")
- conf := testutil.TestConfig(t)
- conf.VFS2 = vfs2
- _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
-
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- // Create and start the container.
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- }
- c, err := New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- defer c.Destroy()
- if err := c.Start(conf); err != nil {
- t.Fatalf("error starting container: %v", err)
- }
-
- // Kill sandbox and expect gofer to exit on its own.
- sandboxProc, err := os.FindProcess(c.Sandbox.Pid)
- if err != nil {
- t.Fatalf("error finding sandbox process: %v", err)
- }
- if err := sandboxProc.Kill(); err != nil {
- t.Fatalf("error killing sandbox process: %v", err)
- }
-
- err = blockUntilWaitable(c.GoferPid)
- if err != nil && err != unix.ECHILD {
- t.Errorf("error waiting for gofer to exit: %v", err)
- }
-}
-
-func TestRootNotMount(t *testing.T) {
- appSym, err := testutil.FindFile("test/cmd/test_app/test_app")
- if err != nil {
- t.Fatal("error finding test_app:", err)
- }
-
- app, err := filepath.EvalSymlinks(appSym)
- if err != nil {
- t.Fatalf("error resolving %q symlink: %v", appSym, err)
- }
- log.Infof("App path %q is a symlink to %q", appSym, app)
-
- static, err := testutil.IsStatic(app)
- if err != nil {
- t.Fatalf("error reading application binary: %v", err)
- }
- if !static {
- // This happens during race builds; we cannot map in shared
- // libraries either, so we need to skip the test.
- t.Skip()
- }
-
- root := filepath.Dir(app)
- exe := "/" + filepath.Base(app)
- log.Infof("Executing %q in %q", exe, root)
-
- spec := testutil.NewSpecWithArgs(exe, "help")
- spec.Root.Path = root
- spec.Root.Readonly = true
- spec.Mounts = nil
-
- conf := testutil.TestConfig(t)
- if err := run(spec, conf); err != nil {
- t.Fatalf("error running sandbox: %v", err)
- }
-}
-
-func TestUserLog(t *testing.T) {
- app, err := testutil.FindFile("test/cmd/test_app/test_app")
- if err != nil {
- t.Fatal("error finding test_app:", err)
- }
-
- // sched_rr_get_interval - not implemented in gVisor.
- num := strconv.Itoa(unix.SYS_SCHED_RR_GET_INTERVAL)
- spec := testutil.NewSpecWithArgs(app, "syscall", "--syscall="+num)
- conf := testutil.TestConfig(t)
- _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- dir, err := ioutil.TempDir(testutil.TmpDir(), "user_log_test")
- if err != nil {
- t.Fatalf("error creating tmp dir: %v", err)
- }
- userLog := filepath.Join(dir, "user.log")
-
- // Create, start and wait for the container.
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- UserLog: userLog,
- Attached: true,
- }
- ws, err := Run(conf, args)
- if err != nil {
- t.Fatalf("error running container: %v", err)
- }
- if !ws.Exited() || ws.ExitStatus() != 0 {
- t.Fatalf("container failed, waitStatus: %v", ws)
- }
-
- out, err := ioutil.ReadFile(userLog)
- if err != nil {
- t.Fatalf("error opening user log file %q: %v", userLog, err)
- }
- if want := "Unsupported syscall sched_rr_get_interval("; !strings.Contains(string(out), want) {
- t.Errorf("user log file doesn't contain %q, out: %s", want, string(out))
- }
-}
-
-func TestWaitOnExitedSandbox(t *testing.T) {
- for name, conf := range configs(t, all...) {
- t.Run(name, func(t *testing.T) {
- // Run a shell that sleeps for 1 second and then exits with a
- // non-zero code.
- const wantExit = 17
- cmd := fmt.Sprintf("sleep 1; exit %d", wantExit)
- spec := testutil.NewSpecWithArgs("/bin/sh", "-c", cmd)
- _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- // Create and Start the container.
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- }
- c, err := New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- defer c.Destroy()
- if err := c.Start(conf); err != nil {
- t.Fatalf("error starting container: %v", err)
- }
-
- // Wait on the sandbox. This will make an RPC to the sandbox
- // and get the actual exit status of the application.
- ws, err := c.Wait()
- if err != nil {
- t.Fatalf("error waiting on container: %v", err)
- }
- if got := ws.ExitStatus(); got != wantExit {
- t.Errorf("got exit status %d, want %d", got, wantExit)
- }
-
- // Now the sandbox has exited, but the zombie sandbox process
- // still exists. Calling Wait() now will return the sandbox
- // exit status.
- ws, err = c.Wait()
- if err != nil {
- t.Fatalf("error waiting on container: %v", err)
- }
- if got := ws.ExitStatus(); got != wantExit {
- t.Errorf("got exit status %d, want %d", got, wantExit)
- }
- })
- }
-}
-
-func TestDestroyNotStarted(t *testing.T) {
- doDestroyNotStartedTest(t, false)
-}
-
-func TestDestroyNotStartedVFS2(t *testing.T) {
- doDestroyNotStartedTest(t, true)
-}
-
-func doDestroyNotStartedTest(t *testing.T, vfs2 bool) {
- spec := testutil.NewSpecWithArgs("/bin/sleep", "100")
- conf := testutil.TestConfig(t)
- conf.VFS2 = vfs2
- _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- // Create the container and check that it can be destroyed.
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- }
- c, err := New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- if err := c.Destroy(); err != nil {
- t.Fatalf("deleting non-started container failed: %v", err)
- }
-}
-
-// TestDestroyStarting attempts to force a race between start and destroy.
-func TestDestroyStarting(t *testing.T) {
- doDestroyStartingTest(t, false)
-}
-
-func TestDestroyStartingVFS2(t *testing.T) {
- doDestroyStartingTest(t, true)
-}
-
-func doDestroyStartingTest(t *testing.T, vfs2 bool) {
- for i := 0; i < 10; i++ {
- spec := testutil.NewSpecWithArgs("/bin/sleep", "100")
- conf := testutil.TestConfig(t)
- conf.VFS2 = vfs2
- rootDir, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- // Create the container and check that it can be destroyed.
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- }
- c, err := New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
-
- // Container is not thread safe, so load another instance to run
- // concurrently.
- startCont, err := Load(rootDir, FullID{ContainerID: args.ID}, LoadOpts{})
- if err != nil {
- t.Fatalf("error loading container: %v", err)
- }
- wg := sync.WaitGroup{}
- wg.Add(1)
- go func() {
- defer wg.Done()
- // Ignore failures, start can fail if destroy runs first.
- _ = startCont.Start(conf)
- }()
-
- wg.Add(1)
- go func() {
- defer wg.Done()
- if err := c.Destroy(); err != nil {
- t.Errorf("deleting non-started container failed: %v", err)
- }
- }()
- wg.Wait()
- }
-}
-
-func TestCreateWorkingDir(t *testing.T) {
- for name, conf := range configs(t, all...) {
- t.Run(name, func(t *testing.T) {
- tmpDir, err := ioutil.TempDir(testutil.TmpDir(), "cwd-create")
- if err != nil {
- t.Fatalf("ioutil.TempDir() failed: %v", err)
- }
- dir := path.Join(tmpDir, "new/working/dir")
-
- // touch will fail if the directory doesn't exist.
- spec := testutil.NewSpecWithArgs("/bin/touch", path.Join(dir, "file"))
- spec.Process.Cwd = dir
- spec.Root.Readonly = true
-
- if err := run(spec, conf); err != nil {
- t.Fatalf("Error running container: %v", err)
- }
- })
- }
-}
-
-// TestMountPropagation verifies that mounts propagate to slave mounts but not
-// to private mounts.
-func TestMountPropagation(t *testing.T) {
- // Setup dir structure:
- // - src: is mounted as shared and is used as source for both private and
- // slave mounts
- // - dir: will be bind mounted inside src and should propagate to slave
- tmpDir, err := ioutil.TempDir(testutil.TmpDir(), "mount")
- if err != nil {
- t.Fatalf("ioutil.TempDir() failed: %v", err)
- }
- src := filepath.Join(tmpDir, "src")
- srcMnt := filepath.Join(src, "mnt")
- dir := filepath.Join(tmpDir, "dir")
- for _, path := range []string{src, srcMnt, dir} {
- if err := os.MkdirAll(path, 0777); err != nil {
- t.Fatalf("MkdirAll(%q): %v", path, err)
- }
- }
- dirFile := filepath.Join(dir, "file")
- f, err := os.Create(dirFile)
- if err != nil {
- t.Fatalf("os.Create(%q): %v", dirFile, err)
- }
- f.Close()
-
- // Setup src as a shared mount.
- if err := unix.Mount(src, src, "bind", unix.MS_BIND, ""); err != nil {
- t.Fatalf("mount(%q, %q, MS_BIND): %v", dir, srcMnt, err)
- }
- if err := unix.Mount("", src, "", unix.MS_SHARED, ""); err != nil {
- t.Fatalf("mount(%q, MS_SHARED): %v", srcMnt, err)
- }
-
- spec := testutil.NewSpecWithArgs("sleep", "1000")
-
- priv := filepath.Join(tmpDir, "priv")
- slave := filepath.Join(tmpDir, "slave")
- spec.Mounts = []specs.Mount{
- {
- Source: src,
- Destination: priv,
- Type: "bind",
- Options: []string{"private"},
- },
- {
- Source: src,
- Destination: slave,
- Type: "bind",
- Options: []string{"slave"},
- },
- }
-
- conf := testutil.TestConfig(t)
- _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- }
- cont, err := New(conf, args)
- if err != nil {
- t.Fatalf("creating container: %v", err)
- }
- defer cont.Destroy()
-
- if err := cont.Start(conf); err != nil {
- t.Fatalf("starting container: %v", err)
- }
-
- // After the container is started, mount dir inside source and check what
- // happens to both destinations.
- if err := unix.Mount(dir, srcMnt, "bind", unix.MS_BIND, ""); err != nil {
- t.Fatalf("mount(%q, %q, MS_BIND): %v", dir, srcMnt, err)
- }
-
- // Check that mount didn't propagate to private mount.
- privFile := filepath.Join(priv, "mnt", "file")
- if ws, err := execute(conf, cont, "/usr/bin/test", "!", "-f", privFile); err != nil || ws != 0 {
- t.Fatalf("exec: test ! -f %q, ws: %v, err: %v", privFile, ws, err)
- }
-
- // Check that mount propagated to slave mount.
- slaveFile := filepath.Join(slave, "mnt", "file")
- if ws, err := execute(conf, cont, "/usr/bin/test", "-f", slaveFile); err != nil || ws != 0 {
- t.Fatalf("exec: test -f %q, ws: %v, err: %v", privFile, ws, err)
- }
-}
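
// Editorial sketch (not from the deleted file above): the two bind mounts
// TestMountPropagation adds to the spec. Only the propagation option differs;
// a mount performed under src on the host after the sandbox starts is expected
// to show up under the "slave" destination but not under the "private" one.
// The helper name propagationMounts is ours, for illustration only.
func propagationMounts(src, privateDst, slaveDst string) []specs.Mount {
	return []specs.Mount{
		{Source: src, Destination: privateDst, Type: "bind", Options: []string{"private"}},
		{Source: src, Destination: slaveDst, Type: "bind", Options: []string{"slave"}},
	}
}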
-
-func TestMountSymlink(t *testing.T) {
- for name, conf := range configs(t, all...) {
- t.Run(name, func(t *testing.T) {
- dir, err := ioutil.TempDir(testutil.TmpDir(), "mount-symlink")
- if err != nil {
- t.Fatalf("ioutil.TempDir() failed: %v", err)
- }
- defer os.RemoveAll(dir)
-
- source := path.Join(dir, "source")
- target := path.Join(dir, "target")
- for _, path := range []string{source, target} {
- if err := os.MkdirAll(path, 0777); err != nil {
- t.Fatalf("os.MkdirAll(): %v", err)
- }
- }
- f, err := os.Create(path.Join(source, "file"))
- if err != nil {
- t.Fatalf("os.Create(): %v", err)
- }
- f.Close()
-
- link := path.Join(dir, "link")
- if err := os.Symlink(target, link); err != nil {
- t.Fatalf("os.Symlink(%q, %q): %v", target, link, err)
- }
-
- spec := testutil.NewSpecWithArgs("/bin/sleep", "1000")
-
- // Mount to a symlink to ensure the mount code will follow it and mount
- // at the symlink target.
- spec.Mounts = append(spec.Mounts, specs.Mount{
- Type: "bind",
- Destination: link,
- Source: source,
- })
-
- _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- }
- cont, err := New(conf, args)
- if err != nil {
- t.Fatalf("creating container: %v", err)
- }
- defer cont.Destroy()
-
- if err := cont.Start(conf); err != nil {
- t.Fatalf("starting container: %v", err)
- }
-
- // Check that the symlink was resolved and the mount was created where the
- // symlink points to.
- file := path.Join(target, "file")
- if ws, err := execute(conf, cont, "/usr/bin/test", "-f", file); err != nil || ws != 0 {
- t.Fatalf("exec: test -f %q, ws: %v, err: %v", file, ws, err)
- }
- })
- }
-}
-
-// Check that the --net-raw flag controls the CAP_NET_RAW capability.
-func TestNetRaw(t *testing.T) {
- capNetRaw := strconv.FormatUint(bits.MaskOf64(int(linux.CAP_NET_RAW)), 10)
- app, err := testutil.FindFile("test/cmd/test_app/test_app")
- if err != nil {
- t.Fatal("error finding test_app:", err)
- }
-
- for _, enableRaw := range []bool{true, false} {
- conf := testutil.TestConfig(t)
- conf.EnableRaw = enableRaw
-
- test := "--enabled"
- if !enableRaw {
- test = "--disabled"
- }
-
- spec := testutil.NewSpecWithArgs(app, "capability", test, capNetRaw)
- if err := run(spec, conf); err != nil {
- t.Fatalf("Error running container: %v", err)
- }
- }
-}
-
-// TestTTYField checks TTY field returned by container.Processes().
-func TestTTYField(t *testing.T) {
- stop := testutil.StartReaper()
- defer stop()
-
- testApp, err := testutil.FindFile("test/cmd/test_app/test_app")
- if err != nil {
- t.Fatal("error finding test_app:", err)
- }
-
- testCases := []struct {
- name string
- useTTY bool
- wantTTYField string
- }{
- {
- name: "no tty",
- useTTY: false,
- wantTTYField: "?",
- },
- {
- name: "tty used",
- useTTY: true,
- wantTTYField: "pts/0",
- },
- }
-
- for _, test := range testCases {
- for _, vfs2 := range []bool{false, true} {
- name := test.name
- if vfs2 {
- name += "-vfs2"
- }
- t.Run(name, func(t *testing.T) {
- conf := testutil.TestConfig(t)
- conf.VFS2 = vfs2
-
- // We will run /bin/sleep, possibly with an open TTY.
- cmd := []string{"/bin/sleep", "10000"}
- if test.useTTY {
- // Run inside the "pty-runner".
- cmd = append([]string{testApp, "pty-runner"}, cmd...)
- }
-
- spec := testutil.NewSpecWithArgs(cmd...)
- _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- // Create and start the container.
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- }
- c, err := New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- defer c.Destroy()
- if err := c.Start(conf); err != nil {
- t.Fatalf("error starting container: %v", err)
- }
-
- // Wait for sleep to be running, and check the TTY
- // field.
- var gotTTYField string
- cb := func() error {
- ps, err := c.Processes()
- if err != nil {
- err = fmt.Errorf("error getting process data from container: %v", err)
- return &backoff.PermanentError{Err: err}
- }
- for _, p := range ps {
- if strings.Contains(p.Cmd, "sleep") {
- gotTTYField = p.TTY
- return nil
- }
- }
- return fmt.Errorf("sleep not running")
- }
- if err := testutil.Poll(cb, 30*time.Second); err != nil {
- t.Fatalf("error waiting for sleep process: %v", err)
- }
-
- if gotTTYField != test.wantTTYField {
- t.Errorf("tty field got %q, want %q", gotTTYField, test.wantTTYField)
- }
- })
- }
- }
-}
-
-// Test that a container can run even when there are corrupt state files in
-// the root directory.
-func TestCreateWithCorruptedStateFile(t *testing.T) {
- conf := testutil.TestConfig(t)
- spec := testutil.NewSpecWithArgs("/bin/true")
- _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- // Create corrupted state file.
- corruptID := testutil.RandomContainerID()
- corruptState := buildPath(conf.RootDir, FullID{SandboxID: corruptID, ContainerID: corruptID}, stateFileExtension)
- if err := ioutil.WriteFile(corruptState, []byte("this{file(is;not[valid.json"), 0777); err != nil {
- t.Fatalf("createCorruptStateFile(): %v", err)
- }
- defer os.Remove(corruptState)
-
- if _, err := Load(conf.RootDir, FullID{ContainerID: corruptID}, LoadOpts{SkipCheck: true}); err == nil {
- t.Fatalf("loading corrupted state file should have failed")
- }
-
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- Attached: true,
- }
- if ws, err := Run(conf, args); err != nil {
- t.Errorf("running container: %v", err)
- } else if !ws.Exited() || ws.ExitStatus() != 0 {
- t.Errorf("container failed, waitStatus: %v", ws)
- }
-}
-
-func TestBindMountByOption(t *testing.T) {
- for name, conf := range configs(t, all...) {
- t.Run(name, func(t *testing.T) {
- dir, err := ioutil.TempDir(testutil.TmpDir(), "bind-mount")
- spec := testutil.NewSpecWithArgs("/bin/touch", path.Join(dir, "file"))
- if err != nil {
- t.Fatalf("ioutil.TempDir(): %v", err)
- }
- spec.Mounts = append(spec.Mounts, specs.Mount{
- Destination: dir,
- Source: dir,
- Type: "none",
- Options: []string{"rw", "bind"},
- })
- if err := run(spec, conf); err != nil {
- t.Fatalf("error running sandbox: %v", err)
- }
- })
- }
-}
-
-// TestRlimits sets a limit on the number of open files and checks that the
-// limit is propagated to the container.
-func TestRlimits(t *testing.T) {
- file, err := ioutil.TempFile(testutil.TmpDir(), "ulimit")
- if err != nil {
- t.Fatal(err)
- }
- cmd := fmt.Sprintf("ulimit -n > %q", file.Name())
-
- spec := testutil.NewSpecWithArgs("sh", "-c", cmd)
- spec.Process.Rlimits = []specs.POSIXRlimit{
- {Type: "RLIMIT_NOFILE", Hard: 1000, Soft: 100},
- }
-
- conf := testutil.TestConfig(t)
- if err := run(spec, conf); err != nil {
- t.Fatalf("Error running container: %v", err)
- }
- got, err := ioutil.ReadFile(file.Name())
- if err != nil {
- t.Fatal(err)
- }
- if want := "100\n"; string(got) != want {
- t.Errorf("ulimit result, got: %q, want: %q", got, want)
- }
-}
-
-// TestRlimitsExec sets a limit on the number of open files and checks that
-// the limit is propagated to exec'd processes.
-func TestRlimitsExec(t *testing.T) {
- spec := testutil.NewSpecWithArgs("sleep", "100")
- spec.Process.Rlimits = []specs.POSIXRlimit{
- {Type: "RLIMIT_NOFILE", Hard: 1000, Soft: 100},
- }
-
- conf := testutil.TestConfig(t)
- _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- }
- cont, err := New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- defer cont.Destroy()
- if err := cont.Start(conf); err != nil {
- t.Fatalf("error starting container: %v", err)
- }
-
- got, err := executeCombinedOutput(conf, cont, "/bin/sh", "-c", "ulimit -n")
- if err != nil {
- t.Fatal(err)
- }
- if want := "100\n"; string(got) != want {
- t.Errorf("ulimit result, got: %q, want: %q", got, want)
- }
-}
diff --git a/runsc/container/multi_container_test.go b/runsc/container/multi_container_test.go
deleted file mode 100644
index 9d8022e50..000000000
--- a/runsc/container/multi_container_test.go
+++ /dev/null
@@ -1,2089 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package container
-
-import (
- "fmt"
- "io/ioutil"
- "math"
- "os"
- "path"
- "path/filepath"
- "strings"
- "testing"
- "time"
-
- "github.com/cenkalti/backoff"
- specs "github.com/opencontainers/runtime-spec/specs-go"
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/cleanup"
- "gvisor.dev/gvisor/pkg/sentry/control"
- "gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/test/testutil"
- "gvisor.dev/gvisor/runsc/boot"
- "gvisor.dev/gvisor/runsc/config"
- "gvisor.dev/gvisor/runsc/specutils"
-)
-
-func createSpecs(cmds ...[]string) ([]*specs.Spec, []string) {
- var specs []*specs.Spec
- var ids []string
- rootID := testutil.RandomContainerID()
-
- for i, cmd := range cmds {
- spec := testutil.NewSpecWithArgs(cmd...)
- if i == 0 {
- spec.Annotations = map[string]string{
- specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeSandbox,
- }
- ids = append(ids, rootID)
- } else {
- spec.Annotations = map[string]string{
- specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeContainer,
- specutils.ContainerdSandboxIDAnnotation: rootID,
- }
- ids = append(ids, testutil.RandomContainerID())
- }
- specs = append(specs, spec)
- }
- return specs, ids
-}
-
-func startContainers(conf *config.Config, specs []*specs.Spec, ids []string) ([]*Container, func(), error) {
- if len(conf.RootDir) == 0 {
- panic("conf.RootDir not set. Call testutil.SetupRootDir() to set.")
- }
-
- cu := cleanup.Cleanup{}
- defer cu.Clean()
-
- var containers []*Container
- for i, spec := range specs {
- bundleDir, cleanup, err := testutil.SetupBundleDir(spec)
- if err != nil {
- return nil, nil, fmt.Errorf("error setting up container: %v", err)
- }
- cu.Add(cleanup)
-
- args := Args{
- ID: ids[i],
- Spec: spec,
- BundleDir: bundleDir,
- }
- cont, err := New(conf, args)
- if err != nil {
- return nil, nil, fmt.Errorf("error creating container: %v", err)
- }
- cu.Add(func() { cont.Destroy() })
- containers = append(containers, cont)
-
- if err := cont.Start(conf); err != nil {
- return nil, nil, fmt.Errorf("error starting container: %v", err)
- }
- }
-
- return containers, cu.Release(), nil
-}
-
-type execDesc struct {
- c *Container
- cmd []string
- want int
- name string
-}
-
-func execMany(t *testing.T, conf *config.Config, execs []execDesc) {
- for _, exec := range execs {
- t.Run(exec.name, func(t *testing.T) {
- args := &control.ExecArgs{Argv: exec.cmd}
- if ws, err := exec.c.executeSync(conf, args); err != nil {
- t.Errorf("error executing %+v: %v", args, err)
- } else if ws.ExitStatus() != exec.want {
- t.Errorf("%q: exec %q got exit status: %d, want: %d", exec.name, exec.cmd, ws.ExitStatus(), exec.want)
- }
- })
- }
-}
-
-func createSharedMount(mount specs.Mount, name string, pod ...*specs.Spec) {
- for _, spec := range pod {
- spec.Annotations[boot.MountPrefix+name+".source"] = mount.Source
- spec.Annotations[boot.MountPrefix+name+".type"] = mount.Type
- spec.Annotations[boot.MountPrefix+name+".share"] = "pod"
- if len(mount.Options) > 0 {
- spec.Annotations[boot.MountPrefix+name+".options"] = strings.Join(mount.Options, ",")
- }
- }
-}
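
// Editorial sketch (not from the deleted file above): what createSharedMount
// writes for a mount named "shared-tmp" across a pod whose specs came from
// createSpecs (so Annotations is already non-nil). The host path is
// hypothetical; only the annotation keys and the "pod" share mode matter here.
func exampleSharedMount(pod ...*specs.Spec) {
	mnt := specs.Mount{
		Destination: "/tmp/shared",
		Source:      "/some/host/dir", // hypothetical host directory
		Type:        "bind",
		Options:     []string{"rw"},
	}
	// Results in, for every spec in the pod:
	//   boot.MountPrefix + "shared-tmp.source"  = "/some/host/dir"
	//   boot.MountPrefix + "shared-tmp.type"    = "bind"
	//   boot.MountPrefix + "shared-tmp.share"   = "pod"
	//   boot.MountPrefix + "shared-tmp.options" = "rw"
	createSharedMount(mnt, "shared-tmp", pod...)
}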
-
-// TestMultiContainerSanity checks that it is possible to run 2 dead-simple
-// containers in the same sandbox.
-func TestMultiContainerSanity(t *testing.T) {
- for name, conf := range configs(t, all...) {
- t.Run(name, func(t *testing.T) {
- rootDir, cleanup, err := testutil.SetupRootDir()
- if err != nil {
- t.Fatalf("error creating root dir: %v", err)
- }
- defer cleanup()
- conf.RootDir = rootDir
-
- // Setup the containers.
- sleep := []string{"sleep", "100"}
- specs, ids := createSpecs(sleep, sleep)
- containers, cleanup, err := startContainers(conf, specs, ids)
- if err != nil {
- t.Fatalf("error starting containers: %v", err)
- }
- defer cleanup()
-
- // Check via ps that multiple processes are running.
- expectedPL := []*control.Process{
- newProcessBuilder().PID(1).PPID(0).Cmd("sleep").Process(),
- }
- if err := waitForProcessList(containers[0], expectedPL); err != nil {
- t.Errorf("failed to wait for sleep to start: %v", err)
- }
- expectedPL = []*control.Process{
- newProcessBuilder().PID(2).PPID(0).Cmd("sleep").Process(),
- }
- if err := waitForProcessList(containers[1], expectedPL); err != nil {
- t.Errorf("failed to wait for sleep to start: %v", err)
- }
- })
- }
-}
-
-// TestMultiPIDNS checks that it is possible to run 2 dead-simple containers in
-// the same sandbox with different PID namespaces.
-func TestMultiPIDNS(t *testing.T) {
- for name, conf := range configs(t, all...) {
- t.Run(name, func(t *testing.T) {
- rootDir, cleanup, err := testutil.SetupRootDir()
- if err != nil {
- t.Fatalf("error creating root dir: %v", err)
- }
- defer cleanup()
- conf.RootDir = rootDir
-
- // Setup the containers.
- sleep := []string{"sleep", "100"}
- testSpecs, ids := createSpecs(sleep, sleep)
- testSpecs[1].Linux = &specs.Linux{
- Namespaces: []specs.LinuxNamespace{
- {
- Type: "pid",
- },
- },
- }
-
- containers, cleanup, err := startContainers(conf, testSpecs, ids)
- if err != nil {
- t.Fatalf("error starting containers: %v", err)
- }
- defer cleanup()
-
- // Check via ps that multiple processes are running.
- expectedPL := []*control.Process{
- newProcessBuilder().PID(1).Cmd("sleep").Process(),
- }
- if err := waitForProcessList(containers[0], expectedPL); err != nil {
- t.Errorf("failed to wait for sleep to start: %v", err)
- }
- expectedPL = []*control.Process{
- newProcessBuilder().PID(2).Cmd("sleep").Process(),
- }
- if err := waitForProcessList(containers[1], expectedPL); err != nil {
- t.Errorf("failed to wait for sleep to start: %v", err)
- }
-
- // Root container runs in the root PID namespace and can see all
- // processes.
- expectedPL = []*control.Process{
- newProcessBuilder().PID(1).Cmd("sleep").Process(),
- newProcessBuilder().PID(2).Cmd("sleep").Process(),
- newProcessBuilder().Cmd("ps").Process(),
- }
- got, err := execPS(conf, containers[0])
- if err != nil {
- t.Fatal(err)
- }
- if !procListsEqual(got, expectedPL) {
- t.Errorf("container got process list: %s, want: %s", procListToString(got), procListToString(expectedPL))
- }
-
- expectedPL = []*control.Process{
- newProcessBuilder().PID(1).Cmd("sleep").Process(),
- newProcessBuilder().Cmd("ps").Process(),
- }
- got, err = execPS(conf, containers[1])
- if err != nil {
- t.Fatal(err)
- }
- if !procListsEqual(got, expectedPL) {
- t.Errorf("container got process list: %s, want: %s", procListToString(got), procListToString(expectedPL))
- }
- })
- }
-}
-
-// TestMultiPIDNSPath checks the pidns path.
-func TestMultiPIDNSPath(t *testing.T) {
- for name, conf := range configs(t, all...) {
- t.Run(name, func(t *testing.T) {
- rootDir, cleanup, err := testutil.SetupRootDir()
- if err != nil {
- t.Fatalf("error creating root dir: %v", err)
- }
- defer cleanup()
- conf.RootDir = rootDir
-
- // Setup the containers.
- sleep := []string{"sleep", "100"}
- testSpecs, ids := createSpecs(sleep, sleep, sleep)
- testSpecs[0].Linux = &specs.Linux{
- Namespaces: []specs.LinuxNamespace{
- {
- Type: "pid",
- Path: "/proc/1/ns/pid",
- },
- },
- }
- testSpecs[1].Linux = &specs.Linux{
- Namespaces: []specs.LinuxNamespace{
- {
- Type: "pid",
- Path: "/proc/1/ns/pid",
- },
- },
- }
- testSpecs[2].Linux = &specs.Linux{
- Namespaces: []specs.LinuxNamespace{
- {
- Type: "pid",
- Path: "/proc/2/ns/pid",
- },
- },
- }
-
- containers, cleanup, err := startContainers(conf, testSpecs, ids)
- if err != nil {
- t.Fatalf("error starting containers: %v", err)
- }
- defer cleanup()
-
- // Check via ps that multiple processes are running.
- expectedPL := []*control.Process{
- newProcessBuilder().PID(1).PPID(0).Cmd("sleep").Process(),
- }
- if err := waitForProcessList(containers[0], expectedPL); err != nil {
- t.Errorf("failed to wait for sleep to start: %v", err)
- }
- expectedPL = []*control.Process{
- newProcessBuilder().PID(2).PPID(0).Cmd("sleep").Process(),
- }
- if err := waitForProcessList(containers[1], expectedPL); err != nil {
- t.Errorf("failed to wait for sleep to start: %v", err)
- }
- expectedPL = []*control.Process{
- newProcessBuilder().PID(3).PPID(0).Cmd("sleep").Process(),
- }
- if err := waitForProcessList(containers[2], expectedPL); err != nil {
- t.Errorf("failed to wait for sleep to start: %v", err)
- }
-
- // Root container runs in the root PID namespace and can see all
- // processes.
- expectedPL = []*control.Process{
- newProcessBuilder().PID(1).Cmd("sleep").Process(),
- newProcessBuilder().PID(2).Cmd("sleep").Process(),
- newProcessBuilder().PID(3).Cmd("sleep").Process(),
- newProcessBuilder().Cmd("ps").Process(),
- }
- got, err := execPS(conf, containers[0])
- if err != nil {
- t.Fatal(err)
- }
- if !procListsEqual(got, expectedPL) {
- t.Errorf("container got process list: %s, want: %s", procListToString(got), procListToString(expectedPL))
- }
-
- // Container 1 runs in the same PID namespace as the root container.
- expectedPL = []*control.Process{
- newProcessBuilder().PID(1).Cmd("sleep").Process(),
- newProcessBuilder().PID(2).Cmd("sleep").Process(),
- newProcessBuilder().PID(3).Cmd("sleep").Process(),
- newProcessBuilder().Cmd("ps").Process(),
- }
- got, err = execPS(conf, containers[1])
- if err != nil {
- t.Fatal(err)
- }
- if !procListsEqual(got, expectedPL) {
- t.Errorf("container got process list: %s, want: %s", procListToString(got), procListToString(expectedPL))
- }
-
- // Container 2 runs in its own PID namespace.
- expectedPL = []*control.Process{
- newProcessBuilder().PID(1).Cmd("sleep").Process(),
- newProcessBuilder().Cmd("ps").Process(),
- }
- got, err = execPS(conf, containers[2])
- if err != nil {
- t.Fatal(err)
- }
- if !procListsEqual(got, expectedPL) {
- t.Errorf("container got process list: %s, want: %s", procListToString(got), procListToString(expectedPL))
- }
- })
- }
-}
-
-// TestMultiPIDNSKill kills processes by PID when containers are using
-// different PID namespaces, to ensure the PID is interpreted in the root
-// namespace.
-func TestMultiPIDNSKill(t *testing.T) {
- app, err := testutil.FindFile("test/cmd/test_app/test_app")
- if err != nil {
- t.Fatal("error finding test_app:", err)
- }
-
- for name, conf := range configs(t, all...) {
- t.Run(name, func(t *testing.T) {
- rootDir, cleanup, err := testutil.SetupRootDir()
- if err != nil {
- t.Fatalf("error creating root dir: %v", err)
- }
- defer cleanup()
- conf.RootDir = rootDir
-
- // Setup the containers.
- cmd := []string{app, "task-tree", "--depth=1", "--width=2", "--pause=true"}
- const processes = 3
- testSpecs, ids := createSpecs(cmd, cmd)
-
- testSpecs[1].Linux = &specs.Linux{
- Namespaces: []specs.LinuxNamespace{
- {
- Type: "pid",
- },
- },
- }
-
- containers, cleanup, err := startContainers(conf, testSpecs, ids)
- if err != nil {
- t.Fatalf("error starting containers: %v", err)
- }
- defer cleanup()
-
- // Wait until all processes are created.
- for _, c := range containers {
- if err := waitForProcessCount(c, processes); err != nil {
- t.Fatalf("error waitting for processes: %v", err)
- }
- }
-
- for i, c := range containers {
- // First, kill a process that belongs to the container.
- procs, err := c.Processes()
- if err != nil {
- t.Fatalf("container.Processes(): %v", err)
- }
- t.Logf("Container %q procs: %s", c.ID, procListToString(procs))
- pidToKill := procs[processes-1].PID
- t.Logf("PID to kill: %d", pidToKill)
- if err := c.SignalProcess(unix.SIGKILL, int32(pidToKill)); err != nil {
- t.Errorf("container.SignalProcess: %v", err)
- }
- // Wait for the process to get killed.
- if err := waitForProcessCount(c, processes-1); err != nil {
- t.Fatalf("error waitting for processes: %v", err)
- }
- procs, err = c.Processes()
- if err != nil {
- t.Fatalf("container.Processes(): %v", err)
- }
- t.Logf("Container %q procs after kill: %s", c.ID, procListToString(procs))
- for _, proc := range procs {
- if proc.PID == pidToKill {
- t.Errorf("process %d not killed: %+v", pidToKill, proc)
- }
- }
-
- // Next, attempt to kill a process from another container and check that
- // it fails.
- other := containers[(i+1)%len(containers)]
- procs, err = other.Processes()
- if err != nil {
- t.Fatalf("container.Processes(): %v", err)
- }
- t.Logf("Other container %q procs: %s", other.ID, procListToString(procs))
-
- pidToKill = procs[len(procs)-1].PID
- t.Logf("PID that should not be killed: %d", pidToKill)
- err = c.SignalProcess(unix.SIGKILL, int32(pidToKill))
- if err == nil {
- t.Fatalf("killing another container's process should fail")
- }
- if !strings.Contains(err.Error(), "belongs to a different container") {
- t.Errorf("wrong error message from killing another container's: %v", err)
- }
- }
- })
- }
-}
-
-func TestMultiContainerWait(t *testing.T) {
- rootDir, cleanup, err := testutil.SetupRootDir()
- if err != nil {
- t.Fatalf("error creating root dir: %v", err)
- }
- defer cleanup()
-
- conf := testutil.TestConfig(t)
- conf.RootDir = rootDir
-
- // The first container should run the entire duration of the test.
- cmd1 := []string{"sleep", "100"}
- // We'll wait on the second container, which is much shorter lived.
- cmd2 := []string{"sleep", "1"}
- specs, ids := createSpecs(cmd1, cmd2)
-
- containers, cleanup, err := startContainers(conf, specs, ids)
- if err != nil {
- t.Fatalf("error starting containers: %v", err)
- }
- defer cleanup()
-
- // Check that we can wait for the sub-container.
- c := containers[1]
- if ws, err := c.Wait(); err != nil {
- t.Errorf("failed to wait for process %s: %v", c.Spec.Process.Args, err)
- } else if es := ws.ExitStatus(); es != 0 {
- t.Errorf("process %s exited with non-zero status %d", c.Spec.Process.Args, es)
- }
- if _, err := c.Wait(); err != nil {
- t.Errorf("wait for stopped container %s shouldn't fail: %v", c.Spec.Process.Args, err)
- }
-
- // After Wait returns, ensure that the root container is running and
- // the child has finished.
- expectedPL := []*control.Process{
- newProcessBuilder().Cmd("sleep").PID(1).Process(),
- }
- if err := waitForProcessList(containers[0], expectedPL); err != nil {
- t.Errorf("failed to wait for %q to start: %v", strings.Join(containers[0].Spec.Process.Args, " "), err)
- }
-}
-
- // TestExecWait ensures that we can wait on containers and individual processes
-// in the sandbox that have already exited.
-func TestExecWait(t *testing.T) {
- rootDir, cleanup, err := testutil.SetupRootDir()
- if err != nil {
- t.Fatalf("error creating root dir: %v", err)
- }
- defer cleanup()
-
- conf := testutil.TestConfig(t)
- conf.RootDir = rootDir
-
- // The first container should run the entire duration of the test.
- cmd1 := []string{"sleep", "100"}
- // We'll wait on the second container, which is much shorter lived.
- cmd2 := []string{"sleep", "1"}
- specs, ids := createSpecs(cmd1, cmd2)
- containers, cleanup, err := startContainers(conf, specs, ids)
- if err != nil {
- t.Fatalf("error starting containers: %v", err)
- }
- defer cleanup()
-
- // Check via ps that process is running.
- expectedPL := []*control.Process{
- newProcessBuilder().Cmd("sleep").Process(),
- }
- if err := waitForProcessList(containers[1], expectedPL); err != nil {
- t.Fatalf("failed to wait for sleep to start: %v", err)
- }
-
- // Wait for the second container to finish.
- if err := waitForProcessCount(containers[1], 0); err != nil {
- t.Fatalf("failed to wait for second container to stop: %v", err)
- }
-
- // Get the second container exit status.
- if ws, err := containers[1].Wait(); err != nil {
- t.Fatalf("failed to wait for process %s: %v", containers[1].Spec.Process.Args, err)
- } else if es := ws.ExitStatus(); es != 0 {
- t.Fatalf("process %s exited with non-zero status %d", containers[1].Spec.Process.Args, es)
- }
- if _, err := containers[1].Wait(); err != nil {
- t.Fatalf("wait for stopped container %s shouldn't fail: %v", containers[1].Spec.Process.Args, err)
- }
-
- // Execute another process in the first container.
- args := &control.ExecArgs{
- Filename: "/bin/sleep",
- Argv: []string{"/bin/sleep", "1"},
- WorkingDirectory: "/",
- KUID: 0,
- }
- pid, err := containers[0].Execute(conf, args)
- if err != nil {
- t.Fatalf("error executing: %v", err)
- }
-
- // Wait for the exec'd process to exit.
- expectedPL = []*control.Process{
- newProcessBuilder().PID(1).Cmd("sleep").Process(),
- }
- if err := waitForProcessList(containers[0], expectedPL); err != nil {
- t.Fatalf("failed to wait for second container to stop: %v", err)
- }
-
- // Get the exit status from the exec'd process.
- if ws, err := containers[0].WaitPID(pid); err != nil {
- t.Fatalf("failed to wait for process %+v with pid %d: %v", args, pid, err)
- } else if es := ws.ExitStatus(); es != 0 {
- t.Fatalf("process %+v exited with non-zero status %d", args, es)
- }
- if _, err := containers[0].WaitPID(pid); err == nil {
- t.Fatalf("wait for stopped process %+v should fail", args)
- }
-}
-
-// TestMultiContainerMount tests that bind mounts can be used with multiple
-// containers.
-func TestMultiContainerMount(t *testing.T) {
- cmd1 := []string{"sleep", "100"}
-
- // 'src != dst' ensures that 'dst' doesn't exist in the host and must be
- // properly mapped inside the container to work.
- src, err := ioutil.TempDir(testutil.TmpDir(), "container")
- if err != nil {
- t.Fatal("ioutil.TempDir failed:", err)
- }
- dst := src + ".dst"
- cmd2 := []string{"touch", filepath.Join(dst, "file")}
-
- sps, ids := createSpecs(cmd1, cmd2)
- sps[1].Mounts = append(sps[1].Mounts, specs.Mount{
- Source: src,
- Destination: dst,
- Type: "bind",
- })
-
- // Setup the containers.
- rootDir, cleanup, err := testutil.SetupRootDir()
- if err != nil {
- t.Fatalf("error creating root dir: %v", err)
- }
- defer cleanup()
-
- conf := testutil.TestConfig(t)
- conf.RootDir = rootDir
-
- containers, cleanup, err := startContainers(conf, sps, ids)
- if err != nil {
- t.Fatalf("error starting containers: %v", err)
- }
- defer cleanup()
-
- ws, err := containers[1].Wait()
- if err != nil {
- t.Error("error waiting on container:", err)
- }
- if !ws.Exited() || ws.ExitStatus() != 0 {
- t.Error("container failed, waitStatus:", ws)
- }
-}
-
-// TestMultiContainerSignal checks that it is possible to signal individual
-// containers without killing the entire sandbox.
-func TestMultiContainerSignal(t *testing.T) {
- for name, conf := range configs(t, all...) {
- t.Run(name, func(t *testing.T) {
- rootDir, cleanup, err := testutil.SetupRootDir()
- if err != nil {
- t.Fatalf("error creating root dir: %v", err)
- }
- defer cleanup()
- conf.RootDir = rootDir
-
- // Setup the containers.
- sleep := []string{"sleep", "100"}
- specs, ids := createSpecs(sleep, sleep)
- containers, cleanup, err := startContainers(conf, specs, ids)
- if err != nil {
- t.Fatalf("error starting containers: %v", err)
- }
- defer cleanup()
-
- // Check via ps that container 1 process is running.
- expectedPL := []*control.Process{
- newProcessBuilder().Cmd("sleep").Process(),
- }
- if err := waitForProcessList(containers[1], expectedPL); err != nil {
- t.Errorf("failed to wait for sleep to start: %v", err)
- }
-
- // Kill process 2.
- if err := containers[1].SignalContainer(unix.SIGKILL, false); err != nil {
- t.Errorf("failed to kill process 2: %v", err)
- }
-
- // Make sure process 1 is still running.
- expectedPL = []*control.Process{
- newProcessBuilder().PID(1).Cmd("sleep").Process(),
- }
- if err := waitForProcessList(containers[0], expectedPL); err != nil {
- t.Errorf("failed to wait for sleep to start: %v", err)
- }
-
- // goferPid is reset when container is destroyed.
- goferPid := containers[1].GoferPid
-
- // Destroy container and ensure container's gofer process has exited.
- if err := containers[1].Destroy(); err != nil {
- t.Errorf("failed to destroy container: %v", err)
- }
- _, _, err = specutils.RetryEintr(func() (uintptr, uintptr, error) {
- cpid, err := unix.Wait4(goferPid, nil, 0, nil)
- return uintptr(cpid), 0, err
- })
- if err != unix.ECHILD {
- t.Errorf("error waiting for gofer to exit: %v", err)
- }
- // Make sure process 1 is still running.
- if err := waitForProcessList(containers[0], expectedPL); err != nil {
- t.Errorf("failed to wait for sleep to start: %v", err)
- }
-
- // Now that process 2 is gone, ensure we get an error trying to
- // signal it again.
- if err := containers[1].SignalContainer(unix.SIGKILL, false); err == nil {
- t.Errorf("container %q shouldn't exist, but we were able to signal it", containers[1].ID)
- }
-
- // Kill process 1.
- if err := containers[0].SignalContainer(unix.SIGKILL, false); err != nil {
- t.Errorf("failed to kill process 1: %v", err)
- }
-
- // Ensure that container's gofer and sandbox process are no more.
- err = blockUntilWaitable(containers[0].GoferPid)
- if err != nil && err != unix.ECHILD {
- t.Errorf("error waiting for gofer to exit: %v", err)
- }
-
- err = blockUntilWaitable(containers[0].Sandbox.Pid)
- if err != nil && err != unix.ECHILD {
- t.Errorf("error waiting for sandbox to exit: %v", err)
- }
-
- // The sentry should be gone, so signaling should yield an error.
- if err := containers[0].SignalContainer(unix.SIGKILL, false); err == nil {
- t.Errorf("sandbox %q shouldn't exist, but we were able to signal it", containers[0].Sandbox.ID)
- }
-
- if err := containers[0].Destroy(); err != nil {
- t.Errorf("failed to destroy container: %v", err)
- }
- })
- }
-}
-
- // TestMultiContainerDestroy checks that containers are properly cleaned up when
-// they are destroyed.
-func TestMultiContainerDestroy(t *testing.T) {
- app, err := testutil.FindFile("test/cmd/test_app/test_app")
- if err != nil {
- t.Fatal("error finding test_app:", err)
- }
-
- for name, conf := range configs(t, all...) {
- t.Run(name, func(t *testing.T) {
- rootDir, cleanup, err := testutil.SetupRootDir()
- if err != nil {
- t.Fatalf("error creating root dir: %v", err)
- }
- defer cleanup()
- conf.RootDir = rootDir
-
- // First container will remain intact while the second container is killed.
- podSpecs, ids := createSpecs(
- []string{"sleep", "100"},
- []string{app, "fork-bomb"})
-
- // Run the fork bomb in a PID namespace to prevent processes from being
- // re-parented to PID=1 in the root container.
- podSpecs[1].Linux = &specs.Linux{
- Namespaces: []specs.LinuxNamespace{{Type: "pid"}},
- }
- containers, cleanup, err := startContainers(conf, podSpecs, ids)
- if err != nil {
- t.Fatalf("error starting containers: %v", err)
- }
- defer cleanup()
-
- // Exec more processes to ensure signal all works for exec'd processes too.
- args := &control.ExecArgs{
- Filename: app,
- Argv: []string{app, "fork-bomb"},
- }
- if _, err := containers[1].Execute(conf, args); err != nil {
- t.Fatalf("error exec'ing: %v", err)
- }
-
- // Let it brew...
- time.Sleep(500 * time.Millisecond)
-
- if err := containers[1].Destroy(); err != nil {
- t.Fatalf("error destroying container: %v", err)
- }
-
- // Check that destroy killed all processes belonging to the container and
- // waited for them to exit before returning.
- pss, err := containers[0].Sandbox.Processes("")
- if err != nil {
- t.Fatalf("error getting process data from sandbox: %v", err)
- }
- expectedPL := []*control.Process{
- newProcessBuilder().PID(1).Cmd("sleep").Process(),
- }
- if !procListsEqual(pss, expectedPL) {
- t.Errorf("container got process list: %s, want: %s: error: %v",
- procListToString(pss), procListToString(expectedPL), err)
- }
-
- // Check that cont.Destroy is safe to call multiple times.
- if err := containers[1].Destroy(); err != nil {
- t.Errorf("error destroying container: %v", err)
- }
- })
- }
-}
-
-func TestMultiContainerProcesses(t *testing.T) {
- rootDir, cleanup, err := testutil.SetupRootDir()
- if err != nil {
- t.Fatalf("error creating root dir: %v", err)
- }
- defer cleanup()
-
- conf := testutil.TestConfig(t)
- conf.RootDir = rootDir
-
- // Note: use curly braces to keep 'sh' process around. Otherwise, shell
- // will just execve into 'sleep' and both containers will look the
- // same.
- specs, ids := createSpecs(
- []string{"sleep", "100"},
- []string{"sh", "-c", "{ sleep 100; }"})
- containers, cleanup, err := startContainers(conf, specs, ids)
- if err != nil {
- t.Fatalf("error starting containers: %v", err)
- }
- defer cleanup()
-
- // Check root's container process list doesn't include other containers.
- expectedPL0 := []*control.Process{
- newProcessBuilder().PID(1).Cmd("sleep").Process(),
- }
- if err := waitForProcessList(containers[0], expectedPL0); err != nil {
- t.Errorf("failed to wait for process to start: %v", err)
- }
-
- // Same for the other container.
- expectedPL1 := []*control.Process{
- newProcessBuilder().PID(2).Cmd("sh").Process(),
- newProcessBuilder().PID(3).PPID(2).Cmd("sleep").Process(),
- }
- if err := waitForProcessList(containers[1], expectedPL1); err != nil {
- t.Errorf("failed to wait for process to start: %v", err)
- }
-
- // Now exec into the second container and verify that the new process shows up in its process list.
- args := &control.ExecArgs{
- Filename: "/bin/sleep",
- Argv: []string{"/bin/sleep", "100"},
- }
- if _, err := containers[1].Execute(conf, args); err != nil {
- t.Fatalf("error exec'ing: %v", err)
- }
- expectedPL1 = append(expectedPL1, newProcessBuilder().PID(4).Cmd("sleep").Process())
- if err := waitForProcessList(containers[1], expectedPL1); err != nil {
- t.Errorf("failed to wait for process to start: %v", err)
- }
- // Root container should remain unchanged.
- if err := waitForProcessList(containers[0], expectedPL0); err != nil {
- t.Errorf("failed to wait for process to start: %v", err)
- }
-}
-
- // TestMultiContainerKillAll checks that all processes that belong to a container
-// are killed when SIGKILL is sent to *all* processes in that container.
-func TestMultiContainerKillAll(t *testing.T) {
- rootDir, cleanup, err := testutil.SetupRootDir()
- if err != nil {
- t.Fatalf("error creating root dir: %v", err)
- }
- defer cleanup()
-
- conf := testutil.TestConfig(t)
- conf.RootDir = rootDir
-
- for _, tc := range []struct {
- killContainer bool
- }{
- {killContainer: true},
- {killContainer: false},
- } {
- app, err := testutil.FindFile("test/cmd/test_app/test_app")
- if err != nil {
- t.Fatal("error finding test_app:", err)
- }
-
- // First container will remain intact while the second container is killed.
- specs, ids := createSpecs(
- []string{app, "task-tree", "--depth=2", "--width=2"},
- []string{app, "task-tree", "--depth=4", "--width=2"})
- containers, cleanup, err := startContainers(conf, specs, ids)
- if err != nil {
- t.Fatalf("error starting containers: %v", err)
- }
- defer cleanup()
-
- // Wait until all processes are created.
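- // With width=2, task-tree builds a full binary tree, so a tree of depth d has
- // 2^(d+1)-1 tasks (7 for depth=2, 31 for depth=4).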
- rootProcCount := int(math.Pow(2, 3) - 1)
- if err := waitForProcessCount(containers[0], rootProcCount); err != nil {
- t.Fatalf("error waitting for processes: %v", err)
- }
- procCount := int(math.Pow(2, 5) - 1)
- if err := waitForProcessCount(containers[1], procCount); err != nil {
- t.Fatalf("error waiting for processes: %v", err)
- }
-
- // Exec more processes to ensure signal works for exec'd processes too.
- args := &control.ExecArgs{
- Filename: app,
- Argv: []string{app, "task-tree", "--depth=2", "--width=2"},
- }
- if _, err := containers[1].Execute(conf, args); err != nil {
- t.Fatalf("error exec'ing: %v", err)
- }
- // Wait for these new processes to start.
- procCount += int(math.Pow(2, 3) - 1)
- if err := waitForProcessCount(containers[1], procCount); err != nil {
- t.Fatalf("error waiting for processes: %v", err)
- }
-
- if tc.killContainer {
- // First kill the init process so that the container is stopped while
- // other processes are still running inside.
- if err := containers[1].SignalContainer(unix.SIGKILL, false); err != nil {
- t.Fatalf("SignalContainer(): %v", err)
- }
- op := func() error {
- c, err := Load(conf.RootDir, FullID{ContainerID: ids[1]}, LoadOpts{})
- if err != nil {
- return err
- }
- if c.Status != Stopped {
- return fmt.Errorf("container is not stopped")
- }
- return nil
- }
- if err := testutil.Poll(op, 5*time.Second); err != nil {
- t.Fatalf("container did not stop %q: %v", containers[1].ID, err)
- }
- }
-
- c, err := Load(conf.RootDir, FullID{ContainerID: ids[1]}, LoadOpts{})
- if err != nil {
- t.Fatalf("failed to load child container %q: %v", ids[1], err)
- }
- // Kill'Em All
- if err := c.SignalContainer(unix.SIGKILL, true); err != nil {
- t.Fatalf("failed to send SIGKILL to container %q: %v", c.ID, err)
- }
-
- // Check that all processes are gone.
- if err := waitForProcessCount(containers[1], 0); err != nil {
- t.Fatalf("error waiting for processes: %v", err)
- }
- // Check that root container was not affected.
- if err := waitForProcessCount(containers[0], rootProcCount); err != nil {
- t.Fatalf("error waiting for processes: %v", err)
- }
- }
-}
-
-func TestMultiContainerDestroyNotStarted(t *testing.T) {
- specs, ids := createSpecs(
- []string{"/bin/sleep", "100"},
- []string{"/bin/sleep", "100"})
-
- conf := testutil.TestConfig(t)
- _, bundleDir, cleanup, err := testutil.SetupContainer(specs[0], conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- rootArgs := Args{
- ID: ids[0],
- Spec: specs[0],
- BundleDir: bundleDir,
- }
- root, err := New(conf, rootArgs)
- if err != nil {
- t.Fatalf("error creating root container: %v", err)
- }
- defer root.Destroy()
- if err := root.Start(conf); err != nil {
- t.Fatalf("error starting root container: %v", err)
- }
-
- // Create and destroy sub-container.
- bundleDir, cleanupSub, err := testutil.SetupBundleDir(specs[1])
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanupSub()
-
- args := Args{
- ID: ids[1],
- Spec: specs[1],
- BundleDir: bundleDir,
- }
- cont, err := New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
-
- // Check that container can be destroyed.
- if err := cont.Destroy(); err != nil {
- t.Fatalf("deleting non-started container failed: %v", err)
- }
-}
-
-// TestMultiContainerDestroyStarting attempts to force a race between start
-// and destroy.
-func TestMultiContainerDestroyStarting(t *testing.T) {
- cmds := make([][]string, 10)
- for i := range cmds {
- cmds[i] = []string{"/bin/sleep", "100"}
- }
- specs, ids := createSpecs(cmds...)
-
- conf := testutil.TestConfig(t)
- rootDir, bundleDir, cleanup, err := testutil.SetupContainer(specs[0], conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- rootArgs := Args{
- ID: ids[0],
- Spec: specs[0],
- BundleDir: bundleDir,
- }
- root, err := New(conf, rootArgs)
- if err != nil {
- t.Fatalf("error creating root container: %v", err)
- }
- defer root.Destroy()
- if err := root.Start(conf); err != nil {
- t.Fatalf("error starting root container: %v", err)
- }
-
- wg := sync.WaitGroup{}
- for i := range cmds {
- if i == 0 {
- continue // skip root container
- }
-
- bundleDir, cleanup, err := testutil.SetupBundleDir(specs[i])
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- rootArgs := Args{
- ID: ids[i],
- Spec: specs[i],
- BundleDir: bundleDir,
- }
- cont, err := New(conf, rootArgs)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
-
- // Container is not thread safe, so load another instance to run
- // concurrently.
- startCont, err := Load(rootDir, FullID{ContainerID: ids[i]}, LoadOpts{})
- if err != nil {
- t.Fatalf("error loading container: %v", err)
- }
- wg.Add(1)
- go func() {
- defer wg.Done()
- // Ignore failures, start can fail if destroy runs first.
- _ = startCont.Start(conf)
- }()
-
- wg.Add(1)
- go func() {
- defer wg.Done()
- if err := cont.Destroy(); err != nil {
- t.Errorf("deleting non-started container failed: %v", err)
- }
- }()
- }
- wg.Wait()
-}
-
-// TestMultiContainerDifferentFilesystems tests that different containers have
-// different root filesystems.
-func TestMultiContainerDifferentFilesystems(t *testing.T) {
- filename := "/foo"
- // Root container will create file and then sleep.
- cmdRoot := []string{"sh", "-c", fmt.Sprintf("touch %q && sleep 100", filename)}
-
- // Child containers will assert that the file does not exist, and will
- // then create it.
- script := fmt.Sprintf("if [ -f %q ]; then exit 1; else touch %q; fi", filename, filename)
- cmd := []string{"sh", "-c", script}
-
- rootDir, cleanup, err := testutil.SetupRootDir()
- if err != nil {
- t.Fatalf("error creating root dir: %v", err)
- }
- defer cleanup()
-
- conf := testutil.TestConfig(t)
- conf.RootDir = rootDir
-
- // Make sure overlay is enabled, and none of the root filesystems are
- // read-only, otherwise we won't be able to create the file.
- conf.Overlay = true
- specs, ids := createSpecs(cmdRoot, cmd, cmd)
- for _, s := range specs {
- s.Root.Readonly = false
- }
-
- containers, cleanup, err := startContainers(conf, specs, ids)
- if err != nil {
- t.Fatalf("error starting containers: %v", err)
- }
- defer cleanup()
-
- // Both child containers should exit successfully.
- for i, c := range containers {
- if i == 0 {
- // Don't wait on the root.
- continue
- }
- if ws, err := c.Wait(); err != nil {
- t.Errorf("failed to wait for process %s: %v", c.Spec.Process.Args, err)
- } else if es := ws.ExitStatus(); es != 0 {
- t.Errorf("process %s exited with non-zero status %d", c.Spec.Process.Args, es)
- }
- }
-}
-
-// TestMultiContainerContainerDestroyStress tests that IO operations continue
-// to work after containers have been stopped and gofers killed.
-func TestMultiContainerContainerDestroyStress(t *testing.T) {
- app, err := testutil.FindFile("test/cmd/test_app/test_app")
- if err != nil {
- t.Fatal("error finding test_app:", err)
- }
-
- // Setup containers. The root container just reaps children, while the others
- // perform some IO. Children are executed in 3 batches of 10; within each batch,
- // containers overlap in starting and being destroyed, and all containers in a
- // batch stop before the next batch starts.
- cmds := [][]string{{app, "reaper"}}
- const batchSize = 10
- for i := 0; i < 3*batchSize; i++ {
- dir, err := ioutil.TempDir(testutil.TmpDir(), "gofer-stop-test")
- if err != nil {
- t.Fatal("ioutil.TempDir failed:", err)
- }
- defer os.RemoveAll(dir)
-
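- // Each child copies a few binaries into its own temp dir, generating file IO
- // through the gofer.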
- cmd := "find /bin -type f | head | xargs -I SRC cp SRC " + dir
- cmds = append(cmds, []string{"sh", "-c", cmd})
- }
- allSpecs, allIDs := createSpecs(cmds...)
-
- // Split up the specs and IDs.
- rootSpec := allSpecs[0]
- rootID := allIDs[0]
- childrenSpecs := allSpecs[1:]
- childrenIDs := allIDs[1:]
-
- conf := testutil.TestConfig(t)
- _, bundleDir, cleanup, err := testutil.SetupContainer(rootSpec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- // Start root container.
- rootArgs := Args{
- ID: rootID,
- Spec: rootSpec,
- BundleDir: bundleDir,
- }
- root, err := New(conf, rootArgs)
- if err != nil {
- t.Fatalf("error creating root container: %v", err)
- }
- if err := root.Start(conf); err != nil {
- t.Fatalf("error starting root container: %v", err)
- }
- defer root.Destroy()
-
- // Run batches. Each batch starts containers in parallel, then waits for and
- // destroys them before starting another batch.
- for i := 0; i < len(childrenSpecs); i += batchSize {
- t.Logf("Starting batch from %d to %d", i, i+batchSize)
- specs := childrenSpecs[i : i+batchSize]
- ids := childrenIDs[i : i+batchSize]
-
- var children []*Container
- for j, spec := range specs {
- bundleDir, cleanup, err := testutil.SetupBundleDir(spec)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- args := Args{
- ID: ids[j],
- Spec: spec,
- BundleDir: bundleDir,
- }
- child, err := New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- children = append(children, child)
-
- if err := child.Start(conf); err != nil {
- t.Fatalf("error starting container: %v", err)
- }
-
- // Give a small gap between containers.
- time.Sleep(50 * time.Millisecond)
- }
- for _, child := range children {
- ws, err := child.Wait()
- if err != nil {
- t.Fatalf("waiting for container: %v", err)
- }
- if !ws.Exited() || ws.ExitStatus() != 0 {
- t.Fatalf("container failed, waitStatus: %x (%d)", ws, ws.ExitStatus())
- }
- if err := child.Destroy(); err != nil {
- t.Fatalf("error destroying container: %v", err)
- }
- }
- }
-}
-
- // Test that pod shared mounts are properly mounted in 2 containers and that
- // changes from one container are reflected in the other.
-func TestMultiContainerSharedMount(t *testing.T) {
- for name, conf := range configs(t, all...) {
- t.Run(name, func(t *testing.T) {
- rootDir, cleanup, err := testutil.SetupRootDir()
- if err != nil {
- t.Fatalf("error creating root dir: %v", err)
- }
- defer cleanup()
- conf.RootDir = rootDir
-
- // Setup the containers.
- sleep := []string{"sleep", "100"}
- podSpec, ids := createSpecs(sleep, sleep)
- mnt0 := specs.Mount{
- Destination: "/mydir/test",
- Source: "/some/dir",
- Type: "tmpfs",
- Options: nil,
- }
- podSpec[0].Mounts = append(podSpec[0].Mounts, mnt0)
-
- mnt1 := mnt0
- mnt1.Destination = "/mydir2/test2"
- podSpec[1].Mounts = append(podSpec[1].Mounts, mnt1)
-
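- // createSharedMount tags both mounts as a single pod-level shared mount, so
- // the two containers should see the same tmpfs contents despite the different
- // destinations.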
- createSharedMount(mnt0, "test-mount", podSpec...)
-
- containers, cleanup, err := startContainers(conf, podSpec, ids)
- if err != nil {
- t.Fatalf("error starting containers: %v", err)
- }
- defer cleanup()
-
- file0 := path.Join(mnt0.Destination, "abc")
- file1 := path.Join(mnt1.Destination, "abc")
- execs := []execDesc{
- {
- c: containers[0],
- cmd: []string{"/usr/bin/test", "-d", mnt0.Destination},
- name: "directory is mounted in container0",
- },
- {
- c: containers[1],
- cmd: []string{"/usr/bin/test", "-d", mnt1.Destination},
- name: "directory is mounted in container1",
- },
- {
- c: containers[0],
- cmd: []string{"/bin/touch", file0},
- name: "create file in container0",
- },
- {
- c: containers[0],
- cmd: []string{"/usr/bin/test", "-f", file0},
- name: "file appears in container0",
- },
- {
- c: containers[1],
- cmd: []string{"/usr/bin/test", "-f", file1},
- name: "file appears in container1",
- },
- {
- c: containers[1],
- cmd: []string{"/bin/rm", file1},
- name: "remove file from container1",
- },
- {
- c: containers[0],
- cmd: []string{"/usr/bin/test", "!", "-f", file0},
- name: "file removed from container0",
- },
- {
- c: containers[1],
- cmd: []string{"/usr/bin/test", "!", "-f", file1},
- name: "file removed from container1",
- },
- {
- c: containers[1],
- cmd: []string{"/bin/mkdir", file1},
- name: "create directory in container1",
- },
- {
- c: containers[0],
- cmd: []string{"/usr/bin/test", "-d", file0},
- name: "dir appears in container0",
- },
- {
- c: containers[1],
- cmd: []string{"/usr/bin/test", "-d", file1},
- name: "dir appears in container1",
- },
- {
- c: containers[0],
- cmd: []string{"/bin/rmdir", file0},
- name: "remove directory from container0",
- },
- {
- c: containers[0],
- cmd: []string{"/usr/bin/test", "!", "-d", file0},
- name: "dir removed from container0",
- },
- {
- c: containers[1],
- cmd: []string{"/usr/bin/test", "!", "-d", file1},
- name: "dir removed from container1",
- },
- }
- execMany(t, conf, execs)
- })
- }
-}
-
-// Test that pod mounts are mounted as readonly when requested.
-func TestMultiContainerSharedMountReadonly(t *testing.T) {
- for name, conf := range configs(t, all...) {
- t.Run(name, func(t *testing.T) {
- rootDir, cleanup, err := testutil.SetupRootDir()
- if err != nil {
- t.Fatalf("error creating root dir: %v", err)
- }
- defer cleanup()
- conf.RootDir = rootDir
-
- // Setup the containers.
- sleep := []string{"sleep", "100"}
- podSpec, ids := createSpecs(sleep, sleep)
- mnt0 := specs.Mount{
- Destination: "/mydir/test",
- Source: "/some/dir",
- Type: "tmpfs",
- Options: []string{"ro"},
- }
- podSpec[0].Mounts = append(podSpec[0].Mounts, mnt0)
-
- mnt1 := mnt0
- mnt1.Destination = "/mydir2/test2"
- podSpec[1].Mounts = append(podSpec[1].Mounts, mnt1)
-
- createSharedMount(mnt0, "test-mount", podSpec...)
-
- containers, cleanup, err := startContainers(conf, podSpec, ids)
- if err != nil {
- t.Fatalf("error starting containers: %v", err)
- }
- defer cleanup()
-
- file0 := path.Join(mnt0.Destination, "abc")
- file1 := path.Join(mnt1.Destination, "abc")
- execs := []execDesc{
- {
- c: containers[0],
- cmd: []string{"/usr/bin/test", "-d", mnt0.Destination},
- name: "directory is mounted in container0",
- },
- {
- c: containers[1],
- cmd: []string{"/usr/bin/test", "-d", mnt1.Destination},
- name: "directory is mounted in container1",
- },
- {
- c: containers[0],
- cmd: []string{"/bin/touch", file0},
- want: 1,
- name: "fails to write to container0",
- },
- {
- c: containers[1],
- cmd: []string{"/bin/touch", file1},
- want: 1,
- name: "fails to write to container1",
- },
- }
- execMany(t, conf, execs)
- })
- }
-}
-
-// Test that shared pod mounts continue to work after container is restarted.
-func TestMultiContainerSharedMountRestart(t *testing.T) {
- for name, conf := range configs(t, all...) {
- t.Run(name, func(t *testing.T) {
- rootDir, cleanup, err := testutil.SetupRootDir()
- if err != nil {
- t.Fatalf("error creating root dir: %v", err)
- }
- defer cleanup()
- conf.RootDir = rootDir
-
- // Setup the containers.
- sleep := []string{"sleep", "100"}
- podSpec, ids := createSpecs(sleep, sleep)
- mnt0 := specs.Mount{
- Destination: "/mydir/test",
- Source: "/some/dir",
- Type: "tmpfs",
- Options: nil,
- }
- podSpec[0].Mounts = append(podSpec[0].Mounts, mnt0)
-
- mnt1 := mnt0
- mnt1.Destination = "/mydir2/test2"
- podSpec[1].Mounts = append(podSpec[1].Mounts, mnt1)
-
- createSharedMount(mnt0, "test-mount", podSpec...)
-
- containers, cleanup, err := startContainers(conf, podSpec, ids)
- if err != nil {
- t.Fatalf("error starting containers: %v", err)
- }
- defer cleanup()
-
- file0 := path.Join(mnt0.Destination, "abc")
- file1 := path.Join(mnt1.Destination, "abc")
- execs := []execDesc{
- {
- c: containers[0],
- cmd: []string{"/bin/touch", file0},
- name: "create file in container0",
- },
- {
- c: containers[0],
- cmd: []string{"/usr/bin/test", "-f", file0},
- name: "file appears in container0",
- },
- {
- c: containers[1],
- cmd: []string{"/usr/bin/test", "-f", file1},
- name: "file appears in container1",
- },
- }
- execMany(t, conf, execs)
-
- containers[1].Destroy()
-
- bundleDir, cleanup, err := testutil.SetupBundleDir(podSpec[1])
- if err != nil {
- t.Fatalf("error restarting container: %v", err)
- }
- defer cleanup()
-
- args := Args{
- ID: ids[1],
- Spec: podSpec[1],
- BundleDir: bundleDir,
- }
- containers[1], err = New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- if err := containers[1].Start(conf); err != nil {
- t.Fatalf("error starting container: %v", err)
- }
-
- execs = []execDesc{
- {
- c: containers[0],
- cmd: []string{"/usr/bin/test", "-f", file0},
- name: "file is still in container0",
- },
- {
- c: containers[1],
- cmd: []string{"/usr/bin/test", "-f", file1},
- name: "file is still in container1",
- },
- {
- c: containers[1],
- cmd: []string{"/bin/rm", file1},
- name: "file removed from container1",
- },
- {
- c: containers[0],
- cmd: []string{"/usr/bin/test", "!", "-f", file0},
- name: "file removed from container0",
- },
- {
- c: containers[1],
- cmd: []string{"/usr/bin/test", "!", "-f", file1},
- name: "file removed from container1",
- },
- }
- execMany(t, conf, execs)
- })
- }
-}
-
- // Test that unsupported pod mount options are ignored when matching master and
- // replica mounts.
-func TestMultiContainerSharedMountUnsupportedOptions(t *testing.T) {
- for name, conf := range configs(t, all...) {
- t.Run(name, func(t *testing.T) {
- rootDir, cleanup, err := testutil.SetupRootDir()
- if err != nil {
- t.Fatalf("error creating root dir: %v", err)
- }
- defer cleanup()
- conf.RootDir = rootDir
-
- // Setup the containers.
- sleep := []string{"/bin/sleep", "100"}
- podSpec, ids := createSpecs(sleep, sleep)
- mnt0 := specs.Mount{
- Destination: "/mydir/test",
- Source: "/some/dir",
- Type: "tmpfs",
- Options: []string{"rw", "relatime"},
- }
- podSpec[0].Mounts = append(podSpec[0].Mounts, mnt0)
-
- mnt1 := mnt0
- mnt1.Destination = "/mydir2/test2"
- mnt1.Options = []string{"rw", "nosuid"}
- podSpec[1].Mounts = append(podSpec[1].Mounts, mnt1)
-
- createSharedMount(mnt0, "test-mount", podSpec...)
-
- containers, cleanup, err := startContainers(conf, podSpec, ids)
- if err != nil {
- t.Fatalf("error starting containers: %v", err)
- }
- defer cleanup()
-
- execs := []execDesc{
- {
- c: containers[0],
- cmd: []string{"/usr/bin/test", "-d", mnt0.Destination},
- name: "directory is mounted in container0",
- },
- {
- c: containers[1],
- cmd: []string{"/usr/bin/test", "-d", mnt1.Destination},
- name: "directory is mounted in container1",
- },
- }
- execMany(t, conf, execs)
- })
- }
-}
-
-// Test that one container can send an FD to another container, even though
-// they have distinct MountNamespaces.
-func TestMultiContainerMultiRootCanHandleFDs(t *testing.T) {
- app, err := testutil.FindFile("test/cmd/test_app/test_app")
- if err != nil {
- t.Fatal("error finding test_app:", err)
- }
-
- // We set up two containers with one shared mount that is used for a
- // shared socket. The first container will send an FD over the socket
- // to the second container. The FD corresponds to a file in the first
- // container's mount namespace that is not part of the second
- // container's mount namespace. However, the second container still
- // should be able to read the FD.
-
- // Create a shared mount where we will put the socket.
- sharedMnt := specs.Mount{
- Destination: "/mydir/test",
- Type: "tmpfs",
- // Shared mounts need a Source, even for tmpfs. It is only used
- // to match up different shared mounts inside the pod.
- Source: "/some/dir",
- }
- socketPath := filepath.Join(sharedMnt.Destination, "socket")
-
- // Create a writeable tmpfs mount where the FD sender app will create
- // files to send. This will only be mounted in the FD sender.
- writeableMnt := specs.Mount{
- Destination: "/tmp",
- Type: "tmpfs",
- }
-
- rootDir, cleanup, err := testutil.SetupRootDir()
- if err != nil {
- t.Fatalf("error creating root dir: %v", err)
- }
- defer cleanup()
-
- conf := testutil.TestConfig(t)
- conf.RootDir = rootDir
-
- // Create the specs.
- specs, ids := createSpecs(
- []string{"sleep", "1000"},
- []string{app, "fd_sender", "--socket", socketPath},
- []string{app, "fd_receiver", "--socket", socketPath},
- )
- createSharedMount(sharedMnt, "shared-mount", specs...)
- specs[1].Mounts = append(specs[1].Mounts, sharedMnt, writeableMnt)
- specs[2].Mounts = append(specs[2].Mounts, sharedMnt)
-
- containers, cleanup, err := startContainers(conf, specs, ids)
- if err != nil {
- t.Fatalf("error starting containers: %v", err)
- }
- defer cleanup()
-
- // Both containers should exit successfully.
- for _, c := range containers[1:] {
- if ws, err := c.Wait(); err != nil {
- t.Errorf("failed to wait for process %s: %v", c.Spec.Process.Args, err)
- } else if es := ws.ExitStatus(); es != 0 {
- t.Errorf("process %s exited with non-zero status %d", c.Spec.Process.Args, es)
- }
- }
-}
-
- // Test that the container is destroyed when its gofer is killed.
-func TestMultiContainerGoferKilled(t *testing.T) {
- rootDir, cleanup, err := testutil.SetupRootDir()
- if err != nil {
- t.Fatalf("error creating root dir: %v", err)
- }
- defer cleanup()
-
- conf := testutil.TestConfig(t)
- conf.RootDir = rootDir
-
- sleep := []string{"sleep", "100"}
- specs, ids := createSpecs(sleep, sleep, sleep)
- containers, cleanup, err := startContainers(conf, specs, ids)
- if err != nil {
- t.Fatalf("error starting containers: %v", err)
- }
- defer cleanup()
-
- // Ensure container is running
- c := containers[2]
- expectedPL := []*control.Process{
- newProcessBuilder().PID(3).Cmd("sleep").Process(),
- }
- if err := waitForProcessList(c, expectedPL); err != nil {
- t.Errorf("failed to wait for sleep to start: %v", err)
- }
-
- // Kill container's gofer.
- if err := unix.Kill(c.GoferPid, unix.SIGKILL); err != nil {
- t.Fatalf("unix.Kill(%d, SIGKILL)=%v", c.GoferPid, err)
- }
-
- // Wait until container stops.
- if err := waitForProcessList(c, nil); err != nil {
- t.Errorf("Container %q was not stopped after gofer death: %v", c.ID, err)
- }
-
- // Check that container isn't running anymore.
- if _, err := execute(conf, c, "/bin/true"); err == nil {
- t.Fatalf("Container %q was not stopped after gofer death", c.ID)
- }
-
- // Check that other containers are unaffected.
- for i, c := range containers {
- if i == 2 {
- continue // container[2] has been killed.
- }
- pl := []*control.Process{
- newProcessBuilder().PID(kernel.ThreadID(i + 1)).Cmd("sleep").Process(),
- }
- if err := waitForProcessList(c, pl); err != nil {
- t.Errorf("Container %q was affected by another container: %v", c.ID, err)
- }
- if _, err := execute(conf, c, "/bin/true"); err != nil {
- t.Fatalf("Container %q was affected by another container: %v", c.ID, err)
- }
- }
-
- // Kill root container's gofer to bring entire sandbox down.
- c = containers[0]
- if err := unix.Kill(c.GoferPid, unix.SIGKILL); err != nil {
- t.Fatalf("unix.Kill(%d, SIGKILL)=%v", c.GoferPid, err)
- }
-
- // Wait until sandbox stops. waitForProcessList will loop until sandbox exits
- // and RPC errors out.
- impossiblePL := []*control.Process{
- newProcessBuilder().Cmd("non-existent-process").Process(),
- }
- if err := waitForProcessList(c, impossiblePL); err == nil {
- t.Fatalf("Sandbox was not killed after gofer death")
- }
-
- // Check that entire sandbox isn't running anymore.
- for _, c := range containers {
- if _, err := execute(conf, c, "/bin/true"); err == nil {
- t.Fatalf("Container %q was not stopped after gofer death", c.ID)
- }
- }
-}
-
-func TestMultiContainerLoadSandbox(t *testing.T) {
- sleep := []string{"sleep", "100"}
- specs, ids := createSpecs(sleep, sleep, sleep)
-
- rootDir, cleanup, err := testutil.SetupRootDir()
- if err != nil {
- t.Fatalf("error creating root dir: %v", err)
- }
- defer cleanup()
-
- conf := testutil.TestConfig(t)
- conf.RootDir = rootDir
-
- // Create containers for the sandbox.
- wants, cleanup, err := startContainers(conf, specs, ids)
- if err != nil {
- t.Fatalf("error starting containers: %v", err)
- }
- defer cleanup()
-
- // Then create unrelated containers.
- for i := 0; i < 3; i++ {
- specs, ids = createSpecs(sleep, sleep, sleep)
- _, cleanup, err = startContainers(conf, specs, ids)
- if err != nil {
- t.Fatalf("error starting containers: %v", err)
- }
- defer cleanup()
- }
-
- // Create an unrelated directory under root.
- dir := filepath.Join(conf.RootDir, "not-a-container")
- if err := os.MkdirAll(dir, 0755); err != nil {
- t.Fatalf("os.MkdirAll(%q)=%v", dir, err)
- }
-
- // Create a valid but empty container directory.
- randomCID := testutil.RandomContainerID()
- dir = filepath.Join(conf.RootDir, randomCID)
- if err := os.MkdirAll(dir, 0755); err != nil {
- t.Fatalf("os.MkdirAll(%q)=%v", dir, err)
- }
-
- // Load the sandbox and check that the correct containers were returned.
- id := wants[0].Sandbox.ID
- gots, err := loadSandbox(conf.RootDir, id)
- if err != nil {
- t.Fatalf("loadSandbox()=%v", err)
- }
- wantIDs := make(map[string]struct{})
- for _, want := range wants {
- wantIDs[want.ID] = struct{}{}
- }
- for _, got := range gots {
- if got.Sandbox.ID != id {
- t.Errorf("wrong sandbox ID, got: %v, want: %v", got.Sandbox.ID, id)
- }
- if _, ok := wantIDs[got.ID]; !ok {
- t.Errorf("wrong container ID, got: %v, wants: %v", got.ID, wantIDs)
- }
- delete(wantIDs, got.ID)
- }
- if len(wantIDs) != 0 {
- t.Errorf("containers not found: %v", wantIDs)
- }
-}
-
- // TestMultiContainerRunNonRoot checks that a child container can be configured
- // when running as a non-privileged user.
-func TestMultiContainerRunNonRoot(t *testing.T) {
- cmdRoot := []string{"/bin/sleep", "100"}
- cmdSub := []string{"/bin/true"}
- podSpecs, ids := createSpecs(cmdRoot, cmdSub)
-
- // User running inside container can't list '$TMP/blocked' and would fail to
- // mount it.
- blocked, err := ioutil.TempDir(testutil.TmpDir(), "blocked")
- if err != nil {
- t.Fatalf("ioutil.TempDir() failed: %v", err)
- }
- if err := os.Chmod(blocked, 0700); err != nil {
- t.Fatalf("os.MkDir(%q) failed: %v", blocked, err)
- }
- dir := path.Join(blocked, "test")
- if err := os.Mkdir(dir, 0755); err != nil {
- t.Fatalf("os.MkDir(%q) failed: %v", dir, err)
- }
-
- src, err := ioutil.TempDir(testutil.TmpDir(), "src")
- if err != nil {
- t.Fatalf("ioutil.TempDir() failed: %v", err)
- }
-
- // Set a random user/group with no access to "blocked" dir.
- podSpecs[1].Process.User.UID = 343
- podSpecs[1].Process.User.GID = 2401
- podSpecs[1].Process.Capabilities = nil
-
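- // The bind mount destination sits under 'blocked', which the sub-container's
- // non-privileged user cannot access; the child exiting successfully shows that
- // the mount was still set up.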
- podSpecs[1].Mounts = append(podSpecs[1].Mounts, specs.Mount{
- Destination: dir,
- Source: src,
- Type: "bind",
- })
-
- rootDir, cleanup, err := testutil.SetupRootDir()
- if err != nil {
- t.Fatalf("error creating root dir: %v", err)
- }
- defer cleanup()
-
- conf := testutil.TestConfig(t)
- conf.RootDir = rootDir
-
- pod, cleanup, err := startContainers(conf, podSpecs, ids)
- if err != nil {
- t.Fatalf("error starting containers: %v", err)
- }
- defer cleanup()
-
- // Once all containers are started, wait for the child container to exit.
- // This means that the volume was mounted properly.
- ws, err := pod[1].Wait()
- if err != nil {
- t.Fatalf("running child container: %v", err)
- }
- if !ws.Exited() || ws.ExitStatus() != 0 {
- t.Fatalf("child container failed, waitStatus: %v", ws)
- }
-}
-
-// TestMultiContainerHomeEnvDir tests that the HOME environment variable is set
-// for root containers, sub-containers, and exec'ed processes.
-func TestMultiContainerHomeEnvDir(t *testing.T) {
- // NOTE: Don't use overlay since we need changes to persist to the temp dir
- // outside the sandbox.
- for testName, conf := range configs(t, noOverlay...) {
- t.Run(testName, func(t *testing.T) {
-
- rootDir, cleanup, err := testutil.SetupRootDir()
- if err != nil {
- t.Fatalf("error creating root dir: %v", err)
- }
- defer cleanup()
- conf.RootDir = rootDir
-
- // Create temp files we can write the value of $HOME to.
- homeDirs := map[string]*os.File{}
- for _, name := range []string{"root", "sub", "exec"} {
- homeFile, err := ioutil.TempFile(testutil.TmpDir(), name)
- if err != nil {
- t.Fatalf("creating temp file: %v", err)
- }
- homeDirs[name] = homeFile
- }
-
- // We will sleep in the root container in order to ensure that the root
- // container doesn't terminate before sub containers can be created.
- rootCmd := []string{"/bin/sh", "-c", fmt.Sprintf(`printf "$HOME" > %s; sleep 1000`, homeDirs["root"].Name())}
- subCmd := []string{"/bin/sh", "-c", fmt.Sprintf(`printf "$HOME" > %s`, homeDirs["sub"].Name())}
- execCmd := fmt.Sprintf(`printf "$HOME" > %s`, homeDirs["exec"].Name())
-
- // Setup the containers, a root container and sub container.
- specConfig, ids := createSpecs(rootCmd, subCmd)
- containers, cleanup, err := startContainers(conf, specConfig, ids)
- if err != nil {
- t.Fatalf("error starting containers: %v", err)
- }
- defer cleanup()
-
- // Exec into the root container synchronously.
- if _, err := execute(conf, containers[0], "/bin/sh", "-c", execCmd); err != nil {
- t.Errorf("error executing %+v: %v", execCmd, err)
- }
-
- // Wait for the subcontainer to finish.
- _, err = containers[1].Wait()
- if err != nil {
- t.Errorf("wait on child container: %v", err)
- }
-
- // Wait until the root container has written $HOME and started sleeping.
- expectedProc := newProcessBuilder().Cmd("sleep").Process()
- if err := waitForProcess(containers[0], expectedProc); err != nil {
- t.Errorf("failed to wait for sleep to start: %v", err)
- }
-
- // Check the written files.
- for name, tmpFile := range homeDirs {
- dirBytes, err := ioutil.ReadAll(tmpFile)
- if err != nil {
- t.Fatalf("reading %s temp file: %v", name, err)
- }
- got := string(dirBytes)
-
- want := "/"
- if got != want {
- t.Errorf("%s $HOME incorrect: got: %q, want: %q", name, got, want)
- }
- }
-
- })
- }
-}
-
-func TestMultiContainerEvent(t *testing.T) {
- conf := testutil.TestConfig(t)
- rootDir, cleanup, err := testutil.SetupRootDir()
- if err != nil {
- t.Fatalf("error creating root dir: %v", err)
- }
- defer cleanup()
- conf.RootDir = rootDir
-
- // Setup the containers.
- sleep := []string{"/bin/sleep", "100"}
- busy := []string{"/bin/bash", "-c", "i=0 ; while true ; do (( i += 1 )) ; done"}
- quick := []string{"/bin/true"}
- podSpec, ids := createSpecs(sleep, busy, quick)
- containers, cleanup, err := startContainers(conf, podSpec, ids)
- if err != nil {
- t.Fatalf("error starting containers: %v", err)
- }
- defer cleanup()
-
- t.Logf("Running container sleep %s", containers[0].ID)
- t.Logf("Running container busy %s", containers[1].ID)
- t.Logf("Running container quick %s", containers[2].ID)
-
- // Wait for the last container to exit so that the process count checked
- // further below is stable.
- if ws, err := containers[2].Wait(); err != nil || ws != 0 {
- t.Fatalf("Container.Wait, status: %v, err: %v", ws, err)
- }
- expectedPL := []*control.Process{
- newProcessBuilder().Cmd("sleep").Process(),
- }
- if err := waitForProcessList(containers[0], expectedPL); err != nil {
- t.Errorf("failed to wait for sleep to start: %v", err)
- }
- expectedPL = []*control.Process{
- newProcessBuilder().Cmd("bash").Process(),
- }
- if err := waitForProcessList(containers[1], expectedPL); err != nil {
- t.Errorf("failed to wait for bash to start: %v", err)
- }
-
- // Check events for running containers.
- for _, cont := range containers[:2] {
- ret, err := cont.Event()
- if err != nil {
- t.Errorf("Container.Event(%q): %v", cont.ID, err)
- }
- evt := ret.Event
- if want := "stats"; evt.Type != want {
- t.Errorf("Wrong event type, cid: %q, want: %s, got: %s", cont.ID, want, evt.Type)
- }
- if cont.ID != evt.ID {
- t.Errorf("Wrong container ID, want: %s, got: %s", cont.ID, evt.ID)
- }
- // One process per remaining container.
- if got, want := evt.Data.Pids.Current, uint64(2); got != want {
- t.Errorf("Wrong number of PIDs, cid: %q, want: %d, got: %d", cont.ID, want, got)
- }
-
- // The exited container should always have a usage of zero.
- if exited := ret.ContainerUsage[containers[2].ID]; exited != 0 {
- t.Errorf("Exited container should report 0 CPU usage, got: %d", exited)
- }
- }
-
- // Check that CPU reported by busy container is higher than sleep.
- cb := func() error {
- sleepEvt, err := containers[0].Event()
- if err != nil {
- return &backoff.PermanentError{Err: err}
- }
- sleepUsage := sleepEvt.Event.Data.CPU.Usage.Total
-
- busyEvt, err := containers[1].Event()
- if err != nil {
- return &backoff.PermanentError{Err: err}
- }
- busyUsage := busyEvt.Event.Data.CPU.Usage.Total
-
- if busyUsage <= sleepUsage {
- t.Logf("Busy container usage lower than sleep (busy: %d, sleep: %d), retrying...", busyUsage, sleepUsage)
- return fmt.Errorf("busy container should have higher usage than sleep, busy: %d, sleep: %d", busyUsage, sleepUsage)
- }
- return nil
- }
- // Give time for busy container to run and use more CPU than sleep.
- if err := testutil.Poll(cb, 10*time.Second); err != nil {
- t.Fatal(err)
- }
-
- // Check that stopped and destroyed containers return an error.
- if err := containers[1].Destroy(); err != nil {
- t.Fatalf("container.Destroy: %v", err)
- }
- for _, cont := range containers[1:] {
- if _, err := cont.Event(); err == nil {
- t.Errorf("Container.Event() should have failed, cid: %q, state: %v", cont.ID, cont.Status)
- }
- }
-}
-
- // Tests that duplicate environment variables in the spec are merged into a single one.
-func TestDuplicateEnvVariable(t *testing.T) {
- conf := testutil.TestConfig(t)
-
- rootDir, cleanup, err := testutil.SetupRootDir()
- if err != nil {
- t.Fatalf("error creating root dir: %v", err)
- }
- defer cleanup()
- conf.RootDir = rootDir
-
- // Create files to dump `env` output.
- files := [3]*os.File{}
- for i := 0; i < len(files); i++ {
- var err error
- files[i], err = ioutil.TempFile(testutil.TmpDir(), "env-var-test")
- if err != nil {
- t.Fatalf("creating temp file: %v", err)
- }
- defer files[i].Close()
- defer os.Remove(files[i].Name())
- }
-
- // Setup the containers. Use root container to test exec too.
- cmd1 := fmt.Sprintf("env > %q; sleep 1000", files[0].Name())
- cmd2 := fmt.Sprintf("env > %q", files[1].Name())
- cmdExec := fmt.Sprintf("env > %q", files[2].Name())
- testSpecs, ids := createSpecs([]string{"/bin/sh", "-c", cmd1}, []string{"/bin/sh", "-c", cmd2})
- testSpecs[0].Process.Env = append(testSpecs[0].Process.Env, "VAR=foo", "VAR=bar")
- testSpecs[1].Process.Env = append(testSpecs[1].Process.Env, "VAR=foo", "VAR=bar")
-
- containers, cleanup, err := startContainers(conf, testSpecs, ids)
- if err != nil {
- t.Fatalf("error starting containers: %v", err)
- }
- defer cleanup()
-
- // Wait until after `env` has executed.
- expectedProc := newProcessBuilder().Cmd("sleep").Process()
- if err := waitForProcess(containers[0], expectedProc); err != nil {
- t.Errorf("failed to wait for sleep to start: %v", err)
- }
- if ws, err := containers[1].Wait(); err != nil {
- t.Errorf("failed to wait container 1: %v", err)
- } else if es := ws.ExitStatus(); es != 0 {
- t.Errorf("container %s exited with non-zero status: %v", containers[1].ID, es)
- }
-
- execArgs := &control.ExecArgs{
- Filename: "/bin/sh",
- Argv: []string{"/bin/sh", "-c", cmdExec},
- Envv: []string{"VAR=foo", "VAR=bar"},
- }
- if ws, err := containers[0].executeSync(conf, execArgs); err != nil || ws.ExitStatus() != 0 {
- t.Fatalf("exec failed, ws: %v, err: %v", ws, err)
- }
-
- // Now read the dumps and check that no environment variable is duplicated.
- for _, file := range files {
- out, err := ioutil.ReadAll(file)
- if err != nil {
- t.Fatal(err)
- }
- t.Logf("Checking env %q:\n%s", file.Name(), out)
- envs := make(map[string]string)
- for _, line := range strings.Split(string(out), "\n") {
- if len(line) == 0 {
- continue
- }
- envVar := strings.SplitN(line, "=", 2)
- if len(envVar) != 2 {
- t.Fatalf("invalid env variable: %s", line)
- }
- key := envVar[0]
- if val, ok := envs[key]; ok {
- t.Errorf("env variable %q is duplicated: %q and %q", key, val, envVar[1])
- }
- envs[key] = envVar[1]
- }
- if _, ok := envs["VAR"]; !ok {
- t.Errorf("variable VAR missing: %v", envs)
- }
- }
-}
diff --git a/runsc/container/shared_volume_test.go b/runsc/container/shared_volume_test.go
deleted file mode 100644
index f16b2bd02..000000000
--- a/runsc/container/shared_volume_test.go
+++ /dev/null
@@ -1,265 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package container
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "testing"
-
- "gvisor.dev/gvisor/pkg/sentry/control"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/test/testutil"
- "gvisor.dev/gvisor/runsc/config"
-)
-
-// TestSharedVolume checks that modifications to a volume mount are propagated
-// into and out of the sandbox.
-func TestSharedVolume(t *testing.T) {
- conf := testutil.TestConfig(t)
- conf.FileAccess = config.FileAccessShared
-
- // Main process just sleeps. We will use "exec" to probe the state of
- // the filesystem.
- spec := testutil.NewSpecWithArgs("sleep", "1000")
-
- dir, err := ioutil.TempDir(testutil.TmpDir(), "shared-volume-test")
- if err != nil {
- t.Fatalf("TempDir failed: %v", err)
- }
-
- _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- // Create and start the container.
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- }
- c, err := New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- defer c.Destroy()
- if err := c.Start(conf); err != nil {
- t.Fatalf("error starting container: %v", err)
- }
-
- // File that will be used to check consistency inside/outside sandbox.
- filename := filepath.Join(dir, "file")
-
- // File does not exist yet. Checking for it from inside the sandbox should fail.
- argsTestFile := &control.ExecArgs{
- Filename: "/usr/bin/test",
- Argv: []string{"test", "-f", filename},
- }
- if ws, err := c.executeSync(conf, argsTestFile); err != nil {
- t.Fatalf("unexpected error testing file %q: %v", filename, err)
- } else if ws.ExitStatus() == 0 {
- t.Errorf("test %q exited with code %v, wanted not zero", ws.ExitStatus(), err)
- }
-
- // Create the file from outside of the sandbox.
- if err := ioutil.WriteFile(filename, []byte("foobar"), 0777); err != nil {
- t.Fatalf("error writing to file %q: %v", filename, err)
- }
-
- // Now we should be able to test the file from within the sandbox.
- if ws, err := c.executeSync(conf, argsTestFile); err != nil {
- t.Fatalf("unexpected error testing file %q: %v", filename, err)
- } else if ws.ExitStatus() != 0 {
- t.Errorf("test %q exited with code %v, wanted zero", filename, ws.ExitStatus())
- }
-
- // Rename the file from outside of the sandbox.
- newFilename := filepath.Join(dir, "newfile")
- if err := os.Rename(filename, newFilename); err != nil {
- t.Fatalf("os.Rename(%q, %q) failed: %v", filename, newFilename, err)
- }
-
- // File should no longer exist at the old path within the sandbox.
- if ws, err := c.executeSync(conf, argsTestFile); err != nil {
- t.Fatalf("unexpected error testing file %q: %v", filename, err)
- } else if ws.ExitStatus() == 0 {
- t.Errorf("test %q exited with code %v, wanted not zero", filename, ws.ExitStatus())
- }
-
- // We should be able to test the new filename from within the sandbox.
- argsTestNewFile := &control.ExecArgs{
- Filename: "/usr/bin/test",
- Argv: []string{"test", "-f", newFilename},
- }
- if ws, err := c.executeSync(conf, argsTestNewFile); err != nil {
- t.Fatalf("unexpected error testing file %q: %v", newFilename, err)
- } else if ws.ExitStatus() != 0 {
- t.Errorf("test %q exited with code %v, wanted zero", newFilename, ws.ExitStatus())
- }
-
- // Delete the renamed file from outside of the sandbox.
- if err := os.Remove(newFilename); err != nil {
- t.Fatalf("error removing file %q: %v", filename, err)
- }
-
- // The removed file should no longer exist within the sandbox.
- if ws, err := c.executeSync(conf, argsTestNewFile); err != nil {
- t.Fatalf("unexpected error testing file %q: %v", newFilename, err)
- } else if ws.ExitStatus() == 0 {
- t.Errorf("test %q exited with code %v, wanted not zero", newFilename, ws.ExitStatus())
- }
-
- // Now create the file from WITHIN the sandbox.
- argsTouch := &control.ExecArgs{
- Filename: "/usr/bin/touch",
- Argv: []string{"touch", filename},
- KUID: auth.KUID(os.Getuid()),
- KGID: auth.KGID(os.Getgid()),
- }
- if ws, err := c.executeSync(conf, argsTouch); err != nil {
- t.Fatalf("unexpected error touching file %q: %v", filename, err)
- } else if ws.ExitStatus() != 0 {
- t.Errorf("touch %q exited with code %v, wanted zero", filename, ws.ExitStatus())
- }
-
- // File should exist outside the sandbox.
- if _, err := os.Stat(filename); err != nil {
- t.Errorf("stat %q got error %v, wanted nil", filename, err)
- }
-
- // Delete the file from within the sandbox.
- argsRemove := &control.ExecArgs{
- Filename: "/bin/rm",
- Argv: []string{"rm", filename},
- }
- if ws, err := c.executeSync(conf, argsRemove); err != nil {
- t.Fatalf("unexpected error removing file %q: %v", filename, err)
- } else if ws.ExitStatus() != 0 {
- t.Errorf("remove %q exited with code %v, wanted zero", filename, ws.ExitStatus())
- }
-
- // File should not exist outside the sandbox.
- if _, err := os.Stat(filename); !os.IsNotExist(err) {
- t.Errorf("stat %q got error %v, wanted ErrNotExist", filename, err)
- }
-}
-
-func checkFile(conf *config.Config, c *Container, filename string, want []byte) error {
- cpy := filename + ".copy"
- if _, err := execute(conf, c, "/bin/cp", "-f", filename, cpy); err != nil {
- return fmt.Errorf("unexpected error copying file %q to %q: %v", filename, cpy, err)
- }
- got, err := ioutil.ReadFile(cpy)
- if err != nil {
- return fmt.Errorf("error reading file %q: %v", filename, err)
- }
- if !bytes.Equal(got, want) {
- return fmt.Errorf("file content inside the sandbox is wrong, got: %q, want: %q", got, want)
- }
- return nil
-}
-
-// TestSharedVolumeFile tests that changes to file content outside the sandbox
-// is reflected inside.
-func TestSharedVolumeFile(t *testing.T) {
- conf := testutil.TestConfig(t)
- conf.FileAccess = config.FileAccessShared
-
- // Main process just sleeps. We will use "exec" to probe the state of
- // the filesystem.
- spec := testutil.NewSpecWithArgs("sleep", "1000")
-
- dir, err := ioutil.TempDir(testutil.TmpDir(), "shared-volume-test")
- if err != nil {
- t.Fatalf("TempDir failed: %v", err)
- }
-
- _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- // Create and start the container.
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- }
- c, err := New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- defer c.Destroy()
- if err := c.Start(conf); err != nil {
- t.Fatalf("error starting container: %v", err)
- }
-
- // File that will be used to check consistency inside/outside sandbox.
- filename := filepath.Join(dir, "file")
-
- // Write file from outside the container and check that the same content is
- // read inside.
- want := []byte("host-")
- if err := ioutil.WriteFile(filename, []byte(want), 0666); err != nil {
- t.Fatalf("Error writing to %q: %v", filename, err)
- }
- if err := checkFile(conf, c, filename, want); err != nil {
- t.Fatal(err.Error())
- }
-
- // Append to file inside the container and check that content is not lost.
- if _, err := execute(conf, c, "/bin/bash", "-c", "echo -n sandbox- >> "+filename); err != nil {
- t.Fatalf("unexpected error appending file %q: %v", filename, err)
- }
- want = []byte("host-sandbox-")
- if err := checkFile(conf, c, filename, want); err != nil {
- t.Fatal(err.Error())
- }
-
- // Write again from outside the container and check that the same content is
- // read inside.
- f, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0)
- if err != nil {
- t.Fatalf("Error openning file %q: %v", filename, err)
- }
- defer f.Close()
- if _, err := f.Write([]byte("host")); err != nil {
- t.Fatalf("Error writing to file %q: %v", filename, err)
- }
- want = []byte("host-sandbox-host")
- if err := checkFile(conf, c, filename, want); err != nil {
- t.Fatal(err.Error())
- }
-
- // Shrink file outside and check that the same content is read inside.
- if err := f.Truncate(5); err != nil {
- t.Fatalf("Error truncating file %q: %v", filename, err)
- }
- want = want[:5]
- if err := checkFile(conf, c, filename, want); err != nil {
- t.Fatal(err.Error())
- }
-}
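
The shared-volume tests above probe the sandbox's view of the filesystem by exec'ing "test -f" inside the container and checking the exit status. The standalone sketch below illustrates that probe pattern against the host using os/exec instead of the sandbox's control.ExecArgs API; the fileVisible helper is illustrative only and is not part of runsc.

// A minimal, host-only sketch of the "exec and check exit status" probe
// pattern used by TestSharedVolume. It is not the runsc implementation.
package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
)

// fileVisible reports whether "test -f path" exits with status zero.
func fileVisible(path string) (bool, error) {
	err := exec.Command("/usr/bin/test", "-f", path).Run()
	if err == nil {
		return true, nil
	}
	if _, ok := err.(*exec.ExitError); ok {
		return false, nil // Non-zero exit: the file is not visible.
	}
	return false, err // Running the command itself failed.
}

func main() {
	dir, err := os.MkdirTemp("", "shared-volume-sketch")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	name := filepath.Join(dir, "file")
	before, _ := fileVisible(name)
	if err := os.WriteFile(name, []byte("foobar"), 0o666); err != nil {
		panic(err)
	}
	after, _ := fileVisible(name)
	fmt.Printf("before create: %v, after create: %v\n", before, after)
}
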
diff --git a/runsc/flag/BUILD b/runsc/flag/BUILD
deleted file mode 100644
index 5cb7604a8..000000000
--- a/runsc/flag/BUILD
+++ /dev/null
@@ -1,9 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "flag",
- srcs = ["flag.go"],
- visibility = ["//:sandbox"],
-)
diff --git a/runsc/flag/flag_state_autogen.go b/runsc/flag/flag_state_autogen.go
new file mode 100644
index 000000000..5f6c0f28e
--- /dev/null
+++ b/runsc/flag/flag_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build go1.1
+
+package flag
diff --git a/runsc/fsgofer/BUILD b/runsc/fsgofer/BUILD
deleted file mode 100644
index 3280b74fe..000000000
--- a/runsc/fsgofer/BUILD
+++ /dev/null
@@ -1,39 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "fsgofer",
- srcs = [
- "fsgofer.go",
- "fsgofer_amd64_unsafe.go",
- "fsgofer_arm64_unsafe.go",
- "fsgofer_unsafe.go",
- ],
- visibility = ["//runsc:__subpackages__"],
- deps = [
- "//pkg/cleanup",
- "//pkg/fd",
- "//pkg/log",
- "//pkg/p9",
- "//pkg/sync",
- "//pkg/syserr",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "fsgofer_test",
- size = "small",
- srcs = ["fsgofer_test.go"],
- library = ":fsgofer",
- deps = [
- "//pkg/fd",
- "//pkg/log",
- "//pkg/p9",
- "//pkg/test/testutil",
- "//runsc/specutils",
- "@com_github_syndtr_gocapability//capability:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/runsc/fsgofer/filter/BUILD b/runsc/fsgofer/filter/BUILD
deleted file mode 100644
index 82b48ef32..000000000
--- a/runsc/fsgofer/filter/BUILD
+++ /dev/null
@@ -1,26 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "filter",
- srcs = [
- "config.go",
- "config_amd64.go",
- "config_arm64.go",
- "extra_filters.go",
- "extra_filters_msan.go",
- "extra_filters_race.go",
- "filter.go",
- ],
- visibility = [
- "//runsc:__subpackages__",
- ],
- deps = [
- "//pkg/abi/linux",
- "//pkg/flipcall",
- "//pkg/log",
- "//pkg/seccomp",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/runsc/fsgofer/filter/filter_amd64_state_autogen.go b/runsc/fsgofer/filter/filter_amd64_state_autogen.go
new file mode 100644
index 000000000..0f27e5568
--- /dev/null
+++ b/runsc/fsgofer/filter/filter_amd64_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build amd64
+
+package filter
diff --git a/runsc/fsgofer/filter/filter_arm64_state_autogen.go b/runsc/fsgofer/filter/filter_arm64_state_autogen.go
new file mode 100644
index 000000000..e87cf5af7
--- /dev/null
+++ b/runsc/fsgofer/filter/filter_arm64_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build arm64
+
+package filter
diff --git a/runsc/fsgofer/filter/filter_race_state_autogen.go b/runsc/fsgofer/filter/filter_race_state_autogen.go
new file mode 100644
index 000000000..c4a858e80
--- /dev/null
+++ b/runsc/fsgofer/filter/filter_race_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build race
+
+package filter
diff --git a/runsc/fsgofer/filter/filter_state_autogen.go b/runsc/fsgofer/filter/filter_state_autogen.go
new file mode 100644
index 000000000..41ff99424
--- /dev/null
+++ b/runsc/fsgofer/filter/filter_state_autogen.go
@@ -0,0 +1,6 @@
+// automatically generated by stateify.
+
+// +build !msan,!race
+// +build msan
+
+package filter
diff --git a/runsc/fsgofer/fsgofer_amd64_unsafe_state_autogen.go b/runsc/fsgofer/fsgofer_amd64_unsafe_state_autogen.go
new file mode 100644
index 000000000..df6721aaa
--- /dev/null
+++ b/runsc/fsgofer/fsgofer_amd64_unsafe_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build amd64
+
+package fsgofer
diff --git a/runsc/fsgofer/fsgofer_arm64_unsafe_state_autogen.go b/runsc/fsgofer/fsgofer_arm64_unsafe_state_autogen.go
new file mode 100644
index 000000000..d2a18c61c
--- /dev/null
+++ b/runsc/fsgofer/fsgofer_arm64_unsafe_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build arm64
+
+package fsgofer
diff --git a/runsc/fsgofer/fsgofer_state_autogen.go b/runsc/fsgofer/fsgofer_state_autogen.go
new file mode 100644
index 000000000..d2f978fb9
--- /dev/null
+++ b/runsc/fsgofer/fsgofer_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package fsgofer
diff --git a/runsc/fsgofer/fsgofer_test.go b/runsc/fsgofer/fsgofer_test.go
deleted file mode 100644
index ee6cc97df..000000000
--- a/runsc/fsgofer/fsgofer_test.go
+++ /dev/null
@@ -1,1139 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fsgofer
-
-import (
- "fmt"
- "io/ioutil"
- "net"
- "os"
- "path"
- "path/filepath"
- "testing"
-
- "github.com/syndtr/gocapability/capability"
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/fd"
- "gvisor.dev/gvisor/pkg/log"
- "gvisor.dev/gvisor/pkg/p9"
- "gvisor.dev/gvisor/pkg/test/testutil"
- "gvisor.dev/gvisor/runsc/specutils"
-)
-
-// nobody is the standard UID/GID for the nobody user/group.
-const nobody = 65534
-
-var allOpenFlags = []p9.OpenFlags{p9.ReadOnly, p9.WriteOnly, p9.ReadWrite}
-
-var (
- allTypes = []uint32{unix.S_IFREG, unix.S_IFDIR, unix.S_IFLNK}
-
- // allConfs is set in init().
- allConfs []Config
-
- rwConfs = []Config{{ROMount: false}}
- roConfs = []Config{{ROMount: true}}
-)
-
-func init() {
- log.SetLevel(log.Debug)
-
- allConfs = append(allConfs, rwConfs...)
- allConfs = append(allConfs, roConfs...)
-
- if err := OpenProcSelfFD(); err != nil {
- panic(err)
- }
-}
-
-func configTestName(conf *Config) string {
- if conf.ROMount {
- return "ROMount"
- }
- return "RWMount"
-}
-
-func testReadWrite(f p9.File, flags p9.OpenFlags, content []byte) error {
- want := make([]byte, len(content))
- copy(want, content)
-
- b := []byte("test-1-2-3")
- w, err := f.WriteAt(b, uint64(len(content)))
- if flags == p9.WriteOnly || flags == p9.ReadWrite {
- if err != nil {
- return fmt.Errorf("WriteAt(): %v", err)
- }
- if w != len(b) {
- return fmt.Errorf("WriteAt() was partial, got: %d, want: %d", w, len(b))
- }
- want = append(want, b...)
- } else {
- if e, ok := err.(unix.Errno); !ok || e != unix.EBADF {
- return fmt.Errorf("WriteAt() should have failed, got: %d, want: EBADFD", err)
- }
- }
-
- rBuf := make([]byte, len(want))
- r, err := f.ReadAt(rBuf, 0)
- if flags == p9.ReadOnly || flags == p9.ReadWrite {
- if err != nil {
- return fmt.Errorf("ReadAt(): %v", err)
- }
- if r != len(rBuf) {
- return fmt.Errorf("ReadAt() was partial, got: %d, want: %d", r, len(rBuf))
- }
- if string(rBuf) != string(want) {
- return fmt.Errorf("ReadAt() wrong data, got: %s, want: %s", string(rBuf), want)
- }
- } else {
- if e, ok := err.(unix.Errno); !ok || e != unix.EBADF {
- return fmt.Errorf("ReadAt() should have failed, got: %d, want: EBADFD", err)
- }
- }
- return nil
-}
-
-type state struct {
- root *localFile
- file *localFile
- conf Config
- fileType uint32
-}
-
-func (s state) String() string {
- return fmt.Sprintf("type(%v)", s.fileType)
-}
-
-func typeName(fileType uint32) string {
- switch fileType {
- case unix.S_IFREG:
- return "file"
- case unix.S_IFDIR:
- return "directory"
- case unix.S_IFLNK:
- return "symlink"
- default:
- panic(fmt.Sprintf("invalid file type for test: %d", fileType))
- }
-}
-
-func runAll(t *testing.T, test func(*testing.T, state)) {
- runCustom(t, allTypes, allConfs, test)
-}
-
-func runCustom(t *testing.T, types []uint32, confs []Config, test func(*testing.T, state)) {
- for _, c := range confs {
- for _, ft := range types {
- name := fmt.Sprintf("%s/%s", configTestName(&c), typeName(ft))
- t.Run(name, func(t *testing.T) {
- path, name, err := setup(ft)
- if err != nil {
- t.Fatalf("%v", err)
- }
- defer os.RemoveAll(path)
-
- a, err := NewAttachPoint(path, c)
- if err != nil {
- t.Fatalf("NewAttachPoint failed: %v", err)
- }
- root, err := a.Attach()
- if err != nil {
- t.Fatalf("Attach failed, err: %v", err)
- }
-
- _, file, err := root.Walk([]string{name})
- if err != nil {
- root.Close()
- t.Fatalf("root.Walk({%q}) failed, err: %v", "symlink", err)
- }
-
- st := state{
- root: root.(*localFile),
- file: file.(*localFile),
- conf: c,
- fileType: ft,
- }
- test(t, st)
- file.Close()
- root.Close()
- })
- }
- }
-}
-
-func setup(fileType uint32) (string, string, error) {
- path, err := ioutil.TempDir(testutil.TmpDir(), "root-")
- if err != nil {
- return "", "", fmt.Errorf("ioutil.TempDir() failed, err: %v", err)
- }
-
- // First attach with writable configuration to setup tree.
- a, err := NewAttachPoint(path, Config{})
- if err != nil {
- return "", "", err
- }
- root, err := a.Attach()
- if err != nil {
- return "", "", fmt.Errorf("attach failed, err: %v", err)
- }
- defer root.Close()
-
- var name string
- switch fileType {
- case unix.S_IFREG:
- name = "file"
- fd, f, _, _, err := root.Create(name, p9.ReadWrite, 0777, p9.UID(os.Getuid()), p9.GID(os.Getgid()))
- if err != nil {
- return "", "", fmt.Errorf("createFile(root, %q) failed, err: %v", "test", err)
- }
- if fd != nil {
- fd.Close()
- }
- defer f.Close()
- case unix.S_IFDIR:
- name = "dir"
- if _, err := root.Mkdir(name, 0777, p9.UID(os.Getuid()), p9.GID(os.Getgid())); err != nil {
- return "", "", fmt.Errorf("root.MkDir(%q) failed, err: %v", name, err)
- }
- case unix.S_IFLNK:
- name = "symlink"
- if _, err := root.Symlink("/some/target", name, p9.UID(os.Getuid()), p9.GID(os.Getgid())); err != nil {
- return "", "", fmt.Errorf("root.Symlink(%q) failed, err: %v", name, err)
- }
- default:
- panic(fmt.Sprintf("unknown file type %v", fileType))
- }
- return path, name, nil
-}
-
-func createFile(dir *localFile, name string) (*localFile, error) {
- _, f, _, _, err := dir.Create(name, p9.ReadWrite, 0777, p9.UID(os.Getuid()), p9.GID(os.Getgid()))
- if err != nil {
- return nil, err
- }
- return f.(*localFile), nil
-}
-
-func TestReadWrite(t *testing.T) {
- runCustom(t, []uint32{unix.S_IFDIR}, rwConfs, func(t *testing.T, s state) {
- child, err := createFile(s.file, "test")
- if err != nil {
- t.Fatalf("%v: createFile() failed, err: %v", s, err)
- }
- defer child.Close()
- want := []byte("foobar")
- w, err := child.WriteAt(want, 0)
- if err != nil {
- t.Fatalf("%v: Write() failed, err: %v", s, err)
- }
- if w != len(want) {
- t.Fatalf("%v: Write() was partial, got: %d, expected: %d", s, w, len(want))
- }
- for _, flags := range allOpenFlags {
- _, l, err := s.file.Walk([]string{"test"})
- if err != nil {
- t.Fatalf("%v: Walk(%s) failed, err: %v", s, "test", err)
- }
- fd, _, _, err := l.Open(flags)
- if err != nil {
- t.Fatalf("%v: Open(%v) failed, err: %v", s, flags, err)
- }
- if fd != nil {
- defer fd.Close()
- }
- if err := testReadWrite(l, flags, want); err != nil {
- t.Fatalf("%v: testReadWrite(%v) failed: %v", s, flags, err)
- }
- }
- })
-}
-
-func TestCreate(t *testing.T) {
- runCustom(t, []uint32{unix.S_IFDIR}, rwConfs, func(t *testing.T, s state) {
- for i, flags := range allOpenFlags {
- _, l, _, _, err := s.file.Create(fmt.Sprintf("test-%d", i), flags, 0777, p9.UID(os.Getuid()), p9.GID(os.Getgid()))
- if err != nil {
- t.Fatalf("%v, %v: WriteAt() failed, err: %v", s, flags, err)
- }
-
- if err := testReadWrite(l, flags, nil); err != nil {
- t.Fatalf("%v: testReadWrite(%v) failed: %v", s, flags, err)
- }
- }
- })
-}
-
-func checkIDs(f p9.File, uid, gid int) error {
- _, _, stat, err := f.GetAttr(p9.AttrMask{UID: true, GID: true})
- if err != nil {
- return fmt.Errorf("GetAttr() failed, err: %v", err)
- }
- if want := p9.UID(uid); stat.UID != want {
- return fmt.Errorf("wrong UID, want: %v, got: %v", want, stat.UID)
- }
- if want := p9.GID(gid); stat.GID != want {
- return fmt.Errorf("wrong GID, want: %v, got: %v", want, stat.GID)
- }
- return nil
-}
-
-// TestCreateSetGID checks that files/dirs/symlinks are created with the
-// proper owner when the parent directory has setgid set.
-func TestCreateSetGID(t *testing.T) {
- if !specutils.HasCapabilities(capability.CAP_CHOWN) {
- t.Skipf("Test requires CAP_CHOWN")
- }
-
- runCustom(t, []uint32{unix.S_IFDIR}, rwConfs, func(t *testing.T, s state) {
- // Change group and set setgid to the parent dir.
- if err := unix.Chown(s.file.hostPath, os.Getuid(), nobody); err != nil {
- t.Fatalf("Chown() failed: %v", err)
- }
- if err := unix.Chmod(s.file.hostPath, 02777); err != nil {
- t.Fatalf("Chmod() failed: %v", err)
- }
-
- t.Run("create", func(t *testing.T) {
- _, l, _, _, err := s.file.Create("test", p9.ReadOnly, 0777, p9.UID(os.Getuid()), p9.GID(os.Getgid()))
- if err != nil {
- t.Fatalf("WriteAt() failed: %v", err)
- }
- defer l.Close()
- if err := checkIDs(l, os.Getuid(), os.Getgid()); err != nil {
- t.Error(err)
- }
- })
-
- t.Run("mkdir", func(t *testing.T) {
- _, err := s.file.Mkdir("test-dir", 0777, p9.UID(os.Getuid()), p9.GID(os.Getgid()))
- if err != nil {
- t.Fatalf("WriteAt() failed: %v", err)
- }
- _, l, err := s.file.Walk([]string{"test-dir"})
- if err != nil {
- t.Fatalf("Walk() failed: %v", err)
- }
- defer l.Close()
- if err := checkIDs(l, os.Getuid(), os.Getgid()); err != nil {
- t.Error(err)
- }
- })
-
- t.Run("symlink", func(t *testing.T) {
- if _, err := s.file.Symlink("/some/target", "symlink", p9.UID(os.Getuid()), p9.GID(os.Getgid())); err != nil {
- t.Fatalf("Symlink() failed: %v", err)
- }
- _, l, err := s.file.Walk([]string{"symlink"})
- if err != nil {
- t.Fatalf("Walk() failed, err: %v", err)
- }
- defer l.Close()
- if err := checkIDs(l, os.Getuid(), os.Getgid()); err != nil {
- t.Error(err)
- }
- })
-
- t.Run("mknod", func(t *testing.T) {
- if _, err := s.file.Mknod("nod", p9.ModeRegular|0777, 0, 0, p9.UID(os.Getuid()), p9.GID(os.Getgid())); err != nil {
- t.Fatalf("Mknod() failed: %v", err)
- }
- _, l, err := s.file.Walk([]string{"nod"})
- if err != nil {
- t.Fatalf("Walk() failed, err: %v", err)
- }
- defer l.Close()
- if err := checkIDs(l, os.Getuid(), os.Getgid()); err != nil {
- t.Error(err)
- }
- })
- })
-}
-
-// TestReadWriteDup tests that a file opened in any mode can be dup'ed and
-// reopened in any other mode.
-func TestReadWriteDup(t *testing.T) {
- runCustom(t, []uint32{unix.S_IFDIR}, rwConfs, func(t *testing.T, s state) {
- child, err := createFile(s.file, "test")
- if err != nil {
- t.Fatalf("%v: createFile() failed, err: %v", s, err)
- }
- defer child.Close()
- want := []byte("foobar")
- w, err := child.WriteAt(want, 0)
- if err != nil {
- t.Fatalf("%v: Write() failed, err: %v", s, err)
- }
- if w != len(want) {
- t.Fatalf("%v: Write() was partial, got: %d, expected: %d", s, w, len(want))
- }
- for _, flags := range allOpenFlags {
- _, l, err := s.file.Walk([]string{"test"})
- if err != nil {
- t.Fatalf("%v: Walk(%s) failed, err: %v", s, "test", err)
- }
- defer l.Close()
- if _, _, _, err := l.Open(flags); err != nil {
- t.Fatalf("%v: Open(%v) failed, err: %v", s, flags, err)
- }
- for _, dupFlags := range allOpenFlags {
- t.Logf("Original flags: %v, dup flags: %v", flags, dupFlags)
- _, dup, err := l.Walk([]string{})
- if err != nil {
- t.Fatalf("%v: Walk(<empty>) failed: %v", s, err)
- }
- defer dup.Close()
- fd, _, _, err := dup.Open(dupFlags)
- if err != nil {
- t.Fatalf("%v: Open(%v) failed: %v", s, flags, err)
- }
- if fd != nil {
- defer fd.Close()
- }
- if err := testReadWrite(dup, dupFlags, want); err != nil {
- t.Fatalf("%v: testReadWrite(%v) failed: %v", s, dupFlags, err)
- }
- }
- }
- })
-}
-
-func TestUnopened(t *testing.T) {
- runCustom(t, []uint32{unix.S_IFREG}, allConfs, func(t *testing.T, s state) {
- b := []byte("foobar")
- if _, err := s.file.WriteAt(b, 0); err != unix.EBADF {
- t.Errorf("%v: WriteAt() should have failed, got: %v, expected: unix.EBADF", s, err)
- }
- if _, err := s.file.ReadAt(b, 0); err != unix.EBADF {
- t.Errorf("%v: ReadAt() should have failed, got: %v, expected: unix.EBADF", s, err)
- }
- if _, err := s.file.Readdir(0, 100); err != unix.EBADF {
- t.Errorf("%v: Readdir() should have failed, got: %v, expected: unix.EBADF", s, err)
- }
- if err := s.file.FSync(); err != unix.EBADF {
- t.Errorf("%v: FSync() should have failed, got: %v, expected: unix.EBADF", s, err)
- }
- })
-}
-
-// TestOpenOPath is a regression test to ensure that a file that cannot be
-// opened for read can still be opened. The bug happened because the control
-// file was opened with O_PATH, but Open() was not checking for that and was
-// allowing the control file to be reused.
-func TestOpenOPath(t *testing.T) {
- runCustom(t, []uint32{unix.S_IFREG}, rwConfs, func(t *testing.T, s state) {
- // First remove all permissions on the file.
- if err := s.file.SetAttr(p9.SetAttrMask{Permissions: true}, p9.SetAttr{Permissions: p9.FileMode(0)}); err != nil {
- t.Fatalf("SetAttr(): %v", err)
- }
- // Then walk to the file again to open a new control file.
- filename := filepath.Base(s.file.hostPath)
- _, newFile, err := s.root.Walk([]string{filename})
- if err != nil {
- t.Fatalf("root.Walk(%q): %v", filename, err)
- }
-
- if newFile.(*localFile).controlReadable {
- t.Fatalf("control file didn't open with O_PATH: %+v", newFile)
- }
- if _, _, _, err := newFile.Open(p9.ReadOnly); err != unix.EACCES {
- t.Fatalf("Open() should have failed, got: %v, wanted: EACCES", err)
- }
- })
-}
-
-func SetGetAttr(l *localFile, valid p9.SetAttrMask, attr p9.SetAttr) (p9.Attr, error) {
- if err := l.SetAttr(valid, attr); err != nil {
- return p9.Attr{}, err
- }
- _, _, a, err := l.GetAttr(p9.AttrMask{})
- if err != nil {
- return p9.Attr{}, err
- }
- return a, nil
-}
-
-func TestSetAttrPerm(t *testing.T) {
- runCustom(t, allTypes, rwConfs, func(t *testing.T, s state) {
- valid := p9.SetAttrMask{Permissions: true}
- attr := p9.SetAttr{Permissions: 0777}
- got, err := SetGetAttr(s.file, valid, attr)
- if s.fileType == unix.S_IFLNK {
- if err == nil {
- t.Fatalf("%v: SetGetAttr(valid, %v) should have failed", s, attr.Permissions)
- }
- } else {
- if err != nil {
- t.Fatalf("%v: SetGetAttr(valid, %v) failed, err: %v", s, attr.Permissions, err)
- }
- if got.Mode.Permissions() != attr.Permissions {
- t.Errorf("%v: wrong permission, got: %v, expected: %v", s, got.Mode.Permissions(), attr.Permissions)
- }
- }
- })
-}
-
-func TestSetAttrSize(t *testing.T) {
- runCustom(t, allTypes, rwConfs, func(t *testing.T, s state) {
- for _, size := range []uint64{1024, 0, 1024 * 1024} {
- valid := p9.SetAttrMask{Size: true}
- attr := p9.SetAttr{Size: size}
- got, err := SetGetAttr(s.file, valid, attr)
- if s.fileType == unix.S_IFLNK || s.fileType == unix.S_IFDIR {
- if err == nil {
- t.Fatalf("%v: SetGetAttr(valid, %v) should have failed", s, attr.Permissions)
- }
- // Run for one size only; they will all fail the same way.
- return
- }
- if err != nil {
- t.Fatalf("%v: SetGetAttr(valid, %v) failed, err: %v", s, attr.Size, err)
- }
- if got.Size != size {
- t.Errorf("%v: wrong size, got: %v, expected: %v", s, got.Size, size)
- }
- }
- })
-}
-
-func TestSetAttrTime(t *testing.T) {
- runCustom(t, allTypes, rwConfs, func(t *testing.T, s state) {
- valid := p9.SetAttrMask{ATime: true, ATimeNotSystemTime: true}
- attr := p9.SetAttr{ATimeSeconds: 123, ATimeNanoSeconds: 456}
- got, err := SetGetAttr(s.file, valid, attr)
- if err != nil {
- t.Fatalf("%v: SetGetAttr(valid, %v:%v) failed, err: %v", s, attr.ATimeSeconds, attr.ATimeNanoSeconds, err)
- }
- if got.ATimeSeconds != 123 {
- t.Errorf("%v: wrong ATimeSeconds, got: %v, expected: %v", s, got.ATimeSeconds, 123)
- }
- if got.ATimeNanoSeconds != 456 {
- t.Errorf("%v: wrong ATimeNanoSeconds, got: %v, expected: %v", s, got.ATimeNanoSeconds, 456)
- }
-
- valid = p9.SetAttrMask{MTime: true, MTimeNotSystemTime: true}
- attr = p9.SetAttr{MTimeSeconds: 789, MTimeNanoSeconds: 012}
- got, err = SetGetAttr(s.file, valid, attr)
- if err != nil {
- t.Fatalf("%v: SetGetAttr(valid, %v:%v) failed, err: %v", s, attr.MTimeSeconds, attr.MTimeNanoSeconds, err)
- }
- if got.MTimeSeconds != 789 {
- t.Errorf("%v: wrong MTimeSeconds, got: %v, expected: %v", s, got.MTimeSeconds, 789)
- }
- if got.MTimeNanoSeconds != 012 {
- t.Errorf("%v: wrong MTimeNanoSeconds, got: %v, expected: %v", s, got.MTimeNanoSeconds, 012)
- }
- })
-}
-
-func TestSetAttrOwner(t *testing.T) {
- if !specutils.HasCapabilities(capability.CAP_CHOWN) {
- t.Skipf("SetAttr(owner) test requires CAP_CHOWN, running as %d", os.Getuid())
- }
-
- runCustom(t, allTypes, rwConfs, func(t *testing.T, s state) {
- newUID := os.Getuid() + 1
- valid := p9.SetAttrMask{UID: true}
- attr := p9.SetAttr{UID: p9.UID(newUID)}
- got, err := SetGetAttr(s.file, valid, attr)
- if err != nil {
- t.Fatalf("%v: SetGetAttr(valid, %v) failed, err: %v", s, attr.UID, err)
- }
- if got.UID != p9.UID(newUID) {
- t.Errorf("%v: wrong uid, got: %v, expected: %v", s, got.UID, newUID)
- }
- })
-}
-
-func SetGetXattr(l *localFile, name string, value string) error {
- if err := l.SetXattr(name, value, 0 /* flags */); err != nil {
- return err
- }
- ret, err := l.GetXattr(name, uint64(len(value)))
- if err != nil {
- return err
- }
- if ret != value {
- return fmt.Errorf("got value %s, want %s", ret, value)
- }
- return nil
-}
-
-func TestSetGetDisabledXattr(t *testing.T) {
- runCustom(t, []uint32{unix.S_IFREG}, rwConfs, func(t *testing.T, s state) {
- name := "user.merkle.offset"
- value := "tmp"
- err := SetGetXattr(s.file, name, value)
- if err == nil {
- t.Fatalf("%v: SetGetXattr should have failed", s)
- }
- })
-}
-
-func TestSetGetXattr(t *testing.T) {
- runCustom(t, []uint32{unix.S_IFREG}, []Config{{ROMount: false, EnableVerityXattr: true}}, func(t *testing.T, s state) {
- name := "user.merkle.offset"
- value := "tmp"
- err := SetGetXattr(s.file, name, value)
- if err != nil {
- t.Fatalf("%v: SetGetXattr failed, err: %v", s, err)
- }
- })
-}
-
-func TestLink(t *testing.T) {
- if !specutils.HasCapabilities(capability.CAP_DAC_READ_SEARCH) {
- t.Skipf("Link test requires CAP_DAC_READ_SEARCH, running as %d", os.Getuid())
- }
- runCustom(t, allTypes, rwConfs, func(t *testing.T, s state) {
- const dirName = "linkdir"
- const linkFile = "link"
- if _, err := s.root.Mkdir(dirName, 0777, p9.UID(os.Getuid()), p9.GID(os.Getgid())); err != nil {
- t.Fatalf("%v: MkDir(%s) failed, err: %v", s, dirName, err)
- }
- _, dir, err := s.root.Walk([]string{dirName})
- if err != nil {
- t.Fatalf("%v: Walk({%s}) failed, err: %v", s, dirName, err)
- }
-
- err = dir.Link(s.file, linkFile)
- if s.fileType == unix.S_IFDIR {
- if err != unix.EPERM {
- t.Errorf("%v: Link(target, %s) should have failed, got: %v, expected: unix.EPERM", s, linkFile, err)
- }
- return
- }
- if err != nil {
- t.Errorf("%v: Link(target, %s) failed, err: %v", s, linkFile, err)
- }
- })
-}
-
-func TestROMountChecks(t *testing.T) {
- const want = unix.EROFS
- uid := p9.UID(os.Getuid())
- gid := p9.GID(os.Getgid())
-
- runCustom(t, allTypes, roConfs, func(t *testing.T, s state) {
- if s.fileType != unix.S_IFLNK {
- if _, _, _, err := s.file.Open(p9.WriteOnly); err != want {
- t.Errorf("Open() should have failed, got: %v, expected: %v", err, want)
- }
- if _, _, _, err := s.file.Open(p9.ReadWrite); err != want {
- t.Errorf("Open() should have failed, got: %v, expected: %v", err, want)
- }
- if _, _, _, err := s.file.Open(p9.ReadOnly | p9.OpenTruncate); err != want {
- t.Errorf("Open() should have failed, got: %v, expected: %v", err, want)
- }
- f, _, _, err := s.file.Open(p9.ReadOnly)
- if err != nil {
- t.Errorf("Open() failed: %v", err)
- }
- if f != nil {
- _ = f.Close()
- }
- }
-
- if _, _, _, _, err := s.file.Create("some_file", p9.ReadWrite, 0777, uid, gid); err != want {
- t.Errorf("Create() should have failed, got: %v, expected: %v", err, want)
- }
- if _, err := s.file.Mkdir("some_dir", 0777, uid, gid); err != want {
- t.Errorf("MkDir() should have failed, got: %v, expected: %v", err, want)
- }
- if err := s.file.RenameAt("some_file", s.file, "other_file"); err != want {
- t.Errorf("Rename() should have failed, got: %v, expected: %v", err, want)
- }
- if _, err := s.file.Symlink("some_place", "some_symlink", uid, gid); err != want {
- t.Errorf("Symlink() should have failed, got: %v, expected: %v", err, want)
- }
- if err := s.file.UnlinkAt("some_file", 0); err != want {
- t.Errorf("UnlinkAt() should have failed, got: %v, expected: %v", err, want)
- }
- if err := s.file.Link(s.file, "some_link"); err != want {
- t.Errorf("Link() should have failed, got: %v, expected: %v", err, want)
- }
- if _, err := s.file.Mknod("some-nod", 0777, 1, 2, uid, gid); err != want {
- t.Errorf("Mknod() should have failed, got: %v, expected: %v", err, want)
- }
-
- valid := p9.SetAttrMask{Size: true}
- attr := p9.SetAttr{Size: 0}
- if err := s.file.SetAttr(valid, attr); err != want {
- t.Errorf("SetAttr() should have failed, got: %v, expected: %v", err, want)
- }
- })
-}
-
-func TestWalkNotFound(t *testing.T) {
- runCustom(t, []uint32{unix.S_IFDIR}, allConfs, func(t *testing.T, s state) {
- if _, _, err := s.file.Walk([]string{"nobody-here"}); err != unix.ENOENT {
- t.Errorf("Walk(%q) should have failed, got: %v, expected: unix.ENOENT", "nobody-here", err)
- }
- if _, _, err := s.file.Walk([]string{"nobody", "here"}); err != unix.ENOENT {
- t.Errorf("Walk(%q) should have failed, got: %v, expected: unix.ENOENT", "nobody/here", err)
- }
- if !s.conf.ROMount {
- if _, err := s.file.Mkdir("dir", 0777, p9.UID(os.Getuid()), p9.GID(os.Getgid())); err != nil {
- t.Fatalf("MkDir(dir) failed, err: %v", err)
- }
- if _, _, err := s.file.Walk([]string{"dir", "nobody-here"}); err != unix.ENOENT {
- t.Errorf("Walk(%q) should have failed, got: %v, expected: unix.ENOENT", "dir/nobody-here", err)
- }
- }
- })
-}
-
-func TestWalkDup(t *testing.T) {
- runAll(t, func(t *testing.T, s state) {
- _, dup, err := s.file.Walk([]string{})
- if err != nil {
- t.Fatalf("%v: Walk(nil) failed, err: %v", s, err)
- }
- // Check that 'dup' is usable.
- if _, _, _, err := dup.GetAttr(p9.AttrMask{}); err != nil {
- t.Errorf("%v: GetAttr() failed, err: %v", s, err)
- }
- })
-}
-
-func TestWalkMultiple(t *testing.T) {
- runCustom(t, []uint32{unix.S_IFDIR}, rwConfs, func(t *testing.T, s state) {
- var names []string
- var parent p9.File = s.file
- for i := 0; i < 5; i++ {
- name := fmt.Sprintf("dir%d", i)
- names = append(names, name)
-
- if _, err := parent.Mkdir(name, 0777, p9.UID(os.Getuid()), p9.GID(os.Getgid())); err != nil {
- t.Fatalf("MkDir(%q) failed, err: %v", name, err)
- }
-
- var err error
- _, parent, err = s.file.Walk(names)
- if err != nil {
- t.Errorf("Walk(%q): %v", name, err)
- }
- }
- })
-}
-
-func TestReaddir(t *testing.T) {
- runCustom(t, []uint32{unix.S_IFDIR}, rwConfs, func(t *testing.T, s state) {
- name := "dir"
- if _, err := s.file.Mkdir(name, 0777, p9.UID(os.Getuid()), p9.GID(os.Getgid())); err != nil {
- t.Fatalf("%v: MkDir(%s) failed, err: %v", s, name, err)
- }
- name = "symlink"
- if _, err := s.file.Symlink("/some/target", name, p9.UID(os.Getuid()), p9.GID(os.Getgid())); err != nil {
- t.Fatalf("%v: Symlink(%q) failed, err: %v", s, name, err)
- }
- name = "file"
- _, f, _, _, err := s.file.Create(name, p9.ReadWrite, 0555, p9.UID(os.Getuid()), p9.GID(os.Getgid()))
- if err != nil {
- t.Fatalf("%v: createFile(root, %q) failed, err: %v", s, name, err)
- }
- f.Close()
-
- if _, _, _, err := s.file.Open(p9.ReadOnly); err != nil {
- t.Fatalf("%v: Open(ReadOnly) failed, err: %v", s, err)
- }
-
- dirents, err := s.file.Readdir(0, 10)
- if err != nil {
- t.Fatalf("%v: Readdir(0, 10) failed, err: %v", s, err)
- }
- if len(dirents) != 3 {
- t.Fatalf("%v: Readdir(0, 10) wrong number of items, got: %v, expected: 3", s, len(dirents))
- }
- var dir, symlink, file bool
- for _, d := range dirents {
- switch d.Name {
- case "dir":
- if d.Type != p9.TypeDir {
- t.Errorf("%v: dirent.Type got: %v, expected: %v", s, d.Type, p9.TypeDir)
- }
- dir = true
- case "symlink":
- if d.Type != p9.TypeSymlink {
- t.Errorf("%v: dirent.Type got: %v, expected: %v", s, d.Type, p9.TypeSymlink)
- }
- symlink = true
- case "file":
- if d.Type != p9.TypeRegular {
- t.Errorf("%v: dirent.Type got: %v, expected: %v", s, d.Type, p9.TypeRegular)
- }
- file = true
- default:
- t.Errorf("%v: dirent.Name got: %v", s, d.Name)
- }
-
- _, f, err := s.file.Walk([]string{d.Name})
- if err != nil {
- t.Fatalf("%v: Walk({%s}) failed, err: %v", s, d.Name, err)
- }
- _, _, a, err := f.GetAttr(p9.AttrMask{})
- if err != nil {
- t.Fatalf("%v: GetAttr() failed, err: %v", s, err)
- }
- if d.Type != a.Mode.QIDType() {
- t.Errorf("%v: dirent.Type different than GetAttr().Mode.QIDType(), got: %v, expected: %v", s, d.Type, a.Mode.QIDType())
- }
- }
- if !dir || !symlink || !file {
- t.Errorf("%v: Readdir(0, 10) wrong files returned, dir: %v, symlink: %v, file: %v", s, dir, symlink, file)
- }
- })
-}
-
-// Test that an attach point can be written to when it points to a file, e.g.
-// /etc/hosts.
-func TestAttachFile(t *testing.T) {
- conf := Config{ROMount: false}
- dir, err := ioutil.TempDir("", "root-")
- if err != nil {
- t.Fatalf("ioutil.TempDir() failed, err: %v", err)
- }
- defer os.RemoveAll(dir)
-
- path := path.Join(dir, "test")
- if _, err := os.Create(path); err != nil {
- t.Fatalf("os.Create(%q) failed, err: %v", path, err)
- }
-
- a, err := NewAttachPoint(path, conf)
- if err != nil {
- t.Fatalf("NewAttachPoint failed: %v", err)
- }
- root, err := a.Attach()
- if err != nil {
- t.Fatalf("Attach failed, err: %v", err)
- }
-
- if _, _, _, err := root.Open(p9.ReadWrite); err != nil {
- t.Fatalf("Open(ReadWrite) failed, err: %v", err)
- }
- defer root.Close()
-
- b := []byte("foobar")
- w, err := root.WriteAt(b, 0)
- if err != nil {
- t.Fatalf("Write() failed, err: %v", err)
- }
- if w != len(b) {
- t.Fatalf("Write() was partial, got: %d, expected: %d", w, len(b))
- }
- rBuf := make([]byte, len(b))
- r, err := root.ReadAt(rBuf, 0)
- if err != nil {
- t.Fatalf("ReadAt() failed, err: %v", err)
- }
- if r != len(rBuf) {
- t.Fatalf("ReadAt() was partial, got: %d, expected: %d", r, len(rBuf))
- }
- if string(rBuf) != "foobar" {
- t.Fatalf("ReadAt() wrong data, got: %s, expected: %s", string(rBuf), "foobar")
- }
-}
-
-func TestAttachInvalidType(t *testing.T) {
- dir, err := ioutil.TempDir("", "attach-")
- if err != nil {
- t.Fatalf("ioutil.TempDir() failed, err: %v", err)
- }
- defer os.RemoveAll(dir)
-
- fifo := filepath.Join(dir, "fifo")
- if err := unix.Mkfifo(fifo, 0755); err != nil {
- t.Fatalf("Mkfifo(%q): %v", fifo, err)
- }
-
- dirFile, err := os.Open(dir)
- if err != nil {
- t.Fatalf("Open(%s): %v", dir, err)
- }
- defer dirFile.Close()
-
- // Bind a socket via /proc to be sure that the length of the socket path
- // is less than UNIX_PATH_MAX.
- socket := filepath.Join(fmt.Sprintf("/proc/self/fd/%d", dirFile.Fd()), "socket")
- l, err := net.Listen("unix", socket)
- if err != nil {
- t.Fatalf("net.Listen(unix, %q): %v", socket, err)
- }
- defer l.Close()
-
- for _, tc := range []struct {
- name string
- path string
- }{
- {name: "fifo", path: fifo},
- {name: "socket", path: socket},
- } {
- t.Run(tc.name, func(t *testing.T) {
- conf := Config{ROMount: false}
- a, err := NewAttachPoint(tc.path, conf)
- if err != nil {
- t.Fatalf("NewAttachPoint failed: %v", err)
- }
- f, err := a.Attach()
- if f != nil || err == nil {
- t.Fatalf("Attach should have failed, got (%v, %v)", f, err)
- }
- })
- }
-}
-
-func TestDoubleAttachError(t *testing.T) {
- conf := Config{ROMount: false}
- root, err := ioutil.TempDir("", "root-")
- if err != nil {
- t.Fatalf("ioutil.TempDir() failed, err: %v", err)
- }
- defer os.RemoveAll(root)
- a, err := NewAttachPoint(root, conf)
- if err != nil {
- t.Fatalf("NewAttachPoint failed: %v", err)
- }
-
- if _, err := a.Attach(); err != nil {
- t.Fatalf("Attach failed: %v", err)
- }
- if _, err := a.Attach(); err == nil {
- t.Fatalf("Attach should have failed, got %v want non-nil", err)
- }
-}
-
-func TestTruncate(t *testing.T) {
- runCustom(t, []uint32{unix.S_IFDIR}, rwConfs, func(t *testing.T, s state) {
- child, err := createFile(s.file, "test")
- if err != nil {
- t.Fatalf("createFile() failed: %v", err)
- }
- defer child.Close()
- want := []byte("foobar")
- w, err := child.WriteAt(want, 0)
- if err != nil {
- t.Fatalf("Write() failed: %v", err)
- }
- if w != len(want) {
- t.Fatalf("Write() was partial, got: %d, expected: %d", w, len(want))
- }
-
- _, l, err := s.file.Walk([]string{"test"})
- if err != nil {
- t.Fatalf("Walk(%s) failed: %v", "test", err)
- }
- if _, _, _, err := l.Open(p9.ReadOnly | p9.OpenTruncate); err != nil {
- t.Fatalf("Open() failed: %v", err)
- }
- _, mask, attr, err := l.GetAttr(p9.AttrMask{Size: true})
- if err != nil {
- t.Fatalf("GetAttr() failed: %v", err)
- }
- if !mask.Size {
- t.Fatalf("GetAttr() didn't return size: %+v", mask)
- }
- if attr.Size != 0 {
- t.Fatalf("truncate didn't work, want: 0, got: %d", attr.Size)
- }
- })
-}
-
-func TestMknod(t *testing.T) {
- runCustom(t, []uint32{unix.S_IFDIR}, rwConfs, func(t *testing.T, s state) {
- _, err := s.file.Mknod("test", p9.ModeRegular|0777, 1, 2, p9.UID(os.Getuid()), p9.GID(os.Getgid()))
- if err != nil {
- t.Fatalf("Mknod() failed: %v", err)
- }
-
- _, f, err := s.file.Walk([]string{"test"})
- if err != nil {
- t.Fatalf("Walk() failed: %v", err)
- }
- fd, _, _, err := f.Open(p9.ReadWrite)
- if err != nil {
- t.Fatalf("Open() failed: %v", err)
- }
- if fd != nil {
- defer fd.Close()
- }
- if err := testReadWrite(f, p9.ReadWrite, nil); err != nil {
- t.Fatalf("testReadWrite() failed: %v", err)
- }
- })
-}
-
-func BenchmarkWalkOne(b *testing.B) {
- path, name, err := setup(unix.S_IFDIR)
- if err != nil {
- b.Fatalf("%v", err)
- }
- defer os.RemoveAll(path)
-
- a, err := NewAttachPoint(path, Config{})
- if err != nil {
- b.Fatalf("NewAttachPoint failed: %v", err)
- }
- root, err := a.Attach()
- if err != nil {
- b.Fatalf("Attach failed, err: %v", err)
- }
- defer root.Close()
-
- names := []string{name}
- files := make([]p9.File, 0, 1000)
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, file, err := root.Walk(names)
- if err != nil {
- b.Fatalf("Walk(%q): %v", name, err)
- }
- files = append(files, file)
-
- // Avoid running out of FDs.
- if len(files) == cap(files) {
- b.StopTimer()
- for _, file := range files {
- file.Close()
- }
- files = files[:0]
- b.StartTimer()
- }
- }
-
- b.StopTimer()
- for _, file := range files {
- file.Close()
- }
-}
-
-func BenchmarkCreate(b *testing.B) {
- path, _, err := setup(unix.S_IFDIR)
- if err != nil {
- b.Fatalf("%v", err)
- }
- defer os.RemoveAll(path)
-
- a, err := NewAttachPoint(path, Config{})
- if err != nil {
- b.Fatalf("NewAttachPoint failed: %v", err)
- }
- root, err := a.Attach()
- if err != nil {
- b.Fatalf("Attach failed, err: %v", err)
- }
- defer root.Close()
-
- files := make([]p9.File, 0, 500)
- fds := make([]*fd.FD, 0, 500)
- uid := p9.UID(os.Getuid())
- gid := p9.GID(os.Getgid())
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- name := fmt.Sprintf("same-%d", i)
- fd, file, _, _, err := root.Create(name, p9.ReadOnly, 0777, uid, gid)
- if err != nil {
- b.Fatalf("Create(%q): %v", name, err)
- }
- files = append(files, file)
- if fd != nil {
- fds = append(fds, fd)
- }
-
- // Avoid running out of FDs.
- if len(files) == cap(files) {
- b.StopTimer()
- for _, file := range files {
- file.Close()
- }
- files = files[:0]
- for _, fd := range fds {
- fd.Close()
- }
- fds = fds[:0]
- b.StartTimer()
- }
- }
-
- b.StopTimer()
- for _, file := range files {
- file.Close()
- }
- for _, fd := range fds {
- fd.Close()
- }
-}
-
-func BenchmarkCreateDiffOwner(b *testing.B) {
- if !specutils.HasCapabilities(capability.CAP_CHOWN) {
- b.Skipf("Test requires CAP_CHOWN")
- }
-
- path, _, err := setup(unix.S_IFDIR)
- if err != nil {
- b.Fatalf("%v", err)
- }
- defer os.RemoveAll(path)
-
- a, err := NewAttachPoint(path, Config{})
- if err != nil {
- b.Fatalf("NewAttachPoint failed: %v", err)
- }
- root, err := a.Attach()
- if err != nil {
- b.Fatalf("Attach failed, err: %v", err)
- }
- defer root.Close()
-
- files := make([]p9.File, 0, 500)
- fds := make([]*fd.FD, 0, 500)
- gid := p9.GID(os.Getgid())
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- name := fmt.Sprintf("diff-%d", i)
- fd, file, _, _, err := root.Create(name, p9.ReadOnly, 0777, nobody, gid)
- if err != nil {
- b.Fatalf("Create(%q): %v", name, err)
- }
- files = append(files, file)
- if fd != nil {
- fds = append(fds, fd)
- }
-
- // Avoid running out of FDs.
- if len(files) == cap(files) {
- b.StopTimer()
- for _, file := range files {
- file.Close()
- }
- files = files[:0]
- for _, fd := range fds {
- fd.Close()
- }
- fds = fds[:0]
- b.StartTimer()
- }
- }
-
- b.StopTimer()
- for _, file := range files {
- file.Close()
- }
- for _, fd := range fds {
- fd.Close()
- }
-}
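
The deleted fsgofer tests are driven by runCustom, which crosses every mount configuration with every file type and turns each pair into a named subtest. Below is a small standalone sketch of that cross-product subtest pattern; the sketchConfig type and the test body are placeholders, not the fsgofer Config or state types.

// A minimal sketch of the (config x file type) subtest pattern used by
// runCustom above. Placed in a file such as sketch_test.go.
package sketch

import (
	"fmt"
	"testing"
)

type sketchConfig struct {
	ROMount bool
}

func sketchConfigName(c sketchConfig) string {
	if c.ROMount {
		return "ROMount"
	}
	return "RWMount"
}

func TestCrossProduct(t *testing.T) {
	confs := []sketchConfig{{ROMount: false}, {ROMount: true}}
	types := []string{"file", "directory", "symlink"}

	for _, c := range confs {
		for _, ft := range types {
			name := fmt.Sprintf("%s/%s", sketchConfigName(c), ft)
			t.Run(name, func(t *testing.T) {
				// The real harness creates an attach point, walks to a
				// target of type ft, and hands a state value to the test
				// body; this placeholder only names the case.
				t.Logf("config=%s type=%s", sketchConfigName(c), ft)
			})
		}
	}
}
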
diff --git a/runsc/fsgofer/fsgofer_unsafe_state_autogen.go b/runsc/fsgofer/fsgofer_unsafe_state_autogen.go
new file mode 100644
index 000000000..d2f978fb9
--- /dev/null
+++ b/runsc/fsgofer/fsgofer_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package fsgofer
diff --git a/runsc/mitigate/BUILD b/runsc/mitigate/BUILD
deleted file mode 100644
index 1238890fc..000000000
--- a/runsc/mitigate/BUILD
+++ /dev/null
@@ -1,23 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "mitigate",
- srcs = ["mitigate.go"],
- visibility = [
- "//runsc:__subpackages__",
- ],
- deps = ["@in_gopkg_yaml_v2//:go_default_library"],
-)
-
-go_test(
- name = "mitigate_test",
- size = "small",
- srcs = ["mitigate_test.go"],
- library = ":mitigate",
- deps = [
- "//runsc/mitigate/mock",
- "@com_github_google_go_cmp//cmp:go_default_library",
- ],
-)
diff --git a/runsc/mitigate/mitigate_state_autogen.go b/runsc/mitigate/mitigate_state_autogen.go
new file mode 100644
index 000000000..14bad0cd6
--- /dev/null
+++ b/runsc/mitigate/mitigate_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package mitigate
diff --git a/runsc/mitigate/mitigate_test.go b/runsc/mitigate/mitigate_test.go
deleted file mode 100644
index a1d80581e..000000000
--- a/runsc/mitigate/mitigate_test.go
+++ /dev/null
@@ -1,501 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build amd64
-// +build amd64
-
-package mitigate
-
-import (
- "fmt"
- "io/ioutil"
- "strings"
- "testing"
-
- "gvisor.dev/gvisor/runsc/mitigate/mock"
-)
-
-// TestMockCPUSet tests mock cpu test cases against the cpuSet functions.
-func TestMockCPUSet(t *testing.T) {
- for _, tc := range []struct {
- testCase mock.CPU
- isVulnerable bool
- }{
- {
- testCase: mock.AMD8,
- isVulnerable: false,
- },
- {
- testCase: mock.Haswell2,
- isVulnerable: true,
- },
- {
- testCase: mock.Haswell2core,
- isVulnerable: true,
- },
- {
- testCase: mock.CascadeLake2,
- isVulnerable: true,
- },
- {
- testCase: mock.CascadeLake4,
- isVulnerable: true,
- },
- } {
- t.Run(tc.testCase.Name, func(t *testing.T) {
- data := tc.testCase.MakeCPUString()
- set, err := NewCPUSet([]byte(data))
- if err != nil {
- t.Fatalf("Failed to create cpuSet: %v", err)
- }
-
- t.Logf("data: %s", data)
-
- for _, tg := range set {
- if err := checkSorted(tg.threads); err != nil {
- t.Fatalf("Failed to sort cpuSet: %v", err)
- }
- }
-
- remaining := set.GetRemainingList()
- // In the non-vulnerable case, no cores should be shut down, so all should remain.
- want := tc.testCase.PhysicalCores * tc.testCase.Cores * tc.testCase.ThreadsPerCore
- if tc.isVulnerable {
- want = tc.testCase.PhysicalCores * tc.testCase.Cores
- }
-
- if want != len(remaining) {
- t.Fatalf("Failed to shutdown the correct number of cores: want: %d got: %d", want, len(remaining))
- }
-
- if !tc.isVulnerable {
- return
- }
-
- // If the set is vulnerable, we expect only 1 thread per hyperthread pair.
- for _, r := range remaining {
- if _, ok := set[r.id]; !ok {
- t.Fatalf("Entry %+v not in map, there must be two entries in the same thread group.", r)
- }
- delete(set, r.id)
- }
-
- possible := tc.testCase.MakeSysPossibleString()
- set, err = NewCPUSetFromPossible([]byte(possible))
- if err != nil {
- t.Fatalf("Failed to make cpuSet: %v", err)
- }
-
- want = tc.testCase.PhysicalCores * tc.testCase.Cores * tc.testCase.ThreadsPerCore
- got := len(set.GetRemainingList())
- if got != want {
- t.Fatalf("Returned the wrong number of CPUs want: %d got: %d", want, got)
- }
- })
- }
-}
-
-// TestGetCPU tests basic parsing of single CPU strings from reading
-// /proc/cpuinfo.
-func TestGetCPU(t *testing.T) {
- data := `processor : 0
-vendor_id : GenuineIntel
-cpu family : 6
-model : 85
-physical id: 0
-core id : 0
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa itlb_multihit
-`
- want := Thread{
- processorNumber: 0,
- vendorID: "GenuineIntel",
- cpuFamily: 6,
- model: 85,
- id: threadID{
- physicalID: 0,
- coreID: 0,
- },
- bugs: map[string]struct{}{
- "cpu_meltdown": {},
- "spectre_v1": {},
- "spectre_v2": {},
- "spec_store_bypass": {},
- "l1tf": {},
- "mds": {},
- "swapgs": {},
- "taa": {},
- "itlb_multihit": {},
- },
- }
-
- got, err := newThread(data)
- if err != nil {
- t.Fatalf("getCpu failed with error: %v", err)
- }
-
- if !want.SimilarTo(got) {
- t.Fatalf("Failed cpus not similar: got: %+v, want: %+v", got, want)
- }
-
- if !got.IsVulnerable() {
- t.Fatalf("Failed: cpu should be vulnerable.")
- }
-}
-
-func TestInvalid(t *testing.T) {
- result, err := getThreads(`something not a processor`)
- if err == nil {
- t.Fatalf("getCPU set didn't return an error: %+v", result)
- }
-
- if !strings.Contains(err.Error(), "no cpus") {
- t.Fatalf("Incorrect error returned: %v", err)
- }
-}
-
-// TestCPUSet tests getting the right number of CPUs from
-// parsing full output of /proc/cpuinfo.
-func TestCPUSet(t *testing.T) {
- data := `processor : 0
-vendor_id : GenuineIntel
-cpu family : 6
-model : 63
-model name : Intel(R) Xeon(R) CPU @ 2.30GHz
-stepping : 0
-microcode : 0x1
-cpu MHz : 2299.998
-cache size : 46080 KB
-physical id : 0
-siblings : 2
-core id : 0
-cpu cores : 1
-apicid : 0
-initial apicid : 0
-fpu : yes
-fpu_exception : yes
-cpuid level : 13
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single pti ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat md_clear arch_capabilities
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4599.99
-clflush size : 64
-cache_alignment : 64
-address sizes : 46 bits physical, 48 bits virtual
-power management:
-
-processor : 1
-vendor_id : GenuineIntel
-cpu family : 6
-model : 63
-model name : Intel(R) Xeon(R) CPU @ 2.30GHz
-stepping : 0
-microcode : 0x1
-cpu MHz : 2299.998
-cache size : 46080 KB
-physical id : 0
-siblings : 2
-core id : 0
-cpu cores : 1
-apicid : 1
-initial apicid : 1
-fpu : yes
-fpu_exception : yes
-cpuid level : 13
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single pti ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat md_clear arch_capabilities
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4599.99
-clflush size : 64
-cache_alignment : 64
-address sizes : 46 bits physical, 48 bits virtual
-power management:
-`
- cpuSet, err := getThreads(data)
- if err != nil {
- t.Fatalf("getCPUSet failed: %v", err)
- }
-
- wantCPULen := 2
- if len(cpuSet) != wantCPULen {
- t.Fatalf("Num CPU mismatch: want: %d, got: %d", wantCPULen, len(cpuSet))
- }
-
- wantCPU := Thread{
- vendorID: "GenuineIntel",
- cpuFamily: 6,
- model: 63,
- bugs: map[string]struct{}{
- "cpu_meltdown": {},
- "spectre_v1": {},
- "spectre_v2": {},
- "spec_store_bypass": {},
- "l1tf": {},
- "mds": {},
- "swapgs": {},
- },
- }
-
- for _, c := range cpuSet {
- if !wantCPU.SimilarTo(c) {
- t.Fatalf("Failed cpus not equal: got: %+v, want: %+v", c, wantCPU)
- }
- }
-}
-
-// TestReadFile is a smoke test for parsing methods.
-func TestReadFile(t *testing.T) {
- data, err := ioutil.ReadFile("/proc/cpuinfo")
- if err != nil {
- t.Fatalf("Failed to read cpuinfo: %v", err)
- }
-
- set, err := NewCPUSet(data)
- if err != nil {
- t.Fatalf("Failed to parse CPU data %v\n%s", err, data)
- }
-
- for _, tg := range set {
- if err := checkSorted(tg.threads); err != nil {
- t.Fatalf("Failed to sort cpuSet: %v", err)
- }
- }
-
- if len(set) < 1 {
- t.Fatalf("Failed to parse any CPUs: %d", len(set))
- }
-
- t.Log(set)
-}
-
-// TestVulnerable tests that the IsVulnerable method is correct
-// for known CPUs in GCP.
-func TestVulnerable(t *testing.T) {
- const haswell = `processor : 0
-vendor_id : GenuineIntel
-cpu family : 6
-model : 63
-model name : Intel(R) Xeon(R) CPU @ 2.30GHz
-stepping : 0
-microcode : 0x1
-cpu MHz : 2299.998
-cache size : 46080 KB
-physical id : 0
-siblings : 4
-core id : 0
-cpu cores : 2
-apicid : 0
-initial apicid : 0
-fpu : yes
-fpu_exception : yes
-cpuid level : 13
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single pti ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat md_clear arch_capabilities
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4599.99
-clflush size : 64
-cache_alignment : 64
-address sizes : 46 bits physical, 48 bits virtual
-power management:`
-
- const skylake = `processor : 0
-vendor_id : GenuineIntel
-cpu family : 6
-model : 85
-model name : Intel(R) Xeon(R) CPU @ 2.00GHz
-stepping : 3
-microcode : 0x1
-cpu MHz : 2000.180
-cache size : 39424 KB
-physical id : 0
-siblings : 2
-core id : 0
-cpu cores : 1
-apicid : 0
-initial apicid : 0
-fpu : yes
-fpu_exception : yes
-cpuid level : 13
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single pti ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat md_clear arch_capabilities
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa
-bogomips : 4000.36
-clflush size : 64
-cache_alignment : 64
-address sizes : 46 bits physical, 48 bits virtual
-power management:`
-
- const amd = `processor : 0
-vendor_id : AuthenticAMD
-cpu family : 23
-model : 49
-model name : AMD EPYC 7B12
-stepping : 0
-microcode : 0x1000065
-cpu MHz : 2250.000
-cache size : 512 KB
-physical id : 0
-siblings : 2
-core id : 0
-cpu cores : 1
-apicid : 0
-initial apicid : 0
-fpu : yes
-fpu_exception : yes
-cpuid level : 13
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid extd_apicid tsc_known_freq pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip rdpid
-bugs : sysret_ss_attrs spectre_v1 spectre_v2 spec_store_bypass
-bogomips : 4500.00
-TLB size : 3072 4K pages
-clflush size : 64
-cache_alignment : 64
-address sizes : 48 bits physical, 48 bits virtual
-power management:`
-
- for _, tc := range []struct {
- name string
- cpuString string
- vulnerable bool
- }{
- {
- name: "haswell",
- cpuString: haswell,
- vulnerable: true,
- }, {
- name: "skylake",
- cpuString: skylake,
- vulnerable: true,
- }, {
- name: "amd",
- cpuString: amd,
- vulnerable: false,
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- set, err := getThreads(tc.cpuString)
- if err != nil {
- t.Fatalf("Failed to getCPUSet:%v\n %s", err, tc.cpuString)
- }
-
- if len(set) < 1 {
- t.Fatalf("Returned empty cpu set: %v", set)
- }
-
- for _, c := range set {
- got := c.IsVulnerable()
-
- if got != tc.vulnerable {
- t.Fatalf("Mismatch vulnerable for cpu %s: got %t want: %t", tc.name, tc.vulnerable, got)
- }
- }
- })
- }
-}
-
-func TestReverse(t *testing.T) {
- const noParse = "-1-"
- for _, tc := range []struct {
- name string
- output string
- wantErr error
- wantCount int
- }{
- {
- name: "base",
- output: "0-7",
- wantErr: nil,
- wantCount: 8,
- },
- {
- name: "huge",
- output: "0-111",
- wantErr: nil,
- wantCount: 112,
- },
- {
- name: "not zero",
- output: "50-53",
- wantErr: nil,
- wantCount: 4,
- },
- {
- name: "small",
- output: "0",
- wantErr: nil,
- wantCount: 1,
- },
- {
- name: "invalid order",
- output: "10-6",
- wantErr: fmt.Errorf("invalid cpu bounds from possible: begin: %d end: %d", 10, 6),
- },
- {
- name: "no parse",
- output: noParse,
- wantErr: fmt.Errorf(`mismatch regex from possible: %q`, noParse),
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- threads, err := GetThreadsFromPossible([]byte(tc.output))
-
- switch {
- case tc.wantErr == nil:
- if err != nil {
- t.Fatalf("Wanted nil err, got: %v", err)
- }
- case err == nil:
- t.Fatalf("Want error: %v got: %v", tc.wantErr, err)
- default:
- if tc.wantErr.Error() != err.Error() {
- t.Fatalf("Want error: %v got error: %v", tc.wantErr, err)
- }
- }
-
- if len(threads) != tc.wantCount {
- t.Fatalf("Want count: %d got: %d", tc.wantCount, len(threads))
- }
- })
- }
-}
-
-func TestReverseSmoke(t *testing.T) {
- data, err := ioutil.ReadFile("/sys/devices/system/cpu/possible")
- if err != nil {
- t.Fatalf("Failed to read from possible: %v", err)
- }
- threads, err := GetThreadsFromPossible(data)
- if err != nil {
- t.Fatalf("Could not parse possible output: %v", err)
- }
-
- if len(threads) <= 0 {
- t.Fatalf("Didn't get any CPU cores: %d", len(threads))
- }
-}
-
-func checkSorted(threads []Thread) error {
- if len(threads) < 2 {
- return nil
- }
- last := threads[0].processorNumber
- for _, t := range threads[1:] {
- if last >= t.processorNumber {
- return fmt.Errorf("threads out of order: thread %d before %d", t.processorNumber, last)
- }
- last = t.processorNumber
- }
- return nil
-}
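
TestReverse above exercises parsing of /sys/devices/system/cpu/possible strings such as "0-7". The sketch below shows one way to expand that format into a CPU list; it only handles the single "N" and "N-M" forms that appear in the test cases, and it is not the GetThreadsFromPossible implementation, which returns Thread values.

// A minimal sketch of expanding a cpu/possible range string, assuming
// only the "N" and "N-M" forms.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parsePossible returns the CPU numbers described by a string such as
// "0-7" or "0", rejecting reversed bounds like "10-6".
func parsePossible(s string) ([]int, error) {
	parts := strings.SplitN(strings.TrimSpace(s), "-", 2)
	begin, err := strconv.Atoi(parts[0])
	if err != nil {
		return nil, fmt.Errorf("cannot parse %q: %v", s, err)
	}
	end := begin
	if len(parts) == 2 {
		if end, err = strconv.Atoi(parts[1]); err != nil {
			return nil, fmt.Errorf("cannot parse %q: %v", s, err)
		}
	}
	if end < begin {
		return nil, fmt.Errorf("invalid cpu bounds: begin: %d end: %d", begin, end)
	}
	cpus := make([]int, 0, end-begin+1)
	for i := begin; i <= end; i++ {
		cpus = append(cpus, i)
	}
	return cpus, nil
}

func main() {
	for _, s := range []string{"0", "0-7", "50-53"} {
		cpus, err := parsePossible(s)
		fmt.Println(s, "->", len(cpus), "cpus,", err)
	}
}
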
diff --git a/runsc/mitigate/mock/BUILD b/runsc/mitigate/mock/BUILD
deleted file mode 100644
index 5019ff9ee..000000000
--- a/runsc/mitigate/mock/BUILD
+++ /dev/null
@@ -1,11 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "mock",
- srcs = ["mock.go"],
- visibility = [
- "//runsc:__subpackages__",
- ],
-)
diff --git a/runsc/mitigate/mock/mock.go b/runsc/mitigate/mock/mock.go
deleted file mode 100644
index 12c59e356..000000000
--- a/runsc/mitigate/mock/mock.go
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package mock contains mock CPUs for mitigate tests.
-package mock
-
-import "fmt"
-
-// CPU represents data from CPUs that will be mitigated.
-type CPU struct {
- Name string
- VendorID string
- Family int
- Model int
- ModelName string
- Bugs string
- PhysicalCores int
- Cores int
- ThreadsPerCore int
-}
-
-// CascadeLake2 is a two core Intel CascadeLake machine.
-var CascadeLake2 = CPU{
- Name: "CascadeLake",
- VendorID: "GenuineIntel",
- Family: 6,
- Model: 85,
- ModelName: "Intel(R) Xeon(R) CPU",
- Bugs: "spectre_v1 spectre_v2 spec_store_bypass mds swapgs taa",
- PhysicalCores: 1,
- Cores: 1,
- ThreadsPerCore: 2,
-}
-
-// CascadeLake4 is a four core Intel CascadeLake machine.
-var CascadeLake4 = CPU{
- Name: "CascadeLake",
- VendorID: "GenuineIntel",
- Family: 6,
- Model: 85,
- ModelName: "Intel(R) Xeon(R) CPU",
- Bugs: "spectre_v1 spectre_v2 spec_store_bypass mds swapgs taa",
- PhysicalCores: 1,
- Cores: 2,
- ThreadsPerCore: 2,
-}
-
-// Haswell2 is a two core Intel Haswell machine.
-var Haswell2 = CPU{
- Name: "Haswell",
- VendorID: "GenuineIntel",
- Family: 6,
- Model: 63,
- ModelName: "Intel(R) Xeon(R) CPU",
- Bugs: "cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs",
- PhysicalCores: 1,
- Cores: 1,
- ThreadsPerCore: 2,
-}
-
-// Haswell2core is a 2 core Intel Haswell machine with no hyperthread pairs.
-var Haswell2core = CPU{
- Name: "Haswell2Physical",
- VendorID: "GenuineIntel",
- Family: 6,
- Model: 63,
- ModelName: "Intel(R) Xeon(R) CPU",
- Bugs: "cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs",
- PhysicalCores: 2,
- Cores: 1,
- ThreadsPerCore: 1,
-}
-
-// AMD2 is a two core AMD machine.
-var AMD2 = CPU{
- Name: "AMD",
- VendorID: "AuthenticAMD",
- Family: 23,
- Model: 49,
- ModelName: "AMD EPYC 7B12",
- Bugs: "sysret_ss_attrs spectre_v1 spectre_v2 spec_store_bypass",
- PhysicalCores: 1,
- Cores: 1,
- ThreadsPerCore: 2,
-}
-
-// AMD8 is an eight core AMD machine.
-var AMD8 = CPU{
- Name: "AMD",
- VendorID: "AuthenticAMD",
- Family: 23,
- Model: 49,
- ModelName: "AMD EPYC 7B12",
- Bugs: "sysret_ss_attrs spectre_v1 spectre_v2 spec_store_bypass",
- PhysicalCores: 4,
- Cores: 1,
- ThreadsPerCore: 2,
-}
-
-// MakeCPUString makes a string formatted like /proc/cpuinfo for the given CPU.
-func (tc CPU) MakeCPUString() string {
- template := `processor : %d
-vendor_id : %s
-cpu family : %d
-model : %d
-model name : %s
-physical id : %d
-core id : %d
-cpu cores : %d
-bugs : %s
-
-`
-
- ret := ``
- for i := 0; i < tc.PhysicalCores; i++ {
- for j := 0; j < tc.Cores; j++ {
- for k := 0; k < tc.ThreadsPerCore; k++ {
- processorNum := (i*tc.Cores+j)*tc.ThreadsPerCore + k
- ret += fmt.Sprintf(template,
- processorNum, /*processor*/
- tc.VendorID, /*vendor_id*/
- tc.Family, /*cpu family*/
- tc.Model, /*model*/
- tc.ModelName, /*model name*/
- i, /*physical id*/
- j, /*core id*/
- k, /*cpu cores*/
- tc.Bugs, /*bugs*/
- )
- }
- }
- }
- return ret
-}
-
-// MakeSysPossibleString makes a string representing the contents of /sys/devices/system/cpu/possible.
-func (tc CPU) MakeSysPossibleString() string {
- max := tc.PhysicalCores * tc.Cores * tc.ThreadsPerCore
- if max == 1 {
- return "0"
- }
- return fmt.Sprintf("0-%d", max-1)
-}
diff --git a/runsc/sandbox/BUILD b/runsc/sandbox/BUILD
deleted file mode 100644
index bc4a3fa32..000000000
--- a/runsc/sandbox/BUILD
+++ /dev/null
@@ -1,39 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "sandbox",
- srcs = [
- "network.go",
- "network_unsafe.go",
- "sandbox.go",
- ],
- visibility = [
- "//runsc:__subpackages__",
- ],
- deps = [
- "//pkg/cleanup",
- "//pkg/control/client",
- "//pkg/control/server",
- "//pkg/coverage",
- "//pkg/log",
- "//pkg/sentry/control",
- "//pkg/sentry/platform",
- "//pkg/sync",
- "//pkg/tcpip/header",
- "//pkg/tcpip/stack",
- "//pkg/urpc",
- "//runsc/boot",
- "//runsc/boot/platforms",
- "//runsc/cgroup",
- "//runsc/config",
- "//runsc/console",
- "//runsc/specutils",
- "@com_github_cenkalti_backoff//:go_default_library",
- "@com_github_opencontainers_runtime_spec//specs-go:go_default_library",
- "@com_github_syndtr_gocapability//capability:go_default_library",
- "@com_github_vishvananda_netlink//:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/runsc/sandbox/sandbox_state_autogen.go b/runsc/sandbox/sandbox_state_autogen.go
new file mode 100644
index 000000000..79ebc2220
--- /dev/null
+++ b/runsc/sandbox/sandbox_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package sandbox
diff --git a/runsc/sandbox/sandbox_unsafe_state_autogen.go b/runsc/sandbox/sandbox_unsafe_state_autogen.go
new file mode 100644
index 000000000..79ebc2220
--- /dev/null
+++ b/runsc/sandbox/sandbox_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package sandbox
diff --git a/runsc/specutils/BUILD b/runsc/specutils/BUILD
deleted file mode 100644
index 679d8bc8e..000000000
--- a/runsc/specutils/BUILD
+++ /dev/null
@@ -1,34 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "specutils",
- srcs = [
- "cri.go",
- "fs.go",
- "namespace.go",
- "specutils.go",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/bits",
- "//pkg/log",
- "//pkg/sentry/kernel/auth",
- "//runsc/config",
- "@com_github_cenkalti_backoff//:go_default_library",
- "@com_github_mohae_deepcopy//:go_default_library",
- "@com_github_opencontainers_runtime_spec//specs-go:go_default_library",
- "@com_github_syndtr_gocapability//capability:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "specutils_test",
- size = "small",
- srcs = ["specutils_test.go"],
- library = ":specutils",
- deps = ["@com_github_opencontainers_runtime_spec//specs-go:go_default_library"],
-)
diff --git a/runsc/specutils/safemount_test/BUILD b/runsc/specutils/safemount_test/BUILD
deleted file mode 100644
index c39c40492..000000000
--- a/runsc/specutils/safemount_test/BUILD
+++ /dev/null
@@ -1,23 +0,0 @@
-load("//tools:defs.bzl", "go_binary", "go_test")
-
-package(licenses = ["notice"])
-
-go_test(
- name = "safemount_test",
- size = "small",
- srcs = ["safemount_test.go"],
- data = [":safemount_runner"],
- deps = [
- "//pkg/test/testutil",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_binary(
- name = "safemount_runner",
- srcs = ["safemount_runner.go"],
- deps = [
- "//runsc/specutils",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/runsc/specutils/safemount_test/safemount_runner.go b/runsc/specutils/safemount_test/safemount_runner.go
deleted file mode 100644
index b23193033..000000000
--- a/runsc/specutils/safemount_test/safemount_runner.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// safemount_runner is used to test the SafeMount function. Because use of
-// unix.Mount requires privilege, tests must launch this process with
-// CLONE_NEWNS and CLONE_NEWUSER.
-package main
-
-import (
- "errors"
- "fmt"
- "log"
- "os"
- "path/filepath"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/runsc/specutils"
-)
-
-func main() {
- // The test temporary directory is the first argument.
- tempdir := os.Args[1]
-
- tcs := []struct {
- name string
- testfunc func() error
- }{{
- name: "unix.Mount to folder succeeds",
- testfunc: func() error {
- dir2Path := filepath.Join(tempdir, "subdir2")
- if err := unix.Mount(filepath.Join(tempdir, "subdir"), dir2Path, "bind", unix.MS_BIND, ""); err != nil {
- return fmt.Errorf("mount: %v", err)
- }
- return unix.Unmount(dir2Path, unix.MNT_DETACH)
- },
- }, {
- // unix.Mount doesn't care whether the target is a symlink.
- name: "unix.Mount to symlink succeeds",
- testfunc: func() error {
- symlinkPath := filepath.Join(tempdir, "symlink")
- if err := unix.Mount(filepath.Join(tempdir, "subdir"), symlinkPath, "bind", unix.MS_BIND, ""); err != nil {
- return fmt.Errorf("mount: %v", err)
- }
- return unix.Unmount(symlinkPath, unix.MNT_DETACH)
- },
- }, {
- name: "SafeMount to folder succeeds",
- testfunc: func() error {
- dir2Path := filepath.Join(tempdir, "subdir2")
- if err := specutils.SafeMount(filepath.Join(tempdir, "subdir"), dir2Path, "bind", unix.MS_BIND, "", "/proc"); err != nil {
- return fmt.Errorf("SafeMount: %v", err)
- }
- return unix.Unmount(dir2Path, unix.MNT_DETACH)
- },
- }, {
- name: "SafeMount to symlink fails",
- testfunc: func() error {
- err := specutils.SafeMount(filepath.Join(tempdir, "subdir"), filepath.Join(tempdir, "symlink"), "bind", unix.MS_BIND, "", "/proc")
- if err == nil {
- return fmt.Errorf("SafeMount didn't fail, but should have")
- }
- var symErr *specutils.ErrSymlinkMount
- if !errors.As(err, &symErr) {
- return fmt.Errorf("expected SafeMount to fail with ErrSymlinkMount, but got: %v", err)
- }
- return nil
- },
- }}
-
- for _, tc := range tcs {
- if err := runTest(tempdir, tc.testfunc); err != nil {
- log.Fatalf("failed test %q: %v", tc.name, err)
- }
- }
-}
-
-// runTest runs testfunc with the following directory structure:
-// tempdir/
-// subdir/
-// subdir2/
-// symlink --> ./subdir2
-func runTest(tempdir string, testfunc func() error) error {
- // Create tempdir/subdir/.
- dirPath := filepath.Join(tempdir, "subdir")
- if err := os.Mkdir(dirPath, 0777); err != nil {
-		return fmt.Errorf("os.Mkdir(%s, 0777) failed: %v", dirPath, err)
- }
- defer os.Remove(dirPath)
-
- // Create tempdir/subdir2/.
- dir2Path := filepath.Join(tempdir, "subdir2")
- if err := os.Mkdir(dir2Path, 0777); err != nil {
-		return fmt.Errorf("os.Mkdir(%s, 0777) failed: %v", dir2Path, err)
- }
- defer os.Remove(dir2Path)
-
- // Create tempdir/symlink, which points to ./subdir2.
- symlinkPath := filepath.Join(tempdir, "symlink")
- if err := os.Symlink("./subdir2", symlinkPath); err != nil {
- return fmt.Errorf("failed to create symlink %s: %v", symlinkPath, err)
- }
- defer os.Remove(symlinkPath)
-
- // Run the actual test.
- return testfunc()
-}
diff --git a/runsc/specutils/safemount_test/safemount_test.go b/runsc/specutils/safemount_test/safemount_test.go
deleted file mode 100644
index 8820978c4..000000000
--- a/runsc/specutils/safemount_test/safemount_test.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package safemount_test
-
-import (
- "os"
- "os/exec"
- "syscall"
- "testing"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/test/testutil"
-)
-
-func TestSafeMount(t *testing.T) {
- // We run the actual tests in another process, as we need CAP_SYS_ADMIN to
- // call mount(2). The new process runs in its own user and mount namespaces.
- runner, err := testutil.FindFile("runsc/specutils/safemount_test/safemount_runner")
- if err != nil {
- t.Fatalf("failed to find test runner binary: %v", err)
- }
- cmd := exec.Command(runner, t.TempDir())
- cmd.SysProcAttr = &unix.SysProcAttr{
- Cloneflags: unix.CLONE_NEWNS | unix.CLONE_NEWUSER,
- UidMappings: []syscall.SysProcIDMap{
- {ContainerID: 0, HostID: os.Getuid(), Size: 1},
- },
- GidMappings: []syscall.SysProcIDMap{
- {ContainerID: 0, HostID: os.Getgid(), Size: 1},
- },
- GidMappingsEnableSetgroups: false,
- Credential: &syscall.Credential{
- Uid: 0,
- Gid: 0,
- },
- }
- output, err := cmd.CombinedOutput()
- if err != nil {
- t.Fatalf("failed running %s with error: %v\ntest output:\n%s", cmd, err, output)
- }
-}
diff --git a/runsc/specutils/seccomp/BUILD b/runsc/specutils/seccomp/BUILD
deleted file mode 100644
index c5f5b863e..000000000
--- a/runsc/specutils/seccomp/BUILD
+++ /dev/null
@@ -1,38 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "seccomp",
- srcs = [
- "audit_amd64.go",
- "audit_arm64.go",
- "seccomp.go",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/bpf",
- "//pkg/log",
- "//pkg/seccomp",
- "//pkg/sentry/kernel",
- "//pkg/sentry/syscalls/linux",
- "@com_github_opencontainers_runtime_spec//specs-go:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "seccomp_test",
- size = "small",
- srcs = ["seccomp_test.go"],
- library = ":seccomp",
- deps = [
- "//pkg/abi/linux",
- "//pkg/bpf",
- "//pkg/hostarch",
- "//pkg/marshal",
- "@com_github_opencontainers_runtime_spec//specs-go:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/runsc/specutils/seccomp/seccomp_amd64_state_autogen.go b/runsc/specutils/seccomp/seccomp_amd64_state_autogen.go
new file mode 100644
index 000000000..27a96018b
--- /dev/null
+++ b/runsc/specutils/seccomp/seccomp_amd64_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build amd64
+
+package seccomp
diff --git a/runsc/specutils/seccomp/seccomp_arm64_state_autogen.go b/runsc/specutils/seccomp/seccomp_arm64_state_autogen.go
new file mode 100644
index 000000000..96c64c23d
--- /dev/null
+++ b/runsc/specutils/seccomp/seccomp_arm64_state_autogen.go
@@ -0,0 +1,5 @@
+// automatically generated by stateify.
+
+// +build arm64
+
+package seccomp
diff --git a/runsc/specutils/seccomp/seccomp_state_autogen.go b/runsc/specutils/seccomp/seccomp_state_autogen.go
new file mode 100644
index 000000000..e16b5d7c2
--- /dev/null
+++ b/runsc/specutils/seccomp/seccomp_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package seccomp
diff --git a/runsc/specutils/seccomp/seccomp_test.go b/runsc/specutils/seccomp/seccomp_test.go
deleted file mode 100644
index 20796bf14..000000000
--- a/runsc/specutils/seccomp/seccomp_test.go
+++ /dev/null
@@ -1,409 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package seccomp
-
-import (
- "fmt"
- "testing"
-
- specs "github.com/opencontainers/runtime-spec/specs-go"
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/bpf"
- "gvisor.dev/gvisor/pkg/hostarch"
- "gvisor.dev/gvisor/pkg/marshal"
-)
-
-// asInput converts a linux.SeccompData to a bpf.Input.
-func asInput(d *linux.SeccompData) bpf.Input {
- return bpf.InputBytes{marshal.Marshal(d), hostarch.ByteOrder}
-}
-
-// testInput creates an Input struct with given seccomp input values.
-func testInput(arch uint32, syscallName string, args *[6]uint64) bpf.Input {
- syscallNo, err := lookupSyscallNo(arch, syscallName)
- if err != nil {
- // Assume tests set valid syscall names.
- panic(err)
- }
-
- if args == nil {
- argArray := [6]uint64{0, 0, 0, 0, 0, 0}
- args = &argArray
- }
-
- data := linux.SeccompData{
- Nr: int32(syscallNo),
- Arch: arch,
- Args: *args,
- }
-
- return asInput(&data)
-}
-
-// testCase holds a seccomp test case.
-type testCase struct {
- name string
- config specs.LinuxSeccomp
- input bpf.Input
- expected uint32
-}
-
-var (
-	// seccompTests is a list of seccomp test cases.
- seccompTests = []testCase{
- {
- name: "default_allow",
- config: specs.LinuxSeccomp{
- DefaultAction: specs.ActAllow,
- },
- input: testInput(nativeArchAuditNo, "read", nil),
- expected: uint32(allowAction),
- },
- {
- name: "default_deny",
- config: specs.LinuxSeccomp{
- DefaultAction: specs.ActErrno,
- },
- input: testInput(nativeArchAuditNo, "read", nil),
- expected: uint32(errnoAction),
- },
- {
- name: "deny_arch",
- config: specs.LinuxSeccomp{
- DefaultAction: specs.ActAllow,
- Syscalls: []specs.LinuxSyscall{
- {
- Names: []string{
- "getcwd",
- },
- Action: specs.ActErrno,
- },
- },
- },
- // Syscall matches but the arch is AUDIT_ARCH_X86 so the return
- // value is the bad arch action.
-			input:    asInput(&linux.SeccompData{Nr: 183, Arch: 0x40000003}),
- expected: uint32(killThreadAction),
- },
- {
- name: "match_name_errno",
- config: specs.LinuxSeccomp{
- DefaultAction: specs.ActAllow,
- Syscalls: []specs.LinuxSyscall{
- {
- Names: []string{
- "getcwd",
- "chmod",
- },
- Action: specs.ActErrno,
- },
- {
- Names: []string{
- "write",
- },
- Action: specs.ActTrace,
- },
- },
- },
- input: testInput(nativeArchAuditNo, "getcwd", nil),
- expected: uint32(errnoAction),
- },
- {
- name: "match_name_trace",
- config: specs.LinuxSeccomp{
- DefaultAction: specs.ActAllow,
- Syscalls: []specs.LinuxSyscall{
- {
- Names: []string{
- "getcwd",
- "chmod",
- },
- Action: specs.ActErrno,
- },
- {
- Names: []string{
- "write",
- },
- Action: specs.ActTrace,
- },
- },
- },
- input: testInput(nativeArchAuditNo, "write", nil),
- expected: uint32(traceAction),
- },
- {
- name: "no_match_name_allow",
- config: specs.LinuxSeccomp{
- DefaultAction: specs.ActAllow,
- Syscalls: []specs.LinuxSyscall{
- {
- Names: []string{
- "getcwd",
- "chmod",
- },
- Action: specs.ActErrno,
- },
- {
- Names: []string{
- "write",
- },
- Action: specs.ActTrace,
- },
- },
- },
- input: testInput(nativeArchAuditNo, "openat", nil),
- expected: uint32(allowAction),
- },
- {
- name: "simple_match_args",
- config: specs.LinuxSeccomp{
- DefaultAction: specs.ActAllow,
- Syscalls: []specs.LinuxSyscall{
- {
- Names: []string{
- "clone",
- },
- Args: []specs.LinuxSeccompArg{
- {
- Index: 0,
- Value: unix.CLONE_FS,
- Op: specs.OpEqualTo,
- },
- },
- Action: specs.ActErrno,
- },
- },
- },
- input: testInput(nativeArchAuditNo, "clone", &[6]uint64{unix.CLONE_FS}),
- expected: uint32(errnoAction),
- },
- {
- name: "match_args_or",
- config: specs.LinuxSeccomp{
- DefaultAction: specs.ActAllow,
- Syscalls: []specs.LinuxSyscall{
- {
- Names: []string{
- "clone",
- },
- Args: []specs.LinuxSeccompArg{
- {
- Index: 0,
- Value: unix.CLONE_FS,
- Op: specs.OpEqualTo,
- },
- {
- Index: 0,
- Value: unix.CLONE_VM,
- Op: specs.OpEqualTo,
- },
- },
- Action: specs.ActErrno,
- },
- },
- },
- input: testInput(nativeArchAuditNo, "clone", &[6]uint64{unix.CLONE_FS}),
- expected: uint32(errnoAction),
- },
- {
- name: "match_args_and",
- config: specs.LinuxSeccomp{
- DefaultAction: specs.ActAllow,
- Syscalls: []specs.LinuxSyscall{
- {
- Names: []string{
- "getsockopt",
- },
- Args: []specs.LinuxSeccompArg{
- {
- Index: 1,
- Value: unix.SOL_SOCKET,
- Op: specs.OpEqualTo,
- },
- {
- Index: 2,
- Value: unix.SO_PEERCRED,
- Op: specs.OpEqualTo,
- },
- },
- Action: specs.ActErrno,
- },
- },
- },
- input: testInput(nativeArchAuditNo, "getsockopt", &[6]uint64{0, unix.SOL_SOCKET, unix.SO_PEERCRED}),
- expected: uint32(errnoAction),
- },
- {
- name: "no_match_args_and",
- config: specs.LinuxSeccomp{
- DefaultAction: specs.ActAllow,
- Syscalls: []specs.LinuxSyscall{
- {
- Names: []string{
- "getsockopt",
- },
- Args: []specs.LinuxSeccompArg{
- {
- Index: 1,
- Value: unix.SOL_SOCKET,
- Op: specs.OpEqualTo,
- },
- {
- Index: 2,
- Value: unix.SO_PEERCRED,
- Op: specs.OpEqualTo,
- },
- },
- Action: specs.ActErrno,
- },
- },
- },
- input: testInput(nativeArchAuditNo, "getsockopt", &[6]uint64{0, unix.SOL_SOCKET}),
- expected: uint32(allowAction),
- },
- {
- name: "Simple args (no match)",
- config: specs.LinuxSeccomp{
- DefaultAction: specs.ActAllow,
- Syscalls: []specs.LinuxSyscall{
- {
- Names: []string{
- "clone",
- },
- Args: []specs.LinuxSeccompArg{
- {
- Index: 0,
- Value: unix.CLONE_FS,
- Op: specs.OpEqualTo,
- },
- },
- Action: specs.ActErrno,
- },
- },
- },
- input: testInput(nativeArchAuditNo, "clone", &[6]uint64{unix.CLONE_VM}),
- expected: uint32(allowAction),
- },
- {
- name: "OpMaskedEqual (match)",
- config: specs.LinuxSeccomp{
- DefaultAction: specs.ActAllow,
- Syscalls: []specs.LinuxSyscall{
- {
- Names: []string{
- "clone",
- },
- Args: []specs.LinuxSeccompArg{
- {
- Index: 0,
- Value: unix.CLONE_FS,
- ValueTwo: unix.CLONE_FS,
- Op: specs.OpMaskedEqual,
- },
- },
- Action: specs.ActErrno,
- },
- },
- },
- input: testInput(nativeArchAuditNo, "clone", &[6]uint64{unix.CLONE_FS | unix.CLONE_VM}),
- expected: uint32(errnoAction),
- },
- {
- name: "OpMaskedEqual (no match)",
- config: specs.LinuxSeccomp{
- DefaultAction: specs.ActAllow,
- Syscalls: []specs.LinuxSyscall{
- {
- Names: []string{
- "clone",
- },
- Args: []specs.LinuxSeccompArg{
- {
- Index: 0,
- Value: unix.CLONE_FS | unix.CLONE_VM,
- ValueTwo: unix.CLONE_FS | unix.CLONE_VM,
- Op: specs.OpMaskedEqual,
- },
- },
- Action: specs.ActErrno,
- },
- },
- },
- input: testInput(nativeArchAuditNo, "clone", &[6]uint64{unix.CLONE_FS}),
- expected: uint32(allowAction),
- },
- {
- name: "OpMaskedEqual (clone)",
- config: specs.LinuxSeccomp{
- DefaultAction: specs.ActErrno,
- Syscalls: []specs.LinuxSyscall{
- {
- Names: []string{
- "clone",
- },
- // This comes from the Docker default seccomp
- // profile for clone.
- Args: []specs.LinuxSeccompArg{
- {
- Index: 0,
- Value: 0x7e020000,
- ValueTwo: 0x0,
- Op: specs.OpMaskedEqual,
- },
- },
- Action: specs.ActAllow,
- },
- },
- },
- input: testInput(nativeArchAuditNo, "clone", &[6]uint64{0x50f00}),
- expected: uint32(allowAction),
- },
- }
-)
-
-// TestRunscSeccomp generates seccomp programs from OCI config and executes
-// them using runsc's library, comparing against expected results.
-func TestRunscSeccomp(t *testing.T) {
- for _, tc := range seccompTests {
- t.Run(tc.name, func(t *testing.T) {
- runscProgram, err := BuildProgram(&tc.config)
- if err != nil {
- t.Fatalf("generating runsc BPF: %v", err)
- }
-
- if err := checkProgram(runscProgram, tc.input, tc.expected); err != nil {
- t.Fatalf("running runsc BPF: %v", err)
- }
- })
- }
-}
-
-// checkProgram runs the given program over the given input and checks the
-// result against the expected output.
-func checkProgram(p bpf.Program, in bpf.Input, expected uint32) error {
- result, err := bpf.Exec(p, in)
- if err != nil {
- return err
- }
-
- if result != expected {
- // Include a decoded version of the program in output for debugging purposes.
- decoded, _ := bpf.DecodeProgram(p)
- return fmt.Errorf("Unexpected result: got: %d, expected: %d\nBPF Program\n%s", result, expected, decoded)
- }
-
- return nil
-}
diff --git a/runsc/specutils/specutils_state_autogen.go b/runsc/specutils/specutils_state_autogen.go
new file mode 100644
index 000000000..11eefbaa2
--- /dev/null
+++ b/runsc/specutils/specutils_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package specutils
diff --git a/runsc/specutils/specutils_test.go b/runsc/specutils/specutils_test.go
deleted file mode 100644
index e2d3a75dc..000000000
--- a/runsc/specutils/specutils_test.go
+++ /dev/null
@@ -1,271 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package specutils
-
-import (
- "fmt"
- "os/exec"
- "strings"
- "testing"
- "time"
-
- specs "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-func TestWaitForReadyHappy(t *testing.T) {
- cmd := exec.Command("/bin/sleep", "1000")
- if err := cmd.Start(); err != nil {
- t.Fatalf("cmd.Start() failed, err: %v", err)
- }
- defer func() { _ = cmd.Wait() }()
-
- var count int
- err := WaitForReady(cmd.Process.Pid, 5*time.Second, func() (bool, error) {
- if count < 3 {
- count++
- return false, nil
- }
- return true, nil
- })
- if err != nil {
- t.Errorf("ProcessWaitReady got: %v, expected: nil", err)
- }
- if err := cmd.Process.Kill(); err != nil {
- t.Errorf("cmd.ProcessKill(): %v", err)
- }
-}
-
-func TestWaitForReadyFail(t *testing.T) {
- cmd := exec.Command("/bin/sleep", "1000")
- if err := cmd.Start(); err != nil {
- t.Fatalf("cmd.Start() failed, err: %v", err)
- }
- defer func() { _ = cmd.Wait() }()
-
- var count int
- err := WaitForReady(cmd.Process.Pid, 5*time.Second, func() (bool, error) {
- if count < 3 {
- count++
- return false, nil
- }
- return false, fmt.Errorf("fake error")
- })
- if err == nil {
- t.Errorf("ProcessWaitReady got: nil, expected: error")
- }
- if err := cmd.Process.Kill(); err != nil {
- t.Errorf("cmd.ProcessKill(): %v", err)
- }
-}
-
-func TestWaitForReadyNotRunning(t *testing.T) {
- cmd := exec.Command("/bin/true")
- if err := cmd.Start(); err != nil {
- t.Fatalf("cmd.Start() failed, err: %v", err)
- }
- defer func() { _ = cmd.Wait() }()
-
- err := WaitForReady(cmd.Process.Pid, 5*time.Second, func() (bool, error) {
- return false, nil
- })
- if err != nil && !strings.Contains(err.Error(), "terminated") {
- t.Errorf("ProcessWaitReady got: %v, expected: process terminated", err)
- }
- if err == nil {
- t.Errorf("ProcessWaitReady incorrectly succeeded")
- }
-}
-
-func TestWaitForReadyTimeout(t *testing.T) {
- cmd := exec.Command("/bin/sleep", "1000")
- if err := cmd.Start(); err != nil {
- t.Fatalf("cmd.Start() failed, err: %v", err)
- }
- defer func() { _ = cmd.Wait() }()
-
- err := WaitForReady(cmd.Process.Pid, 50*time.Millisecond, func() (bool, error) {
- return false, nil
- })
- if err == nil || !strings.Contains(err.Error(), "not running yet") {
- t.Errorf("ProcessWaitReady got: %v, expected: not running yet", err)
- }
- if err := cmd.Process.Kill(); err != nil {
- t.Errorf("cmd.ProcessKill(): %v", err)
- }
-}
-
-func TestSpecInvalid(t *testing.T) {
- for _, test := range []struct {
- name string
- spec specs.Spec
- error string
- }{
- {
- name: "valid",
- spec: specs.Spec{
- Root: &specs.Root{Path: "/"},
- Process: &specs.Process{
- Args: []string{"/bin/true"},
- },
- Mounts: []specs.Mount{
- {
- Source: "src",
- Destination: "/dst",
- },
- },
- },
- error: "",
- },
- {
- name: "valid+warning",
- spec: specs.Spec{
- Root: &specs.Root{Path: "/"},
- Process: &specs.Process{
- Args: []string{"/bin/true"},
- // This is normally set by docker and will just cause warnings to be logged.
- ApparmorProfile: "someprofile",
- },
- // This is normally set by docker and will just cause warnings to be logged.
- Linux: &specs.Linux{Seccomp: &specs.LinuxSeccomp{}},
- },
- error: "",
- },
- {
- name: "no root",
- spec: specs.Spec{
- Process: &specs.Process{
- Args: []string{"/bin/true"},
- },
- },
- error: "must be defined",
- },
- {
- name: "empty root",
- spec: specs.Spec{
- Root: &specs.Root{},
- Process: &specs.Process{
- Args: []string{"/bin/true"},
- },
- },
- error: "must be defined",
- },
- {
- name: "no process",
- spec: specs.Spec{
- Root: &specs.Root{Path: "/"},
- },
- error: "must be defined",
- },
- {
- name: "empty args",
- spec: specs.Spec{
- Root: &specs.Root{Path: "/"},
- Process: &specs.Process{},
- },
- error: "must be defined",
- },
- {
- name: "selinux",
- spec: specs.Spec{
- Root: &specs.Root{Path: "/"},
- Process: &specs.Process{
- Args: []string{"/bin/true"},
- SelinuxLabel: "somelabel",
- },
- },
- error: "is not supported",
- },
- {
- name: "solaris",
- spec: specs.Spec{
- Root: &specs.Root{Path: "/"},
- Process: &specs.Process{
- Args: []string{"/bin/true"},
- },
- Solaris: &specs.Solaris{},
- },
- error: "is not supported",
- },
- {
- name: "windows",
- spec: specs.Spec{
- Root: &specs.Root{Path: "/"},
- Process: &specs.Process{
- Args: []string{"/bin/true"},
- },
- Windows: &specs.Windows{},
- },
- error: "is not supported",
- },
- {
- name: "relative mount destination",
- spec: specs.Spec{
- Root: &specs.Root{Path: "/"},
- Process: &specs.Process{
- Args: []string{"/bin/true"},
- },
- Mounts: []specs.Mount{
- {
- Source: "src",
- Destination: "dst",
- },
- },
- },
- error: "must be an absolute path",
- },
- {
- name: "invalid mount option",
- spec: specs.Spec{
- Root: &specs.Root{Path: "/"},
- Process: &specs.Process{
- Args: []string{"/bin/true"},
- },
- Mounts: []specs.Mount{
- {
- Source: "/src",
- Destination: "/dst",
- Type: "bind",
- Options: []string{"shared"},
- },
- },
- },
- error: "is not supported",
- },
- {
- name: "invalid rootfs propagation",
- spec: specs.Spec{
- Root: &specs.Root{Path: "/"},
- Process: &specs.Process{
- Args: []string{"/bin/true"},
- },
- Linux: &specs.Linux{
- RootfsPropagation: "foo",
- },
- },
- error: "root mount propagation option must specify private or slave",
- },
- } {
- err := ValidateSpec(&test.spec)
- if len(test.error) == 0 {
- if err != nil {
- t.Errorf("ValidateSpec(%q) failed, err: %v", test.name, err)
- }
- } else {
- if err == nil || !strings.Contains(err.Error(), test.error) {
- t.Errorf("ValidateSpec(%q) wrong error, got: %v, want: .*%s.*", test.name, err, test.error)
- }
- }
- }
-}
diff --git a/runsc/version_test.sh b/runsc/version_test.sh
deleted file mode 100755
index 747350654..000000000
--- a/runsc/version_test.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-
-# Copyright 2018 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -euf -x -o pipefail
-
-readonly runsc="$1"
-readonly version=$($runsc --version)
-
-# Version should not match VERSION, which is the default and which will
-# also appear if something is wrong with the workspace_status.sh script.
-if [[ $version =~ "VERSION" ]]; then
- echo "FAIL: Got bad version $version"
- exit 1
-fi
-
-# Version should contain at least one number.
-if [[ ! $version =~ [0-9] ]]; then
- echo "FAIL: Got bad version $version"
- exit 1
-fi
-
-echo "PASS: Got OK version $version"
-exit 0
diff --git a/shim/BUILD b/shim/BUILD
deleted file mode 100644
index 695f61eb9..000000000
--- a/shim/BUILD
+++ /dev/null
@@ -1,26 +0,0 @@
-load("//tools:defs.bzl", "go_binary", "pkg_tar")
-
-package(licenses = ["notice"])
-
-go_binary(
- name = "containerd-shim-runsc-v1",
- srcs = ["main.go"],
- static = True,
- tags = ["staging"],
- visibility = [
- "//visibility:public",
- ],
- deps = ["//shim/cli"],
-)
-
-pkg_tar(
- name = "config",
- srcs = [
- "runsc.toml",
- ],
- mode = "0644",
- package_dir = "/etc/containerd",
- visibility = [
- "//visibility:public",
- ],
-)
diff --git a/shim/README.md b/shim/README.md
deleted file mode 100644
index 8ae33a272..000000000
--- a/shim/README.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# Shim Overview
-
-Integration with containerd is done via a [shim][shims]. The shim implements
-[containerd][containerd] shim v2 and is supported with containerd 1.3 and newer.
-[Here is how to get started](https://gvisor.dev/docs/user_guide/containerd/quick_start/).
-
-[containerd]: https://github.com/containerd/containerd
-[shims]: https://iximiuz.com/en/posts/implementing-container-runtime-shim/
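To illustrate the registration step that the README above points to: with containerd 1.3 or newer, the built `containerd-shim-runsc-v1` binary goes on the `PATH` and a runtime entry in `/etc/containerd/config.toml` points at it. The fragment below is a minimal sketch of that entry; see the quick start linked above for the authoritative, complete steps.

```toml
# /etc/containerd/config.toml (fragment)
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runsc]
  runtime_type = "io.containerd.runsc.v1"
```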
diff --git a/shim/cli/BUILD b/shim/cli/BUILD
deleted file mode 100644
index 665dcc5a1..000000000
--- a/shim/cli/BUILD
+++ /dev/null
@@ -1,16 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "cli",
- srcs = ["cli.go"],
- visibility = [
- "//:__pkg__",
- "//shim:__pkg__",
- ],
- deps = [
- "//pkg/shim",
- "@com_github_containerd_containerd//runtime/v2/shim:go_default_library",
- ],
-)
diff --git a/shim/cli/cli_state_autogen.go b/shim/cli/cli_state_autogen.go
new file mode 100644
index 000000000..e81991e0b
--- /dev/null
+++ b/shim/cli/cli_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package cli
diff --git a/shim/runsc.toml b/shim/runsc.toml
deleted file mode 100644
index e1c7de1bb..000000000
--- a/shim/runsc.toml
+++ /dev/null
@@ -1,6 +0,0 @@
-# This is an example configuration file for runsc.
-#
-# By default, it will be parsed from /etc/containerd/runsc.toml, but see the
-# static path configured in v1/main.go. Note that the configuration mechanism
-# for newer container shim versions is different: see the documentation in v2.
-[runsc_config]
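A slightly fuller sketch of what this file can hold, assuming that entries under `[runsc_config]` are passed to runsc as command-line flags (as the shim documentation describes). `debug` and `debug-log` are real runsc flags, but the values and layout here are only illustrative.

```toml
[runsc_config]
  debug = "true"
  debug-log = "/var/log/runsc/"
```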
diff --git a/test/BUILD b/test/BUILD
deleted file mode 100644
index 34b950644..000000000
--- a/test/BUILD
+++ /dev/null
@@ -1 +0,0 @@
-package(licenses = ["notice"])
diff --git a/test/README.md b/test/README.md
deleted file mode 100644
index 15b0f4c33..000000000
--- a/test/README.md
+++ /dev/null
@@ -1,40 +0,0 @@
-# Tests
-
-The tests defined under this path verify functionality beyond what unit tests
-can cover, e.g. integration and end-to-end tests. Due to their nature, they may
-need extra setup on the test machine and extra configuration to run.
-
-- **syscalls**: system call tests use a local runner, and do not require
- additional configuration in the machine.
-- **integration:** defines integration tests that use `docker run` to test
-  functionality.
-- **image:** basic end to end test for popular images. These require the same
- setup as integration tests.
-- **root:** tests that must be run as root. These require the same setup as
-  integration tests.
-- **util:** utilities library to support the tests.
-
-For the cases noted above, the relevant runtime must be installed via `runsc
-install` before running. Note that these tests require specific configuration
-to work. This is handled automatically by the test scripts in the `scripts`
-directory, which can be used to run tests locally on your machine. They are
-also used to run these tests in `kokoro`.
-
-**Example:**
-
-To run image and integration tests, run:
-
-`make docker-tests`
-
-To run root tests, run:
-
-`make root-tests`
-
-There are a few other interesting variations for image and integration tests:
-
-* overlay: sets a writable overlay inside the sentry
-* hostnet: configures host network pass-through, instead of netstack
-* kvm: runs the tests using the KVM platform, instead of ptrace
-
-The test scripts will build runsc, configure it with your local docker, restart
-`dockerd`, and run the tests. The location of the runsc logs is printed to the output.
diff --git a/test/benchmarks/BUILD b/test/benchmarks/BUILD
deleted file mode 100644
index faf310676..000000000
--- a/test/benchmarks/BUILD
+++ /dev/null
@@ -1,11 +0,0 @@
-load("//tools:defs.bzl", "bzl_library")
-
-package(licenses = ["notice"])
-
-bzl_library(
- name = "defs_bzl",
- srcs = ["defs.bzl"],
- visibility = [
- "//:sandbox",
- ],
-)
diff --git a/test/benchmarks/README.md b/test/benchmarks/README.md
deleted file mode 100644
index c745a5b1e..000000000
--- a/test/benchmarks/README.md
+++ /dev/null
@@ -1,129 +0,0 @@
-# Benchmark tools
-
-This package and its subpackages are for running macro benchmarks on `runsc`.
-They are meant to replace the previous //benchmarks benchmark-tools, which were
-written in Python.
-
-Benchmarks are meant to look like regular golang benchmarks using the testing.B
-library.
-
-## Setup
-
-To run benchmarks you will need:
-
-* Docker installed (17.09.0 or greater).
-
-## Running benchmarks
-
-To run, use the Makefile:
-
-- Install runsc as a runtime: `make dev`
- - The above command will place several configurations of runsc in your
- /etc/docker/daemon.json file. Choose one without the debug option set.
-- Run your benchmark: `make run-benchmark
- RUNTIME=[RUNTIME_FROM_DAEMON.JSON/runc]
-  BENCHMARKS_TARGETS=//path/to/target`
-- Additionally, you can benchmark several platforms in one command:
-
-```
-make benchmark-platforms BENCHMARKS_PLATFORMS=ptrace,kvm \
-BENCHMARKS_TARGETS=//path/to/target
-```
-
-The above command will install runtimes and run benchmarks on the ptrace and
-kvm platforms, as well as run the benchmarks on native runc.
-
-Benchmarks are run as root, as some benchmarks require root privileges to do
-things like drop caches.
-
-## Writing benchmarks
-
-Benchmarks consist of docker images as Dockerfiles and golang testing.B
-benchmarks.
-
-### Dockerfiles:
-
-* Are stored at //images.
-* New Dockerfiles go in an appropriately named directory at
- `//images/benchmarks/my-cool-dockerfile`.
-* Dockerfiles for benchmarks should:
- * Use explicitly versioned packages.
-    * Avoid ENV and CMD statements. It is easy to add these in the API via
-      `dockerutil.RunOpts`.
-* Note: A common pattern for getting access to a tmpfs mount is to copy files
- there after container start. See: //test/benchmarks/build/bazel_test.go. You
- can also make your own with `RunOpts.Mounts`.
-
-### testing.B packages
-
-In general, benchmarks should look like this:
-
-```golang
-func BenchmarkMyCoolOne(b *testing.B) {
- machine, err := harness.GetMachine()
- // check err
- defer machine.CleanUp()
-
- ctx := context.Background()
- container := machine.GetContainer(ctx, b)
- defer container.CleanUp(ctx)
-
- b.ResetTimer()
-
- // Respect b.N.
- for i := 0; i < b.N; i++ {
- out, err := container.Run(ctx, dockerutil.RunOpts{
- Image: "benchmarks/my-cool-image",
- Env: []string{"MY_VAR=awesome"},
- // other options...see dockerutil
- }, "sh", "-c", "echo MY_VAR")
- // check err...
- b.StopTimer()
-
- // Do parsing and reporting outside of the timer.
- number := parseMyMetric(out)
- b.ReportMetric(number, "my-cool-custom-metric")
-
- b.StartTimer()
- }
-}
-
-func TestMain(m *testing.M) {
- harness.Init()
- os.Exit(m.Run())
-}
-```
-
-Some notes on the above:
-
-* Respect and linearly scale by `b.N` so that users can run a number of times
-  (--benchtime=10x) or for a time duration (--benchtime=1m). For many
-  benchmarks, this is just the runtime of the container under test. Sometimes
-  this is a parameter to the container itself. For example, the httpd benchmark
-  (and most client-server benchmarks) uses b.N as a parameter to the client
-  container that specifies how many requests to make to the server.
-* Use the `b.ReportMetric()` method to report custom metrics.
-* Never turn the timer off entirely, but stop and restart it (b.StopTimer /
-  b.StartTimer) where useful for the benchmark. There isn't a way to turn off
-  the default metrics in testing.B (B/op, allocs/op, ns/op).
-* Take a look at dockerutil at //pkg/test/dockerutil to see all methods
- available from containers. The API is based on the "official"
- [docker API for golang](https://pkg.go.dev/mod/github.com/docker/docker).
-* `harness.GetMachine()` marks how many machines this test needs. If you have
-  a client and a server and want to mark them as separate machines, call
-  `harness.GetMachine()` twice.
-
-## Profiling
-
-For profiling, the runtime is required to have the `--profile` flag enabled.
-This flag loosens seccomp filters so that the runtime can write profile data to
-disk. This configuration is not recommended for production.
-
-To profile, simply run the `benchmark-platforms` command from above and profiles
-will be in /tmp/profile.
-
-Or run with: `make run-benchmark RUNTIME=[RUNTIME_UNDER_TEST]
-BENCHMARKS_TARGETS=//path/to/target`
-
-Profiles will be in /tmp/profile. Note: runtimes must have the `--profile` flag
-set in /etc/docker/daemon.conf and profiling will not work on runc.
diff --git a/test/benchmarks/base/BUILD b/test/benchmarks/base/BUILD
deleted file mode 100644
index a5a3cf2c1..000000000
--- a/test/benchmarks/base/BUILD
+++ /dev/null
@@ -1,50 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-load("//test/benchmarks:defs.bzl", "benchmark_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "base",
- testonly = 1,
- srcs = [
- "base.go",
- ],
- deps = [
- "//pkg/test/dockerutil",
- "//test/benchmarks/harness",
- ],
-)
-
-benchmark_test(
- name = "startup_test",
- srcs = ["startup_test.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/test/dockerutil",
- "//test/benchmarks/base",
- "//test/benchmarks/harness",
- ],
-)
-
-benchmark_test(
- name = "size_test",
- srcs = ["size_test.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/test/dockerutil",
- "//test/benchmarks/base",
- "//test/benchmarks/harness",
- "//test/benchmarks/tools",
- ],
-)
-
-benchmark_test(
- name = "sysbench_test",
- srcs = ["sysbench_test.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/test/dockerutil",
- "//test/benchmarks/harness",
- "//test/benchmarks/tools",
- ],
-)
diff --git a/test/benchmarks/base/base.go b/test/benchmarks/base/base.go
deleted file mode 100644
index 979564af9..000000000
--- a/test/benchmarks/base/base.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package base holds utility methods common to the base tests.
-package base
-
-import (
- "context"
- "net"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/test/benchmarks/harness"
-)
-
-// ServerArgs wraps args for startServers and runServerWorkload.
-type ServerArgs struct {
- Machine harness.Machine
- Port int
- RunOpts dockerutil.RunOpts
- Cmd []string
-}
-
-// StartServers starts b.N containers defined by args.RunOpts and args.Cmd, and
-// uses args.Machine to check that each is up.
-func StartServers(ctx context.Context, b *testing.B, args ServerArgs) []*dockerutil.Container {
- b.Helper()
- servers := make([]*dockerutil.Container, 0, b.N)
-
- // Create N servers and wait until each of them is serving.
- for i := 0; i < b.N; i++ {
- server := args.Machine.GetContainer(ctx, b)
- servers = append(servers, server)
- if err := server.Spawn(ctx, args.RunOpts, args.Cmd...); err != nil {
- CleanUpContainers(ctx, servers)
-			b.Fatalf("failed to spawn server instance: %v", err)
- }
-
- // Get the container IP.
- servingIP, err := server.FindIP(ctx, false)
- if err != nil {
- CleanUpContainers(ctx, servers)
- b.Fatalf("failed to get ip from server: %v", err)
- }
-
- // Wait until the server is up.
- if err := harness.WaitUntilServing(ctx, args.Machine, servingIP, args.Port); err != nil {
- CleanUpContainers(ctx, servers)
-			b.Fatalf("failed to wait for serving: %v", err)
- }
- }
- return servers
-}
-
-// CleanUpContainers cleans up a slice of containers.
-func CleanUpContainers(ctx context.Context, containers []*dockerutil.Container) {
- for _, c := range containers {
- if c != nil {
- c.CleanUp(ctx)
- }
- }
-}
-
-// RedisInstance returns a Redis container and its reachable IP.
-func RedisInstance(ctx context.Context, b *testing.B, machine harness.Machine) (*dockerutil.Container, net.IP) {
- b.Helper()
- // Spawn a redis instance for the app to use.
- redis := machine.GetNativeContainer(ctx, b)
- if err := redis.Spawn(ctx, dockerutil.RunOpts{
- Image: "benchmarks/redis",
- }); err != nil {
- redis.CleanUp(ctx)
-		b.Fatalf("failed to spawn redis instance: %v", err)
- }
-
- if out, err := redis.WaitForOutput(ctx, "Ready to accept connections", 3*time.Second); err != nil {
- redis.CleanUp(ctx)
- b.Fatalf("failed to start redis server: %v %s", err, out)
- }
- redisIP, err := redis.FindIP(ctx, false)
- if err != nil {
- redis.CleanUp(ctx)
- b.Fatalf("failed to get IP from redis instance: %v", err)
- }
- return redis, redisIP
-}
diff --git a/test/benchmarks/base/size_test.go b/test/benchmarks/base/size_test.go
deleted file mode 100644
index 452926e5f..000000000
--- a/test/benchmarks/base/size_test.go
+++ /dev/null
@@ -1,181 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package size_test
-
-import (
- "context"
- "os"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/test/benchmarks/base"
- "gvisor.dev/gvisor/test/benchmarks/harness"
- "gvisor.dev/gvisor/test/benchmarks/tools"
-)
-
-// BenchmarkSizeEmpty creates N empty containers and reads memory usage from
-// /proc/meminfo.
-func BenchmarkSizeEmpty(b *testing.B) {
- machine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine: %v", err)
- }
- defer machine.CleanUp()
- meminfo := tools.Meminfo{}
- ctx := context.Background()
- containers := make([]*dockerutil.Container, 0, b.N)
-
- // DropCaches before the test.
- harness.DropCaches(machine)
-
- // Check available memory on 'machine'.
- cmd, args := meminfo.MakeCmd()
- before, err := machine.RunCommand(cmd, args...)
- if err != nil {
- b.Fatalf("failed to get meminfo: %v", err)
- }
-
- // Make N containers.
- for i := 0; i < b.N; i++ {
- container := machine.GetContainer(ctx, b)
- containers = append(containers, container)
- if err := container.Spawn(ctx, dockerutil.RunOpts{
- Image: "benchmarks/alpine",
- }, "sh", "-c", "echo Hello && sleep 1000"); err != nil {
- base.CleanUpContainers(ctx, containers)
- b.Fatalf("failed to run container: %v", err)
- }
- if _, err := container.WaitForOutputSubmatch(ctx, "Hello", 5*time.Second); err != nil {
- base.CleanUpContainers(ctx, containers)
- b.Fatalf("failed to read container output: %v", err)
- }
- }
-
- // Drop caches again before second measurement.
- harness.DropCaches(machine)
-
- // Check available memory after containers are up.
- after, err := machine.RunCommand(cmd, args...)
- base.CleanUpContainers(ctx, containers)
- if err != nil {
- b.Fatalf("failed to get meminfo: %v", err)
- }
- meminfo.Report(b, before, after)
-}
-
-// BenchmarkSizeNginx starts N containers running Nginx, checks that they're
-// serving, and checks memory used based on /proc/meminfo.
-func BenchmarkSizeNginx(b *testing.B) {
- machine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine with: %v", err)
- }
- defer machine.CleanUp()
-
- // DropCaches for the first measurement.
- harness.DropCaches(machine)
-
- // Measure MemAvailable before creating containers.
- meminfo := tools.Meminfo{}
- cmd, args := meminfo.MakeCmd()
- before, err := machine.RunCommand(cmd, args...)
- if err != nil {
- b.Fatalf("failed to run meminfo command: %v", err)
- }
-
- // Make N Nginx containers.
- ctx := context.Background()
- runOpts := dockerutil.RunOpts{
- Image: "benchmarks/nginx",
- }
- const port = 80
- servers := base.StartServers(ctx, b,
- base.ServerArgs{
- Machine: machine,
- Port: port,
- RunOpts: runOpts,
- Cmd: []string{"nginx", "-c", "/etc/nginx/nginx_gofer.conf"},
- })
- defer base.CleanUpContainers(ctx, servers)
-
- // DropCaches after servers are created.
- harness.DropCaches(machine)
- // Take after measurement.
- after, err := machine.RunCommand(cmd, args...)
- if err != nil {
- b.Fatalf("failed to run meminfo command: %v", err)
- }
- meminfo.Report(b, before, after)
-}
-
-// BenchmarkSizeNode starts N containers running a Node app, checks that
-// they're serving, and checks memory used based on /proc/meminfo.
-func BenchmarkSizeNode(b *testing.B) {
- machine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine with: %v", err)
- }
- defer machine.CleanUp()
-
- // Make a redis instance for Node to connect.
- ctx := context.Background()
- redis, redisIP := base.RedisInstance(ctx, b, machine)
- defer redis.CleanUp(ctx)
-
- // DropCaches after redis is created.
- harness.DropCaches(machine)
-
- // Take before measurement.
- meminfo := tools.Meminfo{}
- cmd, args := meminfo.MakeCmd()
- before, err := machine.RunCommand(cmd, args...)
- if err != nil {
-		b.Fatalf("failed to run meminfo command: %v", err)
- }
-
- // Create N Node servers.
- runOpts := dockerutil.RunOpts{
- Image: "benchmarks/node",
- WorkDir: "/usr/src/app",
- Links: []string{redis.MakeLink("redis")},
- }
- nodeCmd := []string{"node", "index.js", redisIP.String()}
- const port = 8080
- servers := base.StartServers(ctx, b,
- base.ServerArgs{
- Machine: machine,
- Port: port,
- RunOpts: runOpts,
- Cmd: nodeCmd,
- })
- defer base.CleanUpContainers(ctx, servers)
-
- // DropCaches after servers are created.
- harness.DropCaches(machine)
- // Take after measurement.
- cmd, args = meminfo.MakeCmd()
- after, err := machine.RunCommand(cmd, args...)
- if err != nil {
- b.Fatalf("failed to run meminfo command: %v", err)
- }
- meminfo.Report(b, before, after)
-}
-
-// TestMain is the main method for package size_test.
-func TestMain(m *testing.M) {
- harness.Init()
- os.Exit(m.Run())
-}
diff --git a/test/benchmarks/base/startup_test.go b/test/benchmarks/base/startup_test.go
deleted file mode 100644
index 05a43ad17..000000000
--- a/test/benchmarks/base/startup_test.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package startup_test
-
-import (
- "context"
- "fmt"
- "os"
- "testing"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/test/benchmarks/base"
- "gvisor.dev/gvisor/test/benchmarks/harness"
-)
-
-// BenchmarkStartupEmpty measures startup time for an empty container.
-func BenchmarkStartupEmpty(b *testing.B) {
- machine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine: %v", err)
- }
- defer machine.CleanUp()
-
- ctx := context.Background()
- for i := 0; i < b.N; i++ {
- harness.DebugLog(b, "Running container: %d", i)
- container := machine.GetContainer(ctx, b)
- defer container.CleanUp(ctx)
- if _, err := container.Run(ctx, dockerutil.RunOpts{
- Image: "benchmarks/alpine",
- }, "true"); err != nil {
- b.Fatalf("failed to run container: %v", err)
- }
- harness.DebugLog(b, "Ran container: %d", i)
- }
-}
-
-// BenchmarkStartupNginx times startup for a Nginx instance.
-// Time is measured from start until the first request is served.
-func BenchmarkStartupNginx(b *testing.B) {
-	// The machine to hold the Nginx server.
- machine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine with: %v", err)
- }
- defer machine.CleanUp()
-
- ctx := context.Background()
- runOpts := dockerutil.RunOpts{
- Image: "benchmarks/nginx",
- }
- runServerWorkload(ctx, b,
- base.ServerArgs{
- Machine: machine,
- RunOpts: runOpts,
- Port: 80,
- Cmd: []string{"nginx", "-c", "/etc/nginx/nginx_gofer.conf"},
- })
-}
-
-// BenchmarkStartupNode times startup for a Node application instance.
-// Time is measured from start until the first request is served.
-// Note that the Node app connects to a Redis instance before serving.
-func BenchmarkStartupNode(b *testing.B) {
- machine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine with: %v", err)
- }
- defer machine.CleanUp()
-
- ctx := context.Background()
- redis, redisIP := base.RedisInstance(ctx, b, machine)
- defer redis.CleanUp(ctx)
- runOpts := dockerutil.RunOpts{
- Image: "benchmarks/node",
- WorkDir: "/usr/src/app",
- Links: []string{redis.MakeLink("redis")},
- }
-
- cmd := []string{"node", "index.js", redisIP.String()}
- runServerWorkload(ctx, b,
- base.ServerArgs{
- Machine: machine,
- Port: 8080,
- RunOpts: runOpts,
- Cmd: cmd,
- })
-}
-
-// runServerWorkload runs a server workload defined by args.RunOpts and
-// args.Cmd. args.Machine is used to check that the server is up and serving.
-func runServerWorkload(ctx context.Context, b *testing.B, args base.ServerArgs) {
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- harness.DebugLog(b, "Running iteration: %d", i)
- if err := func() error {
- server := args.Machine.GetContainer(ctx, b)
- defer func() {
- b.StopTimer()
- // Cleanup servers as we run so that we can go indefinitely.
- server.CleanUp(ctx)
- b.StartTimer()
- }()
- harness.DebugLog(b, "Spawning container: %s", args.RunOpts.Image)
- if err := server.Spawn(ctx, args.RunOpts, args.Cmd...); err != nil {
-				return fmt.Errorf("failed to spawn server instance: %v", err)
- }
-
- harness.DebugLog(b, "Finding Container IP")
- servingIP, err := server.FindIP(ctx, false)
- if err != nil {
- return fmt.Errorf("failed to get ip from server: %v", err)
- }
-
- // Wait until the Client sees the server as up.
- harness.DebugLog(b, "Waiting for container to start.")
- if err := harness.WaitUntilServing(ctx, args.Machine, servingIP, args.Port); err != nil {
- return fmt.Errorf("failed to wait for serving: %v", err)
- }
- return nil
- }(); err != nil {
- b.Fatal(err)
- }
- harness.DebugLog(b, "Ran iteration: %d", i)
- }
-}
-
-// TestMain is the main method for package startup_test.
-func TestMain(m *testing.M) {
- harness.Init()
- os.Exit(m.Run())
-}
diff --git a/test/benchmarks/base/sysbench_test.go b/test/benchmarks/base/sysbench_test.go
deleted file mode 100644
index d0f3f9261..000000000
--- a/test/benchmarks/base/sysbench_test.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package sysbench_test
-
-import (
- "context"
- "testing"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/test/benchmarks/harness"
- "gvisor.dev/gvisor/test/benchmarks/tools"
-)
-
-type testCase struct {
- name string
- test tools.Sysbench
-}
-
-// BenchmarkSysbench runs sysbench on the runtime.
-func BenchmarkSysbench(b *testing.B) {
- testCases := []testCase{
- {
- name: "CPU",
- test: &tools.SysbenchCPU{
- SysbenchBase: tools.SysbenchBase{
- Threads: 1,
- },
- },
- },
- {
- name: "Memory",
- test: &tools.SysbenchMemory{
- SysbenchBase: tools.SysbenchBase{
- Threads: 1,
- },
- },
- },
- {
- name: "Mutex",
- test: &tools.SysbenchMutex{
- SysbenchBase: tools.SysbenchBase{
- Threads: 8,
- },
- },
- },
- }
-
- machine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine: %v", err)
- }
- defer machine.CleanUp()
-
- for _, tc := range testCases {
- param := tools.Parameter{
- Name: "testname",
- Value: tc.name,
- }
- name, err := tools.ParametersToName(param)
- if err != nil {
- b.Fatalf("Failed to parse params: %v", err)
- }
- b.Run(name, func(b *testing.B) {
- ctx := context.Background()
- sysbench := machine.GetContainer(ctx, b)
- defer sysbench.CleanUp(ctx)
-
- cmd := tc.test.MakeCmd(b)
- b.ResetTimer()
- out, err := sysbench.Run(ctx, dockerutil.RunOpts{
- Image: "benchmarks/sysbench",
- }, cmd...)
- if err != nil {
- b.Fatalf("failed to run sysbench: %v: logs:%s", err, out)
- }
- b.StopTimer()
- tc.test.Report(b, out)
- })
- }
-}
diff --git a/test/benchmarks/database/BUILD b/test/benchmarks/database/BUILD
deleted file mode 100644
index fee2695ff..000000000
--- a/test/benchmarks/database/BUILD
+++ /dev/null
@@ -1,22 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-load("//test/benchmarks:defs.bzl", "benchmark_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "database",
- testonly = 1,
- srcs = ["database.go"],
-)
-
-benchmark_test(
- name = "redis_test",
- srcs = ["redis_test.go"],
- library = ":database",
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/test/dockerutil",
- "//test/benchmarks/harness",
- "//test/benchmarks/tools",
- ],
-)
diff --git a/test/benchmarks/database/database.go b/test/benchmarks/database/database.go
deleted file mode 100644
index c15ca661c..000000000
--- a/test/benchmarks/database/database.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package database holds benchmarks around database applications.
-package database
diff --git a/test/benchmarks/database/redis_test.go b/test/benchmarks/database/redis_test.go
deleted file mode 100644
index 7497f69b3..000000000
--- a/test/benchmarks/database/redis_test.go
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package database
-
-import (
- "context"
- "os"
- "strings"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/test/benchmarks/harness"
- "gvisor.dev/gvisor/test/benchmarks/tools"
-)
-
-// All possible operations from redis. Note: "ping" will
-// run both PING_INLINE and PING_BULK.
-var operations []string = []string{
- "PING_INLINE",
- "PING_BULK",
- "SET",
- "GET",
- "INCR",
- "LPUSH",
- "RPUSH",
- "LPOP",
- "RPOP",
- "SADD",
- "HSET",
- "SPOP",
- "LRANGE_100",
- "LRANGE_300",
- "LRANGE_500",
- "LRANGE_600",
- "MSET",
-}
-
-// BenchmarkRedis runs redis-benchmark against a redis instance and reports
-// data in queries per second. Each operation is reported by name (e.g. LPUSH).
-func BenchmarkRedis(b *testing.B) {
- clientMachine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine: %v", err)
- }
- defer clientMachine.CleanUp()
-
- serverMachine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine: %v", err)
- }
- defer serverMachine.CleanUp()
-
- // Redis runs on port 6379 by default.
- port := 6379
- ctx := context.Background()
- server := serverMachine.GetContainer(ctx, b)
- defer server.CleanUp(ctx)
-
- // The redis docker container takes no arguments to run a redis server.
- if err := server.Spawn(ctx, dockerutil.RunOpts{
- Image: "benchmarks/redis",
- Ports: []int{port},
- }); err != nil {
- b.Fatalf("failed to start redis server with: %v", err)
- }
-
- if out, err := server.WaitForOutput(ctx, "Ready to accept connections", 3*time.Second); err != nil {
- b.Fatalf("failed to start redis server: %v %s", err, out)
- }
-
- ip, err := serverMachine.IPAddress()
- if err != nil {
- b.Fatalf("failed to get IP from server: %v", err)
- }
-
- serverPort, err := server.FindPort(ctx, port)
- if err != nil {
- b.Fatalf("failed to get IP from server: %v", err)
- }
-
- if err = harness.WaitUntilServing(ctx, clientMachine, ip, serverPort); err != nil {
- b.Fatalf("failed to start redis with: %v", err)
- }
- for _, operation := range operations {
- param := tools.Parameter{
- Name: "operation",
- Value: operation,
- }
- name, err := tools.ParametersToName(param)
- if err != nil {
- b.Fatalf("Failed to parse paramaters: %v", err)
- }
-
- b.Run(name, func(b *testing.B) {
- redis := tools.Redis{
- Operation: operation,
- }
-
- // Sometimes, the connection between the redis client and server can be
- // flaky such that the client returns infinity as the QPS measurement for
- // a given operation. If this happens, retry the client up to 3 times.
- out := "inf"
- for retries := 0; strings.Contains(out, "inf") && retries < 3; retries++ {
- b.ResetTimer()
- client := clientMachine.GetNativeContainer(ctx, b)
- defer client.CleanUp(ctx)
-
- out, err = client.Run(ctx, dockerutil.RunOpts{
- Image: "benchmarks/redis",
- }, redis.MakeCmd(ip, serverPort, b.N /*requests*/)...)
- }
-
- if err != nil {
- b.Fatalf("redis-benchmark failed with: %v", err)
- }
-
- b.StopTimer()
- redis.Report(b, out)
- })
- }
-}
-
-func TestMain(m *testing.M) {
- harness.Init()
- os.Exit(m.Run())
-}
diff --git a/test/benchmarks/defs.bzl b/test/benchmarks/defs.bzl
deleted file mode 100644
index ef44b46e3..000000000
--- a/test/benchmarks/defs.bzl
+++ /dev/null
@@ -1,14 +0,0 @@
-"""Defines a rule for benchmark test targets."""
-
-load("//tools:defs.bzl", "go_test")
-
-def benchmark_test(name, tags = [], **kwargs):
- go_test(
- name,
- tags = [
- # Requires docker and runsc to be configured before the test runs.
- "local",
- "manual",
- ],
- **kwargs
- )
diff --git a/test/benchmarks/fs/BUILD b/test/benchmarks/fs/BUILD
deleted file mode 100644
index c2b981a07..000000000
--- a/test/benchmarks/fs/BUILD
+++ /dev/null
@@ -1,29 +0,0 @@
-load("//test/benchmarks:defs.bzl", "benchmark_test")
-
-package(licenses = ["notice"])
-
-benchmark_test(
- name = "bazel_test",
- srcs = ["bazel_test.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/cleanup",
- "//pkg/test/dockerutil",
- "//test/benchmarks/harness",
- "//test/benchmarks/tools",
- "@com_github_docker_docker//api/types/mount:go_default_library",
- ],
-)
-
-benchmark_test(
- name = "fio_test",
- srcs = ["fio_test.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/cleanup",
- "//pkg/test/dockerutil",
- "//test/benchmarks/harness",
- "//test/benchmarks/tools",
- "@com_github_docker_docker//api/types/mount:go_default_library",
- ],
-)
diff --git a/test/benchmarks/fs/bazel_test.go b/test/benchmarks/fs/bazel_test.go
deleted file mode 100644
index 797b1952d..000000000
--- a/test/benchmarks/fs/bazel_test.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-package bazel_test
-
-import (
- "context"
- "fmt"
- "os"
- "strings"
- "testing"
-
- "gvisor.dev/gvisor/pkg/cleanup"
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/test/benchmarks/harness"
- "gvisor.dev/gvisor/test/benchmarks/tools"
-)
-
-// Dimensions here are clean/dirty cache (do or don't drop caches)
-// and whether the mount on which we compile is a bind, tmpfs, or root mount.
-type benchmark struct {
- clearCache bool // clearCache drops caches before running.
- fstype harness.FileSystemType // type of filesystem to use.
-}
-
-// Note: CleanCache versions of this test require running with root permissions.
-func BenchmarkBuildABSL(b *testing.B) {
- runBuildBenchmark(b, "benchmarks/absl", "/abseil-cpp", "absl/base/...")
-}
-
-// Note: CleanCache versions of this test require running with root permissions.
-// Note: This test takes on the order of 10m per permutation for runsc on kvm.
-func BenchmarkBuildRunsc(b *testing.B) {
- runBuildBenchmark(b, "benchmarks/runsc", "/gvisor", "runsc:runsc")
-}
-
-func runBuildBenchmark(b *testing.B, image, workdir, target string) {
- b.Helper()
- // Get a machine from the Harness on which to run.
- machine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("Failed to get machine: %v", err)
- }
- defer machine.CleanUp()
-
- benchmarks := make([]benchmark, 0, 6)
- for _, filesys := range []harness.FileSystemType{harness.BindFS, harness.TmpFS, harness.RootFS} {
- benchmarks = append(benchmarks, benchmark{
- clearCache: true,
- fstype: filesys,
- })
- benchmarks = append(benchmarks, benchmark{
- clearCache: false,
- fstype: filesys,
- })
- }
-
- for _, bm := range benchmarks {
- pageCache := tools.Parameter{
- Name: "page_cache",
- Value: "dirty",
- }
- if bm.clearCache {
- pageCache.Value = "clean"
- }
-
- filesystem := tools.Parameter{
- Name: "filesystem",
- Value: string(bm.fstype),
- }
- name, err := tools.ParametersToName(pageCache, filesystem)
- if err != nil {
- b.Fatalf("Failed to parse parameters: %v", err)
- }
-
- b.Run(name, func(b *testing.B) {
- // Grab a container.
- ctx := context.Background()
- container := machine.GetContainer(ctx, b)
- cu := cleanup.Make(func() {
- container.CleanUp(ctx)
- })
- defer cu.Clean()
- mts, prefix, err := harness.MakeMount(machine, bm.fstype, &cu)
- if err != nil {
- b.Fatalf("Failed to make mount: %v", err)
- }
-
- runOpts := dockerutil.RunOpts{
- Image: image,
- Mounts: mts,
- }
-
- // Start a container and sleep.
- if err := container.Spawn(ctx, runOpts, "sleep", fmt.Sprintf("%d", 1000000)); err != nil {
- b.Fatalf("run failed with: %v", err)
- }
-
- cpCmd := fmt.Sprintf("mkdir -p %s && cp -r %s %s/.", prefix, workdir, prefix)
- if out, err := container.Exec(ctx, dockerutil.ExecOpts{},
- "/bin/sh", "-c", cpCmd); err != nil {
- b.Fatalf("failed to copy directory: %v (%s)", err, out)
- }
-
- b.ResetTimer()
- b.StopTimer()
-
- // Dropping caches and running bazel clean should happen inside the loop, as
- // we may use time-based options with b.N (e.g. run for an hour).
- for i := 0; i < b.N; i++ {
- // Drop Caches for clear cache runs.
- if bm.clearCache {
- if err := harness.DropCaches(machine); err != nil {
- b.Skipf("failed to drop caches: %v. You probably need root.", err)
- }
- }
-
- b.StartTimer()
- got, err := container.Exec(ctx, dockerutil.ExecOpts{
- WorkDir: prefix + workdir,
- }, "bazel", "build", "-c", "opt", target)
- if err != nil {
- b.Fatalf("build failed with: %v logs: %s", err, got)
- }
- b.StopTimer()
-
- want := "Build completed successfully"
- if !strings.Contains(got, want) {
- b.Fatalf("string %s not in: %s", want, got)
- }
-
- // Clean bazel in the case we are doing another run.
- if i < b.N-1 {
- if _, err = container.Exec(ctx, dockerutil.ExecOpts{
- WorkDir: prefix + workdir,
- }, "bazel", "clean"); err != nil {
- b.Fatalf("build failed with: %v", err)
- }
- }
- }
- })
- }
-}
-
-// TestMain is the main method for package fs.
-func TestMain(m *testing.M) {
- harness.Init()
- harness.SetFixedBenchmarks()
- os.Exit(m.Run())
-}
diff --git a/test/benchmarks/fs/fio_test.go b/test/benchmarks/fs/fio_test.go
deleted file mode 100644
index 1482466f4..000000000
--- a/test/benchmarks/fs/fio_test.go
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-package fio_test
-
-import (
- "context"
- "fmt"
- "os"
- "path/filepath"
- "strings"
- "testing"
-
- "gvisor.dev/gvisor/pkg/cleanup"
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/test/benchmarks/harness"
- "gvisor.dev/gvisor/test/benchmarks/tools"
-)
-
-// BenchmarkFio runs fio on the runtime under test. There are six test
-// cases, each run on bind, tmpfs, and rootfs mounts. Fio requires root so that
-// caches can be dropped.
-func BenchmarkFio(b *testing.B) {
- testCases := []tools.Fio{
- {
- Test: "write",
- BlockSize: 4,
- IODepth: 4,
- },
- {
- Test: "write",
- BlockSize: 1024,
- IODepth: 4,
- },
- {
- Test: "read",
- BlockSize: 4,
- IODepth: 4,
- },
- {
- Test: "read",
- BlockSize: 1024,
- IODepth: 4,
- },
- {
- Test: "randwrite",
- BlockSize: 4,
- IODepth: 4,
- },
- {
- Test: "randread",
- BlockSize: 4,
- IODepth: 4,
- },
- }
-
- machine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine with: %v", err)
- }
- defer machine.CleanUp()
-
- for _, fsType := range []harness.FileSystemType{harness.BindFS, harness.TmpFS, harness.RootFS} {
- for _, tc := range testCases {
- operation := tools.Parameter{
- Name: "operation",
- Value: tc.Test,
- }
- blockSize := tools.Parameter{
- Name: "blockSize",
- Value: fmt.Sprintf("%dK", tc.BlockSize),
- }
- filesystem := tools.Parameter{
- Name: "filesystem",
- Value: string(fsType),
- }
- name, err := tools.ParametersToName(operation, blockSize, filesystem)
- if err != nil {
- b.Fatalf("Failed to parser paramters: %v", err)
- }
- b.Run(name, func(b *testing.B) {
- b.StopTimer()
- tc.Size = b.N
-
- ctx := context.Background()
- container := machine.GetContainer(ctx, b)
- cu := cleanup.Make(func() {
- container.CleanUp(ctx)
- })
- defer cu.Clean()
-
- mnts, outdir, err := harness.MakeMount(machine, fsType, &cu)
- if err != nil {
- b.Fatalf("failed to make mount: %v", err)
- }
-
- // Start the container with the mount.
- if err := container.Spawn(
- ctx, dockerutil.RunOpts{
- Image: "benchmarks/fio",
- Mounts: mnts,
- },
- // Sleep on the order of b.N.
- "sleep", fmt.Sprintf("%d", 1000*b.N),
- ); err != nil {
- b.Fatalf("failed to start fio container with: %v", err)
- }
-
- if out, err := container.Exec(ctx, dockerutil.ExecOpts{},
- "mkdir", "-p", outdir); err != nil {
- b.Fatalf("failed to copy directory: %v (%s)", err, out)
- }
-
- // Directory and filename inside container where fio will read/write.
- outfile := filepath.Join(outdir, "test.txt")
-
- // For reads, we need a file to read so make one inside the container.
- if strings.Contains(tc.Test, "read") {
- fallocateCmd := fmt.Sprintf("fallocate -l %dK %s", tc.Size, outfile)
- if out, err := container.Exec(ctx, dockerutil.ExecOpts{},
- strings.Split(fallocateCmd, " ")...); err != nil {
- b.Fatalf("failed to create readable file on mount: %v, %s", err, out)
- }
- }
-
- // Drop caches just before running.
- if err := harness.DropCaches(machine); err != nil {
- b.Skipf("failed to drop caches with %v. You probably need root.", err)
- }
-
- cmd := tc.MakeCmd(outfile)
- if err := harness.DropCaches(machine); err != nil {
- b.Fatalf("failed to drop caches: %v", err)
- }
-
- // Run fio.
- b.StartTimer()
- data, err := container.Exec(ctx, dockerutil.ExecOpts{}, cmd...)
- if err != nil {
- b.Fatalf("failed to run cmd %v: %v", cmd, err)
- }
- b.StopTimer()
- tc.Report(b, data)
- })
- }
- }
-}
-
-// TestMain is the main method for package fs.
-func TestMain(m *testing.M) {
- harness.Init()
- os.Exit(m.Run())
-}
diff --git a/test/benchmarks/harness/BUILD b/test/benchmarks/harness/BUILD
deleted file mode 100644
index 367316661..000000000
--- a/test/benchmarks/harness/BUILD
+++ /dev/null
@@ -1,20 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "harness",
- testonly = 1,
- srcs = [
- "harness.go",
- "machine.go",
- "util.go",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/cleanup",
- "//pkg/test/dockerutil",
- "//pkg/test/testutil",
- "@com_github_docker_docker//api/types/mount:go_default_library",
- ],
-)
diff --git a/test/benchmarks/harness/harness.go b/test/benchmarks/harness/harness.go
deleted file mode 100644
index a853b7ba8..000000000
--- a/test/benchmarks/harness/harness.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package harness holds utility code for running benchmarks on Docker.
-package harness
-
-import (
- "flag"
- "fmt"
- "os"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
-)
-
-var (
- help = flag.Bool("help", false, "print this usage message")
- debug = flag.Bool("debug", false, "turns on debug messages for individual benchmarks")
-)
-
-// Init performs any harness initialization before runs.
-func Init() error {
- flag.Usage = func() {
- fmt.Fprintf(os.Stderr, "Usage: %s -- --test.bench=<regex>\n", os.Args[0])
- flag.PrintDefaults()
- }
- flag.Parse()
- if *help {
- flag.Usage()
- os.Exit(0)
- }
- dockerutil.EnsureSupportedDockerVersion()
- return nil
-}
-
-// SetFixedBenchmarks causes all benchmarks to run once.
-//
-// This must be set if they cannot scale with N. Note that this uses 1ns
-// instead of 1x due to https://github.com/golang/go/issues/32051.
-func SetFixedBenchmarks() {
- flag.Set("test.benchtime", "1ns")
-}
-
-// GetMachine returns this run's implementation of machine.
-func GetMachine() (Machine, error) {
- return &localMachine{}, nil
-}
diff --git a/test/benchmarks/harness/machine.go b/test/benchmarks/harness/machine.go
deleted file mode 100644
index 405b646e8..000000000
--- a/test/benchmarks/harness/machine.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package harness
-
-import (
- "context"
- "errors"
- "net"
- "os/exec"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/pkg/test/testutil"
-)
-
-// Machine describes a real machine for use in benchmarks.
-type Machine interface {
- // GetContainer gets a container from the machine. The container uses the
- // runtime under test and is profiled if requested by flags.
- GetContainer(ctx context.Context, log testutil.Logger) *dockerutil.Container
-
- // GetNativeContainer gets a native container from the machine. Native containers
- // use runc by default and are not profiled.
- GetNativeContainer(ctx context.Context, log testutil.Logger) *dockerutil.Container
-
- // RunCommand runs cmd on this machine.
- RunCommand(cmd string, args ...string) (string, error)
-
- // IPAddress returns the IP address for the machine.
- IPAddress() (net.IP, error)
-
- // CleanUp cleans up this machine.
- CleanUp()
-}
-
-// localMachine describes this machine.
-type localMachine struct {
-}
-
-// GetContainer implements Machine.GetContainer for localMachine.
-func (l *localMachine) GetContainer(ctx context.Context, logger testutil.Logger) *dockerutil.Container {
- return dockerutil.MakeContainer(ctx, logger)
-}
-
-// GetNativeContainer implements Machine.GetNativeContainer for localMachine.
-func (l *localMachine) GetNativeContainer(ctx context.Context, logger testutil.Logger) *dockerutil.Container {
- return dockerutil.MakeNativeContainer(ctx, logger)
-}
-
-// RunCommand implements Machine.RunCommand for localMachine.
-func (l *localMachine) RunCommand(cmd string, args ...string) (string, error) {
- c := exec.Command(cmd, args...)
- out, err := c.CombinedOutput()
- return string(out), err
-}
-
-// IPAddress implements Machine.IPAddress.
-func (l *localMachine) IPAddress() (net.IP, error) {
- addrs, err := net.InterfaceAddrs()
- if err != nil {
- return net.IP{}, err
- }
- for _, a := range addrs {
- if ipnet, ok := a.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
- if ipnet.IP.To4() != nil {
- return ipnet.IP, nil
- }
- }
- }
- // Unable to locate non-loopback address.
- return nil, errors.New("no IPAddress available")
-}
-
-// CleanUp implements Machine.CleanUp and does nothing for localMachine.
-func (*localMachine) CleanUp() {
-}
diff --git a/test/benchmarks/harness/util.go b/test/benchmarks/harness/util.go
deleted file mode 100644
index f7e569751..000000000
--- a/test/benchmarks/harness/util.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package harness
-
-import (
- "context"
- "fmt"
- "net"
- "strings"
- "testing"
-
- "github.com/docker/docker/api/types/mount"
- "gvisor.dev/gvisor/pkg/cleanup"
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/pkg/test/testutil"
-)
-
-// TODO(gvisor.dev/issue/3535): move to own package or move methods to harness struct.
-
-// WaitUntilServing grabs a container from `machine` and waits for a server at
-// IP:port.
-func WaitUntilServing(ctx context.Context, machine Machine, server net.IP, port int) error {
- var logger testutil.DefaultLogger = "util"
- netcat := machine.GetNativeContainer(ctx, logger)
- defer netcat.CleanUp(ctx)
-
- cmd := fmt.Sprintf("while ! wget -q --spider http://%s:%d; do true; done", server, port)
- _, err := netcat.Run(ctx, dockerutil.RunOpts{
- Image: "benchmarks/util",
- }, "sh", "-c", cmd)
- return err
-}
-
-// DropCaches drops caches on the provided machine. Requires root.
-func DropCaches(machine Machine) error {
- if out, err := machine.RunCommand("/bin/sh", "-c", "sync && sysctl vm.drop_caches=3"); err != nil {
- return fmt.Errorf("failed to drop caches: %v logs: %s", err, out)
- }
- return nil
-}
-
-// DebugLog prints debug messages if the debug flag is set.
-func DebugLog(b *testing.B, msg string, args ...interface{}) {
- b.Helper()
- if *debug {
- b.Logf(msg, args...)
- }
-}
-
-// FileSystemType represents the type of container mount.
-type FileSystemType string
-
-const (
- // BindFS indicates a bind mount should be created.
- BindFS FileSystemType = "bindfs"
- // TmpFS indicates a tmpfs mount should be created.
- TmpFS FileSystemType = "tmpfs"
- // RootFS indicates no mount should be created and the root mount should be used.
- RootFS FileSystemType = "rootfs"
-)
-
-// MakeMount makes a mount and cleanup based on the requested type. Bind
-// and volume mounts are backed by a temp directory made with mktemp.
-// tmpfs mounts require no such backing and are simply created.
-// rootfs mounts do not make a mount, but instead return a target directory at root.
-// It is up to the caller to call Clean on the passed *cleanup.Cleanup.
-func MakeMount(machine Machine, fsType FileSystemType, cu *cleanup.Cleanup) ([]mount.Mount, string, error) {
- mounts := make([]mount.Mount, 0, 1)
- target := "/data"
- switch fsType {
- case BindFS:
- dir, err := machine.RunCommand("mktemp", "-d")
- if err != nil {
- return mounts, "", fmt.Errorf("failed to create tempdir: %v", err)
- }
- dir = strings.TrimSuffix(dir, "\n")
- cu.Add(func() {
- machine.RunCommand("rm", "-rf", dir)
- })
- out, err := machine.RunCommand("chmod", "777", dir)
- if err != nil {
- return mounts, "", fmt.Errorf("failed modify directory: %v %s", err, out)
- }
- mounts = append(mounts, mount.Mount{
- Target: target,
- Source: dir,
- Type: mount.TypeBind,
- })
- return mounts, target, nil
- case RootFS:
- return mounts, target, nil
- case TmpFS:
- mounts = append(mounts, mount.Mount{
- Target: target,
- Type: mount.TypeTmpfs,
- })
- return mounts, target, nil
- default:
- return mounts, "", fmt.Errorf("illegal mount type not supported: %v", fsType)
- }
-}
diff --git a/test/benchmarks/media/BUILD b/test/benchmarks/media/BUILD
deleted file mode 100644
index ad2ef3a55..000000000
--- a/test/benchmarks/media/BUILD
+++ /dev/null
@@ -1,21 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-load("//test/benchmarks:defs.bzl", "benchmark_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "media",
- testonly = 1,
- srcs = ["media.go"],
-)
-
-benchmark_test(
- name = "ffmpeg_test",
- srcs = ["ffmpeg_test.go"],
- library = ":media",
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/test/dockerutil",
- "//test/benchmarks/harness",
- ],
-)
diff --git a/test/benchmarks/media/ffmpeg_test.go b/test/benchmarks/media/ffmpeg_test.go
deleted file mode 100644
index 1b99a319a..000000000
--- a/test/benchmarks/media/ffmpeg_test.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-package media
-
-import (
- "context"
- "os"
- "strings"
- "testing"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/test/benchmarks/harness"
-)
-
-// BenchmarkFfmpeg runs ffmpeg in a container and records runtime.
-// BenchmarkFfmpeg should run as root to drop caches.
-func BenchmarkFfmpeg(b *testing.B) {
- machine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine: %v", err)
- }
- defer machine.CleanUp()
-
- ctx := context.Background()
- cmd := strings.Split("ffmpeg -i video.mp4 -c:v libx264 -preset veryslow output.mp4", " ")
-
- b.ResetTimer()
- b.StopTimer()
-
- for i := 0; i < b.N; i++ {
- container := machine.GetContainer(ctx, b)
- defer container.CleanUp(ctx)
- if err := harness.DropCaches(machine); err != nil {
- b.Skipf("failed to drop caches: %v. You probably need root.", err)
- }
-
- b.StartTimer()
- if _, err := container.Run(ctx, dockerutil.RunOpts{
- Image: "benchmarks/ffmpeg",
- }, cmd...); err != nil {
- b.Fatalf("failed to run container: %v", err)
- }
- b.StopTimer()
- }
-}
-
-func TestMain(m *testing.M) {
- harness.Init()
- os.Exit(m.Run())
-}
diff --git a/test/benchmarks/media/media.go b/test/benchmarks/media/media.go
deleted file mode 100644
index ed7b24651..000000000
--- a/test/benchmarks/media/media.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package media holds benchmarks around media processing applications.
-package media
diff --git a/test/benchmarks/ml/BUILD b/test/benchmarks/ml/BUILD
deleted file mode 100644
index 56a4d4f39..000000000
--- a/test/benchmarks/ml/BUILD
+++ /dev/null
@@ -1,22 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-load("//test/benchmarks:defs.bzl", "benchmark_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "ml",
- testonly = 1,
- srcs = ["ml.go"],
-)
-
-benchmark_test(
- name = "tensorflow_test",
- srcs = ["tensorflow_test.go"],
- library = ":ml",
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/test/dockerutil",
- "//test/benchmarks/harness",
- "//test/benchmarks/tools",
- ],
-)
diff --git a/test/benchmarks/ml/ml.go b/test/benchmarks/ml/ml.go
deleted file mode 100644
index d5fc5b7da..000000000
--- a/test/benchmarks/ml/ml.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package ml holds benchmarks around machine learning performance.
-package ml
diff --git a/test/benchmarks/ml/tensorflow_test.go b/test/benchmarks/ml/tensorflow_test.go
deleted file mode 100644
index 8fa75d778..000000000
--- a/test/benchmarks/ml/tensorflow_test.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-package ml
-
-import (
- "context"
- "os"
- "testing"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/test/benchmarks/harness"
- "gvisor.dev/gvisor/test/benchmarks/tools"
-)
-
-// BenchmarkTensorflow runs workloads from a TensorFlow tutorial.
-// See: https://github.com/aymericdamien/TensorFlow-Examples
-func BenchmarkTensorflow(b *testing.B) {
- workloads := map[string]string{
- "GradientDecisionTree": "2_BasicModels/gradient_boosted_decision_tree.py",
- "Kmeans": "2_BasicModels/kmeans.py",
- "LogisticRegression": "2_BasicModels/logistic_regression.py",
- "NearestNeighbor": "2_BasicModels/nearest_neighbor.py",
- "RandomForest": "2_BasicModels/random_forest.py",
- "ConvolutionalNetwork": "3_NeuralNetworks/convolutional_network.py",
- "MultilayerPerceptron": "3_NeuralNetworks/multilayer_perceptron.py",
- "NeuralNetwork": "3_NeuralNetworks/neural_network.py",
- }
-
- machine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine: %v", err)
- }
- defer machine.CleanUp()
-
- for name, workload := range workloads {
- runName, err := tools.ParametersToName(tools.Parameter{
- Name: "operation",
- Value: name,
- })
- if err != nil {
- b.Fatalf("Faile to parse param: %v", err)
- }
-
- b.Run(runName, func(b *testing.B) {
- ctx := context.Background()
-
- b.ResetTimer()
- b.StopTimer()
-
- for i := 0; i < b.N; i++ {
- container := machine.GetContainer(ctx, b)
- defer container.CleanUp(ctx)
- if err := harness.DropCaches(machine); err != nil {
- b.Skipf("failed to drop caches: %v. You probably need root.", err)
- }
-
- // Run tensorflow.
- b.StartTimer()
- if out, err := container.Run(ctx, dockerutil.RunOpts{
- Image: "benchmarks/tensorflow",
- Env: []string{"PYTHONPATH=$PYTHONPATH:/TensorFlow-Examples/examples"},
- WorkDir: "/TensorFlow-Examples/examples",
- }, "python", workload); err != nil {
- b.Fatalf("failed to run container: %v logs: %s", err, out)
- }
- b.StopTimer()
- }
- })
- }
-}
-
-func TestMain(m *testing.M) {
- harness.Init()
- harness.SetFixedBenchmarks()
- os.Exit(m.Run())
-}
diff --git a/test/benchmarks/network/BUILD b/test/benchmarks/network/BUILD
deleted file mode 100644
index e047020bf..000000000
--- a/test/benchmarks/network/BUILD
+++ /dev/null
@@ -1,89 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-load("//test/benchmarks:defs.bzl", "benchmark_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "network",
- testonly = 1,
- srcs = [
- "network.go",
- ],
- deps = [
- "//pkg/test/dockerutil",
- "//test/benchmarks/harness",
- "//test/benchmarks/tools",
- ],
-)
-
-benchmark_test(
- name = "iperf_test",
- srcs = [
- "iperf_test.go",
- ],
- library = ":network",
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/test/dockerutil",
- "//pkg/test/testutil",
- "//test/benchmarks/harness",
- "//test/benchmarks/tools",
- ],
-)
-
-benchmark_test(
- name = "node_test",
- srcs = [
- "node_test.go",
- ],
- library = ":network",
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/test/dockerutil",
- "//test/benchmarks/harness",
- "//test/benchmarks/tools",
- ],
-)
-
-benchmark_test(
- name = "ruby_test",
- srcs = [
- "ruby_test.go",
- ],
- library = ":network",
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/test/dockerutil",
- "//test/benchmarks/harness",
- "//test/benchmarks/tools",
- ],
-)
-
-benchmark_test(
- name = "nginx_test",
- srcs = [
- "nginx_test.go",
- ],
- library = ":network",
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/test/dockerutil",
- "//test/benchmarks/harness",
- "//test/benchmarks/tools",
- ],
-)
-
-benchmark_test(
- name = "httpd_test",
- srcs = [
- "httpd_test.go",
- ],
- library = ":network",
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/test/dockerutil",
- "//pkg/test/testutil",
- "//test/benchmarks/harness",
- "//test/benchmarks/tools",
- ],
-)
diff --git a/test/benchmarks/network/httpd_test.go b/test/benchmarks/network/httpd_test.go
deleted file mode 100644
index 629127250..000000000
--- a/test/benchmarks/network/httpd_test.go
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-package network
-
-import (
- "os"
- "strconv"
- "testing"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/test/benchmarks/harness"
- "gvisor.dev/gvisor/test/benchmarks/tools"
-)
-
-// see Dockerfile '//images/benchmarks/httpd'.
-var httpdDocs = map[string]string{
- "notfound": "notfound",
- "1Kb": "latin1k.txt",
- "10Kb": "latin10k.txt",
- "100Kb": "latin100k.txt",
- "1Mb": "latin1024k.txt",
- "10Mb": "latin10240k.txt",
-}
-
-// BenchmarkHttpd iterates over different sized payloads and concurrency, testing
-// how well the runtime handles sending different payload sizes.
-func BenchmarkHttpd(b *testing.B) {
- benchmarkHttpdDocSize(b)
-}
-
-// BenchmarkContinuousHttpd runs specific benchmarks for continuous jobs.
-// The runtime under test is the server serving a runc client.
-func BenchmarkContinuousHttpd(b *testing.B) {
- sizes := []string{"10Kb", "100Kb", "1Mb"}
- threads := []int{1, 25, 100, 1000}
- benchmarkHttpdContinuous(b, threads, sizes)
-}
-
-// benchmarkHttpdDocSize iterates through all doc sizes, running subbenchmarks
-// for each size.
-func benchmarkHttpdDocSize(b *testing.B) {
- b.Helper()
- for size, filename := range httpdDocs {
- concurrency := []int{1, 25, 50, 100, 1000}
- for _, c := range concurrency {
- fsize := tools.Parameter{
- Name: "filesize",
- Value: size,
- }
- concurrency := tools.Parameter{
- Name: "concurrency",
- Value: strconv.Itoa(c),
- }
- name, err := tools.ParametersToName(fsize, concurrency)
- if err != nil {
- b.Fatalf("Failed to parse parameters: %v", err)
- }
- b.Run(name, func(b *testing.B) {
- hey := &tools.Hey{
- Requests: b.N,
- Concurrency: c,
- Doc: filename,
- }
- runHttpd(b, hey)
- })
- }
- }
-}
-
-// benchmarkHttpdContinuous iterates through given sizes and concurrencies.
-func benchmarkHttpdContinuous(b *testing.B, concurrency []int, sizes []string) {
- for _, size := range sizes {
- filename := httpdDocs[size]
- for _, c := range concurrency {
- fsize := tools.Parameter{
- Name: "filesize",
- Value: size,
- }
-
- threads := tools.Parameter{
- Name: "concurrency",
- Value: strconv.Itoa(c),
- }
-
- name, err := tools.ParametersToName(fsize, threads)
- if err != nil {
- b.Fatalf("Failed to parse parameters: %v", err)
- }
- b.Run(name, func(b *testing.B) {
- hey := &tools.Hey{
- Requests: b.N,
- Concurrency: c,
- Doc: filename,
- }
- runHttpd(b, hey)
- })
- }
- }
-}
-
-// runHttpd configures the static serving methods to run httpd.
-func runHttpd(b *testing.B, hey *tools.Hey) {
- // httpd runs on port 80.
- port := 80
- httpdRunOpts := dockerutil.RunOpts{
- Image: "benchmarks/httpd",
- Ports: []int{port},
- Env: []string{
- // Standard environmental variables for httpd.
- "APACHE_RUN_DIR=/tmp",
- "APACHE_RUN_USER=nobody",
- "APACHE_RUN_GROUP=nogroup",
- "APACHE_LOG_DIR=/tmp",
- "APACHE_PID_FILE=/tmp/apache.pid",
- },
- }
- httpdCmd := []string{"sh", "-c", "mkdir -p /tmp/html; cp -r /local/* /tmp/html/.; apache2 -X"}
- runStaticServer(b, httpdRunOpts, httpdCmd, port, hey)
-}
-
-func TestMain(m *testing.M) {
- harness.Init()
- os.Exit(m.Run())
-}
diff --git a/test/benchmarks/network/iperf_test.go b/test/benchmarks/network/iperf_test.go
deleted file mode 100644
index 6ac7717b1..000000000
--- a/test/benchmarks/network/iperf_test.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-package network
-
-import (
- "context"
- "os"
- "testing"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/pkg/test/testutil"
- "gvisor.dev/gvisor/test/benchmarks/harness"
- "gvisor.dev/gvisor/test/benchmarks/tools"
-)
-
-func BenchmarkIperf(b *testing.B) {
- clientMachine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine: %v", err)
- }
- defer clientMachine.CleanUp()
-
- serverMachine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine: %v", err)
- }
- defer serverMachine.CleanUp()
- ctx := context.Background()
- for _, bm := range []struct {
- name string
- clientFunc func(context.Context, testutil.Logger) *dockerutil.Container
- serverFunc func(context.Context, testutil.Logger) *dockerutil.Container
- }{
- // We are either measuring the server or the client. The other should be
- // runc. e.g. Upload sees how fast the runtime under test uploads to a native
- // server.
- {
- name: "Upload",
- clientFunc: clientMachine.GetContainer,
- serverFunc: serverMachine.GetNativeContainer,
- },
- {
- name: "Download",
- clientFunc: clientMachine.GetNativeContainer,
- serverFunc: serverMachine.GetContainer,
- },
- } {
- name, err := tools.ParametersToName(tools.Parameter{
- Name: "operation",
- Value: bm.name,
- })
- if err != nil {
- b.Fatalf("Failed to parse parameters: %v", err)
- }
- b.Run(name, func(b *testing.B) {
- // Set up the containers.
- server := bm.serverFunc(ctx, b)
- defer server.CleanUp(ctx)
- client := bm.clientFunc(ctx, b)
- defer client.CleanUp(ctx)
-
- // iperf serves on port 5001 by default.
- port := 5001
-
- // Start the server.
- if err := server.Spawn(ctx, dockerutil.RunOpts{
- Image: "benchmarks/iperf",
- Ports: []int{port},
- }, "iperf", "-s"); err != nil {
- b.Fatalf("failed to start server with: %v", err)
- }
-
- ip, err := serverMachine.IPAddress()
- if err != nil {
- b.Fatalf("failed to find server ip: %v", err)
- }
-
- servingPort, err := server.FindPort(ctx, port)
- if err != nil {
- b.Fatalf("failed to find port %d: %v", port, err)
- }
-
- // Make sure the server is up and serving before we run.
- if err := harness.WaitUntilServing(ctx, clientMachine, ip, servingPort); err != nil {
- b.Fatalf("failed to wait for server: %v", err)
- }
-
- iperf := tools.Iperf{
- Num: b.N, // KB for the client to send.
- }
-
- // Run the client.
- b.ResetTimer()
- out, err := client.Run(ctx, dockerutil.RunOpts{
- Image: "benchmarks/iperf",
- }, iperf.MakeCmd(ip, servingPort)...)
- if err != nil {
- b.Fatalf("failed to run client: %v", err)
- }
- b.StopTimer()
- iperf.Report(b, out)
- b.StartTimer()
- })
- }
-}
-
-func TestMain(m *testing.M) {
- harness.Init()
- os.Exit(m.Run())
-}
diff --git a/test/benchmarks/network/network.go b/test/benchmarks/network/network.go
deleted file mode 100644
index d61002cea..000000000
--- a/test/benchmarks/network/network.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package network holds benchmarks around raw network performance.
-package network
-
-import (
- "context"
- "testing"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/test/benchmarks/harness"
- "gvisor.dev/gvisor/test/benchmarks/tools"
-)
-
-// runStaticServer runs static serving workloads (httpd, nginx).
-func runStaticServer(b *testing.B, serverOpts dockerutil.RunOpts, serverCmd []string, port int, hey *tools.Hey) {
- ctx := context.Background()
-
- // Get two machines: a client and server.
- clientMachine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine: %v", err)
- }
- defer clientMachine.CleanUp()
-
- serverMachine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine: %v", err)
- }
- defer serverMachine.CleanUp()
-
- // Make the containers.
- client := clientMachine.GetNativeContainer(ctx, b)
- defer client.CleanUp(ctx)
- server := serverMachine.GetContainer(ctx, b)
- defer server.CleanUp(ctx)
-
- // Start the server.
- if err := server.Spawn(ctx, serverOpts, serverCmd...); err != nil {
- b.Fatalf("failed to start server: %v", err)
- }
-
- // Get its IP.
- ip, err := serverMachine.IPAddress()
- if err != nil {
- b.Fatalf("failed to find server ip: %v", err)
- }
-
- // Get the published port.
- servingPort, err := server.FindPort(ctx, port)
- if err != nil {
- b.Fatalf("failed to find server port %d: %v", port, err)
- }
-
- // Make sure the server is serving.
- harness.WaitUntilServing(ctx, clientMachine, ip, servingPort)
-
- // Run the client.
- b.ResetTimer()
- out, err := client.Run(ctx, dockerutil.RunOpts{
- Image: "benchmarks/hey",
- }, hey.MakeCmd(ip, servingPort)...)
- if err != nil {
- b.Fatalf("run failed with: %v", err)
- }
- b.StopTimer()
- hey.Report(b, out)
-}
diff --git a/test/benchmarks/network/nginx_test.go b/test/benchmarks/network/nginx_test.go
deleted file mode 100644
index 74f3578fc..000000000
--- a/test/benchmarks/network/nginx_test.go
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-package network
-
-import (
- "os"
- "strconv"
- "testing"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/test/benchmarks/harness"
- "gvisor.dev/gvisor/test/benchmarks/tools"
-)
-
-// see Dockerfile '//images/benchmarks/nginx'.
-var nginxDocs = map[string]string{
- "notfound": "notfound",
- "1Kb": "latin1k.txt",
- "10Kb": "latin10k.txt",
- "100Kb": "latin100k.txt",
- "1Mb": "latin1024k.txt",
- "10Mb": "latin10240k.txt",
-}
-
-// BenchmarkNginxDocSize iterates over different sized payloads, testing how
-// well the runtime handles sending different payload sizes.
-func BenchmarkNginxDocSize(b *testing.B) {
- benchmarkNginxDocSize(b, true /* tmpfs */)
- benchmarkNginxDocSize(b, false /* tmpfs */)
-}
-
-// BenchmarkContinuousNginx runs specific benchmarks for continuous jobs.
-// The runtime under test is the server serving a runc client.
-func BenchmarkContinuousNginx(b *testing.B) {
- sizes := []string{"10Kb", "100Kb", "1Mb"}
- threads := []int{1, 25, 100, 1000}
- benchmarkNginxContinuous(b, threads, sizes)
-}
-
-// benchmarkNginxDocSize iterates through all doc sizes, running subbenchmarks
-// for each size.
-func benchmarkNginxDocSize(b *testing.B, tmpfs bool) {
- for size, filename := range nginxDocs {
- concurrency := []int{1, 25, 50, 100, 1000}
- for _, c := range concurrency {
- fsize := tools.Parameter{
- Name: "filesize",
- Value: size,
- }
-
- threads := tools.Parameter{
- Name: "concurrency",
- Value: strconv.Itoa(c),
- }
-
- fs := tools.Parameter{
- Name: "filesystem",
- Value: "bind",
- }
- if tmpfs {
- fs.Value = "tmpfs"
- }
- name, err := tools.ParametersToName(fsize, threads, fs)
- if err != nil {
- b.Fatalf("Failed to parse parameters: %v", err)
- }
- b.Run(name, func(b *testing.B) {
- hey := &tools.Hey{
- Requests: b.N,
- Concurrency: c,
- Doc: filename,
- }
- runNginx(b, hey, tmpfs)
- })
- }
- }
-}
-
-// benchmarkNginxContinuous iterates through given sizes and concurrencies on a tmpfs mount.
-func benchmarkNginxContinuous(b *testing.B, concurrency []int, sizes []string) {
- for _, size := range sizes {
- filename := nginxDocs[size]
- for _, c := range concurrency {
- fsize := tools.Parameter{
- Name: "filesize",
- Value: size,
- }
-
- threads := tools.Parameter{
- Name: "concurrency",
- Value: strconv.Itoa(c),
- }
-
- fs := tools.Parameter{
- Name: "filesystem",
- Value: "tmpfs",
- }
-
- name, err := tools.ParametersToName(fsize, threads, fs)
- if err != nil {
- b.Fatalf("Failed to parse parameters: %v", err)
- }
- b.Run(name, func(b *testing.B) {
- hey := &tools.Hey{
- Requests: b.N,
- Concurrency: c,
- Doc: filename,
- }
- runNginx(b, hey, true /*tmpfs*/)
- })
- }
- }
-}
-
-// runNginx configures the static serving methods to run nginx.
-func runNginx(b *testing.B, hey *tools.Hey, tmpfs bool) {
- // nginx runs on port 80.
- port := 80
- nginxRunOpts := dockerutil.RunOpts{
- Image: "benchmarks/nginx",
- Ports: []int{port},
- }
-
- nginxCmd := []string{"nginx", "-c", "/etc/nginx/nginx_gofer.conf"}
- if tmpfs {
- nginxCmd = []string{"sh", "-c", "mkdir -p /tmp/html && cp -a /local/* /tmp/html && nginx -c /etc/nginx/nginx.conf"}
- }
-
- // Command copies nginxDocs to tmpfs serving directory and runs nginx.
- runStaticServer(b, nginxRunOpts, nginxCmd, port, hey)
-}
-
-func TestMain(m *testing.M) {
- harness.Init()
- os.Exit(m.Run())
-}
diff --git a/test/benchmarks/network/node_test.go b/test/benchmarks/network/node_test.go
deleted file mode 100644
index a1fc82f95..000000000
--- a/test/benchmarks/network/node_test.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-package network
-
-import (
- "context"
- "os"
- "strconv"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/test/benchmarks/harness"
- "gvisor.dev/gvisor/test/benchmarks/tools"
-)
-
-// BenchmarkNode runs requests using 'hey' against a Node server run on
-// 'runtime'. The server responds to requests by grabbing some data from a
-// redis instance and returning it in its response. The test loops through
-// increasing amounts of concurrency for requests.
-func BenchmarkNode(b *testing.B) {
- concurrency := []int{1, 5, 10, 25}
- for _, c := range concurrency {
- param := tools.Parameter{
- Name: "concurrency",
- Value: strconv.Itoa(c),
- }
- name, err := tools.ParametersToName(param)
- if err != nil {
- b.Fatalf("Failed to parse parameters: %v", err)
- }
- b.Run(name, func(b *testing.B) {
- hey := &tools.Hey{
- Requests: b.N,
- Concurrency: c,
- }
- runNode(b, hey)
- })
- }
-}
-
-// runNode runs the test for a given # of requests and concurrency.
-func runNode(b *testing.B, hey *tools.Hey) {
- b.Helper()
-
- // The machine to hold Redis and the Node Server.
- serverMachine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine with: %v", err)
- }
- defer serverMachine.CleanUp()
-
- // The machine to run 'hey'.
- clientMachine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine with: %v", err)
- }
- defer clientMachine.CleanUp()
-
- ctx := context.Background()
-
- // Spawn a redis instance for the app to use.
- redis := serverMachine.GetNativeContainer(ctx, b)
- if err := redis.Spawn(ctx, dockerutil.RunOpts{
- Image: "benchmarks/redis",
- }); err != nil {
- b.Fatalf("failed to spwan redis instance: %v", err)
- }
- defer redis.CleanUp(ctx)
-
- if out, err := redis.WaitForOutput(ctx, "Ready to accept connections", 3*time.Second); err != nil {
- b.Fatalf("failed to start redis server: %v %s", err, out)
- }
- redisIP, err := redis.FindIP(ctx, false)
- if err != nil {
- b.Fatalf("failed to get IP from redis instance: %v", err)
- }
-
- // Node runs on port 8080.
- port := 8080
-
- // Start-up the Node server.
- nodeApp := serverMachine.GetContainer(ctx, b)
- if err := nodeApp.Spawn(ctx, dockerutil.RunOpts{
- Image: "benchmarks/node",
- WorkDir: "/usr/src/app",
- Links: []string{redis.MakeLink("redis")},
- Ports: []int{port},
- }, "node", "index.js", redisIP.String()); err != nil {
- b.Fatalf("failed to spawn node instance: %v", err)
- }
- defer nodeApp.CleanUp(ctx)
-
- servingIP, err := serverMachine.IPAddress()
- if err != nil {
- b.Fatalf("failed to get ip from server: %v", err)
- }
-
- servingPort, err := nodeApp.FindPort(ctx, port)
- if err != nil {
- b.Fatalf("failed to port from node instance: %v", err)
- }
-
- // Wait until the Client sees the server as up.
- harness.WaitUntilServing(ctx, clientMachine, servingIP, servingPort)
-
- heyCmd := hey.MakeCmd(servingIP, servingPort)
-
- // The client should run on Native.
- b.ResetTimer()
- client := clientMachine.GetNativeContainer(ctx, b)
- out, err := client.Run(ctx, dockerutil.RunOpts{
- Image: "benchmarks/hey",
- }, heyCmd...)
- if err != nil {
- b.Fatalf("hey container failed: %v logs: %s", err, out)
- }
-
- // Stop the timer to parse the data and report stats.
- hey.Report(b, out)
-}
-
-func TestMain(m *testing.M) {
- harness.Init()
- os.Exit(m.Run())
-}
diff --git a/test/benchmarks/network/ruby_test.go b/test/benchmarks/network/ruby_test.go
deleted file mode 100644
index b7ec16e0a..000000000
--- a/test/benchmarks/network/ruby_test.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-package network
-
-import (
- "context"
- "fmt"
- "os"
- "strconv"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/test/benchmarks/harness"
- "gvisor.dev/gvisor/test/benchmarks/tools"
-)
-
-// BenchmarkRuby runs requests using 'hey' against a ruby application server.
-// On start, the ruby app generates some random data and pushes it to a redis
-// instance. On each request, the app grabs random entries from the redis
-// server, renders them into a document, and returns the doc in its response.
-func BenchmarkRuby(b *testing.B) {
- concurrency := []int{1, 5, 10, 25}
- for _, c := range concurrency {
- param := tools.Parameter{
- Name: "concurrency",
- Value: strconv.Itoa(c),
- }
- name, err := tools.ParametersToName(param)
- if err != nil {
- b.Fatalf("Failed to parse parameters: %v", err)
- }
- b.Run(name, func(b *testing.B) {
- hey := &tools.Hey{
- Requests: b.N,
- Concurrency: c,
- }
- runRuby(b, hey)
- })
- }
-}
-
-// runRuby runs the test for a given # of requests and concurrency.
-func runRuby(b *testing.B, hey *tools.Hey) {
- // The machine to hold Redis and the Ruby Server.
- serverMachine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine with: %v", err)
- }
- defer serverMachine.CleanUp()
-
- // The machine to run 'hey'.
- clientMachine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine with: %v", err)
- }
- defer clientMachine.CleanUp()
- ctx := context.Background()
-
- // Spawn a redis instance for the app to use.
- redis := serverMachine.GetNativeContainer(ctx, b)
- if err := redis.Spawn(ctx, dockerutil.RunOpts{
- Image: "benchmarks/redis",
- }); err != nil {
-		b.Fatalf("failed to spawn redis instance: %v", err)
- }
- defer redis.CleanUp(ctx)
-
- if out, err := redis.WaitForOutput(ctx, "Ready to accept connections", 3*time.Second); err != nil {
- b.Fatalf("failed to start redis server: %v %s", err, out)
- }
- redisIP, err := redis.FindIP(ctx, false)
- if err != nil {
- b.Fatalf("failed to get IP from redis instance: %v", err)
- }
-
- // Ruby runs on port 9292.
- const port = 9292
-
- // Start-up the Ruby server.
- rubyApp := serverMachine.GetContainer(ctx, b)
- if err := rubyApp.Spawn(ctx, dockerutil.RunOpts{
- Image: "benchmarks/ruby",
- WorkDir: "/app",
- Links: []string{redis.MakeLink("redis")},
- Ports: []int{port},
- Env: []string{
- fmt.Sprintf("PORT=%d", port),
- "WEB_CONCURRENCY=20",
- "WEB_MAX_THREADS=20",
- "RACK_ENV=production",
- fmt.Sprintf("HOST=%s", redisIP),
- },
- User: "nobody",
- }, "sh", "-c", "/usr/bin/puma"); err != nil {
-		b.Fatalf("failed to spawn ruby instance: %v", err)
- }
- defer rubyApp.CleanUp(ctx)
-
- servingIP, err := serverMachine.IPAddress()
- if err != nil {
- b.Fatalf("failed to get ip from server: %v", err)
- }
-
- servingPort, err := rubyApp.FindPort(ctx, port)
- if err != nil {
-		b.Fatalf("failed to get port from ruby instance: %v", err)
- }
-
- // Wait until the Client sees the server as up.
- if err := harness.WaitUntilServing(ctx, clientMachine, servingIP, servingPort); err != nil {
- b.Fatalf("failed to wait until serving: %v", err)
- }
- heyCmd := hey.MakeCmd(servingIP, servingPort)
-
-	// The client should run on Native.
- b.ResetTimer()
- client := clientMachine.GetNativeContainer(ctx, b)
- defer client.CleanUp(ctx)
- out, err := client.Run(ctx, dockerutil.RunOpts{
- Image: "benchmarks/hey",
- }, heyCmd...)
- if err != nil {
- b.Fatalf("hey container failed: %v logs: %s", err, out)
- }
- b.StopTimer()
- hey.Report(b, out)
-}
-
-func TestMain(m *testing.M) {
- harness.Init()
- os.Exit(m.Run())
-}
diff --git a/test/benchmarks/tcp/BUILD b/test/benchmarks/tcp/BUILD
deleted file mode 100644
index 6dde7d9e6..000000000
--- a/test/benchmarks/tcp/BUILD
+++ /dev/null
@@ -1,41 +0,0 @@
-load("//tools:defs.bzl", "cc_binary", "go_binary")
-
-package(licenses = ["notice"])
-
-go_binary(
- name = "tcp_proxy",
- srcs = ["tcp_proxy.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/adapters/gonet",
- "//pkg/tcpip/link/fdbased",
- "//pkg/tcpip/link/qdisc/fifo",
- "//pkg/tcpip/network/arp",
- "//pkg/tcpip/network/ipv4",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/transport/tcp",
- "//pkg/tcpip/transport/udp",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-# nsjoin is a trivial replacement for nsenter. This is used because nsenter is
-# not available on all systems where this benchmark is run (and we aim to
-# minimize external dependencies.)
-
-cc_binary(
- name = "nsjoin",
- srcs = ["nsjoin.c"],
- visibility = ["//:sandbox"],
-)
-
-sh_binary(
- name = "tcp_benchmark",
- srcs = ["tcp_benchmark.sh"],
- data = [
- ":nsjoin",
- ":tcp_proxy",
- ],
- visibility = ["//:sandbox"],
-)
diff --git a/test/benchmarks/tcp/README.md b/test/benchmarks/tcp/README.md
deleted file mode 100644
index 38e6e69f0..000000000
--- a/test/benchmarks/tcp/README.md
+++ /dev/null
@@ -1,87 +0,0 @@
-# TCP Benchmarks
-
-This directory contains a standardized TCP benchmark. This helps to evaluate the
-performance of netstack and native networking stacks under various conditions.
-
-## `tcp_benchmark`
-
-This benchmark allows TCP throughput testing under various conditions. The setup
-consists of an iperf client, a client proxy, a server proxy and an iperf server.
-The client proxy and server proxy abstract the network mechanism used to
-communicate between the iperf client and server.
-
-The setup looks like the following:
-
-```
- +--------------+ (native) +--------------+
- | iperf client |[lo @ 10.0.0.1]------>| client proxy |
- +--------------+ +--------------+
- [client.0 @ 10.0.0.2]
- (netstack) | | (native)
- +------+-----+
- |
- [br0]
- |
- Network emulation applied ---> [wan.0:wan.1]
- |
- [br1]
- |
- +------+-----+
- (netstack) | | (native)
- [server.0 @ 10.0.0.3]
- +--------------+ +--------------+
- | iperf server |<------[lo @ 10.0.0.4]| server proxy |
- +--------------+ (native) +--------------+
-```
-
-Different configurations can be run using different arguments. For example:
-
-* Native test under normal internet conditions: `tcp_benchmark`
-* Native test under ideal conditions: `tcp_benchmark --ideal`
-* Netstack client under ideal conditions: `tcp_benchmark --client --ideal`
-* Netstack client with 5% packet loss: `tcp_benchmark --client --ideal --loss
- 5`
-
-Use `tcp_benchmark --help` for full arguments.
-
-This tool may be used to easily generate data for graphing. For example, to
-generate a CSV for various latencies, you might do:
-
-```
-rm -f /tmp/netstack_latency.csv /tmp/native_latency.csv
-latencies=$(seq 0 5 50;
- seq 60 10 100;
- seq 125 25 250;
- seq 300 50 500)
-for latency in $latencies; do
- read throughput client_cpu server_cpu <<< \
- $(./tcp_benchmark --duration 30 --client --ideal --latency $latency)
- echo $latency,$throughput,$client_cpu >> /tmp/netstack_latency.csv
-done
-for latency in $latencies; do
- read throughput client_cpu server_cpu <<< \
- $(./tcp_benchmark --duration 30 --ideal --latency $latency)
- echo $latency,$throughput,$client_cpu >> /tmp/native_latency.csv
-done
-```
-
-Similarly, to generate a CSV for various levels of packet loss, the following
-would be appropriate:
-
-```
-rm -f /tmp/netstack_loss.csv /tmp/native_loss.csv
-losses=$(seq 0 0.1 1.0;
- seq 1.2 0.2 2.0;
- seq 2.5 0.5 5.0;
- seq 6.0 1.0 10.0)
-for loss in $losses; do
- read throughput client_cpu server_cpu <<< \
- $(./tcp_benchmark --duration 30 --client --ideal --latency 10 --loss $loss)
- echo $loss,$throughput,$client_cpu >> /tmp/netstack_loss.csv
-done
-for loss in $losses; do
- read throughput client_cpu server_cpu <<< \
- $(./tcp_benchmark --duration 30 --ideal --latency 10 --loss $loss)
- echo $loss,$throughput,$client_cpu >> /tmp/native_loss.csv
-done
-```
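
For reference, each `tcp_benchmark` invocation in the loops above prints a single line of the form `<throughput> <client-cpu-usage> <server-cpu-usage>`, which the shell `read` statements split into three fields. A minimal, hypothetical Go sketch of consuming such a line — the sample values are made up and this parser is not part of the deleted sources:

```go
// Illustrative only: split one "<throughput> <client_cpu> <server_cpu>" line
// as produced by tcp_benchmark into its three numeric fields.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	line := "9413 0.32 0.28" // Hypothetical output: Mbits/sec, client load, server load.
	fields := strings.Fields(line)
	if len(fields) != 3 {
		panic("unexpected output format")
	}
	// Parse errors are ignored for brevity in this sketch.
	throughput, _ := strconv.ParseFloat(fields[0], 64)
	clientCPU, _ := strconv.ParseFloat(fields[1], 64)
	serverCPU, _ := strconv.ParseFloat(fields[2], 64)
	fmt.Printf("throughput=%.1f Mbits/sec client_cpu=%.2f server_cpu=%.2f\n",
		throughput, clientCPU, serverCPU)
}
```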
diff --git a/test/benchmarks/tcp/nsjoin.c b/test/benchmarks/tcp/nsjoin.c
deleted file mode 100644
index 524b4d549..000000000
--- a/test/benchmarks/tcp/nsjoin.c
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef _GNU_SOURCE
-#define _GNU_SOURCE
-#endif
-
-#include <errno.h>
-#include <fcntl.h>
-#include <sched.h>
-#include <stdio.h>
-#include <string.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-int main(int argc, char** argv) {
- if (argc <= 2) {
- fprintf(stderr, "error: must provide a namespace file.\n");
- fprintf(stderr, "usage: %s <file> [arguments...]\n", argv[0]);
- return 1;
- }
-
- int fd = open(argv[1], O_RDONLY);
- if (fd < 0) {
- fprintf(stderr, "error opening %s: %s\n", argv[1], strerror(errno));
- return 1;
- }
- if (setns(fd, 0) < 0) {
- fprintf(stderr, "error joining %s: %s\n", argv[1], strerror(errno));
- return 1;
- }
-
- execvp(argv[2], &argv[2]);
- return 1;
-}
diff --git a/test/benchmarks/tcp/tcp_benchmark.sh b/test/benchmarks/tcp/tcp_benchmark.sh
deleted file mode 100755
index 6a4f33b96..000000000
--- a/test/benchmarks/tcp/tcp_benchmark.sh
+++ /dev/null
@@ -1,396 +0,0 @@
-#!/bin/bash
-
-# Copyright 2018 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# TCP benchmark; see README.md for documentation.
-
-# Fixed parameters.
-iperf_port=45201 # Not likely to be privileged.
-proxy_port=44000 # Ditto.
-client_addr=10.0.0.1
-client_proxy_addr=10.0.0.2
-server_proxy_addr=10.0.0.3
-server_addr=10.0.0.4
-mask=8
-
-# Defaults; this provides a reasonable approximation of a decent internet link.
-# Parameters can be varied independently from this set to see response to
-# various changes in the kind of link available.
-client=false
-server=false
-verbose=false
-gso=0
-swgso=false
-mtu=1280 # 1280 is a reasonable lowest-common-denominator.
-latency=10 # 10ms approximates a fast, dedicated connection.
-latency_variation=1 # +/- 1ms is a relatively low amount of jitter.
-loss=0.1 # 0.1% loss is non-zero, but not extremely high.
-duplicate=0.1 # 0.1% means duplicates are 1/10x as frequent as losses.
-duration=30 # 30s is enough time to get consistent results (experimentally).
-helper_dir=$(dirname $0)
-netstack_opts=
-disable_linux_gso=
-num_client_threads=1
-
-# Check for netem support.
-lsmod_output=$(lsmod | grep sch_netem)
-if [ "$?" != "0" ]; then
- echo "warning: sch_netem may not be installed." >&2
-fi
-
-while [ $# -gt 0 ]; do
- case "$1" in
- --client)
- client=true
- ;;
- --client_tcp_probe_file)
- shift
- netstack_opts="${netstack_opts} -client_tcp_probe_file=$1"
- ;;
- --server)
- server=true
- ;;
- --verbose)
- verbose=true
- ;;
- --gso)
- shift
- gso=$1
- ;;
- --swgso)
- swgso=true
- ;;
- --server_tcp_probe_file)
- shift
- netstack_opts="${netstack_opts} -server_tcp_probe_file=$1"
- ;;
- --ideal)
- mtu=1500 # Standard ethernet.
- latency=0 # No latency.
- latency_variation=0 # No jitter.
- loss=0 # No loss.
- duplicate=0 # No duplicates.
- ;;
- --mtu)
- shift
- [ "$#" -le 0 ] && echo "no mtu provided" && exit 1
- mtu=$1
- ;;
- --sack)
- netstack_opts="${netstack_opts} -sack"
- ;;
- --rack)
- netstack_opts="${netstack_opts} -rack"
- ;;
- --cubic)
- netstack_opts="${netstack_opts} -cubic"
- ;;
- --moderate-recv-buf)
- netstack_opts="${netstack_opts} -moderate_recv_buf"
- ;;
- --duration)
- shift
- [ "$#" -le 0 ] && echo "no duration provided" && exit 1
- duration=$1
- ;;
- --latency)
- shift
- [ "$#" -le 0 ] && echo "no latency provided" && exit 1
- latency=$1
- ;;
- --latency-variation)
- shift
- [ "$#" -le 0 ] && echo "no latency variation provided" && exit 1
- latency_variation=$1
- ;;
- --loss)
- shift
- [ "$#" -le 0 ] && echo "no loss probability provided" && exit 1
- loss=$1
- ;;
- --duplicate)
- shift
- [ "$#" -le 0 ] && echo "no duplicate provided" && exit 1
- duplicate=$1
- ;;
- --cpuprofile)
- shift
- netstack_opts="${netstack_opts} -cpuprofile=$1"
- ;;
- --memprofile)
- shift
- netstack_opts="${netstack_opts} -memprofile=$1"
- ;;
- --disable-linux-gso)
- disable_linux_gso=1
- ;;
- --num-client-threads)
- shift
- num_client_threads=$1
- ;;
- --helpers)
- shift
- [ "$#" -le 0 ] && echo "no helper dir provided" && exit 1
- helper_dir=$1
- ;;
- *)
- echo "usage: $0 [options]"
- echo "options:"
- echo " --help show this message"
- echo " --verbose verbose output"
- echo " --client use netstack as the client"
- echo " --ideal reset all network emulation"
- echo " --server use netstack as the server"
- echo " --mtu set the mtu (bytes)"
- echo " --sack enable SACK support"
- echo " --rack enable RACK support"
- echo " --moderate-recv-buf enable TCP receive buffer auto-tuning"
- echo " --cubic enable CUBIC congestion control for Netstack"
- echo " --duration set the test duration (s)"
- echo " --latency set the latency (ms)"
- echo " --latency-variation set the latency variation"
- echo " --loss set the loss probability (%)"
- echo " --duplicate set the duplicate probability (%)"
- echo " --helpers set the helper directory"
- echo " --num-client-threads number of parallel client threads to run"
- echo " --disable-linux-gso disable segmentation offload in the Linux network stack"
- echo ""
-      echo "The output of the script will be:"
- echo " <throughput> <client-cpu-usage> <server-cpu-usage>"
- exit 1
- esac
- shift
-done
-
-if [ ${verbose} == "true" ]; then
- set -x
-fi
-
-# Latency needs to be halved, since it's applied in both directions.
-half_latency=$(echo ${latency}/2 | bc -l | awk '{printf "%1.2f", $0}')
-half_loss=$(echo ${loss}/2 | bc -l | awk '{printf "%1.6f", $0}')
-half_duplicate=$(echo ${duplicate}/2 | bc -l | awk '{printf "%1.6f", $0}')
-helper_dir=${helper_dir#$(pwd)/} # Use relative paths.
-proxy_binary=${helper_dir}/tcp_proxy
-nsjoin_binary=${helper_dir}/nsjoin
-
-if [ ! -e ${proxy_binary} ]; then
- echo "Could not locate ${proxy_binary}, please make sure you've built the binary"
- exit 1
-fi
-
-if [ ! -e ${nsjoin_binary} ]; then
- echo "Could not locate ${nsjoin_binary}, please make sure you've built the binary"
- exit 1
-fi
-
-if [ $(echo ${latency_variation} | awk '{printf "%1.2f", $0}') != "0.00" ]; then
- # As long as there's some jitter, then we use the paretonormal distribution.
- # This will preserve the minimum RTT, but add a realistic amount of jitter to
- # the connection and cause re-ordering, etc. The regular pareto distribution
-  # appears to add an unreasonable level of delay (we want only small spikes).
- distribution="distribution paretonormal"
-else
- distribution=""
-fi
-
-# Client proxy that will listen on the client's iperf target and forward
-# traffic using the host networking stack.
-client_args="${proxy_binary} -port ${proxy_port} -forward ${server_proxy_addr}:${proxy_port}"
-if ${client}; then
- # Client proxy that will listen on the client's iperf target
- # and forward traffic using netstack.
- client_args="${proxy_binary} ${netstack_opts} -port ${proxy_port} -client \\
- -mtu ${mtu} -iface client.0 -addr ${client_proxy_addr} -mask ${mask} \\
- -forward ${server_proxy_addr}:${proxy_port} -gso=${gso} -swgso=${swgso}"
-fi
-
-# Server proxy that will listen on the proxy port and forward to the server's
-# iperf server using the host networking stack.
-server_args="${proxy_binary} -port ${proxy_port} -forward ${server_addr}:${iperf_port}"
-if ${server}; then
-  # Server proxy that will listen on the proxy port and forward to the server's
- # iperf server using netstack.
- server_args="${proxy_binary} ${netstack_opts} -port ${proxy_port} -server \\
- -mtu ${mtu} -iface server.0 -addr ${server_proxy_addr} -mask ${mask} \\
- -forward ${server_addr}:${iperf_port} -gso=${gso} -swgso=${swgso}"
-fi
-
-# Specify loss and duplicate parameters only if they are non-zero
-loss_opt=""
-if [ "$(echo $half_loss | bc -q)" != "0" ]; then
- loss_opt="loss random ${half_loss}%"
-fi
-duplicate_opt=""
-if [ "$(echo $half_duplicate | bc -q)" != "0" ]; then
- duplicate_opt="duplicate ${half_duplicate}%"
-fi
-
-exec unshare -U -m -n -r -f -p --mount-proc /bin/bash << EOF
-set -e -m
-
-if [ ${verbose} == "true" ]; then
- set -x
-fi
-
-mount -t tmpfs netstack-bench /tmp
-
-# We may have reset the path in the unshare if the shell loaded some public
-# profiles. Ensure that tools are discoverable via the parent's PATH.
-export PATH=${PATH}
-
-# Add client, server interfaces.
-ip link add client.0 type veth peer name client.1
-ip link add server.0 type veth peer name server.1
-
-# Add network emulation devices.
-ip link add wan.0 type veth peer name wan.1
-ip link set wan.0 up
-ip link set wan.1 up
-
-# Enroll on the bridge.
-ip link add name br0 type bridge
-ip link add name br1 type bridge
-ip link set client.1 master br0
-ip link set server.1 master br1
-ip link set wan.0 master br0
-ip link set wan.1 master br1
-ip link set br0 up
-ip link set br1 up
-
-# Set the MTU appropriately.
-ip link set client.0 mtu ${mtu}
-ip link set server.0 mtu ${mtu}
-ip link set wan.0 mtu ${mtu}
-ip link set wan.1 mtu ${mtu}
-
-# Add appropriate latency, loss and duplication.
-#
-# This is added in at the point of bridge connection.
-for device in wan.0 wan.1; do
- # NOTE: We don't support a loss correlation as testing has shown that it
- # actually doesn't work. The man page actually has a small comment about this
- # "It is also possible to add a correlation, but this option is now deprecated
- # due to the noticed bad behavior." For more information see netem(8).
- tc qdisc add dev \$device root netem \\
- delay ${half_latency}ms ${latency_variation}ms ${distribution} \\
- ${loss_opt} ${duplicate_opt}
-done
-
-# Start a client proxy.
-touch /tmp/client.netns
-unshare -n mount --bind /proc/self/ns/net /tmp/client.netns
-
-# Move the endpoint into the namespace.
-while ip link | grep client.0 > /dev/null; do
- ip link set dev client.0 netns /tmp/client.netns
-done
-
-if ! ${client}; then
- # Only add the address to NIC if netstack is not in use. Otherwise the host
- # will also process the inbound SYN and send a RST back.
- ${nsjoin_binary} /tmp/client.netns ip addr add ${client_proxy_addr}/${mask} dev client.0
-fi
-
-# Start a server proxy.
-touch /tmp/server.netns
-unshare -n mount --bind /proc/self/ns/net /tmp/server.netns
-# Move the endpoint into the namespace.
-while ip link | grep server.0 > /dev/null; do
- ip link set dev server.0 netns /tmp/server.netns
-done
-if ! ${server}; then
- # Only add the address to NIC if netstack is not in use. Otherwise the host
- # will also process the inbound SYN and send a RST back.
- ${nsjoin_binary} /tmp/server.netns ip addr add ${server_proxy_addr}/${mask} dev server.0
-fi
-
-# Add client and server addresses, and bring everything up.
-${nsjoin_binary} /tmp/client.netns ip addr add ${client_addr}/${mask} dev client.0
-${nsjoin_binary} /tmp/server.netns ip addr add ${server_addr}/${mask} dev server.0
-if [ "${disable_linux_gso}" == "1" ]; then
- ${nsjoin_binary} /tmp/client.netns ethtool -K client.0 tso off
- ${nsjoin_binary} /tmp/client.netns ethtool -K client.0 gro off
- ${nsjoin_binary} /tmp/client.netns ethtool -K client.0 gso off
- ${nsjoin_binary} /tmp/server.netns ethtool -K server.0 tso off
- ${nsjoin_binary} /tmp/server.netns ethtool -K server.0 gso off
- ${nsjoin_binary} /tmp/server.netns ethtool -K server.0 gro off
-fi
-${nsjoin_binary} /tmp/client.netns ip link set client.0 up
-${nsjoin_binary} /tmp/client.netns ip link set lo up
-${nsjoin_binary} /tmp/server.netns ip link set server.0 up
-${nsjoin_binary} /tmp/server.netns ip link set lo up
-ip link set dev client.1 up
-ip link set dev server.1 up
-
-${nsjoin_binary} /tmp/client.netns ${client_args} &
-client_pid=\$!
-${nsjoin_binary} /tmp/server.netns ${server_args} &
-server_pid=\$!
-
-# Start the iperf server.
-${nsjoin_binary} /tmp/server.netns iperf -p ${iperf_port} -s >&2 &
-iperf_pid=\$!
-
-# Show traffic information.
-if ! ${client} && ! ${server}; then
- ${nsjoin_binary} /tmp/client.netns ping -c 100 -i 0.001 -W 1 ${server_addr} >&2 || true
-fi
-
-results_file=\$(mktemp)
-function cleanup {
- rm -f \$results_file
- kill -TERM \$client_pid
- kill -TERM \$server_pid
- wait \$client_pid
- wait \$server_pid
- kill -9 \$iperf_pid 2>/dev/null
-}
-
-# Allow failure from this point.
-set +e
-trap cleanup EXIT
-
-# Run the benchmark, recording the results file.
-while ${nsjoin_binary} /tmp/client.netns iperf \\
- -p ${proxy_port} -c ${client_addr} -t ${duration} -f m -P ${num_client_threads} 2>&1 \\
- | tee \$results_file \\
- | grep "connect failed" >/dev/null; do
- sleep 0.1 # Wait for all services.
-done
-
-# Unlink all relevant devices from the bridge. This is because when the bridge
-# is deleted, the kernel may hang. It appears that this problem is fixed in
-# upstream commit 1ce5cce895309862d2c35d922816adebe094fe4a.
-ip link set client.1 nomaster
-ip link set server.1 nomaster
-ip link set wan.0 nomaster
-ip link set wan.1 nomaster
-
-# Emit raw results.
-cat \$results_file >&2
-
-# Emit a useful result (final throughput).
-mbits=\$(grep Mbits/sec \$results_file \\
- | sed -n -e 's/^.*[[:space:]]\\([[:digit:]]\\+\\(\\.[[:digit:]]\\+\\)\\?\\)[[:space:]]*Mbits\\/sec.*/\\1/p')
-client_cpu_ticks=\$(cat /proc/\$client_pid/stat \\
- | awk '{print (\$14+\$15);}')
-server_cpu_ticks=\$(cat /proc/\$server_pid/stat \\
- | awk '{print (\$14+\$15);}')
-ticks_per_sec=\$(getconf CLK_TCK)
-client_cpu_load=\$(bc -l <<< \$client_cpu_ticks/\$ticks_per_sec/${duration})
-server_cpu_load=\$(bc -l <<< \$server_cpu_ticks/\$ticks_per_sec/${duration})
-echo \$mbits \$client_cpu_load \$server_cpu_load
-EOF
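
For reference, the CPU-load figures emitted at the end of the script above are simply the (utime + stime) ticks from `/proc/<pid>/stat` (fields 14 and 15) divided by `CLK_TCK` and the test duration. A minimal Go sketch of the same arithmetic, assuming a hypothetical PID, tick rate, and duration (not part of the deleted sources):

```go
// Sketch of the CPU-load computation used by tcp_benchmark.sh: fields 14 and
// 15 of /proc/<pid>/stat are utime and stime in clock ticks.
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

func cpuLoad(pid int, clkTck, durationSecs float64) (float64, error) {
	data, err := os.ReadFile(fmt.Sprintf("/proc/%d/stat", pid))
	if err != nil {
		return 0, err
	}
	fields := strings.Fields(string(data))
	if len(fields) < 15 {
		return 0, fmt.Errorf("unexpected stat format")
	}
	utime, err := strconv.ParseFloat(fields[13], 64) // Field 14 (1-indexed).
	if err != nil {
		return 0, err
	}
	stime, err := strconv.ParseFloat(fields[14], 64) // Field 15 (1-indexed).
	if err != nil {
		return 0, err
	}
	return (utime + stime) / clkTck / durationSecs, nil
}

func main() {
	// Hypothetical values: PID 1234, CLK_TCK of 100, a 30s run.
	load, err := cpuLoad(1234, 100, 30)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Printf("cpu load: %.3f\n", load)
}
```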
diff --git a/test/benchmarks/tcp/tcp_proxy.go b/test/benchmarks/tcp/tcp_proxy.go
deleted file mode 100644
index be74e4d4a..000000000
--- a/test/benchmarks/tcp/tcp_proxy.go
+++ /dev/null
@@ -1,462 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Binary tcp_proxy is a simple TCP proxy.
-package main
-
-import (
- "encoding/gob"
- "flag"
- "fmt"
- "io"
- "log"
- "math/rand"
- "net"
- "os"
- "os/signal"
- "regexp"
- "runtime"
- "runtime/pprof"
- "strconv"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/adapters/gonet"
- "gvisor.dev/gvisor/pkg/tcpip/link/fdbased"
- "gvisor.dev/gvisor/pkg/tcpip/link/qdisc/fifo"
- "gvisor.dev/gvisor/pkg/tcpip/network/arp"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
- "gvisor.dev/gvisor/pkg/tcpip/transport/udp"
-)
-
-var (
- port = flag.Int("port", 0, "bind port (all addresses)")
- forward = flag.String("forward", "", "forwarding target")
- client = flag.Bool("client", false, "use netstack for listen")
- server = flag.Bool("server", false, "use netstack for dial")
-
- // Netstack-specific options.
- mtu = flag.Int("mtu", 1280, "mtu for network stack")
- addr = flag.String("addr", "", "address for tap-based netstack")
- mask = flag.Int("mask", 8, "mask size for address")
- iface = flag.String("iface", "", "network interface name to bind for netstack")
- sack = flag.Bool("sack", false, "enable SACK support for netstack")
- rack = flag.Bool("rack", false, "enable RACK in TCP")
- moderateRecvBuf = flag.Bool("moderate_recv_buf", false, "enable TCP Receive Buffer Auto-tuning")
- cubic = flag.Bool("cubic", false, "enable use of CUBIC congestion control for netstack")
- gso = flag.Int("gso", 0, "GSO maximum size")
- swgso = flag.Bool("swgso", false, "software-level GSO")
- clientTCPProbeFile = flag.String("client_tcp_probe_file", "", "if specified, installs a tcp probe to dump endpoint state to the specified file.")
- serverTCPProbeFile = flag.String("server_tcp_probe_file", "", "if specified, installs a tcp probe to dump endpoint state to the specified file.")
- cpuprofile = flag.String("cpuprofile", "", "write cpu profile to the specified file.")
- memprofile = flag.String("memprofile", "", "write memory profile to the specified file.")
-)
-
-type impl interface {
- dial(address string) (net.Conn, error)
- listen(port int) (net.Listener, error)
- printStats()
-}
-
-type netImpl struct{}
-
-func (netImpl) dial(address string) (net.Conn, error) {
- return net.Dial("tcp", address)
-}
-
-func (netImpl) listen(port int) (net.Listener, error) {
- return net.Listen("tcp", fmt.Sprintf(":%d", port))
-}
-
-func (netImpl) printStats() {
-}
-
-const (
- nicID = 1 // Fixed.
- bufSize = 4 << 20 // 4MB.
-)
-
-type netstackImpl struct {
- s *stack.Stack
- addr tcpip.Address
- mode string
-}
-
-func setupNetwork(ifaceName string, numChannels int) (fds []int, err error) {
- // Get all interfaces in the namespace.
- ifaces, err := net.Interfaces()
- if err != nil {
- return nil, fmt.Errorf("querying interfaces: %v", err)
- }
-
- for _, iface := range ifaces {
- if iface.Name != ifaceName {
- continue
- }
- // Create the socket.
- const protocol = 0x0300 // htons(ETH_P_ALL)
- fds := make([]int, numChannels)
- for i := range fds {
- fd, err := unix.Socket(unix.AF_PACKET, unix.SOCK_RAW, protocol)
- if err != nil {
- return nil, fmt.Errorf("unable to create raw socket: %v", err)
- }
-
- // Bind to the appropriate device.
- ll := unix.SockaddrLinklayer{
- Protocol: protocol,
- Ifindex: iface.Index,
- Pkttype: unix.PACKET_HOST,
- }
- if err := unix.Bind(fd, &ll); err != nil {
- return nil, fmt.Errorf("unable to bind to %q: %v", iface.Name, err)
- }
-
-			// Raw sockets by default have a very small SO_RCVBUF of 256KB;
-			// raise it to at least 4MB to reduce packet drops.
- if err := unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_RCVBUF, bufSize); err != nil {
- return nil, fmt.Errorf("setsockopt(..., SO_RCVBUF, %v,..) = %v", bufSize, err)
- }
-
- if err := unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_SNDBUF, bufSize); err != nil {
- return nil, fmt.Errorf("setsockopt(..., SO_SNDBUF, %v,..) = %v", bufSize, err)
- }
-
- if !*swgso && *gso != 0 {
- if err := unix.SetsockoptInt(fd, unix.SOL_PACKET, unix.PACKET_VNET_HDR, 1); err != nil {
- return nil, fmt.Errorf("unable to enable the PACKET_VNET_HDR option: %v", err)
- }
- }
- fds[i] = fd
- }
- return fds, nil
- }
- return nil, fmt.Errorf("failed to find interface: %v", ifaceName)
-}
-
-func newNetstackImpl(mode string) (impl, error) {
- fds, err := setupNetwork(*iface, runtime.GOMAXPROCS(-1))
- if err != nil {
- return nil, err
- }
-
- // Parse details.
- parsedAddr := tcpip.Address(net.ParseIP(*addr).To4())
- parsedDest := tcpip.Address("") // Filled in below.
- parsedMask := tcpip.AddressMask("") // Filled in below.
- switch *mask {
- case 8:
- parsedDest = tcpip.Address([]byte{parsedAddr[0], 0, 0, 0})
- parsedMask = tcpip.AddressMask([]byte{0xff, 0, 0, 0})
- case 16:
- parsedDest = tcpip.Address([]byte{parsedAddr[0], parsedAddr[1], 0, 0})
- parsedMask = tcpip.AddressMask([]byte{0xff, 0xff, 0, 0})
- case 24:
- parsedDest = tcpip.Address([]byte{parsedAddr[0], parsedAddr[1], parsedAddr[2], 0})
- parsedMask = tcpip.AddressMask([]byte{0xff, 0xff, 0xff, 0})
- default:
- // This is just laziness; we don't expect a different mask.
-		return nil, fmt.Errorf("mask %d not supported", *mask)
- }
-
- // Create a new network stack.
- netProtos := []stack.NetworkProtocolFactory{ipv4.NewProtocol, arp.NewProtocol}
- transProtos := []stack.TransportProtocolFactory{tcp.NewProtocol, udp.NewProtocol}
- s := stack.New(stack.Options{
- NetworkProtocols: netProtos,
- TransportProtocols: transProtos,
- })
-
- // Generate a new mac for the eth device.
- mac := make(net.HardwareAddr, 6)
- rand.Read(mac) // Fill with random data.
- mac[0] &^= 0x1 // Clear multicast bit.
- mac[0] |= 0x2 // Set local assignment bit (IEEE802).
- ep, err := fdbased.New(&fdbased.Options{
- FDs: fds,
- MTU: uint32(*mtu),
- EthernetHeader: true,
- Address: tcpip.LinkAddress(mac),
- // Enable checksum generation as we need to generate valid
- // checksums for the veth device to deliver our packets to the
- // peer. But we do want to disable checksum verification as veth
- // devices do perform GRO and the linux host kernel may not
- // regenerate valid checksums after GRO.
- TXChecksumOffload: false,
- RXChecksumOffload: true,
- PacketDispatchMode: fdbased.RecvMMsg,
- GSOMaxSize: uint32(*gso),
- SoftwareGSOEnabled: *swgso,
- })
- if err != nil {
- return nil, fmt.Errorf("failed to create FD endpoint: %v", err)
- }
- if err := s.CreateNIC(nicID, fifo.New(ep, runtime.GOMAXPROCS(0), 1000)); err != nil {
- return nil, fmt.Errorf("error creating NIC %q: %v", *iface, err)
- }
- if err := s.AddAddress(nicID, ipv4.ProtocolNumber, parsedAddr); err != nil {
- return nil, fmt.Errorf("error adding IP address to %q: %v", *iface, err)
- }
-
- subnet, err := tcpip.NewSubnet(parsedDest, parsedMask)
- if err != nil {
- return nil, fmt.Errorf("tcpip.Subnet(%s, %s): %s", parsedDest, parsedMask, err)
- }
-	// Add default route; we only support a single route to the local subnet.
- s.SetRouteTable([]tcpip.Route{
- {
- Destination: subnet,
- NIC: nicID,
- },
- })
-
- // Set protocol options.
- {
- opt := tcpip.TCPSACKEnabled(*sack)
- if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- return nil, fmt.Errorf("SetTransportProtocolOption(%d, &%T(%t)): %s", tcp.ProtocolNumber, opt, opt, err)
- }
- }
-
- if *rack {
- opt := tcpip.TCPRecovery(tcpip.TCPRACKLossDetection)
- if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- return nil, fmt.Errorf("enabling RACK failed: %v", err)
- }
- }
-
- // Enable Receive Buffer Auto-Tuning.
- {
- opt := tcpip.TCPModerateReceiveBufferOption(*moderateRecvBuf)
- if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- return nil, fmt.Errorf("SetTransportProtocolOption(%d, &%T(%t)): %s", tcp.ProtocolNumber, opt, opt, err)
- }
- }
-
- // Set Congestion Control to cubic if requested.
- if *cubic {
- opt := tcpip.CongestionControlOption("cubic")
- if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- return nil, fmt.Errorf("SetTransportProtocolOption(%d, &%T(%s)): %s", tcp.ProtocolNumber, opt, opt, err)
- }
- }
-
- return netstackImpl{
- s: s,
- addr: parsedAddr,
- mode: mode,
- }, nil
-}
-
-func (n netstackImpl) dial(address string) (net.Conn, error) {
- host, port, err := net.SplitHostPort(address)
- if err != nil {
- return nil, err
- }
- if host == "" {
- // A host must be provided for the dial.
- return nil, fmt.Errorf("no host provided")
- }
- portNumber, err := strconv.Atoi(port)
- if err != nil {
- return nil, err
- }
- addr := tcpip.FullAddress{
- NIC: nicID,
- Addr: tcpip.Address(net.ParseIP(host).To4()),
- Port: uint16(portNumber),
- }
- conn, err := gonet.DialTCP(n.s, addr, ipv4.ProtocolNumber)
- if err != nil {
- return nil, err
- }
- return conn, nil
-}
-
-func (n netstackImpl) listen(port int) (net.Listener, error) {
- addr := tcpip.FullAddress{
- NIC: nicID,
- Port: uint16(port),
- }
- listener, err := gonet.ListenTCP(n.s, addr, ipv4.ProtocolNumber)
- if err != nil {
- return nil, err
- }
- return listener, nil
-}
-
-var zeroFieldsRegexp = regexp.MustCompile(`\s*[a-zA-Z0-9]*:0`)
-
-func (n netstackImpl) printStats() {
- // Don't show zero fields.
- stats := zeroFieldsRegexp.ReplaceAllString(fmt.Sprintf("%+v", n.s.Stats()), "")
- log.Printf("netstack %s Stats: %+v\n", n.mode, stats)
-}
-
-// installProbe installs a TCP Probe function that will dump endpoint
-// state to the specified file. It also returns a close func() that
-// can be used to close the probeFile.
-func (n netstackImpl) installProbe(probeFileName string) (close func()) {
- // Install Probe to dump out end point state.
- probeFile, err := os.Create(probeFileName)
- if err != nil {
- log.Fatalf("failed to create tcp_probe file %s: %v", probeFileName, err)
- }
- probeEncoder := gob.NewEncoder(probeFile)
- // Install a TCP Probe.
- n.s.AddTCPProbe(func(state stack.TCPEndpointState) {
- probeEncoder.Encode(state)
- })
- return func() { probeFile.Close() }
-}
-
-func main() {
- flag.Parse()
- if *port == 0 {
- log.Fatalf("no port provided")
- }
- if *forward == "" {
- log.Fatalf("no forward provided")
- }
-	// Seed the random number generator to ensure that we are given MAC addresses
-	// that don't collide between the client and server stacks.
- rand.Seed(time.Now().UTC().UnixNano())
-
- if *cpuprofile != "" {
- f, err := os.Create(*cpuprofile)
- if err != nil {
- log.Fatal("could not create CPU profile: ", err)
- }
- defer func() {
- if err := f.Close(); err != nil {
- log.Print("error closing CPU profile: ", err)
- }
- }()
- if err := pprof.StartCPUProfile(f); err != nil {
- log.Fatal("could not start CPU profile: ", err)
- }
- defer pprof.StopCPUProfile()
- }
-
- var (
- in impl
- out impl
- err error
- )
- if *server {
- in, err = newNetstackImpl("server")
- if *serverTCPProbeFile != "" {
- defer in.(netstackImpl).installProbe(*serverTCPProbeFile)()
- }
-
- } else {
- in = netImpl{}
- }
- if err != nil {
- log.Fatalf("netstack error: %v", err)
- }
- if *client {
- out, err = newNetstackImpl("client")
- if *clientTCPProbeFile != "" {
- defer out.(netstackImpl).installProbe(*clientTCPProbeFile)()
- }
- } else {
- out = netImpl{}
- }
- if err != nil {
- log.Fatalf("netstack error: %v", err)
- }
-
- // Dial forward before binding.
- var next net.Conn
- for {
- next, err = out.dial(*forward)
- if err == nil {
- break
- }
- time.Sleep(50 * time.Millisecond)
- log.Printf("connect failed retrying: %v", err)
- }
-
- // Bind once to the server socket.
- listener, err := in.listen(*port)
- if err != nil {
-		// Should not happen; everything must be bound by the time
-		// this proxy is started.
- log.Fatalf("unable to listen: %v", err)
- }
- log.Printf("client=%v, server=%v, ready.", *client, *server)
-
- sigs := make(chan os.Signal, 1)
- signal.Notify(sigs, unix.SIGTERM)
- go func() {
- <-sigs
- if *cpuprofile != "" {
- pprof.StopCPUProfile()
- }
- if *memprofile != "" {
- f, err := os.Create(*memprofile)
- if err != nil {
- log.Fatal("could not create memory profile: ", err)
- }
- defer func() {
- if err := f.Close(); err != nil {
- log.Print("error closing memory profile: ", err)
- }
- }()
- runtime.GC() // get up-to-date statistics
- if err := pprof.WriteHeapProfile(f); err != nil {
- log.Fatalf("Unable to write heap profile: %v", err)
- }
- }
- os.Exit(0)
- }()
-
- for {
- // Forward all connections.
- inConn, err := listener.Accept()
- if err != nil {
- // This should not happen; we are listening
- // successfully. Exhausted all available FDs?
- log.Fatalf("accept error: %v", err)
- }
- log.Printf("incoming connection established.")
-
- // Copy both ways.
- go io.Copy(inConn, next)
- go io.Copy(next, inConn)
-
- // Print stats every second.
- go func() {
- t := time.NewTicker(time.Second)
- defer t.Stop()
- for {
- <-t.C
- in.printStats()
- out.printStats()
- }
- }()
-
- for {
- // Dial again.
- next, err = out.dial(*forward)
- if err == nil {
- break
- }
- }
- }
-}
diff --git a/test/benchmarks/tools/BUILD b/test/benchmarks/tools/BUILD
deleted file mode 100644
index 9290830d7..000000000
--- a/test/benchmarks/tools/BUILD
+++ /dev/null
@@ -1,35 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "tools",
- testonly = 1,
- srcs = [
- "ab.go",
- "fio.go",
- "hey.go",
- "iperf.go",
- "meminfo.go",
- "parser_util.go",
- "redis.go",
- "sysbench.go",
- "tools.go",
- ],
- visibility = ["//:sandbox"],
-)
-
-go_test(
- name = "tools_test",
- size = "small",
- srcs = [
- "ab_test.go",
- "fio_test.go",
- "hey_test.go",
- "iperf_test.go",
- "meminfo_test.go",
- "redis_test.go",
- "sysbench_test.go",
- ],
- library = ":tools",
-)
diff --git a/test/benchmarks/tools/ab.go b/test/benchmarks/tools/ab.go
deleted file mode 100644
index d9abf0763..000000000
--- a/test/benchmarks/tools/ab.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tools
-
-import (
- "fmt"
- "net"
- "regexp"
- "strconv"
- "testing"
-)
-
-// ApacheBench is for the client application ApacheBench.
-type ApacheBench struct {
- Requests int
- Concurrency int
- Doc string
- // TODO(zkoopmans): support KeepAlive and pass option to enable.
-}
-
-// MakeCmd makes an ApacheBench command.
-func (a *ApacheBench) MakeCmd(ip net.IP, port int) []string {
- path := fmt.Sprintf("http://%s:%d/%s", ip, port, a.Doc)
- // See apachebench (ab) for flags.
- cmd := fmt.Sprintf("ab -n %d -c %d %s", a.Requests, a.Concurrency, path)
- return []string{"sh", "-c", cmd}
-}
-
-// Report parses and reports metrics from ApacheBench output.
-func (a *ApacheBench) Report(b *testing.B, output string) {
- // Parse and report custom metrics.
- transferRate, err := a.parseTransferRate(output)
- if err != nil {
-		b.Logf("failed to parse transfer rate: %v", err)
- }
- b.ReportMetric(transferRate*1024, "transfer_rate_b/s") // Convert from Kb/s to b/s.
- ReportCustomMetric(b, transferRate*1024, "transfer_rate" /*metric name*/, "bytes_per_second" /*unit*/)
-
- latency, err := a.parseLatency(output)
- if err != nil {
- b.Logf("failed to parse latency: %v", err)
- }
- b.ReportMetric(latency/1000, "mean_latency_secs") // Convert from ms to s.
- ReportCustomMetric(b, latency/1000, "mean_latency" /*metric name*/, "s" /*unit*/)
-
- reqPerSecond, err := a.parseRequestsPerSecond(output)
- if err != nil {
- b.Logf("failed to parse requests per second: %v", err)
- }
- b.ReportMetric(reqPerSecond, "requests_per_second")
- ReportCustomMetric(b, reqPerSecond, "requests_per_second" /*metric name*/, "QPS" /*unit*/)
-}
-
-var transferRateRE = regexp.MustCompile(`Transfer rate:\s+(\d+\.?\d+?)\s+\[Kbytes/sec\]\s+received`)
-
-// parseTransferRate parses transfer rate from ApacheBench output.
-func (a *ApacheBench) parseTransferRate(data string) (float64, error) {
- match := transferRateRE.FindStringSubmatch(data)
- if len(match) < 2 {
-		return 0, fmt.Errorf("failed to get transfer rate: %s", data)
- }
- return strconv.ParseFloat(match[1], 64)
-}
-
-var latencyRE = regexp.MustCompile(`Total:\s+\d+\s+(\d+)\s+(\d+\.?\d+?)\s+\d+\s+\d+\s`)
-
-// parseLatency parses latency from ApacheBench output.
-func (a *ApacheBench) parseLatency(data string) (float64, error) {
- match := latencyRE.FindStringSubmatch(data)
- if len(match) < 2 {
-		return 0, fmt.Errorf("failed to get latency: %s", data)
- }
- return strconv.ParseFloat(match[1], 64)
-}
-
-var requestsPerSecondRE = regexp.MustCompile(`Requests per second:\s+(\d+\.?\d+?)\s+`)
-
-// parseRequestsPerSecond parses requests per second from ApacheBench output.
-func (a *ApacheBench) parseRequestsPerSecond(data string) (float64, error) {
- match := requestsPerSecondRE.FindStringSubmatch(data)
-		return 0, fmt.Errorf("failed to get requests per second: %s", data)
- return 0, fmt.Errorf("failed get bandwidth: %s", data)
- }
- return strconv.ParseFloat(match[1], 64)
-}
diff --git a/test/benchmarks/tools/ab_test.go b/test/benchmarks/tools/ab_test.go
deleted file mode 100644
index 28ee66ec1..000000000
--- a/test/benchmarks/tools/ab_test.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tools
-
-import "testing"
-
-// TestApacheBench checks the ApacheBench parsers on sample output.
-func TestApacheBench(t *testing.T) {
- // Sample output from apachebench.
- sampleData := `This is ApacheBench, Version 2.3 <$Revision: 1826891 $>
-Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/
-Licensed to The Apache Software Foundation, http://www.apache.org/
-
-Benchmarking 10.10.10.10 (be patient).....done
-
-
-Server Software: Apache/2.4.38
-Server Hostname: 10.10.10.10
-Server Port: 80
-
-Document Path: /latin10k.txt
-Document Length: 210 bytes
-
-Concurrency Level: 1
-Time taken for tests: 0.180 seconds
-Complete requests: 100
-Failed requests: 0
-Non-2xx responses: 100
-Total transferred: 38800 bytes
-HTML transferred: 21000 bytes
-Requests per second: 556.44 [#/sec] (mean)
-Time per request: 1.797 [ms] (mean)
-Time per request: 1.797 [ms] (mean, across all concurrent requests)
-Transfer rate: 210.84 [Kbytes/sec] received
-
-Connection Times (ms)
- min mean[+/-sd] median max
-Connect: 0 0 0.2 0 2
-Processing: 1 2 1.0 1 8
-Waiting: 1 1 1.0 1 7
-Total: 1 2 1.2 1 10
-
-Percentage of the requests served within a certain time (ms)
- 50% 1
- 66% 2
- 75% 2
- 80% 2
- 90% 2
- 95% 3
- 98% 7
- 99% 10
- 100% 10 (longest request)`
-
- ab := ApacheBench{}
- want := 210.84
- got, err := ab.parseTransferRate(sampleData)
- if err != nil {
- t.Fatalf("failed to parse transfer rate with error: %v", err)
- } else if got != want {
- t.Fatalf("parseTransferRate got: %f, want: %f", got, want)
- }
-
- want = 2.0
- got, err = ab.parseLatency(sampleData)
- if err != nil {
- t.Fatalf("failed to parse transfer rate with error: %v", err)
- } else if got != want {
- t.Fatalf("parseLatency got: %f, want: %f", got, want)
- }
-
- want = 556.44
- got, err = ab.parseRequestsPerSecond(sampleData)
- if err != nil {
- t.Fatalf("failed to parse transfer rate with error: %v", err)
- } else if got != want {
- t.Fatalf("parseRequestsPerSecond got: %f, want: %f", got, want)
- }
-}
diff --git a/test/benchmarks/tools/fio.go b/test/benchmarks/tools/fio.go
deleted file mode 100644
index ea12436d2..000000000
--- a/test/benchmarks/tools/fio.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tools
-
-import (
- "encoding/json"
- "fmt"
- "strconv"
- "strings"
- "testing"
-)
-
-// Fio makes 'fio' commands and parses their output.
-type Fio struct {
- Test string // test to run: read, write, randread, randwrite.
- Size int // total size to be read/written in megabytes.
- BlockSize int // block size to be read/written in kilobytes.
- IODepth int // I/O depth for reads/writes.
-}
-
-// MakeCmd makes a 'fio' command.
-func (f *Fio) MakeCmd(filename string) []string {
- cmd := []string{"fio", "--output-format=json", "--ioengine=sync"}
- cmd = append(cmd, fmt.Sprintf("--name=%s", f.Test))
- cmd = append(cmd, fmt.Sprintf("--size=%dM", f.Size))
- cmd = append(cmd, fmt.Sprintf("--blocksize=%dK", f.BlockSize))
- cmd = append(cmd, fmt.Sprintf("--filename=%s", filename))
- cmd = append(cmd, fmt.Sprintf("--iodepth=%d", f.IODepth))
- cmd = append(cmd, fmt.Sprintf("--rw=%s", f.Test))
- return cmd
-}
-
-// Report reports metrics based on output from an 'fio' command.
-func (f *Fio) Report(b *testing.B, output string) {
- b.Helper()
- // Parse the output and report the metrics.
- isRead := strings.Contains(f.Test, "read")
- bw, err := f.parseBandwidth(output, isRead)
- if err != nil {
- b.Fatalf("failed to parse bandwidth from %s with: %v", output, err)
- }
- ReportCustomMetric(b, bw, "bandwidth" /*metric name*/, "bytes_per_second" /*unit*/)
-
- iops, err := f.parseIOps(output, isRead)
- if err != nil {
- b.Fatalf("failed to parse iops from %s with: %v", output, err)
- }
- ReportCustomMetric(b, iops, "io_ops" /*metric name*/, "ops_per_second" /*unit*/)
-}
-
-// parseBandwidth reports the bandwidth in b/s.
-func (f *Fio) parseBandwidth(data string, isRead bool) (float64, error) {
- op := "write"
- if isRead {
- op = "read"
- }
- result, err := f.parseFioJSON(data, op, "bw")
- if err != nil {
- return 0, err
- }
- return result * 1024, nil
-}
-
-// parseIOps reports the write IO per second metric.
-func (f *Fio) parseIOps(data string, isRead bool) (float64, error) {
- if isRead {
- return f.parseFioJSON(data, "read", "iops")
- }
- return f.parseFioJSON(data, "write", "iops")
-}
-
-// fioResult is for parsing FioJSON.
-type fioResult struct {
- Jobs []fioJob
-}
-
-// fioJob is for parsing FioJSON.
-type fioJob map[string]json.RawMessage
-
-// fioMetrics is for parsing FioJSON.
-type fioMetrics map[string]json.RawMessage
-
-// parseFioJSON parses data and grabs "op" (read or write) and "metric"
-// (bw or iops) from the JSON.
-func (f *Fio) parseFioJSON(data, op, metric string) (float64, error) {
- var result fioResult
- if err := json.Unmarshal([]byte(data), &result); err != nil {
- return 0, fmt.Errorf("could not unmarshal data: %v", err)
- }
-
- if len(result.Jobs) < 1 {
- return 0, fmt.Errorf("no jobs present to parse")
- }
-
- var metrics fioMetrics
- if err := json.Unmarshal(result.Jobs[0][op], &metrics); err != nil {
- return 0, fmt.Errorf("could not unmarshal jobs: %v", err)
- }
-
- if _, ok := metrics[metric]; !ok {
- return 0, fmt.Errorf("no metric found for op: %s", op)
- }
- return strconv.ParseFloat(string(metrics[metric]), 64)
-}
diff --git a/test/benchmarks/tools/fio_test.go b/test/benchmarks/tools/fio_test.go
deleted file mode 100644
index a98277150..000000000
--- a/test/benchmarks/tools/fio_test.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tools
-
-import "testing"
-
-// TestFio checks the Fio parsers on sample output.
-func TestFio(t *testing.T) {
- sampleData := `
-{
- "fio version" : "fio-3.1",
- "timestamp" : 1554837456,
- "timestamp_ms" : 1554837456621,
- "time" : "Tue Apr 9 19:17:36 2019",
- "jobs" : [
- {
- "jobname" : "test",
- "groupid" : 0,
- "error" : 0,
- "eta" : 2147483647,
- "elapsed" : 1,
- "job options" : {
- "name" : "test",
- "ioengine" : "sync",
- "size" : "1073741824",
- "filename" : "/disk/file.dat",
- "iodepth" : "4",
- "bs" : "4096",
- "rw" : "write"
- },
- "read" : {
- "io_bytes" : 0,
- "io_kbytes" : 0,
- "bw" : 123456,
- "iops" : 1234.5678,
- "runtime" : 0,
- "total_ios" : 0,
- "short_ios" : 0,
- "bw_min" : 0,
- "bw_max" : 0,
- "bw_agg" : 0.000000,
- "bw_mean" : 0.000000,
- "bw_dev" : 0.000000,
- "bw_samples" : 0,
- "iops_min" : 0,
- "iops_max" : 0,
- "iops_mean" : 0.000000,
- "iops_stddev" : 0.000000,
- "iops_samples" : 0
- },
- "write" : {
- "io_bytes" : 1073741824,
- "io_kbytes" : 1048576,
- "bw" : 1753471,
- "iops" : 438367.892977,
- "runtime" : 598,
- "total_ios" : 262144,
- "bw_min" : 1731120,
- "bw_max" : 1731120,
- "bw_agg" : 98.725328,
- "bw_mean" : 1731120.000000,
- "bw_dev" : 0.000000,
- "bw_samples" : 1,
- "iops_min" : 432780,
- "iops_max" : 432780,
- "iops_mean" : 432780.000000,
- "iops_stddev" : 0.000000,
- "iops_samples" : 1
- }
- }
- ]
-}
-`
- fio := Fio{}
- // WriteBandwidth.
- got, err := fio.parseBandwidth(sampleData, false)
- var want float64 = 1753471.0 * 1024
- if err != nil {
- t.Fatalf("parse failed with err: %v", err)
- } else if got != want {
- t.Fatalf("got: %f, want: %f", got, want)
- }
-
- // ReadBandwidth.
- got, err = fio.parseBandwidth(sampleData, true)
- want = 123456 * 1024
- if err != nil {
- t.Fatalf("parse failed with err: %v", err)
- } else if got != want {
- t.Fatalf("got: %f, want: %f", got, want)
- }
-
- // WriteIOps.
- got, err = fio.parseIOps(sampleData, false)
- want = 438367.892977
- if err != nil {
- t.Fatalf("parse failed with err: %v", err)
- } else if got != want {
- t.Fatalf("got: %f, want: %f", got, want)
- }
-
- // ReadIOps.
- got, err = fio.parseIOps(sampleData, true)
- want = 1234.5678
- if err != nil {
- t.Fatalf("parse failed with err: %v", err)
- } else if got != want {
- t.Fatalf("got: %f, want: %f", got, want)
- }
-}
diff --git a/test/benchmarks/tools/hey.go b/test/benchmarks/tools/hey.go
deleted file mode 100644
index de908feeb..000000000
--- a/test/benchmarks/tools/hey.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tools
-
-import (
- "fmt"
- "net"
- "regexp"
- "strconv"
- "testing"
-)
-
-// Hey is for the client application 'hey'.
-type Hey struct {
- Requests int // Note: requests cannot be less than concurrency.
- Concurrency int
- Doc string
-}
-
-// MakeCmd returns a 'hey' command.
-func (h *Hey) MakeCmd(ip net.IP, port int) []string {
- c := h.Concurrency
- if c > h.Requests {
- c = h.Requests
- }
- return []string{
- "hey",
- "-n", fmt.Sprintf("%d", h.Requests),
- "-c", fmt.Sprintf("%d", c),
- fmt.Sprintf("http://%s:%d/%s", ip.String(), port, h.Doc),
- }
-}
-
-// Report parses output from 'hey' and reports metrics.
-func (h *Hey) Report(b *testing.B, output string) {
- b.Helper()
- requests, err := h.parseRequestsPerSecond(output)
- if err != nil {
- b.Fatalf("failed to parse requests per second: %v", err)
- }
- ReportCustomMetric(b, requests, "requests_per_second" /*metric name*/, "QPS" /*unit*/)
-
- ave, err := h.parseAverageLatency(output)
- if err != nil {
- b.Fatalf("failed to parse average latency: %v", err)
- }
- ReportCustomMetric(b, ave, "average_latency" /*metric name*/, "s" /*unit*/)
-}
-
-var heyReqPerSecondRE = regexp.MustCompile(`Requests/sec:\s*(\d+\.?\d+?)\s+`)
-
-// parseRequestsPerSecond finds requests per second from 'hey' output.
-func (h *Hey) parseRequestsPerSecond(data string) (float64, error) {
- match := heyReqPerSecondRE.FindStringSubmatch(data)
- if len(match) < 2 {
-		return 0, fmt.Errorf("failed to get requests per second: %s", data)
- }
- return strconv.ParseFloat(match[1], 64)
-}
-
-var heyAverageLatencyRE = regexp.MustCompile(`Average:\s*(\d+\.?\d+?)\s+secs`)
-
-// parseAverageLatency finds the average latency in seconds from 'hey' output.
-func (h *Hey) parseAverageLatency(data string) (float64, error) {
- match := heyAverageLatencyRE.FindStringSubmatch(data)
- if len(match) < 2 {
-		return 0, fmt.Errorf("failed to get average latency (matches: %d): %s", len(match), data)
- }
- return strconv.ParseFloat(match[1], 64)
-}
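
As a rough usage sketch (the import path is assumed and the `tools` package is test-only, so this is illustrative rather than buildable as-is), `MakeCmd` caps concurrency at the request count before assembling the `hey` command line:

```go
// Sketch: build a 'hey' command line with the Hey helper defined above.
package main

import (
	"fmt"
	"net"

	"gvisor.dev/gvisor/test/benchmarks/tools" // Assumed import path.
)

func main() {
	h := tools.Hey{Requests: 1000, Concurrency: 2000, Doc: "index.html"}
	// Concurrency is capped at Requests, so this produces "-c 1000".
	cmd := h.MakeCmd(net.ParseIP("10.0.0.1"), 8080)
	fmt.Println(cmd)
	// Output (roughly): [hey -n 1000 -c 1000 http://10.0.0.1:8080/index.html]
}
```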
diff --git a/test/benchmarks/tools/hey_test.go b/test/benchmarks/tools/hey_test.go
deleted file mode 100644
index e0cab1f52..000000000
--- a/test/benchmarks/tools/hey_test.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tools
-
-import "testing"
-
-// TestHey checks the Hey parsers on sample output.
-func TestHey(t *testing.T) {
- sampleData := `
- Summary:
- Total: 2.2391 secs
- Slowest: 1.6292 secs
- Fastest: 0.0066 secs
- Average: 0.5351 secs
- Requests/sec: 89.3202
-
- Total data: 841200 bytes
- Size/request: 4206 bytes
-
- Response time histogram:
- 0.007 [1] |
- 0.169 [0] |
- 0.331 [149] |■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■
- 0.493 [0] |
- 0.656 [0] |
- 0.818 [0] |
- 0.980 [0] |
- 1.142 [0] |
- 1.305 [0] |
- 1.467 [49] |■■■■■■■■■■■■■
- 1.629 [1] |
-
-
- Latency distribution:
- 10% in 0.2149 secs
- 25% in 0.2449 secs
- 50% in 0.2703 secs
- 75% in 1.3315 secs
- 90% in 1.4045 secs
- 95% in 1.4232 secs
- 99% in 1.4362 secs
-
- Details (average, fastest, slowest):
- DNS+dialup: 0.0002 secs, 0.0066 secs, 1.6292 secs
- DNS-lookup: 0.0000 secs, 0.0000 secs, 0.0000 secs
- req write: 0.0000 secs, 0.0000 secs, 0.0012 secs
- resp wait: 0.5225 secs, 0.0064 secs, 1.4346 secs
- resp read: 0.0122 secs, 0.0001 secs, 0.2006 secs
-
- Status code distribution:
- [200] 200 responses
- `
- hey := Hey{}
- want := 89.3202
- got, err := hey.parseRequestsPerSecond(sampleData)
- if err != nil {
- t.Fatalf("failed to parse request per second with: %v", err)
- } else if got != want {
- t.Fatalf("got: %f, want: %f", got, want)
- }
-
- want = 0.5351
- got, err = hey.parseAverageLatency(sampleData)
- if err != nil {
- t.Fatalf("failed to parse average latency with: %v", err)
- } else if got != want {
- t.Fatalf("got: %f, want: %f", got, want)
- }
-}
diff --git a/test/benchmarks/tools/iperf.go b/test/benchmarks/tools/iperf.go
deleted file mode 100644
index 8f410a9e8..000000000
--- a/test/benchmarks/tools/iperf.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tools
-
-import (
- "fmt"
- "net"
- "regexp"
- "strconv"
- "testing"
-)
-
-const length = 64 * 1024
-
-// Iperf is for the client side of `iperf`.
-type Iperf struct {
- Num int // Number of bytes to send in KB.
-}
-
-// MakeCmd returns a iperf client command.
-func (i *Iperf) MakeCmd(ip net.IP, port int) []string {
- return []string{
- "iperf",
- "--format", "K", // Output in KBytes.
- "--realtime", // Measured in realtime.
- "--num", fmt.Sprintf("%dK", i.Num), // Number of bytes to send in KB.
- "--length", fmt.Sprintf("%d", length),
- "--client", ip.String(),
- "--port", fmt.Sprintf("%d", port),
- }
-}
-
-// Report parses output from iperf client and reports metrics.
-func (i *Iperf) Report(b *testing.B, output string) {
- b.Helper()
- // Parse bandwidth and report it.
- bW, err := i.bandwidth(output)
- if err != nil {
-		b.Fatalf("failed to parse bandwidth from %s: %v", output, err)
- }
-	b.SetBytes(length) // Measure Bytes/sec for b.N, although the reported bandwidth comes from iperf output.
- ReportCustomMetric(b, bW*1024, "bandwidth" /*metric name*/, "bytes_per_second" /*unit*/)
-}
-
-// bandwidth parses the Bandwidth number from an iperf report (see iperf_test.go for a sample).
-func (i *Iperf) bandwidth(data string) (float64, error) {
- re := regexp.MustCompile(`\[\s*\d+\][^\n]+\s+(\d+\.?\d*)\s+KBytes/sec`)
- match := re.FindStringSubmatch(data)
- if len(match) < 1 {
-		return 0, fmt.Errorf("failed to get bandwidth: %s", data)
- }
- return strconv.ParseFloat(match[1], 64)
-}
diff --git a/test/benchmarks/tools/iperf_test.go b/test/benchmarks/tools/iperf_test.go
deleted file mode 100644
index 03bb30d05..000000000
--- a/test/benchmarks/tools/iperf_test.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-package tools
-
-import "testing"
-
-// TestIperf checks the Iperf parsers on sample output.
-func TestIperf(t *testing.T) {
- sampleData := `
-------------------------------------------------------------
-Client connecting to 10.138.15.215, TCP port 32779
-TCP window size: 45.0 KByte (default)
-------------------------------------------------------------
-[ 3] local 10.138.15.216 port 32866 connected with 10.138.15.215 port 32779
-[ ID] Interval Transfer Bandwidth
-[ 3] 0.0-10.0 sec 459520 KBytes 45900 KBytes/sec
-`
- i := Iperf{}
- bandwidth, err := i.bandwidth(sampleData)
- if err != nil || bandwidth != 45900 {
- t.Fatalf("failed with: %v and %f", err, bandwidth)
- }
-}
diff --git a/test/benchmarks/tools/meminfo.go b/test/benchmarks/tools/meminfo.go
deleted file mode 100644
index b5786fe11..000000000
--- a/test/benchmarks/tools/meminfo.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tools
-
-import (
- "fmt"
- "regexp"
- "strconv"
- "testing"
-)
-
-// Meminfo wraps measurements of MemAvailable using /proc/meminfo.
-type Meminfo struct {
-}
-
-// MakeCmd returns a command for checking meminfo.
-func (*Meminfo) MakeCmd() (string, []string) {
- return "cat", []string{"/proc/meminfo"}
-}
-
-// Report takes two reads of meminfo, parses them, and reports the difference
-// divided by b.N.
-func (*Meminfo) Report(b *testing.B, before, after string) {
- b.Helper()
-
- beforeVal, err := parseMemAvailable(before)
- if err != nil {
- b.Fatalf("could not parse before value %s: %v", before, err)
- }
-
- afterVal, err := parseMemAvailable(after)
- if err != nil {
- b.Fatalf("could not parse before value %s: %v", before, err)
- }
- val := 1024 * ((beforeVal - afterVal) / float64(b.N))
- ReportCustomMetric(b, val, "average_container_size" /*metric name*/, "bytes" /*units*/)
-}
-
-var memInfoRE = regexp.MustCompile(`MemAvailable:\s*(\d+)\skB\n`)
-
-// parseMemAvailable grabs the MemAvailable number from /proc/meminfo.
-func parseMemAvailable(data string) (float64, error) {
- match := memInfoRE.FindStringSubmatch(data)
- if len(match) < 2 {
- return 0, fmt.Errorf("couldn't find MemAvailable in %s", data)
- }
- return strconv.ParseFloat(match[1], 64)
-}
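As a standalone illustration (a sketch, not part of the deleted package), the MemAvailable regexp above applied to a single /proc/meminfo line behaves like this; the value is taken from the sample in meminfo_test.go below:

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

func main() {
	// Same pattern as parseMemAvailable: grab the MemAvailable figure in kB.
	re := regexp.MustCompile(`MemAvailable:\s*(\d+)\skB\n`)
	m := re.FindStringSubmatch("MemAvailable:    9319948 kB\n")
	kb, _ := strconv.ParseFloat(m[1], 64)
	// Meminfo.Report then takes the before/after delta, multiplies by 1024
	// to get bytes, and divides by b.N.
	fmt.Printf("%.0f kB available\n", kb) // Prints: 9319948 kB available
}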
diff --git a/test/benchmarks/tools/meminfo_test.go b/test/benchmarks/tools/meminfo_test.go
deleted file mode 100644
index ba803540f..000000000
--- a/test/benchmarks/tools/meminfo_test.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tools
-
-import (
- "testing"
-)
-
-// TestMeminfo checks the Meminfo parser on sample output.
-func TestMeminfo(t *testing.T) {
- sampleData := `
-MemTotal: 16337408 kB
-MemFree: 3742696 kB
-MemAvailable: 9319948 kB
-Buffers: 1433884 kB
-Cached: 4607036 kB
-SwapCached: 45284 kB
-Active: 8288376 kB
-Inactive: 2685928 kB
-Active(anon): 4724912 kB
-Inactive(anon): 1047940 kB
-Active(file): 3563464 kB
-Inactive(file): 1637988 kB
-Unevictable: 326940 kB
-Mlocked: 48 kB
-SwapTotal: 33292284 kB
-SwapFree: 32865736 kB
-Dirty: 708 kB
-Writeback: 0 kB
-AnonPages: 4304204 kB
-Mapped: 975424 kB
-Shmem: 910292 kB
-KReclaimable: 744532 kB
-Slab: 1058448 kB
-SReclaimable: 744532 kB
-SUnreclaim: 313916 kB
-KernelStack: 25188 kB
-PageTables: 65300 kB
-NFS_Unstable: 0 kB
-Bounce: 0 kB
-WritebackTmp: 0 kB
-CommitLimit: 41460988 kB
-Committed_AS: 22859492 kB
-VmallocTotal: 34359738367 kB
-VmallocUsed: 63088 kB
-VmallocChunk: 0 kB
-Percpu: 9248 kB
-HardwareCorrupted: 0 kB
-AnonHugePages: 786432 kB
-ShmemHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-FileHugePages: 0 kB
-FilePmdMapped: 0 kB
-HugePages_Total: 0
-HugePages_Free: 0
-HugePages_Rsvd: 0
-HugePages_Surp: 0
-Hugepagesize: 2048 kB
-Hugetlb: 0 kB
-DirectMap4k: 5408532 kB
-DirectMap2M: 11241472 kB
-DirectMap1G: 1048576 kB
-`
- want := 9319948.0
- got, err := parseMemAvailable(sampleData)
- if err != nil {
- t.Fatalf("parseMemAvailable failed: %v", err)
- }
- if got != want {
- t.Fatalf("parseMemAvailable got %f, want %f", got, want)
- }
-}
diff --git a/test/benchmarks/tools/parser_util.go b/test/benchmarks/tools/parser_util.go
deleted file mode 100644
index a4555c7dd..000000000
--- a/test/benchmarks/tools/parser_util.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tools
-
-import (
- "fmt"
- "regexp"
- "strconv"
- "strings"
- "testing"
-)
-
-// Parameter is a test parameter.
-type Parameter struct {
- Name string
- Value string
-}
-
-// Output is parsed and split on these values, so they are illegal in inputs.
-// The allowed characters are constrained by 1) docker's allowable container
-// names, 2) Go's allowable benchmark names, and 3) Go's allowable characters
-// in b.ReportMetric calls.
-var illegalChars = regexp.MustCompile(`[/\.]`)
-
-// ParametersToName joins parameters into a string format for parsing.
-// It is meant to be used for t.Run() calls in benchmark tools.
-func ParametersToName(params ...Parameter) (string, error) {
- var strs []string
- for _, param := range params {
- if illegalChars.MatchString(param.Name) || illegalChars.MatchString(param.Value) {
- return "", fmt.Errorf("params Name: %q and Value: %q cannot container '.' or '/'", param.Name, param.Value)
- }
- strs = append(strs, strings.Join([]string{param.Name, param.Value}, "."))
- }
- return strings.Join(strs, "/"), nil
-}
-
-// NameToParameters parses the string created by ParametersToName and returns
-// it as a set of Parameters.
-// Example: BenchmarkRuby/server_threads.1/doc_size.16KB-6
-// The parameter part of this benchmark is:
-// "server_threads.1/doc_size.16KB" (BenchmarkRuby is the name, and 6 is GOMAXPROCS)
-// This function will return a slice with two parameters ->
-// {Name: server_threads, Value: 1}, {Name: doc_size, Value: 16KB}
-func NameToParameters(name string) ([]*Parameter, error) {
- var params []*Parameter
- for _, cond := range strings.Split(name, "/") {
- cs := strings.Split(cond, ".")
- switch len(cs) {
- case 1:
- params = append(params, &Parameter{Name: cond, Value: cond})
- case 2:
- params = append(params, &Parameter{Name: cs[0], Value: cs[1]})
- default:
- return nil, fmt.Errorf("failed to parse param: %s", cond)
- }
- }
- return params, nil
-}
-
-// ReportCustomMetric reports a metric in a set format for parsing.
-func ReportCustomMetric(b *testing.B, value float64, name, unit string) {
- if illegalChars.MatchString(name) || illegalChars.MatchString(unit) {
- b.Fatalf("name: %q and unit: %q cannot contain '/' or '.'", name, unit)
- }
- nameUnit := strings.Join([]string{name, unit}, ".")
- b.ReportMetric(value, nameUnit)
-}
-
-// Metric holds metric data parsed from a string based on the format
-// ReportMetric.
-type Metric struct {
- Name string
- Unit string
- Sample float64
-}
-
-// ParseCustomMetric parses a metric reported with ReportCustomMetric.
-func ParseCustomMetric(value, metric string) (*Metric, error) {
- sample, err := strconv.ParseFloat(value, 64)
- if err != nil {
- return nil, fmt.Errorf("failed to parse value: %v", err)
- }
- nameUnit := strings.Split(metric, ".")
- if len(nameUnit) != 2 {
- return nil, fmt.Errorf("failed to parse metric: %s", metric)
- }
- return &Metric{Name: nameUnit[0], Unit: nameUnit[1], Sample: sample}, nil
-}
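The benchmark-name format documented above round-trips between parameters and a sub-test name. A standalone sketch of that format, without importing the deleted package:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// ParametersToName joins "Name.Value" pairs with "/"; NameToParameters splits them back.
	name := strings.Join([]string{"server_threads.1", "doc_size.16KB"}, "/")
	fmt.Println(name) // Prints: server_threads.1/doc_size.16KB
	for _, cond := range strings.Split(name, "/") {
		cs := strings.Split(cond, ".")
		fmt.Printf("Name: %s, Value: %s\n", cs[0], cs[1])
	}
}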
diff --git a/test/benchmarks/tools/redis.go b/test/benchmarks/tools/redis.go
deleted file mode 100644
index 12fdbc7cc..000000000
--- a/test/benchmarks/tools/redis.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tools
-
-import (
- "fmt"
- "net"
- "regexp"
- "strconv"
- "testing"
-)
-
-// Redis is for the client 'redis-benchmark'.
-type Redis struct {
- Operation string
-}
-
-// MakeCmd returns a redis-benchmark client command.
-func (r *Redis) MakeCmd(ip net.IP, port, requests int) []string {
- // There is no -t PING_BULK for redis-benchmark, so adjust the command in that case.
- // Note that "ping" will run both PING_INLINE and PING_BULK.
- if r.Operation == "PING_BULK" {
- return []string{
- "redis-benchmark",
- "--csv",
- "-t", "ping",
- "-h", ip.String(),
- "-p", fmt.Sprintf("%d", port),
- "-n", fmt.Sprintf("%d", requests),
- }
- }
-
- // Run redis-benchmark -t <operation> for the requested number of requests against the server.
- return []string{
- "redis-benchmark",
- "--csv",
- "-t", r.Operation,
- "-h", ip.String(),
- "-p", fmt.Sprintf("%d", port),
- "-n", fmt.Sprintf("%d", requests),
- }
-}
-
-// Report parses output from redis-benchmark client and reports metrics.
-func (r *Redis) Report(b *testing.B, output string) {
- b.Helper()
- result, err := r.parseOperation(output)
- if err != nil {
- b.Fatalf("parsing result %s failed with err: %v", output, err)
- }
- ReportCustomMetric(b, result, r.Operation /*metric_name*/, "QPS" /*unit*/)
-}
-
-// parseOperation grabs the metric operations per second from redis-benchmark output.
-func (r *Redis) parseOperation(data string) (float64, error) {
- re := regexp.MustCompile(fmt.Sprintf(`"%s( .*)?","(\d*\.\d*)"`, r.Operation))
- match := re.FindStringSubmatch(data)
- if len(match) < 3 {
- return 0.0, fmt.Errorf("could not find %s in %s", r.Operation, data)
- }
- return strconv.ParseFloat(match[2], 64)
-}
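For reference, a standalone sketch (not part of the deleted package) of the CSV parsing above applied to one redis-benchmark line, with the value taken from the sample in redis_test.go below:

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

func main() {
	// Same pattern as Redis.parseOperation, built here for the LRANGE_100 operation.
	op := "LRANGE_100"
	re := regexp.MustCompile(fmt.Sprintf(`"%s( .*)?","(\d*\.\d*)"`, op))
	m := re.FindStringSubmatch(`"LRANGE_100 (first 100 elements)","33955.86"`)
	qps, _ := strconv.ParseFloat(m[2], 64)
	fmt.Printf("%s: %.2f QPS\n", op, qps) // Prints: LRANGE_100: 33955.86 QPS
}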
diff --git a/test/benchmarks/tools/redis_test.go b/test/benchmarks/tools/redis_test.go
deleted file mode 100644
index 4bafda66f..000000000
--- a/test/benchmarks/tools/redis_test.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tools
-
-import (
- "testing"
-)
-
-// TestRedis checks the Redis parsers on sample output.
-func TestRedis(t *testing.T) {
- sampleData := `
- "PING_INLINE","48661.80"
- "PING_BULK","50301.81"
- "SET","48923.68"
- "GET","49382.71"
- "INCR","49975.02"
- "LPUSH","49875.31"
- "RPUSH","50276.52"
- "LPOP","50327.12"
- "RPOP","50556.12"
- "SADD","49504.95"
- "HSET","49504.95"
- "SPOP","50025.02"
- "LPUSH (needed to benchmark LRANGE)","48875.86"
- "LRANGE_100 (first 100 elements)","33955.86"
- "LRANGE_300 (first 300 elements)","16550.81"// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tools
-
- "LRANGE_500 (first 450 elements)","13653.74"
- "LRANGE_600 (first 600 elements)","11219.57"
- "MSET (10 keys)","44682.75"
- `
- wants := map[string]float64{
- "PING_INLINE": 48661.80,
- "PING_BULK": 50301.81,
- "SET": 48923.68,
- "GET": 49382.71,
- "INCR": 49975.02,
- "LPUSH": 49875.31,
- "RPUSH": 50276.52,
- "LPOP": 50327.12,
- "RPOP": 50556.12,
- "SADD": 49504.95,
- "HSET": 49504.95,
- "SPOP": 50025.02,
- "LRANGE_100": 33955.86,
- "LRANGE_300": 16550.81,
- "LRANGE_500": 13653.74,
- "LRANGE_600": 11219.57,
- "MSET": 44682.75,
- }
- for op, want := range wants {
- redis := Redis{
- Operation: op,
- }
- if got, err := redis.parseOperation(sampleData); err != nil {
- t.Fatalf("failed to parse %s: %v", op, err)
- } else if want != got {
- t.Fatalf("wanted %f for op %s, got %f", want, op, got)
- }
- }
-}
diff --git a/test/benchmarks/tools/sysbench.go b/test/benchmarks/tools/sysbench.go
deleted file mode 100644
index 350f8ec98..000000000
--- a/test/benchmarks/tools/sysbench.go
+++ /dev/null
@@ -1,236 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tools
-
-import (
- "fmt"
- "regexp"
- "strconv"
- "testing"
-)
-
-// Sysbench represents a 'sysbench' command.
-type Sysbench interface {
- // MakeCmd constructs the relevant command line.
- MakeCmd(*testing.B) []string
-
- // Report reports relevant custom metrics.
- Report(*testing.B, string)
-}
-
-// SysbenchBase holds the top-level arguments common to all sysbench tests.
-// See: 'sysbench --help'.
-type SysbenchBase struct {
- // Threads is the number of threads for the test.
- Threads int
-}
-
-// baseFlags returns top level flags.
-func (s *SysbenchBase) baseFlags(b *testing.B, useEvents bool) []string {
- var ret []string
- if s.Threads > 0 {
- ret = append(ret, fmt.Sprintf("--threads=%d", s.Threads))
- }
- ret = append(ret, "--time=0") // Ensure other mechanism is used.
- if useEvents {
- ret = append(ret, fmt.Sprintf("--events=%d", b.N))
- }
- return ret
-}
-
-// SysbenchCPU is for 'sysbench [flags] cpu run' and holds CPU specific arguments.
-type SysbenchCPU struct {
- SysbenchBase
-}
-
-// MakeCmd makes commands for SysbenchCPU.
-func (s *SysbenchCPU) MakeCmd(b *testing.B) []string {
- cmd := []string{"sysbench"}
- cmd = append(cmd, s.baseFlags(b, true /* useEvents */)...)
- cmd = append(cmd, "cpu", "run")
- return cmd
-}
-
-// Report reports the relevant metrics for SysbenchCPU.
-func (s *SysbenchCPU) Report(b *testing.B, output string) {
- b.Helper()
- result, err := s.parseEvents(output)
- if err != nil {
- b.Fatalf("parsing CPU events from %s failed: %v", output, err)
- }
- ReportCustomMetric(b, result, "cpu_events" /*metric name*/, "events_per_second" /*unit*/)
-}
-
-var cpuEventsPerSecondRE = regexp.MustCompile(`events per second:\s*(\d*.?\d*)\n`)
-
-// parseEvents parses cpu events per second.
-func (s *SysbenchCPU) parseEvents(data string) (float64, error) {
- match := cpuEventsPerSecondRE.FindStringSubmatch(data)
- if len(match) < 2 {
- return 0.0, fmt.Errorf("could not find events per second: %s", data)
- }
- return strconv.ParseFloat(match[1], 64)
-}
-
-// SysbenchMemory is for 'sysbench [FLAGS] memory run' and holds Memory specific arguments.
-type SysbenchMemory struct {
- SysbenchBase
- BlockSize int // size of test memory block in megabytes [1].
- Scope string // memory access scope {global, local} [global].
- HugeTLB bool // allocate memory from HugeTLB [off].
- OperationType string // type of memory ops {read, write, none} [write].
- AccessMode string // access mode {seq, rnd} [seq].
-}
-
-// MakeCmd makes commands for SysbenchMemory.
-func (s *SysbenchMemory) MakeCmd(b *testing.B) []string {
- cmd := []string{"sysbench"}
- cmd = append(cmd, s.flags(b)...)
- cmd = append(cmd, "memory", "run")
- return cmd
-}
-
-// flags makes flags for SysbenchMemory cmds.
-func (s *SysbenchMemory) flags(b *testing.B) []string {
- cmd := s.baseFlags(b, false /* useEvents */)
- if s.BlockSize != 0 {
- cmd = append(cmd, fmt.Sprintf("--memory-block-size=%dM", s.BlockSize))
- }
- if s.Scope != "" {
- cmd = append(cmd, fmt.Sprintf("--memory-scope=%s", s.Scope))
- }
- if s.HugeTLB {
- cmd = append(cmd, "--memory-hugetlb=on")
- }
- if s.OperationType != "" {
- cmd = append(cmd, fmt.Sprintf("--memory-oper=%s", s.OperationType))
- }
- if s.AccessMode != "" {
- cmd = append(cmd, fmt.Sprintf("--memory-access-mode=%s", s.AccessMode))
- }
- // Sysbench ignores events for memory tests, and uses the total
- // size parameter to determine when the test is done. We scale
- // with this instead.
- cmd = append(cmd, fmt.Sprintf("--memory-total-size=%dG", b.N))
- return cmd
-}
-
-// Report reports the relevant metrics for SysbenchMemory.
-func (s *SysbenchMemory) Report(b *testing.B, output string) {
- b.Helper()
- result, err := s.parseOperations(output)
- if err != nil {
- b.Fatalf("parsing result %s failed with err: %v", output, err)
- }
- ReportCustomMetric(b, result, "memory_operations" /*metric name*/, "ops_per_second" /*unit*/)
-}
-
-var memoryOperationsRE = regexp.MustCompile(`Total\s+operations:\s+\d+\s+\((\s*\d+\.\d+\s*)\s+per\s+second\)`)
-
-// parseOperations parses memory operations per second from sysbench memory output.
-func (s *SysbenchMemory) parseOperations(data string) (float64, error) {
- match := memoryOperationsRE.FindStringSubmatch(data)
- if len(match) < 2 {
- return 0.0, fmt.Errorf("couldn't find memory operations per second: %s", data)
- }
- return strconv.ParseFloat(match[1], 64)
-}
-
-// SysbenchMutex is for 'sysbench [FLAGS] mutex run' and holds Mutex specific arguments.
-type SysbenchMutex struct {
- SysbenchBase
- Num int // total size of mutex array [4096].
- Loops int // number of loops to do outside mutex lock [10000].
-}
-
-// MakeCmd makes commands for SysbenchMutex.
-func (s *SysbenchMutex) MakeCmd(b *testing.B) []string {
- cmd := []string{"sysbench"}
- cmd = append(cmd, s.flags(b)...)
- cmd = append(cmd, "mutex", "run")
- return cmd
-}
-
-// flags makes flags for SysbenchMutex commands.
-func (s *SysbenchMutex) flags(b *testing.B) []string {
- var cmd []string
- cmd = append(cmd, s.baseFlags(b, false /* useEvents */)...)
- if s.Num > 0 {
- cmd = append(cmd, fmt.Sprintf("--mutex-num=%d", s.Num))
- }
- if s.Loops > 0 {
- cmd = append(cmd, fmt.Sprintf("--mutex-loops=%d", s.Loops))
- }
- // Sysbench does not respect --events for mutex tests. From [1]:
- // "Here --time or --events are completely ignored. Sysbench always
- // runs one event per thread."
- // [1] https://tomfern.com/posts/sysbench-guide-1
- cmd = append(cmd, fmt.Sprintf("--mutex-locks=%d", b.N))
- return cmd
-}
-
-// Report parses and reports relevant sysbench mutex metrics.
-func (s *SysbenchMutex) Report(b *testing.B, output string) {
- b.Helper()
-
- result, err := s.parseExecutionTime(output)
- if err != nil {
- b.Fatalf("parsing result %s failed with err: %v", output, err)
- }
- ReportCustomMetric(b, result, "average_execution_time" /*metric name*/, "s" /*unit*/)
-
- result, err = s.parseDeviation(output)
- if err != nil {
- b.Fatalf("parsing result %s failed with err: %v", output, err)
- }
- ReportCustomMetric(b, result, "stddev_execution_time" /*metric name*/, "s" /*unit*/)
-
- result, err = s.parseLatency(output)
- if err != nil {
- b.Fatalf("parsing result %s failed with err: %v", output, err)
- }
- ReportCustomMetric(b, result/1000, "average_latency" /*metric name*/, "s" /*unit*/)
-}
-
-var executionTimeRE = regexp.MustCompile(`execution time \(avg/stddev\):\s*(\d*.?\d*)/(\d*.?\d*)`)
-
-// parseExecutionTime parses threads fairness average execution time from sysbench output.
-func (s *SysbenchMutex) parseExecutionTime(data string) (float64, error) {
- match := executionTimeRE.FindStringSubmatch(data)
- if len(match) < 2 {
- return 0.0, fmt.Errorf("could not find execution time average: %s", data)
- }
- return strconv.ParseFloat(match[1], 64)
-}
-
-// parseDeviation parses threads fairness stddev time from sysbench output.
-func (s *SysbenchMutex) parseDeviation(data string) (float64, error) {
- match := executionTimeRE.FindStringSubmatch(data)
- if len(match) < 3 {
- return 0.0, fmt.Errorf("could not find execution time deviation: %s", data)
- }
- return strconv.ParseFloat(match[2], 64)
-}
-
-var averageLatencyRE = regexp.MustCompile(`avg:[^\n^\d]*(\d*\.?\d*)`)
-
-// parseLatency parses latency from sysbench output.
-func (s *SysbenchMutex) parseLatency(data string) (float64, error) {
- match := averageLatencyRE.FindStringSubmatch(data)
- if len(match) < 2 {
- return 0.0, fmt.Errorf("could not find average latency: %s", data)
- }
- return strconv.ParseFloat(match[1], 64)
-}
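The threads-fairness line is what the mutex parsers key on. A standalone sketch of the avg/stddev regexp above applied to a sample line (taken from sysbench_test.go below):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as executionTimeRE: capture average and stddev execution time.
	re := regexp.MustCompile(`execution time \(avg/stddev\):\s*(\d*.?\d*)/(\d*.?\d*)`)
	m := re.FindStringSubmatch("    execution time (avg/stddev):   0.1925/0.04")
	fmt.Println(m[1], m[2]) // Prints: 0.1925 0.04
}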
diff --git a/test/benchmarks/tools/sysbench_test.go b/test/benchmarks/tools/sysbench_test.go
deleted file mode 100644
index 850d1939e..000000000
--- a/test/benchmarks/tools/sysbench_test.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tools
-
-import (
- "testing"
-)
-
-// TestSysbenchCpu tests the parser on sample 'sysbench cpu' output.
-func TestSysbenchCpu(t *testing.T) {
- sampleData := `
-sysbench 1.0.11 (using system LuaJIT 2.1.0-beta3)
-
-Running the test with following options:
-Number of threads: 8
-Initializing random number generator from current time
-
-
-Prime numbers limit: 10000
-
-Initializing worker threads...
-
-Threads started!
-
-CPU speed:
- events per second: 9093.38
-
-General statistics:
- total time: 10.0007s
- total number of events: 90949
-
-Latency (ms):
- min: 0.64
- avg: 0.88
- max: 24.65
- 95th percentile: 1.55
- sum: 79936.91
-
-Threads fairness:
- events (avg/stddev): 11368.6250/831.38
- execution time (avg/stddev): 9.9921/0.01
-`
- sysbench := SysbenchCPU{}
- want := 9093.38
- if got, err := sysbench.parseEvents(sampleData); err != nil {
- t.Fatalf("parse cpu events failed: %v", err)
- } else if want != got {
- t.Fatalf("got: %f want: %f", got, want)
- }
-}
-
-// TestSysbenchMemory tests parsers on sample 'sysbench memory' output.
-func TestSysbenchMemory(t *testing.T) {
- sampleData := `
-sysbench 1.0.11 (using system LuaJIT 2.1.0-beta3)
-
-Running the test with following options:
-Number of threads: 8
-Initializing random number generator from current time
-
-
-Running memory speed test with the following options:
- block size: 1KiB
- total size: 102400MiB
- operation: write
- scope: global
-
-Initializing worker threads...
-
-Threads started!
-
-Total operations: 47999046 (9597428.64 per second)
-
-46874.07 MiB transferred (9372.49 MiB/sec)
-
-
-General statistics:
- total time: 5.0001s
- total number of events: 47999046
-
-Latency (ms):
- min: 0.00
- avg: 0.00
- max: 0.21
- 95th percentile: 0.00
- sum: 33165.91
-
-Threads fairness:
- events (avg/stddev): 5999880.7500/111242.52
- execution time (avg/stddev): 4.1457/0.09
-`
- sysbench := SysbenchMemory{}
- want := 9597428.64
- if got, err := sysbench.parseOperations(sampleData); err != nil {
- t.Fatalf("parse memory ops failed: %v", err)
- } else if want != got {
- t.Fatalf("got: %f want: %f", got, want)
- }
-}
-
-// TestSysbenchMutex tests parsers on sample 'sysbench mutex' output.
-func TestSysbenchMutex(t *testing.T) {
- sampleData := `
-sysbench 1.0.11 (using system LuaJIT 2.1.0-beta3)
-
-The 'mutex' test requires a command argument. See 'sysbench mutex help'
-root@ec078132e294:/# sysbench mutex --threads=8 run
-sysbench 1.0.11 (using system LuaJIT 2.1.0-beta3)
-
-Running the test with following options:
-Number of threads: 8
-Initializing random number generator from current time
-
-
-Initializing worker threads...
-
-Threads started!
-
-
-General statistics:
- total time: 0.2320s
- total number of events: 8
-
-Latency (ms):
- min: 152.35
- avg: 192.48
- max: 231.41
- 95th percentile: 231.53
- sum: 1539.83
-
-Threads fairness:
- events (avg/stddev): 1.0000/0.00
- execution time (avg/stddev): 0.1925/0.04
-`
-
- sysbench := SysbenchMutex{}
- want := .1925
- if got, err := sysbench.parseExecutionTime(sampleData); err != nil {
- t.Fatalf("parse mutex time failed: %v", err)
- } else if want != got {
- t.Fatalf("got: %f want: %f", got, want)
- }
-
- want = 0.04
- if got, err := sysbench.parseDeviation(sampleData); err != nil {
- t.Fatalf("parse mutex deviation failed: %v", err)
- } else if want != got {
- t.Fatalf("got: %f want: %f", got, want)
- }
-
- want = 192.48
- if got, err := sysbench.parseLatency(sampleData); err != nil {
- t.Fatalf("parse mutex time failed: %v", err)
- } else if want != got {
- t.Fatalf("got: %f want: %f", got, want)
- }
-}
diff --git a/test/benchmarks/tools/tools.go b/test/benchmarks/tools/tools.go
deleted file mode 100644
index eb61c0136..000000000
--- a/test/benchmarks/tools/tools.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package tools holds tooling to couple command formatting and output parsers
-// together.
-package tools
diff --git a/test/cmd/test_app/BUILD b/test/cmd/test_app/BUILD
deleted file mode 100644
index 7b8b23b4d..000000000
--- a/test/cmd/test_app/BUILD
+++ /dev/null
@@ -1,21 +0,0 @@
-load("//tools:defs.bzl", "go_binary")
-
-package(licenses = ["notice"])
-
-go_binary(
- name = "test_app",
- testonly = 1,
- srcs = [
- "fds.go",
- "main.go",
- ],
- pure = True,
- visibility = ["//runsc/container:__pkg__"],
- deps = [
- "//pkg/test/testutil",
- "//pkg/unet",
- "//runsc/flag",
- "@com_github_google_subcommands//:go_default_library",
- "@com_github_kr_pty//:go_default_library",
- ],
-)
diff --git a/test/cmd/test_app/fds.go b/test/cmd/test_app/fds.go
deleted file mode 100644
index 9b5f7231a..000000000
--- a/test/cmd/test_app/fds.go
+++ /dev/null
@@ -1,186 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
- "context"
- "io"
- "io/ioutil"
- "log"
- "os"
- "time"
-
- "github.com/google/subcommands"
- "gvisor.dev/gvisor/pkg/test/testutil"
- "gvisor.dev/gvisor/pkg/unet"
- "gvisor.dev/gvisor/runsc/flag"
-)
-
-const fileContents = "foobarbaz"
-
-// fdSender will open a file and send the FD over a unix domain socket.
-type fdSender struct {
- socketPath string
-}
-
-// Name implements subcommands.Command.Name.
-func (*fdSender) Name() string {
- return "fd_sender"
-}
-
-// Synopsis implements subcommands.Command.Synopsis.
-func (*fdSender) Synopsis() string {
- return "creates a file and sends the FD over the socket"
-}
-
-// Usage implements subcommands.Command.Usage.
-func (*fdSender) Usage() string {
- return "fd_sender <flags>"
-}
-
-// SetFlags implements subcommands.Command.SetFlags.
-func (fds *fdSender) SetFlags(f *flag.FlagSet) {
- f.StringVar(&fds.socketPath, "socket", "", "path to socket")
-}
-
-// Execute implements subcommands.Command.Execute.
-func (fds *fdSender) Execute(ctx context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {
- if fds.socketPath == "" {
- log.Fatalf("socket flag must be set")
- }
-
- dir, err := ioutil.TempDir("", "")
- if err != nil {
- log.Fatalf("TempDir failed: %v", err)
- }
-
- fileToSend, err := ioutil.TempFile(dir, "")
- if err != nil {
- log.Fatalf("TempFile failed: %v", err)
- }
- defer fileToSend.Close()
-
- if _, err := fileToSend.WriteString(fileContents); err != nil {
- log.Fatalf("Write(%q) failed: %v", fileContents, err)
- }
-
- // Receiver may not be started yet, so try connecting in a poll loop.
- var s *unet.Socket
- if err := testutil.Poll(func() error {
- var err error
- s, err = unet.Connect(fds.socketPath, true /* SEQPACKET, so we can send empty message with FD */)
- return err
- }, 10*time.Second); err != nil {
- log.Fatalf("Error connecting to socket %q: %v", fds.socketPath, err)
- }
- defer s.Close()
-
- w := s.Writer(true)
- w.ControlMessage.PackFDs(int(fileToSend.Fd()))
- if _, err := w.WriteVec([][]byte{{'a'}}); err != nil {
- log.Fatalf("Error sending FD %q over socket %q: %v", fileToSend.Fd(), fds.socketPath, err)
- }
-
- log.Print("FD SENDER exiting successfully")
- return subcommands.ExitSuccess
-}
-
-// fdReceiver receives an FD over a unix domain socket and verifies the received file's contents.
-type fdReceiver struct {
- socketPath string
-}
-
-// Name implements subcommands.Command.Name.
-func (*fdReceiver) Name() string {
- return "fd_receiver"
-}
-
-// Synopsis implements subcommands.Command.Synopsis.
-func (*fdReceiver) Synopsis() string {
- return "reads an FD from a unix socket, and then does things to it"
-}
-
-// Usage implements subcommands.Command.Usage.
-func (*fdReceiver) Usage() string {
- return "fd_receiver <flags>"
-}
-
-// SetFlags implements subcommands.Command.SetFlags.
-func (fdr *fdReceiver) SetFlags(f *flag.FlagSet) {
- f.StringVar(&fdr.socketPath, "socket", "", "path to socket")
-}
-
-// Execute implements subcommands.Command.Execute.
-func (fdr *fdReceiver) Execute(ctx context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {
- if fdr.socketPath == "" {
- log.Fatalf("Flags cannot be empty, given: socket: %q", fdr.socketPath)
- }
-
- ss, err := unet.BindAndListen(fdr.socketPath, true /* packet */)
- if err != nil {
- log.Fatalf("BindAndListen(%q) failed: %v", fdr.socketPath, err)
- }
- defer ss.Close()
-
- var s *unet.Socket
- c := make(chan error, 1)
- go func() {
- var err error
- s, err = ss.Accept()
- c <- err
- }()
-
- select {
- case err := <-c:
- if err != nil {
- log.Fatalf("Accept() failed: %v", err)
- }
- case <-time.After(10 * time.Second):
- log.Fatalf("Timeout waiting for accept")
- }
-
- r := s.Reader(true)
- r.EnableFDs(1)
- b := [][]byte{{'a'}}
- if n, err := r.ReadVec(b); n != 1 || err != nil {
- log.Fatalf("ReadVec got n=%d err %v (wanted 0, nil)", n, err)
- }
-
- fds, err := r.ExtractFDs()
- if err != nil {
- log.Fatalf("ExtractFD() got err %v", err)
- }
- if len(fds) != 1 {
- log.Fatalf("ExtractFD() got %d FDs, wanted 1", len(fds))
- }
- fd := fds[0]
-
- file := os.NewFile(uintptr(fd), "received file")
- defer file.Close()
- if _, err := file.Seek(0, io.SeekStart); err != nil {
- log.Fatalf("Error from seek(0, 0): %v", err)
- }
-
- got, err := ioutil.ReadAll(file)
- if err != nil {
- log.Fatalf("ReadAll failed: %v", err)
- }
- if string(got) != fileContents {
- log.Fatalf("ReadAll got %q want %q", string(got), fileContents)
- }
-
- log.Print("FD RECEIVER exiting successfully")
- return subcommands.ExitSuccess
-}
diff --git a/test/cmd/test_app/main.go b/test/cmd/test_app/main.go
deleted file mode 100644
index 2d08ce2db..000000000
--- a/test/cmd/test_app/main.go
+++ /dev/null
@@ -1,400 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Binary test_app is like a Swiss Army knife for tests that need to run
-// anything inside the sandbox. New functionality can be added with new commands.
-package main
-
-import (
- "context"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "net"
- "os"
- "os/exec"
- "regexp"
- "strconv"
- sys "syscall"
- "time"
-
- "github.com/google/subcommands"
- "github.com/kr/pty"
- "gvisor.dev/gvisor/pkg/test/testutil"
- "gvisor.dev/gvisor/runsc/flag"
-)
-
-func main() {
- subcommands.Register(subcommands.HelpCommand(), "")
- subcommands.Register(subcommands.FlagsCommand(), "")
- subcommands.Register(new(capability), "")
- subcommands.Register(new(fdReceiver), "")
- subcommands.Register(new(fdSender), "")
- subcommands.Register(new(forkBomb), "")
- subcommands.Register(new(ptyRunner), "")
- subcommands.Register(new(reaper), "")
- subcommands.Register(new(syscall), "")
- subcommands.Register(new(taskTree), "")
- subcommands.Register(new(uds), "")
-
- flag.Parse()
-
- exitCode := subcommands.Execute(context.Background())
- os.Exit(int(exitCode))
-}
-
-type uds struct {
- fileName string
- socketPath string
-}
-
-// Name implements subcommands.Command.Name.
-func (*uds) Name() string {
- return "uds"
-}
-
-// Synopsis implements subcommands.Command.Synopsis.
-func (*uds) Synopsis() string {
- return "creates unix domain socket client and server. Client sends a contant flow of sequential numbers. Server prints them to --file"
-}
-
-// Usage implements subcommands.Command.Usage.
-func (*uds) Usage() string {
- return "uds <flags>"
-}
-
-// SetFlags implements subcommands.Command.SetFlags.
-func (c *uds) SetFlags(f *flag.FlagSet) {
- f.StringVar(&c.fileName, "file", "", "name of output file")
- f.StringVar(&c.socketPath, "socket", "", "path to socket")
-}
-
-// Execute implements subcommands.Command.Execute.
-func (c *uds) Execute(ctx context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {
- if c.fileName == "" || c.socketPath == "" {
- log.Fatalf("Flags cannot be empty, given: fileName: %q, socketPath: %q", c.fileName, c.socketPath)
- return subcommands.ExitFailure
- }
- outputFile, err := os.OpenFile(c.fileName, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- log.Fatal("error opening output file:", err)
- }
-
- defer os.Remove(c.socketPath)
-
- listener, err := net.Listen("unix", c.socketPath)
- if err != nil {
- log.Fatalf("error listening on socket %q: %v", c.socketPath, err)
- }
-
- go server(listener, outputFile)
- for i := 0; ; i++ {
- conn, err := net.Dial("unix", c.socketPath)
- if err != nil {
- log.Fatal("error dialing:", err)
- }
- if _, err := conn.Write([]byte(strconv.Itoa(i))); err != nil {
- log.Fatal("error writing:", err)
- }
- conn.Close()
- time.Sleep(100 * time.Millisecond)
- }
-}
-
-func server(listener net.Listener, out *os.File) {
- buf := make([]byte, 16)
-
- for {
- c, err := listener.Accept()
- if err != nil {
- log.Fatal("error accepting connection:", err)
- }
- nr, err := c.Read(buf)
- if err != nil {
- log.Fatal("error reading from buf:", err)
- }
- data := buf[0:nr]
- fmt.Fprint(out, string(data)+"\n")
- }
-}
-
-type taskTree struct {
- depth int
- width int
- pause bool
-}
-
-// Name implements subcommands.Command.
-func (*taskTree) Name() string {
- return "task-tree"
-}
-
-// Synopsis implements subcommands.Command.
-func (*taskTree) Synopsis() string {
- return "creates a tree of tasks"
-}
-
-// Usage implements subcommands.Command.
-func (*taskTree) Usage() string {
- return "task-tree <flags>"
-}
-
-// SetFlags implements subcommands.Command.
-func (c *taskTree) SetFlags(f *flag.FlagSet) {
- f.IntVar(&c.depth, "depth", 1, "number of levels to create")
- f.IntVar(&c.width, "width", 1, "number of tasks at each level")
- f.BoolVar(&c.pause, "pause", false, "whether the tasks should pause perpetually")
-}
-
-// Execute implements subcommands.Command.
-func (c *taskTree) Execute(ctx context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {
- if c.depth == 0 {
- log.Printf("Child sleeping, PID: %d\n", os.Getpid())
- for {
- time.Sleep(time.Hour)
- }
- }
-
- log.Printf("Parent %d creating %d children, PID: %d\n", c.depth, c.width, os.Getpid())
-
- stop := testutil.StartReaper()
- defer stop()
-
- var cmds []*exec.Cmd
- for i := 0; i < c.width; i++ {
- cmd := exec.Command(
- "/proc/self/exe", c.Name(),
- "--depth", strconv.Itoa(c.depth-1),
- "--width", strconv.Itoa(c.width),
- fmt.Sprintf("--pause=%t", c.pause))
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
-
- if err := cmd.Start(); err != nil {
- log.Fatal("failed to call self:", err)
- }
- cmds = append(cmds, cmd)
- }
-
- for _, c := range cmds {
- c.Wait()
- }
-
- if c.pause {
- log.Printf("Parent %d sleeping, PID: %d\n", c.depth, os.Getpid())
- for {
- time.Sleep(time.Hour)
- }
- }
-
- return subcommands.ExitSuccess
-}
-
-type forkBomb struct {
- delay time.Duration
-}
-
-// Name implements subcommands.Command.
-func (*forkBomb) Name() string {
- return "fork-bomb"
-}
-
-// Synopsis implements subcommands.Command.
-func (*forkBomb) Synopsis() string {
- return "creates child process until the end of times"
-}
-
-// Usage implements subcommands.Command.
-func (*forkBomb) Usage() string {
- return "fork-bomb <flags>"
-}
-
-// SetFlags implements subcommands.Command.
-func (c *forkBomb) SetFlags(f *flag.FlagSet) {
- f.DurationVar(&c.delay, "delay", 100*time.Millisecond, "amount of time to delay creation of child")
-}
-
-// Execute implements subcommands.Command.
-func (c *forkBomb) Execute(ctx context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {
- time.Sleep(c.delay)
-
- cmd := exec.Command("/proc/self/exe", c.Name())
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
- if err := cmd.Run(); err != nil {
- log.Fatal("failed to call self:", err)
- }
- return subcommands.ExitSuccess
-}
-
-type reaper struct{}
-
-// Name implements subcommands.Command.
-func (*reaper) Name() string {
- return "reaper"
-}
-
-// Synopsis implements subcommands.Command.
-func (*reaper) Synopsis() string {
- return "reaps all children in a loop"
-}
-
-// Usage implements subcommands.Command.
-func (*reaper) Usage() string {
- return "reaper <flags>"
-}
-
-// SetFlags implements subcommands.Command.
-func (*reaper) SetFlags(*flag.FlagSet) {}
-
-// Execute implements subcommands.Command.
-func (c *reaper) Execute(ctx context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {
- stop := testutil.StartReaper()
- defer stop()
- select {}
-}
-
-type syscall struct {
- sysno uint64
-}
-
-// Name implements subcommands.Command.
-func (*syscall) Name() string {
- return "syscall"
-}
-
-// Synopsis implements subcommands.Command.
-func (*syscall) Synopsis() string {
- return "syscall makes a syscall"
-}
-
-// Usage implements subcommands.Command.
-func (*syscall) Usage() string {
- return "syscall <flags>"
-}
-
-// SetFlags implements subcommands.Command.
-func (s *syscall) SetFlags(f *flag.FlagSet) {
- f.Uint64Var(&s.sysno, "syscall", 0, "syscall to call")
-}
-
-// Execute implements subcommands.Command.
-func (s *syscall) Execute(ctx context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {
- if _, _, errno := sys.Syscall(uintptr(s.sysno), 0, 0, 0); errno != 0 {
- fmt.Printf("syscall(%d, 0, 0...) failed: %v\n", s.sysno, errno)
- } else {
- fmt.Printf("syscall(%d, 0, 0...) success\n", s.sysno)
- }
- return subcommands.ExitSuccess
-}
-
-type capability struct {
- enabled uint64
- disabled uint64
-}
-
-// Name implements subcommands.Command.
-func (*capability) Name() string {
- return "capability"
-}
-
-// Synopsis implements subcommands.Command.
-func (*capability) Synopsis() string {
- return "checks if effective capabilities are set/unset"
-}
-
-// Usage implements subcommands.Command.
-func (*capability) Usage() string {
- return "capability [--enabled=number] [--disabled=number]"
-}
-
-// SetFlags implements subcommands.Command.
-func (c *capability) SetFlags(f *flag.FlagSet) {
- f.Uint64Var(&c.enabled, "enabled", 0, "")
- f.Uint64Var(&c.disabled, "disabled", 0, "")
-}
-
-// Execute implements subcommands.Command.
-func (c *capability) Execute(ctx context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {
- if c.enabled == 0 && c.disabled == 0 {
- fmt.Println("One of the flags must be set")
- return subcommands.ExitUsageError
- }
-
- status, err := ioutil.ReadFile("/proc/self/status")
- if err != nil {
- fmt.Printf("Error reading %q: %v\n", "proc/self/status", err)
- return subcommands.ExitFailure
- }
- re := regexp.MustCompile("CapEff:\t([0-9a-f]+)\n")
- matches := re.FindStringSubmatch(string(status))
- if matches == nil || len(matches) != 2 {
- fmt.Printf("Effective capabilities not found in\n%s\n", status)
- return subcommands.ExitFailure
- }
- caps, err := strconv.ParseUint(matches[1], 16, 64)
- if err != nil {
- fmt.Printf("failed to convert capabilities %q: %v\n", matches[1], err)
- return subcommands.ExitFailure
- }
-
- if c.enabled != 0 && (caps&c.enabled) != c.enabled {
- fmt.Printf("Missing capabilities, want: %#x: got: %#x\n", c.enabled, caps)
- return subcommands.ExitFailure
- }
- if c.disabled != 0 && (caps&c.disabled) != 0 {
- fmt.Printf("Extra capabilities found, dont_want: %#x: got: %#x\n", c.disabled, caps)
- return subcommands.ExitFailure
- }
-
- return subcommands.ExitSuccess
-}
-
-type ptyRunner struct{}
-
-// Name implements subcommands.Command.
-func (*ptyRunner) Name() string {
- return "pty-runner"
-}
-
-// Synopsis implements subcommands.Command.
-func (*ptyRunner) Synopsis() string {
- return "runs the given command with an open pty terminal"
-}
-
-// Usage implements subcommands.Command.
-func (*ptyRunner) Usage() string {
- return "pty-runner [command]"
-}
-
-// SetFlags implements subcommands.Command.SetFlags.
-func (*ptyRunner) SetFlags(f *flag.FlagSet) {}
-
-// Execute implements subcommands.Command.
-func (*ptyRunner) Execute(_ context.Context, fs *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus {
- c := exec.Command(fs.Args()[0], fs.Args()[1:]...)
- f, err := pty.Start(c)
- if err != nil {
- fmt.Printf("pty.Start failed: %v", err)
- return subcommands.ExitFailure
- }
- defer f.Close()
-
- // Copy stdout from the command to keep this process alive until the
- // subprocess exits.
- io.Copy(os.Stdout, f)
-
- return subcommands.ExitSuccess
-}
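For illustration, a hedged sketch of driving one of these subcommands from another Go program; the binary path is an assumption (the BUILD file above only states that the go_binary is visible to //runsc/container):

package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	// Spawn a small process tree via the task-tree subcommand and the flags it
	// defines above. "/test_app" is an assumed install location.
	cmd := exec.Command("/test_app", "task-tree", "--depth=2", "--width=3", "--pause=false")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		log.Fatalf("test_app task-tree failed: %v", err)
	}
}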
diff --git a/test/e2e/BUILD b/test/e2e/BUILD
deleted file mode 100644
index 1e9792b4f..000000000
--- a/test/e2e/BUILD
+++ /dev/null
@@ -1,32 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_test(
- name = "integration_test",
- size = "large",
- srcs = [
- "exec_test.go",
- "integration_test.go",
- ],
- library = ":integration",
- tags = [
- # Requires docker and runsc to be configured before the test runs.
- "local",
- "manual",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/bits",
- "//pkg/test/dockerutil",
- "//pkg/test/testutil",
- "//runsc/specutils",
- "@com_github_docker_docker//api/types/mount:go_default_library",
- ],
-)
-
-go_library(
- name = "integration",
- srcs = ["integration.go"],
-)
diff --git a/test/e2e/exec_test.go b/test/e2e/exec_test.go
deleted file mode 100644
index b47df447c..000000000
--- a/test/e2e/exec_test.go
+++ /dev/null
@@ -1,268 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package integration provides end-to-end integration tests for runsc. These
-// tests require docker and runsc to be installed on the machine.
-//
-// Each test calls docker commands to start up a container, and tests that it
-// is behaving properly, with various runsc commands. The container is killed
-// and deleted at the end.
-
-package integration
-
-import (
- "context"
- "fmt"
- "strconv"
- "strings"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/bits"
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/runsc/specutils"
-)
-
-// Test that exec uses the exact same capability set as the container.
-func TestExecCapabilities(t *testing.T) {
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- // Start the container.
- if err := d.Spawn(ctx, dockerutil.RunOpts{
- Image: "basic/alpine",
- }, "sh", "-c", "cat /proc/self/status; sleep 100"); err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
-
- // Capture the effective capabilities reported by the container.
- matches, err := d.WaitForOutputSubmatch(ctx, "CapEff:\t([0-9a-f]+)\n", 5*time.Second)
- if err != nil {
- t.Fatalf("WaitForOutputSubmatch() timeout: %v", err)
- }
- if len(matches) != 2 {
- t.Fatalf("There should be a match for the whole line and the capability bitmask")
- }
- want := fmt.Sprintf("CapEff:\t%s\n", matches[1])
- t.Log("Root capabilities:", want)
-
- // Now check that exec'd process capabilities match the root.
- got, err := d.Exec(ctx, dockerutil.ExecOpts{}, "grep", "CapEff:", "/proc/self/status")
- if err != nil {
- t.Fatalf("docker exec failed: %v", err)
- }
- t.Logf("CapEff: %v", got)
- if got != want {
- t.Errorf("wrong capabilities, got: %q, want: %q", got, want)
- }
-}
-
-// Test that 'exec --privileged' adds all capabilities, except for CAP_NET_RAW
-// which is removed from the container when --net-raw=false.
-func TestExecPrivileged(t *testing.T) {
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- // Start the container with all capabilities dropped.
- if err := d.Spawn(ctx, dockerutil.RunOpts{
- Image: "basic/alpine",
- CapDrop: []string{"all"},
- }, "sh", "-c", "cat /proc/self/status; sleep 100"); err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
-
- // Check that all capabilities were dropped from the container.
- matches, err := d.WaitForOutputSubmatch(ctx, "CapEff:\t([0-9a-f]+)\n", 5*time.Second)
- if err != nil {
- t.Fatalf("WaitForOutputSubmatch() timeout: %v", err)
- }
- if len(matches) != 2 {
- t.Fatalf("There should be a match for the whole line and the capability bitmask")
- }
- containerCaps, err := strconv.ParseUint(matches[1], 16, 64)
- if err != nil {
- t.Fatalf("failed to convert capabilities %q: %v", matches[1], err)
- }
- t.Logf("Container capabilities: %#x", containerCaps)
- if containerCaps != 0 {
- t.Fatalf("Container should have no capabilities: %x", containerCaps)
- }
-
- // Check that 'exec --privileged' adds all capabilities, except for
- // CAP_NET_RAW.
- got, err := d.Exec(ctx, dockerutil.ExecOpts{
- Privileged: true,
- }, "grep", "CapEff:", "/proc/self/status")
- if err != nil {
- t.Fatalf("docker exec failed: %v", err)
- }
- t.Logf("Exec CapEff: %v", got)
- want := fmt.Sprintf("CapEff:\t%016x\n", specutils.AllCapabilitiesUint64()&^bits.MaskOf64(int(linux.CAP_NET_RAW)))
- if got != want {
- t.Errorf("Wrong capabilities, got: %q, want: %q. Make sure runsc is not using '--net-raw'", got, want)
- }
-}
-
-func TestExecJobControl(t *testing.T) {
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- // Start the container.
- if err := d.Spawn(ctx, dockerutil.RunOpts{
- Image: "basic/alpine",
- }, "sleep", "1000"); err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
-
- p, err := d.ExecProcess(ctx, dockerutil.ExecOpts{UseTTY: true}, "/bin/sh")
- if err != nil {
- t.Fatalf("docker exec failed: %v", err)
- }
-
- if _, err = p.Write(time.Second, []byte("sleep 100 | cat\n")); err != nil {
- t.Fatalf("error exit: %v", err)
- }
- time.Sleep(time.Second)
-
- if _, err = p.Write(time.Second, []byte{0x03}); err != nil {
- t.Fatalf("error exit: %v", err)
- }
-
- if _, err = p.Write(time.Second, []byte("exit $(expr $? + 10)\n")); err != nil {
- t.Fatalf("error exit: %v", err)
- }
-
- want := 140
- got, err := p.WaitExitStatus(ctx)
- if err != nil {
- t.Fatalf("wait for exit failed with: %v", err)
- } else if got != want {
- t.Fatalf("wait for exit returned: %d want: %d", got, want)
- }
-}
-
-// Test that failure to exec returns proper error message.
-func TestExecError(t *testing.T) {
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- // Start the container.
- if err := d.Spawn(ctx, dockerutil.RunOpts{
- Image: "basic/alpine",
- }, "sleep", "1000"); err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
-
- // Attempt to exec a binary that doesn't exist.
- out, err := d.Exec(ctx, dockerutil.ExecOpts{}, "no_can_find")
- if err == nil {
- t.Fatalf("docker exec didn't fail")
- }
- if want := `error finding executable "no_can_find" in PATH`; !strings.Contains(out, want) {
- t.Fatalf("docker exec wrong error, got: %s, want: .*%s.*", out, want)
- }
-}
-
-// Test that exec inherits environment from run.
-func TestExecEnv(t *testing.T) {
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- // Start the container with env FOO=BAR.
- if err := d.Spawn(ctx, dockerutil.RunOpts{
- Image: "basic/alpine",
- Env: []string{"FOO=BAR"},
- }, "sleep", "1000"); err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
-
- // Exec "echo $FOO".
- got, err := d.Exec(ctx, dockerutil.ExecOpts{}, "/bin/sh", "-c", "echo $FOO")
- if err != nil {
- t.Fatalf("docker exec failed: %v", err)
- }
- if got, want := strings.TrimSpace(got), "BAR"; got != want {
- t.Errorf("bad output from 'docker exec'. Got %q; Want %q.", got, want)
- }
-}
-
-// TestRunEnvHasHome tests that run always has HOME environment set.
-func TestRunEnvHasHome(t *testing.T) {
- // Base alpine image does not have any environment variables set.
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- // Exec "echo $HOME". The 'bin' user's home dir is '/bin'.
- got, err := d.Run(ctx, dockerutil.RunOpts{
- Image: "basic/alpine",
- User: "bin",
- }, "/bin/sh", "-c", "echo $HOME")
- if err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
-
- // Check that the directory matches.
- if got, want := strings.TrimSpace(got), "/bin"; got != want {
- t.Errorf("bad output from 'docker run'. Got %q; Want %q.", got, want)
- }
-}
-
-// Test that exec always has HOME environment set, even when not set in run.
-func TestExecEnvHasHome(t *testing.T) {
- // Base alpine image does not have any environment variables set.
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- if err := d.Spawn(ctx, dockerutil.RunOpts{
- Image: "basic/alpine",
- }, "sleep", "1000"); err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
-
- // Exec "echo $HOME", and expect to see "/root".
- got, err := d.Exec(ctx, dockerutil.ExecOpts{}, "/bin/sh", "-c", "echo $HOME")
- if err != nil {
- t.Fatalf("docker exec failed: %v", err)
- }
- if want := "/root"; !strings.Contains(got, want) {
- t.Errorf("wanted exec output to contain %q, got %q", want, got)
- }
-
- // Create a new user with a home directory.
- newUID := 1234
- newHome := "/foo/bar"
- cmd := fmt.Sprintf("mkdir -p -m 777 %q && adduser foo -D -u %d -h %q", newHome, newUID, newHome)
- if _, err := d.Exec(ctx, dockerutil.ExecOpts{}, "/bin/sh", "-c", cmd); err != nil {
- t.Fatalf("docker exec failed: %v", err)
- }
-
- // Execute the same command as the new user and expect newHome.
- got, err = d.Exec(ctx, dockerutil.ExecOpts{
- User: strconv.Itoa(newUID),
- }, "/bin/sh", "-c", "echo $HOME")
- if err != nil {
- t.Fatalf("docker exec failed: %v", err)
- }
- if want := newHome; !strings.Contains(got, want) {
- t.Errorf("wanted exec output to contain %q, got %q", want, got)
- }
-}
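The expected-capabilities value in TestExecPrivileged is plain bit arithmetic. A standalone sketch of clearing CAP_NET_RAW (bit 13 on Linux) from a full mask, without the gVisor helpers; the full-mask value here is only an example, since the real test derives it from specutils.AllCapabilitiesUint64():

package main

import "fmt"

func main() {
	allCaps := uint64(1)<<38 - 1 // Example: capabilities 0..37 all set.
	const capNetRaw = 13         // linux.CAP_NET_RAW
	fmt.Printf("CapEff:\t%016x\n", allCaps&^(uint64(1)<<capNetRaw))
	// Prints: CapEff:	0000003fffffdfff
}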
diff --git a/test/e2e/integration.go b/test/e2e/integration.go
deleted file mode 100644
index 4cd5f6c24..000000000
--- a/test/e2e/integration.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package integration is empty. See integration_test.go for description.
-package integration
diff --git a/test/e2e/integration_test.go b/test/e2e/integration_test.go
deleted file mode 100644
index 9e22c9a7d..000000000
--- a/test/e2e/integration_test.go
+++ /dev/null
@@ -1,744 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package integration provides end-to-end integration tests for runsc.
-//
-// Each test calls docker commands to start up a container, and tests that it is
-// behaving properly, with various runsc commands. The container is killed and
-// deleted at the end.
-//
-// Setup instruction in test/README.md.
-package integration
-
-import (
- "context"
- "flag"
- "fmt"
- "io/ioutil"
- "net"
- "net/http"
- "os"
- "path/filepath"
- "regexp"
- "strconv"
- "strings"
- "testing"
- "time"
-
- "github.com/docker/docker/api/types/mount"
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/pkg/test/testutil"
-)
-
-// defaultWait is the default wait time used for tests.
-const defaultWait = time.Minute
-
-func TestMain(m *testing.M) {
- dockerutil.EnsureSupportedDockerVersion()
- flag.Parse()
- os.Exit(m.Run())
-}
-
-// httpRequestSucceeds sends a request to a given URL and checks that the status is OK.
-func httpRequestSucceeds(client http.Client, server string, port int) error {
- url := fmt.Sprintf("http://%s:%d", server, port)
- // Ensure that content is being served.
- resp, err := client.Get(url)
- if err != nil {
- return fmt.Errorf("error reaching http server: %v", err)
- }
- if want := http.StatusOK; resp.StatusCode != want {
- return fmt.Errorf("wrong response code, got: %d, want: %d", resp.StatusCode, want)
- }
- return nil
-}
-
-// TestLifeCycle tests a basic Create/Start/Stop docker container life cycle.
-func TestLifeCycle(t *testing.T) {
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- // Start the container.
- port := 80
- if err := d.Create(ctx, dockerutil.RunOpts{
- Image: "basic/nginx",
- Ports: []int{port},
- }); err != nil {
- t.Fatalf("docker create failed: %v", err)
- }
- if err := d.Start(ctx); err != nil {
- t.Fatalf("docker start failed: %v", err)
- }
-
- ip, err := d.FindIP(ctx, false)
- if err != nil {
- t.Fatalf("docker.FindIP failed: %v", err)
- }
- if err := testutil.WaitForHTTP(ip.String(), port, defaultWait); err != nil {
- t.Fatalf("WaitForHTTP() timeout: %v", err)
- }
- client := http.Client{Timeout: defaultWait}
- if err := httpRequestSucceeds(client, ip.String(), port); err != nil {
- t.Errorf("http request failed: %v", err)
- }
-
- if err := d.Stop(ctx); err != nil {
- t.Fatalf("docker stop failed: %v", err)
- }
- if err := d.Remove(ctx); err != nil {
- t.Fatalf("docker rm failed: %v", err)
- }
-}
-
-func TestPauseResume(t *testing.T) {
- if !testutil.IsCheckpointSupported() {
- t.Skip("Checkpoint is not supported.")
- }
-
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- // Start the container.
- port := 8080
- if err := d.Spawn(ctx, dockerutil.RunOpts{
- Image: "basic/python",
- Ports: []int{port}, // See Dockerfile.
- }); err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
-
- // Find container IP address.
- ip, err := d.FindIP(ctx, false)
- if err != nil {
- t.Fatalf("docker.FindIP failed: %v", err)
- }
-
- // Wait until it's up and running.
- if err := testutil.WaitForHTTP(ip.String(), port, defaultWait); err != nil {
- t.Fatalf("WaitForHTTP() timeout: %v", err)
- }
-
- // Check that container is working.
- client := http.Client{Timeout: defaultWait}
- if err := httpRequestSucceeds(client, ip.String(), port); err != nil {
- t.Error("http request failed:", err)
- }
-
- if err := d.Pause(ctx); err != nil {
- t.Fatalf("docker pause failed: %v", err)
- }
-
- // Check if container is paused.
- client = http.Client{Timeout: 10 * time.Millisecond} // Don't wait a minute.
- switch _, err := client.Get(fmt.Sprintf("http://%s:%d", ip.String(), port)); v := err.(type) {
- case nil:
- t.Errorf("http req expected to fail but it succeeded")
- case net.Error:
- if !v.Timeout() {
- t.Errorf("http req got error %v, wanted timeout", v)
- }
- default:
- t.Errorf("http req got unexpected error %v", v)
- }
-
- if err := d.Unpause(ctx); err != nil {
- t.Fatalf("docker unpause failed: %v", err)
- }
-
- // Wait until it's up and running.
- if err := testutil.WaitForHTTP(ip.String(), port, defaultWait); err != nil {
- t.Fatalf("WaitForHTTP() timeout: %v", err)
- }
-
- // Check if container is working again.
- client = http.Client{Timeout: defaultWait}
- if err := httpRequestSucceeds(client, ip.String(), port); err != nil {
- t.Error("http request failed:", err)
- }
-}
-
-func TestCheckpointRestore(t *testing.T) {
- if !testutil.IsCheckpointSupported() {
- t.Skip("Pause/resume is not supported.")
- }
-
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- // Start the container.
- port := 8080
- if err := d.Spawn(ctx, dockerutil.RunOpts{
- Image: "basic/python",
- Ports: []int{port}, // See Dockerfile.
- }); err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
-
- // Create a snapshot.
- if err := d.Checkpoint(ctx, "test"); err != nil {
- t.Fatalf("docker checkpoint failed: %v", err)
- }
- if err := d.WaitTimeout(ctx, defaultWait); err != nil {
- t.Fatalf("wait failed: %v", err)
- }
-
- // TODO(b/143498576): Remove Poll after github.com/moby/moby/issues/38963 is fixed.
- if err := testutil.Poll(func() error { return d.Restore(ctx, "test") }, defaultWait); err != nil {
- t.Fatalf("docker restore failed: %v", err)
- }
-
- // Find container IP address.
- ip, err := d.FindIP(ctx, false)
- if err != nil {
- t.Fatalf("docker.FindIP failed: %v", err)
- }
-
- // Wait until it's up and running.
- if err := testutil.WaitForHTTP(ip.String(), port, defaultWait); err != nil {
- t.Fatalf("WaitForHTTP() timeout: %v", err)
- }
-
- // Check if container is working again.
- client := http.Client{Timeout: defaultWait}
- if err := httpRequestSucceeds(client, ip.String(), port); err != nil {
- t.Error("http request failed:", err)
- }
-}
-
-// Creates a client and a server that talk to each other using the local IP.
-func TestConnectToSelf(t *testing.T) {
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
-	// Creates a server that replies "server" and exits. Sleeps at the end because
-	// 'docker exec' gets killed if the init process exits before it can finish.
- if err := d.Spawn(ctx, dockerutil.RunOpts{
- Image: "basic/ubuntu",
- }, "/bin/sh", "-c", "echo server | nc -l -p 8080 && sleep 1"); err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
-
- // Finds IP address for host.
- ip, err := d.Exec(ctx, dockerutil.ExecOpts{}, "/bin/sh", "-c", "cat /etc/hosts | grep ${HOSTNAME} | awk '{print $1}'")
- if err != nil {
- t.Fatalf("docker exec failed: %v", err)
- }
- ip = strings.TrimRight(ip, "\n")
-
- // Runs client that sends "client" to the server and exits.
- reply, err := d.Exec(ctx, dockerutil.ExecOpts{}, "/bin/sh", "-c", fmt.Sprintf("echo client | nc %s 8080", ip))
- if err != nil {
- t.Fatalf("docker exec failed: %v", err)
- }
-
- // Ensure both client and server got the message from each other.
- if want := "server\n"; reply != want {
- t.Errorf("Error on server, want: %q, got: %q", want, reply)
- }
- if _, err := d.WaitForOutput(ctx, "^client\n$", defaultWait); err != nil {
- t.Fatalf("docker.WaitForOutput(client) timeout: %v", err)
- }
-}
-
-func TestMemLimit(t *testing.T) {
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- allocMemoryKb := 50 * 1024
- out, err := d.Run(ctx, dockerutil.RunOpts{
- Image: "basic/alpine",
- Memory: allocMemoryKb * 1024, // In bytes.
- }, "sh", "-c", "cat /proc/meminfo | grep MemTotal: | awk '{print $2}'")
- if err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
-
- // Remove warning message that swap isn't present.
- if strings.HasPrefix(out, "WARNING") {
- lines := strings.Split(out, "\n")
- if len(lines) != 3 {
- t.Fatalf("invalid output: %s", out)
- }
- out = lines[1]
- }
-
- // Ensure the memory matches what we want.
- got, err := strconv.ParseUint(strings.TrimSpace(out), 10, 64)
- if err != nil {
- t.Fatalf("failed to parse %q: %v", out, err)
- }
- if want := uint64(allocMemoryKb); got != want {
- t.Errorf("MemTotal got: %d, want: %d", got, want)
- }
-}
-
-func TestNumCPU(t *testing.T) {
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- // Read how many cores are in the container.
- out, err := d.Run(ctx, dockerutil.RunOpts{
- Image: "basic/alpine",
- CpusetCpus: "0",
- }, "sh", "-c", "cat /proc/cpuinfo | grep 'processor.*:' | wc -l")
- if err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
-
- // Ensure it matches what we want.
- got, err := strconv.Atoi(strings.TrimSpace(out))
- if err != nil {
- t.Fatalf("failed to parse %q: %v", out, err)
- }
- if want := 1; got != want {
-		t.Errorf("NumCPU got: %d, want: %d", got, want)
- }
-}
-
-// TestJobControl tests that job control characters are handled properly.
-func TestJobControl(t *testing.T) {
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- // Start the container with an attached PTY.
- p, err := d.SpawnProcess(ctx, dockerutil.RunOpts{
- Image: "basic/alpine",
- }, "sh", "-c", "sleep 100 | cat")
- if err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
- // Give shell a few seconds to start executing the sleep.
- time.Sleep(2 * time.Second)
-
- if _, err := p.Write(time.Second, []byte{0x03}); err != nil {
-		t.Fatalf("error writing ^C to pty: %v", err)
- }
-
- if err := d.WaitTimeout(ctx, 3*time.Second); err != nil {
- t.Fatalf("WaitTimeout failed: %v", err)
- }
-
- want := 130
- got, err := p.WaitExitStatus(ctx)
- if err != nil {
- t.Fatalf("wait for exit failed with: %v", err)
- } else if got != want {
- t.Fatalf("got: %d want: %d", got, want)
- }
-}
-
-// TestWorkingDirCreation checks that the working dir is created if it doesn't exist.
-func TestWorkingDirCreation(t *testing.T) {
- for _, tc := range []struct {
- name string
- workingDir string
- }{
- {name: "root", workingDir: "/foo"},
- {name: "tmp", workingDir: "/tmp/foo"},
- } {
- for _, readonly := range []bool{true, false} {
- name := tc.name
- if readonly {
- name += "-readonly"
- }
- t.Run(name, func(t *testing.T) {
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- opts := dockerutil.RunOpts{
- Image: "basic/alpine",
- WorkDir: tc.workingDir,
- ReadOnly: readonly,
- }
- got, err := d.Run(ctx, opts, "sh", "-c", "echo ${PWD}")
- if err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
- if want := tc.workingDir + "\n"; want != got {
- t.Errorf("invalid working dir, want: %q, got: %q", want, got)
- }
- })
- }
- }
-}
-
-// TestTmpFile checks that files inside '/tmp' are not overridden.
-func TestTmpFile(t *testing.T) {
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- opts := dockerutil.RunOpts{Image: "basic/tmpfile"}
- got, err := d.Run(ctx, opts, "cat", "/tmp/foo/file.txt")
- if err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
- if want := "123\n"; want != got {
- t.Errorf("invalid file content, want: %q, got: %q", want, got)
- }
-}
-
-// TestTmpMount checks that mounts inside '/tmp' are not overridden.
-func TestTmpMount(t *testing.T) {
- dir, err := ioutil.TempDir(testutil.TmpDir(), "tmp-mount")
- if err != nil {
- t.Fatalf("TempDir(): %v", err)
- }
- const want = "123"
- if err := ioutil.WriteFile(filepath.Join(dir, "file.txt"), []byte("123"), 0666); err != nil {
- t.Fatalf("WriteFile(): %v", err)
- }
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- opts := dockerutil.RunOpts{
- Image: "basic/alpine",
- Mounts: []mount.Mount{
- {
- Type: mount.TypeBind,
- Source: dir,
- Target: "/tmp/foo",
- },
- },
- }
- got, err := d.Run(ctx, opts, "cat", "/tmp/foo/file.txt")
- if err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
- if want != got {
- t.Errorf("invalid file content, want: %q, got: %q", want, got)
- }
-}
-
-// Test that it is allowed to mount a file on top of /dev files, e.g.
-// /dev/random.
-func TestMountOverDev(t *testing.T) {
- if vfs2, err := dockerutil.UsingVFS2(); err != nil {
- t.Fatalf("Failed to read config for runtime %s: %v", dockerutil.Runtime(), err)
- } else if !vfs2 {
- t.Skip("VFS1 doesn't allow /dev/random to be mounted.")
- }
-
- random, err := ioutil.TempFile(testutil.TmpDir(), "random")
- if err != nil {
- t.Fatal("ioutil.TempFile() failed:", err)
- }
- const want = "123"
- if _, err := random.WriteString(want); err != nil {
- t.Fatalf("WriteString() to %q: %v", random.Name(), err)
- }
-
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- opts := dockerutil.RunOpts{
- Image: "basic/alpine",
- Mounts: []mount.Mount{
- {
- Type: mount.TypeBind,
- Source: random.Name(),
- Target: "/dev/random",
- },
- },
- }
- cmd := "dd count=1 bs=5 if=/dev/random 2> /dev/null"
- got, err := d.Run(ctx, opts, "sh", "-c", cmd)
- if err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
- if want != got {
- t.Errorf("invalid file content, want: %q, got: %q", want, got)
- }
-}
-
-// TestSyntheticDirs checks that submounts can be created inside a readonly
-// mount even if the target path does not exist.
-func TestSyntheticDirs(t *testing.T) {
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- opts := dockerutil.RunOpts{
- Image: "basic/alpine",
- // Make the root read-only to force use of synthetic dirs
- // inside the root gofer mount.
- ReadOnly: true,
- Mounts: []mount.Mount{
- // Mount inside read-only gofer-backed root.
- {
- Type: mount.TypeTmpfs,
- Target: "/foo/bar/baz",
- },
- // Mount inside sysfs, which always uses synthetic dirs
- // for submounts.
- {
- Type: mount.TypeTmpfs,
- Target: "/sys/foo/bar/baz",
- },
- },
- }
- // Make sure the directories exist.
- if _, err := d.Run(ctx, opts, "ls", "/foo/bar/baz", "/sys/foo/bar/baz"); err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
-
-}
-
-// TestHostOverlayfsCopyUp tests that the --overlayfs-stale-read option causes
-// runsc to hide the incoherence of FDs opened before and after overlayfs
-// copy-up on the host.
-func TestHostOverlayfsCopyUp(t *testing.T) {
- runIntegrationTest(t, nil, "./test_copy_up")
-}
-
-// TestHostOverlayfsRewindDir tests that rewinddir() "causes the directory
-// stream to refer to the current state of the corresponding directory, as a
-// call to opendir() would have done" as required by POSIX, when the directory
-// in question is host overlayfs.
-//
-// This test specifically targets host overlayfs because, per POSIX, "if a file
-// is removed from or added to the directory after the most recent call to
-// opendir() or rewinddir(), whether a subsequent call to readdir() returns an
-// entry for that file is unspecified"; the host filesystems used by other
-// automated tests yield newly-added files from readdir() even if the fsgofer
-// does not explicitly rewinddir(), but overlayfs does not.
-func TestHostOverlayfsRewindDir(t *testing.T) {
- runIntegrationTest(t, nil, "./test_rewinddir")
-}
-
-// Basic test for linkat(2). Syscall tests require CAP_DAC_READ_SEARCH and they
-// cannot use tricks like userns as root. For this reason, run a basic link test
-// to ensure some coverage.
-func TestLink(t *testing.T) {
- runIntegrationTest(t, nil, "./link_test")
-}
-
-// This test ensures we can run ping without errors.
-func TestPing4Loopback(t *testing.T) {
- if testutil.IsRunningWithHostNet() {
- // TODO(gvisor.dev/issue/5011): support ICMP sockets in hostnet and enable
- // this test.
- t.Skip("hostnet only supports TCP/UDP sockets, so ping is not supported.")
- }
-
- runIntegrationTest(t, nil, "./ping4.sh")
-}
-
-// This test ensures we can enable ipv6 on loopback and run ping6 without
-// errors.
-func TestPing6Loopback(t *testing.T) {
- if testutil.IsRunningWithHostNet() {
- // TODO(gvisor.dev/issue/5011): support ICMP sockets in hostnet and enable
- // this test.
- t.Skip("hostnet only supports TCP/UDP sockets, so ping6 is not supported.")
- }
-
- // The CAP_NET_ADMIN capability is required to use the `ip` utility, which
- // we use to enable ipv6 on loopback.
- //
- // By default, ipv6 loopback is not enabled by runsc, because docker does
- // not assign an ipv6 address to the test container.
- runIntegrationTest(t, []string{"NET_ADMIN"}, "./ping6.sh")
-}
-
-// This test checks that the owner of the sticky directory can delete files
-// inside it belonging to other users. It also checks that the owner of a file
-// can always delete its file when the file is inside a sticky directory owned
-// by another user.
-func TestStickyDir(t *testing.T) {
- if vfs2Used, err := dockerutil.UsingVFS2(); err != nil {
- t.Fatalf("failed to read config for runtime %s: %v", dockerutil.Runtime(), err)
- } else if !vfs2Used {
- t.Skip("sticky bit test fails on VFS1.")
- }
-
- runIntegrationTest(t, nil, "./test_sticky")
-}
-
-func runIntegrationTest(t *testing.T, capAdd []string, args ...string) {
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- opts := dockerutil.RunOpts{
- Image: "basic/integrationtest",
- WorkDir: "/root",
- CapAdd: capAdd,
- }
- if got, err := d.Run(ctx, opts, args...); err != nil {
- t.Fatalf("docker run failed: %v", err)
- } else if got != "" {
- t.Errorf("test failed:\n%s", got)
- }
-}
-
-// Test that a UDS can be created using overlay when the parent directory is in
-// the lower layer only (b/134090485).
-//
-// Prerequisite: the directory where the socket file is created must not have
-// been open for write before bind(2) is called.
-func TestBindOverlay(t *testing.T) {
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- // Run the container.
- got, err := d.Run(ctx, dockerutil.RunOpts{
- Image: "basic/ubuntu",
- }, "bash", "-c", "nc -q -1 -l -U /var/run/sock & p=$! && sleep 1 && echo foobar-asdf | nc -q 0 -U /var/run/sock && wait $p")
- if err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
-
- // Check the output contains what we want.
- if want := "foobar-asdf"; !strings.Contains(got, want) {
- t.Fatalf("docker run output is missing %q: %s", want, got)
- }
-}
-
-func TestStdios(t *testing.T) {
- if vfs2, err := dockerutil.UsingVFS2(); err != nil {
- t.Fatalf("Failed to read config for runtime %s: %v", dockerutil.Runtime(), err)
- } else if !vfs2 {
- t.Skip("VFS1 doesn't adjust stdios user")
- }
-
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- testStdios(t, func(user string, args ...string) (string, error) {
- defer d.CleanUp(ctx)
- opts := dockerutil.RunOpts{
- Image: "basic/alpine",
- User: user,
- }
- return d.Run(ctx, opts, args...)
- })
-}
-
-func TestStdiosExec(t *testing.T) {
- if vfs2, err := dockerutil.UsingVFS2(); err != nil {
- t.Fatalf("Failed to read config for runtime %s: %v", dockerutil.Runtime(), err)
- } else if !vfs2 {
- t.Skip("VFS1 doesn't adjust stdios user")
- }
-
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- runOpts := dockerutil.RunOpts{Image: "basic/alpine"}
- if err := d.Spawn(ctx, runOpts, "sleep", "100"); err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
-
- testStdios(t, func(user string, args ...string) (string, error) {
- opts := dockerutil.ExecOpts{User: user}
- return d.Exec(ctx, opts, args...)
- })
-}
-
-func testStdios(t *testing.T, run func(string, ...string) (string, error)) {
- const cmd = "stat -L /proc/self/fd/0 /proc/self/fd/1 /proc/self/fd/2 | grep 'Uid:'"
- got, err := run("123", "/bin/sh", "-c", cmd)
- if err != nil {
- t.Fatalf("docker exec failed: %v", err)
- }
- if len(got) == 0 {
- t.Errorf("Unexpected empty output from %q", cmd)
- }
- re := regexp.MustCompile(`Uid: \(\s*(\w+)\/.*\)`)
- for _, line := range strings.SplitN(got, "\n", 3) {
- t.Logf("stat -L: %s", line)
- matches := re.FindSubmatch([]byte(line))
- if len(matches) != 2 {
- t.Fatalf("wrong output format: %q: matches: %v", line, matches)
- }
- if want, got := "123", string(matches[1]); want != got {
- t.Errorf("wrong user, want: %q, got: %q", want, got)
- }
- }
-
-	// Check that stdout and stderr can be opened and written to. This checks
- // that ownership and permissions are correct inside gVisor.
- got, err = run("456", "/bin/sh", "-c", "echo foobar | tee /proc/self/fd/1 > /proc/self/fd/2")
- if err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
- t.Logf("echo foobar: %q", got)
- // Check it repeats twice, once for stdout and once for stderr.
- if want := "foobar\nfoobar\n"; want != got {
- t.Errorf("Wrong echo output, want: %q, got: %q", want, got)
- }
-
-	// Check that timestamps can be changed. Setting timestamps requires an extra
-	// write check _after_ the file was opened, and may fail if the underlying
-	// host file is not set up correctly.
- if _, err := run("789", "touch", "/proc/self/fd/0", "/proc/self/fd/1", "/proc/self/fd/2"); err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
-}
-
-func TestStdiosChown(t *testing.T) {
- if vfs2, err := dockerutil.UsingVFS2(); err != nil {
- t.Fatalf("Failed to read config for runtime %s: %v", dockerutil.Runtime(), err)
- } else if !vfs2 {
- t.Skip("VFS1 doesn't adjust stdios user")
- }
-
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- opts := dockerutil.RunOpts{Image: "basic/alpine"}
- if _, err := d.Run(ctx, opts, "chown", "123", "/proc/self/fd/0", "/proc/self/fd/1", "/proc/self/fd/2"); err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
-}
-
-func TestUnmount(t *testing.T) {
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- dir, err := ioutil.TempDir(testutil.TmpDir(), "sub-mount")
- if err != nil {
- t.Fatalf("TempDir(): %v", err)
- }
- opts := dockerutil.RunOpts{
- Image: "basic/alpine",
- Privileged: true, // Required for umount
- Mounts: []mount.Mount{
- {
- Type: mount.TypeBind,
- Source: dir,
- Target: "/foo",
- },
- },
- }
- if _, err := d.Run(ctx, opts, "umount", "/foo"); err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
-}
diff --git a/test/fsstress/BUILD b/test/fsstress/BUILD
deleted file mode 100644
index 402445a7e..000000000
--- a/test/fsstress/BUILD
+++ /dev/null
@@ -1,27 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_test(
- name = "fsstress_test",
- size = "large",
- srcs = [
- "fsstress_test.go",
- ],
- library = ":fsstress",
- tags = [
- # Requires docker and runsc to be configured before the test runs.
- "manual",
- "local",
- ],
- deps = [
- "//pkg/test/dockerutil",
- "//pkg/test/testutil",
- "@com_github_docker_docker//api/types/mount:go_default_library",
- ],
-)
-
-go_library(
- name = "fsstress",
- srcs = ["fsstress.go"],
-)
diff --git a/test/fsstress/fsstress.go b/test/fsstress/fsstress.go
deleted file mode 100644
index 464237e2d..000000000
--- a/test/fsstress/fsstress.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package fsstress is empty. See fsstress_test.go for description.
-package fsstress
diff --git a/test/fsstress/fsstress_test.go b/test/fsstress/fsstress_test.go
deleted file mode 100644
index d9513c42c..000000000
--- a/test/fsstress/fsstress_test.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package fsstress runs fsstress tool inside a docker container.
-package fsstress
-
-import (
- "context"
- "flag"
- "fmt"
- "io/ioutil"
- "math/rand"
- "os"
- "strconv"
- "strings"
- "testing"
- "time"
-
- "github.com/docker/docker/api/types/mount"
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/pkg/test/testutil"
-)
-
-func init() {
- rand.Seed(int64(time.Now().Nanosecond()))
-}
-
-func TestMain(m *testing.M) {
- dockerutil.EnsureSupportedDockerVersion()
- flag.Parse()
- os.Exit(m.Run())
-}
-
-type config struct {
- operations string
- processes string
- target string
- mounts []mount.Mount
-}
-
-func fsstress(t *testing.T, conf config) {
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- const image = "basic/fsstress"
- seed := strconv.FormatUint(uint64(rand.Uint32()), 10)
- args := []string{"-d", conf.target, "-n", conf.operations, "-p", conf.processes, "-s", seed, "-X"}
- opts := dockerutil.RunOpts{
- Image: image,
- Mounts: conf.mounts,
- }
- var mounts string
- if len(conf.mounts) > 0 {
-		for _, m := range conf.mounts {
-			mounts += fmt.Sprintf(" -v <any_dir>:%s", m.Target)
- }
- }
- t.Logf("Repro: docker run --rm --runtime=%s%s gvisor.dev/images/%s %s", dockerutil.Runtime(), mounts, image, strings.Join(args, " "))
- out, err := d.Run(ctx, opts, args...)
- if err != nil {
- t.Fatalf("docker run failed: %v\noutput: %s", err, out)
- }
-	// This is to catch cases where fsstress spews out error messages during
-	// cleanup but doesn't return an error.
- if len(out) > 0 {
- t.Fatalf("unexpected output: %s", out)
- }
-}
-
-func TestFsstressGofer(t *testing.T) {
- // This takes between 30-60s to run on my machine. Adjust as needed.
- cfg := config{
- operations: "500",
- processes: "20",
- target: "/test",
- }
- fsstress(t, cfg)
-}
-
-func TestFsstressGoferShared(t *testing.T) {
- dir, err := ioutil.TempDir(testutil.TmpDir(), "fsstress")
- if err != nil {
- t.Fatalf("ioutil.TempDir() failed: %v", err)
- }
- defer os.RemoveAll(dir)
-
- // This takes between 30-60s to run on my machine. Adjust as needed.
- cfg := config{
- operations: "500",
- processes: "20",
- target: "/test",
- mounts: []mount.Mount{
- {
- Source: dir,
- Target: "/test",
- Type: "bind",
- },
- },
- }
- fsstress(t, cfg)
-}
-
-func TestFsstressTmpfs(t *testing.T) {
-	// This takes about 10s to run on my machine. Adjust as needed.
- cfg := config{
- operations: "5000",
- processes: "20",
- target: "/tmp",
- }
- fsstress(t, cfg)
-}
diff --git a/test/fuse/BUILD b/test/fuse/BUILD
deleted file mode 100644
index 2885ffdb2..000000000
--- a/test/fuse/BUILD
+++ /dev/null
@@ -1,80 +0,0 @@
-load("//test/runner:defs.bzl", "syscall_test")
-
-package(licenses = ["notice"])
-
-# FIXME(b/190750110)
-#
-# syscall_test(
-# fuse = "True",
-# test = "//test/fuse/linux:stat_test",
-# )
-#
-# syscall_test(
-# fuse = "True",
-# test = "//test/fuse/linux:open_test",
-# )
-#
-# syscall_test(
-# fuse = "True",
-# test = "//test/fuse/linux:release_test",
-# )
-#
-# syscall_test(
-# fuse = "True",
-# test = "//test/fuse/linux:mknod_test",
-# )
-#
-# syscall_test(
-# fuse = "True",
-# test = "//test/fuse/linux:symlink_test",
-# )
-#
-# syscall_test(
-# fuse = "True",
-# test = "//test/fuse/linux:readlink_test",
-# )
-#
-# syscall_test(
-# fuse = "True",
-# test = "//test/fuse/linux:mkdir_test",
-# )
-#
-# syscall_test(
-# fuse = "True",
-# test = "//test/fuse/linux:read_test",
-# )
-#
-# syscall_test(
-# fuse = "True",
-# test = "//test/fuse/linux:write_test",
-# )
-#
-# syscall_test(
-# fuse = "True",
-# test = "//test/fuse/linux:rmdir_test",
-# )
-#
-# syscall_test(
-# fuse = "True",
-# test = "//test/fuse/linux:readdir_test",
-# )
-#
-# syscall_test(
-# fuse = "True",
-# test = "//test/fuse/linux:create_test",
-# )
-#
-# syscall_test(
-# fuse = "True",
-# test = "//test/fuse/linux:unlink_test",
-# )
-#
-# syscall_test(
-# fuse = "True",
-# test = "//test/fuse/linux:setstat_test",
-# )
-
-syscall_test(
- fuse = "True",
- test = "//test/fuse/linux:mount_test",
-)
diff --git a/test/fuse/README.md b/test/fuse/README.md
deleted file mode 100644
index 65add57e2..000000000
--- a/test/fuse/README.md
+++ /dev/null
@@ -1,188 +0,0 @@
-# gVisor FUSE Test Suite
-
-This is an integration test suite for the fuse(4) filesystem. It runs in a
-gVisor sandboxed container with VFS2 and FUSE enabled.
-
-This document describes the FUSE integration test framework, how to use it,
-and the guidelines to follow when adding new testing features.
-
-## Integration Test Framework
-
-By inheriting the `FuseTest` class defined in `linux/fuse_base.h`, every test
-fixture runs in an environment where `mount_point_` is mounted by a fake FUSE
-server. The fixture creates a `socketpair(2)` to send and receive control
-commands and data between the client and the server. Because the FUSE server
-runs in a background thread, gTest cannot catch its assertion failures
-immediately. Thus, the `TearDown()` function sends a command to the FUSE server
-to check that all gTest assertions in the server succeeded and that all
-requests and preset responses were consumed.
-
-## Communication Diagram
-
-The diagram below describes how a testing thread communicates with the FUSE
-server during an integration test.
-
-In the following diagram, `>` means entering a function, `<` means leaving it,
-and `=` indicates entering and leaving in sequence. Execution does not
-necessarily follow the diagram exactly, due to the nature of a multi-threaded
-system; however, it is still helpful to know when the client waits for the
-server to complete a command and when the server awaits the next instruction.
-
-```
-| Client (Testing Thread) | Server (FUSE Server Thread)
-| |
-| >TEST_F() |
-| >SetUp() |
-| =MountFuse() |
-| >SetUpFuseServer() |
-| [create communication socket]|
-| =fork() | =fork()
-| [wait server complete] |
-| | =ServerConsumeFuseInit()
-| | =ServerCompleteWith()
-| <SetUpFuseServer() |
-| <SetUp() |
-| [testing main] |
-| | >ServerFuseLoop()
-| | [poll on socket and fd]
-| >SetServerResponse() |
-| [write data to socket] |
-| [wait server complete] |
-| | [socket event occurs]
-| | >ServerHandleCommand()
-| | >ServerReceiveResponse()
-| | [read data from socket]
-| | [save data to memory]
-| | <ServerReceiveResponse()
-| | =ServerCompleteWith()
-| <SetServerResponse() |
-| | <ServerHandleCommand()
-| >[Do fs operation] |
-| [wait for fs response] |
-| | [fd event occurs]
-| | >ServerProcessFuseRequest()
-| | =[read fs request]
-| | =[save fs request to memory]
-| | =[write fs response]
-| <[Do fs operation] |
-| | <ServerProcessFuseRequest()
-| |
-| =[Test fs operation result] |
-| |
-| >GetServerActualRequest() |
-| [write data to socket] |
-| [wait data from server] |
-| | [socket event occurs]
-| | >ServerHandleCommand()
-| | >ServerSendReceivedRequest()
-| | [write data to socket]
-| [read data from socket] |
-| [wait server complete] |
-| | <ServerSendReceivedRequest()
-| | =ServerCompleteWith()
-| <GetServerActualRequest() |
-| | <ServerHandleCommand()
-| |
-| =[Test actual request] |
-| |
-| >TearDown() |
-| ... |
-| >GetServerNumUnsentResponses() |
-| [write data to socket] |
-| [wait server complete] |
-| | [socket event arrive]
-| | >ServerHandleCommand()
-| | >ServerSendData()
-| | [write data to socket]
-| | <ServerSendData()
-| | =ServerCompleteWith()
-| [read data from socket] |
-| [test if all succeeded] |
-| <GetServerNumUnsentResponses() |
-| | <ServerHandleCommand()
-| =UnmountFuse() |
-| <TearDown() |
-| <TEST_F() |
-```
-
-## Running the tests
-
-FUSE tests are built on the syscall test framework and generate targets only
-with vfs2 and fuse enabled. The corresponding targets end in `_fuse`.
-
-For example, to run fuse test in `stat_test.cc`:
-
-```bash
-$ bazel test //test/fuse:stat_test_runsc_ptrace_vfs2_fuse
-```
-
-Test all targets tagged with fuse:
-
-```bash
-$ bazel test --test_tag_filters=fuse //test/fuse/...
-```
-
-## Writing a new FUSE test
-
-1. Add test targets in `BUILD` and `linux/BUILD`.
-2. Inherit your test from the `FuseTest` base class. It allows you to:
-   - Fork a fake FUSE server in the background during each test setup.
-   - Create a pair of sockets for communication and provide utility
-     functions.
-   - Stop the FUSE server and check whether any error occurred in it after
-     the test completes.
-3. Build the expected opcode-response pairs of your FUSE operation.
-4. Call `SetServerResponse()` to preset the next expected opcode and response.
-5. Do real filesystem operations (FUSE is mounted at `mount_point_`).
-6. Check FUSE response and/or errors.
-7. Retrieve FUSE request by `GetServerActualRequest()`.
-8. Check that the request is as expected (see the sketch below).
-
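For orientation, here is a condensed, hypothetical sketch of these steps, modeled on the existing tests such as `create_test.cc`. The fixture name `LookupExampleTest`, the test name, and the file name are invented for illustration; the exact iovec layout of the FUSE_LOOKUP request and the errno propagated to `stat(2)` follow the pattern of those tests rather than a verified run:

```cc
#include <errno.h>
#include <linux/fuse.h>
#include <sys/stat.h>

#include <string>
#include <vector>

#include "gtest/gtest.h"
#include "test/fuse/linux/fuse_base.h"
#include "test/util/fs_util.h"
#include "test/util/fuse_util.h"
#include "test/util/test_util.h"

namespace gvisor {
namespace testing {
namespace {

// Hypothetical fixture: inherits FuseTest so mount_point_ is served by the
// fake FUSE server (step 2).
class LookupExampleTest : public FuseTest {
 protected:
  const std::string test_file_ = "example_file";
};

TEST_F(LookupExampleTest, FileDoesNotExist) {
  const std::string path = JoinPath(mount_point_.path().c_str(), test_file_);

  // Steps 3-4: preset the fake server reply for the upcoming FUSE_LOOKUP.
  struct fuse_out_header out_header = {
      .len = sizeof(struct fuse_out_header),
      .error = -ENOENT,
  };
  auto iov_out = FuseGenerateIovecs(out_header);
  SetServerResponse(FUSE_LOOKUP, iov_out);

  // Steps 5-6: perform the real filesystem operation and check the outcome.
  struct stat st;
  EXPECT_THAT(stat(path.c_str(), &st), SyscallFailsWithErrno(ENOENT));

  // Steps 7-8: retrieve the request the server actually saw and verify it.
  struct fuse_in_header in_header;
  std::vector<char> name(test_file_.size() + 1);
  auto iov_in = FuseGenerateIovecs(in_header, name);
  GetServerActualRequest(iov_in);
  EXPECT_EQ(in_header.opcode, FUSE_LOOKUP);
  EXPECT_EQ(std::string(name.data()), test_file_);
}

}  // namespace
}  // namespace testing
}  // namespace gvisor
```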
-A few customized matchers used in the syscall tests are encouraged for testing
-the outcome of filesystem operations, such as:
-
-```cc
-SyscallSucceeds()
-SyscallSucceedsWithValue(...)
-SyscallFails()
-SyscallFailsWithErrno(...)
-```
-
-Please refer to [test/syscalls/README.md](../syscalls/README.md) for further
-details.
-
-## Writing a new FuseTestCmd
-
-A `FuseTestCmd` is a command in the control protocol used for communication
-between the testing thread and the FUSE server. Such commands are sent from the
-testing thread to the FUSE server to set up, control, or inspect the behavior
-of the FUSE server in response to a sequence of FUSE requests.
-
-The lifecycle of a command consists of the following steps:
-
-1. The testing thread sends a `FuseTestCmd` via socket and waits for
- completion.
-2. The FUSE server receives the command and performs the corresponding action.
-3. (Optional) The testing thread reads data from socket.
-4. The FUSE server sends a success indicator via socket after processing.
-5. The testing thread gets the success signal and continues testing.
-
-The success indicator, i.e. `WaitServerComplete()`, is crucial at the end of
-each `FuseTestCmd` sent from the testing thread, because we don't want to begin
-a filesystem operation before the requests have been completely set up. Also,
-to keep FUSE interactions testable in a sequential manner, concurrent requests
-are not currently supported.
-
-To add a new `FuseTestCmd`, follow these steps (a sketch follows the list):
-
-1. Add a new `FuseTestCmd` enum class item in `linux/fuse_base.h`.
-2. Add a `SetServerXXX()` or `GetServerXXX()` public function in `FuseTest`.
-   This is what the testing thread calls to send the control message. Define
-   how many bytes you want to send along with the command and what you expect
-   to receive. Finally, it should block and wait for a success indicator from
-   the FUSE server.
-3. Add handler logic in the switch statement of `ServerHandleCommand()`. Use
-   `ServerSendData()` or declare a new private function such as
-   `ServerReceiveXXX()` or `ServerSendXXX()`. It must be private since only the
-   FUSE server (forked from the `FuseTest` base class) calls it. This is the
-   server part of the specific `FuseTestCmd`, and the format of the data must
-   be consistent with what the client expects in the previous step.
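As a hypothetical illustration of these three steps, the fragments below add a command that reports how many requests were skipped. The enum item `kGetNumSkippedRequests`, the getter `GetServerNumSkippedRequests()`, and the `skipped_requests_` counter do not exist in the framework; they are invented here to show where each piece would go, as excerpts from `linux/fuse_base.h` and `linux/fuse_base.cc` rather than a standalone program:

```cc
// 1. linux/fuse_base.h: extend the control-protocol enum with the new
//    (hypothetical) command.
enum class FuseTestCmd {
  kSetResponse = 0,
  kSetInodeLookup,
  kGetRequest,
  kGetNumUnconsumedRequests,
  kGetNumUnsentResponses,
  kGetTotalReceivedBytes,
  kSkipRequest,
  kGetNumSkippedRequests,  // New, hypothetical command.
};

// 2. Testing-thread side (linux/fuse_base.cc): a public getter that reuses
//    GetServerData(), which writes the command, reads the 4-byte reply, and
//    blocks on the success indicator.
uint32_t FuseTest::GetServerNumSkippedRequests() {
  return GetServerData(
      static_cast<uint32_t>(FuseTestCmd::kGetNumSkippedRequests));
}

// 3. FUSE-server side: route the new command in ServerHandleCommand(). Only
//    the relevant part of the modified function is shown; existing cases are
//    elided, and skipped_requests_ would be a counter bumped by
//    ServerSkipReceivedRequest().
void FuseTest::ServerHandleCommand() {
  uint32_t cmd;
  EXPECT_THAT(RetryEINTR(read)(sock_[1], &cmd, sizeof(cmd)),
              SyscallSucceedsWithValue(sizeof(cmd)));

  switch (static_cast<FuseTestCmd>(cmd)) {
    // ... existing cases elided ...
    case FuseTestCmd::kGetNumSkippedRequests:
      ServerSendData(static_cast<uint32_t>(skipped_requests_));
      break;
    default:
      FAIL() << "Unknown FuseTestCmd " << cmd;
      break;
  }

  // The trailing success indicator is unchanged.
  ServerCompleteWith(!HasFailure());
}
```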
diff --git a/test/fuse/linux/BUILD b/test/fuse/linux/BUILD
deleted file mode 100644
index 2f745bd47..000000000
--- a/test/fuse/linux/BUILD
+++ /dev/null
@@ -1,243 +0,0 @@
-load("//tools:defs.bzl", "cc_binary", "cc_library", "gtest")
-
-package(
- default_visibility = ["//:sandbox"],
- licenses = ["notice"],
-)
-
-cc_binary(
- name = "stat_test",
- testonly = 1,
- srcs = ["stat_test.cc"],
- deps = [
- gtest,
- ":fuse_fd_util",
- "//test/util:cleanup",
- "//test/util:fs_util",
- "//test/util:fuse_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "open_test",
- testonly = 1,
- srcs = ["open_test.cc"],
- deps = [
- gtest,
- ":fuse_base",
- "//test/util:fuse_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "release_test",
- testonly = 1,
- srcs = ["release_test.cc"],
- deps = [
- gtest,
- ":fuse_base",
- "//test/util:fuse_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "mknod_test",
- testonly = 1,
- srcs = ["mknod_test.cc"],
- deps = [
- gtest,
- ":fuse_base",
- "//test/util:fuse_util",
- "//test/util:temp_umask",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "symlink_test",
- testonly = 1,
- srcs = ["symlink_test.cc"],
- deps = [
- gtest,
- ":fuse_base",
- "//test/util:fuse_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "readlink_test",
- testonly = 1,
- srcs = ["readlink_test.cc"],
- deps = [
- gtest,
- ":fuse_base",
- "//test/util:fuse_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "mkdir_test",
- testonly = 1,
- srcs = ["mkdir_test.cc"],
- deps = [
- gtest,
- ":fuse_base",
- "//test/util:fuse_util",
- "//test/util:temp_umask",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "setstat_test",
- testonly = 1,
- srcs = ["setstat_test.cc"],
- deps = [
- gtest,
- ":fuse_fd_util",
- "//test/util:cleanup",
- "//test/util:fs_util",
- "//test/util:fuse_util",
- "//test/util:temp_umask",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "rmdir_test",
- testonly = 1,
- srcs = ["rmdir_test.cc"],
- deps = [
- gtest,
- ":fuse_base",
- "//test/util:fs_util",
- "//test/util:fuse_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "readdir_test",
- testonly = 1,
- srcs = ["readdir_test.cc"],
- deps = [
- gtest,
- ":fuse_base",
- "//test/util:fs_util",
- "//test/util:fuse_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_library(
- name = "fuse_base",
- testonly = 1,
- srcs = ["fuse_base.cc"],
- hdrs = ["fuse_base.h"],
- deps = [
- gtest,
- "//test/util:fuse_util",
- "//test/util:posix_error",
- "//test/util:temp_path",
- "//test/util:test_util",
- "@com_google_absl//absl/strings:str_format",
- ],
-)
-
-cc_library(
- name = "fuse_fd_util",
- testonly = 1,
- srcs = ["fuse_fd_util.cc"],
- hdrs = ["fuse_fd_util.h"],
- deps = [
- gtest,
- ":fuse_base",
- "//test/util:cleanup",
- "//test/util:file_descriptor",
- "//test/util:fuse_util",
- "//test/util:posix_error",
- ],
-)
-
-cc_binary(
- name = "read_test",
- testonly = 1,
- srcs = ["read_test.cc"],
- deps = [
- gtest,
- ":fuse_base",
- "//test/util:fuse_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "write_test",
- testonly = 1,
- srcs = ["write_test.cc"],
- deps = [
- gtest,
- ":fuse_base",
- "//test/util:fuse_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "create_test",
- testonly = 1,
- srcs = ["create_test.cc"],
- deps = [
- gtest,
- ":fuse_base",
- "//test/util:fs_util",
- "//test/util:fuse_util",
- "//test/util:temp_umask",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "unlink_test",
- testonly = 1,
- srcs = ["unlink_test.cc"],
- deps = [
- gtest,
- ":fuse_base",
- "//test/util:fuse_util",
- "//test/util:temp_umask",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "mount_test",
- testonly = 1,
- srcs = ["mount_test.cc"],
- deps = [
- gtest,
- "//test/util:mount_util",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
diff --git a/test/fuse/linux/create_test.cc b/test/fuse/linux/create_test.cc
deleted file mode 100644
index 9a0219a58..000000000
--- a/test/fuse/linux/create_test.cc
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <linux/fuse.h>
-#include <sys/stat.h>
-#include <sys/statfs.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <string>
-
-#include "gtest/gtest.h"
-#include "test/fuse/linux/fuse_base.h"
-#include "test/util/fs_util.h"
-#include "test/util/fuse_util.h"
-#include "test/util/temp_umask.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-class CreateTest : public FuseTest {
- protected:
- const std::string test_file_name_ = "test_file";
- const mode_t mode = S_IFREG | S_IRWXU | S_IRWXG | S_IRWXO;
-};
-
-TEST_F(CreateTest, CreateFile) {
- const std::string test_file_path =
- JoinPath(mount_point_.path().c_str(), test_file_name_);
-
- // Ensure the file doesn't exist.
- struct fuse_out_header out_header = {
- .len = sizeof(struct fuse_out_header),
- .error = -ENOENT,
- };
- auto iov_out = FuseGenerateIovecs(out_header);
- SetServerResponse(FUSE_LOOKUP, iov_out);
-
-  // creat(2) is equivalent to open(2) with open_flags O_CREAT | O_WRONLY | O_TRUNC.
- const mode_t new_mask = S_IWGRP | S_IWOTH;
- const int open_flags = O_CREAT | O_WRONLY | O_TRUNC;
- out_header.error = 0;
- out_header.len = sizeof(struct fuse_out_header) +
- sizeof(struct fuse_entry_out) + sizeof(struct fuse_open_out);
- struct fuse_entry_out entry_payload = DefaultEntryOut(mode & ~new_mask, 2);
- struct fuse_open_out out_payload = {
- .fh = 1,
- .open_flags = open_flags,
- };
- iov_out = FuseGenerateIovecs(out_header, entry_payload, out_payload);
- SetServerResponse(FUSE_CREATE, iov_out);
-
- // kernfs generates a successive FUSE_OPEN after the file is created. Linux's
- // fuse kernel module will not send this FUSE_OPEN after creat(2).
- out_header.len =
- sizeof(struct fuse_out_header) + sizeof(struct fuse_open_out);
- iov_out = FuseGenerateIovecs(out_header, out_payload);
- SetServerResponse(FUSE_OPEN, iov_out);
-
- int fd;
- TempUmask mask(new_mask);
- EXPECT_THAT(fd = creat(test_file_path.c_str(), mode), SyscallSucceeds());
- EXPECT_THAT(fcntl(fd, F_GETFL),
- SyscallSucceedsWithValue(open_flags & O_ACCMODE));
-
- struct fuse_in_header in_header;
- struct fuse_create_in in_payload;
- std::vector<char> name(test_file_name_.size() + 1);
- auto iov_in = FuseGenerateIovecs(in_header, in_payload, name);
-
- // Skip the request of FUSE_LOOKUP.
- SkipServerActualRequest();
-
- // Get the first FUSE_CREATE.
- GetServerActualRequest(iov_in);
- EXPECT_EQ(in_header.len, sizeof(in_header) + sizeof(in_payload) +
- test_file_name_.size() + 1);
- EXPECT_EQ(in_header.opcode, FUSE_CREATE);
- EXPECT_EQ(in_payload.flags, open_flags);
- EXPECT_EQ(in_payload.mode, mode & ~new_mask);
- EXPECT_EQ(in_payload.umask, new_mask);
- EXPECT_EQ(std::string(name.data()), test_file_name_);
-
- // Get the successive FUSE_OPEN.
- struct fuse_open_in in_payload_open;
- iov_in = FuseGenerateIovecs(in_header, in_payload_open);
- GetServerActualRequest(iov_in);
- EXPECT_EQ(in_header.len, sizeof(in_header) + sizeof(in_payload_open));
- EXPECT_EQ(in_header.opcode, FUSE_OPEN);
- EXPECT_EQ(in_payload_open.flags, open_flags & O_ACCMODE);
-
- EXPECT_THAT(close(fd), SyscallSucceeds());
- // Skip the FUSE_RELEASE.
- SkipServerActualRequest();
-}
-
-TEST_F(CreateTest, CreateFileAlreadyExists) {
- const std::string test_file_path =
- JoinPath(mount_point_.path().c_str(), test_file_name_);
-
- const int open_flags = O_CREAT | O_EXCL;
-
- SetServerInodeLookup(test_file_name_);
-
- EXPECT_THAT(open(test_file_path.c_str(), mode, open_flags),
- SyscallFailsWithErrno(EEXIST));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/fuse/linux/fuse_base.cc b/test/fuse/linux/fuse_base.cc
deleted file mode 100644
index 5b45804e1..000000000
--- a/test/fuse/linux/fuse_base.cc
+++ /dev/null
@@ -1,447 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/fuse/linux/fuse_base.h"
-
-#include <fcntl.h>
-#include <linux/fuse.h>
-#include <poll.h>
-#include <sys/mount.h>
-#include <sys/socket.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <sys/uio.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "absl/strings/str_format.h"
-#include "test/util/fuse_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-void FuseTest::SetUp() {
- MountFuse();
- SetUpFuseServer();
-}
-
-void FuseTest::TearDown() {
- EXPECT_EQ(GetServerNumUnconsumedRequests(), 0);
- EXPECT_EQ(GetServerNumUnsentResponses(), 0);
- UnmountFuse();
-}
-
-// Sends 3 parts of data to the FUSE server:
-// 1. The `kSetResponse` command
-// 2. The expected opcode
-// 3. The fake FUSE response
-// Then waits for the FUSE server to notify its completion.
-void FuseTest::SetServerResponse(uint32_t opcode,
- std::vector<struct iovec>& iovecs) {
- uint32_t cmd = static_cast<uint32_t>(FuseTestCmd::kSetResponse);
- EXPECT_THAT(RetryEINTR(write)(sock_[0], &cmd, sizeof(cmd)),
- SyscallSucceedsWithValue(sizeof(cmd)));
-
- EXPECT_THAT(RetryEINTR(write)(sock_[0], &opcode, sizeof(opcode)),
- SyscallSucceedsWithValue(sizeof(opcode)));
-
- EXPECT_THAT(RetryEINTR(writev)(sock_[0], iovecs.data(), iovecs.size()),
- SyscallSucceeds());
-
- WaitServerComplete();
-}
-
-// Waits for the FUSE server to finish its blocking job and checks that it
-// completed without errors.
-void FuseTest::WaitServerComplete() {
- uint32_t success;
- EXPECT_THAT(RetryEINTR(read)(sock_[0], &success, sizeof(success)),
- SyscallSucceedsWithValue(sizeof(success)));
- ASSERT_EQ(success, 1);
-}
-
-// Sends the `kGetRequest` command to the FUSE server, then reads the next
-// request into the iovec struct. Calls to this function should be made in the
-// same order as the corresponding SetServerResponse() calls.
-void FuseTest::GetServerActualRequest(std::vector<struct iovec>& iovecs) {
- uint32_t cmd = static_cast<uint32_t>(FuseTestCmd::kGetRequest);
- EXPECT_THAT(RetryEINTR(write)(sock_[0], &cmd, sizeof(cmd)),
- SyscallSucceedsWithValue(sizeof(cmd)));
-
- EXPECT_THAT(RetryEINTR(readv)(sock_[0], iovecs.data(), iovecs.size()),
- SyscallSucceeds());
-
- WaitServerComplete();
-}
-
-// Sends a FuseTestCmd command to the FUSE server, reads from the socket, and
-// returns the corresponding data.
-uint32_t FuseTest::GetServerData(uint32_t cmd) {
- uint32_t data;
- EXPECT_THAT(RetryEINTR(write)(sock_[0], &cmd, sizeof(cmd)),
- SyscallSucceedsWithValue(sizeof(cmd)));
-
- EXPECT_THAT(RetryEINTR(read)(sock_[0], &data, sizeof(data)),
- SyscallSucceedsWithValue(sizeof(data)));
-
- WaitServerComplete();
- return data;
-}
-
-uint32_t FuseTest::GetServerNumUnconsumedRequests() {
- return GetServerData(
- static_cast<uint32_t>(FuseTestCmd::kGetNumUnconsumedRequests));
-}
-
-uint32_t FuseTest::GetServerNumUnsentResponses() {
- return GetServerData(
- static_cast<uint32_t>(FuseTestCmd::kGetNumUnsentResponses));
-}
-
-uint32_t FuseTest::GetServerTotalReceivedBytes() {
- return GetServerData(
- static_cast<uint32_t>(FuseTestCmd::kGetTotalReceivedBytes));
-}
-
-// Sends the `kSkipRequest` command to the FUSE server, which skips the
-// currently stored request data.
-void FuseTest::SkipServerActualRequest() {
- uint32_t cmd = static_cast<uint32_t>(FuseTestCmd::kSkipRequest);
- EXPECT_THAT(RetryEINTR(write)(sock_[0], &cmd, sizeof(cmd)),
- SyscallSucceedsWithValue(sizeof(cmd)));
-
- WaitServerComplete();
-}
-
-// Sends the `kSetInodeLookup` command, expected mode, and the path of the
-// inode to create under the mount point.
-void FuseTest::SetServerInodeLookup(const std::string& path, mode_t mode,
- uint64_t size) {
- uint32_t cmd = static_cast<uint32_t>(FuseTestCmd::kSetInodeLookup);
- EXPECT_THAT(RetryEINTR(write)(sock_[0], &cmd, sizeof(cmd)),
- SyscallSucceedsWithValue(sizeof(cmd)));
-
- EXPECT_THAT(RetryEINTR(write)(sock_[0], &mode, sizeof(mode)),
- SyscallSucceedsWithValue(sizeof(mode)));
-
- EXPECT_THAT(RetryEINTR(write)(sock_[0], &size, sizeof(size)),
- SyscallSucceedsWithValue(sizeof(size)));
-
-  // Pad 1 byte for the null terminator of the C string.
- EXPECT_THAT(RetryEINTR(write)(sock_[0], path.c_str(), path.size() + 1),
- SyscallSucceedsWithValue(path.size() + 1));
-
- WaitServerComplete();
-}
-
-void FuseTest::MountFuse(const char* mountOpts) {
- EXPECT_THAT(dev_fd_ = open("/dev/fuse", O_RDWR), SyscallSucceeds());
-
- std::string mount_opts = absl::StrFormat("fd=%d,%s", dev_fd_, mountOpts);
- mount_point_ = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- EXPECT_THAT(mount("fuse", mount_point_.path().c_str(), "fuse",
- MS_NODEV | MS_NOSUID, mount_opts.c_str()),
- SyscallSucceeds());
-}
-
-void FuseTest::UnmountFuse() {
- EXPECT_THAT(umount(mount_point_.path().c_str()), SyscallSucceeds());
- // TODO(gvisor.dev/issue/3330): ensure the process is terminated successfully.
-}
-
-// Consumes the first FUSE request and returns the corresponding PosixError.
-PosixError FuseTest::ServerConsumeFuseInit(
- const struct fuse_init_out* out_payload) {
- std::vector<char> buf(FUSE_MIN_READ_BUFFER);
- RETURN_ERROR_IF_SYSCALL_FAIL(
- RetryEINTR(read)(dev_fd_, buf.data(), buf.size()));
-
- struct fuse_out_header out_header = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_init_out),
- .error = 0,
- .unique = 2,
- };
-  // Returns a fake fuse_init_out with version 7.0 to avoid an ECONNREFUSED
-  // error during the initialization of the FUSE connection.
- auto iov_out = FuseGenerateIovecs(
- out_header, *const_cast<struct fuse_init_out*>(out_payload));
-
- RETURN_ERROR_IF_SYSCALL_FAIL(
- RetryEINTR(writev)(dev_fd_, iov_out.data(), iov_out.size()));
- return NoError();
-}
-
-// Reads 1 expected opcode and a fake response from the socket and saves them
-// into the serial buffer of this testing instance.
-void FuseTest::ServerReceiveResponse() {
- ssize_t len;
- uint32_t opcode;
- std::vector<char> buf(FUSE_MIN_READ_BUFFER);
- EXPECT_THAT(RetryEINTR(read)(sock_[1], &opcode, sizeof(opcode)),
- SyscallSucceedsWithValue(sizeof(opcode)));
-
- EXPECT_THAT(len = RetryEINTR(read)(sock_[1], buf.data(), buf.size()),
- SyscallSucceeds());
-
- responses_.AddMemBlock(opcode, buf.data(), len);
-}
-
-// Writes a uint32_t success indicator through the socket.
-void FuseTest::ServerCompleteWith(bool success) {
- uint32_t data = success ? 1 : 0;
- ServerSendData(data);
-}
-
-// ServerFuseLoop is the implementation of the fake FUSE server. It monitors 2
-// file descriptors: /dev/fuse and sock_[1]. Events from /dev/fuse are FUSE
-// requests and events from sock_[1] are FUSE testing commands, each led by a
-// FuseTestCmd value that indicates the command.
-void FuseTest::ServerFuseLoop() {
- const int nfds = 2;
- struct pollfd fds[nfds] = {
- {
- .fd = dev_fd_,
- .events = POLL_IN | POLLHUP | POLLERR | POLLNVAL,
- },
- {
- .fd = sock_[1],
- .events = POLL_IN | POLLHUP | POLLERR | POLLNVAL,
- },
- };
-
- while (true) {
- ASSERT_THAT(poll(fds, nfds, -1), SyscallSucceeds());
-
- for (int fd_idx = 0; fd_idx < nfds; ++fd_idx) {
- if (fds[fd_idx].revents == 0) continue;
-
- ASSERT_EQ(fds[fd_idx].revents, POLL_IN);
- if (fds[fd_idx].fd == sock_[1]) {
- ServerHandleCommand();
- } else if (fds[fd_idx].fd == dev_fd_) {
- ServerProcessFuseRequest();
- }
- }
- }
-}
-
-// SetUpFuseServer creates a socketpair and forks the process. The parent
-// becomes the testing thread and the child becomes the FUSE server running in
-// the background. The two are connected via the socketpair: sock_[0] is opened
-// in the testing thread and sock_[1] is opened in the FUSE server.
-void FuseTest::SetUpFuseServer(const struct fuse_init_out* payload) {
- ASSERT_THAT(socketpair(AF_UNIX, SOCK_STREAM, 0, sock_), SyscallSucceeds());
-
- switch (fork()) {
- case -1:
- GTEST_FAIL();
- return;
- case 0:
- break;
- default:
- ASSERT_THAT(close(sock_[1]), SyscallSucceeds());
- WaitServerComplete();
- return;
- }
-
- // Begin child thread, i.e. the FUSE server.
- ASSERT_THAT(close(sock_[0]), SyscallSucceeds());
- ServerCompleteWith(ServerConsumeFuseInit(payload).ok());
- ServerFuseLoop();
- _exit(0);
-}
-
-void FuseTest::ServerSendData(uint32_t data) {
- EXPECT_THAT(RetryEINTR(write)(sock_[1], &data, sizeof(data)),
- SyscallSucceedsWithValue(sizeof(data)));
-}
-
-// Reads the FuseTestCmd sent from the testing thread and routes it to the
-// correct handler. Since each command should be a blocking operation, a
-// `ServerCompleteWith()` call is required after the switch statement.
-void FuseTest::ServerHandleCommand() {
- uint32_t cmd;
- EXPECT_THAT(RetryEINTR(read)(sock_[1], &cmd, sizeof(cmd)),
- SyscallSucceedsWithValue(sizeof(cmd)));
-
- switch (static_cast<FuseTestCmd>(cmd)) {
- case FuseTestCmd::kSetResponse:
- ServerReceiveResponse();
- break;
- case FuseTestCmd::kSetInodeLookup:
- ServerReceiveInodeLookup();
- break;
- case FuseTestCmd::kGetRequest:
- ServerSendReceivedRequest();
- break;
- case FuseTestCmd::kGetTotalReceivedBytes:
- ServerSendData(static_cast<uint32_t>(requests_.UsedBytes()));
- break;
- case FuseTestCmd::kGetNumUnconsumedRequests:
- ServerSendData(static_cast<uint32_t>(requests_.RemainingBlocks()));
- break;
- case FuseTestCmd::kGetNumUnsentResponses:
- ServerSendData(static_cast<uint32_t>(responses_.RemainingBlocks()));
- break;
- case FuseTestCmd::kSkipRequest:
- ServerSkipReceivedRequest();
- break;
- default:
- FAIL() << "Unknown FuseTestCmd " << cmd;
- break;
- }
-
- ServerCompleteWith(!HasFailure());
-}
-
-// Reads the expected file mode and the path of one file. Crafts a basic
-// `fuse_entry_out` memory block and inserts it into a map for future use.
-// The FUSE server will always return this response when a FUSE_LOOKUP
-// request with this specific path comes in.
-void FuseTest::ServerReceiveInodeLookup() {
- mode_t mode;
- uint64_t size;
- std::vector<char> buf(FUSE_MIN_READ_BUFFER);
-
- EXPECT_THAT(RetryEINTR(read)(sock_[1], &mode, sizeof(mode)),
- SyscallSucceedsWithValue(sizeof(mode)));
-
- EXPECT_THAT(RetryEINTR(read)(sock_[1], &size, sizeof(size)),
- SyscallSucceedsWithValue(sizeof(size)));
-
- EXPECT_THAT(RetryEINTR(read)(sock_[1], buf.data(), buf.size()),
- SyscallSucceeds());
-
- std::string path(buf.data());
-
- uint32_t out_len =
- sizeof(struct fuse_out_header) + sizeof(struct fuse_entry_out);
- struct fuse_out_header out_header = {
- .len = out_len,
- .error = 0,
- };
- struct fuse_entry_out out_payload = DefaultEntryOut(mode, nodeid_);
-  // Since this is only used in tests, nodeid_ is simply incremented by 1 to
-  // keep the node ID of each distinct path unique.
- ++nodeid_;
-
- // Set the size.
- out_payload.attr.size = size;
-
- memcpy(buf.data(), &out_header, sizeof(out_header));
- memcpy(buf.data() + sizeof(out_header), &out_payload, sizeof(out_payload));
- lookups_.AddMemBlock(FUSE_LOOKUP, buf.data(), out_len);
- lookup_map_[path] = lookups_.Next();
-}
-
-// Sends the received request pointed to by the current cursor and advances
-// the cursor.
-void FuseTest::ServerSendReceivedRequest() {
- if (requests_.End()) {
- FAIL() << "No more received request.";
- return;
- }
- auto mem_block = requests_.Next();
- EXPECT_THAT(
- RetryEINTR(write)(sock_[1], requests_.DataAtOffset(mem_block.offset),
- mem_block.len),
- SyscallSucceedsWithValue(mem_block.len));
-}
-
-// Skips the request pointed to by the current cursor.
-void FuseTest::ServerSkipReceivedRequest() {
- if (requests_.End()) {
- FAIL() << "No more received request.";
- return;
- }
- requests_.Next();
-}
-
-// Handles a FUSE request. Reads the request from /dev/fuse, checks that it has
-// the expected opcode, and responds with the saved fake FUSE response.
-// Each FUSE request is copied to the serial buffer and can be retrieved one by
-// one by calling GetServerActualRequest from the testing thread.
-void FuseTest::ServerProcessFuseRequest() {
- ssize_t len;
- std::vector<char> buf(FUSE_MIN_READ_BUFFER);
-
- // Read FUSE request.
- EXPECT_THAT(len = RetryEINTR(read)(dev_fd_, buf.data(), buf.size()),
- SyscallSucceeds());
- fuse_in_header* in_header = reinterpret_cast<fuse_in_header*>(buf.data());
-
- // Check if this is a preset FUSE_LOOKUP path.
- if (in_header->opcode == FUSE_LOOKUP) {
- std::string path(buf.data() + sizeof(struct fuse_in_header));
- auto it = lookup_map_.find(path);
- if (it != lookup_map_.end()) {
- // Matches a preset path. Reply with fake data and skip saving the
- // request.
- ServerRespondFuseSuccess(lookups_, it->second, in_header->unique);
- return;
- }
- }
-
- requests_.AddMemBlock(in_header->opcode, buf.data(), len);
-
- if (in_header->opcode == FUSE_RELEASE || in_header->opcode == FUSE_RELEASEDIR)
- return;
- // Check if there is a corresponding response.
- if (responses_.End()) {
- GTEST_NONFATAL_FAILURE_("No more FUSE response is expected");
- ServerRespondFuseError(in_header->unique);
- return;
- }
- auto mem_block = responses_.Next();
- if (in_header->opcode != mem_block.opcode) {
- std::string message = absl::StrFormat("Expect opcode %d but got %d",
- mem_block.opcode, in_header->opcode);
- GTEST_NONFATAL_FAILURE_(message.c_str());
- // We won't get correct response if opcode is not expected. Send error
- // response here to avoid wrong parsing by VFS.
- ServerRespondFuseError(in_header->unique);
- return;
- }
-
- // Write FUSE response.
- ServerRespondFuseSuccess(responses_, mem_block, in_header->unique);
-}
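-
- // Note on framing (editorial summary, not part of the original file): every
- // message read from /dev/fuse begins with a fuse_in_header carrying len,
- // opcode and a unique id, followed by an opcode-specific payload. Every reply
- // written back begins with a fuse_out_header whose unique field must echo the
- // request's, followed by the response payload, or just the header carrying an
- // error for failures.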
-
-void FuseTest::ServerRespondFuseSuccess(FuseMemBuffer& mem_buf,
- const FuseMemBlock& block,
- uint64_t unique) {
- fuse_out_header* out_header =
- reinterpret_cast<fuse_out_header*>(mem_buf.DataAtOffset(block.offset));
-
- // Patch `unique` in fuse_out_header to avoid EINVAL caused by responding
- // with an unknown `unique`.
- out_header->unique = unique;
- EXPECT_THAT(RetryEINTR(write)(dev_fd_, out_header, block.len),
- SyscallSucceedsWithValue(block.len));
-}
-
-void FuseTest::ServerRespondFuseError(uint64_t unique) {
- fuse_out_header out_header = {
- .len = sizeof(struct fuse_out_header),
- .error = ENOSYS,
- .unique = unique,
- };
- EXPECT_THAT(RetryEINTR(write)(dev_fd_, &out_header, sizeof(out_header)),
- SyscallSucceedsWithValue(sizeof(out_header)));
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/fuse/linux/fuse_base.h b/test/fuse/linux/fuse_base.h
deleted file mode 100644
index 6ad296ca2..000000000
--- a/test/fuse/linux/fuse_base.h
+++ /dev/null
@@ -1,251 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_FUSE_FUSE_BASE_H_
-#define GVISOR_TEST_FUSE_FUSE_BASE_H_
-
-#include <linux/fuse.h>
-#include <string.h>
-#include <sys/stat.h>
-#include <sys/uio.h>
-
-#include <iostream>
-#include <unordered_map>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "test/util/posix_error.h"
-#include "test/util/temp_path.h"
-
-namespace gvisor {
-namespace testing {
-
-constexpr char kMountOpts[] = "rootmode=755,user_id=0,group_id=0";
-
-constexpr struct fuse_init_out kDefaultFUSEInitOutPayload = {.major = 7};
-
- // Internal commands used to communicate between the testing thread and the
- // FUSE server. See test/fuse/README.md for further details.
-enum class FuseTestCmd {
- kSetResponse = 0,
- kSetInodeLookup,
- kGetRequest,
- kGetNumUnconsumedRequests,
- kGetNumUnsentResponses,
- kGetTotalReceivedBytes,
- kSkipRequest,
-};
-
-// Holds the information of a memory block in a serial buffer.
-struct FuseMemBlock {
- uint32_t opcode;
- size_t offset;
- size_t len;
-};
-
- // A wrapper around a simple serial buffer that can be used with read(2) and
- // write(2). Contains a cursor that tracks the current access position. This
- // class is not thread-safe and must only be used from a single thread.
-class FuseMemBuffer {
- public:
- FuseMemBuffer() : cursor_(0) {
- // To read from /dev/fuse, a buffer needs at least FUSE_MIN_READ_BUFFER
- // bytes to avoid EINVAL. FuseMemBuffer holds memory that can accommodate
- // a sequence of FUSE requests/responses, so it is initialized with double
- // the minimum requirement.
- mem_.resize(FUSE_MIN_READ_BUFFER * 2);
- }
-
- // Returns true if there are no memory blocks.
- bool Empty() { return blocks_.empty(); }
-
- // Returns true if there are no remaining memory blocks.
- bool End() { return cursor_ == blocks_.size(); }
-
- // Returns how many bytes have been received.
- size_t UsedBytes() {
- return Empty() ? 0 : blocks_.back().offset + blocks_.back().len;
- }
-
- // Returns the number of bytes still available in the serial buffer.
- size_t AvailBytes() { return mem_.size() - UsedBytes(); }
-
- // Appends a memory block that starts at the tail of the serial buffer.
- // /dev/fuse requires at least FUSE_MIN_READ_BUFFER bytes per read, or it
- // will return EINVAL. If there is not enough room, the buffer is doubled.
- void AddMemBlock(uint32_t opcode, void* data, size_t len) {
- if (AvailBytes() < FUSE_MIN_READ_BUFFER) {
- mem_.resize(mem_.size() << 1);
- }
- size_t offset = UsedBytes();
- memcpy(mem_.data() + offset, data, len);
- blocks_.push_back(FuseMemBlock{opcode, offset, len});
- }
-
- // Returns the memory address at a specific offset. Used with read(2) or
- // write(2).
- char* DataAtOffset(size_t offset) { return mem_.data() + offset; }
-
- // Returns the memory block pointed to by the cursor and advances the cursor.
- FuseMemBlock Next() {
- if (End()) {
- std::cerr << "Buffer is already exhausted." << std::endl;
- return FuseMemBlock{};
- }
- return blocks_[cursor_++];
- }
-
- // Returns the number of blocks that have not yet been requested.
- size_t RemainingBlocks() { return blocks_.size() - cursor_; }
-
- private:
- size_t cursor_;
- std::vector<FuseMemBlock> blocks_;
- std::vector<char> mem_;
-};
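-
- // Illustrative usage sketch (editorial addition, not part of the original
- // file; FuseMemBufferUsageSketch is a hypothetical helper): FuseMemBuffer is
- // an append-then-replay queue. A producer copies raw bytes in with
- // AddMemBlock() and a consumer walks them back out with Next() and
- // DataAtOffset().
-inline void FuseMemBufferUsageSketch() {
- FuseMemBuffer buffer;
- uint32_t payload = 42;
- buffer.AddMemBlock(FUSE_LOOKUP, &payload, sizeof(payload));
- while (!buffer.End()) {
- FuseMemBlock block = buffer.Next();
- // block.opcode is FUSE_LOOKUP; the stored bytes live at block.offset.
- memcpy(&payload, buffer.DataAtOffset(block.offset), block.len);
- }
-}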
-
- // The FuseTest base class is useful for FUSE integration tests. Inherit from
- // this class to automatically set up a fake FUSE server, and use the member
- // functions to interact with it. Refer to test/fuse/README.md for a detailed
- // explanation.
-class FuseTest : public ::testing::Test {
- public:
- // nodeid_ is the ID of a fake inode. We start from 2 since 1 is occupied by
- // the mount point.
- FuseTest() : nodeid_(2) {}
- void SetUp() override;
- void TearDown() override;
-
- // Called by the testing thread to set up a fake response for an expected
- // opcode via socket. This can be used multiple times to define a sequence of
- // expected FUSE reactions.
- void SetServerResponse(uint32_t opcode, std::vector<struct iovec>& iovecs);
-
- // Called by the testing thread to install a fake path under the mount point.
- // E.g. for a file at /mnt/dir/file with the mount point at /mnt, it will
- // look up "dir/file".
- //
- // It sets a fixed response to FUSE_LOOKUP requests issued with this path,
- // pretending there is an inode and avoiding ENOENT when testing. If mode is
- // not given, it creates a regular file with mode 0600.
- void SetServerInodeLookup(const std::string& path,
- mode_t mode = S_IFREG | S_IRUSR | S_IWUSR,
- uint64_t size = 512);
-
- // Called by the testing thread to ask the FUSE server for its next received
- // FUSE request. Be sure to use a matching set of iovec structs to receive
- // the data from the server.
- void GetServerActualRequest(std::vector<struct iovec>& iovecs);
-
- // Called by the testing thread to query the number of unconsumed requests in
- // the requests_ serial buffer of the FUSE server. TearDown() ensures all
- // FUSE requests received by the FUSE server were consumed by the testing
- // thread.
- uint32_t GetServerNumUnconsumedRequests();
-
- // Called by the testing thread to query the number of unsent responses in
- // the responses_ serial buffer of the FUSE server. TearDown() ensures all
- // preset FUSE responses were sent out by the FUSE server.
- uint32_t GetServerNumUnsentResponses();
-
- // Called by the testing thread to ask the FUSE server for its total received
- // bytes from /dev/fuse.
- uint32_t GetServerTotalReceivedBytes();
-
- // Called by the testing thread to ask the FUSE server to skip its next
- // stored request.
- void SkipServerActualRequest();
-
- protected:
- TempPath mount_point_;
-
- // Opens /dev/fuse and keeps the file descriptor for use by the FUSE server.
- void MountFuse(const char* mountOpts = kMountOpts);
-
- // Creates a socketpair for communication and forks the FUSE server.
- void SetUpFuseServer(
- const struct fuse_init_out* payload = &kDefaultFUSEInitOutPayload);
-
- // Unmounts the mountpoint of the FUSE server.
- void UnmountFuse();
-
- private:
- // Sends a FuseTestCmd and gets a uint32_t value back from the FUSE server.
- inline uint32_t GetServerData(uint32_t cmd);
-
- // Waits for the FUSE server to complete its processing. Complains if the
- // FUSE server reports any failure during tests.
- void WaitServerComplete();
-
- // The FUSE server stays here and waits for the next command or FUSE request
- // until it is terminated.
- void ServerFuseLoop();
-
- // Used by the FUSE server to tell the testing thread whether it is OK to
- // proceed with the next command. Issued after processing each FuseTestCmd.
- void ServerCompleteWith(bool success);
-
- // Consumes the first FUSE request (FUSE_INIT) when mounting FUSE. Replies
- // with a response built from the given fuse_init_out payload.
- PosixError ServerConsumeFuseInit(const struct fuse_init_out* payload);
-
- // A command switch that dispatches each FuseTestCmd to its handler.
- void ServerHandleCommand();
-
- // The FUSE server's counterpart of `SetServerResponse()`. Handles the
- // `kSetResponse` command. Saves the fake response into its output memory
- // queue.
- void ServerReceiveResponse();
-
- // The FUSE server's counterpart of `SetServerInodeLookup()`. Handles the
- // `kSetInodeLookup` command. Receives an expected file mode and a file path
- // under the mount point.
- void ServerReceiveInodeLookup();
-
- // The FUSE server's counterpart of `GetServerActualRequest()`. Handles the
- // `kGetRequest` command. Sends the next received request pointed to by the
- // cursor.
- void ServerSendReceivedRequest();
-
- // Sends a uint32_t value via the socket.
- inline void ServerSendData(uint32_t data);
-
- // The FUSE server's counterpart of `SkipServerActualRequest()`. Handles the
- // `kSkipRequest` command. Skips the request pointed to by the current
- // cursor.
- void ServerSkipReceivedRequest();
-
- // Handles a FUSE request sent to /dev/fuse using its saved responses.
- void ServerProcessFuseRequest();
-
- // Responds to a FUSE request with saved data.
- void ServerRespondFuseSuccess(FuseMemBuffer& mem_buf,
- const FuseMemBlock& block, uint64_t unique);
-
- // Responds with an error header to /dev/fuse when something goes wrong.
- void ServerRespondFuseError(uint64_t unique);
-
- int dev_fd_;
- int sock_[2];
-
- uint64_t nodeid_;
- std::unordered_map<std::string, FuseMemBlock> lookup_map_;
-
- FuseMemBuffer requests_;
- FuseMemBuffer responses_;
- FuseMemBuffer lookups_;
-};
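-
- // Illustrative sketch (editorial addition, not part of the original file) of
- // the assumed FuseTestCmd round trip: the testing thread writes the command
- // as a uint32_t on its end of the socketpair and, for query commands, reads
- // a single uint32_t back, matching how ServerSendData() answers
- // kGetNumUnconsumedRequests and kGetNumUnsentResponses. For example:
- //
- //   uint32_t cmd = static_cast<uint32_t>(FuseTestCmd::kGetNumUnsentResponses);
- //   uint32_t reply = 0;
- //   write(sock, &cmd, sizeof(cmd));    // sock: the testing thread's end
- //   read(sock, &reply, sizeof(reply));  //   (placeholder name).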
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_FUSE_FUSE_BASE_H_
diff --git a/test/fuse/linux/fuse_fd_util.cc b/test/fuse/linux/fuse_fd_util.cc
deleted file mode 100644
index 30d1157bb..000000000
--- a/test/fuse/linux/fuse_fd_util.cc
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/fuse/linux/fuse_fd_util.h"
-
-#include <fcntl.h>
-#include <linux/fuse.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <sys/uio.h>
-
-#include <string>
-#include <vector>
-
-#include "test/util/cleanup.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fuse_util.h"
-#include "test/util/posix_error.h"
-
-namespace gvisor {
-namespace testing {
-
-PosixErrorOr<FileDescriptor> FuseFdTest::OpenPath(const std::string &path,
- uint32_t flags, uint64_t fh) {
- struct fuse_out_header out_header = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_open_out),
- };
- struct fuse_open_out out_payload = {
- .fh = fh,
- .open_flags = flags,
- };
- auto iov_out = FuseGenerateIovecs(out_header, out_payload);
- SetServerResponse(FUSE_OPEN, iov_out);
-
- auto res = Open(path.c_str(), flags);
- if (res.ok()) {
- SkipServerActualRequest();
- }
- return res;
-}
-
-Cleanup FuseFdTest::CloseFD(FileDescriptor &fd) {
- return Cleanup([&] {
- close(fd.release());
- SkipServerActualRequest();
- });
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/fuse/linux/fuse_fd_util.h b/test/fuse/linux/fuse_fd_util.h
deleted file mode 100644
index 066185c94..000000000
--- a/test/fuse/linux/fuse_fd_util.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_FUSE_FUSE_FD_UTIL_H_
-#define GVISOR_TEST_FUSE_FUSE_FD_UTIL_H_
-
-#include <fcntl.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-
-#include <string>
-
-#include "test/fuse/linux/fuse_base.h"
-#include "test/util/cleanup.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/posix_error.h"
-
-namespace gvisor {
-namespace testing {
-
-class FuseFdTest : public FuseTest {
- public:
- // Sets the FUSE server to respond to a FUSE_OPEN with the given flags and
- // fh, then does a real file system open on the absolute path to get an fd.
- PosixErrorOr<FileDescriptor> OpenPath(const std::string &path,
- uint32_t flags = O_RDONLY,
- uint64_t fh = 1);
-
- // Returns a cleanup object that closes the fd when it is destroyed. After
- // the close is done, tells the FUSE server to skip this FUSE_RELEASE.
- Cleanup CloseFD(FileDescriptor &fd);
-};
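-
- // Illustrative usage sketch (editorial addition, not part of the original
- // file; path and fh=7 are placeholders): tests derived from FuseFdTest
- // typically pair OpenPath() with CloseFD() so that the trailing FUSE_RELEASE
- // is skipped automatically:
- //
- //   auto fd = ASSERT_NO_ERRNO_AND_VALUE(OpenPath(path, O_RDWR, /*fh=*/7));
- //   auto cleanup = CloseFD(fd);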
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_FUSE_FUSE_FD_UTIL_H_
diff --git a/test/fuse/linux/mkdir_test.cc b/test/fuse/linux/mkdir_test.cc
deleted file mode 100644
index 9647cb93f..000000000
--- a/test/fuse/linux/mkdir_test.cc
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <linux/fuse.h>
-#include <sys/stat.h>
-#include <sys/statfs.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <string>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "test/fuse/linux/fuse_base.h"
-#include "test/util/fuse_util.h"
-#include "test/util/temp_umask.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-class MkdirTest : public FuseTest {
- protected:
- const std::string test_dir_ = "test_dir";
- const mode_t perms_ = S_IRWXU | S_IRWXG | S_IRWXO;
-};
-
-TEST_F(MkdirTest, CreateDir) {
- const std::string test_dir_path_ =
- JoinPath(mount_point_.path().c_str(), test_dir_);
- const mode_t new_umask = 0077;
-
- struct fuse_out_header out_header = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_entry_out),
- };
- struct fuse_entry_out out_payload = DefaultEntryOut(S_IFDIR | perms_, 5);
- auto iov_out = FuseGenerateIovecs(out_header, out_payload);
- SetServerResponse(FUSE_MKDIR, iov_out);
- TempUmask mask(new_umask);
- ASSERT_THAT(mkdir(test_dir_path_.c_str(), 0777), SyscallSucceeds());
-
- struct fuse_in_header in_header;
- struct fuse_mkdir_in in_payload;
- std::vector<char> actual_dir(test_dir_.length() + 1);
- auto iov_in = FuseGenerateIovecs(in_header, in_payload, actual_dir);
- GetServerActualRequest(iov_in);
-
- EXPECT_EQ(in_header.len,
- sizeof(in_header) + sizeof(in_payload) + test_dir_.length() + 1);
- EXPECT_EQ(in_header.opcode, FUSE_MKDIR);
- EXPECT_EQ(in_payload.mode & 0777, perms_ & ~new_umask);
- EXPECT_EQ(in_payload.umask, new_umask);
- EXPECT_EQ(std::string(actual_dir.data()), test_dir_);
-}
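-
- // Editorial note (not part of the original file): a worked example of the
- // mode check above. mkdir() is called with 0777 while the umask is 0077, so
- // the FUSE_MKDIR request is expected to carry mode 0777 & ~0077 = 0700 and
- // the raw umask value 0077, which is what the EXPECT_EQs on in_payload
- // verify.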
-
-TEST_F(MkdirTest, FileTypeError) {
- const std::string test_dir_path_ =
- JoinPath(mount_point_.path().c_str(), test_dir_);
-
- struct fuse_out_header out_header = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_entry_out),
- };
- struct fuse_entry_out out_payload = DefaultEntryOut(S_IFREG | perms_, 5);
- auto iov_out = FuseGenerateIovecs(out_header, out_payload);
- SetServerResponse(FUSE_MKDIR, iov_out);
- ASSERT_THAT(mkdir(test_dir_path_.c_str(), 0777), SyscallFailsWithErrno(EIO));
- SkipServerActualRequest();
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/fuse/linux/mknod_test.cc b/test/fuse/linux/mknod_test.cc
deleted file mode 100644
index 74c74d76b..000000000
--- a/test/fuse/linux/mknod_test.cc
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <linux/fuse.h>
-#include <sys/stat.h>
-#include <sys/statfs.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <string>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "test/fuse/linux/fuse_base.h"
-#include "test/util/fuse_util.h"
-#include "test/util/temp_umask.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-class MknodTest : public FuseTest {
- protected:
- const std::string test_file_ = "test_file";
- const mode_t perms_ = S_IRWXU | S_IRWXG | S_IRWXO;
-};
-
-TEST_F(MknodTest, RegularFile) {
- const std::string test_file_path =
- JoinPath(mount_point_.path().c_str(), test_file_);
- const mode_t new_umask = 0077;
-
- struct fuse_out_header out_header = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_entry_out),
- };
- struct fuse_entry_out out_payload = DefaultEntryOut(S_IFREG | perms_, 5);
- auto iov_out = FuseGenerateIovecs(out_header, out_payload);
- SetServerResponse(FUSE_MKNOD, iov_out);
- TempUmask mask(new_umask);
- ASSERT_THAT(mknod(test_file_path.c_str(), perms_, 0), SyscallSucceeds());
-
- struct fuse_in_header in_header;
- struct fuse_mknod_in in_payload;
- std::vector<char> actual_file(test_file_.length() + 1);
- auto iov_in = FuseGenerateIovecs(in_header, in_payload, actual_file);
- GetServerActualRequest(iov_in);
-
- EXPECT_EQ(in_header.len,
- sizeof(in_header) + sizeof(in_payload) + test_file_.length() + 1);
- EXPECT_EQ(in_header.opcode, FUSE_MKNOD);
- EXPECT_EQ(in_payload.mode & 0777, perms_ & ~new_umask);
- EXPECT_EQ(in_payload.umask, new_umask);
- EXPECT_EQ(in_payload.rdev, 0);
- EXPECT_EQ(std::string(actual_file.data()), test_file_);
-}
-
-TEST_F(MknodTest, FileTypeError) {
- const std::string test_file_path =
- JoinPath(mount_point_.path().c_str(), test_file_);
-
- struct fuse_out_header out_header = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_entry_out),
- };
- // The server returning a directory instead of a regular file should cause
- // an error.
- struct fuse_entry_out out_payload = DefaultEntryOut(S_IFDIR | perms_, 5);
- auto iov_out = FuseGenerateIovecs(out_header, out_payload);
- SetServerResponse(FUSE_MKNOD, iov_out);
- ASSERT_THAT(mknod(test_file_path.c_str(), perms_, 0),
- SyscallFailsWithErrno(EIO));
- SkipServerActualRequest();
-}
-
-TEST_F(MknodTest, NodeIDError) {
- const std::string test_file_path =
- JoinPath(mount_point_.path().c_str(), test_file_);
-
- struct fuse_out_header out_header = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_entry_out),
- };
- struct fuse_entry_out out_payload =
- DefaultEntryOut(S_IFREG | perms_, FUSE_ROOT_ID);
- auto iov_out = FuseGenerateIovecs(out_header, out_payload);
- SetServerResponse(FUSE_MKNOD, iov_out);
- ASSERT_THAT(mknod(test_file_path.c_str(), perms_, 0),
- SyscallFailsWithErrno(EIO));
- SkipServerActualRequest();
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/fuse/linux/mount_test.cc b/test/fuse/linux/mount_test.cc
deleted file mode 100644
index 276f842ea..000000000
--- a/test/fuse/linux/mount_test.cc
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <sys/mount.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "test/util/mount_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(FuseMount, Success) {
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/fuse", O_WRONLY));
- std::string mopts =
- absl::StrFormat("fd=%d,user_id=%d,group_id=%d,rootmode=0777", fd.get(),
- getuid(), getgid());
-
- const auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- const auto mount =
- ASSERT_NO_ERRNO_AND_VALUE(Mount("", dir.path(), "fuse", 0, mopts, 0));
-}
-
-TEST(FuseMount, FDNotParsable) {
- int devfd;
- EXPECT_THAT(devfd = open("/dev/fuse", O_RDWR), SyscallSucceeds());
- std::string mount_opts = "fd=thiscantbeparsed";
- TempPath mount_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- EXPECT_THAT(mount("fuse", mount_dir.path().c_str(), "fuse",
- MS_NODEV | MS_NOSUID, mount_opts.c_str()),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(FuseMount, NoDevice) {
- const auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- EXPECT_THAT(mount("", dir.path().c_str(), "fuse", 0, ""),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(FuseMount, ClosedFD) {
- FileDescriptor f = ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/fuse", O_WRONLY));
- int fd = f.release();
- close(fd);
- std::string mopts = absl::StrCat("fd=", std::to_string(fd));
-
- const auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- EXPECT_THAT(mount("", dir.path().c_str(), "fuse", 0, mopts.c_str()),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(FuseMount, BadFD) {
- const auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR));
- std::string mopts = absl::StrCat("fd=", std::to_string(fd.get()));
-
- EXPECT_THAT(mount("", dir.path().c_str(), "fuse", 0, mopts.c_str()),
- SyscallFailsWithErrno(EINVAL));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/fuse/linux/open_test.cc b/test/fuse/linux/open_test.cc
deleted file mode 100644
index 4b0c4a805..000000000
--- a/test/fuse/linux/open_test.cc
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <linux/fuse.h>
-#include <sys/stat.h>
-#include <sys/statfs.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <string>
-
-#include "gtest/gtest.h"
-#include "test/fuse/linux/fuse_base.h"
-#include "test/util/fuse_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-class OpenTest : public FuseTest {
- // OpenTest doesn't care about the release request when closing an fd,
- // so it doesn't check leftover requests when tearing down.
- void TearDown() { UnmountFuse(); }
-
- protected:
- const std::string test_file_ = "test_file";
- const mode_t regular_file_ = S_IFREG | S_IRWXU | S_IRWXG | S_IRWXO;
-
- struct fuse_open_out out_payload_ = {
- .fh = 1,
- .open_flags = O_RDWR,
- };
-};
-
-TEST_F(OpenTest, RegularFile) {
- const std::string test_file_path =
- JoinPath(mount_point_.path().c_str(), test_file_);
- SetServerInodeLookup(test_file_, regular_file_);
-
- struct fuse_out_header out_header = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_open_out),
- };
- auto iov_out = FuseGenerateIovecs(out_header, out_payload_);
- SetServerResponse(FUSE_OPEN, iov_out);
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_path.c_str(), O_RDWR));
-
- struct fuse_in_header in_header;
- struct fuse_open_in in_payload;
- auto iov_in = FuseGenerateIovecs(in_header, in_payload);
- GetServerActualRequest(iov_in);
-
- EXPECT_EQ(in_header.len, sizeof(in_header) + sizeof(in_payload));
- EXPECT_EQ(in_header.opcode, FUSE_OPEN);
- EXPECT_EQ(in_payload.flags, O_RDWR);
- EXPECT_THAT(fcntl(fd.get(), F_GETFL), SyscallSucceedsWithValue(O_RDWR));
-}
-
-TEST_F(OpenTest, SetNoOpen) {
- const std::string test_file_path =
- JoinPath(mount_point_.path().c_str(), test_file_);
- SetServerInodeLookup(test_file_, regular_file_);
-
- // ENOSYS indicates open is not implemented.
- struct fuse_out_header out_header = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_open_out),
- .error = -ENOSYS,
- };
- auto iov_out = FuseGenerateIovecs(out_header, out_payload_);
- SetServerResponse(FUSE_OPEN, iov_out);
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_path.c_str(), O_RDWR));
- SkipServerActualRequest();
-
- // Check that open doesn't send a new request.
- uint32_t received_before = GetServerTotalReceivedBytes();
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_path.c_str(), O_RDWR));
- EXPECT_EQ(GetServerTotalReceivedBytes(), received_before);
-}
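-
- // Editorial note (not part of the original file): once the server answers
- // FUSE_OPEN with ENOSYS, open is treated as unimplemented for the connection
- // and later opens succeed without sending another FUSE_OPEN, which is why
- // the received-byte counter above is expected to stay unchanged.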
-
-TEST_F(OpenTest, OpenFail) {
- struct fuse_out_header out_header = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_open_out),
- .error = -ENOENT,
- };
-
- auto iov_out = FuseGenerateIovecs(out_header, out_payload_);
- SetServerResponse(FUSE_OPENDIR, iov_out);
- ASSERT_THAT(open(mount_point_.path().c_str(), O_RDWR),
- SyscallFailsWithErrno(ENOENT));
-
- struct fuse_in_header in_header;
- struct fuse_open_in in_payload;
- auto iov_in = FuseGenerateIovecs(in_header, in_payload);
- GetServerActualRequest(iov_in);
-
- EXPECT_EQ(in_header.len, sizeof(in_header) + sizeof(in_payload));
- EXPECT_EQ(in_header.opcode, FUSE_OPENDIR);
- EXPECT_EQ(in_payload.flags, O_RDWR);
-}
-
-TEST_F(OpenTest, DirectoryFlagOnRegularFile) {
- const std::string test_file_path =
- JoinPath(mount_point_.path().c_str(), test_file_);
-
- SetServerInodeLookup(test_file_, regular_file_);
- ASSERT_THAT(open(test_file_path.c_str(), O_RDWR | O_DIRECTORY),
- SyscallFailsWithErrno(ENOTDIR));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/fuse/linux/read_test.cc b/test/fuse/linux/read_test.cc
deleted file mode 100644
index 88fc299d8..000000000
--- a/test/fuse/linux/read_test.cc
+++ /dev/null
@@ -1,390 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <linux/fuse.h>
-#include <sys/stat.h>
-#include <sys/statfs.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <string>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "test/fuse/linux/fuse_base.h"
-#include "test/util/fuse_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-class ReadTest : public FuseTest {
- void SetUp() override {
- FuseTest::SetUp();
- test_file_path_ = JoinPath(mount_point_.path().c_str(), test_file_);
- }
-
- // TearDown overrides the parent's function
- // to skip checking the unconsumed release request at the end.
- void TearDown() override { UnmountFuse(); }
-
- protected:
- const std::string test_file_ = "test_file";
- const mode_t test_file_mode_ = S_IFREG | S_IRWXU | S_IRWXG | S_IRWXO;
- const uint64_t test_fh_ = 1;
- const uint32_t open_flag_ = O_RDWR;
-
- std::string test_file_path_;
-
- PosixErrorOr<FileDescriptor> OpenTestFile(const std::string &path,
- uint64_t size = 512) {
- SetServerInodeLookup(test_file_, test_file_mode_, size);
-
- struct fuse_out_header out_header_open = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_open_out),
- };
- struct fuse_open_out out_payload_open = {
- .fh = test_fh_,
- .open_flags = open_flag_,
- };
- auto iov_out_open = FuseGenerateIovecs(out_header_open, out_payload_open);
- SetServerResponse(FUSE_OPEN, iov_out_open);
-
- auto res = Open(path.c_str(), open_flag_);
- if (res.ok()) {
- SkipServerActualRequest();
- }
- return res;
- }
-};
-
-class ReadTestSmallMaxRead : public ReadTest {
- void SetUp() override {
- MountFuse(mountOpts);
- SetUpFuseServer();
- test_file_path_ = JoinPath(mount_point_.path().c_str(), test_file_);
- }
-
- protected:
- constexpr static char mountOpts[] =
- "rootmode=755,user_id=0,group_id=0,max_read=4096";
- // 4096 is hard-coded as the max_read in mount options.
- const int size_fragment = 4096;
-};
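-
- // Editorial note (not part of the original file): with max_read=4096 in the
- // mount options, a single read() of N bytes is expected to be split into
- // N / 4096 FUSE_READ requests of size 4096 at offsets 0, 4096, 8192, ...,
- // which is what the fragment loops in the ReadTestSmallMaxRead cases verify.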
-
-TEST_F(ReadTest, ReadWhole) {
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(OpenTestFile(test_file_path_));
-
- // Prepare for the read.
- const int n_read = 5;
- std::vector<char> data(n_read);
- RandomizeBuffer(data.data(), data.size());
- struct fuse_out_header out_header_read = {
- .len =
- static_cast<uint32_t>(sizeof(struct fuse_out_header) + data.size()),
- };
- auto iov_out_read = FuseGenerateIovecs(out_header_read, data);
- SetServerResponse(FUSE_READ, iov_out_read);
-
- // Read the whole "file".
- std::vector<char> buf(n_read);
- EXPECT_THAT(read(fd.get(), buf.data(), n_read),
- SyscallSucceedsWithValue(n_read));
-
- // Check the read request.
- struct fuse_in_header in_header_read;
- struct fuse_read_in in_payload_read;
- auto iov_in = FuseGenerateIovecs(in_header_read, in_payload_read);
- GetServerActualRequest(iov_in);
-
- EXPECT_EQ(in_payload_read.fh, test_fh_);
- EXPECT_EQ(in_header_read.len,
- sizeof(in_header_read) + sizeof(in_payload_read));
- EXPECT_EQ(in_header_read.opcode, FUSE_READ);
- EXPECT_EQ(in_payload_read.offset, 0);
- EXPECT_EQ(buf, data);
-}
-
-TEST_F(ReadTest, ReadPartial) {
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(OpenTestFile(test_file_path_));
-
- // Prepare for the read.
- const int n_data = 10;
- std::vector<char> data(n_data);
- RandomizeBuffer(data.data(), data.size());
- // Note: due to read-ahead, the current read implementation treats any
- // response that is longer than requested as correct (i.e. EOF not reached).
- // Therefore, the test below must make sure the size to read does not exceed
- // n_data.
- struct fuse_out_header out_header_read = {
- .len =
- static_cast<uint32_t>(sizeof(struct fuse_out_header) + data.size()),
- };
- auto iov_out_read = FuseGenerateIovecs(out_header_read, data);
- struct fuse_in_header in_header_read;
- struct fuse_read_in in_payload_read;
- auto iov_in = FuseGenerateIovecs(in_header_read, in_payload_read);
-
- std::vector<char> buf(n_data);
-
- // Read 1 bytes.
- SetServerResponse(FUSE_READ, iov_out_read);
- EXPECT_THAT(read(fd.get(), buf.data(), 1), SyscallSucceedsWithValue(1));
-
- // Check the 1-byte read request.
- GetServerActualRequest(iov_in);
- EXPECT_EQ(in_payload_read.fh, test_fh_);
- EXPECT_EQ(in_header_read.len,
- sizeof(in_header_read) + sizeof(in_payload_read));
- EXPECT_EQ(in_header_read.opcode, FUSE_READ);
- EXPECT_EQ(in_payload_read.offset, 0);
-
- // Read 3 bytes.
- SetServerResponse(FUSE_READ, iov_out_read);
- EXPECT_THAT(read(fd.get(), buf.data(), 3), SyscallSucceedsWithValue(3));
-
- // Check the 3-byte read request.
- GetServerActualRequest(iov_in);
- EXPECT_EQ(in_payload_read.fh, test_fh_);
- EXPECT_EQ(in_payload_read.offset, 1);
-
- // Read 5 bytes.
- SetServerResponse(FUSE_READ, iov_out_read);
- EXPECT_THAT(read(fd.get(), buf.data(), 5), SyscallSucceedsWithValue(5));
-
- // Check the 5-byte read request.
- GetServerActualRequest(iov_in);
- EXPECT_EQ(in_payload_read.fh, test_fh_);
- EXPECT_EQ(in_payload_read.offset, 4);
-}
-
-TEST_F(ReadTest, PRead) {
- const int file_size = 512;
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(OpenTestFile(test_file_path_, file_size));
-
- // Prepare for the read.
- const int n_read = 5;
- std::vector<char> data(n_read);
- RandomizeBuffer(data.data(), data.size());
- struct fuse_out_header out_header_read = {
- .len =
- static_cast<uint32_t>(sizeof(struct fuse_out_header) + data.size()),
- };
- auto iov_out_read = FuseGenerateIovecs(out_header_read, data);
- SetServerResponse(FUSE_READ, iov_out_read);
-
- // Read some bytes.
- std::vector<char> buf(n_read);
- const int offset_read = file_size >> 1;
- EXPECT_THAT(pread(fd.get(), buf.data(), n_read, offset_read),
- SyscallSucceedsWithValue(n_read));
-
- // Check the read request.
- struct fuse_in_header in_header_read;
- struct fuse_read_in in_payload_read;
- auto iov_in = FuseGenerateIovecs(in_header_read, in_payload_read);
- GetServerActualRequest(iov_in);
-
- EXPECT_EQ(in_payload_read.fh, test_fh_);
- EXPECT_EQ(in_header_read.len,
- sizeof(in_header_read) + sizeof(in_payload_read));
- EXPECT_EQ(in_header_read.opcode, FUSE_READ);
- EXPECT_EQ(in_payload_read.offset, offset_read);
- EXPECT_EQ(buf, data);
-}
-
-TEST_F(ReadTest, ReadZero) {
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(OpenTestFile(test_file_path_));
-
- // Issue the read.
- std::vector<char> buf;
- EXPECT_THAT(read(fd.get(), buf.data(), 0), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(ReadTest, ReadShort) {
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(OpenTestFile(test_file_path_));
-
- // Prepare for the short read.
- const int n_read = 5;
- std::vector<char> data(n_read >> 1);
- RandomizeBuffer(data.data(), data.size());
- struct fuse_out_header out_header_read = {
- .len =
- static_cast<uint32_t>(sizeof(struct fuse_out_header) + data.size()),
- };
- auto iov_out_read = FuseGenerateIovecs(out_header_read, data);
- SetServerResponse(FUSE_READ, iov_out_read);
-
- // Read the whole "file".
- std::vector<char> buf(n_read);
- EXPECT_THAT(read(fd.get(), buf.data(), n_read),
- SyscallSucceedsWithValue(data.size()));
-
- // Check the read request.
- struct fuse_in_header in_header_read;
- struct fuse_read_in in_payload_read;
- auto iov_in = FuseGenerateIovecs(in_header_read, in_payload_read);
- GetServerActualRequest(iov_in);
-
- EXPECT_EQ(in_payload_read.fh, test_fh_);
- EXPECT_EQ(in_header_read.len,
- sizeof(in_header_read) + sizeof(in_payload_read));
- EXPECT_EQ(in_header_read.opcode, FUSE_READ);
- EXPECT_EQ(in_payload_read.offset, 0);
- std::vector<char> short_buf(buf.begin(), buf.begin() + data.size());
- EXPECT_EQ(short_buf, data);
-}
-
-TEST_F(ReadTest, ReadShortEOF) {
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(OpenTestFile(test_file_path_));
-
- // Prepare for the short read.
- struct fuse_out_header out_header_read = {
- .len = static_cast<uint32_t>(sizeof(struct fuse_out_header)),
- };
- auto iov_out_read = FuseGenerateIovecs(out_header_read);
- SetServerResponse(FUSE_READ, iov_out_read);
-
- // Read the whole "file".
- const int n_read = 10;
- std::vector<char> buf(n_read);
- EXPECT_THAT(read(fd.get(), buf.data(), n_read), SyscallSucceedsWithValue(0));
-
- // Check the read request.
- struct fuse_in_header in_header_read;
- struct fuse_read_in in_payload_read;
- auto iov_in = FuseGenerateIovecs(in_header_read, in_payload_read);
- GetServerActualRequest(iov_in);
-
- EXPECT_EQ(in_payload_read.fh, test_fh_);
- EXPECT_EQ(in_header_read.len,
- sizeof(in_header_read) + sizeof(in_payload_read));
- EXPECT_EQ(in_header_read.opcode, FUSE_READ);
- EXPECT_EQ(in_payload_read.offset, 0);
-}
-
-TEST_F(ReadTestSmallMaxRead, ReadSmallMaxRead) {
- const int n_fragment = 10;
- const int n_read = size_fragment * n_fragment;
-
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(OpenTestFile(test_file_path_, n_read));
-
- // Prepare for the read.
- std::vector<char> data(size_fragment);
- RandomizeBuffer(data.data(), data.size());
- struct fuse_out_header out_header_read = {
- .len =
- static_cast<uint32_t>(sizeof(struct fuse_out_header) + data.size()),
- };
- auto iov_out_read = FuseGenerateIovecs(out_header_read, data);
-
- for (int i = 0; i < n_fragment; ++i) {
- SetServerResponse(FUSE_READ, iov_out_read);
- }
-
- // Read the whole "file".
- std::vector<char> buf(n_read);
- EXPECT_THAT(read(fd.get(), buf.data(), n_read),
- SyscallSucceedsWithValue(n_read));
-
- ASSERT_EQ(GetServerNumUnsentResponses(), 0);
- ASSERT_EQ(GetServerNumUnconsumedRequests(), n_fragment);
-
- // Check each read segment.
- struct fuse_in_header in_header_read;
- struct fuse_read_in in_payload_read;
- auto iov_in = FuseGenerateIovecs(in_header_read, in_payload_read);
-
- for (int i = 0; i < n_fragment; ++i) {
- GetServerActualRequest(iov_in);
- EXPECT_EQ(in_payload_read.fh, test_fh_);
- EXPECT_EQ(in_header_read.len,
- sizeof(in_header_read) + sizeof(in_payload_read));
- EXPECT_EQ(in_header_read.opcode, FUSE_READ);
- EXPECT_EQ(in_payload_read.offset, i * size_fragment);
- EXPECT_EQ(in_payload_read.size, size_fragment);
-
- auto it = buf.begin() + i * size_fragment;
- EXPECT_EQ(std::vector<char>(it, it + size_fragment), data);
- }
-}
-
-TEST_F(ReadTestSmallMaxRead, ReadSmallMaxReadShort) {
- const int n_fragment = 10;
- const int n_read = size_fragment * n_fragment;
-
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(OpenTestFile(test_file_path_, n_read));
-
- // Prepare for the read.
- std::vector<char> data(size_fragment);
- RandomizeBuffer(data.data(), data.size());
- struct fuse_out_header out_header_read = {
- .len =
- static_cast<uint32_t>(sizeof(struct fuse_out_header) + data.size()),
- };
- auto iov_out_read = FuseGenerateIovecs(out_header_read, data);
-
- for (int i = 0; i < n_fragment - 1; ++i) {
- SetServerResponse(FUSE_READ, iov_out_read);
- }
-
- // The last fragment is a short read.
- std::vector<char> half_data(data.begin(), data.begin() + (data.size() >> 1));
- struct fuse_out_header out_header_read_short = {
- .len = static_cast<uint32_t>(sizeof(struct fuse_out_header) +
- half_data.size()),
- };
- auto iov_out_read_short =
- FuseGenerateIovecs(out_header_read_short, half_data);
- SetServerResponse(FUSE_READ, iov_out_read_short);
-
- // Read the whole "file".
- std::vector<char> buf(n_read);
- EXPECT_THAT(read(fd.get(), buf.data(), n_read),
- SyscallSucceedsWithValue(n_read - (data.size() >> 1)));
-
- ASSERT_EQ(GetServerNumUnsentResponses(), 0);
- ASSERT_EQ(GetServerNumUnconsumedRequests(), n_fragment);
-
- // Check each read segment.
- struct fuse_in_header in_header_read;
- struct fuse_read_in in_payload_read;
- auto iov_in = FuseGenerateIovecs(in_header_read, in_payload_read);
-
- for (int i = 0; i < n_fragment; ++i) {
- GetServerActualRequest(iov_in);
- EXPECT_EQ(in_payload_read.fh, test_fh_);
- EXPECT_EQ(in_header_read.len,
- sizeof(in_header_read) + sizeof(in_payload_read));
- EXPECT_EQ(in_header_read.opcode, FUSE_READ);
- EXPECT_EQ(in_payload_read.offset, i * size_fragment);
- EXPECT_EQ(in_payload_read.size, size_fragment);
-
- auto it = buf.begin() + i * size_fragment;
- if (i != n_fragment - 1) {
- EXPECT_EQ(std::vector<char>(it, it + data.size()), data);
- } else {
- EXPECT_EQ(std::vector<char>(it, it + half_data.size()), half_data);
- }
- }
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/fuse/linux/readdir_test.cc b/test/fuse/linux/readdir_test.cc
deleted file mode 100644
index 2afb4b062..000000000
--- a/test/fuse/linux/readdir_test.cc
+++ /dev/null
@@ -1,193 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <dirent.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <linux/fuse.h>
-#include <linux/unistd.h>
-#include <sys/stat.h>
-#include <sys/statfs.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <string>
-
-#include "gtest/gtest.h"
-#include "test/fuse/linux/fuse_base.h"
-#include "test/util/fuse_util.h"
-#include "test/util/test_util.h"
-
-#define FUSE_NAME_OFFSET offsetof(struct fuse_dirent, name)
-#define FUSE_DIRENT_ALIGN(x) \
- (((x) + sizeof(uint64_t) - 1) & ~(sizeof(uint64_t) - 1))
-#define FUSE_DIRENT_SIZE(d) FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + (d)->namelen)
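-
- // Editorial note (not part of the original file): FUSE_NAME_OFFSET is the
- // fixed fuse_dirent prefix (ino, off, namelen and type, i.e. 24 bytes), so
- // an 8-character name such as "testFile" yields an entry length of
- // 24 + 8 = 32, which is already 8-byte aligned and is what
- // FUSE_DIRENT_ALIGN returns.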
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-class ReaddirTest : public FuseTest {
- public:
- void fill_fuse_dirent(char *buf, const char *name, uint64_t ino) {
- size_t namelen = strlen(name);
- size_t entlen = FUSE_NAME_OFFSET + namelen;
- size_t entlen_padded = FUSE_DIRENT_ALIGN(entlen);
- struct fuse_dirent *dirent;
-
- dirent = reinterpret_cast<struct fuse_dirent *>(buf);
- dirent->ino = ino;
- dirent->namelen = namelen;
- memcpy(dirent->name, name, namelen);
- memset(dirent->name + namelen, 0, entlen_padded - entlen);
- }
-
- protected:
- const std::string test_dir_name_ = "test_dir";
-};
-
-TEST_F(ReaddirTest, SingleEntry) {
- const std::string test_dir_path =
- JoinPath(mount_point_.path().c_str(), test_dir_name_);
-
- const uint64_t ino_dir = 1024;
- // We need to make sure the test dir is a directory that can be found.
- mode_t expected_mode =
- S_IFDIR | S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH;
- struct fuse_attr dir_attr = {
- .ino = ino_dir,
- .size = 512,
- .blocks = 4,
- .mode = expected_mode,
- .blksize = 4096,
- };
-
- // We need to make sure the test dir is a directory that can be found.
- struct fuse_out_header lookup_header = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_entry_out),
- };
- struct fuse_entry_out lookup_payload = {
- .nodeid = 1,
- .entry_valid = true,
- .attr_valid = true,
- .attr = dir_attr,
- };
-
- struct fuse_out_header open_header = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_open_out),
- };
- struct fuse_open_out open_payload = {
- .fh = 1,
- };
- auto iov_out = FuseGenerateIovecs(lookup_header, lookup_payload);
- SetServerResponse(FUSE_LOOKUP, iov_out);
-
- iov_out = FuseGenerateIovecs(open_header, open_payload);
- SetServerResponse(FUSE_OPENDIR, iov_out);
-
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_dir_path.c_str(), O_RDONLY));
-
- // Opening the directory issues two FUSE requests: a LOOKUP of the directory
- // and the OPENDIR itself. We don't need to inspect those headers in this
- // test.
- SkipServerActualRequest(); // LOOKUP.
- SkipServerActualRequest(); // OPENDIR.
-
- // Readdir test code.
- std::string dot = ".";
- std::string dot_dot = "..";
- std::string test_file = "testFile";
-
- // Figure out how many dirents to send over and allocate them appropriately.
- // Each dirent has a dynamic name and a static metadata part. The dirent size
- // is aligned up to a multiple of 8 bytes.
- size_t dot_file_dirent_size =
- FUSE_DIRENT_ALIGN(dot.length() + FUSE_NAME_OFFSET);
- size_t dot_dot_file_dirent_size =
- FUSE_DIRENT_ALIGN(dot_dot.length() + FUSE_NAME_OFFSET);
- size_t test_file_dirent_size =
- FUSE_DIRENT_ALIGN(test_file.length() + FUSE_NAME_OFFSET);
-
- // Create an appropriately sized payload.
- size_t readdir_payload_size =
- test_file_dirent_size + dot_file_dirent_size + dot_dot_file_dirent_size;
- std::vector<char> readdir_payload_vec(readdir_payload_size);
- char *readdir_payload = readdir_payload_vec.data();
-
- // Use fake ino for other directories.
- fill_fuse_dirent(readdir_payload, dot.c_str(), ino_dir - 2);
- fill_fuse_dirent(readdir_payload + dot_file_dirent_size, dot_dot.c_str(),
- ino_dir - 1);
- fill_fuse_dirent(
- readdir_payload + dot_file_dirent_size + dot_dot_file_dirent_size,
- test_file.c_str(), ino_dir);
-
- struct fuse_out_header readdir_header = {
- .len = uint32_t(sizeof(struct fuse_out_header) + readdir_payload_size),
- };
- struct fuse_out_header readdir_header_break = {
- .len = uint32_t(sizeof(struct fuse_out_header)),
- };
-
- iov_out = FuseGenerateIovecs(readdir_header, readdir_payload_vec);
- SetServerResponse(FUSE_READDIR, iov_out);
-
- iov_out = FuseGenerateIovecs(readdir_header_break);
- SetServerResponse(FUSE_READDIR, iov_out);
-
- std::vector<char> buf(4090, 0);
- int nread, off = 0, i = 0;
- EXPECT_THAT(
- nread = syscall(__NR_getdents64, fd.get(), buf.data(), buf.size()),
- SyscallSucceeds());
- for (; off < nread;) {
- struct dirent64 *ent = (struct dirent64 *)(buf.data() + off);
- off += ent->d_reclen;
- switch (i++) {
- case 0:
- EXPECT_EQ(std::string(ent->d_name), dot);
- break;
- case 1:
- EXPECT_EQ(std::string(ent->d_name), dot_dot);
- break;
- case 2:
- EXPECT_EQ(std::string(ent->d_name), test_file);
- break;
- }
- }
-
- EXPECT_THAT(
- nread = syscall(__NR_getdents64, fd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(0));
-
- SkipServerActualRequest(); // READDIR.
- SkipServerActualRequest(); // READDIR with no data.
-
- // Clean up.
- fd.reset(-1);
-
- struct fuse_in_header in_header;
- struct fuse_release_in in_payload;
-
- auto iov_in = FuseGenerateIovecs(in_header, in_payload);
- GetServerActualRequest(iov_in);
- EXPECT_EQ(in_header.len, sizeof(in_header) + sizeof(in_payload));
- EXPECT_EQ(in_header.opcode, FUSE_RELEASEDIR);
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/fuse/linux/readlink_test.cc b/test/fuse/linux/readlink_test.cc
deleted file mode 100644
index 2cba8fc23..000000000
--- a/test/fuse/linux/readlink_test.cc
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <linux/fuse.h>
-#include <sys/stat.h>
-#include <sys/statfs.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <string>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "test/fuse/linux/fuse_base.h"
-#include "test/util/fuse_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-class ReadlinkTest : public FuseTest {
- protected:
- const std::string test_file_ = "test_file_";
- const mode_t perms_ = S_IRWXU | S_IRWXG | S_IRWXO;
-};
-
-TEST_F(ReadlinkTest, ReadSymLink) {
- const std::string symlink_path =
- JoinPath(mount_point_.path().c_str(), test_file_);
- SetServerInodeLookup(test_file_, S_IFLNK | perms_);
-
- struct fuse_out_header out_header = {
- .len = static_cast<uint32_t>(sizeof(struct fuse_out_header)) +
- static_cast<uint32_t>(test_file_.length()) + 1,
- };
- std::string link = test_file_;
- auto iov_out = FuseGenerateIovecs(out_header, link);
- SetServerResponse(FUSE_READLINK, iov_out);
- const std::string actual_link =
- ASSERT_NO_ERRNO_AND_VALUE(ReadLink(symlink_path));
-
- struct fuse_in_header in_header;
- auto iov_in = FuseGenerateIovecs(in_header);
- GetServerActualRequest(iov_in);
-
- EXPECT_EQ(in_header.len, sizeof(in_header));
- EXPECT_EQ(in_header.opcode, FUSE_READLINK);
- EXPECT_EQ(0, memcmp(actual_link.c_str(), link.data(), link.size()));
-
- // The next readlink should have the link cached, so it shouldn't send a new
- // request to the server.
- uint32_t received_before = GetServerTotalReceivedBytes();
- ASSERT_NO_ERRNO(ReadLink(symlink_path));
- EXPECT_EQ(GetServerTotalReceivedBytes(), received_before);
-}
-
-TEST_F(ReadlinkTest, NotSymlink) {
- const std::string test_file_path =
- JoinPath(mount_point_.path().c_str(), test_file_);
- SetServerInodeLookup(test_file_, S_IFREG | perms_);
-
- std::vector<char> buf(PATH_MAX + 1);
- ASSERT_THAT(readlink(test_file_path.c_str(), buf.data(), PATH_MAX),
- SyscallFailsWithErrno(EINVAL));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/fuse/linux/release_test.cc b/test/fuse/linux/release_test.cc
deleted file mode 100644
index b5adb0870..000000000
--- a/test/fuse/linux/release_test.cc
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <linux/fuse.h>
-#include <sys/mount.h>
-#include <sys/stat.h>
-#include <sys/statfs.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <string>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "test/fuse/linux/fuse_base.h"
-#include "test/util/fuse_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-class ReleaseTest : public FuseTest {
- protected:
- const std::string test_file_ = "test_file";
-};
-
-TEST_F(ReleaseTest, RegularFile) {
- const std::string test_file_path =
- JoinPath(mount_point_.path().c_str(), test_file_);
- SetServerInodeLookup(test_file_, S_IFREG | S_IRWXU | S_IRWXG | S_IRWXO);
-
- struct fuse_out_header out_header = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_open_out),
- };
- struct fuse_open_out out_payload = {
- .fh = 1,
- .open_flags = O_RDWR,
- };
- auto iov_out = FuseGenerateIovecs(out_header, out_payload);
- SetServerResponse(FUSE_OPEN, iov_out);
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_path, O_RDWR));
- SkipServerActualRequest();
- ASSERT_THAT(close(fd.release()), SyscallSucceeds());
-
- struct fuse_in_header in_header;
- struct fuse_release_in in_payload;
- auto iov_in = FuseGenerateIovecs(in_header, in_payload);
- GetServerActualRequest(iov_in);
-
- EXPECT_EQ(in_header.len, sizeof(in_header) + sizeof(in_payload));
- EXPECT_EQ(in_header.opcode, FUSE_RELEASE);
- EXPECT_EQ(in_payload.flags, O_RDWR);
- EXPECT_EQ(in_payload.fh, 1);
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/fuse/linux/rmdir_test.cc b/test/fuse/linux/rmdir_test.cc
deleted file mode 100644
index e3200e446..000000000
--- a/test/fuse/linux/rmdir_test.cc
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <linux/fuse.h>
-#include <sys/stat.h>
-#include <sys/statfs.h>
-#include <sys/types.h>
-#include <sys/uio.h>
-#include <unistd.h>
-
-#include <string>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "test/fuse/linux/fuse_base.h"
-#include "test/util/fs_util.h"
-#include "test/util/fuse_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-class RmDirTest : public FuseTest {
- protected:
- const std::string test_dir_name_ = "test_dir";
- const std::string test_subdir_ = "test_subdir";
- const mode_t test_dir_mode_ = S_IFDIR | S_IRWXU | S_IRWXG | S_IRWXO;
-};
-
-TEST_F(RmDirTest, NormalRmDir) {
- const std::string test_dir_path_ =
- JoinPath(mount_point_.path().c_str(), test_dir_name_);
-
- SetServerInodeLookup(test_dir_name_, test_dir_mode_);
-
- // RmDir code.
- struct fuse_out_header rmdir_header = {
- .len = sizeof(struct fuse_out_header),
- };
-
- auto iov_out = FuseGenerateIovecs(rmdir_header);
- SetServerResponse(FUSE_RMDIR, iov_out);
-
- ASSERT_THAT(rmdir(test_dir_path_.c_str()), SyscallSucceeds());
-
- struct fuse_in_header in_header;
- std::vector<char> actual_dirname(test_dir_name_.length() + 1);
- auto iov_in = FuseGenerateIovecs(in_header, actual_dirname);
- GetServerActualRequest(iov_in);
-
- EXPECT_EQ(in_header.len, sizeof(in_header) + test_dir_name_.length() + 1);
- EXPECT_EQ(in_header.opcode, FUSE_RMDIR);
- EXPECT_EQ(std::string(actual_dirname.data()), test_dir_name_);
-}
-
-TEST_F(RmDirTest, NormalRmDirSubdir) {
- SetServerInodeLookup(test_subdir_, S_IFDIR | S_IRWXU | S_IRWXG | S_IRWXO);
- const std::string test_dir_path_ =
- JoinPath(mount_point_.path().c_str(), test_subdir_, test_dir_name_);
- SetServerInodeLookup(test_dir_name_, test_dir_mode_);
-
- // RmDir code.
- struct fuse_out_header rmdir_header = {
- .len = sizeof(struct fuse_out_header),
- };
-
- auto iov_out = FuseGenerateIovecs(rmdir_header);
- SetServerResponse(FUSE_RMDIR, iov_out);
-
- ASSERT_THAT(rmdir(test_dir_path_.c_str()), SyscallSucceeds());
-
- struct fuse_in_header in_header;
- std::vector<char> actual_dirname(test_dir_name_.length() + 1);
- auto iov_in = FuseGenerateIovecs(in_header, actual_dirname);
- GetServerActualRequest(iov_in);
-
- EXPECT_EQ(in_header.len, sizeof(in_header) + test_dir_name_.length() + 1);
- EXPECT_EQ(in_header.opcode, FUSE_RMDIR);
- EXPECT_EQ(std::string(actual_dirname.data()), test_dir_name_);
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/fuse/linux/setstat_test.cc b/test/fuse/linux/setstat_test.cc
deleted file mode 100644
index 68301c775..000000000
--- a/test/fuse/linux/setstat_test.cc
+++ /dev/null
@@ -1,338 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <linux/fuse.h>
-#include <sys/stat.h>
-#include <sys/statfs.h>
-#include <sys/types.h>
-#include <sys/uio.h>
-#include <unistd.h>
-#include <utime.h>
-
-#include <string>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "test/fuse/linux/fuse_fd_util.h"
-#include "test/util/cleanup.h"
-#include "test/util/fs_util.h"
-#include "test/util/fuse_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-class SetStatTest : public FuseFdTest {
- public:
- void SetUp() override {
- FuseFdTest::SetUp();
- test_dir_path_ = JoinPath(mount_point_.path(), test_dir_);
- test_file_path_ = JoinPath(mount_point_.path(), test_file_);
- }
-
- protected:
- const uint64_t fh = 23;
- const std::string test_dir_ = "testdir";
- const std::string test_file_ = "testfile";
- const mode_t test_dir_mode_ = S_IFDIR | S_IRUSR | S_IWUSR | S_IXUSR;
- const mode_t test_file_mode_ = S_IFREG | S_IRUSR | S_IWUSR | S_IXUSR;
-
- std::string test_dir_path_;
- std::string test_file_path_;
-};
-
-TEST_F(SetStatTest, ChmodDir) {
- // Set up fixture.
- SetServerInodeLookup(test_dir_, test_dir_mode_);
- struct fuse_out_header out_header = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_attr_out),
- .error = 0,
- };
- mode_t set_mode = S_IRGRP | S_IWGRP | S_IXGRP;
- struct fuse_attr_out out_payload = {
- .attr = DefaultFuseAttr(set_mode, 2),
- };
- auto iov_out = FuseGenerateIovecs(out_header, out_payload);
- SetServerResponse(FUSE_SETATTR, iov_out);
-
- // Make syscall.
- EXPECT_THAT(chmod(test_dir_path_.c_str(), set_mode), SyscallSucceeds());
-
- // Check FUSE request.
- struct fuse_in_header in_header;
- struct fuse_setattr_in in_payload;
- auto iov_in = FuseGenerateIovecs(in_header, in_payload);
-
- GetServerActualRequest(iov_in);
- EXPECT_EQ(in_header.len, sizeof(in_header) + sizeof(in_payload));
- EXPECT_EQ(in_header.opcode, FUSE_SETATTR);
- EXPECT_EQ(in_header.uid, 0);
- EXPECT_EQ(in_header.gid, 0);
- EXPECT_EQ(in_payload.valid, FATTR_MODE);
- EXPECT_EQ(in_payload.mode, S_IFDIR | set_mode);
-}
-
-TEST_F(SetStatTest, ChownDir) {
- // Set up fixture.
- SetServerInodeLookup(test_dir_, test_dir_mode_);
- struct fuse_out_header out_header = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_attr_out),
- .error = 0,
- };
- struct fuse_attr_out out_payload = {
- .attr = DefaultFuseAttr(test_dir_mode_, 2),
- };
- auto iov_out = FuseGenerateIovecs(out_header, out_payload);
- SetServerResponse(FUSE_SETATTR, iov_out);
-
- // Make syscall.
- EXPECT_THAT(chown(test_dir_path_.c_str(), 1025, 1025), SyscallSucceeds());
-
- // Check FUSE request.
- struct fuse_in_header in_header;
- struct fuse_setattr_in in_payload;
- auto iov_in = FuseGenerateIovecs(in_header, in_payload);
-
- GetServerActualRequest(iov_in);
- EXPECT_EQ(in_header.len, sizeof(in_header) + sizeof(in_payload));
- EXPECT_EQ(in_header.opcode, FUSE_SETATTR);
- EXPECT_EQ(in_header.uid, 0);
- EXPECT_EQ(in_header.gid, 0);
- EXPECT_EQ(in_payload.valid, FATTR_UID | FATTR_GID);
- EXPECT_EQ(in_payload.uid, 1025);
- EXPECT_EQ(in_payload.gid, 1025);
-}
-
-TEST_F(SetStatTest, TruncateFile) {
- // Set up fixture.
- SetServerInodeLookup(test_file_, test_file_mode_);
- struct fuse_out_header out_header = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_attr_out),
- .error = 0,
- };
- struct fuse_attr_out out_payload = {
- .attr = DefaultFuseAttr(S_IFREG | S_IRUSR | S_IWUSR, 2),
- };
- auto iov_out = FuseGenerateIovecs(out_header, out_payload);
- SetServerResponse(FUSE_SETATTR, iov_out);
-
- // Make syscall.
- EXPECT_THAT(truncate(test_file_path_.c_str(), 321), SyscallSucceeds());
-
- // Check FUSE request.
- struct fuse_in_header in_header;
- struct fuse_setattr_in in_payload;
- auto iov_in = FuseGenerateIovecs(in_header, in_payload);
-
- GetServerActualRequest(iov_in);
- EXPECT_EQ(in_header.len, sizeof(in_header) + sizeof(in_payload));
- EXPECT_EQ(in_header.opcode, FUSE_SETATTR);
- EXPECT_EQ(in_header.uid, 0);
- EXPECT_EQ(in_header.gid, 0);
- EXPECT_EQ(in_payload.valid, FATTR_SIZE);
- EXPECT_EQ(in_payload.size, 321);
-}
-
-TEST_F(SetStatTest, UtimeFile) {
- // Set up fixture.
- SetServerInodeLookup(test_file_, test_file_mode_);
- struct fuse_out_header out_header = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_attr_out),
- .error = 0,
- };
- struct fuse_attr_out out_payload = {
- .attr = DefaultFuseAttr(S_IFREG | S_IRUSR | S_IWUSR, 2),
- };
- auto iov_out = FuseGenerateIovecs(out_header, out_payload);
- SetServerResponse(FUSE_SETATTR, iov_out);
-
- // Make syscall.
- time_t expected_atime = 1597159766, expected_mtime = 1597159765;
- struct utimbuf times = {
- .actime = expected_atime,
- .modtime = expected_mtime,
- };
- EXPECT_THAT(utime(test_file_path_.c_str(), &times), SyscallSucceeds());
-
- // Check FUSE request.
- struct fuse_in_header in_header;
- struct fuse_setattr_in in_payload;
- auto iov_in = FuseGenerateIovecs(in_header, in_payload);
-
- GetServerActualRequest(iov_in);
- EXPECT_EQ(in_header.len, sizeof(in_header) + sizeof(in_payload));
- EXPECT_EQ(in_header.opcode, FUSE_SETATTR);
- EXPECT_EQ(in_header.uid, 0);
- EXPECT_EQ(in_header.gid, 0);
- EXPECT_EQ(in_payload.valid, FATTR_ATIME | FATTR_MTIME);
- EXPECT_EQ(in_payload.atime, expected_atime);
- EXPECT_EQ(in_payload.mtime, expected_mtime);
-}
-
-TEST_F(SetStatTest, UtimesFile) {
- // Set up fixture.
- SetServerInodeLookup(test_file_, test_file_mode_);
- struct fuse_out_header out_header = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_attr_out),
- .error = 0,
- };
- struct fuse_attr_out out_payload = {
- .attr = DefaultFuseAttr(test_file_mode_, 2),
- };
- auto iov_out = FuseGenerateIovecs(out_header, out_payload);
- SetServerResponse(FUSE_SETATTR, iov_out);
-
- // Make syscall.
- struct timeval expected_times[2] = {
- {
- .tv_sec = 1597159766,
- .tv_usec = 234945,
- },
- {
- .tv_sec = 1597159765,
- .tv_usec = 232341,
- },
- };
- EXPECT_THAT(utimes(test_file_path_.c_str(), expected_times),
- SyscallSucceeds());
-
- // Check FUSE request.
- struct fuse_in_header in_header;
- struct fuse_setattr_in in_payload;
- auto iov_in = FuseGenerateIovecs(in_header, in_payload);
-
- GetServerActualRequest(iov_in);
- EXPECT_EQ(in_header.len, sizeof(in_header) + sizeof(in_payload));
- EXPECT_EQ(in_header.opcode, FUSE_SETATTR);
- EXPECT_EQ(in_header.uid, 0);
- EXPECT_EQ(in_header.gid, 0);
- EXPECT_EQ(in_payload.valid, FATTR_ATIME | FATTR_MTIME);
- EXPECT_EQ(in_payload.atime, expected_times[0].tv_sec);
- EXPECT_EQ(in_payload.atimensec, expected_times[0].tv_usec * 1000);
- EXPECT_EQ(in_payload.mtime, expected_times[1].tv_sec);
- EXPECT_EQ(in_payload.mtimensec, expected_times[1].tv_usec * 1000);
-}
-
-TEST_F(SetStatTest, FtruncateFile) {
- // Set up fixture.
- SetServerInodeLookup(test_file_, test_file_mode_);
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(OpenPath(test_file_path_, O_RDWR, fh));
- auto close_fd = CloseFD(fd);
-
- struct fuse_out_header out_header = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_attr_out),
- .error = 0,
- };
- struct fuse_attr_out out_payload = {
- .attr = DefaultFuseAttr(test_file_mode_, 2),
- };
- auto iov_out = FuseGenerateIovecs(out_header, out_payload);
- SetServerResponse(FUSE_SETATTR, iov_out);
-
- // Make syscall.
- EXPECT_THAT(ftruncate(fd.get(), 321), SyscallSucceeds());
-
- // Check FUSE request.
- struct fuse_in_header in_header;
- struct fuse_setattr_in in_payload;
- auto iov_in = FuseGenerateIovecs(in_header, in_payload);
-
- GetServerActualRequest(iov_in);
- EXPECT_EQ(in_header.len, sizeof(in_header) + sizeof(in_payload));
- EXPECT_EQ(in_header.opcode, FUSE_SETATTR);
- EXPECT_EQ(in_header.uid, 0);
- EXPECT_EQ(in_header.gid, 0);
- EXPECT_EQ(in_payload.valid, FATTR_SIZE | FATTR_FH);
- EXPECT_EQ(in_payload.fh, fh);
- EXPECT_EQ(in_payload.size, 321);
-}
-
-TEST_F(SetStatTest, FchmodFile) {
- // Set up fixture.
- SetServerInodeLookup(test_file_, test_file_mode_);
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(OpenPath(test_file_path_, O_RDWR, fh));
- auto close_fd = CloseFD(fd);
-
- struct fuse_out_header out_header = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_attr_out),
- .error = 0,
- };
- mode_t set_mode = S_IROTH | S_IWOTH | S_IXOTH;
- struct fuse_attr_out out_payload = {
- .attr = DefaultFuseAttr(set_mode, 2),
- };
- auto iov_out = FuseGenerateIovecs(out_header, out_payload);
- SetServerResponse(FUSE_SETATTR, iov_out);
-
- // Make syscall.
- EXPECT_THAT(fchmod(fd.get(), set_mode), SyscallSucceeds());
-
- // Check FUSE request.
- struct fuse_in_header in_header;
- struct fuse_setattr_in in_payload;
- auto iov_in = FuseGenerateIovecs(in_header, in_payload);
-
- GetServerActualRequest(iov_in);
- EXPECT_EQ(in_header.len, sizeof(in_header) + sizeof(in_payload));
- EXPECT_EQ(in_header.opcode, FUSE_SETATTR);
- EXPECT_EQ(in_header.uid, 0);
- EXPECT_EQ(in_header.gid, 0);
- EXPECT_EQ(in_payload.valid, FATTR_MODE | FATTR_FH);
- EXPECT_EQ(in_payload.fh, fh);
- EXPECT_EQ(in_payload.mode, S_IFREG | set_mode);
-}
-
-TEST_F(SetStatTest, FchownFile) {
- // Set up fixture.
- SetServerInodeLookup(test_file_, test_file_mode_);
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(OpenPath(test_file_path_, O_RDWR, fh));
- auto close_fd = CloseFD(fd);
-
- struct fuse_out_header out_header = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_attr_out),
- .error = 0,
- };
- struct fuse_attr_out out_payload = {
- .attr = DefaultFuseAttr(S_IFREG | S_IRUSR | S_IWUSR | S_IXUSR, 2),
- };
- auto iov_out = FuseGenerateIovecs(out_header, out_payload);
- SetServerResponse(FUSE_SETATTR, iov_out);
-
- // Make syscall.
- EXPECT_THAT(fchown(fd.get(), 1025, 1025), SyscallSucceeds());
-
- // Check FUSE request.
- struct fuse_in_header in_header;
- struct fuse_setattr_in in_payload;
- auto iov_in = FuseGenerateIovecs(in_header, in_payload);
-
- GetServerActualRequest(iov_in);
- EXPECT_EQ(in_header.len, sizeof(in_header) + sizeof(in_payload));
- EXPECT_EQ(in_header.opcode, FUSE_SETATTR);
- EXPECT_EQ(in_header.uid, 0);
- EXPECT_EQ(in_header.gid, 0);
- EXPECT_EQ(in_payload.valid, FATTR_UID | FATTR_GID | FATTR_FH);
- EXPECT_EQ(in_payload.fh, fh);
- EXPECT_EQ(in_payload.uid, 1025);
- EXPECT_EQ(in_payload.gid, 1025);
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
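All of the SetStat cases above boil down to checking which FATTR_* bits the client set in `fuse_setattr_in.valid`. Here is a standalone sketch (not part of the deleted file) that decodes those bits; the sample request in `main` mirrors what the FchmodFile case expects, but every value is illustrative.

```cpp
#include <linux/fuse.h>
#include <sys/stat.h>

#include <cstdio>

// Print which attributes a FUSE_SETATTR request asks the server to change.
static void DescribeSetAttr(const struct fuse_setattr_in& in) {
  if (in.valid & FATTR_MODE) std::printf("mode  -> %o\n", in.mode);
  if (in.valid & FATTR_UID) std::printf("uid   -> %u\n", in.uid);
  if (in.valid & FATTR_GID) std::printf("gid   -> %u\n", in.gid);
  if (in.valid & FATTR_SIZE)
    std::printf("size  -> %llu\n", (unsigned long long)in.size);
  if (in.valid & FATTR_ATIME)
    std::printf("atime -> %llu\n", (unsigned long long)in.atime);
  if (in.valid & FATTR_MTIME)
    std::printf("mtime -> %llu\n", (unsigned long long)in.mtime);
  // FATTR_FH carries a file handle; the fd-based cases above (ftruncate,
  // fchmod, fchown) expect it, while the path-based ones do not.
  if (in.valid & FATTR_FH)
    std::printf("fh    =  %llu\n", (unsigned long long)in.fh);
}

int main() {
  struct fuse_setattr_in in = {};
  in.valid = FATTR_MODE | FATTR_FH;
  in.mode = S_IFREG | S_IROTH | S_IWOTH | S_IXOTH;
  in.fh = 23;  // illustrative file handle
  DescribeSetAttr(in);
  return 0;
}
```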
diff --git a/test/fuse/linux/stat_test.cc b/test/fuse/linux/stat_test.cc
deleted file mode 100644
index 73321592b..000000000
--- a/test/fuse/linux/stat_test.cc
+++ /dev/null
@@ -1,229 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <linux/fuse.h>
-#include <sys/stat.h>
-#include <sys/statfs.h>
-#include <sys/types.h>
-#include <sys/uio.h>
-#include <unistd.h>
-
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "test/fuse/linux/fuse_fd_util.h"
-#include "test/util/cleanup.h"
-#include "test/util/fs_util.h"
-#include "test/util/fuse_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-class StatTest : public FuseFdTest {
- public:
- void SetUp() override {
- FuseFdTest::SetUp();
- test_file_path_ = JoinPath(mount_point_.path(), test_file_);
- }
-
- protected:
- bool StatsAreEqual(struct stat expected, struct stat actual) {
-    // The device number is allocated dynamically by the kernel, so we cannot
-    // know it in advance.
- actual.st_dev = expected.st_dev;
- return memcmp(&expected, &actual, sizeof(struct stat)) == 0;
- }
-
- const std::string test_file_ = "testfile";
- const mode_t expected_mode = S_IFREG | S_IRUSR | S_IWUSR;
- const uint64_t fh = 23;
-
- std::string test_file_path_;
-};
-
-TEST_F(StatTest, StatNormal) {
- // Set up fixture.
- struct fuse_attr attr = DefaultFuseAttr(expected_mode, 1);
- struct fuse_out_header out_header = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_attr_out),
- };
- struct fuse_attr_out out_payload = {
- .attr = attr,
- };
- auto iov_out = FuseGenerateIovecs(out_header, out_payload);
- SetServerResponse(FUSE_GETATTR, iov_out);
-
- // Make syscall.
- struct stat stat_buf;
- EXPECT_THAT(stat(mount_point_.path().c_str(), &stat_buf), SyscallSucceeds());
-
- // Check filesystem operation result.
- struct stat expected_stat = {
- .st_ino = attr.ino,
-#ifdef __aarch64__
- .st_mode = expected_mode,
- .st_nlink = attr.nlink,
-#else
- .st_nlink = attr.nlink,
- .st_mode = expected_mode,
-#endif
- .st_uid = attr.uid,
- .st_gid = attr.gid,
- .st_rdev = attr.rdev,
- .st_size = static_cast<off_t>(attr.size),
- .st_blksize = attr.blksize,
- .st_blocks = static_cast<blkcnt_t>(attr.blocks),
- .st_atim = (struct timespec){.tv_sec = static_cast<int>(attr.atime),
- .tv_nsec = attr.atimensec},
- .st_mtim = (struct timespec){.tv_sec = static_cast<int>(attr.mtime),
- .tv_nsec = attr.mtimensec},
- .st_ctim = (struct timespec){.tv_sec = static_cast<int>(attr.ctime),
- .tv_nsec = attr.ctimensec},
- };
- EXPECT_TRUE(StatsAreEqual(stat_buf, expected_stat));
-
- // Check FUSE request.
- struct fuse_in_header in_header;
- struct fuse_getattr_in in_payload;
- auto iov_in = FuseGenerateIovecs(in_header, in_payload);
-
- GetServerActualRequest(iov_in);
- EXPECT_EQ(in_header.opcode, FUSE_GETATTR);
- EXPECT_EQ(in_payload.getattr_flags, 0);
- EXPECT_EQ(in_payload.fh, 0);
-}
-
-TEST_F(StatTest, StatNotFound) {
- // Set up fixture.
- struct fuse_out_header out_header = {
- .len = sizeof(struct fuse_out_header),
- .error = -ENOENT,
- };
- auto iov_out = FuseGenerateIovecs(out_header);
- SetServerResponse(FUSE_GETATTR, iov_out);
-
- // Make syscall.
- struct stat stat_buf;
- EXPECT_THAT(stat(mount_point_.path().c_str(), &stat_buf),
- SyscallFailsWithErrno(ENOENT));
-
- // Check FUSE request.
- struct fuse_in_header in_header;
- struct fuse_getattr_in in_payload;
- auto iov_in = FuseGenerateIovecs(in_header, in_payload);
-
- GetServerActualRequest(iov_in);
- EXPECT_EQ(in_header.opcode, FUSE_GETATTR);
- EXPECT_EQ(in_payload.getattr_flags, 0);
- EXPECT_EQ(in_payload.fh, 0);
-}
-
-TEST_F(StatTest, FstatNormal) {
- // Set up fixture.
- SetServerInodeLookup(test_file_);
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(OpenPath(test_file_path_, O_RDONLY, fh));
- auto close_fd = CloseFD(fd);
-
- struct fuse_attr attr = DefaultFuseAttr(expected_mode, 2);
- struct fuse_out_header out_header = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_attr_out),
- };
- struct fuse_attr_out out_payload = {
- .attr = attr,
- };
- auto iov_out = FuseGenerateIovecs(out_header, out_payload);
- SetServerResponse(FUSE_GETATTR, iov_out);
-
- // Make syscall.
- struct stat stat_buf;
- EXPECT_THAT(fstat(fd.get(), &stat_buf), SyscallSucceeds());
-
- // Check filesystem operation result.
- struct stat expected_stat = {
- .st_ino = attr.ino,
-#ifdef __aarch64__
- .st_mode = expected_mode,
- .st_nlink = attr.nlink,
-#else
- .st_nlink = attr.nlink,
- .st_mode = expected_mode,
-#endif
- .st_uid = attr.uid,
- .st_gid = attr.gid,
- .st_rdev = attr.rdev,
- .st_size = static_cast<off_t>(attr.size),
- .st_blksize = attr.blksize,
- .st_blocks = static_cast<blkcnt_t>(attr.blocks),
- .st_atim = (struct timespec){.tv_sec = static_cast<int>(attr.atime),
- .tv_nsec = attr.atimensec},
- .st_mtim = (struct timespec){.tv_sec = static_cast<int>(attr.mtime),
- .tv_nsec = attr.mtimensec},
- .st_ctim = (struct timespec){.tv_sec = static_cast<int>(attr.ctime),
- .tv_nsec = attr.ctimensec},
- };
- EXPECT_TRUE(StatsAreEqual(stat_buf, expected_stat));
-
- // Check FUSE request.
- struct fuse_in_header in_header;
- struct fuse_getattr_in in_payload;
- auto iov_in = FuseGenerateIovecs(in_header, in_payload);
-
- GetServerActualRequest(iov_in);
- EXPECT_EQ(in_header.opcode, FUSE_GETATTR);
- EXPECT_EQ(in_payload.getattr_flags, 0);
- EXPECT_EQ(in_payload.fh, 0);
-}
-
-TEST_F(StatTest, StatByFileHandle) {
- // Set up fixture.
- SetServerInodeLookup(test_file_, expected_mode, 0);
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(OpenPath(test_file_path_, O_RDONLY, fh));
- auto close_fd = CloseFD(fd);
-
- struct fuse_attr attr = DefaultFuseAttr(expected_mode, 2, 0);
- struct fuse_out_header out_header = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_attr_out),
- };
- struct fuse_attr_out out_payload = {
- .attr = attr,
- };
- auto iov_out = FuseGenerateIovecs(out_header, out_payload);
- SetServerResponse(FUSE_GETATTR, iov_out);
-
- // Make syscall.
- std::vector<char> buf(1);
-  // Since this is an empty file, the read won't issue FUSE_READ, but a
-  // FUSE_GETATTR will be issued before the read completes.
- EXPECT_THAT(read(fd.get(), buf.data(), buf.size()), SyscallSucceeds());
-
- // Check FUSE request.
- struct fuse_in_header in_header;
- struct fuse_getattr_in in_payload;
- auto iov_in = FuseGenerateIovecs(in_header, in_payload);
-
- GetServerActualRequest(iov_in);
- EXPECT_EQ(in_header.opcode, FUSE_GETATTR);
- EXPECT_EQ(in_payload.getattr_flags, FUSE_GETATTR_FH);
- EXPECT_EQ(in_payload.fh, fh);
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
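The expected_stat initializers above need an `#ifdef __aarch64__` only because designated initializers must follow declaration order, and glibc's `struct stat` declares `st_mode`/`st_nlink` in a different order on arm64 than on x86_64. A portable sketch of the same `fuse_attr` → `struct stat` mapping using plain member assignment (a hypothetical helper, not part of the deleted test; values in `main` are illustrative):

```cpp
#include <linux/fuse.h>
#include <sys/stat.h>

#include <cstdio>

static struct stat FuseAttrToStat(const struct fuse_attr& attr) {
  struct stat st = {};
  st.st_ino = attr.ino;
  st.st_mode = attr.mode;
  st.st_nlink = attr.nlink;
  st.st_uid = attr.uid;
  st.st_gid = attr.gid;
  st.st_rdev = attr.rdev;
  st.st_size = static_cast<off_t>(attr.size);
  st.st_blksize = attr.blksize;
  st.st_blocks = static_cast<blkcnt_t>(attr.blocks);
  st.st_atim.tv_sec = static_cast<time_t>(attr.atime);
  st.st_atim.tv_nsec = attr.atimensec;
  st.st_mtim.tv_sec = static_cast<time_t>(attr.mtime);
  st.st_mtim.tv_nsec = attr.mtimensec;
  st.st_ctim.tv_sec = static_cast<time_t>(attr.ctime);
  st.st_ctim.tv_nsec = attr.ctimensec;
  return st;
}

int main() {
  struct fuse_attr attr = {};  // illustrative attributes
  attr.mode = S_IFREG | S_IRUSR | S_IWUSR;
  attr.size = 512;
  attr.nlink = 1;
  struct stat st = FuseAttrToStat(attr);
  std::printf("mode=%o size=%lld\n", st.st_mode, (long long)st.st_size);
  return 0;
}
```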
diff --git a/test/fuse/linux/symlink_test.cc b/test/fuse/linux/symlink_test.cc
deleted file mode 100644
index 2c3a52987..000000000
--- a/test/fuse/linux/symlink_test.cc
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <linux/fuse.h>
-#include <sys/stat.h>
-#include <sys/statfs.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <string>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "test/fuse/linux/fuse_base.h"
-#include "test/util/fuse_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-class SymlinkTest : public FuseTest {
- protected:
- const std::string target_file_ = "target_file_";
- const std::string symlink_ = "symlink_";
- const mode_t perms_ = S_IRWXU | S_IRWXG | S_IRWXO;
-};
-
-TEST_F(SymlinkTest, CreateSymLink) {
- const std::string symlink_path =
- JoinPath(mount_point_.path().c_str(), symlink_);
-
- struct fuse_out_header out_header = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_entry_out),
- };
- struct fuse_entry_out out_payload = DefaultEntryOut(S_IFLNK | perms_, 5);
- auto iov_out = FuseGenerateIovecs(out_header, out_payload);
- SetServerResponse(FUSE_SYMLINK, iov_out);
- ASSERT_THAT(symlink(target_file_.c_str(), symlink_path.c_str()),
- SyscallSucceeds());
-
- struct fuse_in_header in_header;
- std::vector<char> actual_target_file(target_file_.length() + 1);
- std::vector<char> actual_symlink(symlink_.length() + 1);
- auto iov_in =
- FuseGenerateIovecs(in_header, actual_symlink, actual_target_file);
- GetServerActualRequest(iov_in);
-
- EXPECT_EQ(in_header.len,
- sizeof(in_header) + symlink_.length() + target_file_.length() + 2);
- EXPECT_EQ(in_header.opcode, FUSE_SYMLINK);
- EXPECT_EQ(std::string(actual_target_file.data()), target_file_);
- EXPECT_EQ(std::string(actual_symlink.data()), symlink_);
-}
-
-TEST_F(SymlinkTest, FileTypeError) {
- const std::string symlink_path =
- JoinPath(mount_point_.path().c_str(), symlink_);
-
- struct fuse_out_header out_header = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_entry_out),
- };
- struct fuse_entry_out out_payload = DefaultEntryOut(S_IFREG | perms_, 5);
- auto iov_out = FuseGenerateIovecs(out_header, out_payload);
- SetServerResponse(FUSE_SYMLINK, iov_out);
- ASSERT_THAT(symlink(target_file_.c_str(), symlink_path.c_str()),
- SyscallFailsWithErrno(EIO));
- SkipServerActualRequest();
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
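CreateSymLink above reads the FUSE_SYMLINK request body back as two consecutive NUL-terminated strings: the new link's name first, then its target. A standalone sketch that decodes such a payload (the buffer contents are made up for illustration, not taken from the deleted test):

```cpp
#include <cstdio>
#include <string>
#include <vector>

int main() {
  // "symlink_\0target_file_\0" as it would sit right after fuse_in_header.
  const char raw[] = "symlink_\0target_file_";
  std::vector<char> payload(raw, raw + sizeof(raw));

  std::string link_name(payload.data());                      // up to 1st NUL
  std::string target(payload.data() + link_name.size() + 1);  // after it

  std::printf("name=%s target=%s\n", link_name.c_str(), target.c_str());
  return 0;
}
```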
diff --git a/test/fuse/linux/unlink_test.cc b/test/fuse/linux/unlink_test.cc
deleted file mode 100644
index 13efbf7c7..000000000
--- a/test/fuse/linux/unlink_test.cc
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <linux/fuse.h>
-#include <sys/mount.h>
-#include <sys/stat.h>
-#include <sys/statfs.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <string>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "test/fuse/linux/fuse_base.h"
-#include "test/util/fuse_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-class UnlinkTest : public FuseTest {
- protected:
- const std::string test_file_ = "test_file";
- const std::string test_subdir_ = "test_subdir";
-};
-
-TEST_F(UnlinkTest, RegularFile) {
- const std::string test_file_path =
- JoinPath(mount_point_.path().c_str(), test_file_);
- SetServerInodeLookup(test_file_, S_IFREG | S_IRWXU | S_IRWXG | S_IRWXO);
-
- struct fuse_out_header out_header = {
- .len = sizeof(struct fuse_out_header),
- };
- auto iov_out = FuseGenerateIovecs(out_header);
- SetServerResponse(FUSE_UNLINK, iov_out);
-
- ASSERT_THAT(unlink(test_file_path.c_str()), SyscallSucceeds());
- struct fuse_in_header in_header;
- std::vector<char> unlinked_file(test_file_.length() + 1);
- auto iov_in = FuseGenerateIovecs(in_header, unlinked_file);
- GetServerActualRequest(iov_in);
-
- EXPECT_EQ(in_header.len, sizeof(in_header) + test_file_.length() + 1);
- EXPECT_EQ(in_header.opcode, FUSE_UNLINK);
- EXPECT_EQ(std::string(unlinked_file.data()), test_file_);
-}
-
-TEST_F(UnlinkTest, RegularFileSubDir) {
- SetServerInodeLookup(test_subdir_, S_IFDIR | S_IRWXU | S_IRWXG | S_IRWXO);
- const std::string test_file_path =
- JoinPath(mount_point_.path().c_str(), test_subdir_, test_file_);
- SetServerInodeLookup(test_file_, S_IFREG | S_IRWXU | S_IRWXG | S_IRWXO);
-
- struct fuse_out_header out_header = {
- .len = sizeof(struct fuse_out_header),
- };
- auto iov_out = FuseGenerateIovecs(out_header);
- SetServerResponse(FUSE_UNLINK, iov_out);
-
- ASSERT_THAT(unlink(test_file_path.c_str()), SyscallSucceeds());
- struct fuse_in_header in_header;
- std::vector<char> unlinked_file(test_file_.length() + 1);
- auto iov_in = FuseGenerateIovecs(in_header, unlinked_file);
- GetServerActualRequest(iov_in);
-
- EXPECT_EQ(in_header.len, sizeof(in_header) + test_file_.length() + 1);
- EXPECT_EQ(in_header.opcode, FUSE_UNLINK);
- EXPECT_EQ(std::string(unlinked_file.data()), test_file_);
-}
-
-TEST_F(UnlinkTest, NoFile) {
- const std::string test_file_path =
- JoinPath(mount_point_.path().c_str(), test_file_);
- SetServerInodeLookup(test_file_, S_IFREG | S_IRWXU | S_IRWXG | S_IRWXO);
-
- struct fuse_out_header out_header = {
- .len = sizeof(struct fuse_out_header),
- .error = -ENOENT,
- };
- auto iov_out = FuseGenerateIovecs(out_header);
- SetServerResponse(FUSE_UNLINK, iov_out);
-
- ASSERT_THAT(unlink(test_file_path.c_str()), SyscallFailsWithErrno(ENOENT));
- SkipServerActualRequest();
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/fuse/linux/write_test.cc b/test/fuse/linux/write_test.cc
deleted file mode 100644
index 1a62beb96..000000000
--- a/test/fuse/linux/write_test.cc
+++ /dev/null
@@ -1,303 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <linux/fuse.h>
-#include <sys/stat.h>
-#include <sys/statfs.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <string>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "test/fuse/linux/fuse_base.h"
-#include "test/util/fuse_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-class WriteTest : public FuseTest {
- void SetUp() override {
- FuseTest::SetUp();
- test_file_path_ = JoinPath(mount_point_.path().c_str(), test_file_);
- }
-
-  // TearDown overrides the parent's method to skip checking for the
-  // unconsumed FUSE_RELEASE request at the end.
- void TearDown() override { UnmountFuse(); }
-
- protected:
- const std::string test_file_ = "test_file";
- const mode_t test_file_mode_ = S_IFREG | S_IRWXU | S_IRWXG | S_IRWXO;
- const uint64_t test_fh_ = 1;
- const uint32_t open_flag_ = O_RDWR;
-
- std::string test_file_path_;
-
- PosixErrorOr<FileDescriptor> OpenTestFile(const std::string &path,
- uint64_t size = 512) {
- SetServerInodeLookup(test_file_, test_file_mode_, size);
-
- struct fuse_out_header out_header_open = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_open_out),
- };
- struct fuse_open_out out_payload_open = {
- .fh = test_fh_,
- .open_flags = open_flag_,
- };
- auto iov_out_open = FuseGenerateIovecs(out_header_open, out_payload_open);
- SetServerResponse(FUSE_OPEN, iov_out_open);
-
- auto res = Open(path.c_str(), open_flag_);
- if (res.ok()) {
- SkipServerActualRequest();
- }
- return res;
- }
-};
-
-class WriteTestSmallMaxWrite : public WriteTest {
- void SetUp() override {
- MountFuse();
- SetUpFuseServer(&fuse_init_payload);
- test_file_path_ = JoinPath(mount_point_.path().c_str(), test_file_);
- }
-
- protected:
- const static uint32_t max_write_ = 4096;
- constexpr static struct fuse_init_out fuse_init_payload = {
- .major = 7,
- .max_write = max_write_,
- };
-
- const uint32_t size_fragment = max_write_;
-};
-
-TEST_F(WriteTest, WriteNormal) {
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(OpenTestFile(test_file_path_));
-
- // Prepare for the write.
- const int n_write = 10;
- struct fuse_out_header out_header_write = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_write_out),
- };
- struct fuse_write_out out_payload_write = {
- .size = n_write,
- };
- auto iov_out_write = FuseGenerateIovecs(out_header_write, out_payload_write);
- SetServerResponse(FUSE_WRITE, iov_out_write);
-
- // Issue the write.
- std::vector<char> buf(n_write);
- RandomizeBuffer(buf.data(), buf.size());
- EXPECT_THAT(write(fd.get(), buf.data(), n_write),
- SyscallSucceedsWithValue(n_write));
-
- // Check the write request.
- struct fuse_in_header in_header_write;
- struct fuse_write_in in_payload_write;
- std::vector<char> payload_buf(n_write);
- auto iov_in_write =
- FuseGenerateIovecs(in_header_write, in_payload_write, payload_buf);
- GetServerActualRequest(iov_in_write);
-
- EXPECT_EQ(in_payload_write.fh, test_fh_);
- EXPECT_EQ(in_header_write.len,
- sizeof(in_header_write) + sizeof(in_payload_write));
- EXPECT_EQ(in_header_write.opcode, FUSE_WRITE);
- EXPECT_EQ(in_payload_write.offset, 0);
- EXPECT_EQ(in_payload_write.size, n_write);
- EXPECT_EQ(buf, payload_buf);
-}
-
-TEST_F(WriteTest, WriteShort) {
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(OpenTestFile(test_file_path_));
-
- // Prepare for the write.
- const int n_write = 10, n_written = 5;
- struct fuse_out_header out_header_write = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_write_out),
- };
- struct fuse_write_out out_payload_write = {
- .size = n_written,
- };
- auto iov_out_write = FuseGenerateIovecs(out_header_write, out_payload_write);
- SetServerResponse(FUSE_WRITE, iov_out_write);
-
- // Issue the write.
- std::vector<char> buf(n_write);
- RandomizeBuffer(buf.data(), buf.size());
- EXPECT_THAT(write(fd.get(), buf.data(), n_write),
- SyscallSucceedsWithValue(n_written));
-
- // Check the write request.
- struct fuse_in_header in_header_write;
- struct fuse_write_in in_payload_write;
- std::vector<char> payload_buf(n_write);
- auto iov_in_write =
- FuseGenerateIovecs(in_header_write, in_payload_write, payload_buf);
- GetServerActualRequest(iov_in_write);
-
- EXPECT_EQ(in_payload_write.fh, test_fh_);
- EXPECT_EQ(in_header_write.len,
- sizeof(in_header_write) + sizeof(in_payload_write));
- EXPECT_EQ(in_header_write.opcode, FUSE_WRITE);
- EXPECT_EQ(in_payload_write.offset, 0);
- EXPECT_EQ(in_payload_write.size, n_write);
- EXPECT_EQ(buf, payload_buf);
-}
-
-TEST_F(WriteTest, WriteShortZero) {
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(OpenTestFile(test_file_path_));
-
- // Prepare for the write.
- const int n_write = 10;
- struct fuse_out_header out_header_write = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_write_out),
- };
- struct fuse_write_out out_payload_write = {
- .size = 0,
- };
- auto iov_out_write = FuseGenerateIovecs(out_header_write, out_payload_write);
- SetServerResponse(FUSE_WRITE, iov_out_write);
-
- // Issue the write.
- std::vector<char> buf(n_write);
- RandomizeBuffer(buf.data(), buf.size());
- EXPECT_THAT(write(fd.get(), buf.data(), n_write), SyscallFailsWithErrno(EIO));
-
- // Check the write request.
- struct fuse_in_header in_header_write;
- struct fuse_write_in in_payload_write;
- std::vector<char> payload_buf(n_write);
- auto iov_in_write =
- FuseGenerateIovecs(in_header_write, in_payload_write, payload_buf);
- GetServerActualRequest(iov_in_write);
-
- EXPECT_EQ(in_payload_write.fh, test_fh_);
- EXPECT_EQ(in_header_write.len,
- sizeof(in_header_write) + sizeof(in_payload_write));
- EXPECT_EQ(in_header_write.opcode, FUSE_WRITE);
- EXPECT_EQ(in_payload_write.offset, 0);
- EXPECT_EQ(in_payload_write.size, n_write);
- EXPECT_EQ(buf, payload_buf);
-}
-
-TEST_F(WriteTest, WriteZero) {
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(OpenTestFile(test_file_path_));
-
- // Issue the write.
- std::vector<char> buf(0);
- EXPECT_THAT(write(fd.get(), buf.data(), 0), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(WriteTest, PWrite) {
- const int file_size = 512;
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(OpenTestFile(test_file_path_, file_size));
-
- // Prepare for the write.
- const int n_write = 10;
- struct fuse_out_header out_header_write = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_write_out),
- };
- struct fuse_write_out out_payload_write = {
- .size = n_write,
- };
- auto iov_out_write = FuseGenerateIovecs(out_header_write, out_payload_write);
- SetServerResponse(FUSE_WRITE, iov_out_write);
-
- // Issue the write.
- std::vector<char> buf(n_write);
- RandomizeBuffer(buf.data(), buf.size());
- const int offset_write = file_size >> 1;
- EXPECT_THAT(pwrite(fd.get(), buf.data(), n_write, offset_write),
- SyscallSucceedsWithValue(n_write));
-
- // Check the write request.
- struct fuse_in_header in_header_write;
- struct fuse_write_in in_payload_write;
- std::vector<char> payload_buf(n_write);
- auto iov_in_write =
- FuseGenerateIovecs(in_header_write, in_payload_write, payload_buf);
- GetServerActualRequest(iov_in_write);
-
- EXPECT_EQ(in_payload_write.fh, test_fh_);
- EXPECT_EQ(in_header_write.len,
- sizeof(in_header_write) + sizeof(in_payload_write));
- EXPECT_EQ(in_header_write.opcode, FUSE_WRITE);
- EXPECT_EQ(in_payload_write.offset, offset_write);
- EXPECT_EQ(in_payload_write.size, n_write);
- EXPECT_EQ(buf, payload_buf);
-}
-
-TEST_F(WriteTestSmallMaxWrite, WriteSmallMaxWrite) {
- const int n_fragment = 10;
- const int n_write = size_fragment * n_fragment;
-
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(OpenTestFile(test_file_path_, n_write));
-
- // Prepare for the write.
- struct fuse_out_header out_header_write = {
- .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_write_out),
- };
- struct fuse_write_out out_payload_write = {
- .size = size_fragment,
- };
- auto iov_out_write = FuseGenerateIovecs(out_header_write, out_payload_write);
-
- for (int i = 0; i < n_fragment; ++i) {
- SetServerResponse(FUSE_WRITE, iov_out_write);
- }
-
- // Issue the write.
- std::vector<char> buf(n_write);
- RandomizeBuffer(buf.data(), buf.size());
- EXPECT_THAT(write(fd.get(), buf.data(), n_write),
- SyscallSucceedsWithValue(n_write));
-
- ASSERT_EQ(GetServerNumUnsentResponses(), 0);
- ASSERT_EQ(GetServerNumUnconsumedRequests(), n_fragment);
-
- // Check the write request.
- struct fuse_in_header in_header_write;
- struct fuse_write_in in_payload_write;
- std::vector<char> payload_buf(size_fragment);
- auto iov_in_write =
- FuseGenerateIovecs(in_header_write, in_payload_write, payload_buf);
-
- for (int i = 0; i < n_fragment; ++i) {
- GetServerActualRequest(iov_in_write);
-
- EXPECT_EQ(in_payload_write.fh, test_fh_);
- EXPECT_EQ(in_header_write.len,
- sizeof(in_header_write) + sizeof(in_payload_write));
- EXPECT_EQ(in_header_write.opcode, FUSE_WRITE);
- EXPECT_EQ(in_payload_write.offset, i * size_fragment);
- EXPECT_EQ(in_payload_write.size, size_fragment);
-
- auto it = buf.begin() + i * size_fragment;
- EXPECT_EQ(std::vector<char>(it, it + size_fragment), payload_buf);
- }
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
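WriteSmallMaxWrite above relies on a large write being split into `max_write`-sized FUSE_WRITE requests with increasing offsets. A minimal sketch of that fragmentation arithmetic (not part of the deleted file; the constants are illustrative, not negotiated values):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kMaxWrite = 4096;         // from fuse_init_out.max_write
  const uint64_t kTotal = 10 * kMaxWrite;  // size of the user's write()

  // Each FUSE_WRITE carries at most kMaxWrite bytes, so the write surfaces
  // as ceil(kTotal / kMaxWrite) requests at increasing offsets.
  for (uint64_t offset = 0; offset < kTotal; offset += kMaxWrite) {
    const uint64_t chunk =
        (kTotal - offset < kMaxWrite) ? (kTotal - offset) : kMaxWrite;
    std::printf("FUSE_WRITE offset=%llu size=%llu\n",
                (unsigned long long)offset, (unsigned long long)chunk);
  }
  return 0;
}
```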
diff --git a/test/image/BUILD b/test/image/BUILD
deleted file mode 100644
index e749e47d4..000000000
--- a/test/image/BUILD
+++ /dev/null
@@ -1,33 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_test(
- name = "image_test",
- size = "large",
- srcs = [
- "image_test.go",
- ],
- data = [
- "latin10k.txt",
- "mysql.sql",
- "ruby.rb",
- "ruby.sh",
- ],
- library = ":image",
- tags = [
- # Requires docker and runsc to be configured before the test runs.
- "manual",
- "local",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/test/dockerutil",
- "//pkg/test/testutil",
- ],
-)
-
-go_library(
- name = "image",
- srcs = ["image.go"],
-)
diff --git a/test/image/image.go b/test/image/image.go
deleted file mode 100644
index 297f1ab92..000000000
--- a/test/image/image.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package image is empty. See image_test.go for description.
-package image
diff --git a/test/image/image_test.go b/test/image/image_test.go
deleted file mode 100644
index 952264173..000000000
--- a/test/image/image_test.go
+++ /dev/null
@@ -1,326 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package image provides end-to-end image tests for runsc.
-//
-// Each test calls docker commands to start up a container, and tests that it
-// is behaving properly, like connecting to a port or looking at the output.
-// The container is killed and deleted at the end.
-//
-// Setup instructions are in test/README.md.
-package image
-
-import (
- "context"
- "flag"
- "fmt"
- "io/ioutil"
- "log"
- "net/http"
- "os"
- "strings"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/pkg/test/testutil"
-)
-
-// defaultWait defines how long to wait for progress.
-//
-// See BUILD: This is at least a "large" test, so allow up to 1 minute for any
-// given "wait" step. Note that all tests are run in parallel, which may cause
-// individual slow-downs (but a huge speed-up in aggregate).
-const defaultWait = time.Minute
-
-func TestHelloWorld(t *testing.T) {
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- // Run the basic container.
- out, err := d.Run(ctx, dockerutil.RunOpts{
- Image: "basic/alpine",
- }, "echo", "Hello world!")
- if err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
-
- // Check the output.
- if !strings.Contains(out, "Hello world!") {
- t.Fatalf("docker didn't say hello: got %s", out)
- }
-}
-
-func runHTTPRequest(ip string, port int) error {
- url := fmt.Sprintf("http://%s:%d/not-found", ip, port)
- resp, err := http.Get(url)
- if err != nil {
- return fmt.Errorf("error reaching http server: %v", err)
- }
- if want := http.StatusNotFound; resp.StatusCode != want {
- return fmt.Errorf("Wrong response code, got: %d, want: %d", resp.StatusCode, want)
- }
-
- url = fmt.Sprintf("http://%s:%d/latin10k.txt", ip, port)
- resp, err = http.Get(url)
- if err != nil {
- return fmt.Errorf("Error reaching http server: %v", err)
- }
- if want := http.StatusOK; resp.StatusCode != want {
- return fmt.Errorf("Wrong response code, got: %d, want: %d", resp.StatusCode, want)
- }
-
-	defer resp.Body.Close()
-	body, err := ioutil.ReadAll(resp.Body)
-	if err != nil {
-		return fmt.Errorf("Error reading http response: %v", err)
-	}
-
-	// READALL is the last word in the file. Ensures everything was read.
-	if want := "READALL"; !strings.HasSuffix(string(body), want) {
-		return fmt.Errorf("response doesn't end with %q, resp: %q", want, body)
- }
- return nil
-}
-
-func testHTTPServer(t *testing.T, ip string, port int) {
- const requests = 10
- ch := make(chan error, requests)
- for i := 0; i < requests; i++ {
- go func() {
- start := time.Now()
- err := runHTTPRequest(ip, port)
- log.Printf("Response time %v: %v", time.Since(start).String(), err)
- ch <- err
- }()
- }
-
- for i := 0; i < requests; i++ {
- err := <-ch
- if err != nil {
- t.Errorf("testHTTPServer(%s, %d) failed: %v", ip, port, err)
- }
- }
-}
-
-func TestHttpd(t *testing.T) {
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- // Start the container.
- port := 80
- opts := dockerutil.RunOpts{
- Image: "basic/httpd",
- Ports: []int{port},
- }
- d.CopyFiles(&opts, "/usr/local/apache2/htdocs", "test/image/latin10k.txt")
- if err := d.Spawn(ctx, opts); err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
-
- // Find container IP address.
- ip, err := d.FindIP(ctx, false)
- if err != nil {
- t.Fatalf("docker.FindIP failed: %v", err)
- }
-
- // Wait until it's up and running.
- if err := testutil.WaitForHTTP(ip.String(), port, defaultWait); err != nil {
- t.Errorf("WaitForHTTP() timeout: %v", err)
- }
-
- testHTTPServer(t, ip.String(), port)
-}
-
-func TestNginx(t *testing.T) {
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- // Start the container.
- port := 80
- opts := dockerutil.RunOpts{
- Image: "basic/nginx",
- Ports: []int{port},
- }
- d.CopyFiles(&opts, "/usr/share/nginx/html", "test/image/latin10k.txt")
- if err := d.Spawn(ctx, opts); err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
-
- // Find container IP address.
- ip, err := d.FindIP(ctx, false)
- if err != nil {
- t.Fatalf("docker.FindIP failed: %v", err)
- }
-
- // Wait until it's up and running.
- if err := testutil.WaitForHTTP(ip.String(), port, defaultWait); err != nil {
- t.Errorf("WaitForHTTP() timeout: %v", err)
- }
-
- testHTTPServer(t, ip.String(), port)
-}
-
-func TestMysql(t *testing.T) {
- ctx := context.Background()
- server := dockerutil.MakeContainer(ctx, t)
- defer server.CleanUp(ctx)
-
- // Start the container.
- if err := server.Spawn(ctx, dockerutil.RunOpts{
- Image: "basic/mysql",
- Env: []string{
- "MYSQL_ROOT_PASSWORD=foobar123",
- "MYSQL_ROOT_HOST=%", // Allow anyone to connect to the server.
- },
- }); err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
-
- // Wait until it's up and running.
- if _, err := server.WaitForOutput(ctx, "port: 3306 MySQL Community Server", defaultWait); err != nil {
- t.Fatalf("WaitForOutput() timeout: %v", err)
- }
-
- // Generate the client and copy in the SQL payload.
- client := dockerutil.MakeContainer(ctx, t)
- defer client.CleanUp(ctx)
-
-	// Tell the mysql client to connect to the server and execute the file in
-	// verbose mode so the output can be verified.
- opts := dockerutil.RunOpts{
- Image: "basic/mysql",
- Links: []string{server.MakeLink("mysql")},
- }
- client.CopyFiles(&opts, "/sql", "test/image/mysql.sql")
- if _, err := client.Run(ctx, opts, "mysql", "-hmysql", "-uroot", "-pfoobar123", "-v", "-e", "source /sql/mysql.sql"); err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
-
- // Ensure file executed to the end and shutdown mysql.
- if _, err := server.WaitForOutput(ctx, "mysqld: Shutdown complete", defaultWait); err != nil {
- t.Fatalf("WaitForOutput() timeout: %v", err)
- }
-}
-
-func TestTomcat(t *testing.T) {
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- // Start the server.
- port := 8080
- if err := d.Spawn(ctx, dockerutil.RunOpts{
- Image: "basic/tomcat",
- Ports: []int{port},
- }); err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
-
- // Find container IP address.
- ip, err := d.FindIP(ctx, false)
- if err != nil {
- t.Fatalf("docker.FindIP failed: %v", err)
- }
-
- // Wait until it's up and running.
- if err := testutil.WaitForHTTP(ip.String(), port, defaultWait); err != nil {
- t.Fatalf("WaitForHTTP() timeout: %v", err)
- }
-
- // Ensure that content is being served.
- url := fmt.Sprintf("http://%s:%d", ip.String(), port)
- resp, err := http.Get(url)
- if err != nil {
-		t.Fatalf("Error reaching http server: %v", err)
- }
- if want := http.StatusOK; resp.StatusCode != want {
- t.Errorf("Wrong response code, got: %d, want: %d", resp.StatusCode, want)
- }
-}
-
-func TestRuby(t *testing.T) {
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- // Execute the ruby workload.
- port := 8080
- opts := dockerutil.RunOpts{
- Image: "basic/ruby",
- Ports: []int{port},
- }
- d.CopyFiles(&opts, "/src", "test/image/ruby.rb", "test/image/ruby.sh")
- if err := d.Spawn(ctx, opts, "/src/ruby.sh"); err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
-
- // Find container IP address.
- ip, err := d.FindIP(ctx, false)
- if err != nil {
- t.Fatalf("docker.FindIP failed: %v", err)
- }
-
-	// Wait until it's up and running; 'gem install' can take some time.
- if err := testutil.WaitForHTTP(ip.String(), port, time.Minute); err != nil {
- t.Fatalf("WaitForHTTP() timeout: %v", err)
- }
-
- // Ensure that content is being served.
- url := fmt.Sprintf("http://%s:%d", ip.String(), port)
- resp, err := http.Get(url)
- if err != nil {
-		t.Fatalf("error reaching http server: %v", err)
- }
- if want := http.StatusOK; resp.StatusCode != want {
- t.Errorf("wrong response code, got: %d, want: %d", resp.StatusCode, want)
- }
- body, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- t.Fatalf("error reading body: %v", err)
- }
- if got, want := string(body), "Hello World"; !strings.Contains(got, want) {
- t.Errorf("invalid body content, got: %q, want: %q", got, want)
- }
-}
-
-func TestStdio(t *testing.T) {
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- wantStdout := "hello stdout"
- wantStderr := "bonjour stderr"
- cmd := fmt.Sprintf("echo %q; echo %q 1>&2;", wantStdout, wantStderr)
- if err := d.Spawn(ctx, dockerutil.RunOpts{
- Image: "basic/alpine",
- }, "/bin/sh", "-c", cmd); err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
-
- for _, want := range []string{wantStdout, wantStderr} {
- if _, err := d.WaitForOutput(ctx, want, defaultWait); err != nil {
- t.Fatalf("docker didn't get output %q : %v", want, err)
- }
- }
-}
-
-func TestMain(m *testing.M) {
- dockerutil.EnsureSupportedDockerVersion()
- flag.Parse()
- os.Exit(m.Run())
-}
diff --git a/test/image/latin10k.txt b/test/image/latin10k.txt
deleted file mode 100644
index 61341e00b..000000000
--- a/test/image/latin10k.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras ut placerat felis. Maecenas urna est, auctor a efficitur sit amet, egestas et augue. Curabitur dignissim scelerisque nunc vel cursus. Ut vehicula est pretium, consectetur nunc non, pharetra ligula. Curabitur ut ultricies metus. Suspendisse pulvinar, orci sed fermentum vestibulum, eros turpis molestie lectus, nec elementum risus dolor mattis felis. Donec ultrices ipsum sem, at pretium lacus convallis at. Mauris nulla enim, tincidunt non bibendum at, vehicula pulvinar mauris.
-
-Duis in dapibus turpis. Pellentesque maximus magna odio, ac congue libero laoreet quis. Maecenas euismod risus in justo aliquam accumsan. Nunc quis ornare arcu, sit amet sodales elit. Phasellus nec scelerisque nisl, a tincidunt arcu. Proin ornare est nunc, sed suscipit orci interdum et. Suspendisse condimentum venenatis diam in tempor. Aliquam egestas lectus in rutrum tempus. Donec id egestas eros. Donec molestie consequat purus, sed posuere odio venenatis vitae. Nunc placerat augue id vehicula varius. In hac habitasse platea dictumst. Proin at est accumsan, venenatis quam a, fermentum risus. Phasellus posuere pellentesque enim, id suscipit magna consequat ut. Quisque ut tortor ante.
-
-Cras ut vulputate metus, a laoreet lectus. Vivamus ultrices molestie odio in tristique. Morbi faucibus mi eget sollicitudin fringilla. Fusce vitae lacinia ligula. Sed egestas sed diam eu posuere. Maecenas justo nisl, venenatis vel nibh vel, cursus aliquam velit. Praesent lacinia dui id erat venenatis rhoncus. Morbi gravida felis ante, sit amet vehicula orci rhoncus vitae.
-
-Sed finibus sagittis dictum. Proin auctor suscipit sem et mattis. Phasellus libero ligula, pellentesque ut felis porttitor, fermentum sollicitudin orci. Nulla eu nulla nibh. Fusce a eros risus. Proin vel magna risus. Donec nec elit eleifend, scelerisque sapien vitae, pharetra quam. Donec porttitor mauris scelerisque, tempus orci hendrerit, dapibus felis. Nullam libero elit, sollicitudin a aliquam at, ultrices in erat. Mauris eget ligula sodales, porta turpis et, scelerisque odio. Mauris mollis leo vitae purus gravida, in tempor nunc efficitur. Nulla facilisis posuere augue, nec pellentesque lectus eleifend ac. Vestibulum convallis est a feugiat tincidunt. Donec vitae enim volutpat, tincidunt eros eu, malesuada nibh.
-
-Quisque molestie, magna ornare elementum convallis, erat enim sagittis ipsum, eget porttitor sapien arcu id purus. Donec ut cursus diam. Nulla rutrum nulla et mi fermentum, vel tempus tellus posuere. Proin vitae pharetra nulla, nec ornare ex. Nulla consequat, augue a accumsan euismod, turpis leo ornare ligula, a pulvinar enim dolor ut augue. Quisque volutpat, lectus a varius mollis, nisl eros feugiat sem, at egestas lacus justo eu elit. Vestibulum scelerisque mauris est, sagittis interdum nunc accumsan sit amet. Maecenas aliquet ex ut lacus ornare, eu sagittis nibh imperdiet. Duis ultrices nisi velit, sed sodales risus sollicitudin et. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Etiam a accumsan augue, vitae pulvinar nulla. Pellentesque euismod sodales magna, nec luctus eros mattis eget. Sed lacinia suscipit lectus, eget consectetur dui pellentesque sed. Nullam nec mattis tellus.
-
-Aliquam erat volutpat. Praesent lobortis massa porttitor eros tincidunt, nec consequat diam pharetra. Duis efficitur non lorem sed mattis. Suspendisse justo nunc, pulvinar eu porttitor at, facilisis id eros. Suspendisse potenti. Cras molestie aliquet orci ut fermentum. In tempus aliquet eros nec suscipit. Suspendisse in mauris ut lectus ultrices blandit sit amet vitae est. Nam magna massa, porttitor ut semper id, feugiat vel quam. Suspendisse dignissim posuere scelerisque. Donec scelerisque lorem efficitur suscipit suscipit. Nunc luctus ligula et scelerisque lacinia.
-
-Suspendisse potenti. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Sed ultrices, sem in venenatis scelerisque, tellus ipsum porttitor urna, et iaculis lectus odio ac nisi. Integer luctus dui urna, at sollicitudin elit dapibus eu. Praesent nibh ante, porttitor a ante in, ullamcorper pretium felis. Aliquam vel tortor imperdiet, imperdiet lorem et, cursus mi. Proin tempus velit est, ut hendrerit metus gravida sed. Sed nibh sapien, faucibus quis ipsum in, scelerisque lacinia elit. In nec magna eu magna laoreet rhoncus. Donec vitae rutrum mauris. Integer urna felis, consequat at rhoncus vitae, auctor quis elit. Duis a pulvinar sem, nec gravida nisl. Nam non dapibus purus. Praesent vestibulum turpis nec erat porttitor, a scelerisque purus tincidunt.
-
-Nam fringilla leo nisi, nec placerat nisl luctus eget. Aenean malesuada nunc porta sapien sodales convallis. Suspendisse ut massa tempor, ullamcorper mi ut, faucibus turpis. Vivamus at sagittis metus. Donec varius ac mi eget sodales. Nulla feugiat, nulla eu fringilla fringilla, nunc lorem sollicitudin quam, vitae lacinia velit lorem eu orci. Mauris leo urna, pellentesque ac posuere non, pellentesque sit amet quam.
-
-Vestibulum porta diam urna, a aliquet nibh vestibulum et. Proin interdum bibendum nisl sed rhoncus. Sed vel diam hendrerit, faucibus ante et, hendrerit diam. Nunc dolor augue, mattis non dolor vel, luctus sodales neque. Cras malesuada fermentum dolor eu lobortis. Integer dapibus volutpat consequat. Maecenas posuere feugiat nunc. Donec vel mollis elit, volutpat consequat enim. Nulla id nisi finibus orci imperdiet elementum. Phasellus ultrices, elit vitae consequat rutrum, nisl est congue massa, quis condimentum justo nisi vitae turpis. Maecenas aliquet risus sit amet accumsan elementum. Proin non finibus elit, sit amet lobortis augue.
-
-Morbi pretium pulvinar sem vel sollicitudin. Proin imperdiet fringilla leo, non pellentesque lacus gravida nec. Vivamus ullamcorper consectetur ligula eu consectetur. Curabitur sit amet tempus purus. Curabitur quam quam, tincidunt eu tempus vel, volutpat at ipsum. Maecenas lobortis elit ac justo interdum, sit amet mattis ligula mollis. Sed posuere ligula et felis convallis tempor. Aliquam nec mollis velit. Donec varius sit amet erat at imperdiet. Nulla ipsum justo, tempor non sollicitudin gravida, dignissim vel orci. In hac habitasse platea dictumst. Cras cursus tellus id arcu aliquet accumsan. Phasellus ac erat dui.
-
-Duis mollis metus at mi luctus aliquam. Duis varius eget erat ac porttitor. Phasellus lobortis sagittis lacinia. Etiam sagittis eget erat in pulvinar. Phasellus sodales risus nec vulputate accumsan. Cras sit amet pellentesque dui. Praesent consequat felis mi, at vulputate diam convallis a. Donec hendrerit nibh vel justo consequat dictum. In euismod, dui sit amet malesuada suscipit, mauris ex rhoncus eros, sed ornare arcu nunc eu urna. Pellentesque eget erat augue. Integer rutrum mauris sem, nec sodales nulla cursus vel. Vivamus porta, urna vel varius vulputate, nulla arcu malesuada dui, a ultrices magna ante sed nibh.
-
-Morbi ultricies aliquam lorem id bibendum. Donec sit amet nunc vitae massa gravida eleifend hendrerit vel libero. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Nulla vestibulum tempus condimentum. Aliquam dolor ipsum, condimentum in sapien et, tempor iaculis nulla. Aenean non pharetra augue. Maecenas mattis dignissim maximus. Fusce elementum tincidunt massa sit amet lobortis. Phasellus nec pharetra dui, et malesuada ante. Nullam commodo pretium tellus. Praesent sollicitudin, enim eget imperdiet scelerisque, odio felis vulputate dolor, eget auctor neque tellus ac lorem.
-
-In consectetur augue et sapien feugiat varius. Nam tortor mi, consectetur ac felis non, elementum venenatis augue. Suspendisse ut tellus in est sagittis cursus. Quisque faucibus, neque sit amet semper congue, nibh augue finibus odio, vitae interdum dolor arcu eget arcu. Curabitur dictum risus massa, non tincidunt urna molestie non. Maecenas eu quam purus. Donec vulputate, dui eu accumsan blandit, mauris tortor tristique mi, sed blandit leo quam id quam. Ut venenatis sagittis malesuada. Integer non auctor orci. Duis consectetur massa felis. Fusce euismod est sit amet bibendum finibus. Vestibulum dolor ex, tempor at elit in, iaculis cursus dui. Nunc sed neque ac risus rutrum tempus sit amet at ante. In hac habitasse platea dictumst.
-
-Donec rutrum, velit nec viverra tincidunt, est velit viverra neque, quis auctor leo ex at lectus. Morbi eget purus nisi. Aliquam lacus dui, interdum vitae elit at, venenatis dignissim est. Duis ac mollis lorem. Vivamus a vestibulum quam. Maecenas non metus dolor. Praesent tortor nunc, tristique at nisl molestie, vulputate eleifend diam. Integer ultrices lacus odio, vel imperdiet enim accumsan id. Sed ligula tortor, interdum eu velit eget, pharetra pulvinar magna. Sed non lacus in eros tincidunt sagittis ac vel justo. Donec vitae leo sagittis, accumsan ante sit amet, accumsan odio. Ut volutpat ultricies tortor. Vestibulum tempus purus et est tristique sagittis quis vitae turpis.
-
-Nam iaculis neque lacus, eget euismod turpis blandit eget. In hac habitasse platea dictumst. Phasellus justo neque, scelerisque sit amet risus ut, pretium commodo nisl. Phasellus auctor sapien sed ex bibendum fermentum. Proin maximus odio a ante ornare, a feugiat lorem egestas. Etiam efficitur tortor a ante tincidunt interdum. Nullam non est ac massa congue efficitur sit amet nec eros. Nullam at ipsum vel mauris tincidunt efficitur. Duis pulvinar nisl elit, id auctor risus laoreet ac. Sed nunc mauris, tristique id leo ut, condimentum congue nunc. Sed ultricies, mauris et convallis faucibus, justo ex faucibus est, at lobortis purus justo non arcu. Integer vel facilisis elit, dapibus imperdiet mauris.
-
-Pellentesque non mattis turpis, eget bibendum velit. Fusce sollicitudin ante ac tincidunt rhoncus. Praesent porta scelerisque consequat. Donec eleifend faucibus sollicitudin. Quisque vitae purus eget tortor tempor ultrices. Maecenas mauris diam, semper vitae est non, imperdiet tempor magna. Duis elit lacus, auctor vestibulum enim eget, rhoncus porttitor tortor.
-
-Donec non rhoncus nibh. Cras dapibus justo vitae nunc accumsan, id congue erat egestas. Aenean at ante ante. Duis eleifend imperdiet dREADALL
diff --git a/test/image/mysql.sql b/test/image/mysql.sql
deleted file mode 100644
index 51554b98d..000000000
--- a/test/image/mysql.sql
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2018 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-SHOW databases;
-USE mysql;
-
-CREATE TABLE foo (id int);
-INSERT INTO foo VALUES(1);
-SELECT * FROM foo;
-DROP TABLE foo;
-
-shutdown;
diff --git a/test/image/ruby.rb b/test/image/ruby.rb
deleted file mode 100644
index aced49c6d..000000000
--- a/test/image/ruby.rb
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2018 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-require 'sinatra'
-
-set :bind, "0.0.0.0"
-set :port, 8080
-
-get '/' do
- 'Hello World'
-end
-
diff --git a/test/image/ruby.sh b/test/image/ruby.sh
deleted file mode 100755
index ebe8d5b0e..000000000
--- a/test/image/ruby.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-
-# Copyright 2018 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -e
-
-gem install sinatra
-ruby /src/ruby.rb
diff --git a/test/iptables/BUILD b/test/iptables/BUILD
deleted file mode 100644
index 9805665ac..000000000
--- a/test/iptables/BUILD
+++ /dev/null
@@ -1,41 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "iptables",
- testonly = 1,
- srcs = [
- "filter_input.go",
- "filter_output.go",
- "iptables.go",
- "iptables_unsafe.go",
- "iptables_util.go",
- "nat.go",
- ],
- visibility = ["//test/iptables:__subpackages__"],
- deps = [
- "//pkg/binary",
- "//pkg/hostarch",
- "//pkg/test/testutil",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "iptables_test",
- size = "large",
- srcs = [
- "iptables_test.go",
- ],
- data = ["//test/iptables/runner"],
- library = ":iptables",
- tags = [
- "local",
- "manual",
- ],
- deps = [
- "//pkg/test/dockerutil",
- "//pkg/test/testutil",
- ],
-)
diff --git a/test/iptables/README.md b/test/iptables/README.md
deleted file mode 100644
index 1196f8eb5..000000000
--- a/test/iptables/README.md
+++ /dev/null
@@ -1,84 +0,0 @@
-# iptables Tests
-
-iptables tests are run via `make iptables-tests`.
-
-The iptables tests require extra Docker configuration to work. Enable IPv6 in
-`/etc/docker/daemon.json` (make sure to restart Docker if you change this file):
-
-```json
-{
- "experimental": true,
- "fixed-cidr-v6": "2001:db8:1::/64",
- "ipv6": true,
- // Runtimes and other Docker config...
-}
-```
-
-And if you're running manually (i.e. not using the `make` target), you'll need
-to:
-
-* Load the iptables filter modules via `modprobe iptable_filter && modprobe ip6table_filter` (see the combined sketch below).
-* Enable `--net-raw` in your chosen runtime in `/etc/docker/daemon.json` (make
- sure to restart Docker if you change this file).
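-
-Putting the manual steps together, something like the following should work. This
-is only a sketch: it assumes a systemd-managed Docker daemon and that you edit
-`/etc/docker/daemon.json` yourself to add `--net-raw` to your runtime's
-`runtimeArgs`.
-
-```bash
-# Load the filter-table modules for IPv4 and IPv6.
-sudo modprobe iptable_filter
-sudo modprobe ip6table_filter
-
-# Edit /etc/docker/daemon.json to add --net-raw to the runsc runtimeArgs,
-# then restart Docker so the change takes effect.
-sudo systemctl restart docker
-```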
-
-The resulting runtime should look something like this:
-
-```json
-"runsc": {
- "path": "/tmp/iptables/runsc",
- "runtimeArgs": [
- "--debug-log",
- "/tmp/iptables/logs/runsc.log.%TEST%.%TIMESTAMP%.%COMMAND%",
- "--net-raw"
- ]
-},
-// ...
-```
-
-## Test Structure
-
-Each test implements `TestCase`, providing (1) a function to run inside the
-container and (2) a function to run locally. Those processes are given each
-other's IP addresses. The test succeeds when both functions succeed.
-
-The function inside the container (`ContainerAction`) typically sets some
-iptables rules and then tries to send or receive packets. The local function
-(`LocalAction`) will typically just send or receive packets.
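-
-For orientation, a minimal test added to the `iptables` package might look like
-the sketch below. The helpers (`filterTable`, `listenUDP`, `sendUDPLoop`,
-`acceptPort`, `containerCase`) are the ones used by the existing tests in this
-package; `FilterInputExample` is a hypothetical name used only for
-illustration.
-
-```go
-// FilterInputExample is a hypothetical test: it installs an ACCEPT rule for
-// UDP and verifies that a packet arrives.
-type FilterInputExample struct{ containerCase }
-
-var _ TestCase = (*FilterInputExample)(nil)
-
-func init() {
-	RegisterTestCase(&FilterInputExample{})
-}
-
-// Name implements TestCase.Name.
-func (*FilterInputExample) Name() string {
-	return "FilterInputExample"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterInputExample) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
-	// Install an ACCEPT rule, then wait for a UDP packet on acceptPort.
-	if err := filterTable(ipv6, "-A", "INPUT", "-p", "udp", "-j", "ACCEPT"); err != nil {
-		return err
-	}
-	return listenUDP(ctx, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterInputExample) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
-	// Send UDP packets at the container until the context is done.
-	return sendUDPLoop(ctx, ip, acceptPort, ipv6)
-}
-```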
-
-### Adding Tests
-
-1) Add your test to the `iptables` package.
-
-2) Register the test in an `init` function via `RegisterTestCase` (see
-`filter_input.go` as an example).
-
-3) Add it to `iptables_test.go` (see the other tests in that file).
-
-Your test is now runnable with Bazel!
-
-## Run individual tests
-
-Build and install `runsc`. Re-run this when you modify gVisor:
-
-```bash
-$ bazel build //runsc && sudo cp bazel-bin/runsc/linux_amd64_pure_stripped/runsc $(which runsc)
-```
-
-Build the testing Docker container. Re-run this when you modify the test code in
-this directory:
-
-```bash
-$ make load-iptables
-```
-
-Run an individual test via:
-
-```bash
-$ bazel test //test/iptables:iptables_test --test_filter=<TESTNAME>
-```
-
-To run an individual test with `runc`:
-
-```bash
-$ bazel test //test/iptables:iptables_test --test_filter=<TESTNAME> --test_arg=--runtime=runc
-```
diff --git a/test/iptables/filter_input.go b/test/iptables/filter_input.go
deleted file mode 100644
index 4739bc06f..000000000
--- a/test/iptables/filter_input.go
+++ /dev/null
@@ -1,990 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package iptables
-
-import (
- "context"
- "errors"
- "fmt"
- "net"
- "time"
-)
-
-const (
- dropPort = 2401
- acceptPort = 2402
- sendloopDuration = 2 * time.Second
- chainName = "foochain"
-)
-
-func init() {
- RegisterTestCase(&FilterInputDropAll{})
- RegisterTestCase(&FilterInputDropDifferentUDPPort{})
- RegisterTestCase(&FilterInputDropOnlyUDP{})
- RegisterTestCase(&FilterInputDropTCPDestPort{})
- RegisterTestCase(&FilterInputDropTCPSrcPort{})
- RegisterTestCase(&FilterInputDropUDPPort{})
- RegisterTestCase(&FilterInputDropUDP{})
- RegisterTestCase(&FilterInputCreateUserChain{})
- RegisterTestCase(&FilterInputDefaultPolicyAccept{})
- RegisterTestCase(&FilterInputDefaultPolicyDrop{})
- RegisterTestCase(&FilterInputReturnUnderflow{})
- RegisterTestCase(&FilterInputSerializeJump{})
- RegisterTestCase(&FilterInputJumpBasic{})
- RegisterTestCase(&FilterInputJumpReturn{})
- RegisterTestCase(&FilterInputJumpReturnDrop{})
- RegisterTestCase(&FilterInputJumpBuiltin{})
- RegisterTestCase(&FilterInputJumpTwice{})
- RegisterTestCase(&FilterInputDestination{})
- RegisterTestCase(&FilterInputInvertDestination{})
- RegisterTestCase(&FilterInputSource{})
- RegisterTestCase(&FilterInputInvertSource{})
- RegisterTestCase(&FilterInputInterfaceAccept{})
- RegisterTestCase(&FilterInputInterfaceDrop{})
- RegisterTestCase(&FilterInputInterface{})
- RegisterTestCase(&FilterInputInterfaceBeginsWith{})
- RegisterTestCase(&FilterInputInterfaceInvertDrop{})
- RegisterTestCase(&FilterInputInterfaceInvertAccept{})
-}
-
-// FilterInputDropUDP tests that we can drop UDP traffic.
-type FilterInputDropUDP struct{ containerCase }
-
-var _ TestCase = (*FilterInputDropUDP)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterInputDropUDP) Name() string {
- return "FilterInputDropUDP"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterInputDropUDP) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := filterTable(ipv6, "-A", "INPUT", "-p", "udp", "-j", "DROP"); err != nil {
- return err
- }
-
- // Listen for UDP packets on dropPort.
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
- if err := listenUDP(timedCtx, dropPort, ipv6); err == nil {
- return fmt.Errorf("packets on port %d should have been dropped, but got a packet", dropPort)
- } else if !errors.Is(err, context.DeadlineExceeded) {
- return fmt.Errorf("error reading: %v", err)
- }
-
- // At this point we know that reading timed out and never received a
- // packet.
- return nil
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterInputDropUDP) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return sendUDPLoop(ctx, ip, dropPort, ipv6)
-}
-
-// FilterInputDropOnlyUDP tests that "-p udp -j DROP" only affects UDP traffic.
-type FilterInputDropOnlyUDP struct{ baseCase }
-
-var _ TestCase = (*FilterInputDropOnlyUDP)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterInputDropOnlyUDP) Name() string {
- return "FilterInputDropOnlyUDP"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterInputDropOnlyUDP) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := filterTable(ipv6, "-A", "INPUT", "-p", "udp", "-j", "DROP"); err != nil {
- return err
- }
-
- // Listen for a TCP connection, which should be allowed.
- if err := listenTCP(ctx, acceptPort, ipv6); err != nil {
-		return fmt.Errorf("failed to establish a connection: %v", err)
- }
-
- return nil
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterInputDropOnlyUDP) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- // Try to establish a TCP connection with the container, which should
- // succeed.
- return connectTCP(ctx, ip, acceptPort, ipv6)
-}
-
-// FilterInputDropUDPPort tests that we can drop UDP traffic by port.
-type FilterInputDropUDPPort struct{ containerCase }
-
-var _ TestCase = (*FilterInputDropUDPPort)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterInputDropUDPPort) Name() string {
- return "FilterInputDropUDPPort"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterInputDropUDPPort) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := filterTable(ipv6, "-A", "INPUT", "-p", "udp", "-m", "udp", "--destination-port", fmt.Sprintf("%d", dropPort), "-j", "DROP"); err != nil {
- return err
- }
-
- // Listen for UDP packets on dropPort.
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
- if err := listenUDP(timedCtx, dropPort, ipv6); err == nil {
- return fmt.Errorf("packets on port %d should have been dropped, but got a packet", dropPort)
- } else if !errors.Is(err, context.DeadlineExceeded) {
- return fmt.Errorf("error reading: %v", err)
- }
-
- // At this point we know that reading timed out and never received a
- // packet.
- return nil
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterInputDropUDPPort) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return sendUDPLoop(ctx, ip, dropPort, ipv6)
-}
-
-// FilterInputDropDifferentUDPPort tests that dropping traffic for a single UDP port
-// doesn't drop packets on other ports.
-type FilterInputDropDifferentUDPPort struct{ containerCase }
-
-var _ TestCase = (*FilterInputDropDifferentUDPPort)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterInputDropDifferentUDPPort) Name() string {
- return "FilterInputDropDifferentUDPPort"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterInputDropDifferentUDPPort) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := filterTable(ipv6, "-A", "INPUT", "-p", "udp", "-m", "udp", "--destination-port", fmt.Sprintf("%d", dropPort), "-j", "DROP"); err != nil {
- return err
- }
-
- // Listen for UDP packets on another port.
- if err := listenUDP(ctx, acceptPort, ipv6); err != nil {
- return fmt.Errorf("packets on port %d should be allowed, but encountered an error: %v", acceptPort, err)
- }
-
- return nil
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterInputDropDifferentUDPPort) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return sendUDPLoop(ctx, ip, acceptPort, ipv6)
-}
-
-// FilterInputDropTCPDestPort tests that connections are not accepted on specified destination ports.
-type FilterInputDropTCPDestPort struct{ baseCase }
-
-var _ TestCase = (*FilterInputDropTCPDestPort)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterInputDropTCPDestPort) Name() string {
- return "FilterInputDropTCPDestPort"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterInputDropTCPDestPort) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := filterTable(ipv6, "-A", "INPUT", "-p", "tcp", "-m", "tcp", "--dport", fmt.Sprintf("%d", dropPort), "-j", "DROP"); err != nil {
- return err
- }
-
- // Listen for TCP packets on drop port.
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
- if err := listenTCP(timedCtx, dropPort, ipv6); err == nil {
- return fmt.Errorf("connection on port %d should not be accepted, but got accepted", dropPort)
- } else if !errors.Is(err, context.DeadlineExceeded) {
- return fmt.Errorf("error reading: %v", err)
- }
-
- return nil
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterInputDropTCPDestPort) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- // Ensure we cannot connect to the container.
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
- if err := connectTCP(timedCtx, ip, dropPort, ipv6); err == nil {
- return fmt.Errorf("expected not to connect, but was able to connect on port %d", dropPort)
- }
- return nil
-}
-
-// FilterInputDropTCPSrcPort tests that connections are not accepted on specified source ports.
-type FilterInputDropTCPSrcPort struct{ baseCase }
-
-var _ TestCase = (*FilterInputDropTCPSrcPort)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterInputDropTCPSrcPort) Name() string {
- return "FilterInputDropTCPSrcPort"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterInputDropTCPSrcPort) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- // Drop anything from an ephemeral port.
- if err := filterTable(ipv6, "-A", "INPUT", "-p", "tcp", "-m", "tcp", "--sport", "1024:65535", "-j", "DROP"); err != nil {
- return err
- }
-
- // Listen for TCP packets on accept port.
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
- if err := listenTCP(timedCtx, acceptPort, ipv6); err == nil {
-		return fmt.Errorf("connection destined to port %d should not be accepted, but was", acceptPort)
- } else if !errors.Is(err, context.DeadlineExceeded) {
- return fmt.Errorf("error reading: %v", err)
- }
-
- return nil
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterInputDropTCPSrcPort) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- // Ensure we cannot connect to the container.
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
-	if err := connectTCP(timedCtx, ip, acceptPort, ipv6); err == nil {
- return fmt.Errorf("expected not to connect, but was able to connect on port %d", acceptPort)
- }
- return nil
-}
-
-// FilterInputDropAll tests that we can drop all traffic to the INPUT chain.
-type FilterInputDropAll struct{ containerCase }
-
-var _ TestCase = (*FilterInputDropAll)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterInputDropAll) Name() string {
- return "FilterInputDropAll"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterInputDropAll) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := filterTable(ipv6, "-A", "INPUT", "-j", "DROP"); err != nil {
- return err
- }
-
- // Listen for all packets on dropPort.
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
- if err := listenUDP(timedCtx, dropPort, ipv6); err == nil {
- return fmt.Errorf("packets should have been dropped, but got a packet")
- } else if !errors.Is(err, context.DeadlineExceeded) {
- return fmt.Errorf("error reading: %v", err)
- }
-
- // At this point we know that reading timed out and never received a
- // packet.
- return nil
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterInputDropAll) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return sendUDPLoop(ctx, ip, dropPort, ipv6)
-}
-
-// FilterInputMultiUDPRules verifies that multiple UDP rules are applied
-// correctly. This has the added benefit of testing whether we're serializing
-// rules correctly -- if we do it incorrectly, the iptables tool will
-// misunderstand and save the wrong tables.
-type FilterInputMultiUDPRules struct{ baseCase }
-
-var _ TestCase = (*FilterInputMultiUDPRules)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterInputMultiUDPRules) Name() string {
- return "FilterInputMultiUDPRules"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterInputMultiUDPRules) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- rules := [][]string{
- {"-A", "INPUT", "-p", "udp", "-m", "udp", "--destination-port", fmt.Sprintf("%d", dropPort), "-j", "DROP"},
- {"-A", "INPUT", "-p", "udp", "-m", "udp", "--destination-port", fmt.Sprintf("%d", acceptPort), "-j", "ACCEPT"},
- {"-L"},
- }
- return filterTableRules(ipv6, rules)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterInputMultiUDPRules) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- // No-op.
- return nil
-}
-
-// FilterInputRequireProtocolUDP checks that "-m udp" requires "-p udp" to be
-// specified.
-type FilterInputRequireProtocolUDP struct{ baseCase }
-
-var _ TestCase = (*FilterInputRequireProtocolUDP)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterInputRequireProtocolUDP) Name() string {
- return "FilterInputRequireProtocolUDP"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterInputRequireProtocolUDP) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := filterTable(ipv6, "-A", "INPUT", "-m", "udp", "--destination-port", fmt.Sprintf("%d", dropPort), "-j", "DROP"); err == nil {
-		return errors.New("expected iptables to fail without \"-p udp\", but succeeded")
- }
- return nil
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterInputRequireProtocolUDP) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- // No-op.
- return nil
-}
-
-// FilterInputCreateUserChain tests chain creation.
-type FilterInputCreateUserChain struct{ baseCase }
-
-var _ TestCase = (*FilterInputCreateUserChain)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterInputCreateUserChain) Name() string {
- return "FilterInputCreateUserChain"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterInputCreateUserChain) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- rules := [][]string{
- // Create a chain.
- {"-N", chainName},
- // Add a simple rule to the chain.
- {"-A", chainName, "-j", "DROP"},
- }
- return filterTableRules(ipv6, rules)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterInputCreateUserChain) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- // No-op.
- return nil
-}
-
-// FilterInputDefaultPolicyAccept tests the default ACCEPT policy.
-type FilterInputDefaultPolicyAccept struct{ containerCase }
-
-var _ TestCase = (*FilterInputDefaultPolicyAccept)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterInputDefaultPolicyAccept) Name() string {
- return "FilterInputDefaultPolicyAccept"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterInputDefaultPolicyAccept) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- // Set the default policy to accept, then receive a packet.
- if err := filterTable(ipv6, "-P", "INPUT", "ACCEPT"); err != nil {
- return err
- }
- return listenUDP(ctx, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterInputDefaultPolicyAccept) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return sendUDPLoop(ctx, ip, acceptPort, ipv6)
-}
-
-// FilterInputDefaultPolicyDrop tests the default DROP policy.
-type FilterInputDefaultPolicyDrop struct{ containerCase }
-
-var _ TestCase = (*FilterInputDefaultPolicyDrop)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterInputDefaultPolicyDrop) Name() string {
- return "FilterInputDefaultPolicyDrop"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterInputDefaultPolicyDrop) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := filterTable(ipv6, "-P", "INPUT", "DROP"); err != nil {
- return err
- }
-
- // Listen for UDP packets on dropPort.
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
- if err := listenUDP(timedCtx, dropPort, ipv6); err == nil {
- return fmt.Errorf("packets on port %d should have been dropped, but got a packet", dropPort)
- } else if !errors.Is(err, context.DeadlineExceeded) {
- return fmt.Errorf("error reading: %v", err)
- }
-
- // At this point we know that reading timed out and never received a
- // packet.
- return nil
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterInputDefaultPolicyDrop) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return sendUDPLoop(ctx, ip, acceptPort, ipv6)
-}
-
-// FilterInputReturnUnderflow tests that -j RETURN in a built-in chain causes
-// the underflow rule (i.e. default policy) to be executed.
-type FilterInputReturnUnderflow struct{ containerCase }
-
-var _ TestCase = (*FilterInputReturnUnderflow)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterInputReturnUnderflow) Name() string {
- return "FilterInputReturnUnderflow"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterInputReturnUnderflow) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- // Add a RETURN rule followed by an unconditional accept, and set the
- // default policy to DROP.
- rules := [][]string{
- {"-A", "INPUT", "-j", "RETURN"},
- {"-A", "INPUT", "-j", "DROP"},
- {"-P", "INPUT", "ACCEPT"},
- }
- if err := filterTableRules(ipv6, rules); err != nil {
- return err
- }
-
- // We should receive packets, as the RETURN rule will trigger the default
- // ACCEPT policy.
- return listenUDP(ctx, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterInputReturnUnderflow) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return sendUDPLoop(ctx, ip, acceptPort, ipv6)
-}
-
-// FilterInputSerializeJump verifies that we can serialize jumps.
-type FilterInputSerializeJump struct{ baseCase }
-
-var _ TestCase = (*FilterInputSerializeJump)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterInputSerializeJump) Name() string {
- return "FilterInputSerializeJump"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterInputSerializeJump) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
-	// Write a JUMP rule, then serialize it with `-L`.
- rules := [][]string{
- {"-N", chainName},
- {"-A", "INPUT", "-j", chainName},
- {"-L"},
- }
- return filterTableRules(ipv6, rules)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterInputSerializeJump) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- // No-op.
- return nil
-}
-
-// FilterInputJumpBasic jumps to a chain and executes a rule there.
-type FilterInputJumpBasic struct{ containerCase }
-
-var _ TestCase = (*FilterInputJumpBasic)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterInputJumpBasic) Name() string {
- return "FilterInputJumpBasic"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterInputJumpBasic) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- rules := [][]string{
- {"-P", "INPUT", "DROP"},
- {"-N", chainName},
- {"-A", "INPUT", "-j", chainName},
- {"-A", chainName, "-j", "ACCEPT"},
- }
- if err := filterTableRules(ipv6, rules); err != nil {
- return err
- }
-
- // Listen for UDP packets on acceptPort.
- return listenUDP(ctx, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterInputJumpBasic) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return sendUDPLoop(ctx, ip, acceptPort, ipv6)
-}
-
-// FilterInputJumpReturn jumps, returns, and executes a rule.
-type FilterInputJumpReturn struct{ containerCase }
-
-var _ TestCase = (*FilterInputJumpReturn)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterInputJumpReturn) Name() string {
- return "FilterInputJumpReturn"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterInputJumpReturn) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- rules := [][]string{
- {"-N", chainName},
- {"-P", "INPUT", "ACCEPT"},
- {"-A", "INPUT", "-j", chainName},
- {"-A", chainName, "-j", "RETURN"},
- {"-A", chainName, "-j", "DROP"},
- }
- if err := filterTableRules(ipv6, rules); err != nil {
- return err
- }
-
- // Listen for UDP packets on acceptPort.
- return listenUDP(ctx, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterInputJumpReturn) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return sendUDPLoop(ctx, ip, acceptPort, ipv6)
-}
-
-// FilterInputJumpReturnDrop jumps to a chain, returns, and DROPs packets.
-type FilterInputJumpReturnDrop struct{ containerCase }
-
-var _ TestCase = (*FilterInputJumpReturnDrop)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterInputJumpReturnDrop) Name() string {
- return "FilterInputJumpReturnDrop"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterInputJumpReturnDrop) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- rules := [][]string{
- {"-N", chainName},
- {"-A", "INPUT", "-j", chainName},
- {"-A", "INPUT", "-j", "DROP"},
- {"-A", chainName, "-j", "RETURN"},
- }
- if err := filterTableRules(ipv6, rules); err != nil {
- return err
- }
-
- // Listen for UDP packets on dropPort.
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
- if err := listenUDP(timedCtx, dropPort, ipv6); err == nil {
- return fmt.Errorf("packets on port %d should have been dropped, but got a packet", dropPort)
- } else if !errors.Is(err, context.DeadlineExceeded) {
- return fmt.Errorf("error reading: %v", err)
- }
-
- // At this point we know that reading timed out and never received a
- // packet.
- return nil
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterInputJumpReturnDrop) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return sendUDPLoop(ctx, ip, dropPort, ipv6)
-}
-
-// FilterInputJumpBuiltin verifies that jumping to a built-in chain is illegal.
-type FilterInputJumpBuiltin struct{ baseCase }
-
-var _ TestCase = (*FilterInputJumpBuiltin)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterInputJumpBuiltin) Name() string {
- return "FilterInputJumpBuiltin"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterInputJumpBuiltin) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := filterTable(ipv6, "-A", "INPUT", "-j", "OUTPUT"); err == nil {
- return fmt.Errorf("iptables should be unable to jump to a built-in chain")
- }
- return nil
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterInputJumpBuiltin) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- // No-op.
- return nil
-}
-
-// FilterInputJumpTwice jumps twice, then returns twice and executes a rule.
-type FilterInputJumpTwice struct{ containerCase }
-
-var _ TestCase = (*FilterInputJumpTwice)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterInputJumpTwice) Name() string {
- return "FilterInputJumpTwice"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterInputJumpTwice) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- const chainName2 = chainName + "2"
- rules := [][]string{
- {"-P", "INPUT", "DROP"},
- {"-N", chainName},
- {"-N", chainName2},
- {"-A", "INPUT", "-j", chainName},
- {"-A", chainName, "-j", chainName2},
- {"-A", "INPUT", "-j", "ACCEPT"},
- }
- if err := filterTableRules(ipv6, rules); err != nil {
- return err
- }
-
- // UDP packets should jump and return twice, eventually hitting the
- // ACCEPT rule.
- return listenUDP(ctx, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterInputJumpTwice) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return sendUDPLoop(ctx, ip, acceptPort, ipv6)
-}
-
-// FilterInputDestination verifies that we can filter packets via `-d
-// <ipaddr>`.
-type FilterInputDestination struct{ containerCase }
-
-var _ TestCase = (*FilterInputDestination)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterInputDestination) Name() string {
- return "FilterInputDestination"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterInputDestination) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- addrs, err := localAddrs(ipv6)
- if err != nil {
- return err
- }
-
- // Make INPUT's default action DROP, then ACCEPT all packets bound for
- // this machine.
- rules := [][]string{{"-P", "INPUT", "DROP"}}
- for _, addr := range addrs {
- rules = append(rules, []string{"-A", "INPUT", "-d", addr, "-j", "ACCEPT"})
- }
- if err := filterTableRules(ipv6, rules); err != nil {
- return err
- }
-
- return listenUDP(ctx, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterInputDestination) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return sendUDPLoop(ctx, ip, acceptPort, ipv6)
-}
-
-// FilterInputInvertDestination verifies that we can filter packets via `! -d
-// <ipaddr>`.
-type FilterInputInvertDestination struct{ containerCase }
-
-var _ TestCase = (*FilterInputInvertDestination)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterInputInvertDestination) Name() string {
- return "FilterInputInvertDestination"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterInputInvertDestination) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
-	// Make INPUT's default action DROP, then ACCEPT all packets not bound
-	// for the loopback address.
- rules := [][]string{
- {"-P", "INPUT", "DROP"},
- {"-A", "INPUT", "!", "-d", localIP(ipv6), "-j", "ACCEPT"},
- }
- if err := filterTableRules(ipv6, rules); err != nil {
- return err
- }
-
- return listenUDP(ctx, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterInputInvertDestination) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return sendUDPLoop(ctx, ip, acceptPort, ipv6)
-}
-
-// FilterInputSource verifies that we can filter packets via `-s
-// <ipaddr>`.
-type FilterInputSource struct{ containerCase }
-
-var _ TestCase = (*FilterInputSource)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterInputSource) Name() string {
- return "FilterInputSource"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterInputSource) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- // Make INPUT's default action DROP, then ACCEPT all packets from this
- // machine.
- rules := [][]string{
- {"-P", "INPUT", "DROP"},
- {"-A", "INPUT", "-s", fmt.Sprintf("%v", ip), "-j", "ACCEPT"},
- }
- if err := filterTableRules(ipv6, rules); err != nil {
- return err
- }
-
- return listenUDP(ctx, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterInputSource) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return sendUDPLoop(ctx, ip, acceptPort, ipv6)
-}
-
-// FilterInputInvertSource verifies that we can filter packets via `! -s
-// <ipaddr>`.
-type FilterInputInvertSource struct{ containerCase }
-
-var _ TestCase = (*FilterInputInvertSource)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterInputInvertSource) Name() string {
- return "FilterInputInvertSource"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterInputInvertSource) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
-	// Make INPUT's default action DROP, then ACCEPT all packets not coming
-	// from the loopback address.
- rules := [][]string{
- {"-P", "INPUT", "DROP"},
- {"-A", "INPUT", "!", "-s", localIP(ipv6), "-j", "ACCEPT"},
- }
- if err := filterTableRules(ipv6, rules); err != nil {
- return err
- }
-
- return listenUDP(ctx, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterInputInvertSource) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return sendUDPLoop(ctx, ip, acceptPort, ipv6)
-}
-
-// FilterInputInterfaceAccept tests that packets are accepted from the
-// interface matching the iptables rule.
-type FilterInputInterfaceAccept struct{ localCase }
-
-var _ TestCase = (*FilterInputInterfaceAccept)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterInputInterfaceAccept) Name() string {
- return "FilterInputInterfaceAccept"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterInputInterfaceAccept) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- ifname, ok := getInterfaceName()
- if !ok {
-		return fmt.Errorf("no interface other than loopback is present")
- }
- if err := filterTable(ipv6, "-A", "INPUT", "-p", "udp", "-i", ifname, "-j", "ACCEPT"); err != nil {
- return err
- }
- if err := listenUDP(ctx, acceptPort, ipv6); err != nil {
- return fmt.Errorf("packets on port %d should be allowed, but encountered an error: %w", acceptPort, err)
- }
-
- return nil
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterInputInterfaceAccept) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return sendUDPLoop(ctx, ip, acceptPort, ipv6)
-}
-
-// FilterInputInterfaceDrop tests that packets are dropped from the
-// interface matching the iptables rule.
-type FilterInputInterfaceDrop struct{ localCase }
-
-var _ TestCase = (*FilterInputInterfaceDrop)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterInputInterfaceDrop) Name() string {
- return "FilterInputInterfaceDrop"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterInputInterfaceDrop) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- ifname, ok := getInterfaceName()
- if !ok {
-		return fmt.Errorf("no interface other than loopback is present")
- }
- if err := filterTable(ipv6, "-A", "INPUT", "-p", "udp", "-i", ifname, "-j", "DROP"); err != nil {
- return err
- }
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
- if err := listenUDP(timedCtx, acceptPort, ipv6); err != nil {
- if errors.Is(err, context.DeadlineExceeded) {
- return nil
- }
- return fmt.Errorf("error reading: %w", err)
- }
- return fmt.Errorf("packets should have been dropped, but got a packet")
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterInputInterfaceDrop) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return sendUDPLoop(ctx, ip, acceptPort, ipv6)
-}
-
-// FilterInputInterface tests that packets are not dropped from an interface
-// that does not match the interface name in the iptables rule.
-type FilterInputInterface struct{ localCase }
-
-var _ TestCase = (*FilterInputInterface)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterInputInterface) Name() string {
- return "FilterInputInterface"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterInputInterface) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := filterTable(ipv6, "-A", "INPUT", "-p", "udp", "-i", "lo", "-j", "DROP"); err != nil {
- return err
- }
- if err := listenUDP(ctx, acceptPort, ipv6); err != nil {
- return fmt.Errorf("packets on port %d should be allowed, but encountered an error: %w", acceptPort, err)
- }
- return nil
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterInputInterface) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return sendUDPLoop(ctx, ip, acceptPort, ipv6)
-}
-
-// FilterInputInterfaceBeginsWith tests that packets are dropped from an
-// interface whose name begins with the given prefix.
-type FilterInputInterfaceBeginsWith struct{ localCase }
-
-var _ TestCase = (*FilterInputInterfaceBeginsWith)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterInputInterfaceBeginsWith) Name() string {
- return "FilterInputInterfaceBeginsWith"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterInputInterfaceBeginsWith) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := filterTable(ipv6, "-A", "INPUT", "-p", "udp", "-i", "e+", "-j", "DROP"); err != nil {
- return err
- }
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
- if err := listenUDP(timedCtx, acceptPort, ipv6); err != nil {
- if errors.Is(err, context.DeadlineExceeded) {
- return nil
- }
- return fmt.Errorf("error reading: %w", err)
- }
- return fmt.Errorf("packets should have been dropped, but got a packet")
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterInputInterfaceBeginsWith) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return sendUDPLoop(ctx, ip, acceptPort, ipv6)
-}
-
-// FilterInputInterfaceInvertDrop tests that we selectively drop packets from
-// interfaces not matching the given interface name.
-type FilterInputInterfaceInvertDrop struct{ baseCase }
-
-var _ TestCase = (*FilterInputInterfaceInvertDrop)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterInputInterfaceInvertDrop) Name() string {
- return "FilterInputInterfaceInvertDrop"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterInputInterfaceInvertDrop) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := filterTable(ipv6, "-A", "INPUT", "-p", "tcp", "!", "-i", "lo", "-j", "DROP"); err != nil {
- return err
- }
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
- if err := listenTCP(timedCtx, acceptPort, ipv6); err != nil {
- if errors.Is(err, context.DeadlineExceeded) {
- return nil
- }
- return fmt.Errorf("error reading: %w", err)
- }
- return fmt.Errorf("connection on port %d should not be accepted, but was accepted", acceptPort)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterInputInterfaceInvertDrop) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
- if err := connectTCP(timedCtx, ip, acceptPort, ipv6); err != nil {
- var operr *net.OpError
- if errors.As(err, &operr) && operr.Timeout() {
- return nil
- }
- return fmt.Errorf("error connecting: %w", err)
- }
- return fmt.Errorf("connection destined to port %d should not be accepted, but was accepted", acceptPort)
-}
-
-// FilterInputInterfaceInvertAccept tests that we can selectively accept
-// packets that do not arrive on the specified incoming interface.
-type FilterInputInterfaceInvertAccept struct{ baseCase }
-
-var _ TestCase = (*FilterInputInterfaceInvertAccept)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterInputInterfaceInvertAccept) Name() string {
- return "FilterInputInterfaceInvertAccept"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterInputInterfaceInvertAccept) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := filterTable(ipv6, "-A", "INPUT", "-p", "tcp", "!", "-i", "lo", "-j", "ACCEPT"); err != nil {
- return err
- }
- return listenTCP(ctx, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterInputInterfaceInvertAccept) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return connectTCP(ctx, ip, acceptPort, ipv6)
-}
diff --git a/test/iptables/filter_output.go b/test/iptables/filter_output.go
deleted file mode 100644
index bcb2a3b70..000000000
--- a/test/iptables/filter_output.go
+++ /dev/null
@@ -1,714 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package iptables
-
-import (
- "context"
- "errors"
- "fmt"
- "net"
-)
-
-func init() {
- RegisterTestCase(&FilterOutputDropTCPDestPort{})
- RegisterTestCase(&FilterOutputDropTCPSrcPort{})
- RegisterTestCase(&FilterOutputDestination{})
- RegisterTestCase(&FilterOutputInvertDestination{})
- RegisterTestCase(&FilterOutputAcceptTCPOwner{})
- RegisterTestCase(&FilterOutputDropTCPOwner{})
- RegisterTestCase(&FilterOutputAcceptUDPOwner{})
- RegisterTestCase(&FilterOutputDropUDPOwner{})
- RegisterTestCase(&FilterOutputOwnerFail{})
- RegisterTestCase(&FilterOutputAcceptGIDOwner{})
- RegisterTestCase(&FilterOutputDropGIDOwner{})
- RegisterTestCase(&FilterOutputInvertGIDOwner{})
- RegisterTestCase(&FilterOutputInvertUIDOwner{})
- RegisterTestCase(&FilterOutputInvertUIDAndGIDOwner{})
- RegisterTestCase(&FilterOutputInterfaceAccept{})
- RegisterTestCase(&FilterOutputInterfaceDrop{})
- RegisterTestCase(&FilterOutputInterface{})
- RegisterTestCase(&FilterOutputInterfaceBeginsWith{})
- RegisterTestCase(&FilterOutputInterfaceInvertDrop{})
- RegisterTestCase(&FilterOutputInterfaceInvertAccept{})
-}
-
-// FilterOutputDropTCPDestPort tests that connections are not accepted on
-// specified destination ports.
-type FilterOutputDropTCPDestPort struct{ baseCase }
-
-var _ TestCase = (*FilterOutputDropTCPDestPort)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterOutputDropTCPDestPort) Name() string {
- return "FilterOutputDropTCPDestPort"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterOutputDropTCPDestPort) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := filterTable(ipv6, "-A", "OUTPUT", "-p", "tcp", "-m", "tcp", "--dport", "1024:65535", "-j", "DROP"); err != nil {
- return err
- }
-
- // Listen for TCP packets on accept port.
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
- if err := listenTCP(timedCtx, acceptPort, ipv6); err == nil {
-		return fmt.Errorf("connection destined to port %d should not be accepted, but was accepted", acceptPort)
- } else if !errors.Is(err, context.DeadlineExceeded) {
- return fmt.Errorf("error reading: %v", err)
- }
-
- return nil
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterOutputDropTCPDestPort) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
- if err := connectTCP(timedCtx, ip, acceptPort, ipv6); err == nil {
-		return fmt.Errorf("connection on port %d should not be accepted, but was accepted", acceptPort)
- }
-
- return nil
-}
-
-// FilterOutputDropTCPSrcPort tests that connections are not accepted on
-// specified source ports.
-type FilterOutputDropTCPSrcPort struct{ baseCase }
-
-var _ TestCase = (*FilterOutputDropTCPSrcPort)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterOutputDropTCPSrcPort) Name() string {
- return "FilterOutputDropTCPSrcPort"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterOutputDropTCPSrcPort) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := filterTable(ipv6, "-A", "OUTPUT", "-p", "tcp", "-m", "tcp", "--sport", fmt.Sprintf("%d", dropPort), "-j", "DROP"); err != nil {
- return err
- }
-
- // Listen for TCP packets on drop port.
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
- if err := listenTCP(timedCtx, dropPort, ipv6); err == nil {
- return fmt.Errorf("connection on port %d should not be accepted, but got accepted", dropPort)
- } else if !errors.Is(err, context.DeadlineExceeded) {
- return fmt.Errorf("error reading: %v", err)
- }
-
- return nil
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterOutputDropTCPSrcPort) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
- if err := connectTCP(timedCtx, ip, dropPort, ipv6); err == nil {
- return fmt.Errorf("connection destined to port %d should not be accepted, but got accepted", dropPort)
- }
-
- return nil
-}
-
-// FilterOutputAcceptTCPOwner tests that TCP connections from uid owner are accepted.
-type FilterOutputAcceptTCPOwner struct{ baseCase }
-
-var _ TestCase = (*FilterOutputAcceptTCPOwner)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterOutputAcceptTCPOwner) Name() string {
- return "FilterOutputAcceptTCPOwner"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterOutputAcceptTCPOwner) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := filterTable(ipv6, "-A", "OUTPUT", "-p", "tcp", "-m", "owner", "--uid-owner", "root", "-j", "ACCEPT"); err != nil {
- return err
- }
-
- // Listen for TCP packets on accept port.
- return listenTCP(ctx, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterOutputAcceptTCPOwner) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return connectTCP(ctx, ip, acceptPort, ipv6)
-}
-
-// FilterOutputDropTCPOwner tests that TCP connections from uid owner are dropped.
-type FilterOutputDropTCPOwner struct{ baseCase }
-
-var _ TestCase = (*FilterOutputDropTCPOwner)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterOutputDropTCPOwner) Name() string {
- return "FilterOutputDropTCPOwner"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterOutputDropTCPOwner) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := filterTable(ipv6, "-A", "OUTPUT", "-p", "tcp", "-m", "owner", "--uid-owner", "root", "-j", "DROP"); err != nil {
- return err
- }
-
- // Listen for TCP packets on accept port.
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
- if err := listenTCP(timedCtx, acceptPort, ipv6); err == nil {
- return fmt.Errorf("connection on port %d should be dropped, but got accepted", acceptPort)
- } else if !errors.Is(err, context.DeadlineExceeded) {
- return fmt.Errorf("error reading: %v", err)
- }
-
- return nil
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterOutputDropTCPOwner) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
- if err := connectTCP(timedCtx, ip, acceptPort, ipv6); err == nil {
- return fmt.Errorf("connection destined to port %d should be dropped, but got accepted", acceptPort)
- }
-
- return nil
-}
-
-// FilterOutputAcceptUDPOwner tests that UDP packets from uid owner are accepted.
-type FilterOutputAcceptUDPOwner struct{ localCase }
-
-var _ TestCase = (*FilterOutputAcceptUDPOwner)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterOutputAcceptUDPOwner) Name() string {
- return "FilterOutputAcceptUDPOwner"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterOutputAcceptUDPOwner) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := filterTable(ipv6, "-A", "OUTPUT", "-p", "udp", "-m", "owner", "--uid-owner", "root", "-j", "ACCEPT"); err != nil {
- return err
- }
-
- // Send UDP packets on acceptPort.
- return sendUDPLoop(ctx, ip, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterOutputAcceptUDPOwner) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- // Listen for UDP packets on acceptPort.
- return listenUDP(ctx, acceptPort, ipv6)
-}
-
-// FilterOutputDropUDPOwner tests that UDP packets from uid owner are dropped.
-type FilterOutputDropUDPOwner struct{ localCase }
-
-var _ TestCase = (*FilterOutputDropUDPOwner)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterOutputDropUDPOwner) Name() string {
- return "FilterOutputDropUDPOwner"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterOutputDropUDPOwner) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := filterTable(ipv6, "-A", "OUTPUT", "-p", "udp", "-m", "owner", "--uid-owner", "root", "-j", "DROP"); err != nil {
- return err
- }
-
- // Send UDP packets on dropPort.
- return sendUDPLoop(ctx, ip, dropPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterOutputDropUDPOwner) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- // Listen for UDP packets on dropPort.
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
- if err := listenUDP(timedCtx, dropPort, ipv6); err == nil {
- return fmt.Errorf("packets should not be received")
- } else if !errors.Is(err, context.DeadlineExceeded) {
- return fmt.Errorf("error reading: %v", err)
- }
-
- return nil
-}
-
-// FilterOutputOwnerFail tests that an owner rule without a uid or gid
-// option fails.
-type FilterOutputOwnerFail struct{ baseCase }
-
-var _ TestCase = (*FilterOutputOwnerFail)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterOutputOwnerFail) Name() string {
- return "FilterOutputOwnerFail"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterOutputOwnerFail) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := filterTable(ipv6, "-A", "OUTPUT", "-p", "udp", "-m", "owner", "-j", "ACCEPT"); err == nil {
-		return fmt.Errorf("expected iptables to fail without a uid/gid option, but it succeeded")
- }
-
- return nil
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterOutputOwnerFail) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- // no-op.
- return nil
-}
-
-// FilterOutputAcceptGIDOwner tests that TCP connections from gid owner are accepted.
-type FilterOutputAcceptGIDOwner struct{ baseCase }
-
-var _ TestCase = (*FilterOutputAcceptGIDOwner)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterOutputAcceptGIDOwner) Name() string {
- return "FilterOutputAcceptGIDOwner"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterOutputAcceptGIDOwner) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := filterTable(ipv6, "-A", "OUTPUT", "-p", "tcp", "-m", "owner", "--gid-owner", "root", "-j", "ACCEPT"); err != nil {
- return err
- }
-
- // Listen for TCP packets on accept port.
- return listenTCP(ctx, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterOutputAcceptGIDOwner) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return connectTCP(ctx, ip, acceptPort, ipv6)
-}
-
-// FilterOutputDropGIDOwner tests that TCP connections from gid owner are dropped.
-type FilterOutputDropGIDOwner struct{ baseCase }
-
-var _ TestCase = (*FilterOutputDropGIDOwner)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterOutputDropGIDOwner) Name() string {
- return "FilterOutputDropGIDOwner"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterOutputDropGIDOwner) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := filterTable(ipv6, "-A", "OUTPUT", "-p", "tcp", "-m", "owner", "--gid-owner", "root", "-j", "DROP"); err != nil {
- return err
- }
-
- // Listen for TCP packets on accept port.
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
- if err := listenTCP(timedCtx, acceptPort, ipv6); err == nil {
- return fmt.Errorf("connection on port %d should not be accepted, but got accepted", acceptPort)
- } else if !errors.Is(err, context.DeadlineExceeded) {
- return fmt.Errorf("error reading: %v", err)
- }
-
- return nil
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterOutputDropGIDOwner) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
- if err := connectTCP(timedCtx, ip, acceptPort, ipv6); err == nil {
- return fmt.Errorf("connection destined to port %d should not be accepted, but got accepted", acceptPort)
- }
-
- return nil
-}
-
-// FilterOutputInvertGIDOwner tests that TCP connections from the matching gid
-// owner are dropped when the ACCEPT rule is inverted.
-type FilterOutputInvertGIDOwner struct{ baseCase }
-
-var _ TestCase = (*FilterOutputInvertGIDOwner)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterOutputInvertGIDOwner) Name() string {
- return "FilterOutputInvertGIDOwner"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterOutputInvertGIDOwner) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- rules := [][]string{
- {"-A", "OUTPUT", "-p", "tcp", "-m", "owner", "!", "--gid-owner", "root", "-j", "ACCEPT"},
- {"-A", "OUTPUT", "-p", "tcp", "-j", "DROP"},
- }
- if err := filterTableRules(ipv6, rules); err != nil {
- return err
- }
-
- // Listen for TCP packets on accept port.
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
- if err := listenTCP(timedCtx, acceptPort, ipv6); err == nil {
- return fmt.Errorf("connection on port %d should not be accepted, but got accepted", acceptPort)
- } else if !errors.Is(err, context.DeadlineExceeded) {
- return fmt.Errorf("error reading: %v", err)
- }
-
- return nil
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterOutputInvertGIDOwner) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
- if err := connectTCP(timedCtx, ip, acceptPort, ipv6); err == nil {
- return fmt.Errorf("connection destined to port %d should not be accepted, but got accepted", acceptPort)
- }
-
- return nil
-}
-
-// FilterOutputInvertUIDOwner tests that TCP connections from the matching uid
-// owner are accepted when the DROP rule is inverted.
-type FilterOutputInvertUIDOwner struct{ baseCase }
-
-var _ TestCase = (*FilterOutputInvertUIDOwner)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterOutputInvertUIDOwner) Name() string {
- return "FilterOutputInvertUIDOwner"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterOutputInvertUIDOwner) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- rules := [][]string{
- {"-A", "OUTPUT", "-p", "tcp", "-m", "owner", "!", "--uid-owner", "root", "-j", "DROP"},
- {"-A", "OUTPUT", "-p", "tcp", "-j", "ACCEPT"},
- }
- if err := filterTableRules(ipv6, rules); err != nil {
- return err
- }
-
- // Listen for TCP packets on accept port.
- return listenTCP(ctx, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterOutputInvertUIDOwner) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return connectTCP(ctx, ip, acceptPort, ipv6)
-}
-
-// FilterOutputInvertUIDAndGIDOwner tests that TCP connections from uid and gid
-// owner are dropped.
-type FilterOutputInvertUIDAndGIDOwner struct{ baseCase }
-
-var _ TestCase = (*FilterOutputInvertUIDAndGIDOwner)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterOutputInvertUIDAndGIDOwner) Name() string {
- return "FilterOutputInvertUIDAndGIDOwner"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterOutputInvertUIDAndGIDOwner) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- rules := [][]string{
- {"-A", "OUTPUT", "-p", "tcp", "-m", "owner", "!", "--uid-owner", "root", "!", "--gid-owner", "root", "-j", "ACCEPT"},
- {"-A", "OUTPUT", "-p", "tcp", "-j", "DROP"},
- }
- if err := filterTableRules(ipv6, rules); err != nil {
- return err
- }
-
- // Listen for TCP packets on accept port.
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
- if err := listenTCP(timedCtx, acceptPort, ipv6); err == nil {
- return fmt.Errorf("connection on port %d should not be accepted, but got accepted", acceptPort)
- } else if !errors.Is(err, context.DeadlineExceeded) {
- return fmt.Errorf("error reading: %v", err)
- }
-
- return nil
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterOutputInvertUIDAndGIDOwner) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
- if err := connectTCP(timedCtx, ip, acceptPort, ipv6); err == nil {
- return fmt.Errorf("connection destined to port %d should not be accepted, but got accepted", acceptPort)
- }
-
- return nil
-}
-
-// FilterOutputDestination tests that we can selectively allow packets to
-// certain destinations.
-type FilterOutputDestination struct{ localCase }
-
-var _ TestCase = (*FilterOutputDestination)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterOutputDestination) Name() string {
- return "FilterOutputDestination"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterOutputDestination) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- var rules [][]string
- if ipv6 {
- rules = [][]string{
- {"-A", "OUTPUT", "-d", ip.String(), "-j", "ACCEPT"},
- // Allow solicited node multicast addresses so we can send neighbor
- // solicitations.
- {"-A", "OUTPUT", "-d", "ff02::1:ff00:0/104", "-j", "ACCEPT"},
- {"-P", "OUTPUT", "DROP"},
- }
- } else {
- rules = [][]string{
- {"-A", "OUTPUT", "-d", ip.String(), "-j", "ACCEPT"},
- {"-P", "OUTPUT", "DROP"},
- }
- }
- if err := filterTableRules(ipv6, rules); err != nil {
- return err
- }
-
- return sendUDPLoop(ctx, ip, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterOutputDestination) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return listenUDP(ctx, acceptPort, ipv6)
-}
-
-// FilterOutputInvertDestination tests that we can selectively allow packets
-// not headed for a particular destination.
-type FilterOutputInvertDestination struct{ localCase }
-
-var _ TestCase = (*FilterOutputInvertDestination)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterOutputInvertDestination) Name() string {
- return "FilterOutputInvertDestination"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterOutputInvertDestination) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- rules := [][]string{
- {"-A", "OUTPUT", "!", "-d", localIP(ipv6), "-j", "ACCEPT"},
- {"-P", "OUTPUT", "DROP"},
- }
- if err := filterTableRules(ipv6, rules); err != nil {
- return err
- }
-
- return sendUDPLoop(ctx, ip, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterOutputInvertDestination) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return listenUDP(ctx, acceptPort, ipv6)
-}
-
-// FilterOutputInterfaceAccept tests that packets are sent via an interface
-// matching the iptables rule.
-type FilterOutputInterfaceAccept struct{ localCase }
-
-var _ TestCase = (*FilterOutputInterfaceAccept)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterOutputInterfaceAccept) Name() string {
- return "FilterOutputInterfaceAccept"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterOutputInterfaceAccept) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- ifname, ok := getInterfaceName()
- if !ok {
- return fmt.Errorf("no interface is present, except loopback")
- }
- if err := filterTable(ipv6, "-A", "OUTPUT", "-p", "udp", "-o", ifname, "-j", "ACCEPT"); err != nil {
- return err
- }
-
- return sendUDPLoop(ctx, ip, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterOutputInterfaceAccept) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return listenUDP(ctx, acceptPort, ipv6)
-}
-
-// FilterOutputInterfaceDrop tests that packets are not sent via an interface
-// matching the iptables rule.
-type FilterOutputInterfaceDrop struct{ localCase }
-
-var _ TestCase = (*FilterOutputInterfaceDrop)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterOutputInterfaceDrop) Name() string {
- return "FilterOutputInterfaceDrop"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterOutputInterfaceDrop) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- ifname, ok := getInterfaceName()
- if !ok {
- return fmt.Errorf("no interface is present, except loopback")
- }
- if err := filterTable(ipv6, "-A", "OUTPUT", "-p", "udp", "-o", ifname, "-j", "DROP"); err != nil {
- return err
- }
-
- return sendUDPLoop(ctx, ip, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterOutputInterfaceDrop) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
- if err := listenUDP(timedCtx, acceptPort, ipv6); err == nil {
- return fmt.Errorf("packets should not be received on port %v, but are received", acceptPort)
- } else if !errors.Is(err, context.DeadlineExceeded) {
- return fmt.Errorf("error reading: %v", err)
- }
-
- return nil
-}
-
-// FilterOutputInterface tests that packets are still sent via an interface
-// that does not match the interface name in the iptables rule.
-type FilterOutputInterface struct{ localCase }
-
-var _ TestCase = (*FilterOutputInterface)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterOutputInterface) Name() string {
- return "FilterOutputInterface"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterOutputInterface) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := filterTable(ipv6, "-A", "OUTPUT", "-p", "udp", "-o", "lo", "-j", "DROP"); err != nil {
- return err
- }
-
- return sendUDPLoop(ctx, ip, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterOutputInterface) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return listenUDP(ctx, acceptPort, ipv6)
-}
-
-// FilterOutputInterfaceBeginsWith tests that packets are not sent via
-// interfaces whose names begin with the given prefix.
-type FilterOutputInterfaceBeginsWith struct{ localCase }
-
-var _ TestCase = (*FilterOutputInterfaceBeginsWith)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterOutputInterfaceBeginsWith) Name() string {
- return "FilterOutputInterfaceBeginsWith"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterOutputInterfaceBeginsWith) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := filterTable(ipv6, "-A", "OUTPUT", "-p", "udp", "-o", "e+", "-j", "DROP"); err != nil {
- return err
- }
-
- return sendUDPLoop(ctx, ip, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterOutputInterfaceBeginsWith) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
- if err := listenUDP(timedCtx, acceptPort, ipv6); err == nil {
- return fmt.Errorf("packets should not be received on port %v, but are received", acceptPort)
- } else if !errors.Is(err, context.DeadlineExceeded) {
- return fmt.Errorf("error reading: %v", err)
- }
-
- return nil
-}
-
-// FilterOutputInterfaceInvertDrop tests that packets sent via an interface
-// other than the one negated in the rule ("! -o") are dropped.
-type FilterOutputInterfaceInvertDrop struct{ baseCase }
-
-var _ TestCase = (*FilterOutputInterfaceInvertDrop)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterOutputInterfaceInvertDrop) Name() string {
- return "FilterOutputInterfaceInvertDrop"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterOutputInterfaceInvertDrop) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := filterTable(ipv6, "-A", "OUTPUT", "-p", "tcp", "!", "-o", "lo", "-j", "DROP"); err != nil {
- return err
- }
-
- // Listen for TCP packets on accept port.
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
- if err := listenTCP(timedCtx, acceptPort, ipv6); err == nil {
- return fmt.Errorf("connection on port %d should not be accepted, but got accepted", acceptPort)
- } else if !errors.Is(err, context.DeadlineExceeded) {
- return fmt.Errorf("error reading: %v", err)
- }
-
- return nil
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterOutputInterfaceInvertDrop) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
- if err := connectTCP(timedCtx, ip, acceptPort, ipv6); err == nil {
- return fmt.Errorf("connection destined to port %d should not be accepted, but got accepted", acceptPort)
- }
-
- return nil
-}
-
-// FilterOutputInterfaceInvertAccept tests that packets sent via an interface
-// other than the one negated in the rule ("! -o") are accepted.
-type FilterOutputInterfaceInvertAccept struct{ baseCase }
-
-var _ TestCase = (*FilterOutputInterfaceInvertAccept)(nil)
-
-// Name implements TestCase.Name.
-func (*FilterOutputInterfaceInvertAccept) Name() string {
- return "FilterOutputInterfaceInvertAccept"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*FilterOutputInterfaceInvertAccept) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := filterTable(ipv6, "-A", "OUTPUT", "-p", "tcp", "!", "-o", "lo", "-j", "ACCEPT"); err != nil {
- return err
- }
-
- // Listen for TCP packets on accept port.
- return listenTCP(ctx, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*FilterOutputInterfaceInvertAccept) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return connectTCP(ctx, ip, acceptPort, ipv6)
-}
diff --git a/test/iptables/iptables.go b/test/iptables/iptables.go
deleted file mode 100644
index 970587a02..000000000
--- a/test/iptables/iptables.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package iptables contains a set of iptables tests implemented as TestCases.
-package iptables
-
-import (
- "context"
- "fmt"
- "net"
- "time"
-)
-
-// IPExchangePort is the port the container listens on to receive the IP
-// address of the local process.
-const IPExchangePort = 2349
-
-// TerminalStatement is the last statement in the test runner.
-const TerminalStatement = "Finished!"
-
-// TestTimeout is the timeout used for all tests.
-const TestTimeout = 10 * time.Second
-
-// NegativeTimeout is the time tests should wait to establish the negative
-// case, i.e. that connections are not made.
-const NegativeTimeout = 2 * time.Second
-
-// A TestCase contains one action to run in the container and one to run
-// locally. The actions run concurrently and each must succeed for the test
-// to pass.
-type TestCase interface {
- // Name returns the name of the test.
- Name() string
-
- // ContainerAction runs inside the container. It receives the IP of the
- // local process.
- ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error
-
- // LocalAction runs locally. It receives the IP of the container.
- LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error
-
- // ContainerSufficient indicates whether ContainerAction's return value
- // alone indicates whether the test succeeded.
- ContainerSufficient() bool
-
- // LocalSufficient indicates whether LocalAction's return value alone
- // indicates whether the test succeeded.
- LocalSufficient() bool
-}
-
-// baseCase provides defaults for ContainerSufficient and LocalSufficient when
-// both actions are required to finish.
-type baseCase struct{}
-
-// ContainerSufficient implements TestCase.ContainerSufficient.
-func (*baseCase) ContainerSufficient() bool {
- return false
-}
-
-// LocalSufficient implements TestCase.LocalSufficient.
-func (*baseCase) LocalSufficient() bool {
- return false
-}
-
-// localCase provides defaults for ContainerSufficient and LocalSufficient when
-// only the local action is required to finish.
-type localCase struct{}
-
-// ContainerSufficient implements TestCase.ContainerSufficient.
-func (*localCase) ContainerSufficient() bool {
- return false
-}
-
-// LocalSufficient implements TestCase.LocalSufficient.
-func (*localCase) LocalSufficient() bool {
- return true
-}
-
-// containerCase provides defaults for ContainerSufficient and LocalSufficient
-// when only the container action is required to finish.
-type containerCase struct{}
-
-// ContainerSufficient implements TestCase.ContainerSufficient.
-func (*containerCase) ContainerSufficient() bool {
- return true
-}
-
-// LocalSufficient implements TestCase.LocalSufficient.
-func (*containerCase) LocalSufficient() bool {
- return false
-}
-
-// Tests maps test names to TestCase.
-//
-// New TestCases are added by calling RegisterTestCase in an init function.
-var Tests = map[string]TestCase{}
-
-// RegisterTestCase registers tc so it can be run.
-func RegisterTestCase(tc TestCase) {
- if _, ok := Tests[tc.Name()]; ok {
- panic(fmt.Sprintf("TestCase %s already registered.", tc.Name()))
- }
- Tests[tc.Name()] = tc
-}
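
The Tests map comment above notes that new TestCases register themselves from an init function, but this file contains no example case. Below is a minimal sketch of what one looks like, assuming the acceptPort constant and the filterTable/listenTCP/connectTCP helpers defined elsewhere in this package; the type name and rule are illustrative only and do not appear in the deleted files.

    // FilterOutputExample is a hypothetical case shown only to illustrate the
    // registration pattern; it is not part of the deleted files.
    type FilterOutputExample struct{ baseCase }

    func init() {
    	RegisterTestCase(&FilterOutputExample{})
    }

    // Name implements TestCase.Name.
    func (*FilterOutputExample) Name() string { return "FilterOutputExample" }

    // ContainerAction implements TestCase.ContainerAction.
    func (*FilterOutputExample) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
    	// Accept all outgoing TCP, then wait for the local side to connect.
    	if err := filterTable(ipv6, "-A", "OUTPUT", "-p", "tcp", "-j", "ACCEPT"); err != nil {
    		return err
    	}
    	return listenTCP(ctx, acceptPort, ipv6)
    }

    // LocalAction implements TestCase.LocalAction.
    func (*FilterOutputExample) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
    	return connectTCP(ctx, ip, acceptPort, ipv6)
    }

A matching test function in iptables_test.go would then simply call singleTest(t, &FilterOutputExample{}).
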
diff --git a/test/iptables/iptables_test.go b/test/iptables/iptables_test.go
deleted file mode 100644
index 04d112134..000000000
--- a/test/iptables/iptables_test.go
+++ /dev/null
@@ -1,466 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package iptables
-
-import (
- "context"
- "errors"
- "fmt"
- "net"
- "reflect"
- "sync"
- "testing"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/pkg/test/testutil"
-)
-
-// singleTest runs a TestCase. Each test follows a pattern:
-// - Create a container.
-// - Get the container's IP.
-// - Send the container our IP.
-// - Start a new goroutine running the local action of the test.
-// - Wait for both the container and local actions to finish.
-//
-// Container output is logged to $TEST_UNDECLARED_OUTPUTS_DIR if it exists, or
-// to stderr.
-func singleTest(t *testing.T, test TestCase) {
- for _, tc := range []bool{false, true} {
- subtest := "IPv4"
- if tc {
- subtest = "IPv6"
- }
- t.Run(subtest, func(t *testing.T) {
- iptablesTest(t, test, tc)
- })
- }
-}
-
-func iptablesTest(t *testing.T, test TestCase, ipv6 bool) {
- if _, ok := Tests[test.Name()]; !ok {
- t.Fatalf("no test found with name %q. Has it been registered?", test.Name())
- }
-
- // Wait for the local and container goroutines to finish.
- var wg sync.WaitGroup
- defer wg.Wait()
-
- ctx, cancel := context.WithTimeout(context.Background(), TestTimeout)
- defer cancel()
-
- d := dockerutil.MakeContainer(ctx, t)
- defer func() {
- if logs, err := d.Logs(context.Background()); err != nil {
- t.Logf("Failed to retrieve container logs.")
- } else {
- t.Logf("=== Container logs: ===\n%s", logs)
- }
- // Use a new context, as cleanup should run even when we
- // timeout.
- d.CleanUp(context.Background())
- }()
-
- // Create and start the container.
- opts := dockerutil.RunOpts{
- Image: "iptables",
- CapAdd: []string{"NET_ADMIN"},
- }
- d.CopyFiles(&opts, "/runner", "test/iptables/runner/runner")
- args := []string{"/runner/runner", "-name", test.Name()}
- if ipv6 {
- args = append(args, "-ipv6")
- }
- if err := d.Spawn(ctx, opts, args...); err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
-
- // Get the container IP.
- ip, err := d.FindIP(ctx, ipv6)
- if err != nil {
- // If ipv6 is not configured, don't fail.
- if ipv6 && err == dockerutil.ErrNoIP {
- t.Skipf("No ipv6 address is available.")
- }
- t.Fatalf("failed to get container IP: %v", err)
- }
-
- // Give the container our IP.
- if err := sendIP(ip); err != nil {
- t.Fatalf("failed to send IP to container: %v", err)
- }
-
- // Run our side of the test.
- errCh := make(chan error, 2)
- wg.Add(1)
- go func() {
- defer wg.Done()
- if err := test.LocalAction(ctx, ip, ipv6); err != nil && !errors.Is(err, context.Canceled) {
- errCh <- fmt.Errorf("LocalAction failed: %v", err)
- } else {
- errCh <- nil
- }
- if test.LocalSufficient() {
- errCh <- nil
- }
- }()
-
- // Run the container side.
- wg.Add(1)
- go func() {
- defer wg.Done()
- // Wait for the final statement. This structure has the side
- // effect that all container logs will appear within the
- // individual test context.
- if _, err := d.WaitForOutput(ctx, TerminalStatement, TestTimeout); err != nil && !errors.Is(err, context.Canceled) {
- errCh <- fmt.Errorf("ContainerAction failed: %v", err)
- } else {
- errCh <- nil
- }
- if test.ContainerSufficient() {
- errCh <- nil
- }
- }()
-
-	for i := 0; i < 2; i++ {
-		if err := <-errCh; err != nil {
-			t.Fatal(err)
-		}
-	}
-}
-
-func sendIP(ip net.IP) error {
- contAddr := net.TCPAddr{
- IP: ip,
- Port: IPExchangePort,
- }
- var conn *net.TCPConn
- // The container may not be listening when we first connect, so retry
- // upon error.
- cb := func() error {
- c, err := net.DialTCP("tcp", nil, &contAddr)
- conn = c
- return err
- }
- if err := testutil.Poll(cb, TestTimeout); err != nil {
- return fmt.Errorf("timed out waiting to send IP, most recent error: %v", err)
- }
- if _, err := conn.Write([]byte{0}); err != nil {
- return fmt.Errorf("error writing to container: %v", err)
- }
- return nil
-}
-
-func TestFilterInputDropUDP(t *testing.T) {
- singleTest(t, &FilterInputDropUDP{})
-}
-
-func TestFilterInputDropUDPPort(t *testing.T) {
- singleTest(t, &FilterInputDropUDPPort{})
-}
-
-func TestFilterInputDropDifferentUDPPort(t *testing.T) {
- singleTest(t, &FilterInputDropDifferentUDPPort{})
-}
-
-func TestFilterInputDropAll(t *testing.T) {
- singleTest(t, &FilterInputDropAll{})
-}
-
-func TestFilterInputDropOnlyUDP(t *testing.T) {
- singleTest(t, &FilterInputDropOnlyUDP{})
-}
-
-func TestFilterInputDropTCPDestPort(t *testing.T) {
- singleTest(t, &FilterInputDropTCPDestPort{})
-}
-
-func TestFilterInputDropTCPSrcPort(t *testing.T) {
- singleTest(t, &FilterInputDropTCPSrcPort{})
-}
-
-func TestFilterInputCreateUserChain(t *testing.T) {
- singleTest(t, &FilterInputCreateUserChain{})
-}
-
-func TestFilterInputDefaultPolicyAccept(t *testing.T) {
- singleTest(t, &FilterInputDefaultPolicyAccept{})
-}
-
-func TestFilterInputDefaultPolicyDrop(t *testing.T) {
- singleTest(t, &FilterInputDefaultPolicyDrop{})
-}
-
-func TestFilterInputReturnUnderflow(t *testing.T) {
- singleTest(t, &FilterInputReturnUnderflow{})
-}
-
-func TestFilterOutputDropTCPDestPort(t *testing.T) {
- singleTest(t, &FilterOutputDropTCPDestPort{})
-}
-
-func TestFilterOutputDropTCPSrcPort(t *testing.T) {
- singleTest(t, &FilterOutputDropTCPSrcPort{})
-}
-
-func TestFilterOutputAcceptTCPOwner(t *testing.T) {
- singleTest(t, &FilterOutputAcceptTCPOwner{})
-}
-
-func TestFilterOutputDropTCPOwner(t *testing.T) {
- singleTest(t, &FilterOutputDropTCPOwner{})
-}
-
-func TestFilterOutputAcceptUDPOwner(t *testing.T) {
- singleTest(t, &FilterOutputAcceptUDPOwner{})
-}
-
-func TestFilterOutputDropUDPOwner(t *testing.T) {
- singleTest(t, &FilterOutputDropUDPOwner{})
-}
-
-func TestFilterOutputOwnerFail(t *testing.T) {
- singleTest(t, &FilterOutputOwnerFail{})
-}
-
-func TestFilterOutputAcceptGIDOwner(t *testing.T) {
- singleTest(t, &FilterOutputAcceptGIDOwner{})
-}
-
-func TestFilterOutputDropGIDOwner(t *testing.T) {
- singleTest(t, &FilterOutputDropGIDOwner{})
-}
-
-func TestFilterOutputInvertGIDOwner(t *testing.T) {
- singleTest(t, &FilterOutputInvertGIDOwner{})
-}
-
-func TestFilterOutputInvertUIDOwner(t *testing.T) {
- singleTest(t, &FilterOutputInvertUIDOwner{})
-}
-
-func TestFilterOutputInvertUIDAndGIDOwner(t *testing.T) {
- singleTest(t, &FilterOutputInvertUIDAndGIDOwner{})
-}
-
-func TestFilterOutputInterfaceAccept(t *testing.T) {
- singleTest(t, &FilterOutputInterfaceAccept{})
-}
-
-func TestFilterOutputInterfaceDrop(t *testing.T) {
- singleTest(t, &FilterOutputInterfaceDrop{})
-}
-
-func TestFilterOutputInterface(t *testing.T) {
- singleTest(t, &FilterOutputInterface{})
-}
-
-func TestFilterOutputInterfaceBeginsWith(t *testing.T) {
- singleTest(t, &FilterOutputInterfaceBeginsWith{})
-}
-
-func TestFilterOutputInterfaceInvertDrop(t *testing.T) {
- singleTest(t, &FilterOutputInterfaceInvertDrop{})
-}
-
-func TestFilterOutputInterfaceInvertAccept(t *testing.T) {
- singleTest(t, &FilterOutputInterfaceInvertAccept{})
-}
-
-func TestJumpSerialize(t *testing.T) {
- singleTest(t, &FilterInputSerializeJump{})
-}
-
-func TestJumpBasic(t *testing.T) {
- singleTest(t, &FilterInputJumpBasic{})
-}
-
-func TestJumpReturn(t *testing.T) {
- singleTest(t, &FilterInputJumpReturn{})
-}
-
-func TestJumpReturnDrop(t *testing.T) {
- singleTest(t, &FilterInputJumpReturnDrop{})
-}
-
-func TestJumpBuiltin(t *testing.T) {
- singleTest(t, &FilterInputJumpBuiltin{})
-}
-
-func TestJumpTwice(t *testing.T) {
- singleTest(t, &FilterInputJumpTwice{})
-}
-
-func TestInputDestination(t *testing.T) {
- singleTest(t, &FilterInputDestination{})
-}
-
-func TestInputInvertDestination(t *testing.T) {
- singleTest(t, &FilterInputInvertDestination{})
-}
-
-func TestFilterOutputDestination(t *testing.T) {
- singleTest(t, &FilterOutputDestination{})
-}
-
-func TestFilterOutputInvertDestination(t *testing.T) {
- singleTest(t, &FilterOutputInvertDestination{})
-}
-
-func TestNATPreRedirectUDPPort(t *testing.T) {
- singleTest(t, &NATPreRedirectUDPPort{})
-}
-
-func TestNATPreRedirectTCPPort(t *testing.T) {
- singleTest(t, &NATPreRedirectTCPPort{})
-}
-
-func TestNATPreRedirectTCPOutgoing(t *testing.T) {
- singleTest(t, &NATPreRedirectTCPOutgoing{})
-}
-
-func TestNATOutRedirectTCPIncoming(t *testing.T) {
- singleTest(t, &NATOutRedirectTCPIncoming{})
-}
-
-func TestNATOutRedirectUDPPort(t *testing.T) {
- singleTest(t, &NATOutRedirectUDPPort{})
-}
-
-func TestNATOutRedirectTCPPort(t *testing.T) {
- singleTest(t, &NATOutRedirectTCPPort{})
-}
-
-func TestNATDropUDP(t *testing.T) {
- singleTest(t, &NATDropUDP{})
-}
-
-func TestNATAcceptAll(t *testing.T) {
- singleTest(t, &NATAcceptAll{})
-}
-
-func TestNATOutRedirectIP(t *testing.T) {
- singleTest(t, &NATOutRedirectIP{})
-}
-
-func TestNATOutDontRedirectIP(t *testing.T) {
- singleTest(t, &NATOutDontRedirectIP{})
-}
-
-func TestNATOutRedirectInvert(t *testing.T) {
- singleTest(t, &NATOutRedirectInvert{})
-}
-
-func TestNATPreRedirectIP(t *testing.T) {
- singleTest(t, &NATPreRedirectIP{})
-}
-
-func TestNATPreDontRedirectIP(t *testing.T) {
- singleTest(t, &NATPreDontRedirectIP{})
-}
-
-func TestNATPreRedirectInvert(t *testing.T) {
- singleTest(t, &NATPreRedirectInvert{})
-}
-
-func TestNATRedirectRequiresProtocol(t *testing.T) {
- singleTest(t, &NATRedirectRequiresProtocol{})
-}
-
-func TestNATLoopbackSkipsPrerouting(t *testing.T) {
- singleTest(t, &NATLoopbackSkipsPrerouting{})
-}
-
-func TestInputSource(t *testing.T) {
- singleTest(t, &FilterInputSource{})
-}
-
-func TestInputInvertSource(t *testing.T) {
- singleTest(t, &FilterInputInvertSource{})
-}
-
-func TestInputInterfaceAccept(t *testing.T) {
- singleTest(t, &FilterInputInterfaceAccept{})
-}
-
-func TestInputInterfaceDrop(t *testing.T) {
- singleTest(t, &FilterInputInterfaceDrop{})
-}
-
-func TestInputInterface(t *testing.T) {
- singleTest(t, &FilterInputInterface{})
-}
-
-func TestInputInterfaceBeginsWith(t *testing.T) {
- singleTest(t, &FilterInputInterfaceBeginsWith{})
-}
-
-func TestInputInterfaceInvertDrop(t *testing.T) {
- singleTest(t, &FilterInputInterfaceInvertDrop{})
-}
-
-func TestInputInterfaceInvertAccept(t *testing.T) {
- singleTest(t, &FilterInputInterfaceInvertAccept{})
-}
-
-func TestFilterAddrs(t *testing.T) {
- tcs := []struct {
- ipv6 bool
- addrs []string
- want []string
- }{
- {
- ipv6: false,
- addrs: []string{"192.168.0.1", "192.168.0.2/24", "::1", "::2/128"},
- want: []string{"192.168.0.1", "192.168.0.2"},
- },
- {
- ipv6: true,
- addrs: []string{"192.168.0.1", "192.168.0.2/24", "::1", "::2/128"},
- want: []string{"::1", "::2"},
- },
- }
-
- for _, tc := range tcs {
- if got := filterAddrs(tc.addrs, tc.ipv6); !reflect.DeepEqual(got, tc.want) {
- t.Errorf("%v with IPv6 %t: got %v, but wanted %v", tc.addrs, tc.ipv6, got, tc.want)
- }
- }
-}
-
-func TestNATPreOriginalDst(t *testing.T) {
- singleTest(t, &NATPreOriginalDst{})
-}
-
-func TestNATOutOriginalDst(t *testing.T) {
- singleTest(t, &NATOutOriginalDst{})
-}
-
-func TestNATPreRECVORIGDSTADDR(t *testing.T) {
- singleTest(t, &NATPreRECVORIGDSTADDR{})
-}
-
-func TestNATOutRECVORIGDSTADDR(t *testing.T) {
- singleTest(t, &NATOutRECVORIGDSTADDR{})
-}
-
-func TestNATPostSNATUDP(t *testing.T) {
- singleTest(t, &NATPostSNATUDP{})
-}
-
-func TestNATPostSNATTCP(t *testing.T) {
- singleTest(t, &NATPostSNATTCP{})
-}
diff --git a/test/iptables/iptables_unsafe.go b/test/iptables/iptables_unsafe.go
deleted file mode 100644
index dd1a1c082..000000000
--- a/test/iptables/iptables_unsafe.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package iptables
-
-import (
- "fmt"
- "unsafe"
-
- "golang.org/x/sys/unix"
-)
-
-type originalDstError struct {
- errno unix.Errno
-}
-
-func (e originalDstError) Error() string {
- return fmt.Sprintf("errno (%d) when calling getsockopt(SO_ORIGINAL_DST): %v", int(e.errno), e.errno.Error())
-}
-
-// SO_ORIGINAL_DST gets the original destination of a redirected packet via
-// getsockopt.
-const SO_ORIGINAL_DST = 80
-
-func originalDestination4(connfd int) (unix.RawSockaddrInet4, error) {
- var addr unix.RawSockaddrInet4
- var addrLen uint32 = unix.SizeofSockaddrInet4
- if errno := originalDestination(connfd, unix.SOL_IP, unsafe.Pointer(&addr), &addrLen); errno != 0 {
- return unix.RawSockaddrInet4{}, originalDstError{errno}
- }
- return addr, nil
-}
-
-func originalDestination6(connfd int) (unix.RawSockaddrInet6, error) {
- var addr unix.RawSockaddrInet6
- var addrLen uint32 = unix.SizeofSockaddrInet6
- if errno := originalDestination(connfd, unix.SOL_IPV6, unsafe.Pointer(&addr), &addrLen); errno != 0 {
- return unix.RawSockaddrInet6{}, originalDstError{errno}
- }
- return addr, nil
-}
-
-func originalDestination(connfd int, level uintptr, optval unsafe.Pointer, optlen *uint32) unix.Errno {
- _, _, errno := unix.Syscall6(
- unix.SYS_GETSOCKOPT,
- uintptr(connfd),
- level,
- SO_ORIGINAL_DST,
- uintptr(optval),
- uintptr(unsafe.Pointer(optlen)),
- 0)
- return errno
-}
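
These helpers are only exercised much further down, in nat.go's listenForRedirectedConn. As a reading aid, here is a compressed sketch of how an accepted IPv4 connection's pre-NAT destination port would be checked, assuming the htons helper from iptables_util.go; the function name and the bare port comparison are illustrative, not the deleted package's actual check.

    // checkOriginalDstPort4 reads SO_ORIGINAL_DST from an accepted IPv4
    // connection and compares the pre-REDIRECT destination port. Sketch only;
    // the real verification lives in listenForRedirectedConn in nat.go.
    func checkOriginalDstPort4(connFD int, wantPort uint16) error {
    	addr, err := originalDestination4(connFD)
    	if err != nil {
    		return err
    	}
    	// The port in a raw sockaddr is in network byte order.
    	if addr.Port != htons(wantPort) {
    		return fmt.Errorf("got original destination port %d, want %d", htons(addr.Port), wantPort)
    	}
    	return nil
    }
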
diff --git a/test/iptables/iptables_util.go b/test/iptables/iptables_util.go
deleted file mode 100644
index 4590e169d..000000000
--- a/test/iptables/iptables_util.go
+++ /dev/null
@@ -1,327 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package iptables
-
-import (
- "context"
- "encoding/binary"
- "errors"
- "fmt"
- "net"
- "os/exec"
- "strings"
- "time"
-
- "gvisor.dev/gvisor/pkg/test/testutil"
-)
-
-// filterTable calls `ip{6}tables -t filter` with the given args.
-func filterTable(ipv6 bool, args ...string) error {
- return tableCmd(ipv6, "filter", args)
-}
-
-// natTable calls `ip{6}tables -t nat` with the given args.
-func natTable(ipv6 bool, args ...string) error {
- return tableCmd(ipv6, "nat", args)
-}
-
-func tableCmd(ipv6 bool, table string, args []string) error {
- args = append([]string{"-t", table}, args...)
- binary := "iptables"
- if ipv6 {
- binary = "ip6tables"
- }
- cmd := exec.Command(binary, args...)
- if out, err := cmd.CombinedOutput(); err != nil {
- return fmt.Errorf("error running iptables with args %v\nerror: %v\noutput: %s", args, err, string(out))
- }
- return nil
-}
-
-// filterTableRules is like filterTable, but runs multiple iptables commands.
-func filterTableRules(ipv6 bool, argsList [][]string) error {
- return tableRules(ipv6, "filter", argsList)
-}
-
-// natTableRules is like natTable, but runs multiple iptables commands.
-func natTableRules(ipv6 bool, argsList [][]string) error {
- return tableRules(ipv6, "nat", argsList)
-}
-
-func tableRules(ipv6 bool, table string, argsList [][]string) error {
- for _, args := range argsList {
- if err := tableCmd(ipv6, table, args); err != nil {
- return err
- }
- }
- return nil
-}
-
-// listenUDP listens on a UDP port and returns nil if the first read from that
-// port is successful.
-func listenUDP(ctx context.Context, port int, ipv6 bool) error {
- _, err := listenUDPFrom(ctx, port, ipv6)
- return err
-}
-
-// listenUDPFrom listens on a UDP port and returns the sender's UDP address if
-// the first read from that port is successful.
-func listenUDPFrom(ctx context.Context, port int, ipv6 bool) (*net.UDPAddr, error) {
- localAddr := net.UDPAddr{
- Port: port,
- }
- conn, err := net.ListenUDP(udpNetwork(ipv6), &localAddr)
- if err != nil {
- return nil, err
- }
- defer conn.Close()
-
- type result struct {
- remoteAddr *net.UDPAddr
- err error
- }
-
- ch := make(chan result)
- go func() {
- _, remoteAddr, err := conn.ReadFromUDP([]byte{0})
- ch <- result{remoteAddr, err}
- }()
-
- select {
- case res := <-ch:
- return res.remoteAddr, res.err
- case <-ctx.Done():
- return nil, fmt.Errorf("timed out reading from %s: %w", &localAddr, ctx.Err())
- }
-}
-
-// sendUDPLoop repeatedly sends 1-byte UDP packets to the specified IP and
-// port until ctx is done.
-func sendUDPLoop(ctx context.Context, ip net.IP, port int, ipv6 bool) error {
- remote := net.UDPAddr{
- IP: ip,
- Port: port,
- }
- conn, err := net.DialUDP(udpNetwork(ipv6), nil, &remote)
- if err != nil {
- return err
- }
- defer conn.Close()
-
- for {
- // This may return an error (connection refused) if the remote
- // hasn't started listening yet or they're dropping our
- // packets. So we ignore Write errors and depend on the remote
- // to report a failure if it doesn't get a packet it needs.
- conn.Write([]byte{0})
- select {
- case <-ctx.Done():
- // Being cancelled or timing out isn't an error, as we
- // cannot tell with UDP whether we succeeded.
- return nil
- // Continue looping.
- case <-time.After(200 * time.Millisecond):
- }
- }
-}
-
-// listenTCP listens for connections on a TCP port, and returns nil if a
-// connection is established.
-func listenTCP(ctx context.Context, port int, ipv6 bool) error {
- _, err := listenTCPFrom(ctx, port, ipv6)
- return err
-}
-
-// listenTCPFrom listens for connections on a TCP port, and returns the remote
-// TCP address if a connection is established.
-func listenTCPFrom(ctx context.Context, port int, ipv6 bool) (net.Addr, error) {
- localAddr := net.TCPAddr{
- Port: port,
- }
-
- // Starts listening on port.
- lConn, err := net.ListenTCP(tcpNetwork(ipv6), &localAddr)
- if err != nil {
- return nil, err
- }
- defer lConn.Close()
-
- type result struct {
- remoteAddr net.Addr
- err error
- }
-
- // Accept connections on port.
- ch := make(chan result)
- go func() {
- conn, err := lConn.AcceptTCP()
- var remoteAddr net.Addr
- if err == nil {
- remoteAddr = conn.RemoteAddr()
- }
- ch <- result{remoteAddr, err}
- conn.Close()
- }()
-
- select {
- case res := <-ch:
- return res.remoteAddr, res.err
- case <-ctx.Done():
- return nil, fmt.Errorf("timed out waiting for a connection at %s: %w", &localAddr, ctx.Err())
- }
-}
-
-// connectTCP connects to the given IP and port from an ephemeral local address.
-func connectTCP(ctx context.Context, ip net.IP, port int, ipv6 bool) error {
- contAddr := net.TCPAddr{
- IP: ip,
- Port: port,
- }
- // The container may not be listening when we first connect, so retry
- // upon error.
- callback := func() error {
- var d net.Dialer
- conn, err := d.DialContext(ctx, tcpNetwork(ipv6), contAddr.String())
- if conn != nil {
- conn.Close()
- }
- return err
- }
- if err := testutil.PollContext(ctx, callback); err != nil {
- return fmt.Errorf("timed out waiting to connect IP on port %v, most recent error: %w", port, err)
- }
-
- return nil
-}
-
-// localAddrs returns a list of local network interface addresses. When ipv6 is
-// true, only IPv6 addresses are returned. Otherwise only IPv4 addresses are
-// returned.
-func localAddrs(ipv6 bool) ([]string, error) {
- addrs, err := net.InterfaceAddrs()
- if err != nil {
- return nil, err
- }
- addrStrs := make([]string, 0, len(addrs))
- for _, addr := range addrs {
- // Add only IPv4 or only IPv6 addresses.
- parts := strings.Split(addr.String(), "/")
- if len(parts) != 2 {
- return nil, fmt.Errorf("bad interface address: %q", addr.String())
- }
- if isIPv6 := net.ParseIP(parts[0]).To4() == nil; isIPv6 == ipv6 {
- addrStrs = append(addrStrs, addr.String())
- }
- }
- return filterAddrs(addrStrs, ipv6), nil
-}
-
-func filterAddrs(addrs []string, ipv6 bool) []string {
- addrStrs := make([]string, 0, len(addrs))
- for _, addr := range addrs {
- // Add only IPv4 or only IPv6 addresses.
- parts := strings.Split(addr, "/")
- if isIPv6 := net.ParseIP(parts[0]).To4() == nil; isIPv6 == ipv6 {
- addrStrs = append(addrStrs, parts[0])
- }
- }
- return addrStrs
-}
-
-// getInterfaceName returns the name of the first non-loopback interface.
-func getInterfaceName() (string, bool) {
- iface, ok := getNonLoopbackInterface()
- if !ok {
- return "", false
- }
- return iface.Name, true
-}
-
-func getInterfaceAddrs(ipv6 bool) ([]net.IP, error) {
- iface, ok := getNonLoopbackInterface()
- if !ok {
- return nil, errors.New("no non-loopback interface found")
- }
- addrs, err := iface.Addrs()
- if err != nil {
- return nil, err
- }
-
- // Get only IPv4 or IPv6 addresses.
- ips := make([]net.IP, 0, len(addrs))
- for _, addr := range addrs {
- parts := strings.Split(addr.String(), "/")
- var ip net.IP
- // To16() returns IPv4 addresses as IPv4-mapped IPv6 addresses.
- // So we check whether To4() returns nil to test whether the
- // address is v4 or v6.
- if v4 := net.ParseIP(parts[0]).To4(); ipv6 && v4 == nil {
- ip = net.ParseIP(parts[0]).To16()
- } else {
- ip = v4
- }
- if ip != nil {
- ips = append(ips, ip)
- }
- }
- return ips, nil
-}
-
-func getNonLoopbackInterface() (net.Interface, bool) {
- if interfaces, err := net.Interfaces(); err == nil {
- for _, intf := range interfaces {
- if intf.Name != "lo" {
- return intf, true
- }
- }
- }
- return net.Interface{}, false
-}
-
-func htons(x uint16) uint16 {
- buf := make([]byte, 2)
- binary.BigEndian.PutUint16(buf, x)
- return binary.LittleEndian.Uint16(buf)
-}
-
-func localIP(ipv6 bool) string {
- if ipv6 {
- return "::1"
- }
- return "127.0.0.1"
-}
-
-func nowhereIP(ipv6 bool) string {
- if ipv6 {
- return "2001:db8::1"
- }
- return "192.0.2.1"
-}
-
-// udpNetwork returns an IPv4 or IPv6 UDP network argument to net.Dial.
-func udpNetwork(ipv6 bool) string {
- if ipv6 {
- return "udp6"
- }
- return "udp4"
-}
-
-// tcpNetwork returns an IPv4 or IPv6 TCP network argument to net.Dial.
-func tcpNetwork(ipv6 bool) string {
- if ipv6 {
- return "tcp6"
- }
- return "tcp4"
-}
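
The DROP-style cases earlier in this diff all repeat the same negative check: wait under NegativeTimeout and require that the deadline, not a successful connection, is what ends the wait. For readability, here is that inlined pattern expressed as a standalone helper; the helper name is illustrative, since the deleted files inline this code rather than sharing a function.

    // expectNoTCPConn asserts that nothing connects to port before
    // NegativeTimeout elapses. Illustrative only; the test cases inline
    // this pattern.
    func expectNoTCPConn(ctx context.Context, port int, ipv6 bool) error {
    	timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
    	defer cancel()
    	if err := listenTCP(timedCtx, port, ipv6); err == nil {
    		return fmt.Errorf("connection on port %d should not be accepted, but got accepted", port)
    	} else if !errors.Is(err, context.DeadlineExceeded) {
    		return fmt.Errorf("error reading: %v", err)
    	}
    	return nil
    }
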
diff --git a/test/iptables/nat.go b/test/iptables/nat.go
deleted file mode 100644
index 0f25b6a18..000000000
--- a/test/iptables/nat.go
+++ /dev/null
@@ -1,1037 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package iptables
-
-import (
- "context"
- "errors"
- "fmt"
- "net"
- "strconv"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/binary"
- "gvisor.dev/gvisor/pkg/hostarch"
-)
-
-const redirectPort = 42
-
-func init() {
- RegisterTestCase(&NATPreRedirectUDPPort{})
- RegisterTestCase(&NATPreRedirectTCPPort{})
- RegisterTestCase(&NATPreRedirectTCPOutgoing{})
- RegisterTestCase(&NATOutRedirectTCPIncoming{})
- RegisterTestCase(&NATOutRedirectUDPPort{})
- RegisterTestCase(&NATOutRedirectTCPPort{})
- RegisterTestCase(&NATDropUDP{})
- RegisterTestCase(&NATAcceptAll{})
- RegisterTestCase(&NATPreRedirectIP{})
- RegisterTestCase(&NATPreDontRedirectIP{})
- RegisterTestCase(&NATPreRedirectInvert{})
- RegisterTestCase(&NATOutRedirectIP{})
- RegisterTestCase(&NATOutDontRedirectIP{})
- RegisterTestCase(&NATOutRedirectInvert{})
- RegisterTestCase(&NATRedirectRequiresProtocol{})
- RegisterTestCase(&NATLoopbackSkipsPrerouting{})
- RegisterTestCase(&NATPreOriginalDst{})
- RegisterTestCase(&NATOutOriginalDst{})
- RegisterTestCase(&NATPreRECVORIGDSTADDR{})
- RegisterTestCase(&NATOutRECVORIGDSTADDR{})
- RegisterTestCase(&NATPostSNATUDP{})
- RegisterTestCase(&NATPostSNATTCP{})
-}
-
-// NATPreRedirectUDPPort tests that packets are redirected to a different port.
-type NATPreRedirectUDPPort struct{ containerCase }
-
-var _ TestCase = (*NATPreRedirectUDPPort)(nil)
-
-// Name implements TestCase.Name.
-func (*NATPreRedirectUDPPort) Name() string {
- return "NATPreRedirectUDPPort"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*NATPreRedirectUDPPort) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := natTable(ipv6, "-A", "PREROUTING", "-p", "udp", "-j", "REDIRECT", "--to-ports", fmt.Sprintf("%d", redirectPort)); err != nil {
- return err
- }
-
- if err := listenUDP(ctx, redirectPort, ipv6); err != nil {
- return fmt.Errorf("packets on port %d should be allowed, but encountered an error: %v", redirectPort, err)
- }
-
- return nil
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*NATPreRedirectUDPPort) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return sendUDPLoop(ctx, ip, acceptPort, ipv6)
-}
-
-// NATPreRedirectTCPPort tests that connections are redirected on specified ports.
-type NATPreRedirectTCPPort struct{ baseCase }
-
-var _ TestCase = (*NATPreRedirectTCPPort)(nil)
-
-// Name implements TestCase.Name.
-func (*NATPreRedirectTCPPort) Name() string {
- return "NATPreRedirectTCPPort"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*NATPreRedirectTCPPort) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := natTable(ipv6, "-A", "PREROUTING", "-p", "tcp", "-m", "tcp", "--dport", fmt.Sprintf("%d", dropPort), "-j", "REDIRECT", "--to-ports", fmt.Sprintf("%d", acceptPort)); err != nil {
- return err
- }
-
- // Listen for TCP packets on redirect port.
- return listenTCP(ctx, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*NATPreRedirectTCPPort) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return connectTCP(ctx, ip, dropPort, ipv6)
-}
-
-// NATPreRedirectTCPOutgoing verifies that outgoing TCP connections aren't
-// affected by PREROUTING connection tracking.
-type NATPreRedirectTCPOutgoing struct{ baseCase }
-
-var _ TestCase = (*NATPreRedirectTCPOutgoing)(nil)
-
-// Name implements TestCase.Name.
-func (*NATPreRedirectTCPOutgoing) Name() string {
- return "NATPreRedirectTCPOutgoing"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*NATPreRedirectTCPOutgoing) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- // Redirect all incoming TCP traffic to a closed port.
- if err := natTable(ipv6, "-A", "PREROUTING", "-p", "tcp", "-j", "REDIRECT", "--to-ports", fmt.Sprintf("%d", dropPort)); err != nil {
- return err
- }
-
- // Establish a connection to the host process.
- return connectTCP(ctx, ip, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*NATPreRedirectTCPOutgoing) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return listenTCP(ctx, acceptPort, ipv6)
-}
-
-// NATOutRedirectTCPIncoming verifies that incoming TCP connections aren't
-// affected by OUTPUT connection tracking.
-type NATOutRedirectTCPIncoming struct{ baseCase }
-
-var _ TestCase = (*NATOutRedirectTCPIncoming)(nil)
-
-// Name implements TestCase.Name.
-func (*NATOutRedirectTCPIncoming) Name() string {
- return "NATOutRedirectTCPIncoming"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*NATOutRedirectTCPIncoming) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- // Redirect all outgoing TCP traffic to a closed port.
- if err := natTable(ipv6, "-A", "OUTPUT", "-p", "tcp", "-j", "REDIRECT", "--to-ports", fmt.Sprintf("%d", dropPort)); err != nil {
- return err
- }
-
- // Establish a connection to the host process.
- return listenTCP(ctx, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*NATOutRedirectTCPIncoming) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return connectTCP(ctx, ip, acceptPort, ipv6)
-}
-
-// NATOutRedirectUDPPort tests that packets are redirected to a different port.
-type NATOutRedirectUDPPort struct{ containerCase }
-
-var _ TestCase = (*NATOutRedirectUDPPort)(nil)
-
-// Name implements TestCase.Name.
-func (*NATOutRedirectUDPPort) Name() string {
- return "NATOutRedirectUDPPort"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*NATOutRedirectUDPPort) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return loopbackTest(ctx, ipv6, net.ParseIP(nowhereIP(ipv6)), "-A", "OUTPUT", "-p", "udp", "-j", "REDIRECT", "--to-ports", fmt.Sprintf("%d", acceptPort))
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*NATOutRedirectUDPPort) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- // No-op.
- return nil
-}
-
-// NATDropUDP tests that packets are not received on ports other than the
-// redirect port.
-type NATDropUDP struct{ containerCase }
-
-var _ TestCase = (*NATDropUDP)(nil)
-
-// Name implements TestCase.Name.
-func (*NATDropUDP) Name() string {
- return "NATDropUDP"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*NATDropUDP) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := natTable(ipv6, "-A", "PREROUTING", "-p", "udp", "-j", "REDIRECT", "--to-ports", fmt.Sprintf("%d", redirectPort)); err != nil {
- return err
- }
-
- timedCtx, cancel := context.WithTimeout(ctx, NegativeTimeout)
- defer cancel()
- if err := listenUDP(timedCtx, acceptPort, ipv6); err == nil {
- return fmt.Errorf("packets on port %d should have been redirected to port %d", acceptPort, redirectPort)
- } else if !errors.Is(err, context.DeadlineExceeded) {
- return fmt.Errorf("error reading: %v", err)
- }
-
- return nil
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*NATDropUDP) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return sendUDPLoop(ctx, ip, acceptPort, ipv6)
-}
-
-// NATAcceptAll tests that all UDP packets are accepted.
-type NATAcceptAll struct{ containerCase }
-
-var _ TestCase = (*NATAcceptAll)(nil)
-
-// Name implements TestCase.Name.
-func (*NATAcceptAll) Name() string {
- return "NATAcceptAll"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*NATAcceptAll) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := natTable(ipv6, "-A", "PREROUTING", "-p", "udp", "-j", "ACCEPT"); err != nil {
- return err
- }
-
- if err := listenUDP(ctx, acceptPort, ipv6); err != nil {
- return fmt.Errorf("packets on port %d should be allowed, but encountered an error: %v", acceptPort, err)
- }
-
- return nil
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*NATAcceptAll) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return sendUDPLoop(ctx, ip, acceptPort, ipv6)
-}
-
-// NATOutRedirectIP uses iptables to select packets based on destination IP and
-// redirects them.
-type NATOutRedirectIP struct{ baseCase }
-
-var _ TestCase = (*NATOutRedirectIP)(nil)
-
-// Name implements TestCase.Name.
-func (*NATOutRedirectIP) Name() string {
- return "NATOutRedirectIP"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*NATOutRedirectIP) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- // Redirect OUTPUT packets to a listening localhost port.
- return loopbackTest(ctx, ipv6, net.ParseIP(nowhereIP(ipv6)),
- "-A", "OUTPUT",
- "-d", nowhereIP(ipv6),
- "-p", "udp",
- "-j", "REDIRECT", "--to-port", fmt.Sprintf("%d", acceptPort))
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*NATOutRedirectIP) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- // No-op.
- return nil
-}
-
-// NATOutDontRedirectIP tests that iptables matching with "-d" does not match
-// packets it shouldn't.
-type NATOutDontRedirectIP struct{ localCase }
-
-var _ TestCase = (*NATOutDontRedirectIP)(nil)
-
-// Name implements TestCase.Name.
-func (*NATOutDontRedirectIP) Name() string {
- return "NATOutDontRedirectIP"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*NATOutDontRedirectIP) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := natTable(ipv6, "-A", "OUTPUT", "-d", localIP(ipv6), "-p", "udp", "-j", "REDIRECT", "--to-port", fmt.Sprintf("%d", dropPort)); err != nil {
- return err
- }
- return sendUDPLoop(ctx, ip, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*NATOutDontRedirectIP) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return listenUDP(ctx, acceptPort, ipv6)
-}
-
-// NATOutRedirectInvert tests that iptables can match with "! -d".
-type NATOutRedirectInvert struct{ baseCase }
-
-var _ TestCase = (*NATOutRedirectInvert)(nil)
-
-// Name implements TestCase.Name.
-func (*NATOutRedirectInvert) Name() string {
- return "NATOutRedirectInvert"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*NATOutRedirectInvert) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- // Redirect OUTPUT packets to a listening localhost port.
- dest := "192.0.2.2"
- if ipv6 {
- dest = "2001:db8::2"
- }
- return loopbackTest(ctx, ipv6, net.ParseIP(nowhereIP(ipv6)),
- "-A", "OUTPUT",
- "!", "-d", dest,
- "-p", "udp",
- "-j", "REDIRECT", "--to-port", fmt.Sprintf("%d", acceptPort))
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*NATOutRedirectInvert) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- // No-op.
- return nil
-}
-
-// NATPreRedirectIP tests that we can use iptables to select packets based on
-// destination IP and redirect them.
-type NATPreRedirectIP struct{ containerCase }
-
-var _ TestCase = (*NATPreRedirectIP)(nil)
-
-// Name implements TestCase.Name.
-func (*NATPreRedirectIP) Name() string {
- return "NATPreRedirectIP"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*NATPreRedirectIP) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- addrs, err := localAddrs(ipv6)
- if err != nil {
- return err
- }
-
- var rules [][]string
- for _, addr := range addrs {
- rules = append(rules, []string{"-A", "PREROUTING", "-p", "udp", "-d", addr, "-j", "REDIRECT", "--to-ports", fmt.Sprintf("%d", acceptPort)})
- }
- if err := natTableRules(ipv6, rules); err != nil {
- return err
- }
- return listenUDP(ctx, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*NATPreRedirectIP) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return sendUDPLoop(ctx, ip, dropPort, ipv6)
-}
-
-// NATPreDontRedirectIP tests that iptables matching with "-d" does not match
-// packets it shouldn't.
-type NATPreDontRedirectIP struct{ containerCase }
-
-var _ TestCase = (*NATPreDontRedirectIP)(nil)
-
-// Name implements TestCase.Name.
-func (*NATPreDontRedirectIP) Name() string {
- return "NATPreDontRedirectIP"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*NATPreDontRedirectIP) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := natTable(ipv6, "-A", "PREROUTING", "-p", "udp", "-d", localIP(ipv6), "-j", "REDIRECT", "--to-ports", fmt.Sprintf("%d", dropPort)); err != nil {
- return err
- }
- return listenUDP(ctx, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*NATPreDontRedirectIP) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return sendUDPLoop(ctx, ip, acceptPort, ipv6)
-}
-
-// NATPreRedirectInvert tests that iptables can match with "! -d".
-type NATPreRedirectInvert struct{ containerCase }
-
-var _ TestCase = (*NATPreRedirectInvert)(nil)
-
-// Name implements TestCase.Name.
-func (*NATPreRedirectInvert) Name() string {
- return "NATPreRedirectInvert"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*NATPreRedirectInvert) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := natTable(ipv6, "-A", "PREROUTING", "-p", "udp", "!", "-d", localIP(ipv6), "-j", "REDIRECT", "--to-ports", fmt.Sprintf("%d", acceptPort)); err != nil {
- return err
- }
- return listenUDP(ctx, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*NATPreRedirectInvert) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return sendUDPLoop(ctx, ip, dropPort, ipv6)
-}
-
-// NATRedirectRequiresProtocol tests that use of the --to-ports flag requires a
-// protocol to be specified with -p.
-type NATRedirectRequiresProtocol struct{ baseCase }
-
-var _ TestCase = (*NATRedirectRequiresProtocol)(nil)
-
-// Name implements TestCase.Name.
-func (*NATRedirectRequiresProtocol) Name() string {
- return "NATRedirectRequiresProtocol"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*NATRedirectRequiresProtocol) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := natTable(ipv6, "-A", "PREROUTING", "-d", localIP(ipv6), "-j", "REDIRECT", "--to-ports", fmt.Sprintf("%d", acceptPort)); err == nil {
- return errors.New("expected an error using REDIRECT --to-ports without a protocol")
- }
- return nil
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*NATRedirectRequiresProtocol) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- // No-op.
- return nil
-}
-
-// NATOutRedirectTCPPort tests that connections are redirected on specified ports.
-type NATOutRedirectTCPPort struct{ baseCase }
-
-var _ TestCase = (*NATOutRedirectTCPPort)(nil)
-
-// Name implements TestCase.Name.
-func (*NATOutRedirectTCPPort) Name() string {
- return "NATOutRedirectTCPPort"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*NATOutRedirectTCPPort) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := natTable(ipv6, "-A", "OUTPUT", "-p", "tcp", "-m", "tcp", "--dport", fmt.Sprintf("%d", dropPort), "-j", "REDIRECT", "--to-ports", fmt.Sprintf("%d", acceptPort)); err != nil {
- return err
- }
-
- localAddr := net.TCPAddr{
- IP: net.ParseIP(localIP(ipv6)),
- Port: acceptPort,
- }
-
- // Starts listening on port.
- lConn, err := net.ListenTCP("tcp", &localAddr)
- if err != nil {
- return err
- }
- defer lConn.Close()
-
- // Accept connections on port.
- if err := connectTCP(ctx, ip, dropPort, ipv6); err != nil {
- return err
- }
-
- conn, err := lConn.AcceptTCP()
- if err != nil {
- return err
- }
- conn.Close()
-
- return nil
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*NATOutRedirectTCPPort) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return nil
-}
-
-// NATLoopbackSkipsPrerouting tests that packets sent via loopback aren't
-// affected by PREROUTING rules.
-type NATLoopbackSkipsPrerouting struct{ baseCase }
-
-var _ TestCase = (*NATLoopbackSkipsPrerouting)(nil)
-
-// Name implements TestCase.Name.
-func (*NATLoopbackSkipsPrerouting) Name() string {
- return "NATLoopbackSkipsPrerouting"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*NATLoopbackSkipsPrerouting) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- // Redirect anything sent to localhost to an unused port.
- var dest net.IP
- if ipv6 {
- dest = net.IPv6loopback
- } else {
- dest = net.IPv4(127, 0, 0, 1)
- }
- if err := natTable(ipv6, "-A", "PREROUTING", "-p", "tcp", "-j", "REDIRECT", "--to-port", fmt.Sprintf("%d", dropPort)); err != nil {
- return err
- }
-
- // Establish a connection via localhost. If the PREROUTING rule did apply to
- // loopback traffic, the connection would fail.
- sendCh := make(chan error)
- go func() {
- sendCh <- connectTCP(ctx, dest, acceptPort, ipv6)
- }()
-
- if err := listenTCP(ctx, acceptPort, ipv6); err != nil {
- return err
- }
- return <-sendCh
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*NATLoopbackSkipsPrerouting) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- // No-op.
- return nil
-}
-
-// NATPreOriginalDst tests that SO_ORIGINAL_DST returns the pre-NAT destination
-// of PREROUTING NATted packets.
-type NATPreOriginalDst struct{ baseCase }
-
-var _ TestCase = (*NATPreOriginalDst)(nil)
-
-// Name implements TestCase.Name.
-func (*NATPreOriginalDst) Name() string {
- return "NATPreOriginalDst"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*NATPreOriginalDst) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- // Redirect incoming TCP connections to acceptPort.
- if err := natTable(ipv6, "-A", "PREROUTING",
- "-p", "tcp",
- "--destination-port", fmt.Sprintf("%d", dropPort),
- "-j", "REDIRECT", "--to-port", fmt.Sprintf("%d", acceptPort)); err != nil {
- return err
- }
-
- addrs, err := getInterfaceAddrs(ipv6)
- if err != nil {
- return err
- }
- return listenForRedirectedConn(ctx, ipv6, addrs)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*NATPreOriginalDst) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return connectTCP(ctx, ip, dropPort, ipv6)
-}
-
-// NATOutOriginalDst tests that SO_ORIGINAL_DST returns the pre-NAT destination
-// of OUTBOUND NATted packets.
-type NATOutOriginalDst struct{ baseCase }
-
-var _ TestCase = (*NATOutOriginalDst)(nil)
-
-// Name implements TestCase.Name.
-func (*NATOutOriginalDst) Name() string {
- return "NATOutOriginalDst"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*NATOutOriginalDst) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- // Redirect incoming TCP connections to acceptPort.
- if err := natTable(ipv6, "-A", "OUTPUT", "-p", "tcp", "-j", "REDIRECT", "--to-port", fmt.Sprintf("%d", acceptPort)); err != nil {
- return err
- }
-
- connCh := make(chan error)
- go func() {
- connCh <- connectTCP(ctx, ip, dropPort, ipv6)
- }()
-
- if err := listenForRedirectedConn(ctx, ipv6, []net.IP{ip}); err != nil {
- return err
- }
- return <-connCh
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*NATOutOriginalDst) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- // No-op.
- return nil
-}
-
-func listenForRedirectedConn(ctx context.Context, ipv6 bool, originalDsts []net.IP) error {
- // The net package doesn't give guaranteed access to the connection's
- // underlying FD, and thus we cannot call getsockopt. We have to use
- // traditional syscalls.
-
- // Create the listening socket, bind, listen, and accept.
- family := unix.AF_INET
- if ipv6 {
- family = unix.AF_INET6
- }
- sockfd, err := unix.Socket(family, unix.SOCK_STREAM, 0)
- if err != nil {
- return err
- }
- defer unix.Close(sockfd)
-
- var bindAddr unix.Sockaddr
- if ipv6 {
- bindAddr = &unix.SockaddrInet6{
- Port: acceptPort,
- Addr: [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, // in6addr_any
- }
- } else {
- bindAddr = &unix.SockaddrInet4{
- Port: acceptPort,
- Addr: [4]byte{0, 0, 0, 0}, // INADDR_ANY
- }
- }
- if err := unix.Bind(sockfd, bindAddr); err != nil {
- return err
- }
-
- if err := unix.Listen(sockfd, 1); err != nil {
- return err
- }
-
- // Block on accept() in another goroutine.
- connCh := make(chan int)
- errCh := make(chan error)
- go func() {
- for {
- connFD, _, err := unix.Accept(sockfd)
- if errors.Is(err, unix.EINTR) {
- continue
- }
- if err != nil {
- errCh <- err
- return
- }
- connCh <- connFD
- return
- }
- }()
-
- // Wait for accept() to return or for the context to finish.
- var connFD int
- select {
- case <-ctx.Done():
- return ctx.Err()
- case err := <-errCh:
- return err
- case connFD = <-connCh:
- }
- defer unix.Close(connFD)
-
- // Verify that, despite listening on acceptPort, SO_ORIGINAL_DST
- // indicates the packet was sent to originalDst:dropPort.
- if ipv6 {
- got, err := originalDestination6(connFD)
- if err != nil {
- return err
- }
- return addrMatches6(got, originalDsts, dropPort)
- }
-
- got, err := originalDestination4(connFD)
- if err != nil {
- return err
- }
- return addrMatches4(got, originalDsts, dropPort)
-}
-
- // loopbackTest installs an iptables NAT rule and ensures that packets sent
- // to dest:dropPort are received on localhost:acceptPort.
-func loopbackTest(ctx context.Context, ipv6 bool, dest net.IP, args ...string) error {
- if err := natTable(ipv6, args...); err != nil {
- return err
- }
- sendCh := make(chan error, 1)
- listenCh := make(chan error, 1)
- go func() {
- sendCh <- sendUDPLoop(ctx, dest, dropPort, ipv6)
- }()
- go func() {
- listenCh <- listenUDP(ctx, acceptPort, ipv6)
- }()
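- // Both result channels are buffered, so whichever goroutine loses the race
- // in the select below can still send its result and exit without leaking.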
- select {
- case err := <-listenCh:
- return err
- case err := <-sendCh:
- return err
- }
-}
-
-// NATPreRECVORIGDSTADDR tests that IP{V6}_RECVORIGDSTADDR gets the post-NAT
-// address on the PREROUTING chain.
-type NATPreRECVORIGDSTADDR struct{ containerCase }
-
-var _ TestCase = (*NATPreRECVORIGDSTADDR)(nil)
-
-// Name implements TestCase.Name.
-func (*NATPreRECVORIGDSTADDR) Name() string {
- return "NATPreRECVORIGDSTADDR"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*NATPreRECVORIGDSTADDR) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := natTable(ipv6, "-A", "PREROUTING", "-p", "udp", "-j", "REDIRECT", "--to-ports", fmt.Sprintf("%d", redirectPort)); err != nil {
- return err
- }
-
- if err := recvWithRECVORIGDSTADDR(ctx, ipv6, nil, redirectPort); err != nil {
- return err
- }
-
- return nil
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*NATPreRECVORIGDSTADDR) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- return sendUDPLoop(ctx, ip, acceptPort, ipv6)
-}
-
-// NATOutRECVORIGDSTADDR tests that IP{V6}_RECVORIGDSTADDR gets the post-NAT
-// address on the OUTPUT chain.
-type NATOutRECVORIGDSTADDR struct{ containerCase }
-
-var _ TestCase = (*NATOutRECVORIGDSTADDR)(nil)
-
-// Name implements TestCase.Name.
-func (*NATOutRECVORIGDSTADDR) Name() string {
- return "NATOutRECVORIGDSTADDR"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*NATOutRECVORIGDSTADDR) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- if err := natTable(ipv6, "-A", "OUTPUT", "-p", "udp", "-j", "REDIRECT", "--to-ports", fmt.Sprintf("%d", redirectPort)); err != nil {
- return err
- }
-
- sendCh := make(chan error)
- go func() {
- // Packets will be sent to a non-container IP and redirected
- // back to the container.
- sendCh <- sendUDPLoop(ctx, ip, acceptPort, ipv6)
- }()
-
- expectedIP := &net.IP{127, 0, 0, 1}
- if ipv6 {
- expectedIP = &net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}
- }
- if err := recvWithRECVORIGDSTADDR(ctx, ipv6, expectedIP, redirectPort); err != nil {
- return err
- }
-
- select {
- case err := <-sendCh:
- return err
- default:
- return nil
- }
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*NATOutRECVORIGDSTADDR) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- // No-op.
- return nil
-}
-
-func recvWithRECVORIGDSTADDR(ctx context.Context, ipv6 bool, expectedDst *net.IP, port uint16) error {
- // The net package doesn't give guaranteed access to a connection's
- // underlying FD, and thus we cannot call getsockopt. We have to use
- // traditional syscalls for IP_RECVORIGDSTADDR.
-
- // Create the listening socket.
- var (
- family = unix.AF_INET
- level = unix.SOL_IP
- option = unix.IP_RECVORIGDSTADDR
- bindAddr unix.Sockaddr = &unix.SockaddrInet4{
- Port: int(port),
- Addr: [4]byte{0, 0, 0, 0}, // INADDR_ANY
- }
- )
- if ipv6 {
- family = unix.AF_INET6
- level = unix.SOL_IPV6
- option = 74 // IPV6_RECVORIGDSTADDR, which is missing from the syscall package.
- bindAddr = &unix.SockaddrInet6{
- Port: int(port),
- Addr: [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, // in6addr_any
- }
- }
- sockfd, err := unix.Socket(family, unix.SOCK_DGRAM, 0)
- if err != nil {
- return fmt.Errorf("failed Socket(%d, %d, 0): %w", family, unix.SOCK_DGRAM, err)
- }
- defer unix.Close(sockfd)
-
- if err := unix.Bind(sockfd, bindAddr); err != nil {
- return fmt.Errorf("failed Bind(%d, %+v): %v", sockfd, bindAddr, err)
- }
-
- // Enable IP_RECVORIGDSTADDR.
- if err := unix.SetsockoptInt(sockfd, level, option, 1); err != nil {
- return fmt.Errorf("failed SetsockoptByte(%d, %d, %d, 1): %v", sockfd, level, option, err)
- }
-
- addrCh := make(chan interface{})
- errCh := make(chan error)
- go func() {
- var addr interface{}
- var err error
- if ipv6 {
- addr, err = recvOrigDstAddr6(sockfd)
- } else {
- addr, err = recvOrigDstAddr4(sockfd)
- }
- if err != nil {
- errCh <- err
- } else {
- addrCh <- addr
- }
- }()
-
- // Wait to receive a packet.
- var addr interface{}
- select {
- case <-ctx.Done():
- return ctx.Err()
- case err := <-errCh:
- return err
- case addr = <-addrCh:
- }
-
- // Get a list of local IPs to verify that the packet now appears to have
- // been sent to us.
- var localAddrs []net.IP
- if expectedDst != nil {
- localAddrs = []net.IP{*expectedDst}
- } else {
- localAddrs, err = getInterfaceAddrs(ipv6)
- if err != nil {
- return fmt.Errorf("failed to get local interfaces: %w", err)
- }
- }
-
- // Verify that the address has the post-NAT port and address.
- if ipv6 {
- return addrMatches6(addr.(unix.RawSockaddrInet6), localAddrs, redirectPort)
- }
- return addrMatches4(addr.(unix.RawSockaddrInet4), localAddrs, redirectPort)
-}
-
-func recvOrigDstAddr4(sockfd int) (unix.RawSockaddrInet4, error) {
- buf, err := recvOrigDstAddr(sockfd, unix.SOL_IP, unix.SizeofSockaddrInet4)
- if err != nil {
- return unix.RawSockaddrInet4{}, err
- }
- var addr unix.RawSockaddrInet4
- binary.Unmarshal(buf, hostarch.ByteOrder, &addr)
- return addr, nil
-}
-
-func recvOrigDstAddr6(sockfd int) (unix.RawSockaddrInet6, error) {
- buf, err := recvOrigDstAddr(sockfd, unix.SOL_IP, unix.SizeofSockaddrInet6)
- if err != nil {
- return unix.RawSockaddrInet6{}, err
- }
- var addr unix.RawSockaddrInet6
- binary.Unmarshal(buf, hostarch.ByteOrder, &addr)
- return addr, nil
-}
-
-func recvOrigDstAddr(sockfd int, level uintptr, addrSize int) ([]byte, error) {
- buf := make([]byte, 64)
- oob := make([]byte, unix.CmsgSpace(addrSize))
- for {
- _, oobn, _, _, err := unix.Recvmsg(
- sockfd,
- buf, // Message buffer.
- oob, // Out-of-band buffer.
- 0) // Flags.
- if errors.Is(err, unix.EINTR) {
- continue
- }
- if err != nil {
- return nil, fmt.Errorf("failed when calling Recvmsg: %w", err)
- }
- oob = oob[:oobn]
-
- // Parse out the control message.
- msgs, err := unix.ParseSocketControlMessage(oob)
- if err != nil {
- return nil, fmt.Errorf("failed to parse control message: %w", err)
- }
- return msgs[0].Data, nil
- }
-}
-
-func addrMatches4(got unix.RawSockaddrInet4, wantAddrs []net.IP, port uint16) error {
- for _, wantAddr := range wantAddrs {
- want := unix.RawSockaddrInet4{
- Family: unix.AF_INET,
- Port: htons(port),
- }
- copy(want.Addr[:], wantAddr.To4())
- if got == want {
- return nil
- }
- }
- return fmt.Errorf("got %+v, but wanted one of %+v (note: port numbers are in network byte order)", got, wantAddrs)
-}
-
-func addrMatches6(got unix.RawSockaddrInet6, wantAddrs []net.IP, port uint16) error {
- for _, wantAddr := range wantAddrs {
- want := unix.RawSockaddrInet6{
- Family: unix.AF_INET6,
- Port: htons(port),
- }
- copy(want.Addr[:], wantAddr.To16())
- if got == want {
- return nil
- }
- }
- return fmt.Errorf("got %+v, but wanted one of %+v (note: port numbers are in network byte order)", got, wantAddrs)
-}
-
-const (
- snatAddrV4 = "194.236.50.155"
- snatAddrV6 = "2a0a::1"
- snatPort = 43
-)
-
-// NATPostSNATUDP tests that the source port/IP in the packets are modified as expected.
-type NATPostSNATUDP struct{ localCase }
-
-var _ TestCase = (*NATPostSNATUDP)(nil)
-
-// Name implements TestCase.Name.
-func (*NATPostSNATUDP) Name() string {
- return "NATPostSNATUDP"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*NATPostSNATUDP) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- var source string
- if ipv6 {
- source = fmt.Sprintf("[%s]:%d", snatAddrV6, snatPort)
- } else {
- source = fmt.Sprintf("%s:%d", snatAddrV4, snatPort)
- }
-
- if err := natTable(ipv6, "-A", "POSTROUTING", "-p", "udp", "-j", "SNAT", "--to-source", source); err != nil {
- return err
- }
- return sendUDPLoop(ctx, ip, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*NATPostSNATUDP) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- remote, err := listenUDPFrom(ctx, acceptPort, ipv6)
- if err != nil {
- return err
- }
- var snatAddr string
- if ipv6 {
- snatAddr = snatAddrV6
- } else {
- snatAddr = snatAddrV4
- }
- if got, want := remote.IP, net.ParseIP(snatAddr); !got.Equal(want) {
- return fmt.Errorf("got remote address = %s, want = %s", got, want)
- }
- if got, want := remote.Port, snatPort; got != want {
- return fmt.Errorf("got remote port = %d, want = %d", got, want)
- }
- return nil
-}
-
-// NATPostSNATTCP tests that the source port/IP in the packets are modified as
-// expected.
-type NATPostSNATTCP struct{ localCase }
-
-var _ TestCase = (*NATPostSNATTCP)(nil)
-
-// Name implements TestCase.Name.
-func (*NATPostSNATTCP) Name() string {
- return "NATPostSNATTCP"
-}
-
-// ContainerAction implements TestCase.ContainerAction.
-func (*NATPostSNATTCP) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- addrs, err := getInterfaceAddrs(ipv6)
- if err != nil {
- return err
- }
- var source string
- for _, addr := range addrs {
- if addr.To4() != nil {
- if !ipv6 {
- source = fmt.Sprintf("%s:%d", addr, snatPort)
- }
- } else if ipv6 && addr.IsGlobalUnicast() {
- source = fmt.Sprintf("[%s]:%d", addr, snatPort)
- }
- }
- if source == "" {
- return fmt.Errorf("can't find any interface address to use")
- }
-
- if err := natTable(ipv6, "-A", "POSTROUTING", "-p", "tcp", "-j", "SNAT", "--to-source", source); err != nil {
- return err
- }
- return connectTCP(ctx, ip, acceptPort, ipv6)
-}
-
-// LocalAction implements TestCase.LocalAction.
-func (*NATPostSNATTCP) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {
- remote, err := listenTCPFrom(ctx, acceptPort, ipv6)
- if err != nil {
- return err
- }
- hostStr, portStr, err := net.SplitHostPort(remote.String())
- if err != nil {
- return err
- }
- if got, want := hostStr, ip.String(); got != want {
- return fmt.Errorf("got remote address = %s, want = %s", got, want)
- }
- port, err := strconv.ParseInt(portStr, 10, 0)
- if err != nil {
- return err
- }
- if got, want := int(port), snatPort; got != want {
- return fmt.Errorf("got remote port = %d, want = %d", got, want)
- }
- return nil
-}
diff --git a/test/iptables/runner/BUILD b/test/iptables/runner/BUILD
deleted file mode 100644
index 24504a1b9..000000000
--- a/test/iptables/runner/BUILD
+++ /dev/null
@@ -1,12 +0,0 @@
-load("//tools:defs.bzl", "go_binary")
-
-package(licenses = ["notice"])
-
-go_binary(
- name = "runner",
- testonly = 1,
- srcs = ["main.go"],
- pure = True,
- visibility = ["//test/iptables:__subpackages__"],
- deps = ["//test/iptables"],
-)
diff --git a/test/iptables/runner/main.go b/test/iptables/runner/main.go
deleted file mode 100644
index 9ae2d1b4d..000000000
--- a/test/iptables/runner/main.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package main runs iptables tests from within a docker container.
-package main
-
-import (
- "context"
- "flag"
- "fmt"
- "log"
- "net"
-
- "gvisor.dev/gvisor/test/iptables"
-)
-
-var (
- name = flag.String("name", "", "name of the test to run")
- ipv6 = flag.Bool("ipv6", false, "whether the test utilizes ip6tables")
-)
-
-func main() {
- flag.Parse()
-
- // Find out which test we're running.
- test, ok := iptables.Tests[*name]
- if !ok {
- log.Fatalf("No test found named %q", *name)
- }
- log.Printf("Running test %q", *name)
-
- // Get the IP of the local process.
- ip, err := getIP()
- if err != nil {
- log.Fatal(err)
- }
-
- // Run the test.
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
- if err := test.ContainerAction(ctx, ip, *ipv6); err != nil {
- log.Fatalf("Failed running test %q: %v", *name, err)
- }
-
- // Emit the final line.
- log.Printf("%s", iptables.TerminalStatement)
-}
-
-// getIP listens for a connection from the local process and returns the source
-// IP of that connection.
-func getIP() (net.IP, error) {
- localAddr := net.TCPAddr{
- Port: iptables.IPExchangePort,
- }
- listener, err := net.ListenTCP("tcp", &localAddr)
- if err != nil {
- return net.IP{}, fmt.Errorf("failed listening for IP: %v", err)
- }
- defer listener.Close()
- conn, err := listener.AcceptTCP()
- if err != nil {
- return net.IP{}, fmt.Errorf("failed accepting IP: %v", err)
- }
- defer conn.Close()
- log.Printf("Connected to %v", conn.RemoteAddr())
-
- return conn.RemoteAddr().(*net.TCPAddr).IP, nil
-}
diff --git a/test/kubernetes/gvisor-injection-admission-webhook.yaml b/test/kubernetes/gvisor-injection-admission-webhook.yaml
deleted file mode 100644
index 691f02dda..000000000
--- a/test/kubernetes/gvisor-injection-admission-webhook.yaml
+++ /dev/null
@@ -1,89 +0,0 @@
-# Copyright 2020 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
----
-apiVersion: v1
-kind: Namespace
-metadata:
- name: e2e
- labels:
- name: e2e
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: gvisor-injection-admission-webhook
- namespace: e2e
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- name: gvisor-injection-admission-webhook
-rules:
-- apiGroups: [ admissionregistration.k8s.io ]
- resources: [ mutatingwebhookconfigurations ]
- verbs: [ create ]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: gvisor-injection-admission-webhook
- namespace: e2e
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: gvisor-injection-admission-webhook
-subjects:
-- kind: ServiceAccount
- name: gvisor-injection-admission-webhook
- namespace: e2e
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: gvisor-injection-admission-webhook
- namespace: e2e
- labels:
- app: gvisor-injection-admission-webhook
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: gvisor-injection-admission-webhook
- template:
- metadata:
- labels:
- app: gvisor-injection-admission-webhook
- spec:
- containers:
- - name: webhook
- image: gcr.io/gke-gvisor/gvisor-injection-admission-webhook:54ce9bd
- args:
- - --log-level=debug
- ports:
- - containerPort: 8443
- serviceAccountName: gvisor-injection-admission-webhook
----
-kind: Service
-apiVersion: v1
-metadata:
- name: gvisor-injection-admission-webhook
- namespace: e2e
-spec:
- selector:
- app: gvisor-injection-admission-webhook
- ports:
- - protocol: TCP
- port: 443
- targetPort: 8443
diff --git a/test/packetdrill/BUILD b/test/packetdrill/BUILD
deleted file mode 100644
index de66cbe6d..000000000
--- a/test/packetdrill/BUILD
+++ /dev/null
@@ -1,55 +0,0 @@
-load("//tools:defs.bzl", "bzl_library")
-load("//test/packetdrill:defs.bzl", "packetdrill_test")
-
-package(licenses = ["notice"])
-
-packetdrill_test(
- name = "packetdrill_sanity_test",
- scripts = ["sanity_test.pkt"],
-)
-
-packetdrill_test(
- name = "accept_ack_drop_test",
- scripts = ["accept_ack_drop.pkt"],
-)
-
-packetdrill_test(
- name = "fin_wait2_timeout_test",
- scripts = ["fin_wait2_timeout.pkt"],
-)
-
-packetdrill_test(
- name = "listen_close_before_handshake_complete_test",
- scripts = ["listen_close_before_handshake_complete.pkt"],
-)
-
-packetdrill_test(
- name = "no_rst_to_rst_test",
- scripts = ["no_rst_to_rst.pkt"],
-)
-
-packetdrill_test(
- name = "tcp_defer_accept_test",
- scripts = ["tcp_defer_accept.pkt"],
-)
-
-packetdrill_test(
- name = "tcp_defer_accept_timeout_test",
- scripts = ["tcp_defer_accept_timeout.pkt"],
-)
-
-test_suite(
- name = "all_tests",
- tags = [
- "local",
- "manual",
- "packetdrill",
- ],
- tests = existing_rules(),
-)
-
-bzl_library(
- name = "defs_bzl",
- srcs = ["defs.bzl"],
- visibility = ["//visibility:private"],
-)
diff --git a/test/packetdrill/accept_ack_drop.pkt b/test/packetdrill/accept_ack_drop.pkt
deleted file mode 100644
index 76e638fd4..000000000
--- a/test/packetdrill/accept_ack_drop.pkt
+++ /dev/null
@@ -1,27 +0,0 @@
-// Test that the accept works if the final ACK is dropped and an ack with data
-// follows the dropped ack.
-
-0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
-+0 bind(3, ..., ...) = 0
-
-// Set backlog to 1 so that we can easily test.
-+0 listen(3, 1) = 0
-
-// Establish a connection without timestamps.
-+0.0 < S 0:0(0) win 32792 <mss 1460,sackOK,nop,nop,nop,wscale 7>
-+0.0 > S. 0:0(0) ack 1 <...>
-
-+0.0 < . 1:5(4) ack 1 win 257
-+0.0 > . 1:1(0) ack 5 <...>
-
-// This should cause connection to transition to connected state.
-+0.000 accept(3, ..., ...) = 4
-+0.000 fcntl(4, F_SETFL, O_RDWR|O_NONBLOCK) = 0
-
-// Now read the data and we should get 4 bytes.
-+0.000 read(4,..., 4) = 4
-+0.000 close(4) = 0
-
-+0.0 > F. 1:1(0) ack 5 <...>
-+0.0 < F. 5:5(0) ack 2 win 257
-+0.01 > . 2:2(0) ack 6 <...> \ No newline at end of file
diff --git a/test/packetdrill/defs.bzl b/test/packetdrill/defs.bzl
deleted file mode 100644
index a6cbcc376..000000000
--- a/test/packetdrill/defs.bzl
+++ /dev/null
@@ -1,89 +0,0 @@
-"""Defines a rule for packetdrill test targets."""
-
-def _packetdrill_test_impl(ctx):
- test_runner = ctx.executable._test_runner
- runner = ctx.actions.declare_file("%s-runner" % ctx.label.name)
-
- script_paths = []
- for script in ctx.files.scripts:
- script_paths.append(script.short_path)
- runner_content = "\n".join([
- "#!/bin/bash",
- # This test will run part in a distinct user namespace. This can cause
- # permission problems, because all runfiles may not be owned by the
- # current user, and no other users will be mapped in that namespace.
- # Make sure that everything is readable here.
- "find . -type f -exec chmod a+rx {} \\;",
- "find . -type d -exec chmod a+rx {} \\;",
- "%s %s --init_script %s \"$@\" -- %s\n" % (
- test_runner.short_path,
- " ".join(ctx.attr.flags),
- ctx.files._init_script[0].short_path,
- " ".join(script_paths),
- ),
- ])
- ctx.actions.write(runner, runner_content, is_executable = True)
-
- transitive_files = depset()
- if hasattr(ctx.attr._test_runner, "data_runfiles"):
- transitive_files = ctx.attr._test_runner.data_runfiles.files
- runfiles = ctx.runfiles(
- files = [test_runner] + ctx.files._init_script + ctx.files.scripts,
- transitive_files = transitive_files,
- collect_default = True,
- collect_data = True,
- )
- return [DefaultInfo(executable = runner, runfiles = runfiles)]
-
-_packetdrill_test = rule(
- attrs = {
- "_test_runner": attr.label(
- executable = True,
- cfg = "host",
- allow_files = True,
- default = "packetdrill_test.sh",
- ),
- "_init_script": attr.label(
- allow_single_file = True,
- default = "packetdrill_setup.sh",
- ),
- "flags": attr.string_list(
- mandatory = False,
- default = [],
- ),
- "scripts": attr.label_list(
- mandatory = True,
- allow_files = True,
- ),
- },
- test = True,
- implementation = _packetdrill_test_impl,
-)
-
-PACKETDRILL_TAGS = [
- "local",
- "manual",
- "packetdrill",
-]
-
-def packetdrill_linux_test(name, **kwargs):
- if "tags" not in kwargs:
- kwargs["tags"] = PACKETDRILL_TAGS
- _packetdrill_test(
- name = name,
- flags = ["--dut_platform", "linux"],
- **kwargs
- )
-
-def packetdrill_netstack_test(name, **kwargs):
- if "tags" not in kwargs:
- kwargs["tags"] = PACKETDRILL_TAGS
- _packetdrill_test(
- name = name,
- flags = ["--dut_platform", "netstack"],
- **kwargs
- )
-
-def packetdrill_test(name, **kwargs):
- packetdrill_linux_test(name + "_linux_test", **kwargs)
- packetdrill_netstack_test(name + "_netstack_test", **kwargs)
diff --git a/test/packetdrill/fin_wait2_timeout.pkt b/test/packetdrill/fin_wait2_timeout.pkt
deleted file mode 100644
index 93ab08575..000000000
--- a/test/packetdrill/fin_wait2_timeout.pkt
+++ /dev/null
@@ -1,23 +0,0 @@
-// Test that a socket in FIN_WAIT_2 eventually times out and a subsequent
-// packet generates a RST.
-
-0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
-+0 bind(3, ..., ...) = 0
-
-+0 listen(3, 1) = 0
-
-// Establish a connection without timestamps.
-+0 < S 0:0(0) win 32792 <mss 1460,sackOK,nop,nop,nop,wscale 7>
-+0 > S. 0:0(0) ack 1 <...>
-+0 < P. 1:1(0) ack 1 win 257
-
-+0.100 accept(3, ..., ...) = 4
-// Set FIN_WAIT2 timeout to 1 second.
-+0.100 setsockopt(4, SOL_TCP, TCP_LINGER2, [1], 4) = 0
-+0 close(4) = 0
-
-+0 > F. 1:1(0) ack 1 <...>
-+0 < . 1:1(0) ack 2 win 257
-
-+2 < . 1:1(0) ack 2 win 257
-+0 > R 2:2(0) win 0
diff --git a/test/packetdrill/listen_close_before_handshake_complete.pkt b/test/packetdrill/listen_close_before_handshake_complete.pkt
deleted file mode 100644
index 51c3f1a32..000000000
--- a/test/packetdrill/listen_close_before_handshake_complete.pkt
+++ /dev/null
@@ -1,31 +0,0 @@
-// Test that closing a listening socket closes any connections in SYN-RCVD
-// state and any packets bound for these connections generate a RESET.
-
-0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
-+0 bind(3, ..., ...) = 0
-
-// Set backlog to 1 so that we can easily test.
-+0 listen(3, 1) = 0
-
-// Establish a connection without timestamps.
-+0 < S 0:0(0) win 32792 <mss 1460,sackOK,nop,nop,nop,wscale 7>
-+0 > S. 0:0(0) ack 1 <...>
-
-+0.100 close(3) = 0
-+0.1 < P. 1:1(0) ack 1 win 257
-
-// Linux generates a reset with no ack number/bit set. This is contradictory to
-// what is specified in Rule 1 under Reset Generation in
-// https://tools.ietf.org/html/rfc793#section-3.4.
-// "1. If the connection does not exist (CLOSED) then a reset is sent
-// in response to any incoming segment except another reset. In
-// particular, SYNs addressed to a non-existent connection are rejected
-// by this means.
-//
-// If the incoming segment has an ACK field, the reset takes its
-// sequence number from the ACK field of the segment, otherwise the
-// reset has sequence number zero and the ACK field is set to the sum
-// of the sequence number and segment length of the incoming segment.
-// The connection remains in the CLOSED state."
-
-+0.0 > R 1:1(0) win 0 \ No newline at end of file
diff --git a/test/packetdrill/no_rst_to_rst.pkt b/test/packetdrill/no_rst_to_rst.pkt
deleted file mode 100644
index 612747827..000000000
--- a/test/packetdrill/no_rst_to_rst.pkt
+++ /dev/null
@@ -1,36 +0,0 @@
-// Test a RST is not generated in response to a RST and a RST is correctly
-// generated when an accepted endpoint is RST due to an incoming RST.
-
-0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
-+0 bind(3, ..., ...) = 0
-
-+0 listen(3, 1) = 0
-
-// Establish a connection without timestamps.
-+0 < S 0:0(0) win 32792 <mss 1460,sackOK,nop,nop,nop,wscale 7>
-+0 > S. 0:0(0) ack 1 <...>
-+0 < P. 1:1(0) ack 1 win 257
-
-+0.100 accept(3, ..., ...) = 4
-
-+0.200 < R 1:1(0) win 0
-
-+0.300 read(4,..., 4) = -1 ECONNRESET (Connection Reset by Peer)
-
-+0.00 < . 1:1(0) ack 1 win 257
-
-// Linux generates a reset with no ack number/bit set. This is contradictory to
-// what is specified in Rule 1 under Reset Generation in
-// https://tools.ietf.org/html/rfc793#section-3.4.
-// "1. If the connection does not exist (CLOSED) then a reset is sent
-// in response to any incoming segment except another reset. In
-// particular, SYNs addressed to a non-existent connection are rejected
-// by this means.
-//
-// If the incoming segment has an ACK field, the reset takes its
-// sequence number from the ACK field of the segment, otherwise the
-// reset has sequence number zero and the ACK field is set to the sum
-// of the sequence number and segment length of the incoming segment.
-// The connection remains in the CLOSED state."
-
-+0.00 > R 1:1(0) win 0 \ No newline at end of file
diff --git a/test/packetdrill/packetdrill_setup.sh b/test/packetdrill/packetdrill_setup.sh
deleted file mode 100755
index b858072f0..000000000
--- a/test/packetdrill/packetdrill_setup.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-
-# Copyright 2018 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This script runs both within the sentry context and natively. It should tweak
-# TCP parameters to match expectations found in the script files.
-sysctl -q net.ipv4.tcp_sack=1
-sysctl -q net.ipv4.tcp_rmem="4096 2097152 $((8*1024*1024))"
-sysctl -q net.ipv4.tcp_wmem="4096 2097152 $((8*1024*1024))"
-
-# There may be errors from the above, but they will show up in the test logs and
-# we always want to proceed from this point. It's possible that values were
-# already set correctly and the nodes were not available in the namespace.
-exit 0
diff --git a/test/packetdrill/packetdrill_test.sh b/test/packetdrill/packetdrill_test.sh
deleted file mode 100755
index d25cad83a..000000000
--- a/test/packetdrill/packetdrill_test.sh
+++ /dev/null
@@ -1,231 +0,0 @@
-#!/bin/bash
-
-# Copyright 2020 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Run a packetdrill test. Two docker containers are made, one for the
-# Device-Under-Test (DUT) and one for the test runner. Each is attached to two
-# networks: one for control packets that aid the test and one for test packets,
-# which are sent as part of the test and observed for correctness.
-
-set -euxo pipefail
-
-function failure() {
- local lineno=$1
- local msg=$2
- local filename="$0"
- echo "FAIL: $filename:$lineno: $msg"
-}
-trap 'failure ${LINENO} "$BASH_COMMAND"' ERR
-
-declare -r LONGOPTS="dut_platform:,init_script:,runtime:,partition:,total_partitions:"
-
-# Don't use declare below so that the error from getopt will end the script.
-PARSED=$(getopt --options "" --longoptions=$LONGOPTS --name "$0" -- "$@")
-
-eval set -- "$PARSED"
-
-while true; do
- case "$1" in
- --dut_platform)
- # Either "linux" or "netstack".
- declare -r DUT_PLATFORM="$2"
- shift 2
- ;;
- --init_script)
- declare -r INIT_SCRIPT="$2"
- shift 2
- ;;
- --runtime)
- declare RUNTIME="$2"
- shift 2
- ;;
- --partition)
- # Ignored.
- shift 2
- ;;
- --total_partitions)
- # Ignored.
- shift 2
- ;;
- --)
- shift
- break
- ;;
- *)
- echo "Programming error"
- exit 3
- esac
-done
-
-# All the other arguments are scripts.
-declare -r scripts="$@"
-
-# Check that the required flags are defined in a way that is safe for "set -u".
-if [[ "${DUT_PLATFORM-}" == "netstack" ]]; then
- if [[ -z "${RUNTIME-}" ]]; then
- echo "FAIL: Missing --runtime argument: ${RUNTIME-}"
- exit 2
- fi
- declare -r RUNTIME_ARG="--runtime ${RUNTIME}"
-elif [[ "${DUT_PLATFORM-}" == "linux" ]]; then
- declare -r RUNTIME_ARG=""
-else
- echo "FAIL: Bad or missing --dut_platform argument: ${DUT_PLATFORM-}"
- exit 2
-fi
-if [[ ! -x "${INIT_SCRIPT-}" ]]; then
- echo "FAIL: Bad or missing --init_script: ${INIT_SCRIPT-}"
- exit 2
-fi
-
-function new_net_prefix() {
- # Class C, 192.0.0.0 to 223.255.255.255, traditionally has mask 24.
- echo "$(shuf -i 192-223 -n 1).$(shuf -i 0-255 -n 1).$(shuf -i 0-255 -n 1)"
-}
-
-# Variables specific to the control network and interface start with CTRL_.
-# Variables specific to the test network and interface start with TEST_.
-# Variables specific to the DUT start with DUT_.
-# Variables specific to the test runner start with TEST_RUNNER_.
-declare -r PACKETDRILL="/packetdrill/gtests/net/packetdrill/packetdrill"
-# Use random numbers so that test networks don't collide.
-declare CTRL_NET="ctrl_net-$(shuf -i 0-99999999 -n 1)"
-declare CTRL_NET_PREFIX=$(new_net_prefix)
-declare TEST_NET="test_net-$(shuf -i 0-99999999 -n 1)"
-declare TEST_NET_PREFIX=$(new_net_prefix)
-declare -r tolerance_usecs=100000
-# On both DUT and test runner, testing packets are on the eth2 interface.
-declare -r TEST_DEVICE="eth2"
-# Number of bits in the *_NET_PREFIX variables.
-declare -r NET_MASK="24"
-# Last bits of the DUT's IP address.
-declare -r DUT_NET_SUFFIX=".10"
-# Control port.
-declare -r CTRL_PORT="40000"
-# Last bits of the test runner's IP address.
-declare -r TEST_RUNNER_NET_SUFFIX=".20"
-declare -r TIMEOUT="60"
-declare -r IMAGE_TAG="gcr.io/gvisor-presubmit/packetdrill"
-
-# Make sure that docker is installed.
-docker --version
-
-function finish {
- local cleanup_success=1
- for net in "${CTRL_NET}" "${TEST_NET}"; do
- # Kill all processes attached to ${net}.
- for docker_command in "kill" "rm"; do
- (docker network inspect "${net}" \
- --format '{{range $key, $value := .Containers}}{{$key}} {{end}}' \
- | xargs -r docker "${docker_command}") || \
- cleanup_success=0
- done
- # Remove the network.
- docker network rm "${net}" || \
- cleanup_success=0
- done
-
- if ((!$cleanup_success)); then
- echo "FAIL: Cleanup command failed"
- exit 4
- fi
-}
-trap finish EXIT
-
-# Subnet for control packets between test runner and DUT.
-while ! docker network create \
- "--subnet=${CTRL_NET_PREFIX}.0/${NET_MASK}" "${CTRL_NET}"; do
- sleep 0.1
- CTRL_NET_PREFIX=$(new_net_prefix)
- CTRL_NET="ctrl_net-$(shuf -i 0-99999999 -n 1)"
-done
-
-# Subnet for the packets that are part of the test.
-while ! docker network create \
- "--subnet=${TEST_NET_PREFIX}.0/${NET_MASK}" "${TEST_NET}"; do
- sleep 0.1
- TEST_NET_PREFIX=$(new_net_prefix)
- TEST_NET="test_net-$(shuf -i 0-99999999 -n 1)"
-done
-
-# Create the DUT container and connect to network.
-DUT=$(docker create ${RUNTIME_ARG} --privileged --rm \
- --stop-timeout ${TIMEOUT} -it ${IMAGE_TAG})
-docker network connect "${CTRL_NET}" \
- --ip "${CTRL_NET_PREFIX}${DUT_NET_SUFFIX}" "${DUT}" \
- || (docker kill ${DUT}; docker rm ${DUT}; false)
-docker network connect "${TEST_NET}" \
- --ip "${TEST_NET_PREFIX}${DUT_NET_SUFFIX}" "${DUT}" \
- || (docker kill ${DUT}; docker rm ${DUT}; false)
-docker start "${DUT}"
-
-# Create the test runner container and connect to network.
-TEST_RUNNER=$(docker create --privileged --rm \
- --stop-timeout ${TIMEOUT} -it ${IMAGE_TAG})
-docker network connect "${CTRL_NET}" \
- --ip "${CTRL_NET_PREFIX}${TEST_RUNNER_NET_SUFFIX}" "${TEST_RUNNER}" \
- || (docker kill ${TEST_RUNNER}; docker rm ${TEST_RUNNER}; false)
-docker network connect "${TEST_NET}" \
- --ip "${TEST_NET_PREFIX}${TEST_RUNNER_NET_SUFFIX}" "${TEST_RUNNER}" \
- || (docker kill ${TEST_RUNNER}; docker rm ${TEST_RUNNER}; false)
-docker start "${TEST_RUNNER}"
-
-# Run tcpdump in the test runner unbuffered, without dns resolution, just on the
-# interface with the test packets.
-docker exec -t ${TEST_RUNNER} tcpdump -U -n -i "${TEST_DEVICE}" &
-
-# Start a packetdrill server on the test_runner. The packetdrill server sends
-# packets and asserts that they are received.
-docker exec -d "${TEST_RUNNER}" \
- ${PACKETDRILL} --wire_server --wire_server_dev="${TEST_DEVICE}" \
- --wire_server_ip="${CTRL_NET_PREFIX}${TEST_RUNNER_NET_SUFFIX}" \
- --wire_server_port="${CTRL_PORT}" \
- --local_ip="${TEST_NET_PREFIX}${TEST_RUNNER_NET_SUFFIX}" \
- --remote_ip="${TEST_NET_PREFIX}${DUT_NET_SUFFIX}"
-
-# Because the Linux kernel receives the SYN-ACK but did not send the SYN, it
-# will issue a RST. To prevent this, iptables is used to filter those RSTs out.
-docker exec "${TEST_RUNNER}" \
- iptables -A OUTPUT -p tcp --tcp-flags RST RST -j DROP
-
-# Wait for the packetdrill server on the test runner to come up. Attempt to
-# connect to it from the DUT every 100 milliseconds until success.
-while ! docker exec "${DUT}" \
- nc -zv "${CTRL_NET_PREFIX}${TEST_RUNNER_NET_SUFFIX}" "${CTRL_PORT}"; do
- sleep 0.1
-done
-
-# Copy the packetdrill setup script to the DUT.
-docker cp -L "${INIT_SCRIPT}" "${DUT}:packetdrill_setup.sh"
-
-# Copy the packetdrill scripts to the DUT.
-declare -a dut_scripts
-for script in $scripts; do
- docker cp -L "${script}" "${DUT}:$(basename ${script})"
- dut_scripts+=("/$(basename ${script})")
-done
-
-# Start a packetdrill client on the DUT. The packetdrill client runs POSIX
-# socket commands and also sends instructions to the server.
-docker exec -t "${DUT}" \
- ${PACKETDRILL} --wire_client --wire_client_dev="${TEST_DEVICE}" \
- --wire_server_ip="${CTRL_NET_PREFIX}${TEST_RUNNER_NET_SUFFIX}" \
- --wire_server_port="${CTRL_PORT}" \
- --local_ip="${TEST_NET_PREFIX}${DUT_NET_SUFFIX}" \
- --remote_ip="${TEST_NET_PREFIX}${TEST_RUNNER_NET_SUFFIX}" \
- --init_scripts=/packetdrill_setup.sh \
- --tolerance_usecs="${tolerance_usecs}" "${dut_scripts[@]}"
-
-echo PASS: No errors.
diff --git a/test/packetdrill/reset_for_ack_when_no_syn_cookies_in_use.pkt b/test/packetdrill/reset_for_ack_when_no_syn_cookies_in_use.pkt
deleted file mode 100644
index a86b90ce6..000000000
--- a/test/packetdrill/reset_for_ack_when_no_syn_cookies_in_use.pkt
+++ /dev/null
@@ -1,9 +0,0 @@
-// Test that a listening socket generates a RST when it receives an
-// ACK and syn cookies are not in use.
-
-0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
-+0 bind(3, ..., ...) = 0
-
-+0 listen(3, 1) = 0
-+0.1 < . 1:1(0) ack 1 win 32792
-+0 > R 1:1(0) ack 0 win 0 \ No newline at end of file
diff --git a/test/packetdrill/sanity_test.pkt b/test/packetdrill/sanity_test.pkt
deleted file mode 100644
index b3b58c366..000000000
--- a/test/packetdrill/sanity_test.pkt
+++ /dev/null
@@ -1,7 +0,0 @@
-// Basic sanity test. One system call.
-//
-// All of the plumbing has to be working however, and the packetdrill wire
-// client needs to be able to connect to the wire server and send the script,
-// probe local interfaces, run through the test w/ timings, etc.
-
-0.000 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
diff --git a/test/packetdrill/tcp_defer_accept.pkt b/test/packetdrill/tcp_defer_accept.pkt
deleted file mode 100644
index a17f946db..000000000
--- a/test/packetdrill/tcp_defer_accept.pkt
+++ /dev/null
@@ -1,48 +0,0 @@
-// Test that a bare ACK does not complete a connection when TCP_DEFER_ACCEPT
-// timeout is not hit but an ACK w/ data does complete and deliver the
-// connection to the accept queue.
-
-0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
-+0 setsockopt(3, SOL_TCP, TCP_DEFER_ACCEPT, [5], 4) = 0
-+0.000 fcntl(3, F_SETFL, O_RDWR|O_NONBLOCK) = 0
-+0 bind(3, ..., ...) = 0
-
-// Set backlog to 1 so that we can easily test.
-+0 listen(3, 1) = 0
-
-// Establish a connection without timestamps.
-+0.0 < S 0:0(0) win 32792 <mss 1460,sackOK,nop,nop,nop,wscale 7>
-+0.0 > S. 0:0(0) ack 1 <...>
-
-// Send a bare ACK; this should not complete the connection because we
-// set TCP_DEFER_ACCEPT above.
-+0.0 < . 1:1(0) ack 1 win 257
-
-// The bare ACK should be dropped and no connection should be delivered
-// to the accept queue.
-+0.100 accept(3, ..., ...) = -1 EWOULDBLOCK (operation would block)
-
-// Send another bare ACK and it should still fail because we set
-// TCP_DEFER_ACCEPT to 5 seconds above.
-+2.5 < . 1:1(0) ack 1 win 257
-+0.100 accept(3, ..., ...) = -1 EWOULDBLOCK (operation would block)
-
-// set accept socket back to blocking.
-+0.000 fcntl(3, F_SETFL, O_RDWR) = 0
-
-// Now send an ACK w/ data. This should complete the connection
-// and deliver the socket to the accept queue.
-+0.1 < . 1:5(4) ack 1 win 257
-+0.0 > . 1:1(0) ack 5 <...>
-
-// This should cause connection to transition to connected state.
-+0.000 accept(3, ..., ...) = 4
-+0.000 fcntl(4, F_SETFL, O_RDWR|O_NONBLOCK) = 0
-
-// Now read the data and we should get 4 bytes.
-+0.000 read(4,..., 4) = 4
-+0.000 close(4) = 0
-
-+0.0 > F. 1:1(0) ack 5 <...>
-+0.0 < F. 5:5(0) ack 2 win 257
-+0.01 > . 2:2(0) ack 6 <...> \ No newline at end of file
diff --git a/test/packetdrill/tcp_defer_accept_timeout.pkt b/test/packetdrill/tcp_defer_accept_timeout.pkt
deleted file mode 100644
index 201fdeb14..000000000
--- a/test/packetdrill/tcp_defer_accept_timeout.pkt
+++ /dev/null
@@ -1,48 +0,0 @@
-// Test that a bare ACK is accepted after TCP_DEFER_ACCEPT timeout
-// is hit and a connection is delivered.
-
-0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
-+0 setsockopt(3, SOL_TCP, TCP_DEFER_ACCEPT, [3], 4) = 0
-+0.000 fcntl(3, F_SETFL, O_RDWR|O_NONBLOCK) = 0
-+0 bind(3, ..., ...) = 0
-
-// Set backlog to 1 so that we can easily test.
-+0 listen(3, 1) = 0
-
-// Establish a connection without timestamps.
-+0.0 < S 0:0(0) win 32792 <mss 1460,sackOK,nop,nop,nop,wscale 7>
-+0.0 > S. 0:0(0) ack 1 <...>
-
-// Send a bare ACK; this should not complete the connection because we
-// set TCP_DEFER_ACCEPT above.
-+0.0 < . 1:1(0) ack 1 win 257
-
-// The bare ACK should be dropped and no connection should be delivered
-// to the accept queue.
-+0.100 accept(3, ..., ...) = -1 EWOULDBLOCK (operation would block)
-
-// Send another bare ACK and it should still fail because we set
-// TCP_DEFER_ACCEPT to 3 seconds above.
-+2.5 < . 1:1(0) ack 1 win 257
-+0.100 accept(3, ..., ...) = -1 EWOULDBLOCK (operation would block)
-
-// set accept socket back to blocking.
-+0.000 fcntl(3, F_SETFL, O_RDWR) = 0
-
-// We should see one more retransmit of the SYN-ACK as a last ditch
-// attempt when TCP_DEFER_ACCEPT timeout is hit to trigger another
-// ACK or a packet with data.
-+.35~+2.35 > S. 0:0(0) ack 1 <...>
-
-// Now send another bare ACK after the TCP_DEFER_ACCEPT timeout has passed.
-+0.0 < . 1:1(0) ack 1 win 257
-
-// The ACK above should cause connection to transition to connected state.
-+0.000 accept(3, ..., ...) = 4
-+0.000 fcntl(4, F_SETFL, O_RDWR|O_NONBLOCK) = 0
-
-+0.000 close(4) = 0
-
-+0.0 > F. 1:1(0) ack 1 <...>
-+0.0 < F. 1:1(0) ack 2 win 257
-+0.01 > . 2:2(0) ack 2 <...>
diff --git a/test/packetimpact/README.md b/test/packetimpact/README.md
deleted file mode 100644
index fe0976ba5..000000000
--- a/test/packetimpact/README.md
+++ /dev/null
@@ -1,709 +0,0 @@
-# Packetimpact
-
-## What is packetimpact?
-
-Packetimpact is a tool for platform-independent network testing. It is heavily
-inspired by [packetdrill](https://github.com/google/packetdrill). It creates two
-docker containers connected by a network. One is for the test bench, which
-operates the test. The other is for the device-under-test (DUT), which is the
-software being tested. The test bench communicates with the DUT over the
-network to check the correctness of the DUT's networking behavior.
-
-### Goals
-
-Packetimpact aims to provide:
-
-* A **multi-platform** solution that can test both Linux and gVisor.
-* **Conciseness** on par with packetdrill scripts.
-* **Control-flow** like for loops, conditionals, and variables.
-* **Flexibility** to specify every byte in a packet or use multiple sockets.
-
-## How to run packetimpact tests?
-
-Build the test container image by running the following at the root of the
-repository:
-
-```bash
-$ make load-packetimpact
-```
-
-Run a test, e.g. `fin_wait2_timeout`, against Linux:
-
-```bash
-$ bazel test //test/packetimpact/tests:fin_wait2_timeout_native_test
-```
-
-Run the same test, but against gVisor:
-
-```bash
-$ bazel test //test/packetimpact/tests:fin_wait2_timeout_netstack_test
-```
-
-## When to use packetimpact?
-
-There are a few ways to write networking tests for gVisor currently:
-
-* [Go unit tests](https://github.com/google/gvisor/tree/master/pkg/tcpip)
-* [syscall tests](https://github.com/google/gvisor/tree/master/test/syscalls/linux)
-* [packetdrill tests](https://github.com/google/gvisor/tree/master/test/packetdrill)
-* packetimpact tests
-
-The right choice depends on the needs of the test.
-
-Feature | Go unit test | syscall test | packetdrill | packetimpact
--------------- | ------------ | ------------ | ----------- | ------------
-Multi-platform | no | **YES** | **YES** | **YES**
-Concise | no | somewhat | somewhat | **VERY**
-Control-flow | **YES** | **YES** | no | **YES**
-Flexible | **VERY** | no | somewhat | **VERY**
-
-### Go unit tests
-
-If the test depends on the internals of gVisor and doesn't need to run on Linux
-or other platforms for comparison purposes, a Go unit test can be appropriate.
-They can observe internals of gVisor networking. The downside is that they are
-**not concise** and **not multi-platform**. If you require insight on gVisor
-internals, this is the right choice.
-
-### Syscall tests
-
-Syscall tests are **multi-platform** but cannot examine the internals of gVisor
-networking. They are **concise**. They can use **control-flow** structures like
-conditionals, for loops, and variables. However, they are limited to only what
-the POSIX interface provides so they are **not flexible**. For example, you
-would have difficulty writing a syscall test that intentionally sends a bad IP
-checksum. Even if you wrote that test with raw sockets, it would be very
-**verbose** to write tests that intentionally send wrong checksums, wrong
-protocols, wrong sequence numbers, etc.
-
-### Packetdrill tests
-
-Packetdrill tests are **multi-platform** and can run against both Linux and
-gVisor. They are **concise** and use a special packetdrill scripting language.
-They are **more flexible** than a syscall test in that they can send packets
-that a syscall test would have difficulty sending, like a packet with a
-calculated ACK number. But they are also somewhat limited in flexibility in that
-they can't do tests with multiple sockets. They have **no control-flow** ability
-like variables or conditionals. For example, it isn't possible to send a packet
-that depends on the window size of a previous packet because the packetdrill
-language can't express that. Nor could you branch based on whether or not the
-other side supports window scaling, for example.
-
-### Packetimpact tests
-
-Packetimpact tests are similar to Packetdrill tests except that they are written
-in Go instead of the packetdrill scripting language. That gives them all the
-**control-flow** abilities of Go (loops, functions, variables, etc). They are
-**multi-platform** in the same way as packetdrill tests but even more
-**flexible** because Go is more expressive than the scripting language of
-packetdrill. However, Go is **not as concise** as the packetdrill language. Many
-design decisions below are made to mitigate that.
-
-## How it works
-
-```
- Testbench Device-Under-Test (DUT)
- +-------------------+ +------------------------+
- | | TEST NET | |
- | rawsockets.go <-->| <===========> | <---+ |
- | ^ | | | |
- | | | | | |
- | v | | | |
- | unittest | | | |
- | ^ | | | |
- | | | | | |
- | v | | v |
- | dut.go <========gRPC========> posix server |
- | | CONTROL NET | |
- +-------------------+ +------------------------+
-```
-
-Two docker containers are created by a "runner" script, one for the testbench
-and the other for the device under test (DUT). The script connects the two
-containers with a control network and test network. It also does some other
-tasks like waiting until the DUT is ready before starting the test and disabling
-Linux networking that would interfere with the test bench.
-
-### DUT
-
-The DUT container runs a program called the "posix_server". The posix_server is
-written in C++ for maximum portability. It is compiled on the host. The script
-that starts the containers copies it into the DUT's container and runs it. Its
-job is to receive directions from the test bench on what actions to take. For
-this, the posix_server does three steps in a loop:
-
-1. Listen for a request from the test bench.
-2. Execute a command.
-3. Send the response back to the test bench.
-
-The requests and responses are
-[protobufs](https://developers.google.com/protocol-buffers) and the
-communication is done with [gRPC](https://grpc.io/). The commands run are
-[POSIX socket commands](https://en.wikipedia.org/wiki/Berkeley_sockets#Socket_API_functions),
-with the inputs and outputs converted into protobuf requests and responses. All
-communication is on the control network, so that the test network is unaffected
-by extra packets.
-
-For example, this is the request and response pair to call
-[`socket()`](http://man7.org/linux/man-pages/man2/socket.2.html):
-
-```protocol-buffer
-message SocketRequest {
- int32 domain = 1;
- int32 type = 2;
- int32 protocol = 3;
-}
-
-message SocketResponse {
- int32 fd = 1;
- int32 errno_ = 2;
-}
-```
-
-##### Alternatives considered
-
-* We could have used JSON for communication instead. It would have been a
- lighter touch than protobuf, but protobuf handles all the data types and has
- strict typing to prevent a class of errors. The test bench could be written
- in other languages, too.
-* Instead of mimicking the POSIX interfaces, arguments could have had a more
- natural form, like the `bind()` getting a string IP address instead of bytes
- in a `sockaddr_t`. However, conforming to the existing structures keeps more
- of the complexity in Go and keeps the posix_server simpler and thus more
- likely to compile everywhere.
-
-### Test Bench
-
-The test bench does most of the work in a test. It is a Go program that compiles
-on the host and is copied by the script into the test bench's container. It is a
-regular [go unit test](https://golang.org/pkg/testing/) that imports the test
-bench framework. The test bench framework is based on three basic utilities:
-
-* Commanding the DUT to run POSIX commands and return responses.
-* Sending raw packets to the DUT on the test network.
-* Listening for raw packets from the DUT on the test network.
-
-#### DUT commands
-
-To keep the interface to the DUT consistent and easy-to-use, each POSIX command
-supported by the posix_server is wrapped in functions with signatures similar to
-the ones in the [Go unix package](https://godoc.org/golang.org/x/sys/unix). This
-way all the details of endianness and (un)marshalling of Go structs such as
-[unix.Timeval](https://godoc.org/golang.org/x/sys/unix#Timeval) are handled in
-one place. This also makes it straightforward to convert tests that use `unix.`
-or `syscall.` calls to `dut.` calls.
-
-For example, creating a connection to the DUT and commanding it to make a socket
-looks like this:
-
-```go
-dut := testbench.NewDut(t)
-fd, err := dut.SocketWithErrno(unix.AF_INET, unix.SOCK_STREAM, unix.IPPROTO_IP)
-if fd < 0 {
- t.Fatalf(...)
-}
-```
-
-Because the usual case is to fail the test when the DUT fails to create a
-socket, there is a concise version of each of the `...WithErrno` functions that
-does that:
-
-```go
-dut := testbench.NewDut(t)
-fd := dut.Socket(unix.AF_INET, unix.SOCK_STREAM, unix.IPPROTO_IP)
-```
-
-The DUT and other structs in the code store a `*testing.T` so that they can
-provide versions of functions that call `t.Fatalf(...)`. This helps keep tests
-concise.
-
-##### Alternatives considered
-
-* Instead of mimicking the `unix.` go interface, we could have invented a more
- natural one, like using `float64` instead of `Timeval`. However, using the
- same function signatures that `unix.` has makes it easier to convert code to
- `dut.`. Also, using an existing interface ensures that we don't invent an
- interface that isn't extensible. For example, we might have invented a
- function for `bind()` that didn't support IPv6 and later been forced to add
- a second `bind6()` function.
-
-#### Sending/Receiving Raw Packets
-
-The framework wraps POSIX sockets for sending and receiving raw frames. Both
-send and receive are synchronous commands.
-[SO_RCVTIMEO](http://man7.org/linux/man-pages/man7/socket.7.html) is used to set
-a timeout on the receive commands. For ease of use, these are wrapped in an
-`Injector` and a `Sniffer`. They have functions:
-
-```go
-func (s *Sniffer) Recv(timeout time.Duration) []byte {...}
-func (i *Injector) Send(b []byte) {...}
-```
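-
-A minimal usage sketch (the `sniffer`, `injector`, and `frameBytes` variables
-are assumed to have been set up elsewhere; only the `Recv` and `Send`
-signatures above are taken from the framework):
-
-```go
-// Send one raw frame on the test network, then wait up to a second for a
-// response. frameBytes stands in for bytes built elsewhere, e.g. with
-// Layers.toBytes().
-injector.Send(frameBytes)
-if b := sniffer.Recv(time.Second); b == nil {
- t.Fatalf("timed out waiting for a frame from the DUT")
-}
-```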
-
-##### Alternatives considered
-
-* [gopacket](https://github.com/google/gopacket) pcap has raw socket support
- but requires cgo. cgo is not guaranteed to be portable from the host to the
- container and in practice, the container doesn't recognize binaries built on
- the host if they use cgo.
-* Both gVisor and gopacket have the ability to read and write pcap files
- without cgo but that is insufficient here because we can't just replay pcap
- files, we need a more dynamic solution.
-* The sniffer and injector can't share a socket because they need to be bound
- differently.
-* Sniffing could have been done asynchronously with channels, obviating the
- need for `SO_RCVTIMEO`. But that would introduce asynchronous complication.
- `SO_RCVTIMEO` is well supported on the test bench.
-
-#### `Layer` struct
-
-A large part of packetimpact tests is creating packets to send and comparing
-received packets against expectations. To keep tests concise, it is useful to be
-able to specify just the important parts of packets that need to be set. For
-example, sending a packet with default values except for TCP Flags. And for
-packets received, it's useful to be able to compare just the necessary parts of
-received packets and ignore the rest.
-
-To aid in both of those, Go structs with optional fields are created for each
-encapsulation type, such as IPv4, TCP, and Ethernet. This is inspired by
-[scapy](https://scapy.readthedocs.io/en/latest/). For example, here is the
-struct for Ethernet:
-
-```go
-type Ether struct {
- LayerBase
- SrcAddr *tcpip.LinkAddress
- DstAddr *tcpip.LinkAddress
- Type *tcpip.NetworkProtocolNumber
-}
-```
-
-Each struct has the same fields as those in the
-[gVisor headers](https://github.com/google/gvisor/tree/master/pkg/tcpip/header)
-but with a pointer for each field that may be `nil`.
-
-##### Alternatives considered
-
-* Just use []byte like gVisor headers do. The drawback is that it makes the
- tests more verbose.
- * For example, there would be no way to call `Send(myBytes)` concisely and
- indicate if the checksum should be calculated automatically versus
- overridden. The only way would be to add lines to the test to calculate
- it before each Send, which is wordy. Or make multiple versions of Send:
- one that checksums IP, one that doesn't, one that checksums TCP, one
- that does both, etc. That would be many combinations.
- * Filtering inputs would become verbose. Either:
- * large conditionals that need to be repeated many places:
- `h[FlagOffset] == SYN && h[LengthOffset:LengthOffset+2] == ...` or
- * Many functions, one per field, like: `filterByFlag(myBytes, SYN)`,
- `filterByLength(myBytes, 20)`, `filterByNextProto(myBytes, 0x8000)`,
- etc.
- * Using pointers allows us to combine `Layer`s with reflection. So the
- default `Layers` can be overridden by a `Layers` with just the TCP
- connection's src/dst which can be overridden by one with just a test
- specific TCP window size.
- * It's a proven way to separate the details of a packet from the byte
- format as shown by scapy's success.
-* Use packetgo. It's more general than parsing packets with gVisor. However:
- * packetgo doesn't have optional fields so many of the above problems
- still apply.
- * It would be yet another dependency.
- * It's not as well known to engineers that are already writing gVisor
- code.
- * It might be a good candidate for replacing the parsing of packets into
- `Layer`s if all that parsing turns out to be more work than parsing by
- packetgo and converting *that* to `Layer`. packetgo has easier to use
- getters for the layers. This could be done later in a way that doesn't
- break tests.
-
-#### `Layer` methods
-
-The `Layer` structs provide a way to partially specify an encapsulation. They
-also need methods for using those partially specified encapsulations, for example
-to marshal them to bytes or compare them. For those, each encapsulation
-implements the `Layer` interface:
-
-```go
-// Layer is the interface that all encapsulations must implement.
-//
-// A Layer is an encapsulation in a packet, such as TCP, IPv4, IPv6, etc. A
-// Layer contains all the fields of the encapsulation. Each field is a pointer
-// and may be nil.
-type Layer interface {
- // toBytes converts the Layer into bytes. In places where the Layer's field
- // isn't nil, the value that is pointed to is used. When the field is nil, a
- // reasonable default for the Layer is used. For example, "64" for IPv4 TTL
- // and a calculated checksum for TCP or IP. Some layers require information
- // from the previous or next layers in order to compute a default, such as
- // TCP's checksum or Ethernet's type, so each Layer has a doubly-linked list
- // to the layer's neighbors.
- toBytes() ([]byte, error)
-
- // match checks if the current Layer matches the provided Layer. If either
- // Layer has a nil in a given field, that field is considered matching.
- // Otherwise, the values pointed to by the fields must match.
- match(Layer) bool
-
- // length in bytes of the current encapsulation
- length() int
-
- // next gets a pointer to the encapsulated Layer.
- next() Layer
-
- // prev gets a pointer to the Layer encapsulating this one.
- prev() Layer
-
- // setNext sets the pointer to the encapsulated Layer.
- setNext(Layer)
-
- // setPrev sets the pointer to the Layer encapsulating this one.
- setPrev(Layer)
-}
-```
-
-The `next` and `prev` pointers make up a doubly-linked list so that each layer
-can get at the information in the layers around it. This is necessary for some
-protocols, like TCP, which needs the layer before and the payload after to
-compute the checksum. Any
-sequence of `Layer` structs is valid so long as the parser and `toBytes`
-functions can map from type to protocol number and vice-versa. When the mapping
-fails, an error is emitted explaining what functionality is missing. The
-solution is either to fix the ordering or implement the missing protocol.
-
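-As a hedged illustration of why the list is doubly linked (this helper is
-hypothetical, not part of the framework), a layer can derive a length-dependent
-default by walking the layers that follow it:
-
-```go
-// totalLengthDefault sums the length of a layer and of everything it
-// encapsulates, which is the kind of value an IPv4 total-length default needs.
-func totalLengthDefault(l Layer) int {
- total := 0
- for ; l != nil; l = l.next() {
- total += l.length()
- }
- return total
-}
-```
-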
-For each `Layer` there is also a parsing function. For example, this one is for
-Ethernet:
-
-```
-func ParseEther(b []byte) (Layers, error)
-```
-
-The parsing function converts bytes received on the wire into a `Layer`
-(actually `Layers`, see below) which has no `nil`s in it. By using
-`match(Layer)` to compare against another `Layer` that *does* have `nil`s in it,
-the received bytes can be partially compared. The `nil`s behave as
-"don't-cares".
-
-##### Alternatives considered
-
-* Matching against `[]byte` instead of converting to `Layer` first.
- * The downside is that it precludes the use of a `cmp.Equal` one-liner to
- do comparisons.
- * It creates confusion in the code to deal with both representations at
- different times. For example, is the checksum calculated on `[]byte` or
- `Layer` when sending? What about when checking received packets?
-
-#### `Layers`
-
-```
-type Layers []Layer
-
-func (ls *Layers) match(other Layers) bool {...}
-func (ls *Layers) toBytes() ([]byte, error) {...}
-```
-
-`Layers` is a slice of `Layer`. It represents a stack of encapsulations, such
-as `Layers{Ether{},IPv4{},TCP{},Payload{}}`. It also has `toBytes()` and
-`match(Layers)`, like `Layer`. The parse functions above actually return
-`Layers` rather than a single `Layer` because they know which headers follow
-and sequentially call each parser on the remaining, encapsulated bytes.
-
-All this leads to the ability to write concise packet processing. For example:
-
-```go
-etherType := uint16(0x0800) // The EtherType for IPv4.
-flags := uint8(header.TCPFlagSyn | header.TCPFlagAck)
-toMatch := Layers{Ether{Type: &etherType}, IPv4{}, TCP{Flags: &flags}}
-for {
- recvBytes := sniffer.Recv(time.Second)
- if recvBytes == nil {
- println("Got no packet for 1 second")
- continue
- }
- gotPacket, err := ParseEther(recvBytes)
- if err == nil && toMatch.match(gotPacket) {
- println("Got a TCP/IPv4/Eth packet with SYNACK")
- }
-}
-```
-
-##### Alternatives considered
-
-* Don't use previous and next pointers.
- * Each layer may need to be able to interrogate the layers around it, like
- for computing the next protocol number or total length. So *some*
- mechanism is needed for a `Layer` to see neighboring layers.
- * We could pass the entire `Layers` slice to the `toBytes()` function, but
- passing a method an array that contains the method's own receiver seems
- wrong.
-
-#### `layerState`
-
-`Layers` represents the different headers of a packet, but a connection
-includes more state. For example, a TCP connection needs to keep track of the
-next expected sequence number and also the next sequence number to send. That
-state is stored in a `layerState` struct. This is the `layerState` for TCP:
-
-```go
-// tcpState maintains state about a TCP connection.
-type tcpState struct {
- out, in TCP
- localSeqNum, remoteSeqNum *seqnum.Value
- synAck *TCP
- portPickerFD int
- finSent bool
-}
-```
-
-The next sequence numbers for each side of the connection are stored in
-`localSeqNum` and `remoteSeqNum`. `out` and `in` hold defaults for the TCP
-header, such as the source and destination ports expected on outgoing and
-incoming packets.
-
-##### `layerState` interface
-
-```go
-// layerState stores the state of a layer of a connection.
-type layerState interface {
- // outgoing returns an outgoing layer to be sent in a frame.
- outgoing() Layer
-
- // incoming creates an expected Layer for comparing against a received Layer.
- // Because the expectation can depend on values in the received Layer, it is
- // an input to incoming. For example, the ACK number needs to be checked in a
- // TCP packet but only if the ACK flag is set in the received packet.
- incoming(received Layer) Layer
-
- // sent updates the layerState based on the Layer that was sent. The input is
- // a Layer with all prev and next pointers populated so that the entire frame
- // as it was sent is available.
- sent(sent Layer) error
-
- // received updates the layerState based on a Layer that is received. The
- // input is a Layer with all prev and next pointers populated so that the
- // entire frame as it was received is available.
- received(received Layer) error
-
- // close frees associated resources held by the layerState.
- close() error
-}
-```
-
-`outgoing` generates the default Layer for an outgoing packet. For TCP, this
-would be a `TCP` with the source and destination ports populated. Because those
-ports are static, they are stored inside the `out` member of `tcpState`. The
-sequence number, however, changes frequently, so it is kept in `localSeqNum`
-and copied into the result of `outgoing` on each call.
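-
-As a sketch of that flow, `outgoing` for `tcpState` might look roughly like the
-following; the `SeqNum` field name and the exact copying are assumptions for
-illustration, not the testbench's real implementation.
-
-```go
-func (s *tcpState) outgoing() Layer {
- newOutgoing := s.out // Start from the static defaults, such as the ports.
- if s.localSeqNum != nil {
- seq := uint32(*s.localSeqNum)
- newOutgoing.SeqNum = &seq // Fill in the current outgoing sequence number.
- }
- return &newOutgoing
-}
-```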
-
-`incoming` performs the same function for packets that arrive, but instead of
-generating a packet to send, it generates an expected packet for filtering the
-packets that arrive. For example, if a `TCP` header arrives with the wrong
-ports, it can be ignored as belonging to a different connection. `incoming`
-takes the received header itself as an input because the filter may depend on
-it; for example, the expected sequence number depends on the flags in the
-received TCP header.
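-
-A corresponding sketch of `incoming`, again only for illustration (the `AckNum`
-field and the exact checks are assumptions):
-
-```go
-func (s *tcpState) incoming(received Layer) Layer {
- tcpReceived, ok := received.(*TCP)
- if !ok {
- return nil // Not TCP at this position in the frame; nothing to match.
- }
- newIn := s.in // Start from the static expectations, such as the ports.
- if s.remoteSeqNum != nil {
- seq := uint32(*s.remoteSeqNum)
- newIn.SeqNum = &seq
- }
- // Only expect a particular ACK number when the received segment has ACK set.
- ackSet := tcpReceived.Flags != nil && *tcpReceived.Flags&uint8(header.TCPFlagAck) != 0
- if ackSet && s.localSeqNum != nil {
- ack := uint32(*s.localSeqNum)
- newIn.AckNum = &ack
- }
- return &newIn
-}
-```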
-
-`sent` and `received` are run for each header that is actually sent or received
-and are used to update the internal state. `incoming` and `outgoing` should
-*not* be used for this purpose. For example, `incoming` is called on every
-packet that arrives, but only packets that match ought to actually update the
-state. `outgoing` is called to create outgoing packets, and those packets are
-always sent, so unlike `incoming`/`received`, there is one `outgoing` call for
-each `sent` call.
-
-`close` cleans up after the `layerState`. For example, TCP and UDP reserve a
-local port for the connection and must release it when the connection is done.
-
-#### Connections
-
-Using `layerState` above, we can create connections.
-
-```go
-// Connection holds a collection of layer states for maintaining a connection
-// along with sockets for sniffing and injecting packets.
-type Connection struct {
- layerStates []layerState
- injector Injector
- sniffer Sniffer
- t *testing.T
-}
-```
-
-The connection stores an array of `layerState` in the order that the headers
-should be present in the frame to send. For example, Ether then IPv4 then TCP.
-The injector and sniffer are for writing and reading frames. A `*testing.T` is
-stored so that internal errors can be reported directly without code in the unit
-test.
-
-The `Connection` has some useful functions:
-
-```go
-// Close frees associated resources held by the Connection.
-func (conn *Connection) Close() {...}
-// CreateFrame builds a frame for the connection with layer overriding defaults
-// of the innermost layer and additionalLayers added after it.
-func (conn *Connection) CreateFrame(layer Layer, additionalLayers ...Layer) Layers {...}
-// SendFrame sends a frame on the wire and updates the state of all layers.
-func (conn *Connection) SendFrame(frame Layers) {...}
-// Send a packet with reasonable defaults. Potentially override the final layer
-// in the connection with the provided layer and add additionalLayers.
-func (conn *Connection) Send(layer Layer, additionalLayers ...Layer) {...}
-// Expect a frame with the final layerStates layer matching the provided Layer
-// within the timeout specified. If it doesn't arrive in time, it returns nil.
-func (conn *Connection) Expect(layer Layer, timeout time.Duration) (Layer, error) {...}
-// ExpectFrame expects a frame that matches the provided Layers within the
-// timeout specified. If it doesn't arrive in time, it returns nil.
-func (conn *Connection) ExpectFrame(layers Layers, timeout time.Duration) (Layers, error) {...}
-// Drain drains the sniffer's receive buffer by receiving packets until there's
-// nothing else to receive.
-func (conn *Connection) Drain() {...}
-```
-
-`CreateFrame` uses the `[]layerState` to create a frame to send. The first
-argument is for overriding defaults in the last header of the frame, because
-this is the most common need. For a TCPIPv4 connection, this would be the TCP
-header. Optional `additionalLayers` can be specified to add to the frame being
-created, such as a `Payload` after the `TCP` header.
-
-`SendFrame` sends the frame to the DUT. `Send` combines `CreateFrame` and
-`SendFrame` for unit tests with basic sending needs. If more control over the
-frame is needed, it can be built with `CreateFrame`, modified in the unit test,
-and then sent with `SendFrame`.
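-
-For example, a test that needs to adjust a frame before it goes out might look
-roughly like this; the `Payload.Bytes` field name and the specific overrides
-are illustrative assumptions.
-
-```go
-flags := uint8(header.TCPFlagSyn)
-frame := conn.CreateFrame(&TCP{Flags: &flags}, &Payload{Bytes: []byte("hello")})
-// Modify frame here if the test needs something CreateFrame can't express,
-// such as deliberately corrupting a field.
-conn.SendFrame(frame)
-```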
-
-On the receiving side, there are `Expect` and `ExpectFrame`. As on the sending
-side, there are two forms: one for just the last header and one for the whole
-frame. The expect functions use the `[]layerState` to create a template for the
-expected incoming frame. That template is then overridden by the values in the
-first argument. Finally, a loop sniffs frames on the wire. If a matching frame
-is found before the timeout, it is returned without error. If not, nil is
-returned and the error describes all the received frames that didn't match.
-Exactly one of the two outputs will be non-nil, even if no frames are received
-at all.
-
-`Drain` sniffs and discards all the frames that have yet to be received. A
-common way to write a test is:
-
-```go
-conn.Drain() // Discard all outstanding frames.
-conn.Send(...) // Send a frame with overrides.
-// Now expect a frame with a certain header and fail if it doesn't arrive.
-if _, err := conn.Expect(...); err != nil { t.Fatal(...) }
-```
-
-Or for a test where we want to check that no frame arrives:
-
-```go
-if gotOne, _ := conn.Expect(...); gotOne != nil { t.Fatal(...) }
-```
-
-#### Specializing `Connection`
-
-Because some combinations of `layerState` in a `Connection` are common, they
-are defined as their own types:
-
-```go
-// TCPIPv4 maintains the state for all the layers in a TCP/IPv4 connection.
-type TCPIPv4 Connection
-// UDPIPv4 maintains the state for all the layers in a UDP/IPv4 connection.
-type UDPIPv4 Connection
-```
-
-Each has a `NewXxx` function to create a new connection with reasonable
-defaults. They also have functions that call the underlying `Connection`
-functions but with specialization and tighter type-checking. For example:
-
-```go
-func (conn *TCPIPv4) Send(tcp TCP, additionalLayers ...Layer) {
- (*Connection)(conn).Send(&tcp, additionalLayers...)
-}
-func (conn *TCPIPv4) Drain() {
- conn.sniffer.Drain()
-}
-```
-
-They may also have some accessors to get or set the internal state of the
-connection:
-
-```go
-func (conn *TCPIPv4) state() *tcpState {
- state, ok := conn.layerStates[len(conn.layerStates)-1].(*tcpState)
- if !ok {
- conn.t.Fatalf("expected final state of %v to be tcpState", conn.layerStates)
- }
- return state
-}
-func (conn *TCPIPv4) RemoteSeqNum() *seqnum.Value {
- return conn.state().remoteSeqNum
-}
-func (conn *TCPIPv4) LocalSeqNum() *seqnum.Value {
- return conn.state().localSeqNum
-}
-```
-
-In practice, unit tests use these specialized functions rather than the ones on
-`Connection`. For example, a test calls `NewTCPIPv4()` and then `Send` on the
-result, rather than casting it to a `Connection` and calling `Send` on the cast
-result.
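-
-For example, with the specialized type the override is checked at compile time
-to be a `TCP` layer (constructor arguments as in the full example below):
-
-```go
-synAck := uint8(header.TCPFlagSyn | header.TCPFlagAck)
-conn := testbench.NewTCPIPv4(t, dut, remotePort)
-conn.Send(testbench.TCP{Flags: &synAck}) // Type-checked: only a TCP layer is accepted.
-// rather than:
-// (*testbench.Connection)(&conn).Send(&testbench.TCP{Flags: &synAck})
-```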
-
-##### Alternatives considered
-
-* Instead of storing `outgoing` and `incoming`, store values.
- * There would be many more things to store instead, like `localMac`,
- `remoteMac`, `localIP`, `remoteIP`, `localPort`, and `remotePort`.
- * Construction of a packet would be many lines to copy each of these
- values into a `[]byte`. And there would be slight variations needed for
- each encapsulation stack, like TCPIPv6 and ARP.
- * Filtering incoming packets would be a long sequence:
- * Compare the MACs, then
- * Parse the next header, then
- * Compare the IPs, then
- * Parse the next header, then
- * Compare the TCP ports. Instead, it's all just one call to
- `cmp.Equal(...)`, regardless of the encapsulation stack.
- * A TCPIPv6 connection could share most of the code. Only the type of the
- IP addresses is different. The types of `outgoing` and `incoming` would
- remain `Layers`.
- * An ARP connection could share all the Ethernet parts. The IP `Layer`
- could be factored out of `outgoing`. After that, the IPv4 and IPv6
- connections could implement one interface and a single TCP struct could
- have either network protocol through composition.
-
-## Putting it all together
-
-Here's what the start of a packetimpact unit test looks like. This test creates
-a TCP connection with the DUT. Comments are added here for explanation; a real
-test might omit them to stay even more concise.
-
-```go
-func TestMyTcpTest(t *testing.T) {
- // Prepare a DUT for communication.
- dut := testbench.NewDUT(t)
-
- // This does:
- // dut.Socket()
- // dut.Bind()
- // dut.Getsockname() to learn the new port number
- // dut.Listen()
- listenFD, remotePort := dut.CreateListener(unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(listenFD) // Tell the DUT to close the socket at the end of the test.
-
- // Monitor a new TCP connection with sniffer, injector, sequence number tracking,
- // and reasonable outgoing and incoming packet field default IPs, MACs, and port numbers.
- conn := testbench.NewTCPIPv4(t, dut, remotePort)
-
- // Perform a 3-way handshake: send SYN, expect SYNACK, send ACK.
- conn.Handshake()
-
- // Tell the DUT to accept the new connection.
- acceptFD := dut.Accept(listenFD)
- defer dut.Close(acceptFD)
-}
-```
-
-### Adding a new packetimpact test
-
-* Create a Go test in the [tests directory](tests/)
-* Add a `packetimpact_testbench` rule in [BUILD](tests/BUILD)
-* Add the test to the `ALL_TESTS` list in [defs.bzl](runner/defs.bzl);
- otherwise the build will fail with an error about a missing test.
-
-## Other notes
-
-* The time between receiving a SYN-ACK and replying with an ACK in `Handshake`
- is about 3ms. This is much slower than the native unix response, which is
- about 0.3ms. Packetdrill gets closer to 0.3ms. For tests where timing is
- crucial, packetdrill is faster and more precise.
diff --git a/test/packetimpact/dut/BUILD b/test/packetimpact/dut/BUILD
deleted file mode 100644
index 0be14ca3e..000000000
--- a/test/packetimpact/dut/BUILD
+++ /dev/null
@@ -1,30 +0,0 @@
-load("//tools:defs.bzl", "cc_binary", "grpcpp")
-
-package(
- default_visibility = ["//test/packetimpact:__subpackages__"],
- licenses = ["notice"],
-)
-
-cc_binary(
- name = "posix_server",
- srcs = ["posix_server.cc"],
- linkstatic = 1,
- static = True, # This is needed for running in a docker container.
- deps = [
- grpcpp,
- "//test/packetimpact/proto:posix_server_cc_grpc_proto",
- "//test/packetimpact/proto:posix_server_cc_proto",
- "@com_google_absl//absl/strings:str_format",
- ],
-)
-
-cc_binary(
- name = "posix_server_dynamic",
- srcs = ["posix_server.cc"],
- deps = [
- grpcpp,
- "//test/packetimpact/proto:posix_server_cc_grpc_proto",
- "//test/packetimpact/proto:posix_server_cc_proto",
- "@com_google_absl//absl/strings:str_format",
- ],
-)
diff --git a/test/packetimpact/dut/posix_server.cc b/test/packetimpact/dut/posix_server.cc
deleted file mode 100644
index ea83bbe72..000000000
--- a/test/packetimpact/dut/posix_server.cc
+++ /dev/null
@@ -1,464 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <arpa/inet.h>
-#include <fcntl.h>
-#include <getopt.h>
-#include <netdb.h>
-#include <netinet/in.h>
-#include <poll.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <time.h>
-#include <unistd.h>
-
-#include <iostream>
-#include <unordered_map>
-
-#include "include/grpcpp/security/server_credentials.h"
-#include "include/grpcpp/server_builder.h"
-#include "include/grpcpp/server_context.h"
-#include "absl/strings/str_format.h"
-#include "test/packetimpact/proto/posix_server.grpc.pb.h"
-#include "test/packetimpact/proto/posix_server.pb.h"
-
-// Converts a sockaddr_storage to a Sockaddr message.
-::grpc::Status sockaddr_to_proto(const sockaddr_storage &addr,
- socklen_t addrlen,
- posix_server::Sockaddr *sockaddr_proto) {
- switch (addr.ss_family) {
- case AF_INET: {
- auto addr_in = reinterpret_cast<const sockaddr_in *>(&addr);
- auto response_in = sockaddr_proto->mutable_in();
- response_in->set_family(addr_in->sin_family);
- response_in->set_port(ntohs(addr_in->sin_port));
- response_in->mutable_addr()->assign(
- reinterpret_cast<const char *>(&addr_in->sin_addr.s_addr), 4);
- return ::grpc::Status::OK;
- }
- case AF_INET6: {
- auto addr_in6 = reinterpret_cast<const sockaddr_in6 *>(&addr);
- auto response_in6 = sockaddr_proto->mutable_in6();
- response_in6->set_family(addr_in6->sin6_family);
- response_in6->set_port(ntohs(addr_in6->sin6_port));
- response_in6->set_flowinfo(ntohl(addr_in6->sin6_flowinfo));
- response_in6->mutable_addr()->assign(
- reinterpret_cast<const char *>(&addr_in6->sin6_addr.s6_addr), 16);
- // sin6_scope_id is stored in host byte order.
- //
- // https://www.gnu.org/software/libc/manual/html_node/Internet-Address-Formats.html
- response_in6->set_scope_id(addr_in6->sin6_scope_id);
- return ::grpc::Status::OK;
- }
- }
- return ::grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "Unknown Sockaddr");
-}
-
-::grpc::Status proto_to_sockaddr(const posix_server::Sockaddr &sockaddr_proto,
- sockaddr_storage *addr, socklen_t *addr_len) {
- switch (sockaddr_proto.sockaddr_case()) {
- case posix_server::Sockaddr::SockaddrCase::kIn: {
- auto proto_in = sockaddr_proto.in();
- if (proto_in.addr().size() != 4) {
- return ::grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
- "IPv4 address must be 4 bytes");
- }
- auto addr_in = reinterpret_cast<sockaddr_in *>(addr);
- addr_in->sin_family = proto_in.family();
- addr_in->sin_port = htons(proto_in.port());
- proto_in.addr().copy(reinterpret_cast<char *>(&addr_in->sin_addr.s_addr),
- 4);
- *addr_len = sizeof(*addr_in);
- break;
- }
- case posix_server::Sockaddr::SockaddrCase::kIn6: {
- auto proto_in6 = sockaddr_proto.in6();
- if (proto_in6.addr().size() != 16) {
- return ::grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
- "IPv6 address must be 16 bytes");
- }
- auto addr_in6 = reinterpret_cast<sockaddr_in6 *>(addr);
- addr_in6->sin6_family = proto_in6.family();
- addr_in6->sin6_port = htons(proto_in6.port());
- addr_in6->sin6_flowinfo = htonl(proto_in6.flowinfo());
- proto_in6.addr().copy(
- reinterpret_cast<char *>(&addr_in6->sin6_addr.s6_addr), 16);
- // sin6_scope_id is stored in host byte order.
- //
- // https://www.gnu.org/software/libc/manual/html_node/Internet-Address-Formats.html
- addr_in6->sin6_scope_id = proto_in6.scope_id();
- *addr_len = sizeof(*addr_in6);
- break;
- }
- case posix_server::Sockaddr::SockaddrCase::SOCKADDR_NOT_SET:
- default:
- return ::grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
- "Unknown Sockaddr");
- }
- return ::grpc::Status::OK;
-}
-
-class PosixImpl final : public posix_server::Posix::Service {
- ::grpc::Status Accept(grpc::ServerContext *context,
- const ::posix_server::AcceptRequest *request,
- ::posix_server::AcceptResponse *response) override {
- sockaddr_storage addr;
- socklen_t addrlen = sizeof(addr);
- response->set_fd(accept(request->sockfd(),
- reinterpret_cast<sockaddr *>(&addr), &addrlen));
- if (response->fd() < 0) {
- response->set_errno_(errno);
- }
- return sockaddr_to_proto(addr, addrlen, response->mutable_addr());
- }
-
- ::grpc::Status Bind(grpc::ServerContext *context,
- const ::posix_server::BindRequest *request,
- ::posix_server::BindResponse *response) override {
- if (!request->has_addr()) {
- return ::grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
- "Missing address");
- }
-
- sockaddr_storage addr;
- socklen_t addr_len;
- auto err = proto_to_sockaddr(request->addr(), &addr, &addr_len);
- if (!err.ok()) {
- return err;
- }
-
- response->set_ret(
- bind(request->sockfd(), reinterpret_cast<sockaddr *>(&addr), addr_len));
- if (response->ret() < 0) {
- response->set_errno_(errno);
- }
- return ::grpc::Status::OK;
- }
-
- ::grpc::Status Close(grpc::ServerContext *context,
- const ::posix_server::CloseRequest *request,
- ::posix_server::CloseResponse *response) override {
- response->set_ret(close(request->fd()));
- if (response->ret() < 0) {
- response->set_errno_(errno);
- }
- return ::grpc::Status::OK;
- }
-
- ::grpc::Status Connect(grpc::ServerContext *context,
- const ::posix_server::ConnectRequest *request,
- ::posix_server::ConnectResponse *response) override {
- if (!request->has_addr()) {
- return ::grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
- "Missing address");
- }
- sockaddr_storage addr;
- socklen_t addr_len;
- auto err = proto_to_sockaddr(request->addr(), &addr, &addr_len);
- if (!err.ok()) {
- return err;
- }
-
- response->set_ret(connect(request->sockfd(),
- reinterpret_cast<sockaddr *>(&addr), addr_len));
- if (response->ret() < 0) {
- response->set_errno_(errno);
- }
- return ::grpc::Status::OK;
- }
-
- ::grpc::Status GetSockName(
- grpc::ServerContext *context,
- const ::posix_server::GetSockNameRequest *request,
- ::posix_server::GetSockNameResponse *response) override {
- sockaddr_storage addr;
- socklen_t addrlen = sizeof(addr);
- response->set_ret(getsockname(
- request->sockfd(), reinterpret_cast<sockaddr *>(&addr), &addrlen));
- if (response->ret() < 0) {
- response->set_errno_(errno);
- }
- return sockaddr_to_proto(addr, addrlen, response->mutable_addr());
- }
-
- ::grpc::Status GetSockOpt(
- grpc::ServerContext *context,
- const ::posix_server::GetSockOptRequest *request,
- ::posix_server::GetSockOptResponse *response) override {
- switch (request->type()) {
- case ::posix_server::GetSockOptRequest::BYTES: {
- socklen_t optlen = request->optlen();
- std::vector<char> buf(optlen);
- response->set_ret(::getsockopt(request->sockfd(), request->level(),
- request->optname(), buf.data(),
- &optlen));
- // optlen is unsigned, so check the getsockopt return value instead.
- if (response->ret() >= 0) {
- response->mutable_optval()->set_bytesval(buf.data(), optlen);
- }
- break;
- }
- case ::posix_server::GetSockOptRequest::INT: {
- int intval = 0;
- socklen_t optlen = sizeof(intval);
- response->set_ret(::getsockopt(request->sockfd(), request->level(),
- request->optname(), &intval, &optlen));
- response->mutable_optval()->set_intval(intval);
- break;
- }
- case ::posix_server::GetSockOptRequest::TIME: {
- timeval tv;
- socklen_t optlen = sizeof(tv);
- response->set_ret(::getsockopt(request->sockfd(), request->level(),
- request->optname(), &tv, &optlen));
- response->mutable_optval()->mutable_timeval()->set_seconds(tv.tv_sec);
- response->mutable_optval()->mutable_timeval()->set_microseconds(
- tv.tv_usec);
- break;
- }
- default:
- return ::grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
- "Unknown SockOpt Type");
- }
- if (response->ret() < 0) {
- response->set_errno_(errno);
- }
- return ::grpc::Status::OK;
- }
-
- ::grpc::Status Listen(grpc::ServerContext *context,
- const ::posix_server::ListenRequest *request,
- ::posix_server::ListenResponse *response) override {
- response->set_ret(listen(request->sockfd(), request->backlog()));
- if (response->ret() < 0) {
- response->set_errno_(errno);
- }
- return ::grpc::Status::OK;
- }
-
- ::grpc::Status Poll(::grpc::ServerContext *context,
- const ::posix_server::PollRequest *request,
- ::posix_server::PollResponse *response) override {
- std::vector<struct pollfd> pfds;
- pfds.reserve(request->pfds_size());
- for (const auto &pfd : request->pfds()) {
- pfds.push_back({
- .fd = pfd.fd(),
- .events = static_cast<short>(pfd.events()),
- });
- }
- int ret = ::poll(pfds.data(), pfds.size(), request->timeout_millis());
-
- response->set_ret(ret);
- if (ret < 0) {
- response->set_errno_(errno);
- } else {
- // Only pollfds that have non-empty revents are returned; the client can't
- // rely on the indexes of the request array.
- for (const auto &pfd : pfds) {
- if (pfd.revents) {
- auto *proto_pfd = response->add_pfds();
- proto_pfd->set_fd(pfd.fd);
- proto_pfd->set_events(pfd.revents);
- }
- }
- if (int ready = response->pfds_size(); ret != ready) {
- return ::grpc::Status(
- ::grpc::StatusCode::INTERNAL,
- absl::StrFormat(
- "poll's return value(%d) doesn't match the number of "
- "file descriptors that are actually ready(%d)",
- ret, ready));
- }
- }
- return ::grpc::Status::OK;
- }
-
- ::grpc::Status Send(::grpc::ServerContext *context,
- const ::posix_server::SendRequest *request,
- ::posix_server::SendResponse *response) override {
- response->set_ret(::send(request->sockfd(), request->buf().data(),
- request->buf().size(), request->flags()));
- if (response->ret() < 0) {
- response->set_errno_(errno);
- }
- return ::grpc::Status::OK;
- }
-
- ::grpc::Status SendTo(::grpc::ServerContext *context,
- const ::posix_server::SendToRequest *request,
- ::posix_server::SendToResponse *response) override {
- if (!request->has_dest_addr()) {
- return ::grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
- "Missing address");
- }
- sockaddr_storage addr;
- socklen_t addr_len;
- auto err = proto_to_sockaddr(request->dest_addr(), &addr, &addr_len);
- if (!err.ok()) {
- return err;
- }
-
- response->set_ret(::sendto(request->sockfd(), request->buf().data(),
- request->buf().size(), request->flags(),
- reinterpret_cast<sockaddr *>(&addr), addr_len));
- if (response->ret() < 0) {
- response->set_errno_(errno);
- }
- return ::grpc::Status::OK;
- }
-
- ::grpc::Status SetNonblocking(
- grpc::ServerContext *context,
- const ::posix_server::SetNonblockingRequest *request,
- ::posix_server::SetNonblockingResponse *response) override {
- int flags = fcntl(request->fd(), F_GETFL);
- if (flags == -1) {
- response->set_ret(-1);
- response->set_errno_(errno);
- response->set_cmd("F_GETFL");
- return ::grpc::Status::OK;
- }
- if (request->nonblocking()) {
- flags |= O_NONBLOCK;
- } else {
- flags &= ~O_NONBLOCK;
- }
- int ret = fcntl(request->fd(), F_SETFL, flags);
- response->set_ret(ret);
- if (ret == -1) {
- response->set_errno_(errno);
- response->set_cmd("F_SETFL");
- }
- return ::grpc::Status::OK;
- }
-
- ::grpc::Status SetSockOpt(
- grpc::ServerContext *context,
- const ::posix_server::SetSockOptRequest *request,
- ::posix_server::SetSockOptResponse *response) override {
- switch (request->optval().val_case()) {
- case ::posix_server::SockOptVal::kBytesval:
- response->set_ret(setsockopt(request->sockfd(), request->level(),
- request->optname(),
- request->optval().bytesval().c_str(),
- request->optval().bytesval().size()));
- break;
- case ::posix_server::SockOptVal::kIntval: {
- int opt = request->optval().intval();
- response->set_ret(::setsockopt(request->sockfd(), request->level(),
- request->optname(), &opt, sizeof(opt)));
- break;
- }
- case ::posix_server::SockOptVal::kTimeval: {
- timeval tv = {.tv_sec = static_cast<time_t>(
- request->optval().timeval().seconds()),
- .tv_usec = static_cast<suseconds_t>(
- request->optval().timeval().microseconds())};
- response->set_ret(setsockopt(request->sockfd(), request->level(),
- request->optname(), &tv, sizeof(tv)));
- break;
- }
- default:
- return ::grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
- "Unknown SockOpt Type");
- }
- if (response->ret() < 0) {
- response->set_errno_(errno);
- }
- return ::grpc::Status::OK;
- }
-
- ::grpc::Status Socket(grpc::ServerContext *context,
- const ::posix_server::SocketRequest *request,
- ::posix_server::SocketResponse *response) override {
- response->set_fd(
- socket(request->domain(), request->type(), request->protocol()));
- if (response->fd() < 0) {
- response->set_errno_(errno);
- }
- return ::grpc::Status::OK;
- }
-
- ::grpc::Status Shutdown(grpc::ServerContext *context,
- const ::posix_server::ShutdownRequest *request,
- ::posix_server::ShutdownResponse *response) override {
- response->set_ret(shutdown(request->fd(), request->how()));
- if (response->ret() < 0) {
- response->set_errno_(errno);
- }
- return ::grpc::Status::OK;
- }
-
- ::grpc::Status Recv(::grpc::ServerContext *context,
- const ::posix_server::RecvRequest *request,
- ::posix_server::RecvResponse *response) override {
- std::vector<char> buf(request->len());
- response->set_ret(
- recv(request->sockfd(), buf.data(), buf.size(), request->flags()));
- if (response->ret() >= 0) {
- response->set_buf(buf.data(), response->ret());
- }
- if (response->ret() < 0) {
- response->set_errno_(errno);
- }
- return ::grpc::Status::OK;
- }
-};
-
-// Parse the command line options, storing the IP address and port in the
-// output parameters.
-void parse_command_line_options(int argc, char *argv[], std::string *ip,
- int *port) {
- static struct option options[] = {{"ip", required_argument, NULL, 1},
- {"port", required_argument, NULL, 2},
- {0, 0, 0, 0}};
-
- // Parse the arguments.
- int c;
- while ((c = getopt_long(argc, argv, "", options, NULL)) > 0) {
- if (c == 1) {
- *ip = optarg;
- } else if (c == 2) {
- *port = std::stoi(std::string(optarg));
- }
- }
-}
-
-void run_server(const std::string &ip, int port) {
- PosixImpl posix_service;
- grpc::ServerBuilder builder;
- std::string server_address = ip + ":" + std::to_string(port);
- // Set the authentication mechanism.
- std::shared_ptr<grpc::ServerCredentials> creds =
- grpc::InsecureServerCredentials();
- builder.AddListeningPort(server_address, creds);
- builder.RegisterService(&posix_service);
-
- std::unique_ptr<grpc::Server> server(builder.BuildAndStart());
- std::cerr << "Server listening on " << server_address << std::endl;
- server->Wait();
- std::cerr << "posix_server is finished." << std::endl;
-}
-
-int main(int argc, char *argv[]) {
- std::cerr << "posix_server is starting." << std::endl;
- std::string ip;
- int port = 0;
- parse_command_line_options(argc, argv, &ip, &port);
-
- std::cerr << "Got IP " << ip << " and port " << port << "." << std::endl;
- run_server(ip, port);
-}
diff --git a/test/packetimpact/netdevs/BUILD b/test/packetimpact/netdevs/BUILD
deleted file mode 100644
index 8d1193fed..000000000
--- a/test/packetimpact/netdevs/BUILD
+++ /dev/null
@@ -1,23 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(
- licenses = ["notice"],
-)
-
-go_library(
- name = "netdevs",
- srcs = ["netdevs.go"],
- visibility = ["//test/packetimpact:__subpackages__"],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/header",
- ],
-)
-
-go_test(
- name = "netdevs_test",
- size = "small",
- srcs = ["netdevs_test.go"],
- library = ":netdevs",
- deps = ["@com_github_google_go_cmp//cmp:go_default_library"],
-)
diff --git a/test/packetimpact/netdevs/netdevs.go b/test/packetimpact/netdevs/netdevs.go
deleted file mode 100644
index 25dcfbf60..000000000
--- a/test/packetimpact/netdevs/netdevs.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package netdevs contains utilities for working with network devices.
-package netdevs
-
-import (
- "fmt"
- "net"
- "regexp"
- "strconv"
- "strings"
-
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/header"
-)
-
-// A DeviceInfo represents a network device.
-type DeviceInfo struct {
- ID uint32
- MAC net.HardwareAddr
- IPv4Addr net.IP
- IPv4Net *net.IPNet
- IPv6Addr net.IP
- IPv6Net *net.IPNet
-}
-
-var (
- deviceLine = regexp.MustCompile(`^\s*(\d+): (\w+)`)
- linkLine = regexp.MustCompile(`^\s*link/\w+ ([0-9a-fA-F:]+)`)
- inetLine = regexp.MustCompile(`^\s*inet ([0-9./]+)`)
- inet6Line = regexp.MustCompile(`^\s*inet6 ([0-9a-fA-F:/]+)`)
-)
-
-// ParseDevicesWithRegex will parse the output with the given regexps to produce
-// a map from device name to device information. It is assumed that deviceLine
-// contains both a name and an ID.
-func ParseDevicesWithRegex(cmdOutput string, deviceLine, linkLine, inetLine, inet6Line *regexp.Regexp) (map[string]DeviceInfo, error) {
- var currentDevice string
- var currentInfo DeviceInfo
- deviceInfos := make(map[string]DeviceInfo)
- for _, line := range strings.Split(cmdOutput, "\n") {
- if m := deviceLine.FindStringSubmatch(line); m != nil {
- if currentDevice != "" {
- deviceInfos[currentDevice] = currentInfo
- }
- id, err := strconv.ParseUint(m[1], 10, 32)
- if err != nil {
- return nil, fmt.Errorf("parsing device ID %s: %w", m[1], err)
- }
- currentInfo = DeviceInfo{ID: uint32(id)}
- currentDevice = m[2]
- } else if m := linkLine.FindStringSubmatch(line); m != nil {
- mac, err := net.ParseMAC(m[1])
- if err != nil {
- return nil, err
- }
- currentInfo.MAC = mac
- } else if m := inetLine.FindStringSubmatch(line); m != nil {
- ipv4Addr, ipv4Net, err := net.ParseCIDR(m[1])
- if err != nil {
- return nil, err
- }
- currentInfo.IPv4Addr = ipv4Addr
- currentInfo.IPv4Net = ipv4Net
- } else if m := inet6Line.FindStringSubmatch(line); m != nil {
- ipv6Addr, ipv6Net, err := net.ParseCIDR(m[1])
- if err != nil {
- return nil, err
- }
- currentInfo.IPv6Addr = ipv6Addr
- currentInfo.IPv6Net = ipv6Net
- }
- }
- if currentDevice != "" {
- deviceInfos[currentDevice] = currentInfo
- }
- return deviceInfos, nil
-}
-
-// ParseDevices parses the output from `ip addr show` into a map from device
-// name to information about the device.
-//
-// Note: if multiple IPv6 addresses are assigned to a device, the last address
-// displayed by `ip addr show` will be used. This is fine for packetimpact
-// because we will always only have at most one IPv6 address assigned to each
-// device.
-func ParseDevices(cmdOutput string) (map[string]DeviceInfo, error) {
- return ParseDevicesWithRegex(cmdOutput, deviceLine, linkLine, inetLine, inet6Line)
-}
-
-// MACToIP converts the MAC address to an IPv6 link local address as described
-// in RFC 4291 page 20: https://tools.ietf.org/html/rfc4291#page-20
-func MACToIP(mac net.HardwareAddr) net.IP {
- addr := make([]byte, header.IPv6AddressSize)
- addr[0] = 0xfe
- addr[1] = 0x80
- header.EthernetAdddressToModifiedEUI64IntoBuf(tcpip.LinkAddress(mac), addr[8:])
- return net.IP(addr)
-}
-
-// FindDeviceByIP finds a DeviceInfo and device name from an IP address in the
-// output of ParseDevices.
-func FindDeviceByIP(ip net.IP, devices map[string]DeviceInfo) (string, DeviceInfo, error) {
- for dev, info := range devices {
- if info.IPv4Addr.Equal(ip) {
- return dev, info, nil
- }
- }
- return "", DeviceInfo{}, fmt.Errorf("can't find %s on any interface", ip)
-}
diff --git a/test/packetimpact/netdevs/netdevs_test.go b/test/packetimpact/netdevs/netdevs_test.go
deleted file mode 100644
index 379386980..000000000
--- a/test/packetimpact/netdevs/netdevs_test.go
+++ /dev/null
@@ -1,227 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-package netdevs
-
-import (
- "fmt"
- "net"
- "testing"
-
- "github.com/google/go-cmp/cmp"
-)
-
-func mustParseMAC(s string) net.HardwareAddr {
- mac, err := net.ParseMAC(s)
- if err != nil {
- panic(fmt.Sprintf("failed to parse test MAC %q: %s", s, err))
- }
- return mac
-}
-
-func TestParseDevices(t *testing.T) {
- for _, v := range []struct {
- desc string
- cmdOutput string
- want map[string]DeviceInfo
- }{
- {
- desc: "v4 and v6",
- cmdOutput: `
-1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
- link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
- inet 127.0.0.1/8 scope host lo
- valid_lft forever preferred_lft forever
- inet6 ::1/128 scope host
- valid_lft forever preferred_lft forever
-2613: eth0@if2614: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
- link/ether 02:42:c0:a8:09:02 brd ff:ff:ff:ff:ff:ff link-netnsid 0
- inet 192.168.9.2/24 brd 192.168.9.255 scope global eth0
- valid_lft forever preferred_lft forever
- inet6 fe80::42:c0ff:fea8:902/64 scope link tentative
- valid_lft forever preferred_lft forever
-2615: eth2@if2616: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
- link/ether 02:42:df:f5:e1:0a brd ff:ff:ff:ff:ff:ff link-netnsid 0
- inet 223.245.225.10/24 brd 223.245.225.255 scope global eth2
- valid_lft forever preferred_lft forever
- inet6 fe80::42:dfff:fef5:e10a/64 scope link tentative
- valid_lft forever preferred_lft forever
-2617: eth1@if2618: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
- link/ether 02:42:da:33:13:0a brd ff:ff:ff:ff:ff:ff link-netnsid 0
- inet 218.51.19.10/24 brd 218.51.19.255 scope global eth1
- valid_lft forever preferred_lft forever
- inet6 fe80::42:daff:fe33:130a/64 scope link tentative
- valid_lft forever preferred_lft forever`,
- want: map[string]DeviceInfo{
- "lo": {
- ID: 1,
- MAC: mustParseMAC("00:00:00:00:00:00"),
- IPv4Addr: net.IPv4(127, 0, 0, 1),
- IPv4Net: &net.IPNet{
- IP: net.IPv4(127, 0, 0, 0),
- Mask: net.CIDRMask(8, 32),
- },
- IPv6Addr: net.ParseIP("::1"),
- IPv6Net: &net.IPNet{
- IP: net.ParseIP("::1"),
- Mask: net.CIDRMask(128, 128),
- },
- },
- "eth0": {
- ID: 2613,
- MAC: mustParseMAC("02:42:c0:a8:09:02"),
- IPv4Addr: net.IPv4(192, 168, 9, 2),
- IPv4Net: &net.IPNet{
- IP: net.IPv4(192, 168, 9, 0),
- Mask: net.CIDRMask(24, 32),
- },
- IPv6Addr: net.ParseIP("fe80::42:c0ff:fea8:902"),
- IPv6Net: &net.IPNet{
- IP: net.ParseIP("fe80::"),
- Mask: net.CIDRMask(64, 128),
- },
- },
- "eth1": {
- ID: 2617,
- MAC: mustParseMAC("02:42:da:33:13:0a"),
- IPv4Addr: net.IPv4(218, 51, 19, 10),
- IPv4Net: &net.IPNet{
- IP: net.IPv4(218, 51, 19, 0),
- Mask: net.CIDRMask(24, 32),
- },
- IPv6Addr: net.ParseIP("fe80::42:daff:fe33:130a"),
- IPv6Net: &net.IPNet{
- IP: net.ParseIP("fe80::"),
- Mask: net.CIDRMask(64, 128),
- },
- },
- "eth2": {
- ID: 2615,
- MAC: mustParseMAC("02:42:df:f5:e1:0a"),
- IPv4Addr: net.IPv4(223, 245, 225, 10),
- IPv4Net: &net.IPNet{
- IP: net.IPv4(223, 245, 225, 0),
- Mask: net.CIDRMask(24, 32),
- },
- IPv6Addr: net.ParseIP("fe80::42:dfff:fef5:e10a"),
- IPv6Net: &net.IPNet{
- IP: net.ParseIP("fe80::"),
- Mask: net.CIDRMask(64, 128),
- },
- },
- },
- },
- {
- desc: "v4 only",
- cmdOutput: `
-2613: eth0@if2614: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
- link/ether 02:42:c0:a8:09:02 brd ff:ff:ff:ff:ff:ff link-netnsid 0
- inet 192.168.9.2/24 brd 192.168.9.255 scope global eth0
- valid_lft forever preferred_lft forever`,
- want: map[string]DeviceInfo{
- "eth0": {
- ID: 2613,
- MAC: mustParseMAC("02:42:c0:a8:09:02"),
- IPv4Addr: net.IPv4(192, 168, 9, 2),
- IPv4Net: &net.IPNet{
- IP: net.IPv4(192, 168, 9, 0),
- Mask: net.CIDRMask(24, 32),
- },
- },
- },
- },
- {
- desc: "v6 only",
- cmdOutput: `
-2615: eth2@if2616: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
- link/ether 02:42:df:f5:e1:0a brd ff:ff:ff:ff:ff:ff link-netnsid 0
- inet6 fe80::42:dfff:fef5:e10a/64 scope link tentative
- valid_lft forever preferred_lft forever`,
- want: map[string]DeviceInfo{
- "eth2": {
- ID: 2615,
- MAC: mustParseMAC("02:42:df:f5:e1:0a"),
- IPv6Addr: net.ParseIP("fe80::42:dfff:fef5:e10a"),
- IPv6Net: &net.IPNet{
- IP: net.ParseIP("fe80::"),
- Mask: net.CIDRMask(64, 128),
- },
- },
- },
- },
- } {
- t.Run(v.desc, func(t *testing.T) {
- got, err := ParseDevices(v.cmdOutput)
- if err != nil {
- t.Errorf("ParseDevices(\n%s\n) got unexpected error: %s", v.cmdOutput, err)
- }
- if diff := cmp.Diff(v.want, got); diff != "" {
- t.Errorf("ParseDevices(\n%s\n) got output diff (-want, +got):\n%s", v.cmdOutput, diff)
- }
- })
- }
-}
-
-func TestParseDevicesErrors(t *testing.T) {
- for _, v := range []struct {
- desc string
- cmdOutput string
- }{
- {
- desc: "invalid MAC addr",
- cmdOutput: `
-2617: eth1@if2618: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
- link/ether 02:42:da:33:13:0a:ffffffff brd ff:ff:ff:ff:ff:ff link-netnsid 0
- inet 218.51.19.10/24 brd 218.51.19.255 scope global eth1
- valid_lft forever preferred_lft forever
- inet6 fe80::42:daff:fe33:130a/64 scope link tentative
- valid_lft forever preferred_lft forever`,
- },
- {
- desc: "invalid v4 addr",
- cmdOutput: `
-2617: eth1@if2618: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
- link/ether 02:42:da:33:13:0a brd ff:ff:ff:ff:ff:ff link-netnsid 0
- inet 1234.4321.424242.0/24 brd 218.51.19.255 scope global eth1
- valid_lft forever preferred_lft forever
- inet6 fe80::42:daff:fe33:130a/64 scope link tentative
- valid_lft forever preferred_lft forever`,
- },
- {
- desc: "invalid v6 addr",
- cmdOutput: `
-2617: eth1@if2618: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
- link/ether 02:42:da:33:13:0a brd ff:ff:ff:ff:ff:ff link-netnsid 0
- inet 218.51.19.10/24 brd 218.51.19.255 scope global eth1
- valid_lft forever preferred_lft forever
- inet6 fe80:ffffffff::42:daff:fe33:130a/64 scope link tentative
- valid_lft forever preferred_lft forever`,
- },
- {
- desc: "invalid CIDR missing prefixlen",
- cmdOutput: `
-2617: eth1@if2618: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
- link/ether 02:42:da:33:13:0a brd ff:ff:ff:ff:ff:ff link-netnsid 0
- inet 218.51.19.10 brd 218.51.19.255 scope global eth1
- valid_lft forever preferred_lft forever
- inet6 fe80::42:daff:fe33:130a scope link tentative
- valid_lft forever preferred_lft forever`,
- },
- } {
- t.Run(v.desc, func(t *testing.T) {
- if _, err := ParseDevices(v.cmdOutput); err == nil {
- t.Errorf("ParseDevices(\n%s\n) succeeded unexpectedly, want error", v.cmdOutput)
- }
- })
- }
-}
diff --git a/test/packetimpact/proto/BUILD b/test/packetimpact/proto/BUILD
deleted file mode 100644
index 4a4370f42..000000000
--- a/test/packetimpact/proto/BUILD
+++ /dev/null
@@ -1,12 +0,0 @@
-load("//tools:defs.bzl", "proto_library")
-
-package(
- default_visibility = ["//test/packetimpact:__subpackages__"],
- licenses = ["notice"],
-)
-
-proto_library(
- name = "posix_server",
- srcs = ["posix_server.proto"],
- has_services = 1,
-)
diff --git a/test/packetimpact/proto/posix_server.proto b/test/packetimpact/proto/posix_server.proto
deleted file mode 100644
index 175a65336..000000000
--- a/test/packetimpact/proto/posix_server.proto
+++ /dev/null
@@ -1,268 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package posix_server;
-
-message SockaddrIn {
- int32 family = 1;
- uint32 port = 2;
- bytes addr = 3;
-}
-
-message SockaddrIn6 {
- uint32 family = 1;
- uint32 port = 2;
- uint32 flowinfo = 3;
- bytes addr = 4;
- uint32 scope_id = 5;
-}
-
-message Sockaddr {
- oneof sockaddr {
- SockaddrIn in = 1;
- SockaddrIn6 in6 = 2;
- }
-}
-
-message Timeval {
- int64 seconds = 1;
- int64 microseconds = 2;
-}
-
-message SockOptVal {
- oneof val {
- bytes bytesval = 1;
- int32 intval = 2;
- Timeval timeval = 3;
- }
-}
-
-// Request and Response pairs for each Posix service RPC call, sorted.
-
-message AcceptRequest {
- int32 sockfd = 1;
-}
-
-message AcceptResponse {
- int32 fd = 1;
- int32 errno_ = 2; // "errno" may fail to compile in c++.
- Sockaddr addr = 3;
-}
-
-message BindRequest {
- int32 sockfd = 1;
- Sockaddr addr = 2;
-}
-
-message BindResponse {
- int32 ret = 1;
- int32 errno_ = 2; // "errno" may fail to compile in c++.
-}
-
-message CloseRequest {
- int32 fd = 1;
-}
-
-message CloseResponse {
- int32 ret = 1;
- int32 errno_ = 2; // "errno" may fail to compile in c++.
-}
-
-message ConnectRequest {
- int32 sockfd = 1;
- Sockaddr addr = 2;
-}
-
-message ConnectResponse {
- int32 ret = 1;
- int32 errno_ = 2; // "errno" may fail to compile in c++.
-}
-
-message GetSockNameRequest {
- int32 sockfd = 1;
-}
-
-message GetSockNameResponse {
- int32 ret = 1;
- int32 errno_ = 2; // "errno" may fail to compile in c++.
- Sockaddr addr = 3;
-}
-
-message GetSockOptRequest {
- int32 sockfd = 1;
- int32 level = 2;
- int32 optname = 3;
- int32 optlen = 4;
- enum SockOptType {
- UNSPECIFIED = 0;
- BYTES = 1;
- INT = 2;
- TIME = 3;
- }
- SockOptType type = 5;
-}
-
-message GetSockOptResponse {
- int32 ret = 1;
- int32 errno_ = 2; // "errno" may fail to compile in c++.
- SockOptVal optval = 3;
-}
-
-message ListenRequest {
- int32 sockfd = 1;
- int32 backlog = 2;
-}
-
-message ListenResponse {
- int32 ret = 1;
- int32 errno_ = 2; // "errno" may fail to compile in c++.
-}
-
-// The events field is overloaded: when used in a request, it is copied into
-// the events field of the POSIX struct pollfd; when used in a response, it is
-// filled from the revents field of the POSIX struct pollfd.
-message PollFd {
- int32 fd = 1;
- uint32 events = 2;
-}
-
-message PollRequest {
- repeated PollFd pfds = 1;
- int32 timeout_millis = 2;
-}
-
-message PollResponse {
- int32 ret = 1;
- int32 errno_ = 2; // "errno" may fail to compile in c++.
- repeated PollFd pfds = 3;
-}
-
-message SendRequest {
- int32 sockfd = 1;
- bytes buf = 2;
- int32 flags = 3;
-}
-
-message SendResponse {
- int32 ret = 1;
- int32 errno_ = 2; // "errno" may fail to compile in c++.
-}
-
-message SendToRequest {
- int32 sockfd = 1;
- bytes buf = 2;
- int32 flags = 3;
- Sockaddr dest_addr = 4;
-}
-
-message SendToResponse {
- int32 ret = 1;
- int32 errno_ = 2; // "errno" may fail to compile in c++.
-}
-
-message SetNonblockingRequest {
- int32 fd = 1;
- bool nonblocking = 2;
-}
-
-message SetNonblockingResponse {
- int32 ret = 1;
- int32 errno_ = 2; // "errno" may fail to compile in c++.
- // The failed fcntl cmd.
- string cmd = 3;
-}
-
-message SetSockOptRequest {
- int32 sockfd = 1;
- int32 level = 2;
- int32 optname = 3;
- SockOptVal optval = 4;
-}
-
-message SetSockOptResponse {
- int32 ret = 1;
- int32 errno_ = 2; // "errno" may fail to compile in c++.
-}
-
-message SocketRequest {
- int32 domain = 1;
- int32 type = 2;
- int32 protocol = 3;
-}
-
-message SocketResponse {
- int32 fd = 1;
- int32 errno_ = 2; // "errno" may fail to compile in c++.
-}
-
-message ShutdownRequest {
- int32 fd = 1;
- int32 how = 2;
-}
-
-message ShutdownResponse {
- int32 ret = 1;
- int32 errno_ = 2; // "errno" may fail to compile in c++.
-}
-
-message RecvRequest {
- int32 sockfd = 1;
- int32 len = 2;
- int32 flags = 3;
-}
-
-message RecvResponse {
- int32 ret = 1;
- int32 errno_ = 2; // "errno" may fail to compile in c++.
- bytes buf = 3;
-}
-
-service Posix {
- // Call accept() on the DUT.
- rpc Accept(AcceptRequest) returns (AcceptResponse);
- // Call bind() on the DUT.
- rpc Bind(BindRequest) returns (BindResponse);
- // Call close() on the DUT.
- rpc Close(CloseRequest) returns (CloseResponse);
- // Call connect() on the DUT.
- rpc Connect(ConnectRequest) returns (ConnectResponse);
- // Call getsockname() on the DUT.
- rpc GetSockName(GetSockNameRequest) returns (GetSockNameResponse);
- // Call getsockopt() on the DUT.
- rpc GetSockOpt(GetSockOptRequest) returns (GetSockOptResponse);
- // Call listen() on the DUT.
- rpc Listen(ListenRequest) returns (ListenResponse);
- // Call poll() on the DUT. Only pollfds that have non-empty revents are
- // returned; the only way to tie the response back to the original request
- // is by the fd number.
- rpc Poll(PollRequest) returns (PollResponse);
- // Call send() on the DUT.
- rpc Send(SendRequest) returns (SendResponse);
- // Call sendto() on the DUT.
- rpc SendTo(SendToRequest) returns (SendToResponse);
- // Set/clear the O_NONBLOCK flag on the requested fd. This is needed because
- // the operating system on the DUT may define O_NONBLOCK differently, so it
- // is not sound to assemble the flags on the testbench.
- rpc SetNonblocking(SetNonblockingRequest) returns (SetNonblockingResponse);
- // Call setsockopt() on the DUT.
- rpc SetSockOpt(SetSockOptRequest) returns (SetSockOptResponse);
- // Call socket() on the DUT.
- rpc Socket(SocketRequest) returns (SocketResponse);
- // Call shutdown() on the DUT.
- rpc Shutdown(ShutdownRequest) returns (ShutdownResponse);
- // Call recv() on the DUT.
- rpc Recv(RecvRequest) returns (RecvResponse);
-}
diff --git a/test/packetimpact/runner/BUILD b/test/packetimpact/runner/BUILD
deleted file mode 100644
index 888c44343..000000000
--- a/test/packetimpact/runner/BUILD
+++ /dev/null
@@ -1,38 +0,0 @@
-load("//tools:defs.bzl", "bzl_library", "go_library", "go_test")
-
-package(
- default_visibility = ["//test/packetimpact:__subpackages__"],
- licenses = ["notice"],
-)
-
-go_test(
- name = "packetimpact_test",
- srcs = [
- "packetimpact_test.go",
- ],
- tags = [
- # Not intended to be run directly.
- "local",
- "manual",
- ],
- deps = [":runner"],
-)
-
-bzl_library(
- name = "defs_bzl",
- srcs = ["defs.bzl"],
- visibility = ["//test/packetimpact:__subpackages__"],
-)
-
-go_library(
- name = "runner",
- testonly = True,
- srcs = ["dut.go"],
- visibility = ["//test/packetimpact:__subpackages__"],
- deps = [
- "//pkg/test/dockerutil",
- "//test/packetimpact/netdevs",
- "//test/packetimpact/testbench",
- "@com_github_docker_docker//api/types/mount:go_default_library",
- ],
-)
diff --git a/test/packetimpact/runner/defs.bzl b/test/packetimpact/runner/defs.bzl
deleted file mode 100644
index 19f6cc0e0..000000000
--- a/test/packetimpact/runner/defs.bzl
+++ /dev/null
@@ -1,315 +0,0 @@
-"""Defines rules for packetimpact test targets."""
-
-load("//tools:defs.bzl", "go_test")
-
-def _packetimpact_test_impl(ctx):
- test_runner = ctx.executable._test_runner
- bench = ctx.actions.declare_file("%s-bench" % ctx.label.name)
- bench_content = "\n".join([
- "#!/bin/bash",
- # This test will run partly in a distinct user namespace. This can cause
- # permission problems, because not all runfiles may be owned by the
- # current user, and no other users will be mapped in that namespace.
- # Make sure that everything is readable here.
- "find . -type f -or -type d -exec chmod a+rx {} \\;",
- "%s %s --testbench_binary %s --num_duts %d $@\n" % (
- test_runner.short_path,
- " ".join(ctx.attr.flags),
- ctx.files.testbench_binary[0].short_path,
- ctx.attr.num_duts,
- ),
- ])
- ctx.actions.write(bench, bench_content, is_executable = True)
-
- transitive_files = []
- if hasattr(ctx.attr._test_runner, "data_runfiles"):
- transitive_files.append(ctx.attr._test_runner.data_runfiles.files)
- files = [test_runner] + ctx.files.testbench_binary + ctx.files._posix_server
- runfiles = ctx.runfiles(
- files = files,
- transitive_files = depset(transitive = transitive_files),
- collect_default = True,
- collect_data = True,
- )
- return [DefaultInfo(executable = bench, runfiles = runfiles)]
-
-_packetimpact_test = rule(
- attrs = {
- "_test_runner": attr.label(
- executable = True,
- cfg = "target",
- default = ":packetimpact_test",
- ),
- "_posix_server": attr.label(
- cfg = "target",
- default = "//test/packetimpact/dut:posix_server",
- ),
- "testbench_binary": attr.label(
- cfg = "target",
- mandatory = True,
- ),
- "flags": attr.string_list(
- mandatory = False,
- default = [],
- ),
- "num_duts": attr.int(
- mandatory = False,
- default = 1,
- ),
- },
- test = True,
- implementation = _packetimpact_test_impl,
-)
-
-PACKETIMPACT_TAGS = [
- "local",
- "manual",
- "packetimpact",
-]
-
-def packetimpact_native_test(
- name,
- testbench_binary,
- expect_failure = False,
- **kwargs):
- """Add a native packetimpact test.
-
- Args:
- name: name of the test
- testbench_binary: the testbench binary
- expect_failure: the test must fail
- **kwargs: all the other args, forwarded to _packetimpact_test
- """
- expect_failure_flag = ["--expect_failure"] if expect_failure else []
- _packetimpact_test(
- name = name + "_native_test",
- testbench_binary = testbench_binary,
- flags = ["--native"] + expect_failure_flag,
- tags = PACKETIMPACT_TAGS,
- **kwargs
- )
-
-def packetimpact_netstack_test(
- name,
- testbench_binary,
- expect_failure = False,
- **kwargs):
- """Add a packetimpact test on netstack.
-
- Args:
- name: name of the test
- testbench_binary: the testbench binary
- expect_failure: the test must fail
- **kwargs: all the other args, forwarded to _packetimpact_test
- """
- expect_failure_flag = []
- if expect_failure:
- expect_failure_flag = ["--expect_failure"]
- _packetimpact_test(
- name = name + "_netstack_test",
- testbench_binary = testbench_binary,
- # Note that a distinct runtime must be provided in the form
- # --test_arg=--runtime=other when invoking bazel.
- flags = expect_failure_flag,
- tags = PACKETIMPACT_TAGS,
- **kwargs
- )
-
-def packetimpact_go_test(name, expect_native_failure = False, expect_netstack_failure = False, num_duts = 1, **kwargs):
- """Add packetimpact tests written in go.
-
- Args:
- name: name of the test
- expect_native_failure: the test must fail natively
- expect_netstack_failure: the test must fail for Netstack
- num_duts: how many DUTs are needed for the test
- **kwargs: all the other args, forwarded to packetimpact_native_test and packetimpact_netstack_test
- """
- testbench_binary = name + "_test"
- packetimpact_native_test(
- name = name,
- expect_failure = expect_native_failure,
- num_duts = num_duts,
- testbench_binary = testbench_binary,
- **kwargs
- )
- packetimpact_netstack_test(
- name = name,
- expect_failure = expect_netstack_failure,
- num_duts = num_duts,
- testbench_binary = testbench_binary,
- **kwargs
- )
-
-def packetimpact_testbench(name, size = "small", pure = True, **kwargs):
- """Build packetimpact testbench written in go.
-
- Args:
- name: name of the test
- size: size of the test
- pure: make a static go binary
- **kwargs: all the other args, forwarded to go_test
- """
- go_test(
- name = name + "_test",
- size = size,
- pure = pure,
- nogo = False, # FIXME(gvisor.dev/issue/3374): Not working with all build systems.
- tags = [
- "local",
- "manual",
- ],
- **kwargs
- )
-
-PacketimpactTestInfo = provider(
- doc = "Provide information for packetimpact tests",
- fields = [
- "name",
- "timeout",
- "expect_netstack_failure",
- "num_duts",
- ],
-)
-
-ALL_TESTS = [
- PacketimpactTestInfo(
- name = "fin_wait2_timeout",
- ),
- PacketimpactTestInfo(
- name = "ipv4_id_uniqueness",
- ),
- PacketimpactTestInfo(
- name = "udp_discard_mcast_source_addr",
- ),
- PacketimpactTestInfo(
- name = "udp_any_addr_recv_unicast",
- ),
- PacketimpactTestInfo(
- name = "udp_icmp_error_propagation",
- ),
- PacketimpactTestInfo(
- name = "tcp_window_shrink",
- ),
- PacketimpactTestInfo(
- name = "tcp_zero_window_probe",
- ),
- PacketimpactTestInfo(
- name = "tcp_zero_window_probe_retransmit",
- ),
- PacketimpactTestInfo(
- name = "tcp_zero_window_probe_usertimeout",
- ),
- PacketimpactTestInfo(
- name = "tcp_retransmits",
- ),
- PacketimpactTestInfo(
- name = "tcp_outside_the_window",
- ),
- PacketimpactTestInfo(
- name = "tcp_noaccept_close_rst",
- ),
- PacketimpactTestInfo(
- name = "tcp_send_window_sizes_piggyback",
- ),
- PacketimpactTestInfo(
- name = "tcp_unacc_seq_ack",
- ),
- PacketimpactTestInfo(
- name = "tcp_paws_mechanism",
- # TODO(b/156682000): Fix netstack then remove the line below.
- expect_netstack_failure = True,
- ),
- PacketimpactTestInfo(
- name = "tcp_user_timeout",
- ),
- PacketimpactTestInfo(
- name = "tcp_zero_receive_window",
- ),
- PacketimpactTestInfo(
- name = "tcp_queue_send_recv_in_syn_sent",
- ),
- PacketimpactTestInfo(
- name = "tcp_synsent_reset",
- ),
- PacketimpactTestInfo(
- name = "tcp_synrcvd_reset",
- ),
- PacketimpactTestInfo(
- name = "tcp_network_unreachable",
- ),
- PacketimpactTestInfo(
- name = "tcp_cork_mss",
- ),
- PacketimpactTestInfo(
- name = "tcp_handshake_window_size",
- ),
- PacketimpactTestInfo(
- name = "tcp_timewait_reset",
- # TODO(b/168523247): Fix netstack then remove the line below.
- expect_netstack_failure = True,
- ),
- PacketimpactTestInfo(
- name = "tcp_listen_backlog",
- ),
- PacketimpactTestInfo(
- name = "tcp_syncookie",
- ),
- PacketimpactTestInfo(
- name = "tcp_connect_icmp_error",
- ),
- PacketimpactTestInfo(
- name = "icmpv6_param_problem",
- ),
- PacketimpactTestInfo(
- name = "ipv6_unknown_options_action",
- ),
- PacketimpactTestInfo(
- name = "ipv4_fragment_reassembly",
- ),
- PacketimpactTestInfo(
- name = "ipv6_fragment_reassembly",
- ),
- PacketimpactTestInfo(
- name = "ipv6_fragment_icmp_error",
- num_duts = 3,
- ),
- PacketimpactTestInfo(
- name = "tcp_linger",
- ),
- PacketimpactTestInfo(
- name = "tcp_rcv_buf_space",
- ),
- PacketimpactTestInfo(
- name = "tcp_rack",
- expect_netstack_failure = True,
- ),
- PacketimpactTestInfo(
- name = "tcp_info",
- ),
- PacketimpactTestInfo(
- name = "tcp_fin_retransmission",
- ),
- PacketimpactTestInfo(
- name = "generic_dgram_socket_send_recv",
- timeout = "long",
- ),
-]
-
-def validate_all_tests():
- """
- Make sure that the ALL_TESTS list is in sync with the rules in BUILD.
-
- This function is order-dependent: it is intended to be called after all
- packetimpact_testbench rules and before the ALL_TESTS list is used at the
- end of BUILD.
- """
- all_tests_dict = {} # Starlark has no set type, so a dict approximates one.
- for test in ALL_TESTS:
- rule_name = test.name + "_test"
- all_tests_dict[rule_name] = True
- if not native.existing_rule(rule_name):
- fail("%s does not have a packetimpact_testbench rule in BUILD" % test.name)
- for name in native.existing_rules():
- if name.endswith("_test") and name not in all_tests_dict:
- fail("%s is not declared in ALL_TESTS list in defs.bzl" % name[:-5])
diff --git a/test/packetimpact/runner/dut.go b/test/packetimpact/runner/dut.go
deleted file mode 100644
index 02678a76a..000000000
--- a/test/packetimpact/runner/dut.go
+++ /dev/null
@@ -1,657 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package runner starts docker containers and networking for a packetimpact test.
-package runner
-
-import (
- "context"
- "encoding/json"
- "flag"
- "fmt"
- "io/ioutil"
- "log"
- "math/rand"
- "net"
- "os"
- "os/exec"
- "path"
- "path/filepath"
- "strings"
- "testing"
- "time"
-
- "github.com/docker/docker/api/types/mount"
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/test/packetimpact/netdevs"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-// stringList implements flag.Value.
-type stringList []string
-
-// String implements flag.Value.String.
-func (l *stringList) String() string {
- return strings.Join(*l, ",")
-}
-
-// Set implements flag.Value.Set.
-func (l *stringList) Set(value string) error {
- *l = append(*l, value)
- return nil
-}
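As context for the stringList helper above: this is the standard flag.Value pattern for a flag that may be passed multiple times, with each occurrence appending another value. A minimal standalone sketch of the same pattern (the flag name and values below are illustrative, not taken from the runner):

package main

import (
	"flag"
	"fmt"
	"strings"
)

// multiFlag mirrors the stringList idea: each occurrence of the flag appends
// one more value instead of overwriting the previous one.
type multiFlag []string

func (m *multiFlag) String() string     { return strings.Join(*m, ",") }
func (m *multiFlag) Set(v string) error { *m = append(*m, v); return nil }

func main() {
	var args multiFlag
	fs := flag.NewFlagSet("example", flag.ExitOnError)
	fs.Var(&args, "extra_test_arg", "extra arguments, may be repeated")
	// Passing the flag twice accumulates both values.
	if err := fs.Parse([]string{"-extra_test_arg=-foo", "-extra_test_arg=-bar"}); err != nil {
		panic(err)
	}
	fmt.Println(args) // [-foo -bar]
}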
-
-var (
- native = false
- testbenchBinary = ""
- tshark = false
- extraTestArgs = stringList{}
- expectFailure = false
- numDUTs = 1
-
- // DUTAddr is the IP address for the DUT.
- DUTAddr = net.IPv4(0, 0, 0, 10)
- testbenchAddr = net.IPv4(0, 0, 0, 20)
-)
-
-// RegisterFlags defines flags and associates them with the package-level
-// exported variables above. It should be called by tests in their init
-// functions.
-func RegisterFlags(fs *flag.FlagSet) {
- fs.BoolVar(&native, "native", false, "whether the test should be run natively")
- fs.StringVar(&testbenchBinary, "testbench_binary", "", "path to the testbench binary")
- fs.BoolVar(&tshark, "tshark", false, "use more verbose tshark in logs instead of tcpdump")
- fs.Var(&extraTestArgs, "extra_test_arg", "extra arguments to pass to the testbench")
- fs.BoolVar(&expectFailure, "expect_failure", false, "expect that the test will fail when run")
- fs.IntVar(&numDUTs, "num_duts", numDUTs, "the number of duts to create")
-}
-
-const (
- // CtrlPort is the port that posix_server listens on.
- CtrlPort uint16 = 40000
- // testOutputDir is the directory in each container that holds test output.
- testOutputDir = "/tmp/testoutput"
-)
-
-// logger implements testutil.Logger.
-//
-// Labels logs based on their source and formats multi-line logs.
-type logger string
-
-// Name implements testutil.Logger.Name.
-func (l logger) Name() string {
- return string(l)
-}
-
-// Logf implements testutil.Logger.Logf.
-func (l logger) Logf(format string, args ...interface{}) {
- lines := strings.Split(fmt.Sprintf(format, args...), "\n")
- log.Printf("%s: %s", l, lines[0])
- for _, line := range lines[1:] {
- log.Printf("%*s %s", len(l), "", line)
- }
-}
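The Logf implementation above uses the "%*s" width verb to pad continuation lines so they line up under the logger name printed on the first line. A small self-contained sketch of the same trick, with a made-up logger name:

package main

import (
	"log"
	"strings"
)

func main() {
	// A hypothetical logger name and a multi-line message; "%*s" inserts
	// len(name) spaces so continuation lines align under the first one.
	name := "testbench"
	msg := "line one\nline two\nline three"
	lines := strings.Split(msg, "\n")
	log.Printf("%s: %s", name, lines[0])
	for _, line := range lines[1:] {
		log.Printf("%*s %s", len(name), "", line)
	}
}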
-
-// dutInfo encapsulates all the essential information needed to set up the
-// testbench container.
-type dutInfo struct {
- dut DUT
- ctrlNet, testNet *dockerutil.Network
- netInfo *testbench.DUTTestNet
- uname *testbench.DUTUname
-}
-
-// setUpDUT sets up one DUT and returns the information needed to set up the
-// testbench container.
-func setUpDUT(ctx context.Context, t *testing.T, id int, mkDevice func(*dockerutil.Container) DUT) (dutInfo, error) {
- // Create the networks needed for the test. One control network is needed
- // for the gRPC control packets and one test network on which to transmit
- // the test packets.
- var info dutInfo
- ctrlNet := dockerutil.NewNetwork(ctx, logger("ctrlNet"))
- testNet := dockerutil.NewNetwork(ctx, logger("testNet"))
- for _, dn := range []*dockerutil.Network{ctrlNet, testNet} {
- for {
- if err := createDockerNetwork(ctx, dn); err != nil {
- t.Log("creating docker network:", err)
- const wait = 100 * time.Millisecond
- t.Logf("sleeping %s and will try creating docker network again", wait)
- // This can fail if another docker network claimed the same IP so we
- // will just try again.
- time.Sleep(wait)
- continue
- }
- break
- }
- dn := dn
- t.Cleanup(func() {
- if err := dn.Cleanup(ctx); err != nil {
- t.Errorf("failed to cleanup network %s: %s", dn.Name, err)
- }
- })
- // Sanity check.
- if inspect, err := dn.Inspect(ctx); err != nil {
- return dutInfo{}, fmt.Errorf("failed to inspect network %s: %w", dn.Name, err)
- } else if inspect.Name != dn.Name {
- return dutInfo{}, fmt.Errorf("name mismatch for network want: %s got: %s", dn.Name, inspect.Name)
- }
- }
- info.ctrlNet = ctrlNet
- info.testNet = testNet
-
- // Create the Docker container for the DUT.
- makeContainer := dockerutil.MakeContainer
- if native {
- makeContainer = dockerutil.MakeNativeContainer
- }
- dutContainer := makeContainer(ctx, logger(fmt.Sprintf("dut-%d", id)))
- t.Cleanup(func() {
- dutContainer.CleanUp(ctx)
- })
- info.dut = mkDevice(dutContainer)
-
- runOpts := dockerutil.RunOpts{
- Image: "packetimpact",
- CapAdd: []string{"NET_ADMIN"},
- }
- if _, err := MountTempDirectory(t, &runOpts, "dut-output", testOutputDir); err != nil {
- return dutInfo{}, err
- }
-
- ipv4PrefixLength, _ := testNet.Subnet.Mask.Size()
- remoteIPv6, remoteMAC, dutDeviceID, dutTestNetDev, err := info.dut.Prepare(ctx, t, runOpts, ctrlNet, testNet)
- if err != nil {
- return dutInfo{}, err
- }
- info.netInfo = &testbench.DUTTestNet{
- RemoteMAC: remoteMAC,
- RemoteIPv4: AddressInSubnet(DUTAddr, *testNet.Subnet),
- RemoteIPv6: remoteIPv6,
- RemoteDevID: dutDeviceID,
- RemoteDevName: dutTestNetDev,
- LocalIPv4: AddressInSubnet(testbenchAddr, *testNet.Subnet),
- IPv4PrefixLength: ipv4PrefixLength,
- POSIXServerIP: AddressInSubnet(DUTAddr, *ctrlNet.Subnet),
- POSIXServerPort: CtrlPort,
- }
- info.uname, err = info.dut.Uname(ctx)
- if err != nil {
- return dutInfo{}, fmt.Errorf("failed to get uname information on DUT: %w", err)
- }
- return info, nil
-}
-
-// TestWithDUT runs a packetimpact test with the given information.
-func TestWithDUT(ctx context.Context, t *testing.T, mkDevice func(*dockerutil.Container) DUT) {
- if testbenchBinary == "" {
- t.Fatal("--testbench_binary is missing")
- }
- dockerutil.EnsureSupportedDockerVersion()
-
- dutInfoChan := make(chan dutInfo, numDUTs)
- errChan := make(chan error, numDUTs)
- var dockerNetworks []*dockerutil.Network
- var dutInfos []*testbench.DUTInfo
- var duts []DUT
-
- setUpCtx, cancelSetup := context.WithCancel(ctx)
- t.Cleanup(cancelSetup)
- for i := 0; i < numDUTs; i++ {
- go func(i int) {
- info, err := setUpDUT(setUpCtx, t, i, mkDevice)
- if err != nil {
- errChan <- err
- } else {
- dutInfoChan <- info
- }
- }(i)
- }
- for i := 0; i < numDUTs; i++ {
- select {
- case info := <-dutInfoChan:
- dockerNetworks = append(dockerNetworks, info.ctrlNet, info.testNet)
- dutInfos = append(dutInfos, &testbench.DUTInfo{
- Net: info.netInfo,
- Uname: info.uname,
- })
- duts = append(duts, info.dut)
- case err := <-errChan:
- t.Fatal(err)
- }
- }
-
- // Create the Docker container for the testbench.
- testbenchContainer := dockerutil.MakeNativeContainer(ctx, logger("testbench"))
- t.Cleanup(func() {
- testbenchContainer.CleanUp(ctx)
- })
-
- runOpts := dockerutil.RunOpts{
- Image: "packetimpact",
- CapAdd: []string{"NET_ADMIN"},
- }
- if _, err := MountTempDirectory(t, &runOpts, "testbench-output", testOutputDir); err != nil {
- t.Fatal(err)
- }
- tbb := path.Base(testbenchBinary)
- containerTestbenchBinary := filepath.Join("/packetimpact", tbb)
- testbenchContainer.CopyFiles(&runOpts, "/packetimpact", filepath.Join("test/packetimpact/tests", tbb))
-
- if err := StartContainer(
- ctx,
- runOpts,
- testbenchContainer,
- testbenchAddr,
- dockerNetworks,
- nil, /* sysctls */
- "tail", "-f", "/dev/null",
- ); err != nil {
- t.Fatalf("cannot start testbench container: %s", err)
- }
-
- for i := range dutInfos {
- name, info, err := deviceByIP(ctx, testbenchContainer, dutInfos[i].Net.LocalIPv4)
- if err != nil {
- t.Fatalf("failed to get the device name associated with %s: %s", dutInfos[i].Net.LocalIPv4, err)
- }
- dutInfos[i].Net.LocalDevName = name
- dutInfos[i].Net.LocalDevID = info.ID
- dutInfos[i].Net.LocalMAC = info.MAC
- localIPv6, err := getOrAssignIPv6Addr(ctx, testbenchContainer, name)
- if err != nil {
- t.Fatalf("failed to get IPV6 address on %s: %s", testbenchContainer.Name, err)
- }
- dutInfos[i].Net.LocalIPv6 = localIPv6
- }
- dutInfosBytes, err := json.Marshal(dutInfos)
- if err != nil {
- t.Fatalf("failed to marshal %v into json: %s", dutInfos, err)
- }
-
- baseSnifferArgs := []string{
- "tcpdump",
- "-vvv",
- "--absolute-tcp-sequence-numbers",
- "--packet-buffered",
- // Disable DNS resolution.
- "-n",
- // run tcpdump as root since the output directory is owned by root. From
- // `man tcpdump`:
- //
- // -Z user
- // --relinquish-privileges=user
- // If tcpdump is running as root, after opening the capture device
- // or input savefile, change the user ID to user and the group ID to
- // the primary group of user.
- // This behavior is enabled by default (-Z tcpdump), and can be
- // disabled by -Z root.
- "-Z", "root",
- }
- if tshark {
- baseSnifferArgs = []string{
- "tshark",
- "-V",
- "-o", "tcp.check_checksum:TRUE",
- "-o", "udp.check_checksum:TRUE",
- // Disable buffering.
- "-l",
- // Disable DNS resolution.
- "-n",
- }
- }
- for _, info := range dutInfos {
- n := info.Net
- snifferArgs := append(baseSnifferArgs, "-i", n.LocalDevName)
- if !tshark {
- snifferArgs = append(
- snifferArgs,
- "-w",
- filepath.Join(testOutputDir, fmt.Sprintf("%s.pcap", n.LocalDevName)),
- )
- }
- p, err := testbenchContainer.ExecProcess(ctx, dockerutil.ExecOpts{}, snifferArgs...)
- if err != nil {
- t.Fatalf("failed to start exec a sniffer on %s: %s", n.LocalDevName, err)
- }
- t.Cleanup(func() {
- if snifferOut, err := p.Logs(); err != nil {
- t.Errorf("sniffer logs failed: %s\n%s", err, snifferOut)
- } else {
- t.Logf("sniffer logs:\n%s", snifferOut)
- }
- })
- // When the Linux kernel receives a SYN-ACK for a SYN it didn't send, it
- // will respond with an RST. In most packetimpact tests, the SYN is sent
- // by the raw socket, so the kernel knows nothing about the connection;
- // this behavior would break many TCP-related packetimpact tests. To
- // prevent that, we install the following iptables rules. The raw socket
- // that packetimpact tests use will still be able to see everything.
- for _, bin := range []string{"iptables", "ip6tables"} {
- if logs, err := testbenchContainer.Exec(ctx, dockerutil.ExecOpts{}, bin, "-A", "INPUT", "-i", n.LocalDevName, "-p", "tcp", "-j", "DROP"); err != nil {
- t.Fatalf("unable to Exec %s on container %s: %s, logs from testbench:\n%s", bin, testbenchContainer.Name, err, logs)
- }
- }
- }
-
- t.Cleanup(func() {
- // Wait 1 second before killing tcpdump to give it time to flush any
- // packets. On Linux, killing it immediately can sometimes result in
- // partial pcaps.
- time.Sleep(1 * time.Second)
- if logs, err := testbenchContainer.Exec(ctx, dockerutil.ExecOpts{}, "killall", baseSnifferArgs[0]); err != nil {
- t.Errorf("failed to kill all sniffers: %s, logs: %s", err, logs)
- }
- })
-
- // FIXME(b/156449515): Some piece of the system has a race. The old
- // bash script version had a sleep, so we have one too. The race should
- // be fixed and this sleep removed.
- time.Sleep(time.Second)
-
- // Start a packetimpact test on the test bench. The packetimpact test sends
- // and receives packets and also sends POSIX socket commands to the
- // posix_server to be executed on the DUT.
- testArgs := []string{containerTestbenchBinary}
- if testing.Verbose() {
- testArgs = append(testArgs, "-test.v")
- }
- testArgs = append(testArgs, extraTestArgs...)
- testArgs = append(testArgs,
- fmt.Sprintf("--native=%t", native),
- "--dut_infos_json", string(dutInfosBytes),
- )
- testbenchLogs, err := testbenchContainer.Exec(ctx, dockerutil.ExecOpts{}, testArgs...)
- var dutLogs string
- for i, dut := range duts {
- logs, err := dut.Logs(ctx)
- if err != nil {
- logs = fmt.Sprintf("failed to fetch DUT logs: %s", err)
- }
- dutLogs = fmt.Sprintf(`%s====== Begin of DUT-%d Logs ======
-
-%s
-
-====== End of DUT-%d Logs ======
-
-`, dutLogs, i, logs, i)
- }
- testLogs := fmt.Sprintf(`
-%s====== Begin of Testbench Logs ======
-
-%s
-
-====== End of Testbench Logs ======`, dutLogs, testbenchLogs)
- if (err != nil) != expectFailure {
- t.Errorf(`test error: %v, expect failure: %t
-%s`, err, expectFailure, testLogs)
- } else if expectFailure {
- t.Logf(`test failed as expected: %v
-%s`, err, testLogs)
- } else if testing.Verbose() {
- t.Log(testLogs)
- }
-}
-
-// DUT describes how to set up and tear down the DUT for packetimpact tests.
-type DUT interface {
- // Prepare prepares the DUT, starts posix_server, and returns the IPv6
- // address, MAC address, interface ID, and interface name for the testNet
- // on the DUT. The t parameter should only be used for t.Cleanup; don't use
- // it for t.Fatal/FailNow functions.
- Prepare(ctx context.Context, t *testing.T, runOpts dockerutil.RunOpts, ctrlNet, testNet *dockerutil.Network) (net.IP, net.HardwareAddr, uint32, string, error)
-
- // Uname gathers information about the DUT using the uname command.
- Uname(ctx context.Context) (*testbench.DUTUname, error)
-
- // Logs retrieves the logs from the dut.
- Logs(ctx context.Context) (string, error)
-}
-
-// DockerDUT describes a docker based DUT.
-type DockerDUT struct {
- c *dockerutil.Container
-}
-
-// NewDockerDUT creates a docker based DUT.
-func NewDockerDUT(c *dockerutil.Container) DUT {
- return &DockerDUT{
- c: c,
- }
-}
-
-// Prepare implements DUT.Prepare.
-func (dut *DockerDUT) Prepare(ctx context.Context, _ *testing.T, runOpts dockerutil.RunOpts, ctrlNet, testNet *dockerutil.Network) (net.IP, net.HardwareAddr, uint32, string, error) {
- const containerPosixServerBinary = "/packetimpact/posix_server"
- dut.c.CopyFiles(&runOpts, "/packetimpact", "test/packetimpact/dut/posix_server")
-
- if err := StartContainer(
- ctx,
- runOpts,
- dut.c,
- DUTAddr,
- []*dockerutil.Network{ctrlNet, testNet},
- map[string]string{
- // This enables creating ICMP sockets on Linux.
- "net.ipv4.ping_group_range": "0 0",
- },
- containerPosixServerBinary,
- "--ip=0.0.0.0",
- fmt.Sprintf("--port=%d", CtrlPort),
- ); err != nil {
- return nil, nil, 0, "", fmt.Errorf("failed to start docker container for DUT: %w", err)
- }
-
- if _, err := dut.c.WaitForOutput(ctx, "Server listening.*\n", 60*time.Second); err != nil {
- return nil, nil, 0, "", fmt.Errorf("%s on container %s never listened: %s", containerPosixServerBinary, dut.c.Name, err)
- }
-
- dutTestDevice, dutDeviceInfo, err := deviceByIP(ctx, dut.c, AddressInSubnet(DUTAddr, *testNet.Subnet))
- if err != nil {
- return nil, nil, 0, "", err
- }
-
- remoteIPv6, err := getOrAssignIPv6Addr(ctx, dut.c, dutTestDevice)
- if err != nil {
- return nil, nil, 0, "", fmt.Errorf("failed to get IPv6 address on %s: %s", dut.c.Name, err)
- }
- const testNetDev = "eth2"
-
- return remoteIPv6, dutDeviceInfo.MAC, dutDeviceInfo.ID, testNetDev, nil
-}
-
-// Uname implements DUT.Uname.
-func (dut *DockerDUT) Uname(ctx context.Context) (*testbench.DUTUname, error) {
- machine, err := dut.c.Exec(ctx, dockerutil.ExecOpts{}, "uname", "-m")
- if err != nil {
- return nil, err
- }
- kernelRelease, err := dut.c.Exec(ctx, dockerutil.ExecOpts{}, "uname", "-r")
- if err != nil {
- return nil, err
- }
- kernelVersion, err := dut.c.Exec(ctx, dockerutil.ExecOpts{}, "uname", "-v")
- if err != nil {
- return nil, err
- }
- kernelName, err := dut.c.Exec(ctx, dockerutil.ExecOpts{}, "uname", "-s")
- if err != nil {
- return nil, err
- }
- // TODO(gvisor.dev/issues/5586): -o is not supported on macOS.
- operatingSystem, err := dut.c.Exec(ctx, dockerutil.ExecOpts{}, "uname", "-o")
- if err != nil {
- return nil, err
- }
- return &testbench.DUTUname{
- Machine: strings.TrimRight(machine, "\n"),
- KernelName: strings.TrimRight(kernelName, "\n"),
- KernelRelease: strings.TrimRight(kernelRelease, "\n"),
- KernelVersion: strings.TrimRight(kernelVersion, "\n"),
- OperatingSystem: strings.TrimRight(operatingSystem, "\n"),
- }, nil
-}
-
-// Logs implements DUT.Logs.
-func (dut *DockerDUT) Logs(ctx context.Context) (string, error) {
- logs, err := dut.c.Logs(ctx)
- if err != nil {
- return "", err
- }
- return logs, nil
-}
-
-// AddNetworks connects the container to each of the given docker networks and
-// assigns it the specified IP address on each.
-func AddNetworks(ctx context.Context, d *dockerutil.Container, addr net.IP, networks []*dockerutil.Network) error {
- for _, dn := range networks {
- ip := AddressInSubnet(addr, *dn.Subnet)
- // Connect to the network with the specified IP address.
- if err := dn.Connect(ctx, d, ip.String(), ""); err != nil {
- return fmt.Errorf("unable to connect container %s to network %s: %w", d.Name, dn.Name, err)
- }
- }
- return nil
-}
-
-// AddressInSubnet combines the given subnet with the given address and returns
-// a new address. The returned address's bits come from the subnet where the
-// mask is 1 and from the IP address where the mask is 0.
-func AddressInSubnet(addr net.IP, subnet net.IPNet) net.IP {
- var octets net.IP
- for i := 0; i < 4; i++ {
- octets = append(octets, (subnet.IP.To4()[i]&subnet.Mask[i])+(addr.To4()[i]&(^subnet.Mask[i])))
- }
- return octets
-}
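A quick worked example of the mask arithmetic above, using a local copy of the function since this package is being removed; the subnet and host suffix are arbitrary values chosen for illustration:

package main

import (
	"fmt"
	"net"
)

// addressInSubnet is a local copy of the arithmetic above: network bits come
// from the subnet, host bits come from addr.
func addressInSubnet(addr net.IP, subnet net.IPNet) net.IP {
	var octets net.IP
	for i := 0; i < 4; i++ {
		octets = append(octets, (subnet.IP.To4()[i]&subnet.Mask[i])+(addr.To4()[i]&^subnet.Mask[i]))
	}
	return octets
}

func main() {
	_, subnet, _ := net.ParseCIDR("192.168.5.0/24") // an example subnet, not one the runner necessarily picks
	hostSuffix := net.IPv4(0, 0, 0, 10)             // same shape as DUTAddr above
	fmt.Println(addressInSubnet(hostSuffix, *subnet)) // 192.168.5.10
}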
-
-// devicesInfo runs "ip addr show" on the container and parses the output
-// into a map[string]netdevs.DeviceInfo.
-func devicesInfo(ctx context.Context, d *dockerutil.Container) (map[string]netdevs.DeviceInfo, error) {
- out, err := d.Exec(ctx, dockerutil.ExecOpts{}, "ip", "addr", "show")
- if err != nil {
- return map[string]netdevs.DeviceInfo{}, fmt.Errorf("listing devices on %s container: %w\n%s", d.Name, err, out)
- }
- devs, err := netdevs.ParseDevices(out)
- if err != nil {
- return map[string]netdevs.DeviceInfo{}, fmt.Errorf("parsing devices from %s container: %w\n%s", d.Name, err, out)
- }
- return devs, nil
-}
-
-// deviceByIP finds a deviceInfo and device name from an IP address.
-func deviceByIP(ctx context.Context, d *dockerutil.Container, ip net.IP) (string, netdevs.DeviceInfo, error) {
- devs, err := devicesInfo(ctx, d)
- if err != nil {
- return "", netdevs.DeviceInfo{}, err
- }
- testDevice, deviceInfo, err := netdevs.FindDeviceByIP(ip, devs)
- if err != nil {
- return "", netdevs.DeviceInfo{}, fmt.Errorf("can't find deviceInfo for container %s: %w", d.Name, err)
- }
- return testDevice, deviceInfo, nil
-}
-
-// getOrAssignIPv6Addr tries to get the IPv6 address of the interface; if no
-// address is assigned yet, a link-local address derived from the MAC is
-// assigned to the interface.
-func getOrAssignIPv6Addr(ctx context.Context, d *dockerutil.Container, iface string) (net.IP, error) {
- devs, err := devicesInfo(ctx, d)
- if err != nil {
- return net.IP{}, err
- }
- info := devs[iface]
- if info.IPv6Addr != nil {
- return info.IPv6Addr, nil
- }
- if info.MAC == nil {
- return nil, fmt.Errorf("unable to find MAC address of %s", iface)
- }
- if logs, err := d.Exec(ctx, dockerutil.ExecOpts{}, "ip", "addr", "add", netdevs.MACToIP(info.MAC).String(), "scope", "link", "dev", iface); err != nil {
- return net.IP{}, fmt.Errorf("unable to ip addr add on container %s: %w, logs: %s", d.Name, err, logs)
- }
- // Now try again, to make sure that it worked.
- devs, err = devicesInfo(ctx, d)
- if err != nil {
- return net.IP{}, err
- }
- info = devs[iface]
- if info.IPv6Addr == nil {
- return net.IP{}, fmt.Errorf("unable to set IPv6 address on container %s", d.Name)
- }
- return info.IPv6Addr, nil
-}
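For background on the "ip addr add ... scope link" fallback above: a link-local IPv6 address is conventionally derived from a MAC via the EUI-64 construction. The sketch below shows that conventional derivation; whether netdevs.MACToIP uses exactly this construction is not shown in this diff:

package main

import (
	"fmt"
	"net"
)

// linkLocalFromMAC derives an IPv6 link-local address from a MAC using the
// conventional EUI-64 construction: flip the universal/local bit of the first
// octet, insert ff:fe in the middle, and prefix with fe80::.
func linkLocalFromMAC(mac net.HardwareAddr) net.IP {
	ip := make(net.IP, net.IPv6len)
	ip[0], ip[1] = 0xfe, 0x80
	ip[8] = mac[0] ^ 0x02
	ip[9], ip[10] = mac[1], mac[2]
	ip[11], ip[12] = 0xff, 0xfe
	ip[13], ip[14], ip[15] = mac[3], mac[4], mac[5]
	return ip
}

func main() {
	mac, _ := net.ParseMAC("02:42:ac:11:00:02")
	fmt.Println(linkLocalFromMAC(mac)) // fe80::42:acff:fe11:2
}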
-
-// createDockerNetwork gives the network a random /24 subnet and creates it.
-func createDockerNetwork(ctx context.Context, n *dockerutil.Network) error {
- randSource := rand.NewSource(time.Now().UnixNano())
- r1 := rand.New(randSource)
- // Class C, 192.0.0.0 to 223.255.255.255, traditionally has a /24 mask.
- ip := net.IPv4(byte(r1.Intn(224-192)+192), byte(r1.Intn(256)), byte(r1.Intn(256)), 0)
- n.Subnet = &net.IPNet{
- IP: ip,
- Mask: ip.DefaultMask(),
- }
- return n.Create(ctx)
-}
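For illustration, the subnet-picking logic above in isolation (without the dockerutil call): the first octet is drawn from [192, 224), the historical class C range, and DefaultMask then yields a /24. This is only a sketch of the randomization, not the full network creation:

package main

import (
	"fmt"
	"math/rand"
	"net"
	"time"
)

func main() {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	// First octet in [192, 223]: the historical class C range, whose default
	// mask is /24, so every generated subnet holds up to 254 hosts.
	ip := net.IPv4(byte(r.Intn(224-192)+192), byte(r.Intn(256)), byte(r.Intn(256)), 0)
	subnet := net.IPNet{IP: ip, Mask: ip.DefaultMask()}
	fmt.Println(subnet.String()) // e.g. 203.47.118.0/24
}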
-
-// StartContainer will create a container instance from runOpts, connect it
-// with the specified docker networks and start executing the specified cmd.
-func StartContainer(ctx context.Context, runOpts dockerutil.RunOpts, c *dockerutil.Container, containerAddr net.IP, ns []*dockerutil.Network, sysctls map[string]string, cmd ...string) error {
- conf, hostconf, netconf := c.ConfigsFrom(runOpts, cmd...)
- _ = netconf
- hostconf.Sysctls = map[string]string{"net.ipv6.conf.all.disable_ipv6": "0"}
- for k, v := range sysctls {
- hostconf.Sysctls[k] = v
- }
-
- if err := c.CreateFrom(ctx, runOpts.Image, conf, hostconf, nil); err != nil {
- return fmt.Errorf("unable to create container %s: %w", c.Name, err)
- }
-
- if err := AddNetworks(ctx, c, containerAddr, ns); err != nil {
- return fmt.Errorf("unable to connect the container with the networks: %w", err)
- }
-
- if err := c.Start(ctx); err != nil {
- return fmt.Errorf("unable to start container %s: %w", c.Name, err)
- }
- return nil
-}
-
-// MountTempDirectory creates a temporary directory on the host from the given
-// template and then mounts it into the container at the provided path. The
-// temporary directory name is returned. Content in that directory is copied to
-// TEST_UNDECLARED_OUTPUTS_DIR in the cleanup phase.
-func MountTempDirectory(t *testing.T, runOpts *dockerutil.RunOpts, hostDirTemplate, containerDir string) (string, error) {
- t.Helper()
- tmpDir, err := ioutil.TempDir("", hostDirTemplate)
- if err != nil {
- return "", fmt.Errorf("failed to create a temp dir: %w", err)
- }
- t.Cleanup(func() {
- if err := exec.Command("/bin/cp", "-r", tmpDir, os.Getenv("TEST_UNDECLARED_OUTPUTS_DIR")).Run(); err != nil {
- t.Errorf("unable to copy container output files: %s", err)
- }
- if err := os.RemoveAll(tmpDir); err != nil {
- t.Errorf("failed to remove tmpDir %s: %s", tmpDir, err)
- }
- })
- runOpts.Mounts = append(runOpts.Mounts, mount.Mount{
- Type: mount.TypeBind,
- Source: tmpDir,
- Target: containerDir,
- ReadOnly: false,
- })
- return tmpDir, nil
-}
diff --git a/test/packetimpact/runner/packetimpact_test.go b/test/packetimpact/runner/packetimpact_test.go
deleted file mode 100644
index 46334b7ab..000000000
--- a/test/packetimpact/runner/packetimpact_test.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// The runner starts docker containers and networking for a packetimpact test.
-package packetimpact_test
-
-import (
- "context"
- "flag"
- "testing"
-
- "gvisor.dev/gvisor/test/packetimpact/runner"
-)
-
-func init() {
- runner.RegisterFlags(flag.CommandLine)
-}
-
-func TestOne(t *testing.T) {
- runner.TestWithDUT(context.Background(), t, runner.NewDockerDUT)
-}
diff --git a/test/packetimpact/testbench/BUILD b/test/packetimpact/testbench/BUILD
deleted file mode 100644
index d8059ab98..000000000
--- a/test/packetimpact/testbench/BUILD
+++ /dev/null
@@ -1,47 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(
- licenses = ["notice"],
-)
-
-go_library(
- name = "testbench",
- srcs = [
- "connections.go",
- "dut.go",
- "dut_client.go",
- "layers.go",
- "rawsockets.go",
- "testbench.go",
- ],
- visibility = ["//test/packetimpact:__subpackages__"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/binary",
- "//pkg/hostarch",
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/header",
- "//pkg/tcpip/seqnum",
- "//test/packetimpact/proto:posix_server_go_proto",
- "@com_github_google_go_cmp//cmp:go_default_library",
- "@com_github_google_go_cmp//cmp/cmpopts:go_default_library",
- "@com_github_mohae_deepcopy//:go_default_library",
- "@org_golang_google_grpc//:go_default_library",
- "@org_golang_google_grpc//keepalive:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- "@org_uber_go_multierr//:go_default_library",
- ],
-)
-
-go_test(
- name = "testbench_test",
- size = "small",
- srcs = ["layers_test.go"],
- library = ":testbench",
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/header",
- "@com_github_mohae_deepcopy//:go_default_library",
- ],
-)
diff --git a/test/packetimpact/testbench/connections.go b/test/packetimpact/testbench/connections.go
deleted file mode 100644
index ed56f9ac7..000000000
--- a/test/packetimpact/testbench/connections.go
+++ /dev/null
@@ -1,1263 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package testbench
-
-import (
- "fmt"
- "math/rand"
- "testing"
- "time"
-
- "github.com/mohae/deepcopy"
- "go.uber.org/multierr"
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/seqnum"
-)
-
-func portFromSockaddr(sa unix.Sockaddr) (uint16, error) {
- switch sa := sa.(type) {
- case *unix.SockaddrInet4:
- return uint16(sa.Port), nil
- case *unix.SockaddrInet6:
- return uint16(sa.Port), nil
- }
- return 0, fmt.Errorf("sockaddr type %T does not contain port", sa)
-}
-
-// pickPort makes a new socket and returns the socket FD and port. The domain
-// should be AF_INET or AF_INET6. The caller must close the FD when done with
-// the port if there is no error.
-func (n *DUTTestNet) pickPort(domain, typ int) (fd int, port uint16, err error) {
- fd, err = unix.Socket(domain, typ, 0)
- if err != nil {
- return -1, 0, fmt.Errorf("creating socket: %w", err)
- }
- defer func() {
- if err != nil {
- if cerr := unix.Close(fd); cerr != nil {
- err = multierr.Append(err, fmt.Errorf("failed to close socket %d: %w", fd, cerr))
- }
- }
- }()
- var sa unix.Sockaddr
- switch domain {
- case unix.AF_INET:
- var sa4 unix.SockaddrInet4
- copy(sa4.Addr[:], n.LocalIPv4)
- sa = &sa4
- case unix.AF_INET6:
- sa6 := unix.SockaddrInet6{ZoneId: n.LocalDevID}
- copy(sa6.Addr[:], n.LocalIPv6)
- sa = &sa6
- default:
- return -1, 0, fmt.Errorf("invalid domain %d, it should be one of unix.AF_INET or unix.AF_INET6", domain)
- }
- if err = unix.Bind(fd, sa); err != nil {
- return -1, 0, fmt.Errorf("binding to %+v: %w", sa, err)
- }
- sa, err = unix.Getsockname(fd)
- if err != nil {
- return -1, 0, fmt.Errorf("unix.Getsocketname(%d): %w", fd, err)
- }
- port, err = portFromSockaddr(sa)
- if err != nil {
- return -1, 0, fmt.Errorf("extracting port from socket address %+v: %w", sa, err)
- }
- return fd, port, nil
-}
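pickPort relies on the usual trick of binding without an explicit port so that the kernel chooses a free ephemeral one, which is then read back via Getsockname. A portable analogue using only the net package (note that the real code keeps the socket open to hold the port, whereas this sketch closes it, and the loopback address is just a stand-in for the testbench's local address):

package main

import (
	"fmt"
	"net"
)

// pickLocalPort binds to port 0 so the kernel chooses a free ephemeral port,
// then reads the chosen port back from the listener's address.
func pickLocalPort() (int, error) {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return 0, err
	}
	defer l.Close()
	return l.Addr().(*net.TCPAddr).Port, nil
}

func main() {
	port, err := pickLocalPort()
	if err != nil {
		panic(err)
	}
	fmt.Println("picked port", port)
}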
-
-// layerState stores the state of a layer of a connection.
-type layerState interface {
- // outgoing returns an outgoing layer to be sent in a frame. It should not
- // update layerState; that is done in layerState.sent.
- outgoing() Layer
-
- // incoming creates an expected Layer for comparing against a received Layer.
- // Because the expectation can depend on values in the received Layer, it is
- // an input to incoming. For example, the ACK number needs to be checked in a
- // TCP packet but only if the ACK flag is set in the received packet. It
- // should not update layerState; that is done in layerState.received. The
- // caller takes ownership of the returned Layer.
- incoming(received Layer) Layer
-
- // sent updates the layerState based on the Layer that was sent. The input is
- // a Layer with all prev and next pointers populated so that the entire frame
- // as it was sent is available.
- sent(sent Layer) error
-
- // received updates the layerState based on a Layer that is received. The
- // input is a Layer with all prev and next pointers populated so that the
- // entire frame as it was received is available.
- received(received Layer) error
-
- // close frees associated resources held by the LayerState.
- close() error
-}
-
-// etherState maintains state about an Ethernet connection.
-type etherState struct {
- out, in Ether
-}
-
-var _ layerState = (*etherState)(nil)
-
-// newEtherState creates a new etherState.
-func (n *DUTTestNet) newEtherState(out, in Ether) (*etherState, error) {
- lmac := tcpip.LinkAddress(n.LocalMAC)
- rmac := tcpip.LinkAddress(n.RemoteMAC)
- s := etherState{
- out: Ether{SrcAddr: &lmac, DstAddr: &rmac},
- in: Ether{SrcAddr: &rmac, DstAddr: &lmac},
- }
- if err := s.out.merge(&out); err != nil {
- return nil, err
- }
- if err := s.in.merge(&in); err != nil {
- return nil, err
- }
- return &s, nil
-}
-
-func (s *etherState) outgoing() Layer {
- return deepcopy.Copy(&s.out).(Layer)
-}
-
-// incoming implements layerState.incoming.
-func (s *etherState) incoming(Layer) Layer {
- return deepcopy.Copy(&s.in).(Layer)
-}
-
-func (*etherState) sent(Layer) error {
- return nil
-}
-
-func (*etherState) received(Layer) error {
- return nil
-}
-
-func (*etherState) close() error {
- return nil
-}
-
-// ipv4State maintains state about an IPv4 connection.
-type ipv4State struct {
- out, in IPv4
-}
-
-var _ layerState = (*ipv4State)(nil)
-
-// newIPv4State creates a new ipv4State.
-func (n *DUTTestNet) newIPv4State(out, in IPv4) (*ipv4State, error) {
- lIP := tcpip.Address(n.LocalIPv4)
- rIP := tcpip.Address(n.RemoteIPv4)
- s := ipv4State{
- out: IPv4{SrcAddr: &lIP, DstAddr: &rIP},
- in: IPv4{SrcAddr: &rIP, DstAddr: &lIP},
- }
- if err := s.out.merge(&out); err != nil {
- return nil, err
- }
- if err := s.in.merge(&in); err != nil {
- return nil, err
- }
- return &s, nil
-}
-
-func (s *ipv4State) outgoing() Layer {
- return deepcopy.Copy(&s.out).(Layer)
-}
-
-// incoming implements layerState.incoming.
-func (s *ipv4State) incoming(Layer) Layer {
- return deepcopy.Copy(&s.in).(Layer)
-}
-
-func (*ipv4State) sent(Layer) error {
- return nil
-}
-
-func (*ipv4State) received(Layer) error {
- return nil
-}
-
-func (*ipv4State) close() error {
- return nil
-}
-
-// ipv6State maintains state about an IPv6 connection.
-type ipv6State struct {
- out, in IPv6
-}
-
-var _ layerState = (*ipv6State)(nil)
-
-// newIPv6State creates a new ipv6State.
-func (n *DUTTestNet) newIPv6State(out, in IPv6) (*ipv6State, error) {
- lIP := tcpip.Address(n.LocalIPv6)
- rIP := tcpip.Address(n.RemoteIPv6)
- s := ipv6State{
- out: IPv6{SrcAddr: &lIP, DstAddr: &rIP},
- in: IPv6{SrcAddr: &rIP, DstAddr: &lIP},
- }
- if err := s.out.merge(&out); err != nil {
- return nil, err
- }
- if err := s.in.merge(&in); err != nil {
- return nil, err
- }
- return &s, nil
-}
-
-// outgoing returns an outgoing layer to be sent in a frame.
-func (s *ipv6State) outgoing() Layer {
- return deepcopy.Copy(&s.out).(Layer)
-}
-
-func (s *ipv6State) incoming(Layer) Layer {
- return deepcopy.Copy(&s.in).(Layer)
-}
-
-func (s *ipv6State) sent(Layer) error {
- // Nothing to do.
- return nil
-}
-
-func (s *ipv6State) received(Layer) error {
- // Nothing to do.
- return nil
-}
-
-// close cleans up any resources held.
-func (s *ipv6State) close() error {
- return nil
-}
-
-// tcpState maintains state about a TCP connection.
-type tcpState struct {
- out, in TCP
- localSeqNum, remoteSeqNum *seqnum.Value
- synAck *TCP
- portPickerFD int
- finSent bool
-}
-
-var _ layerState = (*tcpState)(nil)
-
-// SeqNumValue is a helper routine that allocates a new seqnum.Value value to
-// store v and returns a pointer to it.
-func SeqNumValue(v seqnum.Value) *seqnum.Value {
- return &v
-}
-
-// newTCPState creates a new TCPState.
-func (n *DUTTestNet) newTCPState(domain int, out, in TCP) (*tcpState, error) {
- portPickerFD, localPort, err := n.pickPort(domain, unix.SOCK_STREAM)
- if err != nil {
- return nil, err
- }
- s := tcpState{
- out: TCP{SrcPort: &localPort},
- in: TCP{DstPort: &localPort},
- localSeqNum: SeqNumValue(seqnum.Value(rand.Uint32())),
- portPickerFD: portPickerFD,
- finSent: false,
- }
- if err := s.out.merge(&out); err != nil {
- return nil, err
- }
- if err := s.in.merge(&in); err != nil {
- return nil, err
- }
- return &s, nil
-}
-
-func (s *tcpState) outgoing() Layer {
- newOutgoing := deepcopy.Copy(s.out).(TCP)
- if s.localSeqNum != nil {
- newOutgoing.SeqNum = Uint32(uint32(*s.localSeqNum))
- }
- if s.remoteSeqNum != nil {
- newOutgoing.AckNum = Uint32(uint32(*s.remoteSeqNum))
- }
- return &newOutgoing
-}
-
-// incoming implements layerState.incoming.
-func (s *tcpState) incoming(received Layer) Layer {
- tcpReceived, ok := received.(*TCP)
- if !ok {
- return nil
- }
- newIn := deepcopy.Copy(s.in).(TCP)
- if s.remoteSeqNum != nil {
- newIn.SeqNum = Uint32(uint32(*s.remoteSeqNum))
- }
- if seq, flags := s.localSeqNum, tcpReceived.Flags; seq != nil && flags != nil && *flags&header.TCPFlagAck != 0 {
- // The caller didn't specify an AckNum so we'll expect the calculated one,
- // but only if the ACK flag is set because the AckNum is not valid in a
- // header if ACK is not set.
- newIn.AckNum = Uint32(uint32(*seq))
- }
- return &newIn
-}
-
-func (s *tcpState) sent(sent Layer) error {
- tcp, ok := sent.(*TCP)
- if !ok {
- return fmt.Errorf("can't update tcpState with %T Layer", sent)
- }
- if !s.finSent {
- // Advance localSeqNum by the payload length only if we have not yet sent a FIN.
- for current := tcp.next(); current != nil; current = current.next() {
- s.localSeqNum.UpdateForward(seqnum.Size(current.length()))
- }
- }
- if tcp.Flags != nil && *tcp.Flags&(header.TCPFlagSyn|header.TCPFlagFin) != 0 {
- s.localSeqNum.UpdateForward(1)
- }
- if *tcp.Flags&(header.TCPFlagFin) != 0 {
- s.finSent = true
- }
- return nil
-}
-
-func (s *tcpState) received(l Layer) error {
- tcp, ok := l.(*TCP)
- if !ok {
- return fmt.Errorf("can't update tcpState with %T Layer", l)
- }
- s.remoteSeqNum = SeqNumValue(seqnum.Value(*tcp.SeqNum))
- if *tcp.Flags&(header.TCPFlagSyn|header.TCPFlagFin) != 0 {
- s.remoteSeqNum.UpdateForward(1)
- }
- for current := tcp.next(); current != nil; current = current.next() {
- s.remoteSeqNum.UpdateForward(seqnum.Size(current.length()))
- }
- return nil
-}
-
-// close frees the port associated with this connection.
-func (s *tcpState) close() error {
- if err := unix.Close(s.portPickerFD); err != nil {
- return err
- }
- s.portPickerFD = -1
- return nil
-}
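The sent/received methods above implement standard TCP sequence-number accounting: payload bytes advance the sequence number by their length, and SYN and FIN each consume one extra sequence number. A minimal sketch of that accounting in plain uint32 arithmetic (ignoring the finSent special case and the seqnum package):

package main

import "fmt"

// advance mirrors the bookkeeping in tcpState.sent/received using plain uint32
// wrap-around arithmetic instead of the seqnum package.
func advance(seq uint32, payloadLen int, syn, fin bool) uint32 {
	seq += uint32(payloadLen) // payload bytes occupy sequence space
	if syn {
		seq++ // SYN consumes one sequence number
	}
	if fin {
		seq++ // FIN consumes one sequence number
	}
	return seq
}

func main() {
	iss := uint32(1000)                   // an arbitrary initial sequence number for the example
	seq := advance(iss, 0, true, false)   // after sending SYN
	seq = advance(seq, 100, false, false) // after sending 100 payload bytes
	seq = advance(seq, 0, false, true)    // after sending FIN
	fmt.Println(seq)                      // 1102
}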
-
-// udpState maintains state about a UDP connection.
-type udpState struct {
- out, in UDP
- portPickerFD int
-}
-
-var _ layerState = (*udpState)(nil)
-
-// newUDPState creates a new udpState.
-func (n *DUTTestNet) newUDPState(domain int, out, in UDP) (*udpState, error) {
- portPickerFD, localPort, err := n.pickPort(domain, unix.SOCK_DGRAM)
- if err != nil {
- return nil, fmt.Errorf("picking port: %w", err)
- }
- s := udpState{
- out: UDP{SrcPort: &localPort},
- in: UDP{DstPort: &localPort},
- portPickerFD: portPickerFD,
- }
- if err := s.out.merge(&out); err != nil {
- return nil, err
- }
- if err := s.in.merge(&in); err != nil {
- return nil, err
- }
- return &s, nil
-}
-
-func (s *udpState) outgoing() Layer {
- return deepcopy.Copy(&s.out).(Layer)
-}
-
-// incoming implements layerState.incoming.
-func (s *udpState) incoming(Layer) Layer {
- return deepcopy.Copy(&s.in).(Layer)
-}
-
-func (*udpState) sent(l Layer) error {
- return nil
-}
-
-func (*udpState) received(l Layer) error {
- return nil
-}
-
-// close frees the port associated with this connection.
-func (s *udpState) close() error {
- if err := unix.Close(s.portPickerFD); err != nil {
- return err
- }
- s.portPickerFD = -1
- return nil
-}
-
-// Connection holds a collection of layer states for maintaining a connection
-// along with sockets for sniffer and injecting packets.
-type Connection struct {
- layerStates []layerState
- injector Injector
- sniffer Sniffer
-}
-
-// incoming returns the default incoming frame against which to match. If
-// received has more layers than layerStates, that may still count as a match;
-// the reverse is never a match, and nil is returned.
-func (conn *Connection) incoming(received Layers) Layers {
- if len(received) < len(conn.layerStates) {
- return nil
- }
- in := Layers{}
- for i, s := range conn.layerStates {
- toMatch := s.incoming(received[i])
- if toMatch == nil {
- return nil
- }
- in = append(in, toMatch)
- }
- return in
-}
-
-func (conn *Connection) match(override, received Layers) bool {
- toMatch := conn.incoming(received)
- if toMatch == nil {
- return false // Not enough layers in gotLayers for matching.
- }
- if err := toMatch.merge(override); err != nil {
- return false // Failing to merge is not matching.
- }
- return toMatch.match(received)
-}
-
-// Close frees associated resources held by the Connection.
-func (conn *Connection) Close(t *testing.T) {
- t.Helper()
-
- errs := multierr.Combine(conn.sniffer.close(), conn.injector.close())
- for _, s := range conn.layerStates {
- if err := s.close(); err != nil {
- errs = multierr.Append(errs, fmt.Errorf("unable to close %+v: %s", s, err))
- }
- }
- if errs != nil {
- t.Fatalf("unable to close %+v: %s", conn, errs)
- }
-}
-
-// CreateFrame builds a frame for the connection with defaults overridden
-// from the innermost layer out, and additionalLayers added after it.
-//
-// Note that overrideLayers can have a length that is less than the number
-// of layers in this connection, and in such cases the innermost layers are
-// overridden first. As an example, valid values of overrideLayers for a TCP-
-// over-IPv4-over-Ethernet connection are: nil, [TCP], [IPv4, TCP], and
-// [Ethernet, IPv4, TCP].
-func (conn *Connection) CreateFrame(t *testing.T, overrideLayers Layers, additionalLayers ...Layer) Layers {
- t.Helper()
-
- var layersToSend Layers
- for i, s := range conn.layerStates {
- layer := s.outgoing()
- // overrideLayers and conn.layerStates have their tails aligned, so to
- // find the index we move backwards by the distance from i to the end.
- if j := len(overrideLayers) - (len(conn.layerStates) - i); j >= 0 {
- if err := layer.merge(overrideLayers[j]); err != nil {
- t.Fatalf("can't merge %+v into %+v: %s", layer, overrideLayers[j], err)
- }
- }
- layersToSend = append(layersToSend, layer)
- }
- layersToSend = append(layersToSend, additionalLayers...)
- return layersToSend
-}
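The index computation in CreateFrame aligns the tails of overrideLayers and layerStates, so the innermost layers are overridden first. A tiny standalone illustration of the same index math, using strings in place of Layer values:

package main

import "fmt"

func main() {
	// The same tail-alignment index math as CreateFrame: override layers are
	// matched against the innermost connection layers first.
	states := []string{"Ether", "IPv4", "TCP"}
	override := []string{"IPv4", "TCP"} // overrides the two innermost layers
	for i, s := range states {
		if j := len(override) - (len(states) - i); j >= 0 {
			fmt.Printf("layer %d (%s) merged with override[%d] (%s)\n", i, s, j, override[j])
		} else {
			fmt.Printf("layer %d (%s) uses defaults\n", i, s)
		}
	}
}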
-
-// SendFrameStateless sends a frame without updating any of the layer states.
-//
-// This method is useful for sending out-of-band control messages such as
-// ICMP packets, where it would not make sense to update the transport layer's
-// state using the ICMP header.
-func (conn *Connection) SendFrameStateless(t *testing.T, frame Layers) {
- t.Helper()
-
- outBytes, err := frame.ToBytes()
- if err != nil {
- t.Fatalf("can't build outgoing packet: %s", err)
- }
- conn.injector.Send(t, outBytes)
-}
-
-// SendFrame sends a frame on the wire and updates the state of all layers.
-func (conn *Connection) SendFrame(t *testing.T, frame Layers) {
- t.Helper()
-
- outBytes, err := frame.ToBytes()
- if err != nil {
- t.Fatalf("can't build outgoing packet: %s", err)
- }
- conn.injector.Send(t, outBytes)
-
- // frame might have nil values where the caller wanted to use default values.
- // sentFrame will have no nil values in it because it comes from parsing the
- // bytes that were actually sent.
- sentFrame := parse(parseEther, outBytes)
- // Update the state of each layer based on what was sent.
- for i, s := range conn.layerStates {
- if err := s.sent(sentFrame[i]); err != nil {
- t.Fatalf("Unable to update the state of %+v with %s: %s", s, sentFrame[i], err)
- }
- }
-}
-
-// send sends a packet, possibly with layers of this connection overridden and
-// additional layers added.
-//
-// Types defined with Connection as the underlying type should expose
-// type-safe versions of this method.
-func (conn *Connection) send(t *testing.T, overrideLayers Layers, additionalLayers ...Layer) {
- t.Helper()
-
- conn.SendFrame(t, conn.CreateFrame(t, overrideLayers, additionalLayers...))
-}
-
-// recvFrame gets the next successfully parsed frame (of type Layers) within the
-// timeout provided. If no parsable frame arrives before the timeout, it returns
-// nil.
-func (conn *Connection) recvFrame(t *testing.T, timeout time.Duration) Layers {
- t.Helper()
-
- if timeout <= 0 {
- return nil
- }
- b := conn.sniffer.Recv(t, timeout)
- if b == nil {
- return nil
- }
- return parse(parseEther, b)
-}
-
-// layersError stores the Layers that we got and the Layers that we wanted to
-// match.
-type layersError struct {
- got, want Layers
-}
-
-func (e *layersError) Error() string {
- return e.got.diff(e.want)
-}
-
-// Expect expects a frame with the final layerStates layer matching the
-// provided Layer within the timeout specified. If it doesn't arrive in time,
-// an error is returned.
-func (conn *Connection) Expect(t *testing.T, layer Layer, timeout time.Duration) (Layer, error) {
- t.Helper()
-
- // Make a frame that will ignore all but the final layer.
- layers := make([]Layer, len(conn.layerStates))
- layers[len(layers)-1] = layer
-
- gotFrame, err := conn.ExpectFrame(t, layers, timeout)
- if err != nil {
- return nil, err
- }
- if len(conn.layerStates)-1 < len(gotFrame) {
- return gotFrame[len(conn.layerStates)-1], nil
- }
- t.Fatalf("the received frame should be at least as long as the expected layers, got %d layers, want at least %d layers, got frame: %#v", len(gotFrame), len(conn.layerStates), gotFrame)
- panic("unreachable")
-}
-
-// ExpectFrame expects a frame that matches the provided Layers within the
-// timeout specified. If one arrives in time, the Layers are returned without an
-// error. If none arrives in time, it returns nil and a non-nil error.
-func (conn *Connection) ExpectFrame(t *testing.T, layers Layers, timeout time.Duration) (Layers, error) {
- t.Helper()
-
- frames, ok := conn.ListenForFrame(t, layers, timeout)
- if ok {
- return frames[len(frames)-1], nil
- }
- if len(frames) == 0 {
- return nil, fmt.Errorf("got no frames matching %s during %s", layers, timeout)
- }
-
- var errs error
- for _, got := range frames {
- want := conn.incoming(layers)
- if err := want.merge(layers); err != nil {
- errs = multierr.Combine(errs, err)
- } else {
- errs = multierr.Combine(errs, &layersError{got: got, want: want})
- }
- }
- return nil, fmt.Errorf("got frames:\n%w want %s during %s", errs, layers, timeout)
-}
-
-// ListenForFrame captures all frames until a frame matches the provided Layers,
-// or until the timeout specified. Returns all captured frames, including the
-// matched frame, and true if the desired frame was found.
-func (conn *Connection) ListenForFrame(t *testing.T, layers Layers, timeout time.Duration) ([]Layers, bool) {
- t.Helper()
-
- deadline := time.Now().Add(timeout)
- var frames []Layers
- for {
- var got Layers
- if timeout := time.Until(deadline); timeout > 0 {
- got = conn.recvFrame(t, timeout)
- }
- if got == nil {
- return frames, false
- }
- frames = append(frames, got)
- if conn.match(layers, got) {
- for i, s := range conn.layerStates {
- if err := s.received(got[i]); err != nil {
- t.Fatalf("failed to update test connection's layer states based on received frame: %s", err)
- }
- }
- return frames, true
- }
- }
-}
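ListenForFrame is a deadline-bounded receive loop: it keeps calling recvFrame with the remaining time until either a frame matches or the deadline passes. A generic sketch of that loop shape (the polling sleep is only a stand-in for recvFrame's blocking receive):

package main

import (
	"fmt"
	"time"
)

// waitFor keeps polling check until it succeeds or the timeout elapses, the
// same loop shape as ListenForFrame; the sleep stands in for recvFrame's
// blocking receive with the remaining time.
func waitFor(timeout time.Duration, check func() bool) bool {
	deadline := time.Now().Add(timeout)
	for time.Until(deadline) > 0 {
		if check() {
			return true
		}
		time.Sleep(10 * time.Millisecond)
	}
	return false
}

func main() {
	start := time.Now()
	ok := waitFor(100*time.Millisecond, func() bool {
		return time.Since(start) > 30*time.Millisecond
	})
	fmt.Println(ok) // typically true: the condition becomes true before the deadline
}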
-
-// Drain drains the sniffer's receive buffer by receiving packets until there's
-// nothing else to receive.
-func (conn *Connection) Drain(t *testing.T) {
- t.Helper()
-
- conn.sniffer.Drain(t)
-}
-
-// TCPIPv4 maintains the state for all the layers in a TCP/IPv4 connection.
-type TCPIPv4 struct {
- Connection
-}
-
-// NewTCPIPv4 creates a new TCPIPv4 connection with reasonable defaults.
-func (n *DUTTestNet) NewTCPIPv4(t *testing.T, outgoingTCP, incomingTCP TCP) TCPIPv4 {
- t.Helper()
-
- etherState, err := n.newEtherState(Ether{}, Ether{})
- if err != nil {
- t.Fatalf("can't make etherState: %s", err)
- }
- ipv4State, err := n.newIPv4State(IPv4{}, IPv4{})
- if err != nil {
- t.Fatalf("can't make ipv4State: %s", err)
- }
- tcpState, err := n.newTCPState(unix.AF_INET, outgoingTCP, incomingTCP)
- if err != nil {
- t.Fatalf("can't make tcpState: %s", err)
- }
- injector, err := n.NewInjector(t)
- if err != nil {
- t.Fatalf("can't make injector: %s", err)
- }
- sniffer, err := n.NewSniffer(t)
- if err != nil {
- t.Fatalf("can't make sniffer: %s", err)
- }
-
- return TCPIPv4{
- Connection: Connection{
- layerStates: []layerState{etherState, ipv4State, tcpState},
- injector: injector,
- sniffer: sniffer,
- },
- }
-}
-
-// Connect performs a TCP 3-way handshake. The input Connection should have a
-// final TCP Layer.
-func (conn *TCPIPv4) Connect(t *testing.T) {
- t.Helper()
-
- // Send the SYN.
- conn.Send(t, TCP{Flags: TCPFlags(header.TCPFlagSyn)})
-
- // Wait for the SYN-ACK.
- synAck, err := conn.Expect(t, TCP{Flags: TCPFlags(header.TCPFlagSyn | header.TCPFlagAck)}, time.Second)
- if err != nil {
- t.Fatalf("didn't get synack during handshake: %s", err)
- }
- conn.layerStates[len(conn.layerStates)-1].(*tcpState).synAck = synAck
-
- // Send an ACK.
- conn.Send(t, TCP{Flags: TCPFlags(header.TCPFlagAck)})
-}
-
-// ConnectWithOptions performs a TCP 3-way handshake with given TCP options.
-// The input Connection should have a final TCP Layer.
-func (conn *TCPIPv4) ConnectWithOptions(t *testing.T, options []byte) {
- t.Helper()
-
- // Send the SYN.
- conn.Send(t, TCP{Flags: TCPFlags(header.TCPFlagSyn), Options: options})
-
- // Wait for the SYN-ACK.
- synAck, err := conn.Expect(t, TCP{Flags: TCPFlags(header.TCPFlagSyn | header.TCPFlagAck)}, time.Second)
- if err != nil {
- t.Fatalf("didn't get synack during handshake: %s", err)
- }
- conn.layerStates[len(conn.layerStates)-1].(*tcpState).synAck = synAck
-
- // Send an ACK.
- conn.Send(t, TCP{Flags: TCPFlags(header.TCPFlagAck)})
-}
-
-// ExpectData is a convenience method that expects a Layer and the Layer after
-// it. If they don't arrive in time, an error is returned.
-func (conn *TCPIPv4) ExpectData(t *testing.T, tcp *TCP, payload *Payload, timeout time.Duration) (Layers, error) {
- t.Helper()
-
- expected := make([]Layer, len(conn.layerStates))
- expected[len(expected)-1] = tcp
- if payload != nil {
- expected = append(expected, payload)
- }
- return conn.ExpectFrame(t, expected, timeout)
-}
-
-// ExpectNextData attempts to receive the next incoming segment for the
-// connection and expects that to match the given layers.
-//
-// It differs from ExpectData() in that here we are only interested in the next
-// received segment, while ExpectData() can receive multiple segments for the
-// connection until there is a match with given layers or a timeout.
-func (conn *TCPIPv4) ExpectNextData(t *testing.T, tcp *TCP, payload *Payload, timeout time.Duration) (Layers, error) {
- t.Helper()
-
- // Receive the first incoming TCP segment for this connection.
- got, err := conn.ExpectData(t, &TCP{}, nil, timeout)
- if err != nil {
- return nil, err
- }
-
- expected := make([]Layer, len(conn.layerStates))
- expected[len(expected)-1] = tcp
- if payload != nil {
- expected = append(expected, payload)
- tcp.SeqNum = Uint32(uint32(*conn.RemoteSeqNum(t)) - uint32(payload.Length()))
- }
- if !conn.match(expected, got) {
- return nil, fmt.Errorf("next frame is not matching %s during %s: got %s", expected, timeout, got)
- }
- return got, nil
-}
-
-// Send sends a packet with reasonable defaults, potentially overriding the TCP
-// layer in the connection with the provided layer and adding additionalLayers.
-func (conn *TCPIPv4) Send(t *testing.T, tcp TCP, additionalLayers ...Layer) {
- t.Helper()
-
- conn.send(t, Layers{&tcp}, additionalLayers...)
-}
-
-// Expect expects a frame with the TCP layer matching the provided TCP within
-// the timeout specified. If it doesn't arrive in time, an error is returned.
-func (conn *TCPIPv4) Expect(t *testing.T, tcp TCP, timeout time.Duration) (*TCP, error) {
- t.Helper()
-
- layer, err := conn.Connection.Expect(t, &tcp, timeout)
- if layer == nil {
- return nil, err
- }
- gotTCP, ok := layer.(*TCP)
- if !ok {
- t.Fatalf("expected %s to be TCP", layer)
- }
- return gotTCP, err
-}
-
-func (conn *TCPIPv4) tcpState(t *testing.T) *tcpState {
- t.Helper()
-
- state, ok := conn.layerStates[2].(*tcpState)
- if !ok {
- t.Fatalf("got transport-layer state type=%T, expected tcpState", conn.layerStates[2])
- }
- return state
-}
-
-func (conn *TCPIPv4) ipv4State(t *testing.T) *ipv4State {
- t.Helper()
-
- state, ok := conn.layerStates[1].(*ipv4State)
- if !ok {
- t.Fatalf("expected network-layer state type=%T, expected ipv4State", conn.layerStates[1])
- }
- return state
-}
-
-// RemoteSeqNum returns the next expected sequence number from the DUT.
-func (conn *TCPIPv4) RemoteSeqNum(t *testing.T) *seqnum.Value {
- t.Helper()
-
- return conn.tcpState(t).remoteSeqNum
-}
-
-// LocalSeqNum returns the next sequence number to send from the testbench.
-func (conn *TCPIPv4) LocalSeqNum(t *testing.T) *seqnum.Value {
- t.Helper()
-
- return conn.tcpState(t).localSeqNum
-}
-
-// SynAck returns the SynAck that was part of the handshake.
-func (conn *TCPIPv4) SynAck(t *testing.T) *TCP {
- t.Helper()
-
- return conn.tcpState(t).synAck
-}
-
-// LocalAddr gets the local socket address of this connection.
-func (conn *TCPIPv4) LocalAddr(t *testing.T) *unix.SockaddrInet4 {
- t.Helper()
-
- sa := &unix.SockaddrInet4{Port: int(*conn.tcpState(t).out.SrcPort)}
- copy(sa.Addr[:], *conn.ipv4State(t).out.SrcAddr)
- return sa
-}
-
-// GenerateOTWSeqSegment generates a segment with
-// seqnum = RCV.NXT + RCV.WND + seqNumOffset. The generated segment is only
-// acceptable when seqNumOffset is 0; otherwise an ACK is expected from the
-// receiver.
-func GenerateOTWSeqSegment(t *testing.T, conn *TCPIPv4, seqNumOffset seqnum.Size, windowSize seqnum.Size) TCP {
- t.Helper()
- lastAcceptable := conn.LocalSeqNum(t).Add(windowSize)
- otwSeq := uint32(lastAcceptable.Add(seqNumOffset))
- return TCP{SeqNum: Uint32(otwSeq), Flags: TCPFlags(header.TCPFlagAck)}
-}
-
-// GenerateUnaccACKSegment generates a segment with
-// acknum = SND.NXT + seqNumOffset. The generated segment is only acceptable
-// when seqNumOffset is 0; otherwise an ACK is expected from the receiver.
-func GenerateUnaccACKSegment(t *testing.T, conn *TCPIPv4, seqNumOffset seqnum.Size, windowSize seqnum.Size) TCP {
- t.Helper()
- lastAcceptable := conn.RemoteSeqNum(t)
- unaccAck := uint32(lastAcceptable.Add(seqNumOffset))
- return TCP{AckNum: Uint32(unaccAck), Flags: TCPFlags(header.TCPFlagAck)}
-}
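Both generators above compute a sequence or acknowledgment number as a base plus an offset, with ordinary 32-bit wrap-around. A small sketch of the GenerateOTWSeqSegment computation in plain uint32 arithmetic, with example values chosen to show the wrap:

package main

import "fmt"

func main() {
	// The out-of-window sequence number from GenerateOTWSeqSegment in plain
	// uint32 arithmetic: RCV.NXT + RCV.WND + seqNumOffset, modulo 2^32.
	rcvNxt := uint32(4294967200) // example value near the top of the sequence space
	rcvWnd := uint32(64)
	seqNumOffset := uint32(100)
	otwSeq := rcvNxt + rcvWnd + seqNumOffset // wraps around past 2^32-1
	fmt.Println(otwSeq)                      // 68
}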
-
-// IPv4Conn maintains the state for all the layers in a IPv4 connection.
-type IPv4Conn struct {
- Connection
-}
-
-// NewIPv4Conn creates a new IPv4Conn connection with reasonable defaults.
-func (n *DUTTestNet) NewIPv4Conn(t *testing.T, outgoingIPv4, incomingIPv4 IPv4) IPv4Conn {
- t.Helper()
-
- etherState, err := n.newEtherState(Ether{}, Ether{})
- if err != nil {
- t.Fatalf("can't make EtherState: %s", err)
- }
- ipv4State, err := n.newIPv4State(outgoingIPv4, incomingIPv4)
- if err != nil {
- t.Fatalf("can't make IPv4State: %s", err)
- }
-
- injector, err := n.NewInjector(t)
- if err != nil {
- t.Fatalf("can't make injector: %s", err)
- }
- sniffer, err := n.NewSniffer(t)
- if err != nil {
- t.Fatalf("can't make sniffer: %s", err)
- }
-
- return IPv4Conn{
- Connection: Connection{
- layerStates: []layerState{etherState, ipv4State},
- injector: injector,
- sniffer: sniffer,
- },
- }
-}
-
-// Send sends a frame with ipv4 overriding the IPv4 layer defaults and
-// additionalLayers added after it.
-func (c *IPv4Conn) Send(t *testing.T, ipv4 IPv4, additionalLayers ...Layer) {
- t.Helper()
-
- c.send(t, Layers{&ipv4}, additionalLayers...)
-}
-
-// IPv6Conn maintains the state for all the layers in a IPv6 connection.
-type IPv6Conn struct {
- Connection
-}
-
-// NewIPv6Conn creates a new IPv6Conn connection with reasonable defaults.
-func (n *DUTTestNet) NewIPv6Conn(t *testing.T, outgoingIPv6, incomingIPv6 IPv6) IPv6Conn {
- t.Helper()
-
- etherState, err := n.newEtherState(Ether{}, Ether{})
- if err != nil {
- t.Fatalf("can't make EtherState: %s", err)
- }
- ipv6State, err := n.newIPv6State(outgoingIPv6, incomingIPv6)
- if err != nil {
- t.Fatalf("can't make IPv6State: %s", err)
- }
-
- injector, err := n.NewInjector(t)
- if err != nil {
- t.Fatalf("can't make injector: %s", err)
- }
- sniffer, err := n.NewSniffer(t)
- if err != nil {
- t.Fatalf("can't make sniffer: %s", err)
- }
-
- return IPv6Conn{
- Connection: Connection{
- layerStates: []layerState{etherState, ipv6State},
- injector: injector,
- sniffer: sniffer,
- },
- }
-}
-
-// Send sends a frame with ipv6 overriding the IPv6 layer defaults and
-// additionalLayers added after it.
-func (conn *IPv6Conn) Send(t *testing.T, ipv6 IPv6, additionalLayers ...Layer) {
- t.Helper()
-
- conn.send(t, Layers{&ipv6}, additionalLayers...)
-}
-
-// UDPIPv4 maintains the state for all the layers in a UDP/IPv4 connection.
-type UDPIPv4 struct {
- Connection
-}
-
-// NewUDPIPv4 creates a new UDPIPv4 connection with reasonable defaults.
-func (n *DUTTestNet) NewUDPIPv4(t *testing.T, outgoingUDP, incomingUDP UDP) UDPIPv4 {
- t.Helper()
-
- etherState, err := n.newEtherState(Ether{}, Ether{})
- if err != nil {
- t.Fatalf("can't make etherState: %s", err)
- }
- ipv4State, err := n.newIPv4State(IPv4{}, IPv4{})
- if err != nil {
- t.Fatalf("can't make ipv4State: %s", err)
- }
- udpState, err := n.newUDPState(unix.AF_INET, outgoingUDP, incomingUDP)
- if err != nil {
- t.Fatalf("can't make udpState: %s", err)
- }
- injector, err := n.NewInjector(t)
- if err != nil {
- t.Fatalf("can't make injector: %s", err)
- }
- sniffer, err := n.NewSniffer(t)
- if err != nil {
- t.Fatalf("can't make sniffer: %s", err)
- }
-
- return UDPIPv4{
- Connection: Connection{
- layerStates: []layerState{etherState, ipv4State, udpState},
- injector: injector,
- sniffer: sniffer,
- },
- }
-}
-
-func (conn *UDPIPv4) udpState(t *testing.T) *udpState {
- t.Helper()
-
- state, ok := conn.layerStates[2].(*udpState)
- if !ok {
- t.Fatalf("got transport-layer state type=%T, expected udpState", conn.layerStates[2])
- }
- return state
-}
-
-func (conn *UDPIPv4) ipv4State(t *testing.T) *ipv4State {
- t.Helper()
-
- state, ok := conn.layerStates[1].(*ipv4State)
- if !ok {
- t.Fatalf("got network-layer state type=%T, expected ipv4State", conn.layerStates[1])
- }
- return state
-}
-
-// LocalAddr gets the local socket address of this connection.
-func (conn *UDPIPv4) LocalAddr(t *testing.T) *unix.SockaddrInet4 {
- t.Helper()
-
- sa := &unix.SockaddrInet4{Port: int(*conn.udpState(t).out.SrcPort)}
- copy(sa.Addr[:], *conn.ipv4State(t).out.SrcAddr)
- return sa
-}
-
-// SrcPort returns the source port of this connection.
-func (conn *UDPIPv4) SrcPort(t *testing.T) uint16 {
- t.Helper()
-
- return *conn.udpState(t).out.SrcPort
-}
-
-// Send sends a packet with reasonable defaults, potentially overriding the UDP
-// layer and adding additionalLayers.
-func (conn *UDPIPv4) Send(t *testing.T, udp UDP, additionalLayers ...Layer) {
- t.Helper()
-
- conn.send(t, Layers{&udp}, additionalLayers...)
-}
-
-// SendIP sends a packet with reasonable defaults, potentially overriding the
-// UDP and IPv4 headers and adding additionalLayers.
-func (conn *UDPIPv4) SendIP(t *testing.T, ip IPv4, udp UDP, additionalLayers ...Layer) {
- t.Helper()
-
- conn.send(t, Layers{&ip, &udp}, additionalLayers...)
-}
-
-// SendFrame sends a frame on the wire and updates the state of all layers.
-func (conn *UDPIPv4) SendFrame(t *testing.T, overrideLayers Layers, additionalLayers ...Layer) {
- t.Helper()
-
- conn.send(t, overrideLayers, additionalLayers...)
-}
-
-// Expect expects a frame with the UDP layer matching the provided UDP within
-// the timeout specified. If it doesn't arrive in time, an error is returned.
-func (conn *UDPIPv4) Expect(t *testing.T, udp UDP, timeout time.Duration) (*UDP, error) {
- t.Helper()
-
- layer, err := conn.Connection.Expect(t, &udp, timeout)
- if err != nil {
- return nil, err
- }
- gotUDP, ok := layer.(*UDP)
- if !ok {
- t.Fatalf("expected %s to be UDP", layer)
- }
- return gotUDP, nil
-}
-
-// ExpectData is a convenience method that expects a Layer and the Layer after
-// it. If they don't arrive in time, an error is returned.
-func (conn *UDPIPv4) ExpectData(t *testing.T, udp UDP, payload Payload, timeout time.Duration) (Layers, error) {
- t.Helper()
-
- expected := make([]Layer, len(conn.layerStates))
- expected[len(expected)-1] = &udp
- if payload.length() != 0 {
- expected = append(expected, &payload)
- }
- return conn.ExpectFrame(t, expected, timeout)
-}
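
A minimal usage sketch of the UDPIPv4 helpers above (added for illustration, not part of the deleted file): it assumes a DUT as defined in dut.go below, a UDP socket already bound on the DUT at remotePort, and that the UDP layer exposes SrcPort/DstPort pointer fields.

    // Open a UDP/IPv4 flow from the testbench to the DUT's bound port.
    conn := dut.Net.NewUDPIPv4(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
    // Send a datagram built from defaults except the overridden UDP layer.
    conn.Send(t, testbench.UDP{})
    // Nil fields act as wildcards when matching the reply.
    if _, err := conn.Expect(t, testbench.UDP{}, time.Second); err != nil {
        t.Fatalf("expected a UDP packet from the DUT: %s", err)
    }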
-
-// UDPIPv6 maintains the state for all the layers in a UDP/IPv6 connection.
-type UDPIPv6 struct {
- Connection
-}
-
-// NewUDPIPv6 creates a new UDPIPv6 connection with reasonable defaults.
-func (n *DUTTestNet) NewUDPIPv6(t *testing.T, outgoingUDP, incomingUDP UDP) UDPIPv6 {
- t.Helper()
-
- etherState, err := n.newEtherState(Ether{}, Ether{})
- if err != nil {
- t.Fatalf("can't make etherState: %s", err)
- }
- ipv6State, err := n.newIPv6State(IPv6{}, IPv6{})
- if err != nil {
- t.Fatalf("can't make IPv6State: %s", err)
- }
- udpState, err := n.newUDPState(unix.AF_INET6, outgoingUDP, incomingUDP)
- if err != nil {
- t.Fatalf("can't make udpState: %s", err)
- }
- injector, err := n.NewInjector(t)
- if err != nil {
- t.Fatalf("can't make injector: %s", err)
- }
- sniffer, err := n.NewSniffer(t)
- if err != nil {
- t.Fatalf("can't make sniffer: %s", err)
- }
- return UDPIPv6{
- Connection: Connection{
- layerStates: []layerState{etherState, ipv6State, udpState},
- injector: injector,
- sniffer: sniffer,
- },
- }
-}
-
-func (conn *UDPIPv6) udpState(t *testing.T) *udpState {
- t.Helper()
-
- state, ok := conn.layerStates[2].(*udpState)
- if !ok {
- t.Fatalf("got transport-layer state type=%T, expected udpState", conn.layerStates[2])
- }
- return state
-}
-
-func (conn *UDPIPv6) ipv6State(t *testing.T) *ipv6State {
- t.Helper()
-
- state, ok := conn.layerStates[1].(*ipv6State)
- if !ok {
- t.Fatalf("got network-layer state type=%T, expected ipv6State", conn.layerStates[1])
- }
- return state
-}
-
-// LocalAddr gets the local socket address of this connection.
-func (conn *UDPIPv6) LocalAddr(t *testing.T, zoneID uint32) *unix.SockaddrInet6 {
- t.Helper()
-
- sa := &unix.SockaddrInet6{
- Port: int(*conn.udpState(t).out.SrcPort),
- // The local address is from the perspective of the remote host, so it is
- // scoped to the ID of the remote interface.
- ZoneId: zoneID,
- }
- copy(sa.Addr[:], *conn.ipv6State(t).out.SrcAddr)
- return sa
-}
-
-// SrcPort returns the source port of this connection.
-func (conn *UDPIPv6) SrcPort(t *testing.T) uint16 {
- t.Helper()
-
- return *conn.udpState(t).out.SrcPort
-}
-
-// Send sends a packet with reasonable defaults, potentially overriding the UDP
-// layer and adding additionalLayers.
-func (conn *UDPIPv6) Send(t *testing.T, udp UDP, additionalLayers ...Layer) {
- t.Helper()
-
- conn.send(t, Layers{&udp}, additionalLayers...)
-}
-
-// SendIPv6 sends a packet with reasonable defaults, potentially overriding the
-// UDP and IPv6 headers and adding additionalLayers.
-func (conn *UDPIPv6) SendIPv6(t *testing.T, ip IPv6, udp UDP, additionalLayers ...Layer) {
- t.Helper()
-
- conn.send(t, Layers{&ip, &udp}, additionalLayers...)
-}
-
-// SendFrame sends a frame on the wire and updates the state of all layers.
-func (conn *UDPIPv6) SendFrame(t *testing.T, overrideLayers Layers, additionalLayers ...Layer) {
- conn.send(t, overrideLayers, additionalLayers...)
-}
-
-// Expect expects a frame with the UDP layer matching the provided UDP within
-// the timeout specified. If it doesn't arrive in time, an error is returned.
-func (conn *UDPIPv6) Expect(t *testing.T, udp UDP, timeout time.Duration) (*UDP, error) {
- t.Helper()
-
- layer, err := conn.Connection.Expect(t, &udp, timeout)
- if err != nil {
- return nil, err
- }
- gotUDP, ok := layer.(*UDP)
- if !ok {
- t.Fatalf("expected %s to be UDP", layer)
- }
- return gotUDP, nil
-}
-
-// ExpectData is a convenience method that expects a Layer and the Layer after
-// it. If they don't arrive in time, an error is returned.
-func (conn *UDPIPv6) ExpectData(t *testing.T, udp UDP, payload Payload, timeout time.Duration) (Layers, error) {
- t.Helper()
-
- expected := make([]Layer, len(conn.layerStates))
- expected[len(expected)-1] = &udp
- if payload.length() != 0 {
- expected = append(expected, &payload)
- }
- return conn.ExpectFrame(t, expected, timeout)
-}
-
-// TCPIPv6 maintains the state for all the layers in a TCP/IPv6 connection.
-type TCPIPv6 struct {
- Connection
-}
-
-// NewTCPIPv6 creates a new TCPIPv6 connection with reasonable defaults.
-func (n *DUTTestNet) NewTCPIPv6(t *testing.T, outgoingTCP, incomingTCP TCP) TCPIPv6 {
- etherState, err := n.newEtherState(Ether{}, Ether{})
- if err != nil {
- t.Fatalf("can't make etherState: %s", err)
- }
- ipv6State, err := n.newIPv6State(IPv6{}, IPv6{})
- if err != nil {
- t.Fatalf("can't make ipv6State: %s", err)
- }
- tcpState, err := n.newTCPState(unix.AF_INET6, outgoingTCP, incomingTCP)
- if err != nil {
- t.Fatalf("can't make tcpState: %s", err)
- }
- injector, err := n.NewInjector(t)
- if err != nil {
- t.Fatalf("can't make injector: %s", err)
- }
- sniffer, err := n.NewSniffer(t)
- if err != nil {
- t.Fatalf("can't make sniffer: %s", err)
- }
-
- return TCPIPv6{
- Connection: Connection{
- layerStates: []layerState{etherState, ipv6State, tcpState},
- injector: injector,
- sniffer: sniffer,
- },
- }
-}
-
-// SrcPort returns the source port from the given Connection.
-func (conn *TCPIPv6) SrcPort() uint16 {
- state := conn.layerStates[2].(*tcpState)
- return *state.out.SrcPort
-}
-
-// ExpectData is a convenience method that expects a Layer and the Layer after
-// it. If they don't arrive in time, an error is returned.
-func (conn *TCPIPv6) ExpectData(t *testing.T, tcp *TCP, payload *Payload, timeout time.Duration) (Layers, error) {
- t.Helper()
-
- expected := make([]Layer, len(conn.layerStates))
- expected[len(expected)-1] = tcp
- if payload != nil {
- expected = append(expected, payload)
- }
- return conn.ExpectFrame(t, expected, timeout)
-}
diff --git a/test/packetimpact/testbench/dut.go b/test/packetimpact/testbench/dut.go
deleted file mode 100644
index 7e89ba2b3..000000000
--- a/test/packetimpact/testbench/dut.go
+++ /dev/null
@@ -1,789 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package testbench
-
-import (
- "context"
- "encoding/binary"
- "fmt"
- "net"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "google.golang.org/grpc"
- "google.golang.org/grpc/keepalive"
- "gvisor.dev/gvisor/pkg/abi/linux"
- bin "gvisor.dev/gvisor/pkg/binary"
- "gvisor.dev/gvisor/pkg/hostarch"
- pb "gvisor.dev/gvisor/test/packetimpact/proto/posix_server_go_proto"
-)
-
-// DUT communicates with the DUT to force it to make POSIX calls.
-type DUT struct {
- conn *grpc.ClientConn
- posixServer POSIXClient
- Net *DUTTestNet
- Uname *DUTUname
-}
-
-// NewDUT creates a new connection with the DUT over gRPC.
-func NewDUT(t *testing.T) DUT {
- t.Helper()
- info := getDUTInfo()
- dut := info.ConnectToDUT(t)
- t.Cleanup(func() {
- dut.TearDownConnection()
- info.release()
- })
- return dut
-}
-
-// ConnectToDUT connects to DUT through gRPC.
-func (info *DUTInfo) ConnectToDUT(t *testing.T) DUT {
- t.Helper()
-
- n := info.Net
- posixServerAddress := net.JoinHostPort(n.POSIXServerIP.String(), fmt.Sprintf("%d", n.POSIXServerPort))
- conn, err := grpc.Dial(posixServerAddress, grpc.WithInsecure(), grpc.WithKeepaliveParams(keepalive.ClientParameters{Timeout: RPCKeepalive}))
- if err != nil {
- t.Fatalf("failed to grpc.Dial(%s): %s", posixServerAddress, err)
- }
- posixServer := NewPOSIXClient(conn)
- return DUT{
- conn: conn,
- posixServer: posixServer,
- Net: n,
- Uname: info.Uname,
- }
-}
-
-// TearDownConnection closes the underlying connection.
-func (dut *DUT) TearDownConnection() {
- dut.conn.Close()
-}
-
-func (dut *DUT) sockaddrToProto(t *testing.T, sa unix.Sockaddr) *pb.Sockaddr {
- t.Helper()
-
- switch s := sa.(type) {
- case *unix.SockaddrInet4:
- return &pb.Sockaddr{
- Sockaddr: &pb.Sockaddr_In{
- In: &pb.SockaddrIn{
- Family: unix.AF_INET,
- Port: uint32(s.Port),
- Addr: s.Addr[:],
- },
- },
- }
- case *unix.SockaddrInet6:
- return &pb.Sockaddr{
- Sockaddr: &pb.Sockaddr_In6{
- In6: &pb.SockaddrIn6{
- Family: unix.AF_INET6,
- Port: uint32(s.Port),
- Flowinfo: 0,
- ScopeId: s.ZoneId,
- Addr: s.Addr[:],
- },
- },
- }
- }
- t.Fatalf("can't parse Sockaddr struct: %+v", sa)
- return nil
-}
-
-func (dut *DUT) protoToSockaddr(t *testing.T, sa *pb.Sockaddr) unix.Sockaddr {
- t.Helper()
-
- switch s := sa.Sockaddr.(type) {
- case *pb.Sockaddr_In:
- ret := unix.SockaddrInet4{
- Port: int(s.In.GetPort()),
- }
- copy(ret.Addr[:], s.In.GetAddr())
- return &ret
- case *pb.Sockaddr_In6:
- ret := unix.SockaddrInet6{
- Port: int(s.In6.GetPort()),
- ZoneId: s.In6.GetScopeId(),
- }
- copy(ret.Addr[:], s.In6.GetAddr())
- return &ret
- }
- t.Fatalf("can't parse Sockaddr proto: %#v", sa)
- return nil
-}
-
-// CreateBoundSocket makes a new socket on the DUT, with type typ and protocol
-// proto, and bound to the IP address addr. Returns the new file descriptor and
-// the port that was selected on the DUT.
-func (dut *DUT) CreateBoundSocket(t *testing.T, typ, proto int32, addr net.IP) (int32, uint16) {
- t.Helper()
-
- var fd int32
- if addr.To4() != nil {
- fd = dut.Socket(t, unix.AF_INET, typ, proto)
- sa := unix.SockaddrInet4{}
- copy(sa.Addr[:], addr.To4())
- dut.Bind(t, fd, &sa)
- } else if addr.To16() != nil {
- fd = dut.Socket(t, unix.AF_INET6, typ, proto)
- sa := unix.SockaddrInet6{}
- copy(sa.Addr[:], addr.To16())
- sa.ZoneId = dut.Net.RemoteDevID
- dut.Bind(t, fd, &sa)
- } else {
- t.Fatalf("invalid IP address: %s", addr)
- }
- sa := dut.GetSockName(t, fd)
- var port int
- switch s := sa.(type) {
- case *unix.SockaddrInet4:
- port = s.Port
- case *unix.SockaddrInet6:
- port = s.Port
- default:
- t.Fatalf("unknown sockaddr type from getsockname: %T", sa)
- }
- return fd, uint16(port)
-}
-
-// CreateListener creates a listening socket on the DUT, bound to its remote
-// IPv4 address, and returns the fd and port. If it fails, the test ends.
-func (dut *DUT) CreateListener(t *testing.T, typ, proto, backlog int32) (int32, uint16) {
- t.Helper()
-
- fd, remotePort := dut.CreateBoundSocket(t, typ, proto, dut.Net.RemoteIPv4)
- dut.Listen(t, fd, backlog)
- return fd, remotePort
-}
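
A rough sketch of how these constructors are combined at the start of a test (illustrative only; the SOCK_STREAM/IPPROTO_TCP arguments and the backlog of 1 are assumptions, not taken from this file):

    // Connect to the DUT's posix_server and create a TCP listener there.
    dut := testbench.NewDUT(t)
    listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1 /* backlog */)
    defer dut.Close(t, listenFD)
    // remotePort is the port the testbench targets for the rest of the test.
    _ = remotePort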
-
-// All the functions that make gRPC calls to the POSIX service are below, sorted
-// alphabetically.
-
-// Accept calls accept on the DUT and causes a fatal test failure if it doesn't
-// succeed. If more control over the timeout or error handling is needed, use
-// AcceptWithErrno.
-func (dut *DUT) Accept(t *testing.T, sockfd int32) (int32, unix.Sockaddr) {
- t.Helper()
-
- fd, sa, err := dut.AcceptWithErrno(context.Background(), t, sockfd)
- if fd < 0 {
- t.Fatalf("failed to accept: %s", err)
- }
- return fd, sa
-}
-
-// AcceptWithErrno calls accept on the DUT.
-func (dut *DUT) AcceptWithErrno(ctx context.Context, t *testing.T, sockfd int32) (int32, unix.Sockaddr, error) {
- t.Helper()
-
- req := &pb.AcceptRequest{
- Sockfd: sockfd,
- }
- resp, err := dut.posixServer.Accept(ctx, req)
- if err != nil {
- t.Fatalf("failed to call Accept: %s", err)
- }
- return resp.GetFd(), dut.protoToSockaddr(t, resp.GetAddr()), unix.Errno(resp.GetErrno_())
-}
-
-// Bind calls bind on the DUT and causes a fatal test failure if it doesn't
-// succeed. If more control over the timeout or error handling is
-// needed, use BindWithErrno.
-func (dut *DUT) Bind(t *testing.T, fd int32, sa unix.Sockaddr) {
- t.Helper()
-
- ret, err := dut.BindWithErrno(context.Background(), t, fd, sa)
- if ret != 0 {
- t.Fatalf("failed to bind socket: %s", err)
- }
-}
-
-// BindWithErrno calls bind on the DUT.
-func (dut *DUT) BindWithErrno(ctx context.Context, t *testing.T, fd int32, sa unix.Sockaddr) (int32, error) {
- t.Helper()
-
- req := &pb.BindRequest{
- Sockfd: fd,
- Addr: dut.sockaddrToProto(t, sa),
- }
- resp, err := dut.posixServer.Bind(ctx, req)
- if err != nil {
- t.Fatalf("failed to call Bind: %s", err)
- }
- return resp.GetRet(), unix.Errno(resp.GetErrno_())
-}
-
-// Close calls close on the DUT and causes a fatal test failure if it doesn't
-// succeed. If more control over the timeout or error handling is needed, use
-// CloseWithErrno.
-func (dut *DUT) Close(t *testing.T, fd int32) {
- t.Helper()
-
- ret, err := dut.CloseWithErrno(context.Background(), t, fd)
- if ret != 0 {
- t.Fatalf("failed to close: %s", err)
- }
-}
-
-// CloseWithErrno calls close on the DUT.
-func (dut *DUT) CloseWithErrno(ctx context.Context, t *testing.T, fd int32) (int32, error) {
- t.Helper()
-
- req := &pb.CloseRequest{
- Fd: fd,
- }
- resp, err := dut.posixServer.Close(ctx, req)
- if err != nil {
- t.Fatalf("failed to call Close: %s", err)
- }
- return resp.GetRet(), unix.Errno(resp.GetErrno_())
-}
-
-// Connect calls connect on the DUT and causes a fatal test failure if it
-// doesn't succeed. If more control over the timeout or error handling is
-// needed, use ConnectWithErrno.
-func (dut *DUT) Connect(t *testing.T, fd int32, sa unix.Sockaddr) {
- t.Helper()
-
- ret, err := dut.ConnectWithErrno(context.Background(), t, fd, sa)
- // Ignore 'operation in progress' error that can be returned when the socket
- // is non-blocking.
- if err != unix.EINPROGRESS && ret != 0 {
- t.Fatalf("failed to connect socket: %s", err)
- }
-}
-
-// ConnectWithErrno calls connect on the DUT.
-func (dut *DUT) ConnectWithErrno(ctx context.Context, t *testing.T, fd int32, sa unix.Sockaddr) (int32, error) {
- t.Helper()
-
- req := &pb.ConnectRequest{
- Sockfd: fd,
- Addr: dut.sockaddrToProto(t, sa),
- }
- resp, err := dut.posixServer.Connect(ctx, req)
- if err != nil {
- t.Fatalf("failed to call Connect: %s", err)
- }
- return resp.GetRet(), unix.Errno(resp.GetErrno_())
-}
-
-// GetSockName calls getsockname on the DUT and causes a fatal test failure if
-// it doesn't succeed. If more control over the timeout or error handling is
-// needed, use GetSockNameWithErrno.
-func (dut *DUT) GetSockName(t *testing.T, sockfd int32) unix.Sockaddr {
- t.Helper()
-
- ret, sa, err := dut.GetSockNameWithErrno(context.Background(), t, sockfd)
- if ret != 0 {
- t.Fatalf("failed to getsockname: %s", err)
- }
- return sa
-}
-
-// GetSockNameWithErrno calls getsockname on the DUT.
-func (dut *DUT) GetSockNameWithErrno(ctx context.Context, t *testing.T, sockfd int32) (int32, unix.Sockaddr, error) {
- t.Helper()
-
- req := &pb.GetSockNameRequest{
- Sockfd: sockfd,
- }
- resp, err := dut.posixServer.GetSockName(ctx, req)
- if err != nil {
- t.Fatalf("failed to call GetSockName: %s", err)
- }
- return resp.GetRet(), dut.protoToSockaddr(t, resp.GetAddr()), unix.Errno(resp.GetErrno_())
-}
-
-func (dut *DUT) getSockOpt(ctx context.Context, t *testing.T, sockfd, level, optname, optlen int32, typ pb.GetSockOptRequest_SockOptType) (int32, *pb.SockOptVal, error) {
- t.Helper()
-
- req := &pb.GetSockOptRequest{
- Sockfd: sockfd,
- Level: level,
- Optname: optname,
- Optlen: optlen,
- Type: typ,
- }
- resp, err := dut.posixServer.GetSockOpt(ctx, req)
- if err != nil {
- t.Fatalf("failed to call GetSockOpt: %s", err)
- }
- optval := resp.GetOptval()
- if optval == nil {
- t.Fatalf("GetSockOpt response does not contain a value")
- }
- return resp.GetRet(), optval, unix.Errno(resp.GetErrno_())
-}
-
-// GetSockOpt calls getsockopt on the DUT and causes a fatal test failure if it
-// doesn't succeed. If more control over the timeout or error handling is
-// needed, use GetSockOptWithErrno. Because endianness and the width of values
-// might differ between the testbench and DUT architectures, prefer to use a
-// more specific GetSockOptXxx function.
-func (dut *DUT) GetSockOpt(t *testing.T, sockfd, level, optname, optlen int32) []byte {
- t.Helper()
-
- ret, optval, err := dut.GetSockOptWithErrno(context.Background(), t, sockfd, level, optname, optlen)
- if ret != 0 {
- t.Fatalf("failed to GetSockOpt: %s", err)
- }
- return optval
-}
-
-// GetSockOptWithErrno calls getsockopt on the DUT. Because endianness and the
-// width of values might differ between the testbench and DUT architectures,
-// prefer to use a more specific GetSockOptXxxWithErrno function.
-func (dut *DUT) GetSockOptWithErrno(ctx context.Context, t *testing.T, sockfd, level, optname, optlen int32) (int32, []byte, error) {
- t.Helper()
-
- ret, optval, errno := dut.getSockOpt(ctx, t, sockfd, level, optname, optlen, pb.GetSockOptRequest_BYTES)
- bytesval, ok := optval.Val.(*pb.SockOptVal_Bytesval)
- if !ok {
- t.Fatalf("GetSockOpt got value type: %T, want bytes", optval.Val)
- }
- return ret, bytesval.Bytesval, errno
-}
-
-// GetSockOptInt calls getsockopt on the DUT and causes a fatal test failure
-// if it doesn't succeed. If more control over the int optval or error handling
-// is needed, use GetSockOptIntWithErrno.
-func (dut *DUT) GetSockOptInt(t *testing.T, sockfd, level, optname int32) int32 {
- t.Helper()
-
- ret, intval, err := dut.GetSockOptIntWithErrno(context.Background(), t, sockfd, level, optname)
- if ret != 0 {
- t.Fatalf("failed to GetSockOptInt: %s", err)
- }
- return intval
-}
-
-// GetSockOptIntWithErrno calls getsockopt with an integer optval.
-func (dut *DUT) GetSockOptIntWithErrno(ctx context.Context, t *testing.T, sockfd, level, optname int32) (int32, int32, error) {
- t.Helper()
-
- ret, optval, errno := dut.getSockOpt(ctx, t, sockfd, level, optname, 0, pb.GetSockOptRequest_INT)
- intval, ok := optval.Val.(*pb.SockOptVal_Intval)
- if !ok {
- t.Fatalf("GetSockOpt got value type: %T, want int", optval.Val)
- }
- return ret, intval.Intval, errno
-}
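
For example (illustrative; SO_RCVBUF is just an assumed option name), the typed variant lets a test read an option without decoding raw bytes in DUT byte order:

    // Read an integer socket option directly as an int32.
    rcvBuf := dut.GetSockOptInt(t, fd, unix.SOL_SOCKET, unix.SO_RCVBUF)
    t.Logf("DUT SO_RCVBUF: %d", rcvBuf)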
-
-// GetSockOptTimeval calls getsockopt on the DUT and causes a fatal test failure
-// if it doesn't succeed. If more control over the timeout or error handling is
-// needed, use GetSockOptTimevalWithErrno.
-func (dut *DUT) GetSockOptTimeval(t *testing.T, sockfd, level, optname int32) unix.Timeval {
- t.Helper()
-
- ret, timeval, err := dut.GetSockOptTimevalWithErrno(context.Background(), t, sockfd, level, optname)
- if ret != 0 {
- t.Fatalf("failed to GetSockOptTimeval: %s", err)
- }
- return timeval
-}
-
-// GetSockOptTimevalWithErrno calls getsockopt and returns a timeval.
-func (dut *DUT) GetSockOptTimevalWithErrno(ctx context.Context, t *testing.T, sockfd, level, optname int32) (int32, unix.Timeval, error) {
- t.Helper()
-
- ret, optval, errno := dut.getSockOpt(ctx, t, sockfd, level, optname, 0, pb.GetSockOptRequest_TIME)
- tv, ok := optval.Val.(*pb.SockOptVal_Timeval)
- if !ok {
- t.Fatalf("GetSockOpt got value type: %T, want timeval", optval.Val)
- }
- timeval := unix.Timeval{
- Sec: tv.Timeval.Seconds,
- Usec: tv.Timeval.Microseconds,
- }
- return ret, timeval, errno
-}
-
-// GetSockOptTCPInfo retrieves TCPInfo for the given socket descriptor.
-func (dut *DUT) GetSockOptTCPInfo(t *testing.T, sockfd int32) linux.TCPInfo {
- t.Helper()
-
- ret, info, err := dut.GetSockOptTCPInfoWithErrno(context.Background(), t, sockfd)
- if ret != 0 || err != unix.Errno(0) {
- t.Fatalf("failed to GetSockOptTCPInfo: %s", err)
- }
- return info
-}
-
-// GetSockOptTCPInfoWithErrno retrieves TCPInfo along with any errno.
-func (dut *DUT) GetSockOptTCPInfoWithErrno(ctx context.Context, t *testing.T, sockfd int32) (int32, linux.TCPInfo, error) {
- t.Helper()
-
- info := linux.TCPInfo{}
- ret, infoBytes, errno := dut.GetSockOptWithErrno(ctx, t, sockfd, unix.SOL_TCP, unix.TCP_INFO, int32(linux.SizeOfTCPInfo))
- if got, want := len(infoBytes), linux.SizeOfTCPInfo; got != want {
- t.Fatalf("expected %T, got %d bytes, want %d bytes", info, got, want)
- }
- bin.Unmarshal(infoBytes, hostarch.ByteOrder, &info)
-
- return ret, info, errno
-}
-
-// Listen calls listen on the DUT and causes a fatal test failure if it doesn't
-// succeed. If more control over the timeout or error handling is needed, use
-// ListenWithErrno.
-func (dut *DUT) Listen(t *testing.T, sockfd, backlog int32) {
- t.Helper()
-
- ret, err := dut.ListenWithErrno(context.Background(), t, sockfd, backlog)
- if ret != 0 {
- t.Fatalf("failed to listen: %s", err)
- }
-}
-
-// ListenWithErrno calls listen on the DUT.
-func (dut *DUT) ListenWithErrno(ctx context.Context, t *testing.T, sockfd, backlog int32) (int32, error) {
- t.Helper()
-
- req := &pb.ListenRequest{
- Sockfd: sockfd,
- Backlog: backlog,
- }
- resp, err := dut.posixServer.Listen(ctx, req)
- if err != nil {
- t.Fatalf("failed to call Listen: %s", err)
- }
- return resp.GetRet(), unix.Errno(resp.GetErrno_())
-}
-
-// PollOne calls poll on the DUT and asserts that the expected event is
-// signaled on the given fd within the given timeout.
-func (dut *DUT) PollOne(t *testing.T, fd int32, events int16, timeout time.Duration) {
- t.Helper()
-
- pfds := dut.Poll(t, []unix.PollFd{{Fd: fd, Events: events}}, timeout)
- if n := len(pfds); n != 1 {
- t.Fatalf("Poll returned %d ready file descriptors, expected 1", n)
- }
- if readyFd := pfds[0].Fd; readyFd != fd {
- t.Fatalf("Poll returned an fd %d that was not requested (%d)", readyFd, fd)
- }
- if got, want := pfds[0].Revents, int16(events); got&want != want {
- t.Fatalf("Poll returned events that do not include all of the requested events, got: %#b, want: %#b", got, want)
- }
-}
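
Illustrative use (the fd and one-second timeout are hypothetical): block until the DUT reports the socket readable, or fail the test.

    // Fails the test unless POLLIN is signaled on fd within one second.
    dut.PollOne(t, fd, unix.POLLIN, time.Second)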
-
-// Poll calls poll on the DUT and causes a fatal test failure if it doesn't
-// succeed. If more control over error handling is needed, use PollWithErrno.
-// Only pollfds with non-empty revents are returned; the only way to tie the
-// response back to the original request is via the fd number.
-func (dut *DUT) Poll(t *testing.T, pfds []unix.PollFd, timeout time.Duration) []unix.PollFd {
- t.Helper()
-
- ret, result, err := dut.PollWithErrno(context.Background(), t, pfds, timeout)
- if ret < 0 {
- t.Fatalf("failed to poll: %s", err)
- }
- return result
-}
-
-// PollWithErrno calls poll on the DUT.
-func (dut *DUT) PollWithErrno(ctx context.Context, t *testing.T, pfds []unix.PollFd, timeout time.Duration) (int32, []unix.PollFd, error) {
- t.Helper()
-
- req := &pb.PollRequest{
- TimeoutMillis: int32(timeout.Milliseconds()),
- }
- for _, pfd := range pfds {
- req.Pfds = append(req.Pfds, &pb.PollFd{
- Fd: pfd.Fd,
- Events: uint32(pfd.Events),
- })
- }
- resp, err := dut.posixServer.Poll(ctx, req)
- if err != nil {
- t.Fatalf("failed to call Poll: %s", err)
- }
- if ret, npfds := resp.GetRet(), len(resp.GetPfds()); ret >= 0 && int(ret) != npfds {
- t.Fatalf("nonsensical poll response: ret(%d) != len(pfds)(%d)", ret, npfds)
- }
- var result []unix.PollFd
- for _, protoPfd := range resp.GetPfds() {
- result = append(result, unix.PollFd{
- Fd: protoPfd.GetFd(),
- Revents: int16(protoPfd.GetEvents()),
- })
- }
- return resp.GetRet(), result, unix.Errno(resp.GetErrno_())
-}
-
-// Send calls send on the DUT and causes a fatal test failure if it doesn't
-// succeed. If more control over the timeout or error handling is needed, use
-// SendWithErrno.
-func (dut *DUT) Send(t *testing.T, sockfd int32, buf []byte, flags int32) int32 {
- t.Helper()
-
- ret, err := dut.SendWithErrno(context.Background(), t, sockfd, buf, flags)
- if ret == -1 {
- t.Fatalf("failed to send: %s", err)
- }
- return ret
-}
-
-// SendWithErrno calls send on the DUT.
-func (dut *DUT) SendWithErrno(ctx context.Context, t *testing.T, sockfd int32, buf []byte, flags int32) (int32, error) {
- t.Helper()
-
- req := &pb.SendRequest{
- Sockfd: sockfd,
- Buf: buf,
- Flags: flags,
- }
- resp, err := dut.posixServer.Send(ctx, req)
- if err != nil {
- t.Fatalf("failed to call Send: %s", err)
- }
- return resp.GetRet(), unix.Errno(resp.GetErrno_())
-}
-
-// SendTo calls sendto on the DUT and causes a fatal test failure if it doesn't
-// succeed. If more control over the timeout or error handling is needed, use
-// SendToWithErrno.
-func (dut *DUT) SendTo(t *testing.T, sockfd int32, buf []byte, flags int32, destAddr unix.Sockaddr) int32 {
- t.Helper()
-
- ret, err := dut.SendToWithErrno(context.Background(), t, sockfd, buf, flags, destAddr)
- if ret == -1 {
- t.Fatalf("failed to sendto: %s", err)
- }
- return ret
-}
-
-// SendToWithErrno calls sendto on the DUT.
-func (dut *DUT) SendToWithErrno(ctx context.Context, t *testing.T, sockfd int32, buf []byte, flags int32, destAddr unix.Sockaddr) (int32, error) {
- t.Helper()
-
- req := &pb.SendToRequest{
- Sockfd: sockfd,
- Buf: buf,
- Flags: flags,
- DestAddr: dut.sockaddrToProto(t, destAddr),
- }
- resp, err := dut.posixServer.SendTo(ctx, req)
- if err != nil {
- t.Fatalf("failed to call SendTo: %s", err)
- }
- return resp.GetRet(), unix.Errno(resp.GetErrno_())
-}
-
-// SetNonBlocking sets the O_NONBLOCK flag on fd if nonblocking is true;
-// otherwise it clears the flag.
-func (dut *DUT) SetNonBlocking(t *testing.T, fd int32, nonblocking bool) {
- t.Helper()
-
- req := &pb.SetNonblockingRequest{
- Fd: fd,
- Nonblocking: nonblocking,
- }
-
- resp, err := dut.posixServer.SetNonblocking(context.Background(), req)
- if err != nil {
- t.Fatalf("failed to call SetNonblocking: %s", err)
- }
- if resp.GetRet() == -1 {
- t.Fatalf("fcntl(%d, %s) failed: %s", fd, resp.GetCmd(), unix.Errno(resp.GetErrno_()))
- }
-}
-
-func (dut *DUT) setSockOpt(ctx context.Context, t *testing.T, sockfd, level, optname int32, optval *pb.SockOptVal) (int32, error) {
- t.Helper()
-
- req := &pb.SetSockOptRequest{
- Sockfd: sockfd,
- Level: level,
- Optname: optname,
- Optval: optval,
- }
- resp, err := dut.posixServer.SetSockOpt(ctx, req)
- if err != nil {
- t.Fatalf("failed to call SetSockOpt: %s", err)
- }
- return resp.GetRet(), unix.Errno(resp.GetErrno_())
-}
-
-// SetSockOpt calls setsockopt on the DUT and causes a fatal test failure if it
-// doesn't succeed. If more control over the timeout or error handling is
-// needed, use SetSockOptWithErrno. Because endianness and the width of values
-// might differ between the testbench and DUT architectures, prefer to use a
-// more specific SetSockOptXxx function.
-func (dut *DUT) SetSockOpt(t *testing.T, sockfd, level, optname int32, optval []byte) {
- t.Helper()
-
- ret, err := dut.SetSockOptWithErrno(context.Background(), t, sockfd, level, optname, optval)
- if ret != 0 {
- t.Fatalf("failed to SetSockOpt: %s", err)
- }
-}
-
-// SetSockOptWithErrno calls setsockopt on the DUT. Because endianness and the
-// width of values might differ between the testbench and DUT architectures,
-// prefer to use a more specific SetSockOptXxxWithErrno function.
-func (dut *DUT) SetSockOptWithErrno(ctx context.Context, t *testing.T, sockfd, level, optname int32, optval []byte) (int32, error) {
- t.Helper()
-
- return dut.setSockOpt(ctx, t, sockfd, level, optname, &pb.SockOptVal{Val: &pb.SockOptVal_Bytesval{optval}})
-}
-
-// SetSockOptInt calls setsockopt on the DUT and causes a fatal test failure
-// if it doesn't succeed. If more control over the int optval or error handling
-// is needed, use SetSockOptIntWithErrno.
-func (dut *DUT) SetSockOptInt(t *testing.T, sockfd, level, optname, optval int32) {
- t.Helper()
-
- ret, err := dut.SetSockOptIntWithErrno(context.Background(), t, sockfd, level, optname, optval)
- if ret != 0 {
- t.Fatalf("failed to SetSockOptInt: %s", err)
- }
-}
-
-// SetSockOptIntWithErrno calls setsockopt with an integer optval.
-func (dut *DUT) SetSockOptIntWithErrno(ctx context.Context, t *testing.T, sockfd, level, optname, optval int32) (int32, error) {
- t.Helper()
-
- return dut.setSockOpt(ctx, t, sockfd, level, optname, &pb.SockOptVal{Val: &pb.SockOptVal_Intval{optval}})
-}
-
-// SetSockOptTimeval calls setsockopt on the DUT and causes a fatal test failure
-// if it doesn't succeed. If more control over the timeout or error handling is
-// needed, use SetSockOptTimevalWithErrno.
-func (dut *DUT) SetSockOptTimeval(t *testing.T, sockfd, level, optname int32, tv *unix.Timeval) {
- t.Helper()
-
- ret, err := dut.SetSockOptTimevalWithErrno(context.Background(), t, sockfd, level, optname, tv)
- if ret != 0 {
- t.Fatalf("failed to SetSockOptTimeval: %s", err)
- }
-}
-
-// SetSockOptTimevalWithErrno calls setsockopt with the timeval converted to
-// bytes.
-func (dut *DUT) SetSockOptTimevalWithErrno(ctx context.Context, t *testing.T, sockfd, level, optname int32, tv *unix.Timeval) (int32, error) {
- t.Helper()
-
- timeval := pb.Timeval{
- Seconds: int64(tv.Sec),
- Microseconds: int64(tv.Usec),
- }
- return dut.setSockOpt(ctx, t, sockfd, level, optname, &pb.SockOptVal{Val: &pb.SockOptVal_Timeval{&timeval}})
-}
-
-// Socket calls socket on the DUT and returns the file descriptor. If socket
-// fails on the DUT, the test ends.
-func (dut *DUT) Socket(t *testing.T, domain, typ, proto int32) int32 {
- t.Helper()
-
- fd, err := dut.SocketWithErrno(t, domain, typ, proto)
- if fd < 0 {
- t.Fatalf("failed to create socket: %s", err)
- }
- return fd
-}
-
-// SocketWithErrno calls socket on the DUT and returns the fd and errno.
-func (dut *DUT) SocketWithErrno(t *testing.T, domain, typ, proto int32) (int32, error) {
- t.Helper()
-
- req := &pb.SocketRequest{
- Domain: domain,
- Type: typ,
- Protocol: proto,
- }
- resp, err := dut.posixServer.Socket(context.Background(), req)
- if err != nil {
- t.Fatalf("failed to call Socket: %s", err)
- }
- return resp.GetFd(), unix.Errno(resp.GetErrno_())
-}
-
-// Recv calls recv on the DUT and causes a fatal test failure if it doesn't
-// succeed. If more control over the timeout or error handling is needed, use
-// RecvWithErrno.
-func (dut *DUT) Recv(t *testing.T, sockfd, len, flags int32) []byte {
- t.Helper()
-
- ret, buf, err := dut.RecvWithErrno(context.Background(), t, sockfd, len, flags)
- if ret == -1 {
- t.Fatalf("failed to recv: %s", err)
- }
- return buf
-}
-
-// RecvWithErrno calls recv on the DUT.
-func (dut *DUT) RecvWithErrno(ctx context.Context, t *testing.T, sockfd, len, flags int32) (int32, []byte, error) {
- t.Helper()
-
- req := &pb.RecvRequest{
- Sockfd: sockfd,
- Len: len,
- Flags: flags,
- }
- resp, err := dut.posixServer.Recv(ctx, req)
- if err != nil {
- t.Fatalf("failed to call Recv: %s", err)
- }
- return resp.GetRet(), resp.GetBuf(), unix.Errno(resp.GetErrno_())
-}
-
-// SetSockLingerOption sets SO_LINGER socket option on the DUT.
-func (dut *DUT) SetSockLingerOption(t *testing.T, sockfd int32, timeout time.Duration, enable bool) {
- var linger unix.Linger
- if enable {
- linger.Onoff = 1
- }
- linger.Linger = int32(timeout / time.Second)
-
- buf := make([]byte, 8)
- binary.LittleEndian.PutUint32(buf, uint32(linger.Onoff))
- binary.LittleEndian.PutUint32(buf[4:], uint32(linger.Linger))
- dut.SetSockOpt(t, sockfd, unix.SOL_SOCKET, unix.SO_LINGER, buf)
-}
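
Illustrative use of the helper above (acceptFD is hypothetical); note that the little-endian encoding assumes a little-endian DUT:

    // Make close() on the DUT linger for up to 5 seconds.
    dut.SetSockLingerOption(t, acceptFD, 5*time.Second, true /* enable */)
    dut.Close(t, acceptFD)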
-
-// Shutdown calls shutdown on the DUT and causes a fatal test failure if it
-// doesn't succeed. If more control over the timeout or error handling is
-// needed, use ShutdownWithErrno.
-func (dut *DUT) Shutdown(t *testing.T, fd, how int32) {
- t.Helper()
-
- ret, err := dut.ShutdownWithErrno(context.Background(), t, fd, how)
- if ret != 0 {
- t.Fatalf("failed to shutdown(%d, %d): %s", fd, how, err)
- }
-}
-
-// ShutdownWithErrno calls shutdown on the DUT.
-func (dut *DUT) ShutdownWithErrno(ctx context.Context, t *testing.T, fd, how int32) (int32, error) {
- t.Helper()
-
- req := &pb.ShutdownRequest{
- Fd: fd,
- How: how,
- }
- resp, err := dut.posixServer.Shutdown(ctx, req)
- if err != nil {
- t.Fatalf("failed to call Shutdown: %s", err)
- }
- if resp.GetErrno_() == 0 {
- return resp.GetRet(), nil
- }
- return resp.GetRet(), unix.Errno(resp.GetErrno_())
-}
diff --git a/test/packetimpact/testbench/dut_client.go b/test/packetimpact/testbench/dut_client.go
deleted file mode 100644
index 3b69b28aa..000000000
--- a/test/packetimpact/testbench/dut_client.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.1
-// +build go1.1
-
-package testbench
-
-import (
- "google.golang.org/grpc"
- pb "gvisor.dev/gvisor/test/packetimpact/proto/posix_server_go_proto"
-)
-
-// POSIXClient is a gRPC client for the Posix service.
-type POSIXClient pb.PosixClient
-
-// NewPOSIXClient makes a new gRPC client for the POSIX service.
-func NewPOSIXClient(c grpc.ClientConnInterface) POSIXClient {
- return pb.NewPosixClient(c)
-}
diff --git a/test/packetimpact/testbench/layers.go b/test/packetimpact/testbench/layers.go
deleted file mode 100644
index 2644b3248..000000000
--- a/test/packetimpact/testbench/layers.go
+++ /dev/null
@@ -1,1602 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package testbench
-
-import (
- "encoding/binary"
- "encoding/hex"
- "fmt"
- "reflect"
- "strings"
-
- "github.com/google/go-cmp/cmp"
- "github.com/google/go-cmp/cmp/cmpopts"
- "go.uber.org/multierr"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
- "gvisor.dev/gvisor/pkg/tcpip/header"
-)
-
-// Layer is the interface that all encapsulations must implement.
-//
-// A Layer is an encapsulation in a packet, such as TCP, IPv4, IPv6, etc. A
-// Layer contains all the fields of the encapsulation. Each field is a pointer
-// and may be nil.
-type Layer interface {
- fmt.Stringer
-
- // ToBytes converts the Layer into bytes. In places where the Layer's field
- // isn't nil, the value that is pointed to is used. When the field is nil, a
- // reasonable default for the Layer is used. For example, "64" for IPv4 TTL
- // and a calculated checksum for TCP or IP. Some layers require information
- // from the previous or next layers in order to compute a default, such as
- // TCP's checksum or Ethernet's type, so each Layer has a doubly-linked list
- // to the layer's neighbors.
- ToBytes() ([]byte, error)
-
- // match checks if the current Layer matches the provided Layer. If either
- // Layer has a nil in a given field, that field is considered matching.
- // Otherwise, the values pointed to by the fields must match. The LayerBase is
- // ignored.
- match(Layer) bool
-
- // length in bytes of the current encapsulation
- length() int
-
- // next gets a pointer to the encapsulated Layer.
- next() Layer
-
- // prev gets a pointer to the Layer encapsulating this one.
- Prev() Layer
-
- // setNext sets the pointer to the encapsulated Layer.
- setNext(Layer)
-
- // setPrev sets the pointer to the Layer encapsulating this one.
- setPrev(Layer)
-
- // merge overrides the values in the interface with the provided values.
- merge(Layer) error
-}
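
In practice a Layer is populated with only the fields a test cares about, using the pointer helpers defined later in this file; everything left nil falls back to the defaults chosen by ToBytes. A tiny sketch (illustrative):

    // Only TTL is overridden; checksum, total length, protocol, etc. are
    // computed by ToBytes from the neighboring layers.
    ip := testbench.IPv4{TTL: testbench.Uint8(1)}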
-
-// LayerBase is the common elements of all layers.
-type LayerBase struct {
- nextLayer Layer
- prevLayer Layer
-}
-
-func (lb *LayerBase) next() Layer {
- return lb.nextLayer
-}
-
-// Prev returns the previous layer.
-func (lb *LayerBase) Prev() Layer {
- return lb.prevLayer
-}
-
-func (lb *LayerBase) setNext(l Layer) {
- lb.nextLayer = l
-}
-
-func (lb *LayerBase) setPrev(l Layer) {
- lb.prevLayer = l
-}
-
-// equalLayer reports whether two Layer structs match, ignoring fields in
-// which either input has a nil and also ignoring the LayerBase of the inputs.
-func equalLayer(x, y Layer) bool {
- if x == nil || y == nil {
- return true
- }
- // opt ignores comparison pairs where either of the inputs is a nil.
- opt := cmp.FilterValues(func(x, y interface{}) bool {
- for _, l := range []interface{}{x, y} {
- v := reflect.ValueOf(l)
- if (v.Kind() == reflect.Ptr || v.Kind() == reflect.Slice) && v.IsNil() {
- return true
- }
- }
- return false
- }, cmp.Ignore())
- return cmp.Equal(x, y, opt, cmpopts.IgnoreTypes(LayerBase{}))
-}
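
For instance (an in-package sketch), an expectation whose fields are all nil matches any received layer of the same type, because every nil field is treated as a wildcard:

    // b has only nil fields, so it matches regardless of a's addresses.
    a := &Ether{SrcAddr: LinkAddress("\x02\x00\x00\x00\x00\x01")}
    b := &Ether{}
    _ = equalLayer(a, b) // true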
-
-// mergeLayer merges y into x. For any field in which y has a non-nil value,
-// that value overwrites the corresponding field in x.
-func mergeLayer(x, y Layer) error {
- if y == nil {
- return nil
- }
- if reflect.TypeOf(x) != reflect.TypeOf(y) {
- return fmt.Errorf("can't merge %T into %T", y, x)
- }
- vx := reflect.ValueOf(x).Elem()
- vy := reflect.ValueOf(y).Elem()
- t := vy.Type()
- for i := 0; i < vy.NumField(); i++ {
- t := t.Field(i)
- if t.Anonymous {
- // Ignore the LayerBase in the Layer struct.
- continue
- }
- v := vy.Field(i)
- if v.IsNil() {
- continue
- }
- vx.Field(i).Set(v)
- }
- return nil
-}
-
-func stringLayer(l Layer) string {
- v := reflect.ValueOf(l).Elem()
- t := v.Type()
- var ret []string
- for i := 0; i < v.NumField(); i++ {
- t := t.Field(i)
- if t.Anonymous {
- // Ignore the LayerBase in the Layer struct.
- continue
- }
- v := v.Field(i)
- if v.IsNil() {
- continue
- }
- v = reflect.Indirect(v)
- if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 {
- ret = append(ret, fmt.Sprintf("%s:\n%v", t.Name, hex.Dump(v.Bytes())))
- } else {
- ret = append(ret, fmt.Sprintf("%s:%v", t.Name, v))
- }
- }
- return fmt.Sprintf("&%s{%s}", t, strings.Join(ret, " "))
-}
-
-// Ether can construct and match an ethernet encapsulation.
-type Ether struct {
- LayerBase
- SrcAddr *tcpip.LinkAddress
- DstAddr *tcpip.LinkAddress
- Type *tcpip.NetworkProtocolNumber
-}
-
-func (l *Ether) String() string {
- return stringLayer(l)
-}
-
-// ToBytes implements Layer.ToBytes.
-func (l *Ether) ToBytes() ([]byte, error) {
- b := make([]byte, header.EthernetMinimumSize)
- h := header.Ethernet(b)
- fields := &header.EthernetFields{}
- if l.SrcAddr != nil {
- fields.SrcAddr = *l.SrcAddr
- }
- if l.DstAddr != nil {
- fields.DstAddr = *l.DstAddr
- }
- if l.Type != nil {
- fields.Type = *l.Type
- } else {
- switch n := l.next().(type) {
- case *IPv4:
- fields.Type = header.IPv4ProtocolNumber
- case *IPv6:
- fields.Type = header.IPv6ProtocolNumber
- default:
- return nil, fmt.Errorf("ethernet header's next layer is unrecognized: %#v", n)
- }
- }
- h.Encode(fields)
- return h, nil
-}
-
-// LinkAddress is a helper routine that allocates a new tcpip.LinkAddress value
-// to store v and returns a pointer to it.
-func LinkAddress(v tcpip.LinkAddress) *tcpip.LinkAddress {
- return &v
-}
-
-// NetworkProtocolNumber is a helper routine that allocates a new
-// tcpip.NetworkProtocolNumber value to store v and returns a pointer to it.
-func NetworkProtocolNumber(v tcpip.NetworkProtocolNumber) *tcpip.NetworkProtocolNumber {
- return &v
-}
-
-// layerParser parses the input bytes and returns a Layer along with the next
-// layerParser to run. If there is no more parsing to do, the returned
-// layerParser is nil.
-type layerParser func([]byte) (Layer, layerParser)
-
-// parse parses bytes starting with the first layerParser and using successive
-// layerParsers until all the bytes are parsed.
-func parse(parser layerParser, b []byte) Layers {
- var layers Layers
- for {
- var layer Layer
- layer, parser = parser(b)
- layers = append(layers, layer)
- if parser == nil {
- break
- }
- b = b[layer.length():]
- }
- layers.linkLayers()
- return layers
-}
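
A sketch of how parse is driven from a sniffed frame (illustrative; frameBytes is hypothetical):

    // Start at the ethernet header and keep descending until the payload.
    layers := parse(parseEther, frameBytes)
    // layers now holds e.g. [&Ether{...}, &IPv4{...}, &TCP{...}, &Payload{...}].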
-
-// parseEther parses the bytes assuming that they start with an ethernet header
-// and continues parsing further encapsulations.
-func parseEther(b []byte) (Layer, layerParser) {
- h := header.Ethernet(b)
- ether := Ether{
- SrcAddr: LinkAddress(h.SourceAddress()),
- DstAddr: LinkAddress(h.DestinationAddress()),
- Type: NetworkProtocolNumber(h.Type()),
- }
- var nextParser layerParser
- switch h.Type() {
- case header.IPv4ProtocolNumber:
- nextParser = parseIPv4
- case header.IPv6ProtocolNumber:
- nextParser = parseIPv6
- default:
- // Assume that the rest is a payload.
- nextParser = parsePayload
- }
- return &ether, nextParser
-}
-
-func (l *Ether) match(other Layer) bool {
- return equalLayer(l, other)
-}
-
-func (l *Ether) length() int {
- return header.EthernetMinimumSize
-}
-
-// merge implements Layer.merge.
-func (l *Ether) merge(other Layer) error {
- return mergeLayer(l, other)
-}
-
-// IPv4 can construct and match an IPv4 encapsulation.
-type IPv4 struct {
- LayerBase
- IHL *uint8
- TOS *uint8
- TotalLength *uint16
- ID *uint16
- Flags *uint8
- FragmentOffset *uint16
- TTL *uint8
- Protocol *uint8
- Checksum *uint16
- SrcAddr *tcpip.Address
- DstAddr *tcpip.Address
- Options *header.IPv4Options
-}
-
-func (l *IPv4) String() string {
- return stringLayer(l)
-}
-
-// ToBytes implements Layer.ToBytes.
-func (l *IPv4) ToBytes() ([]byte, error) {
- // An IPv4 header is variable length depending on the size of the Options.
- hdrLen := header.IPv4MinimumSize
- if l.Options != nil {
- if len(*l.Options)%4 != 0 {
- return nil, fmt.Errorf("invalid header options '%x (len=%d)'; must be 32 bit aligned", *l.Options, len(*l.Options))
- }
- hdrLen += len(*l.Options)
- if hdrLen > header.IPv4MaximumHeaderSize {
- return nil, fmt.Errorf("IPv4 Options %d bytes, Max %d", len(*l.Options), header.IPv4MaximumOptionsSize)
- }
- }
- b := make([]byte, hdrLen)
- h := header.IPv4(b)
- fields := &header.IPv4Fields{
- TOS: 0,
- TotalLength: 0,
- ID: 0,
- Flags: 0,
- FragmentOffset: 0,
- TTL: 64,
- Protocol: 0,
- Checksum: 0,
- SrcAddr: tcpip.Address(""),
- DstAddr: tcpip.Address(""),
- Options: nil,
- }
- if l.TOS != nil {
- fields.TOS = *l.TOS
- }
- if l.TotalLength != nil {
- fields.TotalLength = *l.TotalLength
- } else {
- fields.TotalLength = uint16(l.length())
- current := l.next()
- for current != nil {
- fields.TotalLength += uint16(current.length())
- current = current.next()
- }
- }
- if l.ID != nil {
- fields.ID = *l.ID
- }
- if l.Flags != nil {
- fields.Flags = *l.Flags
- }
- if l.FragmentOffset != nil {
- fields.FragmentOffset = *l.FragmentOffset
- }
- if l.TTL != nil {
- fields.TTL = *l.TTL
- }
- if l.Protocol != nil {
- fields.Protocol = *l.Protocol
- } else {
- switch n := l.next().(type) {
- case *TCP:
- fields.Protocol = uint8(header.TCPProtocolNumber)
- case *UDP:
- fields.Protocol = uint8(header.UDPProtocolNumber)
- case *ICMPv4:
- fields.Protocol = uint8(header.ICMPv4ProtocolNumber)
- default:
- // We can add support for more protocols as needed.
- return nil, fmt.Errorf("ipv4 header's next layer is unrecognized: %#v", n)
- }
- }
- if l.SrcAddr != nil {
- fields.SrcAddr = *l.SrcAddr
- }
- if l.DstAddr != nil {
- fields.DstAddr = *l.DstAddr
- }
-
- h.Encode(fields)
-
- // Put raw option bytes from test definition in header. Options as raw bytes
- // allows us to serialize malformed options, which is not possible with
- // the provided serialization functions.
- if l.Options != nil {
- h.SetHeaderLength(h.HeaderLength() + uint8(len(*l.Options)))
- if got, want := copy(h.Options(), *l.Options), len(*l.Options); got != want {
- return nil, fmt.Errorf("failed to copy option bytes into header, got %d want %d", got, want)
- }
- }
-
- // Encode cannot set this incorrectly so we need to overwrite what it wrote
- // in order to test handling of a bad IHL value.
- if l.IHL != nil {
- h.SetHeaderLength(*l.IHL)
- }
-
- if l.Checksum == nil {
- h.SetChecksum(^h.CalculateChecksum())
- } else {
- h.SetChecksum(*l.Checksum)
- }
-
- return h, nil
-}
-
-// Uint16 is a helper routine that allocates a new
-// uint16 value to store v and returns a pointer to it.
-func Uint16(v uint16) *uint16 {
- return &v
-}
-
-// Uint8 is a helper routine that allocates a new
-// uint8 value to store v and returns a pointer to it.
-func Uint8(v uint8) *uint8 {
- return &v
-}
-
-// TCPFlags is a helper routine that allocates a new
-// header.TCPFlags value to store v and returns a pointer to it.
-func TCPFlags(v header.TCPFlags) *header.TCPFlags {
- return &v
-}
-
-// Address is a helper routine that allocates a new tcpip.Address value to
-// store v and returns a pointer to it.
-func Address(v tcpip.Address) *tcpip.Address {
- return &v
-}
-
-// parseIPv4 parses the bytes assuming that they start with an ipv4 header and
-// continues parsing further encapsulations.
-func parseIPv4(b []byte) (Layer, layerParser) {
- h := header.IPv4(b)
- options := h.Options()
- tos, _ := h.TOS()
- ipv4 := IPv4{
- IHL: Uint8(h.HeaderLength()),
- TOS: &tos,
- TotalLength: Uint16(h.TotalLength()),
- ID: Uint16(h.ID()),
- Flags: Uint8(h.Flags()),
- FragmentOffset: Uint16(h.FragmentOffset()),
- TTL: Uint8(h.TTL()),
- Protocol: Uint8(h.Protocol()),
- Checksum: Uint16(h.Checksum()),
- SrcAddr: Address(h.SourceAddress()),
- DstAddr: Address(h.DestinationAddress()),
- Options: &options,
- }
- var nextParser layerParser
- // If it is a fragment, don't treat it as having a transport protocol.
- if h.FragmentOffset() != 0 || h.More() {
- return &ipv4, parsePayload
- }
- switch h.TransportProtocol() {
- case header.TCPProtocolNumber:
- nextParser = parseTCP
- case header.UDPProtocolNumber:
- nextParser = parseUDP
- case header.ICMPv4ProtocolNumber:
- nextParser = parseICMPv4
- default:
- // Assume that the rest is a payload.
- nextParser = parsePayload
- }
- return &ipv4, nextParser
-}
-
-func (l *IPv4) match(other Layer) bool {
- return equalLayer(l, other)
-}
-
-func (l *IPv4) length() int {
- if l.IHL == nil {
- return header.IPv4MinimumSize
- }
- return int(*l.IHL)
-}
-
-// merge implements Layer.merge.
-func (l *IPv4) merge(other Layer) error {
- return mergeLayer(l, other)
-}
-
-// IPv6 can construct and match an IPv6 encapsulation.
-type IPv6 struct {
- LayerBase
- TrafficClass *uint8
- FlowLabel *uint32
- PayloadLength *uint16
- NextHeader *uint8
- HopLimit *uint8
- SrcAddr *tcpip.Address
- DstAddr *tcpip.Address
-}
-
-func (l *IPv6) String() string {
- return stringLayer(l)
-}
-
-// ToBytes implements Layer.ToBytes.
-func (l *IPv6) ToBytes() ([]byte, error) {
- b := make([]byte, header.IPv6MinimumSize)
- h := header.IPv6(b)
- fields := &header.IPv6Fields{
- HopLimit: 64,
- }
- if l.TrafficClass != nil {
- fields.TrafficClass = *l.TrafficClass
- }
- if l.FlowLabel != nil {
- fields.FlowLabel = *l.FlowLabel
- }
- if l.PayloadLength != nil {
- fields.PayloadLength = *l.PayloadLength
- } else {
- for current := l.next(); current != nil; current = current.next() {
- fields.PayloadLength += uint16(current.length())
- }
- }
- if l.NextHeader != nil {
- fields.TransportProtocol = tcpip.TransportProtocolNumber(*l.NextHeader)
- } else {
- nh, err := nextHeaderByLayer(l.next())
- if err != nil {
- return nil, err
- }
- fields.TransportProtocol = tcpip.TransportProtocolNumber(nh)
- }
- if l.HopLimit != nil {
- fields.HopLimit = *l.HopLimit
- }
- if l.SrcAddr != nil {
- fields.SrcAddr = *l.SrcAddr
- }
- if l.DstAddr != nil {
- fields.DstAddr = *l.DstAddr
- }
- h.Encode(fields)
- return h, nil
-}
-
-// nextIPv6PayloadParser finds the corresponding parser for nextHeader.
-func nextIPv6PayloadParser(nextHeader uint8) layerParser {
- switch tcpip.TransportProtocolNumber(nextHeader) {
- case header.TCPProtocolNumber:
- return parseTCP
- case header.UDPProtocolNumber:
- return parseUDP
- case header.ICMPv6ProtocolNumber:
- return parseICMPv6
- }
- switch header.IPv6ExtensionHeaderIdentifier(nextHeader) {
- case header.IPv6HopByHopOptionsExtHdrIdentifier:
- return parseIPv6HopByHopOptionsExtHdr
- case header.IPv6DestinationOptionsExtHdrIdentifier:
- return parseIPv6DestinationOptionsExtHdr
- case header.IPv6FragmentExtHdrIdentifier:
- return parseIPv6FragmentExtHdr
- }
- return parsePayload
-}
-
-// parseIPv6 parses the bytes assuming that they start with an ipv6 header and
-// continues parsing further encapsulations.
-func parseIPv6(b []byte) (Layer, layerParser) {
- h := header.IPv6(b)
- tos, flowLabel := h.TOS()
- ipv6 := IPv6{
- TrafficClass: &tos,
- FlowLabel: &flowLabel,
- PayloadLength: Uint16(h.PayloadLength()),
- NextHeader: Uint8(h.NextHeader()),
- HopLimit: Uint8(h.HopLimit()),
- SrcAddr: Address(h.SourceAddress()),
- DstAddr: Address(h.DestinationAddress()),
- }
- nextParser := nextIPv6PayloadParser(h.NextHeader())
- return &ipv6, nextParser
-}
-
-func (l *IPv6) match(other Layer) bool {
- return equalLayer(l, other)
-}
-
-func (l *IPv6) length() int {
- return header.IPv6MinimumSize
-}
-
-// merge overrides the values in l with the values from other but only in fields
-// where the value is not nil.
-func (l *IPv6) merge(other Layer) error {
- return mergeLayer(l, other)
-}
-
-// IPv6HopByHopOptionsExtHdr can construct and match an IPv6HopByHopOptions
-// Extension Header.
-type IPv6HopByHopOptionsExtHdr struct {
- LayerBase
- NextHeader *header.IPv6ExtensionHeaderIdentifier
- Options []byte
-}
-
-// IPv6DestinationOptionsExtHdr can construct and match an IPv6DestinationOptions
-// Extension Header.
-type IPv6DestinationOptionsExtHdr struct {
- LayerBase
- NextHeader *header.IPv6ExtensionHeaderIdentifier
- Options []byte
-}
-
-// IPv6FragmentExtHdr can construct and match an IPv6 Fragment Extension Header.
-type IPv6FragmentExtHdr struct {
- LayerBase
- NextHeader *header.IPv6ExtensionHeaderIdentifier
- FragmentOffset *uint16
- MoreFragments *bool
- Identification *uint32
-}
-
-// nextHeaderByLayer finds the correct next header protocol value for layer l.
-func nextHeaderByLayer(l Layer) (uint8, error) {
- if l == nil {
- return uint8(header.IPv6NoNextHeaderIdentifier), nil
- }
- switch l.(type) {
- case *TCP:
- return uint8(header.TCPProtocolNumber), nil
- case *UDP:
- return uint8(header.UDPProtocolNumber), nil
- case *ICMPv6:
- return uint8(header.ICMPv6ProtocolNumber), nil
- case *Payload:
- return uint8(header.IPv6NoNextHeaderIdentifier), nil
- case *IPv6HopByHopOptionsExtHdr:
- return uint8(header.IPv6HopByHopOptionsExtHdrIdentifier), nil
- case *IPv6DestinationOptionsExtHdr:
- return uint8(header.IPv6DestinationOptionsExtHdrIdentifier), nil
- case *IPv6FragmentExtHdr:
- return uint8(header.IPv6FragmentExtHdrIdentifier), nil
- default:
- // TODO(b/161005083): Support more protocols as needed.
- return 0, fmt.Errorf("failed to deduce the IPv6 header's next protocol: %T", l)
- }
-}
-
-// ipv6OptionsExtHdrToBytes serializes an options extension header into bytes.
-func ipv6OptionsExtHdrToBytes(nextHeader *header.IPv6ExtensionHeaderIdentifier, nextLayer Layer, options []byte) ([]byte, error) {
- length := len(options) + 2
- if length%8 != 0 {
- return nil, fmt.Errorf("IPv6 extension headers must be a multiple of 8 octets long, but the length given: %d, options: %s", length, hex.Dump(options))
- }
- bytes := make([]byte, length)
- if nextHeader != nil {
- bytes[0] = byte(*nextHeader)
- } else {
- nh, err := nextHeaderByLayer(nextLayer)
- if err != nil {
- return nil, err
- }
- bytes[0] = nh
- }
- // The ExtHdrLen field is the length of the extension header
- // in 8-octet units, not including the first 8 octets.
- // https://tools.ietf.org/html/rfc2460#section-4.3
- // https://tools.ietf.org/html/rfc2460#section-4.6
- bytes[1] = uint8((length - 8) / 8)
- copy(bytes[2:], options)
- return bytes, nil
-}
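
A worked example of the length encoding above (illustrative): 6 option bytes plus the 2 fixed bytes total 8 octets, so the ExtHdrLen octet is (8-8)/8 = 0.

    // nextHeaderByLayer(nil) yields the "no next header" identifier.
    hdr, _ := ipv6OptionsExtHdrToBytes(nil, nil, make([]byte, 6))
    // len(hdr) == 8 and hdr[1] == 0.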
-
-// IPv6ExtHdrIdent is a helper routine that allocates a new
-// header.IPv6ExtensionHeaderIdentifier value to store v and returns a pointer
-// to it.
-func IPv6ExtHdrIdent(id header.IPv6ExtensionHeaderIdentifier) *header.IPv6ExtensionHeaderIdentifier {
- return &id
-}
-
-// ToBytes implements Layer.ToBytes.
-func (l *IPv6HopByHopOptionsExtHdr) ToBytes() ([]byte, error) {
- return ipv6OptionsExtHdrToBytes(l.NextHeader, l.next(), l.Options)
-}
-
-// ToBytes implements Layer.ToBytes.
-func (l *IPv6DestinationOptionsExtHdr) ToBytes() ([]byte, error) {
- return ipv6OptionsExtHdrToBytes(l.NextHeader, l.next(), l.Options)
-}
-
-// ToBytes implements Layer.ToBytes.
-func (l *IPv6FragmentExtHdr) ToBytes() ([]byte, error) {
- var offset, mflag uint16
- var ident uint32
- bytes := make([]byte, header.IPv6FragmentExtHdrLength)
- if l.NextHeader != nil {
- bytes[0] = byte(*l.NextHeader)
- } else {
- nh, err := nextHeaderByLayer(l.next())
- if err != nil {
- return nil, err
- }
- bytes[0] = nh
- }
- bytes[1] = 0 // reserved
- if l.MoreFragments != nil && *l.MoreFragments {
- mflag = 1
- }
- if l.FragmentOffset != nil {
- offset = *l.FragmentOffset
- }
- if l.Identification != nil {
- ident = *l.Identification
- }
- offsetAndMflag := offset<<3 | mflag
- binary.BigEndian.PutUint16(bytes[2:], offsetAndMflag)
- binary.BigEndian.PutUint32(bytes[4:], ident)
-
- return bytes, nil
-}
-
-// parseIPv6ExtHdr parses an IPv6 extension header and returns the NextHeader
-// field, the rest of the payload and a parser function for the corresponding
-// next extension header.
-func parseIPv6ExtHdr(b []byte) (header.IPv6ExtensionHeaderIdentifier, []byte, layerParser) {
- nextHeader := b[0]
- // For HopByHop and Destination options extension headers,
- // this field is the length of the extension header in
- // 8-octet units, not including the first 8 octets.
- // https://tools.ietf.org/html/rfc2460#section-4.3
- // https://tools.ietf.org/html/rfc2460#section-4.6
- length := b[1]*8 + 8
- data := b[2:length]
- nextParser := nextIPv6PayloadParser(nextHeader)
- return header.IPv6ExtensionHeaderIdentifier(nextHeader), data, nextParser
-}
-
-// parseIPv6HopByHopOptionsExtHdr parses the bytes assuming that they start
-// with an IPv6 HopByHop Options Extension Header.
-func parseIPv6HopByHopOptionsExtHdr(b []byte) (Layer, layerParser) {
- nextHeader, options, nextParser := parseIPv6ExtHdr(b)
- return &IPv6HopByHopOptionsExtHdr{NextHeader: &nextHeader, Options: options}, nextParser
-}
-
-// parseIPv6DestinationOptionsExtHdr parses the bytes assuming that they start
-// with an IPv6 Destination Options Extension Header.
-func parseIPv6DestinationOptionsExtHdr(b []byte) (Layer, layerParser) {
- nextHeader, options, nextParser := parseIPv6ExtHdr(b)
- return &IPv6DestinationOptionsExtHdr{NextHeader: &nextHeader, Options: options}, nextParser
-}
-
-// Bool is a helper routine that allocates a new
-// bool value to store v and returns a pointer to it.
-func Bool(v bool) *bool {
- return &v
-}
-
-// parseIPv6FragmentExtHdr parses the bytes assuming that they start
-// with an IPv6 Fragment Extension Header.
-func parseIPv6FragmentExtHdr(b []byte) (Layer, layerParser) {
- nextHeader := b[0]
- var extHdr header.IPv6FragmentExtHdr
- copy(extHdr[:], b[2:])
- fragLayer := IPv6FragmentExtHdr{
- NextHeader: IPv6ExtHdrIdent(header.IPv6ExtensionHeaderIdentifier(nextHeader)),
- FragmentOffset: Uint16(extHdr.FragmentOffset()),
- MoreFragments: Bool(extHdr.More()),
- Identification: Uint32(extHdr.ID()),
- }
- // If it is a fragment, we can't interpret it.
- if extHdr.FragmentOffset() != 0 || extHdr.More() {
- return &fragLayer, parsePayload
- }
- return &fragLayer, nextIPv6PayloadParser(nextHeader)
-}
-
-func (l *IPv6HopByHopOptionsExtHdr) length() int {
- return len(l.Options) + 2
-}
-
-func (l *IPv6HopByHopOptionsExtHdr) match(other Layer) bool {
- return equalLayer(l, other)
-}
-
-// merge overrides the values in l with the values from other but only in fields
-// where the value is not nil.
-func (l *IPv6HopByHopOptionsExtHdr) merge(other Layer) error {
- return mergeLayer(l, other)
-}
-
-func (l *IPv6HopByHopOptionsExtHdr) String() string {
- return stringLayer(l)
-}
-
-func (l *IPv6DestinationOptionsExtHdr) length() int {
- return len(l.Options) + 2
-}
-
-func (l *IPv6DestinationOptionsExtHdr) match(other Layer) bool {
- return equalLayer(l, other)
-}
-
-// merge overrides the values in l with the values from other but only in fields
-// where the value is not nil.
-func (l *IPv6DestinationOptionsExtHdr) merge(other Layer) error {
- return mergeLayer(l, other)
-}
-
-func (l *IPv6DestinationOptionsExtHdr) String() string {
- return stringLayer(l)
-}
-
-func (*IPv6FragmentExtHdr) length() int {
- return header.IPv6FragmentExtHdrLength
-}
-
-func (l *IPv6FragmentExtHdr) match(other Layer) bool {
- return equalLayer(l, other)
-}
-
-// merge overrides the values in l with the values from other but only in fields
-// where the value is not nil.
-func (l *IPv6FragmentExtHdr) merge(other Layer) error {
- return mergeLayer(l, other)
-}
-
-func (l *IPv6FragmentExtHdr) String() string {
- return stringLayer(l)
-}
-
-// ICMPv6 can construct and match an ICMPv6 encapsulation.
-type ICMPv6 struct {
- LayerBase
- Type *header.ICMPv6Type
- Code *header.ICMPv6Code
- Checksum *uint16
- Ident *uint16 // Only in Echo Request/Reply.
- Pointer *uint32 // Only in Parameter Problem.
- Payload []byte
-}
-
-func (l *ICMPv6) String() string {
- // TODO(eyalsoha): Do something smarter here when *l.Type is ParameterProblem?
- // We could parse the contents of the Payload as if it were an IPv6 packet.
- return stringLayer(l)
-}
-
-// ToBytes implements Layer.ToBytes.
-func (l *ICMPv6) ToBytes() ([]byte, error) {
- b := make([]byte, header.ICMPv6MinimumSize+len(l.Payload))
- h := header.ICMPv6(b)
- if l.Type != nil {
- h.SetType(*l.Type)
- }
- if l.Code != nil {
- h.SetCode(*l.Code)
- }
- if n := copy(h.Payload(), l.Payload); n != len(l.Payload) {
- panic(fmt.Sprintf("copied %d bytes, expected to copy %d bytes", n, len(l.Payload)))
- }
- typ := h.Type()
- switch typ {
- case header.ICMPv6EchoRequest, header.ICMPv6EchoReply:
- if l.Ident != nil {
- h.SetIdent(*l.Ident)
- }
- case header.ICMPv6ParamProblem:
- if l.Pointer != nil {
- h.SetTypeSpecific(*l.Pointer)
- }
- }
- if l.Checksum != nil {
- h.SetChecksum(*l.Checksum)
- } else {
-		// The ICMPv6 header may not follow the IPv6 header immediately; there
-		// could be one or more extension headers in between. Search backwards
-		// for the IPv6 header, whose addresses are needed for the checksum.
- for layer := l.Prev(); layer != nil; layer = layer.Prev() {
- if ipv6, ok := layer.(*IPv6); ok {
- h.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
- Header: h[:header.ICMPv6PayloadOffset],
- Src: *ipv6.SrcAddr,
- Dst: *ipv6.DstAddr,
- PayloadCsum: header.Checksum(l.Payload, 0 /* initial */),
- PayloadLen: len(l.Payload),
- }))
- break
- }
- }
- }
- return h, nil
-}
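
A hedged usage sketch of the behavior described above: when Checksum is left nil, ToBytes walks Prev() to the enclosing IPv6 layer and computes the checksum itself. The helper name and its use are illustrative only and assume the testbench package context.

// buildEchoRequest is a hypothetical helper showing the automatic checksum:
// the ICMPv6 layer's Checksum is nil, so ToBytes derives it from the IPv6
// layer's addresses.
func buildEchoRequest(src, dst tcpip.Address) ([]byte, error) {
	layers := Layers{
		&IPv6{SrcAddr: Address(src), DstAddr: Address(dst)},
		&ICMPv6{
			Type:    ICMPv6Type(header.ICMPv6EchoRequest),
			Ident:   Uint16(1),
			Payload: []byte("ping"),
		},
	}
	return layers.ToBytes()
}
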
-
-// ICMPv6Type is a helper routine that allocates a new ICMPv6Type value to store
-// v and returns a pointer to it.
-func ICMPv6Type(v header.ICMPv6Type) *header.ICMPv6Type {
- return &v
-}
-
-// ICMPv6Code is a helper routine that allocates a new ICMPv6Code value to store
-// v and returns a pointer to it.
-func ICMPv6Code(v header.ICMPv6Code) *header.ICMPv6Code {
- return &v
-}
-
-// parseICMPv6 parses the bytes assuming that they start with an ICMPv6 header.
-func parseICMPv6(b []byte) (Layer, layerParser) {
- h := header.ICMPv6(b)
- msgType := h.Type()
- icmpv6 := ICMPv6{
- Type: ICMPv6Type(msgType),
- Code: ICMPv6Code(h.Code()),
- Checksum: Uint16(h.Checksum()),
- Payload: h.Payload(),
- }
- switch msgType {
- case header.ICMPv6EchoRequest, header.ICMPv6EchoReply:
- icmpv6.Ident = Uint16(h.Ident())
- case header.ICMPv6ParamProblem:
- icmpv6.Pointer = Uint32(h.TypeSpecific())
- }
- return &icmpv6, nil
-}
-
-func (l *ICMPv6) match(other Layer) bool {
- return equalLayer(l, other)
-}
-
-func (l *ICMPv6) length() int {
- return header.ICMPv6MinimumSize + len(l.Payload)
-}
-
-// merge overrides the values in l with the values from other but only in fields
-// where the value is not nil.
-func (l *ICMPv6) merge(other Layer) error {
- return mergeLayer(l, other)
-}
-
-// ICMPv4 can construct and match an ICMPv4 encapsulation.
-type ICMPv4 struct {
- LayerBase
- Type *header.ICMPv4Type
- Code *header.ICMPv4Code
- Checksum *uint16
- Ident *uint16 // Only in Echo Request/Reply.
- Sequence *uint16 // Only in Echo Request/Reply.
- Pointer *uint8 // Only in Parameter Problem.
- Payload []byte
-}
-
-func (l *ICMPv4) String() string {
- return stringLayer(l)
-}
-
-// ICMPv4Type is a helper routine that allocates a new header.ICMPv4Type value
-// to store t and returns a pointer to it.
-func ICMPv4Type(t header.ICMPv4Type) *header.ICMPv4Type {
- return &t
-}
-
-// ICMPv4Code is a helper routine that allocates a new header.ICMPv4Code value
-// to store t and returns a pointer to it.
-func ICMPv4Code(t header.ICMPv4Code) *header.ICMPv4Code {
- return &t
-}
-
-// ToBytes implements Layer.ToBytes.
-func (l *ICMPv4) ToBytes() ([]byte, error) {
- b := make([]byte, header.ICMPv4MinimumSize+len(l.Payload))
- h := header.ICMPv4(b)
- if l.Type != nil {
- h.SetType(*l.Type)
- }
- if l.Code != nil {
- h.SetCode(*l.Code)
- }
- if n := copy(h.Payload(), l.Payload); n != len(l.Payload) {
- panic(fmt.Sprintf("wrong number of bytes copied into h.Payload(): got = %d, want = %d", n, len(l.Payload)))
- }
- typ := h.Type()
- switch typ {
- case header.ICMPv4EchoReply, header.ICMPv4Echo:
- if l.Ident != nil {
- h.SetIdent(*l.Ident)
- }
- if l.Sequence != nil {
- h.SetSequence(*l.Sequence)
- }
- case header.ICMPv4ParamProblem:
- if l.Pointer != nil {
- h.SetPointer(*l.Pointer)
- }
- }
-
- // The checksum must be handled last because the ICMPv4 header fields are
- // included in the computation.
- if l.Checksum != nil {
- h.SetChecksum(*l.Checksum)
- } else {
- h.SetChecksum(^header.Checksum(h, 0))
- }
-
- return h, nil
-}
-
-// parseICMPv4 parses the bytes as an ICMPv4 header, returning a Layer and a
-// parser for the encapsulated payload.
-func parseICMPv4(b []byte) (Layer, layerParser) {
- h := header.ICMPv4(b)
-
- msgType := h.Type()
- icmpv4 := ICMPv4{
- Type: ICMPv4Type(msgType),
- Code: ICMPv4Code(h.Code()),
- Checksum: Uint16(h.Checksum()),
- Payload: h.Payload(),
- }
- switch msgType {
- case header.ICMPv4EchoReply, header.ICMPv4Echo:
- icmpv4.Ident = Uint16(h.Ident())
- icmpv4.Sequence = Uint16(h.Sequence())
- case header.ICMPv4ParamProblem:
- icmpv4.Pointer = Uint8(h.Pointer())
- }
- return &icmpv4, nil
-}
-
-func (l *ICMPv4) match(other Layer) bool {
- return equalLayer(l, other)
-}
-
-func (l *ICMPv4) length() int {
- return header.ICMPv4MinimumSize + len(l.Payload)
-}
-
-// merge overrides the values in l with the values from other but only in fields
-// where the value is not nil.
-func (l *ICMPv4) merge(other Layer) error {
- return mergeLayer(l, other)
-}
-
-// TCP can construct and match a TCP encapsulation.
-type TCP struct {
- LayerBase
- SrcPort *uint16
- DstPort *uint16
- SeqNum *uint32
- AckNum *uint32
- DataOffset *uint8
- Flags *header.TCPFlags
- WindowSize *uint16
- Checksum *uint16
- UrgentPointer *uint16
- Options []byte
-}
-
-func (l *TCP) String() string {
- return stringLayer(l)
-}
-
-// ToBytes implements Layer.ToBytes.
-func (l *TCP) ToBytes() ([]byte, error) {
- b := make([]byte, l.length())
- h := header.TCP(b)
- if l.SrcPort != nil {
- h.SetSourcePort(*l.SrcPort)
- }
- if l.DstPort != nil {
- h.SetDestinationPort(*l.DstPort)
- }
- if l.SeqNum != nil {
- h.SetSequenceNumber(*l.SeqNum)
- }
- if l.AckNum != nil {
- h.SetAckNumber(*l.AckNum)
- }
- if l.DataOffset != nil {
- h.SetDataOffset(*l.DataOffset)
- } else {
- h.SetDataOffset(uint8(l.length()))
- }
- if l.Flags != nil {
- h.SetFlags(uint8(*l.Flags))
- }
- if l.WindowSize != nil {
- h.SetWindowSize(*l.WindowSize)
- } else {
- h.SetWindowSize(32768)
- }
- if l.UrgentPointer != nil {
- h.SetUrgentPoiner(*l.UrgentPointer)
- }
- copy(b[header.TCPMinimumSize:], l.Options)
- header.AddTCPOptionPadding(b[header.TCPMinimumSize:], len(l.Options))
- if l.Checksum != nil {
- h.SetChecksum(*l.Checksum)
- return h, nil
- }
- if err := setTCPChecksum(&h, l); err != nil {
- return nil, err
- }
- return h, nil
-}
-
-// totalLength returns the length of the provided layer and all following
-// layers.
-func totalLength(l Layer) int {
- var totalLength int
- for ; l != nil; l = l.next() {
- totalLength += l.length()
- }
- return totalLength
-}
-
-// payload returns a buffer.VectorisedView of l's payload.
-func payload(l Layer) (buffer.VectorisedView, error) {
- var payloadBytes buffer.VectorisedView
- for current := l.next(); current != nil; current = current.next() {
- payload, err := current.ToBytes()
- if err != nil {
-			return buffer.VectorisedView{}, fmt.Errorf("can't get bytes for next header: %w", err)
- }
- payloadBytes.AppendView(payload)
- }
- return payloadBytes, nil
-}
-
-// layerChecksum computes the partial checksum for the given transport Layer:
-// the pseudo-header checksum derived from the network layer before it, plus
-// the checksum of all the payload bytes after it.
-func layerChecksum(l Layer, protoNumber tcpip.TransportProtocolNumber) (uint16, error) {
- totalLength := uint16(totalLength(l))
- var xsum uint16
- switch p := l.Prev().(type) {
- case *IPv4:
- xsum = header.PseudoHeaderChecksum(protoNumber, *p.SrcAddr, *p.DstAddr, totalLength)
- case *IPv6:
- xsum = header.PseudoHeaderChecksum(protoNumber, *p.SrcAddr, *p.DstAddr, totalLength)
- default:
- // TODO(b/161246171): Support more protocols.
- return 0, fmt.Errorf("checksum for protocol %d is not supported when previous layer is %T", protoNumber, p)
- }
- payloadBytes, err := payload(l)
- if err != nil {
- return 0, err
- }
- xsum = header.ChecksumVV(payloadBytes, xsum)
- return xsum, nil
-}
-
-// setTCPChecksum calculates the checksum of the TCP header and sets it in h.
-func setTCPChecksum(h *header.TCP, tcp *TCP) error {
- h.SetChecksum(0)
- xsum, err := layerChecksum(tcp, header.TCPProtocolNumber)
- if err != nil {
- return err
- }
- h.SetChecksum(^h.CalculateChecksum(xsum))
- return nil
-}
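
The partial sums produced by layerChecksum are folded into the final 16-bit Internet checksum by the header package's CalculateChecksum. A minimal standalone sketch of that one's complement folding, independent of gVisor's types and using the worked example values from RFC 1071:

package main

import "fmt"

// onesComplementSum sums 16-bit words, folds the carries back in, and leaves
// the final inversion to the caller, as the Internet checksum requires.
func onesComplementSum(b []byte, initial uint32) uint16 {
	sum := initial
	for i := 0; i+1 < len(b); i += 2 {
		sum += uint32(b[i])<<8 | uint32(b[i+1])
	}
	if len(b)%2 == 1 {
		sum += uint32(b[len(b)-1]) << 8
	}
	for sum > 0xffff {
		sum = (sum >> 16) + (sum & 0xffff)
	}
	return uint16(sum)
}

func main() {
	// RFC 1071 example words 0x0001 0xf203 0xf4f5 0xf6f7 give checksum 0x220d.
	b := []byte{0x00, 0x01, 0xf2, 0x03, 0xf4, 0xf5, 0xf6, 0xf7}
	fmt.Printf("%#04x\n", ^onesComplementSum(b, 0))
}
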
-
-// Uint32 is a helper routine that allocates a new
-// uint32 value to store v and returns a pointer to it.
-func Uint32(v uint32) *uint32 {
- return &v
-}
-
-// parseTCP parses the bytes assuming that they start with a tcp header and
-// continues parsing further encapsulations.
-func parseTCP(b []byte) (Layer, layerParser) {
- h := header.TCP(b)
- tcp := TCP{
- SrcPort: Uint16(h.SourcePort()),
- DstPort: Uint16(h.DestinationPort()),
- SeqNum: Uint32(h.SequenceNumber()),
- AckNum: Uint32(h.AckNumber()),
- DataOffset: Uint8(h.DataOffset()),
- Flags: TCPFlags(h.Flags()),
- WindowSize: Uint16(h.WindowSize()),
- Checksum: Uint16(h.Checksum()),
- UrgentPointer: Uint16(h.UrgentPointer()),
- Options: b[header.TCPMinimumSize:h.DataOffset()],
- }
- return &tcp, parsePayload
-}
-
-func (l *TCP) match(other Layer) bool {
- return equalLayer(l, other)
-}
-
-func (l *TCP) length() int {
- if l.DataOffset == nil {
- // TCP header including the options must end on a 32-bit
- // boundary; the user could potentially give us a slice
- // whose length is not a multiple of 4 bytes, so we have
- // to do the alignment here.
- optlen := (len(l.Options) + 3) & ^3
- return header.TCPMinimumSize + optlen
- }
- return int(*l.DataOffset)
-}
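
The rounding in the branch above can be seen with a tiny standalone example (the function name is invented for the illustration):

package main

import "fmt"

// alignedOptLen rounds a TCP options length up to a 4-byte boundary, the
// same arithmetic used by (*TCP).length when DataOffset is unset.
func alignedOptLen(optLen int) int {
	return (optLen + 3) & ^3
}

func main() {
	for _, n := range []int{0, 1, 3, 4, 5} {
		fmt.Println(n, "->", alignedOptLen(n)) // 0->0, 1->4, 3->4, 4->4, 5->8
	}
}
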
-
-// merge implements Layer.merge.
-func (l *TCP) merge(other Layer) error {
- return mergeLayer(l, other)
-}
-
-// UDP can construct and match a UDP encapsulation.
-type UDP struct {
- LayerBase
- SrcPort *uint16
- DstPort *uint16
- Length *uint16
- Checksum *uint16
-}
-
-func (l *UDP) String() string {
- return stringLayer(l)
-}
-
-// ToBytes implements Layer.ToBytes.
-func (l *UDP) ToBytes() ([]byte, error) {
- b := make([]byte, header.UDPMinimumSize)
- h := header.UDP(b)
- if l.SrcPort != nil {
- h.SetSourcePort(*l.SrcPort)
- }
- if l.DstPort != nil {
- h.SetDestinationPort(*l.DstPort)
- }
- if l.Length != nil {
- h.SetLength(*l.Length)
- } else {
- h.SetLength(uint16(totalLength(l)))
- }
- if l.Checksum != nil {
- h.SetChecksum(*l.Checksum)
- return h, nil
- }
- if err := setUDPChecksum(&h, l); err != nil {
- return nil, err
- }
- return h, nil
-}
-
-// setUDPChecksum calculates the checksum of the UDP header and sets it in h.
-func setUDPChecksum(h *header.UDP, udp *UDP) error {
- h.SetChecksum(0)
- xsum, err := layerChecksum(udp, header.UDPProtocolNumber)
- if err != nil {
- return err
- }
- h.SetChecksum(^h.CalculateChecksum(xsum))
- return nil
-}
-
-// parseUDP parses the bytes assuming that they start with a udp header and
-// returns the parsed layer and the next parser to use.
-func parseUDP(b []byte) (Layer, layerParser) {
- h := header.UDP(b)
- udp := UDP{
- SrcPort: Uint16(h.SourcePort()),
- DstPort: Uint16(h.DestinationPort()),
- Length: Uint16(h.Length()),
- Checksum: Uint16(h.Checksum()),
- }
- return &udp, parsePayload
-}
-
-func (l *UDP) match(other Layer) bool {
- return equalLayer(l, other)
-}
-
-func (l *UDP) length() int {
- return header.UDPMinimumSize
-}
-
-// merge implements Layer.merge.
-func (l *UDP) merge(other Layer) error {
- return mergeLayer(l, other)
-}
-
-// Payload has bytes beyond OSI layer 4.
-type Payload struct {
- LayerBase
- Bytes []byte
-}
-
-func (l *Payload) String() string {
- return stringLayer(l)
-}
-
-// parsePayload parses the bytes assuming that they start with a payload and
-// continue to the end. There can be no further encapsulations.
-func parsePayload(b []byte) (Layer, layerParser) {
- payload := Payload{
- Bytes: b,
- }
- return &payload, nil
-}
-
-// ToBytes implements Layer.ToBytes.
-func (l *Payload) ToBytes() ([]byte, error) {
- return l.Bytes, nil
-}
-
-// Length returns payload byte length.
-func (l *Payload) Length() int {
- return l.length()
-}
-
-func (l *Payload) match(other Layer) bool {
- return equalLayer(l, other)
-}
-
-func (l *Payload) length() int {
- return len(l.Bytes)
-}
-
-// merge implements Layer.merge.
-func (l *Payload) merge(other Layer) error {
- return mergeLayer(l, other)
-}
-
-// Layers is an array of Layer and supports similar functions to Layer.
-type Layers []Layer
-
-// linkLayers sets the linked-list pointers in ls.
-func (ls *Layers) linkLayers() {
- for i, l := range *ls {
- if i > 0 {
- l.setPrev((*ls)[i-1])
- } else {
- l.setPrev(nil)
- }
- if i+1 < len(*ls) {
- l.setNext((*ls)[i+1])
- } else {
- l.setNext(nil)
- }
- }
-}
-
-// ToBytes converts the Layers into bytes. It creates a linked list of the Layer
-// structs and then concatenates the output of ToBytes on each Layer.
-func (ls *Layers) ToBytes() ([]byte, error) {
- ls.linkLayers()
- outBytes := []byte{}
- for _, l := range *ls {
- layerBytes, err := l.ToBytes()
- if err != nil {
- return nil, err
- }
- outBytes = append(outBytes, layerBytes...)
- }
- return outBytes, nil
-}
-
-func (ls *Layers) match(other Layers) bool {
- if len(*ls) > len(other) {
- return false
- }
- for i, l := range *ls {
- if !equalLayer(l, other[i]) {
- return false
- }
- }
- return true
-}
-
-// layerDiff stores the diffs for each field along with the label for the Layer.
-// If rows is nil, that means that there was no diff.
-type layerDiff struct {
- label string
- rows []layerDiffRow
-}
-
-// layerDiffRow stores the fields and corresponding values for two got and want
-// layers. If the value was nil then the string stored is the empty string.
-type layerDiffRow struct {
- field, got, want string
-}
-
-// diffLayer extracts all differing fields between two layers.
-func diffLayer(got, want Layer) []layerDiffRow {
- vGot := reflect.ValueOf(got).Elem()
- vWant := reflect.ValueOf(want).Elem()
- if vGot.Type() != vWant.Type() {
- return nil
- }
- t := vGot.Type()
- var result []layerDiffRow
- for i := 0; i < t.NumField(); i++ {
- t := t.Field(i)
- if t.Anonymous {
- // Ignore the LayerBase in the Layer struct.
- continue
- }
- vGot := vGot.Field(i)
- vWant := vWant.Field(i)
- gotString := ""
- if !vGot.IsNil() {
- gotString = fmt.Sprint(reflect.Indirect(vGot))
- }
- wantString := ""
- if !vWant.IsNil() {
- wantString = fmt.Sprint(reflect.Indirect(vWant))
- }
- result = append(result, layerDiffRow{t.Name, gotString, wantString})
- }
- return result
-}
-
-// layerType returns a concise string describing the type of the Layer, like
-// "TCP", or "IPv6".
-func layerType(l Layer) string {
- return reflect.TypeOf(l).Elem().Name()
-}
-
-// diff compares Layers and returns a representation of the difference. Each
-// Layer in the Layers is pairwise compared. If an element in either is nil, it
-// is considered a match with the other Layer. If two Layers have differing
-// types, they don't match regardless of the contents. If two Layers have the
-// same type then the fields in the Layer are pairwise compared. Fields that are
-// nil always match. Two non-nil fields only match if they point to equal
-// values. diff returns an empty string if and only if *ls and other match.
-func (ls *Layers) diff(other Layers) string {
- var allDiffs []layerDiff
- // Check the cases where one list is longer than the other, where one or both
- // elements are nil, where the sides have different types, and where the sides
- // have the same type.
- for i := 0; i < len(*ls) || i < len(other); i++ {
- if i >= len(*ls) {
- // Matching ls against other where other is longer than ls. missing
- // matches everything so we just include a label without any rows. Having
- // no rows is a sign that there was no diff.
- allDiffs = append(allDiffs, layerDiff{
- label: "missing matches " + layerType(other[i]),
- })
- continue
- }
-
- if i >= len(other) {
- // Matching ls against other where ls is longer than other. missing
- // matches everything so we just include a label without any rows. Having
- // no rows is a sign that there was no diff.
- allDiffs = append(allDiffs, layerDiff{
- label: layerType((*ls)[i]) + " matches missing",
- })
- continue
- }
-
- if (*ls)[i] == nil && other[i] == nil {
- // Matching ls against other where both elements are nil. nil matches
- // everything so we just include a label without any rows. Having no rows
- // is a sign that there was no diff.
- allDiffs = append(allDiffs, layerDiff{
- label: "nil matches nil",
- })
- continue
- }
-
- if (*ls)[i] == nil {
- // Matching ls against other where the element in ls is nil. nil matches
- // everything so we just include a label without any rows. Having no rows
- // is a sign that there was no diff.
- allDiffs = append(allDiffs, layerDiff{
- label: "nil matches " + layerType(other[i]),
- })
- continue
- }
-
- if other[i] == nil {
- // Matching ls against other where the element in other is nil. nil
- // matches everything so we just include a label without any rows. Having
- // no rows is a sign that there was no diff.
- allDiffs = append(allDiffs, layerDiff{
- label: layerType((*ls)[i]) + " matches nil",
- })
- continue
- }
-
- if reflect.TypeOf((*ls)[i]) == reflect.TypeOf(other[i]) {
- // Matching ls against other where both elements have the same type. Match
- // each field pairwise and only report a diff if there is a mismatch,
-			// which is only when both sides are non-nil and have differing values.
- diff := diffLayer((*ls)[i], other[i])
- var layerDiffRows []layerDiffRow
- for _, d := range diff {
- if d.got == "" || d.want == "" || d.got == d.want {
- continue
- }
- layerDiffRows = append(layerDiffRows, layerDiffRow{
- d.field,
- d.got,
- d.want,
- })
- }
- if len(layerDiffRows) > 0 {
- allDiffs = append(allDiffs, layerDiff{
- label: layerType((*ls)[i]),
- rows: layerDiffRows,
- })
- } else {
- allDiffs = append(allDiffs, layerDiff{
- label: layerType((*ls)[i]) + " matches " + layerType(other[i]),
- // Having no rows is a sign that there was no diff.
- })
- }
- continue
- }
- // Neither side is nil and the types are different, so we'll display one
- // side then the other.
- allDiffs = append(allDiffs, layerDiff{
- label: layerType((*ls)[i]) + " doesn't match " + layerType(other[i]),
- })
- diff := diffLayer((*ls)[i], (*ls)[i])
- layerDiffRows := []layerDiffRow{}
- for _, d := range diff {
- if len(d.got) == 0 {
- continue
- }
- layerDiffRows = append(layerDiffRows, layerDiffRow{
- d.field,
- d.got,
- "",
- })
- }
- allDiffs = append(allDiffs, layerDiff{
- label: layerType((*ls)[i]),
- rows: layerDiffRows,
- })
-
- layerDiffRows = []layerDiffRow{}
- diff = diffLayer(other[i], other[i])
- for _, d := range diff {
- if len(d.want) == 0 {
- continue
- }
- layerDiffRows = append(layerDiffRows, layerDiffRow{
- d.field,
- "",
- d.want,
- })
- }
- allDiffs = append(allDiffs, layerDiff{
- label: layerType(other[i]),
- rows: layerDiffRows,
- })
- }
-
- output := ""
- // These are for output formatting.
- maxLabelLen, maxFieldLen, maxGotLen, maxWantLen := 0, 0, 0, 0
- foundOne := false
- for _, l := range allDiffs {
- if len(l.label) > maxLabelLen && len(l.rows) > 0 {
- maxLabelLen = len(l.label)
- }
- if l.rows != nil {
- foundOne = true
- }
- for _, r := range l.rows {
- if len(r.field) > maxFieldLen {
- maxFieldLen = len(r.field)
- }
- if l := len(fmt.Sprint(r.got)); l > maxGotLen {
- maxGotLen = l
- }
- if l := len(fmt.Sprint(r.want)); l > maxWantLen {
- maxWantLen = l
- }
- }
- }
- if !foundOne {
- return ""
- }
- for _, l := range allDiffs {
- if len(l.rows) == 0 {
- output += "(" + l.label + ")\n"
- continue
- }
- for i, r := range l.rows {
- var label string
- if i == 0 {
- label = l.label + ":"
- }
- output += fmt.Sprintf(
- "%*s %*s %*v %*v\n",
- maxLabelLen+1, label,
- maxFieldLen+1, r.field+":",
- maxGotLen, r.got,
- maxWantLen, r.want,
- )
- }
- }
- return output
-}
-
-// merge merges the other Layers into ls. If the other Layers is longer, those
-// additional Layer structs are added to ls. The errors from merging are
-// collected and returned.
-func (ls *Layers) merge(other Layers) error {
- var errs error
- for i, o := range other {
- if i < len(*ls) {
- errs = multierr.Combine(errs, (*ls)[i].merge(o))
- } else {
- *ls = append(*ls, o)
- }
- }
- return errs
-}
diff --git a/test/packetimpact/testbench/layers_test.go b/test/packetimpact/testbench/layers_test.go
deleted file mode 100644
index bc96e0c88..000000000
--- a/test/packetimpact/testbench/layers_test.go
+++ /dev/null
@@ -1,728 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package testbench
-
-import (
- "bytes"
- "net"
- "testing"
-
- "github.com/mohae/deepcopy"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/header"
-)
-
-func TestLayerMatch(t *testing.T) {
- var nilPayload *Payload
- noPayload := &Payload{}
- emptyPayload := &Payload{Bytes: []byte{}}
- fullPayload := &Payload{Bytes: []byte{1, 2, 3}}
- emptyTCP := &TCP{SrcPort: Uint16(1234), LayerBase: LayerBase{nextLayer: emptyPayload}}
- fullTCP := &TCP{SrcPort: Uint16(1234), LayerBase: LayerBase{nextLayer: fullPayload}}
- for _, tt := range []struct {
- a, b Layer
- want bool
- }{
- {nilPayload, nilPayload, true},
- {nilPayload, noPayload, true},
- {nilPayload, emptyPayload, true},
- {nilPayload, fullPayload, true},
- {noPayload, noPayload, true},
- {noPayload, emptyPayload, true},
- {noPayload, fullPayload, true},
- {emptyPayload, emptyPayload, true},
- {emptyPayload, fullPayload, false},
- {fullPayload, fullPayload, true},
- {emptyTCP, fullTCP, true},
- } {
- if got := tt.a.match(tt.b); got != tt.want {
- t.Errorf("%s.match(%s) = %t, want %t", tt.a, tt.b, got, tt.want)
- }
- if got := tt.b.match(tt.a); got != tt.want {
- t.Errorf("%s.match(%s) = %t, want %t", tt.b, tt.a, got, tt.want)
- }
- }
-}
-
-func TestLayerMergeMismatch(t *testing.T) {
- tcp := &TCP{}
- otherTCP := &TCP{}
- ipv4 := &IPv4{}
- ether := &Ether{}
- for _, tt := range []struct {
- a, b Layer
- success bool
- }{
- {tcp, tcp, true},
- {tcp, otherTCP, true},
- {tcp, ipv4, false},
- {tcp, ether, false},
- {tcp, nil, true},
-
- {otherTCP, otherTCP, true},
- {otherTCP, ipv4, false},
- {otherTCP, ether, false},
- {otherTCP, nil, true},
-
- {ipv4, ipv4, true},
- {ipv4, ether, false},
- {ipv4, nil, true},
-
- {ether, ether, true},
- {ether, nil, true},
- } {
- if err := tt.a.merge(tt.b); (err == nil) != tt.success {
- t.Errorf("%s.merge(%s) got %s, wanted the opposite", tt.a, tt.b, err)
- }
- if tt.b != nil {
- if err := tt.b.merge(tt.a); (err == nil) != tt.success {
- t.Errorf("%s.merge(%s) got %s, wanted the opposite", tt.b, tt.a, err)
- }
- }
- }
-}
-
-func TestLayerMerge(t *testing.T) {
- zero := Uint32(0)
- one := Uint32(1)
- two := Uint32(2)
- empty := []byte{}
- foo := []byte("foo")
- bar := []byte("bar")
- for _, tt := range []struct {
- a, b Layer
- want Layer
- }{
- {&TCP{AckNum: nil}, &TCP{AckNum: nil}, &TCP{AckNum: nil}},
- {&TCP{AckNum: nil}, &TCP{AckNum: zero}, &TCP{AckNum: zero}},
- {&TCP{AckNum: nil}, &TCP{AckNum: one}, &TCP{AckNum: one}},
- {&TCP{AckNum: nil}, &TCP{AckNum: two}, &TCP{AckNum: two}},
- {&TCP{AckNum: nil}, nil, &TCP{AckNum: nil}},
-
- {&TCP{AckNum: zero}, &TCP{AckNum: nil}, &TCP{AckNum: zero}},
- {&TCP{AckNum: zero}, &TCP{AckNum: zero}, &TCP{AckNum: zero}},
- {&TCP{AckNum: zero}, &TCP{AckNum: one}, &TCP{AckNum: one}},
- {&TCP{AckNum: zero}, &TCP{AckNum: two}, &TCP{AckNum: two}},
- {&TCP{AckNum: zero}, nil, &TCP{AckNum: zero}},
-
- {&TCP{AckNum: one}, &TCP{AckNum: nil}, &TCP{AckNum: one}},
- {&TCP{AckNum: one}, &TCP{AckNum: zero}, &TCP{AckNum: zero}},
- {&TCP{AckNum: one}, &TCP{AckNum: one}, &TCP{AckNum: one}},
- {&TCP{AckNum: one}, &TCP{AckNum: two}, &TCP{AckNum: two}},
- {&TCP{AckNum: one}, nil, &TCP{AckNum: one}},
-
- {&TCP{AckNum: two}, &TCP{AckNum: nil}, &TCP{AckNum: two}},
- {&TCP{AckNum: two}, &TCP{AckNum: zero}, &TCP{AckNum: zero}},
- {&TCP{AckNum: two}, &TCP{AckNum: one}, &TCP{AckNum: one}},
- {&TCP{AckNum: two}, &TCP{AckNum: two}, &TCP{AckNum: two}},
- {&TCP{AckNum: two}, nil, &TCP{AckNum: two}},
-
- {&Payload{Bytes: nil}, &Payload{Bytes: nil}, &Payload{Bytes: nil}},
- {&Payload{Bytes: nil}, &Payload{Bytes: empty}, &Payload{Bytes: empty}},
- {&Payload{Bytes: nil}, &Payload{Bytes: foo}, &Payload{Bytes: foo}},
- {&Payload{Bytes: nil}, &Payload{Bytes: bar}, &Payload{Bytes: bar}},
- {&Payload{Bytes: nil}, nil, &Payload{Bytes: nil}},
-
- {&Payload{Bytes: empty}, &Payload{Bytes: nil}, &Payload{Bytes: empty}},
- {&Payload{Bytes: empty}, &Payload{Bytes: empty}, &Payload{Bytes: empty}},
- {&Payload{Bytes: empty}, &Payload{Bytes: foo}, &Payload{Bytes: foo}},
- {&Payload{Bytes: empty}, &Payload{Bytes: bar}, &Payload{Bytes: bar}},
- {&Payload{Bytes: empty}, nil, &Payload{Bytes: empty}},
-
- {&Payload{Bytes: foo}, &Payload{Bytes: nil}, &Payload{Bytes: foo}},
- {&Payload{Bytes: foo}, &Payload{Bytes: empty}, &Payload{Bytes: empty}},
- {&Payload{Bytes: foo}, &Payload{Bytes: foo}, &Payload{Bytes: foo}},
- {&Payload{Bytes: foo}, &Payload{Bytes: bar}, &Payload{Bytes: bar}},
- {&Payload{Bytes: foo}, nil, &Payload{Bytes: foo}},
-
- {&Payload{Bytes: bar}, &Payload{Bytes: nil}, &Payload{Bytes: bar}},
- {&Payload{Bytes: bar}, &Payload{Bytes: empty}, &Payload{Bytes: empty}},
- {&Payload{Bytes: bar}, &Payload{Bytes: foo}, &Payload{Bytes: foo}},
- {&Payload{Bytes: bar}, &Payload{Bytes: bar}, &Payload{Bytes: bar}},
- {&Payload{Bytes: bar}, nil, &Payload{Bytes: bar}},
- } {
- a := deepcopy.Copy(tt.a).(Layer)
- if err := a.merge(tt.b); err != nil {
- t.Errorf("%s.merge(%s) = %s, wanted nil", tt.a, tt.b, err)
- continue
- }
- if a.String() != tt.want.String() {
- t.Errorf("%s.merge(%s) merge result got %s, want %s", tt.a, tt.b, a, tt.want)
- }
- }
-}
-
-func TestLayerStringFormat(t *testing.T) {
- for _, tt := range []struct {
- name string
- l Layer
- want string
- }{
- {
- name: "TCP",
- l: &TCP{
- SrcPort: Uint16(34785),
- DstPort: Uint16(47767),
- SeqNum: Uint32(3452155723),
- AckNum: Uint32(2596996163),
- DataOffset: Uint8(5),
- Flags: TCPFlags(header.TCPFlagRst | header.TCPFlagAck),
- WindowSize: Uint16(64240),
- Checksum: Uint16(0x2e2b),
- },
- want: "&testbench.TCP{" +
- "SrcPort:34785 " +
- "DstPort:47767 " +
- "SeqNum:3452155723 " +
- "AckNum:2596996163 " +
- "DataOffset:5 " +
- "Flags: R A " +
- "WindowSize:64240 " +
- "Checksum:11819" +
- "}",
- },
- {
- name: "UDP",
- l: &UDP{
- SrcPort: Uint16(34785),
- DstPort: Uint16(47767),
- Length: Uint16(12),
- },
- want: "&testbench.UDP{" +
- "SrcPort:34785 " +
- "DstPort:47767 " +
- "Length:12" +
- "}",
- },
- {
- name: "IPv4",
- l: &IPv4{
- IHL: Uint8(5),
- TOS: Uint8(0),
- TotalLength: Uint16(44),
- ID: Uint16(0),
- Flags: Uint8(2),
- FragmentOffset: Uint16(0),
- TTL: Uint8(64),
- Protocol: Uint8(6),
- Checksum: Uint16(0x2e2b),
- SrcAddr: Address(tcpip.Address([]byte{197, 34, 63, 10})),
- DstAddr: Address(tcpip.Address([]byte{197, 34, 63, 20})),
- },
- want: "&testbench.IPv4{" +
- "IHL:5 " +
- "TOS:0 " +
- "TotalLength:44 " +
- "ID:0 " +
- "Flags:2 " +
- "FragmentOffset:0 " +
- "TTL:64 " +
- "Protocol:6 " +
- "Checksum:11819 " +
- "SrcAddr:197.34.63.10 " +
- "DstAddr:197.34.63.20" +
- "}",
- },
- {
- name: "Ether",
- l: &Ether{
- SrcAddr: LinkAddress(tcpip.LinkAddress([]byte{0x02, 0x42, 0xc5, 0x22, 0x3f, 0x0a})),
- DstAddr: LinkAddress(tcpip.LinkAddress([]byte{0x02, 0x42, 0xc5, 0x22, 0x3f, 0x14})),
- Type: NetworkProtocolNumber(4),
- },
- want: "&testbench.Ether{" +
- "SrcAddr:02:42:c5:22:3f:0a " +
- "DstAddr:02:42:c5:22:3f:14 " +
- "Type:4" +
- "}",
- },
- {
- name: "Payload",
- l: &Payload{
- Bytes: []byte("Hooray for packetimpact."),
- },
- want: "&testbench.Payload{Bytes:\n" +
- "00000000 48 6f 6f 72 61 79 20 66 6f 72 20 70 61 63 6b 65 |Hooray for packe|\n" +
- "00000010 74 69 6d 70 61 63 74 2e |timpact.|\n" +
- "}",
- },
- } {
- t.Run(tt.name, func(t *testing.T) {
- if got := tt.l.String(); got != tt.want {
- t.Errorf("%s.String() = %s, want: %s", tt.name, got, tt.want)
- }
- })
- }
-}
-
-func TestConnectionMatch(t *testing.T) {
- conn := Connection{
- layerStates: []layerState{&etherState{}},
- }
- protoNum0 := tcpip.NetworkProtocolNumber(0)
- protoNum1 := tcpip.NetworkProtocolNumber(1)
- for _, tt := range []struct {
- description string
- override, received Layers
- wantMatch bool
- }{
- {
- description: "shorter override",
- override: []Layer{&Ether{}},
- received: []Layer{&Ether{}, &Payload{Bytes: []byte("hello")}},
- wantMatch: true,
- },
- {
- description: "longer override",
- override: []Layer{&Ether{}, &Payload{Bytes: []byte("hello")}},
- received: []Layer{&Ether{}},
- wantMatch: false,
- },
- {
- description: "ether layer mismatch",
- override: []Layer{&Ether{Type: &protoNum0}},
- received: []Layer{&Ether{Type: &protoNum1}},
- wantMatch: false,
- },
- {
- description: "both nil",
- override: nil,
- received: nil,
- wantMatch: false,
- },
- {
- description: "nil override",
- override: nil,
- received: []Layer{&Ether{}},
- wantMatch: true,
- },
- } {
- t.Run(tt.description, func(t *testing.T) {
- if gotMatch := conn.match(tt.override, tt.received); gotMatch != tt.wantMatch {
- t.Fatalf("conn.match(%s, %s) = %t, want %t", tt.override, tt.received, gotMatch, tt.wantMatch)
- }
- })
- }
-}
-
-func TestLayersDiff(t *testing.T) {
- for _, tt := range []struct {
- x, y Layers
- want string
- }{
- {
- Layers{&Ether{Type: NetworkProtocolNumber(12)}, &TCP{DataOffset: Uint8(5), SeqNum: Uint32(5)}},
- Layers{&Ether{Type: NetworkProtocolNumber(13)}, &TCP{DataOffset: Uint8(7), SeqNum: Uint32(6)}},
- "Ether: Type: 12 13\n" +
- " TCP: SeqNum: 5 6\n" +
- " DataOffset: 5 7\n",
- },
- {
- Layers{&Ether{Type: NetworkProtocolNumber(12)}, &UDP{SrcPort: Uint16(123)}},
- Layers{&Ether{Type: NetworkProtocolNumber(13)}, &TCP{DataOffset: Uint8(7), SeqNum: Uint32(6)}},
- "Ether: Type: 12 13\n" +
- "(UDP doesn't match TCP)\n" +
- " UDP: SrcPort: 123 \n" +
- " TCP: SeqNum: 6\n" +
- " DataOffset: 7\n",
- },
- {
- Layers{&UDP{SrcPort: Uint16(123)}},
- Layers{&Ether{Type: NetworkProtocolNumber(13)}, &TCP{DataOffset: Uint8(7), SeqNum: Uint32(6)}},
- "(UDP doesn't match Ether)\n" +
- " UDP: SrcPort: 123 \n" +
- "Ether: Type: 13\n" +
- "(missing matches TCP)\n",
- },
- {
- Layers{nil, &UDP{SrcPort: Uint16(123)}},
- Layers{&Ether{Type: NetworkProtocolNumber(13)}, &TCP{DataOffset: Uint8(7), SeqNum: Uint32(6)}},
- "(nil matches Ether)\n" +
- "(UDP doesn't match TCP)\n" +
- "UDP: SrcPort: 123 \n" +
- "TCP: SeqNum: 6\n" +
- " DataOffset: 7\n",
- },
- {
- Layers{&Ether{Type: NetworkProtocolNumber(13)}, &IPv4{IHL: Uint8(4)}, &TCP{DataOffset: Uint8(7), SeqNum: Uint32(6)}},
- Layers{&Ether{Type: NetworkProtocolNumber(13)}, &IPv4{IHL: Uint8(6)}, &TCP{DataOffset: Uint8(7), SeqNum: Uint32(6)}},
- "(Ether matches Ether)\n" +
- "IPv4: IHL: 4 6\n" +
- "(TCP matches TCP)\n",
- },
- {
- Layers{&Payload{Bytes: []byte("foo")}},
- Layers{&Payload{Bytes: []byte("bar")}},
- "Payload: Bytes: [102 111 111] [98 97 114]\n",
- },
- {
- Layers{&Payload{Bytes: []byte("")}},
- Layers{&Payload{}},
- "",
- },
- {
- Layers{&Payload{Bytes: []byte("")}},
- Layers{&Payload{Bytes: []byte("")}},
- "",
- },
- {
- Layers{&UDP{}},
- Layers{&TCP{}},
- "(UDP doesn't match TCP)\n" +
- "(UDP)\n" +
- "(TCP)\n",
- },
- } {
- if got := tt.x.diff(tt.y); got != tt.want {
- t.Errorf("%s.diff(%s) = %q, want %q", tt.x, tt.y, got, tt.want)
- }
- if tt.x.match(tt.y) != (tt.x.diff(tt.y) == "") {
- t.Errorf("match and diff of %s and %s disagree", tt.x, tt.y)
- }
- if tt.y.match(tt.x) != (tt.y.diff(tt.x) == "") {
- t.Errorf("match and diff of %s and %s disagree", tt.y, tt.x)
- }
- }
-}
-
-func TestTCPOptions(t *testing.T) {
- for _, tt := range []struct {
- description string
- wantBytes []byte
- wantLayers Layers
- }{
- {
- description: "without payload",
- wantBytes: []byte{
- // IPv4 Header
- 0x45, 0x00, 0x00, 0x2c, 0x00, 0x01, 0x00, 0x00, 0x40, 0x06,
- 0xf9, 0x77, 0xc0, 0xa8, 0x00, 0x02, 0xc0, 0xa8, 0x00, 0x01,
- // TCP Header
- 0x30, 0x39, 0xd4, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x60, 0x02, 0x20, 0x00, 0xf5, 0x1c, 0x00, 0x00,
- // WindowScale Option
- 0x03, 0x03, 0x02,
- // NOP Option
- 0x00,
- },
- wantLayers: []Layer{
- &IPv4{
- IHL: Uint8(20),
- TOS: Uint8(0),
- TotalLength: Uint16(44),
- ID: Uint16(1),
- Flags: Uint8(0),
- FragmentOffset: Uint16(0),
- TTL: Uint8(64),
- Protocol: Uint8(uint8(header.TCPProtocolNumber)),
- Checksum: Uint16(0xf977),
- SrcAddr: Address(tcpip.Address(net.ParseIP("192.168.0.2").To4())),
- DstAddr: Address(tcpip.Address(net.ParseIP("192.168.0.1").To4())),
- },
- &TCP{
- SrcPort: Uint16(12345),
- DstPort: Uint16(54321),
- SeqNum: Uint32(0),
- AckNum: Uint32(0),
- Flags: TCPFlags(header.TCPFlagSyn),
- WindowSize: Uint16(8192),
- Checksum: Uint16(0xf51c),
- UrgentPointer: Uint16(0),
- Options: []byte{3, 3, 2, 0},
- },
- &Payload{Bytes: nil},
- },
- },
- {
- description: "with payload",
- wantBytes: []byte{
- // IPv4 header
- 0x45, 0x00, 0x00, 0x37, 0x00, 0x01, 0x00, 0x00, 0x40, 0x06,
- 0xf9, 0x6c, 0xc0, 0xa8, 0x00, 0x02, 0xc0, 0xa8, 0x00, 0x01,
- // TCP header
- 0x30, 0x39, 0xd4, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x60, 0x02, 0x20, 0x00, 0xe5, 0x21, 0x00, 0x00,
- // WindowScale Option
- 0x03, 0x03, 0x02,
- // NOP Option
- 0x00,
- // Payload: "Sample Data"
- 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x20, 0x44, 0x61, 0x74, 0x61,
- },
- wantLayers: []Layer{
- &IPv4{
- IHL: Uint8(20),
- TOS: Uint8(0),
- TotalLength: Uint16(55),
- ID: Uint16(1),
- Flags: Uint8(0),
- FragmentOffset: Uint16(0),
- TTL: Uint8(64),
- Protocol: Uint8(uint8(header.TCPProtocolNumber)),
- Checksum: Uint16(0xf96c),
- SrcAddr: Address(tcpip.Address(net.ParseIP("192.168.0.2").To4())),
- DstAddr: Address(tcpip.Address(net.ParseIP("192.168.0.1").To4())),
- },
- &TCP{
- SrcPort: Uint16(12345),
- DstPort: Uint16(54321),
- SeqNum: Uint32(0),
- AckNum: Uint32(0),
- Flags: TCPFlags(header.TCPFlagSyn),
- WindowSize: Uint16(8192),
- Checksum: Uint16(0xe521),
- UrgentPointer: Uint16(0),
- Options: []byte{3, 3, 2, 0},
- },
- &Payload{Bytes: []byte("Sample Data")},
- },
- },
- } {
- t.Run(tt.description, func(t *testing.T) {
- layers := parse(parseIPv4, tt.wantBytes)
- if !layers.match(tt.wantLayers) {
- t.Fatalf("match failed with diff: %s", layers.diff(tt.wantLayers))
- }
- gotBytes, err := layers.ToBytes()
- if err != nil {
- t.Fatalf("ToBytes() failed on %s: %s", &layers, err)
- }
- if !bytes.Equal(tt.wantBytes, gotBytes) {
- t.Fatalf("mismatching bytes, gotBytes: %x, wantBytes: %x", gotBytes, tt.wantBytes)
- }
- })
- }
-}
-
-func TestIPv6ExtHdrOptions(t *testing.T) {
- for _, tt := range []struct {
- description string
- wantBytes []byte
- wantLayers Layers
- }{
- {
- description: "IPv6/HopByHop",
- wantBytes: []byte{
- // IPv6 Header
- 0x60, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x40, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x01, 0xfe, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef,
- // HopByHop Options
- 0x3b, 0x00, 0x05, 0x02, 0x00, 0x00, 0x01, 0x00,
- },
- wantLayers: []Layer{
- &IPv6{
- SrcAddr: Address(tcpip.Address(net.ParseIP("::1"))),
- DstAddr: Address(tcpip.Address(net.ParseIP("fe80::dead:beef"))),
- },
- &IPv6HopByHopOptionsExtHdr{
- NextHeader: IPv6ExtHdrIdent(header.IPv6NoNextHeaderIdentifier),
- Options: []byte{0x05, 0x02, 0x00, 0x00, 0x01, 0x00},
- },
- &Payload{
- Bytes: nil,
- },
- },
- },
- {
- description: "IPv6/HopByHop/Payload",
- wantBytes: []byte{
- // IPv6 Header
- 0x60, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x40, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x01, 0xfe, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef,
- // HopByHop Options
- 0x3b, 0x00, 0x05, 0x02, 0x00, 0x00, 0x01, 0x00,
- // Sample Data
- 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x20, 0x44, 0x61, 0x74, 0x61,
- },
- wantLayers: []Layer{
- &IPv6{
- SrcAddr: Address(tcpip.Address(net.ParseIP("::1"))),
- DstAddr: Address(tcpip.Address(net.ParseIP("fe80::dead:beef"))),
- },
- &IPv6HopByHopOptionsExtHdr{
- NextHeader: IPv6ExtHdrIdent(header.IPv6NoNextHeaderIdentifier),
- Options: []byte{0x05, 0x02, 0x00, 0x00, 0x01, 0x00},
- },
- &Payload{
- Bytes: []byte("Sample Data"),
- },
- },
- },
- {
- description: "IPv6/HopByHop/Destination/ICMPv6",
- wantBytes: []byte{
- // IPv6 Header
- 0x60, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x40, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x01, 0xfe, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef,
- // HopByHop Options
- 0x3c, 0x00, 0x05, 0x02, 0x00, 0x00, 0x01, 0x00,
- // Destination Options
- 0x3a, 0x00, 0x05, 0x02, 0x00, 0x00, 0x01, 0x00,
- // ICMPv6 Param Problem
- 0x04, 0x00, 0x5f, 0x98, 0x00, 0x00, 0x00, 0x06,
- },
- wantLayers: []Layer{
- &IPv6{
- SrcAddr: Address(tcpip.Address(net.ParseIP("::1"))),
- DstAddr: Address(tcpip.Address(net.ParseIP("fe80::dead:beef"))),
- },
- &IPv6HopByHopOptionsExtHdr{
- NextHeader: IPv6ExtHdrIdent(header.IPv6DestinationOptionsExtHdrIdentifier),
- Options: []byte{0x05, 0x02, 0x00, 0x00, 0x01, 0x00},
- },
- &IPv6DestinationOptionsExtHdr{
- NextHeader: IPv6ExtHdrIdent(header.IPv6ExtensionHeaderIdentifier(header.ICMPv6ProtocolNumber)),
- Options: []byte{0x05, 0x02, 0x00, 0x00, 0x01, 0x00},
- },
- &ICMPv6{
- Type: ICMPv6Type(header.ICMPv6ParamProblem),
- Code: ICMPv6Code(header.ICMPv6ErroneousHeader),
- Checksum: Uint16(0x5f98),
- Pointer: Uint32(6),
- },
- },
- },
- {
- description: "IPv6/HopByHop/Fragment",
- wantBytes: []byte{
- // IPv6 Header
- 0x60, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x40, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x01, 0xfe, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef,
- // HopByHop Options
- 0x2c, 0x00, 0x05, 0x02, 0x00, 0x00, 0x01, 0x00,
- // Fragment ExtHdr
- 0x3b, 0x00, 0x03, 0x20, 0x00, 0x00, 0x00, 0x2a,
- },
- wantLayers: []Layer{
- &IPv6{
- SrcAddr: Address(tcpip.Address(net.ParseIP("::1"))),
- DstAddr: Address(tcpip.Address(net.ParseIP("fe80::dead:beef"))),
- },
- &IPv6HopByHopOptionsExtHdr{
- NextHeader: IPv6ExtHdrIdent(header.IPv6FragmentExtHdrIdentifier),
- Options: []byte{0x05, 0x02, 0x00, 0x00, 0x01, 0x00},
- },
- &IPv6FragmentExtHdr{
- NextHeader: IPv6ExtHdrIdent(header.IPv6NoNextHeaderIdentifier),
- FragmentOffset: Uint16(100),
- MoreFragments: Bool(false),
- Identification: Uint32(42),
- },
- &Payload{
- Bytes: nil,
- },
- },
- },
- {
- description: "IPv6/DestOpt/Fragment/Payload",
- wantBytes: []byte{
- // IPv6 Header
- 0x60, 0x00, 0x00, 0x00, 0x00, 0x1b, 0x3c, 0x40, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x01, 0xfe, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef,
- // Destination Options
- 0x2c, 0x00, 0x05, 0x02, 0x00, 0x00, 0x01, 0x00,
- // Fragment ExtHdr
- 0x3b, 0x00, 0x03, 0x21, 0x00, 0x00, 0x00, 0x2a,
- // Sample Data
- 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x20, 0x44, 0x61, 0x74, 0x61,
- },
- wantLayers: []Layer{
- &IPv6{
- SrcAddr: Address(tcpip.Address(net.ParseIP("::1"))),
- DstAddr: Address(tcpip.Address(net.ParseIP("fe80::dead:beef"))),
- },
- &IPv6DestinationOptionsExtHdr{
- NextHeader: IPv6ExtHdrIdent(header.IPv6FragmentExtHdrIdentifier),
- Options: []byte{0x05, 0x02, 0x00, 0x00, 0x01, 0x00},
- },
- &IPv6FragmentExtHdr{
- NextHeader: IPv6ExtHdrIdent(header.IPv6NoNextHeaderIdentifier),
- FragmentOffset: Uint16(100),
- MoreFragments: Bool(true),
- Identification: Uint32(42),
- },
- &Payload{
- Bytes: []byte("Sample Data"),
- },
- },
- },
- {
- description: "IPv6/Fragment/Payload",
- wantBytes: []byte{
- // IPv6 Header
- 0x60, 0x00, 0x00, 0x00, 0x00, 0x13, 0x2c, 0x40, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x01, 0xfe, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef,
- // Fragment ExtHdr
- 0x3b, 0x00, 0x03, 0x21, 0x00, 0x00, 0x00, 0x2a,
- // Sample Data
- 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x20, 0x44, 0x61, 0x74, 0x61,
- },
- wantLayers: []Layer{
- &IPv6{
- SrcAddr: Address(tcpip.Address(net.ParseIP("::1"))),
- DstAddr: Address(tcpip.Address(net.ParseIP("fe80::dead:beef"))),
- },
- &IPv6FragmentExtHdr{
- NextHeader: IPv6ExtHdrIdent(header.IPv6NoNextHeaderIdentifier),
- FragmentOffset: Uint16(100),
- MoreFragments: Bool(true),
- Identification: Uint32(42),
- },
- &Payload{
- Bytes: []byte("Sample Data"),
- },
- },
- },
- } {
- t.Run(tt.description, func(t *testing.T) {
- layers := parse(parseIPv6, tt.wantBytes)
- if !layers.match(tt.wantLayers) {
- t.Fatalf("match failed with diff: %s", layers.diff(tt.wantLayers))
- }
- // Make sure we can generate correct next header values and checksums
- for _, layer := range layers {
- switch layer := layer.(type) {
- case *IPv6HopByHopOptionsExtHdr:
- layer.NextHeader = nil
- case *IPv6DestinationOptionsExtHdr:
- layer.NextHeader = nil
- case *IPv6FragmentExtHdr:
- layer.NextHeader = nil
- case *ICMPv6:
- layer.Checksum = nil
- }
- }
- gotBytes, err := layers.ToBytes()
- if err != nil {
- t.Fatalf("ToBytes() failed on %s: %s", &layers, err)
- }
- if !bytes.Equal(tt.wantBytes, gotBytes) {
- t.Fatalf("mismatching bytes, gotBytes: %x, wantBytes: %x", gotBytes, tt.wantBytes)
- }
- })
- }
-}
diff --git a/test/packetimpact/testbench/rawsockets.go b/test/packetimpact/testbench/rawsockets.go
deleted file mode 100644
index 6d95c033d..000000000
--- a/test/packetimpact/testbench/rawsockets.go
+++ /dev/null
@@ -1,207 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package testbench
-
-import (
- "encoding/binary"
- "fmt"
- "net"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/hostarch"
-)
-
-// Sniffer can sniff raw packets on the wire.
-type Sniffer struct {
- fd int
-}
-
-func htons(x uint16) uint16 {
- buf := [2]byte{}
- binary.BigEndian.PutUint16(buf[:], x)
- return hostarch.ByteOrder.Uint16(buf[:])
-}
-
-// NewSniffer creates a Sniffer connected to the test network's local device.
-func (n *DUTTestNet) NewSniffer(t *testing.T) (Sniffer, error) {
- t.Helper()
-
- ifInfo, err := net.InterfaceByName(n.LocalDevName)
- if err != nil {
- return Sniffer{}, err
- }
-
- var haddr [8]byte
- copy(haddr[:], ifInfo.HardwareAddr)
- sa := unix.SockaddrLinklayer{
- Protocol: htons(unix.ETH_P_ALL),
- Ifindex: ifInfo.Index,
- }
- snifferFd, err := unix.Socket(unix.AF_PACKET, unix.SOCK_RAW, int(htons(unix.ETH_P_ALL)))
- if err != nil {
- return Sniffer{}, err
- }
- if err := unix.Bind(snifferFd, &sa); err != nil {
- return Sniffer{}, err
- }
- if err := unix.SetsockoptInt(snifferFd, unix.SOL_SOCKET, unix.SO_RCVBUFFORCE, 1); err != nil {
- t.Fatalf("can't set sockopt SO_RCVBUFFORCE to 1: %s", err)
- }
- if err := unix.SetsockoptInt(snifferFd, unix.SOL_SOCKET, unix.SO_RCVBUF, 1e7); err != nil {
- t.Fatalf("can't setsockopt SO_RCVBUF to 10M: %s", err)
- }
- return Sniffer{
- fd: snifferFd,
- }, nil
-}
-
-// maxReadSize should be large enough for the maximum frame size in bytes. If a
-// packet too large for the buffer arrives, the test will get a fatal error.
-const maxReadSize int = 65536
-
-// Recv tries to read one frame until the timeout is up. If the timeout given
-// is 0, then no read attempt will be made.
-func (s *Sniffer) Recv(t *testing.T, timeout time.Duration) []byte {
- t.Helper()
-
- deadline := time.Now().Add(timeout)
- for {
- timeout = time.Until(deadline)
- if timeout <= 0 {
- return nil
- }
- usec := timeout.Microseconds()
- if usec == 0 {
- // Timeout is less than a microsecond; set usec to 1 to avoid
- // blocking indefinitely.
- usec = 1
- }
- const microsInOne = 1e6
- tv := unix.Timeval{
- Sec: usec / microsInOne,
- Usec: usec % microsInOne,
- }
- if err := unix.SetsockoptTimeval(s.fd, unix.SOL_SOCKET, unix.SO_RCVTIMEO, &tv); err != nil {
- t.Fatalf("can't setsockopt SO_RCVTIMEO: %s", err)
- }
-
- buf := make([]byte, maxReadSize)
- nread, _, err := unix.Recvfrom(s.fd, buf, unix.MSG_TRUNC)
- if err == unix.EINTR || err == unix.EAGAIN {
- // There was a timeout.
- continue
- }
- if err != nil {
- t.Fatalf("can't read: %s", err)
- }
- if nread > maxReadSize {
- t.Fatalf("received a truncated frame of %d bytes, want at most %d bytes", nread, maxReadSize)
- }
- return buf[:nread]
- }
-}
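
The deadline-to-Timeval conversion in Recv can be isolated into a small sketch; toTimeval is a made-up name, and the clamp to 1 microsecond matches the comment above about avoiding an indefinite block (a zero Timeval disables the receive timeout).

package main

import (
	"fmt"
	"time"

	"golang.org/x/sys/unix"
)

// toTimeval converts a Go duration into the unix.Timeval used for
// SO_RCVTIMEO, clamping sub-microsecond values to 1us so the socket does
// not block forever.
func toTimeval(d time.Duration) unix.Timeval {
	usec := d.Microseconds()
	if usec == 0 {
		usec = 1
	}
	const microsInOne = 1e6
	return unix.Timeval{Sec: usec / microsInOne, Usec: usec % microsInOne}
}

func main() {
	fmt.Println(toTimeval(1500 * time.Millisecond)) // {1 500000}
}
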
-
-// Drain drains the Sniffer's socket receive buffer by receiving until there's
-// nothing else to receive.
-func (s *Sniffer) Drain(t *testing.T) {
- t.Helper()
-
- flags, err := unix.FcntlInt(uintptr(s.fd), unix.F_GETFL, 0)
- if err != nil {
- t.Fatalf("failed to get sniffer socket fd flags: %s", err)
- }
- nonBlockingFlags := flags | unix.O_NONBLOCK
- if _, err := unix.FcntlInt(uintptr(s.fd), unix.F_SETFL, nonBlockingFlags); err != nil {
- t.Fatalf("failed to make sniffer socket non-blocking with flags %b: %s", nonBlockingFlags, err)
- }
- for {
- buf := make([]byte, maxReadSize)
- _, _, err := unix.Recvfrom(s.fd, buf, unix.MSG_TRUNC)
- if err == unix.EINTR || err == unix.EAGAIN || err == unix.EWOULDBLOCK {
- break
- }
- }
- if _, err := unix.FcntlInt(uintptr(s.fd), unix.F_SETFL, flags); err != nil {
- t.Fatalf("failed to restore sniffer socket fd flags to %b: %s", flags, err)
- }
-}
-
-// close the socket that Sniffer is using.
-func (s *Sniffer) close() error {
- if err := unix.Close(s.fd); err != nil {
- return fmt.Errorf("can't close sniffer socket: %w", err)
- }
- s.fd = -1
- return nil
-}
-
-// Injector can inject raw frames.
-type Injector struct {
- fd int
-}
-
-// NewInjector creates a new Injector on the test network's local device.
-func (n *DUTTestNet) NewInjector(t *testing.T) (Injector, error) {
- t.Helper()
-
- ifInfo, err := net.InterfaceByName(n.LocalDevName)
- if err != nil {
- return Injector{}, err
- }
-
- var haddr [8]byte
- copy(haddr[:], ifInfo.HardwareAddr)
- sa := unix.SockaddrLinklayer{
- Protocol: htons(unix.ETH_P_IP),
- Ifindex: ifInfo.Index,
- Halen: uint8(len(ifInfo.HardwareAddr)),
- Addr: haddr,
- }
-
- injectFd, err := unix.Socket(unix.AF_PACKET, unix.SOCK_RAW, int(htons(unix.ETH_P_ALL)))
- if err != nil {
- return Injector{}, err
- }
- if err := unix.Bind(injectFd, &sa); err != nil {
- return Injector{}, err
- }
- return Injector{
- fd: injectFd,
- }, nil
-}
-
-// Send sends a raw frame.
-func (i *Injector) Send(t *testing.T, b []byte) {
- t.Helper()
-
- n, err := unix.Write(i.fd, b)
- if err != nil {
- t.Fatalf("can't write bytes of len %d: %s", len(b), err)
- }
- if n != len(b) {
- t.Fatalf("got %d bytes written, want %d", n, len(b))
- }
-}
-
-// close the underlying socket.
-func (i *Injector) close() error {
- if err := unix.Close(i.fd); err != nil {
-		return fmt.Errorf("can't close injector socket: %w", err)
- }
- i.fd = -1
- return nil
-}
diff --git a/test/packetimpact/testbench/testbench.go b/test/packetimpact/testbench/testbench.go
deleted file mode 100644
index 38ae9c1d7..000000000
--- a/test/packetimpact/testbench/testbench.go
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package testbench has utilities to send and receive packets and to command
-// the DUT to run POSIX functions. It is the packetimpact test API.
-package testbench
-
-import (
- "encoding/json"
- "flag"
- "fmt"
- "math/rand"
- "net"
- "testing"
- "time"
-)
-
-var (
- // Native indicates that the test is being run natively.
- Native = false
- // RPCKeepalive is the gRPC keepalive.
- RPCKeepalive = 10 * time.Second
-
-	// dutInfosJSON is the JSON string that describes all of the DUTs
-	// available for the testbench to use.
- dutInfosJSON string
-	// dutInfo is the pool from which the testbench picks a DUT to work
-	// with.
- dutInfo chan *DUTInfo
-)
-
-// DUTInfo has both network and uname information about the DUT.
-type DUTInfo struct {
- Uname *DUTUname
- Net *DUTTestNet
-}
-
-// DUTUname contains information about the DUT from uname.
-type DUTUname struct {
- Machine string
- KernelName string
- KernelRelease string
- KernelVersion string
- OperatingSystem string
-}
-
-// IsLinux returns true if the DUT is running Linux.
-func (n *DUTUname) IsLinux() bool {
- return Native && n.OperatingSystem == "GNU/Linux"
-}
-
-// IsGvisor returns true if the DUT is running gVisor.
-func (*DUTUname) IsGvisor() bool {
- return !Native
-}
-
-// IsFuchsia returns true if the DUT is running Fuchsia.
-func (n *DUTUname) IsFuchsia() bool {
- return Native && n.OperatingSystem == "Fuchsia"
-}
-
-// DUTTestNet describes the test network setup on dut and how the testbench
-// should connect with an existing DUT.
-type DUTTestNet struct {
- // LocalMAC is the local MAC address on the test network.
- LocalMAC net.HardwareAddr
- // RemoteMAC is the DUT's MAC address on the test network.
- RemoteMAC net.HardwareAddr
- // LocalIPv4 is the local IPv4 address on the test network.
- LocalIPv4 net.IP
- // RemoteIPv4 is the DUT's IPv4 address on the test network.
- RemoteIPv4 net.IP
- // IPv4PrefixLength is the network prefix length of the IPv4 test network.
- IPv4PrefixLength int
- // LocalIPv6 is the local IPv6 address on the test network.
- LocalIPv6 net.IP
- // RemoteIPv6 is the DUT's IPv6 address on the test network.
- RemoteIPv6 net.IP
- // LocalDevID is the ID of the local interface on the test network.
- LocalDevID uint32
- // RemoteDevID is the ID of the remote interface on the test network.
- RemoteDevID uint32
-	// LocalDevName is the device that the testbench uses to inject traffic.
-	LocalDevName string
-	// RemoteDevName is the device name on the DUT; individual tests can
-	// use it when constructing test cases.
- RemoteDevName string
-
-	// The following two fields are actually on the control network instead
-	// of the test network; they are included here for convenience.
-
- // POSIXServerIP is the POSIX server's IP address on the control network.
- POSIXServerIP net.IP
- // POSIXServerPort is the UDP port the POSIX server is bound to on the
- // control network.
- POSIXServerPort uint16
-}
-
-// SubnetBroadcast returns the test network's subnet broadcast address.
-func (n *DUTTestNet) SubnetBroadcast() net.IP {
- addr := append([]byte(nil), n.RemoteIPv4...)
- mask := net.CIDRMask(n.IPv4PrefixLength, net.IPv4len*8)
- for i := range addr {
- addr[i] |= ^mask[i]
- }
- return addr
-}
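
A quick standalone check of the same computation (names invented for the example): with a /24 prefix, a remote address of 192.168.0.2 yields 192.168.0.255.

package main

import (
	"fmt"
	"net"
)

// subnetBroadcast mirrors DUTTestNet.SubnetBroadcast: OR the address with
// the inverted network mask.
func subnetBroadcast(ip net.IP, prefixLen int) net.IP {
	addr := append(net.IP(nil), ip.To4()...)
	mask := net.CIDRMask(prefixLen, net.IPv4len*8)
	for i := range addr {
		addr[i] |= ^mask[i]
	}
	return addr
}

func main() {
	fmt.Println(subnetBroadcast(net.ParseIP("192.168.0.2"), 24)) // 192.168.0.255
}
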
-
-// registerFlags defines flags and associates them with the package-level
-// exported variables above. It should be called by tests in their init
-// functions.
-func registerFlags(fs *flag.FlagSet) {
- fs.BoolVar(&Native, "native", Native, "whether the test is running natively")
- fs.DurationVar(&RPCKeepalive, "rpc_keepalive", RPCKeepalive, "gRPC keepalive")
- fs.StringVar(&dutInfosJSON, "dut_infos_json", dutInfosJSON, "json that describes the DUTs")
-}
-
-// Initialize initializes the testbench: it parses the flags and sets up the
-// pool of test networks for later use.
-func Initialize(fs *flag.FlagSet) {
- testing.Init()
- registerFlags(fs)
- flag.Parse()
- if err := loadDUTInfos(); err != nil {
- panic(err)
- }
-}
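
A hedged sketch of how a test binary might wire this in, per the registerFlags comment about init functions; the package name here is hypothetical.

package sometest

import (
	"flag"

	"gvisor.dev/gvisor/test/packetimpact/testbench"
)

func init() {
	// Parse the testbench flags (native, rpc_keepalive, dut_infos_json)
	// once for the whole test binary.
	testbench.Initialize(flag.CommandLine)
}
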
-
-// loadDUTInfos loads the available DUT infos from the JSON in the
-// dut_infos_json flag; it must be called after flag.Parse().
-func loadDUTInfos() error {
- var dutInfos []DUTInfo
- if err := json.Unmarshal([]byte(dutInfosJSON), &dutInfos); err != nil {
- return fmt.Errorf("failed to unmarshal JSON: %w", err)
- }
- if got, want := len(dutInfos), 1; got < want {
- return fmt.Errorf("got %d DUTs, the test requires at least %d DUTs", got, want)
- }
-	// Use a buffered channel as a semaphore over the available DUTs.
- dutInfo = make(chan *DUTInfo, len(dutInfos))
- for i := range dutInfos {
- dutInfos[i].Net.LocalIPv4 = dutInfos[i].Net.LocalIPv4.To4()
- dutInfos[i].Net.RemoteIPv4 = dutInfos[i].Net.RemoteIPv4.To4()
- dutInfo <- &dutInfos[i]
- }
- return nil
-}
-
-// GenerateRandomPayload generates a random byte slice of the specified length,
-// causing a fatal test failure if it is unable to do so.
-func GenerateRandomPayload(t *testing.T, n int) []byte {
- t.Helper()
- buf := make([]byte, n)
- if _, err := rand.Read(buf); err != nil {
- t.Fatalf("rand.Read(buf) failed: %s", err)
- }
- return buf
-}
-
-// getDUTInfo returns information about an available DUT from the pool. If no
-// DUT is readily available, getDUTInfo blocks until one becomes available.
-func getDUTInfo() *DUTInfo {
- return <-dutInfo
-}
-
-// release returns the DUTInfo back to the pool.
-func (info *DUTInfo) release() {
- dutInfo <- info
-}
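
The getDUTInfo/release pair above is a counting semaphore built from a buffered channel; a self-contained sketch of the same pattern with plain ints:

package main

import (
	"fmt"
	"sync"
)

// A buffered channel doubles as a counting semaphore: acquiring receives a
// value, releasing sends it back. This mirrors the DUT pool above.
func main() {
	pool := make(chan int, 2)
	pool <- 1
	pool <- 2

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			dut := <-pool // blocks until a DUT is free
			fmt.Println("using DUT", dut)
			pool <- dut // return it to the pool
		}()
	}
	wg.Wait()
}
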
diff --git a/test/packetimpact/tests/BUILD b/test/packetimpact/tests/BUILD
deleted file mode 100644
index 4cff0cf4c..000000000
--- a/test/packetimpact/tests/BUILD
+++ /dev/null
@@ -1,431 +0,0 @@
-load("//test/packetimpact/runner:defs.bzl", "ALL_TESTS", "packetimpact_go_test", "packetimpact_testbench", "validate_all_tests")
-
-package(
- default_visibility = ["//test/packetimpact:__subpackages__"],
- licenses = ["notice"],
-)
-
-packetimpact_testbench(
- name = "fin_wait2_timeout",
- srcs = ["fin_wait2_timeout_test.go"],
- deps = [
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "ipv4_id_uniqueness",
- srcs = ["ipv4_id_uniqueness_test.go"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "udp_discard_mcast_source_addr",
- srcs = ["udp_discard_mcast_source_addr_test.go"],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "udp_any_addr_recv_unicast",
- srcs = ["udp_any_addr_recv_unicast_test.go"],
- deps = [
- "//pkg/tcpip",
- "//test/packetimpact/testbench",
- "@com_github_google_go_cmp//cmp:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "udp_icmp_error_propagation",
- srcs = ["udp_icmp_error_propagation_test.go"],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "tcp_window_shrink",
- srcs = ["tcp_window_shrink_test.go"],
- deps = [
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "tcp_zero_window_probe",
- srcs = ["tcp_zero_window_probe_test.go"],
- deps = [
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "tcp_zero_window_probe_retransmit",
- srcs = ["tcp_zero_window_probe_retransmit_test.go"],
- deps = [
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "tcp_zero_window_probe_usertimeout",
- srcs = ["tcp_zero_window_probe_usertimeout_test.go"],
- deps = [
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "tcp_retransmits",
- srcs = ["tcp_retransmits_test.go"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "tcp_outside_the_window",
- srcs = ["tcp_outside_the_window_test.go"],
- deps = [
- "//pkg/tcpip/header",
- "//pkg/tcpip/seqnum",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "tcp_noaccept_close_rst",
- srcs = ["tcp_noaccept_close_rst_test.go"],
- deps = [
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "tcp_send_window_sizes_piggyback",
- srcs = ["tcp_send_window_sizes_piggyback_test.go"],
- deps = [
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "tcp_unacc_seq_ack",
- srcs = ["tcp_unacc_seq_ack_test.go"],
- deps = [
- "//pkg/tcpip/header",
- "//pkg/tcpip/seqnum",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "tcp_paws_mechanism",
- srcs = ["tcp_paws_mechanism_test.go"],
- deps = [
- "//pkg/tcpip/header",
- "//pkg/tcpip/seqnum",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "tcp_user_timeout",
- srcs = ["tcp_user_timeout_test.go"],
- deps = [
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "tcp_queue_send_recv_in_syn_sent",
- srcs = ["tcp_queue_send_recv_in_syn_sent_test.go"],
- deps = [
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "tcp_synsent_reset",
- srcs = ["tcp_synsent_reset_test.go"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "tcp_synrcvd_reset",
- srcs = ["tcp_synrcvd_reset_test.go"],
- deps = [
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "tcp_network_unreachable",
- srcs = ["tcp_network_unreachable_test.go"],
- deps = [
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "tcp_cork_mss",
- srcs = ["tcp_cork_mss_test.go"],
- deps = [
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "tcp_handshake_window_size",
- srcs = ["tcp_handshake_window_size_test.go"],
- deps = [
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "tcp_timewait_reset",
- srcs = ["tcp_timewait_reset_test.go"],
- deps = [
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "icmpv6_param_problem",
- srcs = ["icmpv6_param_problem_test.go"],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "ipv6_unknown_options_action",
- srcs = ["ipv6_unknown_options_action_test.go"],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "ipv4_fragment_reassembly",
- srcs = ["ipv4_fragment_reassembly_test.go"],
- deps = [
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@com_github_google_go_cmp//cmp:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "ipv6_fragment_reassembly",
- srcs = ["ipv6_fragment_reassembly_test.go"],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@com_github_google_go_cmp//cmp:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "ipv6_fragment_icmp_error",
- srcs = ["ipv6_fragment_icmp_error_test.go"],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/buffer",
- "//pkg/tcpip/header",
- "//pkg/tcpip/network/ipv6",
- "//test/packetimpact/testbench",
- "@com_github_google_go_cmp//cmp:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "tcp_linger",
- srcs = ["tcp_linger_test.go"],
- deps = [
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "tcp_rcv_buf_space",
- srcs = ["tcp_rcv_buf_space_test.go"],
- deps = [
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "tcp_zero_receive_window",
- srcs = ["tcp_zero_receive_window_test.go"],
- deps = [
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "tcp_rack",
- srcs = ["tcp_rack_test.go"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/tcpip/header",
- "//pkg/tcpip/seqnum",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "tcp_info",
- srcs = ["tcp_info_test.go"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "tcp_fin_retransmission",
- srcs = ["tcp_fin_retransmission_test.go"],
- deps = [
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "tcp_listen_backlog",
- srcs = ["tcp_listen_backlog_test.go"],
- deps = [
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "tcp_syncookie",
- srcs = ["tcp_syncookie_test.go"],
- deps = [
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@com_github_google_go_cmp//cmp:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "tcp_connect_icmp_error",
- srcs = ["tcp_connect_icmp_error_test.go"],
- deps = [
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-packetimpact_testbench(
- name = "generic_dgram_socket_send_recv",
- srcs = ["generic_dgram_socket_send_recv_test.go"],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/header",
- "//test/packetimpact/testbench",
- "@com_github_google_go_cmp//cmp:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-validate_all_tests()
-
-[packetimpact_go_test(
- name = t.name,
- timeout = t.timeout if hasattr(t, "timeout") else "moderate",
- expect_netstack_failure = hasattr(t, "expect_netstack_failure"),
- num_duts = t.num_duts if hasattr(t, "num_duts") else 1,
-) for t in ALL_TESTS]
-
-test_suite(
- name = "all_tests",
- tags = [
- "local",
- "manual",
- "packetimpact",
- ],
- tests = existing_rules(),
-)
diff --git a/test/packetimpact/tests/fin_wait2_timeout_test.go b/test/packetimpact/tests/fin_wait2_timeout_test.go
deleted file mode 100644
index cff8ca51d..000000000
--- a/test/packetimpact/tests/fin_wait2_timeout_test.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fin_wait2_timeout_test
-
-import (
- "flag"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-func TestFinWait2Timeout(t *testing.T) {
- for _, tt := range []struct {
- description string
- linger2 bool
- }{
- {"WithLinger2", true},
- {"WithoutLinger2", false},
- } {
- t.Run(tt.description, func(t *testing.T) {
- dut := testbench.NewDUT(t)
- listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(t, listenFd)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close(t)
- conn.Connect(t)
-
- acceptFd, _ := dut.Accept(t, listenFd)
- if tt.linger2 {
- tv := unix.Timeval{Sec: 1, Usec: 0}
- dut.SetSockOptTimeval(t, acceptFd, unix.SOL_TCP, unix.TCP_LINGER2, &tv)
- }
- dut.Close(t, acceptFd)
-
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil {
- t.Fatalf("expected a FIN-ACK within 1 second but got none: %s", err)
- }
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
-
- time.Sleep(5 * time.Second)
- conn.Drain(t)
-
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
- if tt.linger2 {
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst)}, time.Second); err != nil {
- t.Fatalf("expected a RST packet within a second but got none: %s", err)
- }
- } else {
- if got, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst)}, 10*time.Second); got != nil || err == nil {
- t.Fatalf("expected no RST packets within ten seconds but got one: %s", got)
- }
- }
- })
- }
-}
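The test above sets TCP_LINGER2 through the DUT's RPC helpers. For orientation only, on a plain Linux TCP socket the same option is usually set as an integer number of seconds bounding how long the socket may sit in FIN-WAIT-2; the sketch below assumes a local Linux host and golang.org/x/sys/unix and is illustrative rather than part of the testbench.

```go
package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Create an ordinary TCP socket.
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, unix.IPPROTO_TCP)
	if err != nil {
		log.Fatalf("socket: %v", err)
	}
	defer unix.Close(fd)

	// Bound the FIN-WAIT-2 lifetime to 1 second. After this socket sends its
	// FIN and the peer ACKs it, the kernel may drop the connection state once
	// the timer expires instead of waiting for the system-wide default.
	if err := unix.SetsockoptInt(fd, unix.IPPROTO_TCP, unix.TCP_LINGER2, 1); err != nil {
		log.Fatalf("setsockopt(TCP_LINGER2): %v", err)
	}
}
```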
diff --git a/test/packetimpact/tests/generic_dgram_socket_send_recv_test.go b/test/packetimpact/tests/generic_dgram_socket_send_recv_test.go
deleted file mode 100644
index a9ffafc74..000000000
--- a/test/packetimpact/tests/generic_dgram_socket_send_recv_test.go
+++ /dev/null
@@ -1,781 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package generic_dgram_socket_send_recv_test
-
-import (
- "context"
- "flag"
- "fmt"
- "net"
- "testing"
- "time"
-
- "github.com/google/go-cmp/cmp"
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-const (
-	// Even though sockets allow larger datagrams, we don't test them here
-	// because they would need to be fragmented and written out as individual
-	// frames.
-
- maxICMPv4PayloadSize = header.IPv4MinimumMTU - header.EthernetMinimumSize - header.IPv4MinimumSize - header.ICMPv4MinimumSize
- maxICMPv6PayloadSize = header.IPv6MinimumMTU - header.EthernetMinimumSize - header.IPv6MinimumSize - header.ICMPv6MinimumSize
- maxUDPv4PayloadSize = header.IPv4MinimumMTU - header.EthernetMinimumSize - header.IPv4MinimumSize - header.UDPMinimumSize
- maxUDPv6PayloadSize = header.IPv6MinimumMTU - header.EthernetMinimumSize - header.IPv6MinimumSize - header.UDPMinimumSize
-)
-
-func maxUDPPayloadSize(addr net.IP) int {
- if addr.To4() != nil {
- return maxUDPv4PayloadSize
- }
- return maxUDPv6PayloadSize
-}
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-func expectedEthLayer(t *testing.T, dut testbench.DUT, socketFD int32, sendTo net.IP) testbench.Layer {
- t.Helper()
- dst := func() tcpip.LinkAddress {
- if isBroadcast(dut, sendTo) {
- dut.SetSockOptInt(t, socketFD, unix.SOL_SOCKET, unix.SO_BROADCAST, 1)
-
- // When sending to broadcast (subnet or limited), the expected ethernet
- // address is also broadcast.
- return header.EthernetBroadcastAddress
- }
- if sendTo.IsMulticast() {
- if sendTo4 := sendTo.To4(); sendTo4 != nil {
- return header.EthernetAddressFromMulticastIPv4Address(tcpip.Address(sendTo4))
- }
- return header.EthernetAddressFromMulticastIPv6Address(tcpip.Address(sendTo.To16()))
- }
- return ""
- }()
- var ether testbench.Ether
- if len(dst) != 0 {
- ether.DstAddr = &dst
- }
- return &ether
-}
-
-type protocolTest interface {
- Name() string
- Send(t *testing.T, dut testbench.DUT, bindTo, sendTo net.IP, bindToDevice bool)
- Receive(t *testing.T, dut testbench.DUT, bindTo, sendTo net.IP, bindToDevice bool)
-}
-
-func TestSocket(t *testing.T) {
- dut := testbench.NewDUT(t)
- subnetBroadcast := dut.Net.SubnetBroadcast()
-
- for _, proto := range []protocolTest{
- &icmpV4Test{},
- &icmpV6Test{},
- &udpTest{},
- } {
- t.Run(proto.Name(), func(t *testing.T) {
- // Test every combination of bound/unbound, broadcast/multicast/unicast
- // bound/destination address, and bound/not-bound to device.
- for _, bindTo := range []net.IP{
- nil, // Do not bind.
- net.IPv4zero,
- net.IPv4bcast,
- net.IPv4allsys,
- net.IPv6zero,
- subnetBroadcast,
- dut.Net.RemoteIPv4,
- dut.Net.RemoteIPv6,
- } {
- t.Run(fmt.Sprintf("bindTo=%s", bindTo), func(t *testing.T) {
- for _, sendTo := range []net.IP{
- net.IPv4bcast,
- net.IPv4allsys,
- subnetBroadcast,
- dut.Net.LocalIPv4,
- dut.Net.LocalIPv6,
- dut.Net.RemoteIPv4,
- dut.Net.RemoteIPv6,
- } {
- t.Run(fmt.Sprintf("sendTo=%s", sendTo), func(t *testing.T) {
- for _, bindToDevice := range []bool{true, false} {
- t.Run(fmt.Sprintf("bindToDevice=%t", bindToDevice), func(t *testing.T) {
- t.Run("Send", func(t *testing.T) {
- proto.Send(t, dut, bindTo, sendTo, bindToDevice)
- })
- t.Run("Receive", func(t *testing.T) {
- proto.Receive(t, dut, bindTo, sendTo, bindToDevice)
- })
- })
- }
- })
- }
- })
- }
- })
- }
-}
-
-type icmpV4TestEnv struct {
- socketFD int32
- ident uint16
- conn testbench.IPv4Conn
- layers testbench.Layers
-}
-
-type icmpV4Test struct{}
-
-func (test *icmpV4Test) setup(t *testing.T, dut testbench.DUT, bindTo, sendTo net.IP, bindToDevice bool) icmpV4TestEnv {
- t.Helper()
-
- // Tell the DUT to create a socket.
- var socketFD int32
- var ident uint16
-
- if bindTo != nil {
- socketFD, ident = dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_ICMP, bindTo)
- } else {
- // An unbound socket will auto-bind to INADDR_ANY.
- socketFD = dut.Socket(t, unix.AF_INET, unix.SOCK_DGRAM, unix.IPPROTO_ICMP)
- }
- t.Cleanup(func() {
- dut.Close(t, socketFD)
- })
-
- if bindToDevice {
- dut.SetSockOpt(t, socketFD, unix.SOL_SOCKET, unix.SO_BINDTODEVICE, []byte(dut.Net.RemoteDevName))
- }
-
- // Create a socket on the test runner.
- conn := dut.Net.NewIPv4Conn(t, testbench.IPv4{}, testbench.IPv4{})
- t.Cleanup(func() {
- conn.Close(t)
- })
-
- return icmpV4TestEnv{
- socketFD: socketFD,
- ident: ident,
- conn: conn,
- layers: testbench.Layers{
- expectedEthLayer(t, dut, socketFD, sendTo),
- &testbench.IPv4{
- DstAddr: testbench.Address(tcpip.Address(sendTo.To4())),
- },
- },
- }
-}
-
-var _ protocolTest = (*icmpV4Test)(nil)
-
-func (*icmpV4Test) Name() string { return "icmpv4" }
-
-func (test *icmpV4Test) Send(t *testing.T, dut testbench.DUT, bindTo, sendTo net.IP, bindToDevice bool) {
- if bindTo.To4() == nil || isBroadcastOrMulticast(dut, bindTo) {
- // ICMPv4 sockets cannot bind to IPv6, broadcast, or multicast
- // addresses.
- return
- }
-
- isV4 := sendTo.To4() != nil
-
- // TODO(gvisor.dev/issue/5681): Remove this case once ICMP sockets allow
- // sending to broadcast and multicast addresses.
- if (dut.Uname.IsGvisor() || dut.Uname.IsFuchsia()) && isV4 && isBroadcastOrMulticast(dut, sendTo) {
-		// expectPacket cannot be false here. In some cases the packet is still
-		// sent, but with the IPv4 destination incorrectly set to RemoteIPv4.
-		// It's all bad and not worth the effort to create a special case when
-		// this occurs.
- t.Skip("TODO(gvisor.dev/issue/5681): Allow sending to broadcast and multicast addresses with ICMP sockets.")
- }
-
- expectPacket := isV4 && !sendTo.Equal(dut.Net.RemoteIPv4)
- switch {
- case bindTo.Equal(dut.Net.RemoteIPv4):
- // If we're explicitly bound to an interface's unicast address,
- // packets are always sent on that interface.
- case bindToDevice:
- // If we're explicitly bound to an interface, packets are always
- // sent on that interface.
- case !sendTo.Equal(net.IPv4bcast) && !sendTo.IsMulticast():
- // If we're not sending to limited broadcast or multicast, the route
- // table will be consulted and packets will be sent on the correct
- // interface.
- default:
- expectPacket = false
- }
-
- env := test.setup(t, dut, bindTo, sendTo, bindToDevice)
-
- for name, payload := range map[string][]byte{
- "empty": nil,
- "small": []byte("hello world"),
- "random1k": testbench.GenerateRandomPayload(t, maxICMPv4PayloadSize),
- } {
- t.Run(name, func(t *testing.T) {
- icmpLayer := &testbench.ICMPv4{
- Type: testbench.ICMPv4Type(header.ICMPv4Echo),
- Payload: payload,
- }
- bytes, err := icmpLayer.ToBytes()
- if err != nil {
- t.Fatalf("icmpLayer.ToBytes() = %s", err)
- }
- destSockaddr := unix.SockaddrInet4{}
- copy(destSockaddr.Addr[:], sendTo.To4())
-
- // Tell the DUT to send a packet out the ICMP socket.
- if got, want := dut.SendTo(t, env.socketFD, bytes, 0, &destSockaddr), len(bytes); int(got) != want {
- t.Fatalf("got dut.SendTo = %d, want %d", got, want)
- }
-
- // Verify the test runner received an ICMP packet with the correctly
- // set "ident".
- if env.ident != 0 {
- icmpLayer.Ident = &env.ident
- }
- want := append(env.layers, icmpLayer)
- if got, ok := env.conn.ListenForFrame(t, want, time.Second); !ok && expectPacket {
- t.Fatalf("did not receive expected frame matching %s\nGot frames: %s", want, got)
- } else if ok && !expectPacket {
- matchedFrame := got[len(got)-1]
- t.Fatalf("got unexpected frame matching %s\nGot frame: %s", want, matchedFrame)
- }
- })
- }
-}
-
-func (test *icmpV4Test) Receive(t *testing.T, dut testbench.DUT, bindTo, sendTo net.IP, bindToDevice bool) {
- if bindTo.To4() == nil || isBroadcastOrMulticast(dut, bindTo) {
- // ICMPv4 sockets cannot bind to IPv6, broadcast, or multicast
- // addresses.
- return
- }
-
- expectPacket := (bindTo.Equal(dut.Net.RemoteIPv4) || bindTo.Equal(net.IPv4zero)) && sendTo.Equal(dut.Net.RemoteIPv4)
-
- // TODO(gvisor.dev/issue/5763): Remove this if statement once gVisor
- // restricts ICMP sockets to receive only from unicast addresses.
- if (dut.Uname.IsGvisor() || dut.Uname.IsFuchsia()) && bindTo.Equal(net.IPv4zero) && isBroadcastOrMulticast(dut, sendTo) {
- expectPacket = true
- }
-
- env := test.setup(t, dut, bindTo, sendTo, bindToDevice)
-
- for name, payload := range map[string][]byte{
- "empty": nil,
- "small": []byte("hello world"),
- "random1k": testbench.GenerateRandomPayload(t, maxICMPv4PayloadSize),
- } {
- t.Run(name, func(t *testing.T) {
- icmpLayer := &testbench.ICMPv4{
- Type: testbench.ICMPv4Type(header.ICMPv4EchoReply),
- Payload: payload,
- }
- if env.ident != 0 {
- icmpLayer.Ident = &env.ident
- }
-
- // Send an ICMPv4 packet from the test runner to the DUT.
- frame := env.conn.CreateFrame(t, env.layers, icmpLayer)
- env.conn.SendFrame(t, frame)
-
- // Verify the behavior of the ICMP socket on the DUT.
- if expectPacket {
- payload, err := icmpLayer.ToBytes()
- if err != nil {
- t.Fatalf("icmpLayer.ToBytes() = %s", err)
- }
-
- // Receive one extra byte to assert the length of the
- // packet received in the case where the packet contains
- // more data than expected.
- len := int32(len(payload)) + 1
- got, want := dut.Recv(t, env.socketFD, len, 0), payload
- if diff := cmp.Diff(want, got); diff != "" {
- t.Errorf("received payload does not match sent payload, diff (-want, +got):\n%s", diff)
- }
- } else {
- // Expected receive error, set a short receive timeout.
- dut.SetSockOptTimeval(
- t,
- env.socketFD,
- unix.SOL_SOCKET,
- unix.SO_RCVTIMEO,
- &unix.Timeval{
- Sec: 1,
- Usec: 0,
- },
- )
- ret, recvPayload, errno := dut.RecvWithErrno(context.Background(), t, env.socketFD, maxICMPv4PayloadSize, 0)
-				if errno != unix.EAGAIN && errno != unix.EWOULDBLOCK {
- t.Errorf("Recv got unexpected result, ret=%d, payload=%q, errno=%s", ret, recvPayload, errno)
- }
- }
- })
- }
-}
-
-type icmpV6TestEnv struct {
- socketFD int32
- ident uint16
- conn testbench.IPv6Conn
- layers testbench.Layers
-}
-
-// icmpV6Test and icmpV4Test look substantially similar at first glance, but have
-// enough subtle differences in setup and test expectations to discourage
-// refactoring:
-// - Different IP layers
-// - Different testbench.Connections
-// - Different UNIX domain and proto arguments
-// - Different expectPacket and wantErrno for send and receive
-type icmpV6Test struct{}
-
-func (test *icmpV6Test) setup(t *testing.T, dut testbench.DUT, bindTo, sendTo net.IP, bindToDevice bool) icmpV6TestEnv {
- t.Helper()
-
- // Tell the DUT to create a socket.
- var socketFD int32
- var ident uint16
-
- if bindTo != nil {
- socketFD, ident = dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_ICMPV6, bindTo)
- } else {
- // An unbound socket will auto-bind to IN6ADDR_ANY_INIT.
- socketFD = dut.Socket(t, unix.AF_INET6, unix.SOCK_DGRAM, unix.IPPROTO_ICMPV6)
- }
- t.Cleanup(func() {
- dut.Close(t, socketFD)
- })
-
- if bindToDevice {
- dut.SetSockOpt(t, socketFD, unix.SOL_SOCKET, unix.SO_BINDTODEVICE, []byte(dut.Net.RemoteDevName))
- }
-
- // Create a socket on the test runner.
- conn := dut.Net.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})
- t.Cleanup(func() {
- conn.Close(t)
- })
-
- return icmpV6TestEnv{
- socketFD: socketFD,
- ident: ident,
- conn: conn,
- layers: testbench.Layers{
- expectedEthLayer(t, dut, socketFD, sendTo),
- &testbench.IPv6{
- DstAddr: testbench.Address(tcpip.Address(sendTo.To16())),
- },
- },
- }
-}
-
-var _ protocolTest = (*icmpV6Test)(nil)
-
-func (*icmpV6Test) Name() string { return "icmpv6" }
-
-func (test *icmpV6Test) Send(t *testing.T, dut testbench.DUT, bindTo, sendTo net.IP, bindToDevice bool) {
- if bindTo.To4() != nil || bindTo.IsMulticast() {
- // ICMPv6 sockets cannot bind to IPv4 or multicast addresses.
- return
- }
-
- expectPacket := sendTo.Equal(dut.Net.LocalIPv6)
- wantErrno := unix.Errno(0)
-
- if sendTo.To4() != nil {
- wantErrno = unix.EINVAL
- }
-
- // TODO(gvisor.dev/issue/5966): Remove this if statement once ICMPv6 sockets
- // return EINVAL after calling sendto with an IPv4 address.
- if (dut.Uname.IsGvisor() || dut.Uname.IsFuchsia()) && sendTo.To4() != nil {
- switch {
- case bindTo.Equal(dut.Net.RemoteIPv6):
- wantErrno = unix.ENETUNREACH
- case bindTo.Equal(net.IPv6zero) || bindTo == nil:
- wantErrno = unix.Errno(0)
- }
- }
-
- env := test.setup(t, dut, bindTo, sendTo, bindToDevice)
-
- for name, payload := range map[string][]byte{
- "empty": nil,
- "small": []byte("hello world"),
- "random1k": testbench.GenerateRandomPayload(t, maxICMPv6PayloadSize),
- } {
- t.Run(name, func(t *testing.T) {
- icmpLayer := &testbench.ICMPv6{
- Type: testbench.ICMPv6Type(header.ICMPv6EchoRequest),
- Payload: payload,
- }
- bytes, err := icmpLayer.ToBytes()
- if err != nil {
- t.Fatalf("icmpLayer.ToBytes() = %s", err)
- }
- destSockaddr := unix.SockaddrInet6{
- ZoneId: dut.Net.RemoteDevID,
- }
- copy(destSockaddr.Addr[:], sendTo.To16())
-
- // Tell the DUT to send a packet out the ICMPv6 socket.
- gotRet, gotErrno := dut.SendToWithErrno(context.Background(), t, env.socketFD, bytes, 0, &destSockaddr)
-
- if gotErrno != wantErrno {
- t.Fatalf("got dut.SendToWithErrno(_, _, %d, _, _, %s) = (_, %s), want = (_, %s)", env.socketFD, sendTo, gotErrno, wantErrno)
- }
- if wantErrno != 0 {
- return
- }
- if got, want := int(gotRet), len(bytes); got != want {
- t.Fatalf("got dut.SendToWithErrno(_, _, %d, _, _, %s) = (%d, _), want = (%d, _)", env.socketFD, sendTo, got, want)
- }
-
- // Verify the test runner received an ICMPv6 packet with the
- // correctly set "ident".
- if env.ident != 0 {
- icmpLayer.Ident = &env.ident
- }
- want := append(env.layers, icmpLayer)
- if got, ok := env.conn.ListenForFrame(t, want, time.Second); !ok && expectPacket {
- t.Fatalf("did not receive expected frame matching %s\nGot frames: %s", want, got)
- } else if ok && !expectPacket {
- matchedFrame := got[len(got)-1]
- t.Fatalf("got unexpected frame matching %s\nGot frame: %s", want, matchedFrame)
- }
- })
- }
-}
-
-func (test *icmpV6Test) Receive(t *testing.T, dut testbench.DUT, bindTo, sendTo net.IP, bindToDevice bool) {
- if bindTo.To4() != nil || bindTo.IsMulticast() {
- // ICMPv6 sockets cannot bind to IPv4 or multicast addresses.
- return
- }
-
- expectPacket := true
- switch {
- case bindTo.Equal(dut.Net.RemoteIPv6) && sendTo.Equal(dut.Net.RemoteIPv6):
- case bindTo.Equal(net.IPv6zero) && sendTo.Equal(dut.Net.RemoteIPv6):
- case bindTo.Equal(net.IPv6zero) && sendTo.Equal(net.IPv6linklocalallnodes):
- default:
- expectPacket = false
- }
-
- // TODO(gvisor.dev/issue/5763): Remove this if statement once gVisor
- // restricts ICMP sockets to receive only from unicast addresses.
- if (dut.Uname.IsGvisor() || dut.Uname.IsFuchsia()) && bindTo.Equal(net.IPv6zero) && isBroadcastOrMulticast(dut, sendTo) {
- expectPacket = false
- }
-
- env := test.setup(t, dut, bindTo, sendTo, bindToDevice)
-
- for name, payload := range map[string][]byte{
- "empty": nil,
- "small": []byte("hello world"),
- "random1k": testbench.GenerateRandomPayload(t, maxICMPv6PayloadSize),
- } {
- t.Run(name, func(t *testing.T) {
- icmpLayer := &testbench.ICMPv6{
- Type: testbench.ICMPv6Type(header.ICMPv6EchoReply),
- Payload: payload,
- }
- if env.ident != 0 {
- icmpLayer.Ident = &env.ident
- }
-
- // Send an ICMPv6 packet from the test runner to the DUT.
- frame := env.conn.CreateFrame(t, env.layers, icmpLayer)
- env.conn.SendFrame(t, frame)
-
- // Verify the behavior of the ICMPv6 socket on the DUT.
- if expectPacket {
- payload, err := icmpLayer.ToBytes()
- if err != nil {
- t.Fatalf("icmpLayer.ToBytes() = %s", err)
- }
-
- // Receive one extra byte to assert the length of the
- // packet received in the case where the packet contains
- // more data than expected.
- len := int32(len(payload)) + 1
- got, want := dut.Recv(t, env.socketFD, len, 0), payload
- if diff := cmp.Diff(want, got); diff != "" {
- t.Errorf("received payload does not match sent payload, diff (-want, +got):\n%s", diff)
- }
- } else {
- // Expected receive error, set a short receive timeout.
- dut.SetSockOptTimeval(
- t,
- env.socketFD,
- unix.SOL_SOCKET,
- unix.SO_RCVTIMEO,
- &unix.Timeval{
- Sec: 1,
- Usec: 0,
- },
- )
- ret, recvPayload, errno := dut.RecvWithErrno(context.Background(), t, env.socketFD, maxICMPv6PayloadSize, 0)
-				if errno != unix.EAGAIN && errno != unix.EWOULDBLOCK {
- t.Errorf("Recv got unexpected result, ret=%d, payload=%q, errno=%s", ret, recvPayload, errno)
- }
- }
- })
- }
-}
-
-type udpConn interface {
- SrcPort(*testing.T) uint16
- SendFrame(*testing.T, testbench.Layers, ...testbench.Layer)
- ListenForFrame(*testing.T, testbench.Layers, time.Duration) ([]testbench.Layers, bool)
- Close(*testing.T)
-}
-
-type udpTestEnv struct {
- socketFD int32
- conn udpConn
- layers testbench.Layers
-}
-
-type udpTest struct{}
-
-func (test *udpTest) setup(t *testing.T, dut testbench.DUT, bindTo, sendTo net.IP, bindToDevice bool) udpTestEnv {
- t.Helper()
-
- var (
- socketFD int32
- outgoingUDP, incomingUDP testbench.UDP
- )
-
- // Tell the DUT to create a socket.
- if bindTo != nil {
- var remotePort uint16
- socketFD, remotePort = dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, bindTo)
- outgoingUDP.DstPort = &remotePort
- incomingUDP.SrcPort = &remotePort
- } else {
- // An unbound socket will auto-bind to INADDR_ANY.
- socketFD = dut.Socket(t, unix.AF_INET6, unix.SOCK_DGRAM, unix.IPPROTO_UDP)
- }
- t.Cleanup(func() {
- dut.Close(t, socketFD)
- })
-
- if bindToDevice {
- dut.SetSockOpt(t, socketFD, unix.SOL_SOCKET, unix.SO_BINDTODEVICE, []byte(dut.Net.RemoteDevName))
- }
-
- // Create a socket on the test runner.
- var conn udpConn
- var ipLayer testbench.Layer
- if addr := sendTo.To4(); addr != nil {
- udpConn := dut.Net.NewUDPIPv4(t, outgoingUDP, incomingUDP)
- conn = &udpConn
- ipLayer = &testbench.IPv4{
- DstAddr: testbench.Address(tcpip.Address(addr)),
- }
- } else {
- udpConn := dut.Net.NewUDPIPv6(t, outgoingUDP, incomingUDP)
- conn = &udpConn
- ipLayer = &testbench.IPv6{
- DstAddr: testbench.Address(tcpip.Address(sendTo.To16())),
- }
- }
- t.Cleanup(func() {
- conn.Close(t)
- })
-
- return udpTestEnv{
- socketFD: socketFD,
- conn: conn,
- layers: testbench.Layers{
- expectedEthLayer(t, dut, socketFD, sendTo),
- ipLayer,
- &incomingUDP,
- },
- }
-}
-
-var _ protocolTest = (*udpTest)(nil)
-
-func (*udpTest) Name() string { return "udp" }
-
-func (test *udpTest) Send(t *testing.T, dut testbench.DUT, bindTo, sendTo net.IP, bindToDevice bool) {
- canSend := bindTo == nil || bindTo.Equal(net.IPv6zero) || sameIPVersion(sendTo, bindTo)
- expectPacket := canSend && !isRemoteAddr(dut, sendTo)
- switch {
- case bindTo.Equal(dut.Net.RemoteIPv4):
- // If we're explicitly bound to an interface's unicast address,
- // packets are always sent on that interface.
- case bindToDevice:
- // If we're explicitly bound to an interface, packets are always
- // sent on that interface.
- case !sendTo.Equal(net.IPv4bcast) && !sendTo.IsMulticast():
- // If we're not sending to limited broadcast, multicast, or local, the
- // route table will be consulted and packets will be sent on the correct
- // interface.
- default:
- expectPacket = false
- }
-
- wantErrno := unix.Errno(0)
- switch {
- case !canSend && bindTo.To4() != nil:
- wantErrno = unix.EAFNOSUPPORT
- case !canSend && bindTo.To4() == nil:
- wantErrno = unix.ENETUNREACH
- }
-
-	// TODO(gvisor.dev/issue/5967): Remove this if statement once UDPv4 sockets
-	// return EAFNOSUPPORT after calling sendto with an IPv6 address.
- if dut.Uname.IsGvisor() && !canSend && bindTo.To4() != nil {
- wantErrno = unix.EINVAL
- }
-
- env := test.setup(t, dut, bindTo, sendTo, bindToDevice)
-
- for name, payload := range map[string][]byte{
- "empty": nil,
- "small": []byte("hello world"),
- "random1k": testbench.GenerateRandomPayload(t, maxUDPPayloadSize(bindTo)),
- } {
- t.Run(name, func(t *testing.T) {
- var destSockaddr unix.Sockaddr
- if sendTo4 := sendTo.To4(); sendTo4 != nil {
- addr := unix.SockaddrInet4{
- Port: int(env.conn.SrcPort(t)),
- }
- copy(addr.Addr[:], sendTo4)
- destSockaddr = &addr
- } else {
- addr := unix.SockaddrInet6{
- Port: int(env.conn.SrcPort(t)),
- ZoneId: dut.Net.RemoteDevID,
- }
- copy(addr.Addr[:], sendTo.To16())
- destSockaddr = &addr
- }
-
- // Tell the DUT to send a packet out the UDP socket.
- gotRet, gotErrno := dut.SendToWithErrno(context.Background(), t, env.socketFD, payload, 0, destSockaddr)
-
- if gotErrno != wantErrno {
- t.Fatalf("got dut.SendToWithErrno(_, _, %d, _, _, %s) = (_, %s), want = (_, %s)", env.socketFD, sendTo, gotErrno, wantErrno)
- }
- if wantErrno != 0 {
- return
- }
- if got, want := int(gotRet), len(payload); got != want {
- t.Fatalf("got dut.SendToWithErrno(_, _, %d, _, _, %s) = (%d, _), want = (%d, _)", env.socketFD, sendTo, got, want)
- }
-
- // Verify the test runner received a UDP packet with the
- // correct payload.
- want := append(env.layers, &testbench.Payload{
- Bytes: payload,
- })
- if got, ok := env.conn.ListenForFrame(t, want, time.Second); !ok && expectPacket {
- t.Fatalf("did not receive expected frame matching %s\nGot frames: %s", want, got)
- } else if ok && !expectPacket {
- matchedFrame := got[len(got)-1]
- t.Fatalf("got unexpected frame matching %s\nGot frame: %s", want, matchedFrame)
- }
- })
- }
-}
-
-func (test *udpTest) Receive(t *testing.T, dut testbench.DUT, bindTo, sendTo net.IP, bindToDevice bool) {
- subnetBroadcast := dut.Net.SubnetBroadcast()
-
- expectPacket := true
- switch {
- case bindTo.Equal(sendTo):
- case bindTo.Equal(net.IPv4zero) && sameIPVersion(bindTo, sendTo) && !sendTo.Equal(dut.Net.LocalIPv4):
- case bindTo.Equal(net.IPv6zero) && isBroadcast(dut, sendTo):
- case bindTo.Equal(net.IPv6zero) && isRemoteAddr(dut, sendTo):
- case bindTo.Equal(subnetBroadcast) && sendTo.Equal(subnetBroadcast):
- default:
- expectPacket = false
- }
-
- // TODO(gvisor.dev/issue/5956): Remove this if statement once gVisor
- // restricts ICMP sockets to receive only from unicast addresses.
- if (dut.Uname.IsGvisor() || dut.Uname.IsFuchsia()) && bindTo.Equal(net.IPv6zero) && sendTo.Equal(net.IPv4allsys) {
- expectPacket = true
- }
-
- env := test.setup(t, dut, bindTo, sendTo, bindToDevice)
- maxPayloadSize := maxUDPPayloadSize(bindTo)
-
- for name, payload := range map[string][]byte{
- "empty": nil,
- "small": []byte("hello world"),
- "random1k": testbench.GenerateRandomPayload(t, maxPayloadSize),
- } {
- t.Run(name, func(t *testing.T) {
- // Send a UDP packet from the test runner to the DUT.
- env.conn.SendFrame(t, env.layers, &testbench.Payload{Bytes: payload})
-
-			// Verify the behavior of the UDP socket on the DUT.
- if expectPacket {
- // Receive one extra byte to assert the length of the
- // packet received in the case where the packet contains
- // more data than expected.
- len := int32(len(payload)) + 1
- got, want := dut.Recv(t, env.socketFD, len, 0), payload
- if diff := cmp.Diff(want, got); diff != "" {
- t.Errorf("received payload does not match sent payload, diff (-want, +got):\n%s", diff)
- }
- } else {
- // Expected receive error, set a short receive timeout.
- dut.SetSockOptTimeval(
- t,
- env.socketFD,
- unix.SOL_SOCKET,
- unix.SO_RCVTIMEO,
- &unix.Timeval{
- Sec: 1,
- Usec: 0,
- },
- )
- ret, recvPayload, errno := dut.RecvWithErrno(context.Background(), t, env.socketFD, int32(maxPayloadSize), 0)
-				if errno != unix.EAGAIN && errno != unix.EWOULDBLOCK {
- t.Errorf("Recv got unexpected result, ret=%d, payload=%q, errno=%s", ret, recvPayload, errno)
- }
- }
- })
- }
-}
-
-func isBroadcast(dut testbench.DUT, ip net.IP) bool {
- return ip.Equal(net.IPv4bcast) || ip.Equal(dut.Net.SubnetBroadcast())
-}
-
-func isBroadcastOrMulticast(dut testbench.DUT, ip net.IP) bool {
- return isBroadcast(dut, ip) || ip.IsMulticast()
-}
-
-func sameIPVersion(a, b net.IP) bool {
- return (a.To4() == nil) == (b.To4() == nil)
-}
-
-func isRemoteAddr(dut testbench.DUT, ip net.IP) bool {
- return ip.Equal(dut.Net.RemoteIPv4) || ip.Equal(dut.Net.RemoteIPv6)
-}
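expectedEthLayer in the file above relies on the standard mapping from multicast IP addresses to multicast Ethernet addresses (RFC 1112 for IPv4, RFC 2464 for IPv6). The sketch below shows that mapping with the standard library only; the multicastMAC helper is illustrative and not a gVisor or testbench function.

```go
package main

import (
	"fmt"
	"net"
)

// multicastMAC returns the Ethernet address a multicast IP maps to:
//   - IPv4: 01:00:5e plus the low 23 bits of the address (RFC 1112).
//   - IPv6: 33:33 plus the last four bytes of the address (RFC 2464).
func multicastMAC(ip net.IP) net.HardwareAddr {
	if ip4 := ip.To4(); ip4 != nil {
		return net.HardwareAddr{0x01, 0x00, 0x5e, ip4[1] & 0x7f, ip4[2], ip4[3]}
	}
	ip6 := ip.To16()
	return net.HardwareAddr{0x33, 0x33, ip6[12], ip6[13], ip6[14], ip6[15]}
}

func main() {
	fmt.Println(multicastMAC(net.ParseIP("224.0.0.1"))) // 01:00:5e:00:00:01
	fmt.Println(multicastMAC(net.ParseIP("ff02::1")))   // 33:33:00:00:00:01
}
```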
diff --git a/test/packetimpact/tests/icmpv6_param_problem_test.go b/test/packetimpact/tests/icmpv6_param_problem_test.go
deleted file mode 100644
index fdabcf1ad..000000000
--- a/test/packetimpact/tests/icmpv6_param_problem_test.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package icmpv6_param_problem_test
-
-import (
- "flag"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-// TestICMPv6ParamProblemTest sends a packet with a bad next header. The DUT
-// should respond with an ICMPv6 Parameter Problem message.
-func TestICMPv6ParamProblemTest(t *testing.T) {
- dut := testbench.NewDUT(t)
- conn := dut.Net.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})
- defer conn.Close(t)
- ipv6 := testbench.IPv6{
- // 254 is reserved and used for experimentation and testing. This should
- // cause an error.
- NextHeader: testbench.Uint8(254),
- }
- icmpv6 := testbench.ICMPv6{
- Type: testbench.ICMPv6Type(header.ICMPv6EchoRequest),
- Payload: []byte("hello world"),
- }
-
- toSend := conn.CreateFrame(t, testbench.Layers{&ipv6}, &icmpv6)
- conn.SendFrame(t, toSend)
-
- // Build the expected ICMPv6 payload, which includes an index to the
- // problematic byte and also the problematic packet as described in
- // https://tools.ietf.org/html/rfc4443#page-12 .
- ipv6Sent := toSend[1:]
- expectedPayload, err := ipv6Sent.ToBytes()
- if err != nil {
- t.Fatalf("can't convert %s to bytes: %s", ipv6Sent, err)
- }
-
- expectedICMPv6 := testbench.ICMPv6{
- Type: testbench.ICMPv6Type(header.ICMPv6ParamProblem),
- Payload: expectedPayload,
- // The problematic field is the NextHeader.
- Pointer: testbench.Uint32(header.IPv6NextHeaderOffset),
- }
-
- paramProblem := testbench.Layers{
- &testbench.Ether{},
- &testbench.IPv6{},
- &expectedICMPv6,
- }
- timeout := time.Second
- if _, err := conn.ExpectFrame(t, paramProblem, timeout); err != nil {
- t.Errorf("expected %s within %s but got none: %s", paramProblem, timeout, err)
- }
-}
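The expectation in the test above follows RFC 4443 section 3.4: a Parameter Problem message carries a 4-byte pointer into the invoking packet (here offset 6, the IPv6 Next Header field) followed by as much of that packet as fits. A rough sketch of that wire layout using raw bytes is below; the function name is illustrative, the checksum is left zeroed, and this is not the testbench's encoding.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// paramProblemBody builds the body of an ICMPv6 Parameter Problem message:
// type (4), code, a zero checksum placeholder, a 4-byte pointer, and then as
// much of the invoking packet as fits.
func paramProblemBody(code uint8, pointer uint32, invokingPacket []byte) []byte {
	msg := make([]byte, 8+len(invokingPacket))
	msg[0] = 4                                    // ICMPv6 Parameter Problem
	msg[1] = code                                 // e.g. 1: unrecognized Next Header type
	binary.BigEndian.PutUint32(msg[4:8], pointer) // offset of the offending byte
	copy(msg[8:], invokingPacket)
	return msg
}

func main() {
	// Pointer 6 is the offset of the Next Header field within the IPv6 header.
	body := paramProblemBody(1, 6, []byte("invoking packet bytes"))
	fmt.Printf("% x\n", body)
}
```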
diff --git a/test/packetimpact/tests/ipv4_fragment_reassembly_test.go b/test/packetimpact/tests/ipv4_fragment_reassembly_test.go
deleted file mode 100644
index 707f0f1f5..000000000
--- a/test/packetimpact/tests/ipv4_fragment_reassembly_test.go
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ipv4_fragment_reassembly_test
-
-import (
- "flag"
- "math/rand"
- "testing"
- "time"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-type fragmentInfo struct {
- offset uint16
- size uint16
- more uint8
- id uint16
-}
-
-func TestIPv4FragmentReassembly(t *testing.T) {
- icmpv4ProtoNum := uint8(header.ICMPv4ProtocolNumber)
-
- tests := []struct {
- description string
- ipPayloadLen int
- fragments []fragmentInfo
- expectReply bool
- }{
- {
- description: "basic reassembly",
- ipPayloadLen: 3000,
- fragments: []fragmentInfo{
- {offset: 0, size: 1000, id: 5, more: header.IPv4FlagMoreFragments},
- {offset: 1000, size: 1000, id: 5, more: header.IPv4FlagMoreFragments},
- {offset: 2000, size: 1000, id: 5, more: 0},
- },
- expectReply: true,
- },
- {
- description: "out of order fragments",
- ipPayloadLen: 3000,
- fragments: []fragmentInfo{
- {offset: 2000, size: 1000, id: 6, more: 0},
- {offset: 0, size: 1000, id: 6, more: header.IPv4FlagMoreFragments},
- {offset: 1000, size: 1000, id: 6, more: header.IPv4FlagMoreFragments},
- },
- expectReply: true,
- },
- {
- description: "duplicated fragments",
- ipPayloadLen: 3000,
- fragments: []fragmentInfo{
- {offset: 0, size: 1000, id: 7, more: header.IPv4FlagMoreFragments},
- {offset: 1000, size: 1000, id: 7, more: header.IPv4FlagMoreFragments},
- {offset: 1000, size: 1000, id: 7, more: header.IPv4FlagMoreFragments},
- {offset: 2000, size: 1000, id: 7, more: 0},
- },
- expectReply: true,
- },
- {
- description: "fragment subset",
- ipPayloadLen: 3000,
- fragments: []fragmentInfo{
- {offset: 0, size: 1000, id: 8, more: header.IPv4FlagMoreFragments},
- {offset: 1000, size: 1000, id: 8, more: header.IPv4FlagMoreFragments},
- {offset: 512, size: 256, id: 8, more: header.IPv4FlagMoreFragments},
- {offset: 2000, size: 1000, id: 8, more: 0},
- },
- expectReply: true,
- },
- {
- description: "fragment overlap",
- ipPayloadLen: 3000,
- fragments: []fragmentInfo{
- {offset: 0, size: 1000, id: 9, more: header.IPv4FlagMoreFragments},
- {offset: 1512, size: 1000, id: 9, more: header.IPv4FlagMoreFragments},
- {offset: 1000, size: 1000, id: 9, more: header.IPv4FlagMoreFragments},
- {offset: 2000, size: 1000, id: 9, more: 0},
- },
- expectReply: false,
- },
- }
-
- for _, test := range tests {
- t.Run(test.description, func(t *testing.T) {
- dut := testbench.NewDUT(t)
- conn := dut.Net.NewIPv4Conn(t, testbench.IPv4{}, testbench.IPv4{})
- defer conn.Close(t)
-
- data := make([]byte, test.ipPayloadLen)
- icmp := header.ICMPv4(data[:header.ICMPv4MinimumSize])
- icmp.SetType(header.ICMPv4Echo)
- icmp.SetCode(header.ICMPv4UnusedCode)
- icmp.SetChecksum(0)
- icmp.SetSequence(0)
- icmp.SetIdent(0)
- originalPayload := data[header.ICMPv4MinimumSize:]
- if _, err := rand.Read(originalPayload); err != nil {
- t.Fatalf("rand.Read: %s", err)
- }
- cksum := header.ICMPv4Checksum(icmp, header.Checksum(originalPayload, 0 /* initial */))
- icmp.SetChecksum(cksum)
-
- for _, fragment := range test.fragments {
- conn.Send(t,
- testbench.IPv4{
- Protocol: &icmpv4ProtoNum,
- FragmentOffset: testbench.Uint16(fragment.offset),
- Flags: testbench.Uint8(fragment.more),
- ID: testbench.Uint16(fragment.id),
- },
- &testbench.Payload{
- Bytes: data[fragment.offset:][:fragment.size],
- })
- }
-
- var bytesReceived int
- reassembledPayload := make([]byte, test.ipPayloadLen)
-			// We are sending a packet fragmented into smaller parts, but the
-			// response may also be large enough to require fragmentation.
-			// Therefore we only match the Ethernet and IPv4 layers and treat
-			// everything after them as payload, rather than expecting an ICMP layer.
- for {
- incomingFrame, err := conn.ExpectFrame(t, testbench.Layers{
- &testbench.Ether{},
- &testbench.IPv4{},
- }, time.Second)
- if err != nil {
- // Either an unexpected frame was received, or none at all.
- if test.expectReply && bytesReceived < test.ipPayloadLen {
- t.Fatalf("received %d bytes out of %d, then conn.ExpectFrame(_, _, time.Second) failed with %s", bytesReceived, test.ipPayloadLen, err)
- }
- break
- }
- if !test.expectReply {
- t.Fatalf("unexpected reply received:\n%s", incomingFrame)
- }
- // We only asked for Ethernet and IPv4 so the rest should be payload.
- ipPayload, err := incomingFrame[2 /* Payload */].ToBytes()
- if err != nil {
-					t.Fatalf("failed to parse payload: incomingFrame[2].ToBytes() = (_, %s)", err)
- }
- offset := *incomingFrame[1 /* IPv4 */].(*testbench.IPv4).FragmentOffset
- if copied := copy(reassembledPayload[offset:], ipPayload); copied != len(ipPayload) {
- t.Fatalf("wrong number of bytes copied into reassembledPayload: got = %d, want = %d", copied, len(ipPayload))
- }
- bytesReceived += len(ipPayload)
- }
-
- if test.expectReply {
- if diff := cmp.Diff(originalPayload, reassembledPayload[header.ICMPv4MinimumSize:]); diff != "" {
- t.Fatalf("reassembledPayload mismatch (-want +got):\n%s", diff)
- }
- }
- })
- }
-}
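The fragments in the table above are described in byte offsets; on the wire, the IPv4 Fragment Offset field counts 8-byte units and the More Fragments flag is set on every fragment except the last (RFC 791). The sketch below shows how a payload could be sliced into (offset, MF) pairs under those rules; it is illustrative only and not how the testbench encodes its layers.

```go
package main

import "fmt"

type frag struct {
	offsetUnits uint16 // Fragment Offset field, in 8-byte units
	length      int    // payload bytes carried by this fragment
	more        bool   // More Fragments flag
}

// fragment slices a payload of payloadLen bytes into fragments of at most
// maxFrag bytes each. maxFrag must be a multiple of 8 so every fragment
// except the last starts on an 8-byte boundary.
func fragment(payloadLen, maxFrag int) []frag {
	var frags []frag
	for off := 0; off < payloadLen; off += maxFrag {
		n := payloadLen - off
		if n > maxFrag {
			n = maxFrag
		}
		frags = append(frags, frag{
			offsetUnits: uint16(off / 8),
			length:      n,
			more:        off+n < payloadLen,
		})
	}
	return frags
}

func main() {
	// 3000 bytes in 1000-byte pieces, as in the "basic reassembly" case.
	for _, f := range fragment(3000, 1000) {
		fmt.Printf("offset=%d units (%d bytes), len=%d, MF=%t\n",
			f.offsetUnits, int(f.offsetUnits)*8, f.length, f.more)
	}
}
```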
diff --git a/test/packetimpact/tests/ipv4_id_uniqueness_test.go b/test/packetimpact/tests/ipv4_id_uniqueness_test.go
deleted file mode 100644
index 2b69ceecb..000000000
--- a/test/packetimpact/tests/ipv4_id_uniqueness_test.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ipv4_id_uniqueness_test
-
-import (
- "context"
- "flag"
- "fmt"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-func recvTCPSegment(t *testing.T, conn *testbench.TCPIPv4, expect *testbench.TCP, expectPayload *testbench.Payload) (uint16, error) {
- layers, err := conn.ExpectData(t, expect, expectPayload, time.Second)
- if err != nil {
- return 0, fmt.Errorf("failed to receive TCP segment: %s", err)
- }
- if len(layers) < 2 {
- return 0, fmt.Errorf("got packet with layers: %v, expected to have at least 2 layers (link and network)", layers)
- }
- ipv4, ok := layers[1].(*testbench.IPv4)
- if !ok {
- return 0, fmt.Errorf("got network layer: %T, expected: *IPv4", layers[1])
- }
- if *ipv4.Flags&header.IPv4FlagDontFragment != 0 {
- return 0, fmt.Errorf("got IPv4 DF=1, expected DF=0")
- }
- return *ipv4.ID, nil
-}
-
-// RFC 6864 section 4.2 states: "The IPv4 ID of non-atomic datagrams MUST NOT
-// be reused when sending a copy of an earlier non-atomic datagram."
-//
-// This test creates a TCP connection, uses the IP_MTU_DISCOVER socket option
-// to force the DF bit to be 0, and checks that a retransmitted segment has a
-// different IPv4 Identification value than the original segment.
-func TestIPv4RetransmitIdentificationUniqueness(t *testing.T) {
- for _, tc := range []struct {
- name string
- payload []byte
- }{
- {"SmallPayload", []byte("sample data")},
- // 512 bytes is chosen because sending more than this in a single segment
- // causes the retransmission to send less than the original amount.
- {"512BytePayload", testbench.GenerateRandomPayload(t, 512)},
- } {
- t.Run(tc.name, func(t *testing.T) {
- dut := testbench.NewDUT(t)
- listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(t, listenFD)
-
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close(t)
-
- conn.Connect(t)
- remoteFD, _ := dut.Accept(t, listenFD)
- defer dut.Close(t, remoteFD)
-
- dut.SetSockOptInt(t, remoteFD, unix.IPPROTO_TCP, unix.TCP_NODELAY, 1)
-
- // TODO(b/129291778) The following socket option clears the DF bit on
- // IP packets sent over the socket, and is currently not supported by
- // gVisor. gVisor by default sends packets with DF=0 anyway, so the
- // socket option being not supported does not affect the operation of
- // this test. Once the socket option is supported, the following call
- // can be changed to simply assert success.
- ret, errno := dut.SetSockOptIntWithErrno(context.Background(), t, remoteFD, unix.IPPROTO_IP, linux.IP_MTU_DISCOVER, linux.IP_PMTUDISC_DONT)
-			// Fuchsia will return an ENOPROTOOPT errno.
- if ret == -1 && errno != unix.ENOPROTOOPT {
- t.Fatalf("failed to set IP_MTU_DISCOVER socket option to IP_PMTUDISC_DONT: %s", errno)
- }
-
- samplePayload := &testbench.Payload{Bytes: tc.payload}
-
- dut.Send(t, remoteFD, tc.payload, 0)
- if _, err := conn.ExpectData(t, &testbench.TCP{}, samplePayload, time.Second); err != nil {
- t.Fatalf("failed to receive TCP segment sent for RTT calculation: %s", err)
- }
- // Let the DUT estimate RTO with RTT from the DATA-ACK.
- // TODO(gvisor.dev/issue/2685) Estimate RTO during handshake, after which
- // we can skip sending this ACK.
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
-
- dut.Send(t, remoteFD, tc.payload, 0)
- expectTCP := &testbench.TCP{SeqNum: testbench.Uint32(uint32(*conn.RemoteSeqNum(t)))}
- originalID, err := recvTCPSegment(t, &conn, expectTCP, samplePayload)
- if err != nil {
- t.Fatalf("failed to receive TCP segment: %s", err)
- }
-
- retransmitID, err := recvTCPSegment(t, &conn, expectTCP, samplePayload)
- if err != nil {
- t.Fatalf("failed to receive retransmitted TCP segment: %s", err)
- }
- if originalID == retransmitID {
- t.Fatalf("unexpectedly got retransmitted TCP segment with same IPv4 ID field=%d", originalID)
- }
- })
- }
-}
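The test above compares the IPv4 Identification field of the original and retransmitted segments and requires DF to be clear. For reference, both fields live in the first eight bytes of the IPv4 header; the sketch below pulls them out of a raw header using offsets from RFC 791 rather than the gVisor header package, and the helper name is illustrative.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// idAndDF extracts the Identification field and the Don't Fragment flag from
// a raw IPv4 header. Identification occupies bytes 4-5; DF is bit 0x40 of
// byte 6 (the top bits of the Flags/Fragment Offset field).
func idAndDF(hdr []byte) (id uint16, df bool) {
	id = binary.BigEndian.Uint16(hdr[4:6])
	df = hdr[6]&0x40 != 0
	return id, df
}

func main() {
	// A 20-byte IPv4 header with ID 0x1234 and DF clear.
	hdr := make([]byte, 20)
	binary.BigEndian.PutUint16(hdr[4:6], 0x1234)
	id, df := idAndDF(hdr)
	fmt.Printf("id=%#04x df=%t\n", id, df)
}
```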
diff --git a/test/packetimpact/tests/ipv6_fragment_icmp_error_test.go b/test/packetimpact/tests/ipv6_fragment_icmp_error_test.go
deleted file mode 100644
index 4034a128e..000000000
--- a/test/packetimpact/tests/ipv6_fragment_icmp_error_test.go
+++ /dev/null
@@ -1,356 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ipv6_fragment_icmp_error_test
-
-import (
- "flag"
- "testing"
- "time"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-const (
- data = "IPV6_PROTOCOL_TESTER_FOR_FRAGMENT"
- fragmentID = 1
- reassemblyTimeout = ipv6.ReassembleTimeout + 5*time.Second
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-func fragmentedICMPEchoRequest(t *testing.T, n *testbench.DUTTestNet, conn *testbench.IPv6Conn, firstPayloadLength uint16, payload []byte, secondFragmentOffset uint16) ([]testbench.Layers, [][]byte) {
- t.Helper()
-
- icmpv6Header := header.ICMPv6(make([]byte, header.ICMPv6EchoMinimumSize))
- icmpv6Header.SetType(header.ICMPv6EchoRequest)
- icmpv6Header.SetCode(header.ICMPv6UnusedCode)
- icmpv6Header.SetIdent(0)
- icmpv6Header.SetSequence(0)
- cksum := header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
- Header: icmpv6Header,
- Src: tcpip.Address(n.LocalIPv6),
- Dst: tcpip.Address(n.RemoteIPv6),
- PayloadCsum: header.Checksum(payload, 0 /* initial */),
- PayloadLen: len(payload),
- })
- icmpv6Header.SetChecksum(cksum)
- icmpv6Bytes := append([]byte(icmpv6Header), payload...)
-
- icmpv6ProtoNum := header.IPv6ExtensionHeaderIdentifier(header.ICMPv6ProtocolNumber)
-
- firstFragment := conn.CreateFrame(t, testbench.Layers{&testbench.IPv6{}},
- &testbench.IPv6FragmentExtHdr{
- NextHeader: &icmpv6ProtoNum,
- FragmentOffset: testbench.Uint16(0),
- MoreFragments: testbench.Bool(true),
- Identification: testbench.Uint32(fragmentID),
- },
- &testbench.Payload{
- Bytes: icmpv6Bytes[:header.ICMPv6PayloadOffset+firstPayloadLength],
- },
- )
- firstIPv6 := firstFragment[1:]
- firstIPv6Bytes, err := firstIPv6.ToBytes()
- if err != nil {
- t.Fatalf("failed to convert first %s to bytes: %s", firstIPv6, err)
- }
-
- secondFragment := conn.CreateFrame(t, testbench.Layers{&testbench.IPv6{}},
- &testbench.IPv6FragmentExtHdr{
- NextHeader: &icmpv6ProtoNum,
- FragmentOffset: testbench.Uint16(secondFragmentOffset),
- MoreFragments: testbench.Bool(false),
- Identification: testbench.Uint32(fragmentID),
- },
- &testbench.Payload{
- Bytes: icmpv6Bytes[header.ICMPv6PayloadOffset+firstPayloadLength:],
- },
- )
- secondIPv6 := secondFragment[1:]
- secondIPv6Bytes, err := secondIPv6.ToBytes()
- if err != nil {
- t.Fatalf("failed to convert second %s to bytes: %s", secondIPv6, err)
- }
-
- return []testbench.Layers{firstFragment, secondFragment}, [][]byte{firstIPv6Bytes, secondIPv6Bytes}
-}
-
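fragmentedICMPEchoRequest computes the ICMPv6 checksum over the complete echo request before splitting it, because the checksum covers an IPv6 pseudo-header plus the whole upper-layer message (RFC 8200 section 8.1, RFC 4443 section 2.3). A standalone sketch of that computation with raw bytes is below; it uses no gVisor helpers and the function names are illustrative.

```go
package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

// onesComplementSum adds 16-bit words with end-around carry.
func onesComplementSum(sum uint32, b []byte) uint32 {
	for i := 0; i+1 < len(b); i += 2 {
		sum += uint32(binary.BigEndian.Uint16(b[i : i+2]))
	}
	if len(b)%2 == 1 {
		sum += uint32(b[len(b)-1]) << 8 // pad the trailing odd byte with zero
	}
	for sum>>16 != 0 {
		sum = (sum & 0xffff) + (sum >> 16)
	}
	return sum
}

// icmpv6Checksum computes the checksum over the IPv6 pseudo-header (source,
// destination, upper-layer length, next header = 58) followed by the ICMPv6
// message with its checksum field zeroed.
func icmpv6Checksum(src, dst net.IP, msg []byte) uint16 {
	var pseudo []byte
	pseudo = append(pseudo, src.To16()...)
	pseudo = append(pseudo, dst.To16()...)
	var length [4]byte
	binary.BigEndian.PutUint32(length[:], uint32(len(msg)))
	pseudo = append(pseudo, length[:]...)
	pseudo = append(pseudo, 0, 0, 0, 58) // three zero bytes, then Next Header = ICMPv6
	sum := onesComplementSum(0, pseudo)
	sum = onesComplementSum(sum, msg)
	return ^uint16(sum)
}

func main() {
	msg := []byte{128, 0, 0, 0, 0, 0, 0, 0} // echo request header with zeroed checksum
	fmt.Printf("%#04x\n", icmpv6Checksum(net.ParseIP("fe80::1"), net.ParseIP("fe80::2"), msg))
}
```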
-func TestIPv6ICMPEchoRequestFragmentReassembly(t *testing.T) {
- tests := []struct {
- name string
- firstPayloadLength uint16
- payload []byte
- secondFragmentOffset uint16
- sendFrameOrder []int
- }{
- {
- name: "reassemble two fragments",
- firstPayloadLength: 8,
- payload: []byte(data)[:20],
- secondFragmentOffset: (header.ICMPv6EchoMinimumSize + 8) / 8,
- sendFrameOrder: []int{1, 2},
- },
- {
- name: "reassemble two fragments in reverse order",
- firstPayloadLength: 8,
- payload: []byte(data)[:20],
- secondFragmentOffset: (header.ICMPv6EchoMinimumSize + 8) / 8,
- sendFrameOrder: []int{2, 1},
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- t.Parallel()
- dut := testbench.NewDUT(t)
- conn := dut.Net.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})
- defer conn.Close(t)
-
- fragments, _ := fragmentedICMPEchoRequest(t, dut.Net, &conn, test.firstPayloadLength, test.payload, test.secondFragmentOffset)
-
- for _, i := range test.sendFrameOrder {
- conn.SendFrame(t, fragments[i-1])
- }
-
- gotEchoReply, err := conn.ExpectFrame(t, testbench.Layers{
- &testbench.Ether{},
- &testbench.IPv6{},
- &testbench.ICMPv6{
- Type: testbench.ICMPv6Type(header.ICMPv6EchoReply),
- Code: testbench.ICMPv6Code(header.ICMPv6UnusedCode),
- },
- }, time.Second)
- if err != nil {
- t.Fatalf("didn't receive an ICMPv6 Echo Reply: %s", err)
- }
- gotPayload, err := gotEchoReply[len(gotEchoReply)-1].ToBytes()
- if err != nil {
- t.Fatalf("failed to convert ICMPv6 to bytes: %s", err)
- }
- icmpPayload := gotPayload[header.ICMPv6EchoMinimumSize:]
- wantPayload := test.payload
- if diff := cmp.Diff(wantPayload, icmpPayload); diff != "" {
- t.Fatalf("payload mismatch (-want +got):\n%s", diff)
- }
- })
- }
-}
-
-func TestIPv6FragmentReassemblyTimeout(t *testing.T) {
- type icmpFramePattern struct {
- typ header.ICMPv6Type
- code header.ICMPv6Code
- }
-
- type icmpReassemblyTimeoutDetail struct {
-		payloadFragment int // 1: first fragment, 2: second fragment.
- }
-
- tests := []struct {
- name string
- firstPayloadLength uint16
- payload []byte
- secondFragmentOffset uint16
- sendFrameOrder []int
- replyFilter icmpFramePattern
- expectErrorReply bool
- expectICMPReassemblyTimeout icmpReassemblyTimeoutDetail
- }{
- {
- name: "reassembly timeout (first fragment only)",
- firstPayloadLength: 8,
- payload: []byte(data)[:20],
- secondFragmentOffset: (header.ICMPv6EchoMinimumSize + 8) / 8,
- sendFrameOrder: []int{1},
- replyFilter: icmpFramePattern{
- typ: header.ICMPv6TimeExceeded,
- code: header.ICMPv6ReassemblyTimeout,
- },
- expectErrorReply: true,
- expectICMPReassemblyTimeout: icmpReassemblyTimeoutDetail{
- payloadFragment: 1,
- },
- },
- {
- name: "reassembly timeout (second fragment only)",
- firstPayloadLength: 8,
- payload: []byte(data)[:20],
- secondFragmentOffset: (header.ICMPv6EchoMinimumSize + 8) / 8,
- sendFrameOrder: []int{2},
- replyFilter: icmpFramePattern{
- typ: header.ICMPv6TimeExceeded,
- code: header.ICMPv6ReassemblyTimeout,
- },
- expectErrorReply: false,
- },
- {
- name: "reassembly timeout (two fragments with a gap)",
- firstPayloadLength: 8,
- payload: []byte(data)[:20],
- secondFragmentOffset: (header.ICMPv6EchoMinimumSize + 16) / 8,
- sendFrameOrder: []int{1, 2},
- replyFilter: icmpFramePattern{
- typ: header.ICMPv6TimeExceeded,
- code: header.ICMPv6ReassemblyTimeout,
- },
- expectErrorReply: true,
- expectICMPReassemblyTimeout: icmpReassemblyTimeoutDetail{
- payloadFragment: 1,
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- t.Parallel()
- dut := testbench.NewDUT(t)
- conn := dut.Net.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})
- defer conn.Close(t)
-
- fragments, ipv6Bytes := fragmentedICMPEchoRequest(t, dut.Net, &conn, test.firstPayloadLength, test.payload, test.secondFragmentOffset)
-
- for _, i := range test.sendFrameOrder {
- conn.SendFrame(t, fragments[i-1])
- }
-
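- // Wait up to the reassembly timeout for the ICMPv6 error matching the filter.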
- gotErrorMessage, err := conn.ExpectFrame(t, testbench.Layers{
- &testbench.Ether{},
- &testbench.IPv6{},
- &testbench.ICMPv6{
- Type: testbench.ICMPv6Type(test.replyFilter.typ),
- Code: testbench.ICMPv6Code(test.replyFilter.code),
- },
- }, reassemblyTimeout)
- if !test.expectErrorReply {
- if err == nil {
- t.Fatalf("shouldn't receive an ICMPv6 Error Message with type=%d and code=%d", test.replyFilter.typ, test.replyFilter.code)
- }
- return
- }
- if err != nil {
- t.Fatalf("didn't receive an ICMPv6 Error Message with type=%d and code=%d: err", test.replyFilter.typ, test.replyFilter.code, err)
- }
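- // The ICMPv6 error's payload should carry the invoking fragment, i.e. the IPv6 packet selected by payloadFragment.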
- gotPayload, err := gotErrorMessage[len(gotErrorMessage)-1].ToBytes()
- if err != nil {
- t.Fatalf("failed to convert ICMPv6 to bytes: %s", err)
- }
- icmpPayload := gotPayload[header.ICMPv6ErrorHeaderSize:]
- wantPayload := ipv6Bytes[test.expectICMPReassemblyTimeout.payloadFragment-1]
- if diff := cmp.Diff(wantPayload, icmpPayload); diff != "" {
- t.Fatalf("payload mismatch (-want +got):\n%s", diff)
- }
- })
- }
-}
-
-func TestIPv6FragmentParamProblem(t *testing.T) {
- type icmpFramePattern struct {
- typ header.ICMPv6Type
- code header.ICMPv6Code
- }
-
- type icmpParamProblemDetail struct {
- pointer uint32
- payloadFragment int // 1: first fragment, 2: second fragment.
- }
-
- tests := []struct {
- name string
- firstPayloadLength uint16
- payload []byte
- secondFragmentOffset uint16
- sendFrameOrder []int
- replyFilter icmpFramePattern
- expectICMPParamProblem icmpParamProblemDetail
- }{
- {
- name: "payload size not a multiple of 8",
- firstPayloadLength: 9,
- payload: []byte(data)[:20],
- secondFragmentOffset: (header.ICMPv6EchoMinimumSize + 8) / 8,
- sendFrameOrder: []int{1},
- replyFilter: icmpFramePattern{
- typ: header.ICMPv6ParamProblem,
- code: header.ICMPv6ErroneousHeader,
- },
- expectICMPParamProblem: icmpParamProblemDetail{
- pointer: 4,
- payloadFragment: 1,
- },
- },
- {
- name: "payload length error",
- firstPayloadLength: 16,
- payload: []byte(data)[:33],
- secondFragmentOffset: 65520 / 8,
- sendFrameOrder: []int{1, 2},
- replyFilter: icmpFramePattern{
- typ: header.ICMPv6ParamProblem,
- code: header.ICMPv6ErroneousHeader,
- },
- expectICMPParamProblem: icmpParamProblemDetail{
- pointer: 42,
- payloadFragment: 2,
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- t.Parallel()
- dut := testbench.NewDUT(t)
- conn := dut.Net.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})
- defer conn.Close(t)
-
- fragments, ipv6Bytes := fragmentedICMPEchoRequest(t, dut.Net, &conn, test.firstPayloadLength, test.payload, test.secondFragmentOffset)
-
- for _, i := range test.sendFrameOrder {
- conn.SendFrame(t, fragments[i-1])
- }
-
- gotErrorMessage, err := conn.ExpectFrame(t, testbench.Layers{
- &testbench.Ether{},
- &testbench.IPv6{},
- &testbench.ICMPv6{
- Type: testbench.ICMPv6Type(test.replyFilter.typ),
- Code: testbench.ICMPv6Code(test.replyFilter.code),
- },
- }, time.Second)
- if err != nil {
- t.Fatalf("didn't receive an ICMPv6 Error Message with type=%d and code=%d: err", test.replyFilter.typ, test.replyFilter.code, err)
- }
- gotPayload, err := gotErrorMessage[len(gotErrorMessage)-1].ToBytes()
- if err != nil {
- t.Fatalf("failed to convert ICMPv6 to bytes: %s", err)
- }
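- // For a Parameter Problem message, the type-specific field holds the pointer to the offset of the error within the invoking packet.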
- gotPointer := header.ICMPv6(gotPayload).TypeSpecific()
- wantPointer := test.expectICMPParamProblem.pointer
- if gotPointer != wantPointer {
- t.Fatalf("got pointer = %d, want = %d", gotPointer, wantPointer)
- }
- icmpPayload := gotPayload[header.ICMPv6ErrorHeaderSize:]
- wantPayload := ipv6Bytes[test.expectICMPParamProblem.payloadFragment-1]
- if diff := cmp.Diff(wantPayload, icmpPayload); diff != "" {
- t.Fatalf("payload mismatch (-want +got):\n%s", diff)
- }
- })
- }
-}
diff --git a/test/packetimpact/tests/ipv6_fragment_reassembly_test.go b/test/packetimpact/tests/ipv6_fragment_reassembly_test.go
deleted file mode 100644
index db6195dc9..000000000
--- a/test/packetimpact/tests/ipv6_fragment_reassembly_test.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ipv6_fragment_reassembly_test
-
-import (
- "flag"
- "math/rand"
- "testing"
- "time"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-type fragmentInfo struct {
- offset uint16
- size uint16
- more bool
- id uint32
-}
-
-func TestIPv6FragmentReassembly(t *testing.T) {
- icmpv6ProtoNum := header.IPv6ExtensionHeaderIdentifier(header.ICMPv6ProtocolNumber)
-
- tests := []struct {
- description string
- ipPayloadLen int
- fragments []fragmentInfo
- expectReply bool
- }{
- {
- description: "basic reassembly",
- ipPayloadLen: 3000,
- fragments: []fragmentInfo{
- {offset: 0, size: 1000, id: 100, more: true},
- {offset: 1000, size: 1000, id: 100, more: true},
- {offset: 2000, size: 1000, id: 100, more: false},
- },
- expectReply: true,
- },
- {
- description: "out of order fragments",
- ipPayloadLen: 3000,
- fragments: []fragmentInfo{
- {offset: 0, size: 1000, id: 101, more: true},
- {offset: 2000, size: 1000, id: 101, more: false},
- {offset: 1000, size: 1000, id: 101, more: true},
- },
- expectReply: true,
- },
- {
- description: "duplicated fragments",
- ipPayloadLen: 3000,
- fragments: []fragmentInfo{
- {offset: 0, size: 1000, id: 102, more: true},
- {offset: 1000, size: 1000, id: 102, more: true},
- {offset: 1000, size: 1000, id: 102, more: true},
- {offset: 2000, size: 1000, id: 102, more: false},
- },
- expectReply: true,
- },
- {
- description: "fragment subset",
- ipPayloadLen: 3000,
- fragments: []fragmentInfo{
- {offset: 0, size: 1000, id: 103, more: true},
- {offset: 1000, size: 1000, id: 103, more: true},
- {offset: 512, size: 256, id: 103, more: true},
- {offset: 2000, size: 1000, id: 103, more: false},
- },
- expectReply: true,
- },
- {
- description: "fragment overlap",
- ipPayloadLen: 3000,
- fragments: []fragmentInfo{
- {offset: 0, size: 1000, id: 104, more: true},
- {offset: 1512, size: 1000, id: 104, more: true},
- {offset: 1000, size: 1000, id: 104, more: true},
- {offset: 2000, size: 1000, id: 104, more: false},
- },
- expectReply: false,
- },
- }
-
- for _, test := range tests {
- t.Run(test.description, func(t *testing.T) {
- dut := testbench.NewDUT(t)
- conn := dut.Net.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})
- defer conn.Close(t)
-
- lIP := tcpip.Address(dut.Net.LocalIPv6)
- rIP := tcpip.Address(dut.Net.RemoteIPv6)
-
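- // Build an ICMPv6 Echo Request spanning the full IP payload length and fill its payload with random bytes.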
- data := make([]byte, test.ipPayloadLen)
- icmp := header.ICMPv6(data[:header.ICMPv6HeaderSize])
- icmp.SetType(header.ICMPv6EchoRequest)
- icmp.SetCode(header.ICMPv6UnusedCode)
- icmp.SetChecksum(0)
- originalPayload := data[header.ICMPv6HeaderSize:]
- if _, err := rand.Read(originalPayload); err != nil {
- t.Fatalf("rand.Read: %s", err)
- }
-
- cksum := header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
- Header: icmp,
- Src: lIP,
- Dst: rIP,
- PayloadCsum: header.Checksum(originalPayload, 0 /* initial */),
- PayloadLen: len(originalPayload),
- })
- icmp.SetChecksum(cksum)
-
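- // Send the fragments in the order listed in the test case.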
- for _, fragment := range test.fragments {
- conn.Send(t, testbench.IPv6{},
- &testbench.IPv6FragmentExtHdr{
- NextHeader: &icmpv6ProtoNum,
- FragmentOffset: testbench.Uint16(fragment.offset / header.IPv6FragmentExtHdrFragmentOffsetBytesPerUnit),
- MoreFragments: testbench.Bool(fragment.more),
- Identification: testbench.Uint32(fragment.id),
- },
- &testbench.Payload{
- Bytes: data[fragment.offset:][:fragment.size],
- })
- }
-
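- // Collect the echoed fragments and reassemble the reply payload by fragment offset.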
- var bytesReceived int
- reassembledPayload := make([]byte, test.ipPayloadLen)
- for {
- incomingFrame, err := conn.ExpectFrame(t, testbench.Layers{
- &testbench.Ether{},
- &testbench.IPv6{},
- &testbench.IPv6FragmentExtHdr{},
- }, time.Second)
- if err != nil {
- // Either an unexpected frame was received, or none at all.
- if test.expectReply && bytesReceived < test.ipPayloadLen {
- t.Fatalf("received %d bytes out of %d, then conn.ExpectFrame(_, _, time.Second) failed with %s", bytesReceived, test.ipPayloadLen, err)
- }
- break
- }
- if !test.expectReply {
- t.Fatalf("unexpected reply received:\n%s", incomingFrame)
- }
- ipPayload, err := incomingFrame[3 /* Payload */].ToBytes()
- if err != nil {
- t.Fatalf("failed to parse ICMPv6 header: incomingPacket[3].ToBytes() = (_, %s)", err)
- }
- offset := *incomingFrame[2 /* IPv6FragmentExtHdr */].(*testbench.IPv6FragmentExtHdr).FragmentOffset
- offset *= header.IPv6FragmentExtHdrFragmentOffsetBytesPerUnit
- if copied := copy(reassembledPayload[offset:], ipPayload); copied != len(ipPayload) {
- t.Fatalf("wrong number of bytes copied into reassembledPayload: got = %d, want = %d", copied, len(ipPayload))
- }
- bytesReceived += len(ipPayload)
- }
-
- if test.expectReply {
- if diff := cmp.Diff(originalPayload, reassembledPayload[header.ICMPv6HeaderSize:]); diff != "" {
- t.Fatalf("reassembledPayload mismatch (-want +got):\n%s", diff)
- }
- }
- })
- }
-}
diff --git a/test/packetimpact/tests/ipv6_unknown_options_action_test.go b/test/packetimpact/tests/ipv6_unknown_options_action_test.go
deleted file mode 100644
index d762c43a7..000000000
--- a/test/packetimpact/tests/ipv6_unknown_options_action_test.go
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ipv6_unknown_options_action_test
-
-import (
- "flag"
- "net"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
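-// mkHopByHopOptionsExtHdr returns a Hop-by-Hop Options extension header carrying a
-// single option of the given type with a 4-byte, zero-filled body.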
-func mkHopByHopOptionsExtHdr(optType byte) testbench.Layer {
- return &testbench.IPv6HopByHopOptionsExtHdr{
- Options: []byte{optType, 0x04, 0x00, 0x00, 0x00, 0x00},
- }
-}
-
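-// mkDestinationOptionsExtHdr returns a Destination Options extension header carrying
-// a single option of the given type with a 4-byte, zero-filled body.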
-func mkDestinationOptionsExtHdr(optType byte) testbench.Layer {
- return &testbench.IPv6DestinationOptionsExtHdr{
- Options: []byte{optType, 0x04, 0x00, 0x00, 0x00, 0x00},
- }
-}
-
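-// optionTypeFromAction returns an option type whose two high-order bits encode the
-// given unknown-option action, as specified by RFC 8200 section 4.2.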
-func optionTypeFromAction(action header.IPv6OptionUnknownAction) byte {
- return byte(action << 6)
-}
-
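-// TestIPv6UnknownOptionAction tests that the DUT handles an unknown IPv6 option
-// according to the action encoded in the option type, replying with an ICMPv6
-// Parameter Problem only when the action requires it.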
-func TestIPv6UnknownOptionAction(t *testing.T) {
- for _, tt := range []struct {
- description string
- mkExtHdr func(optType byte) testbench.Layer
- action header.IPv6OptionUnknownAction
- multicastDst bool
- wantICMPv6 bool
- }{
- {
- description: "0b00/hbh",
- mkExtHdr: mkHopByHopOptionsExtHdr,
- action: header.IPv6OptionUnknownActionSkip,
- multicastDst: false,
- wantICMPv6: false,
- },
- {
- description: "0b01/hbh",
- mkExtHdr: mkHopByHopOptionsExtHdr,
- action: header.IPv6OptionUnknownActionDiscard,
- multicastDst: false,
- wantICMPv6: false,
- },
- {
- description: "0b10/hbh/unicast",
- mkExtHdr: mkHopByHopOptionsExtHdr,
- action: header.IPv6OptionUnknownActionDiscardSendICMP,
- multicastDst: false,
- wantICMPv6: true,
- },
- {
- description: "0b10/hbh/multicast",
- mkExtHdr: mkHopByHopOptionsExtHdr,
- action: header.IPv6OptionUnknownActionDiscardSendICMP,
- multicastDst: true,
- wantICMPv6: true,
- },
- {
- description: "0b11/hbh/unicast",
- mkExtHdr: mkHopByHopOptionsExtHdr,
- action: header.IPv6OptionUnknownActionDiscardSendICMPNoMulticastDest,
- multicastDst: false,
- wantICMPv6: true,
- },
- {
- description: "0b11/hbh/multicast",
- mkExtHdr: mkHopByHopOptionsExtHdr,
- action: header.IPv6OptionUnknownActionDiscardSendICMPNoMulticastDest,
- multicastDst: true,
- wantICMPv6: false,
- },
- {
- description: "0b00/destination",
- mkExtHdr: mkDestinationOptionsExtHdr,
- action: header.IPv6OptionUnknownActionSkip,
- multicastDst: false,
- wantICMPv6: false,
- },
- {
- description: "0b01/destination",
- mkExtHdr: mkDestinationOptionsExtHdr,
- action: header.IPv6OptionUnknownActionDiscard,
- multicastDst: false,
- wantICMPv6: false,
- },
- {
- description: "0b10/destination/unicast",
- mkExtHdr: mkDestinationOptionsExtHdr,
- action: header.IPv6OptionUnknownActionDiscardSendICMP,
- multicastDst: false,
- wantICMPv6: true,
- },
- {
- description: "0b10/destination/multicast",
- mkExtHdr: mkDestinationOptionsExtHdr,
- action: header.IPv6OptionUnknownActionDiscardSendICMP,
- multicastDst: true,
- wantICMPv6: true,
- },
- {
- description: "0b11/destination/unicast",
- mkExtHdr: mkDestinationOptionsExtHdr,
- action: header.IPv6OptionUnknownActionDiscardSendICMPNoMulticastDest,
- multicastDst: false,
- wantICMPv6: true,
- },
- {
- description: "0b11/destination/multicast",
- mkExtHdr: mkDestinationOptionsExtHdr,
- action: header.IPv6OptionUnknownActionDiscardSendICMPNoMulticastDest,
- multicastDst: true,
- wantICMPv6: false,
- },
- } {
- t.Run(tt.description, func(t *testing.T) {
- dut := testbench.NewDUT(t)
- conn := dut.Net.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})
- defer conn.Close(t)
-
- outgoingOverride := testbench.Layers{}
- if tt.multicastDst {
- outgoingOverride = testbench.Layers{&testbench.IPv6{
- DstAddr: testbench.Address(tcpip.Address(net.ParseIP("ff02::1"))),
- }}
- }
-
- outgoing := conn.CreateFrame(t, outgoingOverride, tt.mkExtHdr(optionTypeFromAction(tt.action)))
- conn.SendFrame(t, outgoing)
- ipv6Sent := outgoing[1:]
- icmpv6Payload, err := ipv6Sent.ToBytes()
- if err != nil {
- t.Fatalf("failed to serialize the outgoing packet: %s", err)
- }
- gotICMPv6, err := conn.ExpectFrame(t, testbench.Layers{
- &testbench.Ether{},
- &testbench.IPv6{},
- &testbench.ICMPv6{
- Type: testbench.ICMPv6Type(header.ICMPv6ParamProblem),
- Code: testbench.ICMPv6Code(header.ICMPv6UnknownOption),
- // The pointer in the ICMPv6 parameter problem message
- // should point to the option type of the unknown option. In
- // our test case, it is the first option in the extension
- // header whose option type is 2 bytes after the IPv6 header
- // (after NextHeader and ExtHdrLen).
- Pointer: testbench.Uint32(header.IPv6MinimumSize + 2),
- Payload: icmpv6Payload,
- },
- }, time.Second)
- if tt.wantICMPv6 && err != nil {
- t.Fatalf("expected ICMPv6 Parameter Problem but got none: %s", err)
- }
- if !tt.wantICMPv6 && gotICMPv6 != nil {
- t.Fatalf("expected no ICMPv6 Parameter Problem but got one: %s", gotICMPv6)
- }
- })
- }
-}
diff --git a/test/packetimpact/tests/tcp_connect_icmp_error_test.go b/test/packetimpact/tests/tcp_connect_icmp_error_test.go
deleted file mode 100644
index 15d603328..000000000
--- a/test/packetimpact/tests/tcp_connect_icmp_error_test.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_connect_icmp_error_test
-
-import (
- "flag"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
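-// sendICMPError sends an ICMPv4 Host Unreachable message to the DUT, embedding the
-// given TCP segment and its IPv4 header as the ICMP payload.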
-func sendICMPError(t *testing.T, conn *testbench.TCPIPv4, tcp *testbench.TCP) {
- t.Helper()
-
- icmpPayload := testbench.Layers{tcp.Prev(), tcp}
- bytes, err := icmpPayload.ToBytes()
- if err != nil {
- t.Fatalf("got icmpPayload.ToBytes() = (_, %s), want = (_, nil)", err)
- }
-
- layers := conn.CreateFrame(t, nil)
- layers[len(layers)-1] = &testbench.ICMPv4{
- Type: testbench.ICMPv4Type(header.ICMPv4DstUnreachable),
- Code: testbench.ICMPv4Code(header.ICMPv4HostUnreachable),
- Payload: bytes,
- }
- conn.SendFrameStateless(t, layers)
-}
-
-// TestTCPConnectICMPError tests that the handshake fails and the socket state
-// is cleaned up when an ICMP error is received.
-func TestTCPConnectICMPError(t *testing.T) {
- dut := testbench.NewDUT(t)
-
- clientFD, clientPort := dut.CreateBoundSocket(t, unix.SOCK_STREAM|unix.SOCK_NONBLOCK, unix.IPPROTO_TCP, dut.Net.RemoteIPv4)
- port := uint16(9001)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{SrcPort: &port, DstPort: &clientPort}, testbench.TCP{SrcPort: &clientPort, DstPort: &port})
- defer conn.Close(t)
- sa := unix.SockaddrInet4{Port: int(port)}
- copy(sa.Addr[:], dut.Net.LocalIPv4)
- // Bring the dut to SYN-SENT state with a non-blocking connect.
- dut.Connect(t, clientFD, &sa)
- tcp, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)}, time.Second)
- if err != nil {
- t.Fatalf("expected SYN, %s", err)
- }
-
- // Continuously try to read the ICMP error in an attempt to trigger a race
- // condition.
- start := make(chan struct{})
- done := make(chan struct{})
- go func() {
- defer close(done)
-
- close(start)
- for {
- select {
- case <-done:
- return
- default:
- }
- const want = unix.EHOSTUNREACH
- switch got := unix.Errno(dut.GetSockOptInt(t, clientFD, unix.SOL_SOCKET, unix.SO_ERROR)); got {
- case unix.Errno(0):
- continue
- case want:
- return
- default:
- t.Fatalf("got SO_ERROR = %s, want %s", got, want)
- }
-
- }
- }()
-
- <-start
- sendICMPError(t, &conn, tcp)
-
- dut.PollOne(t, clientFD, unix.POLLHUP, time.Second)
- <-done
-
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
- // The DUT should reply with RST to our ACK as the state should have
- // transitioned to CLOSED because of handshake error.
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst)}, time.Second); err != nil {
- t.Fatalf("expected RST, %s", err)
- }
-}
diff --git a/test/packetimpact/tests/tcp_cork_mss_test.go b/test/packetimpact/tests/tcp_cork_mss_test.go
deleted file mode 100644
index 1db3c9883..000000000
--- a/test/packetimpact/tests/tcp_cork_mss_test.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_cork_mss_test
-
-import (
- "flag"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-// TestTCPCorkMSS tests that corked segments are coalesced and split according to the MSS.
-func TestTCPCorkMSS(t *testing.T) {
- dut := testbench.NewDUT(t)
- listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(t, listenFD)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close(t)
-
- const mss = uint32(header.TCPDefaultMSS)
- options := make([]byte, header.TCPOptionMSSLength)
- header.EncodeMSSOption(mss, options)
- conn.ConnectWithOptions(t, options)
-
- acceptFD, _ := dut.Accept(t, listenFD)
- defer dut.Close(t, acceptFD)
-
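- // Enable TCP_CORK so that the DUT holds small segments until a full-sized segment can be formed.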
- dut.SetSockOptInt(t, acceptFD, unix.IPPROTO_TCP, unix.TCP_CORK, 1)
-
- // Let the dut application send 2 small segments to be held up and coalesced
- // until the application sends a larger segment to fill up to > MSS.
- sampleData := []byte("Sample Data")
- dut.Send(t, acceptFD, sampleData, 0)
- dut.Send(t, acceptFD, sampleData, 0)
-
- expectedData := sampleData
- expectedData = append(expectedData, sampleData...)
- largeData := make([]byte, mss+1)
- expectedData = append(expectedData, largeData...)
- dut.Send(t, acceptFD, largeData, 0)
-
- // Expect the segments to be coalesced, sent, and capped to the MSS.
- expectedPayload := testbench.Payload{Bytes: expectedData[:mss]}
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, &expectedPayload, time.Second); err != nil {
- t.Fatalf("expected payload was not received: %s", err)
- }
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
- // Expect the coalesced segment to be split and transmitted.
- expectedPayload = testbench.Payload{Bytes: expectedData[mss:]}
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck | header.TCPFlagPsh)}, &expectedPayload, time.Second); err != nil {
- t.Fatalf("expected payload was not received: %s", err)
- }
-
- // Check for segments to *not* be held up because of TCP_CORK when
- // the current send window is less than MSS.
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), WindowSize: testbench.Uint16(uint16(2 * len(sampleData)))})
- dut.Send(t, acceptFD, sampleData, 0)
- dut.Send(t, acceptFD, sampleData, 0)
- expectedPayload = testbench.Payload{Bytes: append(sampleData, sampleData...)}
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck | header.TCPFlagPsh)}, &expectedPayload, time.Second); err != nil {
- t.Fatalf("expected payload was not received: %s", err)
- }
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
-}
diff --git a/test/packetimpact/tests/tcp_fin_retransmission_test.go b/test/packetimpact/tests/tcp_fin_retransmission_test.go
deleted file mode 100644
index 500f7a783..000000000
--- a/test/packetimpact/tests/tcp_fin_retransmission_test.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_fin_retransmission_test
-
-import (
- "flag"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-// TestTCPClosingFinRetransmission tests that the TCP implementation retransmits
-// the FIN segment in the CLOSING state.
-func TestTCPClosingFinRetransmission(t *testing.T) {
- for _, tt := range []struct {
- description string
- flags header.TCPFlags
- }{
- {"CLOSING", header.TCPFlagAck | header.TCPFlagFin},
- {"FIN_WAIT_1", header.TCPFlagAck},
- } {
- t.Run(tt.description, func(t *testing.T) {
- dut := testbench.NewDUT(t)
- listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(t, listenFD)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close(t)
- conn.Connect(t)
- acceptFD, _ := dut.Accept(t, listenFD)
- defer dut.Close(t, acceptFD)
-
- // Give a chance for the dut to estimate RTO with RTT from the DATA-ACK.
- // TODO(gvisor.dev/issue/2685) Estimate RTO during handshake, after which
- // we can skip the next block of code.
- sampleData := []byte("Sample Data")
- if got, want := dut.Send(t, acceptFD, sampleData, 0), len(sampleData); int(got) != want {
- t.Fatalf("got dut.Send(t, %d, %s, 0) = %d, want %d", acceptFD, sampleData, got, want)
- }
- if _, err := conn.ExpectData(t, &testbench.TCP{}, &testbench.Payload{Bytes: sampleData}, time.Second); err != nil {
- t.Fatalf("expected payload was not received: %s", err)
- }
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
-
- dut.Shutdown(t, acceptFD, unix.SHUT_WR)
-
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil {
- t.Fatalf("expected FINACK from DUT, but got none: %s", err)
- }
-
- // Do not ack the FIN from DUT so that we can test for retransmission.
- seqNumForTheirFIN := testbench.Uint32(uint32(*conn.RemoteSeqNum(t)) - 1)
- conn.Send(t, testbench.TCP{AckNum: seqNumForTheirFIN, Flags: testbench.TCPFlags(tt.flags)})
-
- if tt.flags&header.TCPFlagFin != 0 {
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second); err != nil {
- t.Errorf("expected an ACK to our FIN, but got none: %s", err)
- }
- }
-
- if _, err := conn.Expect(t, testbench.TCP{
- SeqNum: seqNumForTheirFIN,
- Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck),
- }, time.Second); err != nil {
- t.Errorf("expected retransmission of FIN from the DUT: %s", err)
- }
- })
- }
-}
diff --git a/test/packetimpact/tests/tcp_handshake_window_size_test.go b/test/packetimpact/tests/tcp_handshake_window_size_test.go
deleted file mode 100644
index 668e0275c..000000000
--- a/test/packetimpact/tests/tcp_handshake_window_size_test.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_handshake_window_size_test
-
-import (
- "flag"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-// TestTCPHandshakeWindowSize tests that the stack honors the window size
-// communicated during the handshake.
-func TestTCPHandshakeWindowSize(t *testing.T) {
- dut := testbench.NewDUT(t)
- listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(t, listenFD)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close(t)
-
- // Start handshake with zero window size.
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn), WindowSize: testbench.Uint16(uint16(0))})
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn | header.TCPFlagAck)}, nil, time.Second); err != nil {
- t.Fatalf("expected SYN-ACK: %s", err)
- }
- // Update the advertised window size to a non-zero value with the ACK that
- // completes the handshake.
- //
- // Set the window size with MSB set and expect the dut to treat it as
- // an unsigned value.
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), WindowSize: testbench.Uint16(uint16(1 << 15))})
-
- acceptFd, _ := dut.Accept(t, listenFD)
- defer dut.Close(t, acceptFd)
-
- sampleData := []byte("Sample Data")
- samplePayload := &testbench.Payload{Bytes: sampleData}
-
- // Since we advertised a zero window followed by a non-zero window,
- // expect the dut to honor the recently advertised non-zero window
- // and actually send out the data instead of probing for zero window.
- dut.Send(t, acceptFd, sampleData, 0)
- if _, err := conn.ExpectNextData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload, time.Second); err != nil {
- t.Fatalf("expected payload was not received: %s", err)
- }
-}
diff --git a/test/packetimpact/tests/tcp_info_test.go b/test/packetimpact/tests/tcp_info_test.go
deleted file mode 100644
index 5410cc368..000000000
--- a/test/packetimpact/tests/tcp_info_test.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_info_test
-
-import (
- "flag"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-func TestTCPInfo(t *testing.T) {
- // Create a socket, listen, TCP connect, and accept.
- dut := testbench.NewDUT(t)
- listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(t, listenFD)
-
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close(t)
- conn.Connect(t)
-
- acceptFD, _ := dut.Accept(t, listenFD)
- defer dut.Close(t, acceptFD)
-
- // Send and receive sample data.
- sampleData := []byte("Sample Data")
- samplePayload := &testbench.Payload{Bytes: sampleData}
- dut.Send(t, acceptFD, sampleData, 0)
- if _, err := conn.ExpectData(t, &testbench.TCP{}, samplePayload, time.Second); err != nil {
- t.Fatalf("expected a packet with payload %s: %s", samplePayload, err)
- }
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
-
- info := dut.GetSockOptTCPInfo(t, acceptFD)
- if got, want := uint32(info.State), linux.TCP_ESTABLISHED; got != want {
- t.Fatalf("got %d want %d", got, want)
- }
- if info.RTT == 0 {
- t.Errorf("got RTT=0, want nonzero")
- }
- if info.RTTVar == 0 {
- t.Errorf("got RTTVar=0, want nonzero")
- }
- if info.RTO == 0 {
- t.Errorf("got RTO=0, want nonzero")
- }
- if info.ReordSeen != 0 {
- t.Errorf("expected the connection to not have any reordering, got: %d want: 0", info.ReordSeen)
- }
- if info.SndCwnd == 0 {
- t.Errorf("expected send congestion window to be greater than zero")
- }
- if info.CaState != linux.TCP_CA_Open {
- t.Errorf("expected the connection to be in open state, got: %d want: %d", info.CaState, linux.TCP_CA_Open)
- }
-
- if t.Failed() {
- t.FailNow()
- }
-
- // Check the congestion control state and send congestion window after
- // retransmission timeout.
- seq := testbench.Uint32(uint32(*conn.RemoteSeqNum(t)))
- dut.Send(t, acceptFD, sampleData, 0)
- if _, err := conn.ExpectData(t, &testbench.TCP{}, samplePayload, time.Second); err != nil {
- t.Fatalf("expected a packet with payload %s: %s", samplePayload, err)
- }
-
- // Allow a generous retransmission timeout (twice the reported RTO) for the DUT to retransmit.
- timeout := time.Duration(info.RTO) * 2 * time.Microsecond
- if _, err := conn.ExpectData(t, &testbench.TCP{SeqNum: seq}, samplePayload, timeout); err != nil {
- t.Fatalf("expected a packet with payload %s: %s", samplePayload, err)
- }
-
- info = dut.GetSockOptTCPInfo(t, acceptFD)
- if info.CaState != linux.TCP_CA_Loss {
- t.Errorf("expected the connection to be in loss recovery, got: %d want: %d", info.CaState, linux.TCP_CA_Loss)
- }
- if info.SndCwnd != 1 {
- t.Errorf("expected send congestion window to be 1, got: %d", info.SndCwnd)
- }
-}
diff --git a/test/packetimpact/tests/tcp_linger_test.go b/test/packetimpact/tests/tcp_linger_test.go
deleted file mode 100644
index 46b5ca5d8..000000000
--- a/test/packetimpact/tests/tcp_linger_test.go
+++ /dev/null
@@ -1,265 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_linger_test
-
-import (
- "context"
- "flag"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
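-// createSocket creates a listening socket on the DUT, completes a handshake from the
-// testbench, and returns the accepted FD, the listening FD, and the testbench connection.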
-func createSocket(t *testing.T, dut testbench.DUT) (int32, int32, testbench.TCPIPv4) {
- listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- conn.Connect(t)
- acceptFD, _ := dut.Accept(t, listenFD)
- return acceptFD, listenFD, conn
-}
-
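-// closeAll closes the testbench connection and the DUT's listening socket.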
-func closeAll(t *testing.T, dut testbench.DUT, listenFD int32, conn testbench.TCPIPv4) {
- conn.Close(t)
- dut.Close(t, listenFD)
-}
-
-// lingerDuration is the timeout value used with SO_LINGER socket option.
-const lingerDuration = 3 * time.Second
-
-// TestTCPLingerZeroTimeout tests SO_LINGER set with a zero timeout. The DUT
-// should send a RST-ACK when the socket is closed.
-func TestTCPLingerZeroTimeout(t *testing.T) {
- // Create a socket, listen, TCP connect, and accept.
- dut := testbench.NewDUT(t)
- acceptFD, listenFD, conn := createSocket(t, dut)
- defer closeAll(t, dut, listenFD, conn)
-
- dut.SetSockLingerOption(t, acceptFD, 0, true)
- dut.Close(t, acceptFD)
-
- // If the linger timeout is set to zero, the DUT should send a RST.
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst | header.TCPFlagAck)}, time.Second); err != nil {
- t.Errorf("expected RST-ACK packet within a second but got none: %s", err)
- }
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
-}
-
-// TestTCPLingerOff tests when SO_LINGER is not set. DUT should send FIN-ACK
-// when socket is closed.
-func TestTCPLingerOff(t *testing.T) {
- // Create a socket, listen, TCP connect, and accept.
- dut := testbench.NewDUT(t)
- acceptFD, listenFD, conn := createSocket(t, dut)
- defer closeAll(t, dut, listenFD, conn)
-
- dut.Close(t, acceptFD)
-
- // If SO_LINGER is not set, DUT should send a FIN-ACK.
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil {
- t.Errorf("expected FIN-ACK packet within a second but got none: %s", err)
- }
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
-}
-
-// TestTCPLingerNonZeroTimeout tests SO_LINGER set with a non-zero timeout.
-// The DUT should close the socket after the timeout.
-func TestTCPLingerNonZeroTimeout(t *testing.T) {
- for _, tt := range []struct {
- description string
- lingerOn bool
- }{
- {"WithNonZeroLinger", true},
- {"WithoutLinger", false},
- } {
- t.Run(tt.description, func(t *testing.T) {
- // Create a socket, listen, TCP connect, and accept.
- dut := testbench.NewDUT(t)
- acceptFD, listenFD, conn := createSocket(t, dut)
- defer closeAll(t, dut, listenFD, conn)
-
- dut.SetSockLingerOption(t, acceptFD, lingerDuration, tt.lingerOn)
-
- start := time.Now()
- dut.CloseWithErrno(context.Background(), t, acceptFD)
- elapsed := time.Since(start)
-
- expectedMaximum := time.Second
- if tt.lingerOn {
- expectedMaximum += lingerDuration
- if elapsed < lingerDuration {
- t.Errorf("expected close to take at least %s, but took %s", lingerDuration, elapsed)
- }
- }
- if elapsed >= expectedMaximum {
- t.Errorf("expected close to take at most %s, but took %s", expectedMaximum, elapsed)
- }
-
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil {
- t.Errorf("expected FIN-ACK packet within a second but got none: %s", err)
- }
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
- })
- }
-}
-
-// TestTCPLingerSendNonZeroTimeout tests SO_LINGER set with a non-zero timeout
-// after data has been sent. The DUT should close the socket after the timeout.
-func TestTCPLingerSendNonZeroTimeout(t *testing.T) {
- for _, tt := range []struct {
- description string
- lingerOn bool
- }{
- {"WithSendNonZeroLinger", true},
- {"WithoutLinger", false},
- } {
- t.Run(tt.description, func(t *testing.T) {
- // Create a socket, listen, TCP connect, and accept.
- dut := testbench.NewDUT(t)
- acceptFD, listenFD, conn := createSocket(t, dut)
- defer closeAll(t, dut, listenFD, conn)
-
- dut.SetSockLingerOption(t, acceptFD, lingerDuration, tt.lingerOn)
-
- // Send data.
- sampleData := []byte("Sample Data")
- dut.Send(t, acceptFD, sampleData, 0)
-
- start := time.Now()
- dut.CloseWithErrno(context.Background(), t, acceptFD)
- elapsed := time.Since(start)
-
- expectedMaximum := time.Second
- if tt.lingerOn {
- expectedMaximum += lingerDuration
- if elapsed < lingerDuration {
- t.Errorf("expected close to take at least %s, but took %s", lingerDuration, elapsed)
- }
- }
- if elapsed >= expectedMaximum {
- t.Errorf("expected close to take at most %s, but took %s", expectedMaximum, elapsed)
- }
-
- samplePayload := &testbench.Payload{Bytes: sampleData}
- if _, err := conn.ExpectData(t, &testbench.TCP{}, samplePayload, time.Second); err != nil {
- t.Fatalf("expected a packet with payload %v: %s", samplePayload, err)
- }
-
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil {
- t.Errorf("expected FIN-ACK packet within a second but got none: %s", err)
- }
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
- })
- }
-}
-
-// TestTCPLingerShutdownZeroTimeout tests SO_LINGER with shutdown() and zero
-// timeout. DUT should send RST-ACK when socket is closed.
-func TestTCPLingerShutdownZeroTimeout(t *testing.T) {
- // Create a socket, listen, TCP connect, and accept.
- dut := testbench.NewDUT(t)
- acceptFD, listenFD, conn := createSocket(t, dut)
- defer closeAll(t, dut, listenFD, conn)
-
- dut.SetSockLingerOption(t, acceptFD, 0, true)
- dut.Shutdown(t, acceptFD, unix.SHUT_RDWR)
- dut.Close(t, acceptFD)
-
- // Shutdown will send FIN-ACK with read/write option.
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil {
- t.Errorf("expected FIN-ACK packet within a second but got none: %s", err)
- }
-
- // If the linger timeout is set to zero, the DUT should send a RST.
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst | header.TCPFlagAck)}, time.Second); err != nil {
- t.Errorf("expected RST-ACK packet within a second but got none: %s", err)
- }
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
-}
-
-// TestTCPLingerShutdownSendNonZeroTimeout tests SO_LINGER with shutdown() and a
-// non-zero timeout. The DUT should close the socket after the timeout.
-func TestTCPLingerShutdownSendNonZeroTimeout(t *testing.T) {
- for _, tt := range []struct {
- description string
- lingerOn bool
- }{
- {"shutdownRDWR", true},
- {"shutdownRDWR", false},
- } {
- t.Run(tt.description, func(t *testing.T) {
- // Create a socket, listen, TCP connect, and accept.
- dut := testbench.NewDUT(t)
- acceptFD, listenFD, conn := createSocket(t, dut)
- defer closeAll(t, dut, listenFD, conn)
-
- dut.SetSockLingerOption(t, acceptFD, lingerDuration, tt.lingerOn)
-
- // Send data.
- sampleData := []byte("Sample Data")
- dut.Send(t, acceptFD, sampleData, 0)
-
- dut.Shutdown(t, acceptFD, unix.SHUT_RDWR)
-
- start := time.Now()
- dut.CloseWithErrno(context.Background(), t, acceptFD)
- elapsed := time.Since(start)
-
- expectedMaximum := time.Second
- if tt.lingerOn {
- expectedMaximum += lingerDuration
- if elapsed < lingerDuration {
- t.Errorf("expected close to take at least %s, but took %s", lingerDuration, elapsed)
- }
- }
- if elapsed >= expectedMaximum {
- t.Errorf("expected close to take at most %s, but took %s", expectedMaximum, elapsed)
- }
-
- samplePayload := &testbench.Payload{Bytes: sampleData}
- if _, err := conn.ExpectData(t, &testbench.TCP{}, samplePayload, time.Second); err != nil {
- t.Fatalf("expected a packet with payload %v: %s", samplePayload, err)
- }
-
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil {
- t.Errorf("expected FIN-ACK packet within a second but got none: %s", err)
- }
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
- })
- }
-}
-
-func TestTCPLingerNonEstablished(t *testing.T) {
- dut := testbench.NewDUT(t)
- newFD := dut.Socket(t, unix.AF_INET, unix.SOCK_STREAM, unix.IPPROTO_TCP)
- dut.SetSockLingerOption(t, newFD, lingerDuration, true)
-
- // As the socket is in the initial state, Close() should not linger
- // and should return immediately.
- start := time.Now()
- dut.CloseWithErrno(context.Background(), t, newFD)
- elapsed := time.Since(start)
-
- expectedMaximum := time.Second
- if elapsed >= time.Second {
- t.Errorf("expected close to take at most %s, but took %s", expectedMaximum, elapsed)
- }
-}
diff --git a/test/packetimpact/tests/tcp_listen_backlog_test.go b/test/packetimpact/tests/tcp_listen_backlog_test.go
deleted file mode 100644
index fea7d5b6f..000000000
--- a/test/packetimpact/tests/tcp_listen_backlog_test.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_listen_backlog_test
-
-import (
- "flag"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-// TestTCPListenBacklog tests the behavior of a listening endpoint. It should:
-// (1) reply to more SYNs than the configured listen backlog
-// (2) ignore ACKs (that complete a handshake) when the accept queue is full
-// (3) ignore incoming SYNs when the accept queue is full
-func TestTCPListenBacklog(t *testing.T) {
- dut := testbench.NewDUT(t)
-
- // Listening endpoint accepts one more connection than the listen backlog.
- listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 0 /*backlog*/)
-
- var establishedConn testbench.TCPIPv4
- var incompleteConn testbench.TCPIPv4
-
- // Test if the DUT listener replies to more SYNs than listen backlog+1
- for i, conn := range []*testbench.TCPIPv4{&establishedConn, &incompleteConn} {
- *conn = dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- // Expect dut connection to have transitioned to SYN-RCVD state.
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)})
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn | header.TCPFlagAck)}, nil, time.Second); err != nil {
- t.Fatalf("expected SYN-ACK for %d connection, %s", i, err)
- }
- }
- defer establishedConn.Close(t)
- defer incompleteConn.Close(t)
-
- // Send the ACK to complete handshake.
- establishedConn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
-
- // Poll for the established connection ready for accept.
- dut.PollOne(t, listenFd, unix.POLLIN, time.Second)
-
- // Send the ACK to complete handshake, expect this to be dropped by the
- // listener as the accept queue would be full because of the previous
- // handshake.
- incompleteConn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
- // Let the test wait for some time so that the ACK is indeed dropped by
- // the listener. Without such a wait, the DUT accept can race with
- // ACK handling (dropping), causing the test to be flaky.
- time.Sleep(100 * time.Millisecond)
-
- // Drain the accept queue to enable poll for subsequent connections on the
- // listener.
- fd, _ := dut.Accept(t, listenFd)
- dut.Close(t, fd)
-
- // The ACK for the incomplete connection should be ignored by the
- // listening endpoint and the poll on listener should now time out.
- if pfds := dut.Poll(t, []unix.PollFd{{Fd: listenFd, Events: unix.POLLIN}}, time.Second); len(pfds) != 0 {
- t.Fatalf("got dut.Poll(...) = %#v", pfds)
- }
-
- // Re-send the ACK to complete handshake and re-fill the accept-queue.
- incompleteConn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
- dut.PollOne(t, listenFd, unix.POLLIN, time.Second)
-
- // Now initiate a new connection when the accept queue is full.
- connectingConn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer connectingConn.Close(t)
- // Expect dut connection to drop the SYN and let the client stay in SYN_SENT state.
- connectingConn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)})
- if got, err := connectingConn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn | header.TCPFlagAck)}, nil, time.Second); err == nil {
- t.Fatalf("expected no SYN-ACK, but got %s", got)
- }
-}
diff --git a/test/packetimpact/tests/tcp_network_unreachable_test.go b/test/packetimpact/tests/tcp_network_unreachable_test.go
deleted file mode 100644
index e92e6aa9b..000000000
--- a/test/packetimpact/tests/tcp_network_unreachable_test.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_network_unreachable_test
-
-import (
- "context"
- "flag"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-// TestTCPSynSentUnreachable verifies that TCP connections fail immediately when
-// an ICMP destination unreachable message is sent in response to the initial
-// SYN.
-func TestTCPSynSentUnreachable(t *testing.T) {
- // Create the DUT and connection.
- dut := testbench.NewDUT(t)
- clientFD, clientPort := dut.CreateBoundSocket(t, unix.SOCK_STREAM|unix.SOCK_NONBLOCK, unix.IPPROTO_TCP, dut.Net.RemoteIPv4)
- port := uint16(9001)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{SrcPort: &port, DstPort: &clientPort}, testbench.TCP{SrcPort: &clientPort, DstPort: &port})
- defer conn.Close(t)
-
- // Bring the DUT to SYN-SENT state with a non-blocking connect.
- sa := unix.SockaddrInet4{Port: int(port)}
- copy(sa.Addr[:], dut.Net.LocalIPv4)
- if _, err := dut.ConnectWithErrno(context.Background(), t, clientFD, &sa); err != unix.EINPROGRESS {
- t.Errorf("got connect() = %v, want EINPROGRESS", err)
- }
-
- // Get the SYN.
- tcp, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)}, time.Second)
- if err != nil {
- t.Fatalf("expected SYN: %s", err)
- }
-
- // Send a host unreachable message.
- icmpPayload := testbench.Layers{tcp.Prev(), tcp}
- bytes, err := icmpPayload.ToBytes()
- if err != nil {
- t.Fatalf("got icmpPayload.ToBytes() = (_, %s), want = (_, nil)", err)
- }
-
- layers := conn.CreateFrame(t, nil)
- layers[len(layers)-1] = &testbench.ICMPv4{
- Type: testbench.ICMPv4Type(header.ICMPv4DstUnreachable),
- Code: testbench.ICMPv4Code(header.ICMPv4HostUnreachable),
- Payload: bytes,
- }
- conn.SendFrameStateless(t, layers)
-
- if err := getConnectError(t, &dut, clientFD); err != unix.EHOSTUNREACH {
- t.Errorf("got connect() = %v, want EHOSTUNREACH", err)
- }
-}
-
-// TestTCPSynSentUnreachable6 verifies that TCP connections fail immediately when
-// an ICMP destination unreachable message is sent in response to the initial
-// SYN.
-func TestTCPSynSentUnreachable6(t *testing.T) {
- // Create the DUT and connection.
- dut := testbench.NewDUT(t)
- clientFD, clientPort := dut.CreateBoundSocket(t, unix.SOCK_STREAM|unix.SOCK_NONBLOCK, unix.IPPROTO_TCP, dut.Net.RemoteIPv6)
- conn := dut.Net.NewTCPIPv6(t, testbench.TCP{DstPort: &clientPort}, testbench.TCP{SrcPort: &clientPort})
- defer conn.Close(t)
-
- // Bring the DUT to SYN-SENT state with a non-blocking connect.
- sa := unix.SockaddrInet6{
- Port: int(conn.SrcPort()),
- ZoneId: dut.Net.RemoteDevID,
- }
- copy(sa.Addr[:], dut.Net.LocalIPv6)
- if _, err := dut.ConnectWithErrno(context.Background(), t, clientFD, &sa); err != unix.EINPROGRESS {
- t.Errorf("got connect() = %v, want EINPROGRESS", err)
- }
-
- // Get the SYN.
- tcp, err := conn.Expect(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)}, time.Second)
- if err != nil {
- t.Fatalf("expected SYN: %s", err)
- }
-
- // Send a host unreachable message.
- icmpPayload := testbench.Layers{tcp.Prev(), tcp}
- bytes, err := icmpPayload.ToBytes()
- if err != nil {
- t.Fatalf("got icmpPayload.ToBytes() = (_, %s), want = (_, nil)", err)
- }
-
- layers := conn.CreateFrame(t, nil)
- layers[len(layers)-1] = &testbench.ICMPv6{
- Type: testbench.ICMPv6Type(header.ICMPv6DstUnreachable),
- Code: testbench.ICMPv6Code(header.ICMPv6NetworkUnreachable),
- Payload: bytes,
- }
- conn.SendFrameStateless(t, layers)
-
- if err := getConnectError(t, &dut, clientFD); err != unix.ENETUNREACH {
- t.Errorf("got connect() = %v, want EHOSTUNREACH", err)
- }
-}
-
-// getConnectError gets the errno generated by the on-going connect attempt on
-// fd. fd must be non-blocking and there must be a connect call to fd which
-// returned EINPROGRESS before. These conditions are guaranteed in this test.
-func getConnectError(t *testing.T, dut *testbench.DUT, fd int32) error {
- t.Helper()
- // We previously got EINPROGRESS from the connect call. We can
- // handle it as explained by connect(2):
- // EINPROGRESS:
- // The socket is nonblocking and the connection cannot be
- // completed immediately. It is possible to select(2) or poll(2)
- // for completion by selecting the socket for writing. After
- // select(2) indicates writability, use getsockopt(2) to read
- // the SO_ERROR option at level SOL_SOCKET to determine
- // whether connect() completed successfully (SO_ERROR is
- // zero) or unsuccessfully (SO_ERROR is one of the usual
- // error codes listed here, explaining the reason for the
- // failure).
- dut.PollOne(t, fd, unix.POLLOUT, 10*time.Second)
- if errno := dut.GetSockOptInt(t, fd, unix.SOL_SOCKET, unix.SO_ERROR); errno != 0 {
- return unix.Errno(errno)
- }
- return nil
-}
diff --git a/test/packetimpact/tests/tcp_noaccept_close_rst_test.go b/test/packetimpact/tests/tcp_noaccept_close_rst_test.go
deleted file mode 100644
index 14eb7d93b..000000000
--- a/test/packetimpact/tests/tcp_noaccept_close_rst_test.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_noaccept_close_rst_test
-
-import (
- "flag"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
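-// TestTcpNoAcceptCloseReset tests that the DUT sends a RST-ACK when its listening
-// socket is closed while an established connection is still waiting in the accept queue.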
-func TestTcpNoAcceptCloseReset(t *testing.T) {
- dut := testbench.NewDUT(t)
- listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- conn.Connect(t)
- defer conn.Close(t)
- // We need to wait for POLLIN event on listenFd to know the connection is
- // established. Otherwise there could be a race when we issue the Close
- // command prior to the DUT receiving the last ack of the handshake and
- // it will only respond RST instead of RST+ACK.
- dut.PollOne(t, listenFd, unix.POLLIN, time.Second)
- dut.Close(t, listenFd)
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst | header.TCPFlagAck)}, 1*time.Second); err != nil {
- t.Fatalf("expected a RST-ACK packet but got none: %s", err)
- }
-}
diff --git a/test/packetimpact/tests/tcp_outside_the_window_test.go b/test/packetimpact/tests/tcp_outside_the_window_test.go
deleted file mode 100644
index 0523887d9..000000000
--- a/test/packetimpact/tests/tcp_outside_the_window_test.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_outside_the_window_test
-
-import (
- "flag"
- "fmt"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/seqnum"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-// TestTCPOutsideTheWindow tests the behavior of the DUT when packets arrive
-// that are inside or outside the TCP window. Packets that are outside the
-// window should force an extra ACK, as described in RFC793 page 69:
-// https://tools.ietf.org/html/rfc793#page-69
-func TestTCPOutsideTheWindow(t *testing.T) {
- for _, tt := range []struct {
- description string
- tcpFlags header.TCPFlags
- payload []testbench.Layer
- seqNumOffset seqnum.Size
- expectACK bool
- }{
- {"SYN", header.TCPFlagSyn, nil, 0, true},
- {"SYNACK", header.TCPFlagSyn | header.TCPFlagAck, nil, 0, true},
- {"ACK", header.TCPFlagAck, nil, 0, false},
- {"FIN", header.TCPFlagFin, nil, 0, false},
- {"Data", header.TCPFlagAck, []testbench.Layer{&testbench.Payload{Bytes: []byte("abc123")}}, 0, true},
-
- {"SYN", header.TCPFlagSyn, nil, 1, true},
- {"SYNACK", header.TCPFlagSyn | header.TCPFlagAck, nil, 1, true},
- {"ACK", header.TCPFlagAck, nil, 1, true},
- {"FIN", header.TCPFlagFin, nil, 1, false},
- {"Data", header.TCPFlagAck, []testbench.Layer{&testbench.Payload{Bytes: []byte("abc123")}}, 1, true},
-
- {"SYN", header.TCPFlagSyn, nil, 2, true},
- {"SYNACK", header.TCPFlagSyn | header.TCPFlagAck, nil, 2, true},
- {"ACK", header.TCPFlagAck, nil, 2, true},
- {"FIN", header.TCPFlagFin, nil, 2, false},
- {"Data", header.TCPFlagAck, []testbench.Layer{&testbench.Payload{Bytes: []byte("abc123")}}, 2, true},
- } {
- t.Run(fmt.Sprintf("%s%d", tt.description, tt.seqNumOffset), func(t *testing.T) {
- dut := testbench.NewDUT(t)
- listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(t, listenFD)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close(t)
- conn.Connect(t)
- acceptFD, _ := dut.Accept(t, listenFD)
- defer dut.Close(t, acceptFD)
-
- windowSize := seqnum.Size(*conn.SynAck(t).WindowSize) + tt.seqNumOffset
- conn.Drain(t)
- // Record the local sequence number before sending so that the expected
- // AckNum below is not affected by any increment this out-of-window packet
- // might cause.
- localSeqNum := testbench.Uint32(uint32(*conn.LocalSeqNum(t)))
- conn.Send(t, testbench.TCP{
- Flags: testbench.TCPFlags(tt.tcpFlags),
- SeqNum: testbench.Uint32(uint32(conn.LocalSeqNum(t).Add(windowSize))),
- }, tt.payload...)
- timeout := time.Second
- gotACK, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), AckNum: localSeqNum}, timeout)
- if tt.expectACK && err != nil {
- t.Fatalf("expected an ACK packet within %s but got none: %s", timeout, err)
- }
- // Linux always ACKs data packets that do not carry the SYN bit, and
- // netstack currently ACKs data packets unconditionally as well. So only
- // send a second segment, and check for the absence of an ACK, for packets
- // that carry no data.
- if tt.expectACK && tt.payload == nil {
- // Sending another out-of-window segment immediately should not trigger
- // an ACK if less than 500ms (the default rate limit for out-of-window ACKs)
- // has passed since the last ACK was sent.
- t.Logf("sending another segment")
- conn.Send(t, testbench.TCP{
- Flags: testbench.TCPFlags(tt.tcpFlags),
- SeqNum: testbench.Uint32(uint32(conn.LocalSeqNum(t).Add(windowSize))),
- }, tt.payload...)
- timeout := 3 * time.Second
- gotACK, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), AckNum: localSeqNum}, timeout)
- if err == nil {
- t.Fatalf("expected no ACK packet but got one: %s", gotACK)
- }
- }
- if !tt.expectACK && gotACK != nil {
- t.Fatalf("expected no ACK packet within %s but got one: %s", timeout, gotACK)
- }
- })
- }
-}
-
-// TestAckOTWSeqInClosing tests that the DUT sends an ACK with the correct
-// ACK number when it receives a packet with an out-of-window (OTW) sequence
-// number while in the CLOSING state. https://tools.ietf.org/html/rfc793#page-69
-func TestAckOTWSeqInClosing(t *testing.T) {
- for _, tt := range []struct {
- description string
- flags header.TCPFlags
- payloads testbench.Layers
- seqNumOffset seqnum.Size
- expectACK bool
- }{
- {"SYN", header.TCPFlagSyn, nil, 0, true},
- {"SYNACK", header.TCPFlagSyn | header.TCPFlagAck, nil, 0, true},
- {"ACK", header.TCPFlagAck, nil, 0, false},
- {"FINACK", header.TCPFlagFin | header.TCPFlagAck, nil, 0, false},
- {"Data", header.TCPFlagAck, []testbench.Layer{&testbench.Payload{Bytes: []byte("Sample Data")}}, 0, false},
-
- {"SYN", header.TCPFlagSyn, nil, 1, true},
- {"SYNACK", header.TCPFlagSyn | header.TCPFlagAck, nil, 1, true},
- {"ACK", header.TCPFlagAck, nil, 1, true},
- {"FINACK", header.TCPFlagFin | header.TCPFlagAck, nil, 1, true},
- {"Data", header.TCPFlagAck, []testbench.Layer{&testbench.Payload{Bytes: []byte("Sample Data")}}, 1, true},
-
- {"SYN", header.TCPFlagSyn, nil, 2, true},
- {"SYNACK", header.TCPFlagSyn | header.TCPFlagAck, nil, 2, true},
- {"ACK", header.TCPFlagAck, nil, 2, true},
- {"FINACK", header.TCPFlagFin | header.TCPFlagAck, nil, 2, true},
- {"Data", header.TCPFlagAck, []testbench.Layer{&testbench.Payload{Bytes: []byte("Sample Data")}}, 2, true},
- } {
- t.Run(fmt.Sprintf("%s%d", tt.description, tt.seqNumOffset), func(t *testing.T) {
- dut := testbench.NewDUT(t)
- listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(t, listenFD)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close(t)
- conn.Connect(t)
- acceptFD, _ := dut.Accept(t, listenFD)
- defer dut.Close(t, acceptFD)
-
- dut.Shutdown(t, acceptFD, unix.SHUT_WR)
-
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil {
- t.Fatalf("expected FINACK from DUT, but got none: %s", err)
- }
-
- // Do not ack the FIN from DUT so that the TCP state on DUT is CLOSING instead of CLOSED.
- seqNumForTheirFIN := testbench.Uint32(uint32(*conn.RemoteSeqNum(t)) - 1)
- conn.Send(t, testbench.TCP{AckNum: seqNumForTheirFIN, Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)})
-
- gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second)
- if err != nil {
- t.Fatalf("expected an ACK to our FIN, but got none: %s", err)
- }
-
- windowSize := seqnum.Size(*gotTCP.WindowSize) + tt.seqNumOffset
- conn.SendFrameStateless(t, conn.CreateFrame(t, testbench.Layers{&testbench.TCP{
- SeqNum: testbench.Uint32(uint32(conn.LocalSeqNum(t).Add(windowSize))),
- AckNum: seqNumForTheirFIN,
- Flags: testbench.TCPFlags(tt.flags),
- }}, tt.payloads...))
-
- gotACK, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second)
- if tt.expectACK && err != nil {
- t.Errorf("expected an ACK but got none: %s", err)
- }
- if !tt.expectACK && gotACK != nil {
- t.Errorf("expected no ACK but got one: %s", gotACK)
- }
- })
- }
-}
diff --git a/test/packetimpact/tests/tcp_paws_mechanism_test.go b/test/packetimpact/tests/tcp_paws_mechanism_test.go
deleted file mode 100644
index 9054955ea..000000000
--- a/test/packetimpact/tests/tcp_paws_mechanism_test.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_paws_mechanism_test
-
-import (
- "encoding/hex"
- "flag"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-func TestPAWSMechanism(t *testing.T) {
- dut := testbench.NewDUT(t)
- listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(t, listenFD)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close(t)
-
- options := make([]byte, header.TCPOptionTSLength)
- header.EncodeTSOption(currentTS(), 0, options)
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn), Options: options})
- synAck, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn | header.TCPFlagAck)}, time.Second)
- if err != nil {
- t.Fatalf("didn't get synack during handshake: %s", err)
- }
- parsedSynOpts := header.ParseSynOptions(synAck.Options, true)
- if !parsedSynOpts.TS {
- t.Fatalf("expected TSOpt from DUT, options we got:\n%s", hex.Dump(synAck.Options))
- }
- tsecr := parsedSynOpts.TSVal
- header.EncodeTSOption(currentTS(), tsecr, options)
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), Options: options})
- acceptFD, _ := dut.Accept(t, listenFD)
- defer dut.Close(t, acceptFD)
-
- sampleData := []byte("Sample Data")
- sentTSVal := currentTS()
- header.EncodeTSOption(sentTSVal, tsecr, options)
- // The 3ms here is chosen arbitrarily to make sure we send increasing
- // timestamps each time. It should not cause any flakiness because
- // timestamps only need to be non-decreasing.
- time.Sleep(3 * time.Millisecond)
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), Options: options}, &testbench.Payload{Bytes: sampleData})
-
- gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second)
- if err != nil {
- t.Fatalf("expected an ACK but got none: %s", err)
- }
-
- parsedOpts := header.ParseTCPOptions(gotTCP.Options)
- if !parsedOpts.TS {
- t.Fatalf("expected TS option in response, options we got:\n%s", hex.Dump(gotTCP.Options))
- }
- if parsedOpts.TSVal < tsecr {
- t.Fatalf("TSVal should be non-decreasing, but %d < %d", parsedOpts.TSVal, tsecr)
- }
- if parsedOpts.TSEcr != sentTSVal {
- t.Fatalf("TSEcr should match our sent TSVal, %d != %d", parsedOpts.TSEcr, sentTSVal)
- }
- tsecr = parsedOpts.TSVal
- lastAckNum := gotTCP.AckNum
-
- badTSVal := sentTSVal - 100
- header.EncodeTSOption(badTSVal, tsecr, options)
- // The 3ms here is again chosen arbitrarily; this time.Sleep() should not
- // cause flakiness for the same reason discussed above.
- time.Sleep(3 * time.Millisecond)
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), Options: options}, &testbench.Payload{Bytes: sampleData})
-
- gotTCP, err = conn.Expect(t, testbench.TCP{AckNum: lastAckNum, Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second)
- if err != nil {
- t.Fatalf("expected segment with AckNum %d but got none: %s", lastAckNum, err)
- }
- parsedOpts = header.ParseTCPOptions(gotTCP.Options)
- if !parsedOpts.TS {
- t.Fatalf("expected TS option in response, options we got:\n%s", hex.Dump(gotTCP.Options))
- }
- if parsedOpts.TSVal < tsecr {
- t.Fatalf("TSVal should be non-decreasing, but %d < %d", parsedOpts.TSVal, tsecr)
- }
- if parsedOpts.TSEcr != sentTSVal {
- t.Fatalf("TSEcr should match our sent TSVal, %d != %d", parsedOpts.TSEcr, sentTSVal)
- }
-}
-
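-// currentTS returns the current time in milliseconds since the Unix epoch,
-// for use as the TCP timestamp (TSval) option value.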
-func currentTS() uint32 {
- return uint32(time.Now().UnixNano() / 1e6)
-}
diff --git a/test/packetimpact/tests/tcp_queue_send_recv_in_syn_sent_test.go b/test/packetimpact/tests/tcp_queue_send_recv_in_syn_sent_test.go
deleted file mode 100644
index 974c15384..000000000
--- a/test/packetimpact/tests/tcp_queue_send_recv_in_syn_sent_test.go
+++ /dev/null
@@ -1,286 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_queue_send_recv_in_syn_sent_test
-
-import (
- "bytes"
- "context"
- "encoding/hex"
- "errors"
- "flag"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-// TestQueueSendInSynSentHandshake tests send behavior when the TCP state
-// is SYN-SENT and the connection is eventually established.
-func TestQueueSendInSynSentHandshake(t *testing.T) {
- dut := testbench.NewDUT(t)
- socket, remotePort := dut.CreateBoundSocket(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, dut.Net.RemoteIPv4)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close(t)
-
- sampleData := []byte("Sample Data")
-
- dut.SetNonBlocking(t, socket, true)
- if _, err := dut.ConnectWithErrno(context.Background(), t, socket, conn.LocalAddr(t)); !errors.Is(err, unix.EINPROGRESS) {
- t.Fatalf("failed to bring DUT to SYN-SENT, got: %s, want EINPROGRESS", err)
- }
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)}, time.Second); err != nil {
- t.Fatalf("expected a SYN from DUT, but got none: %s", err)
- }
-
- // Test blocking send.
- dut.SetNonBlocking(t, socket, false)
-
- start := make(chan struct{})
- done := make(chan struct{})
- go func() {
- defer close(done)
-
- close(start)
- // Issue the SEND call in SYN-SENT; it should be queued until the
- // connection is established.
- if _, err := dut.SendWithErrno(context.Background(), t, socket, sampleData, 0); err != unix.Errno(0) {
- t.Errorf("failed to send on DUT: %s", err)
- }
- }()
-
- // Wait until the goroutine has been scheduled and is about to block on
- // the endpoint send/receive.
- <-start
- // The following sleep prevents the connection from being established
- // before we are blocked: there is still a small time window between
- // sending the RPC request and the system call actually blocking.
- time.Sleep(100 * time.Millisecond)
-
- select {
- case <-done:
- t.Fatal("expected send to be blocked in SYN-SENT")
- default:
- }
-
- // Bring the connection to Established.
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn | header.TCPFlagAck)})
-
- <-done
-
- // Expect the data from the DUT's enqueued send request.
- //
- // On Linux, this can be piggybacked with the ACK completing the
- // handshake. On gVisor, getting such a piggyback is a bit more
- // complicated because the actual data enqueuing occurs in the
- // callers of endpoint Write.
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagPsh | header.TCPFlagAck)}, &testbench.Payload{Bytes: sampleData}, time.Second); err != nil {
- t.Fatalf("expected payload was not received: %s", err)
- }
-
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagPsh | header.TCPFlagAck)}, &testbench.Payload{Bytes: sampleData})
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second); err != nil {
- t.Fatalf("expected an ACK from DUT, but got none: %s", err)
- }
-}
-
-// TestQueueRecvInSynSentHandshake tests recv behavior when the TCP state
-// is SYN-SENT and the connection is eventually established.
-func TestQueueRecvInSynSentHandshake(t *testing.T) {
- dut := testbench.NewDUT(t)
- socket, remotePort := dut.CreateBoundSocket(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, dut.Net.RemoteIPv4)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close(t)
-
- sampleData := []byte("Sample Data")
-
- dut.SetNonBlocking(t, socket, true)
- if _, err := dut.ConnectWithErrno(context.Background(), t, socket, conn.LocalAddr(t)); !errors.Is(err, unix.EINPROGRESS) {
- t.Fatalf("failed to bring DUT to SYN-SENT, got: %s, want EINPROGRESS", err)
- }
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)}, time.Second); err != nil {
- t.Fatalf("expected a SYN from DUT, but got none: %s", err)
- }
-
- if _, _, err := dut.RecvWithErrno(context.Background(), t, socket, int32(len(sampleData)), 0); err != unix.EWOULDBLOCK {
- t.Fatalf("expected error %s, got %s", unix.EWOULDBLOCK, err)
- }
-
- // Test blocking read.
- dut.SetNonBlocking(t, socket, false)
-
- start := make(chan struct{})
- done := make(chan struct{})
- go func() {
- defer close(done)
-
- close(start)
- // Issue the RECEIVE call in SYN-SENT; it should be queued until the
- // connection is established.
- n, buff, err := dut.RecvWithErrno(context.Background(), t, socket, int32(len(sampleData)), 0)
- if err != unix.Errno(0) {
- t.Errorf("failed to recv on DUT: %s", err)
- return
- }
- if got := buff[:n]; !bytes.Equal(got, sampleData) {
- t.Errorf("received data doesn't match, got:\n%s, want:\n%s", hex.Dump(got), hex.Dump(sampleData))
- }
- }()
-
- // Wait until the goroutine has been scheduled and is about to block on
- // the endpoint send/receive.
- <-start
-
- // The following sleep prevents the connection from being established
- // before we are blocked: there is still a small time window between
- // sending the RPC request and the system call actually blocking.
- time.Sleep(100 * time.Millisecond)
-
- // Bring the connection to Established.
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn | header.TCPFlagAck)})
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second); err != nil {
- t.Fatalf("expected an ACK from DUT, but got none: %s", err)
- }
-
- // Send sample payload so that DUT can recv.
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagPsh | header.TCPFlagAck)}, &testbench.Payload{Bytes: sampleData})
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second); err != nil {
- t.Fatalf("expected an ACK from DUT, but got none: %s", err)
- }
-
- <-done
-}
-
-// TestQueueSendInSynSentRST tests send behavior when the TCP state
-// is SYN-SENT and an RST is sent.
-func TestQueueSendInSynSentRST(t *testing.T) {
- dut := testbench.NewDUT(t)
- socket, remotePort := dut.CreateBoundSocket(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, dut.Net.RemoteIPv4)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close(t)
-
- sampleData := []byte("Sample Data")
-
- dut.SetNonBlocking(t, socket, true)
- if _, err := dut.ConnectWithErrno(context.Background(), t, socket, conn.LocalAddr(t)); !errors.Is(err, unix.EINPROGRESS) {
- t.Fatalf("failed to bring DUT to SYN-SENT, got: %s, want EINPROGRESS", err)
- }
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)}, time.Second); err != nil {
- t.Fatalf("expected a SYN from DUT, but got none: %s", err)
- }
-
- // Test blocking send.
- dut.SetNonBlocking(t, socket, false)
-
- start := make(chan struct{})
- done := make(chan struct{})
- go func() {
- defer close(done)
-
- close(start)
- // Issue the SEND call in SYN-SENT; it should be queued until the
- // connection attempt completes or fails.
- n, err := dut.SendWithErrno(context.Background(), t, socket, sampleData, 0)
- if err != unix.ECONNREFUSED {
- t.Errorf("expected error %s, got %s", unix.ECONNREFUSED, err)
- }
- if n != -1 {
- t.Errorf("expected return value %d, got %d", -1, n)
- }
- }()
-
- // Wait until the goroutine has been scheduled and is about to block on
- // the endpoint send/receive.
- <-start
-
- // The following sleep prevents the connection from being established
- // before we are blocked: there is still a small time window between
- // sending the RPC request and the system call actually blocking.
- time.Sleep(100 * time.Millisecond)
-
- select {
- case <-done:
- t.Fatal("expected send to be blocked in SYN-SENT")
- default:
- }
-
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst | header.TCPFlagAck)})
-
- <-done
-}
-
-// TestQueueRecvInSynSentRST tests recv behavior when the TCP state
-// is SYN-SENT and an RST is sent.
-func TestQueueRecvInSynSentRST(t *testing.T) {
- dut := testbench.NewDUT(t)
- socket, remotePort := dut.CreateBoundSocket(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, dut.Net.RemoteIPv4)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close(t)
-
- sampleData := []byte("Sample Data")
-
- dut.SetNonBlocking(t, socket, true)
- if _, err := dut.ConnectWithErrno(context.Background(), t, socket, conn.LocalAddr(t)); !errors.Is(err, unix.EINPROGRESS) {
- t.Fatalf("failed to bring DUT to SYN-SENT, got: %s, want EINPROGRESS", err)
- }
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)}, time.Second); err != nil {
- t.Fatalf("expected a SYN from DUT, but got none: %s", err)
- }
-
- if _, _, err := dut.RecvWithErrno(context.Background(), t, socket, int32(len(sampleData)), 0); err != unix.EWOULDBLOCK {
- t.Fatalf("expected error %s, got %s", unix.EWOULDBLOCK, err)
- }
-
- // Test blocking read.
- dut.SetNonBlocking(t, socket, false)
-
- start := make(chan struct{})
- done := make(chan struct{})
- go func() {
- defer close(done)
-
- close(start)
- // Issue the RECEIVE call in SYN-SENT; it should be queued until the
- // connection attempt completes or fails.
- n, _, err := dut.RecvWithErrno(context.Background(), t, socket, int32(len(sampleData)), 0)
- if err != unix.ECONNREFUSED {
- t.Errorf("expected error %s, got %s", unix.ECONNREFUSED, err)
- }
- if n != -1 {
- t.Errorf("expected return value %d, got %d", -1, n)
- }
- }()
-
- // Wait until the goroutine has been scheduled and is about to block on
- // the endpoint send/receive.
- <-start
-
- // The following sleep prevents the connection from being established
- // before we are blocked: there is still a small time window between
- // sending the RPC request and the system call actually blocking.
- time.Sleep(100 * time.Millisecond)
-
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst | header.TCPFlagAck)})
- <-done
-}
diff --git a/test/packetimpact/tests/tcp_rack_test.go b/test/packetimpact/tests/tcp_rack_test.go
deleted file mode 100644
index 5a60bf712..000000000
--- a/test/packetimpact/tests/tcp_rack_test.go
+++ /dev/null
@@ -1,404 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_rack_test
-
-import (
- "flag"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/seqnum"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-const (
- // payloadSize is the size used to send packets.
- payloadSize = header.TCPDefaultMSS
-
- // simulatedRTT is the delay between a packet being sent and its ACK,
- // used to inflate the measured RTT.
- simulatedRTT = 30 * time.Millisecond
-
- // numPktsForRTT is the number of packets sent and acked to establish
- // RTT.
- numPktsForRTT = 10
-)
-
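-// createSACKConnection establishes a SACK-permitted connection to a listening
-// socket on the DUT and returns the DUT, the testbench connection, and the
-// accepted and listening FDs.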
-func createSACKConnection(t *testing.T) (testbench.DUT, testbench.TCPIPv4, int32, int32) {
- dut := testbench.NewDUT(t)
- listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
-
- // Enable SACK.
- opts := make([]byte, 40)
- optsOff := 0
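- // Two NOPs pad the 2-byte SACK-permitted option out to a 4-byte boundary.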
- optsOff += header.EncodeNOP(opts[optsOff:])
- optsOff += header.EncodeNOP(opts[optsOff:])
- optsOff += header.EncodeSACKPermittedOption(opts[optsOff:])
-
- conn.ConnectWithOptions(t, opts[:optsOff])
- acceptFd, _ := dut.Accept(t, listenFd)
- return dut, conn, acceptFd, listenFd
-}
-
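-// closeSACKConnection closes the accepted and listening DUT sockets and the
-// testbench connection created by createSACKConnection.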
-func closeSACKConnection(t *testing.T, dut testbench.DUT, conn testbench.TCPIPv4, acceptFd, listenFd int32) {
- dut.Close(t, acceptFd)
- dut.Close(t, listenFd)
- conn.Close(t)
-}
-
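-// getRTTAndRTO returns the DUT's current RTT and RTO estimates, read via TCP_INFO.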
-func getRTTAndRTO(t *testing.T, dut testbench.DUT, acceptFd int32) (rtt, rto time.Duration) {
- info := dut.GetSockOptTCPInfo(t, acceptFd)
- return time.Duration(info.RTT) * time.Microsecond, time.Duration(info.RTO) * time.Microsecond
-}
-
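-// sendAndReceive has the DUT send numPkts payloads and expects each one on
-// the testbench side; when sendACK is true, each payload is ACKed after
-// simulatedRTT. It returns the send time of the last payload.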
-func sendAndReceive(t *testing.T, dut testbench.DUT, conn testbench.TCPIPv4, numPkts int, acceptFd int32, sendACK bool) time.Time {
- seqNum1 := *conn.RemoteSeqNum(t)
- payload := make([]byte, payloadSize)
- var lastSent time.Time
- for i, sn := 0, seqNum1; i < numPkts; i++ {
- lastSent = time.Now()
- dut.Send(t, acceptFd, payload, 0)
- gotOne, err := conn.Expect(t, testbench.TCP{SeqNum: testbench.Uint32(uint32(sn))}, time.Second)
- if err != nil {
- t.Fatalf("Expect #%d: %s", i+1, err)
- }
- if gotOne == nil {
- t.Fatalf("#%d: expected a packet within a second but got none", i+1)
- }
- sn.UpdateForward(seqnum.Size(payloadSize))
-
- if sendACK {
- time.Sleep(simulatedRTT)
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(sn))})
- }
- }
- return lastSent
-}
-
-// TestRACKTLPAllPacketsLost tests TLP when an entire flight of data is lost.
-func TestRACKTLPAllPacketsLost(t *testing.T) {
- dut, conn, acceptFd, listenFd := createSACKConnection(t)
- seqNum1 := *conn.RemoteSeqNum(t)
-
- // Send ACK for data packets to establish RTT.
- sendAndReceive(t, dut, conn, numPktsForRTT, acceptFd, true /* sendACK */)
- seqNum1.UpdateForward(seqnum.Size(numPktsForRTT * payloadSize))
-
- // We are not sending ACK for these packets.
- const numPkts = 5
- lastSent := sendAndReceive(t, dut, conn, numPkts, acceptFd, false /* sendACK */)
-
- // Probe Timeout (PTO) should be two times RTT. Check that the last
- // packet is retransmitted after probe timeout.
- rtt, _ := getRTTAndRTO(t, dut, acceptFd)
- pto := rtt * 2
- // We expect the 5th packet (the last unacknowledged packet) to be
- // retransmitted.
- tlpProbe := testbench.Uint32(uint32(seqNum1) + uint32((numPkts-1)*payloadSize))
- if _, err := conn.Expect(t, testbench.TCP{SeqNum: tlpProbe}, time.Second); err != nil {
- t.Fatalf("expected payload was not received: %s %v %v", err, rtt, pto)
- }
- diff := time.Now().Sub(lastSent)
- if diff < pto {
- t.Fatalf("expected payload was received before the probe timeout, got: %v, want: %v", diff, pto)
- }
- closeSACKConnection(t, dut, conn, acceptFd, listenFd)
-}
-
-// TestRACKTLPLost tests TLP when there are tail losses.
-// See: https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.4
-func TestRACKTLPLost(t *testing.T) {
- dut, conn, acceptFd, listenFd := createSACKConnection(t)
- seqNum1 := *conn.RemoteSeqNum(t)
-
- // Send ACK for data packets to establish RTT.
- sendAndReceive(t, dut, conn, numPktsForRTT, acceptFd, true /* sendACK */)
- seqNum1.UpdateForward(seqnum.Size(numPktsForRTT * payloadSize))
-
- // We are not sending ACK for these packets.
- const numPkts = 10
- lastSent := sendAndReceive(t, dut, conn, numPkts, acceptFd, false /* sendACK */)
-
- // Cumulative ACK for #[1-5] packets.
- ackNum := seqNum1.Add(seqnum.Size(6 * payloadSize))
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(ackNum))})
-
- // Probe Timeout (PTO) should be two times RTT. Check that the last
- // packet is retransmitted after probe timeout.
- rtt, _ := getRTTAndRTO(t, dut, acceptFd)
- pto := rtt * 2
- // We expect the 10th packet (the last unacknowledged packet) to be
- // retransmitted.
- tlpProbe := testbench.Uint32(uint32(seqNum1) + uint32((numPkts-1)*payloadSize))
- if _, err := conn.Expect(t, testbench.TCP{SeqNum: tlpProbe}, time.Second); err != nil {
- t.Fatalf("expected payload was not received: %s", err)
- }
- diff := time.Now().Sub(lastSent)
- if diff < pto {
- t.Fatalf("expected payload was received before the probe timeout, got: %v, want: %v", diff, pto)
- }
- closeSACKConnection(t, dut, conn, acceptFd, listenFd)
-}
-
-// TestRACKWithSACK tests that RACK marks the packets as lost after receiving
-// the ACK for retransmitted packets.
-// See: https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-8.1
-func TestRACKWithSACK(t *testing.T) {
- dut, conn, acceptFd, listenFd := createSACKConnection(t)
- seqNum1 := *conn.RemoteSeqNum(t)
-
- // Send ACK for data packets to establish RTT.
- sendAndReceive(t, dut, conn, numPktsForRTT, acceptFd, true /* sendACK */)
- seqNum1.UpdateForward(seqnum.Size(numPktsForRTT * payloadSize))
-
- // We are not sending ACK for these packets.
- const numPkts = 3
- sendAndReceive(t, dut, conn, numPkts, acceptFd, false /* sendACK */)
-
- time.Sleep(simulatedRTT)
- // SACK for #2 packet.
- sackBlock := make([]byte, 40)
- start := seqNum1.Add(seqnum.Size(payloadSize))
- end := start.Add(seqnum.Size(payloadSize))
- sbOff := 0
- sbOff += header.EncodeNOP(sackBlock[sbOff:])
- sbOff += header.EncodeNOP(sackBlock[sbOff:])
- sbOff += header.EncodeSACKBlocks([]header.SACKBlock{{
- start, end,
- }}, sackBlock[sbOff:])
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(seqNum1)), Options: sackBlock[:sbOff]})
-
- rtt, _ := getRTTAndRTO(t, dut, acceptFd)
- timeout := 2 * rtt
- // RACK marks #1 packet as lost after RTT + reorderWindow (RTT/4) and
- // retransmits it.
- if _, err := conn.Expect(t, testbench.TCP{SeqNum: testbench.Uint32(uint32(seqNum1))}, timeout); err != nil {
- t.Fatalf("expected payload was not received: %s", err)
- }
-
- time.Sleep(simulatedRTT)
- // ACK for #1 packet.
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(end))})
-
- // RACK uses the transmission times of packets to mark them as lost.
- // As the 3rd packet was sent before the retransmitted 1st packet, RACK
- // marks it as lost and retransmits it.
- expectedSeqNum := testbench.Uint32(uint32(seqNum1) + uint32((numPkts-1)*payloadSize))
- if _, err := conn.Expect(t, testbench.TCP{SeqNum: expectedSeqNum}, timeout); err != nil {
- t.Fatalf("expected payload was not received: %s", err)
- }
- closeSACKConnection(t, dut, conn, acceptFd, listenFd)
-}
-
-// TestRACKWithoutReorder tests that without reordering RACK will retransmit the
-// lost packets after reorder timer expires.
-func TestRACKWithoutReorder(t *testing.T) {
- dut, conn, acceptFd, listenFd := createSACKConnection(t)
- seqNum1 := *conn.RemoteSeqNum(t)
-
- // Send ACK for data packets to establish RTT.
- sendAndReceive(t, dut, conn, numPktsForRTT, acceptFd, true /* sendACK */)
- seqNum1.UpdateForward(seqnum.Size(numPktsForRTT * payloadSize))
-
- // We are not sending ACK for these packets.
- const numPkts = 4
- sendAndReceive(t, dut, conn, numPkts, acceptFd, false /* sendACK */)
-
- // SACK for [3,4] packets.
- sackBlock := make([]byte, 40)
- start := seqNum1.Add(seqnum.Size(2 * payloadSize))
- end := start.Add(seqnum.Size(2 * payloadSize))
- sbOff := 0
- sbOff += header.EncodeNOP(sackBlock[sbOff:])
- sbOff += header.EncodeNOP(sackBlock[sbOff:])
- sbOff += header.EncodeSACKBlocks([]header.SACKBlock{{
- start, end,
- }}, sackBlock[sbOff:])
- time.Sleep(simulatedRTT)
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(seqNum1)), Options: sackBlock[:sbOff]})
-
- // RACK marks #1 and #2 packets as lost and retransmits both after
- // RTT + reorderWindow. The reorderWindow initially will be a small
- // fraction of RTT.
- rtt, _ := getRTTAndRTO(t, dut, acceptFd)
- timeout := 2 * rtt
- for i, sn := 0, seqNum1; i < 2; i++ {
- if _, err := conn.Expect(t, testbench.TCP{SeqNum: testbench.Uint32(uint32(sn))}, timeout); err != nil {
- t.Fatalf("expected payload was not received: %s", err)
- }
- sn.UpdateForward(seqnum.Size(payloadSize))
- }
- closeSACKConnection(t, dut, conn, acceptFd, listenFd)
-}
-
-// TestRACKWithReorder tests that RACK will retransmit segments when there is
-// reordering in the connection and reorder timer expires.
-func TestRACKWithReorder(t *testing.T) {
- dut, conn, acceptFd, listenFd := createSACKConnection(t)
- seqNum1 := *conn.RemoteSeqNum(t)
-
- // Send ACK for data packets to establish RTT.
- sendAndReceive(t, dut, conn, numPktsForRTT, acceptFd, true /* sendACK */)
- seqNum1.UpdateForward(seqnum.Size(numPktsForRTT * payloadSize))
-
- // We are not sending ACK for these packets.
- const numPkts = 4
- sendAndReceive(t, dut, conn, numPkts, acceptFd, false /* sendACK */)
-
- time.Sleep(simulatedRTT)
- // SACK in reverse order for the connection to detect reorder.
- var start seqnum.Value
- var end seqnum.Value
- for i := 0; i < numPkts-1; i++ {
- sackBlock := make([]byte, 40)
- sbOff := 0
- start = seqNum1.Add(seqnum.Size((numPkts - i - 1) * payloadSize))
- end = start.Add(seqnum.Size((i + 1) * payloadSize))
- sbOff += header.EncodeNOP(sackBlock[sbOff:])
- sbOff += header.EncodeNOP(sackBlock[sbOff:])
- sbOff += header.EncodeSACKBlocks([]header.SACKBlock{{
- start, end,
- }}, sackBlock[sbOff:])
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(seqNum1)), Options: sackBlock[:sbOff]})
- }
-
- // Send a DSACK block indicating that both the original and retransmitted
- // packets were received; RACK will increase the reordering window on
- // every DSACK.
- dsackBlock := make([]byte, 40)
- dbOff := 0
- start = seqNum1
- end = start.Add(seqnum.Size(2 * payloadSize))
- dbOff += header.EncodeNOP(dsackBlock[dbOff:])
- dbOff += header.EncodeNOP(dsackBlock[dbOff:])
- dbOff += header.EncodeSACKBlocks([]header.SACKBlock{{
- start, end,
- }}, dsackBlock[dbOff:])
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(seqNum1 + numPkts*payloadSize)), Options: dsackBlock[:dbOff]})
-
- seqNum1.UpdateForward(seqnum.Size(numPkts * payloadSize))
- sendTime := time.Now()
- sendAndReceive(t, dut, conn, numPkts, acceptFd, false /* sendACK */)
-
- time.Sleep(simulatedRTT)
- // Send SACK for [2-5] packets.
- sackBlock := make([]byte, 40)
- sbOff := 0
- start = seqNum1.Add(seqnum.Size(payloadSize))
- end = start.Add(seqnum.Size(3 * payloadSize))
- sbOff += header.EncodeNOP(sackBlock[sbOff:])
- sbOff += header.EncodeNOP(sackBlock[sbOff:])
- sbOff += header.EncodeSACKBlocks([]header.SACKBlock{{
- start, end,
- }}, sackBlock[sbOff:])
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(seqNum1)), Options: sackBlock[:sbOff]})
-
- // Expect the retransmission of #1 packet after RTT+ReorderWindow.
- if _, err := conn.Expect(t, testbench.TCP{SeqNum: testbench.Uint32(uint32(seqNum1))}, time.Second); err != nil {
- t.Fatalf("expected payload was not received: %s", err)
- }
- rtt, _ := getRTTAndRTO(t, dut, acceptFd)
- diff := time.Now().Sub(sendTime)
- if diff < rtt {
- t.Fatalf("expected payload was received too sonn, within RTT")
- }
-
- closeSACKConnection(t, dut, conn, acceptFd, listenFd)
-}
-
-// TestRACKWithLostRetransmission tests that RACK will not enter RTO when a
-// retransmitted segment is lost and enters fast recovery.
-func TestRACKWithLostRetransmission(t *testing.T) {
- dut, conn, acceptFd, listenFd := createSACKConnection(t)
- seqNum1 := *conn.RemoteSeqNum(t)
-
- // Send ACK for data packets to establish RTT.
- sendAndReceive(t, dut, conn, numPktsForRTT, acceptFd, true /* sendACK */)
- seqNum1.UpdateForward(seqnum.Size(numPktsForRTT * payloadSize))
-
- // We are not sending ACK for these packets.
- const numPkts = 5
- sendAndReceive(t, dut, conn, numPkts, acceptFd, false /* sendACK */)
-
- // SACK for [2-5] packets.
- sackBlock := make([]byte, 40)
- start := seqNum1.Add(seqnum.Size(payloadSize))
- end := start.Add(seqnum.Size(4 * payloadSize))
- sbOff := 0
- sbOff += header.EncodeNOP(sackBlock[sbOff:])
- sbOff += header.EncodeNOP(sackBlock[sbOff:])
- sbOff += header.EncodeSACKBlocks([]header.SACKBlock{{
- start, end,
- }}, sackBlock[sbOff:])
- time.Sleep(simulatedRTT)
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(seqNum1)), Options: sackBlock[:sbOff]})
-
- // RACK marks #1 packet as lost and retransmits it after
- // RTT + reorderWindow. The reorderWindow is bounded between a small
- // fraction of RTT and 1 RTT.
- rtt, _ := getRTTAndRTO(t, dut, acceptFd)
- timeout := 2 * rtt
- if _, err := conn.Expect(t, testbench.TCP{SeqNum: testbench.Uint32(uint32(seqNum1))}, timeout); err != nil {
- t.Fatalf("expected payload was not received: %s", err)
- }
-
- // Send #6 packet.
- payload := make([]byte, payloadSize)
- dut.Send(t, acceptFd, payload, 0)
- gotOne, err := conn.Expect(t, testbench.TCP{SeqNum: testbench.Uint32(uint32(seqNum1 + 5*payloadSize))}, time.Second)
- if err != nil {
- t.Fatalf("Expect #6: %s", err)
- }
- if gotOne == nil {
- t.Fatalf("#6: expected a packet within a second but got none")
- }
-
- // SACK for [2-6] packets.
- sackBlock1 := make([]byte, 40)
- start = seqNum1.Add(seqnum.Size(payloadSize))
- end = start.Add(seqnum.Size(5 * payloadSize))
- sbOff1 := 0
- sbOff1 += header.EncodeNOP(sackBlock1[sbOff1:])
- sbOff1 += header.EncodeNOP(sackBlock1[sbOff1:])
- sbOff1 += header.EncodeSACKBlocks([]header.SACKBlock{{
- start, end,
- }}, sackBlock1[sbOff1:])
- time.Sleep(simulatedRTT)
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(seqNum1)), Options: sackBlock1[:sbOff1]})
-
- // Expect re-retransmission of #1 packet without entering an RTO.
- if _, err := conn.Expect(t, testbench.TCP{SeqNum: testbench.Uint32(uint32(seqNum1))}, timeout); err != nil {
- t.Fatalf("expected payload was not received: %s", err)
- }
-
- // Check the congestion control state.
- info := dut.GetSockOptTCPInfo(t, acceptFd)
- if info.CaState != linux.TCP_CA_Recovery {
- t.Fatalf("expected connection to be in fast recovery, want: %v got: %v", linux.TCP_CA_Recovery, info.CaState)
- }
-
- closeSACKConnection(t, dut, conn, acceptFd, listenFd)
-}
diff --git a/test/packetimpact/tests/tcp_rcv_buf_space_test.go b/test/packetimpact/tests/tcp_rcv_buf_space_test.go
deleted file mode 100644
index f121d44eb..000000000
--- a/test/packetimpact/tests/tcp_rcv_buf_space_test.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_rcv_buf_space_test
-
-import (
- "context"
- "flag"
- "testing"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-// TestReduceRecvBuf tests that a packet within the receive window is still dropped
-// if the available buffer space drops below the size of the incoming
-// segment.
-func TestReduceRecvBuf(t *testing.T) {
- dut := testbench.NewDUT(t)
- listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(t, listenFd)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close(t)
-
- conn.Connect(t)
- acceptFd, _ := dut.Accept(t, listenFd)
- defer dut.Close(t, acceptFd)
-
- // Set a small receive buffer for the test.
- const rcvBufSz = 4096
- dut.SetSockOptInt(t, acceptFd, unix.SOL_SOCKET, unix.SO_RCVBUF, rcvBufSz)
-
- // Retrieve the actual buffer.
- bufSz := dut.GetSockOptInt(t, acceptFd, unix.SOL_SOCKET, unix.SO_RCVBUF)
-
- // Generate a payload one byte larger than the actual buffer size used by
- // the DUT.
- sampleData := testbench.GenerateRandomPayload(t, int(bufSz)+1)
- // Send the sample data to the DUT in pktSize-byte segments.
- const pktSize = 1400
- for payload := sampleData; len(payload) != 0; {
- payloadBytes := pktSize
- if l := len(payload); l < payloadBytes {
- payloadBytes = l
- }
-
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, []testbench.Layer{&testbench.Payload{Bytes: payload[:payloadBytes]}}...)
- payload = payload[payloadBytes:]
- }
-
- // First read should read < len(sampleData)
- if ret, _, err := dut.RecvWithErrno(context.Background(), t, acceptFd, int32(len(sampleData)), 0); ret == -1 || int(ret) == len(sampleData) {
- t.Fatalf("dut.RecvWithErrno(ctx, t, %d, %d, 0) = %d,_, %s", acceptFd, int32(len(sampleData)), ret, err)
- }
-
- // Second read should return EAGAIN as the last segment should have been
- // dropped because it exceeded the receive buffer space available in the
- // socket.
- if ret, got, err := dut.RecvWithErrno(context.Background(), t, acceptFd, int32(len(sampleData)), unix.MSG_DONTWAIT); got != nil || ret != -1 || err != unix.EAGAIN {
- t.Fatalf("expected no packets but got: %s", got)
- }
-}
diff --git a/test/packetimpact/tests/tcp_retransmits_test.go b/test/packetimpact/tests/tcp_retransmits_test.go
deleted file mode 100644
index d3fb789f4..000000000
--- a/test/packetimpact/tests/tcp_retransmits_test.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_retransmits_test
-
-import (
- "bytes"
- "flag"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
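-// getRTO returns the DUT's current retransmission timeout, read via TCP_INFO.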
-func getRTO(t *testing.T, dut testbench.DUT, acceptFd int32) (rto time.Duration) {
- info := dut.GetSockOptTCPInfo(t, acceptFd)
- return time.Duration(info.RTO) * time.Microsecond
-}
-
-// TestRetransmits tests that retransmits occur at exponentially increasing
-// time intervals.
-func TestRetransmits(t *testing.T) {
- dut := testbench.NewDUT(t)
- listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(t, listenFd)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close(t)
-
- conn.Connect(t)
- acceptFd, _ := dut.Accept(t, listenFd)
- defer dut.Close(t, acceptFd)
-
- dut.SetSockOptInt(t, acceptFd, unix.IPPROTO_TCP, unix.TCP_NODELAY, 1)
-
- sampleData := []byte("Sample Data")
- samplePayload := &testbench.Payload{Bytes: sampleData}
-
- // Give a chance for the dut to estimate RTO with RTT from the DATA-ACK.
- // This is to reduce the test run-time from the default initial RTO of 1s.
- // TODO(gvisor.dev/issue/2685) Estimate RTO during handshake, after which
- // we can skip this data send/recv which is solely to estimate RTO.
- dut.Send(t, acceptFd, sampleData, 0)
- if _, err := conn.ExpectData(t, &testbench.TCP{}, samplePayload, time.Second); err != nil {
- t.Fatalf("expected payload was not received: %s", err)
- }
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload)
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, nil, time.Second); err != nil {
- t.Fatalf("expected packet was not received: %s", err)
- }
- // Wait for the DUT to receive the data, thus ensuring that the stack has
- // estimated RTO before we query RTO via TCP_INFO.
- if got := dut.Recv(t, acceptFd, int32(len(sampleData)), 0); !bytes.Equal(got, sampleData) {
- t.Fatalf("got dut.Recv(t, %d, %d, 0) = %s, want %s", acceptFd, len(sampleData), got, sampleData)
- }
-
- const timeoutCorrection = time.Second
- const diffCorrection = 200 * time.Millisecond
- rto := getRTO(t, dut, acceptFd)
-
- dut.Send(t, acceptFd, sampleData, 0)
- seq := testbench.Uint32(uint32(*conn.RemoteSeqNum(t)))
- if _, err := conn.ExpectData(t, &testbench.TCP{SeqNum: seq}, samplePayload, rto+timeoutCorrection); err != nil {
- t.Fatalf("expected payload was not received: %s", err)
- }
-
- // Expect retransmits of the same segment.
- for i := 0; i < 5; i++ {
- startTime := time.Now()
- rto = getRTO(t, dut, acceptFd)
- if _, err := conn.ExpectData(t, &testbench.TCP{SeqNum: seq}, samplePayload, rto+timeoutCorrection); err != nil {
- t.Fatalf("expected payload was not received within %s loop %d err %s", rto+timeoutCorrection, i, err)
- }
- if diff := time.Since(startTime); diff+diffCorrection < rto {
- t.Fatalf("retransmit came sooner got: %s want: >= %s probe %d", diff+diffCorrection, rto, i)
- }
- }
-}
diff --git a/test/packetimpact/tests/tcp_send_window_sizes_piggyback_test.go b/test/packetimpact/tests/tcp_send_window_sizes_piggyback_test.go
deleted file mode 100644
index 64b7288fb..000000000
--- a/test/packetimpact/tests/tcp_send_window_sizes_piggyback_test.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_send_window_sizes_piggyback_test
-
-import (
- "flag"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-// TestSendWindowSizesPiggyback tests cases where segment sizes are close to
-// the sender window size and checks for ACK piggybacking in each of those cases.
-func TestSendWindowSizesPiggyback(t *testing.T) {
- sampleData := []byte("Sample Data")
- segmentSize := uint16(len(sampleData))
- // Advertise receive window sizes that are less than, equal to, or greater
- // than the enqueued segment size and check for segment transmits. The test
- // attempts to enqueue a segment on the dut before acknowledging the previous
- // segment and lets the dut piggyback any ACKs along with the enqueued segment.
- for _, tt := range []struct {
- description string
- windowSize uint16
- expectedPayload1 []byte
- expectedPayload2 []byte
- enqueue bool
- }{
- // Expect the first segment to be split as it cannot be accommodated in
- // the sender window. This means we need not enqueue a new segment after
- // the first segment.
- {"WindowSmallerThanSegment", segmentSize - 1, sampleData[:(segmentSize - 1)], sampleData[(segmentSize - 1):], false /* enqueue */},
-
- {"WindowEqualToSegment", segmentSize, sampleData, sampleData, true /* enqueue */},
-
- // Expect the second segment not to be split as its size is greater than
- // the available sender window size. Segments should not be split when
- // there is pending unacknowledged data and the segment size is greater
- // than the available sender window.
- {"WindowGreaterThanSegment", segmentSize + 1, sampleData, sampleData, true /* enqueue */},
- } {
- t.Run(tt.description, func(t *testing.T) {
- dut := testbench.NewDUT(t)
- listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(t, listenFd)
-
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort, WindowSize: testbench.Uint16(tt.windowSize)}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close(t)
-
- conn.Connect(t)
- acceptFd, _ := dut.Accept(t, listenFd)
- defer dut.Close(t, acceptFd)
-
- dut.SetSockOptInt(t, acceptFd, unix.IPPROTO_TCP, unix.TCP_NODELAY, 1)
-
- expectedTCP := testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck | header.TCPFlagPsh)}
-
- dut.Send(t, acceptFd, sampleData, 0)
- expectedPayload := testbench.Payload{Bytes: tt.expectedPayload1}
- if _, err := conn.ExpectData(t, &expectedTCP, &expectedPayload, time.Second); err != nil {
- t.Fatalf("expected payload was not received: %s", err)
- }
-
- // Expect any enqueued segment to be transmitted by the dut along with
- // a piggybacked ACK for our data.
-
- if tt.enqueue {
- // Enqueue a segment for the dut to transmit.
- dut.Send(t, acceptFd, sampleData, 0)
- }
-
- // Send ACK for the previous segment along with data for the dut to
- // receive and ACK back. Sending this ACK would make room for the dut
- // to transmit any enqueued segment.
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck | header.TCPFlagPsh), WindowSize: testbench.Uint16(tt.windowSize)}, &testbench.Payload{Bytes: sampleData})
-
- // Expect the dut to piggyback the ACK for received data along with
- // the segment enqueued for transmit.
- expectedPayload = testbench.Payload{Bytes: tt.expectedPayload2}
- if _, err := conn.ExpectData(t, &expectedTCP, &expectedPayload, time.Second); err != nil {
- t.Fatalf("expected payload was not received: %s", err)
- }
- })
- }
-}
diff --git a/test/packetimpact/tests/tcp_syncookie_test.go b/test/packetimpact/tests/tcp_syncookie_test.go
deleted file mode 100644
index 6be09996b..000000000
--- a/test/packetimpact/tests/tcp_syncookie_test.go
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_syncookie_test
-
-import (
- "flag"
- "fmt"
- "math"
- "testing"
- "time"
-
- "github.com/google/go-cmp/cmp"
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-// TestTCPSynCookie tests ACK handling for connections in SYNRCVD state,
-// with and without syncookies. It verifies that the passive open
-// connection is indeed using syncookies before proceeding.
-func TestTCPSynCookie(t *testing.T) {
- dut := testbench.NewDUT(t)
- for _, tt := range []struct {
- accept bool
- flags header.TCPFlags
- }{
- {accept: true, flags: header.TCPFlagAck},
- {accept: true, flags: header.TCPFlagAck | header.TCPFlagPsh},
- {accept: false, flags: header.TCPFlagAck | header.TCPFlagSyn},
- {accept: true, flags: header.TCPFlagAck | header.TCPFlagFin},
- {accept: false, flags: header.TCPFlagAck | header.TCPFlagRst},
- {accept: false, flags: header.TCPFlagRst},
- } {
- t.Run(fmt.Sprintf("flags=%s", tt.flags), func(t *testing.T) {
- // Make a copy before parallelizing the test and refer to that
- // within the test. Otherwise, the test reference could be pointing
- // to an incorrect variant based on how it is scheduled.
- test := tt
-
- t.Parallel()
-
- // The listening endpoint accepts one more connection than the listen
- // backlog. The listener starts using syncookies when it sees a new SYN
- // while it already has a backlog's worth of connections in SYNRCVD
- // state. Keep the listen backlog at 1, so that the test can set up two
- // connections, one without and one with syncookies.
- listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1 /*backlog*/)
- defer dut.Close(t, listenFD)
-
- var withoutSynCookieConn testbench.TCPIPv4
- var withSynCookieConn testbench.TCPIPv4
-
- for _, conn := range []*testbench.TCPIPv4{&withoutSynCookieConn, &withSynCookieConn} {
- *conn = dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- }
- defer withoutSynCookieConn.Close(t)
- defer withSynCookieConn.Close(t)
-
- // Set up the 2 connections in SYNRCVD state and verify that one of the
- // connections is indeed using syncookies by checking for the absence of
- // SYNACK retransmits.
- for _, c := range []struct {
- desc string
- conn *testbench.TCPIPv4
- expectRetransmit bool
- }{
- {desc: "without syncookies", conn: &withoutSynCookieConn, expectRetransmit: true},
- {desc: "with syncookies", conn: &withSynCookieConn, expectRetransmit: false},
- } {
- t.Run(c.desc, func(t *testing.T) {
- // Expect dut connection to have transitioned to SYNRCVD state.
- c.conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)})
- if _, err := c.conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn | header.TCPFlagAck)}, nil, time.Second); err != nil {
- t.Fatalf("expected SYNACK, but got %s", err)
- }
-
- // If the DUT listener is using syn cookies, it will not retransmit SYNACK.
- got, err := c.conn.ExpectData(t, &testbench.TCP{SeqNum: testbench.Uint32(uint32(*c.conn.RemoteSeqNum(t) - 1)), Flags: testbench.TCPFlags(header.TCPFlagSyn | header.TCPFlagAck)}, nil, 2*time.Second)
- if c.expectRetransmit && err != nil {
- t.Fatalf("expected retransmitted SYNACK, but got %s", err)
- }
- if !c.expectRetransmit && err == nil {
- t.Fatalf("expected no retransmitted SYNACK, but got %s", got)
- }
- })
- }
-
- // Check whether ACKs with the given flags complete the handshake.
- for _, c := range []struct {
- desc string
- conn *testbench.TCPIPv4
- }{
- {desc: "with syncookies", conn: &withSynCookieConn},
- {desc: "without syncookies", conn: &withoutSynCookieConn},
- } {
- t.Run(c.desc, func(t *testing.T) {
- pfds := dut.Poll(t, []unix.PollFd{{Fd: listenFD, Events: math.MaxInt16}}, 0 /*timeout*/)
- if got, want := len(pfds), 0; got != want {
- t.Fatalf("dut.Poll(...) = %d, want = %d", got, want)
- }
-
- sampleData := []byte("Sample Data")
- samplePayload := &testbench.Payload{Bytes: sampleData}
-
- c.conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(test.flags)}, samplePayload)
- pfds = dut.Poll(t, []unix.PollFd{{Fd: listenFD, Events: unix.POLLIN}}, time.Second)
- want := 0
- if test.accept {
- want = 1
- }
- if got := len(pfds); got != want {
- t.Fatalf("got dut.Poll(...) = %d, want = %d", got, want)
- }
- // Accept the connection to enable poll on any subsequent connection.
- if test.accept {
- fd, _ := dut.Accept(t, listenFD)
- if test.flags.Contains(header.TCPFlagFin) {
- if dut.Uname.IsLinux() {
- dut.PollOne(t, fd, unix.POLLIN|unix.POLLRDHUP, time.Second)
- } else {
- // TODO(gvisor.dev/issue/6015): Notify POLLIN|POLLRDHUP on incoming FIN.
- dut.PollOne(t, fd, unix.POLLIN, time.Second)
- }
- }
- got := dut.Recv(t, fd, int32(len(sampleData)), 0)
- if diff := cmp.Diff(got, sampleData); diff != "" {
- t.Fatalf("dut.Recv: data mismatch (-want +got):\n%s", diff)
- }
- dut.Close(t, fd)
- }
- })
- }
- })
- }
-}
diff --git a/test/packetimpact/tests/tcp_synrcvd_reset_test.go b/test/packetimpact/tests/tcp_synrcvd_reset_test.go
deleted file mode 100644
index 3346d43c4..000000000
--- a/test/packetimpact/tests/tcp_synrcvd_reset_test.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_syn_reset_test
-
-import (
- "flag"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-// TestTCPSynRcvdReset tests transition from SYN-RCVD to CLOSED.
-func TestTCPSynRcvdReset(t *testing.T) {
- dut := testbench.NewDUT(t)
- listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(t, listenFD)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close(t)
-
- // Expect dut connection to have transitioned to SYN-RCVD state.
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)})
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn | header.TCPFlagAck)}, nil, time.Second); err != nil {
- t.Fatalf("expected SYN-ACK %s", err)
- }
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst)})
-
- // Expect the connection to have transitioned SYN-RCVD to CLOSED.
- //
- // Retransmit the ACK a few times to give time for the DUT to transition to
- // CLOSED. We cannot use TCP_INFO to lookup the state as this is a passive
- // DUT connection.
- for i := 0; i < 5; i++ {
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst)}, nil, time.Second); err != nil {
- t.Logf("attempt %d: did not get the expected RST, retransmitting ACK: %s", i, err)
- continue
- }
- return
- }
- t.Fatal("did not receive a TCP RST")
-}
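
The comment above notes that TCP_INFO cannot be used to inspect the state of the passive, testbench-side half of the connection. For a socket the local process owns, the state can be read directly; the following standalone sketch is an illustration only (not part of the deleted test), assumes Linux and golang.org/x/sys/unix, and uses example.com:80 purely as a placeholder endpoint. State values follow the Linux TCP state enum (1 = ESTABLISHED, 7 = CLOSE).

package main

import (
	"fmt"
	"log"
	"net"

	"golang.org/x/sys/unix"
)

func main() {
	conn, err := net.Dial("tcp", "example.com:80")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	raw, err := conn.(*net.TCPConn).SyscallConn()
	if err != nil {
		log.Fatal(err)
	}
	// Read TCP_INFO on the underlying file descriptor.
	if err := raw.Control(func(fd uintptr) {
		info, err := unix.GetsockoptTCPInfo(int(fd), unix.SOL_TCP, unix.TCP_INFO)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("TCP state: %d, RTT: %dus\n", info.State, info.Rtt)
	}); err != nil {
		log.Fatal(err)
	}
}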
diff --git a/test/packetimpact/tests/tcp_synsent_reset_test.go b/test/packetimpact/tests/tcp_synsent_reset_test.go
deleted file mode 100644
index fe53e7061..000000000
--- a/test/packetimpact/tests/tcp_synsent_reset_test.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_synsent_reset_test
-
-import (
- "flag"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-// dutSynSentState sets up the dut connection in SYN-SENT state.
-func dutSynSentState(t *testing.T) (*testbench.DUT, *testbench.TCPIPv4, int32, uint16, uint16) {
- t.Helper()
-
- dut := testbench.NewDUT(t)
-
- clientFD, clientPort := dut.CreateBoundSocket(t, unix.SOCK_STREAM|unix.SOCK_NONBLOCK, unix.IPPROTO_TCP, dut.Net.RemoteIPv4)
- port := uint16(9001)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{SrcPort: &port, DstPort: &clientPort}, testbench.TCP{SrcPort: &clientPort, DstPort: &port})
-
- sa := unix.SockaddrInet4{Port: int(port)}
- copy(sa.Addr[:], dut.Net.LocalIPv4)
- // Bring the dut to SYN-SENT state with a non-blocking connect.
- dut.Connect(t, clientFD, &sa)
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)}, nil, time.Second); err != nil {
- t.Fatalf("expected SYN: %s", err)
- }
-
- return &dut, &conn, clientFD, port, clientPort
-}
-
-// TestTCPSynSentReset tests RFC793, p67: SYN-SENT to CLOSED transition.
-func TestTCPSynSentReset(t *testing.T) {
- dut, conn, fd, _, _ := dutSynSentState(t)
- defer conn.Close(t)
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst | header.TCPFlagAck)})
- // Expect the connection to have closed.
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst)}, nil, time.Second); err != nil {
- t.Fatalf("expected a TCP RST")
- }
- info := dut.GetSockOptTCPInfo(t, fd)
- if got, want := uint32(info.State), linux.TCP_CLOSE; got != want {
- t.Fatalf("got %d want %d", got, want)
- }
-}
-
-// TestTCPSynSentRcvdReset tests RFC793, p70, SYN-SENT to SYN-RCVD to CLOSED
-// transitions.
-func TestTCPSynSentRcvdReset(t *testing.T) {
- dut, c, fd, remotePort, clientPort := dutSynSentState(t)
- defer c.Close(t)
-
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{SrcPort: &remotePort, DstPort: &clientPort}, testbench.TCP{SrcPort: &clientPort, DstPort: &remotePort})
- defer conn.Close(t)
- // Initiate a new SYN connection with the same port pair
- // (simultaneous open case); expect the dut connection to move to
- // the SYN-RCVD state.
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)})
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn | header.TCPFlagAck)}, nil, time.Second); err != nil {
- t.Fatalf("expected SYN-ACK: %s", err)
- }
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst)})
- // Expect the connection to have transitioned SYN-RCVD to CLOSED.
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst)}, nil, time.Second); err != nil {
- t.Fatalf("expected a TCP RST")
- }
- info := dut.GetSockOptTCPInfo(t, fd)
- if got, want := uint32(info.State), linux.TCP_CLOSE; got != want {
- t.Fatalf("got %d want %d", got, want)
- }
-}
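
dutSynSentState above relies on a non-blocking connect leaving the socket in SYN-SENT until a SYN-ACK or RST arrives. Outside the testbench, the same state can be reached with raw syscalls; this sketch is illustrative only, assumes Linux, and uses 192.0.2.1:9001 (TEST-NET-1) as a black-hole placeholder so no reply ever comes back.

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM|unix.SOCK_NONBLOCK, unix.IPPROTO_TCP)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	sa := &unix.SockaddrInet4{Port: 9001, Addr: [4]byte{192, 0, 2, 1}}
	// A non-blocking connect returns EINPROGRESS immediately; the kernel has
	// already sent the SYN and the socket is now in SYN-SENT.
	if err := unix.Connect(fd, sa); err != unix.EINPROGRESS {
		log.Fatalf("connect: got %v, want EINPROGRESS", err)
	}
	fmt.Println("socket stays in SYN-SENT until a SYN-ACK or RST arrives")
}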
diff --git a/test/packetimpact/tests/tcp_timewait_reset_test.go b/test/packetimpact/tests/tcp_timewait_reset_test.go
deleted file mode 100644
index 89037f0a4..000000000
--- a/test/packetimpact/tests/tcp_timewait_reset_test.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_timewait_reset_test
-
-import (
- "flag"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-// TestTimeWaitReset tests handling of RST when in TIME_WAIT state.
-func TestTimeWaitReset(t *testing.T) {
- dut := testbench.NewDUT(t)
- listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1 /*backlog*/)
- defer dut.Close(t, listenFD)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close(t)
-
- conn.Connect(t)
- acceptFD, _ := dut.Accept(t, listenFD)
-
- // Trigger active close.
- dut.Close(t, acceptFD)
-
- _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)}, time.Second)
- if err != nil {
- t.Fatalf("expected a FIN: %s", err)
- }
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
- // Send a FIN, DUT should transition to TIME_WAIT from FIN_WAIT2.
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)})
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second); err != nil {
- t.Fatalf("expected an ACK for our FIN: %s", err)
- }
-
- // Send a RST, the DUT should transition to CLOSED from TIME_WAIT.
- // This is the default Linux behavior; it can be changed to ignore RSTs in
- // TIME_WAIT via the sysctl net.ipv4.tcp_rfc1337.
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst)})
-
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
- // The DUT should reply with RST to our ACK as the state should have
- // transitioned to CLOSED.
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst)}, time.Second); err != nil {
- t.Fatalf("expected a RST: %s", err)
- }
-}
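
The comment above refers to the Linux sysctl net.ipv4.tcp_rfc1337, which controls whether a RST aborts a TIME-WAIT connection. For reference, it can be inspected from Go by reading procfs; this small sketch is illustrative only and assumes Linux with /proc mounted.

package main

import (
	"fmt"
	"log"
	"os"
	"strings"
)

func main() {
	b, err := os.ReadFile("/proc/sys/net/ipv4/tcp_rfc1337")
	if err != nil {
		log.Fatal(err)
	}
	val := strings.TrimSpace(string(b))
	if val == "0" {
		fmt.Println("tcp_rfc1337=0: a RST in TIME-WAIT closes the connection (default)")
	} else {
		fmt.Println("tcp_rfc1337=1: RSTs in TIME-WAIT are ignored (RFC 1337 protection)")
	}
}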
diff --git a/test/packetimpact/tests/tcp_unacc_seq_ack_test.go b/test/packetimpact/tests/tcp_unacc_seq_ack_test.go
deleted file mode 100644
index 389bfc629..000000000
--- a/test/packetimpact/tests/tcp_unacc_seq_ack_test.go
+++ /dev/null
@@ -1,274 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_unacc_seq_ack_test
-
-import (
- "flag"
- "fmt"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/tcpip/seqnum"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-func TestEstablishedUnaccSeqAck(t *testing.T) {
- for _, tt := range []struct {
- description string
- makeTestingTCP func(t *testing.T, conn *testbench.TCPIPv4, seqNumOffset, windowSize seqnum.Size) testbench.TCP
- seqNumOffset seqnum.Size
- expectAck bool
- restoreSeq bool
- }{
- {description: "OTWSeq", makeTestingTCP: testbench.GenerateOTWSeqSegment, seqNumOffset: 0, expectAck: true, restoreSeq: true},
- {description: "OTWSeq", makeTestingTCP: testbench.GenerateOTWSeqSegment, seqNumOffset: 1, expectAck: true, restoreSeq: true},
- {description: "OTWSeq", makeTestingTCP: testbench.GenerateOTWSeqSegment, seqNumOffset: 2, expectAck: true, restoreSeq: true},
- {description: "UnaccAck", makeTestingTCP: testbench.GenerateUnaccACKSegment, seqNumOffset: 0, expectAck: true, restoreSeq: false},
- {description: "UnaccAck", makeTestingTCP: testbench.GenerateUnaccACKSegment, seqNumOffset: 1, expectAck: false, restoreSeq: true},
- {description: "UnaccAck", makeTestingTCP: testbench.GenerateUnaccACKSegment, seqNumOffset: 2, expectAck: false, restoreSeq: true},
- } {
- t.Run(fmt.Sprintf("%s:offset=%d", tt.description, tt.seqNumOffset), func(t *testing.T) {
- dut := testbench.NewDUT(t)
- listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1 /*backlog*/)
- defer dut.Close(t, listenFD)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close(t)
-
- conn.Connect(t)
- dut.Accept(t, listenFD)
-
- sampleData := []byte("Sample Data")
- samplePayload := &testbench.Payload{Bytes: sampleData}
-
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload)
- gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second)
- if err != nil {
- t.Fatalf("expected ack %s", err)
- }
- windowSize := seqnum.Size(*gotTCP.WindowSize)
-
- origSeq := *conn.LocalSeqNum(t)
- // Send a segment with OTW Seq / unacc ACK.
- conn.Send(t, tt.makeTestingTCP(t, &conn, tt.seqNumOffset, windowSize), samplePayload)
- if tt.restoreSeq {
- // Restore the local sequence number to ensure that the incoming
- // ACK matches the TCP layer state.
- *conn.LocalSeqNum(t) = origSeq
- }
- gotAck, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second)
- if tt.expectAck && err != nil {
- t.Fatalf("expected an ack but got none: %s", err)
- }
- if err == nil && !tt.expectAck && gotAck != nil {
- t.Fatalf("expected no ack but got one: %s", gotAck)
- }
- })
- }
-}
-
-func TestPassiveCloseUnaccSeqAck(t *testing.T) {
- for _, tt := range []struct {
- description string
- makeTestingTCP func(t *testing.T, conn *testbench.TCPIPv4, seqNumOffset, windowSize seqnum.Size) testbench.TCP
- seqNumOffset seqnum.Size
- expectAck bool
- }{
- {description: "OTWSeq", makeTestingTCP: testbench.GenerateOTWSeqSegment, seqNumOffset: 0, expectAck: false},
- {description: "OTWSeq", makeTestingTCP: testbench.GenerateOTWSeqSegment, seqNumOffset: 1, expectAck: true},
- {description: "OTWSeq", makeTestingTCP: testbench.GenerateOTWSeqSegment, seqNumOffset: 2, expectAck: true},
- {description: "UnaccAck", makeTestingTCP: testbench.GenerateUnaccACKSegment, seqNumOffset: 0, expectAck: false},
- {description: "UnaccAck", makeTestingTCP: testbench.GenerateUnaccACKSegment, seqNumOffset: 1, expectAck: true},
- {description: "UnaccAck", makeTestingTCP: testbench.GenerateUnaccACKSegment, seqNumOffset: 2, expectAck: true},
- } {
- t.Run(fmt.Sprintf("%s:offset=%d", tt.description, tt.seqNumOffset), func(t *testing.T) {
- dut := testbench.NewDUT(t)
- listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1 /*backlog*/)
- defer dut.Close(t, listenFD)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close(t)
-
- conn.Connect(t)
- acceptFD, _ := dut.Accept(t, listenFD)
-
- // Send a FIN to DUT to initiate the passive close.
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck | header.TCPFlagFin)})
- gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second)
- if err != nil {
- t.Fatalf("expected an ACK for our fin and DUT should enter CLOSE_WAIT: %s", err)
- }
- windowSize := seqnum.Size(*gotTCP.WindowSize)
-
- sampleData := []byte("Sample Data")
- samplePayload := &testbench.Payload{Bytes: sampleData}
-
- // Send a segment with OTW Seq / unacc ACK.
- conn.Send(t, tt.makeTestingTCP(t, &conn, tt.seqNumOffset, windowSize), samplePayload)
- gotAck, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second)
- if tt.expectAck && err != nil {
- t.Errorf("expected an ack but got none: %s", err)
- }
- if err == nil && !tt.expectAck && gotAck != nil {
- t.Errorf("expected no ack but got one: %s", gotAck)
- }
-
- // Now let's verify DUT is indeed in CLOSE_WAIT
- dut.Close(t, acceptFD)
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck | header.TCPFlagFin)}, time.Second); err != nil {
- t.Fatalf("expected DUT to send a FIN: %s", err)
- }
- // Ack the FIN from DUT
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
- // Send some extra data to DUT
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, samplePayload)
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst)}, time.Second); err != nil {
- t.Fatalf("expected DUT to send an RST: %s", err)
- }
- })
- }
-}
-
-func TestActiveCloseUnaccpSeqAck(t *testing.T) {
- for _, tt := range []struct {
- description string
- makeTestingTCP func(t *testing.T, conn *testbench.TCPIPv4, seqNumOffset, windowSize seqnum.Size) testbench.TCP
- seqNumOffset seqnum.Size
- restoreSeq bool
- }{
- {description: "OTWSeq", makeTestingTCP: testbench.GenerateOTWSeqSegment, seqNumOffset: 0, restoreSeq: true},
- {description: "OTWSeq", makeTestingTCP: testbench.GenerateOTWSeqSegment, seqNumOffset: 1, restoreSeq: true},
- {description: "OTWSeq", makeTestingTCP: testbench.GenerateOTWSeqSegment, seqNumOffset: 2, restoreSeq: true},
- {description: "UnaccAck", makeTestingTCP: testbench.GenerateUnaccACKSegment, seqNumOffset: 0, restoreSeq: false},
- {description: "UnaccAck", makeTestingTCP: testbench.GenerateUnaccACKSegment, seqNumOffset: 1, restoreSeq: true},
- {description: "UnaccAck", makeTestingTCP: testbench.GenerateUnaccACKSegment, seqNumOffset: 2, restoreSeq: true},
- } {
- t.Run(fmt.Sprintf("%s:offset=%d", tt.description, tt.seqNumOffset), func(t *testing.T) {
- dut := testbench.NewDUT(t)
- listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1 /*backlog*/)
- defer dut.Close(t, listenFD)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close(t)
-
- conn.Connect(t)
- acceptFD, _ := dut.Accept(t, listenFD)
-
- // Trigger active close.
- dut.Shutdown(t, acceptFD, unix.SHUT_WR)
-
- // Get to FIN_WAIT2
- gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)}, time.Second)
- if err != nil {
- t.Fatalf("expected a FIN: %s", err)
- }
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
-
- sendUnaccSeqAck := func(state string) {
- t.Helper()
- sampleData := []byte("Sample Data")
- samplePayload := &testbench.Payload{Bytes: sampleData}
-
- origSeq := *conn.LocalSeqNum(t)
- // Send a segment with OTW Seq / unacc ACK.
- conn.Send(t, tt.makeTestingTCP(t, &conn, tt.seqNumOffset, seqnum.Size(*gotTCP.WindowSize)), samplePayload)
- if tt.restoreSeq {
- // Restore the local sequence number to ensure that the
- // incoming ACK matches the TCP layer state.
- *conn.LocalSeqNum(t) = origSeq
- }
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second); err != nil {
- t.Errorf("expected an ack in %s state, but got none: %s", state, err)
- }
- }
-
- sendUnaccSeqAck("FIN_WAIT2")
-
- // Send a FIN to DUT to get to TIME_WAIT
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)})
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second); err != nil {
- t.Fatalf("expected an ACK for our fin and DUT should enter TIME_WAIT: %s", err)
- }
-
- sendUnaccSeqAck("TIME_WAIT")
- })
- }
-}
-
-func TestSimultaneousCloseUnaccSeqAck(t *testing.T) {
- for _, tt := range []struct {
- description string
- makeTestingTCP func(t *testing.T, conn *testbench.TCPIPv4, seqNumOffset, windowSize seqnum.Size) testbench.TCP
- seqNumOffset seqnum.Size
- expectAck bool
- }{
- {description: "OTWSeq", makeTestingTCP: testbench.GenerateOTWSeqSegment, seqNumOffset: 0, expectAck: false},
- {description: "OTWSeq", makeTestingTCP: testbench.GenerateOTWSeqSegment, seqNumOffset: 1, expectAck: true},
- {description: "OTWSeq", makeTestingTCP: testbench.GenerateOTWSeqSegment, seqNumOffset: 2, expectAck: true},
- {description: "UnaccAck", makeTestingTCP: testbench.GenerateUnaccACKSegment, seqNumOffset: 0, expectAck: false},
- {description: "UnaccAck", makeTestingTCP: testbench.GenerateUnaccACKSegment, seqNumOffset: 1, expectAck: true},
- {description: "UnaccAck", makeTestingTCP: testbench.GenerateUnaccACKSegment, seqNumOffset: 2, expectAck: true},
- } {
- t.Run(fmt.Sprintf("%s:offset=%d", tt.description, tt.seqNumOffset), func(t *testing.T) {
- dut := testbench.NewDUT(t)
- listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1 /*backlog*/)
- defer dut.Close(t, listenFD)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close(t)
-
- conn.Connect(t)
- acceptFD, _ := dut.Accept(t, listenFD)
-
- // Trigger active close.
- dut.Shutdown(t, acceptFD, unix.SHUT_WR)
-
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil {
- t.Fatalf("expected a FIN: %s", err)
- }
- // Do not ack the FIN from DUT so that we get to CLOSING.
- seqNumForTheirFIN := testbench.Uint32(uint32(*conn.RemoteSeqNum(t)) - 1)
- conn.Send(t, testbench.TCP{AckNum: seqNumForTheirFIN, Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)})
-
- gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second)
- if err != nil {
- t.Errorf("expected an ACK to our FIN, but got none: %s", err)
- }
-
- sampleData := []byte("Sample Data")
- samplePayload := &testbench.Payload{Bytes: sampleData}
-
- origSeq := uint32(*conn.LocalSeqNum(t))
- // Send a segment with OTW Seq / unacc ACK.
- tcp := tt.makeTestingTCP(t, &conn, tt.seqNumOffset, seqnum.Size(*gotTCP.WindowSize))
- if tt.description == "OTWSeq" {
- // If we generate an OTW Seq segment, make sure we don't acknowledge their FIN so that
- // we stay in CLOSING.
- tcp.AckNum = seqNumForTheirFIN
- }
- conn.Send(t, tcp, samplePayload)
-
- got, err := conn.Expect(t, testbench.TCP{AckNum: testbench.Uint32(origSeq), Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second)
- if tt.expectAck && err != nil {
- t.Errorf("expected an ack in CLOSING state, but got none: %s", err)
- }
- if !tt.expectAck && got != nil {
- t.Errorf("expected no ack in CLOSING state, but got one: %s", got)
- }
- })
- }
-}
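
The out-of-window sequence and unacceptable-ACK segments generated above exercise the RFC 793 acceptability checks. A minimal standalone version of those checks, written with plain 32-bit modular arithmetic rather than the testbench's seqnum package, might look like the following sketch.

package main

import "fmt"

// seqInWindow reports whether seq falls in [rcvNxt, rcvNxt+rcvWnd) using
// modular 32-bit comparison, i.e. whether a segment begins inside the
// receive window.
func seqInWindow(seq, rcvNxt, rcvWnd uint32) bool {
	return seq-rcvNxt < rcvWnd
}

// ackAcceptable reports whether ack falls within [sndUna, sndNxt]; an ACK for
// data beyond sndNxt is the kind of "unacceptable ACK" the UnaccAck cases
// above generate.
func ackAcceptable(ack, sndUna, sndNxt uint32) bool {
	return ack-sndUna <= sndNxt-sndUna
}

func main() {
	const rcvNxt, rcvWnd = 1000, 65535
	fmt.Println(seqInWindow(rcvNxt, rcvNxt, rcvWnd))        // true: next expected byte
	fmt.Println(seqInWindow(rcvNxt+rcvWnd, rcvNxt, rcvWnd)) // false: just past the window

	const sndUna, sndNxt = 5000, 6000
	fmt.Println(ackAcceptable(sndNxt, sndUna, sndNxt))   // true
	fmt.Println(ackAcceptable(sndNxt+1, sndUna, sndNxt)) // false: acks unsent data
}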
diff --git a/test/packetimpact/tests/tcp_user_timeout_test.go b/test/packetimpact/tests/tcp_user_timeout_test.go
deleted file mode 100644
index ef38bd738..000000000
--- a/test/packetimpact/tests/tcp_user_timeout_test.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_user_timeout_test
-
-import (
- "flag"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-func sendPayload(t *testing.T, conn *testbench.TCPIPv4, dut *testbench.DUT, fd int32) {
- sampleData := make([]byte, 100)
- for i := range sampleData {
- sampleData[i] = uint8(i)
- }
- conn.Drain(t)
- dut.Send(t, fd, sampleData, 0)
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck | header.TCPFlagPsh)}, &testbench.Payload{Bytes: sampleData}, time.Second); err != nil {
- t.Fatalf("expected data but got none: %s", err)
- }
-}
-
-func sendFIN(t *testing.T, conn *testbench.TCPIPv4, dut *testbench.DUT, fd int32) {
- dut.Close(t, fd)
-}
-
-func TestTCPUserTimeout(t *testing.T) {
- for _, tt := range []struct {
- description string
- userTimeout time.Duration
- sendDelay time.Duration
- }{
- {"NoUserTimeout", 0, 3 * time.Second},
- {"ACKBeforeUserTimeout", 5 * time.Second, 4 * time.Second},
- {"ACKAfterUserTimeout", 5 * time.Second, 7 * time.Second},
- } {
- for _, ttf := range []struct {
- description string
- f func(_ *testing.T, _ *testbench.TCPIPv4, _ *testbench.DUT, fd int32)
- }{
- {"AfterPayload", sendPayload},
- {"AfterFIN", sendFIN},
- } {
- t.Run(tt.description+ttf.description, func(t *testing.T) {
- // Create a socket, listen, TCP handshake, and accept.
- dut := testbench.NewDUT(t)
- listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(t, listenFD)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close(t)
- conn.Connect(t)
- acceptFD, _ := dut.Accept(t, listenFD)
-
- if tt.userTimeout != 0 {
- dut.SetSockOptInt(t, acceptFD, unix.SOL_TCP, unix.TCP_USER_TIMEOUT, int32(tt.userTimeout.Milliseconds()))
- }
-
- ttf.f(t, &conn, &dut, acceptFD)
-
- time.Sleep(tt.sendDelay)
- conn.Drain(t)
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
-
- // If TCP_USER_TIMEOUT was set and the above delay was longer than the
- // TCP_USER_TIMEOUT then the DUT should send a RST in response to the
- // testbench's packet.
- expectRST := tt.userTimeout != 0 && tt.sendDelay > tt.userTimeout
- expectTimeout := 5 * time.Second
- got, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst)}, expectTimeout)
- if expectRST && err != nil {
- t.Errorf("expected RST packet within %s but got none: %s", expectTimeout, err)
- }
- if !expectRST && got != nil {
- t.Errorf("expected no RST packet within %s but got one: %s", expectTimeout, got)
- }
- })
- }
- }
-}
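
The DUT side of this test arms TCP_USER_TIMEOUT through dut.SetSockOptInt. On an ordinary net.TCPConn the equivalent Linux-only setting can be applied as in this sketch; example.com:80 is only a placeholder endpoint.

package main

import (
	"log"
	"net"
	"time"

	"golang.org/x/sys/unix"
)

func setUserTimeout(c *net.TCPConn, d time.Duration) error {
	raw, err := c.SyscallConn()
	if err != nil {
		return err
	}
	var sockErr error
	if err := raw.Control(func(fd uintptr) {
		// TCP_USER_TIMEOUT takes a value in milliseconds; 0 restores the default.
		sockErr = unix.SetsockoptInt(int(fd), unix.SOL_TCP, unix.TCP_USER_TIMEOUT, int(d.Milliseconds()))
	}); err != nil {
		return err
	}
	return sockErr
}

func main() {
	conn, err := net.Dial("tcp", "example.com:80")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	if err := setUserTimeout(conn.(*net.TCPConn), 5*time.Second); err != nil {
		log.Fatal(err)
	}
	log.Println("TCP_USER_TIMEOUT set to 5s; unacknowledged data aborts the connection after that long")
}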
diff --git a/test/packetimpact/tests/tcp_window_shrink_test.go b/test/packetimpact/tests/tcp_window_shrink_test.go
deleted file mode 100644
index 0d65a2ea2..000000000
--- a/test/packetimpact/tests/tcp_window_shrink_test.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_window_shrink_test
-
-import (
- "flag"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-func TestWindowShrink(t *testing.T) {
- dut := testbench.NewDUT(t)
- listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(t, listenFd)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close(t)
-
- conn.Connect(t)
- acceptFd, _ := dut.Accept(t, listenFd)
- defer dut.Close(t, acceptFd)
-
- dut.SetSockOptInt(t, acceptFd, unix.IPPROTO_TCP, unix.TCP_NODELAY, 1)
-
- sampleData := []byte("Sample Data")
- samplePayload := &testbench.Payload{Bytes: sampleData}
-
- dut.Send(t, acceptFd, sampleData, 0)
- if _, err := conn.ExpectData(t, &testbench.TCP{}, samplePayload, time.Second); err != nil {
- t.Fatalf("expected payload was not received: %s", err)
- }
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
-
- dut.Send(t, acceptFd, sampleData, 0)
- dut.Send(t, acceptFd, sampleData, 0)
- if _, err := conn.ExpectData(t, &testbench.TCP{}, samplePayload, time.Second); err != nil {
- t.Fatalf("expected payload was not received: %s", err)
- }
- if _, err := conn.ExpectData(t, &testbench.TCP{}, samplePayload, time.Second); err != nil {
- t.Fatalf("expected payload was not received: %s", err)
- }
- // We close our receiving window here
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), WindowSize: testbench.Uint16(0)})
-
- dut.Send(t, acceptFd, []byte("Sample Data"), 0)
- // Note: There is another kind of zero-window probing which Windows uses (by sending one
- // new byte at `RemoteSeqNum`); if netstack wants to go that way, we may want to change
- // the following lines.
- expectedRemoteSeqNum := *conn.RemoteSeqNum(t) - 1
- if _, err := conn.ExpectData(t, &testbench.TCP{SeqNum: testbench.Uint32(uint32(expectedRemoteSeqNum))}, nil, time.Second); err != nil {
- t.Fatalf("expected a packet with sequence number %d: %s", expectedRemoteSeqNum, err)
- }
-}
diff --git a/test/packetimpact/tests/tcp_zero_receive_window_test.go b/test/packetimpact/tests/tcp_zero_receive_window_test.go
deleted file mode 100644
index bd33a2a03..000000000
--- a/test/packetimpact/tests/tcp_zero_receive_window_test.go
+++ /dev/null
@@ -1,202 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_zero_receive_window_test
-
-import (
- "flag"
- "fmt"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-// TestZeroReceiveWindow tests if the DUT sends a zero receive window eventually.
-func TestZeroReceiveWindow(t *testing.T) {
- for _, payloadLen := range []int{64, 512, 1024} {
- t.Run(fmt.Sprintf("TestZeroReceiveWindow_with_%dbytes_payload", payloadLen), func(t *testing.T) {
- dut := testbench.NewDUT(t)
- listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(t, listenFd)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close(t)
-
- conn.Connect(t)
- acceptFd, _ := dut.Accept(t, listenFd)
- defer dut.Close(t, acceptFd)
-
- dut.SetSockOptInt(t, acceptFd, unix.IPPROTO_TCP, unix.TCP_NODELAY, 1)
-
- fillRecvBuffer(t, &conn, &dut, acceptFd, payloadLen)
- })
- }
-}
-
-func fillRecvBuffer(t *testing.T, conn *testbench.TCPIPv4, dut *testbench.DUT, acceptFd int32, payloadLen int) {
- // Expect the DUT to eventually advertise zero receive window.
- // The test would time out otherwise.
- for readOnce := false; ; {
- samplePayload := &testbench.Payload{Bytes: testbench.GenerateRandomPayload(t, payloadLen)}
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload)
- gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second)
- if err != nil {
- t.Fatalf("expected packet was not received: %s", err)
- }
- // Read once to trigger the subsequent window update from the
- // DUT to grow the right edge of the receive window from what
- // was advertised in the SYN-ACK. This ensures that we test
- // for the full default buffer size (1MB on gVisor at the time
- // of writing this comment), thus testing for cases when the
- // scaled receive window size ends up > 65535 (0xffff).
- if !readOnce {
- if got := dut.Recv(t, acceptFd, int32(payloadLen), 0); len(got) != payloadLen {
- t.Fatalf("got dut.Recv(t, %d, %d, 0) = %d, want %d", acceptFd, payloadLen, len(got), payloadLen)
- }
- readOnce = true
- }
- windowSize := *gotTCP.WindowSize
- t.Logf("got window size = %d", windowSize)
- if windowSize == 0 {
- break
- }
- if payloadLen > int(windowSize) {
- payloadLen = int(windowSize)
- }
- }
-}
-
-func TestZeroToNonZeroWindowUpdate(t *testing.T) {
- dut := testbench.NewDUT(t)
- listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(t, listenFd)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close(t)
-
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)})
- synAck, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn | header.TCPFlagAck)}, time.Second)
- if err != nil {
- t.Fatalf("didn't get synack during handshake: %s", err)
- }
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
-
- acceptFd, _ := dut.Accept(t, listenFd)
- defer dut.Close(t, acceptFd)
-
- dut.SetSockOptInt(t, acceptFd, unix.IPPROTO_TCP, unix.TCP_NODELAY, 1)
-
- mss := header.ParseSynOptions(synAck.Options, true).MSS
- fillRecvBuffer(t, &conn, &dut, acceptFd, int(mss))
-
- // Read < mss worth of data from the receive buffer and expect the DUT to
- // not send a non-zero window update.
- payloadLen := mss - 1
- if got := dut.Recv(t, acceptFd, int32(payloadLen), 0); len(got) != int(payloadLen) {
- t.Fatalf("got dut.Recv(t, %d, %d, 0) = %d, want %d", acceptFd, payloadLen, len(got), payloadLen)
- }
- // Send a zero-window-probe to force an ACK from the receiver with any
- // window updates.
- conn.Send(t, testbench.TCP{SeqNum: testbench.Uint32(uint32(*conn.LocalSeqNum(t) - 1)), Flags: testbench.TCPFlags(header.TCPFlagAck)})
- gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second)
- if err != nil {
- t.Fatalf("expected packet was not received: %s", err)
- }
- if windowSize := *gotTCP.WindowSize; windowSize != 0 {
- t.Fatalf("got non zero window = %d", windowSize)
- }
-
- // Now, ensure that the DUT eventually sends non-zero window update.
- seqNum := testbench.Uint32(uint32(*conn.LocalSeqNum(t) - 1))
- ackNum := testbench.Uint32(uint32(*conn.LocalSeqNum(t)))
- recvCheckWindowUpdate := func(readLen int) uint16 {
- if got := dut.Recv(t, acceptFd, int32(readLen), 0); len(got) != readLen {
- t.Fatalf("got dut.Recv(t, %d, %d, 0) = %d, want %d", acceptFd, readLen, len(got), readLen)
- }
- conn.Send(t, testbench.TCP{SeqNum: seqNum, Flags: testbench.TCPFlags(header.TCPFlagPsh | header.TCPFlagAck)}, &testbench.Payload{Bytes: make([]byte, 1)})
- gotTCP, err := conn.Expect(t, testbench.TCP{AckNum: ackNum, Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second)
- if err != nil {
- t.Fatalf("expected packet was not received: %s", err)
- }
- return *gotTCP.WindowSize
- }
-
- if !dut.Uname.IsLinux() {
- if win := recvCheckWindowUpdate(1); win == 0 {
- t.Fatal("expected non-zero window update")
- }
- } else {
- // The Linux stack takes additional socket reads to send out a window update;
- // it's a function of sysctl_tcp_rmem among other things.
- // https://github.com/torvalds/linux/blob/7acac4b3196/net/ipv4/tcp_input.c#L687
- for {
- if win := recvCheckWindowUpdate(int(payloadLen)); win != 0 {
- break
- }
- }
- }
-}
-
-// TestNonZeroReceiveWindow tests that the DUT never sends a zero receive
-// window while data is being read from the socket buffer.
-func TestNonZeroReceiveWindow(t *testing.T) {
- for _, payloadLen := range []int{64, 512, 1024} {
- t.Run(fmt.Sprintf("TestNonZeroReceiveWindow_with_%dbytes_payload", payloadLen), func(t *testing.T) {
- dut := testbench.NewDUT(t)
- listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(t, listenFd)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close(t)
-
- conn.Connect(t)
- acceptFd, _ := dut.Accept(t, listenFd)
- defer dut.Close(t, acceptFd)
-
- dut.SetSockOptInt(t, acceptFd, unix.IPPROTO_TCP, unix.TCP_NODELAY, 1)
-
- samplePayload := &testbench.Payload{Bytes: testbench.GenerateRandomPayload(t, payloadLen)}
- var rcvWindow uint16
- initRcv := false
- // This loop keeps a running rcvWindow value from the initial ACK for the data
- // we sent. Once we have received ACKs with non-zero receive windows, we break
- // the loop.
- for {
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload)
- gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second)
- if err != nil {
- t.Fatalf("expected packet was not received: %s", err)
- }
- if got := dut.Recv(t, acceptFd, int32(payloadLen), 0); len(got) != payloadLen {
- t.Fatalf("got dut.Recv(t, %d, %d, 0) = %d, want %d", acceptFd, payloadLen, len(got), payloadLen)
- }
- if *gotTCP.WindowSize == 0 {
- t.Fatalf("expected non-zero receive window.")
- }
- if !initRcv {
- rcvWindow = uint16(*gotTCP.WindowSize)
- initRcv = true
- }
- if rcvWindow <= uint16(payloadLen) {
- break
- }
- rcvWindow -= uint16(payloadLen)
- }
- })
- }
-}
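
fillRecvBuffer's comment is about the scaled receive window growing past the 16-bit header field. The effective window is just the advertised field shifted by the window-scale option negotiated during the SYN exchange (RFC 7323), as this small illustrative sketch shows.

package main

import "fmt"

// effectiveWindow returns the receive window in bytes given the 16-bit window
// field from a TCP header and the peer's window scale shift.
func effectiveWindow(windowField uint16, wscale uint8) uint32 {
	return uint32(windowField) << wscale
}

func main() {
	// With a scale shift of 7, a field value of 8192 advertises a 1 MiB window,
	// matching the default buffer size mentioned in the comment above.
	fmt.Println(effectiveWindow(8192, 7)) // 1048576
}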
diff --git a/test/packetimpact/tests/tcp_zero_window_probe_retransmit_test.go b/test/packetimpact/tests/tcp_zero_window_probe_retransmit_test.go
deleted file mode 100644
index 22b17a39e..000000000
--- a/test/packetimpact/tests/tcp_zero_window_probe_retransmit_test.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_zero_window_probe_retransmit_test
-
-import (
- "bytes"
- "flag"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-// TestZeroWindowProbeRetransmit tests retransmits of zero window probes
-// to be sent at exponentially increasing time intervals.
-func TestZeroWindowProbeRetransmit(t *testing.T) {
- dut := testbench.NewDUT(t)
- listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(t, listenFd)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close(t)
-
- conn.Connect(t)
- acceptFd, _ := dut.Accept(t, listenFd)
- defer dut.Close(t, acceptFd)
-
- dut.SetSockOptInt(t, acceptFd, unix.IPPROTO_TCP, unix.TCP_NODELAY, 1)
-
- sampleData := []byte("Sample Data")
- samplePayload := &testbench.Payload{Bytes: sampleData}
-
- // Send and receive sample data to the dut.
- dut.Send(t, acceptFd, sampleData, 0)
- if _, err := conn.ExpectData(t, &testbench.TCP{}, samplePayload, time.Second); err != nil {
- t.Fatalf("expected payload was not received: %s", err)
- }
-
- // Check that the dut keeps the connection alive as long as the zero window
- // probes are acknowledged. Check if the zero window probes are sent at
- // exponentially increasing intervals. The timeout intervals are a function
- // of the recorded first zero probe transmission duration.
- //
- // Advertize zero receive window along with a payload.
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck | header.TCPFlagPsh), WindowSize: testbench.Uint16(0)}, samplePayload)
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, nil, time.Second); err != nil {
- t.Fatalf("expected packet was not received: %s", err)
- }
- // Wait for the payload to be received by the DUT, which also indicates
- // that the peer window advertisement was received.
- if got := dut.Recv(t, acceptFd, int32(len(sampleData)), 0); !bytes.Equal(got, sampleData) {
- t.Fatalf("got dut.Recv(t, %d, %d, 0) = %s, want %s", acceptFd, len(sampleData), got, sampleData)
- }
-
- probeSeq := testbench.Uint32(uint32(*conn.RemoteSeqNum(t) - 1))
- ackProbe := testbench.Uint32(uint32(*conn.RemoteSeqNum(t)))
-
- // Ask the dut to send out data.
- dut.Send(t, acceptFd, sampleData, 0)
-
- var prev time.Duration
- // Expect the dut to keep the connection alive as long as the remote is
- // acknowledging the zero-window probes.
- for i := 1; i <= 5; i++ {
- start := time.Now()
- // Expect zero-window probe with a timeout which is a function of the typical
- // first retransmission time. The retransmission intervals are supposed to
- // exponentially increase.
- if _, err := conn.ExpectData(t, &testbench.TCP{SeqNum: probeSeq}, nil, time.Duration(i)*time.Second); err != nil {
- t.Fatalf("%d: expected a probe with sequence number %d: %s", i, probeSeq, err)
- }
- if i == 1 {
- // Skip the first probe as computing transmit time for that is
- // non-deterministic because of the arbitrary time taken for
- // the dut to receive a send command and issue a send.
- continue
- }
-
- // Check if the time taken to receive the probe from the dut is
- // increasing exponentially. To avoid flakes, use a correction
- // factor for the expected duration which accounts for any
- // scheduling non-determinism.
- const timeCorrection = 200 * time.Millisecond
- got := time.Since(start)
- if want := (2 * prev) - timeCorrection; prev != 0 && got < want {
- t.Errorf("got zero probe %d after %s, want >= %s", i, got, want)
- }
- prev = got
- // Acknowledge the zero-window probes from the dut.
- conn.Send(t, testbench.TCP{AckNum: ackProbe, Flags: testbench.TCPFlags(header.TCPFlagAck), WindowSize: testbench.Uint16(0)})
- }
- // Advertize non-zero window.
- conn.Send(t, testbench.TCP{AckNum: ackProbe, Flags: testbench.TCPFlags(header.TCPFlagAck)})
- // Expect the dut to recover and transmit data.
- if _, err := conn.ExpectData(t, &testbench.TCP{SeqNum: ackProbe}, samplePayload, time.Second); err != nil {
- t.Fatalf("expected payload was not received: %s", err)
- }
-}
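
The loop above asserts that each zero-window probe arrives no sooner than roughly twice as long after the previous one, minus a correction for scheduling jitter. Isolated from the testbench, the same check could be written as in this sketch.

package main

import (
	"fmt"
	"time"
)

// backoffRoughlyDoubles reports whether each interval is at least twice the
// previous one, minus a fixed correction to absorb scheduling jitter.
func backoffRoughlyDoubles(intervals []time.Duration, correction time.Duration) bool {
	for i := 1; i < len(intervals); i++ {
		if want := 2*intervals[i-1] - correction; intervals[i] < want {
			return false
		}
	}
	return true
}

func main() {
	observed := []time.Duration{
		200 * time.Millisecond,
		410 * time.Millisecond,
		850 * time.Millisecond,
		1700 * time.Millisecond,
	}
	fmt.Println(backoffRoughlyDoubles(observed, 200*time.Millisecond)) // true
}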
diff --git a/test/packetimpact/tests/tcp_zero_window_probe_test.go b/test/packetimpact/tests/tcp_zero_window_probe_test.go
deleted file mode 100644
index 8b90fcbe9..000000000
--- a/test/packetimpact/tests/tcp_zero_window_probe_test.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_zero_window_probe_test
-
-import (
- "flag"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-// TestZeroWindowProbe tests a few cases of zero window probing over the
-// same connection.
-func TestZeroWindowProbe(t *testing.T) {
- dut := testbench.NewDUT(t)
- listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(t, listenFd)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close(t)
-
- conn.Connect(t)
- acceptFd, _ := dut.Accept(t, listenFd)
- defer dut.Close(t, acceptFd)
-
- dut.SetSockOptInt(t, acceptFd, unix.IPPROTO_TCP, unix.TCP_NODELAY, 1)
-
- sampleData := []byte("Sample Data")
- samplePayload := &testbench.Payload{Bytes: sampleData}
-
- start := time.Now()
- // Send and receive sample data to the dut.
- dut.Send(t, acceptFd, sampleData, 0)
- if _, err := conn.ExpectData(t, &testbench.TCP{}, samplePayload, time.Second); err != nil {
- t.Fatalf("expected payload was not received: %s", err)
- }
- sendTime := time.Now().Sub(start)
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload)
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, nil, time.Second); err != nil {
- t.Fatalf("expected packet was not received: %s", err)
- }
-
- // Test 1: Check for receipt of a zero window probe and record how long it
- // takes for the probe to be sent.
- //
- // Advertize zero window to the dut.
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), WindowSize: testbench.Uint16(0)})
-
- // Expected sequence number of the zero window probe.
- probeSeq := testbench.Uint32(uint32(*conn.RemoteSeqNum(t) - 1))
- // Expected ack number of the ACK for the probe.
- ackProbe := testbench.Uint32(uint32(*conn.RemoteSeqNum(t)))
-
- // Expect there are no zero-window probes sent until there is data to be sent out
- // from the dut.
- if _, err := conn.ExpectData(t, &testbench.TCP{SeqNum: probeSeq}, nil, 2*time.Second); err == nil {
- t.Fatalf("unexpected packet with sequence number %d", probeSeq)
- }
-
- start = time.Now()
- // Ask the dut to send out data.
- dut.Send(t, acceptFd, sampleData, 0)
- // Expect zero-window probe from the dut.
- if _, err := conn.ExpectData(t, &testbench.TCP{SeqNum: probeSeq}, nil, time.Second); err != nil {
- t.Fatalf("expected a packet with sequence number %d: %s", probeSeq, err)
- }
- // Expect the probe to be sent after some time. Compare against the previous
- // time recorded when the dut immediately sends out data on receiving the
- // send command.
- if startProbeDuration := time.Now().Sub(start); startProbeDuration <= sendTime {
- t.Fatalf("expected the first probe to be sent out after retransmission interval, got %s want > %s", startProbeDuration, sendTime)
- }
-
- // Test 2: Check if the dut recovers on advertizing a non-zero receive window
- // and sends out the sample payload after the send window opens.
- //
- // Advertize non-zero window to the dut and ack the zero window probe.
- conn.Send(t, testbench.TCP{AckNum: ackProbe, Flags: testbench.TCPFlags(header.TCPFlagAck)})
- // Expect the dut to recover and transmit data.
- if _, err := conn.ExpectData(t, &testbench.TCP{SeqNum: ackProbe}, samplePayload, time.Second); err != nil {
- t.Fatalf("expected payload was not received: %s", err)
- }
-
- // Test 3: Sanity check for dut's processing of a similar probe it sent.
- // Check if the dut responds as we do for a similar probe sent to it.
- // That is, with a sequence number one byte behind the unacknowledged
- // sequence number.
- p := testbench.Uint32(uint32(*conn.LocalSeqNum(t)))
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), SeqNum: testbench.Uint32(uint32(*conn.LocalSeqNum(t) - 1))})
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), AckNum: p}, nil, time.Second); err != nil {
- t.Fatalf("expected a packet with ack number: %d: %s", p, err)
- }
-}
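
The probeSeq and ackProbe values above encode the usual zero-window-probe convention: the probe carries a sequence number one byte behind the receiver's next expected byte (an already-acknowledged byte), while the ACK for it still acknowledges that next byte. A tiny sketch of the arithmetic:

package main

import "fmt"

func main() {
	var rcvNxt uint32 = 0x1000 // receiver's next expected sequence number
	probeSeq := rcvNxt - 1     // probe re-uses the last already-acknowledged byte
	ackProbe := rcvNxt         // the ACK for the probe still acknowledges rcvNxt
	fmt.Printf("probe seq=%#x, expected ack=%#x\n", probeSeq, ackProbe)
}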
diff --git a/test/packetimpact/tests/tcp_zero_window_probe_usertimeout_test.go b/test/packetimpact/tests/tcp_zero_window_probe_usertimeout_test.go
deleted file mode 100644
index 1ce4d22b7..000000000
--- a/test/packetimpact/tests/tcp_zero_window_probe_usertimeout_test.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tcp_zero_window_probe_usertimeout_test
-
-import (
- "flag"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-// TestZeroWindowProbeUserTimeout sanity tests user timeout when we are
-// retransmitting zero window probes.
-func TestZeroWindowProbeUserTimeout(t *testing.T) {
- dut := testbench.NewDUT(t)
- listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(t, listenFd)
- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close(t)
-
- conn.Connect(t)
- acceptFd, _ := dut.Accept(t, listenFd)
- defer dut.Close(t, acceptFd)
-
- dut.SetSockOptInt(t, acceptFd, unix.IPPROTO_TCP, unix.TCP_NODELAY, 1)
-
- sampleData := []byte("Sample Data")
- samplePayload := &testbench.Payload{Bytes: sampleData}
-
- // Send and receive sample data to the dut.
- dut.Send(t, acceptFd, sampleData, 0)
- if _, err := conn.ExpectData(t, &testbench.TCP{}, samplePayload, time.Second); err != nil {
- t.Fatalf("expected payload was not received: %s", err)
- }
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload)
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, nil, time.Second); err != nil {
- t.Fatalf("expected packet was not received: %s", err)
- }
-
- // Test 1: Check for receipt of a zero window probe and record how long it
- // takes for the probe to be sent.
- //
- // Advertize zero window to the dut.
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), WindowSize: testbench.Uint16(0)})
-
- // Expected sequence number of the zero window probe.
- probeSeq := testbench.Uint32(uint32(*conn.RemoteSeqNum(t) - 1))
- start := time.Now()
- // Ask the dut to send out data.
- dut.Send(t, acceptFd, sampleData, 0)
- // Expect zero-window probe from the dut.
- if _, err := conn.ExpectData(t, &testbench.TCP{SeqNum: probeSeq}, nil, time.Second); err != nil {
- t.Fatalf("expected a packet with sequence number %d: %s", probeSeq, err)
- }
- // Record the duration for the first probe; the dut sends the zero window probe
- // after a retransmission time interval.
- startProbeDuration := time.Now().Sub(start)
-
- // Test 2: Check if the dut times out the connection by honoring usertimeout
- // when the dut is sending zero-window probes.
- //
- // Set the user timeout to the observed first probe duration.
- dut.SetSockOptInt(t, acceptFd, unix.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int32(startProbeDuration.Milliseconds()))
- // Advertize zero window again.
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), WindowSize: testbench.Uint16(0)})
- // Ask the dut to send out data that would trigger zero window probe retransmissions.
- dut.Send(t, acceptFd, sampleData, 0)
-
- // Wait for the connection to time out after multiple zero-window probe retransmissions.
- time.Sleep(8 * startProbeDuration)
-
- // Expect the connection to have timed out and closed which would cause the dut
- // to reply with a RST to the ACK we send.
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst)}, nil, time.Second); err != nil {
- t.Fatalf("expected a TCP RST")
- }
-}
diff --git a/test/packetimpact/tests/udp_any_addr_recv_unicast_test.go b/test/packetimpact/tests/udp_any_addr_recv_unicast_test.go
deleted file mode 100644
index f4ae00a81..000000000
--- a/test/packetimpact/tests/udp_any_addr_recv_unicast_test.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package udp_any_addr_recv_unicast_test
-
-import (
- "flag"
- "net"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-func TestAnyRecvUnicastUDP(t *testing.T) {
- dut := testbench.NewDUT(t)
- boundFD, remotePort := dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, net.IPv4zero)
- defer dut.Close(t, boundFD)
- conn := dut.Net.NewUDPIPv4(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
- defer conn.Close(t)
-
- payload := testbench.GenerateRandomPayload(t, 1<<10 /* 1 KiB */)
- conn.SendIP(
- t,
- testbench.IPv4{DstAddr: testbench.Address(tcpip.Address(dut.Net.RemoteIPv4))},
- testbench.UDP{},
- &testbench.Payload{Bytes: payload},
- )
- got, want := dut.Recv(t, boundFD, int32(len(payload)+1), 0), payload
- if diff := cmp.Diff(want, got); diff != "" {
- t.Errorf("received payload does not match sent payload, diff (-want, +got):\n%s", diff)
- }
-}
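
The behavior under test, that a socket bound to the IPv4 wildcard address receives unicast datagrams sent to any local address, can be demonstrated with the standard library alone; this self-contained sketch uses the loopback address.

package main

import (
	"fmt"
	"log"
	"net"
)

func main() {
	// Bind to the IPv4 wildcard address on an ephemeral port.
	recv, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IPv4zero})
	if err != nil {
		log.Fatal(err)
	}
	defer recv.Close()

	// Send a unicast datagram to the loopback address at that port.
	port := recv.LocalAddr().(*net.UDPAddr).Port
	send, err := net.DialUDP("udp4", nil, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: port})
	if err != nil {
		log.Fatal(err)
	}
	defer send.Close()
	if _, err := send.Write([]byte("hello")); err != nil {
		log.Fatal(err)
	}

	buf := make([]byte, 64)
	n, from, err := recv.ReadFromUDP(buf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("wildcard-bound socket got %q from %s\n", buf[:n], from)
}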
diff --git a/test/packetimpact/tests/udp_discard_mcast_source_addr_test.go b/test/packetimpact/tests/udp_discard_mcast_source_addr_test.go
deleted file mode 100644
index f63cfcc9a..000000000
--- a/test/packetimpact/tests/udp_discard_mcast_source_addr_test.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package udp_discard_mcast_source_addr_test
-
-import (
- "context"
- "flag"
- "fmt"
- "net"
- "testing"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-var oneSecond = unix.Timeval{Sec: 1, Usec: 0}
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-func TestDiscardsUDPPacketsWithMcastSourceAddressV4(t *testing.T) {
- dut := testbench.NewDUT(t)
- remoteFD, remotePort := dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, dut.Net.RemoteIPv4)
- defer dut.Close(t, remoteFD)
- dut.SetSockOptTimeval(t, remoteFD, unix.SOL_SOCKET, unix.SO_RCVTIMEO, &oneSecond)
- conn := dut.Net.NewUDPIPv4(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
- defer conn.Close(t)
-
- for _, mcastAddr := range []net.IP{
- net.IPv4allsys,
- net.IPv4allrouter,
- net.IPv4(224, 0, 1, 42),
- net.IPv4(232, 1, 2, 3),
- } {
- t.Run(fmt.Sprintf("srcaddr=%s", mcastAddr), func(t *testing.T) {
- conn.SendIP(
- t,
- testbench.IPv4{SrcAddr: testbench.Address(tcpip.Address(mcastAddr.To4()))},
- testbench.UDP{},
- &testbench.Payload{Bytes: []byte("test payload")},
- )
-
- ret, payload, errno := dut.RecvWithErrno(context.Background(), t, remoteFD, 100, 0)
- if errno != unix.EAGAIN && errno != unix.EWOULDBLOCK {
- t.Errorf("Recv got unexpected result, ret=%d, payload=%q, errno=%s", ret, payload, errno)
- }
- })
- }
-}
-
-func TestDiscardsUDPPacketsWithMcastSourceAddressV6(t *testing.T) {
- dut := testbench.NewDUT(t)
- remoteFD, remotePort := dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, dut.Net.RemoteIPv6)
- defer dut.Close(t, remoteFD)
- dut.SetSockOptTimeval(t, remoteFD, unix.SOL_SOCKET, unix.SO_RCVTIMEO, &oneSecond)
- conn := dut.Net.NewUDPIPv6(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
- defer conn.Close(t)
-
- for _, mcastAddr := range []net.IP{
- net.IPv6interfacelocalallnodes,
- net.IPv6linklocalallnodes,
- net.IPv6linklocalallrouters,
- net.ParseIP("ff01::42"),
- net.ParseIP("ff02::4242"),
- } {
- t.Run(fmt.Sprintf("srcaddr=%s", mcastAddr), func(t *testing.T) {
- conn.SendIPv6(
- t,
- testbench.IPv6{SrcAddr: testbench.Address(tcpip.Address(mcastAddr.To16()))},
- testbench.UDP{},
- &testbench.Payload{Bytes: []byte("test payload")},
- )
- ret, payload, errno := dut.RecvWithErrno(context.Background(), t, remoteFD, 100, 0)
- if errno != unix.EAGAIN && errno != unix.EWOULDBLOCK {
- t.Errorf("Recv got unexpected result, ret=%d, payload=%q, errno=%s", ret, payload, errno)
- }
- })
- }
-}
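
Every source address injected above is a multicast address, which is why a conforming stack must silently drop the datagrams. The same classification is available from the standard library, as this short sketch shows.

package main

import (
	"fmt"
	"net"
)

func main() {
	for _, s := range []string{"224.0.0.1", "232.1.2.3", "ff02::4242", "192.0.2.1"} {
		ip := net.ParseIP(s)
		// A UDP datagram whose *source* address is multicast is never
		// legitimate and should be discarded by the receiver.
		fmt.Printf("%-12s multicast=%v\n", s, ip.IsMulticast())
	}
}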
diff --git a/test/packetimpact/tests/udp_icmp_error_propagation_test.go b/test/packetimpact/tests/udp_icmp_error_propagation_test.go
deleted file mode 100644
index bb33ca4b3..000000000
--- a/test/packetimpact/tests/udp_icmp_error_propagation_test.go
+++ /dev/null
@@ -1,352 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package udp_icmp_error_propagation_test
-
-import (
- "context"
- "flag"
- "fmt"
- "net"
- "sync"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/test/packetimpact/testbench"
-)
-
-func init() {
- testbench.Initialize(flag.CommandLine)
-}
-
-type connectionMode bool
-
-func (c connectionMode) String() string {
- if c {
- return "Connected"
- }
- return "Connectionless"
-}
-
-type icmpError int
-
-const (
- portUnreachable icmpError = iota
- timeToLiveExceeded
-)
-
-func (e icmpError) String() string {
- switch e {
- case portUnreachable:
- return "PortUnreachable"
- case timeToLiveExceeded:
- return "TimeToLiveExpired"
- }
- return "Unknown ICMP error"
-}
-
-func (e icmpError) ToICMPv4(payload []byte) *testbench.ICMPv4 {
- switch e {
- case portUnreachable:
- return &testbench.ICMPv4{
- Type: testbench.ICMPv4Type(header.ICMPv4DstUnreachable),
- Code: testbench.ICMPv4Code(header.ICMPv4PortUnreachable),
- Payload: payload,
- }
- case timeToLiveExceeded:
- return &testbench.ICMPv4{
- Type: testbench.ICMPv4Type(header.ICMPv4TimeExceeded),
- Code: testbench.ICMPv4Code(header.ICMPv4TTLExceeded),
- Payload: payload,
- }
- }
- return nil
-}
-
-type errorDetection struct {
- name string
- useValidConn bool
- f func(context.Context, *testing.T, testData)
-}
-
-type testData struct {
- dut *testbench.DUT
- conn *testbench.UDPIPv4
- remoteFD int32
- remotePort uint16
- cleanFD int32
- cleanPort uint16
- wantErrno unix.Errno
-}
-
-// wantErrno computes the errno to expect given the connection mode of a UDP
-// socket and the ICMP error it will receive.
-func wantErrno(c connectionMode, icmpErr icmpError) unix.Errno {
- if c && icmpErr == portUnreachable {
- return unix.ECONNREFUSED
- }
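-	// Any other combination (e.g. a TTL-exceeded error, or any ICMP error on an
-	// unconnected socket) is not reported to the application as an errno.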
- return unix.Errno(0)
-}
-
-// sendICMPError sends an ICMP error message in response to a UDP datagram.
-func sendICMPError(t *testing.T, conn *testbench.UDPIPv4, icmpErr icmpError, udp *testbench.UDP) {
- t.Helper()
-
- ip, ok := udp.Prev().(*testbench.IPv4)
- if !ok {
- t.Fatalf("expected %s to be IPv4", udp.Prev())
- }
- if icmpErr == timeToLiveExceeded {
- *ip.TTL = 1
- // Let serialization recalculate the checksum since we set the TTL
- // to 1.
- ip.Checksum = nil
- }
-
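-	// Per RFC 792, an ICMP error carries the offending datagram's IP header plus
-	// at least its first 8 bytes; echo back the full IP and UDP headers so the
-	// DUT can associate the error with the right socket.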
- icmpPayload := testbench.Layers{ip, udp}
- bytes, err := icmpPayload.ToBytes()
- if err != nil {
- t.Fatalf("got icmpPayload.ToBytes() = (_, %s), want = (_, nil)", err)
- }
-
- layers := conn.CreateFrame(t, nil)
- layers[len(layers)-1] = icmpErr.ToICMPv4(bytes)
- conn.SendFrameStateless(t, layers)
-}
-
-// testRecv tests observing the ICMP error through the recv syscall. A packet
-// is sent to the DUT, and if wantErrno is non-zero, then the first recv should
-// fail and the second should succeed. Otherwise if wantErrno is zero then the
-// first recv should succeed immediately.
-func testRecv(ctx context.Context, t *testing.T, d testData) {
- t.Helper()
-
- // Check that receiving on the clean socket works.
- d.conn.Send(t, testbench.UDP{DstPort: &d.cleanPort})
- d.dut.Recv(t, d.cleanFD, 100, 0)
-
- d.conn.Send(t, testbench.UDP{})
-
- if d.wantErrno != unix.Errno(0) {
- ret, _, err := d.dut.RecvWithErrno(ctx, t, d.remoteFD, 100, 0)
- if ret != -1 {
- t.Fatalf("recv after ICMP error succeeded unexpectedly, expected (%[1]d) %[1]v", d.wantErrno)
- }
- if err != d.wantErrno {
- t.Fatalf("recv after ICMP error resulted in error (%[1]d) %[1]v, expected (%[2]d) %[2]v", err, d.wantErrno)
- }
- }
-
- d.dut.Recv(t, d.remoteFD, 100, 0)
-}
-
-// testSendTo tests observing the ICMP error through the send syscall. If
-// wantErrno is non-zero, the first send should fail and a subsequent send
-// should succeed; if wantErrno is zero, the first send should simply succeed.
-func testSendTo(ctx context.Context, t *testing.T, d testData) {
- // Check that sending on the clean socket works.
- d.dut.SendTo(t, d.cleanFD, nil, 0, d.conn.LocalAddr(t))
- if _, err := d.conn.Expect(t, testbench.UDP{SrcPort: &d.cleanPort}, time.Second); err != nil {
- t.Fatalf("did not receive UDP packet from clean socket on DUT: %s", err)
- }
-
- if d.wantErrno != unix.Errno(0) {
- ret, err := d.dut.SendToWithErrno(ctx, t, d.remoteFD, nil, 0, d.conn.LocalAddr(t))
-
- if ret != -1 {
- t.Fatalf("sendto after ICMP error succeeded unexpectedly, expected (%[1]d) %[1]v", d.wantErrno)
- }
- if err != d.wantErrno {
- t.Fatalf("sendto after ICMP error resulted in error (%[1]d) %[1]v, expected (%[2]d) %[2]v", err, d.wantErrno)
- }
- }
-
- d.dut.SendTo(t, d.remoteFD, nil, 0, d.conn.LocalAddr(t))
- if _, err := d.conn.Expect(t, testbench.UDP{}, time.Second); err != nil {
- t.Fatalf("did not receive UDP packet as expected: %s", err)
- }
-}
-
-func testSockOpt(_ context.Context, t *testing.T, d testData) {
- // Check that there's no pending error on the clean socket.
- if errno := unix.Errno(d.dut.GetSockOptInt(t, d.cleanFD, unix.SOL_SOCKET, unix.SO_ERROR)); errno != unix.Errno(0) {
- t.Fatalf("unexpected error (%[1]d) %[1]v on clean socket", errno)
- }
-
- if errno := unix.Errno(d.dut.GetSockOptInt(t, d.remoteFD, unix.SOL_SOCKET, unix.SO_ERROR)); errno != d.wantErrno {
- t.Fatalf("SO_ERROR sockopt after ICMP error is (%[1]d) %[1]v, expected (%[2]d) %[2]v", errno, d.wantErrno)
- }
-
- // Check that after clearing socket error, sending doesn't fail.
- d.dut.SendTo(t, d.remoteFD, nil, 0, d.conn.LocalAddr(t))
- if _, err := d.conn.Expect(t, testbench.UDP{}, time.Second); err != nil {
- t.Fatalf("did not receive UDP packet as expected: %s", err)
- }
-}
-
-// TestUDPICMPErrorPropagation tests that ICMP error messages in response to
-// UDP datagrams are processed correctly. RFC 1122 section 4.1.3.3 states that:
-// "UDP MUST pass to the application layer all ICMP error messages that it
-// receives from the IP layer."
-//
-// The test cases are parametrized in 3 dimensions: 1. the UDP socket is either
-// put into connection mode or left connectionless, 2. the ICMP message type
-// and code, and 3. the method by which the ICMP error is observed on the
-// socket: sendto, recv, or getsockopt(SO_ERROR).
-//
-// Linux's udp(7) man page states: "All fatal errors will be passed to the user
-// as an error return even when the socket is not connected. This includes
-// asynchronous errors received from the network." In practice, the only
-// combination of parameters to the test that causes an error to be observable
-// on the UDP socket is receiving a port unreachable message on a connected
-// socket.
-func TestUDPICMPErrorPropagation(t *testing.T) {
- for _, connect := range []connectionMode{true, false} {
- for _, icmpErr := range []icmpError{portUnreachable, timeToLiveExceeded} {
- wantErrno := wantErrno(connect, icmpErr)
-
- for _, errDetect := range []errorDetection{
- {"SendTo", false, testSendTo},
- // Send to an address that's different from the one that caused an ICMP
- // error to be returned.
- {"SendToValid", true, testSendTo},
- {"Recv", false, testRecv},
- {"SockOpt", false, testSockOpt},
- } {
- t.Run(fmt.Sprintf("%s/%s/%s", connect, icmpErr, errDetect.name), func(t *testing.T) {
- dut := testbench.NewDUT(t)
-
- remoteFD, remotePort := dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, net.IPv4zero)
- defer dut.Close(t, remoteFD)
-
- // Create a second, clean socket on the DUT to ensure that the ICMP
- // error messages only affect the sockets they are intended for.
- cleanFD, cleanPort := dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, net.IPv4zero)
- defer dut.Close(t, cleanFD)
-
- conn := dut.Net.NewUDPIPv4(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
- defer conn.Close(t)
-
- if connect {
- dut.Connect(t, remoteFD, conn.LocalAddr(t))
- dut.Connect(t, cleanFD, conn.LocalAddr(t))
- }
-
- dut.SendTo(t, remoteFD, nil, 0, conn.LocalAddr(t))
- udp, err := conn.Expect(t, testbench.UDP{}, time.Second)
- if err != nil {
- t.Fatalf("did not receive message from DUT: %s", err)
- }
-
- sendICMPError(t, &conn, icmpErr, udp)
-
- errDetectConn := &conn
- if errDetect.useValidConn {
- // connClean is a UDP socket on the test runner that was not
- // involved in the generation of the ICMP error. As such,
-						// interactions between it and the DUT should be independent of
- // the ICMP error at least at the port level.
- connClean := dut.Net.NewUDPIPv4(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
- defer connClean.Close(t)
-
- errDetectConn = &connClean
- }
-
- errDetect.f(context.Background(), t, testData{&dut, errDetectConn, remoteFD, remotePort, cleanFD, cleanPort, wantErrno})
- })
- }
- }
- }
-}
-
-// TestICMPErrorDuringUDPRecv tests behavior when a UDP socket is in the middle
-// of a blocking recv and receives an ICMP error.
-func TestICMPErrorDuringUDPRecv(t *testing.T) {
- for _, connect := range []connectionMode{true, false} {
- for _, icmpErr := range []icmpError{portUnreachable, timeToLiveExceeded} {
- wantErrno := wantErrno(connect, icmpErr)
-
- t.Run(fmt.Sprintf("%s/%s", connect, icmpErr), func(t *testing.T) {
- dut := testbench.NewDUT(t)
-
- remoteFD, remotePort := dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, net.IPv4zero)
- defer dut.Close(t, remoteFD)
-
- // Create a second, clean socket on the DUT to ensure that the ICMP
- // error messages only affect the sockets they are intended for.
- cleanFD, cleanPort := dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, net.IPv4zero)
- defer dut.Close(t, cleanFD)
-
- conn := dut.Net.NewUDPIPv4(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
- defer conn.Close(t)
-
- if connect {
- dut.Connect(t, remoteFD, conn.LocalAddr(t))
- dut.Connect(t, cleanFD, conn.LocalAddr(t))
- }
-
- dut.SendTo(t, remoteFD, nil, 0, conn.LocalAddr(t))
- udp, err := conn.Expect(t, testbench.UDP{}, time.Second)
- if err != nil {
- t.Fatalf("did not receive message from DUT: %s", err)
- }
-
- var wg sync.WaitGroup
- wg.Add(2)
- go func() {
- defer wg.Done()
-
- if wantErrno != unix.Errno(0) {
- ret, _, err := dut.RecvWithErrno(context.Background(), t, remoteFD, 100, 0)
- if ret != -1 {
- t.Errorf("recv during ICMP error succeeded unexpectedly, expected (%[1]d) %[1]v", wantErrno)
- return
- }
- if err != wantErrno {
- t.Errorf("recv during ICMP error resulted in error (%[1]d) %[1]v, expected (%[2]d) %[2]v", err, wantErrno)
- return
- }
- }
-
- if ret, _, err := dut.RecvWithErrno(context.Background(), t, remoteFD, 100, 0); ret == -1 {
-						t.Errorf("recv after ICMP error failed with (%[1]d) %[1]v", err)
- }
- }()
-
- go func() {
- defer wg.Done()
-
- if ret, _, err := dut.RecvWithErrno(context.Background(), t, cleanFD, 100, 0); ret == -1 {
-						t.Errorf("recv on clean socket failed with (%[1]d) %[1]v", err)
- }
- }()
-
- // TODO(b/155684889) This sleep is to allow time for the DUT to
- // actually call recv since we want the ICMP error to arrive during the
- // blocking recv, and should be replaced when a better synchronization
- // alternative is available.
- time.Sleep(2 * time.Second)
-
- sendICMPError(t, &conn, icmpErr, udp)
-
- conn.Send(t, testbench.UDP{DstPort: &cleanPort})
- conn.Send(t, testbench.UDP{})
- wg.Wait()
- })
- }
- }
-}
diff --git a/test/perf/BUILD b/test/perf/BUILD
deleted file mode 100644
index 75b5003e2..000000000
--- a/test/perf/BUILD
+++ /dev/null
@@ -1,141 +0,0 @@
-load("//tools:defs.bzl", "more_shards")
-load("//test/runner:defs.bzl", "syscall_test")
-
-package(licenses = ["notice"])
-
-syscall_test(
- debug = False,
- test = "//test/perf/linux:clock_getres_benchmark",
-)
-
-syscall_test(
- debug = False,
- test = "//test/perf/linux:clock_gettime_benchmark",
-)
-
-syscall_test(
- debug = False,
- test = "//test/perf/linux:death_benchmark",
-)
-
-syscall_test(
- debug = False,
- test = "//test/perf/linux:epoll_benchmark",
-)
-
-syscall_test(
- size = "large",
- debug = False,
- test = "//test/perf/linux:fork_benchmark",
-)
-
-syscall_test(
- size = "large",
- debug = False,
- test = "//test/perf/linux:futex_benchmark",
-)
-
-syscall_test(
- size = "large",
- debug = False,
- shard_count = more_shards,
- tags = ["nogotsan"],
- test = "//test/perf/linux:getdents_benchmark",
-)
-
-syscall_test(
- size = "large",
- debug = False,
- test = "//test/perf/linux:getpid_benchmark",
-)
-
-syscall_test(
- size = "large",
- debug = False,
- tags = ["nogotsan"],
- test = "//test/perf/linux:gettid_benchmark",
-)
-
-syscall_test(
- size = "large",
- debug = False,
- test = "//test/perf/linux:mapping_benchmark",
-)
-
-syscall_test(
- size = "large",
- add_overlay = True,
- debug = False,
- test = "//test/perf/linux:open_benchmark",
-)
-
-syscall_test(
- debug = False,
- test = "//test/perf/linux:pipe_benchmark",
-)
-
-syscall_test(
- size = "large",
- add_overlay = True,
- debug = False,
- test = "//test/perf/linux:randread_benchmark",
-)
-
-syscall_test(
- size = "large",
- add_overlay = True,
- debug = False,
- test = "//test/perf/linux:read_benchmark",
-)
-
-syscall_test(
- size = "large",
- debug = False,
- test = "//test/perf/linux:sched_yield_benchmark",
-)
-
-syscall_test(
- size = "large",
- debug = False,
- test = "//test/perf/linux:send_recv_benchmark",
-)
-
-syscall_test(
- size = "large",
- add_overlay = True,
- debug = False,
- test = "//test/perf/linux:seqwrite_benchmark",
-)
-
-syscall_test(
- size = "large",
- debug = False,
- test = "//test/perf/linux:signal_benchmark",
-)
-
-syscall_test(
- debug = False,
- test = "//test/perf/linux:sleep_benchmark",
-)
-
-syscall_test(
- size = "large",
- add_overlay = True,
- debug = False,
- test = "//test/perf/linux:stat_benchmark",
-)
-
-syscall_test(
- size = "large",
- add_overlay = True,
- debug = False,
- tags = ["nogotsan"],
- test = "//test/perf/linux:unlink_benchmark",
-)
-
-syscall_test(
- size = "large",
- add_overlay = True,
- debug = False,
- test = "//test/perf/linux:write_benchmark",
-)
diff --git a/test/perf/linux/BUILD b/test/perf/linux/BUILD
deleted file mode 100644
index dd1d2438c..000000000
--- a/test/perf/linux/BUILD
+++ /dev/null
@@ -1,372 +0,0 @@
-load("//tools:defs.bzl", "cc_binary", "gbenchmark", "gtest")
-
-package(
- default_visibility = ["//:sandbox"],
- licenses = ["notice"],
-)
-
-cc_binary(
- name = "getpid_benchmark",
- testonly = 1,
- srcs = [
- "getpid_benchmark.cc",
- ],
- deps = [
- gbenchmark,
- gtest,
- "//test/util:test_main",
- ],
-)
-
-cc_binary(
- name = "send_recv_benchmark",
- testonly = 1,
- srcs = [
- "send_recv_benchmark.cc",
- ],
- deps = [
- gbenchmark,
- gtest,
- "//test/syscalls/linux:socket_test_util",
- "//test/util:file_descriptor",
- "//test/util:logging",
- "//test/util:posix_error",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- "@com_google_absl//absl/synchronization",
- ],
-)
-
-cc_binary(
- name = "gettid_benchmark",
- testonly = 1,
- srcs = [
- "gettid_benchmark.cc",
- ],
- deps = [
- gbenchmark,
- gtest,
- "//test/util:test_main",
- ],
-)
-
-cc_binary(
- name = "sched_yield_benchmark",
- testonly = 1,
- srcs = [
- "sched_yield_benchmark.cc",
- ],
- deps = [
- gbenchmark,
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "clock_getres_benchmark",
- testonly = 1,
- srcs = [
- "clock_getres_benchmark.cc",
- ],
- deps = [
- gbenchmark,
- gtest,
- "//test/util:test_main",
- ],
-)
-
-cc_binary(
- name = "clock_gettime_benchmark",
- testonly = 1,
- srcs = [
- "clock_gettime_benchmark.cc",
- ],
- deps = [
- gbenchmark,
- gtest,
- "//test/util:test_main",
- "@com_google_absl//absl/time",
- ],
-)
-
-cc_binary(
- name = "open_benchmark",
- testonly = 1,
- srcs = [
- "open_benchmark.cc",
- ],
- deps = [
- gbenchmark,
- gtest,
- "//test/util:fs_util",
- "//test/util:logging",
- "//test/util:temp_path",
- "//test/util:test_main",
- ],
-)
-
-cc_binary(
- name = "read_benchmark",
- testonly = 1,
- srcs = [
- "read_benchmark.cc",
- ],
- deps = [
- gbenchmark,
- gtest,
- "//test/util:fs_util",
- "//test/util:logging",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "randread_benchmark",
- testonly = 1,
- srcs = [
- "randread_benchmark.cc",
- ],
- deps = [
- gbenchmark,
- gtest,
- "//test/util:file_descriptor",
- "//test/util:logging",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- "@com_google_absl//absl/random",
- ],
-)
-
-cc_binary(
- name = "write_benchmark",
- testonly = 1,
- srcs = [
- "write_benchmark.cc",
- ],
- deps = [
- gbenchmark,
- gtest,
- "//test/util:logging",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "seqwrite_benchmark",
- testonly = 1,
- srcs = [
- "seqwrite_benchmark.cc",
- ],
- deps = [
- gbenchmark,
- gtest,
- "//test/util:logging",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- "@com_google_absl//absl/random",
- ],
-)
-
-cc_binary(
- name = "pipe_benchmark",
- testonly = 1,
- srcs = [
- "pipe_benchmark.cc",
- ],
- deps = [
- gbenchmark,
- gtest,
- "//test/util:logging",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "fork_benchmark",
- testonly = 1,
- srcs = [
- "fork_benchmark.cc",
- ],
- deps = [
- gbenchmark,
- gtest,
- "//test/util:cleanup",
- "//test/util:file_descriptor",
- "//test/util:logging",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- "@com_google_absl//absl/synchronization",
- ],
-)
-
-cc_binary(
- name = "futex_benchmark",
- testonly = 1,
- srcs = [
- "futex_benchmark.cc",
- ],
- deps = [
- gbenchmark,
- gtest,
- "//test/util:logging",
- "//test/util:test_main",
- "//test/util:thread_util",
- "@com_google_absl//absl/time",
- ],
-)
-
-cc_binary(
- name = "epoll_benchmark",
- testonly = 1,
- srcs = [
- "epoll_benchmark.cc",
- ],
- deps = [
- gbenchmark,
- gtest,
- "//test/util:epoll_util",
- "//test/util:file_descriptor",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- "@com_google_absl//absl/time",
- ],
-)
-
-cc_binary(
- name = "death_benchmark",
- testonly = 1,
- srcs = [
- "death_benchmark.cc",
- ],
- deps = [
- gbenchmark,
- gtest,
- "//test/util:logging",
- "//test/util:test_main",
- ],
-)
-
-cc_binary(
- name = "mapping_benchmark",
- testonly = 1,
- srcs = [
- "mapping_benchmark.cc",
- ],
- deps = [
- gbenchmark,
- gtest,
- "//test/util:logging",
- "//test/util:memory_util",
- "//test/util:posix_error",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "signal_benchmark",
- testonly = 1,
- srcs = [
- "signal_benchmark.cc",
- ],
- deps = [
- gbenchmark,
- gtest,
- "//test/util:logging",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "getdents_benchmark",
- testonly = 1,
- srcs = [
- "getdents_benchmark.cc",
- ],
- deps = [
- gbenchmark,
- gtest,
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "sleep_benchmark",
- testonly = 1,
- srcs = [
- "sleep_benchmark.cc",
- ],
- deps = [
- gbenchmark,
- gtest,
- "//test/util:logging",
- "//test/util:test_main",
- ],
-)
-
-cc_binary(
- name = "stat_benchmark",
- testonly = 1,
- srcs = [
- "stat_benchmark.cc",
- ],
- deps = [
- gbenchmark,
- gtest,
- "//test/util:fs_util",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- "@com_google_absl//absl/strings",
- ],
-)
-
-cc_binary(
- name = "unlink_benchmark",
- testonly = 1,
- srcs = [
- "unlink_benchmark.cc",
- ],
- deps = [
- gbenchmark,
- gtest,
- "//test/util:fs_util",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "open_read_close_benchmark",
- testonly = 1,
- srcs = [
- "open_read_close_benchmark.cc",
- ],
- deps = [
- gbenchmark,
- gtest,
- "//test/util:fs_util",
- "//test/util:logging",
- "//test/util:temp_path",
- "//test/util:test_main",
- ],
-)
diff --git a/test/perf/linux/clock_getres_benchmark.cc b/test/perf/linux/clock_getres_benchmark.cc
deleted file mode 100644
index b051293ad..000000000
--- a/test/perf/linux/clock_getres_benchmark.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <time.h>
-
-#include "gtest/gtest.h"
-#include "benchmark/benchmark.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// clock_getres(2) is very nearly a no-op syscall, but it does require copying
-// out to a userspace struct. It thus provides a nice small copy-out benchmark.
-void BM_ClockGetRes(benchmark::State& state) {
- struct timespec ts;
- for (auto _ : state) {
- clock_getres(CLOCK_MONOTONIC, &ts);
- }
-}
-
-BENCHMARK(BM_ClockGetRes);
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/perf/linux/clock_gettime_benchmark.cc b/test/perf/linux/clock_gettime_benchmark.cc
deleted file mode 100644
index 6691bebd9..000000000
--- a/test/perf/linux/clock_gettime_benchmark.cc
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <pthread.h>
-#include <time.h>
-
-#include "gtest/gtest.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "benchmark/benchmark.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-void BM_ClockGettimeThreadCPUTime(benchmark::State& state) {
- clockid_t clockid;
- ASSERT_EQ(0, pthread_getcpuclockid(pthread_self(), &clockid));
- struct timespec tp;
-
- for (auto _ : state) {
- clock_gettime(clockid, &tp);
- }
-}
-
-BENCHMARK(BM_ClockGettimeThreadCPUTime);
-
-void BM_VDSOClockGettime(benchmark::State& state) {
- const clockid_t clock = state.range(0);
- struct timespec tp;
- absl::Time start = absl::Now();
-
- // Don't benchmark the calibration phase.
- while (absl::Now() < start + absl::Milliseconds(2100)) {
- clock_gettime(clock, &tp);
- }
-
- for (auto _ : state) {
- clock_gettime(clock, &tp);
- }
-}
-
-BENCHMARK(BM_VDSOClockGettime)->Arg(CLOCK_MONOTONIC)->Arg(CLOCK_REALTIME);
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/perf/linux/death_benchmark.cc b/test/perf/linux/death_benchmark.cc
deleted file mode 100644
index cb2b6fd07..000000000
--- a/test/perf/linux/death_benchmark.cc
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <signal.h>
-
-#include "gtest/gtest.h"
-#include "benchmark/benchmark.h"
-#include "test/util/logging.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// DeathTest is not so much a microbenchmark as a macrobenchmark. It is testing
-// the ability of gVisor (on whatever platform) to execute all the related
-// stack-dumping routines associated with EXPECT_EXIT / EXPECT_DEATH.
-TEST(DeathTest, ZeroEqualsOne) {
- EXPECT_EXIT({ TEST_CHECK(0 == 1); }, ::testing::KilledBySignal(SIGABRT), "");
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/perf/linux/epoll_benchmark.cc b/test/perf/linux/epoll_benchmark.cc
deleted file mode 100644
index 0b121338a..000000000
--- a/test/perf/linux/epoll_benchmark.cc
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sys/epoll.h>
-#include <sys/eventfd.h>
-
-#include <atomic>
-#include <cerrno>
-#include <cstdint>
-#include <cstdlib>
-#include <ctime>
-#include <memory>
-
-#include "gtest/gtest.h"
-#include "absl/time/time.h"
-#include "benchmark/benchmark.h"
-#include "test/util/epoll_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// Returns a new eventfd.
-PosixErrorOr<FileDescriptor> NewEventFD() {
- int fd = eventfd(0, /* flags = */ 0);
- MaybeSave();
- if (fd < 0) {
- return PosixError(errno, "eventfd");
- }
- return FileDescriptor(fd);
-}
-
-// Also stolen from epoll.cc unit tests.
-void BM_EpollTimeout(benchmark::State& state) {
- constexpr int kFDsPerEpoll = 3;
- auto epollfd = ASSERT_NO_ERRNO_AND_VALUE(NewEpollFD());
-
- std::vector<FileDescriptor> eventfds;
- for (int i = 0; i < kFDsPerEpoll; i++) {
- eventfds.push_back(ASSERT_NO_ERRNO_AND_VALUE(NewEventFD()));
- ASSERT_NO_ERRNO(
- RegisterEpollFD(epollfd.get(), eventfds[i].get(), EPOLLIN, 0));
- }
-
- struct epoll_event result[kFDsPerEpoll];
- int timeout_ms = state.range(0);
-
- for (auto _ : state) {
- EXPECT_EQ(0, epoll_wait(epollfd.get(), result, kFDsPerEpoll, timeout_ms));
- }
-}
-
-BENCHMARK(BM_EpollTimeout)->Range(0, 8);
-
-// Also stolen from epoll.cc unit tests.
-void BM_EpollAllEvents(benchmark::State& state) {
- auto epollfd = ASSERT_NO_ERRNO_AND_VALUE(NewEpollFD());
- const int fds_per_epoll = state.range(0);
- constexpr uint64_t kEventVal = 5;
-
- std::vector<FileDescriptor> eventfds;
- for (int i = 0; i < fds_per_epoll; i++) {
- eventfds.push_back(ASSERT_NO_ERRNO_AND_VALUE(NewEventFD()));
- ASSERT_NO_ERRNO(
- RegisterEpollFD(epollfd.get(), eventfds[i].get(), EPOLLIN, 0));
-
- ASSERT_THAT(WriteFd(eventfds[i].get(), &kEventVal, sizeof(kEventVal)),
- SyscallSucceedsWithValue(sizeof(kEventVal)));
- }
-
- std::vector<struct epoll_event> result(fds_per_epoll);
-
- for (auto _ : state) {
- EXPECT_EQ(fds_per_epoll,
- epoll_wait(epollfd.get(), result.data(), fds_per_epoll, 0));
- }
-}
-
-BENCHMARK(BM_EpollAllEvents)->Range(2, 1024);
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/perf/linux/fork_benchmark.cc b/test/perf/linux/fork_benchmark.cc
deleted file mode 100644
index 84fdbc8a0..000000000
--- a/test/perf/linux/fork_benchmark.cc
+++ /dev/null
@@ -1,350 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "absl/synchronization/barrier.h"
-#include "benchmark/benchmark.h"
-#include "test/util/cleanup.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/logging.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-constexpr int kBusyMax = 250;
-
-// Do some CPU-bound busy-work.
-int busy(int max) {
-  // Prevent the compiler from optimizing this work away.
- volatile int count = 0;
-
- for (int i = 1; i < max; i++) {
- for (int j = 2; j < i / 2; j++) {
- if (i % j == 0) {
- count++;
- }
- }
- }
-
- return count;
-}
-
-void BM_CPUBoundUniprocess(benchmark::State& state) {
- for (auto _ : state) {
- busy(kBusyMax);
- }
-}
-
-BENCHMARK(BM_CPUBoundUniprocess);
-
-void BM_CPUBoundAsymmetric(benchmark::State& state) {
- const size_t max = state.max_iterations;
- pid_t child = fork();
- if (child == 0) {
- for (int i = 0; i < max; i++) {
- busy(kBusyMax);
- }
- _exit(0);
- }
- ASSERT_THAT(child, SyscallSucceeds());
- ASSERT_TRUE(state.KeepRunningBatch(max));
-
- int status;
- EXPECT_THAT(RetryEINTR(waitpid)(child, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status));
- EXPECT_EQ(0, WEXITSTATUS(status));
- ASSERT_FALSE(state.KeepRunning());
-}
-
-BENCHMARK(BM_CPUBoundAsymmetric)->UseRealTime();
-
-void BM_CPUBoundSymmetric(benchmark::State& state) {
- std::vector<pid_t> children;
- auto child_cleanup = Cleanup([&] {
- for (const pid_t child : children) {
- int status;
- EXPECT_THAT(RetryEINTR(waitpid)(child, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status));
- EXPECT_EQ(0, WEXITSTATUS(status));
- }
- ASSERT_FALSE(state.KeepRunning());
- });
-
- const int processes = state.range(0);
- for (int i = 0; i < processes; i++) {
- size_t cur = (state.max_iterations + (processes - 1)) / processes;
- if ((state.iterations() + cur) >= state.max_iterations) {
- cur = state.max_iterations - state.iterations();
- }
- pid_t child = fork();
- if (child == 0) {
- for (int i = 0; i < cur; i++) {
- busy(kBusyMax);
- }
- _exit(0);
- }
- ASSERT_THAT(child, SyscallSucceeds());
- if (cur > 0) {
-      // cur can be zero here if earlier children already cover the remaining
-      // iterations, in which case there is no batch to report.
- ASSERT_TRUE(state.KeepRunningBatch(cur));
- }
- children.push_back(child);
- }
-}
-
-BENCHMARK(BM_CPUBoundSymmetric)->Range(2, 16)->UseRealTime();
-
-// Child routine for ProcessSwitch/ThreadSwitch.
-// Reads from readfd and writes the result to writefd.
-void SwitchChild(int readfd, int writefd) {
- while (1) {
- char buf;
- int ret = ReadFd(readfd, &buf, 1);
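-    // A zero-length read means every write end of this pipe has been closed;
-    // exit so the shutdown propagates around the ring.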
- if (ret == 0) {
- break;
- }
- TEST_CHECK_MSG(ret == 1, "read failed");
-
- ret = WriteFd(writefd, &buf, 1);
- if (ret == -1) {
- TEST_CHECK_MSG(errno == EPIPE, "unexpected write failure");
- break;
- }
- TEST_CHECK_MSG(ret == 1, "write failed");
- }
-}
-
-// Send bytes in a loop through a series of pipes, each passing through a
-// different process.
-//
-// Proc 0 Proc 1
-// * ----------> *
-// ^ Pipe 1 |
-// | |
-// | Pipe 0 | Pipe 2
-// | |
-// | |
-// | Pipe 3 v
-// * <---------- *
-// Proc 3 Proc 2
-//
-// This exercises context switching through multiple processes.
-void BM_ProcessSwitch(benchmark::State& state) {
- // Code below assumes there are at least two processes.
- const int num_processes = state.range(0);
- ASSERT_GE(num_processes, 2);
-
- std::vector<pid_t> children;
- auto child_cleanup = Cleanup([&] {
- for (const pid_t child : children) {
- int status;
- EXPECT_THAT(RetryEINTR(waitpid)(child, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status));
- EXPECT_EQ(0, WEXITSTATUS(status));
- }
- });
-
- // Must come after children, as the FDs must be closed before the children
- // will exit.
- std::vector<FileDescriptor> read_fds;
- std::vector<FileDescriptor> write_fds;
-
- for (int i = 0; i < num_processes; i++) {
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- read_fds.emplace_back(fds[0]);
- write_fds.emplace_back(fds[1]);
- }
-
- // This process is one of the processes in the loop. It will be considered
- // index 0.
- for (int i = 1; i < num_processes; i++) {
- // Read from current pipe index, write to next.
- const int read_index = i;
- const int read_fd = read_fds[read_index].get();
-
- const int write_index = (i + 1) % num_processes;
- const int write_fd = write_fds[write_index].get();
-
- // std::vector isn't safe to use from the fork child.
- FileDescriptor* read_array = read_fds.data();
- FileDescriptor* write_array = write_fds.data();
-
- pid_t child = fork();
- if (!child) {
- // Close all other FDs.
- for (int j = 0; j < num_processes; j++) {
- if (j != read_index) {
- read_array[j].reset();
- }
- if (j != write_index) {
- write_array[j].reset();
- }
- }
-
- SwitchChild(read_fd, write_fd);
- _exit(0);
- }
- ASSERT_THAT(child, SyscallSucceeds());
- children.push_back(child);
- }
-
- // Read from current pipe index (0), write to next (1).
- const int read_index = 0;
- const int read_fd = read_fds[read_index].get();
-
- const int write_index = 1;
- const int write_fd = write_fds[write_index].get();
-
- // Kick start the loop.
- char buf = 'a';
- ASSERT_THAT(WriteFd(write_fd, &buf, 1), SyscallSucceedsWithValue(1));
-
- for (auto _ : state) {
- ASSERT_THAT(ReadFd(read_fd, &buf, 1), SyscallSucceedsWithValue(1));
- ASSERT_THAT(WriteFd(write_fd, &buf, 1), SyscallSucceedsWithValue(1));
- }
-}
-
-BENCHMARK(BM_ProcessSwitch)->Range(2, 16)->UseRealTime();
-
-// Equivalent to BM_ProcessSwitch, but using threads instead of processes.
-void BM_ThreadSwitch(benchmark::State& state) {
- // Code below assumes there are at least two threads.
- const int num_threads = state.range(0);
- ASSERT_GE(num_threads, 2);
-
-  // Must come after threads, as the FDs must be closed before the threads
-  // will exit.
- std::vector<std::unique_ptr<ScopedThread>> threads;
- std::vector<FileDescriptor> read_fds;
- std::vector<FileDescriptor> write_fds;
-
- for (int i = 0; i < num_threads; i++) {
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- read_fds.emplace_back(fds[0]);
- write_fds.emplace_back(fds[1]);
- }
-
- // This thread is one of the threads in the loop. It will be considered
- // index 0.
- for (int i = 1; i < num_threads; i++) {
- // Read from current pipe index, write to next.
- //
- // Transfer ownership of the FDs to the thread.
- const int read_index = i;
- const int read_fd = read_fds[read_index].release();
-
- const int write_index = (i + 1) % num_threads;
- const int write_fd = write_fds[write_index].release();
-
- threads.emplace_back(std::make_unique<ScopedThread>([read_fd, write_fd] {
- FileDescriptor read(read_fd);
- FileDescriptor write(write_fd);
- SwitchChild(read.get(), write.get());
- }));
- }
-
- // Read from current pipe index (0), write to next (1).
- const int read_index = 0;
- const int read_fd = read_fds[read_index].get();
-
- const int write_index = 1;
- const int write_fd = write_fds[write_index].get();
-
- // Kick start the loop.
- char buf = 'a';
- ASSERT_THAT(WriteFd(write_fd, &buf, 1), SyscallSucceedsWithValue(1));
-
- for (auto _ : state) {
- ASSERT_THAT(ReadFd(read_fd, &buf, 1), SyscallSucceedsWithValue(1));
- ASSERT_THAT(WriteFd(write_fd, &buf, 1), SyscallSucceedsWithValue(1));
- }
-
- // The two FDs still owned by this thread are closed, causing the next thread
- // to exit its loop and close its FDs, and so on until all threads exit.
-}
-
-BENCHMARK(BM_ThreadSwitch)->Range(2, 16)->UseRealTime();
-
-void BM_ThreadStart(benchmark::State& state) {
- const int num_threads = state.range(0);
-
- for (auto _ : state) {
- state.PauseTiming();
-
- auto barrier = new absl::Barrier(num_threads + 1);
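-    // absl::Barrier::Block() returns true to exactly one caller, which is then
-    // responsible for deleting the barrier.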
- std::vector<std::unique_ptr<ScopedThread>> threads;
-
- state.ResumeTiming();
-
- for (size_t i = 0; i < num_threads; ++i) {
- threads.emplace_back(std::make_unique<ScopedThread>([barrier] {
- if (barrier->Block()) {
- delete barrier;
- }
- }));
- }
-
- if (barrier->Block()) {
- delete barrier;
- }
-
- state.PauseTiming();
-
- for (const auto& thread : threads) {
- thread->Join();
- }
-
- state.ResumeTiming();
- }
-}
-
-BENCHMARK(BM_ThreadStart)->Range(1, 2048)->UseRealTime();
-
-// Benchmark the complete fork + exit + wait.
-void BM_ProcessLifecycle(benchmark::State& state) {
- const int num_procs = state.range(0);
-
- std::vector<pid_t> pids(num_procs);
- for (auto _ : state) {
- for (size_t i = 0; i < num_procs; ++i) {
- int pid = fork();
- if (pid == 0) {
- _exit(0);
- }
- ASSERT_THAT(pid, SyscallSucceeds());
- pids[i] = pid;
- }
-
- for (const int pid : pids) {
- ASSERT_THAT(RetryEINTR(waitpid)(pid, nullptr, 0),
- SyscallSucceedsWithValue(pid));
- }
- }
-}
-
-BENCHMARK(BM_ProcessLifecycle)->Range(1, 512)->UseRealTime();
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/perf/linux/futex_benchmark.cc b/test/perf/linux/futex_benchmark.cc
deleted file mode 100644
index e686041c9..000000000
--- a/test/perf/linux/futex_benchmark.cc
+++ /dev/null
@@ -1,198 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <linux/futex.h>
-
-#include <atomic>
-#include <cerrno>
-#include <cstdint>
-#include <cstdlib>
-#include <ctime>
-
-#include "gtest/gtest.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "benchmark/benchmark.h"
-#include "test/util/logging.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-inline int FutexWait(std::atomic<int32_t>* v, int32_t val) {
- return syscall(SYS_futex, v, FUTEX_WAIT_PRIVATE, val, nullptr);
-}
-
-inline int FutexWaitMonotonicTimeout(std::atomic<int32_t>* v, int32_t val,
- const struct timespec* timeout) {
- return syscall(SYS_futex, v, FUTEX_WAIT_PRIVATE, val, timeout);
-}
-
-inline int FutexWaitMonotonicDeadline(std::atomic<int32_t>* v, int32_t val,
- const struct timespec* deadline) {
- return syscall(SYS_futex, v, FUTEX_WAIT_BITSET_PRIVATE, val, deadline,
- nullptr, FUTEX_BITSET_MATCH_ANY);
-}
-
-inline int FutexWaitRealtimeDeadline(std::atomic<int32_t>* v, int32_t val,
- const struct timespec* deadline) {
- return syscall(SYS_futex, v, FUTEX_WAIT_BITSET_PRIVATE | FUTEX_CLOCK_REALTIME,
- val, deadline, nullptr, FUTEX_BITSET_MATCH_ANY);
-}
-
-inline int FutexWake(std::atomic<int32_t>* v, int32_t count) {
- return syscall(SYS_futex, v, FUTEX_WAKE_PRIVATE, count);
-}
-
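-// Note: the raw syscall is futex(uaddr, futex_op, val, timeout, uaddr2, val3);
-// the wrappers above pass only the arguments that their operations consume.
-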
-// This just uses FUTEX_WAKE on an address with nothing waiting, very simple.
-void BM_FutexWakeNop(benchmark::State& state) {
- std::atomic<int32_t> v(0);
-
- for (auto _ : state) {
- TEST_PCHECK(FutexWake(&v, 1) == 0);
- }
-}
-
-BENCHMARK(BM_FutexWakeNop)->MinTime(5);
-
-// This just uses FUTEX_WAIT on an address whose value has changed, i.e., the
-// syscall won't wait.
-void BM_FutexWaitNop(benchmark::State& state) {
- std::atomic<int32_t> v(0);
-
- for (auto _ : state) {
- TEST_PCHECK(FutexWait(&v, 1) == -1 && errno == EAGAIN);
- }
-}
-
-BENCHMARK(BM_FutexWaitNop)->MinTime(5);
-
-// This uses FUTEX_WAIT with a timeout on an address whose value never
-// changes, such that it always times out. Timeout overhead can be estimated by
-// timer overruns for short timeouts.
-void BM_FutexWaitMonotonicTimeout(benchmark::State& state) {
- const absl::Duration timeout = absl::Nanoseconds(state.range(0));
- std::atomic<int32_t> v(0);
- auto ts = absl::ToTimespec(timeout);
-
- for (auto _ : state) {
- TEST_PCHECK(FutexWaitMonotonicTimeout(&v, 0, &ts) == -1 &&
- errno == ETIMEDOUT);
- }
-}
-
-BENCHMARK(BM_FutexWaitMonotonicTimeout)
- ->MinTime(5)
- ->UseRealTime()
- ->Arg(1)
- ->Arg(10)
- ->Arg(100)
- ->Arg(1000)
- ->Arg(10000);
-
-// This uses FUTEX_WAIT_BITSET with a deadline that is in the past. This allows
-// estimation of the overhead of setting up a timer for a deadline (as opposed
-// to a timeout as specified for FUTEX_WAIT).
-void BM_FutexWaitMonotonicDeadline(benchmark::State& state) {
- std::atomic<int32_t> v(0);
- struct timespec ts = {};
-
- for (auto _ : state) {
- TEST_PCHECK(FutexWaitMonotonicDeadline(&v, 0, &ts) == -1 &&
- errno == ETIMEDOUT);
- }
-}
-
-BENCHMARK(BM_FutexWaitMonotonicDeadline)->MinTime(5);
-
-// This is equivalent to BM_FutexWaitMonotonicDeadline, but uses CLOCK_REALTIME
-// instead of CLOCK_MONOTONIC for the deadline.
-void BM_FutexWaitRealtimeDeadline(benchmark::State& state) {
- std::atomic<int32_t> v(0);
- struct timespec ts = {};
-
- for (auto _ : state) {
- TEST_PCHECK(FutexWaitRealtimeDeadline(&v, 0, &ts) == -1 &&
- errno == ETIMEDOUT);
- }
-}
-
-BENCHMARK(BM_FutexWaitRealtimeDeadline)->MinTime(5);
-
-int64_t GetCurrentMonotonicTimeNanos() {
- struct timespec ts;
- TEST_CHECK(clock_gettime(CLOCK_MONOTONIC, &ts) != -1);
- return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
-}
-
-void SpinNanos(int64_t delay_ns) {
- if (delay_ns <= 0) {
- return;
- }
- const int64_t end = GetCurrentMonotonicTimeNanos() + delay_ns;
- while (GetCurrentMonotonicTimeNanos() < end) {
- // spin
- }
-}
-
-// Each iteration of FutexRoundtripDelayed involves a thread sending a futex
-// wakeup to another thread, which spins for delay_us and then sends a futex
-// wakeup back. The time per iteration is 2 * (delay_us + kBeforeWakeDelayNs +
-// futex/scheduling overhead).
-void BM_FutexRoundtripDelayed(benchmark::State& state) {
- const int delay_us = state.range(0);
- const int64_t delay_ns = delay_us * 1000;
- // Spin for an extra kBeforeWakeDelayNs before invoking FUTEX_WAKE to reduce
- // the probability that the wakeup comes before the wait, preventing the wait
- // from ever taking effect and causing the benchmark to underestimate the
- // actual wakeup time.
- constexpr int64_t kBeforeWakeDelayNs = 500;
- std::atomic<int32_t> v(0);
- ScopedThread t([&] {
- for (int i = 0; i < state.max_iterations; i++) {
- SpinNanos(delay_ns);
- while (v.load(std::memory_order_acquire) == 0) {
- FutexWait(&v, 0);
- }
- SpinNanos(kBeforeWakeDelayNs + delay_ns);
- v.store(0, std::memory_order_release);
- FutexWake(&v, 1);
- }
- });
- for (auto _ : state) {
- SpinNanos(kBeforeWakeDelayNs + delay_ns);
- v.store(1, std::memory_order_release);
- FutexWake(&v, 1);
- SpinNanos(delay_ns);
- while (v.load(std::memory_order_acquire) == 1) {
- FutexWait(&v, 1);
- }
- }
-}
-
-BENCHMARK(BM_FutexRoundtripDelayed)
- ->MinTime(5)
- ->UseRealTime()
- ->Arg(0)
- ->Arg(10)
- ->Arg(20)
- ->Arg(50)
- ->Arg(100);
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/perf/linux/getdents_benchmark.cc b/test/perf/linux/getdents_benchmark.cc
deleted file mode 100644
index 9030eb356..000000000
--- a/test/perf/linux/getdents_benchmark.cc
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "benchmark/benchmark.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-#ifndef SYS_getdents64
-#if defined(__x86_64__)
-#define SYS_getdents64 217
-#elif defined(__aarch64__)
-#define SYS_getdents64 61
-#else
-#error "Unknown architecture"
-#endif
-#endif // SYS_getdents64
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-constexpr int kBufferSize = 65536;
-
-PosixErrorOr<TempPath> CreateDirectory(int count,
- std::vector<std::string>* files) {
- ASSIGN_OR_RETURN_ERRNO(TempPath dir, TempPath::CreateDir());
-
- ASSIGN_OR_RETURN_ERRNO(FileDescriptor dfd,
- Open(dir.path(), O_RDONLY | O_DIRECTORY));
-
- for (int i = 0; i < count; i++) {
- auto file = NewTempRelPath();
- auto res = MknodAt(dfd, file, S_IFREG | 0644, 0);
- RETURN_IF_ERRNO(res);
- files->push_back(file);
- }
-
- return std::move(dir);
-}
-
-PosixError CleanupDirectory(const TempPath& dir,
- std::vector<std::string>* files) {
- ASSIGN_OR_RETURN_ERRNO(FileDescriptor dfd,
- Open(dir.path(), O_RDONLY | O_DIRECTORY));
-
- for (auto it = files->begin(); it != files->end(); ++it) {
- auto res = UnlinkAt(dfd, *it, 0);
- RETURN_IF_ERRNO(res);
- }
- return NoError();
-}
-
-// Creates a directory containing `files` files, and reads all the directory
-// entries from the directory using a single FD.
-void BM_GetdentsSameFD(benchmark::State& state) {
- // Create directory with given files.
- const int count = state.range(0);
-
- // Keep a vector of all of the file TempPaths that is destroyed before dir.
- //
- // Normally, we'd simply allow dir to recursively clean up the contained
- // files, but that recursive cleanup uses getdents, which may be very slow in
- // extreme benchmarks.
- TempPath dir;
- std::vector<std::string> files;
- dir = ASSERT_NO_ERRNO_AND_VALUE(CreateDirectory(count, &files));
-
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(dir.path(), O_RDONLY | O_DIRECTORY));
- char buffer[kBufferSize];
-
- // We read all directory entries on each iteration, but report this as a
- // "batch" iteration so that reported times are per file.
- while (state.KeepRunningBatch(count)) {
- ASSERT_THAT(lseek(fd.get(), 0, SEEK_SET), SyscallSucceeds());
-
- int ret;
- do {
- ASSERT_THAT(ret = syscall(SYS_getdents64, fd.get(), buffer, kBufferSize),
- SyscallSucceeds());
- } while (ret > 0);
- }
-
- ASSERT_NO_ERRNO(CleanupDirectory(dir, &files));
-
- state.SetItemsProcessed(state.iterations());
-}
-
-BENCHMARK(BM_GetdentsSameFD)->Range(1, 1 << 12)->UseRealTime();
-
-// Creates a directory containing `files` files, and reads all the directory
-// entries from the directory using a new FD each time.
-void BM_GetdentsNewFD(benchmark::State& state) {
- // Create directory with given files.
- const int count = state.range(0);
-
- // Keep a vector of all of the file TempPaths that is destroyed before dir.
- //
- // Normally, we'd simply allow dir to recursively clean up the contained
- // files, but that recursive cleanup uses getdents, which may be very slow in
- // extreme benchmarks.
- TempPath dir;
- std::vector<std::string> files;
- dir = ASSERT_NO_ERRNO_AND_VALUE(CreateDirectory(count, &files));
- char buffer[kBufferSize];
-
- // We read all directory entries on each iteration, but report this as a
- // "batch" iteration so that reported times are per file.
- while (state.KeepRunningBatch(count)) {
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(dir.path(), O_RDONLY | O_DIRECTORY));
-
- int ret;
- do {
- ASSERT_THAT(ret = syscall(SYS_getdents64, fd.get(), buffer, kBufferSize),
- SyscallSucceeds());
- } while (ret > 0);
- }
-
- ASSERT_NO_ERRNO(CleanupDirectory(dir, &files));
-
- state.SetItemsProcessed(state.iterations());
-}
-
-BENCHMARK(BM_GetdentsNewFD)->Range(1, 1 << 12)->UseRealTime();
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/perf/linux/getpid_benchmark.cc b/test/perf/linux/getpid_benchmark.cc
deleted file mode 100644
index 047a034bd..000000000
--- a/test/perf/linux/getpid_benchmark.cc
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sys/syscall.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "benchmark/benchmark.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-void BM_Getpid(benchmark::State& state) {
- for (auto _ : state) {
- syscall(SYS_getpid);
- }
-}
-
-BENCHMARK(BM_Getpid);
-
-#ifdef __x86_64__
-
-#define SYSNO_STR1(x) #x
-#define SYSNO_STR(x) SYSNO_STR1(x)
-
-// BM_GetpidOpt uses the most common pattern for making system calls:
-// mov $SYS_XXX, %eax; syscall.
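-// The syscall instruction clobbers %rcx and %r11 and returns its result in
-// %rax, which is why those registers appear in the clobber list below.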
-void BM_GetpidOpt(benchmark::State& state) {
- for (auto s : state) {
- __asm__("movl $" SYSNO_STR(SYS_getpid) ", %%eax\n"
- "syscall\n"
- : : : "rax", "rcx", "r11");
- }
-}
-
-BENCHMARK(BM_GetpidOpt);
-#endif // __x86_64__
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/perf/linux/gettid_benchmark.cc b/test/perf/linux/gettid_benchmark.cc
deleted file mode 100644
index 8f4961f5e..000000000
--- a/test/perf/linux/gettid_benchmark.cc
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "benchmark/benchmark.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-void BM_Gettid(benchmark::State& state) {
- for (auto _ : state) {
- syscall(SYS_gettid);
- }
-}
-
-BENCHMARK(BM_Gettid)->ThreadRange(1, 4000)->UseRealTime();
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/perf/linux/mapping_benchmark.cc b/test/perf/linux/mapping_benchmark.cc
deleted file mode 100644
index 39c30fe69..000000000
--- a/test/perf/linux/mapping_benchmark.cc
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <stdlib.h>
-#include <sys/mman.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "benchmark/benchmark.h"
-#include "test/util/logging.h"
-#include "test/util/memory_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// Conservative value for /proc/sys/vm/max_map_count, which limits the number of
-// VMAs, minus a safety margin for VMAs that already exist for the test binary.
-// The default value for max_map_count is
-// include/linux/mm.h:DEFAULT_MAX_MAP_COUNT = 65530.
-constexpr size_t kMaxVMAs = 64001;
-
-// Map then unmap pages without touching them.
-void BM_MapUnmap(benchmark::State& state) {
- // Number of pages to map.
- const int pages = state.range(0);
-
- while (state.KeepRunning()) {
- void* addr = mmap(0, pages * kPageSize, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- TEST_CHECK_MSG(addr != MAP_FAILED, "mmap failed");
-
- int ret = munmap(addr, pages * kPageSize);
- TEST_CHECK_MSG(ret == 0, "munmap failed");
- }
-}
-
-BENCHMARK(BM_MapUnmap)->Range(1, 1 << 17)->UseRealTime();
-
-// Map, touch, then unmap pages.
-void BM_MapTouchUnmap(benchmark::State& state) {
- // Number of pages to map.
- const int pages = state.range(0);
-
- while (state.KeepRunning()) {
- void* addr = mmap(0, pages * kPageSize, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- TEST_CHECK_MSG(addr != MAP_FAILED, "mmap failed");
-
- char* c = reinterpret_cast<char*>(addr);
- char* end = c + pages * kPageSize;
- while (c < end) {
- *c = 42;
- c += kPageSize;
- }
-
- int ret = munmap(addr, pages * kPageSize);
- TEST_CHECK_MSG(ret == 0, "munmap failed");
- }
-}
-
-BENCHMARK(BM_MapTouchUnmap)->Range(1, 1 << 17)->UseRealTime();
-
-// Map and touch many pages, unmapping all at once.
-//
-// NOTE(b/111429208): This is a regression test to ensure performant mapping and
-// allocation even with tons of mappings.
-void BM_MapTouchMany(benchmark::State& state) {
- // Number of pages to map.
- const int page_count = state.range(0);
-
- while (state.KeepRunning()) {
- std::vector<void*> pages;
-
- for (int i = 0; i < page_count; i++) {
- void* addr = mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- TEST_CHECK_MSG(addr != MAP_FAILED, "mmap failed");
-
- char* c = reinterpret_cast<char*>(addr);
- *c = 42;
-
- pages.push_back(addr);
- }
-
- for (void* addr : pages) {
- int ret = munmap(addr, kPageSize);
- TEST_CHECK_MSG(ret == 0, "munmap failed");
- }
- }
-
- state.SetBytesProcessed(kPageSize * page_count * state.iterations());
-}
-
-BENCHMARK(BM_MapTouchMany)->Range(1, 1 << 12)->UseRealTime();
-
-void BM_PageFault(benchmark::State& state) {
- // Map the region in which we will take page faults. To ensure that each page
- // fault maps only a single page, each page we touch must correspond to a
- // distinct VMA. Thus we need a 1-page gap between each 1-page VMA. However,
- // each gap consists of a PROT_NONE VMA, instead of an unmapped hole, so that
-  // if there are background threads running, they can't inadvertently create
- // mappings in our gaps that are unmapped when the test ends.
- size_t test_pages = kMaxVMAs;
- // Ensure that test_pages is odd, since we want the test region to both
- // begin and end with a mapped page.
- if (test_pages % 2 == 0) {
- test_pages--;
- }
- const size_t test_region_bytes = test_pages * kPageSize;
- // Use MAP_SHARED here because madvise(MADV_DONTNEED) on private mappings on
- // gVisor won't force future sentry page faults (by design). Use MAP_POPULATE
- // so that Linux pre-allocates the shmem file used to back the mapping.
- Mapping m = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(test_region_bytes, PROT_READ, MAP_SHARED | MAP_POPULATE));
- for (size_t i = 0; i < test_pages / 2; i++) {
- ASSERT_THAT(
- mprotect(reinterpret_cast<void*>(m.addr() + ((2 * i + 1) * kPageSize)),
- kPageSize, PROT_NONE),
- SyscallSucceeds());
- }
-
- const size_t mapped_pages = test_pages / 2 + 1;
- // "Start" at the end of the mapped region to force the mapped region to be
- // reset, since we mapped it with MAP_POPULATE.
- size_t cur_page = mapped_pages;
- for (auto _ : state) {
- if (cur_page >= mapped_pages) {
- // We've reached the end of our mapped region and have to reset it to
- // incur page faults again.
- state.PauseTiming();
- ASSERT_THAT(madvise(m.ptr(), test_region_bytes, MADV_DONTNEED),
- SyscallSucceeds());
- cur_page = 0;
- state.ResumeTiming();
- }
- const uintptr_t addr = m.addr() + (2 * cur_page * kPageSize);
- const char c = *reinterpret_cast<volatile char*>(addr);
- benchmark::DoNotOptimize(c);
- cur_page++;
- }
-}
-
-BENCHMARK(BM_PageFault)->UseRealTime();
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/perf/linux/open_benchmark.cc b/test/perf/linux/open_benchmark.cc
deleted file mode 100644
index 68008f6d5..000000000
--- a/test/perf/linux/open_benchmark.cc
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <stdlib.h>
-#include <unistd.h>
-
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "benchmark/benchmark.h"
-#include "test/util/fs_util.h"
-#include "test/util/logging.h"
-#include "test/util/temp_path.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-void BM_Open(benchmark::State& state) {
- const int size = state.range(0);
- std::vector<TempPath> cache;
- for (int i = 0; i < size; i++) {
- auto path = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- cache.emplace_back(std::move(path));
- }
-
- unsigned int seed = 1;
- for (auto _ : state) {
- const int chosen = rand_r(&seed) % size;
- int fd = open(cache[chosen].path().c_str(), O_RDONLY);
- TEST_CHECK(fd != -1);
- close(fd);
- }
-}
-
-BENCHMARK(BM_Open)->Range(1, 128)->UseRealTime();
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/perf/linux/open_read_close_benchmark.cc b/test/perf/linux/open_read_close_benchmark.cc
deleted file mode 100644
index 8b023a3d8..000000000
--- a/test/perf/linux/open_read_close_benchmark.cc
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <stdlib.h>
-#include <unistd.h>
-
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "benchmark/benchmark.h"
-#include "test/util/fs_util.h"
-#include "test/util/logging.h"
-#include "test/util/temp_path.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-void BM_OpenReadClose(benchmark::State& state) {
- const int size = state.range(0);
- std::vector<TempPath> cache;
- for (int i = 0; i < size; i++) {
- auto path = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), "some content", 0644));
- cache.emplace_back(std::move(path));
- }
-
- char buf[1];
- unsigned int seed = 1;
- for (auto _ : state) {
- const int chosen = rand_r(&seed) % size;
- int fd = open(cache[chosen].path().c_str(), O_RDONLY);
- TEST_CHECK(fd != -1);
- TEST_CHECK(read(fd, buf, 1) == 1);
- close(fd);
- }
-}
-
-// The gofer dentry cache holds 1000 entries by default. Go above that limit
-// to force files to be closed for real.
-BENCHMARK(BM_OpenReadClose)->Range(1000, 16384)->UseRealTime();
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/perf/linux/pipe_benchmark.cc b/test/perf/linux/pipe_benchmark.cc
deleted file mode 100644
index 8f5f6a2a3..000000000
--- a/test/perf/linux/pipe_benchmark.cc
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <stdlib.h>
-#include <sys/stat.h>
-#include <unistd.h>
-
-#include <cerrno>
-
-#include "gtest/gtest.h"
-#include "benchmark/benchmark.h"
-#include "test/util/logging.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-void BM_Pipe(benchmark::State& state) {
- int fds[2];
- TEST_CHECK(pipe(fds) == 0);
-
- const int size = state.range(0);
- std::vector<char> wbuf(size);
- std::vector<char> rbuf(size);
- RandomizeBuffer(wbuf.data(), size);
-
- ScopedThread t([&] {
- auto const fd = fds[1];
- for (int i = 0; i < state.max_iterations; i++) {
- TEST_CHECK(WriteFd(fd, wbuf.data(), wbuf.size()) == size);
- }
- });
-
- for (auto _ : state) {
- TEST_CHECK(ReadFd(fds[0], rbuf.data(), rbuf.size()) == size);
- }
-
- t.Join();
-
- close(fds[0]);
- close(fds[1]);
-
- state.SetBytesProcessed(static_cast<int64_t>(size) *
- static_cast<int64_t>(state.iterations()));
-}
-
-BENCHMARK(BM_Pipe)->Range(1, 1 << 20)->UseRealTime();
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
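
The pipe benchmark above pairs a writer thread that performs exactly state.max_iterations
writes with the timed read loop, so neither side is left blocked once the measurement ends.
A reduced sketch of that producer/consumer shape, separate from the deleted file (partial
reads and writes are ignored here, which only holds for small message sizes):

#include <unistd.h>

#include <cstdint>
#include <cstdlib>
#include <thread>
#include <vector>

#include "benchmark/benchmark.h"

static void BM_PipeSketch(benchmark::State& state) {
  int fds[2];
  if (pipe(fds) != 0) std::abort();  // Sketch: no error plumbing.

  const size_t kSize = 64;  // Small enough that pipe reads/writes are not split.
  std::vector<char> wbuf(kSize, 'x'), rbuf(kSize);

  // The writer performs exactly as many writes as the timed loop will read.
  std::thread writer([&] {
    for (int64_t i = 0; i < static_cast<int64_t>(state.max_iterations); i++) {
      (void)write(fds[1], wbuf.data(), kSize);
    }
  });

  for (auto _ : state) {
    (void)read(fds[0], rbuf.data(), kSize);
  }

  writer.join();
  close(fds[0]);
  close(fds[1]);
  state.SetBytesProcessed(static_cast<int64_t>(kSize) * state.iterations());
}
BENCHMARK(BM_PipeSketch);
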
diff --git a/test/perf/linux/randread_benchmark.cc b/test/perf/linux/randread_benchmark.cc
deleted file mode 100644
index b0eb8c24e..000000000
--- a/test/perf/linux/randread_benchmark.cc
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <stdlib.h>
-#include <sys/stat.h>
-#include <sys/uio.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "benchmark/benchmark.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/logging.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// Create a 1GB file that will be read from at random positions. This should
-// defeat any performance gains from caching.
-const uint64_t kFileSize = 1ULL << 30;
-
-// How many bytes to write at once to initialize the file used to read from.
-const uint32_t kWriteSize = 65536;
-
-// Largest benchmarked read unit.
-const uint32_t kMaxRead = 1UL << 26;
-
-TempPath CreateFile(uint64_t file_size) {
- auto path = TempPath::CreateFile().ValueOrDie();
- FileDescriptor fd = Open(path.path(), O_WRONLY).ValueOrDie();
-
- // Try to minimize syscalls by using maximum size writev() requests.
- std::vector<char> buffer(kWriteSize);
- RandomizeBuffer(buffer.data(), buffer.size());
- const std::vector<std::vector<struct iovec>> iovecs_list =
- GenerateIovecs(file_size, buffer.data(), buffer.size());
- for (const auto& iovecs : iovecs_list) {
- TEST_CHECK(writev(fd.get(), iovecs.data(), iovecs.size()) >= 0);
- }
-
- return path;
-}
-
-// Global test state, initialized once per process lifetime.
-struct GlobalState {
- const TempPath tmpfile;
- explicit GlobalState(TempPath tfile) : tmpfile(std::move(tfile)) {}
-};
-
-GlobalState& GetGlobalState() {
- // This gets created only once throughout the lifetime of the process.
-  // Use a dynamically allocated object (that is never deleted) to avoid
-  // order-of-destruction issues with static storage variables.
- static GlobalState* const state =
- // The actual file size is the maximum random seek range (kFileSize) + the
- // maximum read size so we can read that number of bytes at the end of the
- // file.
- new GlobalState(CreateFile(kFileSize + kMaxRead));
- return *state;
-}
-
-void BM_RandRead(benchmark::State& state) {
- const int size = state.range(0);
-
- GlobalState& global_state = GetGlobalState();
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(global_state.tmpfile.path(), O_RDONLY));
- std::vector<char> buf(size);
-
- unsigned int seed = 1;
- for (auto _ : state) {
- TEST_CHECK(PreadFd(fd.get(), buf.data(), buf.size(),
- rand_r(&seed) % kFileSize) == size);
- }
-
- state.SetBytesProcessed(static_cast<int64_t>(size) *
- static_cast<int64_t>(state.iterations()));
-}
-
-BENCHMARK(BM_RandRead)->Range(1, kMaxRead)->UseRealTime();
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
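
The GlobalState helper above uses a function-local static pointer to a heap-allocated object
that is intentionally never deleted, so no destructor runs during static teardown. A minimal
sketch of the same idiom, independent of the deleted file (ExpensiveSetup and its contents
are purely illustrative):

#include <string>

struct ExpensiveSetup {
  std::string payload;
  ExpensiveSetup() : payload(1 << 20, 'a') {}  // Stand-in for costly one-time setup.
};

ExpensiveSetup& GetExpensiveSetup() {
  // Constructed on first use and deliberately leaked, so there is no static
  // destruction order to worry about.
  static ExpensiveSetup* const instance = new ExpensiveSetup();
  return *instance;
}
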
diff --git a/test/perf/linux/read_benchmark.cc b/test/perf/linux/read_benchmark.cc
deleted file mode 100644
index 62445867d..000000000
--- a/test/perf/linux/read_benchmark.cc
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <stdlib.h>
-#include <sys/stat.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "benchmark/benchmark.h"
-#include "test/util/fs_util.h"
-#include "test/util/logging.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-void BM_Read(benchmark::State& state) {
- const int size = state.range(0);
- const std::string contents(size, 0);
- auto path = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), contents, TempPath::kDefaultFileMode));
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(path.path(), O_RDONLY));
-
- std::vector<char> buf(size);
- for (auto _ : state) {
- TEST_CHECK(PreadFd(fd.get(), buf.data(), buf.size(), 0) == size);
- }
-
- state.SetBytesProcessed(static_cast<int64_t>(size) *
- static_cast<int64_t>(state.iterations()));
-}
-
-BENCHMARK(BM_Read)->Range(1, 1 << 26)->UseRealTime();
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/perf/linux/sched_yield_benchmark.cc b/test/perf/linux/sched_yield_benchmark.cc
deleted file mode 100644
index 6756b5575..000000000
--- a/test/perf/linux/sched_yield_benchmark.cc
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sched.h>
-
-#include "gtest/gtest.h"
-#include "benchmark/benchmark.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-void BM_Sched_yield(benchmark::State& state) {
- for (auto ignored : state) {
- TEST_CHECK(sched_yield() == 0);
- }
-}
-
-BENCHMARK(BM_Sched_yield)->ThreadRange(1, 2000)->UseRealTime();
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/perf/linux/send_recv_benchmark.cc b/test/perf/linux/send_recv_benchmark.cc
deleted file mode 100644
index d73e49523..000000000
--- a/test/perf/linux/send_recv_benchmark.cc
+++ /dev/null
@@ -1,372 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <netinet/in.h>
-#include <netinet/tcp.h>
-#include <poll.h>
-#include <sys/ioctl.h>
-#include <sys/socket.h>
-
-#include <cstring>
-
-#include "gtest/gtest.h"
-#include "absl/synchronization/notification.h"
-#include "benchmark/benchmark.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/logging.h"
-#include "test/util/posix_error.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-constexpr ssize_t kMessageSize = 1024;
-
-class Message {
- public:
- explicit Message(int byte = 0) : Message(byte, kMessageSize, 0) {}
-
- explicit Message(int byte, int sz) : Message(byte, sz, 0) {}
-
- explicit Message(int byte, int sz, int cmsg_sz)
- : buffer_(sz, byte), cmsg_buffer_(cmsg_sz, 0) {
- iov_.iov_base = buffer_.data();
- iov_.iov_len = sz;
- hdr_.msg_iov = &iov_;
- hdr_.msg_iovlen = 1;
- hdr_.msg_control = cmsg_buffer_.data();
- hdr_.msg_controllen = cmsg_sz;
- }
-
- struct msghdr* header() {
- return &hdr_;
- }
-
- private:
- std::vector<char> buffer_;
- std::vector<char> cmsg_buffer_;
- struct iovec iov_ = {};
- struct msghdr hdr_ = {};
-};
-
-void BM_Recvmsg(benchmark::State& state) {
- int sockets[2];
- TEST_CHECK(socketpair(AF_UNIX, SOCK_STREAM, 0, sockets) == 0);
- FileDescriptor send_socket(sockets[0]), recv_socket(sockets[1]);
- absl::Notification notification;
- Message send_msg('a'), recv_msg;
-
- ScopedThread t([&send_msg, &send_socket, &notification] {
- while (!notification.HasBeenNotified()) {
- sendmsg(send_socket.get(), send_msg.header(), 0);
- }
- });
-
- int64_t bytes_received = 0;
- for (auto ignored : state) {
- int n = recvmsg(recv_socket.get(), recv_msg.header(), 0);
- TEST_CHECK(n > 0);
- bytes_received += n;
- }
-
- notification.Notify();
- recv_socket.reset();
-
- state.SetBytesProcessed(bytes_received);
-}
-
-BENCHMARK(BM_Recvmsg)->UseRealTime();
-
-void BM_Sendmsg(benchmark::State& state) {
- int sockets[2];
- TEST_CHECK(socketpair(AF_UNIX, SOCK_STREAM, 0, sockets) == 0);
- FileDescriptor send_socket(sockets[0]), recv_socket(sockets[1]);
- absl::Notification notification;
- Message send_msg('a'), recv_msg;
-
- ScopedThread t([&recv_msg, &recv_socket, &notification] {
- while (!notification.HasBeenNotified()) {
- recvmsg(recv_socket.get(), recv_msg.header(), 0);
- }
- });
-
- int64_t bytes_sent = 0;
- for (auto ignored : state) {
- int n = sendmsg(send_socket.get(), send_msg.header(), 0);
- TEST_CHECK(n > 0);
- bytes_sent += n;
- }
-
- notification.Notify();
- send_socket.reset();
-
- state.SetBytesProcessed(bytes_sent);
-}
-
-BENCHMARK(BM_Sendmsg)->UseRealTime();
-
-void BM_Recvfrom(benchmark::State& state) {
- int sockets[2];
- TEST_CHECK(socketpair(AF_UNIX, SOCK_STREAM, 0, sockets) == 0);
- FileDescriptor send_socket(sockets[0]), recv_socket(sockets[1]);
- absl::Notification notification;
- char send_buffer[kMessageSize], recv_buffer[kMessageSize];
-
- ScopedThread t([&send_socket, &send_buffer, &notification] {
- while (!notification.HasBeenNotified()) {
- sendto(send_socket.get(), send_buffer, kMessageSize, 0, nullptr, 0);
- }
- });
-
- int bytes_received = 0;
- for (auto ignored : state) {
- int n = recvfrom(recv_socket.get(), recv_buffer, kMessageSize, 0, nullptr,
- nullptr);
- TEST_CHECK(n > 0);
- bytes_received += n;
- }
-
- notification.Notify();
- recv_socket.reset();
-
- state.SetBytesProcessed(bytes_received);
-}
-
-BENCHMARK(BM_Recvfrom)->UseRealTime();
-
-void BM_Sendto(benchmark::State& state) {
- int sockets[2];
- TEST_CHECK(socketpair(AF_UNIX, SOCK_STREAM, 0, sockets) == 0);
- FileDescriptor send_socket(sockets[0]), recv_socket(sockets[1]);
- absl::Notification notification;
- char send_buffer[kMessageSize], recv_buffer[kMessageSize];
-
- ScopedThread t([&recv_socket, &recv_buffer, &notification] {
- while (!notification.HasBeenNotified()) {
- recvfrom(recv_socket.get(), recv_buffer, kMessageSize, 0, nullptr,
- nullptr);
- }
- });
-
- int64_t bytes_sent = 0;
- for (auto ignored : state) {
- int n = sendto(send_socket.get(), send_buffer, kMessageSize, 0, nullptr, 0);
- TEST_CHECK(n > 0);
- bytes_sent += n;
- }
-
- notification.Notify();
- send_socket.reset();
-
- state.SetBytesProcessed(bytes_sent);
-}
-
-BENCHMARK(BM_Sendto)->UseRealTime();
-
-PosixErrorOr<sockaddr_storage> InetLoopbackAddr(int family) {
- struct sockaddr_storage addr;
- memset(&addr, 0, sizeof(addr));
- addr.ss_family = family;
- switch (family) {
- case AF_INET:
- reinterpret_cast<struct sockaddr_in*>(&addr)->sin_addr.s_addr =
- htonl(INADDR_LOOPBACK);
- break;
- case AF_INET6:
- reinterpret_cast<struct sockaddr_in6*>(&addr)->sin6_addr =
- in6addr_loopback;
- break;
- default:
- return PosixError(EINVAL,
- absl::StrCat("unknown socket family: ", family));
- }
- return addr;
-}
-
-// BM_RecvmsgWithControlBuf measures the performance of recvmsg when we allocate
-// space for control messages. Note that we do not expect to receive any.
-void BM_RecvmsgWithControlBuf(benchmark::State& state) {
- auto listen_socket =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP));
-
- // Initialize address to the loopback one.
- sockaddr_storage addr = ASSERT_NO_ERRNO_AND_VALUE(InetLoopbackAddr(AF_INET6));
- socklen_t addrlen = sizeof(addr);
-
- // Bind to some port then start listening.
- ASSERT_THAT(bind(listen_socket.get(),
- reinterpret_cast<struct sockaddr*>(&addr), addrlen),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(listen_socket.get(), SOMAXCONN), SyscallSucceeds());
-
- // Get the address we're listening on, then connect to it. We need to do this
- // because we're allowing the stack to pick a port for us.
- ASSERT_THAT(getsockname(listen_socket.get(),
- reinterpret_cast<struct sockaddr*>(&addr), &addrlen),
- SyscallSucceeds());
-
- auto send_socket =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP));
-
- ASSERT_THAT(
- RetryEINTR(connect)(send_socket.get(),
- reinterpret_cast<struct sockaddr*>(&addr), addrlen),
- SyscallSucceeds());
-
- // Accept the connection.
- auto recv_socket =
- ASSERT_NO_ERRNO_AND_VALUE(Accept(listen_socket.get(), nullptr, nullptr));
-
- absl::Notification notification;
- Message send_msg('a');
- // Create a msghdr with a buffer allocated for control messages.
- Message recv_msg(0, kMessageSize, /*cmsg_sz=*/24);
-
- ScopedThread t([&send_msg, &send_socket, &notification] {
- while (!notification.HasBeenNotified()) {
- sendmsg(send_socket.get(), send_msg.header(), 0);
- }
- });
-
- int64_t bytes_received = 0;
- for (auto ignored : state) {
- int n = recvmsg(recv_socket.get(), recv_msg.header(), 0);
- TEST_CHECK(n > 0);
- bytes_received += n;
- }
-
- notification.Notify();
- recv_socket.reset();
-
- state.SetBytesProcessed(bytes_received);
-}
-
-BENCHMARK(BM_RecvmsgWithControlBuf)->UseRealTime();
-
-// BM_SendmsgTCP measures the sendmsg throughput with varying payload sizes.
-//
-// state.Args[0] indicates whether the underlying socket should be blocking or
-// non-blocking, with 0 indicating non-blocking and 1 indicating blocking.
-// state.Args[1] is the size of the payload to be used per sendmsg call.
-void BM_SendmsgTCP(benchmark::State& state) {
- auto listen_socket =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_STREAM, IPPROTO_TCP));
-
- // Initialize address to the loopback one.
- sockaddr_storage addr = ASSERT_NO_ERRNO_AND_VALUE(InetLoopbackAddr(AF_INET));
- socklen_t addrlen = sizeof(addr);
-
- // Bind to some port then start listening.
- ASSERT_THAT(bind(listen_socket.get(),
- reinterpret_cast<struct sockaddr*>(&addr), addrlen),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(listen_socket.get(), SOMAXCONN), SyscallSucceeds());
-
- // Get the address we're listening on, then connect to it. We need to do this
- // because we're allowing the stack to pick a port for us.
- ASSERT_THAT(getsockname(listen_socket.get(),
- reinterpret_cast<struct sockaddr*>(&addr), &addrlen),
- SyscallSucceeds());
-
- auto send_socket =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_STREAM, IPPROTO_TCP));
-
- ASSERT_THAT(
- RetryEINTR(connect)(send_socket.get(),
- reinterpret_cast<struct sockaddr*>(&addr), addrlen),
- SyscallSucceeds());
-
- // Accept the connection.
- auto recv_socket =
- ASSERT_NO_ERRNO_AND_VALUE(Accept(listen_socket.get(), nullptr, nullptr));
-
- // Check if we want to run the test w/ a blocking send socket
- // or non-blocking.
- const int blocking = state.range(0);
- if (!blocking) {
- // Set the send FD to O_NONBLOCK.
- int opts;
- ASSERT_THAT(opts = fcntl(send_socket.get(), F_GETFL), SyscallSucceeds());
- opts |= O_NONBLOCK;
- ASSERT_THAT(fcntl(send_socket.get(), F_SETFL, opts), SyscallSucceeds());
- }
-
- absl::Notification notification;
-
- // Get the buffer size we should use for this iteration of the test.
- const int buf_size = state.range(1);
- Message send_msg('a', buf_size), recv_msg(0, buf_size);
-
- ScopedThread t([&recv_msg, &recv_socket, &notification] {
- while (!notification.HasBeenNotified()) {
- TEST_CHECK(recvmsg(recv_socket.get(), recv_msg.header(), 0) >= 0);
- }
- });
-
- int64_t bytes_sent = 0;
- int ncalls = 0;
- for (auto ignored : state) {
- int sent = 0;
- while (true) {
- struct msghdr hdr = {};
- struct iovec iov = {};
- struct msghdr* snd_header = send_msg.header();
- iov.iov_base = static_cast<char*>(snd_header->msg_iov->iov_base) + sent;
- iov.iov_len = snd_header->msg_iov->iov_len - sent;
- hdr.msg_iov = &iov;
- hdr.msg_iovlen = 1;
- int n = RetryEINTR(sendmsg)(send_socket.get(), &hdr, 0);
- ncalls++;
- if (n > 0) {
- sent += n;
- if (sent == buf_size) {
- break;
- }
-        // n can be > 0 but less than the requested size, in which case we don't
- // poll.
- continue;
- }
- // Poll the fd for it to become writable.
- struct pollfd poll_fd = {send_socket.get(), POLL_OUT, 0};
- EXPECT_THAT(RetryEINTR(poll)(&poll_fd, 1, 10),
- SyscallSucceedsWithValue(0));
- }
- bytes_sent += static_cast<int64_t>(sent);
- }
-
- notification.Notify();
- send_socket.reset();
- state.SetBytesProcessed(bytes_sent);
-}
-
-void Args(benchmark::internal::Benchmark* benchmark) {
- for (int blocking = 0; blocking < 2; blocking++) {
- for (int buf_size = 1024; buf_size <= 256 << 20; buf_size *= 2) {
- benchmark->Args({blocking, buf_size});
- }
- }
-}
-
-BENCHMARK(BM_SendmsgTCP)->Apply(&Args)->UseRealTime();
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
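
The non-blocking branch of BM_SendmsgTCP above keeps resending with an iovec advanced past
the bytes already written, and polls the socket for writability whenever the kernel send
buffer is full. A generic sketch of that pattern using send() and poll() (the SendAll helper
is hypothetical and not part of the deleted file; error handling is reduced to return codes):

#include <poll.h>
#include <sys/socket.h>
#include <sys/types.h>

#include <cerrno>
#include <cstddef>

// Write `len` bytes to a non-blocking stream socket, polling for writability
// whenever send() would block. Returns 0 on success, -1 on error.
int SendAll(int fd, const char* buf, size_t len) {
  size_t sent = 0;
  while (sent < len) {
    const ssize_t n = send(fd, buf + sent, len - sent, 0);
    if (n > 0) {
      sent += static_cast<size_t>(n);
      continue;  // Partial send: keep writing without polling.
    }
    if (n < 0 && errno != EAGAIN && errno != EWOULDBLOCK && errno != EINTR) {
      return -1;
    }
    struct pollfd pfd = {fd, POLLOUT, 0};
    if (poll(&pfd, 1, /*timeout_ms=*/1000) < 0 && errno != EINTR) {
      return -1;
    }
  }
  return 0;
}
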
diff --git a/test/perf/linux/seqwrite_benchmark.cc b/test/perf/linux/seqwrite_benchmark.cc
deleted file mode 100644
index af49e4477..000000000
--- a/test/perf/linux/seqwrite_benchmark.cc
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <stdlib.h>
-#include <sys/stat.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "benchmark/benchmark.h"
-#include "test/util/logging.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// The maximum size of the test file; when writes go beyond this point they
-// wrap around. This should be large enough to blow away caches.
-const uint64_t kMaxFile = 1 << 30;
-
-// Perform writes of various sizes sequentially to one file. Wraps around if it
-// goes above a certain maximum file size.
-void BM_SeqWrite(benchmark::State& state) {
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(f.path(), O_WRONLY));
-
- const int size = state.range(0);
- std::vector<char> buf(size);
- RandomizeBuffer(buf.data(), buf.size());
-
- // Start writes at offset 0.
- uint64_t offset = 0;
- for (auto _ : state) {
- TEST_CHECK(PwriteFd(fd.get(), buf.data(), buf.size(), offset) ==
- buf.size());
- offset += buf.size();
- // Wrap around if going above the maximum file size.
- if (offset >= kMaxFile) {
- offset = 0;
- }
- }
-
- state.SetBytesProcessed(static_cast<int64_t>(size) *
- static_cast<int64_t>(state.iterations()));
-}
-
-BENCHMARK(BM_SeqWrite)->Range(1, 1 << 26)->UseRealTime();
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/perf/linux/signal_benchmark.cc b/test/perf/linux/signal_benchmark.cc
deleted file mode 100644
index cec679191..000000000
--- a/test/perf/linux/signal_benchmark.cc
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <signal.h>
-#include <string.h>
-
-#include "gtest/gtest.h"
-#include "benchmark/benchmark.h"
-#include "test/util/logging.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-void FixupHandler(int sig, siginfo_t* si, void* void_ctx) {
- static unsigned int dataval = 0;
-
- // Skip the offending instruction.
- ucontext_t* ctx = reinterpret_cast<ucontext_t*>(void_ctx);
- ctx->uc_mcontext.gregs[REG_RAX] = reinterpret_cast<greg_t>(&dataval);
-}
-
-void BM_FaultSignalFixup(benchmark::State& state) {
- // Set up the signal handler.
- struct sigaction sa = {};
- sigemptyset(&sa.sa_mask);
- sa.sa_sigaction = FixupHandler;
- sa.sa_flags = SA_SIGINFO;
- TEST_CHECK(sigaction(SIGSEGV, &sa, nullptr) == 0);
-
- // Fault, fault, fault.
- for (auto _ : state) {
- // Trigger the segfault.
- asm volatile(
- "movq $0, %%rax\n"
- "movq $0x77777777, (%%rax)\n"
- :
- :
- : "rax");
- }
-}
-
-BENCHMARK(BM_FaultSignalFixup)->UseRealTime();
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/perf/linux/sleep_benchmark.cc b/test/perf/linux/sleep_benchmark.cc
deleted file mode 100644
index 99ef05117..000000000
--- a/test/perf/linux/sleep_benchmark.cc
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <sys/syscall.h>
-#include <time.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "benchmark/benchmark.h"
-#include "test/util/logging.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// Sleep for 'param' nanoseconds.
-void BM_Sleep(benchmark::State& state) {
- const int nanoseconds = state.range(0);
-
- for (auto _ : state) {
- struct timespec ts;
- ts.tv_sec = 0;
- ts.tv_nsec = nanoseconds;
-
- int ret;
- do {
- ret = syscall(SYS_nanosleep, &ts, &ts);
- if (ret < 0) {
- TEST_CHECK(errno == EINTR);
- }
- } while (ret < 0);
- }
-}
-
-BENCHMARK(BM_Sleep)
- ->Arg(0)
- ->Arg(1)
- ->Arg(1000) // 1us
- ->Arg(1000 * 1000) // 1ms
- ->Arg(10 * 1000 * 1000) // 10ms
- ->Arg(50 * 1000 * 1000) // 50ms
- ->UseRealTime();
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/perf/linux/stat_benchmark.cc b/test/perf/linux/stat_benchmark.cc
deleted file mode 100644
index f15424482..000000000
--- a/test/perf/linux/stat_benchmark.cc
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "absl/strings/str_cat.h"
-#include "benchmark/benchmark.h"
-#include "test/util/fs_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// Creates a file in a nested directory hierarchy at least `depth` directories
-// deep, and stats that file multiple times.
-void BM_Stat(benchmark::State& state) {
- // Create nested directories with given depth.
- int depth = state.range(0);
- const TempPath top_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- std::string dir_path = top_dir.path();
-
- while (depth-- > 0) {
- // Don't use TempPath because it will make paths too long to use.
- //
- // The top_dir destructor will clean up this whole tree.
- dir_path = JoinPath(dir_path, absl::StrCat(depth));
- ASSERT_NO_ERRNO(Mkdir(dir_path, 0755));
- }
-
- // Create the file that will be stat'd.
- const TempPath file =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir_path));
-
- struct stat st;
- for (auto _ : state) {
- ASSERT_THAT(stat(file.path().c_str(), &st), SyscallSucceeds());
- }
-}
-
-BENCHMARK(BM_Stat)->Range(1, 100)->UseRealTime();
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/perf/linux/unlink_benchmark.cc b/test/perf/linux/unlink_benchmark.cc
deleted file mode 100644
index 92243a042..000000000
--- a/test/perf/linux/unlink_benchmark.cc
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "benchmark/benchmark.h"
-#include "test/util/fs_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// Creates a directory containing `files` files, and unlinks all the files.
-void BM_Unlink(benchmark::State& state) {
- // Create directory with given files.
- const int file_count = state.range(0);
-
- // We unlink all files on each iteration, but report this as a "batch"
- // iteration so that reported times are per file.
- TempPath dir;
- while (state.KeepRunningBatch(file_count)) {
- state.PauseTiming();
- // N.B. dir is declared outside the loop so that destruction of the previous
- // iteration's directory occurs here, inside of PauseTiming.
- dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- std::vector<TempPath> files;
- for (int i = 0; i < file_count; i++) {
- TempPath file =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir.path()));
- files.push_back(std::move(file));
- }
- state.ResumeTiming();
-
- while (!files.empty()) {
- // Destructor unlinks.
- files.pop_back();
- }
- }
-
- state.SetItemsProcessed(state.iterations());
-}
-
-BENCHMARK(BM_Unlink)->Range(1, 100 * 1000)->UseRealTime();
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
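
BM_Unlink above reports per-file times by treating each pass as a batch via KeepRunningBatch()
and keeping the setup out of the measurement with PauseTiming()/ResumeTiming(). A reduced
sketch of that batching pattern (DoOneItem is a hypothetical stand-in for the per-item work,
which in the benchmark above is one unlink):

#include "benchmark/benchmark.h"

// Hypothetical per-item work; the original destroys one TempPath per item.
static void DoOneItem(int i) { benchmark::DoNotOptimize(i * i); }

static void BM_BatchSketch(benchmark::State& state) {
  const int batch = state.range(0);
  while (state.KeepRunningBatch(batch)) {
    state.PauseTiming();
    // Untimed per-batch setup goes here (creating the files, in the original).
    state.ResumeTiming();
    for (int i = 0; i < batch; i++) {
      DoOneItem(i);
    }
  }
  // iterations() counts items rather than batches, so reported rates are per item.
  state.SetItemsProcessed(state.iterations());
}
BENCHMARK(BM_BatchSketch)->Range(1, 1000);
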
diff --git a/test/perf/linux/write_benchmark.cc b/test/perf/linux/write_benchmark.cc
deleted file mode 100644
index d495f3ddc..000000000
--- a/test/perf/linux/write_benchmark.cc
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <stdlib.h>
-#include <sys/stat.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "benchmark/benchmark.h"
-#include "test/util/logging.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-void BM_Write(benchmark::State& state) {
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(f.path(), O_WRONLY));
-
- const int size = state.range(0);
- std::vector<char> buf(size);
- RandomizeBuffer(buf.data(), size);
-
- for (auto _ : state) {
- TEST_CHECK(PwriteFd(fd.get(), buf.data(), size, 0) == size);
- }
-
- state.SetBytesProcessed(static_cast<int64_t>(size) *
- static_cast<int64_t>(state.iterations()));
-}
-
-BENCHMARK(BM_Write)->Range(1, 1 << 26)->UseRealTime();
-
-void BM_Append(benchmark::State& state) {
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_WRONLY | O_APPEND));
-
- const char data = 'a';
- for (auto _ : state) {
- TEST_CHECK(WriteFd(fd.get(), &data, 1) == 1);
- }
-}
-
-BENCHMARK(BM_Append);
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/root/BUILD b/test/root/BUILD
deleted file mode 100644
index 8d9fff578..000000000
--- a/test/root/BUILD
+++ /dev/null
@@ -1,43 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "root",
- srcs = ["root.go"],
-)
-
-go_test(
- name = "root_test",
- size = "small",
- srcs = [
- "cgroup_test.go",
- "chroot_test.go",
- "crictl_test.go",
- "main_test.go",
- "oom_score_adj_test.go",
- "runsc_test.go",
- ],
- data = [
- "//runsc",
- ],
- library = ":root",
- tags = [
- "local",
- "manual",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/cleanup",
- "//pkg/test/criutil",
- "//pkg/test/dockerutil",
- "//pkg/test/testutil",
- "//runsc/cgroup",
- "//runsc/container",
- "//runsc/specutils",
- "@com_github_cenkalti_backoff//:go_default_library",
- "@com_github_opencontainers_runtime_spec//specs-go:go_default_library",
- "@com_github_syndtr_gocapability//capability:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/test/root/cgroup_test.go b/test/root/cgroup_test.go
deleted file mode 100644
index 39e838582..000000000
--- a/test/root/cgroup_test.go
+++ /dev/null
@@ -1,362 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package root
-
-import (
- "bufio"
- "context"
- "fmt"
- "io/ioutil"
- "os"
- "os/exec"
- "path/filepath"
- "strconv"
- "strings"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/pkg/test/testutil"
- "gvisor.dev/gvisor/runsc/cgroup"
-)
-
-func verifyPid(pid int, path string) error {
- f, err := os.Open(path)
- if err != nil {
- return err
- }
- defer f.Close()
-
- var gots []int
- scanner := bufio.NewScanner(f)
- for scanner.Scan() {
- got, err := strconv.Atoi(scanner.Text())
- if err != nil {
- return err
- }
- if got == pid {
- return nil
- }
- gots = append(gots, got)
- }
- if scanner.Err() != nil {
- return scanner.Err()
- }
- return fmt.Errorf("got: %v, want: %d", gots, pid)
-}
-
-func TestMemCgroup(t *testing.T) {
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
-	// Start a new container and allocate the specified amount of memory.
- allocMemSize := 128 << 20
- allocMemLimit := 2 * allocMemSize
-
- if err := d.Spawn(ctx, dockerutil.RunOpts{
- Image: "basic/ubuntu",
- Memory: allocMemLimit, // Must be in bytes.
- }, "python3", "-c", fmt.Sprintf("import time; s = 'a' * %d; time.sleep(100)", allocMemSize)); err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
-
- // Extract the ID to lookup the cgroup.
- gid := d.ID()
- t.Logf("cgroup ID: %s", gid)
-
-	// Wait until the container has allocated the memory.
- memUsage := 0
- start := time.Now()
- for time.Since(start) < 30*time.Second {
- // Sleep for a brief period of time after spawning the
-		// container (so that Docker can create the cgroup etc.)
- // or after looping below (so the application can start).
- time.Sleep(100 * time.Millisecond)
-
- // Read the cgroup memory limit.
- path := filepath.Join("/sys/fs/cgroup/memory/docker", gid, "memory.limit_in_bytes")
- outRaw, err := ioutil.ReadFile(path)
- if err != nil {
- // It's possible that the container does not exist yet.
- continue
- }
- out := strings.TrimSpace(string(outRaw))
- memLimit, err := strconv.Atoi(out)
- if err != nil {
- t.Fatalf("Atoi(%v): %v", out, err)
- }
- if memLimit != allocMemLimit {
- // The group may not have had the correct limit set yet.
- continue
- }
-
- // Read the cgroup memory usage.
- path = filepath.Join("/sys/fs/cgroup/memory/docker", gid, "memory.max_usage_in_bytes")
- outRaw, err = ioutil.ReadFile(path)
- if err != nil {
- t.Fatalf("error reading usage: %v", err)
- }
- out = strings.TrimSpace(string(outRaw))
- memUsage, err = strconv.Atoi(out)
- if err != nil {
- t.Fatalf("Atoi(%v): %v", out, err)
- }
- t.Logf("read usage: %v, wanted: %v", memUsage, allocMemSize)
-
- // Are we done?
- if memUsage >= allocMemSize {
- return
- }
- }
-
- t.Fatalf("%vMB is less than %vMB", memUsage>>20, allocMemSize>>20)
-}
-
-// TestCgroup sets cgroup options and checks that cgroup was properly configured.
-func TestCgroup(t *testing.T) {
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- // This is not a comprehensive list of attributes.
- //
- // Note that we are specifically missing cpusets, which fail if specified.
- // In any case, it's unclear if cpusets can be reliably tested here: these
-	// tests are often run on a single-core virtual machine, so only a single
-	// CPU is available in both our current set and every container's set.
- attrs := []struct {
- field string
- value int64
- ctrl string
- file string
- want string
- skipIfNotFound bool
- }{
- {
- field: "cpu-shares",
- value: 1000,
- ctrl: "cpu",
- file: "cpu.shares",
- want: "1000",
- },
- {
- field: "cpu-period",
- value: 2000,
- ctrl: "cpu",
- file: "cpu.cfs_period_us",
- want: "2000",
- },
- {
- field: "cpu-quota",
- value: 3000,
- ctrl: "cpu",
- file: "cpu.cfs_quota_us",
- want: "3000",
- },
- {
- field: "kernel-memory",
- value: 100 << 20,
- ctrl: "memory",
- file: "memory.kmem.limit_in_bytes",
- want: "104857600",
- },
- {
- field: "memory",
- value: 1 << 30,
- ctrl: "memory",
- file: "memory.limit_in_bytes",
- want: "1073741824",
- },
- {
- field: "memory-reservation",
- value: 500 << 20,
- ctrl: "memory",
- file: "memory.soft_limit_in_bytes",
- want: "524288000",
- },
- {
- field: "memory-swap",
- value: 2 << 30,
- ctrl: "memory",
- file: "memory.memsw.limit_in_bytes",
- want: "2147483648",
- skipIfNotFound: true, // swap may be disabled on the machine.
- },
- {
- field: "memory-swappiness",
- value: 5,
- ctrl: "memory",
- file: "memory.swappiness",
- want: "5",
- },
- {
- field: "blkio-weight",
- value: 750,
- ctrl: "blkio",
- file: "blkio.weight",
- want: "750",
- skipIfNotFound: true, // blkio groups may not be available.
- },
- {
- field: "pids-limit",
- value: 1000,
- ctrl: "pids",
- file: "pids.max",
- want: "1000",
- },
- }
-
- // Make configs.
- conf, hostconf, _ := d.ConfigsFrom(dockerutil.RunOpts{
- Image: "basic/alpine",
- }, "sleep", "10000")
-
- // Add Cgroup arguments to configs.
- for _, attr := range attrs {
- switch attr.field {
- case "cpu-shares":
- hostconf.Resources.CPUShares = attr.value
- case "cpu-period":
- hostconf.Resources.CPUPeriod = attr.value
- case "cpu-quota":
- hostconf.Resources.CPUQuota = attr.value
- case "kernel-memory":
- hostconf.Resources.KernelMemory = attr.value
- case "memory":
- hostconf.Resources.Memory = attr.value
- case "memory-reservation":
- hostconf.Resources.MemoryReservation = attr.value
- case "memory-swap":
- hostconf.Resources.MemorySwap = attr.value
- case "memory-swappiness":
- val := attr.value
- hostconf.Resources.MemorySwappiness = &val
- case "blkio-weight":
- hostconf.Resources.BlkioWeight = uint16(attr.value)
- case "pids-limit":
- val := attr.value
- hostconf.Resources.PidsLimit = &val
- }
- }
-
- // Create container.
- if err := d.CreateFrom(ctx, "basic/alpine", conf, hostconf, nil); err != nil {
- t.Fatalf("create failed with: %v", err)
- }
-
- // Start container.
- if err := d.Start(ctx); err != nil {
- t.Fatalf("start failed with: %v", err)
- }
-
- // Lookup the relevant cgroup ID.
- gid := d.ID()
- t.Logf("cgroup ID: %s", gid)
-
- // Check list of attributes defined above.
- for _, attr := range attrs {
- path := filepath.Join("/sys/fs/cgroup", attr.ctrl, "docker", gid, attr.file)
- out, err := ioutil.ReadFile(path)
- if err != nil {
- if os.IsNotExist(err) && attr.skipIfNotFound {
- t.Logf("skipped %s/%s", attr.ctrl, attr.file)
- continue
- }
- t.Fatalf("failed to read %q: %v", path, err)
- }
- if got := strings.TrimSpace(string(out)); got != attr.want {
- t.Errorf("field: %q, cgroup attribute %s/%s, got: %q, want: %q", attr.field, attr.ctrl, attr.file, got, attr.want)
- }
- }
-
- // Check that sandbox is inside cgroup.
- controllers := []string{
- "blkio",
- "cpu",
- "cpuset",
- "memory",
- "net_cls",
- "net_prio",
- "devices",
- "freezer",
- "perf_event",
- "pids",
- "systemd",
- }
- pid, err := d.SandboxPid(ctx)
- if err != nil {
- t.Fatalf("SandboxPid: %v", err)
- }
- for _, ctrl := range controllers {
- path := filepath.Join("/sys/fs/cgroup", ctrl, "docker", gid, "cgroup.procs")
- if err := verifyPid(pid, path); err != nil {
- t.Errorf("cgroup control %q processes: %v", ctrl, err)
- }
- }
-}
-
-// TestCgroupParent sets the "CgroupParent" option and checks that the child's
-// and parent's cgroups are created correctly relative to each other.
-func TestCgroupParent(t *testing.T) {
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- // Construct a known cgroup name.
- parent := testutil.RandomID("runsc-")
- conf, hostconf, _ := d.ConfigsFrom(dockerutil.RunOpts{
- Image: "basic/alpine",
- }, "sleep", "10000")
- hostconf.Resources.CgroupParent = parent
-
- if err := d.CreateFrom(ctx, "basic/alpine", conf, hostconf, nil); err != nil {
- t.Fatalf("create failed with: %v", err)
- }
-
- if err := d.Start(ctx); err != nil {
- t.Fatalf("start failed with: %v", err)
- }
-
- // Extract the ID to look up the cgroup.
- gid := d.ID()
- t.Logf("cgroup ID: %s", gid)
-
- // Check that sandbox is inside cgroup.
- pid, err := d.SandboxPid(ctx)
- if err != nil {
- t.Fatalf("SandboxPid: %v", err)
- }
-
-	// Find the cgroup for the sandbox's parent process to check that the cgroup
-	// is created in the right location relative to the parent.
- cmd := fmt.Sprintf("grep PPid: /proc/%d/status | sed 's/PPid:\\s//'", pid)
- ppidStr, err := exec.Command("bash", "-c", cmd).CombinedOutput()
- if err != nil {
- t.Fatalf("Executing %q: %v", cmd, err)
- }
- ppid, err := strconv.Atoi(strings.TrimSpace(string(ppidStr)))
- if err != nil {
- t.Fatalf("invalid PID (%s): %v", ppidStr, err)
- }
- cgroups, err := cgroup.NewFromPid(ppid)
- if err != nil {
- t.Fatalf("cgroup.NewFromPid(%d): %v", ppid, err)
- }
- path := filepath.Join(cgroups.MakePath("cpuacct"), parent, gid, "cgroup.procs")
- if err := verifyPid(pid, path); err != nil {
- t.Errorf("cgroup control %q processes: %v", "memory", err)
- }
-}
diff --git a/test/root/chroot_test.go b/test/root/chroot_test.go
deleted file mode 100644
index 58fcd6f08..000000000
--- a/test/root/chroot_test.go
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package root is used for tests that require sysadmin privileges to run.
-package root
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "os/exec"
- "path/filepath"
- "strconv"
- "strings"
- "testing"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
-)
-
-// TestChroot verifies that the sandbox is chroot'd and that mounts are cleaned
-// up after the sandbox is destroyed.
-func TestChroot(t *testing.T) {
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- if err := d.Spawn(ctx, dockerutil.RunOpts{
- Image: "basic/alpine",
- }, "sleep", "10000"); err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
-
- pid, err := d.SandboxPid(ctx)
- if err != nil {
- t.Fatalf("Docker.SandboxPid(): %v", err)
- }
-
- // Check that sandbox is chroot'ed.
- procRoot := filepath.Join("/proc", strconv.Itoa(pid), "root")
- chroot, err := filepath.EvalSymlinks(procRoot)
- if err != nil {
- t.Fatalf("error resolving /proc/<pid>/root symlink: %v", err)
- }
- if chroot != "/" {
- t.Errorf("sandbox is not chroot'd, it should be inside: /, got: %q", chroot)
- }
-
- path, err := filepath.EvalSymlinks(filepath.Join("/proc", strconv.Itoa(pid), "cwd"))
- if err != nil {
- t.Fatalf("error resolving /proc/<pid>/cwd symlink: %v", err)
- }
- if chroot != path {
- t.Errorf("sandbox current dir is wrong, want: %q, got: %q", chroot, path)
- }
-
- fi, err := ioutil.ReadDir(procRoot)
- if err != nil {
- t.Fatalf("error listing %q: %v", chroot, err)
- }
- if want, got := 1, len(fi); want != got {
- t.Fatalf("chroot dir got %d entries, want %d", got, want)
- }
-
-	// The chroot dir is prepared by runsc and should contain only /proc.
- if fi[0].Name() != "proc" {
- t.Errorf("chroot got children %v, want %v", fi[0].Name(), "proc")
- }
-
- d.CleanUp(ctx)
-}
-
-func TestChrootGofer(t *testing.T) {
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, t)
- defer d.CleanUp(ctx)
-
- if err := d.Spawn(ctx, dockerutil.RunOpts{
- Image: "basic/alpine",
- }, "sleep", "10000"); err != nil {
- t.Fatalf("docker run failed: %v", err)
- }
-
- // It's tricky to find gofers. Get sandbox PID first, then find parent. From
-	// the parent get all immediate children, remove the sandbox, and everything
-	// else is a gofer.
- sandPID, err := d.SandboxPid(ctx)
- if err != nil {
- t.Fatalf("Docker.SandboxPid(): %v", err)
- }
-
- // Find sandbox's parent PID.
- cmd := fmt.Sprintf("grep PPid /proc/%d/status | awk '{print $2}'", sandPID)
- parent, err := exec.Command("sh", "-c", cmd).CombinedOutput()
- if err != nil {
- t.Fatalf("failed to fetch runsc (%d) parent PID: %v, out:\n%s", sandPID, err, string(parent))
- }
- parentPID, err := strconv.Atoi(strings.TrimSpace(string(parent)))
- if err != nil {
- t.Fatalf("failed to parse PPID %q: %v", string(parent), err)
- }
-
- // Get all children from parent.
- childrenOut, err := exec.Command("/usr/bin/pgrep", "-P", strconv.Itoa(parentPID)).CombinedOutput()
- if err != nil {
- t.Fatalf("failed to fetch containerd-shim children: %v", err)
- }
- children := strings.Split(strings.TrimSpace(string(childrenOut)), "\n")
-
-	// This is where the root directory is mapped on the host, and that's where
-	// the gofer must have chroot'd to.
- root := "/root"
-
- for _, child := range children {
- childPID, err := strconv.Atoi(child)
- if err != nil {
- t.Fatalf("failed to parse child PID %q: %v", child, err)
- }
- if childPID == sandPID {
- // Skip the sandbox, all other immediate children are gofers.
- continue
- }
-
- // Check that gofer is chroot'ed.
- chroot, err := filepath.EvalSymlinks(filepath.Join("/proc", child, "root"))
- if err != nil {
- t.Fatalf("error resolving /proc/<pid>/root symlink: %v", err)
- }
- if root != chroot {
- t.Errorf("gofer chroot is wrong, want: %q, got: %q", root, chroot)
- }
-
- path, err := filepath.EvalSymlinks(filepath.Join("/proc", child, "cwd"))
- if err != nil {
- t.Fatalf("error resolving /proc/<pid>/cwd symlink: %v", err)
- }
- if root != path {
- t.Errorf("gofer current dir is wrong, want: %q, got: %q", root, path)
- }
- }
-}
diff --git a/test/root/crictl_test.go b/test/root/crictl_test.go
deleted file mode 100644
index c26dc8577..000000000
--- a/test/root/crictl_test.go
+++ /dev/null
@@ -1,476 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package root
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "os"
- "os/exec"
- "path"
- "regexp"
- "strconv"
- "strings"
- "sync"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/cleanup"
- "gvisor.dev/gvisor/pkg/test/criutil"
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/pkg/test/testutil"
-)
-
-// Tests for crictl have to be run as root (rather than in a user namespace)
-// because crictl creates named network namespaces in /var/run/netns/.
-
-// Sandbox returns a JSON config for a simple sandbox. Sandbox names must be
-// unique so different names should be used when running tests on the same
-// containerd instance.
-func Sandbox(name string) string {
- // Sandbox is a default JSON config for a sandbox.
- s := map[string]interface{}{
- "metadata": map[string]string{
- "name": name,
- "namespace": "default",
- "uid": testutil.RandomID(""),
- },
- "linux": map[string]string{},
- "log_directory": "/tmp",
- }
-
- v, err := json.Marshal(s)
- if err != nil {
- // This shouldn't happen.
- panic(err)
- }
- return string(v)
-}
-
-// SimpleSpec returns a JSON config for a simple container that runs the
-// specified command in the specified image.
-func SimpleSpec(name, image string, cmd []string, extra map[string]interface{}) string {
- s := map[string]interface{}{
- "metadata": map[string]string{
- "name": name,
- },
- "image": map[string]string{
- "image": testutil.ImageByName(image),
- },
- // Log files are not deleted after root tests are run. Log to random
- // paths to ensure logs are fresh.
- "log_path": fmt.Sprintf("%s.log", testutil.RandomID(name)),
- "stdin": false,
- "tty": false,
- }
- if len(cmd) > 0 { // Omit if empty.
- s["command"] = cmd
- }
- for k, v := range extra {
- s[k] = v // Extra settings.
- }
- v, err := json.Marshal(s)
- if err != nil {
- // This shouldn't happen.
- panic(err)
- }
- return string(v)
-}
-
-// Httpd is a JSON config for an httpd container.
-var Httpd = SimpleSpec("httpd", "basic/httpd", nil, nil)
-
-// TestCrictlSanity refers to b/112433158.
-func TestCrictlSanity(t *testing.T) {
- // Setup containerd and crictl.
- crictl, cleanup, err := setup(t)
- if err != nil {
- t.Fatalf("failed to setup crictl: %v", err)
- }
- defer cleanup()
- podID, contID, err := crictl.StartPodAndContainer(containerdRuntime, "basic/httpd", Sandbox("default"), Httpd)
- if err != nil {
- t.Fatalf("start failed: %v", err)
- }
-
- // Look for the httpd page.
- if err = httpGet(crictl, podID, "index.html"); err != nil {
- t.Fatalf("failed to get page: %v", err)
- }
-
- // Stop everything.
- if err := crictl.StopPodAndContainer(podID, contID); err != nil {
- t.Fatalf("stop failed: %v", err)
- }
-}
-
-// HttpdMountPaths is a JSON config for an httpd container with additional
-// mounts.
-var HttpdMountPaths = SimpleSpec("httpd", "basic/httpd", nil, map[string]interface{}{
- "mounts": []map[string]interface{}{
- {
- "container_path": "/var/run/secrets/kubernetes.io/serviceaccount",
- "host_path": "/var/lib/kubelet/pods/82bae206-cdf5-11e8-b245-8cdcd43ac064/volumes/kubernetes.io~secret/default-token-2rpfx",
- "readonly": true,
- },
- {
- "container_path": "/etc/hosts",
- "host_path": "/var/lib/kubelet/pods/82bae206-cdf5-11e8-b245-8cdcd43ac064/etc-hosts",
- "readonly": false,
- },
- {
- "container_path": "/dev/termination-log",
- "host_path": "/var/lib/kubelet/pods/82bae206-cdf5-11e8-b245-8cdcd43ac064/containers/httpd/d1709580",
- "readonly": false,
- },
- {
- "container_path": "/usr/local/apache2/htdocs/test",
- "host_path": "/var/lib/kubelet/pods/82bae206-cdf5-11e8-b245-8cdcd43ac064",
- "readonly": true,
- },
- },
- "linux": map[string]interface{}{},
-})
-
-// TestMountPaths refers to b/117635704.
-func TestMountPaths(t *testing.T) {
- // Setup containerd and crictl.
- crictl, cleanup, err := setup(t)
- if err != nil {
- t.Fatalf("failed to setup crictl: %v", err)
- }
- defer cleanup()
- podID, contID, err := crictl.StartPodAndContainer(containerdRuntime, "basic/httpd", Sandbox("default"), HttpdMountPaths)
- if err != nil {
- t.Fatalf("start failed: %v", err)
- }
-
- // Look for the directory available at /test.
- if err = httpGet(crictl, podID, "test"); err != nil {
- t.Fatalf("failed to get page: %v", err)
- }
-
- // Stop everything.
- if err := crictl.StopPodAndContainer(podID, contID); err != nil {
- t.Fatalf("stop failed: %v", err)
- }
-}
-
-// TestMountOverSymlinks refers to b/118728671.
-func TestMountOverSymlinks(t *testing.T) {
- // Setup containerd and crictl.
- crictl, cleanup, err := setup(t)
- if err != nil {
- t.Fatalf("failed to setup crictl: %v", err)
- }
- defer cleanup()
-
- spec := SimpleSpec("busybox", "basic/resolv", []string{"sleep", "1000"}, nil)
- podID, contID, err := crictl.StartPodAndContainer(containerdRuntime, "basic/resolv", Sandbox("default"), spec)
- if err != nil {
- t.Fatalf("start failed: %v", err)
- }
-
- out, err := crictl.Exec(contID, "readlink", "/etc/resolv.conf")
- if err != nil {
- t.Fatalf("readlink failed: %v, out: %s", err, out)
- }
- if want := "/tmp/resolv.conf"; !strings.Contains(string(out), want) {
- t.Fatalf("/etc/resolv.conf is not pointing to %q: %q", want, string(out))
- }
-
- etc, err := crictl.Exec(contID, "cat", "/etc/resolv.conf")
- if err != nil {
- t.Fatalf("cat failed: %v, out: %s", err, etc)
- }
- tmp, err := crictl.Exec(contID, "cat", "/tmp/resolv.conf")
- if err != nil {
- t.Fatalf("cat failed: %v, out: %s", err, out)
- }
- if tmp != etc {
- t.Fatalf("file content doesn't match:\n\t/etc/resolv.conf: %s\n\t/tmp/resolv.conf: %s", string(etc), string(tmp))
- }
-
- // Stop everything.
- if err := crictl.StopPodAndContainer(podID, contID); err != nil {
- t.Fatalf("stop failed: %v", err)
- }
-}
-
-// TestHomeDir tests that the HOME environment variable is set for
-// Pod containers.
-func TestHomeDir(t *testing.T) {
- // Setup containerd and crictl.
- crictl, cleanup, err := setup(t)
- if err != nil {
- t.Fatalf("failed to setup crictl: %v", err)
- }
- defer cleanup()
-
- // Note that container ID returned here is a sub-container. All Pod
- // containers are sub-containers. The root container of the sandbox is the
- // pause container.
- t.Run("sub-container", func(t *testing.T) {
- contSpec := SimpleSpec("subcontainer", "basic/busybox", []string{"sh", "-c", "echo $HOME"}, nil)
- podID, contID, err := crictl.StartPodAndContainer(containerdRuntime, "basic/busybox", Sandbox("subcont-sandbox"), contSpec)
- if err != nil {
- t.Fatalf("start failed: %v", err)
- }
-
- out, err := crictl.Logs(contID)
- if err != nil {
- t.Fatalf("failed retrieving container logs: %v, out: %s", err, out)
- }
- if got, want := strings.TrimSpace(string(out)), "/root"; got != want {
- t.Fatalf("Home directory invalid. Got %q, Want : %q", got, want)
- }
-
- // Stop everything; note that the pod may have already stopped.
- crictl.StopPodAndContainer(podID, contID)
- })
-
- // Tests that HOME is set for the exec process.
- t.Run("exec", func(t *testing.T) {
- contSpec := SimpleSpec("exec", "basic/busybox", []string{"sleep", "1000"}, nil)
- podID, contID, err := crictl.StartPodAndContainer(containerdRuntime, "basic/busybox", Sandbox("exec-sandbox"), contSpec)
- if err != nil {
- t.Fatalf("start failed: %v", err)
- }
-
- out, err := crictl.Exec(contID, "sh", "-c", "echo $HOME")
- if err != nil {
-			t.Fatalf("exec failed: %v, out: %s", err, out)
- }
- if got, want := strings.TrimSpace(string(out)), "/root"; got != want {
- t.Fatalf("Home directory invalid. Got %q, Want : %q", got, want)
- }
-
- // Stop everything.
- if err := crictl.StopPodAndContainer(podID, contID); err != nil {
- t.Fatalf("stop failed: %v", err)
- }
- })
-}
-
-const containerdRuntime = "runsc"
-
-// template is the containerd configuration file that configures containerd to
-// use the gVisor shim. Note that the v2 shim binary name must be
-// containerd-shim-<runtime>-v1.
-const template = `
-disabled_plugins = ["restart"]
-[plugins.cri]
- disable_tcp_service = true
-[plugins.linux]
- shim_debug = true
-[plugins.cri.containerd.runtimes.` + containerdRuntime + `]
- runtime_type = "io.containerd.` + containerdRuntime + `.v1"
-[plugins.cri.containerd.runtimes.` + containerdRuntime + `.options]
- TypeUrl = "io.containerd.` + containerdRuntime + `.v1.options"
-`
-
-// setup prepares containerd and crictl before a test. Specifically it:
-// * Creates directories and a socket for containerd to utilize.
-// * Runs containerd and waits for it to reach a "ready" state for testing.
-// * Returns a cleanup function that should be called at the end of the test.
-func setup(t *testing.T) (*criutil.Crictl, func(), error) {
- // Create temporary containerd root and state directories, and a socket
- // via which crictl and containerd communicate.
- containerdRoot, err := ioutil.TempDir(testutil.TmpDir(), "containerd-root")
- if err != nil {
- t.Fatalf("failed to create containerd root: %v", err)
- }
- cu := cleanup.Make(func() { os.RemoveAll(containerdRoot) })
- defer cu.Clean()
- t.Logf("Using containerd root: %s", containerdRoot)
-
- containerdState, err := ioutil.TempDir(testutil.TmpDir(), "containerd-state")
- if err != nil {
- t.Fatalf("failed to create containerd state: %v", err)
- }
- cu.Add(func() { os.RemoveAll(containerdState) })
- t.Logf("Using containerd state: %s", containerdState)
-
- sockDir, err := ioutil.TempDir(testutil.TmpDir(), "containerd-sock")
- if err != nil {
- t.Fatalf("failed to create containerd socket directory: %v", err)
- }
- cu.Add(func() { os.RemoveAll(sockDir) })
- sockAddr := path.Join(sockDir, "test.sock")
- t.Logf("Using containerd socket: %s", sockAddr)
-
- // Extract the containerd version.
- versionCmd := exec.Command(getContainerd(), "-v")
- out, err := versionCmd.CombinedOutput()
- if err != nil {
- t.Fatalf("error extracting containerd version: %v (%s)", err, string(out))
- }
-	r := regexp.MustCompile(" v([0-9]+)\\.([0-9]+)\\.([0-9]+)")
- vs := r.FindStringSubmatch(string(out))
- if len(vs) != 4 {
- t.Fatalf("error unexpected version string: %s", string(out))
- }
- major, err := strconv.ParseUint(vs[1], 10, 64)
- if err != nil {
- t.Fatalf("error parsing containerd major version: %v (%s)", err, string(out))
- }
- minor, err := strconv.ParseUint(vs[2], 10, 64)
- if err != nil {
- t.Fatalf("error parsing containerd minor version: %v (%s)", err, string(out))
- }
- t.Logf("Using containerd version: %d.%d", major, minor)
-
- // Check if containerd supports shim v2.
- if major < 1 || (major == 1 && minor <= 1) {
- t.Skipf("skipping incompatible containerd (want at least 1.2, got %d.%d)", major, minor)
- }
-
-	// Write a containerd configuration based on the docker configuration for
-	// the runtime under test.
- runtime, err := dockerutil.RuntimePath()
- if err != nil {
- t.Fatalf("error discovering runtime path: %v", err)
- }
- t.Logf("Using runtime: %v", runtime)
-
- // Construct a PATH that includes the runtime directory. This is
- // because the shims will be installed there, and containerd may infer
- // the binary name and search the PATH.
- runtimeDir := path.Dir(runtime)
- modifiedPath, ok := os.LookupEnv("PATH")
- if ok {
- modifiedPath = ":" + modifiedPath // We prepend below.
- }
- modifiedPath = path.Dir(getContainerd()) + modifiedPath
- modifiedPath = runtimeDir + ":" + modifiedPath
- t.Logf("Using PATH: %v", modifiedPath)
-
- // Generate the configuration for the test.
- t.Logf("Using config: %s", template)
- configFile, configCleanup, err := testutil.WriteTmpFile("containerd-config", template)
- if err != nil {
-		t.Fatalf("failed to write containerd config: %v", err)
- }
- cu.Add(configCleanup)
-
- // Start containerd.
- args := []string{
- getContainerd(),
- "--config", configFile,
- "--log-level", "debug",
- "--root", containerdRoot,
- "--state", containerdState,
- "--address", sockAddr,
- }
- t.Logf("Using args: %s", strings.Join(args, " "))
- cmd := exec.Command(args[0], args[1:]...)
- cmd.Env = append(os.Environ(), "PATH="+modifiedPath)
-
- // Include output in logs.
- stderrPipe, err := cmd.StderrPipe()
- if err != nil {
- t.Fatalf("failed to create stderr pipe: %v", err)
- }
- cu.Add(func() { stderrPipe.Close() })
- stdoutPipe, err := cmd.StdoutPipe()
- if err != nil {
- t.Fatalf("failed to create stdout pipe: %v", err)
- }
- cu.Add(func() { stdoutPipe.Close() })
- var (
- wg sync.WaitGroup
- stderr bytes.Buffer
- stdout bytes.Buffer
- )
- startupR, startupW := io.Pipe()
- wg.Add(2)
- go func() {
- defer wg.Done()
- io.Copy(io.MultiWriter(startupW, &stderr), stderrPipe)
- }()
- go func() {
- defer wg.Done()
- io.Copy(io.MultiWriter(startupW, &stdout), stdoutPipe)
- }()
- cu.Add(func() {
- wg.Wait()
- t.Logf("containerd stdout: %s", stdout.String())
- t.Logf("containerd stderr: %s", stderr.String())
- })
-
- // Start the process.
- if err := cmd.Start(); err != nil {
- t.Fatalf("failed running containerd: %v", err)
- }
-
- // Wait for containerd to boot.
- if err := testutil.WaitUntilRead(startupR, "Start streaming server", 10*time.Second); err != nil {
- t.Fatalf("failed to start containerd: %v", err)
- }
-
- // Discard all subsequent data.
- go io.Copy(ioutil.Discard, startupR)
-
- // Create the crictl interface.
- cc := criutil.NewCrictl(t, sockAddr)
- cu.Add(cc.CleanUp)
-
- // Kill must be the last cleanup (as it will be executed first).
- cu.Add(func() {
- // Best effort: ignore errors.
- testutil.KillCommand(cmd)
- })
-
- return cc, cu.Release(), nil
-}
-
-// httpGet GETs the contents of a file served from a pod on port 80.
-func httpGet(crictl *criutil.Crictl, podID, filePath string) error {
- // Get the IP of the httpd server.
- ip, err := crictl.PodIP(podID)
- if err != nil {
- return fmt.Errorf("failed to get IP from pod %q: %v", podID, err)
- }
-
- // GET the page. We may be waiting for the server to start, so retry
- // with a timeout.
- var resp *http.Response
- cb := func() error {
- r, err := http.Get(fmt.Sprintf("http://%s", path.Join(ip, filePath)))
- resp = r
- return err
- }
- if err := testutil.Poll(cb, 20*time.Second); err != nil {
- return err
- }
- defer resp.Body.Close()
-
- if resp.StatusCode != 200 {
- return fmt.Errorf("bad status returned: %d", resp.StatusCode)
- }
- return nil
-}
-
-func getContainerd() string {
- // Use the local path if it exists, otherwise, use the system one.
- if _, err := os.Stat("/usr/local/bin/containerd"); err == nil {
- return "/usr/local/bin/containerd"
- }
- return "/usr/bin/containerd"
-}
diff --git a/test/root/main_test.go b/test/root/main_test.go
deleted file mode 100644
index 9fb17e0dd..000000000
--- a/test/root/main_test.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package root
-
-import (
- "flag"
- "fmt"
- "os"
- "testing"
-
- "github.com/syndtr/gocapability/capability"
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/runsc/specutils"
-)
-
-// TestMain is the main function for root tests. It checks that docker is a
-// supported version, that the required capabilities are present, and
-// configures the executable path for runsc.
-func TestMain(m *testing.M) {
- flag.Parse()
-
- if !specutils.HasCapabilities(capability.CAP_SYS_ADMIN, capability.CAP_DAC_OVERRIDE) {
- fmt.Println("Test requires sysadmin privileges to run. Try again with sudo.")
- os.Exit(1)
- }
-
- dockerutil.EnsureSupportedDockerVersion()
-
- // Configure exe for tests.
- path, err := dockerutil.RuntimePath()
- if err != nil {
- panic(err.Error())
- }
- specutils.ExePath = path
-
- os.Exit(m.Run())
-}
diff --git a/test/root/oom_score_adj_test.go b/test/root/oom_score_adj_test.go
deleted file mode 100644
index 0dcc0fdea..000000000
--- a/test/root/oom_score_adj_test.go
+++ /dev/null
@@ -1,358 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package root
-
-import (
- "fmt"
- "os"
- "testing"
-
- specs "github.com/opencontainers/runtime-spec/specs-go"
- "gvisor.dev/gvisor/pkg/cleanup"
- "gvisor.dev/gvisor/pkg/test/testutil"
- "gvisor.dev/gvisor/runsc/container"
- "gvisor.dev/gvisor/runsc/specutils"
-)
-
-var (
- maxOOMScoreAdj = 1000
- highOOMScoreAdj = 500
- lowOOMScoreAdj = -500
- minOOMScoreAdj = -1000
-)
-
-// Tests for oom_score_adj have to be run as root (rather than in a user
-// namespace) because we need to adjust oom_score_adj for PIDs other than our
-// own and test values below 0.
-
-// TestOOMScoreAdjSingle tests that oom_score_adj is set properly in a
-// single container sandbox.
-func TestOOMScoreAdjSingle(t *testing.T) {
- parentOOMScoreAdj, err := specutils.GetOOMScoreAdj(os.Getppid())
- if err != nil {
- t.Fatalf("getting parent oom_score_adj: %v", err)
- }
-
- testCases := []struct {
- Name string
-
- // OOMScoreAdj is the oom_score_adj set to the OCI spec. If nil then
- // no value is set.
- OOMScoreAdj *int
- }{
- {
- Name: "max",
- OOMScoreAdj: &maxOOMScoreAdj,
- },
- {
- Name: "high",
- OOMScoreAdj: &highOOMScoreAdj,
- },
- {
- Name: "low",
- OOMScoreAdj: &lowOOMScoreAdj,
- },
- {
- Name: "min",
- OOMScoreAdj: &minOOMScoreAdj,
- },
- {
- Name: "nil",
- OOMScoreAdj: &parentOOMScoreAdj,
- },
- }
-
- for _, testCase := range testCases {
- t.Run(testCase.Name, func(t *testing.T) {
- id := testutil.RandomContainerID()
- s := testutil.NewSpecWithArgs("sleep", "1000")
- s.Process.OOMScoreAdj = testCase.OOMScoreAdj
-
- containers, cleanup, err := startContainers(t, []*specs.Spec{s}, []string{id})
- if err != nil {
- t.Fatalf("error starting containers: %v", err)
- }
- defer cleanup()
-
- c := containers[0]
-
- // Verify the gofer's oom_score_adj
- if testCase.OOMScoreAdj != nil {
- goferScore, err := specutils.GetOOMScoreAdj(c.GoferPid)
- if err != nil {
- t.Fatalf("error reading gofer oom_score_adj: %v", err)
- }
- if goferScore != *testCase.OOMScoreAdj {
- t.Errorf("gofer oom_score_adj got: %d, want: %d", goferScore, *testCase.OOMScoreAdj)
- }
-
- // Verify the sandbox's oom_score_adj.
- //
- // The sandbox should be the same for all containers so just use
- // the first one.
- sandboxPid := c.Sandbox.Pid
- sandboxScore, err := specutils.GetOOMScoreAdj(sandboxPid)
- if err != nil {
- t.Fatalf("error reading sandbox oom_score_adj: %v", err)
- }
- if sandboxScore != *testCase.OOMScoreAdj {
- t.Errorf("sandbox oom_score_adj got: %d, want: %d", sandboxScore, *testCase.OOMScoreAdj)
- }
- }
- })
- }
-}
-
-// TestOOMScoreAdjMulti tests that oom_score_adj is set properly in a
-// multi-container sandbox.
-func TestOOMScoreAdjMulti(t *testing.T) {
- parentOOMScoreAdj, err := specutils.GetOOMScoreAdj(os.Getppid())
- if err != nil {
- t.Fatalf("getting parent oom_score_adj: %v", err)
- }
-
- testCases := []struct {
- Name string
-
- // OOMScoreAdj is the oom_score_adj set to the OCI spec. If nil then
- // no value is set. One value for each container. The first value is the
- // root container.
- OOMScoreAdj []*int
-
- // Expected is the expected oom_score_adj of the sandbox. If nil, then
- // this value is ignored.
- Expected *int
-
- // Remove is a set of container indexes to remove from the sandbox.
- Remove []int
-
- // ExpectedAfterRemove is the expected oom_score_adj of the sandbox
- // after containers are removed. Ignored if nil.
- ExpectedAfterRemove *int
- }{
- // A single container CRI test case. This should not happen in
- // practice as there should be at least one container besides the pause
- // container. However, we include a test case to ensure sane behavior.
- {
- Name: "single",
- OOMScoreAdj: []*int{&highOOMScoreAdj},
- Expected: &parentOOMScoreAdj,
- },
- {
- Name: "multi_no_value",
- OOMScoreAdj: []*int{nil, nil, nil},
- Expected: &parentOOMScoreAdj,
- },
- {
- Name: "multi_non_nil_root",
- OOMScoreAdj: []*int{&minOOMScoreAdj, nil, nil},
- Expected: &parentOOMScoreAdj,
- },
- {
- Name: "multi_value",
- OOMScoreAdj: []*int{&minOOMScoreAdj, &highOOMScoreAdj, &lowOOMScoreAdj},
- // The lowest value excluding the root container is expected.
- Expected: &lowOOMScoreAdj,
- },
- {
- Name: "multi_min_value",
- OOMScoreAdj: []*int{&minOOMScoreAdj, &lowOOMScoreAdj},
- // The lowest value excluding the root container is expected.
- Expected: &lowOOMScoreAdj,
- },
- {
- Name: "multi_max_value",
- OOMScoreAdj: []*int{&minOOMScoreAdj, &maxOOMScoreAdj, &highOOMScoreAdj},
- // The lowest value excluding the root container is expected.
- Expected: &highOOMScoreAdj,
- },
- {
- Name: "remove_adjusted",
- OOMScoreAdj: []*int{&minOOMScoreAdj, &maxOOMScoreAdj, &highOOMScoreAdj},
- // The lowest value excluding the root container is expected.
- Expected: &highOOMScoreAdj,
- // Remove highOOMScoreAdj container.
- Remove: []int{2},
- ExpectedAfterRemove: &maxOOMScoreAdj,
- },
- {
- // This test removes all non-root sandboxes with a specified oomScoreAdj.
- Name: "remove_to_nil",
- OOMScoreAdj: []*int{&minOOMScoreAdj, nil, &lowOOMScoreAdj},
- Expected: &lowOOMScoreAdj,
- // Remove lowOOMScoreAdj container.
- Remove: []int{2},
- // The oom_score_adj expected after remove is that of the parent process.
- ExpectedAfterRemove: &parentOOMScoreAdj,
- },
- {
- Name: "remove_no_effect",
- OOMScoreAdj: []*int{&minOOMScoreAdj, &maxOOMScoreAdj, &highOOMScoreAdj},
- // The lowest value excluding the root container is expected.
- Expected: &highOOMScoreAdj,
- // Remove the maxOOMScoreAdj container.
- Remove: []int{1},
- ExpectedAfterRemove: &highOOMScoreAdj,
- },
- }
-
- for _, testCase := range testCases {
- t.Run(testCase.Name, func(t *testing.T) {
- var cmds [][]string
- var oomScoreAdj []*int
- var toRemove []string
-
- for _, oomScore := range testCase.OOMScoreAdj {
- oomScoreAdj = append(oomScoreAdj, oomScore)
- cmds = append(cmds, []string{"sleep", "100"})
- }
-
- specs, ids := createSpecs(cmds...)
- for i, spec := range specs {
- // Ensure the correct value is set, including no value.
- spec.Process.OOMScoreAdj = oomScoreAdj[i]
-
- for _, j := range testCase.Remove {
- if i == j {
- toRemove = append(toRemove, ids[i])
- }
- }
- }
-
- containers, cleanup, err := startContainers(t, specs, ids)
- if err != nil {
- t.Fatalf("error starting containers: %v", err)
- }
- defer cleanup()
-
- for i, c := range containers {
- if oomScoreAdj[i] != nil {
- // Verify the gofer's oom_score_adj
- score, err := specutils.GetOOMScoreAdj(c.GoferPid)
- if err != nil {
- t.Fatalf("error reading gofer oom_score_adj: %v", err)
- }
- if score != *oomScoreAdj[i] {
- t.Errorf("gofer oom_score_adj got: %d, want: %d", score, *oomScoreAdj[i])
- }
- }
- }
-
- // Verify the sandbox's oom_score_adj.
- //
- // The sandbox should be the same for all containers so just use
- // the first one.
- sandboxPid := containers[0].Sandbox.Pid
- if testCase.Expected != nil {
- score, err := specutils.GetOOMScoreAdj(sandboxPid)
- if err != nil {
- t.Fatalf("error reading sandbox oom_score_adj: %v", err)
- }
- if score != *testCase.Expected {
- t.Errorf("sandbox oom_score_adj got: %d, want: %d", score, *testCase.Expected)
- }
- }
-
- if len(toRemove) == 0 {
- return
- }
-
- // Remove containers.
- for _, removeID := range toRemove {
- for _, c := range containers {
- if c.ID == removeID {
- c.Destroy()
- }
- }
- }
-
- // Check the new adjusted oom_score_adj.
- if testCase.ExpectedAfterRemove != nil {
- scoreAfterRemove, err := specutils.GetOOMScoreAdj(sandboxPid)
- if err != nil {
- t.Fatalf("error reading sandbox oom_score_adj: %v", err)
- }
- if scoreAfterRemove != *testCase.ExpectedAfterRemove {
- t.Errorf("sandbox oom_score_adj got: %d, want: %d", scoreAfterRemove, *testCase.ExpectedAfterRemove)
- }
- }
- })
- }
-}
-
-func createSpecs(cmds ...[]string) ([]*specs.Spec, []string) {
- var specs []*specs.Spec
- var ids []string
- rootID := testutil.RandomContainerID()
-
- for i, cmd := range cmds {
- spec := testutil.NewSpecWithArgs(cmd...)
- if i == 0 {
- spec.Annotations = map[string]string{
- specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeSandbox,
- }
- ids = append(ids, rootID)
- } else {
- spec.Annotations = map[string]string{
- specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeContainer,
- specutils.ContainerdSandboxIDAnnotation: rootID,
- }
- ids = append(ids, testutil.RandomContainerID())
- }
- specs = append(specs, spec)
- }
- return specs, ids
-}
-
-func startContainers(t *testing.T, specs []*specs.Spec, ids []string) ([]*container.Container, func(), error) {
- var containers []*container.Container
-
- // All containers must share the same root.
- rootDir, clean, err := testutil.SetupRootDir()
- if err != nil {
- t.Fatalf("error creating root dir: %v", err)
- }
- cu := cleanup.Make(clean)
- defer cu.Clean()
-
-	// Point the test configuration at the shared root directory.
- conf := testutil.TestConfig(t)
- conf.RootDir = rootDir
-
- for i, spec := range specs {
- bundleDir, clean, err := testutil.SetupBundleDir(spec)
- if err != nil {
- return nil, nil, fmt.Errorf("error setting up bundle: %v", err)
- }
- cu.Add(clean)
-
- args := container.Args{
- ID: ids[i],
- Spec: spec,
- BundleDir: bundleDir,
- }
- cont, err := container.New(conf, args)
- if err != nil {
- return nil, nil, fmt.Errorf("error creating container: %v", err)
- }
- containers = append(containers, cont)
-
- if err := cont.Start(conf); err != nil {
- return nil, nil, fmt.Errorf("error starting container: %v", err)
- }
- }
-
- return containers, cu.Release(), nil
-}
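For context, the oom_score_adj values asserted above are only reachable through procfs, so specutils.GetOOMScoreAdj presumably reads /proc/<pid>/oom_score_adj. A minimal, self-contained sketch of that lookup (readOOMScoreAdj is a hypothetical helper name, not the deleted implementation) could look like:

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// readOOMScoreAdj returns the oom_score_adj of the given PID by reading
// /proc/<pid>/oom_score_adj, the interface the kernel exposes for this value.
func readOOMScoreAdj(pid int) (int, error) {
	data, err := os.ReadFile(fmt.Sprintf("/proc/%d/oom_score_adj", pid))
	if err != nil {
		return 0, err
	}
	return strconv.Atoi(strings.TrimSpace(string(data)))
}

func main() {
	score, err := readOOMScoreAdj(os.Getpid())
	if err != nil {
		panic(err)
	}
	fmt.Println("oom_score_adj:", score)
}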
diff --git a/test/root/root.go b/test/root/root.go
deleted file mode 100644
index 441fa5e2e..000000000
--- a/test/root/root.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package root is used for tests that require sysadmin privileges to run. First,
-// follow the setup instructions in runsc/test/README.md. You should also have
-// docker, containerd, and crictl installed. To run these tests from the
-// project root directory:
-//
-// make root-tests
-package root
diff --git a/test/root/runsc_test.go b/test/root/runsc_test.go
deleted file mode 100644
index 25204bebb..000000000
--- a/test/root/runsc_test.go
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package root
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "os"
- "os/exec"
- "path/filepath"
- "strconv"
- "strings"
- "testing"
- "time"
-
- "github.com/cenkalti/backoff"
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/test/testutil"
- "gvisor.dev/gvisor/runsc/specutils"
-)
-
-// TestDoKill checks that when "runsc do..." is killed, the sandbox process is
-// also terminated. This ensures that the parent death signal is propagated to
-// the sandbox process correctly.
-func TestDoKill(t *testing.T) {
- // Make the sandbox process be reparented here when it's killed, so we can
- // wait for it.
- if err := unix.Prctl(unix.PR_SET_CHILD_SUBREAPER, 1, 0, 0, 0); err != nil {
- t.Fatalf("prctl(PR_SET_CHILD_SUBREAPER): %v", err)
- }
-
- cmd := exec.Command(specutils.ExePath, "do", "sleep", "10000")
- buf := &bytes.Buffer{}
- cmd.Stdout = buf
- cmd.Stderr = buf
- cmd.Start()
-
- var pid int
- findSandbox := func() error {
- var err error
- pid, err = sandboxPid(cmd.Process.Pid)
- if err != nil {
- return &backoff.PermanentError{Err: err}
- }
- if pid == 0 {
- return fmt.Errorf("sandbox process not found")
- }
- return nil
- }
- if err := testutil.Poll(findSandbox, 10*time.Second); err != nil {
- t.Fatalf("failed to find sandbox: %v", err)
- }
- t.Logf("Found sandbox, pid: %d", pid)
-
- if err := cmd.Process.Kill(); err != nil {
- t.Fatalf("failed to kill run process: %v", err)
- }
- cmd.Wait()
- t.Logf("Parent process killed (%d). Output: %s", cmd.Process.Pid, buf.String())
-
- ch := make(chan struct{})
- go func() {
- defer func() { ch <- struct{}{} }()
- t.Logf("Waiting for sandbox process (%d) termination", pid)
- if _, err := unix.Wait4(pid, nil, 0, nil); err != nil {
- t.Errorf("error waiting for sandbox process (%d): %v", pid, err)
- }
- }()
- select {
- case <-ch:
- // Done
- case <-time.After(5 * time.Second):
- t.Fatalf("timeout waiting for sandbox process (%d) to exit", pid)
- }
-}
-
-// sandboxPid looks for the sandbox process inside the process tree starting
-// from "pid". It returns 0 and no error if no sandbox process is found. It
-// returns an error if anything fails.
-func sandboxPid(pid int) (int, error) {
- cmd := exec.Command("pgrep", "-P", strconv.Itoa(pid))
- buf := &bytes.Buffer{}
- cmd.Stdout = buf
- if err := cmd.Start(); err != nil {
- return 0, err
- }
- ps, err := cmd.Process.Wait()
- if err != nil {
- return 0, err
- }
- if ps.ExitCode() == 1 {
- // pgrep returns 1 when no process is found.
- return 0, nil
- }
-
- var children []int
- for _, line := range strings.Split(buf.String(), "\n") {
- if len(line) == 0 {
- continue
- }
- child, err := strconv.Atoi(line)
- if err != nil {
- return 0, err
- }
-
- cmdline, err := ioutil.ReadFile(filepath.Join("/proc", line, "cmdline"))
- if err != nil {
- if os.IsNotExist(err) {
- // Raced with process exit.
- continue
- }
- return 0, err
- }
- args := strings.SplitN(string(cmdline), "\x00", 2)
- if len(args) == 0 {
- return 0, fmt.Errorf("malformed cmdline file: %q", cmdline)
- }
- // The sandbox process has the first argument set to "runsc-sandbox".
- if args[0] == "runsc-sandbox" {
- return child, nil
- }
-
- children = append(children, child)
- }
-
- // Sandbox process wasn't found, try another level down.
- for _, pid := range children {
- sand, err := sandboxPid(pid)
- if err != nil {
- return 0, err
- }
- if sand != 0 {
- return sand, nil
- }
- // Not found, continue the search.
- }
- return 0, nil
-}
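The parent-death-signal behavior exercised by TestDoKill is the standard Linux PR_SET_PDEATHSIG mechanism. As a point of reference only (this is not the runsc implementation), a Go parent can request it for a child via SysProcAttr; the sketch below shows the same mechanism the test expects runsc to have applied to the sandbox process:

package main

import (
	"os/exec"

	"golang.org/x/sys/unix"
)

func main() {
	cmd := exec.Command("sleep", "10000")
	// Ask the kernel to send SIGKILL to the child if the parent thread dies,
	// mirroring the behavior TestDoKill expects of the sandbox process.
	cmd.SysProcAttr = &unix.SysProcAttr{Pdeathsig: unix.SIGKILL}
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	_ = cmd.Wait()
}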
diff --git a/test/runner/BUILD b/test/runner/BUILD
deleted file mode 100644
index 2d93aa6af..000000000
--- a/test/runner/BUILD
+++ /dev/null
@@ -1,30 +0,0 @@
-load("//tools:defs.bzl", "bzl_library", "go_binary")
-
-package(licenses = ["notice"])
-
-go_binary(
- name = "runner",
- testonly = 1,
- srcs = ["main.go"],
- data = [
- "//runsc",
- "//test/runner/setup_container",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/log",
- "//pkg/test/testutil",
- "//runsc/specutils",
- "//test/runner/gtest",
- "//test/uds",
- "@com_github_opencontainers_runtime_spec//specs-go:go_default_library",
- "@com_github_syndtr_gocapability//capability:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-bzl_library(
- name = "defs_bzl",
- srcs = ["defs.bzl"],
- visibility = ["//visibility:private"],
-)
diff --git a/test/runner/defs.bzl b/test/runner/defs.bzl
deleted file mode 100644
index 405e03832..000000000
--- a/test/runner/defs.bzl
+++ /dev/null
@@ -1,238 +0,0 @@
-"""Defines a rule for syscall test targets."""
-
-load("//tools:defs.bzl", "default_platform", "platforms")
-
-def _runner_test_impl(ctx):
- # Generate a runner binary.
- runner = ctx.actions.declare_file(ctx.label.name)
- runner_content = "\n".join([
- "#!/bin/bash",
- "set -euf -x -o pipefail",
- "if [[ -n \"${TEST_UNDECLARED_OUTPUTS_DIR}\" ]]; then",
- " mkdir -p \"${TEST_UNDECLARED_OUTPUTS_DIR}\"",
- " chmod a+rwx \"${TEST_UNDECLARED_OUTPUTS_DIR}\"",
- "fi",
- "exec %s %s \"$@\" %s\n" % (
- ctx.files.runner[0].short_path,
- " ".join(ctx.attr.runner_args),
- ctx.files.test[0].short_path,
- ),
- ])
- ctx.actions.write(runner, runner_content, is_executable = True)
-
- # Return with all transitive files.
- runfiles = ctx.runfiles(
- transitive_files = depset(transitive = [
- target.data_runfiles.files
- for target in (ctx.attr.runner, ctx.attr.test)
- if hasattr(target, "data_runfiles")
- ]),
- files = ctx.files.runner + ctx.files.test,
- collect_default = True,
- collect_data = True,
- )
- return [DefaultInfo(executable = runner, runfiles = runfiles)]
-
-_runner_test = rule(
- attrs = {
- "runner": attr.label(
- default = "//test/runner:runner",
- ),
- "test": attr.label(
- mandatory = True,
- ),
- "runner_args": attr.string_list(),
- "data": attr.label_list(
- allow_files = True,
- ),
- },
- test = True,
- implementation = _runner_test_impl,
-)
-
-def _syscall_test(
- test,
- platform,
- use_tmpfs,
- tags,
- debug,
- network = "none",
- file_access = "exclusive",
- overlay = False,
- add_uds_tree = False,
- vfs2 = False,
- fuse = False,
- **kwargs):
- # Prepend "runsc" to non-native platform names.
- full_platform = platform if platform == "native" else "runsc_" + platform
-
- # Name the test appropriately.
- name = test.split(":")[1] + "_" + full_platform
- if file_access == "shared":
- name += "_shared"
- if overlay:
- name += "_overlay"
- if vfs2:
- name += "_vfs2"
- if fuse:
- name += "_fuse"
- if network != "none":
- name += "_" + network + "net"
-
- # Apply all tags.
- if tags == None:
- tags = []
-
- # Add the full_platform and file access in a tag to make it easier to run
- # all the tests on a specific flavor. Use --test_tag_filters=ptrace,file_shared.
- tags = list(tags)
- tags += [full_platform, "file_" + file_access]
-
- # Hash this target into one of 15 buckets. This can be used to
- # randomly split targets between different workflows.
- hash15 = hash(native.package_name() + name) % 15
- tags.append("hash15:" + str(hash15))
- tags.append("hash15")
-
- # Disable off-host networking.
- tags.append("requires-net:loopback")
- tags.append("requires-net:ipv4")
- tags.append("block-network")
-
- # gotsan makes sense only if tests are running in gVisor.
- if platform == "native":
- tags.append("nogotsan")
-
- container = "container" in tags
-
- runner_args = [
- # Arguments are passed directly to runner binary.
- "--platform=" + platform,
- "--network=" + network,
- "--use-tmpfs=" + str(use_tmpfs),
- "--file-access=" + file_access,
- "--overlay=" + str(overlay),
- "--add-uds-tree=" + str(add_uds_tree),
- "--vfs2=" + str(vfs2),
- "--fuse=" + str(fuse),
- "--strace=" + str(debug),
- "--debug=" + str(debug),
- "--container=" + str(container),
- ]
-
- # Call the rule above.
- _runner_test(
- name = name,
- test = test,
- runner_args = runner_args,
- tags = tags,
- **kwargs
- )
-
-def syscall_test(
- test,
- use_tmpfs = False,
- add_overlay = False,
- add_uds_tree = False,
- add_hostinet = False,
- vfs2 = True,
- fuse = False,
- debug = True,
- tags = None,
- **kwargs):
- """syscall_test is a macro that will create targets for all platforms.
-
- Args:
- test: the test target.
- use_tmpfs: use tmpfs in the defined tests.
- add_overlay: add an overlay test.
- add_uds_tree: add a UDS test.
- add_hostinet: add a hostinet test.
- vfs2: enable VFS2 support.
- fuse: enable FUSE support.
- debug: enable debug output.
- tags: starting test tags.
- **kwargs: additional test arguments.
- """
- if not tags:
- tags = []
-
- if vfs2 and not fuse:
- # Generate a vfs1 plain test. Most testing will now be
- # biased towards vfs2, with only a single vfs1 case.
- _syscall_test(
- test = test,
- platform = default_platform,
- use_tmpfs = use_tmpfs,
- add_uds_tree = add_uds_tree,
- tags = tags + platforms[default_platform],
- debug = debug,
- vfs2 = False,
- **kwargs
- )
-
- if not fuse:
- # Generate a native test if fuse is not required.
- _syscall_test(
- test = test,
- platform = "native",
- use_tmpfs = False,
- add_uds_tree = add_uds_tree,
- tags = tags,
- debug = debug,
- **kwargs
- )
-
- for (platform, platform_tags) in platforms.items():
- _syscall_test(
- test = test,
- platform = platform,
- use_tmpfs = use_tmpfs,
- add_uds_tree = add_uds_tree,
- tags = platform_tags + tags,
- fuse = fuse,
- vfs2 = vfs2,
- debug = debug,
- **kwargs
- )
-
- if add_overlay:
- _syscall_test(
- test = test,
- platform = default_platform,
- use_tmpfs = use_tmpfs,
- add_uds_tree = add_uds_tree,
- tags = platforms[default_platform] + tags,
- debug = debug,
- fuse = fuse,
- vfs2 = vfs2,
- overlay = True,
- **kwargs
- )
- if add_hostinet:
- _syscall_test(
- test = test,
- platform = default_platform,
- use_tmpfs = use_tmpfs,
- network = "host",
- add_uds_tree = add_uds_tree,
- tags = platforms[default_platform] + tags,
- debug = debug,
- fuse = fuse,
- vfs2 = vfs2,
- **kwargs
- )
- if not use_tmpfs:
- # Also test shared gofer access.
- _syscall_test(
- test = test,
- platform = default_platform,
- use_tmpfs = use_tmpfs,
- add_uds_tree = add_uds_tree,
- tags = platforms[default_platform] + tags,
- debug = debug,
- file_access = "shared",
- fuse = fuse,
- vfs2 = vfs2,
- **kwargs
- )
diff --git a/test/runner/gtest/BUILD b/test/runner/gtest/BUILD
deleted file mode 100644
index de4b2727c..000000000
--- a/test/runner/gtest/BUILD
+++ /dev/null
@@ -1,9 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "gtest",
- srcs = ["gtest.go"],
- visibility = ["//:sandbox"],
-)
diff --git a/test/runner/gtest/gtest.go b/test/runner/gtest/gtest.go
deleted file mode 100644
index 38e57d62f..000000000
--- a/test/runner/gtest/gtest.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package gtest contains helpers for running google-test tests from Go.
-package gtest
-
-import (
- "fmt"
- "os/exec"
- "strings"
-)
-
-var (
- // listTestFlag is the flag that will list tests in gtest binaries.
- listTestFlag = "--gtest_list_tests"
-
- // filterTestFlag is the flag that will filter tests in gtest binaries.
- filterTestFlag = "--gtest_filter"
-
-	// listBenchmarkFlag is the flag that will list benchmarks in gtest binaries.
- listBenchmarkFlag = "--benchmark_list_tests"
-
- // filterBenchmarkFlag is the flag that will run specified benchmarks.
- filterBenchmarkFlag = "--benchmark_filter"
-)
-
-// TestCase is a single gtest test case.
-type TestCase struct {
- // Suite is the suite for this test.
- Suite string
-
- // Name is the name of this individual test.
- Name string
-
-	// all indicates that this will run without flags. This takes
-	// precedence over benchmark below.
- all bool
-
- // benchmark indicates that this is a benchmark. In this case, the
- // suite will be empty, and we will use the appropriate test and
- // benchmark flags.
- benchmark bool
-}
-
-// FullName returns the name of the test including the suite. It is suitable to
-// pass to "--gtest_filter".
-func (tc TestCase) FullName() string {
- return fmt.Sprintf("%s.%s", tc.Suite, tc.Name)
-}
-
-// Args returns arguments to be passed when invoking the test.
-func (tc TestCase) Args() []string {
- if tc.all {
- return []string{} // No arguments.
- }
- if tc.benchmark {
- return []string{
- fmt.Sprintf("%s=^%s$", filterBenchmarkFlag, tc.Name),
- fmt.Sprintf("%s=", filterTestFlag),
- }
- }
- return []string{
- fmt.Sprintf("%s=%s", filterTestFlag, tc.FullName()),
- }
-}
-
-// ParseTestCases calls a gtest test binary to list its tests and returns a
-// slice with the name and suite of each test.
-//
-// If benchmarks is true, then benchmarks will be included in the list of test
-// cases provided. Note that this requires the binary to support the
-// --benchmark_list_tests flag.
-func ParseTestCases(testBin string, benchmarks bool, extraArgs ...string) ([]TestCase, error) {
- // Run to extract test cases.
- args := append([]string{listTestFlag}, extraArgs...)
- cmd := exec.Command(testBin, args...)
- out, err := cmd.Output()
- if err != nil {
- // We failed to list tests with the given flags. Just
- // return something that will run the binary with no
- // flags, which should execute all tests.
- return []TestCase{
- {
- Suite: "Default",
- Name: "All",
- all: true,
- },
- }, nil
- }
-
- // Parse test output.
- var t []TestCase
- var suite string
- for _, line := range strings.Split(string(out), "\n") {
- // Strip comments.
- line = strings.Split(line, "#")[0]
-
- // New suite?
- if !strings.HasPrefix(line, " ") {
- suite = strings.TrimSuffix(strings.TrimSpace(line), ".")
- continue
- }
-
- // Individual test.
- name := strings.TrimSpace(line)
-
- // Do we have a suite yet?
- if suite == "" {
- return nil, fmt.Errorf("test without a suite: %v", name)
- }
-
- // Add this individual test.
- t = append(t, TestCase{
- Suite: suite,
- Name: name,
- })
- }
-
- // Finished?
- if !benchmarks {
- return t, nil
- }
-
- // Run again to extract benchmarks.
- args = append([]string{listBenchmarkFlag}, extraArgs...)
- cmd = exec.Command(testBin, args...)
- out, err = cmd.Output()
- if err != nil {
- // We were able to enumerate tests above, but not benchmarks?
- // We requested them, so we return an error in this case.
- exitErr, ok := err.(*exec.ExitError)
- if !ok {
- return nil, fmt.Errorf("could not enumerate gtest benchmarks: %v", err)
- }
- return nil, fmt.Errorf("could not enumerate gtest benchmarks: %v\nstderr\n%s", err, exitErr.Stderr)
- }
-
- benches := strings.Trim(string(out), "\n")
- if len(benches) == 0 {
- return t, nil
- }
-
- // Parse benchmark output.
- for _, line := range strings.Split(benches, "\n") {
- // Strip comments.
- line = strings.Split(line, "#")[0]
-
- // Single benchmark.
- name := strings.TrimSpace(line)
-
- // Add the single benchmark.
- t = append(t, TestCase{
- Suite: "Benchmarks",
- Name: name,
- benchmark: true,
- })
- }
- return t, nil
-}
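A stripped-down usage sketch of this package follows; the binary path is hypothetical, and the loop only prints what the real runner would pass back to the gtest binary per case:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/test/runner/gtest"
)

func main() {
	// Enumerate test cases from a (hypothetical) gtest binary, without
	// including benchmarks.
	cases, err := gtest.ParseTestCases("./syscall_test_binary", false)
	if err != nil {
		panic(err)
	}
	for _, tc := range cases {
		// Each case would be re-run with --gtest_filter=<Suite>.<Name>.
		fmt.Println(tc.FullName(), tc.Args())
	}
}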
diff --git a/test/runner/main.go b/test/runner/main.go
deleted file mode 100644
index 34e9c6279..000000000
--- a/test/runner/main.go
+++ /dev/null
@@ -1,542 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Binary syscall_test_runner runs the syscall test suites in gVisor
-// containers and on the host platform.
-package main
-
-import (
- "flag"
- "fmt"
- "io/ioutil"
- "os"
- "os/exec"
- "os/signal"
- "path/filepath"
- "strings"
- "syscall"
- "testing"
- "time"
-
- specs "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/syndtr/gocapability/capability"
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/log"
- "gvisor.dev/gvisor/pkg/test/testutil"
- "gvisor.dev/gvisor/runsc/specutils"
- "gvisor.dev/gvisor/test/runner/gtest"
- "gvisor.dev/gvisor/test/uds"
-)
-
-var (
- debug = flag.Bool("debug", false, "enable debug logs")
- strace = flag.Bool("strace", false, "enable strace logs")
- platform = flag.String("platform", "ptrace", "platform to run on")
- network = flag.String("network", "none", "network stack to run on (sandbox, host, none)")
- useTmpfs = flag.Bool("use-tmpfs", false, "mounts tmpfs for /tmp")
- fileAccess = flag.String("file-access", "exclusive", "mounts root in exclusive or shared mode")
- overlay = flag.Bool("overlay", false, "wrap filesystem mounts with writable tmpfs overlay")
- vfs2 = flag.Bool("vfs2", false, "enable VFS2")
- fuse = flag.Bool("fuse", false, "enable FUSE")
- container = flag.Bool("container", false, "run tests in their own namespaces (user ns, network ns, etc), pretending to be root")
- setupContainerPath = flag.String("setup-container", "", "path to setup_container binary (for use with --container)")
- runscPath = flag.String("runsc", "", "path to runsc binary")
-
- addUDSTree = flag.Bool("add-uds-tree", false, "expose a tree of UDS utilities for use in tests")
- // TODO(gvisor.dev/issue/4572): properly support leak checking for runsc, and
- // set to true as the default for the test runner.
- leakCheck = flag.Bool("leak-check", false, "check for reference leaks")
-)
-
-// runTestCaseNative runs the test case directly on the host machine.
-func runTestCaseNative(testBin string, tc gtest.TestCase, t *testing.T) {
- // These tests might be running in parallel, so make sure they have a
- // unique test temp dir.
- tmpDir, err := ioutil.TempDir(testutil.TmpDir(), "")
- if err != nil {
- t.Fatalf("could not create temp dir: %v", err)
- }
- defer os.RemoveAll(tmpDir)
-
- // Replace TEST_TMPDIR in the current environment with something
- // unique.
- env := os.Environ()
- newEnvVar := "TEST_TMPDIR=" + tmpDir
- var found bool
- for i, kv := range env {
- if strings.HasPrefix(kv, "TEST_TMPDIR=") {
- env[i] = newEnvVar
- found = true
- break
- }
- }
- if !found {
- env = append(env, newEnvVar)
- }
- // Remove shard env variables so that the gunit binary does not try to
- // interpret them.
- env = filterEnv(env, []string{"TEST_SHARD_INDEX", "TEST_TOTAL_SHARDS", "GTEST_SHARD_INDEX", "GTEST_TOTAL_SHARDS"})
-
- if *addUDSTree {
- socketDir, cleanup, err := uds.CreateSocketTree("/tmp")
- if err != nil {
- t.Fatalf("failed to create socket tree: %v", err)
- }
- defer cleanup()
-
- env = append(env, "TEST_UDS_TREE="+socketDir)
- // On Linux, the concept of "attach" location doesn't exist.
-		// Just pass the same path to make these tests identical.
- env = append(env, "TEST_UDS_ATTACH_TREE="+socketDir)
- }
-
- cmd := exec.Command(testBin, tc.Args()...)
- cmd.Env = env
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
- cmd.SysProcAttr = &unix.SysProcAttr{}
-
- if *container {
- // setup_container takes in its target argv as positional arguments.
- cmd.Path = *setupContainerPath
- cmd.Args = append([]string{cmd.Path}, cmd.Args...)
- cmd.SysProcAttr = &unix.SysProcAttr{
- Cloneflags: unix.CLONE_NEWUSER | unix.CLONE_NEWNET | unix.CLONE_NEWIPC | unix.CLONE_NEWUTS,
- // Set current user/group as root inside the namespace.
- UidMappings: []syscall.SysProcIDMap{
- {ContainerID: 0, HostID: os.Getuid(), Size: 1},
- },
- GidMappings: []syscall.SysProcIDMap{
- {ContainerID: 0, HostID: os.Getgid(), Size: 1},
- },
- GidMappingsEnableSetgroups: false,
- Credential: &syscall.Credential{
- Uid: 0,
- Gid: 0,
- },
- }
- }
-
- if specutils.HasCapabilities(capability.CAP_SYS_ADMIN) {
- cmd.SysProcAttr.Cloneflags |= unix.CLONE_NEWUTS
- }
-
- if specutils.HasCapabilities(capability.CAP_NET_ADMIN) {
- cmd.SysProcAttr.Cloneflags |= unix.CLONE_NEWNET
- }
-
- if err := cmd.Run(); err != nil {
- ws := err.(*exec.ExitError).Sys().(syscall.WaitStatus)
- t.Errorf("test %q exited with status %d, want 0", tc.FullName(), ws.ExitStatus())
- }
-}
-
-// runRunsc runs spec in runsc in a standard test configuration.
-//
-// runsc logs will be saved to a path in TEST_UNDECLARED_OUTPUTS_DIR.
-//
-// Returns an error if the sandboxed application exits non-zero.
-func runRunsc(tc gtest.TestCase, spec *specs.Spec) error {
- bundleDir, cleanup, err := testutil.SetupBundleDir(spec)
- if err != nil {
- return fmt.Errorf("SetupBundleDir failed: %v", err)
- }
- defer cleanup()
-
- rootDir, cleanup, err := testutil.SetupRootDir()
- if err != nil {
- return fmt.Errorf("SetupRootDir failed: %v", err)
- }
- defer cleanup()
-
- name := tc.FullName()
- id := testutil.RandomContainerID()
- log.Infof("Running test %q in container %q", name, id)
- specutils.LogSpec(spec)
-
- args := []string{
- "-root", rootDir,
- "-network", *network,
- "-log-format=text",
- "-TESTONLY-unsafe-nonroot=true",
- "-net-raw=true",
- fmt.Sprintf("-panic-signal=%d", unix.SIGTERM),
- "-watchdog-action=panic",
- "-platform", *platform,
- "-file-access", *fileAccess,
- }
- if *overlay {
- args = append(args, "-overlay")
- }
- if *vfs2 {
- args = append(args, "-vfs2")
- if *fuse {
- args = append(args, "-fuse")
- }
- }
- if *debug {
- args = append(args, "-debug", "-log-packets=true")
- }
- if *strace {
- args = append(args, "-strace")
- }
- if *addUDSTree {
- args = append(args, "-fsgofer-host-uds")
- }
- if *leakCheck {
- args = append(args, "-ref-leak-mode=log-names")
- }
-
- testLogDir := ""
- if undeclaredOutputsDir, ok := unix.Getenv("TEST_UNDECLARED_OUTPUTS_DIR"); ok {
- // Create log directory dedicated for this test.
- testLogDir = filepath.Join(undeclaredOutputsDir, strings.Replace(name, "/", "_", -1))
- if err := os.MkdirAll(testLogDir, 0755); err != nil {
- return fmt.Errorf("could not create test dir: %v", err)
- }
- debugLogDir, err := ioutil.TempDir(testLogDir, "runsc")
- if err != nil {
- return fmt.Errorf("could not create temp dir: %v", err)
- }
- debugLogDir += "/"
- log.Infof("runsc logs: %s", debugLogDir)
- args = append(args, "-debug-log", debugLogDir)
- args = append(args, "-coverage-report", debugLogDir)
-
-		// The default -log sends messages to stderr, which makes reading the
-		// test log difficult. Instead, drop them when the debug log is enabled,
-		// since that is a better place for these messages.
- args = append(args, "-log=/dev/null")
- }
-
- // Current process doesn't have CAP_SYS_ADMIN, create user namespace and run
- // as root inside that namespace to get it.
- rArgs := append(args, "run", "--bundle", bundleDir, id)
- cmd := exec.Command(*runscPath, rArgs...)
- cmd.SysProcAttr = &unix.SysProcAttr{
- Cloneflags: unix.CLONE_NEWUSER | unix.CLONE_NEWNS,
- // Set current user/group as root inside the namespace.
- UidMappings: []syscall.SysProcIDMap{
- {ContainerID: 0, HostID: os.Getuid(), Size: 1},
- },
- GidMappings: []syscall.SysProcIDMap{
- {ContainerID: 0, HostID: os.Getgid(), Size: 1},
- },
- GidMappingsEnableSetgroups: false,
- Credential: &syscall.Credential{
- Uid: 0,
- Gid: 0,
- },
- }
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
- sig := make(chan os.Signal, 1)
- defer close(sig)
- signal.Notify(sig, unix.SIGTERM)
- defer signal.Stop(sig)
- go func() {
- s, ok := <-sig
- if !ok {
- return
- }
- log.Warningf("%s: Got signal: %v", name, s)
- done := make(chan bool, 1)
- dArgs := append([]string{}, args...)
- dArgs = append(dArgs, "-alsologtostderr=true", "debug", "--stacks", id)
- go func(dArgs []string) {
- debug := exec.Command(*runscPath, dArgs...)
- debug.Stdout = os.Stdout
- debug.Stderr = os.Stderr
- debug.Run()
- done <- true
- }(dArgs)
-
- timeout := time.After(3 * time.Second)
- select {
- case <-timeout:
-			log.Infof("runsc debug --stacks timed out")
- case <-done:
- }
-
- log.Warningf("Send SIGTERM to the sandbox process")
- dArgs = append(args, "debug",
- fmt.Sprintf("--signal=%d", unix.SIGTERM),
- id)
- signal := exec.Command(*runscPath, dArgs...)
- signal.Stdout = os.Stdout
- signal.Stderr = os.Stderr
- signal.Run()
- }()
-
- err = cmd.Run()
- if err == nil && len(testLogDir) > 0 {
- // If the test passed, then we erase the log directory. This speeds up
- // uploading logs in continuous integration & saves on disk space.
- os.RemoveAll(testLogDir)
- }
-
- return err
-}
-
-// setupUDSTree updates the spec to expose a UDS tree for gofer socket testing.
-func setupUDSTree(spec *specs.Spec) (cleanup func(), err error) {
- socketDir, cleanup, err := uds.CreateSocketTree("/tmp")
- if err != nil {
- return nil, fmt.Errorf("failed to create socket tree: %v", err)
- }
-
- // Standard access to entire tree.
- spec.Mounts = append(spec.Mounts, specs.Mount{
- Destination: "/tmp/sockets",
- Source: socketDir,
- Type: "bind",
- })
-
-	// Individual attach points for each socket to test mounts that attach
- // directly to the sockets.
- spec.Mounts = append(spec.Mounts, specs.Mount{
- Destination: "/tmp/sockets-attach/stream/echo",
- Source: filepath.Join(socketDir, "stream/echo"),
- Type: "bind",
- })
- spec.Mounts = append(spec.Mounts, specs.Mount{
- Destination: "/tmp/sockets-attach/stream/nonlistening",
- Source: filepath.Join(socketDir, "stream/nonlistening"),
- Type: "bind",
- })
- spec.Mounts = append(spec.Mounts, specs.Mount{
- Destination: "/tmp/sockets-attach/seqpacket/echo",
- Source: filepath.Join(socketDir, "seqpacket/echo"),
- Type: "bind",
- })
- spec.Mounts = append(spec.Mounts, specs.Mount{
- Destination: "/tmp/sockets-attach/seqpacket/nonlistening",
- Source: filepath.Join(socketDir, "seqpacket/nonlistening"),
- Type: "bind",
- })
- spec.Mounts = append(spec.Mounts, specs.Mount{
- Destination: "/tmp/sockets-attach/dgram/null",
- Source: filepath.Join(socketDir, "dgram/null"),
- Type: "bind",
- })
-
- spec.Process.Env = append(spec.Process.Env, "TEST_UDS_TREE=/tmp/sockets")
- spec.Process.Env = append(spec.Process.Env, "TEST_UDS_ATTACH_TREE=/tmp/sockets-attach")
-
- return cleanup, nil
-}
-
-// runTestCaseRunsc runs the test case in runsc.
-func runTestCaseRunsc(testBin string, tc gtest.TestCase, t *testing.T) {
- // Run a new container with the test executable and filter for the
- // given test suite and name.
- spec := testutil.NewSpecWithArgs(append([]string{testBin}, tc.Args()...)...)
-
- // Mark the root as writeable, as some tests attempt to
- // write to the rootfs, and expect EACCES, not EROFS.
- spec.Root.Readonly = false
-
- // Test spec comes with pre-defined mounts that we don't want. Reset it.
- spec.Mounts = nil
- testTmpDir := "/tmp"
- if *useTmpfs {
-		// Force '/tmp' to be mounted as tmpfs, otherwise tests that rely on
-		// features only available in gVisor's internal tmpfs may fail.
- spec.Mounts = append(spec.Mounts, specs.Mount{
- Destination: "/tmp",
- Type: "tmpfs",
- })
- } else {
- // Use a gofer-backed directory as '/tmp'.
- //
- // Tests might be running in parallel, so make sure each has a
- // unique test temp dir.
- //
- // Some tests (e.g., sticky) access this mount from other
- // users, so make sure it is world-accessible.
- tmpDir, err := ioutil.TempDir(testutil.TmpDir(), "")
- if err != nil {
- t.Fatalf("could not create temp dir: %v", err)
- }
- defer os.RemoveAll(tmpDir)
-
- if err := os.Chmod(tmpDir, 0777); err != nil {
- t.Fatalf("could not chmod temp dir: %v", err)
- }
-
- // "/tmp" is not replaced with a tmpfs mount inside the sandbox
- // when it's not empty. This ensures that testTmpDir uses gofer
- // in exclusive mode.
- testTmpDir = tmpDir
- if *fileAccess == "shared" {
- // All external mounts except the root mount are shared.
- spec.Mounts = append(spec.Mounts, specs.Mount{
- Type: "bind",
- Destination: "/tmp",
- Source: tmpDir,
- })
- testTmpDir = "/tmp"
- }
- }
-
- // Set environment variables that indicate we are running in gVisor with
- // the given platform, network, and filesystem stack.
- platformVar := "TEST_ON_GVISOR"
- networkVar := "GVISOR_NETWORK"
- env := append(os.Environ(), platformVar+"="+*platform, networkVar+"="+*network)
- vfsVar := "GVISOR_VFS"
- if *vfs2 {
- env = append(env, vfsVar+"=VFS2")
- fuseVar := "FUSE_ENABLED"
- if *fuse {
- env = append(env, fuseVar+"=TRUE")
- } else {
- env = append(env, fuseVar+"=FALSE")
- }
- } else {
- env = append(env, vfsVar+"=VFS1")
- }
-
- // Remove shard env variables so that the gunit binary does not try to
- // interpret them.
- env = filterEnv(env, []string{"TEST_SHARD_INDEX", "TEST_TOTAL_SHARDS", "GTEST_SHARD_INDEX", "GTEST_TOTAL_SHARDS"})
-
- // Set TEST_TMPDIR to /tmp, as some of the syscall tests require it to
- // be backed by tmpfs.
- env = filterEnv(env, []string{"TEST_TMPDIR"})
- env = append(env, fmt.Sprintf("TEST_TMPDIR=%s", testTmpDir))
-
- spec.Process.Env = env
-
- if *addUDSTree {
- cleanup, err := setupUDSTree(spec)
- if err != nil {
- t.Fatalf("error creating UDS tree: %v", err)
- }
- defer cleanup()
- }
-
- if err := runRunsc(tc, spec); err != nil {
- t.Errorf("test %q failed with error %v, want nil", tc.FullName(), err)
- }
-}
-
-// filterEnv returns an environment with the excluded variables removed.
-func filterEnv(env, exclude []string) []string {
- var out []string
- for _, kv := range env {
- ok := true
- for _, k := range exclude {
- if strings.HasPrefix(kv, k+"=") {
- ok = false
- break
- }
- }
- if ok {
- out = append(out, kv)
- }
- }
- return out
-}
-
-func fatalf(s string, args ...interface{}) {
- fmt.Fprintf(os.Stderr, s+"\n", args...)
- os.Exit(1)
-}
-
-func matchString(a, b string) (bool, error) {
- return a == b, nil
-}
-
-func main() {
- flag.Parse()
- if flag.NArg() != 1 {
- fatalf("test must be provided")
- }
- testBin := flag.Args()[0] // Only argument.
-
- log.SetLevel(log.Info)
- if *debug {
- log.SetLevel(log.Debug)
- }
-
- if *platform != "native" && *runscPath == "" {
- if err := testutil.ConfigureExePath(); err != nil {
- panic(err.Error())
- }
- *runscPath = specutils.ExePath
- }
- if *container && *setupContainerPath == "" {
- setupContainer, err := testutil.FindFile("test/runner/setup_container/setup_container")
- if err != nil {
- fatalf("cannot find setup_container: %v", err)
- }
- *setupContainerPath = setupContainer
- }
-
- // Make sure stdout and stderr are opened with O_APPEND, otherwise logs
- // from outside the sandbox can (and will) stomp on logs from inside
- // the sandbox.
- for _, f := range []*os.File{os.Stdout, os.Stderr} {
- flags, err := unix.FcntlInt(f.Fd(), unix.F_GETFL, 0)
- if err != nil {
- fatalf("error getting file flags for %v: %v", f, err)
- }
- if flags&unix.O_APPEND == 0 {
- flags |= unix.O_APPEND
- if _, err := unix.FcntlInt(f.Fd(), unix.F_SETFL, flags); err != nil {
- fatalf("error setting file flags for %v: %v", f, err)
- }
- }
- }
-
- // Get all test cases in each binary.
- testCases, err := gtest.ParseTestCases(testBin, true)
- if err != nil {
- fatalf("ParseTestCases(%q) failed: %v", testBin, err)
- }
-
- // Get subset of tests corresponding to shard.
- indices, err := testutil.TestIndicesForShard(len(testCases))
- if err != nil {
- fatalf("TestsForShard() failed: %v", err)
- }
-
- // Resolve the absolute path for the binary.
- testBin, err = filepath.Abs(testBin)
- if err != nil {
- fatalf("Abs() failed: %v", err)
- }
-
- // Run the tests.
- var tests []testing.InternalTest
- for _, tci := range indices {
- // Capture tc.
- tc := testCases[tci]
- tests = append(tests, testing.InternalTest{
- Name: fmt.Sprintf("%s_%s", tc.Suite, tc.Name),
- F: func(t *testing.T) {
- if *platform == "native" {
- // Run the test case on host.
- runTestCaseNative(testBin, tc, t)
- } else {
- // Run the test case in runsc.
- runTestCaseRunsc(testBin, tc, t)
- }
- },
- })
- }
-
- testing.Main(matchString, tests, nil, nil)
-}
diff --git a/test/runner/setup_container/BUILD b/test/runner/setup_container/BUILD
deleted file mode 100644
index 5b99d1de9..000000000
--- a/test/runner/setup_container/BUILD
+++ /dev/null
@@ -1,19 +0,0 @@
-# setup_container contains a shim binary that runs within the test container
-# for syscall tests with container=True.
-
-load("//tools:defs.bzl", "cc_binary")
-
-package(licenses = ["notice"])
-
-cc_binary(
- name = "setup_container",
- testonly = 1,
- srcs = ["setup_container.cc"],
- visibility = ["//test/runner:__subpackages__"],
- deps = [
- "//test/syscalls/linux:socket_netlink_util",
- "//test/syscalls/linux:socket_test_util",
- "//test/util:capability_util",
- "//test/util:posix_error",
- ],
-)
diff --git a/test/runner/setup_container/setup_container.cc b/test/runner/setup_container/setup_container.cc
deleted file mode 100644
index 9a4e3fb8b..000000000
--- a/test/runner/setup_container/setup_container.cc
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <linux/capability.h>
-#include <sys/ioctl.h>
-#include <unistd.h>
-
-#include "test/syscalls/linux/socket_netlink_util.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/capability_util.h"
-#include "test/util/posix_error.h"
-
-namespace gvisor {
-namespace testing {
-
-// SetupContainer sets up the networking settings in the current container.
-PosixError SetupContainer() {
- const PosixErrorOr<bool> have_net_admin = HaveCapability(CAP_NET_ADMIN);
- if (!have_net_admin.ok()) {
- std::cerr << "Cannot determine if we have CAP_NET_ADMIN." << std::endl;
- return have_net_admin.error();
- }
- if (have_net_admin.ValueOrDie() && !IsRunningOnGvisor()) {
- PosixErrorOr<FileDescriptor> sockfd = Socket(AF_INET, SOCK_DGRAM, 0);
- if (!sockfd.ok()) {
- std::cerr << "Cannot open socket." << std::endl;
- return sockfd.error();
- }
- int sock = sockfd.ValueOrDie().get();
- struct ifreq ifr = {};
- strncpy(ifr.ifr_name, "lo", IFNAMSIZ);
- if (ioctl(sock, SIOCGIFFLAGS, &ifr) == -1) {
- std::cerr << "Cannot get 'lo' flags: " << strerror(errno) << std::endl;
- return PosixError(errno);
- }
- if ((ifr.ifr_flags & IFF_UP) == 0) {
- ifr.ifr_flags |= IFF_UP;
- if (ioctl(sock, SIOCSIFFLAGS, &ifr) == -1) {
- std::cerr << "Cannot set 'lo' as UP: " << strerror(errno) << std::endl;
- return PosixError(errno);
- }
- }
- }
- return NoError();
-}
-
-} // namespace testing
-} // namespace gvisor
-
-using ::gvisor::testing::SetupContainer;
-
-// Binary setup_container initializes the container environment in which tests
-// with container=True will run, then execs the actual test binary.
-// Usage:
-// ./setup_container test_binary [arguments forwarded to test_binary...]
-int main(int argc, char *argv[], char *envp[]) {
- if (!SetupContainer().ok()) {
- return 1;
- }
- if (argc < 2) {
- std::cerr << "Must provide arguments to exec." << std::endl;
- return 2;
- }
- if (execve(argv[1], &argv[1], envp) == -1) {
- std::cerr << "execv returned errno " << errno << std::endl;
- return 1;
- }
-}
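The deleted `setup_container.cc` above checks for `CAP_NET_ADMIN` and, when not running on gVisor, flips `IFF_UP` on the loopback interface via `SIOCGIFFLAGS`/`SIOCSIFFLAGS` before exec'ing the real test binary. For comparison only, a rough Go translation of that loopback bring-up is sketched below; it is not part of the deleted code and assumes the `Ifreq` helpers available in recent `golang.org/x/sys/unix` releases:

```go
package main

import (
	"log"

	"golang.org/x/sys/unix"
)

// bringLoopbackUp sets IFF_UP on "lo", mirroring the ioctl sequence in the
// deleted setup_container.cc.
func bringLoopbackUp() error {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0)
	if err != nil {
		return err
	}
	defer unix.Close(fd)

	ifr, err := unix.NewIfreq("lo")
	if err != nil {
		return err
	}
	// Read the current interface flags.
	if err := unix.IoctlIfreq(fd, unix.SIOCGIFFLAGS, ifr); err != nil {
		return err
	}
	flags := ifr.Uint16()
	if flags&unix.IFF_UP == 0 {
		// Set IFF_UP only if the interface is currently down.
		ifr.SetUint16(flags | unix.IFF_UP)
		if err := unix.IoctlIfreq(fd, unix.SIOCSIFFLAGS, ifr); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	if err := bringLoopbackUp(); err != nil {
		log.Fatalf("bringing lo up: %v", err)
	}
}
```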
diff --git a/test/runtimes/BUILD b/test/runtimes/BUILD
deleted file mode 100644
index 510ffe013..000000000
--- a/test/runtimes/BUILD
+++ /dev/null
@@ -1,46 +0,0 @@
-load("//tools:defs.bzl", "bzl_library", "more_shards", "most_shards")
-load("//test/runtimes:defs.bzl", "runtime_test")
-
-package(licenses = ["notice"])
-
-runtime_test(
- name = "go1.12",
- exclude_file = "exclude/go1.12.csv",
- lang = "go",
- shard_count = more_shards,
-)
-
-runtime_test(
- name = "java11",
- batch = 100,
- exclude_file = "exclude/java11.csv",
- lang = "java",
- shard_count = most_shards,
-)
-
-runtime_test(
- name = "nodejs12.4.0",
- exclude_file = "exclude/nodejs12.4.0.csv",
- lang = "nodejs",
- shard_count = most_shards,
-)
-
-runtime_test(
- name = "php7.3.6",
- exclude_file = "exclude/php7.3.6.csv",
- lang = "php",
- shard_count = more_shards,
-)
-
-runtime_test(
- name = "python3.7.3",
- exclude_file = "exclude/python3.7.3.csv",
- lang = "python",
- shard_count = more_shards,
-)
-
-bzl_library(
- name = "defs_bzl",
- srcs = ["defs.bzl"],
- visibility = ["//visibility:private"],
-)
diff --git a/test/runtimes/README.md b/test/runtimes/README.md
deleted file mode 100644
index 9dda1a728..000000000
--- a/test/runtimes/README.md
+++ /dev/null
@@ -1,62 +0,0 @@
-# gVisor Runtime Tests
-
-App Engine uses gVisor to sandbox application containers. The runtime tests aim
-to test `runsc` compatibility with these
-[standard runtimes](https://cloud.google.com/appengine/docs/standard/runtimes).
-The test itself runs the language-defined tests inside the sandboxed standard
-runtime container.
-
-Note: [Ruby runtime](https://cloud.google.com/appengine/docs/standard/ruby) is
-currently in beta mode and so we do not run tests for it yet.
-
-### Testing Locally
-
-To run individual tests from a given runtime, use the following table.
-
-Language | Version | Download Image | Run Test(s)
--------- | ------- | ------------------------------------------- | -----------
-Go | 1.12 | `make -C images load-runtimes_go1.12` | If the test name ends with `.go`, it is an on-disk test: <br> `docker run --runtime=runsc -it gvisor.dev/images/runtimes/go1.12 ( cd /usr/local/go/test ; go run run.go -v -- <TEST_NAME>... )` <br> Otherwise it is a tool test: <br> `docker run --runtime=runsc -it gvisor.dev/images/runtimes/go1.12 go tool dist test -v -no-rebuild ^TEST1$\|^TEST2$...`
-Java | 11 | `make -C images load-runtimes_java11` | `docker run --runtime=runsc -it gvisor.dev/images/runtimes/java11 jtreg -agentvm -dir:/root/test/jdk -noreport -timeoutFactor:20 -verbose:summary <TEST_NAME>...`
-NodeJS | 12.4.0 | `make -C images load-runtimes_nodejs12.4.0` | `docker run --runtime=runsc -it gvisor.dev/images/runtimes/nodejs12.4.0 python tools/test.py --timeout=180 <TEST_NAME>...`
-Php | 7.3.6 | `make -C images load-runtimes_php7.3.6` | `docker run --runtime=runsc -it gvisor.dev/images/runtimes/php7.3.6 make test "TESTS=<TEST_NAME>..."`
-Python | 3.7.3 | `make -C images load-runtimes_python3.7.3` | `docker run --runtime=runsc -it gvisor.dev/images/runtimes/python3.7.3 ./python -m test <TEST_NAME>...`
-
-To run an entire runtime test suite locally, use the following table.
-
-Note: the Java runtime tests take 1+ hours with 16 cores.
-
-Language | Version | Running the test suite
--------- | ------- | ----------------------------------------
-Go | 1.12 | `make go1.12-runtime-tests{_vfs2}`
-Java | 11 | `make java11-runtime-tests{_vfs2}`
-NodeJS | 12.4.0 | `make nodejs12.4.0-runtime-tests{_vfs2}`
-Php | 7.3.6 | `make php7.3.6-runtime-tests{_vfs2}`
-Python | 3.7.3 | `make python3.7.3-runtime-tests{_vfs2}`
-
-#### Clean Up
-
-Sometimes when runtime tests fail, or when the testing container itself crashes
-unexpectedly, the containers are not removed or do not even exit. This can
-cause some docker commands, such as `docker system prune`, to hang forever.
-
-Here are some helpful commands (should be executed in order):
-
-```bash
-docker ps -a # Lists all docker processes; useful when investigating hanging containers.
-docker kill $(docker ps -a -q) # Kills all running containers.
-docker rm $(docker ps -a -q) # Removes all exited containers.
-docker system prune # Remove unused data.
-```
-
-### Testing Infrastructure
-
-There are three components to this test infrastructure:
-
-- [`runner`](runner) - This is the test entrypoint and the binary invoked by
-  `bazel test`. The runner spawns the target runtime container using `runsc`
-  and then copies the `proctor` binary into the container (a sketch of this
-  flow follows this file's diff).
-- [`proctor`](proctor) - This binary acts as our agent inside the container;
-  it communicates with the runner and actually executes the tests.
-- [`exclude`](exclude) - Holds a CSV file for each language runtime that
-  lists the full paths of tests to exclude from the run, along with a reason
-  for each exclusion.
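As referenced in the deleted README above, the runner/proctor contract is: start the runtime image with proctor paused, then exec proctor inside the container once to list tests and again to run a batch of them. The real runner uses gVisor's internal `dockerutil` package (visible later in this diff); the sketch below is a standard-library-only illustration of the same flow, with the container name chosen arbitrarily:

```go
package main

import (
	"fmt"
	"log"
	"os/exec"
	"strings"
)

// listAndRun sketches the runner/proctor contract described in the README:
// proctor is exec'd inside an already-running container, first to list the
// available tests and then to run a comma-separated batch of them.
func listAndRun(container, lang string) error {
	// Ask proctor for the available tests. /proctor/proctor is the path
	// the runner copies the binary to.
	out, err := exec.Command("docker", "exec", container,
		"/proctor/proctor", "--runtime", lang, "--list").Output()
	if err != nil {
		return fmt.Errorf("listing tests: %v", err)
	}
	tests := strings.Fields(string(out))
	if len(tests) == 0 {
		return fmt.Errorf("no tests found for %s", lang)
	}

	// Run a small batch and stream its output.
	batch := tests
	if len(batch) > 5 {
		batch = batch[:5]
	}
	cmd := exec.Command("docker", "exec", container,
		"/proctor/proctor", "--runtime", lang, "--tests", strings.Join(batch, ","))
	cmd.Stdout, cmd.Stderr = log.Writer(), log.Writer()
	return cmd.Run()
}

func main() {
	if err := listAndRun("my-runtime-container", "go"); err != nil {
		log.Fatal(err)
	}
}
```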
diff --git a/test/runtimes/defs.bzl b/test/runtimes/defs.bzl
deleted file mode 100644
index 2550b61a3..000000000
--- a/test/runtimes/defs.bzl
+++ /dev/null
@@ -1,89 +0,0 @@
-"""Defines a rule for runtime test targets."""
-
-load("//tools:defs.bzl", "go_test")
-
-def _runtime_test_impl(ctx):
- # Construct arguments.
- args = [
- "--lang",
- ctx.attr.lang,
- "--image",
- ctx.attr.image,
- "--batch",
- str(ctx.attr.batch),
- ]
- if ctx.attr.exclude_file:
- args += [
- "--exclude_file",
- ctx.files.exclude_file[0].short_path,
- ]
-
- # Build a runner.
- runner = ctx.actions.declare_file("%s-executer" % ctx.label.name)
- runner_content = "\n".join([
- "#!/bin/bash",
- "%s %s $@\n" % (ctx.files._runner[0].short_path, " ".join(args)),
- ])
- ctx.actions.write(runner, runner_content, is_executable = True)
-
- # Return the runner.
- return [DefaultInfo(
- executable = runner,
- runfiles = ctx.runfiles(
- files = ctx.files._runner + ctx.files.exclude_file + ctx.files._proctor,
- collect_default = True,
- collect_data = True,
- ),
- )]
-
-_runtime_test = rule(
- implementation = _runtime_test_impl,
- attrs = {
- "image": attr.string(
- mandatory = False,
- ),
- "lang": attr.string(
- mandatory = True,
- ),
- "exclude_file": attr.label(
- mandatory = False,
- allow_single_file = True,
- ),
- "batch": attr.int(
- default = 50,
- mandatory = False,
- ),
- "_runner": attr.label(
- default = "//test/runtimes/runner:runner",
- executable = True,
- cfg = "target",
- ),
- "_proctor": attr.label(
- default = "//test/runtimes/proctor:proctor",
- executable = True,
- cfg = "target",
- ),
- },
- test = True,
-)
-
-def runtime_test(name, **kwargs):
- _runtime_test(
- name = name,
- image = name, # Resolved as images/runtimes/%s.
- tags = [
- "local",
- "manual",
- ],
- **kwargs
- )
-
-def exclude_test(name, exclude_file):
- """Test that a exclude file parses correctly."""
- go_test(
- name = name + "_exclude_test",
- library = ":runner",
- srcs = ["exclude_test.go"],
- args = ["--exclude_file", "test/runtimes/" + exclude_file],
- data = [exclude_file],
- )
diff --git a/test/runtimes/exclude/go1.12.csv b/test/runtimes/exclude/go1.12.csv
deleted file mode 100644
index 81e02cf64..000000000
--- a/test/runtimes/exclude/go1.12.csv
+++ /dev/null
@@ -1,13 +0,0 @@
-test name,bug id,comment
-cgo_errors,,FLAKY
-cgo_test,,FLAKY
-go_test:cmd/go,,FLAKY
-go_test:net,b/162473575,setsockopt: protocol not available.
-go_test:os,b/118780122,we have a pollable filesystem but that's a surprise
-go_test:os/signal,b/118780860,/dev/pts not properly supported. Also being tracked in b/29356795.
-go_test:runtime,b/118782341,sigtrap not reported or caught or something. Also being tracked in b/33003106.
-go_test:syscall,b/118781998,bad bytes -- bad mem addr; FcntlFlock(F_GETLK) not supported.
-runtime:cpu124,b/118778254,segmentation fault
-test:0_1,,FLAKY
-testcarchive,b/118782924,no sigpipe
-testshared,,FLAKY
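The exclude files, such as `go1.12.csv` above, share a simple three-column CSV layout with a header row: test name, bug id, comment. Only the first column is used to skip tests; the other two are documentation. The standalone sketch below mirrors how the runner's `getExcludes` (later in this diff) turns such a file into a lookup set:

```go
package main

import (
	"encoding/csv"
	"fmt"
	"log"
	"strings"
)

// sample is an illustrative excerpt in the exclude-file format.
const sample = `test name,bug id,comment
cgo_errors,,FLAKY
go_test:net,b/162473575,setsockopt: protocol not available.
`

func main() {
	r := csv.NewReader(strings.NewReader(sample))
	records, err := r.ReadAll()
	if err != nil {
		log.Fatal(err)
	}
	excludes := make(map[string]struct{})
	for _, rec := range records[1:] { // Skip the header row.
		excludes[rec[0]] = struct{}{}
	}
	fmt.Println(len(excludes), "tests excluded")
}
```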
diff --git a/test/runtimes/exclude/java11.csv b/test/runtimes/exclude/java11.csv
deleted file mode 100644
index d069d5a2e..000000000
--- a/test/runtimes/exclude/java11.csv
+++ /dev/null
@@ -1,220 +0,0 @@
-test name,bug id,comment
-com/sun/crypto/provider/Cipher/PBE/PKCS12Cipher.java,,Fails in Docker
-com/sun/jdi/InvokeHangTest.java,https://bugs.openjdk.java.net/browse/JDK-8218463,
-com/sun/jdi/NashornPopFrameTest.java,,
-com/sun/jdi/OnJcmdTest.java,b/180542784,
-com/sun/jdi/ProcessAttachTest.java,,
-com/sun/management/HotSpotDiagnosticMXBean/CheckOrigin.java,,Fails in Docker
-com/sun/management/OperatingSystemMXBean/GetCommittedVirtualMemorySize.java,,
-com/sun/management/ThreadMXBean/ThreadCpuTimeArray.java,,Test assumes high CPU clock precision
-com/sun/management/UnixOperatingSystemMXBean/GetMaxFileDescriptorCount.sh,,
-com/sun/tools/attach/AttachSelf.java,,
-com/sun/tools/attach/BasicTests.java,,
-com/sun/tools/attach/PermissionTest.java,,
-com/sun/tools/attach/StartManagementAgent.java,,
-com/sun/tools/attach/TempDirTest.java,,
-com/sun/tools/attach/modules/Driver.java,,
-java/lang/Character/CheckScript.java,,Fails in Docker
-java/lang/Character/CheckUnicode.java,,Fails in Docker
-java/lang/Class/GetPackageBootLoaderChildLayer.java,,
-java/lang/ClassLoader/nativeLibrary/NativeLibraryTest.java,,Fails in Docker
-java/lang/module/ModuleDescriptorTest.java,,
-java/lang/String/nativeEncoding/StringPlatformChars.java,,
-java/net/CookieHandler/B6791927.java,,java.lang.RuntimeException: Expiration date shouldn't be 0
-java/net/DatagramSocket/SetGetReceiveBufferSize.java,b/180507650,
-java/net/httpclient/websocket/WebSocketProxyTest.java,,Times out on runc too
-java/net/ipv6tests/TcpTest.java,,java.net.ConnectException: Connection timed out (Connection timed out)
-java/net/ipv6tests/UdpTest.java,,Times out
-java/net/Inet6Address/B6558853.java,,Times out
-java/net/InetAddress/CheckJNI.java,,java.net.ConnectException: Connection timed out (Connection timed out)
-java/net/InterfaceAddress/NetworkPrefixLength.java,b/78507103,
-java/net/MulticastSocket/B6425815.java,,java.net.SocketException: Protocol not available (Error getting socket option)
-java/net/MulticastSocket/B6427403.java,,java.net.SocketException: Protocol not available
-java/net/MulticastSocket/MulticastTTL.java,,
-java/net/MulticastSocket/NetworkInterfaceEmptyGetInetAddressesTest.java,,java.net.SocketException: Protocol not available (Error getting socket option)
-java/net/MulticastSocket/NoLoopbackPackets.java,,java.net.SocketException: Protocol not available
-java/net/MulticastSocket/Promiscuous.java,,
-java/net/MulticastSocket/SetLoopbackMode.java,,
-java/net/MulticastSocket/SetTTLAndGetTTL.java,,
-java/net/MulticastSocket/Test.java,,
-java/net/MulticastSocket/TestDefaults.java,,
-java/net/MulticastSocket/TimeToLive.java,,
-java/net/NetworkInterface/NetworkInterfaceStreamTest.java,,
-java/net/Socket/LinkLocal.java,,java.net.SocketTimeoutException: Receive timed out
-java/net/Socket/SetSoLinger.java,b/78527327,SO_LINGER is not yet supported
-java/net/Socket/UrgentDataTest.java,b/111515323,
-java/net/SocketOption/OptionsTest.java,,Fails in Docker
-java/net/SocketPermission/SocketPermissionTest.java,,
-java/net/URLConnection/6212146/TestDriver.java,,Fails in Docker
-java/net/httpclient/RequestBuilderTest.java,,Fails in Docker
-java/nio/channels/DatagramChannel/BasicMulticastTests.java,,
-java/nio/channels/DatagramChannel/SocketOptionTests.java,,java.net.SocketException: Invalid argument
-java/nio/channels/DatagramChannel/UseDGWithIPv6.java,,
-java/nio/channels/FileChannel/directio/DirectIOTest.java,,Fails in Docker
-java/nio/channels/FileChannel/directio/PwriteDirect.java,,java.io.IOException: Invalid argument
-java/nio/channels/Selector/OutOfBand.java,,
-java/nio/channels/Selector/SelectWithConsumer.java,,Flaky
-java/nio/channels/ServerSocketChannel/SocketOptionTests.java,,
-java/nio/channels/SocketChannel/LingerOnClose.java,,
-java/nio/channels/SocketChannel/SocketOptionTests.java,b/77965901,
-java/nio/channels/spi/SelectorProvider/inheritedChannel/InheritedChannelTest.java,,Fails in Docker
-java/rmi/activation/Activatable/extLoadedImpl/ext.sh,,
-java/rmi/transport/checkLeaseInfoLeak/CheckLeaseLeak.java,,
-java/security/cert/PolicyNode/GetPolicyQualifiers.java,b/170263154,Kokoro executor cert expired
-java/text/Format/NumberFormat/CurrencyFormat.java,,Fails in Docker
-java/text/Format/NumberFormat/CurrencyFormat.java,,Fails in Docker
-java/util/Calendar/JapaneseEraNameTest.java,,
-java/util/Currency/CurrencyTest.java,,Fails in Docker
-java/util/Currency/ValidateISO4217.java,,Fails in Docker
-java/util/EnumSet/BogusEnumSet.java,,"java.io.InvalidClassException: java.util.EnumSet; local class incompatible: stream classdesc serialVersionUID = -2409567991088730183, local class serialVersionUID = 1009687484059888093"
-java/util/Locale/Bug8040211.java,,java.lang.RuntimeException: Failed.
-java/util/Locale/LSRDataTest.java,,
-java/util/Properties/CompatibilityTest.java,,"java.lang.RuntimeException: jdk.internal.org.xml.sax.SAXParseException; Internal DTD subset is not allowed. The Properties XML document must have the following DOCTYPE declaration: <!DOCTYPE properties SYSTEM ""http://java.sun.com/dtd/properties.dtd"">"
-java/util/ResourceBundle/Control/XMLResourceBundleTest.java,,java.util.MissingResourceException: Can't find bundle for base name XmlRB locale
-java/util/ResourceBundle/modules/xmlformat/xmlformat.sh,,Timeout reached: 60000. Process is not alive!
-java/util/TimeZone/TimeZoneTest.java,,Uncaught exception thrown in test method TestShortZoneIDs
-java/util/concurrent/locks/Lock/TimedAcquireLeak.java,,
-java/util/jar/JarFile/mrjar/MultiReleaseJarAPI.java,,Fails in Docker
-java/util/logging/LogManager/Configuration/updateConfiguration/SimpleUpdateConfigWithInputStreamTest.java,,
-java/util/logging/TestLoggerWeakRefLeak.java,,
-java/util/spi/ResourceBundleControlProvider/UserDefaultControlTest.java,,java.util.MissingResourceException: Can't find bundle for base name com.foo.XmlRB locale
-javax/imageio/AppletResourceTest.java,,
-javax/imageio/plugins/jpeg/JPEGsNotAcceleratedTest.java,,java.awt.HeadlessException: No X11 DISPLAY variable was set but this program performed an operation which requires it.
-javax/management/security/HashedPasswordFileTest.java,,
-javax/net/ssl/DTLS/DTLSBufferOverflowUnderflowTest.java,,Compilation failed
-javax/net/ssl/DTLS/DTLSDataExchangeTest.java,,Compilation failed
-javax/net/ssl/DTLS/DTLSEnginesClosureTest.java,,Compilation failed
-javax/net/ssl/DTLS/DTLSHandshakeTest.java,,Compilation failed
-javax/net/ssl/DTLS/DTLSHandshakeWithReplicatedPacketsTest.java,,Compilation failed
-javax/net/ssl/DTLS/DTLSIncorrectAppDataTest.java,,Compilation failed
-javax/net/ssl/DTLS/DTLSMFLNTest.java,,Compilation failed
-javax/net/ssl/DTLS/DTLSNotEnabledRC4Test.java,,Compilation failed
-javax/net/ssl/DTLS/DTLSRehandshakeTest.java,,Compilation failed
-javax/net/ssl/DTLS/DTLSRehandshakeWithCipherChangeTest.java,,Compilation failed
-javax/net/ssl/DTLS/DTLSRehandshakeWithDataExTest.java,,Compilation failed
-javax/net/ssl/DTLS/DTLSSequenceNumberTest.java,,Compilation failed
-javax/net/ssl/DTLS/DTLSUnsupportedCiphersTest.java,,Compilation failed
-javax/net/ssl/DTLSv10/DTLSv10BufferOverflowUnderflowTest.java,,Compilation failed
-javax/net/ssl/DTLSv10/DTLSv10DataExchangeTest.java,,Compilation failed
-javax/net/ssl/DTLSv10/DTLSv10EnginesClosureTest.java,,Compilation failed
-javax/net/ssl/DTLSv10/DTLSv10HandshakeTest.java,,Compilation failed
-javax/net/ssl/DTLSv10/DTLSv10HandshakeWithReplicatedPacketsTest.java,,Compilation failed
-javax/net/ssl/DTLSv10/DTLSv10IncorrectAppDataTest.java,,Compilation failed
-javax/net/ssl/DTLSv10/DTLSv10MFLNTest.java,,Compilation failed
-javax/net/ssl/DTLSv10/DTLSv10NotEnabledRC4Test.java,,Compilation failed
-javax/net/ssl/DTLSv10/DTLSv10RehandshakeTest.java,,Compilation failed
-javax/net/ssl/DTLSv10/DTLSv10RehandshakeWithCipherChangeTest.java,,Compilation failed
-javax/net/ssl/DTLSv10/DTLSv10RehandshakeWithDataExTest.java,,Compilation failed
-javax/net/ssl/DTLSv10/DTLSv10SequenceNumberTest.java,,Compilation failed
-javax/net/ssl/DTLSv10/DTLSv10UnsupportedCiphersTest.java,,Compilation failed
-javax/net/ssl/SSLSession/JSSERenegotiate.java,,Fails in Docker
-javax/net/ssl/TLS/TLSDataExchangeTest.java,,Compilation failed
-javax/net/ssl/TLS/TLSEnginesClosureTest.java,,Compilation failed
-javax/net/ssl/TLS/TLSHandshakeTest.java,,Compilation failed
-javax/net/ssl/TLS/TLSMFLNTest.java,,Compilation failed
-javax/net/ssl/TLS/TLSNotEnabledRC4Test.java,,Compilation failed
-javax/net/ssl/TLS/TLSRehandshakeTest.java,,Compilation failed
-javax/net/ssl/TLS/TLSRehandshakeWithCipherChangeTest.java,,Compilation failed
-javax/net/ssl/TLS/TLSRehandshakeWithDataExTest.java,,Compilation failed
-javax/net/ssl/TLS/TLSUnsupportedCiphersTest.java,,Compilation failed
-javax/net/ssl/TLSv1/TLSDataExchangeTest.java,,Compilation failed
-javax/net/ssl/TLSv1/TLSEnginesClosureTest.java,,Compilation failed
-javax/net/ssl/TLSv1/TLSHandshakeTest.java,,Compilation failed
-javax/net/ssl/TLSv1/TLSMFLNTest.java,,Compilation failed
-javax/net/ssl/TLSv1/TLSNotEnabledRC4Test.java,,Compilation failed
-javax/net/ssl/TLSv1/TLSRehandshakeTest.java,,Compilation failed
-javax/net/ssl/TLSv1/TLSRehandshakeWithCipherChangeTest.java,,Compilation failed
-javax/net/ssl/TLSv1/TLSRehandshakeWithDataExTest.java,,Compilation failed
-javax/net/ssl/TLSv1/TLSUnsupportedCiphersTest.java,,Compilation failed
-javax/net/ssl/TLSv11/TLSDataExchangeTest.java,,Compilation failed
-javax/net/ssl/TLSv11/TLSEnginesClosureTest.java,,Compilation failed
-javax/net/ssl/TLSv11/TLSHandshakeTest.java,,Compilation failed
-javax/net/ssl/TLSv11/TLSMFLNTest.java,,Compilation failed
-javax/net/ssl/TLSv11/TLSNotEnabledRC4Test.java,,Compilation failed
-javax/net/ssl/TLSv11/TLSRehandshakeTest.java,,Compilation failed
-javax/net/ssl/TLSv11/TLSRehandshakeWithCipherChangeTest.java,,Compilation failed
-javax/net/ssl/TLSv11/TLSRehandshakeWithDataExTest.java,,Compilation failed
-javax/net/ssl/TLSv11/TLSUnsupportedCiphersTest.java,,Compilation failed
-javax/net/ssl/TLSv12/TLSEnginesClosureTest.java,,Compilation failed
-javax/sound/sampled/AudioInputStream/FrameLengthAfterConversion.java,,
-jdk/jfr/cmd/TestHelp.java,,java.lang.RuntimeException: 'Available commands are:' missing from stdout/stderr
-jdk/jfr/cmd/TestPrint.java,,Missing file' missing from stdout/stderr
-jdk/jfr/cmd/TestPrintDefault.java,,java.lang.RuntimeException: 'JVMInformation' missing from stdout/stderr
-jdk/jfr/cmd/TestPrintJSON.java,,javax.script.ScriptException: <eval>:1:17 Expected an operand but found eof var jsonObject = ^ in <eval> at line number 1 at column number 17
-jdk/jfr/cmd/TestPrintXML.java,,org.xml.sax.SAXParseException; lineNumber: 1; columnNumber: 1; Premature end of file.
-jdk/jfr/cmd/TestReconstruct.java,,java.lang.RuntimeException: 'Too few arguments' missing from stdout/stderr
-jdk/jfr/cmd/TestSplit.java,,java.lang.RuntimeException: 'Missing file' missing from stdout/stderr
-jdk/jfr/cmd/TestSummary.java,,java.lang.RuntimeException: 'Missing file' missing from stdout/stderr
-jdk/jfr/event/compiler/TestCompilerStats.java,,java.lang.RuntimeException: Field nmetodsSize not in event
-jdk/jfr/event/metadata/TestDefaultConfigurations.java,,Setting 'threshold' in event 'jdk.SecurityPropertyModification' was not configured in the configuration 'default'
-jdk/jfr/event/oldobject/TestLargeRootSet.java,,Flaky - `main' threw exception: java.lang.RuntimeException: Could not find root object
-jdk/jfr/event/runtime/TestActiveSettingEvent.java,,java.lang.Exception: Could not find setting with name jdk.X509Validation#threshold
-jdk/jfr/event/runtime/TestModuleEvents.java,,java.lang.RuntimeException: assertEquals: expected jdk.proxy1 to equal java.base
-jdk/jfr/event/runtime/TestNetworkUtilizationEvent.java,,
-jdk/jfr/event/runtime/TestThreadParkEvent.java,,
-jdk/jfr/event/sampling/TestNative.java,,
-jdk/jfr/javaagent/TestLoadedAgent.java,b/180542638,
-jdk/jfr/jcmd/TestJcmdChangeLogLevel.java,,
-jdk/jfr/jcmd/TestJcmdConfigure.java,,
-jdk/jfr/jcmd/TestJcmdDump.java,,
-jdk/jfr/jcmd/TestJcmdDumpGeneratedFilename.java,,
-jdk/jfr/jcmd/TestJcmdDumpLimited.java,,
-jdk/jfr/jcmd/TestJcmdDumpPathToGCRoots.java,,
-jdk/jfr/jcmd/TestJcmdDumpWithFileName.java,b/180542783,
-jdk/jfr/jcmd/TestJcmdLegacy.java,,
-jdk/jfr/jcmd/TestJcmdSaveToFile.java,,
-jdk/jfr/jcmd/TestJcmdStartDirNotExist.java,,
-jdk/jfr/jcmd/TestJcmdStartInvaldFile.java,,
-jdk/jfr/jcmd/TestJcmdStartPathToGCRoots.java,,
-jdk/jfr/jcmd/TestJcmdStartStopDefault.java,,
-jdk/jfr/jcmd/TestJcmdStartWithOptions.java,,
-jdk/jfr/jcmd/TestJcmdStartWithSettings.java,,
-jdk/jfr/jcmd/TestJcmdStopInvalidFile.java,,
-jdk/jfr/jmx/TestGetRecordings.java,b/180542639,
-jdk/jfr/jvm/TestGetAllEventClasses.java,,Compilation failed
-jdk/jfr/jvm/TestJfrJavaBase.java,,
-jdk/jfr/startupargs/TestStartRecording.java,,
-jdk/modules/incubator/ImageModules.java,,
-jdk/net/Sockets/ExtOptionTest.java,,
-jdk/net/Sockets/QuickAckTest.java,,
-lib/security/cacerts/VerifyCACerts.java,,
-sun/management/jmxremote/bootstrap/CustomLauncherTest.java,,
-sun/management/jmxremote/bootstrap/JvmstatCountersTest.java,,
-sun/management/jmxremote/bootstrap/LocalManagementTest.java,,
-sun/management/jmxremote/bootstrap/RmiRegistrySslTest.java,,
-sun/management/jmxremote/bootstrap/RmiSslBootstrapTest.sh,,
-sun/management/jmxremote/startstop/JMXStartStopTest.java,,
-sun/management/jmxremote/startstop/JMXStatusPerfCountersTest.java,,
-sun/management/jmxremote/startstop/JMXStatusTest.java,,
-sun/management/jdp/JdpDefaultsTest.java,,
-sun/management/jdp/JdpJmxRemoteDynamicPortTest.java,,
-sun/management/jdp/JdpOffTest.java,,
-sun/management/jdp/JdpSpecificAddressTest.java,,
-sun/text/resources/LocaleDataTest.java,,
-sun/tools/jcmd/TestJcmdSanity.java,,
-sun/tools/jhsdb/AlternateHashingTest.java,,
-sun/tools/jhsdb/BasicLauncherTest.java,,
-sun/tools/jhsdb/HeapDumpTest.java,,
-sun/tools/jhsdb/heapconfig/JMapHeapConfigTest.java,,
-sun/tools/jhsdb/JShellHeapDumpTest.java,,Fails on runc too
-sun/tools/jinfo/BasicJInfoTest.java,,
-sun/tools/jinfo/JInfoTest.java,,
-sun/tools/jmap/BasicJMapTest.java,,
-sun/tools/jstack/BasicJStackTest.java,,
-sun/tools/jstack/DeadlockDetectionTest.java,,
-sun/tools/jstatd/TestJstatdExternalRegistry.java,,
-sun/tools/jstatd/TestJstatdPort.java,,Flaky
-sun/tools/jstatd/TestJstatdPortAndServer.java,,Flaky
-sun/util/calendar/zi/TestZoneInfo310.java,,
-tools/jar/modularJar/Basic.java,,
-tools/jar/multiRelease/Basic.java,,
-tools/jimage/JImageExtractTest.java,,
-tools/jimage/JImageTest.java,,
-tools/jlink/JLinkTest.java,,
-tools/jlink/plugins/IncludeLocalesPluginTest.java,,
-tools/jmod/hashes/HashesTest.java,,
-tools/launcher/BigJar.java,b/111611473,
-tools/launcher/HelpFlagsTest.java,,java.lang.AssertionError: HelpFlagsTest failed: Tool jfr not covered by this test. Add specification to jdkTools array!
-tools/launcher/JliLaunchTest.java,,Fails on runc too
-tools/launcher/VersionCheck.java,,java.lang.AssertionError: VersionCheck failed: testToolVersion: [jfr];
-tools/launcher/modules/patch/systemmodules/PatchSystemModules.java,,
diff --git a/test/runtimes/exclude/nodejs12.4.0.csv b/test/runtimes/exclude/nodejs12.4.0.csv
deleted file mode 100644
index c4e7917ec..000000000
--- a/test/runtimes/exclude/nodejs12.4.0.csv
+++ /dev/null
@@ -1,45 +0,0 @@
-test name,bug id,comment
-async-hooks/test-statwatcher.js,https://github.com/nodejs/node/issues/21425,Check for fix inclusion in nodejs releases after 2020-03-29
-benchmark/test-benchmark-fs.js,,Broken test
-benchmark/test-benchmark-napi.js,,Broken test
-doctool/test-make-doc.js,b/68848110,Expected to fail.
-internet/test-dgram-multicast-set-interface-lo.js,b/162798882,
-internet/test-doctool-versions.js,,Broken test
-internet/test-uv-threadpool-schedule.js,,Broken test
-parallel/test-dgram-bind-fd.js,b/132447356,
-parallel/test-dgram-socket-buffer-size.js,b/68847921,
-parallel/test-dns-channel-timeout.js,b/161893056,
-parallel/test-fs-access.js,,Broken test
-parallel/test-fs-watchfile.js,b/166819807,Flaky - VFS1 only
-parallel/test-fs-write-stream.js,b/166819807,Flaky - VFS1 only
-parallel/test-fs-write-stream-double-close.js,b/166819807,Flaky - VFS1 only
-parallel/test-fs-write-stream-throw-type-error.js,b/166819807,Flaky - VFS1 only
-parallel/test-http-writable-true-after-close.js,b/171301436,Flaky - Mismatched <anonymous> function calls. Expected exactly 1 actual 2
-parallel/test-os.js,b/63997097,
-parallel/test-process-uid-gid.js,,Does not work inside Docker with gid nobody
-pseudo-tty/test-assert-colors.js,b/162801321,
-pseudo-tty/test-assert-no-color.js,b/162801321,
-pseudo-tty/test-assert-position-indicator.js,b/162801321,
-pseudo-tty/test-async-wrap-getasyncid-tty.js,b/162801321,
-pseudo-tty/test-fatal-error.js,b/162801321,
-pseudo-tty/test-handle-wrap-isrefed-tty.js,b/162801321,
-pseudo-tty/test-readable-tty-keepalive.js,b/162801321,
-pseudo-tty/test-set-raw-mode-reset-process-exit.js,b/162801321,
-pseudo-tty/test-set-raw-mode-reset-signal.js,b/162801321,
-pseudo-tty/test-set-raw-mode-reset.js,b/162801321,
-pseudo-tty/test-stderr-stdout-handle-sigwinch.js,b/162801321,
-pseudo-tty/test-stdout-read.js,b/162801321,
-pseudo-tty/test-tty-color-support.js,b/162801321,
-pseudo-tty/test-tty-isatty.js,b/162801321,
-pseudo-tty/test-tty-stdin-call-end.js,b/162801321,
-pseudo-tty/test-tty-stdin-end.js,b/162801321,
-pseudo-tty/test-stdin-write.js,b/162801321,
-pseudo-tty/test-tty-stdout-end.js,b/162801321,
-pseudo-tty/test-tty-stdout-resize.js,b/162801321,
-pseudo-tty/test-tty-stream-constructors.js,b/162801321,
-pseudo-tty/test-tty-window-size.js,b/162801321,
-pseudo-tty/test-tty-wrap.js,b/162801321,
-pummel/test-net-pingpong.js,,Broken test
-pummel/test-vm-memleak.js,b/162799436,
-pummel/test-watch-file.js,,Flaky - VFS1 only
-tick-processor/test-tick-processor-builtin.js,,Broken test
diff --git a/test/runtimes/exclude/php7.3.6.csv b/test/runtimes/exclude/php7.3.6.csv
deleted file mode 100644
index c051fe571..000000000
--- a/test/runtimes/exclude/php7.3.6.csv
+++ /dev/null
@@ -1,48 +0,0 @@
-test name,bug id,comment
-ext/intl/tests/bug77895.phpt,,
-ext/intl/tests/dateformat_bug65683_2.phpt,,
-ext/mbstring/tests/bug76319.phpt,,
-ext/mbstring/tests/bug76958.phpt,,
-ext/mbstring/tests/bug77025.phpt,,
-ext/mbstring/tests/bug77165.phpt,,
-ext/mbstring/tests/bug77454.phpt,,
-ext/mbstring/tests/mb_convert_encoding_leak.phpt,,
-ext/mbstring/tests/mb_strrpos_encoding_3rd_param.phpt,,
-ext/pcre/tests/cache_limit.phpt,,Broken test - Flaky
-ext/session/tests/session_module_name_variation4.phpt,,Flaky
-ext/session/tests/session_set_save_handler_class_018.phpt,,
-ext/session/tests/session_set_save_handler_iface_003.phpt,,
-ext/session/tests/session_set_save_handler_sid_001.phpt,,
-ext/session/tests/session_set_save_handler_variation4.phpt,,
-ext/standard/tests/file/disk.phpt,https://bugs.php.net/bug.php?id=80018,
-ext/standard/tests/file/disk_free_space_basic.phpt,https://bugs.php.net/bug.php?id=80018,
-ext/standard/tests/file/disk_free_space_error.phpt,https://bugs.php.net/bug.php?id=80018,
-ext/standard/tests/file/disk_free_space_variation.phpt,https://bugs.php.net/bug.php?id=80018,
-ext/standard/tests/file/disk_total_space_basic.phpt,https://bugs.php.net/bug.php?id=80018,
-ext/standard/tests/file/disk_total_space_error.phpt,https://bugs.php.net/bug.php?id=80018,
-ext/standard/tests/file/disk_total_space_variation.phpt,https://bugs.php.net/bug.php?id=80018,
-ext/standard/tests/file/fopen_variation19.phpt,b/162894964,
-ext/standard/tests/file/lstat_stat_variation14.phpt,,Flaky
-ext/standard/tests/file/php_fd_wrapper_01.phpt,,
-ext/standard/tests/file/php_fd_wrapper_02.phpt,,
-ext/standard/tests/file/php_fd_wrapper_03.phpt,,
-ext/standard/tests/file/php_fd_wrapper_04.phpt,,
-ext/standard/tests/file/realpath_bug77484.phpt,b/162894969,VFS1 only failure
-ext/standard/tests/file/rename_variation.phpt,b/68717309,
-ext/standard/tests/file/symlink_link_linkinfo_is_link_variation4.phpt,b/162895341,
-ext/standard/tests/file/symlink_link_linkinfo_is_link_variation8.phpt,b/162896223,VFS1 only failure
-ext/standard/tests/general_functions/escapeshellarg_bug71270.phpt,,
-ext/standard/tests/general_functions/escapeshellcmd_bug71270.phpt,,
-ext/standard/tests/streams/proc_open_bug60120.phpt,,Flaky until php-src 3852a35fdbcb
-ext/standard/tests/streams/proc_open_bug64438.phpt,,Flaky
-ext/standard/tests/streams/proc_open_bug69900.phpt,,Flaky
-ext/standard/tests/streams/stream_socket_sendto.phpt,,
-ext/standard/tests/strings/007.phpt,,
-sapi/cli/tests/upload_2G.phpt,,
-tests/output/stream_isatty_err.phpt,b/68720279,
-tests/output/stream_isatty_in-err.phpt,b/68720282,
-tests/output/stream_isatty_in-out-err.phpt,,
-tests/output/stream_isatty_in-out.phpt,b/68720299,
-tests/output/stream_isatty_out-err.phpt,b/68720311,
-tests/output/stream_isatty_out.phpt,b/68720325,
-Zend/tests/concat_003.phpt,b/162896021,
diff --git a/test/runtimes/exclude/python3.7.3.csv b/test/runtimes/exclude/python3.7.3.csv
deleted file mode 100644
index e9fef03b7..000000000
--- a/test/runtimes/exclude/python3.7.3.csv
+++ /dev/null
@@ -1,19 +0,0 @@
-test name,bug id,comment
-test_asyncio,,Fails on Docker.
-test_asyncore,b/162973328,
-test_epoll,b/162983393,
-test_fcntl,b/162978767,fcntl invalid argument -- artificial test to make sure something works in 64 bit mode.
-test_httplib,b/163000009,OSError: [Errno 98] Address already in use
-test_logging,b/162980079,
-test_multiprocessing_fork,,Flaky. Sometimes times out.
-test_multiprocessing_forkserver,,Flaky. Sometimes times out.
-test_multiprocessing_main_handling,,Flaky. Sometimes times out.
-test_multiprocessing_spawn,,Flaky. Sometimes times out.
-test_posix,b/76174079,posix.sched_get_priority_min not implemented + posix.sched_rr_get_interval not permitted
-test_pty,b/162979921,
-test_readline,b/162980389,TestReadline hangs forever
-test_resource,b/76174079,
-test_selectors,b/76116849,OSError not raised with epoll
-test_smtplib,b/162980434,unclosed sockets
-test_signal,,Flaky - signal: alarm clock
-test_socket,b/75983380,
diff --git a/test/runtimes/proctor/BUILD b/test/runtimes/proctor/BUILD
deleted file mode 100644
index b4a9b12de..000000000
--- a/test/runtimes/proctor/BUILD
+++ /dev/null
@@ -1,14 +0,0 @@
-load("//tools:defs.bzl", "go_binary")
-
-package(licenses = ["notice"])
-
-go_binary(
- name = "proctor",
- srcs = ["main.go"],
- pure = True,
- visibility = ["//test/runtimes:__pkg__"],
- deps = [
- "//test/runtimes/proctor/lib",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/test/runtimes/proctor/lib/BUILD b/test/runtimes/proctor/lib/BUILD
deleted file mode 100644
index f834f1b5a..000000000
--- a/test/runtimes/proctor/lib/BUILD
+++ /dev/null
@@ -1,25 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "lib",
- srcs = [
- "go.go",
- "java.go",
- "lib.go",
- "nodejs.go",
- "php.go",
- "python.go",
- ],
- visibility = ["//test/runtimes/proctor:__pkg__"],
- deps = ["@org_golang_x_sys//unix:go_default_library"],
-)
-
-go_test(
- name = "lib_test",
- size = "small",
- srcs = ["lib_test.go"],
- library = ":lib",
- deps = ["//pkg/test/testutil"],
-)
diff --git a/test/runtimes/proctor/lib/go.go b/test/runtimes/proctor/lib/go.go
deleted file mode 100644
index 5c48fb60b..000000000
--- a/test/runtimes/proctor/lib/go.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package lib
-
-import (
- "fmt"
- "os"
- "os/exec"
- "regexp"
- "strings"
-)
-
-var (
- goTestRegEx = regexp.MustCompile(`^.+\.go$`)
-
- // Directories with .dir contain helper files for tests.
- // Exclude benchmarks and stress tests.
- goDirFilter = regexp.MustCompile(`^(bench|stress)\/.+$|^.+\.dir.+$`)
-)
-
-// Location of Go tests on disk.
-const goTestDir = "/usr/local/go/test"
-
-// goRunner implements TestRunner for Go.
-//
-// There are two types of Go tests: "Go tool tests" and "Go tests on disk".
-// "Go tool tests" are found and executed using `go tool dist test`. "Go tests
-// on disk" are found in the /usr/local/go/test directory and are executed
-// using `go run run.go`.
-type goRunner struct{}
-
-var _ TestRunner = goRunner{}
-
-// ListTests implements TestRunner.ListTests.
-func (goRunner) ListTests() ([]string, error) {
- // Go tool dist test tests.
- args := []string{"tool", "dist", "test", "-list"}
- cmd := exec.Command("go", args...)
- cmd.Stderr = os.Stderr
- out, err := cmd.Output()
- if err != nil {
- return nil, fmt.Errorf("failed to list: %v", err)
- }
- var toolSlice []string
- for _, test := range strings.Split(string(out), "\n") {
- toolSlice = append(toolSlice, test)
- }
-
- // Go tests on disk.
- diskSlice, err := Search(goTestDir, goTestRegEx)
- if err != nil {
- return nil, err
- }
- // Remove items from /bench/, /stress/, and .dir files.
- diskFiltered := diskSlice[:0]
- for _, file := range diskSlice {
- if !goDirFilter.MatchString(file) {
- diskFiltered = append(diskFiltered, file)
- }
- }
-
- return append(toolSlice, diskFiltered...), nil
-}
-
-// TestCmds implements TestRunner.TestCmds.
-func (goRunner) TestCmds(tests []string) []*exec.Cmd {
- var toolTests, onDiskTests []string
- for _, test := range tests {
- if strings.HasSuffix(test, ".go") {
- onDiskTests = append(onDiskTests, test)
- } else {
- toolTests = append(toolTests, "^"+test+"$")
- }
- }
-
- var cmds []*exec.Cmd
- if len(toolTests) > 0 {
- cmds = append(cmds, exec.Command("go", "tool", "dist", "test", "-v", "-no-rebuild", "-run", strings.Join(toolTests, "\\|")))
- }
- if len(onDiskTests) > 0 {
- cmd := exec.Command("go", append([]string{"run", "run.go", "-v", "--"}, onDiskTests...)...)
- cmd.Dir = goTestDir
- cmds = append(cmds, cmd)
- }
-
- return cmds
-}
diff --git a/test/runtimes/proctor/lib/java.go b/test/runtimes/proctor/lib/java.go
deleted file mode 100644
index 3105011ff..000000000
--- a/test/runtimes/proctor/lib/java.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package lib
-
-import (
- "fmt"
- "os"
- "os/exec"
- "regexp"
- "strings"
-)
-
-// Directories to exclude from tests.
-var javaExclDirs = regexp.MustCompile(`(^(sun\/security)|(java\/util\/stream)|(java\/time)| )`)
-
-// Location of java tests.
-const javaTestDir = "/root/test/jdk"
-
-// javaRunner implements TestRunner for Java.
-type javaRunner struct{}
-
-var _ TestRunner = javaRunner{}
-
-// ListTests implements TestRunner.ListTests.
-func (javaRunner) ListTests() ([]string, error) {
- args := []string{
- "-dir:" + javaTestDir,
- "-ignore:quiet",
- "-a",
- "-listtests",
- ":jdk_core",
- ":jdk_svc",
- ":jdk_sound",
- ":jdk_imageio",
- }
- cmd := exec.Command("jtreg", args...)
- cmd.Stderr = os.Stderr
- out, err := cmd.Output()
- if err != nil {
- return nil, fmt.Errorf("jtreg -listtests : %v", err)
- }
- var testSlice []string
- for _, test := range strings.Split(string(out), "\n") {
- if !javaExclDirs.MatchString(test) {
- testSlice = append(testSlice, test)
- }
- }
- return testSlice, nil
-}
-
-// TestCmds implements TestRunner.TestCmds.
-func (javaRunner) TestCmds(tests []string) []*exec.Cmd {
- args := append(
- []string{
- "-agentvm", // Execute each action using a pool of reusable JVMs.
- "-dir:" + javaTestDir, // Base directory for test files and directories.
- "-noreport", // Do not generate a final report.
- "-timeoutFactor:20", // Extend the default timeout (2 min) of all tests by this factor.
- "-verbose:nopass", // Verbose output but supress it for tests that passed.
- },
- tests...,
- )
- return []*exec.Cmd{exec.Command("jtreg", args...)}
-}
diff --git a/test/runtimes/proctor/lib/lib.go b/test/runtimes/proctor/lib/lib.go
deleted file mode 100644
index 36c60088a..000000000
--- a/test/runtimes/proctor/lib/lib.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package lib contains proctor functions.
-package lib
-
-import (
- "fmt"
- "os"
- "os/exec"
- "os/signal"
- "path/filepath"
- "regexp"
-
- "golang.org/x/sys/unix"
-)
-
-// TestRunner is an interface that must be implemented for each runtime
-// integrated with proctor.
-type TestRunner interface {
- // ListTests returns a string slice of tests available to run.
- ListTests() ([]string, error)
-
- // TestCmds returns a slice of *exec.Cmd that will run the given tests.
- // There is no correlation between the number of exec.Cmds returned and the
- // number of tests. It could return one command to run all tests or a few
- // commands that collectively run all.
- TestCmds(tests []string) []*exec.Cmd
-}
-
-// TestRunnerForRuntime returns a new TestRunner for the given runtime.
-func TestRunnerForRuntime(runtime string) (TestRunner, error) {
- switch runtime {
- case "go":
- return goRunner{}, nil
- case "java":
- return javaRunner{}, nil
- case "nodejs":
- return nodejsRunner{}, nil
- case "php":
- return phpRunner{}, nil
- case "python":
- return pythonRunner{}, nil
- }
- return nil, fmt.Errorf("invalid runtime %q", runtime)
-}
-
-// PauseAndReap is like init. It runs forever and reaps any children.
-func PauseAndReap() {
- // Get notified of any new children.
- ch := make(chan os.Signal, 1)
- signal.Notify(ch, unix.SIGCHLD)
-
- for {
- if _, ok := <-ch; !ok {
- // Channel closed. This should not happen.
- panic("signal channel closed")
- }
-
- // Reap the child.
- for {
- if cpid, _ := unix.Wait4(-1, nil, unix.WNOHANG, nil); cpid < 1 {
- break
- }
- }
- }
-}
-
-// Search is a helper function to find tests in the given directory that match
-// the regex.
-func Search(root string, testFilter *regexp.Regexp) ([]string, error) {
- var testSlice []string
-
- err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
-
- name := filepath.Base(path)
-
- if info.IsDir() || !testFilter.MatchString(name) {
- return nil
- }
-
- relPath, err := filepath.Rel(root, path)
- if err != nil {
- return err
- }
- testSlice = append(testSlice, relPath)
- return nil
- })
- if err != nil {
- return nil, fmt.Errorf("walking %q: %v", root, err)
- }
-
- return testSlice, nil
-}
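`TestRunner` is the extension point of proctor: supporting a new language runtime means implementing `ListTests` and `TestCmds` and adding a case to `TestRunnerForRuntime`. As a purely hypothetical sketch (the README notes Ruby is not covered yet), a new runner living alongside the others in this package might look like the following; the test layout and `ruby` invocation are illustrative assumptions, not part of the deleted code, and the file assumes it sits in the proctor `lib` package so that `TestRunner` and `Search` are in scope:

```go
package lib

import (
	"os/exec"
	"regexp"
)

// rubyRunner is a hypothetical TestRunner sketch for a Ruby image.
type rubyRunner struct{}

var _ TestRunner = rubyRunner{}

// rubyTestRegEx matches conventional Ruby test file names (an assumption).
var rubyTestRegEx = regexp.MustCompile(`^test_.+\.rb$`)

// ListTests implements TestRunner.ListTests by scanning a test directory
// relative to the working directory.
func (rubyRunner) ListTests() ([]string, error) {
	return Search("test", rubyTestRegEx)
}

// TestCmds implements TestRunner.TestCmds with one command per test file,
// illustrating that the number of commands need not match the batch size.
func (rubyRunner) TestCmds(tests []string) []*exec.Cmd {
	var cmds []*exec.Cmd
	for _, test := range tests {
		cmds = append(cmds, exec.Command("ruby", "-Itest", test))
	}
	return cmds
}
```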
diff --git a/test/runtimes/proctor/lib/lib_test.go b/test/runtimes/proctor/lib/lib_test.go
deleted file mode 100644
index 1193d2e28..000000000
--- a/test/runtimes/proctor/lib/lib_test.go
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package lib
-
-import (
- "io/ioutil"
- "os"
- "path/filepath"
- "reflect"
- "regexp"
- "strings"
- "testing"
-
- "gvisor.dev/gvisor/pkg/test/testutil"
-)
-
-func touch(t *testing.T, name string) {
- t.Helper()
- f, err := os.Create(name)
- if err != nil {
- t.Fatalf("error creating file %q: %v", name, err)
- }
- if err := f.Close(); err != nil {
- t.Fatalf("error closing file %q: %v", name, err)
- }
-}
-
-func TestSearchEmptyDir(t *testing.T) {
- td, err := ioutil.TempDir(testutil.TmpDir(), "searchtest")
- if err != nil {
- t.Fatalf("error creating searchtest: %v", err)
- }
- defer os.RemoveAll(td)
-
- var want []string
-
- testFilter := regexp.MustCompile(`^test-[^-].+\.tc$`)
- got, err := Search(td, testFilter)
- if err != nil {
- t.Errorf("search error: %v", err)
- }
-
- if !reflect.DeepEqual(got, want) {
- t.Errorf("Found %#v; want %#v", got, want)
- }
-}
-
-func TestSearch(t *testing.T) {
- td, err := ioutil.TempDir(testutil.TmpDir(), "searchtest")
- if err != nil {
- t.Fatalf("error creating searchtest: %v", err)
- }
- defer os.RemoveAll(td)
-
- // Creating various files similar to the test filter regex.
- files := []string{
- "emp/",
- "tee/",
- "test-foo.tc",
- "test-foo.tc",
- "test-bar.tc",
- "test-sam.tc",
- "Test-que.tc",
- "test-brett",
- "test--abc.tc",
- "test---xyz.tc",
- "test-bool.TC",
- "--test-gvs.tc",
- " test-pew.tc",
- "dir/test_baz.tc",
- "dir/testsnap.tc",
- "dir/test-luk.tc",
- "dir/nest/test-ok.tc",
- "dir/dip/diz/goog/test-pack.tc",
- "dir/dip/diz/wobble/thud/test-cas.e",
- "dir/dip/diz/wobble/thud/test-cas.tc",
- }
- want := []string{
- "dir/dip/diz/goog/test-pack.tc",
- "dir/dip/diz/wobble/thud/test-cas.tc",
- "dir/nest/test-ok.tc",
- "dir/test-luk.tc",
- "test-bar.tc",
- "test-foo.tc",
- "test-sam.tc",
- }
-
- for _, item := range files {
- if strings.HasSuffix(item, "/") {
- // This item is a directory, create it.
- if err := os.MkdirAll(filepath.Join(td, item), 0755); err != nil {
- t.Fatalf("error making directory: %v", err)
- }
- } else {
- // This item is a file, create the directory and touch file.
- // Create directory in which file should be created
- fullDirPath := filepath.Join(td, filepath.Dir(item))
- if err := os.MkdirAll(fullDirPath, 0755); err != nil {
- t.Fatalf("error making directory: %v", err)
- }
- // Create file with full path to file.
- touch(t, filepath.Join(td, item))
- }
- }
-
- testFilter := regexp.MustCompile(`^test-[^-].+\.tc$`)
- got, err := Search(td, testFilter)
- if err != nil {
- t.Errorf("search error: %v", err)
- }
-
- if !reflect.DeepEqual(got, want) {
- t.Errorf("Found %#v; want %#v", got, want)
- }
-}
diff --git a/test/runtimes/proctor/lib/nodejs.go b/test/runtimes/proctor/lib/nodejs.go
deleted file mode 100644
index 320597aa5..000000000
--- a/test/runtimes/proctor/lib/nodejs.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package lib
-
-import (
- "os/exec"
- "path/filepath"
- "regexp"
-)
-
-var nodejsTestRegEx = regexp.MustCompile(`^test-[^-].+\.js$`)
-
-// Location of nodejs tests relative to working dir.
-const nodejsTestDir = "test"
-
-// nodejsRunner implements TestRunner for NodeJS.
-type nodejsRunner struct{}
-
-var _ TestRunner = nodejsRunner{}
-
-// ListTests implements TestRunner.ListTests.
-func (nodejsRunner) ListTests() ([]string, error) {
- testSlice, err := Search(nodejsTestDir, nodejsTestRegEx)
- if err != nil {
- return nil, err
- }
- return testSlice, nil
-}
-
-// TestCmds implements TestRunner.TestCmds.
-func (nodejsRunner) TestCmds(tests []string) []*exec.Cmd {
- args := append([]string{filepath.Join("tools", "test.py"), "--timeout=180"}, tests...)
- return []*exec.Cmd{exec.Command("/usr/bin/python", args...)}
-}
diff --git a/test/runtimes/proctor/lib/php.go b/test/runtimes/proctor/lib/php.go
deleted file mode 100644
index b67a60a97..000000000
--- a/test/runtimes/proctor/lib/php.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package lib
-
-import (
- "os/exec"
- "regexp"
- "strings"
-)
-
-var phpTestRegEx = regexp.MustCompile(`^.+\.phpt$`)
-
-// phpRunner implements TestRunner for PHP.
-type phpRunner struct{}
-
-var _ TestRunner = phpRunner{}
-
-// ListTests implements TestRunner.ListTests.
-func (phpRunner) ListTests() ([]string, error) {
- testSlice, err := Search(".", phpTestRegEx)
- if err != nil {
- return nil, err
- }
- return testSlice, nil
-}
-
-// TestCmds implements TestRunner.TestCmds.
-func (phpRunner) TestCmds(tests []string) []*exec.Cmd {
- args := []string{"test", "TESTS=" + strings.Join(tests, " ")}
- return []*exec.Cmd{exec.Command("make", args...)}
-}
diff --git a/test/runtimes/proctor/lib/python.go b/test/runtimes/proctor/lib/python.go
deleted file mode 100644
index 429bfd850..000000000
--- a/test/runtimes/proctor/lib/python.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package lib
-
-import (
- "fmt"
- "os"
- "os/exec"
- "strings"
-)
-
-// pythonRunner implements TestRunner for Python.
-type pythonRunner struct{}
-
-var _ TestRunner = pythonRunner{}
-
-// ListTests implements TestRunner.ListTests.
-func (pythonRunner) ListTests() ([]string, error) {
- args := []string{"-m", "test", "--list-tests"}
- cmd := exec.Command("./python", args...)
- cmd.Stderr = os.Stderr
- out, err := cmd.Output()
- if err != nil {
- return nil, fmt.Errorf("failed to list: %v", err)
- }
- var toolSlice []string
- for _, test := range strings.Split(string(out), "\n") {
- toolSlice = append(toolSlice, test)
- }
- return toolSlice, nil
-}
-
-// TestCmds implements TestRunner.TestCmds.
-func (pythonRunner) TestCmds(tests []string) []*exec.Cmd {
- args := append([]string{"-m", "test"}, tests...)
- return []*exec.Cmd{exec.Command("./python", args...)}
-}
diff --git a/test/runtimes/proctor/main.go b/test/runtimes/proctor/main.go
deleted file mode 100644
index 8c076a499..000000000
--- a/test/runtimes/proctor/main.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Binary proctor runs the test for a particular runtime. It is meant to be
-// included in Docker images for all runtime tests.
-package main
-
-import (
- "flag"
- "fmt"
- "log"
- "os"
- "strings"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/test/runtimes/proctor/lib"
-)
-
-var (
- runtime = flag.String("runtime", "", "name of runtime")
- list = flag.Bool("list", false, "list all available tests")
- testNames = flag.String("tests", "", "run a subset of the available tests")
- pause = flag.Bool("pause", false, "cause container to pause indefinitely, reaping any zombie children")
-)
-
-// setNumFilesLimit changes the NOFILE soft rlimit if it is too high.
-func setNumFilesLimit() error {
- // In docker containers, the default value of the NOFILE limit is
- // 1048576. A few runtime tests (e.g. python:test_subprocess)
- // enumerate all possible file descriptors, and these tests can time out
- // if the NOFILE limit is too high. On gVisor, syscalls are slower, so
- // these tests need even more time to pass.
- const nofile = 32768
- rLimit := unix.Rlimit{}
- err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rLimit)
- if err != nil {
- return fmt.Errorf("failed to get RLIMIT_NOFILE: %v", err)
- }
- if rLimit.Cur > nofile {
- rLimit.Cur = nofile
- err := unix.Setrlimit(unix.RLIMIT_NOFILE, &rLimit)
- if err != nil {
- return fmt.Errorf("failed to set RLIMIT_NOFILE: %v", err)
- }
- }
- return nil
-}
-
-func main() {
- flag.Parse()
-
- if *pause {
- lib.PauseAndReap()
- panic("pauseAndReap should never return")
- }
-
- if *runtime == "" {
- log.Fatalf("runtime flag must be provided")
- }
-
- tr, err := lib.TestRunnerForRuntime(*runtime)
- if err != nil {
- log.Fatalf("%v", err)
- }
-
- // List tests.
- if *list {
- tests, err := tr.ListTests()
- if err != nil {
- log.Fatalf("failed to list tests: %v", err)
- }
- for _, test := range tests {
- fmt.Println(test)
- }
- return
- }
-
- var tests []string
- if *testNames == "" {
- // Run every test.
- tests, err = tr.ListTests()
- if err != nil {
- log.Fatalf("failed to get all tests: %v", err)
- }
- } else {
- // Run a subset of tests.
- tests = strings.Split(*testNames, ",")
- }
-
- if err := setNumFilesLimit(); err != nil {
- log.Fatalf("%v", err)
- }
-
- // Run tests.
- cmds := tr.TestCmds(tests)
- for _, cmd := range cmds {
- cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
- if err := cmd.Run(); err != nil {
- log.Fatalf("FAIL: %v", err)
- }
- }
-}
diff --git a/test/runtimes/runner/BUILD b/test/runtimes/runner/BUILD
deleted file mode 100644
index 70cc01594..000000000
--- a/test/runtimes/runner/BUILD
+++ /dev/null
@@ -1,11 +0,0 @@
-load("//tools:defs.bzl", "go_binary")
-
-package(licenses = ["notice"])
-
-go_binary(
- name = "runner",
- testonly = 1,
- srcs = ["main.go"],
- visibility = ["//test/runtimes:__pkg__"],
- deps = ["//test/runtimes/runner/lib"],
-)
diff --git a/test/runtimes/runner/lib/BUILD b/test/runtimes/runner/lib/BUILD
deleted file mode 100644
index d308f41b0..000000000
--- a/test/runtimes/runner/lib/BUILD
+++ /dev/null
@@ -1,22 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "lib",
- testonly = 1,
- srcs = ["lib.go"],
- visibility = ["//test/runtimes/runner:__pkg__"],
- deps = [
- "//pkg/log",
- "//pkg/test/dockerutil",
- "//pkg/test/testutil",
- ],
-)
-
-go_test(
- name = "lib_test",
- size = "small",
- srcs = ["exclude_test.go"],
- library = ":lib",
-)
diff --git a/test/runtimes/runner/lib/exclude_test.go b/test/runtimes/runner/lib/exclude_test.go
deleted file mode 100644
index f996e895b..000000000
--- a/test/runtimes/runner/lib/exclude_test.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package lib
-
-import (
- "flag"
- "os"
- "testing"
-)
-
-var excludeFile = flag.String("exclude_file", "", "file to test (standard format)")
-
-func TestMain(m *testing.M) {
- flag.Parse()
- os.Exit(m.Run())
-}
-
-// Test that the exclude file parses without error.
-func TestExcludelist(t *testing.T) {
- ex, err := getExcludes(*excludeFile)
- if err != nil {
- t.Fatalf("error parsing exclude file: %v", err)
- }
- if *excludeFile != "" && len(ex) == 0 {
- t.Errorf("got empty excludes for file %q", *excludeFile)
- }
-}
diff --git a/test/runtimes/runner/lib/lib.go b/test/runtimes/runner/lib/lib.go
deleted file mode 100644
index d6b652897..000000000
--- a/test/runtimes/runner/lib/lib.go
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package lib provides utilities for runner.
-package lib
-
-import (
- "context"
- "encoding/csv"
- "fmt"
- "io"
- "os"
- "sort"
- "strings"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/log"
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/pkg/test/testutil"
-)
-
-// RunTests is a helper that is called by main. It exists so that we can run
- // deferred functions before exiting. It returns an exit code that should be
-// passed to os.Exit.
-func RunTests(lang, image, excludeFile string, batchSize int, timeout time.Duration) int {
- // TODO(gvisor.dev/issue/1624): Remove the tests that only fail with VFS1
- // from all exclude lists.
-
- // Get tests to exclude.
- excludes, err := getExcludes(excludeFile)
- if err != nil {
- fmt.Fprintf(os.Stderr, "Error getting exclude list: %s\n", err.Error())
- return 1
- }
-
- // Construct the shared docker instance.
- ctx := context.Background()
- d := dockerutil.MakeContainer(ctx, testutil.DefaultLogger(lang))
- defer d.CleanUp(ctx)
-
- if err := testutil.TouchShardStatusFile(); err != nil {
- fmt.Fprintf(os.Stderr, "error touching status shard file: %v\n", err)
- return 1
- }
-
- // Get a slice of tests to run. This will also start a single Docker
- // container that will be used to run each test. The final test will
- // stop the Docker container.
- tests, err := getTests(ctx, d, lang, image, batchSize, timeout, excludes)
- if err != nil {
- fmt.Fprintf(os.Stderr, "%s\n", err.Error())
- return 1
- }
-
- m := testing.MainStart(testDeps{}, tests, nil, nil)
- return m.Run()
-}
-
-// getTests executes all tests as table tests.
-func getTests(ctx context.Context, d *dockerutil.Container, lang, image string, batchSize int, timeout time.Duration, excludes map[string]struct{}) ([]testing.InternalTest, error) {
- // Start the container.
- opts := dockerutil.RunOpts{
- Image: fmt.Sprintf("runtimes/%s", image),
- }
- d.CopyFiles(&opts, "/proctor", "test/runtimes/proctor/proctor")
- if err := d.Spawn(ctx, opts, "/proctor/proctor", "--pause"); err != nil {
- return nil, fmt.Errorf("docker run failed: %v", err)
- }
-
- // Get a list of all tests in the image.
- list, err := d.Exec(ctx, dockerutil.ExecOpts{}, "/proctor/proctor", "--runtime", lang, "--list")
- if err != nil {
- return nil, fmt.Errorf("docker exec failed: %v", err)
- }
-
- // Calculate a subset of tests.
- tests := strings.Fields(list)
- sort.Strings(tests)
- indices, err := testutil.TestIndicesForShard(len(tests))
- if err != nil {
- return nil, fmt.Errorf("TestsForShard() failed: %v", err)
- }
-
- var itests []testing.InternalTest
- for i := 0; i < len(indices); i += batchSize {
- var tcs []string
- end := i + batchSize
- if end > len(indices) {
- end = len(indices)
- }
- for _, tc := range indices[i:end] {
- // Add test if not excluded.
- if _, ok := excludes[tests[tc]]; ok {
- log.Infof("Skipping test case %s\n", tests[tc])
- continue
- }
- tcs = append(tcs, tests[tc])
- }
- if len(tcs) == 0 {
- // No tests to add to this batch.
- continue
- }
- itests = append(itests, testing.InternalTest{
- Name: strings.Join(tcs, ", "),
- F: func(t *testing.T) {
- var (
- now = time.Now()
- done = make(chan struct{})
- output string
- err error
- )
-
- state, err := d.Status(ctx)
- if err != nil {
- t.Fatalf("Could not find container status: %v", err)
- }
- if !state.Running {
- t.Fatalf("container is not running: state = %s", state.Status)
- }
-
- go func() {
- output, err = d.Exec(ctx, dockerutil.ExecOpts{}, "/proctor/proctor", "--runtime", lang, "--tests", strings.Join(tcs, ","))
- close(done)
- }()
-
- select {
- case <-done:
- if err == nil {
- fmt.Printf("PASS: (%v) %d tests passed\n", time.Since(now), len(tcs))
- return
- }
- t.Errorf("FAIL: (%v):\nBatch:\n%s\nOutput:\n%s\n", time.Since(now), strings.Join(tcs, "\n"), output)
- case <-time.After(timeout):
- t.Errorf("TIMEOUT: (%v):\nBatch:\n%s\nOutput:\n%s\n", time.Since(now), strings.Join(tcs, "\n"), output)
- }
- },
- })
- }
-
- return itests, nil
-}
-
-// getExcludes reads the exclude file and returns a set of test names to
-// exclude.
-func getExcludes(excludeFile string) (map[string]struct{}, error) {
- excludes := make(map[string]struct{})
- if excludeFile == "" {
- return excludes, nil
- }
- f, err := os.Open(excludeFile)
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- r := csv.NewReader(f)
-
- // First line is header. Skip it.
- if _, err := r.Read(); err != nil {
- return nil, err
- }
-
- for {
- record, err := r.Read()
- if err == io.EOF {
- break
- }
- if err != nil {
- return nil, err
- }
- excludes[record[0]] = struct{}{}
- }
- return excludes, nil
-}
-
-// testDeps implements testing.testDeps (an unexported interface), and is
-// required to use testing.MainStart.
-type testDeps struct{}
-
-func (f testDeps) MatchString(a, b string) (bool, error) { return a == b, nil }
-func (f testDeps) StartCPUProfile(io.Writer) error { return nil }
-func (f testDeps) StopCPUProfile() {}
-func (f testDeps) WriteProfileTo(string, io.Writer, int) error { return nil }
-func (f testDeps) ImportPath() string { return "" }
-func (f testDeps) StartTestLog(io.Writer) {}
-func (f testDeps) StopTestLog() error { return nil }
-func (f testDeps) SetPanicOnExit0(bool) {}
diff --git a/test/runtimes/runner/main.go b/test/runtimes/runner/main.go
deleted file mode 100644
index ec79a22c2..000000000
--- a/test/runtimes/runner/main.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Binary runner runs the runtime tests in a Docker container.
-package main
-
-import (
- "flag"
- "fmt"
- "os"
- "time"
-
- "gvisor.dev/gvisor/test/runtimes/runner/lib"
-)
-
-var (
- lang = flag.String("lang", "", "language runtime to test")
- image = flag.String("image", "", "docker image with runtime tests")
- excludeFile = flag.String("exclude_file", "", "file containing list of tests to exclude, in CSV format with fields: test name, bug id, comment")
- batchSize = flag.Int("batch", 50, "number of test cases run in one command")
- timeout = flag.Duration("timeout", 90*time.Minute, "batch timeout")
-)
-
-func main() {
- flag.Parse()
- if *lang == "" || *image == "" {
- fmt.Fprintf(os.Stderr, "lang and image flags must not be empty\n")
- os.Exit(1)
- }
- os.Exit(lib.RunTests(*lang, *image, *excludeFile, *batchSize, *timeout))
-}
diff --git a/test/syscalls/BUILD b/test/syscalls/BUILD
deleted file mode 100644
index 213c7e96c..000000000
--- a/test/syscalls/BUILD
+++ /dev/null
@@ -1,1033 +0,0 @@
-load("//tools:defs.bzl", "more_shards", "most_shards")
-load("//test/runner:defs.bzl", "syscall_test")
-
-package(licenses = ["notice"])
-
-# Please keep syscall tests ordered alphabetically by name.
-
-syscall_test(
- test = "//test/syscalls/linux:32bit_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:accept_bind_stream_test",
-)
-
-syscall_test(
- size = "large",
- shard_count = most_shards,
- test = "//test/syscalls/linux:accept_bind_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:access_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:affinity_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:aio_test",
-)
-
-syscall_test(
- size = "medium",
- shard_count = more_shards,
- test = "//test/syscalls/linux:alarm_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:arch_prctl_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:bad_test",
-)
-
-syscall_test(
- size = "large",
- add_overlay = True,
- test = "//test/syscalls/linux:bind_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:brk_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:cgroup_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:chdir_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:chmod_test",
-)
-
-syscall_test(
- size = "medium",
- add_overlay = True,
- test = "//test/syscalls/linux:chown_test",
- use_tmpfs = True, # chown tests require gofer to be running as root.
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:chroot_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:clock_getres_test",
-)
-
-syscall_test(
- size = "medium",
- test = "//test/syscalls/linux:clock_gettime_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:clock_nanosleep_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:concurrency_test",
-)
-
-syscall_test(
- add_uds_tree = True,
- test = "//test/syscalls/linux:connect_external_test",
- use_tmpfs = True,
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:creat_test",
-)
-
-syscall_test(
- fuse = "True",
- test = "//test/syscalls/linux:dev_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:dup_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:epoll_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:eventfd_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:exceptions_test",
-)
-
-syscall_test(
- size = "medium",
- add_overlay = True,
- test = "//test/syscalls/linux:exec_test",
-)
-
-syscall_test(
- size = "medium",
- add_overlay = True,
- test = "//test/syscalls/linux:exec_binary_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:exit_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:fadvise64_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:fallocate_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:fault_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:fchdir_test",
-)
-
-syscall_test(
- size = "medium",
- test = "//test/syscalls/linux:fcntl_test",
-)
-
-syscall_test(
- size = "medium",
- add_overlay = True,
- test = "//test/syscalls/linux:flock_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:fork_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:fpsig_fork_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:fpsig_nested_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:fsync_test",
-)
-
-syscall_test(
- size = "medium",
- shard_count = more_shards,
- test = "//test/syscalls/linux:futex_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:getcpu_host_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:getcpu_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:getdents_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:verity_getdents_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:getrandom_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:getrusage_test",
-)
-
-syscall_test(
- size = "medium",
- add_overlay = True,
- test = "//test/syscalls/linux:inotify_test",
-)
-
-syscall_test(
- size = "medium",
- add_overlay = True,
- test = "//test/syscalls/linux:ioctl_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:verity_ioctl_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:iptables_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:ip6tables_test",
-)
-
-syscall_test(
- size = "large",
- shard_count = more_shards,
- test = "//test/syscalls/linux:itimer_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:kcov_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:kill_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:link_test",
- use_tmpfs = True, # gofer needs CAP_DAC_READ_SEARCH to use AT_EMPTY_PATH with linkat(2)
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:lseek_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:madvise_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:membarrier_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:memory_accounting_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:mempolicy_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:mincore_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:mkdir_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:mknod_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:mlock_test",
-)
-
-syscall_test(
- size = "medium",
- shard_count = more_shards,
- test = "//test/syscalls/linux:mmap_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:verity_mmap_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:mount_test",
-)
-
-syscall_test(
- size = "medium",
- test = "//test/syscalls/linux:mremap_test",
-)
-
-syscall_test(
- size = "medium",
- test = "//test/syscalls/linux:msync_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:munmap_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:network_namespace_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:open_create_test",
-)
-
-syscall_test(
- add_overlay = True,
- shard_count = more_shards,
- test = "//test/syscalls/linux:open_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:packet_socket_raw_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:packet_socket_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:partial_bad_buffer_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:pause_test",
-)
-
-syscall_test(
- size = "medium",
- # Takes too long under gotsan to run.
- tags = ["nogotsan"],
- test = "//test/syscalls/linux:ping_socket_test",
-)
-
-syscall_test(
- size = "large",
- add_overlay = True,
- shard_count = more_shards,
- test = "//test/syscalls/linux:pipe_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:poll_test",
-)
-
-syscall_test(
- size = "medium",
- test = "//test/syscalls/linux:ppoll_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:prctl_setuid_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:prctl_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:pread64_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:preadv_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:preadv2_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:priority_test",
-)
-
-syscall_test(
- size = "medium",
- test = "//test/syscalls/linux:proc_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:proc_net_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:proc_pid_oomscore_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:proc_pid_smaps_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:proc_pid_uid_gid_map_test",
-)
-
-syscall_test(
- size = "medium",
- test = "//test/syscalls/linux:pselect_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:ptrace_test",
-)
-
-syscall_test(
- size = "medium",
- shard_count = more_shards,
- test = "//test/syscalls/linux:pty_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:pty_root_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:pwritev2_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:pwrite64_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:raw_socket_hdrincl_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:raw_socket_icmp_test",
-)
-
-syscall_test(
- shard_count = more_shards,
- test = "//test/syscalls/linux:raw_socket_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:read_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:readahead_test",
-)
-
-syscall_test(
- size = "medium",
- shard_count = more_shards,
- test = "//test/syscalls/linux:readv_socket_test",
-)
-
-syscall_test(
- size = "medium",
- add_overlay = True,
- test = "//test/syscalls/linux:readv_test",
-)
-
-syscall_test(
- size = "medium",
- add_overlay = True,
- test = "//test/syscalls/linux:rename_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:rlimits_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:rseq_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:rtsignal_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:signalfd_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:sched_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:sched_yield_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:seccomp_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:select_test",
-)
-
-syscall_test(
- shard_count = more_shards,
- test = "//test/syscalls/linux:semaphore_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:sendfile_socket_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:sendfile_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:setgid_test",
- # setgid tests require the gofer's user namespace to have multiple groups,
- # but bazel only provides one.
- use_tmpfs = True,
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:splice_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:sigaction_test",
-)
-
-# TODO(b/119826902): Enable once the test passes in runsc.
-# syscall_test(vfs2="True",test = "//test/syscalls/linux:sigaltstack_test")
-
-syscall_test(
- test = "//test/syscalls/linux:sigreturn_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:sigprocmask_test",
-)
-
-syscall_test(
- size = "medium",
- test = "//test/syscalls/linux:sigstop_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:sigtimedwait_test",
-)
-
-syscall_test(
- size = "medium",
- test = "//test/syscalls/linux:shm_test",
-)
-
-syscall_test(
- size = "medium",
- test = "//test/syscalls/linux:socket_abstract_non_blocking_test",
-)
-
-syscall_test(
- size = "large",
- shard_count = most_shards,
- test = "//test/syscalls/linux:socket_abstract_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:socket_capability_test",
-)
-
-syscall_test(
- size = "medium",
- test = "//test/syscalls/linux:socket_domain_non_blocking_test",
-)
-
-syscall_test(
- size = "large",
- shard_count = most_shards,
- test = "//test/syscalls/linux:socket_domain_test",
-)
-
-syscall_test(
- size = "medium",
- add_overlay = True,
- test = "//test/syscalls/linux:socket_filesystem_non_blocking_test",
-)
-
-syscall_test(
- size = "large",
- add_overlay = True,
- shard_count = most_shards,
- test = "//test/syscalls/linux:socket_filesystem_test",
-)
-
-syscall_test(
- size = "large",
- shard_count = most_shards,
- test = "//test/syscalls/linux:socket_inet_loopback_test",
-)
-
-syscall_test(
- size = "large",
- shard_count = most_shards,
- tags = ["container"],
- test = "//test/syscalls/linux:socket_inet_loopback_isolated_test",
-)
-
-syscall_test(
- size = "large",
- shard_count = most_shards,
- # Takes too long for TSAN. Creates a lot of TCP sockets.
- tags = ["nogotsan"],
- test = "//test/syscalls/linux:socket_inet_loopback_nogotsan_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:socket_ipv4_udp_unbound_external_networking_test",
-)
-
-syscall_test(
- size = "large",
- shard_count = most_shards,
- test = "//test/syscalls/linux:socket_ip_tcp_generic_loopback_test",
-)
-
-syscall_test(
- size = "medium",
- add_hostinet = True,
- test = "//test/syscalls/linux:socket_ip_tcp_loopback_non_blocking_test",
-)
-
-syscall_test(
- size = "large",
- shard_count = most_shards,
- test = "//test/syscalls/linux:socket_ip_tcp_loopback_test",
-)
-
-syscall_test(
- size = "medium",
- add_hostinet = True,
- shard_count = most_shards,
- test = "//test/syscalls/linux:socket_ip_tcp_udp_generic_loopback_test",
-)
-
-syscall_test(
- size = "medium",
- add_hostinet = True,
- test = "//test/syscalls/linux:socket_ip_udp_loopback_non_blocking_test",
-)
-
-syscall_test(
- size = "large",
- shard_count = most_shards,
- test = "//test/syscalls/linux:socket_ip_udp_loopback_test",
-)
-
-syscall_test(
- size = "medium",
- test = "//test/syscalls/linux:socket_ipv4_udp_unbound_loopback_test",
-)
-
-syscall_test(
- size = "medium",
- test = "//test/syscalls/linux:socket_ipv6_udp_unbound_loopback_test",
-)
-
-syscall_test(
- size = "medium",
- add_hostinet = True,
- shard_count = more_shards,
- # Takes too long under gotsan to run.
- tags = ["nogotsan"],
- test = "//test/syscalls/linux:socket_ipv4_udp_unbound_loopback_nogotsan_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:socket_ipv4_udp_unbound_loopback_netlink_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:socket_ipv6_udp_unbound_loopback_netlink_test",
-)
-
-syscall_test(
- shard_count = more_shards,
- test = "//test/syscalls/linux:socket_ip_unbound_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:socket_ip_unbound_netlink_test",
-)
-
-syscall_test(
- add_hostinet = True,
- test = "//test/syscalls/linux:socket_netdevice_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:socket_netlink_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:socket_netlink_route_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:socket_netlink_uevent_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:socket_blocking_local_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:socket_blocking_ip_test",
-)
-
-syscall_test(
- add_hostinet = True,
- test = "//test/syscalls/linux:socket_non_stream_blocking_local_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:socket_non_stream_blocking_udp_test",
-)
-
-syscall_test(
- size = "large",
- test = "//test/syscalls/linux:socket_stream_blocking_local_test",
-)
-
-syscall_test(
- size = "large",
- test = "//test/syscalls/linux:socket_stream_blocking_tcp_test",
-)
-
-syscall_test(
- size = "medium",
- test = "//test/syscalls/linux:socket_stream_local_test",
-)
-
-syscall_test(
- size = "medium",
- test = "//test/syscalls/linux:socket_stream_nonblock_local_test",
-)
-
-syscall_test(
- size = "large",
- shard_count = most_shards,
- test = "//test/syscalls/linux:socket_stress_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:socket_test",
-)
-
-syscall_test(
- flaky = 1, # NOTE(b/116636318): Large sendmsg may stall a long time.
- shard_count = more_shards,
- test = "//test/syscalls/linux:socket_unix_dgram_local_test",
-)
-
-syscall_test(
- size = "medium",
- test = "//test/syscalls/linux:socket_unix_dgram_non_blocking_test",
-)
-
-syscall_test(
- size = "large",
- add_overlay = True,
- shard_count = most_shards,
- test = "//test/syscalls/linux:socket_unix_pair_test",
-)
-
-syscall_test(
- flaky = 1, # NOTE(b/116636318): Large sendmsg may stall a long time.
- shard_count = more_shards,
- test = "//test/syscalls/linux:socket_unix_seqpacket_local_test",
-)
-
-syscall_test(
- size = "medium",
- test = "//test/syscalls/linux:socket_unix_stream_test",
-)
-
-syscall_test(
- size = "medium",
- test = "//test/syscalls/linux:socket_unix_unbound_abstract_test",
-)
-
-syscall_test(
- size = "medium",
- test = "//test/syscalls/linux:socket_unix_unbound_dgram_test",
-)
-
-syscall_test(
- size = "medium",
- test = "//test/syscalls/linux:socket_unix_unbound_filesystem_test",
-)
-
-syscall_test(
- size = "medium",
- shard_count = more_shards,
- test = "//test/syscalls/linux:socket_unix_unbound_seqpacket_test",
-)
-
-syscall_test(
- size = "large",
- shard_count = most_shards,
- test = "//test/syscalls/linux:socket_unix_unbound_stream_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:statfs_test",
- use_tmpfs = True, # Test specifically relies on TEST_TMPDIR to be tmpfs.
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:stat_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:stat_times_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:sticky_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:symlink_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:verity_symlink_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:sync_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:sync_file_range_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:sysinfo_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:syslog_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:sysret_test",
-)
-
-syscall_test(
- size = "medium",
- shard_count = more_shards,
- test = "//test/syscalls/linux:tcp_socket_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:tgkill_test",
-)
-
-syscall_test(
- shard_count = more_shards,
- test = "//test/syscalls/linux:timerfd_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:timers_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:time_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:tkill_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:truncate_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:tuntap_test",
-)
-
-syscall_test(
- add_hostinet = True,
- test = "//test/syscalls/linux:tuntap_hostinet_test",
-)
-
-syscall_test(
- add_hostinet = True,
- test = "//test/syscalls/linux:udp_bind_test",
-)
-
-syscall_test(
- size = "medium",
- add_hostinet = True,
- shard_count = more_shards,
- test = "//test/syscalls/linux:udp_socket_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:uidgid_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:uname_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:unlink_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:unshare_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:utimes_test",
-)
-
-syscall_test(
- size = "medium",
- test = "//test/syscalls/linux:vdso_clock_gettime_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:vdso_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:vsyscall_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:vfork_test",
-)
-
-syscall_test(
- size = "medium",
- shard_count = more_shards,
- test = "//test/syscalls/linux:wait_test",
-)
-
-syscall_test(
- add_overlay = True,
- test = "//test/syscalls/linux:write_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:proc_net_unix_test",
-)
-
-syscall_test(
- add_hostinet = True,
- test = "//test/syscalls/linux:proc_net_tcp_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:proc_net_udp_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:processes_test",
-)
-
-syscall_test(
- test = "//test/syscalls/linux:verity_mount_test",
-)
diff --git a/test/syscalls/README.md b/test/syscalls/README.md
deleted file mode 100644
index 9e0991940..000000000
--- a/test/syscalls/README.md
+++ /dev/null
@@ -1,107 +0,0 @@
-# gVisor system call test suite
-
-This is a test suite for Linux system calls. It runs under both gVisor and
-Linux, and ensures compatibility between the two.
-
-When adding support for a new syscall (or syscall argument) to gVisor, a
-corresponding syscall test should be added. It's usually recommended to write
-the test first and make sure that it passes on Linux before making changes to
-gVisor.
-
-This document outlines the general guidelines for tests and specific rules that
-must be followed for new tests.
-
-## Running the tests
-
-Each test file generates three different test targets that run in different
-environments:
-
-* a `native` target that runs directly on the host machine,
-* a `runsc_ptrace` target that runs inside runsc using the ptrace platform, and
-* a `runsc_kvm` target that runs inside runsc using the KVM platform.
-
-For example, the test in `access_test.cc` generates the following targets:
-
-* `//test/syscalls:access_test_native`
-* `//test/syscalls:access_test_runsc_ptrace`
-* `//test/syscalls:access_test_runsc_kvm`
-
-Any of these targets can be run directly via `bazel test`.
-
-```bash
-$ bazel test //test/syscalls:access_test_native
-$ bazel test //test/syscalls:access_test_runsc_ptrace
-$ bazel test //test/syscalls:access_test_runsc_kvm
-```
-
-To run all the tests on a particular platform, you can filter by the platform
-tag:
-
-```bash
-# Run all tests in native environment:
-$ bazel test --test_tag_filters=native //test/syscalls/...
-
-# Run all tests in runsc with ptrace:
-$ bazel test --test_tag_filters=runsc_ptrace //test/syscalls/...
-
-# Run all tests in runsc with kvm:
-$ bazel test --test_tag_filters=runsc_kvm //test/syscalls/...
-```
-
-You can also run all the tests on every platform. (Warning: this may take a
-while to run.)
-
-```bash
-# Run all tests on every platform:
-$ bazel test //test/syscalls/...
-```
-
-## Writing new tests
-
-Whenever we add support for a new syscall, or add support for a new argument or
-option for a syscall, we should always add a new test (perhaps many new tests).
-
-In general, it is best to write the test first and make sure it passes on Linux
-by running the test on the `native` platform on a Linux machine. This ensures
-that the gVisor implementation matches actual Linux behavior. Sometimes man
-pages contain errors, so always check the actual Linux behavior.
-
-gVisor uses the [Google Test][googletest] test framework, with a few custom
-matchers and guidelines, described below.
-
-### Syscall matchers
-
-When testing an individual system call, use the following syscall matchers,
-which will match the value returned by the syscall and the errno.
-
-```cc
-SyscallSucceeds()
-SyscallSucceedsWithValue(...)
-SyscallFails()
-SyscallFailsWithErrno(...)
-```
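
For illustration, a matcher is normally combined with gtest's `EXPECT_THAT`/`ASSERT_THAT`. The sketch below is an editorial example rather than part of the original suite; the `test/util/test_util.h` include path and the test body are assumptions.

```cc
// Editorial sketch of how the syscall matchers are typically used; the
// include paths and test contents are illustrative assumptions.
#include <errno.h>
#include <fcntl.h>

#include "gtest/gtest.h"
#include "test/util/test_util.h"

namespace gvisor {
namespace testing {
namespace {

TEST(ExampleOpenTest, MissingFileFailsWithENOENT) {
  // SyscallFailsWithErrno checks both the -1 return value and errno.
  EXPECT_THAT(open("/this/path/does/not/exist", O_RDONLY),
              SyscallFailsWithErrno(ENOENT));
}

}  // namespace
}  // namespace testing
}  // namespace gvisor
```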
-
-### Use test utilities (RAII classes)
-
-The test utilities are written as RAII classes. These utilities should be
-preferred over custom test harnesses.
-
-Local class instances should be preferred, wherever possible, over full test
-fixtures.
-
-A test utility should be created when more than one test requires the same
-functionality; otherwise, the class should be kept local to the test.
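
As a rough sketch of the RAII pattern (the class below is an editorial, hypothetical example; real tests should prefer the existing helpers under `//test/util`, such as `TempPath`, `FileDescriptor`, and `TempUmask`):

```cc
// Hypothetical RAII helper, for illustration only.
#include <sys/stat.h>
#include <sys/types.h>

#include "gtest/gtest.h"

class ScopedUmask {
 public:
  // Install a new umask and remember the previous one.
  explicit ScopedUmask(mode_t new_mask) : old_mask_(umask(new_mask)) {}

  // Restore the previous umask, even if the test body failed early.
  ~ScopedUmask() { umask(old_mask_); }

 private:
  mode_t old_mask_;
};

TEST(UmaskExampleTest, RestoredOnScopeExit) {
  ScopedUmask scoped(0022);
  // ... assertions run with the temporary umask; it is restored when
  // `scoped` goes out of scope, including on early returns from ASSERT_*.
}
```

The destructor runs even when an `ASSERT_*` macro returns early, which is what makes local RAII instances safer than manual cleanup or full test fixtures for one-off needs.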
-
-## Save/Restore support in tests
-
-gVisor supports save/restore, and our syscall tests are written in a way to
-enable saving/restoring at certain points. Hence, there are calls to
-`MaybeSave`, and certain tests that should not trigger saves are named with
-`NoSave`.
-
-However, the current open-source test runner does not yet support triggering
-save/restore, so these functions and annotations have no effect on the tests.
-We plan to extend the test runner to trigger save/restore; until then, these
-calls and annotations should be ignored.
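
As a sketch of the convention described above (editorial example; the `test/util/save_util.h` include path and the test bodies are assumptions):

```cc
#include "gtest/gtest.h"
#include "test/util/save_util.h"  // Assumed home of MaybeSave().

// A save/restore checkpoint is acceptable between the write and the check.
TEST(SaveExampleTest, WriteThenVerify) {
  // ... perform a write ...
  MaybeSave();  // A checkpoint may be taken here once the runner supports it.
  // ... verify the result ...
}

// The NoSave suffix marks tests where saving must not be triggered.
TEST(SaveExampleTest, TimingSensitive_NoSave) {
  // ... timing-sensitive assertions that a save/restore cycle would perturb ...
}
```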
-
-[googletest]: https://github.com/abseil/googletest
diff --git a/test/syscalls/linux/32bit.cc b/test/syscalls/linux/32bit.cc
deleted file mode 100644
index cbf1b4f05..000000000
--- a/test/syscalls/linux/32bit.cc
+++ /dev/null
@@ -1,248 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <string.h>
-#include <sys/mman.h>
-
-#include "gtest/gtest.h"
-#include "absl/base/macros.h"
-#include "test/util/memory_util.h"
-#include "test/util/platform_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-#ifdef __x86_64__
-
-constexpr char kInt3 = '\xcc';
-constexpr char kInt80[2] = {'\xcd', '\x80'};
-constexpr char kSyscall[2] = {'\x0f', '\x05'};
-constexpr char kSysenter[2] = {'\x0f', '\x34'};
-
-void ExitGroup32(const char instruction[2], int code) {
- const Mapping m = ASSERT_NO_ERRNO_AND_VALUE(
- Mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0));
-
- // Fill with INT 3 in case we execute too far.
- memset(m.ptr(), kInt3, m.len());
-
- // Copy in the actual instruction.
- memcpy(m.ptr(), instruction, 2);
-
- // We're playing *extremely* fast-and-loose with the various syscall ABIs
- // here, which we can more-or-less get away with since exit_group doesn't
- // return.
- //
- // SYSENTER expects the user stack in (%ebp) and arg6 in 0(%ebp). The kernel
- // will unconditionally dereference %ebp for arg6, so we must pass a valid
- // address or it will return EFAULT.
- //
- // SYSENTER also unconditionally returns to thread_info->sysenter_return which
- // is ostensibly a stub in the 32-bit VDSO. But a 64-bit binary doesn't have
- // the 32-bit VDSO mapped, so sysenter_return will simply be the value
- // inherited from the most recent 32-bit ancestor, or NULL if there is none.
- // As a result, we would not return correctly from SYSENTER.
- asm volatile(
- "movl $252, %%eax\n" // exit_group
- "movl %[code], %%ebx\n" // code
- "movl %%edx, %%ebp\n" // SYSENTER: user stack (use IP as a valid addr)
- "leaq -20(%%rsp), %%rsp\n"
- "movl $0x2b, 16(%%rsp)\n" // SS = CPL3 data segment
- "movl $0,12(%%rsp)\n" // ESP = nullptr (unused)
- "movl $0, 8(%%rsp)\n" // EFLAGS
- "movl $0x23, 4(%%rsp)\n" // CS = CPL3 32-bit code segment
- "movl %%edx, 0(%%rsp)\n" // EIP
- "iretl\n"
- "int $3\n"
- :
- : [ code ] "m"(code), [ ip ] "d"(m.ptr())
- : "rax", "rbx");
-}
-
-constexpr int kExitCode = 42;
-
-TEST(Syscall32Bit, Int80) {
- switch (PlatformSupport32Bit()) {
- case PlatformSupport::NotSupported:
- break;
- case PlatformSupport::Segfault:
- EXPECT_EXIT(ExitGroup32(kInt80, kExitCode),
- ::testing::KilledBySignal(SIGSEGV), "");
- break;
-
- case PlatformSupport::Ignored:
- // Since the call is ignored, we'll hit the int3 trap.
- EXPECT_EXIT(ExitGroup32(kInt80, kExitCode),
- ::testing::KilledBySignal(SIGTRAP), "");
- break;
-
- case PlatformSupport::Allowed:
- EXPECT_EXIT(ExitGroup32(kInt80, kExitCode), ::testing::ExitedWithCode(42),
- "");
- break;
- }
-}
-
-TEST(Syscall32Bit, Sysenter) {
- if ((PlatformSupport32Bit() == PlatformSupport::Allowed ||
- PlatformSupport32Bit() == PlatformSupport::Ignored) &&
- GetCPUVendor() == CPUVendor::kAMD) {
- // SYSENTER is an illegal instruction in compatibility mode on AMD.
- EXPECT_EXIT(ExitGroup32(kSysenter, kExitCode),
- ::testing::KilledBySignal(SIGILL), "");
- return;
- }
-
- switch (PlatformSupport32Bit()) {
- case PlatformSupport::NotSupported:
- break;
-
- case PlatformSupport::Segfault:
- EXPECT_EXIT(ExitGroup32(kSysenter, kExitCode),
- ::testing::KilledBySignal(SIGSEGV), "");
- break;
-
- case PlatformSupport::Ignored:
- // See above, except the expected signal here is SIGSEGV.
- EXPECT_EXIT(ExitGroup32(kSysenter, kExitCode),
- ::testing::KilledBySignal(SIGSEGV), "");
- break;
-
- case PlatformSupport::Allowed:
- EXPECT_EXIT(ExitGroup32(kSysenter, kExitCode),
- ::testing::ExitedWithCode(42), "");
- break;
- }
-}
-
-TEST(Syscall32Bit, Syscall) {
- if ((PlatformSupport32Bit() == PlatformSupport::Allowed ||
- PlatformSupport32Bit() == PlatformSupport::Ignored) &&
- GetCPUVendor() == CPUVendor::kIntel) {
- // SYSCALL is an illegal instruction in compatibility mode on Intel.
- EXPECT_EXIT(ExitGroup32(kSyscall, kExitCode),
- ::testing::KilledBySignal(SIGILL), "");
- return;
- }
-
- switch (PlatformSupport32Bit()) {
- case PlatformSupport::NotSupported:
- break;
-
- case PlatformSupport::Segfault:
- EXPECT_EXIT(ExitGroup32(kSyscall, kExitCode),
- ::testing::KilledBySignal(SIGSEGV), "");
- break;
-
- case PlatformSupport::Ignored:
- // See above.
- EXPECT_EXIT(ExitGroup32(kSyscall, kExitCode),
- ::testing::KilledBySignal(SIGSEGV), "");
- break;
-
- case PlatformSupport::Allowed:
- EXPECT_EXIT(ExitGroup32(kSyscall, kExitCode),
- ::testing::ExitedWithCode(42), "");
- break;
- }
-}
-
-// Far call code called below.
-//
-// Input stack layout:
-//
-// %esp+12 lcall segment
-// %esp+8 lcall address offset
-// %esp+0 return address
-//
-// The lcall will enter compatibility mode and jump to the call address (the
-// address of the lret). The lret will return to 64-bit mode at the retq, which
-// will return to the external caller of this function.
-//
-// Since this enters compatibility mode, it must be mapped in a 32-bit region of
-// address space and have a 32-bit stack pointer.
-constexpr char kFarCall[] = {
- '\x67', '\xff', '\x5c', '\x24', '\x08', // lcall *8(%esp)
- '\xc3', // retq
- '\xcb', // lret
-};
-
-void FarCall32() {
- const Mapping m = ASSERT_NO_ERRNO_AND_VALUE(
- Mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0));
-
- // Fill with INT 3 in case we execute too far.
- memset(m.ptr(), kInt3, m.len());
-
- // 32-bit code.
- memcpy(m.ptr(), kFarCall, sizeof(kFarCall));
-
- // Use the end of the code page as its stack.
- uintptr_t stack = m.endaddr();
-
- uintptr_t lcall = m.addr();
- uintptr_t lret = m.addr() + sizeof(kFarCall) - 1;
-
- // N.B. We must save and restore RSP manually. GCC can do so automatically
- // with an "rsp" clobber, but clang cannot.
- asm volatile(
- // Place the address of lret (%edx) and the 32-bit code segment (0x23) on
- // the 32-bit stack for lcall.
- "subl $0x8, %%ecx\n"
- "movl $0x23, 4(%%ecx)\n"
- "movl %%edx, 0(%%ecx)\n"
-
- // Save the current stack and switch to 32-bit stack.
- "pushq %%rbp\n"
- "movq %%rsp, %%rbp\n"
- "movq %%rcx, %%rsp\n"
-
- // Run the lcall code.
- "callq *%%rbx\n"
-
- // Restore the old stack.
- "leaveq\n"
- : "+c"(stack)
- : "b"(lcall), "d"(lret));
-}
-
-TEST(Call32Bit, Disallowed) {
- switch (PlatformSupport32Bit()) {
- case PlatformSupport::NotSupported:
- break;
-
- case PlatformSupport::Segfault:
- EXPECT_EXIT(FarCall32(), ::testing::KilledBySignal(SIGSEGV), "");
- break;
-
- case PlatformSupport::Ignored:
- ABSL_FALLTHROUGH_INTENDED;
- case PlatformSupport::Allowed:
- // Shouldn't crash.
- FarCall32();
- }
-}
-
-#endif
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/BUILD b/test/syscalls/linux/BUILD
deleted file mode 100644
index fa2a080f1..000000000
--- a/test/syscalls/linux/BUILD
+++ /dev/null
@@ -1,4353 +0,0 @@
-load("//tools:defs.bzl", "cc_binary", "cc_library", "default_net_util", "gbenchmark", "gtest", "select_arch", "select_system")
-
-package(
- default_visibility = ["//:sandbox"],
- licenses = ["notice"],
-)
-
-exports_files(
- [
- "socket.cc",
- "socket_inet_loopback.cc",
- "socket_inet_loopback_isolated.cc",
- "socket_inet_loopback_test_params.h",
- "socket_ip_loopback_blocking.cc",
- "socket_ip_tcp_generic_loopback.cc",
- "socket_ip_tcp_loopback.cc",
- "socket_ip_tcp_loopback_blocking.cc",
- "socket_ip_tcp_loopback_nonblock.cc",
- "socket_ip_tcp_udp_generic.cc",
- "socket_ip_udp_loopback.cc",
- "socket_ip_udp_loopback_blocking.cc",
- "socket_ip_udp_loopback_nonblock.cc",
- "socket_ip_unbound.cc",
- "socket_ipv4_udp_unbound_external_networking_test.cc",
- "socket_ipv6_udp_unbound_external_networking_test.cc",
- "socket_ipv4_udp_unbound_loopback.cc",
- "socket_ipv6_udp_unbound_loopback.cc",
- "socket_ipv4_udp_unbound_loopback_nogotsan.cc",
- "tcp_socket.cc",
- "udp_bind.cc",
- "udp_socket.cc",
- ],
- visibility = ["//:sandbox"],
-)
-
-cc_binary(
- name = "sigaltstack_check",
- testonly = 1,
- srcs = ["sigaltstack_check.cc"],
- deps = ["//test/util:logging"],
-)
-
-cc_binary(
- name = "exec_assert_closed_workload",
- testonly = 1,
- srcs = ["exec_assert_closed_workload.cc"],
- deps = [
- "@com_google_absl//absl/strings",
- ],
-)
-
-cc_binary(
- name = "exec_basic_workload",
- testonly = 1,
- srcs = [
- "exec.h",
- "exec_basic_workload.cc",
- ],
-)
-
-cc_binary(
- name = "exec_proc_exe_workload",
- testonly = 1,
- srcs = ["exec_proc_exe_workload.cc"],
- deps = [
- "//test/util:fs_util",
- "//test/util:posix_error",
- ],
-)
-
-cc_binary(
- name = "exec_state_workload",
- testonly = 1,
- srcs = ["exec_state_workload.cc"],
- deps = ["@com_google_absl//absl/strings"],
-)
-
-sh_binary(
- name = "exit_script",
- testonly = 1,
- srcs = [
- "exit_script.sh",
- ],
-)
-
-cc_binary(
- name = "priority_execve",
- testonly = 1,
- srcs = [
- "priority_execve.cc",
- ],
-)
-
-cc_library(
- name = "base_poll_test",
- testonly = 1,
- srcs = ["base_poll_test.cc"],
- hdrs = ["base_poll_test.h"],
- deps = [
- "@com_google_absl//absl/memory",
- "@com_google_absl//absl/synchronization",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:logging",
- "//test/util:signal_util",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_library(
- name = "file_base",
- testonly = 1,
- hdrs = ["file_base.h"],
- deps = [
- "//test/util:file_descriptor",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:posix_error",
- "//test/util:temp_path",
- "//test/util:test_util",
- ],
-)
-
-cc_library(
- name = "socket_netlink_util",
- testonly = 1,
- srcs = ["socket_netlink_util.cc"],
- hdrs = ["socket_netlink_util.h"],
- deps = [
- ":socket_test_util",
- "//test/util:file_descriptor",
- "//test/util:posix_error",
- "@com_google_absl//absl/strings",
- ],
-)
-
-cc_library(
- name = "socket_netlink_route_util",
- testonly = 1,
- srcs = ["socket_netlink_route_util.cc"],
- hdrs = ["socket_netlink_route_util.h"],
- deps = [
- ":socket_netlink_util",
- ],
-)
-
-cc_library(
- name = "socket_test_util",
- testonly = 1,
- srcs = [
- "socket_test_util.cc",
- "socket_test_util_impl.cc",
- ],
- hdrs = ["socket_test_util.h"],
- defines = select_system(),
- deps = default_net_util() + [
- gtest,
- "@com_google_absl//absl/memory",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/strings:str_format",
- "@com_google_absl//absl/time",
- "@com_google_absl//absl/types:optional",
- "//test/util:file_descriptor",
- "//test/util:posix_error",
- "//test/util:temp_path",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_library(
- name = "unix_domain_socket_test_util",
- testonly = 1,
- srcs = ["unix_domain_socket_test_util.cc"],
- hdrs = ["unix_domain_socket_test_util.h"],
- deps = [
- ":socket_test_util",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:test_util",
- ],
-)
-
-cc_library(
- name = "ip_socket_test_util",
- testonly = 1,
- srcs = ["ip_socket_test_util.cc"],
- hdrs = ["ip_socket_test_util.h"],
- deps = [
- ":socket_test_util",
- "@com_google_absl//absl/strings",
- ],
-)
-
-cc_binary(
- name = "clock_nanosleep_test",
- testonly = 1,
- srcs = ["clock_nanosleep.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:cleanup",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:posix_error",
- "//test/util:signal_util",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- "//test/util:timer_util",
- ],
-)
-
-cc_binary(
- name = "32bit_test",
- testonly = 1,
- srcs = ["32bit.cc"],
- linkstatic = 1,
- deps = [
- "@com_google_absl//absl/base:core_headers",
- gtest,
- "//test/util:memory_util",
- "//test/util:platform_util",
- "//test/util:posix_error",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "accept_bind_test",
- testonly = 1,
- srcs = ["accept_bind.cc"],
- linkstatic = 1,
- deps = [
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- "//test/util:file_descriptor",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "accept_bind_stream_test",
- testonly = 1,
- srcs = ["accept_bind_stream.cc"],
- linkstatic = 1,
- deps = [
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- "//test/util:file_descriptor",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "access_test",
- testonly = 1,
- srcs = ["access.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "//test/util:fs_util",
- gtest,
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "affinity_test",
- testonly = 1,
- srcs = ["affinity.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:cleanup",
- "//test/util:fs_util",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:posix_error",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "aio_test",
- testonly = 1,
- srcs = [
- "aio.cc",
- "file_base.h",
- ],
- linkstatic = 1,
- deps = [
- "//test/util:cleanup",
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:memory_util",
- "//test/util:posix_error",
- "//test/util:proc_util",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "alarm_test",
- testonly = 1,
- srcs = ["alarm.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:file_descriptor",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:logging",
- "//test/util:signal_util",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "bad_test",
- testonly = 1,
- srcs = ["bad.cc"],
- linkstatic = 1,
- visibility = [
- "//:sandbox",
- ],
- deps = [
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "bind_test",
- testonly = 1,
- srcs = ["bind.cc"],
- linkstatic = 1,
- deps = [
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_test",
- testonly = 1,
- srcs = ["socket.cc"],
- linkstatic = 1,
- deps = [
- ":socket_test_util",
- gtest,
- "//test/util:file_descriptor",
- "//test/util:temp_umask",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_capability_test",
- testonly = 1,
- srcs = ["socket_capability.cc"],
- linkstatic = 1,
- deps = [
- ":socket_test_util",
- "//test/util:capability_util",
- "//test/util:file_descriptor",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "brk_test",
- testonly = 1,
- srcs = ["brk.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "chdir_test",
- testonly = 1,
- srcs = ["chdir.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- gtest,
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "chmod_test",
- testonly = 1,
- srcs = ["chmod.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- gtest,
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "chown_test",
- testonly = 1,
- srcs = ["chown.cc"],
- linkstatic = 1,
- # We require additional UIDs for this test, so don't include the bazel
- # sandbox as standard.
- tags = ["no-sandbox"],
- deps = [
- "//test/util:capability_util",
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- "@com_google_absl//absl/flags:flag",
- "@com_google_absl//absl/synchronization",
- gtest,
- "//test/util:posix_error",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "sticky_test",
- testonly = 1,
- srcs = ["sticky.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- "@com_google_absl//absl/flags:flag",
- gtest,
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "chroot_test",
- testonly = 1,
- srcs = ["chroot.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "//test/util:cleanup",
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- "@com_google_absl//absl/cleanup",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:logging",
- "//test/util:mount_util",
- "//test/util:multiprocess_util",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "clock_getres_test",
- testonly = 1,
- srcs = ["clock_getres.cc"],
- linkstatic = 1,
- deps = [
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "clock_gettime_test",
- testonly = 1,
- srcs = ["clock_gettime.cc"],
- linkstatic = 1,
- deps = [
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "concurrency_test",
- testonly = 1,
- srcs = ["concurrency.cc"],
- linkstatic = 1,
- deps = [
- gbenchmark,
- gtest,
- "//test/util:platform_util",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/time",
- ],
-)
-
-cc_binary(
- name = "connect_external_test",
- testonly = 1,
- srcs = ["connect_external.cc"],
- linkstatic = 1,
- deps = [
- ":socket_test_util",
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "creat_test",
- testonly = 1,
- srcs = ["creat.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:fs_util",
- gtest,
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "dev_test",
- testonly = 1,
- srcs = ["dev.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:file_descriptor",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "dup_test",
- testonly = 1,
- srcs = ["dup.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:eventfd_util",
- "//test/util:file_descriptor",
- gtest,
- "//test/util:fs_util",
- "//test/util:posix_error",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "epoll_test",
- testonly = 1,
- srcs = ["epoll.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:epoll_util",
- "//test/util:eventfd_util",
- "//test/util:file_descriptor",
- gtest,
- "//test/util:posix_error",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "eventfd_test",
- testonly = 1,
- srcs = ["eventfd.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:epoll_util",
- "//test/util:eventfd_util",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "exceptions_test",
- testonly = 1,
- srcs = ["exceptions.cc"],
- linkstatic = 1,
- deps = [
- gtest,
- "//test/util:logging",
- "//test/util:platform_util",
- "//test/util:signal_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "getcpu_test",
- testonly = 1,
- srcs = ["getcpu.cc"],
- linkstatic = 1,
- deps = [
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "getcpu_host_test",
- testonly = 1,
- srcs = ["getcpu.cc"],
- linkstatic = 1,
- deps = [
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "getrusage_test",
- testonly = 1,
- srcs = ["getrusage.cc"],
- linkstatic = 1,
- deps = [
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:logging",
- "//test/util:memory_util",
- "//test/util:multiprocess_util",
- "//test/util:signal_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "exec_binary_test",
- testonly = 1,
- srcs = ["exec_binary.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:cleanup",
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:multiprocess_util",
- "//test/util:posix_error",
- "//test/util:proc_util",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "exec_test",
- testonly = 1,
- srcs = [
- "exec.cc",
- "exec.h",
- ],
- data = [
- ":exec_assert_closed_workload",
- ":exec_basic_workload",
- ":exec_proc_exe_workload",
- ":exec_state_workload",
- ":exit_script",
- ":priority_execve",
- ],
- linkstatic = 1,
- deps = [
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/synchronization",
- "@com_google_absl//absl/types:optional",
- gtest,
- "//test/util:multiprocess_util",
- "//test/util:posix_error",
- "//test/util:temp_path",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "exit_test",
- testonly = 1,
- srcs = ["exit.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:file_descriptor",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:time_util",
- ],
-)
-
-cc_binary(
- name = "fallocate_test",
- testonly = 1,
- srcs = ["fallocate.cc"],
- linkstatic = 1,
- deps = [
- ":file_base",
- ":socket_test_util",
- "//test/util:cleanup",
- "//test/util:eventfd_util",
- "//test/util:file_descriptor",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:posix_error",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "fault_test",
- testonly = 1,
- srcs = ["fault.cc"],
- linkstatic = 1,
- deps = [
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "fchdir_test",
- testonly = 1,
- srcs = ["fchdir.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- gtest,
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "fcntl_test",
- testonly = 1,
- srcs = ["fcntl.cc"],
- linkstatic = 1,
- deps = [
- ":socket_test_util",
- "//test/util:capability_util",
- "//test/util:cleanup",
- "//test/util:eventfd_util",
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/flags:flag",
- "@com_google_absl//absl/memory",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:memory_util",
- "//test/util:multiprocess_util",
- "//test/util:posix_error",
- "//test/util:save_util",
- "//test/util:signal_util",
- "//test/util:temp_path",
- "//test/util:test_util",
- "//test/util:thread_util",
- "//test/util:timer_util",
- ],
-)
-
-cc_binary(
- name = "flock_test",
- testonly = 1,
- srcs = [
- "file_base.h",
- "flock.cc",
- ],
- linkstatic = 1,
- deps = [
- ":socket_test_util",
- "//test/util:file_descriptor",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:epoll_util",
- "//test/util:eventfd_util",
- "//test/util:posix_error",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- "//test/util:timer_util",
- ],
-)
-
-cc_binary(
- name = "fork_test",
- testonly = 1,
- srcs = ["fork.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:logging",
- "//test/util:memory_util",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "fpsig_fork_test",
- testonly = 1,
- srcs = ["fpsig_fork.cc"],
- linkstatic = 1,
- deps = [
- gtest,
- "//test/util:logging",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "fpsig_nested_test",
- testonly = 1,
- srcs = ["fpsig_nested.cc"],
- linkstatic = 1,
- deps = [
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "sync_file_range_test",
- testonly = 1,
- srcs = ["sync_file_range.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:file_descriptor",
- gtest,
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "fsync_test",
- testonly = 1,
- srcs = ["fsync.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:file_descriptor",
- gtest,
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "futex_test",
- testonly = 1,
- srcs = ["futex.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:cleanup",
- "//test/util:file_descriptor",
- "@com_google_absl//absl/memory",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:memory_util",
- "//test/util:save_util",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- "//test/util:time_util",
- "//test/util:timer_util",
- ],
-)
-
-cc_binary(
- name = "getdents_test",
- testonly = 1,
- srcs = ["getdents.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:eventfd_util",
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- "@com_google_absl//absl/container:node_hash_map",
- "@com_google_absl//absl/container:node_hash_set",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:posix_error",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "verity_getdents_test",
- testonly = 1,
- srcs = ["verity_getdents.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- gtest,
- "//test/util:fs_util",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:verity_util",
- ],
-)
-
-cc_binary(
- name = "getrandom_test",
- testonly = 1,
- srcs = ["getrandom.cc"],
- linkstatic = 1,
- deps = [
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "inotify_test",
- testonly = 1,
- srcs = ["inotify.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:epoll_util",
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- "//test/util:multiprocess_util",
- "//test/util:posix_error",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/strings:str_format",
- "@com_google_absl//absl/synchronization",
- "@com_google_absl//absl/time",
- ],
-)
-
-cc_binary(
- name = "ioctl_test",
- testonly = 1,
- srcs = ["ioctl.cc"],
- linkstatic = 1,
- deps = [
- ":ip_socket_test_util",
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- "//test/util:file_descriptor",
- gtest,
- "//test/util:signal_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "verity_ioctl_test",
- testonly = 1,
- srcs = ["verity_ioctl.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- gtest,
- "//test/util:fs_util",
- "//test/util:mount_util",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:verity_util",
- ],
-)
-
-cc_library(
- name = "iptables_types",
- testonly = 1,
- hdrs = [
- "iptables.h",
- ],
-)
-
-cc_binary(
- name = "iptables_test",
- testonly = 1,
- srcs = [
- "iptables.cc",
- ],
- linkstatic = 1,
- deps = [
- ":iptables_types",
- ":socket_test_util",
- "//test/util:capability_util",
- "//test/util:file_descriptor",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "ip6tables_test",
- testonly = 1,
- srcs = [
- "ip6tables.cc",
- ],
- linkstatic = 1,
- deps = [
- ":iptables_types",
- ":socket_test_util",
- "//test/util:capability_util",
- "//test/util:file_descriptor",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "itimer_test",
- testonly = 1,
- srcs = ["itimer.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:file_descriptor",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:logging",
- "//test/util:multiprocess_util",
- "//test/util:posix_error",
- "//test/util:signal_util",
- "//test/util:test_util",
- "//test/util:thread_util",
- "//test/util:timer_util",
- ],
-)
-
-cc_binary(
- name = "kcov_test",
- testonly = 1,
- srcs = ["kcov.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "//test/util:file_descriptor",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "kill_test",
- testonly = 1,
- srcs = ["kill.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "//test/util:file_descriptor",
- "@com_google_absl//absl/flags:flag",
- "@com_google_absl//absl/synchronization",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:logging",
- "//test/util:signal_util",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "link_test",
- testonly = 1,
- srcs = ["link.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- "@com_google_absl//absl/flags:flag",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:posix_error",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "lseek_test",
- testonly = 1,
- srcs = ["lseek.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:file_descriptor",
- gtest,
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "madvise_test",
- testonly = 1,
- srcs = ["madvise.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:file_descriptor",
- gtest,
- "//test/util:logging",
- "//test/util:memory_util",
- "//test/util:multiprocess_util",
- "//test/util:posix_error",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "membarrier_test",
- testonly = 1,
- srcs = ["membarrier.cc"],
- linkstatic = 1,
- deps = [
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:cleanup",
- "//test/util:logging",
- "//test/util:memory_util",
- "//test/util:posix_error",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "mempolicy_test",
- testonly = 1,
- srcs = ["mempolicy.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:cleanup",
- "@com_google_absl//absl/memory",
- gtest,
- "//test/util:memory_util",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "mincore_test",
- testonly = 1,
- srcs = ["mincore.cc"],
- linkstatic = 1,
- deps = [
- gtest,
- "//test/util:memory_util",
- "//test/util:posix_error",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "mkdir_test",
- testonly = 1,
- srcs = ["mkdir.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "//test/util:fs_util",
- gtest,
- "//test/util:temp_path",
- "//test/util:temp_umask",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "mknod_test",
- testonly = 1,
- srcs = ["mknod.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:file_descriptor",
- gtest,
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "mlock_test",
- testonly = 1,
- srcs = ["mlock.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "//test/util:cleanup",
- gtest,
- "//test/util:memory_util",
- "//test/util:multiprocess_util",
- "//test/util:rlimit_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "mmap_test",
- testonly = 1,
- srcs = ["mmap.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:cleanup",
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:memory_util",
- "//test/util:multiprocess_util",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "verity_mmap_test",
- testonly = 1,
- srcs = ["verity_mmap.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- gtest,
- "//test/util:fs_util",
- "//test/util:memory_util",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:verity_util",
- ],
-)
-
-cc_binary(
- name = "mount_test",
- testonly = 1,
- srcs = ["mount.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:mount_util",
- "//test/util:multiprocess_util",
- "//test/util:posix_error",
- "//test/util:save_util",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "verity_mount_test",
- testonly = 1,
- srcs = ["verity_mount.cc"],
- linkstatic = 1,
- deps = [
- gtest,
- "//test/util:capability_util",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:verity_util",
- ],
-)
-
-cc_binary(
- name = "mremap_test",
- testonly = 1,
- srcs = ["mremap.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:file_descriptor",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:logging",
- "//test/util:memory_util",
- "//test/util:multiprocess_util",
- "//test/util:posix_error",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "msync_test",
- testonly = 1,
- srcs = ["msync.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:file_descriptor",
- "//test/util:memory_util",
- "//test/util:posix_error",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "munmap_test",
- testonly = 1,
- srcs = ["munmap.cc"],
- linkstatic = 1,
- deps = [
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "open_test",
- testonly = 1,
- srcs = [
- "file_base.h",
- "open.cc",
- ],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "//test/util:cleanup",
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- "@com_google_absl//absl/memory",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:posix_error",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "open_create_test",
- testonly = 1,
- srcs = ["open_create.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- gtest,
- "//test/util:posix_error",
- "//test/util:temp_path",
- "//test/util:temp_umask",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "packet_socket_raw_test",
- testonly = 1,
- srcs = ["packet_socket_raw.cc"],
- defines = select_system(),
- linkstatic = 1,
- deps = [
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- "//test/util:capability_util",
- "//test/util:file_descriptor",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/base:endian",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "packet_socket_test",
- testonly = 1,
- srcs = ["packet_socket.cc"],
- linkstatic = 1,
- deps = [
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- "//test/util:capability_util",
- "//test/util:file_descriptor",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/base:endian",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "pty_test",
- testonly = 1,
- srcs = ["pty.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "//test/util:file_descriptor",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/synchronization",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:cleanup",
- "//test/util:posix_error",
- "//test/util:pty_util",
- "//test/util:signal_util",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "pty_root_test",
- testonly = 1,
- srcs = ["pty_root.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "//test/util:file_descriptor",
- "@com_google_absl//absl/base:core_headers",
- gtest,
- "//test/util:posix_error",
- "//test/util:pty_util",
- "//test/util:test_main",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "partial_bad_buffer_test",
- testonly = 1,
- # Android does not support preadv or pwritev in r22.
- srcs = select_system(linux = ["partial_bad_buffer.cc"]),
- linkstatic = 1,
- deps = [
- ":socket_test_util",
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:posix_error",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
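
Editorial aside: the comment on the target above explains why its srcs are wrapped in select_system(linux = [...]) — on Android r22 the preadv/pwritev calls it exercises are unavailable, so the source list is gated by target OS. Below is a minimal sketch of that gating pattern only; the macro and config_setting names are hypothetical, and gVisor's real select_system (from //tools:defs.bzl) may be defined differently.

    # Sketch, not the deleted file's contents: a select_system-style helper that
    # returns an OS-gated source list, so the test binary compiles to an empty
    # (main-only) program where the syscalls are unsupported.
    def select_system_sketch(linux = [], android = []):
        return select({
            ":android": android,          # hypothetical config_setting label
            "//conditions:default": linux,
        })

    # Usage, mirroring the target above:
    #   srcs = select_system_sketch(linux = ["partial_bad_buffer.cc"]),
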
-
-cc_binary(
- name = "pause_test",
- testonly = 1,
- srcs = ["pause.cc"],
- linkstatic = 1,
- deps = [
- "@com_google_absl//absl/synchronization",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:signal_util",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "ping_socket_test",
- testonly = 1,
- srcs = ["ping_socket.cc"],
- linkstatic = 1,
- deps = [
- ":ip_socket_test_util",
- ":socket_test_util",
- "//test/util:file_descriptor",
- "@com_google_absl//absl/algorithm:container",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/types:optional",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "pipe_test",
- testonly = 1,
- srcs = ["pipe.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/synchronization",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:posix_error",
- "//test/util:signal_util",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "poll_test",
- testonly = 1,
- srcs = ["poll.cc"],
- linkstatic = 1,
- deps = [
- ":base_poll_test",
- "//test/util:eventfd_util",
- "//test/util:file_descriptor",
- "@com_google_absl//absl/synchronization",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:logging",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "ppoll_test",
- testonly = 1,
- srcs = ["ppoll.cc"],
- linkstatic = 1,
- deps = [
- ":base_poll_test",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:signal_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "arch_prctl_test",
- testonly = 1,
- srcs = select_arch(
- amd64 = ["arch_prctl.cc"],
- arm64 = [],
- ),
- linkstatic = 1,
- deps = [
- "//test/util:file_descriptor",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "prctl_test",
- testonly = 1,
- srcs = ["prctl.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "//test/util:cleanup",
- "@com_google_absl//absl/flags:flag",
- gtest,
- "//test/util:multiprocess_util",
- "//test/util:posix_error",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "prctl_setuid_test",
- testonly = 1,
- srcs = ["prctl_setuid.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "@com_google_absl//absl/flags:flag",
- gtest,
- "//test/util:logging",
- "//test/util:multiprocess_util",
- "//test/util:posix_error",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "pread64_test",
- testonly = 1,
- srcs = ["pread64.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:file_descriptor",
- gtest,
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "preadv_test",
- testonly = 1,
- srcs = ["preadv.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:file_descriptor",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:logging",
- "//test/util:memory_util",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- "//test/util:timer_util",
- ],
-)
-
-cc_binary(
- name = "preadv2_test",
- testonly = 1,
- srcs = [
- "file_base.h",
- "preadv2.cc",
- ],
- linkstatic = 1,
- deps = [
- "//test/util:file_descriptor",
- "@com_google_absl//absl/memory",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:posix_error",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "priority_test",
- testonly = 1,
- srcs = ["priority.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "//test/util:fs_util",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "proc_test",
- testonly = 1,
- srcs = ["proc.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "//test/util:cleanup",
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- "//test/util:mount_util",
- "@com_google_absl//absl/container:node_hash_set",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/synchronization",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:memory_util",
- "//test/util:multiprocess_util",
- "//test/util:posix_error",
- "//test/util:proc_util",
- "//test/util:temp_path",
- "//test/util:test_util",
- "//test/util:thread_util",
- "//test/util:time_util",
- "//test/util:timer_util",
- ],
-)
-
-cc_binary(
- name = "proc_net_test",
- testonly = 1,
- srcs = ["proc_net.cc"],
- linkstatic = 1,
- deps = [
- ":socket_test_util",
- "//test/util:capability_util",
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "proc_pid_oomscore_test",
- testonly = 1,
- srcs = ["proc_pid_oomscore.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:fs_util",
- "//test/util:test_main",
- "//test/util:test_util",
- "@com_google_absl//absl/strings",
- ],
-)
-
-cc_binary(
- name = "proc_pid_smaps_test",
- testonly = 1,
- srcs = ["proc_pid_smaps.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- "@com_google_absl//absl/container:flat_hash_set",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/strings:str_format",
- "@com_google_absl//absl/types:optional",
- gtest,
- "//test/util:memory_util",
- "//test/util:posix_error",
- "//test/util:proc_util",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "proc_pid_uid_gid_map_test",
- testonly = 1,
- srcs = ["proc_pid_uid_gid_map.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "//test/util:cleanup",
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:logging",
- "//test/util:multiprocess_util",
- "//test/util:posix_error",
- "//test/util:save_util",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:time_util",
- ],
-)
-
-cc_binary(
- name = "pselect_test",
- testonly = 1,
- srcs = ["pselect.cc"],
- linkstatic = 1,
- deps = [
- ":base_poll_test",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:signal_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "ptrace_test",
- testonly = 1,
- srcs = ["ptrace.cc"],
- linkstatic = 1,
- deps = [
- "@com_google_absl//absl/flags:flag",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:capability_util",
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- "//test/util:logging",
- "//test/util:memory_util",
- "//test/util:multiprocess_util",
- "//test/util:platform_util",
- "//test/util:signal_util",
- "//test/util:temp_path",
- "//test/util:test_util",
- "//test/util:thread_util",
- "//test/util:time_util",
- ],
-)
-
-cc_binary(
- name = "pwrite64_test",
- testonly = 1,
- srcs = ["pwrite64.cc"],
- linkstatic = 1,
- deps = [
- gtest,
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "pwritev2_test",
- testonly = 1,
- srcs = [
- "pwritev2.cc",
- ],
- linkstatic = 1,
- deps = [
- ":file_base",
- "//test/util:file_descriptor",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:posix_error",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "raw_socket_hdrincl_test",
- testonly = 1,
- srcs = ["raw_socket_hdrincl.cc"],
- linkstatic = 1,
- deps = [
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- "//test/util:capability_util",
- "//test/util:file_descriptor",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/base:endian",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "raw_socket_test",
- testonly = 1,
- srcs = ["raw_socket.cc"],
- defines = select_system(),
- linkstatic = 1,
- deps = [
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- "//test/util:capability_util",
- "//test/util:file_descriptor",
- "@com_google_absl//absl/base:core_headers",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "raw_socket_icmp_test",
- testonly = 1,
- srcs = ["raw_socket_icmp.cc"],
- linkstatic = 1,
- deps = [
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- "//test/util:capability_util",
- "//test/util:file_descriptor",
- "@com_google_absl//absl/base:core_headers",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "read_test",
- testonly = 1,
- srcs = ["read.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:file_descriptor",
- "@com_google_absl//absl/base:core_headers",
- gtest,
- "//test/util:cleanup",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "readahead_test",
- testonly = 1,
- srcs = ["readahead.cc"],
- linkstatic = 1,
- deps = [
- ":socket_test_util",
- "//test/util:file_descriptor",
- gtest,
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "readv_test",
- testonly = 1,
- srcs = [
- "file_base.h",
- "readv.cc",
- "readv_common.cc",
- "readv_common.h",
- ],
- linkstatic = 1,
- deps = [
- "//test/util:file_descriptor",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:posix_error",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:timer_util",
- ],
-)
-
-cc_binary(
- name = "readv_socket_test",
- testonly = 1,
- srcs = [
- "readv_common.cc",
- "readv_common.h",
- "readv_socket.cc",
- ],
- linkstatic = 1,
- deps = [
- "//test/util:file_descriptor",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:posix_error",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "rename_test",
- testonly = 1,
- srcs = ["rename.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "//test/util:cleanup",
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "rlimits_test",
- testonly = 1,
- srcs = ["rlimits.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "rseq_test",
- testonly = 1,
- srcs = ["rseq.cc"],
- data = ["//test/syscalls/linux/rseq"],
- linkstatic = 1,
- deps = [
- "//test/syscalls/linux/rseq:lib",
- gtest,
- "//test/util:logging",
- "//test/util:multiprocess_util",
- "//test/util:posix_error",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "rtsignal_test",
- testonly = 1,
- srcs = ["rtsignal.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:cleanup",
- gtest,
- "//test/util:logging",
- "//test/util:posix_error",
- "//test/util:signal_util",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "sched_test",
- testonly = 1,
- srcs = ["sched.cc"],
- linkstatic = 1,
- deps = [
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "sched_yield_test",
- testonly = 1,
- srcs = ["sched_yield.cc"],
- linkstatic = 1,
- deps = [
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "seccomp_test",
- testonly = 1,
- srcs = ["seccomp.cc"],
- linkstatic = 1,
- deps = [
- "@com_google_absl//absl/base:core_headers",
- gtest,
- "//test/util:logging",
- "//test/util:memory_util",
- "//test/util:multiprocess_util",
- "//test/util:posix_error",
- "//test/util:proc_util",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "select_test",
- testonly = 1,
- srcs = ["select.cc"],
- linkstatic = 1,
- deps = [
- ":base_poll_test",
- "//test/util:file_descriptor",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:multiprocess_util",
- "//test/util:posix_error",
- "//test/util:rlimit_util",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "sendfile_test",
- testonly = 1,
- srcs = ["sendfile.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:eventfd_util",
- "//test/util:file_descriptor",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:signal_util",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- "//test/util:timer_util",
- ],
-)
-
-cc_binary(
- name = "sendfile_socket_test",
- testonly = 1,
- srcs = ["sendfile_socket.cc"],
- linkstatic = 1,
- deps = [
- ":socket_test_util",
- "//test/util:file_descriptor",
- "@com_google_absl//absl/strings",
- gtest,
- ":ip_socket_test_util",
- ":unix_domain_socket_test_util",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- "//test/util:timer_util",
- ],
-)
-
-cc_binary(
- name = "setgid_test",
- testonly = 1,
- srcs = ["setgid.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "//test/util:cleanup",
- "//test/util:fs_util",
- "//test/util:posix_error",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- "@com_google_absl//absl/flags:flag",
- "@com_google_absl//absl/strings",
- gtest,
- ],
-)
-
-cc_binary(
- name = "splice_test",
- testonly = 1,
- srcs = ["splice.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:file_descriptor",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:signal_util",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- "//test/util:timer_util",
- ],
-)
-
-cc_binary(
- name = "sigaction_test",
- testonly = 1,
- srcs = ["sigaction.cc"],
- linkstatic = 1,
- deps = [
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "sigaltstack_test",
- testonly = 1,
- srcs = ["sigaltstack.cc"],
- data = [
- ":sigaltstack_check",
- ],
- linkstatic = 1,
- deps = [
- "//test/util:cleanup",
- "//test/util:fs_util",
- gtest,
- "//test/util:multiprocess_util",
- "//test/util:posix_error",
- "//test/util:signal_util",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "sigreturn_test",
- testonly = 1,
- srcs = select_arch(
- amd64 = ["sigreturn_amd64.cc"],
- arm64 = ["sigreturn_arm64.cc"],
- ),
- linkstatic = 1,
- deps = [
- gtest,
- "//test/util:logging",
- "//test/util:signal_util",
- "//test/util:test_util",
- "//test/util:timer_util",
- ] + select_arch(
- amd64 = [],
- arm64 = ["//test/util:test_main"],
- ),
-)
-
-cc_binary(
- name = "signalfd_test",
- testonly = 1,
- srcs = ["signalfd.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:file_descriptor",
- "@com_google_absl//absl/synchronization",
- gtest,
- "//test/util:logging",
- "//test/util:posix_error",
- "//test/util:signal_util",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "sigprocmask_test",
- testonly = 1,
- srcs = ["sigprocmask.cc"],
- linkstatic = 1,
- deps = [
- gtest,
- "//test/util:signal_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "sigstop_test",
- testonly = 1,
- srcs = ["sigstop.cc"],
- linkstatic = 1,
- deps = [
- "@com_google_absl//absl/flags:flag",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:multiprocess_util",
- "//test/util:posix_error",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "sigtimedwait_test",
- testonly = 1,
- srcs = ["sigtimedwait.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:file_descriptor",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:logging",
- "//test/util:signal_util",
- "//test/util:test_util",
- "//test/util:thread_util",
- "//test/util:timer_util",
- ],
-)
-
-cc_library(
- name = "socket_generic_test_cases",
- testonly = 1,
- srcs = [
- "socket_generic_test_cases.cc",
- ],
- hdrs = [
- "socket_generic.h",
- ],
- deps = [
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/strings:str_format",
- gtest,
- "//test/util:capability_util",
- "//test/util:test_util",
- ],
- alwayslink = 1,
-)
-
-cc_binary(
- name = "socket_stress_test",
- testonly = 1,
- srcs = [
- "socket_generic_stress.cc",
- ],
- linkstatic = 1,
- deps = [
- gtest,
- ":ip_socket_test_util",
- ":socket_test_util",
- "//test/util:file_descriptor",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/time",
- ],
-)
-
-cc_library(
- name = "socket_unix_dgram_test_cases",
- testonly = 1,
- srcs = ["socket_unix_dgram.cc"],
- hdrs = ["socket_unix_dgram.h"],
- deps = [
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:test_util",
- ],
- alwayslink = 1,
-)
-
-cc_library(
- name = "socket_unix_seqpacket_test_cases",
- testonly = 1,
- srcs = ["socket_unix_seqpacket.cc"],
- hdrs = ["socket_unix_seqpacket.h"],
- deps = [
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:test_util",
- ],
- alwayslink = 1,
-)
-
-cc_library(
- name = "socket_ip_tcp_generic_test_cases",
- testonly = 1,
- srcs = [
- "socket_ip_tcp_generic.cc",
- ],
- hdrs = [
- "socket_ip_tcp_generic.h",
- ],
- deps = [
- ":socket_test_util",
- "@com_google_absl//absl/memory",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:temp_path",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
- alwayslink = 1,
-)
-
-cc_library(
- name = "socket_non_blocking_test_cases",
- testonly = 1,
- srcs = [
- "socket_non_blocking.cc",
- ],
- hdrs = [
- "socket_non_blocking.h",
- ],
- deps = [
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- gtest,
- "//test/util:test_util",
- ],
- alwayslink = 1,
-)
-
-cc_library(
- name = "socket_unix_non_stream_test_cases",
- testonly = 1,
- srcs = [
- "socket_unix_non_stream.cc",
- ],
- hdrs = [
- "socket_unix_non_stream.h",
- ],
- deps = [
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- gtest,
- "//test/util:memory_util",
- "//test/util:test_util",
- ],
- alwayslink = 1,
-)
-
-cc_library(
- name = "socket_non_stream_test_cases",
- testonly = 1,
- srcs = [
- "socket_non_stream.cc",
- ],
- hdrs = [
- "socket_non_stream.h",
- ],
- deps = [
- ":ip_socket_test_util",
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- gtest,
- "//test/util:test_util",
- ],
- alwayslink = 1,
-)
-
-cc_library(
- name = "socket_ip_udp_test_cases",
- testonly = 1,
- srcs = [
- "socket_ip_udp_generic.cc",
- ],
- hdrs = [
- "socket_ip_udp_generic.h",
- ],
- deps = [
- ":ip_socket_test_util",
- ":socket_test_util",
- gtest,
- "//test/util:test_util",
- ],
- alwayslink = 1,
-)
-
-cc_library(
- name = "socket_ipv4_udp_unbound_test_cases",
- testonly = 1,
- srcs = [
- "socket_ipv4_udp_unbound.cc",
- ],
- hdrs = [
- "socket_ipv4_udp_unbound.h",
- ],
- deps = [
- ":ip_socket_test_util",
- ":socket_test_util",
- "@com_google_absl//absl/memory",
- gtest,
- "//test/util:posix_error",
- "//test/util:save_util",
- "//test/util:test_util",
- ],
- alwayslink = 1,
-)
-
-cc_library(
- name = "socket_ipv6_udp_unbound_test_cases",
- testonly = 1,
- srcs = [
- "socket_ipv6_udp_unbound.cc",
- ],
- hdrs = [
- "socket_ipv6_udp_unbound.h",
- ],
- deps = [
- ":ip_socket_test_util",
- ":socket_test_util",
- "@com_google_absl//absl/memory",
- gtest,
- "//test/util:posix_error",
- "//test/util:save_util",
- "//test/util:test_util",
- ],
- alwayslink = 1,
-)
-
-cc_library(
- name = "socket_ipv4_udp_unbound_netlink_test_cases",
- testonly = 1,
- srcs = [
- "socket_ipv4_udp_unbound_netlink.cc",
- ],
- hdrs = [
- "socket_ipv4_udp_unbound_netlink.h",
- ],
- deps = [
- ":socket_netlink_route_util",
- ":socket_test_util",
- "//test/util:capability_util",
- "//test/util:cleanup",
- gtest,
- ],
- alwayslink = 1,
-)
-
-cc_library(
- name = "socket_ipv6_udp_unbound_netlink_test_cases",
- testonly = 1,
- srcs = [
- "socket_ipv6_udp_unbound_netlink.cc",
- ],
- hdrs = [
- "socket_ipv6_udp_unbound_netlink.h",
- ],
- deps = [
- ":socket_netlink_route_util",
- ":socket_test_util",
- "//test/util:capability_util",
- gtest,
- ],
- alwayslink = 1,
-)
-
-cc_library(
- name = "socket_ip_udp_unbound_external_networking",
- testonly = 1,
- srcs = [
- "socket_ip_udp_unbound_external_networking.cc",
- ],
- hdrs = [
- "socket_ip_udp_unbound_external_networking.h",
- ],
- deps = [
- ":ip_socket_test_util",
- ":socket_test_util",
- "//test/util:test_util",
- ],
- alwayslink = 1,
-)
-
-cc_library(
- name = "socket_ipv4_udp_unbound_external_networking_test_cases",
- testonly = 1,
- srcs = [
- "socket_ipv4_udp_unbound_external_networking.cc",
- ],
- hdrs = [
- "socket_ipv4_udp_unbound_external_networking.h",
- ],
- deps = [
- ":socket_ip_udp_unbound_external_networking",
- gtest,
- ],
- alwayslink = 1,
-)
-
-cc_library(
- name = "socket_ipv6_udp_unbound_external_networking_test_cases",
- testonly = 1,
- srcs = [
- "socket_ipv6_udp_unbound_external_networking.cc",
- ],
- hdrs = [
- "socket_ipv6_udp_unbound_external_networking.h",
- ],
- deps = [
- ":socket_ip_udp_unbound_external_networking",
- gtest,
- ],
- alwayslink = 1,
-)
-
-cc_binary(
- name = "socket_abstract_test",
- testonly = 1,
- srcs = [
- "socket_abstract.cc",
- ],
- linkstatic = 1,
- deps = [
- ":socket_generic_test_cases",
- ":socket_test_util",
- ":socket_unix_cmsg_test_cases",
- ":socket_unix_test_cases",
- ":unix_domain_socket_test_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_abstract_non_blocking_test",
- testonly = 1,
- srcs = [
- "socket_unix_abstract_nonblock.cc",
- ],
- linkstatic = 1,
- deps = [
- ":socket_non_blocking_test_cases",
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_unix_dgram_local_test",
- testonly = 1,
- srcs = ["socket_unix_dgram_local.cc"],
- linkstatic = 1,
- deps = [
- ":socket_non_stream_test_cases",
- ":socket_test_util",
- ":socket_unix_dgram_test_cases",
- ":socket_unix_non_stream_test_cases",
- ":unix_domain_socket_test_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_unix_dgram_non_blocking_test",
- testonly = 1,
- srcs = ["socket_unix_dgram_non_blocking.cc"],
- linkstatic = 1,
- deps = [
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_unix_seqpacket_local_test",
- testonly = 1,
- srcs = [
- "socket_unix_seqpacket_local.cc",
- ],
- linkstatic = 1,
- deps = [
- ":socket_non_stream_test_cases",
- ":socket_test_util",
- ":socket_unix_non_stream_test_cases",
- ":socket_unix_seqpacket_test_cases",
- ":unix_domain_socket_test_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_unix_stream_test",
- testonly = 1,
- srcs = ["socket_unix_stream.cc"],
- linkstatic = 1,
- deps = [
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_ip_tcp_generic_loopback_test",
- testonly = 1,
- srcs = [
- "socket_ip_tcp_generic_loopback.cc",
- ],
- linkstatic = 1,
- deps = [
- ":ip_socket_test_util",
- ":socket_ip_tcp_generic_test_cases",
- ":socket_test_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_ip_tcp_udp_generic_loopback_test",
- testonly = 1,
- srcs = [
- "socket_ip_tcp_udp_generic.cc",
- ],
- linkstatic = 1,
- deps = [
- ":ip_socket_test_util",
- ":socket_test_util",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_ip_tcp_loopback_test",
- testonly = 1,
- srcs = [
- "socket_ip_tcp_loopback.cc",
- ],
- linkstatic = 1,
- deps = [
- ":ip_socket_test_util",
- ":socket_generic_test_cases",
- ":socket_test_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_ip_tcp_loopback_non_blocking_test",
- testonly = 1,
- srcs = [
- "socket_ip_tcp_loopback_nonblock.cc",
- ],
- linkstatic = 1,
- deps = [
- ":ip_socket_test_util",
- ":socket_non_blocking_test_cases",
- ":socket_test_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_ip_udp_loopback_test",
- testonly = 1,
- srcs = [
- "socket_ip_udp_loopback.cc",
- ],
- linkstatic = 1,
- deps = [
- ":ip_socket_test_util",
- ":socket_generic_test_cases",
- ":socket_ip_udp_test_cases",
- ":socket_non_stream_test_cases",
- ":socket_test_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_ipv4_udp_unbound_external_networking_test",
- testonly = 1,
- srcs = [
- "socket_ipv4_udp_unbound_external_networking_test.cc",
- ],
- linkstatic = 1,
- deps = [
- ":ip_socket_test_util",
- ":socket_ipv4_udp_unbound_external_networking_test_cases",
- ":socket_test_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_ipv6_udp_unbound_external_networking_test",
- testonly = 1,
- srcs = [
- "socket_ipv6_udp_unbound_external_networking_test.cc",
- ],
- linkstatic = 1,
- deps = [
- ":ip_socket_test_util",
- ":socket_ipv6_udp_unbound_external_networking_test_cases",
- ":socket_test_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_bind_to_device_test",
- testonly = 1,
- srcs = [
- "socket_bind_to_device.cc",
- ],
- linkstatic = 1,
- deps = [
- ":ip_socket_test_util",
- ":socket_bind_to_device_util",
- ":socket_test_util",
- "//test/util:capability_util",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "socket_bind_to_device_sequence_test",
- testonly = 1,
- srcs = [
- "socket_bind_to_device_sequence.cc",
- ],
- linkstatic = 1,
- deps = [
- ":ip_socket_test_util",
- ":socket_bind_to_device_util",
- ":socket_test_util",
- "//test/util:capability_util",
- "@com_google_absl//absl/container:node_hash_map",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "socket_bind_to_device_distribution_test",
- testonly = 1,
- srcs = [
- "socket_bind_to_device_distribution.cc",
- ],
- linkstatic = 1,
- deps = [
- ":ip_socket_test_util",
- ":socket_bind_to_device_util",
- ":socket_test_util",
- "//test/util:capability_util",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "socket_ip_udp_loopback_non_blocking_test",
- testonly = 1,
- srcs = [
- "socket_ip_udp_loopback_nonblock.cc",
- ],
- linkstatic = 1,
- deps = [
- ":ip_socket_test_util",
- ":socket_non_blocking_test_cases",
- ":socket_test_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_ipv4_udp_unbound_loopback_test",
- testonly = 1,
- srcs = [
- "socket_ipv4_udp_unbound_loopback.cc",
- ],
- linkstatic = 1,
- deps = [
- ":ip_socket_test_util",
- ":socket_ipv4_udp_unbound_test_cases",
- ":socket_test_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_ipv6_udp_unbound_loopback_test",
- testonly = 1,
- srcs = [
- "socket_ipv6_udp_unbound_loopback.cc",
- ],
- linkstatic = 1,
- deps = [
- ":ip_socket_test_util",
- ":socket_ipv6_udp_unbound_test_cases",
- ":socket_test_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_ipv4_udp_unbound_loopback_nogotsan_test",
- testonly = 1,
- srcs = [
- "socket_ipv4_udp_unbound_loopback_nogotsan.cc",
- ],
- linkstatic = 1,
- deps = [
- ":ip_socket_test_util",
- ":socket_test_util",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- "@com_google_absl//absl/memory",
- ],
-)
-
-cc_binary(
- name = "socket_ipv4_udp_unbound_loopback_netlink_test",
- testonly = 1,
- srcs = [
- "socket_ipv4_udp_unbound_loopback_netlink.cc",
- ],
- linkstatic = 1,
- deps = [
- ":ip_socket_test_util",
- ":socket_ipv4_udp_unbound_netlink_test_cases",
- ":socket_test_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_ipv6_udp_unbound_loopback_netlink_test",
- testonly = 1,
- srcs = [
- "socket_ipv6_udp_unbound_loopback_netlink.cc",
- ],
- linkstatic = 1,
- deps = [
- ":ip_socket_test_util",
- ":socket_ipv6_udp_unbound_netlink_test_cases",
- ":socket_test_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_ip_unbound_test",
- testonly = 1,
- srcs = [
- "socket_ip_unbound.cc",
- ],
- linkstatic = 1,
- deps = [
- ":ip_socket_test_util",
- ":socket_test_util",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_ip_unbound_netlink_test",
- testonly = 1,
- srcs = [
- "socket_ip_unbound_netlink.cc",
- ],
- linkstatic = 1,
- deps = [
- ":ip_socket_test_util",
- ":socket_netlink_route_util",
- ":socket_test_util",
- "//test/util:capability_util",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_domain_test",
- testonly = 1,
- srcs = [
- "socket_unix_domain.cc",
- ],
- linkstatic = 1,
- deps = [
- ":socket_generic_test_cases",
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_domain_non_blocking_test",
- testonly = 1,
- srcs = [
- "socket_unix_pair_nonblock.cc",
- ],
- linkstatic = 1,
- deps = [
- ":socket_non_blocking_test_cases",
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_filesystem_test",
- testonly = 1,
- srcs = [
- "socket_filesystem.cc",
- ],
- linkstatic = 1,
- deps = [
- ":socket_generic_test_cases",
- ":socket_test_util",
- ":socket_unix_cmsg_test_cases",
- ":socket_unix_test_cases",
- ":unix_domain_socket_test_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_filesystem_non_blocking_test",
- testonly = 1,
- srcs = [
- "socket_unix_filesystem_nonblock.cc",
- ],
- linkstatic = 1,
- deps = [
- ":socket_non_blocking_test_cases",
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_library(
- name = "socket_inet_loopback_test_params",
- testonly = 1,
- hdrs = ["socket_inet_loopback_test_params.h"],
- deps = [
- ":socket_test_util",
- gtest,
- ],
-)
-
-cc_binary(
- name = "socket_inet_loopback_test",
- testonly = 1,
- srcs = ["socket_inet_loopback.cc"],
- linkstatic = 1,
- deps = [
- ":ip_socket_test_util",
- ":socket_inet_loopback_test_params",
- ":socket_test_util",
- "//test/util:file_descriptor",
- "@com_google_absl//absl/memory",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:posix_error",
- "//test/util:save_util",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "socket_inet_loopback_nogotsan_test",
- testonly = 1,
- srcs = ["socket_inet_loopback_nogotsan.cc"],
- linkstatic = 1,
- deps = [
- ":ip_socket_test_util",
- ":socket_inet_loopback_test_params",
- ":socket_test_util",
- "//test/util:file_descriptor",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:posix_error",
- "//test/util:save_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_inet_loopback_isolated_test",
- testonly = 1,
- srcs = ["socket_inet_loopback_isolated.cc"],
- linkstatic = 1,
- deps = [
- ":socket_inet_loopback_test_params",
- ":socket_netlink_util",
- ":socket_test_util",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- "@com_google_absl//absl/time",
- ],
-)
-
-cc_binary(
- name = "socket_netlink_test",
- testonly = 1,
- srcs = ["socket_netlink.cc"],
- linkstatic = 1,
- deps = [
- ":socket_test_util",
- "//test/util:file_descriptor",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_netlink_route_test",
- testonly = 1,
- srcs = ["socket_netlink_route.cc"],
- linkstatic = 1,
- deps = [
- ":socket_netlink_route_util",
- ":socket_netlink_util",
- ":socket_test_util",
- "//test/util:capability_util",
- "//test/util:cleanup",
- "//test/util:file_descriptor",
- "@com_google_absl//absl/strings:str_format",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_netlink_uevent_test",
- testonly = 1,
- srcs = ["socket_netlink_uevent.cc"],
- linkstatic = 1,
- deps = [
- ":socket_netlink_util",
- ":socket_test_util",
- "//test/util:file_descriptor",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-# These socket tests are in a library because the test cases are shared
-# across several test build targets.
-cc_library(
- name = "socket_stream_test_cases",
- testonly = 1,
- srcs = [
- "socket_stream.cc",
- ],
- hdrs = [
- "socket_stream.h",
- ],
- deps = [
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:test_util",
- ],
- alwayslink = 1,
-)
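
Editorial aside: the comment above this cc_library states why the socket test cases live in libraries — they are shared by several test binaries. The alwayslink = 1 setting is what makes that work: gtest cases register themselves through static initializers, and without alwayslink the linker could drop the otherwise-unreferenced object files. A minimal sketch of the pattern follows; the target and file names are illustrative only, not part of the deleted BUILD file.

    # Sketch: shared test-case library plus one binary that instantiates it.
    cc_library(
        name = "example_shared_test_cases",
        testonly = 1,
        srcs = ["example_shared.cc"],   # parameterized TEST_P definitions
        hdrs = ["example_shared.h"],
        deps = [gtest],
        # Keep these objects in every linking binary even though nothing
        # references their symbols directly (tests register via initializers).
        alwayslink = 1,
    )

    cc_binary(
        name = "example_variant_test",
        testonly = 1,
        srcs = ["example_variant.cc"],  # instantiates the suite for one socket type
        linkstatic = 1,
        deps = [
            ":example_shared_test_cases",
            "//test/util:test_main",
        ],
    )
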
-
-cc_library(
- name = "socket_blocking_test_cases",
- testonly = 1,
- srcs = [
- "socket_blocking.cc",
- ],
- hdrs = [
- "socket_blocking.h",
- ],
- deps = [
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:test_util",
- "//test/util:thread_util",
- "//test/util:timer_util",
- ],
- alwayslink = 1,
-)
-
-cc_library(
- name = "socket_unix_test_cases",
- testonly = 1,
- srcs = [
- "socket_unix.cc",
- ],
- hdrs = [
- "socket_unix.h",
- ],
- deps = [
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
- alwayslink = 1,
-)
-
-cc_library(
- name = "socket_unix_cmsg_test_cases",
- testonly = 1,
- srcs = [
- "socket_unix_cmsg.cc",
- ],
- hdrs = [
- "socket_unix_cmsg.h",
- ],
- deps = [
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
- alwayslink = 1,
-)
-
-cc_library(
- name = "socket_stream_blocking_test_cases",
- testonly = 1,
- srcs = [
- "socket_stream_blocking.cc",
- ],
- hdrs = [
- "socket_stream_blocking.h",
- ],
- deps = [
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:test_util",
- "//test/util:thread_util",
- "//test/util:timer_util",
- ],
- alwayslink = 1,
-)
-
-cc_library(
- name = "socket_stream_nonblocking_test_cases",
- testonly = 1,
- srcs = [
- "socket_stream_nonblock.cc",
- ],
- hdrs = [
- "socket_stream_nonblock.h",
- ],
- deps = [
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- gtest,
- "//test/util:test_util",
- ],
- alwayslink = 1,
-)
-
-cc_library(
- name = "socket_non_stream_blocking_test_cases",
- testonly = 1,
- srcs = [
- "socket_non_stream_blocking.cc",
- ],
- hdrs = [
- "socket_non_stream_blocking.h",
- ],
- deps = [
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
- alwayslink = 1,
-)
-
-cc_library(
- name = "socket_bind_to_device_util",
- testonly = 1,
- srcs = [
- "socket_bind_to_device_util.cc",
- ],
- hdrs = [
- "socket_bind_to_device_util.h",
- ],
- deps = [
- "//test/util:test_util",
- "@com_google_absl//absl/memory",
- "@com_google_absl//absl/strings",
- ],
- alwayslink = 1,
-)
-
-cc_binary(
- name = "socket_stream_local_test",
- testonly = 1,
- srcs = [
- "socket_unix_stream_local.cc",
- ],
- linkstatic = 1,
- deps = [
- ":socket_stream_test_cases",
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_stream_blocking_local_test",
- testonly = 1,
- srcs = [
- "socket_unix_stream_blocking_local.cc",
- ],
- linkstatic = 1,
- deps = [
- ":socket_stream_blocking_test_cases",
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_stream_blocking_tcp_test",
- testonly = 1,
- srcs = [
- "socket_ip_tcp_loopback_blocking.cc",
- ],
- linkstatic = 1,
- deps = [
- ":ip_socket_test_util",
- ":socket_stream_blocking_test_cases",
- ":socket_test_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_stream_nonblock_local_test",
- testonly = 1,
- srcs = [
- "socket_unix_stream_nonblock_local.cc",
- ],
- linkstatic = 1,
- deps = [
- ":socket_stream_nonblocking_test_cases",
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_unix_unbound_dgram_test",
- testonly = 1,
- srcs = ["socket_unix_unbound_dgram.cc"],
- linkstatic = 1,
- deps = [
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_unix_unbound_abstract_test",
- testonly = 1,
- srcs = ["socket_unix_unbound_abstract.cc"],
- linkstatic = 1,
- deps = [
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_unix_unbound_filesystem_test",
- testonly = 1,
- srcs = ["socket_unix_unbound_filesystem.cc"],
- linkstatic = 1,
- deps = [
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- gtest,
- "//test/util:file_descriptor",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_blocking_local_test",
- testonly = 1,
- srcs = [
- "socket_unix_blocking_local.cc",
- ],
- linkstatic = 1,
- deps = [
- ":socket_blocking_test_cases",
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_blocking_ip_test",
- testonly = 1,
- srcs = [
- "socket_ip_loopback_blocking.cc",
- ],
- linkstatic = 1,
- deps = [
- ":ip_socket_test_util",
- ":socket_blocking_test_cases",
- ":socket_test_util",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_non_stream_blocking_local_test",
- testonly = 1,
- srcs = [
- "socket_unix_non_stream_blocking_local.cc",
- ],
- linkstatic = 1,
- deps = [
- ":socket_non_stream_blocking_test_cases",
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_non_stream_blocking_udp_test",
- testonly = 1,
- srcs = [
- "socket_ip_udp_loopback_blocking.cc",
- ],
- linkstatic = 1,
- deps = [
- ":ip_socket_test_util",
- ":socket_non_stream_blocking_test_cases",
- ":socket_test_util",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_unix_pair_test",
- testonly = 1,
- srcs = [
- "socket_unix_pair.cc",
- ],
- linkstatic = 1,
- deps = [
- ":socket_test_util",
- ":socket_unix_cmsg_test_cases",
- ":socket_unix_test_cases",
- ":unix_domain_socket_test_util",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_unix_unbound_seqpacket_test",
- testonly = 1,
- srcs = ["socket_unix_unbound_seqpacket.cc"],
- linkstatic = 1,
- deps = [
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_unix_unbound_stream_test",
- testonly = 1,
- srcs = ["socket_unix_unbound_stream.cc"],
- linkstatic = 1,
- deps = [
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "socket_netdevice_test",
- testonly = 1,
- srcs = ["socket_netdevice.cc"],
- linkstatic = 1,
- deps = [
- ":socket_netlink_util",
- ":socket_test_util",
- "//test/util:file_descriptor",
- "@com_google_absl//absl/base:endian",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "stat_test",
- testonly = 1,
- srcs = [
- "file_base.h",
- "stat.cc",
- ],
- linkstatic = 1,
- deps = [
- "//test/util:cleanup",
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:posix_error",
- "//test/util:save_util",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "stat_times_test",
- testonly = 1,
- srcs = ["stat_times.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:file_descriptor",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "statfs_test",
- testonly = 1,
- srcs = [
- "file_base.h",
- "statfs.cc",
- ],
- linkstatic = 1,
- deps = [
- "//test/util:file_descriptor",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:posix_error",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "symlink_test",
- testonly = 1,
- srcs = ["symlink.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "verity_symlink_test",
- testonly = 1,
- srcs = ["verity_symlink.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- gtest,
- "//test/util:fs_util",
- "//test/util:mount_util",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:verity_util",
- ],
-)
-
-cc_binary(
- name = "sync_test",
- testonly = 1,
- # Android does not support syncfs in r22.
- srcs = select_system(linux = ["sync.cc"]),
- linkstatic = 1,
- deps = [
- gtest,
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "sysinfo_test",
- testonly = 1,
- srcs = ["sysinfo.cc"],
- linkstatic = 1,
- deps = [
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "syslog_test",
- testonly = 1,
- srcs = ["syslog.cc"],
- linkstatic = 1,
- deps = [
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "sysret_test",
- testonly = 1,
- srcs = ["sysret.cc"],
- linkstatic = 1,
- deps = [
- gtest,
- "//test/util:logging",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "tcp_socket_test",
- testonly = 1,
- srcs = ["tcp_socket.cc"],
- defines = select_system(),
- linkstatic = 1,
- deps = [
- ":socket_test_util",
- "//test/util:file_descriptor",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:posix_error",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "tgkill_test",
- testonly = 1,
- srcs = ["tgkill.cc"],
- linkstatic = 1,
- deps = [
- gtest,
- "//test/util:signal_util",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "time_test",
- testonly = 1,
- srcs = ["time.cc"],
- linkstatic = 1,
- deps = [
- gtest,
- "//test/util:proc_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "timerfd_test",
- testonly = 1,
- srcs = ["timerfd.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:file_descriptor",
- "//test/util:posix_error",
- "//test/util:test_main",
- "//test/util:test_util",
- "@com_google_absl//absl/time",
- ],
-)
-
-cc_binary(
- name = "timers_test",
- testonly = 1,
- srcs = ["timers.cc"],
- linkstatic = 1,
- deps = [
- gbenchmark,
- gtest,
- "//test/util:cleanup",
- "//test/util:logging",
- "//test/util:multiprocess_util",
- "//test/util:posix_error",
- "//test/util:signal_util",
- "//test/util:test_util",
- "//test/util:thread_util",
- "//test/util:timer_util",
- "@com_google_absl//absl/flags:flag",
- "@com_google_absl//absl/time",
- ],
-)
-
-cc_binary(
- name = "tkill_test",
- testonly = 1,
- srcs = ["tkill.cc"],
- linkstatic = 1,
- deps = [
- gtest,
- "//test/util:logging",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "truncate_test",
- testonly = 1,
- srcs = ["truncate.cc"],
- linkstatic = 1,
- deps = [
- ":file_base",
- "//test/util:capability_util",
- "//test/util:cleanup",
- "//test/util:file_descriptor",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "tuntap_test",
- testonly = 1,
- srcs = ["tuntap.cc"],
- linkstatic = 1,
- deps = [
- ":socket_test_util",
- gtest,
- ":socket_netlink_route_util",
- "//test/util:capability_util",
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- "//test/util:posix_error",
- "//test/util:test_main",
- "//test/util:test_util",
- "@com_google_absl//absl/strings",
- ],
-)
-
-cc_binary(
- name = "tuntap_hostinet_test",
- testonly = 1,
- srcs = ["tuntap_hostinet.cc"],
- linkstatic = 1,
- deps = [
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "udp_socket_test",
- testonly = 1,
- srcs = ["udp_socket.cc"],
- defines = select_system(),
- linkstatic = 1,
- deps = [
- ":ip_socket_test_util",
- ":socket_test_util",
- ":unix_domain_socket_test_util",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/strings:str_format",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:file_descriptor",
- "//test/util:posix_error",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "udp_bind_test",
- testonly = 1,
- srcs = ["udp_bind.cc"],
- linkstatic = 1,
- deps = [
- ":socket_test_util",
- "//test/util:file_descriptor",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "uidgid_test",
- testonly = 1,
- srcs = ["uidgid.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "@com_google_absl//absl/flags:flag",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:cleanup",
- "//test/util:multiprocess_util",
- "//test/util:posix_error",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- "//test/util:uid_util",
- ],
-)
-
-cc_binary(
- name = "uname_test",
- testonly = 1,
- srcs = ["uname.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "unlink_test",
- testonly = 1,
- srcs = ["unlink.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "unshare_test",
- testonly = 1,
- srcs = ["unshare.cc"],
- linkstatic = 1,
- deps = [
- "@com_google_absl//absl/synchronization",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "utimes_test",
- testonly = 1,
- # Android does not support futimesat in r22.
- srcs = select_system(linux = ["utimes.cc"]),
- linkstatic = 1,
- deps = [
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- "@com_google_absl//absl/time",
- ],
-)
-
-cc_binary(
- name = "vdso_test",
- testonly = 1,
- srcs = ["vdso.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:fs_util",
- gtest,
- "//test/util:posix_error",
- "//test/util:proc_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "vfork_test",
- testonly = 1,
- srcs = ["vfork.cc"],
- linkstatic = 1,
- deps = [
- "@com_google_absl//absl/flags:flag",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:logging",
- "//test/util:multiprocess_util",
- "//test/util:test_util",
- "//test/util:time_util",
- ],
-)
-
-cc_binary(
- name = "wait_test",
- testonly = 1,
- srcs = ["wait.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:cleanup",
- "//test/util:file_descriptor",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/synchronization",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:logging",
- "//test/util:multiprocess_util",
- "//test/util:posix_error",
- "//test/util:signal_util",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- "//test/util:time_util",
- ],
-)
-
-cc_binary(
- name = "write_test",
- testonly = 1,
- srcs = ["write.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:cleanup",
- "@com_google_absl//absl/base:core_headers",
- gtest,
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "memory_accounting_test",
- testonly = 1,
- srcs = ["memory_accounting.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:fs_util",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/strings:str_format",
- gtest,
- "//test/util:posix_error",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "network_namespace_test",
- testonly = 1,
- srcs = ["network_namespace.cc"],
- linkstatic = 1,
- deps = [
- ":socket_test_util",
- gtest,
- "//test/util:capability_util",
- "//test/util:posix_error",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "semaphore_test",
- testonly = 1,
- # Android does not support XSI semaphores in r22.
- srcs = select_system(linux = ["semaphore.cc"]),
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/memory",
- "@com_google_absl//absl/synchronization",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- ],
-)
-
-cc_binary(
- name = "shm_test",
- testonly = 1,
- srcs = ["shm.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:multiprocess_util",
- "//test/util:posix_error",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- "@com_google_absl//absl/time",
- ],
-)
-
-cc_binary(
- name = "fadvise64_test",
- testonly = 1,
- srcs = ["fadvise64.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:file_descriptor",
- gtest,
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "vdso_clock_gettime_test",
- testonly = 1,
- srcs = ["vdso_clock_gettime.cc"],
- linkstatic = 1,
- deps = [
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/time",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "vsyscall_test",
- testonly = 1,
- srcs = ["vsyscall.cc"],
- linkstatic = 1,
- deps = [
- gtest,
- "//test/util:proc_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "proc_net_unix_test",
- testonly = 1,
- srcs = ["proc_net_unix.cc"],
- linkstatic = 1,
- deps = [
- ":unix_domain_socket_test_util",
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/strings:str_format",
- gtest,
- "//test/util:cleanup",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "memfd_test",
- testonly = 1,
- srcs = ["memfd.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- gtest,
- "//test/util:memory_util",
- "//test/util:multiprocess_util",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "proc_net_tcp_test",
- testonly = 1,
- srcs = ["proc_net_tcp.cc"],
- linkstatic = 1,
- deps = [
- ":ip_socket_test_util",
- "//test/util:file_descriptor",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "proc_net_udp_test",
- testonly = 1,
- srcs = ["proc_net_udp.cc"],
- linkstatic = 1,
- deps = [
- ":ip_socket_test_util",
- "//test/util:file_descriptor",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "processes_test",
- testonly = 1,
- srcs = ["processes.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "xattr_test",
- testonly = 1,
- srcs = [
- "file_base.h",
- "xattr.cc",
- ],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- "@com_google_absl//absl/container:flat_hash_set",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:posix_error",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- ],
-)
-
-cc_binary(
- name = "cgroup_test",
- testonly = 1,
- srcs = ["cgroup.cc"],
- linkstatic = 1,
- deps = [
- "//test/util:capability_util",
- "//test/util:cgroup_util",
- "//test/util:file_descriptor",
- "//test/util:fs_util",
- "//test/util:mount_util",
- "@com_google_absl//absl/strings",
- gtest,
- "//test/util:cleanup",
- "//test/util:posix_error",
- "//test/util:temp_path",
- "//test/util:test_main",
- "//test/util:test_util",
- "//test/util:thread_util",
- "@com_google_absl//absl/container:flat_hash_map",
- "@com_google_absl//absl/container:flat_hash_set",
- ],
-)
diff --git a/test/syscalls/linux/accept_bind.cc b/test/syscalls/linux/accept_bind.cc
deleted file mode 100644
index ba3747290..000000000
--- a/test/syscalls/linux/accept_bind.cc
+++ /dev/null
@@ -1,666 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <stdio.h>
-#include <sys/socket.h>
-#include <sys/un.h>
-
-#include <algorithm>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST_P(AllSocketPairTest, Listen) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds());
-}
-
-TEST_P(AllSocketPairTest, ListenIncreaseBacklog) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds());
- ASSERT_THAT(listen(sockets->first_fd(), 10), SyscallSucceeds());
-}
-
-TEST_P(AllSocketPairTest, ListenDecreaseBacklog) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds());
- ASSERT_THAT(listen(sockets->first_fd(), 1), SyscallSucceeds());
-}
-
-TEST_P(AllSocketPairTest, ListenBacklogSizes) {
- DisableSave ds;
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- int type;
- socklen_t typelen = sizeof(type);
- EXPECT_THAT(
- getsockopt(sockets->first_fd(), SOL_SOCKET, SO_TYPE, &type, &typelen),
- SyscallSucceeds());
-
- std::array<int, 3> backlogs = {-1, 0, 1};
- for (auto& backlog : backlogs) {
- ASSERT_THAT(listen(sockets->first_fd(), backlog), SyscallSucceeds());
-
- int expected_accepts = backlog;
- if (backlog < 0) {
- expected_accepts = 1024;
- }
- for (int i = 0; i < expected_accepts; i++) {
- SCOPED_TRACE(absl::StrCat("i=", i));
- // Connect to the listening socket.
- const FileDescriptor client =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_UNIX, type, 0));
- ASSERT_THAT(connect(client.get(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
- const FileDescriptor accepted = ASSERT_NO_ERRNO_AND_VALUE(
- Accept(sockets->first_fd(), nullptr, nullptr));
- }
- }
-}
-
-TEST_P(AllSocketPairTest, ListenWithoutBind) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- ASSERT_THAT(listen(sockets->first_fd(), 0), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_P(AllSocketPairTest, DoubleBind) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->second_addr(),
- sockets->second_addr_size()),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_P(AllSocketPairTest, BindListenBind) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->second_addr(),
- sockets->second_addr_size()),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_P(AllSocketPairTest, DoubleListen) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds());
-
- ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds());
-}
-
-TEST_P(AllSocketPairTest, DoubleConnect) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds());
-
- ASSERT_THAT(connect(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(connect(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallFailsWithErrno(EISCONN));
-}
-
-TEST_P(AllSocketPairTest, Connect) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds());
-
- ASSERT_THAT(connect(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-}
-
-TEST_P(AllSocketPairTest, ConnectWithWrongType) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int type;
- socklen_t typelen = sizeof(type);
- EXPECT_THAT(
- getsockopt(sockets->first_fd(), SOL_SOCKET, SO_TYPE, &type, &typelen),
- SyscallSucceeds());
- switch (type) {
- case SOCK_STREAM:
- type = SOCK_SEQPACKET;
- break;
- case SOCK_SEQPACKET:
- type = SOCK_STREAM;
- break;
- }
-
- const FileDescriptor another_socket =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_UNIX, type, 0));
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds());
-
- if (sockets->first_addr()->sa_data[0] != 0) {
- ASSERT_THAT(connect(another_socket.get(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallFailsWithErrno(EPROTOTYPE));
- } else {
- ASSERT_THAT(connect(another_socket.get(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallFailsWithErrno(ECONNREFUSED));
- }
-
- ASSERT_THAT(connect(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-}
-
-TEST_P(AllSocketPairTest, ConnectNonListening) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(connect(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallFailsWithErrno(ECONNREFUSED));
-}
-
-TEST_P(AllSocketPairTest, ConnectToFilePath) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct sockaddr_un addr = {};
- addr.sun_family = AF_UNIX;
- constexpr char kPath[] = "/tmp";
- memcpy(addr.sun_path, kPath, sizeof(kPath));
-
- ASSERT_THAT(
- connect(sockets->second_fd(),
- reinterpret_cast<const struct sockaddr*>(&addr), sizeof(addr)),
- SyscallFailsWithErrno(ECONNREFUSED));
-}
-
-TEST_P(AllSocketPairTest, ConnectToInvalidAbstractPath) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct sockaddr_un addr = {};
- addr.sun_family = AF_UNIX;
- constexpr char kPath[] = "\0nonexistent";
- memcpy(addr.sun_path, kPath, sizeof(kPath));
-
- ASSERT_THAT(
- connect(sockets->second_fd(),
- reinterpret_cast<const struct sockaddr*>(&addr), sizeof(addr)),
- SyscallFailsWithErrno(ECONNREFUSED));
-}
-
-TEST_P(AllSocketPairTest, SelfConnect) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds());
-
- ASSERT_THAT(connect(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_P(AllSocketPairTest, ConnectWithoutListen) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(connect(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallFailsWithErrno(ECONNREFUSED));
-}
-
-TEST_P(AllSocketPairTest, Accept) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds());
-
- ASSERT_THAT(connect(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- int accepted = -1;
- ASSERT_THAT(accepted = accept(sockets->first_fd(), nullptr, nullptr),
- SyscallSucceeds());
- ASSERT_THAT(close(accepted), SyscallSucceeds());
-}
-
-TEST_P(AllSocketPairTest, AcceptValidAddrLen) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds());
-
- ASSERT_THAT(connect(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- int accepted = -1;
- struct sockaddr_un addr = {};
- socklen_t addr_len = sizeof(addr);
- ASSERT_THAT(
- accepted = accept(sockets->first_fd(), AsSockAddr(&addr), &addr_len),
- SyscallSucceeds());
- ASSERT_THAT(close(accepted), SyscallSucceeds());
-}
-
-TEST_P(AllSocketPairTest, AcceptNegativeAddrLen) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds());
-
- ASSERT_THAT(connect(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- // With a negative addr_len, accept returns EINVAL.
- struct sockaddr_un addr = {};
- socklen_t addr_len = -1;
- ASSERT_THAT(accept(sockets->first_fd(), AsSockAddr(&addr), &addr_len),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_P(AllSocketPairTest, AcceptLargePositiveAddrLen) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds());
-
- ASSERT_THAT(connect(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- // With a large (positive) addr_len, accept does not return EINVAL.
- int accepted = -1;
- char addr_buf[200];
- socklen_t addr_len = sizeof(addr_buf);
- ASSERT_THAT(accepted = accept(sockets->first_fd(),
- reinterpret_cast<struct sockaddr*>(addr_buf),
- &addr_len),
- SyscallSucceeds());
- // addr_len should have been updated by accept().
- EXPECT_LT(addr_len, sizeof(addr_buf));
- ASSERT_THAT(close(accepted), SyscallSucceeds());
-}
-
-TEST_P(AllSocketPairTest, AcceptVeryLargePositiveAddrLen) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds());
-
- ASSERT_THAT(connect(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- // With a large (positive) addr_len, accept does not return EINVAL.
- int accepted = -1;
- char addr_buf[2000];
- socklen_t addr_len = sizeof(addr_buf);
- ASSERT_THAT(accepted = accept(sockets->first_fd(),
- reinterpret_cast<struct sockaddr*>(addr_buf),
- &addr_len),
- SyscallSucceeds());
- // addr_len should have been updated by accept().
- EXPECT_LT(addr_len, sizeof(addr_buf));
- ASSERT_THAT(close(accepted), SyscallSucceeds());
-}
-
-TEST_P(AllSocketPairTest, AcceptWithoutBind) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(accept(sockets->first_fd(), nullptr, nullptr),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_P(AllSocketPairTest, AcceptWithoutListen) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
- ASSERT_THAT(accept(sockets->first_fd(), nullptr, nullptr),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_P(AllSocketPairTest, GetRemoteAddress) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds());
-
- ASSERT_THAT(connect(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- socklen_t addr_len = sockets->first_addr_size();
- struct sockaddr_storage addr = {};
- ASSERT_THAT(
- getpeername(sockets->second_fd(), (struct sockaddr*)(&addr), &addr_len),
- SyscallSucceeds());
- EXPECT_EQ(addr_len, sockets->first_addr_len());
- EXPECT_EQ(0, memcmp(&addr, sockets->first_addr(), sockets->first_addr_len()));
-}
-
-TEST_P(AllSocketPairTest, UnboundGetLocalAddress) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds());
-
- ASSERT_THAT(connect(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- socklen_t addr_len = sockets->first_addr_size();
- struct sockaddr_storage addr = {};
- ASSERT_THAT(
- getsockname(sockets->second_fd(), (struct sockaddr*)(&addr), &addr_len),
- SyscallSucceeds());
- EXPECT_EQ(addr_len, 2);
- EXPECT_EQ(
- memcmp(&addr, sockets->second_addr(),
- std::min((size_t)addr_len, (size_t)sockets->second_addr_len())),
- 0);
-}
-
-TEST_P(AllSocketPairTest, BoundGetLocalAddress) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds());
-
- ASSERT_THAT(bind(sockets->second_fd(), sockets->second_addr(),
- sockets->second_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(connect(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- socklen_t addr_len = sockets->first_addr_size();
- struct sockaddr_storage addr = {};
- ASSERT_THAT(
- getsockname(sockets->second_fd(), (struct sockaddr*)(&addr), &addr_len),
- SyscallSucceeds());
- EXPECT_EQ(addr_len, sockets->second_addr_len());
- EXPECT_EQ(
- memcmp(&addr, sockets->second_addr(),
- std::min((size_t)addr_len, (size_t)sockets->second_addr_len())),
- 0);
-}
-
-TEST_P(AllSocketPairTest, BoundConnector) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds());
-
- ASSERT_THAT(bind(sockets->second_fd(), sockets->second_addr(),
- sockets->second_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(connect(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-}
-
-TEST_P(AllSocketPairTest, UnboundSenderAddr) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds());
-
- ASSERT_THAT(connect(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- int accepted = -1;
- ASSERT_THAT(accepted = accept(sockets->first_fd(), nullptr, nullptr),
- SyscallSucceeds());
- FileDescriptor accepted_fd(accepted);
-
- int i = 0;
- ASSERT_THAT(RetryEINTR(send)(sockets->second_fd(), &i, sizeof(i), 0),
- SyscallSucceedsWithValue(sizeof(i)));
-
- struct sockaddr_storage addr;
- socklen_t addr_len = sizeof(addr);
- ASSERT_THAT(RetryEINTR(recvfrom)(accepted_fd.get(), &i, sizeof(i), 0,
- AsSockAddr(&addr), &addr_len),
- SyscallSucceedsWithValue(sizeof(i)));
- EXPECT_EQ(addr_len, 0);
-}
-
-TEST_P(AllSocketPairTest, BoundSenderAddr) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds());
-
- ASSERT_THAT(bind(sockets->second_fd(), sockets->second_addr(),
- sockets->second_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(connect(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- int accepted = -1;
- ASSERT_THAT(accepted = accept(sockets->first_fd(), nullptr, nullptr),
- SyscallSucceeds());
- FileDescriptor accepted_fd(accepted);
-
- int i = 0;
- ASSERT_THAT(RetryEINTR(send)(sockets->second_fd(), &i, sizeof(i), 0),
- SyscallSucceedsWithValue(sizeof(i)));
-
- struct sockaddr_storage addr;
- socklen_t addr_len = sizeof(addr);
- ASSERT_THAT(RetryEINTR(recvfrom)(accepted_fd.get(), &i, sizeof(i), 0,
- AsSockAddr(&addr), &addr_len),
- SyscallSucceedsWithValue(sizeof(i)));
- EXPECT_EQ(addr_len, sockets->second_addr_len());
- EXPECT_EQ(
- memcmp(&addr, sockets->second_addr(),
- std::min((size_t)addr_len, (size_t)sockets->second_addr_len())),
- 0);
-}
-
-TEST_P(AllSocketPairTest, BindAfterConnectSenderAddr) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds());
-
- ASSERT_THAT(connect(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(bind(sockets->second_fd(), sockets->second_addr(),
- sockets->second_addr_size()),
- SyscallSucceeds());
-
- int accepted = -1;
- ASSERT_THAT(accepted = accept(sockets->first_fd(), nullptr, nullptr),
- SyscallSucceeds());
- FileDescriptor accepted_fd(accepted);
-
- int i = 0;
- ASSERT_THAT(RetryEINTR(send)(sockets->second_fd(), &i, sizeof(i), 0),
- SyscallSucceedsWithValue(sizeof(i)));
-
- struct sockaddr_storage addr;
- socklen_t addr_len = sizeof(addr);
- ASSERT_THAT(RetryEINTR(recvfrom)(accepted_fd.get(), &i, sizeof(i), 0,
- AsSockAddr(&addr), &addr_len),
- SyscallSucceedsWithValue(sizeof(i)));
- EXPECT_EQ(addr_len, sockets->second_addr_len());
- EXPECT_EQ(
- memcmp(&addr, sockets->second_addr(),
- std::min((size_t)addr_len, (size_t)sockets->second_addr_len())),
- 0);
-}
-
-TEST_P(AllSocketPairTest, BindAfterAcceptSenderAddr) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds());
-
- ASSERT_THAT(connect(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- int accepted = -1;
- ASSERT_THAT(accepted = accept(sockets->first_fd(), nullptr, nullptr),
- SyscallSucceeds());
- FileDescriptor accepted_fd(accepted);
-
- ASSERT_THAT(bind(sockets->second_fd(), sockets->second_addr(),
- sockets->second_addr_size()),
- SyscallSucceeds());
-
- int i = 0;
- ASSERT_THAT(RetryEINTR(send)(sockets->second_fd(), &i, sizeof(i), 0),
- SyscallSucceedsWithValue(sizeof(i)));
-
- struct sockaddr_storage addr;
- socklen_t addr_len = sizeof(addr);
- ASSERT_THAT(RetryEINTR(recvfrom)(accepted_fd.get(), &i, sizeof(i), 0,
- AsSockAddr(&addr), &addr_len),
- SyscallSucceedsWithValue(sizeof(i)));
- EXPECT_EQ(addr_len, sockets->second_addr_len());
- EXPECT_EQ(
- memcmp(&addr, sockets->second_addr(),
- std::min((size_t)addr_len, (size_t)sockets->second_addr_len())),
- 0);
-}
-
-INSTANTIATE_TEST_SUITE_P(
- AllUnixDomainSockets, AllSocketPairTest,
- ::testing::ValuesIn(VecCat<SocketPairKind>(
- ApplyVec<SocketPairKind>(
- FilesystemUnboundUnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_STREAM, SOCK_SEQPACKET},
- List<int>{0, SOCK_NONBLOCK})),
- ApplyVec<SocketPairKind>(
- AbstractUnboundUnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_STREAM, SOCK_SEQPACKET},
- List<int>{0, SOCK_NONBLOCK})))));
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/accept_bind_stream.cc b/test/syscalls/linux/accept_bind_stream.cc
deleted file mode 100644
index 4857f160b..000000000
--- a/test/syscalls/linux/accept_bind_stream.cc
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <stdio.h>
-#include <sys/un.h>
-
-#include <algorithm>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST_P(AllSocketPairTest, BoundSenderAddrCoalesced) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds());
-
- ASSERT_THAT(connect(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- int accepted = -1;
- ASSERT_THAT(accepted = accept(sockets->first_fd(), nullptr, nullptr),
- SyscallSucceeds());
- FileDescriptor closer(accepted);
-
- int i = 0;
- ASSERT_THAT(RetryEINTR(send)(sockets->second_fd(), &i, sizeof(i), 0),
- SyscallSucceedsWithValue(sizeof(i)));
-
- ASSERT_THAT(bind(sockets->second_fd(), sockets->second_addr(),
- sockets->second_addr_size()),
- SyscallSucceeds());
-
- i = 0;
- ASSERT_THAT(RetryEINTR(send)(sockets->second_fd(), &i, sizeof(i), 0),
- SyscallSucceedsWithValue(sizeof(i)));
-
- int ri[2] = {0, 0};
- struct sockaddr_storage addr;
- socklen_t addr_len = sizeof(addr);
- ASSERT_THAT(
- RetryEINTR(recvfrom)(accepted, ri, sizeof(ri), 0,
- reinterpret_cast<sockaddr*>(&addr), &addr_len),
- SyscallSucceedsWithValue(sizeof(ri)));
- EXPECT_EQ(addr_len, sockets->second_addr_len());
-
- EXPECT_EQ(
- memcmp(&addr, sockets->second_addr(),
- std::min((size_t)addr_len, (size_t)sockets->second_addr_len())),
- 0);
-}
-
-INSTANTIATE_TEST_SUITE_P(
- AllUnixDomainSockets, AllSocketPairTest,
- ::testing::ValuesIn(VecCat<SocketPairKind>(
- ApplyVec<SocketPairKind>(FilesystemUnboundUnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_STREAM},
- List<int>{
- 0, SOCK_NONBLOCK})),
- ApplyVec<SocketPairKind>(
- AbstractUnboundUnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_STREAM},
- List<int>{0, SOCK_NONBLOCK})))));
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/access.cc b/test/syscalls/linux/access.cc
deleted file mode 100644
index bcc25cef4..000000000
--- a/test/syscalls/linux/access.cc
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <stdlib.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "test/util/capability_util.h"
-#include "test/util/fs_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-using ::testing::Ge;
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-class AccessTest : public ::testing::Test {
- public:
- std::string CreateTempFile(int perm) {
- const std::string path = NewTempAbsPath();
- const int fd = open(path.c_str(), O_CREAT | O_RDONLY, perm);
- TEST_PCHECK(fd > 0);
- TEST_PCHECK(close(fd) == 0);
- return path;
- }
-
- protected:
- // SetUp creates various configurations of files.
- void SetUp() override {
- // Move to the temporary directory. This allows us to reason more easily
- // about absolute and relative paths.
- ASSERT_THAT(chdir(GetAbsoluteTestTmpdir().c_str()), SyscallSucceeds());
-
- // Create an empty file, standard permissions.
- relfile_ = NewTempRelPath();
- int fd;
- ASSERT_THAT(fd = open(relfile_.c_str(), O_CREAT | O_TRUNC, 0644),
- SyscallSucceedsWithValue(Ge(0)));
- ASSERT_THAT(close(fd), SyscallSucceeds());
- absfile_ = GetAbsoluteTestTmpdir() + "/" + relfile_;
-
- // Create an empty directory, no writable permissions.
- absdir_ = NewTempAbsPath();
- reldir_ = JoinPath(Basename(absdir_), "");
- ASSERT_THAT(mkdir(reldir_.c_str(), 0555), SyscallSucceeds());
-
- // This file doesn't exist.
- relnone_ = NewTempRelPath();
- absnone_ = GetAbsoluteTestTmpdir() + "/" + relnone_;
- }
-
- // TearDown unlinks created files.
- void TearDown() override {
- ASSERT_THAT(unlink(absfile_.c_str()), SyscallSucceeds());
- ASSERT_THAT(rmdir(absdir_.c_str()), SyscallSucceeds());
- }
-
- std::string relfile_;
- std::string reldir_;
-
- std::string absfile_;
- std::string absdir_;
-
- std::string relnone_;
- std::string absnone_;
-};
-
-TEST_F(AccessTest, RelativeFile) {
- EXPECT_THAT(access(relfile_.c_str(), R_OK), SyscallSucceeds());
-}
-
-TEST_F(AccessTest, RelativeDir) {
- EXPECT_THAT(access(reldir_.c_str(), R_OK | X_OK), SyscallSucceeds());
-}
-
-TEST_F(AccessTest, AbsFile) {
- EXPECT_THAT(access(absfile_.c_str(), R_OK), SyscallSucceeds());
-}
-
-TEST_F(AccessTest, AbsDir) {
- EXPECT_THAT(access(absdir_.c_str(), R_OK | X_OK), SyscallSucceeds());
-}
-
-TEST_F(AccessTest, RelDoesNotExist) {
- EXPECT_THAT(access(relnone_.c_str(), R_OK), SyscallFailsWithErrno(ENOENT));
-}
-
-TEST_F(AccessTest, AbsDoesNotExist) {
- EXPECT_THAT(access(absnone_.c_str(), R_OK), SyscallFailsWithErrno(ENOENT));
-}
-
-TEST_F(AccessTest, InvalidMode) {
- EXPECT_THAT(access(relfile_.c_str(), 0xffffffff),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_F(AccessTest, NoPerms) {
- // Drop capabilities that allow us to override permissions. We must drop
- // PERMITTED because access() checks those instead of EFFECTIVE.
- ASSERT_NO_ERRNO(DropPermittedCapability(CAP_DAC_OVERRIDE));
- ASSERT_NO_ERRNO(DropPermittedCapability(CAP_DAC_READ_SEARCH));
-
- EXPECT_THAT(access(absdir_.c_str(), W_OK), SyscallFailsWithErrno(EACCES));
-}
-
-TEST_F(AccessTest, InvalidName) {
- EXPECT_THAT(access(reinterpret_cast<char*>(0x1234), W_OK),
- SyscallFailsWithErrno(EFAULT));
-}
-
-TEST_F(AccessTest, UsrReadOnly) {
- // Drop capabilities that allow us to override permissions. We must drop
- // PERMITTED because access() checks those instead of EFFECTIVE.
- ASSERT_NO_ERRNO(DropPermittedCapability(CAP_DAC_OVERRIDE));
- ASSERT_NO_ERRNO(DropPermittedCapability(CAP_DAC_READ_SEARCH));
-
- const std::string filename = CreateTempFile(0400);
- EXPECT_THAT(access(filename.c_str(), R_OK), SyscallSucceeds());
- EXPECT_THAT(access(filename.c_str(), W_OK), SyscallFailsWithErrno(EACCES));
- EXPECT_THAT(access(filename.c_str(), X_OK), SyscallFailsWithErrno(EACCES));
- EXPECT_THAT(unlink(filename.c_str()), SyscallSucceeds());
-}
-
-TEST_F(AccessTest, UsrReadExec) {
- // Drop capabilities that allow us to override permissions. We must drop
- // PERMITTED because access() checks those instead of EFFECTIVE.
- ASSERT_NO_ERRNO(DropPermittedCapability(CAP_DAC_OVERRIDE));
- ASSERT_NO_ERRNO(DropPermittedCapability(CAP_DAC_READ_SEARCH));
-
- const std::string filename = CreateTempFile(0500);
- EXPECT_THAT(access(filename.c_str(), R_OK | X_OK), SyscallSucceeds());
- EXPECT_THAT(access(filename.c_str(), W_OK), SyscallFailsWithErrno(EACCES));
- EXPECT_THAT(unlink(filename.c_str()), SyscallSucceeds());
-}
-
-TEST_F(AccessTest, UsrReadWrite) {
- const std::string filename = CreateTempFile(0600);
- EXPECT_THAT(access(filename.c_str(), R_OK | W_OK), SyscallSucceeds());
- EXPECT_THAT(access(filename.c_str(), X_OK), SyscallFailsWithErrno(EACCES));
- EXPECT_THAT(unlink(filename.c_str()), SyscallSucceeds());
-}
-
-TEST_F(AccessTest, UsrReadWriteExec) {
- const std::string filename = CreateTempFile(0700);
- EXPECT_THAT(access(filename.c_str(), R_OK | W_OK | X_OK), SyscallSucceeds());
- EXPECT_THAT(unlink(filename.c_str()), SyscallSucceeds());
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/affinity.cc b/test/syscalls/linux/affinity.cc
deleted file mode 100644
index 128364c34..000000000
--- a/test/syscalls/linux/affinity.cc
+++ /dev/null
@@ -1,242 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sched.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "absl/strings/str_split.h"
-#include "test/util/cleanup.h"
-#include "test/util/fs_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-// These tests are for both the sched_getaffinity(2) and sched_setaffinity(2)
-// syscalls.
-class AffinityTest : public ::testing::Test {
- protected:
- void SetUp() override {
- EXPECT_THAT(
- // Need to use the raw syscall to get the actual size.
- cpuset_size_ = syscall(SYS_sched_getaffinity, /*pid=*/0,
- sizeof(cpu_set_t), &mask_),
- SyscallSucceeds());
- // Lots of tests rely on having more than 1 logical processor available.
- EXPECT_GT(CPU_COUNT(&mask_), 1);
- }
-
- static PosixError ClearLowestBit(cpu_set_t* mask, size_t cpus) {
- const size_t mask_size = CPU_ALLOC_SIZE(cpus);
- for (size_t n = 0; n < cpus; ++n) {
- if (CPU_ISSET_S(n, mask_size, mask)) {
- CPU_CLR_S(n, mask_size, mask);
- return NoError();
- }
- }
- return PosixError(EINVAL, "No bit to clear, mask is empty");
- }
-
- PosixError ClearLowestBit() { return ClearLowestBit(&mask_, CPU_SETSIZE); }
-
- // Stores the initial cpu mask for this process.
- cpu_set_t mask_ = {};
- int cpuset_size_ = 0;
-};
-
-// sched_getaffinity(2) is implemented.
-TEST_F(AffinityTest, SchedGetAffinityImplemented) {
- EXPECT_THAT(sched_getaffinity(/*pid=*/0, sizeof(cpu_set_t), &mask_),
- SyscallSucceeds());
-}
-
-// PID is not found.
-TEST_F(AffinityTest, SchedGetAffinityInvalidPID) {
- // Flaky, but it's tough to avoid a race condition when finding an unused pid
- EXPECT_THAT(sched_getaffinity(/*pid=*/INT_MAX - 1, sizeof(cpu_set_t), &mask_),
- SyscallFailsWithErrno(ESRCH));
-}
-
-// PID is not found.
-TEST_F(AffinityTest, SchedSetAffinityInvalidPID) {
- // Flaky, but it's tough to avoid a race condition when finding an unused pid
- EXPECT_THAT(sched_setaffinity(/*pid=*/INT_MAX - 1, sizeof(cpu_set_t), &mask_),
- SyscallFailsWithErrno(ESRCH));
-}
-
-TEST_F(AffinityTest, SchedSetAffinityZeroMask) {
- CPU_ZERO(&mask_);
- EXPECT_THAT(sched_setaffinity(/*pid=*/0, sizeof(cpu_set_t), &mask_),
- SyscallFailsWithErrno(EINVAL));
-}
-
-// N.B. This test case relies on cpuset_size_ being larger than the actual
-// number of all existing CPUs. Check your machine if the test fails.
-TEST_F(AffinityTest, SchedSetAffinityNonexistentCPUDropped) {
- cpu_set_t mask = mask_;
- // Add a nonexistent CPU.
- //
- // The number needs to be larger than the number of CPUs actually available,
- // but smaller than the number of CPUs that the kernel claims to support --
- // it's implicitly returned by the raw sched_getaffinity syscall.
- CPU_SET(cpuset_size_ * 8 - 1, &mask);
- EXPECT_THAT(
- // Use raw syscall because it will be rejected by the libc wrapper
- // otherwise.
- syscall(SYS_sched_setaffinity, /*pid=*/0, sizeof(cpu_set_t), &mask),
- SyscallSucceeds())
- << "failed with cpumask : " << CPUSetToString(mask)
- << ", cpuset_size_ : " << cpuset_size_;
- cpu_set_t newmask;
- EXPECT_THAT(sched_getaffinity(/*pid=*/0, sizeof(cpu_set_t), &newmask),
- SyscallSucceeds());
- EXPECT_TRUE(CPU_EQUAL(&mask_, &newmask))
- << "got: " << CPUSetToString(newmask)
- << " != expected: " << CPUSetToString(mask_);
-}
-
-TEST_F(AffinityTest, SchedSetAffinityOnlyNonexistentCPUFails) {
- // Make an empty cpu set.
- CPU_ZERO(&mask_);
- // Add a nonexistent CPU.
- //
- // The number needs to be larger than the number of CPUs actually available,
- // but smaller than the number of CPUs that the kernel claims to support --
- // it's implicitly returned by the raw sched_getaffinity syscall.
- int cpu = cpuset_size_ * 8 - 1;
- if (cpu <= NumCPUs()) {
- GTEST_SKIP() << "Skipping test: cpu " << cpu << " exists";
- }
- CPU_SET(cpu, &mask_);
- EXPECT_THAT(
- // Use raw syscall because it will be rejected by the libc wrapper
- // otherwise.
- syscall(SYS_sched_setaffinity, /*pid=*/0, sizeof(cpu_set_t), &mask_),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_F(AffinityTest, SchedSetAffinityInvalidSize) {
- EXPECT_GT(cpuset_size_, 0);
- // Not big enough.
- EXPECT_THAT(sched_getaffinity(/*pid=*/0, cpuset_size_ - 1, &mask_),
- SyscallFailsWithErrno(EINVAL));
- // Not a multiple of word size.
- EXPECT_THAT(sched_getaffinity(/*pid=*/0, cpuset_size_ + 1, &mask_),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_F(AffinityTest, Sanity) {
- ASSERT_NO_ERRNO(ClearLowestBit());
- EXPECT_THAT(sched_setaffinity(/*pid=*/0, sizeof(cpu_set_t), &mask_),
- SyscallSucceeds());
- cpu_set_t newmask;
- EXPECT_THAT(sched_getaffinity(/*pid=*/0, sizeof(cpu_set_t), &newmask),
- SyscallSucceeds());
- EXPECT_TRUE(CPU_EQUAL(&mask_, &newmask))
- << "got: " << CPUSetToString(newmask)
- << " != expected: " << CPUSetToString(mask_);
-}
-
-TEST_F(AffinityTest, NewThread) {
- SKIP_IF(CPU_COUNT(&mask_) < 3);
- ASSERT_NO_ERRNO(ClearLowestBit());
- ASSERT_NO_ERRNO(ClearLowestBit());
- EXPECT_THAT(sched_setaffinity(/*pid=*/0, sizeof(cpu_set_t), &mask_),
- SyscallSucceeds());
- ScopedThread([this]() {
- cpu_set_t child_mask;
- ASSERT_THAT(sched_getaffinity(/*pid=*/0, sizeof(cpu_set_t), &child_mask),
- SyscallSucceeds());
- ASSERT_TRUE(CPU_EQUAL(&child_mask, &mask_))
- << "child cpu mask: " << CPUSetToString(child_mask)
- << " != parent cpu mask: " << CPUSetToString(mask_);
- });
-}
-
-TEST_F(AffinityTest, ConsistentWithProcCpuInfo) {
- // Count how many cpus are shown in /proc/cpuinfo.
- std::string cpuinfo = ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/cpuinfo"));
- int count = 0;
- for (auto const& line : absl::StrSplit(cpuinfo, '\n')) {
- if (absl::StartsWith(line, "processor")) {
- count++;
- }
- }
- EXPECT_GE(count, CPU_COUNT(&mask_));
-}
-
-TEST_F(AffinityTest, ConsistentWithProcStat) {
- // Count how many cpus are shown in /proc/stat.
- std::string stat = ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/stat"));
- int count = 0;
- for (auto const& line : absl::StrSplit(stat, '\n')) {
- if (absl::StartsWith(line, "cpu") && !absl::StartsWith(line, "cpu ")) {
- count++;
- }
- }
- EXPECT_GE(count, CPU_COUNT(&mask_));
-}
-
-TEST_F(AffinityTest, SmallCpuMask) {
- const int num_cpus = NumCPUs();
- const size_t mask_size = CPU_ALLOC_SIZE(num_cpus);
- cpu_set_t* mask = CPU_ALLOC(num_cpus);
- ASSERT_NE(mask, nullptr);
- const auto free_mask = Cleanup([&] { CPU_FREE(mask); });
-
- CPU_ZERO_S(mask_size, mask);
- ASSERT_THAT(sched_getaffinity(0, mask_size, mask), SyscallSucceeds());
-}
-
-TEST_F(AffinityTest, LargeCpuMask) {
- // Allocate mask bigger than cpu_set_t normally allocates.
- const size_t cpus = CPU_SETSIZE * 8;
- const size_t mask_size = CPU_ALLOC_SIZE(cpus);
-
- cpu_set_t* large_mask = CPU_ALLOC(cpus);
- auto free_mask = Cleanup([large_mask] { CPU_FREE(large_mask); });
- CPU_ZERO_S(mask_size, large_mask);
-
- // Check that get affinity with large mask works as expected.
- ASSERT_THAT(sched_getaffinity(/*pid=*/0, mask_size, large_mask),
- SyscallSucceeds());
- EXPECT_TRUE(CPU_EQUAL(&mask_, large_mask))
- << "got: " << CPUSetToString(*large_mask, cpus)
- << " != expected: " << CPUSetToString(mask_);
-
- // Check that set affinity with large mask works as expected.
- ASSERT_NO_ERRNO(ClearLowestBit(large_mask, cpus));
- EXPECT_THAT(sched_setaffinity(/*pid=*/0, mask_size, large_mask),
- SyscallSucceeds());
-
- cpu_set_t* new_mask = CPU_ALLOC(cpus);
- auto free_new_mask = Cleanup([new_mask] { CPU_FREE(new_mask); });
- CPU_ZERO_S(mask_size, new_mask);
- EXPECT_THAT(sched_getaffinity(/*pid=*/0, mask_size, new_mask),
- SyscallSucceeds());
-
- EXPECT_TRUE(CPU_EQUAL_S(mask_size, large_mask, new_mask))
- << "got: " << CPUSetToString(*new_mask, cpus)
- << " != expected: " << CPUSetToString(*large_mask, cpus);
-}
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/aio.cc b/test/syscalls/linux/aio.cc
deleted file mode 100644
index 806d5729e..000000000
--- a/test/syscalls/linux/aio.cc
+++ /dev/null
@@ -1,430 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <linux/aio_abi.h>
-#include <sys/mman.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <algorithm>
-#include <string>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/file_base.h"
-#include "test/util/cleanup.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/memory_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/proc_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-using ::testing::_;
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-// Returns the size of the VMA containing the given address.
-PosixErrorOr<size_t> VmaSizeAt(uintptr_t addr) {
- ASSIGN_OR_RETURN_ERRNO(std::string proc_self_maps,
- GetContents("/proc/self/maps"));
- ASSIGN_OR_RETURN_ERRNO(auto entries, ParseProcMaps(proc_self_maps));
- // Use binary search to find the first VMA that might contain addr.
- ProcMapsEntry target = {};
- target.end = addr;
- auto it =
- std::upper_bound(entries.begin(), entries.end(), target,
- [](const ProcMapsEntry& x, const ProcMapsEntry& y) {
- return x.end < y.end;
- });
- // Check that it actually contains addr.
- if (it == entries.end() || addr < it->start) {
- return PosixError(ENOENT, absl::StrCat("no VMA contains address ", addr));
- }
- return it->end - it->start;
-}
-
-constexpr char kData[] = "hello world!";
-
-int SubmitCtx(aio_context_t ctx, long nr, struct iocb** iocbpp) {
- return syscall(__NR_io_submit, ctx, nr, iocbpp);
-}
-
-class AIOTest : public FileTest {
- public:
- AIOTest() : ctx_(0) {}
-
- int SetupContext(unsigned int nr) {
- return syscall(__NR_io_setup, nr, &ctx_);
- }
-
- int Submit(long nr, struct iocb** iocbpp) {
- return SubmitCtx(ctx_, nr, iocbpp);
- }
-
- int GetEvents(long min, long max, struct io_event* events,
- struct timespec* timeout) {
- return RetryEINTR(syscall)(__NR_io_getevents, ctx_, min, max, events,
- timeout);
- }
-
- int DestroyContext() { return syscall(__NR_io_destroy, ctx_); }
-
- void TearDown() override {
- FileTest::TearDown();
- if (ctx_ != 0) {
- ASSERT_THAT(DestroyContext(), SyscallSucceeds());
- ctx_ = 0;
- }
- }
-
- struct iocb CreateCallback() {
- struct iocb cb = {};
- cb.aio_data = 0x123;
- cb.aio_fildes = test_file_fd_.get();
- cb.aio_lio_opcode = IOCB_CMD_PWRITE;
- cb.aio_buf = reinterpret_cast<uint64_t>(kData);
- cb.aio_offset = 0;
- cb.aio_nbytes = strlen(kData);
- return cb;
- }
-
- protected:
- aio_context_t ctx_;
-};
-
-TEST_F(AIOTest, BasicWrite) {
- // Copied from fs/aio.c.
- constexpr unsigned AIO_RING_MAGIC = 0xa10a10a1;
- struct aio_ring {
- unsigned id;
- unsigned nr;
- unsigned head;
- unsigned tail;
- unsigned magic;
- unsigned compat_features;
- unsigned incompat_features;
- unsigned header_length;
- struct io_event io_events[0];
- };
-
- // Setup a context that is 128 entries deep.
- ASSERT_THAT(SetupContext(128), SyscallSucceeds());
-
- // Check that 'ctx_' points to a valid address. libaio uses it to check
- // whether the aio implementation uses aio_ring. gVisor doesn't and returns
- // all zeroes. Linux implements aio_ring, so skip the zeroes check.
- //
- // TODO(gvisor.dev/issue/204): Remove when gVisor implements aio_ring.
- auto ring = reinterpret_cast<struct aio_ring*>(ctx_);
- auto magic = IsRunningOnGvisor() ? 0 : AIO_RING_MAGIC;
- EXPECT_EQ(ring->magic, magic);
-
- struct iocb cb = CreateCallback();
- struct iocb* cbs[1] = {&cb};
-
- // Submit the request.
- ASSERT_THAT(Submit(1, cbs), SyscallSucceedsWithValue(1));
-
- // Get the reply.
- struct io_event events[1];
- ASSERT_THAT(GetEvents(1, 1, events, nullptr), SyscallSucceedsWithValue(1));
-
- // Verify that it is as expected.
- EXPECT_EQ(events[0].data, 0x123);
- EXPECT_EQ(events[0].obj, reinterpret_cast<long>(&cb));
- EXPECT_EQ(events[0].res, strlen(kData));
-
- // Verify that the file contains the contents.
- char verify_buf[sizeof(kData)] = {};
- ASSERT_THAT(read(test_file_fd_.get(), verify_buf, sizeof(kData)),
- SyscallSucceedsWithValue(strlen(kData)));
- EXPECT_STREQ(verify_buf, kData);
-}
-
-TEST_F(AIOTest, BadWrite) {
- // Create a pipe and immediately close the read end.
- int pipefd[2];
- ASSERT_THAT(pipe(pipefd), SyscallSucceeds());
-
- FileDescriptor rfd(pipefd[0]);
- FileDescriptor wfd(pipefd[1]);
-
- rfd.reset(); // Close the read end.
-
- // Setup a context that is 128 entries deep.
- ASSERT_THAT(SetupContext(128), SyscallSucceeds());
-
- struct iocb cb = CreateCallback();
- // Try to write to the read end.
- cb.aio_fildes = wfd.get();
- struct iocb* cbs[1] = {&cb};
-
- // Submit the request.
- ASSERT_THAT(Submit(1, cbs), SyscallSucceedsWithValue(1));
-
- // Get the reply.
- struct io_event events[1];
- ASSERT_THAT(GetEvents(1, 1, events, nullptr), SyscallSucceedsWithValue(1));
-
- // Verify that it fails with the right error code.
- EXPECT_EQ(events[0].data, 0x123);
- EXPECT_EQ(events[0].obj, reinterpret_cast<uint64_t>(&cb));
- EXPECT_LT(events[0].res, 0);
-}
-
-TEST_F(AIOTest, ExitWithPendingIo) {
- // Setup a context that is 100 entries deep.
- ASSERT_THAT(SetupContext(100), SyscallSucceeds());
-
- struct iocb cb = CreateCallback();
- struct iocb* cbs[] = {&cb};
-
- // Submit requests but don't complete them, so they remain pending.
- for (int i = 0; i < 100; ++i) {
- EXPECT_THAT(Submit(1, cbs), SyscallSucceeds());
- }
-
- ASSERT_THAT(DestroyContext(), SyscallSucceeds());
- ctx_ = 0;
-}
-
-int Submitter(void* arg) {
- auto test = reinterpret_cast<AIOTest*>(arg);
-
- struct iocb cb = test->CreateCallback();
- struct iocb* cbs[1] = {&cb};
-
- // Submit the request.
- TEST_CHECK(test->Submit(1, cbs) == 1);
- return 0;
-}
-
-TEST_F(AIOTest, CloneVm) {
- // Setup a context that is 128 entries deep.
- ASSERT_THAT(SetupContext(128), SyscallSucceeds());
-
- const size_t kStackSize = 5 * kPageSize;
- std::unique_ptr<char[]> stack(new char[kStackSize]);
- char* bp = stack.get() + kStackSize;
- pid_t child;
- ASSERT_THAT(child = clone(Submitter, bp, CLONE_VM | SIGCHLD,
- reinterpret_cast<void*>(this)),
- SyscallSucceeds());
-
- // Get the reply.
- struct io_event events[1];
- ASSERT_THAT(GetEvents(1, 1, events, nullptr), SyscallSucceedsWithValue(1));
-
- // Verify that it is as expected.
- EXPECT_EQ(events[0].data, 0x123);
- EXPECT_EQ(events[0].res, strlen(kData));
-
- // Verify that the file contains the contents.
- char verify_buf[32] = {};
- ASSERT_THAT(read(test_file_fd_.get(), &verify_buf[0], strlen(kData)),
- SyscallSucceeds());
- EXPECT_EQ(strcmp(kData, &verify_buf[0]), 0);
-
- int status;
- ASSERT_THAT(RetryEINTR(waitpid)(child, &status, 0),
- SyscallSucceedsWithValue(child));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << " status " << status;
-}
-
-// Tests that AIO context can be remapped to a different address.
-TEST_F(AIOTest, Mremap) {
- // Setup a context that is 128 entries deep.
- ASSERT_THAT(SetupContext(128), SyscallSucceeds());
- const size_t ctx_size =
- ASSERT_NO_ERRNO_AND_VALUE(VmaSizeAt(reinterpret_cast<uintptr_t>(ctx_)));
-
- struct iocb cb = CreateCallback();
- struct iocb* cbs[1] = {&cb};
-
- // Reserve address space for the mremap target so we have something safe to
- // map over.
- Mapping dst =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(ctx_size, PROT_READ, MAP_PRIVATE));
-
- // Remap context 'handle' to a different address.
- ASSERT_THAT(Mremap(reinterpret_cast<void*>(ctx_), ctx_size, dst.len(),
- MREMAP_FIXED | MREMAP_MAYMOVE, dst.ptr()),
- IsPosixErrorOkAndHolds(dst.ptr()));
- aio_context_t old_ctx = ctx_;
- ctx_ = reinterpret_cast<aio_context_t>(dst.addr());
- // io_destroy() will unmap dst now.
- dst.release();
-
- // Check that submitting the request with the old 'ctx_' fails.
- ASSERT_THAT(SubmitCtx(old_ctx, 1, cbs), SyscallFailsWithErrno(EINVAL));
-
- // Submit the request with the new 'ctx_'.
- ASSERT_THAT(Submit(1, cbs), SyscallSucceedsWithValue(1));
-
- // Remap again.
- dst = ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(ctx_size, PROT_READ, MAP_PRIVATE));
- ASSERT_THAT(Mremap(reinterpret_cast<void*>(ctx_), ctx_size, dst.len(),
- MREMAP_FIXED | MREMAP_MAYMOVE, dst.ptr()),
- IsPosixErrorOkAndHolds(dst.ptr()));
- ctx_ = reinterpret_cast<aio_context_t>(dst.addr());
- dst.release();
-
- // Get the reply with yet another 'ctx_' and verify it.
- struct io_event events[1];
- ASSERT_THAT(GetEvents(1, 1, events, nullptr), SyscallSucceedsWithValue(1));
- EXPECT_EQ(events[0].data, 0x123);
- EXPECT_EQ(events[0].obj, reinterpret_cast<long>(&cb));
- EXPECT_EQ(events[0].res, strlen(kData));
-
- // Verify that the file contains the contents.
- char verify_buf[sizeof(kData)] = {};
- ASSERT_THAT(read(test_file_fd_.get(), verify_buf, sizeof(kData)),
- SyscallSucceedsWithValue(strlen(kData)));
- EXPECT_STREQ(verify_buf, kData);
-}
-
-// Tests that AIO context cannot be expanded with mremap.
-TEST_F(AIOTest, MremapExpansion) {
- // Setup a context that is 128 entries deep.
- ASSERT_THAT(SetupContext(128), SyscallSucceeds());
- const size_t ctx_size =
- ASSERT_NO_ERRNO_AND_VALUE(VmaSizeAt(reinterpret_cast<uintptr_t>(ctx_)));
-
- // Reserve address space for the mremap target so we have something safe to
- // map over.
- Mapping dst = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(ctx_size + kPageSize, PROT_NONE, MAP_PRIVATE));
-
- // Test that remapping to a larger address range fails.
- ASSERT_THAT(Mremap(reinterpret_cast<void*>(ctx_), ctx_size, dst.len(),
- MREMAP_FIXED | MREMAP_MAYMOVE, dst.ptr()),
- PosixErrorIs(EFAULT, _));
-
- // mm/mremap.c:sys_mremap() => mremap_to() does do_munmap() of the destination
- // before it hits the VM_DONTEXPAND check in vma_to_resize(), so we should no
- // longer munmap it (another thread may have created a mapping there).
- dst.release();
-}
-
-// Tests that AIO calls fail if the context's address is inaccessible.
-TEST_F(AIOTest, Mprotect) {
- // Set up a context that is 128 entries deep.
- ASSERT_THAT(SetupContext(128), SyscallSucceeds());
-
- struct iocb cb = CreateCallback();
- struct iocb* cbs[1] = {&cb};
-
- ASSERT_THAT(Submit(1, cbs), SyscallSucceedsWithValue(1));
-
- // Make the context 'handle' inaccessible and check that all subsequent
- // calls fail.
- ASSERT_THAT(mprotect(reinterpret_cast<void*>(ctx_), kPageSize, PROT_NONE),
- SyscallSucceeds());
- struct io_event events[1];
- EXPECT_THAT(GetEvents(1, 1, events, nullptr), SyscallFailsWithErrno(EINVAL));
- ASSERT_THAT(Submit(1, cbs), SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(DestroyContext(), SyscallFailsWithErrno(EINVAL));
-
- // Prevent TearDown from attempting to destroy the context, which would fail.
- ctx_ = 0;
-}
-
-TEST_F(AIOTest, Timeout) {
- // Set up a context that is 128 entries deep.
- ASSERT_THAT(SetupContext(128), SyscallSucceeds());
-
- struct timespec timeout;
- timeout.tv_sec = 0;
- timeout.tv_nsec = 10;
- struct io_event events[1];
- ASSERT_THAT(GetEvents(1, 1, events, &timeout), SyscallSucceedsWithValue(0));
-}
-
-class AIOReadWriteParamTest : public AIOTest,
- public ::testing::WithParamInterface<int> {};
-
-TEST_P(AIOReadWriteParamTest, BadOffset) {
- // Set up a context that is 128 entries deep.
- ASSERT_THAT(SetupContext(128), SyscallSucceeds());
-
- struct iocb cb = CreateCallback();
- struct iocb* cbs[1] = {&cb};
-
- // Create a buffer that we can write to.
- char buf[] = "hello world!";
- cb.aio_buf = reinterpret_cast<uint64_t>(buf);
-
- // Set the operation on the callback and give a negative offset.
- const int opcode = GetParam();
- cb.aio_lio_opcode = opcode;
-
- iovec iov = {};
- if (opcode == IOCB_CMD_PREADV || opcode == IOCB_CMD_PWRITEV) {
- // Create a valid iovec and set it in the callback.
- iov.iov_base = reinterpret_cast<void*>(buf);
- iov.iov_len = 1;
- cb.aio_buf = reinterpret_cast<uint64_t>(&iov);
- // aio_nbytes is the number of iovecs.
- cb.aio_nbytes = 1;
- }
-
- // Pass a negative offset.
- cb.aio_offset = -1;
-
- // Should get error on submission.
- ASSERT_THAT(Submit(1, cbs), SyscallFailsWithErrno(EINVAL));
-}
-
-INSTANTIATE_TEST_SUITE_P(BadOffset, AIOReadWriteParamTest,
- ::testing::Values(IOCB_CMD_PREAD, IOCB_CMD_PWRITE,
- IOCB_CMD_PREADV, IOCB_CMD_PWRITEV));
-
-class AIOVectorizedParamTest : public AIOTest,
- public ::testing::WithParamInterface<int> {};
-
-TEST_P(AIOVectorizedParamTest, BadIOVecs) {
- // Set up a context that is 128 entries deep.
- ASSERT_THAT(SetupContext(128), SyscallSucceeds());
-
- struct iocb cb = CreateCallback();
- struct iocb* cbs[1] = {&cb};
-
- // Modify the callback to use the operation from the param.
- cb.aio_lio_opcode = GetParam();
-
- // Create an iovec with address in kernel range, and pass that as the buffer.
- iovec iov = {};
- iov.iov_base = reinterpret_cast<void*>(0xFFFFFFFF00000000);
- iov.iov_len = 1;
- cb.aio_buf = reinterpret_cast<uint64_t>(&iov);
- // aio_nbytes is the number of iovecs.
- cb.aio_nbytes = 1;
-
- // Should get error on submission.
- ASSERT_THAT(Submit(1, cbs), SyscallFailsWithErrno(EFAULT));
-}
-
-INSTANTIATE_TEST_SUITE_P(BadIOVecs, AIOVectorizedParamTest,
- ::testing::Values(IOCB_CMD_PREADV, IOCB_CMD_PWRITEV));
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/alarm.cc b/test/syscalls/linux/alarm.cc
deleted file mode 100644
index cd0704334..000000000
--- a/test/syscalls/linux/alarm.cc
+++ /dev/null
@@ -1,192 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <signal.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/logging.h"
-#include "test/util/signal_util.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// N.B. Below, main blocks SIGALRM. Test cases must unblock it if they want
-// delivery.
-
-void do_nothing_handler(int sig, siginfo_t* siginfo, void* arg) {}
-
-// No random save as the test relies on alarm timing. Cooperative save tests
-// already cover the save between alarm and read.
-TEST(AlarmTest, Interrupt) {
- int pipe_fds[2];
- ASSERT_THAT(pipe(pipe_fds), SyscallSucceeds());
-
- FileDescriptor read_fd(pipe_fds[0]);
- FileDescriptor write_fd(pipe_fds[1]);
-
- // Use a signal handler that interrupts but does nothing rather than using the
- // default terminate action.
- struct sigaction sa;
- sa.sa_sigaction = do_nothing_handler;
- sigfillset(&sa.sa_mask);
- sa.sa_flags = 0;
- auto sa_cleanup = ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGALRM, sa));
-
- // Actually allow SIGALRM delivery.
- auto mask_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_UNBLOCK, SIGALRM));
-
- // Alarm in 20 seconds, which should be well after the read blocks below.
- ASSERT_THAT(alarm(20), SyscallSucceeds());
-
- char buf;
- ASSERT_THAT(read(read_fd.get(), &buf, 1), SyscallFailsWithErrno(EINTR));
-}
-
-// Count of the number of SIGALRMs handled.
-static volatile int alarms_received = 0;
-
-void inc_alarms_handler(int sig, siginfo_t* siginfo, void* arg) {
- alarms_received++;
-}
-
-// No random save as the test relies on alarm timing. Cooperative save tests
-// already cover the save between alarm and read.
-TEST(AlarmTest, Restart) {
- alarms_received = 0;
-
- int pipe_fds[2];
- ASSERT_THAT(pipe(pipe_fds), SyscallSucceeds());
-
- FileDescriptor read_fd(pipe_fds[0]);
- // Write end closed by thread below.
-
- struct sigaction sa;
- sa.sa_sigaction = inc_alarms_handler;
- sigfillset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART;
- auto sa_cleanup = ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGALRM, sa));
-
- // Spawn a thread to eventually unblock the read below.
- ScopedThread t([pipe_fds] {
- absl::SleepFor(absl::Seconds(30));
- EXPECT_THAT(close(pipe_fds[1]), SyscallSucceeds());
- });
-
- // Actually allow SIGALRM delivery.
- auto mask_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_UNBLOCK, SIGALRM));
-
- // Alarm in 20 seconds, which should be well after the read blocks below, but
- // before it returns.
- ASSERT_THAT(alarm(20), SyscallSucceeds());
-
- // Read and eventually get an EOF once the writer closes. If SA_RESTART
- // didn't work, then either the alarm would not have fired and our signal
- // handler would not have incremented alarms_received, or the syscall would
- // not have been restarted gracefully, which it must be for us to get the
- // final EOF on the pipe.
- char buf;
- ASSERT_THAT(read(read_fd.get(), &buf, 1), SyscallSucceeds());
- EXPECT_EQ(alarms_received, 1);
-
- t.Join();
-}
-
-// No random save as the test relies on alarm timing. Cooperative save tests
-// already cover the save between alarm and pause.
-TEST(AlarmTest, SaSiginfo) {
- // Use a signal handler that interrupts but does nothing rather than using the
- // default terminate action.
- struct sigaction sa;
- sa.sa_sigaction = do_nothing_handler;
- sigfillset(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO;
- auto sa_cleanup = ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGALRM, sa));
-
- // Actually allow SIGALRM delivery.
- auto mask_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_UNBLOCK, SIGALRM));
-
- // Alarm in 20 seconds, which should be well after pause() blocks below.
- ASSERT_THAT(alarm(20), SyscallSucceeds());
- ASSERT_THAT(pause(), SyscallFailsWithErrno(EINTR));
-}
-
-// No random save as the test relies on alarm timing. Cooperative save tests
-// already cover the save between alarm and pause.
-TEST(AlarmTest, SaInterrupt) {
- // Use a signal handler that interrupts but does nothing rather than using the
- // default terminate action.
- struct sigaction sa;
- sa.sa_sigaction = do_nothing_handler;
- sigfillset(&sa.sa_mask);
- sa.sa_flags = SA_INTERRUPT;
- auto sa_cleanup = ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGALRM, sa));
-
- // Actually allow SIGALRM delivery.
- auto mask_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_UNBLOCK, SIGALRM));
-
- // Alarm in 20 seconds, which should be well after pause() blocks below.
- ASSERT_THAT(alarm(20), SyscallSucceeds());
- ASSERT_THAT(pause(), SyscallFailsWithErrno(EINTR));
-}
-
-TEST(AlarmTest, UserModeSpinning) {
- alarms_received = 0;
-
- struct sigaction sa = {};
- sa.sa_sigaction = inc_alarms_handler;
- sigfillset(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO;
- auto sa_cleanup = ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGALRM, sa));
-
- // Actually allow SIGALRM delivery.
- auto mask_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_UNBLOCK, SIGALRM));
-
- // Alarm in 20 seconds, which should be well into the loop below.
- ASSERT_THAT(alarm(20), SyscallSucceeds());
- // Make sure that the signal gets delivered even if we are spinning in user
- // mode when it arrives.
- while (!alarms_received) {
- }
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
-
-int main(int argc, char** argv) {
- // These tests depend on delivering SIGALRM to the main thread. Block SIGALRM
- // so that any other threads created by TestInit will also have SIGALRM
- // blocked.
- sigset_t set;
- sigemptyset(&set);
- sigaddset(&set, SIGALRM);
- TEST_PCHECK(sigprocmask(SIG_BLOCK, &set, nullptr) == 0);
-
- gvisor::testing::TestInit(&argc, &argv);
- return gvisor::testing::RunAllTests();
-}
diff --git a/test/syscalls/linux/arch_prctl.cc b/test/syscalls/linux/arch_prctl.cc
deleted file mode 100644
index 81bf5a775..000000000
--- a/test/syscalls/linux/arch_prctl.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <asm/prctl.h>
-#include <sys/prctl.h>
-
-#include "gtest/gtest.h"
-#include "test/util/test_util.h"
-
-// glibc does not provide a prototype for arch_prctl() so declare it here.
-extern "C" int arch_prctl(int code, uintptr_t addr);
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(ArchPrctlTest, GetSetFS) {
- uintptr_t orig;
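- // A canonical x86-64 address must have bits 63:48 equal to bit 47; this
- // value does not, so the kernel should refuse to install it.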
- const uintptr_t kNonCanonicalFsbase = 0x4141414142424242;
-
- // Get the original FS.base and then set it to the same value (this is
- // intentional because FS.base is the TLS pointer so we cannot change it
- // arbitrarily).
- ASSERT_THAT(arch_prctl(ARCH_GET_FS, reinterpret_cast<uintptr_t>(&orig)),
- SyscallSucceeds());
- ASSERT_THAT(arch_prctl(ARCH_SET_FS, orig), SyscallSucceeds());
-
- // Trying to set FS.base to a non-canonical value should return an error.
- ASSERT_THAT(arch_prctl(ARCH_SET_FS, kNonCanonicalFsbase),
- SyscallFailsWithErrno(EPERM));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/bad.cc b/test/syscalls/linux/bad.cc
deleted file mode 100644
index a26fc6af3..000000000
--- a/test/syscalls/linux/bad.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sys/syscall.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-#ifdef __x86_64__
-// get_kernel_syms is not supported in Linux > 2.6, and not implemented in
-// gVisor.
-constexpr uint32_t kNotImplementedSyscall = SYS_get_kernel_syms;
-#elif __aarch64__
-// Use the last of the arch_specific_syscalls, which are not implemented on
-// arm64.
-constexpr uint32_t kNotImplementedSyscall = __NR_arch_specific_syscall + 15;
-#endif
-
-TEST(BadSyscallTest, NotImplemented) {
- EXPECT_THAT(syscall(kNotImplementedSyscall), SyscallFailsWithErrno(ENOSYS));
-}
-
-TEST(BadSyscallTest, NegativeOne) {
- EXPECT_THAT(syscall(-1), SyscallFailsWithErrno(ENOSYS));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/base_poll_test.cc b/test/syscalls/linux/base_poll_test.cc
deleted file mode 100644
index ab7a19dd0..000000000
--- a/test/syscalls/linux/base_poll_test.cc
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/base_poll_test.h"
-
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <syscall.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "absl/memory/memory.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-static volatile int timer_fired = 0;
-static void SigAlarmHandler(int, siginfo_t*, void*) { timer_fired = 1; }
-
-BasePollTest::BasePollTest() {
- // Register our SIGALRM handler, but save the original so we can restore it
- // in the destructor.
- struct sigaction sa = {};
- sa.sa_sigaction = SigAlarmHandler;
- sigfillset(&sa.sa_mask);
- TEST_PCHECK(sigaction(SIGALRM, &sa, &original_alarm_sa_) == 0);
-}
-
-BasePollTest::~BasePollTest() {
- ClearTimer();
- TEST_PCHECK(sigaction(SIGALRM, &original_alarm_sa_, nullptr) == 0);
-}
-
-void BasePollTest::SetTimer(absl::Duration duration) {
- pid_t tgid = getpid();
- pid_t tid = gettid();
- ClearTimer();
-
- // Create a new timer thread.
- timer_ = absl::make_unique<TimerThread>(absl::Now() + duration, tgid, tid);
-}
-
-bool BasePollTest::TimerFired() const { return timer_fired; }
-
-void BasePollTest::ClearTimer() {
- timer_.reset();
- timer_fired = 0;
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/base_poll_test.h b/test/syscalls/linux/base_poll_test.h
deleted file mode 100644
index 0d4a6701e..000000000
--- a/test/syscalls/linux/base_poll_test.h
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_BASE_POLL_TEST_H_
-#define GVISOR_TEST_SYSCALLS_BASE_POLL_TEST_H_
-
-#include <signal.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <syscall.h>
-#include <time.h>
-#include <unistd.h>
-
-#include <memory>
-
-#include "gtest/gtest.h"
-#include "absl/synchronization/mutex.h"
-#include "absl/time/time.h"
-#include "test/util/logging.h"
-#include "test/util/signal_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// TimerThread is a cancelable timer.
-class TimerThread {
- public:
- TimerThread(absl::Time deadline, pid_t tgid, pid_t tid)
- : thread_([=] {
- mu_.Lock();
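- // Block until either Cancel() sets cancel_ or the deadline expires.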
- mu_.AwaitWithDeadline(absl::Condition(&cancel_), deadline);
- if (!cancel_) {
- TEST_PCHECK(tgkill(tgid, tid, SIGALRM) == 0);
- }
- mu_.Unlock();
- }) {}
-
- ~TimerThread() { Cancel(); }
-
- void Cancel() {
- absl::MutexLock ml(&mu_);
- cancel_ = true;
- }
-
- private:
- mutable absl::Mutex mu_;
- bool cancel_ ABSL_GUARDED_BY(mu_) = false;
-
- // Must be last to ensure that the destructor for the thread is run before
- // any other member of the object is destroyed.
- ScopedThread thread_;
-};
-
-// Base test fixture for poll, select, ppoll, and pselect tests.
-//
-// This fixture makes use of SIGALRM. The handler is saved in the constructor
-// and restored in the destructor.
-class BasePollTest : public ::testing::Test {
- protected:
- BasePollTest();
- ~BasePollTest() override;
-
- // Sets a timer that will send a signal to the calling thread after
- // `duration`.
- void SetTimer(absl::Duration duration);
-
- // Returns true if the timer has fired.
- bool TimerFired() const;
-
- // Stops the pending timer (if any) and clears the "fired" state.
- void ClearTimer();
-
- private:
- // Thread that implements the timer. If the timer is stopped, timer_ is null.
- //
- // We have to use a thread for this purpose because tests using this fixture
- // expect to be interrupted by the timer signal, but itimers/alarm(2) send
- // thread-group-directed signals, which may be handled by any thread in the
- // test process.
- std::unique_ptr<TimerThread> timer_;
-
- // The original SIGALRM handler, to restore in destructor.
- struct sigaction original_alarm_sa_;
-};
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_BASE_POLL_TEST_H_
diff --git a/test/syscalls/linux/bind.cc b/test/syscalls/linux/bind.cc
deleted file mode 100644
index 9547c4ab2..000000000
--- a/test/syscalls/linux/bind.cc
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <stdio.h>
-#include <sys/socket.h>
-#include <sys/un.h>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST_P(AllSocketPairTest, Bind) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-}
-
-TEST_P(AllSocketPairTest, BindTooLong) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- // first_addr is a sockaddr_storage being used as a sockaddr_un. Use the full
- // length, which is longer than expected for a Unix socket.
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sizeof(sockaddr_storage)),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_P(AllSocketPairTest, DoubleBindSocket) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- EXPECT_THAT(
- bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- // Linux 4.9 returns EINVAL here, but some time before 4.19 it switched
- // to EADDRINUSE.
- AnyOf(SyscallFailsWithErrno(EADDRINUSE), SyscallFailsWithErrno(EINVAL)));
-}
-
-TEST_P(AllSocketPairTest, GetLocalAddr) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
- socklen_t addressLength = sockets->first_addr_size();
- struct sockaddr_storage address = {};
- ASSERT_THAT(getsockname(sockets->first_fd(), (struct sockaddr*)(&address),
- &addressLength),
- SyscallSucceeds());
- EXPECT_EQ(
- 0, memcmp(&address, sockets->first_addr(), sockets->first_addr_size()));
-}
-
-TEST_P(AllSocketPairTest, GetLocalAddrWithoutBind) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- socklen_t addressLength = sockets->first_addr_size();
- struct sockaddr_storage received_address = {};
- ASSERT_THAT(
- getsockname(sockets->first_fd(), (struct sockaddr*)(&received_address),
- &addressLength),
- SyscallSucceeds());
- struct sockaddr_storage want_address = {};
- want_address.ss_family = sockets->first_addr()->sa_family;
- EXPECT_EQ(0, memcmp(&received_address, &want_address, addressLength));
-}
-
-TEST_P(AllSocketPairTest, GetRemoteAddressWithoutConnect) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- socklen_t addressLength = sockets->first_addr_size();
- struct sockaddr_storage address = {};
- ASSERT_THAT(getpeername(sockets->second_fd(), (struct sockaddr*)(&address),
- &addressLength),
- SyscallFailsWithErrno(ENOTCONN));
-}
-
-TEST_P(AllSocketPairTest, DoubleBindAddress) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- EXPECT_THAT(bind(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallFailsWithErrno(EADDRINUSE));
-}
-
-TEST_P(AllSocketPairTest, Unbind) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
- ASSERT_THAT(close(sockets->release_first_fd()), SyscallSucceeds());
-
- // Filesystem Unix sockets do not release their address when closed.
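- // (An abstract socket address begins with a NUL byte in sa_data, while a
- // filesystem address does not.)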
- if (sockets->first_addr()->sa_data[0] != 0) {
- ASSERT_THAT(bind(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallFailsWithErrno(EADDRINUSE));
- return;
- }
-
- ASSERT_THAT(bind(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
- ASSERT_THAT(close(sockets->release_second_fd()), SyscallSucceeds());
-}
-
-INSTANTIATE_TEST_SUITE_P(
- AllUnixDomainSockets, AllSocketPairTest,
- ::testing::ValuesIn(VecCat<SocketPairKind>(
- ApplyVec<SocketPairKind>(
- FilesystemUnboundUnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_STREAM, SOCK_DGRAM,
- SOCK_SEQPACKET},
- List<int>{0, SOCK_NONBLOCK})),
- ApplyVec<SocketPairKind>(
- AbstractUnboundUnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_STREAM, SOCK_DGRAM,
- SOCK_SEQPACKET},
- List<int>{0, SOCK_NONBLOCK})))));
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/brk.cc b/test/syscalls/linux/brk.cc
deleted file mode 100644
index a03a44465..000000000
--- a/test/syscalls/linux/brk.cc
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <stdint.h>
-#include <sys/syscall.h>
-#include <unistd.h>
-
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-TEST(BrkTest, BrkSyscallReturnsOldBrkOnFailure) {
- auto old_brk = sbrk(0);
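- // The raw brk syscall returns the current program break on failure (unlike
- // the glibc wrapper), so an impossible request like -1 just returns the old
- // break.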
- EXPECT_THAT(syscall(SYS_brk, reinterpret_cast<void*>(-1)),
- SyscallSucceedsWithValue(reinterpret_cast<uintptr_t>(old_brk)));
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/cgroup.cc b/test/syscalls/linux/cgroup.cc
deleted file mode 100644
index f29891571..000000000
--- a/test/syscalls/linux/cgroup.cc
+++ /dev/null
@@ -1,591 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// All tests in this file rely on being able to mount and unmount cgroupfs,
-// which isn't expected to work, or to be safe, on a general Linux system.
-
-#include <limits.h>
-#include <sys/mount.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "absl/container/flat_hash_map.h"
-#include "absl/container/flat_hash_set.h"
-#include "absl/strings/str_split.h"
-#include "test/util/capability_util.h"
-#include "test/util/cgroup_util.h"
-#include "test/util/cleanup.h"
-#include "test/util/mount_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-using ::testing::_;
-using ::testing::Contains;
-using ::testing::Ge;
-using ::testing::Gt;
-using ::testing::Key;
-using ::testing::Not;
-
-std::vector<std::string> known_controllers = {
- "cpu", "cpuset", "cpuacct", "job", "memory",
-};
-
-bool CgroupsAvailable() {
- return IsRunningOnGvisor() && !IsRunningWithVFS1() &&
- TEST_CHECK_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN));
-}
-
-TEST(Cgroup, MountSucceeds) {
- SKIP_IF(!CgroupsAvailable());
-
- Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));
- Cgroup c = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs(""));
- EXPECT_NO_ERRNO(c.ContainsCallingProcess());
-}
-
-TEST(Cgroup, SeparateMounts) {
- SKIP_IF(!CgroupsAvailable());
-
- Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));
-
- for (const auto& ctl : known_controllers) {
- Cgroup c = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs(ctl));
- EXPECT_NO_ERRNO(c.ContainsCallingProcess());
- }
-}
-
-TEST(Cgroup, AllControllersImplicit) {
- SKIP_IF(!CgroupsAvailable());
-
- Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));
- Cgroup c = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs(""));
-
- absl::flat_hash_map<std::string, CgroupsEntry> cgroups_entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcCgroupsEntries());
- for (const auto& ctl : known_controllers) {
- EXPECT_TRUE(cgroups_entries.contains(ctl))
- << absl::StreamFormat("ctl=%s", ctl);
- }
- EXPECT_EQ(cgroups_entries.size(), known_controllers.size());
-}
-
-TEST(Cgroup, AllControllersExplicit) {
- SKIP_IF(!CgroupsAvailable());
-
- Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));
- Cgroup c = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs("all"));
-
- absl::flat_hash_map<std::string, CgroupsEntry> cgroups_entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcCgroupsEntries());
- for (const auto& ctl : known_controllers) {
- EXPECT_TRUE(cgroups_entries.contains(ctl))
- << absl::StreamFormat("ctl=%s", ctl);
- }
- EXPECT_EQ(cgroups_entries.size(), known_controllers.size());
-}
-
-TEST(Cgroup, ProcsAndTasks) {
- SKIP_IF(!CgroupsAvailable());
-
- Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));
- Cgroup c = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs(""));
- absl::flat_hash_set<pid_t> pids = ASSERT_NO_ERRNO_AND_VALUE(c.Procs());
- absl::flat_hash_set<pid_t> tids = ASSERT_NO_ERRNO_AND_VALUE(c.Tasks());
-
- EXPECT_GE(tids.size(), pids.size()) << "Found more processes than threads";
-
- // Pids should be a strict subset of tids.
- for (auto it = pids.begin(); it != pids.end(); ++it) {
- EXPECT_TRUE(tids.contains(*it))
- << absl::StreamFormat("Have pid %d, but no such tid", *it);
- }
-}
-
-TEST(Cgroup, ControllersMustBeInUniqueHierarchy) {
- SKIP_IF(!CgroupsAvailable());
-
- Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));
- // Hierarchy #1: all controllers.
- Cgroup all = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs(""));
- // Hierarchy #2: memory.
- //
- // This should conflict since memory is already in hierarchy #1, and the two
- // hierarchies have different sets of controllers, so this mount can't be a
- // view into hierarchy #1.
- EXPECT_THAT(m.MountCgroupfs("memory"), PosixErrorIs(EBUSY, _))
- << "Memory controller mounted on two hierarchies";
- EXPECT_THAT(m.MountCgroupfs("cpu"), PosixErrorIs(EBUSY, _))
- << "CPU controller mounted on two hierarchies";
-}
-
-TEST(Cgroup, UnmountFreesControllers) {
- SKIP_IF(!CgroupsAvailable());
- Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));
- Cgroup all = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs(""));
- // All controllers are now attached to all's hierarchy. Attempting a new
- // mount with any individual controller should fail.
- EXPECT_THAT(m.MountCgroupfs("memory"), PosixErrorIs(EBUSY, _))
- << "Memory controller mounted on two hierarchies";
-
- // Unmount the "all" hierarchy. This should enable any controller to be
- // mounted on a new hierarchy again.
- ASSERT_NO_ERRNO(m.Unmount(all));
- EXPECT_NO_ERRNO(m.MountCgroupfs("memory"));
- EXPECT_NO_ERRNO(m.MountCgroupfs("cpu"));
-}
-
-TEST(Cgroup, OnlyContainsControllerSpecificFiles) {
- SKIP_IF(!CgroupsAvailable());
- Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));
- Cgroup mem = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs("memory"));
- EXPECT_THAT(Exists(mem.Relpath("memory.usage_in_bytes")),
- IsPosixErrorOkAndHolds(true));
- // CPU files shouldn't exist in memory cgroups.
- EXPECT_THAT(Exists(mem.Relpath("cpu.cfs_period_us")),
- IsPosixErrorOkAndHolds(false));
- EXPECT_THAT(Exists(mem.Relpath("cpu.cfs_quota_us")),
- IsPosixErrorOkAndHolds(false));
- EXPECT_THAT(Exists(mem.Relpath("cpu.shares")), IsPosixErrorOkAndHolds(false));
-
- Cgroup cpu = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs("cpu"));
- EXPECT_THAT(Exists(cpu.Relpath("cpu.cfs_period_us")),
- IsPosixErrorOkAndHolds(true));
- EXPECT_THAT(Exists(cpu.Relpath("cpu.cfs_quota_us")),
- IsPosixErrorOkAndHolds(true));
- EXPECT_THAT(Exists(cpu.Relpath("cpu.shares")), IsPosixErrorOkAndHolds(true));
- // Memory files shouldn't exist in cpu cgroups.
- EXPECT_THAT(Exists(cpu.Relpath("memory.usage_in_bytes")),
- IsPosixErrorOkAndHolds(false));
-}
-
-TEST(Cgroup, InvalidController) {
- SKIP_IF(!CgroupsAvailable());
-
- TempPath mountpoint = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- std::string mopts = "this-controller-is-invalid";
- EXPECT_THAT(
- mount("none", mountpoint.path().c_str(), "cgroup", 0, mopts.c_str()),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(Cgroup, MoptAllMustBeExclusive) {
- SKIP_IF(!CgroupsAvailable());
-
- TempPath mountpoint = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- std::string mopts = "all,cpu";
- EXPECT_THAT(
- mount("none", mountpoint.path().c_str(), "cgroup", 0, mopts.c_str()),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(Cgroup, MountRace) {
- SKIP_IF(!CgroupsAvailable());
-
- TempPath mountpoint = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- const DisableSave ds; // Too many syscalls.
-
- auto mount_thread = [&mountpoint]() {
- for (int i = 0; i < 100; ++i) {
- mount("none", mountpoint.path().c_str(), "cgroup", 0, 0);
- }
- };
- std::list<ScopedThread> threads;
- for (int i = 0; i < 10; ++i) {
- threads.emplace_back(mount_thread);
- }
- for (auto& t : threads) {
- t.Join();
- }
-
- auto cleanup = Cleanup([&mountpoint] {
- // We need 1 umount call per successful mount. If some of the mount calls
- // were unsuccessful, their corresponding umount will silently fail.
- for (int i = 0; i < (10 * 100) + 1; ++i) {
- umount(mountpoint.path().c_str());
- }
- });
-
- Cgroup c = Cgroup(mountpoint.path());
- // c should be a valid cgroup.
- EXPECT_NO_ERRNO(c.ContainsCallingProcess());
-}
-
-TEST(Cgroup, MountUnmountRace) {
- SKIP_IF(!CgroupsAvailable());
-
- TempPath mountpoint = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- const DisableSave ds; // Too many syscalls.
-
- auto mount_thread = [&mountpoint]() {
- for (int i = 0; i < 100; ++i) {
- mount("none", mountpoint.path().c_str(), "cgroup", 0, 0);
- }
- };
- auto unmount_thread = [&mountpoint]() {
- for (int i = 0; i < 100; ++i) {
- umount(mountpoint.path().c_str());
- }
- };
- std::list<ScopedThread> threads;
- for (int i = 0; i < 10; ++i) {
- threads.emplace_back(mount_thread);
- }
- for (int i = 0; i < 10; ++i) {
- threads.emplace_back(unmount_thread);
- }
- for (auto& t : threads) {
- t.Join();
- }
-
- // We don't know how many mount refs are remaining, since the count depends on
- // the ordering of mount and umount calls. Keep calling unmount until it
- // returns an error.
- while (umount(mountpoint.path().c_str()) == 0) {
- }
-}
-
-TEST(Cgroup, UnmountRepeated) {
- SKIP_IF(!CgroupsAvailable());
-
- const DisableSave ds; // Too many syscalls.
-
- Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));
- Cgroup c = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs(""));
-
- // First unmount should succeed.
- EXPECT_THAT(umount(c.Path().c_str()), SyscallSucceeds());
-
- // We just manually unmounted, so release managed resources.
- m.release(c);
-
- EXPECT_THAT(umount(c.Path().c_str()), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(MemoryCgroup, MemoryUsageInBytes) {
- SKIP_IF(!CgroupsAvailable());
-
- Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));
- Cgroup c = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs("memory"));
- EXPECT_THAT(c.ReadIntegerControlFile("memory.usage_in_bytes"),
- IsPosixErrorOkAndHolds(Gt(0)));
-}
-
-TEST(CPUCgroup, ControlFilesHaveDefaultValues) {
- SKIP_IF(!CgroupsAvailable());
-
- Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));
- Cgroup c = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs("cpu"));
- EXPECT_THAT(c.ReadIntegerControlFile("cpu.cfs_quota_us"),
- IsPosixErrorOkAndHolds(-1));
- EXPECT_THAT(c.ReadIntegerControlFile("cpu.cfs_period_us"),
- IsPosixErrorOkAndHolds(100000));
- EXPECT_THAT(c.ReadIntegerControlFile("cpu.shares"),
- IsPosixErrorOkAndHolds(1024));
-}
-
-TEST(CPUAcctCgroup, CPUAcctUsage) {
- SKIP_IF(!CgroupsAvailable());
-
- Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));
- Cgroup c = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs("cpuacct"));
-
- const int64_t usage =
- ASSERT_NO_ERRNO_AND_VALUE(c.ReadIntegerControlFile("cpuacct.usage"));
- const int64_t usage_user =
- ASSERT_NO_ERRNO_AND_VALUE(c.ReadIntegerControlFile("cpuacct.usage_user"));
- const int64_t usage_sys =
- ASSERT_NO_ERRNO_AND_VALUE(c.ReadIntegerControlFile("cpuacct.usage_sys"));
-
- EXPECT_GE(usage, 0);
- EXPECT_GE(usage_user, 0);
- EXPECT_GE(usage_sys, 0);
-
- EXPECT_GE(usage_user + usage_sys, usage);
-}
-
-TEST(CPUAcctCgroup, CPUAcctStat) {
- SKIP_IF(!CgroupsAvailable());
-
- Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));
- Cgroup c = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs("cpuacct"));
-
- std::string stat =
- ASSERT_NO_ERRNO_AND_VALUE(c.ReadControlFile("cpuacct.stat"));
-
- // We're expecting the contents of "cpuacct.stat" to look similar to this:
- //
- // user 377986
- // system 220662
-
- std::vector<absl::string_view> lines =
- absl::StrSplit(stat, '\n', absl::SkipEmpty());
- ASSERT_EQ(lines.size(), 2);
-
- std::vector<absl::string_view> user_tokens =
- StrSplit(lines[0], absl::ByChar(' '));
- EXPECT_EQ(user_tokens[0], "user");
- EXPECT_THAT(Atoi<int64_t>(user_tokens[1]), IsPosixErrorOkAndHolds(Ge(0)));
-
- std::vector<absl::string_view> sys_tokens =
- StrSplit(lines[1], absl::ByChar(' '));
- EXPECT_EQ(sys_tokens[0], "system");
- EXPECT_THAT(Atoi<int64_t>(sys_tokens[1]), IsPosixErrorOkAndHolds(Ge(0)));
-}
-
-// WriteAndVerifyControlValue attempts to write val to a cgroup file at path,
-// and verifies the value by reading it back afterwards.
-PosixError WriteAndVerifyControlValue(const Cgroup& c, std::string_view path,
- int64_t val) {
- RETURN_IF_ERRNO(c.WriteIntegerControlFile(path, val));
- ASSIGN_OR_RETURN_ERRNO(int64_t newval, c.ReadIntegerControlFile(path));
- if (newval != val) {
- return PosixError(
- EINVAL,
- absl::StrFormat(
- "Unexpected value for control file '%s': expected %d, got %d", path,
- val, newval));
- }
- return NoError();
-}
-
-TEST(JobCgroup, ReadWriteRead) {
- SKIP_IF(!CgroupsAvailable());
-
- Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));
- Cgroup c = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs("job"));
-
- EXPECT_THAT(c.ReadIntegerControlFile("job.id"), IsPosixErrorOkAndHolds(0));
- EXPECT_NO_ERRNO(WriteAndVerifyControlValue(c, "job.id", 1234));
- EXPECT_NO_ERRNO(WriteAndVerifyControlValue(c, "job.id", -1));
- EXPECT_NO_ERRNO(WriteAndVerifyControlValue(c, "job.id", LLONG_MIN));
- EXPECT_NO_ERRNO(WriteAndVerifyControlValue(c, "job.id", LLONG_MAX));
-}
-
-TEST(ProcCgroups, Empty) {
- SKIP_IF(!CgroupsAvailable());
-
- absl::flat_hash_map<std::string, CgroupsEntry> entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcCgroupsEntries());
- // No cgroups mounted yet, we should have no entries.
- EXPECT_TRUE(entries.empty());
-}
-
-TEST(ProcCgroups, ProcCgroupsEntries) {
- SKIP_IF(!CgroupsAvailable());
-
- Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));
-
- Cgroup mem = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs("memory"));
- absl::flat_hash_map<std::string, CgroupsEntry> entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcCgroupsEntries());
- EXPECT_EQ(entries.size(), 1);
- ASSERT_TRUE(entries.contains("memory"));
- CgroupsEntry mem_e = entries["memory"];
- EXPECT_EQ(mem_e.subsys_name, "memory");
- EXPECT_GE(mem_e.hierarchy, 1);
- // Expect a single root cgroup.
- EXPECT_EQ(mem_e.num_cgroups, 1);
- // Cgroups are currently always enabled when mounted.
- EXPECT_TRUE(mem_e.enabled);
-
- // Add a second cgroup, and check for a new entry.
-
- Cgroup cpu = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs("cpu"));
- entries = ASSERT_NO_ERRNO_AND_VALUE(ProcCgroupsEntries());
- EXPECT_EQ(entries.size(), 2);
- EXPECT_TRUE(entries.contains("memory")); // Still have memory entry.
- ASSERT_TRUE(entries.contains("cpu"));
- CgroupsEntry cpu_e = entries["cpu"];
- EXPECT_EQ(cpu_e.subsys_name, "cpu");
- EXPECT_GE(cpu_e.hierarchy, 1);
- EXPECT_EQ(cpu_e.num_cgroups, 1);
- EXPECT_TRUE(cpu_e.enabled);
-
- // Separate hierarchies, since controllers were mounted separately.
- EXPECT_NE(mem_e.hierarchy, cpu_e.hierarchy);
-}
-
-TEST(ProcCgroups, UnmountRemovesEntries) {
- SKIP_IF(!CgroupsAvailable());
-
- Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));
- Cgroup cg = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs("cpu,memory"));
- absl::flat_hash_map<std::string, CgroupsEntry> entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcCgroupsEntries());
- EXPECT_EQ(entries.size(), 2);
-
- ASSERT_NO_ERRNO(m.Unmount(cg));
-
- entries = ASSERT_NO_ERRNO_AND_VALUE(ProcCgroupsEntries());
- EXPECT_TRUE(entries.empty());
-}
-
-TEST(ProcPIDCgroup, Empty) {
- SKIP_IF(!CgroupsAvailable());
-
- absl::flat_hash_map<std::string, PIDCgroupEntry> entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcPIDCgroupEntries(getpid()));
- EXPECT_TRUE(entries.empty());
-}
-
-TEST(ProcPIDCgroup, Entries) {
- SKIP_IF(!CgroupsAvailable());
-
- Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));
- Cgroup c = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs("memory"));
-
- absl::flat_hash_map<std::string, PIDCgroupEntry> entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcPIDCgroupEntries(getpid()));
- EXPECT_EQ(entries.size(), 1);
- PIDCgroupEntry mem_e = entries["memory"];
- EXPECT_GE(mem_e.hierarchy, 1);
- EXPECT_EQ(mem_e.controllers, "memory");
- EXPECT_EQ(mem_e.path, "/");
-
- Cgroup c1 = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs("cpu"));
- entries = ASSERT_NO_ERRNO_AND_VALUE(ProcPIDCgroupEntries(getpid()));
- EXPECT_EQ(entries.size(), 2);
- EXPECT_TRUE(entries.contains("memory")); // Still have memory entry.
- PIDCgroupEntry cpu_e = entries["cpu"];
- EXPECT_GE(cpu_e.hierarchy, 1);
- EXPECT_EQ(cpu_e.controllers, "cpu");
- EXPECT_EQ(cpu_e.path, "/");
-
- // Separate hierarchies, since controllers were mounted separately.
- EXPECT_NE(mem_e.hierarchy, cpu_e.hierarchy);
-}
-
-TEST(ProcPIDCgroup, UnmountRemovesEntries) {
- SKIP_IF(!CgroupsAvailable());
-
- Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));
- Cgroup all = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs(""));
-
- absl::flat_hash_map<std::string, PIDCgroupEntry> entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcPIDCgroupEntries(getpid()));
- EXPECT_GT(entries.size(), 0);
-
- ASSERT_NO_ERRNO(m.Unmount(all));
-
- entries = ASSERT_NO_ERRNO_AND_VALUE(ProcPIDCgroupEntries(getpid()));
- EXPECT_TRUE(entries.empty());
-}
-
-TEST(ProcCgroup, PIDCgroupMatchesCgroups) {
- SKIP_IF(!CgroupsAvailable());
-
- Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));
- Cgroup c = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs("memory"));
- Cgroup c1 = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs("cpu"));
-
- absl::flat_hash_map<std::string, CgroupsEntry> cgroups_entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcCgroupsEntries());
- absl::flat_hash_map<std::string, PIDCgroupEntry> pid_entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcPIDCgroupEntries(getpid()));
-
- CgroupsEntry cgroup_mem = cgroups_entries["memory"];
- PIDCgroupEntry pid_mem = pid_entries["memory"];
-
- EXPECT_EQ(cgroup_mem.hierarchy, pid_mem.hierarchy);
-
- CgroupsEntry cgroup_cpu = cgroups_entries["cpu"];
- PIDCgroupEntry pid_cpu = pid_entries["cpu"];
-
- EXPECT_EQ(cgroup_cpu.hierarchy, pid_cpu.hierarchy);
- EXPECT_NE(cgroup_mem.hierarchy, cgroup_cpu.hierarchy);
- EXPECT_NE(pid_mem.hierarchy, pid_cpu.hierarchy);
-}
-
-TEST(ProcCgroup, MultiControllerHierarchy) {
- SKIP_IF(!CgroupsAvailable());
-
- Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));
- Cgroup c = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs("memory,cpu"));
-
- absl::flat_hash_map<std::string, CgroupsEntry> cgroups_entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcCgroupsEntries());
-
- CgroupsEntry mem_e = cgroups_entries["memory"];
- CgroupsEntry cpu_e = cgroups_entries["cpu"];
-
- // Both controllers should have the same hierarchy ID.
- EXPECT_EQ(mem_e.hierarchy, cpu_e.hierarchy);
-
- absl::flat_hash_map<std::string, PIDCgroupEntry> pid_entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcPIDCgroupEntries(getpid()));
-
- // Expect an entry listing both controllers, matching the previous
- // hierarchy ID. Note that the controllers are listed in alphabetical order.
- PIDCgroupEntry pid_e = pid_entries["cpu,memory"];
- EXPECT_EQ(pid_e.hierarchy, mem_e.hierarchy);
-}
-
-TEST(ProcCgroup, ProcfsReportsCgroupfsMountOptions) {
- SKIP_IF(!CgroupsAvailable());
-
- Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));
- // Hierarchy with multiple controllers.
- Cgroup c1 = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs("memory,cpu"));
- // Hierarchy with a single controller.
- Cgroup c2 = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs("cpuacct"));
-
- const std::vector<ProcMountsEntry> mounts =
- ASSERT_NO_ERRNO_AND_VALUE(ProcSelfMountsEntries());
-
- for (auto const& e : mounts) {
- if (e.mount_point == c1.Path()) {
- auto mopts = ParseMountOptions(e.mount_opts);
- EXPECT_THAT(mopts, Contains(Key("memory")));
- EXPECT_THAT(mopts, Contains(Key("cpu")));
- EXPECT_THAT(mopts, Not(Contains(Key("cpuacct"))));
- }
-
- if (e.mount_point == c2.Path()) {
- auto mopts = ParseMountOptions(e.mount_opts);
- EXPECT_THAT(mopts, Contains(Key("cpuacct")));
- EXPECT_THAT(mopts, Not(Contains(Key("cpu"))));
- EXPECT_THAT(mopts, Not(Contains(Key("memory"))));
- }
- }
-
- const std::vector<ProcMountInfoEntry> mountinfo =
- ASSERT_NO_ERRNO_AND_VALUE(ProcSelfMountInfoEntries());
-
- for (auto const& e : mountinfo) {
- if (e.mount_point == c1.Path()) {
- auto mopts = ParseMountOptions(e.super_opts);
- EXPECT_THAT(mopts, Contains(Key("memory")));
- EXPECT_THAT(mopts, Contains(Key("cpu")));
- EXPECT_THAT(mopts, Not(Contains(Key("cpuacct"))));
- }
-
- if (e.mount_point == c2.Path()) {
- auto mopts = ParseMountOptions(e.super_opts);
- EXPECT_THAT(mopts, Contains(Key("cpuacct")));
- EXPECT_THAT(mopts, Not(Contains(Key("cpu"))));
- EXPECT_THAT(mopts, Not(Contains(Key("memory"))));
- }
- }
-}
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/chdir.cc b/test/syscalls/linux/chdir.cc
deleted file mode 100644
index 3c64b9eab..000000000
--- a/test/syscalls/linux/chdir.cc
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <linux/limits.h>
-#include <sys/socket.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <sys/un.h>
-
-#include "gtest/gtest.h"
-#include "test/util/capability_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(ChdirTest, Success) {
- auto old_dir = GetAbsoluteTestTmpdir();
- auto temp_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- EXPECT_THAT(chdir(temp_dir.path().c_str()), SyscallSucceeds());
- // The TempPath destructor deletes the newly created tmp dir, and the Sentry
- // rejects saving while its current dir still points to that path. Switch to
- // a permanent path here.
- EXPECT_THAT(chdir(old_dir.c_str()), SyscallSucceeds());
-}
-
-TEST(ChdirTest, PermissionDenied) {
- // Drop capabilities that allow us to override directory permissions.
- AutoCapability cap1(CAP_DAC_OVERRIDE, false);
- AutoCapability cap2(CAP_DAC_READ_SEARCH, false);
-
- auto temp_dir = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateDirWith(GetAbsoluteTestTmpdir(), 0666 /* mode */));
- EXPECT_THAT(chdir(temp_dir.path().c_str()), SyscallFailsWithErrno(EACCES));
-}
-
-TEST(ChdirTest, NotDir) {
- auto temp_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- EXPECT_THAT(chdir(temp_file.path().c_str()), SyscallFailsWithErrno(ENOTDIR));
-}
-
-TEST(ChdirTest, NotExist) {
- EXPECT_THAT(chdir("/foo/bar"), SyscallFailsWithErrno(ENOENT));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/chmod.cc b/test/syscalls/linux/chmod.cc
deleted file mode 100644
index dd82c5fb1..000000000
--- a/test/syscalls/linux/chmod.cc
+++ /dev/null
@@ -1,300 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <string>
-
-#include "gtest/gtest.h"
-#include "test/util/capability_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(ChmodTest, ChmodFileSucceeds) {
- // Drop capabilities that allow us to override file permissions.
- AutoCapability cap(CAP_DAC_OVERRIDE, false);
-
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- ASSERT_THAT(chmod(file.path().c_str(), 0466), SyscallSucceeds());
- EXPECT_THAT(open(file.path().c_str(), O_RDWR), SyscallFailsWithErrno(EACCES));
-}
-
-TEST(ChmodTest, ChmodDirSucceeds) {
- // Drop capabilities that allow us to override file and directory permissions.
- AutoCapability cap1(CAP_DAC_OVERRIDE, false);
- AutoCapability cap2(CAP_DAC_READ_SEARCH, false);
-
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const std::string fileInDir = NewTempAbsPathInDir(dir.path());
-
- ASSERT_THAT(chmod(dir.path().c_str(), 0466), SyscallSucceeds());
- EXPECT_THAT(open(fileInDir.c_str(), O_RDONLY), SyscallFailsWithErrno(EACCES));
-}
-
-TEST(ChmodTest, FchmodFileSucceeds) {
- // Drop capabilities that allow us to override file permissions.
- AutoCapability cap(CAP_DAC_OVERRIDE, false);
-
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileMode(0666));
- int fd;
- ASSERT_THAT(fd = open(file.path().c_str(), O_RDWR), SyscallSucceeds());
-
- {
- const DisableSave ds; // File permissions are reduced.
- ASSERT_THAT(fchmod(fd, 0444), SyscallSucceeds());
- EXPECT_THAT(close(fd), SyscallSucceeds());
- }
-
- EXPECT_THAT(open(file.path().c_str(), O_RDWR), SyscallFailsWithErrno(EACCES));
-}
-
-TEST(ChmodTest, FchmodDirSucceeds) {
- // Drop capabilities that allow us to override file and directory permissions.
- AutoCapability cap1(CAP_DAC_OVERRIDE, false);
- AutoCapability cap2(CAP_DAC_READ_SEARCH, false);
-
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- int fd;
- ASSERT_THAT(fd = open(dir.path().c_str(), O_RDONLY | O_DIRECTORY),
- SyscallSucceeds());
-
- {
- const DisableSave ds; // File permissions are reduced.
- ASSERT_THAT(fchmod(fd, 0), SyscallSucceeds());
- EXPECT_THAT(close(fd), SyscallSucceeds());
- }
-
- EXPECT_THAT(open(dir.path().c_str(), O_RDONLY),
- SyscallFailsWithErrno(EACCES));
-}
-
-TEST(ChmodTest, FchmodBadF) {
- ASSERT_THAT(fchmod(-1, 0444), SyscallFailsWithErrno(EBADF));
-}
-
-TEST(ChmodTest, FchmodatBadF) {
- ASSERT_THAT(fchmodat(-1, "foo", 0444, 0), SyscallFailsWithErrno(EBADF));
-}
-
-TEST(ChmodTest, FchmodFileWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_PATH));
-
- ASSERT_THAT(fchmod(fd.get(), 0444), SyscallFailsWithErrno(EBADF));
-}
-
-TEST(ChmodTest, FchmodDirWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- const auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const auto fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(dir.path(), O_DIRECTORY | O_PATH));
-
- ASSERT_THAT(fchmod(fd.get(), 0444), SyscallFailsWithErrno(EBADF));
-}
-
-TEST(ChmodTest, FchmodatWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- // Drop capabilities that allow us to override file permissions.
- AutoCapability cap(CAP_DAC_OVERRIDE, false);
-
- auto temp_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- const auto parent_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(GetAbsoluteTestTmpdir().c_str(), O_PATH | O_DIRECTORY));
-
- ASSERT_THAT(
- fchmodat(parent_fd.get(), std::string(Basename(temp_file.path())).c_str(),
- 0444, 0),
- SyscallSucceeds());
-
- EXPECT_THAT(open(temp_file.path().c_str(), O_RDWR),
- SyscallFailsWithErrno(EACCES));
-}
-
-TEST(ChmodTest, FchmodatNotDir) {
- ASSERT_THAT(fchmodat(-1, "", 0444, 0), SyscallFailsWithErrno(ENOENT));
-}
-
-TEST(ChmodTest, FchmodatFileAbsolutePath) {
- // Drop capabilities that allow us to override file permissions.
- AutoCapability cap(CAP_DAC_OVERRIDE, false);
-
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- ASSERT_THAT(fchmodat(-1, file.path().c_str(), 0444, 0), SyscallSucceeds());
- EXPECT_THAT(open(file.path().c_str(), O_RDWR), SyscallFailsWithErrno(EACCES));
-}
-
-TEST(ChmodTest, FchmodatDirAbsolutePath) {
- // Drop capabilities that allow us to override file and directory permissions.
- AutoCapability cap1(CAP_DAC_OVERRIDE, false);
- AutoCapability cap2(CAP_DAC_READ_SEARCH, false);
-
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- int fd;
- ASSERT_THAT(fd = open(dir.path().c_str(), O_RDONLY | O_DIRECTORY),
- SyscallSucceeds());
- EXPECT_THAT(close(fd), SyscallSucceeds());
-
- ASSERT_THAT(fchmodat(-1, dir.path().c_str(), 0, 0), SyscallSucceeds());
- EXPECT_THAT(open(dir.path().c_str(), O_RDONLY),
- SyscallFailsWithErrno(EACCES));
-}
-
-TEST(ChmodTest, FchmodatFile) {
- // Drop capabilities that allow us to override file permissions.
- AutoCapability cap(CAP_DAC_OVERRIDE, false);
-
- auto temp_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- int parent_fd;
- ASSERT_THAT(
- parent_fd = open(GetAbsoluteTestTmpdir().c_str(), O_RDONLY | O_DIRECTORY),
- SyscallSucceeds());
-
- ASSERT_THAT(
- fchmodat(parent_fd, std::string(Basename(temp_file.path())).c_str(), 0444,
- 0),
- SyscallSucceeds());
- EXPECT_THAT(close(parent_fd), SyscallSucceeds());
-
- EXPECT_THAT(open(temp_file.path().c_str(), O_RDWR),
- SyscallFailsWithErrno(EACCES));
-}
-
-TEST(ChmodTest, FchmodatDir) {
- // Drop capabilities that allow us to override file and directory permissions.
- AutoCapability cap1(CAP_DAC_OVERRIDE, false);
- AutoCapability cap2(CAP_DAC_READ_SEARCH, false);
-
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- int parent_fd;
- ASSERT_THAT(
- parent_fd = open(GetAbsoluteTestTmpdir().c_str(), O_RDONLY | O_DIRECTORY),
- SyscallSucceeds());
-
- int fd;
- ASSERT_THAT(fd = open(dir.path().c_str(), O_RDONLY | O_DIRECTORY),
- SyscallSucceeds());
- EXPECT_THAT(close(fd), SyscallSucceeds());
-
- ASSERT_THAT(
- fchmodat(parent_fd, std::string(Basename(dir.path())).c_str(), 0, 0),
- SyscallSucceeds());
- EXPECT_THAT(close(parent_fd), SyscallSucceeds());
-
- EXPECT_THAT(open(dir.path().c_str(), O_RDONLY | O_DIRECTORY),
- SyscallFailsWithErrno(EACCES));
-}
-
-TEST(ChmodTest, ChmodDowngradeWritability) {
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileMode(0666));
-
- int fd;
- ASSERT_THAT(fd = open(file.path().c_str(), O_RDWR), SyscallSucceeds());
-
- const DisableSave ds; // Permissions are dropped.
- ASSERT_THAT(chmod(file.path().c_str(), 0444), SyscallSucceeds());
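- // The fd was opened read-write before the chmod, so writes through it still
- // succeed; permissions are only checked at open time.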
- EXPECT_THAT(write(fd, "hello", 5), SyscallSucceedsWithValue(5));
-
- EXPECT_THAT(close(fd), SyscallSucceeds());
-}
-
-TEST(ChmodTest, ChmodFileToNoPermissionsSucceeds) {
- // Drop capabilities that allow us to override file permissions.
- AutoCapability cap1(CAP_DAC_OVERRIDE, false);
- AutoCapability cap2(CAP_DAC_READ_SEARCH, false);
-
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileMode(0666));
-
- ASSERT_THAT(chmod(file.path().c_str(), 0), SyscallSucceeds());
-
- EXPECT_THAT(open(file.path().c_str(), O_RDONLY),
- SyscallFailsWithErrno(EACCES));
-}
-
-TEST(ChmodTest, FchmodDowngradeWritability) {
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- int fd;
- ASSERT_THAT(fd = open(file.path().c_str(), O_RDWR | O_CREAT, 0666),
- SyscallSucceeds());
-
- const DisableSave ds; // Permissions are dropped.
- ASSERT_THAT(fchmod(fd, 0444), SyscallSucceeds());
- EXPECT_THAT(write(fd, "hello", 5), SyscallSucceedsWithValue(5));
-
- EXPECT_THAT(close(fd), SyscallSucceeds());
-}
-
-TEST(ChmodTest, FchmodFileToNoPermissionsSucceeds) {
- // Drop capabilities that allow us to override file permissions.
- AutoCapability cap1(CAP_DAC_OVERRIDE, false);
- AutoCapability cap2(CAP_DAC_READ_SEARCH, false);
-
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileMode(0666));
-
- int fd;
- ASSERT_THAT(fd = open(file.path().c_str(), O_RDWR), SyscallSucceeds());
-
- {
- const DisableSave ds; // Permissions are dropped.
- ASSERT_THAT(fchmod(fd, 0), SyscallSucceeds());
- EXPECT_THAT(close(fd), SyscallSucceeds());
- }
-
- EXPECT_THAT(open(file.path().c_str(), O_RDONLY),
- SyscallFailsWithErrno(EACCES));
-}
-
-// Verify that we can get a RW FD after chmod, even if a RO fd is left open.
-TEST(ChmodTest, ChmodWritableWithOpenFD) {
- // FIXME(b/72455313): broken on hostfs.
- if (IsRunningOnGvisor()) {
- return;
- }
-
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileMode(0444));
-
- FileDescriptor fd1 = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDONLY));
-
- ASSERT_THAT(fchmod(fd1.get(), 0644), SyscallSucceeds());
-
- // This FD is writable, even though fd1 has a read-only reference to the file.
- FileDescriptor fd2 = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR));
-
- // fd1 is not writable, but fd2 is.
- char c = 'a';
- EXPECT_THAT(WriteFd(fd1.get(), &c, 1), SyscallFailsWithErrno(EBADF));
- EXPECT_THAT(WriteFd(fd2.get(), &c, 1), SyscallSucceedsWithValue(1));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
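
The chmod tests above all hinge on one rule: permission bits are checked when a file is opened, not on every read or write, so a descriptor opened before a chmod keeps its original access (ChmodDowngradeWritability) while later opens see the new mode. A minimal standalone sketch of that behavior outside the gVisor test harness; it assumes an unprivileged caller (no CAP_DAC_OVERRIDE) and a writable /tmp.

    // Standalone sketch, not part of the deleted test file: chmod affects
    // future opens, while an already-open descriptor keeps its access mode.
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main() {
      char path[] = "/tmp/chmod_demo_XXXXXX";
      int fd = mkstemp(path);  // created mode 0600, opened O_RDWR
      if (fd < 0) { perror("mkstemp"); return 1; }

      if (chmod(path, 0444) != 0) { perror("chmod"); return 1; }

      // The existing descriptor still writes successfully...
      printf("write on old fd: %zd\n", write(fd, "hello", 5));  // expected: 5

      // ...but a fresh open for writing is denied to an unprivileged caller.
      errno = 0;
      int fd2 = open(path, O_RDWR);
      printf("new O_RDWR open: %d (errno %d, EACCES is %d)\n", fd2, errno, EACCES);

      if (fd2 >= 0) close(fd2);
      close(fd);
      unlink(path);
      return 0;
    }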
diff --git a/test/syscalls/linux/chown.cc b/test/syscalls/linux/chown.cc
deleted file mode 100644
index b0c1b6f4a..000000000
--- a/test/syscalls/linux/chown.cc
+++ /dev/null
@@ -1,251 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <grp.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <vector>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/flags/flag.h"
-#include "absl/synchronization/notification.h"
-#include "test/util/capability_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-ABSL_FLAG(int32_t, scratch_uid1, 65534, "first scratch UID");
-ABSL_FLAG(int32_t, scratch_uid2, 65533, "second scratch UID");
-ABSL_FLAG(int32_t, scratch_gid, 65534, "first scratch GID");
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(ChownTest, FchownBadF) {
- ASSERT_THAT(fchown(-1, 0, 0), SyscallFailsWithErrno(EBADF));
-}
-
-TEST(ChownTest, FchownatBadF) {
- ASSERT_THAT(fchownat(-1, "fff", 0, 0, 0), SyscallFailsWithErrno(EBADF));
-}
-
-TEST(ChownTest, FchownFileWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_PATH));
-
- ASSERT_THAT(fchown(fd.get(), geteuid(), getegid()),
- SyscallFailsWithErrno(EBADF));
-}
-
-TEST(ChownTest, FchownDirWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- const auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const auto fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(dir.path(), O_DIRECTORY | O_PATH));
-
- ASSERT_THAT(fchown(fd.get(), geteuid(), getegid()),
- SyscallFailsWithErrno(EBADF));
-}
-
-TEST(ChownTest, FchownatWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- const auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir.path()));
- const auto dirfd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(dir.path(), O_DIRECTORY | O_PATH));
- ASSERT_THAT(
- fchownat(dirfd.get(), file.path().c_str(), geteuid(), getegid(), 0),
- SyscallSucceeds());
-}
-
-TEST(ChownTest, FchownatEmptyPath) {
- const auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const auto fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(dir.path(), O_DIRECTORY | O_RDONLY));
- ASSERT_THAT(fchownat(fd.get(), "", 0, 0, 0), SyscallFailsWithErrno(ENOENT));
-}
-
-using Chown =
- std::function<PosixError(const std::string&, uid_t owner, gid_t group)>;
-
-class ChownParamTest : public ::testing::TestWithParam<Chown> {};
-
-TEST_P(ChownParamTest, ChownFileSucceeds) {
- AutoCapability cap(CAP_CHOWN, false);
-
- const auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- // At least *try* setting to a group other than the EGID.
- gid_t gid;
- EXPECT_THAT(gid = getegid(), SyscallSucceeds());
- int num_groups;
- EXPECT_THAT(num_groups = getgroups(0, nullptr), SyscallSucceeds());
- if (num_groups > 0) {
- std::vector<gid_t> list(num_groups);
- EXPECT_THAT(getgroups(list.size(), list.data()), SyscallSucceeds());
-    // Scan the list of groups for a valid gid. Note that if a group is not
-    // defined in this local user namespace, it shows up as 65534, and the
-    // chown below would not set the expected group. So only change the
-    // target gid if we find a valid group in this list.
- for (const gid_t other_gid : list) {
- if (other_gid != 65534) {
- gid = other_gid;
- break;
- }
- }
- }
-
- EXPECT_NO_ERRNO(GetParam()(file.path(), geteuid(), gid));
-
- struct stat s = {};
- ASSERT_THAT(stat(file.path().c_str(), &s), SyscallSucceeds());
- EXPECT_EQ(s.st_uid, geteuid());
- EXPECT_EQ(s.st_gid, gid);
-}
-
-TEST_P(ChownParamTest, ChownFilePermissionDenied) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SETUID)));
-
- const auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileMode(0777));
- EXPECT_THAT(chmod(GetAbsoluteTestTmpdir().c_str(), 0777), SyscallSucceeds());
-
- // Drop privileges and change IDs only in child thread, or else this parent
- // thread won't be able to open some log files after the test ends.
- ScopedThread([&] {
- // Drop privileges.
- AutoCapability cap(CAP_CHOWN, false);
-
- // Change EUID and EGID.
- //
- // See note about POSIX below.
- EXPECT_THAT(
- syscall(SYS_setresgid, -1, absl::GetFlag(FLAGS_scratch_gid), -1),
- SyscallSucceeds());
- EXPECT_THAT(
- syscall(SYS_setresuid, -1, absl::GetFlag(FLAGS_scratch_uid1), -1),
- SyscallSucceeds());
-
- EXPECT_THAT(GetParam()(file.path(), geteuid(), getegid()),
- PosixErrorIs(EPERM, ::testing::ContainsRegex("chown")));
- });
-}
-
-TEST_P(ChownParamTest, ChownFileSucceedsAsRoot) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability((CAP_CHOWN))));
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability((CAP_SETUID))));
-
- const std::string filename = NewTempAbsPath();
- EXPECT_THAT(chmod(GetAbsoluteTestTmpdir().c_str(), 0777), SyscallSucceeds());
-
- absl::Notification fileCreated, fileChowned;
- // Change UID only in child thread, or else this parent thread won't be able
- // to open some log files after the test ends.
- ScopedThread t([&] {
- // POSIX requires that all threads in a process share the same UIDs, so
- // the NPTL setresuid wrappers use signals to make all threads execute the
- // setresuid syscall. However, we want this thread to have its own set of
- // credentials different from the parent process, so we use the raw
- // syscall.
- EXPECT_THAT(
- syscall(SYS_setresuid, -1, absl::GetFlag(FLAGS_scratch_uid2), -1),
- SyscallSucceeds());
-
- // Create file and immediately close it.
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(filename, O_CREAT | O_RDWR, 0644));
- fd.reset(); // Close the fd.
-
- fileCreated.Notify();
- fileChowned.WaitForNotification();
-
- EXPECT_THAT(open(filename.c_str(), O_RDWR), SyscallFailsWithErrno(EACCES));
- FileDescriptor fd2 = ASSERT_NO_ERRNO_AND_VALUE(Open(filename, O_RDONLY));
- });
-
- fileCreated.WaitForNotification();
-
- // Set file's owners to someone different.
- EXPECT_NO_ERRNO(GetParam()(filename, absl::GetFlag(FLAGS_scratch_uid1),
- absl::GetFlag(FLAGS_scratch_gid)));
-
- struct stat s;
- EXPECT_THAT(stat(filename.c_str(), &s), SyscallSucceeds());
- EXPECT_EQ(s.st_uid, absl::GetFlag(FLAGS_scratch_uid1));
- EXPECT_EQ(s.st_gid, absl::GetFlag(FLAGS_scratch_gid));
-
- fileChowned.Notify();
-}
-
-PosixError errorFromReturn(const std::string& name, int ret) {
- if (ret == -1) {
- return PosixError(errno, absl::StrCat(name, " failed"));
- }
- return NoError();
-}
-
-INSTANTIATE_TEST_SUITE_P(
- ChownKinds, ChownParamTest,
- ::testing::Values(
- [](const std::string& path, uid_t owner, gid_t group) -> PosixError {
- int rc = chown(path.c_str(), owner, group);
- MaybeSave();
- return errorFromReturn("chown", rc);
- },
- [](const std::string& path, uid_t owner, gid_t group) -> PosixError {
- int rc = lchown(path.c_str(), owner, group);
- MaybeSave();
- return errorFromReturn("lchown", rc);
- },
- [](const std::string& path, uid_t owner, gid_t group) -> PosixError {
- ASSIGN_OR_RETURN_ERRNO(auto fd, Open(path, O_RDWR));
- int rc = fchown(fd.get(), owner, group);
- MaybeSave();
- return errorFromReturn("fchown", rc);
- },
- [](const std::string& path, uid_t owner, gid_t group) -> PosixError {
- ASSIGN_OR_RETURN_ERRNO(auto fd, Open(path, O_RDWR));
- int rc = fchownat(fd.get(), "", owner, group, AT_EMPTY_PATH);
- MaybeSave();
- return errorFromReturn("fchownat-fd", rc);
- },
- [](const std::string& path, uid_t owner, gid_t group) -> PosixError {
- ASSIGN_OR_RETURN_ERRNO(auto dirfd, Open(std::string(Dirname(path)),
- O_DIRECTORY | O_RDONLY));
- int rc = fchownat(dirfd.get(), std::string(Basename(path)).c_str(),
- owner, group, 0);
- MaybeSave();
- return errorFromReturn("fchownat-dirfd", rc);
- },
- [](const std::string& path, uid_t owner, gid_t group) -> PosixError {
- ASSIGN_OR_RETURN_ERRNO(auto dirfd, Open(std::string(Dirname(path)),
- O_DIRECTORY | O_PATH));
- int rc = fchownat(dirfd.get(), std::string(Basename(path)).c_str(),
- owner, group, 0);
- MaybeSave();
- return errorFromReturn("fchownat-opathdirfd", rc);
- }));
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
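
The ScopedThread bodies in these chown tests depend on a detail called out in the comments above: the raw setresuid/setresgid syscalls change credentials for the calling thread only, while the glibc wrappers signal every thread so the whole process switches. A standalone sketch of that per-thread behavior; it must run as root, and UID 65534 ("nobody") is only an assumed target.

    // Standalone sketch: drop the UID of one thread via the raw syscall and
    // observe that the main thread keeps its original credentials.
    #include <pthread.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    void* DropUidInThread(void*) {
      if (syscall(SYS_setresuid, -1, 65534, -1) != 0) perror("setresuid");
      printf("worker thread euid: %d\n", (int)geteuid());  // 65534 on success
      return nullptr;
    }

    int main() {
      pthread_t t;
      pthread_create(&t, nullptr, DropUidInThread, nullptr);
      pthread_join(t, nullptr);
      printf("main thread euid:   %d\n", (int)geteuid());  // unchanged
      return 0;
    }

Built with -pthread; this is the same trick that lets ChownFilePermissionDenied drop privileges without breaking the parent thread's ability to write log files after the test ends.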
diff --git a/test/syscalls/linux/chroot.cc b/test/syscalls/linux/chroot.cc
deleted file mode 100644
index 7e4626f03..000000000
--- a/test/syscalls/linux/chroot.cc
+++ /dev/null
@@ -1,441 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <stddef.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <syscall.h>
-#include <unistd.h>
-
-#include <algorithm>
-#include <string>
-#include <vector>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/cleanup/cleanup.h"
-#include "absl/strings/str_cat.h"
-#include "absl/strings/str_split.h"
-#include "absl/strings/string_view.h"
-#include "test/util/capability_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/logging.h"
-#include "test/util/mount_util.h"
-#include "test/util/multiprocess_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-using ::testing::HasSubstr;
-using ::testing::Not;
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// Async-signal-safe conversion from integer to string, appending the string
-// (including a terminating NUL) to buf, which is a buffer of size len bytes.
-// Returns the number of bytes written, or 0 if the buffer is too small.
-//
-// Preconditions: 2 <= radix <= 16.
-template <typename T>
-size_t SafeItoa(T val, char* buf, size_t len, int radix) {
- size_t n = 0;
-#define _WRITE_OR_FAIL(c) \
- do { \
- if (len == 0) { \
- return 0; \
- } \
- buf[n] = (c); \
- n++; \
- len--; \
- } while (false)
- if (val == 0) {
- _WRITE_OR_FAIL('0');
- } else {
- // Write digits in reverse order, then reverse them at the end.
- bool neg = val < 0;
- while (val != 0) {
-      // C/C++ define % so that the remainder takes the sign of the dividend,
-      // so for a positive radix it lies in (-radix, radix); indexing from the
-      // middle of the digit table below therefore handles both positive and
-      // negative values.
-      char c = "fedcba9876543210123456789abcdef"[val % radix + 15];
-      _WRITE_OR_FAIL(c);
-      val /= radix;
- }
- if (neg) {
- _WRITE_OR_FAIL('-');
- }
- std::reverse(buf, buf + n);
- }
- _WRITE_OR_FAIL('\0');
- return n;
-#undef _WRITE_OR_FAIL
-}
-
-TEST(ChrootTest, Success) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_CHROOT)));
- auto temp_dir = TempPath::CreateDir().ValueOrDie();
- const std::string temp_dir_path = temp_dir.path();
-
- const auto rest = [&] { TEST_CHECK_SUCCESS(chroot(temp_dir_path.c_str())); };
- EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));
-}
-
-TEST(ChrootTest, PermissionDenied) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_CHROOT)));
-
- // CAP_DAC_READ_SEARCH and CAP_DAC_OVERRIDE may override Execute permission
- // on directories.
- AutoCapability cap_search(CAP_DAC_READ_SEARCH, false);
- AutoCapability cap_override(CAP_DAC_OVERRIDE, false);
-
- auto temp_dir = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateDirWith(GetAbsoluteTestTmpdir(), 0666 /* mode */));
- EXPECT_THAT(chroot(temp_dir.path().c_str()), SyscallFailsWithErrno(EACCES));
-}
-
-TEST(ChrootTest, NotDir) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_CHROOT)));
-
- auto temp_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- EXPECT_THAT(chroot(temp_file.path().c_str()), SyscallFailsWithErrno(ENOTDIR));
-}
-
-TEST(ChrootTest, NotExist) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_CHROOT)));
-
- EXPECT_THAT(chroot("/foo/bar"), SyscallFailsWithErrno(ENOENT));
-}
-
-TEST(ChrootTest, WithoutCapability) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SETPCAP)));
-
- // Unset CAP_SYS_CHROOT.
- AutoCapability cap(CAP_SYS_CHROOT, false);
-
- auto temp_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- EXPECT_THAT(chroot(temp_dir.path().c_str()), SyscallFailsWithErrno(EPERM));
-}
-
-TEST(ChrootTest, CreatesNewRoot) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_CHROOT)));
-
- // Grab the initial cwd.
- char initial_cwd[1024];
- ASSERT_THAT(syscall(__NR_getcwd, initial_cwd, sizeof(initial_cwd)),
- SyscallSucceeds());
-
- auto new_root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const std::string new_root_path = new_root.path();
- auto file_in_new_root =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(new_root.path()));
- const std::string file_in_new_root_path = file_in_new_root.path();
-
- const auto rest = [&] {
- // chroot into new_root.
- TEST_CHECK_SUCCESS(chroot(new_root_path.c_str()));
-
- // getcwd should return "(unreachable)" followed by the initial_cwd.
- char buf[1024];
- TEST_CHECK_SUCCESS(syscall(__NR_getcwd, buf, sizeof(buf)));
- constexpr char kUnreachablePrefix[] = "(unreachable)";
- TEST_CHECK(
- strncmp(buf, kUnreachablePrefix, sizeof(kUnreachablePrefix) - 1) == 0);
- TEST_CHECK(strcmp(buf + sizeof(kUnreachablePrefix) - 1, initial_cwd) == 0);
-
- // Should not be able to stat file by its full path.
- struct stat statbuf;
- TEST_CHECK_ERRNO(stat(file_in_new_root_path.c_str(), &statbuf), ENOENT);
-
- // Should be able to stat file at new rooted path.
- buf[0] = '/';
- absl::string_view basename = Basename(file_in_new_root_path);
- TEST_CHECK(basename.length() < (sizeof(buf) - 2));
- memcpy(buf + 1, basename.data(), basename.length());
- buf[basename.length() + 1] = '\0';
- TEST_CHECK_SUCCESS(stat(buf, &statbuf));
-
- // Should be able to stat cwd at '.' even though it's outside root.
- TEST_CHECK_SUCCESS(stat(".", &statbuf));
-
- // chdir into new root.
- TEST_CHECK_SUCCESS(chdir("/"));
-
- // getcwd should return "/".
- TEST_CHECK_SUCCESS(syscall(__NR_getcwd, buf, sizeof(buf)));
-    TEST_CHECK(strcmp(buf, "/") == 0);
-
-    // '.', '..', '/', and '/..' should all have the same dev and inode.
- struct stat statbuf_dot;
- TEST_CHECK_SUCCESS(stat(".", &statbuf_dot));
- struct stat statbuf_dotdot;
- TEST_CHECK_SUCCESS(stat("..", &statbuf_dotdot));
- TEST_CHECK(statbuf_dot.st_dev == statbuf_dotdot.st_dev);
- TEST_CHECK(statbuf_dot.st_ino == statbuf_dotdot.st_ino);
- struct stat statbuf_slash;
- TEST_CHECK_SUCCESS(stat("/", &statbuf_slash));
- TEST_CHECK(statbuf_dot.st_dev == statbuf_slash.st_dev);
- TEST_CHECK(statbuf_dot.st_ino == statbuf_slash.st_ino);
- struct stat statbuf_slashdotdot;
- TEST_CHECK_SUCCESS(stat("/..", &statbuf_slashdotdot));
- TEST_CHECK(statbuf_dot.st_dev == statbuf_slashdotdot.st_dev);
- TEST_CHECK(statbuf_dot.st_ino == statbuf_slashdotdot.st_ino);
- };
- EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));
-}
-
-TEST(ChrootTest, DotDotFromOpenFD) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_CHROOT)));
-
- auto dir_outside_root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(dir_outside_root.path(), O_RDONLY | O_DIRECTORY));
- auto new_root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const std::string new_root_path = new_root.path();
-
- const auto rest = [&] {
- // chroot into new_root.
- TEST_CHECK_SUCCESS(chroot(new_root_path.c_str()));
-
- // openat on fd with path .. will succeed.
- int other_fd;
- TEST_CHECK_SUCCESS(other_fd = openat(fd.get(), "..", O_RDONLY));
- TEST_CHECK_SUCCESS(close(other_fd));
-
- // getdents on fd should not error.
- char buf[1024];
- TEST_CHECK_SUCCESS(syscall(SYS_getdents64, fd.get(), buf, sizeof(buf)));
- };
- EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));
-}
-
-// Test that link resolution in a chroot can escape the root by following an
-// open proc fd. Regression test for b/32316719.
-TEST(ChrootTest, ProcFdLinkResolutionInChroot) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_CHROOT)));
-
- const TempPath file_outside_chroot =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const std::string file_outside_chroot_path = file_outside_chroot.path();
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file_outside_chroot.path(), O_RDONLY));
-
- const FileDescriptor proc_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open("/proc", O_DIRECTORY | O_RDONLY | O_CLOEXEC));
-
- auto temp_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const std::string temp_dir_path = temp_dir.path();
-
- const auto rest = [&] {
- TEST_CHECK_SUCCESS(chroot(temp_dir_path.c_str()));
-
- // Opening relative to an already open fd to a node outside the chroot
- // works.
- const FileDescriptor proc_self_fd = TEST_CHECK_NO_ERRNO_AND_VALUE(
- OpenAt(proc_fd.get(), "self/fd", O_DIRECTORY | O_RDONLY | O_CLOEXEC));
-
- // Proc fd symlinks can escape the chroot if the fd the symlink refers to
- // refers to an object outside the chroot.
- char fd_buf[11];
- TEST_CHECK(SafeItoa(fd.get(), fd_buf, sizeof(fd_buf), 10));
- struct stat s = {};
- TEST_CHECK_SUCCESS(fstatat(proc_self_fd.get(), fd_buf, &s, 0));
-
- // Try to stat the stdin fd. Internally, this is handled differently from a
- // proc fd entry pointing to a file, since stdin is backed by a host fd, and
- // isn't a walkable path on the filesystem inside the sandbox.
- TEST_CHECK_SUCCESS(fstatat(proc_self_fd.get(), "0", &s, 0));
- };
- EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));
-}
-
-// This test will verify that when you hold a fd to proc before entering
-// a chroot that any files inside the chroot will appear rooted to the
-// base chroot when examining /proc/self/fd/{num}.
-TEST(ChrootTest, ProcMemSelfFdsNoEscapeProcOpen) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_CHROOT)));
-
- // Get a FD to /proc before we enter the chroot.
- const FileDescriptor proc =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/proc", O_RDONLY));
-
- const auto temp_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const std::string temp_dir_path = temp_dir.path();
-
- const auto rest = [&] {
- // Enter the chroot directory.
- TEST_CHECK_SUCCESS(chroot(temp_dir_path.c_str()));
-
- // Open a file inside the chroot at /foo.
- const FileDescriptor foo =
- TEST_CHECK_NO_ERRNO_AND_VALUE(Open("/foo", O_CREAT | O_RDONLY, 0644));
-
-    // Examine /proc/self/fd/{foo_fd} to see if it exposes the fact that we're
-    // inside a chroot; the path should be /foo and NOT {chroot_dir}/foo.
- constexpr char kSelfFdRelpath[] = "self/fd/";
- char path_buf[20];
- strcpy(path_buf, kSelfFdRelpath); // NOLINT: need async-signal-safety
- TEST_CHECK(SafeItoa(foo.get(), path_buf + sizeof(kSelfFdRelpath) - 1,
- sizeof(path_buf) - (sizeof(kSelfFdRelpath) - 1), 10));
- char buf[1024] = {};
- size_t bytes_read = 0;
- TEST_CHECK_SUCCESS(
- bytes_read = readlinkat(proc.get(), path_buf, buf, sizeof(buf) - 1));
-
- // The link should resolve to something.
- TEST_CHECK(bytes_read > 0);
-
- // Assert that the link doesn't contain the chroot path and is only /foo.
- TEST_CHECK(strcmp(buf, "/foo") == 0);
- };
- EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));
-}
-
-// This test will verify that a file inside a chroot when mmapped will not
-// expose the full file path via /proc/self/maps and instead honor the chroot.
-TEST(ChrootTest, ProcMemSelfMapsNoEscapeProcOpen) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_CHROOT)));
-
- // Get a FD to /proc before we enter the chroot.
- const FileDescriptor proc =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/proc", O_RDONLY));
-
-  const auto temp_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const std::string temp_dir_path = temp_dir.path();
-
- const auto rest = [&] {
- // Enter the chroot directory.
- TEST_CHECK_SUCCESS(chroot(temp_dir_path.c_str()));
-
- // Open a file inside the chroot at /foo.
- const FileDescriptor foo =
- TEST_CHECK_NO_ERRNO_AND_VALUE(Open("/foo", O_CREAT | O_RDONLY, 0644));
-
- // Mmap the newly created file.
- void* foo_map = mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE,
- MAP_PRIVATE, foo.get(), 0);
- TEST_CHECK_SUCCESS(reinterpret_cast<int64_t>(foo_map));
-
- // Always unmap. Since this function is called between fork() and execve(),
- // we can't use gvisor::testing::Cleanup, which uses std::function
- // and thus may heap-allocate (which is async-signal-unsafe); instead, use
- // absl::Cleanup, which is templated on the callback type.
- auto cleanup_map = absl::MakeCleanup(
- [&] { TEST_CHECK_SUCCESS(munmap(foo_map, kPageSize)); });
-
- // Examine /proc/self/maps to be sure that /foo doesn't appear to be
- // mapped with the full chroot path.
- const FileDescriptor maps = TEST_CHECK_NO_ERRNO_AND_VALUE(
- OpenAt(proc.get(), "self/maps", O_RDONLY));
-
- size_t bytes_read = 0;
- char buf[8 * 1024] = {};
- TEST_CHECK_SUCCESS(bytes_read = ReadFd(maps.get(), buf, sizeof(buf)));
-
- // The maps file should have something.
- TEST_CHECK(bytes_read > 0);
-
-    // Finally, make sure the maps don't contain the chroot path.
- TEST_CHECK(
- !absl::StrContains(absl::string_view(buf, bytes_read), temp_dir_path));
- };
- EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));
-}
-
-// Test that mounts outside the chroot will not appear in /proc/self/mounts or
-// /proc/self/mountinfo.
-TEST(ChrootTest, ProcMountsMountinfoNoEscape) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_CHROOT)));
-
- // Create nested tmpfs mounts.
- const auto outer_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const std::string outer_dir_path = outer_dir.path();
- const auto outer_mount = ASSERT_NO_ERRNO_AND_VALUE(
- Mount("none", outer_dir_path, "tmpfs", 0, "mode=0700", 0));
-
- const auto inner_dir =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(outer_dir_path));
- const std::string inner_dir_path = inner_dir.path();
- const auto inner_mount = ASSERT_NO_ERRNO_AND_VALUE(
- Mount("none", inner_dir_path, "tmpfs", 0, "mode=0700", 0));
- const std::string inner_dir_in_outer_chroot_path =
- absl::StrCat("/", Basename(inner_dir_path));
-
- // Filenames that will be checked for mounts, all relative to /proc dir.
- std::string paths[3] = {"mounts", "self/mounts", "self/mountinfo"};
-
- for (const std::string& path : paths) {
- // We should have both inner and outer mounts.
- const std::string contents =
- ASSERT_NO_ERRNO_AND_VALUE(GetContents(JoinPath("/proc", path)));
- EXPECT_THAT(contents,
- AllOf(HasSubstr(outer_dir_path), HasSubstr(inner_dir_path)));
-    // We'd better have at least three entries: the two mounts we created
-    // plus the root.
- std::vector<absl::string_view> submounts =
- absl::StrSplit(contents, '\n', absl::SkipWhitespace());
- ASSERT_GT(submounts.size(), 2);
- }
-
- // Get a FD to /proc before we enter the chroot.
- const FileDescriptor proc =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/proc", O_RDONLY));
-
- const auto rest = [&] {
- // Chroot to outer mount.
- TEST_CHECK_SUCCESS(chroot(outer_dir_path.c_str()));
-
- char buf[8 * 1024];
- for (const std::string& path : paths) {
- const FileDescriptor proc_file =
- TEST_CHECK_NO_ERRNO_AND_VALUE(OpenAt(proc.get(), path, O_RDONLY));
-
- // Only two mounts visible from this chroot: the inner and outer. Both
- // paths should be relative to the new chroot.
- ssize_t n = ReadFd(proc_file.get(), buf, sizeof(buf));
- TEST_PCHECK(n >= 0);
- buf[n] = '\0';
- TEST_CHECK(absl::StrContains(buf, Basename(inner_dir_path)));
- TEST_CHECK(!absl::StrContains(buf, outer_dir_path));
- TEST_CHECK(!absl::StrContains(buf, inner_dir_path));
- TEST_CHECK(std::count(buf, buf + n, '\n') == 2);
- }
-
- // Chroot to inner mount. We must use an absolute path accessible to our
- // chroot.
- TEST_CHECK_SUCCESS(chroot(inner_dir_in_outer_chroot_path.c_str()));
-
- for (const std::string& path : paths) {
- const FileDescriptor proc_file =
- TEST_CHECK_NO_ERRNO_AND_VALUE(OpenAt(proc.get(), path, O_RDONLY));
-
- // Only the inner mount visible from this chroot.
- ssize_t n = ReadFd(proc_file.get(), buf, sizeof(buf));
- TEST_PCHECK(n >= 0);
- buf[n] = '\0';
- TEST_CHECK(std::count(buf, buf + n, '\n') == 1);
- }
- };
- EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
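
Several chroot tests above rely on the classic two-step confinement pattern: chroot() only changes the root, and the working directory keeps pointing outside it until chdir("/") is called. A standalone sketch of that pattern, assuming CAP_SYS_CHROOT and an existing target directory ("/var/empty" is just an example path).

    // Standalone sketch of chroot + chdir("/"). Until the chdir, the raw
    // getcwd syscall reports the old directory with an "(unreachable)" prefix,
    // which is exactly what ChrootTest.CreatesNewRoot asserts.
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main() {
      const char* new_root = "/var/empty";  // example path; must exist

      if (chroot(new_root) != 0) { perror("chroot"); return 1; }

      char buf[1024];
      if (syscall(SYS_getcwd, buf, sizeof(buf)) > 0)
        printf("cwd right after chroot: %s\n", buf);  // "(unreachable)/..."

      if (chdir("/") != 0) { perror("chdir"); return 1; }
      if (syscall(SYS_getcwd, buf, sizeof(buf)) > 0)
        printf("cwd after chdir(\"/\"): %s\n", buf);  // "/"
      return 0;
    }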
diff --git a/test/syscalls/linux/clock_getres.cc b/test/syscalls/linux/clock_getres.cc
deleted file mode 100644
index c408b936c..000000000
--- a/test/syscalls/linux/clock_getres.cc
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sys/time.h>
-#include <time.h>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// clock_getres works regardless of whether or not a timespec is passed.
-TEST(ClockGetres, Timespec) {
- struct timespec ts;
- EXPECT_THAT(clock_getres(CLOCK_MONOTONIC, &ts), SyscallSucceeds());
- EXPECT_THAT(clock_getres(CLOCK_MONOTONIC, nullptr), SyscallSucceeds());
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
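
For context, clock_getres() only reports a clock's granularity, and the result argument is optional, which is all the single test above verifies. A standalone equivalent:

    // Standalone sketch: print CLOCK_MONOTONIC's resolution, then show that a
    // null timespec is accepted (only the clock id is validated).
    #include <stdio.h>
    #include <time.h>

    int main() {
      struct timespec res;
      if (clock_getres(CLOCK_MONOTONIC, &res) != 0) { perror("clock_getres"); return 1; }
      printf("CLOCK_MONOTONIC resolution: %lds %ldns\n", (long)res.tv_sec, res.tv_nsec);
      return clock_getres(CLOCK_MONOTONIC, nullptr);  // also expected to succeed
    }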
diff --git a/test/syscalls/linux/clock_gettime.cc b/test/syscalls/linux/clock_gettime.cc
deleted file mode 100644
index 7f6015049..000000000
--- a/test/syscalls/linux/clock_gettime.cc
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <pthread.h>
-#include <sys/time.h>
-
-#include <cerrno>
-#include <cstdint>
-#include <ctime>
-#include <list>
-#include <memory>
-#include <string>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-int64_t clock_gettime_nsecs(clockid_t id) {
- struct timespec ts;
- TEST_PCHECK(clock_gettime(id, &ts) == 0);
- return (ts.tv_sec * 1000000000 + ts.tv_nsec);
-}
-
-// Spin on the CPU for at least ns nanoseconds, based on
-// CLOCK_THREAD_CPUTIME_ID.
-void spin_ns(int64_t ns) {
- int64_t start = clock_gettime_nsecs(CLOCK_THREAD_CPUTIME_ID);
- int64_t end = start + ns;
-
- do {
- constexpr int kLoopCount = 1000000; // large and arbitrary
- // volatile to prevent the compiler from skipping this loop.
- for (volatile int i = 0; i < kLoopCount; i++) {
- }
- } while (clock_gettime_nsecs(CLOCK_THREAD_CPUTIME_ID) < end);
-}
-
-// Test that CLOCK_PROCESS_CPUTIME_ID is a superset of CLOCK_THREAD_CPUTIME_ID.
-TEST(ClockGettime, CputimeId) {
- constexpr int kNumThreads = 13; // arbitrary
-
- absl::Duration spin_time = absl::Seconds(1);
-
- // Start off the worker threads and compute the aggregate time spent by
- // the workers. Note that we test CLOCK_PROCESS_CPUTIME_ID by having the
- // workers execute in parallel and verifying that CLOCK_PROCESS_CPUTIME_ID
- // accumulates the runtime of all threads.
- int64_t start = clock_gettime_nsecs(CLOCK_PROCESS_CPUTIME_ID);
-
-  // Create kNumThreads worker threads.
- std::list<ScopedThread> threads;
- for (int i = 0; i < kNumThreads; i++) {
- threads.emplace_back(
- [spin_time] { spin_ns(absl::ToInt64Nanoseconds(spin_time)); });
- }
- for (auto& t : threads) {
- t.Join();
- }
-
- int64_t end = clock_gettime_nsecs(CLOCK_PROCESS_CPUTIME_ID);
-
- // The aggregate time spent in the worker threads must be at least
- // 'kNumThreads' times the time each thread spun.
- ASSERT_GE(end - start, kNumThreads * absl::ToInt64Nanoseconds(spin_time));
-}
-
-TEST(ClockGettime, JavaThreadTime) {
- clockid_t clockid;
- ASSERT_EQ(0, pthread_getcpuclockid(pthread_self(), &clockid));
- struct timespec tp;
- ASSERT_THAT(clock_getres(clockid, &tp), SyscallSucceeds());
- EXPECT_TRUE(tp.tv_sec > 0 || tp.tv_nsec > 0);
-  // Thread CPU time is accounted at scheduler-tick granularity (roughly every
-  // 10ms), so it may initially read as zero; loop until it becomes nonzero.
- do {
- ASSERT_THAT(clock_gettime(clockid, &tp), SyscallSucceeds());
- } while (tp.tv_sec == 0 && tp.tv_nsec == 0);
- EXPECT_TRUE(tp.tv_sec > 0 || tp.tv_nsec > 0);
-}
-
-// There is not much to test here, since CLOCK_REALTIME may be discontinuous.
-TEST(ClockGettime, RealtimeWorks) {
- struct timespec tp;
- EXPECT_THAT(clock_gettime(CLOCK_REALTIME, &tp), SyscallSucceeds());
-}
-
-class MonotonicClockTest : public ::testing::TestWithParam<clockid_t> {};
-
-TEST_P(MonotonicClockTest, IsMonotonic) {
- auto end = absl::Now() + absl::Seconds(5);
-
- struct timespec tp;
- EXPECT_THAT(clock_gettime(GetParam(), &tp), SyscallSucceeds());
-
- auto prev = absl::TimeFromTimespec(tp);
- while (absl::Now() < end) {
- EXPECT_THAT(clock_gettime(GetParam(), &tp), SyscallSucceeds());
- auto now = absl::TimeFromTimespec(tp);
- EXPECT_GE(now, prev);
- prev = now;
- }
-}
-
-std::string PrintClockId(::testing::TestParamInfo<clockid_t> info) {
- switch (info.param) {
- case CLOCK_MONOTONIC:
- return "CLOCK_MONOTONIC";
- case CLOCK_MONOTONIC_COARSE:
- return "CLOCK_MONOTONIC_COARSE";
- case CLOCK_MONOTONIC_RAW:
- return "CLOCK_MONOTONIC_RAW";
- case CLOCK_BOOTTIME:
- // CLOCK_BOOTTIME is a monotonic clock.
- return "CLOCK_BOOTTIME";
- default:
- return absl::StrCat(info.param);
- }
-}
-
-INSTANTIATE_TEST_SUITE_P(ClockGettime, MonotonicClockTest,
- ::testing::Values(CLOCK_MONOTONIC,
- CLOCK_MONOTONIC_COARSE,
- CLOCK_MONOTONIC_RAW, CLOCK_BOOTTIME),
- PrintClockId);
-
-TEST(ClockGettime, UnimplementedReturnsEINVAL) {
- SKIP_IF(!IsRunningOnGvisor());
-
- struct timespec tp;
- EXPECT_THAT(clock_gettime(CLOCK_REALTIME_ALARM, &tp),
- SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(clock_gettime(CLOCK_BOOTTIME_ALARM, &tp),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(ClockGettime, InvalidClockIDReturnsEINVAL) {
- struct timespec tp;
- EXPECT_THAT(clock_gettime(-1, &tp), SyscallFailsWithErrno(EINVAL));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
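
JavaThreadTime above uses pthread_getcpuclockid() to obtain a per-thread CPU-time clock and then polls it. A standalone sketch of that lookup-then-read pattern; the spin loop just accumulates enough CPU time to be visible at coarse accounting granularity.

    // Standalone sketch: read the calling thread's CPU-time clock.
    // Build with -pthread.
    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    int main() {
      clockid_t cid;
      if (pthread_getcpuclockid(pthread_self(), &cid) != 0) return 1;

      // Burn some CPU so coarse-grained accounting reports a nonzero value.
      volatile unsigned long sink = 0;
      for (unsigned long i = 0; i < 200000000UL; i++) sink += i;

      struct timespec ts;
      if (clock_gettime(cid, &ts) != 0) { perror("clock_gettime"); return 1; }
      printf("thread CPU time: %ld.%09lds (sink=%lu)\n", (long)ts.tv_sec,
             ts.tv_nsec, (unsigned long)sink);
      return 0;
    }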
diff --git a/test/syscalls/linux/clock_nanosleep.cc b/test/syscalls/linux/clock_nanosleep.cc
deleted file mode 100644
index b55cddc52..000000000
--- a/test/syscalls/linux/clock_nanosleep.cc
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <time.h>
-
-#include <atomic>
-#include <utility>
-
-#include "gtest/gtest.h"
-#include "absl/time/time.h"
-#include "test/util/cleanup.h"
-#include "test/util/posix_error.h"
-#include "test/util/signal_util.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-#include "test/util/timer_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// sys_clock_nanosleep calls the raw syscall because the glibc clock_nanosleep
-// wrapper returns error numbers directly and does not set errno. Without the
-// raw syscall, expecting failure would need the awkward-looking matcher
-// "SyscallSucceedsWithValue(ERRNO)" instead of SyscallFailsWithErrno(ERRNO).
-int sys_clock_nanosleep(clockid_t clkid, int flags,
- const struct timespec* request,
- struct timespec* remain) {
- return syscall(SYS_clock_nanosleep, clkid, flags, request, remain);
-}
-
-PosixErrorOr<absl::Time> GetTime(clockid_t clk) {
- struct timespec ts = {};
- const int rc = clock_gettime(clk, &ts);
- MaybeSave();
- if (rc < 0) {
- return PosixError(errno, "clock_gettime");
- }
- return absl::TimeFromTimespec(ts);
-}
-
-class WallClockNanosleepTest : public ::testing::TestWithParam<clockid_t> {};
-
-TEST_P(WallClockNanosleepTest, InvalidValues) {
- const struct timespec invalid[] = {
- {.tv_sec = -1, .tv_nsec = -1}, {.tv_sec = 0, .tv_nsec = INT32_MIN},
- {.tv_sec = 0, .tv_nsec = INT32_MAX}, {.tv_sec = 0, .tv_nsec = -1},
- {.tv_sec = -1, .tv_nsec = 0},
- };
-
- for (auto const ts : invalid) {
- EXPECT_THAT(sys_clock_nanosleep(GetParam(), 0, &ts, nullptr),
- SyscallFailsWithErrno(EINVAL));
- }
-}
-
-TEST_P(WallClockNanosleepTest, SleepOneSecond) {
- constexpr absl::Duration kSleepDuration = absl::Seconds(1);
- struct timespec duration = absl::ToTimespec(kSleepDuration);
-
- const absl::Time before = ASSERT_NO_ERRNO_AND_VALUE(GetTime(GetParam()));
- EXPECT_THAT(
- RetryEINTR(sys_clock_nanosleep)(GetParam(), 0, &duration, &duration),
- SyscallSucceeds());
- const absl::Time after = ASSERT_NO_ERRNO_AND_VALUE(GetTime(GetParam()));
-
- EXPECT_GE(after - before, kSleepDuration);
-}
-
-TEST_P(WallClockNanosleepTest, InterruptedNanosleep) {
- constexpr absl::Duration kSleepDuration = absl::Seconds(60);
- struct timespec duration = absl::ToTimespec(kSleepDuration);
-
- // Install no-op signal handler for SIGALRM.
- struct sigaction sa = {};
- sigfillset(&sa.sa_mask);
- sa.sa_handler = +[](int signo) {};
- const auto cleanup_sa =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGALRM, sa));
-
- // Measure time since setting the alarm, since the alarm will interrupt the
- // sleep and hence determine how long we sleep.
- const absl::Time before = ASSERT_NO_ERRNO_AND_VALUE(GetTime(GetParam()));
-
- // Set an alarm to go off while sleeping.
- struct itimerval timer = {};
- timer.it_value.tv_sec = 1;
- timer.it_value.tv_usec = 0;
- timer.it_interval.tv_sec = 1;
- timer.it_interval.tv_usec = 0;
- const auto cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedItimer(ITIMER_REAL, timer));
-
- EXPECT_THAT(sys_clock_nanosleep(GetParam(), 0, &duration, &duration),
- SyscallFailsWithErrno(EINTR));
- const absl::Time after = ASSERT_NO_ERRNO_AND_VALUE(GetTime(GetParam()));
-
- // Remaining time updated.
- const absl::Duration remaining = absl::DurationFromTimespec(duration);
- EXPECT_GE(after - before + remaining, kSleepDuration);
-}
-
-// Remaining time is *not* updated if nanosleep completes uninterrupted.
-TEST_P(WallClockNanosleepTest, UninterruptedNanosleep) {
- constexpr absl::Duration kSleepDuration = absl::Milliseconds(10);
- const struct timespec duration = absl::ToTimespec(kSleepDuration);
-
- while (true) {
- constexpr int kRemainingMagic = 42;
- struct timespec remaining;
- remaining.tv_sec = kRemainingMagic;
- remaining.tv_nsec = kRemainingMagic;
-
-    int ret = sys_clock_nanosleep(GetParam(), 0, &duration, &remaining);
-    if (ret == -1 && errno == EINTR) {
-      // Retry from the beginning. We want a single uninterrupted call.
-      continue;
-    }
-
- EXPECT_THAT(ret, SyscallSucceeds());
- EXPECT_EQ(remaining.tv_sec, kRemainingMagic);
- EXPECT_EQ(remaining.tv_nsec, kRemainingMagic);
- break;
- }
-}
-
-TEST_P(WallClockNanosleepTest, SleepUntil) {
- const absl::Time now = ASSERT_NO_ERRNO_AND_VALUE(GetTime(GetParam()));
- const absl::Time until = now + absl::Seconds(2);
- const struct timespec ts = absl::ToTimespec(until);
-
- EXPECT_THAT(
- RetryEINTR(sys_clock_nanosleep)(GetParam(), TIMER_ABSTIME, &ts, nullptr),
- SyscallSucceeds());
- const absl::Time after = ASSERT_NO_ERRNO_AND_VALUE(GetTime(GetParam()));
-
- EXPECT_GE(after, until);
-}
-
-INSTANTIATE_TEST_SUITE_P(Sleepers, WallClockNanosleepTest,
- ::testing::Values(CLOCK_REALTIME, CLOCK_MONOTONIC));
-
-TEST(ClockNanosleepProcessTest, SleepFiveSeconds) {
- const absl::Duration kSleepDuration = absl::Seconds(5);
- struct timespec duration = absl::ToTimespec(kSleepDuration);
-
- // Ensure that CLOCK_PROCESS_CPUTIME_ID advances.
- std::atomic<bool> done(false);
- ScopedThread t([&] {
- while (!done.load()) {
- }
- });
- const auto cleanup_done = Cleanup([&] { done.store(true); });
-
- const absl::Time before =
- ASSERT_NO_ERRNO_AND_VALUE(GetTime(CLOCK_PROCESS_CPUTIME_ID));
- EXPECT_THAT(RetryEINTR(sys_clock_nanosleep)(CLOCK_PROCESS_CPUTIME_ID, 0,
- &duration, &duration),
- SyscallSucceeds());
- const absl::Time after =
- ASSERT_NO_ERRNO_AND_VALUE(GetTime(CLOCK_PROCESS_CPUTIME_ID));
- EXPECT_GE(after - before, kSleepDuration);
-}
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
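
The raw-syscall wrapper above exists because glibc's clock_nanosleep() returns the error number instead of setting errno. A standalone sketch of the absolute-deadline variant exercised by SleepUntil, calling the syscall directly so that failures set errno like any other syscall:

    #include <errno.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <time.h>
    #include <unistd.h>

    int main() {
      struct timespec until;
      if (clock_gettime(CLOCK_MONOTONIC, &until) != 0) return 1;
      until.tv_sec += 2;  // deadline: "now + 2s" on the monotonic clock

      while (syscall(SYS_clock_nanosleep, CLOCK_MONOTONIC, TIMER_ABSTIME, &until,
                     nullptr) != 0) {
        if (errno != EINTR) { perror("clock_nanosleep"); return 1; }
        // With TIMER_ABSTIME the request is an absolute deadline, so retrying
        // with the same timespec after an interruption is correct.
      }
      puts("woke at or after the deadline");
      return 0;
    }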
diff --git a/test/syscalls/linux/concurrency.cc b/test/syscalls/linux/concurrency.cc
deleted file mode 100644
index f2daf49ee..000000000
--- a/test/syscalls/linux/concurrency.cc
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <signal.h>
-
-#include <atomic>
-
-#include "gtest/gtest.h"
-#include "absl/strings/string_view.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "benchmark/benchmark.h"
-#include "test/util/platform_util.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-// Test that a thread that never yields to the OS does not prevent other threads
-// from running.
-TEST(ConcurrencyTest, SingleProcessMultithreaded) {
- std::atomic<int> a(0);
-
- ScopedThread t([&a]() {
- while (!a.load()) {
- }
- });
-
- absl::SleepFor(absl::Seconds(1));
-
- // We are still able to execute code in this thread. The other hasn't
- // permanently hung execution in both threads.
- a.store(1);
-}
-
-// Test that multiple threads in this process continue to execute in parallel,
-// even if an unrelated second process is spawned. Regression test for
-// b/32119508.
-TEST(ConcurrencyTest, MultiProcessMultithreaded) {
- // In PID 1, start TIDs 1 and 2, and put both to sleep.
- //
- // Start PID 3, which spins for 5 seconds, then exits.
- //
- // TIDs 1 and 2 wake and attempt to Activate, which cannot occur until PID 3
- // exits.
- //
- // Both TIDs 1 and 2 should be woken. If they are not both woken, the test
- // hangs.
- //
- // This is all fundamentally racy. If we are failing to wake all threads, the
- // expectation is that this test becomes flaky, rather than consistently
- // failing.
- //
- // If additional background threads fail to block, we may never schedule the
- // child, at which point this test effectively becomes
- // MultiProcessConcurrency. That's not expected to occur.
-
- std::atomic<int> a(0);
- ScopedThread t([&a]() {
- // Block so that PID 3 can execute and we can wait on its exit.
- absl::SleepFor(absl::Seconds(1));
- while (!a.load()) {
- }
- });
-
- pid_t child_pid = fork();
- if (child_pid == 0) {
- // Busy wait without making any blocking syscalls.
- auto end = absl::Now() + absl::Seconds(5);
- while (absl::Now() < end) {
- }
- _exit(0);
- }
- ASSERT_THAT(child_pid, SyscallSucceeds());
-
- absl::SleepFor(absl::Seconds(1));
-
- // If only TID 1 is woken, thread.Join will hang.
- // If only TID 2 is woken, both will hang.
- a.store(1);
- t.Join();
-
- int status = 0;
- EXPECT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status));
- EXPECT_EQ(WEXITSTATUS(status), 0);
-}
-
-// Test that multiple processes can execute concurrently, even if one process
-// never yields.
-TEST(ConcurrencyTest, MultiProcessConcurrency) {
- SKIP_IF(PlatformSupportMultiProcess() == PlatformSupport::NotSupported);
-
- pid_t child_pid = fork();
- if (child_pid == 0) {
- while (true) {
- int x = 0;
- benchmark::DoNotOptimize(x); // Don't optimize this loop away.
- }
- }
- ASSERT_THAT(child_pid, SyscallSucceeds());
-
- absl::SleepFor(absl::Seconds(5));
-
- // We are still able to execute code in this process. The other hasn't
- // permanently hung execution in both processes.
- ASSERT_THAT(kill(child_pid, SIGKILL), SyscallSucceeds());
- int status = 0;
-
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0), SyscallSucceeds());
- ASSERT_TRUE(WIFSIGNALED(status));
- ASSERT_EQ(WTERMSIG(status), SIGKILL);
-}
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/connect_external.cc b/test/syscalls/linux/connect_external.cc
deleted file mode 100644
index 1edb50e47..000000000
--- a/test/syscalls/linux/connect_external.cc
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <stdlib.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <sys/un.h>
-
-#include <string>
-#include <tuple>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/test_util.h"
-
-// This file contains tests specific to connecting to host UDS managed outside
-// the sandbox / test.
-//
-// A set of utility sockets will be created externally in $TEST_UDS_TREE and
-// $TEST_UDS_ATTACH_TREE for these tests to interact with.
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-struct ProtocolSocket {
- int protocol;
- std::string name;
-};
-
-// Parameter is (socket root dir, ProtocolSocket).
-using GoferStreamSeqpacketTest =
- ::testing::TestWithParam<std::tuple<std::string, ProtocolSocket>>;
-
-// Connect to an "echo" socket and verify that write/read both work: the
-// external server reads whatever we send and writes it back to us.
-TEST_P(GoferStreamSeqpacketTest, Echo) {
- std::string env;
- ProtocolSocket proto;
- std::tie(env, proto) = GetParam();
-
- char* val = getenv(env.c_str());
- ASSERT_NE(val, nullptr);
- std::string root(val);
-
- FileDescriptor sock =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_UNIX, proto.protocol, 0));
-
- std::string socket_path = JoinPath(root, proto.name, "echo");
-
- struct sockaddr_un addr = {};
- addr.sun_family = AF_UNIX;
- memcpy(addr.sun_path, socket_path.c_str(), socket_path.length());
-
- ASSERT_THAT(connect(sock.get(), reinterpret_cast<struct sockaddr*>(&addr),
- sizeof(addr)),
- SyscallSucceeds());
-
- constexpr int kBufferSize = 64;
- char send_buffer[kBufferSize];
- memset(send_buffer, 'a', sizeof(send_buffer));
-
- ASSERT_THAT(WriteFd(sock.get(), send_buffer, sizeof(send_buffer)),
- SyscallSucceedsWithValue(sizeof(send_buffer)));
-
- char recv_buffer[kBufferSize];
- ASSERT_THAT(ReadFd(sock.get(), recv_buffer, sizeof(recv_buffer)),
- SyscallSucceedsWithValue(sizeof(recv_buffer)));
- ASSERT_EQ(0, memcmp(send_buffer, recv_buffer, sizeof(send_buffer)));
-}
-
-// It is not possible to connect to a bound but non-listening socket.
-TEST_P(GoferStreamSeqpacketTest, NonListening) {
- std::string env;
- ProtocolSocket proto;
- std::tie(env, proto) = GetParam();
-
- char* val = getenv(env.c_str());
- ASSERT_NE(val, nullptr);
- std::string root(val);
-
- FileDescriptor sock =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_UNIX, proto.protocol, 0));
-
- std::string socket_path = JoinPath(root, proto.name, "nonlistening");
-
- struct sockaddr_un addr = {};
- addr.sun_family = AF_UNIX;
- memcpy(addr.sun_path, socket_path.c_str(), socket_path.length());
-
- ASSERT_THAT(connect(sock.get(), reinterpret_cast<struct sockaddr*>(&addr),
- sizeof(addr)),
- SyscallFailsWithErrno(ECONNREFUSED));
-}
-
-INSTANTIATE_TEST_SUITE_P(
- StreamSeqpacket, GoferStreamSeqpacketTest,
- ::testing::Combine(
- // Test access via standard path and attach point.
- ::testing::Values("TEST_UDS_TREE", "TEST_UDS_ATTACH_TREE"),
- ::testing::Values(ProtocolSocket{SOCK_STREAM, "stream"},
- ProtocolSocket{SOCK_SEQPACKET, "seqpacket"})));
-
-// Parameter is socket root dir.
-using GoferDgramTest = ::testing::TestWithParam<std::string>;
-
-// Connect to a socket and verify that write works.
-//
-// An "echo" socket doesn't work for dgram sockets because our socket is
-// unnamed. The server thus has no way to reply to us.
-TEST_P(GoferDgramTest, Null) {
- std::string env = GetParam();
- char* val = getenv(env.c_str());
- ASSERT_NE(val, nullptr);
- std::string root(val);
-
- FileDescriptor sock =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_UNIX, SOCK_DGRAM, 0));
-
- std::string socket_path = JoinPath(root, "dgram/null");
-
- struct sockaddr_un addr = {};
- addr.sun_family = AF_UNIX;
- memcpy(addr.sun_path, socket_path.c_str(), socket_path.length());
-
- ASSERT_THAT(connect(sock.get(), reinterpret_cast<struct sockaddr*>(&addr),
- sizeof(addr)),
- SyscallSucceeds());
-
- constexpr int kBufferSize = 64;
- char send_buffer[kBufferSize];
- memset(send_buffer, 'a', sizeof(send_buffer));
-
- ASSERT_THAT(WriteFd(sock.get(), send_buffer, sizeof(send_buffer)),
- SyscallSucceedsWithValue(sizeof(send_buffer)));
-}
-
-INSTANTIATE_TEST_SUITE_P(Dgram, GoferDgramTest,
- // Test access via standard path and attach point.
- ::testing::Values("TEST_UDS_TREE",
- "TEST_UDS_ATTACH_TREE"));
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
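
For reference, this is the client side of the AF_UNIX connect pattern used by all of these tests. Because the client never binds, it has no address of its own, which is why the datagram variants can only write to a "null" server rather than using the echo server. "/tmp/demo.sock" is only a placeholder path.

    // Standalone sketch: connect an unnamed AF_UNIX stream socket and send a
    // message. ECONNREFUSED here corresponds to the NonListening test above.
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/un.h>
    #include <unistd.h>

    int main() {
      int sock = socket(AF_UNIX, SOCK_STREAM, 0);
      if (sock < 0) { perror("socket"); return 1; }

      struct sockaddr_un addr = {};
      addr.sun_family = AF_UNIX;
      strncpy(addr.sun_path, "/tmp/demo.sock", sizeof(addr.sun_path) - 1);

      if (connect(sock, reinterpret_cast<struct sockaddr*>(&addr),
                  sizeof(addr)) != 0) {
        perror("connect");  // e.g. ECONNREFUSED if bound but not listening
        close(sock);
        return 1;
      }

      const char msg[] = "hello";
      write(sock, msg, sizeof(msg) - 1);
      close(sock);
      return 0;
    }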
diff --git a/test/syscalls/linux/creat.cc b/test/syscalls/linux/creat.cc
deleted file mode 100644
index 3c270d6da..000000000
--- a/test/syscalls/linux/creat.cc
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-
-#include <string>
-
-#include "gtest/gtest.h"
-#include "test/util/fs_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-constexpr int kMode = 0666;
-
-TEST(CreatTest, CreatCreatesNewFile) {
- std::string const path = NewTempAbsPath();
- struct stat buf;
- int fd;
- ASSERT_THAT(stat(path.c_str(), &buf), SyscallFailsWithErrno(ENOENT));
- ASSERT_THAT(fd = creat(path.c_str(), kMode), SyscallSucceeds());
- EXPECT_THAT(close(fd), SyscallSucceeds());
- EXPECT_THAT(stat(path.c_str(), &buf), SyscallSucceeds());
-}
-
-TEST(CreatTest, CreatTruncatesExistingFile) {
- auto temp_path = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- int fd;
- ASSERT_NO_ERRNO(SetContents(temp_path.path(), "non-empty"));
- ASSERT_THAT(fd = creat(temp_path.path().c_str(), kMode), SyscallSucceeds());
- EXPECT_THAT(close(fd), SyscallSucceeds());
- std::string new_contents;
- ASSERT_NO_ERRNO(GetContents(temp_path.path(), &new_contents));
- EXPECT_EQ("", new_contents);
-}
-
-TEST(CreatTest, CreatWithNameTooLong) {
-  // Start with a unique name, and pad it to NAME_MAX + 1 characters.
- std::string name = NewTempRelPath();
- int padding = (NAME_MAX + 1) - name.size();
- name.append(padding, 'x');
- const std::string& path = JoinPath(GetAbsoluteTestTmpdir(), name);
-
- // Creation should return ENAMETOOLONG.
- ASSERT_THAT(creat(path.c_str(), kMode), SyscallFailsWithErrno(ENAMETOOLONG));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
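
The truncation behavior these tests check follows from creat(path, mode) being shorthand for open(path, O_CREAT | O_WRONLY | O_TRUNC, mode). A standalone sketch, with "/tmp/creat_demo" as a placeholder path:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main() {
      const char* path = "/tmp/creat_demo";

      int fd = creat(path, 0666);
      if (fd < 0) { perror("creat"); return 1; }
      write(fd, "first", 5);
      close(fd);

      // "Creating" the same file again truncates it back to zero length.
      fd = open(path, O_CREAT | O_WRONLY | O_TRUNC, 0666);
      if (fd < 0) { perror("open"); return 1; }
      printf("size after truncating open: %lld\n", (long long)lseek(fd, 0, SEEK_END));
      close(fd);
      unlink(path);
      return 0;
    }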
diff --git a/test/syscalls/linux/dev.cc b/test/syscalls/linux/dev.cc
deleted file mode 100644
index 32860aa21..000000000
--- a/test/syscalls/linux/dev.cc
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <vector>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(DevTest, LseekDevUrandom) {
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/urandom", O_RDONLY));
- EXPECT_THAT(lseek(fd.get(), -10, SEEK_CUR), SyscallSucceeds());
- EXPECT_THAT(lseek(fd.get(), -10, SEEK_SET), SyscallSucceeds());
- EXPECT_THAT(lseek(fd.get(), 0, SEEK_CUR), SyscallSucceeds());
-}
-
-TEST(DevTest, LseekDevNull) {
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/null", O_RDONLY));
- EXPECT_THAT(lseek(fd.get(), -10, SEEK_CUR), SyscallSucceeds());
- EXPECT_THAT(lseek(fd.get(), -10, SEEK_SET), SyscallSucceeds());
- EXPECT_THAT(lseek(fd.get(), 0, SEEK_CUR), SyscallSucceeds());
- EXPECT_THAT(lseek(fd.get(), 0, SEEK_END), SyscallSucceeds());
-}
-
-TEST(DevTest, LseekDevZero) {
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/zero", O_RDONLY));
- EXPECT_THAT(lseek(fd.get(), 0, SEEK_CUR), SyscallSucceeds());
- EXPECT_THAT(lseek(fd.get(), 0, SEEK_END), SyscallSucceeds());
-}
-
-TEST(DevTest, LseekDevFull) {
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/full", O_RDONLY));
- EXPECT_THAT(lseek(fd.get(), 123, SEEK_SET), SyscallSucceedsWithValue(0));
- EXPECT_THAT(lseek(fd.get(), 123, SEEK_CUR), SyscallSucceedsWithValue(0));
- EXPECT_THAT(lseek(fd.get(), 123, SEEK_END), SyscallSucceedsWithValue(0));
-}
-
-TEST(DevTest, LseekDevNullFreshFile) {
- // Seeks to /dev/null always return 0.
- const FileDescriptor fd1 =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/null", O_RDONLY));
- const FileDescriptor fd2 =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/null", O_RDONLY));
-
- EXPECT_THAT(lseek(fd1.get(), 0, SEEK_CUR), SyscallSucceedsWithValue(0));
- EXPECT_THAT(lseek(fd1.get(), 1000, SEEK_CUR), SyscallSucceedsWithValue(0));
- EXPECT_THAT(lseek(fd2.get(), 0, SEEK_CUR), SyscallSucceedsWithValue(0));
-
- const FileDescriptor fd3 =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/null", O_RDONLY));
- EXPECT_THAT(lseek(fd3.get(), 0, SEEK_CUR), SyscallSucceedsWithValue(0));
-}
-
-TEST(DevTest, OpenTruncate) {
-  // Truncation is ignored on Linux and gVisor for device files.
- ASSERT_NO_ERRNO_AND_VALUE(
- Open("/dev/null", O_CREAT | O_TRUNC | O_WRONLY, 0644));
- ASSERT_NO_ERRNO_AND_VALUE(
- Open("/dev/zero", O_CREAT | O_TRUNC | O_WRONLY, 0644));
- ASSERT_NO_ERRNO_AND_VALUE(
- Open("/dev/full", O_CREAT | O_TRUNC | O_WRONLY, 0644));
-}
-
-TEST(DevTest, Pread64DevNull) {
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/null", O_RDONLY));
- char buf[1];
- EXPECT_THAT(pread64(fd.get(), buf, 1, 0), SyscallSucceedsWithValue(0));
-}
-
-TEST(DevTest, Pread64DevZero) {
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/zero", O_RDONLY));
- char buf[1];
- EXPECT_THAT(pread64(fd.get(), buf, 1, 0), SyscallSucceedsWithValue(1));
-}
-
-TEST(DevTest, Pread64DevFull) {
- // /dev/full behaves like /dev/zero with respect to reads.
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/full", O_RDONLY));
- char buf[1];
- EXPECT_THAT(pread64(fd.get(), buf, 1, 0), SyscallSucceedsWithValue(1));
-}
-
-TEST(DevTest, ReadDevNull) {
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/null", O_RDONLY));
- std::vector<char> buf(1);
- EXPECT_THAT(ReadFd(fd.get(), buf.data(), 1), SyscallSucceeds());
-}
-
-// Do not allow random save as it could lead to partial reads.
-TEST(DevTest, ReadDevZero) {
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/zero", O_RDONLY));
-
- constexpr int kReadSize = 128 * 1024;
- std::vector<char> buf(kReadSize, 1);
- EXPECT_THAT(ReadFd(fd.get(), buf.data(), kReadSize),
- SyscallSucceedsWithValue(kReadSize));
- EXPECT_EQ(std::vector<char>(kReadSize, 0), buf);
-}
-
-TEST(DevTest, WriteDevNull) {
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/null", O_WRONLY));
- EXPECT_THAT(WriteFd(fd.get(), "a", 1), SyscallSucceedsWithValue(1));
-}
-
-TEST(DevTest, WriteDevZero) {
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/zero", O_WRONLY));
- EXPECT_THAT(WriteFd(fd.get(), "a", 1), SyscallSucceedsWithValue(1));
-}
-
-TEST(DevTest, WriteDevFull) {
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/full", O_WRONLY));
- EXPECT_THAT(WriteFd(fd.get(), "a", 1), SyscallFailsWithErrno(ENOSPC));
-}
-
-TEST(DevTest, TTYExists) {
- struct stat statbuf = {};
- ASSERT_THAT(stat("/dev/tty", &statbuf), SyscallSucceeds());
- // Check that it's a character device with rw-rw-rw- permissions.
- EXPECT_EQ(statbuf.st_mode, S_IFCHR | 0666);
-}
-
-TEST(DevTest, OpenDevFuse) {
- // Note(gvisor.dev/issue/3076) This won't work in the sentry until the new
- // device registration is complete.
- SKIP_IF(IsRunningWithVFS1() || IsRunningOnGvisor() || !IsFUSEEnabled());
-
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/fuse", O_RDONLY));
-}
-
-TEST(DevTest, ReadDevFuseWithoutMount) {
- // Note(gvisor.dev/issue/3076) This won't work in the sentry until the new
- // device registration is complete.
- SKIP_IF(IsRunningWithVFS1() || IsRunningOnGvisor());
-
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/fuse", O_RDONLY));
-
- std::vector<char> buf(1);
- EXPECT_THAT(ReadFd(fd.get(), buf.data(), sizeof(buf)),
- SyscallFailsWithErrno(EPERM));
-}
-
-} // namespace
-} // namespace testing
-
-} // namespace gvisor
diff --git a/test/syscalls/linux/dup.cc b/test/syscalls/linux/dup.cc
deleted file mode 100644
index ba4e13fb9..000000000
--- a/test/syscalls/linux/dup.cc
+++ /dev/null
@@ -1,188 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "test/util/eventfd_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-PosixErrorOr<FileDescriptor> Dup2(const FileDescriptor& fd, int target_fd) {
- int new_fd = dup2(fd.get(), target_fd);
- if (new_fd < 0) {
- return PosixError(errno, "Dup2");
- }
- return FileDescriptor(new_fd);
-}
-
-PosixErrorOr<FileDescriptor> Dup3(const FileDescriptor& fd, int target_fd,
- int flags) {
- int new_fd = dup3(fd.get(), target_fd, flags);
- if (new_fd < 0) {
- return PosixError(errno, "Dup3");
- }
- return FileDescriptor(new_fd);
-}
-
-TEST(DupTest, Dup) {
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(f.path(), O_RDONLY));
-
- // Dup the descriptor and make sure it's the same file.
- FileDescriptor nfd = ASSERT_NO_ERRNO_AND_VALUE(fd.Dup());
- ASSERT_NE(fd.get(), nfd.get());
- ASSERT_NO_ERRNO(CheckSameFile(fd, nfd));
-}
-
-TEST(DupTest, DupClearsCloExec) {
- // Open an eventfd file descriptor with FD_CLOEXEC descriptor flag set.
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, EFD_CLOEXEC));
- EXPECT_THAT(fcntl(fd.get(), F_GETFD), SyscallSucceedsWithValue(FD_CLOEXEC));
-
- // Duplicate the descriptor. Ensure that the duplicate doesn't have FD_CLOEXEC
- // set (contrast with the F_DUPFD_CLOEXEC sketch after this test).
- FileDescriptor nfd = ASSERT_NO_ERRNO_AND_VALUE(fd.Dup());
- ASSERT_NE(fd.get(), nfd.get());
- ASSERT_NO_ERRNO(CheckSameFile(fd, nfd));
- EXPECT_THAT(fcntl(nfd.get(), F_GETFD), SyscallSucceedsWithValue(0));
-}
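-
-// A contrast sketch added for illustration: fcntl(F_DUPFD_CLOEXEC) duplicates
-// a descriptor *with* FD_CLOEXEC set, unlike dup()/dup2()/dup3(..., 0). It
-// reuses the NewEventFD and CheckSameFile helpers already used in this file.
-TEST(DupTest, DupFDCloexecSketch) {
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, EFD_CLOEXEC));
-
- int raw_fd;
- ASSERT_THAT(raw_fd = fcntl(fd.get(), F_DUPFD_CLOEXEC, 0), SyscallSucceeds());
- FileDescriptor nfd(raw_fd);
-
- ASSERT_NE(fd.get(), nfd.get());
- ASSERT_NO_ERRNO(CheckSameFile(fd, nfd));
- EXPECT_THAT(fcntl(nfd.get(), F_GETFD), SyscallSucceedsWithValue(FD_CLOEXEC));
-}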
-
-TEST(DupTest, DupWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(f.path(), O_PATH));
- int flags;
- ASSERT_THAT(flags = fcntl(fd.get(), F_GETFL), SyscallSucceeds());
-
- // Dup the descriptor and make sure it's the same file.
- FileDescriptor nfd = ASSERT_NO_ERRNO_AND_VALUE(fd.Dup());
- ASSERT_NE(fd.get(), nfd.get());
- ASSERT_NO_ERRNO(CheckSameFile(fd, nfd));
- EXPECT_THAT(fcntl(nfd.get(), F_GETFL), SyscallSucceedsWithValue(flags));
-}
-
-TEST(DupTest, Dup2) {
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(f.path(), O_RDONLY));
-
- // Regular dup once.
- FileDescriptor nfd = ASSERT_NO_ERRNO_AND_VALUE(fd.Dup());
-
- ASSERT_NE(fd.get(), nfd.get());
- ASSERT_NO_ERRNO(CheckSameFile(fd, nfd));
-
- // Dup over the file above.
- int target_fd = nfd.release();
- FileDescriptor nfd2 = ASSERT_NO_ERRNO_AND_VALUE(Dup2(fd, target_fd));
- EXPECT_EQ(target_fd, nfd2.get());
- ASSERT_NO_ERRNO(CheckSameFile(fd, nfd2));
-}
-
-TEST(DupTest, Dup2SameFD) {
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(f.path(), O_RDONLY));
-
- // Should succeed.
- ASSERT_THAT(dup2(fd.get(), fd.get()), SyscallSucceedsWithValue(fd.get()));
-}
-
-TEST(DupTest, Dup2WithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(f.path(), O_PATH));
- int flags;
- ASSERT_THAT(flags = fcntl(fd.get(), F_GETFL), SyscallSucceeds());
-
- // Regular dup once.
- FileDescriptor nfd = ASSERT_NO_ERRNO_AND_VALUE(fd.Dup());
-
- ASSERT_NE(fd.get(), nfd.get());
- ASSERT_NO_ERRNO(CheckSameFile(fd, nfd));
- EXPECT_THAT(fcntl(nfd.get(), F_GETFL), SyscallSucceedsWithValue(flags));
-
- // Dup over the file above.
- int target_fd = nfd.release();
- FileDescriptor nfd2 = ASSERT_NO_ERRNO_AND_VALUE(Dup2(fd, target_fd));
- EXPECT_EQ(target_fd, nfd2.get());
- ASSERT_NO_ERRNO(CheckSameFile(fd, nfd2));
- EXPECT_THAT(fcntl(nfd2.get(), F_GETFL), SyscallSucceedsWithValue(flags));
-}
-
-TEST(DupTest, Dup3) {
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(f.path(), O_RDONLY));
-
- // Regular dup once.
- FileDescriptor nfd = ASSERT_NO_ERRNO_AND_VALUE(fd.Dup());
- ASSERT_NE(fd.get(), nfd.get());
- ASSERT_NO_ERRNO(CheckSameFile(fd, nfd));
-
- // Dup over the file above; check that the new descriptor has no FD_CLOEXEC.
- nfd = ASSERT_NO_ERRNO_AND_VALUE(Dup3(fd, nfd.release(), 0));
- ASSERT_NO_ERRNO(CheckSameFile(fd, nfd));
- EXPECT_THAT(fcntl(nfd.get(), F_GETFD), SyscallSucceedsWithValue(0));
-
- // Dup over the file again with O_CLOEXEC; check that the new descriptor has
- // FD_CLOEXEC set.
- nfd = ASSERT_NO_ERRNO_AND_VALUE(Dup3(fd, nfd.release(), O_CLOEXEC));
- ASSERT_NO_ERRNO(CheckSameFile(fd, nfd));
- EXPECT_THAT(fcntl(nfd.get(), F_GETFD), SyscallSucceedsWithValue(FD_CLOEXEC));
-}
-
-TEST(DupTest, Dup3FailsSameFD) {
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(f.path(), O_RDONLY));
-
- // Unlike dup2, dup3 fails if the new and old fds are the same.
- ASSERT_THAT(dup3(fd.get(), fd.get(), 0), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(DupTest, Dup3WithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(f.path(), O_PATH));
- EXPECT_THAT(fcntl(fd.get(), F_GETFD), SyscallSucceedsWithValue(0));
- int flags;
- ASSERT_THAT(flags = fcntl(fd.get(), F_GETFL), SyscallSucceeds());
-
- // Regular dup once.
- FileDescriptor nfd = ASSERT_NO_ERRNO_AND_VALUE(fd.Dup());
- ASSERT_NE(fd.get(), nfd.get());
- ASSERT_NO_ERRNO(CheckSameFile(fd, nfd));
-
- // Dup over the file above; check that the new descriptor has no FD_CLOEXEC.
- nfd = ASSERT_NO_ERRNO_AND_VALUE(Dup3(fd, nfd.release(), 0));
- ASSERT_NO_ERRNO(CheckSameFile(fd, nfd));
- EXPECT_THAT(fcntl(nfd.get(), F_GETFD), SyscallSucceedsWithValue(0));
- EXPECT_THAT(fcntl(nfd.get(), F_GETFL), SyscallSucceedsWithValue(flags));
-
- // Dup over the file again with O_CLOEXEC; check that the new descriptor has
- // FD_CLOEXEC set.
- nfd = ASSERT_NO_ERRNO_AND_VALUE(Dup3(fd, nfd.release(), O_CLOEXEC));
- ASSERT_NO_ERRNO(CheckSameFile(fd, nfd));
- EXPECT_THAT(fcntl(nfd.get(), F_GETFD), SyscallSucceedsWithValue(FD_CLOEXEC));
- EXPECT_THAT(fcntl(nfd.get(), F_GETFL), SyscallSucceedsWithValue(flags));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/epoll.cc b/test/syscalls/linux/epoll.cc
deleted file mode 100644
index 3ef8b0327..000000000
--- a/test/syscalls/linux/epoll.cc
+++ /dev/null
@@ -1,502 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <limits.h>
-#include <pthread.h>
-#include <signal.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <string.h>
-#include <sys/epoll.h>
-#include <sys/eventfd.h>
-#include <time.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "test/util/epoll_util.h"
-#include "test/util/eventfd_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/posix_error.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-constexpr int kFDsPerEpoll = 3;
-constexpr uint64_t kMagicConstant = 0x0102030405060708;
-
-#ifndef SYS_epoll_pwait2
-#define SYS_epoll_pwait2 441
-#endif
-
-int epoll_pwait2(int fd, struct epoll_event* events, int maxevents,
- const struct timespec* timeout, const sigset_t* sigset) {
- return syscall(SYS_epoll_pwait2, fd, events, maxevents, timeout, sigset);
-}
-
-TEST(EpollTest, AllWritable) {
- auto epollfd = ASSERT_NO_ERRNO_AND_VALUE(NewEpollFD());
- std::vector<FileDescriptor> eventfds;
- for (int i = 0; i < kFDsPerEpoll; i++) {
- eventfds.push_back(ASSERT_NO_ERRNO_AND_VALUE(NewEventFD()));
- ASSERT_NO_ERRNO(RegisterEpollFD(epollfd.get(), eventfds[i].get(),
- EPOLLIN | EPOLLOUT, kMagicConstant + i));
- }
-
- struct epoll_event result[kFDsPerEpoll];
- ASSERT_THAT(RetryEINTR(epoll_wait)(epollfd.get(), result, kFDsPerEpoll, -1),
- SyscallSucceedsWithValue(kFDsPerEpoll));
- for (int i = 0; i < kFDsPerEpoll; i++) {
- ASSERT_EQ(result[i].events, EPOLLOUT);
- }
-}
-
-TEST(EpollTest, LastReadable) {
- auto epollfd = ASSERT_NO_ERRNO_AND_VALUE(NewEpollFD());
- std::vector<FileDescriptor> eventfds;
- for (int i = 0; i < kFDsPerEpoll; i++) {
- eventfds.push_back(ASSERT_NO_ERRNO_AND_VALUE(NewEventFD()));
- ASSERT_NO_ERRNO(RegisterEpollFD(epollfd.get(), eventfds[i].get(),
- EPOLLIN | EPOLLOUT, kMagicConstant + i));
- }
-
- uint64_t tmp = 1;
- ASSERT_THAT(WriteFd(eventfds[kFDsPerEpoll - 1].get(), &tmp, sizeof(tmp)),
- SyscallSucceedsWithValue(sizeof(tmp)));
-
- struct epoll_event result[kFDsPerEpoll];
- ASSERT_THAT(RetryEINTR(epoll_wait)(epollfd.get(), result, kFDsPerEpoll, -1),
- SyscallSucceedsWithValue(kFDsPerEpoll));
-
- int i;
- for (i = 0; i < kFDsPerEpoll - 1; i++) {
- EXPECT_EQ(result[i].events, EPOLLOUT);
- }
- EXPECT_EQ(result[i].events, EPOLLOUT | EPOLLIN);
- EXPECT_EQ(result[i].data.u64, kMagicConstant + i);
-}
-
-TEST(EpollTest, LastNonWritable) {
- auto epollfd = ASSERT_NO_ERRNO_AND_VALUE(NewEpollFD());
- std::vector<FileDescriptor> eventfds;
- for (int i = 0; i < kFDsPerEpoll; i++) {
- eventfds.push_back(ASSERT_NO_ERRNO_AND_VALUE(NewEventFD()));
- ASSERT_NO_ERRNO(RegisterEpollFD(epollfd.get(), eventfds[i].get(),
- EPOLLIN | EPOLLOUT, kMagicConstant + i));
- }
-
- // Write the maximum counter value to the event fd so that writing to it
- // again would block (see the saturation sketch after this test).
- uint64_t tmp = ULLONG_MAX - 1;
- ASSERT_THAT(WriteFd(eventfds[kFDsPerEpoll - 1].get(), &tmp, sizeof(tmp)),
- SyscallSucceedsWithValue(sizeof(tmp)));
-
- struct epoll_event result[kFDsPerEpoll];
- ASSERT_THAT(RetryEINTR(epoll_wait)(epollfd.get(), result, kFDsPerEpoll, -1),
- SyscallSucceedsWithValue(kFDsPerEpoll));
-
- int i;
- for (i = 0; i < kFDsPerEpoll - 1; i++) {
- EXPECT_EQ(result[i].events, EPOLLOUT);
- }
- EXPECT_EQ(result[i].events, EPOLLIN);
- EXPECT_THAT(ReadFd(eventfds[kFDsPerEpoll - 1].get(), &tmp, sizeof(tmp)),
- SyscallSucceedsWithValue(sizeof(tmp)));
- EXPECT_THAT(RetryEINTR(epoll_wait)(epollfd.get(), result, kFDsPerEpoll, -1),
- SyscallSucceedsWithValue(kFDsPerEpoll));
-
- for (i = 0; i < kFDsPerEpoll; i++) {
- EXPECT_EQ(result[i].events, EPOLLOUT);
- }
-}
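-
-// A short sketch of the eventfd counter semantics relied on above: writing
-// ULLONG_MAX - 1 fills the counter, so a further non-blocking write fails
-// with EAGAIN (a blocking write would block until the counter is drained).
-TEST(EpollTest, EventfdCounterSaturationSketch) {
- auto eventfd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, EFD_NONBLOCK));
-
- uint64_t tmp = ULLONG_MAX - 1;
- ASSERT_THAT(WriteFd(eventfd.get(), &tmp, sizeof(tmp)),
- SyscallSucceedsWithValue(sizeof(tmp)));
-
- uint64_t one = 1;
- EXPECT_THAT(WriteFd(eventfd.get(), &one, sizeof(one)),
- SyscallFailsWithErrno(EAGAIN));
-
- // Draining the counter with a read makes the eventfd writable again.
- ASSERT_THAT(ReadFd(eventfd.get(), &tmp, sizeof(tmp)),
- SyscallSucceedsWithValue(sizeof(tmp)));
- EXPECT_THAT(WriteFd(eventfd.get(), &one, sizeof(one)),
- SyscallSucceedsWithValue(sizeof(one)));
-}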
-
-TEST(EpollTest, Timeout) {
- auto epollfd = ASSERT_NO_ERRNO_AND_VALUE(NewEpollFD());
- std::vector<FileDescriptor> eventfds;
- for (int i = 0; i < kFDsPerEpoll; i++) {
- eventfds.push_back(ASSERT_NO_ERRNO_AND_VALUE(NewEventFD()));
- ASSERT_NO_ERRNO(RegisterEpollFD(epollfd.get(), eventfds[i].get(), EPOLLIN,
- kMagicConstant + i));
- }
-
- constexpr int kTimeoutMs = 200;
- struct timespec begin;
- struct timespec end;
- struct epoll_event result[kFDsPerEpoll];
-
- {
- const DisableSave ds; // Timing-related.
- EXPECT_THAT(clock_gettime(CLOCK_MONOTONIC, &begin), SyscallSucceeds());
-
- ASSERT_THAT(
- RetryEINTR(epoll_wait)(epollfd.get(), result, kFDsPerEpoll, kTimeoutMs),
- SyscallSucceedsWithValue(0));
- EXPECT_THAT(clock_gettime(CLOCK_MONOTONIC, &end), SyscallSucceeds());
- }
-
- // Check the lower bound on the timeout (see the elapsed-time sketch after
- // this test). Checking for an upper bound is fragile because Linux can
- // overrun the timeout due to scheduling delays.
- EXPECT_GT(ms_elapsed(begin, end), kTimeoutMs - 1);
-}
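-
-// The check above relies on an elapsed-time helper from the test utilities.
-// A minimal sketch of the assumed arithmetic (hypothetical name):
-inline int64_t ElapsedMsSketch(const struct timespec& begin,
- const struct timespec& end) {
- return (end.tv_sec - begin.tv_sec) * 1000 +
- (end.tv_nsec - begin.tv_nsec) / 1000000;
-}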
-
-TEST(EpollTest, EpollPwait2Timeout) {
- auto epollfd = ASSERT_NO_ERRNO_AND_VALUE(NewEpollFD());
- // 200 milliseconds.
- constexpr int kTimeoutNs = 200000000;
- struct timespec timeout;
- timeout.tv_sec = 0;
- timeout.tv_nsec = 0;
- struct timespec begin;
- struct timespec end;
- struct epoll_event result[kFDsPerEpoll];
-
- std::vector<FileDescriptor> eventfds;
- for (int i = 0; i < kFDsPerEpoll; i++) {
- eventfds.push_back(ASSERT_NO_ERRNO_AND_VALUE(NewEventFD()));
- ASSERT_NO_ERRNO(RegisterEpollFD(epollfd.get(), eventfds[i].get(), EPOLLIN,
- kMagicConstant + i));
- }
-
- // Pass valid arguments so that the syscall won't be blocked indefinitely
- // nor return errno EINVAL.
- //
- // The syscall returns immediately when timeout is zero,
- // even if no events are available.
- SKIP_IF(!IsRunningOnGvisor() &&
- epoll_pwait2(epollfd.get(), result, kFDsPerEpoll, &timeout, nullptr) <
- 0 &&
- errno == ENOSYS);
-
- {
- const DisableSave ds; // Timing-related.
- EXPECT_THAT(clock_gettime(CLOCK_MONOTONIC, &begin), SyscallSucceeds());
-
- timeout.tv_nsec = kTimeoutNs;
- ASSERT_THAT(RetryEINTR(epoll_pwait2)(epollfd.get(), result, kFDsPerEpoll,
- &timeout, nullptr),
- SyscallSucceedsWithValue(0));
- EXPECT_THAT(clock_gettime(CLOCK_MONOTONIC, &end), SyscallSucceeds());
- }
-
- // Check the lower bound on the timeout. Checking for an upper bound is
- // fragile because Linux can overrun the timeout due to scheduling delays.
- EXPECT_GT(ns_elapsed(begin, end), kTimeoutNs - 1);
-}
-
-void* writer(void* arg) {
- int fd = *reinterpret_cast<int*>(arg);
- uint64_t tmp = 1;
-
- usleep(200000);
- if (WriteFd(fd, &tmp, sizeof(tmp)) != sizeof(tmp)) {
- fprintf(stderr, "writer failed: errno %s\n", strerror(errno));
- }
-
- return nullptr;
-}
-
-TEST(EpollTest, WaitThenUnblock) {
- auto epollfd = ASSERT_NO_ERRNO_AND_VALUE(NewEpollFD());
- std::vector<FileDescriptor> eventfds;
- for (int i = 0; i < kFDsPerEpoll; i++) {
- eventfds.push_back(ASSERT_NO_ERRNO_AND_VALUE(NewEventFD()));
- ASSERT_NO_ERRNO(RegisterEpollFD(epollfd.get(), eventfds[i].get(), EPOLLIN,
- kMagicConstant + i));
- }
-
- // Fire off a thread that will make at least one of the event fds readable.
- pthread_t thread;
- int make_readable = eventfds[0].get();
- ASSERT_THAT(pthread_create(&thread, nullptr, writer, &make_readable),
- SyscallSucceedsWithValue(0));
-
- struct epoll_event result[kFDsPerEpoll];
- EXPECT_THAT(RetryEINTR(epoll_wait)(epollfd.get(), result, kFDsPerEpoll, -1),
- SyscallSucceedsWithValue(1));
- EXPECT_THAT(pthread_detach(thread), SyscallSucceeds());
-}
-
-#ifndef ANDROID // Android does not support pthread_cancel
-
-void sighandler(int s) {}
-
-void* signaler(void* arg) {
- pthread_t* t = reinterpret_cast<pthread_t*>(arg);
- // Repeatedly send the real-time signal until we are cancelled, because it's
- // difficult to know exactly when the epoll_wait on the other thread (which
- // this thread intends to interrupt) has started blocking.
- while (1) {
- usleep(200000);
- pthread_kill(*t, SIGRTMIN);
- }
- return nullptr;
-}
-
-TEST(EpollTest, UnblockWithSignal) {
- auto epollfd = ASSERT_NO_ERRNO_AND_VALUE(NewEpollFD());
- std::vector<FileDescriptor> eventfds;
- for (int i = 0; i < kFDsPerEpoll; i++) {
- eventfds.push_back(ASSERT_NO_ERRNO_AND_VALUE(NewEventFD()));
- ASSERT_NO_ERRNO(RegisterEpollFD(epollfd.get(), eventfds[i].get(), EPOLLIN,
- kMagicConstant + i));
- }
-
- signal(SIGRTMIN, sighandler);
- // Unblock the real time signals that InitGoogle blocks :(
- sigset_t unblock;
- sigemptyset(&unblock);
- sigaddset(&unblock, SIGRTMIN);
- ASSERT_THAT(sigprocmask(SIG_UNBLOCK, &unblock, nullptr), SyscallSucceeds());
-
- pthread_t thread;
- pthread_t cur = pthread_self();
- ASSERT_THAT(pthread_create(&thread, nullptr, signaler, &cur),
- SyscallSucceedsWithValue(0));
-
- struct epoll_event result[kFDsPerEpoll];
- EXPECT_THAT(epoll_wait(epollfd.get(), result, kFDsPerEpoll, -1),
- SyscallFailsWithErrno(EINTR));
- EXPECT_THAT(pthread_cancel(thread), SyscallSucceeds());
- EXPECT_THAT(pthread_detach(thread), SyscallSucceeds());
-}
-
-#endif // ANDROID
-
-TEST(EpollTest, TimeoutNoFds) {
- auto epollfd = ASSERT_NO_ERRNO_AND_VALUE(NewEpollFD());
- struct epoll_event result[kFDsPerEpoll];
- EXPECT_THAT(RetryEINTR(epoll_wait)(epollfd.get(), result, kFDsPerEpoll, 100),
- SyscallSucceedsWithValue(0));
-}
-
-struct addr_ctx {
- int epollfd;
- int eventfd;
-};
-
-void* fd_adder(void* arg) {
- struct addr_ctx* actx = reinterpret_cast<struct addr_ctx*>(arg);
- struct epoll_event event;
- event.events = EPOLLIN | EPOLLOUT;
- event.data.u64 = 0xdeadbeeffacefeed;
-
- usleep(200000);
- if (epoll_ctl(actx->epollfd, EPOLL_CTL_ADD, actx->eventfd, &event) == -1) {
- fprintf(stderr, "epoll_ctl failed: %s\n", strerror(errno));
- }
-
- return nullptr;
-}
-
-TEST(EpollTest, UnblockWithNewFD) {
- auto epollfd = ASSERT_NO_ERRNO_AND_VALUE(NewEpollFD());
- auto eventfd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD());
-
- pthread_t thread;
- struct addr_ctx actx = {epollfd.get(), eventfd.get()};
- ASSERT_THAT(pthread_create(&thread, nullptr, fd_adder, &actx),
- SyscallSucceedsWithValue(0));
-
- struct epoll_event result[kFDsPerEpoll];
- // Wait while no FDs are ready, but after 200ms fd_adder will add a ready FD
- // to epoll which will wake us up.
- EXPECT_THAT(RetryEINTR(epoll_wait)(epollfd.get(), result, kFDsPerEpoll, -1),
- SyscallSucceedsWithValue(1));
- EXPECT_THAT(pthread_detach(thread), SyscallSucceeds());
- EXPECT_EQ(result[0].data.u64, 0xdeadbeeffacefeed);
-}
-
-TEST(EpollTest, Oneshot) {
- auto epollfd = ASSERT_NO_ERRNO_AND_VALUE(NewEpollFD());
- std::vector<FileDescriptor> eventfds;
- for (int i = 0; i < kFDsPerEpoll; i++) {
- eventfds.push_back(ASSERT_NO_ERRNO_AND_VALUE(NewEventFD()));
- ASSERT_NO_ERRNO(RegisterEpollFD(epollfd.get(), eventfds[i].get(), EPOLLIN,
- kMagicConstant + i));
- }
-
- struct epoll_event event;
- event.events = EPOLLOUT | EPOLLONESHOT;
- event.data.u64 = kMagicConstant;
- ASSERT_THAT(
- epoll_ctl(epollfd.get(), EPOLL_CTL_MOD, eventfds[0].get(), &event),
- SyscallSucceeds());
-
- struct epoll_event result[kFDsPerEpoll];
- // One-shot entry means that the first epoll_wait should succeed.
- ASSERT_THAT(RetryEINTR(epoll_wait)(epollfd.get(), result, kFDsPerEpoll, -1),
- SyscallSucceedsWithValue(1));
- EXPECT_EQ(result[0].data.u64, kMagicConstant);
-
- // One-shot entry means that the second epoll_wait should time out (see the
- // re-arm sketch after this test).
- EXPECT_THAT(RetryEINTR(epoll_wait)(epollfd.get(), result, kFDsPerEpoll, 100),
- SyscallSucceedsWithValue(0));
-}
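-
-// A sketch of re-arming a one-shot entry: after it fires, the entry stays in
-// the interest set but is disabled until EPOLL_CTL_MOD re-enables it. Added
-// for illustration, using the same helpers as the test above.
-TEST(EpollTest, OneshotRearmSketch) {
- auto epollfd = ASSERT_NO_ERRNO_AND_VALUE(NewEpollFD());
- auto eventfd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD());
- ASSERT_NO_ERRNO(RegisterEpollFD(epollfd.get(), eventfd.get(),
- EPOLLOUT | EPOLLONESHOT, kMagicConstant));
-
- struct epoll_event result[kFDsPerEpoll];
- // The entry fires once...
- ASSERT_THAT(RetryEINTR(epoll_wait)(epollfd.get(), result, kFDsPerEpoll, -1),
- SyscallSucceedsWithValue(1));
- // ...and is then disabled.
- ASSERT_THAT(RetryEINTR(epoll_wait)(epollfd.get(), result, kFDsPerEpoll, 100),
- SyscallSucceedsWithValue(0));
-
- // Re-arm the same entry with EPOLL_CTL_MOD; it becomes reportable again.
- struct epoll_event event;
- event.events = EPOLLOUT | EPOLLONESHOT;
- event.data.u64 = kMagicConstant;
- ASSERT_THAT(
- epoll_ctl(epollfd.get(), EPOLL_CTL_MOD, eventfd.get(), &event),
- SyscallSucceeds());
- ASSERT_THAT(RetryEINTR(epoll_wait)(epollfd.get(), result, kFDsPerEpoll, -1),
- SyscallSucceedsWithValue(1));
- EXPECT_EQ(result[0].data.u64, kMagicConstant);
-}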
-
-TEST(EpollTest, EdgeTriggered) {
- // Test an edge-triggered entry: register the fd with EPOLLET; the first wait
- // should return it and the second should time out; after the fd becomes
- // writable again, the third wait should return it and the fourth should time
- // out. (A level-triggered contrast sketch follows this test.)
- auto epollfd = ASSERT_NO_ERRNO_AND_VALUE(NewEpollFD());
- auto eventfd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD());
- ASSERT_NO_ERRNO(RegisterEpollFD(epollfd.get(), eventfd.get(),
- EPOLLOUT | EPOLLET, kMagicConstant));
-
- struct epoll_event result[kFDsPerEpoll];
-
- {
- const DisableSave ds; // May trigger spurious event.
-
- // Edge-triggered entry means that the first epoll_wait should return the
- // event.
- ASSERT_THAT(epoll_wait(epollfd.get(), result, kFDsPerEpoll, -1),
- SyscallSucceedsWithValue(1));
- EXPECT_EQ(result[0].data.u64, kMagicConstant);
-
- // Edge-triggered entry means that the second epoll_wait should time out.
- ASSERT_THAT(epoll_wait(epollfd.get(), result, kFDsPerEpoll, 100),
- SyscallSucceedsWithValue(0));
- }
-
- uint64_t tmp = ULLONG_MAX - 1;
-
- // Fill the eventfd counter so the fd becomes non-writable.
- ASSERT_THAT(WriteFd(eventfd.get(), &tmp, sizeof(tmp)),
- SyscallSucceedsWithValue(sizeof(tmp)));
-
- // Drain the counter to make the same fd writable again; the change triggers
- // an edge-triggered event.
- ASSERT_THAT(ReadFd(eventfd.get(), &tmp, sizeof(tmp)),
- SyscallSucceedsWithValue(sizeof(tmp)));
-
- {
- const DisableSave ds; // May trigger spurious event.
-
- // An edge-triggered event should now be returned.
- ASSERT_THAT(epoll_wait(epollfd.get(), result, kFDsPerEpoll, -1),
- SyscallSucceedsWithValue(1));
- EXPECT_EQ(result[0].data.u64, kMagicConstant);
-
- // The edge-triggered event was consumed above; we don't expect to get it
- // again.
- ASSERT_THAT(epoll_wait(epollfd.get(), result, kFDsPerEpoll, 100),
- SyscallSucceedsWithValue(0));
- }
-}
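-
-// For contrast with the edge-triggered case above, a level-triggered entry
-// (no EPOLLET) keeps reporting a still-writable fd on every wait. A minimal
-// sketch added for illustration.
-TEST(EpollTest, LevelTriggeredSketch) {
- auto epollfd = ASSERT_NO_ERRNO_AND_VALUE(NewEpollFD());
- auto eventfd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD());
- ASSERT_NO_ERRNO(
- RegisterEpollFD(epollfd.get(), eventfd.get(), EPOLLOUT, kMagicConstant));
-
- struct epoll_event result[kFDsPerEpoll];
- ASSERT_THAT(RetryEINTR(epoll_wait)(epollfd.get(), result, kFDsPerEpoll, -1),
- SyscallSucceedsWithValue(1));
- EXPECT_EQ(result[0].data.u64, kMagicConstant);
-
- // Unlike an edge-triggered entry, the second wait reports the fd again.
- ASSERT_THAT(RetryEINTR(epoll_wait)(epollfd.get(), result, kFDsPerEpoll, 100),
- SyscallSucceedsWithValue(1));
- EXPECT_EQ(result[0].data.u64, kMagicConstant);
-}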
-
-TEST(EpollTest, OneshotAndEdgeTriggered) {
- auto epollfd = ASSERT_NO_ERRNO_AND_VALUE(NewEpollFD());
- auto eventfd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD());
- ASSERT_NO_ERRNO(RegisterEpollFD(epollfd.get(), eventfd.get(),
- EPOLLOUT | EPOLLET | EPOLLONESHOT,
- kMagicConstant));
-
- struct epoll_event result[kFDsPerEpoll];
- // A one-shot, edge-triggered entry means that the first epoll_wait should
- // return the event.
- ASSERT_THAT(RetryEINTR(epoll_wait)(epollfd.get(), result, kFDsPerEpoll, -1),
- SyscallSucceedsWithValue(1));
- EXPECT_EQ(result[0].data.u64, kMagicConstant);
-
- // Edge-triggered entry means that the second epoll_wait should time out.
- ASSERT_THAT(RetryEINTR(epoll_wait)(epollfd.get(), result, kFDsPerEpoll, 100),
- SyscallSucceedsWithValue(0));
-
- uint64_t tmp = ULLONG_MAX - 1;
- // Fill the eventfd counter so the fd becomes non-writable.
- ASSERT_THAT(WriteFd(eventfd.get(), &tmp, sizeof(tmp)),
- SyscallSucceedsWithValue(sizeof(tmp)));
- // Drain the counter to make the same fd writable again; the change will not
- // trigger an edge-triggered event because we've also included EPOLLONESHOT.
- ASSERT_THAT(ReadFd(eventfd.get(), &tmp, sizeof(tmp)),
- SyscallSucceedsWithValue(sizeof(tmp)));
- ASSERT_THAT(RetryEINTR(epoll_wait)(epollfd.get(), result, kFDsPerEpoll, 100),
- SyscallSucceedsWithValue(0));
-}
-
-TEST(EpollTest, CycleOfOneDisallowed) {
- auto epollfd = ASSERT_NO_ERRNO_AND_VALUE(NewEpollFD());
-
- struct epoll_event event;
- event.events = EPOLLOUT;
- event.data.u64 = kMagicConstant;
-
- ASSERT_THAT(epoll_ctl(epollfd.get(), EPOLL_CTL_ADD, epollfd.get(), &event),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(EpollTest, CycleOfThreeDisallowed) {
- auto epollfd = ASSERT_NO_ERRNO_AND_VALUE(NewEpollFD());
- auto epollfd1 = ASSERT_NO_ERRNO_AND_VALUE(NewEpollFD());
- auto epollfd2 = ASSERT_NO_ERRNO_AND_VALUE(NewEpollFD());
-
- ASSERT_NO_ERRNO(
- RegisterEpollFD(epollfd.get(), epollfd1.get(), EPOLLIN, kMagicConstant));
- ASSERT_NO_ERRNO(
- RegisterEpollFD(epollfd1.get(), epollfd2.get(), EPOLLIN, kMagicConstant));
-
- struct epoll_event event;
- event.events = EPOLLIN;
- event.data.u64 = kMagicConstant;
- EXPECT_THAT(epoll_ctl(epollfd2.get(), EPOLL_CTL_ADD, epollfd.get(), &event),
- SyscallFailsWithErrno(ELOOP));
-}
-
-TEST(EpollTest, CloseFile) {
- auto epollfd = ASSERT_NO_ERRNO_AND_VALUE(NewEpollFD());
- auto eventfd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD());
- ASSERT_NO_ERRNO(
- RegisterEpollFD(epollfd.get(), eventfd.get(), EPOLLOUT, kMagicConstant));
-
- struct epoll_event result[kFDsPerEpoll];
- ASSERT_THAT(RetryEINTR(epoll_wait)(epollfd.get(), result, kFDsPerEpoll, -1),
- SyscallSucceedsWithValue(1));
- EXPECT_EQ(result[0].data.u64, kMagicConstant);
-
- // Close the event fd early.
- eventfd.reset();
-
- EXPECT_THAT(RetryEINTR(epoll_wait)(epollfd.get(), result, kFDsPerEpoll, 100),
- SyscallSucceedsWithValue(0));
-}
-
-TEST(EpollTest, PipeReaderHupAfterWriterClosed) {
- auto epollfd = ASSERT_NO_ERRNO_AND_VALUE(NewEpollFD());
- int pipefds[2];
- ASSERT_THAT(pipe(pipefds), SyscallSucceeds());
- FileDescriptor rfd(pipefds[0]);
- FileDescriptor wfd(pipefds[1]);
-
- ASSERT_NO_ERRNO(RegisterEpollFD(epollfd.get(), rfd.get(), 0, kMagicConstant));
- struct epoll_event result[kFDsPerEpoll];
- // Initially, rfd should not generate any events of interest.
- ASSERT_THAT(epoll_wait(epollfd.get(), result, kFDsPerEpoll, 0),
- SyscallSucceedsWithValue(0));
- // Close the write end of the pipe.
- wfd.reset();
- // rfd should now generate EPOLLHUP, which EPOLL_CTL_ADD unconditionally adds
- // to the set of events of interest.
- ASSERT_THAT(epoll_wait(epollfd.get(), result, kFDsPerEpoll, 0),
- SyscallSucceedsWithValue(1));
- EXPECT_EQ(result[0].events, EPOLLHUP);
- EXPECT_EQ(result[0].data.u64, kMagicConstant);
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/eventfd.cc b/test/syscalls/linux/eventfd.cc
deleted file mode 100644
index 8202d35fa..000000000
--- a/test/syscalls/linux/eventfd.cc
+++ /dev/null
@@ -1,222 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <pthread.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/epoll.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "test/util/epoll_util.h"
-#include "test/util/eventfd_util.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(EventfdTest, Nonblock) {
- FileDescriptor efd =
- ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, EFD_NONBLOCK | EFD_SEMAPHORE));
-
- uint64_t l;
- ASSERT_THAT(read(efd.get(), &l, sizeof(l)), SyscallFailsWithErrno(EAGAIN));
-
- l = 1;
- ASSERT_THAT(write(efd.get(), &l, sizeof(l)), SyscallSucceeds());
-
- l = 0;
- ASSERT_THAT(read(efd.get(), &l, sizeof(l)), SyscallSucceeds());
- EXPECT_EQ(l, 1);
-
- ASSERT_THAT(read(efd.get(), &l, sizeof(l)), SyscallFailsWithErrno(EAGAIN));
-}
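-
-// A sketch of the EFD_SEMAPHORE semantics used above: with the flag set, each
-// read returns 1 and decrements the counter by one, instead of returning the
-// whole counter and resetting it to zero. Added for illustration.
-TEST(EventfdTest, SemaphoreDecrementsByOneSketch) {
- FileDescriptor efd =
- ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(2, EFD_NONBLOCK | EFD_SEMAPHORE));
-
- uint64_t l = 0;
- ASSERT_THAT(read(efd.get(), &l, sizeof(l)),
- SyscallSucceedsWithValue(sizeof(l)));
- EXPECT_EQ(l, 1);
- ASSERT_THAT(read(efd.get(), &l, sizeof(l)),
- SyscallSucceedsWithValue(sizeof(l)));
- EXPECT_EQ(l, 1);
-
- // The counter started at 2, so a third non-blocking read fails.
- ASSERT_THAT(read(efd.get(), &l, sizeof(l)), SyscallFailsWithErrno(EAGAIN));
-}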
-
-void* read_three_times(void* arg) {
- int efd = *reinterpret_cast<int*>(arg);
- uint64_t l;
- EXPECT_THAT(read(efd, &l, sizeof(l)), SyscallSucceedsWithValue(sizeof(l)));
- EXPECT_THAT(read(efd, &l, sizeof(l)), SyscallSucceedsWithValue(sizeof(l)));
- EXPECT_THAT(read(efd, &l, sizeof(l)), SyscallSucceedsWithValue(sizeof(l)));
- return nullptr;
-}
-
-TEST(EventfdTest, BlockingWrite) {
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, EFD_SEMAPHORE));
- int efd = fd.get();
-
- pthread_t p;
- ASSERT_THAT(pthread_create(&p, nullptr, read_three_times,
- reinterpret_cast<void*>(&efd)),
- SyscallSucceeds());
-
- uint64_t l = 1;
- ASSERT_THAT(write(efd, &l, sizeof(l)), SyscallSucceeds());
- EXPECT_EQ(l, 1);
-
- ASSERT_THAT(write(efd, &l, sizeof(l)), SyscallSucceeds());
- EXPECT_EQ(l, 1);
-
- ASSERT_THAT(write(efd, &l, sizeof(l)), SyscallSucceeds());
- EXPECT_EQ(l, 1);
-
- ASSERT_THAT(pthread_join(p, nullptr), SyscallSucceeds());
-}
-
-TEST(EventfdTest, SmallWrite) {
- FileDescriptor efd =
- ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, EFD_NONBLOCK | EFD_SEMAPHORE));
-
- uint64_t l = 16;
- ASSERT_THAT(write(efd.get(), &l, 4), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(EventfdTest, SmallRead) {
- FileDescriptor efd =
- ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, EFD_NONBLOCK | EFD_SEMAPHORE));
-
- uint64_t l = 1;
- ASSERT_THAT(write(efd.get(), &l, sizeof(l)), SyscallSucceeds());
-
- l = 0;
- ASSERT_THAT(read(efd.get(), &l, 4), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(EventfdTest, IllegalSeek) {
- FileDescriptor efd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, 0));
- EXPECT_THAT(lseek(efd.get(), 0, SEEK_SET), SyscallFailsWithErrno(ESPIPE));
-}
-
-TEST(EventfdTest, IllegalPread) {
- FileDescriptor efd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, 0));
- int l;
- EXPECT_THAT(pread(efd.get(), &l, sizeof(l), 0),
- SyscallFailsWithErrno(ESPIPE));
-}
-
-TEST(EventfdTest, IllegalPwrite) {
- FileDescriptor efd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, 0));
- EXPECT_THAT(pwrite(efd.get(), "x", 1, 0), SyscallFailsWithErrno(ESPIPE));
-}
-
-TEST(EventfdTest, BigWrite) {
- FileDescriptor efd =
- ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, EFD_NONBLOCK | EFD_SEMAPHORE));
-
- uint64_t big[16];
- big[0] = 16;
- ASSERT_THAT(write(efd.get(), big, sizeof(big)), SyscallSucceeds());
-}
-
-TEST(EventfdTest, BigRead) {
- FileDescriptor efd =
- ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, EFD_NONBLOCK | EFD_SEMAPHORE));
-
- uint64_t l = 1;
- ASSERT_THAT(write(efd.get(), &l, sizeof(l)), SyscallSucceeds());
-
- uint64_t big[16];
- ASSERT_THAT(read(efd.get(), big, sizeof(big)), SyscallSucceeds());
- EXPECT_EQ(big[0], 1);
-}
-
-TEST(EventfdTest, BigWriteBigRead) {
- FileDescriptor efd =
- ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, EFD_NONBLOCK | EFD_SEMAPHORE));
-
- uint64_t l[16];
- l[0] = 16;
- ASSERT_THAT(write(efd.get(), l, sizeof(l)), SyscallSucceeds());
- ASSERT_THAT(read(efd.get(), l, sizeof(l)), SyscallSucceeds());
- EXPECT_EQ(l[0], 1);
-}
-
-TEST(EventfdTest, SpliceFromPipePartialSucceeds) {
- int pipes[2];
- ASSERT_THAT(pipe2(pipes, O_NONBLOCK), SyscallSucceeds());
- const FileDescriptor pipe_rfd(pipes[0]);
- const FileDescriptor pipe_wfd(pipes[1]);
- constexpr uint64_t kVal{1};
-
- FileDescriptor efd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, EFD_NONBLOCK));
-
- uint64_t event_array[2];
- event_array[0] = kVal;
- event_array[1] = kVal;
- ASSERT_THAT(write(pipe_wfd.get(), event_array, sizeof(event_array)),
- SyscallSucceedsWithValue(sizeof(event_array)));
- EXPECT_THAT(splice(pipe_rfd.get(), /*__offin=*/nullptr, efd.get(),
- /*__offout=*/nullptr, sizeof(event_array[0]) + 1,
- SPLICE_F_NONBLOCK),
- SyscallSucceedsWithValue(sizeof(event_array[0])));
-
- uint64_t val;
- ASSERT_THAT(read(efd.get(), &val, sizeof(val)),
- SyscallSucceedsWithValue(sizeof(val)));
- EXPECT_EQ(val, kVal);
-}
-
-// NotifyNonZero is inherently racy, so random save is disabled.
-TEST(EventfdTest, NotifyNonZero) {
- // Waits will time out at 10 seconds.
- constexpr int kEpollTimeoutMs = 10000;
- // Create an eventfd descriptor.
- FileDescriptor efd =
- ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(7, EFD_NONBLOCK | EFD_SEMAPHORE));
- // Create an epoll fd to listen to efd.
- FileDescriptor epollfd = ASSERT_NO_ERRNO_AND_VALUE(NewEpollFD());
- // Add efd to epoll.
- ASSERT_NO_ERRNO(
- RegisterEpollFD(epollfd.get(), efd.get(), EPOLLIN | EPOLLET, efd.get()));
-
- // Use epoll to get a value from efd.
- struct epoll_event out_ev;
- int wait_out = epoll_wait(epollfd.get(), &out_ev, 1, kEpollTimeoutMs);
- EXPECT_EQ(wait_out, 1);
- EXPECT_EQ(efd.get(), out_ev.data.fd);
- uint64_t val = 0;
- ASSERT_THAT(read(efd.get(), &val, sizeof(val)), SyscallSucceeds());
- EXPECT_EQ(val, 1);
-
- // Start a thread that, after this thread blocks on epoll_wait, will write to
- // efd. This is racy -- it's possible that this write will happen after
- // epoll_wait times out.
- ScopedThread t([&efd] {
- sleep(5);
- uint64_t val = 1;
- EXPECT_THAT(write(efd.get(), &val, sizeof(val)),
- SyscallSucceedsWithValue(sizeof(val)));
- });
-
- // epoll_wait should return once the thread writes.
- wait_out = epoll_wait(epollfd.get(), &out_ev, 1, kEpollTimeoutMs);
- EXPECT_EQ(wait_out, 1);
- EXPECT_EQ(efd.get(), out_ev.data.fd);
-
- val = 0;
- ASSERT_THAT(read(efd.get(), &val, sizeof(val)), SyscallSucceeds());
- EXPECT_EQ(val, 1);
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/exceptions.cc b/test/syscalls/linux/exceptions.cc
deleted file mode 100644
index 11dc1c651..000000000
--- a/test/syscalls/linux/exceptions.cc
+++ /dev/null
@@ -1,377 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <signal.h>
-
-#include "gtest/gtest.h"
-#include "test/util/logging.h"
-#include "test/util/platform_util.h"
-#include "test/util/signal_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-#if defined(__x86_64__)
-// Default value for the x87 FPU control word. See Intel SDM Vol 1, Ch 8.1.5
-// "x87 FPU Control Word".
-constexpr uint16_t kX87ControlWordDefault = 0x37f;
-
-// Mask for the divide-by-zero exception.
-constexpr uint16_t kX87ControlWordDiv0Mask = 1 << 2;
-
-// Default value for the SSE control register (MXCSR). See Intel SDM Vol 1, Ch
-// 11.6.4 "Initialization of SSE/SSE3 Extensions".
-constexpr uint32_t kMXCSRDefault = 0x1f80;
-
-// Mask for the divide-by-zero exception.
-constexpr uint32_t kMXCSRDiv0Mask = 1 << 9;
-
-// Flag for a pending divide-by-zero exception.
-constexpr uint32_t kMXCSRDiv0Flag = 1 << 2;
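-
-// A quick compile-time check of the mask arithmetic above: clearing the
-// divide-by-zero mask bit yields the control values used by the unmasked
-// tests below.
-static_assert((kX87ControlWordDefault & ~kX87ControlWordDiv0Mask) == 0x37b,
- "x87 control word with the divide-by-zero exception unmasked");
-static_assert((kMXCSRDefault & ~kMXCSRDiv0Mask) == 0x1d80,
- "MXCSR with the divide-by-zero exception unmasked");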
-
-void inline Halt() { asm("hlt\r\n"); }
-
-void inline SetAlignmentCheck() {
- asm("subq $128, %%rsp\r\n" // Avoid potential red zone clobber
- "pushf\r\n"
- "pop %%rax\r\n"
- "or $0x40000, %%rax\r\n"
- "push %%rax\r\n"
- "popf\r\n"
- "addq $128, %%rsp\r\n"
- :
- :
- : "ax");
-}
-
-void inline ClearAlignmentCheck() {
- asm("subq $128, %%rsp\r\n" // Avoid potential red zone clobber
- "pushf\r\n"
- "pop %%rax\r\n"
- "mov $0x40000, %%rbx\r\n"
- "not %%rbx\r\n"
- "and %%rbx, %%rax\r\n"
- "push %%rax\r\n"
- "popf\r\n"
- "addq $128, %%rsp\r\n"
- :
- :
- : "ax", "bx");
-}
-
-void inline Int3Normal() { asm(".byte 0xcd, 0x03\r\n"); }
-
-void inline Int3Compact() { asm(".byte 0xcc\r\n"); }
-
-void InIOHelper(int width, int value) {
- EXPECT_EXIT(
- {
- switch (width) {
- case 1:
- asm volatile("inb %%dx, %%al" ::"d"(value) : "%eax");
- break;
- case 2:
- asm volatile("inw %%dx, %%ax" ::"d"(value) : "%eax");
- break;
- case 4:
- asm volatile("inl %%dx, %%eax" ::"d"(value) : "%eax");
- break;
- default:
- FAIL() << "invalid input width, only 1, 2 or 4 is allowed";
- }
- },
- ::testing::KilledBySignal(SIGSEGV), "");
-}
-#elif defined(__aarch64__)
-void inline Halt() { asm("hlt #0\r\n"); }
-#endif
-
-TEST(ExceptionTest, Halt) {
- // In order to prevent the regular handler from messing with things (and
- // perhaps refaulting until some other signal occurs), we reset the handler to
- // the default action here and ensure that it dies correctly.
- struct sigaction sa = {};
- sa.sa_handler = SIG_DFL;
- auto const cleanup = ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGSEGV, sa));
-
-#if defined(__x86_64__)
- EXPECT_EXIT(Halt(), ::testing::KilledBySignal(SIGSEGV), "");
-#elif defined(__aarch64__)
- EXPECT_EXIT(Halt(), ::testing::KilledBySignal(SIGILL), "");
-#endif
-}
-
-#if defined(__x86_64__)
-TEST(ExceptionTest, DivideByZero) {
- // See above.
- struct sigaction sa = {};
- sa.sa_handler = SIG_DFL;
- auto const cleanup = ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGFPE, sa));
-
- EXPECT_EXIT(
- {
- uint32_t remainder;
- uint32_t quotient;
- uint32_t divisor = 0;
- uint64_t value = 1;
- asm("divl 0(%2)\r\n"
- : "=d"(remainder), "=a"(quotient)
- : "r"(&divisor), "d"(value >> 32), "a"(value));
- TEST_CHECK(quotient > 0); // Force dependency.
- },
- ::testing::KilledBySignal(SIGFPE), "");
-}
-
- // By default, x87 exceptions are masked and the faulting operation simply
- // produces a default value.
-TEST(ExceptionTest, X87DivideByZeroMasked) {
- int32_t quotient;
- int32_t value = 1;
- int32_t divisor = 0;
- asm("fildl %[value]\r\n"
- "fidivl %[divisor]\r\n"
- "fistpl %[quotient]\r\n"
- : [ quotient ] "=m"(quotient)
- : [ value ] "m"(value), [ divisor ] "m"(divisor));
-
- EXPECT_EQ(quotient, INT32_MIN);
-}
-
-// When unmasked, division by zero raises SIGFPE.
-TEST(ExceptionTest, X87DivideByZeroUnmasked) {
- // See above.
- struct sigaction sa = {};
- sa.sa_handler = SIG_DFL;
- auto const cleanup = ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGFPE, sa));
-
- EXPECT_EXIT(
- {
- // Clear the divide by zero exception mask.
- constexpr uint16_t kControlWord =
- kX87ControlWordDefault & ~kX87ControlWordDiv0Mask;
-
- int32_t quotient;
- int32_t value = 1;
- int32_t divisor = 0;
- asm volatile(
- "fldcw %[cw]\r\n"
- "fildl %[value]\r\n"
- "fidivl %[divisor]\r\n"
- "fistpl %[quotient]\r\n"
- : [ quotient ] "=m"(quotient)
- : [ cw ] "m"(kControlWord), [ value ] "m"(value),
- [ divisor ] "m"(divisor));
- },
- ::testing::KilledBySignal(SIGFPE), "");
-}
-
-// Pending exceptions in the x87 status register are not clobbered by syscalls.
-TEST(ExceptionTest, X87StatusClobber) {
- // See above.
- struct sigaction sa = {};
- sa.sa_handler = SIG_DFL;
- auto const cleanup = ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGFPE, sa));
-
- EXPECT_EXIT(
- {
- // Clear the divide by zero exception mask.
- constexpr uint16_t kControlWord =
- kX87ControlWordDefault & ~kX87ControlWordDiv0Mask;
-
- int32_t quotient;
- int32_t value = 1;
- int32_t divisor = 0;
- asm volatile(
- "fildl %[value]\r\n"
- "fidivl %[divisor]\r\n"
- // Exception is masked, so it does not occur here.
- "fistpl %[quotient]\r\n"
-
- // SYS_getpid placed in rax by constraint.
- "syscall\r\n"
-
- // Unmask exception. The syscall didn't clobber the pending
- // exception, so now it can be raised.
- //
- // N.B. "a floating-point exception will be generated upon execution
- // of the *next* floating-point instruction".
- "fldcw %[cw]\r\n"
- "fwait\r\n"
- : [ quotient ] "=m"(quotient)
- : [ value ] "m"(value), [ divisor ] "m"(divisor), "a"(SYS_getpid),
- [ cw ] "m"(kControlWord)
- : "rcx", "r11");
- },
- ::testing::KilledBySignal(SIGFPE), "");
-}
-
- // By default, SSE exceptions are masked and the faulting operation simply
- // produces a default value.
-TEST(ExceptionTest, SSEDivideByZeroMasked) {
- uint32_t status;
- int32_t quotient;
- int32_t value = 1;
- int32_t divisor = 0;
- asm("cvtsi2ssl %[value], %%xmm0\r\n"
- "cvtsi2ssl %[divisor], %%xmm1\r\n"
- "divss %%xmm1, %%xmm0\r\n"
- "cvtss2sil %%xmm0, %[quotient]\r\n"
- : [ quotient ] "=r"(quotient), [ status ] "=r"(status)
- : [ value ] "r"(value), [ divisor ] "r"(divisor)
- : "xmm0", "xmm1");
-
- EXPECT_EQ(quotient, INT32_MIN);
-}
-
-// When unmasked, division by zero raises SIGFPE.
-TEST(ExceptionTest, SSEDivideByZeroUnmasked) {
- // See above.
- struct sigaction sa = {};
- sa.sa_handler = SIG_DFL;
- auto const cleanup = ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGFPE, sa));
-
- EXPECT_EXIT(
- {
- // Clear the divide by zero exception mask.
- constexpr uint32_t kMXCSR = kMXCSRDefault & ~kMXCSRDiv0Mask;
-
- int32_t quotient;
- int32_t value = 1;
- int32_t divisor = 0;
- asm volatile(
- "ldmxcsr %[mxcsr]\r\n"
- "cvtsi2ssl %[value], %%xmm0\r\n"
- "cvtsi2ssl %[divisor], %%xmm1\r\n"
- "divss %%xmm1, %%xmm0\r\n"
- "cvtss2sil %%xmm0, %[quotient]\r\n"
- : [ quotient ] "=r"(quotient)
- : [ mxcsr ] "m"(kMXCSR), [ value ] "r"(value),
- [ divisor ] "r"(divisor)
- : "xmm0", "xmm1");
- },
- ::testing::KilledBySignal(SIGFPE), "");
-}
-
-// Pending exceptions in the SSE status register are not clobbered by syscalls.
-TEST(ExceptionTest, SSEStatusClobber) {
- uint32_t mxcsr;
- int32_t quotient;
- int32_t value = 1;
- int32_t divisor = 0;
- asm("cvtsi2ssl %[value], %%xmm0\r\n"
- "cvtsi2ssl %[divisor], %%xmm1\r\n"
- "divss %%xmm1, %%xmm0\r\n"
- // Exception is masked, so it does not occur here.
- "cvtss2sil %%xmm0, %[quotient]\r\n"
-
- // SYS_getpid placed in rax by constraint.
- "syscall\r\n"
-
- // Intel SDM Vol 1, Ch 10.2.3.1 "SIMD Floating-Point Mask and Flag Bits":
- // "If LDMXCSR or FXRSTOR clears a mask bit and sets the corresponding
- // exception flag bit, a SIMD floating-point exception will not be
- // generated as a result of this change. The unmasked exception will be
- // generated only upon the execution of the next SSE/SSE2/SSE3 instruction
- // that detects the unmasked exception condition."
- //
- // Though ambiguous, empirical evidence indicates that this means that
- // exception flags set in the status register will never cause an
- // exception to be raised; only a new exception condition will do so.
- //
- // Thus here we just check for the flag itself rather than trying to raise
- // the exception.
- "stmxcsr %[mxcsr]\r\n"
- : [ quotient ] "=r"(quotient), [ mxcsr ] "+m"(mxcsr)
- : [ value ] "r"(value), [ divisor ] "r"(divisor), "a"(SYS_getpid)
- : "xmm0", "xmm1", "rcx", "r11");
-
- EXPECT_TRUE(mxcsr & kMXCSRDiv0Flag);
-}
-
-TEST(ExceptionTest, IOAccessFault) {
- // See above.
- struct sigaction sa = {};
- sa.sa_handler = SIG_DFL;
- auto const cleanup = ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGSEGV, sa));
-
- InIOHelper(1, 0x0);
- InIOHelper(2, 0x7);
- InIOHelper(4, 0x6);
- InIOHelper(1, 0xffff);
- InIOHelper(2, 0xffff);
- InIOHelper(4, 0xfffd);
-}
-
-TEST(ExceptionTest, Alignment) {
- SetAlignmentCheck();
- ClearAlignmentCheck();
-}
-
-TEST(ExceptionTest, AlignmentHalt) {
- // See above.
- struct sigaction sa = {};
- sa.sa_handler = SIG_DFL;
- auto const cleanup = ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGSEGV, sa));
-
- // Reported upstream. We need to ensure that bad flags are cleared even in
- // fault paths. Set the alignment flag and then generate an exception.
- EXPECT_EXIT(
- {
- SetAlignmentCheck();
- Halt();
- },
- ::testing::KilledBySignal(SIGSEGV), "");
-}
-
-TEST(ExceptionTest, AlignmentCheck) {
- SKIP_IF(PlatformSupportAlignmentCheck() != PlatformSupport::Allowed);
-
- // See above.
- struct sigaction sa = {};
- sa.sa_handler = SIG_DFL;
- auto const cleanup = ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGBUS, sa));
-
- EXPECT_EXIT(
- {
- char array[16];
- SetAlignmentCheck();
- for (int i = 0; i < 8; i++) {
- // At least 7/8 offsets will be unaligned here.
- uint64_t* ptr = reinterpret_cast<uint64_t*>(&array[i]);
- asm("mov %0, 0(%0)\r\n" : : "r"(ptr) : "ax");
- }
- },
- ::testing::KilledBySignal(SIGBUS), "");
-}
-
-TEST(ExceptionTest, Int3Normal) {
- // See above.
- struct sigaction sa = {};
- sa.sa_handler = SIG_DFL;
- auto const cleanup = ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGTRAP, sa));
-
- EXPECT_EXIT(Int3Normal(), ::testing::KilledBySignal(SIGTRAP), "");
-}
-
-TEST(ExceptionTest, Int3Compact) {
- // See above.
- struct sigaction sa = {};
- sa.sa_handler = SIG_DFL;
- auto const cleanup = ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGTRAP, sa));
-
- EXPECT_EXIT(Int3Compact(), ::testing::KilledBySignal(SIGTRAP), "");
-}
-#endif
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/exec.cc b/test/syscalls/linux/exec.cc
deleted file mode 100644
index a0016146a..000000000
--- a/test/syscalls/linux/exec.cc
+++ /dev/null
@@ -1,904 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/exec.h"
-
-#include <errno.h>
-#include <fcntl.h>
-#include <sys/eventfd.h>
-#include <sys/resource.h>
-#include <sys/time.h>
-#include <unistd.h>
-
-#include <iostream>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "absl/strings/match.h"
-#include "absl/strings/numbers.h"
-#include "absl/strings/str_cat.h"
-#include "absl/strings/str_split.h"
-#include "absl/strings/string_view.h"
-#include "absl/synchronization/mutex.h"
-#include "absl/types/optional.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/multiprocess_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-constexpr char kBasicWorkload[] = "test/syscalls/linux/exec_basic_workload";
-constexpr char kExitScript[] = "test/syscalls/linux/exit_script";
-constexpr char kStateWorkload[] = "test/syscalls/linux/exec_state_workload";
-constexpr char kProcExeWorkload[] =
- "test/syscalls/linux/exec_proc_exe_workload";
-constexpr char kAssertClosedWorkload[] =
- "test/syscalls/linux/exec_assert_closed_workload";
-constexpr char kPriorityWorkload[] = "test/syscalls/linux/priority_execve";
-
-constexpr char kExit42[] = "--exec_exit_42";
-constexpr char kExecWithThread[] = "--exec_exec_with_thread";
-constexpr char kExecFromThread[] = "--exec_exec_from_thread";
-
-// Runs file specified by dirfd and pathname with argv and checks that the exit
-// status is expect_status and that stderr contains expect_stderr.
-void CheckExecHelper(const absl::optional<int32_t> dirfd,
- const std::string& pathname, const ExecveArray& argv,
- const ExecveArray& envv, const int flags,
- int expect_status, const std::string& expect_stderr) {
- int pipe_fds[2];
- ASSERT_THAT(pipe2(pipe_fds, O_CLOEXEC), SyscallSucceeds());
-
- FileDescriptor read_fd(pipe_fds[0]);
- FileDescriptor write_fd(pipe_fds[1]);
-
- pid_t child;
- int execve_errno;
-
- const auto remap_stderr = [pipe_fds] {
- // Remap stdin and stdout to /dev/null.
- int fd = open("/dev/null", O_RDWR | O_CLOEXEC);
- if (fd < 0) {
- _exit(errno);
- }
-
- int ret = dup2(fd, 0);
- if (ret < 0) {
- _exit(errno);
- }
-
- ret = dup2(fd, 1);
- if (ret < 0) {
- _exit(errno);
- }
-
- // And stderr to the pipe.
- ret = dup2(pipe_fds[1], 2);
- if (ret < 0) {
- _exit(errno);
- }
-
- // Here, we'd ideally close all other FDs inherited from the parent.
- // However, that's not worth the effort and CloexecNormalFile and
- // CloexecEventfd depend on that not happening.
- };
-
- Cleanup kill;
- if (dirfd.has_value()) {
- kill = ASSERT_NO_ERRNO_AND_VALUE(ForkAndExecveat(*dirfd, pathname, argv,
- envv, flags, remap_stderr,
- &child, &execve_errno));
- } else {
- kill = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(pathname, argv, envv, remap_stderr, &child, &execve_errno));
- }
-
- ASSERT_EQ(0, execve_errno);
-
- // Not needed anymore.
- write_fd.reset();
-
- // Read stderr until the child exits.
- std::string output;
- constexpr int kSize = 128;
- char buf[kSize];
- int n;
- do {
- ASSERT_THAT(n = ReadFd(read_fd.get(), buf, kSize), SyscallSucceeds());
- if (n > 0) {
- output.append(buf, n);
- }
- } while (n > 0);
-
- int status;
- ASSERT_THAT(RetryEINTR(waitpid)(child, &status, 0), SyscallSucceeds());
- EXPECT_EQ(status, expect_status);
-
- // Process cleanup no longer needed.
- kill.Release();
-
- EXPECT_TRUE(absl::StrContains(output, expect_stderr)) << output;
-}
-
-void CheckExec(const std::string& filename, const ExecveArray& argv,
- const ExecveArray& envv, int expect_status,
- const std::string& expect_stderr) {
- CheckExecHelper(/*dirfd=*/absl::optional<int32_t>(), filename, argv, envv,
- /*flags=*/0, expect_status, expect_stderr);
-}
-
-void CheckExecveat(const int32_t dirfd, const std::string& pathname,
- const ExecveArray& argv, const ExecveArray& envv,
- const int flags, int expect_status,
- const std::string& expect_stderr) {
- CheckExecHelper(absl::optional<int32_t>(dirfd), pathname, argv, envv, flags,
- expect_status, expect_stderr);
-}
-
-TEST(ExecTest, EmptyPath) {
- int execve_errno;
- ASSERT_NO_ERRNO_AND_VALUE(ForkAndExec("", {}, {}, nullptr, &execve_errno));
- EXPECT_EQ(execve_errno, ENOENT);
-}
-
-TEST(ExecTest, Basic) {
- CheckExec(RunfilePath(kBasicWorkload), {RunfilePath(kBasicWorkload)}, {},
- ArgEnvExitStatus(0, 0),
- absl::StrCat(RunfilePath(kBasicWorkload), "\n"));
-}
-
-TEST(ExecTest, OneArg) {
- CheckExec(RunfilePath(kBasicWorkload), {RunfilePath(kBasicWorkload), "1"}, {},
- ArgEnvExitStatus(1, 0),
- absl::StrCat(RunfilePath(kBasicWorkload), "\n1\n"));
-}
-
-TEST(ExecTest, FiveArg) {
- CheckExec(RunfilePath(kBasicWorkload),
- {RunfilePath(kBasicWorkload), "1", "2", "3", "4", "5"}, {},
- ArgEnvExitStatus(5, 0),
- absl::StrCat(RunfilePath(kBasicWorkload), "\n1\n2\n3\n4\n5\n"));
-}
-
-TEST(ExecTest, OneEnv) {
- CheckExec(RunfilePath(kBasicWorkload), {RunfilePath(kBasicWorkload)}, {"1"},
- ArgEnvExitStatus(0, 1),
- absl::StrCat(RunfilePath(kBasicWorkload), "\n1\n"));
-}
-
-TEST(ExecTest, FiveEnv) {
- CheckExec(RunfilePath(kBasicWorkload), {RunfilePath(kBasicWorkload)},
- {"1", "2", "3", "4", "5"}, ArgEnvExitStatus(0, 5),
- absl::StrCat(RunfilePath(kBasicWorkload), "\n1\n2\n3\n4\n5\n"));
-}
-
-TEST(ExecTest, OneArgOneEnv) {
- CheckExec(RunfilePath(kBasicWorkload), {RunfilePath(kBasicWorkload), "arg"},
- {"env"}, ArgEnvExitStatus(1, 1),
- absl::StrCat(RunfilePath(kBasicWorkload), "\narg\nenv\n"));
-}
-
-TEST(ExecTest, InterpreterScript) {
- CheckExec(RunfilePath(kExitScript), {RunfilePath(kExitScript), "25"}, {},
- ArgEnvExitStatus(25, 0), "");
-}
-
-// Everything after the path in the interpreter script is a single argument.
-TEST(ExecTest, InterpreterScriptArgSplit) {
- // Symlink through /tmp to ensure the path is short enough.
- TempPath link = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateSymlinkTo("/tmp", RunfilePath(kBasicWorkload)));
-
- TempPath script = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), absl::StrCat("#!", link.path(), " foo bar"),
- 0755));
-
- CheckExec(script.path(), {script.path()}, {}, ArgEnvExitStatus(2, 0),
- absl::StrCat(link.path(), "\nfoo bar\n", script.path(), "\n"));
-}
-
-// Original argv[0] is replaced with the script path.
-TEST(ExecTest, InterpreterScriptArgvZero) {
- // Symlink through /tmp to ensure the path is short enough.
- TempPath link = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateSymlinkTo("/tmp", RunfilePath(kBasicWorkload)));
-
- TempPath script = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), absl::StrCat("#!", link.path()), 0755));
-
- CheckExec(script.path(), {"REPLACED"}, {}, ArgEnvExitStatus(1, 0),
- absl::StrCat(link.path(), "\n", script.path(), "\n"));
-}
-
-// Original argv[0] is replaced with the script path, exactly as passed to
-// execve.
-TEST(ExecTest, InterpreterScriptArgvZeroRelative) {
- // Symlink through /tmp to ensure the path is short enough.
- TempPath link = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateSymlinkTo("/tmp", RunfilePath(kBasicWorkload)));
-
- TempPath script = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), absl::StrCat("#!", link.path()), 0755));
-
- auto cwd = ASSERT_NO_ERRNO_AND_VALUE(GetCWD());
- auto script_relative =
- ASSERT_NO_ERRNO_AND_VALUE(GetRelativePath(cwd, script.path()));
-
- CheckExec(script_relative, {"REPLACED"}, {}, ArgEnvExitStatus(1, 0),
- absl::StrCat(link.path(), "\n", script_relative, "\n"));
-}
-
-// argv[0] is added as the script path, even if there was none.
-TEST(ExecTest, InterpreterScriptArgvZeroAdded) {
- // Symlink through /tmp to ensure the path is short enough.
- TempPath link = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateSymlinkTo("/tmp", RunfilePath(kBasicWorkload)));
-
- TempPath script = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), absl::StrCat("#!", link.path()), 0755));
-
- CheckExec(script.path(), {}, {}, ArgEnvExitStatus(1, 0),
- absl::StrCat(link.path(), "\n", script.path(), "\n"));
-}
-
-// A NUL byte in the script line ends parsing.
-TEST(ExecTest, InterpreterScriptArgNUL) {
- // Symlink through /tmp to ensure the path is short enough.
- TempPath link = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateSymlinkTo("/tmp", RunfilePath(kBasicWorkload)));
-
- TempPath script = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(),
- absl::StrCat("#!", link.path(), " foo", std::string(1, '\0'), "bar"),
- 0755));
-
- CheckExec(script.path(), {script.path()}, {}, ArgEnvExitStatus(2, 0),
- absl::StrCat(link.path(), "\nfoo\n", script.path(), "\n"));
-}
-
-// Trailing whitespace following interpreter path is ignored.
-TEST(ExecTest, InterpreterScriptTrailingWhitespace) {
- // Symlink through /tmp to ensure the path is short enough.
- TempPath link = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateSymlinkTo("/tmp", RunfilePath(kBasicWorkload)));
-
- TempPath script = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), absl::StrCat("#!", link.path(), " \n"), 0755));
-
- CheckExec(script.path(), {script.path()}, {}, ArgEnvExitStatus(1, 0),
- absl::StrCat(link.path(), "\n", script.path(), "\n"));
-}
-
- // Multiple whitespace characters between the interpreter and arg are allowed.
-TEST(ExecTest, InterpreterScriptArgWhitespace) {
- // Symlink through /tmp to ensure the path is short enough.
- TempPath link = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateSymlinkTo("/tmp", RunfilePath(kBasicWorkload)));
-
- TempPath script = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), absl::StrCat("#!", link.path(), " foo"), 0755));
-
- CheckExec(script.path(), {script.path()}, {}, ArgEnvExitStatus(2, 0),
- absl::StrCat(link.path(), "\nfoo\n", script.path(), "\n"));
-}
-
-TEST(ExecTest, InterpreterScriptNoPath) {
- TempPath script = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateFileWith(GetAbsoluteTestTmpdir(), "#!\n\n", 0755));
-
- int execve_errno;
- ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(script.path(), {script.path()}, {}, nullptr, &execve_errno));
- EXPECT_EQ(execve_errno, ENOEXEC);
-}
-
-// AT_EXECFN is the path passed to execve.
-TEST(ExecTest, ExecFn) {
- // Symlink through /tmp to ensure the path is short enough.
- TempPath link = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateSymlinkTo("/tmp", RunfilePath(kStateWorkload)));
-
- TempPath script = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), absl::StrCat("#!", link.path(), " PrintExecFn"),
- 0755));
-
- // Pass the script as a relative path and assert that is what appears in
- // AT_EXECFN.
- auto cwd = ASSERT_NO_ERRNO_AND_VALUE(GetCWD());
- auto script_relative =
- ASSERT_NO_ERRNO_AND_VALUE(GetRelativePath(cwd, script.path()));
-
- CheckExec(script_relative, {script_relative}, {}, ArgEnvExitStatus(0, 0),
- absl::StrCat(script_relative, "\n"));
-}
-
-TEST(ExecTest, ExecName) {
- std::string path = RunfilePath(kStateWorkload);
-
- CheckExec(path, {path, "PrintExecName"}, {}, ArgEnvExitStatus(0, 0),
- absl::StrCat(Basename(path).substr(0, 15), "\n"));
-}
-
-TEST(ExecTest, ExecNameScript) {
- // Symlink through /tmp to ensure the path is short enough.
- TempPath link = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateSymlinkTo("/tmp", RunfilePath(kStateWorkload)));
-
- TempPath script = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(),
- absl::StrCat("#!", link.path(), " PrintExecName"), 0755));
-
- std::string script_path = script.path();
-
- CheckExec(script_path, {script_path}, {}, ArgEnvExitStatus(0, 0),
- absl::StrCat(Basename(script_path).substr(0, 15), "\n"));
-}
-
-// execve may be called by a multithreaded process.
-TEST(ExecTest, WithSiblingThread) {
- CheckExec("/proc/self/exe", {"/proc/self/exe", kExecWithThread}, {},
- W_EXITCODE(42, 0), "");
-}
-
-// execve may be called from a thread other than the leader of a multithreaded
-// process.
-TEST(ExecTest, FromSiblingThread) {
- CheckExec("/proc/self/exe", {"/proc/self/exe", kExecFromThread}, {},
- W_EXITCODE(42, 0), "");
-}
-
-TEST(ExecTest, NotFound) {
- char* const argv[] = {nullptr};
- char* const envp[] = {nullptr};
- EXPECT_THAT(execve("/file/does/not/exist", argv, envp),
- SyscallFailsWithErrno(ENOENT));
-}
-
-TEST(ExecTest, NoExecPerm) {
- char* const argv[] = {nullptr};
- char* const envp[] = {nullptr};
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- EXPECT_THAT(execve(f.path().c_str(), argv, envp),
- SyscallFailsWithErrno(EACCES));
-}
-
-// A signal handler we never expect to be called.
-void SignalHandler(int signo) {
- std::cerr << "Signal " << signo << " raised." << std::endl;
- exit(1);
-}
-
-// Signal handlers are reset on execve(2), unless they have default or ignored
-// disposition.
-TEST(ExecStateTest, HandlerReset) {
- struct sigaction sa;
- sa.sa_handler = SignalHandler;
- ASSERT_THAT(sigaction(SIGUSR1, &sa, nullptr), SyscallSucceeds());
-
- ExecveArray args = {
- RunfilePath(kStateWorkload),
- "CheckSigHandler",
- absl::StrCat(SIGUSR1),
- absl::StrCat(absl::Hex(reinterpret_cast<uintptr_t>(SIG_DFL))),
- };
-
- CheckExec(RunfilePath(kStateWorkload), args, {}, W_EXITCODE(0, 0), "");
-}
-
-// Ignored signal dispositions are not reset.
-TEST(ExecStateTest, IgnorePreserved) {
- struct sigaction sa;
- sa.sa_handler = SIG_IGN;
- ASSERT_THAT(sigaction(SIGUSR1, &sa, nullptr), SyscallSucceeds());
-
- ExecveArray args = {
- RunfilePath(kStateWorkload),
- "CheckSigHandler",
- absl::StrCat(SIGUSR1),
- absl::StrCat(absl::Hex(reinterpret_cast<uintptr_t>(SIG_IGN))),
- };
-
- CheckExec(RunfilePath(kStateWorkload), args, {}, W_EXITCODE(0, 0), "");
-}
-
- // The signal mask is not reset on execve(2).
-TEST(ExecStateTest, SignalMask) {
- sigset_t s;
- sigemptyset(&s);
- sigaddset(&s, SIGUSR1);
- ASSERT_THAT(sigprocmask(SIG_BLOCK, &s, nullptr), SyscallSucceeds());
-
- ExecveArray args = {
- RunfilePath(kStateWorkload),
- "CheckSigBlocked",
- absl::StrCat(SIGUSR1),
- };
-
- CheckExec(RunfilePath(kStateWorkload), args, {}, W_EXITCODE(0, 0), "");
-}
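-
- // For reference only (the real check is the state workload's CheckSigBlocked
- // mode): after execve, the new image can verify that a signal is still
- // blocked with nothing more than sigprocmask(2). The helper below is an
- // illustrative sketch, not code the test uses.
- bool SignalIsBlockedSketch(int signo) {
-   sigset_t current;
-   sigemptyset(&current);
-   // Passing a null new set simply queries the current mask.
-   if (sigprocmask(SIG_SETMASK, nullptr, &current) != 0) {
-     return false;
-   }
-   return sigismember(&current, signo) == 1;
- }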
-
-// itimers persist across execve.
-// N.B. Timers created with timer_create(2) should not be preserved!
-TEST(ExecStateTest, ItimerPreserved) {
- // The fork in ForkAndExec clears itimers, so only set them up after fork.
- auto setup_itimer = [] {
- // Ignore SIGALRM, as we don't actually care about timer
- // expirations.
- struct sigaction sa;
- sa.sa_handler = SIG_IGN;
- int ret = sigaction(SIGALRM, &sa, nullptr);
- if (ret < 0) {
- _exit(errno);
- }
-
- struct itimerval itv;
- itv.it_interval.tv_sec = 1;
- itv.it_interval.tv_usec = 0;
- itv.it_value.tv_sec = 1;
- itv.it_value.tv_usec = 0;
- ret = setitimer(ITIMER_REAL, &itv, nullptr);
- if (ret < 0) {
- _exit(errno);
- }
- };
-
- std::string filename = RunfilePath(kStateWorkload);
- ExecveArray argv = {
- filename,
- "CheckItimerEnabled",
- absl::StrCat(ITIMER_REAL),
- };
-
- pid_t child;
- int execve_errno;
- auto kill = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(filename, argv, {}, setup_itimer, &child, &execve_errno));
- ASSERT_EQ(0, execve_errno);
-
- int status;
- ASSERT_THAT(RetryEINTR(waitpid)(child, &status, 0), SyscallSucceeds());
- EXPECT_EQ(0, status);
-
- // Process cleanup no longer needed.
- kill.Release();
-}
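-
- // Illustrative sketch only (the real check is the state workload's
- // CheckItimerEnabled mode): after execve, the new image can confirm that an
- // interval timer survived by querying it with getitimer(2).
- bool ItimerArmedSketch(int which) {
-   struct itimerval itv = {};
-   if (getitimer(which, &itv) != 0) {
-     return false;
-   }
-   // With the 1s value and interval armed before execve, both fields should
-   // still be non-zero here.
-   return itv.it_value.tv_sec != 0 || itv.it_value.tv_usec != 0 ||
-          itv.it_interval.tv_sec != 0;
- }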
-
-TEST(ProcSelfExe, ChangesAcrossExecve) {
- // See exec_proc_exe_workload for more details. We simply
- // assert that the /proc/self/exe link changes across execve.
- CheckExec(RunfilePath(kProcExeWorkload),
- {RunfilePath(kProcExeWorkload),
- ASSERT_NO_ERRNO_AND_VALUE(ProcessExePath(getpid()))},
- {}, W_EXITCODE(0, 0), "");
-}
-
-TEST(ExecTest, CloexecNormalFile) {
- TempPath tempFile = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateFileWith(GetAbsoluteTestTmpdir(), "bar", 0755));
- const FileDescriptor fd_closed_on_exec =
- ASSERT_NO_ERRNO_AND_VALUE(Open(tempFile.path(), O_RDONLY | O_CLOEXEC));
-
- CheckExec(RunfilePath(kAssertClosedWorkload),
- {RunfilePath(kAssertClosedWorkload),
- absl::StrCat(fd_closed_on_exec.get())},
- {}, W_EXITCODE(0, 0), "");
-
- // The assert closed workload exits with code 2 if the descriptor is still
- // open (i.e. fstat(2) on it succeeds). We can use this as a negative test.
- const FileDescriptor fd_open_on_exec =
- ASSERT_NO_ERRNO_AND_VALUE(Open(tempFile.path(), O_RDONLY));
-
- CheckExec(
- RunfilePath(kAssertClosedWorkload),
- {RunfilePath(kAssertClosedWorkload), absl::StrCat(fd_open_on_exec.get())},
- {}, W_EXITCODE(2, 0), "");
-}
-
-TEST(ExecTest, CloexecEventfd) {
- int efd;
- ASSERT_THAT(efd = eventfd(0, EFD_CLOEXEC), SyscallSucceeds());
- FileDescriptor fd(efd);
-
- CheckExec(RunfilePath(kAssertClosedWorkload),
- {RunfilePath(kAssertClosedWorkload), absl::StrCat(fd.get())}, {},
- W_EXITCODE(0, 0), "");
-}
-
-constexpr int kLinuxMaxSymlinks = 40;
-
-TEST(ExecTest, SymlinkLimitExceeded) {
- std::string path = RunfilePath(kBasicWorkload);
-
- // Hold onto TempPath objects so they are not destructed prematurely.
- std::vector<TempPath> symlinks;
- for (int i = 0; i < kLinuxMaxSymlinks + 1; i++) {
- symlinks.push_back(
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateSymlinkTo("/tmp", path)));
- path = symlinks[i].path();
- }
-
- int execve_errno;
- ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(path, {path}, {}, /*child=*/nullptr, &execve_errno));
- EXPECT_EQ(execve_errno, ELOOP);
-}
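-
- // A standalone illustration of the same ELOOP limit using raw symlink(2)
- // rather than the TempPath helpers. The target and link paths here are
- // hypothetical and the sketch leaves its links behind.
- void SymlinkChainSketch() {
-   std::string target = "/bin/true";
-   for (int i = 0; i <= kLinuxMaxSymlinks; i++) {  // One link past the limit.
-     std::string link = absl::StrCat("/tmp/exec_loop_link_", i);
-     if (symlink(target.c_str(), link.c_str()) != 0) {
-       return;  // Best-effort sketch; bail out on any error.
-     }
-     target = link;
-   }
-   char* const argv[] = {nullptr};
-   char* const envp[] = {nullptr};
-   // Path resolution traverses 41 symlinks, so this fails and errno == ELOOP.
-   execve(target.c_str(), argv, envp);
- }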
-
-TEST(ExecTest, SymlinkLimitRefreshedForInterpreter) {
- std::string tmp_dir = "/tmp";
- std::string interpreter_path = "/bin/echo";
- TempPath script = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- tmp_dir, absl::StrCat("#!", interpreter_path), 0755));
- std::string script_path = script.path();
-
- // Hold onto TempPath objects so they are not destructed prematurely.
- std::vector<TempPath> interpreter_symlinks;
- std::vector<TempPath> script_symlinks;
- // Replace both the interpreter and script paths with symlink chains of just
- // over half the symlink limit each; this is the minimum required to test that
- // the symlink limit applies separately to each traversal, while tolerating
- // some symlinks in the resolution of (the original) interpreter_path and
- // script_path.
- for (int i = 0; i < (kLinuxMaxSymlinks / 2) + 1; i++) {
- interpreter_symlinks.push_back(ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateSymlinkTo(tmp_dir, interpreter_path)));
- interpreter_path = interpreter_symlinks[i].path();
- script_symlinks.push_back(ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateSymlinkTo(tmp_dir, script_path)));
- script_path = script_symlinks[i].path();
- }
-
- CheckExec(script_path, {script_path}, {}, ArgEnvExitStatus(0, 0), "");
-}
-
-TEST(ExecveatTest, BasicWithFDCWD) {
- std::string path = RunfilePath(kBasicWorkload);
- CheckExecveat(AT_FDCWD, path, {path}, {}, /*flags=*/0, ArgEnvExitStatus(0, 0),
- absl::StrCat(path, "\n"));
-}
-
-TEST(ExecveatTest, Basic) {
- std::string absolute_path = RunfilePath(kBasicWorkload);
- std::string parent_dir = std::string(Dirname(absolute_path));
- std::string base = std::string(Basename(absolute_path));
- const FileDescriptor dirfd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(parent_dir, O_DIRECTORY));
-
- CheckExecveat(dirfd.get(), base, {absolute_path}, {}, /*flags=*/0,
- ArgEnvExitStatus(0, 0), absl::StrCat(absolute_path, "\n"));
-}
-
-TEST(ExecveatTest, FDNotADirectory) {
- std::string absolute_path = RunfilePath(kBasicWorkload);
- std::string base = std::string(Basename(absolute_path));
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(absolute_path, 0));
-
- int execve_errno;
- ASSERT_NO_ERRNO_AND_VALUE(ForkAndExecveat(fd.get(), base, {absolute_path}, {},
- /*flags=*/0, /*child=*/nullptr,
- &execve_errno));
- EXPECT_EQ(execve_errno, ENOTDIR);
-}
-
-TEST(ExecveatTest, AbsolutePathWithFDCWD) {
- std::string path = RunfilePath(kBasicWorkload);
- CheckExecveat(AT_FDCWD, path, {path}, {}, /*flags=*/0, ArgEnvExitStatus(0, 0),
- absl::StrCat(path, "\n"));
-}
-
-TEST(ExecveatTest, AbsolutePath) {
- std::string path = RunfilePath(kBasicWorkload);
- // File descriptor should be ignored when an absolute path is given.
- const int32_t badFD = -1;
- CheckExecveat(badFD, path, {path}, {}, /*flags=*/0, ArgEnvExitStatus(0, 0),
- absl::StrCat(path, "\n"));
-}
-
-TEST(ExecveatTest, EmptyPathBasic) {
- std::string path = RunfilePath(kBasicWorkload);
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(path, O_PATH));
-
- CheckExecveat(fd.get(), "", {path}, {}, AT_EMPTY_PATH, ArgEnvExitStatus(0, 0),
- absl::StrCat(path, "\n"));
-}
-
-TEST(ExecveatTest, EmptyPathWithDirFD) {
- std::string path = RunfilePath(kBasicWorkload);
- std::string parent_dir = std::string(Dirname(path));
- const FileDescriptor dirfd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(parent_dir, O_DIRECTORY));
-
- int execve_errno;
- ASSERT_NO_ERRNO_AND_VALUE(ForkAndExecveat(dirfd.get(), "", {path}, {},
- AT_EMPTY_PATH,
- /*child=*/nullptr, &execve_errno));
- EXPECT_EQ(execve_errno, EACCES);
-}
-
-TEST(ExecveatTest, EmptyPathWithoutEmptyPathFlag) {
- std::string path = RunfilePath(kBasicWorkload);
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(path, O_PATH));
-
- int execve_errno;
- ASSERT_NO_ERRNO_AND_VALUE(ForkAndExecveat(
- fd.get(), "", {path}, {}, /*flags=*/0, /*child=*/nullptr, &execve_errno));
- EXPECT_EQ(execve_errno, ENOENT);
-}
-
-TEST(ExecveatTest, AbsolutePathWithEmptyPathFlag) {
- std::string path = RunfilePath(kBasicWorkload);
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(path, O_PATH));
-
- CheckExecveat(fd.get(), path, {path}, {}, AT_EMPTY_PATH,
- ArgEnvExitStatus(0, 0), absl::StrCat(path, "\n"));
-}
-
-TEST(ExecveatTest, RelativePathWithEmptyPathFlag) {
- std::string absolute_path = RunfilePath(kBasicWorkload);
- std::string parent_dir = std::string(Dirname(absolute_path));
- std::string base = std::string(Basename(absolute_path));
- const FileDescriptor dirfd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(parent_dir, O_DIRECTORY));
-
- CheckExecveat(dirfd.get(), base, {absolute_path}, {}, AT_EMPTY_PATH,
- ArgEnvExitStatus(0, 0), absl::StrCat(absolute_path, "\n"));
-}
-
-TEST(ExecveatTest, SymlinkNoFollowWithRelativePath) {
- std::string parent_dir = "/tmp";
- TempPath link = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateSymlinkTo(parent_dir, RunfilePath(kBasicWorkload)));
- const FileDescriptor dirfd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(parent_dir, O_DIRECTORY));
- std::string base = std::string(Basename(link.path()));
-
- int execve_errno;
- ASSERT_NO_ERRNO_AND_VALUE(ForkAndExecveat(dirfd.get(), base, {base}, {},
- AT_SYMLINK_NOFOLLOW,
- /*child=*/nullptr, &execve_errno));
- EXPECT_EQ(execve_errno, ELOOP);
-}
-
-TEST(ExecveatTest, UnshareFiles) {
- TempPath tempFile = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateFileWith(GetAbsoluteTestTmpdir(), "bar", 0755));
- const FileDescriptor fd_closed_on_exec =
- ASSERT_NO_ERRNO_AND_VALUE(Open(tempFile.path(), O_RDONLY | O_CLOEXEC));
-
- ExecveArray argv = {"test"};
- ExecveArray envp;
- std::string child_path = RunfilePath(kBasicWorkload);
- pid_t child =
- syscall(__NR_clone, SIGCHLD | CLONE_VFORK | CLONE_FILES, 0, 0, 0, 0);
- if (child == 0) {
- execve(child_path.c_str(), argv.get(), envp.get());
- _exit(1);
- }
- ASSERT_THAT(child, SyscallSucceeds());
-
- int status;
- ASSERT_THAT(RetryEINTR(waitpid)(child, &status, 0), SyscallSucceeds());
- EXPECT_EQ(status, 0);
-
- struct stat st;
- EXPECT_THAT(fstat(fd_closed_on_exec.get(), &st), SyscallSucceeds());
-}
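-
- // Context for the test above: the child created with CLONE_FILES shares the
- // parent's descriptor table, so if execve applied close-on-exec to the shared
- // table, fd_closed_on_exec would vanish from the parent as well. Instead,
- // execve unshares the table first and only then closes O_CLOEXEC descriptors,
- // which is why the parent's fstat above still succeeds.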
-
-TEST(ExecveatTest, SymlinkNoFollowWithAbsolutePath) {
- std::string parent_dir = "/tmp";
- TempPath link = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateSymlinkTo(parent_dir, RunfilePath(kBasicWorkload)));
- std::string path = link.path();
-
- int execve_errno;
- ASSERT_NO_ERRNO_AND_VALUE(ForkAndExecveat(AT_FDCWD, path, {path}, {},
- AT_SYMLINK_NOFOLLOW,
- /*child=*/nullptr, &execve_errno));
- EXPECT_EQ(execve_errno, ELOOP);
-}
-
-TEST(ExecveatTest, SymlinkNoFollowAndEmptyPath) {
- TempPath link = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateSymlinkTo("/tmp", RunfilePath(kBasicWorkload)));
- std::string path = link.path();
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(path, 0));
-
- CheckExecveat(fd.get(), "", {path}, {}, AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW,
- ArgEnvExitStatus(0, 0), absl::StrCat(path, "\n"));
-}
-
-TEST(ExecveatTest, SymlinkNoFollowIgnoreSymlinkAncestor) {
- TempPath parent_link =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateSymlinkTo("/tmp", "/bin"));
- std::string path_with_symlink = JoinPath(parent_link.path(), "echo");
-
- CheckExecveat(AT_FDCWD, path_with_symlink, {path_with_symlink}, {},
- AT_SYMLINK_NOFOLLOW, ArgEnvExitStatus(0, 0), "");
-}
-
-TEST(ExecveatTest, SymlinkNoFollowWithNormalFile) {
- const FileDescriptor dirfd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/bin", O_DIRECTORY));
-
- CheckExecveat(dirfd.get(), "echo", {"echo"}, {}, AT_SYMLINK_NOFOLLOW,
- ArgEnvExitStatus(0, 0), "");
-}
-
-TEST(ExecveatTest, BasicWithCloexecFD) {
- std::string path = RunfilePath(kBasicWorkload);
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(path, O_CLOEXEC));
-
- CheckExecveat(fd.get(), "", {path}, {}, AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH,
- ArgEnvExitStatus(0, 0), absl::StrCat(path, "\n"));
-}
-
-TEST(ExecveatTest, InterpreterScriptWithCloexecFD) {
- std::string path = RunfilePath(kExitScript);
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(path, O_CLOEXEC));
-
- int execve_errno;
- ASSERT_NO_ERRNO_AND_VALUE(ForkAndExecveat(fd.get(), "", {path}, {},
- AT_EMPTY_PATH, /*child=*/nullptr,
- &execve_errno));
- EXPECT_EQ(execve_errno, ENOENT);
-}
-
-TEST(ExecveatTest, InterpreterScriptWithCloexecDirFD) {
- std::string absolute_path = RunfilePath(kExitScript);
- std::string parent_dir = std::string(Dirname(absolute_path));
- std::string base = std::string(Basename(absolute_path));
- const FileDescriptor dirfd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(parent_dir, O_CLOEXEC | O_DIRECTORY));
-
- int execve_errno;
- ASSERT_NO_ERRNO_AND_VALUE(ForkAndExecveat(dirfd.get(), base, {base}, {},
- /*flags=*/0, /*child=*/nullptr,
- &execve_errno));
- EXPECT_EQ(execve_errno, ENOENT);
-}
-
-TEST(ExecveatTest, InvalidFlags) {
- int execve_errno;
- ASSERT_NO_ERRNO_AND_VALUE(ForkAndExecveat(
- /*dirfd=*/-1, "", {}, {}, /*flags=*/0xFFFF, /*child=*/nullptr,
- &execve_errno));
- EXPECT_EQ(execve_errno, EINVAL);
-}
-
-// Priority consistent across calls to execve()
-TEST(GetpriorityTest, ExecveMaintainsPriority) {
- int prio = 16;
- ASSERT_THAT(setpriority(PRIO_PROCESS, getpid(), prio), SyscallSucceeds());
-
- // To avoid trying to use negative exit values, check for
- // 20 - prio. Since prio should always be in the range [-20, 19],
- // this leaves expected_exit_code in the range [1, 40].
- int expected_exit_code = 20 - prio;
-
- // Program run (priority_execve) will exit(20 - X) where
- // X=getpriority(PRIO_PROCESS,0). Check that this matches expected_exit_code,
- // i.e. that the priority survived execve.
- CheckExec(RunfilePath(kPriorityWorkload), {RunfilePath(kPriorityWorkload)},
- {}, W_EXITCODE(expected_exit_code, 0), "");
-}
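-
- // Worked example for the test above: with prio = 16 the expected exit code is
- // 20 - 16 = 4, so the exec'd priority workload must still observe
- // getpriority(PRIO_PROCESS, 0) == 16 after execve and exit with 4.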
-
-void ExecWithThread() {
- // Used to ensure that the thread has actually started.
- absl::Mutex mu;
- bool started = false;
-
- ScopedThread t([&] {
- mu.Lock();
- started = true;
- mu.Unlock();
-
- while (true) {
- pause();
- }
- });
-
- mu.LockWhen(absl::Condition(&started));
- mu.Unlock();
-
- const ExecveArray argv = {"/proc/self/exe", kExit42};
- const ExecveArray envv;
-
- execve("/proc/self/exe", argv.get(), envv.get());
- exit(errno);
-}
-
-void ExecFromThread() {
- ScopedThread t([] {
- const ExecveArray argv = {"/proc/self/exe", kExit42};
- const ExecveArray envv;
-
- execve("/proc/self/exe", argv.get(), envv.get());
- exit(errno);
- });
-
- while (true) {
- pause();
- }
-}
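-
- // Note on the two helpers above: execve from a non-leader thread is valid.
- // The kernel tears down every other thread in the group and the calling
- // thread takes over the thread-group leader's identity before the new image
- // runs, which is why both variants exit with code 42 rather than hanging in
- // pause().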
-
-bool ValidateProcCmdlineVsArgv(const int argc, const char* const* argv) {
- auto contents_or = GetContents("/proc/self/cmdline");
- if (!contents_or.ok()) {
- std::cerr << "Unable to get /proc/self/cmdline: " << contents_or.error()
- << std::endl;
- return false;
- }
- auto contents = contents_or.ValueOrDie();
- if (contents.back() != '\0') {
- std::cerr << "Non-null terminated /proc/self/cmdline!" << std::endl;
- return false;
- }
- contents.pop_back();
- std::vector<std::string> procfs_cmdline = absl::StrSplit(contents, '\0');
-
- if (static_cast<int>(procfs_cmdline.size()) != argc) {
- std::cerr << "argc = " << argc << " != " << procfs_cmdline.size()
- << std::endl;
- return false;
- }
-
- for (int i = 0; i < argc; ++i) {
- if (procfs_cmdline[i] != argv[i]) {
- std::cerr << "Procfs command line argument " << i << " mismatch "
- << procfs_cmdline[i] << " != " << argv[i] << std::endl;
- return false;
- }
- }
- return true;
-}
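-
- // For reference, /proc/self/cmdline is a NUL-separated, NUL-terminated blob:
- // running, say, `exec_test --foo` produces the raw bytes
- // "exec_test\0--foo\0", which the function above splits back into
- // {"exec_test", "--foo"} after dropping the trailing NUL. The binary name is
- // illustrative only.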
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
-
-int main(int argc, char** argv) {
- // Start by validating that the stack argv is consistent with procfs.
- if (!gvisor::testing::ValidateProcCmdlineVsArgv(argc, argv)) {
- return 1;
- }
-
- // Some of these tests require no background threads, so check for them before
- // TestInit.
- for (int i = 0; i < argc; i++) {
- absl::string_view arg(argv[i]);
-
- if (arg == gvisor::testing::kExit42) {
- return 42;
- }
- if (arg == gvisor::testing::kExecWithThread) {
- gvisor::testing::ExecWithThread();
- return 1;
- }
- if (arg == gvisor::testing::kExecFromThread) {
- gvisor::testing::ExecFromThread();
- return 1;
- }
- }
-
- gvisor::testing::TestInit(&argc, &argv);
- return gvisor::testing::RunAllTests();
-}
diff --git a/test/syscalls/linux/exec.h b/test/syscalls/linux/exec.h
deleted file mode 100644
index 5c0f7e654..000000000
--- a/test/syscalls/linux/exec.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_EXEC_H_
-#define GVISOR_TEST_SYSCALLS_EXEC_H_
-
-#include <sys/wait.h>
-
-namespace gvisor {
-namespace testing {
-
-// Returns the exit code used by exec_basic_workload.
-inline int ArgEnvExitCode(int args, int envs) { return args + envs * 10; }
-
-// Returns the exit status used by exec_basic_workload.
-inline int ArgEnvExitStatus(int args, int envs) {
- return W_EXITCODE(ArgEnvExitCode(args, envs), 0);
-}
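-
- // Worked example: a workload started with two arguments after argv[0] and a
- // single environment variable reports ArgEnvExitCode(2, 1) == 2 + 1 * 10 ==
- // 12, so a parent waiting on it sees WEXITSTATUS(status) == 12, i.e. the
- // status ArgEnvExitStatus(2, 1).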
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_EXEC_H_
diff --git a/test/syscalls/linux/exec_assert_closed_workload.cc b/test/syscalls/linux/exec_assert_closed_workload.cc
deleted file mode 100644
index 95643618d..000000000
--- a/test/syscalls/linux/exec_assert_closed_workload.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <stdlib.h>
-#include <sys/stat.h>
-#include <unistd.h>
-
-#include <iostream>
-
-#include "absl/strings/numbers.h"
-
-int main(int argc, char** argv) {
- if (argc != 2) {
- std::cerr << "need two arguments, got " << argc;
- exit(1);
- }
- int fd;
- if (!absl::SimpleAtoi(argv[1], &fd)) {
- std::cerr << "fd: " << argv[1] << " could not be parsed" << std::endl;
- exit(1);
- }
- struct stat s;
- if (fstat(fd, &s) == 0) {
- std::cerr << "fd: " << argv[1] << " should not be valid" << std::endl;
- exit(2);
- }
- if (errno != EBADF) {
- std::cerr << "fstat fd: " << argv[1] << " got errno: " << errno
- << " wanted: " << EBADF << std::endl;
- exit(1);
- }
- return 0;
-}
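-
- // Summary of the exit codes above: 0 means the descriptor was already closed
- // (fstat failed with EBADF), 2 means it was unexpectedly still open, and 1
- // covers bad invocations or unexpected errno values. The exec tests key off
- // the 0-vs-2 distinction.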
diff --git a/test/syscalls/linux/exec_basic_workload.cc b/test/syscalls/linux/exec_basic_workload.cc
deleted file mode 100644
index 1bbd6437e..000000000
--- a/test/syscalls/linux/exec_basic_workload.cc
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <stdlib.h>
-
-#include <iostream>
-
-#include "test/syscalls/linux/exec.h"
-
-int main(int argc, char** argv, char** envp) {
- int i;
- for (i = 0; i < argc; i++) {
- std::cerr << argv[i] << std::endl;
- }
- for (i = 0; envp[i] != nullptr; i++) {
- std::cerr << envp[i] << std::endl;
- }
- exit(gvisor::testing::ArgEnvExitCode(argc - 1, i));
- return 0;
-}
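-
- // Usage example: invoked as {prog, "a", "b"} with environment {"FOO=1"}, the
- // workload prints prog, a, b, and FOO=1 one per line on stderr and exits with
- // ArgEnvExitCode(2, 1) == 12. The exec tests compare both the echoed lines
- // and that exit status.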
diff --git a/test/syscalls/linux/exec_binary.cc b/test/syscalls/linux/exec_binary.cc
deleted file mode 100644
index b0fb120c6..000000000
--- a/test/syscalls/linux/exec_binary.cc
+++ /dev/null
@@ -1,1681 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <elf.h>
-#include <errno.h>
-#include <signal.h>
-#include <sys/ptrace.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <sys/user.h>
-#include <unistd.h>
-
-#include <algorithm>
-#include <functional>
-#include <iterator>
-#include <tuple>
-#include <utility>
-#include <vector>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/strings/str_cat.h"
-#include "absl/strings/string_view.h"
-#include "test/util/cleanup.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/multiprocess_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/proc_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-using ::testing::AnyOf;
-using ::testing::Eq;
-
-#if !defined(__x86_64__) && !defined(__aarch64__)
-// The assembly stub and ELF internal details must be ported to other arches.
-#error "Test only supported on x86-64/arm64"
-#endif // __x86_64__ || __aarch64__
-
-#if defined(__x86_64__)
-#define EM_TYPE EM_X86_64
-#define IP_REG(p) ((p).rip)
-#define RAX_REG(p) ((p).rax)
-#define RDI_REG(p) ((p).rdi)
-#define RETURN_REG(p) ((p).rax)
-
-// amd64 stub that calls PTRACE_TRACEME and sends itself SIGSTOP.
-const char kPtraceCode[] = {
- // movq $101, %rax /* ptrace */
- '\x48',
- '\xc7',
- '\xc0',
- '\x65',
- '\x00',
- '\x00',
- '\x00',
- // movq $0, %rsi /* PTRACE_TRACEME */
- '\x48',
- '\xc7',
- '\xc6',
- '\x00',
- '\x00',
- '\x00',
- '\x00',
- // movq $0, %rdi
- '\x48',
- '\xc7',
- '\xc7',
- '\x00',
- '\x00',
- '\x00',
- '\x00',
- // movq $0, %rdx
- '\x48',
- '\xc7',
- '\xc2',
- '\x00',
- '\x00',
- '\x00',
- '\x00',
- // movq $0, %r10
- '\x49',
- '\xc7',
- '\xc2',
- '\x00',
- '\x00',
- '\x00',
- '\x00',
- // syscall
- '\x0f',
- '\x05',
-
- // movq $39, %rax /* getpid */
- '\x48',
- '\xc7',
- '\xc0',
- '\x27',
- '\x00',
- '\x00',
- '\x00',
- // syscall
- '\x0f',
- '\x05',
-
- // movq %rax, %rdi /* pid */
- '\x48',
- '\x89',
- '\xc7',
- // movq $62, %rax /* kill */
- '\x48',
- '\xc7',
- '\xc0',
- '\x3e',
- '\x00',
- '\x00',
- '\x00',
- // movq $19, %rsi /* SIGSTOP */
- '\x48',
- '\xc7',
- '\xc6',
- '\x13',
- '\x00',
- '\x00',
- '\x00',
- // syscall
- '\x0f',
- '\x05',
-};
-
-// Size of a syscall instruction.
-constexpr int kSyscallSize = 2;
-
-#elif defined(__aarch64__)
-#define EM_TYPE EM_AARCH64
-#define IP_REG(p) ((p).pc)
-#define RAX_REG(p) ((p).regs[8])
-#define RDI_REG(p) ((p).regs[0])
-#define RETURN_REG(p) ((p).regs[0])
-
-const char kPtraceCode[] = {
- // MOVD $117, R8 /* ptrace */
- '\xa8',
- '\x0e',
- '\x80',
- '\xd2',
- // MOVD $0, R0 /* PTRACE_TRACEME */
- '\x00',
- '\x00',
- '\x80',
- '\xd2',
- // MOVD $0, R1 /* pid */
- '\x01',
- '\x00',
- '\x80',
- '\xd2',
- // MOVD $0, R2 /* addr */
- '\x02',
- '\x00',
- '\x80',
- '\xd2',
- // MOVD $0, R3 /* data */
- '\x03',
- '\x00',
- '\x80',
- '\xd2',
- // SVC
- '\x01',
- '\x00',
- '\x00',
- '\xd4',
- // MOVD $172, R8 /* getpid */
- '\x88',
- '\x15',
- '\x80',
- '\xd2',
- // SVC
- '\x01',
- '\x00',
- '\x00',
- '\xd4',
- // MOVD $129, R8 /* kill, R0=pid */
- '\x28',
- '\x10',
- '\x80',
- '\xd2',
- // MOVD $19, R1 /* SIGSTOP */
- '\x61',
- '\x02',
- '\x80',
- '\xd2',
- // SVC
- '\x01',
- '\x00',
- '\x00',
- '\xd4',
-};
-// Size of a syscall instruction.
-constexpr int kSyscallSize = 4;
-#else
-#error "Unknown architecture"
-#endif
-
-// This test suite tests executable loading in the kernel (ELF and interpreter
-// scripts).
-
-// Parameterized ELF types for 64 and 32 bit.
-template <int Size>
-struct ElfTypes;
-
-template <>
-struct ElfTypes<64> {
- typedef Elf64_Ehdr ElfEhdr;
- typedef Elf64_Phdr ElfPhdr;
-};
-
-template <>
-struct ElfTypes<32> {
- typedef Elf32_Ehdr ElfEhdr;
- typedef Elf32_Phdr ElfPhdr;
-};
-
-template <int Size>
-struct ElfBinary {
- using ElfEhdr = typename ElfTypes<Size>::ElfEhdr;
- using ElfPhdr = typename ElfTypes<Size>::ElfPhdr;
-
- ElfEhdr header = {};
- std::vector<ElfPhdr> phdrs;
- std::vector<char> data;
-
- // UpdateOffsets updates p_offset, p_vaddr in all phdrs to account for the
- // space taken by the header and phdrs.
- //
- // It also updates header.e_phnum and adds the offset to header.e_entry to
- // account for the headers residing in the first PT_LOAD segment.
- //
- // Before calling UpdateOffsets each of those fields should be the appropriate
- // offset into data.
- void UpdateOffsets() {
- size_t offset = sizeof(header) + phdrs.size() * sizeof(ElfPhdr);
- header.e_entry += offset;
- header.e_phnum = phdrs.size();
- for (auto& p : phdrs) {
- p.p_offset += offset;
- p.p_vaddr += offset;
- }
- }
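-
-   // Worked example (64-bit): with StandardElf's two phdrs the offset above is
-   // sizeof(Elf64_Ehdr) + 2 * sizeof(Elf64_Phdr) = 64 + 2 * 56 = 176 bytes,
-   // and each additional phdr adds 56 more. This is the "few hundred bytes"
-   // by which later test comments say the header pushes p_vaddr up.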
-
- // AddInterpreter adds a PT_INTERP segment with the passed contents.
- //
- // A later call to UpdateOffsets is required to make the new phdr valid.
- void AddInterpreter(std::vector<char> contents) {
- const int start = data.size();
- data.insert(data.end(), contents.begin(), contents.end());
- const int size = data.size() - start;
-
- ElfPhdr phdr = {};
- phdr.p_type = PT_INTERP;
- phdr.p_offset = start;
- phdr.p_filesz = size;
- phdr.p_memsz = size;
- // "If [PT_INTERP] is present, it must precede any loadable segment entry."
- phdrs.insert(phdrs.begin(), phdr);
- }
-
- // Writes the header, phdrs, and data to fd.
- PosixError Write(int fd) const {
- int ret = WriteFd(fd, &header, sizeof(header));
- if (ret < 0) {
- return PosixError(errno, "failed to write header");
- } else if (ret != sizeof(header)) {
- return PosixError(EIO, absl::StrCat("short write of header: ", ret));
- }
-
- for (auto const& p : phdrs) {
- ret = WriteFd(fd, &p, sizeof(p));
- if (ret < 0) {
- return PosixError(errno, "failed to write phdr");
- } else if (ret != sizeof(p)) {
- return PosixError(EIO, absl::StrCat("short write of phdr: ", ret));
- }
- }
-
- ret = WriteFd(fd, data.data(), data.size());
- if (ret < 0) {
- return PosixError(errno, "failed to write data");
- } else if (ret != static_cast<int>(data.size())) {
- return PosixError(EIO, absl::StrCat("short write of data: ", ret));
- }
-
- return NoError();
- }
-};
-
-// Creates a new temporary executable ELF file in parent with elf as the
-// contents.
-template <int Size>
-PosixErrorOr<TempPath> CreateElfWith(absl::string_view parent,
- ElfBinary<Size> const& elf) {
- ASSIGN_OR_RETURN_ERRNO(
- auto file, TempPath::CreateFileWith(parent, absl::string_view(), 0755));
- ASSIGN_OR_RETURN_ERRNO(auto fd, Open(file.path(), O_RDWR));
- RETURN_IF_ERRNO(elf.Write(fd.get()));
- return std::move(file);
-}
-
-// Creates a new temporary executable ELF file with elf as the contents.
-template <int Size>
-PosixErrorOr<TempPath> CreateElfWith(ElfBinary<Size> const& elf) {
- return CreateElfWith(GetAbsoluteTestTmpdir(), elf);
-}
-
-// Wait for pid to stop, and assert that it stopped via SIGSTOP.
-PosixError WaitStopped(pid_t pid) {
- int status;
- int ret = RetryEINTR(waitpid)(pid, &status, 0);
- MaybeSave();
- if (ret < 0) {
- return PosixError(errno, "wait failed");
- } else if (ret != pid) {
- return PosixError(ESRCH, absl::StrCat("wait got ", ret, " want ", pid));
- }
-
- if (!WIFSTOPPED(status) || WSTOPSIG(status) != SIGSTOP) {
- return PosixError(EINVAL,
- absl::StrCat("pid did not SIGSTOP; status = ", status));
- }
-
- return NoError();
-}
-
-// Returns a valid ELF that PTRACE_TRACEME and SIGSTOPs itself.
-//
-// UpdateOffsets must be called before writing this ELF.
-ElfBinary<64> StandardElf() {
- ElfBinary<64> elf;
- elf.header.e_ident[EI_MAG0] = ELFMAG0;
- elf.header.e_ident[EI_MAG1] = ELFMAG1;
- elf.header.e_ident[EI_MAG2] = ELFMAG2;
- elf.header.e_ident[EI_MAG3] = ELFMAG3;
- elf.header.e_ident[EI_CLASS] = ELFCLASS64;
- elf.header.e_ident[EI_DATA] = ELFDATA2LSB;
- elf.header.e_ident[EI_VERSION] = EV_CURRENT;
- elf.header.e_type = ET_EXEC;
- elf.header.e_machine = EM_TYPE;
- elf.header.e_version = EV_CURRENT;
- elf.header.e_phoff = sizeof(elf.header);
- elf.header.e_phentsize = sizeof(decltype(elf)::ElfPhdr);
-
- // TODO(gvisor.dev/issue/153): Always include a PT_GNU_STACK segment to
- // disable executable stacks. With this omitted the stack (and all PROT_READ)
- // mappings should be executable, but gVisor doesn't support that.
- decltype(elf)::ElfPhdr phdr = {};
- phdr.p_type = PT_GNU_STACK;
- phdr.p_flags = PF_R | PF_W;
- elf.phdrs.push_back(phdr);
-
- phdr = {};
- phdr.p_type = PT_LOAD;
- phdr.p_flags = PF_R | PF_X;
- phdr.p_offset = 0;
- phdr.p_vaddr = 0x40000;
- phdr.p_filesz = sizeof(kPtraceCode);
- phdr.p_memsz = phdr.p_filesz;
- elf.phdrs.push_back(phdr);
-
- elf.header.e_entry = phdr.p_vaddr;
-
- elf.data.assign(kPtraceCode, kPtraceCode + sizeof(kPtraceCode));
-
- return elf;
-}
-
-// Test that a trivial binary executes.
-TEST(ElfTest, Execute) {
- ElfBinary<64> elf = StandardElf();
- elf.UpdateOffsets();
-
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(elf));
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(file.path(), {file.path()}, {}, &child, &execve_errno));
- ASSERT_EQ(execve_errno, 0);
-
- // Ensure it made it to SIGSTOP.
- ASSERT_NO_ERRNO(WaitStopped(child));
-
- struct user_regs_struct regs;
- struct iovec iov;
- iov.iov_base = &regs;
- iov.iov_len = sizeof(regs);
- EXPECT_THAT(ptrace(PTRACE_GETREGSET, child, NT_PRSTATUS, &iov),
- SyscallSucceeds());
- // Read exactly the full register set.
- EXPECT_EQ(iov.iov_len, sizeof(regs));
- // RIP/PC is just beyond the final syscall instruction.
- EXPECT_EQ(IP_REG(regs), elf.header.e_entry + sizeof(kPtraceCode));
-
- EXPECT_THAT(child, ContainsMappings(std::vector<ProcMapsEntry>({
- {0x40000, 0x41000, true, false, true, true, 0, 0, 0, 0,
- file.path().c_str()},
- })));
-}
-
-// StandardElf without data completes execve, but faults once running.
-TEST(ElfTest, MissingText) {
- ElfBinary<64> elf = StandardElf();
- elf.data.clear();
- elf.UpdateOffsets();
-
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(elf));
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(file.path(), {file.path()}, {}, &child, &execve_errno));
- ASSERT_EQ(execve_errno, 0);
-
- int status;
- ASSERT_THAT(RetryEINTR(waitpid)(child, &status, 0),
- SyscallSucceedsWithValue(child));
- // It runs off the end of the zeroes filling the end of the page.
-#if defined(__x86_64__)
- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGSEGV) << status;
-#elif defined(__aarch64__)
- // 0 is an invalid instruction opcode on arm64.
- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGILL) << status;
-#endif
-}
-
-// Typical ELF with a data + bss segment
-TEST(ElfTest, DataSegment) {
- ElfBinary<64> elf = StandardElf();
-
- // Create a standard ELF, but extend to 1.5 pages. The second page will be the
- // beginning of a multi-page data + bss segment.
- elf.data.resize(kPageSize + kPageSize / 2);
-
- decltype(elf)::ElfPhdr phdr = {};
- phdr.p_type = PT_LOAD;
- phdr.p_flags = PF_R | PF_W;
- phdr.p_offset = kPageSize;
- phdr.p_vaddr = 0x41000;
- phdr.p_filesz = kPageSize / 2;
- // The header is going to push vaddr up by a few hundred bytes. Keep p_memsz a
- // bit less than 2 pages so this mapping doesn't extend beyond 0x43000.
- phdr.p_memsz = 2 * kPageSize - kPageSize / 2;
- elf.phdrs.push_back(phdr);
-
- elf.UpdateOffsets();
-
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(elf));
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(file.path(), {file.path()}, {}, &child, &execve_errno));
- ASSERT_EQ(execve_errno, 0);
-
- ASSERT_NO_ERRNO(WaitStopped(child));
-
- EXPECT_THAT(
- child, ContainsMappings(std::vector<ProcMapsEntry>({
- // text page.
- {0x40000, 0x41000, true, false, true, true, 0, 0, 0, 0,
- file.path().c_str()},
- // data + bss page from file.
- {0x41000, 0x42000, true, true, false, true, kPageSize, 0, 0, 0,
- file.path().c_str()},
- // bss page from anon.
- {0x42000, 0x43000, true, true, false, true, 0, 0, 0, 0, ""},
- })));
-}
-
- // Additional pages beyond filesz honor (only) execute protections.
-//
-// N.B. Linux changed this in 4.11 (16e72e9b30986 "powerpc: do not make the
-// entire heap executable"). Previously, extra pages were always RW.
-TEST(ElfTest, ExtraMemPages) {
- // gVisor has the newer behavior.
- if (!IsRunningOnGvisor()) {
- auto version = ASSERT_NO_ERRNO_AND_VALUE(GetKernelVersion());
- SKIP_IF(version.major < 4 || (version.major == 4 && version.minor < 11));
- }
-
- ElfBinary<64> elf = StandardElf();
-
- // Create a standard ELF, but extend to 1.5 pages. The second page will be the
- // beginning of a multi-page data + bss segment.
- elf.data.resize(kPageSize + kPageSize / 2);
-
- decltype(elf)::ElfPhdr phdr = {};
- phdr.p_type = PT_LOAD;
- // RWX segment. The extra anon page will also be RWX.
- //
- // N.B. Linux uses clear_user to clear the end of the file-mapped page, which
- // respects the mapping protections. Thus if we map this RO with memsz >
- // (unaligned) filesz, then execve will fail with EFAULT. See padzero(elf_bss)
- // in fs/binfmt_elf.c:load_elf_binary.
- //
- // N.N.B.B. The above only applies to the last segment. For earlier segments,
- // the clear_user error is ignored.
- phdr.p_flags = PF_R | PF_W | PF_X;
- phdr.p_offset = kPageSize;
- phdr.p_vaddr = 0x41000;
- phdr.p_filesz = kPageSize / 2;
- // The header is going to push vaddr up by a few hundred bytes. Keep p_memsz a
- // bit less than 2 pages so this mapping doesn't extend beyond 0x43000.
- phdr.p_memsz = 2 * kPageSize - kPageSize / 2;
- elf.phdrs.push_back(phdr);
-
- elf.UpdateOffsets();
-
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(elf));
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(file.path(), {file.path()}, {}, &child, &execve_errno));
- ASSERT_EQ(execve_errno, 0);
-
- ASSERT_NO_ERRNO(WaitStopped(child));
-
- EXPECT_THAT(child,
- ContainsMappings(std::vector<ProcMapsEntry>({
- // text page.
- {0x40000, 0x41000, true, false, true, true, 0, 0, 0, 0,
- file.path().c_str()},
- // data + bss page from file.
- {0x41000, 0x42000, true, true, true, true, kPageSize, 0, 0, 0,
- file.path().c_str()},
- // extra page from anon.
- {0x42000, 0x43000, true, true, true, true, 0, 0, 0, 0, ""},
- })));
-}
-
-// An aligned segment with filesz == 0, memsz > 0 is anon-only.
-TEST(ElfTest, AnonOnlySegment) {
- ElfBinary<64> elf = StandardElf();
-
- decltype(elf)::ElfPhdr phdr = {};
- phdr.p_type = PT_LOAD;
- // RO segment. The extra anon page will be RW anyways.
- phdr.p_flags = PF_R;
- phdr.p_offset = 0;
- phdr.p_vaddr = 0x41000;
- phdr.p_filesz = 0;
- phdr.p_memsz = kPageSize;
- elf.phdrs.push_back(phdr);
-
- elf.UpdateOffsets();
-
- // UpdateOffsets adjusts p_vaddr and p_offset by the header size, but we need
- // a page-aligned p_vaddr to get a truly anon-only page.
- elf.phdrs[2].p_vaddr = 0x41000;
- // N.B. p_offset is now unaligned, but Linux doesn't care since this is
- // anon-only.
-
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(elf));
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(file.path(), {file.path()}, {}, &child, &execve_errno));
- ASSERT_EQ(execve_errno, 0);
-
- ASSERT_NO_ERRNO(WaitStopped(child));
-
- EXPECT_THAT(child,
- ContainsMappings(std::vector<ProcMapsEntry>({
- // text page.
- {0x40000, 0x41000, true, false, true, true, 0, 0, 0, 0,
- file.path().c_str()},
- // anon page.
- {0x41000, 0x42000, true, true, false, true, 0, 0, 0, 0, ""},
- })));
-}
-
-// p_offset must have the same alignment as p_vaddr.
-TEST(ElfTest, UnalignedOffset) {
- ElfBinary<64> elf = StandardElf();
-
- // Unaligned offset.
- elf.phdrs[1].p_offset += 1;
-
- elf.UpdateOffsets();
-
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(elf));
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(file.path(), {file.path()}, {}, &child, &execve_errno));
-
- // execve(2) fails with EINVAL, but behavior varies between Linux and gVisor.
- //
- // On Linux, the new mm is committed before attempting to map into it. By the
- // time we hit EINVAL in the segment mmap, the old mm is gone. Linux returns
- // to an empty mm, which immediately segfaults.
- //
- // OTOH, gVisor maps into the new mm before committing it. Thus when it hits
- // failure, the caller is still intact to receive the error.
- if (IsRunningOnGvisor()) {
- ASSERT_EQ(execve_errno, EINVAL);
- } else {
- ASSERT_EQ(execve_errno, 0);
-
- int status;
- ASSERT_THAT(RetryEINTR(waitpid)(child, &status, 0),
- SyscallSucceedsWithValue(child));
- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGSEGV) << status;
- }
-}
-
-// Linux will allow PT_LOAD segments to overlap.
-TEST(ElfTest, DirectlyOverlappingSegments) {
- // NOTE(b/37289926): see PIEOutOfOrderSegments.
- SKIP_IF(IsRunningOnGvisor());
-
- ElfBinary<64> elf = StandardElf();
-
- // Same as the StandardElf mapping.
- decltype(elf)::ElfPhdr phdr = {};
- phdr.p_type = PT_LOAD;
- // Add PF_W so we can differentiate this mapping from the first.
- phdr.p_flags = PF_R | PF_W | PF_X;
- phdr.p_offset = 0;
- phdr.p_vaddr = 0x40000;
- phdr.p_filesz = sizeof(kPtraceCode);
- phdr.p_memsz = phdr.p_filesz;
- elf.phdrs.push_back(phdr);
-
- elf.UpdateOffsets();
-
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(elf));
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(file.path(), {file.path()}, {}, &child, &execve_errno));
- ASSERT_EQ(execve_errno, 0);
-
- ASSERT_NO_ERRNO(WaitStopped(child));
-
- EXPECT_THAT(child, ContainsMappings(std::vector<ProcMapsEntry>({
- {0x40000, 0x41000, true, true, true, true, 0, 0, 0, 0,
- file.path().c_str()},
- })));
-}
-
-// Linux allows out-of-order PT_LOAD segments.
-TEST(ElfTest, OutOfOrderSegments) {
- // NOTE(b/37289926): see PIEOutOfOrderSegments.
- SKIP_IF(IsRunningOnGvisor());
-
- ElfBinary<64> elf = StandardElf();
-
- decltype(elf)::ElfPhdr phdr = {};
- phdr.p_type = PT_LOAD;
- phdr.p_flags = PF_R | PF_X;
- phdr.p_offset = 0;
- phdr.p_vaddr = 0x20000;
- phdr.p_filesz = sizeof(kPtraceCode);
- phdr.p_memsz = phdr.p_filesz;
- elf.phdrs.push_back(phdr);
-
- elf.UpdateOffsets();
-
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(elf));
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(file.path(), {file.path()}, {}, &child, &execve_errno));
- ASSERT_EQ(execve_errno, 0);
-
- ASSERT_NO_ERRNO(WaitStopped(child));
-
- EXPECT_THAT(child, ContainsMappings(std::vector<ProcMapsEntry>({
- {0x20000, 0x21000, true, false, true, true, 0, 0, 0, 0,
- file.path().c_str()},
- {0x40000, 0x41000, true, false, true, true, 0, 0, 0, 0,
- file.path().c_str()},
- })));
-}
-
- // header.e_phoff is bounded by the end of the file.
-TEST(ElfTest, OutOfBoundsPhdrs) {
- ElfBinary<64> elf = StandardElf();
- elf.header.e_phoff = 0x100000;
- elf.UpdateOffsets();
-
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(elf));
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(file.path(), {file.path()}, {}, &child, &execve_errno));
- // On Linux 3.11, this caused EIO. On newer Linux, it causes ENOEXEC.
- EXPECT_THAT(execve_errno, AnyOf(Eq(ENOEXEC), Eq(EIO)));
-}
-
-// Claim there is a phdr beyond the end of the file, but don't include it.
-TEST(ElfTest, MissingPhdr) {
- ElfBinary<64> elf = StandardElf();
-
- // Clear data so the file ends immediately after the phdrs.
- // N.B. Per ElfTest.MissingText, StandardElf without data completes execve
- // without error.
- elf.data.clear();
- elf.UpdateOffsets();
-
- // Claim that there is another phdr just beyond the end of the file. Of
- // course, it isn't accessible.
- elf.header.e_phnum++;
-
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(elf));
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(file.path(), {file.path()}, {}, &child, &execve_errno));
- // On Linux 3.11, this caused EIO. On newer Linux, it causes ENOEXEC.
- EXPECT_THAT(execve_errno, AnyOf(Eq(ENOEXEC), Eq(EIO)));
-}
-
-// No headers at all, just the ELF magic.
-TEST(ElfTest, MissingHeader) {
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileMode(0755));
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR));
-
- const char kElfMagic[] = {0x7f, 'E', 'L', 'F'};
-
- ASSERT_THAT(WriteFd(fd.get(), &kElfMagic, sizeof(kElfMagic)),
- SyscallSucceeds());
- fd.reset();
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(file.path(), {file.path()}, {}, &child, &execve_errno));
- EXPECT_EQ(execve_errno, ENOEXEC);
-}
-
-// Load a PIE ELF with a data + bss segment.
-TEST(ElfTest, PIE) {
- ElfBinary<64> elf = StandardElf();
-
- elf.header.e_type = ET_DYN;
-
- // Create a standard ELF, but extend to 1.5 pages. The second page will be the
- // beginning of a multi-page data + bss segment.
- elf.data.resize(kPageSize + kPageSize / 2);
-
- elf.header.e_entry = 0x0;
-
- decltype(elf)::ElfPhdr phdr = {};
- phdr.p_type = PT_LOAD;
- phdr.p_flags = PF_R | PF_W;
- phdr.p_offset = kPageSize;
- // Put the data segment at a bit of an offset.
- phdr.p_vaddr = 0x20000;
- phdr.p_filesz = kPageSize / 2;
- // The header is going to push vaddr up by a few hundred bytes. Keep p_memsz a
- // bit less than 2 pages so this mapping doesn't extend beyond 0x22000.
- phdr.p_memsz = 2 * kPageSize - kPageSize / 2;
- elf.phdrs.push_back(phdr);
-
- elf.UpdateOffsets();
-
- // The first segment really needs to start at 0 for a normal PIE binary, and
- // thus includes the headers.
- const uint64_t offset = elf.phdrs[1].p_offset;
- elf.phdrs[1].p_offset = 0x0;
- elf.phdrs[1].p_vaddr = 0x0;
- elf.phdrs[1].p_filesz += offset;
- elf.phdrs[1].p_memsz += offset;
-
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(elf));
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(file.path(), {file.path()}, {}, &child, &execve_errno));
- ASSERT_EQ(execve_errno, 0);
-
- ASSERT_NO_ERRNO(WaitStopped(child));
-
- // RIP tells us which page the first segment was loaded into.
- struct user_regs_struct regs;
- struct iovec iov;
- iov.iov_base = &regs;
- iov.iov_len = sizeof(regs);
-
- EXPECT_THAT(ptrace(PTRACE_GETREGSET, child, NT_PRSTATUS, &iov),
- SyscallSucceeds());
- // Read exactly the full register set.
- EXPECT_EQ(iov.iov_len, sizeof(regs));
-
- const uint64_t load_addr = IP_REG(regs) & ~(kPageSize - 1);
-
- EXPECT_THAT(child, ContainsMappings(std::vector<ProcMapsEntry>({
- // text page.
- {load_addr, load_addr + 0x1000, true, false, true,
- true, 0, 0, 0, 0, file.path().c_str()},
- // data + bss page from file.
- {load_addr + 0x20000, load_addr + 0x21000, true, true,
- false, true, kPageSize, 0, 0, 0, file.path().c_str()},
- // bss page from anon.
- {load_addr + 0x21000, load_addr + 0x22000, true, true,
- false, true, 0, 0, 0, 0, ""},
- })));
-}
-
-// PIE binary with a non-zero start address.
-//
-// This is non-standard for a PIE binary, but valid. The binary is still loaded
-// at an arbitrary address, not the first PT_LOAD vaddr.
-//
-// N.B. Linux changed this behavior in d1fd836dcf00d2028c700c7e44d2c23404062c90.
-// Previously, with "randomization" enabled, PIE binaries with a non-zero start
- // address would be loaded at the address they specified because mmap was
-// passed the load address, which wasn't 0 as expected.
-//
-// This change is present in kernel v4.1+.
-TEST(ElfTest, PIENonZeroStart) {
- // gVisor has the newer behavior.
- if (!IsRunningOnGvisor()) {
- auto version = ASSERT_NO_ERRNO_AND_VALUE(GetKernelVersion());
- SKIP_IF(version.major < 4 || (version.major == 4 && version.minor < 1));
- }
-
- ElfBinary<64> elf = StandardElf();
-
- elf.header.e_type = ET_DYN;
-
- // Create a standard ELF, but extend to 1.5 pages. The second page will be the
- // beginning of a multi-page data + bss segment.
- elf.data.resize(kPageSize + kPageSize / 2);
-
- decltype(elf)::ElfPhdr phdr = {};
- phdr.p_type = PT_LOAD;
- phdr.p_flags = PF_R | PF_W;
- phdr.p_offset = kPageSize;
- // Put the data segment at a bit of an offset.
- phdr.p_vaddr = 0x60000;
- phdr.p_filesz = kPageSize / 2;
- // The header is going to push vaddr up by a few hundred bytes. Keep p_memsz a
- // bit less than 2 pages so this mapping doesn't extend beyond 0x62000.
- phdr.p_memsz = 2 * kPageSize - kPageSize / 2;
- elf.phdrs.push_back(phdr);
-
- elf.UpdateOffsets();
-
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(elf));
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(file.path(), {file.path()}, {}, &child, &execve_errno));
- ASSERT_EQ(execve_errno, 0);
-
- ASSERT_NO_ERRNO(WaitStopped(child));
-
- // RIP tells us which page the first segment was loaded into.
- struct user_regs_struct regs;
- struct iovec iov;
- iov.iov_base = &regs;
- iov.iov_len = sizeof(regs);
- EXPECT_THAT(ptrace(PTRACE_GETREGSET, child, NT_PRSTATUS, &iov),
- SyscallSucceeds());
- // Read exactly the full register set.
- EXPECT_EQ(iov.iov_len, sizeof(regs));
-
- const uint64_t load_addr = IP_REG(regs) & ~(kPageSize - 1);
-
- // The ELF is loaded at an arbitrary address, not the first PT_LOAD vaddr.
- //
- // N.B. this is technically flaky, but Linux is *extremely* unlikely to pick
- // this as the start address, as it searches from the top down.
- EXPECT_NE(load_addr, 0x40000);
-
- EXPECT_THAT(child, ContainsMappings(std::vector<ProcMapsEntry>({
- // text page.
- {load_addr, load_addr + 0x1000, true, false, true,
- true, 0, 0, 0, 0, file.path().c_str()},
- // data + bss page from file.
- {load_addr + 0x20000, load_addr + 0x21000, true, true,
- false, true, kPageSize, 0, 0, 0, file.path().c_str()},
- // bss page from anon.
- {load_addr + 0x21000, load_addr + 0x22000, true, true,
- false, true, 0, 0, 0, 0, ""},
- })));
-}
-
-TEST(ElfTest, PIEOutOfOrderSegments) {
- // TODO(b/37289926): This triggers a bug in Linux where it computes the size
- // of the binary as 0x20000 - 0x40000 = 0xfffffffffffe0000, which obviously
- // fails to map.
- //
- // We test gVisor's behavior (of rejecting the binary) because I assert that
- // Linux is wrong and needs to be fixed.
- SKIP_IF(!IsRunningOnGvisor());
-
- ElfBinary<64> elf = StandardElf();
-
- elf.header.e_type = ET_DYN;
-
- // Create a standard ELF, but extend to 1.5 pages. The second page will be the
- // beginning of a multi-page data + bss segment.
- elf.data.resize(kPageSize + kPageSize / 2);
-
- decltype(elf)::ElfPhdr phdr = {};
- phdr.p_type = PT_LOAD;
- phdr.p_flags = PF_R | PF_W;
- phdr.p_offset = kPageSize;
- // Put the data segment *before* the first segment.
- phdr.p_vaddr = 0x20000;
- phdr.p_filesz = kPageSize / 2;
- // The header is going to push vaddr up by a few hundred bytes. Keep p_memsz a
- // bit less than 2 pages so this mapping doesn't extend beyond 0x22000.
- phdr.p_memsz = 2 * kPageSize - kPageSize / 2;
- elf.phdrs.push_back(phdr);
-
- elf.UpdateOffsets();
-
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(elf));
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(file.path(), {file.path()}, {}, &child, &execve_errno));
- EXPECT_EQ(execve_errno, ENOEXEC);
-}
-
-TEST(ElfTest, PIEOverflow) {
- ElfBinary<64> elf = StandardElf();
-
- elf.header.e_type = ET_DYN;
-
- // Choose vaddr of the first segment so that the end address overflows if the
- // segment is mapped with a non-zero offset.
- elf.phdrs[1].p_vaddr = 0xfffffffffffff000UL - elf.phdrs[1].p_memsz;
-
- elf.UpdateOffsets();
-
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(elf));
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(file.path(), {file.path()}, {}, &child, &execve_errno));
- if (IsRunningOnGvisor()) {
- ASSERT_EQ(execve_errno, EINVAL);
- } else {
- ASSERT_EQ(execve_errno, 0);
- int status;
- ASSERT_THAT(RetryEINTR(waitpid)(child, &status, 0),
- SyscallSucceedsWithValue(child));
- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGSEGV) << status;
- }
-}
-
-// Standard dynamically linked binary with an ELF interpreter.
-TEST(ElfTest, ELFInterpreter) {
- ElfBinary<64> interpreter = StandardElf();
- interpreter.header.e_type = ET_DYN;
- interpreter.header.e_entry = 0x0;
- interpreter.UpdateOffsets();
-
- // The first segment really needs to start at 0 for a normal PIE binary, and
- // thus includes the headers.
- uint64_t const offset = interpreter.phdrs[1].p_offset;
- // N.B. Since Linux 4.10 (0036d1f7eb95b "binfmt_elf: fix calculations for bss
- // padding"), Linux unconditionally zeroes the remainder of the highest mapped
- // page in an interpreter, failing if the protections don't allow write. Thus
- // we must mark this writeable.
- interpreter.phdrs[1].p_flags = PF_R | PF_W | PF_X;
- interpreter.phdrs[1].p_offset = 0x0;
- interpreter.phdrs[1].p_vaddr = 0x0;
- interpreter.phdrs[1].p_filesz += offset;
- interpreter.phdrs[1].p_memsz += offset;
-
- TempPath interpreter_file =
- ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(interpreter));
-
- ElfBinary<64> binary = StandardElf();
-
- // Append the interpreter path.
- int const interp_data_start = binary.data.size();
- for (char const c : interpreter_file.path()) {
- binary.data.push_back(c);
- }
- // NUL-terminate.
- binary.data.push_back(0);
- int const interp_data_size = binary.data.size() - interp_data_start;
-
- decltype(binary)::ElfPhdr phdr = {};
- phdr.p_type = PT_INTERP;
- phdr.p_offset = interp_data_start;
- phdr.p_filesz = interp_data_size;
- phdr.p_memsz = interp_data_size;
- // "If [PT_INTERP] is present, it must precede any loadable segment entry."
- //
- // However, Linux allows it anywhere, so we just stick it at the end to make
- // sure out-of-order PT_INTERP is OK.
- binary.phdrs.push_back(phdr);
-
- binary.UpdateOffsets();
-
- TempPath binary_file = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(binary));
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(ForkAndExec(
- binary_file.path(), {binary_file.path()}, {}, &child, &execve_errno));
- ASSERT_EQ(execve_errno, 0);
-
- ASSERT_NO_ERRNO(WaitStopped(child));
-
- // RIP tells us which page the first segment of the interpreter was loaded
- // into.
- struct user_regs_struct regs;
- struct iovec iov;
- iov.iov_base = &regs;
- iov.iov_len = sizeof(regs);
- EXPECT_THAT(ptrace(PTRACE_GETREGSET, child, NT_PRSTATUS, &iov),
- SyscallSucceeds());
- // Read exactly the full register set.
- EXPECT_EQ(iov.iov_len, sizeof(regs));
-
- const uint64_t interp_load_addr = IP_REG(regs) & ~(kPageSize - 1);
-
- EXPECT_THAT(
- child, ContainsMappings(std::vector<ProcMapsEntry>({
- // Main binary
- {0x40000, 0x41000, true, false, true, true, 0, 0, 0, 0,
- binary_file.path().c_str()},
- // Interpreter
- {interp_load_addr, interp_load_addr + 0x1000, true, true, true,
- true, 0, 0, 0, 0, interpreter_file.path().c_str()},
- })));
-}
-
- // Test parameter to ElfInterpreterStaticTest cases. The first item is a suffix to
-// add to the end of the interpreter path in the PT_INTERP segment and the
-// second is the expected execve(2) errno.
-using ElfInterpreterStaticParam = std::tuple<std::vector<char>, int>;
-
-class ElfInterpreterStaticTest
- : public ::testing::TestWithParam<ElfInterpreterStaticParam> {};
-
-// Statically linked ELF with a statically linked ELF interpreter.
-TEST_P(ElfInterpreterStaticTest, Test) {
- // TODO(gvisor.dev/issue/3721): Test has been observed to segfault on 5.X
- // kernels.
- if (!IsRunningOnGvisor()) {
- auto version = ASSERT_NO_ERRNO_AND_VALUE(GetKernelVersion());
- SKIP_IF(version.major > 4);
- }
-
- const std::vector<char> segment_suffix = std::get<0>(GetParam());
- const int expected_errno = std::get<1>(GetParam());
-
- ElfBinary<64> interpreter = StandardElf();
- // See comment in ElfTest.ELFInterpreter.
- interpreter.phdrs[1].p_flags = PF_R | PF_W | PF_X;
- interpreter.UpdateOffsets();
- TempPath interpreter_file =
- ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(interpreter));
-
- ElfBinary<64> binary = StandardElf();
- // The PT_LOAD segment conflicts with the interpreter's PT_LOAD segment. The
- // interpreter's will be mapped directly over the binary's.
-
- // Interpreter path plus the parameterized suffix in the PT_INTERP segment.
- const std::string path = interpreter_file.path();
- std::vector<char> segment(path.begin(), path.end());
- segment.insert(segment.end(), segment_suffix.begin(), segment_suffix.end());
- binary.AddInterpreter(segment);
-
- binary.UpdateOffsets();
-
- TempPath binary_file = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(binary));
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(ForkAndExec(
- binary_file.path(), {binary_file.path()}, {}, &child, &execve_errno));
- ASSERT_EQ(execve_errno, expected_errno);
-
- if (expected_errno == 0) {
- ASSERT_NO_ERRNO(WaitStopped(child));
-
- EXPECT_THAT(child, ContainsMappings(std::vector<ProcMapsEntry>({
- // Interpreter.
- {0x40000, 0x41000, true, true, true, true, 0, 0, 0,
- 0, interpreter_file.path().c_str()},
- })));
- }
-}
-
-INSTANTIATE_TEST_SUITE_P(
- Cases, ElfInterpreterStaticTest,
- ::testing::ValuesIn({
- // Simple NUL-terminator to run the interpreter as normal.
- std::make_tuple(std::vector<char>({'\0'}), 0),
- // Add some garbage to the segment followed by a NUL-terminator. This is
- // ignored.
- std::make_tuple(std::vector<char>({'\0', 'b', '\0'}), 0),
- // Add some garbage to the segment without a NUL-terminator. Linux will
- // reject this.
- std::make_tuple(std::vector<char>({'\0', 'b'}), ENOEXEC),
- }));
-
- // Test parameter to ElfInterpreterBadPathTest cases. The first item is the
-// contents of the PT_INTERP segment and the second is the expected execve(2)
-// errno.
-using ElfInterpreterBadPathParam = std::tuple<std::vector<char>, int>;
-
-class ElfInterpreterBadPathTest
- : public ::testing::TestWithParam<ElfInterpreterBadPathParam> {};
-
-TEST_P(ElfInterpreterBadPathTest, Test) {
- const std::vector<char> segment = std::get<0>(GetParam());
- const int expected_errno = std::get<1>(GetParam());
-
- ElfBinary<64> binary = StandardElf();
- binary.AddInterpreter(segment);
- binary.UpdateOffsets();
-
- TempPath binary_file = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(binary));
-
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(ForkAndExec(
- binary_file.path(), {binary_file.path()}, {}, nullptr, &execve_errno));
- EXPECT_EQ(execve_errno, expected_errno);
-}
-
-INSTANTIATE_TEST_SUITE_P(
- Cases, ElfInterpreterBadPathTest,
- ::testing::ValuesIn({
- // NUL-terminated fake path in the PT_INTERP segment.
- std::make_tuple(std::vector<char>({'/', 'f', '/', 'b', '\0'}), ENOENT),
- // ELF interpreter not NUL-terminated.
- std::make_tuple(std::vector<char>({'/', 'f', '/', 'b'}), ENOEXEC),
- // ELF interpreter path omitted entirely.
- //
- // fs/binfmt_elf.c:load_elf_binary returns ENOEXEC if p_filesz is < 2
- // bytes.
- std::make_tuple(std::vector<char>({'\0'}), ENOEXEC),
- // ELF interpreter path = "\0".
- //
- // fs/binfmt_elf.c:load_elf_binary returns ENOEXEC if p_filesz is < 2
- // bytes, so add an extra byte to pass that check.
- //
- // load_elf_binary -> open_exec -> do_open_execat fails to check that
- // name != '\0' before calling do_filp_open, which thus opens the
- // working directory. do_open_execat returns EACCES because the
- // directory is not a regular file.
- std::make_tuple(std::vector<char>({'\0', '\0'}), EACCES),
- }));
-
-// Relative path to ELF interpreter.
-TEST(ElfTest, ELFInterpreterRelative) {
- ElfBinary<64> interpreter = StandardElf();
- interpreter.header.e_type = ET_DYN;
- interpreter.header.e_entry = 0x0;
- interpreter.UpdateOffsets();
-
- // The first segment really needs to start at 0 for a normal PIE binary, and
- // thus includes the headers.
- uint64_t const offset = interpreter.phdrs[1].p_offset;
- // See comment in ElfTest.ELFInterpreter.
- interpreter.phdrs[1].p_flags = PF_R | PF_W | PF_X;
- interpreter.phdrs[1].p_offset = 0x0;
- interpreter.phdrs[1].p_vaddr = 0x0;
- interpreter.phdrs[1].p_filesz += offset;
- interpreter.phdrs[1].p_memsz += offset;
-
- TempPath interpreter_file =
- ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(interpreter));
- auto cwd = ASSERT_NO_ERRNO_AND_VALUE(GetCWD());
- auto interpreter_relative =
- ASSERT_NO_ERRNO_AND_VALUE(GetRelativePath(cwd, interpreter_file.path()));
-
- ElfBinary<64> binary = StandardElf();
-
- // NUL-terminated path in the PT_INTERP segment.
- std::vector<char> segment(interpreter_relative.begin(),
- interpreter_relative.end());
- segment.push_back(0);
- binary.AddInterpreter(segment);
-
- binary.UpdateOffsets();
-
- TempPath binary_file = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(binary));
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(ForkAndExec(
- binary_file.path(), {binary_file.path()}, {}, &child, &execve_errno));
- ASSERT_EQ(execve_errno, 0);
-
- ASSERT_NO_ERRNO(WaitStopped(child));
-
- // RIP tells us which page the first segment of the interpreter was loaded
- // into.
- struct user_regs_struct regs;
- struct iovec iov;
- iov.iov_base = &regs;
- iov.iov_len = sizeof(regs);
- EXPECT_THAT(ptrace(PTRACE_GETREGSET, child, NT_PRSTATUS, &iov),
- SyscallSucceeds());
- // Read exactly the full register set.
- EXPECT_EQ(iov.iov_len, sizeof(regs));
-
- const uint64_t interp_load_addr = IP_REG(regs) & ~(kPageSize - 1);
-
- EXPECT_THAT(
- child, ContainsMappings(std::vector<ProcMapsEntry>({
- // Main binary
- {0x40000, 0x41000, true, false, true, true, 0, 0, 0, 0,
- binary_file.path().c_str()},
- // Interpreter
- {interp_load_addr, interp_load_addr + 0x1000, true, true, true,
- true, 0, 0, 0, 0, interpreter_file.path().c_str()},
- })));
-}
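Aside on the masking trick above: the interpreter is built with e_entry = 0x0 and its first segment at vaddr 0x0, so the stopped child is executing somewhere within the interpreter's first mapped page, and rounding RIP down to a page boundary recovers the load address. A minimal standalone illustration, assuming 4 KiB pages and a made-up RIP value:

#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kPageSize = 0x1000;    // 4 KiB pages assumed here.
  const uint64_t rip = 0x7f0123456089;  // Hypothetical RIP sampled from the child.
  // Clearing the low bits rounds down to the page containing RIP, i.e. the
  // page where the interpreter's first segment was mapped.
  const uint64_t load_addr = rip & ~(kPageSize - 1);
  std::printf("load address: 0x%" PRIx64 "\n", load_addr);  // 0x7f0123456000
  return 0;
}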
-
-// ELF interpreter architecture doesn't match the binary.
-TEST(ElfTest, ELFInterpreterWrongArch) {
- ElfBinary<64> interpreter = StandardElf();
- interpreter.header.e_machine = EM_PPC64;
- interpreter.header.e_type = ET_DYN;
- interpreter.header.e_entry = 0x0;
- interpreter.UpdateOffsets();
-
- // The first segment really needs to start at 0 for a normal PIE binary, and
- // thus includes the headers.
- uint64_t const offset = interpreter.phdrs[1].p_offset;
- // See comment in ElfTest.ELFInterpreter.
- interpreter.phdrs[1].p_flags = PF_R | PF_W | PF_X;
- interpreter.phdrs[1].p_offset = 0x0;
- interpreter.phdrs[1].p_vaddr = 0x0;
- interpreter.phdrs[1].p_filesz += offset;
- interpreter.phdrs[1].p_memsz += offset;
-
- TempPath interpreter_file =
- ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(interpreter));
-
- ElfBinary<64> binary = StandardElf();
-
- // NUL-terminated path in the PT_INTERP segment.
- const std::string path = interpreter_file.path();
- std::vector<char> segment(path.begin(), path.end());
- segment.push_back(0);
- binary.AddInterpreter(segment);
-
- binary.UpdateOffsets();
-
- TempPath binary_file = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(binary));
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(ForkAndExec(
- binary_file.path(), {binary_file.path()}, {}, &child, &execve_errno));
- ASSERT_EQ(execve_errno, ELIBBAD);
-}
-
-// No execute permissions on the binary.
-TEST(ElfTest, NoExecute) {
- ElfBinary<64> elf = StandardElf();
- elf.UpdateOffsets();
-
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(elf));
-
- ASSERT_THAT(chmod(file.path().c_str(), 0644), SyscallSucceeds());
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(file.path(), {file.path()}, {}, &child, &execve_errno));
- EXPECT_EQ(execve_errno, EACCES);
-}
-
-// Execute, but not read, permission on the binary works just fine.
-TEST(ElfTest, NoRead) {
- // TODO(gvisor.dev/issue/160): gVisor's backing filesystem may prevent the
- // sentry from reading the executable.
- SKIP_IF(IsRunningOnGvisor());
-
- ElfBinary<64> elf = StandardElf();
- elf.UpdateOffsets();
-
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(elf));
-
- ASSERT_THAT(chmod(file.path().c_str(), 0111), SyscallSucceeds());
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(file.path(), {file.path()}, {}, &child, &execve_errno));
- ASSERT_EQ(execve_errno, 0);
-
- ASSERT_NO_ERRNO(WaitStopped(child));
-
- // TODO(gvisor.dev/issue/160): A task with a non-readable executable is marked
- // non-dumpable, preventing access to proc files. gVisor does not implement
- // this behavior.
-}
-
-// No execute permissions on the ELF interpreter.
-TEST(ElfTest, ElfInterpreterNoExecute) {
- ElfBinary<64> interpreter = StandardElf();
- interpreter.header.e_type = ET_DYN;
- interpreter.header.e_entry = 0x0;
- interpreter.UpdateOffsets();
-
- // The first segment really needs to start at 0 for a normal PIE binary, and
- // thus includes the headers.
- uint64_t const offset = interpreter.phdrs[1].p_offset;
- // See comment in ElfTest.ELFInterpreter.
- interpreter.phdrs[1].p_flags = PF_R | PF_W | PF_X;
- interpreter.phdrs[1].p_offset = 0x0;
- interpreter.phdrs[1].p_vaddr = 0x0;
- interpreter.phdrs[1].p_filesz += offset;
- interpreter.phdrs[1].p_memsz += offset;
-
- TempPath interpreter_file =
- ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(interpreter));
-
- ElfBinary<64> binary = StandardElf();
-
- // NUL-terminated path in the PT_INTERP segment.
- const std::string path = interpreter_file.path();
- std::vector<char> segment(path.begin(), path.end());
- segment.push_back(0);
- binary.AddInterpreter(segment);
-
- binary.UpdateOffsets();
-
- TempPath binary_file = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(binary));
-
- ASSERT_THAT(chmod(interpreter_file.path().c_str(), 0644), SyscallSucceeds());
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(interpreter_file.path(), {interpreter_file.path()}, {},
- &child, &execve_errno));
- EXPECT_EQ(execve_errno, EACCES);
-}
-
-// Execute a basic interpreter script.
-TEST(InterpreterScriptTest, Execute) {
- ElfBinary<64> elf = StandardElf();
- elf.UpdateOffsets();
- // Use /tmp explicitly to ensure the path is short enough.
- TempPath binary = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith("/tmp", elf));
-
- TempPath script = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), absl::StrCat("#!", binary.path()), 0755));
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(script.path(), {script.path()}, {}, &child, &execve_errno));
- ASSERT_EQ(execve_errno, 0);
-
- EXPECT_NO_ERRNO(WaitStopped(child));
-}
-
-// Whitespace after #!.
-TEST(InterpreterScriptTest, Whitespace) {
- ElfBinary<64> elf = StandardElf();
- elf.UpdateOffsets();
- // Use /tmp explicitly to ensure the path is short enough.
- TempPath binary = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith("/tmp", elf));
-
- TempPath script = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), absl::StrCat("#! \t \t", binary.path()), 0755));
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(script.path(), {script.path()}, {}, &child, &execve_errno));
- ASSERT_EQ(execve_errno, 0);
-
- EXPECT_NO_ERRNO(WaitStopped(child));
-}
-
-// Interpreter script is missing execute permission.
-TEST(InterpreterScriptTest, InterpreterScriptNoExecute) {
- ElfBinary<64> elf = StandardElf();
- elf.UpdateOffsets();
- // Use /tmp explicitly to ensure the path is short enough.
- TempPath binary = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith("/tmp", elf));
-
- TempPath script = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), absl::StrCat("#!", binary.path()), 0644));
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(script.path(), {script.path()}, {}, &child, &execve_errno));
- ASSERT_EQ(execve_errno, EACCES);
-}
-
-// Binary interpreter script refers to is missing execute permission.
-TEST(InterpreterScriptTest, BinaryNoExecute) {
- ElfBinary<64> elf = StandardElf();
- elf.UpdateOffsets();
- // Use /tmp explicitly to ensure the path is short enough.
- TempPath binary = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith("/tmp", elf));
-
- ASSERT_THAT(chmod(binary.path().c_str(), 0644), SyscallSucceeds());
-
- TempPath script = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), absl::StrCat("#!", binary.path()), 0755));
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(script.path(), {script.path()}, {}, &child, &execve_errno));
- ASSERT_EQ(execve_errno, EACCES);
-}
-
-// Linux will load interpreter scripts five levels deep, but no more.
-TEST(InterpreterScriptTest, MaxRecursion) {
- ElfBinary<64> elf = StandardElf();
- elf.UpdateOffsets();
- // Use /tmp explicitly to ensure the path is short enough.
- TempPath binary = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith("/tmp", elf));
-
- TempPath script1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- "/tmp", absl::StrCat("#!", binary.path()), 0755));
- TempPath script2 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- "/tmp", absl::StrCat("#!", script1.path()), 0755));
- TempPath script3 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- "/tmp", absl::StrCat("#!", script2.path()), 0755));
- TempPath script4 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- "/tmp", absl::StrCat("#!", script3.path()), 0755));
- TempPath script5 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- "/tmp", absl::StrCat("#!", script4.path()), 0755));
- TempPath script6 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- "/tmp", absl::StrCat("#!", script5.path()), 0755));
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(script6.path(), {script6.path()}, {}, &child, &execve_errno));
- // Too many levels of recursion.
- EXPECT_EQ(execve_errno, ELOOP);
-
- // The next level up is OK.
- auto cleanup2 = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(script5.path(), {script5.path()}, {}, &child, &execve_errno));
- ASSERT_EQ(execve_errno, 0);
-
- EXPECT_NO_ERRNO(WaitStopped(child));
-}
-
-// Interpreter script with a relative path.
-TEST(InterpreterScriptTest, RelativePath) {
- ElfBinary<64> elf = StandardElf();
- elf.UpdateOffsets();
- TempPath binary = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith("/tmp", elf));
-
- auto cwd = ASSERT_NO_ERRNO_AND_VALUE(GetCWD());
- auto binary_relative =
- ASSERT_NO_ERRNO_AND_VALUE(GetRelativePath(cwd, binary.path()));
-
- TempPath script = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), absl::StrCat("#!", binary_relative), 0755));
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(script.path(), {script.path()}, {}, &child, &execve_errno));
- ASSERT_EQ(execve_errno, 0);
-
- EXPECT_NO_ERRNO(WaitStopped(child));
-}
-
-// Interpreter script with .. in a path component.
-TEST(InterpreterScriptTest, UncleanPath) {
- ElfBinary<64> elf = StandardElf();
- elf.UpdateOffsets();
- // Use /tmp explicitly to ensure the path is short enough.
- TempPath binary = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith("/tmp", elf));
-
- TempPath script = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), absl::StrCat("#!/tmp/../", binary.path()),
- 0755));
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(script.path(), {script.path()}, {}, &child, &execve_errno));
- ASSERT_EQ(execve_errno, 0);
-
- EXPECT_NO_ERRNO(WaitStopped(child));
-}
-
-// Passed interpreter script is a symlink.
-TEST(InterpreterScriptTest, Symlink) {
- ElfBinary<64> elf = StandardElf();
- elf.UpdateOffsets();
- // Use /tmp explicitly to ensure the path is short enough.
- TempPath binary = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith("/tmp", elf));
-
- TempPath script = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), absl::StrCat("#!", binary.path()), 0755));
-
- TempPath link = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateSymlinkTo(GetAbsoluteTestTmpdir(), script.path()));
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(link.path(), {link.path()}, {}, &child, &execve_errno));
- ASSERT_EQ(execve_errno, 0);
-
- EXPECT_NO_ERRNO(WaitStopped(child));
-}
-
-// Interpreter script points to a symlink loop.
-TEST(InterpreterScriptTest, SymlinkLoop) {
- std::string const link1 = NewTempAbsPathInDir("/tmp");
- std::string const link2 = NewTempAbsPathInDir("/tmp");
-
- ASSERT_THAT(symlink(link2.c_str(), link1.c_str()), SyscallSucceeds());
- auto remove_link1 = Cleanup(
- [&link1] { EXPECT_THAT(unlink(link1.c_str()), SyscallSucceeds()); });
-
- ASSERT_THAT(symlink(link1.c_str(), link2.c_str()), SyscallSucceeds());
- auto remove_link2 = Cleanup(
- [&link2] { EXPECT_THAT(unlink(link2.c_str()), SyscallSucceeds()); });
-
- TempPath script = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), absl::StrCat("#!", link1), 0755));
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(script.path(), {script.path()}, {}, &child, &execve_errno));
- EXPECT_EQ(execve_errno, ELOOP);
-}
-
-// Binary is a symlink loop.
-TEST(ExecveTest, SymlinkLoop) {
- std::string const link1 = NewTempAbsPathInDir("/tmp");
- std::string const link2 = NewTempAbsPathInDir("/tmp");
-
- ASSERT_THAT(symlink(link2.c_str(), link1.c_str()), SyscallSucceeds());
- auto remove_link = Cleanup(
- [&link1] { EXPECT_THAT(unlink(link1.c_str()), SyscallSucceeds()); });
-
- ASSERT_THAT(symlink(link1.c_str(), link2.c_str()), SyscallSucceeds());
- auto remove_link2 = Cleanup(
- [&link2] { EXPECT_THAT(unlink(link2.c_str()), SyscallSucceeds()); });
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(link1, {link1}, {}, &child, &execve_errno));
- EXPECT_EQ(execve_errno, ELOOP);
-}
-
-// Binary is a directory.
-TEST(ExecveTest, Directory) {
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec("/tmp", {"/tmp"}, {}, &child, &execve_errno));
- EXPECT_EQ(execve_errno, EACCES);
-}
-
-// Pass a valid binary as a directory (extra / on the end).
-TEST(ExecveTest, BinaryAsDirectory) {
- ElfBinary<64> elf = StandardElf();
- elf.UpdateOffsets();
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(elf));
-
- std::string const path = absl::StrCat(file.path(), "/");
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(path, {path}, {}, &child, &execve_errno));
- EXPECT_EQ(execve_errno, ENOTDIR);
-}
-
-// The initial brk value is after the page at the end of the binary.
-TEST(ExecveTest, BrkAfterBinary) {
- ElfBinary<64> elf = StandardElf();
- elf.UpdateOffsets();
-
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(elf));
-
- pid_t child;
- int execve_errno;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(file.path(), {file.path()}, {}, &child, &execve_errno));
- ASSERT_EQ(execve_errno, 0);
-
- // Ensure it made it to SIGSTOP.
- ASSERT_NO_ERRNO(WaitStopped(child));
-
- struct user_regs_struct regs;
- struct iovec iov;
- iov.iov_base = &regs;
- iov.iov_len = sizeof(regs);
- EXPECT_THAT(ptrace(PTRACE_GETREGSET, child, NT_PRSTATUS, &iov),
- SyscallSucceeds());
- // Read exactly the full register set.
- EXPECT_EQ(iov.iov_len, sizeof(regs));
-
- // RIP is just beyond the final syscall instruction. Rewind to execute a brk
- // syscall.
- IP_REG(regs) -= kSyscallSize;
- RAX_REG(regs) = __NR_brk;
- RDI_REG(regs) = 0;
- ASSERT_THAT(ptrace(PTRACE_SETREGSET, child, NT_PRSTATUS, &iov),
- SyscallSucceeds());
-
- // Resume the child, waiting for syscall entry.
- ASSERT_THAT(ptrace(PTRACE_SYSCALL, child, 0, 0), SyscallSucceeds());
- int status;
- ASSERT_THAT(RetryEINTR(waitpid)(child, &status, 0),
- SyscallSucceedsWithValue(child));
- ASSERT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP)
- << "status = " << status;
-
- // Execute the syscall.
- ASSERT_THAT(ptrace(PTRACE_SYSCALL, child, 0, 0), SyscallSucceeds());
- ASSERT_THAT(RetryEINTR(waitpid)(child, &status, 0),
- SyscallSucceedsWithValue(child));
- ASSERT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP)
- << "status = " << status;
-
- iov.iov_base = &regs;
- iov.iov_len = sizeof(regs);
- EXPECT_THAT(ptrace(PTRACE_GETREGSET, child, NT_PRSTATUS, &iov),
- SyscallSucceeds());
- // Read exactly the full register set.
- EXPECT_EQ(iov.iov_len, sizeof(regs));
-
- // brk is after the text page.
- //
- // The kernel does brk randomization, so we can't be sure what the exact
-  // address will be, but it is always beyond the final page in the binary;
-  // i.e., it does not start immediately after memsz in the middle of a page,
-  // since userspace may expect to use that space.
- EXPECT_GE(RETURN_REG(regs), 0x41000);
-}
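As a side note, outside of a ptrace harness the same query can be made from within a process itself: the raw brk syscall with an argument of 0 never moves the break and simply reports its current value, and sbrk(0) is the libc-level equivalent. A small sketch, independent of the test helpers above:

#include <sys/syscall.h>
#include <unistd.h>
#include <cstdio>

int main() {
  // brk(0) does not move the break; the kernel reports the current value.
  const long brk_now = syscall(SYS_brk, 0);
  // sbrk(0) should agree with the raw syscall on glibc.
  void* sbrk_now = sbrk(0);
  std::printf("brk = %#lx, sbrk = %p\n", brk_now, sbrk_now);
  return 0;
}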
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/exec_proc_exe_workload.cc b/test/syscalls/linux/exec_proc_exe_workload.cc
deleted file mode 100644
index 2989379b7..000000000
--- a/test/syscalls/linux/exec_proc_exe_workload.cc
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <stdlib.h>
-#include <unistd.h>
-
-#include <iostream>
-
-#include "test/util/fs_util.h"
-#include "test/util/posix_error.h"
-
-int main(int argc, char** argv, char** envp) {
-  // This is annoying. Because remote build systems may put these binaries
-  // in a content-addressable store, you may wind up with /proc/self/exe
-  // pointing to some random path (but with a sensible argv[0]).
-  //
-  // Therefore, this test simply checks that /proc/self/exe is absolute and
-  // *doesn't* match argv[1].
- std::string exe =
- gvisor::testing::ProcessExePath(getpid()).ValueOrDie();
- if (exe[0] != '/') {
- std::cerr << "relative path: " << exe << std::endl;
- exit(1);
- }
- if (exe.find(argv[1]) != std::string::npos) {
- std::cerr << "matching path: " << exe << std::endl;
- exit(1);
- }
-
- return 0;
-}
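ProcessExePath comes from test/util/fs_util.h, which is not part of this diff; presumably it amounts to a readlink(2) of /proc/<pid>/exe. A rough, self-contained sketch of that idea (the helper name ReadProcExe below is made up for illustration, not taken from the test utilities):

#include <sys/types.h>
#include <unistd.h>
#include <climits>
#include <cstdio>
#include <string>

// Hypothetical stand-in for what ProcessExePath() is assumed to do.
std::string ReadProcExe(pid_t pid) {
  const std::string link = "/proc/" + std::to_string(pid) + "/exe";
  char buf[PATH_MAX];
  // readlink does not NUL-terminate, so keep the returned length.
  const ssize_t n = readlink(link.c_str(), buf, sizeof(buf) - 1);
  if (n < 0) {
    return "";  // Real code would propagate errno instead.
  }
  return std::string(buf, n);
}

int main() {
  std::printf("%s\n", ReadProcExe(getpid()).c_str());
  return 0;
}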
diff --git a/test/syscalls/linux/exec_state_workload.cc b/test/syscalls/linux/exec_state_workload.cc
deleted file mode 100644
index eafdc2bfa..000000000
--- a/test/syscalls/linux/exec_state_workload.cc
+++ /dev/null
@@ -1,206 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <signal.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <sys/auxv.h>
-#include <sys/prctl.h>
-#include <sys/time.h>
-
-#include <iostream>
-#include <ostream>
-#include <string>
-
-#include "absl/strings/numbers.h"
-
-#ifndef ANDROID // Conflicts with existing operator<< on Android.
-
-// Pretty-print a sigset_t.
-std::ostream& operator<<(std::ostream& out, const sigset_t& s) {
- out << "{ ";
-
- for (int i = 0; i < NSIG; i++) {
- if (sigismember(&s, i)) {
- out << i << " ";
- }
- }
-
- out << "}";
- return out;
-}
-
-#endif
-
-// Verify that the handler registered for signo is handler.
-int CheckSigHandler(uint32_t signo, uintptr_t handler) {
- struct sigaction sa;
- int ret = sigaction(signo, nullptr, &sa);
- if (ret < 0) {
- perror("sigaction");
- return 1;
- }
-
- if (reinterpret_cast<void (*)(int)>(handler) != sa.sa_handler) {
- std::cerr << "signo " << signo << " handler got: " << sa.sa_handler
- << " expected: " << std::hex << handler;
- return 1;
- }
- return 0;
-}
-
-// Verify that signal signo is blocked.
-int CheckSigBlocked(uint32_t signo) {
- sigset_t s;
- int ret = sigprocmask(SIG_SETMASK, nullptr, &s);
- if (ret < 0) {
- perror("sigprocmask");
- return 1;
- }
-
- if (!sigismember(&s, signo)) {
- std::cerr << "signal " << signo << " not blocked in signal mask: " << s
- << std::endl;
- return 1;
- }
- return 0;
-}
-
-// Verify that the itimer is enabled.
-int CheckItimerEnabled(uint32_t timer) {
- struct itimerval itv;
- int ret = getitimer(timer, &itv);
- if (ret < 0) {
- perror("getitimer");
- return 1;
- }
-
- if (!itv.it_value.tv_sec && !itv.it_value.tv_usec &&
- !itv.it_interval.tv_sec && !itv.it_interval.tv_usec) {
- std::cerr << "timer " << timer
- << " not enabled. value sec: " << itv.it_value.tv_sec
- << " usec: " << itv.it_value.tv_usec
- << " interval sec: " << itv.it_interval.tv_sec
- << " usec: " << itv.it_interval.tv_usec << std::endl;
- return 1;
- }
- return 0;
-}
-
-int PrintExecFn() {
- unsigned long execfn = getauxval(AT_EXECFN);
- if (!execfn) {
- std::cerr << "AT_EXECFN missing" << std::endl;
- return 1;
- }
-
- std::cerr << reinterpret_cast<const char*>(execfn) << std::endl;
- return 0;
-}
-
-int PrintExecName() {
- const size_t name_length = 20;
- char name[name_length] = {0};
- if (prctl(PR_GET_NAME, name) < 0) {
- std::cerr << "prctl(PR_GET_NAME) failed" << std::endl;
- return 1;
- }
-
- std::cerr << name << std::endl;
- return 0;
-}
-
-void usage(const std::string& prog) {
- std::cerr << "usage:\n"
- << "\t" << prog << " CheckSigHandler <signo> <handler addr (hex)>\n"
- << "\t" << prog << " CheckSigBlocked <signo>\n"
-            << "\t" << prog << " CheckItimerEnabled <timer>\n"
- << "\t" << prog << " PrintExecFn\n"
- << "\t" << prog << " PrintExecName" << std::endl;
-}
-
-int main(int argc, char** argv) {
- if (argc < 2) {
- usage(argv[0]);
- return 1;
- }
-
- std::string func(argv[1]);
-
- if (func == "CheckSigHandler") {
- if (argc != 4) {
- usage(argv[0]);
- return 1;
- }
-
- uint32_t signo;
- if (!absl::SimpleAtoi(argv[2], &signo)) {
- std::cerr << "invalid signo: " << argv[2] << std::endl;
- return 1;
- }
-
- uintptr_t handler;
- if (!absl::numbers_internal::safe_strtoi_base(argv[3], &handler, 16)) {
- std::cerr << "invalid handler: " << std::hex << argv[3] << std::endl;
- return 1;
- }
-
- return CheckSigHandler(signo, handler);
- }
-
- if (func == "CheckSigBlocked") {
- if (argc != 3) {
- usage(argv[0]);
- return 1;
- }
-
- uint32_t signo;
- if (!absl::SimpleAtoi(argv[2], &signo)) {
- std::cerr << "invalid signo: " << argv[2] << std::endl;
- return 1;
- }
-
- return CheckSigBlocked(signo);
- }
-
- if (func == "CheckItimerEnabled") {
- if (argc != 3) {
- usage(argv[0]);
- return 1;
- }
-
- uint32_t timer;
- if (!absl::SimpleAtoi(argv[2], &timer)) {
-      std::cerr << "invalid timer: " << argv[2] << std::endl;
- return 1;
- }
-
- return CheckItimerEnabled(timer);
- }
-
- if (func == "PrintExecFn") {
- // N.B. This will be called as an interpreter script, with the script passed
- // as the third argument. We don't care about that script.
- return PrintExecFn();
- }
-
- if (func == "PrintExecName") {
- // N.B. This may be called as an interpreter script like PrintExecFn.
- return PrintExecName();
- }
-
- std::cerr << "Invalid function: " << func << std::endl;
- return 1;
-}
diff --git a/test/syscalls/linux/exit.cc b/test/syscalls/linux/exit.cc
deleted file mode 100644
index d52ea786b..000000000
--- a/test/syscalls/linux/exit.cc
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sys/wait.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "absl/time/time.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/test_util.h"
-#include "test/util/time_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-void TestExit(int code) {
- pid_t pid = fork();
- if (pid == 0) {
- _exit(code);
- }
-
- ASSERT_THAT(pid, SyscallSucceeds());
-
- int status;
- EXPECT_THAT(RetryEINTR(waitpid)(pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == code) << status;
-}
-
-TEST(ExitTest, Success) { TestExit(0); }
-
-TEST(ExitTest, Failure) { TestExit(1); }
-
-// This test ensures that a process's file descriptors are closed when it calls
-// exit(). In order to test this, the parent tries to read from a pipe whose
-// write end is held by the child. While the read is blocking, the child exits,
-// which should cause the parent to read 0 bytes due to EOF.
-TEST(ExitTest, CloseFds) {
- int pipe_fds[2];
- ASSERT_THAT(pipe(pipe_fds), SyscallSucceeds());
-
- FileDescriptor read_fd(pipe_fds[0]);
- FileDescriptor write_fd(pipe_fds[1]);
-
- pid_t pid = fork();
- if (pid == 0) {
- read_fd.reset();
-
- SleepSafe(absl::Seconds(10));
-
- _exit(0);
- }
-
- EXPECT_THAT(pid, SyscallSucceeds());
-
- write_fd.reset();
-
- char buf[10];
- EXPECT_THAT(ReadFd(read_fd.get(), buf, sizeof(buf)),
- SyscallSucceedsWithValue(0));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/exit_script.sh b/test/syscalls/linux/exit_script.sh
deleted file mode 100755
index 527518e06..000000000
--- a/test/syscalls/linux/exit_script.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-
-# Copyright 2018 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-if [ $# -ne 1 ]; then
- echo "Usage: $0 exit_code"
- exit 255
-fi
-
-exit $1
diff --git a/test/syscalls/linux/fadvise64.cc b/test/syscalls/linux/fadvise64.cc
deleted file mode 100644
index ac24c4066..000000000
--- a/test/syscalls/linux/fadvise64.cc
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <syscall.h>
-#include <unistd.h>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-TEST(FAdvise64Test, Basic) {
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const auto fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDONLY));
-
-  // fadvise64 is a no-op in gVisor, so just test that it succeeds.
- ASSERT_THAT(syscall(__NR_fadvise64, fd.get(), 0, 10, POSIX_FADV_NORMAL),
- SyscallSucceeds());
- ASSERT_THAT(syscall(__NR_fadvise64, fd.get(), 0, 10, POSIX_FADV_RANDOM),
- SyscallSucceeds());
- ASSERT_THAT(syscall(__NR_fadvise64, fd.get(), 0, 10, POSIX_FADV_SEQUENTIAL),
- SyscallSucceeds());
- ASSERT_THAT(syscall(__NR_fadvise64, fd.get(), 0, 10, POSIX_FADV_WILLNEED),
- SyscallSucceeds());
- ASSERT_THAT(syscall(__NR_fadvise64, fd.get(), 0, 10, POSIX_FADV_DONTNEED),
- SyscallSucceeds());
- ASSERT_THAT(syscall(__NR_fadvise64, fd.get(), 0, 10, POSIX_FADV_NOREUSE),
- SyscallSucceeds());
-}
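The test drives the raw __NR_fadvise64 syscall; ordinary applications usually go through the libc wrapper posix_fadvise(3), which returns the error number directly rather than setting errno. A minimal sketch of the conventional usage (the file path is illustrative only):

#include <fcntl.h>
#include <unistd.h>
#include <cstdio>
#include <cstring>

int main() {
  const int fd = open("/etc/hostname", O_RDONLY);  // Any readable file works.
  if (fd < 0) {
    return 1;
  }
  // Hint that the whole file (len == 0 means "to end of file") will be read
  // sequentially.
  const int err = posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL);
  if (err != 0) {
    // posix_fadvise returns the error value; it does not set errno.
    std::fprintf(stderr, "posix_fadvise: %s\n", std::strerror(err));
  }
  close(fd);
  return err == 0 ? 0 : 1;
}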
-
-TEST(FAdvise64Test, FAdvise64WithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const auto fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_PATH));
-
- ASSERT_THAT(syscall(__NR_fadvise64, fd.get(), 0, 10, POSIX_FADV_NORMAL),
- SyscallFailsWithErrno(EBADF));
- ASSERT_THAT(syscall(__NR_fadvise64, fd.get(), 0, 10, POSIX_FADV_NORMAL),
- SyscallFailsWithErrno(EBADF));
-}
-
-TEST(FAdvise64Test, InvalidArgs) {
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const auto fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDONLY));
-
- // Note: offset is allowed to be negative.
- ASSERT_THAT(syscall(__NR_fadvise64, fd.get(), 0, static_cast<off_t>(-1),
- POSIX_FADV_NORMAL),
- SyscallFailsWithErrno(EINVAL));
- ASSERT_THAT(syscall(__NR_fadvise64, fd.get(), 0, 10, 12345),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(FAdvise64Test, NoPipes) {
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- const FileDescriptor read(fds[0]);
- const FileDescriptor write(fds[1]);
-
- ASSERT_THAT(syscall(__NR_fadvise64, read.get(), 0, 10, POSIX_FADV_NORMAL),
- SyscallFailsWithErrno(ESPIPE));
-}
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/fallocate.cc b/test/syscalls/linux/fallocate.cc
deleted file mode 100644
index 5c839447e..000000000
--- a/test/syscalls/linux/fallocate.cc
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <signal.h>
-#include <sys/eventfd.h>
-#include <sys/resource.h>
-#include <sys/signalfd.h>
-#include <sys/socket.h>
-#include <sys/stat.h>
-#include <sys/timerfd.h>
-#include <syscall.h>
-#include <time.h>
-#include <unistd.h>
-
-#include <ctime>
-
-#include "gtest/gtest.h"
-#include "absl/strings/str_cat.h"
-#include "absl/time/time.h"
-#include "test/syscalls/linux/file_base.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/cleanup.h"
-#include "test/util/eventfd_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/posix_error.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-int fallocate(int fd, int mode, off_t offset, off_t len) {
- return RetryEINTR(syscall)(__NR_fallocate, fd, mode, offset, len);
-}
-
-class AllocateTest : public FileTest {
- void SetUp() override { FileTest::SetUp(); }
-};
-
-TEST_F(AllocateTest, Fallocate) {
- // Check that it starts at size zero.
- struct stat buf;
- ASSERT_THAT(fstat(test_file_fd_.get(), &buf), SyscallSucceeds());
- EXPECT_EQ(buf.st_size, 0);
-
- // Grow to ten bytes.
- ASSERT_THAT(fallocate(test_file_fd_.get(), 0, 0, 10), SyscallSucceeds());
- ASSERT_THAT(fstat(test_file_fd_.get(), &buf), SyscallSucceeds());
- EXPECT_EQ(buf.st_size, 10);
-
-  // Allocating to a smaller size should be a no-op.
- ASSERT_THAT(fallocate(test_file_fd_.get(), 0, 0, 5), SyscallSucceeds());
- ASSERT_THAT(fstat(test_file_fd_.get(), &buf), SyscallSucceeds());
- EXPECT_EQ(buf.st_size, 10);
-
- // Grow again.
- ASSERT_THAT(fallocate(test_file_fd_.get(), 0, 0, 20), SyscallSucceeds());
- ASSERT_THAT(fstat(test_file_fd_.get(), &buf), SyscallSucceeds());
- EXPECT_EQ(buf.st_size, 20);
-
- // Grow with offset.
- ASSERT_THAT(fallocate(test_file_fd_.get(), 0, 10, 20), SyscallSucceeds());
- ASSERT_THAT(fstat(test_file_fd_.get(), &buf), SyscallSucceeds());
- EXPECT_EQ(buf.st_size, 30);
-
- // Grow with offset beyond EOF.
- ASSERT_THAT(fallocate(test_file_fd_.get(), 0, 39, 1), SyscallSucceeds());
- ASSERT_THAT(fstat(test_file_fd_.get(), &buf), SyscallSucceeds());
- EXPECT_EQ(buf.st_size, 40);
-
-  // A length of 0 should fail with EINVAL.
- ASSERT_THAT(fallocate(test_file_fd_.get(), 0, 50, 0),
- SyscallFailsWithErrno(EINVAL));
- ASSERT_THAT(fstat(test_file_fd_.get(), &buf), SyscallSucceeds());
- EXPECT_EQ(buf.st_size, 40);
-}
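For contrast with the default mode exercised above, which extends st_size, the FALLOC_FL_KEEP_SIZE mode preallocates blocks without changing the reported file size. A small standalone sketch using the glibc fallocate(2) wrapper; the temp path is made up for illustration:

#define _GNU_SOURCE 1  // For fallocate(2) and FALLOC_FL_KEEP_SIZE.
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>
#include <cstdio>

int main() {
  const char* path = "/tmp/fallocate-demo";  // Illustrative path.
  const int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0644);
  if (fd < 0) {
    return 1;
  }
  // Reserve 1 MiB of blocks without touching st_size.
  if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) != 0) {
    perror("fallocate");
  }
  struct stat st;
  fstat(fd, &st);
  // st_size stays 0, while st_blocks reflects the reservation.
  std::printf("size=%lld blocks=%lld\n", static_cast<long long>(st.st_size),
              static_cast<long long>(st.st_blocks));
  close(fd);
  unlink(path);
  return 0;
}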
-
-TEST_F(AllocateTest, FallocateInvalid) {
- // Invalid FD
- EXPECT_THAT(fallocate(-1, 0, 0, 10), SyscallFailsWithErrno(EBADF));
-
- // Negative offset and size.
- EXPECT_THAT(fallocate(test_file_fd_.get(), 0, -1, 10),
- SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(fallocate(test_file_fd_.get(), 0, 0, -1),
- SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(fallocate(test_file_fd_.get(), 0, -1, -1),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_F(AllocateTest, FallocateReadonly) {
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDONLY));
- EXPECT_THAT(fallocate(fd.get(), 0, 0, 10), SyscallFailsWithErrno(EBADF));
-}
-
-TEST_F(AllocateTest, FallocateWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_PATH));
- EXPECT_THAT(fallocate(fd.get(), 0, 0, 10), SyscallFailsWithErrno(EBADF));
-}
-
-TEST_F(AllocateTest, FallocatePipe) {
- int pipes[2];
- EXPECT_THAT(pipe(pipes), SyscallSucceeds());
- auto cleanup = Cleanup([&pipes] {
- EXPECT_THAT(close(pipes[0]), SyscallSucceeds());
- EXPECT_THAT(close(pipes[1]), SyscallSucceeds());
- });
-
- EXPECT_THAT(fallocate(pipes[1], 0, 0, 10), SyscallFailsWithErrno(ESPIPE));
-}
-
-TEST_F(AllocateTest, FallocateChar) {
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/null", O_RDWR));
- EXPECT_THAT(fallocate(fd.get(), 0, 0, 10), SyscallFailsWithErrno(ENODEV));
-}
-
-TEST_F(AllocateTest, FallocateRlimit) {
- // Get the current rlimit and restore after test run.
- struct rlimit initial_lim;
- ASSERT_THAT(getrlimit(RLIMIT_FSIZE, &initial_lim), SyscallSucceeds());
- auto cleanup = Cleanup([&initial_lim] {
- EXPECT_THAT(setrlimit(RLIMIT_FSIZE, &initial_lim), SyscallSucceeds());
- });
-
- // Try growing past the file size limit.
- sigset_t new_mask;
- sigemptyset(&new_mask);
- sigaddset(&new_mask, SIGXFSZ);
- sigprocmask(SIG_BLOCK, &new_mask, nullptr);
-
- struct rlimit setlim = {};
- setlim.rlim_cur = 1024;
- setlim.rlim_max = RLIM_INFINITY;
- ASSERT_THAT(setrlimit(RLIMIT_FSIZE, &setlim), SyscallSucceeds());
-
- EXPECT_THAT(fallocate(test_file_fd_.get(), 0, 0, 1025),
- SyscallFailsWithErrno(EFBIG));
-
- struct timespec timelimit = {};
- timelimit.tv_sec = 10;
- EXPECT_EQ(sigtimedwait(&new_mask, nullptr, &timelimit), SIGXFSZ);
- ASSERT_THAT(sigprocmask(SIG_UNBLOCK, &new_mask, nullptr), SyscallSucceeds());
-}
-
-TEST_F(AllocateTest, FallocateOtherFDs) {
- int fd;
- ASSERT_THAT(fd = timerfd_create(CLOCK_MONOTONIC, 0), SyscallSucceeds());
- auto timer_fd = FileDescriptor(fd);
- EXPECT_THAT(fallocate(timer_fd.get(), 0, 0, 10),
- SyscallFailsWithErrno(ENODEV));
-
- sigset_t mask;
- sigemptyset(&mask);
- ASSERT_THAT(fd = signalfd(-1, &mask, 0), SyscallSucceeds());
- auto sfd = FileDescriptor(fd);
- EXPECT_THAT(fallocate(sfd.get(), 0, 0, 10), SyscallFailsWithErrno(ENODEV));
-
- auto efd =
- ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, EFD_NONBLOCK | EFD_SEMAPHORE));
- EXPECT_THAT(fallocate(efd.get(), 0, 0, 10), SyscallFailsWithErrno(ENODEV));
-
- auto sockfd = ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, 0));
- EXPECT_THAT(fallocate(sockfd.get(), 0, 0, 10), SyscallFailsWithErrno(ENODEV));
-
- int socks[2];
- ASSERT_THAT(socketpair(AF_UNIX, SOCK_STREAM, PF_UNIX, socks),
- SyscallSucceeds());
- auto sock0 = FileDescriptor(socks[0]);
- auto sock1 = FileDescriptor(socks[1]);
- EXPECT_THAT(fallocate(sock0.get(), 0, 0, 10), SyscallFailsWithErrno(ENODEV));
-
- int pipefds[2];
- ASSERT_THAT(pipe(pipefds), SyscallSucceeds());
- EXPECT_THAT(fallocate(pipefds[1], 0, 0, 10), SyscallFailsWithErrno(ESPIPE));
- close(pipefds[0]);
- close(pipefds[1]);
-}
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/fault.cc b/test/syscalls/linux/fault.cc
deleted file mode 100644
index bd87d5e60..000000000
--- a/test/syscalls/linux/fault.cc
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#define _GNU_SOURCE 1
-#include <signal.h>
-#include <ucontext.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-__attribute__((noinline)) void Fault(void) {
- volatile int* foo = nullptr;
- *foo = 0;
-}
-
-int GetPcFromUcontext(ucontext_t* uc, uintptr_t* pc) {
-#if defined(__x86_64__)
- *pc = uc->uc_mcontext.gregs[REG_RIP];
- return 1;
-#elif defined(__i386__)
- *pc = uc->uc_mcontext.gregs[REG_EIP];
- return 1;
-#elif defined(__aarch64__)
- *pc = uc->uc_mcontext.pc;
- return 1;
-#else
- return 0;
-#endif
-}
-
-void sigact_handler(int sig, siginfo_t* siginfo, void* context) {
- uintptr_t pc;
- if (GetPcFromUcontext(reinterpret_cast<ucontext_t*>(context), &pc)) {
- /* Expect Fault() to be at most 64 bytes in size. */
- uintptr_t fault_addr = reinterpret_cast<uintptr_t>(&Fault);
- EXPECT_GE(pc, fault_addr);
- EXPECT_LT(pc, fault_addr + 64);
-
- // The following file is used to detect tests that exit prematurely. Since
- // we need to call exit() here, delete the file by hand.
- const char* exit_file = getenv("TEST_PREMATURE_EXIT_FILE");
- if (exit_file != nullptr) {
- ASSERT_THAT(unlink(exit_file), SyscallSucceeds());
- }
- exit(0);
- }
-}
-
-TEST(FaultTest, InRange) {
-  // Install a SIGSEGV handler that validates the faulting PC and then exits
-  // the test cleanly.
- struct sigaction sa = {};
- sa.sa_sigaction = sigact_handler;
- sigfillset(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO;
- ASSERT_THAT(sigaction(SIGSEGV, &sa, nullptr), SyscallSucceeds());
-
- Fault();
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/fchdir.cc b/test/syscalls/linux/fchdir.cc
deleted file mode 100644
index 0383f3f85..000000000
--- a/test/syscalls/linux/fchdir.cc
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <sys/socket.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <sys/un.h>
-
-#include "gtest/gtest.h"
-#include "test/util/capability_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(FchdirTest, Success) {
- auto temp_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- int fd;
- ASSERT_THAT(fd = open(temp_dir.path().c_str(), O_DIRECTORY | O_RDONLY),
- SyscallSucceeds());
-
- EXPECT_THAT(fchdir(fd), SyscallSucceeds());
- EXPECT_THAT(close(fd), SyscallSucceeds());
- // Change CWD to a permanent location as temp dirs will be cleaned up.
- EXPECT_THAT(chdir("/"), SyscallSucceeds());
-}
-
-TEST(FchdirTest, InvalidFD) {
- EXPECT_THAT(fchdir(-1), SyscallFailsWithErrno(EBADF));
-}
-
-TEST(FchdirTest, PermissionDenied) {
- // Drop capabilities that allow us to override directory permissions.
- AutoCapability cap1(CAP_DAC_OVERRIDE, false);
- AutoCapability cap2(CAP_DAC_READ_SEARCH, false);
-
- auto temp_dir = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateDirWith(GetAbsoluteTestTmpdir(), 0666 /* mode */));
-
- int fd;
- ASSERT_THAT(fd = open(temp_dir.path().c_str(), O_DIRECTORY | O_RDONLY),
- SyscallSucceeds());
-
- EXPECT_THAT(fchdir(fd), SyscallFailsWithErrno(EACCES));
- EXPECT_THAT(close(fd), SyscallSucceeds());
-}
-
-TEST(FchdirTest, NotDir) {
- auto temp_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- int fd;
- ASSERT_THAT(fd = open(temp_file.path().c_str(), O_CREAT | O_RDONLY, 0777),
- SyscallSucceeds());
-
- EXPECT_THAT(fchdir(fd), SyscallFailsWithErrno(ENOTDIR));
- EXPECT_THAT(close(fd), SyscallSucceeds());
-}
-
-TEST(FchdirTest, FchdirWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- auto temp_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(temp_dir.path(), O_PATH));
- ASSERT_THAT(open(temp_dir.path().c_str(), O_DIRECTORY | O_PATH),
- SyscallSucceeds());
-
- EXPECT_THAT(fchdir(fd.get()), SyscallSucceeds());
- // Change CWD to a permanent location as temp dirs will be cleaned up.
- EXPECT_THAT(chdir("/"), SyscallSucceeds());
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/fcntl.cc b/test/syscalls/linux/fcntl.cc
deleted file mode 100644
index 91526572b..000000000
--- a/test/syscalls/linux/fcntl.cc
+++ /dev/null
@@ -1,1997 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <signal.h>
-#include <sys/epoll.h>
-#include <sys/mman.h>
-#include <sys/types.h>
-#include <syscall.h>
-#include <unistd.h>
-
-#include <atomic>
-#include <deque>
-#include <iostream>
-#include <list>
-#include <string>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "absl/base/macros.h"
-#include "absl/base/port.h"
-#include "absl/flags/flag.h"
-#include "absl/memory/memory.h"
-#include "absl/strings/str_cat.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/capability_util.h"
-#include "test/util/cleanup.h"
-#include "test/util/eventfd_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/memory_util.h"
-#include "test/util/multiprocess_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/save_util.h"
-#include "test/util/signal_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-#include "test/util/timer_util.h"
-
-ABSL_FLAG(std::string, child_set_lock_on, "",
- "Contains the path to try to set a file lock on.");
-ABSL_FLAG(bool, child_set_lock_write, false,
- "Whether to set a writable lock (otherwise readable)");
-ABSL_FLAG(bool, blocking, false,
- "Whether to set a blocking lock (otherwise non-blocking).");
-ABSL_FLAG(bool, retry_eintr, false,
- "Whether to retry in the subprocess on EINTR.");
-ABSL_FLAG(uint64_t, child_set_lock_start, 0, "The value of struct flock start");
-ABSL_FLAG(uint64_t, child_set_lock_len, 0, "The value of struct flock len");
-ABSL_FLAG(int32_t, socket_fd, -1,
- "A socket to use for communicating more state back "
- "to the parent.");
-
-namespace gvisor {
-namespace testing {
-
-std::function<void(int, siginfo_t*, void*)> setsig_signal_handle;
-void setsig_signal_handler(int signum, siginfo_t* siginfo, void* ucontext) {
- setsig_signal_handle(signum, siginfo, ucontext);
-}
-
-class FcntlLockTest : public ::testing::Test {
- public:
- void SetUp() override {
- // Let's make a socket pair.
- ASSERT_THAT(socketpair(AF_UNIX, SOCK_STREAM, 0, fds_), SyscallSucceeds());
- }
-
- void TearDown() override {
- EXPECT_THAT(close(fds_[0]), SyscallSucceeds());
- EXPECT_THAT(close(fds_[1]), SyscallSucceeds());
- }
-
- int64_t GetSubprocessFcntlTimeInUsec() {
- int64_t ret = 0;
- EXPECT_THAT(ReadFd(fds_[0], reinterpret_cast<void*>(&ret), sizeof(ret)),
- SyscallSucceedsWithValue(sizeof(ret)));
- return ret;
- }
-
- // The first fd will remain with the process creating the subprocess
- // and the second will go to the subprocess.
- int fds_[2] = {};
-};
-
-struct SignalDelivery {
- int num;
- siginfo_t info;
-};
-
-class FcntlSignalTest : public ::testing::Test {
- public:
- void SetUp() override {
- int pipe_fds[2];
- ASSERT_THAT(pipe2(pipe_fds, O_NONBLOCK), SyscallSucceeds());
- pipe_read_fd_ = pipe_fds[0];
- pipe_write_fd_ = pipe_fds[1];
- }
-
- PosixErrorOr<Cleanup> RegisterSignalHandler(int signum) {
- struct sigaction handler;
- handler.sa_sigaction = setsig_signal_handler;
- setsig_signal_handle = [&](int signum, siginfo_t* siginfo,
- void* unused_ucontext) {
- SignalDelivery sig;
- sig.num = signum;
- sig.info = *siginfo;
- signals_received_.push_back(sig);
- num_signals_received_++;
- };
- sigemptyset(&handler.sa_mask);
- handler.sa_flags = SA_SIGINFO;
- return ScopedSigaction(signum, handler);
- }
-
- void FlushAndCloseFD(int fd) {
- char buf;
- int read_bytes;
- do {
- read_bytes = read(fd, &buf, 1);
- } while (read_bytes > 0);
- // read() can also fail with EWOULDBLOCK since the pipe is open in
- // non-blocking mode. This is not an error.
- EXPECT_TRUE(read_bytes == 0 || (read_bytes == -1 && errno == EWOULDBLOCK));
- EXPECT_THAT(close(fd), SyscallSucceeds());
- }
-
- void DupReadFD() {
- ASSERT_THAT(pipe_read_fd_dup_ = dup(pipe_read_fd_), SyscallSucceeds());
- max_expected_signals++;
- }
-
- void RegisterFD(int fd, int signum) {
- ASSERT_THAT(fcntl(fd, F_SETOWN, getpid()), SyscallSucceeds());
- ASSERT_THAT(fcntl(fd, F_SETSIG, signum), SyscallSucceeds());
- int old_flags;
- ASSERT_THAT(old_flags = fcntl(fd, F_GETFL), SyscallSucceeds());
- ASSERT_THAT(fcntl(fd, F_SETFL, old_flags | O_ASYNC), SyscallSucceeds());
- }
-
- void GenerateIOEvent() {
- ASSERT_THAT(write(pipe_write_fd_, "test", 4), SyscallSucceedsWithValue(4));
- }
-
- void WaitForSignalDelivery(absl::Duration timeout) {
- absl::Time wait_start = absl::Now();
- while (num_signals_received_ < max_expected_signals &&
- absl::Now() - wait_start < timeout) {
- absl::SleepFor(absl::Milliseconds(10));
- }
- }
-
- int pipe_read_fd_ = -1;
- int pipe_read_fd_dup_ = -1;
- int pipe_write_fd_ = -1;
- int max_expected_signals = 1;
- std::deque<SignalDelivery> signals_received_;
- std::atomic<int> num_signals_received_ = 0;
-};
-
-namespace {
-
-PosixErrorOr<Cleanup> SubprocessLock(std::string const& path, bool for_write,
- bool blocking, bool retry_eintr, int fd,
- off_t start, off_t length, pid_t* child) {
- std::vector<std::string> args = {
- "/proc/self/exe", "--child_set_lock_on", path,
- "--child_set_lock_start", absl::StrCat(start), "--child_set_lock_len",
- absl::StrCat(length), "--socket_fd", absl::StrCat(fd)};
-
- if (for_write) {
- args.push_back("--child_set_lock_write");
- }
-
- if (blocking) {
- args.push_back("--blocking");
- }
-
- if (retry_eintr) {
- args.push_back("--retry_eintr");
- }
-
- int execve_errno = 0;
- ASSIGN_OR_RETURN_ERRNO(
- auto cleanup,
- ForkAndExec("/proc/self/exe", ExecveArray(args.begin(), args.end()), {},
- nullptr, child, &execve_errno));
-
- if (execve_errno != 0) {
- return PosixError(execve_errno, "execve");
- }
-
- return std::move(cleanup);
-}
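SubprocessLock exists because fcntl(2) record locks are held per process, so demonstrating contention requires a second process; the tests that actually consume it fall outside this excerpt. A stripped-down, standalone sketch of the same idea, with no gVisor test utilities and an illustrative temp path:

#include <fcntl.h>
#include <sys/wait.h>
#include <unistd.h>
#include <cerrno>
#include <cstdio>

int main() {
  const char* path = "/tmp/fcntl-lock-demo";  // Illustrative path.
  const int fd = open(path, O_RDWR | O_CREAT, 0644);
  if (fd < 0) return 1;

  struct flock fl = {};
  fl.l_type = F_WRLCK;
  fl.l_whence = SEEK_SET;
  fl.l_start = 0;
  fl.l_len = 0;  // Lock the whole file.
  if (fcntl(fd, F_SETLK, &fl) != 0) return 1;  // Parent takes the lock.

  const pid_t pid = fork();
  if (pid == 0) {
    // The child re-opens the file; its non-blocking attempt should fail with
    // EAGAIN (or EACCES on some systems) while the parent holds the lock.
    const int cfd = open(path, O_RDWR);
    struct flock cfl = fl;
    const int rc = fcntl(cfd, F_SETLK, &cfl);
    std::printf("child F_SETLK: rc=%d errno=%d\n", rc, rc ? errno : 0);
    _exit(0);
  }
  waitpid(pid, nullptr, 0);
  close(fd);
  unlink(path);
  return 0;
}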
-
-TEST(FcntlTest, FcntlDupWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(f.path(), O_PATH));
-
- int new_fd;
- // Dup the descriptor and make sure it's the same file.
- EXPECT_THAT(new_fd = fcntl(fd.get(), F_DUPFD, 0), SyscallSucceeds());
-
- FileDescriptor nfd = FileDescriptor(new_fd);
- ASSERT_NE(fd.get(), nfd.get());
- ASSERT_NO_ERRNO(CheckSameFile(fd, nfd));
- EXPECT_THAT(fcntl(nfd.get(), F_GETFL), SyscallSucceedsWithValue(O_PATH));
-}
-
-TEST(FcntlTest, SetFileStatusFlagWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- TempPath path = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(path.path(), O_PATH));
-
- EXPECT_THAT(fcntl(fd.get(), F_SETFL, 0), SyscallFailsWithErrno(EBADF));
-}
-
-TEST(FcntlTest, BadFcntlsWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- TempPath path = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(path.path(), O_PATH));
-
- EXPECT_THAT(fcntl(fd.get(), F_SETOWN, 0), SyscallFailsWithErrno(EBADF));
- EXPECT_THAT(fcntl(fd.get(), F_GETOWN, 0), SyscallFailsWithErrno(EBADF));
-
- EXPECT_THAT(fcntl(fd.get(), F_SETOWN_EX, 0), SyscallFailsWithErrno(EBADF));
- EXPECT_THAT(fcntl(fd.get(), F_GETOWN_EX, 0), SyscallFailsWithErrno(EBADF));
-}
-
-TEST(FcntlTest, SetCloExecBadFD) {
- // Open an eventfd file descriptor with FD_CLOEXEC descriptor flag not set.
- FileDescriptor f = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, 0));
- auto fd = f.get();
- f.reset();
- ASSERT_THAT(fcntl(fd, F_GETFD), SyscallFailsWithErrno(EBADF));
- ASSERT_THAT(fcntl(fd, F_SETFD, FD_CLOEXEC), SyscallFailsWithErrno(EBADF));
-}
-
-TEST(FcntlTest, SetCloExec) {
- // Open an eventfd file descriptor with FD_CLOEXEC descriptor flag not set.
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, 0));
- ASSERT_THAT(fcntl(fd.get(), F_GETFD), SyscallSucceedsWithValue(0));
-
- // Set the FD_CLOEXEC flag.
- ASSERT_THAT(fcntl(fd.get(), F_SETFD, FD_CLOEXEC), SyscallSucceeds());
- ASSERT_THAT(fcntl(fd.get(), F_GETFD), SyscallSucceedsWithValue(FD_CLOEXEC));
-}
-
-TEST(FcntlTest, SetCloExecWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- // Open a file descriptor with FD_CLOEXEC descriptor flag not set.
- TempPath path = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(path.path(), O_PATH));
- ASSERT_THAT(fcntl(fd.get(), F_GETFD), SyscallSucceedsWithValue(0));
-
- // Set the FD_CLOEXEC flag.
- ASSERT_THAT(fcntl(fd.get(), F_SETFD, FD_CLOEXEC), SyscallSucceeds());
- ASSERT_THAT(fcntl(fd.get(), F_GETFD), SyscallSucceedsWithValue(FD_CLOEXEC));
-}
-
-TEST(FcntlTest, DupFDCloExecWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- // Open a file descriptor with FD_CLOEXEC descriptor flag not set.
- TempPath path = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(path.path(), O_PATH));
- int nfd;
- ASSERT_THAT(nfd = fcntl(fd.get(), F_DUPFD_CLOEXEC, 0), SyscallSucceeds());
- FileDescriptor dup_fd(nfd);
-
- // Check for the FD_CLOEXEC flag.
- ASSERT_THAT(fcntl(dup_fd.get(), F_GETFD),
- SyscallSucceedsWithValue(FD_CLOEXEC));
-}
-
-TEST(FcntlTest, ClearCloExec) {
- // Open an eventfd file descriptor with FD_CLOEXEC descriptor flag set.
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, EFD_CLOEXEC));
- ASSERT_THAT(fcntl(fd.get(), F_GETFD), SyscallSucceedsWithValue(FD_CLOEXEC));
-
- // Clear the FD_CLOEXEC flag.
- ASSERT_THAT(fcntl(fd.get(), F_SETFD, 0), SyscallSucceeds());
- ASSERT_THAT(fcntl(fd.get(), F_GETFD), SyscallSucceedsWithValue(0));
-}
-
-TEST(FcntlTest, IndependentDescriptorFlags) {
- // Open an eventfd file descriptor with FD_CLOEXEC descriptor flag not set.
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, 0));
- ASSERT_THAT(fcntl(fd.get(), F_GETFD), SyscallSucceedsWithValue(0));
-
- // Duplicate the descriptor. Ensure that it also doesn't have FD_CLOEXEC.
- FileDescriptor newfd = ASSERT_NO_ERRNO_AND_VALUE(fd.Dup());
- ASSERT_THAT(fcntl(newfd.get(), F_GETFD), SyscallSucceedsWithValue(0));
-
- // Set FD_CLOEXEC on the first FD.
- ASSERT_THAT(fcntl(fd.get(), F_SETFD, FD_CLOEXEC), SyscallSucceeds());
- ASSERT_THAT(fcntl(fd.get(), F_GETFD), SyscallSucceedsWithValue(FD_CLOEXEC));
-
- // Ensure that the second FD is unaffected by the change on the first.
- ASSERT_THAT(fcntl(newfd.get(), F_GETFD), SyscallSucceedsWithValue(0));
-}
-
-// All file description flags passed to open appear in F_GETFL.
-TEST(FcntlTest, GetAllFlags) {
- TempPath path = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- int flags = O_RDWR | O_DIRECT | O_SYNC | O_NONBLOCK | O_APPEND;
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(path.path(), flags));
-
- // Linux forces O_LARGEFILE on all 64-bit kernels and gVisor's is 64-bit.
- int expected = flags | kOLargeFile;
-
- int rflags;
- EXPECT_THAT(rflags = fcntl(fd.get(), F_GETFL), SyscallSucceeds());
- EXPECT_EQ(rflags, expected);
-}
-
-// When O_PATH is specified in flags, flag bits other than O_CLOEXEC,
-// O_DIRECTORY, and O_NOFOLLOW are ignored.
-TEST(FcntlTest, GetOpathFlag) {
- SKIP_IF(IsRunningWithVFS1());
- TempPath path = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- int flags = O_RDWR | O_DIRECT | O_SYNC | O_NONBLOCK | O_APPEND | O_PATH |
- O_NOFOLLOW | O_DIRECTORY;
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(path.path(), flags));
-
- int expected = O_PATH | O_NOFOLLOW | O_DIRECTORY;
-
- int rflags;
- EXPECT_THAT(rflags = fcntl(fd.get(), F_GETFL), SyscallSucceeds());
- EXPECT_EQ(rflags, expected);
-}
-
-TEST(FcntlTest, SetFlags) {
- TempPath path = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(path.path(), 0));
-
- int const flags = O_RDWR | O_DIRECT | O_SYNC | O_NONBLOCK | O_APPEND;
- EXPECT_THAT(fcntl(fd.get(), F_SETFL, flags), SyscallSucceeds());
-
- // Can't set O_RDWR or O_SYNC.
- // Linux forces O_LARGEFILE on all 64-bit kernels and gVisor's is 64-bit.
- int expected = O_DIRECT | O_NONBLOCK | O_APPEND | kOLargeFile;
-
- int rflags;
- EXPECT_THAT(rflags = fcntl(fd.get(), F_GETFL), SyscallSucceeds());
- EXPECT_EQ(rflags, expected);
-}
-
-void TestLock(int fd, short lock_type = F_RDLCK) { // NOLINT, type in flock
- struct flock fl;
- fl.l_type = lock_type;
- fl.l_whence = SEEK_SET;
- fl.l_start = 0;
- // len 0 locks all bytes despite how large the file grows.
- fl.l_len = 0;
- EXPECT_THAT(fcntl(fd, F_SETLK, &fl), SyscallSucceeds());
-}
-
-void TestLockBadFD(int fd,
- short lock_type = F_RDLCK) { // NOLINT, type in flock
- struct flock fl;
- fl.l_type = lock_type;
- fl.l_whence = SEEK_SET;
- fl.l_start = 0;
- // len 0 locks all bytes despite how large the file grows.
- fl.l_len = 0;
- EXPECT_THAT(fcntl(fd, F_SETLK, &fl), SyscallFailsWithErrno(EBADF));
-}
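The helpers above only assert whether F_SETLK succeeds or fails; a related query not exercised here is F_GETLK, which reports any conflicting lock (including the holder's PID) instead of taking one. A brief standalone sketch, assuming no other process holds a lock on the illustrative temp file:

#include <fcntl.h>
#include <unistd.h>
#include <cstdio>

int main() {
  const char* path = "/tmp/fcntl-getlk-demo";  // Illustrative path.
  const int fd = open(path, O_RDWR | O_CREAT, 0644);
  if (fd < 0) return 1;

  struct flock fl = {};
  fl.l_type = F_WRLCK;  // "Would a write lock on the whole file succeed?"
  fl.l_whence = SEEK_SET;
  fl.l_start = 0;
  fl.l_len = 0;
  if (fcntl(fd, F_GETLK, &fl) != 0) {
    perror("F_GETLK");
  } else if (fl.l_type == F_UNLCK) {
    std::printf("no conflicting lock\n");
  } else {
    // l_pid identifies the process holding the conflicting lock.
    std::printf("conflicting lock held by pid %d\n",
                static_cast<int>(fl.l_pid));
  }
  close(fd);
  unlink(path);
  return 0;
}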
-
-TEST_F(FcntlLockTest, SetLockBadFd) { TestLockBadFD(-1); }
-
-TEST_F(FcntlLockTest, SetLockDir) {
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(Open(dir.path(), O_RDONLY, 0000));
- TestLock(fd.get());
-}
-
-TEST_F(FcntlLockTest, SetLockSymlink) {
- SKIP_IF(IsRunningWithVFS1());
-
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- auto symlink = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateSymlinkTo(GetAbsoluteTestTmpdir(), file.path()));
-
- auto fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(symlink.path(), O_RDONLY | O_PATH, 0000));
- TestLockBadFD(fd.get());
-}
-
-TEST_F(FcntlLockTest, SetLockProc) {
- auto fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/proc/self/status", O_RDONLY, 0000));
- TestLock(fd.get());
-}
-
-TEST_F(FcntlLockTest, SetLockPipe) {
- SKIP_IF(IsRunningWithVFS1());
-
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
-
- TestLock(fds[0]);
- TestLockBadFD(fds[0], F_WRLCK);
-
- TestLock(fds[1], F_WRLCK);
- TestLockBadFD(fds[1]);
-
- EXPECT_THAT(close(fds[0]), SyscallSucceeds());
- EXPECT_THAT(close(fds[1]), SyscallSucceeds());
-}
-
-TEST_F(FcntlLockTest, SetLockSocket) {
- SKIP_IF(IsRunningWithVFS1());
-
- int sock = socket(AF_UNIX, SOCK_STREAM, 0);
- ASSERT_THAT(sock, SyscallSucceeds());
-
- struct sockaddr_un addr =
- ASSERT_NO_ERRNO_AND_VALUE(UniqueUnixAddr(true /* abstract */, AF_UNIX));
- ASSERT_THAT(
- bind(sock, reinterpret_cast<struct sockaddr*>(&addr), sizeof(addr)),
- SyscallSucceeds());
-
- TestLock(sock);
- EXPECT_THAT(close(sock), SyscallSucceeds());
-}
-
-TEST_F(FcntlLockTest, SetLockBadOpenFlagsWrite) {
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDONLY, 0666));
-
- struct flock fl0;
- fl0.l_type = F_WRLCK;
- fl0.l_whence = SEEK_SET;
- fl0.l_start = 0;
- fl0.l_len = 0; // Lock the whole file.
-
- // Expect that setting a write lock using a read-only file descriptor
- // won't work.
- EXPECT_THAT(fcntl(fd.get(), F_SETLK, &fl0), SyscallFailsWithErrno(EBADF));
-}
-
-TEST_F(FcntlLockTest, SetLockBadOpenFlagsRead) {
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_WRONLY, 0666));
-
- struct flock fl1;
- fl1.l_type = F_RDLCK;
- fl1.l_whence = SEEK_SET;
- fl1.l_start = 0;
- // Same as SetLockBadFd.
- fl1.l_len = 0;
-
- EXPECT_THAT(fcntl(fd.get(), F_SETLK, &fl1), SyscallFailsWithErrno(EBADF));
-}
-
-TEST_F(FcntlLockTest, SetLockWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_PATH));
-
- struct flock fl0;
- fl0.l_type = F_WRLCK;
- fl0.l_whence = SEEK_SET;
- fl0.l_start = 0;
- fl0.l_len = 0; // Lock the whole file.
-
- // Expect that setting a write lock using an O_PATH file descriptor
- // won't work.
- EXPECT_THAT(fcntl(fd.get(), F_SETLK, &fl0), SyscallFailsWithErrno(EBADF));
-}
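-
-// Note: the EBADF results above follow from fcntl(2): placing a read lock
-// requires a descriptor open for reading and a write lock one open for
-// writing, and an O_PATH descriptor is open for neither.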
-
-TEST_F(FcntlLockTest, SetLockUnlockOnNothing) {
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR, 0666));
-
- struct flock fl;
- fl.l_type = F_UNLCK;
- fl.l_whence = SEEK_SET;
- fl.l_start = 0;
- // Same as SetLockBadFd.
- fl.l_len = 0;
-
- EXPECT_THAT(fcntl(fd.get(), F_SETLK, &fl), SyscallSucceeds());
-}
-
-TEST_F(FcntlLockTest, SetWriteLockSingleProc) {
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd0 =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR, 0666));
-
- struct flock fl;
- fl.l_type = F_WRLCK;
- fl.l_whence = SEEK_SET;
- fl.l_start = 0;
- // Same as SetLockBadFd.
- fl.l_len = 0;
-
- EXPECT_THAT(fcntl(fd0.get(), F_SETLK, &fl), SyscallSucceeds());
- // Expect to be able to take the same lock on the same fd no problem.
- EXPECT_THAT(fcntl(fd0.get(), F_SETLK, &fl), SyscallSucceeds());
-
- FileDescriptor fd1 =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR, 0666));
-
- // Expect to be able to take the same lock from a different fd but for
- // the same process.
- EXPECT_THAT(fcntl(fd1.get(), F_SETLK, &fl), SyscallSucceeds());
-}
-
-TEST_F(FcntlLockTest, SetReadLockMultiProc) {
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR, 0666));
-
- struct flock fl;
- fl.l_type = F_RDLCK;
- fl.l_whence = SEEK_SET;
- fl.l_start = 0;
- // Same as SetLockBadFd.
- fl.l_len = 0;
- EXPECT_THAT(fcntl(fd.get(), F_SETLK, &fl), SyscallSucceeds());
-
- // Spawn a child process to take a read lock on the same file.
- pid_t child_pid = 0;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- SubprocessLock(file.path(), false /* write lock */,
- false /* nonblocking */, false /* no eintr retry */,
- -1 /* no socket fd */, fl.l_start, fl.l_len, &child_pid));
-
- int status = 0;
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << "Exited with code: " << status;
-}
-
-TEST_F(FcntlLockTest, SetReadThenWriteLockMultiProc) {
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR, 0666));
-
- struct flock fl;
- fl.l_type = F_RDLCK;
- fl.l_whence = SEEK_SET;
- fl.l_start = 0;
- // Same as SetLockBadFd.
- fl.l_len = 0;
- EXPECT_THAT(fcntl(fd.get(), F_SETLK, &fl), SyscallSucceeds());
-
- // Assert that another process trying to lock the same file will fail
- // with EAGAIN. It's important that we keep the fd above open so that
- // the other process will contend with the lock.
- pid_t child_pid = 0;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- SubprocessLock(file.path(), true /* write lock */,
- false /* nonblocking */, false /* no eintr retry */,
- -1 /* no socket fd */, fl.l_start, fl.l_len, &child_pid));
-
- int status = 0;
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == EAGAIN)
- << "Exited with code: " << status;
-
- // Close the fd: we want to test that another process can acquire the
- // lock after this point.
- fd.reset();
- // Assert that another process can now acquire the lock.
-
- child_pid = 0;
- auto cleanup2 = ASSERT_NO_ERRNO_AND_VALUE(
- SubprocessLock(file.path(), true /* write lock */,
- false /* nonblocking */, false /* no eintr retry */,
- -1 /* no socket fd */, fl.l_start, fl.l_len, &child_pid));
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << "Exited with code: " << status;
-}
-
-TEST_F(FcntlLockTest, SetWriteThenReadLockMultiProc) {
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR, 0666));
- // Same as SetReadThenWriteLockMultiProc.
-
- struct flock fl;
- fl.l_type = F_WRLCK;
- fl.l_whence = SEEK_SET;
- fl.l_start = 0;
- // Same as SetLockBadFd.
- fl.l_len = 0;
-
- // Same as SetReadThenWriteLockMultiProc.
- EXPECT_THAT(fcntl(fd.get(), F_SETLK, &fl), SyscallSucceeds());
-
- // Same as SetReadThenWriteLockMultiProc.
- pid_t child_pid = 0;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- SubprocessLock(file.path(), false /* write lock */,
- false /* nonblocking */, false /* no eintr retry */,
- -1 /* no socket fd */, fl.l_start, fl.l_len, &child_pid));
-
- int status = 0;
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == EAGAIN)
- << "Exited with code: " << status;
-
- // Same as SetReadThenWriteLockMultiProc.
- fd.reset(); // Close the fd.
-
- // Same as SetReadThenWriteLockMultiProc.
- child_pid = 0;
- auto cleanup2 = ASSERT_NO_ERRNO_AND_VALUE(
- SubprocessLock(file.path(), false /* write lock */,
- false /* nonblocking */, false /* no eintr retry */,
- -1 /* no socket fd */, fl.l_start, fl.l_len, &child_pid));
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << "Exited with code: " << status;
-}
-
-TEST_F(FcntlLockTest, SetWriteLockMultiProc) {
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR, 0666));
- // Same as SetReadThenWriteLockMultiProc.
-
- struct flock fl;
- fl.l_type = F_WRLCK;
- fl.l_whence = SEEK_SET;
- fl.l_start = 0;
- // Same as SetLockBadFd.
- fl.l_len = 0;
-
- // Same as SetReadThenWriteLockMultiProc.
- EXPECT_THAT(fcntl(fd.get(), F_SETLK, &fl), SyscallSucceeds());
-
- // Same as SetReadThenWriteLockMultiProc.
- pid_t child_pid = 0;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- SubprocessLock(file.path(), true /* write lock */,
- false /* nonblocking */, false /* no eintr retry */,
- -1 /* no socket fd */, fl.l_start, fl.l_len, &child_pid));
- int status = 0;
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == EAGAIN)
- << "Exited with code: " << status;
-
- fd.reset(); // Close the FD.
- // Same as SetReadThenWriteLockMultiProc.
- child_pid = 0;
- auto cleanup2 = ASSERT_NO_ERRNO_AND_VALUE(
- SubprocessLock(file.path(), true /* write lock */,
- false /* nonblocking */, false /* no eintr retry */,
- -1 /* no socket fd */, fl.l_start, fl.l_len, &child_pid));
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << "Exited with code: " << status;
-}
-
-TEST_F(FcntlLockTest, SetLockIsRegional) {
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR, 0666));
-
- struct flock fl;
- fl.l_type = F_WRLCK;
- fl.l_whence = SEEK_SET;
- fl.l_start = 0;
- fl.l_len = 4096;
-
- // Same as SetReadThenWriteLockMultiProc.
- EXPECT_THAT(fcntl(fd.get(), F_SETLK, &fl), SyscallSucceeds());
-
- // Same as SetReadThenWriteLockMultiProc.
- pid_t child_pid = 0;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- SubprocessLock(file.path(), true /* write lock */,
- false /* nonblocking */, false /* no eintr retry */,
- -1 /* no socket fd */, fl.l_len, 0, &child_pid));
- int status = 0;
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << "Exited with code: " << status;
-}
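-
-// Note: in the test above the child locks from offset 4096 (passed as
-// l_start) through EOF (l_len == 0), which does not overlap the parent's
-// [0, 4096) write lock, so the child's F_SETLK succeeds.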
-
-TEST_F(FcntlLockTest, SetLockUpgradeDowngrade) {
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR, 0666));
-
- struct flock fl;
- fl.l_type = F_RDLCK;
- fl.l_whence = SEEK_SET;
- fl.l_start = 0;
- // Same as SetLockBadFd.
- fl.l_len = 0;
-
- // Same as SetReadThenWriteLockMultiProc.
- EXPECT_THAT(fcntl(fd.get(), F_SETLK, &fl), SyscallSucceeds());
-
- // Upgrade to a write lock. This will prevent anyone else from taking
- // the lock.
- fl.l_type = F_WRLCK;
- EXPECT_THAT(fcntl(fd.get(), F_SETLK, &fl), SyscallSucceeds());
-
- // Same as SetReadThenWriteLockMultiProc.
- pid_t child_pid = 0;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- SubprocessLock(file.path(), false /* write lock */,
- false /* nonblocking */, false /* no eintr retry */,
- -1 /* no socket fd */, fl.l_start, fl.l_len, &child_pid));
-
- int status = 0;
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == EAGAIN)
- << "Exited with code: " << status;
-
- // Downgrade back to a read lock.
- fl.l_type = F_RDLCK;
- EXPECT_THAT(fcntl(fd.get(), F_SETLK, &fl), SyscallSucceeds());
-
- // Do the same stint as before, but this time it should succeed.
- child_pid = 0;
- auto cleanup2 = ASSERT_NO_ERRNO_AND_VALUE(
- SubprocessLock(file.path(), false /* write lock */,
- false /* nonblocking */, false /* no eintr retry */,
- -1 /* no socket fd */, fl.l_start, fl.l_len, &child_pid));
-
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << "Exited with code: " << status;
-}
-
-TEST_F(FcntlLockTest, SetLockDroppedOnClose) {
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR, 0666));
-
- // While somewhat surprising, obtaining another fd to the same file and
- // then closing it in this process drops *all* locks.
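- // This matches fcntl(2): POSIX record locks are owned by the process, and
- // closing any descriptor that refers to the file releases all of the
- // process's locks on it, no matter which descriptor was used to set them.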
- FileDescriptor other_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR, 0666));
- // Same as SetReadThenWriteLockMultiProc.
-
- struct flock fl;
- fl.l_type = F_WRLCK;
- fl.l_whence = SEEK_SET;
- fl.l_start = 0;
- // Same as SetLockBadFd.
- fl.l_len = 0;
-
- // Same as SetReadThenWriteLockMultiProc.
- EXPECT_THAT(fcntl(fd.get(), F_SETLK, &fl), SyscallSucceeds());
-
- other_fd.reset(); // Close.
-
- // Expect to be able to get the lock, given that the close above dropped it.
- pid_t child_pid = 0;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- SubprocessLock(file.path(), true /* write lock */,
- false /* nonblocking */, false /* no eintr retry */,
- -1 /* no socket fd */, fl.l_start, fl.l_len, &child_pid));
-
- int status = 0;
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << "Exited with code: " << status;
-}
-
-TEST_F(FcntlLockTest, SetLockUnlock) {
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR, 0666));
-
- // Set up two regional locks of different types.
- struct flock fl0;
- fl0.l_type = F_WRLCK;
- fl0.l_whence = SEEK_SET;
- fl0.l_start = 0;
- fl0.l_len = 4096;
-
- struct flock fl1;
- fl1.l_type = F_RDLCK;
- fl1.l_whence = SEEK_SET;
- fl1.l_start = 4096;
- // Same as SetLockBadFd.
- fl1.l_len = 0;
-
- // Set both region locks.
- EXPECT_THAT(fcntl(fd.get(), F_SETLK, &fl0), SyscallSucceeds());
- EXPECT_THAT(fcntl(fd.get(), F_SETLK, &fl1), SyscallSucceeds());
-
- // Another process should fail to take a read lock on the entire file
- // due to the regional write lock.
- pid_t child_pid = 0;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(SubprocessLock(
- file.path(), false /* write lock */, false /* nonblocking */,
- false /* no eintr retry */, -1 /* no socket fd */, 0, 0, &child_pid));
-
- int status = 0;
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == EAGAIN)
- << "Exited with code: " << status;
-
- // Then unlock only the write lock. This should ensure that other
- // processes can take any read lock they want.
- fl0.l_type = F_UNLCK;
- EXPECT_THAT(fcntl(fd.get(), F_SETLK, &fl0), SyscallSucceeds());
-
- // Another process should now succeed to get a read lock on the entire file.
- child_pid = 0;
- auto cleanup2 = ASSERT_NO_ERRNO_AND_VALUE(SubprocessLock(
- file.path(), false /* write lock */, false /* nonblocking */,
- false /* no eintr retry */, -1 /* no socket fd */, 0, 0, &child_pid));
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << "Exited with code: " << status;
-}
-
-TEST_F(FcntlLockTest, SetLockAcrossRename) {
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR, 0666));
-
- // Set up a write lock covering the whole file.
- struct flock fl;
- fl.l_type = F_WRLCK;
- fl.l_whence = SEEK_SET;
- fl.l_start = 0;
- // Same as SetLockBadFd.
- fl.l_len = 0;
-
- // Set the region lock.
- EXPECT_THAT(fcntl(fd.get(), F_SETLK, &fl), SyscallSucceeds());
-
- // Rename the file to someplace nearby.
- std::string const newpath = NewTempAbsPath();
- EXPECT_THAT(rename(file.path().c_str(), newpath.c_str()), SyscallSucceeds());
-
- // Another process should fail to take a read lock on the renamed file
- // since we still have an open handle to the inode.
- pid_t child_pid = 0;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- SubprocessLock(newpath, false /* write lock */, false /* nonblocking */,
- false /* no eintr retry */, -1 /* no socket fd */,
- fl.l_start, fl.l_len, &child_pid));
-
- int status = 0;
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == EAGAIN)
- << "Exited with code: " << status;
-}
-
-// NOTE: The blocking tests below aren't perfect. It's hard to assert exactly
-// what the kernel did while handling a syscall. These tests are timing based
-// because there really isn't any other reasonable way to assert that correct
-// blocking behavior happened.
-
-// This test verifies that obtaining a write lock blocks as expected while
-// another process holds a write lock. The test holds the lock for a while and
-// then waits for the second process to report, over socket_fd, how long it was
-// blocked before the lock succeeded.
-TEST_F(FcntlLockTest, SetWriteLockThenBlockingWriteLock) {
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR, 0666));
-
- struct flock fl;
- fl.l_type = F_WRLCK;
- fl.l_whence = SEEK_SET;
- fl.l_start = 0;
- fl.l_len = 0;
-
- // Take the write lock.
- ASSERT_THAT(fcntl(fd.get(), F_SETLKW, &fl), SyscallSucceeds());
-
- // Attempt to take the write lock in a subprocess. This will immediately
- // block, so we release our lock after some amount of time and then assert
- // how long the other process was blocked for.
- pid_t child_pid = 0;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(SubprocessLock(
- file.path(), true /* write lock */, true /* Blocking Lock */,
- true /* Retry on EINTR */, fds_[1] /* Socket fd for timing information */,
- fl.l_start, fl.l_len, &child_pid));
-
- // We will wait kHoldLockFor before we release our lock, allowing the
- // subprocess to obtain it.
- constexpr absl::Duration kHoldLockFor = absl::Seconds(5);
- const int64_t kMinBlockTimeUsec = absl::ToInt64Microseconds(absl::Seconds(1));
-
- absl::SleepFor(kHoldLockFor);
-
- // Unlock our write lock.
- fl.l_type = F_UNLCK;
- ASSERT_THAT(fcntl(fd.get(), F_SETLKW, &fl), SyscallSucceeds());
-
- // Read the blocked time from the subprocess socket.
- int64_t subprocess_blocked_time_usec = GetSubprocessFcntlTimeInUsec();
-
- // The subprocess must have blocked for at least kMinBlockTimeUsec.
- EXPECT_GT(subprocess_blocked_time_usec, kMinBlockTimeUsec);
-
- // The blocking fcntl(F_SETLKW) write lock must always succeed, as it simply
- // blocks until it can obtain the lock.
- int status = 0;
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << "Exited with code: " << status;
-}
-
-// This test verifies that obtaining a write lock blocks as expected while
-// another process holds a read lock. The test holds the lock for a while and
-// then waits for the second process to report, over socket_fd, how long it was
-// blocked before the lock succeeded.
-TEST_F(FcntlLockTest, SetReadLockThenBlockingWriteLock) {
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR, 0666));
-
- struct flock fl;
- fl.l_type = F_RDLCK;
- fl.l_whence = SEEK_SET;
- fl.l_start = 0;
- fl.l_len = 0;
-
- // Take the read lock.
- ASSERT_THAT(fcntl(fd.get(), F_SETLKW, &fl), SyscallSucceeds());
-
- // Attempt to take the write lock in a subprocess. This will immediately
- // block, so we release our lock after some amount of time and then assert
- // how long the other process was blocked for.
- pid_t child_pid = 0;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(SubprocessLock(
- file.path(), true /* write lock */, true /* Blocking Lock */,
- true /* Retry on EINTR */, fds_[1] /* Socket fd for timing information */,
- fl.l_start, fl.l_len, &child_pid));
-
- // We will wait kHoldLockFor before we release our lock, allowing the
- // subprocess to obtain it.
- constexpr absl::Duration kHoldLockFor = absl::Seconds(5);
-
- const int64_t kMinBlockTimeUsec = absl::ToInt64Microseconds(absl::Seconds(1));
-
- absl::SleepFor(kHoldLockFor);
-
- // Unlock our READ lock.
- fl.l_type = F_UNLCK;
- ASSERT_THAT(fcntl(fd.get(), F_SETLKW, &fl), SyscallSucceeds());
-
- // Read the blocked time from the subprocess socket.
- int64_t subprocess_blocked_time_usec = GetSubprocessFcntlTimeInUsec();
-
- // The subprocess must have blocked for at least kMinBlockTimeUsec.
- EXPECT_GT(subprocess_blocked_time_usec, kMinBlockTimeUsec);
-
- // The blocking fcntl(F_SETLKW) write lock must always succeed, as it simply
- // blocks until it can obtain the lock.
- int status = 0;
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << "Exited with code: " << status;
-}
-
-// This test verifies that obtaining a read lock blocks as expected while
-// another process holds a write lock. The test holds the lock for a while and
-// then waits for the second process to report, over socket_fd, how long it was
-// blocked before the lock succeeded.
-TEST_F(FcntlLockTest, SetWriteLockThenBlockingReadLock) {
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR, 0666));
-
- struct flock fl;
- fl.l_type = F_WRLCK;
- fl.l_whence = SEEK_SET;
- fl.l_start = 0;
- fl.l_len = 0;
-
- // Take the write lock.
- ASSERT_THAT(fcntl(fd.get(), F_SETLKW, &fl), SyscallSucceeds());
-
- // Attempt to take the read lock in a subprocess. This will immediately
- // block, so we release our lock after some amount of time and then assert
- // how long the other process was blocked for.
- pid_t child_pid = 0;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(SubprocessLock(
- file.path(), false /* read lock */, true /* Blocking Lock */,
- true /* Retry on EINTR */, fds_[1] /* Socket fd for timing information */,
- fl.l_start, fl.l_len, &child_pid));
-
- // We will wait kHoldLockFor before we release our lock, allowing the
- // subprocess to obtain it.
- constexpr absl::Duration kHoldLockFor = absl::Seconds(5);
-
- const int64_t kMinBlockTimeUsec = absl::ToInt64Microseconds(absl::Seconds(1));
-
- absl::SleepFor(kHoldLockFor);
-
- // Unlock our write lock.
- fl.l_type = F_UNLCK;
- ASSERT_THAT(fcntl(fd.get(), F_SETLKW, &fl), SyscallSucceeds());
-
- // Read the blocked time from the subprocess socket.
- int64_t subprocess_blocked_time_usec = GetSubprocessFcntlTimeInUsec();
-
- // The subprocess must have blocked for at least kMinBlockTimeUsec.
- EXPECT_GT(subprocess_blocked_time_usec, kMinBlockTimeUsec);
-
- // The blocking fcntl(F_SETLKW) read lock must always succeed, as it simply
- // blocks until it can obtain the lock.
- int status = 0;
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << "Exited with code: " << status;
-}
-
-// This test verifies that when one process holds only a read lock, another
-// process will not block when obtaining a read lock via F_SETLKW.
-TEST_F(FcntlLockTest, SetReadLockThenBlockingReadLock) {
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR, 0666));
-
- struct flock fl;
- fl.l_type = F_RDLCK;
- fl.l_whence = SEEK_SET;
- fl.l_start = 0;
- fl.l_len = 0;
-
- // Take the READ lock.
- ASSERT_THAT(fcntl(fd.get(), F_SETLKW, &fl), SyscallSucceeds());
-
- // Attempt to take the read lock in a subprocess. Since multiple processes
- // can hold a read lock, this should return immediately without blocking,
- // even though we used F_SETLKW in the subprocess.
- pid_t child_pid = 0;
- auto sp = ASSERT_NO_ERRNO_AND_VALUE(SubprocessLock(
- file.path(), false /* read lock */, true /* Blocking Lock */,
- true /* Retry on EINTR */, -1 /* No fd, should not block */, fl.l_start,
- fl.l_len, &child_pid));
-
- // We never release the lock and the subprocess should still obtain it without
- // blocking for any period of time.
- int status = 0;
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << "Exited with code: " << status;
-}
-
-TEST(FcntlTest, GetO_ASYNC) {
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- int flag_fl = -1;
- ASSERT_THAT(flag_fl = fcntl(s.get(), F_GETFL), SyscallSucceeds());
- EXPECT_EQ(flag_fl & O_ASYNC, 0);
-
- int flag_fd = -1;
- ASSERT_THAT(flag_fd = fcntl(s.get(), F_GETFD), SyscallSucceeds());
- EXPECT_EQ(flag_fd & O_ASYNC, 0);
-}
-
-TEST(FcntlTest, SetFlO_ASYNC) {
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- int before_fl = -1;
- ASSERT_THAT(before_fl = fcntl(s.get(), F_GETFL), SyscallSucceeds());
-
- int before_fd = -1;
- ASSERT_THAT(before_fd = fcntl(s.get(), F_GETFD), SyscallSucceeds());
-
- ASSERT_THAT(fcntl(s.get(), F_SETFL, before_fl | O_ASYNC), SyscallSucceeds());
-
- int after_fl = -1;
- ASSERT_THAT(after_fl = fcntl(s.get(), F_GETFL), SyscallSucceeds());
- EXPECT_EQ(after_fl, before_fl | O_ASYNC);
-
- int after_fd = -1;
- ASSERT_THAT(after_fd = fcntl(s.get(), F_GETFD), SyscallSucceeds());
- EXPECT_EQ(after_fd, before_fd);
-}
-
-TEST(FcntlTest, SetFdO_ASYNC) {
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- int before_fl = -1;
- ASSERT_THAT(before_fl = fcntl(s.get(), F_GETFL), SyscallSucceeds());
-
- int before_fd = -1;
- ASSERT_THAT(before_fd = fcntl(s.get(), F_GETFD), SyscallSucceeds());
-
- ASSERT_THAT(fcntl(s.get(), F_SETFD, before_fd | O_ASYNC), SyscallSucceeds());
-
- int after_fl = -1;
- ASSERT_THAT(after_fl = fcntl(s.get(), F_GETFL), SyscallSucceeds());
- EXPECT_EQ(after_fl, before_fl);
-
- int after_fd = -1;
- ASSERT_THAT(after_fd = fcntl(s.get(), F_GETFD), SyscallSucceeds());
- EXPECT_EQ(after_fd, before_fd);
-}
-
-TEST(FcntlTest, DupAfterO_ASYNC) {
- FileDescriptor s1 = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- int before = -1;
- ASSERT_THAT(before = fcntl(s1.get(), F_GETFL), SyscallSucceeds());
-
- ASSERT_THAT(fcntl(s1.get(), F_SETFL, before | O_ASYNC), SyscallSucceeds());
-
- FileDescriptor fd2 = ASSERT_NO_ERRNO_AND_VALUE(s1.Dup());
-
- int after = -1;
- ASSERT_THAT(after = fcntl(fd2.get(), F_GETFL), SyscallSucceeds());
- EXPECT_EQ(after & O_ASYNC, O_ASYNC);
-}
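-
-// Note: O_ASYNC is a file status flag stored in the open file description, so
-// it is shared by dup()ed descriptors (as checked above), while F_SETFD
-// manipulates only file descriptor flags such as FD_CLOEXEC, which is why
-// SetFdO_ASYNC saw no change in the status flags.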
-
-TEST(FcntlTest, GetOwnNone) {
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- // Use the raw syscall because the glibc wrapper may convert F_{GET,SET}OWN
- // into F_{GET,SET}OWN_EX.
- EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_GETOWN),
- SyscallSucceedsWithValue(0));
-}
-
-TEST(FcntlTest, GetOwnExNone) {
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- f_owner_ex owner = {};
- EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_GETOWN_EX, &owner),
- SyscallSucceedsWithValue(0));
-}
-
-TEST(FcntlTest, SetOwnInvalidPid) {
- SKIP_IF(IsRunningWithVFS1());
-
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN, 12345678),
- SyscallFailsWithErrno(ESRCH));
-}
-
-TEST(FcntlTest, SetOwnInvalidPgrp) {
- SKIP_IF(IsRunningWithVFS1());
-
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN, -12345678),
- SyscallFailsWithErrno(ESRCH));
-}
-
-TEST(FcntlTest, SetOwnPid) {
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- pid_t pid;
- EXPECT_THAT(pid = getpid(), SyscallSucceeds());
-
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN, pid),
- SyscallSucceedsWithValue(0));
-
- EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_GETOWN),
- SyscallSucceedsWithValue(pid));
-}
-
-TEST(FcntlTest, SetOwnPgrp) {
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- pid_t pgid;
- EXPECT_THAT(pgid = getpgrp(), SyscallSucceeds());
-
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN, -pgid),
- SyscallSucceedsWithValue(0));
-
- // Verify with F_GETOWN_EX; using F_GETOWN on Linux may incorrectly treat the
- // negative return value as an error, converting the return value to -1 and
- // setting errno accordingly.
- f_owner_ex got_owner = {};
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_GETOWN_EX, &got_owner),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(got_owner.type, F_OWNER_PGRP);
- EXPECT_EQ(got_owner.pid, pgid);
-}
-
-TEST(FcntlTest, SetOwnUnset) {
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- // Set and unset pid.
- pid_t pid;
- EXPECT_THAT(pid = getpid(), SyscallSucceeds());
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN, pid),
- SyscallSucceedsWithValue(0));
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN, 0),
- SyscallSucceedsWithValue(0));
-
- EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_GETOWN),
- SyscallSucceedsWithValue(0));
-
- // Set and unset pgid.
- pid_t pgid;
- EXPECT_THAT(pgid = getpgrp(), SyscallSucceeds());
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN, -pgid),
- SyscallSucceedsWithValue(0));
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN, 0),
- SyscallSucceedsWithValue(0));
-
- EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_GETOWN),
- SyscallSucceedsWithValue(0));
-}
-
-// F_SETOWN flips the sign of negative values, an operation that is guarded
-// against overflow: negating INT_MIN is not representable as an int, so the
-// call must fail with EINVAL rather than wrap around.
-TEST(FcntlTest, SetOwnOverflow) {
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN, INT_MIN),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(FcntlTest, SetOwnExInvalidType) {
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- f_owner_ex owner = {};
- owner.type = __pid_type(-1);
- EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN_EX, &owner),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(FcntlTest, SetOwnExInvalidTid) {
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- f_owner_ex owner = {};
- owner.type = F_OWNER_TID;
- owner.pid = -1;
-
- EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN_EX, &owner),
- SyscallFailsWithErrno(ESRCH));
-}
-
-TEST(FcntlTest, SetOwnExInvalidPid) {
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- f_owner_ex owner = {};
- owner.type = F_OWNER_PID;
- owner.pid = -1;
-
- EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN_EX, &owner),
- SyscallFailsWithErrno(ESRCH));
-}
-
-TEST(FcntlTest, SetOwnExInvalidPgrp) {
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- f_owner_ex owner = {};
- owner.type = F_OWNER_PGRP;
- owner.pid = -1;
-
- EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN_EX, &owner),
- SyscallFailsWithErrno(ESRCH));
-}
-
-TEST(FcntlTest, SetOwnExTid) {
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- f_owner_ex owner = {};
- owner.type = F_OWNER_TID;
- EXPECT_THAT(owner.pid = syscall(__NR_gettid), SyscallSucceeds());
-
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN_EX, &owner),
- SyscallSucceedsWithValue(0));
-
- EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_GETOWN),
- SyscallSucceedsWithValue(owner.pid));
-}
-
-TEST(FcntlTest, SetOwnExPid) {
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- f_owner_ex owner = {};
- owner.type = F_OWNER_PID;
- EXPECT_THAT(owner.pid = getpid(), SyscallSucceeds());
-
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN_EX, &owner),
- SyscallSucceedsWithValue(0));
-
- EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_GETOWN),
- SyscallSucceedsWithValue(owner.pid));
-}
-
-TEST(FcntlTest, SetOwnExPgrp) {
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- f_owner_ex set_owner = {};
- set_owner.type = F_OWNER_PGRP;
- EXPECT_THAT(set_owner.pid = getpgrp(), SyscallSucceeds());
-
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN_EX, &set_owner),
- SyscallSucceedsWithValue(0));
-
- // Verify with F_GETOWN_EX; using F_GETOWN on Linux may incorrectly treat the
- // negative return value as an error, converting the return value to -1 and
- // setting errno accordingly.
- f_owner_ex got_owner = {};
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_GETOWN_EX, &got_owner),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(got_owner.type, set_owner.type);
- EXPECT_EQ(got_owner.pid, set_owner.pid);
-}
-
-TEST(FcntlTest, SetOwnExUnset) {
- SKIP_IF(IsRunningWithVFS1());
-
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- // Set and unset pid.
- f_owner_ex owner = {};
- owner.type = F_OWNER_PID;
- EXPECT_THAT(owner.pid = getpid(), SyscallSucceeds());
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN_EX, &owner),
- SyscallSucceedsWithValue(0));
- owner.pid = 0;
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN_EX, &owner),
- SyscallSucceedsWithValue(0));
-
- EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_GETOWN),
- SyscallSucceedsWithValue(0));
-
- // Set and unset pgid.
- owner.type = F_OWNER_PGRP;
- EXPECT_THAT(owner.pid = getpgrp(), SyscallSucceeds());
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN_EX, &owner),
- SyscallSucceedsWithValue(0));
- owner.pid = 0;
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN_EX, &owner),
- SyscallSucceedsWithValue(0));
-
- EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_GETOWN),
- SyscallSucceedsWithValue(0));
-}
-
-TEST(FcntlTest, GetOwnExTid) {
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- f_owner_ex set_owner = {};
- set_owner.type = F_OWNER_TID;
- EXPECT_THAT(set_owner.pid = syscall(__NR_gettid), SyscallSucceeds());
-
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN_EX, &set_owner),
- SyscallSucceedsWithValue(0));
-
- f_owner_ex got_owner = {};
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_GETOWN_EX, &got_owner),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(got_owner.type, set_owner.type);
- EXPECT_EQ(got_owner.pid, set_owner.pid);
-}
-
-TEST(FcntlTest, GetOwnExPid) {
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- f_owner_ex set_owner = {};
- set_owner.type = F_OWNER_PID;
- EXPECT_THAT(set_owner.pid = getpid(), SyscallSucceeds());
-
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN_EX, &set_owner),
- SyscallSucceedsWithValue(0));
-
- f_owner_ex got_owner = {};
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_GETOWN_EX, &got_owner),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(got_owner.type, set_owner.type);
- EXPECT_EQ(got_owner.pid, set_owner.pid);
-}
-
-TEST(FcntlTest, GetOwnExPgrp) {
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- f_owner_ex set_owner = {};
- set_owner.type = F_OWNER_PGRP;
- EXPECT_THAT(set_owner.pid = getpgrp(), SyscallSucceeds());
-
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN_EX, &set_owner),
- SyscallSucceedsWithValue(0));
-
- f_owner_ex got_owner = {};
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_GETOWN_EX, &got_owner),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(got_owner.type, set_owner.type);
- EXPECT_EQ(got_owner.pid, set_owner.pid);
-}
-
-TEST(FcntlTest, SetSig) {
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETSIG, SIGUSR1),
- SyscallSucceedsWithValue(0));
- EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_GETSIG),
- SyscallSucceedsWithValue(SIGUSR1));
-}
-
-TEST(FcntlTest, SetSigDefaultsToZero) {
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- // Defaults to returning the zero value, indicating default behavior (SIGIO).
- EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_GETSIG),
- SyscallSucceedsWithValue(0));
-}
-
-TEST(FcntlTest, SetSigToDefault) {
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETSIG, SIGIO),
- SyscallSucceedsWithValue(0));
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_GETSIG),
- SyscallSucceedsWithValue(SIGIO));
-
- // Can be reset to the default behavior.
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETSIG, 0),
- SyscallSucceedsWithValue(0));
- EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_GETSIG),
- SyscallSucceedsWithValue(0));
-}
-
-TEST(FcntlTest, SetSigInvalid) {
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETSIG, SIGRTMAX + 1),
- SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_GETSIG),
- SyscallSucceedsWithValue(0));
-}
-
-TEST(FcntlTest, SetSigInvalidDoesNotResetPreviousChoice) {
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETSIG, SIGUSR1),
- SyscallSucceedsWithValue(0));
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETSIG, SIGRTMAX + 1),
- SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(syscall(__NR_fcntl, s.get(), F_GETSIG),
- SyscallSucceedsWithValue(SIGUSR1));
-}
-
-TEST_F(FcntlSignalTest, SetSigDefault) {
- const auto signal_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(RegisterSignalHandler(SIGIO));
- RegisterFD(pipe_read_fd_, 0); // Zero = default behavior
- GenerateIOEvent();
- WaitForSignalDelivery(absl::Seconds(1));
- ASSERT_EQ(num_signals_received_, 1);
- SignalDelivery sig = signals_received_.front();
- signals_received_.pop_front();
- EXPECT_EQ(sig.num, SIGIO);
- EXPECT_EQ(sig.info.si_signo, SIGIO);
- // The siginfo contents are undefined in this case.
-}
-
-TEST_F(FcntlSignalTest, SetSigCustom) {
- const auto signal_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(RegisterSignalHandler(SIGUSR1));
- RegisterFD(pipe_read_fd_, SIGUSR1);
- GenerateIOEvent();
- WaitForSignalDelivery(absl::Seconds(1));
- ASSERT_EQ(num_signals_received_, 1);
- SignalDelivery sig = signals_received_.front();
- signals_received_.pop_front();
- EXPECT_EQ(sig.num, SIGUSR1);
- EXPECT_EQ(sig.info.si_signo, SIGUSR1);
- EXPECT_EQ(sig.info.si_fd, pipe_read_fd_);
- EXPECT_EQ(sig.info.si_band, EPOLLIN | EPOLLRDNORM);
-}
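-
-// Note: per fcntl(2), a nonzero F_SETSIG value makes the kernel pass extra
-// information about the I/O event to an SA_SIGINFO handler: si_fd identifies
-// the descriptor and si_band the poll events, which is what the checks above
-// verify.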
-
-TEST_F(FcntlSignalTest, SetSigUnregisterStillGetsSigio) {
- const auto sigio_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(RegisterSignalHandler(SIGIO));
- const auto sigusr1_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(RegisterSignalHandler(SIGUSR1));
- RegisterFD(pipe_read_fd_, SIGUSR1);
- RegisterFD(pipe_read_fd_, 0);
- GenerateIOEvent();
- WaitForSignalDelivery(absl::Seconds(1));
- ASSERT_EQ(num_signals_received_, 1);
- SignalDelivery sig = signals_received_.front();
- signals_received_.pop_front();
- EXPECT_EQ(sig.num, SIGIO);
- // The siginfo contents are undefined in this case.
-}
-
-TEST_F(FcntlSignalTest, SetSigWithSigioStillGetsSiginfo) {
- const auto signal_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(RegisterSignalHandler(SIGIO));
- RegisterFD(pipe_read_fd_, SIGIO);
- GenerateIOEvent();
- WaitForSignalDelivery(absl::Seconds(1));
- ASSERT_EQ(num_signals_received_, 1);
- SignalDelivery sig = signals_received_.front();
- EXPECT_EQ(sig.num, SIGIO);
- EXPECT_EQ(sig.info.si_signo, SIGIO);
- EXPECT_EQ(sig.info.si_fd, pipe_read_fd_);
- EXPECT_EQ(sig.info.si_band, EPOLLIN | EPOLLRDNORM);
-}
-
-TEST_F(FcntlSignalTest, SetSigDupThenCloseOld) {
- const auto sigusr1_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(RegisterSignalHandler(SIGUSR1));
- RegisterFD(pipe_read_fd_, SIGUSR1);
- DupReadFD();
- FlushAndCloseFD(pipe_read_fd_);
- GenerateIOEvent();
- WaitForSignalDelivery(absl::Seconds(1));
- ASSERT_EQ(num_signals_received_, 1);
- SignalDelivery sig = signals_received_.front();
- // We get a signal with the **old** FD (even though it is closed).
- EXPECT_EQ(sig.num, SIGUSR1);
- EXPECT_EQ(sig.info.si_signo, SIGUSR1);
- EXPECT_EQ(sig.info.si_fd, pipe_read_fd_);
- EXPECT_EQ(sig.info.si_band, EPOLLIN | EPOLLRDNORM);
-}
-
-TEST_F(FcntlSignalTest, SetSigDupThenCloseNew) {
- const auto sigusr1_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(RegisterSignalHandler(SIGUSR1));
- RegisterFD(pipe_read_fd_, SIGUSR1);
- DupReadFD();
- FlushAndCloseFD(pipe_read_fd_dup_);
- GenerateIOEvent();
- WaitForSignalDelivery(absl::Seconds(1));
- ASSERT_EQ(num_signals_received_, 1);
- SignalDelivery sig = signals_received_.front();
- // We get a signal with the old FD.
- EXPECT_EQ(sig.num, SIGUSR1);
- EXPECT_EQ(sig.info.si_signo, SIGUSR1);
- EXPECT_EQ(sig.info.si_fd, pipe_read_fd_);
- EXPECT_EQ(sig.info.si_band, EPOLLIN | EPOLLRDNORM);
-}
-
-TEST_F(FcntlSignalTest, SetSigDupOldRegistered) {
- const auto sigusr1_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(RegisterSignalHandler(SIGUSR1));
- RegisterFD(pipe_read_fd_, SIGUSR1);
- DupReadFD();
- GenerateIOEvent();
- WaitForSignalDelivery(absl::Seconds(1));
- ASSERT_EQ(num_signals_received_, 1);
- SignalDelivery sig = signals_received_.front();
- // We get a signal with the old FD.
- EXPECT_EQ(sig.num, SIGUSR1);
- EXPECT_EQ(sig.info.si_signo, SIGUSR1);
- EXPECT_EQ(sig.info.si_fd, pipe_read_fd_);
- EXPECT_EQ(sig.info.si_band, EPOLLIN | EPOLLRDNORM);
-}
-
-TEST_F(FcntlSignalTest, SetSigDupNewRegistered) {
- const auto sigusr2_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(RegisterSignalHandler(SIGUSR2));
- DupReadFD();
- RegisterFD(pipe_read_fd_dup_, SIGUSR2);
- GenerateIOEvent();
- WaitForSignalDelivery(absl::Seconds(1));
- ASSERT_EQ(num_signals_received_, 1);
- SignalDelivery sig = signals_received_.front();
- // We get a signal with the new FD.
- EXPECT_EQ(sig.num, SIGUSR2);
- EXPECT_EQ(sig.info.si_signo, SIGUSR2);
- EXPECT_EQ(sig.info.si_fd, pipe_read_fd_dup_);
- EXPECT_EQ(sig.info.si_band, EPOLLIN | EPOLLRDNORM);
-}
-
-TEST_F(FcntlSignalTest, SetSigDupBothRegistered) {
- const auto sigusr1_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(RegisterSignalHandler(SIGUSR1));
- const auto sigusr2_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(RegisterSignalHandler(SIGUSR2));
- RegisterFD(pipe_read_fd_, SIGUSR1);
- DupReadFD();
- RegisterFD(pipe_read_fd_dup_, SIGUSR2);
- GenerateIOEvent();
- WaitForSignalDelivery(absl::Seconds(1));
- ASSERT_EQ(num_signals_received_, 1);
- SignalDelivery sig = signals_received_.front();
- // We get a signal with the **new** signal number, but the **old** FD.
- EXPECT_EQ(sig.num, SIGUSR2);
- EXPECT_EQ(sig.info.si_signo, SIGUSR2);
- EXPECT_EQ(sig.info.si_fd, pipe_read_fd_);
- EXPECT_EQ(sig.info.si_band, EPOLLIN | EPOLLRDNORM);
-}
-
-TEST_F(FcntlSignalTest, SetSigDupBothRegisteredAfterDup) {
- const auto sigusr1_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(RegisterSignalHandler(SIGUSR1));
- const auto sigusr2_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(RegisterSignalHandler(SIGUSR2));
- DupReadFD();
- RegisterFD(pipe_read_fd_, SIGUSR1);
- RegisterFD(pipe_read_fd_dup_, SIGUSR2);
- GenerateIOEvent();
- WaitForSignalDelivery(absl::Seconds(1));
- ASSERT_EQ(num_signals_received_, 1);
- SignalDelivery sig = signals_received_.front();
- // We get a signal with the **new** signal number, but the **old** FD.
- EXPECT_EQ(sig.num, SIGUSR2);
- EXPECT_EQ(sig.info.si_signo, SIGUSR2);
- EXPECT_EQ(sig.info.si_fd, pipe_read_fd_);
- EXPECT_EQ(sig.info.si_band, EPOLLIN | EPOLLRDNORM);
-}
-
-TEST_F(FcntlSignalTest, SetSigDupUnregisterOld) {
- const auto sigio_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(RegisterSignalHandler(SIGIO));
- const auto sigusr1_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(RegisterSignalHandler(SIGUSR1));
- const auto sigusr2_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(RegisterSignalHandler(SIGUSR2));
- RegisterFD(pipe_read_fd_, SIGUSR1);
- DupReadFD();
- RegisterFD(pipe_read_fd_dup_, SIGUSR2);
- RegisterFD(pipe_read_fd_, 0); // Should go back to SIGIO behavior.
- GenerateIOEvent();
- WaitForSignalDelivery(absl::Seconds(1));
- ASSERT_EQ(num_signals_received_, 1);
- SignalDelivery sig = signals_received_.front();
- // We get a signal with SIGIO.
- EXPECT_EQ(sig.num, SIGIO);
- // siginfo is undefined in this case.
-}
-
-TEST_F(FcntlSignalTest, SetSigDupUnregisterNew) {
- const auto sigio_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(RegisterSignalHandler(SIGIO));
- const auto sigusr1_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(RegisterSignalHandler(SIGUSR1));
- const auto sigusr2_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(RegisterSignalHandler(SIGUSR2));
- RegisterFD(pipe_read_fd_, SIGUSR1);
- DupReadFD();
- RegisterFD(pipe_read_fd_dup_, SIGUSR2);
- RegisterFD(pipe_read_fd_dup_, 0); // Should go back to SIGIO behavior.
- GenerateIOEvent();
- WaitForSignalDelivery(absl::Seconds(1));
- ASSERT_EQ(num_signals_received_, 1);
- SignalDelivery sig = signals_received_.front();
- // We get a signal with SIGIO.
- EXPECT_EQ(sig.num, SIGIO);
- // siginfo is undefined in this case.
-}
-
-// Make sure that making multiple concurrent changes to async signal generation
-// does not cause any race issues.
-TEST(FcntlTest, SetFlSetOwnSetSigDoNotRace) {
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- pid_t pid;
- EXPECT_THAT(pid = getpid(), SyscallSucceeds());
-
- constexpr absl::Duration runtime = absl::Milliseconds(300);
- auto set_async = [&s, &runtime] {
- for (auto start = absl::Now(); absl::Now() - start < runtime;) {
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETFL, O_ASYNC),
- SyscallSucceeds());
- sched_yield();
- }
- };
- auto reset_async = [&s, &runtime] {
- for (auto start = absl::Now(); absl::Now() - start < runtime;) {
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETFL, 0), SyscallSucceeds());
- sched_yield();
- }
- };
- auto set_own = [&s, &pid, &runtime] {
- for (auto start = absl::Now(); absl::Now() - start < runtime;) {
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETOWN, pid),
- SyscallSucceeds());
- sched_yield();
- }
- };
- auto set_sig = [&s, &runtime] {
- for (auto start = absl::Now(); absl::Now() - start < runtime;) {
- ASSERT_THAT(syscall(__NR_fcntl, s.get(), F_SETSIG, SIGUSR1),
- SyscallSucceeds());
- sched_yield();
- }
- };
-
- std::list<ScopedThread> threads;
- for (int i = 0; i < 10; i++) {
- threads.emplace_back(set_async);
- threads.emplace_back(reset_async);
- threads.emplace_back(set_own);
- threads.emplace_back(set_sig);
- }
-}
-
-TEST_F(FcntlLockTest, GetLockOnNothing) {
- SKIP_IF(IsRunningWithVFS1());
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR, 0666));
-
- struct flock fl;
- fl.l_type = F_RDLCK;
- fl.l_whence = SEEK_SET;
- fl.l_start = 0;
- fl.l_len = 40;
- ASSERT_THAT(fcntl(fd.get(), F_GETLK, &fl), SyscallSucceeds());
- ASSERT_TRUE(fl.l_type == F_UNLCK);
-}
-
-TEST_F(FcntlLockTest, GetLockOnLockSameProcess) {
- SKIP_IF(IsRunningWithVFS1());
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR, 0666));
-
- struct flock fl;
- fl.l_type = F_RDLCK;
- fl.l_whence = SEEK_SET;
- fl.l_start = 0;
- fl.l_len = 40;
- ASSERT_THAT(fcntl(fd.get(), F_SETLK, &fl), SyscallSucceeds());
- ASSERT_THAT(fcntl(fd.get(), F_GETLK, &fl), SyscallSucceeds());
- ASSERT_TRUE(fl.l_type == F_UNLCK);
-
- fl.l_type = F_WRLCK;
- ASSERT_THAT(fcntl(fd.get(), F_SETLK, &fl), SyscallSucceeds());
- ASSERT_THAT(fcntl(fd.get(), F_GETLK, &fl), SyscallSucceeds());
- ASSERT_TRUE(fl.l_type == F_UNLCK);
-}
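-
-// Note: F_GETLK reports F_UNLCK above because it only describes locks that
-// would prevent the caller's request, and per fcntl(2) a process's own locks
-// never conflict with its own query.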
-
-TEST_F(FcntlLockTest, GetReadLockOnReadLock) {
- SKIP_IF(IsRunningWithVFS1());
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR, 0666));
-
- struct flock fl;
- fl.l_type = F_RDLCK;
- fl.l_whence = SEEK_SET;
- fl.l_start = 0;
- fl.l_len = 40;
- ASSERT_THAT(fcntl(fd.get(), F_SETLK, &fl), SyscallSucceeds());
-
- pid_t child_pid = fork();
- if (child_pid == 0) {
- TEST_CHECK(fcntl(fd.get(), F_GETLK, &fl) >= 0);
- TEST_CHECK(fl.l_type == F_UNLCK);
- _exit(0);
- }
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- ASSERT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0);
-}
-
-TEST_F(FcntlLockTest, GetReadLockOnWriteLock) {
- SKIP_IF(IsRunningWithVFS1());
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR, 0666));
-
- struct flock fl;
- fl.l_type = F_WRLCK;
- fl.l_whence = SEEK_SET;
- fl.l_start = 0;
- fl.l_len = 40;
- ASSERT_THAT(fcntl(fd.get(), F_SETLK, &fl), SyscallSucceeds());
-
- fl.l_type = F_RDLCK;
- pid_t child_pid = fork();
- if (child_pid == 0) {
- TEST_CHECK(fcntl(fd.get(), F_GETLK, &fl) >= 0);
- TEST_CHECK(fl.l_type == F_WRLCK);
- TEST_CHECK(fl.l_pid == getppid());
- _exit(0);
- }
-
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- ASSERT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0);
-}
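-
-// Note: when F_GETLK finds a conflicting lock it overwrites the struct flock
-// with that lock's details, including the owner's PID in l_pid, which is why
-// the child processes here can check that the owner is their parent.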
-
-TEST_F(FcntlLockTest, GetWriteLockOnReadLock) {
- SKIP_IF(IsRunningWithVFS1());
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR, 0666));
-
- struct flock fl;
- fl.l_type = F_RDLCK;
- fl.l_whence = SEEK_SET;
- fl.l_start = 0;
- fl.l_len = 40;
- ASSERT_THAT(fcntl(fd.get(), F_SETLK, &fl), SyscallSucceeds());
-
- fl.l_type = F_WRLCK;
- pid_t child_pid = fork();
- if (child_pid == 0) {
- TEST_CHECK(fcntl(fd.get(), F_GETLK, &fl) >= 0);
- TEST_CHECK(fl.l_type == F_RDLCK);
- TEST_CHECK(fl.l_pid == getppid());
- _exit(0);
- }
-
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- ASSERT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0);
-}
-
-TEST_F(FcntlLockTest, GetWriteLockOnWriteLock) {
- SKIP_IF(IsRunningWithVFS1());
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR, 0666));
-
- struct flock fl;
- fl.l_type = F_WRLCK;
- fl.l_whence = SEEK_SET;
- fl.l_start = 0;
- fl.l_len = 40;
- ASSERT_THAT(fcntl(fd.get(), F_SETLK, &fl), SyscallSucceeds());
-
- pid_t child_pid = fork();
- if (child_pid == 0) {
- TEST_CHECK(fcntl(fd.get(), F_GETLK, &fl) >= 0);
- TEST_CHECK(fl.l_type == F_WRLCK);
- TEST_CHECK(fl.l_pid == getppid());
- _exit(0);
- }
-
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- ASSERT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0);
-}
-
-// Tests that the pid returned from F_GETLK is relative to the caller's PID
-// namespace.
-TEST_F(FcntlLockTest, GetLockRespectsPIDNamespace) {
- SKIP_IF(IsRunningWithVFS1());
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- std::string filename = file.path();
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(filename, O_RDWR, 0666));
-
- // Lock in the parent process.
- struct flock fl;
- fl.l_type = F_WRLCK;
- fl.l_whence = SEEK_SET;
- fl.l_start = 0;
- fl.l_len = 40;
- ASSERT_THAT(fcntl(fd.get(), F_SETLK, &fl), SyscallSucceeds());
-
- auto child_getlk = [](void* filename) {
- int fd = open((char*)filename, O_RDWR, 0666);
- TEST_CHECK(fd >= 0);
-
- struct flock fl;
- fl.l_type = F_WRLCK;
- fl.l_whence = SEEK_SET;
- fl.l_start = 0;
- fl.l_len = 40;
- TEST_CHECK(fcntl(fd, F_GETLK, &fl) >= 0);
- TEST_CHECK(fl.l_type == F_WRLCK);
- // Parent PID should be 0 in the child PID namespace.
- TEST_CHECK(fl.l_pid == 0);
- close(fd);
- return 0;
- };
-
- // Set up child process in a new PID namespace.
- constexpr int kStackSize = 4096;
- Mapping stack = ASSERT_NO_ERRNO_AND_VALUE(
- Mmap(nullptr, kStackSize, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0));
- pid_t child_pid;
- ASSERT_THAT(
- child_pid = clone(child_getlk, (char*)stack.ptr() + stack.len(),
- CLONE_NEWPID | SIGCHLD, (void*)filename.c_str()),
- SyscallSucceeds());
-
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- ASSERT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0);
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
-
-int set_lock() {
- const std::string set_lock_on = absl::GetFlag(FLAGS_child_set_lock_on);
- int socket_fd = absl::GetFlag(FLAGS_socket_fd);
- int fd = open(set_lock_on.c_str(), O_RDWR, 0666);
- if (fd == -1 && errno != 0) {
- int err = errno;
- std::cerr << "CHILD open " << set_lock_on << " failed: " << err
- << std::endl;
- return err;
- }
-
- struct flock fl;
- if (absl::GetFlag(FLAGS_child_set_lock_write)) {
- fl.l_type = F_WRLCK;
- } else {
- fl.l_type = F_RDLCK;
- }
- fl.l_whence = SEEK_SET;
- fl.l_start = absl::GetFlag(FLAGS_child_set_lock_start);
- fl.l_len = absl::GetFlag(FLAGS_child_set_lock_len);
-
- // Test the fcntl.
- int err = 0;
- int ret = 0;
-
- gvisor::testing::MonotonicTimer timer;
- timer.Start();
- do {
- ret = fcntl(fd, absl::GetFlag(FLAGS_blocking) ? F_SETLKW : F_SETLK, &fl);
- } while (absl::GetFlag(FLAGS_retry_eintr) && ret == -1 && errno == EINTR);
- auto usec = absl::ToInt64Microseconds(timer.Duration());
-
- if (ret == -1 && errno != 0) {
- err = errno;
- std::cerr << "CHILD lock " << set_lock_on << " failed " << err << std::endl;
- }
-
- // If there is a socket fd, send back the time in microseconds it took to
- // execute this syscall.
- if (socket_fd != -1) {
- gvisor::testing::WriteFd(socket_fd, reinterpret_cast<void*>(&usec),
- sizeof(usec));
- close(socket_fd);
- }
-
- close(fd);
- return err;
-}
-
-int main(int argc, char** argv) {
- gvisor::testing::TestInit(&argc, &argv);
-
- if (!absl::GetFlag(FLAGS_child_set_lock_on).empty()) {
- exit(set_lock());
- }
-
- return gvisor::testing::RunAllTests();
-}
diff --git a/test/syscalls/linux/file_base.h b/test/syscalls/linux/file_base.h
deleted file mode 100644
index fb418e052..000000000
--- a/test/syscalls/linux/file_base.h
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_FILE_BASE_H_
-#define GVISOR_TEST_SYSCALLS_FILE_BASE_H_
-
-#include <arpa/inet.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <netinet/in.h>
-#include <stddef.h>
-#include <stdio.h>
-#include <string.h>
-#include <sys/socket.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <sys/uio.h>
-#include <unistd.h>
-
-#include <cstring>
-#include <string>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/strings/string_view.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/posix_error.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-class FileTest : public ::testing::Test {
- public:
- void SetUp() override {
- test_pipe_[0] = -1;
- test_pipe_[1] = -1;
-
- test_file_name_ = NewTempAbsPath();
- test_file_fd_ = ASSERT_NO_ERRNO_AND_VALUE(
- Open(test_file_name_, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR));
-
- ASSERT_THAT(pipe(test_pipe_), SyscallSucceeds());
- ASSERT_THAT(fcntl(test_pipe_[0], F_SETFL, O_NONBLOCK), SyscallSucceeds());
- }
-
- // CloseFile will allow the test to manually close the file descriptor.
- void CloseFile() { test_file_fd_.reset(); }
-
- // UnlinkFile will allow the test to manually unlink the file.
- void UnlinkFile() {
- if (!test_file_name_.empty()) {
- EXPECT_THAT(unlink(test_file_name_.c_str()), SyscallSucceeds());
- test_file_name_.clear();
- }
- }
-
- // ClosePipes will allow the test to manually close the pipes.
- void ClosePipes() {
- if (test_pipe_[0] > 0) {
- EXPECT_THAT(close(test_pipe_[0]), SyscallSucceeds());
- }
-
- if (test_pipe_[1] > 0) {
- EXPECT_THAT(close(test_pipe_[1]), SyscallSucceeds());
- }
-
- test_pipe_[0] = -1;
- test_pipe_[1] = -1;
- }
-
- void TearDown() override {
- CloseFile();
- UnlinkFile();
- ClosePipes();
- }
-
- protected:
- std::string test_file_name_;
- FileDescriptor test_file_fd_;
-
- int test_pipe_[2];
-};
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_FILE_BASE_H_
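
As a quick orientation, a hypothetical test built on the FileTest fixture above might look like the sketch below (illustrative only; it assumes the gVisor test utilities already included by this header). The FlockTest suite in the next file is the real consumer.

class ExampleFileTest : public FileTest {};

TEST_F(ExampleFileTest, WriteThenUnlink) {
  constexpr char kByte = 'x';
  // The fixture already created and opened test_file_fd_ for us.
  EXPECT_THAT(write(test_file_fd_.get(), &kByte, 1),
              SyscallSucceedsWithValue(1));
  // Unlinking early is fine; the open descriptor keeps the inode alive and
  // TearDown() skips the second unlink because test_file_name_ is cleared.
  UnlinkFile();
}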
diff --git a/test/syscalls/linux/flock.cc b/test/syscalls/linux/flock.cc
deleted file mode 100644
index 10dad042f..000000000
--- a/test/syscalls/linux/flock.cc
+++ /dev/null
@@ -1,714 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <sys/file.h>
-
-#include <string>
-
-#include "gtest/gtest.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/syscalls/linux/file_base.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-#include "test/util/timer_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-class FlockTest : public FileTest {};
-
-TEST_F(FlockTest, InvalidOpCombinations) {
- // The operation cannot be both exclusive and shared.
- EXPECT_THAT(flock(test_file_fd_.get(), LOCK_EX | LOCK_SH | LOCK_NB),
- SyscallFailsWithErrno(EINVAL));
-
- // Locking and Unlocking doesn't make sense.
- EXPECT_THAT(flock(test_file_fd_.get(), LOCK_EX | LOCK_UN | LOCK_NB),
- SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(flock(test_file_fd_.get(), LOCK_SH | LOCK_UN | LOCK_NB),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_F(FlockTest, NoOperationSpecified) {
- // Not specifying an operation is invalid.
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_NB),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_F(FlockTest, TestSimpleExLock) {
- // Test that we can obtain an exclusive lock (no other holders)
- // and that we can unlock it.
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_EX | LOCK_NB),
- SyscallSucceedsWithValue(0));
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_UN), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(FlockTest, TestSimpleShLock) {
- // Test that we can obtain a shared lock (no other holders)
- // and that we can unlock it.
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_SH | LOCK_NB),
- SyscallSucceedsWithValue(0));
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_UN), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(FlockTest, TestLockableAnyMode) {
- // flock(2): A shared or exclusive lock can be placed on a file
- // regardless of the mode in which the file was opened.
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(test_file_name_, O_RDONLY)); // open read only to test
-
- // Mode shouldn't prevent us from taking an exclusive lock.
- ASSERT_THAT(flock(fd.get(), LOCK_EX | LOCK_NB), SyscallSucceedsWithValue(0));
-
- // Unlock
- ASSERT_THAT(flock(fd.get(), LOCK_UN), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(FlockTest, TestUnlockWithNoHolders) {
-  // Test that unlocking when no one holds a lock succeeds.
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_UN), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(FlockTest, TestRepeatedExLockingBySameHolder) {
- // Test that repeated locking by the same holder for the
- // same type of lock works correctly.
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_NB | LOCK_EX),
- SyscallSucceedsWithValue(0));
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_NB | LOCK_EX),
- SyscallSucceedsWithValue(0));
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_UN), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(FlockTest, TestRepeatedExLockingSingleUnlock) {
- // Test that repeated locking by the same holder for the
- // same type of lock works correctly and that a single unlock is required.
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_NB | LOCK_EX),
- SyscallSucceedsWithValue(0));
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_NB | LOCK_EX),
- SyscallSucceedsWithValue(0));
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_UN), SyscallSucceedsWithValue(0));
-
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDONLY));
-
- // Should be unlocked at this point
- ASSERT_THAT(flock(fd.get(), LOCK_NB | LOCK_EX), SyscallSucceedsWithValue(0));
-
- ASSERT_THAT(flock(fd.get(), LOCK_UN), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(FlockTest, TestRepeatedShLockingBySameHolder) {
- // Test that repeated locking by the same holder for the
- // same type of lock works correctly.
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_NB | LOCK_SH),
- SyscallSucceedsWithValue(0));
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_NB | LOCK_SH),
- SyscallSucceedsWithValue(0));
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_UN), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(FlockTest, TestSingleHolderUpgrade) {
- // Test that a shared lock is upgradable when no one else holds a lock.
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_NB | LOCK_SH),
- SyscallSucceedsWithValue(0));
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_NB | LOCK_EX),
- SyscallSucceedsWithValue(0));
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_UN), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(FlockTest, TestSingleHolderDowngrade) {
- // Test single holder lock downgrade case.
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_EX | LOCK_NB),
- SyscallSucceedsWithValue(0));
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_SH | LOCK_NB),
- SyscallSucceedsWithValue(0));
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_UN), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(FlockTest, TestMultipleShared) {
- // This is a simple test to verify that multiple independent shared
- // locks will be granted.
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_SH | LOCK_NB),
- SyscallSucceedsWithValue(0));
-
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDWR));
-
-  // A shared lock should be granted as only other shared locks are held.
- ASSERT_THAT(flock(fd.get(), LOCK_SH | LOCK_NB), SyscallSucceedsWithValue(0));
-
- // Unlock both.
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_UN), SyscallSucceedsWithValue(0));
- ASSERT_THAT(flock(fd.get(), LOCK_UN), SyscallSucceedsWithValue(0));
-}
-
-/*
- * flock(2): If a process uses open(2) (or similar) to obtain more than one
- * descriptor for the same file, these descriptors are treated
- * independently by flock(). An attempt to lock the file using one of
- * these file descriptors may be denied by a lock that the calling process
- * has already placed via another descriptor.
- */
-TEST_F(FlockTest, TestMultipleHolderSharedExclusive) {
- // This test will verify that an exclusive lock will not be granted
- // while a shared is held.
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_SH | LOCK_NB),
- SyscallSucceedsWithValue(0));
-
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDWR));
-
-  // Verify we're unable to get an exclusive lock via the second FD
-  // because someone is holding a shared lock.
- ASSERT_THAT(flock(fd.get(), LOCK_EX | LOCK_NB),
- SyscallFailsWithErrno(EWOULDBLOCK));
-
- // Unlock
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_UN), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(FlockTest, TestSharedLockFailExclusiveHolderNonblocking) {
- // This test will verify that a shared lock is denied while
- // someone holds an exclusive lock.
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_EX | LOCK_NB),
- SyscallSucceedsWithValue(0));
-
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDWR));
-
-  // Verify we're unable to get a shared lock via the second FD
-  // because someone is holding an exclusive lock.
- ASSERT_THAT(flock(fd.get(), LOCK_SH | LOCK_NB),
- SyscallFailsWithErrno(EWOULDBLOCK));
-
- // Unlock
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_UN), SyscallSucceedsWithValue(0));
-}
-
-void trivial_handler(int signum) {}
-
-TEST_F(FlockTest, TestSharedLockFailExclusiveHolderBlocking) {
- const DisableSave ds; // Timing-related.
-
- // This test will verify that a shared lock is denied while
- // someone holds an exclusive lock.
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_EX | LOCK_NB),
- SyscallSucceedsWithValue(0));
-
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDWR));
-
- // Make sure that a blocking flock() call will return EINTR when interrupted
- // by a signal. Create a timer that will go off while blocking on flock(), and
- // register the corresponding signal handler.
- auto timer = ASSERT_NO_ERRNO_AND_VALUE(
- TimerCreate(CLOCK_MONOTONIC, sigevent_t{
- .sigev_signo = SIGALRM,
- .sigev_notify = SIGEV_SIGNAL,
- }));
-
- struct sigaction act = {};
- act.sa_handler = trivial_handler;
- ASSERT_THAT(sigaction(SIGALRM, &act, NULL), SyscallSucceeds());
-
- // Now that the signal handler is registered, set the timer. Set an interval
- // so that it's ok if the timer goes off before we call flock.
- ASSERT_NO_ERRNO(
- timer.Set(0, itimerspec{
- .it_interval = absl::ToTimespec(absl::Milliseconds(10)),
- .it_value = absl::ToTimespec(absl::Milliseconds(10)),
- }));
-
- ASSERT_THAT(flock(fd.get(), LOCK_SH), SyscallFailsWithErrno(EINTR));
- timer.reset();
-
- // Unlock
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_UN), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(FlockTest, TestExclusiveLockFailExclusiveHolderNonblocking) {
- // This test will verify that an exclusive lock is denied while
-  // someone already holds an exclusive lock.
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_EX | LOCK_NB),
- SyscallSucceedsWithValue(0));
-
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDWR));
-
- // Verify we're unable to get an exclusive lock via the second FD
- // because someone is already holding an exclusive lock.
- ASSERT_THAT(flock(fd.get(), LOCK_EX | LOCK_NB),
- SyscallFailsWithErrno(EWOULDBLOCK));
-
- // Unlock
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_UN), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(FlockTest, TestExclusiveLockFailExclusiveHolderBlocking) {
- const DisableSave ds; // Timing-related.
-
- // This test will verify that an exclusive lock is denied while
-  // someone already holds an exclusive lock.
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_EX | LOCK_NB),
- SyscallSucceedsWithValue(0));
-
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDWR));
-
- // Make sure that a blocking flock() call will return EINTR when interrupted
- // by a signal. Create a timer that will go off while blocking on flock(), and
- // register the corresponding signal handler.
- auto timer = ASSERT_NO_ERRNO_AND_VALUE(
- TimerCreate(CLOCK_MONOTONIC, sigevent_t{
- .sigev_signo = SIGALRM,
- .sigev_notify = SIGEV_SIGNAL,
- }));
-
- struct sigaction act = {};
- act.sa_handler = trivial_handler;
- ASSERT_THAT(sigaction(SIGALRM, &act, NULL), SyscallSucceeds());
-
- // Now that the signal handler is registered, set the timer. Set an interval
- // so that it's ok if the timer goes off before we call flock.
- ASSERT_NO_ERRNO(
- timer.Set(0, itimerspec{
- .it_interval = absl::ToTimespec(absl::Milliseconds(10)),
- .it_value = absl::ToTimespec(absl::Milliseconds(10)),
- }));
-
- ASSERT_THAT(flock(fd.get(), LOCK_EX), SyscallFailsWithErrno(EINTR));
- timer.reset();
-
- // Unlock
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_UN), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(FlockTest, TestMultipleHolderSharedExclusiveUpgrade) {
- // This test will verify that we cannot obtain an exclusive lock while
- // a shared lock is held by another descriptor, then verify that an upgrade
- // is possible on a shared lock once all other shared locks have closed.
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_SH | LOCK_NB),
- SyscallSucceedsWithValue(0));
-
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDWR));
-
- // Verify we're unable to get an exclusive lock via the second FD because
- // a shared lock is held.
- ASSERT_THAT(flock(fd.get(), LOCK_EX | LOCK_NB),
- SyscallFailsWithErrno(EWOULDBLOCK));
-
- // Verify that we can get a shared lock via the second descriptor instead
- ASSERT_THAT(flock(fd.get(), LOCK_SH | LOCK_NB), SyscallSucceedsWithValue(0));
-
- // Unlock the first and there will only be one shared lock remaining.
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_UN), SyscallSucceedsWithValue(0));
-
- // Upgrade 2nd fd.
- ASSERT_THAT(flock(fd.get(), LOCK_EX | LOCK_NB), SyscallSucceedsWithValue(0));
-
- // Finally unlock the second
- ASSERT_THAT(flock(fd.get(), LOCK_UN), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(FlockTest, TestMultipleHolderSharedExclusiveDowngrade) {
- // This test will verify that a shared lock is not obtainable while an
- // exclusive lock is held but that once the first is downgraded that
- // the second independent file descriptor can also get a shared lock.
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_EX | LOCK_NB),
- SyscallSucceedsWithValue(0));
-
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDWR));
-
-  // Verify we're unable to get a shared lock via the second FD because
- // an exclusive lock is held.
- ASSERT_THAT(flock(fd.get(), LOCK_SH | LOCK_NB),
- SyscallFailsWithErrno(EWOULDBLOCK));
-
- // Verify that we can downgrade the first.
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_SH | LOCK_NB),
- SyscallSucceedsWithValue(0));
-
- // Now verify that we can obtain a shared lock since the first was downgraded.
- ASSERT_THAT(flock(fd.get(), LOCK_SH | LOCK_NB), SyscallSucceedsWithValue(0));
-
- // Finally unlock both.
- ASSERT_THAT(flock(fd.get(), LOCK_UN), SyscallSucceedsWithValue(0));
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_UN), SyscallSucceedsWithValue(0));
-}
-
-/*
- * flock(2): Locks created by flock() are associated with an open file table
- * entry. This means that duplicate file descriptors (created by, for example,
- * fork(2) or dup(2)) refer to the same lock, and this lock may be modified or
- * released using any of these descriptors. Furthermore, the lock is released
- * either by an explicit LOCK_UN operation on any of these duplicate descriptors
- * or when all such descriptors have been closed.
- */
-TEST_F(FlockTest, TestDupFdUpgrade) {
-  // This test will verify that a shared lock is upgradeable via a dupped
-  // file descriptor; if the FD wasn't dupped, this would fail.
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_SH | LOCK_NB),
- SyscallSucceedsWithValue(0));
-
- const FileDescriptor dup_fd = ASSERT_NO_ERRNO_AND_VALUE(test_file_fd_.Dup());
-
- // Now we should be able to upgrade via the dupped fd.
- ASSERT_THAT(flock(dup_fd.get(), LOCK_EX | LOCK_NB),
- SyscallSucceedsWithValue(0));
-
- // Validate unlock via dupped fd.
- ASSERT_THAT(flock(dup_fd.get(), LOCK_UN), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(FlockTest, TestDupFdDowngrade) {
-  // This test will verify that an exclusive lock is downgradable via a dupped
-  // file descriptor; if the FD wasn't dupped, this would fail.
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_EX | LOCK_NB),
- SyscallSucceedsWithValue(0));
-
- const FileDescriptor dup_fd = ASSERT_NO_ERRNO_AND_VALUE(test_file_fd_.Dup());
-
- // Now we should be able to downgrade via the dupped fd.
- ASSERT_THAT(flock(dup_fd.get(), LOCK_SH | LOCK_NB),
- SyscallSucceedsWithValue(0));
-
- // Validate unlock via dupped fd
- ASSERT_THAT(flock(dup_fd.get(), LOCK_UN), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(FlockTest, TestDupFdCloseRelease) {
- // flock(2): Furthermore, the lock is released either by an explicit LOCK_UN
- // operation on any of these duplicate descriptors, or when all such
- // descriptors have been closed.
- //
- // This test will verify that a dupped fd closing will not release the
- // underlying lock until all such dupped fds have closed.
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_EX | LOCK_NB),
- SyscallSucceedsWithValue(0));
-
- FileDescriptor dup_fd = ASSERT_NO_ERRNO_AND_VALUE(test_file_fd_.Dup());
-
-  // At this point we have ONE exclusive lock referenced by two different fds.
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDWR));
-
- // Validate that we cannot get a lock on a new unrelated FD.
- ASSERT_THAT(flock(fd.get(), LOCK_EX | LOCK_NB),
- SyscallFailsWithErrno(EWOULDBLOCK));
-
- // Closing the dupped fd shouldn't affect the lock until all are closed.
- dup_fd.reset(); // Closed the duped fd.
-
- // Validate that we still cannot get a lock on a new unrelated FD.
- ASSERT_THAT(flock(fd.get(), LOCK_EX | LOCK_NB),
- SyscallFailsWithErrno(EWOULDBLOCK));
-
- // Closing the first fd
- CloseFile(); // Will validate the syscall succeeds.
-
- // Now we should actually be able to get a lock since all fds related to
- // the first lock are closed.
- ASSERT_THAT(flock(fd.get(), LOCK_EX | LOCK_NB), SyscallSucceedsWithValue(0));
-
- // Unlock.
- ASSERT_THAT(flock(fd.get(), LOCK_UN), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(FlockTest, TestDupFdUnlockRelease) {
- /* flock(2): Furthermore, the lock is released either by an explicit LOCK_UN
- * operation on any of these duplicate descriptors, or when all such
- * descriptors have been closed.
- */
-  // This test will verify that an explicit unlock on a dupped FD will release
-  // the underlying lock, unlike the previous case where closing a dup was
-  // not enough to release the lock.
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_EX | LOCK_NB),
- SyscallSucceedsWithValue(0));
-
- const FileDescriptor dup_fd = ASSERT_NO_ERRNO_AND_VALUE(test_file_fd_.Dup());
-
-  // At this point we have ONE exclusive lock referenced by two different fds.
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDWR));
-
- // Validate that we cannot get a lock on a new unrelated FD.
- ASSERT_THAT(flock(fd.get(), LOCK_EX | LOCK_NB),
- SyscallFailsWithErrno(EWOULDBLOCK));
-
- // Explicitly unlock via the dupped descriptor.
- ASSERT_THAT(flock(dup_fd.get(), LOCK_UN), SyscallSucceedsWithValue(0));
-
- // Validate that we can now get the lock since we explicitly unlocked.
- ASSERT_THAT(flock(fd.get(), LOCK_EX | LOCK_NB), SyscallSucceedsWithValue(0));
-
- // Unlock
- ASSERT_THAT(flock(fd.get(), LOCK_UN), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(FlockTest, TestDupFdFollowedByLock) {
- // This test will verify that taking a lock on a file descriptor that has
- // already been dupped means that the lock is shared between both. This is
-  // slightly different from duping an already locked FD.
- FileDescriptor dup_fd = ASSERT_NO_ERRNO_AND_VALUE(test_file_fd_.Dup());
-
- // Take a lock.
- ASSERT_THAT(flock(dup_fd.get(), LOCK_EX | LOCK_NB), SyscallSucceeds());
-
-  // Now dup_fd and test_file_fd_ should both reference the same lock.
- // We shouldn't be able to obtain a lock until both are closed.
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDWR));
-
- // Closing the first fd
- dup_fd.reset(); // Close the duped fd.
-
-  // Validate that we cannot get a lock yet: the original fd still holds it.
- ASSERT_THAT(flock(fd.get(), LOCK_EX | LOCK_NB),
- SyscallFailsWithErrno(EWOULDBLOCK));
-
- // Closing the second fd.
- CloseFile(); // CloseFile() will validate the syscall succeeds.
-
- // Now we should be able to get the lock.
- ASSERT_THAT(flock(fd.get(), LOCK_EX | LOCK_NB), SyscallSucceeds());
-
- // Unlock.
- ASSERT_THAT(flock(fd.get(), LOCK_UN), SyscallSucceedsWithValue(0));
-}
-
-// NOTE: These blocking tests are not perfect. Unfortunately it's very hard to
-// determine if a thread was actually blocked in the kernel, so we're forced
-// to use timing.
-TEST_F(FlockTest, BlockingLockNoBlockingForSharedLocks) {
- // This test will verify that although LOCK_NB isn't specified
- // two different fds can obtain shared locks without blocking.
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_SH), SyscallSucceeds());
-
- // kHoldLockTime is the amount of time we will hold the lock before releasing.
- constexpr absl::Duration kHoldLockTime = absl::Seconds(30);
-
- const DisableSave ds; // Timing-related.
-
- // We do this in another thread so we can determine if it was actually
- // blocked by timing the amount of time it took for the syscall to complete.
- ScopedThread t([&] {
- MonotonicTimer timer;
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDWR));
-
-    // Only a single shared lock is held, so the lock will be granted
-    // immediately without blocking. Don't save here to avoid wild
-    // discrepancies in timing.
- timer.Start();
- ASSERT_THAT(flock(fd.get(), LOCK_SH), SyscallSucceeds());
-
-    // We held the lock for 30 seconds, but this thread should not have
-    // blocked at all, so we expect the syscall to complete almost immediately.
- ASSERT_LT(timer.Duration(),
- absl::Seconds(1)); // 1000ms is much less than 30s.
-
- // We can release our second shared lock
- ASSERT_THAT(flock(fd.get(), LOCK_UN), SyscallSucceeds());
- });
-
- // Sleep before unlocking.
- absl::SleepFor(kHoldLockTime);
-
- // Release the first shared lock. Don't save in this situation to avoid
-  // discrepancies in timing.
- EXPECT_THAT(flock(test_file_fd_.get(), LOCK_UN), SyscallSucceeds());
-}
-
-TEST_F(FlockTest, BlockingLockFirstSharedSecondExclusive) {
- // This test will verify that if someone holds a shared lock any attempt to
- // obtain an exclusive lock will result in blocking.
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_SH), SyscallSucceeds());
-
- // kHoldLockTime is the amount of time we will hold the lock before releasing.
- constexpr absl::Duration kHoldLockTime = absl::Seconds(2);
-
- const DisableSave ds; // Timing-related.
-
- // We do this in another thread so we can determine if it was actually
- // blocked by timing the amount of time it took for the syscall to complete.
- ScopedThread t([&] {
- MonotonicTimer timer;
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDWR));
-
- // This exclusive lock should block because someone is already holding a
-    // shared lock. We don't save here to avoid wild discrepancies in timing.
- timer.Start();
- ASSERT_THAT(RetryEINTR(flock)(fd.get(), LOCK_EX), SyscallSucceeds());
-
-    // We should have been blocked for more than 1.0s.
- ASSERT_GT(timer.Duration(), absl::Seconds(1));
-
- // We can release our exclusive lock.
- ASSERT_THAT(flock(fd.get(), LOCK_UN), SyscallSucceeds());
- });
-
- // Sleep before unlocking.
- absl::SleepFor(kHoldLockTime);
-
- // Release the shared lock allowing the thread to proceed.
-  // We don't save here to avoid wild discrepancies in timing.
- EXPECT_THAT(flock(test_file_fd_.get(), LOCK_UN), SyscallSucceeds());
-}
-
-TEST_F(FlockTest, BlockingLockFirstExclusiveSecondShared) {
- // This test will verify that if someone holds an exclusive lock any attempt
- // to obtain a shared lock will result in blocking.
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_EX), SyscallSucceeds());
-
- // kHoldLockTime is the amount of time we will hold the lock before releasing.
- constexpr absl::Duration kHoldLockTime = absl::Seconds(2);
-
- const DisableSave ds; // Timing-related.
-
- // We do this in another thread so we can determine if it was actually
- // blocked by timing the amount of time it took for the syscall to complete.
- ScopedThread t([&] {
- MonotonicTimer timer;
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDWR));
-
- // This shared lock should block because someone is already holding an
-    // exclusive lock. We don't save here to avoid wild discrepancies in timing.
- timer.Start();
- ASSERT_THAT(RetryEINTR(flock)(fd.get(), LOCK_SH), SyscallSucceeds());
-
-    // We should have been blocked for more than 1.0s.
- ASSERT_GT(timer.Duration(), absl::Seconds(1));
-
- // We can release our shared lock.
- ASSERT_THAT(flock(fd.get(), LOCK_UN), SyscallSucceeds());
- });
-
- // Sleep before unlocking.
- absl::SleepFor(kHoldLockTime);
-
- // Release the exclusive lock allowing the blocked thread to proceed.
-  // We don't save here to avoid wild discrepancies in timing.
- EXPECT_THAT(flock(test_file_fd_.get(), LOCK_UN), SyscallSucceeds());
-}
-
-TEST_F(FlockTest, BlockingLockFirstExclusiveSecondExclusive) {
- // This test will verify that if someone holds an exclusive lock any attempt
- // to obtain another exclusive lock will result in blocking.
- ASSERT_THAT(flock(test_file_fd_.get(), LOCK_EX), SyscallSucceeds());
-
- // kHoldLockTime is the amount of time we will hold the lock before releasing.
- constexpr absl::Duration kHoldLockTime = absl::Seconds(2);
-
- const DisableSave ds; // Timing-related.
-
- // We do this in another thread so we can determine if it was actually
- // blocked by timing the amount of time it took for the syscall to complete.
- ScopedThread t([&] {
- MonotonicTimer timer;
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDWR));
-
- // This exclusive lock should block because someone is already holding an
- // exclusive lock.
- timer.Start();
- ASSERT_THAT(RetryEINTR(flock)(fd.get(), LOCK_EX), SyscallSucceeds());
-
-    // We should have been blocked for more than 1.0s.
- ASSERT_GT(timer.Duration(), absl::Seconds(1));
-
- // We can release our exclusive lock.
- ASSERT_THAT(flock(fd.get(), LOCK_UN), SyscallSucceeds());
- });
-
- // Sleep before unlocking.
- absl::SleepFor(kHoldLockTime);
-
- // Release the exclusive lock allowing the blocked thread to proceed.
-  // We don't save to avoid wild discrepancies in timing.
- EXPECT_THAT(flock(test_file_fd_.get(), LOCK_UN), SyscallSucceeds());
-}
-
-TEST(FlockTestNoFixture, BadFD) {
- // EBADF: fd is not an open file descriptor.
- ASSERT_THAT(flock(-1, 0), SyscallFailsWithErrno(EBADF));
-}
-
-TEST(FlockTestNoFixture, FlockDir) {
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(Open(dir.path(), O_RDONLY, 0000));
- EXPECT_THAT(flock(fd.get(), LOCK_EX | LOCK_NB), SyscallSucceeds());
-}
-
-TEST(FlockTestNoFixture, FlockSymlink) {
- SKIP_IF(IsRunningWithVFS1());
-
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- auto symlink = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateSymlinkTo(GetAbsoluteTestTmpdir(), file.path()));
-
- auto fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(symlink.path(), O_RDONLY | O_PATH, 0000));
- EXPECT_THAT(flock(fd.get(), LOCK_EX | LOCK_NB), SyscallFailsWithErrno(EBADF));
-}
-
-TEST(FlockTestNoFixture, FlockProc) {
- auto fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/proc/self/status", O_RDONLY, 0000));
- EXPECT_THAT(flock(fd.get(), LOCK_EX | LOCK_NB), SyscallSucceeds());
-}
-
-TEST(FlockTestNoFixture, FlockPipe) {
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
-
- EXPECT_THAT(flock(fds[0], LOCK_EX | LOCK_NB), SyscallSucceeds());
- // Check that the pipe was locked above.
- EXPECT_THAT(flock(fds[1], LOCK_EX | LOCK_NB), SyscallFailsWithErrno(EAGAIN));
-
- EXPECT_THAT(flock(fds[0], LOCK_UN), SyscallSucceeds());
- EXPECT_THAT(flock(fds[1], LOCK_EX | LOCK_NB), SyscallSucceeds());
-
- EXPECT_THAT(close(fds[0]), SyscallSucceeds());
- EXPECT_THAT(close(fds[1]), SyscallSucceeds());
-}
-
-TEST(FlockTestNoFixture, FlockSocket) {
- int sock = socket(AF_UNIX, SOCK_STREAM, 0);
- ASSERT_THAT(sock, SyscallSucceeds());
-
- struct sockaddr_un addr =
- ASSERT_NO_ERRNO_AND_VALUE(UniqueUnixAddr(true /* abstract */, AF_UNIX));
- ASSERT_THAT(
- bind(sock, reinterpret_cast<struct sockaddr*>(&addr), sizeof(addr)),
- SyscallSucceeds());
-
- EXPECT_THAT(flock(sock, LOCK_EX | LOCK_NB), SyscallSucceeds());
- EXPECT_THAT(close(sock), SyscallSucceeds());
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
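
Outside of the gtest harness, the core semantics these tests exercise can be summarized in a short standalone sketch (plain POSIX, error handling elided, names illustrative): two descriptors obtained by separate open(2) calls can both hold LOCK_SH, an exclusive upgrade fails with EWOULDBLOCK while another shared holder exists, and succeeds once that holder unlocks.

#include <errno.h>
#include <fcntl.h>
#include <sys/file.h>
#include <unistd.h>
#include <cassert>

void FlockDemo(const char* path) {
  int fd1 = open(path, O_RDWR | O_CREAT, 0600);
  int fd2 = open(path, O_RDWR);
  assert(flock(fd1, LOCK_SH | LOCK_NB) == 0);  // First shared holder.
  assert(flock(fd2, LOCK_SH | LOCK_NB) == 0);  // Shared locks coexist.
  assert(flock(fd2, LOCK_EX | LOCK_NB) == -1 && errno == EWOULDBLOCK);
  assert(flock(fd1, LOCK_UN) == 0);            // Drop the other holder...
  assert(flock(fd2, LOCK_EX | LOCK_NB) == 0);  // ...and the upgrade succeeds.
  close(fd2);
  close(fd1);
}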
diff --git a/test/syscalls/linux/fork.cc b/test/syscalls/linux/fork.cc
deleted file mode 100644
index 853f6231a..000000000
--- a/test/syscalls/linux/fork.cc
+++ /dev/null
@@ -1,464 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <sched.h>
-#include <stdlib.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <atomic>
-#include <cstdlib>
-
-#include "gtest/gtest.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/util/capability_util.h"
-#include "test/util/logging.h"
-#include "test/util/memory_util.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-using ::testing::Ge;
-
-class ForkTest : public ::testing::Test {
- protected:
- // SetUp creates a populated, open file.
- void SetUp() override {
- // Make a shared mapping.
- shared_ = reinterpret_cast<char*>(mmap(0, kPageSize, PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_ANONYMOUS, -1, 0));
- ASSERT_NE(reinterpret_cast<void*>(shared_), MAP_FAILED);
-
- // Make a private mapping.
- private_ =
- reinterpret_cast<char*>(mmap(0, kPageSize, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
- ASSERT_NE(reinterpret_cast<void*>(private_), MAP_FAILED);
-
- // Make a pipe.
- ASSERT_THAT(pipe(pipes_), SyscallSucceeds());
- }
-
- // TearDown frees associated resources.
- void TearDown() override {
- EXPECT_THAT(munmap(shared_, kPageSize), SyscallSucceeds());
- EXPECT_THAT(munmap(private_, kPageSize), SyscallSucceeds());
- EXPECT_THAT(close(pipes_[0]), SyscallSucceeds());
- EXPECT_THAT(close(pipes_[1]), SyscallSucceeds());
- }
-
- // Fork executes a clone system call.
- pid_t Fork() {
- pid_t pid = fork();
- MaybeSave();
- TEST_PCHECK_MSG(pid >= 0, "fork failed");
- return pid;
- }
-
-  // Wait waits for the given pid and returns the exit status. If the child was
-  // killed by a signal, 256+signal is returned; if wait4 fails, its return
-  // value is propagated.
- int Wait(pid_t pid) {
- int status;
- while (true) {
- int rval = wait4(pid, &status, 0, NULL);
- if (rval < 0) {
- return rval;
- }
- if (rval != pid) {
- continue;
- }
- if (WIFEXITED(status)) {
- return WEXITSTATUS(status);
- }
- if (WIFSIGNALED(status)) {
- return 256 + WTERMSIG(status);
- }
- }
- }
-
-  // Exit exits the process.
- void Exit(int code) {
- _exit(code);
-
- // Should never reach here. Since the exit above failed, we really don't
- // have much in the way of options to indicate failure. So we just try to
- // log an assertion failure to the logs. The parent process will likely
- // fail anyways if exit is not working.
- TEST_CHECK_MSG(false, "_exit returned");
- }
-
- // ReadByte reads a byte from the shared pipe.
- char ReadByte() {
- char val = -1;
- TEST_PCHECK(ReadFd(pipes_[0], &val, 1) == 1);
- MaybeSave();
- return val;
- }
-
-  // WriteByte writes a byte to the shared pipe.
- void WriteByte(char val) {
- TEST_PCHECK(WriteFd(pipes_[1], &val, 1) == 1);
- MaybeSave();
- }
-
- // Shared pipe.
- int pipes_[2];
-
- // Shared mapping (one page).
- char* shared_;
-
- // Private mapping (one page).
- char* private_;
-};
-
-TEST_F(ForkTest, Simple) {
- pid_t child = Fork();
- if (child == 0) {
- Exit(0);
- }
- EXPECT_THAT(Wait(child), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(ForkTest, ExitCode) {
- pid_t child = Fork();
- if (child == 0) {
- Exit(123);
- }
- EXPECT_THAT(Wait(child), SyscallSucceedsWithValue(123));
- child = Fork();
- if (child == 0) {
- Exit(1);
- }
- EXPECT_THAT(Wait(child), SyscallSucceedsWithValue(1));
-}
-
-TEST_F(ForkTest, Multi) {
- pid_t child1 = Fork();
- if (child1 == 0) {
- Exit(0);
- }
- pid_t child2 = Fork();
- if (child2 == 0) {
- Exit(1);
- }
- EXPECT_THAT(Wait(child1), SyscallSucceedsWithValue(0));
- EXPECT_THAT(Wait(child2), SyscallSucceedsWithValue(1));
-}
-
-TEST_F(ForkTest, Pipe) {
- pid_t child = Fork();
- if (child == 0) {
- WriteByte(1);
- Exit(0);
- }
- EXPECT_EQ(ReadByte(), 1);
- EXPECT_THAT(Wait(child), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(ForkTest, SharedMapping) {
- pid_t child = Fork();
- if (child == 0) {
- // Wait for the parent.
- ReadByte();
- if (shared_[0] == 1) {
- Exit(0);
- }
- // Failed.
- Exit(1);
- }
- // Change the mapping.
- ASSERT_EQ(shared_[0], 0);
- shared_[0] = 1;
- // Unblock the child.
- WriteByte(0);
- // Did it work?
- EXPECT_THAT(Wait(child), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(ForkTest, PrivateMapping) {
- pid_t child = Fork();
- if (child == 0) {
- // Wait for the parent.
- ReadByte();
- if (private_[0] == 0) {
- Exit(0);
- }
- // Failed.
- Exit(1);
- }
- // Change the mapping.
- ASSERT_EQ(private_[0], 0);
- private_[0] = 1;
- // Unblock the child.
- WriteByte(0);
- // Did it work?
- EXPECT_THAT(Wait(child), SyscallSucceedsWithValue(0));
-}
-
-// CPUID is x86 specific.
-#ifdef __x86_64__
-// Test that cpuid works after a fork.
-TEST_F(ForkTest, Cpuid) {
- pid_t child = Fork();
-
- // We should be able to determine the CPU vendor.
- ASSERT_NE(GetCPUVendor(), CPUVendor::kUnknownVendor);
-
- if (child == 0) {
- Exit(0);
- }
- EXPECT_THAT(Wait(child), SyscallSucceedsWithValue(0));
-}
-#endif
-
-TEST_F(ForkTest, Mmap) {
- pid_t child = Fork();
-
- if (child == 0) {
- void* addr =
- mmap(0, kPageSize, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- MaybeSave();
- Exit(addr == MAP_FAILED);
- }
-
- EXPECT_THAT(Wait(child), SyscallSucceedsWithValue(0));
-}
-
-static volatile int alarmed = 0;
-
-void AlarmHandler(int sig, siginfo_t* info, void* context) { alarmed = 1; }
-
-TEST_F(ForkTest, Alarm) {
- // Setup an alarm handler.
- struct sigaction sa;
- sa.sa_sigaction = AlarmHandler;
- sigfillset(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO;
- EXPECT_THAT(sigaction(SIGALRM, &sa, nullptr), SyscallSucceeds());
-
- pid_t child = Fork();
-
- if (child == 0) {
- alarm(1);
- sleep(3);
- if (!alarmed) {
- Exit(1);
- }
- Exit(0);
- }
-
- EXPECT_THAT(Wait(child), SyscallSucceedsWithValue(0));
- EXPECT_EQ(0, alarmed);
-}
-
-// Child cannot affect parent private memory. Regression test for b/24137240.
-TEST_F(ForkTest, PrivateMemory) {
- std::atomic<uint32_t> local(0);
-
- pid_t child1 = Fork();
- if (child1 == 0) {
- local++;
-
- pid_t child2 = Fork();
- if (child2 == 0) {
- local++;
-
- TEST_CHECK(local.load() == 2);
-
- Exit(0);
- }
-
- TEST_PCHECK(Wait(child2) == 0);
- TEST_CHECK(local.load() == 1);
- Exit(0);
- }
-
- EXPECT_THAT(Wait(child1), SyscallSucceedsWithValue(0));
- EXPECT_EQ(0, local.load());
-}
-
-// Kernel-accessed buffers should remain coherent across COW.
-//
-// The buffer must be >= usermem.ZeroCopyMinBytes, as UnsafeAccess operates
-// differently. Regression test for b/33811887.
-TEST_F(ForkTest, COWSegment) {
- constexpr int kBufSize = 1024;
- char* read_buf = private_;
- char* touch = private_ + kPageSize / 2;
-
- std::string contents(kBufSize, 'a');
-
- ScopedThread t([&] {
- // Wait to be sure the parent is blocked in read.
- absl::SleepFor(absl::Seconds(3));
-
- // Fork to mark private pages for COW.
- //
- // Use fork directly rather than the Fork wrapper to skip the multi-threaded
- // check, and limit the child to async-signal-safe functions:
- //
- // "After a fork() in a multithreaded program, the child can safely call
- // only async-signal-safe functions (see signal(7)) until such time as it
- // calls execve(2)."
- //
- // Skip ASSERT in the child, as it isn't async-signal-safe.
- pid_t child = fork();
- if (child == 0) {
- // Wait to be sure parent touched memory.
- sleep(3);
- Exit(0);
- }
-
- // Check success only in the parent.
- ASSERT_THAT(child, SyscallSucceedsWithValue(Ge(0)));
-
- // Trigger COW on private page.
- *touch = 42;
-
- // Write to pipe. Parent should still be able to read this.
- EXPECT_THAT(WriteFd(pipes_[1], contents.c_str(), kBufSize),
- SyscallSucceedsWithValue(kBufSize));
-
- EXPECT_THAT(Wait(child), SyscallSucceedsWithValue(0));
- });
-
- EXPECT_THAT(ReadFd(pipes_[0], read_buf, kBufSize),
- SyscallSucceedsWithValue(kBufSize));
- EXPECT_STREQ(contents.c_str(), read_buf);
-}
-
-TEST_F(ForkTest, SigAltStack) {
- std::vector<char> stack_mem(SIGSTKSZ);
- stack_t stack = {};
- stack.ss_size = SIGSTKSZ;
- stack.ss_sp = stack_mem.data();
- ASSERT_THAT(sigaltstack(&stack, nullptr), SyscallSucceeds());
-
- pid_t child = Fork();
-
- if (child == 0) {
- stack_t oss = {};
- TEST_PCHECK(sigaltstack(nullptr, &oss) == 0);
- MaybeSave();
-
- TEST_CHECK((oss.ss_flags & SS_DISABLE) == 0);
- TEST_CHECK(oss.ss_size == SIGSTKSZ);
- TEST_CHECK(oss.ss_sp == stack.ss_sp);
-
- Exit(0);
- }
- EXPECT_THAT(Wait(child), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(ForkTest, Affinity) {
- // Make a non-default cpumask.
- cpu_set_t parent_mask;
- EXPECT_THAT(sched_getaffinity(/*pid=*/0, sizeof(cpu_set_t), &parent_mask),
- SyscallSucceeds());
- // Knock out the lowest bit.
- for (unsigned int n = 0; n < CPU_SETSIZE; n++) {
- if (CPU_ISSET(n, &parent_mask)) {
- CPU_CLR(n, &parent_mask);
- break;
- }
- }
- EXPECT_THAT(sched_setaffinity(/*pid=*/0, sizeof(cpu_set_t), &parent_mask),
- SyscallSucceeds());
-
- pid_t child = Fork();
- if (child == 0) {
- cpu_set_t child_mask;
-
- int ret = sched_getaffinity(/*pid=*/0, sizeof(cpu_set_t), &child_mask);
- MaybeSave();
- if (ret < 0) {
- Exit(-ret);
- }
-
- TEST_CHECK(CPU_EQUAL(&child_mask, &parent_mask));
-
- Exit(0);
- }
-
- EXPECT_THAT(Wait(child), SyscallSucceedsWithValue(0));
-}
-
-TEST(CloneTest, NewUserNamespacePermitsAllOtherNamespaces) {
- // "If CLONE_NEWUSER is specified along with other CLONE_NEW* flags in a
- // single clone(2) or unshare(2) call, the user namespace is guaranteed to be
- // created first, giving the child (clone(2)) or caller (unshare(2))
- // privileges over the remaining namespaces created by the call. Thus, it is
- // possible for an unprivileged caller to specify this combination of flags."
- // - user_namespaces(7)
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanCreateUserNamespace()));
- Mapping child_stack = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
- int child_pid;
- // We only test with CLONE_NEWIPC, CLONE_NEWNET, and CLONE_NEWUTS since these
- // namespaces were implemented in Linux before user namespaces.
- ASSERT_THAT(
- child_pid = clone(
- +[](void*) { return 0; },
- reinterpret_cast<void*>(child_stack.addr() + kPageSize),
- CLONE_NEWUSER | CLONE_NEWIPC | CLONE_NEWNET | CLONE_NEWUTS | SIGCHLD,
- /* arg = */ nullptr),
- SyscallSucceeds());
-
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << "status = " << status;
-}
-
-// Clone with CLONE_SETTLS and a non-canonical TLS address is rejected.
-TEST(CloneTest, NonCanonicalTLS) {
- constexpr uintptr_t kNonCanonical = 1ull << 48;
-
- // We need a valid address for the stack pointer. We'll never actually execute
- // on this.
- char stack;
-
- // The raw system call interface on x86-64 is:
- // long clone(unsigned long flags, void *stack,
- // int *parent_tid, int *child_tid,
- // unsigned long tls);
- //
- // While on arm64, the order of the last two arguments is reversed:
- // long clone(unsigned long flags, void *stack,
- // int *parent_tid, unsigned long tls,
- // int *child_tid);
-#if defined(__x86_64__)
- EXPECT_THAT(syscall(__NR_clone, SIGCHLD | CLONE_SETTLS, &stack, nullptr,
- nullptr, kNonCanonical),
- SyscallFailsWithErrno(EPERM));
-#elif defined(__aarch64__)
- EXPECT_THAT(syscall(__NR_clone, SIGCHLD | CLONE_SETTLS, &stack, nullptr,
- kNonCanonical, nullptr),
- SyscallFailsWithErrno(EPERM));
-#endif
-}
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
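
The 256+signal convention used by ForkTest::Wait() above can be seen in isolation in the sketch below (plain POSIX; the function name is illustrative): a normal exit maps to its exit code, while death by signal maps to 256 plus the signal number, so both outcomes fit in a single return value.

#include <sys/types.h>
#include <sys/wait.h>

int DecodeWaitStatus(pid_t pid) {
  int status;
  if (waitpid(pid, &status, 0) != pid) {
    return -1;  // Error or unexpected pid.
  }
  if (WIFEXITED(status)) {
    return WEXITSTATUS(status);  // e.g. Exit(123) yields 123.
  }
  if (WIFSIGNALED(status)) {
    return 256 + WTERMSIG(status);  // e.g. SIGKILL (9) yields 265.
  }
  return -1;
}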
diff --git a/test/syscalls/linux/fpsig_fork.cc b/test/syscalls/linux/fpsig_fork.cc
deleted file mode 100644
index 79b0596c4..000000000
--- a/test/syscalls/linux/fpsig_fork.cc
+++ /dev/null
@@ -1,188 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// This test verifies that fork(2) in a signal handler will correctly
-// restore floating point state after the signal handler returns in both
-// the child and parent.
-#include <sys/time.h>
-
-#include "gtest/gtest.h"
-#include "test/util/logging.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-#ifdef __x86_64__
-#define GET_XMM(__var, __xmm) \
- asm volatile("movq %%" #__xmm ", %0" : "=r"(__var))
-#define SET_XMM(__var, __xmm) asm volatile("movq %0, %%" #__xmm : : "r"(__var))
-#define GET_FP0(__var) GET_XMM(__var, xmm0)
-#define SET_FP0(__var) SET_XMM(__var, xmm0)
-#elif __aarch64__
-#define __stringify_1(x...) #x
-#define __stringify(x...) __stringify_1(x)
-#define GET_FPREG(var, regname) \
- asm volatile("str " __stringify(regname) ", %0" : "=m"(var))
-#define SET_FPREG(var, regname) \
- asm volatile("ldr " __stringify(regname) ", %0" : "=m"(var))
-#define GET_FP0(var) GET_FPREG(var, d0)
-#define SET_FP0(var) SET_FPREG(var, d0)
-#endif
-
-#define DEFAULT_MXCSR 0x1f80
-
-int parent, child;
-
-void sigusr1(int s, siginfo_t* siginfo, void* _uc) {
- // Fork and clobber %xmm0. The fpstate should be restored by sigreturn(2)
- // in both parent and child.
- child = fork();
- TEST_CHECK_MSG(child >= 0, "fork failed");
-
- uint64_t val = SIGUSR1;
- SET_FP0(val);
- uint64_t got;
- GET_FP0(got);
- TEST_CHECK_MSG(val == got, "Basic FP check failed in sigusr1()");
-
-#ifdef __x86_64
- uint32_t mxcsr;
- __asm__("STMXCSR %0" : "=m"(mxcsr));
- TEST_CHECK_MSG(mxcsr == DEFAULT_MXCSR, "Unexpected mxcsr");
-#endif
-}
-
-TEST(FPSigTest, Fork) {
- parent = getpid();
- pid_t parent_tid = gettid();
-
- struct sigaction sa = {};
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO;
- sa.sa_sigaction = sigusr1;
- ASSERT_THAT(sigaction(SIGUSR1, &sa, nullptr), SyscallSucceeds());
-
- // The amd64 ABI specifies that the XMM register set is caller-saved. This
- // implies that if there is any function call between SET_XMM and GET_XMM the
- // compiler might save/restore xmm0 implicitly. This defeats the entire
- // purpose of the test which is to verify that fpstate is restored by
- // sigreturn(2).
- //
- // This is the reason why 'tgkill(getpid(), gettid(), SIGUSR1)' is implemented
- // in inline assembly below.
- //
- // If the OS is broken and registers are clobbered by the child, using tgkill
- // to signal the current thread increases the likelihood that this thread will
- // be the one clobbered.
-
- uint64_t expected = 0xdeadbeeffacefeed;
- SET_FP0(expected);
-
-#ifdef __x86_64__
- asm volatile(
- "movl %[killnr], %%eax;"
- "movl %[parent], %%edi;"
- "movl %[tid], %%esi;"
- "movl %[sig], %%edx;"
- "syscall;"
- :
- : [ killnr ] "i"(__NR_tgkill), [ parent ] "rm"(parent),
- [ tid ] "rm"(parent_tid), [ sig ] "i"(SIGUSR1)
- : "rax", "rdi", "rsi", "rdx",
- // Clobbered by syscall.
- "rcx", "r11");
-#elif __aarch64__
- asm volatile(
- "mov x8, %0\n"
- "mov x0, %1\n"
- "mov x1, %2\n"
- "mov x2, %3\n"
- "svc #0\n" ::"r"(__NR_tgkill),
- "r"(parent), "r"(parent_tid), "r"(SIGUSR1));
-#endif
-
- uint64_t got;
- GET_FP0(got);
-
- if (getpid() == parent) { // Parent.
- int status;
- ASSERT_THAT(waitpid(child, &status, 0), SyscallSucceedsWithValue(child));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0);
- }
-
- // TEST_CHECK_MSG since this may run in the child.
- TEST_CHECK_MSG(expected == got, "Bad xmm0 value");
-
- if (getpid() != parent) { // Child.
- _exit(0);
- }
-}
-
-#ifdef __x86_64__
-TEST(FPSigTest, ForkWithZeroMxcsr) {
- parent = getpid();
- pid_t parent_tid = gettid();
-
- struct sigaction sa = {};
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO;
- sa.sa_sigaction = sigusr1;
- ASSERT_THAT(sigaction(SIGUSR1, &sa, nullptr), SyscallSucceeds());
-
- // The control bits of the MXCSR register are callee-saved (preserved across
- // calls), while the status bits are caller-saved (not preserved).
- uint32_t expected = 0, origin;
- __asm__("STMXCSR %0" : "=m"(origin));
- __asm__("LDMXCSR %0" : : "m"(expected));
-
- asm volatile(
- "movl %[killnr], %%eax;"
- "movl %[parent], %%edi;"
- "movl %[tid], %%esi;"
- "movl %[sig], %%edx;"
- "syscall;"
- :
- : [killnr] "i"(__NR_tgkill), [parent] "rm"(parent),
- [tid] "rm"(parent_tid), [sig] "i"(SIGUSR1)
- : "rax", "rdi", "rsi", "rdx",
- // Clobbered by syscall.
- "rcx", "r11");
-
- uint32_t got;
- __asm__("STMXCSR %0" : "=m"(got));
- __asm__("LDMXCSR %0" : : "m"(origin));
-
- if (getpid() == parent) { // Parent.
- int status;
- ASSERT_THAT(waitpid(child, &status, 0), SyscallSucceedsWithValue(child));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0);
- }
-
- // TEST_CHECK_MSG since this may run in the child.
- TEST_CHECK_MSG(expected == got, "Bad mxcsr value");
-
- if (getpid() != parent) { // Child.
- _exit(0);
- }
-}
-#endif
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/fpsig_nested.cc b/test/syscalls/linux/fpsig_nested.cc
deleted file mode 100644
index 302d928d1..000000000
--- a/test/syscalls/linux/fpsig_nested.cc
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// This program verifies that application floating point state is restored
-// correctly after a signal handler returns. It also verifies that this works
-// with nested signals.
-#include <sys/time.h>
-
-#include "gtest/gtest.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-#ifdef __x86_64__
-#define GET_XMM(__var, __xmm) \
- asm volatile("movq %%" #__xmm ", %0" : "=r"(__var))
-#define SET_XMM(__var, __xmm) asm volatile("movq %0, %%" #__xmm : : "r"(__var))
-#define GET_FP0(__var) GET_XMM(__var, xmm0)
-#define SET_FP0(__var) SET_XMM(__var, xmm0)
-#elif __aarch64__
-#define __stringify_1(x...) #x
-#define __stringify(x...) __stringify_1(x)
-#define GET_FPREG(var, regname) \
- asm volatile("str " __stringify(regname) ", %0" : "=m"(var))
-#define SET_FPREG(var, regname) \
- asm volatile("ldr " __stringify(regname) ", %0" : "=m"(var))
-#define GET_FP0(var) GET_FPREG(var, d0)
-#define SET_FP0(var) SET_FPREG(var, d0)
-#endif
-
-int pid;
-int tid;
-
-volatile uint64_t entryxmm[2] = {~0UL, ~0UL};
-volatile uint64_t exitxmm[2];
-
-void sigusr2(int s, siginfo_t* siginfo, void* _uc) {
- uint64_t val = SIGUSR2;
-
- // Record the value of %xmm0 on entry and then clobber it.
- GET_FP0(entryxmm[1]);
- SET_FP0(val);
- GET_FP0(exitxmm[1]);
-}
-
-void sigusr1(int s, siginfo_t* siginfo, void* _uc) {
- uint64_t val = SIGUSR1;
-
- // Record the value of %xmm0 on entry and then clobber it.
- GET_FP0(entryxmm[0]);
- SET_FP0(val);
-
- // Send a SIGUSR2 to ourself. The signal mask is configured such that
- // the SIGUSR2 handler will run before this handler returns.
-#ifdef __x86_64__
- asm volatile(
- "movl %[killnr], %%eax;"
- "movl %[pid], %%edi;"
- "movl %[tid], %%esi;"
- "movl %[sig], %%edx;"
- "syscall;"
- :
- : [ killnr ] "i"(__NR_tgkill), [ pid ] "rm"(pid), [ tid ] "rm"(tid),
- [ sig ] "i"(SIGUSR2)
- : "rax", "rdi", "rsi", "rdx",
- // Clobbered by syscall.
- "rcx", "r11");
-#elif __aarch64__
- asm volatile(
- "mov x8, %0\n"
- "mov x0, %1\n"
- "mov x1, %2\n"
- "mov x2, %3\n"
- "svc #0\n" ::"r"(__NR_tgkill),
- "r"(pid), "r"(tid), "r"(SIGUSR2));
-#endif
-
- // Record value of %xmm0 again to verify that the nested signal handler
- // does not clobber it.
- GET_FP0(exitxmm[0]);
-}
-
-TEST(FPSigTest, NestedSignals) {
- pid = getpid();
- tid = gettid();
-
- struct sigaction sa = {};
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO;
- sa.sa_sigaction = sigusr1;
- ASSERT_THAT(sigaction(SIGUSR1, &sa, nullptr), SyscallSucceeds());
-
- sa.sa_sigaction = sigusr2;
- ASSERT_THAT(sigaction(SIGUSR2, &sa, nullptr), SyscallSucceeds());
-
- // The amd64 ABI specifies that the XMM register set is caller-saved. This
- // implies that if there is any function call between SET_XMM and GET_XMM the
- // compiler might save/restore xmm0 implicitly. This defeats the entire
- // purpose of the test which is to verify that fpstate is restored by
- // sigreturn(2).
- //
- // This is the reason why 'tgkill(getpid(), gettid(), SIGUSR1)' is implemented
- // in inline assembly below.
- //
- // If the OS is broken and registers are clobbered by the signal, using tgkill
- // to signal the current thread ensures that this is the clobbered thread.
-
- uint64_t expected = 0xdeadbeeffacefeed;
- SET_FP0(expected);
-
-#ifdef __x86_64__
- asm volatile(
- "movl %[killnr], %%eax;"
- "movl %[pid], %%edi;"
- "movl %[tid], %%esi;"
- "movl %[sig], %%edx;"
- "syscall;"
- :
- : [ killnr ] "i"(__NR_tgkill), [ pid ] "rm"(pid), [ tid ] "rm"(tid),
- [ sig ] "i"(SIGUSR1)
- : "rax", "rdi", "rsi", "rdx",
- // Clobbered by syscall.
- "rcx", "r11");
-#elif __aarch64__
- asm volatile(
- "mov x8, %0\n"
- "mov x0, %1\n"
- "mov x1, %2\n"
- "mov x2, %3\n"
- "svc #0\n" ::"r"(__NR_tgkill),
- "r"(pid), "r"(tid), "r"(SIGUSR1));
-#endif
-
- uint64_t got;
- GET_FP0(got);
-
- //
-  // The checks below verify the following:
-  //   - signal handlers must be called with a clean fpu state.
- // - sigreturn(2) must restore fpstate of the interrupted context.
- //
- EXPECT_EQ(expected, got);
- EXPECT_EQ(entryxmm[0], 0);
- EXPECT_EQ(entryxmm[1], 0);
- EXPECT_EQ(exitxmm[0], SIGUSR1);
- EXPECT_EQ(exitxmm[1], SIGUSR2);
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
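
As a minimal, x86-64-only illustration of the GET_FP0/SET_FP0 pattern used by the two signal tests above (not part of the deleted files): a 64-bit value is moved into %xmm0 and read straight back, with no intervening function calls that could let the compiler spill the register.

#include <stdint.h>
#include <cassert>

void Xmm0RoundTrip() {
  uint64_t val = 0xdeadbeeffacefeedULL;
  uint64_t got = 0;
  asm volatile("movq %0, %%xmm0" : : "r"(val));  // SET_FP0(val)
  asm volatile("movq %%xmm0, %0" : "=r"(got));   // GET_FP0(got)
  assert(val == got);
}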
diff --git a/test/syscalls/linux/fsync.cc b/test/syscalls/linux/fsync.cc
deleted file mode 100644
index e7e057f06..000000000
--- a/test/syscalls/linux/fsync.cc
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <stdio.h>
-#include <unistd.h>
-
-#include <string>
-
-#include "gtest/gtest.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(FsyncTest, TempFileSucceeds) {
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR, 0666));
- const std::string data = "some data to sync";
- EXPECT_THAT(write(fd.get(), data.c_str(), data.size()),
- SyscallSucceedsWithValue(data.size()));
- EXPECT_THAT(fsync(fd.get()), SyscallSucceeds());
-}
-
-TEST(FsyncTest, TempDirSucceeds) {
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(Open(dir.path(), O_RDONLY | O_DIRECTORY));
- EXPECT_THAT(fsync(fd.get()), SyscallSucceeds());
-}
-
-TEST(FsyncTest, CannotFsyncOnUnopenedFd) {
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- int fd;
- ASSERT_THAT(fd = open(file.path().c_str(), O_RDONLY), SyscallSucceeds());
- ASSERT_THAT(close(fd), SyscallSucceeds());
-
- // fd is now invalid.
- EXPECT_THAT(fsync(fd), SyscallFailsWithErrno(EBADF));
-}
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
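
The fsync tests above touch on the usual durability pattern, sketched below with plain POSIX calls (error handling elided, names illustrative): fsync the file itself for its data and metadata, then fsync the containing directory so the new directory entry is durable as well.

#include <fcntl.h>
#include <stddef.h>
#include <unistd.h>

void DurableCreate(const char* dir_path, const char* file_path,
                   const char* buf, size_t len) {
  int fd = open(file_path, O_WRONLY | O_CREAT | O_TRUNC, 0644);
  (void)write(fd, buf, len);
  fsync(fd);  // Flush file data and metadata.
  close(fd);

  int dfd = open(dir_path, O_RDONLY | O_DIRECTORY);
  fsync(dfd);  // Flush the directory entry for the new file.
  close(dfd);
}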
diff --git a/test/syscalls/linux/futex.cc b/test/syscalls/linux/futex.cc
deleted file mode 100644
index 859f92b75..000000000
--- a/test/syscalls/linux/futex.cc
+++ /dev/null
@@ -1,834 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <linux/futex.h>
-#include <linux/types.h>
-#include <sys/syscall.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <syscall.h>
-#include <unistd.h>
-
-#include <algorithm>
-#include <atomic>
-#include <memory>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "absl/memory/memory.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/util/cleanup.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/memory_util.h"
-#include "test/util/save_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-#include "test/util/time_util.h"
-#include "test/util/timer_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// Amount of time we wait for threads doing futex_wait to start running before
-// doing futex_wake.
-constexpr auto kWaiterStartupDelay = absl::Seconds(3);
-
-// Default timeout for waiters in tests where we expect a futex_wake to be
-// ineffective.
-constexpr auto kIneffectiveWakeTimeout = absl::Seconds(6);
-
-static_assert(kWaiterStartupDelay < kIneffectiveWakeTimeout,
- "futex_wait will time out before futex_wake is called");
-
-int futex_wait(bool priv, std::atomic<int>* uaddr, int val,
- absl::Duration timeout = absl::InfiniteDuration()) {
- int op = FUTEX_WAIT;
- if (priv) {
- op |= FUTEX_PRIVATE_FLAG;
- }
-
- if (timeout == absl::InfiniteDuration()) {
- return RetryEINTR(syscall)(SYS_futex, uaddr, op, val, nullptr);
- }
-
- // FUTEX_WAIT doesn't adjust the timeout if it returns EINTR, so we have to do
- // so.
- while (true) {
- auto const timeout_ts = absl::ToTimespec(timeout);
- MonotonicTimer timer;
- timer.Start();
- int const ret = syscall(SYS_futex, uaddr, op, val, &timeout_ts);
- if (ret != -1 || errno != EINTR) {
- return ret;
- }
- timeout = std::max(timeout - timer.Duration(), absl::ZeroDuration());
- }
-}
-
-int futex_wait_bitset(bool priv, std::atomic<int>* uaddr, int val, int bitset,
- absl::Time deadline = absl::InfiniteFuture()) {
- int op = FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME;
- if (priv) {
- op |= FUTEX_PRIVATE_FLAG;
- }
-
- auto const deadline_ts = absl::ToTimespec(deadline);
- return RetryEINTR(syscall)(
- SYS_futex, uaddr, op, val,
- deadline == absl::InfiniteFuture() ? nullptr : &deadline_ts, nullptr,
- bitset);
-}
-
-int futex_wake(bool priv, std::atomic<int>* uaddr, int count) {
- int op = FUTEX_WAKE;
- if (priv) {
- op |= FUTEX_PRIVATE_FLAG;
- }
- return syscall(SYS_futex, uaddr, op, count);
-}
-
-int futex_wake_bitset(bool priv, std::atomic<int>* uaddr, int count,
- int bitset) {
- int op = FUTEX_WAKE_BITSET;
- if (priv) {
- op |= FUTEX_PRIVATE_FLAG;
- }
- return syscall(SYS_futex, uaddr, op, count, nullptr, nullptr, bitset);
-}
-
-int futex_wake_op(bool priv, std::atomic<int>* uaddr1, std::atomic<int>* uaddr2,
- int nwake1, int nwake2, uint32_t sub_op) {
- int op = FUTEX_WAKE_OP;
- if (priv) {
- op |= FUTEX_PRIVATE_FLAG;
- }
- return syscall(SYS_futex, uaddr1, op, nwake1, nwake2, uaddr2, sub_op);
-}
-
-int futex_lock_pi(bool priv, std::atomic<int>* uaddr) {
- int op = FUTEX_LOCK_PI;
- if (priv) {
- op |= FUTEX_PRIVATE_FLAG;
- }
- int zero = 0;
- if (uaddr->compare_exchange_strong(zero, gettid())) {
- return 0;
- }
- return RetryEINTR(syscall)(SYS_futex, uaddr, op, nullptr, nullptr);
-}
-
-int futex_trylock_pi(bool priv, std::atomic<int>* uaddr) {
- int op = FUTEX_TRYLOCK_PI;
- if (priv) {
- op |= FUTEX_PRIVATE_FLAG;
- }
- int zero = 0;
- if (uaddr->compare_exchange_strong(zero, gettid())) {
- return 0;
- }
- return RetryEINTR(syscall)(SYS_futex, uaddr, op, nullptr, nullptr);
-}
-
-int futex_unlock_pi(bool priv, std::atomic<int>* uaddr) {
- int op = FUTEX_UNLOCK_PI;
- if (priv) {
- op |= FUTEX_PRIVATE_FLAG;
- }
- int tid = gettid();
- if (uaddr->compare_exchange_strong(tid, 0)) {
- return 0;
- }
- return RetryEINTR(syscall)(SYS_futex, uaddr, op, nullptr, nullptr);
-}
-
-// Fixture for futex tests parameterized by whether to use private or shared
-// futexes.
-class PrivateAndSharedFutexTest : public ::testing::TestWithParam<bool> {
- protected:
- bool IsPrivate() const { return GetParam(); }
- int PrivateFlag() const { return IsPrivate() ? FUTEX_PRIVATE_FLAG : 0; }
-};
-
-// FUTEX_WAIT with 0 timeout does not block.
-TEST_P(PrivateAndSharedFutexTest, Wait_ZeroTimeout) {
- struct timespec timeout = {};
-
- // Don't use the futex_wait helper because it adjusts timeout.
- int a = 1;
- EXPECT_THAT(syscall(SYS_futex, &a, FUTEX_WAIT | PrivateFlag(), a, &timeout),
- SyscallFailsWithErrno(ETIMEDOUT));
-}
-
-TEST_P(PrivateAndSharedFutexTest, Wait_Timeout) {
- std::atomic<int> a = ATOMIC_VAR_INIT(1);
-
- MonotonicTimer timer;
- timer.Start();
- constexpr absl::Duration kTimeout = absl::Seconds(1);
- EXPECT_THAT(futex_wait(IsPrivate(), &a, a, kTimeout),
- SyscallFailsWithErrno(ETIMEDOUT));
- EXPECT_GE(timer.Duration(), kTimeout);
-}
-
-TEST_P(PrivateAndSharedFutexTest, Wait_BitsetTimeout) {
- std::atomic<int> a = ATOMIC_VAR_INIT(1);
-
- MonotonicTimer timer;
- timer.Start();
- constexpr absl::Duration kTimeout = absl::Seconds(1);
- EXPECT_THAT(
- futex_wait_bitset(IsPrivate(), &a, a, 0xffffffff, absl::Now() + kTimeout),
- SyscallFailsWithErrno(ETIMEDOUT));
- EXPECT_GE(timer.Duration(), kTimeout);
-}
-
-TEST_P(PrivateAndSharedFutexTest, WaitBitset_NegativeTimeout) {
- std::atomic<int> a = ATOMIC_VAR_INIT(1);
-
- MonotonicTimer timer;
- timer.Start();
- EXPECT_THAT(futex_wait_bitset(IsPrivate(), &a, a, 0xffffffff,
- absl::Now() - absl::Seconds(1)),
- SyscallFailsWithErrno(ETIMEDOUT));
-}
-
-TEST_P(PrivateAndSharedFutexTest, Wait_WrongVal) {
- std::atomic<int> a = ATOMIC_VAR_INIT(1);
- EXPECT_THAT(futex_wait(IsPrivate(), &a, a + 1),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-TEST_P(PrivateAndSharedFutexTest, Wait_ZeroBitset) {
- std::atomic<int> a = ATOMIC_VAR_INIT(1);
- EXPECT_THAT(futex_wait_bitset(IsPrivate(), &a, a, 0),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_P(PrivateAndSharedFutexTest, Wake1) {
- constexpr int kInitialValue = 1;
- std::atomic<int> a = ATOMIC_VAR_INIT(kInitialValue);
-
- // Prevent save/restore from interrupting futex_wait, which will cause it to
- // return EAGAIN instead of the expected result if futex_wait is restarted
- // after we change the value of a below.
- DisableSave ds;
- ScopedThread thread([&] {
- EXPECT_THAT(futex_wait(IsPrivate(), &a, kInitialValue),
- SyscallSucceedsWithValue(0));
- });
- absl::SleepFor(kWaiterStartupDelay);
-
- // Change a so that if futex_wake happens before futex_wait, the latter
- // returns EAGAIN instead of hanging the test.
- a.fetch_add(1);
- EXPECT_THAT(futex_wake(IsPrivate(), &a, 1), SyscallSucceedsWithValue(1));
-}
-
-TEST_P(PrivateAndSharedFutexTest, Wake0) {
- constexpr int kInitialValue = 1;
- std::atomic<int> a = ATOMIC_VAR_INIT(kInitialValue);
-
- // Prevent save/restore from interrupting futex_wait, which will cause it to
- // return EAGAIN instead of the expected result if futex_wait is restarted
- // after we change the value of a below.
- DisableSave ds;
- ScopedThread thread([&] {
- EXPECT_THAT(futex_wait(IsPrivate(), &a, kInitialValue),
- SyscallSucceedsWithValue(0));
- });
- absl::SleepFor(kWaiterStartupDelay);
-
- // Change a so that if futex_wake happens before futex_wait, the latter
- // returns EAGAIN instead of hanging the test.
- a.fetch_add(1);
- // The Linux kernel wakes one waiter even if val is 0 or negative.
- EXPECT_THAT(futex_wake(IsPrivate(), &a, 0), SyscallSucceedsWithValue(1));
-}
-
-TEST_P(PrivateAndSharedFutexTest, WakeAll) {
- constexpr int kInitialValue = 1;
- std::atomic<int> a = ATOMIC_VAR_INIT(kInitialValue);
-
- DisableSave ds;
- constexpr int kThreads = 5;
- std::vector<std::unique_ptr<ScopedThread>> threads;
- threads.reserve(kThreads);
- for (int i = 0; i < kThreads; i++) {
- threads.push_back(absl::make_unique<ScopedThread>([&] {
- EXPECT_THAT(futex_wait(IsPrivate(), &a, kInitialValue),
- SyscallSucceeds());
- }));
- }
- absl::SleepFor(kWaiterStartupDelay);
-
- a.fetch_add(1);
- EXPECT_THAT(futex_wake(IsPrivate(), &a, kThreads),
- SyscallSucceedsWithValue(kThreads));
-}
-
-TEST_P(PrivateAndSharedFutexTest, WakeSome) {
- constexpr int kInitialValue = 1;
- std::atomic<int> a = ATOMIC_VAR_INIT(kInitialValue);
-
- DisableSave ds;
- constexpr int kThreads = 5;
- constexpr int kWokenThreads = 3;
- static_assert(kWokenThreads < kThreads,
- "can't wake more threads than are created");
- std::vector<std::unique_ptr<ScopedThread>> threads;
- threads.reserve(kThreads);
- std::vector<int> rets;
- rets.reserve(kThreads);
- std::vector<int> errs;
- errs.reserve(kThreads);
- for (int i = 0; i < kThreads; i++) {
- rets.push_back(-1);
- errs.push_back(0);
- }
- for (int i = 0; i < kThreads; i++) {
- threads.push_back(absl::make_unique<ScopedThread>([&, i] {
- rets[i] =
- futex_wait(IsPrivate(), &a, kInitialValue, kIneffectiveWakeTimeout);
- errs[i] = errno;
- }));
- }
- absl::SleepFor(kWaiterStartupDelay);
-
- a.fetch_add(1);
- EXPECT_THAT(futex_wake(IsPrivate(), &a, kWokenThreads),
- SyscallSucceedsWithValue(kWokenThreads));
-
- int woken = 0;
- int timedout = 0;
- for (int i = 0; i < kThreads; i++) {
- threads[i]->Join();
- if (rets[i] == 0) {
- woken++;
- } else if (errs[i] == ETIMEDOUT) {
- timedout++;
- } else {
- ADD_FAILURE() << " thread " << i << ": returned " << rets[i] << ", errno "
- << errs[i];
- }
- }
- EXPECT_EQ(woken, kWokenThreads);
- EXPECT_EQ(timedout, kThreads - kWokenThreads);
-}
-
-TEST_P(PrivateAndSharedFutexTest, WaitBitset_Wake) {
- constexpr int kInitialValue = 1;
- std::atomic<int> a = ATOMIC_VAR_INIT(kInitialValue);
-
- DisableSave ds;
- ScopedThread thread([&] {
- EXPECT_THAT(futex_wait_bitset(IsPrivate(), &a, kInitialValue, 0b01001000),
- SyscallSucceeds());
- });
- absl::SleepFor(kWaiterStartupDelay);
-
- a.fetch_add(1);
- EXPECT_THAT(futex_wake(IsPrivate(), &a, 1), SyscallSucceedsWithValue(1));
-}
-
-TEST_P(PrivateAndSharedFutexTest, Wait_WakeBitset) {
- constexpr int kInitialValue = 1;
- std::atomic<int> a = ATOMIC_VAR_INIT(kInitialValue);
-
- DisableSave ds;
- ScopedThread thread([&] {
- EXPECT_THAT(futex_wait(IsPrivate(), &a, kInitialValue), SyscallSucceeds());
- });
- absl::SleepFor(kWaiterStartupDelay);
-
- a.fetch_add(1);
- EXPECT_THAT(futex_wake_bitset(IsPrivate(), &a, 1, 0b01001000),
- SyscallSucceedsWithValue(1));
-}
-
-TEST_P(PrivateAndSharedFutexTest, WaitBitset_WakeBitsetMatch) {
- constexpr int kInitialValue = 1;
- std::atomic<int> a = ATOMIC_VAR_INIT(kInitialValue);
-
- constexpr int kBitset = 0b01001000;
-
- DisableSave ds;
- ScopedThread thread([&] {
- EXPECT_THAT(futex_wait_bitset(IsPrivate(), &a, kInitialValue, kBitset),
- SyscallSucceeds());
- });
- absl::SleepFor(kWaiterStartupDelay);
-
- a.fetch_add(1);
- EXPECT_THAT(futex_wake_bitset(IsPrivate(), &a, 1, kBitset),
- SyscallSucceedsWithValue(1));
-}
-
-TEST_P(PrivateAndSharedFutexTest, WaitBitset_WakeBitsetNoMatch) {
- constexpr int kInitialValue = 1;
- std::atomic<int> a = ATOMIC_VAR_INIT(kInitialValue);
-
- constexpr int kWaitBitset = 0b01000001;
- constexpr int kWakeBitset = 0b00101000;
- static_assert((kWaitBitset & kWakeBitset) == 0,
- "futex_wake_bitset will wake waiter");
-
- DisableSave ds;
- ScopedThread thread([&] {
- EXPECT_THAT(futex_wait_bitset(IsPrivate(), &a, kInitialValue, kWaitBitset,
- absl::Now() + kIneffectiveWakeTimeout),
- SyscallFailsWithErrno(ETIMEDOUT));
- });
- absl::SleepFor(kWaiterStartupDelay);
-
- a.fetch_add(1);
- EXPECT_THAT(futex_wake_bitset(IsPrivate(), &a, 1, kWakeBitset),
- SyscallSucceedsWithValue(0));
-}
-
-TEST_P(PrivateAndSharedFutexTest, WakeOpCondSuccess) {
- constexpr int kInitialValue = 1;
- std::atomic<int> a = ATOMIC_VAR_INIT(kInitialValue);
- std::atomic<int> b = ATOMIC_VAR_INIT(kInitialValue);
-
- DisableSave ds;
- ScopedThread thread_a([&] {
- EXPECT_THAT(futex_wait(IsPrivate(), &a, kInitialValue), SyscallSucceeds());
- });
- ScopedThread thread_b([&] {
- EXPECT_THAT(futex_wait(IsPrivate(), &b, kInitialValue), SyscallSucceeds());
- });
- absl::SleepFor(kWaiterStartupDelay);
-
- a.fetch_add(1);
- b.fetch_add(1);
- // This futex_wake_op should:
- // - Wake 1 waiter on a unconditionally.
- // - Wake 1 waiter on b if b == kInitialValue + 1, which it is.
- // - Do "b += 1".
- EXPECT_THAT(futex_wake_op(IsPrivate(), &a, &b, 1, 1,
- FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_EQ,
- (kInitialValue + 1))),
- SyscallSucceedsWithValue(2));
- EXPECT_EQ(b, kInitialValue + 2);
-}
-
-TEST_P(PrivateAndSharedFutexTest, WakeOpCondFailure) {
- constexpr int kInitialValue = 1;
- std::atomic<int> a = ATOMIC_VAR_INIT(kInitialValue);
- std::atomic<int> b = ATOMIC_VAR_INIT(kInitialValue);
-
- DisableSave ds;
- ScopedThread thread_a([&] {
- EXPECT_THAT(futex_wait(IsPrivate(), &a, kInitialValue), SyscallSucceeds());
- });
- ScopedThread thread_b([&] {
- EXPECT_THAT(
- futex_wait(IsPrivate(), &b, kInitialValue, kIneffectiveWakeTimeout),
- SyscallFailsWithErrno(ETIMEDOUT));
- });
- absl::SleepFor(kWaiterStartupDelay);
-
- a.fetch_add(1);
- b.fetch_add(1);
- // This futex_wake_op should:
- // - Wake 1 waiter on a unconditionally.
- // - Wake 1 waiter on b if b == kInitialValue - 1, which it isn't.
- // - Do "b += 1".
- EXPECT_THAT(futex_wake_op(IsPrivate(), &a, &b, 1, 1,
- FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_EQ,
- (kInitialValue - 1))),
- SyscallSucceedsWithValue(1));
- EXPECT_EQ(b, kInitialValue + 2);
-}
-
-TEST_P(PrivateAndSharedFutexTest, NoWakeInterprocessPrivateAnon) {
- auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
- auto const ptr = static_cast<std::atomic<int>*>(mapping.ptr());
- constexpr int kInitialValue = 1;
- ptr->store(kInitialValue);
-
- DisableSave ds;
- pid_t const child_pid = fork();
- if (child_pid == 0) {
- TEST_PCHECK(futex_wait(IsPrivate(), ptr, kInitialValue,
- kIneffectiveWakeTimeout) == -1 &&
- errno == ETIMEDOUT);
- _exit(0);
- }
- ASSERT_THAT(child_pid, SyscallSucceeds());
- absl::SleepFor(kWaiterStartupDelay);
-
- EXPECT_THAT(futex_wake(IsPrivate(), ptr, 1), SyscallSucceedsWithValue(0));
-
- int status;
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << " status " << status;
-}
-
-TEST_P(PrivateAndSharedFutexTest, WakeAfterCOWBreak) {
- // Use a futex on a non-stack mapping so we can be sure that the child process
- // below isn't the one that breaks copy-on-write.
- auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
- auto const ptr = static_cast<std::atomic<int>*>(mapping.ptr());
- constexpr int kInitialValue = 1;
- ptr->store(kInitialValue);
-
- DisableSave ds;
- ScopedThread thread([&] {
- EXPECT_THAT(futex_wait(IsPrivate(), ptr, kInitialValue), SyscallSucceeds());
- });
- absl::SleepFor(kWaiterStartupDelay);
-
- pid_t const child_pid = fork();
- if (child_pid == 0) {
- // Wait to be killed by the parent.
- while (true) pause();
- }
- ASSERT_THAT(child_pid, SyscallSucceeds());
- auto cleanup_child = Cleanup([&] {
- EXPECT_THAT(kill(child_pid, SIGKILL), SyscallSucceeds());
- int status;
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL)
- << " status " << status;
- });
-
- // In addition to preventing a late futex_wait from sleeping, this breaks
- // copy-on-write on the mapped page.
- ptr->fetch_add(1);
- EXPECT_THAT(futex_wake(IsPrivate(), ptr, 1), SyscallSucceedsWithValue(1));
-}
-
-TEST_P(PrivateAndSharedFutexTest, WakeWrongKind) {
- constexpr int kInitialValue = 1;
- std::atomic<int> a = ATOMIC_VAR_INIT(kInitialValue);
-
- DisableSave ds;
- ScopedThread thread([&] {
- EXPECT_THAT(
- futex_wait(IsPrivate(), &a, kInitialValue, kIneffectiveWakeTimeout),
- SyscallFailsWithErrno(ETIMEDOUT));
- });
- absl::SleepFor(kWaiterStartupDelay);
-
- a.fetch_add(1);
- // The value of priv passed to futex_wake is the opposite of that passed to
- // futex_wait; we expect this not to wake the waiter.
- EXPECT_THAT(futex_wake(!IsPrivate(), &a, 1), SyscallSucceedsWithValue(0));
-}
-
-INSTANTIATE_TEST_SUITE_P(SharedPrivate, PrivateAndSharedFutexTest,
- ::testing::Bool());
-
-// Passing null as the address only works for private futexes.
-
-TEST(PrivateFutexTest, WakeOp0Set) {
- std::atomic<int> a = ATOMIC_VAR_INIT(1);
-
- int futex_op = FUTEX_OP(FUTEX_OP_SET, 2, 0, 0);
- EXPECT_THAT(futex_wake_op(true, nullptr, &a, 0, 0, futex_op),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(a, 2);
-}
-
-TEST(PrivateFutexTest, WakeOp0Add) {
- std::atomic<int> a = ATOMIC_VAR_INIT(1);
- int futex_op = FUTEX_OP(FUTEX_OP_ADD, 1, 0, 0);
- EXPECT_THAT(futex_wake_op(true, nullptr, &a, 0, 0, futex_op),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(a, 2);
-}
-
-TEST(PrivateFutexTest, WakeOp0Or) {
- std::atomic<int> a = ATOMIC_VAR_INIT(0b01);
- int futex_op = FUTEX_OP(FUTEX_OP_OR, 0b10, 0, 0);
- EXPECT_THAT(futex_wake_op(true, nullptr, &a, 0, 0, futex_op),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(a, 0b11);
-}
-
-TEST(PrivateFutexTest, WakeOp0Andn) {
- std::atomic<int> a = ATOMIC_VAR_INIT(0b11);
- int futex_op = FUTEX_OP(FUTEX_OP_ANDN, 0b10, 0, 0);
- EXPECT_THAT(futex_wake_op(true, nullptr, &a, 0, 0, futex_op),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(a, 0b01);
-}
-
-TEST(PrivateFutexTest, WakeOp0Xor) {
- std::atomic<int> a = ATOMIC_VAR_INIT(0b1010);
- int futex_op = FUTEX_OP(FUTEX_OP_XOR, 0b1100, 0, 0);
- EXPECT_THAT(futex_wake_op(true, nullptr, &a, 0, 0, futex_op),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(a, 0b0110);
-}
-
-TEST(SharedFutexTest, WakeInterprocessSharedAnon) {
- auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED));
- auto const ptr = static_cast<std::atomic<int>*>(mapping.ptr());
- constexpr int kInitialValue = 1;
- ptr->store(kInitialValue);
-
- DisableSave ds;
- pid_t const child_pid = fork();
- if (child_pid == 0) {
- TEST_PCHECK(futex_wait(false, ptr, kInitialValue) == 0);
- _exit(0);
- }
- ASSERT_THAT(child_pid, SyscallSucceeds());
- auto kill_child = Cleanup(
- [&] { EXPECT_THAT(kill(child_pid, SIGKILL), SyscallSucceeds()); });
- absl::SleepFor(kWaiterStartupDelay);
-
- ptr->fetch_add(1);
- // This is an ASSERT so that if it fails, we immediately abort the test (and
- // kill the subprocess).
- ASSERT_THAT(futex_wake(false, ptr, 1), SyscallSucceedsWithValue(1));
-
- kill_child.Release();
- int status;
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << " status " << status;
-}
-
-TEST(SharedFutexTest, WakeInterprocessFile) {
- auto const file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- ASSERT_THAT(truncate(file.path().c_str(), kPageSize), SyscallSucceeds());
- auto const fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR));
- auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(Mmap(
- nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));
- auto const ptr = static_cast<std::atomic<int>*>(mapping.ptr());
- constexpr int kInitialValue = 1;
- ptr->store(kInitialValue);
-
- DisableSave ds;
- pid_t const child_pid = fork();
- if (child_pid == 0) {
- TEST_PCHECK(futex_wait(false, ptr, kInitialValue) == 0);
- _exit(0);
- }
- ASSERT_THAT(child_pid, SyscallSucceeds());
- auto kill_child = Cleanup(
- [&] { EXPECT_THAT(kill(child_pid, SIGKILL), SyscallSucceeds()); });
- absl::SleepFor(kWaiterStartupDelay);
-
- ptr->fetch_add(1);
- // This is an ASSERT so that if it fails, we immediately abort the test (and
- // kill the subprocess).
- ASSERT_THAT(futex_wake(false, ptr, 1), SyscallSucceedsWithValue(1));
-
- kill_child.Release();
- int status;
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << " status " << status;
-}
-
-TEST_P(PrivateAndSharedFutexTest, PIBasic) {
- std::atomic<int> a = ATOMIC_VAR_INIT(0);
-
- ASSERT_THAT(futex_lock_pi(IsPrivate(), &a), SyscallSucceeds());
- EXPECT_EQ(a.load(), gettid());
- EXPECT_THAT(futex_lock_pi(IsPrivate(), &a), SyscallFailsWithErrno(EDEADLK));
-
- ASSERT_THAT(futex_unlock_pi(IsPrivate(), &a), SyscallSucceeds());
- EXPECT_EQ(a.load(), 0);
- EXPECT_THAT(futex_unlock_pi(IsPrivate(), &a), SyscallFailsWithErrno(EPERM));
-}
-
-TEST_P(PrivateAndSharedFutexTest, PIConcurrency) {
- DisableSave ds; // Too many syscalls.
-
- std::atomic<int> a = ATOMIC_VAR_INIT(0);
- const bool is_priv = IsPrivate();
-
- std::unique_ptr<ScopedThread> threads[100];
- for (size_t i = 0; i < ABSL_ARRAYSIZE(threads); ++i) {
- threads[i] = absl::make_unique<ScopedThread>([is_priv, &a] {
- for (size_t j = 0; j < 10; ++j) {
- ASSERT_THAT(futex_lock_pi(is_priv, &a), SyscallSucceeds());
- EXPECT_EQ(a.load() & FUTEX_TID_MASK, gettid());
- SleepSafe(absl::Milliseconds(5));
- ASSERT_THAT(futex_unlock_pi(is_priv, &a), SyscallSucceeds());
- }
- });
- }
-}
-
-TEST_P(PrivateAndSharedFutexTest, PIWaiters) {
- std::atomic<int> a = ATOMIC_VAR_INIT(0);
- const bool is_priv = IsPrivate();
-
- ASSERT_THAT(futex_lock_pi(is_priv, &a), SyscallSucceeds());
- EXPECT_EQ(a.load(), gettid());
-
- ScopedThread th([is_priv, &a] {
- ASSERT_THAT(futex_lock_pi(is_priv, &a), SyscallSucceeds());
- ASSERT_THAT(futex_unlock_pi(is_priv, &a), SyscallSucceeds());
- });
-
- // Wait until the thread blocks on the futex, setting the waiters bit.
- auto start = absl::Now();
- while (a.load() != (FUTEX_WAITERS | gettid())) {
- ASSERT_LT(absl::Now() - start, absl::Seconds(5));
- absl::SleepFor(absl::Milliseconds(100));
- }
- ASSERT_THAT(futex_unlock_pi(is_priv, &a), SyscallSucceeds());
-}
-
-TEST_P(PrivateAndSharedFutexTest, PITryLock) {
- std::atomic<int> a = ATOMIC_VAR_INIT(0);
- const bool is_priv = IsPrivate();
-
- ASSERT_THAT(futex_trylock_pi(IsPrivate(), &a), SyscallSucceeds());
- EXPECT_EQ(a.load(), gettid());
-
- EXPECT_THAT(futex_trylock_pi(is_priv, &a), SyscallFailsWithErrno(EDEADLK));
- ScopedThread th([is_priv, &a] {
- EXPECT_THAT(futex_trylock_pi(is_priv, &a), SyscallFailsWithErrno(EAGAIN));
- });
- th.Join();
-
- ASSERT_THAT(futex_unlock_pi(IsPrivate(), &a), SyscallSucceeds());
-}
-
-TEST_P(PrivateAndSharedFutexTest, PITryLockConcurrency) {
- DisableSave ds; // Too many syscalls.
-
- std::atomic<int> a = ATOMIC_VAR_INIT(0);
- const bool is_priv = IsPrivate();
-
- std::unique_ptr<ScopedThread> threads[10];
- for (size_t i = 0; i < ABSL_ARRAYSIZE(threads); ++i) {
- threads[i] = absl::make_unique<ScopedThread>([is_priv, &a] {
- for (size_t j = 0; j < 10;) {
- if (futex_trylock_pi(is_priv, &a) == 0) {
- ++j;
- EXPECT_EQ(a.load() & FUTEX_TID_MASK, gettid());
- SleepSafe(absl::Milliseconds(5));
- ASSERT_THAT(futex_unlock_pi(is_priv, &a), SyscallSucceeds());
- }
- }
- });
- }
-}
-
-int get_robust_list(int pid, struct robust_list_head** head_ptr,
- size_t* len_ptr) {
- return syscall(__NR_get_robust_list, pid, head_ptr, len_ptr);
-}
-
-int set_robust_list(struct robust_list_head* head, size_t len) {
- return syscall(__NR_set_robust_list, head, len);
-}
-
-TEST(RobustFutexTest, BasicSetGet) {
- struct robust_list_head hd = {};
- struct robust_list_head* hd_ptr = &hd;
-
- // Set!
- EXPECT_THAT(set_robust_list(hd_ptr, sizeof(hd)), SyscallSucceedsWithValue(0));
-
- // Get!
- struct robust_list_head* new_hd_ptr = hd_ptr;
- size_t len;
- EXPECT_THAT(get_robust_list(0, &new_hd_ptr, &len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(new_hd_ptr, hd_ptr);
- EXPECT_EQ(len, sizeof(hd));
-}
-
-TEST(RobustFutexTest, GetFromOtherTid) {
- // Get the current tid and list head.
- pid_t tid = gettid();
- struct robust_list_head* hd_ptr = {};
- size_t len;
- EXPECT_THAT(get_robust_list(0, &hd_ptr, &len), SyscallSucceedsWithValue(0));
-
- // Create a new thread.
- ScopedThread t([&] {
- // The current thread's list head should be different from the parent's.
- struct robust_list_head* got_hd_ptr = {};
- EXPECT_THAT(get_robust_list(0, &got_hd_ptr, &len),
- SyscallSucceedsWithValue(0));
- EXPECT_NE(hd_ptr, got_hd_ptr);
-
- // Get the parent list head by passing its tid.
- EXPECT_THAT(get_robust_list(tid, &got_hd_ptr, &len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(hd_ptr, got_hd_ptr);
- });
-
- // Wait for thread.
- t.Join();
-}
-
-TEST(RobustFutexTest, InvalidSize) {
- struct robust_list_head* hd = {};
- EXPECT_THAT(set_robust_list(hd, sizeof(*hd) + 1),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(RobustFutexTest, PthreadMutexAttr) {
- constexpr int kNumMutexes = 3;
-
- // Create a bunch of robust mutexes.
- pthread_mutexattr_t attrs[kNumMutexes];
- pthread_mutex_t mtxs[kNumMutexes];
- for (int i = 0; i < kNumMutexes; i++) {
- TEST_PCHECK(pthread_mutexattr_init(&attrs[i]) == 0);
- TEST_PCHECK(pthread_mutexattr_setrobust(&attrs[i], PTHREAD_MUTEX_ROBUST) ==
- 0);
- TEST_PCHECK(pthread_mutex_init(&mtxs[i], &attrs[i]) == 0);
- }
-
- // Start thread to lock the mutexes and then exit.
- ScopedThread t([&] {
- for (int i = 0; i < kNumMutexes; i++) {
- TEST_PCHECK(pthread_mutex_lock(&mtxs[i]) == 0);
- }
- pthread_exit(NULL);
- });
-
- // Wait for thread.
- t.Join();
-
- // Now try to take the mutexes.
- for (int i = 0; i < kNumMutexes; i++) {
- // Should get EOWNERDEAD.
- EXPECT_EQ(pthread_mutex_lock(&mtxs[i]), EOWNERDEAD);
- // Make the mutex consistent.
- EXPECT_EQ(pthread_mutex_consistent(&mtxs[i]), 0);
- // Unlock.
- EXPECT_EQ(pthread_mutex_unlock(&mtxs[i]), 0);
- }
-}
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/getcpu.cc b/test/syscalls/linux/getcpu.cc
deleted file mode 100644
index f4d94bd6a..000000000
--- a/test/syscalls/linux/getcpu.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sched.h>
-
-#include "gtest/gtest.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(GetcpuTest, IsValidCpuStress) {
- const int num_cpus = NumCPUs();
- absl::Time deadline = absl::Now() + absl::Seconds(10);
- while (absl::Now() < deadline) {
- int cpu;
- ASSERT_THAT(cpu = sched_getcpu(), SyscallSucceeds());
- ASSERT_LT(cpu, num_cpus);
- }
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/getdents.cc b/test/syscalls/linux/getdents.cc
deleted file mode 100644
index 2f2b14037..000000000
--- a/test/syscalls/linux/getdents.cc
+++ /dev/null
@@ -1,567 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <dirent.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <stddef.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <string.h>
-#include <sys/mman.h>
-#include <sys/types.h>
-#include <syscall.h>
-#include <unistd.h>
-
-#include <map>
-#include <string>
-#include <unordered_map>
-#include <unordered_set>
-#include <utility>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/container/node_hash_map.h"
-#include "absl/container/node_hash_set.h"
-#include "absl/strings/numbers.h"
-#include "absl/strings/str_cat.h"
-#include "test/util/eventfd_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-using ::testing::Contains;
-using ::testing::IsEmpty;
-using ::testing::IsSupersetOf;
-using ::testing::Not;
-using ::testing::NotNull;
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// New Linux dirent format.
-struct linux_dirent64 {
- uint64_t d_ino; // Inode number
- int64_t d_off; // Offset to next linux_dirent64
- unsigned short d_reclen; // NOLINT, Length of this linux_dirent64
- unsigned char d_type; // NOLINT, File type
- char d_name[0]; // Filename (null-terminated)
-};
-
-// Old Linux dirent format.
-struct linux_dirent {
- unsigned long d_ino; // NOLINT
- unsigned long d_off; // NOLINT
- unsigned short d_reclen; // NOLINT
- char d_name[0];
-};
-
-// Wraps a buffer to provide a set of dirents.
-// T is the underlying dirent type.
-template <typename T>
-class DirentBuffer {
- public:
- // DirentBuffer manages the buffer.
- explicit DirentBuffer(size_t size)
- : managed_(true), actual_size_(size), reported_size_(size) {
- data_ = new char[actual_size_];
- }
-
- // The buffer is managed externally.
- DirentBuffer(char* data, size_t actual_size, size_t reported_size)
- : managed_(false),
- data_(data),
- actual_size_(actual_size),
- reported_size_(reported_size) {}
-
- ~DirentBuffer() {
- if (managed_) {
- delete[] data_;
- }
- }
-
- T* Data() { return reinterpret_cast<T*>(data_); }
-
- T* Start(size_t read) {
- read_ = read;
- if (read_) {
- return Data();
- } else {
- return nullptr;
- }
- }
-
- T* Current() { return reinterpret_cast<T*>(&data_[off_]); }
-
- T* Next() {
- size_t new_off = off_ + Current()->d_reclen;
- if (new_off >= read_ || new_off >= actual_size_) {
- return nullptr;
- }
-
- off_ = new_off;
- return Current();
- }
-
- size_t Size() { return reported_size_; }
-
- void Reset() {
- off_ = 0;
- read_ = 0;
- memset(data_, 0, actual_size_);
- }
-
- private:
- bool managed_;
- char* data_;
- size_t actual_size_;
- size_t reported_size_;
-
- size_t off_ = 0;
-
- size_t read_ = 0;
-};
-
-// Test for getdents/getdents64.
-// T is the Linux dirent type.
-template <typename T>
-class GetdentsTest : public ::testing::Test {
- public:
- using LinuxDirentType = T;
- using DirentBufferType = DirentBuffer<T>;
-
- protected:
- void SetUp() override {
- dir_ = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- fd_ = ASSERT_NO_ERRNO_AND_VALUE(Open(dir_.path(), O_RDONLY | O_DIRECTORY));
- }
-
- // Must be overridden with explicit specialization. See below.
- int SyscallNum();
-
- int Getdents(LinuxDirentType* dirp, unsigned int count) {
- return RetryEINTR(syscall)(SyscallNum(), fd_.get(), dirp, count);
- }
-
- // Fill directory with num files, named by number starting at 0.
- void FillDirectory(size_t num) {
- for (size_t i = 0; i < num; i++) {
- auto name = JoinPath(dir_.path(), absl::StrCat(i));
- TEST_CHECK(CreateWithContents(name, "").ok());
- }
- }
-
- // Fill directory with a given list of filenames.
- void FillDirectoryWithFiles(const std::vector<std::string>& filenames) {
- for (const auto& filename : filenames) {
- auto name = JoinPath(dir_.path(), filename);
- TEST_CHECK(CreateWithContents(name, "").ok());
- }
- }
-
- // Seek to the start of the directory.
- PosixError SeekStart() {
- constexpr off_t kStartOfFile = 0;
- off_t offset = lseek(fd_.get(), kStartOfFile, SEEK_SET);
- if (offset < 0) {
- return PosixError(errno, absl::StrCat("error seeking to ", kStartOfFile));
- }
- if (offset != kStartOfFile) {
- return PosixError(EINVAL, absl::StrCat("tried to seek to ", kStartOfFile,
- " but got ", offset));
- }
- return NoError();
- }
-
- // Call getdents multiple times, reading all dirents and calling f on each.
- // f has the type signature PosixError f(T*).
- // If f returns a non-OK error, so does ReadDirents.
- template <typename F>
- PosixError ReadDirents(DirentBufferType* dirents, F const& f) {
- int n;
- do {
- dirents->Reset();
-
- n = Getdents(dirents->Data(), dirents->Size());
- MaybeSave();
- if (n < 0) {
- return PosixError(errno, "getdents");
- }
-
- for (auto d = dirents->Start(n); d; d = dirents->Next()) {
- RETURN_IF_ERRNO(f(d));
- }
- } while (n > 0);
-
- return NoError();
- }
-
- // Call Getdents successively and count all entries.
- int ReadAndCountAllEntries(DirentBufferType* dirents) {
- int found = 0;
-
- EXPECT_NO_ERRNO(ReadDirents(dirents, [&](LinuxDirentType* d) {
- found++;
- return NoError();
- }));
-
- return found;
- }
-
- private:
- TempPath dir_;
- FileDescriptor fd_;
-};
-
-// Multiple template parameters are not allowed, so we must use explicit
-// template specialization to set the syscall number.
-
-// SYS_getdents isn't defined on arm64.
-#ifdef __x86_64__
-template <>
-int GetdentsTest<struct linux_dirent>::SyscallNum() {
- return SYS_getdents;
-}
-#endif
-
-template <>
-int GetdentsTest<struct linux_dirent64>::SyscallNum() {
- return SYS_getdents64;
-}
-
-#ifdef __x86_64__
-// Test both legacy getdents and getdents64 on x86_64.
-typedef ::testing::Types<struct linux_dirent, struct linux_dirent64>
- GetdentsTypes;
-#elif __aarch64__
-// Test only getdents64 on arm64.
-typedef ::testing::Types<struct linux_dirent64> GetdentsTypes;
-#endif
-TYPED_TEST_SUITE(GetdentsTest, GetdentsTypes);
-
-// N.B. TYPED_TESTs require explicitly using this-> to access members of
-// GetdentsTest, since we are inside of a derived class template.
-
-TYPED_TEST(GetdentsTest, VerifyEntries) {
- typename TestFixture::DirentBufferType dirents(1024);
-
- this->FillDirectory(2);
-
- // Map of all the entries we expect to find.
- std::map<std::string, bool> found;
- found["."] = false;
- found[".."] = false;
- found["0"] = false;
- found["1"] = false;
-
- EXPECT_NO_ERRNO(this->ReadDirents(
- &dirents, [&](typename TestFixture::LinuxDirentType* d) {
- auto kv = found.find(d->d_name);
- EXPECT_NE(kv, found.end()) << "Unexpected file: " << d->d_name;
- if (kv != found.end()) {
- EXPECT_FALSE(kv->second);
- }
- found[d->d_name] = true;
- return NoError();
- }));
-
- for (auto& kv : found) {
- EXPECT_TRUE(kv.second) << "File not found: " << kv.first;
- }
-}
-
-TYPED_TEST(GetdentsTest, VerifyPadding) {
- typename TestFixture::DirentBufferType dirents(1024);
-
- // Create files with names of length 1 through 16.
- std::vector<std::string> files;
- std::string filename;
- for (int i = 0; i < 16; ++i) {
- absl::StrAppend(&filename, "a");
- files.push_back(filename);
- }
- this->FillDirectoryWithFiles(files);
-
- // We expect to find all the files, plus '.' and '..'.
- const int expect_found = 2 + files.size();
- int found = 0;
-
- EXPECT_NO_ERRNO(this->ReadDirents(
- &dirents, [&](typename TestFixture::LinuxDirentType* d) {
- EXPECT_EQ(d->d_reclen % 8, 0)
- << "Dirent " << d->d_name
- << " had reclen that was not byte aligned: " << d->d_name;
- found++;
- return NoError();
- }));
-
- // Make sure we found all the files.
- EXPECT_EQ(found, expect_found);
-}
-
-// For a small directory, the provided buffer should be large enough
-// for all entries.
-TYPED_TEST(GetdentsTest, SmallDir) {
- // . and .. should be in an otherwise empty directory.
- int expect = 2;
-
- // Add some actual files.
- this->FillDirectory(2);
- expect += 2;
-
- typename TestFixture::DirentBufferType dirents(256);
-
- EXPECT_EQ(expect, this->ReadAndCountAllEntries(&dirents));
-}
-
-// A directory with lots of files requires calling getdents multiple times.
-TYPED_TEST(GetdentsTest, LargeDir) {
- // . and .. should be in an otherwise empty directory.
- int expect = 2;
-
- // Add some actual files.
- this->FillDirectory(100);
- expect += 100;
-
- typename TestFixture::DirentBufferType dirents(256);
-
- EXPECT_EQ(expect, this->ReadAndCountAllEntries(&dirents));
-}
-
-// If we lie about the size of the buffer, we should still be able to read the
-// entries with the available space.
-TYPED_TEST(GetdentsTest, PartialBuffer) {
- // . and .. should be in an otherwise empty directory.
- int expect = 2;
-
- // Add some actual files.
- this->FillDirectory(100);
- expect += 100;
-
- void* addr = mmap(0, 2 * kPageSize, PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
- ASSERT_NE(addr, MAP_FAILED);
-
- char* buf = reinterpret_cast<char*>(addr);
-
- // Guard page
- EXPECT_THAT(
- mprotect(reinterpret_cast<void*>(buf + kPageSize), kPageSize, PROT_NONE),
- SyscallSucceeds());
-
- // Limit space in buf to 256 bytes.
- buf += kPageSize - 256;
-
- // Lie about the buffer. Even though we claim the buffer is 1 page,
- // we should still get all of the dirents in the first 256 bytes.
- typename TestFixture::DirentBufferType dirents(buf, 256, kPageSize);
-
- EXPECT_EQ(expect, this->ReadAndCountAllEntries(&dirents));
-
- EXPECT_THAT(munmap(addr, 2 * kPageSize), SyscallSucceeds());
-}
-
-// Open many file descriptors, then scan through /proc/self/fd to find and close
-// them all. (The latter is commonly used to handle races between fork/execve
-// and the creation of unwanted non-O_CLOEXEC file descriptors.) This tests that
-// getdents iterates correctly despite mutation of /proc/self/fd.
-TYPED_TEST(GetdentsTest, ProcSelfFd) {
- constexpr size_t kNfds = 10;
- absl::node_hash_map<int, FileDescriptor> fds;
- fds.reserve(kNfds);
- for (size_t i = 0; i < kNfds; i++) {
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD());
- fds.emplace(fd.get(), std::move(fd));
- }
-
- const FileDescriptor proc_self_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/proc/self/fd", O_RDONLY | O_DIRECTORY));
-
- // Make the buffer very small since we want to iterate.
- typename TestFixture::DirentBufferType dirents(
- 2 * sizeof(typename TestFixture::LinuxDirentType));
- absl::node_hash_set<int> prev_fds;
- while (true) {
- dirents.Reset();
- int rv;
- ASSERT_THAT(rv = RetryEINTR(syscall)(this->SyscallNum(), proc_self_fd.get(),
- dirents.Data(), dirents.Size()),
- SyscallSucceeds());
- if (rv == 0) {
- break;
- }
- for (auto* d = dirents.Start(rv); d; d = dirents.Next()) {
- int dfd;
- if (!absl::SimpleAtoi(d->d_name, &dfd)) continue;
- EXPECT_TRUE(prev_fds.insert(dfd).second)
- << "Repeated observation of /proc/self/fd/" << dfd;
- fds.erase(dfd);
- }
- }
-
- // Check that we closed every fd.
- EXPECT_THAT(fds, ::testing::IsEmpty());
-}
-
-// Test that getdents returns ENOTDIR when called on a file.
-TYPED_TEST(GetdentsTest, NotDir) {
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDONLY));
-
- typename TestFixture::DirentBufferType dirents(256);
- EXPECT_THAT(RetryEINTR(syscall)(this->SyscallNum(), fd.get(), dirents.Data(),
- dirents.Size()),
- SyscallFailsWithErrno(ENOTDIR));
-}
-
-// Test that getdents returns EBADF when called on an opath file.
-TYPED_TEST(GetdentsTest, OpathFile) {
- SKIP_IF(IsRunningWithVFS1());
-
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_PATH));
-
- typename TestFixture::DirentBufferType dirents(256);
- EXPECT_THAT(RetryEINTR(syscall)(this->SyscallNum(), fd.get(), dirents.Data(),
- dirents.Size()),
- SyscallFailsWithErrno(EBADF));
-}
-
-// Test that getdents returns EBADF when called on an opath directory.
-TYPED_TEST(GetdentsTest, OpathDirectory) {
- SKIP_IF(IsRunningWithVFS1());
-
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(Open(dir.path(), O_PATH | O_DIRECTORY));
-
- typename TestFixture::DirentBufferType dirents(256);
- ASSERT_THAT(RetryEINTR(syscall)(this->SyscallNum(), fd.get(), dirents.Data(),
- dirents.Size()),
- SyscallFailsWithErrno(EBADF));
-}
-
-// Test that SEEK_SET to 0 causes getdents to re-read the entries.
-TYPED_TEST(GetdentsTest, SeekResetsCursor) {
- // . and .. should be in an otherwise empty directory.
- int expect = 2;
-
- // Add some files to the directory.
- this->FillDirectory(10);
- expect += 10;
-
- typename TestFixture::DirentBufferType dirents(256);
-
- // We should get all the expected entries.
- EXPECT_EQ(expect, this->ReadAndCountAllEntries(&dirents));
-
- // Seek back to 0.
- ASSERT_NO_ERRNO(this->SeekStart());
-
- // We should get all the expected entries again.
- EXPECT_EQ(expect, this->ReadAndCountAllEntries(&dirents));
-}
-
-// Test that getdents() after SEEK_END succeeds.
-// This is a regression test for #128.
-TYPED_TEST(GetdentsTest, Issue128ProcSeekEnd) {
- auto fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/proc/self", O_RDONLY | O_DIRECTORY));
- typename TestFixture::DirentBufferType dirents(256);
-
- ASSERT_THAT(lseek(fd.get(), 0, SEEK_END), SyscallSucceeds());
- ASSERT_THAT(RetryEINTR(syscall)(this->SyscallNum(), fd.get(), dirents.Data(),
- dirents.Size()),
- SyscallSucceeds());
-}
-
-// Some tests using the glibc readdir interface.
-TEST(ReaddirTest, OpenDir) {
- DIR* dev;
- ASSERT_THAT(dev = opendir("/dev"), NotNull());
- EXPECT_THAT(closedir(dev), SyscallSucceeds());
-}
-
-TEST(ReaddirTest, RootContainsBasicDirectories) {
- EXPECT_THAT(ListDir("/", true),
- IsPosixErrorOkAndHolds(IsSupersetOf(
- {"bin", "dev", "etc", "lib", "proc", "sbin", "usr"})));
-}
-
-TEST(ReaddirTest, Bug24096713Dev) {
- auto contents = ASSERT_NO_ERRNO_AND_VALUE(ListDir("/dev", true));
- EXPECT_THAT(contents, Not(IsEmpty()));
-}
-
-TEST(ReaddirTest, Bug24096713ProcTid) {
- auto contents = ASSERT_NO_ERRNO_AND_VALUE(
- ListDir(absl::StrCat("/proc/", syscall(SYS_gettid), "/"), true));
- EXPECT_THAT(contents, Not(IsEmpty()));
-}
-
-TEST(ReaddirTest, Bug33429925Proc) {
- auto contents = ASSERT_NO_ERRNO_AND_VALUE(ListDir("/proc", true));
- EXPECT_THAT(contents, Not(IsEmpty()));
-}
-
-TEST(ReaddirTest, Bug35110122Root) {
- auto contents = ASSERT_NO_ERRNO_AND_VALUE(ListDir("/", true));
- EXPECT_THAT(contents, Not(IsEmpty()));
-}
-
-// Unlink should invalidate getdents cache.
-TEST(ReaddirTest, GoneAfterRemoveCache) {
- TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir.path()));
- std::string name = std::string(Basename(file.path()));
-
- auto contents = ASSERT_NO_ERRNO_AND_VALUE(ListDir(dir.path(), true));
- EXPECT_THAT(contents, Contains(name));
-
- file.reset();
-
- contents = ASSERT_NO_ERRNO_AND_VALUE(ListDir(dir.path(), true));
- EXPECT_THAT(contents, Not(Contains(name)));
-}
-
-// Regression test for b/137398511. Rename should invalidate getdents cache.
-TEST(ReaddirTest, GoneAfterRenameCache) {
- TempPath src = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- TempPath dst = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(src.path()));
- std::string name = std::string(Basename(file.path()));
-
- auto contents = ASSERT_NO_ERRNO_AND_VALUE(ListDir(src.path(), true));
- EXPECT_THAT(contents, Contains(name));
-
- ASSERT_THAT(rename(file.path().c_str(), JoinPath(dst.path(), name).c_str()),
- SyscallSucceeds());
- // Release file since it was renamed. dst cleanup will ultimately delete it.
- file.release();
-
- contents = ASSERT_NO_ERRNO_AND_VALUE(ListDir(src.path(), true));
- EXPECT_THAT(contents, Not(Contains(name)));
-
- contents = ASSERT_NO_ERRNO_AND_VALUE(ListDir(dst.path(), true));
- EXPECT_THAT(contents, Contains(name));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/getrandom.cc b/test/syscalls/linux/getrandom.cc
deleted file mode 100644
index f87cdd7a1..000000000
--- a/test/syscalls/linux/getrandom.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-#ifndef SYS_getrandom
-#if defined(__x86_64__)
-#define SYS_getrandom 318
-#elif defined(__i386__)
-#define SYS_getrandom 355
-#elif defined(__aarch64__)
-#define SYS_getrandom 278
-#else
-#error "Unknown architecture"
-#endif
-#endif // SYS_getrandom
-
-bool SomeByteIsNonZero(char* random_bytes, int length) {
- for (int i = 0; i < length; i++) {
- if (random_bytes[i] != 0) {
- return true;
- }
- }
- return false;
-}
-
-TEST(GetrandomTest, IsRandom) {
- // This test calls getrandom and makes sure that the array is filled in with
- // something that is non-zero. Perhaps we get back \x00\x00\x00\x00\x00.... as
- // a random result, but it's so unlikely that we'll just ignore this.
- char random_bytes[64] = {};
- int n = syscall(SYS_getrandom, random_bytes, 64, 0);
- SKIP_IF(!IsRunningOnGvisor() && n < 0 && errno == ENOSYS);
- EXPECT_THAT(n, SyscallSucceeds());
- EXPECT_GT(n, 0); // Some bytes should be returned.
- EXPECT_TRUE(SomeByteIsNonZero(random_bytes, n));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/getrusage.cc b/test/syscalls/linux/getrusage.cc
deleted file mode 100644
index e84cbfdc3..000000000
--- a/test/syscalls/linux/getrusage.cc
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <signal.h>
-#include <sys/mman.h>
-#include <sys/resource.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-
-#include "gtest/gtest.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/util/logging.h"
-#include "test/util/memory_util.h"
-#include "test/util/multiprocess_util.h"
-#include "test/util/signal_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(GetrusageTest, BasicFork) {
- pid_t pid = fork();
- if (pid == 0) {
- struct rusage rusage_self;
- TEST_PCHECK(getrusage(RUSAGE_SELF, &rusage_self) == 0);
- struct rusage rusage_children;
- TEST_PCHECK(getrusage(RUSAGE_CHILDREN, &rusage_children) == 0);
- // The child has consumed some memory.
- TEST_CHECK(rusage_self.ru_maxrss != 0);
- // The child has no children of its own.
- TEST_CHECK(rusage_children.ru_maxrss == 0);
- _exit(0);
- }
- ASSERT_THAT(pid, SyscallSucceeds());
- int status;
- ASSERT_THAT(RetryEINTR(waitpid)(pid, &status, 0), SyscallSucceeds());
- struct rusage rusage_self;
- ASSERT_THAT(getrusage(RUSAGE_SELF, &rusage_self), SyscallSucceeds());
- struct rusage rusage_children;
- ASSERT_THAT(getrusage(RUSAGE_CHILDREN, &rusage_children), SyscallSucceeds());
- // The parent has consumed some memory.
- EXPECT_GT(rusage_self.ru_maxrss, 0);
- // The child has consumed some memory, and because it has exited we can get
- // its max RSS.
- EXPECT_GT(rusage_children.ru_maxrss, 0);
-}
-
-// Verifies that a process can get the max resident set size of its grandchild,
-// i.e. that maxrss propagates correctly from children to waiting parents.
-TEST(GetrusageTest, Grandchild) {
- constexpr int kGrandchildSizeKb = 1024;
- pid_t pid = fork();
- if (pid == 0) {
- pid = fork();
- if (pid == 0) {
- int flags = MAP_ANONYMOUS | MAP_POPULATE | MAP_PRIVATE;
- void* addr =
- mmap(nullptr, kGrandchildSizeKb * 1024, PROT_WRITE, flags, -1, 0);
- TEST_PCHECK(addr != MAP_FAILED);
- } else {
- int status;
- TEST_PCHECK(RetryEINTR(waitpid)(pid, &status, 0) == pid);
- }
- _exit(0);
- }
- ASSERT_THAT(pid, SyscallSucceeds());
- int status;
- ASSERT_THAT(RetryEINTR(waitpid)(pid, &status, 0), SyscallSucceeds());
- struct rusage rusage_self;
- ASSERT_THAT(getrusage(RUSAGE_SELF, &rusage_self), SyscallSucceeds());
- struct rusage rusage_children;
- ASSERT_THAT(getrusage(RUSAGE_CHILDREN, &rusage_children), SyscallSucceeds());
- // The parent has consumed some memory.
- EXPECT_GT(rusage_self.ru_maxrss, 0);
- // The child should consume next to no memory, but the grandchild will
- // consume at least 1MB. Verify that usage bubbles up to the grandparent.
- EXPECT_GT(rusage_children.ru_maxrss, kGrandchildSizeKb);
-}
-
-// Verifies that processes ignoring SIGCHLD do not have their child maxrss
-// updated.
-TEST(GetrusageTest, IgnoreSIGCHLD) {
- const auto rest = [] {
- struct sigaction sa;
- sa.sa_handler = SIG_IGN;
- sa.sa_flags = 0;
- auto cleanup = TEST_CHECK_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGCHLD, sa));
- pid_t pid = fork();
- if (pid == 0) {
- struct rusage rusage_self;
- TEST_PCHECK(getrusage(RUSAGE_SELF, &rusage_self) == 0);
- // The child has consumed some memory.
- TEST_CHECK(rusage_self.ru_maxrss != 0);
- _exit(0);
- }
- TEST_CHECK_SUCCESS(pid);
- int status;
- TEST_CHECK_ERRNO(RetryEINTR(waitpid)(pid, &status, 0), ECHILD);
- struct rusage rusage_self;
- TEST_CHECK_SUCCESS(getrusage(RUSAGE_SELF, &rusage_self));
- struct rusage rusage_children;
- TEST_CHECK_SUCCESS(getrusage(RUSAGE_CHILDREN, &rusage_children));
- // The parent has consumed some memory.
- TEST_CHECK(rusage_self.ru_maxrss > 0);
- // The child's maxrss should not have propagated up.
- TEST_CHECK(rusage_children.ru_maxrss == 0);
- };
- // Execute inside a forked process so that rusage_children is clean.
- EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));
-}
-
-// Verifies that zombie processes do not update their parent's maxrss. Only
-// reaped processes should do this.
-TEST(GetrusageTest, IgnoreZombie) {
- const auto rest = [] {
- pid_t pid = fork();
- if (pid == 0) {
- struct rusage rusage_self;
- TEST_PCHECK(getrusage(RUSAGE_SELF, &rusage_self) == 0);
- struct rusage rusage_children;
- TEST_PCHECK(getrusage(RUSAGE_CHILDREN, &rusage_children) == 0);
- // The child has consumed some memory.
- TEST_CHECK(rusage_self.ru_maxrss != 0);
- // The child has no children of its own.
- TEST_CHECK(rusage_children.ru_maxrss == 0);
- _exit(0);
- }
- TEST_CHECK_SUCCESS(pid);
- // Give the child time to exit. Because we don't call wait, the child should
- // remain a zombie.
- absl::SleepFor(absl::Seconds(5));
- struct rusage rusage_self;
- TEST_CHECK_SUCCESS(getrusage(RUSAGE_SELF, &rusage_self));
- struct rusage rusage_children;
- TEST_CHECK_SUCCESS(getrusage(RUSAGE_CHILDREN, &rusage_children));
- // The parent has consumed some memory.
- TEST_CHECK(rusage_self.ru_maxrss > 0);
- // The child has consumed some memory, but hasn't been reaped.
- TEST_CHECK(rusage_children.ru_maxrss == 0);
- };
- // Execute inside a forked process so that rusage_children is clean.
- EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));
-}
-
-TEST(GetrusageTest, Wait4) {
- pid_t pid = fork();
- if (pid == 0) {
- struct rusage rusage_self;
- TEST_PCHECK(getrusage(RUSAGE_SELF, &rusage_self) == 0);
- struct rusage rusage_children;
- TEST_PCHECK(getrusage(RUSAGE_CHILDREN, &rusage_children) == 0);
- // The child has consumed some memory.
- TEST_CHECK(rusage_self.ru_maxrss != 0);
- // The child has no children of its own.
- TEST_CHECK(rusage_children.ru_maxrss == 0);
- _exit(0);
- }
- ASSERT_THAT(pid, SyscallSucceeds());
- struct rusage rusage_children;
- int status;
- ASSERT_THAT(RetryEINTR(wait4)(pid, &status, 0, &rusage_children),
- SyscallSucceeds());
- // The child has consumed some memory, and because it has exited we can get
- // its max RSS.
- EXPECT_GT(rusage_children.ru_maxrss, 0);
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/inotify.cc b/test/syscalls/linux/inotify.cc
deleted file mode 100644
index f6b78989b..000000000
--- a/test/syscalls/linux/inotify.cc
+++ /dev/null
@@ -1,2532 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <libgen.h>
-#include <sched.h>
-#include <sys/epoll.h>
-#include <sys/inotify.h>
-#include <sys/ioctl.h>
-#include <sys/sendfile.h>
-#include <sys/time.h>
-#include <sys/xattr.h>
-
-#include <atomic>
-#include <list>
-#include <string>
-#include <vector>
-
-#include "absl/strings/str_cat.h"
-#include "absl/strings/str_format.h"
-#include "absl/strings/str_join.h"
-#include "absl/synchronization/mutex.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/util/epoll_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/multiprocess_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-using ::absl::StreamFormat;
-using ::absl::StrFormat;
-
-constexpr int kBufSize = 1024;
-
-// C++-friendly version of struct inotify_event.
-struct Event {
- int32_t wd;
- uint32_t mask;
- uint32_t cookie;
- uint32_t len;
- std::string name;
-
- Event(uint32_t mask, int32_t wd, absl::string_view name, uint32_t cookie)
- : wd(wd),
- mask(mask),
- cookie(cookie),
- len(name.size()),
- name(std::string(name)) {}
- Event(uint32_t mask, int32_t wd, absl::string_view name)
- : Event(mask, wd, name, 0) {}
- Event(uint32_t mask, int32_t wd) : Event(mask, wd, "", 0) {}
- Event() : Event(0, 0, "", 0) {}
-};
-
-// Returns the symbolic names for a struct inotify_event's 'mask' field.
-std::string FlagString(uint32_t flags) {
- std::vector<std::string> names;
-
-#define EMIT(target) \
- if (flags & target) { \
- names.push_back(#target); \
- flags &= ~target; \
- }
-
- EMIT(IN_ACCESS);
- EMIT(IN_ATTRIB);
- EMIT(IN_CLOSE_WRITE);
- EMIT(IN_CLOSE_NOWRITE);
- EMIT(IN_CREATE);
- EMIT(IN_DELETE);
- EMIT(IN_DELETE_SELF);
- EMIT(IN_MODIFY);
- EMIT(IN_MOVE_SELF);
- EMIT(IN_MOVED_FROM);
- EMIT(IN_MOVED_TO);
- EMIT(IN_OPEN);
-
- EMIT(IN_DONT_FOLLOW);
- EMIT(IN_EXCL_UNLINK);
- EMIT(IN_ONESHOT);
- EMIT(IN_ONLYDIR);
-
- EMIT(IN_IGNORED);
- EMIT(IN_ISDIR);
- EMIT(IN_Q_OVERFLOW);
- EMIT(IN_UNMOUNT);
-
-#undef EMIT
-
- // If we have anything left over at the end, append it as a hex value.
- if (flags) {
- names.push_back(absl::StrCat("0x", absl::Hex(flags)));
- }
-
- return absl::StrJoin(names, "|");
-}
-
-std::string DumpEvent(const Event& event) {
- return StrFormat(
- "%s, wd=%d%s%s", FlagString(event.mask), event.wd,
- (event.len > 0) ? StrFormat(", name=%s", event.name) : "",
- (event.cookie > 0) ? StrFormat(", cookie=%ud", event.cookie) : "");
-}
-
-std::string DumpEvents(const std::vector<Event>& events, int indent_level) {
- std::stringstream ss;
- ss << StreamFormat("%d event%s:\n", events.size(),
- (events.size() > 1) ? "s" : "");
- int i = 0;
- for (const Event& ev : events) {
- ss << StreamFormat("%sevents[%d]: %s\n", std::string(indent_level, '\t'),
- i++, DumpEvent(ev));
- }
- return ss.str();
-}
-
-// A matcher which takes an expected list of events to match against another
-// list of inotify events, in order. This is similar to the ElementsAre matcher,
-// but displays more informative messages on mismatch.
-class EventsAreMatcher
- : public ::testing::MatcherInterface<std::vector<Event>> {
- public:
- explicit EventsAreMatcher(std::vector<Event> references)
- : references_(std::move(references)) {}
-
- bool MatchAndExplain(
- std::vector<Event> events,
- ::testing::MatchResultListener* const listener) const override {
- if (references_.size() != events.size()) {
- *listener << StreamFormat("\n\tCount mismatch, got %s",
- DumpEvents(events, 2));
- return false;
- }
-
- bool success = true;
- for (unsigned int i = 0; i < references_.size(); ++i) {
- const Event& reference = references_[i];
- const Event& target = events[i];
-
- if (target.mask != reference.mask || target.wd != reference.wd ||
- target.name != reference.name || target.cookie != reference.cookie) {
- *listener << StreamFormat("\n\tMismatch at index %d, want %s, got %s,",
- i, DumpEvent(reference), DumpEvent(target));
- success = false;
- }
- }
-
- if (!success) {
- *listener << StreamFormat("\n\tIn total of %s", DumpEvents(events, 2));
- }
- return success;
- }
-
- void DescribeTo(::std::ostream* const os) const override {
- *os << StreamFormat("%s", DumpEvents(references_, 1));
- }
-
- void DescribeNegationTo(::std::ostream* const os) const override {
- *os << StreamFormat("mismatch from %s", DumpEvents(references_, 1));
- }
-
- private:
- std::vector<Event> references_;
-};
-
-::testing::Matcher<std::vector<Event>> Are(std::vector<Event> events) {
- return MakeMatcher(new EventsAreMatcher(std::move(events)));
-}
-
-// Similar to the EventsAre matcher, but the order of events is ignored.
-class UnorderedEventsAreMatcher
- : public ::testing::MatcherInterface<std::vector<Event>> {
- public:
- explicit UnorderedEventsAreMatcher(std::vector<Event> references)
- : references_(std::move(references)) {}
-
- bool MatchAndExplain(
- std::vector<Event> events,
- ::testing::MatchResultListener* const listener) const override {
- if (references_.size() != events.size()) {
- *listener << StreamFormat("\n\tCount mismatch, got %s",
- DumpEvents(events, 2));
- return false;
- }
-
- std::vector<Event> unmatched(references_);
-
- for (const Event& candidate : events) {
- for (auto it = unmatched.begin(); it != unmatched.end();) {
- const Event& reference = *it;
- if (candidate.mask == reference.mask && candidate.wd == reference.wd &&
- candidate.name == reference.name &&
- candidate.cookie == reference.cookie) {
- it = unmatched.erase(it);
- break;
- } else {
- ++it;
- }
- }
- }
-
- // Anything left unmatched? If so, the matcher fails.
- if (!unmatched.empty()) {
- *listener << StreamFormat("\n\tFailed to match %s",
- DumpEvents(unmatched, 2));
- *listener << StreamFormat("\n\tIn total of %s", DumpEvents(events, 2));
- return false;
- }
-
- return true;
- }
-
- void DescribeTo(::std::ostream* const os) const override {
- *os << StreamFormat("unordered %s", DumpEvents(references_, 1));
- }
-
- void DescribeNegationTo(::std::ostream* const os) const override {
- *os << StreamFormat("mismatch from unordered %s",
- DumpEvents(references_, 1));
- }
-
- private:
- std::vector<Event> references_;
-};
-
-::testing::Matcher<std::vector<Event>> AreUnordered(std::vector<Event> events) {
- return MakeMatcher(new UnorderedEventsAreMatcher(std::move(events)));
-}
-
-// Reads events from an inotify fd until either EOF, or read returns EAGAIN.
-PosixErrorOr<std::vector<Event>> DrainEvents(int fd) {
- std::vector<Event> events;
- while (true) {
- int events_size = 0;
- if (ioctl(fd, FIONREAD, &events_size) < 0) {
- return PosixError(errno, "ioctl(FIONREAD) failed on inotify fd");
- }
- // Deliberately use a buffer that is larger than necessary, expecting to
- // only read events_size bytes.
- std::vector<char> buf(events_size + kBufSize, 0);
- const ssize_t readlen = read(fd, buf.data(), buf.size());
- MaybeSave();
- // Read error?
- if (readlen < 0) {
- if (errno == EAGAIN) {
- // If EAGAIN, no more events at the moment. Return what we have so far.
- return events;
- }
- // Some other read error. Return an error. Right now if we encounter this
- // after already reading some events, they get lost. However, we don't
- // expect to see any error, and the calling test will fail immediately if
- // we signal an error anyway, so this is acceptable.
- return PosixError(errno, "read() failed on inotify fd");
- }
- if (readlen < static_cast<int>(sizeof(struct inotify_event))) {
- // Impossibly short read.
- return PosixError(
- EIO,
- "read() didn't return enough data to represent even a single event");
- }
- if (readlen != events_size) {
- return PosixError(EINVAL, absl::StrCat("read ", readlen,
- " bytes, expected ", events_size));
- }
- if (readlen == 0) {
- // EOF.
- return events;
- }
-
- // Normal read.
- const char* cursor = buf.data();
- while (cursor < (buf.data() + readlen)) {
- struct inotify_event event = {};
- memcpy(&event, cursor, sizeof(struct inotify_event));
-
- Event ev;
- ev.wd = event.wd;
- ev.mask = event.mask;
- ev.cookie = event.cookie;
- ev.len = event.len;
- if (event.len > 0) {
- TEST_CHECK(static_cast<int>(sizeof(struct inotify_event) + event.len) <=
- readlen);
- ev.name = std::string(cursor +
- offsetof(struct inotify_event, name)); // NOLINT
- // The name should never be larger than event.len, otherwise we have a
- // buffer overflow. The two sizes generally aren't equal because the string
- // constructor stops at the first null byte, while event.name may be padded
- // up to event.len with multiple null bytes.
- TEST_CHECK(ev.name.size() <= event.len);
- }
-
- events.push_back(ev);
- cursor += sizeof(struct inotify_event) + event.len;
- }
- }
-}
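-
-// For reference, a minimal sketch of consuming raw inotify events without the
-// helpers above; it mirrors what DrainEvents does. The watch path, buffer
-// size, and function name are illustrative assumptions only. Per inotify(7),
-// sizeof(struct inotify_event) + NAME_MAX + 1 bytes is enough for one event.
-void ExampleRawInotifyRead() {
- int ifd = inotify_init1(IN_NONBLOCK);
- if (ifd < 0) {
- return;
- }
- inotify_add_watch(ifd, "/tmp", IN_CREATE | IN_DELETE);
- char raw[4096];
- const ssize_t n = read(ifd, raw, sizeof(raw));
- // Each record is a fixed-size header optionally followed by a NUL-padded
- // name of event.len bytes; advance the cursor past both.
- for (const char* p = raw; n > 0 && p < raw + n;) {
- struct inotify_event event = {};
- memcpy(&event, p, sizeof(event));
- p += sizeof(struct inotify_event) + event.len;
- }
- close(ifd);
-}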
-
-PosixErrorOr<FileDescriptor> InotifyInit1(int flags) {
- int fd = inotify_init1(flags);
- if (fd < 0) {
- return PosixError(errno, "inotify_init1() failed");
- }
- return FileDescriptor(fd);
-}
-
-PosixErrorOr<int> InotifyAddWatch(int fd, const std::string& path,
- uint32_t mask) {
- int wd = inotify_add_watch(fd, path.c_str(), mask);
- if (wd < 0) {
- return PosixError(errno, "inotify_add_watch() failed");
- }
- return wd;
-}
-
-TEST(Inotify, IllegalSeek) {
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(0));
- EXPECT_THAT(lseek(fd.get(), 0, SEEK_SET), SyscallFailsWithErrno(ESPIPE));
-}
-
-TEST(Inotify, IllegalPread) {
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(0));
- int val;
- EXPECT_THAT(pread(fd.get(), &val, sizeof(val), 0),
- SyscallFailsWithErrno(ESPIPE));
-}
-
-TEST(Inotify, IllegalPwrite) {
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(0));
- EXPECT_THAT(pwrite(fd.get(), "x", 1, 0), SyscallFailsWithErrno(ESPIPE));
-}
-
-TEST(Inotify, IllegalWrite) {
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(0));
- int val = 0;
- EXPECT_THAT(write(fd.get(), &val, sizeof(val)), SyscallFailsWithErrno(EBADF));
-}
-
-TEST(Inotify, InitFlags) {
- EXPECT_THAT(inotify_init1(IN_NONBLOCK | IN_CLOEXEC), SyscallSucceeds());
- EXPECT_THAT(inotify_init1(12345), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(Inotify, NonBlockingReadReturnsEagain) {
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
- std::vector<char> buf(kBufSize, 0);
-
- // The read below should fail with EAGAIN because there is no data to
- // read and we've specified IN_NONBLOCK. We're guaranteed that there is no
- // data to read because we haven't registered any watches yet.
- EXPECT_THAT(read(fd.get(), buf.data(), buf.size()),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-TEST(Inotify, AddWatchOnInvalidFdFails) {
- // Garbage fd.
- EXPECT_THAT(inotify_add_watch(-1, "/tmp", IN_ALL_EVENTS),
- SyscallFailsWithErrno(EBADF));
- EXPECT_THAT(inotify_add_watch(1337, "/tmp", IN_ALL_EVENTS),
- SyscallFailsWithErrno(EBADF));
-
- // Non-inotify fds.
- EXPECT_THAT(inotify_add_watch(0, "/tmp", IN_ALL_EVENTS),
- SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(inotify_add_watch(1, "/tmp", IN_ALL_EVENTS),
- SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(inotify_add_watch(2, "/tmp", IN_ALL_EVENTS),
- SyscallFailsWithErrno(EINVAL));
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open("/tmp", O_RDONLY));
- EXPECT_THAT(inotify_add_watch(fd.get(), "/tmp", IN_ALL_EVENTS),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(Inotify, RemovingWatchGeneratesEvent) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
-
- const int wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), root.path(), IN_ALL_EVENTS));
- EXPECT_THAT(inotify_rm_watch(fd.get(), wd), SyscallSucceeds());
-
- // Read events and check that removing the watch generated an IN_IGNORED event.
- const std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- EXPECT_THAT(events, Are({Event(IN_IGNORED, wd)}));
-}
-
-TEST(Inotify, CanDeleteFileAfterRemovingWatch) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- TempPath file1 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(root.path()));
-
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
- const int wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), file1.path(), IN_ALL_EVENTS));
-
- EXPECT_THAT(inotify_rm_watch(fd.get(), wd), SyscallSucceeds());
- file1.reset();
-}
-
-TEST(Inotify, RemoveWatchAfterDeletingFileFails) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- TempPath file1 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(root.path()));
-
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
- const int wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), file1.path(), IN_ALL_EVENTS));
-
- file1.reset();
- const std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- EXPECT_THAT(events, Are({Event(IN_ATTRIB, wd), Event(IN_DELETE_SELF, wd),
- Event(IN_IGNORED, wd)}));
-
- EXPECT_THAT(inotify_rm_watch(fd.get(), wd), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(Inotify, DuplicateWatchRemovalFails) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
- const int wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), root.path(), IN_ALL_EVENTS));
-
- EXPECT_THAT(inotify_rm_watch(fd.get(), wd), SyscallSucceeds());
- EXPECT_THAT(inotify_rm_watch(fd.get(), wd), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(Inotify, ConcurrentFileDeletionAndWatchRemoval) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
- const std::string filename = NewTempAbsPathInDir(root.path());
-
- auto file_create_delete = [filename]() {
- const DisableSave ds; // Too expensive.
- for (int i = 0; i < 100; ++i) {
- FileDescriptor file_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(filename, O_CREAT, S_IRUSR | S_IWUSR));
- // Close before unlinking (although S/R is disabled). Some filesystems
- // cannot restore an open fd on an unlinked file.
- file_fd.reset();
- EXPECT_THAT(unlink(filename.c_str()), SyscallSucceeds());
- }
- };
-
- const int shared_fd = fd.get(); // We need to pass it to the thread.
- auto add_remove_watch = [shared_fd, filename]() {
- for (int i = 0; i < 100; ++i) {
- int wd = inotify_add_watch(shared_fd, filename.c_str(), IN_ALL_EVENTS);
- MaybeSave();
- if (wd != -1) {
- // Watch added successfully, try removal.
- if (inotify_rm_watch(shared_fd, wd)) {
- // If removal fails, the only acceptable reason is if the wd
- // is invalid, which will be the case if we try to remove
- // the watch after the file has been deleted.
- EXPECT_EQ(errno, EINVAL);
- }
- } else {
- // Add watch failed, this should only fail if the target file doesn't
- // exist.
- EXPECT_EQ(errno, ENOENT);
- }
- }
- };
-
- ScopedThread t1(file_create_delete);
- ScopedThread t2(add_remove_watch);
-}
-
-TEST(Inotify, DeletingChildGeneratesEvents) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- TempPath file1 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(root.path()));
-
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
- const int root_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), root.path(), IN_ALL_EVENTS));
- const int file1_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), file1.path(), IN_ALL_EVENTS));
-
- const std::string file1_path = file1.reset();
-
- const std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(
- events,
- AreUnordered({Event(IN_ATTRIB, file1_wd), Event(IN_DELETE_SELF, file1_wd),
- Event(IN_IGNORED, file1_wd),
- Event(IN_DELETE, root_wd, Basename(file1_path))}));
-}
-
-// Creating a file in "parent/child" should generate events for child, but not
-// parent.
-TEST(Inotify, CreatingFileGeneratesEvents) {
- const TempPath parent = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const TempPath child =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(parent.path()));
-
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
- ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), parent.path(), IN_ALL_EVENTS));
- const int wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), child.path(), IN_ALL_EVENTS));
-
- // Create a new file in the directory.
- const TempPath file1 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(child.path()));
- const std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
-
- // The library function we use to create the new file opens it for writing to
- // create it and sets permissions on it, so we expect the three extra events.
- ASSERT_THAT(events, Are({Event(IN_CREATE, wd, Basename(file1.path())),
- Event(IN_OPEN, wd, Basename(file1.path())),
- Event(IN_CLOSE_WRITE, wd, Basename(file1.path())),
- Event(IN_ATTRIB, wd, Basename(file1.path()))}));
-}
-
-TEST(Inotify, ReadingFileGeneratesAccessEvent) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
- const TempPath file1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- root.path(), "some content", TempPath::kDefaultFileMode));
-
- const FileDescriptor file1_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file1.path(), O_RDONLY));
- const int wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), root.path(), IN_ALL_EVENTS));
-
- char buf;
- EXPECT_THAT(read(file1_fd.get(), &buf, 1), SyscallSucceeds());
-
- const std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(events, Are({Event(IN_ACCESS, wd, Basename(file1.path()))}));
-}
-
-TEST(Inotify, WritingFileGeneratesModifyEvent) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
- const TempPath file1 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(root.path()));
-
- const FileDescriptor file1_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file1.path(), O_WRONLY));
- const int wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), root.path(), IN_ALL_EVENTS));
-
- const std::string data = "some content";
- EXPECT_THAT(write(file1_fd.get(), data.c_str(), data.length()),
- SyscallSucceeds());
-
- const std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(events, Are({Event(IN_MODIFY, wd, Basename(file1.path()))}));
-}
-
-TEST(Inotify, SizeZeroReadWriteGeneratesNothing) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
- const TempPath file1 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(root.path()));
-
- const FileDescriptor file1_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file1.path(), O_RDWR));
- ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), root.path(), IN_ALL_EVENTS));
-
- // Read from the empty file.
- int val;
- ASSERT_THAT(read(file1_fd.get(), &val, sizeof(val)),
- SyscallSucceedsWithValue(0));
-
- // Write zero bytes.
- ASSERT_THAT(write(file1_fd.get(), "", 0), SyscallSucceedsWithValue(0));
-
- const std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(events, Are({}));
-}
-
-TEST(Inotify, FailedFileCreationGeneratesNoEvents) {
- const TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const std::string dir_path = dir.path();
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
- ASSERT_NO_ERRNO_AND_VALUE(InotifyAddWatch(fd.get(), dir_path, IN_ALL_EVENTS));
-
- const char* p = dir_path.c_str();
- ASSERT_THAT(mkdir(p, 0777), SyscallFails());
- ASSERT_THAT(mknod(p, S_IFIFO, 0777), SyscallFails());
- ASSERT_THAT(symlink(p, p), SyscallFails());
- ASSERT_THAT(link(p, p), SyscallFails());
- std::vector<Event> events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(events, Are({}));
-}
-
-TEST(Inotify, WatchSetAfterOpenReportsCloseFdEvent) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
- const TempPath file1 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(root.path()));
-
- FileDescriptor file1_fd_writable =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file1.path(), O_WRONLY));
- FileDescriptor file1_fd_not_writable =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file1.path(), O_RDONLY));
- const int wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), root.path(), IN_ALL_EVENTS));
-
- file1_fd_writable.reset(); // Close file1_fd_writable.
- std::vector<Event> events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(events, Are({Event(IN_CLOSE_WRITE, wd, Basename(file1.path()))}));
-
- file1_fd_not_writable.reset(); // Close file1_fd_not_writable.
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(events,
- Are({Event(IN_CLOSE_NOWRITE, wd, Basename(file1.path()))}));
-}
-
-TEST(Inotify, ChildrenDeletionInWatchedDirGeneratesEvent) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
-
- TempPath file1 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(root.path()));
- TempPath dir1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(root.path()));
-
- const int wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), root.path(), IN_ALL_EVENTS));
-
- const std::string file1_path = file1.reset();
- const std::string dir1_path = dir1.release();
- EXPECT_THAT(rmdir(dir1_path.c_str()), SyscallSucceeds());
-
- const std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
-
- ASSERT_THAT(events,
- Are({Event(IN_DELETE, wd, Basename(file1_path)),
- Event(IN_DELETE | IN_ISDIR, wd, Basename(dir1_path))}));
-}
-
-TEST(Inotify, RmdirOnWatchedTargetGeneratesEvent) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
-
- const int wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), root.path(), IN_ALL_EVENTS));
-
- EXPECT_THAT(rmdir(root.path().c_str()), SyscallSucceeds());
- const std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(events, Are({Event(IN_DELETE_SELF, wd), Event(IN_IGNORED, wd)}));
-}
-
-TEST(Inotify, MoveGeneratesEvents) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
-
- TempPath file1 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(root.path()));
-
- const TempPath dir1 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(root.path()));
- const TempPath dir2 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(root.path()));
-
- const int root_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), root.path(), IN_ALL_EVENTS));
- const int dir1_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), dir1.path(), IN_ALL_EVENTS));
- const int dir2_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), dir2.path(), IN_ALL_EVENTS));
- // Test move from root -> root.
- std::string newpath = NewTempAbsPathInDir(root.path());
- std::string oldpath = file1.release();
- EXPECT_THAT(rename(oldpath.c_str(), newpath.c_str()), SyscallSucceeds());
- file1.reset(newpath);
- std::vector<Event> events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(
- events,
- Are({Event(IN_MOVED_FROM, root_wd, Basename(oldpath), events[0].cookie),
- Event(IN_MOVED_TO, root_wd, Basename(newpath), events[1].cookie)}));
- EXPECT_NE(events[0].cookie, 0);
- EXPECT_EQ(events[0].cookie, events[1].cookie);
- uint32_t last_cookie = events[0].cookie;
-
- // Test move from root -> root/dir1.
- newpath = NewTempAbsPathInDir(dir1.path());
- oldpath = file1.release();
- EXPECT_THAT(rename(oldpath.c_str(), newpath.c_str()), SyscallSucceeds());
- file1.reset(newpath);
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(
- events,
- Are({Event(IN_MOVED_FROM, root_wd, Basename(oldpath), events[0].cookie),
- Event(IN_MOVED_TO, dir1_wd, Basename(newpath), events[1].cookie)}));
- // Cookies should be distinct between distinct rename events.
- EXPECT_NE(events[0].cookie, last_cookie);
- EXPECT_EQ(events[0].cookie, events[1].cookie);
- last_cookie = events[0].cookie;
-
- // Test move from root/dir1 -> root/dir2.
- newpath = NewTempAbsPathInDir(dir2.path());
- oldpath = file1.release();
- EXPECT_THAT(rename(oldpath.c_str(), newpath.c_str()), SyscallSucceeds());
- file1.reset(newpath);
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(
- events,
- Are({Event(IN_MOVED_FROM, dir1_wd, Basename(oldpath), events[0].cookie),
- Event(IN_MOVED_TO, dir2_wd, Basename(newpath), events[1].cookie)}));
- EXPECT_NE(events[0].cookie, last_cookie);
- EXPECT_EQ(events[0].cookie, events[1].cookie);
- last_cookie = events[0].cookie;
-}
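-
-// For reference, a minimal sketch (the function name is illustrative only) of
-// how a raw consumer could pair the two halves of a rename by cookie after
-// events like those above have been drained.
-void ExamplePairRenameEvents(const std::vector<Event>& evs) {
- for (size_t i = 0; i + 1 < evs.size(); ++i) {
- if ((evs[i].mask & IN_MOVED_FROM) && (evs[i + 1].mask & IN_MOVED_TO) &&
- evs[i].cookie == evs[i + 1].cookie) {
- // evs[i].name was renamed to evs[i + 1].name; the shared, non-zero
- // cookie is the only link between the two halves of the rename.
- }
- }
-}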
-
-TEST(Inotify, MoveWatchedTargetGeneratesEvents) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
-
- TempPath file1 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(root.path()));
-
- const int root_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), root.path(), IN_ALL_EVENTS));
- const int file1_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), file1.path(), IN_ALL_EVENTS));
-
- const std::string newpath = NewTempAbsPathInDir(root.path());
- const std::string oldpath = file1.release();
- EXPECT_THAT(rename(oldpath.c_str(), newpath.c_str()), SyscallSucceeds());
- file1.reset(newpath);
- const std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(
- events,
- Are({Event(IN_MOVED_FROM, root_wd, Basename(oldpath), events[0].cookie),
- Event(IN_MOVED_TO, root_wd, Basename(newpath), events[1].cookie),
- // Self move events do not have a cookie.
- Event(IN_MOVE_SELF, file1_wd)}));
- EXPECT_NE(events[0].cookie, 0);
- EXPECT_EQ(events[0].cookie, events[1].cookie);
-}
-
-// Tests that close events are only emitted when a file description drops its
-// last reference.
-TEST(Inotify, DupFD) {
- SKIP_IF(IsRunningWithVFS1());
-
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor inotify_fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
-
- const int wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(inotify_fd.get(), file.path(), IN_ALL_EVENTS));
-
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDONLY));
- FileDescriptor fd2 = ASSERT_NO_ERRNO_AND_VALUE(fd.Dup());
-
- std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));
- EXPECT_THAT(events, Are({
- Event(IN_OPEN, wd),
- }));
-
- fd.reset();
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));
- EXPECT_THAT(events, Are({}));
-
- fd2.reset();
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));
- EXPECT_THAT(events, Are({
- Event(IN_CLOSE_NOWRITE, wd),
- }));
-}
-
-TEST(Inotify, CoalesceEvents) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
-
- const TempPath file1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- root.path(), "some content", TempPath::kDefaultFileMode));
-
- FileDescriptor file1_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file1.path(), O_RDONLY));
- const int wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), root.path(), IN_ALL_EVENTS));
-
- // Read the file a few times. This would generate multiple IN_ACCESS
- // events, but they should be coalesced into a single event.
- char buf;
- EXPECT_THAT(read(file1_fd.get(), &buf, 1), SyscallSucceeds());
- EXPECT_THAT(read(file1_fd.get(), &buf, 1), SyscallSucceeds());
- EXPECT_THAT(read(file1_fd.get(), &buf, 1), SyscallSucceeds());
- EXPECT_THAT(read(file1_fd.get(), &buf, 1), SyscallSucceeds());
-
- // Use the close event to verify that we haven't simply left the additional
- // IN_ACCESS events unread.
- file1_fd.reset(); // Close file1_fd.
-
- const std::string file1_name = std::string(Basename(file1.path()));
- std::vector<Event> events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(events, Are({Event(IN_ACCESS, wd, file1_name),
- Event(IN_CLOSE_NOWRITE, wd, file1_name)}));
-
- // Now let's try interleaving other events into a stream of repeated events.
- file1_fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file1.path(), O_RDWR));
-
- EXPECT_THAT(read(file1_fd.get(), &buf, 1), SyscallSucceeds());
- EXPECT_THAT(read(file1_fd.get(), &buf, 1), SyscallSucceeds());
- EXPECT_THAT(write(file1_fd.get(), "x", 1), SyscallSucceeds());
- EXPECT_THAT(write(file1_fd.get(), "x", 1), SyscallSucceeds());
- EXPECT_THAT(write(file1_fd.get(), "x", 1), SyscallSucceeds());
- EXPECT_THAT(read(file1_fd.get(), &buf, 1), SyscallSucceeds());
- EXPECT_THAT(read(file1_fd.get(), &buf, 1), SyscallSucceeds());
-
- file1_fd.reset(); // Close the file.
-
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(
- events,
- Are({Event(IN_OPEN, wd, file1_name), Event(IN_ACCESS, wd, file1_name),
- Event(IN_MODIFY, wd, file1_name), Event(IN_ACCESS, wd, file1_name),
- Event(IN_CLOSE_WRITE, wd, file1_name)}));
-
- // Ensure events aren't coalesced if they are from different files.
- const TempPath file2 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- root.path(), "some content", TempPath::kDefaultFileMode));
- // Discard events resulting from creation of file2.
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
-
- file1_fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file1.path(), O_RDONLY));
- FileDescriptor file2_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file2.path(), O_RDONLY));
-
- EXPECT_THAT(read(file1_fd.get(), &buf, 1), SyscallSucceeds());
- EXPECT_THAT(read(file2_fd.get(), &buf, 1), SyscallSucceeds());
- EXPECT_THAT(read(file1_fd.get(), &buf, 1), SyscallSucceeds());
- EXPECT_THAT(read(file1_fd.get(), &buf, 1), SyscallSucceeds());
-
- // Close both files.
- file1_fd.reset();
- file2_fd.reset();
-
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- const std::string file2_name = std::string(Basename(file2.path()));
- ASSERT_THAT(
- events,
- Are({Event(IN_OPEN, wd, file1_name), Event(IN_OPEN, wd, file2_name),
- Event(IN_ACCESS, wd, file1_name), Event(IN_ACCESS, wd, file2_name),
- Event(IN_ACCESS, wd, file1_name),
- Event(IN_CLOSE_NOWRITE, wd, file1_name),
- Event(IN_CLOSE_NOWRITE, wd, file2_name)}));
-}
-
-TEST(Inotify, ClosingInotifyFdWithoutRemovingWatchesWorks) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
-
- const TempPath file1 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(root.path()));
- const FileDescriptor file1_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file1.path(), O_RDONLY));
-
- ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), file1.path(), IN_ALL_EVENTS));
- // Note: The check on close will happen in FileDescriptor::~FileDescriptor().
-}
-
-TEST(Inotify, NestedWatches) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
-
- const TempPath file1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- root.path(), "some content", TempPath::kDefaultFileMode));
- const FileDescriptor file1_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file1.path(), O_RDONLY));
-
- const int root_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), root.path(), IN_ALL_EVENTS));
- const int file1_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), file1.path(), IN_ALL_EVENTS));
-
- // Read from file1. This should generate an event for both watches.
- char buf;
- EXPECT_THAT(read(file1_fd.get(), &buf, 1), SyscallSucceeds());
-
- const std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(events, Are({Event(IN_ACCESS, root_wd, Basename(file1.path())),
- Event(IN_ACCESS, file1_wd)}));
-}
-
-TEST(Inotify, ConcurrentThreadsGeneratingEvents) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
-
- std::vector<TempPath> files;
- files.reserve(10);
- for (int i = 0; i < 10; i++) {
- files.emplace_back(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- root.path(), "some content", TempPath::kDefaultFileMode)));
- }
-
- auto test_thread = [&files]() {
- uint32_t seed = time(nullptr);
- for (int i = 0; i < 20; i++) {
- const TempPath& file = files[rand_r(&seed) % files.size()];
- const FileDescriptor file_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_WRONLY));
- TEST_PCHECK(write(file_fd.get(), "x", 1) == 1);
- }
- };
-
- ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), root.path(), IN_ALL_EVENTS));
-
- std::list<ScopedThread> threads;
- for (int i = 0; i < 3; i++) {
- threads.emplace_back(test_thread);
- }
- for (auto& t : threads) {
- t.Join();
- }
-
- const std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- // 3 threads doing 20 iterations, 3 events per iteration (open, write,
- // close). However, some events may be coalesced, and we can't reliably
- // predict how they'll be coalesced since the test threads aren't
- // synchronized. We can only check that we aren't getting unexpected events.
- for (const Event& ev : events) {
- EXPECT_NE(ev.mask & (IN_OPEN | IN_MODIFY | IN_CLOSE_WRITE), 0);
- }
-}
-
-TEST(Inotify, ReadWithTooSmallBufferFails) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const TempPath file1 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(root.path()));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
-
- const int wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), file1.path(), IN_ALL_EVENTS));
-
- // Open the file to queue an event. This event will not have a filename, so
- // reading from the inotify fd should return sizeof(struct inotify_event)
- // bytes of data.
- FileDescriptor file1_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file1.path(), O_RDONLY));
- std::vector<char> buf(kBufSize, 0);
- ssize_t readlen;
-
- // Try a buffer too small to hold any potential event. This is rejected
- // outright without the event being dequeued.
- EXPECT_THAT(read(fd.get(), buf.data(), sizeof(struct inotify_event) - 1),
- SyscallFailsWithErrno(EINVAL));
- // Try a buffer just large enough. This should succeed.
- EXPECT_THAT(
- readlen = read(fd.get(), buf.data(), sizeof(struct inotify_event)),
- SyscallSucceeds());
- EXPECT_EQ(readlen, sizeof(struct inotify_event));
- // Event queue is now empty, the next read should return EAGAIN.
- EXPECT_THAT(read(fd.get(), buf.data(), sizeof(struct inotify_event)),
- SyscallFailsWithErrno(EAGAIN));
-
- // Now watch the directory instead of the file, so that generated events
- // contain a name.
- EXPECT_THAT(inotify_rm_watch(fd.get(), wd), SyscallSucceeds());
-
- // Drain the event generated from the watch removal.
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
-
- ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), root.path(), IN_ALL_EVENTS));
-
- file1_fd.reset(); // Close file to generate an event.
-
- // Try a buffer too small to hold any event and one too small to hold an event
- // with a name. These should both fail without consuming the event.
- EXPECT_THAT(read(fd.get(), buf.data(), sizeof(struct inotify_event) - 1),
- SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(read(fd.get(), buf.data(), sizeof(struct inotify_event)),
- SyscallFailsWithErrno(EINVAL));
- // Now try with a large enough buffer. This should return the one event.
- EXPECT_THAT(readlen = read(fd.get(), buf.data(), buf.size()),
- SyscallSucceeds());
- EXPECT_GE(readlen,
- sizeof(struct inotify_event) + Basename(file1.path()).size());
- // With the single event read, the queue should once again be empty.
- EXPECT_THAT(read(fd.get(), buf.data(), sizeof(struct inotify_event)),
- SyscallFailsWithErrno(EAGAIN));
-}
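-
-// For reference, the sizing rule the test above exercises: a read() must be
-// able to hold the next queued event in full, so raw consumers typically
-// reserve room for a header plus a maximal name. This assumes NAME_MAX comes
-// from <limits.h>; the constant is illustrative only.
-constexpr size_t kMinInotifyEventBufSize =
- sizeof(struct inotify_event) + NAME_MAX + 1;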
-
-TEST(Inotify, BlockingReadOnInotifyFd) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(0));
- const TempPath file1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- root.path(), "some content", TempPath::kDefaultFileMode));
-
- const FileDescriptor file1_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file1.path(), O_RDONLY));
-
- ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), root.path(), IN_ALL_EVENTS));
-
- // Spawn a thread performing a blocking read for new events on the inotify fd.
- std::vector<char> buf(kBufSize, 0);
- const int shared_fd = fd.get(); // The thread needs it.
- ScopedThread t([shared_fd, &buf]() {
- ssize_t readlen;
- EXPECT_THAT(readlen = read(shared_fd, buf.data(), buf.size()),
- SyscallSucceeds());
- });
-
- // Perform a read on the watched file, which should generate an IN_ACCESS
- // event, unblocking the reader thread.
- char c;
- EXPECT_THAT(read(file1_fd.get(), &c, 1), SyscallSucceeds());
-
- // Wait for the thread to read the event and exit.
- t.Join();
-
- // Make sure the event we got back is sane.
- uint32_t event_mask;
- memcpy(&event_mask, buf.data() + offsetof(struct inotify_event, mask),
- sizeof(event_mask));
- EXPECT_EQ(event_mask, IN_ACCESS);
-}
-
-TEST(Inotify, WatchOnRelativePath) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
- const TempPath file1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- root.path(), "some content", TempPath::kDefaultFileMode));
-
- const FileDescriptor file1_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file1.path(), O_RDONLY));
-
- // Change working directory to root.
- const FileDescriptor cwd = ASSERT_NO_ERRNO_AND_VALUE(Open(".", O_PATH));
- EXPECT_THAT(chdir(root.path().c_str()), SyscallSucceeds());
-
- // Add a watch on file1 with a relative path.
- const int wd = ASSERT_NO_ERRNO_AND_VALUE(InotifyAddWatch(
- fd.get(), std::string(Basename(file1.path())), IN_ALL_EVENTS));
-
- // Perform a read on file1, this should generate an IN_ACCESS event.
- char c;
- EXPECT_THAT(read(file1_fd.get(), &c, 1), SyscallSucceeds());
-
- const std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- EXPECT_THAT(events, Are({Event(IN_ACCESS, wd)}));
-
- // Explicitly reset the working directory so that we don't continue to
- // reference "root". Once the test ends, "root" will get unlinked. If we
- // continue to hold a reference, random save/restore tests can fail if a save
- // is triggered after "root" is unlinked; we can't save deleted fs objects
- // with active references.
- EXPECT_THAT(fchdir(cwd.get()), SyscallSucceeds());
-}
-
-TEST(Inotify, ZeroLengthReadWriteDoesNotGenerateEvent) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
-
- const char kContent[] = "some content";
- TempPath file1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- root.path(), kContent, TempPath::kDefaultFileMode));
- const int kContentSize = sizeof(kContent) - 1;
-
- const FileDescriptor file1_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file1.path(), O_RDWR));
-
- const int wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), root.path(), IN_ALL_EVENTS));
-
- std::vector<char> buf(kContentSize, 0);
- // Read all available data.
- ssize_t readlen;
- EXPECT_THAT(readlen = read(file1_fd.get(), buf.data(), kContentSize),
- SyscallSucceeds());
- EXPECT_EQ(readlen, kContentSize);
- // Drain all events and make sure we got the IN_ACCESS for the read.
- std::vector<Event> events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- EXPECT_THAT(events, Are({Event(IN_ACCESS, wd, Basename(file1.path()))}));
-
- // Now try reading again. This should be a 0-length read, since we're at EOF.
- char c;
- EXPECT_THAT(readlen = read(file1_fd.get(), &c, 1), SyscallSucceeds());
- EXPECT_EQ(readlen, 0);
- // We should have no new events.
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- EXPECT_TRUE(events.empty());
-
- // Try issuing a zero-length read.
- EXPECT_THAT(readlen = read(file1_fd.get(), &c, 0), SyscallSucceeds());
- EXPECT_EQ(readlen, 0);
- // We should have no new events.
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- EXPECT_TRUE(events.empty());
-
- // Try issuing a zero-length write.
- ssize_t writelen;
- EXPECT_THAT(writelen = write(file1_fd.get(), &c, 0), SyscallSucceeds());
- EXPECT_EQ(writelen, 0);
- // We should have no new events.
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- EXPECT_TRUE(events.empty());
-}
-
-TEST(Inotify, ChmodGeneratesAttribEvent) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const TempPath file1 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(root.path()));
-
- FileDescriptor root_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(root.path(), O_RDONLY));
- FileDescriptor file1_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file1.path(), O_RDWR));
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
-
- const int root_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), root.path(), IN_ALL_EVENTS));
- const int file1_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), file1.path(), IN_ALL_EVENTS));
-
- auto verify_chmod_events = [&]() {
- std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(events, Are({Event(IN_ATTRIB, root_wd, Basename(file1.path())),
- Event(IN_ATTRIB, file1_wd)}));
- };
-
- // Don't do cooperative S/R tests for any of the {f}chmod* syscalls below; the
- // test will always fail because nodes cannot be saved when they have stricter
- // permissions than the original host node.
- const DisableSave ds;
-
- // Chmod.
- ASSERT_THAT(chmod(file1.path().c_str(), S_IWGRP), SyscallSucceeds());
- verify_chmod_events();
-
- // Fchmod.
- ASSERT_THAT(fchmod(file1_fd.get(), S_IRGRP | S_IWGRP), SyscallSucceeds());
- verify_chmod_events();
-
- // Fchmodat.
- const std::string file1_basename = std::string(Basename(file1.path()));
- ASSERT_THAT(fchmodat(root_fd.get(), file1_basename.c_str(), S_IWGRP, 0),
- SyscallSucceeds());
- verify_chmod_events();
-
- // Make sure the chmod'ed file descriptors are destroyed before DisableSave
- // is destructed.
- root_fd.reset();
- file1_fd.reset();
-}
-
-TEST(Inotify, TruncateGeneratesModifyEvent) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const TempPath file1 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(root.path()));
- const FileDescriptor file1_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file1.path(), O_RDWR));
-
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
- const int root_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), root.path(), IN_ALL_EVENTS));
- const int file1_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), file1.path(), IN_ALL_EVENTS));
-
- auto verify_truncate_events = [&]() {
- std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(events, Are({Event(IN_MODIFY, root_wd, Basename(file1.path())),
- Event(IN_MODIFY, file1_wd)}));
- };
-
- // Truncate.
- EXPECT_THAT(truncate(file1.path().c_str(), 4096), SyscallSucceeds());
- verify_truncate_events();
-
- // Ftruncate.
- EXPECT_THAT(ftruncate(file1_fd.get(), 8192), SyscallSucceeds());
- verify_truncate_events();
-
- // No events if truncate fails.
- EXPECT_THAT(ftruncate(file1_fd.get(), -1), SyscallFailsWithErrno(EINVAL));
- const std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(events, Are({}));
-}
-
-TEST(Inotify, GetdentsGeneratesAccessEvent) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const TempPath file1 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(root.path()));
-
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
-
- ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), root.path(), IN_ALL_EVENTS));
- ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), file1.path(), IN_ALL_EVENTS));
-
- // This internally calls getdents(2). We also expect to see an open/close
- // event for the dirfd.
- ASSERT_NO_ERRNO_AND_VALUE(ListDir(root.path(), false));
- const std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
-
- // Linux only seems to generate access events on getdents() on some
- // calls. Allow the test to pass even if it isn't generated. gVisor will
- // always generate the IN_ACCESS event so the test will at least ensure gVisor
- // behaves reasonably.
- int i = 0;
- EXPECT_EQ(events[i].mask, IN_OPEN | IN_ISDIR);
- ++i;
- if (IsRunningOnGvisor()) {
- EXPECT_EQ(events[i].mask, IN_ACCESS | IN_ISDIR);
- ++i;
- } else {
- if (events[i].mask == (IN_ACCESS | IN_ISDIR)) {
- // Skip over the IN_ACCESS event on Linux, it only shows up some of the
- // time so we can't assert its existence.
- ++i;
- }
- }
- EXPECT_EQ(events[i].mask, IN_CLOSE_NOWRITE | IN_ISDIR);
-}
-
-TEST(Inotify, MknodGeneratesCreateEvent) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
-
- const int wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), root.path(), IN_ALL_EVENTS));
-
- const TempPath file1(root.path() + "/file1");
- ASSERT_THAT(mknod(file1.path().c_str(), S_IFREG, 0), SyscallSucceeds());
-
- const std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(events, Are({Event(IN_CREATE, wd, Basename(file1.path()))}));
-}
-
-TEST(Inotify, SymlinkGeneratesCreateEvent) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const TempPath file1 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(root.path()));
- const TempPath link1(NewTempAbsPathInDir(root.path()));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
-
- const int root_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), root.path(), IN_ALL_EVENTS));
- ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), file1.path(), IN_ALL_EVENTS));
-
- ASSERT_THAT(symlink(file1.path().c_str(), link1.path().c_str()),
- SyscallSucceeds());
-
- const std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
-
- ASSERT_THAT(events, Are({Event(IN_CREATE, root_wd, Basename(link1.path()))}));
-}
-
-TEST(Inotify, LinkGeneratesAttribAndCreateEvents) {
- // Inotify does not work properly with hard links in gofer and overlay fs.
- SKIP_IF(IsRunningOnGvisor() &&
- !ASSERT_NO_ERRNO_AND_VALUE(IsTmpfs(GetAbsoluteTestTmpdir())));
-
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const TempPath file1 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(root.path()));
- const TempPath link1(root.path() + "/link1");
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
-
- const int root_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), root.path(), IN_ALL_EVENTS));
- const int file1_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), file1.path(), IN_ALL_EVENTS));
-
- ASSERT_THAT(link(file1.path().c_str(), link1.path().c_str()),
- SyscallSucceeds());
-
- const std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(events, Are({Event(IN_ATTRIB, file1_wd),
- Event(IN_CREATE, root_wd, Basename(link1.path()))}));
-}
-
-TEST(Inotify, UtimesGeneratesAttribEvent) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
- const TempPath file1 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(root.path()));
-
- const FileDescriptor file1_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file1.path(), O_RDWR));
- const int wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), root.path(), IN_ALL_EVENTS));
-
- const struct timeval times[2] = {{1, 0}, {2, 0}};
- EXPECT_THAT(futimes(file1_fd.get(), times), SyscallSucceeds());
-
- const std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(events, Are({Event(IN_ATTRIB, wd, Basename(file1.path()))}));
-}
-
-TEST(Inotify, HardlinksReuseSameWatch) {
- // Inotify does not work properly with hard links in gofer and overlay fs.
- SKIP_IF(IsRunningOnGvisor() &&
- !ASSERT_NO_ERRNO_AND_VALUE(IsTmpfs(GetAbsoluteTestTmpdir())));
-
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- TempPath file =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(root.path()));
-
- TempPath file2(root.path() + "/file2");
- ASSERT_THAT(link(file.path().c_str(), file2.path().c_str()),
- SyscallSucceeds());
-
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
-
- const int root_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), root.path(), IN_ALL_EVENTS));
- const int file_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), file.path(), IN_ALL_EVENTS));
- const int file2_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), file2.path(), IN_ALL_EVENTS));
-
- // The watch descriptors for watches on different links to the same file
- // should be identical.
- EXPECT_NE(root_wd, file_wd);
- EXPECT_EQ(file_wd, file2_wd);
-
- FileDescriptor file_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_WRONLY));
-
- std::vector<Event> events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(events,
- AreUnordered({Event(IN_OPEN, root_wd, Basename(file.path())),
- Event(IN_OPEN, file_wd)}));
-
- // For the next step, we want to ensure all fds to the file are closed. Do
- // that now and drain the resulting events.
- file_fd.reset();
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(
- events,
- AreUnordered({Event(IN_CLOSE_WRITE, root_wd, Basename(file.path())),
- Event(IN_CLOSE_WRITE, file_wd)}));
-
- // Remove the link and see which events show up. Note that after
- // this, we still have a link to the file so the watch shouldn't be
- // automatically removed.
- const std::string file2_path = file2.reset();
-
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(events,
- AreUnordered({Event(IN_ATTRIB, file2_wd),
- Event(IN_DELETE, root_wd, Basename(file2_path))}));
-
- // Now remove the other link. Since this is the last link to the file, the
- // watch should be automatically removed.
- const std::string file_path = file.reset();
-
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(
- events,
- AreUnordered({Event(IN_ATTRIB, file_wd), Event(IN_DELETE_SELF, file_wd),
- Event(IN_IGNORED, file_wd),
- Event(IN_DELETE, root_wd, Basename(file_path))}));
-}
-
-// Calling mkdir within "parent/child" should generate an event for child, but
-// not parent.
-TEST(Inotify, MkdirGeneratesCreateEventWithDirFlag) {
- const TempPath parent = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const TempPath child =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(parent.path()));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
- ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), parent.path(), IN_ALL_EVENTS));
- const int child_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), child.path(), IN_ALL_EVENTS));
-
- const TempPath dir1(NewTempAbsPathInDir(child.path()));
- ASSERT_THAT(mkdir(dir1.path().c_str(), 0777), SyscallSucceeds());
-
- const std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(
- events,
- Are({Event(IN_CREATE | IN_ISDIR, child_wd, Basename(dir1.path()))}));
-}
-
-TEST(Inotify, MultipleInotifyInstancesAndWatchesAllGetEvents) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const TempPath file1 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(root.path()));
-
- const FileDescriptor file1_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file1.path(), O_WRONLY));
- constexpr int kNumFds = 30;
- std::vector<FileDescriptor> inotify_fds;
-
- for (int i = 0; i < kNumFds; ++i) {
- const DisableSave ds; // Too expensive.
- inotify_fds.emplace_back(
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK)));
- const FileDescriptor& fd = inotify_fds[inotify_fds.size() - 1]; // Back.
- ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), root.path(), IN_ALL_EVENTS));
- ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), file1.path(), IN_ALL_EVENTS));
- }
-
- const std::string data = "some content";
- EXPECT_THAT(write(file1_fd.get(), data.c_str(), data.length()),
- SyscallSucceeds());
-
- for (const FileDescriptor& fd : inotify_fds) {
- const DisableSave ds; // Too expensive.
- const std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- if (events.size() >= 2) {
- EXPECT_EQ(events[0].mask, IN_MODIFY);
- EXPECT_EQ(events[0].wd, 1);
- EXPECT_EQ(events[0].name, Basename(file1.path()));
- EXPECT_EQ(events[1].mask, IN_MODIFY);
- EXPECT_EQ(events[1].wd, 2);
- EXPECT_EQ(events[1].name, "");
- }
- }
-}
-
-TEST(Inotify, EventsGoUpAtMostOneLevel) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const TempPath dir1 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(root.path()));
- TempPath file1 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir1.path()));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
-
- ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), root.path(), IN_ALL_EVENTS));
- const int dir1_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), dir1.path(), IN_ALL_EVENTS));
-
- const std::string file1_path = file1.reset();
-
- const std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(events, Are({Event(IN_DELETE, dir1_wd, Basename(file1_path))}));
-}
-
-TEST(Inotify, DuplicateWatchReturnsSameWatchDescriptor) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const TempPath file1 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(root.path()));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
-
- const int wd1 = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), file1.path(), IN_ALL_EVENTS));
- const int wd2 = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), file1.path(), IN_ALL_EVENTS));
-
- EXPECT_EQ(wd1, wd2);
-
- const FileDescriptor file1_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file1.path(), O_WRONLY));
- const std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- // The watch shouldn't be duplicated; we expect only one event.
- ASSERT_THAT(events, Are({Event(IN_OPEN, wd1)}));
-}
-
-TEST(Inotify, UnmatchedEventsAreDiscarded) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- TempPath file1 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(root.path()));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
-
- const int wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), file1.path(), IN_ACCESS));
-
- FileDescriptor file1_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file1.path(), O_WRONLY));
-
- std::vector<Event> events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- // We only asked for access events; the open event should be discarded.
- ASSERT_THAT(events, Are({}));
-
- // IN_IGNORED events are always generated, regardless of the mask.
- file1_fd.reset();
- file1.reset();
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(events, Are({Event(IN_IGNORED, wd)}));
-}
-
-TEST(Inotify, AddWatchWithInvalidEventMaskFails) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
-
- EXPECT_THAT(inotify_add_watch(fd.get(), root.path().c_str(), 0),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(Inotify, AddWatchOnInvalidPathFails) {
- const TempPath nonexistent(NewTempAbsPath());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
-
- // Non-existent path.
- EXPECT_THAT(
- inotify_add_watch(fd.get(), nonexistent.path().c_str(), IN_CREATE),
- SyscallFailsWithErrno(ENOENT));
-
- // Garbage path pointer.
- EXPECT_THAT(inotify_add_watch(fd.get(), nullptr, IN_CREATE),
- SyscallFailsWithErrno(EFAULT));
-}
-
-TEST(Inotify, InOnlyDirFlagRespected) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const TempPath file1 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(root.path()));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
-
- EXPECT_THAT(
- inotify_add_watch(fd.get(), root.path().c_str(), IN_ACCESS | IN_ONLYDIR),
- SyscallSucceeds());
-
- EXPECT_THAT(
- inotify_add_watch(fd.get(), file1.path().c_str(), IN_ACCESS | IN_ONLYDIR),
- SyscallFailsWithErrno(ENOTDIR));
-}
-
-TEST(Inotify, MaskAddMergesWithExistingEventMask) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const TempPath file1 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(root.path()));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
-
- FileDescriptor file1_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file1.path(), O_WRONLY));
-
- const int wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), file1.path(), IN_OPEN | IN_CLOSE_WRITE));
-
- const std::string data = "some content";
- EXPECT_THAT(write(file1_fd.get(), data.c_str(), data.length()),
- SyscallSucceeds());
-
- // We shouldn't get any events, since IN_MODIFY wasn't in the event mask.
- std::vector<Event> events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(events, Are({}));
-
- // Add IN_MODIFY to event mask.
- ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), file1.path(), IN_MODIFY | IN_MASK_ADD));
-
- EXPECT_THAT(write(file1_fd.get(), data.c_str(), data.length()),
- SyscallSucceeds());
-
- // This time we should get the modify event.
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(events, Are({Event(IN_MODIFY, wd)}));
-
- // Now close the fd. If IN_MASK_ADD had replaced the event mask rather than
- // merging IN_MODIFY into it, we would not get the close event.
- file1_fd.reset();
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(events, Are({Event(IN_CLOSE_WRITE, wd)}));
-}
-
-// Test that control event bits are not considered when checking the event mask.
-TEST(Inotify, ControlEvents) {
- const TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
-
- const int wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), dir.path(), IN_ACCESS));
-
- // Check that events in the mask are dispatched and that control bits are
- // included in the delivered event's mask.
- std::vector<std::string> files =
- ASSERT_NO_ERRNO_AND_VALUE(ListDir(dir.path(), false));
- ASSERT_EQ(files.size(), 2);
-
- const std::vector<Event> events1 =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(events1, Are({Event(IN_ACCESS | IN_ISDIR, wd)}));
-
- // Check that events not in the mask are discarded.
- const FileDescriptor dir_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(dir.path(), O_RDONLY | O_DIRECTORY));
-
- const std::vector<Event> events2 =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- ASSERT_THAT(events2, Are({}));
-}
-
-// Regression test to ensure that epoll and directory access don't deadlock.
-TEST(Inotify, EpollNoDeadlock) {
- const DisableSave ds; // Too many syscalls.
-
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
-
- // Create lots of directories and watch all of them.
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- std::vector<TempPath> children;
- for (size_t i = 0; i < 1000; ++i) {
- auto child = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(root.path()));
- ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), child.path(), IN_ACCESS));
- children.emplace_back(std::move(child));
- }
-
- // Run epoll_wait constantly in a separate thread.
- std::atomic<bool> done(false);
- ScopedThread th([&fd, &done] {
- for (auto start = absl::Now(); absl::Now() - start < absl::Seconds(5);) {
- FileDescriptor epoll_fd = ASSERT_NO_ERRNO_AND_VALUE(NewEpollFD());
- ASSERT_NO_ERRNO(RegisterEpollFD(epoll_fd.get(), fd.get(),
- EPOLLIN | EPOLLOUT | EPOLLET, 0));
- struct epoll_event result[1];
- EXPECT_THAT(RetryEINTR(epoll_wait)(epoll_fd.get(), result, 1, -1),
- SyscallSucceedsWithValue(1));
-
- sched_yield();
- }
- done = true;
- });
-
- // While epoll thread is running, constantly access all directories to
- // generate inotify events.
- while (!done) {
- std::vector<std::string> files =
- ASSERT_NO_ERRNO_AND_VALUE(ListDir(root.path(), false));
- ASSERT_EQ(files.size(), 1002);
- for (const auto& child : files) {
- if (child == "." || child == "..") {
- continue;
- }
- ASSERT_NO_ERRNO_AND_VALUE(ListDir(JoinPath(root.path(), child), false));
- }
- sched_yield();
- }
-}
-
-TEST(Inotify, Fallocate) {
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR));
-
- const FileDescriptor inotify_fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
- const int wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(inotify_fd.get(), file.path(), IN_ALL_EVENTS));
-
- // Do an arbitrary modification with fallocate.
- ASSERT_THAT(RetryEINTR(fallocate)(fd.get(), 0, 0, 123), SyscallSucceeds());
- std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));
- EXPECT_THAT(events, Are({Event(IN_MODIFY, wd)}));
-}
-
-TEST(Inotify, Utimensat) {
- SKIP_IF(IsRunningWithVFS1());
-
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR));
-
- const FileDescriptor inotify_fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
- const int wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(inotify_fd.get(), file.path(), IN_ALL_EVENTS));
-
- // Just update the access time.
- struct timespec times[2] = {};
- times[0].tv_nsec = UTIME_NOW;
- times[1].tv_nsec = UTIME_OMIT;
- ASSERT_THAT(RetryEINTR(utimensat)(AT_FDCWD, file.path().c_str(), times, 0),
- SyscallSucceeds());
- std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));
- EXPECT_THAT(events, Are({Event(IN_ACCESS, wd)}));
-
- // Just the modify time.
- times[0].tv_nsec = UTIME_OMIT;
- times[1].tv_nsec = UTIME_NOW;
- ASSERT_THAT(utimensat(AT_FDCWD, file.path().c_str(), times, 0),
- SyscallSucceeds());
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));
- EXPECT_THAT(events, Are({Event(IN_MODIFY, wd)}));
-
- // Both together.
- times[0].tv_nsec = UTIME_NOW;
- times[1].tv_nsec = UTIME_NOW;
- ASSERT_THAT(utimensat(AT_FDCWD, file.path().c_str(), times, 0),
- SyscallSucceeds());
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));
- EXPECT_THAT(events, Are({Event(IN_ATTRIB, wd)}));
-}
-
-TEST(Inotify, Sendfile) {
- SKIP_IF(IsRunningWithVFS1());
-
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateFileWith(root.path(), "x", 0644));
- const TempPath out_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor in =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDONLY));
- const FileDescriptor out =
- ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_WRONLY));
-
- // Create separate inotify instances for the in and out fds. If both watches
- // were on the same instance, we would have discrepancies between Linux and
- // gVisor (order of events, duplicate events), which is not that important
- // since inotify is asynchronous anyway.
- const FileDescriptor in_inotify =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
- const FileDescriptor out_inotify =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
- const int in_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(in_inotify.get(), in_file.path(), IN_ALL_EVENTS));
- const int out_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(out_inotify.get(), out_file.path(), IN_ALL_EVENTS));
-
- ASSERT_THAT(sendfile(out.get(), in.get(), /*offset=*/nullptr, 1),
- SyscallSucceeds());
-
- // Expect a single access event and a single modify event.
- std::vector<Event> in_events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(in_inotify.get()));
- std::vector<Event> out_events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(out_inotify.get()));
- EXPECT_THAT(in_events, Are({Event(IN_ACCESS, in_wd)}));
- EXPECT_THAT(out_events, Are({Event(IN_MODIFY, out_wd)}));
-}
-
-TEST(Inotify, SpliceOnWatchTarget) {
- int pipefds[2];
- ASSERT_THAT(pipe2(pipefds, O_NONBLOCK), SyscallSucceeds());
-
- const TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const FileDescriptor inotify_fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- dir.path(), "some content", TempPath::kDefaultFileMode));
-
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR));
- const int dir_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(inotify_fd.get(), dir.path(), IN_ALL_EVENTS));
- const int file_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(inotify_fd.get(), file.path(), IN_ALL_EVENTS));
-
- EXPECT_THAT(splice(fd.get(), nullptr, pipefds[1], nullptr, 1, /*flags=*/0),
- SyscallSucceedsWithValue(1));
-
- // Surprisingly, events may not be generated in Linux if we read from a file.
- // fs/splice.c:generic_file_splice_read, which is used most often, does not
- // generate events, whereas fs/splice.c:default_file_splice_read does.
- std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));
- if (IsRunningOnGvisor() && !IsRunningWithVFS1()) {
- ASSERT_THAT(events, Are({Event(IN_ACCESS, dir_wd, Basename(file.path())),
- Event(IN_ACCESS, file_wd)}));
- }
-
- EXPECT_THAT(splice(pipefds[0], nullptr, fd.get(), nullptr, 1, /*flags=*/0),
- SyscallSucceedsWithValue(1));
-
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));
- ASSERT_THAT(events, Are({
- Event(IN_MODIFY, dir_wd, Basename(file.path())),
- Event(IN_MODIFY, file_wd),
- }));
-}
-
-TEST(Inotify, SpliceOnInotifyFD) {
- int pipefds[2];
- ASSERT_THAT(pipe2(pipefds, O_NONBLOCK), SyscallSucceeds());
-
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
- const TempPath file1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- root.path(), "some content", TempPath::kDefaultFileMode));
-
- const FileDescriptor file1_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file1.path(), O_RDONLY));
- const int watcher = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), file1.path(), IN_ALL_EVENTS));
-
- char buf;
- EXPECT_THAT(read(file1_fd.get(), &buf, 1), SyscallSucceeds());
-
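- // The IN_ACCESS event generated by the read above is for the watched file
- // itself, so it carries no name (len == 0) and occupies exactly
- // sizeof(struct inotify_event) bytes.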
- EXPECT_THAT(splice(fd.get(), nullptr, pipefds[1], nullptr,
- sizeof(struct inotify_event) + 1, SPLICE_F_NONBLOCK),
- SyscallSucceedsWithValue(sizeof(struct inotify_event)));
-
- const FileDescriptor read_fd(pipefds[0]);
- const std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(read_fd.get()));
- ASSERT_THAT(events, Are({Event(IN_ACCESS, watcher)}));
-}
-
-// Watches on a parent should not be triggered by actions on a hard link to one
-// of its children that has a different parent.
-TEST(Inotify, LinkOnOtherParent) {
- // Inotify does not work properly with hard links in gofer and overlay fs.
- SKIP_IF(IsRunningOnGvisor() &&
- !ASSERT_NO_ERRNO_AND_VALUE(IsTmpfs(GetAbsoluteTestTmpdir())));
-
- const TempPath dir1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const TempPath dir2 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const TempPath file =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir1.path()));
- std::string link_path = NewTempAbsPathInDir(dir2.path());
-
- ASSERT_THAT(link(file.path().c_str(), link_path.c_str()), SyscallSucceeds());
-
- const FileDescriptor inotify_fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
- ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(inotify_fd.get(), dir1.path(), IN_ALL_EVENTS));
-
- // Perform various actions on the link outside of dir1, which should trigger
- // no inotify events.
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(link_path.c_str(), O_RDWR));
- int val = 0;
- ASSERT_THAT(write(fd.get(), &val, sizeof(val)), SyscallSucceeds());
- ASSERT_THAT(read(fd.get(), &val, sizeof(val)), SyscallSucceeds());
- ASSERT_THAT(ftruncate(fd.get(), 12345), SyscallSucceeds());
-
- // Close before unlinking; some filesystems cannot restore an open fd on an
- // unlinked file.
- fd.reset();
- ASSERT_THAT(unlink(link_path.c_str()), SyscallSucceeds());
-
- const std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));
- EXPECT_THAT(events, Are({}));
-}
-
-TEST(Inotify, Xattr) {
- // TODO(gvisor.dev/issue/1636): Support extended attributes in runsc gofer.
- SKIP_IF(IsRunningOnGvisor());
-
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const std::string path = file.path();
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(path, O_RDWR));
- const FileDescriptor inotify_fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
- const int wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(inotify_fd.get(), path, IN_ALL_EVENTS));
-
- const char* cpath = path.c_str();
- const char* name = "user.test";
- int val = 123;
- ASSERT_THAT(setxattr(cpath, name, &val, sizeof(val), /*flags=*/0),
- SyscallSucceeds());
- std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));
- EXPECT_THAT(events, Are({Event(IN_ATTRIB, wd)}));
-
- ASSERT_THAT(getxattr(cpath, name, &val, sizeof(val)), SyscallSucceeds());
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));
- EXPECT_THAT(events, Are({}));
-
- char list[100];
- ASSERT_THAT(listxattr(cpath, list, sizeof(list)), SyscallSucceeds());
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));
- EXPECT_THAT(events, Are({}));
-
- ASSERT_THAT(removexattr(cpath, name), SyscallSucceeds());
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));
- EXPECT_THAT(events, Are({Event(IN_ATTRIB, wd)}));
-
- ASSERT_THAT(fsetxattr(fd.get(), name, &val, sizeof(val), /*flags=*/0),
- SyscallSucceeds());
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));
- EXPECT_THAT(events, Are({Event(IN_ATTRIB, wd)}));
-
- ASSERT_THAT(fgetxattr(fd.get(), name, &val, sizeof(val)), SyscallSucceeds());
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));
- EXPECT_THAT(events, Are({}));
-
- ASSERT_THAT(flistxattr(fd.get(), list, sizeof(list)), SyscallSucceeds());
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));
- EXPECT_THAT(events, Are({}));
-
- ASSERT_THAT(fremovexattr(fd.get(), name), SyscallSucceeds());
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));
- EXPECT_THAT(events, Are({Event(IN_ATTRIB, wd)}));
-}
-
-TEST(Inotify, Exec) {
- SKIP_IF(IsRunningWithVFS1());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
- const int wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(fd.get(), "/bin/true", IN_ALL_EVENTS));
-
- // Perform exec.
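- // The exec causes the kernel to open, read, and close the binary, which
- // should appear below as IN_OPEN, IN_ACCESS, and IN_CLOSE_NOWRITE.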
- pid_t child = -1;
- int execve_errno = -1;
- auto kill = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec("/bin/true", {}, {}, nullptr, &child, &execve_errno));
- ASSERT_EQ(0, execve_errno);
-
- int status;
- ASSERT_THAT(RetryEINTR(waitpid)(child, &status, 0), SyscallSucceeds());
- EXPECT_EQ(0, status);
-
- // Process cleanup no longer needed.
- kill.Release();
-
- std::vector<Event> events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));
- EXPECT_THAT(events, Are({Event(IN_OPEN, wd), Event(IN_ACCESS, wd),
- Event(IN_CLOSE_NOWRITE, wd)}));
-}
-
-// Watches without IN_EXCL_UNLINK should continue to emit events for file
-// descriptors after their corresponding files have been unlinked.
-//
-// We need to disable S/R because there are filesystems where we cannot re-open
-// fds to an unlinked file across S/R, e.g. gofer-backed filesystems.
-TEST(Inotify, IncludeUnlinkedFile) {
- const DisableSave ds;
-
- const TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateFileWith(dir.path(), "123", TempPath::kDefaultFileMode));
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR));
-
- const FileDescriptor inotify_fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
- const int dir_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(inotify_fd.get(), dir.path(), IN_ALL_EVENTS));
- const int file_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(inotify_fd.get(), file.path(), IN_ALL_EVENTS));
-
- ASSERT_THAT(unlink(file.path().c_str()), SyscallSucceeds());
- int val = 0;
- ASSERT_THAT(read(fd.get(), &val, sizeof(val)), SyscallSucceeds());
- ASSERT_THAT(write(fd.get(), &val, sizeof(val)), SyscallSucceeds());
- std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));
- EXPECT_THAT(events, AnyOf(Are({
- Event(IN_ATTRIB, file_wd),
- Event(IN_DELETE, dir_wd, Basename(file.path())),
- Event(IN_ACCESS, dir_wd, Basename(file.path())),
- Event(IN_ACCESS, file_wd),
- Event(IN_MODIFY, dir_wd, Basename(file.path())),
- Event(IN_MODIFY, file_wd),
- }),
- Are({
- Event(IN_DELETE, dir_wd, Basename(file.path())),
- Event(IN_ATTRIB, file_wd),
- Event(IN_ACCESS, dir_wd, Basename(file.path())),
- Event(IN_ACCESS, file_wd),
- Event(IN_MODIFY, dir_wd, Basename(file.path())),
- Event(IN_MODIFY, file_wd),
- })));
-
- fd.reset();
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));
- EXPECT_THAT(events, Are({
- Event(IN_CLOSE_WRITE, dir_wd, Basename(file.path())),
- Event(IN_CLOSE_WRITE, file_wd),
- Event(IN_DELETE_SELF, file_wd),
- Event(IN_IGNORED, file_wd),
- }));
-}
-
-// Watches created with IN_EXCL_UNLINK will stop emitting events on fds for
-// children that have already been unlinked.
-//
-// We need to disable S/R because there are filesystems where we cannot re-open
-// fds to an unlinked file across S/R, e.g. gofer-backed filesystems.
-TEST(Inotify, ExcludeUnlink) {
- const DisableSave ds;
- // TODO(gvisor.dev/issue/1624): This test fails on VFS1.
- SKIP_IF(IsRunningWithVFS1());
-
- const TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const TempPath file =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir.path()));
-
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR));
-
- const FileDescriptor inotify_fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
- const int dir_wd = ASSERT_NO_ERRNO_AND_VALUE(InotifyAddWatch(
- inotify_fd.get(), dir.path(), IN_ALL_EVENTS | IN_EXCL_UNLINK));
- const int file_wd = ASSERT_NO_ERRNO_AND_VALUE(InotifyAddWatch(
- inotify_fd.get(), file.path(), IN_ALL_EVENTS | IN_EXCL_UNLINK));
-
- // Unlink the child, which should cause further operations on the open file
- // descriptor to be ignored.
- ASSERT_THAT(unlink(file.path().c_str()), SyscallSucceeds());
- int val = 0;
- ASSERT_THAT(write(fd.get(), &val, sizeof(val)), SyscallSucceeds());
- ASSERT_THAT(read(fd.get(), &val, sizeof(val)), SyscallSucceeds());
- std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));
- EXPECT_THAT(events, AreUnordered({
- Event(IN_ATTRIB, file_wd),
- Event(IN_DELETE, dir_wd, Basename(file.path())),
- }));
-
- fd.reset();
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));
- ASSERT_THAT(events, Are({
- Event(IN_DELETE_SELF, file_wd),
- Event(IN_IGNORED, file_wd),
- }));
-}
-
-// We need to disable S/R because there are filesystems where we cannot re-open
-// fds to an unlinked file across S/R, e.g. gofer-backed filesystems.
-TEST(Inotify, ExcludeUnlinkDirectory) {
- // TODO(gvisor.dev/issue/1624): This test fails on VFS1. Remove once VFS1 is
- // deleted.
- SKIP_IF(IsRunningWithVFS1());
-
- const DisableSave ds;
-
- const TempPath parent = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- TempPath dir =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(parent.path()));
- std::string dirPath = dir.path();
- const FileDescriptor inotify_fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
-
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(dirPath.c_str(), O_RDONLY | O_DIRECTORY));
- const int parent_wd = ASSERT_NO_ERRNO_AND_VALUE(InotifyAddWatch(
- inotify_fd.get(), parent.path(), IN_ALL_EVENTS | IN_EXCL_UNLINK));
- const int self_wd = ASSERT_NO_ERRNO_AND_VALUE(InotifyAddWatch(
- inotify_fd.get(), dir.path(), IN_ALL_EVENTS | IN_EXCL_UNLINK));
-
- // Unlink the dir, and then close the open fd.
- ASSERT_THAT(rmdir(dirPath.c_str()), SyscallSucceeds());
- dir.reset();
-
- std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));
- // No close event should appear.
- ASSERT_THAT(events,
- Are({Event(IN_DELETE | IN_ISDIR, parent_wd, Basename(dirPath))}));
-
- fd.reset();
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));
- ASSERT_THAT(events, Are({
- Event(IN_DELETE_SELF, self_wd),
- Event(IN_IGNORED, self_wd),
- }));
-}
-
-// If "dir/child" and "dir/child2" are links to the same file, and "dir/child"
-// is unlinked, a watch on "dir" with IN_EXCL_UNLINK will exclude future events
-// for fds on "dir/child" but not "dir/child2".
-//
-// We need to disable S/R because there are filesystems where we cannot re-open
-// fds to an unlinked file across S/R, e.g. gofer-backed filesystems.
-TEST(Inotify, ExcludeUnlinkMultipleChildren) {
- // Inotify does not work properly with hard links in gofer and overlay fs.
- SKIP_IF(IsRunningOnGvisor() &&
- !ASSERT_NO_ERRNO_AND_VALUE(IsTmpfs(GetAbsoluteTestTmpdir())));
- // TODO(gvisor.dev/issue/1624): This test fails on VFS1.
- SKIP_IF(IsRunningWithVFS1());
-
- const DisableSave ds;
-
- const TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const TempPath file =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir.path()));
- std::string path1 = file.path();
- std::string path2 = NewTempAbsPathInDir(dir.path());
- ASSERT_THAT(link(path1.c_str(), path2.c_str()), SyscallSucceeds());
-
- const FileDescriptor fd1 =
- ASSERT_NO_ERRNO_AND_VALUE(Open(path1.c_str(), O_RDWR));
- const FileDescriptor fd2 =
- ASSERT_NO_ERRNO_AND_VALUE(Open(path2.c_str(), O_RDWR));
-
- const FileDescriptor inotify_fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
- const int wd = ASSERT_NO_ERRNO_AND_VALUE(InotifyAddWatch(
- inotify_fd.get(), dir.path(), IN_ALL_EVENTS | IN_EXCL_UNLINK));
-
- // After unlinking path1, only events on the fd for path2 should be generated.
- ASSERT_THAT(unlink(path1.c_str()), SyscallSucceeds());
- ASSERT_THAT(write(fd1.get(), "x", 1), SyscallSucceeds());
- ASSERT_THAT(write(fd2.get(), "x", 1), SyscallSucceeds());
-
- const std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));
- EXPECT_THAT(events, Are({
- Event(IN_DELETE, wd, Basename(path1)),
- Event(IN_MODIFY, wd, Basename(path2)),
- }));
-}
-
-// On native Linux, actions of data type FSNOTIFY_EVENT_INODE are not affected
-// by IN_EXCL_UNLINK (see
-// fs/notify/inotify/inotify_fsnotify.c:inotify_handle_event). Inode-level
-// events include changes to metadata and extended attributes.
-//
-// We need to disable S/R because there are filesystems where we cannot re-open
-// fds to an unlinked file across S/R, e.g. gofer-backed filesystems.
-TEST(Inotify, ExcludeUnlinkInodeEvents) {
- // TODO(gvisor.dev/issue/1624): Fails on VFS1.
- SKIP_IF(IsRunningWithVFS1());
-
- // NOTE(gvisor.dev/issue/3654): In the gofer filesystem, we do not allow
- // setting attributes through an fd if the file at the open path has been
- // deleted.
- SKIP_IF(IsRunningOnGvisor() &&
- !ASSERT_NO_ERRNO_AND_VALUE(IsTmpfs(GetAbsoluteTestTmpdir())));
-
- const DisableSave ds;
-
- const TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const TempPath file =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir.path()));
-
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path().c_str(), O_RDWR));
-
- const FileDescriptor inotify_fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
- const int dir_wd = ASSERT_NO_ERRNO_AND_VALUE(InotifyAddWatch(
- inotify_fd.get(), dir.path(), IN_ALL_EVENTS | IN_EXCL_UNLINK));
- const int file_wd = ASSERT_NO_ERRNO_AND_VALUE(InotifyAddWatch(
- inotify_fd.get(), file.path(), IN_ALL_EVENTS | IN_EXCL_UNLINK));
-
- // Even after unlinking, inode-level operations will trigger events regardless
- // of IN_EXCL_UNLINK.
- ASSERT_THAT(unlink(file.path().c_str()), SyscallSucceeds());
-
- // Perform various actions on fd.
- ASSERT_THAT(ftruncate(fd.get(), 12345), SyscallSucceeds());
- std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));
- EXPECT_THAT(events, AnyOf(Are({
- Event(IN_ATTRIB, file_wd),
- Event(IN_DELETE, dir_wd, Basename(file.path())),
- Event(IN_MODIFY, dir_wd, Basename(file.path())),
- Event(IN_MODIFY, file_wd),
- }),
- Are({
- Event(IN_DELETE, dir_wd, Basename(file.path())),
- Event(IN_ATTRIB, file_wd),
- Event(IN_MODIFY, dir_wd, Basename(file.path())),
- Event(IN_MODIFY, file_wd),
- })));
-
- const struct timeval times[2] = {{1, 0}, {2, 0}};
- ASSERT_THAT(futimes(fd.get(), times), SyscallSucceeds());
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));
- EXPECT_THAT(events, Are({
- Event(IN_ATTRIB, dir_wd, Basename(file.path())),
- Event(IN_ATTRIB, file_wd),
- }));
-
- // S/R is disabled on this entire test due to behavior with unlink; it must
- // also be disabled after this point because of fchmod.
- ASSERT_THAT(fchmod(fd.get(), 0777), SyscallSucceeds());
- events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));
- EXPECT_THAT(events, Are({
- Event(IN_ATTRIB, dir_wd, Basename(file.path())),
- Event(IN_ATTRIB, file_wd),
- }));
-}
-
-TEST(Inotify, OneShot) {
- // TODO(gvisor.dev/issue/1624): IN_ONESHOT not supported in VFS1.
- SKIP_IF(IsRunningWithVFS1());
-
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor inotify_fd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
-
- const int wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(inotify_fd.get(), file.path(), IN_MODIFY | IN_ONESHOT));
-
- // Open an fd, write to it, and then close it.
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_WRONLY));
- ASSERT_THAT(write(fd.get(), "x", 1), SyscallSucceedsWithValue(1));
- fd.reset();
-
- // We should get a single event followed by IN_IGNORED indicating removal
- // of the one-shot watch. Prior activity (i.e. open) that is not in the mask
- // should not trigger removal, and activity after removal (i.e. close) should
- // not generate events.
- std::vector<Event> events =
- ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));
- EXPECT_THAT(events, Are({
- Event(IN_MODIFY, wd),
- Event(IN_IGNORED, wd),
- }));
-
- // The watch should already have been removed.
- EXPECT_THAT(inotify_rm_watch(inotify_fd.get(), wd),
- SyscallFailsWithErrno(EINVAL));
-}
-
-// This test helps verify that the lock order of filesystem and inotify locks
-// is respected when inotify instances and watch targets are concurrently being
-// destroyed.
-TEST(InotifyTest, InotifyAndTargetDestructionDoNotDeadlock) {
- const DisableSave ds; // Too many syscalls.
-
- // A file descriptor protected by a mutex. This ensures that while a
- // descriptor is in use, it cannot be closed and reused for a different file
- // description.
- struct atomic_fd {
- int fd;
- absl::Mutex mu;
- };
-
- // Set up initial inotify instances.
- constexpr int num_fds = 3;
- std::vector<atomic_fd> fds(num_fds);
- for (int i = 0; i < num_fds; i++) {
- int fd;
- ASSERT_THAT(fd = inotify_init1(IN_NONBLOCK), SyscallSucceeds());
- fds[i].fd = fd;
- }
-
- // Set up initial watch targets.
- std::vector<std::string> paths;
- for (int i = 0; i < 3; i++) {
- paths.push_back(NewTempAbsPath());
- ASSERT_THAT(mknod(paths[i].c_str(), S_IFREG | 0600, 0), SyscallSucceeds());
- }
-
- constexpr absl::Duration runtime = absl::Seconds(4);
-
- // Constantly replace each inotify instance with a new one.
- auto replace_fds = [&] {
- for (auto start = absl::Now(); absl::Now() - start < runtime;) {
- for (auto& afd : fds) {
- int new_fd;
- ASSERT_THAT(new_fd = inotify_init1(IN_NONBLOCK), SyscallSucceeds());
- absl::MutexLock l(&afd.mu);
- ASSERT_THAT(close(afd.fd), SyscallSucceeds());
- afd.fd = new_fd;
- for (auto& p : paths) {
- // inotify_add_watch may fail if the file at p was deleted.
- ASSERT_THAT(inotify_add_watch(afd.fd, p.c_str(), IN_ALL_EVENTS),
- AnyOf(SyscallSucceeds(), SyscallFailsWithErrno(ENOENT)));
- }
- }
- sched_yield();
- }
- };
-
- std::list<ScopedThread> ts;
- for (int i = 0; i < 3; i++) {
- ts.emplace_back(replace_fds);
- }
-
- // Constantly replace each watch target with a new one.
- for (auto start = absl::Now(); absl::Now() - start < runtime;) {
- for (auto& p : paths) {
- ASSERT_THAT(unlink(p.c_str()), SyscallSucceeds());
- ASSERT_THAT(mknod(p.c_str(), S_IFREG | 0600, 0), SyscallSucceeds());
- }
- sched_yield();
- }
-}
-
-// This test helps verify that the lock order of filesystem and inotify locks
-// is respected when adding/removing watches occurs concurrently with the
-// removal of their targets.
-TEST(InotifyTest, AddRemoveUnlinkDoNotDeadlock) {
- const DisableSave ds; // Too many syscalls.
-
- // Set up inotify instances.
- constexpr int num_fds = 3;
- std::vector<int> fds(num_fds);
- for (int i = 0; i < num_fds; i++) {
- ASSERT_THAT(fds[i] = inotify_init1(IN_NONBLOCK), SyscallSucceeds());
- }
-
- // Set up initial watch targets.
- std::vector<std::string> paths;
- for (int i = 0; i < 3; i++) {
- paths.push_back(NewTempAbsPath());
- ASSERT_THAT(mknod(paths[i].c_str(), S_IFREG | 0600, 0), SyscallSucceeds());
- }
-
- constexpr absl::Duration runtime = absl::Seconds(1);
-
- // Constantly add/remove watches for each inotify instance/watch target pair.
- auto add_remove_watches = [&] {
- for (auto start = absl::Now(); absl::Now() - start < runtime;) {
- for (int fd : fds) {
- for (auto& p : paths) {
- // Do not assert on inotify_add_watch and inotify_rm_watch. They may
- // fail if the file at p was deleted. inotify_add_watch may also fail
- // if another thread beat us to adding a watch.
- const int wd = inotify_add_watch(fd, p.c_str(), IN_ALL_EVENTS);
- if (wd > 0) {
- inotify_rm_watch(fd, wd);
- }
- }
- }
- sched_yield();
- }
- };
-
- std::list<ScopedThread> ts;
- for (int i = 0; i < 15; i++) {
- ts.emplace_back(add_remove_watches);
- }
-
- // Constantly replace each watch target with a new one.
- for (auto start = absl::Now(); absl::Now() - start < runtime;) {
- for (auto& p : paths) {
- ASSERT_THAT(unlink(p.c_str()), SyscallSucceeds());
- ASSERT_THAT(mknod(p.c_str(), S_IFREG | 0600, 0), SyscallSucceeds());
- }
- sched_yield();
- }
-}
-
-// This test helps verify that the lock order of filesystem and inotify locks
-// is respected when many inotify events and filesystem operations occur
-// simultaneously.
-TEST(InotifyTest, NotifyNoDeadlock) {
- const DisableSave ds; // Too many syscalls.
-
- const TempPath parent = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const std::string dir = parent.path();
-
- // mu protects file, which will change on rename.
- absl::Mutex mu;
- std::string file = NewTempAbsPathInDir(dir);
- ASSERT_THAT(mknod(file.c_str(), 0644 | S_IFREG, 0), SyscallSucceeds());
-
- const absl::Duration runtime = absl::Milliseconds(300);
-
- // Add/remove watches on dir and file.
- ScopedThread add_remove_watches([&] {
- const FileDescriptor ifd =
- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));
- int dir_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(ifd.get(), dir, IN_ALL_EVENTS));
- int file_wd;
- {
- absl::ReaderMutexLock l(&mu);
- file_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(ifd.get(), file, IN_ALL_EVENTS));
- }
- for (auto start = absl::Now(); absl::Now() - start < runtime;) {
- ASSERT_THAT(inotify_rm_watch(ifd.get(), file_wd), SyscallSucceeds());
- ASSERT_THAT(inotify_rm_watch(ifd.get(), dir_wd), SyscallSucceeds());
- dir_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(ifd.get(), dir, IN_ALL_EVENTS));
- {
- absl::ReaderMutexLock l(&mu);
- file_wd = ASSERT_NO_ERRNO_AND_VALUE(
- InotifyAddWatch(ifd.get(), file, IN_ALL_EVENTS));
- }
- sched_yield();
- }
- });
-
- // Modify attributes on dir and file.
- ScopedThread stats([&] {
- int fd, dir_fd;
- {
- absl::ReaderMutexLock l(&mu);
- ASSERT_THAT(fd = open(file.c_str(), O_RDONLY), SyscallSucceeds());
- }
- ASSERT_THAT(dir_fd = open(dir.c_str(), O_RDONLY | O_DIRECTORY),
- SyscallSucceeds());
- const struct timeval times[2] = {{1, 0}, {2, 0}};
-
- for (auto start = absl::Now(); absl::Now() - start < runtime;) {
- {
- absl::ReaderMutexLock l(&mu);
- EXPECT_THAT(utimes(file.c_str(), times), SyscallSucceeds());
- }
- EXPECT_THAT(futimes(fd, times), SyscallSucceeds());
- EXPECT_THAT(utimes(dir.c_str(), times), SyscallSucceeds());
- EXPECT_THAT(futimes(dir_fd, times), SyscallSucceeds());
- sched_yield();
- }
- });
-
- // Modify extended attributes on dir and file.
- ScopedThread xattrs([&] {
- // TODO(gvisor.dev/issue/1636): Support extended attributes in runsc gofer.
- if (!IsRunningOnGvisor()) {
- int fd;
- {
- absl::ReaderMutexLock l(&mu);
- ASSERT_THAT(fd = open(file.c_str(), O_RDONLY), SyscallSucceeds());
- }
-
- const char* name = "user.test";
- int val = 123;
- for (auto start = absl::Now(); absl::Now() - start < runtime;) {
- {
- absl::ReaderMutexLock l(&mu);
- ASSERT_THAT(
- setxattr(file.c_str(), name, &val, sizeof(val), /*flags=*/0),
- SyscallSucceeds());
- ASSERT_THAT(removexattr(file.c_str(), name), SyscallSucceeds());
- }
-
- ASSERT_THAT(fsetxattr(fd, name, &val, sizeof(val), /*flags=*/0),
- SyscallSucceeds());
- ASSERT_THAT(fremovexattr(fd, name), SyscallSucceeds());
- sched_yield();
- }
- }
- });
-
- // Read and write file's contents. Read and write dir's entries.
- ScopedThread read_write([&] {
- int fd;
- {
- absl::ReaderMutexLock l(&mu);
- ASSERT_THAT(fd = open(file.c_str(), O_RDWR), SyscallSucceeds());
- }
- for (auto start = absl::Now(); absl::Now() - start < runtime;) {
- int val = 123;
- ASSERT_THAT(write(fd, &val, sizeof(val)), SyscallSucceeds());
- ASSERT_THAT(read(fd, &val, sizeof(val)), SyscallSucceeds());
- TempPath new_file =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir));
- ASSERT_NO_ERRNO(ListDir(dir, false));
- new_file.reset();
- sched_yield();
- }
- });
-
- // Rename file.
- for (auto start = absl::Now(); absl::Now() - start < runtime;) {
- const std::string new_path = NewTempAbsPathInDir(dir);
- {
- absl::WriterMutexLock l(&mu);
- ASSERT_THAT(rename(file.c_str(), new_path.c_str()), SyscallSucceeds());
- file = new_path;
- }
- sched_yield();
- }
-}
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/ioctl.cc b/test/syscalls/linux/ioctl.cc
deleted file mode 100644
index 9b16d1558..000000000
--- a/test/syscalls/linux/ioctl.cc
+++ /dev/null
@@ -1,419 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <arpa/inet.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <net/if.h>
-#include <netdb.h>
-#include <signal.h>
-#include <sys/ioctl.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/signal_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-bool CheckNonBlocking(int fd) {
- int ret = fcntl(fd, F_GETFL, 0);
- TEST_CHECK(ret != -1);
- return (ret & O_NONBLOCK) == O_NONBLOCK;
-}
-
-bool CheckCloExec(int fd) {
- int ret = fcntl(fd, F_GETFD, 0);
- TEST_CHECK(ret != -1);
- return (ret & FD_CLOEXEC) == FD_CLOEXEC;
-}
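-// FIONBIO toggles O_NONBLOCK (like fcntl(F_SETFL)), while FIOCLEX and FIONCLEX
-// set and clear FD_CLOEXEC (like fcntl(F_SETFD)); the helpers above read the
-// resulting flags back with fcntl.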
-
-class IoctlTest : public ::testing::Test {
- protected:
- void SetUp() override {
- ASSERT_THAT(fd_ = open("/dev/null", O_RDONLY), SyscallSucceeds());
- }
-
- void TearDown() override {
- if (fd_ >= 0) {
- ASSERT_THAT(close(fd_), SyscallSucceeds());
- fd_ = -1;
- }
- }
-
- int fd() const { return fd_; }
-
- private:
- int fd_ = -1;
-};
-
-TEST_F(IoctlTest, BadFileDescriptor) {
- EXPECT_THAT(ioctl(-1 /* fd */, 0), SyscallFailsWithErrno(EBADF));
-}
-
-TEST_F(IoctlTest, InvalidControlNumber) {
- EXPECT_THAT(ioctl(STDOUT_FILENO, 0), SyscallFailsWithErrno(ENOTTY));
-}
-
-TEST_F(IoctlTest, IoctlWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/null", O_PATH));
-
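- // An O_PATH descriptor supports only a small set of fd-level operations;
- // ioctl(2) is not among them, so these calls fail with EBADF.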
- int set = 1;
- EXPECT_THAT(ioctl(fd.get(), FIONBIO, &set), SyscallFailsWithErrno(EBADF));
-
- EXPECT_THAT(ioctl(fd.get(), FIONCLEX), SyscallFailsWithErrno(EBADF));
-
- EXPECT_THAT(ioctl(fd.get(), FIOCLEX), SyscallFailsWithErrno(EBADF));
-}
-
-TEST_F(IoctlTest, FIONBIOSucceeds) {
- EXPECT_FALSE(CheckNonBlocking(fd()));
- int set = 1;
- EXPECT_THAT(ioctl(fd(), FIONBIO, &set), SyscallSucceeds());
- EXPECT_TRUE(CheckNonBlocking(fd()));
- set = 0;
- EXPECT_THAT(ioctl(fd(), FIONBIO, &set), SyscallSucceeds());
- EXPECT_FALSE(CheckNonBlocking(fd()));
-}
-
-TEST_F(IoctlTest, FIONBIOFails) {
- EXPECT_THAT(ioctl(fd(), FIONBIO, nullptr), SyscallFailsWithErrno(EFAULT));
-}
-
-TEST_F(IoctlTest, FIONCLEXSucceeds) {
- EXPECT_THAT(ioctl(fd(), FIONCLEX), SyscallSucceeds());
- EXPECT_FALSE(CheckCloExec(fd()));
-}
-
-TEST_F(IoctlTest, FIOCLEXSucceeds) {
- EXPECT_THAT(ioctl(fd(), FIOCLEX), SyscallSucceeds());
- EXPECT_TRUE(CheckCloExec(fd()));
-}
-
-TEST_F(IoctlTest, FIOASYNCFails) {
- EXPECT_THAT(ioctl(fd(), FIOASYNC, nullptr), SyscallFailsWithErrno(EFAULT));
-}
-
-TEST_F(IoctlTest, FIOASYNCSucceeds) {
- // Not all FDs support FIOASYNC.
- const FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- int before = -1;
- ASSERT_THAT(before = fcntl(s.get(), F_GETFL), SyscallSucceeds());
-
- int set = 1;
- EXPECT_THAT(ioctl(s.get(), FIOASYNC, &set), SyscallSucceeds());
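- // FIOASYNC with a nonzero argument is equivalent to setting O_ASYNC via
- // fcntl(F_SETFL), which the F_GETFL checks below confirm.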
-
- int after_set = -1;
- ASSERT_THAT(after_set = fcntl(s.get(), F_GETFL), SyscallSucceeds());
- EXPECT_EQ(after_set, before | O_ASYNC) << "before was " << before;
-
- set = 0;
- EXPECT_THAT(ioctl(s.get(), FIOASYNC, &set), SyscallSucceeds());
-
- ASSERT_THAT(fcntl(s.get(), F_GETFL), SyscallSucceedsWithValue(before));
-}
-
-// Count of the number of SIGIOs handled.
-static volatile int io_received = 0;
-
-void inc_io_handler(int sig, siginfo_t* siginfo, void* arg) { io_received++; }
-
-TEST_F(IoctlTest, FIOASYNCNoTarget) {
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- // Count SIGIOs received.
- io_received = 0;
- struct sigaction sa;
- sa.sa_sigaction = inc_io_handler;
- sigfillset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART;
- auto sa_cleanup = ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGIO, sa));
-
- // Actually allow SIGIO delivery.
- auto mask_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_UNBLOCK, SIGIO));
-
- int set = 1;
- EXPECT_THAT(ioctl(pair->second_fd(), FIOASYNC, &set), SyscallSucceeds());
-
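- // No owner was set with FIOSETOWN/F_SETOWN, so SIGIO has no delivery target
- // and no signal should arrive.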
- constexpr char kData[] = "abc";
- ASSERT_THAT(WriteFd(pair->first_fd(), kData, sizeof(kData)),
- SyscallSucceedsWithValue(sizeof(kData)));
-
- EXPECT_EQ(io_received, 0);
-}
-
-TEST_F(IoctlTest, FIOASYNCSelfTarget) {
- // FIXME(b/120624367): gVisor erroneously sends SIGIO on close(2), which would
- // kill the test when pair goes out of scope. Temporarily ignore SIGIO so
- // that the close signal is ignored.
- struct sigaction sa;
- sa.sa_handler = SIG_IGN;
- auto early_sa_cleanup = ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGIO, sa));
-
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- // Count SIGIOs received.
- io_received = 0;
- sa.sa_sigaction = inc_io_handler;
- sigfillset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART;
- auto sa_cleanup = ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGIO, sa));
-
- // Actually allow SIGIO delivery.
- auto mask_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_UNBLOCK, SIGIO));
-
- int set = 1;
- EXPECT_THAT(ioctl(pair->second_fd(), FIOASYNC, &set), SyscallSucceeds());
-
- pid_t pid = getpid();
- EXPECT_THAT(ioctl(pair->second_fd(), FIOSETOWN, &pid), SyscallSucceeds());
-
- constexpr char kData[] = "abc";
- ASSERT_THAT(WriteFd(pair->first_fd(), kData, sizeof(kData)),
- SyscallSucceedsWithValue(sizeof(kData)));
-
- EXPECT_EQ(io_received, 1);
-}
-
-// Equivalent to FIOASYNCSelfTarget except that FIOSETOWN is called before
-// FIOASYNC.
-TEST_F(IoctlTest, FIOASYNCSelfTarget2) {
- // FIXME(b/120624367): gVisor erroneously sends SIGIO on close(2), which would
- // kill the test when pair goes out of scope. Temporarily ignore SIGIO so
- // that the close signal is ignored.
- struct sigaction sa;
- sa.sa_handler = SIG_IGN;
- auto early_sa_cleanup = ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGIO, sa));
-
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- // Count SIGIOs received.
- io_received = 0;
- sa.sa_sigaction = inc_io_handler;
- sigfillset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART;
- auto sa_cleanup = ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGIO, sa));
-
- // Actually allow SIGIO delivery.
- auto mask_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_UNBLOCK, SIGIO));
-
- pid_t pid = -1;
- EXPECT_THAT(pid = getpid(), SyscallSucceeds());
- EXPECT_THAT(ioctl(pair->second_fd(), FIOSETOWN, &pid), SyscallSucceeds());
-
- int set = 1;
- EXPECT_THAT(ioctl(pair->second_fd(), FIOASYNC, &set), SyscallSucceeds());
-
- constexpr char kData[] = "abc";
- ASSERT_THAT(WriteFd(pair->first_fd(), kData, sizeof(kData)),
- SyscallSucceedsWithValue(sizeof(kData)));
-
- EXPECT_EQ(io_received, 1);
-}
-
-// Check that closing an FD does not result in an event.
-TEST_F(IoctlTest, FIOASYNCSelfTargetClose) {
- // Count SIGIOs received.
- struct sigaction sa;
- io_received = 0;
- sa.sa_sigaction = inc_io_handler;
- sigfillset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART;
- auto sa_cleanup = ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGIO, sa));
-
- // Actually allow SIGIO delivery.
- auto mask_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_UNBLOCK, SIGIO));
-
- for (int i = 0; i < 2; i++) {
- auto pair = ASSERT_NO_ERRNO_AND_VALUE(
- UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- pid_t pid = getpid();
- EXPECT_THAT(ioctl(pair->second_fd(), FIOSETOWN, &pid), SyscallSucceeds());
-
- int set = 1;
- EXPECT_THAT(ioctl(pair->second_fd(), FIOASYNC, &set), SyscallSucceeds());
- }
-
- // FIXME(b/120624367): gVisor erroneously sends SIGIO on close.
- SKIP_IF(IsRunningOnGvisor());
-
- EXPECT_EQ(io_received, 0);
-}
-
-TEST_F(IoctlTest, FIOASYNCInvalidPID) {
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
- int set = 1;
- ASSERT_THAT(ioctl(pair->second_fd(), FIOASYNC, &set), SyscallSucceeds());
- pid_t pid = INT_MAX;
- // This succeeds (with behavior equivalent to a pid of 0) in Linux prior to
- // f73127356f34 "fs/fcntl: return -ESRCH in f_setown when pid/pgid can't be
- // found", and fails with ESRCH after that commit.
- EXPECT_THAT(ioctl(pair->second_fd(), FIOSETOWN, &pid),
- AnyOf(SyscallSucceeds(), SyscallFailsWithErrno(ESRCH)));
-}
-
-TEST_F(IoctlTest, FIOASYNCUnsetTarget) {
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- // Count SIGIOs received.
- io_received = 0;
- struct sigaction sa;
- sa.sa_sigaction = inc_io_handler;
- sigfillset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART;
- auto sa_cleanup = ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGIO, sa));
-
- // Actually allow SIGIO delivery.
- auto mask_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_UNBLOCK, SIGIO));
-
- int set = 1;
- EXPECT_THAT(ioctl(pair->second_fd(), FIOASYNC, &set), SyscallSucceeds());
-
- pid_t pid = getpid();
- EXPECT_THAT(ioctl(pair->second_fd(), FIOSETOWN, &pid), SyscallSucceeds());
-
- // Passing a PID of 0 unsets the target.
- pid = 0;
- EXPECT_THAT(ioctl(pair->second_fd(), FIOSETOWN, &pid), SyscallSucceeds());
-
- constexpr char kData[] = "abc";
- ASSERT_THAT(WriteFd(pair->first_fd(), kData, sizeof(kData)),
- SyscallSucceedsWithValue(sizeof(kData)));
-
- EXPECT_EQ(io_received, 0);
-}
-
-using IoctlTestSIOCGIFCONF = SimpleSocketTest;
-
-TEST_P(IoctlTestSIOCGIFCONF, ValidateNoArrayGetsLength) {
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Validate that passing no array still yields the required length.
- struct ifconf ifconf = {};
- ASSERT_THAT(ioctl(fd->get(), SIOCGIFCONF, &ifconf), SyscallSucceeds());
- ASSERT_GT(ifconf.ifc_len, 0);
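- // With ifc_req left null, the kernel reports only the buffer size needed to
- // hold every interface in ifc_len.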
-}
-
-// This test validates that we will only return a partial array list and not
- // partial ifreq structs.
-TEST_P(IoctlTestSIOCGIFCONF, ValidateNoPartialIfrsReturned) {
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- struct ifreq ifr = {};
- struct ifconf ifconf = {};
- ifconf.ifc_len = sizeof(ifr) - 1; // One byte too few.
- ifconf.ifc_ifcu.ifcu_req = &ifr;
-
- ASSERT_THAT(ioctl(fd->get(), SIOCGIFCONF, &ifconf), SyscallSucceeds());
- ASSERT_EQ(ifconf.ifc_len, 0);
- ASSERT_EQ(ifr.ifr_name[0], '\0'); // Nothing is returned.
-
- ifconf.ifc_len = sizeof(ifreq);
- ASSERT_THAT(ioctl(fd->get(), SIOCGIFCONF, &ifconf), SyscallSucceeds());
- ASSERT_GT(ifconf.ifc_len, 0);
- ASSERT_NE(ifr.ifr_name[0], '\0'); // An interface can now be returned.
-}
-
-TEST_P(IoctlTestSIOCGIFCONF, ValidateLoopbackIsPresent) {
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- struct ifconf ifconf = {};
- struct ifreq ifr[10] = {}; // Storage for up to 10 interfaces.
-
- ifconf.ifc_req = ifr;
- ifconf.ifc_len = sizeof(ifr);
-
- ASSERT_THAT(ioctl(fd->get(), SIOCGIFCONF, &ifconf), SyscallSucceeds());
- size_t num_if = ifconf.ifc_len / sizeof(struct ifreq);
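- // Only whole ifreq records are returned, so ifc_len is a multiple of
- // sizeof(struct ifreq).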
-
- // We should have at least one interface.
- ASSERT_GE(num_if, 1);
-
- // One of the interfaces should be a loopback.
- bool found_loopback = false;
- for (size_t i = 0; i < num_if; ++i) {
- if (strcmp(ifr[i].ifr_name, "lo") == 0) {
- // SIOCGIFCONF returns the IPv4 address of the interface; check it.
- ASSERT_EQ(ifr[i].ifr_addr.sa_family, AF_INET);
-
- // Validate the address is correct for loopback.
- sockaddr_in* sin = reinterpret_cast<sockaddr_in*>(&ifr[i].ifr_addr);
- ASSERT_EQ(htonl(sin->sin_addr.s_addr), INADDR_LOOPBACK);
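- // INADDR_LOOPBACK (127.0.0.1) is defined in host byte order, while
- // sin_addr holds the address in network byte order, hence the conversion.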
-
- found_loopback = true;
- break;
- }
- }
- ASSERT_TRUE(found_loopback);
-}
-
-std::vector<SocketKind> IoctlSocketTypes() {
- return {SimpleSocket(AF_UNIX, SOCK_STREAM, 0),
- SimpleSocket(AF_UNIX, SOCK_DGRAM, 0),
- SimpleSocket(AF_INET, SOCK_STREAM, 0),
- SimpleSocket(AF_INET6, SOCK_STREAM, 0),
- SimpleSocket(AF_INET, SOCK_DGRAM, 0),
- SimpleSocket(AF_INET6, SOCK_DGRAM, 0)};
-}
-
-INSTANTIATE_TEST_SUITE_P(IoctlTest, IoctlTestSIOCGIFCONF,
- ::testing::ValuesIn(IoctlSocketTypes()));
-
-} // namespace
-
-TEST_F(IoctlTest, FIOGETOWNSucceeds) {
- const FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- int get = -1;
- ASSERT_THAT(ioctl(s.get(), FIOGETOWN, &get), SyscallSucceeds());
- EXPECT_EQ(get, 0);
-}
-
-TEST_F(IoctlTest, SIOCGPGRPSucceeds) {
- const FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK | SOCK_CLOEXEC, 0));
-
- int get = -1;
- ASSERT_THAT(ioctl(s.get(), SIOCGPGRP, &get), SyscallSucceeds());
- EXPECT_EQ(get, 0);
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/ip6tables.cc b/test/syscalls/linux/ip6tables.cc
deleted file mode 100644
index e0e146067..000000000
--- a/test/syscalls/linux/ip6tables.cc
+++ /dev/null
@@ -1,233 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <linux/capability.h>
-#include <sys/socket.h>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/iptables.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/capability_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-constexpr char kNatTablename[] = "nat";
-constexpr char kErrorTarget[] = "ERROR";
-constexpr size_t kEmptyStandardEntrySize =
- sizeof(struct ip6t_entry) + sizeof(struct xt_standard_target);
-constexpr size_t kEmptyErrorEntrySize =
- sizeof(struct ip6t_entry) + sizeof(struct xt_error_target);
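-// An empty chain is a single ip6t_entry with no matches followed immediately
-// by its target, so its size is the sum of the two structs above.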
-
-TEST(IP6TablesBasic, FailSockoptNonRaw) {
- // Even if the user has CAP_NET_RAW, they shouldn't be able to use the
- // ip6tables sockopts with a non-raw socket.
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- int sock;
- ASSERT_THAT(sock = socket(AF_INET6, SOCK_DGRAM, 0), SyscallSucceeds());
-
- struct ipt_getinfo info = {};
- snprintf(info.name, XT_TABLE_MAXNAMELEN, "%s", kNatTablename);
- socklen_t info_size = sizeof(info);
- EXPECT_THAT(getsockopt(sock, SOL_IPV6, IP6T_SO_GET_INFO, &info, &info_size),
- SyscallFailsWithErrno(ENOPROTOOPT));
-
- EXPECT_THAT(close(sock), SyscallSucceeds());
-}
-
-TEST(IP6TablesBasic, GetInfoErrorPrecedence) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- int sock;
- ASSERT_THAT(sock = socket(AF_INET6, SOCK_DGRAM, 0), SyscallSucceeds());
-
- // When using the wrong type of socket and a too-short optlen, we should get
- // EINVAL.
- struct ipt_getinfo info = {};
- snprintf(info.name, XT_TABLE_MAXNAMELEN, "%s", kNatTablename);
- socklen_t info_size = sizeof(info) - 1;
- EXPECT_THAT(getsockopt(sock, SOL_IPV6, IP6T_SO_GET_INFO, &info, &info_size),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(IP6TablesBasic, GetEntriesErrorPrecedence) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- int sock;
- ASSERT_THAT(sock = socket(AF_INET6, SOCK_DGRAM, 0), SyscallSucceeds());
-
- // When using the wrong type of socket and a too-short optlen, we should get
- // EINVAL.
- struct ip6t_get_entries entries = {};
- socklen_t entries_size = sizeof(struct ip6t_get_entries) - 1;
- snprintf(entries.name, XT_TABLE_MAXNAMELEN, "%s", kNatTablename);
- EXPECT_THAT(
- getsockopt(sock, SOL_IPV6, IP6T_SO_GET_ENTRIES, &entries, &entries_size),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(IP6TablesBasic, GetRevision) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- int sock;
- ASSERT_THAT(sock = socket(AF_INET6, SOCK_RAW, IPPROTO_RAW),
- SyscallSucceeds());
-
- struct xt_get_revision rev = {};
- socklen_t rev_len = sizeof(rev);
-
- snprintf(rev.name, sizeof(rev.name), "REDIRECT");
- rev.revision = 0;
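- // IP6T_SO_GET_REVISION_TARGET reports whether the kernel supports the given
- // revision of the named target.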
-
- // Revision 0 exists.
- EXPECT_THAT(
- getsockopt(sock, SOL_IPV6, IP6T_SO_GET_REVISION_TARGET, &rev, &rev_len),
- SyscallSucceeds());
- EXPECT_EQ(rev.revision, 0);
-
- // Revisions > 0 don't exist.
- rev.revision = 1;
- EXPECT_THAT(
- getsockopt(sock, SOL_IPV6, IP6T_SO_GET_REVISION_TARGET, &rev, &rev_len),
- SyscallFailsWithErrno(EPROTONOSUPPORT));
-}
-
-// This tests the initial state of a machine with empty ip6tables via
-// getsockopt(IP6T_SO_GET_INFO). We don't have a guarantee that the iptables are
-// empty when running in native, but we can test that gVisor has the same
-// initial state that a newly-booted Linux machine would have.
-TEST(IP6TablesTest, InitialInfo) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- FileDescriptor sock =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET6, SOCK_RAW, IPPROTO_RAW));
-
- // Get info via sockopt.
- struct ipt_getinfo info = {};
- snprintf(info.name, XT_TABLE_MAXNAMELEN, "%s", kNatTablename);
- socklen_t info_size = sizeof(info);
- ASSERT_THAT(
- getsockopt(sock.get(), SOL_IPV6, IP6T_SO_GET_INFO, &info, &info_size),
- SyscallSucceeds());
-
- // The nat table supports PREROUTING, INPUT, OUTPUT, and POSTROUTING.
- unsigned int valid_hooks =
- (1 << NF_IP6_PRE_ROUTING) | (1 << NF_IP6_LOCAL_OUT) |
- (1 << NF_IP6_POST_ROUTING) | (1 << NF_IP6_LOCAL_IN);
- EXPECT_EQ(info.valid_hooks, valid_hooks);
-
- // Each chain consists of an empty entry with a standard target.
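- // hook_entry holds the byte offset of each built-in chain within the entry
- // table, so consecutive chains are kEmptyStandardEntrySize apart.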
- EXPECT_EQ(info.hook_entry[NF_IP6_PRE_ROUTING], 0);
- EXPECT_EQ(info.hook_entry[NF_IP6_LOCAL_IN], kEmptyStandardEntrySize);
- EXPECT_EQ(info.hook_entry[NF_IP6_LOCAL_OUT], kEmptyStandardEntrySize * 2);
- EXPECT_EQ(info.hook_entry[NF_IP6_POST_ROUTING], kEmptyStandardEntrySize * 3);
-
- // The underflow points are the same as the entry points.
- EXPECT_EQ(info.underflow[NF_IP6_PRE_ROUTING], 0);
- EXPECT_EQ(info.underflow[NF_IP6_LOCAL_IN], kEmptyStandardEntrySize);
- EXPECT_EQ(info.underflow[NF_IP6_LOCAL_OUT], kEmptyStandardEntrySize * 2);
- EXPECT_EQ(info.underflow[NF_IP6_POST_ROUTING], kEmptyStandardEntrySize * 3);
-
- // One entry for each chain, plus an error entry at the end.
- EXPECT_EQ(info.num_entries, 5);
-
- EXPECT_EQ(info.size, 4 * kEmptyStandardEntrySize + kEmptyErrorEntrySize);
- EXPECT_EQ(strcmp(info.name, kNatTablename), 0);
-}
-
-// This tests the initial state of a machine with empty ip6tables via
-// getsockopt(IP6T_SO_GET_ENTRIES). We don't have a guarantee that the iptables
-// are empty when running natively, but we can test that gVisor has the same
-// initial state that a newly-booted Linux machine would have.
-TEST(IP6TablesTest, InitialEntries) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- FileDescriptor sock =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET6, SOCK_RAW, IPPROTO_RAW));
-
- // Get info via sockopt.
- struct ipt_getinfo info = {};
- snprintf(info.name, XT_TABLE_MAXNAMELEN, "%s", kNatTablename);
- socklen_t info_size = sizeof(info);
- ASSERT_THAT(
- getsockopt(sock.get(), SOL_IPV6, IP6T_SO_GET_INFO, &info, &info_size),
- SyscallSucceeds());
-
- // Use info to get entries.
- socklen_t entries_size = sizeof(struct ip6t_get_entries) + info.size;
- struct ip6t_get_entries* entries =
- static_cast<struct ip6t_get_entries*>(malloc(entries_size));
- snprintf(entries->name, XT_TABLE_MAXNAMELEN, "%s", kNatTablename);
- entries->size = info.size;
- ASSERT_THAT(getsockopt(sock.get(), SOL_IPV6, IP6T_SO_GET_ENTRIES, entries,
- &entries_size),
- SyscallSucceeds());
-
- // Verify the name and size.
- ASSERT_EQ(info.size, entries->size);
- ASSERT_EQ(strcmp(entries->name, kNatTablename), 0);
-
- // Verify that the entrytable is 4 entries with accept targets and no matches
- // followed by a single error target.
- size_t entry_offset = 0;
- while (entry_offset < entries->size) {
- struct ip6t_entry* entry = reinterpret_cast<struct ip6t_entry*>(
- reinterpret_cast<char*>(entries->entrytable) + entry_offset);
-
- // ipv6 should be zeroed.
- struct ip6t_ip6 zeroed = {};
- ASSERT_EQ(memcmp(static_cast<void*>(&zeroed),
- static_cast<void*>(&entry->ipv6), sizeof(zeroed)),
- 0);
-
- // With no matches, the target immediately follows the entry header, so
- // target_offset equals sizeof(ip6t_entry).
- EXPECT_EQ(entry->target_offset, sizeof(ip6t_entry));
-
- if (entry_offset < kEmptyStandardEntrySize * 4) {
- // The first 4 entries are standard targets
- struct xt_standard_target* target =
- reinterpret_cast<struct xt_standard_target*>(entry->elems);
- EXPECT_EQ(entry->next_offset, kEmptyStandardEntrySize);
- EXPECT_EQ(target->target.u.user.target_size, sizeof(*target));
- EXPECT_EQ(strcmp(target->target.u.user.name, ""), 0);
- EXPECT_EQ(target->target.u.user.revision, 0);
- // Standard verdicts are encoded as -(verdict) - 1, so an accept verdict is
- // returned as -NF_ACCEPT - 1.
- EXPECT_EQ(target->verdict, -NF_ACCEPT - 1);
- } else {
- // The last entry is an error target
- struct xt_error_target* target =
- reinterpret_cast<struct xt_error_target*>(entry->elems);
- EXPECT_EQ(entry->next_offset, kEmptyErrorEntrySize);
- EXPECT_EQ(target->target.u.user.target_size, sizeof(*target));
- EXPECT_EQ(strcmp(target->target.u.user.name, kErrorTarget), 0);
- EXPECT_EQ(target->target.u.user.revision, 0);
- EXPECT_EQ(strcmp(target->errorname, kErrorTarget), 0);
- }
-
- entry_offset += entry->next_offset;
- }
-
- free(entries);
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/ip_socket_test_util.cc b/test/syscalls/linux/ip_socket_test_util.cc
deleted file mode 100644
index e90a7e411..000000000
--- a/test/syscalls/linux/ip_socket_test_util.cc
+++ /dev/null
@@ -1,263 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/ip_socket_test_util.h"
-
-#include <net/if.h>
-#include <netinet/in.h>
-#include <sys/socket.h>
-
-#include <cstring>
-
-namespace gvisor {
-namespace testing {
-
-uint32_t IPFromInetSockaddr(const struct sockaddr* addr) {
- auto* in_addr = reinterpret_cast<const struct sockaddr_in*>(addr);
- return in_addr->sin_addr.s_addr;
-}
-
-uint16_t PortFromInetSockaddr(const struct sockaddr* addr) {
- auto* in_addr = reinterpret_cast<const struct sockaddr_in*>(addr);
- return ntohs(in_addr->sin_port);
-}
-
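-// if_nametoindex() returns 0 on failure and sets errno, which is mapped to a
-// PosixError here.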
-PosixErrorOr<int> InterfaceIndex(std::string name) {
- int index = if_nametoindex(name.c_str());
- if (index) {
- return index;
- }
- return PosixError(errno);
-}
-
-namespace {
-
-std::string DescribeSocketType(int type) {
- return absl::StrCat(((type & SOCK_NONBLOCK) != 0) ? "non-blocking " : "",
- ((type & SOCK_CLOEXEC) != 0) ? "close-on-exec " : "");
-}
-
-} // namespace
-
-SocketPairKind IPv6TCPAcceptBindSocketPair(int type) {
- std::string description =
- absl::StrCat(DescribeSocketType(type), "connected IPv6 TCP socket");
- return SocketPairKind{
- description, AF_INET6, type | SOCK_STREAM, IPPROTO_TCP,
- TCPAcceptBindSocketPairCreator(AF_INET6, type | SOCK_STREAM, 0,
- /* dual_stack = */ false)};
-}
-
-SocketPairKind IPv4TCPAcceptBindSocketPair(int type) {
- std::string description =
- absl::StrCat(DescribeSocketType(type), "connected IPv4 TCP socket");
- return SocketPairKind{
- description, AF_INET, type | SOCK_STREAM, IPPROTO_TCP,
- TCPAcceptBindSocketPairCreator(AF_INET, type | SOCK_STREAM, 0,
- /* dual_stack = */ false)};
-}
-
-SocketPairKind DualStackTCPAcceptBindSocketPair(int type) {
- std::string description =
- absl::StrCat(DescribeSocketType(type), "connected dual stack TCP socket");
- return SocketPairKind{
- description, AF_INET6, type | SOCK_STREAM, IPPROTO_TCP,
- TCPAcceptBindSocketPairCreator(AF_INET6, type | SOCK_STREAM, 0,
- /* dual_stack = */ true)};
-}
-
-SocketPairKind IPv6TCPAcceptBindPersistentListenerSocketPair(int type) {
- std::string description =
- absl::StrCat(DescribeSocketType(type), "connected IPv6 TCP socket");
- return SocketPairKind{description, AF_INET6, type | SOCK_STREAM, IPPROTO_TCP,
- TCPAcceptBindPersistentListenerSocketPairCreator(
- AF_INET6, type | SOCK_STREAM, 0,
- /* dual_stack = */ false)};
-}
-
-SocketPairKind IPv4TCPAcceptBindPersistentListenerSocketPair(int type) {
- std::string description =
- absl::StrCat(DescribeSocketType(type), "connected IPv4 TCP socket");
- return SocketPairKind{description, AF_INET, type | SOCK_STREAM, IPPROTO_TCP,
- TCPAcceptBindPersistentListenerSocketPairCreator(
- AF_INET, type | SOCK_STREAM, 0,
- /* dual_stack = */ false)};
-}
-
-SocketPairKind DualStackTCPAcceptBindPersistentListenerSocketPair(int type) {
- std::string description =
- absl::StrCat(DescribeSocketType(type), "connected dual stack TCP socket");
- return SocketPairKind{description, AF_INET6, type | SOCK_STREAM, IPPROTO_TCP,
- TCPAcceptBindPersistentListenerSocketPairCreator(
- AF_INET6, type | SOCK_STREAM, 0,
- /* dual_stack = */ true)};
-}
-
-SocketPairKind IPv6UDPBidirectionalBindSocketPair(int type) {
- std::string description =
- absl::StrCat(DescribeSocketType(type), "connected IPv6 UDP socket");
- return SocketPairKind{
- description, AF_INET6, type | SOCK_DGRAM, IPPROTO_UDP,
- UDPBidirectionalBindSocketPairCreator(AF_INET6, type | SOCK_DGRAM, 0,
- /* dual_stack = */ false)};
-}
-
-SocketPairKind IPv4UDPBidirectionalBindSocketPair(int type) {
- std::string description =
- absl::StrCat(DescribeSocketType(type), "connected IPv4 UDP socket");
- return SocketPairKind{
- description, AF_INET, type | SOCK_DGRAM, IPPROTO_UDP,
- UDPBidirectionalBindSocketPairCreator(AF_INET, type | SOCK_DGRAM, 0,
- /* dual_stack = */ false)};
-}
-
-SocketPairKind DualStackUDPBidirectionalBindSocketPair(int type) {
- std::string description =
- absl::StrCat(DescribeSocketType(type), "connected dual stack UDP socket");
- return SocketPairKind{
- description, AF_INET6, type | SOCK_DGRAM, IPPROTO_UDP,
- UDPBidirectionalBindSocketPairCreator(AF_INET6, type | SOCK_DGRAM, 0,
- /* dual_stack = */ true)};
-}
-
-SocketPairKind IPv4UDPUnboundSocketPair(int type) {
- std::string description =
- absl::StrCat(DescribeSocketType(type), "IPv4 UDP socket");
- return SocketPairKind{
- description, AF_INET, type | SOCK_DGRAM, IPPROTO_UDP,
- UDPUnboundSocketPairCreator(AF_INET, type | SOCK_DGRAM, 0,
- /* dual_stack = */ false)};
-}
-
-SocketKind ICMPUnboundSocket(int type) {
- std::string description =
- absl::StrCat(DescribeSocketType(type), "ICMP socket");
- return SocketKind{
- description, AF_INET, type | SOCK_DGRAM, IPPROTO_ICMP,
- UnboundSocketCreator(AF_INET, type | SOCK_DGRAM, IPPROTO_ICMP)};
-}
-
-SocketKind ICMPv6UnboundSocket(int type) {
- std::string description =
- absl::StrCat(DescribeSocketType(type), "ICMPv6 socket");
- return SocketKind{
- description, AF_INET6, type | SOCK_DGRAM, IPPROTO_ICMPV6,
- UnboundSocketCreator(AF_INET6, type | SOCK_DGRAM, IPPROTO_ICMPV6)};
-}
-
-SocketKind IPv4UDPUnboundSocket(int type) {
- std::string description =
- absl::StrCat(DescribeSocketType(type), "IPv4 UDP socket");
- return SocketKind{
- description, AF_INET, type | SOCK_DGRAM, IPPROTO_UDP,
- UnboundSocketCreator(AF_INET, type | SOCK_DGRAM, IPPROTO_UDP)};
-}
-
-SocketKind IPv6UDPUnboundSocket(int type) {
- std::string description =
- absl::StrCat(DescribeSocketType(type), "IPv6 UDP socket");
- return SocketKind{
- description, AF_INET6, type | SOCK_DGRAM, IPPROTO_UDP,
- UnboundSocketCreator(AF_INET6, type | SOCK_DGRAM, IPPROTO_UDP)};
-}
-
-SocketKind IPv4TCPUnboundSocket(int type) {
- std::string description =
- absl::StrCat(DescribeSocketType(type), "IPv4 TCP socket");
- return SocketKind{
- description, AF_INET, type | SOCK_STREAM, IPPROTO_TCP,
- UnboundSocketCreator(AF_INET, type | SOCK_STREAM, IPPROTO_TCP)};
-}
-
-SocketKind IPv6TCPUnboundSocket(int type) {
- std::string description =
- absl::StrCat(DescribeSocketType(type), "IPv6 TCP socket");
- return SocketKind{
- description, AF_INET6, type | SOCK_STREAM, IPPROTO_TCP,
- UnboundSocketCreator(AF_INET6, type | SOCK_STREAM, IPPROTO_TCP)};
-}
-
-PosixError IfAddrHelper::Load() {
- Release();
-#ifndef ANDROID
- RETURN_ERROR_IF_SYSCALL_FAIL(getifaddrs(&ifaddr_));
-#else
- // Android does not support getifaddrs in r22.
- return PosixError(ENOSYS, "getifaddrs");
-#endif
- return NoError();
-}
-
-void IfAddrHelper::Release() {
- if (ifaddr_) {
-#ifndef ANDROID
- // Android does not support freeifaddrs in r22.
- freeifaddrs(ifaddr_);
-#endif
- ifaddr_ = nullptr;
- }
-}
-
-std::vector<std::string> IfAddrHelper::InterfaceList(int family) const {
- std::vector<std::string> names;
- for (auto ifa = ifaddr_; ifa != NULL; ifa = ifa->ifa_next) {
- if (ifa->ifa_addr == NULL || ifa->ifa_addr->sa_family != family) {
- continue;
- }
- names.emplace(names.end(), ifa->ifa_name);
- }
- return names;
-}
-
-const sockaddr* IfAddrHelper::GetAddr(int family, std::string name) const {
- for (auto ifa = ifaddr_; ifa != NULL; ifa = ifa->ifa_next) {
- if (ifa->ifa_addr == NULL || ifa->ifa_addr->sa_family != family) {
- continue;
- }
- if (name == ifa->ifa_name) {
- return ifa->ifa_addr;
- }
- }
- return nullptr;
-}
-
-PosixErrorOr<int> IfAddrHelper::GetIndex(std::string name) const {
- return InterfaceIndex(name);
-}
-
-std::string GetAddr4Str(const in_addr* a) {
- char str[INET_ADDRSTRLEN];
- inet_ntop(AF_INET, a, str, sizeof(str));
- return std::string(str);
-}
-
-std::string GetAddr6Str(const in6_addr* a) {
- char str[INET6_ADDRSTRLEN];
- inet_ntop(AF_INET6, a, str, sizeof(str));
- return std::string(str);
-}
-
-std::string GetAddrStr(const sockaddr* a) {
- if (a->sa_family == AF_INET) {
- auto src = &(reinterpret_cast<const sockaddr_in*>(a)->sin_addr);
- return GetAddr4Str(src);
- } else if (a->sa_family == AF_INET6) {
- auto src = &(reinterpret_cast<const sockaddr_in6*>(a)->sin6_addr);
- return GetAddr6Str(src);
- }
- return std::string("<invalid>");
-}
-
-} // namespace testing
-} // namespace gvisor
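The helpers deleted above (GetAddr4Str, GetAddr6Str, GetAddrStr) are thin wrappers around inet_ntop. For reference, a minimal standalone sketch of the same formatting logic follows; FormatSockaddr is a hypothetical name used only for this illustration and is not part of the deleted test library.

// Minimal sketch mirroring the deleted GetAddrStr(): format a sockaddr as a
// printable string via inet_ntop.
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

#include <cstdio>
#include <string>

static std::string FormatSockaddr(const sockaddr* a) {
  char str[INET6_ADDRSTRLEN] = {};
  if (a->sa_family == AF_INET) {
    const auto* in4 = reinterpret_cast<const sockaddr_in*>(a);
    inet_ntop(AF_INET, &in4->sin_addr, str, sizeof(str));
  } else if (a->sa_family == AF_INET6) {
    const auto* in6 = reinterpret_cast<const sockaddr_in6*>(a);
    inet_ntop(AF_INET6, &in6->sin6_addr, str, sizeof(str));
  } else {
    return "<invalid>";
  }
  return std::string(str);
}

int main() {
  sockaddr_in loopback = {};
  loopback.sin_family = AF_INET;
  loopback.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
  // Prints "127.0.0.1".
  std::printf("%s\n",
              FormatSockaddr(reinterpret_cast<const sockaddr*>(&loopback)).c_str());
  return 0;
}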
diff --git a/test/syscalls/linux/ip_socket_test_util.h b/test/syscalls/linux/ip_socket_test_util.h
deleted file mode 100644
index bde481f7e..000000000
--- a/test/syscalls/linux/ip_socket_test_util.h
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_IP_SOCKET_TEST_UTIL_H_
-#define GVISOR_TEST_SYSCALLS_IP_SOCKET_TEST_UTIL_H_
-
-#include <arpa/inet.h>
-#include <ifaddrs.h>
-#include <sys/types.h>
-
-#include <string>
-
-#include "test/syscalls/linux/socket_test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Extracts the IP address from an inet sockaddr in network byte order.
-uint32_t IPFromInetSockaddr(const struct sockaddr* addr);
-
-// Extracts the port from an inet sockaddr in host byte order.
-uint16_t PortFromInetSockaddr(const struct sockaddr* addr);
-
-// InterfaceIndex returns the index of the named interface.
-PosixErrorOr<int> InterfaceIndex(std::string name);
-
-// IPv6TCPAcceptBindSocketPair returns a SocketPairKind that represents
-// SocketPairs created with bind() and accept() syscalls with AF_INET6 and the
-// given type bound to the IPv6 loopback.
-SocketPairKind IPv6TCPAcceptBindSocketPair(int type);
-
-// IPv4TCPAcceptBindSocketPair returns a SocketPairKind that represents
-// SocketPairs created with bind() and accept() syscalls with AF_INET and the
-// given type bound to the IPv4 loopback.
-SocketPairKind IPv4TCPAcceptBindSocketPair(int type);
-
-// DualStackTCPAcceptBindSocketPair returns a SocketPairKind that represents
-// SocketPairs created with bind() and accept() syscalls with AF_INET6 and the
-// given type bound to the IPv4 loopback.
-SocketPairKind DualStackTCPAcceptBindSocketPair(int type);
-
-// IPv6TCPAcceptBindPersistentListenerSocketPair is like
-// IPv6TCPAcceptBindSocketPair except it uses a persistent listening socket to
-// create all socket pairs.
-SocketPairKind IPv6TCPAcceptBindPersistentListenerSocketPair(int type);
-
-// IPv4TCPAcceptBindPersistentListenerSocketPair is like
-// IPv4TCPAcceptBindSocketPair except it uses a persistent listening socket to
-// create all socket pairs.
-SocketPairKind IPv4TCPAcceptBindPersistentListenerSocketPair(int type);
-
-// DualStackTCPAcceptBindPersistentListenerSocketPair is like
-// DualStackTCPAcceptBindSocketPair except it uses a persistent listening socket
-// to create all socket pairs.
-SocketPairKind DualStackTCPAcceptBindPersistentListenerSocketPair(int type);
-
-// IPv6UDPBidirectionalBindSocketPair returns a SocketPairKind that represents
-// SocketPairs created with bind() and connect() syscalls with AF_INET6 and the
-// given type bound to the IPv6 loopback.
-SocketPairKind IPv6UDPBidirectionalBindSocketPair(int type);
-
-// IPv4UDPBidirectionalBindSocketPair returns a SocketPairKind that represents
-// SocketPairs created with bind() and connect() syscalls with AF_INET and the
-// given type bound to the IPv4 loopback.
-SocketPairKind IPv4UDPBidirectionalBindSocketPair(int type);
-
-// DualStackUDPBidirectionalBindSocketPair returns a SocketPairKind that
-// represents SocketPairs created with bind() and connect() syscalls with
-// AF_INET6 and the given type bound to the IPv4 loopback.
-SocketPairKind DualStackUDPBidirectionalBindSocketPair(int type);
-
-// IPv4UDPUnboundSocketPair returns a SocketPairKind that represents
-// SocketPairs created with AF_INET and the given type.
-SocketPairKind IPv4UDPUnboundSocketPair(int type);
-
-// ICMPUnboundSocket returns a SocketKind that represents a SimpleSocket created
-// with AF_INET, SOCK_DGRAM, IPPROTO_ICMP, and the given type.
-SocketKind ICMPUnboundSocket(int type);
-
-// ICMPv6UnboundSocket returns a SocketKind that represents a SimpleSocket
-// created with AF_INET6, SOCK_DGRAM, IPPROTO_ICMPV6, and the given type.
-SocketKind ICMPv6UnboundSocket(int type);
-
-// IPv4UDPUnboundSocket returns a SocketKind that represents a SimpleSocket
-// created with AF_INET, SOCK_DGRAM, IPPROTO_UDP, and the given type.
-SocketKind IPv4UDPUnboundSocket(int type);
-
-// IPv6UDPUnboundSocket returns a SocketKind that represents a SimpleSocket
-// created with AF_INET6, SOCK_DGRAM, IPPROTO_UDP, and the given type.
-SocketKind IPv6UDPUnboundSocket(int type);
-
-// IPv4TCPUnboundSocket returns a SocketKind that represents a SimpleSocket
-// created with AF_INET, SOCK_STREAM, IPPROTO_TCP and the given type.
-SocketKind IPv4TCPUnboundSocket(int type);
-
-// IPv6TCPUnboundSocket returns a SocketKind that represents a SimpleSocket
-// created with AF_INET6, SOCK_STREAM, IPPROTO_TCP and the given type.
-SocketKind IPv6TCPUnboundSocket(int type);
-
-// IfAddrHelper is a helper class that determines the local interfaces present
- // and provides functions to obtain their names, index numbers, and IP addresses.
-class IfAddrHelper {
- public:
- IfAddrHelper() : ifaddr_(nullptr) {}
- ~IfAddrHelper() { Release(); }
-
- PosixError Load();
- void Release();
-
- std::vector<std::string> InterfaceList(int family) const;
-
- const sockaddr* GetAddr(int family, std::string name) const;
- PosixErrorOr<int> GetIndex(std::string name) const;
-
- private:
- struct ifaddrs* ifaddr_;
-};
-
-// GetAddr4Str returns the given IPv4 network address structure as a string.
-std::string GetAddr4Str(const in_addr* a);
-
-// GetAddr6Str returns the given IPv6 network address structure as a string.
-std::string GetAddr6Str(const in6_addr* a);
-
-// GetAddrStr returns the given IPv4 or IPv6 network address structure as a
-// string.
-std::string GetAddrStr(const sockaddr* a);
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_IP_SOCKET_TEST_UTIL_H_
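The IfAddrHelper class declared above was used by the deleted tests to enumerate local interfaces. The sketch below shows typical usage, assuming the deleted ip_socket_test_util.h and the gVisor test macros; IfAddrHelperExample is a hypothetical test name, not one of the removed tests.

// Sketch of typical IfAddrHelper usage inside a gVisor syscall test.
#include <sys/socket.h>

#include <iostream>
#include <string>

#include "gtest/gtest.h"
#include "test/syscalls/linux/ip_socket_test_util.h"
#include "test/util/posix_error.h"

namespace gvisor {
namespace testing {
namespace {

TEST(IfAddrHelperExample, ListsIPv4Interfaces) {
  IfAddrHelper ifaddrs;
  ASSERT_NO_ERRNO(ifaddrs.Load());

  // List every interface with an IPv4 address and print it.
  for (const std::string& name : ifaddrs.InterfaceList(AF_INET)) {
    const sockaddr* addr = ifaddrs.GetAddr(AF_INET, name);
    ASSERT_NE(addr, nullptr);
    std::cout << name << " -> " << GetAddrStr(addr) << std::endl;
  }
}

}  // namespace
}  // namespace testing
}  // namespace gvisor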
diff --git a/test/syscalls/linux/iptables.cc b/test/syscalls/linux/iptables.cc
deleted file mode 100644
index 22550b800..000000000
--- a/test/syscalls/linux/iptables.cc
+++ /dev/null
@@ -1,274 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/iptables.h"
-
-#include <arpa/inet.h>
-#include <linux/capability.h>
-#include <linux/netfilter/x_tables.h>
-#include <net/if.h>
-#include <netinet/in.h>
-#include <netinet/ip.h>
-#include <netinet/ip_icmp.h>
-#include <stdio.h>
-#include <sys/poll.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <algorithm>
-
-#include "gtest/gtest.h"
-#include "test/util/capability_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-constexpr char kNatTablename[] = "nat";
-constexpr char kErrorTarget[] = "ERROR";
-constexpr size_t kEmptyStandardEntrySize =
- sizeof(struct ipt_entry) + sizeof(struct ipt_standard_target);
-constexpr size_t kEmptyErrorEntrySize =
- sizeof(struct ipt_entry) + sizeof(struct ipt_error_target);
-
-TEST(IPTablesBasic, CreateSocket) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- int sock;
- ASSERT_THAT(sock = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP),
- SyscallSucceeds());
-
- ASSERT_THAT(close(sock), SyscallSucceeds());
-}
-
-TEST(IPTablesBasic, FailSockoptNonRaw) {
- // Even if the user has CAP_NET_RAW, they shouldn't be able to use the
- // iptables sockopts with a non-raw socket.
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- int sock;
- ASSERT_THAT(sock = socket(AF_INET, SOCK_DGRAM, 0), SyscallSucceeds());
-
- struct ipt_getinfo info = {};
- snprintf(info.name, XT_TABLE_MAXNAMELEN, "%s", kNatTablename);
- socklen_t info_size = sizeof(info);
- EXPECT_THAT(getsockopt(sock, SOL_IP, IPT_SO_GET_INFO, &info, &info_size),
- SyscallFailsWithErrno(ENOPROTOOPT));
-
- ASSERT_THAT(close(sock), SyscallSucceeds());
-}
-
-TEST(IPTablesBasic, GetInfoErrorPrecedence) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- int sock;
- ASSERT_THAT(sock = socket(AF_INET, SOCK_DGRAM, 0), SyscallSucceeds());
-
- // When using the wrong type of socket and a too-short optlen, we should get
- // EINVAL.
- struct ipt_getinfo info = {};
- snprintf(info.name, XT_TABLE_MAXNAMELEN, "%s", kNatTablename);
- socklen_t info_size = sizeof(info) - 1;
- ASSERT_THAT(getsockopt(sock, SOL_IP, IPT_SO_GET_INFO, &info, &info_size),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(IPTablesBasic, GetEntriesErrorPrecedence) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- int sock;
- ASSERT_THAT(sock = socket(AF_INET, SOCK_DGRAM, 0), SyscallSucceeds());
-
- // When using the wrong type of socket and a too-short optlen, we should get
- // EINVAL.
- struct ipt_get_entries entries = {};
- socklen_t entries_size = sizeof(struct ipt_get_entries) - 1;
- snprintf(entries.name, XT_TABLE_MAXNAMELEN, "%s", kNatTablename);
- ASSERT_THAT(
- getsockopt(sock, SOL_IP, IPT_SO_GET_ENTRIES, &entries, &entries_size),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(IPTablesBasic, OriginalDstErrors) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- int sock;
- ASSERT_THAT(sock = socket(AF_INET, SOCK_STREAM, 0), SyscallSucceeds());
-
- // Sockets not affected by NAT should fail to find an original destination.
- struct sockaddr_in addr = {};
- socklen_t addr_len = sizeof(addr);
- EXPECT_THAT(getsockopt(sock, SOL_IP, SO_ORIGINAL_DST, &addr, &addr_len),
- SyscallFailsWithErrno(ENOTCONN));
-}
-
-TEST(IPTablesBasic, GetRevision) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- int sock;
- ASSERT_THAT(sock = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP),
- SyscallSucceeds());
-
- struct xt_get_revision rev = {};
- socklen_t rev_len = sizeof(rev);
-
- snprintf(rev.name, sizeof(rev.name), "REDIRECT");
- rev.revision = 0;
-
- // Revision 0 exists.
- EXPECT_THAT(
- getsockopt(sock, SOL_IP, IPT_SO_GET_REVISION_TARGET, &rev, &rev_len),
- SyscallSucceeds());
- EXPECT_EQ(rev.revision, 0);
-
- // Revisions > 0 don't exist.
- rev.revision = 1;
- EXPECT_THAT(
- getsockopt(sock, SOL_IP, IPT_SO_GET_REVISION_TARGET, &rev, &rev_len),
- SyscallFailsWithErrno(EPROTONOSUPPORT));
-}
-
-// Fixture for iptables tests.
-class IPTablesTest : public ::testing::Test {
- protected:
- // Creates a socket to be used in tests.
- void SetUp() override;
-
- // Closes the socket created by SetUp().
- void TearDown() override;
-
- // The socket via which to manipulate iptables.
- int s_;
-};
-
-void IPTablesTest::SetUp() {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- ASSERT_THAT(s_ = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP), SyscallSucceeds());
-}
-
-void IPTablesTest::TearDown() {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- EXPECT_THAT(close(s_), SyscallSucceeds());
-}
-
-// This tests the initial state of a machine with empty iptables. We don't have
- // a guarantee that the iptables are empty when running natively, but we can
-// test that gVisor has the same initial state that a newly-booted Linux machine
-// would have.
-TEST_F(IPTablesTest, InitialState) {
- SKIP_IF(!IsRunningOnGvisor());
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- //
- // Get info via sockopt.
- //
- struct ipt_getinfo info = {};
- snprintf(info.name, XT_TABLE_MAXNAMELEN, "%s", kNatTablename);
- socklen_t info_size = sizeof(info);
- ASSERT_THAT(getsockopt(s_, SOL_IP, IPT_SO_GET_INFO, &info, &info_size),
- SyscallSucceeds());
-
- // The nat table supports PREROUTING, INPUT, OUTPUT, and POSTROUTING.
- unsigned int valid_hooks = (1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_LOCAL_OUT) |
- (1 << NF_IP_POST_ROUTING) | (1 << NF_IP_LOCAL_IN);
-
- EXPECT_EQ(info.valid_hooks, valid_hooks);
-
- // Each chain consists of an empty entry with a standard target.
- EXPECT_EQ(info.hook_entry[NF_IP_PRE_ROUTING], 0);
- EXPECT_EQ(info.hook_entry[NF_IP_LOCAL_IN], kEmptyStandardEntrySize);
- EXPECT_EQ(info.hook_entry[NF_IP_LOCAL_OUT], kEmptyStandardEntrySize * 2);
- EXPECT_EQ(info.hook_entry[NF_IP_POST_ROUTING], kEmptyStandardEntrySize * 3);
-
- // The underflow points are the same as the entry points.
- EXPECT_EQ(info.underflow[NF_IP_PRE_ROUTING], 0);
- EXPECT_EQ(info.underflow[NF_IP_LOCAL_IN], kEmptyStandardEntrySize);
- EXPECT_EQ(info.underflow[NF_IP_LOCAL_OUT], kEmptyStandardEntrySize * 2);
- EXPECT_EQ(info.underflow[NF_IP_POST_ROUTING], kEmptyStandardEntrySize * 3);
-
- // One entry for each chain, plus an error entry at the end.
- EXPECT_EQ(info.num_entries, 5);
-
- EXPECT_EQ(info.size, 4 * kEmptyStandardEntrySize + kEmptyErrorEntrySize);
- EXPECT_EQ(strcmp(info.name, kNatTablename), 0);
-
- //
- // Use info to get entries.
- //
- socklen_t entries_size = sizeof(struct ipt_get_entries) + info.size;
- struct ipt_get_entries* entries =
- static_cast<struct ipt_get_entries*>(malloc(entries_size));
- snprintf(entries->name, XT_TABLE_MAXNAMELEN, "%s", kNatTablename);
- entries->size = info.size;
- ASSERT_THAT(
- getsockopt(s_, SOL_IP, IPT_SO_GET_ENTRIES, entries, &entries_size),
- SyscallSucceeds());
-
- // Verify the name and size.
- ASSERT_EQ(info.size, entries->size);
- ASSERT_EQ(strcmp(entries->name, kNatTablename), 0);
-
- // Verify that the entrytable consists of 4 entries with accept targets and
- // no matches, followed by a single error target.
- size_t entry_offset = 0;
- while (entry_offset < entries->size) {
- struct ipt_entry* entry = reinterpret_cast<struct ipt_entry*>(
- reinterpret_cast<char*>(entries->entrytable) + entry_offset);
-
- // ip should be zeroes.
- struct ipt_ip zeroed = {};
- EXPECT_EQ(memcmp(static_cast<void*>(&zeroed),
- static_cast<void*>(&entry->ip), sizeof(zeroed)),
- 0);
-
- // With no matches, target_offset points just past the entry header.
- EXPECT_EQ(entry->target_offset, sizeof(ipt_entry));
-
- if (entry_offset < kEmptyStandardEntrySize * 4) {
- // The first 4 entries are standard targets
- struct ipt_standard_target* target =
- reinterpret_cast<struct ipt_standard_target*>(entry->elems);
- EXPECT_EQ(entry->next_offset, kEmptyStandardEntrySize);
- EXPECT_EQ(target->target.u.user.target_size, sizeof(*target));
- EXPECT_EQ(strcmp(target->target.u.user.name, ""), 0);
- EXPECT_EQ(target->target.u.user.revision, 0);
- // Standard verdicts are encoded as -(verdict) - 1, so this is NF_ACCEPT.
- EXPECT_EQ(target->verdict, -NF_ACCEPT - 1);
- } else {
- // The last entry is an error target
- struct ipt_error_target* target =
- reinterpret_cast<struct ipt_error_target*>(entry->elems);
- EXPECT_EQ(entry->next_offset, kEmptyErrorEntrySize);
- EXPECT_EQ(target->target.u.user.target_size, sizeof(*target));
- EXPECT_EQ(strcmp(target->target.u.user.name, kErrorTarget), 0);
- EXPECT_EQ(target->target.u.user.revision, 0);
- EXPECT_EQ(strcmp(target->errorname, kErrorTarget), 0);
- }
-
- entry_offset += entry->next_offset;
- }
-
- free(entries);
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
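The deleted tests above center on the IPT_SO_GET_INFO and IPT_SO_GET_ENTRIES getsockopt calls. A minimal standalone sketch of the info query follows; it assumes the deleted iptables.h (reproduced later in this diff) for IPT_SO_GET_INFO and struct ipt_getinfo, and it requires CAP_NET_RAW to create the raw socket.

// Standalone sketch of the getsockopt(IPT_SO_GET_INFO) query exercised above.
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#include "test/syscalls/linux/iptables.h"

int main() {
  int sock = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
  if (sock < 0) {
    perror("socket (CAP_NET_RAW required)");
    return 1;
  }

  struct ipt_getinfo info = {};
  snprintf(info.name, XT_TABLE_MAXNAMELEN, "%s", "nat");
  socklen_t info_size = sizeof(info);
  if (getsockopt(sock, SOL_IP, IPT_SO_GET_INFO, &info, &info_size) != 0) {
    perror("getsockopt(IPT_SO_GET_INFO)");
    close(sock);
    return 1;
  }

  // hook_entry/underflow offsets and the entry table size mirror what the
  // InitialState test asserts on.
  printf("table=%s num_entries=%u size=%u valid_hooks=%#x\n", info.name,
         info.num_entries, info.size, info.valid_hooks);
  close(sock);
  return 0;
}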
diff --git a/test/syscalls/linux/iptables.h b/test/syscalls/linux/iptables.h
deleted file mode 100644
index d0fc10fea..000000000
--- a/test/syscalls/linux/iptables.h
+++ /dev/null
@@ -1,302 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// There are a number of structs and values that we can't #include because of a
-// difference between C and C++ (C++ won't let you implicitly cast from void* to
-// struct something*). We re-define them here.
-
-#ifndef GVISOR_TEST_SYSCALLS_IPTABLES_TYPES_H_
-#define GVISOR_TEST_SYSCALLS_IPTABLES_TYPES_H_
-
- // Netfilter headers require some headers to precede them.
-// clang-format off
-#include <netinet/in.h>
-#include <stddef.h>
-// clang-format on
-
-#include <linux/netfilter/x_tables.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/netfilter_ipv6.h>
-#include <net/if.h>
-#include <netinet/ip.h>
-#include <stdint.h>
-
-//
-// IPv4 ABI.
-//
-
-#define ipt_standard_target xt_standard_target
-#define ipt_entry_target xt_entry_target
-#define ipt_error_target xt_error_target
-
-enum SockOpts {
- // For setsockopt.
- IPT_BASE_CTL = 64,
- IPT_SO_SET_REPLACE = IPT_BASE_CTL,
- IPT_SO_SET_ADD_COUNTERS = IPT_BASE_CTL + 1,
- IPT_SO_SET_MAX = IPT_SO_SET_ADD_COUNTERS,
-
- // For getsockopt.
- IPT_SO_GET_INFO = IPT_BASE_CTL,
- IPT_SO_GET_ENTRIES = IPT_BASE_CTL + 1,
- IPT_SO_GET_REVISION_MATCH = IPT_BASE_CTL + 2,
- IPT_SO_GET_REVISION_TARGET = IPT_BASE_CTL + 3,
- IPT_SO_GET_MAX = IPT_SO_GET_REVISION_TARGET
-};
-
-// ipt_ip specifies basic matching criteria that can be applied by examining
-// only the IP header of a packet.
-struct ipt_ip {
- // Source IP address.
- struct in_addr src;
-
- // Destination IP address.
- struct in_addr dst;
-
- // Source IP address mask.
- struct in_addr smsk;
-
- // Destination IP address mask.
- struct in_addr dmsk;
-
- // Input interface.
- char iniface[IFNAMSIZ];
-
- // Output interface.
- char outiface[IFNAMSIZ];
-
- // Input interface mask.
- unsigned char iniface_mask[IFNAMSIZ];
-
- // Output interface mask.
- unsigned char outiface_mask[IFNAMSIZ];
-
- // Transport protocol.
- uint16_t proto;
-
- // Flags.
- uint8_t flags;
-
- // Inverse flags.
- uint8_t invflags;
-};
-
-// ipt_entry is an iptables rule. It contains information about what packets the
-// rule matches and what action (target) to perform for matching packets.
-struct ipt_entry {
- // Basic matching information used to match a packet's IP header.
- struct ipt_ip ip;
-
- // A caching field that isn't used by userspace.
- unsigned int nfcache;
-
- // The number of bytes between the start of this ipt_entry struct and the
- // rule's target.
- uint16_t target_offset;
-
- // The total size of this rule, from the beginning of the entry to the end of
- // the target.
- uint16_t next_offset;
-
- // A return pointer not used by userspace.
- unsigned int comefrom;
-
- // Counters for packets and bytes, which we don't yet implement.
- struct xt_counters counters;
-
- // The data for all of this rule's matches, followed by the target. This runs
- // beyond the value of sizeof(struct ipt_entry).
- unsigned char elems[0];
-};
-
-// Passed to getsockopt(IPT_SO_GET_INFO).
-struct ipt_getinfo {
- // The name of the table. This is the only field the user fills in; the rest
- // is filled in by getsockopt. Currently "nat" and "mangle" are supported.
- char name[XT_TABLE_MAXNAMELEN];
-
- // A bitmap of which hooks apply to the table. For example, a table with hooks
- // PREROUTING and FORWARD has the value
- // (1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_FORWARD).
- unsigned int valid_hooks;
-
- // The offset into the entry table for each valid hook. The entry table is
- // returned by getsockopt(IPT_SO_GET_ENTRIES).
- unsigned int hook_entry[NF_IP_NUMHOOKS];
-
- // For each valid hook, the underflow is the offset into the entry table to
- // jump to in case traversing the table yields no verdict (although it is
- // unclear how that could happen: builtin chains always end with a policy, and
- // user-defined chains always end with a RETURN).
- //
- // The entry referred to must be an "unconditional" entry, meaning it has no
- // matches, specifies no IP criteria, and either DROPs or ACCEPTs packets. It
- // basically has to be capable of making a definitive decision no matter what
- // it's passed.
- unsigned int underflow[NF_IP_NUMHOOKS];
-
- // The number of entries in the entry table returned by
- // getsockopt(IPT_SO_GET_ENTRIES).
- unsigned int num_entries;
-
- // The size of the entry table returned by getsockopt(IPT_SO_GET_ENTRIES).
- unsigned int size;
-};
-
-// Passed to getsockopt(IPT_SO_GET_ENTRIES).
-struct ipt_get_entries {
- // The name of the table. The user fills this in. Currently "nat" and "mangle"
- // are supported.
- char name[XT_TABLE_MAXNAMELEN];
-
- // The size of the entry table in bytes. The user fills this in with the value
- // from struct ipt_getinfo.size.
- unsigned int size;
-
- // The entries for the given table. This will run past the size defined by
- // sizeof(struct ipt_get_entries).
- struct ipt_entry entrytable[0];
-};
-
-// Passed to setsockopt(SO_SET_REPLACE).
-struct ipt_replace {
- // The name of the table.
- char name[XT_TABLE_MAXNAMELEN];
-
- // The same as struct ipt_getinfo.valid_hooks. Users don't change this.
- unsigned int valid_hooks;
-
- // The same as struct ipt_getinfo.num_entries.
- unsigned int num_entries;
-
- // The same as struct ipt_getinfo.size.
- unsigned int size;
-
- // The same as struct ipt_getinfo.hook_entry.
- unsigned int hook_entry[NF_IP_NUMHOOKS];
-
- // The same as struct ipt_getinfo.underflow.
- unsigned int underflow[NF_IP_NUMHOOKS];
-
- // The number of counters, which should equal the number of entries.
- unsigned int num_counters;
-
- // The unchanged values from each ipt_entry's counters.
- struct xt_counters* counters;
-
- // The entries to write to the table. This will run past the size defined by
- // sizeof(struct ipt_replace).
- struct ipt_entry entries[0];
-};
-
-//
-// IPv6 ABI.
-//
-
-enum SockOpts6 {
- // For setsockopt.
- IP6T_BASE_CTL = 64,
- IP6T_SO_SET_REPLACE = IP6T_BASE_CTL,
- IP6T_SO_SET_ADD_COUNTERS = IP6T_BASE_CTL + 1,
- IP6T_SO_SET_MAX = IP6T_SO_SET_ADD_COUNTERS,
-
- // For getsockopt.
- IP6T_SO_GET_INFO = IP6T_BASE_CTL,
- IP6T_SO_GET_ENTRIES = IP6T_BASE_CTL + 1,
- IP6T_SO_GET_REVISION_MATCH = IP6T_BASE_CTL + 4,
- IP6T_SO_GET_REVISION_TARGET = IP6T_BASE_CTL + 5,
- IP6T_SO_GET_MAX = IP6T_SO_GET_REVISION_TARGET
-};
-
-// ip6t_ip6 specifies basic matching criteria that can be applied by examining
-// only the IP header of a packet.
-struct ip6t_ip6 {
- // Source IP address.
- struct in6_addr src;
-
- // Destination IP address.
- struct in6_addr dst;
-
- // Source IP address mask.
- struct in6_addr smsk;
-
- // Destination IP address mask.
- struct in6_addr dmsk;
-
- // Input interface.
- char iniface[IFNAMSIZ];
-
- // Output interface.
- char outiface[IFNAMSIZ];
-
- // Input interface mask.
- unsigned char iniface_mask[IFNAMSIZ];
-
- // Output interface mask.
- unsigned char outiface_mask[IFNAMSIZ];
-
- // Transport protocol.
- uint16_t proto;
-
- // TOS.
- uint8_t tos;
-
- // Flags.
- uint8_t flags;
-
- // Inverse flags.
- uint8_t invflags;
-};
-
-// ip6t_entry is an ip6tables rule.
-struct ip6t_entry {
- // Basic matching information used to match a packet's IP header.
- struct ip6t_ip6 ipv6;
-
- // A caching field that isn't used by userspace.
- unsigned int nfcache;
-
- // The number of bytes between the start of this entry and the rule's target.
- uint16_t target_offset;
-
- // The total size of this rule, from the beginning of the entry to the end of
- // the target.
- uint16_t next_offset;
-
- // A return pointer not used by userspace.
- unsigned int comefrom;
-
- // Counters for packets and bytes, which we don't yet implement.
- struct xt_counters counters;
-
- // The data for all of this rule's matches, followed by the target. This runs
- // beyond the value of sizeof(struct ip6t_entry).
- unsigned char elems[0];
-};
-
-// Passed to getsockopt(IP6T_SO_GET_ENTRIES).
-struct ip6t_get_entries {
- // The name of the table.
- char name[XT_TABLE_MAXNAMELEN];
-
- // The size of the entry table in bytes. The user fills this in with the value
- // from struct ipt_getinfo.size.
- unsigned int size;
-
- // The entries for the given table. This will run past the size defined by
- // sizeof(struct ip6t_get_entries).
- struct ip6t_entry entrytable[0];
-};
-
-#endif // GVISOR_TEST_SYSCALLS_IPTABLES_TYPES_H_
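The structs above describe a variable-length buffer: ipt_get_entries.entrytable holds back-to-back ipt_entry records, each advancing by next_offset. The sketch below walks such a buffer, mirroring the loop in the deleted InitialState test; WalkEntries is a hypothetical helper name, and the entries buffer is assumed to have been filled by a successful getsockopt(IPT_SO_GET_ENTRIES) call.

// Walk the entry table returned by getsockopt(IPT_SO_GET_ENTRIES).
#include <cstddef>
#include <cstdio>

#include "test/syscalls/linux/iptables.h"

void WalkEntries(const struct ipt_get_entries* entries) {
  size_t offset = 0;
  while (offset < entries->size) {
    const auto* entry = reinterpret_cast<const struct ipt_entry*>(
        reinterpret_cast<const char*>(entries->entrytable) + offset);
    std::printf("entry at %zu: target_offset=%u next_offset=%u\n", offset,
                static_cast<unsigned>(entry->target_offset),
                static_cast<unsigned>(entry->next_offset));
    // next_offset is the total size of this rule (entry header, matches, and
    // target), so it advances to the start of the next entry.
    offset += entry->next_offset;
  }
}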
diff --git a/test/syscalls/linux/itimer.cc b/test/syscalls/linux/itimer.cc
deleted file mode 100644
index ac113e6da..000000000
--- a/test/syscalls/linux/itimer.cc
+++ /dev/null
@@ -1,366 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <signal.h>
-#include <sys/socket.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <time.h>
-
-#include <atomic>
-#include <functional>
-#include <iostream>
-#include <vector>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/strings/string_view.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/logging.h"
-#include "test/util/multiprocess_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/signal_util.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-#include "test/util/timer_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-constexpr char kSIGALRMToMainThread[] = "--itimer_sigarlm_to_main_thread";
-constexpr char kSIGPROFFairnessActive[] = "--itimer_sigprof_fairness_active";
-constexpr char kSIGPROFFairnessIdle[] = "--itimer_sigprof_fairness_idle";
-
-// Time period to be set for the itimers.
-constexpr absl::Duration kPeriod = absl::Milliseconds(25);
-// Total amount of time to spend per thread.
-constexpr absl::Duration kTestDuration = absl::Seconds(20);
-// Amount of spin iterations to perform as the minimum work item per thread.
-// Chosen to be sub-millisecond range.
-constexpr int kIterations = 10000000;
-// Allow deviation in the number of samples.
-constexpr double kNumSamplesDeviationRatio = 0.2;
-
-TEST(ItimerTest, ItimervalUpdatedBeforeExpiration) {
- constexpr int kSleepSecs = 10;
- constexpr int kAlarmSecs = 15;
- static_assert(
- kSleepSecs < kAlarmSecs,
- "kSleepSecs must be less than kAlarmSecs for the test to be meaningful");
- constexpr int kMaxRemainingSecs = kAlarmSecs - kSleepSecs;
-
- // Install a no-op handler for SIGALRM.
- struct sigaction sa = {};
- sigfillset(&sa.sa_mask);
- sa.sa_handler = +[](int signo) {};
- auto const cleanup_sa =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGALRM, sa));
-
- // Set an itimer-based alarm for kAlarmSecs from now.
- struct itimerval itv = {};
- itv.it_value.tv_sec = kAlarmSecs;
- auto const cleanup_itimer =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedItimer(ITIMER_REAL, itv));
-
- // After sleeping for kSleepSecs, the itimer value should reflect the elapsed
- // time even if it hasn't expired.
- absl::SleepFor(absl::Seconds(kSleepSecs));
- ASSERT_THAT(getitimer(ITIMER_REAL, &itv), SyscallSucceeds());
- EXPECT_TRUE(
- itv.it_value.tv_sec < kMaxRemainingSecs ||
- (itv.it_value.tv_sec == kMaxRemainingSecs && itv.it_value.tv_usec == 0))
- << "Remaining time: " << itv.it_value.tv_sec << " seconds + "
- << itv.it_value.tv_usec << " microseconds";
-}
-
-ABSL_CONST_INIT static thread_local std::atomic_int signal_test_num_samples =
- ATOMIC_VAR_INIT(0);
-
-void SignalTestSignalHandler(int /*signum*/) { signal_test_num_samples++; }
-
-struct SignalTestResult {
- int expected_total;
- int main_thread_samples;
- std::vector<int> worker_samples;
-};
-
-std::ostream& operator<<(std::ostream& os, const SignalTestResult& r) {
- os << "{expected_total: " << r.expected_total
- << ", main_thread_samples: " << r.main_thread_samples
- << ", worker_samples: [";
- bool first = true;
- for (int sample : r.worker_samples) {
- if (!first) {
- os << ", ";
- }
- os << sample;
- first = false;
- }
- os << "]}";
- return os;
-}
-
- // Starts two worker threads and an itimer with the given id, and measures the
- // number of signals delivered to each thread.
-SignalTestResult ItimerSignalTest(int id, clock_t main_clock,
- clock_t worker_clock, int signal,
- absl::Duration sleep) {
- signal_test_num_samples = 0;
-
- struct sigaction sa = {};
- sa.sa_handler = &SignalTestSignalHandler;
- sa.sa_flags = SA_RESTART;
- sigemptyset(&sa.sa_mask);
- auto sigaction_cleanup = ScopedSigaction(signal, sa).ValueOrDie();
-
- int socketfds[2];
- TEST_PCHECK(socketpair(AF_UNIX, SOCK_STREAM, 0, socketfds) == 0);
-
- // Do the spinning in the workers.
- std::function<void*(int)> work = [&](int socket_fd) {
- FileDescriptor fd(socket_fd);
-
- absl::Time finish = Now(worker_clock) + kTestDuration;
- while (Now(worker_clock) < finish) {
- // Blocked on read.
- char c;
- RetryEINTR(read)(fd.get(), &c, 1);
- for (int i = 0; i < kIterations; i++) {
- // Ensure compiler won't optimize this loop away.
- asm("");
- }
-
- if (sleep != absl::ZeroDuration()) {
- // Sleep so that the entire process is idle for a while.
- absl::SleepFor(sleep);
- }
-
- // Unblock the other thread.
- RetryEINTR(write)(fd.get(), &c, 1);
- }
-
- return reinterpret_cast<void*>(signal_test_num_samples.load());
- };
-
- ScopedThread th1(
- static_cast<std::function<void*()>>(std::bind(work, socketfds[0])));
- ScopedThread th2(
- static_cast<std::function<void*()>>(std::bind(work, socketfds[1])));
-
- absl::Time start = Now(main_clock);
- // Start the timer.
- struct itimerval timer = {};
- timer.it_value = absl::ToTimeval(kPeriod);
- timer.it_interval = absl::ToTimeval(kPeriod);
- auto cleanup_itimer = ScopedItimer(id, timer).ValueOrDie();
-
- // Unblock th1.
- //
- // N.B. th2 owns socketfds[1] but can't close it until it unblocks.
- char c = 0;
- TEST_CHECK(write(socketfds[1], &c, 1) == 1);
-
- SignalTestResult result;
-
- // Wait for the workers to be done and collect their sample counts.
- result.worker_samples.push_back(reinterpret_cast<int64_t>(th1.Join()));
- result.worker_samples.push_back(reinterpret_cast<int64_t>(th2.Join()));
- cleanup_itimer.Release()();
- result.expected_total = (Now(main_clock) - start) / kPeriod;
- result.main_thread_samples = signal_test_num_samples.load();
-
- return result;
-}
-
-int TestSIGALRMToMainThread() {
- SignalTestResult result =
- ItimerSignalTest(ITIMER_REAL, CLOCK_REALTIME, CLOCK_REALTIME, SIGALRM,
- absl::ZeroDuration());
-
- std::cerr << "result: " << result << std::endl;
-
- // ITIMER_REAL-generated SIGALRMs prefer to deliver to the thread group leader
- // (but don't guarantee it), so we expect to see most samples on the main
- // thread.
- //
- // The number of SIGALRMs delivered to a worker should not exceed 20%
- // of the number of total signals expected (this is somewhat arbitrary).
- const int worker_threshold = result.expected_total / 5;
-
- // Linux only guarantees that timers will never expire before the requested
- // time. Thus, we only check the upper bound and also that at least one sample
- // was received.
- TEST_CHECK(result.main_thread_samples <= result.expected_total);
- TEST_CHECK(result.main_thread_samples > 0);
- for (int num : result.worker_samples) {
- TEST_CHECK_MSG(num <= worker_threshold, "worker received too many samples");
- }
-
- return 0;
-}
-
-// Random save/restore is disabled as it introduces additional latency and
-// unpredictable distribution patterns.
-TEST(ItimerTest, DeliversSIGALRMToMainThread) {
- pid_t child;
- int execve_errno;
- auto kill = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec("/proc/self/exe", {"/proc/self/exe", kSIGALRMToMainThread},
- {}, &child, &execve_errno));
- EXPECT_EQ(0, execve_errno);
-
- int status;
- EXPECT_THAT(RetryEINTR(waitpid)(child, &status, 0),
- SyscallSucceedsWithValue(child));
-
- // Not required anymore.
- kill.Release();
-
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0) << status;
-}
-
-// Signals are delivered to threads fairly.
-//
-// sleep indicates how long to sleep worker threads each iteration to make the
-// entire process idle.
-int TestSIGPROFFairness(absl::Duration sleep) {
- SignalTestResult result =
- ItimerSignalTest(ITIMER_PROF, CLOCK_PROCESS_CPUTIME_ID,
- CLOCK_THREAD_CPUTIME_ID, SIGPROF, sleep);
-
- std::cerr << "result: " << result << std::endl;
-
- // The number of samples on the main thread should be very low as it did
- // nothing.
- TEST_CHECK(result.main_thread_samples < 80);
-
- // Both workers should get roughly equal number of samples.
- TEST_CHECK(result.worker_samples.size() == 2);
-
- TEST_CHECK(result.expected_total > 0);
-
- // In an ideal world each thread would get exactly 50% of the signals,
- // but since that's unlikely to happen we only require that the difference
- // between the two workers stays below kNumSamplesDeviationRatio of the total
- // observed samples.
- TEST_CHECK_MSG(std::abs(result.worker_samples[0] - result.worker_samples[1]) <
- ((result.worker_samples[0] + result.worker_samples[1]) *
- kNumSamplesDeviationRatio),
- "one worker received disproportionate share of samples");
-
- return 0;
-}
-
-// Random save/restore is disabled as it introduces additional latency and
-// unpredictable distribution patterns.
-TEST(ItimerTest, DeliversSIGPROFToThreadsRoughlyFairlyActive) {
- // On the KVM and ptrace platforms, switches between sentry and application
- // context are sometimes extremely slow, causing the itimer to send SIGPROF to
- // a thread that either already has one pending or has had SIGPROF delivered,
- // but hasn't handled it yet (and therefore still has SIGPROF masked). In
- // either case, since itimer signals are group-directed, signal sending falls
- // back to notifying the thread group leader. ItimerSignalTest() fails if "too
- // many" signals are delivered to the thread group leader, so these tests are
- // flaky on these platforms.
- //
- // TODO(b/143247272): Clarify why context switches are so slow on KVM.
- const auto gvisor_platform = GvisorPlatform();
- SKIP_IF(gvisor_platform == Platform::kKVM ||
- gvisor_platform == Platform::kPtrace);
-
- pid_t child;
- int execve_errno;
- auto kill = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec("/proc/self/exe", {"/proc/self/exe", kSIGPROFFairnessActive},
- {}, &child, &execve_errno));
- EXPECT_EQ(0, execve_errno);
-
- int status;
- EXPECT_THAT(RetryEINTR(waitpid)(child, &status, 0),
- SyscallSucceedsWithValue(child));
-
- // Not required anymore.
- kill.Release();
-
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << "Exited with code: " << status;
-}
-
-// Random save/restore is disabled as it introduces additional latency and
-// unpredictable distribution patterns.
-TEST(ItimerTest, DeliversSIGPROFToThreadsRoughlyFairlyIdle) {
- // See comment in DeliversSIGPROFToThreadsRoughlyFairlyActive.
- const auto gvisor_platform = GvisorPlatform();
- SKIP_IF(gvisor_platform == Platform::kKVM ||
- gvisor_platform == Platform::kPtrace);
-
- pid_t child;
- int execve_errno;
- auto kill = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec("/proc/self/exe", {"/proc/self/exe", kSIGPROFFairnessIdle},
- {}, &child, &execve_errno));
- EXPECT_EQ(0, execve_errno);
-
- int status;
- EXPECT_THAT(RetryEINTR(waitpid)(child, &status, 0),
- SyscallSucceedsWithValue(child));
-
- // Not required anymore.
- kill.Release();
-
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << "Exited with code: " << status;
-}
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
-
-namespace {
-void MaskSIGPIPE() {
- // Always mask SIGPIPE as it's common and tests aren't expected to handle it.
- // We don't take the TestInit() path so we must do this manually.
- struct sigaction sa = {};
- sa.sa_handler = SIG_IGN;
- TEST_CHECK(sigaction(SIGPIPE, &sa, nullptr) == 0);
-}
-} // namespace
-
-int main(int argc, char** argv) {
- // These tests require no background threads, so check for them before
- // TestInit.
- for (int i = 0; i < argc; i++) {
- absl::string_view arg(argv[i]);
-
- if (arg == gvisor::testing::kSIGALRMToMainThread) {
- MaskSIGPIPE();
- return gvisor::testing::TestSIGALRMToMainThread();
- }
- if (arg == gvisor::testing::kSIGPROFFairnessActive) {
- MaskSIGPIPE();
- return gvisor::testing::TestSIGPROFFairness(absl::ZeroDuration());
- }
- if (arg == gvisor::testing::kSIGPROFFairnessIdle) {
- MaskSIGPIPE();
- // Sleep time > ClockTick (10ms) exercises sleeping gVisor's
- // kernel.cpuClockTicker.
- return gvisor::testing::TestSIGPROFFairness(absl::Milliseconds(25));
- }
- }
-
- gvisor::testing::TestInit(&argc, &argv);
- return gvisor::testing::RunAllTests();
-}
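The deleted itimer tests rely on the basic setitimer/getitimer/SIGALRM machinery. A minimal standalone sketch of that machinery, using the same 25ms period as kPeriod above, follows; it is plain POSIX and does not depend on the gVisor test framework.

// Arm ITIMER_REAL, count SIGALRM deliveries, then read the remaining time.
#include <signal.h>
#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>

#include <atomic>

static std::atomic<int> alarms{0};

static void OnAlarm(int) { alarms.fetch_add(1); }

int main() {
  struct sigaction sa = {};
  sa.sa_handler = OnAlarm;
  sigemptyset(&sa.sa_mask);
  sigaction(SIGALRM, &sa, nullptr);

  // 25ms initial expiration and 25ms period.
  struct itimerval timer = {};
  timer.it_value.tv_usec = 25 * 1000;
  timer.it_interval.tv_usec = 25 * 1000;
  setitimer(ITIMER_REAL, &timer, nullptr);

  // pause() returns each time the handler runs; wait for ten expirations.
  while (alarms.load() < 10) {
    pause();
  }

  struct itimerval remaining = {};
  getitimer(ITIMER_REAL, &remaining);
  printf("alarms=%d, next expiry in %ld us\n", alarms.load(),
         static_cast<long>(remaining.it_value.tv_usec));
  return 0;
}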
diff --git a/test/syscalls/linux/kcov.cc b/test/syscalls/linux/kcov.cc
deleted file mode 100644
index 6816c1fd0..000000000
--- a/test/syscalls/linux/kcov.cc
+++ /dev/null
@@ -1,184 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sys/errno.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
-
-#include <atomic>
-
-#include "gtest/gtest.h"
-#include "test/util/capability_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// For this set of tests to run, they must be run with coverage enabled. On
-// native Linux, this involves compiling the kernel with kcov enabled. For
-// gVisor, we need to enable the Go coverage tool, e.g. bazel test --
-// collect_coverage_data --instrumentation_filter=//pkg/... <test>.
-
-constexpr char kcovPath[] = "/sys/kernel/debug/kcov";
-constexpr int kSize = 4096;
-constexpr int KCOV_INIT_TRACE = 0x80086301;
-constexpr int KCOV_ENABLE = 0x6364;
-constexpr int KCOV_DISABLE = 0x6365;
-
-uint64_t* KcovMmap(int fd) {
- return (uint64_t*)mmap(nullptr, kSize * sizeof(uint64_t),
- PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
-}
-
-TEST(KcovTest, Kcov) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability((CAP_DAC_OVERRIDE))));
-
- int fd;
- ASSERT_THAT(fd = open(kcovPath, O_RDWR),
- AnyOf(SyscallSucceeds(), SyscallFailsWithErrno(ENOENT)));
- // Kcov not available.
- SKIP_IF(errno == ENOENT);
- auto fd_closer = Cleanup([fd]() { close(fd); });
-
- ASSERT_THAT(ioctl(fd, KCOV_INIT_TRACE, kSize), SyscallSucceeds());
- uint64_t* area = KcovMmap(fd);
- ASSERT_TRUE(area != MAP_FAILED);
- ASSERT_THAT(ioctl(fd, KCOV_ENABLE, 0), SyscallSucceeds());
-
- for (int i = 0; i < 10; i++) {
- // Make some syscalls to generate coverage data.
- ASSERT_THAT(ioctl(fd, KCOV_ENABLE, 0), SyscallFailsWithErrno(EINVAL));
- }
-
- uint64_t num_pcs = *(uint64_t*)(area);
- EXPECT_GT(num_pcs, 0);
- for (uint64_t i = 1; i <= num_pcs; i++) {
- // Verify that PCs are in the standard kernel range.
- EXPECT_GT(area[i], 0xffffffff7fffffffL);
- }
-
- ASSERT_THAT(ioctl(fd, KCOV_DISABLE, 0), SyscallSucceeds());
-}
-
-TEST(KcovTest, PrematureMmap) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability((CAP_DAC_OVERRIDE))));
-
- int fd;
- ASSERT_THAT(fd = open(kcovPath, O_RDWR),
- AnyOf(SyscallSucceeds(), SyscallFailsWithErrno(ENOENT)));
- // Kcov not available.
- SKIP_IF(errno == ENOENT);
- auto fd_closer = Cleanup([fd]() { close(fd); });
-
- // Cannot mmap before KCOV_INIT_TRACE.
- uint64_t* area = KcovMmap(fd);
- ASSERT_TRUE(area == MAP_FAILED);
-}
-
-// Tests that multiple kcov fds can be used simultaneously.
-TEST(KcovTest, MultipleFds) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability((CAP_DAC_OVERRIDE))));
-
- int fd1;
- ASSERT_THAT(fd1 = open(kcovPath, O_RDWR),
- AnyOf(SyscallSucceeds(), SyscallFailsWithErrno(ENOENT)));
- // Kcov not available.
- SKIP_IF(errno == ENOENT);
-
- int fd2;
- ASSERT_THAT(fd2 = open(kcovPath, O_RDWR), SyscallSucceeds());
- auto fd_closer = Cleanup([fd1, fd2]() {
- close(fd1);
- close(fd2);
- });
-
- auto t1 = ScopedThread([&] {
- ASSERT_THAT(ioctl(fd1, KCOV_INIT_TRACE, kSize), SyscallSucceeds());
- uint64_t* area = KcovMmap(fd1);
- ASSERT_TRUE(area != MAP_FAILED);
- ASSERT_THAT(ioctl(fd1, KCOV_ENABLE, 0), SyscallSucceeds());
- });
-
- ASSERT_THAT(ioctl(fd2, KCOV_INIT_TRACE, kSize), SyscallSucceeds());
- uint64_t* area = KcovMmap(fd2);
- ASSERT_TRUE(area != MAP_FAILED);
- ASSERT_THAT(ioctl(fd2, KCOV_ENABLE, 0), SyscallSucceeds());
-}
-
-// Tests behavior for two threads trying to use the same kcov fd.
-TEST(KcovTest, MultipleThreads) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability((CAP_DAC_OVERRIDE))));
-
- int fd;
- ASSERT_THAT(fd = open(kcovPath, O_RDWR),
- AnyOf(SyscallSucceeds(), SyscallFailsWithErrno(ENOENT)));
- // Kcov not available.
- SKIP_IF(errno == ENOENT);
- auto fd_closer = Cleanup([fd]() { close(fd); });
-
- // Test the behavior of multiple threads trying to use the same kcov fd
- // simultaneously.
- std::atomic<bool> t1_enabled(false), t1_disabled(false), t2_failed(false),
- t2_exited(false);
- auto t1 = ScopedThread([&] {
- ASSERT_THAT(ioctl(fd, KCOV_INIT_TRACE, kSize), SyscallSucceeds());
- uint64_t* area = KcovMmap(fd);
- ASSERT_TRUE(area != MAP_FAILED);
- ASSERT_THAT(ioctl(fd, KCOV_ENABLE, 0), SyscallSucceeds());
- t1_enabled = true;
-
- // After t2 has made sure that enabling kcov again fails, disable it.
- while (!t2_failed) {
- sched_yield();
- }
- ASSERT_THAT(ioctl(fd, KCOV_DISABLE, 0), SyscallSucceeds());
- t1_disabled = true;
-
- // Wait for t2 to enable kcov and then exit, after which we should be able
- // to enable kcov again, without needing to set up a new memory mapping.
- while (!t2_exited) {
- sched_yield();
- }
- ASSERT_THAT(ioctl(fd, KCOV_ENABLE, 0), SyscallSucceeds());
- });
-
- auto t2 = ScopedThread([&] {
- // Wait for t1 to enable kcov, and make sure that enabling kcov again fails.
- while (!t1_enabled) {
- sched_yield();
- }
- ASSERT_THAT(ioctl(fd, KCOV_ENABLE, 0), SyscallFailsWithErrno(EINVAL));
- t2_failed = true;
-
- // Wait for t1 to disable kcov, after which using fd should now succeed.
- while (!t1_disabled) {
- sched_yield();
- }
- uint64_t* area = KcovMmap(fd);
- ASSERT_TRUE(area != MAP_FAILED);
- ASSERT_THAT(ioctl(fd, KCOV_ENABLE, 0), SyscallSucceeds());
- });
-
- t2.Join();
- t2_exited = true;
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
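The kcov tests above follow the standard kcov flow: initialize a trace buffer, mmap it, enable tracing, run syscalls, and read the recorded PCs. A standalone sketch follows; the ioctl numbers are the ones hard-coded in the deleted kcov.cc (they normally come from <linux/kcov.h>), and running it requires a kcov-enabled kernel with debugfs mounted.

// Minimal kcov trace session mirroring the deleted Kcov test.
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

constexpr unsigned int kNumWords = 4096;
constexpr unsigned int kKcovInitTrace = 0x80086301;  // KCOV_INIT_TRACE
constexpr unsigned int kKcovEnable = 0x6364;         // KCOV_ENABLE
constexpr unsigned int kKcovDisable = 0x6365;        // KCOV_DISABLE

int main() {
  int fd = open("/sys/kernel/debug/kcov", O_RDWR);
  if (fd < 0) {
    perror("open /sys/kernel/debug/kcov");
    return 1;
  }
  if (ioctl(fd, kKcovInitTrace, kNumWords) != 0) {
    perror("KCOV_INIT_TRACE");
    return 1;
  }
  uint64_t* area = static_cast<uint64_t*>(
      mmap(nullptr, kNumWords * sizeof(uint64_t), PROT_READ | PROT_WRITE,
           MAP_SHARED, fd, 0));
  if (area == MAP_FAILED || ioctl(fd, kKcovEnable, 0) != 0) {
    perror("mmap/KCOV_ENABLE");
    return 1;
  }

  area[0] = 0;  // area[0] holds the number of PCs recorded so far.
  getpid();     // Any syscall generates coverage entries.
  uint64_t num_pcs = area[0];
  printf("recorded %llu PCs, first=%#llx\n",
         static_cast<unsigned long long>(num_pcs),
         num_pcs ? static_cast<unsigned long long>(area[1]) : 0ULL);

  ioctl(fd, kKcovDisable, 0);
  close(fd);
  return 0;
}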
diff --git a/test/syscalls/linux/kill.cc b/test/syscalls/linux/kill.cc
deleted file mode 100644
index 5d1735853..000000000
--- a/test/syscalls/linux/kill.cc
+++ /dev/null
@@ -1,389 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <cerrno>
-#include <csignal>
-
-#include "gtest/gtest.h"
-#include "absl/flags/flag.h"
-#include "absl/synchronization/mutex.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/util/capability_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/logging.h"
-#include "test/util/signal_util.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-ABSL_FLAG(int32_t, scratch_uid, 65534, "scratch UID");
-ABSL_FLAG(int32_t, scratch_gid, 65534, "scratch GID");
-
-using ::testing::Ge;
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(KillTest, CanKillValidPid) {
- // If pid is positive, then signal sig is sent to the process with the ID
- // specified by pid.
- EXPECT_THAT(kill(getpid(), 0), SyscallSucceeds());
- // If pid equals 0, then sig is sent to every process in the process group of
- // the calling process.
- EXPECT_THAT(kill(0, 0), SyscallSucceeds());
-
- ScopedThread([] { EXPECT_THAT(kill(gettid(), 0), SyscallSucceeds()); });
-}
-
-void SigHandler(int sig, siginfo_t* info, void* context) { _exit(0); }
-
-// If pid equals -1, then sig is sent to every process for which the calling
-// process has permission to send signals, except for process 1 (init).
-TEST(KillTest, CanKillAllPIDs) {
- // If we're not running inside the sandbox, then we skip this test
- // as our namespace may contain may more processes that cannot tolerate
- // the signal below. We also cannot reliably create a new pid namespace
- // for ourselves and test the same functionality.
- SKIP_IF(!IsRunningOnGvisor());
-
- int pipe_fds[2];
- ASSERT_THAT(pipe(pipe_fds), SyscallSucceeds());
- FileDescriptor read_fd(pipe_fds[0]);
- FileDescriptor write_fd(pipe_fds[1]);
-
- pid_t pid = fork();
- if (pid == 0) {
- read_fd.reset();
-
- struct sigaction sa;
- sa.sa_sigaction = SigHandler;
- sigfillset(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO;
- TEST_PCHECK(sigaction(SIGWINCH, &sa, nullptr) == 0);
- MaybeSave();
-
- // Indicate to the parent that we're ready.
- write_fd.reset();
-
- // Wait until we get the signal from the parent.
- while (true) {
- pause();
- }
- }
-
- ASSERT_THAT(pid, SyscallSucceeds());
-
- write_fd.reset();
-
- // Wait for the child to indicate that it has installed the signal handler by
- // closing the write end.
- char buf;
- ASSERT_THAT(ReadFd(read_fd.get(), &buf, 1), SyscallSucceedsWithValue(0));
-
- // Signal the child and wait for it to die with status 0, indicating that
- // it got the expected signal.
- EXPECT_THAT(kill(-1, SIGWINCH), SyscallSucceeds());
-
- int status;
- ASSERT_THAT(RetryEINTR(waitpid)(pid, &status, 0),
- SyscallSucceedsWithValue(pid));
- EXPECT_TRUE(WIFEXITED(status));
- EXPECT_EQ(0, WEXITSTATUS(status));
-}
-
-TEST(KillTest, CannotKillInvalidPID) {
- // We need an unused pid to verify that kill fails when given one.
- //
- // There is no way to guarantee that a PID is unused, but the PID of a
- // recently exited process likely won't be reused soon.
- pid_t fake_pid = fork();
- if (fake_pid == 0) {
- _exit(0);
- }
-
- ASSERT_THAT(fake_pid, SyscallSucceeds());
-
- int status;
- ASSERT_THAT(RetryEINTR(waitpid)(fake_pid, &status, 0),
- SyscallSucceedsWithValue(fake_pid));
- EXPECT_TRUE(WIFEXITED(status));
- EXPECT_EQ(0, WEXITSTATUS(status));
-
- EXPECT_THAT(kill(fake_pid, 0), SyscallFailsWithErrno(ESRCH));
-}
-
-TEST(KillTest, CannotUseInvalidSignal) {
- EXPECT_THAT(kill(getpid(), 200), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(KillTest, CanKillRemoteProcess) {
- pid_t pid = fork();
- if (pid == 0) {
- while (true) {
- pause();
- }
- }
-
- ASSERT_THAT(pid, SyscallSucceeds());
-
- EXPECT_THAT(kill(pid, SIGKILL), SyscallSucceeds());
-
- int status;
- ASSERT_THAT(RetryEINTR(waitpid)(pid, &status, 0),
- SyscallSucceedsWithValue(pid));
- EXPECT_TRUE(WIFSIGNALED(status));
- EXPECT_EQ(SIGKILL, WTERMSIG(status));
-}
-
-TEST(KillTest, CanKillOwnProcess) {
- EXPECT_THAT(kill(getpid(), 0), SyscallSucceeds());
-}
-
-// Verify that you can kill a process even using a tid from a thread other than
-// the group leader.
-TEST(KillTest, CannotKillTid) {
- pid_t tid;
- bool tid_available = false;
- bool finished = false;
- absl::Mutex mu;
- ScopedThread t([&] {
- mu.Lock();
- tid = gettid();
- tid_available = true;
- mu.Await(absl::Condition(&finished));
- mu.Unlock();
- });
- mu.LockWhen(absl::Condition(&tid_available));
- EXPECT_THAT(kill(tid, 0), SyscallSucceeds());
- finished = true;
- mu.Unlock();
-}
-
-TEST(KillTest, SetPgid) {
- for (int i = 0; i < 10; i++) {
- // The following is the normal pattern for creating a new process group.
- // Both the parent and child process will call setpgid in order to avoid any
- // race conditions. We do this ten times to catch races.
- pid_t pid = fork();
- if (pid == 0) {
- setpgid(0, 0);
- while (true) {
- pause();
- }
- }
-
- ASSERT_THAT(pid, SyscallSucceeds());
-
- // Set the child's group and exit.
- ASSERT_THAT(setpgid(pid, pid), SyscallSucceeds());
- EXPECT_THAT(kill(pid, SIGKILL), SyscallSucceeds());
-
- int status;
- EXPECT_THAT(RetryEINTR(waitpid)(-pid, &status, 0),
- SyscallSucceedsWithValue(pid));
- EXPECT_TRUE(WIFSIGNALED(status));
- EXPECT_EQ(SIGKILL, WTERMSIG(status));
- }
-}
-
-TEST(KillTest, ProcessGroups) {
- // Fork a new child.
- //
- // other_child is used as a placeholder process. We use this PID as our "does
- // not exist" process group to ensure some amount of safety. (It is still
- // possible to violate this assumption, but extremely unlikely.)
- pid_t child = fork();
- if (child == 0) {
- while (true) {
- pause();
- }
- }
- ASSERT_THAT(child, SyscallSucceeds());
-
- pid_t other_child = fork();
- if (other_child == 0) {
- while (true) {
- pause();
- }
- }
- ASSERT_THAT(other_child, SyscallSucceeds());
-
- // Ensure the kill does not succeed without the new group.
- EXPECT_THAT(kill(-child, SIGKILL), SyscallFailsWithErrno(ESRCH));
-
- // Put the child in its own process group.
- ASSERT_THAT(setpgid(child, child), SyscallSucceeds());
-
- // This should not be allowed: you can only create a new group with the same
- // id or join an existing one. The other_child group should not exist.
- ASSERT_THAT(setpgid(child, other_child), SyscallFailsWithErrno(EPERM));
-
- // Done with other_child; kill it.
- EXPECT_THAT(kill(other_child, SIGKILL), SyscallSucceeds());
- int status;
- EXPECT_THAT(RetryEINTR(waitpid)(other_child, &status, 0), SyscallSucceeds());
-
- // Linux returns success for the no-op call.
- ASSERT_THAT(setpgid(child, child), SyscallSucceeds());
-
- // Kill the child's process group.
- ASSERT_THAT(kill(-child, SIGKILL), SyscallSucceeds());
-
- // Wait on the process group; ensure that the signal was as expected.
- EXPECT_THAT(RetryEINTR(waitpid)(-child, &status, 0),
- SyscallSucceedsWithValue(child));
- EXPECT_TRUE(WIFSIGNALED(status));
- EXPECT_EQ(SIGKILL, WTERMSIG(status));
-
- // Try to kill the process group again; ensure that the wait fails.
- EXPECT_THAT(kill(-child, SIGKILL), SyscallFailsWithErrno(ESRCH));
- EXPECT_THAT(RetryEINTR(waitpid)(-child, &status, 0),
- SyscallFailsWithErrno(ECHILD));
-}
-
-TEST(KillTest, ChildDropsPrivsCannotKill) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SETUID)));
-
- const int uid = absl::GetFlag(FLAGS_scratch_uid);
- const int gid = absl::GetFlag(FLAGS_scratch_gid);
-
- // Create the child that drops privileges and tries to kill the parent.
- pid_t pid = fork();
- if (pid == 0) {
- TEST_PCHECK(setresgid(gid, gid, gid) == 0);
- MaybeSave();
-
- TEST_PCHECK(setresuid(uid, uid, uid) == 0);
- MaybeSave();
-
- // setresuid should have dropped CAP_KILL. Make sure.
- TEST_CHECK(!HaveCapability(CAP_KILL).ValueOrDie());
-
- // Try to kill parent with every signal-sending syscall possible.
- pid_t parent = getppid();
-
- TEST_CHECK(kill(parent, SIGKILL) < 0);
- TEST_PCHECK_MSG(errno == EPERM, "kill failed with wrong errno");
- MaybeSave();
-
- TEST_CHECK(tgkill(parent, parent, SIGKILL) < 0);
- TEST_PCHECK_MSG(errno == EPERM, "tgkill failed with wrong errno");
- MaybeSave();
-
- TEST_CHECK(syscall(SYS_tkill, parent, SIGKILL) < 0);
- TEST_PCHECK_MSG(errno == EPERM, "tkill failed with wrong errno");
- MaybeSave();
-
- siginfo_t uinfo;
- uinfo.si_code = -1; // SI_QUEUE (allowed).
-
- TEST_CHECK(syscall(SYS_rt_sigqueueinfo, parent, SIGKILL, &uinfo) < 0);
- TEST_PCHECK_MSG(errno == EPERM, "rt_sigqueueinfo failed with wrong errno");
- MaybeSave();
-
- TEST_CHECK(syscall(SYS_rt_tgsigqueueinfo, parent, parent, SIGKILL, &uinfo) <
- 0);
- TEST_PCHECK_MSG(errno == EPERM, "rt_tgsigqueueinfo failed with wrong errno");
- MaybeSave();
-
- _exit(0);
- }
-
- ASSERT_THAT(pid, SyscallSucceeds());
-
- int status;
- EXPECT_THAT(RetryEINTR(waitpid)(pid, &status, 0),
- SyscallSucceedsWithValue(pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << "status = " << status;
-}
-
-TEST(KillTest, CanSIGCONTSameSession) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SETUID)));
-
- pid_t stopped_child = fork();
- if (stopped_child == 0) {
- raise(SIGSTOP);
- _exit(0);
- }
-
- ASSERT_THAT(stopped_child, SyscallSucceeds());
-
- // Put the child in its own process group. The child and parent process
- // groups also share a session.
- ASSERT_THAT(setpgid(stopped_child, stopped_child), SyscallSucceeds());
-
- // Make sure child stopped.
- int status;
- EXPECT_THAT(RetryEINTR(waitpid)(stopped_child, &status, WUNTRACED),
- SyscallSucceedsWithValue(stopped_child));
- EXPECT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP)
- << "status " << status;
-
- const int uid = absl::GetFlag(FLAGS_scratch_uid);
- const int gid = absl::GetFlag(FLAGS_scratch_gid);
-
- // Drop privileges only in the child process, or else this parent process
- // won't be able to open some log files after the test ends.
- pid_t other_child = fork();
- if (other_child == 0) {
- // Drop privileges.
- TEST_PCHECK(setresgid(gid, gid, gid) == 0);
- MaybeSave();
-
- TEST_PCHECK(setresuid(uid, uid, uid) == 0);
- MaybeSave();
-
- // setresuid should have dropped CAP_KILL.
- TEST_CHECK(!HaveCapability(CAP_KILL).ValueOrDie());
-
- // Child 2 and the stopped child now share neither a thread group nor any
- // UIDs, and child 2 has no privileges. That means any signal other than
- // SIGCONT should fail.
- TEST_CHECK(kill(stopped_child, SIGKILL) < 0);
- TEST_PCHECK_MSG(errno == EPERM, "kill failed with wrong errno");
- MaybeSave();
-
- TEST_PCHECK(kill(stopped_child, SIGCONT) == 0);
- MaybeSave();
-
- _exit(0);
- }
-
- ASSERT_THAT(other_child, SyscallSucceeds());
-
- // Make sure child exited normally.
- EXPECT_THAT(RetryEINTR(waitpid)(stopped_child, &status, 0),
- SyscallSucceedsWithValue(stopped_child));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << "status " << status;
-
- // Make sure other_child exited normally.
- EXPECT_THAT(RetryEINTR(waitpid)(other_child, &status, 0),
- SyscallSucceedsWithValue(other_child));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << "status " << status;
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/link.cc b/test/syscalls/linux/link.cc
deleted file mode 100644
index 4f9ca1a65..000000000
--- a/test/syscalls/linux/link.cc
+++ /dev/null
@@ -1,360 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <string.h>
-#include <sys/stat.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <string>
-
-#include "gtest/gtest.h"
-#include "absl/flags/flag.h"
-#include "absl/strings/str_cat.h"
-#include "test/util/capability_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-ABSL_FLAG(int32_t, scratch_uid, 65534, "scratch UID");
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// IsSameFile returns true if both filenames have the same device and inode.
-bool IsSameFile(const std::string& f1, const std::string& f2) {
- // Use lstat rather than stat, so that symlinks are not followed.
- struct stat stat1 = {};
- EXPECT_THAT(lstat(f1.c_str(), &stat1), SyscallSucceeds());
- struct stat stat2 = {};
- EXPECT_THAT(lstat(f2.c_str(), &stat2), SyscallSucceeds());
-
- return stat1.st_dev == stat2.st_dev && stat1.st_ino == stat2.st_ino;
-}
-
-// TODO(b/178640646): Add test for linkat with AT_EMPTY_PATH
-
-TEST(LinkTest, CanCreateLinkFile) {
- auto oldfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const std::string newname = NewTempAbsPath();
-
- // Get the initial link count.
- uint64_t initial_link_count =
- ASSERT_NO_ERRNO_AND_VALUE(Links(oldfile.path()));
-
- EXPECT_THAT(link(oldfile.path().c_str(), newname.c_str()), SyscallSucceeds());
-
- EXPECT_TRUE(IsSameFile(oldfile.path(), newname));
-
- // Link count should be incremented.
- EXPECT_THAT(Links(oldfile.path()),
- IsPosixErrorOkAndHolds(initial_link_count + 1));
-
- // Delete the link.
- EXPECT_THAT(unlink(newname.c_str()), SyscallSucceeds());
-
- // Link count should be back to initial.
- EXPECT_THAT(Links(oldfile.path()),
- IsPosixErrorOkAndHolds(initial_link_count));
-}
-
-TEST(LinkTest, PermissionDenied) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_FOWNER)));
-
- // Make the file "unsafe" to link by making it only readable, but not
- // writable.
- const auto unwriteable_file =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileMode(0400));
- const std::string special_path = NewTempAbsPath();
- ASSERT_THAT(mkfifo(special_path.c_str(), 0666), SyscallSucceeds());
- const auto setuid_file =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileMode(0666 | S_ISUID));
-
- const std::string newname = NewTempAbsPath();
-
- // Do setuid in a separate thread so that after finishing this test, the
- // process can still open files the test harness created before starting this
- // test. Otherwise, the files are created by root (UID before the test), but
- // cannot be opened by the `uid` set below after the test. After calling
- // setuid(non-zero-UID), there is no way to get root privileges back.
- ScopedThread([&] {
- // Use syscall instead of glibc setuid wrapper because we want this setuid
- // call to only apply to this task. POSIX threads, however, require that all
- // threads have the same UIDs, so using the setuid wrapper sets all threads'
- // real UID.
- // Also drops capabilities.
- EXPECT_THAT(syscall(SYS_setuid, absl::GetFlag(FLAGS_scratch_uid)),
- SyscallSucceeds());
-
- EXPECT_THAT(link(unwriteable_file.path().c_str(), newname.c_str()),
- SyscallFailsWithErrno(EPERM));
- EXPECT_THAT(link(special_path.c_str(), newname.c_str()),
- SyscallFailsWithErrno(EPERM));
- if (!IsRunningWithVFS1()) {
- EXPECT_THAT(link(setuid_file.path().c_str(), newname.c_str()),
- SyscallFailsWithErrno(EPERM));
- }
- });
-}
-
-TEST(LinkTest, CannotLinkDirectory) {
- auto olddir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const std::string newdir = NewTempAbsPath();
-
- EXPECT_THAT(link(olddir.path().c_str(), newdir.c_str()),
- SyscallFailsWithErrno(EPERM));
-
- EXPECT_THAT(rmdir(olddir.path().c_str()), SyscallSucceeds());
-}
-
-TEST(LinkTest, CannotLinkWithSlash) {
- auto oldfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- // Put a final "/" on newname.
- const std::string newname = absl::StrCat(NewTempAbsPath(), "/");
-
- EXPECT_THAT(link(oldfile.path().c_str(), newname.c_str()),
- SyscallFailsWithErrno(ENOENT));
-}
-
-TEST(LinkTest, OldnameIsEmpty) {
- const std::string newname = NewTempAbsPath();
- EXPECT_THAT(link("", newname.c_str()), SyscallFailsWithErrno(ENOENT));
-}
-
-TEST(LinkTest, OldnameDoesNotExist) {
- const std::string oldname = NewTempAbsPath();
- const std::string newname = NewTempAbsPath();
- EXPECT_THAT(link(oldname.c_str(), newname.c_str()),
- SyscallFailsWithErrno(ENOENT));
-}
-
-TEST(LinkTest, NewnameCannotExist) {
- const std::string newname =
- JoinPath(GetAbsoluteTestTmpdir(), "thisdoesnotexist", "foo");
- EXPECT_THAT(link("/thisdoesnotmatter", newname.c_str()),
- SyscallFailsWithErrno(ENOENT));
-}
-
-TEST(LinkTest, WithOldDirFD) {
- const std::string oldname_parent = NewTempAbsPath();
- const std::string oldname_base = "child";
- const std::string oldname = JoinPath(oldname_parent, oldname_base);
- const std::string newname = NewTempAbsPath();
-
- // Create oldname_parent directory, and get an FD.
- ASSERT_THAT(mkdir(oldname_parent.c_str(), 0777), SyscallSucceeds());
- const FileDescriptor oldname_parent_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(oldname_parent, O_DIRECTORY | O_RDONLY));
-
- // Create oldname file.
- const FileDescriptor oldname_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(oldname, O_CREAT | O_RDWR, 0666));
-
- // Link oldname to newname, using oldname_parent_fd.
- EXPECT_THAT(linkat(oldname_parent_fd.get(), oldname_base.c_str(), AT_FDCWD,
- newname.c_str(), 0),
- SyscallSucceeds());
-
- EXPECT_TRUE(IsSameFile(oldname, newname));
-
- EXPECT_THAT(unlink(newname.c_str()), SyscallSucceeds());
- EXPECT_THAT(unlink(oldname.c_str()), SyscallSucceeds());
- EXPECT_THAT(rmdir(oldname_parent.c_str()), SyscallSucceeds());
-}
-
-TEST(LinkTest, BogusFlags) {
- ASSERT_THAT(linkat(1, "foo", 2, "bar", 3), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(LinkTest, WithNewDirFD) {
- auto oldfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const std::string newname_parent = NewTempAbsPath();
- const std::string newname_base = "child";
- const std::string newname = JoinPath(newname_parent, newname_base);
-
- // Create newname_parent directory, and get an FD.
- EXPECT_THAT(mkdir(newname_parent.c_str(), 0777), SyscallSucceeds());
- const FileDescriptor newname_parent_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(newname_parent, O_DIRECTORY | O_RDONLY));
-
- // Link newname to oldfile, using newname_parent_fd.
- EXPECT_THAT(linkat(AT_FDCWD, oldfile.path().c_str(), newname_parent_fd.get(),
- newname.c_str(), 0),
- SyscallSucceeds());
-
- EXPECT_TRUE(IsSameFile(oldfile.path(), newname));
-
- EXPECT_THAT(unlink(newname.c_str()), SyscallSucceeds());
- EXPECT_THAT(rmdir(newname_parent.c_str()), SyscallSucceeds());
-}
-
-TEST(LinkTest, RelPathsWithNonDirFDs) {
- auto oldfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- // Create a file that will be passed as the directory fd for old/new names.
- const std::string filename = NewTempAbsPath();
- const FileDescriptor file_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(filename, O_CREAT | O_RDWR, 0666));
-
- // Using file_fd as olddirfd will fail.
- EXPECT_THAT(linkat(file_fd.get(), "foo", AT_FDCWD, "bar", 0),
- SyscallFailsWithErrno(ENOTDIR));
-
- // Using file_fd as newdirfd will fail.
- EXPECT_THAT(linkat(AT_FDCWD, oldfile.path().c_str(), file_fd.get(), "bar", 0),
- SyscallFailsWithErrno(ENOTDIR));
-}
-
-TEST(LinkTest, AbsPathsWithNonDirFDs) {
- auto oldfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const std::string newname = NewTempAbsPath();
-
- // Create a file that will be passed as the directory fd for old/new names.
- const std::string filename = NewTempAbsPath();
- const FileDescriptor file_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(filename, O_CREAT | O_RDWR, 0666));
-
- // Using file_fd as the dirfds is OK as long as paths are absolute.
- EXPECT_THAT(linkat(file_fd.get(), oldfile.path().c_str(), file_fd.get(),
- newname.c_str(), 0),
- SyscallSucceeds());
-}
-
-TEST(LinkTest, NewDirFDWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- auto oldfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const std::string newname_parent = NewTempAbsPath();
- const std::string newname_base = "child";
- const std::string newname = JoinPath(newname_parent, newname_base);
-
- // Create newname_parent directory, and get an FD.
- EXPECT_THAT(mkdir(newname_parent.c_str(), 0777), SyscallSucceeds());
- const FileDescriptor newname_parent_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(newname_parent, O_DIRECTORY | O_PATH));
-
- // Link newname to oldfile, using newname_parent_fd.
- EXPECT_THAT(linkat(AT_FDCWD, oldfile.path().c_str(), newname_parent_fd.get(),
- newname.c_str(), 0),
- SyscallSucceeds());
-
- EXPECT_TRUE(IsSameFile(oldfile.path(), newname));
-}
-
-TEST(LinkTest, RelPathsNonDirFDsWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- auto oldfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- // Create a file that will be passed as the directory fd for old/new names.
- TempPath path = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor file_fd = ASSERT_NO_ERRNO_AND_VALUE(Open(path.path(), O_PATH));
-
- // Using file_fd as olddirfd will fail.
- EXPECT_THAT(linkat(file_fd.get(), "foo", AT_FDCWD, "bar", 0),
- SyscallFailsWithErrno(ENOTDIR));
-
- // Using file_fd as newdirfd will fail.
- EXPECT_THAT(linkat(AT_FDCWD, oldfile.path().c_str(), file_fd.get(), "bar", 0),
- SyscallFailsWithErrno(ENOTDIR));
-}
-
-TEST(LinkTest, AbsPathsNonDirFDsWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
-
- auto oldfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const std::string newname = NewTempAbsPath();
-
- // Create a file that will be passed as the directory fd for old/new names.
- TempPath path = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor file_fd = ASSERT_NO_ERRNO_AND_VALUE(Open(path.path(), O_PATH));
-
- // Using file_fd as the dirfds is OK as long as paths are absolute.
- EXPECT_THAT(linkat(file_fd.get(), oldfile.path().c_str(), file_fd.get(),
- newname.c_str(), 0),
- SyscallSucceeds());
-}
-
-TEST(LinkTest, LinkDoesNotFollowSymlinks) {
- // Create oldfile, and oldsymlink which points to it.
- auto oldfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const std::string oldsymlink = NewTempAbsPath();
- EXPECT_THAT(symlink(oldfile.path().c_str(), oldsymlink.c_str()),
- SyscallSucceeds());
-
- // Now hard link newname to oldsymlink.
- const std::string newname = NewTempAbsPath();
- EXPECT_THAT(link(oldsymlink.c_str(), newname.c_str()), SyscallSucceeds());
-
- // The link should not have resolved the symlink, so newname and oldsymlink
- // are the same.
- EXPECT_TRUE(IsSameFile(oldsymlink, newname));
- EXPECT_FALSE(IsSameFile(oldfile.path(), newname));
-
- EXPECT_THAT(unlink(oldsymlink.c_str()), SyscallSucceeds());
- EXPECT_THAT(unlink(newname.c_str()), SyscallSucceeds());
-}
-
-TEST(LinkTest, LinkatDoesNotFollowSymlinkByDefault) {
- // Create oldfile, and oldsymlink which points to it.
- auto oldfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const std::string oldsymlink = NewTempAbsPath();
- EXPECT_THAT(symlink(oldfile.path().c_str(), oldsymlink.c_str()),
- SyscallSucceeds());
-
- // Now hard link newname to oldsymlink.
- const std::string newname = NewTempAbsPath();
- EXPECT_THAT(
- linkat(AT_FDCWD, oldsymlink.c_str(), AT_FDCWD, newname.c_str(), 0),
- SyscallSucceeds());
-
- // The link should not have resolved the symlink, so newname and oldsymlink
- // are the same.
- EXPECT_TRUE(IsSameFile(oldsymlink, newname));
- EXPECT_FALSE(IsSameFile(oldfile.path(), newname));
-
- EXPECT_THAT(unlink(oldsymlink.c_str()), SyscallSucceeds());
- EXPECT_THAT(unlink(newname.c_str()), SyscallSucceeds());
-}
-
-TEST(LinkTest, LinkatWithSymlinkFollow) {
- // Create oldfile, and oldsymlink which points to it.
- auto oldfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const std::string oldsymlink = NewTempAbsPath();
- ASSERT_THAT(symlink(oldfile.path().c_str(), oldsymlink.c_str()),
- SyscallSucceeds());
-
- // Now hard link newname to oldsymlink, and pass AT_SYMLINK_FOLLOW flag.
- const std::string newname = NewTempAbsPath();
- ASSERT_THAT(linkat(AT_FDCWD, oldsymlink.c_str(), AT_FDCWD, newname.c_str(),
- AT_SYMLINK_FOLLOW),
- SyscallSucceeds());
-
- // The link should have resolved the symlink, so oldfile and newname are the
- // same.
- EXPECT_TRUE(IsSameFile(oldfile.path(), newname));
- EXPECT_FALSE(IsSameFile(oldsymlink, newname));
-
- EXPECT_THAT(unlink(oldsymlink.c_str()), SyscallSucceeds());
- EXPECT_THAT(unlink(newname.c_str()), SyscallSucceeds());
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/lseek.cc b/test/syscalls/linux/lseek.cc
deleted file mode 100644
index d4f89527c..000000000
--- a/test/syscalls/linux/lseek.cc
+++ /dev/null
@@ -1,202 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <stdlib.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(LseekTest, InvalidWhence) {
- const std::string kFileData = "hello world\n";
- const TempPath path = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), kFileData, TempPath::kDefaultFileMode));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(path.path(), O_RDWR, 0644));
-
- ASSERT_THAT(lseek(fd.get(), 0, -1), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(LseekTest, NegativeOffset) {
- const std::string kFileData = "hello world\n";
- const TempPath path = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), kFileData, TempPath::kDefaultFileMode));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(path.path(), O_RDWR, 0644));
-
- EXPECT_THAT(lseek(fd.get(), -(kFileData.length() + 1), SEEK_CUR),
- SyscallFailsWithErrno(EINVAL));
-}
-
-// A 32-bit off_t is not large enough to represent an offset larger than the
-// maximum file size on standard file systems, so it isn't possible to cause
-// overflow there.
-#if defined(__x86_64__) || defined(__aarch64__)
-TEST(LseekTest, Overflow) {
- // HA! Classic Linux. We really should get an EOVERFLOW here, since we're
- // seeking to an offset that cannot be represented, but instead we are
- // given an EINVAL.
- const std::string kFileData = "hello world\n";
- const TempPath path = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), kFileData, TempPath::kDefaultFileMode));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(path.path(), O_RDWR, 0644));
- EXPECT_THAT(lseek(fd.get(), 0x7fffffffffffffff, SEEK_END),
- SyscallFailsWithErrno(EINVAL));
-}
-#endif
-
-TEST(LseekTest, Set) {
- const std::string kFileData = "hello world\n";
- const TempPath path = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), kFileData, TempPath::kDefaultFileMode));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(path.path(), O_RDWR, 0644));
-
- char buf = '\0';
- EXPECT_THAT(lseek(fd.get(), 0, SEEK_SET), SyscallSucceedsWithValue(0));
- ASSERT_THAT(read(fd.get(), &buf, 1), SyscallSucceedsWithValue(1));
- EXPECT_EQ(buf, kFileData.c_str()[0]);
- EXPECT_THAT(lseek(fd.get(), 6, SEEK_SET), SyscallSucceedsWithValue(6));
- ASSERT_THAT(read(fd.get(), &buf, 1), SyscallSucceedsWithValue(1));
- EXPECT_EQ(buf, kFileData.c_str()[6]);
-}
-
-TEST(LseekTest, Cur) {
- const std::string kFileData = "hello world\n";
- const TempPath path = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), kFileData, TempPath::kDefaultFileMode));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(path.path(), O_RDWR, 0644));
-
- char buf = '\0';
- EXPECT_THAT(lseek(fd.get(), 0, SEEK_SET), SyscallSucceedsWithValue(0));
- ASSERT_THAT(read(fd.get(), &buf, 1), SyscallSucceedsWithValue(1));
- EXPECT_EQ(buf, kFileData.c_str()[0]);
- EXPECT_THAT(lseek(fd.get(), 3, SEEK_CUR), SyscallSucceedsWithValue(4));
- ASSERT_THAT(read(fd.get(), &buf, 1), SyscallSucceedsWithValue(1));
- EXPECT_EQ(buf, kFileData.c_str()[4]);
-}
-
-TEST(LseekTest, End) {
- const std::string kFileData = "hello world\n";
- const TempPath path = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), kFileData, TempPath::kDefaultFileMode));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(path.path(), O_RDWR, 0644));
-
- char buf = '\0';
- EXPECT_THAT(lseek(fd.get(), 0, SEEK_SET), SyscallSucceedsWithValue(0));
- ASSERT_THAT(read(fd.get(), &buf, 1), SyscallSucceedsWithValue(1));
- EXPECT_EQ(buf, kFileData.c_str()[0]);
- EXPECT_THAT(lseek(fd.get(), -2, SEEK_END), SyscallSucceedsWithValue(10));
- ASSERT_THAT(read(fd.get(), &buf, 1), SyscallSucceedsWithValue(1));
- EXPECT_EQ(buf, kFileData.c_str()[kFileData.length() - 2]);
-}
-
-TEST(LseekTest, InvalidFD) {
- EXPECT_THAT(lseek(-1, 0, SEEK_SET), SyscallFailsWithErrno(EBADF));
-}
-
-TEST(LseekTest, DirCurEnd) {
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open("/tmp", O_RDONLY));
- ASSERT_THAT(lseek(fd.get(), 0, SEEK_CUR), SyscallSucceedsWithValue(0));
-}
-
-TEST(LseekTest, ProcDir) {
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/proc/self", O_RDONLY));
- ASSERT_THAT(lseek(fd.get(), 0, SEEK_CUR), SyscallSucceeds());
- ASSERT_THAT(lseek(fd.get(), 0, SEEK_END), SyscallSucceeds());
-}
-
-TEST(LseekTest, ProcFile) {
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/proc/meminfo", O_RDONLY));
- ASSERT_THAT(lseek(fd.get(), 0, SEEK_CUR), SyscallSucceeds());
- ASSERT_THAT(lseek(fd.get(), 0, SEEK_END), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(LseekTest, SysDir) {
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/sys/devices", O_RDONLY));
- ASSERT_THAT(lseek(fd.get(), 0, SEEK_CUR), SyscallSucceeds());
- ASSERT_THAT(lseek(fd.get(), 0, SEEK_END), SyscallSucceeds());
-}
-
-TEST(LseekTest, SeekCurrentDir) {
- // From include/linux/fs.h.
- constexpr loff_t MAX_LFS_FILESIZE = 0x7fffffffffffffff;
-
- char* dir = getcwd(NULL, 0);
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(dir, O_RDONLY));
-
- ASSERT_THAT(lseek(fd.get(), 0, SEEK_CUR), SyscallSucceeds());
- ASSERT_THAT(lseek(fd.get(), 0, SEEK_END),
- // Some filesystems (like ext4) allow lseek(SEEK_END) on a
- // directory and return MAX_LFS_FILESIZE, others return EINVAL.
- AnyOf(SyscallSucceedsWithValue(MAX_LFS_FILESIZE),
- SyscallFailsWithErrno(EINVAL)));
- free(dir);
-}
-
-TEST(LseekTest, ProcStatTwice) {
- const FileDescriptor fd1 =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/proc/stat", O_RDONLY));
- const FileDescriptor fd2 =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/proc/stat", O_RDONLY));
-
- ASSERT_THAT(lseek(fd1.get(), 0, SEEK_CUR), SyscallSucceedsWithValue(0));
- ASSERT_THAT(lseek(fd1.get(), 0, SEEK_END), SyscallFailsWithErrno(EINVAL));
- ASSERT_THAT(lseek(fd1.get(), 1000, SEEK_CUR), SyscallSucceeds());
- // Check that just because we moved fd1, fd2 doesn't move.
- ASSERT_THAT(lseek(fd2.get(), 0, SEEK_CUR), SyscallSucceedsWithValue(0));
-
- const FileDescriptor fd3 =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/proc/stat", O_RDONLY));
- ASSERT_THAT(lseek(fd3.get(), 0, SEEK_CUR), SyscallSucceedsWithValue(0));
-}
-
-TEST(LseekTest, EtcPasswdDup) {
- const FileDescriptor fd1 =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/etc/passwd", O_RDONLY));
- const FileDescriptor fd2 = ASSERT_NO_ERRNO_AND_VALUE(fd1.Dup());
-
- ASSERT_THAT(lseek(fd1.get(), 0, SEEK_CUR), SyscallSucceedsWithValue(0));
- ASSERT_THAT(lseek(fd2.get(), 0, SEEK_CUR), SyscallSucceedsWithValue(0));
- ASSERT_THAT(lseek(fd1.get(), 1000, SEEK_CUR), SyscallSucceeds());
- // fd2 is a dup of fd1, so they share an offset; moving fd1 moves fd2 too.
- ASSERT_THAT(lseek(fd2.get(), 0, SEEK_CUR), SyscallSucceedsWithValue(1000));
-
- const FileDescriptor fd3 = ASSERT_NO_ERRNO_AND_VALUE(fd1.Dup());
- ASSERT_THAT(lseek(fd3.get(), 0, SEEK_CUR), SyscallSucceedsWithValue(1000));
-}
-
-// TODO(magi): Add tests where we have donated in sockets.
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/madvise.cc b/test/syscalls/linux/madvise.cc
deleted file mode 100644
index 6e714b12c..000000000
--- a/test/syscalls/linux/madvise.cc
+++ /dev/null
@@ -1,251 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <unistd.h>
-
-#include <limits>
-#include <string>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/logging.h"
-#include "test/util/memory_util.h"
-#include "test/util/multiprocess_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
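-// Asserts, via gtest, that each of the first kPageSize bytes of the mapping
-// equals c.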
-void ExpectAllMappingBytes(Mapping const& m, char c) {
- auto const v = m.view();
- for (size_t i = 0; i < kPageSize; i++) {
- ASSERT_EQ(v[i], c) << "at offset " << i;
- }
-}
-
-// Equivalent to ExpectAllMappingBytes but async-signal-safe and with less
-// helpful failure messages.
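-// Used in forked children, where gtest assertions are not async-signal-safe.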
-void CheckAllMappingBytes(Mapping const& m, char c) {
- auto const v = m.view();
- for (size_t i = 0; i < kPageSize; i++) {
- TEST_CHECK_MSG(v[i] == c, "mapping contains wrong value");
- }
-}
-
-TEST(MadviseDontneedTest, ZerosPrivateAnonPage) {
- auto m = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
- ExpectAllMappingBytes(m, 0);
- memset(m.ptr(), 1, m.len());
- ExpectAllMappingBytes(m, 1);
- ASSERT_THAT(madvise(m.ptr(), m.len(), MADV_DONTNEED), SyscallSucceeds());
- ExpectAllMappingBytes(m, 0);
-}
-
-TEST(MadviseDontneedTest, ZerosCOWAnonPageInCallerOnly) {
- auto m = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
- ExpectAllMappingBytes(m, 0);
- memset(m.ptr(), 2, m.len());
- ExpectAllMappingBytes(m, 2);
-
- // Do madvise in a child process.
- pid_t pid = fork();
- CheckAllMappingBytes(m, 2);
- if (pid == 0) {
- TEST_PCHECK(madvise(m.ptr(), m.len(), MADV_DONTNEED) == 0);
- CheckAllMappingBytes(m, 0);
- _exit(0);
- }
-
- ASSERT_THAT(pid, SyscallSucceeds());
-
- int status = 0;
- ASSERT_THAT(waitpid(-1, &status, 0), SyscallSucceedsWithValue(pid));
- EXPECT_TRUE(WIFEXITED(status));
- EXPECT_EQ(WEXITSTATUS(status), 0);
- // The child's madvise should not have affected the parent.
- ExpectAllMappingBytes(m, 2);
-}
-
-TEST(MadviseDontneedTest, DoesNotModifySharedAnonPage) {
- auto m = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED));
- ExpectAllMappingBytes(m, 0);
- memset(m.ptr(), 3, m.len());
- ExpectAllMappingBytes(m, 3);
- ASSERT_THAT(madvise(m.ptr(), m.len(), MADV_DONTNEED), SyscallSucceeds());
- ExpectAllMappingBytes(m, 3);
-}
-
-TEST(MadviseDontneedTest, CleansPrivateFilePage) {
- TempPath f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- /* parent = */ GetAbsoluteTestTmpdir(),
- /* content = */ std::string(kPageSize, 4), TempPath::kDefaultFileMode));
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(f.path(), O_RDWR));
-
- Mapping m = ASSERT_NO_ERRNO_AND_VALUE(Mmap(
- nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd.get(), 0));
- ExpectAllMappingBytes(m, 4);
- memset(m.ptr(), 5, m.len());
- ExpectAllMappingBytes(m, 5);
- ASSERT_THAT(madvise(m.ptr(), m.len(), MADV_DONTNEED), SyscallSucceeds());
- ExpectAllMappingBytes(m, 4);
-}
-
-TEST(MadviseDontneedTest, DoesNotModifySharedFilePage) {
- TempPath f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- /* parent = */ GetAbsoluteTestTmpdir(),
- /* content = */ std::string(kPageSize, 6), TempPath::kDefaultFileMode));
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(f.path(), O_RDWR));
-
- Mapping m = ASSERT_NO_ERRNO_AND_VALUE(Mmap(
- nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));
- ExpectAllMappingBytes(m, 6);
- memset(m.ptr(), 7, m.len());
- ExpectAllMappingBytes(m, 7);
- ASSERT_THAT(madvise(m.ptr(), m.len(), MADV_DONTNEED), SyscallSucceeds());
- ExpectAllMappingBytes(m, 7);
-}
-
-TEST(MadviseDontneedTest, IgnoresPermissions) {
- auto m =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(kPageSize, PROT_NONE, MAP_PRIVATE));
- EXPECT_THAT(madvise(m.ptr(), m.len(), MADV_DONTNEED), SyscallSucceeds());
-}
-
-TEST(MadviseDontforkTest, AddressLength) {
- auto m =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(kPageSize, PROT_NONE, MAP_PRIVATE));
- char* addr = static_cast<char*>(m.ptr());
-
- // Address must be page aligned.
- EXPECT_THAT(madvise(addr + 1, kPageSize, MADV_DONTFORK),
- SyscallFailsWithErrno(EINVAL));
-
- // Zero length madvise always succeeds.
- EXPECT_THAT(madvise(addr, 0, MADV_DONTFORK), SyscallSucceeds());
-
- // Length must not roll over after rounding up.
- size_t badlen = std::numeric_limits<std::size_t>::max() - (kPageSize / 2);
- EXPECT_THAT(madvise(0, badlen, MADV_DONTFORK), SyscallFailsWithErrno(EINVAL));
-
- // Length need not be page aligned - it is implicitly rounded up.
- EXPECT_THAT(madvise(addr, 1, MADV_DONTFORK), SyscallSucceeds());
- EXPECT_THAT(madvise(addr, kPageSize, MADV_DONTFORK), SyscallSucceeds());
-}
-
-TEST(MadviseDontforkTest, DontforkShared) {
- // Mmap two shared file-backed pages and MADV_DONTFORK the second page.
- TempPath f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- /* parent = */ GetAbsoluteTestTmpdir(),
- /* content = */ std::string(kPageSize * 2, 2),
- TempPath::kDefaultFileMode));
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(f.path(), O_RDWR));
-
- Mapping m = ASSERT_NO_ERRNO_AND_VALUE(Mmap(
- nullptr, kPageSize * 2, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));
-
- const Mapping ms1 = Mapping(reinterpret_cast<void*>(m.addr()), kPageSize);
- const Mapping ms2 =
- Mapping(reinterpret_cast<void*>(m.addr() + kPageSize), kPageSize);
- m.release();
-
- ASSERT_THAT(madvise(ms2.ptr(), kPageSize, MADV_DONTFORK), SyscallSucceeds());
-
- const auto rest = [&] {
- // First page is mapped in child and modifications are visible to parent
- // via the shared mapping.
- TEST_CHECK(IsMapped(ms1.addr()));
- CheckAllMappingBytes(ms1, 2);
- memset(ms1.ptr(), 1, kPageSize);
- CheckAllMappingBytes(ms1, 1);
-
- // Second page must not be mapped in child.
- TEST_CHECK(!IsMapped(ms2.addr()));
- };
-
- EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));
-
- ExpectAllMappingBytes(ms1, 1); // page contents modified by child.
- ExpectAllMappingBytes(ms2, 2); // page contents unchanged.
-}
-
-TEST(MadviseDontforkTest, DontforkAnonPrivate) {
- // Mmap three anonymous pages and MADV_DONTFORK the middle page.
- Mapping m = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize * 3, PROT_READ | PROT_WRITE, MAP_PRIVATE));
- const Mapping mp1 = Mapping(reinterpret_cast<void*>(m.addr()), kPageSize);
- const Mapping mp2 =
- Mapping(reinterpret_cast<void*>(m.addr() + kPageSize), kPageSize);
- const Mapping mp3 =
- Mapping(reinterpret_cast<void*>(m.addr() + 2 * kPageSize), kPageSize);
- m.release();
-
- ASSERT_THAT(madvise(mp2.ptr(), kPageSize, MADV_DONTFORK), SyscallSucceeds());
-
- // Verify that all pages are zeroed and memset the first, second and third
- // pages to 1, 2, and 3 respectively.
- ExpectAllMappingBytes(mp1, 0);
- memset(mp1.ptr(), 1, kPageSize);
-
- ExpectAllMappingBytes(mp2, 0);
- memset(mp2.ptr(), 2, kPageSize);
-
- ExpectAllMappingBytes(mp3, 0);
- memset(mp3.ptr(), 3, kPageSize);
-
- const auto rest = [&] {
- // Verify first page is mapped, verify its contents and then modify the
- // page. The mapping is private so the modifications are not visible to
- // the parent.
- TEST_CHECK(IsMapped(mp1.addr()));
- CheckAllMappingBytes(mp1, 1);
- memset(mp1.ptr(), 11, kPageSize);
- CheckAllMappingBytes(mp1, 11);
-
- // Verify second page is not mapped.
- TEST_CHECK(!IsMapped(mp2.addr()));
-
- // Verify third page is mapped, verify its contents and then modify the
- // page. The mapping is private so the modifications are not visible to
- // the parent.
- TEST_CHECK(IsMapped(mp3.addr()));
- CheckAllMappingBytes(mp3, 3);
- memset(mp3.ptr(), 13, kPageSize);
- CheckAllMappingBytes(mp3, 13);
- };
- EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));
-
- // The fork and COW by child should not affect the parent mappings.
- ExpectAllMappingBytes(mp1, 1);
- ExpectAllMappingBytes(mp2, 2);
- ExpectAllMappingBytes(mp3, 3);
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/membarrier.cc b/test/syscalls/linux/membarrier.cc
deleted file mode 100644
index 516956a25..000000000
--- a/test/syscalls/linux/membarrier.cc
+++ /dev/null
@@ -1,268 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <signal.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <unistd.h>
-
-#include <atomic>
-
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/util/cleanup.h"
-#include "test/util/logging.h"
-#include "test/util/memory_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// This is the classic test case for memory fences on architectures with total
-// store ordering; see e.g. Intel SDM Vol. 3A Sec. 8.2.3.4 "Loads May Be
-// Reordered with Earlier Stores to Different Locations". In each iteration of
-// the test, given two variables X and Y initially set to 0
-// (MembarrierTestSharedState::local_var and remote_var in the code), two
-// threads execute as follows:
-//
-// T1 T2
-// -- --
-//
-// X = 1 Y = 1
-// T1fence() T2fence()
-// read Y read X
-//
-// On architectures where memory writes may be locally buffered by each CPU
-// (essentially all architectures), if T1fence() and T2fence() are omitted or
-// ineffective, it is possible for both T1 and T2 to read 0 because the memory
-// write from the other CPU is not yet visible outside that CPU. T1fence() and
-// T2fence() are expected to perform the necessary synchronization to restore
-// sequential consistency: both threads agree on an order of memory accesses
-// that is consistent with program order in each thread, such that at least one
-// thread reads 1.
-//
-// In the NoMembarrier test, T1fence() and T2fence() are both ordinary memory
-// fences establishing ordering between memory accesses before and after the
-// fence (std::atomic_thread_fence). In all other test cases, T1fence() is not a
-// memory fence at all, but only prevents compiler reordering of memory accesses
-// (std::atomic_signal_fence); T2fence() is an invocation of the membarrier()
-// syscall, which establishes ordering of memory accesses before and after the
-// syscall on both threads.
-
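-// Runs one side of the test: store 1 to our variable, execute the supplied
-// fence, then load and return the other side's variable.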
-template <typename F>
-int DoMembarrierTestSide(std::atomic<int>* our_var,
- std::atomic<int> const& their_var,
- F const& test_fence) {
- our_var->store(1, std::memory_order_relaxed);
- test_fence();
- return their_var.load(std::memory_order_relaxed);
-}
-
-struct MembarrierTestSharedState {
- std::atomic<int64_t> remote_iter_cur;
- std::atomic<int64_t> remote_iter_done;
- std::atomic<int> local_var;
- std::atomic<int> remote_var;
- int remote_obs_of_local_var;
-
- void Init() {
- remote_iter_cur.store(-1, std::memory_order_relaxed);
- remote_iter_done.store(-1, std::memory_order_relaxed);
- }
-};
-
-// Special value for MembarrierTestSharedState::remote_iter_cur indicating that
-// the remote thread should terminate.
-constexpr int64_t kRemoteIterStop = -2;
-
-// Must be async-signal-safe.
-template <typename F>
-void RunMembarrierTestRemoteSide(MembarrierTestSharedState* state,
- F const& test_fence) {
- int64_t i = 0;
- int64_t cur;
- while (true) {
- while ((cur = state->remote_iter_cur.load(std::memory_order_acquire)) < i) {
- if (cur == kRemoteIterStop) {
- return;
- }
- // spin
- }
- state->remote_obs_of_local_var =
- DoMembarrierTestSide(&state->remote_var, state->local_var, test_fence);
- state->remote_iter_done.store(i, std::memory_order_release);
- i++;
- }
-}
-
-template <typename F>
-void RunMembarrierTestLocalSide(MembarrierTestSharedState* state,
- F const& test_fence) {
- // On test completion, instruct the remote thread to terminate.
- Cleanup cleanup_remote([&] {
- state->remote_iter_cur.store(kRemoteIterStop, std::memory_order_relaxed);
- });
-
- int64_t i = 0;
- absl::Time end = absl::Now() + absl::Seconds(5); // arbitrary test duration
- while (absl::Now() < end) {
- // Reset both vars to 0.
- state->local_var.store(0, std::memory_order_relaxed);
- state->remote_var.store(0, std::memory_order_relaxed);
- // Instruct the remote thread to begin this iteration.
- state->remote_iter_cur.store(i, std::memory_order_release);
- // Perform our side of the test.
- auto local_obs_of_remote_var =
- DoMembarrierTestSide(&state->local_var, state->remote_var, test_fence);
- // Wait for the remote thread to finish this iteration.
- while (state->remote_iter_done.load(std::memory_order_acquire) < i) {
- // spin
- }
- ASSERT_TRUE(local_obs_of_remote_var != 0 ||
- state->remote_obs_of_local_var != 0);
- i++;
- }
-}
-
-TEST(MembarrierTest, NoMembarrier) {
- MembarrierTestSharedState state;
- state.Init();
-
- ScopedThread remote_thread([&] {
- RunMembarrierTestRemoteSide(
- &state, [] { std::atomic_thread_fence(std::memory_order_seq_cst); });
- });
- RunMembarrierTestLocalSide(
- &state, [] { std::atomic_thread_fence(std::memory_order_seq_cst); });
-}
-
-enum membarrier_cmd {
- MEMBARRIER_CMD_QUERY = 0,
- MEMBARRIER_CMD_GLOBAL = (1 << 0),
- MEMBARRIER_CMD_GLOBAL_EXPEDITED = (1 << 1),
- MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED = (1 << 2),
- MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3),
- MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = (1 << 4),
-};
-
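-// glibc does not provide a membarrier() wrapper, so call the syscall directly.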
-int membarrier(membarrier_cmd cmd, int flags) {
- return syscall(SYS_membarrier, cmd, flags);
-}
-
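-// Returns the bitmask of membarrier commands supported by the kernel, or 0 if
-// membarrier(2) is unavailable (ENOSYS).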
-PosixErrorOr<int> SupportedMembarrierCommands() {
- int cmds = membarrier(MEMBARRIER_CMD_QUERY, 0);
- if (cmds < 0) {
- if (errno == ENOSYS) {
- // No commands are supported.
- return 0;
- }
- return PosixError(errno, "membarrier(MEMBARRIER_CMD_QUERY) failed");
- }
- return cmds;
-}
-
-TEST(MembarrierTest, Global) {
- SKIP_IF((ASSERT_NO_ERRNO_AND_VALUE(SupportedMembarrierCommands()) &
- MEMBARRIER_CMD_GLOBAL) == 0);
-
- Mapping m = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED));
- auto state = static_cast<MembarrierTestSharedState*>(m.ptr());
- state->Init();
-
- pid_t const child_pid = fork();
- if (child_pid == 0) {
- // In child process.
- RunMembarrierTestRemoteSide(
- state, [] { TEST_PCHECK(membarrier(MEMBARRIER_CMD_GLOBAL, 0) == 0); });
- _exit(0);
- }
- // In parent process.
- ASSERT_THAT(child_pid, SyscallSucceeds());
- Cleanup cleanup_child([&] {
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << " status " << status;
- });
- RunMembarrierTestLocalSide(
- state, [] { std::atomic_signal_fence(std::memory_order_seq_cst); });
-}
-
-TEST(MembarrierTest, GlobalExpedited) {
- constexpr int kRequiredCommands = MEMBARRIER_CMD_GLOBAL_EXPEDITED |
- MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED;
- SKIP_IF((ASSERT_NO_ERRNO_AND_VALUE(SupportedMembarrierCommands()) &
- kRequiredCommands) != kRequiredCommands);
-
- ASSERT_THAT(membarrier(MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED, 0),
- SyscallSucceeds());
-
- Mapping m = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED));
- auto state = static_cast<MembarrierTestSharedState*>(m.ptr());
- state->Init();
-
- pid_t const child_pid = fork();
- if (child_pid == 0) {
- // In child process.
- RunMembarrierTestRemoteSide(state, [] {
- TEST_PCHECK(membarrier(MEMBARRIER_CMD_GLOBAL_EXPEDITED, 0) == 0);
- });
- _exit(0);
- }
- // In parent process.
- ASSERT_THAT(child_pid, SyscallSucceeds());
- Cleanup cleanup_child([&] {
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << " status " << status;
- });
- RunMembarrierTestLocalSide(
- state, [] { std::atomic_signal_fence(std::memory_order_seq_cst); });
-}
-
-TEST(MembarrierTest, PrivateExpedited) {
- constexpr int kRequiredCommands = MEMBARRIER_CMD_PRIVATE_EXPEDITED |
- MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED;
- SKIP_IF((ASSERT_NO_ERRNO_AND_VALUE(SupportedMembarrierCommands()) &
- kRequiredCommands) != kRequiredCommands);
-
- ASSERT_THAT(membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0),
- SyscallSucceeds());
-
- MembarrierTestSharedState state;
- state.Init();
-
- ScopedThread remote_thread([&] {
- RunMembarrierTestRemoteSide(&state, [] {
- TEST_PCHECK(membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0) == 0);
- });
- });
- RunMembarrierTestLocalSide(
- &state, [] { std::atomic_signal_fence(std::memory_order_seq_cst); });
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/memfd.cc b/test/syscalls/linux/memfd.cc
deleted file mode 100644
index 4a450742b..000000000
--- a/test/syscalls/linux/memfd.cc
+++ /dev/null
@@ -1,542 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <linux/memfd.h>
-#include <linux/unistd.h>
-#include <string.h>
-#include <sys/mman.h>
-#include <sys/syscall.h>
-
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/memory_util.h"
-#include "test/util/multiprocess_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-// The header sys/memfd.h isn't available on all systems, so some of its
-// constants are redefined here.
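-// These values mirror the kernel's definitions in <linux/fcntl.h>.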
-#define F_LINUX_SPECIFIC_BASE 1024
-
-#ifndef F_ADD_SEALS
-#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
-#endif /* F_ADD_SEALS */
-
-#ifndef F_GET_SEALS
-#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)
-#endif /* F_GET_SEALS */
-
-#define F_SEAL_SEAL 0x0001
-#define F_SEAL_SHRINK 0x0002
-#define F_SEAL_GROW 0x0004
-#define F_SEAL_WRITE 0x0008
-
-using ::gvisor::testing::IsTmpfs;
-using ::testing::StartsWith;
-
-const std::string kMemfdName = "some-memfd";
-
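-// Raw wrapper around the memfd_create syscall; a glibc memfd_create() wrapper
-// only exists from glibc 2.27 onwards.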
-int memfd_create(const std::string& name, unsigned int flags) {
- return syscall(__NR_memfd_create, name.c_str(), flags);
-}
-
-PosixErrorOr<FileDescriptor> MemfdCreate(const std::string& name,
- uint32_t flags) {
- int fd = memfd_create(name, flags);
- if (fd < 0) {
- return PosixError(
- errno, absl::StrFormat("memfd_create(\"%s\", %#x)", name, flags));
- }
- MaybeSave();
- return FileDescriptor(fd);
-}
-
-// Procfs entries for memfds display the appropriate name.
-TEST(MemfdTest, Name) {
- const FileDescriptor memfd =
- ASSERT_NO_ERRNO_AND_VALUE(MemfdCreate(kMemfdName, 0));
- const std::string proc_name = ASSERT_NO_ERRNO_AND_VALUE(
- ReadLink(absl::StrFormat("/proc/self/fd/%d", memfd.get())));
- EXPECT_THAT(proc_name, StartsWith("/memfd:" + kMemfdName));
-}
-
-// Memfds support read/write syscalls.
-TEST(MemfdTest, WriteRead) {
- const FileDescriptor memfd =
- ASSERT_NO_ERRNO_AND_VALUE(MemfdCreate(kMemfdName, 0));
-
- // Write a random page of data to the memfd via write(2).
- std::vector<char> buf(kPageSize);
- RandomizeBuffer(buf.data(), buf.size());
- ASSERT_THAT(write(memfd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(kPageSize));
-
- // Read back the same data and verify.
- std::vector<char> buf2(kPageSize);
- ASSERT_THAT(lseek(memfd.get(), 0, SEEK_SET), SyscallSucceeds());
- EXPECT_THAT(read(memfd.get(), buf2.data(), buf2.size()),
- SyscallSucceedsWithValue(kPageSize));
- EXPECT_EQ(buf, buf2);
-}
-
-// Memfds can be mapped and used as usual.
-TEST(MemfdTest, Mmap) {
- const FileDescriptor memfd =
- ASSERT_NO_ERRNO_AND_VALUE(MemfdCreate(kMemfdName, 0));
- const Mapping m1 = ASSERT_NO_ERRNO_AND_VALUE(Mmap(
- nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, memfd.get(), 0));
-
- // Write a random page of data to the memfd via mmap m1.
- std::vector<char> buf(kPageSize);
- RandomizeBuffer(buf.data(), buf.size());
- ASSERT_THAT(ftruncate(memfd.get(), kPageSize), SyscallSucceeds());
- memcpy(m1.ptr(), buf.data(), buf.size());
-
- // Read the data back via a read syscall on the memfd.
- std::vector<char> buf2(kPageSize);
- EXPECT_THAT(read(memfd.get(), buf2.data(), buf2.size()),
- SyscallSucceedsWithValue(kPageSize));
- EXPECT_EQ(buf, buf2);
-
- // The same data should be accessible via a new mapping m2.
- const Mapping m2 = ASSERT_NO_ERRNO_AND_VALUE(Mmap(
- nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, memfd.get(), 0));
- EXPECT_EQ(0, memcmp(m1.ptr(), m2.ptr(), kPageSize));
-}
-
-TEST(MemfdTest, DuplicateFDsShareContent) {
- const FileDescriptor memfd =
- ASSERT_NO_ERRNO_AND_VALUE(MemfdCreate(kMemfdName, 0));
- const Mapping m1 = ASSERT_NO_ERRNO_AND_VALUE(Mmap(
- nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, memfd.get(), 0));
- const FileDescriptor memfd2 = ASSERT_NO_ERRNO_AND_VALUE(memfd.Dup());
-
- // Write a random page of data to the memfd via mmap m1.
- std::vector<char> buf(kPageSize);
- RandomizeBuffer(buf.data(), buf.size());
- ASSERT_THAT(ftruncate(memfd.get(), kPageSize), SyscallSucceeds());
- memcpy(m1.ptr(), buf.data(), buf.size());
-
- // Read the data back via a read syscall on a duplicate fd.
- std::vector<char> buf2(kPageSize);
- EXPECT_THAT(read(memfd2.get(), buf2.data(), buf2.size()),
- SyscallSucceedsWithValue(kPageSize));
- EXPECT_EQ(buf, buf2);
-}
-
-// File seals are disabled by default on memfds.
-TEST(MemfdTest, SealingDisabledByDefault) {
- const FileDescriptor memfd =
- ASSERT_NO_ERRNO_AND_VALUE(MemfdCreate(kMemfdName, 0));
- EXPECT_THAT(fcntl(memfd.get(), F_GET_SEALS),
- SyscallSucceedsWithValue(F_SEAL_SEAL));
- // Attempting to set any seal should fail.
- EXPECT_THAT(fcntl(memfd.get(), F_ADD_SEALS, F_SEAL_WRITE),
- SyscallFailsWithErrno(EPERM));
-}
-
-// Seals can be retrieved and updated for memfds.
-TEST(MemfdTest, SealsGetSet) {
- const FileDescriptor memfd =
- ASSERT_NO_ERRNO_AND_VALUE(MemfdCreate(kMemfdName, MFD_ALLOW_SEALING));
- int seals;
- ASSERT_THAT(seals = fcntl(memfd.get(), F_GET_SEALS), SyscallSucceeds());
- // No seals are set yet.
- EXPECT_EQ(0, seals);
-
- // Set a seal and check that we can get it back.
- ASSERT_THAT(fcntl(memfd.get(), F_ADD_SEALS, F_SEAL_WRITE), SyscallSucceeds());
- EXPECT_THAT(fcntl(memfd.get(), F_GET_SEALS),
- SyscallSucceedsWithValue(F_SEAL_WRITE));
-
- // Set some more seals and verify.
- ASSERT_THAT(fcntl(memfd.get(), F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK),
- SyscallSucceeds());
- EXPECT_THAT(
- fcntl(memfd.get(), F_GET_SEALS),
- SyscallSucceedsWithValue(F_SEAL_WRITE | F_SEAL_GROW | F_SEAL_SHRINK));
-
- // Attempting to set a seal that is already set is a no-op.
- ASSERT_THAT(fcntl(memfd.get(), F_ADD_SEALS, F_SEAL_WRITE), SyscallSucceeds());
- EXPECT_THAT(
- fcntl(memfd.get(), F_GET_SEALS),
- SyscallSucceedsWithValue(F_SEAL_WRITE | F_SEAL_GROW | F_SEAL_SHRINK));
-
- // Add remaining seals and verify.
- ASSERT_THAT(fcntl(memfd.get(), F_ADD_SEALS, F_SEAL_SEAL), SyscallSucceeds());
- EXPECT_THAT(fcntl(memfd.get(), F_GET_SEALS),
- SyscallSucceedsWithValue(F_SEAL_WRITE | F_SEAL_GROW |
- F_SEAL_SHRINK | F_SEAL_SEAL));
-}
-
-// F_SEAL_GROW prevents a memfd from being grown using ftruncate.
-TEST(MemfdTest, SealGrowWithTruncate) {
- const FileDescriptor memfd =
- ASSERT_NO_ERRNO_AND_VALUE(MemfdCreate(kMemfdName, MFD_ALLOW_SEALING));
- ASSERT_THAT(ftruncate(memfd.get(), kPageSize), SyscallSucceeds());
- ASSERT_THAT(fcntl(memfd.get(), F_ADD_SEALS, F_SEAL_GROW), SyscallSucceeds());
-
- // Try to grow the memfd by 1 page.
- ASSERT_THAT(ftruncate(memfd.get(), kPageSize * 2),
- SyscallFailsWithErrno(EPERM));
-
- // Ftruncate calls that don't actually grow the memfd are allowed.
- ASSERT_THAT(ftruncate(memfd.get(), kPageSize), SyscallSucceeds());
- ASSERT_THAT(ftruncate(memfd.get(), kPageSize / 2), SyscallSucceeds());
-
- // After shrinking, growing back is not allowed.
- ASSERT_THAT(ftruncate(memfd.get(), kPageSize), SyscallFailsWithErrno(EPERM));
-}
-
-// F_SEAL_GROW prevents a memfd from being grown using the write syscall.
-TEST(MemfdTest, SealGrowWithWrite) {
- const FileDescriptor memfd =
- ASSERT_NO_ERRNO_AND_VALUE(MemfdCreate(kMemfdName, MFD_ALLOW_SEALING));
-
- // Initially, writing to the memfd succeeds.
- const std::vector<char> buf(kPageSize);
- EXPECT_THAT(write(memfd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(kPageSize));
-
- // Apply F_SEAL_GROW; subsequent writes which extend the memfd should fail.
- ASSERT_THAT(fcntl(memfd.get(), F_ADD_SEALS, F_SEAL_GROW), SyscallSucceeds());
- EXPECT_THAT(write(memfd.get(), buf.data(), buf.size()),
- SyscallFailsWithErrno(EPERM));
-
- // However, zero-length writes are ok since they don't grow the memfd.
- EXPECT_THAT(write(memfd.get(), buf.data(), 0), SyscallSucceeds());
-
- // Writing to existing parts of the memfd is also ok.
- ASSERT_THAT(lseek(memfd.get(), 0, SEEK_SET), SyscallSucceeds());
- EXPECT_THAT(write(memfd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(kPageSize));
-
- // Back at the end of the file, writing is still not allowed.
- EXPECT_THAT(write(memfd.get(), buf.data(), buf.size()),
- SyscallFailsWithErrno(EPERM));
-}
-
-// F_SEAL_GROW causes writes which partially extend off the current EOF to
-// partially succeed, up to the page containing the EOF.
-TEST(MemfdTest, SealGrowPartialWriteTruncated) {
- const FileDescriptor memfd =
- ASSERT_NO_ERRNO_AND_VALUE(MemfdCreate(kMemfdName, MFD_ALLOW_SEALING));
- ASSERT_THAT(ftruncate(memfd.get(), kPageSize), SyscallSucceeds());
- ASSERT_THAT(fcntl(memfd.get(), F_ADD_SEALS, F_SEAL_GROW), SyscallSucceeds());
-
- // FD offset: 0, EOF: 1 page.
-
- ASSERT_THAT(lseek(memfd.get(), kPageSize * 3 / 4, SEEK_SET),
- SyscallSucceeds());
-
- // FD offset: 3/4 page. Writing a full page now should only write 1/4 page
- // worth of data. The write partially succeeds because the first 1/4 page of
- // the write lies entirely within the file and requires no growth, but
- // writing the final 3/4 page would require growing the file.
- const std::vector<char> buf(kPageSize);
- EXPECT_THAT(write(memfd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(kPageSize / 4));
-}
-
-// F_SEAL_GROW causes a write that partially extends past the current EOF to
-// fail in its entirety if the only data written would land in the page
-// containing the EOF.
-TEST(MemfdTest, SealGrowPartialWriteTruncatedSamePage) {
- const FileDescriptor memfd =
- ASSERT_NO_ERRNO_AND_VALUE(MemfdCreate(kMemfdName, MFD_ALLOW_SEALING));
- ASSERT_THAT(ftruncate(memfd.get(), kPageSize * 3 / 4), SyscallSucceeds());
- ASSERT_THAT(fcntl(memfd.get(), F_ADD_SEALS, F_SEAL_GROW), SyscallSucceeds());
-
- // EOF: 3/4 page, writing 1/2 page starting at 1/2 page would cause the file
- // to grow. Since this would require only the page containing the EOF to be
- // modified, the write is rejected entirely.
- const std::vector<char> buf(kPageSize / 2);
- EXPECT_THAT(pwrite(memfd.get(), buf.data(), buf.size(), kPageSize / 2),
- SyscallFailsWithErrno(EPERM));
-
- // However, writing up to EOF is fine.
- EXPECT_THAT(pwrite(memfd.get(), buf.data(), buf.size() / 2, kPageSize / 2),
- SyscallSucceedsWithValue(kPageSize / 4));
-}
-
-// F_SEAL_SHRINK prevents a memfd from being shrunk using ftruncate.
-TEST(MemfdTest, SealShrink) {
- const FileDescriptor memfd =
- ASSERT_NO_ERRNO_AND_VALUE(MemfdCreate(kMemfdName, MFD_ALLOW_SEALING));
- ASSERT_THAT(ftruncate(memfd.get(), kPageSize), SyscallSucceeds());
- ASSERT_THAT(fcntl(memfd.get(), F_ADD_SEALS, F_SEAL_SHRINK),
- SyscallSucceeds());
-
- // Shrink by half a page.
- ASSERT_THAT(ftruncate(memfd.get(), kPageSize / 2),
- SyscallFailsWithErrno(EPERM));
-
- // Ftruncate calls that don't actually shrink the file are allowed.
- ASSERT_THAT(ftruncate(memfd.get(), kPageSize), SyscallSucceeds());
- ASSERT_THAT(ftruncate(memfd.get(), kPageSize * 2), SyscallSucceeds());
-
- // After growing, shrinking is still not allowed.
- ASSERT_THAT(ftruncate(memfd.get(), kPageSize), SyscallFailsWithErrno(EPERM));
-}
-
-// F_SEAL_WRITE prevents a memfd from being written to through a write
-// syscall.
-TEST(MemfdTest, SealWriteWithWrite) {
- const FileDescriptor memfd =
- ASSERT_NO_ERRNO_AND_VALUE(MemfdCreate(kMemfdName, MFD_ALLOW_SEALING));
- const std::vector<char> buf(kPageSize);
- ASSERT_THAT(write(memfd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(kPageSize));
- ASSERT_THAT(fcntl(memfd.get(), F_ADD_SEALS, F_SEAL_WRITE), SyscallSucceeds());
-
- // Attempting to write at the end of the file fails.
- EXPECT_THAT(write(memfd.get(), buf.data(), 1), SyscallFailsWithErrno(EPERM));
-
- // Attempting to overwrite an existing part of the memfd fails.
- EXPECT_THAT(pwrite(memfd.get(), buf.data(), 1, 0),
- SyscallFailsWithErrno(EPERM));
- EXPECT_THAT(pwrite(memfd.get(), buf.data(), buf.size() / 2, kPageSize / 2),
- SyscallFailsWithErrno(EPERM));
- EXPECT_THAT(pwrite(memfd.get(), buf.data(), buf.size(), kPageSize / 2),
- SyscallFailsWithErrno(EPERM));
-
- // Zero-length writes however do not fail.
- EXPECT_THAT(write(memfd.get(), buf.data(), 0), SyscallSucceeds());
-}
-
-// F_SEAL_WRITE prevents a memfd from being written to through an mmap.
-TEST(MemfdTest, SealWriteWithMmap) {
- const FileDescriptor memfd =
- ASSERT_NO_ERRNO_AND_VALUE(MemfdCreate(kMemfdName, MFD_ALLOW_SEALING));
- const std::vector<char> buf(kPageSize);
- ASSERT_THAT(write(memfd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(kPageSize));
- ASSERT_THAT(fcntl(memfd.get(), F_ADD_SEALS, F_SEAL_WRITE), SyscallSucceeds());
-
- // Can't create a shared mapping with writes sealed.
- void* ret = mmap(nullptr, kPageSize, PROT_WRITE, MAP_SHARED, memfd.get(), 0);
- EXPECT_EQ(ret, MAP_FAILED);
- EXPECT_EQ(errno, EPERM);
- ret = mmap(nullptr, kPageSize, PROT_READ, MAP_SHARED, memfd.get(), 0);
- EXPECT_EQ(ret, MAP_FAILED);
- EXPECT_EQ(errno, EPERM);
-
- // However, private mappings are ok.
- EXPECT_NO_ERRNO(Mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE,
- memfd.get(), 0));
-}
-
-// Adding F_SEAL_WRITE fails when there are outstanding writable mappings to a
-// memfd.
-TEST(MemfdTest, SealWriteWithOutstandingWritbleMapping) {
- const FileDescriptor memfd =
- ASSERT_NO_ERRNO_AND_VALUE(MemfdCreate(kMemfdName, MFD_ALLOW_SEALING));
- const std::vector<char> buf(kPageSize);
- ASSERT_THAT(write(memfd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(kPageSize));
-
- // Attempting to add F_SEAL_WRITE while an active shared mapping exists
- // fails, regardless of the mapping's permissions.
-
- // Read-only shared mapping.
- {
- const Mapping m = ASSERT_NO_ERRNO_AND_VALUE(
- Mmap(nullptr, kPageSize, PROT_READ, MAP_SHARED, memfd.get(), 0));
- EXPECT_THAT(fcntl(memfd.get(), F_ADD_SEALS, F_SEAL_WRITE),
- SyscallFailsWithErrno(EBUSY));
- }
-
- // Write-only shared mapping.
- {
- const Mapping m = ASSERT_NO_ERRNO_AND_VALUE(
- Mmap(nullptr, kPageSize, PROT_WRITE, MAP_SHARED, memfd.get(), 0));
- EXPECT_THAT(fcntl(memfd.get(), F_ADD_SEALS, F_SEAL_WRITE),
- SyscallFailsWithErrno(EBUSY));
- }
-
- // Read-write shared mapping.
- {
- const Mapping m = ASSERT_NO_ERRNO_AND_VALUE(
- Mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED,
- memfd.get(), 0));
- EXPECT_THAT(fcntl(memfd.get(), F_ADD_SEALS, F_SEAL_WRITE),
- SyscallFailsWithErrno(EBUSY));
- }
-
- // F_SEAL_WRITE can be set with private mappings with any permissions.
- {
- const Mapping m = ASSERT_NO_ERRNO_AND_VALUE(
- Mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE,
- memfd.get(), 0));
- EXPECT_THAT(fcntl(memfd.get(), F_ADD_SEALS, F_SEAL_WRITE),
- SyscallSucceeds());
- }
-}
-
-// When applying F_SEAL_WRITE fails due to outstanding writable mappings, any
-// additional seals passed to the same add seal call are also rejected.
-TEST(MemfdTest, NoPartialSealApplicationWhenWriteSealRejected) {
- const FileDescriptor memfd =
- ASSERT_NO_ERRNO_AND_VALUE(MemfdCreate(kMemfdName, MFD_ALLOW_SEALING));
- const Mapping m = ASSERT_NO_ERRNO_AND_VALUE(Mmap(
- nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, memfd.get(), 0));
-
- // Try to add some seals along with F_SEAL_WRITE. The seal application should
- // fail since there exists an active shared mapping.
- EXPECT_THAT(fcntl(memfd.get(), F_ADD_SEALS, F_SEAL_WRITE | F_SEAL_GROW),
- SyscallFailsWithErrno(EBUSY));
-
- // None of the seals should be applied.
- EXPECT_THAT(fcntl(memfd.get(), F_GET_SEALS), SyscallSucceedsWithValue(0));
-}
-
-// Seals are inode level properties, and apply to all file descriptors referring
-// to a memfd.
-TEST(MemfdTest, SealsAreInodeLevelProperties) {
- const FileDescriptor memfd =
- ASSERT_NO_ERRNO_AND_VALUE(MemfdCreate(kMemfdName, MFD_ALLOW_SEALING));
- const FileDescriptor memfd2 = ASSERT_NO_ERRNO_AND_VALUE(memfd.Dup());
-
- // Add seal through the original memfd, and verify that it appears on the
- // dupped fd.
- ASSERT_THAT(fcntl(memfd.get(), F_ADD_SEALS, F_SEAL_WRITE), SyscallSucceeds());
- EXPECT_THAT(fcntl(memfd2.get(), F_GET_SEALS),
- SyscallSucceedsWithValue(F_SEAL_WRITE));
-
- // Verify the seal actually applies to both fds.
- std::vector<char> buf(kPageSize);
- EXPECT_THAT(write(memfd.get(), buf.data(), buf.size()),
- SyscallFailsWithErrno(EPERM));
- EXPECT_THAT(write(memfd2.get(), buf.data(), buf.size()),
- SyscallFailsWithErrno(EPERM));
-
- // Seals are enforced on new FDs that are dupped after the seal is already
- // applied.
- const FileDescriptor memfd3 = ASSERT_NO_ERRNO_AND_VALUE(memfd2.Dup());
- EXPECT_THAT(write(memfd3.get(), buf.data(), buf.size()),
- SyscallFailsWithErrno(EPERM));
-
- // Try a new seal applied to one of the dupped fds.
- ASSERT_THAT(fcntl(memfd3.get(), F_ADD_SEALS, F_SEAL_GROW), SyscallSucceeds());
- EXPECT_THAT(ftruncate(memfd.get(), kPageSize), SyscallFailsWithErrno(EPERM));
- EXPECT_THAT(ftruncate(memfd2.get(), kPageSize), SyscallFailsWithErrno(EPERM));
- EXPECT_THAT(ftruncate(memfd3.get(), kPageSize), SyscallFailsWithErrno(EPERM));
-}
-
-// Tmpfs files also support seals, but are created with F_SEAL_SEAL.
-TEST(MemfdTest, TmpfsFilesHaveSealSeal) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(IsTmpfs("/tmp")));
- const TempPath tmpfs_file =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn("/tmp"));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(tmpfs_file.path(), O_RDWR, 0644));
- EXPECT_THAT(fcntl(fd.get(), F_GET_SEALS),
- SyscallSucceedsWithValue(F_SEAL_SEAL));
-}
-
-// Can open a memfd from procfs and use as normal.
-TEST(MemfdTest, CanOpenFromProcfs) {
- const FileDescriptor memfd =
- ASSERT_NO_ERRNO_AND_VALUE(MemfdCreate(kMemfdName, MFD_ALLOW_SEALING));
-
- // Write a random page of data to the memfd via write(2).
- std::vector<char> buf(kPageSize);
- RandomizeBuffer(buf.data(), buf.size());
- ASSERT_THAT(write(memfd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(kPageSize));
-
- // Read back the same data from the fd obtained from procfs and verify.
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(absl::StrFormat("/proc/self/fd/%d", memfd.get()), O_RDWR));
- std::vector<char> buf2(kPageSize);
- EXPECT_THAT(pread(fd.get(), buf2.data(), buf2.size(), 0),
- SyscallSucceedsWithValue(kPageSize));
- EXPECT_EQ(buf, buf2);
-}
-
-// Test that memfd permissions are set up correctly to allow another process to
-// open it from procfs.
-TEST(MemfdTest, OtherProcessCanOpenFromProcfs) {
- const FileDescriptor memfd =
- ASSERT_NO_ERRNO_AND_VALUE(MemfdCreate(kMemfdName, MFD_ALLOW_SEALING));
- const auto memfd_path =
- absl::StrFormat("/proc/%d/fd/%d", getpid(), memfd.get());
- const auto rest = [&] {
- int fd = open(memfd_path.c_str(), O_RDWR);
- TEST_PCHECK(fd >= 0);
- TEST_PCHECK(close(fd) >= 0);
- };
- EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));
-}
-
-// Test that only files opened as writable can have seals applied to them.
-// Normally there's no way to specify file permissions on memfds, but we can
-// obtain a read-only memfd by opening the corresponding procfs fd entry as
-// read-only.
-TEST(MemfdTest, MemfdMustBeWritableToModifySeals) {
- const FileDescriptor memfd =
- ASSERT_NO_ERRNO_AND_VALUE(MemfdCreate(kMemfdName, MFD_ALLOW_SEALING));
-
- // Initially adding a seal works.
- EXPECT_THAT(fcntl(memfd.get(), F_ADD_SEALS, F_SEAL_WRITE), SyscallSucceeds());
-
- // Re-open the memfd as read-only from procfs.
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(absl::StrFormat("/proc/self/fd/%d", memfd.get()), O_RDONLY));
-
- // Can't add seals through an unwritable fd.
- EXPECT_THAT(fcntl(fd.get(), F_ADD_SEALS, F_SEAL_GROW),
- SyscallFailsWithErrno(EPERM));
-}
-
-// Test that the memfd implementation internally tracks potentially writable
-// maps correctly.
-TEST(MemfdTest, MultipleWritableAndNonWritableRefsToSameFileRegion) {
- const FileDescriptor memfd =
- ASSERT_NO_ERRNO_AND_VALUE(MemfdCreate(kMemfdName, 0));
-
- // Populate with a random page of data.
- std::vector<char> buf(kPageSize);
- RandomizeBuffer(buf.data(), buf.size());
- ASSERT_THAT(write(memfd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(kPageSize));
-
- // Read-only map to the page. This should cause an initial mapping to be
- // created.
- Mapping m1 = ASSERT_NO_ERRNO_AND_VALUE(
- Mmap(nullptr, kPageSize, PROT_READ, MAP_PRIVATE, memfd.get(), 0));
-
- // Create a shared writable map to the page. This should cause the internal
- // mapping to become potentially writable.
- Mapping m2 = ASSERT_NO_ERRNO_AND_VALUE(Mmap(
- nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, memfd.get(), 0));
-
- // Drop the read-only mapping first. If writability isn't tracked correctly,
- // this can cause some misaccounting, which can trigger asserts internally.
- m1.reset();
- m2.reset();
-}
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
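The memfd tests above revolve around memfd_create(2) plus fcntl(2) with F_ADD_SEALS and F_GET_SEALS. For reference, a minimal standalone sketch of that sealing flow outside the test harness (assumes Linux 3.17+ and glibc >= 2.27 for the memfd_create wrapper; not taken from the test suite):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <cassert>
#include <cerrno>

int main() {
  // MFD_ALLOW_SEALING is required; without it the memfd starts with
  // F_SEAL_SEAL set and no further seals can ever be added.
  int fd = memfd_create("demo", MFD_ALLOW_SEALING);
  assert(fd >= 0);

  char page[4096] = {};
  ssize_t n = write(fd, page, sizeof(page));
  assert(n == static_cast<ssize_t>(sizeof(page)));

  // After F_SEAL_WRITE, write(2)/pwrite(2) fail with EPERM...
  int rc = fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE);
  assert(rc == 0);
  n = write(fd, page, sizeof(page));
  assert(n == -1 && errno == EPERM);

  // ...and the seal is visible through F_GET_SEALS on any fd for this inode.
  assert(fcntl(fd, F_GET_SEALS) & F_SEAL_WRITE);

  close(fd);
  return 0;
}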
diff --git a/test/syscalls/linux/memory_accounting.cc b/test/syscalls/linux/memory_accounting.cc
deleted file mode 100644
index 867a4513b..000000000
--- a/test/syscalls/linux/memory_accounting.cc
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sys/mman.h>
-
-#include <map>
-
-#include "gtest/gtest.h"
-#include "absl/strings/match.h"
-#include "absl/strings/numbers.h"
-#include "absl/strings/str_format.h"
-#include "absl/strings/str_split.h"
-#include "test/util/fs_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-using ::absl::StrFormat;
-
-// AnonUsageFromMeminfo scrapes the current anonymous memory usage from
-// /proc/meminfo and returns it in bytes.
-PosixErrorOr<uint64_t> AnonUsageFromMeminfo() {
- ASSIGN_OR_RETURN_ERRNO(auto meminfo, GetContents("/proc/meminfo"));
- std::vector<std::string> lines(absl::StrSplit(meminfo, '\n'));
-
- // Try to find the AnonPages line; the format is AnonPages:\\s+(\\d+) kB\n.
- for (const auto& line : lines) {
- if (!absl::StartsWith(line, "AnonPages:")) {
- continue;
- }
-
- std::vector<std::string> parts(
- absl::StrSplit(line, ' ', absl::SkipEmpty()));
- if (parts.size() == 3) {
- // The size is the second field, let's try to parse it as a number.
- ASSIGN_OR_RETURN_ERRNO(auto anon_kb, Atoi<uint64_t>(parts[1]));
- return anon_kb * 1024;
- }
-
- return PosixError(EINVAL, "AnonPages field in /proc/meminfo was malformed");
- }
-
- return PosixError(EINVAL, "AnonPages field not found in /proc/meminfo");
-}
-
-TEST(MemoryAccounting, AnonAccountingPreservedOnSaveRestore) {
- // This test isn't meaningful on Linux. /proc/meminfo reports system-wide
- // memory usage, which can change arbitrarily in Linux from other activity on
- // the machine. In gvisor, this test is the only thing running on the
- // "machine", so values in /proc/meminfo accurately reflect the memory used by
- // the test.
- SKIP_IF(!IsRunningOnGvisor());
-
- uint64_t anon_initial = ASSERT_NO_ERRNO_AND_VALUE(AnonUsageFromMeminfo());
-
- // Cause some anonymous memory usage.
- uint64_t map_bytes = Megabytes(512);
- char* mem =
- static_cast<char*>(mmap(nullptr, map_bytes, PROT_READ | PROT_WRITE,
- MAP_POPULATE | MAP_ANON | MAP_PRIVATE, -1, 0));
- ASSERT_NE(mem, MAP_FAILED)
- << "Map failed, errno: " << errno << " (" << strerror(errno) << ").";
-
- // Write something to each page to prevent the pages from being decommitted
- // on S/R. Zero pages are dropped on save.
- for (uint64_t i = 0; i < map_bytes; i += kPageSize) {
- mem[i] = 'a';
- }
-
- uint64_t anon_after_alloc = ASSERT_NO_ERRNO_AND_VALUE(AnonUsageFromMeminfo());
- EXPECT_THAT(anon_after_alloc,
- EquivalentWithin(anon_initial + map_bytes, 0.04));
-
- // We have many implicit S/R cycles from scraping /proc/meminfo throughout the
- // test, but throw an explicit S/R in here as well.
- MaybeSave();
-
- // Usage should remain the same across S/R.
- uint64_t anon_after_sr = ASSERT_NO_ERRNO_AND_VALUE(AnonUsageFromMeminfo());
- EXPECT_THAT(anon_after_sr, EquivalentWithin(anon_after_alloc, 0.04));
-}
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
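The accounting test above expects anonymous usage to grow by roughly the mapped size: 512 MiB within 4%, i.e. about +/- 20 MiB. A rough standalone version of the same measurement, assuming a Linux /proc/meminfo with an AnonPages: line (not taken from the test suite):

#include <sys/mman.h>
#include <cstdint>
#include <cstring>
#include <fstream>
#include <iostream>
#include <string>

uint64_t AnonBytes() {
  std::ifstream meminfo("/proc/meminfo");
  std::string line;
  while (std::getline(meminfo, line)) {
    if (line.rfind("AnonPages:", 0) == 0) {
      // Format: "AnonPages:     <n> kB".
      return std::stoull(line.substr(strlen("AnonPages:"))) * 1024;
    }
  }
  return 0;
}

int main() {
  const uint64_t before = AnonBytes();
  const size_t len = size_t{512} << 20;  // 512 MiB
  void* p = mmap(nullptr, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);
  if (p == MAP_FAILED) return 1;
  // Write to each page so it is backed by a real anonymous page.
  for (size_t i = 0; i < len; i += 4096) static_cast<char*>(p)[i] = 'a';
  std::cout << "AnonPages grew by " << (AnonBytes() - before) << " bytes\n";
  munmap(p, len);
  return 0;
}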
diff --git a/test/syscalls/linux/mempolicy.cc b/test/syscalls/linux/mempolicy.cc
deleted file mode 100644
index 059fad598..000000000
--- a/test/syscalls/linux/mempolicy.cc
+++ /dev/null
@@ -1,289 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <sys/syscall.h>
-
-#include "gtest/gtest.h"
-#include "absl/memory/memory.h"
-#include "test/util/cleanup.h"
-#include "test/util/memory_util.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-#define BITS_PER_BYTE 8
-
-#define MPOL_F_STATIC_NODES (1 << 15)
-#define MPOL_F_RELATIVE_NODES (1 << 14)
-#define MPOL_DEFAULT 0
-#define MPOL_PREFERRED 1
-#define MPOL_BIND 2
-#define MPOL_INTERLEAVE 3
-#define MPOL_LOCAL 4
-#define MPOL_F_NODE (1 << 0)
-#define MPOL_F_ADDR (1 << 1)
-#define MPOL_F_MEMS_ALLOWED (1 << 2)
-#define MPOL_MF_STRICT (1 << 0)
-#define MPOL_MF_MOVE (1 << 1)
-#define MPOL_MF_MOVE_ALL (1 << 2)
-
-int get_mempolicy(int* policy, uint64_t* nmask, uint64_t maxnode, void* addr,
- int flags) {
- return syscall(SYS_get_mempolicy, policy, nmask, maxnode, addr, flags);
-}
-
-int set_mempolicy(int mode, uint64_t* nmask, uint64_t maxnode) {
- return syscall(SYS_set_mempolicy, mode, nmask, maxnode);
-}
-
-int mbind(void* addr, unsigned long len, int mode,
- const unsigned long* nodemask, unsigned long maxnode,
- unsigned flags) {
- return syscall(SYS_mbind, addr, len, mode, nodemask, maxnode, flags);
-}
-
-// Creates a cleanup object that resets the calling thread's mempolicy to the
-// system default when the calling scope ends.
-Cleanup ScopedMempolicy() {
- return Cleanup([] {
- EXPECT_THAT(set_mempolicy(MPOL_DEFAULT, nullptr, 0), SyscallSucceeds());
- });
-}
-
-// Temporarily change the memory policy for the calling thread within the
-// caller's scope.
-PosixErrorOr<Cleanup> ScopedSetMempolicy(int mode, uint64_t* nmask,
- uint64_t maxnode) {
- if (set_mempolicy(mode, nmask, maxnode)) {
- return PosixError(errno, "set_mempolicy");
- }
- return ScopedMempolicy();
-}
-
-TEST(MempolicyTest, CheckDefaultPolicy) {
- int mode = 0;
- uint64_t nodemask = 0;
- ASSERT_THAT(get_mempolicy(&mode, &nodemask, sizeof(nodemask) * BITS_PER_BYTE,
- nullptr, 0),
- SyscallSucceeds());
-
- EXPECT_EQ(MPOL_DEFAULT, mode);
- EXPECT_EQ(0x0, nodemask);
-}
-
-TEST(MempolicyTest, PolicyPreservedAfterSetMempolicy) {
- uint64_t nodemask = 0x1;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(ScopedSetMempolicy(
- MPOL_BIND, &nodemask, sizeof(nodemask) * BITS_PER_BYTE));
-
- int mode = 0;
- uint64_t nodemask_after = 0x0;
- ASSERT_THAT(get_mempolicy(&mode, &nodemask_after,
- sizeof(nodemask_after) * BITS_PER_BYTE, nullptr, 0),
- SyscallSucceeds());
- EXPECT_EQ(MPOL_BIND, mode);
- EXPECT_EQ(0x1, nodemask_after);
-
- // Try throwing in some mode flags.
- for (auto mode_flag : {MPOL_F_STATIC_NODES, MPOL_F_RELATIVE_NODES}) {
- auto cleanup2 = ASSERT_NO_ERRNO_AND_VALUE(
- ScopedSetMempolicy(MPOL_INTERLEAVE | mode_flag, &nodemask,
- sizeof(nodemask) * BITS_PER_BYTE));
- mode = 0;
- nodemask_after = 0x0;
- ASSERT_THAT(
- get_mempolicy(&mode, &nodemask_after,
- sizeof(nodemask_after) * BITS_PER_BYTE, nullptr, 0),
- SyscallSucceeds());
- EXPECT_EQ(MPOL_INTERLEAVE | mode_flag, mode);
- EXPECT_EQ(0x1, nodemask_after);
- }
-}
-
-TEST(MempolicyTest, SetMempolicyRejectsInvalidInputs) {
- auto cleanup = ScopedMempolicy();
- uint64_t nodemask;
-
- if (IsRunningOnGvisor()) {
- // Invalid nodemask, we only support a single node on gvisor.
- nodemask = 0x4;
- ASSERT_THAT(set_mempolicy(MPOL_DEFAULT, &nodemask,
- sizeof(nodemask) * BITS_PER_BYTE),
- SyscallFailsWithErrno(EINVAL));
- }
-
- nodemask = 0x1;
-
- // Invalid mode.
- ASSERT_THAT(set_mempolicy(7439, &nodemask, sizeof(nodemask) * BITS_PER_BYTE),
- SyscallFailsWithErrno(EINVAL));
-
- // Invalid nodemask size.
- ASSERT_THAT(set_mempolicy(MPOL_DEFAULT, &nodemask, 0),
- SyscallFailsWithErrno(EINVAL));
-
- // Invalid mode flag.
- ASSERT_THAT(
- set_mempolicy(MPOL_DEFAULT | MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES,
- &nodemask, sizeof(nodemask) * BITS_PER_BYTE),
- SyscallFailsWithErrno(EINVAL));
-
- // MPOL_INTERLEAVE with empty nodemask.
- nodemask = 0x0;
- ASSERT_THAT(set_mempolicy(MPOL_INTERLEAVE, &nodemask,
- sizeof(nodemask) * BITS_PER_BYTE),
- SyscallFailsWithErrno(EINVAL));
-}
-
-// The manpages specify that the nodemask provided to set_mempolicy is
-// considered empty if the nodemask pointer is null or if the nodemask size is
-// 0. We use a policy which accepts both empty and non-empty nodemasks
-// (MPOL_PREFERRED), a policy which requires a non-empty nodemask (MPOL_BIND),
-// and a policy which completely ignores the nodemask (MPOL_DEFAULT) to verify
-// argument checking around nodemasks.
-TEST(MempolicyTest, EmptyNodemaskOnSet) {
- auto cleanup = ScopedMempolicy();
-
- EXPECT_THAT(set_mempolicy(MPOL_DEFAULT, nullptr, 1), SyscallSucceeds());
- EXPECT_THAT(set_mempolicy(MPOL_BIND, nullptr, 1),
- SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(set_mempolicy(MPOL_PREFERRED, nullptr, 1), SyscallSucceeds());
-
- uint64_t nodemask = 0x1;
- EXPECT_THAT(set_mempolicy(MPOL_DEFAULT, &nodemask, 0),
- SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(set_mempolicy(MPOL_BIND, &nodemask, 0),
- SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(set_mempolicy(MPOL_PREFERRED, &nodemask, 0),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(MempolicyTest, QueryAvailableNodes) {
- uint64_t nodemask = 0;
- ASSERT_THAT(
- get_mempolicy(nullptr, &nodemask, sizeof(nodemask) * BITS_PER_BYTE,
- nullptr, MPOL_F_MEMS_ALLOWED),
- SyscallSucceeds());
- // We can only be sure there is a single node if running on gvisor.
- if (IsRunningOnGvisor()) {
- EXPECT_EQ(0x1, nodemask);
- }
-
- // MPOL_F_ADDR and MPOL_F_NODE flags may not be combined with
- // MPOL_F_MEMS_ALLOWED.
- for (auto flags :
- {MPOL_F_MEMS_ALLOWED | MPOL_F_ADDR, MPOL_F_MEMS_ALLOWED | MPOL_F_NODE,
- MPOL_F_MEMS_ALLOWED | MPOL_F_ADDR | MPOL_F_NODE}) {
- ASSERT_THAT(get_mempolicy(nullptr, &nodemask,
- sizeof(nodemask) * BITS_PER_BYTE, nullptr, flags),
- SyscallFailsWithErrno(EINVAL));
- }
-}
-
-TEST(MempolicyTest, GetMempolicyQueryNodeForAddress) {
- uint64_t dummy_stack_address;
- auto dummy_heap_address = absl::make_unique<uint64_t>();
- int mode;
-
- for (auto ptr : {&dummy_stack_address, dummy_heap_address.get()}) {
- mode = -1;
- ASSERT_THAT(
- get_mempolicy(&mode, nullptr, 0, ptr, MPOL_F_ADDR | MPOL_F_NODE),
- SyscallSucceeds());
- // If we're not running on gvisor, the address may be allocated on a
- // different NUMA node.
- if (IsRunningOnGvisor()) {
- EXPECT_EQ(0, mode);
- }
- }
-
- void* invalid_address = reinterpret_cast<void*>(-1);
-
- // Invalid address.
- ASSERT_THAT(get_mempolicy(&mode, nullptr, 0, invalid_address,
- MPOL_F_ADDR | MPOL_F_NODE),
- SyscallFailsWithErrno(EFAULT));
-
- // Invalid mode pointer.
- ASSERT_THAT(get_mempolicy(reinterpret_cast<int*>(invalid_address), nullptr, 0,
- &dummy_stack_address, MPOL_F_ADDR | MPOL_F_NODE),
- SyscallFailsWithErrno(EFAULT));
-}
-
-TEST(MempolicyTest, GetMempolicyCanOmitPointers) {
- int mode;
- uint64_t nodemask;
-
- // Omit nodemask pointer.
- ASSERT_THAT(get_mempolicy(&mode, nullptr, 0, nullptr, 0), SyscallSucceeds());
- // Omit mode pointer.
- ASSERT_THAT(get_mempolicy(nullptr, &nodemask,
- sizeof(nodemask) * BITS_PER_BYTE, nullptr, 0),
- SyscallSucceeds());
- // Omit both pointers.
- ASSERT_THAT(get_mempolicy(nullptr, nullptr, 0, nullptr, 0),
- SyscallSucceeds());
-}
-
-TEST(MempolicyTest, GetMempolicyNextInterleaveNode) {
- int mode;
- // The thread's policy is not yet set to MPOL_INTERLEAVE, so we can't query
- // for the next node that will be used for allocation.
- ASSERT_THAT(get_mempolicy(&mode, nullptr, 0, nullptr, MPOL_F_NODE),
- SyscallFailsWithErrno(EINVAL));
-
- // Set default policy for thread to MPOL_INTERLEAVE.
- uint64_t nodemask = 0x1;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(ScopedSetMempolicy(
- MPOL_INTERLEAVE, &nodemask, sizeof(nodemask) * BITS_PER_BYTE));
-
- mode = -1;
- ASSERT_THAT(get_mempolicy(&mode, nullptr, 0, nullptr, MPOL_F_NODE),
- SyscallSucceeds());
- EXPECT_EQ(0, mode);
-}
-
-TEST(MempolicyTest, Mbind) {
- // Temporarily set the thread policy to MPOL_PREFERRED.
- const auto cleanup_thread_policy =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSetMempolicy(MPOL_PREFERRED, nullptr, 0));
-
- const auto mapping = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS));
-
- // vmas default to MPOL_DEFAULT irrespective of the thread policy (currently
- // MPOL_PREFERRED).
- int mode;
- ASSERT_THAT(get_mempolicy(&mode, nullptr, 0, mapping.ptr(), MPOL_F_ADDR),
- SyscallSucceeds());
- EXPECT_EQ(mode, MPOL_DEFAULT);
-
- // Set MPOL_PREFERRED for the vma and read it back.
- ASSERT_THAT(
- mbind(mapping.ptr(), mapping.len(), MPOL_PREFERRED, nullptr, 0, 0),
- SyscallSucceeds());
- ASSERT_THAT(get_mempolicy(&mode, nullptr, 0, mapping.ptr(), MPOL_F_ADDR),
- SyscallSucceeds());
- EXPECT_EQ(mode, MPOL_PREFERRED);
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
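For reference, the same get_mempolicy(2) queries can be made outside the test harness with raw syscalls, mirroring the wrappers in the deleted file. A rough sketch (flag values copied from the kernel UAPI header; the 64-bit nodemask assumes a machine with at most 64 possible NUMA nodes; not taken from the test suite):

#include <sys/syscall.h>
#include <unistd.h>
#include <cstdint>
#include <cstdio>

// Flag values from include/uapi/linux/mempolicy.h.
constexpr int kMpolFNode = 1 << 0;
constexpr int kMpolFAddr = 1 << 1;
constexpr int kMpolFMemsAllowed = 1 << 2;

int main() {
  // Which nodes is this thread allowed to allocate from?
  uint64_t allowed = 0;
  if (syscall(SYS_get_mempolicy, nullptr, &allowed, 64, nullptr,
              kMpolFMemsAllowed) != 0) {
    std::perror("get_mempolicy(MPOL_F_MEMS_ALLOWED)");
    return 1;
  }
  std::printf("allowed nodemask: %#llx\n",
              static_cast<unsigned long long>(allowed));

  // Which node backs a particular (already touched) address?
  int node = -1;
  int probe = 0;
  if (syscall(SYS_get_mempolicy, &node, nullptr, 0, &probe,
              kMpolFAddr | kMpolFNode) != 0) {
    std::perror("get_mempolicy(MPOL_F_ADDR | MPOL_F_NODE)");
    return 1;
  }
  std::printf("stack page resides on node %d\n", node);
  return 0;
}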
diff --git a/test/syscalls/linux/mincore.cc b/test/syscalls/linux/mincore.cc
deleted file mode 100644
index 5c1240c89..000000000
--- a/test/syscalls/linux/mincore.cc
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <stdint.h>
-#include <string.h>
-#include <sys/mman.h>
-#include <unistd.h>
-
-#include <algorithm>
-#include <vector>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "test/util/memory_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-size_t CountSetLSBs(std::vector<unsigned char> const& vec) {
- return std::count_if(begin(vec), end(vec),
- [](unsigned char c) { return (c & 1) != 0; });
-}
-
-TEST(MincoreTest, DirtyAnonPagesAreResident) {
- constexpr size_t kTestPageCount = 10;
- auto const kTestMappingBytes = kTestPageCount * kPageSize;
- auto m = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kTestMappingBytes, PROT_READ | PROT_WRITE, MAP_PRIVATE));
- memset(m.ptr(), 0, m.len());
-
- std::vector<unsigned char> vec(kTestPageCount, 0);
- ASSERT_THAT(mincore(m.ptr(), kTestMappingBytes, vec.data()),
- SyscallSucceeds());
- EXPECT_EQ(kTestPageCount, CountSetLSBs(vec));
-}
-
-TEST(MincoreTest, UnalignedAddressFails) {
- // Map and touch two pages, then try to mincore the second half of the first
- // page + the first half of the second page. Both pages are mapped, but
- // mincore should return EINVAL due to the misaligned start address.
- constexpr size_t kTestPageCount = 2;
- auto const kTestMappingBytes = kTestPageCount * kPageSize;
- auto m = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kTestMappingBytes, PROT_READ | PROT_WRITE, MAP_PRIVATE));
- memset(m.ptr(), 0, m.len());
-
- std::vector<unsigned char> vec(kTestPageCount, 0);
- EXPECT_THAT(mincore(reinterpret_cast<void*>(m.addr() + kPageSize / 2),
- kPageSize, vec.data()),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(MincoreTest, UnalignedLengthSucceedsAndIsRoundedUp) {
- // Map and touch two pages, then try to mincore the first page + the first
- // half of the second page. mincore should silently round up the length to
- // include both pages.
- constexpr size_t kTestPageCount = 2;
- auto const kTestMappingBytes = kTestPageCount * kPageSize;
- auto m = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kTestMappingBytes, PROT_READ | PROT_WRITE, MAP_PRIVATE));
- memset(m.ptr(), 0, m.len());
-
- std::vector<unsigned char> vec(kTestPageCount, 0);
- ASSERT_THAT(mincore(m.ptr(), kPageSize + kPageSize / 2, vec.data()),
- SyscallSucceeds());
- EXPECT_EQ(kTestPageCount, CountSetLSBs(vec));
-}
-
-TEST(MincoreTest, ZeroLengthSucceedsAndAllowsAnyVecBelowTaskSize) {
- EXPECT_THAT(mincore(nullptr, 0, nullptr), SyscallSucceeds());
-}
-
-TEST(MincoreTest, InvalidLengthFails) {
- EXPECT_THAT(mincore(nullptr, -1, nullptr), SyscallFailsWithErrno(ENOMEM));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
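For reference, a standalone sketch of the residency check mincore(2) performs, outside the test harness (not taken from the test suite): only the pages that have actually been touched report bit 0 of their vector byte set.

#include <sys/mman.h>
#include <unistd.h>
#include <cstdio>
#include <cstring>
#include <vector>

int main() {
  const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  const size_t pages = 4;
  char* p = static_cast<char*>(mmap(nullptr, pages * page,
                                    PROT_READ | PROT_WRITE,
                                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
  if (p == MAP_FAILED) return 1;

  // Touch only the first two pages; the rest stay unallocated.
  memset(p, 0xaa, 2 * page);

  std::vector<unsigned char> vec(pages);
  if (mincore(p, pages * page, vec.data()) != 0) return 1;
  for (size_t i = 0; i < pages; ++i) {
    // Bit 0 of each byte reports whether the page is resident.
    std::printf("page %zu resident: %d\n", i, vec[i] & 1);
  }
  munmap(p, pages * page);
  return 0;
}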
diff --git a/test/syscalls/linux/mkdir.cc b/test/syscalls/linux/mkdir.cc
deleted file mode 100644
index 36504fe6d..000000000
--- a/test/syscalls/linux/mkdir.cc
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "test/util/capability_util.h"
-#include "test/util/fs_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/temp_umask.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-class MkdirTest : public ::testing::Test {
- protected:
- // SetUp picks a unique temporary path for the directory under test.
- void SetUp() override { dirname_ = NewTempAbsPath(); }
-
- // TearDown removes the directory created by the test.
- void TearDown() override {
- EXPECT_THAT(rmdir(dirname_.c_str()), SyscallSucceeds());
- }
-
- std::string dirname_;
-};
-
-TEST_F(MkdirTest, CanCreateWritableDir) {
- ASSERT_THAT(mkdir(dirname_.c_str(), 0777), SyscallSucceeds());
- std::string filename = JoinPath(dirname_, "anything");
- int fd;
- ASSERT_THAT(fd = open(filename.c_str(), O_RDWR | O_CREAT, 0666),
- SyscallSucceeds());
- EXPECT_THAT(close(fd), SyscallSucceeds());
- ASSERT_THAT(unlink(filename.c_str()), SyscallSucceeds());
-}
-
-TEST_F(MkdirTest, HonorsUmask) {
- constexpr mode_t kMask = 0111;
- TempUmask mask(kMask);
- ASSERT_THAT(mkdir(dirname_.c_str(), 0777), SyscallSucceeds());
- struct stat statbuf;
- ASSERT_THAT(stat(dirname_.c_str(), &statbuf), SyscallSucceeds());
- EXPECT_EQ(0777 & ~kMask, statbuf.st_mode & 0777);
-}
-
-TEST_F(MkdirTest, HonorsUmask2) {
- constexpr mode_t kMask = 0142;
- TempUmask mask(kMask);
- ASSERT_THAT(mkdir(dirname_.c_str(), 0777), SyscallSucceeds());
- struct stat statbuf;
- ASSERT_THAT(stat(dirname_.c_str(), &statbuf), SyscallSucceeds());
- EXPECT_EQ(0777 & ~kMask, statbuf.st_mode & 0777);
-}
-
-TEST_F(MkdirTest, FailsOnDirWithoutWritePerms) {
- // Drop capabilities that allow us to override file and directory permissions.
- AutoCapability cap1(CAP_DAC_OVERRIDE, false);
- AutoCapability cap2(CAP_DAC_READ_SEARCH, false);
-
- ASSERT_THAT(mkdir(dirname_.c_str(), 0555), SyscallSucceeds());
- auto dir = JoinPath(dirname_.c_str(), "foo");
- EXPECT_THAT(mkdir(dir.c_str(), 0777), SyscallFailsWithErrno(EACCES));
- EXPECT_THAT(open(JoinPath(dirname_, "file").c_str(), O_RDWR | O_CREAT, 0666),
- SyscallFailsWithErrno(EACCES));
-}
-
-TEST_F(MkdirTest, DirAlreadyExists) {
- // Drop capabilities that allow us to override file and directory permissions.
- AutoCapability cap1(CAP_DAC_OVERRIDE, false);
- AutoCapability cap2(CAP_DAC_READ_SEARCH, false);
-
- ASSERT_THAT(mkdir(dirname_.c_str(), 0777), SyscallSucceeds());
- auto dir = JoinPath(dirname_.c_str(), "foo");
- EXPECT_THAT(mkdir(dir.c_str(), 0777), SyscallSucceeds());
-
- struct {
- int mode;
- int err;
- } tests[] = {
- {.mode = 0000, .err = EACCES}, // No perm
- {.mode = 0100, .err = EEXIST}, // Exec only
- {.mode = 0200, .err = EACCES}, // Write only
- {.mode = 0300, .err = EEXIST}, // Write+exec
- {.mode = 0400, .err = EACCES}, // Read only
- {.mode = 0500, .err = EEXIST}, // Read+exec
- {.mode = 0600, .err = EACCES}, // Read+write
- {.mode = 0700, .err = EEXIST}, // All
- };
- for (const auto& t : tests) {
- printf("mode: 0%o\n", t.mode);
- EXPECT_THAT(chmod(dirname_.c_str(), t.mode), SyscallSucceeds());
- EXPECT_THAT(mkdir(dir.c_str(), 0777), SyscallFailsWithErrno(t.err));
- }
-
- // Clean up.
- EXPECT_THAT(chmod(dirname_.c_str(), 0777), SyscallSucceeds());
- ASSERT_THAT(rmdir(dir.c_str()), SyscallSucceeds());
-}
-
-TEST_F(MkdirTest, MkdirAtEmptyPath) {
- ASSERT_THAT(mkdir(dirname_.c_str(), 0777), SyscallSucceeds());
- auto fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(dirname_, O_RDONLY | O_DIRECTORY, 0666));
- EXPECT_THAT(mkdirat(fd.get(), "", 0777), SyscallFailsWithErrno(ENOENT));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
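The two umask tests above boil down to mode & ~umask: with a requested mode of 0777 and a umask of 0142, the directory ends up 0777 & ~0142 = 0635. A standalone sketch of the same arithmetic (the path is hypothetical; not taken from the test suite):

#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <cstdio>

int main() {
  const mode_t old_umask = umask(0142);
  if (mkdir("/tmp/mkdir_umask_demo", 0777) != 0) return 1;  // hypothetical path

  struct stat st;
  if (stat("/tmp/mkdir_umask_demo", &st) != 0) return 1;
  // Prints "requested 0777, got 0635".
  std::printf("requested 0777, got 0%o\n",
              static_cast<unsigned>(st.st_mode & 0777));

  rmdir("/tmp/mkdir_umask_demo");
  umask(old_umask);
  return 0;
}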
diff --git a/test/syscalls/linux/mknod.cc b/test/syscalls/linux/mknod.cc
deleted file mode 100644
index 1635c6d0c..000000000
--- a/test/syscalls/linux/mknod.cc
+++ /dev/null
@@ -1,234 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <sys/socket.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <sys/un.h>
-#include <unistd.h>
-
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(MknodTest, RegularFile) {
- const std::string node0 = NewTempAbsPath();
- EXPECT_THAT(mknod(node0.c_str(), S_IFREG, 0), SyscallSucceeds());
-
- const std::string node1 = NewTempAbsPath();
- EXPECT_THAT(mknod(node1.c_str(), 0, 0), SyscallSucceeds());
-}
-
-TEST(MknodTest, RegularFilePermissions) {
- const std::string node = NewTempAbsPath();
- mode_t newUmask = 0077;
- umask(newUmask);
-
- // Create the file node with mode 0777. Not specifying a file type should
- // create a regular file.
- mode_t perms = S_IRWXU | S_IRWXG | S_IRWXO;
- EXPECT_THAT(mknod(node.c_str(), perms, 0), SyscallSucceeds());
-
- // In the absence of a default ACL, the permissions of the created node are
- // (mode & ~umask). -- mknod(2)
- mode_t wantPerms = perms & ~newUmask;
- struct stat st;
- ASSERT_THAT(stat(node.c_str(), &st), SyscallSucceeds());
- ASSERT_EQ(st.st_mode & 0777, wantPerms);
-
- // "Zero file type is equivalent to type S_IFREG." - mknod(2)
- ASSERT_EQ(st.st_mode & S_IFMT, S_IFREG);
-}
-
-TEST(MknodTest, MknodAtFIFO) {
- const TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const std::string fifo_relpath = NewTempRelPath();
- const std::string fifo = JoinPath(dir.path(), fifo_relpath);
-
- const FileDescriptor dirfd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(dir.path().c_str(), O_RDONLY));
- ASSERT_THAT(mknodat(dirfd.get(), fifo_relpath.c_str(), S_IFIFO | S_IRUSR, 0),
- SyscallSucceeds());
-
- struct stat st;
- ASSERT_THAT(stat(fifo.c_str(), &st), SyscallSucceeds());
- EXPECT_TRUE(S_ISFIFO(st.st_mode));
-}
-
-TEST(MknodTest, MknodOnExistingPathFails) {
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const TempPath slink = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateSymlinkTo(GetAbsoluteTestTmpdir(), file.path()));
-
- EXPECT_THAT(mknod(file.path().c_str(), S_IFREG, 0),
- SyscallFailsWithErrno(EEXIST));
- EXPECT_THAT(mknod(file.path().c_str(), S_IFIFO, 0),
- SyscallFailsWithErrno(EEXIST));
- EXPECT_THAT(mknod(slink.path().c_str(), S_IFREG, 0),
- SyscallFailsWithErrno(EEXIST));
- EXPECT_THAT(mknod(slink.path().c_str(), S_IFIFO, 0),
- SyscallFailsWithErrno(EEXIST));
-}
-
-TEST(MknodTest, UnimplementedTypesReturnError) {
- // TODO(gvisor.dev/issue/1624): These file types are supported by some
- // filesystems in VFS2, so this test should be deleted along with VFS1.
- SKIP_IF(!IsRunningWithVFS1());
-
- const std::string path = NewTempAbsPath();
- EXPECT_THAT(mknod(path.c_str(), S_IFSOCK, 0),
- SyscallFailsWithErrno(EOPNOTSUPP));
- EXPECT_THAT(mknod(path.c_str(), S_IFCHR, 0), SyscallFailsWithErrno(EPERM));
- EXPECT_THAT(mknod(path.c_str(), S_IFBLK, 0), SyscallFailsWithErrno(EPERM));
-}
-
-TEST(MknodTest, Socket) {
- SKIP_IF(IsRunningOnGvisor() && IsRunningWithVFS1());
-
- ASSERT_THAT(chdir(GetAbsoluteTestTmpdir().c_str()), SyscallSucceeds());
-
- auto filename = NewTempRelPath();
-
- ASSERT_THAT(mknod(filename.c_str(), S_IFSOCK | S_IRUSR | S_IWUSR, 0),
- SyscallSucceeds());
-
- int sk;
- ASSERT_THAT(sk = socket(AF_UNIX, SOCK_SEQPACKET, 0), SyscallSucceeds());
- FileDescriptor fd(sk);
-
- struct sockaddr_un addr = {.sun_family = AF_UNIX};
- absl::SNPrintF(addr.sun_path, sizeof(addr.sun_path), "%s", filename.c_str());
- ASSERT_THAT(connect(sk, (struct sockaddr *)&addr, sizeof(addr)),
- SyscallFailsWithErrno(ECONNREFUSED));
- ASSERT_THAT(unlink(filename.c_str()), SyscallSucceeds());
-}
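For contrast with the mknod(S_IFSOCK) test above, which creates a socket node that nothing is listening on, the usual way a filesystem socket node appears is bind(2) on an AF_UNIX socket. A rough sketch (the path is hypothetical; not taken from the test suite):

#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/un.h>
#include <unistd.h>
#include <cstdio>
#include <cstring>

int main() {
  int fd = socket(AF_UNIX, SOCK_STREAM, 0);
  if (fd < 0) return 1;

  struct sockaddr_un addr = {};
  addr.sun_family = AF_UNIX;
  std::strncpy(addr.sun_path, "/tmp/bind_demo.sock", sizeof(addr.sun_path) - 1);
  unlink(addr.sun_path);  // ignore errors; bind fails if the node exists
  if (bind(fd, reinterpret_cast<struct sockaddr*>(&addr), sizeof(addr)) != 0) {
    return 1;
  }

  struct stat st;
  if (stat(addr.sun_path, &st) == 0) {
    std::printf("S_ISSOCK: %d\n", S_ISSOCK(st.st_mode));  // prints 1
  }
  unlink(addr.sun_path);
  close(fd);
  return 0;
}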
-
-PosixErrorOr<FileDescriptor> OpenRetryEINTR(std::string const& path, int flags,
- mode_t mode = 0) {
- while (true) {
- auto maybe_fd = Open(path, flags, mode);
- if (maybe_fd.ok() || maybe_fd.error().errno_value() != EINTR) {
- return maybe_fd;
- }
- }
-}
-
-TEST(MknodTest, Fifo) {
- const std::string fifo = NewTempAbsPath();
- ASSERT_THAT(mknod(fifo.c_str(), S_IFIFO | S_IRUSR | S_IWUSR, 0),
- SyscallSucceeds());
-
- struct stat st;
- ASSERT_THAT(stat(fifo.c_str(), &st), SyscallSucceeds());
- EXPECT_TRUE(S_ISFIFO(st.st_mode));
-
- std::string msg = "some std::string";
- std::vector<char> buf(512);
-
- // Read-end of the pipe.
- ScopedThread t([&fifo, &buf, &msg]() {
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(OpenRetryEINTR(fifo.c_str(), O_RDONLY));
- EXPECT_THAT(ReadFd(fd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(msg.length()));
- EXPECT_EQ(msg, std::string(buf.data()));
- });
-
- // Write-end of the pipe.
- FileDescriptor wfd =
- ASSERT_NO_ERRNO_AND_VALUE(OpenRetryEINTR(fifo.c_str(), O_WRONLY));
- EXPECT_THAT(WriteFd(wfd.get(), msg.c_str(), msg.length()),
- SyscallSucceedsWithValue(msg.length()));
-}
-
-TEST(MknodTest, FifoOtrunc) {
- const std::string fifo = NewTempAbsPath();
- ASSERT_THAT(mknod(fifo.c_str(), S_IFIFO | S_IRUSR | S_IWUSR, 0),
- SyscallSucceeds());
-
- struct stat st = {};
- ASSERT_THAT(stat(fifo.c_str(), &st), SyscallSucceeds());
- EXPECT_TRUE(S_ISFIFO(st.st_mode));
-
- std::string msg = "some std::string";
- std::vector<char> buf(512);
- // Read-end of the pipe.
- ScopedThread t([&fifo, &buf, &msg]() {
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(OpenRetryEINTR(fifo.c_str(), O_RDONLY));
- EXPECT_THAT(ReadFd(fd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(msg.length()));
- EXPECT_EQ(msg, std::string(buf.data()));
- });
-
- // Write-end of the pipe.
- FileDescriptor wfd = ASSERT_NO_ERRNO_AND_VALUE(
- OpenRetryEINTR(fifo.c_str(), O_WRONLY | O_TRUNC));
- EXPECT_THAT(WriteFd(wfd.get(), msg.c_str(), msg.length()),
- SyscallSucceedsWithValue(msg.length()));
-}
-
-TEST(MknodTest, FifoTruncNoOp) {
- const std::string fifo = NewTempAbsPath();
- ASSERT_THAT(mknod(fifo.c_str(), S_IFIFO | S_IRUSR | S_IWUSR, 0),
- SyscallSucceeds());
-
- EXPECT_THAT(truncate(fifo.c_str(), 0), SyscallFailsWithErrno(EINVAL));
-
- struct stat st = {};
- ASSERT_THAT(stat(fifo.c_str(), &st), SyscallSucceeds());
- EXPECT_TRUE(S_ISFIFO(st.st_mode));
-
- std::string msg = "some std::string";
- std::vector<char> buf(512);
- // Read-end of the pipe.
- ScopedThread t([&fifo, &buf, &msg]() {
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(OpenRetryEINTR(fifo.c_str(), O_RDONLY));
- EXPECT_THAT(ReadFd(fd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(msg.length()));
- EXPECT_EQ(msg, std::string(buf.data()));
- });
-
- FileDescriptor wfd = ASSERT_NO_ERRNO_AND_VALUE(
- OpenRetryEINTR(fifo.c_str(), O_WRONLY | O_TRUNC));
- EXPECT_THAT(ftruncate(wfd.get(), 0), SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(WriteFd(wfd.get(), msg.c_str(), msg.length()),
- SyscallSucceedsWithValue(msg.length()));
- EXPECT_THAT(ftruncate(wfd.get(), 0), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(MknodTest, MknodAtEmptyPath) {
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(dir.path(), O_RDONLY | O_DIRECTORY, 0666));
- EXPECT_THAT(mknodat(fd.get(), "", S_IFREG | 0777, 0),
- SyscallFailsWithErrno(ENOENT));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
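One FIFO property the tests above don't exercise is non-blocking open: with O_NONBLOCK, opening the read end succeeds even with no writer, while opening the write end with no reader fails with ENXIO. A rough sketch (the path is hypothetical; not taken from the test suite):

#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>
#include <cerrno>
#include <cstdio>

int main() {
  const char* fifo = "/tmp/mknod_fifo_demo";  // hypothetical path
  unlink(fifo);
  if (mknod(fifo, S_IFIFO | 0600, 0) != 0) return 1;

  // Write end first, before any reader exists: fails with ENXIO.
  int wfd = open(fifo, O_WRONLY | O_NONBLOCK);
  if (wfd < 0) {
    std::printf("write end failed: errno=%d (ENXIO=%d)\n", errno, ENXIO);
  }

  // Read end: succeeds immediately even with no writer.
  int rfd = open(fifo, O_RDONLY | O_NONBLOCK);
  std::printf("read end fd: %d\n", rfd);

  if (rfd >= 0) close(rfd);
  if (wfd >= 0) close(wfd);
  unlink(fifo);
  return 0;
}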
diff --git a/test/syscalls/linux/mlock.cc b/test/syscalls/linux/mlock.cc
deleted file mode 100644
index dfa5b7133..000000000
--- a/test/syscalls/linux/mlock.cc
+++ /dev/null
@@ -1,320 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sys/mman.h>
-#include <sys/resource.h>
-#include <sys/syscall.h>
-#include <unistd.h>
-
-#include <cerrno>
-#include <cstring>
-
-#include "gmock/gmock.h"
-#include "test/util/capability_util.h"
-#include "test/util/cleanup.h"
-#include "test/util/memory_util.h"
-#include "test/util/multiprocess_util.h"
-#include "test/util/rlimit_util.h"
-#include "test/util/test_util.h"
-
-using ::testing::_;
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-PosixErrorOr<bool> CanMlock() {
- struct rlimit rlim;
- if (getrlimit(RLIMIT_MEMLOCK, &rlim) < 0) {
- return PosixError(errno, "getrlimit(RLIMIT_MEMLOCK)");
- }
- if (rlim.rlim_cur != 0) {
- return true;
- }
- return HaveCapability(CAP_IPC_LOCK);
-}
-
-// Returns true if the page containing addr is mlocked.
-bool IsPageMlocked(uintptr_t addr) {
- // This relies on msync(MS_INVALIDATE) interacting correctly with mlocked
- // pages, which is tested for by the MsyncInvalidate case below.
- int const rv = msync(reinterpret_cast<void*>(addr & ~(kPageSize - 1)),
- kPageSize, MS_ASYNC | MS_INVALIDATE);
- if (rv == 0) {
- return false;
- }
- // This uses TEST_PCHECK_MSG since it's used in subprocesses.
- TEST_PCHECK_MSG(errno == EBUSY, "msync failed with unexpected errno");
- return true;
-}
-
-TEST(MlockTest, Basic) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
- auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
- EXPECT_FALSE(IsPageMlocked(mapping.addr()));
- ASSERT_THAT(mlock(mapping.ptr(), mapping.len()), SyscallSucceeds());
- EXPECT_TRUE(IsPageMlocked(mapping.addr()));
-}
-
-TEST(MlockTest, ProtNone) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
- auto const mapping =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(kPageSize, PROT_NONE, MAP_PRIVATE));
- EXPECT_FALSE(IsPageMlocked(mapping.addr()));
- ASSERT_THAT(mlock(mapping.ptr(), mapping.len()),
- SyscallFailsWithErrno(ENOMEM));
- // ENOMEM is returned because mlock can't populate the page, but it's still
- // considered locked.
- EXPECT_TRUE(IsPageMlocked(mapping.addr()));
-}
-
-TEST(MlockTest, MadviseDontneed) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
- auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
- ASSERT_THAT(mlock(mapping.ptr(), mapping.len()), SyscallSucceeds());
- EXPECT_THAT(madvise(mapping.ptr(), mapping.len(), MADV_DONTNEED),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(MlockTest, MsyncInvalidate) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
- auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
- ASSERT_THAT(mlock(mapping.ptr(), mapping.len()), SyscallSucceeds());
- EXPECT_THAT(msync(mapping.ptr(), mapping.len(), MS_ASYNC | MS_INVALIDATE),
- SyscallFailsWithErrno(EBUSY));
- EXPECT_THAT(msync(mapping.ptr(), mapping.len(), MS_SYNC | MS_INVALIDATE),
- SyscallFailsWithErrno(EBUSY));
-}
-
-TEST(MlockTest, Fork) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
- auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
- EXPECT_FALSE(IsPageMlocked(mapping.addr()));
- ASSERT_THAT(mlock(mapping.ptr(), mapping.len()), SyscallSucceeds());
- EXPECT_TRUE(IsPageMlocked(mapping.addr()));
- EXPECT_THAT(
- InForkedProcess([&] { TEST_CHECK(!IsPageMlocked(mapping.addr())); }),
- IsPosixErrorOkAndHolds(0));
-}
-
-TEST(MlockTest, RlimitMemlockZero) {
- AutoCapability cap(CAP_IPC_LOCK, false);
- Cleanup reset_rlimit =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSetSoftRlimit(RLIMIT_MEMLOCK, 0));
- auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
- EXPECT_FALSE(IsPageMlocked(mapping.addr()));
- ASSERT_THAT(mlock(mapping.ptr(), mapping.len()),
- SyscallFailsWithErrno(EPERM));
-}
-
-TEST(MlockTest, RlimitMemlockInsufficient) {
- AutoCapability cap(CAP_IPC_LOCK, false);
- Cleanup reset_rlimit =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSetSoftRlimit(RLIMIT_MEMLOCK, kPageSize));
- auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(2 * kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
- EXPECT_FALSE(IsPageMlocked(mapping.addr()));
- ASSERT_THAT(mlock(mapping.ptr(), mapping.len()),
- SyscallFailsWithErrno(ENOMEM));
-}
-
-TEST(MunlockTest, Basic) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
- auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
- EXPECT_FALSE(IsPageMlocked(mapping.addr()));
- ASSERT_THAT(mlock(mapping.ptr(), mapping.len()), SyscallSucceeds());
- EXPECT_TRUE(IsPageMlocked(mapping.addr()));
- ASSERT_THAT(munlock(mapping.ptr(), mapping.len()), SyscallSucceeds());
- EXPECT_FALSE(IsPageMlocked(mapping.addr()));
-}
-
-TEST(MunlockTest, NotLocked) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
- auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
- EXPECT_FALSE(IsPageMlocked(mapping.addr()));
- EXPECT_THAT(munlock(mapping.ptr(), mapping.len()), SyscallSucceeds());
- EXPECT_FALSE(IsPageMlocked(mapping.addr()));
-}
-
-// There is currently no test for mlockall(MCL_CURRENT) because the default
-// RLIMIT_MEMLOCK of 64 KB is insufficient to actually invoke
-// mlockall(MCL_CURRENT).
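For reference, a sketch of what exercising MCL_CURRENT might look like when the soft RLIMIT_MEMLOCK can be raised to the hard limit (or the caller holds CAP_IPC_LOCK); this is illustrative only, not a proposed test:

#include <sys/mman.h>
#include <sys/resource.h>
#include <cstdio>

int main() {
  // Raise the soft limit to the hard limit so mlockall can pin what is
  // already mapped. This is insufficient for unprivileged processes whose
  // hard limit is also small.
  struct rlimit rl;
  if (getrlimit(RLIMIT_MEMLOCK, &rl) != 0) return 1;
  rl.rlim_cur = rl.rlim_max;
  if (setrlimit(RLIMIT_MEMLOCK, &rl) != 0) return 1;

  if (mlockall(MCL_CURRENT) != 0) {
    std::perror("mlockall(MCL_CURRENT)");
    return 1;
  }
  std::printf("all current mappings are locked\n");
  munlockall();
  return 0;
}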
-
-TEST(MlockallTest, Future) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
-
- // Run this test in a separate (single-threaded) subprocess to ensure that a
- // background thread doesn't try to mmap a large amount of memory, fail due
- // to hitting RLIMIT_MEMLOCK, and explode the process violently.
- auto const do_test = [] {
- auto const mapping =
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE).ValueOrDie();
- TEST_CHECK(!IsPageMlocked(mapping.addr()));
- TEST_PCHECK(mlockall(MCL_FUTURE) == 0);
- // Ensure that mlockall(MCL_FUTURE) is turned off before the end of the
- // test, as otherwise mmaps may fail unexpectedly.
- Cleanup do_munlockall([] { TEST_PCHECK(munlockall() == 0); });
- auto const mapping2 =
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE).ValueOrDie();
- TEST_CHECK(IsPageMlocked(mapping2.addr()));
- // Fire munlockall() and check that it disables mlockall(MCL_FUTURE).
- do_munlockall.Release()();
- auto const mapping3 =
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE).ValueOrDie();
- TEST_CHECK(!IsPageMlocked(mapping3.addr()));
- };
- EXPECT_THAT(InForkedProcess(do_test), IsPosixErrorOkAndHolds(0));
-}
-
-TEST(MunlockallTest, Basic) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
- auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_LOCKED));
- EXPECT_TRUE(IsPageMlocked(mapping.addr()));
- ASSERT_THAT(munlockall(), SyscallSucceeds());
- EXPECT_FALSE(IsPageMlocked(mapping.addr()));
-}
-
-#ifndef SYS_mlock2
-#if defined(__x86_64__)
-#define SYS_mlock2 325
-#elif defined(__aarch64__)
-#define SYS_mlock2 284
-#endif
-#endif
-
-#ifndef MLOCK_ONFAULT
-#define MLOCK_ONFAULT 0x01 // Linux: include/uapi/asm-generic/mman-common.h
-#endif
-
-#ifdef SYS_mlock2
-
-int mlock2(void const* addr, size_t len, int flags) {
- return syscall(SYS_mlock2, addr, len, flags);
-}
-
-TEST(Mlock2Test, NoFlags) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
- auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
- EXPECT_FALSE(IsPageMlocked(mapping.addr()));
- ASSERT_THAT(mlock2(mapping.ptr(), mapping.len(), 0), SyscallSucceeds());
- EXPECT_TRUE(IsPageMlocked(mapping.addr()));
-}
-
-TEST(Mlock2Test, MlockOnfault) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
- auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
- EXPECT_FALSE(IsPageMlocked(mapping.addr()));
- ASSERT_THAT(mlock2(mapping.ptr(), mapping.len(), MLOCK_ONFAULT),
- SyscallSucceeds());
- EXPECT_TRUE(IsPageMlocked(mapping.addr()));
-}
-
-TEST(Mlock2Test, UnknownFlags) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
- auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
- EXPECT_THAT(mlock2(mapping.ptr(), mapping.len(), ~0),
- SyscallFailsWithErrno(EINVAL));
-}
-
-#endif // defined(SYS_mlock2)
-
-TEST(MapLockedTest, Basic) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
- auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_LOCKED));
- EXPECT_TRUE(IsPageMlocked(mapping.addr()));
- EXPECT_THAT(munlock(mapping.ptr(), mapping.len()), SyscallSucceeds());
- EXPECT_FALSE(IsPageMlocked(mapping.addr()));
-}
-
-TEST(MapLockedTest, RlimitMemlockZero) {
- AutoCapability cap(CAP_IPC_LOCK, false);
- Cleanup reset_rlimit =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSetSoftRlimit(RLIMIT_MEMLOCK, 0));
- EXPECT_THAT(
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_LOCKED),
- PosixErrorIs(EPERM, _));
-}
-
-TEST(MapLockedTest, RlimitMemlockInsufficient) {
- AutoCapability cap(CAP_IPC_LOCK, false);
- Cleanup reset_rlimit =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSetSoftRlimit(RLIMIT_MEMLOCK, kPageSize));
- EXPECT_THAT(
- MmapAnon(2 * kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_LOCKED),
- PosixErrorIs(EAGAIN, _));
-}
-
-TEST(MremapLockedTest, Basic) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
- auto mapping = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_LOCKED));
- EXPECT_TRUE(IsPageMlocked(mapping.addr()));
-
- void* addr = mremap(mapping.ptr(), mapping.len(), 2 * mapping.len(),
- MREMAP_MAYMOVE, nullptr);
- if (addr == MAP_FAILED) {
- FAIL() << "mremap failed: " << errno << " (" << strerror(errno) << ")";
- }
- mapping.release();
- mapping.reset(addr, 2 * mapping.len());
- EXPECT_TRUE(IsPageMlocked(reinterpret_cast<uintptr_t>(addr)));
-}
-
-TEST(MremapLockedTest, RlimitMemlockZero) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
- auto mapping = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_LOCKED));
- EXPECT_TRUE(IsPageMlocked(mapping.addr()));
-
- AutoCapability cap(CAP_IPC_LOCK, false);
- Cleanup reset_rlimit =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSetSoftRlimit(RLIMIT_MEMLOCK, 0));
- void* addr = mremap(mapping.ptr(), mapping.len(), 2 * mapping.len(),
- MREMAP_MAYMOVE, nullptr);
- EXPECT_TRUE(addr == MAP_FAILED && errno == EAGAIN)
- << "addr = " << addr << ", errno = " << errno;
-}
-
-TEST(MremapLockedTest, RlimitMemlockInsufficient) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanMlock()));
- auto mapping = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_LOCKED));
- EXPECT_TRUE(IsPageMlocked(mapping.addr()));
-
- AutoCapability cap(CAP_IPC_LOCK, false);
- Cleanup reset_rlimit = ASSERT_NO_ERRNO_AND_VALUE(
- ScopedSetSoftRlimit(RLIMIT_MEMLOCK, mapping.len()));
- void* addr = mremap(mapping.ptr(), mapping.len(), 2 * mapping.len(),
- MREMAP_MAYMOVE, nullptr);
- EXPECT_TRUE(addr == MAP_FAILED && errno == EAGAIN)
- << "addr = " << addr << ", errno = " << errno;
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
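The tests above detect locking indirectly through msync(MS_INVALIDATE). Another way to observe it, assuming a Linux /proc, is the VmLck line of /proc/self/status, which counts the locked address range whether or not the pages have been faulted in (so it reads the same for plain mlock and MLOCK_ONFAULT). A rough sketch (assumes SYS_mlock2 is defined by the toolchain headers; not taken from the test suite):

#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <fstream>
#include <iostream>
#include <string>

#ifndef MLOCK_ONFAULT
#define MLOCK_ONFAULT 0x01  // Linux: include/uapi/asm-generic/mman-common.h
#endif

void PrintVmLck(const char* when) {
  std::ifstream status("/proc/self/status");
  std::string line;
  while (std::getline(status, line)) {
    if (line.rfind("VmLck:", 0) == 0) {
      std::cout << when << ": " << line << "\n";
    }
  }
}

int main() {
  const size_t len = 4 * static_cast<size_t>(sysconf(_SC_PAGESIZE));
  void* p = mmap(nullptr, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) return 1;

  PrintVmLck("before mlock2");
  // MLOCK_ONFAULT marks the range locked but defers populating it; VmLck
  // still reflects the whole range immediately.
  if (syscall(SYS_mlock2, p, len, MLOCK_ONFAULT) != 0) return 1;
  PrintVmLck("after mlock2(MLOCK_ONFAULT)");

  munlock(p, len);
  munmap(p, len);
  return 0;
}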
diff --git a/test/syscalls/linux/mmap.cc b/test/syscalls/linux/mmap.cc
deleted file mode 100644
index 93a6d9cde..000000000
--- a/test/syscalls/linux/mmap.cc
+++ /dev/null
@@ -1,1697 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <linux/magic.h>
-#include <linux/unistd.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/mman.h>
-#include <sys/resource.h>
-#include <sys/statfs.h>
-#include <sys/syscall.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <unistd.h>
-
-#include <vector>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/strings/escaping.h"
-#include "absl/strings/str_split.h"
-#include "test/util/cleanup.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/memory_util.h"
-#include "test/util/multiprocess_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-using ::testing::AnyOf;
-using ::testing::Eq;
-using ::testing::Gt;
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-PosixErrorOr<int64_t> VirtualMemorySize() {
- ASSIGN_OR_RETURN_ERRNO(auto contents, GetContents("/proc/self/statm"));
- std::vector<std::string> parts = absl::StrSplit(contents, ' ');
- if (parts.empty()) {
- return PosixError(EINVAL, "Unable to parse /proc/self/statm");
- }
- ASSIGN_OR_RETURN_ERRNO(auto pages, Atoi<int64_t>(parts[0]));
- return pages * getpagesize();
-}
-
-class MMapTest : public ::testing::Test {
- protected:
- // Unmap mapping, if one was made.
- void TearDown() override {
- if (addr_) {
- EXPECT_THAT(Unmap(), SyscallSucceeds());
- }
- }
-
- // Remembers mapping, so it can be automatically unmapped.
- uintptr_t Map(uintptr_t addr, size_t length, int prot, int flags, int fd,
- off_t offset) {
- void* ret =
- mmap(reinterpret_cast<void*>(addr), length, prot, flags, fd, offset);
-
- if (ret != MAP_FAILED) {
- addr_ = ret;
- length_ = length;
- }
-
- return reinterpret_cast<uintptr_t>(ret);
- }
-
- // Unmap previous mapping
- int Unmap() {
- if (!addr_) {
- return -1;
- }
-
- int ret = munmap(addr_, length_);
-
- addr_ = nullptr;
- length_ = 0;
-
- return ret;
- }
-
- // Msync the mapping.
- int Msync() { return msync(addr_, length_, MS_SYNC); }
-
- // Mlock the mapping.
- int Mlock() { return mlock(addr_, length_); }
-
- // Munlock the mapping.
- int Munlock() { return munlock(addr_, length_); }
-
- int Protect(uintptr_t addr, size_t length, int prot) {
- return mprotect(reinterpret_cast<void*>(addr), length, prot);
- }
-
- void* addr_ = nullptr;
- size_t length_ = 0;
-};
-
-// Matches if arg contains the same contents as string str.
-MATCHER_P(EqualsMemory, str, "") {
- if (0 == memcmp(arg, str.c_str(), str.size())) {
- return true;
- }
-
- *result_listener << "Memory did not match. Got:\n"
- << absl::BytesToHexString(
- std::string(static_cast<char*>(arg), str.size()))
- << "Want:\n"
- << absl::BytesToHexString(str);
- return false;
-}
-
-// We can't map pipes; the read and write ends fail for different reasons.
-TEST_F(MMapTest, MapPipe) {
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- EXPECT_THAT(Map(0, kPageSize, PROT_READ, MAP_PRIVATE, fds[0], 0),
- SyscallFailsWithErrno(ENODEV));
- EXPECT_THAT(Map(0, kPageSize, PROT_READ, MAP_PRIVATE, fds[1], 0),
- SyscallFailsWithErrno(EACCES));
- ASSERT_THAT(close(fds[0]), SyscallSucceeds());
- ASSERT_THAT(close(fds[1]), SyscallSucceeds());
-}
-
-// It's very common to mmap /dev/zero because anonymous mappings aren't part
-// of POSIX, although they are widely supported. So a zero-initialized memory
-// region would actually come from a "file-backed" /dev/zero mapping.
-TEST_F(MMapTest, MapDevZeroShared) {
- // This test will verify that we're able to map a page backed by /dev/zero
- // as MAP_SHARED.
- const FileDescriptor dev_zero =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/zero", O_RDWR));
-
- // Test that we can create a RW SHARED mapping of /dev/zero.
- ASSERT_THAT(
- Map(0, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, dev_zero.get(), 0),
- SyscallSucceeds());
-}
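The comment above alludes to the classic portable idiom: a MAP_PRIVATE mapping of /dev/zero behaves like an anonymous mapping, and it stays valid after the descriptor is closed. A rough standalone sketch (not taken from the test suite):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <cstdio>

int main() {
  int fd = open("/dev/zero", O_RDWR);
  if (fd < 0) return 1;

  const size_t len = 4096;
  char* p = static_cast<char*>(
      mmap(nullptr, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0));
  if (p == MAP_FAILED) return 1;
  close(fd);  // the mapping outlives the fd

  p[0] = 'x';  // zero-filled, copy-on-write, private to this process
  std::printf("first byte: %c, second byte: %d\n", p[0], p[1]);  // "x, 0"
  munmap(p, len);
  return 0;
}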
-
-TEST_F(MMapTest, MapDevZeroPrivate) {
- // This test will verify that we're able to map a page backed by /dev/zero
- // as MAP_PRIVATE.
- const FileDescriptor dev_zero =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/zero", O_RDWR));
-
- // Test that we can create a RW PRIVATE mapping of /dev/zero.
- ASSERT_THAT(
- Map(0, kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE, dev_zero.get(), 0),
- SyscallSucceeds());
-}
-
-TEST_F(MMapTest, MapDevZeroNoPersistence) {
- // This test will verify that two independent mappings of /dev/zero do not
- // appear to reference the same "backed file."
-
- const FileDescriptor dev_zero1 =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/zero", O_RDWR));
- const FileDescriptor dev_zero2 =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/zero", O_RDWR));
-
- ASSERT_THAT(
- Map(0, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, dev_zero1.get(), 0),
- SyscallSucceeds());
-
- // Create a second mapping via the second /dev/zero fd.
- void* psec_map = mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED,
- dev_zero2.get(), 0);
- ASSERT_THAT(reinterpret_cast<intptr_t>(psec_map), SyscallSucceeds());
-
- // Always unmap.
- auto cleanup_psec_map = Cleanup(
- [&] { EXPECT_THAT(munmap(psec_map, kPageSize), SyscallSucceeds()); });
-
- // Verify that we have independently addressed pages.
- ASSERT_NE(psec_map, addr_);
-
- std::string buf_zero(kPageSize, 0x00);
- std::string buf_ones(kPageSize, 0xFF);
-
- // Verify the first is actually all zeros after mmap.
- EXPECT_THAT(addr_, EqualsMemory(buf_zero));
-
- // Let's fill in the first mapping with 0xFF.
- memcpy(addr_, buf_ones.data(), kPageSize);
-
- // Verify that the memcpy actually stuck in the page.
- EXPECT_THAT(addr_, EqualsMemory(buf_ones));
-
- // Verify that it didn't affect the second page which should be all zeros.
- EXPECT_THAT(psec_map, EqualsMemory(buf_zero));
-}
-
-TEST_F(MMapTest, MapDevZeroSharedMultiplePages) {
- // This will test that we're able to map /dev/zero over multiple pages.
- const FileDescriptor dev_zero =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/zero", O_RDWR));
-
- // Test that we can create a RW mapping of /dev/zero spanning multiple pages.
- ASSERT_THAT(Map(0, kPageSize * 2, PROT_READ | PROT_WRITE, MAP_PRIVATE,
- dev_zero.get(), 0),
- SyscallSucceeds());
-
- std::string buf_zero(kPageSize * 2, 0x00);
- std::string buf_ones(kPageSize * 2, 0xFF);
-
- // Verify the two pages are actually all zeros after mmap.
- EXPECT_THAT(addr_, EqualsMemory(buf_zero));
-
- // Fill out the pages with all ones.
- memcpy(addr_, buf_ones.data(), kPageSize * 2);
-
- // Verify that the memcpy actually stuck in the pages.
- EXPECT_THAT(addr_, EqualsMemory(buf_ones));
-}
-
-TEST_F(MMapTest, MapDevZeroSharedFdNoPersistence) {
- // This test will verify that two independent mappings of /dev/zero do not
- // appear to reference the same "backed file" even when mapped from the
- // same initial fd.
- const FileDescriptor dev_zero =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/zero", O_RDWR));
-
- ASSERT_THAT(
- Map(0, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, dev_zero.get(), 0),
- SyscallSucceeds());
-
- // Create a second mapping via the same fd.
- void* psec_map = mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED,
- dev_zero.get(), 0);
- ASSERT_THAT(reinterpret_cast<int64_t>(psec_map), SyscallSucceeds());
-
- // Always unmap.
- auto cleanup_psec_map = Cleanup(
- [&] { ASSERT_THAT(munmap(psec_map, kPageSize), SyscallSucceeds()); });
-
- // Verify that we have independently addressed pages.
- ASSERT_NE(psec_map, addr_);
-
- std::string buf_zero(kPageSize, 0x00);
- std::string buf_ones(kPageSize, 0xFF);
-
- // Verify the first is actually all zeros after mmap.
- EXPECT_THAT(addr_, EqualsMemory(buf_zero));
-
- // Let's fill in the first mapping with 0xFF.
- memcpy(addr_, buf_ones.data(), kPageSize);
-
- // Verify that the memcpy actually stuck in the page.
- EXPECT_THAT(addr_, EqualsMemory(buf_ones));
-
- // Verify that it didn't affect the second page which should be all zeros.
- EXPECT_THAT(psec_map, EqualsMemory(buf_zero));
-}
-
-TEST_F(MMapTest, MapDevZeroSegfaultAfterUnmap) {
- SetupGvisorDeathTest();
-
- // This test will verify that we're able to map a page backed by /dev/zero
- // as MAP_SHARED and after it's unmapped any access results in a SIGSEGV.
- // This test is redundant but given the special nature of /dev/zero mappings
- // it doesn't hurt.
- const FileDescriptor dev_zero =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/zero", O_RDWR));
-
- const auto rest = [&] {
- // Test that we can create a RW SHARED mapping of /dev/zero.
- TEST_PCHECK(Map(0, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED,
- dev_zero.get(),
- 0) != reinterpret_cast<uintptr_t>(MAP_FAILED));
-
- // Confirm that accesses after the unmap result in a SIGSEGV.
- //
- // N.B. We depend on this process being single-threaded to ensure there
- // can't be another mmap to map addr before the dereference below.
- void* addr_saved = addr_; // Unmap resets addr_.
- TEST_PCHECK(Unmap() == 0);
- *reinterpret_cast<volatile int*>(addr_saved) = 0xFF;
- };
-
- EXPECT_THAT(InForkedProcess(rest),
- IsPosixErrorOkAndHolds(AnyOf(Eq(W_EXITCODE(0, SIGSEGV)),
- Eq(W_EXITCODE(0, 128 + SIGSEGV)))));
-}
-
-TEST_F(MMapTest, MapDevZeroUnaligned) {
- const FileDescriptor dev_zero =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/zero", O_RDWR));
- const size_t size = kPageSize + kPageSize / 2;
- const std::string buf_zero(size, 0x00);
-
- ASSERT_THAT(
- Map(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, dev_zero.get(), 0),
- SyscallSucceeds());
- EXPECT_THAT(addr_, EqualsMemory(buf_zero));
- ASSERT_THAT(Unmap(), SyscallSucceeds());
-
- ASSERT_THAT(
- Map(0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, dev_zero.get(), 0),
- SyscallSucceeds());
- EXPECT_THAT(addr_, EqualsMemory(buf_zero));
-}
-
-// We can't map _some_ character devices.
-TEST_F(MMapTest, MapCharDevice) {
- const FileDescriptor cdevfd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/random", 0, 0));
- EXPECT_THAT(Map(0, kPageSize, PROT_READ, MAP_PRIVATE, cdevfd.get(), 0),
- SyscallFailsWithErrno(ENODEV));
-}
-
-// We can't map directories.
-TEST_F(MMapTest, MapDirectory) {
- const FileDescriptor dirfd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(GetAbsoluteTestTmpdir(), 0, 0));
- EXPECT_THAT(Map(0, kPageSize, PROT_READ, MAP_PRIVATE, dirfd.get(), 0),
- SyscallFailsWithErrno(ENODEV));
-}
-
-// We can map *something*
-TEST_F(MMapTest, MapAnything) {
- EXPECT_THAT(Map(0, kPageSize, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0),
- SyscallSucceedsWithValue(Gt(0)));
-}
-
-// Map length < PageSize allowed
-TEST_F(MMapTest, SmallMap) {
- EXPECT_THAT(Map(0, 128, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0),
- SyscallSucceeds());
-}
-
-// Hint address doesn't break anything.
-// Note: there is no requirement we actually get the hint address
-TEST_F(MMapTest, HintAddress) {
- EXPECT_THAT(
- Map(0x30000000, kPageSize, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0),
- SyscallSucceeds());
-}
-
-// MAP_FIXED gives us exactly the requested address
-TEST_F(MMapTest, MapFixed) {
- EXPECT_THAT(Map(0x30000000, kPageSize, PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0),
- SyscallSucceedsWithValue(0x30000000));
-}
-
-// 64-bit addresses work too
-#if defined(__x86_64__) || defined(__aarch64__)
-TEST_F(MMapTest, MapFixed64) {
- EXPECT_THAT(Map(0x300000000000, kPageSize, PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0),
- SyscallSucceedsWithValue(0x300000000000));
-}
-#endif
-
-// MAP_STACK allowed.
-// There isn't a good way to verify it did anything.
-TEST_F(MMapTest, MapStack) {
- EXPECT_THAT(Map(0, kPageSize, PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0),
- SyscallSucceeds());
-}
-
-// MAP_LOCKED allowed.
-// There isn't a good way to verify it did anything.
-TEST_F(MMapTest, MapLocked) {
- EXPECT_THAT(Map(0, kPageSize, PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_LOCKED, -1, 0),
- SyscallSucceeds());
-}
-
-// MAP_PRIVATE or MAP_SHARED must be passed
-TEST_F(MMapTest, NotPrivateOrShared) {
- EXPECT_THAT(Map(0, kPageSize, PROT_NONE, MAP_ANONYMOUS, -1, 0),
- SyscallFailsWithErrno(EINVAL));
-}
-
-// Only one of MAP_PRIVATE or MAP_SHARED may be passed
-TEST_F(MMapTest, PrivateAndShared) {
- EXPECT_THAT(Map(0, kPageSize, PROT_NONE,
- MAP_PRIVATE | MAP_SHARED | MAP_ANONYMOUS, -1, 0),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_F(MMapTest, FixedAlignment) {
- // Addr must be page aligned (MAP_FIXED)
- EXPECT_THAT(Map(0x30000001, kPageSize, PROT_NONE,
- MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0),
- SyscallFailsWithErrno(EINVAL));
-}
-
-// Non-MAP_FIXED address does not need to be page aligned
-TEST_F(MMapTest, NonFixedAlignment) {
- EXPECT_THAT(
- Map(0x30000001, kPageSize, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0),
- SyscallSucceeds());
-}
-
-// Length = 0 results in EINVAL.
-TEST_F(MMapTest, InvalidLength) {
- EXPECT_THAT(Map(0, 0, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0),
- SyscallFailsWithErrno(EINVAL));
-}
-
-// Bad fd not allowed.
-TEST_F(MMapTest, BadFd) {
- EXPECT_THAT(Map(0, kPageSize, PROT_NONE, MAP_PRIVATE, 999, 0),
- SyscallFailsWithErrno(EBADF));
-}
-
-// Mappings are writable.
-TEST_F(MMapTest, ProtWrite) {
- uint64_t addr;
- constexpr uint8_t kFirstWord[] = {42, 42, 42, 42};
-
- EXPECT_THAT(addr = Map(0, kPageSize, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0),
- SyscallSucceeds());
-
- // This shouldn't cause a SIGSEGV.
- memset(reinterpret_cast<void*>(addr), 42, kPageSize);
-
- // The written data should actually be there.
- EXPECT_EQ(
- 0, memcmp(reinterpret_cast<void*>(addr), kFirstWord, sizeof(kFirstWord)));
-}
-
-// "Write-only" mappings are writable *and* readable.
-TEST_F(MMapTest, ProtWriteOnly) {
- uint64_t addr;
- constexpr uint8_t kFirstWord[] = {42, 42, 42, 42};
-
- EXPECT_THAT(
- addr = Map(0, kPageSize, PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0),
- SyscallSucceeds());
-
- // This shouldn't cause a SIGSEGV.
- memset(reinterpret_cast<void*>(addr), 42, kPageSize);
-
- // The written data should actually be there.
- EXPECT_EQ(
- 0, memcmp(reinterpret_cast<void*>(addr), kFirstWord, sizeof(kFirstWord)));
-}
-
-// "Write-only" mappings are readable.
-//
-// This is distinct from above to ensure the page is accessible even if the
-// initial fault is a write fault.
-TEST_F(MMapTest, ProtWriteOnlyReadable) {
- uint64_t addr;
- constexpr uint64_t kFirstWord = 0;
-
- EXPECT_THAT(
- addr = Map(0, kPageSize, PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0),
- SyscallSucceeds());
-
- EXPECT_EQ(0, memcmp(reinterpret_cast<void*>(addr), &kFirstWord,
- sizeof(kFirstWord)));
-}
-
-// Mappings are writable after mprotect from PROT_NONE to PROT_READ|PROT_WRITE.
-TEST_F(MMapTest, ProtectProtWrite) {
- uint64_t addr;
- constexpr uint8_t kFirstWord[] = {42, 42, 42, 42};
-
- EXPECT_THAT(
- addr = Map(0, kPageSize, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0),
- SyscallSucceeds());
-
- ASSERT_THAT(Protect(addr, kPageSize, PROT_READ | PROT_WRITE),
- SyscallSucceeds());
-
- // This shouldn't cause a SIGSEGV.
- memset(reinterpret_cast<void*>(addr), 42, kPageSize);
-
- // The written data should actually be there.
- EXPECT_EQ(
- 0, memcmp(reinterpret_cast<void*>(addr), kFirstWord, sizeof(kFirstWord)));
-}
-
-// SIGSEGV raised when reading PROT_NONE memory
-TEST_F(MMapTest, ProtNoneDeath) {
- SetupGvisorDeathTest();
-
- uintptr_t addr;
-
- ASSERT_THAT(
- addr = Map(0, kPageSize, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0),
- SyscallSucceeds());
-
- EXPECT_EXIT(*reinterpret_cast<volatile int*>(addr),
- ::testing::KilledBySignal(SIGSEGV), "");
-}
-
-// SIGSEGV raised when writing PROT_READ only memory
-TEST_F(MMapTest, ReadOnlyDeath) {
- SetupGvisorDeathTest();
-
- uintptr_t addr;
-
- ASSERT_THAT(
- addr = Map(0, kPageSize, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0),
- SyscallSucceeds());
-
- EXPECT_EXIT(*reinterpret_cast<volatile int*>(addr) = 42,
- ::testing::KilledBySignal(SIGSEGV), "");
-}
-
-// Writable mapping mprotect'd to read-only should not be writable.
-TEST_F(MMapTest, MprotectReadOnlyDeath) {
- SetupGvisorDeathTest();
-
- uintptr_t addr;
-
- ASSERT_THAT(addr = Map(0, kPageSize, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0),
- SyscallSucceeds());
-
- volatile int* val = reinterpret_cast<int*>(addr);
-
- // Write to ensure the page is mapped in.
- *val = 42;
-
- ASSERT_THAT(Protect(addr, kPageSize, PROT_READ), SyscallSucceeds());
-
- // Now it shouldn't be writable.
- EXPECT_EXIT(*val = 0, ::testing::KilledBySignal(SIGSEGV), "");
-}
-
-// Verify that calling mprotect on an address that's not page-aligned fails.
-TEST_F(MMapTest, MprotectNotPageAligned) {
- uintptr_t addr;
-
- ASSERT_THAT(addr = Map(0, kPageSize, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0),
- SyscallSucceeds());
- ASSERT_THAT(Protect(addr + 1, kPageSize - 1, PROT_READ),
- SyscallFailsWithErrno(EINVAL));
-}
-
-// Verify that calling mprotect with an absurdly huge length fails.
-TEST_F(MMapTest, MprotectHugeLength) {
- uintptr_t addr;
-
- ASSERT_THAT(addr = Map(0, kPageSize, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0),
- SyscallSucceeds());
- ASSERT_THAT(Protect(addr, static_cast<size_t>(-1), PROT_READ),
- SyscallFailsWithErrno(ENOMEM));
-}
-
-#if defined(__x86_64__) || defined(__i386__)
-// This code is equivalent in 32 and 64-bit mode
-const uint8_t machine_code[] = {
- 0xb8, 0x2a, 0x00, 0x00, 0x00, // movl $42, %eax
- 0xc3, // retq
-};
-#elif defined(__aarch64__)
-const uint8_t machine_code[] = {
- 0x40, 0x05, 0x80, 0x52, // mov w0, #42
- 0xc0, 0x03, 0x5f, 0xd6, // ret
-};
-#endif
-
-// PROT_EXEC allows code execution
-TEST_F(MMapTest, ProtExec) {
- uintptr_t addr;
- uint32_t (*func)(void);
-
- EXPECT_THAT(addr = Map(0, kPageSize, PROT_EXEC | PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0),
- SyscallSucceeds());
-
- memcpy(reinterpret_cast<void*>(addr), machine_code, sizeof(machine_code));
-
-#if defined(__aarch64__)
- // We use this as a memory barrier for Arm64.
- ASSERT_THAT(Protect(addr, kPageSize, PROT_READ | PROT_EXEC),
- SyscallSucceeds());
-#endif
-
- func = reinterpret_cast<uint32_t (*)(void)>(addr);
-
- EXPECT_EQ(42, func());
-}
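-
-// On arm64, copying instructions into a mapping and then branching to them
-// requires instruction-cache maintenance; the mprotect in the test above is
-// used to obtain that effect from the kernel. A hypothetical, more direct
-// alternative (not used by these tests) is the compiler builtin below, which
-// emits the required cache operations and barriers itself and is a no-op on
-// x86.
-[[maybe_unused]] inline void FlushInstructionCache(void* start, size_t len) {
- // __builtin___clear_cache is provided by both GCC and Clang.
- char* begin = reinterpret_cast<char*>(start);
- __builtin___clear_cache(begin, begin + len);
-}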
-
-// No PROT_EXEC disallows code execution
-TEST_F(MMapTest, NoProtExecDeath) {
- SetupGvisorDeathTest();
-
- uintptr_t addr;
- uint32_t (*func)(void);
-
- EXPECT_THAT(addr = Map(0, kPageSize, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0),
- SyscallSucceeds());
-
- memcpy(reinterpret_cast<void*>(addr), machine_code, sizeof(machine_code));
-
- func = reinterpret_cast<uint32_t (*)(void)>(addr);
-
- EXPECT_EXIT(func(), ::testing::KilledBySignal(SIGSEGV), "");
-}
-
-TEST_F(MMapTest, NoExceedLimitData) {
- void* prevbrk;
- void* target_brk;
- struct rlimit setlim;
-
- prevbrk = sbrk(0);
- ASSERT_NE(-1, reinterpret_cast<intptr_t>(prevbrk));
- target_brk = reinterpret_cast<char*>(prevbrk) + 1;
-
- setlim.rlim_cur = RLIM_INFINITY;
- setlim.rlim_max = RLIM_INFINITY;
- ASSERT_THAT(setrlimit(RLIMIT_DATA, &setlim), SyscallSucceeds());
- EXPECT_THAT(brk(target_brk), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(MMapTest, ExceedLimitData) {
- // To unit test this more precisely, we'd need access to the mm's start_brk
- // and end_brk, which we don't have direct access to :/
- void* prevbrk;
- void* target_brk;
- struct rlimit setlim;
-
- prevbrk = sbrk(0);
- ASSERT_NE(-1, reinterpret_cast<intptr_t>(prevbrk));
- target_brk = reinterpret_cast<char*>(prevbrk) + 8192;
-
- setlim.rlim_cur = 0;
- setlim.rlim_max = RLIM_INFINITY;
- // Set RLIMIT_DATA very low so any subsequent brk() calls fail.
- // Reset RLIMIT_DATA during teardown step.
- ASSERT_THAT(setrlimit(RLIMIT_DATA, &setlim), SyscallSucceeds());
- EXPECT_THAT(brk(target_brk), SyscallFailsWithErrno(ENOMEM));
- // Teardown step...
- setlim.rlim_cur = RLIM_INFINITY;
- ASSERT_THAT(setrlimit(RLIMIT_DATA, &setlim), SyscallSucceeds());
-}
-
-TEST_F(MMapTest, ExceedLimitDataPrlimit) {
- // To unit test this more precisely, we'd need access to the mm's start_brk
- // and end_brk, which we don't have direct access to :/
- void* prevbrk;
- void* target_brk;
- struct rlimit setlim;
-
- prevbrk = sbrk(0);
- ASSERT_NE(-1, reinterpret_cast<intptr_t>(prevbrk));
- target_brk = reinterpret_cast<char*>(prevbrk) + 8192;
-
- setlim.rlim_cur = 0;
- setlim.rlim_max = RLIM_INFINITY;
- // Set RLIMIT_DATA very low so any subsequent brk() calls fail.
- // Reset RLIMIT_DATA during teardown step.
- ASSERT_THAT(prlimit(0, RLIMIT_DATA, &setlim, nullptr), SyscallSucceeds());
- EXPECT_THAT(brk(target_brk), SyscallFailsWithErrno(ENOMEM));
- // Teardown step...
- setlim.rlim_cur = RLIM_INFINITY;
- ASSERT_THAT(setrlimit(RLIMIT_DATA, &setlim), SyscallSucceeds());
-}
-
-TEST_F(MMapTest, ExceedLimitDataPrlimitPID) {
- // To unit test this more precisely, we'd need access to the mm's start_brk
- // and end_brk, which we don't have direct access to :/
- void* prevbrk;
- void* target_brk;
- struct rlimit setlim;
-
- prevbrk = sbrk(0);
- ASSERT_NE(-1, reinterpret_cast<intptr_t>(prevbrk));
- target_brk = reinterpret_cast<char*>(prevbrk) + 8192;
-
- setlim.rlim_cur = 0;
- setlim.rlim_max = RLIM_INFINITY;
- // Set RLIMIT_DATA very low so any subsequent brk() calls fail.
- // Reset RLIMIT_DATA during teardown step.
- ASSERT_THAT(prlimit(syscall(__NR_gettid), RLIMIT_DATA, &setlim, nullptr),
- SyscallSucceeds());
- EXPECT_THAT(brk(target_brk), SyscallFailsWithErrno(ENOMEM));
- // Teardown step...
- setlim.rlim_cur = RLIM_INFINITY;
- ASSERT_THAT(setrlimit(RLIMIT_DATA, &setlim), SyscallSucceeds());
-}
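-
-// The three tests above repeat the same pattern: lower the soft RLIMIT_DATA,
-// expect brk() to fail, then restore the limit in a manual teardown step. A
-// hypothetical RAII guard, not used by the tests above, could make the
-// restore automatic; a minimal sketch:
-class ScopedDataLimit {
- public:
- explicit ScopedDataLimit(rlim_t soft) {
- TEST_PCHECK(getrlimit(RLIMIT_DATA, &old_) == 0);
- struct rlimit lim = old_;
- lim.rlim_cur = soft;
- TEST_PCHECK(setrlimit(RLIMIT_DATA, &lim) == 0);
- }
- // Restore the original limits on scope exit; a failure here is ignored.
- ~ScopedDataLimit() { setrlimit(RLIMIT_DATA, &old_); }
-
- private:
- struct rlimit old_ = {};
-};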
-
-TEST_F(MMapTest, NoExceedLimitAS) {
- constexpr uint64_t kAllocBytes = 200 << 20;
- // Add some headroom to the AS limit in case of e.g. unexpected stack
- // expansion.
- constexpr uint64_t kExtraASBytes = kAllocBytes + (20 << 20);
- static_assert(kAllocBytes < kExtraASBytes,
- "test depends on allocation not exceeding AS limit");
-
- auto vss = ASSERT_NO_ERRNO_AND_VALUE(VirtualMemorySize());
- struct rlimit setlim;
- setlim.rlim_cur = vss + kExtraASBytes;
- setlim.rlim_max = RLIM_INFINITY;
- ASSERT_THAT(setrlimit(RLIMIT_AS, &setlim), SyscallSucceeds());
- EXPECT_THAT(
- Map(0, kAllocBytes, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0),
- SyscallSucceedsWithValue(Gt(0)));
-}
-
-TEST_F(MMapTest, ExceedLimitAS) {
- constexpr uint64_t kAllocBytes = 200 << 20;
- // Add some headroom to the AS limit in case of e.g. unexpected stack
- // expansion.
- constexpr uint64_t kExtraASBytes = 20 << 20;
- static_assert(kAllocBytes > kExtraASBytes,
- "test depends on allocation exceeding AS limit");
-
- auto vss = ASSERT_NO_ERRNO_AND_VALUE(VirtualMemorySize());
- struct rlimit setlim;
- setlim.rlim_cur = vss + kExtraASBytes;
- setlim.rlim_max = RLIM_INFINITY;
- ASSERT_THAT(setrlimit(RLIMIT_AS, &setlim), SyscallSucceeds());
- EXPECT_THAT(
- Map(0, kAllocBytes, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0),
- SyscallFailsWithErrno(ENOMEM));
-}
-
-// Tests that setting an anonymous mmap to PROT_NONE doesn't free the memory.
-TEST_F(MMapTest, SettingProtNoneDoesntFreeMemory) {
- uintptr_t addr;
- constexpr uint8_t kFirstWord[] = {42, 42, 42, 42};
-
- EXPECT_THAT(addr = Map(0, kPageSize, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0),
- SyscallSucceedsWithValue(Gt(0)));
-
- memset(reinterpret_cast<void*>(addr), 42, kPageSize);
-
- ASSERT_THAT(Protect(addr, kPageSize, PROT_NONE), SyscallSucceeds());
- ASSERT_THAT(Protect(addr, kPageSize, PROT_READ | PROT_WRITE),
- SyscallSucceeds());
-
- // The written data should still be there.
- EXPECT_EQ(
- 0, memcmp(reinterpret_cast<void*>(addr), kFirstWord, sizeof(kFirstWord)));
-}
-
-constexpr char kFileContents[] = "Hello World!";
-
-class MMapFileTest : public MMapTest {
- protected:
- FileDescriptor fd_;
- std::string filename_;
-
- // Open a file for read/write
- void SetUp() override {
- MMapTest::SetUp();
-
- filename_ = NewTempAbsPath();
- fd_ = ASSERT_NO_ERRNO_AND_VALUE(Open(filename_, O_CREAT | O_RDWR, 0644));
-
- // Extend file so it can be written once mapped. Deliberately make the file
- // only half a page in size, so we can test what happens when we access the
- // second half.
- // Use ftruncate(2) once the sentry supports it.
- char zero = 0;
- size_t count = 0;
- do {
- const DisableSave ds; // saving 2048 times is slow and useless.
- ASSERT_THAT(Write(&zero, 1), SyscallSucceedsWithValue(1));
- } while (++count < (kPageSize / 2));
- ASSERT_THAT(lseek(fd_.get(), 0, SEEK_SET), SyscallSucceedsWithValue(0));
- }
-
- // Close and delete file
- void TearDown() override {
- MMapTest::TearDown();
- fd_.reset(); // Make sure the file is closed before we unlink it.
- ASSERT_THAT(unlink(filename_.c_str()), SyscallSucceeds());
- }
-
- ssize_t Read(char* buf, size_t count) {
- ssize_t len = 0;
- do {
- ssize_t ret = read(fd_.get(), buf, count);
- if (ret < 0) {
- return ret;
- } else if (ret == 0) {
- return len;
- }
-
- len += ret;
- buf += ret;
- } while (len < static_cast<ssize_t>(count));
-
- return len;
- }
-
- ssize_t Write(const char* buf, size_t count) {
- ssize_t len = 0;
- do {
- ssize_t ret = write(fd_.get(), buf, count);
- if (ret < 0) {
- return ret;
- } else if (ret == 0) {
- return len;
- }
-
- len += ret;
- buf += ret;
- } while (len < static_cast<ssize_t>(count));
-
- return len;
- }
-};
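-
-// The SetUp above grows the file by writing kPageSize/2 single bytes only
-// because, per its comment, the sentry did not yet support ftruncate(2). A
-// hypothetical equivalent, not used by these tests and assuming ftruncate is
-// available, extends the file with zero bytes in a single call, which matches
-// what the write loop produces:
-[[maybe_unused]] inline void ExtendToHalfPage(int fd) {
- // ftruncate fills the newly extended range with zeros.
- TEST_PCHECK(ftruncate(fd, kPageSize / 2) == 0);
-}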
-
-class MMapFileParamTest
- : public MMapFileTest,
- public ::testing::WithParamInterface<std::tuple<int, int>> {
- protected:
- int prot() const { return std::get<0>(GetParam()); }
-
- int flags() const { return std::get<1>(GetParam()); }
-};
-
-// MAP_POPULATE allowed.
-// There isn't a good way to verify it actually did anything.
-TEST_P(MMapFileParamTest, MapPopulate) {
- ASSERT_THAT(Map(0, kPageSize, prot(), flags() | MAP_POPULATE, fd_.get(), 0),
- SyscallSucceeds());
-}
-
-// MAP_POPULATE on a short file.
-TEST_P(MMapFileParamTest, MapPopulateShort) {
- ASSERT_THAT(
- Map(0, 2 * kPageSize, prot(), flags() | MAP_POPULATE, fd_.get(), 0),
- SyscallSucceeds());
-}
-
-// Read contents from mapped file.
-TEST_F(MMapFileTest, Read) {
- size_t len = strlen(kFileContents);
- ASSERT_EQ(len, Write(kFileContents, len));
-
- uintptr_t addr;
- ASSERT_THAT(addr = Map(0, kPageSize, PROT_READ, MAP_PRIVATE, fd_.get(), 0),
- SyscallSucceeds());
-
- EXPECT_THAT(reinterpret_cast<char*>(addr),
- EqualsMemory(std::string(kFileContents)));
-}
-
-// Map at an offset.
-TEST_F(MMapFileTest, MapOffset) {
- ASSERT_THAT(lseek(fd_.get(), kPageSize, SEEK_SET), SyscallSucceeds());
-
- size_t len = strlen(kFileContents);
- ASSERT_EQ(len, Write(kFileContents, len));
-
- uintptr_t addr;
- ASSERT_THAT(
- addr = Map(0, kPageSize, PROT_READ, MAP_PRIVATE, fd_.get(), kPageSize),
- SyscallSucceeds());
-
- EXPECT_THAT(reinterpret_cast<char*>(addr),
- EqualsMemory(std::string(kFileContents)));
-}
-
-TEST_F(MMapFileTest, MapOffsetBeyondEnd) {
- SetupGvisorDeathTest();
-
- uintptr_t addr;
- ASSERT_THAT(addr = Map(0, kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE,
- fd_.get(), 10 * kPageSize),
- SyscallSucceeds());
-
- // Touching the memory causes SIGBUS.
- size_t len = strlen(kFileContents);
- EXPECT_EXIT(std::copy(kFileContents, kFileContents + len,
- reinterpret_cast<volatile char*>(addr)),
- ::testing::KilledBySignal(SIGBUS), "");
-}
-
-// Verify mmap fails when sum of length and offset overflows.
-TEST_F(MMapFileTest, MapLengthPlusOffsetOverflows) {
- const size_t length = static_cast<size_t>(-kPageSize);
- const off_t offset = kPageSize;
- ASSERT_THAT(Map(0, length, PROT_READ, MAP_PRIVATE, fd_.get(), offset),
- SyscallFailsWithErrno(ENOMEM));
-}
-
-// MAP_PRIVATE PROT_WRITE is allowed on read-only FDs.
-TEST_F(MMapFileTest, WritePrivateOnReadOnlyFd) {
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(filename_, O_RDONLY));
-
- uintptr_t addr;
- EXPECT_THAT(addr = Map(0, kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE,
- fd.get(), 0),
- SyscallSucceeds());
-
- // Touch the page to ensure the kernel didn't lie about writability.
- size_t len = strlen(kFileContents);
- std::copy(kFileContents, kFileContents + len,
- reinterpret_cast<volatile char*>(addr));
-}
-
-// MAP_SHARED PROT_WRITE not allowed on read-only FDs.
-TEST_F(MMapFileTest, WriteSharedOnReadOnlyFd) {
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(filename_, O_RDONLY));
-
- uintptr_t addr;
- EXPECT_THAT(
- addr = Map(0, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0),
- SyscallFailsWithErrno(EACCES));
-}
-
-// Mmap not allowed on O_PATH FDs.
-TEST_F(MMapFileTest, MmapFileWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_PATH));
-
- uintptr_t addr;
- EXPECT_THAT(addr = Map(0, kPageSize, PROT_READ, MAP_PRIVATE, fd.get(), 0),
- SyscallFailsWithErrno(EBADF));
-}
-
-// The FD must be readable.
-TEST_P(MMapFileParamTest, WriteOnlyFd) {
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(filename_, O_WRONLY));
-
- uintptr_t addr;
- EXPECT_THAT(addr = Map(0, kPageSize, prot(), flags(), fd.get(), 0),
- SyscallFailsWithErrno(EACCES));
-}
-
-// Overwriting the contents of a file mapped MAP_SHARED PROT_READ
-// should cause the new data to be reflected in the mapping.
-TEST_F(MMapFileTest, ReadSharedConsistentWithOverwrite) {
- // Start from scratch.
- EXPECT_THAT(ftruncate(fd_.get(), 0), SyscallSucceeds());
-
- // Expand the file to two pages and dirty them.
- std::string bufA(kPageSize, 'a');
- ASSERT_THAT(Write(bufA.c_str(), bufA.size()),
- SyscallSucceedsWithValue(bufA.size()));
- std::string bufB(kPageSize, 'b');
- ASSERT_THAT(Write(bufB.c_str(), bufB.size()),
- SyscallSucceedsWithValue(bufB.size()));
-
- // Map both pages.
- uintptr_t addr;
- ASSERT_THAT(addr = Map(0, 2 * kPageSize, PROT_READ, MAP_SHARED, fd_.get(), 0),
- SyscallSucceeds());
-
- // Check that the mapping contains the right file data.
- EXPECT_EQ(0, memcmp(reinterpret_cast<void*>(addr), bufA.c_str(), kPageSize));
- EXPECT_EQ(0, memcmp(reinterpret_cast<void*>(addr + kPageSize), bufB.c_str(),
- kPageSize));
-
- // Start at the beginning of the file.
- ASSERT_THAT(lseek(fd_.get(), 0, SEEK_SET), SyscallSucceedsWithValue(0));
-
- // Swap the write pattern.
- ASSERT_THAT(Write(bufB.c_str(), bufB.size()),
- SyscallSucceedsWithValue(bufB.size()));
- ASSERT_THAT(Write(bufA.c_str(), bufA.size()),
- SyscallSucceedsWithValue(bufA.size()));
-
- // Check that the mapping got updated.
- EXPECT_EQ(0, memcmp(reinterpret_cast<void*>(addr), bufB.c_str(), kPageSize));
- EXPECT_EQ(0, memcmp(reinterpret_cast<void*>(addr + kPageSize), bufA.c_str(),
- kPageSize));
-}
-
-// Partially overwriting a file mapped MAP_SHARED PROT_READ should be reflected
-// in the mapping.
-TEST_F(MMapFileTest, ReadSharedConsistentWithPartialOverwrite) {
- // Start from scratch.
- EXPECT_THAT(ftruncate(fd_.get(), 0), SyscallSucceeds());
-
- // Expand the file to two pages and dirty them.
- std::string bufA(kPageSize, 'a');
- ASSERT_THAT(Write(bufA.c_str(), bufA.size()),
- SyscallSucceedsWithValue(bufA.size()));
- std::string bufB(kPageSize, 'b');
- ASSERT_THAT(Write(bufB.c_str(), bufB.size()),
- SyscallSucceedsWithValue(bufB.size()));
-
- // Map both pages.
- uintptr_t addr;
- ASSERT_THAT(addr = Map(0, 2 * kPageSize, PROT_READ, MAP_SHARED, fd_.get(), 0),
- SyscallSucceeds());
-
- // Check that the mapping contains the right file data.
- EXPECT_EQ(0, memcmp(reinterpret_cast<void*>(addr), bufA.c_str(), kPageSize));
- EXPECT_EQ(0, memcmp(reinterpret_cast<void*>(addr + kPageSize), bufB.c_str(),
- kPageSize));
-
- // Start at the beginning of the file.
- ASSERT_THAT(lseek(fd_.get(), 0, SEEK_SET), SyscallSucceedsWithValue(0));
-
- // Do a partial overwrite, spanning both pages.
- std::string bufC(kPageSize + (kPageSize / 2), 'c');
- ASSERT_THAT(Write(bufC.c_str(), bufC.size()),
- SyscallSucceedsWithValue(bufC.size()));
-
- // Check that the mapping got updated.
- EXPECT_EQ(0, memcmp(reinterpret_cast<void*>(addr), bufC.c_str(),
- kPageSize + (kPageSize / 2)));
- EXPECT_EQ(0,
- memcmp(reinterpret_cast<void*>(addr + kPageSize + (kPageSize / 2)),
- bufB.c_str(), kPageSize / 2));
-}
-
-// Overwriting a file mapped MAP_SHARED PROT_READ should be reflected in the
-// mapping and the file.
-TEST_F(MMapFileTest, ReadSharedConsistentWithWriteAndFile) {
- // Start from scratch.
- EXPECT_THAT(ftruncate(fd_.get(), 0), SyscallSucceeds());
-
- // Expand the file to two full pages and dirty it.
- std::string bufA(2 * kPageSize, 'a');
- ASSERT_THAT(Write(bufA.c_str(), bufA.size()),
- SyscallSucceedsWithValue(bufA.size()));
-
- // Map only the first page.
- uintptr_t addr;
- ASSERT_THAT(addr = Map(0, kPageSize, PROT_READ, MAP_SHARED, fd_.get(), 0),
- SyscallSucceeds());
-
- // Prepare to overwrite the file contents.
- ASSERT_THAT(lseek(fd_.get(), 0, SEEK_SET), SyscallSucceedsWithValue(0));
-
- // Overwrite everything, beyond the mapped portion.
- std::string bufB(2 * kPageSize, 'b');
- ASSERT_THAT(Write(bufB.c_str(), bufB.size()),
- SyscallSucceedsWithValue(bufB.size()));
-
- // What the mapped portion should now look like.
- std::string bufMapped(kPageSize, 'b');
-
- // Expect that the mapped portion is consistent.
- EXPECT_EQ(
- 0, memcmp(reinterpret_cast<void*>(addr), bufMapped.c_str(), kPageSize));
-
- // Prepare to read the entire file contents.
- ASSERT_THAT(lseek(fd_.get(), 0, SEEK_SET), SyscallSucceedsWithValue(0));
-
- // Expect that the file was fully updated.
- std::vector<char> bufFile(2 * kPageSize);
- ASSERT_THAT(Read(bufFile.data(), bufFile.size()),
- SyscallSucceedsWithValue(bufFile.size()));
- // Cast to void* to avoid EXPECT_THAT assuming bufFile.data() is a
- // NUL-terminated C string. EXPECT_THAT will try to print a char* as a C
- // string, possibly overrunning the buffer.
- EXPECT_THAT(reinterpret_cast<void*>(bufFile.data()), EqualsMemory(bufB));
-}
-
-// Write data to mapped file.
-TEST_F(MMapFileTest, WriteShared) {
- uintptr_t addr;
- ASSERT_THAT(addr = Map(0, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED,
- fd_.get(), 0),
- SyscallSucceeds());
-
- size_t len = strlen(kFileContents);
- memcpy(reinterpret_cast<void*>(addr), kFileContents, len);
-
- // The file may not actually be updated until munmap is called.
- ASSERT_THAT(Unmap(), SyscallSucceeds());
-
- std::vector<char> buf(len);
- ASSERT_THAT(Read(buf.data(), buf.size()),
- SyscallSucceedsWithValue(buf.size()));
- // Cast to void* to avoid EXPECT_THAT assuming buf.data() is a
- // NUL-terminated C string. EXPECT_THAT will try to print a char* as a C
- // string, possibly overrunning the buffer.
- EXPECT_THAT(reinterpret_cast<void*>(buf.data()),
- EqualsMemory(std::string(kFileContents)));
-}
-
-// Write data to portion of mapped page beyond the end of the file.
-// These writes are not reflected in the file.
-TEST_F(MMapFileTest, WriteSharedBeyondEnd) {
- // The file is only half of a page. We map an entire page. Writes to the
- // end of the mapping must not be reflected in the file.
- uintptr_t addr;
- ASSERT_THAT(addr = Map(0, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED,
- fd_.get(), 0),
- SyscallSucceeds());
-
- // First half; this is reflected in the file.
- std::string first(kPageSize / 2, 'A');
- memcpy(reinterpret_cast<void*>(addr), first.c_str(), first.size());
-
- // Second half; this is not reflected in the file.
- std::string second(kPageSize / 2, 'B');
- memcpy(reinterpret_cast<void*>(addr + kPageSize / 2), second.c_str(),
- second.size());
-
- // The file may not actually be updated until munmap is called.
- ASSERT_THAT(Unmap(), SyscallSucceeds());
-
- // Big enough to fit the entire page, if the writes are mistakenly written to
- // the file.
- std::vector<char> buf(kPageSize);
-
- // Only the first half is in the file.
- ASSERT_THAT(Read(buf.data(), buf.size()),
- SyscallSucceedsWithValue(first.size()));
- // Cast to void* to avoid EXPECT_THAT assuming buf.data() is a
- // NUL-terminated C string. EXPECT_THAT will try to print a char* as a C
- // string, possibly overrunning the buffer.
- EXPECT_THAT(reinterpret_cast<void*>(buf.data()), EqualsMemory(first));
-}
-
-// The portion of a mapped page that becomes part of the file after a truncate
-// is reflected in the file.
-TEST_F(MMapFileTest, WriteSharedTruncateUp) {
- // The file is only half of a page, and we map an entire page. Before the
- // truncate below, writes beyond EOF would not be reflected in the file (see
- // WriteSharedBeyondEnd).
- uintptr_t addr;
- ASSERT_THAT(addr = Map(0, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED,
- fd_.get(), 0),
- SyscallSucceeds());
-
- // First half; this is reflected in the file.
- std::string first(kPageSize / 2, 'A');
- memcpy(reinterpret_cast<void*>(addr), first.c_str(), first.size());
-
- // Second half; this is not reflected in the file now (see
- // WriteSharedBeyondEnd), but will be after the truncate.
- std::string second(kPageSize / 2, 'B');
- memcpy(reinterpret_cast<void*>(addr + kPageSize / 2), second.c_str(),
- second.size());
-
- // Extend the file to a full page. The second half of the page will be
- // reflected in the file.
- EXPECT_THAT(ftruncate(fd_.get(), kPageSize), SyscallSucceeds());
-
- // The file may not actually be updated until munmap is called.
- ASSERT_THAT(Unmap(), SyscallSucceeds());
-
- // The whole page is in the file.
- std::vector<char> buf(kPageSize);
- ASSERT_THAT(Read(buf.data(), buf.size()),
- SyscallSucceedsWithValue(buf.size()));
- // Cast to void* to avoid EXPECT_THAT assuming buf.data() is a
- // NUL-terminated C string. EXPECT_THAT will try to print a char* as a C
- // string, possibly overrunning the buffer.
- EXPECT_THAT(reinterpret_cast<void*>(buf.data()), EqualsMemory(first));
- EXPECT_THAT(reinterpret_cast<void*>(buf.data() + kPageSize / 2),
- EqualsMemory(second));
-}
-
-TEST_F(MMapFileTest, ReadSharedTruncateDownThenUp) {
- // Start from scratch.
- EXPECT_THAT(ftruncate(fd_.get(), 0), SyscallSucceeds());
-
- // Expand the file to a full page and dirty it.
- std::string buf(kPageSize, 'a');
- ASSERT_THAT(Write(buf.c_str(), buf.size()),
- SyscallSucceedsWithValue(buf.size()));
-
- // Map the page.
- uintptr_t addr;
- ASSERT_THAT(addr = Map(0, kPageSize, PROT_READ, MAP_SHARED, fd_.get(), 0),
- SyscallSucceeds());
-
- // Check that the memory contains the file data.
- EXPECT_EQ(0, memcmp(reinterpret_cast<void*>(addr), buf.c_str(), kPageSize));
-
- // Truncate down, then up.
- EXPECT_THAT(ftruncate(fd_.get(), 0), SyscallSucceeds());
- EXPECT_THAT(ftruncate(fd_.get(), kPageSize), SyscallSucceeds());
-
- // Check that the memory was zeroed.
- std::string zeroed(kPageSize, '\0');
- EXPECT_EQ(0,
- memcmp(reinterpret_cast<void*>(addr), zeroed.c_str(), kPageSize));
-
- // The file may not actually be updated until msync is called.
- ASSERT_THAT(Msync(), SyscallSucceeds());
-
- // Prepare to read the entire file contents.
- ASSERT_THAT(lseek(fd_.get(), 0, SEEK_SET), SyscallSucceedsWithValue(0));
-
- // Expect that the file is fully updated.
- std::vector<char> bufFile(kPageSize);
- ASSERT_THAT(Read(bufFile.data(), bufFile.size()),
- SyscallSucceedsWithValue(bufFile.size()));
- EXPECT_EQ(0, memcmp(bufFile.data(), zeroed.c_str(), kPageSize));
-}
-
-TEST_F(MMapFileTest, WriteSharedTruncateDownThenUp) {
- // The file is only half of a page. We map an entire page so the truncate
- // cycle below affects the whole mapping.
- uintptr_t addr;
- ASSERT_THAT(addr = Map(0, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED,
- fd_.get(), 0),
- SyscallSucceeds());
-
- // First half; this will be deleted by truncate(0).
- std::string first(kPageSize / 2, 'A');
- memcpy(reinterpret_cast<void*>(addr), first.c_str(), first.size());
-
- // Truncate down, then up.
- EXPECT_THAT(ftruncate(fd_.get(), 0), SyscallSucceeds());
- EXPECT_THAT(ftruncate(fd_.get(), kPageSize), SyscallSucceeds());
-
- // The whole page is zeroed in memory.
- std::string zeroed(kPageSize, '\0');
- EXPECT_EQ(0,
- memcmp(reinterpret_cast<void*>(addr), zeroed.c_str(), kPageSize));
-
- // The file may not actually be updated until munmap is called.
- ASSERT_THAT(Unmap(), SyscallSucceeds());
-
- // The whole file is also zeroed.
- std::vector<char> buf(kPageSize);
- ASSERT_THAT(Read(buf.data(), buf.size()),
- SyscallSucceedsWithValue(buf.size()));
- // Cast to void* to avoid EXPECT_THAT assuming buf.data() is a
- // NUL-terminated C string. EXPECT_THAT will try to print a char* as a C
- // string, possibly overrunning the buffer.
- EXPECT_THAT(reinterpret_cast<void*>(buf.data()), EqualsMemory(zeroed));
-}
-
-TEST_F(MMapFileTest, ReadSharedTruncateSIGBUS) {
- SetupGvisorDeathTest();
-
- // Start from scratch.
- EXPECT_THAT(ftruncate(fd_.get(), 0), SyscallSucceeds());
-
- // Expand the file to a full page and dirty it.
- std::string buf(kPageSize, 'a');
- ASSERT_THAT(Write(buf.c_str(), buf.size()),
- SyscallSucceedsWithValue(buf.size()));
-
- // Map the page.
- uintptr_t addr;
- ASSERT_THAT(addr = Map(0, kPageSize, PROT_READ, MAP_SHARED, fd_.get(), 0),
- SyscallSucceeds());
-
- // Check that the mapping contains the file data.
- EXPECT_EQ(0, memcmp(reinterpret_cast<void*>(addr), buf.c_str(), kPageSize));
-
- // Truncate down.
- EXPECT_THAT(ftruncate(fd_.get(), 0), SyscallSucceeds());
-
- // Accessing the truncated region should cause a SIGBUS.
- std::vector<char> in(kPageSize);
- EXPECT_EXIT(
- std::copy(reinterpret_cast<volatile char*>(addr),
- reinterpret_cast<volatile char*>(addr) + kPageSize, in.data()),
- ::testing::KilledBySignal(SIGBUS), "");
-}
-
-TEST_F(MMapFileTest, WriteSharedTruncateSIGBUS) {
- SetupGvisorDeathTest();
-
- uintptr_t addr;
- ASSERT_THAT(addr = Map(0, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED,
- fd_.get(), 0),
- SyscallSucceeds());
-
- // Touch the memory to be sure it really is mapped.
- size_t len = strlen(kFileContents);
- memcpy(reinterpret_cast<void*>(addr), kFileContents, len);
-
- // Truncate down.
- EXPECT_THAT(ftruncate(fd_.get(), 0), SyscallSucceeds());
-
- // Accessing the truncated file should cause a SIGBUS.
- EXPECT_EXIT(std::copy(kFileContents, kFileContents + len,
- reinterpret_cast<volatile char*>(addr)),
- ::testing::KilledBySignal(SIGBUS), "");
-}
-
-TEST_F(MMapFileTest, ReadSharedTruncatePartialPage) {
- // Start from scratch.
- EXPECT_THAT(ftruncate(fd_.get(), 0), SyscallSucceeds());
-
- // Dirty the file.
- std::string buf(kPageSize, 'a');
- ASSERT_THAT(Write(buf.c_str(), buf.size()),
- SyscallSucceedsWithValue(buf.size()));
-
- // Map a page.
- uintptr_t addr;
- ASSERT_THAT(addr = Map(0, kPageSize, PROT_READ, MAP_SHARED, fd_.get(), 0),
- SyscallSucceeds());
-
- // Truncate to half of the page.
- EXPECT_THAT(ftruncate(fd_.get(), kPageSize / 2), SyscallSucceeds());
-
- // First half of the page untouched.
- EXPECT_EQ(0,
- memcmp(reinterpret_cast<void*>(addr), buf.data(), kPageSize / 2));
-
- // Second half is zeroed.
- std::string zeroed(kPageSize / 2, '\0');
- EXPECT_EQ(0, memcmp(reinterpret_cast<void*>(addr + kPageSize / 2),
- zeroed.c_str(), kPageSize / 2));
-}
-
-// After truncating the file to a partial page, the mapped page can still be
-// accessed: the part still backed by the file keeps its contents and the rest
-// reads as zeros.
-TEST_F(MMapFileTest, WriteSharedTruncatePartialPage) {
- // Expand the file to a full page.
- EXPECT_THAT(ftruncate(fd_.get(), kPageSize), SyscallSucceeds());
-
- uintptr_t addr;
- ASSERT_THAT(addr = Map(0, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED,
- fd_.get(), 0),
- SyscallSucceeds());
-
- // Fill the entire page.
- std::string contents(kPageSize, 'A');
- memcpy(reinterpret_cast<void*>(addr), contents.c_str(), contents.size());
-
- // Truncate half of the page.
- EXPECT_THAT(ftruncate(fd_.get(), kPageSize / 2), SyscallSucceeds());
-
- // First half of the page untouched.
- EXPECT_EQ(0, memcmp(reinterpret_cast<void*>(addr), contents.c_str(),
- kPageSize / 2));
-
- // Second half zeroed.
- std::string zeroed(kPageSize / 2, '\0');
- EXPECT_EQ(0, memcmp(reinterpret_cast<void*>(addr + kPageSize / 2),
- zeroed.c_str(), kPageSize / 2));
-}
-
-// MAP_PRIVATE writes are not carried through to the underlying file.
-TEST_F(MMapFileTest, WritePrivate) {
- uintptr_t addr;
- ASSERT_THAT(addr = Map(0, kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE,
- fd_.get(), 0),
- SyscallSucceeds());
-
- size_t len = strlen(kFileContents);
- memcpy(reinterpret_cast<void*>(addr), kFileContents, len);
-
- // The file should not be updated, but if it mistakenly is, it may not be
- // until after munmap is called.
- ASSERT_THAT(Unmap(), SyscallSucceeds());
-
- std::vector<char> buf(len);
- ASSERT_THAT(Read(buf.data(), buf.size()),
- SyscallSucceedsWithValue(buf.size()));
- // Cast to void* to avoid EXPECT_THAT assuming buf.data() is a
- // NUL-terminated C string. EXPECT_THAT will try to print a char* as a C
- // string, possibly overrunning the buffer.
- EXPECT_THAT(reinterpret_cast<void*>(buf.data()),
- EqualsMemory(std::string(len, '\0')));
-}
-
-// SIGBUS raised when reading or writing past end of a mapped file.
-TEST_P(MMapFileParamTest, SigBusDeath) {
- SetupGvisorDeathTest();
-
- uintptr_t addr;
- ASSERT_THAT(addr = Map(0, 2 * kPageSize, prot(), flags(), fd_.get(), 0),
- SyscallSucceeds());
-
- auto* start = reinterpret_cast<volatile char*>(addr + kPageSize);
-
- // MMapFileTest makes a file kPageSize/2 long. The entire first page should
- // be accessible, but anything beyond it should not be.
- if (prot() & PROT_WRITE) {
- // Write beyond first page.
- size_t len = strlen(kFileContents);
- EXPECT_EXIT(std::copy(kFileContents, kFileContents + len, start),
- ::testing::KilledBySignal(SIGBUS), "");
- } else {
- // Read beyond first page.
- std::vector<char> in(kPageSize);
- EXPECT_EXIT(std::copy(start, start + kPageSize, in.data()),
- ::testing::KilledBySignal(SIGBUS), "");
- }
-}
-
-// Tests that SIGBUS is not raised when reading from or writing to a
-// file-mapped page before EOF, even if part of the mapping extends beyond
-// EOF.
-//
-// See b/27877699.
-TEST_P(MMapFileParamTest, NoSigBusOnPagesBeforeEOF) {
- uintptr_t addr;
- ASSERT_THAT(addr = Map(0, 2 * kPageSize, prot(), flags(), fd_.get(), 0),
- SyscallSucceeds());
-
- // The test passes if this survives.
- auto* start = reinterpret_cast<volatile char*>(addr + (kPageSize / 2) + 1);
- size_t len = strlen(kFileContents);
- if (prot() & PROT_WRITE) {
- std::copy(kFileContents, kFileContents + len, start);
- } else {
- std::vector<char> in(len);
- std::copy(start, start + len, in.data());
- }
-}
-
-// Tests that SIGBUS is not raised when reading from or writing to a
-// file-mapped page that contains EOF, at offsets *after* the EOF.
-TEST_P(MMapFileParamTest, NoSigBusOnPageContainingEOF) {
- uintptr_t addr;
- ASSERT_THAT(addr = Map(0, 2 * kPageSize, prot(), flags(), fd_.get(), 0),
- SyscallSucceeds());
-
- // The test passes if this survives. (Technically addr+kPageSize/2 is already
- // beyond EOF, but +1 to check for fencepost errors.)
- auto* start = reinterpret_cast<volatile char*>(addr + (kPageSize / 2) + 1);
- size_t len = strlen(kFileContents);
- if (prot() & PROT_WRITE) {
- std::copy(kFileContents, kFileContents + len, start);
- } else {
- std::vector<char> in(len);
- std::copy(start, start + len, in.data());
- }
-}
-
-// Tests that reading from writable shared file-mapped pages succeeds.
-//
-// On most platforms this is trivial, but when the file is mapped via the sentry
-// page cache (which does not yet support writing to shared mappings), a bug
-// caused reads to fail unnecessarily on such mappings. See b/28913513.
-TEST_F(MMapFileTest, ReadingWritableSharedFilePageSucceeds) {
- uintptr_t addr;
- size_t len = strlen(kFileContents);
-
- ASSERT_THAT(addr = Map(0, 2 * kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED,
- fd_.get(), 0),
- SyscallSucceeds());
-
- std::vector<char> buf(kPageSize);
- // The test passes if this survives.
- std::copy(reinterpret_cast<volatile char*>(addr),
- reinterpret_cast<volatile char*>(addr) + len, buf.data());
-}
-
-// Tests that EFAULT is returned when invoking a syscall that requires the OS
-// to read past the end of the file (resulting in a fault in sentry context in
-// the gVisor case). See b/28913513.
-TEST_F(MMapFileTest, InternalSigBus) {
- uintptr_t addr;
- ASSERT_THAT(addr = Map(0, 2 * kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE,
- fd_.get(), 0),
- SyscallSucceeds());
-
- // This depends on the fact that gVisor implements pipes internally.
- int pipefd[2];
- ASSERT_THAT(pipe(pipefd), SyscallSucceeds());
- EXPECT_THAT(
- write(pipefd[1], reinterpret_cast<void*>(addr + kPageSize), kPageSize),
- SyscallFailsWithErrno(EFAULT));
-
- EXPECT_THAT(close(pipefd[0]), SyscallSucceeds());
- EXPECT_THAT(close(pipefd[1]), SyscallSucceeds());
-}
-
-// Like InternalSigBus, but tests the WriteZerosAt path by reading from
-// /dev/zero to a shared mapping (so that the SIGBUS isn't caught during
-// copy-on-write breaking).
-TEST_F(MMapFileTest, InternalSigBusZeroing) {
- uintptr_t addr;
- ASSERT_THAT(addr = Map(0, 2 * kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED,
- fd_.get(), 0),
- SyscallSucceeds());
-
- const FileDescriptor dev_zero =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/zero", O_RDONLY));
- EXPECT_THAT(read(dev_zero.get(), reinterpret_cast<void*>(addr + kPageSize),
- kPageSize),
- SyscallFailsWithErrno(EFAULT));
-}
-
-// Checks that mmaps with a length of uint64_t(-PAGE_SIZE + 1) or greater do not
-// induce a sentry panic (due to "rounding up" to 0).
-TEST_F(MMapTest, HugeLength) {
- EXPECT_THAT(Map(0, static_cast<uint64_t>(-kPageSize + 1), PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0),
- SyscallFailsWithErrno(ENOMEM));
-}
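-
-// Spelling out the arithmetic behind the comment above: with 4 KiB pages,
-// static_cast<uint64_t>(-kPageSize + 1) is 2^64 - 4095. Rounding that length
-// up to the next page multiple wraps to 0 in 64 bits, e.g.:
-//
-// uint64_t len = static_cast<uint64_t>(-kPageSize + 1); // 2^64 - 4095
-// uint64_t rounded = (len + kPageSize - 1) & ~uint64_t(kPageSize - 1);
-// // rounded == 0, since len + kPageSize - 1 overflows to exactly 2^64.
-//
-// A naive "round up, then validate" path would therefore see a zero length,
-// which is why the request must fail with ENOMEM instead.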
-
-// Tests for a specific gVisor MM caching bug.
-TEST_F(MMapTest, AccessCOWInvalidatesCachedSegments) {
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(Open(f.path(), O_RDWR));
- auto zero_fd = ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/zero", O_RDONLY));
-
- // Get a two-page private mapping and fill it with 1s.
- uintptr_t addr;
- ASSERT_THAT(addr = Map(0, 2 * kPageSize, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0),
- SyscallSucceeds());
- memset(addr_, 1, 2 * kPageSize);
- MaybeSave();
-
- // Fork to make the mapping copy-on-write.
- pid_t const pid = fork();
- if (pid == 0) {
- // The child process waits for the parent to SIGKILL it.
- while (true) {
- pause();
- }
- }
- ASSERT_THAT(pid, SyscallSucceeds());
- auto cleanup_child = Cleanup([&] {
- EXPECT_THAT(kill(pid, SIGKILL), SyscallSucceeds());
- int status;
- EXPECT_THAT(waitpid(pid, &status, 0), SyscallSucceedsWithValue(pid));
- });
-
- // Induce a read-only Access of the first page of the mapping, which will not
- // cause a copy. The usermem.Segment should be cached.
- ASSERT_THAT(PwriteFd(fd.get(), addr_, kPageSize, 0),
- SyscallSucceedsWithValue(kPageSize));
-
- // Induce a writable Access of both pages of the mapping. This should
- // invalidate the cached Segment.
- ASSERT_THAT(PreadFd(zero_fd.get(), addr_, 2 * kPageSize, 0),
- SyscallSucceedsWithValue(2 * kPageSize));
-
- // Induce a read-only Access of the first page of the mapping again. It should
- // read the 0s that were stored in the mapping by the read from /dev/zero. If
- // the read failed to invalidate the cached Segment, it will instead read the
- // 1s in the stale page.
- ASSERT_THAT(PwriteFd(fd.get(), addr_, kPageSize, 0),
- SyscallSucceedsWithValue(kPageSize));
- std::vector<char> buf(kPageSize);
- ASSERT_THAT(PreadFd(fd.get(), buf.data(), kPageSize, 0),
- SyscallSucceedsWithValue(kPageSize));
- for (size_t i = 0; i < kPageSize; i++) {
- ASSERT_EQ(0, buf[i]) << "at offset " << i;
- }
-}
-
-TEST_F(MMapTest, NoReserve) {
- const size_t kSize = 10 << 20; // 10 MiB
- uintptr_t addr;
- ASSERT_THAT(addr = Map(0, kSize, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0),
- SyscallSucceeds());
- EXPECT_GT(addr, 0);
-
- // Check that every page can be read and written. Technically, writing to
- // memory could SIGSEGV if no more memory is available. In gVisor that never
- // happens, because NORESERVE is ignored. On Linux a failure is possible, but
- // the allocation is small enough that it is highly likely to succeed.
- for (size_t j = 0; j < kSize; j += kPageSize) {
- EXPECT_EQ(0, reinterpret_cast<char*>(addr)[j]);
- reinterpret_cast<char*>(addr)[j] = j;
- }
-}
-
-// Map more than the gVisor page-cache map unit (64k) and ensure that
-// it is consistent with reading from the file.
-TEST_F(MMapFileTest, Bug38498194) {
- // Choose a size well beyond the 64k map unit.
- constexpr int kSize = 4 * 1024 * 1024;
- EXPECT_THAT(ftruncate(fd_.get(), kSize), SyscallSucceeds());
-
- // Map a large enough region so that multiple internal segments
- // are created to back the mapping.
- uintptr_t addr;
- ASSERT_THAT(
- addr = Map(0, kSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd_.get(), 0),
- SyscallSucceeds());
-
- std::vector<char> expect(kSize, 'a');
- std::copy(expect.data(), expect.data() + expect.size(),
- reinterpret_cast<volatile char*>(addr));
-
- // Trigger writeback for gVisor. On Linux, pages stay cached until the
- // kernel can no longer hold onto them.
- ASSERT_THAT(Unmap(), SyscallSucceeds());
-
- std::vector<char> buf(kSize);
- ASSERT_THAT(Read(buf.data(), buf.size()),
- SyscallSucceedsWithValue(buf.size()));
- EXPECT_EQ(buf, expect) << std::string(buf.data(), buf.size());
-}
-
-// Tests that reading from a file to a memory mapping of the same file does not
-// deadlock. See b/34813270.
-TEST_F(MMapFileTest, SelfRead) {
- uintptr_t addr;
- ASSERT_THAT(addr = Map(0, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED,
- fd_.get(), 0),
- SyscallSucceeds());
- EXPECT_THAT(Read(reinterpret_cast<char*>(addr), kPageSize / 2),
- SyscallSucceedsWithValue(kPageSize / 2));
- // The resulting file contents are poorly-specified and irrelevant.
-}
-
-// Tests that writing to a file from a memory mapping of the same file does not
-// deadlock. Regression test for b/34813270.
-TEST_F(MMapFileTest, SelfWrite) {
- uintptr_t addr;
- ASSERT_THAT(addr = Map(0, kPageSize, PROT_READ, MAP_SHARED, fd_.get(), 0),
- SyscallSucceeds());
- EXPECT_THAT(Write(reinterpret_cast<char*>(addr), kPageSize / 2),
- SyscallSucceedsWithValue(kPageSize / 2));
- // The resulting file contents are poorly-specified and irrelevant.
-}
-
-TEST(MMapDeathTest, TruncateAfterCOWBreak) {
- SetupGvisorDeathTest();
-
- // Create and map a single-page file.
- auto const temp_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- auto const fd = ASSERT_NO_ERRNO_AND_VALUE(Open(temp_file.path(), O_RDWR));
- ASSERT_THAT(ftruncate(fd.get(), kPageSize), SyscallSucceeds());
- auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(Mmap(
- nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd.get(), 0));
-
- // Write to this mapping, causing the page to be copied for write.
- memset(mapping.ptr(), 'a', mapping.len());
- MaybeSave(); // Trigger a co-operative save cycle.
-
- // Truncate the file and expect it to invalidate the copied page.
- ASSERT_THAT(ftruncate(fd.get(), 0), SyscallSucceeds());
- EXPECT_EXIT(*reinterpret_cast<volatile char*>(mapping.ptr()),
- ::testing::KilledBySignal(SIGBUS), "");
-}
-
-// Regression test for #147.
-TEST(MMapNoFixtureTest, MapReadOnlyAfterCreateWriteOnly) {
- std::string filename = NewTempAbsPath();
-
- // We have to create the file O_RDONLY to reproduce the bug because
- // fsgofer.localFile.Create() silently upgrades O_WRONLY to O_RDWR, causing
- // the cached "write-only" FD to be read/write and therefore usable by mmap().
- auto const ro_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(filename, O_RDONLY | O_CREAT | O_EXCL, 0666));
-
- // Get a write-only FD for the same file, which should be ignored by mmap()
- // (but isn't in #147).
- auto const wo_fd = ASSERT_NO_ERRNO_AND_VALUE(Open(filename, O_WRONLY));
- ASSERT_THAT(ftruncate(wo_fd.get(), kPageSize), SyscallSucceeds());
-
- auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
- Mmap(nullptr, kPageSize, PROT_READ, MAP_SHARED, ro_fd.get(), 0));
- std::vector<char> buf(kPageSize);
- // The test passes if this survives.
- std::copy(static_cast<char*>(mapping.ptr()),
- static_cast<char*>(mapping.endptr()), buf.data());
-}
-
-// Conditional on MAP_32BIT.
-// This flag is supported only on x86-64, for 64-bit programs.
-#ifdef __x86_64__
-
-TEST(MMapNoFixtureTest, Map32Bit) {
- auto const mapping = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_NONE, MAP_PRIVATE | MAP_32BIT));
- EXPECT_LT(mapping.addr(), static_cast<uintptr_t>(1) << 32);
- EXPECT_LE(mapping.endaddr(), static_cast<uintptr_t>(1) << 32);
-}
-
-#endif // defined(__x86_64__)
-
-INSTANTIATE_TEST_SUITE_P(
- ReadWriteSharedPrivate, MMapFileParamTest,
- ::testing::Combine(::testing::ValuesIn({
- PROT_READ,
- PROT_WRITE,
- PROT_READ | PROT_WRITE,
- }),
- ::testing::ValuesIn({MAP_SHARED, MAP_PRIVATE})));
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/mount.cc b/test/syscalls/linux/mount.cc
deleted file mode 100644
index 3c7311782..000000000
--- a/test/syscalls/linux/mount.cc
+++ /dev/null
@@ -1,385 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <stdio.h>
-#include <sys/mount.h>
-#include <sys/stat.h>
-#include <unistd.h>
-
-#include <functional>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/strings/str_split.h"
-#include "absl/strings/string_view.h"
-#include "absl/time/time.h"
-#include "test/util/capability_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/mount_util.h"
-#include "test/util/multiprocess_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/save_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-using ::testing::AnyOf;
-using ::testing::Contains;
-using ::testing::Pair;
-
-TEST(MountTest, MountBadFilesystem) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
-
- // Linux expects a valid target before it checks the file system name.
- auto const dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- EXPECT_THAT(mount("", dir.path().c_str(), "foobar", 0, ""),
- SyscallFailsWithErrno(ENODEV));
-}
-
-TEST(MountTest, MountInvalidTarget) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
-
- auto const dir = NewTempAbsPath();
- EXPECT_THAT(mount("", dir.c_str(), "tmpfs", 0, ""),
- SyscallFailsWithErrno(ENOENT));
-}
-
-TEST(MountTest, MountPermDenied) {
- // Clear CAP_SYS_ADMIN.
- AutoCapability cap(CAP_SYS_ADMIN, false);
-
- // Linux expects a valid target before checking capability.
- auto const dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- EXPECT_THAT(mount("", dir.path().c_str(), "", 0, ""),
- SyscallFailsWithErrno(EPERM));
-}
-
-TEST(MountTest, UmountPermDenied) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
-
- auto const dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto const mount =
- ASSERT_NO_ERRNO_AND_VALUE(Mount("", dir.path(), "tmpfs", 0, "", 0));
-
- // Drop privileges in another thread, so we can still unmount the mounted
- // directory.
- ScopedThread([&]() {
- EXPECT_NO_ERRNO(SetCapability(CAP_SYS_ADMIN, false));
- EXPECT_THAT(umount(dir.path().c_str()), SyscallFailsWithErrno(EPERM));
- });
-}
-
-TEST(MountTest, MountOverBusy) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
-
- auto const dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto const fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(JoinPath(dir.path(), "foo"), O_CREAT | O_RDWR, 0777));
-
- // Should be able to mount over a busy directory.
- ASSERT_NO_ERRNO_AND_VALUE(Mount("", dir.path(), "tmpfs", 0, "", 0));
-}
-
-TEST(MountTest, OpenFileBusy) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
-
- auto const dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto const mount = ASSERT_NO_ERRNO_AND_VALUE(
- Mount("", dir.path(), "tmpfs", 0, "mode=0700", 0));
- auto const fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(JoinPath(dir.path(), "foo"), O_CREAT | O_RDWR, 0777));
-
- // An open file should prevent unmounting.
- EXPECT_THAT(umount(dir.path().c_str()), SyscallFailsWithErrno(EBUSY));
-}
-
-TEST(MountTest, UmountDetach) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
-
- // structure:
- //
- // dir (mount point)
- // subdir
- // file
- //
- // We show that we can walk around in the mount after detach-unmounting
- // dir.
- //
- // We show that even though dir is unreachable from outside the mount, we can
- // still reach dir's (former) parent!
- auto const dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- const struct stat before = ASSERT_NO_ERRNO_AND_VALUE(Stat(dir.path()));
- auto mount =
- ASSERT_NO_ERRNO_AND_VALUE(Mount("", dir.path(), "tmpfs", 0, "mode=0700",
- /* umountflags= */ MNT_DETACH));
- const struct stat after = ASSERT_NO_ERRNO_AND_VALUE(Stat(dir.path()));
- EXPECT_FALSE(before.st_dev == after.st_dev && before.st_ino == after.st_ino)
- << "mount point has device number " << before.st_dev
- << " and inode number " << before.st_ino << " before and after mount";
-
- // Create files in the new mount.
- constexpr char kContents[] = "no no no";
- auto const subdir =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(dir.path()));
- auto const file = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateFileWith(dir.path(), kContents, 0777));
-
- auto const dir_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(subdir.path(), O_RDONLY | O_DIRECTORY));
- auto const fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDONLY));
-
- // Unmount the tmpfs.
- mount.Release()();
-
- // Inode numbers for gofer-accessed files may change across save/restore.
- //
- // For overlayfs, if xino option is not enabled and if all overlayfs layers do
- // not belong to the same filesystem then "the value of st_ino for directory
- // objects may not be persistent and could change even while the overlay
- // filesystem is mounted." -- Documentation/filesystems/overlayfs.txt
- if (!IsRunningWithSaveRestore() &&
- !ASSERT_NO_ERRNO_AND_VALUE(IsOverlayfs(dir.path()))) {
- const struct stat after2 = ASSERT_NO_ERRNO_AND_VALUE(Stat(dir.path()));
- EXPECT_EQ(before.st_ino, after2.st_ino);
- }
-
- // Can still read file after unmounting.
- std::vector<char> buf(sizeof(kContents));
- EXPECT_THAT(ReadFd(fd.get(), buf.data(), buf.size()), SyscallSucceeds());
-
- // Walk to dir.
- auto const mounted_dir = ASSERT_NO_ERRNO_AND_VALUE(
- OpenAt(dir_fd.get(), "..", O_DIRECTORY | O_RDONLY));
- // Walk to dir/file.
- auto const fd_again = ASSERT_NO_ERRNO_AND_VALUE(
- OpenAt(mounted_dir.get(), std::string(Basename(file.path())), O_RDONLY));
-
- std::vector<char> buf2(sizeof(kContents));
- EXPECT_THAT(ReadFd(fd_again.get(), buf2.data(), buf2.size()),
- SyscallSucceeds());
- EXPECT_EQ(buf, buf2);
-
- // Walking outside the unmounted realm should still work, too!
- auto const dir_parent = ASSERT_NO_ERRNO_AND_VALUE(
- OpenAt(mounted_dir.get(), "..", O_DIRECTORY | O_RDONLY));
-}
-
-TEST(MountTest, ActiveSubmountBusy) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
-
- auto const dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto const mount1 = ASSERT_NO_ERRNO_AND_VALUE(
- Mount("", dir.path(), "tmpfs", 0, "mode=0700", 0));
-
- auto const dir2 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(dir.path()));
- auto const mount2 =
- ASSERT_NO_ERRNO_AND_VALUE(Mount("", dir2.path(), "tmpfs", 0, "", 0));
-
- // Since dir now has an active submount, we should not be able to unmount
- // it.
- EXPECT_THAT(umount(dir.path().c_str()), SyscallFailsWithErrno(EBUSY));
-}
-
-TEST(MountTest, MountTmpfs) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
-
- auto const dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- // NOTE(b/129868551): Inode IDs are only stable across S/R if we have an open
- // FD for that inode. Since we are going to compare inode IDs below, get a
- // FileDescriptor for this directory here, which will be closed automatically
- // at the end of the test.
- auto const fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(dir.path(), O_DIRECTORY | O_RDONLY));
-
- const struct stat before = ASSERT_NO_ERRNO_AND_VALUE(Stat(dir.path()));
-
- {
- auto const mount = ASSERT_NO_ERRNO_AND_VALUE(
- Mount("", dir.path(), "tmpfs", 0, "mode=0700", 0));
-
- const struct stat s = ASSERT_NO_ERRNO_AND_VALUE(Stat(dir.path()));
- EXPECT_EQ(s.st_mode, S_IFDIR | 0700);
- EXPECT_FALSE(before.st_dev == s.st_dev && before.st_ino == s.st_ino)
- << "mount point has device number " << before.st_dev
- << " and inode number " << before.st_ino << " before and after mount";
-
- EXPECT_NO_ERRNO(Open(JoinPath(dir.path(), "foo"), O_CREAT | O_RDWR, 0777));
- }
-
- // Now that dir is unmounted again, we should have the old inode back.
- //
- // Inode numbers for gofer-accessed files may change across save/restore.
- //
- // For overlayfs, if xino option is not enabled and if all overlayfs layers do
- // not belong to the same filesystem then "the value of st_ino for directory
- // objects may not be persistent and could change even while the overlay
- // filesystem is mounted." -- Documentation/filesystems/overlayfs.txt
- if (!IsRunningWithSaveRestore() &&
- !ASSERT_NO_ERRNO_AND_VALUE(IsOverlayfs(dir.path()))) {
- const struct stat after = ASSERT_NO_ERRNO_AND_VALUE(Stat(dir.path()));
- EXPECT_EQ(before.st_ino, after.st_ino);
- }
-}
-
-TEST(MountTest, MountTmpfsMagicValIgnored) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
-
- auto const dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- auto const mount = ASSERT_NO_ERRNO_AND_VALUE(
- Mount("", dir.path(), "tmpfs", MS_MGC_VAL, "mode=0700", 0));
-}
-
-// Passing nullptr to data is equivalent to "".
-TEST(MountTest, NullData) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
-
- auto const dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- EXPECT_THAT(mount("", dir.path().c_str(), "tmpfs", 0, nullptr),
- SyscallSucceeds());
- EXPECT_THAT(umount2(dir.path().c_str(), 0), SyscallSucceeds());
-}
-
-TEST(MountTest, MountReadonly) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
-
- auto const dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto const mount = ASSERT_NO_ERRNO_AND_VALUE(
- Mount("", dir.path(), "tmpfs", MS_RDONLY, "mode=0777", 0));
-
- const struct stat s = ASSERT_NO_ERRNO_AND_VALUE(Stat(dir.path()));
- EXPECT_EQ(s.st_mode, S_IFDIR | 0777);
-
- std::string const filename = JoinPath(dir.path(), "foo");
- EXPECT_THAT(open(filename.c_str(), O_RDWR | O_CREAT, 0777),
- SyscallFailsWithErrno(EROFS));
-}
-
-PosixErrorOr<absl::Time> ATime(absl::string_view file) {
- struct stat s = {};
- if (stat(std::string(file).c_str(), &s) == -1) {
- return PosixError(errno, "stat failed");
- }
- return absl::TimeFromTimespec(s.st_atim);
-}
-
-TEST(MountTest, MountNoAtime) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
-
- auto const dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto const mount = ASSERT_NO_ERRNO_AND_VALUE(
- Mount("", dir.path(), "tmpfs", MS_NOATIME, "mode=0777", 0));
-
- std::string const contents = "No no no, don't follow the instructions!";
- auto const file = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateFileWith(dir.path(), contents, 0777));
-
- absl::Time const before = ASSERT_NO_ERRNO_AND_VALUE(ATime(file.path()));
-
- // Reading from the file would normally update the atime, but the MS_NOATIME
- // flag should prevent that.
- auto const fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR));
- char buf[100];
- int read_n;
- ASSERT_THAT(read_n = read(fd.get(), buf, sizeof(buf)), SyscallSucceeds());
- EXPECT_EQ(std::string(buf, read_n), contents);
-
- absl::Time const after = ASSERT_NO_ERRNO_AND_VALUE(ATime(file.path()));
-
- // Expect that atime hasn't changed.
- EXPECT_EQ(before, after);
-}
-
-TEST(MountTest, MountNoExec) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
-
- auto const dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto const mount = ASSERT_NO_ERRNO_AND_VALUE(
- Mount("", dir.path(), "tmpfs", MS_NOEXEC, "mode=0777", 0));
-
- std::string const contents = "No no no, don't follow the instructions!";
- auto const file = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateFileWith(dir.path(), contents, 0777));
-
- int execve_errno;
- ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(file.path(), {}, {}, nullptr, &execve_errno));
- EXPECT_EQ(execve_errno, EACCES);
-}
-
-TEST(MountTest, RenameRemoveMountPoint) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
-
- auto const dir_parent = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto const dir =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(dir_parent.path()));
- auto const new_dir = NewTempAbsPath();
-
- auto const mount =
- ASSERT_NO_ERRNO_AND_VALUE(Mount("", dir.path(), "tmpfs", 0, "", 0));
-
- ASSERT_THAT(rename(dir.path().c_str(), new_dir.c_str()),
- SyscallFailsWithErrno(EBUSY));
-
- ASSERT_THAT(rmdir(dir.path().c_str()), SyscallFailsWithErrno(EBUSY));
-}
-
-TEST(MountTest, MountInfo) {
- SKIP_IF(IsRunningWithVFS1());
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
-
- auto const dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto const mount = ASSERT_NO_ERRNO_AND_VALUE(
- Mount("", dir.path(), "tmpfs", MS_NOEXEC, "mode=0123", 0));
- const std::vector<ProcMountsEntry> mounts =
- ASSERT_NO_ERRNO_AND_VALUE(ProcSelfMountsEntries());
- for (const auto& e : mounts) {
- if (e.mount_point == dir.path()) {
- EXPECT_EQ(e.fstype, "tmpfs");
- auto mopts = ParseMountOptions(e.mount_opts);
- EXPECT_THAT(mopts, AnyOf(Contains(Pair("mode", "0123")),
- Contains(Pair("mode", "123"))));
- }
- }
-
- const std::vector<ProcMountInfoEntry> mountinfo =
- ASSERT_NO_ERRNO_AND_VALUE(ProcSelfMountInfoEntries());
-
- for (auto const& e : mountinfo) {
- if (e.mount_point == dir.path()) {
- EXPECT_EQ(e.fstype, "tmpfs");
- auto mopts = ParseMountOptions(e.super_opts);
- EXPECT_THAT(mopts, AnyOf(Contains(Pair("mode", "0123")),
- Contains(Pair("mode", "123"))));
- }
- }
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
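// A minimal standalone sketch (not part of the deleted file above) of the raw
// mount(2)/umount2(2) calls that the Mount() test helper wraps. It assumes
// CAP_SYS_ADMIN and an existing empty directory at the placeholder path
// /tmp/mnt; error handling is reduced to perror() for brevity.
#include <stdio.h>
#include <sys/mount.h>

int MountTmpfsSketch() {
  // Mount a tmpfs with a restrictive mode, as in the MountTmpfs test above;
  // the "mode=0700" string is the filesystem-specific data argument.
  if (mount("", "/tmp/mnt", "tmpfs", MS_NOEXEC, "mode=0700") != 0) {
    perror("mount");
    return 1;
  }
  // The mount and its options are now visible in /proc/self/mounts and
  // /proc/self/mountinfo, which the MountInfo test above parses.
  if (umount2("/tmp/mnt", 0) != 0) {
    perror("umount2");
    return 1;
  }
  return 0;
}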
diff --git a/test/syscalls/linux/mremap.cc b/test/syscalls/linux/mremap.cc
deleted file mode 100644
index f0e5f7d82..000000000
--- a/test/syscalls/linux/mremap.cc
+++ /dev/null
@@ -1,492 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <string.h>
-#include <sys/mman.h>
-
-#include <string>
-
-#include "gmock/gmock.h"
-#include "absl/strings/string_view.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/logging.h"
-#include "test/util/memory_util.h"
-#include "test/util/multiprocess_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-using ::testing::_;
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// Fixture for mremap tests parameterized by mmap flags.
-using MremapParamTest = ::testing::TestWithParam<int>;
-
-TEST_P(MremapParamTest, Noop) {
- Mapping const m =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(kPageSize, PROT_NONE, GetParam()));
-
- ASSERT_THAT(Mremap(m.ptr(), kPageSize, kPageSize, 0, nullptr),
- IsPosixErrorOkAndHolds(m.ptr()));
- EXPECT_TRUE(IsMapped(m.addr()));
-}
-
-TEST_P(MremapParamTest, InPlace_ShrinkingWholeVMA) {
- Mapping const m =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(2 * kPageSize, PROT_NONE, GetParam()));
-
- const auto rest = [&] {
- // N.B. we must be in a single-threaded subprocess to ensure a
- // background thread doesn't concurrently map the second page.
- void* addr = mremap(m.ptr(), 2 * kPageSize, kPageSize, 0, nullptr);
- TEST_PCHECK_MSG(addr != MAP_FAILED, "mremap failed");
- TEST_CHECK(addr == m.ptr());
- MaybeSave();
-
- TEST_CHECK(IsMapped(m.addr()));
- TEST_CHECK(!IsMapped(m.addr() + kPageSize));
- };
-
- EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));
-}
-
-TEST_P(MremapParamTest, InPlace_ShrinkingPartialVMA) {
- Mapping const m =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(3 * kPageSize, PROT_NONE, GetParam()));
-
- const auto rest = [&] {
- void* addr = mremap(m.ptr(), 2 * kPageSize, kPageSize, 0, nullptr);
- TEST_PCHECK_MSG(addr != MAP_FAILED, "mremap failed");
- TEST_CHECK(addr == m.ptr());
- MaybeSave();
-
- TEST_CHECK(IsMapped(m.addr()));
- TEST_CHECK(!IsMapped(m.addr() + kPageSize));
- TEST_CHECK(IsMapped(m.addr() + 2 * kPageSize));
- };
-
- EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));
-}
-
-TEST_P(MremapParamTest, InPlace_ShrinkingAcrossVMAs) {
- Mapping const m =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(3 * kPageSize, PROT_READ, GetParam()));
- // Changing permissions on the first page forces it to become a separate vma.
- ASSERT_THAT(mprotect(m.ptr(), kPageSize, PROT_NONE), SyscallSucceeds());
-
- const auto rest = [&] {
- // Both old_size and new_size now span two vmas; mremap
- // shouldn't care.
- void* addr = mremap(m.ptr(), 3 * kPageSize, 2 * kPageSize, 0, nullptr);
- TEST_PCHECK_MSG(addr != MAP_FAILED, "mremap failed");
- TEST_CHECK(addr == m.ptr());
- MaybeSave();
-
- TEST_CHECK(IsMapped(m.addr()));
- TEST_CHECK(IsMapped(m.addr() + kPageSize));
- TEST_CHECK(!IsMapped(m.addr() + 2 * kPageSize));
- };
-
- EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));
-}
-
-TEST_P(MremapParamTest, InPlace_ExpansionSuccess) {
- Mapping const m =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(2 * kPageSize, PROT_NONE, GetParam()));
-
- const auto rest = [&] {
- // Unmap the second page so that the first can be expanded back into it.
- //
- // N.B. we must be in a single-threaded subprocess to ensure a
- // background thread doesn't concurrently map this page.
- TEST_PCHECK(
- munmap(reinterpret_cast<void*>(m.addr() + kPageSize), kPageSize) == 0);
- MaybeSave();
-
- void* addr = mremap(m.ptr(), kPageSize, 2 * kPageSize, 0, nullptr);
- TEST_PCHECK_MSG(addr != MAP_FAILED, "mremap failed");
- TEST_CHECK(addr == m.ptr());
- MaybeSave();
-
- TEST_CHECK(IsMapped(m.addr()));
- TEST_CHECK(IsMapped(m.addr() + kPageSize));
- };
-
- EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));
-}
-
-TEST_P(MremapParamTest, InPlace_ExpansionFailure) {
- Mapping const m =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(3 * kPageSize, PROT_NONE, GetParam()));
-
- const auto rest = [&] {
- // Unmap the second page, leaving a one-page hole. Trying to expand the
- // first page to three pages should fail since the original third page
- // is still mapped.
- TEST_PCHECK(
- munmap(reinterpret_cast<void*>(m.addr() + kPageSize), kPageSize) == 0);
- MaybeSave();
-
- void* addr = mremap(m.ptr(), kPageSize, 3 * kPageSize, 0, nullptr);
- TEST_CHECK_MSG(addr == MAP_FAILED, "mremap unexpectedly succeeded");
- TEST_PCHECK_MSG(errno == ENOMEM, "mremap failed with wrong errno");
- MaybeSave();
-
- TEST_CHECK(IsMapped(m.addr()));
- TEST_CHECK(!IsMapped(m.addr() + kPageSize));
- TEST_CHECK(IsMapped(m.addr() + 2 * kPageSize));
- };
-
- EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));
-}
-
-TEST_P(MremapParamTest, MayMove_Expansion) {
- Mapping const m =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(3 * kPageSize, PROT_NONE, GetParam()));
-
- const auto rest = [&] {
- // Unmap the second page, leaving a one-page hole. Trying to expand the
- // first page to three pages with MREMAP_MAYMOVE should force the
- // mapping to be relocated since the original third page is still
- // mapped.
- TEST_PCHECK(
- munmap(reinterpret_cast<void*>(m.addr() + kPageSize), kPageSize) == 0);
- MaybeSave();
-
- void* addr2 =
- mremap(m.ptr(), kPageSize, 3 * kPageSize, MREMAP_MAYMOVE, nullptr);
- TEST_PCHECK_MSG(addr2 != MAP_FAILED, "mremap failed");
- MaybeSave();
-
- const Mapping m2 = Mapping(addr2, 3 * kPageSize);
- TEST_CHECK(m.addr() != m2.addr());
-
- TEST_CHECK(!IsMapped(m.addr()));
- TEST_CHECK(!IsMapped(m.addr() + kPageSize));
- TEST_CHECK(IsMapped(m.addr() + 2 * kPageSize));
- TEST_CHECK(IsMapped(m2.addr()));
- TEST_CHECK(IsMapped(m2.addr() + kPageSize));
- TEST_CHECK(IsMapped(m2.addr() + 2 * kPageSize));
- };
-
- EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));
-}
-
-TEST_P(MremapParamTest, Fixed_SourceAndDestinationCannotOverlap) {
- Mapping const m =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(kPageSize, PROT_NONE, GetParam()));
-
- ASSERT_THAT(Mremap(m.ptr(), kPageSize, kPageSize,
- MREMAP_MAYMOVE | MREMAP_FIXED, m.ptr()),
- PosixErrorIs(EINVAL, _));
- EXPECT_TRUE(IsMapped(m.addr()));
-}
-
-TEST_P(MremapParamTest, Fixed_SameSize) {
- Mapping const src =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(kPageSize, PROT_NONE, GetParam()));
- Mapping const dst =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(kPageSize, PROT_NONE, GetParam()));
-
- const auto rest = [&] {
- // Unmap dst to create a hole.
- TEST_PCHECK(munmap(dst.ptr(), kPageSize) == 0);
- MaybeSave();
-
- void* addr = mremap(src.ptr(), kPageSize, kPageSize,
- MREMAP_MAYMOVE | MREMAP_FIXED, dst.ptr());
- TEST_PCHECK_MSG(addr != MAP_FAILED, "mremap failed");
- TEST_CHECK(addr == dst.ptr());
- MaybeSave();
-
- TEST_CHECK(!IsMapped(src.addr()));
- TEST_CHECK(IsMapped(dst.addr()));
- };
-
- EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));
-}
-
-TEST_P(MremapParamTest, Fixed_SameSize_Unmapping) {
- // Like the Fixed_SameSize case, but expect mremap to unmap the destination
- // automatically.
- Mapping const src =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(kPageSize, PROT_NONE, GetParam()));
- Mapping const dst =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(kPageSize, PROT_NONE, GetParam()));
-
- const auto rest = [&] {
- void* addr = mremap(src.ptr(), kPageSize, kPageSize,
- MREMAP_MAYMOVE | MREMAP_FIXED, dst.ptr());
- TEST_PCHECK_MSG(addr != MAP_FAILED, "mremap failed");
- TEST_CHECK(addr == dst.ptr());
- MaybeSave();
-
- TEST_CHECK(!IsMapped(src.addr()));
- TEST_CHECK(IsMapped(dst.addr()));
- };
-
- EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));
-}
-
-TEST_P(MremapParamTest, Fixed_ShrinkingWholeVMA) {
- Mapping const src =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(2 * kPageSize, PROT_NONE, GetParam()));
- Mapping const dst =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(2 * kPageSize, PROT_NONE, GetParam()));
-
- const auto rest = [&] {
- // Unmap dst so we can check that mremap does not keep the
- // second page.
- TEST_PCHECK(munmap(dst.ptr(), 2 * kPageSize) == 0);
- MaybeSave();
-
- void* addr = mremap(src.ptr(), 2 * kPageSize, kPageSize,
- MREMAP_MAYMOVE | MREMAP_FIXED, dst.ptr());
- TEST_PCHECK_MSG(addr != MAP_FAILED, "mremap failed");
- TEST_CHECK(addr == dst.ptr());
- MaybeSave();
-
- TEST_CHECK(!IsMapped(src.addr()));
- TEST_CHECK(!IsMapped(src.addr() + kPageSize));
- TEST_CHECK(IsMapped(dst.addr()));
- TEST_CHECK(!IsMapped(dst.addr() + kPageSize));
- };
-
- EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));
-}
-
-TEST_P(MremapParamTest, Fixed_ShrinkingPartialVMA) {
- Mapping const src =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(3 * kPageSize, PROT_NONE, GetParam()));
- Mapping const dst =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(2 * kPageSize, PROT_NONE, GetParam()));
-
- const auto rest = [&] {
- // Unmap dst so we can check that mremap does not keep the
- // second page.
- TEST_PCHECK(munmap(dst.ptr(), 2 * kPageSize) == 0);
- MaybeSave();
-
- void* addr = mremap(src.ptr(), 2 * kPageSize, kPageSize,
- MREMAP_MAYMOVE | MREMAP_FIXED, dst.ptr());
- TEST_PCHECK_MSG(addr != MAP_FAILED, "mremap failed");
- TEST_CHECK(addr == dst.ptr());
- MaybeSave();
-
- TEST_CHECK(!IsMapped(src.addr()));
- TEST_CHECK(!IsMapped(src.addr() + kPageSize));
- TEST_CHECK(IsMapped(src.addr() + 2 * kPageSize));
- TEST_CHECK(IsMapped(dst.addr()));
- TEST_CHECK(!IsMapped(dst.addr() + kPageSize));
- };
-
- EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));
-}
-
-TEST_P(MremapParamTest, Fixed_ShrinkingAcrossVMAs) {
- Mapping const src =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(3 * kPageSize, PROT_READ, GetParam()));
- // Changing permissions on the first page forces it to become a separate vma.
- ASSERT_THAT(mprotect(src.ptr(), kPageSize, PROT_NONE), SyscallSucceeds());
- Mapping const dst =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(2 * kPageSize, PROT_NONE, GetParam()));
-
- const auto rest = [&] {
- // Unlike flags=0, MREMAP_FIXED requires that [old_address,
- // old_address+new_size) only spans a single vma.
- void* addr = mremap(src.ptr(), 3 * kPageSize, 2 * kPageSize,
- MREMAP_MAYMOVE | MREMAP_FIXED, dst.ptr());
- TEST_CHECK_MSG(addr == MAP_FAILED, "mremap unexpectedly succeeded");
- TEST_PCHECK_MSG(errno == EFAULT, "mremap failed with wrong errno");
- MaybeSave();
-
- TEST_CHECK(IsMapped(src.addr()));
- TEST_CHECK(IsMapped(src.addr() + kPageSize));
- // Despite failing, mremap should have unmapped [old_address+new_size,
- // old_address+old_size) (i.e. the third page).
- TEST_CHECK(!IsMapped(src.addr() + 2 * kPageSize));
- // Despite failing, mremap should have unmapped the destination pages.
- TEST_CHECK(!IsMapped(dst.addr()));
- TEST_CHECK(!IsMapped(dst.addr() + kPageSize));
- };
-
- EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));
-}
-
-TEST_P(MremapParamTest, Fixed_Expansion) {
- Mapping const src =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(kPageSize, PROT_NONE, GetParam()));
- Mapping const dst =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(2 * kPageSize, PROT_NONE, GetParam()));
-
- const auto rest = [&] {
- // Unmap dst so we can check that mremap actually maps all pages
- // at the destination.
- TEST_PCHECK(munmap(dst.ptr(), 2 * kPageSize) == 0);
- MaybeSave();
-
- void* addr = mremap(src.ptr(), kPageSize, 2 * kPageSize,
- MREMAP_MAYMOVE | MREMAP_FIXED, dst.ptr());
- TEST_PCHECK_MSG(addr != MAP_FAILED, "mremap failed");
- TEST_CHECK(addr == dst.ptr());
- MaybeSave();
-
- TEST_CHECK(!IsMapped(src.addr()));
- TEST_CHECK(IsMapped(dst.addr()));
- TEST_CHECK(IsMapped(dst.addr() + kPageSize));
- };
-
- EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));
-}
-
-INSTANTIATE_TEST_SUITE_P(PrivateShared, MremapParamTest,
- ::testing::Values(MAP_PRIVATE, MAP_SHARED));
-
-// mremap with old_size == 0 only works with MAP_SHARED after Linux 4.14
-// (dba58d3b8c50 "mm/mremap: fail map duplication attempts for private
-// mappings").
-
-TEST(MremapTest, InPlace_Copy) {
- Mapping const m =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(kPageSize, PROT_NONE, MAP_SHARED));
- EXPECT_THAT(Mremap(m.ptr(), 0, kPageSize, 0, nullptr),
- PosixErrorIs(ENOMEM, _));
-}
-
-TEST(MremapTest, MayMove_Copy) {
- Mapping const m =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(kPageSize, PROT_NONE, MAP_SHARED));
-
- // Remainder of this test executes in a subprocess to ensure that if mremap
- // incorrectly removes m, it is not remapped by another thread.
- const auto rest = [&] {
- void* ptr = mremap(m.ptr(), 0, kPageSize, MREMAP_MAYMOVE, nullptr);
- MaybeSave();
- TEST_PCHECK_MSG(ptr != MAP_FAILED, "mremap failed");
- TEST_CHECK(ptr != m.ptr());
- TEST_CHECK(IsMapped(m.addr()));
- TEST_CHECK(IsMapped(reinterpret_cast<uintptr_t>(ptr)));
- };
- EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));
-}
-
-TEST(MremapTest, MustMove_Copy) {
- Mapping const src =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(kPageSize, PROT_NONE, MAP_SHARED));
- Mapping const dst =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(kPageSize, PROT_NONE, MAP_PRIVATE));
-
- // Remainder of this test executes in a subprocess to ensure that if mremap
- // incorrectly removes src, it is not remapped by another thread.
- const auto rest = [&] {
- void* ptr = mremap(src.ptr(), 0, kPageSize, MREMAP_MAYMOVE | MREMAP_FIXED,
- dst.ptr());
- MaybeSave();
- TEST_PCHECK_MSG(ptr != MAP_FAILED, "mremap failed");
- TEST_CHECK(ptr == dst.ptr());
- TEST_CHECK(IsMapped(src.addr()));
- TEST_CHECK(IsMapped(dst.addr()));
- };
- EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));
-}
-
-void ExpectAllBytesAre(absl::string_view v, char c) {
- for (size_t i = 0; i < v.size(); i++) {
- ASSERT_EQ(v[i], c) << "at offset " << i;
- }
-}
-
-TEST(MremapTest, ExpansionPreservesCOWPagesAndExposesNewFilePages) {
- // Create a file with 3 pages. The first is filled with 'a', the second is
- // filled with 'b', and the third is filled with 'c'.
- TempPath const file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR));
- ASSERT_THAT(WriteFd(fd.get(), std::string(kPageSize, 'a').c_str(), kPageSize),
- SyscallSucceedsWithValue(kPageSize));
- ASSERT_THAT(WriteFd(fd.get(), std::string(kPageSize, 'b').c_str(), kPageSize),
- SyscallSucceedsWithValue(kPageSize));
- ASSERT_THAT(WriteFd(fd.get(), std::string(kPageSize, 'c').c_str(), kPageSize),
- SyscallSucceedsWithValue(kPageSize));
-
- // Create a private mapping of the first 2 pages, and fill the second page
- // with 'd'.
- Mapping const src = ASSERT_NO_ERRNO_AND_VALUE(Mmap(nullptr, 2 * kPageSize,
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE, fd.get(), 0));
- memset(reinterpret_cast<void*>(src.addr() + kPageSize), 'd', kPageSize);
- MaybeSave();
-
- // Move the mapping while expanding it to 3 pages. The resulting mapping
- // should contain the original first page of the file (filled with 'a'),
- // followed by the private copy of the second page (filled with 'd'), followed
- // by the newly-mapped third page of the file (filled with 'c').
- Mapping const dst = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(3 * kPageSize, PROT_NONE, MAP_PRIVATE));
- ASSERT_THAT(Mremap(src.ptr(), 2 * kPageSize, 3 * kPageSize,
- MREMAP_MAYMOVE | MREMAP_FIXED, dst.ptr()),
- IsPosixErrorOkAndHolds(dst.ptr()));
- auto const v = dst.view();
- ExpectAllBytesAre(v.substr(0, kPageSize), 'a');
- ExpectAllBytesAre(v.substr(kPageSize, kPageSize), 'd');
- ExpectAllBytesAre(v.substr(2 * kPageSize, kPageSize), 'c');
-}
-
-TEST(MremapDeathTest, SharedAnon) {
- SetupGvisorDeathTest();
-
- // Reserve 4 pages of address space.
- Mapping const reserved = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(4 * kPageSize, PROT_NONE, MAP_PRIVATE));
-
- // Create a 2-page shared anonymous mapping at the beginning of the
- // reservation. Fill the first page with 'a' and the second with 'b'.
- Mapping const m = ASSERT_NO_ERRNO_AND_VALUE(
- Mmap(reserved.ptr(), 2 * kPageSize, PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0));
- memset(m.ptr(), 'a', kPageSize);
- memset(reinterpret_cast<void*>(m.addr() + kPageSize), 'b', kPageSize);
- MaybeSave();
-
- // Shrink the mapping to 1 page in-place.
- ASSERT_THAT(Mremap(m.ptr(), 2 * kPageSize, kPageSize, 0, m.ptr()),
- IsPosixErrorOkAndHolds(m.ptr()));
-
- // Expand the mapping to 3 pages, moving it forward by 1 page in the process
- // since the old and new mappings can't overlap.
- void* const new_m = reinterpret_cast<void*>(m.addr() + kPageSize);
- ASSERT_THAT(Mremap(m.ptr(), kPageSize, 3 * kPageSize,
- MREMAP_MAYMOVE | MREMAP_FIXED, new_m),
- IsPosixErrorOkAndHolds(new_m));
-
- // The first 2 pages of the mapping should still contain the data we wrote
- // (i.e. shrinking should not have discarded the second page's data), while
- // touching the third page should raise SIGBUS.
- auto const v =
- absl::string_view(static_cast<char const*>(new_m), 3 * kPageSize);
- ExpectAllBytesAre(v.substr(0, kPageSize), 'a');
- ExpectAllBytesAre(v.substr(kPageSize, kPageSize), 'b');
- EXPECT_EXIT(ExpectAllBytesAre(v.substr(2 * kPageSize, kPageSize), '\0'),
- ::testing::KilledBySignal(SIGBUS), "");
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
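// A minimal standalone sketch (not part of the deleted file above) of the
// old_size == 0 "duplicate mapping" behavior that the MayMove_Copy and
// MustMove_Copy tests exercise: on Linux >= 4.14 it only works for MAP_SHARED
// mappings, and it leaves the original mapping in place while creating a
// second view of the same pages.
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

bool DuplicateSharedMappingSketch() {
  const size_t page = sysconf(_SC_PAGESIZE);
  void* orig = mmap(nullptr, page, PROT_READ | PROT_WRITE,
                    MAP_SHARED | MAP_ANONYMOUS, -1, 0);
  if (orig == MAP_FAILED) return false;
  memset(orig, 'x', page);
  // old_size == 0: duplicate rather than move; the kernel picks the address.
  void* copy = mremap(orig, 0, page, MREMAP_MAYMOVE);
  if (copy == MAP_FAILED) return false;
  // Both views alias the same shared anonymous pages.
  bool aliased = static_cast<char*>(copy)[0] == 'x';
  munmap(copy, page);
  munmap(orig, page);
  return aliased;
}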
diff --git a/test/syscalls/linux/msync.cc b/test/syscalls/linux/msync.cc
deleted file mode 100644
index 2b2b6aef9..000000000
--- a/test/syscalls/linux/msync.cc
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sys/mman.h>
-#include <unistd.h>
-
-#include <functional>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "test/util/file_descriptor.h"
-#include "test/util/memory_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// Parameters for msync tests. Use a std::tuple so we can use
-// ::testing::Combine.
-using MsyncTestParam =
- std::tuple<int, // msync flags
- std::function<PosixErrorOr<Mapping>()> // returns mapping to
- // msync
- >;
-
-class MsyncParameterizedTest : public ::testing::TestWithParam<MsyncTestParam> {
- protected:
- int msync_flags() const { return std::get<0>(GetParam()); }
-
- PosixErrorOr<Mapping> GetMapping() const { return std::get<1>(GetParam())(); }
-};
-
-// All valid msync(2) flag combinations, not including MS_INVALIDATE. ("Linux
-// permits a call to msync() that specifies neither [MS_SYNC nor MS_ASYNC], with
-// semantics that are (currently) equivalent to specifying MS_ASYNC." -
-// msync(2))
-constexpr std::initializer_list<int> kMsyncFlags = {MS_SYNC, MS_ASYNC, 0};
-
-// Returns functions that return mappings that should be successfully
-// msync()able.
-std::vector<std::function<PosixErrorOr<Mapping>()>> SyncableMappings() {
- std::vector<std::function<PosixErrorOr<Mapping>()>> funcs;
- for (bool const writable : {false, true}) {
- for (int const mflags : {MAP_PRIVATE, MAP_SHARED}) {
- int const prot = PROT_READ | (writable ? PROT_WRITE : 0);
- int const oflags = O_CREAT | (writable ? O_RDWR : O_RDONLY);
- funcs.push_back([=] { return MmapAnon(kPageSize, prot, mflags); });
- funcs.push_back([=]() -> PosixErrorOr<Mapping> {
- std::string const path = NewTempAbsPath();
- ASSIGN_OR_RETURN_ERRNO(auto fd, Open(path, oflags, 0644));
- // Don't unlink the file since that breaks save/restore. Just let the
- // test infrastructure clean up all of our temporary files when we're
- // done.
- return Mmap(nullptr, kPageSize, prot, mflags, fd.get(), 0);
- });
- }
- }
- return funcs;
-}
-
-PosixErrorOr<Mapping> NoMappings() {
- return PosixError(EINVAL, "unexpected attempt to create a mapping");
-}
-
-// "Fixture" for msync tests that hold for all valid flags, but do not create
-// mappings.
-using MsyncNoMappingTest = MsyncParameterizedTest;
-
-TEST_P(MsyncNoMappingTest, UnmappedAddressWithZeroLengthSucceeds) {
- EXPECT_THAT(msync(nullptr, 0, msync_flags()), SyscallSucceeds());
-}
-
-TEST_P(MsyncNoMappingTest, UnmappedAddressWithNonzeroLengthFails) {
- EXPECT_THAT(msync(nullptr, kPageSize, msync_flags()),
- SyscallFailsWithErrno(ENOMEM));
-}
-
-INSTANTIATE_TEST_SUITE_P(All, MsyncNoMappingTest,
- ::testing::Combine(::testing::ValuesIn(kMsyncFlags),
- ::testing::Values(NoMappings)));
-
-// "Fixture" for msync tests that are not parameterized by msync flags, but do
-// create mappings.
-using MsyncNoFlagsTest = MsyncParameterizedTest;
-
-TEST_P(MsyncNoFlagsTest, BothSyncAndAsyncFails) {
- auto m = ASSERT_NO_ERRNO_AND_VALUE(GetMapping());
- EXPECT_THAT(msync(m.ptr(), m.len(), MS_SYNC | MS_ASYNC),
- SyscallFailsWithErrno(EINVAL));
-}
-
-INSTANTIATE_TEST_SUITE_P(
- All, MsyncNoFlagsTest,
- ::testing::Combine(::testing::Values(0), // ignored
- ::testing::ValuesIn(SyncableMappings())));
-
-// "Fixture" for msync tests parameterized by both msync flags and sources of
-// mappings.
-using MsyncFullParamTest = MsyncParameterizedTest;
-
-TEST_P(MsyncFullParamTest, NormallySucceeds) {
- auto m = ASSERT_NO_ERRNO_AND_VALUE(GetMapping());
- EXPECT_THAT(msync(m.ptr(), m.len(), msync_flags()), SyscallSucceeds());
-}
-
-TEST_P(MsyncFullParamTest, UnalignedLengthSucceeds) {
- auto m = ASSERT_NO_ERRNO_AND_VALUE(GetMapping());
- EXPECT_THAT(msync(m.ptr(), m.len() - 1, msync_flags()), SyscallSucceeds());
-}
-
-TEST_P(MsyncFullParamTest, UnalignedAddressFails) {
- auto m = ASSERT_NO_ERRNO_AND_VALUE(GetMapping());
- EXPECT_THAT(
- msync(reinterpret_cast<void*>(m.addr() + 1), m.len() - 1, msync_flags()),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_P(MsyncFullParamTest, InvalidateUnlockedSucceeds) {
- auto m = ASSERT_NO_ERRNO_AND_VALUE(GetMapping());
- EXPECT_THAT(msync(m.ptr(), m.len(), msync_flags() | MS_INVALIDATE),
- SyscallSucceeds());
-}
-
-// The test for MS_INVALIDATE on mlocked pages is in mlock.cc since it requires
-// probing for mlock support.
-
-INSTANTIATE_TEST_SUITE_P(
- All, MsyncFullParamTest,
- ::testing::Combine(::testing::ValuesIn(kMsyncFlags),
- ::testing::ValuesIn(SyncableMappings())));
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
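// A minimal standalone sketch (not part of the deleted file above) of the
// typical msync(2) pattern the parameterized tests cover: modify a MAP_SHARED
// file mapping, then flush it with MS_SYNC. The path /tmp/msync_demo is a
// placeholder and the mode is arbitrary.
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

bool MsyncSketch() {
  const size_t page = sysconf(_SC_PAGESIZE);
  int fd = open("/tmp/msync_demo", O_CREAT | O_RDWR, 0644);
  if (fd < 0) return false;
  if (ftruncate(fd, page) != 0) { close(fd); return false; }
  void* m = mmap(nullptr, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  if (m == MAP_FAILED) { close(fd); return false; }
  memcpy(m, "hello", 5);
  // MS_SYNC blocks until the dirty pages are written back to the file. The
  // address must be page-aligned, but the length need not be (see the
  // UnalignedLengthSucceeds and UnalignedAddressFails tests above).
  bool ok = msync(m, 5, MS_SYNC) == 0;
  munmap(m, page);
  close(fd);
  return ok;
}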
diff --git a/test/syscalls/linux/munmap.cc b/test/syscalls/linux/munmap.cc
deleted file mode 100644
index 067241f4d..000000000
--- a/test/syscalls/linux/munmap.cc
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sys/mman.h>
-
-#include "gtest/gtest.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-class MunmapTest : public ::testing::Test {
- protected:
- void SetUp() override {
- m_ = mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- ASSERT_NE(MAP_FAILED, m_);
- }
-
- void* m_ = nullptr;
-};
-
-TEST_F(MunmapTest, HappyCase) {
- EXPECT_THAT(munmap(m_, kPageSize), SyscallSucceeds());
-}
-
-TEST_F(MunmapTest, ZeroLength) {
- EXPECT_THAT(munmap(m_, 0), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_F(MunmapTest, LastPageRoundUp) {
- // Attempt to unmap up to and including the last page.
- EXPECT_THAT(munmap(m_, static_cast<size_t>(-kPageSize + 1)),
- SyscallFailsWithErrno(EINVAL));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
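// A minimal standalone sketch (not part of the deleted file above) of the
// length rounding the tests above rely on: munmap(2) rounds the length up to
// a page boundary, so unmapping "one byte" releases the entire page that
// contains it.
#include <sys/mman.h>
#include <unistd.h>

bool MunmapRoundingSketch() {
  const size_t page = sysconf(_SC_PAGESIZE);
  void* m = mmap(nullptr, page, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (m == MAP_FAILED) return false;
  // A length of 1 is rounded up to one full page, so the whole mapping is
  // removed; a length of 0 would instead fail with EINVAL, as tested above.
  return munmap(m, 1) == 0;
}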
diff --git a/test/syscalls/linux/network_namespace.cc b/test/syscalls/linux/network_namespace.cc
deleted file mode 100644
index 133fdecf0..000000000
--- a/test/syscalls/linux/network_namespace.cc
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <net/if.h>
-#include <sched.h>
-#include <sys/ioctl.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/capability_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-TEST(NetworkNamespaceTest, LoopbackExists) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN)));
-
- ScopedThread t([&] {
- ASSERT_THAT(unshare(CLONE_NEWNET), SyscallSucceedsWithValue(0));
-
- // TODO(gvisor.dev/issue/1833): Update this to test that only "lo" exists.
- // Check loopback device exists.
- int sock = socket(AF_INET, SOCK_DGRAM, 0);
- ASSERT_THAT(sock, SyscallSucceeds());
- struct ifreq ifr;
- strncpy(ifr.ifr_name, "lo", IFNAMSIZ);
- EXPECT_THAT(ioctl(sock, SIOCGIFINDEX, &ifr), SyscallSucceeds())
- << "lo cannot be found";
- });
-}
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
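// A minimal standalone sketch (not part of the deleted file above) of a
// simpler way to perform the same check: after unshare(CLONE_NEWNET), the new
// network namespace still contains a loopback interface, which
// if_nametoindex(3) can resolve without an explicit SIOCGIFINDEX ioctl.
// Creating a network namespace requires sufficient privileges, as in the test.
#include <net/if.h>
#include <sched.h>

bool LoopbackExistsInNewNetNamespace() {
  if (unshare(CLONE_NEWNET) != 0) return false;
  // if_nametoindex returns the interface index, or 0 if "lo" does not exist.
  return if_nametoindex("lo") != 0;
}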
diff --git a/test/syscalls/linux/open.cc b/test/syscalls/linux/open.cc
deleted file mode 100644
index ab9d19fef..000000000
--- a/test/syscalls/linux/open.cc
+++ /dev/null
@@ -1,534 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <linux/capability.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/memory/memory.h"
-#include "test/syscalls/linux/file_base.h"
-#include "test/util/capability_util.h"
-#include "test/util/cleanup.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// This test is currently very rudimentary.
-//
-// There are plenty of extra cases to cover once the sentry supports them.
-//
-// Different types of opens:
-// * O_CREAT
-// * O_DIRECTORY
-// * O_NOFOLLOW
-// * O_PATH
-//
-// Special operations on open:
-// * O_EXCL
-//
-// Special files:
-// * Blocking behavior for a named pipe.
-//
-// Different errors:
-// * EACCES
-// * EEXIST
-// * ENAMETOOLONG
-// * ELOOP
-// * ENOTDIR
-// * EPERM
-class OpenTest : public FileTest {
- void SetUp() override {
- FileTest::SetUp();
-
- ASSERT_THAT(
- write(test_file_fd_.get(), test_data_.c_str(), test_data_.length()),
- SyscallSucceedsWithValue(test_data_.length()));
- EXPECT_THAT(lseek(test_file_fd_.get(), 0, SEEK_SET), SyscallSucceeds());
- }
-
- public:
- const std::string test_data_ = "hello world\n";
-};
-
-TEST_F(OpenTest, OTrunc) {
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- ASSERT_THAT(open(dir.path().c_str(), O_TRUNC, 0666),
- SyscallFailsWithErrno(EISDIR));
-}
-
-TEST_F(OpenTest, OTruncAndReadOnlyDir) {
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- ASSERT_THAT(open(dir.path().c_str(), O_TRUNC | O_RDONLY, 0666),
- SyscallFailsWithErrno(EISDIR));
-}
-
-TEST_F(OpenTest, OTruncAndReadOnlyFile) {
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto path = JoinPath(dir.path(), "foo");
- EXPECT_NO_ERRNO(Open(path, O_RDWR | O_CREAT, 0666));
- EXPECT_NO_ERRNO(Open(path, O_TRUNC | O_RDONLY, 0666));
-}
-
-TEST_F(OpenTest, OCreateDirectory) {
- SKIP_IF(IsRunningWithVFS1());
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- // Normal case: existing directory.
- ASSERT_THAT(open(dir.path().c_str(), O_RDWR | O_CREAT, 0666),
- SyscallFailsWithErrno(EISDIR));
- // Trailing separator on existing directory.
- ASSERT_THAT(open(dir.path().append("/").c_str(), O_RDWR | O_CREAT, 0666),
- SyscallFailsWithErrno(EISDIR));
- // Trailing separator on non-existing directory.
- ASSERT_THAT(open(JoinPath(dir.path(), "non-existent").append("/").c_str(),
- O_RDWR | O_CREAT, 0666),
- SyscallFailsWithErrno(EISDIR));
- // "." special case.
- ASSERT_THAT(open(JoinPath(dir.path(), ".").c_str(), O_RDWR | O_CREAT, 0666),
- SyscallFailsWithErrno(EISDIR));
-}
-
-TEST_F(OpenTest, MustCreateExisting) {
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- // Existing directory.
- ASSERT_THAT(open(dir.path().c_str(), O_RDWR | O_CREAT | O_EXCL, 0666),
- SyscallFailsWithErrno(EEXIST));
-
- // Existing file.
- auto newFile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir.path()));
- ASSERT_THAT(open(newFile.path().c_str(), O_RDWR | O_CREAT | O_EXCL, 0666),
- SyscallFailsWithErrno(EEXIST));
-}
-
-TEST_F(OpenTest, ReadOnly) {
- char buf;
- const FileDescriptor ro_file =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDONLY));
-
- EXPECT_THAT(read(ro_file.get(), &buf, 1), SyscallSucceedsWithValue(1));
- EXPECT_THAT(lseek(ro_file.get(), 0, SEEK_SET), SyscallSucceeds());
- EXPECT_THAT(write(ro_file.get(), &buf, 1), SyscallFailsWithErrno(EBADF));
-}
-
-TEST_F(OpenTest, WriteOnly) {
- char buf;
- const FileDescriptor wo_file =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_WRONLY));
-
- EXPECT_THAT(read(wo_file.get(), &buf, 1), SyscallFailsWithErrno(EBADF));
- EXPECT_THAT(lseek(wo_file.get(), 0, SEEK_SET), SyscallSucceeds());
- EXPECT_THAT(write(wo_file.get(), &buf, 1), SyscallSucceedsWithValue(1));
-}
-
-TEST_F(OpenTest, CreateWithAppend) {
- std::string data = "text";
- std::string new_file = NewTempAbsPath();
- const FileDescriptor file = ASSERT_NO_ERRNO_AND_VALUE(
- Open(new_file, O_WRONLY | O_APPEND | O_CREAT, 0666));
- EXPECT_THAT(write(file.get(), data.c_str(), data.size()),
- SyscallSucceedsWithValue(data.size()));
- EXPECT_THAT(lseek(file.get(), 0, SEEK_SET), SyscallSucceeds());
- EXPECT_THAT(write(file.get(), data.c_str(), data.size()),
- SyscallSucceedsWithValue(data.size()));
-
- // Check that the size of the file is correct and that the offset has been
- // incremented to that size.
- struct stat s0;
- EXPECT_THAT(fstat(file.get(), &s0), SyscallSucceeds());
- EXPECT_EQ(s0.st_size, 2 * data.size());
- EXPECT_THAT(lseek(file.get(), 0, SEEK_CUR),
- SyscallSucceedsWithValue(2 * data.size()));
-}
-
-TEST_F(OpenTest, ReadWrite) {
- char buf;
- const FileDescriptor rw_file =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDWR));
-
- EXPECT_THAT(read(rw_file.get(), &buf, 1), SyscallSucceedsWithValue(1));
- EXPECT_THAT(lseek(rw_file.get(), 0, SEEK_SET), SyscallSucceeds());
- EXPECT_THAT(write(rw_file.get(), &buf, 1), SyscallSucceedsWithValue(1));
-}
-
-TEST_F(OpenTest, RelPath) {
- auto name = std::string(Basename(test_file_name_));
-
- ASSERT_THAT(chdir(GetAbsoluteTestTmpdir().c_str()), SyscallSucceeds());
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(name, O_RDONLY));
-}
-
-TEST_F(OpenTest, AbsPath) {
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDONLY));
-}
-
-TEST_F(OpenTest, AtRelPath) {
- auto name = std::string(Basename(test_file_name_));
- const FileDescriptor dirfd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(GetAbsoluteTestTmpdir(), O_RDONLY | O_DIRECTORY));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(OpenAt(dirfd.get(), name, O_RDONLY));
-}
-
-TEST_F(OpenTest, AtAbsPath) {
- const FileDescriptor dirfd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(GetAbsoluteTestTmpdir(), O_RDONLY | O_DIRECTORY));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(OpenAt(dirfd.get(), test_file_name_, O_RDONLY));
-}
-
-TEST_F(OpenTest, OpenNoFollowSymlink) {
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const std::string link_path = JoinPath(dir.path().c_str(), "link");
- ASSERT_THAT(symlink(test_file_name_.c_str(), link_path.c_str()),
- SyscallSucceeds());
- auto cleanup = Cleanup([link_path]() {
- EXPECT_THAT(unlink(link_path.c_str()), SyscallSucceeds());
- });
-
- // Open succeeds without O_NOFOLLOW and fails with O_NOFOLLOW.
- const FileDescriptor fd2 =
- ASSERT_NO_ERRNO_AND_VALUE(Open(link_path, O_RDONLY));
- ASSERT_THAT(open(link_path.c_str(), O_RDONLY | O_NOFOLLOW),
- SyscallFailsWithErrno(ELOOP));
-}
-
-TEST_F(OpenTest, OpenNoFollowStillFollowsLinksInPath) {
- // We will create the following structure:
- // tmp_folder/real_folder/file
- // tmp_folder/sym_folder -> tmp_folder/real_folder
- //
- // We will then open tmp_folder/sym_folder/file with O_NOFOLLOW and it
- // should succeed as O_NOFOLLOW only applies to the final path component.
- auto tmp_path = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto sym_path = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateSymlinkTo(GetAbsoluteTestTmpdir(), tmp_path.path()));
- auto file_path =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(tmp_path.path()));
-
- auto path_via_symlink = JoinPath(sym_path.path(), Basename(file_path.path()));
- const FileDescriptor fd2 =
- ASSERT_NO_ERRNO_AND_VALUE(Open(path_via_symlink, O_RDONLY | O_NOFOLLOW));
-}
-
-// Test that open(2) can follow symlinks that point back to the same tree.
-// Test sets up files as follows:
-// root/child/symlink => redirects to ../..
-// root/child/target => regular file
-//
-// open("root/child/symlink/root/child/file")
-TEST_F(OpenTest, SymlinkRecurse) {
- auto root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto child = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(root.path()));
- auto symlink = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateSymlinkTo(child.path(), "../.."));
- auto target = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateFileWith(child.path(), "abc", 0644));
- auto path_via_symlink =
- JoinPath(symlink.path(), Basename(root.path()), Basename(child.path()),
- Basename(target.path()));
- const auto contents =
- ASSERT_NO_ERRNO_AND_VALUE(GetContents(path_via_symlink));
- ASSERT_EQ(contents, "abc");
-}
-
-TEST_F(OpenTest, Fault) {
- char* totally_not_null = nullptr;
- ASSERT_THAT(open(totally_not_null, O_RDONLY), SyscallFailsWithErrno(EFAULT));
-}
-
-TEST_F(OpenTest, AppendOnly) {
- // First write some data to the fresh file.
- const int64_t kBufSize = 1024;
- std::vector<char> buf(kBufSize, 'a');
-
- FileDescriptor fd0 = ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDWR));
-
- std::fill(buf.begin(), buf.end(), 'a');
- EXPECT_THAT(WriteFd(fd0.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(buf.size()));
- fd0.reset(); // Close the file early.
-
- // Next get two handles to the same file. We open it twice because we want
- // to make sure that appending is respected across both file descriptors.
- const FileDescriptor fd1 =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDWR | O_APPEND));
- EXPECT_THAT(lseek(fd1.get(), 0, SEEK_CUR), SyscallSucceedsWithValue(0));
-
- const FileDescriptor fd2 =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDWR | O_APPEND));
- EXPECT_THAT(lseek(fd2.get(), 0, SEEK_CUR), SyscallSucceedsWithValue(0));
-
- // Then try to write to the first fd and make sure the bytes are appended.
- EXPECT_THAT(WriteFd(fd1.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(buf.size()));
-
- // Check that the size of the file is correct and that the offset has been
- // incremented to that size.
- struct stat s0;
- EXPECT_THAT(fstat(fd1.get(), &s0), SyscallSucceeds());
- EXPECT_EQ(s0.st_size, kBufSize * 2);
- EXPECT_THAT(lseek(fd1.get(), 0, SEEK_CUR),
- SyscallSucceedsWithValue(kBufSize * 2));
-
- // Then try to write to the second fd and make sure the bytes are appended.
- EXPECT_THAT(WriteFd(fd2.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(buf.size()));
-
- // Check that the size of the file is correct and that the offset has been
- // incremented to that size.
- struct stat s1;
- EXPECT_THAT(fstat(fd2.get(), &s1), SyscallSucceeds());
- EXPECT_EQ(s1.st_size, kBufSize * 3);
- EXPECT_THAT(lseek(fd2.get(), 0, SEEK_CUR),
- SyscallSucceedsWithValue(kBufSize * 3));
-}
-
-TEST_F(OpenTest, AppendConcurrentWrite) {
- constexpr int kThreadCount = 5;
- constexpr int kBytesPerThread = 10000;
- std::unique_ptr<ScopedThread> threads[kThreadCount];
-
- // Under the uncached policy, we expect that the file system can be changed
- // externally, so a new inode is created each time a file is opened and we
- // can't guarantee that writes to files with O_APPEND will work correctly.
- SKIP_IF(getenv("GVISOR_GOFER_UNCACHED"));
-
- EXPECT_THAT(truncate(test_file_name_.c_str(), 0), SyscallSucceeds());
-
- std::string filename = test_file_name_;
- DisableSave ds; // Too many syscalls.
- // Start kThreadCount threads which will write concurrently into the same
- // file.
- for (int i = 0; i < kThreadCount; i++) {
- threads[i] = absl::make_unique<ScopedThread>([filename]() {
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(filename, O_RDWR | O_APPEND));
-
- for (int j = 0; j < kBytesPerThread; j++) {
- EXPECT_THAT(WriteFd(fd.get(), &j, 1), SyscallSucceedsWithValue(1));
- }
- });
- }
- for (int i = 0; i < kThreadCount; i++) {
- threads[i]->Join();
- }
-
- // Check that the size of the file is correct.
- struct stat st;
- EXPECT_THAT(stat(test_file_name_.c_str(), &st), SyscallSucceeds());
- EXPECT_EQ(st.st_size, kThreadCount * kBytesPerThread);
-}
-
-TEST_F(OpenTest, Truncate) {
- {
- // First write some data to the new file and close it.
- FileDescriptor fd0 =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_WRONLY));
- std::vector<char> orig(10, 'a');
- EXPECT_THAT(WriteFd(fd0.get(), orig.data(), orig.size()),
- SyscallSucceedsWithValue(orig.size()));
- }
-
- // Then open with truncate and verify that offset is set to 0.
- const FileDescriptor fd1 =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDWR | O_TRUNC));
- EXPECT_THAT(lseek(fd1.get(), 0, SEEK_CUR), SyscallSucceedsWithValue(0));
-
- // Then write less data to the file and ensure the old content is gone.
- std::vector<char> want(5, 'b');
- EXPECT_THAT(WriteFd(fd1.get(), want.data(), want.size()),
- SyscallSucceedsWithValue(want.size()));
-
- struct stat stat;
- EXPECT_THAT(fstat(fd1.get(), &stat), SyscallSucceeds());
- EXPECT_EQ(stat.st_size, want.size());
- EXPECT_THAT(lseek(fd1.get(), 0, SEEK_CUR),
- SyscallSucceedsWithValue(want.size()));
-
- // Read the data and ensure only the latest write is in the file.
- std::vector<char> got(want.size() + 1, 'c');
- ASSERT_THAT(pread(fd1.get(), got.data(), got.size(), 0),
- SyscallSucceedsWithValue(want.size()));
- EXPECT_EQ(memcmp(want.data(), got.data(), want.size()), 0)
- << "rbuf=" << got.data();
- EXPECT_EQ(got.back(), 'c'); // Last byte should not have been modified.
-}
-
-TEST_F(OpenTest, NameTooLong) {
- // A 4097-byte path (with no NUL terminator in the buffer) exceeds PATH_MAX
- // (4096, including the terminating NUL), so open() fails with ENAMETOOLONG.
- char buf[4097] = {};
- memset(buf, 'a', 4097);
- EXPECT_THAT(open(buf, O_RDONLY), SyscallFailsWithErrno(ENAMETOOLONG));
-}
-
-TEST_F(OpenTest, DotsFromRoot) {
- const FileDescriptor rootfd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/", O_RDONLY | O_DIRECTORY));
- const FileDescriptor other_rootfd =
- ASSERT_NO_ERRNO_AND_VALUE(OpenAt(rootfd.get(), "..", O_RDONLY));
-}
-
-TEST_F(OpenTest, DirectoryWritableFails) {
- ASSERT_THAT(open(GetAbsoluteTestTmpdir().c_str(), O_RDWR),
- SyscallFailsWithErrno(EISDIR));
-}
-
-TEST_F(OpenTest, FileNotDirectory) {
- // Create a file and try to open it with O_DIRECTORY.
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- ASSERT_THAT(open(file.path().c_str(), O_RDONLY | O_DIRECTORY),
- SyscallFailsWithErrno(ENOTDIR));
-}
-
-TEST_F(OpenTest, SymlinkDirectory) {
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- std::string link = NewTempAbsPath();
- ASSERT_THAT(symlink(dir.path().c_str(), link.c_str()), SyscallSucceeds());
- ASSERT_NO_ERRNO(Open(link, O_RDONLY | O_DIRECTORY));
-}
-
-TEST_F(OpenTest, Null) {
- char c = '\0';
- ASSERT_THAT(open(&c, O_RDONLY), SyscallFailsWithErrno(ENOENT));
-}
-
-// NOTE(b/119785738): While the man pages say this behavior is undefined, Linux
-// truncates the file when it is opened read-only with O_TRUNC as long as we
-// have write permission, so we will too.
-TEST_F(OpenTest, CanTruncateReadOnly) {
- const FileDescriptor fd1 =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDONLY | O_TRUNC));
-
- struct stat stat;
- EXPECT_THAT(fstat(fd1.get(), &stat), SyscallSucceeds());
- EXPECT_EQ(stat.st_size, 0);
-}
-
-// If we don't have write permission on the file, opening read-only with
-// O_TRUNC should fail.
-TEST_F(OpenTest, CanTruncateReadOnlyNoWritePermission) {
- // Drop capabilities that allow us to override file permissions.
- AutoCapability cap(CAP_DAC_OVERRIDE, false);
-
- const DisableSave ds; // Permissions are dropped.
- ASSERT_THAT(chmod(test_file_name_.c_str(), S_IRUSR | S_IRGRP),
- SyscallSucceeds());
-
- ASSERT_THAT(open(test_file_name_.c_str(), O_RDONLY | O_TRUNC),
- SyscallFailsWithErrno(EACCES));
-
- const FileDescriptor fd1 =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDONLY));
-
- struct stat stat;
- EXPECT_THAT(fstat(fd1.get(), &stat), SyscallSucceeds());
- EXPECT_EQ(stat.st_size, test_data_.size());
-}
-
-// If we don't have read permission but have write permission, opening O_WRONLY
-// and O_TRUNC should succeed.
-TEST_F(OpenTest, CanTruncateWriteOnlyNoReadPermission) {
- const DisableSave ds; // Permissions are dropped.
-
- EXPECT_THAT(fchmod(test_file_fd_.get(), S_IWUSR | S_IWGRP),
- SyscallSucceeds());
-
- const FileDescriptor fd1 =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_WRONLY | O_TRUNC));
-
- EXPECT_THAT(fchmod(test_file_fd_.get(), S_IRUSR | S_IRGRP),
- SyscallSucceeds());
-
- const FileDescriptor fd2 =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDONLY));
-
- struct stat stat;
- EXPECT_THAT(fstat(fd2.get(), &stat), SyscallSucceeds());
- EXPECT_EQ(stat.st_size, 0);
-}
-
-TEST_F(OpenTest, CanTruncateWithStrangePermissions) {
- AutoCapability cap1(CAP_DAC_OVERRIDE, false);
- AutoCapability cap2(CAP_DAC_READ_SEARCH, false);
- const DisableSave ds; // Permissions are dropped.
- std::string path = NewTempAbsPath();
- // Create a file without user permissions.
- EXPECT_NO_ERRNO(Open(path, O_CREAT | O_TRUNC | O_WRONLY, 055));
-
- // Cannot open the file because we are the owner and no owner permission
- // bits are set.
- EXPECT_THAT(open(path.c_str(), O_RDONLY), SyscallFailsWithErrno(EACCES));
-
- // We *can* chmod the file, because we are the owner.
- EXPECT_THAT(chmod(path.c_str(), 0755), SyscallSucceeds());
-
- // Now we can open the file again.
- EXPECT_NO_ERRNO(Open(path, O_RDWR));
-}
-
-TEST_F(OpenTest, OpenNonDirectoryWithTrailingSlash) {
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const std::string bad_path = file.path() + "/";
- EXPECT_THAT(open(bad_path.c_str(), O_RDONLY), SyscallFailsWithErrno(ENOTDIR));
-}
-
-TEST_F(OpenTest, OpenWithStrangeFlags) {
- // VFS1 incorrectly allows read/write operations on such file descriptors.
- SKIP_IF(IsRunningWithVFS1());
-
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_WRONLY | O_RDWR));
- EXPECT_THAT(write(fd.get(), "x", 1), SyscallFailsWithErrno(EBADF));
- char c;
- EXPECT_THAT(read(fd.get(), &c, 1), SyscallFailsWithErrno(EBADF));
-}
-
-TEST_F(OpenTest, OpenWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- AutoCapability cap1(CAP_DAC_OVERRIDE, false);
- AutoCapability cap2(CAP_DAC_READ_SEARCH, false);
- const DisableSave ds; // Permissions are dropped.
- std::string path = NewTempAbsPath();
-
- // Create a file without user permissions.
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(path.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 055));
-
- // Cannot open the file read-only because we are the owner and no owner
- // permission bits are set.
- EXPECT_THAT(open(path.c_str(), O_RDONLY), SyscallFailsWithErrno(EACCES));
-
- // Can open the file with O_PATH because permissions on the object are not
- // checked when opening with O_PATH.
- ASSERT_NO_ERRNO(Open(path, O_PATH));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
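// A minimal standalone sketch (not part of the deleted file above) expanding
// on the O_PATH behavior checked in OpenWithOpath: an O_PATH descriptor can
// serve as an anchor for *at() calls such as fstatat(2), but plain I/O on it
// fails with EBADF. The path /tmp is only a placeholder.
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

bool OPathSketch() {
  int fd = open("/tmp", O_PATH | O_DIRECTORY);
  if (fd < 0) return false;
  struct stat st;
  // Allowed: operations that only use the descriptor as a location reference.
  bool stat_ok = fstatat(fd, ".", &st, 0) == 0;
  // Not allowed: reading or writing through the descriptor itself.
  char c;
  bool read_fails = read(fd, &c, 1) < 0;  // Expect failure with EBADF.
  close(fd);
  return stat_ok && read_fails;
}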
diff --git a/test/syscalls/linux/open_create.cc b/test/syscalls/linux/open_create.cc
deleted file mode 100644
index 177bda54d..000000000
--- a/test/syscalls/linux/open_create.cc
+++ /dev/null
@@ -1,236 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "test/util/capability_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/temp_path.h"
-#include "test/util/temp_umask.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-TEST(CreateTest, TmpFile) {
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- EXPECT_NO_ERRNO(Open(JoinPath(dir.path(), "a"), O_RDWR | O_CREAT, 0666));
-}
-
-TEST(CreateTest, ExistingFile) {
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto path = JoinPath(dir.path(), "ExistingFile");
- EXPECT_NO_ERRNO(Open(path, O_RDWR | O_CREAT, 0666));
- EXPECT_NO_ERRNO(Open(path, O_RDWR | O_CREAT, 0666));
-}
-
-TEST(CreateTest, CreateAtFile) {
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto dirfd = ASSERT_NO_ERRNO_AND_VALUE(Open(dir.path(), O_DIRECTORY, 0666));
- int fd;
- EXPECT_THAT(fd = openat(dirfd.get(), "CreateAtFile", O_RDWR | O_CREAT, 0666),
- SyscallSucceeds());
- EXPECT_THAT(close(fd), SyscallSucceeds());
-}
-
-TEST(CreateTest, HonorsUmask) {
- const DisableSave ds; // file cannot be re-opened as writable.
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- TempUmask mask(0222);
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(JoinPath(dir.path(), "UmaskedFile"), O_RDWR | O_CREAT, 0666));
- struct stat statbuf;
- ASSERT_THAT(fstat(fd.get(), &statbuf), SyscallSucceeds());
- EXPECT_EQ(0444, statbuf.st_mode & 0777);
-}
-
-TEST(CreateTest, CreateExclusively) {
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto path = JoinPath(dir.path(), "foo");
- EXPECT_NO_ERRNO(Open(path, O_CREAT | O_RDWR, 0644));
- EXPECT_THAT(open(path.c_str(), O_CREAT | O_EXCL | O_RDWR, 0644),
- SyscallFailsWithErrno(EEXIST));
-}
-
-TEST(CreateTest, CreatWithOTrunc) {
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- ASSERT_THAT(open(dir.path().c_str(), O_CREAT | O_TRUNC, 0666),
- SyscallFailsWithErrno(EISDIR));
-}
-
-TEST(CreateTest, CreatDirWithOTruncAndReadOnly) {
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- ASSERT_THAT(open(dir.path().c_str(), O_CREAT | O_TRUNC | O_RDONLY, 0666),
- SyscallFailsWithErrno(EISDIR));
-}
-
-TEST(CreateTest, CreatFileWithOTruncAndReadOnly) {
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto path = JoinPath(dir.path(), "foo");
- ASSERT_NO_ERRNO(Open(path, O_RDWR | O_CREAT, 0666));
- ASSERT_NO_ERRNO(Open(path, O_CREAT | O_TRUNC | O_RDONLY, 0666));
-}
-
-TEST(CreateTest, CreateFailsOnDirWithoutWritePerms) {
- // Make sure we don't have CAP_DAC_OVERRIDE, since that allows the user to
- // always override directory permissions.
- AutoCapability cap(CAP_DAC_OVERRIDE, false);
-
- auto parent = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateDirWith(GetAbsoluteTestTmpdir(), 0555));
- auto file = JoinPath(parent.path(), "foo");
- ASSERT_THAT(open(file.c_str(), O_CREAT | O_RDWR, 0644),
- SyscallFailsWithErrno(EACCES));
-}
-
-// A file originally created RW, but opened RO can later be opened RW.
-// Regression test for b/65385065.
-TEST(CreateTest, OpenCreateROThenRW) {
- TempPath file(NewTempAbsPath());
-
- // Create a RW file, but only open it RO.
- FileDescriptor fd1 = ASSERT_NO_ERRNO_AND_VALUE(
- Open(file.path(), O_CREAT | O_EXCL | O_RDONLY, 0644));
-
- // Now get a RW FD.
- FileDescriptor fd2 = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR));
-
- // fd1 is not writable, but fd2 is.
- char c = 'a';
- EXPECT_THAT(WriteFd(fd1.get(), &c, 1), SyscallFailsWithErrno(EBADF));
- EXPECT_THAT(WriteFd(fd2.get(), &c, 1), SyscallSucceedsWithValue(1));
-}
-
-TEST(CreateTest, ChmodReadToWriteBetweenOpens) {
- // Make sure we don't have CAP_DAC_OVERRIDE, since that allows the user to
- // override file read/write permissions. CAP_DAC_READ_SEARCH needs to be
- // cleared for the same reason.
- AutoCapability cap1(CAP_DAC_OVERRIDE, false);
- AutoCapability cap2(CAP_DAC_READ_SEARCH, false);
-
- const TempPath file =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileMode(0400));
-
- const FileDescriptor rfd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDONLY));
-
- // Cannot restore after making permissions more restrictive.
- const DisableSave ds;
- ASSERT_THAT(fchmod(rfd.get(), 0200), SyscallSucceeds());
-
- EXPECT_THAT(open(file.path().c_str(), O_RDONLY),
- SyscallFailsWithErrno(EACCES));
-
- const FileDescriptor wfd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_WRONLY));
-
- char c = 'x';
- EXPECT_THAT(write(wfd.get(), &c, 1), SyscallSucceedsWithValue(1));
- c = 0;
- EXPECT_THAT(read(rfd.get(), &c, 1), SyscallSucceedsWithValue(1));
- EXPECT_EQ(c, 'x');
-}
-
-TEST(CreateTest, ChmodWriteToReadBetweenOpens) {
- // Make sure we don't have CAP_DAC_OVERRIDE, since that allows the user to
- // override file read/write permissions.
- AutoCapability cap(CAP_DAC_OVERRIDE, false);
-
- const TempPath file =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileMode(0200));
-
- const FileDescriptor wfd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_WRONLY));
-
- // Cannot restore after making permissions more restrictive.
- const DisableSave ds;
- ASSERT_THAT(fchmod(wfd.get(), 0400), SyscallSucceeds());
-
- EXPECT_THAT(open(file.path().c_str(), O_WRONLY),
- SyscallFailsWithErrno(EACCES));
-
- const FileDescriptor rfd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDONLY));
-
- char c = 'x';
- EXPECT_THAT(write(wfd.get(), &c, 1), SyscallSucceedsWithValue(1));
- c = 0;
- EXPECT_THAT(read(rfd.get(), &c, 1), SyscallSucceedsWithValue(1));
- EXPECT_EQ(c, 'x');
-}
-
-TEST(CreateTest, CreateWithReadFlagNotAllowedByMode) {
- // The only time we can open a file with flags forbidden by its permissions
- // is when we are creating the file. We cannot re-open with the same flags,
- // so we cannot restore an fd obtained from such an operation.
- const DisableSave ds;
-
- // Make sure we don't have CAP_DAC_OVERRIDE, since that allows the user to
- // override file read/write permissions. CAP_DAC_READ_SEARCH needs to be
- // cleared for the same reason.
- AutoCapability cap1(CAP_DAC_OVERRIDE, false);
- AutoCapability cap2(CAP_DAC_READ_SEARCH, false);
-
- // Create and open a file with read flag but without read permissions.
- const std::string path = NewTempAbsPath();
- const FileDescriptor rfd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(path, O_CREAT | O_RDONLY, 0222));
-
- EXPECT_THAT(open(path.c_str(), O_RDONLY), SyscallFailsWithErrno(EACCES));
- const FileDescriptor wfd = ASSERT_NO_ERRNO_AND_VALUE(Open(path, O_WRONLY));
-
- char c = 'x';
- EXPECT_THAT(write(wfd.get(), &c, 1), SyscallSucceedsWithValue(1));
- c = 0;
- EXPECT_THAT(read(rfd.get(), &c, 1), SyscallSucceedsWithValue(1));
- EXPECT_EQ(c, 'x');
-}
-
-TEST(CreateTest, CreateWithWriteFlagNotAllowedByMode) {
- // The only time we can open a file with flags forbidden by its permissions
- // is when we are creating the file. We cannot re-open with the same flags,
- // so we cannot restore an fd obtained from such an operation.
- const DisableSave ds;
-
- // Make sure we don't have CAP_DAC_OVERRIDE, since that allows the user to
- // override file read/write permissions.
- AutoCapability cap(CAP_DAC_OVERRIDE, false);
-
- // Create and open a file with write flag but without write permissions.
- const std::string path = NewTempAbsPath();
- const FileDescriptor wfd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(path, O_CREAT | O_WRONLY, 0444));
-
- EXPECT_THAT(open(path.c_str(), O_WRONLY), SyscallFailsWithErrno(EACCES));
- const FileDescriptor rfd = ASSERT_NO_ERRNO_AND_VALUE(Open(path, O_RDONLY));
-
- char c = 'x';
- EXPECT_THAT(write(wfd.get(), &c, 1), SyscallSucceedsWithValue(1));
- c = 0;
- EXPECT_THAT(read(rfd.get(), &c, 1), SyscallSucceedsWithValue(1));
- EXPECT_EQ(c, 'x');
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/packet_socket.cc b/test/syscalls/linux/packet_socket.cc
deleted file mode 100644
index 1e246c421..000000000
--- a/test/syscalls/linux/packet_socket.cc
+++ /dev/null
@@ -1,558 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <arpa/inet.h>
-#include <ifaddrs.h>
-#include <linux/capability.h>
-#include <linux/if_arp.h>
-#include <linux/if_packet.h>
-#include <net/ethernet.h>
-#include <netinet/in.h>
-#include <netinet/ip.h>
-#include <netinet/udp.h>
-#include <poll.h>
-#include <sys/ioctl.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "absl/base/internal/endian.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/capability_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/test_util.h"
-
-// Some of these tests involve sending packets via AF_PACKET sockets and the
-// loopback interface. Because AF_PACKET circumvents so much of the networking
-// stack, Linux sees these packets as "martian", i.e. they claim to be to/from
-// localhost but don't have the usual associated data. Thus Linux drops them by
-// default. You can see where this happens by following the code at:
-//
-// - net/ipv4/ip_input.c:ip_rcv_finish, which calls
-// - net/ipv4/route.c:ip_route_input_noref, which calls
-// - net/ipv4/route.c:ip_route_input_slow, which finds and drops martian
-// packets.
-//
-// To tell Linux not to drop these packets, you need to tell it to accept our
-// funny packets (which are completely valid and correct, but lack associated
-// in-kernel data because we use AF_PACKET):
-//
-// echo 1 >> /proc/sys/net/ipv4/conf/lo/accept_local
-// echo 1 >> /proc/sys/net/ipv4/conf/lo/route_localnet
-//
-// These tests require CAP_NET_RAW to run.
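For reference, a minimal sketch of how the two sysctls quoted above might be enabled programmatically; the EnableLoopbackSysctl helper name is hypothetical, and the paths are the ones from the comment above.

    #include <fcntl.h>
    #include <unistd.h>

    #include <cstdio>

    // Writes '1' to a /proc sysctl file such as
    // /proc/sys/net/ipv4/conf/lo/accept_local. Returns true on success.
    bool EnableLoopbackSysctl(const char* path) {
      int fd = open(path, O_WRONLY);
      if (fd < 0) {
        perror("open");
        return false;
      }
      const char enabled = '1';
      const bool ok = (write(fd, &enabled, 1) == 1);
      close(fd);
      return ok;
    }

    // Usage (typically requires root):
    //   EnableLoopbackSysctl("/proc/sys/net/ipv4/conf/lo/accept_local");
    //   EnableLoopbackSysctl("/proc/sys/net/ipv4/conf/lo/route_localnet");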
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-using ::testing::AnyOf;
-using ::testing::Eq;
-
-constexpr char kMessage[] = "soweoneul malhaebwa";
-constexpr in_port_t kPort = 0x409c; // htons(40000)
-
-//
-// "Cooked" tests. Cooked AF_PACKET sockets do not contain link layer
-// headers, and provide link layer destination/source information via a
-// returned struct sockaddr_ll.
-//
-
-// Send kMessage via sock to loopback
-void SendUDPMessage(int sock) {
- struct sockaddr_in dest = {};
- dest.sin_port = kPort;
- dest.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- dest.sin_family = AF_INET;
- EXPECT_THAT(sendto(sock, kMessage, sizeof(kMessage), 0,
- reinterpret_cast<struct sockaddr*>(&dest), sizeof(dest)),
- SyscallSucceedsWithValue(sizeof(kMessage)));
-}
-
-// Send an IP packet and make sure ETH_P_<something else> doesn't pick it up.
-TEST(BasicCookedPacketTest, WrongType) {
- if (!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW))) {
- ASSERT_THAT(socket(AF_PACKET, SOCK_DGRAM, ETH_P_PUP),
- SyscallFailsWithErrno(EPERM));
- GTEST_SKIP();
- }
-
- FileDescriptor sock = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_PUP)));
-
- // Let's use a simple IP payload: a UDP datagram.
- FileDescriptor udp_sock =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, 0));
- SendUDPMessage(udp_sock.get());
-
- // Wait and make sure the socket never becomes readable.
- struct pollfd pfd = {};
- pfd.fd = sock.get();
- pfd.events = POLLIN;
- EXPECT_THAT(RetryEINTR(poll)(&pfd, 1, 1000), SyscallSucceedsWithValue(0));
-}
-
-// Tests for "cooked" (SOCK_DGRAM) packet(7) sockets.
-class CookedPacketTest : public ::testing::TestWithParam<int> {
- protected:
- // Creates a socket to be used in tests.
- void SetUp() override;
-
- // Closes the socket created by SetUp().
- void TearDown() override;
-
- // Gets the device index of the loopback device.
- int GetLoopbackIndex();
-
- // The socket used for both reading and writing.
- int socket_;
-};
-
-void CookedPacketTest::SetUp() {
- if (!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW))) {
- ASSERT_THAT(socket(AF_PACKET, SOCK_DGRAM, htons(GetParam())),
- SyscallFailsWithErrno(EPERM));
- GTEST_SKIP();
- }
-
- if (!IsRunningOnGvisor()) {
- FileDescriptor acceptLocal = ASSERT_NO_ERRNO_AND_VALUE(
- Open("/proc/sys/net/ipv4/conf/lo/accept_local", O_RDONLY));
- FileDescriptor routeLocalnet = ASSERT_NO_ERRNO_AND_VALUE(
- Open("/proc/sys/net/ipv4/conf/lo/route_localnet", O_RDONLY));
- char enabled;
- ASSERT_THAT(read(acceptLocal.get(), &enabled, 1),
- SyscallSucceedsWithValue(1));
- ASSERT_EQ(enabled, '1');
- ASSERT_THAT(read(routeLocalnet.get(), &enabled, 1),
- SyscallSucceedsWithValue(1));
- ASSERT_EQ(enabled, '1');
- }
-
- ASSERT_THAT(socket_ = socket(AF_PACKET, SOCK_DGRAM, htons(GetParam())),
- SyscallSucceeds());
-}
-
-void CookedPacketTest::TearDown() {
- // TearDown will be run even if we skip the test.
- if (ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW))) {
- EXPECT_THAT(close(socket_), SyscallSucceeds());
- }
-}
-
-int CookedPacketTest::GetLoopbackIndex() {
- struct ifreq ifr;
- snprintf(ifr.ifr_name, IFNAMSIZ, "lo");
- EXPECT_THAT(ioctl(socket_, SIOCGIFINDEX, &ifr), SyscallSucceeds());
- EXPECT_NE(ifr.ifr_ifindex, 0);
- return ifr.ifr_ifindex;
-}
-
-// Receive and verify the message via packet socket on interface.
-void ReceiveMessage(int sock, int ifindex) {
- // Wait for the socket to become readable.
- struct pollfd pfd = {};
- pfd.fd = sock;
- pfd.events = POLLIN;
- EXPECT_THAT(RetryEINTR(poll)(&pfd, 1, 2000), SyscallSucceedsWithValue(1));
-
- // Read and verify the data.
- constexpr size_t packet_size =
- sizeof(struct iphdr) + sizeof(struct udphdr) + sizeof(kMessage);
- char buf[64];
- struct sockaddr_ll src = {};
- socklen_t src_len = sizeof(src);
- ASSERT_THAT(recvfrom(sock, buf, sizeof(buf), 0,
- reinterpret_cast<struct sockaddr*>(&src), &src_len),
- SyscallSucceedsWithValue(packet_size));
-
-  // sockaddr_ll ends with an 8-byte physical address field, but ethernet
-  // addresses only use 6 bytes. Linux used to return sizeof(sockaddr_ll)-2
-  // here, but since commit b2cf86e1563e33a14a1c69b3e508d15dc12f804c it returns
-  // sizeof(sockaddr_ll).
- ASSERT_THAT(src_len, AnyOf(Eq(sizeof(src)), Eq(sizeof(src) - 2)));
-
- // Verify the source address.
- EXPECT_EQ(src.sll_family, AF_PACKET);
- EXPECT_EQ(src.sll_ifindex, ifindex);
- EXPECT_EQ(src.sll_halen, ETH_ALEN);
- EXPECT_EQ(ntohs(src.sll_protocol), ETH_P_IP);
- // This came from the loopback device, so the address is all 0s.
- for (int i = 0; i < src.sll_halen; i++) {
- EXPECT_EQ(src.sll_addr[i], 0);
- }
-
-  // Verify the IP header. We memcpy to deal with pointer alignment.
- struct iphdr ip = {};
- memcpy(&ip, buf, sizeof(ip));
- EXPECT_EQ(ip.ihl, 5);
- EXPECT_EQ(ip.version, 4);
- EXPECT_EQ(ip.tot_len, htons(packet_size));
- EXPECT_EQ(ip.protocol, IPPROTO_UDP);
- EXPECT_EQ(ip.daddr, htonl(INADDR_LOOPBACK));
- EXPECT_EQ(ip.saddr, htonl(INADDR_LOOPBACK));
-
-  // Verify the UDP header. We memcpy to deal with pointer alignment.
- struct udphdr udp = {};
- memcpy(&udp, buf + sizeof(iphdr), sizeof(udp));
- EXPECT_EQ(udp.dest, kPort);
- EXPECT_EQ(udp.len, htons(sizeof(udphdr) + sizeof(kMessage)));
-
- // Verify the payload.
- char* payload = reinterpret_cast<char*>(buf + sizeof(iphdr) + sizeof(udphdr));
- EXPECT_EQ(strncmp(payload, kMessage, sizeof(kMessage)), 0);
-}
-
-// Receive via a packet socket.
-TEST_P(CookedPacketTest, Receive) {
- // Let's use a simple IP payload: a UDP datagram.
- FileDescriptor udp_sock =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, 0));
- SendUDPMessage(udp_sock.get());
-
- // Receive and verify the data.
- int loopback_index = GetLoopbackIndex();
- ReceiveMessage(socket_, loopback_index);
-}
-
-// Send via a packet socket.
-TEST_P(CookedPacketTest, Send) {
- // Let's send a UDP packet and receive it using a regular UDP socket.
- FileDescriptor udp_sock =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, 0));
- struct sockaddr_in bind_addr = {};
- bind_addr.sin_family = AF_INET;
- bind_addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- bind_addr.sin_port = kPort;
- ASSERT_THAT(
- bind(udp_sock.get(), reinterpret_cast<struct sockaddr*>(&bind_addr),
- sizeof(bind_addr)),
- SyscallSucceeds());
-
- // Set up the destination physical address.
- struct sockaddr_ll dest = {};
- dest.sll_family = AF_PACKET;
- dest.sll_halen = ETH_ALEN;
- dest.sll_ifindex = GetLoopbackIndex();
- dest.sll_protocol = htons(ETH_P_IP);
- // We're sending to the loopback device, so the address is all 0s.
- memset(dest.sll_addr, 0x00, ETH_ALEN);
-
- // Set up the IP header.
- struct iphdr iphdr = {0};
- iphdr.ihl = 5;
- iphdr.version = 4;
- iphdr.tos = 0;
- iphdr.tot_len =
- htons(sizeof(struct iphdr) + sizeof(struct udphdr) + sizeof(kMessage));
- // Get a pseudo-random ID. If we clash with an in-use ID the test will fail,
- // but we have no way of getting an ID we know to be good.
- srand(*reinterpret_cast<unsigned int*>(&iphdr));
- iphdr.id = rand();
- // Linux sets this bit ("do not fragment") for small packets.
- iphdr.frag_off = 1 << 6;
- iphdr.ttl = 64;
- iphdr.protocol = IPPROTO_UDP;
- iphdr.daddr = htonl(INADDR_LOOPBACK);
- iphdr.saddr = htonl(INADDR_LOOPBACK);
- iphdr.check = IPChecksum(iphdr);
-
- // Set up the UDP header.
- struct udphdr udphdr = {};
- udphdr.source = kPort;
- udphdr.dest = kPort;
- udphdr.len = htons(sizeof(udphdr) + sizeof(kMessage));
- udphdr.check = UDPChecksum(iphdr, udphdr, kMessage, sizeof(kMessage));
-
- // Copy both headers and the payload into our packet buffer.
- char send_buf[sizeof(iphdr) + sizeof(udphdr) + sizeof(kMessage)];
- memcpy(send_buf, &iphdr, sizeof(iphdr));
- memcpy(send_buf + sizeof(iphdr), &udphdr, sizeof(udphdr));
- memcpy(send_buf + sizeof(iphdr) + sizeof(udphdr), kMessage, sizeof(kMessage));
-
- // We don't implement writing to packet sockets on gVisor.
- if (IsRunningOnGvisor()) {
- ASSERT_THAT(sendto(socket_, send_buf, sizeof(send_buf), 0,
- reinterpret_cast<struct sockaddr*>(&dest), sizeof(dest)),
- SyscallFailsWithErrno(EINVAL));
- GTEST_SKIP();
- }
-
- // Send it.
- ASSERT_THAT(sendto(socket_, send_buf, sizeof(send_buf), 0,
- reinterpret_cast<struct sockaddr*>(&dest), sizeof(dest)),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Wait for the packet to become available on both sockets.
- struct pollfd pfd = {};
- pfd.fd = udp_sock.get();
- pfd.events = POLLIN;
- ASSERT_THAT(RetryEINTR(poll)(&pfd, 1, 5000), SyscallSucceedsWithValue(1));
- pfd.fd = socket_;
- pfd.events = POLLIN;
- ASSERT_THAT(RetryEINTR(poll)(&pfd, 1, 5000), SyscallSucceedsWithValue(1));
-
- // Receive on the packet socket.
- char recv_buf[sizeof(send_buf)];
- ASSERT_THAT(recv(socket_, recv_buf, sizeof(recv_buf), 0),
- SyscallSucceedsWithValue(sizeof(recv_buf)));
- ASSERT_EQ(memcmp(recv_buf, send_buf, sizeof(send_buf)), 0);
-
- // Receive on the UDP socket.
- struct sockaddr_in src;
- socklen_t src_len = sizeof(src);
- ASSERT_THAT(recvfrom(udp_sock.get(), recv_buf, sizeof(recv_buf), MSG_DONTWAIT,
- reinterpret_cast<struct sockaddr*>(&src), &src_len),
- SyscallSucceedsWithValue(sizeof(kMessage)));
- // Check src and payload.
- EXPECT_EQ(strncmp(recv_buf, kMessage, sizeof(kMessage)), 0);
- EXPECT_EQ(src.sin_family, AF_INET);
- EXPECT_EQ(src.sin_port, kPort);
- EXPECT_EQ(src.sin_addr.s_addr, htonl(INADDR_LOOPBACK));
-}
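IPChecksum and UDPChecksum used above come from the test utilities; the following is a minimal sketch of the standard ones'-complement (RFC 1071) sum such a helper presumably computes for the IPv4 header. The ComputeIPChecksum name is hypothetical.

    #include <netinet/ip.h>
    #include <stddef.h>
    #include <stdint.h>

    // Computes the IPv4 header checksum: the 16-bit ones'-complement of the
    // ones'-complement sum of all 16-bit words in the header, with the
    // checksum field itself treated as zero. Takes the header by value so the
    // caller's copy is left untouched.
    uint16_t ComputeIPChecksum(struct iphdr ip) {
      ip.check = 0;
      const uint16_t* words = reinterpret_cast<const uint16_t*>(&ip);
      uint32_t sum = 0;
      for (size_t i = 0; i < sizeof(ip) / sizeof(uint16_t); i++) {
        sum += words[i];
      }
      // Fold any carries back into the low 16 bits.
      while (sum >> 16) {
        sum = (sum & 0xffff) + (sum >> 16);
      }
      return static_cast<uint16_t>(~sum);
    }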
-
-// Bind and receive via packet socket.
-TEST_P(CookedPacketTest, BindReceive) {
- struct sockaddr_ll bind_addr = {};
- bind_addr.sll_family = AF_PACKET;
- bind_addr.sll_protocol = htons(GetParam());
- bind_addr.sll_ifindex = GetLoopbackIndex();
-
- ASSERT_THAT(bind(socket_, reinterpret_cast<struct sockaddr*>(&bind_addr),
- sizeof(bind_addr)),
- SyscallSucceeds());
-
- // Let's use a simple IP payload: a UDP datagram.
- FileDescriptor udp_sock =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, 0));
- SendUDPMessage(udp_sock.get());
-
- // Receive and verify the data.
- ReceiveMessage(socket_, bind_addr.sll_ifindex);
-}
-
-// Double Bind socket.
-TEST_P(CookedPacketTest, DoubleBindSucceeds) {
- struct sockaddr_ll bind_addr = {};
- bind_addr.sll_family = AF_PACKET;
- bind_addr.sll_protocol = htons(GetParam());
- bind_addr.sll_ifindex = GetLoopbackIndex();
-
- ASSERT_THAT(bind(socket_, reinterpret_cast<struct sockaddr*>(&bind_addr),
- sizeof(bind_addr)),
- SyscallSucceeds());
-
-  // Binding the socket again to the same address should also succeed.
-  ASSERT_THAT(bind(socket_, reinterpret_cast<struct sockaddr*>(&bind_addr),
-                   sizeof(bind_addr)),
-              // Older kernels rejected a second bind (Linux 4.9 returned
-              // EINVAL, and some time before 4.19 it switched to EADDRINUSE);
-              // the rebind is expected to succeed here.
-              SyscallSucceeds());
-}
-
-// Bind to a non-loopback interface and verify that we do not receive data
-// sent on an interface we are not bound to.
-TEST_P(CookedPacketTest, BindDrop) {
- // Let's use a simple IP payload: a UDP datagram.
- FileDescriptor udp_sock =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, 0));
-
- struct ifaddrs* if_addr_list = nullptr;
- auto cleanup = Cleanup([&if_addr_list]() { freeifaddrs(if_addr_list); });
-
- ASSERT_THAT(getifaddrs(&if_addr_list), SyscallSucceeds());
-
- // Get interface other than loopback.
- struct ifreq ifr = {};
- for (struct ifaddrs* i = if_addr_list; i; i = i->ifa_next) {
- if (strcmp(i->ifa_name, "lo") != 0) {
- strncpy(ifr.ifr_name, i->ifa_name, sizeof(ifr.ifr_name));
- break;
- }
- }
-
- // Skip if no interface is available other than loopback.
- if (strlen(ifr.ifr_name) == 0) {
- GTEST_SKIP();
- }
-
- // Get interface index.
- EXPECT_THAT(ioctl(socket_, SIOCGIFINDEX, &ifr), SyscallSucceeds());
- EXPECT_NE(ifr.ifr_ifindex, 0);
-
-  // Binding a packet socket requires only the family, protocol and ifindex.
- struct sockaddr_ll bind_addr = {};
- bind_addr.sll_family = AF_PACKET;
- bind_addr.sll_protocol = htons(GetParam());
- bind_addr.sll_ifindex = ifr.ifr_ifindex;
-
- ASSERT_THAT(bind(socket_, reinterpret_cast<struct sockaddr*>(&bind_addr),
- sizeof(bind_addr)),
- SyscallSucceeds());
-
- // Send to loopback interface.
- struct sockaddr_in dest = {};
- dest.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- dest.sin_family = AF_INET;
- dest.sin_port = kPort;
- EXPECT_THAT(sendto(udp_sock.get(), kMessage, sizeof(kMessage), 0,
- reinterpret_cast<struct sockaddr*>(&dest), sizeof(dest)),
- SyscallSucceedsWithValue(sizeof(kMessage)));
-
- // Wait and make sure the socket never receives any data.
- struct pollfd pfd = {};
- pfd.fd = socket_;
- pfd.events = POLLIN;
- EXPECT_THAT(RetryEINTR(poll)(&pfd, 1, 1000), SyscallSucceedsWithValue(0));
-}
-
-// Verify that we receive outbound packets. This test requires at least one
-// non-loopback interface so that we can actually capture an outgoing packet.
-TEST_P(CookedPacketTest, ReceiveOutbound) {
-  // Only ETH_P_ALL sockets can receive outbound packets on Linux.
- SKIP_IF(GetParam() != ETH_P_ALL);
-
- // Let's use a simple IP payload: a UDP datagram.
- FileDescriptor udp_sock =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, 0));
-
- struct ifaddrs* if_addr_list = nullptr;
- auto cleanup = Cleanup([&if_addr_list]() { freeifaddrs(if_addr_list); });
-
- ASSERT_THAT(getifaddrs(&if_addr_list), SyscallSucceeds());
-
- // Get interface other than loopback.
- struct ifreq ifr = {};
- for (struct ifaddrs* i = if_addr_list; i; i = i->ifa_next) {
- if (strcmp(i->ifa_name, "lo") != 0) {
- strncpy(ifr.ifr_name, i->ifa_name, sizeof(ifr.ifr_name));
- break;
- }
- }
-
- // Skip if no interface is available other than loopback.
- if (strlen(ifr.ifr_name) == 0) {
- GTEST_SKIP();
- }
-
- // Get interface index and name.
- EXPECT_THAT(ioctl(socket_, SIOCGIFINDEX, &ifr), SyscallSucceeds());
- EXPECT_NE(ifr.ifr_ifindex, 0);
- int ifindex = ifr.ifr_ifindex;
-
- constexpr int kMACSize = 6;
- char hwaddr[kMACSize];
- // Get interface address.
- ASSERT_THAT(ioctl(socket_, SIOCGIFHWADDR, &ifr), SyscallSucceeds());
- ASSERT_THAT(ifr.ifr_hwaddr.sa_family,
- AnyOf(Eq(ARPHRD_NONE), Eq(ARPHRD_ETHER)));
- memcpy(hwaddr, ifr.ifr_hwaddr.sa_data, kMACSize);
-
-  // Just send it to the Google DNS server 8.8.8.8. It's UDP; we don't care
-  // whether it actually reaches the DNS server, we just want to see that we
-  // receive it on our AF_PACKET socket.
- //
-  // NOTE: We just want to pick a non-local IP so that we don't have to handle
-  // ARP; this should cause the UDP packet to be sent to the default gateway
-  // configured for the system under test. Otherwise the only packet we will
-  // see is the ARP query, unless we picked an IP that actually resolves. The
-  // test is a bit brittle, but this was the best compromise for now.
- struct sockaddr_in dest = {};
- ASSERT_EQ(inet_pton(AF_INET, "8.8.8.8", &dest.sin_addr.s_addr), 1);
- dest.sin_family = AF_INET;
- dest.sin_port = kPort;
- EXPECT_THAT(sendto(udp_sock.get(), kMessage, sizeof(kMessage), 0,
- reinterpret_cast<struct sockaddr*>(&dest), sizeof(dest)),
- SyscallSucceedsWithValue(sizeof(kMessage)));
-
- // Wait and make sure the socket receives the data.
- struct pollfd pfd = {};
- pfd.fd = socket_;
- pfd.events = POLLIN;
- EXPECT_THAT(RetryEINTR(poll)(&pfd, 1, 1000), SyscallSucceedsWithValue(1));
-
- // Now read and check that the packet is the one we just sent.
- // Read and verify the data.
- constexpr size_t packet_size =
- sizeof(struct iphdr) + sizeof(struct udphdr) + sizeof(kMessage);
- char buf[64];
- struct sockaddr_ll src = {};
- socklen_t src_len = sizeof(src);
- ASSERT_THAT(recvfrom(socket_, buf, sizeof(buf), 0,
- reinterpret_cast<struct sockaddr*>(&src), &src_len),
- SyscallSucceedsWithValue(packet_size));
-
-  // sockaddr_ll ends with an 8-byte physical address field, but ethernet
-  // addresses only use 6 bytes. Linux used to return sizeof(sockaddr_ll)-2
-  // here, but since commit b2cf86e1563e33a14a1c69b3e508d15dc12f804c it returns
-  // sizeof(sockaddr_ll).
- ASSERT_THAT(src_len, AnyOf(Eq(sizeof(src)), Eq(sizeof(src) - 2)));
-
- // Verify the source address.
- EXPECT_EQ(src.sll_family, AF_PACKET);
- EXPECT_EQ(src.sll_ifindex, ifindex);
- EXPECT_EQ(src.sll_halen, ETH_ALEN);
- EXPECT_EQ(ntohs(src.sll_protocol), ETH_P_IP);
- EXPECT_EQ(src.sll_pkttype, PACKET_OUTGOING);
-  // Verify that the link address of the interface matches that of the
-  // non-loopback interface address we stored above.
- for (int i = 0; i < src.sll_halen; i++) {
- EXPECT_EQ(src.sll_addr[i], hwaddr[i]);
- }
-
- // Verify the IP header.
- struct iphdr ip = {};
- memcpy(&ip, buf, sizeof(ip));
- EXPECT_EQ(ip.ihl, 5);
- EXPECT_EQ(ip.version, 4);
- EXPECT_EQ(ip.tot_len, htons(packet_size));
- EXPECT_EQ(ip.protocol, IPPROTO_UDP);
- EXPECT_EQ(ip.daddr, dest.sin_addr.s_addr);
- EXPECT_NE(ip.saddr, htonl(INADDR_LOOPBACK));
-
- // Verify the UDP header.
- struct udphdr udp = {};
- memcpy(&udp, buf + sizeof(iphdr), sizeof(udp));
- EXPECT_EQ(udp.dest, kPort);
- EXPECT_EQ(udp.len, htons(sizeof(udphdr) + sizeof(kMessage)));
-
- // Verify the payload.
- char* payload = reinterpret_cast<char*>(buf + sizeof(iphdr) + sizeof(udphdr));
- EXPECT_EQ(strncmp(payload, kMessage, sizeof(kMessage)), 0);
-}
-
-// Bind with invalid address.
-TEST_P(CookedPacketTest, BindFail) {
- // Null address.
- ASSERT_THAT(
- bind(socket_, nullptr, sizeof(struct sockaddr)),
- AnyOf(SyscallFailsWithErrno(EFAULT), SyscallFailsWithErrno(EINVAL)));
-
- // Address of size 1.
- uint8_t addr = 0;
- ASSERT_THAT(
- bind(socket_, reinterpret_cast<struct sockaddr*>(&addr), sizeof(addr)),
- SyscallFailsWithErrno(EINVAL));
-}
-
-INSTANTIATE_TEST_SUITE_P(AllInetTests, CookedPacketTest,
- ::testing::Values(ETH_P_IP, ETH_P_ALL));
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/packet_socket_raw.cc b/test/syscalls/linux/packet_socket_raw.cc
deleted file mode 100644
index 7e439466e..000000000
--- a/test/syscalls/linux/packet_socket_raw.cc
+++ /dev/null
@@ -1,728 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <arpa/inet.h>
-#include <linux/capability.h>
-#include <linux/filter.h>
-#include <linux/if_arp.h>
-#include <linux/if_packet.h>
-#include <net/ethernet.h>
-#include <netinet/in.h>
-#include <netinet/ip.h>
-#include <netinet/udp.h>
-#include <poll.h>
-#include <sys/ioctl.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/base/internal/endian.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/capability_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/test_util.h"
-
-// Some of these tests involve sending packets via AF_PACKET sockets and the
-// loopback interface. Because AF_PACKET circumvents so much of the networking
-// stack, Linux sees these packets as "martian", i.e. they claim to be to/from
-// localhost but don't have the usual associated data. Thus Linux drops them by
-// default. You can see where this happens by following the code at:
-//
-// - net/ipv4/ip_input.c:ip_rcv_finish, which calls
-// - net/ipv4/route.c:ip_route_input_noref, which calls
-// - net/ipv4/route.c:ip_route_input_slow, which finds and drops martian
-// packets.
-//
-// To tell Linux not to drop these packets, you need to tell it to accept our
-// funny packets (which are completely valid and correct, but lack associated
-// in-kernel data because we use AF_PACKET):
-//
-// echo 1 >> /proc/sys/net/ipv4/conf/lo/accept_local
-// echo 1 >> /proc/sys/net/ipv4/conf/lo/route_localnet
-//
-// These tests require CAP_NET_RAW to run.
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-using ::testing::AnyOf;
-using ::testing::Eq;
-
-constexpr char kMessage[] = "soweoneul malhaebwa";
-constexpr in_port_t kPort = 0x409c; // htons(40000)
-
-// Send kMessage via sock to loopback
-void SendUDPMessage(int sock) {
- struct sockaddr_in dest = {};
- dest.sin_port = kPort;
- dest.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- dest.sin_family = AF_INET;
- EXPECT_THAT(sendto(sock, kMessage, sizeof(kMessage), 0,
- reinterpret_cast<struct sockaddr*>(&dest), sizeof(dest)),
- SyscallSucceedsWithValue(sizeof(kMessage)));
-}
-
-//
-// Raw tests. Packets sent with raw AF_PACKET sockets always include link layer
-// headers.
-//
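As a compact illustration of the difference described above: for the UDP-over-IPv4 packets used in these tests, raw and cooked buffers differ only by the leading Ethernet header. The constants below are a sketch for illustration, not part of the original file.

    #include <net/ethernet.h>
    #include <netinet/ip.h>
    #include <netinet/udp.h>
    #include <stddef.h>

    // SOCK_DGRAM ("cooked"): the buffer carries no link-layer header.
    constexpr size_t kCookedIPOffset = 0;
    constexpr size_t kCookedUDPOffset = sizeof(struct iphdr);
    constexpr size_t kCookedPayloadOffset =
        kCookedUDPOffset + sizeof(struct udphdr);

    // SOCK_RAW: the buffer starts with the Ethernet header.
    constexpr size_t kRawIPOffset = sizeof(struct ethhdr);
    constexpr size_t kRawUDPOffset = kRawIPOffset + sizeof(struct iphdr);
    constexpr size_t kRawPayloadOffset = kRawUDPOffset + sizeof(struct udphdr);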
-
-// Tests for "raw" (SOCK_RAW) packet(7) sockets.
-class RawPacketTest : public ::testing::TestWithParam<int> {
- protected:
- // Creates a socket to be used in tests.
- void SetUp() override;
-
- // Closes the socket created by SetUp().
- void TearDown() override;
-
- // Gets the device index of the loopback device.
- int GetLoopbackIndex();
-
- // The socket used for both reading and writing.
- int s_;
-};
-
-void RawPacketTest::SetUp() {
- if (!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW))) {
- ASSERT_THAT(socket(AF_PACKET, SOCK_RAW, htons(GetParam())),
- SyscallFailsWithErrno(EPERM));
- GTEST_SKIP();
- }
-
- if (!IsRunningOnGvisor()) {
- // Ensure that looped back packets aren't rejected by the kernel.
- FileDescriptor acceptLocal = ASSERT_NO_ERRNO_AND_VALUE(
- Open("/proc/sys/net/ipv4/conf/lo/accept_local", O_RDWR));
- FileDescriptor routeLocalnet = ASSERT_NO_ERRNO_AND_VALUE(
- Open("/proc/sys/net/ipv4/conf/lo/route_localnet", O_RDWR));
- char enabled;
- ASSERT_THAT(read(acceptLocal.get(), &enabled, 1),
- SyscallSucceedsWithValue(1));
- if (enabled != '1') {
- enabled = '1';
- ASSERT_THAT(lseek(acceptLocal.get(), 0, SEEK_SET),
- SyscallSucceedsWithValue(0));
- ASSERT_THAT(write(acceptLocal.get(), &enabled, 1),
- SyscallSucceedsWithValue(1));
- ASSERT_THAT(lseek(acceptLocal.get(), 0, SEEK_SET),
- SyscallSucceedsWithValue(0));
- ASSERT_THAT(read(acceptLocal.get(), &enabled, 1),
- SyscallSucceedsWithValue(1));
- ASSERT_EQ(enabled, '1');
- }
-
- ASSERT_THAT(read(routeLocalnet.get(), &enabled, 1),
- SyscallSucceedsWithValue(1));
- if (enabled != '1') {
- enabled = '1';
- ASSERT_THAT(lseek(routeLocalnet.get(), 0, SEEK_SET),
- SyscallSucceedsWithValue(0));
- ASSERT_THAT(write(routeLocalnet.get(), &enabled, 1),
- SyscallSucceedsWithValue(1));
- ASSERT_THAT(lseek(routeLocalnet.get(), 0, SEEK_SET),
- SyscallSucceedsWithValue(0));
- ASSERT_THAT(read(routeLocalnet.get(), &enabled, 1),
- SyscallSucceedsWithValue(1));
- ASSERT_EQ(enabled, '1');
- }
- }
-
- ASSERT_THAT(s_ = socket(AF_PACKET, SOCK_RAW, htons(GetParam())),
- SyscallSucceeds());
-}
-
-void RawPacketTest::TearDown() {
- // TearDown will be run even if we skip the test.
- if (ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW))) {
- EXPECT_THAT(close(s_), SyscallSucceeds());
- }
-}
-
-int RawPacketTest::GetLoopbackIndex() {
- struct ifreq ifr;
- snprintf(ifr.ifr_name, IFNAMSIZ, "lo");
- EXPECT_THAT(ioctl(s_, SIOCGIFINDEX, &ifr), SyscallSucceeds());
- EXPECT_NE(ifr.ifr_ifindex, 0);
- return ifr.ifr_ifindex;
-}
-
-// Receive via a packet socket.
-TEST_P(RawPacketTest, Receive) {
- // Let's use a simple IP payload: a UDP datagram.
- FileDescriptor udp_sock =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, 0));
- SendUDPMessage(udp_sock.get());
-
- // Wait for the socket to become readable.
- struct pollfd pfd = {};
- pfd.fd = s_;
- pfd.events = POLLIN;
- EXPECT_THAT(RetryEINTR(poll)(&pfd, 1, 2000), SyscallSucceedsWithValue(1));
-
- // Read and verify the data.
- constexpr size_t packet_size = sizeof(struct ethhdr) + sizeof(struct iphdr) +
- sizeof(struct udphdr) + sizeof(kMessage);
- char buf[64];
- struct sockaddr_ll src = {};
- socklen_t src_len = sizeof(src);
- ASSERT_THAT(recvfrom(s_, buf, sizeof(buf), 0,
- reinterpret_cast<struct sockaddr*>(&src), &src_len),
- SyscallSucceedsWithValue(packet_size));
-  // sockaddr_ll ends with an 8-byte physical address field, but ethernet
-  // addresses only use 6 bytes. Linux used to return sizeof(sockaddr_ll)-2
-  // here, but since commit b2cf86e1563e33a14a1c69b3e508d15dc12f804c it returns
-  // sizeof(sockaddr_ll).
- ASSERT_THAT(src_len, AnyOf(Eq(sizeof(src)), Eq(sizeof(src) - 2)));
-
- // Verify the source address.
- EXPECT_EQ(src.sll_family, AF_PACKET);
- EXPECT_EQ(src.sll_ifindex, GetLoopbackIndex());
- EXPECT_EQ(src.sll_halen, ETH_ALEN);
- EXPECT_EQ(ntohs(src.sll_protocol), ETH_P_IP);
- // This came from the loopback device, so the address is all 0s.
- for (int i = 0; i < src.sll_halen; i++) {
- EXPECT_EQ(src.sll_addr[i], 0);
- }
-
- // Verify the ethernet header. We memcpy to deal with pointer alignment.
- struct ethhdr eth = {};
- memcpy(&eth, buf, sizeof(eth));
-  // The destination and source addresses should be 0 for loopback.
- for (int i = 0; i < ETH_ALEN; i++) {
- EXPECT_EQ(eth.h_dest[i], 0);
- EXPECT_EQ(eth.h_source[i], 0);
- }
- EXPECT_EQ(eth.h_proto, htons(ETH_P_IP));
-
-  // Verify the IP header. We memcpy to deal with pointer alignment.
- struct iphdr ip = {};
- memcpy(&ip, buf + sizeof(ethhdr), sizeof(ip));
- EXPECT_EQ(ip.ihl, 5);
- EXPECT_EQ(ip.version, 4);
- EXPECT_EQ(ip.tot_len, htons(packet_size - sizeof(eth)));
- EXPECT_EQ(ip.protocol, IPPROTO_UDP);
- EXPECT_EQ(ip.daddr, htonl(INADDR_LOOPBACK));
- EXPECT_EQ(ip.saddr, htonl(INADDR_LOOPBACK));
-
-  // Verify the UDP header. We memcpy to deal with pointer alignment.
- struct udphdr udp = {};
- memcpy(&udp, buf + sizeof(eth) + sizeof(iphdr), sizeof(udp));
- EXPECT_EQ(udp.dest, kPort);
- EXPECT_EQ(udp.len, htons(sizeof(udphdr) + sizeof(kMessage)));
-
- // Verify the payload.
- char* payload = reinterpret_cast<char*>(buf + sizeof(eth) + sizeof(iphdr) +
- sizeof(udphdr));
- EXPECT_EQ(strncmp(payload, kMessage, sizeof(kMessage)), 0);
-}
-
-// Send via a packet socket.
-TEST_P(RawPacketTest, Send) {
- // Let's send a UDP packet and receive it using a regular UDP socket.
- FileDescriptor udp_sock =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, 0));
- struct sockaddr_in bind_addr = {};
- bind_addr.sin_family = AF_INET;
- bind_addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- bind_addr.sin_port = kPort;
- ASSERT_THAT(
- bind(udp_sock.get(), reinterpret_cast<struct sockaddr*>(&bind_addr),
- sizeof(bind_addr)),
- SyscallSucceeds());
-
- // Set up the destination physical address.
- struct sockaddr_ll dest = {};
- dest.sll_family = AF_PACKET;
- dest.sll_halen = ETH_ALEN;
- dest.sll_ifindex = GetLoopbackIndex();
- dest.sll_protocol = htons(ETH_P_IP);
- // We're sending to the loopback device, so the address is all 0s.
- memset(dest.sll_addr, 0x00, ETH_ALEN);
-
- // Set up the ethernet header. The kernel takes care of the footer.
- // We're sending to and from hardware address 0 (loopback).
- struct ethhdr eth = {};
- eth.h_proto = htons(ETH_P_IP);
-
- // Set up the IP header.
- struct iphdr iphdr = {};
- iphdr.ihl = 5;
- iphdr.version = 4;
- iphdr.tos = 0;
- iphdr.tot_len =
- htons(sizeof(struct iphdr) + sizeof(struct udphdr) + sizeof(kMessage));
- // Get a pseudo-random ID. If we clash with an in-use ID the test will fail,
- // but we have no way of getting an ID we know to be good.
- srand(*reinterpret_cast<unsigned int*>(&iphdr));
- iphdr.id = rand();
- // Linux sets this bit ("do not fragment") for small packets.
- iphdr.frag_off = 1 << 6;
- iphdr.ttl = 64;
- iphdr.protocol = IPPROTO_UDP;
- iphdr.daddr = htonl(INADDR_LOOPBACK);
- iphdr.saddr = htonl(INADDR_LOOPBACK);
- iphdr.check = IPChecksum(iphdr);
-
- // Set up the UDP header.
- struct udphdr udphdr = {};
- udphdr.source = kPort;
- udphdr.dest = kPort;
- udphdr.len = htons(sizeof(udphdr) + sizeof(kMessage));
- udphdr.check = UDPChecksum(iphdr, udphdr, kMessage, sizeof(kMessage));
-
-  // Copy all three headers and the payload into our packet buffer.
- char
- send_buf[sizeof(eth) + sizeof(iphdr) + sizeof(udphdr) + sizeof(kMessage)];
- memcpy(send_buf, &eth, sizeof(eth));
- memcpy(send_buf + sizeof(ethhdr), &iphdr, sizeof(iphdr));
- memcpy(send_buf + sizeof(ethhdr) + sizeof(iphdr), &udphdr, sizeof(udphdr));
- memcpy(send_buf + sizeof(ethhdr) + sizeof(iphdr) + sizeof(udphdr), kMessage,
- sizeof(kMessage));
-
- // We don't implement writing to packet sockets on gVisor.
- if (IsRunningOnGvisor()) {
- ASSERT_THAT(sendto(s_, send_buf, sizeof(send_buf), 0,
- reinterpret_cast<struct sockaddr*>(&dest), sizeof(dest)),
- SyscallFailsWithErrno(EINVAL));
- GTEST_SKIP();
- }
-
- // Send it.
- ASSERT_THAT(sendto(s_, send_buf, sizeof(send_buf), 0,
- reinterpret_cast<struct sockaddr*>(&dest), sizeof(dest)),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Wait for the packet to become available on both sockets.
- struct pollfd pfd = {};
- pfd.fd = udp_sock.get();
- pfd.events = POLLIN;
- ASSERT_THAT(RetryEINTR(poll)(&pfd, 1, 5000), SyscallSucceedsWithValue(1));
- pfd.fd = s_;
- pfd.events = POLLIN;
- ASSERT_THAT(RetryEINTR(poll)(&pfd, 1, 5000), SyscallSucceedsWithValue(1));
-
- // Receive on the packet socket.
- char recv_buf[sizeof(send_buf)];
- ASSERT_THAT(recv(s_, recv_buf, sizeof(recv_buf), 0),
- SyscallSucceedsWithValue(sizeof(recv_buf)));
- ASSERT_EQ(memcmp(recv_buf, send_buf, sizeof(send_buf)), 0);
-
- // Receive on the UDP socket.
- struct sockaddr_in src;
- socklen_t src_len = sizeof(src);
- ASSERT_THAT(recvfrom(udp_sock.get(), recv_buf, sizeof(recv_buf), MSG_DONTWAIT,
- reinterpret_cast<struct sockaddr*>(&src), &src_len),
- SyscallSucceedsWithValue(sizeof(kMessage)));
- // Check src and payload.
- EXPECT_EQ(strncmp(recv_buf, kMessage, sizeof(kMessage)), 0);
- EXPECT_EQ(src.sin_family, AF_INET);
- EXPECT_EQ(src.sin_port, kPort);
- EXPECT_EQ(src.sin_addr.s_addr, htonl(INADDR_LOOPBACK));
-}
-
-// Check that setting SO_RCVBUF below min is clamped to the minimum
-// receive buffer size.
-TEST_P(RawPacketTest, SetSocketRecvBufBelowMin) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- // Discover minimum receive buf size by trying to set it to zero.
- // See:
- // https://github.com/torvalds/linux/blob/a5dc8300df75e8b8384b4c82225f1e4a0b4d9b55/net/core/sock.c#L820
- constexpr int kRcvBufSz = 0;
- ASSERT_THAT(
- setsockopt(s_, SOL_SOCKET, SO_RCVBUF, &kRcvBufSz, sizeof(kRcvBufSz)),
- SyscallSucceeds());
-
- int min = 0;
- socklen_t min_len = sizeof(min);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_RCVBUF, &min, &min_len),
- SyscallSucceeds());
-
- // Linux doubles the value so let's use a value that when doubled will still
- // be smaller than min.
- int below_min = min / 2 - 1;
- ASSERT_THAT(
- setsockopt(s_, SOL_SOCKET, SO_RCVBUF, &below_min, sizeof(below_min)),
- SyscallSucceeds());
-
- int val = 0;
- socklen_t val_len = sizeof(val);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_RCVBUF, &val, &val_len),
- SyscallSucceeds());
-
- ASSERT_EQ(min, val);
-}
-
-// Check that setting SO_RCVBUF above max is clamped to the maximum
-// receive buffer size.
-TEST_P(RawPacketTest, SetSocketRecvBufAboveMax) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- // Discover max buf size by trying to set the largest possible buffer size.
- constexpr int kRcvBufSz = 0xffffffff;
- ASSERT_THAT(
- setsockopt(s_, SOL_SOCKET, SO_RCVBUF, &kRcvBufSz, sizeof(kRcvBufSz)),
- SyscallSucceeds());
-
- int max = 0;
- socklen_t max_len = sizeof(max);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_RCVBUF, &max, &max_len),
- SyscallSucceeds());
-
- int above_max = max + 1;
- ASSERT_THAT(
- setsockopt(s_, SOL_SOCKET, SO_RCVBUF, &above_max, sizeof(above_max)),
- SyscallSucceeds());
-
- int val = 0;
- socklen_t val_len = sizeof(val);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_RCVBUF, &val, &val_len),
- SyscallSucceeds());
- ASSERT_EQ(max, val);
-}
-
-// Check that setting SO_RCVBUF min <= kRcvBufSz <= max is honored.
-TEST_P(RawPacketTest, SetSocketRecvBuf) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- int max = 0;
- int min = 0;
- {
- // Discover max buf size by trying to set a really large buffer size.
- constexpr int kRcvBufSz = 0xffffffff;
- ASSERT_THAT(
- setsockopt(s_, SOL_SOCKET, SO_RCVBUF, &kRcvBufSz, sizeof(kRcvBufSz)),
- SyscallSucceeds());
-
- max = 0;
- socklen_t max_len = sizeof(max);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_RCVBUF, &max, &max_len),
- SyscallSucceeds());
- }
-
- {
-    // Discover the minimum buffer size by trying to set a zero-size receive
-    // buffer.
- // See:
- // https://github.com/torvalds/linux/blob/a5dc8300df75e8b8384b4c82225f1e4a0b4d9b55/net/core/sock.c#L820
- constexpr int kRcvBufSz = 0;
- ASSERT_THAT(
- setsockopt(s_, SOL_SOCKET, SO_RCVBUF, &kRcvBufSz, sizeof(kRcvBufSz)),
- SyscallSucceeds());
-
- socklen_t min_len = sizeof(min);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_RCVBUF, &min, &min_len),
- SyscallSucceeds());
- }
-
- int quarter_sz = min + (max - min) / 4;
- ASSERT_THAT(
- setsockopt(s_, SOL_SOCKET, SO_RCVBUF, &quarter_sz, sizeof(quarter_sz)),
- SyscallSucceeds());
-
- int val = 0;
- socklen_t val_len = sizeof(val);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_RCVBUF, &val, &val_len),
- SyscallSucceeds());
-
- quarter_sz *= 2;
- ASSERT_EQ(quarter_sz, val);
-}
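The doubling relied on above (Linux reports back twice the value passed to setsockopt for SO_RCVBUF/SO_SNDBUF, reserving half for kernel bookkeeping) can be seen in isolation with a sketch like the following; the DemonstrateRcvBufDoubling helper and the 64 KiB request size are illustrative assumptions.

    #include <sys/socket.h>

    #include <cassert>

    // On Linux, getsockopt(SO_RCVBUF) returns double the value passed to
    // setsockopt(SO_RCVBUF), subject to the min/max clamping probed in the
    // tests above.
    void DemonstrateRcvBufDoubling(int sock) {
      constexpr int kRequested = 64 << 10;  // 64 KiB; assumed within limits.
      int ret = setsockopt(sock, SOL_SOCKET, SO_RCVBUF, &kRequested,
                           sizeof(kRequested));
      assert(ret == 0);

      int reported = 0;
      socklen_t len = sizeof(reported);
      ret = getsockopt(sock, SOL_SOCKET, SO_RCVBUF, &reported, &len);
      assert(ret == 0);
      assert(reported == 2 * kRequested);  // Holds only if unclamped.
    }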
-
-// Check that setting SO_SNDBUF below min is clamped to the minimum
-// send buffer size.
-TEST_P(RawPacketTest, SetSocketSendBufBelowMin) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- // Discover minimum buffer size by trying to set it to zero.
- constexpr int kSndBufSz = 0;
- ASSERT_THAT(
- setsockopt(s_, SOL_SOCKET, SO_SNDBUF, &kSndBufSz, sizeof(kSndBufSz)),
- SyscallSucceeds());
-
- int min = 0;
- socklen_t min_len = sizeof(min);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_SNDBUF, &min, &min_len),
- SyscallSucceeds());
-
- // Linux doubles the value so let's use a value that when doubled will still
- // be smaller than min.
- int below_min = min / 2 - 1;
- ASSERT_THAT(
- setsockopt(s_, SOL_SOCKET, SO_SNDBUF, &below_min, sizeof(below_min)),
- SyscallSucceeds());
-
- int val = 0;
- socklen_t val_len = sizeof(val);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_SNDBUF, &val, &val_len),
- SyscallSucceeds());
-
- ASSERT_EQ(min, val);
-}
-
-// Check that setting SO_SNDBUF above max is clamped to the maximum
-// send buffer size.
-TEST_P(RawPacketTest, SetSocketSendBufAboveMax) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- // Discover maximum buffer size by trying to set it to a large value.
- constexpr int kSndBufSz = 0xffffffff;
- ASSERT_THAT(
- setsockopt(s_, SOL_SOCKET, SO_SNDBUF, &kSndBufSz, sizeof(kSndBufSz)),
- SyscallSucceeds());
-
- int max = 0;
- socklen_t max_len = sizeof(max);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_SNDBUF, &max, &max_len),
- SyscallSucceeds());
-
- int above_max = max + 1;
- ASSERT_THAT(
- setsockopt(s_, SOL_SOCKET, SO_SNDBUF, &above_max, sizeof(above_max)),
- SyscallSucceeds());
-
- int val = 0;
- socklen_t val_len = sizeof(val);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_SNDBUF, &val, &val_len),
- SyscallSucceeds());
- ASSERT_EQ(max, val);
-}
-
-// Check that setting SO_SNDBUF min <= kSndBufSz <= max is honored.
-TEST_P(RawPacketTest, SetSocketSendBuf) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- int max = 0;
- int min = 0;
- {
- // Discover maximum buffer size by trying to set it to a large value.
- constexpr int kSndBufSz = 0xffffffff;
- ASSERT_THAT(
- setsockopt(s_, SOL_SOCKET, SO_SNDBUF, &kSndBufSz, sizeof(kSndBufSz)),
- SyscallSucceeds());
-
- max = 0;
- socklen_t max_len = sizeof(max);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_SNDBUF, &max, &max_len),
- SyscallSucceeds());
- }
-
- {
- // Discover minimum buffer size by trying to set it to zero.
- constexpr int kSndBufSz = 0;
- ASSERT_THAT(
- setsockopt(s_, SOL_SOCKET, SO_SNDBUF, &kSndBufSz, sizeof(kSndBufSz)),
- SyscallSucceeds());
-
- socklen_t min_len = sizeof(min);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_SNDBUF, &min, &min_len),
- SyscallSucceeds());
- }
-
- int quarter_sz = min + (max - min) / 4;
- ASSERT_THAT(
- setsockopt(s_, SOL_SOCKET, SO_SNDBUF, &quarter_sz, sizeof(quarter_sz)),
- SyscallSucceeds());
-
- int val = 0;
- socklen_t val_len = sizeof(val);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_SNDBUF, &val, &val_len),
- SyscallSucceeds());
-
- quarter_sz *= 2;
- ASSERT_EQ(quarter_sz, val);
-}
-
-TEST_P(RawPacketTest, GetSocketError) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- int val = 0;
- socklen_t val_len = sizeof(val);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_ERROR, &val, &val_len),
- SyscallSucceeds());
- ASSERT_EQ(val, 0);
-}
-
-TEST_P(RawPacketTest, GetSocketErrorBind) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- {
- // Bind to the loopback device.
- struct sockaddr_ll bind_addr = {};
- bind_addr.sll_family = AF_PACKET;
- bind_addr.sll_protocol = htons(GetParam());
- bind_addr.sll_ifindex = GetLoopbackIndex();
-
- ASSERT_THAT(bind(s_, reinterpret_cast<struct sockaddr*>(&bind_addr),
- sizeof(bind_addr)),
- SyscallSucceeds());
-
- // SO_ERROR should return no errors.
- int val = 0;
- socklen_t val_len = sizeof(val);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_ERROR, &val, &val_len),
- SyscallSucceeds());
- ASSERT_EQ(val, 0);
- }
-
- {
- // Now try binding to an invalid interface.
- struct sockaddr_ll bind_addr = {};
- bind_addr.sll_family = AF_PACKET;
- bind_addr.sll_protocol = htons(GetParam());
- bind_addr.sll_ifindex = 0xffff; // Just pick a really large number.
-
-    // Binding should fail with ENODEV.
- ASSERT_THAT(bind(s_, reinterpret_cast<struct sockaddr*>(&bind_addr),
- sizeof(bind_addr)),
- SyscallFailsWithErrno(ENODEV));
-
-    // SO_ERROR does not return an error when the device is invalid.
-    // On Linux there is just one oddball condition under which this can
-    // return an error: the device was valid and was then removed or disabled
-    // between the initial index check and the actual registration of the
-    // packet endpoint. On Netstack this is not possible, as the stack-global
-    // mutex is held across the check and registration.
- int val = 0;
- socklen_t val_len = sizeof(val);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_ERROR, &val, &val_len),
- SyscallSucceeds());
- ASSERT_EQ(val, 0);
- }
-}
-
-TEST_P(RawPacketTest, SetSocketDetachFilterNoInstalledFilter) {
- // TODO(gvisor.dev/2746): Support SO_ATTACH_FILTER/SO_DETACH_FILTER.
- //
-  // gVisor returns no error on SO_DETACH_FILTER even if there is no filter
-  // attached, unlike Linux, which returns ENOENT in such cases. This is
-  // because gVisor doesn't support SO_ATTACH_FILTER and just silently returns
-  // success.
- if (IsRunningOnGvisor()) {
- constexpr int val = 0;
- ASSERT_THAT(setsockopt(s_, SOL_SOCKET, SO_DETACH_FILTER, &val, sizeof(val)),
- SyscallSucceeds());
- return;
- }
- constexpr int val = 0;
- ASSERT_THAT(setsockopt(s_, SOL_SOCKET, SO_DETACH_FILTER, &val, sizeof(val)),
- SyscallFailsWithErrno(ENOENT));
-}
-
-TEST_P(RawPacketTest, GetSocketDetachFilter) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- int val = 0;
- socklen_t val_len = sizeof(val);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_DETACH_FILTER, &val, &val_len),
- SyscallFailsWithErrno(ENOPROTOOPT));
-}
-
-TEST_P(RawPacketTest, SetAndGetSocketLinger) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- int level = SOL_SOCKET;
- int type = SO_LINGER;
-
- struct linger sl;
- sl.l_onoff = 1;
- sl.l_linger = 5;
- ASSERT_THAT(setsockopt(s_, level, type, &sl, sizeof(sl)),
- SyscallSucceedsWithValue(0));
-
- struct linger got_linger = {};
- socklen_t length = sizeof(sl);
- ASSERT_THAT(getsockopt(s_, level, type, &got_linger, &length),
- SyscallSucceedsWithValue(0));
-
- ASSERT_EQ(length, sizeof(got_linger));
- EXPECT_EQ(0, memcmp(&sl, &got_linger, length));
-}
-
-TEST_P(RawPacketTest, GetSocketAcceptConn) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- int got = -1;
- socklen_t length = sizeof(got);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_ACCEPTCONN, &got, &length),
- SyscallSucceedsWithValue(0));
-
- ASSERT_EQ(length, sizeof(got));
- EXPECT_EQ(got, 0);
-}
-INSTANTIATE_TEST_SUITE_P(AllInetTests, RawPacketTest,
- ::testing::Values(ETH_P_IP, ETH_P_ALL));
-
-class RawPacketMsgSizeTest : public ::testing::TestWithParam<TestAddress> {};
-
-TEST_P(RawPacketMsgSizeTest, SendTooLong) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- TestAddress addr = GetParam().WithPort(kPort);
-
- FileDescriptor udp_sock =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(addr.family(), SOCK_RAW, IPPROTO_UDP));
-
- ASSERT_THAT(
- connect(udp_sock.get(), reinterpret_cast<struct sockaddr*>(&addr.addr),
- addr.addr_len),
- SyscallSucceeds());
-
- const char buf[65536] = {};
- ASSERT_THAT(send(udp_sock.get(), buf, sizeof(buf), 0),
- SyscallFailsWithErrno(EMSGSIZE));
-}
-
-TEST_P(RawPacketMsgSizeTest, SpliceTooLong) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- const char buf[65536] = {};
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- ASSERT_THAT(write(fds[1], buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- TestAddress addr = GetParam().WithPort(kPort);
-
- FileDescriptor udp_sock =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(addr.family(), SOCK_RAW, IPPROTO_UDP));
-
- ASSERT_THAT(
- connect(udp_sock.get(), reinterpret_cast<struct sockaddr*>(&addr.addr),
- addr.addr_len),
- SyscallSucceeds());
-
- ssize_t n = splice(fds[0], nullptr, udp_sock.get(), nullptr, sizeof(buf), 0);
- if (IsRunningOnGvisor()) {
- EXPECT_THAT(n, SyscallFailsWithErrno(EMSGSIZE));
- } else {
- // TODO(gvisor.dev/issue/138): Linux sends out multiple UDP datagrams, each
- // of the size of a page.
- EXPECT_THAT(n, SyscallSucceedsWithValue(sizeof(buf)));
- }
-}
-
-INSTANTIATE_TEST_SUITE_P(AllRawPacketMsgSizeTest, RawPacketMsgSizeTest,
- ::testing::Values(V4Loopback(), V6Loopback()));
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/partial_bad_buffer.cc b/test/syscalls/linux/partial_bad_buffer.cc
deleted file mode 100644
index 223ddc0c8..000000000
--- a/test/syscalls/linux/partial_bad_buffer.cc
+++ /dev/null
@@ -1,408 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <netinet/in.h>
-#include <netinet/tcp.h>
-#include <sys/mman.h>
-#include <sys/socket.h>
-#include <sys/stat.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <sys/uio.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "absl/time/clock.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-using ::testing::Gt;
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-constexpr char kMessage[] = "hello world";
-
-// PartialBadBufferTest checks the result of various IO syscalls when passed a
-// buffer that does not have the space specified in the syscall (most of it is
-// PROT_NONE). Linux is annoyingly inconsistent among different syscalls, so we
-// test all of them.
-class PartialBadBufferTest : public ::testing::Test {
- protected:
- void SetUp() override {
- // Create and open a directory for getdents cases.
- directory_ = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- ASSERT_THAT(
- directory_fd_ = open(directory_.path().c_str(), O_RDONLY | O_DIRECTORY),
- SyscallSucceeds());
-
- // Create and open a normal file, placing it in the directory
- // so the getdents cases have some dirents.
- name_ = JoinPath(directory_.path(), "a");
- ASSERT_THAT(fd_ = open(name_.c_str(), O_RDWR | O_CREAT, 0644),
- SyscallSucceeds());
-
- // Write some initial data.
- size_t size = sizeof(kMessage) - 1;
- EXPECT_THAT(WriteFd(fd_, &kMessage, size), SyscallSucceedsWithValue(size));
- ASSERT_THAT(lseek(fd_, 0, SEEK_SET), SyscallSucceeds());
-
-    // Map a usable buffer.
- addr_ = mmap(0, 2 * kPageSize, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- ASSERT_NE(addr_, MAP_FAILED);
- char* buf = reinterpret_cast<char*>(addr_);
-
- // Guard page for our read to run into.
- ASSERT_THAT(mprotect(reinterpret_cast<void*>(buf + kPageSize), kPageSize,
- PROT_NONE),
- SyscallSucceeds());
-
- // Leave only one free byte in the buffer.
- bad_buffer_ = buf + kPageSize - 1;
- }
-
- off_t Size() {
- struct stat st;
- int rc = fstat(fd_, &st);
- if (rc < 0) {
- return static_cast<off_t>(rc);
- }
- return st.st_size;
- }
-
- void TearDown() override {
- EXPECT_THAT(munmap(addr_, 2 * kPageSize), SyscallSucceeds()) << addr_;
- EXPECT_THAT(close(fd_), SyscallSucceeds());
- EXPECT_THAT(unlink(name_.c_str()), SyscallSucceeds());
- EXPECT_THAT(close(directory_fd_), SyscallSucceeds());
- }
-
- // Return buffer with n bytes of free space.
- // N.B. this is the same buffer used to back bad_buffer_.
- char* FreeBytes(size_t n) {
- TEST_CHECK(n <= static_cast<size_t>(4096));
- return reinterpret_cast<char*>(addr_) + kPageSize - n;
- }
-
- std::string name_;
- int fd_;
- TempPath directory_;
- int directory_fd_;
- void* addr_;
- char* bad_buffer_;
-};
-
-// We do both "big" and "small" tests to try to hit the "zero copy" and
-// non-"zero copy" paths, which handle faults differently.
-
-TEST_F(PartialBadBufferTest, ReadBig) {
- EXPECT_THAT(RetryEINTR(read)(fd_, bad_buffer_, kPageSize),
- SyscallSucceedsWithValue(1));
- EXPECT_EQ('h', bad_buffer_[0]);
-}
-
-TEST_F(PartialBadBufferTest, ReadSmall) {
- EXPECT_THAT(RetryEINTR(read)(fd_, bad_buffer_, 10),
- SyscallSucceedsWithValue(1));
- EXPECT_EQ('h', bad_buffer_[0]);
-}
-
-TEST_F(PartialBadBufferTest, PreadBig) {
- EXPECT_THAT(RetryEINTR(pread)(fd_, bad_buffer_, kPageSize, 0),
- SyscallSucceedsWithValue(1));
- EXPECT_EQ('h', bad_buffer_[0]);
-}
-
-TEST_F(PartialBadBufferTest, PreadSmall) {
- EXPECT_THAT(RetryEINTR(pread)(fd_, bad_buffer_, 10, 0),
- SyscallSucceedsWithValue(1));
- EXPECT_EQ('h', bad_buffer_[0]);
-}
-
-TEST_F(PartialBadBufferTest, ReadvBig) {
- struct iovec vec;
- vec.iov_base = bad_buffer_;
- vec.iov_len = kPageSize;
-
- EXPECT_THAT(RetryEINTR(readv)(fd_, &vec, 1), SyscallSucceedsWithValue(1));
- EXPECT_EQ('h', bad_buffer_[0]);
-}
-
-TEST_F(PartialBadBufferTest, ReadvSmall) {
- struct iovec vec;
- vec.iov_base = bad_buffer_;
- vec.iov_len = 10;
-
- EXPECT_THAT(RetryEINTR(readv)(fd_, &vec, 1), SyscallSucceedsWithValue(1));
- EXPECT_EQ('h', bad_buffer_[0]);
-}
-
-TEST_F(PartialBadBufferTest, PreadvBig) {
- struct iovec vec;
- vec.iov_base = bad_buffer_;
- vec.iov_len = kPageSize;
-
- EXPECT_THAT(RetryEINTR(preadv)(fd_, &vec, 1, 0), SyscallSucceedsWithValue(1));
- EXPECT_EQ('h', bad_buffer_[0]);
-}
-
-TEST_F(PartialBadBufferTest, PreadvSmall) {
- struct iovec vec;
- vec.iov_base = bad_buffer_;
- vec.iov_len = 10;
-
- EXPECT_THAT(RetryEINTR(preadv)(fd_, &vec, 1, 0), SyscallSucceedsWithValue(1));
- EXPECT_EQ('h', bad_buffer_[0]);
-}
-
-TEST_F(PartialBadBufferTest, WriteBig) {
- off_t orig_size = Size();
- int n;
-
- ASSERT_THAT(lseek(fd_, orig_size, SEEK_SET), SyscallSucceeds());
- EXPECT_THAT(
- (n = RetryEINTR(write)(fd_, bad_buffer_, kPageSize)),
- AnyOf(SyscallFailsWithErrno(EFAULT), SyscallSucceedsWithValue(1)));
- EXPECT_EQ(Size(), orig_size + (n >= 0 ? n : 0));
-}
-
-TEST_F(PartialBadBufferTest, WriteSmall) {
- off_t orig_size = Size();
- int n;
-
- ASSERT_THAT(lseek(fd_, orig_size, SEEK_SET), SyscallSucceeds());
- EXPECT_THAT(
- (n = RetryEINTR(write)(fd_, bad_buffer_, 10)),
- AnyOf(SyscallFailsWithErrno(EFAULT), SyscallSucceedsWithValue(1)));
- EXPECT_EQ(Size(), orig_size + (n >= 0 ? n : 0));
-}
-
-TEST_F(PartialBadBufferTest, PwriteBig) {
- off_t orig_size = Size();
- int n;
-
- EXPECT_THAT(
- (n = RetryEINTR(pwrite)(fd_, bad_buffer_, kPageSize, orig_size)),
- AnyOf(SyscallFailsWithErrno(EFAULT), SyscallSucceedsWithValue(1)));
- EXPECT_EQ(Size(), orig_size + (n >= 0 ? n : 0));
-}
-
-TEST_F(PartialBadBufferTest, PwriteSmall) {
- off_t orig_size = Size();
- int n;
-
- EXPECT_THAT(
- (n = RetryEINTR(pwrite)(fd_, bad_buffer_, 10, orig_size)),
- AnyOf(SyscallFailsWithErrno(EFAULT), SyscallSucceedsWithValue(1)));
- EXPECT_EQ(Size(), orig_size + (n >= 0 ? n : 0));
-}
-
-TEST_F(PartialBadBufferTest, WritevBig) {
- struct iovec vec;
- vec.iov_base = bad_buffer_;
- vec.iov_len = kPageSize;
- off_t orig_size = Size();
- int n;
-
- ASSERT_THAT(lseek(fd_, orig_size, SEEK_SET), SyscallSucceeds());
- EXPECT_THAT(
- (n = RetryEINTR(writev)(fd_, &vec, 1)),
- AnyOf(SyscallFailsWithErrno(EFAULT), SyscallSucceedsWithValue(1)));
- EXPECT_EQ(Size(), orig_size + (n >= 0 ? n : 0));
-}
-
-TEST_F(PartialBadBufferTest, WritevSmall) {
- struct iovec vec;
- vec.iov_base = bad_buffer_;
- vec.iov_len = 10;
- off_t orig_size = Size();
- int n;
-
- ASSERT_THAT(lseek(fd_, orig_size, SEEK_SET), SyscallSucceeds());
- EXPECT_THAT(
- (n = RetryEINTR(writev)(fd_, &vec, 1)),
- AnyOf(SyscallFailsWithErrno(EFAULT), SyscallSucceedsWithValue(1)));
- EXPECT_EQ(Size(), orig_size + (n >= 0 ? n : 0));
-}
-
-TEST_F(PartialBadBufferTest, PwritevBig) {
- struct iovec vec;
- vec.iov_base = bad_buffer_;
- vec.iov_len = kPageSize;
- off_t orig_size = Size();
- int n;
-
- EXPECT_THAT(
- (n = RetryEINTR(pwritev)(fd_, &vec, 1, orig_size)),
- AnyOf(SyscallFailsWithErrno(EFAULT), SyscallSucceedsWithValue(1)));
- EXPECT_EQ(Size(), orig_size + (n >= 0 ? n : 0));
-}
-
-TEST_F(PartialBadBufferTest, PwritevSmall) {
- struct iovec vec;
- vec.iov_base = bad_buffer_;
- vec.iov_len = 10;
- off_t orig_size = Size();
- int n;
-
- EXPECT_THAT(
- (n = RetryEINTR(pwritev)(fd_, &vec, 1, orig_size)),
- AnyOf(SyscallFailsWithErrno(EFAULT), SyscallSucceedsWithValue(1)));
- EXPECT_EQ(Size(), orig_size + (n >= 0 ? n : 0));
-}
-
-// getdents returns EFAULT when you claim the buffer is large enough, but it
-// actually isn't.
-TEST_F(PartialBadBufferTest, GetdentsBig) {
- EXPECT_THAT(RetryEINTR(syscall)(SYS_getdents64, directory_fd_, bad_buffer_,
- kPageSize),
- SyscallFailsWithErrno(EFAULT));
-}
-
-// getdents returns EINVAL when you claim the buffer is too small.
-TEST_F(PartialBadBufferTest, GetdentsSmall) {
- EXPECT_THAT(
- RetryEINTR(syscall)(SYS_getdents64, directory_fd_, bad_buffer_, 10),
- SyscallFailsWithErrno(EINVAL));
-}
-
-// getdents will write entries into a buffer if there is space before it faults.
-TEST_F(PartialBadBufferTest, GetdentsOneEntry) {
- // 30 bytes is enough for one (small) entry.
- char* buf = FreeBytes(30);
-
- EXPECT_THAT(
- RetryEINTR(syscall)(SYS_getdents64, directory_fd_, buf, kPageSize),
- SyscallSucceedsWithValue(Gt(0)));
-}
-
-PosixErrorOr<sockaddr_storage> InetLoopbackAddr(int family) {
- struct sockaddr_storage addr;
- memset(&addr, 0, sizeof(addr));
- addr.ss_family = family;
- switch (family) {
- case AF_INET:
- reinterpret_cast<struct sockaddr_in*>(&addr)->sin_addr.s_addr =
- htonl(INADDR_LOOPBACK);
- break;
- case AF_INET6:
- reinterpret_cast<struct sockaddr_in6*>(&addr)->sin6_addr =
- in6addr_loopback;
- break;
- default:
- return PosixError(EINVAL,
- absl::StrCat("unknown socket family: ", family));
- }
- return addr;
-}
-
-// SendMsgTCP verifies that calling sendmsg with a bad address returns
-// EFAULT. It also verifies that passing a buffer made up of two pages, one
-// valid and one guard page, succeeds as long as the write is exactly the
-// size of one page.
-TEST_F(PartialBadBufferTest, SendMsgTCP) {
- // FIXME(b/171436815): Netstack save/restore is broken.
- const DisableSave ds;
-
- auto listen_socket =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_STREAM, IPPROTO_TCP));
-
- // Initialize address to the loopback one.
- sockaddr_storage addr = ASSERT_NO_ERRNO_AND_VALUE(InetLoopbackAddr(AF_INET));
- socklen_t addrlen = sizeof(addr);
-
- // Bind to some port then start listening.
- ASSERT_THAT(bind(listen_socket.get(),
- reinterpret_cast<struct sockaddr*>(&addr), addrlen),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(listen_socket.get(), SOMAXCONN), SyscallSucceeds());
-
- // Get the address we're listening on, then connect to it. We need to do this
- // because we're allowing the stack to pick a port for us.
- ASSERT_THAT(getsockname(listen_socket.get(),
- reinterpret_cast<struct sockaddr*>(&addr), &addrlen),
- SyscallSucceeds());
-
- auto send_socket =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_STREAM, IPPROTO_TCP));
-
- ASSERT_THAT(
- RetryEINTR(connect)(send_socket.get(),
- reinterpret_cast<struct sockaddr*>(&addr), addrlen),
- SyscallSucceeds());
-
- // Accept the connection.
- auto recv_socket =
- ASSERT_NO_ERRNO_AND_VALUE(Accept(listen_socket.get(), nullptr, nullptr));
-
-  // TODO(gvisor.dev/issue/674): Update this once Netstack matches Linux
-  // behavior on a setsockopt of SO_RCVBUF/SO_SNDBUF.
-  //
-  // Set SO_SNDBUF for the socket to exactly kPageSize+1.
-  //
-  // gVisor does not double the value passed in SO_SNDBUF like Linux does, so
-  // we increase it by 1 byte here so that we can test writing 1 byte past the
-  // valid page and check that it correctly triggers an EFAULT. Otherwise, in
-  // gVisor the sendmsg call would simply return with no error after writing
-  // kPageSize bytes successfully.
- const uint32_t buf_size = kPageSize + 1;
- ASSERT_THAT(setsockopt(send_socket.get(), SOL_SOCKET, SO_SNDBUF, &buf_size,
- sizeof(buf_size)),
- SyscallSucceedsWithValue(0));
-
- struct msghdr hdr = {};
- struct iovec iov = {};
- iov.iov_base = bad_buffer_;
- iov.iov_len = kPageSize;
- hdr.msg_iov = &iov;
- hdr.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(sendmsg)(send_socket.get(), &hdr, 0),
- SyscallFailsWithErrno(EFAULT));
-
- // Now assert that writing kPageSize from addr_ succeeds.
- iov.iov_base = addr_;
- ASSERT_THAT(RetryEINTR(sendmsg)(send_socket.get(), &hdr, 0),
- SyscallSucceedsWithValue(kPageSize));
- // Read all the data out so that we drain the socket SND_BUF on the sender.
- std::vector<char> buffer(kPageSize);
- ASSERT_THAT(RetryEINTR(read)(recv_socket.get(), buffer.data(), kPageSize),
- SyscallSucceedsWithValue(kPageSize));
-
-  // Sleep for a short while to ensure that we have time to process the ACKs.
-  // This is not strictly required except when running under gotsan, which is a
-  // lot slower and can result in the next write writing only 1 byte instead of
-  // our intended kPageSize + 1.
- absl::SleepFor(absl::Milliseconds(50));
-
- // Now assert that writing > kPageSize results in EFAULT.
- iov.iov_len = kPageSize + 1;
- ASSERT_THAT(RetryEINTR(sendmsg)(send_socket.get(), &hdr, 0),
- SyscallFailsWithErrno(EFAULT));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/pause.cc b/test/syscalls/linux/pause.cc
deleted file mode 100644
index 8c05efd6f..000000000
--- a/test/syscalls/linux/pause.cc
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <signal.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <atomic>
-
-#include "gtest/gtest.h"
-#include "absl/synchronization/mutex.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/util/signal_util.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-void NoopSignalHandler(int sig, siginfo_t* info, void* context) {}
-
-} // namespace
-
-TEST(PauseTest, OnlyReturnsWhenSignalHandled) {
- struct sigaction sa;
- sigfillset(&sa.sa_mask);
-
- // Ensure that SIGUSR1 is ignored.
- sa.sa_handler = SIG_IGN;
- ASSERT_THAT(sigaction(SIGUSR1, &sa, nullptr), SyscallSucceeds());
-
- // Register a handler for SIGUSR2.
- sa.sa_sigaction = NoopSignalHandler;
- sa.sa_flags = SA_SIGINFO;
- ASSERT_THAT(sigaction(SIGUSR2, &sa, nullptr), SyscallSucceeds());
-
- // The child sets their own tid.
- absl::Mutex mu;
- pid_t child_tid = 0;
- bool child_tid_available = false;
- std::atomic<int> sent_signal{0};
- std::atomic<int> waking_signal{0};
- ScopedThread t([&] {
- mu.Lock();
- child_tid = gettid();
- child_tid_available = true;
- mu.Unlock();
- EXPECT_THAT(pause(), SyscallFailsWithErrno(EINTR));
- waking_signal.store(sent_signal.load());
- });
- mu.Lock();
- mu.Await(absl::Condition(&child_tid_available));
- mu.Unlock();
-
- // Wait a bit to let the child enter pause().
- absl::SleepFor(absl::Seconds(3));
-
- // The child should not be woken by SIGUSR1.
- sent_signal.store(SIGUSR1);
- ASSERT_THAT(tgkill(getpid(), child_tid, SIGUSR1), SyscallSucceeds());
- absl::SleepFor(absl::Seconds(3));
-
- // The child should be woken by SIGUSR2.
- sent_signal.store(SIGUSR2);
- ASSERT_THAT(tgkill(getpid(), child_tid, SIGUSR2), SyscallSucceeds());
- absl::SleepFor(absl::Seconds(3));
-
- EXPECT_EQ(SIGUSR2, waking_signal.load());
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/ping_socket.cc b/test/syscalls/linux/ping_socket.cc
deleted file mode 100644
index 8268e91da..000000000
--- a/test/syscalls/linux/ping_socket.cc
+++ /dev/null
@@ -1,291 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <netinet/in.h>
-#include <netinet/ip.h>
-#include <netinet/ip_icmp.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <cctype>
-#include <cstring>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "absl/algorithm/container.h"
-#include "absl/strings/str_join.h"
-#include "absl/types/optional.h"
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/test_util.h"
-
-// Note: These tests require /proc/sys/net/ipv4/ping_group_range to be
-// configured to allow the tester to create ping sockets (see icmp(7)).
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-// Test that ICMP port exhaustion returns EAGAIN.
-//
-// We disable both random and cooperative S/R for this test as it makes way too
-// many syscalls.
-TEST(PingSocket, ICMPPortExhaustion) {
- DisableSave ds;
-
- {
- auto s = Socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);
- if (!s.ok()) {
- ASSERT_EQ(s.error().errno_value(), EACCES);
- GTEST_SKIP() << "TODO(gvisor.dev/issue/6126): Buildkite does not allow "
- "creation of ICMP or ICMPv6 sockets";
- }
- }
-
- const struct sockaddr_in addr = {
- .sin_family = AF_INET,
- .sin_addr =
- {
- .s_addr = htonl(INADDR_LOOPBACK),
- },
- };
-
- std::vector<FileDescriptor> sockets;
- constexpr int kSockets = 65536;
- for (int i = 0; i < kSockets; i++) {
- auto s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP));
- int ret = connect(s.get(), reinterpret_cast<const struct sockaddr*>(&addr),
- sizeof(addr));
- if (ret == 0) {
- sockets.push_back(std::move(s));
- continue;
- }
- ASSERT_THAT(ret, SyscallFailsWithErrno(EAGAIN));
- break;
- }
-}
-
-struct BindTestCase {
- TestAddress bind_to;
- int want = 0;
- absl::optional<int> want_gvisor;
-};
-
-// Test fixture for socket binding.
-class Fixture
- : public ::testing::TestWithParam<std::tuple<SocketKind, BindTestCase>> {};
-
-TEST_P(Fixture, Bind) {
- auto [socket_factory, test_case] = GetParam();
- auto socket = socket_factory.Create();
- if (!socket.ok()) {
- ASSERT_EQ(socket.error().errno_value(), EACCES);
- GTEST_SKIP() << "TODO(gvisor.dev/issue/6126): Buildkite does not allow "
- "creation of ICMP or ICMPv6 sockets";
- }
- auto socket_fd = std::move(socket).ValueOrDie();
-
- const int want = test_case.want_gvisor.has_value() && IsRunningOnGvisor()
- ? *test_case.want_gvisor
- : test_case.want;
- if (want == 0) {
- EXPECT_THAT(bind(socket_fd->get(), AsSockAddr(&test_case.bind_to.addr),
- test_case.bind_to.addr_len),
- SyscallSucceeds());
- } else {
- EXPECT_THAT(bind(socket_fd->get(), AsSockAddr(&test_case.bind_to.addr),
- test_case.bind_to.addr_len),
- SyscallFailsWithErrno(want));
- }
-}
-
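-// Bind test cases for IPv4 ICMP (ping) sockets.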
-std::vector<std::tuple<SocketKind, BindTestCase>> ICMPTestCases() {
- return ApplyVec<std::tuple<SocketKind, BindTestCase>>(
- [](const BindTestCase& test_case) {
- return std::make_tuple(ICMPUnboundSocket(0), test_case);
- },
- std::vector<BindTestCase>{
- {
- .bind_to = V4Any(),
- .want = 0,
- .want_gvisor = 0,
- },
- {
- .bind_to = V4Broadcast(),
- .want = EADDRNOTAVAIL,
- // TODO(gvisor.dev/issue/5711): Remove want_gvisor once ICMP
- // sockets are no longer allowed to bind to broadcast addresses.
- .want_gvisor = 0,
- },
- {
- .bind_to = V4Loopback(),
- .want = 0,
- },
- {
- .bind_to = V4LoopbackSubnetBroadcast(),
- .want = EADDRNOTAVAIL,
- // TODO(gvisor.dev/issue/5711): Remove want_gvisor once ICMP
- // sockets are no longer allowed to bind to broadcast addresses.
- .want_gvisor = 0,
- },
- {
- .bind_to = V4Multicast(),
- .want = EADDRNOTAVAIL,
- },
- {
- .bind_to = V4MulticastAllHosts(),
- .want = EADDRNOTAVAIL,
- },
- {
- .bind_to = V4AddrStr("IPv4UnknownUnicast", "192.168.1.1"),
- .want = EADDRNOTAVAIL,
- },
- // TODO(gvisor.dev/issue/6021): Remove want_gvisor from all the test
- // cases below once ICMP sockets return EAFNOSUPPORT when binding to
- // IPv6 addresses.
- {
- .bind_to = V6Any(),
- .want = EAFNOSUPPORT,
- .want_gvisor = EINVAL,
- },
- {
- .bind_to = V6Loopback(),
- .want = EAFNOSUPPORT,
- .want_gvisor = EINVAL,
- },
- {
- .bind_to = V6Multicast(),
- .want = EAFNOSUPPORT,
- .want_gvisor = EINVAL,
- },
- {
- .bind_to = V6MulticastInterfaceLocalAllNodes(),
- .want = EAFNOSUPPORT,
- .want_gvisor = EINVAL,
- },
- {
- .bind_to = V6MulticastLinkLocalAllNodes(),
- .want = EAFNOSUPPORT,
- .want_gvisor = EINVAL,
- },
- {
- .bind_to = V6MulticastLinkLocalAllRouters(),
- .want = EAFNOSUPPORT,
- .want_gvisor = EINVAL,
- },
- {
- .bind_to = V6AddrStr("IPv6UnknownUnicast", "fc00::1"),
- .want = EAFNOSUPPORT,
- .want_gvisor = EINVAL,
- },
- });
-}
-
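-// Bind test cases for ICMPv6 (ping) sockets.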
-std::vector<std::tuple<SocketKind, BindTestCase>> ICMPv6TestCases() {
- return ApplyVec<std::tuple<SocketKind, BindTestCase>>(
- [](const BindTestCase& test_case) {
- return std::make_tuple(ICMPv6UnboundSocket(0), test_case);
- },
- std::vector<BindTestCase>{
- {
- .bind_to = V4Any(),
- .want = EINVAL,
- },
- {
- .bind_to = V4Broadcast(),
- .want = EINVAL,
- },
- {
- .bind_to = V4Loopback(),
- .want = EINVAL,
- },
- {
- .bind_to = V4LoopbackSubnetBroadcast(),
- .want = EINVAL,
- },
- {
- .bind_to = V4Multicast(),
- .want = EINVAL,
- },
- {
- .bind_to = V4MulticastAllHosts(),
- .want = EINVAL,
- },
- {
- .bind_to = V4AddrStr("IPv4UnknownUnicast", "192.168.1.1"),
- .want = EINVAL,
- },
- {
- .bind_to = V6Any(),
- .want = 0,
- },
- {
- .bind_to = V6Loopback(),
- .want = 0,
- },
- // TODO(gvisor.dev/issue/6021): Remove want_gvisor from all the
- // multicast test cases below once ICMPv6 sockets return EINVAL when
- // binding to IPv6 multicast addresses.
- {
- .bind_to = V6Multicast(),
- .want = EINVAL,
- .want_gvisor = EADDRNOTAVAIL,
- },
- {
- .bind_to = V6MulticastInterfaceLocalAllNodes(),
- .want = EINVAL,
- .want_gvisor = EADDRNOTAVAIL,
- },
- {
- .bind_to = V6MulticastLinkLocalAllNodes(),
- .want = EINVAL,
- .want_gvisor = EADDRNOTAVAIL,
- },
- {
- .bind_to = V6MulticastLinkLocalAllRouters(),
- .want = EINVAL,
- .want_gvisor = EADDRNOTAVAIL,
- },
- {
- .bind_to = V6AddrStr("IPv6UnknownUnicast", "fc00::1"),
- .want = EADDRNOTAVAIL,
- },
- });
-}
-
-std::vector<std::tuple<SocketKind, BindTestCase>> AllTestCases() {
- return VecCat<std::tuple<SocketKind, BindTestCase>>(ICMPTestCases(),
- ICMPv6TestCases());
-}
-
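-// Builds a readable test name from the socket and bind-address descriptions,
-// replacing non-alphanumeric characters with underscores.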
-std::string TestDescription(
- const ::testing::TestParamInfo<Fixture::ParamType>& info) {
- auto [socket_factory, test_case] = info.param;
- std::string name = absl::StrJoin(
- {socket_factory.description, test_case.bind_to.description}, "_");
- absl::c_replace_if(
- name, [](char c) { return !std::isalnum(c); }, '_');
- return name;
-}
-
-INSTANTIATE_TEST_SUITE_P(PingSockets, Fixture,
- ::testing::ValuesIn(AllTestCases()), TestDescription);
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/pipe.cc b/test/syscalls/linux/pipe.cc
deleted file mode 100644
index 0bba86846..000000000
--- a/test/syscalls/linux/pipe.cc
+++ /dev/null
@@ -1,740 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h> /* Obtain O_* constant definitions */
-#include <linux/magic.h>
-#include <signal.h>
-#include <sys/ioctl.h>
-#include <sys/statfs.h>
-#include <sys/uio.h>
-#include <unistd.h>
-
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "absl/strings/str_cat.h"
-#include "absl/synchronization/notification.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/signal_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// Used as a non-zero sentinel value, below.
-constexpr int kTestValue = 0x12345678;
-
-// Used for synchronization in race tests.
-const absl::Duration syncDelay = absl::Seconds(2);
-
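-// Counts the signals received by SigRecordingHandler below.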
-std::atomic<int> global_num_signals_received = 0;
-void SigRecordingHandler(int signum, siginfo_t* siginfo,
- void* unused_ucontext) {
- global_num_signals_received++;
-}
-
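-// Installs SigRecordingHandler for signum and resets the signal counter.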
-PosixErrorOr<Cleanup> RegisterSignalHandler(int signum) {
- global_num_signals_received = 0;
- struct sigaction handler;
- handler.sa_sigaction = SigRecordingHandler;
- sigemptyset(&handler.sa_mask);
- handler.sa_flags = SA_SIGINFO;
- return ScopedSigaction(signum, handler);
-}
-
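-// Waits until at least max_expected signals have been received or the timeout
-// expires.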
-void WaitForSignalDelivery(absl::Duration timeout, int max_expected) {
- absl::Time wait_start = absl::Now();
- while (global_num_signals_received < max_expected &&
- absl::Now() - wait_start < timeout) {
- absl::SleepFor(absl::Milliseconds(10));
- }
-}
-
-struct PipeCreator {
- std::string name_;
-
- // void (fds, is_blocking, is_namedpipe).
- std::function<void(int[2], bool*, bool*)> create_;
-};
-
-class PipeTest : public ::testing::TestWithParam<PipeCreator> {
- public:
- static void SetUpTestSuite() {
- // Tests intentionally generate SIGPIPE.
- TEST_PCHECK(signal(SIGPIPE, SIG_IGN) != SIG_ERR);
- }
-
- // Initializes rfd_ and wfd_ as a blocking pipe.
- //
- // The return value indicates success: the test should be skipped otherwise.
- bool CreateBlocking() { return create(true); }
-
- // Initializes rfd_ and wfd_ as a non-blocking pipe.
- //
- // The return value is per CreateBlocking.
- bool CreateNonBlocking() { return create(false); }
-
- // Returns true iff the pipe represents a named pipe.
- bool IsNamedPipe() const { return named_pipe_; }
-
- size_t Size() const {
- int s1 = fcntl(rfd_.get(), F_GETPIPE_SZ);
- int s2 = fcntl(wfd_.get(), F_GETPIPE_SZ);
- EXPECT_GT(s1, 0);
- EXPECT_GT(s2, 0);
- EXPECT_EQ(s1, s2);
- return static_cast<size_t>(s1);
- }
-
- static void TearDownTestSuite() {
- TEST_PCHECK(signal(SIGPIPE, SIG_DFL) != SIG_ERR);
- }
-
- private:
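-  // Creates the pipe using the test parameter and adjusts O_NONBLOCK so that
-  // the descriptors match wants_blocking. Returns false if creation failed and
-  // the test should be skipped.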
- bool create(bool wants_blocking) {
- // Generate the pipe.
- int fds[2] = {-1, -1};
- bool is_blocking = false;
- GetParam().create_(fds, &is_blocking, &named_pipe_);
- if (fds[0] < 0 || fds[1] < 0) {
- return false;
- }
-
- // Save descriptors.
- rfd_.reset(fds[0]);
- wfd_.reset(fds[1]);
-
- // Adjust blocking, if needed.
- if (!is_blocking && wants_blocking) {
-      // Clear O_NONBLOCK so that the descriptors are blocking.
- EXPECT_THAT(fcntl(fds[0], F_SETFL, 0), SyscallSucceeds());
- EXPECT_THAT(fcntl(fds[1], F_SETFL, 0), SyscallSucceeds());
- } else if (is_blocking && !wants_blocking) {
-      // Set O_NONBLOCK so that the descriptors are non-blocking.
- EXPECT_THAT(fcntl(fds[0], F_SETFL, O_NONBLOCK), SyscallSucceeds());
- EXPECT_THAT(fcntl(fds[1], F_SETFL, O_NONBLOCK), SyscallSucceeds());
- }
-
- return true;
- }
-
- protected:
- FileDescriptor rfd_;
- FileDescriptor wfd_;
-
- private:
- bool named_pipe_ = false;
-};
-
-TEST_P(PipeTest, Inode) {
- SKIP_IF(!CreateBlocking());
-
- // Ensure that the inode number is the same for each end.
- struct stat rst;
- ASSERT_THAT(fstat(rfd_.get(), &rst), SyscallSucceeds());
- struct stat wst;
- ASSERT_THAT(fstat(wfd_.get(), &wst), SyscallSucceeds());
- EXPECT_EQ(rst.st_ino, wst.st_ino);
-}
-
-TEST_P(PipeTest, Permissions) {
- SKIP_IF(!CreateBlocking());
-
- // Attempt bad operations.
- int buf = kTestValue;
- ASSERT_THAT(write(rfd_.get(), &buf, sizeof(buf)),
- SyscallFailsWithErrno(EBADF));
- EXPECT_THAT(read(wfd_.get(), &buf, sizeof(buf)),
- SyscallFailsWithErrno(EBADF));
-}
-
-TEST_P(PipeTest, Flags) {
- SKIP_IF(!CreateBlocking());
-
- if (IsNamedPipe()) {
-    // O_LARGEFILE may be stubbed to zero by glibc, so use kOLargeFile instead.
- EXPECT_THAT(fcntl(rfd_.get(), F_GETFL),
- SyscallSucceedsWithValue(kOLargeFile | O_RDONLY));
- EXPECT_THAT(fcntl(wfd_.get(), F_GETFL),
- SyscallSucceedsWithValue(kOLargeFile | O_WRONLY));
- } else {
- EXPECT_THAT(fcntl(rfd_.get(), F_GETFL), SyscallSucceedsWithValue(O_RDONLY));
- EXPECT_THAT(fcntl(wfd_.get(), F_GETFL), SyscallSucceedsWithValue(O_WRONLY));
- }
-}
-
-TEST_P(PipeTest, Write) {
- SKIP_IF(!CreateBlocking());
-
- int wbuf = kTestValue;
- int rbuf = ~kTestValue;
- ASSERT_THAT(write(wfd_.get(), &wbuf, sizeof(wbuf)),
- SyscallSucceedsWithValue(sizeof(wbuf)));
- ASSERT_THAT(read(rfd_.get(), &rbuf, sizeof(rbuf)),
- SyscallSucceedsWithValue(sizeof(rbuf)));
- EXPECT_EQ(wbuf, rbuf);
-}
-
-TEST_P(PipeTest, WritePage) {
- SKIP_IF(!CreateBlocking());
-
- std::vector<char> wbuf(kPageSize);
- RandomizeBuffer(wbuf.data(), wbuf.size());
- std::vector<char> rbuf(wbuf.size());
-
- ASSERT_THAT(write(wfd_.get(), wbuf.data(), wbuf.size()),
- SyscallSucceedsWithValue(wbuf.size()));
- ASSERT_THAT(read(rfd_.get(), rbuf.data(), rbuf.size()),
- SyscallSucceedsWithValue(rbuf.size()));
- EXPECT_EQ(memcmp(rbuf.data(), wbuf.data(), wbuf.size()), 0);
-}
-
-TEST_P(PipeTest, NonBlocking) {
- SKIP_IF(!CreateNonBlocking());
-
- int wbuf = kTestValue;
- int rbuf = ~kTestValue;
- EXPECT_THAT(read(rfd_.get(), &rbuf, sizeof(rbuf)),
- SyscallFailsWithErrno(EWOULDBLOCK));
- ASSERT_THAT(write(wfd_.get(), &wbuf, sizeof(wbuf)),
- SyscallSucceedsWithValue(sizeof(wbuf)));
-
- ASSERT_THAT(read(rfd_.get(), &rbuf, sizeof(rbuf)),
- SyscallSucceedsWithValue(sizeof(rbuf)));
- EXPECT_EQ(wbuf, rbuf);
- EXPECT_THAT(read(rfd_.get(), &rbuf, sizeof(rbuf)),
- SyscallFailsWithErrno(EWOULDBLOCK));
-}
-
-TEST(PipeTest, StatFS) {
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- struct statfs st;
- EXPECT_THAT(fstatfs(fds[0], &st), SyscallSucceeds());
- EXPECT_EQ(st.f_type, PIPEFS_MAGIC);
- EXPECT_EQ(st.f_bsize, getpagesize());
- EXPECT_EQ(st.f_namelen, NAME_MAX);
-}
-
-TEST(Pipe2Test, CloExec) {
- int fds[2];
- ASSERT_THAT(pipe2(fds, O_CLOEXEC), SyscallSucceeds());
- EXPECT_THAT(fcntl(fds[0], F_GETFD), SyscallSucceedsWithValue(FD_CLOEXEC));
- EXPECT_THAT(fcntl(fds[1], F_GETFD), SyscallSucceedsWithValue(FD_CLOEXEC));
- EXPECT_THAT(close(fds[0]), SyscallSucceeds());
- EXPECT_THAT(close(fds[1]), SyscallSucceeds());
-}
-
-TEST(Pipe2Test, BadOptions) {
- int fds[2];
- EXPECT_THAT(pipe2(fds, 0xDEAD), SyscallFailsWithErrno(EINVAL));
-}
-
-// Tests that opening named pipes with O_TRUNC shouldn't cause an error, but
-// calls to (f)truncate should.
-TEST(NamedPipeTest, Truncate) {
- const std::string tmp_path = NewTempAbsPath();
- SKIP_IF(mkfifo(tmp_path.c_str(), 0644) != 0);
-
- ASSERT_THAT(open(tmp_path.c_str(), O_NONBLOCK | O_RDONLY), SyscallSucceeds());
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(tmp_path.c_str(), O_RDWR | O_NONBLOCK | O_TRUNC));
-
- ASSERT_THAT(truncate(tmp_path.c_str(), 0), SyscallFailsWithErrno(EINVAL));
- ASSERT_THAT(ftruncate(fd.get(), 0), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_P(PipeTest, Seek) {
- SKIP_IF(!CreateBlocking());
-
- for (int i = 0; i < 4; i++) {
- // Attempt absolute seeks.
- EXPECT_THAT(lseek(rfd_.get(), 0, SEEK_SET), SyscallFailsWithErrno(ESPIPE));
- EXPECT_THAT(lseek(rfd_.get(), 4, SEEK_SET), SyscallFailsWithErrno(ESPIPE));
- EXPECT_THAT(lseek(wfd_.get(), 0, SEEK_SET), SyscallFailsWithErrno(ESPIPE));
- EXPECT_THAT(lseek(wfd_.get(), 4, SEEK_SET), SyscallFailsWithErrno(ESPIPE));
-
- // Attempt relative seeks.
- EXPECT_THAT(lseek(rfd_.get(), 0, SEEK_CUR), SyscallFailsWithErrno(ESPIPE));
- EXPECT_THAT(lseek(rfd_.get(), 4, SEEK_CUR), SyscallFailsWithErrno(ESPIPE));
- EXPECT_THAT(lseek(wfd_.get(), 0, SEEK_CUR), SyscallFailsWithErrno(ESPIPE));
- EXPECT_THAT(lseek(wfd_.get(), 4, SEEK_CUR), SyscallFailsWithErrno(ESPIPE));
-
- // Attempt end-of-file seeks.
- EXPECT_THAT(lseek(rfd_.get(), 0, SEEK_CUR), SyscallFailsWithErrno(ESPIPE));
- EXPECT_THAT(lseek(rfd_.get(), -4, SEEK_END), SyscallFailsWithErrno(ESPIPE));
- EXPECT_THAT(lseek(wfd_.get(), 0, SEEK_CUR), SyscallFailsWithErrno(ESPIPE));
- EXPECT_THAT(lseek(wfd_.get(), -4, SEEK_END), SyscallFailsWithErrno(ESPIPE));
-
- // Add some more data to the pipe.
- int buf = kTestValue;
- ASSERT_THAT(write(wfd_.get(), &buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
- }
-}
-
-#ifndef ANDROID
-// Android does not support preadv or pwritev in r22.
-
-TEST_P(PipeTest, OffsetCalls) {
- SKIP_IF(!CreateBlocking());
-
- int buf;
- EXPECT_THAT(pread(wfd_.get(), &buf, sizeof(buf), 0),
- SyscallFailsWithErrno(ESPIPE));
- EXPECT_THAT(pwrite(rfd_.get(), &buf, sizeof(buf), 0),
- SyscallFailsWithErrno(ESPIPE));
-
- struct iovec iov;
- iov.iov_base = &buf;
- iov.iov_len = sizeof(buf);
- EXPECT_THAT(preadv(wfd_.get(), &iov, 1, 0), SyscallFailsWithErrno(ESPIPE));
- EXPECT_THAT(pwritev(rfd_.get(), &iov, 1, 0), SyscallFailsWithErrno(ESPIPE));
-}
-
-#endif // ANDROID
-
-TEST_P(PipeTest, WriterSideCloses) {
- SKIP_IF(!CreateBlocking());
-
- ScopedThread t([this]() {
- int buf = ~kTestValue;
- ASSERT_THAT(read(rfd_.get(), &buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
- EXPECT_EQ(buf, kTestValue);
- // This will return when the close() completes.
- ASSERT_THAT(read(rfd_.get(), &buf, sizeof(buf)), SyscallSucceeds());
- // This will return straight away.
- ASSERT_THAT(read(rfd_.get(), &buf, sizeof(buf)),
- SyscallSucceedsWithValue(0));
- });
-
- // Sleep a bit so the thread can block.
- absl::SleepFor(syncDelay);
-
- // Write to unblock.
- int buf = kTestValue;
- ASSERT_THAT(write(wfd_.get(), &buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- // Sleep a bit so the thread can block again.
- absl::SleepFor(syncDelay);
-
- // Allow the thread to complete.
- ASSERT_THAT(close(wfd_.release()), SyscallSucceeds());
- t.Join();
-}
-
-TEST_P(PipeTest, WriterSideClosesReadDataFirst) {
- SKIP_IF(!CreateBlocking());
-
- int wbuf = kTestValue;
- ASSERT_THAT(write(wfd_.get(), &wbuf, sizeof(wbuf)),
- SyscallSucceedsWithValue(sizeof(wbuf)));
- ASSERT_THAT(close(wfd_.release()), SyscallSucceeds());
-
- int rbuf;
- ASSERT_THAT(read(rfd_.get(), &rbuf, sizeof(rbuf)),
- SyscallSucceedsWithValue(sizeof(rbuf)));
- EXPECT_EQ(wbuf, rbuf);
- EXPECT_THAT(read(rfd_.get(), &rbuf, sizeof(rbuf)),
- SyscallSucceedsWithValue(0));
-}
-
-TEST_P(PipeTest, ReaderSideCloses) {
- SKIP_IF(!CreateBlocking());
-
- const auto signal_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(RegisterSignalHandler(SIGPIPE));
-
- ASSERT_THAT(close(rfd_.release()), SyscallSucceeds());
- int buf = kTestValue;
- EXPECT_THAT(write(wfd_.get(), &buf, sizeof(buf)),
- SyscallFailsWithErrno(EPIPE));
-
- WaitForSignalDelivery(absl::Seconds(1), 1);
- ASSERT_EQ(global_num_signals_received, 1);
-}
-
-TEST_P(PipeTest, CloseTwice) {
- SKIP_IF(!CreateBlocking());
-
- int reader = rfd_.release();
- int writer = wfd_.release();
- ASSERT_THAT(close(reader), SyscallSucceeds());
- ASSERT_THAT(close(writer), SyscallSucceeds());
- EXPECT_THAT(close(reader), SyscallFailsWithErrno(EBADF));
- EXPECT_THAT(close(writer), SyscallFailsWithErrno(EBADF));
-}
-
-// Blocking write returns EPIPE when read end is closed if nothing has been
-// written.
-TEST_P(PipeTest, BlockWriteClosed) {
- SKIP_IF(!CreateBlocking());
-
- const auto signal_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(RegisterSignalHandler(SIGPIPE));
-
- absl::Notification notify;
- ScopedThread t([this, &notify]() {
- std::vector<char> buf(Size());
- // Exactly fill the pipe buffer.
- ASSERT_THAT(WriteFd(wfd_.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(buf.size()));
-
- notify.Notify();
-
- // Attempt to write one more byte. Blocks.
- // N.B. Don't use WriteFd, we don't want a retry.
- EXPECT_THAT(write(wfd_.get(), buf.data(), 1), SyscallFailsWithErrno(EPIPE));
- });
-
- notify.WaitForNotification();
- ASSERT_THAT(close(rfd_.release()), SyscallSucceeds());
-
- WaitForSignalDelivery(absl::Seconds(1), 1);
- ASSERT_EQ(global_num_signals_received, 1);
-
- t.Join();
-}
-
-// Blocking write returns EPIPE when read end is closed even if something has
-// been written.
-TEST_P(PipeTest, BlockPartialWriteClosed) {
- SKIP_IF(!CreateBlocking());
-
- const auto signal_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(RegisterSignalHandler(SIGPIPE));
-
- ScopedThread t([this]() {
- const int pipe_size = Size();
- std::vector<char> buf(2 * pipe_size);
-
- // Write more than fits in the buffer. Blocks then returns partial write
- // when the other end is closed. The next call returns EPIPE.
- ASSERT_THAT(write(wfd_.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(pipe_size));
- EXPECT_THAT(write(wfd_.get(), buf.data(), buf.size()),
- SyscallFailsWithErrno(EPIPE));
- });
-
- // Leave time for write to become blocked.
- absl::SleepFor(syncDelay);
-
- // Unblock the above.
- ASSERT_THAT(close(rfd_.release()), SyscallSucceeds());
-
- WaitForSignalDelivery(absl::Seconds(1), 2);
- ASSERT_EQ(global_num_signals_received, 2);
-
- t.Join();
-}
-
-TEST_P(PipeTest, ReadFromClosedFd) {
- SKIP_IF(!CreateBlocking());
-
- absl::Notification notify;
- ScopedThread t([this, &notify]() {
- notify.Notify();
- int buf;
- ASSERT_THAT(read(rfd_.get(), &buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
- ASSERT_EQ(kTestValue, buf);
- });
- notify.WaitForNotification();
-
- // Make sure that the thread gets to read().
- absl::SleepFor(syncDelay);
-
- {
-    // We cannot save/restore here as the read end of the pipe is closed while
-    // there is an ongoing read() above. We would not be able to restart the
-    // read() successfully in the restore run since the read fd is closed.
- const DisableSave ds;
- ASSERT_THAT(close(rfd_.release()), SyscallSucceeds());
- int buf = kTestValue;
- ASSERT_THAT(write(wfd_.get(), &buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
- t.Join();
- }
-}
-
-TEST_P(PipeTest, FionRead) {
- SKIP_IF(!CreateBlocking());
-
- int n;
- ASSERT_THAT(ioctl(rfd_.get(), FIONREAD, &n), SyscallSucceedsWithValue(0));
- EXPECT_EQ(n, 0);
- ASSERT_THAT(ioctl(wfd_.get(), FIONREAD, &n), SyscallSucceedsWithValue(0));
- EXPECT_EQ(n, 0);
-
- std::vector<char> buf(Size());
- ASSERT_THAT(write(wfd_.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(buf.size()));
-
- EXPECT_THAT(ioctl(rfd_.get(), FIONREAD, &n), SyscallSucceedsWithValue(0));
- EXPECT_EQ(n, buf.size());
- EXPECT_THAT(ioctl(wfd_.get(), FIONREAD, &n), SyscallSucceedsWithValue(0));
- EXPECT_EQ(n, buf.size());
-}
-
-// Test that opening an empty anonymous pipe RDONLY via /proc/self/fd/N does not
-// block waiting for a writer.
-TEST_P(PipeTest, OpenViaProcSelfFD) {
- SKIP_IF(!CreateBlocking());
- SKIP_IF(IsNamedPipe());
-
- // Close the write end of the pipe.
- ASSERT_THAT(close(wfd_.release()), SyscallSucceeds());
-
- // Open other side via /proc/self/fd. It should not block.
- FileDescriptor proc_self_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(absl::StrCat("/proc/self/fd/", rfd_.get()), O_RDONLY));
-}
-
-// Test that opening and reading from an anonymous pipe (with existing writes)
-// RDONLY via /proc/self/fd/N returns the existing data.
-TEST_P(PipeTest, OpenViaProcSelfFDWithWrites) {
- SKIP_IF(!CreateBlocking());
- SKIP_IF(IsNamedPipe());
-
- // Write to the pipe and then close the write fd.
- int wbuf = kTestValue;
- ASSERT_THAT(write(wfd_.get(), &wbuf, sizeof(wbuf)),
- SyscallSucceedsWithValue(sizeof(wbuf)));
- ASSERT_THAT(close(wfd_.release()), SyscallSucceeds());
-
- // Open read side via /proc/self/fd, and read from it.
- FileDescriptor proc_self_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(absl::StrCat("/proc/self/fd/", rfd_.get()), O_RDONLY));
- int rbuf;
- ASSERT_THAT(read(proc_self_fd.get(), &rbuf, sizeof(rbuf)),
- SyscallSucceedsWithValue(sizeof(rbuf)));
- EXPECT_EQ(wbuf, rbuf);
-}
-
-// Test that accesses of /proc/<PID>/fd correctly decrement the refcount.
-TEST_P(PipeTest, ProcFDReleasesFile) {
- SKIP_IF(!CreateBlocking());
-
- // Stat the pipe FD, which shouldn't alter the refcount.
- struct stat wst;
- ASSERT_THAT(lstat(absl::StrCat("/proc/self/fd/", wfd_.get()).c_str(), &wst),
- SyscallSucceeds());
-
- // Close the write end and ensure that read indicates EOF.
- wfd_.reset();
- char buf;
- ASSERT_THAT(read(rfd_.get(), &buf, 1), SyscallSucceedsWithValue(0));
-}
-
-// Same for /proc/<PID>/fdinfo.
-TEST_P(PipeTest, ProcFDInfoReleasesFile) {
- SKIP_IF(!CreateBlocking());
-
- // Stat the pipe FD, which shouldn't alter the refcount.
- struct stat wst;
- ASSERT_THAT(
- lstat(absl::StrCat("/proc/self/fdinfo/", wfd_.get()).c_str(), &wst),
- SyscallSucceeds());
-
- // Close the write end and ensure that read indicates EOF.
- wfd_.reset();
- char buf;
- ASSERT_THAT(read(rfd_.get(), &buf, 1), SyscallSucceedsWithValue(0));
-}
-
-TEST_P(PipeTest, SizeChange) {
- SKIP_IF(!CreateBlocking());
-
- // Set the minimum possible size.
- ASSERT_THAT(fcntl(rfd_.get(), F_SETPIPE_SZ, 0), SyscallSucceeds());
- int min = Size();
- EXPECT_GT(min, 0); // Should be rounded up.
-
- // Set from the read end.
- ASSERT_THAT(fcntl(rfd_.get(), F_SETPIPE_SZ, min + 1), SyscallSucceeds());
- int med = Size();
- EXPECT_GT(med, min); // Should have grown, may be rounded.
-
- // Set from the write end.
- ASSERT_THAT(fcntl(wfd_.get(), F_SETPIPE_SZ, med + 1), SyscallSucceeds());
- int max = Size();
- EXPECT_GT(max, med); // Ditto.
-}
-
-TEST_P(PipeTest, SizeChangeMax) {
- SKIP_IF(!CreateBlocking());
-
- // Assert there's some maximum.
- EXPECT_THAT(fcntl(rfd_.get(), F_SETPIPE_SZ, 0x7fffffffffffffff),
- SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(fcntl(wfd_.get(), F_SETPIPE_SZ, 0x7fffffffffffffff),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_P(PipeTest, SizeChangeFull) {
- SKIP_IF(!CreateBlocking());
-
-  // Ensure that we adjust to a large enough size to avoid rounding when we
-  // perform the size decrease. If rounding occurs, we may not actually
-  // adjust the size and the call below will return success. Experimentation
-  // shows that this granularity avoids the rounding on Linux.
- constexpr int kDelta = 64 * 1024;
- ASSERT_THAT(fcntl(wfd_.get(), F_SETPIPE_SZ, Size() + kDelta),
- SyscallSucceeds());
-
- // Fill the buffer and try to change down.
- std::vector<char> buf(Size());
- ASSERT_THAT(write(wfd_.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(buf.size()));
- EXPECT_THAT(fcntl(wfd_.get(), F_SETPIPE_SZ, Size() - kDelta),
- SyscallFailsWithErrno(EBUSY));
-}
-
-TEST_P(PipeTest, Streaming) {
- SKIP_IF(!CreateBlocking());
-
-  // This test makes too many syscalls to go through full save cycles.
- DisableSave ds;
-
- // Size() requires 2 syscalls, call it once and remember the value.
- const size_t pipe_size = Size();
- const size_t streamed_bytes = 4 * pipe_size;
-
- absl::Notification notify;
- ScopedThread t([&, this]() {
- std::vector<char> buf(1024);
- // Don't start until it's full.
- notify.WaitForNotification();
- size_t total = 0;
- while (total < streamed_bytes) {
- ASSERT_THAT(read(rfd_.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(buf.size()));
- total += buf.size();
- }
- });
-
-  // Write 4 * pipe_size bytes in total. The first pipe_size bytes fill up the
-  // pipe, at which point we notify the reader to start. Then we write a pipe's
-  // worth of data 3 more times to ensure the reader can follow along.
- //
- // The size of each write (which is determined by buf.size()) must be smaller
- // than the size of the pipe (which, in the "smallbuffer" configuration, is 1
- // page) for the check for notify.Notify() below to be correct.
- std::vector<char> buf(1024);
- RandomizeBuffer(buf.data(), buf.size());
- size_t total = 0;
- while (total < streamed_bytes) {
- ASSERT_THAT(write(wfd_.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(buf.size()));
- total += buf.size();
-
- // Is the next write about to fill up the buffer? Wake up the reader once.
- if (total < pipe_size && (total + buf.size()) >= pipe_size) {
- notify.Notify();
- }
- }
-}
-
-std::string PipeCreatorName(::testing::TestParamInfo<PipeCreator> info) {
- return info.param.name_; // Use the name specified.
-}
-
-INSTANTIATE_TEST_SUITE_P(
- Pipes, PipeTest,
- ::testing::Values(
- PipeCreator{
- "pipe",
- [](int fds[2], bool* is_blocking, bool* is_namedpipe) {
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- *is_blocking = true;
- *is_namedpipe = false;
- },
- },
- PipeCreator{
- "pipe2blocking",
- [](int fds[2], bool* is_blocking, bool* is_namedpipe) {
- ASSERT_THAT(pipe2(fds, 0), SyscallSucceeds());
- *is_blocking = true;
- *is_namedpipe = false;
- },
- },
- PipeCreator{
- "pipe2nonblocking",
- [](int fds[2], bool* is_blocking, bool* is_namedpipe) {
- ASSERT_THAT(pipe2(fds, O_NONBLOCK), SyscallSucceeds());
- *is_blocking = false;
- *is_namedpipe = false;
- },
- },
- PipeCreator{
- "smallbuffer",
- [](int fds[2], bool* is_blocking, bool* is_namedpipe) {
- // Set to the minimum available size (will round up).
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- ASSERT_THAT(fcntl(fds[0], F_SETPIPE_SZ, 0), SyscallSucceeds());
- *is_blocking = true;
- *is_namedpipe = false;
- },
- },
- PipeCreator{
- "namednonblocking",
- [](int fds[2], bool* is_blocking, bool* is_namedpipe) {
- // Create a new file-based pipe (non-blocking).
- std::string path;
- {
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- path = file.path();
- }
- SKIP_IF(mkfifo(path.c_str(), 0644) != 0);
- fds[0] = open(path.c_str(), O_NONBLOCK | O_RDONLY);
- fds[1] = open(path.c_str(), O_NONBLOCK | O_WRONLY);
- MaybeSave();
- *is_blocking = false;
- *is_namedpipe = true;
- },
- },
- PipeCreator{
- "namedblocking",
- [](int fds[2], bool* is_blocking, bool* is_namedpipe) {
- // Create a new file-based pipe (blocking).
- std::string path;
- {
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- path = file.path();
- }
- SKIP_IF(mkfifo(path.c_str(), 0644) != 0);
- ScopedThread t(
- [&path, &fds]() { fds[1] = open(path.c_str(), O_WRONLY); });
- fds[0] = open(path.c_str(), O_RDONLY);
- t.Join();
- MaybeSave();
- *is_blocking = true;
- *is_namedpipe = true;
- },
- }),
- PipeCreatorName);
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/poll.cc b/test/syscalls/linux/poll.cc
deleted file mode 100644
index ccd084244..000000000
--- a/test/syscalls/linux/poll.cc
+++ /dev/null
@@ -1,355 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <poll.h>
-#include <sys/resource.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-
-#include <algorithm>
-#include <iostream>
-
-#include "gtest/gtest.h"
-#include "absl/synchronization/notification.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/syscalls/linux/base_poll_test.h"
-#include "test/util/eventfd_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/logging.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-class PollTest : public BasePollTest {
- protected:
- void SetUp() override { BasePollTest::SetUp(); }
- void TearDown() override { BasePollTest::TearDown(); }
-};
-
-TEST_F(PollTest, InvalidFds) {
-  // fds is invalid because it's null, but we tell poll the length is non-zero.
- EXPECT_THAT(poll(nullptr, 1, 1), SyscallFailsWithErrno(EFAULT));
- EXPECT_THAT(poll(nullptr, -1, 1), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_F(PollTest, NullFds) {
- EXPECT_THAT(poll(nullptr, 0, 10), SyscallSucceeds());
-}
-
-TEST_F(PollTest, ZeroTimeout) {
- EXPECT_THAT(poll(nullptr, 0, 0), SyscallSucceeds());
-}
-
-// If random S/R interrupts the poll, SIGALRM may be delivered before poll
-// restarts, causing the poll to hang forever.
-TEST_F(PollTest, NegativeTimeout) {
-  // A negative timeout means wait forever, so set a timer.
- SetTimer(absl::Milliseconds(100));
- EXPECT_THAT(poll(nullptr, 0, -1), SyscallFailsWithErrno(EINTR));
- EXPECT_TRUE(TimerFired());
-}
-
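-// Verifies that a non-blocking poll with the given readable event mask reports
-// the read end of a pipe as ready once data has been written to it.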
-void NonBlockingReadableTest(int16_t mask) {
- // Create a pipe.
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
-
- FileDescriptor fd0(fds[0]);
- FileDescriptor fd1(fds[1]);
-
- // Write some data to the pipe.
- char s[] = "foo\n";
- ASSERT_THAT(WriteFd(fd1.get(), s, strlen(s) + 1), SyscallSucceeds());
-
- // Poll on the reader fd with POLLIN event.
- struct pollfd poll_fd = {fd0.get(), mask, 0};
- EXPECT_THAT(RetryEINTR(poll)(&poll_fd, 1, 0), SyscallSucceedsWithValue(1));
-
- // Should trigger POLLIN event.
- EXPECT_EQ(poll_fd.revents & mask, mask);
-}
-
-TEST_F(PollTest, NonBlockingEventPOLLIN) { NonBlockingReadableTest(POLLIN); }
-
-TEST_F(PollTest, NonBlockingEventPOLLRDNORM) {
- NonBlockingReadableTest(POLLRDNORM);
-}
-
-TEST_F(PollTest, NonBlockingEventPOLLRDNORM_POLLIN) {
- NonBlockingReadableTest(POLLRDNORM | POLLIN);
-}
-
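-// Verifies that a blocking poll with the given readable event mask is woken up
-// when data is written to the pipe.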
-void BlockingReadableTest(int16_t mask) {
- // Create a pipe.
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
-
- FileDescriptor fd0(fds[0]);
- FileDescriptor fd1(fds[1]);
-
- // Start a blocking poll on the read fd.
- absl::Notification notify;
- ScopedThread t([&fd0, &notify, &mask]() {
- notify.Notify();
-
- // Poll on the reader fd with readable event.
- struct pollfd poll_fd = {fd0.get(), mask, 0};
- EXPECT_THAT(RetryEINTR(poll)(&poll_fd, 1, -1), SyscallSucceedsWithValue(1));
-
- // Should trigger readable event.
- EXPECT_EQ(poll_fd.revents & mask, mask);
- });
-
- notify.WaitForNotification();
- absl::SleepFor(absl::Seconds(1));
-
- // Write some data to the pipe.
- char s[] = "foo\n";
- ASSERT_THAT(WriteFd(fd1.get(), s, strlen(s) + 1), SyscallSucceeds());
-}
-
-TEST_F(PollTest, BlockingEventPOLLIN) { BlockingReadableTest(POLLIN); }
-
-TEST_F(PollTest, BlockingEventPOLLRDNORM) { BlockingReadableTest(POLLRDNORM); }
-
-TEST_F(PollTest, BlockingEventPOLLRDNORM_POLLIN) {
- BlockingReadableTest(POLLRDNORM | POLLIN);
-}
-
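-// Verifies that the write end of a newly created pipe reports the given
-// writable event mask.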
-void WritableTest(int16_t mask, int timeout) {
- // Create a pipe.
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
-
- FileDescriptor fd0(fds[0]);
- FileDescriptor fd1(fds[1]);
-
-  // In a newly created pipe, the second fd should be writable.
-
- // Poll on second fd for a writable event.
- struct pollfd poll_fd = {fd1.get(), mask, 0};
- EXPECT_THAT(RetryEINTR(poll)(&poll_fd, 1, timeout),
- SyscallSucceedsWithValue(1));
-
- // Should trigger a writable event.
- EXPECT_EQ(poll_fd.revents & mask, mask);
-}
-
-TEST_F(PollTest, NonBlockingEventPOLLOUT) {
- WritableTest(POLLOUT, /*timeout=*/0);
-}
-
-TEST_F(PollTest, NonBlockingEventPOLLWRNORM) {
- WritableTest(POLLWRNORM, /*timeout=*/0);
-}
-
-TEST_F(PollTest, NonBlockingEventPOLLWRNORM_POLLOUT) {
- WritableTest(POLLWRNORM | POLLOUT, /*timeout=*/0);
-}
-
-TEST_F(PollTest, BlockingEventPOLLOUT) {
- WritableTest(POLLOUT, /*timeout=*/-1);
-}
-
-TEST_F(PollTest, BlockingEventPOLLWRNORM) {
- WritableTest(POLLWRNORM, /*timeout=*/-1);
-}
-
-TEST_F(PollTest, BlockingEventPOLLWRNORM_POLLOUT) {
- WritableTest(POLLWRNORM | POLLOUT, /*timeout=*/-1);
-}
-
-TEST_F(PollTest, NonBlockingEventPOLLHUP) {
- // Create a pipe.
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
-
- FileDescriptor fd0(fds[0]);
- FileDescriptor fd1(fds[1]);
-
- // Close the writer fd.
- fd1.reset();
-
- // Poll on the reader fd with POLLIN event.
- struct pollfd poll_fd = {fd0.get(), POLLIN, 0};
- EXPECT_THAT(RetryEINTR(poll)(&poll_fd, 1, 0), SyscallSucceedsWithValue(1));
-
- // Should trigger POLLHUP event.
- EXPECT_EQ(poll_fd.revents & POLLHUP, POLLHUP);
-
- // Should not trigger POLLIN event.
- EXPECT_EQ(poll_fd.revents & POLLIN, 0);
-}
-
-TEST_F(PollTest, BlockingEventPOLLHUP) {
- // Create a pipe.
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
-
- FileDescriptor fd0(fds[0]);
- FileDescriptor fd1(fds[1]);
-
- // Start a blocking poll on the read fd.
- absl::Notification notify;
- ScopedThread t([&fd0, &notify]() {
- notify.Notify();
-
- // Poll on the reader fd with POLLIN event.
- struct pollfd poll_fd = {fd0.get(), POLLIN, 0};
- EXPECT_THAT(RetryEINTR(poll)(&poll_fd, 1, -1), SyscallSucceedsWithValue(1));
-
- // Should trigger POLLHUP event.
- EXPECT_EQ(poll_fd.revents & POLLHUP, POLLHUP);
-
- // Should not trigger POLLIN event.
- EXPECT_EQ(poll_fd.revents & POLLIN, 0);
- });
-
- notify.WaitForNotification();
- absl::SleepFor(absl::Seconds(1));
-
-  // Close the writer fd; the reader should observe POLLHUP.
- fd1.reset();
-}
-
-TEST_F(PollTest, NonBlockingEventPOLLERR) {
- // Create a pipe.
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
-
- FileDescriptor fd0(fds[0]);
- FileDescriptor fd1(fds[1]);
-
- // Close the reader fd.
- fd0.reset();
-
- // Poll on the writer fd with POLLOUT event.
- struct pollfd poll_fd = {fd1.get(), POLLOUT, 0};
- EXPECT_THAT(RetryEINTR(poll)(&poll_fd, 1, 0), SyscallSucceedsWithValue(1));
-
- // Should trigger POLLERR event.
- EXPECT_EQ(poll_fd.revents & POLLERR, POLLERR);
-
- // Should also trigger POLLOUT event.
- EXPECT_EQ(poll_fd.revents & POLLOUT, POLLOUT);
-}
-
-// This test validates that if an FD is already ready for some event, whether
-// POLLIN or POLLOUT, poll will not return immediately unless that event is
-// what the caller actually asked for.
-TEST_F(PollTest, ImmediatelyReturnOnlyOnPollEvents) {
- // Create a pipe.
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
-
- FileDescriptor fd0(fds[0]);
- FileDescriptor fd1(fds[1]);
-
-  // Wait for a read-related event on the write side of the pipe. Since a write
-  // is possible on fds[1], polling for POLLOUT would return immediately, so we
-  // make sure that we're not woken up by a state that we didn't specifically
-  // request.
- constexpr int kTimeoutMs = 100;
- struct pollfd poll_fd = {fd1.get(), POLLIN | POLLPRI | POLLRDHUP, 0};
- EXPECT_THAT(RetryEINTR(poll)(&poll_fd, 1, kTimeoutMs),
- SyscallSucceedsWithValue(0)); // We should timeout.
- EXPECT_EQ(poll_fd.revents, 0); // Nothing should be in returned events.
-
- // Now let's poll on POLLOUT and we should get back 1 fd as being ready and
- // it should contain POLLOUT in the revents.
- poll_fd.events = POLLOUT;
- EXPECT_THAT(RetryEINTR(poll)(&poll_fd, 1, kTimeoutMs),
- SyscallSucceedsWithValue(1)); // 1 fd should have an event.
- EXPECT_EQ(poll_fd.revents, POLLOUT); // POLLOUT should be in revents.
-}
-
-// This test validates that poll(2) returns immediately while data is available.
-TEST_F(PollTest, PollLevelTriggered) {
- int fds[2] = {};
- ASSERT_THAT(socketpair(AF_UNIX, SOCK_STREAM, /*protocol=*/0, fds),
- SyscallSucceeds());
-
- FileDescriptor fd0(fds[0]);
- FileDescriptor fd1(fds[1]);
-
- // Write two bytes to the socket.
- const char* kBuf = "aa";
- ASSERT_THAT(RetryEINTR(send)(fd0.get(), kBuf, /*len=*/2, /*flags=*/0),
- SyscallSucceedsWithValue(2)); // 2 bytes should be written.
-
- // Poll(2) should immediately return as there is data available to read.
- constexpr int kInfiniteTimeout = -1;
- struct pollfd poll_fd = {fd1.get(), POLLIN, 0};
- ASSERT_THAT(RetryEINTR(poll)(&poll_fd, /*nfds=*/1, kInfiniteTimeout),
- SyscallSucceedsWithValue(1)); // 1 fd should be ready to read.
- EXPECT_NE(poll_fd.revents & POLLIN, 0);
-
- // Read a single byte.
- char read_byte = 0;
- ASSERT_THAT(RetryEINTR(recv)(fd1.get(), &read_byte, /*len=*/1, /*flags=*/0),
- SyscallSucceedsWithValue(1)); // 1 byte should be read.
- ASSERT_EQ(read_byte, 'a'); // We should have read a single 'a'.
-
- // Create a separate pollfd for our second poll.
- struct pollfd poll_fd_after = {fd1.get(), POLLIN, 0};
-
- // Poll(2) should again immediately return since we only read one byte.
- ASSERT_THAT(RetryEINTR(poll)(&poll_fd_after, /*nfds=*/1, kInfiniteTimeout),
- SyscallSucceedsWithValue(1)); // 1 fd should be ready to read.
- EXPECT_NE(poll_fd_after.revents & POLLIN, 0);
-}
-
-TEST_F(PollTest, Nfds) {
-  // Stash the value of RLIMIT_NOFILE.
- struct rlimit rlim;
- TEST_PCHECK(getrlimit(RLIMIT_NOFILE, &rlim) == 0);
-
-  // gVisor caps the number of FDs that poll can use beyond RLIMIT_NOFILE.
- constexpr rlim_t maxFD = 4096;
- if (rlim.rlim_cur > maxFD) {
- rlim.rlim_cur = maxFD;
- TEST_PCHECK(setrlimit(RLIMIT_NOFILE, &rlim) == 0);
- }
-
- rlim_t max_fds = rlim.rlim_cur;
- std::cout << "Using limit: " << max_fds << std::endl;
-
- // Create an eventfd. Since its value is initially zero, it is writable.
- FileDescriptor efd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD());
-
- // Create the biggest possible pollfd array such that each element is valid.
- // Each entry in the 'fds' array refers to the eventfd and polls for
- // "writable" events (events=POLLOUT). This essentially guarantees that the
- // poll() is a no-op and allows negative testing of the 'nfds' parameter.
- std::vector<struct pollfd> fds(max_fds + 1,
- {.fd = efd.get(), .events = POLLOUT});
-
- // Verify that 'nfds' up to RLIMIT_NOFILE are allowed.
- EXPECT_THAT(RetryEINTR(poll)(fds.data(), 1, 1), SyscallSucceedsWithValue(1));
- EXPECT_THAT(RetryEINTR(poll)(fds.data(), max_fds / 2, 1),
- SyscallSucceedsWithValue(max_fds / 2));
- EXPECT_THAT(RetryEINTR(poll)(fds.data(), max_fds, 1),
- SyscallSucceedsWithValue(max_fds));
-
- // If 'nfds' exceeds RLIMIT_NOFILE then it must fail with EINVAL.
- EXPECT_THAT(poll(fds.data(), max_fds + 1, 1), SyscallFailsWithErrno(EINVAL));
-}
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/ppoll.cc b/test/syscalls/linux/ppoll.cc
deleted file mode 100644
index 7f7d69731..000000000
--- a/test/syscalls/linux/ppoll.cc
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <poll.h>
-#include <signal.h>
-#include <sys/syscall.h>
-#include <sys/time.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "absl/time/time.h"
-#include "test/syscalls/linux/base_poll_test.h"
-#include "test/util/signal_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-// Linux and glibc have different ideas of the size of sigset_t. When calling
-// the syscall directly, use what the kernel expects.
-unsigned kSigsetSize = SIGRTMAX / 8;
-
-// Linux ppoll(2) differs from the glibc wrapper function in that Linux updates
-// the timeout with the amount of time remaining. In order to test this behavior
-// we need to use the syscall directly.
-int syscallPpoll(struct pollfd* fds, nfds_t nfds, struct timespec* timeout_ts,
- const sigset_t* sigmask, unsigned mask_size) {
- return syscall(SYS_ppoll, fds, nfds, timeout_ts, sigmask, mask_size);
-}
-
-class PpollTest : public BasePollTest {
- protected:
- void SetUp() override { BasePollTest::SetUp(); }
- void TearDown() override { BasePollTest::TearDown(); }
-};
-
-TEST_F(PpollTest, InvalidFds) {
- // fds is invalid because it's null, but we tell ppoll the length is non-zero.
- struct timespec timeout = {};
- sigset_t sigmask;
- TEST_PCHECK(sigemptyset(&sigmask) == 0);
- EXPECT_THAT(syscallPpoll(nullptr, 1, &timeout, &sigmask, kSigsetSize),
- SyscallFailsWithErrno(EFAULT));
- EXPECT_THAT(syscallPpoll(nullptr, -1, &timeout, &sigmask, kSigsetSize),
- SyscallFailsWithErrno(EINVAL));
-}
-
-// See that when fds is null, ppoll behaves like sleep.
-TEST_F(PpollTest, NullFds) {
- struct timespec timeout = absl::ToTimespec(absl::Milliseconds(10));
- ASSERT_THAT(syscallPpoll(nullptr, 0, &timeout, nullptr, 0),
- SyscallSucceeds());
- EXPECT_EQ(timeout.tv_sec, 0);
- EXPECT_EQ(timeout.tv_nsec, 0);
-}
-
-TEST_F(PpollTest, ZeroTimeout) {
- struct timespec timeout = {};
- ASSERT_THAT(syscallPpoll(nullptr, 0, &timeout, nullptr, 0),
- SyscallSucceeds());
- EXPECT_EQ(timeout.tv_sec, 0);
- EXPECT_EQ(timeout.tv_nsec, 0);
-}
-
-// If random S/R interrupts the ppoll, SIGALRM may be delivered before ppoll
-// restarts, causing the ppoll to hang forever.
-TEST_F(PpollTest, NoTimeout) {
- // When there's no timeout, ppoll may never return so set a timer.
- SetTimer(absl::Milliseconds(100));
- // See that we get interrupted by the timer.
- ASSERT_THAT(syscallPpoll(nullptr, 0, nullptr, nullptr, 0),
- SyscallFailsWithErrno(EINTR));
- EXPECT_TRUE(TimerFired());
-}
-
-TEST_F(PpollTest, InvalidTimeoutNegative) {
- struct timespec timeout = absl::ToTimespec(absl::Nanoseconds(-1));
- EXPECT_THAT(syscallPpoll(nullptr, 0, &timeout, nullptr, 0),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_F(PpollTest, InvalidTimeoutNotNormalized) {
- struct timespec timeout = {0, 1000000001};
- EXPECT_THAT(syscallPpoll(nullptr, 0, &timeout, nullptr, 0),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_F(PpollTest, InvalidMaskSize) {
- struct timespec timeout = {};
- sigset_t sigmask;
- TEST_PCHECK(sigemptyset(&sigmask) == 0);
- EXPECT_THAT(syscallPpoll(nullptr, 0, &timeout, &sigmask, 128),
- SyscallFailsWithErrno(EINVAL));
-}
-
-// Verify that signals blocked by the ppoll mask (that would otherwise be
-// allowed) do not interrupt ppoll.
-TEST_F(PpollTest, SignalMaskBlocksSignal) {
- absl::Duration duration(absl::Seconds(30));
- struct timespec timeout = absl::ToTimespec(duration);
- absl::Duration timer_duration(absl::Seconds(10));
-
- // Call with a mask that blocks SIGALRM. See that ppoll is not interrupted
- // (i.e. returns 0) and that upon completion, the timer has fired.
- sigset_t mask;
- ASSERT_THAT(sigprocmask(0, nullptr, &mask), SyscallSucceeds());
- TEST_PCHECK(sigaddset(&mask, SIGALRM) == 0);
- SetTimer(timer_duration);
- MaybeSave();
- ASSERT_FALSE(TimerFired());
- ASSERT_THAT(syscallPpoll(nullptr, 0, &timeout, &mask, kSigsetSize),
- SyscallSucceeds());
- EXPECT_TRUE(TimerFired());
- EXPECT_EQ(absl::DurationFromTimespec(timeout), absl::Duration());
-}
-
-// Verify that signals allowed by the ppoll mask (that would otherwise be
-// blocked) interrupt ppoll.
-TEST_F(PpollTest, SignalMaskAllowsSignal) {
- absl::Duration duration(absl::Seconds(30));
- struct timespec timeout = absl::ToTimespec(duration);
- absl::Duration timer_duration(absl::Seconds(10));
-
- sigset_t mask;
- ASSERT_THAT(sigprocmask(0, nullptr, &mask), SyscallSucceeds());
-
- // Block SIGALRM.
- auto cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_BLOCK, SIGALRM));
-
- // Call with a mask that unblocks SIGALRM. See that ppoll is interrupted.
- SetTimer(timer_duration);
- MaybeSave();
- ASSERT_FALSE(TimerFired());
- ASSERT_THAT(syscallPpoll(nullptr, 0, &timeout, &mask, kSigsetSize),
- SyscallFailsWithErrno(EINTR));
- EXPECT_TRUE(TimerFired());
- EXPECT_GT(absl::DurationFromTimespec(timeout), absl::Duration());
-}
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/prctl.cc b/test/syscalls/linux/prctl.cc
deleted file mode 100644
index 25b0e63d4..000000000
--- a/test/syscalls/linux/prctl.cc
+++ /dev/null
@@ -1,231 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sys/prctl.h>
-#include <sys/ptrace.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <unistd.h>
-
-#include <string>
-
-#include "gtest/gtest.h"
-#include "absl/flags/flag.h"
-#include "test/util/capability_util.h"
-#include "test/util/cleanup.h"
-#include "test/util/multiprocess_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-ABSL_FLAG(bool, prctl_no_new_privs_test_child, false,
- "If true, exit with the return value of prctl(PR_GET_NO_NEW_PRIVS) "
- "plus an offset (see test source).");
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
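-// Define the SUID_DUMP_* constants locally if the headers do not provide them.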
-#ifndef SUID_DUMP_DISABLE
-#define SUID_DUMP_DISABLE 0
-#endif /* SUID_DUMP_DISABLE */
-#ifndef SUID_DUMP_USER
-#define SUID_DUMP_USER 1
-#endif /* SUID_DUMP_USER */
-#ifndef SUID_DUMP_ROOT
-#define SUID_DUMP_ROOT 2
-#endif /* SUID_DUMP_ROOT */
-
-TEST(PrctlTest, NameInitialized) {
- const size_t name_length = 20;
- char name[name_length] = {};
- ASSERT_THAT(prctl(PR_GET_NAME, name), SyscallSucceeds());
- ASSERT_NE(std::string(name), "");
-}
-
-TEST(PrctlTest, SetNameLongName) {
- const size_t name_length = 20;
- const std::string long_name(name_length, 'A');
- ASSERT_THAT(prctl(PR_SET_NAME, long_name.c_str()), SyscallSucceeds());
- char truncated_name[name_length] = {};
- ASSERT_THAT(prctl(PR_GET_NAME, truncated_name), SyscallSucceeds());
- const size_t truncated_length = 15;
- ASSERT_EQ(long_name.substr(0, truncated_length), std::string(truncated_name));
-}
-
-TEST(PrctlTest, ChildProcessName) {
- constexpr size_t kMaxNameLength = 15;
-
- char parent_name[kMaxNameLength + 1] = {};
- memset(parent_name, 'a', kMaxNameLength);
-
- ASSERT_THAT(prctl(PR_SET_NAME, parent_name), SyscallSucceeds());
-
- pid_t child_pid = fork();
- TEST_PCHECK(child_pid >= 0);
- if (child_pid == 0) {
- char child_name[kMaxNameLength + 1] = {};
- TEST_PCHECK(prctl(PR_GET_NAME, child_name) >= 0);
- TEST_CHECK(memcmp(parent_name, child_name, sizeof(parent_name)) == 0);
- _exit(0);
- }
-
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << "status =" << status;
-}
-
-// Offset added to exit code from test child to distinguish from other abnormal
-// exits.
-constexpr int kPrctlNoNewPrivsTestChildExitBase = 100;
-
-TEST(PrctlTest, NoNewPrivsPreservedAcrossCloneForkAndExecve) {
-  // Check if no_new_privs is already set. If it is, we can still test that
-  // it's preserved across clone/fork/execve, but we also expect it to still
-  // be set at the end of the test. Otherwise, call prctl(PR_SET_NO_NEW_PRIVS)
-  // in a separate thread so as not to contaminate the original thread.
- int no_new_privs;
- ASSERT_THAT(no_new_privs = prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0),
- SyscallSucceeds());
- ScopedThread thread = ScopedThread([] {
- ASSERT_THAT(prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0), SyscallSucceeds());
- EXPECT_THAT(prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0),
- SyscallSucceedsWithValue(1));
- ScopedThread threadInner = ScopedThread([] {
- EXPECT_THAT(prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0),
- SyscallSucceedsWithValue(1));
-      // Note that a failing ASSERT_* here only returns from this thread,
-      // which is the intended behavior.
- pid_t child_pid = -1;
- int execve_errno = 0;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec("/proc/self/exe",
- {"/proc/self/exe", "--prctl_no_new_privs_test_child"}, {},
- nullptr, &child_pid, &execve_errno));
-
- ASSERT_GT(child_pid, 0);
- ASSERT_EQ(execve_errno, 0);
-
- int status = 0;
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0),
- SyscallSucceeds());
- ASSERT_TRUE(WIFEXITED(status));
- ASSERT_EQ(WEXITSTATUS(status), kPrctlNoNewPrivsTestChildExitBase + 1);
-
- EXPECT_THAT(prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0),
- SyscallSucceedsWithValue(1));
- });
- threadInner.Join();
- EXPECT_THAT(prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0),
- SyscallSucceedsWithValue(1));
- });
- thread.Join();
- EXPECT_THAT(prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0),
- SyscallSucceedsWithValue(no_new_privs));
-}
-
-TEST(PrctlTest, PDeathSig) {
- pid_t child_pid;
-
- // Make the new process' parent a separate thread since the parent death
- // signal fires when the parent *thread* exits.
- ScopedThread thread = ScopedThread([&] {
- child_pid = fork();
- TEST_CHECK(child_pid >= 0);
- if (child_pid == 0) {
- // In child process.
- TEST_CHECK(prctl(PR_SET_PDEATHSIG, SIGKILL) >= 0);
- int signo;
- TEST_CHECK(prctl(PR_GET_PDEATHSIG, &signo) >= 0);
- TEST_CHECK(signo == SIGKILL);
- // Enable tracing, then raise SIGSTOP and expect our parent to suppress
- // it.
- TEST_CHECK(ptrace(PTRACE_TRACEME, 0, 0, 0) >= 0);
- TEST_CHECK(raise(SIGSTOP) == 0);
-      // Sleep until killed by our parent death signal. sleep(3) is
-      // async-signal-safe; absl::SleepFor isn't.
- while (true) {
- sleep(10);
- }
- }
- // In parent process.
-
- // Wait for the child to send itself SIGSTOP and enter signal-delivery-stop.
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP)
- << "status = " << status;
-
- // Suppress the SIGSTOP and detach from the child.
- ASSERT_THAT(ptrace(PTRACE_DETACH, child_pid, 0, 0), SyscallSucceeds());
- });
- thread.Join();
-
- // The child should have been killed by its parent death SIGKILL.
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL)
- << "status = " << status;
-}
-
-// This test validates that calling prctl with PR_SET_MM without the
-// CAP_SYS_RESOURCE capability returns EPERM.
-TEST(PrctlTest, InvalidPrSetMM) {
- // Drop capability to test below.
- AutoCapability cap(CAP_SYS_RESOURCE, false);
- ASSERT_THAT(prctl(PR_SET_MM, 0, 0, 0, 0), SyscallFailsWithErrno(EPERM));
-}
-
-// Sanity check that dumpability is remembered.
-TEST(PrctlTest, SetGetDumpability) {
- int before;
- ASSERT_THAT(before = prctl(PR_GET_DUMPABLE), SyscallSucceeds());
- auto cleanup = Cleanup([before] {
- ASSERT_THAT(prctl(PR_SET_DUMPABLE, before), SyscallSucceeds());
- });
-
- EXPECT_THAT(prctl(PR_SET_DUMPABLE, SUID_DUMP_DISABLE), SyscallSucceeds());
- EXPECT_THAT(prctl(PR_GET_DUMPABLE),
- SyscallSucceedsWithValue(SUID_DUMP_DISABLE));
-
- EXPECT_THAT(prctl(PR_SET_DUMPABLE, SUID_DUMP_USER), SyscallSucceeds());
- EXPECT_THAT(prctl(PR_GET_DUMPABLE), SyscallSucceedsWithValue(SUID_DUMP_USER));
-}
-
-// SUID_DUMP_ROOT cannot be set via PR_SET_DUMPABLE.
-TEST(PrctlTest, RootDumpability) {
- EXPECT_THAT(prctl(PR_SET_DUMPABLE, SUID_DUMP_ROOT),
- SyscallFailsWithErrno(EINVAL));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
-
-int main(int argc, char** argv) {
- gvisor::testing::TestInit(&argc, &argv);
-
- if (absl::GetFlag(FLAGS_prctl_no_new_privs_test_child)) {
- exit(gvisor::testing::kPrctlNoNewPrivsTestChildExitBase +
- prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0));
- }
-
- return gvisor::testing::RunAllTests();
-}
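
For reference, the 15-character task-name truncation exercised by SetNameLongName above can be reproduced with a small standalone program. The sketch below is illustrative only and is not part of the test sources.

#include <sys/prctl.h>

#include <cassert>
#include <cstring>

int main() {
  // 20 'A's: longer than the 15 characters (plus NUL) the kernel stores.
  const char long_name[] = "AAAAAAAAAAAAAAAAAAAA";
  if (prctl(PR_SET_NAME, long_name) != 0) return 1;

  char name[16] = {};  // PR_GET_NAME requires a buffer of at least 16 bytes.
  if (prctl(PR_GET_NAME, name) != 0) return 1;

  assert(strlen(name) == 15);  // The name was truncated to 15 characters.
  return 0;
}
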
diff --git a/test/syscalls/linux/prctl_setuid.cc b/test/syscalls/linux/prctl_setuid.cc
deleted file mode 100644
index c4e9cf528..000000000
--- a/test/syscalls/linux/prctl_setuid.cc
+++ /dev/null
@@ -1,268 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sched.h>
-#include <sys/prctl.h>
-
-#include <string>
-
-#include "gtest/gtest.h"
-#include "absl/flags/flag.h"
-#include "test/util/capability_util.h"
-#include "test/util/logging.h"
-#include "test/util/multiprocess_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-ABSL_FLAG(int32_t, scratch_uid, 65534, "scratch UID");
-// This flag is used to verify that PR_GET_KEEPCAPS returns 0 after an exec;
-// the child's return code is offset by kPrGetKeepCapsExitBase.
-ABSL_FLAG(bool, prctl_pr_get_keepcaps, false,
-          "If true, the test will verify that prctl with PR_GET_KEEPCAPS "
-          "returns 0. The test will exit with the result of that check.");
-
-// These tests exist separately from prctl because we need to start
-// them as root. setuid() has the behavior that capabilities are fully
-// dropped if one of the UIDs was 0 before the setuid() call. This
-// behavior can be changed by using PR_SET_KEEPCAPS, and that is what
-// is tested here.
-//
-// Reference setuid(2):
-// The setuid() function checks the effective user ID of
-// the caller and if it is the superuser, all process-related user ID's
-// are set to uid. After this has occurred, it is impossible for the
-// program to regain root privileges.
-//
-// Thus, a set-user-ID-root program wishing to temporarily drop root
-// privileges, assume the identity of an unprivileged user, and then
-// regain root privileges afterward cannot use setuid(). You can
-// accomplish this with seteuid(2).
-namespace gvisor {
-namespace testing {
-
-// Offset added to exit code from test child to distinguish from other abnormal
-// exits.
-constexpr int kPrGetKeepCapsExitBase = 100;
-
-namespace {
-
-class PrctlKeepCapsSetuidTest : public ::testing::Test {
- protected:
- void SetUp() override {
- // PR_GET_KEEPCAPS will only return 0 or 1 (on success).
- ASSERT_THAT(original_keepcaps_ = prctl(PR_GET_KEEPCAPS, 0, 0, 0, 0),
- SyscallSucceeds());
- ASSERT_TRUE(original_keepcaps_ == 0 || original_keepcaps_ == 1);
- }
-
- void TearDown() override {
- // Restore PR_SET_KEEPCAPS.
- ASSERT_THAT(prctl(PR_SET_KEEPCAPS, original_keepcaps_, 0, 0, 0),
- SyscallSucceeds());
-
- // Verify that it was restored.
- ASSERT_THAT(prctl(PR_GET_KEEPCAPS, 0, 0, 0, 0),
- SyscallSucceedsWithValue(original_keepcaps_));
- }
-
- // The original keep caps value exposed so tests can use it if they need.
- int original_keepcaps_ = 0;
-};
-
-// This test will verify that a bad value for PR_SET_KEEPCAPS (i.e. anything
-// other than 0 or 1) returns EINVAL, as required by prctl(2).
-TEST_F(PrctlKeepCapsSetuidTest, PrctlBadArgsToKeepCaps) {
- ASSERT_THAT(prctl(PR_SET_KEEPCAPS, 2, 0, 0, 0),
- SyscallFailsWithErrno(EINVAL));
-}
-
-// This test will verify that a setuid(2) without PR_SET_KEEPCAPS will cause
-// all capabilities to be dropped.
-TEST_F(PrctlKeepCapsSetuidTest, SetUidNoKeepCaps) {
- // getuid(2) never fails.
- if (getuid() != 0) {
- SKIP_IF(!IsRunningOnGvisor());
- FAIL() << "User is not root on gvisor platform.";
- }
-
- // Do setuid in a separate thread so that after finishing this test, the
- // process can still open files the test harness created before starting
- // this test. Otherwise, the files are created by root (UID before the
- // test), but cannot be opened by the `uid` set below after the test. After
- // calling setuid(non-zero-UID), there is no way to get root privileges
- // back.
- ScopedThread([] {
- // Start by verifying we have a capability.
- TEST_CHECK(HaveCapability(CAP_SYS_ADMIN).ValueOrDie());
-
- // Verify that PR_GET_KEEPCAPS is disabled.
- ASSERT_THAT(prctl(PR_GET_KEEPCAPS, 0, 0, 0, 0),
- SyscallSucceedsWithValue(0));
-
- // Use syscall instead of glibc setuid wrapper because we want this setuid
- // call to only apply to this task. POSIX threads, however, require that
- // all threads have the same UIDs, so using the setuid wrapper sets all
- // threads' real UID.
- EXPECT_THAT(syscall(SYS_setuid, absl::GetFlag(FLAGS_scratch_uid)),
- SyscallSucceeds());
-
- // Verify that we changed uid.
- EXPECT_THAT(getuid(),
- SyscallSucceedsWithValue(absl::GetFlag(FLAGS_scratch_uid)));
-
-    // Verify we lost the capability in the effective set; this always happens.
-    TEST_CHECK(!HaveCapability(CAP_SYS_ADMIN).ValueOrDie());
-
-    // We should have also lost it in the permitted set because of the setuid(),
-    // so SetCapability should fail when we try to add it back to the effective
-    // set.
-    ASSERT_FALSE(SetCapability(CAP_SYS_ADMIN, true).ok());
- });
-}
-
-// This test will verify that a setuid with PR_SET_KEEPCAPS will cause
-// capabilities to be retained after we switch away from the root user.
-TEST_F(PrctlKeepCapsSetuidTest, SetUidKeepCaps) {
- // getuid(2) never fails.
- if (getuid() != 0) {
- SKIP_IF(!IsRunningOnGvisor());
- FAIL() << "User is not root on gvisor platform.";
- }
-
- // Do setuid in a separate thread so that after finishing this test, the
- // process can still open files the test harness created before starting
- // this test. Otherwise, the files are created by root (UID before the
- // test), but cannot be opened by the `uid` set below after the test. After
- // calling setuid(non-zero-UID), there is no way to get root privileges
- // back.
- ScopedThread([] {
- // Start by verifying we have a capability.
- TEST_CHECK(HaveCapability(CAP_SYS_ADMIN).ValueOrDie());
-
- // Set PR_SET_KEEPCAPS.
- ASSERT_THAT(prctl(PR_SET_KEEPCAPS, 1, 0, 0, 0), SyscallSucceeds());
-
- // Verify PR_SET_KEEPCAPS was set before we proceed.
- ASSERT_THAT(prctl(PR_GET_KEEPCAPS, 0, 0, 0, 0),
- SyscallSucceedsWithValue(1));
-
- // Use syscall instead of glibc setuid wrapper because we want this setuid
- // call to only apply to this task. POSIX threads, however, require that
- // all threads have the same UIDs, so using the setuid wrapper sets all
- // threads' real UID.
- EXPECT_THAT(syscall(SYS_setuid, absl::GetFlag(FLAGS_scratch_uid)),
- SyscallSucceeds());
-
- // Verify that we changed uid.
- EXPECT_THAT(getuid(),
- SyscallSucceedsWithValue(absl::GetFlag(FLAGS_scratch_uid)));
-
-    // Verify we lost the capability in the effective set; this always happens.
- TEST_CHECK(!HaveCapability(CAP_SYS_ADMIN).ValueOrDie());
-
- // We lost the capability in the effective set, but it will still
- // exist in the permitted set so we can elevate the capability.
- ASSERT_NO_ERRNO(SetCapability(CAP_SYS_ADMIN, true));
-
- // Verify we got back the capability in the effective set.
- TEST_CHECK(HaveCapability(CAP_SYS_ADMIN).ValueOrDie());
- });
-}
-
-// This test will verify that PR_SET_KEEPCAPS is not retained
-// across an execve. According to prctl(2):
-// "The "keep capabilities" value will be reset to 0 on subsequent
-// calls to execve(2)."
-TEST_F(PrctlKeepCapsSetuidTest, NoKeepCapsAfterExec) {
- ASSERT_THAT(prctl(PR_SET_KEEPCAPS, 1, 0, 0, 0), SyscallSucceeds());
-
- // Verify PR_SET_KEEPCAPS was set before we proceed.
- ASSERT_THAT(prctl(PR_GET_KEEPCAPS, 0, 0, 0, 0), SyscallSucceedsWithValue(1));
-
- pid_t child_pid = -1;
- int execve_errno = 0;
- // Do an exec and then verify that PR_GET_KEEPCAPS returns 0
- // see the body of main below.
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(ForkAndExec(
- "/proc/self/exe", {"/proc/self/exe", "--prctl_pr_get_keepcaps"}, {},
- nullptr, &child_pid, &execve_errno));
-
- ASSERT_GT(child_pid, 0);
- ASSERT_EQ(execve_errno, 0);
-
- int status = 0;
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0), SyscallSucceeds());
- ASSERT_TRUE(WIFEXITED(status));
- // PR_SET_KEEPCAPS should have been cleared by the exec.
- // Success should return gvisor::testing::kPrGetKeepCapsExitBase + 0
- ASSERT_EQ(WEXITSTATUS(status), kPrGetKeepCapsExitBase);
-}
-
-TEST_F(PrctlKeepCapsSetuidTest, NoKeepCapsAfterNewUserNamespace) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanCreateUserNamespace()));
-
- // Fork to avoid changing the user namespace of the original test process.
- pid_t const child_pid = fork();
-
- if (child_pid == 0) {
- // Verify that the keepcaps flag is set to 0 when we change user namespaces.
- TEST_PCHECK(prctl(PR_SET_KEEPCAPS, 1, 0, 0, 0) == 0);
- MaybeSave();
-
- TEST_PCHECK(prctl(PR_GET_KEEPCAPS, 0, 0, 0, 0) == 1);
- MaybeSave();
-
- TEST_PCHECK(unshare(CLONE_NEWUSER) == 0);
- MaybeSave();
-
- TEST_PCHECK(prctl(PR_GET_KEEPCAPS, 0, 0, 0, 0) == 0);
- MaybeSave();
-
- _exit(0);
- }
-
- int status;
- ASSERT_THAT(child_pid, SyscallSucceeds());
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << "status = " << status;
-}
-
-// This test verifies that PR_SET_KEEPCAPS and PR_GET_KEEPCAPS work correctly.
-TEST_F(PrctlKeepCapsSetuidTest, PrGetKeepCaps) {
- // Set PR_SET_KEEPCAPS to the negation of the original.
- ASSERT_THAT(prctl(PR_SET_KEEPCAPS, !original_keepcaps_, 0, 0, 0),
- SyscallSucceeds());
-
- // Verify it was set.
- ASSERT_THAT(prctl(PR_GET_KEEPCAPS, 0, 0, 0, 0),
- SyscallSucceedsWithValue(!original_keepcaps_));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
-
-int main(int argc, char** argv) {
- gvisor::testing::TestInit(&argc, &argv);
-
- if (absl::GetFlag(FLAGS_prctl_pr_get_keepcaps)) {
- return gvisor::testing::kPrGetKeepCapsExitBase +
- prctl(PR_GET_KEEPCAPS, 0, 0, 0, 0);
- }
-
- return gvisor::testing::RunAllTests();
-}
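
The keep-capabilities round-trip that these tests build on can be sketched without root, as a minimal illustration that reads, sets, and restores the flag (no setuid() call); it is not part of the test sources.

#include <sys/prctl.h>

#include <cstdio>

int main() {
  int orig = prctl(PR_GET_KEEPCAPS, 0, 0, 0, 0);  // Returns 0 or 1 on success.
  if (orig < 0) { perror("PR_GET_KEEPCAPS"); return 1; }

  if (prctl(PR_SET_KEEPCAPS, 1, 0, 0, 0) != 0) { perror("PR_SET_KEEPCAPS"); return 1; }
  printf("keepcaps now: %d (was %d)\n", prctl(PR_GET_KEEPCAPS, 0, 0, 0, 0), orig);

  // Per prctl(2), the flag is reset to 0 across execve(2); that reset is what
  // NoKeepCapsAfterExec verified by re-executing the test binary.
  return prctl(PR_SET_KEEPCAPS, orig, 0, 0, 0);  // Restore the original value.
}
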
diff --git a/test/syscalls/linux/pread64.cc b/test/syscalls/linux/pread64.cc
deleted file mode 100644
index 0a09259a3..000000000
--- a/test/syscalls/linux/pread64.cc
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <linux/unistd.h>
-#include <sys/mman.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-class Pread64Test : public ::testing::Test {
- void SetUp() override {
- name_ = NewTempAbsPath();
- ASSERT_NO_ERRNO_AND_VALUE(Open(name_, O_CREAT, 0644));
- }
-
- void TearDown() override { unlink(name_.c_str()); }
-
- public:
- std::string name_;
-};
-
-TEST(Pread64TestNoTempFile, BadFileDescriptor) {
- char buf[1024];
- EXPECT_THAT(pread64(-1, buf, 1024, 0), SyscallFailsWithErrno(EBADF));
-}
-
-TEST_F(Pread64Test, ZeroBuffer) {
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(name_, O_RDWR));
-
- char msg[] = "hello world";
- EXPECT_THAT(pwrite64(fd.get(), msg, strlen(msg), 0),
- SyscallSucceedsWithValue(strlen(msg)));
-
- char buf[10];
- EXPECT_THAT(pread64(fd.get(), buf, 0, 0), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(Pread64Test, BadBuffer) {
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(name_, O_RDWR));
-
- char msg[] = "hello world";
- EXPECT_THAT(pwrite64(fd.get(), msg, strlen(msg), 0),
- SyscallSucceedsWithValue(strlen(msg)));
-
- char* bad_buffer = nullptr;
- EXPECT_THAT(pread64(fd.get(), bad_buffer, 1024, 0),
- SyscallFailsWithErrno(EFAULT));
-}
-
-TEST_F(Pread64Test, WriteOnlyNotReadable) {
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(name_, O_WRONLY));
-
- char buf[1024];
- EXPECT_THAT(pread64(fd.get(), buf, 1024, 0), SyscallFailsWithErrno(EBADF));
-}
-
-TEST_F(Pread64Test, Pread64WithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_PATH));
-
- char buf[1024];
- EXPECT_THAT(pread64(fd.get(), buf, 1024, 0), SyscallFailsWithErrno(EBADF));
-}
-
-TEST_F(Pread64Test, DirNotReadable) {
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(GetAbsoluteTestTmpdir(), O_RDONLY));
-
- char buf[1024];
- EXPECT_THAT(pread64(fd.get(), buf, 1024, 0), SyscallFailsWithErrno(EISDIR));
-}
-
-TEST_F(Pread64Test, BadOffset) {
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(name_, O_RDONLY));
-
- char buf[1024];
- EXPECT_THAT(pread64(fd.get(), buf, 1024, -1), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_F(Pread64Test, OffsetNotIncremented) {
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(name_, O_RDWR));
-
- char msg[] = "hello world";
- EXPECT_THAT(write(fd.get(), msg, strlen(msg)),
- SyscallSucceedsWithValue(strlen(msg)));
- int offset;
- EXPECT_THAT(offset = lseek(fd.get(), 0, SEEK_CUR), SyscallSucceeds());
-
- char buf1[1024];
- EXPECT_THAT(pread64(fd.get(), buf1, 1024, 0),
- SyscallSucceedsWithValue(strlen(msg)));
- EXPECT_THAT(lseek(fd.get(), 0, SEEK_CUR), SyscallSucceedsWithValue(offset));
-
- char buf2[1024];
- EXPECT_THAT(pread64(fd.get(), buf2, 1024, 3),
- SyscallSucceedsWithValue(strlen(msg) - 3));
- EXPECT_THAT(lseek(fd.get(), 0, SEEK_CUR), SyscallSucceedsWithValue(offset));
-}
-
-TEST_F(Pread64Test, EndOfFile) {
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(name_, O_RDONLY));
-
- char buf[1024];
- EXPECT_THAT(pread64(fd.get(), buf, 1024, 0), SyscallSucceedsWithValue(0));
-}
-
-int memfd_create(const std::string& name, unsigned int flags) {
- return syscall(__NR_memfd_create, name.c_str(), flags);
-}
-
-TEST_F(Pread64Test, Overflow) {
- int f = memfd_create("negative", 0);
- const FileDescriptor fd(f);
-
- EXPECT_THAT(ftruncate(fd.get(), 0x7fffffffffffffffull), SyscallSucceeds());
-
- char buf[10];
- EXPECT_THAT(pread64(fd.get(), buf, sizeof(buf), 0x7fffffffffffffffull),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(Pread64TestNoTempFile, CantReadSocketPair) {
- int sock_fds[2];
- EXPECT_THAT(socketpair(AF_UNIX, SOCK_STREAM, 0, sock_fds), SyscallSucceeds());
-
- char buf[1024];
- EXPECT_THAT(pread64(sock_fds[0], buf, 1024, 0),
- SyscallFailsWithErrno(ESPIPE));
- EXPECT_THAT(pread64(sock_fds[1], buf, 1024, 0),
- SyscallFailsWithErrno(ESPIPE));
-
- EXPECT_THAT(close(sock_fds[0]), SyscallSucceeds());
- EXPECT_THAT(close(sock_fds[1]), SyscallSucceeds());
-}
-
-TEST(Pread64TestNoTempFile, CantReadPipe) {
- char buf[1024];
-
- int pipe_fds[2];
- EXPECT_THAT(pipe(pipe_fds), SyscallSucceeds());
-
- EXPECT_THAT(pread64(pipe_fds[0], buf, 1024, 0),
- SyscallFailsWithErrno(ESPIPE));
-
- EXPECT_THAT(close(pipe_fds[0]), SyscallSucceeds());
- EXPECT_THAT(close(pipe_fds[1]), SyscallSucceeds());
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
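
For reference, the property checked by OffsetNotIncremented above, namely that pread(2) reads at an explicit offset without moving the fd's offset, can be shown with a short standalone sketch; the /tmp path below is an arbitrary, hypothetical choice used only for illustration.

#include <fcntl.h>
#include <unistd.h>

#include <cassert>
#include <cstring>

int main() {
  // Hypothetical scratch path, used only for this illustration.
  const char* path = "/tmp/pread_offset_demo";
  int fd = open(path, O_CREAT | O_RDWR | O_TRUNC, 0644);
  if (fd < 0) return 1;

  const char msg[] = "hello world";
  if (write(fd, msg, strlen(msg)) != static_cast<ssize_t>(strlen(msg))) return 1;
  off_t before = lseek(fd, 0, SEEK_CUR);  // Offset is now at end of file.

  char buf[64] = {};
  if (pread(fd, buf, sizeof(buf), 0) < 0) return 1;  // Read from offset 0.
  assert(lseek(fd, 0, SEEK_CUR) == before);          // fd offset is unchanged.

  close(fd);
  unlink(path);
  return 0;
}
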
diff --git a/test/syscalls/linux/preadv.cc b/test/syscalls/linux/preadv.cc
deleted file mode 100644
index 1c40f0915..000000000
--- a/test/syscalls/linux/preadv.cc
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <sys/uio.h>
-#include <sys/wait.h>
-#include <unistd.h>
-
-#include <atomic>
-#include <string>
-
-#include "gtest/gtest.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/logging.h"
-#include "test/util/memory_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-#include "test/util/timer_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// Stress copy-on-write. Attempts to reproduce b/38430174.
-TEST(PreadvTest, MMConcurrencyStress) {
- // Fill a one-page file with zeroes (the contents don't really matter).
- const auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- /* parent = */ GetAbsoluteTestTmpdir(),
- /* content = */ std::string(kPageSize, 0), TempPath::kDefaultFileMode));
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(f.path(), O_RDONLY));
-
- // Get a one-page private mapping to read to.
- const Mapping m = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
-
- // Repeatedly fork in a separate thread to force the mapping to become
- // copy-on-write.
- std::atomic<bool> done(false);
- const ScopedThread t([&] {
- while (!done.load()) {
- const pid_t pid = fork();
- TEST_CHECK(pid >= 0);
- if (pid == 0) {
- // In child. The parent was obviously multithreaded, so it's neither
- // safe nor necessary to do much more than exit.
- syscall(SYS_exit_group, 0);
- }
- int status;
- ASSERT_THAT(RetryEINTR(waitpid)(pid, &status, 0),
- SyscallSucceedsWithValue(pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << "status = " << status;
- }
- });
-
- // Repeatedly read to the mapping.
- struct iovec iov[2];
- iov[0].iov_base = m.ptr();
- iov[0].iov_len = kPageSize / 2;
- iov[1].iov_base = reinterpret_cast<void*>(m.addr() + kPageSize / 2);
- iov[1].iov_len = kPageSize / 2;
- constexpr absl::Duration kTestDuration = absl::Seconds(5);
- const absl::Time end = absl::Now() + kTestDuration;
- while (absl::Now() < end) {
- // Among other causes, save/restore cycles may cause interruptions resulting
- // in partial reads, so we don't expect any particular return value.
- EXPECT_THAT(RetryEINTR(preadv)(fd.get(), iov, 2, 0), SyscallSucceeds());
- }
-
- // Stop the other thread.
- done.store(true);
-
- // The test passes if it neither deadlocks nor crashes the OS.
-}
-
-// This test calls preadv with an O_PATH fd.
-TEST(PreadvTest, PreadvWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_PATH));
-
- struct iovec iov;
- iov.iov_base = nullptr;
- iov.iov_len = 0;
-
- EXPECT_THAT(preadv(fd.get(), &iov, 1, 0), SyscallFailsWithErrno(EBADF));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/preadv2.cc b/test/syscalls/linux/preadv2.cc
deleted file mode 100644
index cb58719c4..000000000
--- a/test/syscalls/linux/preadv2.cc
+++ /dev/null
@@ -1,298 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <sys/uio.h>
-
-#include <string>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "absl/memory/memory.h"
-#include "test/syscalls/linux/file_base.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-#ifndef SYS_preadv2
-#if defined(__x86_64__)
-#define SYS_preadv2 327
-#elif defined(__aarch64__)
-#define SYS_preadv2 286
-#else
-#error "Unknown architecture"
-#endif
-#endif // SYS_preadv2
-
-#ifndef RWF_HIPRI
-#define RWF_HIPRI 0x1
-#endif // RWF_HIPRI
-
-constexpr int kBufSize = 1024;
-
-std::string SetContent() {
- std::string content;
- for (int i = 0; i < kBufSize; i++) {
- content += static_cast<char>((i % 10) + '0');
- }
- return content;
-}
-
-ssize_t preadv2(unsigned long fd, const struct iovec* iov, unsigned long iovcnt,
- off_t offset, unsigned long flags) {
-  // The raw syscall convention for preadv2 is unusual (see man 2 syscall and
-  // search for preadv2), so we insert a 0 to word-align the flags argument on
-  // native.
- return syscall(SYS_preadv2, fd, iov, iovcnt, offset, 0, flags);
-}
-
-// This test is the base case where we call preadv (no offset, no flags).
-TEST(Preadv2Test, TestBaseCall) {
- SKIP_IF(preadv2(-1, nullptr, 0, 0, 0) < 0 && errno == ENOSYS);
-
- std::string content = SetContent();
-
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), content, TempPath::kDefaultFileMode));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDONLY));
-
- std::vector<char> buf(kBufSize);
- struct iovec iov[2];
- iov[0].iov_base = buf.data();
- iov[0].iov_len = buf.size() / 2;
- iov[1].iov_base = static_cast<char*>(iov[0].iov_base) + (content.size() / 2);
- iov[1].iov_len = content.size() / 2;
-
- EXPECT_THAT(preadv2(fd.get(), iov, /*iovcnt*/ 2, /*offset=*/0, /*flags=*/0),
- SyscallSucceedsWithValue(kBufSize));
-
- EXPECT_EQ(content, std::string(buf.data(), buf.size()));
-}
-
-// This test is where we call preadv with an offset and no flags.
-TEST(Preadv2Test, TestValidPositiveOffset) {
- SKIP_IF(preadv2(-1, nullptr, 0, 0, 0) < 0 && errno == ENOSYS);
-
- std::string content = SetContent();
- const std::string prefix = "0";
-
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), prefix + content, TempPath::kDefaultFileMode));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDONLY));
-
- std::vector<char> buf(kBufSize, '0');
- struct iovec iov;
- iov.iov_base = buf.data();
- iov.iov_len = buf.size();
-
- EXPECT_THAT(preadv2(fd.get(), &iov, /*iovcnt=*/1, /*offset=*/prefix.size(),
- /*flags=*/0),
- SyscallSucceedsWithValue(kBufSize));
-
- EXPECT_THAT(lseek(fd.get(), 0, SEEK_CUR), SyscallSucceedsWithValue(0));
-
- EXPECT_EQ(content, std::string(buf.data(), buf.size()));
-}
-
-// This test covers the case where we call preadv2 with -1 as the offset. The
-// read should use the current file offset, so the test seeks past a short
-// prefix prior to calling preadv2.
-TEST(Preadv2Test, TestNegativeOneOffset) {
- SKIP_IF(preadv2(-1, nullptr, 0, 0, 0) < 0 && errno == ENOSYS);
-
- std::string content = SetContent();
- const std::string prefix = "231";
-
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), prefix + content, TempPath::kDefaultFileMode));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDONLY));
-
- ASSERT_THAT(lseek(fd.get(), prefix.size(), SEEK_SET),
- SyscallSucceedsWithValue(prefix.size()));
-
- std::vector<char> buf(kBufSize, '0');
- struct iovec iov;
- iov.iov_base = buf.data();
- iov.iov_len = buf.size();
-
- EXPECT_THAT(preadv2(fd.get(), &iov, /*iovcnt=*/1, /*offset=*/-1, /*flags=*/0),
- SyscallSucceedsWithValue(kBufSize));
-
- EXPECT_THAT(lseek(fd.get(), 0, SEEK_CUR),
- SyscallSucceedsWithValue(prefix.size() + buf.size()));
-
- EXPECT_EQ(content, std::string(buf.data(), buf.size()));
-}
-
-// preadv2 requires that, if the RWF_HIPRI flag is passed, the fd be opened
-// with O_DIRECT. This test implements a correct call with the RWF_HIPRI flag.
-TEST(Preadv2Test, TestCallWithRWF_HIPRI) {
- SKIP_IF(preadv2(-1, nullptr, 0, 0, 0) < 0 && errno == ENOSYS);
-
- std::string content = SetContent();
-
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), content, TempPath::kDefaultFileMode));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDONLY));
-
- EXPECT_THAT(fsync(fd.get()), SyscallSucceeds());
-
- std::vector<char> buf(kBufSize, '0');
- struct iovec iov;
- iov.iov_base = buf.data();
- iov.iov_len = buf.size();
-
- EXPECT_THAT(
- preadv2(fd.get(), &iov, /*iovcnt=*/1, /*offset=*/0, /*flags=*/RWF_HIPRI),
- SyscallSucceedsWithValue(kBufSize));
-
- EXPECT_THAT(lseek(fd.get(), 0, SEEK_CUR), SyscallSucceedsWithValue(0));
-
- EXPECT_EQ(content, std::string(buf.data(), buf.size()));
-}
-
-// This test calls preadv2 with an invalid flag.
-TEST(Preadv2Test, TestInvalidFlag) {
- SKIP_IF(preadv2(-1, nullptr, 0, 0, 0) < 0 && errno == ENOSYS);
-
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), "", TempPath::kDefaultFileMode));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDONLY | O_DIRECT));
-
- std::vector<char> buf(kBufSize, '0');
- struct iovec iov;
- iov.iov_base = buf.data();
- iov.iov_len = buf.size();
-
- EXPECT_THAT(preadv2(fd.get(), &iov, /*iovcnt=*/1,
- /*offset=*/0, /*flags=*/0xF0),
- SyscallFailsWithErrno(EOPNOTSUPP));
-}
-
-// This test calls preadv2 with an invalid offset.
-TEST(Preadv2Test, TestInvalidOffset) {
- SKIP_IF(preadv2(-1, nullptr, 0, 0, 0) < 0 && errno == ENOSYS);
-
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), "", TempPath::kDefaultFileMode));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDONLY | O_DIRECT));
-
- auto iov = absl::make_unique<struct iovec[]>(1);
- iov[0].iov_base = nullptr;
- iov[0].iov_len = 0;
-
- EXPECT_THAT(preadv2(fd.get(), iov.get(), /*iovcnt=*/1, /*offset=*/-8,
- /*flags=*/0),
- SyscallFailsWithErrno(EINVAL));
-}
-
-// This test calls preadv with a file set O_WRONLY.
-TEST(Preadv2Test, TestUnreadableFile) {
- SKIP_IF(preadv2(-1, nullptr, 0, 0, 0) < 0 && errno == ENOSYS);
-
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), "", TempPath::kDefaultFileMode));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_WRONLY));
-
- auto iov = absl::make_unique<struct iovec[]>(1);
- iov[0].iov_base = nullptr;
- iov[0].iov_len = 0;
-
- EXPECT_THAT(preadv2(fd.get(), iov.get(), /*iovcnt=*/1,
- /*offset=*/0, /*flags=*/0),
- SyscallFailsWithErrno(EBADF));
-}
-
-// This test calls preadv2 with a file opened with O_PATH.
-TEST(Preadv2Test, Preadv2WithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- SKIP_IF(preadv2(-1, nullptr, 0, 0, 0) < 0 && errno == ENOSYS);
-
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_PATH));
-
- auto iov = absl::make_unique<struct iovec[]>(1);
- iov[0].iov_base = nullptr;
- iov[0].iov_len = 0;
-
- EXPECT_THAT(preadv2(fd.get(), iov.get(), /*iovcnt=*/1, /*offset=*/0,
- /*flags=*/0),
- SyscallFailsWithErrno(EBADF));
-}
-
-// Calling preadv2 with a non-negative offset behaves like preadv, and preadv
-// on an unseekable file is not allowed. A pipe is used as the unseekable file.
-TEST(Preadv2Test, TestUnseekableFileInvalid) {
- SKIP_IF(preadv2(-1, nullptr, 0, 0, 0) < 0 && errno == ENOSYS);
-
- int pipe_fds[2];
-
- ASSERT_THAT(pipe(pipe_fds), SyscallSucceeds());
-
- auto iov = absl::make_unique<struct iovec[]>(1);
- iov[0].iov_base = nullptr;
- iov[0].iov_len = 0;
-
- EXPECT_THAT(preadv2(pipe_fds[0], iov.get(), /*iovcnt=*/1,
- /*offset=*/2, /*flags=*/0),
- SyscallFailsWithErrno(ESPIPE));
-
- EXPECT_THAT(close(pipe_fds[0]), SyscallSucceeds());
- EXPECT_THAT(close(pipe_fds[1]), SyscallSucceeds());
-}
-
-TEST(Preadv2Test, TestUnseekableFileValid) {
- SKIP_IF(preadv2(-1, nullptr, 0, 0, 0) < 0 && errno == ENOSYS);
-
- int pipe_fds[2];
-
- ASSERT_THAT(pipe(pipe_fds), SyscallSucceeds());
-
- std::vector<char> content(32, 'X');
-
- EXPECT_THAT(write(pipe_fds[1], content.data(), content.size()),
- SyscallSucceedsWithValue(content.size()));
-
- std::vector<char> buf(content.size());
- auto iov = absl::make_unique<struct iovec[]>(1);
- iov[0].iov_base = buf.data();
- iov[0].iov_len = buf.size();
-
- EXPECT_THAT(preadv2(pipe_fds[0], iov.get(), /*iovcnt=*/1,
- /*offset=*/static_cast<off_t>(-1), /*flags=*/0),
- SyscallSucceedsWithValue(buf.size()));
-
- EXPECT_EQ(content, buf);
-
- EXPECT_THAT(close(pipe_fds[0]), SyscallSucceeds());
- EXPECT_THAT(close(pipe_fds[1]), SyscallSucceeds());
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
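
The offset semantics covered by TestNegativeOneOffset above can also be sketched with the glibc preadv2() wrapper (available in glibc 2.26 and later; older systems need a raw-syscall shim like the one in the deleted test). The /tmp path is an arbitrary, hypothetical choice for illustration.

#ifndef _GNU_SOURCE
#define _GNU_SOURCE  // For the glibc preadv2() wrapper.
#endif

#include <fcntl.h>
#include <sys/uio.h>
#include <unistd.h>

#include <cassert>
#include <cstring>

int main() {
  // Hypothetical scratch path, used only for this illustration.
  const char* path = "/tmp/preadv2_offset_demo";
  int fd = open(path, O_CREAT | O_RDWR | O_TRUNC, 0644);
  if (fd < 0) return 1;

  const char data[] = "0123456789";
  if (write(fd, data, strlen(data)) < 0) return 1;
  lseek(fd, 3, SEEK_SET);  // Position the file offset at 3.

  char buf[8] = {};
  struct iovec iov = {buf, sizeof(buf) - 1};
  // offset == -1 means: use (and advance) the current file offset, like readv.
  ssize_t n = preadv2(fd, &iov, 1, /*offset=*/-1, /*flags=*/0);
  assert(n == 7);                        // Read "3456789".
  assert(lseek(fd, 0, SEEK_CUR) == 10);  // The file offset advanced.

  close(fd);
  unlink(path);
  return 0;
}
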
diff --git a/test/syscalls/linux/priority.cc b/test/syscalls/linux/priority.cc
deleted file mode 100644
index e381248e4..000000000
--- a/test/syscalls/linux/priority.cc
+++ /dev/null
@@ -1,223 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sys/resource.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <string>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "absl/strings/numbers.h"
-#include "absl/strings/str_split.h"
-#include "test/util/capability_util.h"
-#include "test/util/fs_util.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// These tests cover both the getpriority(2) and setpriority(2) syscalls.
-// They are very rudimentary because getpriority and setpriority have not
-// yet been fully implemented.
-
-// Getpriority does something
-TEST(GetpriorityTest, Implemented) {
- // "getpriority() can legitimately return the value -1, it is necessary to
- // clear the external variable errno prior to the call"
- errno = 0;
- EXPECT_THAT(getpriority(PRIO_PROCESS, /*who=*/0), SyscallSucceeds());
-}
-
-// Invalid which
-TEST(GetpriorityTest, InvalidWhich) {
- errno = 0;
- EXPECT_THAT(getpriority(/*which=*/3, /*who=*/0),
- SyscallFailsWithErrno(EINVAL));
-}
-
-// Process is found when which=PRIO_PROCESS
-TEST(GetpriorityTest, ValidWho) {
- errno = 0;
- EXPECT_THAT(getpriority(PRIO_PROCESS, getpid()), SyscallSucceeds());
-}
-
-// Process is not found when which=PRIO_PROCESS
-TEST(GetpriorityTest, InvalidWho) {
- errno = 0;
- // Flaky, but it's tough to avoid a race condition when finding an unused pid
- EXPECT_THAT(getpriority(PRIO_PROCESS, /*who=*/INT_MAX - 1),
- SyscallFailsWithErrno(ESRCH));
-}
-
-// Setpriority does something
-TEST(SetpriorityTest, Implemented) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_NICE)));
-
- // No need to clear errno for setpriority():
- // "The setpriority() call returns 0 if there is no error, or -1 if there is"
- EXPECT_THAT(setpriority(PRIO_PROCESS, /*who=*/0,
- /*nice=*/16), // NOLINT(bugprone-argument-comment)
- SyscallSucceeds());
-}
-
-// Invalid which
-TEST(Setpriority, InvalidWhich) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_NICE)));
-
- EXPECT_THAT(setpriority(/*which=*/3, /*who=*/0,
- /*nice=*/16), // NOLINT(bugprone-argument-comment)
- SyscallFailsWithErrno(EINVAL));
-}
-
-// Process is found when which=PRIO_PROCESS
-TEST(SetpriorityTest, ValidWho) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_NICE)));
-
- EXPECT_THAT(setpriority(PRIO_PROCESS, getpid(),
- /*nice=*/16), // NOLINT(bugprone-argument-comment)
- SyscallSucceeds());
-}
-
-// niceval is within the range [-20, 19]
-TEST(SetpriorityTest, InsideRange) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_NICE)));
-
- // Set 0 < niceval < 19
- int nice = 12;
- EXPECT_THAT(setpriority(PRIO_PROCESS, getpid(), nice), SyscallSucceeds());
-
- errno = 0;
- EXPECT_THAT(getpriority(PRIO_PROCESS, getpid()),
- SyscallSucceedsWithValue(nice));
-
- // Set -20 < niceval < 0
- nice = -12;
- EXPECT_THAT(setpriority(PRIO_PROCESS, getpid(), nice), SyscallSucceeds());
-
- errno = 0;
- EXPECT_THAT(getpriority(PRIO_PROCESS, getpid()),
- SyscallSucceedsWithValue(nice));
-}
-
-// Verify that priority/niceness are exposed via /proc/PID/stat.
-TEST(SetpriorityTest, NicenessExposedViaProcfs) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_NICE)));
-
- constexpr int kNiceVal = 12;
- ASSERT_THAT(setpriority(PRIO_PROCESS, getpid(), kNiceVal), SyscallSucceeds());
-
- errno = 0;
- ASSERT_THAT(getpriority(PRIO_PROCESS, getpid()),
- SyscallSucceedsWithValue(kNiceVal));
-
- // Now verify we can read that same value via /proc/self/stat.
- std::string proc_stat;
- ASSERT_NO_ERRNO(GetContents("/proc/self/stat", &proc_stat));
- std::vector<std::string> pieces = absl::StrSplit(proc_stat, ' ');
- ASSERT_GT(pieces.size(), 20);
-
- int niceness_procfs = 0;
- ASSERT_TRUE(absl::SimpleAtoi(pieces[18], &niceness_procfs));
- EXPECT_EQ(niceness_procfs, kNiceVal);
-}
-
-// In the kernel's implementation, values outside the range [-20, 19] are
-// clamped to those minimum and maximum values. See
-// https://elixir.bootlin.com/linux/v4.4/source/kernel/sys.c#L190
-TEST(SetpriorityTest, OutsideRange) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_NICE)));
-
- // Set niceval > 19
- EXPECT_THAT(setpriority(PRIO_PROCESS, getpid(),
- /*nice=*/100), // NOLINT(bugprone-argument-comment)
- SyscallSucceeds());
-
- errno = 0;
- // Test niceval truncated to 19
- EXPECT_THAT(getpriority(PRIO_PROCESS, getpid()),
- SyscallSucceedsWithValue(
- /*maxnice=*/19)); // NOLINT(bugprone-argument-comment)
-
- // Set niceval < -20
- EXPECT_THAT(setpriority(PRIO_PROCESS, getpid(),
- /*nice=*/-100), // NOLINT(bugprone-argument-comment)
- SyscallSucceeds());
-
- errno = 0;
- // Test niceval truncated to -20
- EXPECT_THAT(getpriority(PRIO_PROCESS, getpid()),
- SyscallSucceedsWithValue(
- /*minnice=*/-20)); // NOLINT(bugprone-argument-comment)
-}
-
-// Process is not found when which=PRIO_PROCESS
-TEST(SetpriorityTest, InvalidWho) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_NICE)));
-
- // Flaky, but it's tough to avoid a race condition when finding an unused pid
- EXPECT_THAT(setpriority(PRIO_PROCESS,
- /*who=*/INT_MAX - 1,
- /*nice=*/16), // NOLINT(bugprone-argument-comment)
- SyscallFailsWithErrno(ESRCH));
-}
-
-// Nice succeeds and correctly modifies (or, in this case, does not modify)
-// the priority of the process.
-TEST(SetpriorityTest, NiceSucceeds) {
- errno = 0;
- const int priority_before = getpriority(PRIO_PROCESS, /*who=*/0);
- ASSERT_THAT(nice(/*inc=*/0), SyscallSucceeds());
-
- // nice(0) should not change priority
- EXPECT_EQ(priority_before, getpriority(PRIO_PROCESS, /*who=*/0));
-}
-
-// Threads resulting from clone() maintain the parent's priority.
-// Changes to the child's priority do not affect the parent's priority.
-TEST(GetpriorityTest, CloneMaintainsPriority) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_NICE)));
-
- constexpr int kParentPriority = 16;
- constexpr int kChildPriority = 14;
- ASSERT_THAT(setpriority(PRIO_PROCESS, getpid(), kParentPriority),
- SyscallSucceeds());
-
- ScopedThread th([]() {
- // Check that priority equals that of parent thread
- pid_t my_tid;
- EXPECT_THAT(my_tid = syscall(__NR_gettid), SyscallSucceeds());
- EXPECT_THAT(getpriority(PRIO_PROCESS, my_tid),
- SyscallSucceedsWithValue(kParentPriority));
-
- // Change the child thread's priority
- EXPECT_THAT(setpriority(PRIO_PROCESS, my_tid, kChildPriority),
- SyscallSucceeds());
- });
- th.Join();
-
-  // Check that the parent's priority remained the same even though
-  // the child's priority was altered.
- EXPECT_EQ(kParentPriority, getpriority(PRIO_PROCESS, syscall(__NR_gettid)));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
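
As a reference for the errno convention noted in the comments above, a minimal getpriority(2) call looks like the sketch below: -1 is a legal return value, so errno must be cleared before the call and checked afterwards. This is illustrative only, not part of the test sources.

#include <sys/resource.h>

#include <cerrno>
#include <cstdio>

int main() {
  errno = 0;  // getpriority() may legitimately return -1, so clear errno first.
  int prio = getpriority(PRIO_PROCESS, /*who=*/0);  // who == 0: calling process.
  if (prio == -1 && errno != 0) {
    perror("getpriority");
    return 1;
  }
  printf("current niceness: %d\n", prio);
  return 0;
}
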
diff --git a/test/syscalls/linux/priority_execve.cc b/test/syscalls/linux/priority_execve.cc
deleted file mode 100644
index 5cb343bad..000000000
--- a/test/syscalls/linux/priority_execve.cc
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <sys/resource.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-int main(int argc, char** argv, char** envp) {
- errno = 0;
- int prio = getpriority(PRIO_PROCESS, getpid());
-
- // NOTE: getpriority() can legitimately return negative values
- // in the range [-20, 0). If errno is set, exit with a value that
- // could not be reached by a valid priority. Valid exit values
- // for the test are in the range [1, 40], so we'll use 0.
- if (errno != 0) {
- printf("getpriority() failed with errno = %d\n", errno);
- exit(0);
- }
-
- // Used by test to verify priority is being maintained through
- // calls to execve(). Since prio should always be in the range
- // [-20, 19], we offset by 20 so as not to have negative exit codes.
- exit(20 - prio);
-
- return 0;
-}
diff --git a/test/syscalls/linux/proc.cc b/test/syscalls/linux/proc.cc
deleted file mode 100644
index 78aa73edc..000000000
--- a/test/syscalls/linux/proc.cc
+++ /dev/null
@@ -1,2738 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <elf.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <limits.h>
-#include <linux/magic.h>
-#include <linux/sem.h>
-#include <sched.h>
-#include <signal.h>
-#include <stddef.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/mman.h>
-#include <sys/prctl.h>
-#include <sys/ptrace.h>
-#include <sys/stat.h>
-#include <sys/statfs.h>
-#include <sys/utsname.h>
-#include <syscall.h>
-#include <unistd.h>
-
-#include <algorithm>
-#include <atomic>
-#include <functional>
-#include <iostream>
-#include <map>
-#include <memory>
-#include <ostream>
-#include <regex>
-#include <string>
-#include <unordered_set>
-#include <utility>
-#include <vector>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/container/node_hash_set.h"
-#include "absl/strings/ascii.h"
-#include "absl/strings/match.h"
-#include "absl/strings/numbers.h"
-#include "absl/strings/str_cat.h"
-#include "absl/strings/str_split.h"
-#include "absl/strings/string_view.h"
-#include "absl/synchronization/mutex.h"
-#include "absl/synchronization/notification.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/util/capability_util.h"
-#include "test/util/cleanup.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/memory_util.h"
-#include "test/util/mount_util.h"
-#include "test/util/multiprocess_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/proc_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-#include "test/util/time_util.h"
-#include "test/util/timer_util.h"
-
-// NOTE(magi): No, this isn't really a syscall, but this is a really simple
-// way to get it tested on gVisor, PTrace, and Linux.
-
-using ::testing::AllOf;
-using ::testing::AnyOf;
-using ::testing::ContainerEq;
-using ::testing::Contains;
-using ::testing::ContainsRegex;
-using ::testing::Eq;
-using ::testing::Gt;
-using ::testing::HasSubstr;
-using ::testing::IsSupersetOf;
-using ::testing::Pair;
-using ::testing::UnorderedElementsAre;
-using ::testing::UnorderedElementsAreArray;
-
-// Exported by glibc.
-extern char** environ;
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-#ifndef SUID_DUMP_DISABLE
-#define SUID_DUMP_DISABLE 0
-#endif /* SUID_DUMP_DISABLE */
-#ifndef SUID_DUMP_USER
-#define SUID_DUMP_USER 1
-#endif /* SUID_DUMP_USER */
-#ifndef SUID_DUMP_ROOT
-#define SUID_DUMP_ROOT 2
-#endif /* SUID_DUMP_ROOT */
-
-#if defined(__x86_64__) || defined(__i386__)
-// This list of "required" fields is taken from reading the file
-// arch/x86/kernel/cpu/proc.c and seeing which fields will be unconditionally
-// printed by the kernel.
-static const char* required_fields[] = {
- "processor",
- "vendor_id",
- "cpu family",
- "model\t\t:",
- "model name",
- "stepping",
- "cpu MHz",
- "fpu\t\t:",
- "fpu_exception",
- "cpuid level",
- "wp",
- "bogomips",
- "clflush size",
- "cache_alignment",
- "address sizes",
- "power management",
-};
-#elif __aarch64__
-// This list of "required" fields is taken from reading the file
-// arch/arm64/kernel/cpuinfo.c and seeing which fields will be unconditionally
-// printed by the kernel.
-static const char* required_fields[] = {
- "processor", "BogoMIPS", "Features", "CPU implementer",
- "CPU architecture", "CPU variant", "CPU part", "CPU revision",
-};
-#else
-#error "Unknown architecture"
-#endif
-
-// Takes the subprocess pid.
-// If it returns !OK, WithSubprocess returns immediately.
-using SubprocessCallback = std::function<PosixError(int)>;
-
-std::vector<std::string> saved_argv; // NOLINT
-
-// Helper function to read /proc/{pid}/status and check the state field.
-// State should be "Z" for a zombied process, or "RSD" for a process that is
-// running (R), in interruptible sleep (S), or in uninterruptible sleep (D).
-void CompareProcessState(absl::string_view state, int pid) {
- auto status_file = ASSERT_NO_ERRNO_AND_VALUE(
- GetContents(absl::StrCat("/proc/", pid, "/status")));
- // N.B. POSIX extended regexes don't support shorthand character classes (\w)
- // inside of brackets.
- EXPECT_THAT(status_file,
- ContainsRegex(absl::StrCat("State:.[", state,
- R"EOL(]\s+\([a-zA-Z ]+\))EOL")));
-}
-
-// Run callbacks while a subprocess is running, zombied, and/or exited.
-PosixError WithSubprocess(SubprocessCallback const& running,
- SubprocessCallback const& zombied,
- SubprocessCallback const& exited) {
- int pipe_fds[2] = {};
- if (pipe(pipe_fds) < 0) {
- return PosixError(errno, "pipe");
- }
-
- int child_pid = fork();
- if (child_pid < 0) {
- return PosixError(errno, "fork");
- }
-
- if (child_pid == 0) {
- close(pipe_fds[0]); // Close the read end.
- const DisableSave ds; // Timing issues.
-
- // Write to the pipe to tell it we're ready.
- char buf = 'a';
- int res = 0;
- res = WriteFd(pipe_fds[1], &buf, sizeof(buf));
- TEST_CHECK_MSG(res == sizeof(buf), "Write failure in subprocess");
-
- while (true) {
- SleepSafe(absl::Milliseconds(100));
- }
- }
-
- close(pipe_fds[1]); // Close the write end.
-
- int status = 0;
- auto wait_cleanup = Cleanup([child_pid, &status] {
- EXPECT_THAT(waitpid(child_pid, &status, 0), SyscallSucceeds());
- });
- auto kill_cleanup = Cleanup([child_pid] {
- EXPECT_THAT(kill(child_pid, SIGKILL), SyscallSucceeds());
- });
-
- // Wait for the child.
- char buf = 0;
- int res = ReadFd(pipe_fds[0], &buf, sizeof(buf));
- if (res < 0) {
- return PosixError(errno, "Read from pipe");
- } else if (res == 0) {
- return PosixError(EPIPE, "Unable to read from pipe: EOF");
- }
-
- if (running) {
-    // The first arg, RSD, refers to a "running process", i.e. a process with
-    // a state of Running (R), Interruptible Sleep (S), or Uninterruptible
-    // Sleep (D).
- CompareProcessState("RSD", child_pid);
- RETURN_IF_ERRNO(running(child_pid));
- }
-
- // Kill the process.
- kill_cleanup.Release()();
- siginfo_t info;
- // Wait until the child process has exited (WEXITED flag) but don't
- // reap the child (WNOWAIT flag).
- EXPECT_THAT(waitid(P_PID, child_pid, &info, WNOWAIT | WEXITED),
- SyscallSucceeds());
-
- if (zombied) {
- // Arg of "Z" refers to a Zombied Process.
- CompareProcessState("Z", child_pid);
- RETURN_IF_ERRNO(zombied(child_pid));
- }
-
- // Wait on the process.
- wait_cleanup.Release()();
-  // If the process is reaped, then this should return
-  // with ECHILD.
- EXPECT_THAT(waitpid(child_pid, &status, WNOHANG),
- SyscallFailsWithErrno(ECHILD));
-
- if (exited) {
- RETURN_IF_ERRNO(exited(child_pid));
- }
-
- return NoError();
-}
-
-// Access the file returned by name when a subprocess is running.
-PosixError AccessWhileRunning(std::function<std::string(int pid)> name,
- int flags, std::function<void(int fd)> access) {
- FileDescriptor fd;
- return WithSubprocess(
- [&](int pid) -> PosixError {
- // Running.
- ASSIGN_OR_RETURN_ERRNO(fd, Open(name(pid), flags));
-
- access(fd.get());
- return NoError();
- },
- nullptr, nullptr);
-}
-
-// Access the file returned by name when a subprocess is zombied.
-PosixError AccessWhileZombied(std::function<std::string(int pid)> name,
- int flags, std::function<void(int fd)> access) {
- FileDescriptor fd;
- return WithSubprocess(
- [&](int pid) -> PosixError {
- // Running.
- ASSIGN_OR_RETURN_ERRNO(fd, Open(name(pid), flags));
- return NoError();
- },
- [&](int pid) -> PosixError {
- // Zombied.
- access(fd.get());
- return NoError();
- },
- nullptr);
-}
-
-// Access the file returned by name when a subprocess has exited.
-PosixError AccessWhileExited(std::function<std::string(int pid)> name,
- int flags, std::function<void(int fd)> access) {
- FileDescriptor fd;
- return WithSubprocess(
- [&](int pid) -> PosixError {
- // Running.
- ASSIGN_OR_RETURN_ERRNO(fd, Open(name(pid), flags));
- return NoError();
- },
- nullptr,
- [&](int pid) -> PosixError {
- // Exited.
- access(fd.get());
- return NoError();
- });
-}
-
-// ReadFd(fd=/proc/PID/basename) while PID is running.
-int ReadWhileRunning(std::string const& basename, void* buf, size_t count) {
- int ret = 0;
- int err = 0;
- EXPECT_NO_ERRNO(AccessWhileRunning(
- [&](int pid) -> std::string {
- return absl::StrCat("/proc/", pid, "/", basename);
- },
- O_RDONLY,
- [&](int fd) {
- ret = ReadFd(fd, buf, count);
- err = errno;
- }));
- errno = err;
- return ret;
-}
-
-// ReadFd(fd=/proc/PID/basename) while PID is zombied.
-int ReadWhileZombied(std::string const& basename, void* buf, size_t count) {
- int ret = 0;
- int err = 0;
- EXPECT_NO_ERRNO(AccessWhileZombied(
- [&](int pid) -> std::string {
- return absl::StrCat("/proc/", pid, "/", basename);
- },
- O_RDONLY,
- [&](int fd) {
- ret = ReadFd(fd, buf, count);
- err = errno;
- }));
- errno = err;
- return ret;
-}
-
-// ReadFd(fd=/proc/PID/basename) while PID is exited.
-int ReadWhileExited(std::string const& basename, void* buf, size_t count) {
- int ret = 0;
- int err = 0;
- EXPECT_NO_ERRNO(AccessWhileExited(
- [&](int pid) -> std::string {
- return absl::StrCat("/proc/", pid, "/", basename);
- },
- O_RDONLY,
- [&](int fd) {
- ret = ReadFd(fd, buf, count);
- err = errno;
- }));
- errno = err;
- return ret;
-}
-
-// readlinkat(fd=/proc/PID/, basename) while PID is running.
-int ReadlinkWhileRunning(std::string const& basename, char* buf, size_t count) {
- int ret = 0;
- int err = 0;
- EXPECT_NO_ERRNO(AccessWhileRunning(
- [&](int pid) -> std::string { return absl::StrCat("/proc/", pid, "/"); },
- O_DIRECTORY,
- [&](int fd) {
- ret = readlinkat(fd, basename.c_str(), buf, count);
- err = errno;
- }));
- errno = err;
- return ret;
-}
-
-// readlinkat(fd=/proc/PID/, basename) while PID is zombied.
-int ReadlinkWhileZombied(std::string const& basename, char* buf, size_t count) {
- int ret = 0;
- int err = 0;
- EXPECT_NO_ERRNO(AccessWhileZombied(
- [&](int pid) -> std::string { return absl::StrCat("/proc/", pid, "/"); },
- O_DIRECTORY,
- [&](int fd) {
- ret = readlinkat(fd, basename.c_str(), buf, count);
- err = errno;
- }));
- errno = err;
- return ret;
-}
-
-// readlinkat(fd=/proc/PID/, basename) while PID is exited.
-int ReadlinkWhileExited(std::string const& basename, char* buf, size_t count) {
- int ret = 0;
- int err = 0;
- EXPECT_NO_ERRNO(AccessWhileExited(
- [&](int pid) -> std::string { return absl::StrCat("/proc/", pid, "/"); },
- O_DIRECTORY,
- [&](int fd) {
- ret = readlinkat(fd, basename.c_str(), buf, count);
- err = errno;
- }));
- errno = err;
- return ret;
-}
-
-TEST(ProcTest, NotFoundInRoot) {
- struct stat s;
- EXPECT_THAT(stat("/proc/foobar", &s), SyscallFailsWithErrno(ENOENT));
-}
-
-TEST(ProcSelfTest, IsThreadGroupLeader) {
- ScopedThread([] {
- const pid_t tgid = getpid();
- const pid_t tid = syscall(SYS_gettid);
- EXPECT_NE(tgid, tid);
- auto link = ASSERT_NO_ERRNO_AND_VALUE(ReadLink("/proc/self"));
- EXPECT_EQ(link, absl::StrCat(tgid));
- });
-}
-
-TEST(ProcThreadSelfTest, Basic) {
- const pid_t tgid = getpid();
- const pid_t tid = syscall(SYS_gettid);
- EXPECT_EQ(tgid, tid);
- auto link_threadself =
- ASSERT_NO_ERRNO_AND_VALUE(ReadLink("/proc/thread-self"));
- EXPECT_EQ(link_threadself, absl::StrCat(tgid, "/task/", tid));
- // Just read one file inside thread-self to ensure that the link is valid.
- auto link_threadself_exe =
- ASSERT_NO_ERRNO_AND_VALUE(ReadLink("/proc/thread-self/exe"));
- auto link_procself_exe =
- ASSERT_NO_ERRNO_AND_VALUE(ReadLink("/proc/self/exe"));
- EXPECT_EQ(link_threadself_exe, link_procself_exe);
-}
-
-TEST(ProcThreadSelfTest, Thread) {
- ScopedThread([] {
- const pid_t tgid = getpid();
- const pid_t tid = syscall(SYS_gettid);
- EXPECT_NE(tgid, tid);
- auto link_threadself =
- ASSERT_NO_ERRNO_AND_VALUE(ReadLink("/proc/thread-self"));
-
- EXPECT_EQ(link_threadself, absl::StrCat(tgid, "/task/", tid));
- // Just read one file inside thread-self to ensure that the link is valid.
- auto link_threadself_exe =
- ASSERT_NO_ERRNO_AND_VALUE(ReadLink("/proc/thread-self/exe"));
- auto link_procself_exe =
- ASSERT_NO_ERRNO_AND_VALUE(ReadLink("/proc/self/exe"));
- EXPECT_EQ(link_threadself_exe, link_procself_exe);
- // A thread should not have "/proc/<tid>/task".
- struct stat s;
- EXPECT_THAT(stat("/proc/thread-self/task", &s),
- SyscallFailsWithErrno(ENOENT));
- });
-}
-
-// Returns the /proc/PID/maps entry for the MAP_PRIVATE | MAP_ANONYMOUS mapping
-// m with start address addr and length len.
-std::string AnonymousMapsEntry(uintptr_t addr, size_t len, int prot) {
- return absl::StrCat(absl::Hex(addr, absl::PadSpec::kZeroPad8), "-",
- absl::Hex(addr + len, absl::PadSpec::kZeroPad8), " ",
- prot & PROT_READ ? "r" : "-",
- prot & PROT_WRITE ? "w" : "-",
- prot & PROT_EXEC ? "x" : "-", "p 00000000 00:00 0 ");
-}
-
-std::string AnonymousMapsEntryForMapping(const Mapping& m, int prot) {
- return AnonymousMapsEntry(m.addr(), m.len(), prot);
-}
-
-PosixErrorOr<std::map<uint64_t, uint64_t>> ReadProcSelfAuxv() {
- std::string auxv_file;
- RETURN_IF_ERRNO(GetContents("/proc/self/auxv", &auxv_file));
- const Elf64_auxv_t* auxv_data =
- reinterpret_cast<const Elf64_auxv_t*>(auxv_file.data());
- std::map<uint64_t, uint64_t> auxv_entries;
- for (int i = 0; auxv_data[i].a_type != AT_NULL; i++) {
- auto a_type = auxv_data[i].a_type;
- EXPECT_EQ(0, auxv_entries.count(a_type)) << "a_type: " << a_type;
- auxv_entries.emplace(a_type, auxv_data[i].a_un.a_val);
- }
- return auxv_entries;
-}
-
-TEST(ProcSelfAuxv, EntryPresence) {
- auto auxv_entries = ASSERT_NO_ERRNO_AND_VALUE(ReadProcSelfAuxv());
-
- EXPECT_EQ(auxv_entries.count(AT_ENTRY), 1);
- EXPECT_EQ(auxv_entries.count(AT_PHDR), 1);
- EXPECT_EQ(auxv_entries.count(AT_PHENT), 1);
- EXPECT_EQ(auxv_entries.count(AT_PHNUM), 1);
- EXPECT_EQ(auxv_entries.count(AT_BASE), 1);
- EXPECT_EQ(auxv_entries.count(AT_UID), 1);
- EXPECT_EQ(auxv_entries.count(AT_EUID), 1);
- EXPECT_EQ(auxv_entries.count(AT_GID), 1);
- EXPECT_EQ(auxv_entries.count(AT_EGID), 1);
- EXPECT_EQ(auxv_entries.count(AT_SECURE), 1);
- EXPECT_EQ(auxv_entries.count(AT_CLKTCK), 1);
- EXPECT_EQ(auxv_entries.count(AT_RANDOM), 1);
- EXPECT_EQ(auxv_entries.count(AT_EXECFN), 1);
- EXPECT_EQ(auxv_entries.count(AT_PAGESZ), 1);
- EXPECT_EQ(auxv_entries.count(AT_SYSINFO_EHDR), 1);
-}
-
-TEST(ProcSelfAuxv, EntryValues) {
- auto proc_auxv = ASSERT_NO_ERRNO_AND_VALUE(ReadProcSelfAuxv());
-
- // We need to find the ELF auxiliary vector. The memory pointed to by envp
- // contains pointers to the environment strings, followed by a single null
- // pointer, followed by the auxiliary vector.
- char** envpi = environ;
- while (*envpi) {
- ++envpi;
- }
-
- const Elf64_auxv_t* envp_auxv =
- reinterpret_cast<const Elf64_auxv_t*>(envpi + 1);
- int i;
- for (i = 0; envp_auxv[i].a_type != AT_NULL; i++) {
- auto a_type = envp_auxv[i].a_type;
- EXPECT_EQ(proc_auxv.count(a_type), 1);
- EXPECT_EQ(proc_auxv[a_type], envp_auxv[i].a_un.a_val)
- << "a_type: " << a_type;
- }
- EXPECT_EQ(i, proc_auxv.size());
-}
-
- // Just open and read a part of /proc/self/mem; check that we can read an item.
-TEST(ProcPidMem, Read) {
- auto memfd = ASSERT_NO_ERRNO_AND_VALUE(Open("/proc/self/mem", O_RDONLY));
- char input[] = "hello-world";
- char output[sizeof(input)];
- ASSERT_THAT(pread(memfd.get(), output, sizeof(output),
- reinterpret_cast<off_t>(input)),
- SyscallSucceedsWithValue(sizeof(input)));
- ASSERT_STREQ(input, output);
-}
-
-// Perform read on an unmapped region.
-TEST(ProcPidMem, Unmapped) {
- // Strategy: map then unmap, so we have a guaranteed unmapped region
- auto memfd = ASSERT_NO_ERRNO_AND_VALUE(Open("/proc/self/mem", O_RDONLY));
- Mapping mapping = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
- // Fill it with things
- memset(mapping.ptr(), 'x', mapping.len());
- char expected = 'x', output;
- ASSERT_THAT(pread(memfd.get(), &output, sizeof(output),
- reinterpret_cast<off_t>(mapping.ptr())),
- SyscallSucceedsWithValue(sizeof(output)));
- ASSERT_EQ(expected, output);
-
- // Unmap region again
- ASSERT_THAT(munmap(mapping.ptr(), mapping.len()), SyscallSucceeds());
-
- // Now we expect an EIO error
- ASSERT_THAT(pread(memfd.get(), &output, sizeof(output),
- reinterpret_cast<off_t>(mapping.ptr())),
- SyscallFailsWithErrno(EIO));
-}
-
-// Perform read repeatedly to verify offset change.
-TEST(ProcPidMem, RepeatedRead) {
- auto const num_reads = 3;
- char expected[] = "01234567890abcdefghijkl";
- char output[sizeof(expected) / num_reads];
-
- auto memfd = ASSERT_NO_ERRNO_AND_VALUE(Open("/proc/self/mem", O_RDONLY));
- ASSERT_THAT(lseek(memfd.get(), reinterpret_cast<off_t>(&expected), SEEK_SET),
- SyscallSucceedsWithValue(reinterpret_cast<off_t>(&expected)));
- for (auto i = 0; i < num_reads; i++) {
- ASSERT_THAT(read(memfd.get(), &output, sizeof(output)),
- SyscallSucceedsWithValue(sizeof(output)));
- ASSERT_EQ(strncmp(&expected[i * sizeof(output)], output, sizeof(output)),
- 0);
- }
-}
-
-// Perform seek operations repeatedly.
-TEST(ProcPidMem, RepeatedSeek) {
- auto const num_reads = 3;
- char expected[] = "01234567890abcdefghijkl";
- char output[sizeof(expected) / num_reads];
-
- auto memfd = ASSERT_NO_ERRNO_AND_VALUE(Open("/proc/self/mem", O_RDONLY));
- ASSERT_THAT(lseek(memfd.get(), reinterpret_cast<off_t>(&expected), SEEK_SET),
- SyscallSucceedsWithValue(reinterpret_cast<off_t>(&expected)));
- // Read from start
- ASSERT_THAT(read(memfd.get(), &output, sizeof(output)),
- SyscallSucceedsWithValue(sizeof(output)));
- ASSERT_EQ(strncmp(&expected[0 * sizeof(output)], output, sizeof(output)), 0);
- // Skip ahead one read
- ASSERT_THAT(lseek(memfd.get(), sizeof(output), SEEK_CUR),
- SyscallSucceedsWithValue(reinterpret_cast<off_t>(&expected) +
- sizeof(output) * 2));
- // Do read again
- ASSERT_THAT(read(memfd.get(), &output, sizeof(output)),
- SyscallSucceedsWithValue(sizeof(output)));
- ASSERT_EQ(strncmp(&expected[2 * sizeof(output)], output, sizeof(output)), 0);
- // Skip back three reads
- ASSERT_THAT(lseek(memfd.get(), -3 * sizeof(output), SEEK_CUR),
- SyscallSucceedsWithValue(reinterpret_cast<off_t>(&expected)));
- // Do read again
- ASSERT_THAT(read(memfd.get(), &output, sizeof(output)),
- SyscallSucceedsWithValue(sizeof(output)));
- ASSERT_EQ(strncmp(&expected[0 * sizeof(output)], output, sizeof(output)), 0);
- // Check that SEEK_END does not work
- ASSERT_THAT(lseek(memfd.get(), 0, SEEK_END), SyscallFailsWithErrno(EINVAL));
-}
-
-// Perform read past an allocated memory region.
-TEST(ProcPidMem, PartialRead) {
- // Strategy: map large region, then do unmap and remap smaller region
- auto memfd = ASSERT_NO_ERRNO_AND_VALUE(Open("/proc/self/mem", O_RDONLY));
-
- Mapping mapping = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(2 * kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
- ASSERT_THAT(munmap(mapping.ptr(), mapping.len()), SyscallSucceeds());
- Mapping smaller_mapping = ASSERT_NO_ERRNO_AND_VALUE(
- Mmap(mapping.ptr(), kPageSize, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
-
- // Fill it with things
- memset(smaller_mapping.ptr(), 'x', smaller_mapping.len());
-
- // Now we expect the read to succeed (partially)
- char expected[] = {'x'};
- std::unique_ptr<char[]> output(new char[kPageSize]);
- off_t read_offset =
- reinterpret_cast<off_t>(smaller_mapping.ptr()) + kPageSize - 1;
- ASSERT_THAT(pread(memfd.get(), output.get(), kPageSize, read_offset),
- SyscallSucceedsWithValue(sizeof(expected)));
- // Since output is larger than expected, only the bytes actually read are
- // compared.
- ASSERT_EQ(expected[0], output.get()[0]);
-}
-
-// Perform read on /proc/[pid]/mem after exit.
-TEST(ProcPidMem, AfterExit) {
- int pfd1[2] = {};
- int pfd2[2] = {};
-
- char expected[] = "hello-world";
-
- ASSERT_THAT(pipe(pfd1), SyscallSucceeds());
- ASSERT_THAT(pipe(pfd2), SyscallSucceeds());
-
- // Create child process
- pid_t const child_pid = fork();
- if (child_pid == 0) {
- // Close reading end of first pipe
- close(pfd1[0]);
-
- // Tell parent that we are ready
- char ok = 1;
- TEST_CHECK(WriteFd(pfd1[1], &ok, sizeof(ok)) == sizeof(ok));
- TEST_PCHECK(close(pfd1[1]) == 0);
-
- // Close writing end of second pipe
- TEST_PCHECK(close(pfd2[1]) == 0);
-
- // Await parent OK to die
- ok = 0;
- TEST_CHECK(ReadFd(pfd2[0], &ok, sizeof(ok)) == sizeof(ok));
-
- // Close the remaining pipe end
- TEST_PCHECK(close(pfd2[0]) == 0);
- _exit(0);
- }
-
- // In parent process.
- ASSERT_THAT(child_pid, SyscallSucceeds());
-
- // Close writing end of first pipe
- EXPECT_THAT(close(pfd1[1]), SyscallSucceeds());
-
- // Wait for child to be alive and well
- char ok = 0;
- EXPECT_THAT(ReadFd(pfd1[0], &ok, sizeof(ok)),
- SyscallSucceedsWithValue(sizeof(ok)));
- // Close reading end of first pipe
- EXPECT_THAT(close(pfd1[0]), SyscallSucceeds());
-
- // Open /proc/pid/mem fd
- std::string mempath = absl::StrCat("/proc/", child_pid, "/mem");
- auto memfd = ASSERT_NO_ERRNO_AND_VALUE(Open(mempath, O_RDONLY));
-
- // Expect that we can read
- char output[sizeof(expected)];
- EXPECT_THAT(pread(memfd.get(), &output, sizeof(output),
- reinterpret_cast<off_t>(&expected)),
- SyscallSucceedsWithValue(sizeof(output)));
- EXPECT_STREQ(expected, output);
-
- // Tell the child it's ok to exit
- EXPECT_THAT(close(pfd2[0]), SyscallSucceeds());
- ok = 1;
- EXPECT_THAT(WriteFd(pfd2[1], &ok, sizeof(ok)),
- SyscallSucceedsWithValue(sizeof(ok)));
- EXPECT_THAT(close(pfd2[1]), SyscallSucceeds());
-
- // Expect termination
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0), SyscallSucceeds());
-
- // Expect that we can't read anymore
- EXPECT_THAT(pread(memfd.get(), &output, sizeof(output),
- reinterpret_cast<off_t>(&expected)),
- SyscallSucceedsWithValue(0));
-}
-
-// Read from /proc/[pid]/mem with different UID/GID and attached state.
-TEST(ProcPidMem, DifferentUserAttached) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SETUID)));
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_DAC_OVERRIDE)));
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_PTRACE)));
-
- int pfd1[2] = {};
- int pfd2[2] = {};
-
- ASSERT_THAT(pipe(pfd1), SyscallSucceeds());
- ASSERT_THAT(pipe(pfd2), SyscallSucceeds());
-
- // Create child process
- pid_t const child_pid = fork();
- if (child_pid == 0) {
- // Close reading end of first pipe
- close(pfd1[0]);
-
- // Tell parent about location of input
- char input[] = "hello-world";
- off_t input_location = reinterpret_cast<off_t>(input);
- TEST_CHECK(WriteFd(pfd1[1], &input_location, sizeof(input_location)) ==
- sizeof(input_location));
- TEST_PCHECK(close(pfd1[1]) == 0);
-
- // Close writing end of second pipe
- TEST_PCHECK(close(pfd2[1]) == 0);
-
- // Await parent OK to die
- char ok = 0;
- TEST_CHECK(ReadFd(pfd2[0], &ok, sizeof(ok)) == sizeof(ok));
-
- // Close the remaining pipe end
- TEST_PCHECK(close(pfd2[0]) == 0);
- _exit(0);
- }
-
- // In parent process.
- ASSERT_THAT(child_pid, SyscallSucceeds());
-
- // Close writing end of first pipe
- EXPECT_THAT(close(pfd1[1]), SyscallSucceeds());
-
- // Read target location from child
- off_t target_location;
- EXPECT_THAT(ReadFd(pfd1[0], &target_location, sizeof(target_location)),
- SyscallSucceedsWithValue(sizeof(target_location)));
- // Close reading end of first pipe
- EXPECT_THAT(close(pfd1[0]), SyscallSucceeds());
-
- ScopedThread([&] {
- // Attach to child subprocess without stopping it
- EXPECT_THAT(ptrace(PTRACE_SEIZE, child_pid, NULL, NULL), SyscallSucceeds());
-
- // Keep capabilities after setuid
- EXPECT_THAT(prctl(PR_SET_KEEPCAPS, 1, 0, 0, 0), SyscallSucceeds());
- constexpr int kNobody = 65534;
- EXPECT_THAT(syscall(SYS_setuid, kNobody), SyscallSucceeds());
-
- // Only restore CAP_SYS_PTRACE and CAP_DAC_OVERRIDE
- EXPECT_NO_ERRNO(SetCapability(CAP_SYS_PTRACE, true));
- EXPECT_NO_ERRNO(SetCapability(CAP_DAC_OVERRIDE, true));
-
- // Open /proc/pid/mem fd
- std::string mempath = absl::StrCat("/proc/", child_pid, "/mem");
- auto memfd = ASSERT_NO_ERRNO_AND_VALUE(Open(mempath, O_RDONLY));
- char expected[] = "hello-world";
- char output[sizeof(expected)];
- EXPECT_THAT(pread(memfd.get(), output, sizeof(output),
- reinterpret_cast<off_t>(target_location)),
- SyscallSucceedsWithValue(sizeof(output)));
- EXPECT_STREQ(expected, output);
-
- // Tell the child it's ok to exit
- EXPECT_THAT(close(pfd2[0]), SyscallSucceeds());
- char ok = 1;
- EXPECT_THAT(WriteFd(pfd2[1], &ok, sizeof(ok)),
- SyscallSucceedsWithValue(sizeof(ok)));
- EXPECT_THAT(close(pfd2[1]), SyscallSucceeds());
-
- // Expect termination
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << " status " << status;
- });
-}
-
-// Attempt to read from /proc/[pid]/mem with different UID/GID.
-TEST(ProcPidMem, DifferentUser) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SETUID)));
-
- int pfd1[2] = {};
- int pfd2[2] = {};
-
- ASSERT_THAT(pipe(pfd1), SyscallSucceeds());
- ASSERT_THAT(pipe(pfd2), SyscallSucceeds());
-
- // Create child process
- pid_t const child_pid = fork();
- if (child_pid == 0) {
- // Close reading end of first pipe
- close(pfd1[0]);
-
- // Tell parent about location of input
- char input[] = "hello-world";
- off_t input_location = reinterpret_cast<off_t>(input);
- TEST_CHECK(WriteFd(pfd1[1], &input_location, sizeof(input_location)) ==
- sizeof(input_location));
- TEST_PCHECK(close(pfd1[1]) == 0);
-
- // Close writing end of second pipe
- TEST_PCHECK(close(pfd2[1]) == 0);
-
- // Await parent OK to die
- char ok = 0;
- TEST_CHECK(ReadFd(pfd2[0], &ok, sizeof(ok)) == sizeof(ok));
-
- // Close the remaining pipe end
- TEST_PCHECK(close(pfd2[0]) == 0);
- _exit(0);
- }
-
- // In parent process.
- ASSERT_THAT(child_pid, SyscallSucceeds());
-
- // Close writing end of first pipe
- EXPECT_THAT(close(pfd1[1]), SyscallSucceeds());
-
- // Read target location from child
- off_t target_location;
- EXPECT_THAT(ReadFd(pfd1[0], &target_location, sizeof(target_location)),
- SyscallSucceedsWithValue(sizeof(target_location)));
- // Close reading end of first pipe
- EXPECT_THAT(close(pfd1[0]), SyscallSucceeds());
-
- ScopedThread([&] {
- constexpr int kNobody = 65534;
- EXPECT_THAT(syscall(SYS_setuid, kNobody), SyscallSucceeds());
-
- // Attempt to open /proc/[child_pid]/mem
- std::string mempath = absl::StrCat("/proc/", child_pid, "/mem");
- EXPECT_THAT(open(mempath.c_str(), O_RDONLY), SyscallFailsWithErrno(EACCES));
-
- // Tell the child it's ok to exit
- EXPECT_THAT(close(pfd2[0]), SyscallSucceeds());
- char ok = 1;
- EXPECT_THAT(WriteFd(pfd2[1], &ok, sizeof(ok)),
- SyscallSucceedsWithValue(sizeof(ok)));
- EXPECT_THAT(close(pfd2[1]), SyscallSucceeds());
-
- // Expect termination
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0), SyscallSucceeds());
- });
-}
-
-// Perform read on /proc/[pid]/mem with same UID/GID.
-TEST(ProcPidMem, SameUser) {
- int pfd1[2] = {};
- int pfd2[2] = {};
-
- ASSERT_THAT(pipe(pfd1), SyscallSucceeds());
- ASSERT_THAT(pipe(pfd2), SyscallSucceeds());
-
- // Create child process
- pid_t const child_pid = fork();
- if (child_pid == 0) {
- // Close reading end of first pipe
- close(pfd1[0]);
-
- // Tell parent about location of input
- char input[] = "hello-world";
- off_t input_location = reinterpret_cast<off_t>(input);
- TEST_CHECK(WriteFd(pfd1[1], &input_location, sizeof(input_location)) ==
- sizeof(input_location));
- TEST_PCHECK(close(pfd1[1]) == 0);
-
- // Close writing end of second pipe
- TEST_PCHECK(close(pfd2[1]) == 0);
-
- // Await parent OK to die
- char ok = 0;
- TEST_CHECK(ReadFd(pfd2[0], &ok, sizeof(ok)) == sizeof(ok));
-
- // Close the remaining pipe end
- TEST_PCHECK(close(pfd2[0]) == 0);
- _exit(0);
- }
- // In parent process.
- ASSERT_THAT(child_pid, SyscallSucceeds());
-
- // Close writing end of first pipe
- EXPECT_THAT(close(pfd1[1]), SyscallSucceeds());
-
- // Read target location from child
- off_t target_location;
- EXPECT_THAT(ReadFd(pfd1[0], &target_location, sizeof(target_location)),
- SyscallSucceedsWithValue(sizeof(target_location)));
- // Close reading end of first pipe
- EXPECT_THAT(close(pfd1[0]), SyscallSucceeds());
-
- // Open /proc/pid/mem fd
- std::string mempath = absl::StrCat("/proc/", child_pid, "/mem");
- auto memfd = ASSERT_NO_ERRNO_AND_VALUE(Open(mempath, O_RDONLY));
- char expected[] = "hello-world";
- char output[sizeof(expected)];
- EXPECT_THAT(pread(memfd.get(), output, sizeof(output),
- reinterpret_cast<off_t>(target_location)),
- SyscallSucceedsWithValue(sizeof(output)));
- EXPECT_STREQ(expected, output);
-
- // Tell the child it's ok to exit
- EXPECT_THAT(close(pfd2[0]), SyscallSucceeds());
- char ok = 1;
- EXPECT_THAT(WriteFd(pfd2[1], &ok, sizeof(ok)),
- SyscallSucceedsWithValue(sizeof(ok)));
- EXPECT_THAT(close(pfd2[1]), SyscallSucceeds());
-
- // Expect termination
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0), SyscallSucceeds());
-}
-
- // Just open and read /proc/self/maps; check that we can find [stack].
-TEST(ProcSelfMaps, Basic) {
- auto proc_self_maps =
- ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/self/maps"));
-
- std::vector<std::string> strings = absl::StrSplit(proc_self_maps, '\n');
- std::vector<std::string> stacks;
- // Make sure there's a stack in there.
- for (const auto& str : strings) {
- if (str.find("[stack]") != std::string::npos) {
- stacks.push_back(str);
- }
- }
- ASSERT_EQ(1, stacks.size()) << "[stack] not found in: " << proc_self_maps;
- // Linux pads the maps entry to 73 characters; "[stack]" adds another 7.
- EXPECT_EQ(80, stacks[0].length());
-}
-
-TEST(ProcSelfMaps, Map1) {
- Mapping mapping =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(kPageSize, PROT_READ, MAP_PRIVATE));
- auto proc_self_maps =
- ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/self/maps"));
- std::vector<std::string> strings = absl::StrSplit(proc_self_maps, '\n');
- std::vector<std::string> addrs;
- // Make sure it is listed.
- for (const auto& str : strings) {
- if (str == AnonymousMapsEntryForMapping(mapping, PROT_READ)) {
- addrs.push_back(str);
- }
- }
- ASSERT_EQ(1, addrs.size());
-}
-
-TEST(ProcSelfMaps, Map2) {
- // NOTE(magi): The permissions must be different or the pages will get merged.
- Mapping map1 = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_READ | PROT_EXEC, MAP_PRIVATE));
- Mapping map2 =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(kPageSize, PROT_WRITE, MAP_PRIVATE));
-
- auto proc_self_maps =
- ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/self/maps"));
- std::vector<std::string> strings = absl::StrSplit(proc_self_maps, '\n');
- std::vector<std::string> addrs;
- // Make sure it is listed.
- for (const auto& str : strings) {
- if (str == AnonymousMapsEntryForMapping(map1, PROT_READ | PROT_EXEC)) {
- addrs.push_back(str);
- }
- }
- ASSERT_EQ(1, addrs.size());
- addrs.clear();
- for (const auto& str : strings) {
- if (str == AnonymousMapsEntryForMapping(map2, PROT_WRITE)) {
- addrs.push_back(str);
- }
- }
- ASSERT_EQ(1, addrs.size());
-}
-
-TEST(ProcSelfMaps, MapUnmap) {
- Mapping map1 = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_READ | PROT_EXEC, MAP_PRIVATE));
- Mapping map2 =
- ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(kPageSize, PROT_WRITE, MAP_PRIVATE));
-
- auto proc_self_maps =
- ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/self/maps"));
- std::vector<std::string> strings = absl::StrSplit(proc_self_maps, '\n');
- std::vector<std::string> addrs;
- // Make sure it is listed.
- for (const auto& str : strings) {
- if (str == AnonymousMapsEntryForMapping(map1, PROT_READ | PROT_EXEC)) {
- addrs.push_back(str);
- }
- }
- ASSERT_EQ(1, addrs.size()) << proc_self_maps;
- addrs.clear();
- for (const auto& str : strings) {
- if (str == AnonymousMapsEntryForMapping(map2, PROT_WRITE)) {
- addrs.push_back(str);
- }
- }
- ASSERT_EQ(1, addrs.size());
-
- map2.reset();
-
- // Read it again.
- proc_self_maps = ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/self/maps"));
- strings = absl::StrSplit(proc_self_maps, '\n');
- // First entry should be there.
- addrs.clear();
- for (const auto& str : strings) {
- if (str == AnonymousMapsEntryForMapping(map1, PROT_READ | PROT_EXEC)) {
- addrs.push_back(str);
- }
- }
- ASSERT_EQ(1, addrs.size());
- addrs.clear();
- // But not the second.
- for (const auto& str : strings) {
- if (str == AnonymousMapsEntryForMapping(map2, PROT_WRITE)) {
- addrs.push_back(str);
- }
- }
- ASSERT_EQ(0, addrs.size());
-}
-
-TEST(ProcSelfMaps, Mprotect) {
- // FIXME(jamieliu): Linux's mprotect() sometimes fails to merge VMAs in this
- // case.
- SKIP_IF(!IsRunningOnGvisor());
-
- // Reserve 5 pages of address space.
- Mapping m = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(5 * kPageSize, PROT_NONE, MAP_PRIVATE));
-
- // Change the permissions on the middle 3 pages. (The first and last pages may
- // be merged with other vmas on either side, so they aren't tested directly;
- // they just ensure that the middle 3 pages are bracketed by VMAs with
- // incompatible permissions.)
- ASSERT_THAT(mprotect(reinterpret_cast<void*>(m.addr() + kPageSize),
- 3 * kPageSize, PROT_READ),
- SyscallSucceeds());
-
- // Check that the middle 3 pages make up a single VMA.
- auto proc_self_maps =
- ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/self/maps"));
- std::vector<std::string> strings = absl::StrSplit(proc_self_maps, '\n');
- EXPECT_THAT(strings, Contains(AnonymousMapsEntry(m.addr() + kPageSize,
- 3 * kPageSize, PROT_READ)));
-
- // Change the permissions on the middle page only.
- ASSERT_THAT(mprotect(reinterpret_cast<void*>(m.addr() + 2 * kPageSize),
- kPageSize, PROT_READ | PROT_WRITE),
- SyscallSucceeds());
-
- // Check that the single VMA has been split into 3 VMAs.
- proc_self_maps = ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/self/maps"));
- strings = absl::StrSplit(proc_self_maps, '\n');
- EXPECT_THAT(
- strings,
- IsSupersetOf(
- {AnonymousMapsEntry(m.addr() + kPageSize, kPageSize, PROT_READ),
- AnonymousMapsEntry(m.addr() + 2 * kPageSize, kPageSize,
- PROT_READ | PROT_WRITE),
- AnonymousMapsEntry(m.addr() + 3 * kPageSize, kPageSize,
- PROT_READ)}));
-
- // Change the permissions on the middle page back.
- ASSERT_THAT(mprotect(reinterpret_cast<void*>(m.addr() + 2 * kPageSize),
- kPageSize, PROT_READ),
- SyscallSucceeds());
-
- // Check that the 3 VMAs have been merged back into a single VMA.
- proc_self_maps = ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/self/maps"));
- strings = absl::StrSplit(proc_self_maps, '\n');
- EXPECT_THAT(strings, Contains(AnonymousMapsEntry(m.addr() + kPageSize,
- 3 * kPageSize, PROT_READ)));
-}
-
-TEST(ProcSelfMaps, SharedAnon) {
- const Mapping m = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kPageSize, PROT_READ, MAP_SHARED | MAP_ANONYMOUS));
-
- const auto proc_self_maps =
- ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/self/maps"));
- for (const auto& line : absl::StrSplit(proc_self_maps, '\n')) {
- const auto entry = ASSERT_NO_ERRNO_AND_VALUE(ParseProcMapsLine(line));
- if (entry.start <= m.addr() && m.addr() < entry.end) {
- // cf. proc(5), "/proc/[pid]/map_files/"
- EXPECT_EQ(entry.filename, "/dev/zero (deleted)");
- return;
- }
- }
- FAIL() << "no maps entry containing mapping at " << m.ptr();
-}
-
-TEST(ProcSelfFd, OpenFd) {
- int pipe_fds[2];
- ASSERT_THAT(pipe2(pipe_fds, O_CLOEXEC), SyscallSucceeds());
-
- // Reopen the write end.
- const std::string path = absl::StrCat("/proc/self/fd/", pipe_fds[1]);
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(path, O_WRONLY));
-
- // Ensure that a read/write works.
- const std::string data = "hello";
- std::unique_ptr<char[]> buffer(new char[data.size()]);
- EXPECT_THAT(write(fd.get(), data.c_str(), data.size()),
- SyscallSucceedsWithValue(5));
- EXPECT_THAT(read(pipe_fds[0], buffer.get(), data.size()),
- SyscallSucceedsWithValue(5));
- EXPECT_EQ(strncmp(buffer.get(), data.c_str(), data.size()), 0);
-
- // Cleanup.
- ASSERT_THAT(close(pipe_fds[0]), SyscallSucceeds());
- ASSERT_THAT(close(pipe_fds[1]), SyscallSucceeds());
-}
-
-static void CheckFdDirGetdentsDuplicates(const std::string& path) {
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(path.c_str(), O_RDONLY | O_DIRECTORY));
- // Open an FD whose value is supposed to be much larger than
- // the number of FDs opened by the current process.
- auto newfd = fcntl(fd.get(), F_DUPFD, 1024);
- EXPECT_GE(newfd, 1024);
- auto fd_closer = Cleanup([newfd]() { close(newfd); });
- auto fd_files = ASSERT_NO_ERRNO_AND_VALUE(ListDir(path.c_str(), false));
- absl::node_hash_set<std::string> fd_files_dedup(fd_files.begin(),
- fd_files.end());
- EXPECT_EQ(fd_files.size(), fd_files_dedup.size());
-}
-
-// This is a regression test for gvisor.dev/issues/3894
-TEST(ProcSelfFd, GetdentsDuplicates) {
- CheckFdDirGetdentsDuplicates("/proc/self/fd");
-}
-
-// This is a regression test for gvisor.dev/issues/3894
-TEST(ProcSelfFdInfo, GetdentsDuplicates) {
- CheckFdDirGetdentsDuplicates("/proc/self/fdinfo");
-}
-
-TEST(ProcSelfFdInfo, CorrectFds) {
- // Make sure there is at least one open file.
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(f.path(), O_RDONLY));
-
- // Get files in /proc/self/fd.
- auto fd_files = ASSERT_NO_ERRNO_AND_VALUE(ListDir("/proc/self/fd", false));
-
- // Get files in /proc/self/fdinfo.
- auto fdinfo_files =
- ASSERT_NO_ERRNO_AND_VALUE(ListDir("/proc/self/fdinfo", false));
-
- // They should contain the same fds.
- EXPECT_THAT(fd_files, UnorderedElementsAreArray(fdinfo_files));
-
- // Both should contain fd.
- auto fd_s = absl::StrCat(fd.get());
- EXPECT_THAT(fd_files, Contains(fd_s));
-}
-
-TEST(ProcSelfFdInfo, Flags) {
- std::string path = NewTempAbsPath();
-
- // Create file here with O_CREAT to test that O_CREAT does not appear in
- // fdinfo flags.
- int flags = O_CREAT | O_RDWR | O_APPEND | O_CLOEXEC;
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(path, flags, 0644));
-
- // Automatically delete path.
- TempPath temp_path(path);
-
- // O_CREAT does not appear in fdinfo flags.
- flags &= ~O_CREAT;
-
- // O_LARGEFILE always appears (on x86_64).
- flags |= kOLargeFile;
-
- auto fd_info = ASSERT_NO_ERRNO_AND_VALUE(
- GetContents(absl::StrCat("/proc/self/fdinfo/", fd.get())));
- EXPECT_THAT(fd_info, HasSubstr(absl::StrFormat("flags:\t%#o", flags)));
-}
-
-TEST(ProcSelfExe, Absolute) {
- auto exe = ASSERT_NO_ERRNO_AND_VALUE(ReadLink("/proc/self/exe"));
- EXPECT_EQ(exe[0], '/');
-}
-
-TEST(ProcSelfCwd, Absolute) {
- auto exe = ASSERT_NO_ERRNO_AND_VALUE(ReadLink("/proc/self/cwd"));
- EXPECT_EQ(exe[0], '/');
-}
-
-// Sanity check that /proc/cmdline is present.
-TEST(ProcCmdline, IsPresent) {
- SKIP_IF(IsRunningWithVFS1());
-
- std::string proc_cmdline =
- ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/cmdline"));
- ASSERT_FALSE(proc_cmdline.empty());
-}
-
-// Sanity check for /proc/cpuinfo fields that must be present.
-TEST(ProcCpuinfo, RequiredFieldsArePresent) {
- std::string proc_cpuinfo =
- ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/cpuinfo"));
- ASSERT_FALSE(proc_cpuinfo.empty());
- std::vector<std::string> cpuinfo_fields = absl::StrSplit(proc_cpuinfo, '\n');
-
- // Check that the usual fields are there. We don't really care about the
- // contents.
- for (const std::string& field : required_fields) {
- EXPECT_THAT(proc_cpuinfo, HasSubstr(field));
- }
-}
-
-TEST(ProcCpuinfo, DeniesWriteNonRoot) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_FOWNER)));
-
- // Do setuid in a separate thread so that after finishing this test, the
- // process can still open files the test harness created before starting this
- // test. Otherwise, the files are created by root (UID before the test), but
- // cannot be opened by the `uid` set below after the test. After calling
- // setuid(non-zero-UID), there is no way to get root privileges back.
- ScopedThread([&] {
- // Use syscall instead of glibc setuid wrapper because we want this setuid
- // call to only apply to this task. POSIX threads, however, require that all
- // threads have the same UIDs, so using the setuid wrapper sets all threads'
- // real UID.
- // Also drops capabilities.
- constexpr int kNobody = 65534;
- EXPECT_THAT(syscall(SYS_setuid, kNobody), SyscallSucceeds());
- EXPECT_THAT(open("/proc/cpuinfo", O_WRONLY), SyscallFailsWithErrno(EACCES));
- EXPECT_THAT(truncate("/proc/cpuinfo", 123), SyscallFailsWithErrno(EACCES));
- });
-}
-
-// With root privileges, it is possible to open /proc/cpuinfo with write mode,
-// but all write operations should fail.
-TEST(ProcCpuinfo, DeniesWriteRoot) {
- // VFS1 does not behave differently for root/non-root.
- SKIP_IF(IsRunningWithVFS1());
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_FOWNER)));
-
- int fd;
- EXPECT_THAT(fd = open("/proc/cpuinfo", O_WRONLY), SyscallSucceeds());
- if (fd > 0) {
- // Truncate is not tested--it may succeed on some kernels without doing
- // anything.
- EXPECT_THAT(write(fd, "x", 1), SyscallFails());
- EXPECT_THAT(pwrite(fd, "x", 1, 123), SyscallFails());
- }
-}
-
-// Sanity checks that uptime is present.
-TEST(ProcUptime, IsPresent) {
- std::string proc_uptime =
- ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/uptime"));
- ASSERT_FALSE(proc_uptime.empty());
- std::vector<std::string> uptime_parts = absl::StrSplit(proc_uptime, ' ');
-
- // Parse once.
- double uptime0, uptime1, idletime0, idletime1;
- ASSERT_TRUE(absl::SimpleAtod(uptime_parts[0], &uptime0));
- ASSERT_TRUE(absl::SimpleAtod(uptime_parts[1], &idletime0));
-
- // Sleep for one second.
- absl::SleepFor(absl::Seconds(1));
-
- // Parse again.
- proc_uptime = ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/uptime"));
- ASSERT_FALSE(proc_uptime.empty());
- uptime_parts = absl::StrSplit(proc_uptime, ' ');
- ASSERT_TRUE(absl::SimpleAtod(uptime_parts[0], &uptime1));
- ASSERT_TRUE(absl::SimpleAtod(uptime_parts[1], &idletime1));
-
- // Sanity check.
- //
- // We assert that between 0.99 and 59.99 seconds have passed. If more than a
- // minute has passed, then we must be executing really, really slowly.
- EXPECT_GE(uptime0, 0.0);
- EXPECT_GE(idletime0, 0.0);
- EXPECT_GT(uptime1, uptime0);
- EXPECT_GE(uptime1, uptime0 + 0.99);
- EXPECT_LE(uptime1, uptime0 + 59.99);
- EXPECT_GE(idletime1, idletime0);
-}
-
-TEST(ProcMeminfo, ContainsBasicFields) {
- std::string proc_meminfo =
- ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/meminfo"));
- EXPECT_THAT(proc_meminfo, AllOf(ContainsRegex(R"(MemTotal:\s+[0-9]+ kB)"),
- ContainsRegex(R"(MemFree:\s+[0-9]+ kB)")));
-}
-
-TEST(ProcStat, ContainsBasicFields) {
- std::string proc_stat = ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/stat"));
-
- std::vector<std::string> names;
- for (auto const& line : absl::StrSplit(proc_stat, '\n')) {
- std::vector<std::string> fields =
- absl::StrSplit(line, ' ', absl::SkipWhitespace());
- if (fields.empty()) {
- continue;
- }
- names.push_back(fields[0]);
- }
-
- EXPECT_THAT(names,
- IsSupersetOf({"cpu", "intr", "ctxt", "btime", "processes",
- "procs_running", "procs_blocked", "softirq"}));
-}
-
-TEST(ProcStat, EndsWithNewline) {
- std::string proc_stat = ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/stat"));
- EXPECT_EQ(proc_stat.back(), '\n');
-}
-
-TEST(ProcStat, Fields) {
- std::string proc_stat = ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/stat"));
-
- std::vector<std::string> names;
- for (auto const& line : absl::StrSplit(proc_stat, '\n')) {
- std::vector<std::string> fields =
- absl::StrSplit(line, ' ', absl::SkipWhitespace());
- if (fields.empty()) {
- continue;
- }
-
- if (absl::StartsWith(fields[0], "cpu")) {
- // As of Linux 3.11, each CPU entry has 10 fields, plus the name.
- EXPECT_GE(fields.size(), 11) << proc_stat;
- } else if (fields[0] == "ctxt") {
- // Single field.
- EXPECT_EQ(fields.size(), 2) << proc_stat;
- } else if (fields[0] == "btime") {
- // Single field.
- EXPECT_EQ(fields.size(), 2) << proc_stat;
- } else if (fields[0] == "itime") {
- // Single field.
- ASSERT_EQ(fields.size(), 2) << proc_stat;
- // This is the only floating point field.
- double val;
- EXPECT_TRUE(absl::SimpleAtod(fields[1], &val)) << proc_stat;
- continue;
- } else if (fields[0] == "processes") {
- // Single field.
- EXPECT_EQ(fields.size(), 2) << proc_stat;
- } else if (fields[0] == "procs_running") {
- // Single field.
- EXPECT_EQ(fields.size(), 2) << proc_stat;
- } else if (fields[0] == "procs_blocked") {
- // Single field.
- EXPECT_EQ(fields.size(), 2) << proc_stat;
- } else if (fields[0] == "softirq") {
- // As of Linux 3.11, there are 10 softirqs, so 12 fields including the name and total.
- EXPECT_GE(fields.size(), 12) << proc_stat;
- }
-
- // All fields besides itime are valid base 10 numbers.
- for (size_t i = 1; i < fields.size(); i++) {
- uint64_t val;
- EXPECT_TRUE(absl::SimpleAtoi(fields[i], &val)) << proc_stat;
- }
- }
-}
-
-TEST(ProcLoadavg, EndsWithNewline) {
- std::string proc_loadvg =
- ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/loadavg"));
- EXPECT_EQ(proc_loadvg.back(), '\n');
-}
-
-TEST(ProcLoadavg, Fields) {
- std::string proc_loadvg =
- ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/loadavg"));
- std::vector<std::string> lines = absl::StrSplit(proc_loadvg, '\n');
-
- // A single line of content; the trailing newline yields an empty second element.
- EXPECT_EQ(lines.size(), 2) << proc_loadvg;
-
- std::vector<std::string> fields =
- absl::StrSplit(lines[0], absl::ByAnyChar(" /"), absl::SkipWhitespace());
-
- // Six fields.
- EXPECT_EQ(fields.size(), 6) << proc_loadvg;
-
- double val;
- uint64_t val2;
- // First three fields are floating point numbers.
- EXPECT_TRUE(absl::SimpleAtod(fields[0], &val)) << proc_loadvg;
- EXPECT_TRUE(absl::SimpleAtod(fields[1], &val)) << proc_loadvg;
- EXPECT_TRUE(absl::SimpleAtod(fields[2], &val)) << proc_loadvg;
- // Rest of the fields are valid base 10 numbers.
- EXPECT_TRUE(absl::SimpleAtoi(fields[3], &val2)) << proc_loadvg;
- EXPECT_TRUE(absl::SimpleAtoi(fields[4], &val2)) << proc_loadvg;
- EXPECT_TRUE(absl::SimpleAtoi(fields[5], &val2)) << proc_loadvg;
-}
-
-// NOTE: Tests in priority.cc also check certain priority related fields in
-// /proc/self/stat.
-
-class ProcPidStatTest : public ::testing::TestWithParam<std::string> {};
-
-TEST_P(ProcPidStatTest, HasBasicFields) {
- std::string proc_pid_stat = ASSERT_NO_ERRNO_AND_VALUE(
- GetContents(absl::StrCat("/proc/", GetParam(), "/stat")));
-
- ASSERT_FALSE(proc_pid_stat.empty());
- std::vector<std::string> fields = absl::StrSplit(proc_pid_stat, ' ');
- ASSERT_GE(fields.size(), 25);
- EXPECT_EQ(absl::StrCat(getpid()), fields[0]);
- // fields[1] is the thread name.
- EXPECT_EQ("R", fields[2]); // task state
- EXPECT_EQ(absl::StrCat(getppid()), fields[3]);
-
- // If the test starts up quickly, then the process start time and the kernel
- // boot time will be very close, and the proc starttime field (which is the
- // delta of the two times) will be 0. For that unfortunate reason, we can
- // only check that starttime >= 0, and not that it is strictly > 0.
- uint64_t starttime;
- ASSERT_TRUE(absl::SimpleAtoi(fields[21], &starttime));
- EXPECT_GE(starttime, 0);
-
- uint64_t vss;
- ASSERT_TRUE(absl::SimpleAtoi(fields[22], &vss));
- EXPECT_GT(vss, 0);
-
- uint64_t rss;
- ASSERT_TRUE(absl::SimpleAtoi(fields[23], &rss));
- EXPECT_GT(rss, 0);
-
- uint64_t rsslim;
- ASSERT_TRUE(absl::SimpleAtoi(fields[24], &rsslim));
- EXPECT_GT(rsslim, 0);
-}
-
-INSTANTIATE_TEST_SUITE_P(SelfAndNumericPid, ProcPidStatTest,
- ::testing::Values("self", absl::StrCat(getpid())));
-
-using ProcPidStatmTest = ::testing::TestWithParam<std::string>;
-
-TEST_P(ProcPidStatmTest, HasBasicFields) {
- std::string proc_pid_statm = ASSERT_NO_ERRNO_AND_VALUE(
- GetContents(absl::StrCat("/proc/", GetParam(), "/statm")));
- ASSERT_FALSE(proc_pid_statm.empty());
- std::vector<std::string> fields = absl::StrSplit(proc_pid_statm, ' ');
- ASSERT_GE(fields.size(), 7);
-
- uint64_t vss;
- ASSERT_TRUE(absl::SimpleAtoi(fields[0], &vss));
- EXPECT_GT(vss, 0);
-
- uint64_t rss;
- ASSERT_TRUE(absl::SimpleAtoi(fields[1], &rss));
- EXPECT_GT(rss, 0);
-}
-
-INSTANTIATE_TEST_SUITE_P(SelfAndNumericPid, ProcPidStatmTest,
- ::testing::Values("self", absl::StrCat(getpid())));
-
-PosixErrorOr<uint64_t> CurrentRSS() {
- ASSIGN_OR_RETURN_ERRNO(auto proc_self_stat, GetContents("/proc/self/stat"));
- if (proc_self_stat.empty()) {
- return PosixError(EINVAL, "empty /proc/self/stat");
- }
-
- std::vector<std::string> fields = absl::StrSplit(proc_self_stat, ' ');
- if (fields.size() < 24) {
- return PosixError(
- EINVAL,
- absl::StrCat("/proc/self/stat has too few fields: ", proc_self_stat));
- }
-
- uint64_t rss;
- if (!absl::SimpleAtoi(fields[23], &rss)) {
- return PosixError(
- EINVAL, absl::StrCat("/proc/self/stat RSS field is not a number: ",
- fields[23]));
- }
-
- // RSS is given in number of pages.
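- // For example (assuming 4 KiB pages), an RSS value of 2560 pages is 10 MiB.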
- return rss * kPageSize;
-}
-
- // The size of the mapping created by MapPopulateRSS.
-constexpr uint64_t kMappingSize = 100 << 20;
-
-// Tolerance on RSS comparisons to account for background thread mappings,
-// reclaimed pages, newly faulted pages, etc.
-constexpr uint64_t kRSSTolerance = 10 << 20;
-
-// Capture RSS before and after an anonymous mapping with passed prot.
-void MapPopulateRSS(int prot, uint64_t* before, uint64_t* after) {
- *before = ASSERT_NO_ERRNO_AND_VALUE(CurrentRSS());
-
- // N.B. The kernel asynchronously accumulates per-task RSS counters into the
- // mm RSS, which is exposed by /proc/PID/stat. Task exit is a synchronization
- // point (kernel/exit.c:do_exit -> sync_mm_rss), so perform the mapping on
- // another thread to ensure it is reflected in RSS after the thread exits.
- Mapping mapping;
- ScopedThread t([&mapping, prot] {
- mapping = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(kMappingSize, prot, MAP_PRIVATE | MAP_POPULATE));
- });
- t.Join();
-
- *after = ASSERT_NO_ERRNO_AND_VALUE(CurrentRSS());
-}
-
-// TODO(b/73896574): Test for PROT_READ + MAP_POPULATE anonymous mappings. Their
-// semantics are more subtle:
-//
-// Small pages -> Zero page mapped, not counted in RSS
-// (mm/memory.c:do_anonymous_page).
-//
-// Huge pages (THP enabled, use_zero_page=0) -> Pages committed
-// (mm/memory.c:__handle_mm_fault -> create_huge_pmd).
-//
-// Huge pages (THP enabled, use_zero_page=1) -> Zero page mapped, not counted in
-// RSS (mm/huge_memory.c:do_huge_pmd_anonymous_page).
-
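- // A minimal sketch (not part of the original suite) of how the TODO above
- // could be approached: capture the RSS delta for a PROT_READ + MAP_POPULATE
- // mapping and assert only loose bounds, since whether the pages are actually
- // committed depends on the host's THP configuration.
- TEST(ProcSelfStat, PopulateReadRSSBounds) {
- uint64_t before, after;
- MapPopulateRSS(PROT_READ, &before, &after);
-
- // RSS should not shrink appreciably...
- EXPECT_GE(after + kRSSTolerance, before);
- // ...and cannot grow by more than the mapping size plus tolerance.
- EXPECT_LE(after, before + kMappingSize + kRSSTolerance);
- }
-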
-// PROT_WRITE + MAP_POPULATE anonymous mappings are always committed.
-TEST(ProcSelfStat, PopulateWriteRSS) {
- uint64_t before, after;
- MapPopulateRSS(PROT_READ | PROT_WRITE, &before, &after);
-
- // Mapping is committed.
- EXPECT_NEAR(before + kMappingSize, after, kRSSTolerance);
-}
-
-// PROT_NONE + MAP_POPULATE anonymous mappings are never committed.
-TEST(ProcSelfStat, PopulateNoneRSS) {
- uint64_t before, after;
- MapPopulateRSS(PROT_NONE, &before, &after);
-
- // Mapping not committed.
- EXPECT_NEAR(before, after, kRSSTolerance);
-}
-
-// Returns the calling thread's name.
-PosixErrorOr<std::string> ThreadName() {
- // "The buffer should allow space for up to 16 bytes; the returned std::string
- // will be null-terminated if it is shorter than that." - prctl(2). But we
- // always want the thread name to be null-terminated.
- char thread_name[17];
- int rc = prctl(PR_GET_NAME, thread_name, 0, 0, 0);
- MaybeSave();
- if (rc < 0) {
- return PosixError(errno, "prctl(PR_GET_NAME)");
- }
- thread_name[16] = '\0';
- return std::string(thread_name);
-}
-
-// Parses the contents of a /proc/[pid]/status file into a collection of
-// key-value pairs.
-PosixErrorOr<std::map<std::string, std::string>> ParseProcStatus(
- absl::string_view status_str) {
- std::map<std::string, std::string> fields;
- for (absl::string_view const line :
- absl::StrSplit(status_str, '\n', absl::SkipWhitespace())) {
- const std::pair<absl::string_view, absl::string_view> kv =
- absl::StrSplit(line, absl::MaxSplits(":\t", 1));
- if (kv.first.empty()) {
- return PosixError(
- EINVAL, absl::StrCat("failed to parse key in line \"", line, "\""));
- }
- std::string key(kv.first);
- if (fields.count(key)) {
- return PosixError(EINVAL,
- absl::StrCat("duplicate key \"", kv.first, "\""));
- }
- std::string value(kv.second);
- absl::StripLeadingAsciiWhitespace(&value);
- fields.emplace(std::move(key), std::move(value));
- }
- return fields;
-}
-
-TEST(ParseProcStatusTest, ParsesSimpleStatusFileWithMixedWhitespaceCorrectly) {
- EXPECT_THAT(
- ParseProcStatus(
- "Name:\tinit\nState:\tS (sleeping)\nCapEff:\t 0000001fffffffff\n"),
- IsPosixErrorOkAndHolds(UnorderedElementsAre(
- Pair("Name", "init"), Pair("State", "S (sleeping)"),
- Pair("CapEff", "0000001fffffffff"))));
-}
-
-TEST(ParseProcStatusTest, DetectsDuplicateKeys) {
- auto proc_status_or = ParseProcStatus("Name:\tfoo\nName:\tfoo\n");
- EXPECT_THAT(proc_status_or,
- PosixErrorIs(EINVAL, ::testing::StrEq("duplicate key \"Name\"")));
-}
-
-TEST(ParseProcStatusTest, DetectsMissingTabs) {
- EXPECT_THAT(ParseProcStatus("Name:foo\nPid: 1\n"),
- IsPosixErrorOkAndHolds(UnorderedElementsAre(Pair("Name:foo", ""),
- Pair("Pid: 1", ""))));
-}
-
-TEST(ProcPidStatusTest, HasBasicFields) {
- // Do this on a separate thread since we want tgid != tid.
- ScopedThread([] {
- const pid_t tgid = getpid();
- const pid_t tid = syscall(SYS_gettid);
- EXPECT_NE(tgid, tid);
- const auto thread_name = ASSERT_NO_ERRNO_AND_VALUE(ThreadName());
-
- std::string status_str = ASSERT_NO_ERRNO_AND_VALUE(
- GetContents(absl::StrCat("/proc/", tid, "/status")));
-
- ASSERT_FALSE(status_str.empty());
- const auto status = ASSERT_NO_ERRNO_AND_VALUE(ParseProcStatus(status_str));
- EXPECT_THAT(status, IsSupersetOf({Pair("Name", thread_name),
- Pair("Tgid", absl::StrCat(tgid)),
- Pair("Pid", absl::StrCat(tid)),
- Pair("PPid", absl::StrCat(getppid()))}));
- });
-}
-
-TEST(ProcPidStatusTest, StateRunning) {
- // Task must be running when reading the file.
- const pid_t tid = syscall(SYS_gettid);
- std::string status_str = ASSERT_NO_ERRNO_AND_VALUE(
- GetContents(absl::StrCat("/proc/", tid, "/status")));
-
- EXPECT_THAT(ParseProcStatus(status_str),
- IsPosixErrorOkAndHolds(Contains(Pair("State", "R (running)"))));
-}
-
-TEST(ProcPidStatusTest, StateSleeping) {
- // Starts a child process that blocks and checks that State is sleeping.
- auto res = WithSubprocess(
- [&](int pid) -> PosixError {
- // Because this test is timing-based, we disable cooperative saving;
- // the test itself also has random saving disabled.
- const DisableSave ds;
- // Try multiple times in case the child isn't sleeping when the status file
- // is read.
- MonotonicTimer timer;
- timer.Start();
- for (;;) {
- ASSIGN_OR_RETURN_ERRNO(
- std::string status_str,
- GetContents(absl::StrCat("/proc/", pid, "/status")));
- ASSIGN_OR_RETURN_ERRNO(auto map, ParseProcStatus(status_str));
- if (map["State"] == std::string("S (sleeping)")) {
- // Test passed!
- return NoError();
- }
- if (timer.Duration() > absl::Seconds(10)) {
- return PosixError(ETIMEDOUT, "Timeout waiting for child to sleep");
- }
- absl::SleepFor(absl::Milliseconds(10));
- }
- },
- nullptr, nullptr);
- ASSERT_NO_ERRNO(res);
-}
-
-TEST(ProcPidStatusTest, ValuesAreTabDelimited) {
- std::string status_str =
- ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/self/status"));
- ASSERT_FALSE(status_str.empty());
- for (absl::string_view const line :
- absl::StrSplit(status_str, '\n', absl::SkipWhitespace())) {
- EXPECT_NE(std::string::npos, line.find(":\t"));
- }
-}
-
- // The Threads field properly counts running threads.
-//
-// TODO(mpratt): Test zombied threads while the thread group leader is still
-// running with generalized fork and clone children from the wait test.
-TEST(ProcPidStatusTest, Threads) {
- char buf[4096] = {};
- EXPECT_THAT(ReadWhileRunning("status", buf, sizeof(buf) - 1),
- SyscallSucceedsWithValue(Gt(0)));
-
- auto status = ASSERT_NO_ERRNO_AND_VALUE(ParseProcStatus(buf));
- auto it = status.find("Threads");
- ASSERT_NE(it, status.end());
- int threads = -1;
- EXPECT_TRUE(absl::SimpleAtoi(it->second, &threads))
- << "Threads value " << it->second << " is not a number";
- // Don't make assumptions about the exact number of threads, as it may not be
- // constant.
- EXPECT_GE(threads, 1);
-
- memset(buf, 0, sizeof(buf));
- EXPECT_THAT(ReadWhileZombied("status", buf, sizeof(buf) - 1),
- SyscallSucceedsWithValue(Gt(0)));
-
- status = ASSERT_NO_ERRNO_AND_VALUE(ParseProcStatus(buf));
- it = status.find("Threads");
- ASSERT_NE(it, status.end());
- threads = -1;
- EXPECT_TRUE(absl::SimpleAtoi(it->second, &threads))
- << "Threads value " << it->second << " is not a number";
- // There must be only the thread group leader remaining, zombied.
- EXPECT_EQ(threads, 1);
-}
-
-// Returns true if all characters in s are digits.
-bool IsDigits(absl::string_view s) {
- return std::all_of(s.begin(), s.end(), absl::ascii_isdigit);
-}
-
-TEST(ProcPidStatTest, VmStats) {
- std::string status_str =
- ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/self/status"));
- ASSERT_FALSE(status_str.empty());
- auto status = ASSERT_NO_ERRNO_AND_VALUE(ParseProcStatus(status_str));
-
- const auto vss_it = status.find("VmSize");
- ASSERT_NE(vss_it, status.end());
-
- absl::string_view vss_str(vss_it->second);
-
- // Room for the " kB" suffix plus at least one digit.
- ASSERT_GT(vss_str.length(), 3);
- EXPECT_TRUE(absl::EndsWith(vss_str, " kB"));
- // Everything else is part of a number.
- EXPECT_TRUE(IsDigits(vss_str.substr(0, vss_str.length() - 3))) << vss_str;
- // ... which is not 0.
- EXPECT_NE('0', vss_str[0]);
-
- const auto rss_it = status.find("VmRSS");
- ASSERT_NE(rss_it, status.end());
-
- absl::string_view rss_str(rss_it->second);
-
- // Room for the " kB" suffix plus at least one digit.
- ASSERT_GT(rss_str.length(), 3);
- EXPECT_TRUE(absl::EndsWith(rss_str, " kB"));
- // Everything else is part of a number.
- EXPECT_TRUE(IsDigits(rss_str.substr(0, rss_str.length() - 3))) << rss_str;
- // ... which is not 0.
- EXPECT_NE('0', rss_str[0]);
-
- const auto data_it = status.find("VmData");
- ASSERT_NE(data_it, status.end());
-
- absl::string_view data_str(data_it->second);
-
- // Room for the " kB" suffix plus at least one digit.
- ASSERT_GT(data_str.length(), 3);
- EXPECT_TRUE(absl::EndsWith(data_str, " kB"));
- // Everything else is part of a number.
- EXPECT_TRUE(IsDigits(data_str.substr(0, data_str.length() - 3))) << data_str;
- // ... which is not 0.
- EXPECT_NE('0', data_str[0]);
-}
-
- // Parses a buffer of NUL-terminated strings (as found in /proc/[pid]/cmdline
- // or environ), returning a vector of strings.
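- // For example, the contents "arg0\0arg1\0" parse to {"arg0", "arg1"}.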
-std::vector<std::string> ParseNulTerminatedStrings(std::string contents) {
- EXPECT_EQ('\0', contents.back());
- // The split will leave an empty string if the NUL-byte remains, so pop
- // it.
- contents.pop_back();
-
- return absl::StrSplit(contents, '\0');
-}
-
-TEST(ProcPidCmdline, MatchesArgv) {
- std::vector<std::string> proc_cmdline = ParseNulTerminatedStrings(
- ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/self/cmdline")));
- EXPECT_THAT(saved_argv, ContainerEq(proc_cmdline));
-}
-
-TEST(ProcPidEnviron, MatchesEnviron) {
- std::vector<std::string> proc_environ = ParseNulTerminatedStrings(
- ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/self/environ")));
- // Get the environment from the environ variable, which we will compare with
- // /proc/self/environ.
- std::vector<std::string> env;
- for (char** v = environ; *v; v++) {
- env.push_back(*v);
- }
- EXPECT_THAT(env, ContainerEq(proc_environ));
-}
-
-TEST(ProcPidCmdline, SubprocessForkSameCmdline) {
- std::vector<std::string> proc_cmdline_parent;
- std::vector<std::string> proc_cmdline;
- proc_cmdline_parent = ParseNulTerminatedStrings(
- ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/self/cmdline")));
- auto res = WithSubprocess(
- [&](int pid) -> PosixError {
- ASSIGN_OR_RETURN_ERRNO(
- auto raw_cmdline,
- GetContents(absl::StrCat("/proc/", pid, "/cmdline")));
- proc_cmdline = ParseNulTerminatedStrings(raw_cmdline);
- return NoError();
- },
- nullptr, nullptr);
- ASSERT_NO_ERRNO(res);
-
- for (size_t i = 0; i < proc_cmdline_parent.size(); i++) {
- EXPECT_EQ(proc_cmdline_parent[i], proc_cmdline[i]);
- }
-}
-
-TEST(ProcPidCmdline, SubprocessSeekCmdline) {
- FileDescriptor fd;
- ASSERT_NO_ERRNO(WithSubprocess(
- [&](int pid) -> PosixError {
- // Running. Open /proc/pid/cmdline.
- ASSIGN_OR_RETURN_ERRNO(
- fd, Open(absl::StrCat("/proc/", pid, "/cmdline"), O_RDONLY));
- return NoError();
- },
- [&](int pid) -> PosixError {
- // Zombie, but seek should still succeed.
- int ret = lseek(fd.get(), 0x801, 0);
- if (ret < 0) {
- return PosixError(errno);
- }
- return NoError();
- },
- [&](int pid) -> PosixError {
- // Exited.
- int ret = lseek(fd.get(), 0x801, 0);
- if (ret < 0) {
- return PosixError(errno);
- }
- return NoError();
- }));
-}
-
-// Test whether /proc/PID/ symlinks can be read for a running process.
-TEST(ProcPidSymlink, SubprocessRunning) {
- char buf[1];
-
- EXPECT_THAT(ReadlinkWhileRunning("exe", buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- EXPECT_THAT(ReadlinkWhileRunning("ns/net", buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- EXPECT_THAT(ReadlinkWhileRunning("ns/pid", buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- EXPECT_THAT(ReadlinkWhileRunning("ns/user", buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-}
-
-TEST(ProcPidSymlink, SubprocessZombied) {
- AutoCapability cap1(CAP_DAC_OVERRIDE, false);
- AutoCapability cap2(CAP_DAC_READ_SEARCH, false);
-
- char buf[1];
-
- int want = EACCES;
- if (!IsRunningOnGvisor()) {
- auto version = ASSERT_NO_ERRNO_AND_VALUE(GetKernelVersion());
- if (version.major > 4 || (version.major == 4 && version.minor > 3)) {
- want = ENOENT;
- }
- }
-
- EXPECT_THAT(ReadlinkWhileZombied("exe", buf, sizeof(buf)),
- SyscallFailsWithErrno(want));
-
- if (!IsRunningOnGvisor()) {
- EXPECT_THAT(ReadlinkWhileZombied("ns/net", buf, sizeof(buf)),
- SyscallFailsWithErrno(want));
- }
-
- // FIXME(gvisor.dev/issue/164): Inconsistent behavior between Linux versions
- // on proc files.
- //
- // ~4.3: Syscall fails with EACCES.
- // 4.17: Syscall succeeds and returns 1.
- //
- if (!IsRunningOnGvisor()) {
- return;
- }
-
- EXPECT_THAT(ReadlinkWhileZombied("ns/pid", buf, sizeof(buf)),
- SyscallFailsWithErrno(want));
-
- EXPECT_THAT(ReadlinkWhileZombied("ns/user", buf, sizeof(buf)),
- SyscallFailsWithErrno(want));
-}
-
-// Test whether /proc/PID/ symlinks can be read for an exited process.
-TEST(ProcPidSymlink, SubprocessExited) {
- char buf[1];
-
- EXPECT_THAT(ReadlinkWhileExited("exe", buf, sizeof(buf)),
- SyscallFailsWithErrno(ESRCH));
-
- EXPECT_THAT(ReadlinkWhileExited("ns/net", buf, sizeof(buf)),
- SyscallFailsWithErrno(ESRCH));
-
- EXPECT_THAT(ReadlinkWhileExited("ns/pid", buf, sizeof(buf)),
- SyscallFailsWithErrno(ESRCH));
-
- EXPECT_THAT(ReadlinkWhileExited("ns/user", buf, sizeof(buf)),
- SyscallFailsWithErrno(ESRCH));
-}
-
-// /proc/PID/exe points to the correct binary.
-TEST(ProcPidExe, Subprocess) {
- auto link = ASSERT_NO_ERRNO_AND_VALUE(ReadLink("/proc/self/exe"));
- auto expected_absolute_path =
- ASSERT_NO_ERRNO_AND_VALUE(MakeAbsolute(link, ""));
-
- char actual[PATH_MAX + 1] = {};
- ASSERT_THAT(ReadlinkWhileRunning("exe", actual, sizeof(actual)),
- SyscallSucceedsWithValue(Gt(0)));
- EXPECT_EQ(actual, expected_absolute_path);
-}
-
-// /proc/PID/cwd points to the correct directory.
-TEST(ProcPidCwd, Subprocess) {
- auto want = ASSERT_NO_ERRNO_AND_VALUE(GetCWD());
-
- char got[PATH_MAX + 1] = {};
- ASSERT_THAT(ReadlinkWhileRunning("cwd", got, sizeof(got)),
- SyscallSucceedsWithValue(Gt(0)));
- EXPECT_EQ(got, want);
-}
-
-// Test whether /proc/PID/ files can be read for a running process.
-TEST(ProcPidFile, SubprocessRunning) {
- char buf[1];
-
- EXPECT_THAT(ReadWhileRunning("auxv", buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- EXPECT_THAT(ReadWhileRunning("cmdline", buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- EXPECT_THAT(ReadWhileRunning("comm", buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- EXPECT_THAT(ReadWhileRunning("gid_map", buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- EXPECT_THAT(ReadWhileRunning("io", buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- EXPECT_THAT(ReadWhileRunning("maps", buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- EXPECT_THAT(ReadWhileRunning("stat", buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- EXPECT_THAT(ReadWhileRunning("status", buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- EXPECT_THAT(ReadWhileRunning("uid_map", buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- EXPECT_THAT(ReadWhileRunning("oom_score", buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- EXPECT_THAT(ReadWhileRunning("oom_score_adj", buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-}
-
-// Test whether /proc/PID/ files can be read for a zombie process.
-TEST(ProcPidFile, SubprocessZombie) {
- char buf[1];
-
- // FIXME(gvisor.dev/issue/164): Loosen requirement due to inconsistent
- // behavior on different kernels.
- //
- // ~4.3: Succeeds and returns 0.
- // 4.17: Succeeds and returns 1.
- // gVisor: Succeeds and returns 0.
- EXPECT_THAT(ReadWhileZombied("auxv", buf, sizeof(buf)), SyscallSucceeds());
-
- EXPECT_THAT(ReadWhileZombied("cmdline", buf, sizeof(buf)),
- SyscallSucceedsWithValue(0));
-
- EXPECT_THAT(ReadWhileZombied("comm", buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- EXPECT_THAT(ReadWhileZombied("gid_map", buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- EXPECT_THAT(ReadWhileZombied("maps", buf, sizeof(buf)),
- SyscallSucceedsWithValue(0));
-
- EXPECT_THAT(ReadWhileZombied("stat", buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- EXPECT_THAT(ReadWhileZombied("status", buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- EXPECT_THAT(ReadWhileZombied("uid_map", buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- EXPECT_THAT(ReadWhileZombied("oom_score", buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- EXPECT_THAT(ReadWhileZombied("oom_score_adj", buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- // FIXME(gvisor.dev/issue/164): Inconsistent behavior between gVisor and Linux
- // on proc files.
- //
- // ~4.3: Fails and returns EACCES.
- // gVisor & 4.17: Succeeds and returns 1.
- //
- // EXPECT_THAT(ReadWhileZombied("io", buf, sizeof(buf)),
- // SyscallFailsWithErrno(EACCES));
-}
-
-// Test whether /proc/PID/ files can be read for an exited process.
-TEST(ProcPidFile, SubprocessExited) {
- char buf[1];
-
- // FIXME(gvisor.dev/issue/164): Inconsistent behavior between kernels.
- //
- // ~4.3: Fails and returns ESRCH.
- // gVisor: Fails with ESRCH.
- // 4.17: Succeeds and returns 1.
- //
- // EXPECT_THAT(ReadWhileExited("auxv", buf, sizeof(buf)),
- // SyscallFailsWithErrno(ESRCH));
-
- EXPECT_THAT(ReadWhileExited("cmdline", buf, sizeof(buf)),
- SyscallFailsWithErrno(ESRCH));
-
- if (!IsRunningOnGvisor()) {
- // FIXME(gvisor.dev/issue/164): Succeeds on gVisor.
- EXPECT_THAT(ReadWhileExited("comm", buf, sizeof(buf)),
- SyscallFailsWithErrno(ESRCH));
- }
-
- EXPECT_THAT(ReadWhileExited("gid_map", buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- if (!IsRunningOnGvisor()) {
- // FIXME(gvisor.dev/issue/164): Succeeds on gVisor.
- EXPECT_THAT(ReadWhileExited("io", buf, sizeof(buf)),
- SyscallFailsWithErrno(ESRCH));
- }
-
- if (!IsRunningOnGvisor()) {
- // FIXME(gvisor.dev/issue/164): Returns EOF on gVisor.
- EXPECT_THAT(ReadWhileExited("maps", buf, sizeof(buf)),
- SyscallFailsWithErrno(ESRCH));
- }
-
- if (!IsRunningOnGvisor()) {
- // FIXME(gvisor.dev/issue/164): Succeeds on gVisor.
- EXPECT_THAT(ReadWhileExited("stat", buf, sizeof(buf)),
- SyscallFailsWithErrno(ESRCH));
- }
-
- if (!IsRunningOnGvisor()) {
- // FIXME(gvisor.dev/issue/164): Succeeds on gVisor.
- EXPECT_THAT(ReadWhileExited("status", buf, sizeof(buf)),
- SyscallFailsWithErrno(ESRCH));
- }
-
- EXPECT_THAT(ReadWhileExited("uid_map", buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- if (!IsRunningOnGvisor()) {
- // FIXME(gvisor.dev/issue/164): Succeeds on gVisor.
- EXPECT_THAT(ReadWhileExited("oom_score", buf, sizeof(buf)),
- SyscallFailsWithErrno(ESRCH));
- }
-
- EXPECT_THAT(ReadWhileExited("oom_score_adj", buf, sizeof(buf)),
- SyscallFailsWithErrno(ESRCH));
-}
-
-PosixError DirContains(absl::string_view path,
- const std::vector<std::string>& expect,
- const std::vector<std::string>& exclude) {
- ASSIGN_OR_RETURN_ERRNO(auto listing, ListDir(path, false));
-
- for (auto& expected_entry : expect) {
- auto cursor = std::find(listing.begin(), listing.end(), expected_entry);
- if (cursor == listing.end()) {
- return PosixError(
- ENOENT, absl::StrCat("Failed to find '", expected_entry,
- "' in '", path, "'"));
- }
- }
- for (auto& excluded_entry : exclude) {
- auto cursor = std::find(listing.begin(), listing.end(), excluded_entry);
- if (cursor != listing.end()) {
- return PosixError(ENOENT, absl::StrCat("File '", excluded_entry,
- "' found in path '", path, "'"));
- }
- }
- return NoError();
-}
-
-PosixError EventuallyDirContains(absl::string_view path,
- const std::vector<std::string>& expect,
- const std::vector<std::string>& exclude) {
- constexpr int kRetryCount = 100;
- const absl::Duration kRetryDelay = absl::Milliseconds(100);
-
- for (int i = 0; i < kRetryCount; ++i) {
- auto res = DirContains(path, expect, exclude);
- if (res.ok()) {
- return res;
- } else if (i < kRetryCount - 1) {
- // Sleep if this isn't the final iteration.
- absl::SleepFor(kRetryDelay);
- }
- }
- return PosixError(ETIMEDOUT,
- "Timed out while waiting for directory to contain files");
-}
-
-std::vector<std::string> TaskFiles(const std::vector<pid_t>& pids) {
- return ApplyVec<std::string>([](const pid_t p) { return absl::StrCat(p); },
- pids);
-}
-
-TEST(ProcTask, Basic) {
- EXPECT_NO_ERRNO(
- DirContains("/proc/self/task", {".", "..", absl::StrCat(getpid())}, {}));
-}
-
-// Helper class for creating a new task in the current thread group.
-class BlockingChild {
- public:
- BlockingChild() : thread_([=] { Start(); }) {}
- ~BlockingChild() { Join(); }
-
- pid_t Tid() const {
- absl::MutexLock ml(&mu_);
- mu_.Await(absl::Condition(&tid_ready_));
- return tid_;
- }
-
- void Join() {
- {
- absl::MutexLock ml(&mu_);
- stop_ = true;
- }
- thread_.Join();
- }
-
- private:
- void Start() {
- absl::MutexLock ml(&mu_);
- tid_ = syscall(__NR_gettid);
- tid_ready_ = true;
- mu_.Await(absl::Condition(&stop_));
- }
-
- mutable absl::Mutex mu_;
- bool stop_ ABSL_GUARDED_BY(mu_) = false;
- pid_t tid_;
- bool tid_ready_ ABSL_GUARDED_BY(mu_) = false;
-
- // Must be last to ensure that the destructor for the thread is run before
- // any other member of the object is destroyed.
- ScopedThread thread_;
-};
-
-TEST(ProcTask, NewThreadAppears) {
- BlockingChild child1;
- EXPECT_NO_ERRNO(
- DirContains("/proc/self/task", TaskFiles({child1.Tid()}), {}));
-}
-
-TEST(ProcTask, KilledThreadsDisappear) {
- BlockingChild child1;
- EXPECT_NO_ERRNO(
- DirContains("/proc/self/task", TaskFiles({child1.Tid()}), {}));
-
- // Stat child1's task file. Regression test for b/32097707.
- struct stat statbuf;
- const std::string child1_task_file =
- absl::StrCat("/proc/self/task/", child1.Tid());
- EXPECT_THAT(stat(child1_task_file.c_str(), &statbuf), SyscallSucceeds());
-
- BlockingChild child2;
- EXPECT_NO_ERRNO(DirContains("/proc/self/task",
- TaskFiles({child1.Tid(), child2.Tid()}), {}));
-
- BlockingChild child3;
- BlockingChild child4;
- BlockingChild child5;
- EXPECT_NO_ERRNO(
- DirContains("/proc/self/task",
- TaskFiles({child1.Tid(), child2.Tid(), child3.Tid(),
- child4.Tid(), child5.Tid()}),
- {}));
-
- child2.Join();
- EXPECT_NO_ERRNO(EventuallyDirContains(
- "/proc/self/task",
- TaskFiles({child1.Tid(), child3.Tid(), child4.Tid(), child5.Tid()}),
- TaskFiles({child2.Tid()})));
-
- child1.Join();
- child4.Join();
- EXPECT_NO_ERRNO(EventuallyDirContains(
- "/proc/self/task", TaskFiles({child3.Tid(), child5.Tid()}),
- TaskFiles({child2.Tid(), child1.Tid(), child4.Tid()})));
-
- // Stat child1's task file again. This time it should fail. See b/32097707.
- EXPECT_THAT(stat(child1_task_file.c_str(), &statbuf),
- SyscallFailsWithErrno(ENOENT));
-
- child3.Join();
- child5.Join();
- EXPECT_NO_ERRNO(
- EventuallyDirContains("/proc/self/task", {},
- TaskFiles({child2.Tid(), child1.Tid(), child4.Tid(),
- child3.Tid(), child5.Tid()})));
-}
-
-TEST(ProcTask, ChildTaskDir) {
- BlockingChild child1;
- EXPECT_NO_ERRNO(
- DirContains("/proc/self/task", TaskFiles({child1.Tid()}), {}));
- EXPECT_NO_ERRNO(DirContains(absl::StrCat("/proc/", child1.Tid(), "/task"),
- TaskFiles({child1.Tid()}), {}));
-}
-
-PosixError VerifyPidDir(std::string path) {
- return DirContains(path, {"exe", "fd", "io", "maps", "ns", "stat", "status"},
- {});
-}
-
-TEST(ProcTask, VerifyTaskDir) {
- EXPECT_NO_ERRNO(VerifyPidDir("/proc/self"));
-
- EXPECT_NO_ERRNO(VerifyPidDir(absl::StrCat("/proc/self/task/", getpid())));
- BlockingChild child1;
- EXPECT_NO_ERRNO(VerifyPidDir(absl::StrCat("/proc/self/task/", child1.Tid())));
-
- // Only the first level of task directories should contain the 'task'
- // directory. That is:
- //
- // /proc/1234/task <- should exist
- // /proc/1234/task/1234/task <- should not exist
- // /proc/1234/task/1235/task <- should not exist (where 1235 is in the same
- // thread group as 1234).
- EXPECT_NO_ERRNO(
- DirContains(absl::StrCat("/proc/self/task/", getpid()), {}, {"task"}));
-}
-
-TEST(ProcTask, TaskDirCannotBeDeleted) {
- // Drop capabilities that allow us to override file and directory permissions.
- AutoCapability cap(CAP_DAC_OVERRIDE, false);
-
- EXPECT_THAT(rmdir("/proc/self/task"), SyscallFails());
- EXPECT_THAT(rmdir(absl::StrCat("/proc/self/task/", getpid()).c_str()),
- SyscallFailsWithErrno(EACCES));
-}
-
-TEST(ProcTask, TaskDirHasCorrectMetadata) {
- struct stat st;
- EXPECT_THAT(stat("/proc/self/task", &st), SyscallSucceeds());
- EXPECT_TRUE(S_ISDIR(st.st_mode));
-
- // Verify file is readable and executable by everyone.
- mode_t expected_permissions =
- S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH;
- mode_t permissions = st.st_mode & (S_IRWXU | S_IRWXG | S_IRWXO);
- EXPECT_EQ(expected_permissions, permissions);
-}
-
-TEST(ProcTask, TaskDirCanSeekToEnd) {
- const FileDescriptor dirfd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/proc/self/task", O_RDONLY));
- EXPECT_THAT(lseek(dirfd.get(), 0, SEEK_END), SyscallSucceeds());
-}
-
-TEST(ProcTask, VerifyTaskDirNlinks) {
- const auto fn = [] {
-    // A task directory will have 3 links if the thread group has a single
-    // thread. For example, the following shows where the links to
-    // '/proc/12345/task' come from for a single-threaded process with pid
-    // 12345:
- //
- // /proc/12345/task <-- 1 link for the directory itself
- // . <-- link from "."
- // ..
- // 12345
- // .
- // .. <-- link from ".." to parent.
- // <other contents of a task dir>
- //
- // We can't assert an absolute number of links since we don't control how
- // many threads the test framework spawns. Instead, we'll ensure creating a
- // new thread increases the number of links as expected.
-
- // Once we reach the test body, we can count on the thread count being
- // stable unless we spawn a new one.
- const uint64_t initial_links =
- TEST_CHECK_NO_ERRNO_AND_VALUE(Links("/proc/self/task"));
- TEST_CHECK(initial_links >= 3);
-
- // For each new subtask, we should gain a new link.
- BlockingChild child1;
- uint64_t links = TEST_CHECK_NO_ERRNO_AND_VALUE(Links("/proc/self/task"));
- TEST_CHECK(links == initial_links + 1);
-
- BlockingChild child2;
- links = TEST_CHECK_NO_ERRNO_AND_VALUE(Links("/proc/self/task"));
- TEST_CHECK(links == initial_links + 2);
- };
-  // Run in a forked process to prevent terminating tasks from other tests
-  // from showing up here and racing with the count.
- EXPECT_THAT(InForkedProcess(fn), IsPosixErrorOkAndHolds(0));
-}
-
-TEST(ProcTask, CommContainsThreadNameAndTrailingNewline) {
- constexpr char kThreadName[] = "TestThread12345";
- ASSERT_THAT(prctl(PR_SET_NAME, kThreadName), SyscallSucceeds());
-
- auto thread_name = ASSERT_NO_ERRNO_AND_VALUE(
- GetContents(JoinPath("/proc", absl::StrCat(getpid()), "task",
- absl::StrCat(syscall(SYS_gettid)), "comm")));
- EXPECT_EQ(absl::StrCat(kThreadName, "\n"), thread_name);
-}
-
-TEST(ProcTaskNs, NsDirExistsAndHasCorrectMetadata) {
- EXPECT_NO_ERRNO(DirContains("/proc/self/ns", {"net", "pid", "user"}, {}));
-
- // Let's just test the 'pid' entry, all of them are very similar.
- struct stat st;
- EXPECT_THAT(lstat("/proc/self/ns/pid", &st), SyscallSucceeds());
- EXPECT_TRUE(S_ISLNK(st.st_mode));
-
- auto link = ASSERT_NO_ERRNO_AND_VALUE(ReadLink("/proc/self/ns/pid"));
- EXPECT_THAT(link, ::testing::StartsWith("pid:["));
-}
-
-TEST(ProcTaskNs, AccessOnNsNodeSucceeds) {
- EXPECT_THAT(access("/proc/self/ns/pid", F_OK), SyscallSucceeds());
-}
-
-TEST(ProcSysKernelHostname, Exists) {
- EXPECT_THAT(open("/proc/sys/kernel/hostname", O_RDONLY), SyscallSucceeds());
-}
-
-TEST(ProcSysKernelHostname, MatchesUname) {
- struct utsname buf;
- EXPECT_THAT(uname(&buf), SyscallSucceeds());
- const std::string hostname = absl::StrCat(buf.nodename, "\n");
- auto procfs_hostname =
- ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/sys/kernel/hostname"));
- EXPECT_EQ(procfs_hostname, hostname);
-}
-
-TEST(ProcSysVmMaxmapCount, HasNumericValue) {
- const std::string val_str =
- ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/sys/vm/max_map_count"));
- int32_t val;
- EXPECT_TRUE(absl::SimpleAtoi(val_str, &val))
- << "/proc/sys/vm/max_map_count does not contain a numeric value: "
- << val_str;
-}
-
-TEST(ProcSysVmMmapMinAddr, HasNumericValue) {
- const std::string mmap_min_addr_str =
- ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/sys/vm/mmap_min_addr"));
- uintptr_t mmap_min_addr;
- EXPECT_TRUE(absl::SimpleAtoi(mmap_min_addr_str, &mmap_min_addr))
- << "/proc/sys/vm/mmap_min_addr does not contain a numeric value: "
- << mmap_min_addr_str;
-}
-
-TEST(ProcSysVmOvercommitMemory, HasNumericValue) {
- const std::string overcommit_memory_str =
- ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/sys/vm/overcommit_memory"));
- uintptr_t overcommit_memory;
- EXPECT_TRUE(absl::SimpleAtoi(overcommit_memory_str, &overcommit_memory))
- << "/proc/sys/vm/overcommit_memory does not contain a numeric value: "
- << overcommit_memory;
-}
-
-// Check that links for proc fd entries point to the target node, not to the
-// symlink itself. Regression test for b/31155070.
-TEST(ProcTaskFd, FstatatFollowsSymlink) {
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDONLY));
-
- struct stat sproc = {};
- EXPECT_THAT(
- fstatat(-1, absl::StrCat("/proc/self/fd/", fd.get()).c_str(), &sproc, 0),
- SyscallSucceeds());
-
- struct stat sfile = {};
- EXPECT_THAT(fstatat(-1, file.path().c_str(), &sfile, 0), SyscallSucceeds());
-
- // If fstatat follows the fd symlink, the device and inode numbers should
- // match at a minimum.
- EXPECT_EQ(sproc.st_dev, sfile.st_dev);
- EXPECT_EQ(sproc.st_ino, sfile.st_ino);
- EXPECT_EQ(0, memcmp(&sfile, &sproc, sizeof(sfile)));
-}
-
-TEST(ProcFilesystems, Bug65172365) {
- std::string proc_filesystems =
- ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/filesystems"));
- ASSERT_FALSE(proc_filesystems.empty());
-}
-
-TEST(ProcFilesystems, PresenceOfShmMaxMniAll) {
- uint64_t shmmax = 0;
- uint64_t shmall = 0;
- uint64_t shmmni = 0;
- std::string proc_file;
- proc_file = ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/sys/kernel/shmmax"));
- ASSERT_FALSE(proc_file.empty());
- ASSERT_TRUE(absl::SimpleAtoi(proc_file, &shmmax));
- proc_file = ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/sys/kernel/shmall"));
- ASSERT_FALSE(proc_file.empty());
- ASSERT_TRUE(absl::SimpleAtoi(proc_file, &shmall));
- proc_file = ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/sys/kernel/shmmni"));
- ASSERT_FALSE(proc_file.empty());
- ASSERT_TRUE(absl::SimpleAtoi(proc_file, &shmmni));
-
- ASSERT_GT(shmmax, 0);
- ASSERT_GT(shmall, 0);
- ASSERT_GT(shmmni, 0);
- ASSERT_LE(shmall, shmmax);
-
-  // By default these values should never be higher than this; for more
-  // information, see uapi/linux/shm.h.
- ASSERT_LE(shmmax, ULONG_MAX - (1UL << 24));
- ASSERT_LE(shmall, ULONG_MAX - (1UL << 24));
-}
-
-TEST(ProcFilesystems, PresenceOfSem) {
- uint32_t semmsl = 0;
- uint32_t semmns = 0;
- uint32_t semopm = 0;
- uint32_t semmni = 0;
- std::string proc_file;
- proc_file = ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/sys/kernel/sem"));
- ASSERT_FALSE(proc_file.empty());
- std::vector<absl::string_view> sem_limits =
- absl::StrSplit(proc_file, absl::ByAnyChar("\t"), absl::SkipWhitespace());
- ASSERT_EQ(sem_limits.size(), 4);
- ASSERT_TRUE(absl::SimpleAtoi(sem_limits[0], &semmsl));
- ASSERT_TRUE(absl::SimpleAtoi(sem_limits[1], &semmns));
- ASSERT_TRUE(absl::SimpleAtoi(sem_limits[2], &semopm));
- ASSERT_TRUE(absl::SimpleAtoi(sem_limits[3], &semmni));
-
- ASSERT_EQ(semmsl, SEMMSL);
- ASSERT_EQ(semmns, SEMMNS);
- ASSERT_EQ(semopm, SEMOPM);
- ASSERT_EQ(semmni, SEMMNI);
-}
-
-// Check that /proc/mounts is a symlink to self/mounts.
-TEST(ProcMounts, IsSymlink) {
- auto link = ASSERT_NO_ERRNO_AND_VALUE(ReadLink("/proc/mounts"));
- EXPECT_EQ(link, "self/mounts");
-}
-
-TEST(ProcSelfMountinfo, RequiredFieldsArePresent) {
- auto mountinfo =
- ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/self/mountinfo"));
- EXPECT_THAT(
- mountinfo,
- AllOf(
- // Root mount.
- ContainsRegex(
- R"([0-9]+ [0-9]+ [0-9]+:[0-9]+ /\S* / (rw|ro).*- \S+ \S+ (rw|ro)\S*)"),
- // Proc mount - always rw.
- ContainsRegex(
- R"([0-9]+ [0-9]+ [0-9]+:[0-9]+ / /proc rw.*- \S+ \S+ rw\S*)")));
-}
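-
-// For reference (an illustrative note, not part of the original tests): each
-// /proc/self/mountinfo line the regexes above match has the shape
-//   36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
-// i.e. mount ID, parent ID, major:minor of the device, root, mount point,
-// mount options, optional fields, a '-' separator, filesystem type, source,
-// and super options.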
-
-TEST(ProcSelfMountinfo, ContainsProcfsEntry) {
- const std::vector<ProcMountInfoEntry> entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcSelfMountInfoEntries());
- bool found = false;
- for (const auto& e : entries) {
- if (e.fstype == "proc") {
- found = true;
- break;
- }
- }
- EXPECT_TRUE(found);
-}
-
-// Check that /proc/self/mounts looks something like a real mounts file.
-TEST(ProcSelfMounts, RequiredFieldsArePresent) {
- auto mounts = ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/self/mounts"));
- EXPECT_THAT(mounts,
- AllOf(
- // Root mount.
- ContainsRegex(R"(\S+ / \S+ (rw|ro)\S* [0-9]+ [0-9]+\s)"),
-                  // Proc mount - always rw.
- ContainsRegex(R"(\S+ /proc \S+ rw\S* [0-9]+ [0-9]+\s)")));
-}
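-
-// For reference (illustrative only): a typical /proc/self/mounts line matching
-// the second regex above looks like
-//   proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
-// i.e. source, mount point, filesystem type, options, dump frequency, and
-// fsck pass number.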
-
-TEST(ProcSelfMounts, ContainsProcfsEntry) {
- const std::vector<ProcMountsEntry> entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcSelfMountsEntries());
- bool found = false;
- for (const auto& e : entries) {
- if (e.fstype == "proc") {
- found = true;
- break;
- }
- }
- EXPECT_TRUE(found);
-}
-
-void CheckDuplicatesRecursively(std::string path) {
- std::vector<std::string> child_dirs;
-
-  // There is a known issue with the Linux procfs: two consecutive calls to
-  // readdir can return the same entry twice if one or more entries were
-  // removed from the directory between those calls.
- int max_attempts = 5;
- for (int i = 0; i < max_attempts; i++) {
- child_dirs.clear();
- errno = 0;
- bool success = true;
- DIR* dir = opendir(path.c_str());
- if (dir == nullptr) {
-      // Ignore any directories we can't read or that are missing, since the
-      // directory could have been deleted or mutated after the parent
-      // directory's contents were read.
- return;
- }
- auto dir_closer = Cleanup([&dir]() { closedir(dir); });
- absl::node_hash_set<std::string> children;
- while (true) {
- // Readdir(3): If the end of the directory stream is reached, NULL is
- // returned and errno is not changed. If an error occurs, NULL is
- // returned and errno is set appropriately. To distinguish end of stream
-      // from an error, set errno to zero before calling readdir() and then
- // check the value of errno if NULL is returned.
- errno = 0;
- struct dirent* dp = readdir(dir);
- if (dp == nullptr) {
- // Linux will return EINVAL when calling getdents on a /proc/tid/net
- // file corresponding to a zombie task.
- // See fs/proc/proc_net.c:proc_tgid_net_readdir().
- //
- // We just ignore the directory in this case.
- if (errno == EINVAL && absl::StartsWith(path, "/proc/") &&
- absl::EndsWith(path, "/net")) {
- break;
- }
- // We may also see permission failures traversing some files.
- if (errno == EACCES && absl::StartsWith(path, "/proc/")) {
- break;
- }
-
- // Otherwise, no errors are allowed.
- ASSERT_EQ(errno, 0) << path;
- break; // We're done.
- }
-
- const std::string name = dp->d_name;
-
- if (name == "." || name == "..") {
- continue;
- }
-
-      // Only fail on a duplicate entry on the final attempt; otherwise log it
-      // and retry the listing.
- if (i == max_attempts - 1) {
- ASSERT_EQ(children.find(name), children.end())
- << absl::StrCat(path, "/", name);
- } else if (children.find(name) != children.end()) {
- std::cerr << "Duplicate entry: " << i << ":"
- << absl::StrCat(path, "/", name) << std::endl;
- success = false;
- break;
- }
- children.insert(name);
-
- if (dp->d_type == DT_DIR) {
- child_dirs.push_back(name);
- }
- }
- if (success) {
- break;
- }
- }
- for (auto dname = child_dirs.begin(); dname != child_dirs.end(); dname++) {
- CheckDuplicatesRecursively(absl::StrCat(path, "/", *dname));
- }
-}
-
-TEST(Proc, NoDuplicates) { CheckDuplicatesRecursively("/proc"); }
-
-// Most /proc/PID files are owned by the task user with SUID_DUMP_USER.
-TEST(ProcPid, UserDumpableOwner) {
- int before;
- ASSERT_THAT(before = prctl(PR_GET_DUMPABLE), SyscallSucceeds());
- auto cleanup = Cleanup([before] {
- ASSERT_THAT(prctl(PR_SET_DUMPABLE, before), SyscallSucceeds());
- });
-
- EXPECT_THAT(prctl(PR_SET_DUMPABLE, SUID_DUMP_USER), SyscallSucceeds());
-
- // This applies to the task directory itself and files inside.
- struct stat st;
- ASSERT_THAT(stat("/proc/self/", &st), SyscallSucceeds());
- EXPECT_EQ(st.st_uid, geteuid());
- EXPECT_EQ(st.st_gid, getegid());
-
- ASSERT_THAT(stat("/proc/self/stat", &st), SyscallSucceeds());
- EXPECT_EQ(st.st_uid, geteuid());
- EXPECT_EQ(st.st_gid, getegid());
-}
-
-// /proc/PID files are owned by root with SUID_DUMP_DISABLE.
-TEST(ProcPid, RootDumpableOwner) {
- int before;
- ASSERT_THAT(before = prctl(PR_GET_DUMPABLE), SyscallSucceeds());
- auto cleanup = Cleanup([before] {
- ASSERT_THAT(prctl(PR_SET_DUMPABLE, before), SyscallSucceeds());
- });
-
- EXPECT_THAT(prctl(PR_SET_DUMPABLE, SUID_DUMP_DISABLE), SyscallSucceeds());
-
-  // This *does not* apply to the task directory itself (or other 0555
-  // directories), but it does apply to files inside.
- struct stat st;
- ASSERT_THAT(stat("/proc/self/", &st), SyscallSucceeds());
- EXPECT_EQ(st.st_uid, geteuid());
- EXPECT_EQ(st.st_gid, getegid());
-
- // This file is owned by root. Also allow nobody in case this test is running
- // in a userns without root mapped.
- ASSERT_THAT(stat("/proc/self/stat", &st), SyscallSucceeds());
- EXPECT_THAT(st.st_uid, AnyOf(Eq(0), Eq(65534)));
- EXPECT_THAT(st.st_gid, AnyOf(Eq(0), Eq(65534)));
-}
-
-TEST(Proc, GetdentsEnoent) {
- FileDescriptor fd;
- ASSERT_NO_ERRNO(WithSubprocess(
- [&](int pid) -> PosixError {
- // Running.
- ASSIGN_OR_RETURN_ERRNO(fd, Open(absl::StrCat("/proc/", pid, "/task"),
- O_RDONLY | O_DIRECTORY));
-
- return NoError();
- },
- nullptr, nullptr));
- char buf[1024];
- ASSERT_THAT(syscall(SYS_getdents64, fd.get(), buf, sizeof(buf)),
- SyscallFailsWithErrno(ENOENT));
-}
-
-void CheckSyscwFromIOFile(const std::string& path, const std::string& regex) {
- std::string output;
- ASSERT_NO_ERRNO(GetContents(path, &output));
- ASSERT_THAT(output, ContainsRegex(absl::StrCat("syscw:\\s+", regex, "\n")));
-}
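-
-// For reference (approximate; values vary by task): /proc/[pid]/task/[tid]/io
-// contains one "field: value" pair per line, e.g.
-//   rchar: 2012
-//   wchar: 100
-//   syscr: 8
-//   syscw: 100
-//   read_bytes: 0
-//   write_bytes: 409600
-//   cancelled_write_bytes: 0
-// CheckSyscwFromIOFile above only inspects the "syscw" line.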
-
-// Checks that IO is accounted separately for each thread/task.
-TEST(Proc, PidTidIOAccounting) {
- absl::Notification notification;
-
-  // Run a thread that performs a fixed number of writes. Check that the io
-  // accounting records exactly that number of write calls. Opening and closing
-  // the file on each iteration prevents buffering.
- ScopedThread writer([&notification] {
- const int num_writes = 100;
- for (int i = 0; i < num_writes; i++) {
- auto path = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- ASSERT_NO_ERRNO(SetContents(path.path(), "a"));
- }
- notification.Notify();
- const std::string& writer_dir =
- absl::StrCat("/proc/", getpid(), "/task/", gettid(), "/io");
-
- CheckSyscwFromIOFile(writer_dir, std::to_string(num_writes));
- });
-
- // Run a thread and do no writes. Check that no writes are recorded.
- ScopedThread noop([&notification] {
- notification.WaitForNotification();
- const std::string& noop_dir =
- absl::StrCat("/proc/", getpid(), "/task/", gettid(), "/io");
-
- CheckSyscwFromIOFile(noop_dir, "0");
- });
-
- writer.Join();
- noop.Join();
-}
-
-TEST(Proc, Statfs) {
- struct statfs st;
- EXPECT_THAT(statfs("/proc", &st), SyscallSucceeds());
- if (IsRunningWithVFS1()) {
- EXPECT_EQ(st.f_type, ANON_INODE_FS_MAGIC);
- } else {
- EXPECT_EQ(st.f_type, PROC_SUPER_MAGIC);
- }
- EXPECT_EQ(st.f_bsize, getpagesize());
- EXPECT_EQ(st.f_namelen, NAME_MAX);
-}
-
-// Tests that /proc/[pid]/fd/[num] can resolve to a path inside /proc.
-TEST(Proc, ResolveSymlinkToProc) {
- const auto proc = ASSERT_NO_ERRNO_AND_VALUE(Open("/proc/self/cmdline", 0));
- const auto path = JoinPath("/proc/self/fd/", absl::StrCat(proc.get()));
- const auto target = ASSERT_NO_ERRNO_AND_VALUE(ReadLink(path));
- EXPECT_EQ(target, JoinPath("/proc/", absl::StrCat(getpid()), "/cmdline"));
-}
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
-
-int main(int argc, char** argv) {
- for (int i = 0; i < argc; ++i) {
- gvisor::testing::saved_argv.emplace_back(std::string(argv[i]));
- }
-
- gvisor::testing::TestInit(&argc, &argv);
- return gvisor::testing::RunAllTests();
-}
diff --git a/test/syscalls/linux/proc_net.cc b/test/syscalls/linux/proc_net.cc
deleted file mode 100644
index 3b8a71ab4..000000000
--- a/test/syscalls/linux/proc_net.cc
+++ /dev/null
@@ -1,596 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <arpa/inet.h>
-#include <errno.h>
-#include <netinet/in.h>
-#include <poll.h>
-#include <sys/socket.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
-
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "absl/strings/numbers.h"
-#include "absl/strings/str_cat.h"
-#include "absl/strings/str_split.h"
-#include "absl/strings/string_view.h"
-#include "absl/time/clock.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/capability_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-constexpr const char kProcNet[] = "/proc/net";
-constexpr const char kIpForward[] = "/proc/sys/net/ipv4/ip_forward";
-constexpr const char kRangeFile[] = "/proc/sys/net/ipv4/ip_local_port_range";
-
-TEST(ProcNetSymlinkTarget, FileMode) {
- struct stat s;
- ASSERT_THAT(stat(kProcNet, &s), SyscallSucceeds());
- EXPECT_EQ(s.st_mode & S_IFMT, S_IFDIR);
- EXPECT_EQ(s.st_mode & 0777, 0555);
-}
-
-TEST(ProcNetSymlink, FileMode) {
- struct stat s;
- ASSERT_THAT(lstat(kProcNet, &s), SyscallSucceeds());
- EXPECT_EQ(s.st_mode & S_IFMT, S_IFLNK);
- EXPECT_EQ(s.st_mode & 0777, 0777);
-}
-
-TEST(ProcNetSymlink, Contents) {
- char buf[40] = {};
- int n = readlink(kProcNet, buf, sizeof(buf));
- ASSERT_THAT(n, SyscallSucceeds());
-
- buf[n] = 0;
- EXPECT_STREQ(buf, "self/net");
-}
-
-TEST(ProcNetIfInet6, Format) {
- auto ifinet6 = ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/net/if_inet6"));
- EXPECT_THAT(ifinet6,
- ::testing::MatchesRegex(
- // Ex: "00000000000000000000000000000001 01 80 10 80 lo\n"
- "^([a-f0-9]{32}( [a-f0-9]{2}){4} +[a-z][a-z0-9]*\n)+$"));
-}
-
-TEST(ProcSysNetIpv4Sack, Exists) {
- EXPECT_THAT(open("/proc/sys/net/ipv4/tcp_sack", O_RDONLY), SyscallSucceeds());
-}
-
-TEST(ProcSysNetIpv4Sack, CanReadAndWrite) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability((CAP_DAC_OVERRIDE))));
-
- auto const fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/proc/sys/net/ipv4/tcp_sack", O_RDWR));
-
- char buf;
- EXPECT_THAT(PreadFd(fd.get(), &buf, sizeof(buf), 0),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- EXPECT_TRUE(buf == '0' || buf == '1') << "unexpected tcp_sack: " << buf;
-
- char to_write = (buf == '1') ? '0' : '1';
- EXPECT_THAT(PwriteFd(fd.get(), &to_write, sizeof(to_write), 0),
- SyscallSucceedsWithValue(sizeof(to_write)));
-
- buf = 0;
- EXPECT_THAT(PreadFd(fd.get(), &buf, sizeof(buf), 0),
- SyscallSucceedsWithValue(sizeof(buf)));
- EXPECT_EQ(buf, to_write);
-}
-
-// DeviceEntry is an entry in /proc/net/dev.
-struct DeviceEntry {
- std::string name;
- uint64_t stats[16];
-};
-
-PosixErrorOr<std::vector<DeviceEntry>> GetDeviceMetricsFromProc(
- const std::string dev) {
- std::vector<std::string> lines = absl::StrSplit(dev, '\n');
- std::vector<DeviceEntry> entries;
-
- // /proc/net/dev prints 2 lines of headers followed by a line of metrics for
- // each network interface.
- for (unsigned i = 2; i < lines.size(); i++) {
- // Ignore empty lines.
- if (lines[i].empty()) {
- continue;
- }
-
- std::vector<std::string> values =
- absl::StrSplit(lines[i], ' ', absl::SkipWhitespace());
-
- // Interface name + 16 values.
- if (values.size() != 17) {
- return PosixError(EINVAL, "invalid line: " + lines[i]);
- }
-
- DeviceEntry entry;
- entry.name = values[0];
- // Skip the interface name and read only the values.
- for (unsigned j = 1; j < 17; j++) {
- uint64_t num;
- if (!absl::SimpleAtoi(values[j], &num)) {
- return PosixError(EINVAL, "invalid value: " + values[j]);
- }
- entry.stats[j - 1] = num;
- }
-
- entries.push_back(entry);
- }
-
- return entries;
-}
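-
-// For reference (abbreviated and illustrative only): /proc/net/dev starts with
-// two header lines followed by one line per interface, roughly
-//   Inter-|   Receive                ...         |  Transmit
-//    face |bytes    packets errs ... |bytes    packets errs ...
-//       lo:   45000     120    0 ... |  45000      120    0 ...
-// GetDeviceMetricsFromProc above skips the two header lines and parses the
-// interface name plus the 16 numeric columns from each remaining line.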
-
-// TEST(ProcNetDev, Format) tests that /proc/net/dev is parsable and
-// contains at least one entry.
-TEST(ProcNetDev, Format) {
- auto dev = ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/net/dev"));
- auto entries = ASSERT_NO_ERRNO_AND_VALUE(GetDeviceMetricsFromProc(dev));
-
- EXPECT_GT(entries.size(), 0);
-}
-
-PosixErrorOr<uint64_t> GetSNMPMetricFromProc(const std::string snmp,
- const std::string& type,
- const std::string& item) {
- std::vector<std::string> snmp_vec = absl::StrSplit(snmp, '\n');
-
- // /proc/net/snmp prints a line of headers followed by a line of metrics.
- // Only search the headers.
- for (unsigned i = 0; i < snmp_vec.size(); i = i + 2) {
- if (!absl::StartsWith(snmp_vec[i], type)) continue;
-
- std::vector<std::string> fields =
- absl::StrSplit(snmp_vec[i], ' ', absl::SkipWhitespace());
-
- EXPECT_TRUE((i + 1) < snmp_vec.size());
- std::vector<std::string> values =
- absl::StrSplit(snmp_vec[i + 1], ' ', absl::SkipWhitespace());
-
- EXPECT_TRUE(!fields.empty() && fields.size() == values.size());
-
-    // Metrics start at index 1; index 0 holds the protocol name.
- for (unsigned j = 1; j < fields.size(); j++) {
- if (fields[j] == item) {
- uint64_t val;
- if (!absl::SimpleAtoi(values[j], &val)) {
- return PosixError(EINVAL,
- absl::StrCat("field is not a number: ", values[j]));
- }
-
- return val;
- }
- }
- }
- // We should never get here.
- return PosixError(
- EINVAL, absl::StrCat("failed to find ", type, "/", item, " in:", snmp));
-}
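-
-// For reference (abbreviated and illustrative only): /proc/net/snmp interleaves
-// a header line and a value line for each protocol, e.g.
-//   Tcp: RtoAlgorithm RtoMin RtoMax MaxConn ActiveOpens PassiveOpens ...
-//   Tcp: 1 200 120000 -1 15 8 ...
-// GetSNMPMetricFromProc above finds the header line for 'type' and returns the
-// value in the same column position as 'item'.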
-
-TEST(ProcNetSnmp, TcpReset) {
- // TODO(gvisor.dev/issue/866): epsocket metrics are not savable.
- DisableSave ds;
-
- uint64_t oldAttemptFails;
- uint64_t oldActiveOpens;
- uint64_t oldOutRsts;
- auto snmp = ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/net/snmp"));
- oldActiveOpens = ASSERT_NO_ERRNO_AND_VALUE(
- GetSNMPMetricFromProc(snmp, "Tcp", "ActiveOpens"));
- oldOutRsts =
- ASSERT_NO_ERRNO_AND_VALUE(GetSNMPMetricFromProc(snmp, "Tcp", "OutRsts"));
- oldAttemptFails = ASSERT_NO_ERRNO_AND_VALUE(
- GetSNMPMetricFromProc(snmp, "Tcp", "AttemptFails"));
-
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_STREAM, 0));
-
- struct sockaddr_in sin = {
- .sin_family = AF_INET,
- .sin_port = htons(1234),
- };
-
- ASSERT_EQ(inet_pton(AF_INET, "127.0.0.1", &(sin.sin_addr)), 1);
- ASSERT_THAT(connect(s.get(), (struct sockaddr*)&sin, sizeof(sin)),
- SyscallFailsWithErrno(ECONNREFUSED));
-
- uint64_t newAttemptFails;
- uint64_t newActiveOpens;
- uint64_t newOutRsts;
- snmp = ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/net/snmp"));
- newActiveOpens = ASSERT_NO_ERRNO_AND_VALUE(
- GetSNMPMetricFromProc(snmp, "Tcp", "ActiveOpens"));
- newOutRsts =
- ASSERT_NO_ERRNO_AND_VALUE(GetSNMPMetricFromProc(snmp, "Tcp", "OutRsts"));
- newAttemptFails = ASSERT_NO_ERRNO_AND_VALUE(
- GetSNMPMetricFromProc(snmp, "Tcp", "AttemptFails"));
-
- EXPECT_EQ(oldActiveOpens, newActiveOpens - 1);
- EXPECT_EQ(oldOutRsts, newOutRsts - 1);
- EXPECT_EQ(oldAttemptFails, newAttemptFails - 1);
-}
-
-TEST(ProcNetSnmp, TcpEstab) {
- // TODO(gvisor.dev/issue/866): epsocket metrics are not savable.
- DisableSave ds;
-
- uint64_t oldEstabResets;
- uint64_t oldActiveOpens;
- uint64_t oldPassiveOpens;
- uint64_t oldCurrEstab;
- auto snmp = ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/net/snmp"));
- oldActiveOpens = ASSERT_NO_ERRNO_AND_VALUE(
- GetSNMPMetricFromProc(snmp, "Tcp", "ActiveOpens"));
- oldPassiveOpens = ASSERT_NO_ERRNO_AND_VALUE(
- GetSNMPMetricFromProc(snmp, "Tcp", "PassiveOpens"));
- oldCurrEstab = ASSERT_NO_ERRNO_AND_VALUE(
- GetSNMPMetricFromProc(snmp, "Tcp", "CurrEstab"));
- oldEstabResets = ASSERT_NO_ERRNO_AND_VALUE(
- GetSNMPMetricFromProc(snmp, "Tcp", "EstabResets"));
-
- FileDescriptor s_listen =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_STREAM, 0));
- struct sockaddr_in sin = {
- .sin_family = AF_INET,
- .sin_port = 0,
- };
-
- ASSERT_EQ(inet_pton(AF_INET, "127.0.0.1", &(sin.sin_addr)), 1);
- ASSERT_THAT(bind(s_listen.get(), (struct sockaddr*)&sin, sizeof(sin)),
- SyscallSucceeds());
- ASSERT_THAT(listen(s_listen.get(), 1), SyscallSucceeds());
-
- // Get the port bound by the listening socket.
- socklen_t addrlen = sizeof(sin);
- ASSERT_THAT(getsockname(s_listen.get(), AsSockAddr(&sin), &addrlen),
- SyscallSucceeds());
-
- FileDescriptor s_connect =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_STREAM, 0));
- ASSERT_THAT(connect(s_connect.get(), (struct sockaddr*)&sin, sizeof(sin)),
- SyscallSucceeds());
-
- auto s_accept =
- ASSERT_NO_ERRNO_AND_VALUE(Accept(s_listen.get(), nullptr, nullptr));
-
- uint64_t newEstabResets;
- uint64_t newActiveOpens;
- uint64_t newPassiveOpens;
- uint64_t newCurrEstab;
- snmp = ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/net/snmp"));
- newActiveOpens = ASSERT_NO_ERRNO_AND_VALUE(
- GetSNMPMetricFromProc(snmp, "Tcp", "ActiveOpens"));
- newPassiveOpens = ASSERT_NO_ERRNO_AND_VALUE(
- GetSNMPMetricFromProc(snmp, "Tcp", "PassiveOpens"));
- newCurrEstab = ASSERT_NO_ERRNO_AND_VALUE(
- GetSNMPMetricFromProc(snmp, "Tcp", "CurrEstab"));
-
- EXPECT_EQ(oldActiveOpens, newActiveOpens - 1);
- EXPECT_EQ(oldPassiveOpens, newPassiveOpens - 1);
- EXPECT_EQ(oldCurrEstab, newCurrEstab - 2);
-
- // Send 1 byte from client to server.
- ASSERT_THAT(send(s_connect.get(), "a", 1, 0), SyscallSucceedsWithValue(1));
-
- constexpr int kPollTimeoutMs = 20000; // Wait up to 20 seconds for the data.
-
- // Wait until server-side fd sees the data on its side but don't read it.
- struct pollfd poll_fd = {s_accept.get(), POLLIN, 0};
- ASSERT_THAT(RetryEINTR(poll)(&poll_fd, 1, kPollTimeoutMs),
- SyscallSucceedsWithValue(1));
-
-  // Now close the server-side fd without reading the data, which causes a RST
-  // packet to be sent to the client side.
- s_accept.reset(-1);
-
- // Wait until client-side fd sees RST packet.
- struct pollfd poll_fd1 = {s_connect.get(), POLLIN, 0};
- ASSERT_THAT(RetryEINTR(poll)(&poll_fd1, 1, kPollTimeoutMs),
- SyscallSucceedsWithValue(1));
-
- // Now close client-side fd.
- s_connect.reset(-1);
-
-  // Give the netstack time to process the connection teardown and update the
-  // SNMP counters.
- absl::SleepFor(absl::Seconds(1));
-
- snmp = ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/net/snmp"));
- newCurrEstab = ASSERT_NO_ERRNO_AND_VALUE(
- GetSNMPMetricFromProc(snmp, "Tcp", "CurrEstab"));
- newEstabResets = ASSERT_NO_ERRNO_AND_VALUE(
- GetSNMPMetricFromProc(snmp, "Tcp", "EstabResets"));
-
- EXPECT_EQ(oldCurrEstab, newCurrEstab);
- EXPECT_EQ(oldEstabResets, newEstabResets - 2);
-}
-
-TEST(ProcNetSnmp, UdpNoPorts) {
- // TODO(gvisor.dev/issue/866): epsocket metrics are not savable.
- DisableSave ds;
-
- uint64_t oldOutDatagrams;
- uint64_t oldNoPorts;
- auto snmp = ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/net/snmp"));
- oldOutDatagrams = ASSERT_NO_ERRNO_AND_VALUE(
- GetSNMPMetricFromProc(snmp, "Udp", "OutDatagrams"));
- oldNoPorts =
- ASSERT_NO_ERRNO_AND_VALUE(GetSNMPMetricFromProc(snmp, "Udp", "NoPorts"));
-
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, 0));
-
- struct sockaddr_in sin = {
- .sin_family = AF_INET,
- .sin_port = htons(4444),
- };
- ASSERT_EQ(inet_pton(AF_INET, "127.0.0.1", &(sin.sin_addr)), 1);
- ASSERT_THAT(sendto(s.get(), "a", 1, 0, (struct sockaddr*)&sin, sizeof(sin)),
- SyscallSucceedsWithValue(1));
-
- uint64_t newOutDatagrams;
- uint64_t newNoPorts;
- snmp = ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/net/snmp"));
- newOutDatagrams = ASSERT_NO_ERRNO_AND_VALUE(
- GetSNMPMetricFromProc(snmp, "Udp", "OutDatagrams"));
- newNoPorts =
- ASSERT_NO_ERRNO_AND_VALUE(GetSNMPMetricFromProc(snmp, "Udp", "NoPorts"));
-
- EXPECT_EQ(oldOutDatagrams, newOutDatagrams - 1);
- EXPECT_EQ(oldNoPorts, newNoPorts - 1);
-}
-
-TEST(ProcNetSnmp, UdpIn) {
- // TODO(gvisor.dev/issue/866): epsocket metrics are not savable.
- const DisableSave ds;
-
- uint64_t oldOutDatagrams;
- uint64_t oldInDatagrams;
- auto snmp = ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/net/snmp"));
- oldOutDatagrams = ASSERT_NO_ERRNO_AND_VALUE(
- GetSNMPMetricFromProc(snmp, "Udp", "OutDatagrams"));
- oldInDatagrams = ASSERT_NO_ERRNO_AND_VALUE(
- GetSNMPMetricFromProc(snmp, "Udp", "InDatagrams"));
-
- std::cerr << "snmp: " << std::endl << snmp << std::endl;
- FileDescriptor server =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, 0));
- struct sockaddr_in sin = {
- .sin_family = AF_INET,
- .sin_port = htons(0),
- };
- ASSERT_EQ(inet_pton(AF_INET, "127.0.0.1", &(sin.sin_addr)), 1);
- ASSERT_THAT(bind(server.get(), (struct sockaddr*)&sin, sizeof(sin)),
- SyscallSucceeds());
- // Get the port bound by the server socket.
- socklen_t addrlen = sizeof(sin);
- ASSERT_THAT(getsockname(server.get(), AsSockAddr(&sin), &addrlen),
- SyscallSucceeds());
-
- FileDescriptor client =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, 0));
- ASSERT_THAT(
- sendto(client.get(), "a", 1, 0, (struct sockaddr*)&sin, sizeof(sin)),
- SyscallSucceedsWithValue(1));
-
- char buf[128];
- ASSERT_THAT(recvfrom(server.get(), buf, sizeof(buf), 0, NULL, NULL),
- SyscallSucceedsWithValue(1));
-
- uint64_t newOutDatagrams;
- uint64_t newInDatagrams;
- snmp = ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/net/snmp"));
- std::cerr << "new snmp: " << std::endl << snmp << std::endl;
- newOutDatagrams = ASSERT_NO_ERRNO_AND_VALUE(
- GetSNMPMetricFromProc(snmp, "Udp", "OutDatagrams"));
- newInDatagrams = ASSERT_NO_ERRNO_AND_VALUE(
- GetSNMPMetricFromProc(snmp, "Udp", "InDatagrams"));
-
- EXPECT_EQ(oldOutDatagrams, newOutDatagrams - 1);
- EXPECT_EQ(oldInDatagrams, newInDatagrams - 1);
-}
-
-TEST(ProcNetSnmp, CheckNetStat) {
- // TODO(b/155123175): SNMP and netstat don't work on gVisor.
- SKIP_IF(IsRunningOnGvisor());
-
- std::string contents =
- ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/net/netstat"));
-
- int name_count = 0;
- int value_count = 0;
- std::vector<absl::string_view> lines = absl::StrSplit(contents, '\n');
- for (size_t i = 0; i + 1 < lines.size(); i += 2) {
- std::vector<absl::string_view> names =
- absl::StrSplit(lines[i], absl::ByAnyChar("\t "));
- std::vector<absl::string_view> values =
- absl::StrSplit(lines[i + 1], absl::ByAnyChar("\t "));
- EXPECT_EQ(names.size(), values.size()) << " mismatch in lines '" << lines[i]
- << "' and '" << lines[i + 1] << "'";
- for (size_t j = 0; j < names.size() && j < values.size(); ++j) {
- if (names[j] == "TCPOrigDataSent" || names[j] == "TCPSynRetrans" ||
- names[j] == "TCPDSACKRecv" || names[j] == "TCPDSACKOfoRecv") {
- ++name_count;
- int64_t val;
- if (absl::SimpleAtoi(values[j], &val)) {
- ++value_count;
- }
- }
- }
- }
- EXPECT_EQ(name_count, 4);
- EXPECT_EQ(value_count, 4);
-}
-
-TEST(ProcNetSnmp, Stat) {
- struct stat st = {};
- ASSERT_THAT(stat("/proc/net/snmp", &st), SyscallSucceeds());
-}
-
-TEST(ProcNetSnmp, CheckSnmp) {
- // TODO(b/155123175): SNMP and netstat don't work on gVisor.
- SKIP_IF(IsRunningOnGvisor());
-
- std::string contents =
- ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/net/snmp"));
-
- int name_count = 0;
- int value_count = 0;
- std::vector<absl::string_view> lines = absl::StrSplit(contents, '\n');
- for (size_t i = 0; i + 1 < lines.size(); i += 2) {
- std::vector<absl::string_view> names =
- absl::StrSplit(lines[i], absl::ByAnyChar("\t "));
- std::vector<absl::string_view> values =
- absl::StrSplit(lines[i + 1], absl::ByAnyChar("\t "));
- EXPECT_EQ(names.size(), values.size()) << " mismatch in lines '" << lines[i]
- << "' and '" << lines[i + 1] << "'";
- for (size_t j = 0; j < names.size() && j < values.size(); ++j) {
- if (names[j] == "RetransSegs") {
- ++name_count;
- int64_t val;
- if (absl::SimpleAtoi(values[j], &val)) {
- ++value_count;
- }
- }
- }
- }
- EXPECT_EQ(name_count, 1);
- EXPECT_EQ(value_count, 1);
-}
-
-TEST(ProcSysNetIpv4Recovery, Exists) {
- EXPECT_THAT(open("/proc/sys/net/ipv4/tcp_recovery", O_RDONLY),
- SyscallSucceeds());
-}
-
-TEST(ProcSysNetIpv4Recovery, CanReadAndWrite) {
- // TODO(b/162988252): Enable save/restore for this test after the bug is
- // fixed.
- DisableSave ds;
-
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability((CAP_DAC_OVERRIDE))));
-
- auto const fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open("/proc/sys/net/ipv4/tcp_recovery", O_RDWR));
-
- char buf[10] = {'\0'};
- char to_write = '2';
-
- // Check initial value is set to 1.
- EXPECT_THAT(PreadFd(fd.get(), &buf, sizeof(buf), 0),
- SyscallSucceedsWithValue(sizeof(to_write) + 1));
- EXPECT_EQ(strcmp(buf, "1\n"), 0);
-
- // Set tcp_recovery to one of the allowed constants.
- EXPECT_THAT(PwriteFd(fd.get(), &to_write, sizeof(to_write), 0),
- SyscallSucceedsWithValue(sizeof(to_write)));
- EXPECT_THAT(PreadFd(fd.get(), &buf, sizeof(buf), 0),
- SyscallSucceedsWithValue(sizeof(to_write) + 1));
- EXPECT_EQ(strcmp(buf, "2\n"), 0);
-
-  // Set tcp_recovery to an arbitrary value.
- char kMessage[] = "100";
- EXPECT_THAT(PwriteFd(fd.get(), kMessage, strlen(kMessage), 0),
- SyscallSucceedsWithValue(strlen(kMessage)));
- EXPECT_THAT(PreadFd(fd.get(), buf, sizeof(kMessage), 0),
- SyscallSucceedsWithValue(sizeof(kMessage)));
- EXPECT_EQ(strcmp(buf, "100\n"), 0);
-}
-
-TEST(ProcSysNetIpv4IpForward, Exists) {
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(Open(kIpForward, O_RDONLY));
-}
-
-TEST(ProcSysNetIpv4IpForward, DefaultValueEqZero) {
-  // This test is only valid in the sandbox; it is not hermetic when running
-  // natively on an arbitrary machine.
- SKIP_IF(!IsRunningOnGvisor());
- auto const fd = ASSERT_NO_ERRNO_AND_VALUE(Open(kIpForward, O_RDONLY));
-
- char buf = 101;
- EXPECT_THAT(PreadFd(fd.get(), &buf, sizeof(buf), 0),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- EXPECT_EQ(buf, '0') << "unexpected ip_forward: " << buf;
-}
-
-TEST(ProcSysNetIpv4IpForward, CanReadAndWrite) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability((CAP_DAC_OVERRIDE))));
-
- auto const fd = ASSERT_NO_ERRNO_AND_VALUE(Open(kIpForward, O_RDWR));
-
- char buf;
- EXPECT_THAT(PreadFd(fd.get(), &buf, sizeof(buf), 0),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- EXPECT_TRUE(buf == '0' || buf == '1') << "unexpected ip_forward: " << buf;
-
- char to_write = (buf == '1') ? '0' : '1';
- EXPECT_THAT(PwriteFd(fd.get(), &to_write, sizeof(to_write), 0),
- SyscallSucceedsWithValue(sizeof(to_write)));
-
- buf = 0;
- EXPECT_THAT(PreadFd(fd.get(), &buf, sizeof(buf), 0),
- SyscallSucceedsWithValue(sizeof(buf)));
- EXPECT_EQ(buf, to_write);
-}
-
-TEST(ProcSysNetPortRange, CanReadAndWrite) {
- int min;
- int max;
- std::string rangefile = ASSERT_NO_ERRNO_AND_VALUE(GetContents(kRangeFile));
- ASSERT_EQ(rangefile.back(), '\n');
- rangefile.pop_back();
- std::vector<std::string> range =
- absl::StrSplit(rangefile, absl::ByAnyChar("\t "));
- ASSERT_GT(range.size(), 1);
- ASSERT_TRUE(absl::SimpleAtoi(range.front(), &min));
- ASSERT_TRUE(absl::SimpleAtoi(range.back(), &max));
- EXPECT_LE(min, max);
-
- // If the file isn't writable, there's nothing else to do here.
- if (access(kRangeFile, W_OK)) {
- return;
- }
-
- constexpr int kSize = 77;
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(kRangeFile, O_WRONLY | O_TRUNC, 0));
- max = min + kSize;
- const std::string small_range = absl::StrFormat("%d %d", min, max);
- ASSERT_THAT(write(fd.get(), small_range.c_str(), small_range.size()),
- SyscallSucceedsWithValue(small_range.size()));
-
- rangefile = ASSERT_NO_ERRNO_AND_VALUE(GetContents(kRangeFile));
- ASSERT_EQ(rangefile.back(), '\n');
- rangefile.pop_back();
- range = absl::StrSplit(rangefile, absl::ByAnyChar("\t "));
- ASSERT_GT(range.size(), 1);
- ASSERT_TRUE(absl::SimpleAtoi(range.front(), &min));
- ASSERT_TRUE(absl::SimpleAtoi(range.back(), &max));
- EXPECT_EQ(min + kSize, max);
-}
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/proc_net_tcp.cc b/test/syscalls/linux/proc_net_tcp.cc
deleted file mode 100644
index 5b6e3e3cd..000000000
--- a/test/syscalls/linux/proc_net_tcp.cc
+++ /dev/null
@@ -1,496 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <netinet/tcp.h>
-#include <sys/socket.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "absl/strings/numbers.h"
-#include "absl/strings/str_join.h"
-#include "absl/strings/str_split.h"
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-using absl::StrCat;
-using absl::StrSplit;
-
-constexpr char kProcNetTCPHeader[] =
- " sl local_address rem_address st tx_queue rx_queue tr tm->when "
- "retrnsmt uid timeout inode "
- " ";
-
-// TCPEntry represents a single entry from /proc/net/tcp.
-struct TCPEntry {
- uint32_t local_addr;
- uint16_t local_port;
-
- uint32_t remote_addr;
- uint16_t remote_port;
-
- uint64_t state;
- uint64_t uid;
- uint64_t inode;
-};
-
-// Finds the first entry in 'entries' for which 'predicate' returns true.
-// Returns true on match, and sets 'match' to a copy of the matching entry. If
-// 'match' is null, it's ignored.
-bool FindBy(const std::vector<TCPEntry>& entries, TCPEntry* match,
- std::function<bool(const TCPEntry&)> predicate) {
- for (const TCPEntry& entry : entries) {
- if (predicate(entry)) {
- if (match != nullptr) {
- *match = entry;
- }
- return true;
- }
- }
- return false;
-}
-
-bool FindByLocalAddr(const std::vector<TCPEntry>& entries, TCPEntry* match,
- const struct sockaddr* addr) {
- uint32_t host = IPFromInetSockaddr(addr);
- uint16_t port = PortFromInetSockaddr(addr);
- return FindBy(entries, match, [host, port](const TCPEntry& e) {
- return (e.local_addr == host && e.local_port == port);
- });
-}
-
-bool FindByRemoteAddr(const std::vector<TCPEntry>& entries, TCPEntry* match,
- const struct sockaddr* addr) {
- uint32_t host = IPFromInetSockaddr(addr);
- uint16_t port = PortFromInetSockaddr(addr);
- return FindBy(entries, match, [host, port](const TCPEntry& e) {
- return (e.remote_addr == host && e.remote_port == port);
- });
-}
-
-// Returns a parsed representation of /proc/net/tcp entries.
-PosixErrorOr<std::vector<TCPEntry>> ProcNetTCPEntries() {
- std::string content;
- RETURN_IF_ERRNO(GetContents("/proc/net/tcp", &content));
-
- bool found_header = false;
- std::vector<TCPEntry> entries;
- std::vector<std::string> lines = StrSplit(content, '\n');
- std::cerr << "<contents of /proc/net/tcp>" << std::endl;
- for (const std::string& line : lines) {
- std::cerr << line << std::endl;
-
- if (!found_header) {
- EXPECT_EQ(line, kProcNetTCPHeader);
- found_header = true;
- continue;
- }
- if (line.empty()) {
- continue;
- }
-
- // Parse a single entry from /proc/net/tcp.
- //
- // Example entries:
- //
- // clang-format off
- //
- // sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode
- // 0: 00000000:006F 00000000:0000 0A 00000000:00000000 00:00000000 00000000 0 0 1968 1 0000000000000000 100 0 0 10 0
- // 1: 0100007F:7533 00000000:0000 0A 00000000:00000000 00:00000000 00000000 120 0 10684 1 0000000000000000 100 0 0 10 0
- // ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^
- // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
- //
- // clang-format on
-
- TCPEntry entry;
- std::vector<std::string> fields =
- StrSplit(line, absl::ByAnyChar(": "), absl::SkipEmpty());
-
- ASSIGN_OR_RETURN_ERRNO(entry.local_addr, AtoiBase(fields[1], 16));
- ASSIGN_OR_RETURN_ERRNO(entry.local_port, AtoiBase(fields[2], 16));
-
- ASSIGN_OR_RETURN_ERRNO(entry.remote_addr, AtoiBase(fields[3], 16));
- ASSIGN_OR_RETURN_ERRNO(entry.remote_port, AtoiBase(fields[4], 16));
-
- ASSIGN_OR_RETURN_ERRNO(entry.state, AtoiBase(fields[5], 16));
- ASSIGN_OR_RETURN_ERRNO(entry.uid, Atoi<uint64_t>(fields[11]));
- ASSIGN_OR_RETURN_ERRNO(entry.inode, Atoi<uint64_t>(fields[13]));
-
- entries.push_back(entry);
- }
- std::cerr << "<end of /proc/net/tcp>" << std::endl;
-
- return entries;
-}
-
-TEST(ProcNetTCP, Exists) {
- const std::string content =
- ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/net/tcp"));
- const std::string header_line = StrCat(kProcNetTCPHeader, "\n");
- if (IsRunningOnGvisor()) {
- // Should be just the header since we don't have any tcp sockets yet.
- EXPECT_EQ(content, header_line);
- } else {
-    // On a general Linux machine, we could have arbitrary sockets on the
-    // system, so just check the header.
- EXPECT_THAT(content, ::testing::StartsWith(header_line));
- }
-}
-
-TEST(ProcNetTCP, EntryUID) {
- auto sockets =
- ASSERT_NO_ERRNO_AND_VALUE(IPv4TCPAcceptBindSocketPair(0).Create());
- std::vector<TCPEntry> entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcNetTCPEntries());
- TCPEntry e;
- ASSERT_TRUE(FindByLocalAddr(entries, &e, sockets->first_addr()));
- EXPECT_EQ(e.uid, geteuid());
- ASSERT_TRUE(FindByRemoteAddr(entries, &e, sockets->first_addr()));
- EXPECT_EQ(e.uid, geteuid());
-}
-
-TEST(ProcNetTCP, BindAcceptConnect) {
- auto sockets =
- ASSERT_NO_ERRNO_AND_VALUE(IPv4TCPAcceptBindSocketPair(0).Create());
- std::vector<TCPEntry> entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcNetTCPEntries());
- // We can only make assertions about the total number of entries if we control
- // the entire "machine".
- if (IsRunningOnGvisor()) {
- EXPECT_EQ(entries.size(), 2);
- }
-
- EXPECT_TRUE(FindByLocalAddr(entries, nullptr, sockets->first_addr()));
- EXPECT_TRUE(FindByRemoteAddr(entries, nullptr, sockets->first_addr()));
-}
-
-TEST(ProcNetTCP, InodeReasonable) {
- auto sockets =
- ASSERT_NO_ERRNO_AND_VALUE(IPv4TCPAcceptBindSocketPair(0).Create());
- std::vector<TCPEntry> entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcNetTCPEntries());
-
- TCPEntry accepted_entry;
- ASSERT_TRUE(FindByLocalAddr(entries, &accepted_entry, sockets->first_addr()));
- EXPECT_NE(accepted_entry.inode, 0);
-
- TCPEntry client_entry;
- ASSERT_TRUE(FindByRemoteAddr(entries, &client_entry, sockets->first_addr()));
- EXPECT_NE(client_entry.inode, 0);
- EXPECT_NE(accepted_entry.inode, client_entry.inode);
-}
-
-TEST(ProcNetTCP, State) {
- std::unique_ptr<FileDescriptor> server =
- ASSERT_NO_ERRNO_AND_VALUE(IPv4TCPUnboundSocket(0).Create());
-
- auto test_addr = V4Loopback();
- ASSERT_THAT(
- bind(server->get(), reinterpret_cast<struct sockaddr*>(&test_addr.addr),
- test_addr.addr_len),
- SyscallSucceeds());
-
- struct sockaddr addr;
- socklen_t addrlen = sizeof(struct sockaddr);
- ASSERT_THAT(getsockname(server->get(), &addr, &addrlen), SyscallSucceeds());
- ASSERT_EQ(addrlen, sizeof(struct sockaddr));
-
- ASSERT_THAT(listen(server->get(), 10), SyscallSucceeds());
- std::vector<TCPEntry> entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcNetTCPEntries());
- TCPEntry listen_entry;
- ASSERT_TRUE(FindByLocalAddr(entries, &listen_entry, &addr));
- EXPECT_EQ(listen_entry.state, TCP_LISTEN);
-
- std::unique_ptr<FileDescriptor> client =
- ASSERT_NO_ERRNO_AND_VALUE(IPv4TCPUnboundSocket(0).Create());
- ASSERT_THAT(RetryEINTR(connect)(client->get(), &addr, addrlen),
- SyscallSucceeds());
- entries = ASSERT_NO_ERRNO_AND_VALUE(ProcNetTCPEntries());
- ASSERT_TRUE(FindByLocalAddr(entries, &listen_entry, &addr));
- EXPECT_EQ(listen_entry.state, TCP_LISTEN);
- TCPEntry client_entry;
- ASSERT_TRUE(FindByRemoteAddr(entries, &client_entry, &addr));
- EXPECT_EQ(client_entry.state, TCP_ESTABLISHED);
-
- FileDescriptor accepted =
- ASSERT_NO_ERRNO_AND_VALUE(Accept(server->get(), nullptr, nullptr));
-
- const uint32_t accepted_local_host = IPFromInetSockaddr(&addr);
- const uint16_t accepted_local_port = PortFromInetSockaddr(&addr);
-
- entries = ASSERT_NO_ERRNO_AND_VALUE(ProcNetTCPEntries());
- TCPEntry accepted_entry;
- ASSERT_TRUE(FindBy(entries, &accepted_entry,
- [client_entry, accepted_local_host,
- accepted_local_port](const TCPEntry& e) {
- return e.local_addr == accepted_local_host &&
- e.local_port == accepted_local_port &&
- e.remote_addr == client_entry.local_addr &&
- e.remote_port == client_entry.local_port;
- }));
- EXPECT_EQ(accepted_entry.state, TCP_ESTABLISHED);
-}
-
-constexpr char kProcNetTCP6Header[] =
- " sl local_address remote_address"
- " st tx_queue rx_queue tr tm->when retrnsmt"
- " uid timeout inode";
-
-// TCP6Entry represents a single entry from /proc/net/tcp6.
-struct TCP6Entry {
- struct in6_addr local_addr;
- uint16_t local_port;
-
- struct in6_addr remote_addr;
- uint16_t remote_port;
-
- uint64_t state;
- uint64_t uid;
- uint64_t inode;
-};
-
-bool IPv6AddrEqual(const struct in6_addr* a1, const struct in6_addr* a2) {
- return memcmp(a1, a2, sizeof(struct in6_addr)) == 0;
-}
-
-// Finds the first entry in 'entries' for which 'predicate' returns true.
-// Returns true on match, and sets 'match' to a copy of the matching entry. If
-// 'match' is null, it's ignored.
-bool FindBy6(const std::vector<TCP6Entry>& entries, TCP6Entry* match,
- std::function<bool(const TCP6Entry&)> predicate) {
- for (const TCP6Entry& entry : entries) {
- if (predicate(entry)) {
- if (match != nullptr) {
- *match = entry;
- }
- return true;
- }
- }
- return false;
-}
-
-const struct in6_addr* IP6FromInetSockaddr(const struct sockaddr* addr) {
- auto* addr6 = reinterpret_cast<const struct sockaddr_in6*>(addr);
- return &addr6->sin6_addr;
-}
-
-bool FindByLocalAddr6(const std::vector<TCP6Entry>& entries, TCP6Entry* match,
- const struct sockaddr* addr) {
- const struct in6_addr* local = IP6FromInetSockaddr(addr);
- uint16_t port = PortFromInetSockaddr(addr);
- return FindBy6(entries, match, [local, port](const TCP6Entry& e) {
- return (IPv6AddrEqual(&e.local_addr, local) && e.local_port == port);
- });
-}
-
-bool FindByRemoteAddr6(const std::vector<TCP6Entry>& entries, TCP6Entry* match,
- const struct sockaddr* addr) {
- const struct in6_addr* remote = IP6FromInetSockaddr(addr);
- uint16_t port = PortFromInetSockaddr(addr);
- return FindBy6(entries, match, [remote, port](const TCP6Entry& e) {
- return (IPv6AddrEqual(&e.remote_addr, remote) && e.remote_port == port);
- });
-}
-
-void ReadIPv6Address(std::string s, struct in6_addr* addr) {
- uint32_t a0, a1, a2, a3;
- const char* fmt = "%08X%08X%08X%08X";
- EXPECT_EQ(sscanf(s.c_str(), fmt, &a0, &a1, &a2, &a3), 4);
-
- uint8_t* b = addr->s6_addr;
- *((uint32_t*)&b[0]) = a0;
- *((uint32_t*)&b[4]) = a1;
- *((uint32_t*)&b[8]) = a2;
- *((uint32_t*)&b[12]) = a3;
-}
-
-// Returns a parsed representation of /proc/net/tcp6 entries.
-PosixErrorOr<std::vector<TCP6Entry>> ProcNetTCP6Entries() {
- std::string content;
- RETURN_IF_ERRNO(GetContents("/proc/net/tcp6", &content));
-
- bool found_header = false;
- std::vector<TCP6Entry> entries;
- std::vector<std::string> lines = StrSplit(content, '\n');
- std::cerr << "<contents of /proc/net/tcp6>" << std::endl;
- for (const std::string& line : lines) {
- std::cerr << line << std::endl;
-
- if (!found_header) {
- EXPECT_EQ(line, kProcNetTCP6Header);
- found_header = true;
- continue;
- }
- if (line.empty()) {
- continue;
- }
-
- // Parse a single entry from /proc/net/tcp6.
- //
- // Example entries:
- //
- // clang-format off
- //
- // sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode
- // 0: 00000000000000000000000000000000:1F90 00000000000000000000000000000000:0000 0A 00000000:00000000 00:00000000 00000000 0 0 876340 1 ffff8803da9c9380 100 0 0 10 0
- // 1: 00000000000000000000000000000000:C350 00000000000000000000000000000000:0000 0A 00000000:00000000 00:00000000 00000000 0 0 876987 1 ffff8803ec408000 100 0 0 10 0
- // ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^
- // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
- //
- // clang-format on
-
- TCP6Entry entry;
- std::vector<std::string> fields =
- StrSplit(line, absl::ByAnyChar(": "), absl::SkipEmpty());
-
- ReadIPv6Address(fields[1], &entry.local_addr);
- ASSIGN_OR_RETURN_ERRNO(entry.local_port, AtoiBase(fields[2], 16));
- ReadIPv6Address(fields[3], &entry.remote_addr);
- ASSIGN_OR_RETURN_ERRNO(entry.remote_port, AtoiBase(fields[4], 16));
- ASSIGN_OR_RETURN_ERRNO(entry.state, AtoiBase(fields[5], 16));
- ASSIGN_OR_RETURN_ERRNO(entry.uid, Atoi<uint64_t>(fields[11]));
- ASSIGN_OR_RETURN_ERRNO(entry.inode, Atoi<uint64_t>(fields[13]));
-
- entries.push_back(entry);
- }
- std::cerr << "<end of /proc/net/tcp6>" << std::endl;
-
- return entries;
-}
-
-TEST(ProcNetTCP6, Exists) {
- const std::string content =
- ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/net/tcp6"));
- const std::string header_line = StrCat(kProcNetTCP6Header, "\n");
- if (IsRunningOnGvisor()) {
- // Should be just the header since we don't have any tcp sockets yet.
- EXPECT_EQ(content, header_line);
- } else {
-    // On a general Linux machine, we could have arbitrary sockets on the
-    // system, so just check the header.
- EXPECT_THAT(content, ::testing::StartsWith(header_line));
- }
-}
-
-TEST(ProcNetTCP6, EntryUID) {
- auto sockets =
- ASSERT_NO_ERRNO_AND_VALUE(IPv6TCPAcceptBindSocketPair(0).Create());
- std::vector<TCP6Entry> entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcNetTCP6Entries());
- TCP6Entry e;
-
- ASSERT_TRUE(FindByLocalAddr6(entries, &e, sockets->first_addr()));
- EXPECT_EQ(e.uid, geteuid());
- ASSERT_TRUE(FindByRemoteAddr6(entries, &e, sockets->first_addr()));
- EXPECT_EQ(e.uid, geteuid());
-}
-
-TEST(ProcNetTCP6, BindAcceptConnect) {
- auto sockets =
- ASSERT_NO_ERRNO_AND_VALUE(IPv6TCPAcceptBindSocketPair(0).Create());
- std::vector<TCP6Entry> entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcNetTCP6Entries());
- // We can only make assertions about the total number of entries if we control
- // the entire "machine".
- if (IsRunningOnGvisor()) {
- EXPECT_EQ(entries.size(), 2);
- }
-
- EXPECT_TRUE(FindByLocalAddr6(entries, nullptr, sockets->first_addr()));
- EXPECT_TRUE(FindByRemoteAddr6(entries, nullptr, sockets->first_addr()));
-}
-
-TEST(ProcNetTCP6, InodeReasonable) {
- auto sockets =
- ASSERT_NO_ERRNO_AND_VALUE(IPv6TCPAcceptBindSocketPair(0).Create());
- std::vector<TCP6Entry> entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcNetTCP6Entries());
-
- TCP6Entry accepted_entry;
-
- ASSERT_TRUE(
- FindByLocalAddr6(entries, &accepted_entry, sockets->first_addr()));
- EXPECT_NE(accepted_entry.inode, 0);
-
- TCP6Entry client_entry;
- ASSERT_TRUE(FindByRemoteAddr6(entries, &client_entry, sockets->first_addr()));
- EXPECT_NE(client_entry.inode, 0);
- EXPECT_NE(accepted_entry.inode, client_entry.inode);
-}
-
-TEST(ProcNetTCP6, State) {
- std::unique_ptr<FileDescriptor> server =
- ASSERT_NO_ERRNO_AND_VALUE(IPv6TCPUnboundSocket(0).Create());
-
- auto test_addr = V6Loopback();
- ASSERT_THAT(
- bind(server->get(), reinterpret_cast<struct sockaddr*>(&test_addr.addr),
- test_addr.addr_len),
- SyscallSucceeds());
-
- struct sockaddr_in6 addr6;
- socklen_t addrlen = sizeof(struct sockaddr_in6);
- auto* addr = reinterpret_cast<struct sockaddr*>(&addr6);
- ASSERT_THAT(getsockname(server->get(), addr, &addrlen), SyscallSucceeds());
- ASSERT_EQ(addrlen, sizeof(struct sockaddr_in6));
-
- ASSERT_THAT(listen(server->get(), 10), SyscallSucceeds());
- std::vector<TCP6Entry> entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcNetTCP6Entries());
- TCP6Entry listen_entry;
-
- ASSERT_TRUE(FindByLocalAddr6(entries, &listen_entry, addr));
- EXPECT_EQ(listen_entry.state, TCP_LISTEN);
-
- std::unique_ptr<FileDescriptor> client =
- ASSERT_NO_ERRNO_AND_VALUE(IPv6TCPUnboundSocket(0).Create());
- ASSERT_THAT(RetryEINTR(connect)(client->get(), addr, addrlen),
- SyscallSucceeds());
- entries = ASSERT_NO_ERRNO_AND_VALUE(ProcNetTCP6Entries());
- ASSERT_TRUE(FindByLocalAddr6(entries, &listen_entry, addr));
- EXPECT_EQ(listen_entry.state, TCP_LISTEN);
- TCP6Entry client_entry;
- ASSERT_TRUE(FindByRemoteAddr6(entries, &client_entry, addr));
- EXPECT_EQ(client_entry.state, TCP_ESTABLISHED);
-
- FileDescriptor accepted =
- ASSERT_NO_ERRNO_AND_VALUE(Accept(server->get(), nullptr, nullptr));
-
- const struct in6_addr* local = IP6FromInetSockaddr(addr);
- const uint16_t accepted_local_port = PortFromInetSockaddr(addr);
-
- entries = ASSERT_NO_ERRNO_AND_VALUE(ProcNetTCP6Entries());
- TCP6Entry accepted_entry;
- ASSERT_TRUE(FindBy6(
- entries, &accepted_entry,
- [client_entry, local, accepted_local_port](const TCP6Entry& e) {
- return IPv6AddrEqual(&e.local_addr, local) &&
- e.local_port == accepted_local_port &&
- IPv6AddrEqual(&e.remote_addr, &client_entry.local_addr) &&
- e.remote_port == client_entry.local_port;
- }));
- EXPECT_EQ(accepted_entry.state, TCP_ESTABLISHED);
-}
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/proc_net_udp.cc b/test/syscalls/linux/proc_net_udp.cc
deleted file mode 100644
index 786b4b4af..000000000
--- a/test/syscalls/linux/proc_net_udp.cc
+++ /dev/null
@@ -1,309 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <netinet/tcp.h>
-#include <sys/socket.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "absl/strings/numbers.h"
-#include "absl/strings/str_join.h"
-#include "absl/strings/str_split.h"
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-using absl::StrCat;
-using absl::StrFormat;
-using absl::StrSplit;
-
-constexpr char kProcNetUDPHeader[] =
- " sl local_address rem_address st tx_queue rx_queue tr tm->when "
- "retrnsmt uid timeout inode ref pointer drops ";
-
-// UDPEntry represents a single entry from /proc/net/udp.
-struct UDPEntry {
- uint32_t local_addr;
- uint16_t local_port;
-
- uint32_t remote_addr;
- uint16_t remote_port;
-
- uint64_t state;
- uint64_t uid;
- uint64_t inode;
-};
-
-std::string DescribeFirstInetSocket(const SocketPair& sockets) {
- const struct sockaddr* addr = sockets.first_addr();
- return StrFormat("First test socket: fd:%d %8X:%4X", sockets.first_fd(),
- IPFromInetSockaddr(addr), PortFromInetSockaddr(addr));
-}
-
-std::string DescribeSecondInetSocket(const SocketPair& sockets) {
- const struct sockaddr* addr = sockets.second_addr();
- return StrFormat("Second test socket fd:%d %8X:%4X", sockets.second_fd(),
- IPFromInetSockaddr(addr), PortFromInetSockaddr(addr));
-}
-
-// Finds the first entry in 'entries' for which 'predicate' returns true.
-// Returns true on match, and sets 'match' to a copy of the matching entry. If
-// 'match' is null, it's ignored.
-bool FindBy(const std::vector<UDPEntry>& entries, UDPEntry* match,
- std::function<bool(const UDPEntry&)> predicate) {
- for (const UDPEntry& entry : entries) {
- if (predicate(entry)) {
- if (match != nullptr) {
- *match = entry;
- }
- return true;
- }
- }
- return false;
-}
-
-bool FindByLocalAddr(const std::vector<UDPEntry>& entries, UDPEntry* match,
- const struct sockaddr* addr) {
- uint32_t host = IPFromInetSockaddr(addr);
- uint16_t port = PortFromInetSockaddr(addr);
- return FindBy(entries, match, [host, port](const UDPEntry& e) {
- return (e.local_addr == host && e.local_port == port);
- });
-}
-
-bool FindByRemoteAddr(const std::vector<UDPEntry>& entries, UDPEntry* match,
- const struct sockaddr* addr) {
- uint32_t host = IPFromInetSockaddr(addr);
- uint16_t port = PortFromInetSockaddr(addr);
- return FindBy(entries, match, [host, port](const UDPEntry& e) {
- return (e.remote_addr == host && e.remote_port == port);
- });
-}
-
-PosixErrorOr<uint64_t> InodeFromSocketFD(int fd) {
- ASSIGN_OR_RETURN_ERRNO(struct stat s, Fstat(fd));
- if (!S_ISSOCK(s.st_mode)) {
- return PosixError(EINVAL, StrFormat("FD %d is not a socket", fd));
- }
- return s.st_ino;
-}
-
-PosixErrorOr<bool> FindByFD(const std::vector<UDPEntry>& entries,
- UDPEntry* match, int fd) {
- ASSIGN_OR_RETURN_ERRNO(uint64_t inode, InodeFromSocketFD(fd));
- return FindBy(entries, match,
- [inode](const UDPEntry& e) { return (e.inode == inode); });
-}
-
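FindByFD() above relies on fstat(2) reporting, for a socket descriptor, the same inode number that the kernel prints in the "inode" column of /proc/net/udp. A minimal standalone sketch of that relationship (editorial illustration, not part of the original test file):

#include <sys/stat.h>
#include <cstdio>

// Prints the inode backing a socket fd; the same number can be looked up in
// the "inode" column of /proc/net/udp (or /proc/net/tcp) to find its entry.
void PrintSocketInode(int fd) {
  struct stat s;
  if (fstat(fd, &s) == 0 && S_ISSOCK(s.st_mode)) {
    std::printf("socket fd %d -> inode %llu\n", fd,
                static_cast<unsigned long long>(s.st_ino));
  }
}
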
-// Returns a parsed representation of /proc/net/udp entries.
-PosixErrorOr<std::vector<UDPEntry>> ProcNetUDPEntries() {
- std::string content;
- RETURN_IF_ERRNO(GetContents("/proc/net/udp", &content));
-
- bool found_header = false;
- std::vector<UDPEntry> entries;
- std::vector<std::string> lines = StrSplit(content, '\n');
- std::cerr << "<contents of /proc/net/udp>" << std::endl;
- for (const std::string& line : lines) {
- std::cerr << line << std::endl;
-
- if (!found_header) {
- EXPECT_EQ(line, kProcNetUDPHeader);
- found_header = true;
- continue;
- }
- if (line.empty()) {
- continue;
- }
-
- // Parse a single entry from /proc/net/udp.
- //
- // Example entries:
- //
- // clang-format off
- //
- // sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops
- // 3503: 0100007F:0035 00000000:0000 07 00000000:00000000 00:00000000 00000000 0 0 33317 2 0000000000000000 0
- // 3518: 00000000:0044 00000000:0000 07 00000000:00000000 00:00000000 00000000 0 0 40394 2 0000000000000000 0
- // ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^
- // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
- //
- // clang-format on
-
- UDPEntry entry;
- std::vector<std::string> fields =
- StrSplit(line, absl::ByAnyChar(": "), absl::SkipEmpty());
-
- ASSIGN_OR_RETURN_ERRNO(entry.local_addr, AtoiBase(fields[1], 16));
- ASSIGN_OR_RETURN_ERRNO(entry.local_port, AtoiBase(fields[2], 16));
-
- ASSIGN_OR_RETURN_ERRNO(entry.remote_addr, AtoiBase(fields[3], 16));
- ASSIGN_OR_RETURN_ERRNO(entry.remote_port, AtoiBase(fields[4], 16));
-
- ASSIGN_OR_RETURN_ERRNO(entry.state, AtoiBase(fields[5], 16));
- ASSIGN_OR_RETURN_ERRNO(entry.uid, Atoi<uint64_t>(fields[11]));
- ASSIGN_OR_RETURN_ERRNO(entry.inode, Atoi<uint64_t>(fields[13]));
-
- // Linux shares internal data structures between TCP and UDP sockets. The
- // proc entries for UDP sockets share some fields with TCP sockets, but
- // these fields should always be zero as they're not meaningful for UDP
- // sockets.
- EXPECT_EQ(fields[8], "00") << StrFormat("sl:%s, tr", fields[0]);
- EXPECT_EQ(fields[9], "00000000") << StrFormat("sl:%s, tm->when", fields[0]);
- EXPECT_EQ(fields[10], "00000000")
- << StrFormat("sl:%s, retrnsmt", fields[0]);
- EXPECT_EQ(fields[12], "0") << StrFormat("sl:%s, timeout", fields[0]);
-
- entries.push_back(entry);
- }
- std::cerr << "<end of /proc/net/udp>" << std::endl;
-
- return entries;
-}
-
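A side note on the address format parsed above: local_address and rem_address are the raw 32-bit address and 16-bit port printed as hex, so on a little-endian host "0100007F:0035" is 127.0.0.1:53. A small decoding sketch (editorial illustration with a little-endian assumption; PrintProcNetAddress is hypothetical and not part of the original test):

#include <arpa/inet.h>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

void PrintProcNetAddress(const char* field) {  // e.g. "0100007F:0035"
  char* sep = nullptr;
  const uint32_t raw = std::strtoul(field, &sep, 16);
  const uint16_t port = std::strtoul(sep + 1, nullptr, 16);
  struct in_addr addr;
  addr.s_addr = raw;  // on a little-endian host the bytes are already in network order
  std::printf("%s:%u\n", inet_ntoa(addr), static_cast<unsigned>(port));
}
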
-TEST(ProcNetUDP, Exists) {
- const std::string content =
- ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/net/udp"));
- const std::string header_line = StrCat(kProcNetUDPHeader, "\n");
- EXPECT_THAT(content, ::testing::StartsWith(header_line));
-}
-
-TEST(ProcNetUDP, EntryUID) {
- auto sockets =
- ASSERT_NO_ERRNO_AND_VALUE(IPv4UDPBidirectionalBindSocketPair(0).Create());
- std::vector<UDPEntry> entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcNetUDPEntries());
- UDPEntry e;
- ASSERT_TRUE(FindByLocalAddr(entries, &e, sockets->first_addr()))
- << DescribeFirstInetSocket(*sockets);
- EXPECT_EQ(e.uid, geteuid());
- ASSERT_TRUE(FindByRemoteAddr(entries, &e, sockets->first_addr()))
- << DescribeSecondInetSocket(*sockets);
- EXPECT_EQ(e.uid, geteuid());
-}
-
-TEST(ProcNetUDP, FindMutualEntries) {
- auto sockets =
- ASSERT_NO_ERRNO_AND_VALUE(IPv4UDPBidirectionalBindSocketPair(0).Create());
- std::vector<UDPEntry> entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcNetUDPEntries());
-
- EXPECT_TRUE(FindByLocalAddr(entries, nullptr, sockets->first_addr()))
- << DescribeFirstInetSocket(*sockets);
- EXPECT_TRUE(FindByRemoteAddr(entries, nullptr, sockets->first_addr()))
- << DescribeSecondInetSocket(*sockets);
-
- EXPECT_TRUE(FindByLocalAddr(entries, nullptr, sockets->second_addr()))
- << DescribeSecondInetSocket(*sockets);
- EXPECT_TRUE(FindByRemoteAddr(entries, nullptr, sockets->second_addr()))
- << DescribeFirstInetSocket(*sockets);
-}
-
-TEST(ProcNetUDP, EntriesRemovedOnClose) {
- auto sockets =
- ASSERT_NO_ERRNO_AND_VALUE(IPv4UDPBidirectionalBindSocketPair(0).Create());
- std::vector<UDPEntry> entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcNetUDPEntries());
-
- EXPECT_TRUE(FindByLocalAddr(entries, nullptr, sockets->first_addr()))
- << DescribeFirstInetSocket(*sockets);
- EXPECT_TRUE(FindByLocalAddr(entries, nullptr, sockets->second_addr()))
- << DescribeSecondInetSocket(*sockets);
-
- EXPECT_THAT(close(sockets->release_first_fd()), SyscallSucceeds());
- entries = ASSERT_NO_ERRNO_AND_VALUE(ProcNetUDPEntries());
- // First socket's entry should be gone, but the second socket's entry should
- // still exist.
- EXPECT_FALSE(FindByLocalAddr(entries, nullptr, sockets->first_addr()))
- << DescribeFirstInetSocket(*sockets);
- EXPECT_TRUE(FindByLocalAddr(entries, nullptr, sockets->second_addr()))
- << DescribeSecondInetSocket(*sockets);
-
- EXPECT_THAT(close(sockets->release_second_fd()), SyscallSucceeds());
- entries = ASSERT_NO_ERRNO_AND_VALUE(ProcNetUDPEntries());
- // Both entries should be gone.
- EXPECT_FALSE(FindByLocalAddr(entries, nullptr, sockets->first_addr()))
- << DescribeFirstInetSocket(*sockets);
- EXPECT_FALSE(FindByLocalAddr(entries, nullptr, sockets->second_addr()))
- << DescribeSecondInetSocket(*sockets);
-}
-
-PosixErrorOr<std::unique_ptr<FileDescriptor>> BoundUDPSocket() {
- ASSIGN_OR_RETURN_ERRNO(std::unique_ptr<FileDescriptor> socket,
- IPv4UDPUnboundSocket(0).Create());
- struct sockaddr_in addr;
- addr.sin_family = AF_INET;
- addr.sin_addr.s_addr = htonl(INADDR_ANY);
- addr.sin_port = 0;
-
- int res = bind(socket->get(), reinterpret_cast<const struct sockaddr*>(&addr),
- sizeof(addr));
- if (res) {
- return PosixError(errno, "bind()");
- }
- return socket;
-}
-
-TEST(ProcNetUDP, BoundEntry) {
- std::unique_ptr<FileDescriptor> socket =
- ASSERT_NO_ERRNO_AND_VALUE(BoundUDPSocket());
- struct sockaddr addr;
- socklen_t len = sizeof(addr);
- ASSERT_THAT(getsockname(socket->get(), &addr, &len), SyscallSucceeds());
- uint16_t port = PortFromInetSockaddr(&addr);
-
- std::vector<UDPEntry> entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcNetUDPEntries());
- UDPEntry e;
- ASSERT_TRUE(ASSERT_NO_ERRNO_AND_VALUE(FindByFD(entries, &e, socket->get())));
- EXPECT_EQ(e.local_port, port);
- EXPECT_EQ(e.remote_addr, 0);
- EXPECT_EQ(e.remote_port, 0);
-}
-
-TEST(ProcNetUDP, BoundSocketStateClosed) {
- std::unique_ptr<FileDescriptor> socket =
- ASSERT_NO_ERRNO_AND_VALUE(BoundUDPSocket());
- std::vector<UDPEntry> entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcNetUDPEntries());
- UDPEntry e;
- ASSERT_TRUE(ASSERT_NO_ERRNO_AND_VALUE(FindByFD(entries, &e, socket->get())));
- EXPECT_EQ(e.state, TCP_CLOSE);
-}
-
-TEST(ProcNetUDP, ConnectedSocketStateEstablished) {
- auto sockets =
- ASSERT_NO_ERRNO_AND_VALUE(IPv4UDPBidirectionalBindSocketPair(0).Create());
- std::vector<UDPEntry> entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcNetUDPEntries());
-
- UDPEntry e;
- ASSERT_TRUE(FindByLocalAddr(entries, &e, sockets->first_addr()))
- << DescribeFirstInetSocket(*sockets);
- EXPECT_EQ(e.state, TCP_ESTABLISHED);
-
- ASSERT_TRUE(FindByLocalAddr(entries, &e, sockets->second_addr()))
- << DescribeSecondInetSocket(*sockets);
- EXPECT_EQ(e.state, TCP_ESTABLISHED);
-}
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/proc_net_unix.cc b/test/syscalls/linux/proc_net_unix.cc
deleted file mode 100644
index f7ff65aad..000000000
--- a/test/syscalls/linux/proc_net_unix.cc
+++ /dev/null
@@ -1,383 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "gtest/gtest.h"
-#include "absl/strings/numbers.h"
-#include "absl/strings/str_format.h"
-#include "absl/strings/str_join.h"
-#include "absl/strings/str_split.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/cleanup.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-using absl::StrCat;
-using absl::StreamFormat;
-using absl::StrFormat;
-
-constexpr char kProcNetUnixHeader[] =
- "Num RefCount Protocol Flags Type St Inode Path";
-
-// Possible values of the "st" field in a /proc/net/unix entry. Source: Linux
-// kernel, include/uapi/linux/net.h.
-enum {
- SS_FREE = 0, // Not allocated
- SS_UNCONNECTED, // Unconnected to any socket
- SS_CONNECTING, // In process of connecting
- SS_CONNECTED, // Connected to socket
- SS_DISCONNECTING // In process of disconnecting
-};
-
-// UnixEntry represents a single entry from /proc/net/unix.
-struct UnixEntry {
- uintptr_t addr;
- uint64_t refs;
- uint64_t protocol;
- uint64_t flags;
- uint64_t type;
- uint64_t state;
- uint64_t inode;
- std::string path;
-};
-
-// Abstract socket paths can have either trailing null bytes or '@'s as padding
-// at the end, depending on the linux version. This function strips any such
-// padding.
-void StripAbstractPathPadding(std::string* s) {
- const char pad_char = s->back();
- if (pad_char != '\0' && pad_char != '@') {
- return;
- }
-
- const auto last_pos = s->find_last_not_of(pad_char);
- if (last_pos != std::string::npos) {
- s->resize(last_pos + 1);
- }
-}
-
-// Precondition: addr must be a unix socket address (i.e. sockaddr_un) and
-// addr->sun_path must be null-terminated. This is always the case if addr comes
-// from Linux:
-//
-// Per man unix(7):
-//
-// "When the address of a pathname socket is returned (by [getsockname(2)]), its
-// length is
-//
-// offsetof(struct sockaddr_un, sun_path) + strlen(sun_path) + 1
-//
-// and sun_path contains the null-terminated pathname."
-std::string ExtractPath(const struct sockaddr* addr) {
- const char* path =
- reinterpret_cast<const struct sockaddr_un*>(addr)->sun_path;
- // Note: sockaddr_un.sun_path is an embedded character array of length
- // UNIX_PATH_MAX, so we can always safely dereference the first 2 bytes below.
- //
- // We also rely on the path being null-terminated.
- if (path[0] == 0) {
- std::string abstract_path = StrCat("@", &path[1]);
- StripAbstractPathPadding(&abstract_path);
- return abstract_path;
- }
- return std::string(path);
-}
-
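For context on the '@' convention above, here is a sketch of how an abstract-namespace address is constructed (editorial illustration; MakeAbstractAddr is a hypothetical helper, not part of the original file). The abstract name follows a leading NUL byte in sun_path, which is why ExtractPath() renders it with a leading '@':

#include <cstring>
#include <sys/socket.h>
#include <sys/un.h>

// ExtractPath() on the returned address would yield "@" followed by the name,
// e.g. "@remote_coredump" for MakeAbstractAddr("remote_coredump").
struct sockaddr_un MakeAbstractAddr(const char* name) {
  struct sockaddr_un addr = {};
  addr.sun_family = AF_UNIX;
  // sun_path[0] stays '\0'; the abstract name starts at sun_path[1].
  std::strncpy(&addr.sun_path[1], name, sizeof(addr.sun_path) - 2);
  return addr;
}
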
-// Returns a parsed representation of /proc/net/unix entries.
-PosixErrorOr<std::vector<UnixEntry>> ProcNetUnixEntries() {
- std::string content;
- RETURN_IF_ERRNO(GetContents("/proc/net/unix", &content));
-
- bool skipped_header = false;
- std::vector<UnixEntry> entries;
- std::vector<std::string> lines = absl::StrSplit(content, '\n');
- std::cerr << "<contents of /proc/net/unix>" << std::endl;
- for (const std::string& line : lines) {
- // Emit the proc entry to the test output to provide context for the test
- // results.
- std::cerr << line << std::endl;
-
- if (!skipped_header) {
- EXPECT_EQ(line, kProcNetUnixHeader);
- skipped_header = true;
- continue;
- }
- if (line.empty()) {
- continue;
- }
-
- // Parse a single entry from /proc/net/unix.
- //
- // Sample file:
- //
- // clang-format off
- //
- // Num RefCount Protocol Flags Type St Inode Path"
- // ffffa130e7041c00: 00000002 00000000 00010000 0001 01 1299413685 /tmp/control_server/13293772586877554487
- // ffffa14f547dc400: 00000002 00000000 00010000 0001 01 3793 @remote_coredump
- //
- // clang-format on
- //
- // Note that, as in the second sample entry, the inode number can be padded
- // with spaces, so we need to handle it separately during parsing. See
- // net/unix/af_unix.c:unix_seq_show() for how these entries are produced. In
- // particular, only the inode field is padded with spaces.
- UnixEntry entry;
-
- // Process the first 6 fields, up to but not including "Inode".
- std::vector<std::string> fields =
- absl::StrSplit(line, absl::MaxSplits(' ', 6));
-
- if (fields.size() < 7) {
- return PosixError(EINVAL, StrFormat("Invalid entry: '%s'\n", line));
- }
-
- // AtoiBase can't handle the ':' in the "Num" field, so strip it out.
- std::vector<std::string> addr = absl::StrSplit(fields[0], ':');
- ASSIGN_OR_RETURN_ERRNO(entry.addr, AtoiBase(addr[0], 16));
-
- ASSIGN_OR_RETURN_ERRNO(entry.refs, AtoiBase(fields[1], 16));
- ASSIGN_OR_RETURN_ERRNO(entry.protocol, AtoiBase(fields[2], 16));
- ASSIGN_OR_RETURN_ERRNO(entry.flags, AtoiBase(fields[3], 16));
- ASSIGN_OR_RETURN_ERRNO(entry.type, AtoiBase(fields[4], 16));
- ASSIGN_OR_RETURN_ERRNO(entry.state, AtoiBase(fields[5], 16));
-
- absl::string_view rest = absl::StripAsciiWhitespace(fields[6]);
- fields = absl::StrSplit(rest, absl::MaxSplits(' ', 1));
- if (fields.empty()) {
- return PosixError(
- EINVAL, StrFormat("Invalid entry, missing 'Inode': '%s'\n", line));
- }
- ASSIGN_OR_RETURN_ERRNO(entry.inode, AtoiBase(fields[0], 10));
-
- entry.path = "";
- if (fields.size() > 1) {
- entry.path = fields[1];
- StripAbstractPathPadding(&entry.path);
- }
-
- entries.push_back(entry);
- }
- std::cerr << "<end of /proc/net/unix>" << std::endl;
-
- return entries;
-}
-
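To make the two-stage split above concrete, here is a worked illustration using the second sample entry from the comment, written with the inode padding that real kernels emit (editorial note, not from the original file):

// line = "ffffa14f547dc400: 00000002 00000000 00010000 0001 01     3793 @remote_coredump"
// absl::StrSplit(line, absl::MaxSplits(' ', 6)) splits at the first six spaces,
// so the seventh field keeps the inode padding and the path:
//   fields[6] = "    3793 @remote_coredump"
// StripAsciiWhitespace() followed by MaxSplits(' ', 1) then yields:
//   inode = 3793, path = "@remote_coredump"
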
-// Finds the first entry in 'entries' for which 'predicate' returns true.
-// Returns true on match, and sets 'match' to a copy of the matching entry.
-bool FindBy(std::vector<UnixEntry> entries, UnixEntry* match,
- std::function<bool(const UnixEntry&)> predicate) {
- for (size_t i = 0; i < entries.size(); ++i) {
- if (predicate(entries[i])) {
- *match = entries[i];
- return true;
- }
- }
- return false;
-}
-
-bool FindByPath(std::vector<UnixEntry> entries, UnixEntry* match,
- const std::string& path) {
- return FindBy(entries, match,
- [path](const UnixEntry& e) { return e.path == path; });
-}
-
-TEST(ProcNetUnix, Exists) {
- const std::string content =
- ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/net/unix"));
- const std::string header_line = StrCat(kProcNetUnixHeader, "\n");
- // We could have arbitrary sockets on the system, so just check the header.
- EXPECT_THAT(content, ::testing::StartsWith(header_line));
-}
-
-TEST(ProcNetUnix, FilesystemBindAcceptConnect) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(
- FilesystemBoundUnixDomainSocketPair(SOCK_STREAM).Create());
-
- std::string path1 = ExtractPath(sockets->first_addr());
- std::string path2 = ExtractPath(sockets->second_addr());
- std::cerr << StreamFormat("Server socket address (path1): %s\n", path1);
- std::cerr << StreamFormat("Client socket address (path2): %s\n", path2);
-
- std::vector<UnixEntry> entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcNetUnixEntries());
-
- // The server-side socket's path is listed in the socket entry...
- UnixEntry s1;
- EXPECT_TRUE(FindByPath(entries, &s1, path1));
-
- // ... but the client-side socket's path is not.
- UnixEntry s2;
- EXPECT_FALSE(FindByPath(entries, &s2, path2));
-}
-
-TEST(ProcNetUnix, AbstractBindAcceptConnect) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(
- AbstractBoundUnixDomainSocketPair(SOCK_STREAM).Create());
-
- std::string path1 = ExtractPath(sockets->first_addr());
- std::string path2 = ExtractPath(sockets->second_addr());
- std::cerr << StreamFormat("Server socket address (path1): '%s'\n", path1);
- std::cerr << StreamFormat("Client socket address (path2): '%s'\n", path2);
-
- std::vector<UnixEntry> entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcNetUnixEntries());
-
- // The server-side socket's path is listed in the socket entry...
- UnixEntry s1;
- EXPECT_TRUE(FindByPath(entries, &s1, path1));
-
- // ... but the client-side socket's path is not.
- UnixEntry s2;
- EXPECT_FALSE(FindByPath(entries, &s2, path2));
-}
-
-TEST(ProcNetUnix, SocketPair) {
- auto sockets =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_STREAM).Create());
-
- std::vector<UnixEntry> entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcNetUnixEntries());
- EXPECT_GE(entries.size(), 2);
-}
-
-TEST(ProcNetUnix, StreamSocketStateUnconnectedOnBind) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(
- AbstractUnboundUnixDomainSocketPair(SOCK_STREAM).Create());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- std::vector<UnixEntry> entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcNetUnixEntries());
-
- const std::string address = ExtractPath(sockets->first_addr());
- UnixEntry bind_entry;
- ASSERT_TRUE(FindByPath(entries, &bind_entry, address));
- EXPECT_EQ(bind_entry.state, SS_UNCONNECTED);
-}
-
-TEST(ProcNetUnix, StreamSocketStateUnconnectedOnListen) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(
- AbstractUnboundUnixDomainSocketPair(SOCK_STREAM).Create());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- std::vector<UnixEntry> entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcNetUnixEntries());
-
- const std::string address = ExtractPath(sockets->first_addr());
- UnixEntry bind_entry;
- ASSERT_TRUE(FindByPath(entries, &bind_entry, address));
- EXPECT_EQ(bind_entry.state, SS_UNCONNECTED);
-
- ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds());
-
- entries = ASSERT_NO_ERRNO_AND_VALUE(ProcNetUnixEntries());
- UnixEntry listen_entry;
- ASSERT_TRUE(
- FindByPath(entries, &listen_entry, ExtractPath(sockets->first_addr())));
- EXPECT_EQ(listen_entry.state, SS_UNCONNECTED);
- // The bind and listen entries should refer to the same socket.
- EXPECT_EQ(listen_entry.inode, bind_entry.inode);
-}
-
-TEST(ProcNetUnix, StreamSocketStateConnectedOnAccept) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(
- AbstractUnboundUnixDomainSocketPair(SOCK_STREAM).Create());
- const std::string address = ExtractPath(sockets->first_addr());
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
- ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds());
- std::vector<UnixEntry> entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcNetUnixEntries());
- UnixEntry listen_entry;
- ASSERT_TRUE(
- FindByPath(entries, &listen_entry, ExtractPath(sockets->first_addr())));
-
- ASSERT_THAT(connect(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- int clientfd;
- ASSERT_THAT(clientfd = accept(sockets->first_fd(), nullptr, nullptr),
- SyscallSucceeds());
- auto cleanup = Cleanup(
- [clientfd]() { ASSERT_THAT(close(clientfd), SyscallSucceeds()); });
-
- // Find the entry for the accepted socket. UDS proc entries don't have a
- // remote address, so we distinguish the accepted socket from the listen
- // socket by checking for a different inode.
- entries = ASSERT_NO_ERRNO_AND_VALUE(ProcNetUnixEntries());
- UnixEntry accept_entry;
- ASSERT_TRUE(FindBy(
- entries, &accept_entry, [address, listen_entry](const UnixEntry& e) {
- return e.path == address && e.inode != listen_entry.inode;
- }));
- EXPECT_EQ(accept_entry.state, SS_CONNECTED);
- // Listen entry should still be in SS_UNCONNECTED state.
- ASSERT_TRUE(FindBy(entries, &listen_entry,
- [&sockets, listen_entry](const UnixEntry& e) {
- return e.path == ExtractPath(sockets->first_addr()) &&
- e.inode == listen_entry.inode;
- }));
- EXPECT_EQ(listen_entry.state, SS_UNCONNECTED);
-}
-
-TEST(ProcNetUnix, DgramSocketStateDisconnectingOnBind) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(
- AbstractUnboundUnixDomainSocketPair(SOCK_DGRAM).Create());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- std::vector<UnixEntry> entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcNetUnixEntries());
- const std::string address = ExtractPath(sockets->first_addr());
- UnixEntry bind_entry;
- ASSERT_TRUE(FindByPath(entries, &bind_entry, address));
- EXPECT_EQ(bind_entry.state, SS_UNCONNECTED);
-}
-
-TEST(ProcNetUnix, DgramSocketStateConnectingOnConnect) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(
- AbstractUnboundUnixDomainSocketPair(SOCK_DGRAM).Create());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- std::vector<UnixEntry> entries =
- ASSERT_NO_ERRNO_AND_VALUE(ProcNetUnixEntries());
- const std::string address = ExtractPath(sockets->first_addr());
- UnixEntry bind_entry;
- ASSERT_TRUE(FindByPath(entries, &bind_entry, address));
-
- ASSERT_THAT(connect(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-}
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/proc_pid_oomscore.cc b/test/syscalls/linux/proc_pid_oomscore.cc
deleted file mode 100644
index 707821a3f..000000000
--- a/test/syscalls/linux/proc_pid_oomscore.cc
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-
-#include <exception>
-#include <iostream>
-#include <string>
-
-#include "test/util/fs_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-PosixErrorOr<int> ReadProcNumber(std::string path) {
- ASSIGN_OR_RETURN_ERRNO(std::string contents, GetContents(path));
- EXPECT_EQ(contents[contents.length() - 1], '\n');
-
- int num;
- if (!absl::SimpleAtoi(contents, &num)) {
- return PosixError(EINVAL, "invalid value: " + contents);
- }
-
- return num;
-}
-
-TEST(ProcPidOomscoreTest, BasicRead) {
- auto const oom_score =
- ASSERT_NO_ERRNO_AND_VALUE(ReadProcNumber("/proc/self/oom_score"));
- EXPECT_LE(oom_score, 1000);
- EXPECT_GE(oom_score, -1000);
-}
-
-TEST(ProcPidOomscoreAdjTest, BasicRead) {
- auto const oom_score =
- ASSERT_NO_ERRNO_AND_VALUE(ReadProcNumber("/proc/self/oom_score_adj"));
-
- // oom_score_adj defaults to 0.
- EXPECT_EQ(oom_score, 0);
-}
-
-TEST(ProcPidOomscoreAdjTest, BasicWrite) {
- constexpr int test_value = 7;
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/proc/self/oom_score_adj", O_WRONLY));
- ASSERT_THAT(
- RetryEINTR(write)(fd.get(), std::to_string(test_value).c_str(), 1),
- SyscallSucceeds());
-
- auto const oom_score =
- ASSERT_NO_ERRNO_AND_VALUE(ReadProcNumber("/proc/self/oom_score_adj"));
- EXPECT_EQ(oom_score, test_value);
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/proc_pid_smaps.cc b/test/syscalls/linux/proc_pid_smaps.cc
deleted file mode 100644
index 738923822..000000000
--- a/test/syscalls/linux/proc_pid_smaps.cc
+++ /dev/null
@@ -1,468 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include <algorithm>
-#include <iostream>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "absl/container/flat_hash_set.h"
-#include "absl/strings/str_cat.h"
-#include "absl/strings/str_format.h"
-#include "absl/strings/str_split.h"
-#include "absl/strings/string_view.h"
-#include "absl/types/optional.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/memory_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/proc_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-using ::testing::Contains;
-using ::testing::ElementsAreArray;
-using ::testing::IsSupersetOf;
-using ::testing::Not;
-using ::testing::Optional;
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-struct ProcPidSmapsEntry {
- ProcMapsEntry maps_entry;
-
- // These fields should always exist, as they were included in e070ad49f311
- // "[PATCH] add /proc/pid/smaps".
- size_t size_kb;
- size_t rss_kb;
- size_t shared_clean_kb;
- size_t shared_dirty_kb;
- size_t private_clean_kb;
- size_t private_dirty_kb;
-
- // These fields were added later and may not be present.
- absl::optional<size_t> pss_kb;
- absl::optional<size_t> referenced_kb;
- absl::optional<size_t> anonymous_kb;
- absl::optional<size_t> anon_huge_pages_kb;
- absl::optional<size_t> shared_hugetlb_kb;
- absl::optional<size_t> private_hugetlb_kb;
- absl::optional<size_t> swap_kb;
- absl::optional<size_t> swap_pss_kb;
- absl::optional<size_t> kernel_page_size_kb;
- absl::optional<size_t> mmu_page_size_kb;
- absl::optional<size_t> locked_kb;
-
- // Caution: "Note that there is no guarantee that every flag and associated
- // mnemonic will be present in all further kernel releases. Things get
- // changed, the flags may be vanished or the reverse -- new added." - Linux
- // Documentation/filesystems/proc.txt, on VmFlags. Avoid checking for any
- // flags that are not extremely well-established.
- absl::optional<std::vector<std::string>> vm_flags;
-};
-
-// Given the value part of a /proc/[pid]/smaps field containing a value in kB
-// (for example, " 4 kB"), returns the value in kB (in this example, 4).
-PosixErrorOr<size_t> SmapsValueKb(absl::string_view value) {
- // TODO(jamieliu): consider using RE2 or <regex> here.
- std::pair<absl::string_view, absl::string_view> parts =
- absl::StrSplit(value, ' ', absl::SkipEmpty());
- if (parts.second != "kB") {
- return PosixError(EINVAL,
- absl::StrCat("invalid smaps field value: ", value));
- }
- ASSIGN_OR_RETURN_ERRNO(auto val_kb, Atoi<size_t>(parts.first));
- return val_kb;
-}
-
-PosixErrorOr<std::vector<ProcPidSmapsEntry>> ParseProcPidSmaps(
- absl::string_view contents) {
- std::vector<ProcPidSmapsEntry> entries;
- absl::optional<ProcPidSmapsEntry> entry;
- bool have_size_kb = false;
- bool have_rss_kb = false;
- bool have_shared_clean_kb = false;
- bool have_shared_dirty_kb = false;
- bool have_private_clean_kb = false;
- bool have_private_dirty_kb = false;
-
- auto const finish_entry = [&] {
- if (entry) {
- if (!have_size_kb) {
- return PosixError(EINVAL, "smaps entry is missing Size");
- }
- if (!have_rss_kb) {
- return PosixError(EINVAL, "smaps entry is missing Rss");
- }
- if (!have_shared_clean_kb) {
- return PosixError(EINVAL, "smaps entry is missing Shared_Clean");
- }
- if (!have_shared_dirty_kb) {
- return PosixError(EINVAL, "smaps entry is missing Shared_Dirty");
- }
- if (!have_private_clean_kb) {
- return PosixError(EINVAL, "smaps entry is missing Private_Clean");
- }
- if (!have_private_dirty_kb) {
- return PosixError(EINVAL, "smaps entry is missing Private_Dirty");
- }
- // std::move(entry.value()) instead of std::move(entry).value(), because
- // otherwise tools may report a "use-after-move" warning, which is
- // spurious because entry.emplace() below resets entry to a new
- // ProcPidSmapsEntry.
- entries.emplace_back(std::move(entry.value()));
- }
- entry.emplace();
- have_size_kb = false;
- have_rss_kb = false;
- have_shared_clean_kb = false;
- have_shared_dirty_kb = false;
- have_private_clean_kb = false;
- have_private_dirty_kb = false;
- return NoError();
- };
-
- // Holds key/value pairs from smaps field lines. Declared here so it can be
- // captured by reference by the following lambdas.
- std::vector<absl::string_view> key_value;
-
- auto const on_required_field_kb = [&](size_t* field, bool* have_field) {
- if (*have_field) {
- return PosixError(
- EINVAL,
- absl::StrFormat("smaps entry has duplicate %s line", key_value[0]));
- }
- ASSIGN_OR_RETURN_ERRNO(*field, SmapsValueKb(key_value[1]));
- *have_field = true;
- return NoError();
- };
-
- auto const on_optional_field_kb = [&](absl::optional<size_t>* field) {
- if (*field) {
- return PosixError(
- EINVAL,
- absl::StrFormat("smaps entry has duplicate %s line", key_value[0]));
- }
- ASSIGN_OR_RETURN_ERRNO(*field, SmapsValueKb(key_value[1]));
- return NoError();
- };
-
- absl::flat_hash_set<std::string> unknown_fields;
- auto const on_unknown_field = [&] {
- absl::string_view key = key_value[0];
- // Don't mention unknown fields more than once.
- if (unknown_fields.count(key)) {
- return;
- }
- unknown_fields.insert(std::string(key));
- std::cerr << "skipping unknown smaps field " << key << std::endl;
- };
-
- auto lines = absl::StrSplit(contents, '\n', absl::SkipEmpty());
- for (absl::string_view l : lines) {
- // Is this line a valid /proc/[pid]/maps entry?
- auto maybe_maps_entry = ParseProcMapsLine(l);
- if (maybe_maps_entry.ok()) {
- // This marks the beginning of a new /proc/[pid]/smaps entry.
- RETURN_IF_ERRNO(finish_entry());
- entry->maps_entry = std::move(maybe_maps_entry).ValueOrDie();
- continue;
- }
- // Otherwise it's a field in an existing /proc/[pid]/smaps entry of the form
- // "key:value" (where value in practice will be preceded by a variable
- // amount of whitespace).
- if (!entry) {
- std::cerr << "smaps line not considered a maps line: "
- << maybe_maps_entry.error().message() << std::endl;
- return PosixError(
- EINVAL,
- absl::StrCat("smaps field line without preceding maps line: ", l));
- }
- key_value = absl::StrSplit(l, absl::MaxSplits(':', 1));
- if (key_value.size() != 2) {
- return PosixError(EINVAL, absl::StrCat("invalid smaps field line: ", l));
- }
- absl::string_view const key = key_value[0];
- if (key == "Size") {
- RETURN_IF_ERRNO(on_required_field_kb(&entry->size_kb, &have_size_kb));
- } else if (key == "Rss") {
- RETURN_IF_ERRNO(on_required_field_kb(&entry->rss_kb, &have_rss_kb));
- } else if (key == "Shared_Clean") {
- RETURN_IF_ERRNO(
- on_required_field_kb(&entry->shared_clean_kb, &have_shared_clean_kb));
- } else if (key == "Shared_Dirty") {
- RETURN_IF_ERRNO(
- on_required_field_kb(&entry->shared_dirty_kb, &have_shared_dirty_kb));
- } else if (key == "Private_Clean") {
- RETURN_IF_ERRNO(on_required_field_kb(&entry->private_clean_kb,
- &have_private_clean_kb));
- } else if (key == "Private_Dirty") {
- RETURN_IF_ERRNO(on_required_field_kb(&entry->private_dirty_kb,
- &have_private_dirty_kb));
- } else if (key == "Pss") {
- RETURN_IF_ERRNO(on_optional_field_kb(&entry->pss_kb));
- } else if (key == "Referenced") {
- RETURN_IF_ERRNO(on_optional_field_kb(&entry->referenced_kb));
- } else if (key == "Anonymous") {
- RETURN_IF_ERRNO(on_optional_field_kb(&entry->anonymous_kb));
- } else if (key == "AnonHugePages") {
- RETURN_IF_ERRNO(on_optional_field_kb(&entry->anon_huge_pages_kb));
- } else if (key == "Shared_Hugetlb") {
- RETURN_IF_ERRNO(on_optional_field_kb(&entry->shared_hugetlb_kb));
- } else if (key == "Private_Hugetlb") {
- RETURN_IF_ERRNO(on_optional_field_kb(&entry->private_hugetlb_kb));
- } else if (key == "Swap") {
- RETURN_IF_ERRNO(on_optional_field_kb(&entry->swap_kb));
- } else if (key == "SwapPss") {
- RETURN_IF_ERRNO(on_optional_field_kb(&entry->swap_pss_kb));
- } else if (key == "KernelPageSize") {
- RETURN_IF_ERRNO(on_optional_field_kb(&entry->kernel_page_size_kb));
- } else if (key == "MMUPageSize") {
- RETURN_IF_ERRNO(on_optional_field_kb(&entry->mmu_page_size_kb));
- } else if (key == "Locked") {
- RETURN_IF_ERRNO(on_optional_field_kb(&entry->locked_kb));
- } else if (key == "VmFlags") {
- if (entry->vm_flags) {
- return PosixError(EINVAL, "duplicate VmFlags line");
- }
- entry->vm_flags = absl::StrSplit(key_value[1], ' ', absl::SkipEmpty());
- } else {
- on_unknown_field();
- }
- }
- RETURN_IF_ERRNO(finish_entry());
- return entries;
-};
-
-TEST(ParseProcPidSmapsTest, Correctness) {
- auto entries = ASSERT_NO_ERRNO_AND_VALUE(
- ParseProcPidSmaps("0-10000 rw-s 00000000 00:00 0 "
- " /dev/zero (deleted)\n"
- "Size: 0 kB\n"
- "Rss: 1 kB\n"
- "Pss: 2 kB\n"
- "Shared_Clean: 3 kB\n"
- "Shared_Dirty: 4 kB\n"
- "Private_Clean: 5 kB\n"
- "Private_Dirty: 6 kB\n"
- "Referenced: 7 kB\n"
- "Anonymous: 8 kB\n"
- "AnonHugePages: 9 kB\n"
- "Shared_Hugetlb: 10 kB\n"
- "Private_Hugetlb: 11 kB\n"
- "Swap: 12 kB\n"
- "SwapPss: 13 kB\n"
- "KernelPageSize: 14 kB\n"
- "MMUPageSize: 15 kB\n"
- "Locked: 16 kB\n"
- "FutureUnknownKey: 17 kB\n"
- "VmFlags: rd wr sh mr mw me ms lo ?? sd \n"));
- ASSERT_EQ(entries.size(), 1);
- auto& entry = entries[0];
- EXPECT_EQ(entry.maps_entry.filename, "/dev/zero (deleted)");
- EXPECT_EQ(entry.size_kb, 0);
- EXPECT_EQ(entry.rss_kb, 1);
- EXPECT_THAT(entry.pss_kb, Optional(2));
- EXPECT_EQ(entry.shared_clean_kb, 3);
- EXPECT_EQ(entry.shared_dirty_kb, 4);
- EXPECT_EQ(entry.private_clean_kb, 5);
- EXPECT_EQ(entry.private_dirty_kb, 6);
- EXPECT_THAT(entry.referenced_kb, Optional(7));
- EXPECT_THAT(entry.anonymous_kb, Optional(8));
- EXPECT_THAT(entry.anon_huge_pages_kb, Optional(9));
- EXPECT_THAT(entry.shared_hugetlb_kb, Optional(10));
- EXPECT_THAT(entry.private_hugetlb_kb, Optional(11));
- EXPECT_THAT(entry.swap_kb, Optional(12));
- EXPECT_THAT(entry.swap_pss_kb, Optional(13));
- EXPECT_THAT(entry.kernel_page_size_kb, Optional(14));
- EXPECT_THAT(entry.mmu_page_size_kb, Optional(15));
- EXPECT_THAT(entry.locked_kb, Optional(16));
- EXPECT_THAT(entry.vm_flags,
- Optional(ElementsAreArray({"rd", "wr", "sh", "mr", "mw", "me",
- "ms", "lo", "??", "sd"})));
-}
-
-// Returns the unique entry in entries containing the given address.
-PosixErrorOr<ProcPidSmapsEntry> FindUniqueSmapsEntry(
- std::vector<ProcPidSmapsEntry> const& entries, uintptr_t addr) {
- auto const pred = [&](ProcPidSmapsEntry const& entry) {
- return entry.maps_entry.start <= addr && addr < entry.maps_entry.end;
- };
- auto const it = std::find_if(entries.begin(), entries.end(), pred);
- if (it == entries.end()) {
- return PosixError(EINVAL,
- absl::StrFormat("no entry contains address %#x", addr));
- }
- auto const it2 = std::find_if(it + 1, entries.end(), pred);
- if (it2 != entries.end()) {
- return PosixError(
- EINVAL,
- absl::StrFormat("overlapping entries [%#x-%#x) and [%#x-%#x) both "
- "contain address %#x",
- it->maps_entry.start, it->maps_entry.end,
- it2->maps_entry.start, it2->maps_entry.end, addr));
- }
- return *it;
-}
-
-PosixErrorOr<std::vector<ProcPidSmapsEntry>> ReadProcSelfSmaps() {
- ASSIGN_OR_RETURN_ERRNO(std::string contents, GetContents("/proc/self/smaps"));
- return ParseProcPidSmaps(contents);
-}
-
-TEST(ProcPidSmapsTest, SharedAnon) {
- // Map with MAP_POPULATE so we get some RSS.
- Mapping const m = ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(
- 2 * kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE));
- auto const entries = ASSERT_NO_ERRNO_AND_VALUE(ReadProcSelfSmaps());
- auto const entry =
- ASSERT_NO_ERRNO_AND_VALUE(FindUniqueSmapsEntry(entries, m.addr()));
-
- EXPECT_EQ(entry.size_kb, m.len() / 1024);
- // It's possible that populated pages have been swapped out, so RSS might be
- // less than size.
- EXPECT_LE(entry.rss_kb, entry.size_kb);
-
- if (entry.pss_kb) {
- // PSS should be exactly equal to RSS since no other address spaces should
- // be sharing our new mapping.
- EXPECT_EQ(entry.pss_kb.value(), entry.rss_kb);
- }
-
- // "Shared" and "private" in smaps refers to whether or not *physical pages*
- // are shared; thus all pages in our MAP_SHARED mapping should nevertheless
- // be private.
- EXPECT_EQ(entry.shared_clean_kb, 0);
- EXPECT_EQ(entry.shared_dirty_kb, 0);
- EXPECT_EQ(entry.private_clean_kb + entry.private_dirty_kb, entry.rss_kb)
- << "Private_Clean = " << entry.private_clean_kb
- << " kB, Private_Dirty = " << entry.private_dirty_kb << " kB";
-
- // Shared anonymous mappings are implemented as a shmem file, so their pages
- // are not PageAnon.
- if (entry.anonymous_kb) {
- EXPECT_EQ(entry.anonymous_kb.value(), 0);
- }
-
- if (entry.vm_flags) {
- EXPECT_THAT(entry.vm_flags.value(),
- IsSupersetOf({"rd", "wr", "sh", "mr", "mw", "me", "ms"}));
- EXPECT_THAT(entry.vm_flags.value(), Not(Contains("ex")));
- }
-}
-
-TEST(ProcPidSmapsTest, PrivateAnon) {
- // Map with MAP_POPULATE so we get some RSS.
- Mapping const m = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(2 * kPageSize, PROT_WRITE, MAP_PRIVATE | MAP_POPULATE));
- auto const entries = ASSERT_NO_ERRNO_AND_VALUE(ReadProcSelfSmaps());
- auto const entry =
- ASSERT_NO_ERRNO_AND_VALUE(FindUniqueSmapsEntry(entries, m.addr()));
-
- // It's possible that our mapping was merged with another vma, so the smaps
- // entry might be bigger than our original mapping.
- EXPECT_GE(entry.size_kb, m.len() / 1024);
- EXPECT_LE(entry.rss_kb, entry.size_kb);
- if (entry.pss_kb) {
- EXPECT_LE(entry.pss_kb.value(), entry.rss_kb);
- }
-
- if (entry.anonymous_kb) {
- EXPECT_EQ(entry.anonymous_kb.value(), entry.rss_kb);
- }
-
- if (entry.vm_flags) {
- EXPECT_THAT(entry.vm_flags.value(), IsSupersetOf({"wr", "mr", "mw", "me"}));
- // We passed PROT_WRITE to mmap. On at least x86, the mapping is in
- // practice readable because there is no way to configure the MMU to make
- // pages writable but not readable. However, VmFlags should reflect the
- // flags set on the VMA, so "rd" (VM_READ) should not appear in VmFlags.
- EXPECT_THAT(entry.vm_flags.value(), Not(Contains("rd")));
- EXPECT_THAT(entry.vm_flags.value(), Not(Contains("ex")));
- EXPECT_THAT(entry.vm_flags.value(), Not(Contains("sh")));
- EXPECT_THAT(entry.vm_flags.value(), Not(Contains("ms")));
- }
-}
-
-TEST(ProcPidSmapsTest, SharedReadOnlyFile) {
- size_t const kFileSize = kPageSize;
-
- auto const temp_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- ASSERT_THAT(truncate(temp_file.path().c_str(), kFileSize), SyscallSucceeds());
- auto const fd = ASSERT_NO_ERRNO_AND_VALUE(Open(temp_file.path(), O_RDONLY));
-
- auto const m = ASSERT_NO_ERRNO_AND_VALUE(Mmap(
- nullptr, kFileSize, PROT_READ, MAP_SHARED | MAP_POPULATE, fd.get(), 0));
- auto const entries = ASSERT_NO_ERRNO_AND_VALUE(ReadProcSelfSmaps());
- auto const entry =
- ASSERT_NO_ERRNO_AND_VALUE(FindUniqueSmapsEntry(entries, m.addr()));
-
- // Most of the same logic as the SharedAnon case applies.
- EXPECT_EQ(entry.size_kb, kFileSize / 1024);
- EXPECT_LE(entry.rss_kb, entry.size_kb);
- if (entry.pss_kb) {
- EXPECT_EQ(entry.pss_kb.value(), entry.rss_kb);
- }
- EXPECT_EQ(entry.shared_clean_kb, 0);
- EXPECT_EQ(entry.shared_dirty_kb, 0);
- EXPECT_EQ(entry.private_clean_kb + entry.private_dirty_kb, entry.rss_kb)
- << "Private_Clean = " << entry.private_clean_kb
- << " kB, Private_Dirty = " << entry.private_dirty_kb << " kB";
- if (entry.anonymous_kb) {
- EXPECT_EQ(entry.anonymous_kb.value(), 0);
- }
-
- if (entry.vm_flags) {
- EXPECT_THAT(entry.vm_flags.value(), IsSupersetOf({"rd", "mr", "me", "ms"}));
- EXPECT_THAT(entry.vm_flags.value(), Not(Contains("wr")));
- EXPECT_THAT(entry.vm_flags.value(), Not(Contains("ex")));
- // Because the mapped file was opened O_RDONLY, the VMA is !VM_MAYWRITE and
- // also !VM_SHARED.
- EXPECT_THAT(entry.vm_flags.value(), Not(Contains("sh")));
- EXPECT_THAT(entry.vm_flags.value(), Not(Contains("mw")));
- }
-}
-
-// Tests that gVisor's /proc/[pid]/smaps provides all of the fields we expect it
-// to, which as of this writing is all fields provided by Linux 4.4.
-TEST(ProcPidSmapsTest, GvisorFields) {
- SKIP_IF(!IsRunningOnGvisor());
- auto const entries = ASSERT_NO_ERRNO_AND_VALUE(ReadProcSelfSmaps());
- for (auto const& entry : entries) {
- EXPECT_TRUE(entry.pss_kb);
- EXPECT_TRUE(entry.referenced_kb);
- EXPECT_TRUE(entry.anonymous_kb);
- EXPECT_TRUE(entry.anon_huge_pages_kb);
- EXPECT_TRUE(entry.shared_hugetlb_kb);
- EXPECT_TRUE(entry.private_hugetlb_kb);
- EXPECT_TRUE(entry.swap_kb);
- EXPECT_TRUE(entry.swap_pss_kb);
- EXPECT_THAT(entry.kernel_page_size_kb, Optional(kPageSize / 1024));
- EXPECT_THAT(entry.mmu_page_size_kb, Optional(kPageSize / 1024));
- EXPECT_TRUE(entry.locked_kb);
- EXPECT_TRUE(entry.vm_flags);
- }
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/proc_pid_uid_gid_map.cc b/test/syscalls/linux/proc_pid_uid_gid_map.cc
deleted file mode 100644
index c030592c8..000000000
--- a/test/syscalls/linux/proc_pid_uid_gid_map.cc
+++ /dev/null
@@ -1,316 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <sched.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <functional>
-#include <string>
-#include <tuple>
-#include <utility>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "absl/strings/ascii.h"
-#include "absl/strings/str_cat.h"
-#include "absl/strings/str_split.h"
-#include "test/util/capability_util.h"
-#include "test/util/cleanup.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/logging.h"
-#include "test/util/multiprocess_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/save_util.h"
-#include "test/util/test_util.h"
-#include "test/util/time_util.h"
-
-namespace gvisor {
-namespace testing {
-
-PosixErrorOr<int> InNewUserNamespace(const std::function<void()>& fn) {
- return InForkedProcess([&] {
- TEST_PCHECK(unshare(CLONE_NEWUSER) == 0);
- MaybeSave();
- fn();
- });
-}
-
-PosixErrorOr<std::tuple<pid_t, Cleanup>> CreateProcessInNewUserNamespace() {
- int pipefd[2];
- if (pipe(pipefd) < 0) {
- return PosixError(errno, "pipe failed");
- }
- const auto cleanup_pipe_read =
- Cleanup([&] { EXPECT_THAT(close(pipefd[0]), SyscallSucceeds()); });
- auto cleanup_pipe_write =
- Cleanup([&] { EXPECT_THAT(close(pipefd[1]), SyscallSucceeds()); });
- pid_t child_pid = fork();
- if (child_pid < 0) {
- return PosixError(errno, "fork failed");
- }
- if (child_pid == 0) {
- // Close our copy of the pipe's read end, which doesn't really matter.
- TEST_PCHECK(close(pipefd[0]) >= 0);
- TEST_PCHECK(unshare(CLONE_NEWUSER) == 0);
- MaybeSave();
- // Indicate that we've switched namespaces by unblocking the parent's read.
- TEST_PCHECK(close(pipefd[1]) >= 0);
- while (true) {
- SleepSafe(absl::Minutes(1));
- }
- }
- auto cleanup_child = Cleanup([child_pid] {
- EXPECT_THAT(kill(child_pid, SIGKILL), SyscallSucceeds());
- int status;
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL)
- << "status = " << status;
- });
- // Close our copy of the pipe's write end, then wait for the child to close
- // its copy, indicating that it's switched namespaces.
- cleanup_pipe_write.Release()();
- char buf;
- if (RetryEINTR(read)(pipefd[0], &buf, 1) < 0) {
- return PosixError(errno, "reading from pipe failed");
- }
- MaybeSave();
- return std::make_tuple(child_pid, std::move(cleanup_child));
-}
-
-// TEST_CHECK-fails on error, since this function is used in contexts that
-// require async-signal-safety.
-void DenySetgroupsByPath(const char* path) {
- int fd = open(path, O_WRONLY);
- if (fd < 0 && errno == ENOENT) {
- // On kernels where this file doesn't exist, writing "deny" to it isn't
- // required before writing to gid_map.
- return;
- }
- TEST_PCHECK(fd >= 0);
- MaybeSave();
- char deny[] = "deny";
- TEST_PCHECK(write(fd, deny, sizeof(deny)) == sizeof(deny));
- MaybeSave();
- TEST_PCHECK(close(fd) == 0);
-}
-
-void DenySelfSetgroups() { DenySetgroupsByPath("/proc/self/setgroups"); }
-
-void DenyPidSetgroups(pid_t pid) {
- DenySetgroupsByPath(absl::StrCat("/proc/", pid, "/setgroups").c_str());
-}
-
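These helpers exist because of a write-ordering rule: since Linux 3.19, a process without CAP_SETGID must write "deny" to /proc/[pid]/setgroups before it is allowed to write /proc/[pid]/gid_map. A minimal sketch of the usual sequence (editorial illustration; assumes a single-threaded caller that is permitted to create a user namespace):

void MapOwnGidIdentity() {
  // Capture the GID before unsharing; after unshare(CLONE_NEWUSER) and before
  // a mapping is written, getgid() would report the overflow GID instead.
  const gid_t gid = getgid();
  TEST_PCHECK(unshare(CLONE_NEWUSER) == 0);
  DenySelfSetgroups();
  const std::string line = absl::StrCat(gid, " ", gid, " 1");
  const int fd = open("/proc/self/gid_map", O_WRONLY);
  TEST_PCHECK(fd >= 0);
  TEST_PCHECK(write(fd, line.c_str(), line.size()) ==
              static_cast<ssize_t>(line.size()));
  TEST_PCHECK(close(fd) == 0);
}
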
-// Returns a valid UID/GID that isn't id.
-uint32_t another_id(uint32_t id) { return (id + 1) % 65535; }
-
-struct TestParam {
- std::string desc;
- int cap;
- std::function<std::string(absl::string_view)> get_map_filename;
- std::function<uint32_t()> get_current_id;
-};
-
-std::string DescribeTestParam(const ::testing::TestParamInfo<TestParam>& info) {
- return info.param.desc;
-}
-
-std::vector<TestParam> UidGidMapTestParams() {
- return {TestParam{"UID", CAP_SETUID,
- [](absl::string_view pid) {
- return absl::StrCat("/proc/", pid, "/uid_map");
- },
- []() -> uint32_t { return getuid(); }},
- TestParam{"GID", CAP_SETGID,
- [](absl::string_view pid) {
- return absl::StrCat("/proc/", pid, "/gid_map");
- },
- []() -> uint32_t { return getgid(); }}};
-}
-
-class ProcUidGidMapTest : public ::testing::TestWithParam<TestParam> {
- protected:
- uint32_t CurrentID() { return GetParam().get_current_id(); }
-};
-
-class ProcSelfUidGidMapTest : public ProcUidGidMapTest {
- protected:
- PosixErrorOr<int> InNewUserNamespaceWithMapFD(
- const std::function<void(int)>& fn) {
- std::string map_filename = GetParam().get_map_filename("self");
- return InNewUserNamespace([&] {
- int fd = open(map_filename.c_str(), O_RDWR);
- TEST_PCHECK(fd >= 0);
- MaybeSave();
- fn(fd);
- TEST_PCHECK(close(fd) == 0);
- });
- }
-};
-
-class ProcPidUidGidMapTest : public ProcUidGidMapTest {
- protected:
- PosixErrorOr<bool> HaveSetIDCapability() {
- return HaveCapability(GetParam().cap);
- }
-
- // Returns true if the caller is running in a user namespace with all IDs
- // mapped. This matters for tests that expect to successfully map arbitrary
- // IDs into a child user namespace, since even with CAP_SET*ID this is only
- // possible if those IDs are mapped into the current one.
- PosixErrorOr<bool> AllIDsMapped() {
- ASSIGN_OR_RETURN_ERRNO(std::string id_map,
- GetContents(GetParam().get_map_filename("self")));
- absl::StripTrailingAsciiWhitespace(&id_map);
- std::vector<std::string> id_map_parts =
- absl::StrSplit(id_map, ' ', absl::SkipEmpty());
- return id_map_parts == std::vector<std::string>({"0", "0", "4294967295"});
- }
-
- PosixErrorOr<FileDescriptor> OpenMapFile(pid_t pid) {
- return Open(GetParam().get_map_filename(absl::StrCat(pid)), O_RDWR);
- }
-};
-
-TEST_P(ProcSelfUidGidMapTest, IsInitiallyEmpty) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanCreateUserNamespace()));
- EXPECT_THAT(InNewUserNamespaceWithMapFD([](int fd) {
- char buf[64];
- TEST_PCHECK(read(fd, buf, sizeof(buf)) == 0);
- }),
- IsPosixErrorOkAndHolds(0));
-}
-
-TEST_P(ProcSelfUidGidMapTest, IdentityMapOwnID) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanCreateUserNamespace()));
- uint32_t id = CurrentID();
- std::string line = absl::StrCat(id, " ", id, " 1");
- EXPECT_THAT(
- InNewUserNamespaceWithMapFD([&](int fd) {
- DenySelfSetgroups();
- size_t n;
- TEST_PCHECK((n = write(fd, line.c_str(), line.size())) != -1);
- TEST_CHECK(n == line.size());
- }),
- IsPosixErrorOkAndHolds(0));
-}
-
-TEST_P(ProcSelfUidGidMapTest, TrailingNewlineAndNULIgnored) {
- // This is identical to IdentityMapOwnID, except that a trailing newline, NUL,
- // and an invalid (incomplete) map entry are appended to the valid entry. The
- // newline should be accepted, and everything after the NUL should be ignored.
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanCreateUserNamespace()));
- uint32_t id = CurrentID();
- std::string line = absl::StrCat(id, " ", id, " 1\n\0 4 3");
- EXPECT_THAT(
- InNewUserNamespaceWithMapFD([&](int fd) {
- DenySelfSetgroups();
- // The write should return the full size of the write, even though
- // characters after the NUL were ignored.
- size_t n;
- TEST_PCHECK((n = write(fd, line.c_str(), line.size())) != -1);
- TEST_CHECK(n == line.size());
- }),
- IsPosixErrorOkAndHolds(0));
-}
-
-TEST_P(ProcSelfUidGidMapTest, NonIdentityMapOwnID) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanCreateUserNamespace()));
- uint32_t id = CurrentID();
- uint32_t id2 = another_id(id);
- std::string line = absl::StrCat(id2, " ", id, " 1");
- EXPECT_THAT(
- InNewUserNamespaceWithMapFD([&](int fd) {
- DenySelfSetgroups();
- TEST_PCHECK(static_cast<long unsigned int>(
- write(fd, line.c_str(), line.size())) == line.size());
- }),
- IsPosixErrorOkAndHolds(0));
-}
-
-TEST_P(ProcSelfUidGidMapTest, MapOtherID) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanCreateUserNamespace()));
- // Whether or not we have CAP_SET*ID is irrelevant: the process running in the
- // new (child) user namespace has no capabilities in the current (parent) user
- // namespace, which is what mapping an ID other than our own would require.
- uint32_t id = CurrentID();
- uint32_t id2 = another_id(id);
- std::string line = absl::StrCat(id, " ", id2, " 1");
- EXPECT_THAT(InNewUserNamespaceWithMapFD([&](int fd) {
- DenySelfSetgroups();
- TEST_PCHECK(write(fd, line.c_str(), line.size()) < 0);
- TEST_CHECK(errno == EPERM);
- }),
- IsPosixErrorOkAndHolds(0));
-}
-
-INSTANTIATE_TEST_SUITE_P(All, ProcSelfUidGidMapTest,
- ::testing::ValuesIn(UidGidMapTestParams()),
- DescribeTestParam);
-
-TEST_P(ProcPidUidGidMapTest, MapOtherIDPrivileged) {
- // Like ProcSelfUidGidMapTest_MapOtherID, but since we have CAP_SET*ID in the
- // parent user namespace (this one), we can map IDs that aren't ours.
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanCreateUserNamespace()));
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveSetIDCapability()));
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(AllIDsMapped()));
-
- pid_t child_pid;
- Cleanup cleanup_child;
- std::tie(child_pid, cleanup_child) =
- ASSERT_NO_ERRNO_AND_VALUE(CreateProcessInNewUserNamespace());
-
- uint32_t id = CurrentID();
- uint32_t id2 = another_id(id);
- std::string line = absl::StrCat(id, " ", id2, " 1");
- DenyPidSetgroups(child_pid);
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(OpenMapFile(child_pid));
- EXPECT_THAT(write(fd.get(), line.c_str(), line.size()),
- SyscallSucceedsWithValue(line.size()));
-}
-
-TEST_P(ProcPidUidGidMapTest, MapAnyIDsPrivileged) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanCreateUserNamespace()));
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveSetIDCapability()));
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(AllIDsMapped()));
-
- pid_t child_pid;
- Cleanup cleanup_child;
- std::tie(child_pid, cleanup_child) =
- ASSERT_NO_ERRNO_AND_VALUE(CreateProcessInNewUserNamespace());
-
- // Test all of:
- //
- // - Mapping ranges of length > 1
- //
- // - Mapping multiple ranges
- //
- // - Non-identity mappings
- char entries[] = "2 0 2\n4 6 2";
- DenyPidSetgroups(child_pid);
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(OpenMapFile(child_pid));
- EXPECT_THAT(write(fd.get(), entries, sizeof(entries)),
- SyscallSucceedsWithValue(sizeof(entries)));
-}
-
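For reference on the format written above (editorial note; the three-field layout is documented in user_namespaces(7)), each line of uid_map/gid_map reads "<first ID inside the namespace> <first ID outside the namespace> <length>", so:

//   "2 0 2"  ->  IDs 2-3 inside the child namespace map to IDs 0-1 outside it
//   "4 6 2"  ->  IDs 4-5 inside the child namespace map to IDs 6-7 outside it
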
-INSTANTIATE_TEST_SUITE_P(All, ProcPidUidGidMapTest,
- ::testing::ValuesIn(UidGidMapTestParams()),
- DescribeTestParam);
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/processes.cc b/test/syscalls/linux/processes.cc
deleted file mode 100644
index 412582515..000000000
--- a/test/syscalls/linux/processes.cc
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <stdint.h>
-#include <sys/syscall.h>
-#include <unistd.h>
-
-#include "test/util/capability_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-int testSetPGIDOfZombie(void* arg) {
- int p[2];
-
- TEST_PCHECK(pipe(p) == 0);
-
- pid_t pid = fork();
- if (pid == 0) {
- pid = fork();
- // Create a second child to replicate one of the syzkaller reproducers.
- if (pid == 0) {
- pid = getpid();
- TEST_PCHECK(setpgid(pid, 0) == 0);
- TEST_PCHECK(write(p[1], &pid, sizeof(pid)) == sizeof(pid));
- _exit(0);
- }
- TEST_PCHECK(pid > 0);
- _exit(0);
- }
- close(p[1]);
- TEST_PCHECK(pid > 0);
-
- // Get PID of the second child.
- pid_t cpid;
- TEST_PCHECK(read(p[0], &cpid, sizeof(cpid)) == sizeof(cpid));
-
- // Wait until both child processes have died.
- int c;
- TEST_PCHECK(read(p[0], &c, sizeof(c)) == 0);
-
- // Reap the second child process to collect its zombie.
- int status;
- TEST_PCHECK(RetryEINTR(waitpid)(cpid, &status, 0) == cpid);
-
- // Set the child's group.
- TEST_PCHECK(setpgid(pid, pid) == 0);
-
- TEST_PCHECK(RetryEINTR(waitpid)(-pid, &status, 0) == pid);
-
- TEST_PCHECK(status == 0);
- _exit(0);
-}
-
-TEST(Processes, SetPGIDOfZombie) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
-
- // Fork a test process in a new PID namespace, because it needs to manipulate
- // reparented processes.
- struct clone_arg {
- // Reserve some space for clone() to locate arguments and retcode in this
- // place.
- char stack[128] __attribute__((aligned(16)));
- char stack_ptr[0];
- } ca;
- pid_t pid;
- ASSERT_THAT(pid = clone(testSetPGIDOfZombie, ca.stack_ptr,
- CLONE_NEWPID | SIGCHLD, &ca),
- SyscallSucceeds());
-
- int status;
- ASSERT_THAT(RetryEINTR(waitpid)(pid, &status, 0),
- SyscallSucceedsWithValue(pid));
- EXPECT_EQ(status, 0);
-}
-
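A note on the clone() call above (editorial aside, assuming a downward-growing stack as on x86 and arm64): clone() expects a pointer to the top of the child's stack, and the zero-length stack_ptr member sits immediately after the stack array, so &ca.stack_ptr[0] equals ca.stack + sizeof(ca.stack). An equivalent, more explicit spelling would be:

  pid = clone(testSetPGIDOfZombie, ca.stack + sizeof(ca.stack),
              CLONE_NEWPID | SIGCHLD, &ca);
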
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/pselect.cc b/test/syscalls/linux/pselect.cc
deleted file mode 100644
index e490a987d..000000000
--- a/test/syscalls/linux/pselect.cc
+++ /dev/null
@@ -1,190 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <signal.h>
-#include <sys/select.h>
-
-#include "gtest/gtest.h"
-#include "absl/time/time.h"
-#include "test/syscalls/linux/base_poll_test.h"
-#include "test/util/signal_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-struct MaskWithSize {
- sigset_t* mask;
- size_t mask_size;
-};
-
-// Linux and glibc disagree about the size of sigset_t. When calling the
-// syscall directly, use the size that the kernel expects.
-unsigned kSigsetSize = SIGRTMAX / 8;
-
-// Linux pselect(2) differs from the glibc wrapper function in that Linux
-// updates the timeout with the amount of time remaining. In order to test this
-// behavior we need to use the syscall directly.
-int syscallPselect6(int nfds, fd_set* readfds, fd_set* writefds,
- fd_set* exceptfds, struct timespec* timeout,
- const MaskWithSize* mask_with_size) {
- return syscall(SYS_pselect6, nfds, readfds, writefds, exceptfds, timeout,
- mask_with_size);
-}
-
-class PselectTest : public BasePollTest {
- protected:
- void SetUp() override { BasePollTest::SetUp(); }
- void TearDown() override { BasePollTest::TearDown(); }
-};
-
-// See that when there are no FD sets, pselect behaves like sleep.
-TEST_F(PselectTest, NullFds) {
- struct timespec timeout = absl::ToTimespec(absl::Milliseconds(10));
- ASSERT_THAT(syscallPselect6(0, nullptr, nullptr, nullptr, &timeout, nullptr),
- SyscallSucceeds());
- EXPECT_EQ(timeout.tv_sec, 0);
- EXPECT_EQ(timeout.tv_nsec, 0);
-
- timeout = absl::ToTimespec(absl::Milliseconds(10));
- ASSERT_THAT(syscallPselect6(1, nullptr, nullptr, nullptr, &timeout, nullptr),
- SyscallSucceeds());
- EXPECT_EQ(timeout.tv_sec, 0);
- EXPECT_EQ(timeout.tv_nsec, 0);
-}
-
-TEST_F(PselectTest, ClosedFds) {
- fd_set read_set;
- FD_ZERO(&read_set);
- int fd;
- ASSERT_THAT(fd = dup(1), SyscallSucceeds());
- ASSERT_THAT(close(fd), SyscallSucceeds());
- FD_SET(fd, &read_set);
- struct timespec timeout = absl::ToTimespec(absl::Milliseconds(10));
- EXPECT_THAT(
- syscallPselect6(fd + 1, &read_set, nullptr, nullptr, &timeout, nullptr),
- SyscallFailsWithErrno(EBADF));
-}
-
-TEST_F(PselectTest, ZeroTimeout) {
- struct timespec timeout = {};
- ASSERT_THAT(syscallPselect6(1, nullptr, nullptr, nullptr, &timeout, nullptr),
- SyscallSucceeds());
- EXPECT_EQ(timeout.tv_sec, 0);
- EXPECT_EQ(timeout.tv_nsec, 0);
-}
-
-// If a random save/restore (S/R) interrupts the pselect, SIGALRM may be
-// delivered before pselect restarts, causing the pselect to hang forever.
-TEST_F(PselectTest, NoTimeout) {
- // When there's no timeout, pselect may never return so set a timer.
- SetTimer(absl::Milliseconds(100));
- // See that we get interrupted by the timer.
- ASSERT_THAT(syscallPselect6(1, nullptr, nullptr, nullptr, nullptr, nullptr),
- SyscallFailsWithErrno(EINTR));
- EXPECT_TRUE(TimerFired());
-}
-
-TEST_F(PselectTest, InvalidTimeoutNegative) {
- struct timespec timeout = absl::ToTimespec(absl::Seconds(-1));
- ASSERT_THAT(syscallPselect6(1, nullptr, nullptr, nullptr, &timeout, nullptr),
- SyscallFailsWithErrno(EINVAL));
- EXPECT_EQ(timeout.tv_sec, -1);
- EXPECT_EQ(timeout.tv_nsec, 0);
-}
-
-TEST_F(PselectTest, InvalidTimeoutNotNormalized) {
- struct timespec timeout = {0, 1000000001};
- ASSERT_THAT(syscallPselect6(1, nullptr, nullptr, nullptr, &timeout, nullptr),
- SyscallFailsWithErrno(EINVAL));
- EXPECT_EQ(timeout.tv_sec, 0);
- EXPECT_EQ(timeout.tv_nsec, 1000000001);
-}
-
-TEST_F(PselectTest, EmptySigMaskInvalidMaskSize) {
- struct timespec timeout = {};
- MaskWithSize invalid = {nullptr, 7};
- EXPECT_THAT(syscallPselect6(0, nullptr, nullptr, nullptr, &timeout, &invalid),
- SyscallSucceeds());
-}
-
-TEST_F(PselectTest, EmptySigMaskValidMaskSize) {
- struct timespec timeout = {};
- MaskWithSize invalid = {nullptr, 8};
- EXPECT_THAT(syscallPselect6(0, nullptr, nullptr, nullptr, &timeout, &invalid),
- SyscallSucceeds());
-}
-
-TEST_F(PselectTest, InvalidMaskSize) {
- struct timespec timeout = {};
- sigset_t sigmask;
- ASSERT_THAT(sigemptyset(&sigmask), SyscallSucceeds());
- MaskWithSize invalid = {&sigmask, 7};
- EXPECT_THAT(syscallPselect6(1, nullptr, nullptr, nullptr, &timeout, &invalid),
- SyscallFailsWithErrno(EINVAL));
-}
-
-// Verify that signals blocked by the pselect mask (that would otherwise be
-// allowed) do not interrupt pselect.
-TEST_F(PselectTest, SignalMaskBlocksSignal) {
- absl::Duration duration(absl::Seconds(30));
- struct timespec timeout = absl::ToTimespec(duration);
- absl::Duration timer_duration(absl::Seconds(10));
-
- // Call with a mask that blocks SIGALRM. See that pselect is not interrupted
- // (i.e. returns 0) and that upon completion, the timer has fired.
- sigset_t mask;
- ASSERT_THAT(sigprocmask(0, nullptr, &mask), SyscallSucceeds());
- ASSERT_THAT(sigaddset(&mask, SIGALRM), SyscallSucceeds());
- MaskWithSize mask_with_size = {&mask, kSigsetSize};
- SetTimer(timer_duration);
- MaybeSave();
- ASSERT_FALSE(TimerFired());
- ASSERT_THAT(
- syscallPselect6(1, nullptr, nullptr, nullptr, &timeout, &mask_with_size),
- SyscallSucceeds());
- EXPECT_TRUE(TimerFired());
- EXPECT_EQ(absl::DurationFromTimespec(timeout), absl::Duration());
-}
-
-// Verify that signals allowed by the pselect mask (that would otherwise be
-// blocked) interrupt pselect.
-TEST_F(PselectTest, SignalMaskAllowsSignal) {
- absl::Duration duration = absl::Seconds(30);
- struct timespec timeout = absl::ToTimespec(duration);
- absl::Duration timer_duration = absl::Seconds(10);
-
- sigset_t mask;
- ASSERT_THAT(sigprocmask(0, nullptr, &mask), SyscallSucceeds());
-
- // Block SIGALRM.
- auto cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_BLOCK, SIGALRM));
-
- // Call with a mask that unblocks SIGALRM. See that pselect is interrupted.
- MaskWithSize mask_with_size = {&mask, kSigsetSize};
- SetTimer(timer_duration);
- MaybeSave();
- ASSERT_FALSE(TimerFired());
- ASSERT_THAT(
- syscallPselect6(1, nullptr, nullptr, nullptr, &timeout, &mask_with_size),
- SyscallFailsWithErrno(EINTR));
- EXPECT_TRUE(TimerFired());
- EXPECT_GT(absl::DurationFromTimespec(timeout), absl::Duration());
-}
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/ptrace.cc b/test/syscalls/linux/ptrace.cc
deleted file mode 100644
index f64c23ac0..000000000
--- a/test/syscalls/linux/ptrace.cc
+++ /dev/null
@@ -1,2302 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <elf.h>
-#include <signal.h>
-#include <stddef.h>
-#include <sys/prctl.h>
-#include <sys/ptrace.h>
-#include <sys/socket.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <sys/user.h>
-#include <sys/wait.h>
-#include <unistd.h>
-
-#include <iostream>
-#include <utility>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/flags/flag.h"
-#include "absl/strings/string_view.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/util/capability_util.h"
-#include "test/util/fs_util.h"
-#include "test/util/logging.h"
-#include "test/util/memory_util.h"
-#include "test/util/multiprocess_util.h"
-#include "test/util/platform_util.h"
-#include "test/util/signal_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-#include "test/util/time_util.h"
-
-ABSL_FLAG(bool, ptrace_test_execve_child, false,
- "If true, run the "
- "PtraceExecveTest_Execve_GetRegs_PeekUser_SIGKILL_TraceClone_"
- "TraceExit child workload.");
-ABSL_FLAG(bool, ptrace_test_trace_descendants_allowed, false,
- "If set, run the child workload for "
- "PtraceTest_TraceDescendantsAllowed.");
-ABSL_FLAG(bool, ptrace_test_ptrace_attacher, false,
- "If set, run the child workload for PtraceAttacherSubprocess.");
-ABSL_FLAG(bool, ptrace_test_prctl_set_ptracer, false,
- "If set, run the child workload for PrctlSetPtracerSubprocess.");
-ABSL_FLAG(bool, ptrace_test_prctl_set_ptracer_and_exit_tracee_thread, false,
- "If set, run the child workload for "
- "PtraceTest_PrctlSetPtracerPersistsPastTraceeThreadExit.");
-ABSL_FLAG(bool, ptrace_test_prctl_set_ptracer_and_exec_non_leader, false,
- "If set, run the child workload for "
- "PtraceTest_PrctlSetPtracerDoesNotPersistPastNonLeaderExec.");
-ABSL_FLAG(bool, ptrace_test_prctl_set_ptracer_and_exit_tracer_thread, false,
- "If set, run the child workload for "
- "PtraceTest_PrctlSetPtracerDoesNotPersistPastTracerThreadExit.");
-ABSL_FLAG(int, ptrace_test_prctl_set_ptracer_and_exit_tracer_thread_tid, -1,
- "Specifies the tracee tid in the child workload for "
- "PtraceTest_PrctlSetPtracerDoesNotPersistPastTracerThreadExit.");
-ABSL_FLAG(bool, ptrace_test_prctl_set_ptracer_respects_tracer_thread_id, false,
- "If set, run the child workload for PtraceTest_PrctlSetPtracePID.");
-ABSL_FLAG(int, ptrace_test_prctl_set_ptracer_respects_tracer_thread_id_tid, -1,
- "Specifies the thread tid to be traced in the child workload "
- "for PtraceTest_PrctlSetPtracerRespectsTracerThreadID.");
-
-ABSL_FLAG(bool, ptrace_test_tracee, false,
- "If true, run the tracee process for the "
- "PrctlSetPtracerDoesNotPersistPastLeaderExec and "
- "PrctlSetPtracerDoesNotPersistPastNonLeaderExec workloads.");
-ABSL_FLAG(int, ptrace_test_trace_tid, -1,
- "If set, run a process to ptrace attach to the thread with the "
- "specified pid for the PrctlSetPtracerRespectsTracerThreadID "
- "workload.");
-ABSL_FLAG(int, ptrace_test_fd, -1,
- "Specifies the fd used for communication between tracer and tracee "
- "processes across exec.");
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// PTRACE_GETSIGMASK and PTRACE_SETSIGMASK are not defined until glibc 2.23
-// (fb53a27c5741 "Add new header definitions from Linux 4.4 (plus older ptrace
-// definitions)").
-constexpr auto kPtraceGetSigMask = static_cast<__ptrace_request>(0x420a);
-constexpr auto kPtraceSetSigMask = static_cast<__ptrace_request>(0x420b);
-
-// PTRACE_SYSEMU is not defined until glibc 2.27 (c48831d0eebf "linux/x86: sync
-// sys/ptrace.h with Linux 4.14 [BZ #22433]").
-constexpr auto kPtraceSysemu = static_cast<__ptrace_request>(31);
-
-// PTRACE_EVENT_STOP is not defined until glibc 2.26 (3f67d1a7021e "Add Linux
-// PTRACE_EVENT_STOP").
-constexpr int kPtraceEventStop = 128;
-
-// Sends sig to the current process with tgkill(2).
-//
-// glibc's raise(2) may change the signal mask before sending the signal. These
-// extra syscalls make tests of syscall, signal interception, etc. difficult to
-// write.
-void RaiseSignal(int sig) {
- pid_t pid = getpid();
- TEST_PCHECK(pid > 0);
- pid_t tid = gettid();
- TEST_PCHECK(tid > 0);
- TEST_PCHECK(tgkill(pid, tid, sig) == 0);
-}
-
-constexpr char kYamaPtraceScopePath[] = "/proc/sys/kernel/yama/ptrace_scope";
-
-// Returns the Yama ptrace scope.
-PosixErrorOr<int> YamaPtraceScope() {
- ASSIGN_OR_RETURN_ERRNO(bool exists, Exists(kYamaPtraceScopePath));
- if (!exists) {
- // If the file doesn't exist, Yama is not enabled, so the scope is disabled (0).
- return 0;
- }
-
- std::string contents;
- RETURN_IF_ERRNO(GetContents(kYamaPtraceScopePath, &contents));
-
- int scope;
- if (!absl::SimpleAtoi(contents, &scope)) {
- return PosixError(EINVAL, absl::StrCat(contents, ": not a valid number"));
- }
-
- return scope;
-}
-
-int CheckPtraceAttach(pid_t pid) {
- int ret = ptrace(PTRACE_ATTACH, pid, 0, 0);
- MaybeSave();
- if (ret < 0) {
- return ret;
- }
-
- int status;
- TEST_PCHECK(waitpid(pid, &status, 0) == pid);
- MaybeSave();
- TEST_CHECK(WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP);
- TEST_PCHECK(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);
- MaybeSave();
- return 0;
-}
-
-class SimpleSubprocess {
- public:
- explicit SimpleSubprocess(absl::string_view child_flag) {
- int sockets[2];
- TEST_PCHECK(socketpair(AF_UNIX, SOCK_STREAM, 0, sockets) == 0);
-
- // Allocate vector before forking (not async-signal-safe).
- ExecveArray const owned_child_argv = {"/proc/self/exe", child_flag,
- "--ptrace_test_fd",
- std::to_string(sockets[0])};
- char* const* const child_argv = owned_child_argv.get();
-
- pid_ = fork();
- if (pid_ == 0) {
- TEST_PCHECK(close(sockets[1]) == 0);
- execve(child_argv[0], child_argv, /* envp = */ nullptr);
- TEST_PCHECK_MSG(false, "Survived execve to test child");
- }
- TEST_PCHECK(pid_ > 0);
- TEST_PCHECK(close(sockets[0]) == 0);
- sockfd_ = sockets[1];
- }
-
- SimpleSubprocess(SimpleSubprocess&& orig)
- : pid_(orig.pid_), sockfd_(orig.sockfd_) {
- orig.pid_ = -1;
- orig.sockfd_ = -1;
- }
-
- SimpleSubprocess& operator=(SimpleSubprocess&& orig) {
- if (this != &orig) {
- this->~SimpleSubprocess();
- pid_ = orig.pid_;
- sockfd_ = orig.sockfd_;
- orig.pid_ = -1;
- orig.sockfd_ = -1;
- }
- return *this;
- }
-
- SimpleSubprocess(SimpleSubprocess const&) = delete;
- SimpleSubprocess& operator=(SimpleSubprocess const&) = delete;
-
- ~SimpleSubprocess() {
- if (pid_ < 0) {
- return;
- }
- EXPECT_THAT(shutdown(sockfd_, SHUT_RDWR), SyscallSucceeds());
- EXPECT_THAT(close(sockfd_), SyscallSucceeds());
- int status;
- EXPECT_THAT(waitpid(pid_, &status, 0), SyscallSucceedsWithValue(pid_));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << " status " << status;
- }
-
- pid_t pid() const { return pid_; }
-
- // Sends the child process the given value, receives an errno in response, and
- // returns a PosixError corresponding to the received errno.
- template <typename T>
- PosixError Cmd(T val) {
- if (WriteFd(sockfd_, &val, sizeof(val)) < 0) {
- return PosixError(errno, "write failed");
- }
- return RecvErrno();
- }
-
- private:
- PosixError RecvErrno() {
- int resp_errno;
- if (ReadFd(sockfd_, &resp_errno, sizeof(resp_errno)) < 0) {
- return PosixError(errno, "read failed");
- }
- return PosixError(resp_errno);
- }
-
- pid_t pid_ = -1;
- int sockfd_ = -1;
-};
-
-TEST(PtraceTest, AttachSelf) {
- EXPECT_THAT(ptrace(PTRACE_ATTACH, gettid(), 0, 0),
- SyscallFailsWithErrno(EPERM));
-}
-
-TEST(PtraceTest, AttachSameThreadGroup) {
- pid_t const tid = gettid();
- ScopedThread([&] {
- EXPECT_THAT(ptrace(PTRACE_ATTACH, tid, 0, 0), SyscallFailsWithErrno(EPERM));
- });
-}
-
-TEST(PtraceTest, TraceParentNotAllowed) {
- SKIP_IF(ASSERT_NO_ERRNO_AND_VALUE(YamaPtraceScope()) < 1);
- AutoCapability cap(CAP_SYS_PTRACE, false);
-
- pid_t const child_pid = fork();
- if (child_pid == 0) {
- TEST_CHECK(CheckPtraceAttach(getppid()) == -1);
- TEST_PCHECK(errno == EPERM);
- _exit(0);
- }
- ASSERT_THAT(child_pid, SyscallSucceeds());
-
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << " status " << status;
-}
-
-TEST(PtraceTest, TraceNonDescendantNotAllowed) {
- SKIP_IF(ASSERT_NO_ERRNO_AND_VALUE(YamaPtraceScope()) < 1);
- AutoCapability cap(CAP_SYS_PTRACE, false);
-
- pid_t const tracee_pid = fork();
- if (tracee_pid == 0) {
- while (true) {
- SleepSafe(absl::Seconds(1));
- }
- }
- ASSERT_THAT(tracee_pid, SyscallSucceeds());
-
- pid_t const tracer_pid = fork();
- if (tracer_pid == 0) {
- TEST_CHECK(CheckPtraceAttach(tracee_pid) == -1);
- TEST_PCHECK(errno == EPERM);
- _exit(0);
- }
- EXPECT_THAT(tracer_pid, SyscallSucceeds());
-
- // Clean up tracer.
- int status;
- ASSERT_THAT(waitpid(tracer_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0);
-
- // Clean up tracee.
- ASSERT_THAT(kill(tracee_pid, SIGKILL), SyscallSucceeds());
- ASSERT_THAT(waitpid(tracee_pid, &status, 0),
- SyscallSucceedsWithValue(tracee_pid));
- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL)
- << " status " << status;
-}
-
-TEST(PtraceTest, TraceNonDescendantWithCapabilityAllowed) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_PTRACE)));
- // Skip if disallowed by YAMA despite having CAP_SYS_PTRACE.
- SKIP_IF(ASSERT_NO_ERRNO_AND_VALUE(YamaPtraceScope()) > 2);
-
- pid_t const tracee_pid = fork();
- if (tracee_pid == 0) {
- while (true) {
- SleepSafe(absl::Seconds(1));
- }
- }
- ASSERT_THAT(tracee_pid, SyscallSucceeds());
-
- pid_t const tracer_pid = fork();
- if (tracer_pid == 0) {
- TEST_PCHECK(CheckPtraceAttach(tracee_pid) == 0);
- _exit(0);
- }
- ASSERT_THAT(tracer_pid, SyscallSucceeds());
-
- // Clean up tracer.
- int status;
- ASSERT_THAT(waitpid(tracer_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0);
-
- // Clean up tracee.
- ASSERT_THAT(kill(tracee_pid, SIGKILL), SyscallSucceeds());
- ASSERT_THAT(waitpid(tracee_pid, &status, 0),
- SyscallSucceedsWithValue(tracee_pid));
- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL)
- << " status " << status;
-}
-
-TEST(PtraceTest, TraceDescendantsAllowed) {
- SKIP_IF(ASSERT_NO_ERRNO_AND_VALUE(YamaPtraceScope()) > 1);
- AutoCapability cap(CAP_SYS_PTRACE, false);
-
- // Use socket pair to communicate tids to this process from its grandchild.
- int sockets[2];
- ASSERT_THAT(socketpair(AF_UNIX, SOCK_STREAM, 0, sockets), SyscallSucceeds());
-
- // Allocate vector before forking (not async-signal-safe).
- ExecveArray const owned_child_argv = {
- "/proc/self/exe", "--ptrace_test_trace_descendants_allowed",
- "--ptrace_test_fd", std::to_string(sockets[0])};
- char* const* const child_argv = owned_child_argv.get();
-
- pid_t const child_pid = fork();
- if (child_pid == 0) {
- // In child process.
- TEST_PCHECK(close(sockets[1]) == 0);
- pid_t const grandchild_pid = fork();
- if (grandchild_pid == 0) {
- // This test will create a new thread in the grandchild process.
- // pthread_create(2) isn't async-signal-safe, so we execve() first.
- execve(child_argv[0], child_argv, /* envp = */ nullptr);
- TEST_PCHECK_MSG(false, "Survived execve to test child");
- }
- TEST_PCHECK(grandchild_pid > 0);
- MaybeSave();
-
- // Wait for grandchild. Our parent process will kill it once it's done.
- int status;
- TEST_PCHECK(waitpid(grandchild_pid, &status, 0) == grandchild_pid);
- TEST_CHECK(WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL);
- MaybeSave();
- _exit(0);
- }
- ASSERT_THAT(child_pid, SyscallSucceeds());
- ASSERT_THAT(close(sockets[0]), SyscallSucceeds());
-
- // We should be able to attach to any thread in the grandchild.
- pid_t grandchild_tid1, grandchild_tid2;
- ASSERT_THAT(read(sockets[1], &grandchild_tid1, sizeof(grandchild_tid1)),
- SyscallSucceedsWithValue(sizeof(grandchild_tid1)));
- ASSERT_THAT(read(sockets[1], &grandchild_tid2, sizeof(grandchild_tid2)),
- SyscallSucceedsWithValue(sizeof(grandchild_tid2)));
-
- EXPECT_THAT(CheckPtraceAttach(grandchild_tid1), SyscallSucceeds());
- EXPECT_THAT(CheckPtraceAttach(grandchild_tid2), SyscallSucceeds());
-
- // Clean up grandchild.
- ASSERT_THAT(kill(grandchild_tid1, SIGKILL), SyscallSucceeds());
-
- // Clean up child.
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << " status " << status;
-}
-
-[[noreturn]] void RunTraceDescendantsAllowed(int fd) {
- // Let the tracer know our tid through the socket fd.
- pid_t const tid = gettid();
- TEST_PCHECK(write(fd, &tid, sizeof(tid)) == sizeof(tid));
- MaybeSave();
-
- ScopedThread t([fd] {
- // See if any arbitrary thread (whose tid differs from the process id) can
- // be traced as well.
- pid_t const tid = gettid();
- TEST_PCHECK(write(fd, &tid, sizeof(tid)) == sizeof(tid));
- MaybeSave();
- while (true) {
- SleepSafe(absl::Seconds(1));
- }
- });
-
- while (true) {
- SleepSafe(absl::Seconds(1));
- }
-}
-
-TEST(PtraceTest, PrctlSetPtracerInvalidPID) {
- // EINVAL should also be returned if PR_SET_PTRACER is not supported.
- EXPECT_THAT(prctl(PR_SET_PTRACER, 123456789), SyscallFailsWithErrno(EINVAL));
-}
-
-SimpleSubprocess CreatePtraceAttacherSubprocess() {
- return SimpleSubprocess("--ptrace_test_ptrace_attacher");
-}
-
-[[noreturn]] static void RunPtraceAttacher(int sockfd) {
- // execve() may have restored CAP_SYS_PTRACE if we had real UID 0.
- TEST_CHECK(SetCapability(CAP_SYS_PTRACE, false).ok());
- // Perform PTRACE_ATTACH in a separate thread to verify that permissions
- // apply process-wide.
- ScopedThread t([&] {
- while (true) {
- pid_t pid;
- int rv = read(sockfd, &pid, sizeof(pid));
- if (rv == 0) {
- _exit(0);
- }
- if (rv < 0) {
- _exit(1);
- }
- int resp_errno = 0;
- if (CheckPtraceAttach(pid) < 0) {
- resp_errno = errno;
- }
- TEST_PCHECK(write(sockfd, &resp_errno, sizeof(resp_errno)) ==
- sizeof(resp_errno));
- }
- });
- while (true) {
- SleepSafe(absl::Seconds(1));
- }
-}
-
-SimpleSubprocess CreatePrctlSetPtracerSubprocess() {
- return SimpleSubprocess("--ptrace_test_prctl_set_ptracer");
-}
-
-[[noreturn]] static void RunPrctlSetPtracer(int sockfd) {
- // Perform prctl in a separate thread to verify that it applies
- // process-wide.
- ScopedThread t([&] {
- while (true) {
- pid_t pid;
- int rv = read(sockfd, &pid, sizeof(pid));
- if (rv == 0) {
- _exit(0);
- }
- if (rv < 0) {
- _exit(1);
- }
- int resp_errno = 0;
- if (prctl(PR_SET_PTRACER, pid) < 0) {
- resp_errno = errno;
- }
- TEST_PCHECK(write(sockfd, &resp_errno, sizeof(resp_errno)) ==
- sizeof(resp_errno));
- }
- });
- while (true) {
- SleepSafe(absl::Seconds(1));
- }
-}
-
-TEST(PtraceTest, PrctlSetPtracer) {
- SKIP_IF(ASSERT_NO_ERRNO_AND_VALUE(YamaPtraceScope()) != 1);
-
- AutoCapability cap(CAP_SYS_PTRACE, false);
-
- // Ensure that initially, no tracer exception is set.
- ASSERT_THAT(prctl(PR_SET_PTRACER, 0), SyscallSucceeds());
-
- SimpleSubprocess tracee = CreatePrctlSetPtracerSubprocess();
- SimpleSubprocess tracer = CreatePtraceAttacherSubprocess();
-
- // By default, Yama should prevent tracer from tracing its parent (this
- // process) or siblings (tracee).
- EXPECT_THAT(tracer.Cmd(gettid()), PosixErrorIs(EPERM));
- EXPECT_THAT(tracer.Cmd(tracee.pid()), PosixErrorIs(EPERM));
-
- // If tracee invokes PR_SET_PTRACER with tracer's pid, the pid of any of its
- // ancestors (i.e. us), or PR_SET_PTRACER_ANY, then tracer can trace it (but
- // not us).
-
- ASSERT_THAT(tracee.Cmd(tracer.pid()), PosixErrorIs(0));
- EXPECT_THAT(tracer.Cmd(tracee.pid()), PosixErrorIs(0));
- EXPECT_THAT(tracer.Cmd(gettid()), PosixErrorIs(EPERM));
-
- ASSERT_THAT(tracee.Cmd(gettid()), PosixErrorIs(0));
- EXPECT_THAT(tracer.Cmd(tracee.pid()), PosixErrorIs(0));
- EXPECT_THAT(tracer.Cmd(gettid()), PosixErrorIs(EPERM));
-
- ASSERT_THAT(tracee.Cmd(static_cast<pid_t>(PR_SET_PTRACER_ANY)),
- PosixErrorIs(0));
- EXPECT_THAT(tracer.Cmd(tracee.pid()), PosixErrorIs(0));
- EXPECT_THAT(tracer.Cmd(gettid()), PosixErrorIs(EPERM));
-
- // If tracee invokes PR_SET_PTRACER with pid 0, then tracer can no longer
- // trace it.
- ASSERT_THAT(tracee.Cmd(0), PosixErrorIs(0));
- EXPECT_THAT(tracer.Cmd(tracee.pid()), PosixErrorIs(EPERM));
-
- // If we invoke PR_SET_PTRACER with tracer's pid, then it can trace us (but
- // not our descendants).
- ASSERT_THAT(prctl(PR_SET_PTRACER, tracer.pid()), SyscallSucceeds());
- EXPECT_THAT(tracer.Cmd(gettid()), PosixErrorIs(0));
- EXPECT_THAT(tracer.Cmd(tracee.pid()), PosixErrorIs(EPERM));
-
- // If we invoke PR_SET_PTRACER with pid 0, then tracer can no longer trace us.
- ASSERT_THAT(prctl(PR_SET_PTRACER, 0), SyscallSucceeds());
- EXPECT_THAT(tracer.Cmd(gettid()), PosixErrorIs(EPERM));
-
- // Another thread in our thread group can invoke PR_SET_PTRACER instead; its
- // effect applies to the whole thread group.
- pid_t const our_tid = gettid();
- ScopedThread([&] {
- ASSERT_THAT(prctl(PR_SET_PTRACER, tracer.pid()), SyscallSucceeds());
- EXPECT_THAT(tracer.Cmd(gettid()), PosixErrorIs(0));
- EXPECT_THAT(tracer.Cmd(our_tid), PosixErrorIs(0));
-
- ASSERT_THAT(prctl(PR_SET_PTRACER, 0), SyscallSucceeds());
- EXPECT_THAT(tracer.Cmd(gettid()), PosixErrorIs(EPERM));
- EXPECT_THAT(tracer.Cmd(our_tid), PosixErrorIs(EPERM));
- }).Join();
-}
-
-// Tests that YAMA exceptions store tracees by thread group leader. Exceptions
-// are preserved even after the tracee thread exits, as long as the tracee's
-// thread group leader is still around.
-TEST(PtraceTest, PrctlSetPtracerPersistsPastTraceeThreadExit) {
- SKIP_IF(ASSERT_NO_ERRNO_AND_VALUE(YamaPtraceScope()) != 1);
- AutoCapability cap(CAP_SYS_PTRACE, false);
-
- // Use sockets to synchronize between tracer and tracee.
- int sockets[2];
- ASSERT_THAT(socketpair(AF_UNIX, SOCK_STREAM, 0, sockets), SyscallSucceeds());
-
- // Allocate vector before forking (not async-signal-safe).
- ExecveArray const owned_child_argv = {
- "/proc/self/exe",
- "--ptrace_test_prctl_set_ptracer_and_exit_tracee_thread",
- "--ptrace_test_fd", std::to_string(sockets[0])};
- char* const* const child_argv = owned_child_argv.get();
-
- pid_t const tracee_pid = fork();
- if (tracee_pid == 0) {
- // This test will create a new thread in the child process.
- // pthread_create(2) isn't async-signal-safe, so we execve() first.
- TEST_PCHECK(close(sockets[1]) == 0);
- execve(child_argv[0], child_argv, /* envp = */ nullptr);
- TEST_PCHECK_MSG(false, "Survived execve to test child");
- }
- ASSERT_THAT(tracee_pid, SyscallSucceeds());
- ASSERT_THAT(close(sockets[0]), SyscallSucceeds());
-
- pid_t const tracer_pid = fork();
- if (tracer_pid == 0) {
- // Wait until the tracee thread calling prctl has terminated.
- char done;
- TEST_PCHECK(read(sockets[1], &done, 1) == 1);
- MaybeSave();
-
- TEST_PCHECK(CheckPtraceAttach(tracee_pid) == 0);
- _exit(0);
- }
- ASSERT_THAT(tracer_pid, SyscallSucceeds());
-
- // Clean up tracer.
- int status;
- ASSERT_THAT(waitpid(tracer_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << " status " << status;
-
- // Clean up tracee.
- ASSERT_THAT(kill(tracee_pid, SIGKILL), SyscallSucceeds());
- ASSERT_THAT(waitpid(tracee_pid, &status, 0),
- SyscallSucceedsWithValue(tracee_pid));
- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL)
- << " status " << status;
-}
-
-[[noreturn]] void RunPrctlSetPtracerPersistsPastTraceeThreadExit(int fd) {
- ScopedThread t([] {
- TEST_PCHECK(prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY) == 0);
- MaybeSave();
- });
- t.Join();
- // Indicate that the thread that set the prctl has exited.
- TEST_PCHECK(write(fd, "x", 1) == 1);
- MaybeSave();
-
- while (true) {
- SleepSafe(absl::Seconds(1));
- }
-}
-
-// Tests that YAMA exceptions store tracees by thread group leader. Exceptions
-// are preserved across exec as long as the thread group leader does not change,
-// even if the tracee thread is terminated.
-TEST(PtraceTest, PrctlSetPtracerPersistsPastLeaderExec) {
- SKIP_IF(ASSERT_NO_ERRNO_AND_VALUE(YamaPtraceScope()) != 1);
- AutoCapability cap(CAP_SYS_PTRACE, false);
-
- // Use sockets to synchronize between tracer and tracee.
- int sockets[2];
- ASSERT_THAT(socketpair(AF_UNIX, SOCK_STREAM, 0, sockets), SyscallSucceeds());
-
- // Allocate vector before forking (not async-signal-safe).
- ExecveArray const owned_child_argv = {
- "/proc/self/exe", "--ptrace_test_tracee", "--ptrace_test_fd",
- std::to_string(sockets[0])};
- char* const* const child_argv = owned_child_argv.get();
-
- pid_t const tracee_pid = fork();
- if (tracee_pid == 0) {
- TEST_PCHECK(close(sockets[1]) == 0);
- TEST_PCHECK(prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY) == 0);
- MaybeSave();
-
- // This test will create a new thread in the child process.
- // pthread_create(2) isn't async-signal-safe, so we execve() first.
- execve(child_argv[0], child_argv, /* envp = */ nullptr);
- TEST_PCHECK_MSG(false, "Survived execve to test child");
- }
- ASSERT_THAT(tracee_pid, SyscallSucceeds());
- ASSERT_THAT(close(sockets[0]), SyscallSucceeds());
-
- pid_t const tracer_pid = fork();
- if (tracer_pid == 0) {
- // Wait until the tracee has exec'd.
- char done;
- TEST_PCHECK(read(sockets[1], &done, 1) == 1);
- MaybeSave();
-
- TEST_PCHECK(CheckPtraceAttach(tracee_pid) == 0);
- _exit(0);
- }
- ASSERT_THAT(tracer_pid, SyscallSucceeds());
-
- // Clean up tracer.
- int status;
- ASSERT_THAT(waitpid(tracer_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << " status " << status;
-
- // Clean up tracee.
- ASSERT_THAT(kill(tracee_pid, SIGKILL), SyscallSucceeds());
- ASSERT_THAT(waitpid(tracee_pid, &status, 0),
- SyscallSucceedsWithValue(tracee_pid));
- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL)
- << " status " << status;
-}
-
-[[noreturn]] void RunTracee(int fd) {
- // Indicate that we have exec'd.
- TEST_PCHECK(write(fd, "x", 1) == 1);
- MaybeSave();
-
- while (true) {
- SleepSafe(absl::Seconds(1));
- }
-}
-
-// Tests that YAMA exceptions store tracees by thread group leader. Exceptions
-// are cleared if the tracee process's thread group leader is terminated by
-// exec.
-TEST(PtraceTest, PrctlSetPtracerDoesNotPersistPastNonLeaderExec) {
- SKIP_IF(ASSERT_NO_ERRNO_AND_VALUE(YamaPtraceScope()) != 1);
- AutoCapability cap(CAP_SYS_PTRACE, false);
-
- // Use sockets to synchronize between tracer and tracee.
- int sockets[2];
- ASSERT_THAT(socketpair(AF_UNIX, SOCK_STREAM, 0, sockets), SyscallSucceeds());
-
- // Allocate vector before forking (not async-signal-safe).
- ExecveArray const owned_child_argv = {
- "/proc/self/exe", "--ptrace_test_prctl_set_ptracer_and_exec_non_leader",
- "--ptrace_test_fd", std::to_string(sockets[0])};
- char* const* const child_argv = owned_child_argv.get();
-
- pid_t const tracee_pid = fork();
- if (tracee_pid == 0) {
- // This test will create a new thread in the child process.
- // pthread_create(2) isn't async-signal-safe, so we execve() first.
- TEST_PCHECK(close(sockets[1]) == 0);
- execve(child_argv[0], child_argv, /* envp = */ nullptr);
- TEST_PCHECK_MSG(false, "Survived execve to test child");
- }
- ASSERT_THAT(tracee_pid, SyscallSucceeds());
- ASSERT_THAT(close(sockets[0]), SyscallSucceeds());
-
- pid_t const tracer_pid = fork();
- if (tracer_pid == 0) {
- // Wait until the tracee has exec'd.
- char done;
- TEST_PCHECK(read(sockets[1], &done, 1) == 1);
- MaybeSave();
-
- TEST_CHECK(CheckPtraceAttach(tracee_pid) == -1);
- TEST_PCHECK(errno == EPERM);
- _exit(0);
- }
- ASSERT_THAT(tracer_pid, SyscallSucceeds());
-
- // Clean up tracer.
- int status;
- ASSERT_THAT(waitpid(tracer_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << " status " << status;
-
- // Clean up tracee.
- ASSERT_THAT(kill(tracee_pid, SIGKILL), SyscallSucceeds());
- ASSERT_THAT(waitpid(tracee_pid, &status, 0),
- SyscallSucceedsWithValue(tracee_pid));
- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL)
- << " status " << status;
-}
-
-[[noreturn]] void RunPrctlSetPtracerDoesNotPersistPastNonLeaderExec(int fd) {
- ScopedThread t([fd] {
- TEST_PCHECK(prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY) == 0);
- MaybeSave();
-
- ExecveArray const owned_child_argv = {
- "/proc/self/exe", "--ptrace_test_tracee", "--ptrace_test_fd",
- std::to_string(fd)};
- char* const* const child_argv = owned_child_argv.get();
-
- execve(child_argv[0], child_argv, /* envp = */ nullptr);
- TEST_PCHECK_MSG(false, "Survived execve to test child");
- });
- t.Join();
- TEST_CHECK_MSG(false, "Survived execve? (main)");
- _exit(1);
-}
-
-// Tests that YAMA exceptions store the tracer itself rather than the thread
-// group leader. Exceptions are cleared when the tracer task exits, rather than
-// when its thread group leader exits.
-TEST(PtraceTest, PrctlSetPtracerDoesNotPersistPastTracerThreadExit) {
- SKIP_IF(ASSERT_NO_ERRNO_AND_VALUE(YamaPtraceScope()) != 1);
-
- // Use sockets to synchronize between tracer and tracee.
- int sockets[2];
- ASSERT_THAT(socketpair(AF_UNIX, SOCK_STREAM, 0, sockets), SyscallSucceeds());
-
- pid_t const tracee_pid = fork();
- if (tracee_pid == 0) {
- TEST_PCHECK(close(sockets[1]) == 0);
- pid_t tracer_tid;
- TEST_PCHECK(read(sockets[0], &tracer_tid, sizeof(tracer_tid)) ==
- sizeof(tracer_tid));
- MaybeSave();
-
- TEST_PCHECK(prctl(PR_SET_PTRACER, tracer_tid) == 0);
- MaybeSave();
- // Indicate that the prctl has been set.
- TEST_PCHECK(write(sockets[0], "x", 1) == 1);
- MaybeSave();
-
- while (true) {
- SleepSafe(absl::Seconds(1));
- }
- }
- ASSERT_THAT(tracee_pid, SyscallSucceeds());
- ASSERT_THAT(close(sockets[0]), SyscallSucceeds());
-
- // Allocate vector before forking (not async-signal-safe).
- ExecveArray const owned_child_argv = {
- "/proc/self/exe",
- "--ptrace_test_prctl_set_ptracer_and_exit_tracer_thread",
- "--ptrace_test_prctl_set_ptracer_and_exit_tracer_thread_tid",
- std::to_string(tracee_pid),
- "--ptrace_test_fd",
- std::to_string(sockets[1])};
- char* const* const child_argv = owned_child_argv.get();
-
- pid_t const tracer_pid = fork();
- if (tracer_pid == 0) {
- // This test will create a new thread in the child process.
- // pthread_create(2) isn't async-signal-safe, so we execve() first.
- execve(child_argv[0], child_argv, /* envp = */ nullptr);
- TEST_PCHECK_MSG(false, "Survived execve to test child");
- }
- ASSERT_THAT(tracer_pid, SyscallSucceeds());
-
- // Clean up tracer.
- int status;
- ASSERT_THAT(waitpid(tracer_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << " status " << status;
-
- // Clean up tracee.
- ASSERT_THAT(kill(tracee_pid, SIGKILL), SyscallSucceeds());
- ASSERT_THAT(waitpid(tracee_pid, &status, 0),
- SyscallSucceedsWithValue(tracee_pid));
- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL)
- << " status " << status;
-}
-
-[[noreturn]] void RunPrctlSetPtracerDoesNotPersistPastTracerThreadExit(
- int tracee_tid, int fd) {
- AutoCapability cap(CAP_SYS_PTRACE, false);
-
- ScopedThread t([fd] {
- pid_t const tracer_tid = gettid();
- TEST_PCHECK(write(fd, &tracer_tid, sizeof(tracer_tid)) ==
- sizeof(tracer_tid));
-
- // Wait until the prctl has been set.
- char done;
- TEST_PCHECK(read(fd, &done, 1) == 1);
- MaybeSave();
- });
- t.Join();
-
- // Sleep for a bit before verifying the invalidation. The thread exit above
- // should cause the ptrace exception to be invalidated, but in Linux, this is
- // not done immediately. The YAMA exception is dropped during
- // __put_task_struct(), which occurs (at the earliest) one RCU grace period
- // after exit_notify() ==> release_task().
- SleepSafe(absl::Milliseconds(100));
-
- TEST_CHECK(CheckPtraceAttach(tracee_tid) == -1);
- TEST_PCHECK(errno == EPERM);
- _exit(0);
-}
-
-// Tests that YAMA exceptions store the tracer thread itself rather than the
-// thread group leader. Exceptions are preserved across exec in the tracer
-// thread, even if the thread group leader is terminated.
-TEST(PtraceTest, PrctlSetPtracerRespectsTracerThreadID) {
- SKIP_IF(ASSERT_NO_ERRNO_AND_VALUE(YamaPtraceScope()) != 1);
-
- // Use sockets to synchronize between tracer and tracee.
- int sockets[2];
- ASSERT_THAT(socketpair(AF_UNIX, SOCK_STREAM, 0, sockets), SyscallSucceeds());
-
- pid_t const tracee_pid = fork();
- if (tracee_pid == 0) {
- TEST_PCHECK(close(sockets[1]) == 0);
- pid_t tracer_tid;
- TEST_PCHECK(read(sockets[0], &tracer_tid, sizeof(tracer_tid)) ==
- sizeof(tracer_tid));
- MaybeSave();
-
- TEST_PCHECK(prctl(PR_SET_PTRACER, tracer_tid) == 0);
- MaybeSave();
- // Indicate that the prctl has been set.
- TEST_PCHECK(write(sockets[0], "x", 1) == 1);
- MaybeSave();
-
- while (true) {
- SleepSafe(absl::Seconds(1));
- }
- }
- ASSERT_THAT(tracee_pid, SyscallSucceeds());
- ASSERT_THAT(close(sockets[0]), SyscallSucceeds());
-
- // Allocate vector before forking (not async-signal-safe).
- ExecveArray const owned_child_argv = {
- "/proc/self/exe",
- "--ptrace_test_prctl_set_ptracer_respects_tracer_thread_id",
- "--ptrace_test_prctl_set_ptracer_respects_tracer_thread_id_tid",
- std::to_string(tracee_pid),
- "--ptrace_test_fd",
- std::to_string(sockets[1])};
- char* const* const child_argv = owned_child_argv.get();
-
- pid_t const tracer_pid = fork();
- if (tracer_pid == 0) {
- // This test will create a new thread in the child process.
- // pthread_create(2) isn't async-signal-safe, so we execve() first.
- execve(child_argv[0], child_argv, /* envp = */ nullptr);
- TEST_PCHECK_MSG(false, "Survived execve to test child");
- }
- ASSERT_THAT(tracer_pid, SyscallSucceeds());
-
- // Clean up tracer.
- int status;
- ASSERT_THAT(waitpid(tracer_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << " status " << status;
-
- // Clean up tracee.
- ASSERT_THAT(kill(tracee_pid, SIGKILL), SyscallSucceeds());
- ASSERT_THAT(waitpid(tracee_pid, &status, 0),
- SyscallSucceedsWithValue(tracee_pid));
- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL)
- << " status " << status;
-}
-
-[[noreturn]] void RunPrctlSetPtracerRespectsTracerThreadID(int tracee_tid,
- int fd) {
- // Create a separate thread for tracing (i.e., not the thread group
- // leader). After the subsequent execve(), the current thread group leader
- // will no longer exist, but the YAMA exception installed with this
- // thread should still be valid.
- ScopedThread t([tracee_tid, fd] {
- pid_t const tracer_tid = gettid();
- TEST_PCHECK(write(fd, &tracer_tid, sizeof(tracer_tid)) ==
- sizeof(tracer_tid));
- MaybeSave();
-
- // Wait until the tracee has made the PR_SET_PTRACER prctl.
- char done;
- TEST_PCHECK(read(fd, &done, 1) == 1);
- MaybeSave();
-
- ExecveArray const owned_child_argv = {
- "/proc/self/exe", "--ptrace_test_trace_tid", std::to_string(tracee_tid),
- "--ptrace_test_fd", std::to_string(fd)};
- char* const* const child_argv = owned_child_argv.get();
-
- execve(child_argv[0], child_argv, /* envp = */ nullptr);
- TEST_PCHECK_MSG(false, "Survived execve to test child");
- });
- t.Join();
- TEST_CHECK_MSG(false, "Survived execve? (main)");
- _exit(1);
-}
-
-[[noreturn]] void RunTraceTID(int tracee_tid, int fd) {
- TEST_PCHECK(SetCapability(CAP_SYS_PTRACE, false).ok());
- TEST_PCHECK(CheckPtraceAttach(tracee_tid) == 0);
- _exit(0);
-}
-
-// Tests that removing a YAMA exception does not affect a tracer that is already
-// attached.
-TEST(PtraceTest, PrctlClearPtracerDoesNotAffectCurrentTracer) {
- SKIP_IF(ASSERT_NO_ERRNO_AND_VALUE(YamaPtraceScope()) != 1);
- AutoCapability cap(CAP_SYS_PTRACE, false);
-
- // Use sockets to synchronize between tracer and tracee.
- int sockets[2];
- ASSERT_THAT(socketpair(AF_UNIX, SOCK_STREAM, 0, sockets), SyscallSucceeds());
-
- pid_t const tracee_pid = fork();
- if (tracee_pid == 0) {
- TEST_PCHECK(close(sockets[1]) == 0);
- TEST_PCHECK(prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY) == 0);
- MaybeSave();
- // Indicate that the prctl has been set.
- TEST_PCHECK(write(sockets[0], "x", 1) == 1);
- MaybeSave();
-
- // Wait until tracer has attached before clearing PR_SET_PTRACER.
- char done;
- TEST_PCHECK(read(sockets[0], &done, 1) == 1);
- MaybeSave();
-
- TEST_PCHECK(prctl(PR_SET_PTRACER, 0) == 0);
- MaybeSave();
- // Indicate that the prctl has been cleared.
- TEST_PCHECK(write(sockets[0], "x", 1) == 1);
- MaybeSave();
-
- while (true) {
- SleepSafe(absl::Seconds(1));
- }
- }
- ASSERT_THAT(tracee_pid, SyscallSucceeds());
- ASSERT_THAT(close(sockets[0]), SyscallSucceeds());
-
- std::string mem_path = "/proc/" + std::to_string(tracee_pid) + "/mem";
- pid_t const tracer_pid = fork();
- if (tracer_pid == 0) {
- // Wait until tracee has called prctl, or else we won't be able to attach.
- char done;
- TEST_PCHECK(read(sockets[1], &done, 1) == 1);
- MaybeSave();
-
- TEST_PCHECK(ptrace(PTRACE_ATTACH, tracee_pid, 0, 0) == 0);
- MaybeSave();
- // Indicate that we have attached.
- TEST_PCHECK(write(sockets[1], &done, 1) == 1);
- MaybeSave();
-
- // Block until tracee enters signal-delivery-stop as a result of the
- // SIGSTOP sent by PTRACE_ATTACH.
- int status;
- TEST_PCHECK(waitpid(tracee_pid, &status, 0) == tracee_pid);
- MaybeSave();
- TEST_CHECK(WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP);
- MaybeSave();
-
- TEST_PCHECK(ptrace(PTRACE_CONT, tracee_pid, 0, 0) == 0);
- MaybeSave();
-
- // Wait until tracee has cleared PR_SET_PTRACER. Even though it was cleared,
- // we should still be able to access /proc/[pid]/mem because we are already
- // attached.
- TEST_PCHECK(read(sockets[1], &done, 1) == 1);
- MaybeSave();
- TEST_PCHECK(open(mem_path.c_str(), O_RDONLY) != -1);
- MaybeSave();
- _exit(0);
- }
- ASSERT_THAT(tracer_pid, SyscallSucceeds());
-
- // Clean up tracer.
- int status;
- ASSERT_THAT(waitpid(tracer_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << " status " << status;
-
- // Clean up tracee.
- ASSERT_THAT(kill(tracee_pid, SIGKILL), SyscallSucceeds());
- ASSERT_THAT(waitpid(tracee_pid, &status, 0),
- SyscallSucceedsWithValue(tracee_pid));
- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL)
- << " status " << status;
-}
-
-TEST(PtraceTest, PrctlNotInherited) {
- SKIP_IF(ASSERT_NO_ERRNO_AND_VALUE(YamaPtraceScope()) != 1);
- AutoCapability cap(CAP_SYS_PTRACE, false);
-
- // Allow any ptracer. This should not affect the child processes.
- ASSERT_THAT(prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY), SyscallSucceeds());
-
- pid_t const tracee_pid = fork();
- if (tracee_pid == 0) {
- while (true) {
- SleepSafe(absl::Seconds(1));
- }
- }
- ASSERT_THAT(tracee_pid, SyscallSucceeds());
-
- pid_t const tracer_pid = fork();
- if (tracer_pid == 0) {
- TEST_CHECK(CheckPtraceAttach(tracee_pid) == -1);
- TEST_PCHECK(errno == EPERM);
- _exit(0);
- }
- ASSERT_THAT(tracer_pid, SyscallSucceeds());
-
- // Clean up tracer.
- int status;
- ASSERT_THAT(waitpid(tracer_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << " status " << status;
-
- // Clean up tracee.
- ASSERT_THAT(kill(tracee_pid, SIGKILL), SyscallSucceeds());
- ASSERT_THAT(waitpid(tracee_pid, &status, 0),
- SyscallSucceedsWithValue(tracee_pid));
- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL)
- << " status " << status;
-}
-
-TEST(PtraceTest, AttachParent_PeekData_PokeData_SignalSuppression) {
- // Yama prevents attaching to a parent by default. Skip the test if the scope
- // is stricter than "restricted" (1); at scope 1, grant the child an exception
- // below.
- const int yama_scope = ASSERT_NO_ERRNO_AND_VALUE(YamaPtraceScope());
- SKIP_IF(yama_scope > 1);
- if (yama_scope == 1) {
- // Allow child to trace us.
- ASSERT_THAT(prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY), SyscallSucceeds());
- }
-
- // Test PTRACE_POKE/PEEKDATA on both anonymous and file mappings.
- const auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- ASSERT_NO_ERRNO(Truncate(file.path(), kPageSize));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR));
- const auto file_mapping = ASSERT_NO_ERRNO_AND_VALUE(Mmap(
- nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));
-
- constexpr long kBeforePokeDataAnonValue = 10;
- constexpr long kAfterPokeDataAnonValue = 20;
- constexpr long kBeforePokeDataFileValue = 0; // implicit, due to truncate()
- constexpr long kAfterPokeDataFileValue = 30;
-
- volatile long anon_word = kBeforePokeDataAnonValue;
- auto* file_word_ptr = static_cast<volatile long*>(file_mapping.ptr());
-
- pid_t const child_pid = fork();
- if (child_pid == 0) {
- // In child process.
-
- // Attach to the parent.
- pid_t const parent_pid = getppid();
- TEST_PCHECK(ptrace(PTRACE_ATTACH, parent_pid, 0, 0) == 0);
- MaybeSave();
-
- // Block until the parent enters signal-delivery-stop as a result of the
- // SIGSTOP sent by PTRACE_ATTACH.
- int status;
- TEST_PCHECK(waitpid(parent_pid, &status, 0) == parent_pid);
- MaybeSave();
- TEST_CHECK(WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP);
-
- // Replace the value of anon_word in the parent process with
- // kAfterPokeDataAnonValue.
- long parent_word = ptrace(PTRACE_PEEKDATA, parent_pid, &anon_word, 0);
- MaybeSave();
- TEST_CHECK(parent_word == kBeforePokeDataAnonValue);
- TEST_PCHECK(ptrace(PTRACE_POKEDATA, parent_pid, &anon_word,
- kAfterPokeDataAnonValue) == 0);
- MaybeSave();
-
- // Replace the value pointed to by file_word_ptr in the mapped file with
- // kAfterPokeDataFileValue, via the parent process' mapping.
- parent_word = ptrace(PTRACE_PEEKDATA, parent_pid, file_word_ptr, 0);
- MaybeSave();
- TEST_CHECK(parent_word == kBeforePokeDataFileValue);
- TEST_PCHECK(ptrace(PTRACE_POKEDATA, parent_pid, file_word_ptr,
- kAfterPokeDataFileValue) == 0);
- MaybeSave();
-
- // Detach from the parent and suppress the SIGSTOP. If the SIGSTOP is not
- // suppressed, the parent will hang in group-stop, causing the test to time
- // out.
- TEST_PCHECK(ptrace(PTRACE_DETACH, parent_pid, 0, 0) == 0);
- MaybeSave();
- _exit(0);
- }
- // In parent process.
- ASSERT_THAT(child_pid, SyscallSucceeds());
-
- // Wait for the child to complete.
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << " status " << status;
-
- // Check that the child's PTRACE_POKEDATA was effective.
- EXPECT_EQ(kAfterPokeDataAnonValue, anon_word);
- EXPECT_EQ(kAfterPokeDataFileValue, *file_word_ptr);
-}
-
-TEST(PtraceTest, GetSigMask) {
- // glibc and the Linux kernel define a sigset_t with different sizes. To avoid
- // creating a kernel_sigset_t and recreating all the modification functions
- // (sigemptyset, etc), we just hardcode the kernel sigset size.
- constexpr int kSizeofKernelSigset = 8;
- constexpr int kBlockSignal = SIGUSR1;
- sigset_t blocked;
- sigemptyset(&blocked);
- sigaddset(&blocked, kBlockSignal);
-
- pid_t const child_pid = fork();
- if (child_pid == 0) {
- // In child process.
-
- // Install a signal handler for kBlockSignal to avoid termination, then block
- // the signal.
- TEST_PCHECK(signal(
- kBlockSignal, +[](int signo) {}) != SIG_ERR);
- MaybeSave();
- TEST_PCHECK(sigprocmask(SIG_SETMASK, &blocked, nullptr) == 0);
- MaybeSave();
-
- // Enable tracing.
- TEST_PCHECK(ptrace(PTRACE_TRACEME, 0, 0, 0) == 0);
- MaybeSave();
-
- // This should be blocked.
- RaiseSignal(kBlockSignal);
-
- // This should be suppressed by the parent, which will change the signal mask
- // in the meantime, so kBlockSignal should be delivered once this resumes.
- RaiseSignal(SIGSTOP);
-
- _exit(0);
- }
- // In parent process.
- ASSERT_THAT(child_pid, SyscallSucceeds());
-
- // Wait for the child to send itself SIGSTOP and enter signal-delivery-stop.
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP)
- << " status " << status;
-
- // Get current signal mask.
- sigset_t set;
- EXPECT_THAT(ptrace(kPtraceGetSigMask, child_pid, kSizeofKernelSigset, &set),
- SyscallSucceeds());
- EXPECT_THAT(blocked, EqualsSigset(set));
-
- // Try to get current signal mask with bad size argument.
- EXPECT_THAT(ptrace(kPtraceGetSigMask, child_pid, 0, nullptr),
- SyscallFailsWithErrno(EINVAL));
-
- // Try to set bad signal mask.
- sigset_t* bad_addr = reinterpret_cast<sigset_t*>(-1);
- EXPECT_THAT(
- ptrace(kPtraceSetSigMask, child_pid, kSizeofKernelSigset, bad_addr),
- SyscallFailsWithErrno(EFAULT));
-
- // Set signal mask to empty set.
- sigset_t set1;
- sigemptyset(&set1);
- EXPECT_THAT(ptrace(kPtraceSetSigMask, child_pid, kSizeofKernelSigset, &set1),
- SyscallSucceeds());
-
- // Suppress SIGSTOP and resume the child. It should re-enter
- // signal-delivery-stop for kBlockSignal.
- ASSERT_THAT(ptrace(PTRACE_CONT, child_pid, 0, 0), SyscallSucceeds());
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == kBlockSignal)
- << " status " << status;
-
- ASSERT_THAT(ptrace(PTRACE_CONT, child_pid, 0, 0), SyscallSucceeds());
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- // Check that the process exited normally.
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << " status " << status;
-}
-
-TEST(PtraceTest, GetSiginfo_SetSiginfo_SignalInjection) {
- constexpr int kOriginalSigno = SIGUSR1;
- constexpr int kInjectedSigno = SIGUSR2;
-
- pid_t const child_pid = fork();
- if (child_pid == 0) {
- // In child process.
-
- // Override all signal handlers.
- struct sigaction sa = {};
- sa.sa_handler = +[](int signo) { _exit(signo); };
- TEST_PCHECK(sigfillset(&sa.sa_mask) == 0);
- for (int signo = 1; signo < 32; signo++) {
- if (signo == SIGKILL || signo == SIGSTOP) {
- continue;
- }
- TEST_PCHECK(sigaction(signo, &sa, nullptr) == 0);
- }
- for (int signo = SIGRTMIN; signo <= SIGRTMAX; signo++) {
- TEST_PCHECK(sigaction(signo, &sa, nullptr) == 0);
- }
-
- // Unblock all signals.
- TEST_PCHECK(sigprocmask(SIG_UNBLOCK, &sa.sa_mask, nullptr) == 0);
- MaybeSave();
-
- // Send ourselves kOriginalSigno while ptraced and exit with the signal we
- // actually receive via the signal handler, if any, or 0 if we don't receive
- // a signal.
- TEST_PCHECK(ptrace(PTRACE_TRACEME, 0, 0, 0) == 0);
- MaybeSave();
- RaiseSignal(kOriginalSigno);
- _exit(0);
- }
- // In parent process.
- ASSERT_THAT(child_pid, SyscallSucceeds());
-
- // Wait for the child to send itself kOriginalSigno and enter
- // signal-delivery-stop.
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == kOriginalSigno)
- << " status " << status;
-
- siginfo_t siginfo = {};
- ASSERT_THAT(ptrace(PTRACE_GETSIGINFO, child_pid, 0, &siginfo),
- SyscallSucceeds());
- EXPECT_EQ(kOriginalSigno, siginfo.si_signo);
- EXPECT_EQ(SI_TKILL, siginfo.si_code);
-
- // Replace the signal with kInjectedSigno, and check that the child exits
- // with kInjectedSigno, indicating that signal injection was successful.
- siginfo.si_signo = kInjectedSigno;
- ASSERT_THAT(ptrace(PTRACE_SETSIGINFO, child_pid, 0, &siginfo),
- SyscallSucceeds());
- ASSERT_THAT(ptrace(PTRACE_DETACH, child_pid, 0, kInjectedSigno),
- SyscallSucceeds());
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == kInjectedSigno)
- << " status " << status;
-}
-
-TEST(PtraceTest, SIGKILLDoesNotCauseSignalDeliveryStop) {
- pid_t const child_pid = fork();
- if (child_pid == 0) {
- // In child process.
- TEST_PCHECK(ptrace(PTRACE_TRACEME, 0, 0, 0) == 0);
- MaybeSave();
- RaiseSignal(SIGKILL);
- TEST_CHECK_MSG(false, "Survived SIGKILL?");
- _exit(1);
- }
- // In parent process.
- ASSERT_THAT(child_pid, SyscallSucceeds());
-
- // Expect the child to be killed by SIGKILL without entering
- // signal-delivery-stop.
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL)
- << " status " << status;
-}
-
-TEST(PtraceTest, PtraceKill) {
- constexpr int kOriginalSigno = SIGUSR1;
-
- pid_t const child_pid = fork();
- if (child_pid == 0) {
- // In child process.
- TEST_PCHECK(ptrace(PTRACE_TRACEME, 0, 0, 0) == 0);
- MaybeSave();
-
- // PTRACE_KILL only works if tracee has entered signal-delivery-stop.
- RaiseSignal(kOriginalSigno);
- TEST_CHECK_MSG(false, "Failed to kill the process?");
- _exit(0);
- }
- // In parent process.
- ASSERT_THAT(child_pid, SyscallSucceeds());
-
- // Wait for the child to send itself kOriginalSigno and enter
- // signal-delivery-stop.
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == kOriginalSigno)
- << " status " << status;
-
- ASSERT_THAT(ptrace(PTRACE_KILL, child_pid, 0, 0), SyscallSucceeds());
-
- // Expect the child to die with SIGKILL.
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL)
- << " status " << status;
-}
-
-TEST(PtraceTest, GetRegSet) {
- pid_t const child_pid = fork();
- if (child_pid == 0) {
- // In child process.
-
- // Enable tracing.
- TEST_PCHECK(ptrace(PTRACE_TRACEME, 0, 0, 0) == 0);
- MaybeSave();
-
- // Use kill explicitly because we check the syscall argument register below.
- kill(getpid(), SIGSTOP);
-
- _exit(0);
- }
- // In parent process.
- ASSERT_THAT(child_pid, SyscallSucceeds());
-
- // Wait for the child to send itself SIGSTOP and enter signal-delivery-stop.
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP)
- << " status " << status;
-
- // Get the general registers.
- struct user_regs_struct regs;
- struct iovec iov;
- iov.iov_base = &regs;
- iov.iov_len = sizeof(regs);
- EXPECT_THAT(ptrace(PTRACE_GETREGSET, child_pid, NT_PRSTATUS, &iov),
- SyscallSucceeds());
-
- // Read exactly the full register set.
- EXPECT_EQ(iov.iov_len, sizeof(regs));
-
-#if defined(__x86_64__)
- // Child called kill(2), with SIGSTOP as arg 2.
- EXPECT_EQ(regs.rsi, SIGSTOP);
-#elif defined(__aarch64__)
- EXPECT_EQ(regs.regs[1], SIGSTOP);
-#endif
-
- // Suppress SIGSTOP and resume the child.
- ASSERT_THAT(ptrace(PTRACE_CONT, child_pid, 0, 0), SyscallSucceeds());
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- // Check that the process exited normally.
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << " status " << status;
-}
-
-TEST(PtraceTest, AttachingConvertsGroupStopToPtraceStop) {
- pid_t const child_pid = fork();
- if (child_pid == 0) {
- // In child process.
- while (true) {
- pause();
- }
- }
- // In parent process.
- ASSERT_THAT(child_pid, SyscallSucceeds());
-
- // SIGSTOP the child and wait for it to stop.
- ASSERT_THAT(kill(child_pid, SIGSTOP), SyscallSucceeds());
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, WUNTRACED),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP)
- << " status " << status;
-
- // Attach to the child and expect it to re-enter a traced group-stop despite
- // already being stopped.
- ASSERT_THAT(ptrace(PTRACE_ATTACH, child_pid, 0, 0), SyscallSucceeds());
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP)
- << " status " << status;
-
- // Verify that the child is ptrace-stopped by checking that it can receive
- // ptrace commands requiring a ptrace-stop.
- EXPECT_THAT(ptrace(PTRACE_SETOPTIONS, child_pid, 0, 0), SyscallSucceeds());
-
- // Group-stop is distinguished from signal-delivery-stop by PTRACE_GETSIGINFO
- // failing with EINVAL.
- siginfo_t siginfo = {};
- EXPECT_THAT(ptrace(PTRACE_GETSIGINFO, child_pid, 0, &siginfo),
- SyscallFailsWithErrno(EINVAL));
-
- // Detach from the child and expect it to stay stopped without a notification.
- ASSERT_THAT(ptrace(PTRACE_DETACH, child_pid, 0, 0), SyscallSucceeds());
- ASSERT_THAT(waitpid(child_pid, &status, WUNTRACED | WNOHANG),
- SyscallSucceedsWithValue(0));
-
- // Sending it SIGCONT should cause it to leave its stop.
- ASSERT_THAT(kill(child_pid, SIGCONT), SyscallSucceeds());
- ASSERT_THAT(waitpid(child_pid, &status, WCONTINUED),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFCONTINUED(status)) << " status " << status;
-
- // Clean up the child.
- ASSERT_THAT(kill(child_pid, SIGKILL), SyscallSucceeds());
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL)
- << " status " << status;
-}
-
-// Fixture for tests parameterized by whether or not to use PTRACE_O_TRACEEXEC.
-class PtraceExecveTest : public ::testing::TestWithParam<bool> {
- protected:
- bool TraceExec() const { return GetParam(); }
-};
-
-TEST_P(PtraceExecveTest, Execve_GetRegs_PeekUser_SIGKILL_TraceClone_TraceExit) {
- ExecveArray const owned_child_argv = {"/proc/self/exe",
- "--ptrace_test_execve_child"};
- char* const* const child_argv = owned_child_argv.get();
-
- pid_t const child_pid = fork();
- if (child_pid == 0) {
- // In child process. The test relies on calling execve() in a non-leader
- // thread; pthread_create() isn't async-signal-safe, so the safest way to
- // do this is to execve() first, then enable tracing and run the expected
- // child process behavior in the new subprocess.
- execve(child_argv[0], child_argv, /* envp = */ nullptr);
- TEST_PCHECK_MSG(false, "Survived execve to test child");
- }
- // In parent process.
- ASSERT_THAT(child_pid, SyscallSucceeds());
-
- // Wait for the child to send itself SIGSTOP and enter signal-delivery-stop.
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP)
- << " status " << status;
-
- // Enable PTRACE_O_TRACECLONE so we can get the ID of the child's non-leader
- // thread, PTRACE_O_TRACEEXIT so we can observe the leader's death, and
- // PTRACE_O_TRACEEXEC if required by the test. (The leader doesn't call
- // execve, but options should be inherited across clone.)
- long opts = PTRACE_O_TRACECLONE | PTRACE_O_TRACEEXIT;
- if (TraceExec()) {
- opts |= PTRACE_O_TRACEEXEC;
- }
- ASSERT_THAT(ptrace(PTRACE_SETOPTIONS, child_pid, 0, opts), SyscallSucceeds());
-
- // Suppress the SIGSTOP and wait for the child's leader thread to report
- // PTRACE_EVENT_CLONE. Get the new thread's ID from the event.
- ASSERT_THAT(ptrace(PTRACE_CONT, child_pid, 0, 0), SyscallSucceeds());
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_EQ(SIGTRAP | (PTRACE_EVENT_CLONE << 8), status >> 8);
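- // (Ptrace event stops are reported to waitpid as SIGTRAP stops with the
- // event number in the next higher byte, which is why these checks compare
- // against status >> 8.)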
- unsigned long eventmsg;
- ASSERT_THAT(ptrace(PTRACE_GETEVENTMSG, child_pid, 0, &eventmsg),
- SyscallSucceeds());
- pid_t const nonleader_tid = eventmsg;
- pid_t const leader_tid = child_pid;
-
- // The new thread should be ptraced and in signal-delivery-stop by SIGSTOP due
- // to PTRACE_O_TRACECLONE.
- //
- // Before bf959931ddb88c4e4366e96dd22e68fa0db9527c "wait/ptrace: assume __WALL
- // if the child is traced" (4.7), waiting on it requires __WCLONE since, as a
- // non-leader, its termination signal is 0. After, a standard wait is
- // sufficient.
- ASSERT_THAT(waitpid(nonleader_tid, &status, __WCLONE),
- SyscallSucceedsWithValue(nonleader_tid));
- EXPECT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP)
- << " status " << status;
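- // (For illustration only: __WALL, which matches both clone and non-clone
- // children, would also work here, and is effectively what kernels at or
- // after 4.7 assume for traced children.)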
-
- // Resume both child threads.
- for (pid_t const tid : {leader_tid, nonleader_tid}) {
- ASSERT_THAT(ptrace(PTRACE_CONT, tid, 0, 0), SyscallSucceeds());
- }
-
- // The non-leader child thread should call execve, causing the leader thread
- // to enter PTRACE_EVENT_EXIT with an apparent exit code of 0. At this point,
- // the leader has not yet exited, so the non-leader should be blocked in
- // execve.
- ASSERT_THAT(waitpid(leader_tid, &status, 0),
- SyscallSucceedsWithValue(leader_tid));
- EXPECT_EQ(SIGTRAP | (PTRACE_EVENT_EXIT << 8), status >> 8);
- ASSERT_THAT(ptrace(PTRACE_GETEVENTMSG, leader_tid, 0, &eventmsg),
- SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(eventmsg) && WEXITSTATUS(eventmsg) == 0)
- << " eventmsg " << eventmsg;
- EXPECT_THAT(waitpid(nonleader_tid, &status, __WCLONE | WNOHANG),
- SyscallSucceedsWithValue(0));
-
- // Allow the leader to continue exiting. This should allow the non-leader to
- // complete its execve, causing the original leader to be reaped without
- // further notice and the non-leader to steal its ID.
- ASSERT_THAT(ptrace(PTRACE_CONT, leader_tid, 0, 0), SyscallSucceeds());
- ASSERT_THAT(waitpid(leader_tid, &status, 0),
- SyscallSucceedsWithValue(leader_tid));
- if (TraceExec()) {
- // If PTRACE_O_TRACEEXEC was enabled, the execing thread should be in
- // PTRACE_EVENT_EXEC-stop, with the event message set to its old thread ID.
- EXPECT_EQ(SIGTRAP | (PTRACE_EVENT_EXEC << 8), status >> 8);
- ASSERT_THAT(ptrace(PTRACE_GETEVENTMSG, leader_tid, 0, &eventmsg),
- SyscallSucceeds());
- EXPECT_EQ(nonleader_tid, eventmsg);
- } else {
- // Otherwise, the execing thread should have received SIGTRAP and should now
- // be in signal-delivery-stop.
- EXPECT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP)
- << " status " << status;
- }
-
-#ifdef __x86_64__
- {
- // CS should be 0x33, indicating a 64-bit binary.
- constexpr uint64_t kAMD64UserCS = 0x33;
- EXPECT_THAT(ptrace(PTRACE_PEEKUSER, leader_tid,
- offsetof(struct user_regs_struct, cs), 0),
- SyscallSucceedsWithValue(kAMD64UserCS));
- struct user_regs_struct regs = {};
- ASSERT_THAT(ptrace(PTRACE_GETREGS, leader_tid, 0, &regs),
- SyscallSucceeds());
- EXPECT_EQ(kAMD64UserCS, regs.cs);
- }
-#endif // defined(__x86_64__)
-
- // PTRACE_O_TRACEEXIT should have been inherited across execve. Send SIGKILL,
- // which should end the PTRACE_EVENT_EXEC-stop or signal-delivery-stop and
- // leave the child in PTRACE_EVENT_EXIT-stop.
- ASSERT_THAT(kill(leader_tid, SIGKILL), SyscallSucceeds());
- ASSERT_THAT(waitpid(leader_tid, &status, 0),
- SyscallSucceedsWithValue(leader_tid));
- EXPECT_EQ(SIGTRAP | (PTRACE_EVENT_EXIT << 8), status >> 8);
- ASSERT_THAT(ptrace(PTRACE_GETEVENTMSG, leader_tid, 0, &eventmsg),
- SyscallSucceeds());
- EXPECT_TRUE(WIFSIGNALED(eventmsg) && WTERMSIG(eventmsg) == SIGKILL)
- << " eventmsg " << eventmsg;
-
- // End the PTRACE_EVENT_EXIT stop, allowing the child to exit.
- ASSERT_THAT(ptrace(PTRACE_CONT, leader_tid, 0, 0), SyscallSucceeds());
- ASSERT_THAT(waitpid(leader_tid, &status, 0),
- SyscallSucceedsWithValue(leader_tid));
- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL)
- << " status " << status;
-}
-
-[[noreturn]] void RunExecveChild() {
- // Enable tracing, then raise SIGSTOP and expect our parent to suppress it.
- TEST_PCHECK(ptrace(PTRACE_TRACEME, 0, 0, 0) == 0);
- MaybeSave();
- RaiseSignal(SIGSTOP);
- MaybeSave();
-
- // Call execve() in a non-leader thread. As long as execve() succeeds, what
- // exactly we execve() shouldn't really matter, since the tracer should kill
- // us after execve() completes.
- ScopedThread t([&] {
- ExecveArray const owned_child_argv = {"/proc/self/exe",
- "--this_flag_shouldnt_exist"};
- char* const* const child_argv = owned_child_argv.get();
- execve(child_argv[0], child_argv, /* envp = */ nullptr);
- TEST_PCHECK_MSG(false, "Survived execve? (thread)");
- });
- t.Join();
- TEST_CHECK_MSG(false, "Survived execve? (main)");
- _exit(1);
-}
-
-INSTANTIATE_TEST_SUITE_P(TraceExec, PtraceExecveTest, ::testing::Bool());
-
-// This test has expectations on when syscall-enter/exit-stops occur that are
-// violated if saving occurs, since saving interrupts all syscalls, causing
-// premature syscall-exit.
-TEST(PtraceTest, ExitWhenParentIsNotTracer_Syscall_TraceVfork_TraceVforkDone) {
- constexpr int kExitTraceeExitCode = 99;
-
- pid_t const child_pid = fork();
- if (child_pid == 0) {
- // In child process.
-
- // Block SIGCHLD so it doesn't interrupt wait4.
- sigset_t mask;
- TEST_PCHECK(sigemptyset(&mask) == 0);
- TEST_PCHECK(sigaddset(&mask, SIGCHLD) == 0);
- TEST_PCHECK(sigprocmask(SIG_SETMASK, &mask, nullptr) == 0);
- MaybeSave();
-
- // Enable tracing, then raise SIGSTOP and expect our parent to suppress it.
- TEST_PCHECK(ptrace(PTRACE_TRACEME, 0, 0, 0) == 0);
- MaybeSave();
- RaiseSignal(SIGSTOP);
- MaybeSave();
-
- // Spawn a vfork child that exits immediately, and reap it. Don't save
- // after vfork since the parent expects to see wait4 as the next syscall.
- pid_t const pid = vfork();
- if (pid == 0) {
- _exit(kExitTraceeExitCode);
- }
- TEST_PCHECK_MSG(pid > 0, "vfork failed");
-
- int status;
- TEST_PCHECK(wait4(pid, &status, 0, nullptr) > 0);
- MaybeSave();
- TEST_CHECK(WIFEXITED(status) && WEXITSTATUS(status) == kExitTraceeExitCode);
- _exit(0);
- }
- // In parent process.
- ASSERT_THAT(child_pid, SyscallSucceeds());
-
- // Wait for the child to send itself SIGSTOP and enter signal-delivery-stop.
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP)
- << " status " << status;
-
- // Enable PTRACE_O_TRACEVFORK so we can get the ID of the grandchild,
- // PTRACE_O_TRACEVFORKDONE so we can observe PTRACE_EVENT_VFORK_DONE, and
- // PTRACE_O_TRACESYSGOOD so syscall-enter/exit-stops are unambiguously
- // indicated by a stop signal of SIGTRAP|0x80 rather than just SIGTRAP.
- ASSERT_THAT(ptrace(PTRACE_SETOPTIONS, child_pid, 0,
- PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE |
- PTRACE_O_TRACESYSGOOD),
- SyscallSucceeds());
-
- // Suppress the SIGSTOP and wait for the child to report PTRACE_EVENT_VFORK.
- // Get the new process' ID from the event.
- ASSERT_THAT(ptrace(PTRACE_CONT, child_pid, 0, 0), SyscallSucceeds());
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_EQ(SIGTRAP | (PTRACE_EVENT_VFORK << 8), status >> 8);
- unsigned long eventmsg;
- ASSERT_THAT(ptrace(PTRACE_GETEVENTMSG, child_pid, 0, &eventmsg),
- SyscallSucceeds());
- pid_t const grandchild_pid = eventmsg;
-
- // The grandchild should be traced by us and in signal-delivery-stop by
- // SIGSTOP due to PTRACE_O_TRACEVFORK. This allows us to wait on it even
- // though we're not its parent.
- ASSERT_THAT(waitpid(grandchild_pid, &status, 0),
- SyscallSucceedsWithValue(grandchild_pid));
- EXPECT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP)
- << " status " << status;
-
- // Resume the child with PTRACE_SYSCALL. Since the grandchild is still in
- // signal-delivery-stop, the child should remain in vfork() waiting for the
- // grandchild to exec or exit.
- ASSERT_THAT(ptrace(PTRACE_SYSCALL, child_pid, 0, 0), SyscallSucceeds());
- absl::SleepFor(absl::Seconds(1));
- ASSERT_THAT(waitpid(child_pid, &status, WNOHANG),
- SyscallSucceedsWithValue(0));
-
- // Suppress the grandchild's SIGSTOP and wait for the grandchild to exit. Pass
- // WNOWAIT to waitid() so that we don't acknowledge the grandchild's exit yet.
- ASSERT_THAT(ptrace(PTRACE_CONT, grandchild_pid, 0, 0), SyscallSucceeds());
- siginfo_t siginfo = {};
- ASSERT_THAT(waitid(P_PID, grandchild_pid, &siginfo, WEXITED | WNOWAIT),
- SyscallSucceeds());
- EXPECT_EQ(SIGCHLD, siginfo.si_signo);
- EXPECT_EQ(CLD_EXITED, siginfo.si_code);
- EXPECT_EQ(kExitTraceeExitCode, siginfo.si_status);
- EXPECT_EQ(grandchild_pid, siginfo.si_pid);
- EXPECT_EQ(getuid(), siginfo.si_uid);
-
- // The child should now be in PTRACE_EVENT_VFORK_DONE stop. The event
- // message should still be the grandchild's PID.
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_EQ(SIGTRAP | (PTRACE_EVENT_VFORK_DONE << 8), status >> 8);
- ASSERT_THAT(ptrace(PTRACE_GETEVENTMSG, child_pid, 0, &eventmsg),
- SyscallSucceeds());
- EXPECT_EQ(grandchild_pid, eventmsg);
-
- // Resume the child with PTRACE_SYSCALL again and expect it to enter
- // syscall-exit-stop for vfork() or clone(), either of which should return the
- // grandchild's PID from the syscall. Aside from PTRACE_O_TRACESYSGOOD,
- // syscall-stops are distinguished from signal-delivery-stop by
- // PTRACE_GETSIGINFO returning a siginfo for which si_code == SIGTRAP or
- // SIGTRAP|0x80.
- ASSERT_THAT(ptrace(PTRACE_SYSCALL, child_pid, 0, 0), SyscallSucceeds());
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == (SIGTRAP | 0x80))
- << " status " << status;
- ASSERT_THAT(ptrace(PTRACE_GETSIGINFO, child_pid, 0, &siginfo),
- SyscallSucceeds());
- EXPECT_TRUE(siginfo.si_code == SIGTRAP || siginfo.si_code == (SIGTRAP | 0x80))
- << "si_code = " << siginfo.si_code;
-
- {
- struct user_regs_struct regs = {};
- struct iovec iov;
- iov.iov_base = &regs;
- iov.iov_len = sizeof(regs);
- EXPECT_THAT(ptrace(PTRACE_GETREGSET, child_pid, NT_PRSTATUS, &iov),
- SyscallSucceeds());
-#if defined(__x86_64__)
- EXPECT_TRUE(regs.orig_rax == SYS_vfork || regs.orig_rax == SYS_clone)
- << "orig_rax = " << regs.orig_rax;
- EXPECT_EQ(grandchild_pid, regs.rax);
-#elif defined(__aarch64__)
- EXPECT_TRUE(regs.regs[8] == SYS_clone) << "regs[8] = " << regs.regs[8];
- EXPECT_EQ(grandchild_pid, regs.regs[0]);
-#endif // defined(__x86_64__)
- }
-
- // After this point, the child will be making wait4 syscalls that will be
- // interrupted by saving, so saving is not permitted. Note that this is
- // explicitly released below once the grandchild exits.
- DisableSave ds;
-
- // Resume the child with PTRACE_SYSCALL again and expect it to enter
- // syscall-enter-stop for wait4().
- ASSERT_THAT(ptrace(PTRACE_SYSCALL, child_pid, 0, 0), SyscallSucceeds());
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == (SIGTRAP | 0x80))
- << " status " << status;
- ASSERT_THAT(ptrace(PTRACE_GETSIGINFO, child_pid, 0, &siginfo),
- SyscallSucceeds());
- EXPECT_TRUE(siginfo.si_code == SIGTRAP || siginfo.si_code == (SIGTRAP | 0x80))
- << "si_code = " << siginfo.si_code;
-#ifdef __x86_64__
- {
- EXPECT_THAT(ptrace(PTRACE_PEEKUSER, child_pid,
- offsetof(struct user_regs_struct, orig_rax), 0),
- SyscallSucceedsWithValue(SYS_wait4));
- }
-#endif // defined(__x86_64__)
-
- // Resume the child with PTRACE_SYSCALL again. Since the grandchild is
- // waiting for the tracer (us) to acknowledge its exit first, wait4 should
- // block.
- ASSERT_THAT(ptrace(PTRACE_SYSCALL, child_pid, 0, 0), SyscallSucceeds());
- absl::SleepFor(absl::Seconds(1));
- ASSERT_THAT(waitpid(child_pid, &status, WNOHANG),
- SyscallSucceedsWithValue(0));
-
- // Acknowledge the grandchild's exit.
- ASSERT_THAT(waitpid(grandchild_pid, &status, 0),
- SyscallSucceedsWithValue(grandchild_pid));
- ds.reset();
-
- // Now the child should enter syscall-exit-stop for wait4, returning with the
- // grandchild's PID.
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == (SIGTRAP | 0x80))
- << " status " << status;
- {
- struct user_regs_struct regs = {};
- struct iovec iov;
- iov.iov_base = &regs;
- iov.iov_len = sizeof(regs);
- EXPECT_THAT(ptrace(PTRACE_GETREGSET, child_pid, NT_PRSTATUS, &iov),
- SyscallSucceeds());
-#if defined(__x86_64__)
- EXPECT_EQ(SYS_wait4, regs.orig_rax);
- EXPECT_EQ(grandchild_pid, regs.rax);
-#elif defined(__aarch64__)
- EXPECT_EQ(SYS_wait4, regs.regs[8]);
- EXPECT_EQ(grandchild_pid, regs.regs[0]);
-#endif // defined(__x86_64__)
- }
-
- // Detach from the child and wait for it to exit.
- ASSERT_THAT(ptrace(PTRACE_DETACH, child_pid, 0, 0), SyscallSucceeds());
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << " status " << status;
-}
-
-// These tests require knowledge of architecture-specific syscall conventions.
-#ifdef __x86_64__
-TEST(PtraceTest, Int3) {
- SKIP_IF(PlatformSupportInt3() == PlatformSupport::NotSupported);
-
- pid_t const child_pid = fork();
- if (child_pid == 0) {
- // In child process.
-
- // Enable tracing.
- TEST_PCHECK(ptrace(PTRACE_TRACEME, 0, 0, 0) == 0);
-
- // Interrupt 3 - trap to debugger
- asm("int3");
-
- _exit(56);
- }
- // In parent process.
- ASSERT_THAT(child_pid, SyscallSucceeds());
-
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP)
- << " status " << status;
-
- ASSERT_THAT(ptrace(PTRACE_CONT, child_pid, 0, 0), SyscallSucceeds());
-
- // The child should resume past the int3 and exit with its expected code.
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 56)
- << " status " << status;
-}
-
-TEST(PtraceTest, Sysemu_PokeUser) {
- constexpr int kSysemuHelperFirstExitCode = 126;
- constexpr uint64_t kSysemuInjectedExitGroupReturn = 42;
-
- pid_t const child_pid = fork();
- if (child_pid == 0) {
- // In child process.
-
- // Enable tracing, then raise SIGSTOP and expect our parent to suppress it.
- TEST_PCHECK(ptrace(PTRACE_TRACEME, 0, 0, 0) == 0);
- RaiseSignal(SIGSTOP);
-
- // Try to exit_group, expecting the tracer to skip the syscall and set its
- // own return value.
- int const rv = syscall(SYS_exit_group, kSysemuHelperFirstExitCode);
- TEST_PCHECK_MSG(rv == kSysemuInjectedExitGroupReturn,
- "exit_group returned incorrect value");
-
- _exit(0);
- }
- // In parent process.
- ASSERT_THAT(child_pid, SyscallSucceeds());
-
- // Wait for the child to send itself SIGSTOP and enter signal-delivery-stop.
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP)
- << " status " << status;
-
- // Suppress the SIGSTOP and wait for the child to enter syscall-enter-stop
- // for its first exit_group syscall.
- ASSERT_THAT(ptrace(kPtraceSysemu, child_pid, 0, 0), SyscallSucceeds());
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP)
- << " status " << status;
-
- struct user_regs_struct regs = {};
- ASSERT_THAT(ptrace(PTRACE_GETREGS, child_pid, 0, &regs), SyscallSucceeds());
- EXPECT_EQ(SYS_exit_group, regs.orig_rax);
- EXPECT_EQ(-ENOSYS, regs.rax);
- EXPECT_EQ(kSysemuHelperFirstExitCode, regs.rdi);
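- // (At syscall-enter-stop the kernel has already replaced rax with -ENOSYS;
- // orig_rax preserves the syscall number and rdi holds its first argument.)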
-
- // Replace the exit_group return value, then resume the child, which should
- // automatically skip the syscall.
- ASSERT_THAT(
- ptrace(PTRACE_POKEUSER, child_pid, offsetof(struct user_regs_struct, rax),
- kSysemuInjectedExitGroupReturn),
- SyscallSucceeds());
- ASSERT_THAT(ptrace(PTRACE_DETACH, child_pid, 0, 0), SyscallSucceeds());
-
- // The child should validate the injected return value and then exit normally.
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << " status " << status;
-}
-
-// This test also cares about syscall-exit-stop.
-TEST(PtraceTest, ERESTART) {
- constexpr int kSigno = SIGUSR1;
-
- pid_t const child_pid = fork();
- if (child_pid == 0) {
- // In child process.
-
- // Ignore, but unblock, kSigno.
- struct sigaction sa = {};
- sa.sa_handler = SIG_IGN;
- TEST_PCHECK(sigfillset(&sa.sa_mask) == 0);
- TEST_PCHECK(sigaction(kSigno, &sa, nullptr) == 0);
- MaybeSave();
- TEST_PCHECK(sigprocmask(SIG_UNBLOCK, &sa.sa_mask, nullptr) == 0);
- MaybeSave();
-
- // Enable tracing, then raise SIGSTOP and expect our parent to suppress it.
- TEST_PCHECK(ptrace(PTRACE_TRACEME, 0, 0, 0) == 0);
- RaiseSignal(SIGSTOP);
-
- // Invoke the pause syscall, which normally should not return until we
- // receive a signal that "either terminates the process or causes the
- // invocation of a signal-catching function".
- pause();
-
- _exit(0);
- }
- ASSERT_THAT(child_pid, SyscallSucceeds());
-
- // Wait for the child to send itself SIGSTOP and enter signal-delivery-stop.
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP)
- << " status " << status;
-
- // After this point, the child's pause syscall will be interrupted by saving,
- // so saving is not permitted. Note that this is explicitly released below
- // once the child is stopped.
- DisableSave ds;
-
- // Suppress the SIGSTOP and wait for the child to enter syscall-enter-stop for
- // its pause syscall.
- ASSERT_THAT(ptrace(PTRACE_SYSCALL, child_pid, 0, 0), SyscallSucceeds());
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP)
- << " status " << status;
-
- struct user_regs_struct regs = {};
- ASSERT_THAT(ptrace(PTRACE_GETREGS, child_pid, 0, &regs), SyscallSucceeds());
- EXPECT_EQ(SYS_pause, regs.orig_rax);
- EXPECT_EQ(-ENOSYS, regs.rax);
-
- // Resume the child with PTRACE_SYSCALL and expect it to block in the pause
- // syscall.
- ASSERT_THAT(ptrace(PTRACE_SYSCALL, child_pid, 0, 0), SyscallSucceeds());
- absl::SleepFor(absl::Seconds(1));
- ASSERT_THAT(waitpid(child_pid, &status, WNOHANG),
- SyscallSucceedsWithValue(0));
-
- // Send the child kSigno, causing it to return ERESTARTNOHAND and enter
- // syscall-exit-stop from the pause syscall.
- constexpr int ERESTARTNOHAND = 514;
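- // (ERESTARTNOHAND is a kernel-internal restart code, visible only to a
- // tracer at syscall-exit. Since kSigno is ignored and invokes no handler,
- // the kernel would normally restart pause(); overwriting the return value
- // below prevents that restart.)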
- ASSERT_THAT(kill(child_pid, kSigno), SyscallSucceeds());
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP)
- << " status " << status;
- ds.reset();
-
- ASSERT_THAT(ptrace(PTRACE_GETREGS, child_pid, 0, &regs), SyscallSucceeds());
- EXPECT_EQ(SYS_pause, regs.orig_rax);
- EXPECT_EQ(-ERESTARTNOHAND, regs.rax);
-
- // Replace the return value from pause with 0, causing pause to not be
- // restarted despite kSigno being ignored.
- ASSERT_THAT(ptrace(PTRACE_POKEUSER, child_pid,
- offsetof(struct user_regs_struct, rax), 0),
- SyscallSucceeds());
-
- // Detach from the child and wait for it to exit.
- ASSERT_THAT(ptrace(PTRACE_DETACH, child_pid, 0, 0), SyscallSucceeds());
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << " status " << status;
-}
-#endif // defined(__x86_64__)
-
-TEST(PtraceTest, Seize_Interrupt_Listen) {
- volatile long child_should_spin = 1;
- pid_t const child_pid = fork();
- if (child_pid == 0) {
- // In child process.
- while (child_should_spin) {
- SleepSafe(absl::Seconds(1));
- }
- _exit(1);
- }
-
- // In parent process.
- ASSERT_THAT(child_pid, SyscallSucceeds());
-
- // Attach to the child with PTRACE_SEIZE; doing so should not stop the child.
- ASSERT_THAT(ptrace(PTRACE_SEIZE, child_pid, 0, 0), SyscallSucceeds());
- int status;
- EXPECT_THAT(waitpid(child_pid, &status, WNOHANG),
- SyscallSucceedsWithValue(0));
-
- // Stop the child with PTRACE_INTERRUPT.
- ASSERT_THAT(ptrace(PTRACE_INTERRUPT, child_pid, 0, 0), SyscallSucceeds());
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_EQ(SIGTRAP | (kPtraceEventStop << 8), status >> 8);
-
- // Unset child_should_spin; if the child ever runs again, it will leave the
- // spin loop and _exit(1), so the SIGTERM death checked below shows that it
- // stayed stopped.
- ASSERT_THAT(ptrace(PTRACE_POKEDATA, child_pid, &child_should_spin, 0),
- SyscallSucceeds());
-
- // Send SIGSTOP to the child, then resume it, allowing it to proceed to
- // signal-delivery-stop.
- ASSERT_THAT(kill(child_pid, SIGSTOP), SyscallSucceeds());
- ASSERT_THAT(ptrace(PTRACE_CONT, child_pid, 0, 0), SyscallSucceeds());
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP)
- << " status " << status;
-
- // Release the child from signal-delivery-stop without suppressing the
- // SIGSTOP, causing it to enter group-stop.
- ASSERT_THAT(ptrace(PTRACE_CONT, child_pid, 0, SIGSTOP), SyscallSucceeds());
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_EQ(SIGSTOP | (kPtraceEventStop << 8), status >> 8);
-
- // "The state of the tracee after PTRACE_LISTEN is somewhat of a gray area: it
- // is not in any ptrace-stop (ptrace commands won't work on it, and it will
- // deliver waitpid(2) notifications), but it also may be considered 'stopped'
- // because it is not executing instructions (is not scheduled), and if it was
- // in group-stop before PTRACE_LISTEN, it will not respond to signals until
- // SIGCONT is received." - ptrace(2).
- ASSERT_THAT(ptrace(PTRACE_LISTEN, child_pid, 0, 0), SyscallSucceeds());
- EXPECT_THAT(ptrace(PTRACE_CONT, child_pid, 0, 0),
- SyscallFailsWithErrno(ESRCH));
- EXPECT_THAT(waitpid(child_pid, &status, WNOHANG),
- SyscallSucceedsWithValue(0));
- EXPECT_THAT(kill(child_pid, SIGTERM), SyscallSucceeds());
- absl::SleepFor(absl::Seconds(1));
- EXPECT_THAT(waitpid(child_pid, &status, WNOHANG),
- SyscallSucceedsWithValue(0));
-
- // Send SIGCONT to the child, causing it to leave group-stop and re-trap due
- // to PTRACE_LISTEN.
- EXPECT_THAT(kill(child_pid, SIGCONT), SyscallSucceeds());
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_EQ(SIGTRAP | (kPtraceEventStop << 8), status >> 8);
-
- // Detach the child and expect it to exit due to the SIGTERM we sent while
- // it was stopped by PTRACE_LISTEN.
- ASSERT_THAT(ptrace(PTRACE_DETACH, child_pid, 0, 0), SyscallSucceeds());
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGTERM)
- << " status " << status;
-}
-
-TEST(PtraceTest, Interrupt_Listen_RequireSeize) {
- pid_t const child_pid = fork();
- if (child_pid == 0) {
- // In child process.
- TEST_PCHECK(ptrace(PTRACE_TRACEME, 0, 0, 0) == 0);
- MaybeSave();
- raise(SIGSTOP);
- _exit(0);
- }
- // In parent process.
- ASSERT_THAT(child_pid, SyscallSucceeds());
-
- // Wait for the child to send itself SIGSTOP and enter signal-delivery-stop.
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP)
- << " status " << status;
-
- // PTRACE_INTERRUPT and PTRACE_LISTEN should fail since the child wasn't
- // attached with PTRACE_SEIZE, leaving the child in signal-delivery-stop.
- EXPECT_THAT(ptrace(PTRACE_INTERRUPT, child_pid, 0, 0),
- SyscallFailsWithErrno(EIO));
- EXPECT_THAT(ptrace(PTRACE_LISTEN, child_pid, 0, 0),
- SyscallFailsWithErrno(EIO));
-
- // Suppress SIGSTOP and detach from the child, expecting it to exit normally.
- ASSERT_THAT(ptrace(PTRACE_DETACH, child_pid, 0, 0), SyscallSucceeds());
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << " status " << status;
-}
-
-TEST(PtraceTest, SeizeSetOptions) {
- pid_t const child_pid = fork();
- if (child_pid == 0) {
- // In child process.
- while (true) {
- SleepSafe(absl::Seconds(1));
- }
- }
-
- // In parent process.
- ASSERT_THAT(child_pid, SyscallSucceeds());
-
- // Attach to the child with PTRACE_SEIZE while setting PTRACE_O_TRACESYSGOOD.
- ASSERT_THAT(ptrace(PTRACE_SEIZE, child_pid, 0, PTRACE_O_TRACESYSGOOD),
- SyscallSucceeds());
-
- // Stop the child with PTRACE_INTERRUPT.
- ASSERT_THAT(ptrace(PTRACE_INTERRUPT, child_pid, 0, 0), SyscallSucceeds());
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_EQ(SIGTRAP | (kPtraceEventStop << 8), status >> 8);
-
- // Resume the child with PTRACE_SYSCALL and wait for it to enter
- // syscall-enter-stop. The stop signal status from the syscall stop should be
- // SIGTRAP|0x80, reflecting PTRACE_O_TRACESYSGOOD.
- ASSERT_THAT(ptrace(PTRACE_SYSCALL, child_pid, 0, 0), SyscallSucceeds());
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == (SIGTRAP | 0x80))
- << " status " << status;
-
- // Clean up the child.
- ASSERT_THAT(kill(child_pid, SIGKILL), SyscallSucceeds());
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- if (WIFSTOPPED(status) && WSTOPSIG(status) == (SIGTRAP | 0x80)) {
- // "SIGKILL kills even within system calls (syscall-exit-stop is not
- // generated prior to death by SIGKILL). The net effect is that SIGKILL
- // always kills the process (all its threads), even if some threads of the
- // process are ptraced." - ptrace(2). This is technically true, but...
- //
- // When we send SIGKILL to the child, kernel/signal.c:complete_signal() =>
- // signal_wake_up(resume=1) kicks the tracee out of the syscall-enter-stop.
- // The pending SIGKILL causes the syscall to be skipped, but the child
- // thread still reports syscall-exit before checking for pending signals; in
- // current kernels, this is
- // arch/x86/entry/common.c:syscall_return_slowpath() =>
- // syscall_slow_exit_work() =>
- // include/linux/tracehook.h:tracehook_report_syscall_exit() =>
- // ptrace_report_syscall() => kernel/signal.c:ptrace_notify() =>
- // ptrace_do_notify() => ptrace_stop().
- //
- // ptrace_stop() sets the task's state to TASK_TRACED and the task's
- // exit_code to SIGTRAP|0x80 (passed by ptrace_report_syscall()), then calls
- // freezable_schedule(). freezable_schedule() eventually reaches
- // __schedule(), which detects signal_pending_state() due to the pending
- // SIGKILL, sets the task's state back to TASK_RUNNING, and returns without
- // descheduling. Thus, the task never enters syscall-exit-stop. However, if
- // our wait4() => kernel/exit.c:wait_task_stopped() racily observes the
- // TASK_TRACED state and the non-zero exit code set by ptrace_stop() before
- // __schedule() sets the state back to TASK_RUNNING, it will return the
- // task's exit_code as status W_STOPCODE(SIGTRAP|0x80). So we get a spurious
- // syscall-exit-stop notification, and need to wait4() again for task exit.
- //
- // gVisor is not susceptible to this race because
- // kernel.Task.waitCollectTraceeStopLocked() checks specifically for an
- // active ptraceStop, which is not initiated if SIGKILL is pending.
- std::cout << "Observed syscall-exit after SIGKILL" << std::endl;
- ASSERT_THAT(waitpid(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- }
- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL)
- << " status " << status;
-}
-
-TEST(PtraceTest, SetYAMAPtraceScope) {
- SKIP_IF(IsRunningWithVFS1());
-
- // Do not modify the ptrace scope on the host.
- SKIP_IF(!IsRunningOnGvisor());
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
-
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(std::string(kYamaPtraceScopePath), O_RDWR));
-
- ASSERT_THAT(write(fd.get(), "0", 1), SyscallSucceedsWithValue(1));
-
- ASSERT_THAT(lseek(fd.get(), 0, SEEK_SET), SyscallSucceeds());
- std::vector<char> buf(10);
- EXPECT_THAT(read(fd.get(), buf.data(), buf.size()), SyscallSucceeds());
- EXPECT_STREQ(buf.data(), "0\n");
-
- // Test that a child can attach to its parent when ptrace_scope is 0.
- AutoCapability cap(CAP_SYS_PTRACE, false);
- pid_t const child_pid = fork();
- if (child_pid == 0) {
- TEST_PCHECK(CheckPtraceAttach(getppid()) == 0);
- _exit(0);
- }
- ASSERT_THAT(child_pid, SyscallSucceeds());
-
- int status;
- ASSERT_THAT(waitpid(child_pid, &status, 0), SyscallSucceeds());
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << " status " << status;
-
- // Set ptrace_scope back to 1 (and try writing with a newline).
- ASSERT_THAT(lseek(fd.get(), 0, SEEK_SET), SyscallSucceeds());
- ASSERT_THAT(write(fd.get(), "1\n", 2), SyscallSucceedsWithValue(2));
-
- ASSERT_THAT(lseek(fd.get(), 0, SEEK_SET), SyscallSucceeds());
- EXPECT_THAT(read(fd.get(), buf.data(), buf.size()), SyscallSucceeds());
- EXPECT_STREQ(buf.data(), "1\n");
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
-
-int main(int argc, char** argv) {
- gvisor::testing::TestInit(&argc, &argv);
-
- if (absl::GetFlag(FLAGS_ptrace_test_execve_child)) {
- gvisor::testing::RunExecveChild();
- }
-
- int fd = absl::GetFlag(FLAGS_ptrace_test_fd);
-
- if (absl::GetFlag(FLAGS_ptrace_test_trace_descendants_allowed)) {
- gvisor::testing::RunTraceDescendantsAllowed(fd);
- }
-
- if (absl::GetFlag(FLAGS_ptrace_test_ptrace_attacher)) {
- gvisor::testing::RunPtraceAttacher(fd);
- }
-
- if (absl::GetFlag(FLAGS_ptrace_test_prctl_set_ptracer)) {
- gvisor::testing::RunPrctlSetPtracer(fd);
- }
-
- if (absl::GetFlag(
- FLAGS_ptrace_test_prctl_set_ptracer_and_exit_tracee_thread)) {
- gvisor::testing::RunPrctlSetPtracerPersistsPastTraceeThreadExit(fd);
- }
-
- if (absl::GetFlag(FLAGS_ptrace_test_prctl_set_ptracer_and_exec_non_leader)) {
- gvisor::testing::RunPrctlSetPtracerDoesNotPersistPastNonLeaderExec(
- fd);
- }
-
- if (absl::GetFlag(
- FLAGS_ptrace_test_prctl_set_ptracer_and_exit_tracer_thread)) {
- gvisor::testing::RunPrctlSetPtracerDoesNotPersistPastTracerThreadExit(
- absl::GetFlag(
- FLAGS_ptrace_test_prctl_set_ptracer_and_exit_tracer_thread_tid),
- fd);
- }
-
- if (absl::GetFlag(
- FLAGS_ptrace_test_prctl_set_ptracer_respects_tracer_thread_id)) {
- gvisor::testing::RunPrctlSetPtracerRespectsTracerThreadID(
- absl::GetFlag(
- FLAGS_ptrace_test_prctl_set_ptracer_respects_tracer_thread_id_tid),
- fd);
- }
-
- if (absl::GetFlag(FLAGS_ptrace_test_tracee)) {
- gvisor::testing::RunTracee(fd);
- }
-
- int pid = absl::GetFlag(FLAGS_ptrace_test_trace_tid);
- if (pid != -1) {
- gvisor::testing::RunTraceTID(pid, fd);
- }
-
- return gvisor::testing::RunAllTests();
-}
diff --git a/test/syscalls/linux/pty.cc b/test/syscalls/linux/pty.cc
deleted file mode 100644
index 5ff1f12a0..000000000
--- a/test/syscalls/linux/pty.cc
+++ /dev/null
@@ -1,1759 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <linux/capability.h>
-#include <linux/major.h>
-#include <poll.h>
-#include <sched.h>
-#include <signal.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <sys/sysmacros.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <termios.h>
-#include <unistd.h>
-
-#include <iostream>
-
-#include "gtest/gtest.h"
-#include "absl/base/macros.h"
-#include "absl/strings/str_cat.h"
-#include "absl/synchronization/notification.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/util/capability_util.h"
-#include "test/util/cleanup.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/posix_error.h"
-#include "test/util/pty_util.h"
-#include "test/util/signal_util.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-using ::testing::AnyOf;
-using ::testing::Contains;
-using ::testing::Eq;
-using ::testing::Not;
-using SubprocessCallback = std::function<void()>;
-
-// Tests Unix98 pseudoterminals.
-//
-// These tests assume that /dev/ptmx exists and is associated with a devpts
-// filesystem mounted at /dev/pts/. While a Linux distribution could
-// theoretically place those anywhere, glibc expects those locations, so they
-// are effectively fixed.
-
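-// For reference, a minimal sketch (illustration only, bypassing the pty_util
-// helpers and assuming the standard devpts layout described above) of opening
-// a master/replica pair:
-//
-//   int master = open("/dev/ptmx", O_RDWR | O_NONBLOCK);
-//   int unlock = 0, index = 0;
-//   ioctl(master, TIOCSPTLCK, &unlock);  // unlock the replica end
-//   ioctl(master, TIOCGPTN, &index);     // replica is /dev/pts/<index>
-//   int replica = open(absl::StrCat("/dev/pts/", index).c_str(),
-//                      O_RDWR | O_NONBLOCK);
-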
-// Minor device number for an unopened ptmx file.
-constexpr int kPtmxMinor = 2;
-
-// The timeout when polling for data from a pty. When data is written to one end
-// of a pty, Linux asynchronously makes it available to the other end, so we
-// have to wait.
-constexpr absl::Duration kTimeout = absl::Seconds(20);
-
-// The maximum line size in bytes returned per read from a pty file.
-constexpr int kMaxLineSize = 4096;
-
-constexpr char kMasterPath[] = "/dev/ptmx";
-
-// glibc defines its own, different, version of struct termios. We care about
-// what the kernel does, not glibc.
-#define KERNEL_NCCS 19
-struct kernel_termios {
- tcflag_t c_iflag;
- tcflag_t c_oflag;
- tcflag_t c_cflag;
- tcflag_t c_lflag;
- cc_t c_line;
- cc_t c_cc[KERNEL_NCCS];
-};
-
-bool operator==(struct kernel_termios const& a,
- struct kernel_termios const& b) {
- return memcmp(&a, &b, sizeof(a)) == 0;
-}
-
-// Returns the termios-style control character for the passed character.
-//
-// e.g., for Ctrl-C, i.e., ^C, call ControlCharacter('C').
-//
-// Standard control characters are ASCII bytes 0 through 31.
-constexpr char ControlCharacter(char c) {
- // A is 1, B is 2, etc.
- return c - 'A' + 1;
-}
-
-// Returns the printable character the given control character represents.
-constexpr char FromControlCharacter(char c) { return c + 'A' - 1; }
-
-// Returns true if c is a control character.
-//
-// Standard control characters are ASCII bytes 0 through 31.
-constexpr bool IsControlCharacter(char c) { return c <= 31; }
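-
-// Illustrative compile-time checks of the helpers above (not required by any
-// test below).
-static_assert(ControlCharacter('C') == 3, "^C is byte 3");
-static_assert(FromControlCharacter(ControlCharacter('Q')) == 'Q',
-              "control-character helpers round trip");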
-
-struct Field {
- const char* name;
- uint64_t mask;
- uint64_t value;
-};
-
-// ParseFields returns a string representation of value, using the names in
-// fields.
-std::string ParseFields(const Field* fields, size_t len, uint64_t value) {
- bool first = true;
- std::string s;
- for (size_t i = 0; i < len; i++) {
- const Field f = fields[i];
- if ((value & f.mask) == f.value) {
- if (!first) {
- s += "|";
- }
- s += f.name;
- first = false;
- value &= ~f.mask;
- }
- }
-
- if (value) {
- if (!first) {
- s += "|";
- }
- absl::StrAppend(&s, value);
- }
-
- return s;
-}
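-
-// For example (illustrative), with the kLflagFields table defined below,
-// ParseFields(kLflagFields, ABSL_ARRAYSIZE(kLflagFields), ISIG | ICANON)
-// returns "ISIG|ICANON"; any leftover unrecognized bits are appended
-// numerically.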
-
-const Field kIflagFields[] = {
- {"IGNBRK", IGNBRK, IGNBRK}, {"BRKINT", BRKINT, BRKINT},
- {"IGNPAR", IGNPAR, IGNPAR}, {"PARMRK", PARMRK, PARMRK},
- {"INPCK", INPCK, INPCK}, {"ISTRIP", ISTRIP, ISTRIP},
- {"INLCR", INLCR, INLCR}, {"IGNCR", IGNCR, IGNCR},
- {"ICRNL", ICRNL, ICRNL}, {"IUCLC", IUCLC, IUCLC},
- {"IXON", IXON, IXON}, {"IXANY", IXANY, IXANY},
- {"IXOFF", IXOFF, IXOFF}, {"IMAXBEL", IMAXBEL, IMAXBEL},
- {"IUTF8", IUTF8, IUTF8},
-};
-
-const Field kOflagFields[] = {
- {"OPOST", OPOST, OPOST}, {"OLCUC", OLCUC, OLCUC},
- {"ONLCR", ONLCR, ONLCR}, {"OCRNL", OCRNL, OCRNL},
- {"ONOCR", ONOCR, ONOCR}, {"ONLRET", ONLRET, ONLRET},
- {"OFILL", OFILL, OFILL}, {"OFDEL", OFDEL, OFDEL},
- {"NL0", NLDLY, NL0}, {"NL1", NLDLY, NL1},
- {"CR0", CRDLY, CR0}, {"CR1", CRDLY, CR1},
- {"CR2", CRDLY, CR2}, {"CR3", CRDLY, CR3},
- {"TAB0", TABDLY, TAB0}, {"TAB1", TABDLY, TAB1},
- {"TAB2", TABDLY, TAB2}, {"TAB3", TABDLY, TAB3},
- {"BS0", BSDLY, BS0}, {"BS1", BSDLY, BS1},
- {"FF0", FFDLY, FF0}, {"FF1", FFDLY, FF1},
- {"VT0", VTDLY, VT0}, {"VT1", VTDLY, VT1},
- {"XTABS", XTABS, XTABS},
-};
-
-#ifndef IBSHIFT
-// Shift from CBAUD to CIBAUD.
-#define IBSHIFT 16
-#endif
-
-const Field kCflagFields[] = {
- {"B0", CBAUD, B0},
- {"B50", CBAUD, B50},
- {"B75", CBAUD, B75},
- {"B110", CBAUD, B110},
- {"B134", CBAUD, B134},
- {"B150", CBAUD, B150},
- {"B200", CBAUD, B200},
- {"B300", CBAUD, B300},
- {"B600", CBAUD, B600},
- {"B1200", CBAUD, B1200},
- {"B1800", CBAUD, B1800},
- {"B2400", CBAUD, B2400},
- {"B4800", CBAUD, B4800},
- {"B9600", CBAUD, B9600},
- {"B19200", CBAUD, B19200},
- {"B38400", CBAUD, B38400},
- {"CS5", CSIZE, CS5},
- {"CS6", CSIZE, CS6},
- {"CS7", CSIZE, CS7},
- {"CS8", CSIZE, CS8},
- {"CSTOPB", CSTOPB, CSTOPB},
- {"CREAD", CREAD, CREAD},
- {"PARENB", PARENB, PARENB},
- {"PARODD", PARODD, PARODD},
- {"HUPCL", HUPCL, HUPCL},
- {"CLOCAL", CLOCAL, CLOCAL},
- {"B57600", CBAUD, B57600},
- {"B115200", CBAUD, B115200},
- {"B230400", CBAUD, B230400},
- {"B460800", CBAUD, B460800},
- {"B500000", CBAUD, B500000},
- {"B576000", CBAUD, B576000},
- {"B921600", CBAUD, B921600},
- {"B1000000", CBAUD, B1000000},
- {"B1152000", CBAUD, B1152000},
- {"B1500000", CBAUD, B1500000},
- {"B2000000", CBAUD, B2000000},
- {"B2500000", CBAUD, B2500000},
- {"B3000000", CBAUD, B3000000},
- {"B3500000", CBAUD, B3500000},
- {"B4000000", CBAUD, B4000000},
- {"CMSPAR", CMSPAR, CMSPAR},
- {"CRTSCTS", CRTSCTS, CRTSCTS},
- {"IB0", CIBAUD, B0 << IBSHIFT},
- {"IB50", CIBAUD, B50 << IBSHIFT},
- {"IB75", CIBAUD, B75 << IBSHIFT},
- {"IB110", CIBAUD, B110 << IBSHIFT},
- {"IB134", CIBAUD, B134 << IBSHIFT},
- {"IB150", CIBAUD, B150 << IBSHIFT},
- {"IB200", CIBAUD, B200 << IBSHIFT},
- {"IB300", CIBAUD, B300 << IBSHIFT},
- {"IB600", CIBAUD, B600 << IBSHIFT},
- {"IB1200", CIBAUD, B1200 << IBSHIFT},
- {"IB1800", CIBAUD, B1800 << IBSHIFT},
- {"IB2400", CIBAUD, B2400 << IBSHIFT},
- {"IB4800", CIBAUD, B4800 << IBSHIFT},
- {"IB9600", CIBAUD, B9600 << IBSHIFT},
- {"IB19200", CIBAUD, B19200 << IBSHIFT},
- {"IB38400", CIBAUD, B38400 << IBSHIFT},
- {"IB57600", CIBAUD, B57600 << IBSHIFT},
- {"IB115200", CIBAUD, B115200 << IBSHIFT},
- {"IB230400", CIBAUD, B230400 << IBSHIFT},
- {"IB460800", CIBAUD, B460800 << IBSHIFT},
- {"IB500000", CIBAUD, B500000 << IBSHIFT},
- {"IB576000", CIBAUD, B576000 << IBSHIFT},
- {"IB921600", CIBAUD, B921600 << IBSHIFT},
- {"IB1000000", CIBAUD, B1000000 << IBSHIFT},
- {"IB1152000", CIBAUD, B1152000 << IBSHIFT},
- {"IB1500000", CIBAUD, B1500000 << IBSHIFT},
- {"IB2000000", CIBAUD, B2000000 << IBSHIFT},
- {"IB2500000", CIBAUD, B2500000 << IBSHIFT},
- {"IB3000000", CIBAUD, B3000000 << IBSHIFT},
- {"IB3500000", CIBAUD, B3500000 << IBSHIFT},
- {"IB4000000", CIBAUD, B4000000 << IBSHIFT},
-};
-
-const Field kLflagFields[] = {
- {"ISIG", ISIG, ISIG}, {"ICANON", ICANON, ICANON},
- {"XCASE", XCASE, XCASE}, {"ECHO", ECHO, ECHO},
- {"ECHOE", ECHOE, ECHOE}, {"ECHOK", ECHOK, ECHOK},
- {"ECHONL", ECHONL, ECHONL}, {"NOFLSH", NOFLSH, NOFLSH},
- {"TOSTOP", TOSTOP, TOSTOP}, {"ECHOCTL", ECHOCTL, ECHOCTL},
- {"ECHOPRT", ECHOPRT, ECHOPRT}, {"ECHOKE", ECHOKE, ECHOKE},
- {"FLUSHO", FLUSHO, FLUSHO}, {"PENDIN", PENDIN, PENDIN},
- {"IEXTEN", IEXTEN, IEXTEN}, {"EXTPROC", EXTPROC, EXTPROC},
-};
-
-std::string FormatCC(char c) {
- if (isgraph(c)) {
- return std::string(1, c);
- } else if (c == ' ') {
- return " ";
- } else if (c == '\t') {
- return "\\t";
- } else if (c == '\r') {
- return "\\r";
- } else if (c == '\n') {
- return "\\n";
- } else if (c == '\0') {
- return "\\0";
- } else if (IsControlCharacter(c)) {
- return absl::StrCat("^", std::string(1, FromControlCharacter(c)));
- }
- return absl::StrCat("\\x", absl::Hex(c));
-}
-
-std::ostream& operator<<(std::ostream& os, struct kernel_termios const& a) {
- os << "{ c_iflag = "
- << ParseFields(kIflagFields, ABSL_ARRAYSIZE(kIflagFields), a.c_iflag);
- os << ", c_oflag = "
- << ParseFields(kOflagFields, ABSL_ARRAYSIZE(kOflagFields), a.c_oflag);
- os << ", c_cflag = "
- << ParseFields(kCflagFields, ABSL_ARRAYSIZE(kCflagFields), a.c_cflag);
- os << ", c_lflag = "
- << ParseFields(kLflagFields, ABSL_ARRAYSIZE(kLflagFields), a.c_lflag);
- os << ", c_line = " << a.c_line;
- os << ", c_cc = { [VINTR] = '" << FormatCC(a.c_cc[VINTR]);
- os << "', [VQUIT] = '" << FormatCC(a.c_cc[VQUIT]);
- os << "', [VERASE] = '" << FormatCC(a.c_cc[VERASE]);
- os << "', [VKILL] = '" << FormatCC(a.c_cc[VKILL]);
- os << "', [VEOF] = '" << FormatCC(a.c_cc[VEOF]);
- os << "', [VTIME] = '" << static_cast<int>(a.c_cc[VTIME]);
- os << "', [VMIN] = " << static_cast<int>(a.c_cc[VMIN]);
- os << ", [VSWTC] = '" << FormatCC(a.c_cc[VSWTC]);
- os << "', [VSTART] = '" << FormatCC(a.c_cc[VSTART]);
- os << "', [VSTOP] = '" << FormatCC(a.c_cc[VSTOP]);
- os << "', [VSUSP] = '" << FormatCC(a.c_cc[VSUSP]);
- os << "', [VEOL] = '" << FormatCC(a.c_cc[VEOL]);
- os << "', [VREPRINT] = '" << FormatCC(a.c_cc[VREPRINT]);
- os << "', [VDISCARD] = '" << FormatCC(a.c_cc[VDISCARD]);
- os << "', [VWERASE] = '" << FormatCC(a.c_cc[VWERASE]);
- os << "', [VLNEXT] = '" << FormatCC(a.c_cc[VLNEXT]);
- os << "', [VEOL2] = '" << FormatCC(a.c_cc[VEOL2]);
- os << "'}";
- return os;
-}
-
-// Return the default termios settings for a new terminal.
-struct kernel_termios DefaultTermios() {
- struct kernel_termios t = {};
- t.c_iflag = IXON | ICRNL;
- t.c_oflag = OPOST | ONLCR;
- t.c_cflag = B38400 | CSIZE | CS8 | CREAD;
- t.c_lflag = ISIG | ICANON | ECHO | ECHOE | ECHOK | ECHOCTL | ECHOKE | IEXTEN;
- t.c_line = 0;
- t.c_cc[VINTR] = ControlCharacter('C');
- t.c_cc[VQUIT] = ControlCharacter('\\');
- t.c_cc[VERASE] = '\x7f';
- t.c_cc[VKILL] = ControlCharacter('U');
- t.c_cc[VEOF] = ControlCharacter('D');
- t.c_cc[VTIME] = '\0';
- t.c_cc[VMIN] = 1;
- t.c_cc[VSWTC] = '\0';
- t.c_cc[VSTART] = ControlCharacter('Q');
- t.c_cc[VSTOP] = ControlCharacter('S');
- t.c_cc[VSUSP] = ControlCharacter('Z');
- t.c_cc[VEOL] = '\0';
- t.c_cc[VREPRINT] = ControlCharacter('R');
- t.c_cc[VDISCARD] = ControlCharacter('O');
- t.c_cc[VWERASE] = ControlCharacter('W');
- t.c_cc[VLNEXT] = ControlCharacter('V');
- t.c_cc[VEOL2] = '\0';
- return t;
-}
-
-// PollAndReadFd tries to read count bytes from buf within timeout.
-//
-// Returns a partial read if some bytes were read.
-//
-// fd must be non-blocking.
-PosixErrorOr<size_t> PollAndReadFd(int fd, void* buf, size_t count,
- absl::Duration timeout) {
- absl::Time end = absl::Now() + timeout;
-
- size_t completed = 0;
- absl::Duration remaining;
- while ((remaining = end - absl::Now()) > absl::ZeroDuration()) {
- struct pollfd pfd = {fd, POLLIN, 0};
- int ret = RetryEINTR(poll)(&pfd, 1, absl::ToInt64Milliseconds(remaining));
- if (ret < 0) {
- return PosixError(errno, "poll failed");
- } else if (ret == 0) {
- // Timed out.
- continue;
- } else if (ret != 1) {
- return PosixError(EINVAL, absl::StrCat("Bad poll ret ", ret));
- }
-
- ssize_t n =
- ReadFd(fd, static_cast<char*>(buf) + completed, count - completed);
- if (n < 0) {
- if (errno == EAGAIN) {
- // Linux sometimes returns EAGAIN from this read, despite the fact that
- // poll returned success. Let's just do as we are told and try again.
- continue;
- }
- return PosixError(errno, "read failed");
- }
- completed += n;
- if (completed >= count) {
- return completed;
- }
- }
-
- if (completed) {
- return completed;
- }
- return PosixError(ETIMEDOUT, "Poll timed out");
-}
-
-TEST(PtyTrunc, Truncate) {
- SKIP_IF(IsRunningWithVFS1());
-
- // setsid either puts us in a new session or fails because we're already the
- // session leader. Either way, this ensures we're the session leader and have
- // no controlling terminal.
- ASSERT_THAT(setsid(), AnyOf(SyscallSucceeds(), SyscallFailsWithErrno(EPERM)));
-
- // Make sure we're ignoring SIGHUP, which will be sent to this process once we
- // disconnect the TTY.
- struct sigaction sa = {};
- sa.sa_handler = SIG_IGN;
- sa.sa_flags = 0;
- sigemptyset(&sa.sa_mask);
- const Cleanup cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGHUP, sa));
-
- // Opening PTYs with O_TRUNC shouldn't cause an error, but calls to
- // (f)truncate should.
- FileDescriptor master =
- ASSERT_NO_ERRNO_AND_VALUE(Open(kMasterPath, O_RDWR | O_TRUNC));
- int n = ASSERT_NO_ERRNO_AND_VALUE(ReplicaID(master));
- std::string spath = absl::StrCat("/dev/pts/", n);
- FileDescriptor replica =
- ASSERT_NO_ERRNO_AND_VALUE(Open(spath, O_RDWR | O_NONBLOCK | O_TRUNC));
- ASSERT_THAT(ioctl(replica.get(), TIOCNOTTY), SyscallSucceeds());
-
- EXPECT_THAT(truncate(kMasterPath, 0), SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(truncate(spath.c_str(), 0), SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(ftruncate(master.get(), 0), SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(ftruncate(replica.get(), 0), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(BasicPtyTest, StatUnopenedMaster) {
- struct stat s;
- ASSERT_THAT(stat(kMasterPath, &s), SyscallSucceeds());
-
- EXPECT_EQ(s.st_rdev, makedev(TTYAUX_MAJOR, kPtmxMinor));
- EXPECT_EQ(s.st_size, 0);
- EXPECT_EQ(s.st_blocks, 0);
-
- // ptmx attached to a specific devpts mount uses block size 1024. See
- // fs/devpts/inode.c:devpts_fill_super.
- //
- // The global ptmx device uses the block size of the filesystem it is created
- // on (which is usually 4096 for disk filesystems).
- EXPECT_THAT(s.st_blksize, AnyOf(Eq(1024), Eq(4096)));
-}
-
-// Waits for count bytes to be readable from fd. Unlike poll, which can return
-// before all data is moved into a pty's read buffer, this function waits for
-// all count bytes to become readable.
-PosixErrorOr<int> WaitUntilReceived(int fd, int count) {
- int buffered = -1;
- absl::Duration remaining;
- absl::Time end = absl::Now() + kTimeout;
- while ((remaining = end - absl::Now()) > absl::ZeroDuration()) {
- if (ioctl(fd, FIONREAD, &buffered) < 0) {
- return PosixError(errno, "failed FIONREAD ioctl");
- }
- if (buffered >= count) {
- return buffered;
- }
- absl::SleepFor(absl::Milliseconds(500));
- }
- return PosixError(
- ETIMEDOUT,
- absl::StrFormat(
- "FIONREAD timed out, receiving only %d of %d expected bytes",
- buffered, count));
-}
-
-// Verifies that there is nothing left to read from fd.
-void ExpectFinished(const FileDescriptor& fd) {
- // Nothing more to read.
- char c;
- EXPECT_THAT(ReadFd(fd.get(), &c, 1), SyscallFailsWithErrno(EAGAIN));
-}
-
-// Verifies that we can read expected bytes from fd into buf.
-void ExpectReadable(const FileDescriptor& fd, int expected, char* buf) {
- size_t n = ASSERT_NO_ERRNO_AND_VALUE(
- PollAndReadFd(fd.get(), buf, expected, kTimeout));
- EXPECT_EQ(expected, n);
-}
-
-TEST(BasicPtyTest, OpenMasterReplica) {
- FileDescriptor master = ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/ptmx", O_RDWR));
- FileDescriptor replica = ASSERT_NO_ERRNO_AND_VALUE(OpenReplica(master));
-}
-
-TEST(BasicPtyTest, OpenSetsControllingTTY) {
- SKIP_IF(IsRunningWithVFS1());
- // setsid either puts us in a new session or fails because we're already the
- // session leader. Either way, this ensures we're the session leader.
- ASSERT_THAT(setsid(), AnyOf(SyscallSucceeds(), SyscallFailsWithErrno(EPERM)));
-
- // Make sure we're ignoring SIGHUP, which will be sent to this process once we
- // disconnect the TTY.
- struct sigaction sa = {};
- sa.sa_handler = SIG_IGN;
- sa.sa_flags = 0;
- sigemptyset(&sa.sa_mask);
- struct sigaction old_sa;
- ASSERT_THAT(sigaction(SIGHUP, &sa, &old_sa), SyscallSucceeds());
- auto cleanup = Cleanup([old_sa] {
- EXPECT_THAT(sigaction(SIGHUP, &old_sa, NULL), SyscallSucceeds());
- });
-
- FileDescriptor master = ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/ptmx", O_RDWR));
- FileDescriptor replica =
- ASSERT_NO_ERRNO_AND_VALUE(OpenReplica(master, O_NONBLOCK | O_RDWR));
-
- // Opening replica should make it our controlling TTY, and therefore we are
- // able to give it up.
- ASSERT_THAT(ioctl(replica.get(), TIOCNOTTY), SyscallSucceeds());
-}
-
-TEST(BasicPtyTest, OpenMasterDoesNotSetsControllingTTY) {
- SKIP_IF(IsRunningWithVFS1());
- // setsid either puts us in a new session or fails because we're already the
- // session leader. Either way, this ensures we're the session leader.
- ASSERT_THAT(setsid(), AnyOf(SyscallSucceeds(), SyscallFailsWithErrno(EPERM)));
- FileDescriptor master = ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/ptmx", O_RDWR));
-
- // Opening master does not set the controlling TTY, and therefore we are
- // unable to give it up.
- ASSERT_THAT(ioctl(master.get(), TIOCNOTTY), SyscallFailsWithErrno(ENOTTY));
-}
-
-TEST(BasicPtyTest, OpenNOCTTY) {
- SKIP_IF(IsRunningWithVFS1());
- // setsid either puts us in a new session or fails because we're already the
- // session leader. Either way, this ensures we're the session leader.
- ASSERT_THAT(setsid(), AnyOf(SyscallSucceeds(), SyscallFailsWithErrno(EPERM)));
- FileDescriptor master = ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/ptmx", O_RDWR));
- FileDescriptor replica = ASSERT_NO_ERRNO_AND_VALUE(
- OpenReplica(master, O_NOCTTY | O_NONBLOCK | O_RDWR));
-
- // Opening replica with O_NOCTTY won't make it our controlling TTY, and
- // therefore we are unable to give it up.
- ASSERT_THAT(ioctl(replica.get(), TIOCNOTTY), SyscallFailsWithErrno(ENOTTY));
-}
-
-// The replica entry in /dev/pts/ disappears when the master is closed, even if
-// the replica is still open.
-TEST(BasicPtyTest, ReplicaEntryGoneAfterMasterClose) {
- FileDescriptor master = ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/ptmx", O_RDWR));
- FileDescriptor replica = ASSERT_NO_ERRNO_AND_VALUE(OpenReplica(master));
-
- // Get pty index.
- int index = -1;
- ASSERT_THAT(ioctl(master.get(), TIOCGPTN, &index), SyscallSucceeds());
-
- std::string path = absl::StrCat("/dev/pts/", index);
-
- struct stat st;
- EXPECT_THAT(stat(path.c_str(), &st), SyscallSucceeds());
-
- master.reset();
-
- EXPECT_THAT(stat(path.c_str(), &st), SyscallFailsWithErrno(ENOENT));
-}
-
-TEST(BasicPtyTest, Getdents) {
- FileDescriptor master1 = ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/ptmx", O_RDWR));
- int index1 = -1;
- ASSERT_THAT(ioctl(master1.get(), TIOCGPTN, &index1), SyscallSucceeds());
- FileDescriptor replica1 = ASSERT_NO_ERRNO_AND_VALUE(OpenReplica(master1));
-
- FileDescriptor master2 = ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/ptmx", O_RDWR));
- int index2 = -1;
- ASSERT_THAT(ioctl(master2.get(), TIOCGPTN, &index2), SyscallSucceeds());
- FileDescriptor replica2 = ASSERT_NO_ERRNO_AND_VALUE(OpenReplica(master2));
-
- // The directory contains ptmx, index1, and index2. (Plus any additional PTYs
- // unrelated to this test.)
-
- std::vector<std::string> contents =
- ASSERT_NO_ERRNO_AND_VALUE(ListDir("/dev/pts/", true));
- EXPECT_THAT(contents, Contains(absl::StrCat(index1)));
- EXPECT_THAT(contents, Contains(absl::StrCat(index2)));
-
- master2.reset();
-
- // The directory contains ptmx and index1, but not index2 since the master is
- // closed. (Plus any additional PTYs unrelated to this test.)
-
- contents = ASSERT_NO_ERRNO_AND_VALUE(ListDir("/dev/pts/", true));
- EXPECT_THAT(contents, Contains(absl::StrCat(index1)));
- EXPECT_THAT(contents, Not(Contains(absl::StrCat(index2))));
-
- // N.B. devpts supports legacy "single-instance" mode and new "multi-instance"
- // mode. In legacy mode, devpts does not contain a "ptmx" device (the distro
- // must use mknod to create it somewhere, presumably /dev/ptmx).
- // Multi-instance mode does include a "ptmx" device tied to that mount.
- //
- // We don't check for the presence or absence of "ptmx", as distros vary in
- // their usage of the two modes.
-}
-
-class PtyTest : public ::testing::Test {
- protected:
- void SetUp() override {
- master_ = ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/ptmx", O_RDWR | O_NONBLOCK));
- replica_ = ASSERT_NO_ERRNO_AND_VALUE(OpenReplica(master_));
- }
-
- void DisableCanonical() {
- struct kernel_termios t = {};
- EXPECT_THAT(ioctl(replica_.get(), TCGETS, &t), SyscallSucceeds());
- t.c_lflag &= ~ICANON;
- EXPECT_THAT(ioctl(replica_.get(), TCSETS, &t), SyscallSucceeds());
- }
-
- void EnableCanonical() {
- struct kernel_termios t = {};
- EXPECT_THAT(ioctl(replica_.get(), TCGETS, &t), SyscallSucceeds());
- t.c_lflag |= ICANON;
- EXPECT_THAT(ioctl(replica_.get(), TCSETS, &t), SyscallSucceeds());
- }
-
- // Master and replica ends of the PTY. Non-blocking.
- FileDescriptor master_;
- FileDescriptor replica_;
-};
-
-// Master to replica sanity test.
-TEST_F(PtyTest, WriteMasterToReplica) {
- // N.B. by default, the replica reads nothing until the master writes a
- // newline.
- constexpr char kBuf[] = "hello\n";
-
- EXPECT_THAT(WriteFd(master_.get(), kBuf, sizeof(kBuf) - 1),
- SyscallSucceedsWithValue(sizeof(kBuf) - 1));
-
- // Linux moves data from the master to the replica via async work scheduled
- // via tty_flip_buffer_push. Since it is asynchronous, the data may not be
- // available for reading immediately. Instead we must poll and assert that it
- // becomes available "soon".
-
- char buf[sizeof(kBuf)] = {};
- ExpectReadable(replica_, sizeof(buf) - 1, buf);
-
- EXPECT_EQ(memcmp(buf, kBuf, sizeof(kBuf)), 0);
-}
-
-// Replica to master sanity test.
-TEST_F(PtyTest, WriteReplicaToMaster) {
- // N.B. by default, the master reads nothing until the replica writes a
- // newline, which ONLCR rewrites as "\r\n" on its way to the master.
- constexpr char kInput[] = "hello\n";
- constexpr char kExpected[] = "hello\r\n";
-
- EXPECT_THAT(WriteFd(replica_.get(), kInput, sizeof(kInput) - 1),
- SyscallSucceedsWithValue(sizeof(kInput) - 1));
-
- // Linux moves data between the two ends of the pty via asynchronously
- // scheduled work. Since it is asynchronous, the data may not be available
- // for reading immediately. Instead we must poll and assert that it becomes
- // available "soon".
-
- char buf[sizeof(kExpected)] = {};
- ExpectReadable(master_, sizeof(buf) - 1, buf);
-
- EXPECT_EQ(memcmp(buf, kExpected, sizeof(kExpected)), 0);
-}
-
-TEST_F(PtyTest, WriteInvalidUTF8) {
- char c = 0xff;
- ASSERT_THAT(syscall(__NR_write, master_.get(), &c, sizeof(c)),
- SyscallSucceedsWithValue(sizeof(c)));
-}
-
-// Both the master and replica report the standard default termios settings.
-//
-// Note that TCGETS on the master actually redirects to the replica (see comment
-// on MasterTermiosUnchangable).
-TEST_F(PtyTest, DefaultTermios) {
- struct kernel_termios t = {};
- EXPECT_THAT(ioctl(replica_.get(), TCGETS, &t), SyscallSucceeds());
- EXPECT_EQ(t, DefaultTermios());
-
- EXPECT_THAT(ioctl(master_.get(), TCGETS, &t), SyscallSucceeds());
- EXPECT_EQ(t, DefaultTermios());
-}
-
-// Changing termios from the master actually affects the replica.
-//
-// TCSETS on the master actually redirects to the replica (see comment on
-// MasterTermiosUnchangable).
-TEST_F(PtyTest, TermiosAffectsReplica) {
- struct kernel_termios master_termios = {};
- EXPECT_THAT(ioctl(master_.get(), TCGETS, &master_termios), SyscallSucceeds());
- master_termios.c_lflag ^= ICANON;
- EXPECT_THAT(ioctl(master_.get(), TCSETS, &master_termios), SyscallSucceeds());
-
- struct kernel_termios replica_termios = {};
- EXPECT_THAT(ioctl(replica_.get(), TCGETS, &replica_termios),
- SyscallSucceeds());
- EXPECT_EQ(master_termios, replica_termios);
-}
-
-// The master end of the pty has termios:
-//
-// struct kernel_termios t = {
-// .c_iflag = 0;
-// .c_oflag = 0;
-// .c_cflag = B38400 | CS8 | CREAD;
-// .c_lflag = 0;
-// .c_cc = /* same as DefaultTermios */
-// }
-//
-// (From drivers/tty/pty.c:unix98_pty_init)
-//
-// All termios control ioctls on the master actually redirect to the replica
-// (drivers/tty/tty_ioctl.c:tty_mode_ioctl), making it impossible to change the
-// master termios.
-//
-// Verify this by setting ICRNL (which rewrites input \r to \n) and verify that
-// it has no effect on the master.
-TEST_F(PtyTest, MasterTermiosUnchangable) {
- struct kernel_termios master_termios = {};
- EXPECT_THAT(ioctl(master_.get(), TCGETS, &master_termios), SyscallSucceeds());
- master_termios.c_lflag |= ICRNL;
- EXPECT_THAT(ioctl(master_.get(), TCSETS, &master_termios), SyscallSucceeds());
-
- char c = '\r';
- ASSERT_THAT(WriteFd(replica_.get(), &c, 1), SyscallSucceedsWithValue(1));
-
- ExpectReadable(master_, 1, &c);
- EXPECT_EQ(c, '\r'); // ICRNL had no effect!
-
- ExpectFinished(master_);
-}
-
-// ICRNL rewrites input \r to \n.
-TEST_F(PtyTest, TermiosICRNL) {
- struct kernel_termios t = DefaultTermios();
- t.c_iflag |= ICRNL;
- t.c_lflag &= ~ICANON; // for byte-by-byte reading.
- ASSERT_THAT(ioctl(replica_.get(), TCSETS, &t), SyscallSucceeds());
-
- char c = '\r';
- ASSERT_THAT(WriteFd(master_.get(), &c, 1), SyscallSucceedsWithValue(1));
-
- ExpectReadable(replica_, 1, &c);
- EXPECT_EQ(c, '\n');
-
- ExpectFinished(replica_);
-}
-
-// ONLCR rewrites output \n to \r\n.
-TEST_F(PtyTest, TermiosONLCR) {
- struct kernel_termios t = DefaultTermios();
- t.c_oflag |= ONLCR;
- t.c_lflag &= ~ICANON; // for byte-by-byte reading.
- ASSERT_THAT(ioctl(replica_.get(), TCSETS, &t), SyscallSucceeds());
-
- char c = '\n';
- ASSERT_THAT(WriteFd(replica_.get(), &c, 1), SyscallSucceedsWithValue(1));
-
- // Extra byte for NUL for EXPECT_STREQ.
- char buf[3] = {};
- ExpectReadable(master_, 2, buf);
- EXPECT_STREQ(buf, "\r\n");
-
- ExpectFinished(replica_);
-}
-
-TEST_F(PtyTest, TermiosIGNCR) {
- struct kernel_termios t = DefaultTermios();
- t.c_iflag |= IGNCR;
- t.c_lflag &= ~ICANON; // for byte-by-byte reading.
- ASSERT_THAT(ioctl(replica_.get(), TCSETS, &t), SyscallSucceeds());
-
- char c = '\r';
- ASSERT_THAT(WriteFd(master_.get(), &c, 1), SyscallSucceedsWithValue(1));
-
- // Nothing to read.
- ASSERT_THAT(PollAndReadFd(replica_.get(), &c, 1, kTimeout),
- PosixErrorIs(ETIMEDOUT, ::testing::StrEq("Poll timed out")));
-}
-
-// Test that we can successfully poll for readable data from the replica.
-TEST_F(PtyTest, TermiosPollReplica) {
- struct kernel_termios t = DefaultTermios();
- t.c_iflag |= IGNCR;
- t.c_lflag &= ~ICANON; // for byte-by-byte reading.
- ASSERT_THAT(ioctl(replica_.get(), TCSETS, &t), SyscallSucceeds());
-
- absl::Notification notify;
- int sfd = replica_.get();
- ScopedThread th([sfd, &notify]() {
- notify.Notify();
-
- // Poll on the reader fd with POLLIN event.
- struct pollfd poll_fd = {sfd, POLLIN, 0};
- EXPECT_THAT(
- RetryEINTR(poll)(&poll_fd, 1, absl::ToInt64Milliseconds(kTimeout)),
- SyscallSucceedsWithValue(1));
-
- // Should trigger POLLIN event.
- EXPECT_EQ(poll_fd.revents & POLLIN, POLLIN);
- });
-
- notify.WaitForNotification();
- // Sleep ensures that poll begins waiting before we write to the FD.
- absl::SleepFor(absl::Seconds(1));
-
- char s[] = "foo\n";
- ASSERT_THAT(WriteFd(master_.get(), s, strlen(s) + 1), SyscallSucceeds());
-}
-
-// Test that we can successfully poll for readable data from the master.
-TEST_F(PtyTest, TermiosPollMaster) {
- struct kernel_termios t = DefaultTermios();
- t.c_iflag |= IGNCR;
- t.c_lflag &= ~ICANON; // for byte-by-byte reading.
- ASSERT_THAT(ioctl(master_.get(), TCSETS, &t), SyscallSucceeds());
-
- absl::Notification notify;
- int mfd = master_.get();
- ScopedThread th([mfd, &notify]() {
- notify.Notify();
-
- // Poll on the reader fd with POLLIN event.
- struct pollfd poll_fd = {mfd, POLLIN, 0};
- EXPECT_THAT(
- RetryEINTR(poll)(&poll_fd, 1, absl::ToInt64Milliseconds(kTimeout)),
- SyscallSucceedsWithValue(1));
-
- // Should trigger POLLIN event.
- EXPECT_EQ(poll_fd.revents & POLLIN, POLLIN);
- });
-
- notify.WaitForNotification();
- // Sleep ensures that poll begins waiting before we write to the FD.
- absl::SleepFor(absl::Seconds(1));
-
- char s[] = "foo\n";
- ASSERT_THAT(WriteFd(replica_.get(), s, strlen(s) + 1), SyscallSucceeds());
-}
-
-TEST_F(PtyTest, TermiosINLCR) {
- struct kernel_termios t = DefaultTermios();
- t.c_iflag |= INLCR;
- t.c_lflag &= ~ICANON; // for byte-by-byte reading.
- ASSERT_THAT(ioctl(replica_.get(), TCSETS, &t), SyscallSucceeds());
-
- char c = '\n';
- ASSERT_THAT(WriteFd(master_.get(), &c, 1), SyscallSucceedsWithValue(1));
-
- ExpectReadable(replica_, 1, &c);
- EXPECT_EQ(c, '\r');
-
- ExpectFinished(replica_);
-}
-
-TEST_F(PtyTest, TermiosONOCR) {
- struct kernel_termios t = DefaultTermios();
- t.c_oflag |= ONOCR;
- t.c_lflag &= ~ICANON; // for byte-by-byte reading.
- ASSERT_THAT(ioctl(replica_.get(), TCSETS, &t), SyscallSucceeds());
-
- // The terminal is at column 0, so there should be no CR to read.
- char c = '\r';
- ASSERT_THAT(WriteFd(replica_.get(), &c, 1), SyscallSucceedsWithValue(1));
-
- // Nothing to read.
- ASSERT_THAT(PollAndReadFd(master_.get(), &c, 1, kTimeout),
- PosixErrorIs(ETIMEDOUT, ::testing::StrEq("Poll timed out")));
-
- // This time the column is greater than 0, so we should be able to read the CR
- // out of the other end.
- constexpr char kInput[] = "foo\r";
- constexpr int kInputSize = sizeof(kInput) - 1;
- ASSERT_THAT(WriteFd(replica_.get(), kInput, kInputSize),
- SyscallSucceedsWithValue(kInputSize));
-
- char buf[kInputSize] = {};
- ExpectReadable(master_, kInputSize, buf);
-
- EXPECT_EQ(memcmp(buf, kInput, kInputSize), 0);
-
- ExpectFinished(master_);
-
- // Terminal should be at column 0 again, so no CR can be read.
- ASSERT_THAT(WriteFd(replica_.get(), &c, 1), SyscallSucceedsWithValue(1));
-
- // Nothing to read.
- ASSERT_THAT(PollAndReadFd(master_.get(), &c, 1, kTimeout),
- PosixErrorIs(ETIMEDOUT, ::testing::StrEq("Poll timed out")));
-}
-
-TEST_F(PtyTest, TermiosOCRNL) {
- struct kernel_termios t = DefaultTermios();
- t.c_oflag |= OCRNL;
- t.c_lflag &= ~ICANON; // for byte-by-byte reading.
- ASSERT_THAT(ioctl(replica_.get(), TCSETS, &t), SyscallSucceeds());
-
- // The terminal is at column 0, so there should be no CR to read.
- char c = '\r';
- ASSERT_THAT(WriteFd(replica_.get(), &c, 1), SyscallSucceedsWithValue(1));
-
- ExpectReadable(master_, 1, &c);
- EXPECT_EQ(c, '\n');
-
- ExpectFinished(master_);
-}
-
-// Tests that VEOL is disabled when we start, and that we can set it to enable
-// it.
-TEST_F(PtyTest, VEOLTermination) {
- // Write a few bytes ending with '\0', and confirm that we can't read.
- constexpr char kInput[] = "hello";
- ASSERT_THAT(WriteFd(master_.get(), kInput, sizeof(kInput)),
- SyscallSucceedsWithValue(sizeof(kInput)));
- char buf[sizeof(kInput)] = {};
- ASSERT_THAT(PollAndReadFd(replica_.get(), buf, sizeof(kInput), kTimeout),
- PosixErrorIs(ETIMEDOUT, ::testing::StrEq("Poll timed out")));
-
- // Set the EOL character to '=' and write it.
- constexpr char delim = '=';
- struct kernel_termios t = DefaultTermios();
- t.c_cc[VEOL] = delim;
- ASSERT_THAT(ioctl(replica_.get(), TCSETS, &t), SyscallSucceeds());
- ASSERT_THAT(WriteFd(master_.get(), &delim, 1), SyscallSucceedsWithValue(1));
-
- // Now we can read, as sending EOL caused the line to become available.
- ExpectReadable(replica_, sizeof(kInput), buf);
- EXPECT_EQ(memcmp(buf, kInput, sizeof(kInput)), 0);
-
- ExpectReadable(replica_, 1, buf);
- EXPECT_EQ(buf[0], '=');
-
- ExpectFinished(replica_);
-}
-
-// Tests that we can write more than the 4096 character limit, then a
-// terminating character, then read out just the first 4095 bytes plus the
-// terminator.
-TEST_F(PtyTest, CanonBigWrite) {
- constexpr int kWriteLen = kMaxLineSize + 4;
- char input[kWriteLen];
- memset(input, 'M', kWriteLen - 1);
- input[kWriteLen - 1] = '\n';
- ASSERT_THAT(WriteFd(master_.get(), input, kWriteLen),
- SyscallSucceedsWithValue(kWriteLen));
-
- // We can read the line.
- char buf[kMaxLineSize] = {};
- ExpectReadable(replica_, kMaxLineSize, buf);
-
- ExpectFinished(replica_);
-}
-
-// Tests that data written in canonical mode can be read immediately once
-// switched to noncanonical mode.
-TEST_F(PtyTest, SwitchCanonToNoncanon) {
- // Write a few bytes without a terminating character, switch to noncanonical
- // mode, and read them.
- constexpr char kInput[] = "hello";
- ASSERT_THAT(WriteFd(master_.get(), kInput, sizeof(kInput)),
- SyscallSucceedsWithValue(sizeof(kInput)));
-
- // Nothing available yet.
- char buf[sizeof(kInput)] = {};
- ASSERT_THAT(PollAndReadFd(replica_.get(), buf, sizeof(kInput), kTimeout),
- PosixErrorIs(ETIMEDOUT, ::testing::StrEq("Poll timed out")));
-
- DisableCanonical();
-
- ExpectReadable(replica_, sizeof(kInput), buf);
- EXPECT_STREQ(buf, kInput);
-
- ExpectFinished(replica_);
-}
-
-TEST_F(PtyTest, SwitchCanonToNonCanonNewline) {
- // Write a few bytes with a terminating character.
- constexpr char kInput[] = "hello\n";
- ASSERT_THAT(WriteFd(master_.get(), kInput, sizeof(kInput)),
- SyscallSucceedsWithValue(sizeof(kInput)));
-
- DisableCanonical();
-
- // We can read the line.
- char buf[sizeof(kInput)] = {};
- ExpectReadable(replica_, sizeof(kInput), buf);
- EXPECT_STREQ(buf, kInput);
-
- ExpectFinished(replica_);
-}
-
-TEST_F(PtyTest, SwitchNoncanonToCanonNewlineBig) {
- DisableCanonical();
-
- // Write more than the maximum line size, then write a delimiter.
- constexpr int kWriteLen = 4100;
- char input[kWriteLen];
- memset(input, 'M', kWriteLen);
- ASSERT_THAT(WriteFd(master_.get(), input, kWriteLen),
- SyscallSucceedsWithValue(kWriteLen));
- // Wait for the input queue to fill.
- ASSERT_NO_ERRNO(WaitUntilReceived(replica_.get(), kMaxLineSize - 1));
- constexpr char delim = '\n';
- ASSERT_THAT(WriteFd(master_.get(), &delim, 1), SyscallSucceedsWithValue(1));
-
- EnableCanonical();
-
- // We can read the line.
- char buf[kMaxLineSize] = {};
- ExpectReadable(replica_, kMaxLineSize - 1, buf);
-
- // We can also read the remaining characters.
- ExpectReadable(replica_, 6, buf);
-
- ExpectFinished(replica_);
-}
-
-TEST_F(PtyTest, SwitchNoncanonToCanonNoNewline) {
- DisableCanonical();
-
- // Write a few bytes without a terminating character, switch to canonical
- // mode, and read them.
- constexpr char kInput[] = "hello";
- ASSERT_THAT(WriteFd(master_.get(), kInput, sizeof(kInput) - 1),
- SyscallSucceedsWithValue(sizeof(kInput) - 1));
-
- ASSERT_NO_ERRNO(WaitUntilReceived(replica_.get(), sizeof(kInput) - 1));
- EnableCanonical();
-
- // We can read the line.
- char buf[sizeof(kInput)] = {};
- ExpectReadable(replica_, sizeof(kInput) - 1, buf);
- EXPECT_STREQ(buf, kInput);
-
- ExpectFinished(replica_);
-}
-
-TEST_F(PtyTest, SwitchNoncanonToCanonNoNewlineBig) {
- DisableCanonical();
-
- // Write more than the maximum line size without a terminating character,
- // switch to canonical mode, and read what fits in a line.
- constexpr int kWriteLen = 4100;
- char input[kWriteLen];
- memset(input, 'M', kWriteLen);
- ASSERT_THAT(WriteFd(master_.get(), input, kWriteLen),
- SyscallSucceedsWithValue(kWriteLen));
-
- ASSERT_NO_ERRNO(WaitUntilReceived(replica_.get(), kMaxLineSize - 1));
- EnableCanonical();
-
- // We can read the line.
- char buf[kMaxLineSize] = {};
- ExpectReadable(replica_, kMaxLineSize - 1, buf);
-
- ExpectFinished(replica_);
-}
-
-// Tests that we can write over the 4095 noncanonical limit, then read out
-// everything.
-TEST_F(PtyTest, NoncanonBigWrite) {
- DisableCanonical();
-
- // Write well over the 4095 internal buffer limit.
- constexpr char kInput = 'M';
- constexpr int kInputSize = kMaxLineSize * 2;
- for (int i = 0; i < kInputSize; i++) {
- // This makes too many syscalls for save/restore.
- const DisableSave ds;
- ASSERT_THAT(WriteFd(master_.get(), &kInput, sizeof(kInput)),
- SyscallSucceedsWithValue(sizeof(kInput)));
- }
-
- // We should be able to read out everything. Wait until Linux has had a
- // chance to move data from the master to the replica.
- ASSERT_NO_ERRNO(WaitUntilReceived(replica_.get(), kMaxLineSize - 1));
- for (int i = 0; i < kInputSize; i++) {
- // This makes too many syscalls for save/restore.
- const DisableSave ds;
- char c;
- ExpectReadable(replica_, 1, &c);
- ASSERT_EQ(c, kInput);
- }
-
- ExpectFinished(replica_);
-}
-
-// ICANON doesn't make input available until a line delimiter is typed.
-//
-// Test newline.
-TEST_F(PtyTest, TermiosICANONNewline) {
- char input[3] = {'a', 'b', 'c'};
- ASSERT_THAT(WriteFd(master_.get(), input, sizeof(input)),
- SyscallSucceedsWithValue(sizeof(input)));
-
- // Extra bytes for newline (written later) and NUL for EXPECT_STREQ.
- char buf[5] = {};
-
- // Nothing available yet.
- ASSERT_THAT(PollAndReadFd(replica_.get(), buf, sizeof(input), kTimeout),
- PosixErrorIs(ETIMEDOUT, ::testing::StrEq("Poll timed out")));
-
- char delim = '\n';
- ASSERT_THAT(WriteFd(master_.get(), &delim, 1), SyscallSucceedsWithValue(1));
-
- // Now it is available.
- ASSERT_NO_ERRNO(WaitUntilReceived(replica_.get(), sizeof(input) + 1));
- ExpectReadable(replica_, sizeof(input) + 1, buf);
- EXPECT_STREQ(buf, "abc\n");
-
- ExpectFinished(replica_);
-}
-
-// ICANON doesn't make input available until a line delimiter is typed.
-//
-// Test EOF (^D).
-TEST_F(PtyTest, TermiosICANONEOF) {
- char input[3] = {'a', 'b', 'c'};
- ASSERT_THAT(WriteFd(master_.get(), input, sizeof(input)),
- SyscallSucceedsWithValue(sizeof(input)));
-
- // Extra byte for NUL for EXPECT_STREQ.
- char buf[4] = {};
-
- // Nothing available yet.
- ASSERT_THAT(PollAndReadFd(replica_.get(), buf, sizeof(input), kTimeout),
- PosixErrorIs(ETIMEDOUT, ::testing::StrEq("Poll timed out")));
- char delim = ControlCharacter('D');
- ASSERT_THAT(WriteFd(master_.get(), &delim, 1), SyscallSucceedsWithValue(1));
-
- // Now it is available. Note that ^D is not included.
- ExpectReadable(replica_, sizeof(input), buf);
- EXPECT_STREQ(buf, "abc");
-
- ExpectFinished(replica_);
-}
-
-// ICANON limits us to 4096 bytes including a terminating character. Anything
-// after the 4095th character is discarded (although still processed for
-// signals and echoing).
-TEST_F(PtyTest, CanonDiscard) {
- constexpr char kInput = 'M';
- constexpr int kInputSize = 4100;
- constexpr int kIter = 3;
-
- // A few times write more than the 4096 character maximum, then a newline.
- constexpr char delim = '\n';
- for (int i = 0; i < kIter; i++) {
- // This makes too many syscalls for save/restore.
- const DisableSave ds;
- for (int i = 0; i < kInputSize; i++) {
- ASSERT_THAT(WriteFd(master_.get(), &kInput, sizeof(kInput)),
- SyscallSucceedsWithValue(sizeof(kInput)));
- }
- ASSERT_THAT(WriteFd(master_.get(), &delim, 1), SyscallSucceedsWithValue(1));
- }
-
- // There should be multiple truncated lines available to read.
- for (int i = 0; i < kIter; i++) {
- char buf[kInputSize] = {};
- ExpectReadable(replica_, kMaxLineSize, buf);
- EXPECT_EQ(buf[kMaxLineSize - 1], delim);
- EXPECT_EQ(buf[kMaxLineSize - 2], kInput);
- }
-
- ExpectFinished(replica_);
-}
-
-TEST_F(PtyTest, CanonMultiline) {
- constexpr char kInput1[] = "GO\n";
- constexpr char kInput2[] = "BLUE\n";
-
- // Write both lines.
- ASSERT_THAT(WriteFd(master_.get(), kInput1, sizeof(kInput1) - 1),
- SyscallSucceedsWithValue(sizeof(kInput1) - 1));
- ASSERT_THAT(WriteFd(master_.get(), kInput2, sizeof(kInput2) - 1),
- SyscallSucceedsWithValue(sizeof(kInput2) - 1));
-
- // Get the first line.
- char line1[8] = {};
- ExpectReadable(replica_, sizeof(kInput1) - 1, line1);
- EXPECT_STREQ(line1, kInput1);
-
- // Get the second line.
- char line2[8] = {};
- ExpectReadable(replica_, sizeof(kInput2) - 1, line2);
- EXPECT_STREQ(line2, kInput2);
-
- ExpectFinished(replica_);
-}
-
-TEST_F(PtyTest, SwitchNoncanonToCanonMultiline) {
- DisableCanonical();
-
- constexpr char kInput1[] = "GO\n";
- constexpr char kInput2[] = "BLUE\n";
- constexpr char kExpected[] = "GO\nBLUE\n";
-
- // Write both lines.
- ASSERT_THAT(WriteFd(master_.get(), kInput1, sizeof(kInput1) - 1),
- SyscallSucceedsWithValue(sizeof(kInput1) - 1));
- ASSERT_THAT(WriteFd(master_.get(), kInput2, sizeof(kInput2) - 1),
- SyscallSucceedsWithValue(sizeof(kInput2) - 1));
-
- ASSERT_NO_ERRNO(
- WaitUntilReceived(replica_.get(), sizeof(kInput1) + sizeof(kInput2) - 2));
- EnableCanonical();
-
- // Get all together as one line.
- char line[9] = {};
- ExpectReadable(replica_, 8, line);
- EXPECT_STREQ(line, kExpected);
-
- ExpectFinished(replica_);
-}
-
-TEST_F(PtyTest, SwitchTwiceMultiline) {
- std::string kInputs[] = {"GO\n", "BLUE\n", "!"};
- std::string kExpected = "GO\nBLUE\n!";
-
- // Write each line.
- for (const std::string& input : kInputs) {
- ASSERT_THAT(WriteFd(master_.get(), input.c_str(), input.size()),
- SyscallSucceedsWithValue(input.size()));
- }
-
- DisableCanonical();
- // All written characters have to make it into the input queue before
- // canonical mode is re-enabled. If the final '!' character hasn't been
- // enqueued before canonical mode is re-enabled, it won't be readable.
- ASSERT_NO_ERRNO(WaitUntilReceived(replica_.get(), kExpected.size()));
- EnableCanonical();
-
- // Get all together as one line.
- char line[10] = {};
- ExpectReadable(replica_, 9, line);
- EXPECT_STREQ(line, kExpected.c_str());
-
- ExpectFinished(replica_);
-}
-
-TEST_F(PtyTest, QueueSize) {
- // Write the line.
- constexpr char kInput1[] = "GO\n";
- ASSERT_THAT(WriteFd(master_.get(), kInput1, sizeof(kInput1) - 1),
- SyscallSucceedsWithValue(sizeof(kInput1) - 1));
- ASSERT_NO_ERRNO(WaitUntilReceived(replica_.get(), sizeof(kInput1) - 1));
-
- // Ensure that writing more (beyond what is readable) does not impact the
- // readable size.
- char input[kMaxLineSize];
- memset(input, 'M', kMaxLineSize);
- ASSERT_THAT(WriteFd(master_.get(), input, kMaxLineSize),
- SyscallSucceedsWithValue(kMaxLineSize));
- int inputBufSize = ASSERT_NO_ERRNO_AND_VALUE(
- WaitUntilReceived(replica_.get(), sizeof(kInput1) - 1));
- EXPECT_EQ(inputBufSize, sizeof(kInput1) - 1);
-}
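-
-// WaitUntilReceived (defined earlier in this file, not shown here) waits until
-// the replica reports the requested number of readable bytes. Such a helper
-// can be built on the FIONREAD/TIOCINQ ioctl, which reports how many bytes a
-// read could currently return; a minimal sketch (illustrative only, not the
-// original helper):
-int InputQueueBytes(int fd) {
- int n = 0;
- TEST_PCHECK(ioctl(fd, FIONREAD, &n) == 0);
- return n;
-}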
-
-TEST_F(PtyTest, PartialBadBuffer) {
- // Allocate 2 pages.
- void* addr = mmap(nullptr, 2 * kPageSize, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- ASSERT_NE(addr, MAP_FAILED);
- char* buf = reinterpret_cast<char*>(addr);
-
- // Guard the 2nd page for our read to run into.
- ASSERT_THAT(
- mprotect(reinterpret_cast<void*>(buf + kPageSize), kPageSize, PROT_NONE),
- SyscallSucceeds());
-
- // Leave only one free byte in the buffer.
- char* bad_buffer = buf + kPageSize - 1;
-
- // Write to the master.
- constexpr char kBuf[] = "hello\n";
- constexpr size_t size = sizeof(kBuf) - 1;
- EXPECT_THAT(WriteFd(master_.get(), kBuf, size),
- SyscallSucceedsWithValue(size));
-
- // Read from the replica into bad_buffer.
- ASSERT_NO_ERRNO(WaitUntilReceived(replica_.get(), size));
- // Before Linux 3b830a9c this returned EFAULT, but after that commit it
- // returns EAGAIN.
- EXPECT_THAT(
- ReadFd(replica_.get(), bad_buffer, size),
- AnyOf(SyscallFailsWithErrno(EFAULT), SyscallFailsWithErrno(EAGAIN)));
-
- EXPECT_THAT(munmap(addr, 2 * kPageSize), SyscallSucceeds()) << addr;
-}
-
-TEST_F(PtyTest, SimpleEcho) {
- constexpr char kInput[] = "Mr. Eko";
- EXPECT_THAT(WriteFd(master_.get(), kInput, strlen(kInput)),
- SyscallSucceedsWithValue(strlen(kInput)));
-
- char buf[100] = {};
- ExpectReadable(master_, strlen(kInput), buf);
-
- EXPECT_STREQ(buf, kInput);
- ExpectFinished(master_);
-}
-
-TEST_F(PtyTest, GetWindowSize) {
- struct winsize ws;
- ASSERT_THAT(ioctl(replica_.get(), TIOCGWINSZ, &ws), SyscallSucceeds());
- EXPECT_EQ(ws.ws_row, 0);
- EXPECT_EQ(ws.ws_col, 0);
-}
-
-TEST_F(PtyTest, SetReplicaWindowSize) {
- constexpr uint16_t kRows = 343;
- constexpr uint16_t kCols = 2401;
- struct winsize ws = {.ws_row = kRows, .ws_col = kCols};
- ASSERT_THAT(ioctl(replica_.get(), TIOCSWINSZ, &ws), SyscallSucceeds());
-
- struct winsize retrieved_ws = {};
- ASSERT_THAT(ioctl(master_.get(), TIOCGWINSZ, &retrieved_ws),
- SyscallSucceeds());
- EXPECT_EQ(retrieved_ws.ws_row, kRows);
- EXPECT_EQ(retrieved_ws.ws_col, kCols);
-}
-
-TEST_F(PtyTest, SetMasterWindowSize) {
- constexpr uint16_t kRows = 343;
- constexpr uint16_t kCols = 2401;
- struct winsize ws = {.ws_row = kRows, .ws_col = kCols};
- ASSERT_THAT(ioctl(master_.get(), TIOCSWINSZ, &ws), SyscallSucceeds());
-
- struct winsize retrieved_ws = {};
- ASSERT_THAT(ioctl(replica_.get(), TIOCGWINSZ, &retrieved_ws),
- SyscallSucceeds());
- EXPECT_EQ(retrieved_ws.ws_row, kRows);
- EXPECT_EQ(retrieved_ws.ws_col, kCols);
-}
-
-class JobControlTest : public ::testing::Test {
- protected:
- void SetUp() override {
- master_ = ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/ptmx", O_RDWR | O_NONBLOCK));
- replica_ = ASSERT_NO_ERRNO_AND_VALUE(OpenReplica(master_));
-
- // Make this a session leader, which also drops the controlling terminal.
- // In the gVisor test environment, this test will be run as the session
- // leader already (as the sentry init process).
- if (!IsRunningOnGvisor()) {
- // Ignore failure because setsid(2) fails if the process is already the
- // session leader.
- setsid();
- ioctl(replica_.get(), TIOCNOTTY);
- }
- }
-
- PosixError RunInChild(SubprocessCallback childFunc) {
- pid_t child = fork();
- if (!child) {
- childFunc();
- _exit(0);
- }
- int wstatus;
- if (waitpid(child, &wstatus, 0) != child) {
- return PosixError(
- errno, absl::StrCat("child failed with wait status: ", wstatus));
- }
- return PosixError(wstatus, "process returned");
- }
-
- // Master and replica ends of the PTY. Non-blocking.
- FileDescriptor master_;
- FileDescriptor replica_;
-};
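-
-// The tests below repeatedly acquire a controlling terminal with the same
-// sequence: become a session leader, claim the replica with TIOCSCTTY, and
-// later drop it with TIOCNOTTY. A minimal sketch of that sequence as a
-// standalone helper (illustrative only; the tests inline it instead):
-void AcquireAndReleaseCtty(int replica_fd) {
- TEST_PCHECK(setsid() >= 0); // New session, no controlling terminal yet.
- TEST_PCHECK(!ioctl(replica_fd, TIOCSCTTY, 0)); // Make the replica our ctty.
- TEST_PCHECK(!ioctl(replica_fd, TIOCNOTTY)); // Release it again.
-}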
-
-TEST_F(JobControlTest, SetTTYMaster) {
- auto res = RunInChild([=]() {
- TEST_PCHECK(setsid() >= 0);
- TEST_PCHECK(!ioctl(master_.get(), TIOCSCTTY, 0));
- });
- ASSERT_NO_ERRNO(res);
-}
-
-TEST_F(JobControlTest, SetTTY) {
- auto res = RunInChild([=]() {
- TEST_PCHECK(setsid() >= 0);
- TEST_PCHECK(!ioctl(replica_.get(), TIOCSCTTY, 0));
- });
- ASSERT_NO_ERRNO(res);
-}
-
-TEST_F(JobControlTest, SetTTYNonLeader) {
- // Fork a process that won't be the session leader.
- auto res =
- RunInChild([=]() { TEST_PCHECK(ioctl(replica_.get(), TIOCSCTTY, 0)); });
- ASSERT_NO_ERRNO(res);
-}
-
-TEST_F(JobControlTest, SetTTYBadArg) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
- auto res = RunInChild([=]() {
- TEST_PCHECK(setsid() >= 0);
- TEST_PCHECK(!ioctl(replica_.get(), TIOCSCTTY, 1));
- });
- ASSERT_NO_ERRNO(res);
-}
-
-TEST_F(JobControlTest, SetTTYDifferentSession) {
- SKIP_IF(ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
-
- auto res = RunInChild([=]() {
- TEST_PCHECK(setsid() >= 0);
- TEST_PCHECK(!ioctl(replica_.get(), TIOCSCTTY, 1));
-
- // Fork, join a new session, and try to steal the parent's controlling
- // terminal, which should fail.
- pid_t grandchild = fork();
- if (!grandchild) {
- TEST_PCHECK(setsid() >= 0);
- // We shouldn't be able to steal the terminal.
- TEST_PCHECK(ioctl(replica_.get(), TIOCSCTTY, 1));
- _exit(0);
- }
-
- int gcwstatus;
- TEST_PCHECK(waitpid(grandchild, &gcwstatus, 0) == grandchild);
- TEST_PCHECK(gcwstatus == 0);
- });
- ASSERT_NO_ERRNO(res);
-}
-
-TEST_F(JobControlTest, ReleaseTTY) {
- ASSERT_THAT(ioctl(replica_.get(), TIOCSCTTY, 0), SyscallSucceeds());
-
- // Make sure we're ignoring SIGHUP, which will be sent to this process once we
- // disconnect the TTY.
- struct sigaction sa = {};
- sa.sa_handler = SIG_IGN;
- sa.sa_flags = 0;
- sigemptyset(&sa.sa_mask);
- struct sigaction old_sa;
- EXPECT_THAT(sigaction(SIGHUP, &sa, &old_sa), SyscallSucceeds());
- EXPECT_THAT(ioctl(replica_.get(), TIOCNOTTY), SyscallSucceeds());
- EXPECT_THAT(sigaction(SIGHUP, &old_sa, NULL), SyscallSucceeds());
-}
-
-TEST_F(JobControlTest, ReleaseUnsetTTY) {
- ASSERT_THAT(ioctl(replica_.get(), TIOCNOTTY), SyscallFailsWithErrno(ENOTTY));
-}
-
-TEST_F(JobControlTest, ReleaseWrongTTY) {
- auto res = RunInChild([=]() {
- TEST_PCHECK(setsid() >= 0);
- TEST_PCHECK(!ioctl(replica_.get(), TIOCSCTTY, 0));
- TEST_PCHECK(ioctl(master_.get(), TIOCNOTTY) < 0 && errno == ENOTTY);
- });
- ASSERT_NO_ERRNO(res);
-}
-
-TEST_F(JobControlTest, ReleaseTTYNonLeader) {
- auto ret = RunInChild([=]() {
- TEST_PCHECK(setsid() >= 0);
- TEST_PCHECK(!ioctl(replica_.get(), TIOCSCTTY, 0));
-
- pid_t grandchild = fork();
- if (!grandchild) {
- TEST_PCHECK(!ioctl(replica_.get(), TIOCNOTTY));
- _exit(0);
- }
-
- int wstatus;
- TEST_PCHECK(waitpid(grandchild, &wstatus, 0) == grandchild);
- TEST_PCHECK(wstatus == 0);
- });
- ASSERT_NO_ERRNO(ret);
-}
-
-TEST_F(JobControlTest, ReleaseTTYDifferentSession) {
- auto ret = RunInChild([=]() {
- TEST_PCHECK(setsid() >= 0);
-
- TEST_PCHECK(!ioctl(replica_.get(), TIOCSCTTY, 0));
-
- pid_t grandchild = fork();
- if (!grandchild) {
- // Join a new session, then try to disconnect.
- TEST_PCHECK(setsid() >= 0);
- TEST_PCHECK(ioctl(replica_.get(), TIOCNOTTY));
- _exit(0);
- }
-
- int wstatus;
- TEST_PCHECK(waitpid(grandchild, &wstatus, 0) == grandchild);
- TEST_PCHECK(wstatus == 0);
- });
- ASSERT_NO_ERRNO(ret);
-}
-
-// Used by the child process spawned in ReleaseTTYSignals to track received
-// signals.
-static int received;
-
-void sig_handler(int signum) { received |= signum; }
-
-// When the session leader releases its controlling terminal, the foreground
-// process group gets SIGHUP, then SIGCONT. This test:
-// - Forks 2 child processes
-// - Has child 1 return 0 if it gets both SIGHUP and SIGCONT
-// - Has child 2 leave the foreground process group, and return non-zero if it
-// receives any signals.
-// - Has the parent release its controlling terminal
-// - Checks that child 1 got both signals
-// - Checks that child 2 didn't get any signals.
-TEST_F(JobControlTest, ReleaseTTYSignals) {
- ASSERT_THAT(ioctl(replica_.get(), TIOCSCTTY, 0), SyscallSucceeds());
-
- received = 0;
- struct sigaction sa = {};
- sa.sa_handler = sig_handler;
- sa.sa_flags = 0;
- sigemptyset(&sa.sa_mask);
- sigaddset(&sa.sa_mask, SIGHUP);
- sigaddset(&sa.sa_mask, SIGCONT);
- sigprocmask(SIG_BLOCK, &sa.sa_mask, NULL);
-
- pid_t same_pgrp_child = fork();
- if (!same_pgrp_child) {
- // The child will wait for SIGHUP and SIGCONT, then return 0. It begins with
- // SIGHUP and SIGCONT blocked. We install signal handlers for those signals,
- // then use sigsuspend to wait for those specific signals.
- TEST_PCHECK(!sigaction(SIGHUP, &sa, NULL));
- TEST_PCHECK(!sigaction(SIGCONT, &sa, NULL));
- sigset_t mask;
- sigfillset(&mask);
- sigdelset(&mask, SIGHUP);
- sigdelset(&mask, SIGCONT);
- while (received != (SIGHUP | SIGCONT)) {
- sigsuspend(&mask);
- }
- _exit(0);
- }
-
- // We don't want to block these anymore.
- sigprocmask(SIG_UNBLOCK, &sa.sa_mask, NULL);
-
- // This child will return non-zero if either SIGHUP or SIGCONT are received.
- pid_t diff_pgrp_child = fork();
- if (!diff_pgrp_child) {
- TEST_PCHECK(!setpgid(0, 0));
- TEST_PCHECK(pause());
- _exit(1);
- }
-
- EXPECT_THAT(setpgid(diff_pgrp_child, diff_pgrp_child), SyscallSucceeds());
-
- // Make sure we're ignoring SIGHUP, which will be sent to this process once we
- // disconnect the TTY.
- struct sigaction sighup_sa = {};
- sighup_sa.sa_handler = SIG_IGN;
- sighup_sa.sa_flags = 0;
- sigemptyset(&sighup_sa.sa_mask);
- struct sigaction old_sa;
- EXPECT_THAT(sigaction(SIGHUP, &sighup_sa, &old_sa), SyscallSucceeds());
-
- // Release the controlling terminal, sending SIGHUP and SIGCONT to all other
- // processes in this process group.
- EXPECT_THAT(ioctl(replica_.get(), TIOCNOTTY), SyscallSucceeds());
-
- EXPECT_THAT(sigaction(SIGHUP, &old_sa, NULL), SyscallSucceeds());
-
- // The child in the same process group will get signaled.
- int wstatus;
- EXPECT_THAT(waitpid(same_pgrp_child, &wstatus, 0),
- SyscallSucceedsWithValue(same_pgrp_child));
- EXPECT_EQ(wstatus, 0);
-
- // The other child will not get signaled.
- EXPECT_THAT(waitpid(diff_pgrp_child, &wstatus, WNOHANG),
- SyscallSucceedsWithValue(0));
- EXPECT_THAT(kill(diff_pgrp_child, SIGKILL), SyscallSucceeds());
-}
-
-TEST_F(JobControlTest, GetForegroundProcessGroup) {
- auto res = RunInChild([=]() {
- pid_t pid, foreground_pgid;
- TEST_PCHECK(setsid() >= 0);
- TEST_PCHECK(!ioctl(replica_.get(), TIOCSCTTY, 1));
- TEST_PCHECK(!ioctl(replica_.get(), TIOCGPGRP, &foreground_pgid));
- TEST_PCHECK((pid = getpid()) >= 0);
- TEST_PCHECK(pid == foreground_pgid);
- });
- ASSERT_NO_ERRNO(res);
-}
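-
-// tcgetpgrp(3) and tcsetpgrp(3) are the portable libc wrappers around the
-// TIOCGPGRP/TIOCSPGRP ioctls used in these tests. The check above could
-// equivalently be written as (illustrative fragment only):
-//
-// pid_t fg = tcgetpgrp(replica_.get());
-// TEST_PCHECK(fg == getpgid(0));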
-
-TEST_F(JobControlTest, GetForegroundProcessGroupNonControlling) {
- // At this point there's no controlling terminal, so TIOCGPGRP should fail.
- pid_t foreground_pgid;
- ASSERT_THAT(ioctl(replica_.get(), TIOCGPGRP, &foreground_pgid),
- SyscallFailsWithErrno(ENOTTY));
-}
-
-// This test:
-// - sets itself as the foreground process group
-// - creates a child process in a new process group
-// - sets that child as the foreground process group
-// - kills its child and sets itself as the foreground process group.
-// TODO(gvisor.dev/issue/5357): Fix and enable.
-TEST_F(JobControlTest, DISABLED_SetForegroundProcessGroup) {
- auto res = RunInChild([=]() {
- TEST_PCHECK(!ioctl(replica_.get(), TIOCSCTTY, 0));
-
- // Ignore SIGTTOU so that we don't stop ourself when calling tcsetpgrp.
- struct sigaction sa = {};
- sa.sa_handler = SIG_IGN;
- sa.sa_flags = 0;
- sigemptyset(&sa.sa_mask);
- sigaction(SIGTTOU, &sa, NULL);
-
- // Set ourself as the foreground process group.
- TEST_PCHECK(!tcsetpgrp(replica_.get(), getpgid(0)));
-
- // Create a new process that just waits to be signaled.
- pid_t grandchild = fork();
- if (!grandchild) {
- TEST_PCHECK(!pause());
- // We should never reach this.
- _exit(1);
- }
-
- // Make the child its own process group, then make it the foreground
- // process group of the terminal.
- TEST_PCHECK(!setpgid(grandchild, grandchild));
- TEST_PCHECK(!tcsetpgrp(replica_.get(), grandchild));
-
- // Sanity check - the grandchild is still in our session.
- TEST_PCHECK(getsid(0) == getsid(grandchild));
-
- // Signal the child, wait for it to exit, then retake the terminal.
- TEST_PCHECK(!kill(grandchild, SIGTERM));
- int wstatus;
- TEST_PCHECK(waitpid(grandchild, &wstatus, 0) == grandchild);
- TEST_PCHECK(WIFSIGNALED(wstatus));
- TEST_PCHECK(WTERMSIG(wstatus) == SIGTERM);
-
- // Set ourself as the foreground process.
- pid_t pgid;
- TEST_PCHECK((pgid = getpgid(0)) >= 0);
- TEST_PCHECK(!tcsetpgrp(replica_.get(), pgid));
- });
- ASSERT_NO_ERRNO(res);
-}
-
-TEST_F(JobControlTest, SetForegroundProcessGroupWrongTTY) {
- pid_t pid = getpid();
- ASSERT_THAT(ioctl(replica_.get(), TIOCSPGRP, &pid),
- SyscallFailsWithErrno(ENOTTY));
-}
-
-TEST_F(JobControlTest, SetForegroundProcessGroupNegPgid) {
- auto ret = RunInChild([=]() {
- TEST_PCHECK(setsid() >= 0);
- TEST_PCHECK(!ioctl(replica_.get(), TIOCSCTTY, 0));
-
- pid_t pid = -1;
- TEST_PCHECK(ioctl(replica_.get(), TIOCSPGRP, &pid) && errno == EINVAL);
- });
- ASSERT_NO_ERRNO(ret);
-}
-
-// TODO(gvisor.dev/issue/5357): Fix and enable.
-TEST_F(JobControlTest, DISABLED_SetForegroundProcessGroupEmptyProcessGroup) {
- auto res = RunInChild([=]() {
- TEST_PCHECK(!ioctl(replica_.get(), TIOCSCTTY, 0));
-
- // Create a new process, put it in a new process group, make that group the
- // foreground process group, then have the process wait.
- pid_t grandchild = fork();
- if (!grandchild) {
- TEST_PCHECK(!setpgid(0, 0));
- _exit(0);
- }
-
- // Wait for the child to exit.
- int wstatus;
- TEST_PCHECK(waitpid(grandchild, &wstatus, 0) == grandchild);
- // The child's process group doesn't exist anymore - this should fail.
- TEST_PCHECK(ioctl(replica_.get(), TIOCSPGRP, &grandchild) != 0 &&
- errno == ESRCH);
- });
- ASSERT_NO_ERRNO(res);
-}
-
-TEST_F(JobControlTest, SetForegroundProcessGroupDifferentSession) {
- auto ret = RunInChild([=]() {
- TEST_PCHECK(setsid() >= 0);
- TEST_PCHECK(!ioctl(replica_.get(), TIOCSCTTY, 0));
-
- int sync_setsid[2];
- int sync_exit[2];
- TEST_PCHECK(pipe(sync_setsid) >= 0);
- TEST_PCHECK(pipe(sync_exit) >= 0);
-
- // Create a new process and put it in a new session.
- pid_t grandchild = fork();
- if (!grandchild) {
- TEST_PCHECK(setsid() >= 0);
- // Tell the parent we're in a new session.
- char c = 'c';
- TEST_PCHECK(WriteFd(sync_setsid[1], &c, 1) == 1);
- TEST_PCHECK(ReadFd(sync_exit[0], &c, 1) == 1);
- _exit(0);
- }
-
- // Wait for the child to tell us it's in a new session.
- char c = 'c';
- TEST_PCHECK(ReadFd(sync_setsid[0], &c, 1) == 1);
-
- // Child is in a new session, so we can't make it the foreground process
- // group.
- TEST_PCHECK(ioctl(replica_.get(), TIOCSPGRP, &grandchild) &&
- errno == EPERM);
-
- TEST_PCHECK(WriteFd(sync_exit[1], &c, 1) == 1);
-
- int wstatus;
- TEST_PCHECK(waitpid(grandchild, &wstatus, 0) == grandchild);
- TEST_PCHECK(WIFEXITED(wstatus));
- TEST_PCHECK(!WEXITSTATUS(wstatus));
- });
- ASSERT_NO_ERRNO(ret);
-}
-
-// Verify that we don't hang when creating a new session from an orphaned
-// process group (b/139968068). Calling setsid() creates an orphaned process
-// group, as a process group containing only its session's leader is an
-// orphan.
-//
-// We create 2 sessions in this test. The init process in gVisor is not
-// considered an orphan (see sessions.go), so we first create one session and
-// then create a second session from within it. The second session is the one
-// created from an orphaned process group.
-TEST_F(JobControlTest, OrphanRegression) {
- pid_t session_2_leader = fork();
- if (!session_2_leader) {
- TEST_PCHECK(setsid() >= 0);
-
- pid_t session_3_leader = fork();
- if (!session_3_leader) {
- TEST_PCHECK(setsid() >= 0);
-
- _exit(0);
- }
-
- int wstatus;
- TEST_PCHECK(waitpid(session_3_leader, &wstatus, 0) == session_3_leader);
- TEST_PCHECK(wstatus == 0);
-
- _exit(0);
- }
-
- int wstatus;
- ASSERT_THAT(waitpid(session_2_leader, &wstatus, 0),
- SyscallSucceedsWithValue(session_2_leader));
- ASSERT_EQ(wstatus, 0);
-}
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/pty_root.cc b/test/syscalls/linux/pty_root.cc
deleted file mode 100644
index 4ac648729..000000000
--- a/test/syscalls/linux/pty_root.cc
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sys/ioctl.h>
-#include <termios.h>
-
-#include "gtest/gtest.h"
-#include "absl/base/macros.h"
-#include "test/util/capability_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/posix_error.h"
-#include "test/util/pty_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// StealTTY tests whether privileged processes can steal controlling terminals.
-// If the stealing process has CAP_SYS_ADMIN in the root user namespace, the
-// test ensures that stealing works. If it has non-root CAP_SYS_ADMIN, it
-// ensures stealing fails.
-TEST(JobControlRootTest, StealTTY) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
-
- bool true_root = true;
- if (!IsRunningOnGvisor()) {
- // If running in Linux, we may only have CAP_SYS_ADMIN in a non-root user
- // namespace (i.e. we are not truly root). We use init_module as a proxy for
- // whether we are true root, as it returns EPERM immediately.
- ASSERT_THAT(syscall(SYS_init_module, nullptr, 0, nullptr), SyscallFails());
- true_root = errno != EPERM;
-
- // Make this a session leader, which also drops the controlling terminal.
- // In the gVisor test environment, this test will be run as the session
- // leader already (as the sentry init process).
- ASSERT_THAT(setsid(), SyscallSucceeds());
- }
-
- FileDescriptor master =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/ptmx", O_RDWR | O_NONBLOCK));
- FileDescriptor replica = ASSERT_NO_ERRNO_AND_VALUE(OpenReplica(master));
-
- // Make replica the controlling terminal.
- ASSERT_THAT(ioctl(replica.get(), TIOCSCTTY, 0), SyscallSucceeds());
-
- // Fork, join a new session, and try to steal the parent's controlling
- // terminal, which should succeed when we have CAP_SYS_ADMIN and pass an arg
- // of 1.
- pid_t child = fork();
- if (!child) {
- ASSERT_THAT(setsid(), SyscallSucceeds());
- // We shouldn't be able to steal the terminal with the wrong arg value.
- TEST_PCHECK(ioctl(replica.get(), TIOCSCTTY, 0));
- // We should be able to steal it if we are true root.
- TEST_PCHECK(true_root == !ioctl(replica.get(), TIOCSCTTY, 1));
- _exit(0);
- }
-
- int wstatus;
- ASSERT_THAT(waitpid(child, &wstatus, 0), SyscallSucceedsWithValue(child));
- ASSERT_EQ(wstatus, 0);
-}
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/pwrite64.cc b/test/syscalls/linux/pwrite64.cc
deleted file mode 100644
index 1b2f25363..000000000
--- a/test/syscalls/linux/pwrite64.cc
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <linux/unistd.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// TODO(gvisor.dev/issue/2370): This test is currently very rudimentary.
-class Pwrite64 : public ::testing::Test {
- void SetUp() override {
- name_ = NewTempAbsPath();
- int fd;
- ASSERT_THAT(fd = open(name_.c_str(), O_CREAT, 0644), SyscallSucceeds());
- EXPECT_THAT(close(fd), SyscallSucceeds());
- }
-
- void TearDown() override { unlink(name_.c_str()); }
-
- public:
- std::string name_;
-};
-
-TEST_F(Pwrite64, AppendOnly) {
- int fd;
- ASSERT_THAT(fd = open(name_.c_str(), O_APPEND | O_RDWR), SyscallSucceeds());
- constexpr int64_t kBufSize = 1024;
- std::vector<char> buf(kBufSize);
- std::fill(buf.begin(), buf.end(), 'a');
- EXPECT_THAT(PwriteFd(fd, buf.data(), buf.size(), 0),
- SyscallSucceedsWithValue(buf.size()));
- EXPECT_THAT(lseek(fd, 0, SEEK_CUR), SyscallSucceedsWithValue(0));
- EXPECT_THAT(close(fd), SyscallSucceeds());
-}
-
-TEST_F(Pwrite64, InvalidArgs) {
- int fd;
- ASSERT_THAT(fd = open(name_.c_str(), O_APPEND | O_RDWR), SyscallSucceeds());
- constexpr int64_t kBufSize = 1024;
- std::vector<char> buf(kBufSize);
- std::fill(buf.begin(), buf.end(), 'a');
- EXPECT_THAT(PwriteFd(fd, buf.data(), buf.size(), -1),
- SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(close(fd), SyscallSucceeds());
-}
-
-TEST_F(Pwrite64, Overflow) {
- int fd;
- ASSERT_THAT(fd = open(name_.c_str(), O_APPEND | O_RDWR), SyscallSucceeds());
- constexpr int64_t kBufSize = 1024;
- std::vector<char> buf(kBufSize);
- std::fill(buf.begin(), buf.end(), 'a');
- EXPECT_THAT(PwriteFd(fd, buf.data(), buf.size(), 0x7fffffffffffffffull),
- SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(close(fd), SyscallSucceeds());
-}
-
-TEST_F(Pwrite64, Pwrite64WithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_PATH));
-
- std::vector<char> buf(1);
- EXPECT_THAT(PwriteFd(fd.get(), buf.data(), 1, 0),
- SyscallFailsWithErrno(EBADF));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/pwritev2.cc b/test/syscalls/linux/pwritev2.cc
deleted file mode 100644
index 00aed61b4..000000000
--- a/test/syscalls/linux/pwritev2.cc
+++ /dev/null
@@ -1,324 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <sys/uio.h>
-
-#include <string>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/file_base.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-#ifndef SYS_pwritev2
-#if defined(__x86_64__)
-#define SYS_pwritev2 328
-#elif defined(__aarch64__)
-#define SYS_pwritev2 287
-#else
-#error "Unknown architecture"
-#endif
-#endif // SYS_pwritev2
-
-#ifndef RWF_HIPRI
-#define RWF_HIPRI 0x1
-#endif // RWF_HIPRI
-
-#ifndef RWF_DSYNC
-#define RWF_DSYNC 0x2
-#endif // RWF_DSYNC
-
-#ifndef RWF_SYNC
-#define RWF_SYNC 0x4
-#endif // RWF_SYNC
-
-constexpr int kBufSize = 1024;
-
-void SetContent(std::vector<char>& content) {
- for (uint i = 0; i < content.size(); i++) {
- content[i] = static_cast<char>((i % 10) + '0');
- }
-}
-
-ssize_t pwritev2(unsigned long fd, const struct iovec* iov,
- unsigned long iovcnt, off_t offset, unsigned long flags) {
- // The raw pwritev2 syscall takes the 64-bit offset as a low/high pair of
- // arguments (see syscall(2)), so we pass 0 as the high half of the offset
- // before the flags argument.
- return syscall(SYS_pwritev2, fd, iov, iovcnt, offset, 0, flags);
-}
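-
-// Note: glibc (since 2.26) also provides a pwritev2() wrapper in <sys/uio.h>,
-// declared roughly as
-// ssize_t pwritev2(int fd, const struct iovec* iov, int iovcnt,
-// off_t offset, int flags);
-// Calling the raw syscall here avoids depending on the libc version and lets
-// a missing kernel implementation surface as ENOSYS (see the SKIP_IF guards
-// below).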
-
-// This test is the base case where we call pwritev2 with offset 0 and no
-// flags (equivalent to a plain pwritev call).
-TEST(Writev2Test, BaseCall) {
- SKIP_IF(pwritev2(-1, nullptr, 0, 0, 0) < 0 && errno == ENOSYS);
-
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), "", TempPath::kDefaultFileMode));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR));
-
- std::vector<char> content(kBufSize);
- SetContent(content);
- struct iovec iov[2];
- iov[0].iov_base = content.data();
- iov[0].iov_len = content.size() / 2;
- iov[1].iov_base = static_cast<char*>(iov[0].iov_base) + (content.size() / 2);
- iov[1].iov_len = content.size() / 2;
-
- ASSERT_THAT(pwritev2(fd.get(), iov, /*iovcnt=*/2,
- /*offset=*/0, /*flags=*/0),
- SyscallSucceedsWithValue(kBufSize));
-
- std::vector<char> buf(kBufSize);
- EXPECT_THAT(read(fd.get(), buf.data(), kBufSize),
- SyscallSucceedsWithValue(kBufSize));
-
- EXPECT_EQ(content, buf);
-}
-
-// This test is where we call pwritev2 with a positive offset and no flags.
-TEST(Pwritev2Test, ValidPositiveOffset) {
- SKIP_IF(pwritev2(-1, nullptr, 0, 0, 0) < 0 && errno == ENOSYS);
-
- std::string prefix(kBufSize, '0');
-
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), prefix, TempPath::kDefaultFileMode));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR));
-
- std::vector<char> content(kBufSize);
- SetContent(content);
- struct iovec iov;
- iov.iov_base = content.data();
- iov.iov_len = content.size();
-
- ASSERT_THAT(pwritev2(fd.get(), &iov, /*iovcnt=*/1,
- /*offset=*/prefix.size(), /*flags=*/0),
- SyscallSucceedsWithValue(content.size()));
-
- std::vector<char> buf(prefix.size() + content.size());
- EXPECT_THAT(read(fd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(buf.size()));
-
- std::vector<char> want(prefix.begin(), prefix.end());
- want.insert(want.end(), content.begin(), content.end());
- EXPECT_EQ(want, buf);
-}
-
-// This test calls pwritev2 with -1 as the offset. The write should use the
-// current file offset, so the test advances the file offset prior to calling
-// pwritev2.
-TEST(Pwritev2Test, NegativeOneOffset) {
- SKIP_IF(pwritev2(-1, nullptr, 0, 0, 0) < 0 && errno == ENOSYS);
-
- const std::string prefix = "00";
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), prefix.data(), TempPath::kDefaultFileMode));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR));
- ASSERT_THAT(lseek(fd.get(), prefix.size(), SEEK_SET),
- SyscallSucceedsWithValue(prefix.size()));
-
- std::vector<char> content(kBufSize);
- SetContent(content);
- struct iovec iov;
- iov.iov_base = content.data();
- iov.iov_len = content.size();
-
- ASSERT_THAT(pwritev2(fd.get(), &iov, /*iovcnt*/ 1,
- /*offset=*/static_cast<off_t>(-1), /*flags=*/0),
- SyscallSucceedsWithValue(content.size()));
-
- ASSERT_THAT(lseek(fd.get(), 0, SEEK_CUR),
- SyscallSucceedsWithValue(prefix.size() + content.size()));
-
- std::vector<char> buf(prefix.size() + content.size());
- EXPECT_THAT(pread(fd.get(), buf.data(), buf.size(), /*offset=*/0),
- SyscallSucceedsWithValue(buf.size()));
-
- std::vector<char> want(prefix.begin(), prefix.end());
- want.insert(want.end(), content.begin(), content.end());
- EXPECT_EQ(want, buf);
-}
-
-// RWF_HIPRI only takes effect when the fd is opened with O_DIRECT; it is
-// otherwise accepted and ignored. This test implements a correct call with
-// the RWF_HIPRI flag.
-TEST(Pwritev2Test, CallWithRWF_HIPRI) {
- SKIP_IF(pwritev2(-1, nullptr, 0, 0, 0) < 0 && errno == ENOSYS);
-
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), "", TempPath::kDefaultFileMode));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR));
-
- std::vector<char> content(kBufSize);
- SetContent(content);
- struct iovec iov;
- iov.iov_base = content.data();
- iov.iov_len = content.size();
-
- EXPECT_THAT(pwritev2(fd.get(), &iov, /*iovcnt=*/1,
- /*offset=*/0, /*flags=*/RWF_HIPRI),
- SyscallSucceedsWithValue(kBufSize));
-
- std::vector<char> buf(content.size());
- EXPECT_THAT(read(fd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(buf.size()));
-
- EXPECT_EQ(buf, content);
-}
-
-// This test calls pwritev2 with a bad file descriptor.
-TEST(Writev2Test, BadFile) {
- SKIP_IF(pwritev2(-1, nullptr, 0, 0, 0) < 0 && errno == ENOSYS);
- ASSERT_THAT(pwritev2(/*fd=*/-1, /*iov=*/nullptr, /*iovcnt=*/0,
- /*offset=*/0, /*flags=*/0),
- SyscallFailsWithErrno(EBADF));
-}
-
-// This test calls pwritev2 with an invalid offset.
-TEST(Pwritev2Test, InvalidOffset) {
- SKIP_IF(pwritev2(-1, nullptr, 0, 0, 0) < 0 && errno == ENOSYS);
-
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), "", TempPath::kDefaultFileMode));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR));
-
- char buf[16];
- struct iovec iov;
- iov.iov_base = buf;
- iov.iov_len = sizeof(buf);
-
- EXPECT_THAT(pwritev2(fd.get(), &iov, /*iovcnt=*/1,
- /*offset=*/static_cast<off_t>(-8), /*flags=*/0),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(Pwritev2Test, UnseekableFileValid) {
- SKIP_IF(pwritev2(-1, nullptr, 0, 0, 0) < 0 && errno == ENOSYS);
-
- int pipe_fds[2];
-
- ASSERT_THAT(pipe(pipe_fds), SyscallSucceeds());
-
- std::vector<char> content(32, '0');
- SetContent(content);
- struct iovec iov;
- iov.iov_base = content.data();
- iov.iov_len = content.size();
-
- EXPECT_THAT(pwritev2(pipe_fds[1], &iov, /*iovcnt=*/1,
- /*offset=*/static_cast<off_t>(-1), /*flags=*/0),
- SyscallSucceedsWithValue(content.size()));
-
- std::vector<char> buf(content.size());
- EXPECT_THAT(read(pipe_fds[0], buf.data(), buf.size()),
- SyscallSucceedsWithValue(buf.size()));
-
- EXPECT_EQ(content, buf);
-
- EXPECT_THAT(close(pipe_fds[0]), SyscallSucceeds());
- EXPECT_THAT(close(pipe_fds[1]), SyscallSucceeds());
-}
-
-// Calling pwritev2 with a non-negative offset behaves like pwritev, which is
-// not allowed on an unseekable file. A pipe is used as the unseekable file.
-TEST(Pwritev2Test, UnseekableFileInvalid) {
- SKIP_IF(pwritev2(-1, nullptr, 0, 0, 0) < 0 && errno == ENOSYS);
-
- int pipe_fds[2];
- char buf[16];
- struct iovec iov;
- iov.iov_base = buf;
- iov.iov_len = sizeof(buf);
-
- ASSERT_THAT(pipe(pipe_fds), SyscallSucceeds());
-
- EXPECT_THAT(pwritev2(pipe_fds[1], &iov, /*iovcnt=*/1,
- /*offset=*/2, /*flags=*/0),
- SyscallFailsWithErrno(ESPIPE));
-
- EXPECT_THAT(close(pipe_fds[0]), SyscallSucceeds());
- EXPECT_THAT(close(pipe_fds[1]), SyscallSucceeds());
-}
-
-TEST(Pwritev2Test, ReadOnlyFile) {
- SKIP_IF(pwritev2(-1, nullptr, 0, 0, 0) < 0 && errno == ENOSYS);
-
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), "", TempPath::kDefaultFileMode));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDONLY));
-
- char buf[16];
- struct iovec iov;
- iov.iov_base = buf;
- iov.iov_len = sizeof(buf);
-
- EXPECT_THAT(pwritev2(fd.get(), &iov, /*iovcnt=*/1,
- /*offset=*/0, /*flags=*/0),
- SyscallFailsWithErrno(EBADF));
-}
-
-TEST(Pwritev2Test, Pwritev2WithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- SKIP_IF(pwritev2(-1, nullptr, 0, 0, 0) < 0 && errno == ENOSYS);
-
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_PATH));
-
- char buf[16];
- struct iovec iov;
- iov.iov_base = buf;
- iov.iov_len = sizeof(buf);
-
- EXPECT_THAT(pwritev2(fd.get(), &iov, /*iovcnt=*/1, /*offset=*/0, /*flags=*/0),
- SyscallFailsWithErrno(EBADF));
-}
-
-// This test calls pwritev2 with an invalid flag.
-TEST(Pwritev2Test, InvalidFlag) {
- SKIP_IF(pwritev2(-1, nullptr, 0, 0, 0) < 0 && errno == ENOSYS);
-
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), "", TempPath::kDefaultFileMode));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR | O_DIRECT));
-
- char buf[16];
- struct iovec iov;
- iov.iov_base = buf;
- iov.iov_len = sizeof(buf);
-
- EXPECT_THAT(pwritev2(fd.get(), &iov, /*iovcnt=*/1,
- /*offset=*/0, /*flags=*/0xF0),
- SyscallFailsWithErrno(EOPNOTSUPP));
-}
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/raw_socket.cc b/test/syscalls/linux/raw_socket.cc
deleted file mode 100644
index f8798bc76..000000000
--- a/test/syscalls/linux/raw_socket.cc
+++ /dev/null
@@ -1,957 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <arpa/inet.h>
-#include <linux/capability.h>
-#include <linux/filter.h>
-#include <netinet/in.h>
-#include <netinet/ip.h>
-#include <netinet/ip6.h>
-#include <netinet/ip_icmp.h>
-#include <poll.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <algorithm>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/capability_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/test_util.h"
-
-// Note: in order to run these tests, /proc/sys/net/ipv4/ping_group_range will
-// need to be configured to let the superuser create ping sockets (see icmp(7)).
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// Fixture for tests parameterized by protocol.
-class RawSocketTest : public ::testing::TestWithParam<std::tuple<int, int>> {
- protected:
- // Creates a socket to be used in tests.
- void SetUp() override;
-
- // Closes the socket created by SetUp().
- void TearDown() override;
-
- // Sends buf via s_.
- void SendBuf(const char* buf, int buf_len);
-
- // Reads from s_ into recv_buf.
- void ReceiveBuf(char* recv_buf, size_t recv_buf_len);
-
- void ReceiveBufFrom(int sock, char* recv_buf, size_t recv_buf_len);
-
- int Protocol() { return std::get<0>(GetParam()); }
-
- int Family() { return std::get<1>(GetParam()); }
-
- socklen_t AddrLen() {
- if (Family() == AF_INET) {
- return sizeof(sockaddr_in);
- }
- return sizeof(sockaddr_in6);
- }
-
- int HdrLen() {
- if (Family() == AF_INET) {
- return sizeof(struct iphdr);
- }
- // IPv6 raw sockets don't include the header.
- return 0;
- }
-
- uint16_t Port(struct sockaddr* s) {
- if (Family() == AF_INET) {
- return ntohs(reinterpret_cast<struct sockaddr_in*>(s)->sin_port);
- }
- return ntohs(reinterpret_cast<struct sockaddr_in6*>(s)->sin6_port);
- }
-
- void* Addr(struct sockaddr* s) {
- if (Family() == AF_INET) {
- return &(reinterpret_cast<struct sockaddr_in*>(s)->sin_addr);
- }
- return &(reinterpret_cast<struct sockaddr_in6*>(s)->sin6_addr);
- }
-
- // The socket used for both reading and writing.
- int s_;
-
- // The loopback address.
- struct sockaddr_storage addr_;
-};
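-
-// HdrLen() above reflects an asymmetry worth keeping in mind: a read on an
-// AF_INET raw socket returns the IP header followed by the payload, while an
-// AF_INET6 raw socket returns only the payload. Extracting the payload from a
-// received buffer therefore skips HdrLen() bytes, roughly (illustrative
-// fragment only):
-//
-// std::vector<char> recv_buf(sizeof(payload) + HdrLen());
-// ReceiveBuf(recv_buf.data(), recv_buf.size());
-// memcpy(payload, recv_buf.data() + HdrLen(), sizeof(payload));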
-
-void RawSocketTest::SetUp() {
- if (!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW))) {
- ASSERT_THAT(socket(Family(), SOCK_RAW, Protocol()),
- SyscallFailsWithErrno(EPERM));
- GTEST_SKIP();
- }
-
- ASSERT_THAT(s_ = socket(Family(), SOCK_RAW, Protocol()), SyscallSucceeds());
-
- addr_ = {};
-
- // We don't set ports because raw sockets don't have a notion of ports.
- if (Family() == AF_INET) {
- struct sockaddr_in* sin = reinterpret_cast<struct sockaddr_in*>(&addr_);
- sin->sin_family = AF_INET;
- sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- } else {
- struct sockaddr_in6* sin6 = reinterpret_cast<struct sockaddr_in6*>(&addr_);
- sin6->sin6_family = AF_INET6;
- sin6->sin6_addr = in6addr_loopback;
- }
-}
-
-void RawSocketTest::TearDown() {
- // TearDown will be run even if we skip the test.
- if (ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW))) {
- EXPECT_THAT(close(s_), SyscallSucceeds());
- }
-}
-
-// We should be able to create multiple raw sockets for the same protocol.
-// RawSocketTest::SetUp creates the first one, so we only have to create one
-// more here.
-TEST_P(RawSocketTest, MultipleCreation) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- int s2;
- ASSERT_THAT(s2 = socket(Family(), SOCK_RAW, Protocol()), SyscallSucceeds());
-
- ASSERT_THAT(close(s2), SyscallSucceeds());
-}
-
-// Test that shutting down an unconnected socket fails.
-TEST_P(RawSocketTest, FailShutdownWithoutConnect) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- ASSERT_THAT(shutdown(s_, SHUT_WR), SyscallFailsWithErrno(ENOTCONN));
- ASSERT_THAT(shutdown(s_, SHUT_RD), SyscallFailsWithErrno(ENOTCONN));
-}
-
-// Shutdown is a no-op for raw sockets (and datagram sockets in general).
-TEST_P(RawSocketTest, ShutdownWriteNoop) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- ASSERT_THAT(
- connect(s_, reinterpret_cast<struct sockaddr*>(&addr_), AddrLen()),
- SyscallSucceeds());
- ASSERT_THAT(shutdown(s_, SHUT_WR), SyscallSucceeds());
-
- // Arbitrary.
- constexpr char kBuf[] = "noop";
- ASSERT_THAT(RetryEINTR(write)(s_, kBuf, sizeof(kBuf)),
- SyscallSucceedsWithValue(sizeof(kBuf)));
-}
-
-// Shutdown is a no-op for raw sockets (and datagram sockets in general).
-TEST_P(RawSocketTest, ShutdownReadNoop) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- ASSERT_THAT(
- connect(s_, reinterpret_cast<struct sockaddr*>(&addr_), AddrLen()),
- SyscallSucceeds());
- ASSERT_THAT(shutdown(s_, SHUT_RD), SyscallSucceeds());
-
- // Arbitrary.
- constexpr char kBuf[] = "gdg";
- ASSERT_NO_FATAL_FAILURE(SendBuf(kBuf, sizeof(kBuf)));
-
- std::vector<char> c(sizeof(kBuf) + HdrLen());
- ASSERT_THAT(read(s_, c.data(), c.size()), SyscallSucceedsWithValue(c.size()));
-}
-
-// Test that listen() fails.
-TEST_P(RawSocketTest, FailListen) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- ASSERT_THAT(listen(s_, 1), SyscallFailsWithErrno(ENOTSUP));
-}
-
-// Test that accept() fails.
-TEST_P(RawSocketTest, FailAccept) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- struct sockaddr saddr;
- socklen_t addrlen;
- ASSERT_THAT(accept(s_, &saddr, &addrlen), SyscallFailsWithErrno(ENOTSUP));
-}
-
-TEST_P(RawSocketTest, BindThenGetSockName) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- struct sockaddr* addr = reinterpret_cast<struct sockaddr*>(&addr_);
- ASSERT_THAT(bind(s_, addr, AddrLen()), SyscallSucceeds());
- struct sockaddr_storage saddr_storage;
- struct sockaddr* saddr = reinterpret_cast<struct sockaddr*>(&saddr_storage);
- socklen_t saddrlen = AddrLen();
- ASSERT_THAT(getsockname(s_, saddr, &saddrlen), SyscallSucceeds());
- ASSERT_EQ(saddrlen, AddrLen());
-
- // The port is expected to hold the protocol number.
- EXPECT_EQ(Port(saddr), Protocol());
-
- char addrbuf[INET6_ADDRSTRLEN], saddrbuf[INET6_ADDRSTRLEN];
- const char* addrstr =
- inet_ntop(addr->sa_family, Addr(addr), addrbuf, sizeof(addrbuf));
- ASSERT_NE(addrstr, nullptr);
- const char* saddrstr =
- inet_ntop(saddr->sa_family, Addr(saddr), saddrbuf, sizeof(saddrbuf));
- ASSERT_NE(saddrstr, nullptr);
- EXPECT_STREQ(saddrstr, addrstr);
-}
-
-TEST_P(RawSocketTest, ConnectThenGetSockName) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- struct sockaddr* addr = reinterpret_cast<struct sockaddr*>(&addr_);
- ASSERT_THAT(connect(s_, addr, AddrLen()), SyscallSucceeds());
- struct sockaddr_storage saddr_storage;
- struct sockaddr* saddr = reinterpret_cast<struct sockaddr*>(&saddr_storage);
- socklen_t saddrlen = AddrLen();
- ASSERT_THAT(getsockname(s_, saddr, &saddrlen), SyscallSucceeds());
- ASSERT_EQ(saddrlen, AddrLen());
-
- // The port is expected to hold the protocol number.
- EXPECT_EQ(Port(saddr), Protocol());
-
- char addrbuf[INET6_ADDRSTRLEN], saddrbuf[INET6_ADDRSTRLEN];
- const char* addrstr =
- inet_ntop(addr->sa_family, Addr(addr), addrbuf, sizeof(addrbuf));
- ASSERT_NE(addrstr, nullptr);
- const char* saddrstr =
- inet_ntop(saddr->sa_family, Addr(saddr), saddrbuf, sizeof(saddrbuf));
- ASSERT_NE(saddrstr, nullptr);
- EXPECT_STREQ(saddrstr, addrstr);
-}
-
-// Test that getpeername() fails with ENOTCONN before connect().
-TEST_P(RawSocketTest, FailGetPeerNameBeforeConnect) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- struct sockaddr saddr;
- socklen_t addrlen = sizeof(saddr);
- ASSERT_THAT(getpeername(s_, &saddr, &addrlen),
- SyscallFailsWithErrno(ENOTCONN));
-}
-
-// Test that getpeername() succeeds and returns an address after connect().
-TEST_P(RawSocketTest, GetPeerName) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- ASSERT_THAT(
- connect(s_, reinterpret_cast<struct sockaddr*>(&addr_), AddrLen()),
- SyscallSucceeds());
- struct sockaddr saddr;
- socklen_t addrlen = sizeof(saddr);
- ASSERT_THAT(getpeername(s_, &saddr, &addrlen), SyscallSucceeds());
- ASSERT_GT(addrlen, 0);
-}
-
-// Test that the socket is writable immediately.
-TEST_P(RawSocketTest, PollWritableImmediately) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- struct pollfd pfd = {};
- pfd.fd = s_;
- pfd.events = POLLOUT;
- ASSERT_THAT(RetryEINTR(poll)(&pfd, 1, 10000), SyscallSucceedsWithValue(1));
-}
-
-// Test that the socket isn't readable before receiving anything.
-TEST_P(RawSocketTest, PollNotReadableInitially) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- // Try to receive data with MSG_DONTWAIT, which returns immediately if there's
- // nothing to be read.
- char buf[117];
- ASSERT_THAT(RetryEINTR(recv)(s_, buf, sizeof(buf), MSG_DONTWAIT),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-// Test that the socket becomes readable once something is written to it.
-TEST_P(RawSocketTest, PollTriggeredOnWrite) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- // Write something so that there's data to be read.
- // Arbitrary.
- constexpr char kBuf[] = "JP5";
- ASSERT_NO_FATAL_FAILURE(SendBuf(kBuf, sizeof(kBuf)));
-
- struct pollfd pfd = {};
- pfd.fd = s_;
- pfd.events = POLLIN;
- ASSERT_THAT(RetryEINTR(poll)(&pfd, 1, 10000), SyscallSucceedsWithValue(1));
-}
-
-// Test that we can connect() to a valid IP (loopback).
-TEST_P(RawSocketTest, ConnectToLoopback) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- ASSERT_THAT(
- connect(s_, reinterpret_cast<struct sockaddr*>(&addr_), AddrLen()),
- SyscallSucceeds());
-}
-
-// Test that calling send() without connect() fails.
-TEST_P(RawSocketTest, SendWithoutConnectFails) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- // Arbitrary.
- constexpr char kBuf[] = "Endgame was good";
- ASSERT_THAT(send(s_, kBuf, sizeof(kBuf), 0),
- SyscallFailsWithErrno(EDESTADDRREQ));
-}
-
-// Wildcard Bind.
-TEST_P(RawSocketTest, BindToWildcard) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
- struct sockaddr_storage addr;
- addr = {};
-
- // We don't set ports because raw sockets don't have a notion of ports.
- if (Family() == AF_INET) {
- struct sockaddr_in* sin = reinterpret_cast<struct sockaddr_in*>(&addr);
- sin->sin_family = AF_INET;
- sin->sin_addr.s_addr = htonl(INADDR_ANY);
- } else {
- struct sockaddr_in6* sin6 = reinterpret_cast<struct sockaddr_in6*>(&addr);
- sin6->sin6_family = AF_INET6;
- sin6->sin6_addr = in6addr_any;
- }
-
- ASSERT_THAT(bind(s_, reinterpret_cast<struct sockaddr*>(&addr), AddrLen()),
- SyscallSucceeds());
-}
-
-// Bind to localhost.
-TEST_P(RawSocketTest, BindToLocalhost) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- ASSERT_THAT(
- bind(s_, reinterpret_cast<struct sockaddr*>(&addr_), AddrLen()),
- SyscallSucceeds());
-}
-
-// Binding to an address not assigned to any local interface should fail.
-TEST_P(RawSocketTest, BindToInvalid) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- struct sockaddr_storage bind_addr = addr_;
- if (Family() == AF_INET) {
- struct sockaddr_in* sin = reinterpret_cast<struct sockaddr_in*>(&bind_addr);
- sin->sin_addr = {1}; // 1.0.0.0 - An address that we can't bind to.
- } else {
- struct sockaddr_in6* sin6 =
- reinterpret_cast<struct sockaddr_in6*>(&bind_addr);
- memset(&sin6->sin6_addr.s6_addr, 0, sizeof(sin6->sin6_addr.s6_addr));
- sin6->sin6_addr.s6_addr[0] = 1; // 100:: - An address that we can't bind to.
- }
- ASSERT_THAT(bind(s_, reinterpret_cast<struct sockaddr*>(&bind_addr),
- AddrLen()), SyscallFailsWithErrno(EADDRNOTAVAIL));
-}
-
-// Send and receive a packet.
-TEST_P(RawSocketTest, SendAndReceive) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- // Arbitrary.
- constexpr char kBuf[] = "TB12";
- ASSERT_NO_FATAL_FAILURE(SendBuf(kBuf, sizeof(kBuf)));
-
- // Receive the packet and make sure it's identical.
- std::vector<char> recv_buf(sizeof(kBuf) + HdrLen());
- ASSERT_NO_FATAL_FAILURE(ReceiveBuf(recv_buf.data(), recv_buf.size()));
- EXPECT_EQ(memcmp(recv_buf.data() + HdrLen(), kBuf, sizeof(kBuf)), 0);
-}
-
-// We should be able to create multiple raw sockets for the same protocol and
-// receive the same packet on both.
-TEST_P(RawSocketTest, MultipleSocketReceive) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- int s2;
- ASSERT_THAT(s2 = socket(Family(), SOCK_RAW, Protocol()), SyscallSucceeds());
-
- // Arbitrary.
- constexpr char kBuf[] = "TB10";
- ASSERT_NO_FATAL_FAILURE(SendBuf(kBuf, sizeof(kBuf)));
-
- // Receive it on socket 1.
- std::vector<char> recv_buf1(sizeof(kBuf) + HdrLen());
- ASSERT_NO_FATAL_FAILURE(ReceiveBuf(recv_buf1.data(), recv_buf1.size()));
-
- // Receive it on socket 2.
- std::vector<char> recv_buf2(sizeof(kBuf) + HdrLen());
- ASSERT_NO_FATAL_FAILURE(ReceiveBufFrom(s2, recv_buf2.data(),
- recv_buf2.size()));
-
- EXPECT_EQ(memcmp(recv_buf1.data() + HdrLen(),
- recv_buf2.data() + HdrLen(), sizeof(kBuf)),
- 0);
-
- ASSERT_THAT(close(s2), SyscallSucceeds());
-}
-
-// Test that connect sends packets to the right place.
-TEST_P(RawSocketTest, SendAndReceiveViaConnect) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- ASSERT_THAT(
- connect(s_, reinterpret_cast<struct sockaddr*>(&addr_), AddrLen()),
- SyscallSucceeds());
-
- // Arbitrary.
- constexpr char kBuf[] = "JH4";
- ASSERT_THAT(send(s_, kBuf, sizeof(kBuf), 0),
- SyscallSucceedsWithValue(sizeof(kBuf)));
-
- // Receive the packet and make sure it's identical.
- std::vector<char> recv_buf(sizeof(kBuf) + HdrLen());
- ASSERT_NO_FATAL_FAILURE(ReceiveBuf(recv_buf.data(), recv_buf.size()));
- EXPECT_EQ(memcmp(recv_buf.data() + HdrLen(), kBuf, sizeof(kBuf)), 0);
-}
-
-// Bind to localhost, then send and receive packets.
-TEST_P(RawSocketTest, BindSendAndReceive) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- ASSERT_THAT(
- bind(s_, reinterpret_cast<struct sockaddr*>(&addr_), AddrLen()),
- SyscallSucceeds());
-
- // Arbitrary.
- constexpr char kBuf[] = "DR16";
- ASSERT_NO_FATAL_FAILURE(SendBuf(kBuf, sizeof(kBuf)));
-
- // Receive the packet and make sure it's identical.
- std::vector<char> recv_buf(sizeof(kBuf) + HdrLen());
- ASSERT_NO_FATAL_FAILURE(ReceiveBuf(recv_buf.data(), recv_buf.size()));
- EXPECT_EQ(memcmp(recv_buf.data() + HdrLen(), kBuf, sizeof(kBuf)), 0);
-}
-
-// Bind and connect to localhost and send/receive packets.
-TEST_P(RawSocketTest, BindConnectSendAndReceive) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- ASSERT_THAT(
- bind(s_, reinterpret_cast<struct sockaddr*>(&addr_), AddrLen()),
- SyscallSucceeds());
- ASSERT_THAT(
- connect(s_, reinterpret_cast<struct sockaddr*>(&addr_), AddrLen()),
- SyscallSucceeds());
-
- // Arbitrary.
- constexpr char kBuf[] = "DG88";
- ASSERT_NO_FATAL_FAILURE(SendBuf(kBuf, sizeof(kBuf)));
-
- // Receive the packet and make sure it's identical.
- std::vector<char> recv_buf(sizeof(kBuf) + HdrLen());
- ASSERT_NO_FATAL_FAILURE(ReceiveBuf(recv_buf.data(), recv_buf.size()));
- EXPECT_EQ(memcmp(recv_buf.data() + HdrLen(), kBuf, sizeof(kBuf)), 0);
-}
-
-// Check that setting SO_RCVBUF below min is clamped to the minimum
-// receive buffer size.
-TEST_P(RawSocketTest, SetSocketRecvBufBelowMin) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- // Discover minimum receive buf size by trying to set it to zero.
- // See:
- // https://github.com/torvalds/linux/blob/a5dc8300df75e8b8384b4c82225f1e4a0b4d9b55/net/core/sock.c#L820
- constexpr int kRcvBufSz = 0;
- ASSERT_THAT(
- setsockopt(s_, SOL_SOCKET, SO_RCVBUF, &kRcvBufSz, sizeof(kRcvBufSz)),
- SyscallSucceeds());
-
- int min = 0;
- socklen_t min_len = sizeof(min);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_RCVBUF, &min, &min_len),
- SyscallSucceeds());
-
- // Linux doubles the value so let's use a value that when doubled will still
- // be smaller than min.
- int below_min = min / 2 - 1;
- ASSERT_THAT(
- setsockopt(s_, SOL_SOCKET, SO_RCVBUF, &below_min, sizeof(below_min)),
- SyscallSucceeds());
-
- int val = 0;
- socklen_t val_len = sizeof(val);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_RCVBUF, &val, &val_len),
- SyscallSucceeds());
-
- ASSERT_EQ(min, val);
-}
-
-// Check that setting SO_RCVBUF above max is clamped to the maximum
-// receive buffer size.
-TEST_P(RawSocketTest, SetSocketRecvBufAboveMax) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- // Discover max buf size by trying to set the largest possible buffer size.
- constexpr int kRcvBufSz = 0xffffffff;
- ASSERT_THAT(
- setsockopt(s_, SOL_SOCKET, SO_RCVBUF, &kRcvBufSz, sizeof(kRcvBufSz)),
- SyscallSucceeds());
-
- int max = 0;
- socklen_t max_len = sizeof(max);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_RCVBUF, &max, &max_len),
- SyscallSucceeds());
-
- int above_max = max + 1;
- ASSERT_THAT(
- setsockopt(s_, SOL_SOCKET, SO_RCVBUF, &above_max, sizeof(above_max)),
- SyscallSucceeds());
-
- int val = 0;
- socklen_t val_len = sizeof(val);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_RCVBUF, &val, &val_len),
- SyscallSucceeds());
- ASSERT_EQ(max, val);
-}
-
-// Check that setting SO_RCVBUF to a value between min and max is honored.
-TEST_P(RawSocketTest, SetSocketRecvBuf) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- int max = 0;
- int min = 0;
- {
- // Discover max buf size by trying to set a really large buffer size.
- constexpr int kRcvBufSz = 0xffffffff;
- ASSERT_THAT(
- setsockopt(s_, SOL_SOCKET, SO_RCVBUF, &kRcvBufSz, sizeof(kRcvBufSz)),
- SyscallSucceeds());
-
- max = 0;
- socklen_t max_len = sizeof(max);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_RCVBUF, &max, &max_len),
- SyscallSucceeds());
- }
-
- {
- // Discover minimum buffer size by trying to set a zero size receive buffer
- // size.
- // See:
- // https://github.com/torvalds/linux/blob/a5dc8300df75e8b8384b4c82225f1e4a0b4d9b55/net/core/sock.c#L820
- constexpr int kRcvBufSz = 0;
- ASSERT_THAT(
- setsockopt(s_, SOL_SOCKET, SO_RCVBUF, &kRcvBufSz, sizeof(kRcvBufSz)),
- SyscallSucceeds());
-
- socklen_t min_len = sizeof(min);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_RCVBUF, &min, &min_len),
- SyscallSucceeds());
- }
-
- int quarter_sz = min + (max - min) / 4;
- ASSERT_THAT(
- setsockopt(s_, SOL_SOCKET, SO_RCVBUF, &quarter_sz, sizeof(quarter_sz)),
- SyscallSucceeds());
-
- int val = 0;
- socklen_t val_len = sizeof(val);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_RCVBUF, &val, &val_len),
- SyscallSucceeds());
-
- // Linux doubles the value set by SO_SNDBUF/SO_RCVBUF.
- quarter_sz *= 2;
- ASSERT_EQ(quarter_sz, val);
-}
-
-// Check that setting SO_SNDBUF below min is clamped to the minimum
-// send buffer size.
-TEST_P(RawSocketTest, SetSocketSendBufBelowMin) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- // Discover minimum buffer size by trying to set it to zero.
- constexpr int kSndBufSz = 0;
- ASSERT_THAT(
- setsockopt(s_, SOL_SOCKET, SO_SNDBUF, &kSndBufSz, sizeof(kSndBufSz)),
- SyscallSucceeds());
-
- int min = 0;
- socklen_t min_len = sizeof(min);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_SNDBUF, &min, &min_len),
- SyscallSucceeds());
-
- // Linux doubles the value so let's use a value that when doubled will still
- // be smaller than min.
- int below_min = min / 2 - 1;
- ASSERT_THAT(
- setsockopt(s_, SOL_SOCKET, SO_SNDBUF, &below_min, sizeof(below_min)),
- SyscallSucceeds());
-
- int val = 0;
- socklen_t val_len = sizeof(val);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_SNDBUF, &val, &val_len),
- SyscallSucceeds());
-
- ASSERT_EQ(min, val);
-}
-
-// Check that setting SO_SNDBUF above max is clamped to the maximum
-// send buffer size.
-TEST_P(RawSocketTest, SetSocketSendBufAboveMax) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- // Discover maximum buffer size by trying to set it to a large value.
- constexpr int kSndBufSz = 0xffffffff;
- ASSERT_THAT(
- setsockopt(s_, SOL_SOCKET, SO_SNDBUF, &kSndBufSz, sizeof(kSndBufSz)),
- SyscallSucceeds());
-
- int max = 0;
- socklen_t max_len = sizeof(max);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_SNDBUF, &max, &max_len),
- SyscallSucceeds());
-
- int above_max = max + 1;
- ASSERT_THAT(
- setsockopt(s_, SOL_SOCKET, SO_SNDBUF, &above_max, sizeof(above_max)),
- SyscallSucceeds());
-
- int val = 0;
- socklen_t val_len = sizeof(val);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_SNDBUF, &val, &val_len),
- SyscallSucceeds());
- ASSERT_EQ(max, val);
-}
-
-// Check that setting SO_SNDBUF to a value between min and max is honored.
-TEST_P(RawSocketTest, SetSocketSendBuf) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- int max = 0;
- int min = 0;
- {
- // Discover maximum buffer size by trying to set it to a large value.
- constexpr int kSndBufSz = 0xffffffff;
- ASSERT_THAT(
- setsockopt(s_, SOL_SOCKET, SO_SNDBUF, &kSndBufSz, sizeof(kSndBufSz)),
- SyscallSucceeds());
-
- max = 0;
- socklen_t max_len = sizeof(max);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_SNDBUF, &max, &max_len),
- SyscallSucceeds());
- }
-
- {
- // Discover minimum buffer size by trying to set it to zero.
- constexpr int kSndBufSz = 0;
- ASSERT_THAT(
- setsockopt(s_, SOL_SOCKET, SO_SNDBUF, &kSndBufSz, sizeof(kSndBufSz)),
- SyscallSucceeds());
-
- socklen_t min_len = sizeof(min);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_SNDBUF, &min, &min_len),
- SyscallSucceeds());
- }
-
- int quarter_sz = min + (max - min) / 4;
- ASSERT_THAT(
- setsockopt(s_, SOL_SOCKET, SO_SNDBUF, &quarter_sz, sizeof(quarter_sz)),
- SyscallSucceeds());
-
- int val = 0;
- socklen_t val_len = sizeof(val);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_SNDBUF, &val, &val_len),
- SyscallSucceeds());
-
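- // Linux doubles the value set by SO_SNDBUF/SO_RCVBUF.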
- quarter_sz *= 2;
- ASSERT_EQ(quarter_sz, val);
-}
-
-// Test that receive buffer limits are not enforced when the recv buffer is
-// empty.
-TEST_P(RawSocketTest, RecvBufLimitsEmptyRecvBuffer) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- ASSERT_THAT(
- bind(s_, reinterpret_cast<struct sockaddr*>(&addr_), AddrLen()),
- SyscallSucceeds());
- ASSERT_THAT(
- connect(s_, reinterpret_cast<struct sockaddr*>(&addr_), AddrLen()),
- SyscallSucceeds());
-
- int min = 0;
- {
- // Discover minimum buffer size by trying to set it to zero.
- constexpr int kRcvBufSz = 0;
- ASSERT_THAT(
- setsockopt(s_, SOL_SOCKET, SO_RCVBUF, &kRcvBufSz, sizeof(kRcvBufSz)),
- SyscallSucceeds());
-
- socklen_t min_len = sizeof(min);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_RCVBUF, &min, &min_len),
- SyscallSucceeds());
- }
-
- {
- // Send data of size min and verify that it's received.
- std::vector<char> buf(min);
- RandomizeBuffer(buf.data(), buf.size());
- ASSERT_NO_FATAL_FAILURE(SendBuf(buf.data(), buf.size()));
-
- // Receive the packet and make sure it's identical.
- std::vector<char> recv_buf(buf.size() + HdrLen());
- ASSERT_NO_FATAL_FAILURE(ReceiveBuf(recv_buf.data(), recv_buf.size()));
- EXPECT_EQ(
- memcmp(recv_buf.data() + HdrLen(), buf.data(), buf.size()),
- 0);
- }
-
- {
- // Send data of size min + 1 and verify that it's received. Both Linux and
- // Netstack accept a dgram that exceeds rcvBuf limits if the receive buffer
- // is currently empty.
- std::vector<char> buf(min + 1);
- RandomizeBuffer(buf.data(), buf.size());
- ASSERT_NO_FATAL_FAILURE(SendBuf(buf.data(), buf.size()));
- // Receive the packet and make sure it's identical.
- std::vector<char> recv_buf(buf.size() + HdrLen());
- ASSERT_NO_FATAL_FAILURE(ReceiveBuf(recv_buf.data(), recv_buf.size()));
- EXPECT_EQ(
- memcmp(recv_buf.data() + HdrLen(), buf.data(), buf.size()),
- 0);
- }
-}
-
-TEST_P(RawSocketTest, RecvBufLimits) {
- // TCP stack generates RSTs for unknown endpoints and it complicates the test
- // as we have to deal with the RST packets as well. For testing the raw socket
- // endpoints buffer limit enforcement we can just test for UDP.
- //
- // We don't use SKIP_IF here because root_test_runner explicitly fails if a
- // test is skipped.
- if (Protocol() == IPPROTO_TCP) {
- return;
- }
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- ASSERT_THAT(
- bind(s_, reinterpret_cast<struct sockaddr*>(&addr_), AddrLen()),
- SyscallSucceeds());
- ASSERT_THAT(
- connect(s_, reinterpret_cast<struct sockaddr*>(&addr_), AddrLen()),
- SyscallSucceeds());
-
- int min = 0;
- {
- // Discover minimum buffer size by trying to set it to zero.
- constexpr int kRcvBufSz = 0;
- ASSERT_THAT(
- setsockopt(s_, SOL_SOCKET, SO_RCVBUF, &kRcvBufSz, sizeof(kRcvBufSz)),
- SyscallSucceeds());
-
- socklen_t min_len = sizeof(min);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_RCVBUF, &min, &min_len),
- SyscallSucceeds());
- }
-
- // Now set the limit to min * 2.
- int new_rcv_buf_sz = min * 2;
- ASSERT_THAT(setsockopt(s_, SOL_SOCKET, SO_RCVBUF, &new_rcv_buf_sz,
- sizeof(new_rcv_buf_sz)),
- SyscallSucceeds());
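- // Read back the effective limit; Linux reports double the value passed in.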
- int rcv_buf_sz = 0;
- {
- socklen_t rcv_buf_len = sizeof(rcv_buf_sz);
- ASSERT_THAT(
- getsockopt(s_, SOL_SOCKET, SO_RCVBUF, &rcv_buf_sz, &rcv_buf_len),
- SyscallSucceeds());
- }
-
- // Set a receive timeout so that we don't block forever on reads if the test
- // fails.
- struct timeval tv {
- .tv_sec = 1, .tv_usec = 0,
- };
- ASSERT_THAT(setsockopt(s_, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)),
- SyscallSucceeds());
-
- {
- std::vector<char> buf(min);
- RandomizeBuffer(buf.data(), buf.size());
-
- ASSERT_NO_FATAL_FAILURE(SendBuf(buf.data(), buf.size()));
- ASSERT_NO_FATAL_FAILURE(SendBuf(buf.data(), buf.size()));
- ASSERT_NO_FATAL_FAILURE(SendBuf(buf.data(), buf.size()));
- ASSERT_NO_FATAL_FAILURE(SendBuf(buf.data(), buf.size()));
- int sent = 4;
- if (IsRunningOnGvisor()) {
- // Linux seems to drop the 4th packet even though technically it should
- // fit in the receive buffer.
- ASSERT_NO_FATAL_FAILURE(SendBuf(buf.data(), buf.size()));
- sent++;
- }
-
- // Verify that the expected number of packets are available to be read.
- for (int i = 0; i < sent - 1; i++) {
- // Receive the packet and make sure it's identical.
- std::vector<char> recv_buf(buf.size() + HdrLen());
- ASSERT_NO_FATAL_FAILURE(ReceiveBuf(recv_buf.data(), recv_buf.size()));
- EXPECT_EQ(memcmp(recv_buf.data() + HdrLen(), buf.data(),
- buf.size()),
- 0);
- }
-
- // Assert that the last packet we sent is dropped because the receive buffer
- // was already full.
- std::vector<char> recv_buf(buf.size() + HdrLen());
- struct iovec iov = {};
- iov.iov_base = static_cast<void*>(const_cast<char*>(recv_buf.data()));
- iov.iov_len = buf.size();
- struct msghdr msg = {};
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = 0;
- ASSERT_THAT(RetryEINTR(recvmsg)(s_, &msg, MSG_DONTWAIT),
- SyscallFailsWithErrno(EAGAIN));
- }
-}
-
-void RawSocketTest::SendBuf(const char* buf, int buf_len) {
- // It's safe to use const_cast here because sendmsg won't modify the iovec or
- // address.
- struct iovec iov = {};
- iov.iov_base = static_cast<void*>(const_cast<char*>(buf));
- iov.iov_len = static_cast<size_t>(buf_len);
- struct msghdr msg = {};
- msg.msg_name = static_cast<void*>(&addr_);
- msg.msg_namelen = AddrLen();
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = 0;
- ASSERT_THAT(sendmsg(s_, &msg, 0), SyscallSucceedsWithValue(buf_len));
-}
-
-void RawSocketTest::ReceiveBuf(char* recv_buf, size_t recv_buf_len) {
- ASSERT_NO_FATAL_FAILURE(ReceiveBufFrom(s_, recv_buf, recv_buf_len));
-}
-
-void RawSocketTest::ReceiveBufFrom(int sock, char* recv_buf,
- size_t recv_buf_len) {
- ASSERT_NO_FATAL_FAILURE(RecvNoCmsg(sock, recv_buf, recv_buf_len));
-}
-
-TEST_P(RawSocketTest, SetSocketDetachFilterNoInstalledFilter) {
- // TODO(gvisor.dev/2746): Support SO_ATTACH_FILTER/SO_DETACH_FILTER.
- if (IsRunningOnGvisor()) {
- constexpr int val = 0;
- ASSERT_THAT(setsockopt(s_, SOL_SOCKET, SO_DETACH_FILTER, &val, sizeof(val)),
- SyscallSucceeds());
- return;
- }
-
- constexpr int val = 0;
- ASSERT_THAT(setsockopt(s_, SOL_SOCKET, SO_DETACH_FILTER, &val, sizeof(val)),
- SyscallFailsWithErrno(ENOENT));
-}
-
-TEST_P(RawSocketTest, GetSocketDetachFilter) {
- int val = 0;
- socklen_t val_len = sizeof(val);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_DETACH_FILTER, &val, &val_len),
- SyscallFailsWithErrno(ENOPROTOOPT));
-}
-
-// AF_INET6+SOCK_RAW+IPPROTO_RAW sockets can be created, but not written to.
-TEST(RawSocketTest, IPv6ProtoRaw) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- int sock;
- ASSERT_THAT(sock = socket(AF_INET6, SOCK_RAW, IPPROTO_RAW),
- SyscallSucceeds());
-
- // Verify that writing yields EINVAL.
- char buf[] = "This is such a weird little edge case";
- struct sockaddr_in6 sin6 = {};
- sin6.sin6_family = AF_INET6;
- sin6.sin6_addr = in6addr_loopback;
- ASSERT_THAT(sendto(sock, buf, sizeof(buf), 0 /* flags */,
- reinterpret_cast<struct sockaddr*>(&sin6), sizeof(sin6)),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(RawSocketTest, IPv6SendMsg) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- int sock;
- ASSERT_THAT(sock = socket(AF_INET6, SOCK_RAW, IPPROTO_TCP),
- SyscallSucceeds());
-
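- // Build a message destined to an AF_INET address; sending it on an AF_INET6
- // raw socket should be rejected.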
- char kBuf[] = "hello";
- struct iovec iov = {};
- iov.iov_base = static_cast<void*>(const_cast<char*>(kBuf));
- iov.iov_len = static_cast<size_t>(sizeof(kBuf));
-
- struct sockaddr_storage addr = {};
- struct sockaddr_in* sin = reinterpret_cast<struct sockaddr_in*>(&addr);
- sin->sin_family = AF_INET;
- sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
-
- struct msghdr msg = {};
- msg.msg_name = static_cast<void*>(&addr);
- msg.msg_namelen = sizeof(sockaddr_in);
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = 0;
- ASSERT_THAT(sendmsg(sock, &msg, 0), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_P(RawSocketTest, ConnectOnIPv6Socket) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- int sock;
- ASSERT_THAT(sock = socket(AF_INET6, SOCK_RAW, IPPROTO_TCP),
- SyscallSucceeds());
-
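- // Connecting an AF_INET6 raw socket to an AF_INET address should fail with
- // EAFNOSUPPORT.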
- struct sockaddr_storage addr = {};
- struct sockaddr_in* sin = reinterpret_cast<struct sockaddr_in*>(&addr);
- sin->sin_family = AF_INET;
- sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
-
- ASSERT_THAT(connect(sock, reinterpret_cast<struct sockaddr*>(&addr),
- sizeof(sockaddr_in6)),
- SyscallFailsWithErrno(EAFNOSUPPORT));
-}
-
-INSTANTIATE_TEST_SUITE_P(
- AllInetTests, RawSocketTest,
- ::testing::Combine(::testing::Values(IPPROTO_TCP, IPPROTO_UDP),
- ::testing::Values(AF_INET, AF_INET6)));
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/raw_socket_hdrincl.cc b/test/syscalls/linux/raw_socket_hdrincl.cc
deleted file mode 100644
index 4611b6283..000000000
--- a/test/syscalls/linux/raw_socket_hdrincl.cc
+++ /dev/null
@@ -1,407 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <linux/capability.h>
-#include <netinet/in.h>
-#include <netinet/ip.h>
-#include <netinet/ip_icmp.h>
-#include <netinet/udp.h>
-#include <poll.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <algorithm>
-#include <cstring>
-
-#include "gtest/gtest.h"
-#include "absl/base/internal/endian.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/capability_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// Tests for IPPROTO_RAW raw sockets, which implies IP_HDRINCL.
-class RawHDRINCL : public ::testing::Test {
- protected:
- // Creates a socket to be used in tests.
- void SetUp() override;
-
- // Closes the socket created by SetUp().
- void TearDown() override;
-
- // Returns a valid loopback IP header with no payload.
- struct iphdr LoopbackHeader();
-
- // Fills in buf with an IP header, UDP header, and payload. Returns false if
- // buf_size isn't large enough to hold everything.
- bool FillPacket(char* buf, size_t buf_size, int port, const char* payload,
- uint16_t payload_size);
-
- // The socket used for both reading and writing.
- int socket_;
-
- // The loopback address.
- struct sockaddr_in addr_;
-};
-
-void RawHDRINCL::SetUp() {
- if (!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW))) {
- ASSERT_THAT(socket(AF_INET, SOCK_RAW, IPPROTO_RAW),
- SyscallFailsWithErrno(EPERM));
- GTEST_SKIP();
- }
-
- ASSERT_THAT(socket_ = socket(AF_INET, SOCK_RAW, IPPROTO_RAW),
- SyscallSucceeds());
-
- addr_ = {};
-
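- // "On raw sockets sin_port is set to the IP protocol." - ip(7).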
- addr_.sin_port = IPPROTO_IP;
- addr_.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- addr_.sin_family = AF_INET;
-}
-
-void RawHDRINCL::TearDown() {
- // TearDown will be run even if we skip the test.
- if (ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW))) {
- EXPECT_THAT(close(socket_), SyscallSucceeds());
- }
-}
-
-struct iphdr RawHDRINCL::LoopbackHeader() {
- struct iphdr hdr = {};
- hdr.ihl = 5;
- hdr.version = 4;
- hdr.tos = 0;
- hdr.tot_len = absl::gbswap_16(sizeof(hdr));
- hdr.id = 0;
- hdr.frag_off = 0;
- hdr.ttl = 7;
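- // Protocol 1 is ICMP; FillPacket() overrides this with IPPROTO_UDP.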
- hdr.protocol = 1;
- hdr.daddr = htonl(INADDR_LOOPBACK);
- // Per raw(7), the network stack always fills in hdr.check and hdr.tot_len,
- // and fills in hdr.saddr when it is left as zero.
- return hdr;
-}
-
-bool RawHDRINCL::FillPacket(char* buf, size_t buf_size, int port,
- const char* payload, uint16_t payload_size) {
- if (buf_size < sizeof(struct iphdr) + sizeof(struct udphdr) + payload_size) {
- return false;
- }
-
- struct iphdr ip = LoopbackHeader();
- ip.protocol = IPPROTO_UDP;
-
- struct udphdr udp = {};
- udp.source = absl::gbswap_16(port);
- udp.dest = absl::gbswap_16(port);
- udp.len = absl::gbswap_16(sizeof(udp) + payload_size);
- udp.check = 0;
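- // A zero UDP checksum means "no checksum" for IPv4, so receivers skip validation.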
-
- memcpy(buf, reinterpret_cast<char*>(&ip), sizeof(ip));
- memcpy(buf + sizeof(ip), reinterpret_cast<char*>(&udp), sizeof(udp));
- memcpy(buf + sizeof(ip) + sizeof(udp), payload, payload_size);
-
- return true;
-}
-
-// We should be able to create multiple IPPROTO_RAW sockets. RawHDRINCL::SetUp
-// creates the first one, so we only have to create one more here.
-TEST_F(RawHDRINCL, MultipleCreation) {
- int s2;
- ASSERT_THAT(s2 = socket(AF_INET, SOCK_RAW, IPPROTO_RAW), SyscallSucceeds());
-
- ASSERT_THAT(close(s2), SyscallSucceeds());
-}
-
-// Test that shutting down an unconnected socket fails.
-TEST_F(RawHDRINCL, FailShutdownWithoutConnect) {
- ASSERT_THAT(shutdown(socket_, SHUT_WR), SyscallFailsWithErrno(ENOTCONN));
- ASSERT_THAT(shutdown(socket_, SHUT_RD), SyscallFailsWithErrno(ENOTCONN));
-}
-
-// Test that listen() fails.
-TEST_F(RawHDRINCL, FailListen) {
- ASSERT_THAT(listen(socket_, 1), SyscallFailsWithErrno(ENOTSUP));
-}
-
-// Test that accept() fails.
-TEST_F(RawHDRINCL, FailAccept) {
- struct sockaddr saddr;
- socklen_t addrlen = sizeof(saddr);
- ASSERT_THAT(accept(socket_, &saddr, &addrlen),
- SyscallFailsWithErrno(ENOTSUP));
-}
-
-// Test that the socket is writable immediately.
-TEST_F(RawHDRINCL, PollWritableImmediately) {
- struct pollfd pfd = {};
- pfd.fd = socket_;
- pfd.events = POLLOUT;
- ASSERT_THAT(RetryEINTR(poll)(&pfd, 1, 0), SyscallSucceedsWithValue(1));
-}
-
-// Test that the socket isn't readable.
-TEST_F(RawHDRINCL, NotReadable) {
- // Try to receive data with MSG_DONTWAIT, which returns immediately if there's
- // nothing to be read.
- char buf[117];
- ASSERT_THAT(RetryEINTR(recv)(socket_, buf, sizeof(buf), MSG_DONTWAIT),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-// Test that we can connect() to a valid IP (loopback).
-TEST_F(RawHDRINCL, ConnectToLoopback) {
- ASSERT_THAT(connect(socket_, reinterpret_cast<struct sockaddr*>(&addr_),
- sizeof(addr_)),
- SyscallSucceeds());
-}
-
-TEST_F(RawHDRINCL, SendWithoutConnectFails) {
- struct iphdr hdr = LoopbackHeader();
- ASSERT_THAT(send(socket_, &hdr, sizeof(hdr), 0),
- SyscallFailsWithErrno(EDESTADDRREQ));
-}
-
-// IPPROTO_RAW sockets are send-only. Verify that we can't read back a packet
-// sent to loopback.
-TEST_F(RawHDRINCL, NotReadableAfterWrite) {
- ASSERT_THAT(connect(socket_, reinterpret_cast<struct sockaddr*>(&addr_),
- sizeof(addr_)),
- SyscallSucceeds());
-
- // Construct a packet with an IP header, UDP header, and payload.
- constexpr char kPayload[] = "odst";
- char packet[sizeof(struct iphdr) + sizeof(struct udphdr) + sizeof(kPayload)];
- ASSERT_TRUE(FillPacket(packet, sizeof(packet), 40000 /* port */, kPayload,
- sizeof(kPayload)));
-
- socklen_t addrlen = sizeof(addr_);
- ASSERT_THAT(
- sendto(socket_, reinterpret_cast<void*>(&packet), sizeof(packet), 0,
- reinterpret_cast<struct sockaddr*>(&addr_), addrlen),
- SyscallSucceedsWithValue(sizeof(packet)));
-
- struct pollfd pfd = {};
- pfd.fd = socket_;
- pfd.events = POLLIN;
- ASSERT_THAT(RetryEINTR(poll)(&pfd, 1, 1000), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(RawHDRINCL, WriteTooSmall) {
- ASSERT_THAT(connect(socket_, reinterpret_cast<struct sockaddr*>(&addr_),
- sizeof(addr_)),
- SyscallSucceeds());
-
- // This is smaller than the size of an IP header.
- constexpr char kBuf[] = "JP5";
- ASSERT_THAT(send(socket_, kBuf, sizeof(kBuf), 0),
- SyscallFailsWithErrno(EINVAL));
-}
-
-// Bind to localhost.
-TEST_F(RawHDRINCL, BindToLocalhost) {
- ASSERT_THAT(
- bind(socket_, reinterpret_cast<struct sockaddr*>(&addr_), sizeof(addr_)),
- SyscallSucceeds());
-}
-
-// Binding to an address not assigned to any local interface should fail.
-TEST_F(RawHDRINCL, BindToInvalid) {
- struct sockaddr_in bind_addr = {};
- bind_addr.sin_family = AF_INET;
- bind_addr.sin_addr = {1}; // 1.0.0.0 - An address that we can't bind to.
- ASSERT_THAT(bind(socket_, reinterpret_cast<struct sockaddr*>(&bind_addr),
- sizeof(bind_addr)),
- SyscallFailsWithErrno(EADDRNOTAVAIL));
-}
-
-// Send and receive a packet.
-TEST_F(RawHDRINCL, SendAndReceive) {
- int port = 40000;
- if (!IsRunningOnGvisor()) {
- port = static_cast<short>(ASSERT_NO_ERRNO_AND_VALUE(
- PortAvailable(0, AddressFamily::kIpv4, SocketType::kUdp, false)));
- }
-
- // IPPROTO_RAW sockets are write-only. We'll have to open another socket to
- // read what we write.
- FileDescriptor udp_sock =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_RAW, IPPROTO_UDP));
-
- // Construct a packet with an IP header, UDP header, and payload.
- constexpr char kPayload[] = "toto";
- char packet[sizeof(struct iphdr) + sizeof(struct udphdr) + sizeof(kPayload)];
- ASSERT_TRUE(
- FillPacket(packet, sizeof(packet), port, kPayload, sizeof(kPayload)));
-
- socklen_t addrlen = sizeof(addr_);
- ASSERT_THAT(sendto(socket_, &packet, sizeof(packet), 0,
- reinterpret_cast<struct sockaddr*>(&addr_), addrlen),
- SyscallSucceedsWithValue(sizeof(packet)));
-
- // Receive the payload.
- char recv_buf[sizeof(packet)];
- struct sockaddr_in src;
- socklen_t src_size = sizeof(src);
- ASSERT_THAT(recvfrom(udp_sock.get(), recv_buf, sizeof(recv_buf), 0,
- reinterpret_cast<struct sockaddr*>(&src), &src_size),
- SyscallSucceedsWithValue(sizeof(packet)));
- EXPECT_EQ(
- memcmp(kPayload, recv_buf + sizeof(struct iphdr) + sizeof(struct udphdr),
- sizeof(kPayload)),
- 0);
- // The network stack should have set the source address.
- EXPECT_EQ(src.sin_family, AF_INET);
- EXPECT_EQ(absl::gbswap_32(src.sin_addr.s_addr), INADDR_LOOPBACK);
- // The packet ID should not be 0, as the packet has DF=0.
- struct iphdr* iphdr = reinterpret_cast<struct iphdr*>(recv_buf);
- EXPECT_NE(iphdr->id, 0);
-}
-
-// Send and receive a packet where the sendto address is not the same as the
-// provided destination.
-TEST_F(RawHDRINCL, SendAndReceiveDifferentAddress) {
- int port = 40000;
- if (!IsRunningOnGvisor()) {
- port = static_cast<short>(ASSERT_NO_ERRNO_AND_VALUE(
- PortAvailable(0, AddressFamily::kIpv4, SocketType::kUdp, false)));
- }
-
- // IPPROTO_RAW sockets are write-only. We'll have to open another socket to
- // read what we write.
- FileDescriptor udp_sock =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_RAW, IPPROTO_UDP));
-
- // Construct a packet with an IP header, UDP header, and payload.
- constexpr char kPayload[] = "toto";
- char packet[sizeof(struct iphdr) + sizeof(struct udphdr) + sizeof(kPayload)];
- ASSERT_TRUE(
- FillPacket(packet, sizeof(packet), port, kPayload, sizeof(kPayload)));
- // Overwrite the IP destination address with an IP we can't get to.
- constexpr int32_t kUnreachable = 42;
- struct iphdr iphdr = {};
- memcpy(&iphdr, packet, sizeof(iphdr));
- iphdr.daddr = kUnreachable;
- memcpy(packet, &iphdr, sizeof(iphdr));
-
- // Send to localhost via loopback.
- socklen_t addrlen = sizeof(addr_);
- ASSERT_THAT(sendto(socket_, &packet, sizeof(packet), 0,
- reinterpret_cast<struct sockaddr*>(&addr_), addrlen),
- SyscallSucceedsWithValue(sizeof(packet)));
-
- // Receive the payload. Despite an unreachable destination address, sendto
- // should have sent the packet through loopback.
- char recv_buf[sizeof(packet)];
- struct sockaddr_in src;
- socklen_t src_size = sizeof(src);
- ASSERT_THAT(recvfrom(udp_sock.get(), recv_buf, sizeof(recv_buf), 0,
- reinterpret_cast<struct sockaddr*>(&src), &src_size),
- SyscallSucceedsWithValue(sizeof(packet)));
- EXPECT_EQ(
- memcmp(kPayload, recv_buf + sizeof(struct iphdr) + sizeof(struct udphdr),
- sizeof(kPayload)),
- 0);
- // The network stack should have set the source address.
- EXPECT_EQ(src.sin_family, AF_INET);
- EXPECT_EQ(absl::gbswap_32(src.sin_addr.s_addr), INADDR_LOOPBACK);
- // The packet ID should not be 0, as the packet has DF=0.
- struct iphdr recv_iphdr = {};
- memcpy(&recv_iphdr, recv_buf, sizeof(recv_iphdr));
- EXPECT_NE(recv_iphdr.id, 0);
- // The destination address is kUnreachable despite arriving via loopback.
- EXPECT_EQ(recv_iphdr.daddr, kUnreachable);
-}
-
-// Send and receive a packet w/ the IP_HDRINCL option set.
-TEST_F(RawHDRINCL, SendAndReceiveIPHdrIncl) {
- int port = 40000;
- if (!IsRunningOnGvisor()) {
- port = static_cast<short>(ASSERT_NO_ERRNO_AND_VALUE(
- PortAvailable(0, AddressFamily::kIpv4, SocketType::kUdp, false)));
- }
-
- FileDescriptor recv_sock =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_RAW, IPPROTO_UDP));
-
- FileDescriptor send_sock =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_RAW, IPPROTO_UDP));
-
- // Enable IP_HDRINCL option so that we can build and send w/ an IP
- // header.
- constexpr int kSockOptOn = 1;
- ASSERT_THAT(setsockopt(send_sock.get(), SOL_IP, IP_HDRINCL, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- // This is not strictly required but we do it to make sure that setting
- // IP_HDRINCL on a non-IPPROTO_RAW socket does not prevent it from receiving
- // packets.
- ASSERT_THAT(setsockopt(recv_sock.get(), SOL_IP, IP_HDRINCL, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- // Construct a packet with an IP header, UDP header, and payload.
- constexpr char kPayload[] = "toto";
- char packet[sizeof(struct iphdr) + sizeof(struct udphdr) + sizeof(kPayload)];
- ASSERT_TRUE(
- FillPacket(packet, sizeof(packet), port, kPayload, sizeof(kPayload)));
-
- socklen_t addrlen = sizeof(addr_);
- ASSERT_THAT(sendto(send_sock.get(), &packet, sizeof(packet), 0,
- reinterpret_cast<struct sockaddr*>(&addr_), addrlen),
- SyscallSucceedsWithValue(sizeof(packet)));
-
- // Receive the payload.
- char recv_buf[sizeof(packet)];
- struct sockaddr_in src;
- socklen_t src_size = sizeof(src);
- ASSERT_THAT(recvfrom(recv_sock.get(), recv_buf, sizeof(recv_buf), 0,
- reinterpret_cast<struct sockaddr*>(&src), &src_size),
- SyscallSucceedsWithValue(sizeof(packet)));
- EXPECT_EQ(
- memcmp(kPayload, recv_buf + sizeof(struct iphdr) + sizeof(struct udphdr),
- sizeof(kPayload)),
- 0);
- // The network stack should have set the source address.
- EXPECT_EQ(src.sin_family, AF_INET);
- EXPECT_EQ(absl::gbswap_32(src.sin_addr.s_addr), INADDR_LOOPBACK);
- struct iphdr iphdr = {};
- memcpy(&iphdr, recv_buf, sizeof(iphdr));
- EXPECT_NE(iphdr.id, 0);
-
- // Also verify that the packet we just sent was not delivered to the
- // IPPROTO_RAW socket.
- {
- char recv_buf[sizeof(packet)];
- struct sockaddr_in src;
- socklen_t src_size = sizeof(src);
- ASSERT_THAT(recvfrom(socket_, recv_buf, sizeof(recv_buf), MSG_DONTWAIT,
- reinterpret_cast<struct sockaddr*>(&src), &src_size),
- SyscallFailsWithErrno(EAGAIN));
- }
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/raw_socket_icmp.cc b/test/syscalls/linux/raw_socket_icmp.cc
deleted file mode 100644
index 275996bd3..000000000
--- a/test/syscalls/linux/raw_socket_icmp.cc
+++ /dev/null
@@ -1,549 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <linux/capability.h>
-#include <netinet/in.h>
-#include <netinet/ip.h>
-#include <netinet/ip_icmp.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <algorithm>
-#include <cstdint>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/capability_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// The size of an empty ICMP packet and IP header together.
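-// (20-byte IPv4 header + 8-byte ICMP echo header = 28 bytes.)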
-constexpr size_t kEmptyICMPSize = 28;
-
-// ICMP raw sockets get their own special tests because Linux automatically
-// responds to ICMP echo requests, and thus a single echo request sent via
-// loopback leads to 2 received ICMP packets.
-
-class RawSocketICMPTest : public ::testing::Test {
- protected:
- // Creates a socket to be used in tests.
- void SetUp() override;
-
- // Closes the socket created by SetUp().
- void TearDown() override;
-
- // Checks that both an ICMP echo request and reply are received. Calls should
- // be wrapped in ASSERT_NO_FATAL_FAILURE.
- void ExpectICMPSuccess(const struct icmphdr& icmp);
-
- // Sends icmp via s_.
- void SendEmptyICMP(const struct icmphdr& icmp);
-
- // Sends icmp via s_ to the given address.
- void SendEmptyICMPTo(int sock, const struct sockaddr_in& addr,
- const struct icmphdr& icmp);
-
- // Reads from s_ into recv_buf.
- void ReceiveICMP(char* recv_buf, size_t recv_buf_len, size_t expected_size,
- struct sockaddr_in* src);
-
- // Reads from sock into recv_buf.
- void ReceiveICMPFrom(char* recv_buf, size_t recv_buf_len,
- size_t expected_size, struct sockaddr_in* src, int sock);
-
- // The socket used for both reading and writing.
- int s_;
-
- // The loopback address.
- struct sockaddr_in addr_;
-};
-
-void RawSocketICMPTest::SetUp() {
- if (!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW))) {
- ASSERT_THAT(socket(AF_INET, SOCK_RAW, IPPROTO_ICMP),
- SyscallFailsWithErrno(EPERM));
- GTEST_SKIP();
- }
-
- ASSERT_THAT(s_ = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP), SyscallSucceeds());
-
- addr_ = {};
-
- // "On raw sockets sin_port is set to the IP protocol." - ip(7).
- addr_.sin_port = IPPROTO_IP;
- addr_.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- addr_.sin_family = AF_INET;
-}
-
-void RawSocketICMPTest::TearDown() {
- // TearDown will be run even if we skip the test.
- if (ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW))) {
- EXPECT_THAT(close(s_), SyscallSucceeds());
- }
-}
-
-// We'll only read an echo in this case, as the kernel won't respond to the
-// malformed ICMP checksum.
-TEST_F(RawSocketICMPTest, SendAndReceiveBadChecksum) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- // Prepare and send an ICMP packet. Use arbitrary junk for checksum, sequence,
- // and ID. None of that should matter for raw sockets - the kernel should
- // still give us the packet.
- struct icmphdr icmp;
- icmp.type = ICMP_ECHO;
- icmp.code = 0;
- icmp.checksum = 0;
- icmp.un.echo.sequence = 2012;
- icmp.un.echo.id = 2014;
- ASSERT_NO_FATAL_FAILURE(SendEmptyICMP(icmp));
-
- // Verify that we get the echo, then that there's nothing else to read.
- char recv_buf[kEmptyICMPSize];
- struct sockaddr_in src;
- ASSERT_NO_FATAL_FAILURE(
- ReceiveICMP(recv_buf, sizeof(recv_buf), sizeof(struct icmphdr), &src));
- EXPECT_EQ(memcmp(&src, &addr_, sizeof(src)), 0);
- // The packet should be identical to what we sent.
- EXPECT_EQ(memcmp(recv_buf + sizeof(struct iphdr), &icmp, sizeof(icmp)), 0);
-
- // And there should be nothing left to read.
- EXPECT_THAT(RetryEINTR(recv)(s_, recv_buf, sizeof(recv_buf), MSG_DONTWAIT),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-// Send and receive an ICMP packet.
-TEST_F(RawSocketICMPTest, SendAndReceive) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- // Prepare and send an ICMP packet. Use arbitrary junk for sequence and ID.
- // None of that should matter for raw sockets - the kernel should still give
- // us the packet.
- struct icmphdr icmp;
- icmp.type = ICMP_ECHO;
- icmp.code = 0;
- icmp.checksum = 0;
- icmp.un.echo.sequence = 2012;
- icmp.un.echo.id = 2014;
- icmp.checksum = ICMPChecksum(icmp, NULL, 0);
- ASSERT_NO_FATAL_FAILURE(SendEmptyICMP(icmp));
-
- ASSERT_NO_FATAL_FAILURE(ExpectICMPSuccess(icmp));
-}
-
-// We should be able to create multiple raw sockets for the same protocol and
-// receive the same packet on both.
-TEST_F(RawSocketICMPTest, MultipleSocketReceive) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- FileDescriptor s2 =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_RAW, IPPROTO_ICMP));
-
- // Prepare and send an ICMP packet. Use arbitrary junk for sequence and ID.
- // None of that should matter for raw sockets - the kernel should still give
- // us the packet.
- struct icmphdr icmp;
- icmp.type = ICMP_ECHO;
- icmp.code = 0;
- icmp.checksum = 0;
- icmp.un.echo.sequence = 2016;
- icmp.un.echo.id = 2018;
- icmp.checksum = ICMPChecksum(icmp, NULL, 0);
- ASSERT_NO_FATAL_FAILURE(SendEmptyICMP(icmp));
-
- // Both sockets will receive the echo request and reply in indeterminate
- // order, so we'll need to read 2 packets from each.
-
- // Receive on socket 1.
- constexpr int kBufSize = kEmptyICMPSize;
- char recv_buf1[2][kBufSize];
- struct sockaddr_in src;
- for (int i = 0; i < 2; i++) {
- ASSERT_NO_FATAL_FAILURE(ReceiveICMP(recv_buf1[i],
- ABSL_ARRAYSIZE(recv_buf1[i]),
- sizeof(struct icmphdr), &src));
- EXPECT_EQ(memcmp(&src, &addr_, sizeof(src)), 0);
- }
-
- // Receive on socket 2.
- char recv_buf2[2][kBufSize];
- for (int i = 0; i < 2; i++) {
- ASSERT_NO_FATAL_FAILURE(
- ReceiveICMPFrom(recv_buf2[i], ABSL_ARRAYSIZE(recv_buf2[i]),
- sizeof(struct icmphdr), &src, s2.get()));
- EXPECT_EQ(memcmp(&src, &addr_, sizeof(src)), 0);
- }
-
- // Ensure both sockets receive identical packets.
- int types[] = {ICMP_ECHO, ICMP_ECHOREPLY};
- for (int type : types) {
- auto match_type = [=](char buf[kBufSize]) {
- struct icmphdr* icmp =
- reinterpret_cast<struct icmphdr*>(buf + sizeof(struct iphdr));
- return icmp->type == type;
- };
- auto icmp1_it =
- std::find_if(std::begin(recv_buf1), std::end(recv_buf1), match_type);
- auto icmp2_it =
- std::find_if(std::begin(recv_buf2), std::end(recv_buf2), match_type);
- ASSERT_NE(icmp1_it, std::end(recv_buf1));
- ASSERT_NE(icmp2_it, std::end(recv_buf2));
- EXPECT_EQ(memcmp(*icmp1_it + sizeof(struct iphdr),
- *icmp2_it + sizeof(struct iphdr), sizeof(icmp)),
- 0);
- }
-}
-
-// A raw ICMP socket and ping socket should both receive the ICMP packets
-// intended for the ping socket.
-TEST_F(RawSocketICMPTest, RawAndPingSockets) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- FileDescriptor ping_sock =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP));
-
- // Ping sockets take care of the ICMP ID and checksum.
- struct icmphdr icmp;
- icmp.type = ICMP_ECHO;
- icmp.code = 0;
- icmp.un.echo.sequence = 2012; // Arbitrary sequence number.
- ASSERT_THAT(RetryEINTR(sendto)(ping_sock.get(), &icmp, sizeof(icmp), 0,
- reinterpret_cast<struct sockaddr*>(&addr_),
- sizeof(addr_)),
- SyscallSucceedsWithValue(sizeof(icmp)));
-
- // Receive on socket 1, which receives the echo request and reply in
- // indeterminate order.
- constexpr int kBufSize = kEmptyICMPSize;
- char recv_buf1[2][kBufSize];
- struct sockaddr_in src;
- for (int i = 0; i < 2; i++) {
- ASSERT_NO_FATAL_FAILURE(
- ReceiveICMP(recv_buf1[i], kBufSize, sizeof(struct icmphdr), &src));
- EXPECT_EQ(memcmp(&src, &addr_, sizeof(src)), 0);
- }
-
- // Receive on socket 2. Ping sockets only get the echo reply, not the initial
- // echo.
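- // Unlike raw sockets, ping sockets do not include the IP header in the
- // payload, which is why only sizeof(struct icmphdr) bytes are expected below.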
- char ping_recv_buf[kBufSize];
- ASSERT_THAT(RetryEINTR(recv)(ping_sock.get(), ping_recv_buf, kBufSize, 0),
- SyscallSucceedsWithValue(sizeof(struct icmphdr)));
-
- // Ensure both sockets receive identical echo reply packets.
- auto match_type_raw = [=](char buf[kBufSize]) {
- struct icmphdr* icmp =
- reinterpret_cast<struct icmphdr*>(buf + sizeof(struct iphdr));
- return icmp->type == ICMP_ECHOREPLY;
- };
- auto raw_reply_it =
- std::find_if(std::begin(recv_buf1), std::end(recv_buf1), match_type_raw);
- ASSERT_NE(raw_reply_it, std::end(recv_buf1));
- EXPECT_EQ(
- memcmp(*raw_reply_it + sizeof(struct iphdr), ping_recv_buf, sizeof(icmp)),
- 0);
-}
-
-// A raw ICMP socket should be able to send a malformed short ICMP Echo Request,
-// while a ping socket should not. Neither should be able to receive a short
-// malformed packet.
-TEST_F(RawSocketICMPTest, ShortEchoRawAndPingSockets) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- FileDescriptor ping_sock =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP));
-
- struct icmphdr icmp;
- icmp.type = ICMP_ECHO;
- icmp.code = 0;
- icmp.un.echo.sequence = 0;
- icmp.un.echo.id = 6789;
- icmp.checksum = 0;
- icmp.checksum = ICMPChecksum(icmp, NULL, 0);
-
- // Omit 2 bytes from ICMP packet.
- constexpr int kShortICMPSize = sizeof(icmp) - 2;
-
- // Sending a malformed short ICMP message to a ping socket should fail.
- ASSERT_THAT(RetryEINTR(sendto)(ping_sock.get(), &icmp, kShortICMPSize, 0,
- reinterpret_cast<struct sockaddr*>(&addr_),
- sizeof(addr_)),
- SyscallFailsWithErrno(EINVAL));
-
- // Sending a malformed short ICMP message to a raw socket should not fail.
- ASSERT_THAT(RetryEINTR(sendto)(s_, &icmp, kShortICMPSize, 0,
- reinterpret_cast<struct sockaddr*>(&addr_),
- sizeof(addr_)),
- SyscallSucceedsWithValue(kShortICMPSize));
-
- // Neither Ping nor Raw socket should have anything to read.
- char recv_buf[kEmptyICMPSize];
- EXPECT_THAT(RetryEINTR(recv)(ping_sock.get(), recv_buf, sizeof(recv_buf),
- MSG_DONTWAIT),
- SyscallFailsWithErrno(EAGAIN));
- EXPECT_THAT(RetryEINTR(recv)(s_, recv_buf, sizeof(recv_buf), MSG_DONTWAIT),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-// A raw ICMP socket should be able to send a malformed short ICMP Echo Reply,
-// while a ping socket should not. Neither should be able to receive a short
-// malformed packet.
-TEST_F(RawSocketICMPTest, ShortEchoReplyRawAndPingSockets) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- FileDescriptor ping_sock =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP));
-
- struct icmphdr icmp;
- icmp.type = ICMP_ECHOREPLY;
- icmp.code = 0;
- icmp.un.echo.sequence = 0;
- icmp.un.echo.id = 6789;
- icmp.checksum = 0;
- icmp.checksum = ICMPChecksum(icmp, NULL, 0);
-
- // Omit 2 bytes from ICMP packet.
- constexpr int kShortICMPSize = sizeof(icmp) - 2;
-
- // Sending a malformed short ICMP message to a ping socket should fail.
- ASSERT_THAT(RetryEINTR(sendto)(ping_sock.get(), &icmp, kShortICMPSize, 0,
- reinterpret_cast<struct sockaddr*>(&addr_),
- sizeof(addr_)),
- SyscallFailsWithErrno(EINVAL));
-
- // Sending a malformed short ICMP message to a raw socket should not fail.
- ASSERT_THAT(RetryEINTR(sendto)(s_, &icmp, kShortICMPSize, 0,
- reinterpret_cast<struct sockaddr*>(&addr_),
- sizeof(addr_)),
- SyscallSucceedsWithValue(kShortICMPSize));
-
- // Neither Ping nor Raw socket should have anything to read.
- char recv_buf[kEmptyICMPSize];
- EXPECT_THAT(RetryEINTR(recv)(ping_sock.get(), recv_buf, sizeof(recv_buf),
- MSG_DONTWAIT),
- SyscallFailsWithErrno(EAGAIN));
- EXPECT_THAT(RetryEINTR(recv)(s_, recv_buf, sizeof(recv_buf), MSG_DONTWAIT),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-// Test that connect() sends packets to the right place.
-TEST_F(RawSocketICMPTest, SendAndReceiveViaConnect) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- ASSERT_THAT(
- connect(s_, reinterpret_cast<struct sockaddr*>(&addr_), sizeof(addr_)),
- SyscallSucceeds());
-
- // Prepare and send an ICMP packet. Use arbitrary junk for sequence and ID.
- // None of that should matter for raw sockets - the kernel should still give
- // us the packet.
- struct icmphdr icmp;
- icmp.type = ICMP_ECHO;
- icmp.code = 0;
- icmp.checksum = 0;
- icmp.un.echo.sequence = 2003;
- icmp.un.echo.id = 2004;
- icmp.checksum = ICMPChecksum(icmp, NULL, 0);
- ASSERT_THAT(send(s_, &icmp, sizeof(icmp), 0),
- SyscallSucceedsWithValue(sizeof(icmp)));
-
- ASSERT_NO_FATAL_FAILURE(ExpectICMPSuccess(icmp));
-}
-
-// Bind to localhost, then send and receive packets.
-TEST_F(RawSocketICMPTest, BindSendAndReceive) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- ASSERT_THAT(
- bind(s_, reinterpret_cast<struct sockaddr*>(&addr_), sizeof(addr_)),
- SyscallSucceeds());
-
- // Prepare and send an ICMP packet. Use arbitrary junk for checksum, sequence,
- // and ID. None of that should matter for raw sockets - the kernel should
- // still give us the packet.
- struct icmphdr icmp;
- icmp.type = ICMP_ECHO;
- icmp.code = 0;
- icmp.checksum = 0;
- icmp.un.echo.sequence = 2004;
- icmp.un.echo.id = 2007;
- icmp.checksum = ICMPChecksum(icmp, NULL, 0);
- ASSERT_NO_FATAL_FAILURE(SendEmptyICMP(icmp));
-
- ASSERT_NO_FATAL_FAILURE(ExpectICMPSuccess(icmp));
-}
-
-// Bind and connect to localhost and send/receive packets.
-TEST_F(RawSocketICMPTest, BindConnectSendAndReceive) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- ASSERT_THAT(
- bind(s_, reinterpret_cast<struct sockaddr*>(&addr_), sizeof(addr_)),
- SyscallSucceeds());
- ASSERT_THAT(
- connect(s_, reinterpret_cast<struct sockaddr*>(&addr_), sizeof(addr_)),
- SyscallSucceeds());
-
- // Prepare and send an ICMP packet. Use arbitrary junk for checksum, sequence,
- // and ID. None of that should matter for raw sockets - the kernel should
- // still give us the packet.
- struct icmphdr icmp;
- icmp.type = ICMP_ECHO;
- icmp.code = 0;
- icmp.checksum = 0;
- icmp.un.echo.sequence = 2010;
- icmp.un.echo.id = 7;
- icmp.checksum = ICMPChecksum(icmp, NULL, 0);
- ASSERT_NO_FATAL_FAILURE(SendEmptyICMP(icmp));
-
- ASSERT_NO_FATAL_FAILURE(ExpectICMPSuccess(icmp));
-}
-
-// Set and get SO_LINGER.
-TEST_F(RawSocketICMPTest, SetAndGetSocketLinger) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- int level = SOL_SOCKET;
- int type = SO_LINGER;
-
- struct linger sl;
- sl.l_onoff = 1;
- sl.l_linger = 5;
- ASSERT_THAT(setsockopt(s_, level, type, &sl, sizeof(sl)),
- SyscallSucceedsWithValue(0));
-
- struct linger got_linger = {};
- socklen_t length = sizeof(sl);
- ASSERT_THAT(getsockopt(s_, level, type, &got_linger, &length),
- SyscallSucceedsWithValue(0));
-
- ASSERT_EQ(length, sizeof(got_linger));
- EXPECT_EQ(0, memcmp(&sl, &got_linger, length));
-}
-
-// Test getsockopt for SO_ACCEPTCONN.
-TEST_F(RawSocketICMPTest, GetSocketAcceptConn) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
-
- int got = -1;
- socklen_t length = sizeof(got);
- ASSERT_THAT(getsockopt(s_, SOL_SOCKET, SO_ACCEPTCONN, &got, &length),
- SyscallSucceedsWithValue(0));
-
- ASSERT_EQ(length, sizeof(got));
- EXPECT_EQ(got, 0);
-}
-
-void RawSocketICMPTest::ExpectICMPSuccess(const struct icmphdr& icmp) {
- // We're going to receive both the echo request and reply, but the order is
- // indeterminate.
- char recv_buf[kEmptyICMPSize];
- struct sockaddr_in src;
- bool received_request = false;
- bool received_reply = false;
-
- for (int i = 0; i < 2; i++) {
- // Receive the packet.
- ASSERT_NO_FATAL_FAILURE(ReceiveICMP(recv_buf, ABSL_ARRAYSIZE(recv_buf),
- sizeof(struct icmphdr), &src));
- EXPECT_EQ(memcmp(&src, &addr_, sizeof(src)), 0);
- struct icmphdr* recvd_icmp =
- reinterpret_cast<struct icmphdr*>(recv_buf + sizeof(struct iphdr));
- switch (recvd_icmp->type) {
- case ICMP_ECHO:
- EXPECT_FALSE(received_request);
- received_request = true;
- // The packet should be identical to what we sent.
- EXPECT_EQ(memcmp(recv_buf + sizeof(struct iphdr), &icmp, sizeof(icmp)),
- 0);
- break;
-
- case ICMP_ECHOREPLY:
- EXPECT_FALSE(received_reply);
- received_reply = true;
- // Most fields should be the same.
- EXPECT_EQ(recvd_icmp->code, icmp.code);
- EXPECT_EQ(recvd_icmp->un.echo.sequence, icmp.un.echo.sequence);
- EXPECT_EQ(recvd_icmp->un.echo.id, icmp.un.echo.id);
- // A couple are different.
- EXPECT_EQ(recvd_icmp->type, ICMP_ECHOREPLY);
- // The checksum computed over the reply should still be valid.
- EXPECT_EQ(ICMPChecksum(*recvd_icmp, NULL, 0), 0);
- break;
- }
- }
-
- ASSERT_TRUE(received_request);
- ASSERT_TRUE(received_reply);
-}
-
-void RawSocketICMPTest::SendEmptyICMP(const struct icmphdr& icmp) {
- ASSERT_NO_FATAL_FAILURE(SendEmptyICMPTo(s_, addr_, icmp));
-}
-
-void RawSocketICMPTest::SendEmptyICMPTo(int sock,
- const struct sockaddr_in& addr,
- const struct icmphdr& icmp) {
- // It's safe to use const_cast here because sendmsg won't modify the iovec or
- // address.
- struct iovec iov = {};
- iov.iov_base = static_cast<void*>(const_cast<struct icmphdr*>(&icmp));
- iov.iov_len = sizeof(icmp);
- struct msghdr msg = {};
- msg.msg_name = static_cast<void*>(const_cast<struct sockaddr_in*>(&addr));
- msg.msg_namelen = sizeof(addr);
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = 0;
- ASSERT_THAT(sendmsg(sock, &msg, 0), SyscallSucceedsWithValue(sizeof(icmp)));
-}
-
-void RawSocketICMPTest::ReceiveICMP(char* recv_buf, size_t recv_buf_len,
- size_t expected_size,
- struct sockaddr_in* src) {
- ASSERT_NO_FATAL_FAILURE(
- ReceiveICMPFrom(recv_buf, recv_buf_len, expected_size, src, s_));
-}
-
-void RawSocketICMPTest::ReceiveICMPFrom(char* recv_buf, size_t recv_buf_len,
- size_t expected_size,
- struct sockaddr_in* src, int sock) {
- struct iovec iov = {};
- iov.iov_base = recv_buf;
- iov.iov_len = recv_buf_len;
- struct msghdr msg = {};
- msg.msg_name = src;
- msg.msg_namelen = sizeof(*src);
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = 0;
- // We should receive the ICMP packet plus 20 bytes of IP header.
- ASSERT_THAT(recvmsg(sock, &msg, 0),
- SyscallSucceedsWithValue(expected_size + sizeof(struct iphdr)));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/read.cc b/test/syscalls/linux/read.cc
deleted file mode 100644
index 7756af24d..000000000
--- a/test/syscalls/linux/read.cc
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <sys/mman.h>
-#include <unistd.h>
-
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "absl/base/macros.h"
-#include "test/util/cleanup.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-class ReadTest : public ::testing::Test {
- void SetUp() override {
- name_ = NewTempAbsPath();
- int fd;
- ASSERT_THAT(fd = open(name_.c_str(), O_CREAT, 0644), SyscallSucceeds());
- ASSERT_THAT(close(fd), SyscallSucceeds());
- }
-
- void TearDown() override { unlink(name_.c_str()); }
-
- public:
- std::string name_;
-};
-
-TEST_F(ReadTest, ZeroBuffer) {
- int fd;
- ASSERT_THAT(fd = open(name_.c_str(), O_RDWR), SyscallSucceeds());
-
- char msg[] = "hello world";
- EXPECT_THAT(PwriteFd(fd, msg, strlen(msg), 0),
- SyscallSucceedsWithValue(strlen(msg)));
-
- char buf[10];
- EXPECT_THAT(ReadFd(fd, buf, 0), SyscallSucceedsWithValue(0));
- EXPECT_THAT(close(fd), SyscallSucceeds());
-}
-
-TEST_F(ReadTest, EmptyFileReturnsZeroAtEOF) {
- int fd;
- ASSERT_THAT(fd = open(name_.c_str(), O_RDWR), SyscallSucceeds());
-
- char eof_buf[10];
- EXPECT_THAT(ReadFd(fd, eof_buf, 10), SyscallSucceedsWithValue(0));
- EXPECT_THAT(close(fd), SyscallSucceeds());
-}
-
-TEST_F(ReadTest, EofAfterRead) {
- int fd;
- ASSERT_THAT(fd = open(name_.c_str(), O_RDWR), SyscallSucceeds());
-
- // Write some bytes to be read.
- constexpr char kMessage[] = "hello world";
- EXPECT_THAT(PwriteFd(fd, kMessage, sizeof(kMessage), 0),
- SyscallSucceedsWithValue(sizeof(kMessage)));
-
- // Read all of the bytes at once.
- char buf[sizeof(kMessage)];
- EXPECT_THAT(ReadFd(fd, buf, sizeof(kMessage)),
- SyscallSucceedsWithValue(sizeof(kMessage)));
-
- // Read again with a non-zero buffer and expect EOF.
- char eof_buf[10];
- EXPECT_THAT(ReadFd(fd, eof_buf, 10), SyscallSucceedsWithValue(0));
- EXPECT_THAT(close(fd), SyscallSucceeds());
-}
-
-TEST_F(ReadTest, DevNullReturnsEof) {
- int fd;
- ASSERT_THAT(fd = open("/dev/null", O_RDONLY), SyscallSucceeds());
- std::vector<char> buf(1);
- EXPECT_THAT(ReadFd(fd, buf.data(), 1), SyscallSucceedsWithValue(0));
- EXPECT_THAT(close(fd), SyscallSucceeds());
-}
-
-const int kReadSize = 128 * 1024;
-
-// Do not allow random save as it could lead to partial reads.
-TEST_F(ReadTest, CanReadFullyFromDevZero) {
- int fd;
- ASSERT_THAT(fd = open("/dev/zero", O_RDONLY), SyscallSucceeds());
-
- std::vector<char> buf(kReadSize, 1);
- EXPECT_THAT(ReadFd(fd, buf.data(), kReadSize),
- SyscallSucceedsWithValue(kReadSize));
- EXPECT_THAT(close(fd), SyscallSucceeds());
- EXPECT_EQ(std::vector<char>(kReadSize, 0), buf);
-}
-
-TEST_F(ReadTest, ReadDirectoryFails) {
- const FileDescriptor file =
- ASSERT_NO_ERRNO_AND_VALUE(Open(GetAbsoluteTestTmpdir(), O_RDONLY));
- std::vector<char> buf(1);
- EXPECT_THAT(ReadFd(file.get(), buf.data(), 1), SyscallFailsWithErrno(EISDIR));
-}
-
-TEST_F(ReadTest, ReadWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_PATH));
- std::vector<char> buf(1);
- EXPECT_THAT(ReadFd(fd.get(), buf.data(), 1), SyscallFailsWithErrno(EBADF));
-}
-
-// Test that partial reads that hit SIGSEGV are correctly handled and return a
-// partial read.
-TEST_F(ReadTest, PartialReadSIGSEGV) {
- // Allocate 2 pages and remove permission from the second.
- const size_t size = 2 * kPageSize;
- void* addr =
- mmap(0, size, PROT_WRITE | PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
- ASSERT_NE(addr, MAP_FAILED);
- auto cleanup = Cleanup(
- [addr, size] { EXPECT_THAT(munmap(addr, size), SyscallSucceeds()); });
-
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(name_.c_str(), O_RDWR, 0666));
- for (size_t i = 0; i < 2; i++) {
- EXPECT_THAT(pwrite(fd.get(), addr, size, 0),
- SyscallSucceedsWithValue(size));
- }
-
- void* badAddr = reinterpret_cast<char*>(addr) + kPageSize;
- ASSERT_THAT(mprotect(badAddr, kPageSize, PROT_NONE), SyscallSucceeds());
-
- // Attempt to read into both pages. Create a non-contiguous iovec pair to
- // ensure the operation is done in 2 steps.
- struct iovec iov[] = {
- {
- .iov_base = addr,
- .iov_len = kPageSize,
- },
- {
- .iov_base = addr,
- .iov_len = size,
- },
- };
- EXPECT_THAT(lseek(fd.get(), 0, SEEK_SET), SyscallSucceeds());
- EXPECT_THAT(readv(fd.get(), iov, ABSL_ARRAYSIZE(iov)),
- SyscallSucceedsWithValue(size));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/readahead.cc b/test/syscalls/linux/readahead.cc
deleted file mode 100644
index 71073bb3c..000000000
--- a/test/syscalls/linux/readahead.cc
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(ReadaheadTest, InvalidFD) {
- EXPECT_THAT(readahead(-1, 1, 1), SyscallFailsWithErrno(EBADF));
-}
-
-TEST(ReadaheadTest, UnsupportedFile) {
- FileDescriptor sock =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_UNIX, SOCK_STREAM, 0));
- ASSERT_THAT(readahead(sock.get(), 1, 1), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(ReadaheadTest, InvalidOffset) {
- // This test is not valid for some Linux Kernels.
- SKIP_IF(!IsRunningOnGvisor());
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDWR));
- EXPECT_THAT(readahead(fd.get(), -1, 1), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(ReadaheadTest, ValidOffset) {
- constexpr char kData[] = "123";
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), kData, TempPath::kDefaultFileMode));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDWR));
-
- // N.B. The implementation of readahead is filesystem-specific, and a file
- // backed by ram may return EINVAL because there is nothing to be read.
- EXPECT_THAT(readahead(fd.get(), 1, 1), AnyOf(SyscallSucceedsWithValue(0),
- SyscallFailsWithErrno(EINVAL)));
-}
-
-TEST(ReadaheadTest, PastEnd) {
- constexpr char kData[] = "123";
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), kData, TempPath::kDefaultFileMode));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDWR));
- // See above.
- EXPECT_THAT(readahead(fd.get(), 2, 2), AnyOf(SyscallSucceedsWithValue(0),
- SyscallFailsWithErrno(EINVAL)));
-}
-
-TEST(ReadaheadTest, CrossesEnd) {
- constexpr char kData[] = "123";
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), kData, TempPath::kDefaultFileMode));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDWR));
- // See above.
- EXPECT_THAT(readahead(fd.get(), 4, 2), AnyOf(SyscallSucceedsWithValue(0),
- SyscallFailsWithErrno(EINVAL)));
-}
-
-TEST(ReadaheadTest, WriteOnly) {
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_WRONLY));
- EXPECT_THAT(readahead(fd.get(), 0, 1), SyscallFailsWithErrno(EBADF));
-}
-
-TEST(ReadaheadTest, InvalidSize) {
- // This test is not valid on some Linux kernels.
- SKIP_IF(!IsRunningOnGvisor());
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDWR));
- EXPECT_THAT(readahead(fd.get(), 0, -1), SyscallFailsWithErrno(EINVAL));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/readv.cc b/test/syscalls/linux/readv.cc
deleted file mode 100644
index a50d98d21..000000000
--- a/test/syscalls/linux/readv.cc
+++ /dev/null
@@ -1,308 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <limits.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/file_base.h"
-#include "test/syscalls/linux/readv_common.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-#include "test/util/timer_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-class ReadvTest : public FileTest {
- void SetUp() override {
- FileTest::SetUp();
-
- ASSERT_THAT(write(test_file_fd_.get(), kReadvTestData, kReadvTestDataSize),
- SyscallSucceedsWithValue(kReadvTestDataSize));
- ASSERT_THAT(lseek(test_file_fd_.get(), 0, SEEK_SET),
- SyscallSucceedsWithValue(0));
- ASSERT_THAT(write(test_pipe_[1], kReadvTestData, kReadvTestDataSize),
- SyscallSucceedsWithValue(kReadvTestDataSize));
- }
-};
-
-TEST_F(ReadvTest, ReadOneBufferPerByte_File) {
- ReadOneBufferPerByte(test_file_fd_.get());
-}
-
-TEST_F(ReadvTest, ReadOneBufferPerByte_Pipe) {
- ReadOneBufferPerByte(test_pipe_[0]);
-}
-
-TEST_F(ReadvTest, ReadOneHalfAtATime_File) {
- ReadOneHalfAtATime(test_file_fd_.get());
-}
-
-TEST_F(ReadvTest, ReadOneHalfAtATime_Pipe) {
- ReadOneHalfAtATime(test_pipe_[0]);
-}
-
-TEST_F(ReadvTest, ReadAllOneBuffer_File) {
- ReadAllOneBuffer(test_file_fd_.get());
-}
-
-TEST_F(ReadvTest, ReadAllOneBuffer_Pipe) { ReadAllOneBuffer(test_pipe_[0]); }
-
-TEST_F(ReadvTest, ReadAllOneLargeBuffer_File) {
- ReadAllOneLargeBuffer(test_file_fd_.get());
-}
-
-TEST_F(ReadvTest, ReadAllOneLargeBuffer_Pipe) {
- ReadAllOneLargeBuffer(test_pipe_[0]);
-}
-
-TEST_F(ReadvTest, ReadBuffersOverlapping_File) {
- ReadBuffersOverlapping(test_file_fd_.get());
-}
-
-TEST_F(ReadvTest, ReadBuffersOverlapping_Pipe) {
- ReadBuffersOverlapping(test_pipe_[0]);
-}
-
-TEST_F(ReadvTest, ReadBuffersDiscontinuous_File) {
- ReadBuffersDiscontinuous(test_file_fd_.get());
-}
-
-TEST_F(ReadvTest, ReadBuffersDiscontinuous_Pipe) {
- ReadBuffersDiscontinuous(test_pipe_[0]);
-}
-
-TEST_F(ReadvTest, ReadIovecsCompletelyFilled_File) {
- ReadIovecsCompletelyFilled(test_file_fd_.get());
-}
-
-TEST_F(ReadvTest, ReadIovecsCompletelyFilled_Pipe) {
- ReadIovecsCompletelyFilled(test_pipe_[0]);
-}
-
-TEST_F(ReadvTest, BadFileDescriptor) {
- char buffer[1024];
- struct iovec iov[1];
- iov[0].iov_base = buffer;
- iov[0].iov_len = 1024;
-
- ASSERT_THAT(readv(-1, iov, 1024), SyscallFailsWithErrno(EBADF));
-}
-
-TEST_F(ReadvTest, BadIovecsPointer_File) {
- ASSERT_THAT(readv(test_file_fd_.get(), nullptr, 1),
- SyscallFailsWithErrno(EFAULT));
-}
-
-TEST_F(ReadvTest, BadIovecsPointer_Pipe) {
- ASSERT_THAT(readv(test_pipe_[0], nullptr, 1), SyscallFailsWithErrno(EFAULT));
-}
-
-TEST_F(ReadvTest, BadIovecBase_File) {
- struct iovec iov[1];
- iov[0].iov_base = nullptr;
- iov[0].iov_len = 1024;
- ASSERT_THAT(readv(test_file_fd_.get(), iov, 1),
- SyscallFailsWithErrno(EFAULT));
-}
-
-TEST_F(ReadvTest, BadIovecBase_Pipe) {
- struct iovec iov[1];
- iov[0].iov_base = nullptr;
- iov[0].iov_len = 1024;
- ASSERT_THAT(readv(test_pipe_[0], iov, 1), SyscallFailsWithErrno(EFAULT));
-}
-
-TEST_F(ReadvTest, ZeroIovecs_File) {
- struct iovec iov[1];
- iov[0].iov_base = 0;
- iov[0].iov_len = 0;
- ASSERT_THAT(readv(test_file_fd_.get(), iov, 1), SyscallSucceeds());
-}
-
-TEST_F(ReadvTest, ZeroIovecs_Pipe) {
- struct iovec iov[1];
- iov[0].iov_base = 0;
- iov[0].iov_len = 0;
- ASSERT_THAT(readv(test_pipe_[0], iov, 1), SyscallSucceeds());
-}
-
-TEST_F(ReadvTest, NotReadable_File) {
- char buffer[1024];
- struct iovec iov[1];
- iov[0].iov_base = buffer;
- iov[0].iov_len = 1024;
-
- std::string wronly_file = NewTempAbsPath();
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(wronly_file, O_CREAT | O_WRONLY, S_IRUSR | S_IWUSR));
- ASSERT_THAT(readv(fd.get(), iov, 1), SyscallFailsWithErrno(EBADF));
- fd.reset(); // Close before unlinking.
- ASSERT_THAT(unlink(wronly_file.c_str()), SyscallSucceeds());
-}
-
-TEST_F(ReadvTest, NotReadable_Pipe) {
- char buffer[1024];
- struct iovec iov[1];
- iov[0].iov_base = buffer;
- iov[0].iov_len = 1024;
- ASSERT_THAT(readv(test_pipe_[1], iov, 1), SyscallFailsWithErrno(EBADF));
-}
-
-TEST_F(ReadvTest, DirNotReadable) {
- char buffer[1024];
- struct iovec iov[1];
- iov[0].iov_base = buffer;
- iov[0].iov_len = 1024;
-
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(GetAbsoluteTestTmpdir(), O_RDONLY));
- ASSERT_THAT(readv(fd.get(), iov, 1), SyscallFailsWithErrno(EISDIR));
-}
-
-TEST_F(ReadvTest, OffsetIncremented) {
- char* buffer = reinterpret_cast<char*>(malloc(kReadvTestDataSize));
- struct iovec iov[1];
- iov[0].iov_base = buffer;
- iov[0].iov_len = kReadvTestDataSize;
-
- ASSERT_THAT(readv(test_file_fd_.get(), iov, 1),
- SyscallSucceedsWithValue(kReadvTestDataSize));
- ASSERT_THAT(lseek(test_file_fd_.get(), 0, SEEK_CUR),
- SyscallSucceedsWithValue(kReadvTestDataSize));
-
- free(buffer);
-}
-
-TEST_F(ReadvTest, EndOfFile) {
- char* buffer = reinterpret_cast<char*>(malloc(kReadvTestDataSize));
- struct iovec iov[1];
- iov[0].iov_base = buffer;
- iov[0].iov_len = kReadvTestDataSize;
- ASSERT_THAT(readv(test_file_fd_.get(), iov, 1),
- SyscallSucceedsWithValue(kReadvTestDataSize));
- free(buffer);
-
- buffer = reinterpret_cast<char*>(malloc(kReadvTestDataSize));
- iov[0].iov_base = buffer;
- iov[0].iov_len = kReadvTestDataSize;
- ASSERT_THAT(readv(test_file_fd_.get(), iov, 1), SyscallSucceedsWithValue(0));
- free(buffer);
-}
-
-TEST_F(ReadvTest, WouldBlock_Pipe) {
- struct iovec iov[1];
- iov[0].iov_base = reinterpret_cast<char*>(malloc(kReadvTestDataSize));
- iov[0].iov_len = kReadvTestDataSize;
- ASSERT_THAT(readv(test_pipe_[0], iov, 1),
- SyscallSucceedsWithValue(kReadvTestDataSize));
- free(iov[0].iov_base);
-
- iov[0].iov_base = reinterpret_cast<char*>(malloc(kReadvTestDataSize));
- ASSERT_THAT(readv(test_pipe_[0], iov, 1), SyscallFailsWithErrno(EAGAIN));
- free(iov[0].iov_base);
-}
-
-TEST_F(ReadvTest, ZeroBuffer) {
- char buf[10];
- struct iovec iov[1];
- iov[0].iov_base = buf;
- iov[0].iov_len = 0;
- ASSERT_THAT(readv(test_pipe_[0], iov, 1), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(ReadvTest, NullIovecInNonemptyArray) {
- std::vector<char> buf(kReadvTestDataSize);
- struct iovec iov[2];
- iov[0].iov_base = nullptr;
- iov[0].iov_len = 0;
- iov[1].iov_base = buf.data();
- iov[1].iov_len = kReadvTestDataSize;
- ASSERT_THAT(readv(test_file_fd_.get(), iov, 2),
- SyscallSucceedsWithValue(kReadvTestDataSize));
-}
-
-TEST_F(ReadvTest, IovecOutsideTaskAddressRangeInNonemptyArray) {
- std::vector<char> buf(kReadvTestDataSize);
- struct iovec iov[2];
- iov[0].iov_base = reinterpret_cast<void*>(~static_cast<uintptr_t>(0));
- iov[0].iov_len = 0;
- iov[1].iov_base = buf.data();
- iov[1].iov_len = kReadvTestDataSize;
- ASSERT_THAT(readv(test_file_fd_.get(), iov, 2),
- SyscallFailsWithErrno(EFAULT));
-}
-
-TEST_F(ReadvTest, ReadvWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- char buffer[1024];
- struct iovec iov[1];
- iov[0].iov_base = buffer;
- iov[0].iov_len = 1024;
-
- TempPath tmpfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(tmpfile.path().c_str(), O_PATH));
-
- ASSERT_THAT(readv(fd.get(), iov, 1), SyscallFailsWithErrno(EBADF));
-}
-
-// This test depends on the maximum extent of a single readv() syscall, so
-// we can't tolerate interruption from saving.
-TEST(ReadvTestNoFixture, TruncatedAtMax) {
- // Ensure that we won't be interrupted by ITIMER_PROF. This is particularly
- // important in environments where automated profiling tools may start
- // ITIMER_PROF automatically.
- struct itimerval itv = {};
- auto const cleanup_itimer =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedItimer(ITIMER_PROF, itv));
-
- // From Linux's include/linux/fs.h.
- size_t const MAX_RW_COUNT = INT_MAX & ~(kPageSize - 1);
-
- // Create an iovec array with 3 segments pointing to consecutive parts of a
- // buffer. The first covers all but the last three pages, and should be
- // written to in its entirety. The second covers the last page before
- // MAX_RW_COUNT and the first page after; only the first page should be
- // written to. The third covers the last page of the buffer, and should be
- // skipped entirely.
- size_t const kBufferSize = MAX_RW_COUNT + 2 * kPageSize;
- size_t const kFirstOffset = MAX_RW_COUNT - kPageSize;
- size_t const kSecondOffset = MAX_RW_COUNT + kPageSize;
- // The buffer is too big to fit on the stack.
- std::vector<char> buf(kBufferSize);
- struct iovec iov[3];
- iov[0].iov_base = buf.data();
- iov[0].iov_len = kFirstOffset;
- iov[1].iov_base = buf.data() + kFirstOffset;
- iov[1].iov_len = kSecondOffset - kFirstOffset;
- iov[2].iov_base = buf.data() + kSecondOffset;
- iov[2].iov_len = kBufferSize - kSecondOffset;
-
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/zero", O_RDONLY));
- EXPECT_THAT(readv(fd.get(), iov, 3), SyscallSucceedsWithValue(MAX_RW_COUNT));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/readv_common.cc b/test/syscalls/linux/readv_common.cc
deleted file mode 100644
index 2694dc64f..000000000
--- a/test/syscalls/linux/readv_common.cc
+++ /dev/null
@@ -1,220 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// MatchesStringLength checks that a tuple argument of (struct iovec *, int),
-// corresponding to an iovec array and its length, contains data whose total
-// length matches the string length strlen.
-MATCHER_P(MatchesStringLength, strlen, "") {
- struct iovec* iovs = arg.first;
- int niov = arg.second;
- int offset = 0;
- for (int i = 0; i < niov; i++) {
- offset += iovs[i].iov_len;
- }
- if (offset != static_cast<int>(strlen)) {
- *result_listener << offset;
- return false;
- }
- return true;
-}
-
-// MatchesStringValue checks that a tuple argument of (struct iovec *, int),
-// corresponding to an iovec array and its length, contains data that matches
-// the string value str.
-MATCHER_P(MatchesStringValue, str, "") {
- struct iovec* iovs = arg.first;
- int len = strlen(str);
- int niov = arg.second;
- int offset = 0;
- for (int i = 0; i < niov; i++) {
- struct iovec iov = iovs[i];
- if (len < offset) {
- *result_listener << "strlen " << len << " < offset " << offset;
- return false;
- }
- if (strncmp(static_cast<char*>(iov.iov_base), &str[offset], iov.iov_len)) {
- absl::string_view iovec_string(static_cast<char*>(iov.iov_base),
- iov.iov_len);
- *result_listener << iovec_string << " @offset " << offset;
- return false;
- }
- offset += iov.iov_len;
- }
- return true;
-}
-
-extern const char kReadvTestData[] =
- "127.0.0.1 localhost"
- ""
- "# The following lines are desirable for IPv6 capable hosts"
- "::1 ip6-localhost ip6-loopback"
- "fe00::0 ip6-localnet"
- "ff00::0 ip6-mcastprefix"
- "ff02::1 ip6-allnodes"
- "ff02::2 ip6-allrouters"
- "ff02::3 ip6-allhosts"
- "192.168.1.100 a"
- "93.184.216.34 foo.bar.example.com xcpu";
-extern const size_t kReadvTestDataSize = sizeof(kReadvTestData);
-
-static void ReadAllOneProvidedBuffer(int fd, std::vector<char>* buffer) {
- struct iovec iovs[1];
- iovs[0].iov_base = buffer->data();
- iovs[0].iov_len = kReadvTestDataSize;
-
- ASSERT_THAT(readv(fd, iovs, 1), SyscallSucceedsWithValue(kReadvTestDataSize));
-
- std::pair<struct iovec*, int> iovec_desc(iovs, 1);
- EXPECT_THAT(iovec_desc, MatchesStringLength(kReadvTestDataSize));
- EXPECT_THAT(iovec_desc, MatchesStringValue(kReadvTestData));
-}
-
-void ReadAllOneBuffer(int fd) {
- std::vector<char> buffer(kReadvTestDataSize);
- ReadAllOneProvidedBuffer(fd, &buffer);
-}
-
-void ReadAllOneLargeBuffer(int fd) {
- std::vector<char> buffer(10 * kReadvTestDataSize);
- ReadAllOneProvidedBuffer(fd, &buffer);
-}
-
-void ReadOneHalfAtATime(int fd) {
- int len0 = kReadvTestDataSize / 2;
- int len1 = kReadvTestDataSize - len0;
- std::vector<char> buffer0(len0);
- std::vector<char> buffer1(len1);
-
- struct iovec iovs[2];
- iovs[0].iov_base = buffer0.data();
- iovs[0].iov_len = len0;
- iovs[1].iov_base = buffer1.data();
- iovs[1].iov_len = len1;
-
- ASSERT_THAT(readv(fd, iovs, 2), SyscallSucceedsWithValue(kReadvTestDataSize));
-
- std::pair<struct iovec*, int> iovec_desc(iovs, 2);
- EXPECT_THAT(iovec_desc, MatchesStringLength(kReadvTestDataSize));
- EXPECT_THAT(iovec_desc, MatchesStringValue(kReadvTestData));
-}
-
-void ReadOneBufferPerByte(int fd) {
- std::vector<char> buffer(kReadvTestDataSize);
- std::vector<struct iovec> iovs(kReadvTestDataSize);
- char* buffer_ptr = buffer.data();
- struct iovec* iovs_ptr = iovs.data();
-
- for (int i = 0; i < static_cast<int>(kReadvTestDataSize); i++) {
- struct iovec iov = {
- .iov_base = &buffer_ptr[i],
- .iov_len = 1,
- };
- iovs_ptr[i] = iov;
- }
-
- ASSERT_THAT(readv(fd, iovs_ptr, kReadvTestDataSize),
- SyscallSucceedsWithValue(kReadvTestDataSize));
-
- std::pair<struct iovec*, int> iovec_desc(iovs.data(), kReadvTestDataSize);
- EXPECT_THAT(iovec_desc, MatchesStringLength(kReadvTestDataSize));
- EXPECT_THAT(iovec_desc, MatchesStringValue(kReadvTestData));
-}
-
-void ReadBuffersOverlapping(int fd) {
- // overlap the first overlap_bytes.
- int overlap_bytes = 8;
- std::vector<char> buffer(kReadvTestDataSize);
-
- // overlapping causes us to get more data.
- int expected_size = kReadvTestDataSize + overlap_bytes;
- std::vector<char> expected(expected_size);
- char* expected_ptr = expected.data();
- memcpy(expected_ptr, &kReadvTestData[overlap_bytes], overlap_bytes);
- memcpy(&expected_ptr[overlap_bytes], &kReadvTestData[overlap_bytes],
- kReadvTestDataSize - overlap_bytes);
-
- struct iovec iovs[2];
- iovs[0].iov_base = buffer.data();
- iovs[0].iov_len = overlap_bytes;
- iovs[1].iov_base = buffer.data();
- iovs[1].iov_len = kReadvTestDataSize;
-
- ASSERT_THAT(readv(fd, iovs, 2), SyscallSucceedsWithValue(kReadvTestDataSize));
-
- std::pair<struct iovec*, int> iovec_desc(iovs, 2);
- EXPECT_THAT(iovec_desc, MatchesStringLength(expected_size));
- EXPECT_THAT(iovec_desc, MatchesStringValue(expected_ptr));
-}
-
-void ReadBuffersDiscontinuous(int fd) {
- // Each iov is 1 byte, separated from the next by 1 byte.
- std::vector<char> buffer(kReadvTestDataSize * 2);
- std::vector<struct iovec> iovs(kReadvTestDataSize);
-
- char* buffer_ptr = buffer.data();
- struct iovec* iovs_ptr = iovs.data();
-
- for (int i = 0; i < static_cast<int>(kReadvTestDataSize); i++) {
- struct iovec iov = {
- .iov_base = &buffer_ptr[i * 2],
- .iov_len = 1,
- };
- iovs_ptr[i] = iov;
- }
-
- ASSERT_THAT(readv(fd, iovs_ptr, kReadvTestDataSize),
- SyscallSucceedsWithValue(kReadvTestDataSize));
-
- std::pair<struct iovec*, int> iovec_desc(iovs.data(), kReadvTestDataSize);
- EXPECT_THAT(iovec_desc, MatchesStringLength(kReadvTestDataSize));
- EXPECT_THAT(iovec_desc, MatchesStringValue(kReadvTestData));
-}
-
-void ReadIovecsCompletelyFilled(int fd) {
- int half = kReadvTestDataSize / 2;
- std::vector<char> buffer(kReadvTestDataSize);
- char* buffer_ptr = buffer.data();
- memset(buffer.data(), '\0', kReadvTestDataSize);
-
- struct iovec iovs[2];
- iovs[0].iov_base = buffer.data();
- iovs[0].iov_len = half;
- iovs[1].iov_base = &buffer_ptr[half];
- iovs[1].iov_len = half;
-
- ASSERT_THAT(readv(fd, iovs, 2), SyscallSucceedsWithValue(half * 2));
-
- std::pair<struct iovec*, int> iovec_desc(iovs, 2);
- EXPECT_THAT(iovec_desc, MatchesStringLength(half * 2));
- EXPECT_THAT(iovec_desc, MatchesStringValue(kReadvTestData));
-
- char* str = static_cast<char*>(iovs[0].iov_base);
- str[iovs[0].iov_len - 1] = '\0';
- ASSERT_EQ(half - 1, strlen(str));
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/readv_common.h b/test/syscalls/linux/readv_common.h
deleted file mode 100644
index 2fa40c35f..000000000
--- a/test/syscalls/linux/readv_common.h
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_READV_COMMON_H_
-#define GVISOR_TEST_SYSCALLS_READV_COMMON_H_
-
-#include <stddef.h>
-
-namespace gvisor {
-namespace testing {
-
-// A NUL-terminated string containing the test data used by the test helpers
-// below.
-extern const char kReadvTestData[];
-
-// The size of kReadvTestData, including the terminating NUL.
-extern const size_t kReadvTestDataSize;
-
-// ReadAllOneBuffer asserts that it can read kReadvTestData from an fd using
-// exactly one iovec.
-void ReadAllOneBuffer(int fd);
-
-// ReadAllOneLargeBuffer asserts that it can read kReadvTestData from an fd
-// using exactly one iovec containing an overly large buffer.
-void ReadAllOneLargeBuffer(int fd);
-
-// ReadOneHalfAtATime asserts that it can read kReadvTestData from an fd using
-// exactly two iovecs that are roughly equivalent in size.
-void ReadOneHalfAtATime(int fd);
-
-// ReadOneBufferPerByte asserts that it can read kReadvTestData from an fd
-// using one iovec per byte.
-void ReadOneBufferPerByte(int fd);
-
-// ReadBuffersOverlapping asserts that it can read kReadvTestData from an fd
-// where two iovecs are overlapping.
-void ReadBuffersOverlapping(int fd);
-
-// ReadBuffersDiscontinuous asserts that it can read kReadvTestData from an fd
-// where each iovec is discontinuous from the next by 1 byte.
-void ReadBuffersDiscontinuous(int fd);
-
-// ReadIovecsCompletelyFilled asserts that each iovec is completely filled
-// before moving on to the next.
-void ReadIovecsCompletelyFilled(int fd);
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_READV_COMMON_H_
diff --git a/test/syscalls/linux/readv_socket.cc b/test/syscalls/linux/readv_socket.cc
deleted file mode 100644
index dd6fb7008..000000000
--- a/test/syscalls/linux/readv_socket.cc
+++ /dev/null
@@ -1,212 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/readv_common.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-class ReadvSocketTest : public ::testing::Test {
- public:
- void SetUp() override {
- test_unix_stream_socket_[0] = -1;
- test_unix_stream_socket_[1] = -1;
- test_unix_dgram_socket_[0] = -1;
- test_unix_dgram_socket_[1] = -1;
- test_unix_seqpacket_socket_[0] = -1;
- test_unix_seqpacket_socket_[1] = -1;
-
- ASSERT_THAT(socketpair(AF_UNIX, SOCK_STREAM, 0, test_unix_stream_socket_),
- SyscallSucceeds());
- ASSERT_THAT(fcntl(test_unix_stream_socket_[0], F_SETFL, O_NONBLOCK),
- SyscallSucceeds());
- ASSERT_THAT(socketpair(AF_UNIX, SOCK_DGRAM, 0, test_unix_dgram_socket_),
- SyscallSucceeds());
- ASSERT_THAT(fcntl(test_unix_dgram_socket_[0], F_SETFL, O_NONBLOCK),
- SyscallSucceeds());
- ASSERT_THAT(
- socketpair(AF_UNIX, SOCK_SEQPACKET, 0, test_unix_seqpacket_socket_),
- SyscallSucceeds());
- ASSERT_THAT(fcntl(test_unix_seqpacket_socket_[0], F_SETFL, O_NONBLOCK),
- SyscallSucceeds());
-
- ASSERT_THAT(
- write(test_unix_stream_socket_[1], kReadvTestData, kReadvTestDataSize),
- SyscallSucceedsWithValue(kReadvTestDataSize));
- ASSERT_THAT(
- write(test_unix_dgram_socket_[1], kReadvTestData, kReadvTestDataSize),
- SyscallSucceedsWithValue(kReadvTestDataSize));
- ASSERT_THAT(write(test_unix_seqpacket_socket_[1], kReadvTestData,
- kReadvTestDataSize),
- SyscallSucceedsWithValue(kReadvTestDataSize));
- }
-
- void TearDown() override {
- close(test_unix_stream_socket_[0]);
- close(test_unix_stream_socket_[1]);
-
- close(test_unix_dgram_socket_[0]);
- close(test_unix_dgram_socket_[1]);
-
- close(test_unix_seqpacket_socket_[0]);
- close(test_unix_seqpacket_socket_[1]);
- }
-
- int test_unix_stream_socket_[2];
- int test_unix_dgram_socket_[2];
- int test_unix_seqpacket_socket_[2];
-};
-
-TEST_F(ReadvSocketTest, ReadOneBufferPerByte_StreamSocket) {
- ReadOneBufferPerByte(test_unix_stream_socket_[0]);
-}
-
-TEST_F(ReadvSocketTest, ReadOneBufferPerByte_DgramSocket) {
- ReadOneBufferPerByte(test_unix_dgram_socket_[0]);
-}
-
-TEST_F(ReadvSocketTest, ReadOneBufferPerByte_SeqPacketSocket) {
- ReadOneBufferPerByte(test_unix_seqpacket_socket_[0]);
-}
-
-TEST_F(ReadvSocketTest, ReadOneHalfAtATime_StreamSocket) {
- ReadOneHalfAtATime(test_unix_stream_socket_[0]);
-}
-
-TEST_F(ReadvSocketTest, ReadOneHalfAtATime_DgramSocket) {
- ReadOneHalfAtATime(test_unix_dgram_socket_[0]);
-}
-
-TEST_F(ReadvSocketTest, ReadAllOneBuffer_StreamSocket) {
- ReadAllOneBuffer(test_unix_stream_socket_[0]);
-}
-
-TEST_F(ReadvSocketTest, ReadAllOneBuffer_DgramSocket) {
- ReadAllOneBuffer(test_unix_dgram_socket_[0]);
-}
-
-TEST_F(ReadvSocketTest, ReadAllOneLargeBuffer_StreamSocket) {
- ReadAllOneLargeBuffer(test_unix_stream_socket_[0]);
-}
-
-TEST_F(ReadvSocketTest, ReadAllOneLargeBuffer_DgramSocket) {
- ReadAllOneLargeBuffer(test_unix_dgram_socket_[0]);
-}
-
-TEST_F(ReadvSocketTest, ReadBuffersOverlapping_StreamSocket) {
- ReadBuffersOverlapping(test_unix_stream_socket_[0]);
-}
-
-TEST_F(ReadvSocketTest, ReadBuffersOverlapping_DgramSocket) {
- ReadBuffersOverlapping(test_unix_dgram_socket_[0]);
-}
-
-TEST_F(ReadvSocketTest, ReadBuffersDiscontinuous_StreamSocket) {
- ReadBuffersDiscontinuous(test_unix_stream_socket_[0]);
-}
-
-TEST_F(ReadvSocketTest, ReadBuffersDiscontinuous_DgramSocket) {
- ReadBuffersDiscontinuous(test_unix_dgram_socket_[0]);
-}
-
-TEST_F(ReadvSocketTest, ReadIovecsCompletelyFilled_StreamSocket) {
- ReadIovecsCompletelyFilled(test_unix_stream_socket_[0]);
-}
-
-TEST_F(ReadvSocketTest, ReadIovecsCompletelyFilled_DgramSocket) {
- ReadIovecsCompletelyFilled(test_unix_dgram_socket_[0]);
-}
-
-TEST_F(ReadvSocketTest, BadIovecsPointer_StreamSocket) {
- ASSERT_THAT(readv(test_unix_stream_socket_[0], nullptr, 1),
- SyscallFailsWithErrno(EFAULT));
-}
-
-TEST_F(ReadvSocketTest, BadIovecsPointer_DgramSocket) {
- ASSERT_THAT(readv(test_unix_dgram_socket_[0], nullptr, 1),
- SyscallFailsWithErrno(EFAULT));
-}
-
-TEST_F(ReadvSocketTest, BadIovecBase_StreamSocket) {
- struct iovec iov[1];
- iov[0].iov_base = nullptr;
- iov[0].iov_len = 1024;
- ASSERT_THAT(readv(test_unix_stream_socket_[0], iov, 1),
- SyscallFailsWithErrno(EFAULT));
-}
-
-TEST_F(ReadvSocketTest, BadIovecBase_DgramSocket) {
- struct iovec iov[1];
- iov[0].iov_base = nullptr;
- iov[0].iov_len = 1024;
- ASSERT_THAT(readv(test_unix_dgram_socket_[0], iov, 1),
- SyscallFailsWithErrno(EFAULT));
-}
-
-TEST_F(ReadvSocketTest, ZeroIovecs_StreamSocket) {
- struct iovec iov[1];
- iov[0].iov_base = 0;
- iov[0].iov_len = 0;
- ASSERT_THAT(readv(test_unix_stream_socket_[0], iov, 1), SyscallSucceeds());
-}
-
-TEST_F(ReadvSocketTest, ZeroIovecs_DgramSocket) {
- struct iovec iov[1];
- iov[0].iov_base = 0;
- iov[0].iov_len = 0;
- ASSERT_THAT(readv(test_unix_dgram_socket_[0], iov, 1), SyscallSucceeds());
-}
-
-TEST_F(ReadvSocketTest, WouldBlock_StreamSocket) {
- struct iovec iov[1];
- iov[0].iov_base = reinterpret_cast<char*>(malloc(kReadvTestDataSize));
- iov[0].iov_len = kReadvTestDataSize;
- ASSERT_THAT(readv(test_unix_stream_socket_[0], iov, 1),
- SyscallSucceedsWithValue(kReadvTestDataSize));
- free(iov[0].iov_base);
-
- iov[0].iov_base = reinterpret_cast<char*>(malloc(kReadvTestDataSize));
- ASSERT_THAT(readv(test_unix_stream_socket_[0], iov, 1),
- SyscallFailsWithErrno(EAGAIN));
- free(iov[0].iov_base);
-}
-
-TEST_F(ReadvSocketTest, WouldBlock_DgramSocket) {
- struct iovec iov[1];
- iov[0].iov_base = reinterpret_cast<char*>(malloc(kReadvTestDataSize));
- iov[0].iov_len = kReadvTestDataSize;
- ASSERT_THAT(readv(test_unix_dgram_socket_[0], iov, 1),
- SyscallSucceedsWithValue(kReadvTestDataSize));
- free(iov[0].iov_base);
-
- iov[0].iov_base = reinterpret_cast<char*>(malloc(kReadvTestDataSize));
- ASSERT_THAT(readv(test_unix_dgram_socket_[0], iov, 1),
- SyscallFailsWithErrno(EAGAIN));
- free(iov[0].iov_base);
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/rename.cc b/test/syscalls/linux/rename.cc
deleted file mode 100644
index 561eed99f..000000000
--- a/test/syscalls/linux/rename.cc
+++ /dev/null
@@ -1,500 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <stdio.h>
-
-#include <string>
-
-#include "gtest/gtest.h"
-#include "absl/strings/string_view.h"
-#include "test/util/capability_util.h"
-#include "test/util/cleanup.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-using ::testing::AnyOf;
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(RenameTest, RootToAnything) {
- ASSERT_THAT(rename("/", "/bin"), SyscallFailsWithErrno(EBUSY));
-}
-
-TEST(RenameTest, AnythingToRoot) {
- ASSERT_THAT(rename("/bin", "/"), SyscallFailsWithErrno(EBUSY));
-}
-
-TEST(RenameTest, SourceIsAncestorOfTarget) {
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto subdir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(dir.path()));
- ASSERT_THAT(rename(dir.path().c_str(), subdir.path().c_str()),
- SyscallFailsWithErrno(EINVAL));
-
- // Try an even deeper directory.
- auto deep_subdir =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(subdir.path()));
- ASSERT_THAT(rename(dir.path().c_str(), deep_subdir.path().c_str()),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(RenameTest, TargetIsAncestorOfSource) {
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto subdir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(dir.path()));
- ASSERT_THAT(rename(subdir.path().c_str(), dir.path().c_str()),
- SyscallFailsWithErrno(ENOTEMPTY));
-
- // Try an even deeper directory.
- auto deep_subdir =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(subdir.path()));
- ASSERT_THAT(rename(deep_subdir.path().c_str(), dir.path().c_str()),
- SyscallFailsWithErrno(ENOTEMPTY));
-}
-
-TEST(RenameTest, FileToSelf) {
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- EXPECT_THAT(rename(f.path().c_str(), f.path().c_str()), SyscallSucceeds());
-}
-
-TEST(RenameTest, DirectoryToSelf) {
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- EXPECT_THAT(rename(f.path().c_str(), f.path().c_str()), SyscallSucceeds());
-}
-
-TEST(RenameTest, FileToSameDirectory) {
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- std::string const newpath = NewTempAbsPath();
- ASSERT_THAT(rename(f.path().c_str(), newpath.c_str()), SyscallSucceeds());
- std::string const oldpath = f.release();
- f.reset(newpath);
- EXPECT_THAT(Exists(oldpath), IsPosixErrorOkAndHolds(false));
- EXPECT_THAT(Exists(newpath), IsPosixErrorOkAndHolds(true));
-}
-
-TEST(RenameTest, DirectoryToSameDirectory) {
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- std::string const newpath = NewTempAbsPath();
- ASSERT_THAT(rename(dir.path().c_str(), newpath.c_str()), SyscallSucceeds());
- std::string const oldpath = dir.release();
- dir.reset(newpath);
- EXPECT_THAT(Exists(oldpath), IsPosixErrorOkAndHolds(false));
- EXPECT_THAT(Exists(newpath), IsPosixErrorOkAndHolds(true));
-}
-
-TEST(RenameTest, FileToParentDirectory) {
- auto dir1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto dir2 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(dir1.path()));
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir2.path()));
- std::string const newpath = NewTempAbsPathInDir(dir1.path());
- ASSERT_THAT(rename(f.path().c_str(), newpath.c_str()), SyscallSucceeds());
- std::string const oldpath = f.release();
- // This test is not valid on some Linux kernels.
- EXPECT_THAT(Exists(oldpath), IsPosixErrorOkAndHolds(false));
- EXPECT_THAT(Exists(newpath), IsPosixErrorOkAndHolds(true));
-}
-
-TEST(RenameTest, DirectoryToParentDirectory) {
- auto dir1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto dir2 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(dir1.path()));
- auto dir3 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(dir2.path()));
- EXPECT_THAT(IsDirectory(dir3.path()), IsPosixErrorOkAndHolds(true));
- std::string const newpath = NewTempAbsPathInDir(dir1.path());
- ASSERT_THAT(rename(dir3.path().c_str(), newpath.c_str()), SyscallSucceeds());
- std::string const oldpath = dir3.release();
- dir3.reset(newpath);
- EXPECT_THAT(Exists(oldpath), IsPosixErrorOkAndHolds(false));
- EXPECT_THAT(Exists(newpath), IsPosixErrorOkAndHolds(true));
- EXPECT_THAT(IsDirectory(newpath), IsPosixErrorOkAndHolds(true));
-}
-
-TEST(RenameTest, FileToChildDirectory) {
- auto dir1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto dir2 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(dir1.path()));
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir1.path()));
- std::string const newpath = NewTempAbsPathInDir(dir2.path());
- ASSERT_THAT(rename(f.path().c_str(), newpath.c_str()), SyscallSucceeds());
- std::string const oldpath = f.release();
- f.reset(newpath);
- EXPECT_THAT(Exists(oldpath), IsPosixErrorOkAndHolds(false));
- EXPECT_THAT(Exists(newpath), IsPosixErrorOkAndHolds(true));
-}
-
-TEST(RenameTest, DirectoryToChildDirectory) {
- auto dir1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto dir2 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(dir1.path()));
- auto dir3 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(dir1.path()));
- std::string const newpath = NewTempAbsPathInDir(dir2.path());
- ASSERT_THAT(rename(dir3.path().c_str(), newpath.c_str()), SyscallSucceeds());
- std::string const oldpath = dir3.release();
- dir3.reset(newpath);
- EXPECT_THAT(Exists(oldpath), IsPosixErrorOkAndHolds(false));
- EXPECT_THAT(Exists(newpath), IsPosixErrorOkAndHolds(true));
- EXPECT_THAT(IsDirectory(newpath), IsPosixErrorOkAndHolds(true));
-}
-
-TEST(RenameTest, DirectoryToOwnChildDirectory) {
- auto dir1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto dir2 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(dir1.path()));
- std::string const newpath = NewTempAbsPathInDir(dir2.path());
- ASSERT_THAT(rename(dir1.path().c_str(), newpath.c_str()),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(RenameTest, FileOverwritesFile) {
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto f1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- dir.path(), "first", TempPath::kDefaultFileMode));
- auto f2 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- dir.path(), "second", TempPath::kDefaultFileMode));
- ASSERT_THAT(rename(f1.path().c_str(), f2.path().c_str()), SyscallSucceeds());
- EXPECT_THAT(Exists(f1.path()), IsPosixErrorOkAndHolds(false));
-
- f1.release();
- std::string f2_contents;
- ASSERT_NO_ERRNO(GetContents(f2.path(), &f2_contents));
- EXPECT_EQ("first", f2_contents);
-}
-
-TEST(RenameTest, DirectoryOverwritesDirectoryLinkCount) {
- // Directory link counts are synthetic on overlay filesystems.
- SKIP_IF(ASSERT_NO_ERRNO_AND_VALUE(IsOverlayfs(GetAbsoluteTestTmpdir())));
-
- auto parent1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- EXPECT_THAT(Links(parent1.path()), IsPosixErrorOkAndHolds(2));
-
- auto parent2 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- EXPECT_THAT(Links(parent2.path()), IsPosixErrorOkAndHolds(2));
-
- auto dir1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(parent1.path()));
- auto dir2 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(parent2.path()));
-
- EXPECT_THAT(Links(parent1.path()), IsPosixErrorOkAndHolds(3));
- EXPECT_THAT(Links(parent2.path()), IsPosixErrorOkAndHolds(3));
-
- ASSERT_THAT(rename(dir1.path().c_str(), dir2.path().c_str()),
- SyscallSucceeds());
-
- EXPECT_THAT(Links(parent1.path()), IsPosixErrorOkAndHolds(2));
- EXPECT_THAT(Links(parent2.path()), IsPosixErrorOkAndHolds(3));
-}
-
-TEST(RenameTest, FileDoesNotExist) {
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const std::string source = JoinPath(dir.path(), "source");
- const std::string dest = JoinPath(dir.path(), "dest");
- ASSERT_THAT(rename(source.c_str(), dest.c_str()),
- SyscallFailsWithErrno(ENOENT));
-}
-
-TEST(RenameTest, FileDoesNotOverwriteDirectory) {
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- ASSERT_THAT(rename(f.path().c_str(), dir.path().c_str()),
- SyscallFailsWithErrno(EISDIR));
-}
-
-TEST(RenameTest, DirectoryDoesNotOverwriteFile) {
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- ASSERT_THAT(rename(dir.path().c_str(), f.path().c_str()),
- SyscallFailsWithErrno(ENOTDIR));
-}
-
-TEST(RenameTest, DirectoryOverwritesEmptyDirectory) {
- auto dir1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir1.path()));
- auto dir2 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- EXPECT_THAT(rename(dir1.path().c_str(), dir2.path().c_str()),
- SyscallSucceeds());
- EXPECT_THAT(Exists(dir1.path()), IsPosixErrorOkAndHolds(false));
- dir1.release();
- EXPECT_THAT(Exists(JoinPath(dir2.path(), Basename(f.path()))),
- IsPosixErrorOkAndHolds(true));
- f.release();
-}
-
-TEST(RenameTest, FailsWithDots) {
- auto dir1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto dir2 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto dir1_dot = absl::StrCat(dir1.path(), "/.");
- auto dir2_dot = absl::StrCat(dir2.path(), "/.");
- auto dir1_dot_dot = absl::StrCat(dir1.path(), "/..");
- auto dir2_dot_dot = absl::StrCat(dir2.path(), "/..");
-
- // Try with dot paths in the first argument
- EXPECT_THAT(rename(dir1_dot.c_str(), dir2.path().c_str()),
- SyscallFailsWithErrno(EBUSY));
- EXPECT_THAT(rename(dir1_dot_dot.c_str(), dir2.path().c_str()),
- SyscallFailsWithErrno(EBUSY));
-
- // Try with dot paths in the second argument
- EXPECT_THAT(rename(dir1.path().c_str(), dir2_dot.c_str()),
- SyscallFailsWithErrno(EBUSY));
- EXPECT_THAT(rename(dir1.path().c_str(), dir2_dot_dot.c_str()),
- SyscallFailsWithErrno(EBUSY));
-}
-
-TEST(RenameTest, DirectoryDoesNotOverwriteNonemptyDirectory) {
- auto dir1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto f1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir1.path()));
- auto dir2 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto f2 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir2.path()));
- ASSERT_THAT(rename(dir1.path().c_str(), dir2.path().c_str()),
- SyscallFailsWithErrno(ENOTEMPTY));
-}
-
-TEST(RenameTest, FailsWhenOldParentNotWritable) {
- // Drop capabilities that allow us to override file and directory permissions.
- AutoCapability cap1(CAP_DAC_OVERRIDE, false);
- AutoCapability cap2(CAP_DAC_READ_SEARCH, false);
-
- auto dir1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto f1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir1.path()));
- auto dir2 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- // dir1 is not writable.
- ASSERT_THAT(chmod(dir1.path().c_str(), 0555), SyscallSucceeds());
-
- std::string const newpath = NewTempAbsPathInDir(dir2.path());
- EXPECT_THAT(rename(f1.path().c_str(), newpath.c_str()),
- SyscallFailsWithErrno(EACCES));
-}
-
-TEST(RenameTest, FailsWhenNewParentNotWritable) {
- // Drop capabilities that allow us to override file and directory permissions.
- AutoCapability cap1(CAP_DAC_OVERRIDE, false);
- AutoCapability cap2(CAP_DAC_READ_SEARCH, false);
-
- auto dir1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto f1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir1.path()));
- // dir2 is not writable.
- auto dir2 = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateDirWith(GetAbsoluteTestTmpdir(), 0555));
-
- std::string const newpath = NewTempAbsPathInDir(dir2.path());
- EXPECT_THAT(rename(f1.path().c_str(), newpath.c_str()),
- SyscallFailsWithErrno(EACCES));
-}
-
-// Equivalent to FailsWhenNewParentNotWritable, but with a destination file
-// to overwrite.
-TEST(RenameTest, OverwriteFailsWhenNewParentNotWritable) {
- // Drop capabilities that allow us to override file and directory permissions.
- AutoCapability cap1(CAP_DAC_OVERRIDE, false);
- AutoCapability cap2(CAP_DAC_READ_SEARCH, false);
-
- auto dir1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto f1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir1.path()));
-
- // dir2 is not writable.
- auto dir2 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto f2 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir2.path()));
- ASSERT_THAT(chmod(dir2.path().c_str(), 0555), SyscallSucceeds());
-
- EXPECT_THAT(rename(f1.path().c_str(), f2.path().c_str()),
- SyscallFailsWithErrno(EACCES));
-}
-
-// If the parent directory of source is not accessible, rename returns EACCES
-// because the user cannot determine if source exists.
-TEST(RenameTest, FileDoesNotExistWhenNewParentNotExecutable) {
- // Drop capabilities that allow us to override file and directory permissions.
- AutoCapability cap1(CAP_DAC_OVERRIDE, false);
- AutoCapability cap2(CAP_DAC_READ_SEARCH, false);
-
- // No execute permission.
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateDirWith(GetAbsoluteTestTmpdir(), 0400));
-
- const std::string source = JoinPath(dir.path(), "source");
- const std::string dest = JoinPath(dir.path(), "dest");
- ASSERT_THAT(rename(source.c_str(), dest.c_str()),
- SyscallFailsWithErrno(EACCES));
-}
-
-TEST(RenameTest, DirectoryWithOpenFdOverwritesEmptyDirectory) {
- auto dir1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir1.path()));
- auto dir2 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- // Get an fd on dir1
- int fd;
- ASSERT_THAT(fd = open(dir1.path().c_str(), O_DIRECTORY), SyscallSucceeds());
- auto close_f = Cleanup([fd] {
- // Close the fd on dir1.
- EXPECT_THAT(close(fd), SyscallSucceeds());
- });
-
- EXPECT_THAT(rename(dir1.path().c_str(), dir2.path().c_str()),
- SyscallSucceeds());
-
- const std::string new_f_path = JoinPath(dir2.path(), Basename(f.path()));
-
- auto remove_f = Cleanup([&] {
- // Delete f in its new location.
- ASSERT_NO_ERRNO(Delete(new_f_path));
- f.release();
- });
-
- EXPECT_THAT(Exists(dir1.path()), IsPosixErrorOkAndHolds(false));
- dir1.release();
- EXPECT_THAT(Exists(new_f_path), IsPosixErrorOkAndHolds(true));
-}
-
-TEST(RenameTest, FileWithOpenFd) {
- TempPath root_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- TempPath dir1 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(root_dir.path()));
- TempPath dir2 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(root_dir.path()));
- TempPath dir3 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(root_dir.path()));
-
- // Create file in dir1.
- constexpr char kContents[] = "foo";
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- dir1.path(), kContents, TempPath::kDefaultFileMode));
-
- // Get fd on file.
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(f.path(), O_RDWR));
-
- // Move f to dir2.
- const std::string path2 = NewTempAbsPathInDir(dir2.path());
- ASSERT_THAT(rename(f.path().c_str(), path2.c_str()), SyscallSucceeds());
-
- // Read f's kContents.
- char buf[sizeof(kContents)];
- EXPECT_THAT(PreadFd(fd.get(), &buf, sizeof(kContents), 0),
- SyscallSucceedsWithValue(sizeof(kContents) - 1));
- EXPECT_EQ(absl::string_view(buf, sizeof(buf) - 1), kContents);
-
- // Move f to dir3.
- const std::string path3 = NewTempAbsPathInDir(dir3.path());
- ASSERT_THAT(rename(path2.c_str(), path3.c_str()), SyscallSucceeds());
-
- // Read f's kContents.
- EXPECT_THAT(PreadFd(fd.get(), &buf, sizeof(kContents), 0),
- SyscallSucceedsWithValue(sizeof(kContents) - 1));
- EXPECT_EQ(absl::string_view(buf, sizeof(buf) - 1), kContents);
-}
-
-// Tests that calling rename with a file path ending in . or .. causes EBUSY.
-TEST(RenameTest, PathEndingWithDots) {
- TempPath root_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- TempPath dir1 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(root_dir.path()));
- TempPath dir2 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(root_dir.path()));
-
- // Try to move dir1 into dir2 but mess up the paths.
- auto dir1Dot = JoinPath(dir1.path(), ".");
- auto dir2Dot = JoinPath(dir2.path(), ".");
- auto dir1DotDot = JoinPath(dir1.path(), "..");
- auto dir2DotDot = JoinPath(dir2.path(), "..");
- ASSERT_THAT(rename(dir1.path().c_str(), dir2Dot.c_str()),
- SyscallFailsWithErrno(EBUSY));
- ASSERT_THAT(rename(dir1.path().c_str(), dir2DotDot.c_str()),
- SyscallFailsWithErrno(EBUSY));
- ASSERT_THAT(rename(dir1Dot.c_str(), dir2.path().c_str()),
- SyscallFailsWithErrno(EBUSY));
- ASSERT_THAT(rename(dir1DotDot.c_str(), dir2.path().c_str()),
- SyscallFailsWithErrno(EBUSY));
-}
-
-// Calling rename with a file path ending in . or .. causes EBUSY in sysfs.
-TEST(RenameTest, SysfsPathEndingWithDots) {
- // If a non-root user tries to rename inside /sys then we get EPERM.
- SKIP_IF(geteuid() != 0);
- ASSERT_THAT(rename("/sys/devices/system/cpu/online", "/sys/."),
- SyscallFailsWithErrno(EBUSY));
- ASSERT_THAT(rename("/sys/devices/system/cpu/online", "/sys/.."),
- SyscallFailsWithErrno(EBUSY));
-}
-
-TEST(RenameTest, SysfsFileToSelf) {
- // If a non-root user tries to rename inside /sys then we get EPERM.
- SKIP_IF(geteuid() != 0);
- std::string const path = "/sys/devices/system/cpu/online";
- EXPECT_THAT(rename(path.c_str(), path.c_str()), SyscallSucceeds());
-}
-
-TEST(RenameTest, SysfsDirectoryToSelf) {
- // If a non-root user tries to rename inside /sys then we get EPERM.
- SKIP_IF(geteuid() != 0);
- std::string const path = "/sys/devices";
- EXPECT_THAT(rename(path.c_str(), path.c_str()), SyscallSucceeds());
-}
-
-#ifndef SYS_renameat2
-#if defined(__x86_64__)
-#define SYS_renameat2 316
-#elif defined(__aarch64__)
-#define SYS_renameat2 276
-#else
-#error "Unknown architecture"
-#endif
-#endif // SYS_renameat2
-
-#ifndef RENAME_NOREPLACE
-#define RENAME_NOREPLACE (1 << 0)
-#endif // RENAME_NOREPLACE
-
-int renameat2(int olddirfd, const char* oldpath, int newdirfd,
- const char* newpath, unsigned int flags) {
- return syscall(SYS_renameat2, olddirfd, oldpath, newdirfd, newpath, flags);
-}
-
-TEST(Renameat2Test, NoReplaceSuccess) {
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- std::string const newpath = NewTempAbsPath();
- // renameat2 may fail with ENOSYS (if the syscall is unsupported) or EINVAL
- // (if flags are unsupported), or succeed (if RENAME_NOREPLACE is operating
- // correctly).
- EXPECT_THAT(
- renameat2(AT_FDCWD, f.path().c_str(), AT_FDCWD, newpath.c_str(),
- RENAME_NOREPLACE),
- AnyOf(SyscallFailsWithErrno(AnyOf(ENOSYS, EINVAL)), SyscallSucceeds()));
-}
-
-TEST(Renameat2Test, NoReplaceExisting) {
- auto f1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- auto f2 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- // renameat2 may fail with ENOSYS (if the syscall is unsupported), EINVAL (if
- // flags are unsupported), or EEXIST (if RENAME_NOREPLACE is operating
- // correctly).
- EXPECT_THAT(renameat2(AT_FDCWD, f1.path().c_str(), AT_FDCWD,
- f2.path().c_str(), RENAME_NOREPLACE),
- SyscallFailsWithErrno(AnyOf(ENOSYS, EINVAL, EEXIST)));
-}
-
-TEST(Renameat2Test, NoReplaceDot) {
- auto d1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto d2 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- // renameat2 may fail with ENOSYS (if the syscall is unsupported), EINVAL (if
- // flags are unsupported), or EEXIST (if RENAME_NOREPLACE is operating
- // correctly).
- EXPECT_THAT(
- renameat2(AT_FDCWD, d1.path().c_str(), AT_FDCWD,
- absl::StrCat(d2.path(), "/.").c_str(), RENAME_NOREPLACE),
- SyscallFailsWithErrno(AnyOf(ENOSYS, EINVAL, EEXIST)));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/rlimits.cc b/test/syscalls/linux/rlimits.cc
deleted file mode 100644
index d31a2a880..000000000
--- a/test/syscalls/linux/rlimits.cc
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sys/resource.h>
-#include <sys/time.h>
-
-#include "test/util/capability_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(RlimitTest, SetRlimitHigher) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_RESOURCE)));
-
- struct rlimit rl = {};
- EXPECT_THAT(getrlimit(RLIMIT_NOFILE, &rl), SyscallSucceeds());
-
- // Lower the rlimit first, as it may be equal to /proc/sys/fs/nr_open, in
- // which case even users with CAP_SYS_RESOURCE can't raise it.
- rl.rlim_cur--;
- rl.rlim_max--;
- ASSERT_THAT(setrlimit(RLIMIT_NOFILE, &rl), SyscallSucceeds());
-
- rl.rlim_max++;
- EXPECT_THAT(setrlimit(RLIMIT_NOFILE, &rl), SyscallSucceeds());
-}
-
-TEST(RlimitTest, UnprivilegedSetRlimit) {
- // Drop privileges if necessary.
- AutoCapability cap(CAP_SYS_RESOURCE, false);
-
- struct rlimit rl = {};
- rl.rlim_cur = 1000;
- rl.rlim_max = 20000;
- EXPECT_THAT(setrlimit(RLIMIT_NOFILE, &rl), SyscallSucceeds());
-
- struct rlimit rl2 = {};
- EXPECT_THAT(getrlimit(RLIMIT_NOFILE, &rl2), SyscallSucceeds());
- EXPECT_EQ(rl.rlim_cur, rl2.rlim_cur);
- EXPECT_EQ(rl.rlim_max, rl2.rlim_max);
-
- rl.rlim_max = 100000;
- EXPECT_THAT(setrlimit(RLIMIT_NOFILE, &rl), SyscallFailsWithErrno(EPERM));
-}
-
-TEST(RlimitTest, SetSoftRlimitAboveHard) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_RESOURCE)));
-
- struct rlimit rl = {};
- EXPECT_THAT(getrlimit(RLIMIT_NOFILE, &rl), SyscallSucceeds());
-
- rl.rlim_cur = rl.rlim_max + 1;
- EXPECT_THAT(setrlimit(RLIMIT_NOFILE, &rl), SyscallFailsWithErrno(EINVAL));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/rseq.cc b/test/syscalls/linux/rseq.cc
deleted file mode 100644
index 94f9154a0..000000000
--- a/test/syscalls/linux/rseq.cc
+++ /dev/null
@@ -1,202 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <signal.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/rseq/test.h"
-#include "test/syscalls/linux/rseq/uapi.h"
-#include "test/util/logging.h"
-#include "test/util/multiprocess_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-using ::testing::AnyOf;
-using ::testing::Eq;
-
-// Syscall test for rseq (restartable sequences).
-//
-// We must be very careful about how these tests are written. Each thread may
-// only have one struct rseq registration, which may be done automatically at
-// thread start (as of 2019-11-13, glibc does *not* support rseq and thus does
-// not do so, but other libraries do).
-//
-// Testing of rseq is thus done primarily in a child process with no
-// registration. This means exec'ing a nostdlib binary, as rseq registration can
-// only be cleared by execve (or by knowing the old rseq address), and glibc
-// (based on the current unmerged patches) registers rseq before calling main().
-
-int RSeq(struct rseq* rseq, uint32_t rseq_len, int flags, uint32_t sig) {
- return syscall(kRseqSyscall, rseq, rseq_len, flags, sig);
-}
-
-// Returns true if this kernel supports the rseq syscall.
-PosixErrorOr<bool> RSeqSupported() {
- // We have to be careful here; there are three possible cases:
- //
- // 1. rseq is not supported -> ENOSYS
- // 2. rseq is supported and not registered -> success, but we should
- // unregister.
- // 3. rseq is supported and registered -> EINVAL (most likely).
-
- // The only validation done on new registrations is that rseq is aligned and
- // writable.
- rseq rseq = {};
- int ret = RSeq(&rseq, sizeof(rseq), 0, 0);
- if (ret == 0) {
- // Successfully registered, rseq is supported. Unregister.
- ret = RSeq(&rseq, sizeof(rseq), kRseqFlagUnregister, 0);
- if (ret != 0) {
- return PosixError(errno);
- }
- return true;
- }
-
- switch (errno) {
- case ENOSYS:
- // Not supported.
- return false;
- case EINVAL:
- // Supported, but already registered. EINVAL returned because we provided
- // a different address.
- return true;
- default:
- // Unknown error.
- return PosixError(errno);
- }
-}
-
-constexpr char kRseqBinary[] = "test/syscalls/linux/rseq/rseq";
-
-void RunChildTest(std::string test_case, int want_status) {
- std::string path = RunfilePath(kRseqBinary);
-
- pid_t child_pid = -1;
- int execve_errno = 0;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(path, {path, test_case}, {}, &child_pid, &execve_errno));
-
- ASSERT_GT(child_pid, 0);
- ASSERT_EQ(execve_errno, 0);
-
- int status = 0;
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0), SyscallSucceeds());
- ASSERT_THAT(status, AnyOf(Eq(want_status), Eq(128 + want_status)));
-}
-
-// Test that rseq must be aligned.
-TEST(RseqTest, Unaligned) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(RSeqSupported()));
-
- RunChildTest(kRseqTestUnaligned, 0);
-}
-
-// Sanity test that registration works.
-TEST(RseqTest, Register) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(RSeqSupported()));
-
- RunChildTest(kRseqTestRegister, 0);
-}
-
-// Registration can't be done twice.
-TEST(RseqTest, DoubleRegister) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(RSeqSupported()));
-
- RunChildTest(kRseqTestDoubleRegister, 0);
-}
-
-// Registration can be done again after unregister.
-TEST(RseqTest, RegisterUnregister) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(RSeqSupported()));
-
- RunChildTest(kRseqTestRegisterUnregister, 0);
-}
-
-// The pointer to rseq must match on register/unregister.
-TEST(RseqTest, UnregisterDifferentPtr) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(RSeqSupported()));
-
- RunChildTest(kRseqTestUnregisterDifferentPtr, 0);
-}
-
-// The signature must match on register/unregister.
-TEST(RseqTest, UnregisterDifferentSignature) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(RSeqSupported()));
-
- RunChildTest(kRseqTestUnregisterDifferentSignature, 0);
-}
-
-// The CPU ID is initialized.
-TEST(RseqTest, CPU) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(RSeqSupported()));
-
- RunChildTest(kRseqTestCPU, 0);
-}
-
-// Critical section is eventually aborted.
-TEST(RseqTest, Abort) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(RSeqSupported()));
-
- RunChildTest(kRseqTestAbort, 0);
-}
-
-// Abort may be before the critical section.
-TEST(RseqTest, AbortBefore) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(RSeqSupported()));
-
- RunChildTest(kRseqTestAbortBefore, 0);
-}
-
-// Signature must match.
-TEST(RseqTest, AbortSignature) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(RSeqSupported()));
-
- RunChildTest(kRseqTestAbortSignature, SIGSEGV);
-}
-
-// Abort must not be in the critical section.
-TEST(RseqTest, AbortPreCommit) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(RSeqSupported()));
-
- RunChildTest(kRseqTestAbortPreCommit, SIGSEGV);
-}
-
-// rseq.rseq_cs is cleared on abort.
-TEST(RseqTest, AbortClearsCS) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(RSeqSupported()));
-
- RunChildTest(kRseqTestAbortClearsCS, 0);
-}
-
-// rseq.rseq_cs is cleared on abort outside of critical section.
-TEST(RseqTest, InvalidAbortClearsCS) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(RSeqSupported()));
-
- RunChildTest(kRseqTestInvalidAbortClearsCS, 0);
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/rseq/BUILD b/test/syscalls/linux/rseq/BUILD
deleted file mode 100644
index 853258b04..000000000
--- a/test/syscalls/linux/rseq/BUILD
+++ /dev/null
@@ -1,61 +0,0 @@
-# This package contains a standalone rseq test binary. This binary must not
-# depend on libc, which might use rseq itself.
-
-load("//tools:defs.bzl", "cc_flags_supplier", "cc_library", "cc_toolchain", "select_arch")
-
-package(licenses = ["notice"])
-
-genrule(
- name = "rseq_binary",
- srcs = [
- "critical.h",
- "critical_amd64.S",
- "critical_arm64.S",
- "rseq.cc",
- "syscalls.h",
- "start_amd64.S",
- "start_arm64.S",
- "test.h",
- "types.h",
- "uapi.h",
- ],
- outs = ["rseq"],
- cmd = "$(CC) " +
- "$(CC_FLAGS) " +
- "-I. " +
- "-Wall " +
- "-Werror " +
- "-O2 " +
- "-std=c++17 " +
- "-static " +
- "-nostdlib " +
- "-ffreestanding " +
- "-o " +
- "$(location rseq) " +
- select_arch(
- amd64 = "$(location critical_amd64.S) $(location start_amd64.S) ",
- arm64 = "$(location critical_arm64.S) $(location start_arm64.S) ",
- no_match_error = "unsupported architecture",
- ) +
- "$(location rseq.cc)",
- toolchains = [
- cc_toolchain,
- ":no_pie_cc_flags",
- ],
- visibility = ["//:sandbox"],
-)
-
-cc_flags_supplier(
- name = "no_pie_cc_flags",
- features = ["-pie"],
-)
-
-cc_library(
- name = "lib",
- testonly = 1,
- hdrs = [
- "test.h",
- "uapi.h",
- ],
- visibility = ["//:sandbox"],
-)
diff --git a/test/syscalls/linux/rseq/critical.h b/test/syscalls/linux/rseq/critical.h
deleted file mode 100644
index ac987a25e..000000000
--- a/test/syscalls/linux/rseq/critical.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_LINUX_RSEQ_CRITICAL_H_
-#define GVISOR_TEST_SYSCALLS_LINUX_RSEQ_CRITICAL_H_
-
-#include "test/syscalls/linux/rseq/types.h"
-#include "test/syscalls/linux/rseq/uapi.h"
-
-constexpr uint32_t kRseqSignature = 0x90909090;
-
-extern "C" {
-
-extern void rseq_loop(struct rseq* r, struct rseq_cs* cs);
-extern void* rseq_loop_early_abort;
-extern void* rseq_loop_start;
-extern void* rseq_loop_pre_commit;
-extern void* rseq_loop_post_commit;
-extern void* rseq_loop_abort;
-
-extern int rseq_getpid(struct rseq* r, struct rseq_cs* cs);
-extern void* rseq_getpid_start;
-extern void* rseq_getpid_post_commit;
-extern void* rseq_getpid_abort;
-
-} // extern "C"
-
-#endif // GVISOR_TEST_SYSCALLS_LINUX_RSEQ_CRITICAL_H_
diff --git a/test/syscalls/linux/rseq/critical_amd64.S b/test/syscalls/linux/rseq/critical_amd64.S
deleted file mode 100644
index 8c0687e6d..000000000
--- a/test/syscalls/linux/rseq/critical_amd64.S
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Restartable sequences critical sections.
-
-// Loops continuously until aborted.
-//
-// void rseq_loop(struct rseq* r, struct rseq_cs* cs)
-
- .text
- .globl rseq_loop
- .type rseq_loop, @function
-
-rseq_loop:
- jmp begin
-
- // Abort block before the critical section.
- // Abort signature is 4 nops for simplicity.
- .byte 0x90, 0x90, 0x90, 0x90
- .globl rseq_loop_early_abort
-rseq_loop_early_abort:
- ret
-
-begin:
- // r->rseq_cs = cs
- movq %rsi, 8(%rdi)
-
- // N.B. rseq_cs will be cleared by any preempt, even outside the critical
- // section. Thus it must be set in or immediately before the critical section
- // to ensure it is not cleared before the section begins.
- .globl rseq_loop_start
-rseq_loop_start:
- jmp rseq_loop_start
-
- // "Pre-commit": extra instructions inside the critical section. These are
- // used as the abort point in TestAbortPreCommit, which is not valid.
- .globl rseq_loop_pre_commit
-rseq_loop_pre_commit:
- // Extra abort signature + nop for TestAbortPostCommit.
- .byte 0x90, 0x90, 0x90, 0x90
- nop
-
- // "Post-commit": never reached in this case.
- .globl rseq_loop_post_commit
-rseq_loop_post_commit:
-
- // Abort signature is 4 nops for simplicity.
- .byte 0x90, 0x90, 0x90, 0x90
-
- .globl rseq_loop_abort
-rseq_loop_abort:
- ret
-
- .size rseq_loop,.-rseq_loop
- .section .note.GNU-stack,"",@progbits
diff --git a/test/syscalls/linux/rseq/critical_arm64.S b/test/syscalls/linux/rseq/critical_arm64.S
deleted file mode 100644
index bfe7e8307..000000000
--- a/test/syscalls/linux/rseq/critical_arm64.S
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Restartable sequences critical sections.
-
-// Loops continuously until aborted.
-//
-// void rseq_loop(struct rseq* r, struct rseq_cs* cs)
-
- .text
- .globl rseq_loop
- .type rseq_loop, @function
-
-rseq_loop:
- b begin
-
- // Abort block before the critical section.
- // Abort signature.
- .byte 0x90, 0x90, 0x90, 0x90
- .globl rseq_loop_early_abort
-rseq_loop_early_abort:
- ret
-
-begin:
- // r->rseq_cs = cs
- str x1, [x0, #8]
-
- // N.B. rseq_cs will be cleared by any preempt, even outside the critical
- // section. Thus it must be set in or immediately before the critical section
- // to ensure it is not cleared before the section begins.
- .globl rseq_loop_start
-rseq_loop_start:
- b rseq_loop_start
-
- // "Pre-commit": extra instructions inside the critical section. These are
- // used as the abort point in TestAbortPreCommit, which is not valid.
- .globl rseq_loop_pre_commit
-rseq_loop_pre_commit:
- // Extra abort signature + nop for TestAbortPostCommit.
- .byte 0x90, 0x90, 0x90, 0x90
- nop
-
- // "Post-commit": never reached in this case.
- .globl rseq_loop_post_commit
-rseq_loop_post_commit:
-
- // Abort signature.
- .byte 0x90, 0x90, 0x90, 0x90
-
- .globl rseq_loop_abort
-rseq_loop_abort:
- ret
-
- .size rseq_loop,.-rseq_loop
- .section .note.GNU-stack,"",@progbits
diff --git a/test/syscalls/linux/rseq/rseq.cc b/test/syscalls/linux/rseq/rseq.cc
deleted file mode 100644
index 6f5d38bba..000000000
--- a/test/syscalls/linux/rseq/rseq.cc
+++ /dev/null
@@ -1,377 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/rseq/critical.h"
-#include "test/syscalls/linux/rseq/syscalls.h"
-#include "test/syscalls/linux/rseq/test.h"
-#include "test/syscalls/linux/rseq/types.h"
-#include "test/syscalls/linux/rseq/uapi.h"
-
-namespace gvisor {
-namespace testing {
-
-extern "C" int main(int argc, char** argv, char** envp);
-
-// Standalone initialization before calling main().
-extern "C" void __init(uintptr_t* sp) {
- int argc = sp[0];
- char** argv = reinterpret_cast<char**>(&sp[1]);
- char** envp = &argv[argc + 1];
-
- // Call main() and exit.
- sys_exit_group(main(argc, argv, envp));
-
- // sys_exit_group does not return
-}
-
-int strcmp(const char* s1, const char* s2) {
- const unsigned char* p1 = reinterpret_cast<const unsigned char*>(s1);
- const unsigned char* p2 = reinterpret_cast<const unsigned char*>(s2);
-
- while (*p1 == *p2) {
- if (!*p1) {
- return 0;
- }
- ++p1;
- ++p2;
- }
- return static_cast<int>(*p1) - static_cast<int>(*p2);
-}
-
-int sys_rseq(struct rseq* rseq, uint32_t rseq_len, int flags, uint32_t sig) {
- return raw_syscall(kRseqSyscall, rseq, rseq_len, flags, sig);
-}
-
-// Test that rseq must be aligned.
-int TestUnaligned() {
- constexpr uintptr_t kRequiredAlignment = alignof(rseq);
-
- char buf[2 * kRequiredAlignment] = {};
- uintptr_t ptr = reinterpret_cast<uintptr_t>(&buf[0]);
- if ((ptr & (kRequiredAlignment - 1)) == 0) {
- // buf is already aligned. Misalign it.
- ptr++;
- }
-
- int ret = sys_rseq(reinterpret_cast<rseq*>(ptr), sizeof(rseq), 0, 0);
- if (sys_errno(ret) != EINVAL) {
- return 1;
- }
- return 0;
-}
-
-// Sanity test that registration works.
-int TestRegister() {
- struct rseq r = {};
- int ret = sys_rseq(&r, sizeof(r), 0, 0);
- if (sys_errno(ret) != 0) {
- return 1;
- }
- return 0;
-}
-
-// Registration can't be done twice.
-int TestDoubleRegister() {
- struct rseq r = {};
- int ret = sys_rseq(&r, sizeof(r), 0, 0);
- if (sys_errno(ret) != 0) {
- return 1;
- }
-
- ret = sys_rseq(&r, sizeof(r), 0, 0);
- if (sys_errno(ret) != EBUSY) {
- return 1;
- }
-
- return 0;
-}
-
-// Registration can be done again after unregister.
-int TestRegisterUnregister() {
- struct rseq r = {};
-
- int ret = sys_rseq(&r, sizeof(r), 0, 0);
- if (sys_errno(ret) != 0) {
- return 1;
- }
-
- ret = sys_rseq(&r, sizeof(r), kRseqFlagUnregister, 0);
- if (sys_errno(ret) != 0) {
- return 1;
- }
-
- ret = sys_rseq(&r, sizeof(r), 0, 0);
- if (sys_errno(ret) != 0) {
- return 1;
- }
-
- return 0;
-}
-
-// The pointer to rseq must match on register/unregister.
-int TestUnregisterDifferentPtr() {
- struct rseq r = {};
-
- int ret = sys_rseq(&r, sizeof(r), 0, 0);
- if (sys_errno(ret) != 0) {
- return 1;
- }
-
- struct rseq r2 = {};
-
- ret = sys_rseq(&r2, sizeof(r2), kRseqFlagUnregister, 0);
- if (sys_errno(ret) != EINVAL) {
- return 1;
- }
-
- return 0;
-}
-
-// The signature must match on register/unregister.
-int TestUnregisterDifferentSignature() {
- constexpr int kSignature = 0;
-
- struct rseq r = {};
- int ret = sys_rseq(&r, sizeof(r), 0, kSignature);
- if (sys_errno(ret) != 0) {
- return 1;
- }
-
- ret = sys_rseq(&r, sizeof(r), kRseqFlagUnregister, kSignature + 1);
- if (sys_errno(ret) != EPERM) {
- return 1;
- }
-
- return 0;
-}
-
-// The CPU ID is initialized.
-int TestCPU() {
- struct rseq r = {};
- r.cpu_id = kRseqCPUIDUninitialized;
-
- int ret = sys_rseq(&r, sizeof(r), 0, 0);
- if (sys_errno(ret) != 0) {
- return 1;
- }
-
- if (__atomic_load_n(&r.cpu_id, __ATOMIC_RELAXED) < 0) {
- return 1;
- }
- if (__atomic_load_n(&r.cpu_id_start, __ATOMIC_RELAXED) < 0) {
- return 1;
- }
-
- return 0;
-}
-
-// Critical section is eventually aborted.
-int TestAbort() {
- struct rseq r = {};
- int ret = sys_rseq(&r, sizeof(r), 0, kRseqSignature);
- if (sys_errno(ret) != 0) {
- return 1;
- }
-
- struct rseq_cs cs = {};
- cs.version = 0;
- cs.flags = 0;
- cs.start_ip = reinterpret_cast<uint64_t>(&rseq_loop_start);
- cs.post_commit_offset = reinterpret_cast<uint64_t>(&rseq_loop_post_commit) -
- reinterpret_cast<uint64_t>(&rseq_loop_start);
- cs.abort_ip = reinterpret_cast<uint64_t>(&rseq_loop_abort);
-
- // Loops until abort. If this returns then abort occurred.
- rseq_loop(&r, &cs);
-
- return 0;
-}
-
-// Abort may be before the critical section.
-int TestAbortBefore() {
- struct rseq r = {};
- int ret = sys_rseq(&r, sizeof(r), 0, kRseqSignature);
- if (sys_errno(ret) != 0) {
- return 1;
- }
-
- struct rseq_cs cs = {};
- cs.version = 0;
- cs.flags = 0;
- cs.start_ip = reinterpret_cast<uint64_t>(&rseq_loop_start);
- cs.post_commit_offset = reinterpret_cast<uint64_t>(&rseq_loop_post_commit) -
- reinterpret_cast<uint64_t>(&rseq_loop_start);
- cs.abort_ip = reinterpret_cast<uint64_t>(&rseq_loop_early_abort);
-
- // Loops until abort. If this returns then abort occurred.
- rseq_loop(&r, &cs);
-
- return 0;
-}
-
-// Signature must match.
-int TestAbortSignature() {
- struct rseq r = {};
- int ret = sys_rseq(&r, sizeof(r), 0, kRseqSignature + 1);
- if (sys_errno(ret) != 0) {
- return 1;
- }
-
- struct rseq_cs cs = {};
- cs.version = 0;
- cs.flags = 0;
- cs.start_ip = reinterpret_cast<uint64_t>(&rseq_loop_start);
- cs.post_commit_offset = reinterpret_cast<uint64_t>(&rseq_loop_post_commit) -
- reinterpret_cast<uint64_t>(&rseq_loop_start);
- cs.abort_ip = reinterpret_cast<uint64_t>(&rseq_loop_abort);
-
- // Loops until abort. This should SIGSEGV on abort.
- rseq_loop(&r, &cs);
-
- return 1;
-}
-
-// Abort must not be in the critical section.
-int TestAbortPreCommit() {
- struct rseq r = {};
- int ret = sys_rseq(&r, sizeof(r), 0, kRseqSignature + 1);
- if (sys_errno(ret) != 0) {
- return 1;
- }
-
- struct rseq_cs cs = {};
- cs.version = 0;
- cs.flags = 0;
- cs.start_ip = reinterpret_cast<uint64_t>(&rseq_loop_start);
- cs.post_commit_offset = reinterpret_cast<uint64_t>(&rseq_loop_post_commit) -
- reinterpret_cast<uint64_t>(&rseq_loop_start);
- cs.abort_ip = reinterpret_cast<uint64_t>(&rseq_loop_pre_commit);
-
- // Loops until abort. This should SIGSEGV on abort.
- rseq_loop(&r, &cs);
-
- return 1;
-}
-
-// rseq.rseq_cs is cleared on abort.
-int TestAbortClearsCS() {
- struct rseq r = {};
- int ret = sys_rseq(&r, sizeof(r), 0, kRseqSignature);
- if (sys_errno(ret) != 0) {
- return 1;
- }
-
- struct rseq_cs cs = {};
- cs.version = 0;
- cs.flags = 0;
- cs.start_ip = reinterpret_cast<uint64_t>(&rseq_loop_start);
- cs.post_commit_offset = reinterpret_cast<uint64_t>(&rseq_loop_post_commit) -
- reinterpret_cast<uint64_t>(&rseq_loop_start);
- cs.abort_ip = reinterpret_cast<uint64_t>(&rseq_loop_abort);
-
- // Loops until abort. If this returns then abort occurred.
- rseq_loop(&r, &cs);
-
- if (__atomic_load_n(&r.rseq_cs, __ATOMIC_RELAXED)) {
- return 1;
- }
-
- return 0;
-}
-
-// rseq.rseq_cs is cleared on abort outside of critical section.
-int TestInvalidAbortClearsCS() {
- struct rseq r = {};
- int ret = sys_rseq(&r, sizeof(r), 0, kRseqSignature);
- if (sys_errno(ret) != 0) {
- return 1;
- }
-
- struct rseq_cs cs = {};
- cs.version = 0;
- cs.flags = 0;
- cs.start_ip = reinterpret_cast<uint64_t>(&rseq_loop_start);
- cs.post_commit_offset = reinterpret_cast<uint64_t>(&rseq_loop_post_commit) -
- reinterpret_cast<uint64_t>(&rseq_loop_start);
- cs.abort_ip = reinterpret_cast<uint64_t>(&rseq_loop_abort);
-
- __atomic_store_n(&r.rseq_cs, &cs, __ATOMIC_RELAXED);
-
- // When the next abort condition occurs, the kernel will clear cs once it
- // determines we aren't in the critical section.
- while (1) {
- if (!__atomic_load_n(&r.rseq_cs, __ATOMIC_RELAXED)) {
- break;
- }
- }
-
- return 0;
-}
-
-// Exit codes:
-// 0 - Pass
-// 1 - Fail
-// 2 - Missing argument
-// 3 - Unknown test case
-extern "C" int main(int argc, char** argv, char** envp) {
- if (argc != 2) {
- // Usage: rseq <test case>
- return 2;
- }
-
- if (strcmp(argv[1], kRseqTestUnaligned) == 0) {
- return TestUnaligned();
- }
- if (strcmp(argv[1], kRseqTestRegister) == 0) {
- return TestRegister();
- }
- if (strcmp(argv[1], kRseqTestDoubleRegister) == 0) {
- return TestDoubleRegister();
- }
- if (strcmp(argv[1], kRseqTestRegisterUnregister) == 0) {
- return TestRegisterUnregister();
- }
- if (strcmp(argv[1], kRseqTestUnregisterDifferentPtr) == 0) {
- return TestUnregisterDifferentPtr();
- }
- if (strcmp(argv[1], kRseqTestUnregisterDifferentSignature) == 0) {
- return TestUnregisterDifferentSignature();
- }
- if (strcmp(argv[1], kRseqTestCPU) == 0) {
- return TestCPU();
- }
- if (strcmp(argv[1], kRseqTestAbort) == 0) {
- return TestAbort();
- }
- if (strcmp(argv[1], kRseqTestAbortBefore) == 0) {
- return TestAbortBefore();
- }
- if (strcmp(argv[1], kRseqTestAbortSignature) == 0) {
- return TestAbortSignature();
- }
- if (strcmp(argv[1], kRseqTestAbortPreCommit) == 0) {
- return TestAbortPreCommit();
- }
- if (strcmp(argv[1], kRseqTestAbortClearsCS) == 0) {
- return TestAbortClearsCS();
- }
- if (strcmp(argv[1], kRseqTestInvalidAbortClearsCS) == 0) {
- return TestInvalidAbortClearsCS();
- }
-
- return 3;
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/rseq/start_amd64.S b/test/syscalls/linux/rseq/start_amd64.S
deleted file mode 100644
index b9611b276..000000000
--- a/test/syscalls/linux/rseq/start_amd64.S
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-
- .text
- .align 4
- .type _start,@function
- .globl _start
-
-_start:
- movq %rsp,%rdi
- call __init
- hlt
-
- .size _start,.-_start
- .section .note.GNU-stack,"",@progbits
-
- .text
- .globl raw_syscall
- .type raw_syscall, @function
-
-raw_syscall:
- mov %rdi,%rax // syscall #
- mov %rsi,%rdi // arg0
- mov %rdx,%rsi // arg1
- mov %rcx,%rdx // arg2
- mov %r8,%r10 // arg3 (goes in r10 instead of rcx for system calls)
- mov %r9,%r8 // arg4
- mov 0x8(%rsp),%r9 // arg5
- syscall
- ret
-
- .size raw_syscall,.-raw_syscall
- .section .note.GNU-stack,"",@progbits
diff --git a/test/syscalls/linux/rseq/start_arm64.S b/test/syscalls/linux/rseq/start_arm64.S
deleted file mode 100644
index 693c1c6eb..000000000
--- a/test/syscalls/linux/rseq/start_arm64.S
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-
- .text
- .align 4
- .type _start,@function
- .globl _start
-
-_start:
- mov x29, sp
- bl __init
- wfi
-
- .size _start,.-_start
- .section .note.GNU-stack,"",@progbits
-
- .text
- .globl raw_syscall
- .type raw_syscall, @function
-
-raw_syscall:
- mov x8,x0 // syscall #
- mov x0,x1 // arg0
- mov x1,x2 // arg1
- mov x2,x3 // arg2
- mov x3,x4 // arg3
- mov x4,x5 // arg4
- mov x5,x6 // arg5
- svc #0
- ret
-
- .size raw_syscall,.-raw_syscall
- .section .note.GNU-stack,"",@progbits
diff --git a/test/syscalls/linux/rseq/syscalls.h b/test/syscalls/linux/rseq/syscalls.h
deleted file mode 100644
index c4118e6c5..000000000
--- a/test/syscalls/linux/rseq/syscalls.h
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_LINUX_RSEQ_SYSCALLS_H_
-#define GVISOR_TEST_SYSCALLS_LINUX_RSEQ_SYSCALLS_H_
-
-#include "test/syscalls/linux/rseq/types.h"
-
-// Syscall numbers.
-#if defined(__x86_64__)
-constexpr int kGetpid = 39;
-constexpr int kExitGroup = 231;
-#elif defined(__aarch64__)
-constexpr int kGetpid = 172;
-constexpr int kExitGroup = 94;
-#else
-#error "Unknown architecture"
-#endif
-
-namespace gvisor {
-namespace testing {
-
-// Standalone system call interfaces.
-// Note that these are all "raw" system call interfaces which encode
-// errors by setting the return value to a small negative number.
-// Use sys_errno() to check system call return values for errors.
-
-// Maximum Linux error number.
-constexpr int kMaxErrno = 4095;
-
-// Errno values.
-#define EPERM 1
-#define EFAULT 14
-#define EBUSY 16
-#define EINVAL 22
-
-// Get the error number from a raw system call return value.
-// Returns a positive error number or 0 if there was no error.
-static inline int sys_errno(uintptr_t rval) {
- if (rval >= static_cast<uintptr_t>(-kMaxErrno)) {
- return -static_cast<int>(rval);
- }
- return 0;
-}
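-// Usage sketch (for illustration only):
-//   uintptr_t rv = raw_syscall(kGetpid);
-//   if (int err = sys_errno(rv)) { /* err is the positive errno */ }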
-
-extern "C" uintptr_t raw_syscall(int number, ...);
-
-static inline void sys_exit_group(int status) {
- raw_syscall(kExitGroup, status);
-}
-static inline int sys_getpid() {
- return static_cast<int>(raw_syscall(kGetpid));
-}
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_LINUX_RSEQ_SYSCALLS_H_
diff --git a/test/syscalls/linux/rseq/test.h b/test/syscalls/linux/rseq/test.h
deleted file mode 100644
index ff0dd6e48..000000000
--- a/test/syscalls/linux/rseq/test.h
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_LINUX_RSEQ_TEST_H_
-#define GVISOR_TEST_SYSCALLS_LINUX_RSEQ_TEST_H_
-
-namespace gvisor {
-namespace testing {
-
-// Test cases supported by rseq binary.
-
-constexpr char kRseqTestUnaligned[] = "unaligned";
-constexpr char kRseqTestRegister[] = "register";
-constexpr char kRseqTestDoubleRegister[] = "double-register";
-constexpr char kRseqTestRegisterUnregister[] = "register-unregister";
-constexpr char kRseqTestUnregisterDifferentPtr[] = "unregister-different-ptr";
-constexpr char kRseqTestUnregisterDifferentSignature[] =
- "unregister-different-signature";
-constexpr char kRseqTestCPU[] = "cpu";
-constexpr char kRseqTestAbort[] = "abort";
-constexpr char kRseqTestAbortBefore[] = "abort-before";
-constexpr char kRseqTestAbortSignature[] = "abort-signature";
-constexpr char kRseqTestAbortPreCommit[] = "abort-precommit";
-constexpr char kRseqTestAbortClearsCS[] = "abort-clears-cs";
-constexpr char kRseqTestInvalidAbortClearsCS[] = "invalid-abort-clears-cs";
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_LINUX_RSEQ_TEST_H_
diff --git a/test/syscalls/linux/rseq/types.h b/test/syscalls/linux/rseq/types.h
deleted file mode 100644
index b6afe9817..000000000
--- a/test/syscalls/linux/rseq/types.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_LINUX_RSEQ_TYPES_H_
-#define GVISOR_TEST_SYSCALLS_LINUX_RSEQ_TYPES_H_
-
-using size_t = __SIZE_TYPE__;
-using uintptr_t = __UINTPTR_TYPE__;
-
-using uint8_t = __UINT8_TYPE__;
-using uint16_t = __UINT16_TYPE__;
-using uint32_t = __UINT32_TYPE__;
-using uint64_t = __UINT64_TYPE__;
-
-using int8_t = __INT8_TYPE__;
-using int16_t = __INT16_TYPE__;
-using int32_t = __INT32_TYPE__;
-using int64_t = __INT64_TYPE__;
-
-#endif // GVISOR_TEST_SYSCALLS_LINUX_RSEQ_TYPES_H_
diff --git a/test/syscalls/linux/rseq/uapi.h b/test/syscalls/linux/rseq/uapi.h
deleted file mode 100644
index d3e60d0a4..000000000
--- a/test/syscalls/linux/rseq/uapi.h
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_LINUX_RSEQ_UAPI_H_
-#define GVISOR_TEST_SYSCALLS_LINUX_RSEQ_UAPI_H_
-
-#include <stdint.h>
-
-// User-kernel ABI for restartable sequences.
-
-// Syscall numbers.
-#if defined(__x86_64__)
-constexpr int kRseqSyscall = 334;
-#elif defined(__aarch64__)
-constexpr int kRseqSyscall = 293;
-#else
-#error "Unknown architecture"
-#endif // __x86_64__
-
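-// A critical section spans [start_ip, start_ip + post_commit_offset). If the
-// task is preempted or takes a signal while executing in that range, the
-// kernel restarts it at abort_ip; the four bytes immediately preceding
-// abort_ip must match the signature registered with the rseq syscall.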
-struct rseq_cs {
- uint32_t version;
- uint32_t flags;
- uint64_t start_ip;
- uint64_t post_commit_offset;
- uint64_t abort_ip;
-} __attribute__((aligned(4 * sizeof(uint64_t))));
-
-// N.B. alignment is enforced by the kernel.
-struct rseq {
- uint32_t cpu_id_start;
- uint32_t cpu_id;
- struct rseq_cs* rseq_cs;
- uint32_t flags;
-} __attribute__((aligned(4 * sizeof(uint64_t))));
-
-constexpr int kRseqFlagUnregister = 1 << 0;
-
-constexpr int kRseqCPUIDUninitialized = -1;
-
-#endif // GVISOR_TEST_SYSCALLS_LINUX_RSEQ_UAPI_H_
diff --git a/test/syscalls/linux/rtsignal.cc b/test/syscalls/linux/rtsignal.cc
deleted file mode 100644
index ed27e2566..000000000
--- a/test/syscalls/linux/rtsignal.cc
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <cerrno>
-#include <csignal>
-
-#include "gtest/gtest.h"
-#include "test/util/cleanup.h"
-#include "test/util/logging.h"
-#include "test/util/posix_error.h"
-#include "test/util/signal_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// saved_info is set by the handler.
-siginfo_t saved_info;
-
-// has_saved_info is set to true by the handler.
-volatile bool has_saved_info;
-
-void SigHandler(int sig, siginfo_t* info, void* context) {
- // Copy to the given info.
- saved_info = *info;
- has_saved_info = true;
-}
-
-void ClearSavedInfo() {
- // Clear the cached info.
- memset(&saved_info, 0, sizeof(saved_info));
- has_saved_info = false;
-}
-
-PosixErrorOr<Cleanup> SetupSignalHandler(int sig) {
- struct sigaction sa;
- sa.sa_sigaction = SigHandler;
- sigfillset(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO;
- return ScopedSigaction(sig, sa);
-}
-
-class RtSignalTest : public ::testing::Test {
- protected:
- void SetUp() override {
- action_cleanup_ = ASSERT_NO_ERRNO_AND_VALUE(SetupSignalHandler(SIGUSR1));
- mask_cleanup_ =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_UNBLOCK, SIGUSR1));
- }
-
- void TearDown() override { ClearSavedInfo(); }
-
- private:
- Cleanup action_cleanup_;
- Cleanup mask_cleanup_;
-};
-
-static int rt_sigqueueinfo(pid_t tgid, int sig, siginfo_t* uinfo) {
- int ret;
- do {
- // NOTE(b/25434735): rt_sigqueueinfo(2) could return EAGAIN for RT signals.
- ret = syscall(SYS_rt_sigqueueinfo, tgid, sig, uinfo);
- } while (ret == -1 && errno == EAGAIN);
- return ret;
-}
-
-TEST_F(RtSignalTest, InvalidTID) {
- siginfo_t uinfo;
- // Depending on the kernel version, these calls may fail with
- // ESRCH (goobuntu machines) or EPERM (production machines). Thus,
- // the test simply ensures that they do fail.
- EXPECT_THAT(rt_sigqueueinfo(-1, SIGUSR1, &uinfo), SyscallFails());
- EXPECT_FALSE(has_saved_info);
- EXPECT_THAT(rt_sigqueueinfo(0, SIGUSR1, &uinfo), SyscallFails());
- EXPECT_FALSE(has_saved_info);
-}
-
-TEST_F(RtSignalTest, InvalidCodes) {
- siginfo_t uinfo;
-
- // We need a child for the code checks to apply. If the process is delivering
- // to itself, then it can use whatever codes it wants and they will go
- // through.
- pid_t child = fork();
- if (child == 0) {
- _exit(1);
- }
- ASSERT_THAT(child, SyscallSucceeds());
-
- // These are not allowed for child processes.
- uinfo.si_code = 0; // SI_USER.
- EXPECT_THAT(rt_sigqueueinfo(child, SIGUSR1, &uinfo),
- SyscallFailsWithErrno(EPERM));
- uinfo.si_code = 0x80; // SI_KERNEL.
- EXPECT_THAT(rt_sigqueueinfo(child, SIGUSR1, &uinfo),
- SyscallFailsWithErrno(EPERM));
- uinfo.si_code = -6; // SI_TKILL.
- EXPECT_THAT(rt_sigqueueinfo(child, SIGUSR1, &uinfo),
- SyscallFailsWithErrno(EPERM));
- uinfo.si_code = -1; // SI_QUEUE (allowed).
- EXPECT_THAT(rt_sigqueueinfo(child, SIGUSR1, &uinfo), SyscallSucceeds());
-
- // Join the child process.
- EXPECT_THAT(waitpid(child, nullptr, 0), SyscallSucceeds());
-}
-
-TEST_F(RtSignalTest, ValueDelivered) {
- siginfo_t uinfo;
- uinfo.si_code = -1; // SI_QUEUE (allowed).
- uinfo.si_errno = 0x1234;
-
- EXPECT_EQ(saved_info.si_errno, 0x0);
- EXPECT_THAT(rt_sigqueueinfo(getpid(), SIGUSR1, &uinfo), SyscallSucceeds());
- EXPECT_TRUE(has_saved_info);
- EXPECT_EQ(saved_info.si_errno, 0x1234);
-}
-
-TEST_F(RtSignalTest, SignoMatch) {
- auto action2_cleanup = ASSERT_NO_ERRNO_AND_VALUE(SetupSignalHandler(SIGUSR2));
- auto mask2_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_UNBLOCK, SIGUSR2));
-
- siginfo_t uinfo;
- uinfo.si_code = -1; // SI_QUEUE (allowed).
-
- EXPECT_THAT(rt_sigqueueinfo(getpid(), SIGUSR1, &uinfo), SyscallSucceeds());
- EXPECT_TRUE(has_saved_info);
- EXPECT_EQ(saved_info.si_signo, SIGUSR1);
-
- ClearSavedInfo();
-
- EXPECT_THAT(rt_sigqueueinfo(getpid(), SIGUSR2, &uinfo), SyscallSucceeds());
- EXPECT_TRUE(has_saved_info);
- EXPECT_EQ(saved_info.si_signo, SIGUSR2);
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
-
-int main(int argc, char** argv) {
- // These tests depend on delivering SIGUSR1/2 to the main thread (so they can
- // synchronously check has_saved_info). Block these so that any other threads
- // created by TestInit will also have them blocked.
- sigset_t set;
- sigemptyset(&set);
- sigaddset(&set, SIGUSR1);
- sigaddset(&set, SIGUSR2);
- TEST_PCHECK(sigprocmask(SIG_BLOCK, &set, nullptr) == 0);
-
- gvisor::testing::TestInit(&argc, &argv);
- return gvisor::testing::RunAllTests();
-}
diff --git a/test/syscalls/linux/sched.cc b/test/syscalls/linux/sched.cc
deleted file mode 100644
index 735e99411..000000000
--- a/test/syscalls/linux/sched.cc
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <sched.h>
-
-#include "gtest/gtest.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// In Linux, PIDs are limited to 29 bits because of how futex is implemented.
-constexpr int kImpossiblePID = (1 << 29) + 1;
-
-TEST(SchedGetparamTest, ReturnsZero) {
- struct sched_param param;
- EXPECT_THAT(sched_getparam(getpid(), &param), SyscallSucceeds());
- EXPECT_EQ(param.sched_priority, 0);
- EXPECT_THAT(sched_getparam(/*pid=*/0, &param), SyscallSucceeds());
- EXPECT_EQ(param.sched_priority, 0);
-}
-
-TEST(SchedGetparamTest, InvalidPIDReturnsEINVAL) {
- struct sched_param param;
- EXPECT_THAT(sched_getparam(/*pid=*/-1, &param),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(SchedGetparamTest, ImpossiblePIDReturnsESRCH) {
- struct sched_param param;
- EXPECT_THAT(sched_getparam(kImpossiblePID, &param),
- SyscallFailsWithErrno(ESRCH));
-}
-
-TEST(SchedGetparamTest, NullParamReturnsEINVAL) {
- EXPECT_THAT(sched_getparam(0, nullptr), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(SchedGetschedulerTest, ReturnsSchedOther) {
- EXPECT_THAT(sched_getscheduler(getpid()),
- SyscallSucceedsWithValue(SCHED_OTHER));
- EXPECT_THAT(sched_getscheduler(/*pid=*/0),
- SyscallSucceedsWithValue(SCHED_OTHER));
-}
-
-TEST(SchedGetschedulerTest, ReturnsEINVAL) {
- EXPECT_THAT(sched_getscheduler(/*pid=*/-1), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(SchedGetschedulerTest, ReturnsESRCH) {
- EXPECT_THAT(sched_getscheduler(kImpossiblePID), SyscallFailsWithErrno(ESRCH));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/sched_yield.cc b/test/syscalls/linux/sched_yield.cc
deleted file mode 100644
index 5d24f5b58..000000000
--- a/test/syscalls/linux/sched_yield.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sched.h>
-
-#include "gtest/gtest.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(SchedYieldTest, Success) {
- EXPECT_THAT(sched_yield(), SyscallSucceeds());
- EXPECT_THAT(sched_yield(), SyscallSucceeds());
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/seccomp.cc b/test/syscalls/linux/seccomp.cc
deleted file mode 100644
index ce88d90dd..000000000
--- a/test/syscalls/linux/seccomp.cc
+++ /dev/null
@@ -1,425 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <linux/audit.h>
-#include <linux/filter.h>
-#include <linux/seccomp.h>
-#include <pthread.h>
-#include <sched.h>
-#include <signal.h>
-#include <string.h>
-#include <sys/prctl.h>
-#include <sys/syscall.h>
-#include <time.h>
-#include <ucontext.h>
-#include <unistd.h>
-
-#include <atomic>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/base/macros.h"
-#include "test/util/logging.h"
-#include "test/util/memory_util.h"
-#include "test/util/multiprocess_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/proc_util.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-#ifndef SYS_SECCOMP
-#define SYS_SECCOMP 1
-#endif
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// A syscall not implemented by Linux that we don't expect to be called.
-#ifdef __x86_64__
-constexpr uint32_t kFilteredSyscall = SYS_vserver;
-#elif __aarch64__
-// Use the last of the arch_specific_syscalls, which are not implemented on arm64.
-constexpr uint32_t kFilteredSyscall = __NR_arch_specific_syscall + 15;
-#endif
-
-// Applies a seccomp-bpf filter that returns `filtered_result` for
-// `sysno` and allows all other syscalls. Async-signal-safe.
-void ApplySeccompFilter(uint32_t sysno, uint32_t filtered_result,
- uint32_t flags = 0) {
- // "Prior to [PR_SET_SECCOMP], the task must call prctl(PR_SET_NO_NEW_PRIVS,
- // 1) or run with CAP_SYS_ADMIN privileges in its namespace." -
- // Documentation/prctl/seccomp_filter.txt
- //
- // prctl(PR_SET_NO_NEW_PRIVS, 1) may be called repeatedly; calls after the
- // first are no-ops.
- TEST_PCHECK(prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) == 0);
- MaybeSave();
-
- struct sock_filter filter[] = {
- // A = seccomp_data.arch
- BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 4),
-#if defined(__x86_64__)
- // if (A != AUDIT_ARCH_X86_64) goto kill
- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, AUDIT_ARCH_X86_64, 0, 4),
-#elif defined(__aarch64__)
- // if (A != AUDIT_ARCH_AARCH64) goto kill
- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, AUDIT_ARCH_AARCH64, 0, 4),
-#else
-#error "Unknown architecture"
-#endif
- // A = seccomp_data.nr
- BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0),
- // if (A != sysno) goto allow
- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, sysno, 0, 1),
- // return filtered_result
- BPF_STMT(BPF_RET | BPF_K, filtered_result),
- // allow: return SECCOMP_RET_ALLOW
- BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
- // kill: return SECCOMP_RET_KILL
- BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL),
- };
- struct sock_fprog prog;
- prog.len = ABSL_ARRAYSIZE(filter);
- prog.filter = filter;
- if (flags) {
- TEST_CHECK(syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER, flags, &prog) ==
- 0);
- } else {
- TEST_PCHECK(prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0) == 0);
- }
- MaybeSave();
-}
-
-// Wrapper for sigaction. Async-signal-safe.
-void RegisterSignalHandler(int signum,
- void (*handler)(int, siginfo_t*, void*)) {
- struct sigaction sa = {};
- sa.sa_sigaction = handler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO;
- TEST_PCHECK(sigaction(signum, &sa, nullptr) == 0);
- MaybeSave();
-}
-
-// All of the following tests execute in a subprocess to ensure that each test
-// is run in a separate process. This avoids cross-contamination of seccomp
-// state between tests, and is necessary to ensure that test processes killed
-// by SECCOMP_RET_KILL are single-threaded (since SECCOMP_RET_KILL only kills
-// the offending thread, not the whole thread group).
-
-TEST(SeccompTest, RetKillCausesDeathBySIGSYS) {
- pid_t const pid = fork();
- if (pid == 0) {
- // Register a signal handler for SIGSYS that we don't expect to be invoked.
- RegisterSignalHandler(
- SIGSYS, +[](int, siginfo_t*, void*) { _exit(1); });
- ApplySeccompFilter(kFilteredSyscall, SECCOMP_RET_KILL);
- syscall(kFilteredSyscall);
- TEST_CHECK_MSG(false, "Survived invocation of test syscall");
- }
- ASSERT_THAT(pid, SyscallSucceeds());
- int status;
- ASSERT_THAT(waitpid(pid, &status, 0), SyscallSucceedsWithValue(pid));
- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGSYS)
- << "status " << status;
-}
-
-TEST(SeccompTest, RetKillOnlyKillsOneThread) {
- Mapping stack = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(2 * kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
-
- pid_t const pid = fork();
- if (pid == 0) {
- // Register a signal handler for SIGSYS that we don't expect to be invoked.
- RegisterSignalHandler(
- SIGSYS, +[](int, siginfo_t*, void*) { _exit(1); });
- ApplySeccompFilter(kFilteredSyscall, SECCOMP_RET_KILL);
- // Pass CLONE_VFORK to block the original thread in the child process until
- // the clone thread exits with SIGSYS.
- //
- // N.B. clone(2) is not officially async-signal-safe, but at minimum glibc's
- // x86_64 implementation is safe. See glibc
- // sysdeps/unix/sysv/linux/x86_64/clone.S.
- clone(
- +[](void* arg) {
- syscall(kFilteredSyscall); // should kill the thread
- _exit(1); // should be unreachable
- return 2; // should be very unreachable, shut up the compiler
- },
- stack.endptr(),
- CLONE_FILES | CLONE_FS | CLONE_SIGHAND | CLONE_THREAD | CLONE_VM |
- CLONE_VFORK,
- nullptr);
- _exit(0);
- }
- ASSERT_THAT(pid, SyscallSucceeds());
- int status;
- ASSERT_THAT(waitpid(pid, &status, 0), SyscallSucceedsWithValue(pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << "status " << status;
-}
-
-TEST(SeccompTest, RetTrapCausesSIGSYS) {
- pid_t const pid = fork();
- if (pid == 0) {
- constexpr uint16_t kTrapValue = 0xdead;
- RegisterSignalHandler(
- SIGSYS, +[](int signo, siginfo_t* info, void* ucv) {
- ucontext_t* uc = static_cast<ucontext_t*>(ucv);
- // This is a signal handler, so we must stay async-signal-safe.
- TEST_CHECK(info->si_signo == SIGSYS);
- TEST_CHECK(info->si_code == SYS_SECCOMP);
- TEST_CHECK(info->si_errno == kTrapValue);
- TEST_CHECK(info->si_call_addr != nullptr);
- TEST_CHECK(info->si_syscall == kFilteredSyscall);
-#if defined(__x86_64__)
- TEST_CHECK(info->si_arch == AUDIT_ARCH_X86_64);
- TEST_CHECK(uc->uc_mcontext.gregs[REG_RAX] == kFilteredSyscall);
-#elif defined(__aarch64__)
- TEST_CHECK(info->si_arch == AUDIT_ARCH_AARCH64);
- TEST_CHECK(uc->uc_mcontext.regs[8] == kFilteredSyscall);
-#endif // defined(__x86_64__)
- _exit(0);
- });
- ApplySeccompFilter(kFilteredSyscall, SECCOMP_RET_TRAP | kTrapValue);
- syscall(kFilteredSyscall);
- TEST_CHECK_MSG(false, "Survived invocation of test syscall");
- }
- ASSERT_THAT(pid, SyscallSucceeds());
- int status;
- ASSERT_THAT(waitpid(pid, &status, 0), SyscallSucceedsWithValue(pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << "status " << status;
-}
-
-#ifdef __x86_64__
-
-constexpr uint64_t kVsyscallTimeEntry = 0xffffffffff600400;
-
-time_t vsyscall_time(time_t* t) {
- return reinterpret_cast<time_t (*)(time_t*)>(kVsyscallTimeEntry)(t);
-}
-
-TEST(SeccompTest, SeccompAppliesToVsyscall) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(IsVsyscallEnabled()));
-
- pid_t const pid = fork();
- if (pid == 0) {
- constexpr uint16_t kTrapValue = 0xdead;
- RegisterSignalHandler(
- SIGSYS, +[](int signo, siginfo_t* info, void* ucv) {
- ucontext_t* uc = static_cast<ucontext_t*>(ucv);
- // This is a signal handler, so we must stay async-signal-safe.
- TEST_CHECK(info->si_signo == SIGSYS);
- TEST_CHECK(info->si_code == SYS_SECCOMP);
- TEST_CHECK(info->si_errno == kTrapValue);
- TEST_CHECK(info->si_call_addr != nullptr);
- TEST_CHECK(info->si_syscall == SYS_time);
- TEST_CHECK(info->si_arch == AUDIT_ARCH_X86_64);
- TEST_CHECK(uc->uc_mcontext.gregs[REG_RAX] == SYS_time);
- _exit(0);
- });
- ApplySeccompFilter(SYS_time, SECCOMP_RET_TRAP | kTrapValue);
- vsyscall_time(nullptr); // Should result in death.
- TEST_CHECK_MSG(false, "Survived invocation of test syscall");
- }
- ASSERT_THAT(pid, SyscallSucceeds());
- int status;
- ASSERT_THAT(waitpid(pid, &status, 0), SyscallSucceedsWithValue(pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << "status " << status;
-}
-
-TEST(SeccompTest, RetKillVsyscallCausesDeathBySIGSYS) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(IsVsyscallEnabled()));
-
- pid_t const pid = fork();
- if (pid == 0) {
- // Register a signal handler for SIGSYS that we don't expect to be invoked.
- RegisterSignalHandler(
- SIGSYS, +[](int, siginfo_t*, void*) { _exit(1); });
- ApplySeccompFilter(SYS_time, SECCOMP_RET_KILL);
- vsyscall_time(nullptr); // Should result in death.
- TEST_CHECK_MSG(false, "Survived invocation of test syscall");
- }
- ASSERT_THAT(pid, SyscallSucceeds());
- int status;
- ASSERT_THAT(waitpid(pid, &status, 0), SyscallSucceedsWithValue(pid));
- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGSYS)
- << "status " << status;
-}
-
-#endif // defined(__x86_64__)
-
-TEST(SeccompTest, RetTraceWithoutPtracerReturnsENOSYS) {
- pid_t const pid = fork();
- if (pid == 0) {
- ApplySeccompFilter(kFilteredSyscall, SECCOMP_RET_TRACE);
- TEST_CHECK(syscall(kFilteredSyscall) == -1 && errno == ENOSYS);
- _exit(0);
- }
- ASSERT_THAT(pid, SyscallSucceeds());
- int status;
- ASSERT_THAT(waitpid(pid, &status, 0), SyscallSucceedsWithValue(pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << "status " << status;
-}
-
-TEST(SeccompTest, RetErrnoReturnsErrno) {
- pid_t const pid = fork();
- if (pid == 0) {
- // ENOTNAM: "Not a XENIX named type file"
- ApplySeccompFilter(kFilteredSyscall, SECCOMP_RET_ERRNO | ENOTNAM);
- TEST_CHECK(syscall(kFilteredSyscall) == -1 && errno == ENOTNAM);
- _exit(0);
- }
- ASSERT_THAT(pid, SyscallSucceeds());
- int status;
- ASSERT_THAT(waitpid(pid, &status, 0), SyscallSucceedsWithValue(pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << "status " << status;
-}
-
-TEST(SeccompTest, RetAllowAllowsSyscall) {
- pid_t const pid = fork();
- if (pid == 0) {
- ApplySeccompFilter(kFilteredSyscall, SECCOMP_RET_ALLOW);
- TEST_CHECK(syscall(kFilteredSyscall) == -1 && errno == ENOSYS);
- _exit(0);
- }
- ASSERT_THAT(pid, SyscallSucceeds());
- int status;
- ASSERT_THAT(waitpid(pid, &status, 0), SyscallSucceedsWithValue(pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << "status " << status;
-}
-
-// This test will validate that TSYNC will apply to all threads.
-TEST(SeccompTest, TsyncAppliesToAllThreads) {
- Mapping stack = ASSERT_NO_ERRNO_AND_VALUE(
- MmapAnon(2 * kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
-
- // We don't want to apply this policy to other test runner threads, so fork.
- const pid_t pid = fork();
-
- if (pid == 0) {
- // First check that we receive a ENOSYS before the policy is applied.
- TEST_CHECK(syscall(kFilteredSyscall) == -1 && errno == ENOSYS);
-
- // N.B. clone(2) is not officially async-signal-safe, but at minimum glibc's
- // x86_64 implementation is safe. See glibc
- // sysdeps/unix/sysv/linux/x86_64/clone.S.
- clone(
- +[](void* arg) {
- ApplySeccompFilter(kFilteredSyscall, SECCOMP_RET_ERRNO | ENOTNAM,
- SECCOMP_FILTER_FLAG_TSYNC);
- return 0;
- },
- stack.endptr(),
- CLONE_FILES | CLONE_FS | CLONE_SIGHAND | CLONE_THREAD | CLONE_VM |
- CLONE_VFORK,
- nullptr);
-
- // Because we're using CLONE_VFORK, this thread will be blocked until the
- // second thread has released its hold on our virtual memory; since we're
- // not exec'ing, that happens at _exit.
-
- // Now verify that the policy applied to this thread too.
- TEST_CHECK(syscall(kFilteredSyscall) == -1 && errno == ENOTNAM);
- _exit(0);
- }
-
- ASSERT_THAT(pid, SyscallSucceeds());
- int status = 0;
- ASSERT_THAT(waitpid(pid, &status, 0), SyscallSucceedsWithValue(pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << "status " << status;
-}
-
-// Validates that seccomp(2) rejects unsupported flags.
-TEST(SeccompTest, SeccompRejectsUnknownFlags) {
- constexpr uint32_t kInvalidFlag = 123;
- ASSERT_THAT(
- syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER, kInvalidFlag, nullptr),
- SyscallFailsWithErrno(EINVAL));
-}
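-
-// For reference, a filter equivalent to the ones ApplySeccompFilter installs
-// above can be built by hand roughly as follows. This is only an illustrative
-// sketch (the helper's real implementation lives earlier in this file); it
-// assumes <linux/filter.h>, <linux/seccomp.h>, <sys/prctl.h> and <stddef.h>
-// are available, and that `action` is e.g. SECCOMP_RET_ERRNO | ENOTNAM.
-int InstallFilterSketch(uint32_t nr, uint32_t action) {
- struct sock_filter filter[] = {
- // Load the syscall number from seccomp_data.
- BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, nr)),
- // If it equals nr, return the requested action...
- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, nr, 0, 1),
- BPF_STMT(BPF_RET | BPF_K, action),
- // ...otherwise allow the syscall.
- BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
- };
- struct sock_fprog prog = {};
- prog.len = static_cast<unsigned short>(sizeof(filter) / sizeof(filter[0]));
- prog.filter = filter;
- // Unprivileged callers must set no_new_privs before loading a filter.
- if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) != 0) {
- return -1;
- }
- return syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER, 0, &prog);
-}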
-
-TEST(SeccompTest, LeastPermissiveFilterReturnValueApplies) {
- // This is RetKillCausesDeathBySIGSYS, plus extra filters before and after the
- // killing one; their less restrictive return values should be ignored.
- pid_t const pid = fork();
- if (pid == 0) {
- RegisterSignalHandler(
- SIGSYS, +[](int, siginfo_t*, void*) { _exit(1); });
- ApplySeccompFilter(kFilteredSyscall, SECCOMP_RET_TRACE);
- ApplySeccompFilter(kFilteredSyscall, SECCOMP_RET_KILL);
- ApplySeccompFilter(kFilteredSyscall, SECCOMP_RET_ERRNO | ENOTNAM);
- syscall(kFilteredSyscall);
- TEST_CHECK_MSG(false, "Survived invocation of test syscall");
- }
- ASSERT_THAT(pid, SyscallSucceeds());
- int status;
- ASSERT_THAT(waitpid(pid, &status, 0), SyscallSucceedsWithValue(pid));
- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGSYS)
- << "status " << status;
-}
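-
-// A note on the precedence relied on above: when several filters are
-// installed, the kernel reports the most restrictive of their actions, and for
-// the actions used in this file that precedence happens to match the numeric
-// order of the SECCOMP_RET_* action values (see seccomp(2)).
-static_assert(SECCOMP_RET_KILL < SECCOMP_RET_ERRNO, "");
-static_assert(SECCOMP_RET_ERRNO < SECCOMP_RET_TRACE, "");
-static_assert(SECCOMP_RET_TRACE < SECCOMP_RET_ALLOW, "");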
-
-// Passed as argv[1] to cause the test binary to invoke kFilteredSyscall and
-// exit. Not a real flag since flag parsing happens during initialization,
-// which may create threads.
-constexpr char kInvokeFilteredSyscallFlag[] = "--seccomp_test_child";
-
-TEST(SeccompTest, FiltersPreservedAcrossForkAndExecve) {
- ExecveArray const grandchild_argv(
- {"/proc/self/exe", kInvokeFilteredSyscallFlag});
-
- pid_t const pid = fork();
- if (pid == 0) {
- ApplySeccompFilter(kFilteredSyscall, SECCOMP_RET_KILL);
- pid_t const grandchild_pid = fork();
- if (grandchild_pid == 0) {
- execve(grandchild_argv.get()[0], grandchild_argv.get(),
- /* envp = */ nullptr);
- TEST_PCHECK_MSG(false, "execve failed");
- }
- int status;
- TEST_PCHECK(waitpid(grandchild_pid, &status, 0) == grandchild_pid);
- TEST_CHECK(WIFSIGNALED(status) && WTERMSIG(status) == SIGSYS);
- _exit(0);
- }
- ASSERT_THAT(pid, SyscallSucceeds());
- int status;
- ASSERT_THAT(waitpid(pid, &status, 0), SyscallSucceedsWithValue(pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << "status " << status;
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
-
-int main(int argc, char** argv) {
- if (argc >= 2 &&
- strcmp(argv[1], gvisor::testing::kInvokeFilteredSyscallFlag) == 0) {
- syscall(gvisor::testing::kFilteredSyscall);
- exit(0);
- }
-
- gvisor::testing::TestInit(&argc, &argv);
- return gvisor::testing::RunAllTests();
-}
diff --git a/test/syscalls/linux/select.cc b/test/syscalls/linux/select.cc
deleted file mode 100644
index d74096ded..000000000
--- a/test/syscalls/linux/select.cc
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <sys/resource.h>
-#include <sys/select.h>
-#include <sys/time.h>
-
-#include <climits>
-#include <csignal>
-#include <cstdio>
-
-#include "gtest/gtest.h"
-#include "absl/time/time.h"
-#include "test/syscalls/linux/base_poll_test.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/multiprocess_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/rlimit_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-class SelectTest : public BasePollTest {
- protected:
- void SetUp() override { BasePollTest::SetUp(); }
- void TearDown() override { BasePollTest::TearDown(); }
-};
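-
-// The tests below poke at select(2) edge cases one at a time. For orientation,
-// this sketch shows the ordinary usage pattern they are variations on; it is
-// illustrative only and not used by the tests, and `fd` and `millis` are
-// placeholder parameters.
-bool WaitReadableSketch(int fd, int millis) {
- fd_set read_set;
- FD_ZERO(&read_set);
- FD_SET(fd, &read_set);
- struct timeval tv = {};
- tv.tv_sec = millis / 1000;
- tv.tv_usec = (millis % 1000) * 1000;
- // nfds is the highest-numbered fd in any set, plus one.
- int n = select(fd + 1, &read_set, nullptr, nullptr, &tv);
- return n > 0 && FD_ISSET(fd, &read_set);
-}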
-
-// See that when there are no FD sets, select behaves like sleep.
-TEST_F(SelectTest, NullFds) {
- struct timeval timeout = absl::ToTimeval(absl::Milliseconds(10));
- ASSERT_THAT(select(0, nullptr, nullptr, nullptr, &timeout),
- SyscallSucceeds());
- EXPECT_EQ(timeout.tv_sec, 0);
- EXPECT_EQ(timeout.tv_usec, 0);
-
- timeout = absl::ToTimeval(absl::Milliseconds(10));
- ASSERT_THAT(select(1, nullptr, nullptr, nullptr, &timeout),
- SyscallSucceeds());
- EXPECT_EQ(timeout.tv_sec, 0);
- EXPECT_EQ(timeout.tv_usec, 0);
-}
-
-TEST_F(SelectTest, NegativeNfds) {
- EXPECT_THAT(select(-1, nullptr, nullptr, nullptr, nullptr),
- SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(select(-100000, nullptr, nullptr, nullptr, nullptr),
- SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(select(INT_MIN, nullptr, nullptr, nullptr, nullptr),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_F(SelectTest, ClosedFds) {
- auto temp_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(temp_file.path(), O_RDONLY));
-
- // We can't rely on a file descriptor being closed in a multi-threaded
- // application, so fork to get a clean process.
- EXPECT_THAT(InForkedProcess([&] {
- int fd_num = fd.get();
- fd.reset();
-
- fd_set read_set;
- FD_ZERO(&read_set);
- FD_SET(fd_num, &read_set);
-
- struct timeval timeout =
- absl::ToTimeval(absl::Milliseconds(10));
- TEST_PCHECK(select(fd_num + 1, &read_set, nullptr, nullptr,
- &timeout) != 0);
- TEST_PCHECK(errno == EBADF);
- }),
- IsPosixErrorOkAndHolds(0));
-}
-
-TEST_F(SelectTest, ZeroTimeout) {
- struct timeval timeout = {};
- EXPECT_THAT(select(1, nullptr, nullptr, nullptr, &timeout),
- SyscallSucceeds());
- // Ignore timeout as its value is now undefined.
-}
-
-// If random S/R interrupts the select, SIGALRM may be delivered before select
-// restarts, causing the select to hang forever.
-TEST_F(SelectTest, NoTimeout) {
- // When there's no timeout, select may never return so set a timer.
- SetTimer(absl::Milliseconds(100));
- // See that we get interrupted by the timer.
- ASSERT_THAT(select(1, nullptr, nullptr, nullptr, nullptr),
- SyscallFailsWithErrno(EINTR));
- EXPECT_TRUE(TimerFired());
-}
-
-TEST_F(SelectTest, InvalidTimeoutNegative) {
- struct timeval timeout = absl::ToTimeval(absl::Microseconds(-1));
- EXPECT_THAT(select(1, nullptr, nullptr, nullptr, &timeout),
- SyscallFailsWithErrno(EINVAL));
- // Ignore timeout as its value is now undefined.
-}
-
-// Verify that a signal interrupts select.
-//
-// If random S/R interrupts the select, SIGALRM may be delivered before select
-// restarts, causing the select to hang forever.
-TEST_F(SelectTest, InterruptedBySignal) {
- absl::Duration duration(absl::Seconds(5));
- struct timeval timeout = absl::ToTimeval(duration);
- SetTimer(absl::Milliseconds(100));
- ASSERT_FALSE(TimerFired());
- ASSERT_THAT(select(1, nullptr, nullptr, nullptr, &timeout),
- SyscallFailsWithErrno(EINTR));
- EXPECT_TRUE(TimerFired());
- // Ignore timeout as its value is now undefined.
-}
-
-TEST_F(SelectTest, IgnoreBitsAboveNfds) {
- // fd_set is a bit array with at least FD_SETSIZE bits. Test that bits
- // corresponding to file descriptors above nfds are ignored.
- fd_set read_set;
- FD_ZERO(&read_set);
- constexpr int kNfds = 1;
- for (int fd = kNfds; fd < FD_SETSIZE; fd++) {
- FD_SET(fd, &read_set);
- }
- // Pass a zero timeout so that select returns immediately.
- struct timeval timeout = {};
- EXPECT_THAT(select(kNfds, &read_set, nullptr, nullptr, &timeout),
- SyscallSucceedsWithValue(0));
-}
-
-// This test illustrates Linux's behavior of 'select' calls passing after
-// setrlimit RLIMIT_NOFILE is called. In particular, versions of sshd rely on
-// this behavior. See b/122318458.
-TEST_F(SelectTest, SetrlimitCallNOFILE) {
- fd_set read_set;
- FD_ZERO(&read_set);
- timeval timeout = {};
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(NewTempAbsPath(), O_RDONLY | O_CREAT, S_IRUSR));
-
- Cleanup reset_rlimit =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSetSoftRlimit(RLIMIT_NOFILE, 0));
-
- FD_SET(fd.get(), &read_set);
- // This call with a zero timeout should return immediately.
- EXPECT_THAT(select(fd.get() + 1, &read_set, nullptr, nullptr, &timeout),
- SyscallSucceeds());
-}
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/semaphore.cc b/test/syscalls/linux/semaphore.cc
deleted file mode 100644
index f72957f89..000000000
--- a/test/syscalls/linux/semaphore.cc
+++ /dev/null
@@ -1,1024 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <signal.h>
-#include <sys/ipc.h>
-#include <sys/sem.h>
-#include <sys/types.h>
-
-#include <atomic>
-#include <cerrno>
-#include <ctime>
-#include <set>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/base/macros.h"
-#include "absl/memory/memory.h"
-#include "absl/synchronization/mutex.h"
-#include "absl/time/clock.h"
-#include "test/util/capability_util.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-constexpr int kSemMap = 1024000000;
-constexpr int kSemMni = 32000;
-constexpr int kSemMns = 1024000000;
-constexpr int kSemMnu = 1024000000;
-constexpr int kSemMsl = 32000;
-constexpr int kSemOpm = 500;
-constexpr int kSemUme = 500;
-constexpr int kSemUsz = 20;
-constexpr int kSemVmx = 32767;
-constexpr int kSemAem = 32767;
-
-class AutoSem {
- public:
- // Creates a new private semaphore.
- AutoSem() : id_(semget(IPC_PRIVATE, 1, 0)) {}
-
- explicit AutoSem(int id) : id_(id) {}
- ~AutoSem() {
- if (id_ >= 0) {
- EXPECT_THAT(semctl(id_, 0, IPC_RMID), SyscallSucceeds());
- }
- }
-
- int release() {
- int old = id_;
- id_ = -1;
- return old;
- }
-
- int get() { return id_; }
-
- private:
- int id_ = -1;
-};
-
-bool operator==(struct semid_ds const& a, struct semid_ds const& b) {
- return a.sem_perm.__key == b.sem_perm.__key &&
- a.sem_perm.uid == b.sem_perm.uid && a.sem_perm.gid == b.sem_perm.gid &&
- a.sem_perm.cuid == b.sem_perm.cuid &&
- a.sem_perm.cgid == b.sem_perm.cgid &&
- a.sem_perm.mode == b.sem_perm.mode && a.sem_otime == b.sem_otime &&
- a.sem_ctime == b.sem_ctime && a.sem_nsems == b.sem_nsems;
-}
-
-TEST(SemaphoreTest, SemGet) {
- // Test creation and lookup.
- AutoSem sem(semget(1, 10, IPC_CREAT));
- ASSERT_THAT(sem.get(), SyscallSucceeds());
- EXPECT_THAT(semget(1, 10, IPC_CREAT), SyscallSucceedsWithValue(sem.get()));
- EXPECT_THAT(semget(1, 9, IPC_CREAT), SyscallSucceedsWithValue(sem.get()));
-
- // Creation and lookup failure cases.
- EXPECT_THAT(semget(1, 11, IPC_CREAT), SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(semget(1, -1, IPC_CREAT), SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(semget(1, 10, IPC_CREAT | IPC_EXCL),
- SyscallFailsWithErrno(EEXIST));
- EXPECT_THAT(semget(2, 1, 0), SyscallFailsWithErrno(ENOENT));
- EXPECT_THAT(semget(2, 0, IPC_CREAT), SyscallFailsWithErrno(EINVAL));
-
- // Private semaphores never conflict.
- AutoSem sem2(semget(IPC_PRIVATE, 1, 0));
- AutoSem sem3(semget(IPC_PRIVATE, 1, 0));
- ASSERT_THAT(sem2.get(), SyscallSucceeds());
- EXPECT_NE(sem.get(), sem2.get());
- ASSERT_THAT(sem3.get(), SyscallSucceeds());
- EXPECT_NE(sem3.get(), sem2.get());
-}
-
-// Tests system-wide limits for semget.
-TEST(SemaphoreTest, SemGetSystemLimits) {
- // Disable save so that we don't trigger save/restore too many times.
- const DisableSave ds;
-
- // Exceed number of semaphores per set.
- EXPECT_THAT(semget(IPC_PRIVATE, kSemMsl + 1, 0),
- SyscallFailsWithErrno(EINVAL));
-
- // Exceed system-wide limit for semaphore sets by 1.
- AutoSem sems[kSemMni];
- EXPECT_THAT(semget(IPC_PRIVATE, 1, 0), SyscallFailsWithErrno(ENOSPC));
-}
-
-// Tests simple operations that shouldn't block in a single-thread.
-TEST(SemaphoreTest, SemOpSingleNoBlock) {
- AutoSem sem(semget(IPC_PRIVATE, 1, 0600 | IPC_CREAT));
- ASSERT_THAT(sem.get(), SyscallSucceeds());
-
- struct sembuf buf = {};
- buf.sem_op = 1;
- ASSERT_THAT(semop(sem.get(), &buf, 1), SyscallSucceeds());
-
- buf.sem_op = -1;
- ASSERT_THAT(semop(sem.get(), &buf, 1), SyscallSucceeds());
-
- buf.sem_op = 0;
- ASSERT_THAT(semop(sem.get(), &buf, 1), SyscallSucceeds());
-
- // Error cases with invalid values.
- ASSERT_THAT(semop(sem.get() + 1, &buf, 1), SyscallFailsWithErrno(EINVAL));
-
- buf.sem_num = 1;
- ASSERT_THAT(semop(sem.get(), &buf, 1), SyscallFailsWithErrno(EFBIG));
-
- ASSERT_THAT(semop(sem.get(), nullptr, 0), SyscallFailsWithErrno(EINVAL));
-}
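-
-// The sembuf semantics exercised above and below, in one place: a positive
-// sem_op adds to the semaphore value (a "V"), a negative sem_op blocks until
-// the value is large enough to subtract from (a "P"), and a zero sem_op blocks
-// until the value reaches zero. These helpers are an illustrative sketch only;
-// the tests call semop() directly.
-void SemVSketch(int semid) {
- struct sembuf up = {};
- up.sem_op = 1;
- ASSERT_THAT(semop(semid, &up, 1), SyscallSucceeds());
-}
-void SemPSketch(int semid) {
- struct sembuf down = {};
- down.sem_op = -1;
- ASSERT_THAT(RetryEINTR(semop)(semid, &down, 1), SyscallSucceeds());
-}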
-
-// Tests simple timed operations that shouldn't block in a single-thread.
-TEST(SemaphoreTest, SemTimedOpSingleNoBlock) {
- AutoSem sem(semget(IPC_PRIVATE, 1, 0600 | IPC_CREAT));
- ASSERT_THAT(sem.get(), SyscallSucceeds());
-
- struct sembuf buf = {};
- buf.sem_op = 1;
- struct timespec timeout = {};
- // 50 milliseconds.
- timeout.tv_nsec = 5e7;
- ASSERT_THAT(semtimedop(sem.get(), &buf, 1, &timeout), SyscallSucceeds());
-
- buf.sem_op = -1;
- EXPECT_THAT(semtimedop(sem.get(), &buf, 1, &timeout), SyscallSucceeds());
-
- buf.sem_op = 0;
- EXPECT_THAT(semtimedop(sem.get(), &buf, 1, &timeout), SyscallSucceeds());
-
- // Error cases with invalid values.
- EXPECT_THAT(semtimedop(sem.get() + 1, &buf, 1, &timeout),
- SyscallFailsWithErrno(EINVAL));
-
- buf.sem_num = 1;
- EXPECT_THAT(semtimedop(sem.get(), &buf, 1, &timeout),
- SyscallFailsWithErrno(EFBIG));
- buf.sem_num = 0;
-
- EXPECT_THAT(semtimedop(sem.get(), nullptr, 0, &timeout),
- SyscallFailsWithErrno(EINVAL));
-
- timeout.tv_nsec = 1e9;
- EXPECT_THAT(semtimedop(sem.get(), &buf, 0, &timeout),
- SyscallFailsWithErrno(EINVAL));
-}
-
-// Tests multiple operations that shouldn't block in a single-thread.
-TEST(SemaphoreTest, SemOpMultiNoBlock) {
- AutoSem sem(semget(IPC_PRIVATE, 4, 0600 | IPC_CREAT));
- ASSERT_THAT(sem.get(), SyscallSucceeds());
-
- struct sembuf bufs[5] = {};
- bufs[0].sem_num = 0;
- bufs[0].sem_op = 10;
- bufs[0].sem_flg = 0;
-
- bufs[1].sem_num = 1;
- bufs[1].sem_op = 2;
- bufs[1].sem_flg = 0;
-
- bufs[2].sem_num = 2;
- bufs[2].sem_op = 3;
- bufs[2].sem_flg = 0;
-
- bufs[3].sem_num = 0;
- bufs[3].sem_op = -5;
- bufs[3].sem_flg = 0;
-
- bufs[4].sem_num = 2;
- bufs[4].sem_op = 2;
- bufs[4].sem_flg = 0;
-
- ASSERT_THAT(semop(sem.get(), bufs, ABSL_ARRAYSIZE(bufs)), SyscallSucceeds());
-
- ASSERT_THAT(semctl(sem.get(), 0, GETVAL), SyscallSucceedsWithValue(5));
- ASSERT_THAT(semctl(sem.get(), 1, GETVAL), SyscallSucceedsWithValue(2));
- ASSERT_THAT(semctl(sem.get(), 2, GETVAL), SyscallSucceedsWithValue(5));
- ASSERT_THAT(semctl(sem.get(), 3, GETVAL), SyscallSucceedsWithValue(0));
-
- for (auto& b : bufs) {
- b.sem_op = -b.sem_op;
- }
- // 0 and 3 order must be reversed, otherwise it will block.
- std::swap(bufs[0].sem_op, bufs[3].sem_op);
- ASSERT_THAT(RetryEINTR(semop)(sem.get(), bufs, ABSL_ARRAYSIZE(bufs)),
- SyscallSucceeds());
-
- // All semaphores should be back to 0 now.
- for (size_t i = 0; i < 4; ++i) {
- ASSERT_THAT(semctl(sem.get(), i, GETVAL), SyscallSucceedsWithValue(0));
- }
-}
-
-// Makes a best-effort attempt to ensure that the operation would block.
-TEST(SemaphoreTest, SemOpBlock) {
- AutoSem sem(semget(IPC_PRIVATE, 1, 0600 | IPC_CREAT));
- ASSERT_THAT(sem.get(), SyscallSucceeds());
-
- std::atomic<int> blocked = ATOMIC_VAR_INIT(1);
- ScopedThread th([&sem, &blocked] {
- absl::SleepFor(absl::Milliseconds(100));
- ASSERT_EQ(blocked.load(), 1);
-
- struct sembuf buf = {};
- buf.sem_op = 1;
- ASSERT_THAT(RetryEINTR(semop)(sem.get(), &buf, 1), SyscallSucceeds());
- });
-
- struct sembuf buf = {};
- buf.sem_op = -1;
- ASSERT_THAT(RetryEINTR(semop)(sem.get(), &buf, 1), SyscallSucceeds());
- blocked.store(0);
-}
-
-// Makes a best-effort attempt to ensure that the operation times out while
-// blocked.
-TEST(SemaphoreTest, SemTimedOpBlock) {
- AutoSem sem(semget(IPC_PRIVATE, 1, 0600 | IPC_CREAT));
- ASSERT_THAT(sem.get(), SyscallSucceeds());
-
- struct sembuf buf = {};
- buf.sem_op = -1;
- struct timespec timeout = {};
- timeout.tv_nsec = 5e7;
- // When semtimedop reaches the time limit, it fails with errno EAGAIN.
- ASSERT_THAT(RetryEINTR(semtimedop)(sem.get(), &buf, 1, &timeout),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-// Tests that IPC_NOWAIT returns with no wait.
-TEST(SemaphoreTest, SemOpNoBlock) {
- AutoSem sem(semget(IPC_PRIVATE, 1, 0600 | IPC_CREAT));
- ASSERT_THAT(sem.get(), SyscallSucceeds());
-
- struct sembuf buf = {};
- buf.sem_flg = IPC_NOWAIT;
-
- buf.sem_op = -1;
- ASSERT_THAT(semop(sem.get(), &buf, 1), SyscallFailsWithErrno(EAGAIN));
-
- buf.sem_op = 1;
- ASSERT_THAT(semop(sem.get(), &buf, 1), SyscallSucceeds());
-
- buf.sem_op = 0;
- ASSERT_THAT(semop(sem.get(), &buf, 1), SyscallFailsWithErrno(EAGAIN));
-}
-
-// Runs two threads: one signals and the other waits the same number of times.
-TEST(SemaphoreTest, SemOpSimple) {
- AutoSem sem(semget(IPC_PRIVATE, 1, 0600 | IPC_CREAT));
- ASSERT_THAT(sem.get(), SyscallSucceeds());
-
- constexpr size_t kLoops = 100;
- ScopedThread th([&sem] {
- struct sembuf buf = {};
- buf.sem_op = 1;
- for (size_t i = 0; i < kLoops; i++) {
- // Sleep so that the increments don't all happen in one shot and the
- // waiter actually gets a chance to block.
- absl::SleepFor(absl::Milliseconds(1));
- ASSERT_THAT(semop(sem.get(), &buf, 1), SyscallSucceeds());
- }
- });
-
- struct sembuf buf = {};
- buf.sem_op = -1;
- for (size_t i = 0; i < kLoops; i++) {
- ASSERT_THAT(RetryEINTR(semop)(sem.get(), &buf, 1), SyscallSucceeds());
- }
-}
-
-// Tests that semaphore can be removed while there are waiters.
-// NoRandomSave: Test relies on timing that random save throws off.
-TEST(SemaphoreTest, SemOpRemoveWithWaiter) {
- AutoSem sem(semget(IPC_PRIVATE, 2, 0600 | IPC_CREAT));
- ASSERT_THAT(sem.get(), SyscallSucceeds());
-
- ScopedThread th([&sem] {
- absl::SleepFor(absl::Milliseconds(250));
- ASSERT_THAT(semctl(sem.release(), 0, IPC_RMID), SyscallSucceeds());
- });
-
- // This must block before the IPC_RMID above runs; otherwise it fails with
- // EINVAL because the semaphore has already been removed.
- struct sembuf buf = {};
- buf.sem_op = -1;
- ASSERT_THAT(RetryEINTR(semop)(sem.get(), &buf, 1),
- SyscallFailsWithErrno(EIDRM));
-}
-
-// Semaphores aren't fair: any waiter whose request can be satisfied may be
-// woken, even if it gets in front of other waiters.
-TEST(SemaphoreTest, SemOpBestFitExecution) {
- AutoSem sem(semget(IPC_PRIVATE, 1, 0600 | IPC_CREAT));
- ASSERT_THAT(sem.get(), SyscallSucceeds());
-
- ScopedThread th([&sem] {
- struct sembuf buf = {};
- buf.sem_op = -2;
- ASSERT_THAT(RetryEINTR(semop)(sem.get(), &buf, 1), SyscallFails());
- // Ensure that wait will only unblock when the semaphore is removed. On
- // EINTR retry it may race with deletion and return EINVAL.
- ASSERT_TRUE(errno == EIDRM || errno == EINVAL) << "errno=" << errno;
- });
-
- // Ensures that the '-1' operations below are satisfied even though the '-2'
- // above is already waiting for the same semaphore.
- for (size_t i = 0; i < 10; ++i) {
- struct sembuf buf = {};
- buf.sem_op = 1;
- ASSERT_THAT(RetryEINTR(semop)(sem.get(), &buf, 1), SyscallSucceeds());
-
- absl::SleepFor(absl::Milliseconds(10));
-
- buf.sem_op = -1;
- ASSERT_THAT(RetryEINTR(semop)(sem.get(), &buf, 1), SyscallSucceeds());
- }
-
- ASSERT_THAT(semctl(sem.release(), 0, IPC_RMID), SyscallSucceeds());
-}
-
-// Executes random operations in multiple threads and verifies correctness.
-TEST(SemaphoreTest, SemOpRandom) {
- // Don't do cooperative S/R tests because there are too many syscalls in
- // this test.
- const DisableSave ds;
-
- AutoSem sem(semget(IPC_PRIVATE, 1, 0600 | IPC_CREAT));
- ASSERT_THAT(sem.get(), SyscallSucceeds());
-
- // Protects the seed below.
- absl::Mutex mutex;
- uint32_t seed = time(nullptr);
-
- int count = 0; // Tracks semaphore value.
- bool done = false; // Tells waiters to stop after signal threads are done.
-
- // These threads will wait in a loop.
- std::unique_ptr<ScopedThread> decs[5];
- for (auto& dec : decs) {
- dec = absl::make_unique<ScopedThread>([&sem, &mutex, &count, &seed, &done] {
- for (size_t i = 0; i < 500; ++i) {
- int16_t val;
- {
- absl::MutexLock l(&mutex);
- if (done) {
- return;
- }
- val = (rand_r(&seed) % 10 + 1); // Rand between 1 and 10.
- count -= val;
- }
- struct sembuf buf = {};
- buf.sem_op = -val;
- ASSERT_THAT(RetryEINTR(semop)(sem.get(), &buf, 1), SyscallSucceeds());
- absl::SleepFor(absl::Milliseconds(val * 2));
- }
- });
- }
-
- // These threads will wait for zero in a loop.
- std::unique_ptr<ScopedThread> zeros[5];
- for (auto& zero : zeros) {
- zero = absl::make_unique<ScopedThread>([&sem, &mutex, &done] {
- for (size_t i = 0; i < 500; ++i) {
- {
- absl::MutexLock l(&mutex);
- if (done) {
- return;
- }
- }
- struct sembuf buf = {};
- buf.sem_op = 0;
- ASSERT_THAT(RetryEINTR(semop)(sem.get(), &buf, 1), SyscallSucceeds());
- absl::SleepFor(absl::Milliseconds(10));
- }
- });
- }
-
- // These threads will signal in a loop.
- std::unique_ptr<ScopedThread> incs[5];
- for (auto& inc : incs) {
- inc = absl::make_unique<ScopedThread>([&sem, &mutex, &count, &seed] {
- for (size_t i = 0; i < 500; ++i) {
- int16_t val;
- {
- absl::MutexLock l(&mutex);
- val = (rand_r(&seed) % 10 + 1); // Rand between 1 and 10.
- count += val;
- }
- struct sembuf buf = {};
- buf.sem_op = val;
- ASSERT_THAT(semop(sem.get(), &buf, 1), SyscallSucceeds());
- absl::SleepFor(absl::Milliseconds(val * 2));
- }
- });
- }
-
- // First wait for signal threads to be done.
- for (auto& inc : incs) {
- inc->Join();
- }
-
- // Now there could be waiters blocked (remember operations are random).
- // Notify waiters that we're done and signal semaphore just the right amount.
- {
- absl::MutexLock l(&mutex);
- done = true;
- struct sembuf buf = {};
- buf.sem_op = -count;
- ASSERT_THAT(semop(sem.get(), &buf, 1), SyscallSucceeds());
- }
-
- // Now all waiters should unblock and exit.
- for (auto& dec : decs) {
- dec->Join();
- }
- for (auto& zero : zeros) {
- zero->Join();
- }
-}
-
-TEST(SemaphoreTest, SemOpNamespace) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
-
- AutoSem sem(semget(123, 1, 0600 | IPC_CREAT | IPC_EXCL));
- ASSERT_THAT(sem.get(), SyscallSucceeds());
-
- ScopedThread([]() {
- EXPECT_THAT(unshare(CLONE_NEWIPC), SyscallSucceeds());
- AutoSem sem(semget(123, 1, 0600 | IPC_CREAT | IPC_EXCL));
- ASSERT_THAT(sem.get(), SyscallSucceeds());
- });
-}
-
-TEST(SemaphoreTest, SemCtlVal) {
- AutoSem sem(semget(IPC_PRIVATE, 1, 0600 | IPC_CREAT));
- ASSERT_THAT(sem.get(), SyscallSucceeds());
-
- // Semaphore must start with 0.
- EXPECT_THAT(semctl(sem.get(), 0, GETVAL), SyscallSucceedsWithValue(0));
-
- // Increase value and ensure waiters are woken up.
- ScopedThread th([&sem] {
- struct sembuf buf = {};
- buf.sem_op = -10;
- ASSERT_THAT(RetryEINTR(semop)(sem.get(), &buf, 1), SyscallSucceeds());
- });
-
- ASSERT_THAT(semctl(sem.get(), 0, SETVAL, 9), SyscallSucceeds());
- EXPECT_THAT(semctl(sem.get(), 0, GETVAL), SyscallSucceedsWithValue(9));
-
- ASSERT_THAT(semctl(sem.get(), 0, SETVAL, 20), SyscallSucceeds());
- const int value = semctl(sem.get(), 0, GETVAL);
- // 10 or 20 because it could have raced with waiter above.
- EXPECT_TRUE(value == 10 || value == 20) << "value=" << value;
- th.Join();
-
- // Set it back to 0 and ensure that waiters are woken up.
- ScopedThread thZero([&sem] {
- struct sembuf buf = {};
- buf.sem_op = 0;
- ASSERT_THAT(RetryEINTR(semop)(sem.get(), &buf, 1), SyscallSucceeds());
- });
- ASSERT_THAT(semctl(sem.get(), 0, SETVAL, 0), SyscallSucceeds());
- EXPECT_THAT(semctl(sem.get(), 0, GETVAL), SyscallSucceedsWithValue(0));
- thZero.Join();
-}
-
-TEST(SemaphoreTest, SemCtlValAll) {
- AutoSem sem(semget(IPC_PRIVATE, 3, 0600 | IPC_CREAT));
- ASSERT_THAT(sem.get(), SyscallSucceeds());
-
- // Semaphores must start with 0.
- uint16_t get[3] = {10, 10, 10};
- EXPECT_THAT(semctl(sem.get(), 1, GETALL, get), SyscallSucceedsWithValue(0));
- for (auto v : get) {
- EXPECT_EQ(v, 0);
- }
-
- // SetAll and check that they were set.
- uint16_t vals[3] = {0, 10, 20};
- EXPECT_THAT(semctl(sem.get(), 1, SETALL, vals), SyscallSucceedsWithValue(0));
- EXPECT_THAT(semctl(sem.get(), 1, GETALL, get), SyscallSucceedsWithValue(0));
- for (size_t i = 0; i < ABSL_ARRAYSIZE(vals); ++i) {
- EXPECT_EQ(get[i], vals[i]);
- }
-
- EXPECT_THAT(semctl(sem.get(), 1, SETALL, nullptr),
- SyscallFailsWithErrno(EFAULT));
-}
-
-TEST(SemaphoreTest, SemCtlGetPid) {
- AutoSem sem(semget(IPC_PRIVATE, 1, 0600 | IPC_CREAT));
- ASSERT_THAT(sem.get(), SyscallSucceeds());
-
- ASSERT_THAT(semctl(sem.get(), 0, SETVAL, 1), SyscallSucceeds());
- EXPECT_THAT(semctl(sem.get(), 0, GETPID), SyscallSucceedsWithValue(getpid()));
-}
-
-TEST(SemaphoreTest, SemCtlGetPidFork) {
- AutoSem sem(semget(IPC_PRIVATE, 1, 0600 | IPC_CREAT));
- ASSERT_THAT(sem.get(), SyscallSucceeds());
-
- const pid_t child_pid = fork();
- if (child_pid == 0) {
- TEST_PCHECK(semctl(sem.get(), 0, SETVAL, 1) == 0);
- TEST_PCHECK(semctl(sem.get(), 0, GETPID) == getpid());
-
- _exit(0);
- }
- ASSERT_THAT(child_pid, SyscallSucceeds());
-
- int status;
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << " status " << status;
-}
-
-TEST(SemaphoreTest, SemIpcSet) {
- // Drop CAP_IPC_OWNER which allows us to bypass semaphore permissions.
- AutoCapability cap(CAP_IPC_OWNER, false);
-
- AutoSem sem(semget(IPC_PRIVATE, 1, 0600 | IPC_CREAT));
- ASSERT_THAT(sem.get(), SyscallSucceeds());
-
- struct semid_ds semid = {};
- semid.sem_perm.uid = getuid();
- semid.sem_perm.gid = getgid();
-
- // Make semaphore readonly and check that signal fails.
- semid.sem_perm.mode = 0400;
- EXPECT_THAT(semctl(sem.get(), 0, IPC_SET, &semid), SyscallSucceeds());
- struct sembuf buf = {};
- buf.sem_op = 1;
- ASSERT_THAT(semop(sem.get(), &buf, 1), SyscallFailsWithErrno(EACCES));
-
- // Make semaphore writeonly and check that wait for zero fails.
- semid.sem_perm.mode = 0200;
- EXPECT_THAT(semctl(sem.get(), 0, IPC_SET, &semid), SyscallSucceeds());
- buf.sem_op = 0;
- ASSERT_THAT(semop(sem.get(), &buf, 1), SyscallFailsWithErrno(EACCES));
-}
-
-TEST(SemaphoreTest, SemCtlIpcStat) {
- // Drop CAP_IPC_OWNER which allows us to bypass semaphore permissions.
- AutoCapability cap(CAP_IPC_OWNER, false);
- const uid_t kUid = getuid();
- const gid_t kGid = getgid();
- time_t start_time = time(nullptr);
-
- AutoSem sem(semget(IPC_PRIVATE, 10, 0600 | IPC_CREAT));
- ASSERT_THAT(sem.get(), SyscallSucceeds());
-
- struct semid_ds ds;
- EXPECT_THAT(semctl(sem.get(), 0, IPC_STAT, &ds), SyscallSucceeds());
-
- EXPECT_EQ(ds.sem_perm.__key, IPC_PRIVATE);
- EXPECT_EQ(ds.sem_perm.uid, kUid);
- EXPECT_EQ(ds.sem_perm.gid, kGid);
- EXPECT_EQ(ds.sem_perm.cuid, kUid);
- EXPECT_EQ(ds.sem_perm.cgid, kGid);
- EXPECT_EQ(ds.sem_perm.mode, 0600);
- // Last semop time is not set on creation.
- EXPECT_EQ(ds.sem_otime, 0);
- EXPECT_GE(ds.sem_ctime, start_time);
- EXPECT_EQ(ds.sem_nsems, 10);
-
- // The timestamps only have a resolution of seconds; slow down so we actually
- // see the timestamps change.
- absl::SleepFor(absl::Seconds(1));
-
- // Set semid_ds structure of the set.
- auto last_ctime = ds.sem_ctime;
- start_time = time(nullptr);
- struct semid_ds semid_to_set = {};
- semid_to_set.sem_perm.uid = kUid;
- semid_to_set.sem_perm.gid = kGid;
- semid_to_set.sem_perm.mode = 0666;
- ASSERT_THAT(semctl(sem.get(), 0, IPC_SET, &semid_to_set), SyscallSucceeds());
- struct sembuf buf = {};
- buf.sem_op = 1;
- ASSERT_THAT(semop(sem.get(), &buf, 1), SyscallSucceeds());
-
- EXPECT_THAT(semctl(sem.get(), 0, IPC_STAT, &ds), SyscallSucceeds());
- EXPECT_EQ(ds.sem_perm.mode, 0666);
- EXPECT_GE(ds.sem_otime, start_time);
- EXPECT_GT(ds.sem_ctime, last_ctime);
-
- // An invalid semid fails the syscall with errno EINVAL.
- EXPECT_THAT(semctl(sem.get() + 1, 0, IPC_STAT, &ds),
- SyscallFailsWithErrno(EINVAL));
-
- // Make semaphore not readable and check the signal fails.
- semid_to_set.sem_perm.mode = 0200;
- ASSERT_THAT(semctl(sem.get(), 0, IPC_SET, &semid_to_set), SyscallSucceeds());
- EXPECT_THAT(semctl(sem.get(), 0, IPC_STAT, &ds),
- SyscallFailsWithErrno(EACCES));
-}
-
-// Calls semctl(semid, 0, cmd) until the returned value is >= target, an
-// internal timeout expires, or semctl returns an error.
-PosixErrorOr<int> WaitSemctl(int semid, int target, int cmd) {
- constexpr absl::Duration timeout = absl::Seconds(10);
- const auto deadline = absl::Now() + timeout;
- int semcnt = 0;
- while (absl::Now() < deadline) {
- semcnt = semctl(semid, 0, cmd);
- if (semcnt < 0) {
- return PosixError(errno, "semctl failed");
- }
- if (semcnt >= target) {
- break;
- }
- absl::SleepFor(absl::Milliseconds(10));
- }
- return semcnt;
-}
-
-TEST(SemaphoreTest, SemopGetzcnt) {
- // Drop CAP_IPC_OWNER which allows us to bypass semaphore permissions.
- AutoCapability cap(CAP_IPC_OWNER, false);
- // Create a write only semaphore set.
- AutoSem sem(semget(IPC_PRIVATE, 1, 0200 | IPC_CREAT));
- ASSERT_THAT(sem.get(), SyscallSucceeds());
-
- // No read permission to retrieve semzcnt.
- EXPECT_THAT(semctl(sem.get(), 0, GETZCNT), SyscallFailsWithErrno(EACCES));
-
- // Grant the calling thread read permission so GETZCNT can succeed below.
- struct semid_ds ds = {};
- ds.sem_perm.uid = getuid();
- ds.sem_perm.gid = getgid();
- ds.sem_perm.mode = 0600;
- ASSERT_THAT(semctl(sem.get(), 0, IPC_SET, &ds), SyscallSucceeds());
-
- std::vector<pid_t> children;
- ASSERT_THAT(semctl(sem.get(), 0, SETVAL, 1), SyscallSucceeds());
-
- struct sembuf buf = {};
- buf.sem_num = 0;
- buf.sem_op = 0;
- constexpr size_t kLoops = 10;
- for (size_t i = 0; i < kLoops; i++) {
- auto child_pid = fork();
- if (child_pid == 0) {
- TEST_PCHECK(RetryEINTR(semop)(sem.get(), &buf, 1) == 0);
- _exit(0);
- }
- children.push_back(child_pid);
- }
-
- EXPECT_THAT(WaitSemctl(sem.get(), kLoops, GETZCNT),
- IsPosixErrorOkAndHolds(kLoops));
- // Set semval to 0, which wakes up children that sleep on the semop.
- ASSERT_THAT(semctl(sem.get(), 0, SETVAL, 0), SyscallSucceeds());
- for (const auto& child_pid : children) {
- int status;
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0);
- }
- EXPECT_EQ(semctl(sem.get(), 0, GETZCNT), 0);
-}
-
-TEST(SemaphoreTest, SemopGetzcntOnSetRemoval) {
- auto semid = semget(IPC_PRIVATE, 1, 0600 | IPC_CREAT);
- ASSERT_THAT(semid, SyscallSucceeds());
- ASSERT_THAT(semctl(semid, 0, SETVAL, 1), SyscallSucceeds());
- ASSERT_EQ(semctl(semid, 0, GETZCNT), 0);
-
- auto child_pid = fork();
- if (child_pid == 0) {
- struct sembuf buf = {};
- buf.sem_num = 0;
- buf.sem_op = 0;
-
- // Ensure that wait will only unblock when the semaphore is removed. On
- // EINTR retry it may race with deletion and return EINVAL.
- TEST_PCHECK(RetryEINTR(semop)(semid, &buf, 1) < 0 &&
- (errno == EIDRM || errno == EINVAL));
- _exit(0);
- }
-
- EXPECT_THAT(WaitSemctl(semid, 1, GETZCNT), IsPosixErrorOkAndHolds(1));
- // Remove the semaphore set, which fails the sleep semop.
- ASSERT_THAT(semctl(semid, 0, IPC_RMID), SyscallSucceeds());
- int status;
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0);
- EXPECT_THAT(semctl(semid, 0, GETZCNT), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(SemaphoreTest, SemopGetzcntOnSignal) {
- AutoSem sem(semget(IPC_PRIVATE, 1, 0600 | IPC_CREAT));
- ASSERT_THAT(sem.get(), SyscallSucceeds());
- ASSERT_THAT(semctl(sem.get(), 0, SETVAL, 1), SyscallSucceeds());
- ASSERT_EQ(semctl(sem.get(), 0, GETZCNT), 0);
-
- // Saving will cause semop() to be spuriously interrupted.
- DisableSave ds;
-
- auto child_pid = fork();
- if (child_pid == 0) {
- TEST_PCHECK(signal(SIGHUP, [](int sig) -> void {}) != SIG_ERR);
- struct sembuf buf = {};
- buf.sem_num = 0;
- buf.sem_op = 0;
-
- TEST_PCHECK(semop(sem.get(), &buf, 1) < 0 && errno == EINTR);
- _exit(0);
- }
-
- EXPECT_THAT(WaitSemctl(sem.get(), 1, GETZCNT), IsPosixErrorOkAndHolds(1));
- // Send a signal to the child, which fails the sleep semop.
- ASSERT_EQ(kill(child_pid, SIGHUP), 0);
-
- ds.reset();
-
- int status;
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0);
- EXPECT_EQ(semctl(sem.get(), 0, GETZCNT), 0);
-}
-
-TEST(SemaphoreTest, SemopGetncnt) {
- // Drop CAP_IPC_OWNER which allows us to bypass semaphore permissions.
- AutoCapability cap(CAP_IPC_OWNER, false);
- // Create a write only semaphore set.
- AutoSem sem(semget(IPC_PRIVATE, 1, 0200 | IPC_CREAT));
- ASSERT_THAT(sem.get(), SyscallSucceeds());
-
- // No read permission to retrieve semncnt.
- EXPECT_THAT(semctl(sem.get(), 0, GETNCNT), SyscallFailsWithErrno(EACCES));
-
- // Grant the calling thread read permission so GETNCNT can succeed below.
- struct semid_ds ds = {};
- ds.sem_perm.uid = getuid();
- ds.sem_perm.gid = getgid();
- ds.sem_perm.mode = 0600;
- ASSERT_THAT(semctl(sem.get(), 0, IPC_SET, &ds), SyscallSucceeds());
-
- std::vector<pid_t> children;
-
- struct sembuf buf = {};
- buf.sem_num = 0;
- buf.sem_op = -1;
- constexpr size_t kLoops = 10;
- for (size_t i = 0; i < kLoops; i++) {
- auto child_pid = fork();
- if (child_pid == 0) {
- TEST_PCHECK(RetryEINTR(semop)(sem.get(), &buf, 1) == 0);
- _exit(0);
- }
- children.push_back(child_pid);
- }
- EXPECT_THAT(WaitSemctl(sem.get(), kLoops, GETNCNT),
- IsPosixErrorOkAndHolds(kLoops));
- // Set semval to kLoops, which wakes up the children sleeping on the semop.
- ASSERT_THAT(semctl(sem.get(), 0, SETVAL, kLoops), SyscallSucceeds());
- for (const auto& child_pid : children) {
- int status;
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0);
- }
- EXPECT_EQ(semctl(sem.get(), 0, GETNCNT), 0);
-}
-
-TEST(SemaphoreTest, SemopGetncntOnSetRemoval) {
- auto semid = semget(IPC_PRIVATE, 1, 0600 | IPC_CREAT);
- ASSERT_THAT(semid, SyscallSucceeds());
- ASSERT_EQ(semctl(semid, 0, GETNCNT), 0);
-
- auto child_pid = fork();
- if (child_pid == 0) {
- struct sembuf buf = {};
- buf.sem_num = 0;
- buf.sem_op = -1;
-
- // Ensure that wait will only unblock when the semaphore is removed. On
- // EINTR retry it may race with deletion and return EINVAL.
- TEST_PCHECK(RetryEINTR(semop)(semid, &buf, 1) < 0 &&
- (errno == EIDRM || errno == EINVAL));
- _exit(0);
- }
-
- EXPECT_THAT(WaitSemctl(semid, 1, GETNCNT), IsPosixErrorOkAndHolds(1));
- // Remove the semaphore set, which fails the sleep semop.
- ASSERT_THAT(semctl(semid, 0, IPC_RMID), SyscallSucceeds());
- int status;
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0);
- EXPECT_THAT(semctl(semid, 0, GETNCNT), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(SemaphoreTest, SemopGetncntOnSignal) {
- AutoSem sem(semget(IPC_PRIVATE, 1, 0600 | IPC_CREAT));
- ASSERT_THAT(sem.get(), SyscallSucceeds());
- ASSERT_EQ(semctl(sem.get(), 0, GETNCNT), 0);
-
- // Saving will cause semop() to be spuriously interrupted.
- DisableSave ds;
-
- auto child_pid = fork();
- if (child_pid == 0) {
- TEST_PCHECK(signal(SIGHUP, [](int sig) -> void {}) != SIG_ERR);
- struct sembuf buf = {};
- buf.sem_num = 0;
- buf.sem_op = -1;
-
- TEST_PCHECK(semop(sem.get(), &buf, 1) < 0 && errno == EINTR);
- _exit(0);
- }
- EXPECT_THAT(WaitSemctl(sem.get(), 1, GETNCNT), IsPosixErrorOkAndHolds(1));
- // Send a signal to the child, which fails the sleep semop.
- ASSERT_EQ(kill(child_pid, SIGHUP), 0);
-
- ds.reset();
-
- int status;
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0);
- EXPECT_EQ(semctl(sem.get(), 0, GETNCNT), 0);
-}
-
-#ifndef SEM_STAT_ANY
-#define SEM_STAT_ANY 20
-#endif // SEM_STAT_ANY
-
-TEST(SemaphoreTest, IpcInfo) {
- constexpr int kLoops = 5;
- std::set<int> sem_ids;
- struct seminfo info;
- // Drop CAP_IPC_OWNER which allows us to bypass semaphore permissions.
- AutoCapability cap(CAP_IPC_OWNER, false);
- for (int i = 0; i < kLoops; i++) {
- AutoSem sem(semget(IPC_PRIVATE, 1, 0600 | IPC_CREAT));
- ASSERT_THAT(sem.get(), SyscallSucceeds());
- sem_ids.insert(sem.release());
- }
- ASSERT_EQ(sem_ids.size(), kLoops);
-
- int max_used_index = 0;
- EXPECT_THAT(max_used_index = semctl(0, 0, IPC_INFO, &info),
- SyscallSucceeds());
-
- std::set<int> sem_ids_before_max_index;
- for (int i = 0; i <= max_used_index; i++) {
- struct semid_ds ds = {};
- int sem_id = semctl(i, 0, SEM_STAT, &ds);
- // Only if index i is used within the registry.
- if (sem_ids.find(sem_id) != sem_ids.end()) {
- struct semid_ds ipc_stat_ds;
- ASSERT_THAT(semctl(sem_id, 0, IPC_STAT, &ipc_stat_ds), SyscallSucceeds());
- EXPECT_TRUE(ds == ipc_stat_ds);
-
- // Remove the semaphore set's read permission.
- struct semid_ds ipc_set_ds;
- ipc_set_ds.sem_perm.uid = getuid();
- ipc_set_ds.sem_perm.gid = getgid();
- // Keep the semaphore set's write permission so that it could be removed.
- ipc_set_ds.sem_perm.mode = 0200;
- // IPC_SET command here updates sem_ctime member of the sem.
- ASSERT_THAT(semctl(sem_id, 0, IPC_SET, &ipc_set_ds), SyscallSucceeds());
- ASSERT_THAT(semctl(i, 0, SEM_STAT, &ds), SyscallFailsWithErrno(EACCES));
- int val = semctl(i, 0, SEM_STAT_ANY, &ds);
- if (val == -1) {
- // Only if the kernel doesn't support the command SEM_STAT_ANY.
- EXPECT_TRUE(errno == EINVAL || errno == EFAULT);
- } else {
- EXPECT_EQ(sem_id, val);
- EXPECT_LE(ipc_stat_ds.sem_ctime, ds.sem_ctime);
- ipc_stat_ds.sem_ctime = 0;
- ipc_stat_ds.sem_perm.mode = 0200;
- ds.sem_ctime = 0;
- EXPECT_TRUE(ipc_stat_ds == ds);
- }
- sem_ids_before_max_index.insert(sem_id);
- }
- }
- EXPECT_EQ(sem_ids_before_max_index.size(), kLoops);
- for (const int sem_id : sem_ids) {
- ASSERT_THAT(semctl(sem_id, 0, IPC_RMID), SyscallSucceeds());
- }
-
- ASSERT_THAT(semctl(0, 0, IPC_INFO, &info), SyscallSucceeds());
- EXPECT_EQ(info.semmap, kSemMap);
- EXPECT_EQ(info.semmni, kSemMni);
- EXPECT_EQ(info.semmns, kSemMns);
- EXPECT_EQ(info.semmnu, kSemMnu);
- EXPECT_EQ(info.semmsl, kSemMsl);
- EXPECT_EQ(info.semopm, kSemOpm);
- EXPECT_EQ(info.semume, kSemUme);
- EXPECT_EQ(info.semusz, kSemUsz);
- EXPECT_EQ(info.semvmx, kSemVmx);
- EXPECT_EQ(info.semaem, kSemAem);
-}
-
-TEST(SemaphoreTest, SemInfo) {
- constexpr int kLoops = 5;
- constexpr int kSemSetSize = 3;
- std::set<int> sem_ids;
- struct seminfo info;
- // Drop CAP_IPC_OWNER which allows us to bypass semaphore permissions.
- AutoCapability cap(CAP_IPC_OWNER, false);
- for (int i = 0; i < kLoops; i++) {
- AutoSem sem(semget(IPC_PRIVATE, kSemSetSize, 0600 | IPC_CREAT));
- ASSERT_THAT(sem.get(), SyscallSucceeds());
- sem_ids.insert(sem.release());
- }
- ASSERT_EQ(sem_ids.size(), kLoops);
- int max_used_index = 0;
- EXPECT_THAT(max_used_index = semctl(0, 0, SEM_INFO, &info),
- SyscallSucceeds());
- EXPECT_EQ(info.semmap, kSemMap);
- EXPECT_EQ(info.semmni, kSemMni);
- EXPECT_EQ(info.semmns, kSemMns);
- EXPECT_EQ(info.semmnu, kSemMnu);
- EXPECT_EQ(info.semmsl, kSemMsl);
- EXPECT_EQ(info.semopm, kSemOpm);
- EXPECT_EQ(info.semume, kSemUme);
- // There could be semaphores existing in the system during the test, which
- // prevents the test from getting an exact number, but the test can expect at
- // least the number of semaphores it creates at the beginning of the test.
- EXPECT_GE(info.semusz, sem_ids.size());
- EXPECT_EQ(info.semvmx, kSemVmx);
- EXPECT_GE(info.semaem, sem_ids.size() * kSemSetSize);
-
- std::set<int> sem_ids_before_max_index;
- for (int i = 0; i <= max_used_index; i++) {
- struct semid_ds ds = {};
- int sem_id = semctl(i, 0, SEM_STAT, &ds);
- // Only if index i is used within the registry.
- if (sem_ids.find(sem_id) != sem_ids.end()) {
- struct semid_ds ipc_stat_ds;
- ASSERT_THAT(semctl(sem_id, 0, IPC_STAT, &ipc_stat_ds), SyscallSucceeds());
- EXPECT_TRUE(ds == ipc_stat_ds);
-
- // Remove the semaphore set's read permission.
- struct semid_ds ipc_set_ds;
- ipc_set_ds.sem_perm.uid = getuid();
- ipc_set_ds.sem_perm.gid = getgid();
- // Keep the semaphore set's write permission so that it could be removed.
- ipc_set_ds.sem_perm.mode = 0200;
- // IPC_SET command here updates sem_ctime member of the sem.
- ASSERT_THAT(semctl(sem_id, 0, IPC_SET, &ipc_set_ds), SyscallSucceeds());
- ASSERT_THAT(semctl(i, 0, SEM_STAT, &ds), SyscallFailsWithErrno(EACCES));
- int val = semctl(i, 0, SEM_STAT_ANY, &ds);
-
- if (val == -1) {
- // Only if the kernel doesn't support the command SEM_STAT_ANY.
- EXPECT_TRUE(errno == EINVAL || errno == EFAULT);
- } else {
- EXPECT_EQ(val, sem_id);
- EXPECT_LE(ipc_stat_ds.sem_ctime, ds.sem_ctime);
- ipc_stat_ds.sem_ctime = 0;
- ipc_stat_ds.sem_perm.mode = 0200;
- ds.sem_ctime = 0;
- EXPECT_TRUE(ipc_stat_ds == ds);
- }
- sem_ids_before_max_index.insert(sem_id);
- }
- }
- EXPECT_EQ(sem_ids_before_max_index.size(), kLoops);
- for (const int sem_id : sem_ids) {
- ASSERT_THAT(semctl(sem_id, 0, IPC_RMID), SyscallSucceeds());
- }
-
- ASSERT_THAT(semctl(0, 0, SEM_INFO, &info), SyscallSucceeds());
- EXPECT_EQ(info.semmap, kSemMap);
- EXPECT_EQ(info.semmni, kSemMni);
- EXPECT_EQ(info.semmns, kSemMns);
- EXPECT_EQ(info.semmnu, kSemMnu);
- EXPECT_EQ(info.semmsl, kSemMsl);
- EXPECT_EQ(info.semopm, kSemOpm);
- EXPECT_EQ(info.semume, kSemUme);
- // Because semaphores not created by this test may exist, we can't determine
- // the exact number of semaphore sets and semaphores; as a result, semusz and
- // semaem can take arbitrary non-negative values, so the test does not check
- // them.
- EXPECT_EQ(info.semvmx, kSemVmx);
-}
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/sendfile.cc b/test/syscalls/linux/sendfile.cc
deleted file mode 100644
index bea4ee71c..000000000
--- a/test/syscalls/linux/sendfile.cc
+++ /dev/null
@@ -1,729 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <linux/unistd.h>
-#include <sys/eventfd.h>
-#include <sys/sendfile.h>
-#include <unistd.h>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/strings/string_view.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/util/eventfd_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/signal_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-#include "test/util/timer_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(SendFileTest, SendZeroBytes) {
- // Create temp files.
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const TempPath out_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- // Open the input file as read only.
- const FileDescriptor inf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDONLY));
-
- // Open the output file as write only.
- const FileDescriptor outf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_WRONLY));
-
- // Send data and verify that sendfile returns the correct value.
- EXPECT_THAT(sendfile(outf.get(), inf.get(), nullptr, 0),
- SyscallSucceedsWithValue(0));
-}
-
-TEST(SendFileTest, InvalidOffset) {
- // Create temp files.
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const TempPath out_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- // Open the input file as read only.
- const FileDescriptor inf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDONLY));
-
- // Open the output file as write only.
- const FileDescriptor outf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_WRONLY));
-
- // Send data and verify that sendfile returns the correct value.
- off_t offset = -1;
- EXPECT_THAT(sendfile(outf.get(), inf.get(), &offset, 0),
- SyscallFailsWithErrno(EINVAL));
-}
-
-int memfd_create(const std::string& name, unsigned int flags) {
- return syscall(__NR_memfd_create, name.c_str(), flags);
-}
-
-TEST(SendFileTest, Overflow) {
- // Create input file.
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor inf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDONLY));
-
- // Open the output file.
- int fd;
- EXPECT_THAT(fd = memfd_create("overflow", 0), SyscallSucceeds());
- const FileDescriptor outf(fd);
-
- // out_offset + kSize overflows INT64_MAX.
- loff_t out_offset = 0x7ffffffffffffffeull;
- constexpr int kSize = 3;
- EXPECT_THAT(sendfile(outf.get(), inf.get(), &out_offset, kSize),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(SendFileTest, SendTrivially) {
- // Create temp files.
- constexpr char kData[] = "To be, or not to be, that is the question:";
- constexpr int kDataSize = sizeof(kData) - 1;
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), kData, TempPath::kDefaultFileMode));
- const TempPath out_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- // Open the input file as read only.
- const FileDescriptor inf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDONLY));
-
- // Open the output file as write only.
- FileDescriptor outf;
- outf = ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_WRONLY));
-
- // Send data and verify that sendfile returns the correct value.
- int bytes_sent;
- EXPECT_THAT(bytes_sent = sendfile(outf.get(), inf.get(), nullptr, kDataSize),
- SyscallSucceedsWithValue(kDataSize));
-
- // Close outf to avoid leak.
- outf.reset();
-
- // Open the output file as read only.
- outf = ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_RDONLY));
-
- // Verify that the output file has the correct data.
- char actual[kDataSize];
- ASSERT_THAT(read(outf.get(), &actual, bytes_sent),
- SyscallSucceedsWithValue(kDataSize));
- EXPECT_EQ(kData, absl::string_view(actual, bytes_sent));
-}
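-
-// sendfile(2) may legitimately transfer fewer bytes than requested, so real
-// callers typically loop until done. The tests below check exact counts for
-// small transfers; this hypothetical helper only sketches the general pattern
-// and is not used by the tests.
-ssize_t SendFileAllSketch(int out_fd, int in_fd, size_t count) {
- size_t total = 0;
- while (total < count) {
- ssize_t n = sendfile(out_fd, in_fd, nullptr, count - total);
- if (n < 0) {
- return n; // Let the caller inspect errno.
- }
- if (n == 0) {
- break; // EOF on in_fd.
- }
- total += static_cast<size_t>(n);
- }
- return static_cast<ssize_t>(total);
-}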
-
-TEST(SendFileTest, SendTriviallyWithBothFilesReadWrite) {
- // Create temp files.
- constexpr char kData[] = "Whether 'tis nobler in the mind to suffer";
- constexpr int kDataSize = sizeof(kData) - 1;
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), kData, TempPath::kDefaultFileMode));
- const TempPath out_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- // Open the input file as readwrite.
- const FileDescriptor inf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDWR));
-
- // Open the output file as readwrite.
- FileDescriptor outf;
- outf = ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_RDWR));
-
- // Send data and verify that sendfile returns the correct value.
- int bytes_sent;
- EXPECT_THAT(bytes_sent = sendfile(outf.get(), inf.get(), nullptr, kDataSize),
- SyscallSucceedsWithValue(kDataSize));
-
- // Close outf to avoid leak.
- outf.reset();
-
- // Open the output file as read only.
- outf = ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_RDONLY));
-
- // Verify that the output file has the correct data.
- char actual[kDataSize];
- ASSERT_THAT(read(outf.get(), &actual, bytes_sent),
- SyscallSucceedsWithValue(kDataSize));
- EXPECT_EQ(kData, absl::string_view(actual, bytes_sent));
-}
-
-TEST(SendFileTest, SendAndUpdateFileOffset) {
- // Create temp files.
- // Test input string length must be > 2 AND even.
- constexpr char kData[] = "The slings and arrows of outrageous fortune,";
- constexpr int kDataSize = sizeof(kData) - 1;
- constexpr int kHalfDataSize = kDataSize / 2;
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), kData, TempPath::kDefaultFileMode));
- const TempPath out_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- // Open the input file as read only.
- const FileDescriptor inf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDONLY));
-
- // Open the output file as write only.
- FileDescriptor outf;
- outf = ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_WRONLY));
-
- // Send data and verify that sendfile returns the correct value.
- int bytes_sent;
- EXPECT_THAT(
- bytes_sent = sendfile(outf.get(), inf.get(), nullptr, kHalfDataSize),
- SyscallSucceedsWithValue(kHalfDataSize));
-
- // Close outf to avoid leak.
- outf.reset();
-
- // Open the output file as read only.
- outf = ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_RDONLY));
-
- // Verify that the output file has the correct data.
- char actual[kHalfDataSize];
- ASSERT_THAT(read(outf.get(), &actual, bytes_sent),
- SyscallSucceedsWithValue(kHalfDataSize));
- EXPECT_EQ(absl::string_view(kData, kHalfDataSize),
- absl::string_view(actual, bytes_sent));
-
- // Verify that the input file offset has been updated.
- ASSERT_THAT(read(inf.get(), &actual, kDataSize - bytes_sent),
- SyscallSucceedsWithValue(kHalfDataSize));
- EXPECT_EQ(
- absl::string_view(kData + kDataSize - bytes_sent, kDataSize - bytes_sent),
- absl::string_view(actual, kHalfDataSize));
-}
-
-TEST(SendFileTest, SendToDevZeroAndUpdateFileOffset) {
- // Create temp files.
- // Test input string length must be > 2 AND even.
- constexpr char kData[] = "The slings and arrows of outrageous fortune,";
- constexpr int kDataSize = sizeof(kData) - 1;
- constexpr int kHalfDataSize = kDataSize / 2;
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), kData, TempPath::kDefaultFileMode));
-
- // Open the input file as read only.
- const FileDescriptor inf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDONLY));
-
- // Open /dev/zero as write only.
- const FileDescriptor outf =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/zero", O_WRONLY));
-
- // Send data and verify that sendfile returns the correct value.
- int bytes_sent;
- EXPECT_THAT(
- bytes_sent = sendfile(outf.get(), inf.get(), nullptr, kHalfDataSize),
- SyscallSucceedsWithValue(kHalfDataSize));
-
- char actual[kHalfDataSize];
- // Verify that the input file offset has been updated.
- ASSERT_THAT(read(inf.get(), &actual, kDataSize - bytes_sent),
- SyscallSucceedsWithValue(kHalfDataSize));
- EXPECT_EQ(
- absl::string_view(kData + kDataSize - bytes_sent, kDataSize - bytes_sent),
- absl::string_view(actual, kHalfDataSize));
-}
-
-TEST(SendFileTest, SendAndUpdateFileOffsetFromNonzeroStartingPoint) {
- // Create temp files.
- // Test input string length must be > 2 AND divisible by 4.
- constexpr char kData[] = "The slings and arrows of outrageous fortune,";
- constexpr int kDataSize = sizeof(kData) - 1;
- constexpr int kHalfDataSize = kDataSize / 2;
- constexpr int kQuarterDataSize = kHalfDataSize / 2;
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), kData, TempPath::kDefaultFileMode));
- const TempPath out_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- // Open the input file as read only.
- const FileDescriptor inf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDONLY));
-
- // Open the output file as write only.
- FileDescriptor outf;
- outf = ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_WRONLY));
-
- // Read a quarter of the data from the infile, which should update the file
- // offset. We don't actually care about the data, so it goes into a garbage
- // buffer.
- char garbage[kQuarterDataSize];
- ASSERT_THAT(read(inf.get(), &garbage, kQuarterDataSize),
- SyscallSucceedsWithValue(kQuarterDataSize));
-
- // Send data and verify that sendfile returns the correct value.
- int bytes_sent;
- EXPECT_THAT(
- bytes_sent = sendfile(outf.get(), inf.get(), nullptr, kHalfDataSize),
- SyscallSucceedsWithValue(kHalfDataSize));
-
- // Close outf to avoid leak.
- outf.reset();
-
- // Open the output file as read only.
- outf = ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_RDONLY));
-
- // Verify that the output file has the correct data.
- char actual[kHalfDataSize];
- ASSERT_THAT(read(outf.get(), &actual, bytes_sent),
- SyscallSucceedsWithValue(kHalfDataSize));
- EXPECT_EQ(absl::string_view(kData + kQuarterDataSize, kHalfDataSize),
- absl::string_view(actual, bytes_sent));
-
- // Verify that the input file offset has been updated.
- ASSERT_THAT(read(inf.get(), &actual, kQuarterDataSize),
- SyscallSucceedsWithValue(kQuarterDataSize));
-
- EXPECT_EQ(
- absl::string_view(kData + kDataSize - kQuarterDataSize, kQuarterDataSize),
- absl::string_view(actual, kQuarterDataSize));
-}
-
-TEST(SendFileTest, SendAndUpdateGivenOffset) {
- // Create temp files.
- // Test input string length must be >= 4 AND divisible by 4.
- constexpr char kData[] = "Or to take Arms against a Sea of troubles,";
- constexpr int kDataSize = sizeof(kData) + 1;
- constexpr int kHalfDataSize = kDataSize / 2;
- constexpr int kQuarterDataSize = kHalfDataSize / 2;
- constexpr int kThreeFourthsDataSize = 3 * kDataSize / 4;
-
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), kData, TempPath::kDefaultFileMode));
- const TempPath out_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- // Open the input file as read only.
- const FileDescriptor inf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDONLY));
-
- // Open the output file as write only.
- FileDescriptor outf;
- outf = ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_WRONLY));
-
- // Create offset for sending.
- off_t offset = kQuarterDataSize;
-
- // Send data and verify that sendfile returns the correct value.
- int bytes_sent;
- EXPECT_THAT(
- bytes_sent = sendfile(outf.get(), inf.get(), &offset, kHalfDataSize),
- SyscallSucceedsWithValue(kHalfDataSize));
-
- // Close outf to avoid leak.
- outf.reset();
-
- // Open the output file as read only.
- outf = ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_RDONLY));
-
- // Verify that the output file has the correct data.
- char actual[kHalfDataSize];
- ASSERT_THAT(read(outf.get(), &actual, bytes_sent),
- SyscallSucceedsWithValue(kHalfDataSize));
- EXPECT_EQ(absl::string_view(kData + kQuarterDataSize, kHalfDataSize),
- absl::string_view(actual, bytes_sent));
-
- // Verify that the input file offset has NOT been updated.
- ASSERT_THAT(read(inf.get(), &actual, kHalfDataSize),
- SyscallSucceedsWithValue(kHalfDataSize));
- EXPECT_EQ(absl::string_view(kData, kHalfDataSize),
- absl::string_view(actual, kHalfDataSize));
-
- // Verify that the offset pointer has been updated.
- EXPECT_EQ(offset, kThreeFourthsDataSize);
-}
-
-TEST(SendFileTest, DoNotSendfileIfOutfileIsAppendOnly) {
- // Create temp files.
- constexpr char kData[] = "And by opposing end them: to die, to sleep";
- constexpr int kDataSize = sizeof(kData) - 1;
-
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), kData, TempPath::kDefaultFileMode));
- const TempPath out_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- // Open the input file as read only.
- const FileDescriptor inf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDONLY));
-
- // Open the output file as append only.
- const FileDescriptor outf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_WRONLY | O_APPEND));
-
- // Send data and verify that sendfile returns the correct errno.
- EXPECT_THAT(sendfile(outf.get(), inf.get(), nullptr, kDataSize),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(SendFileTest, AppendCheckOrdering) {
- constexpr char kData[] = "And by opposing end them: to die, to sleep";
- constexpr int kDataSize = sizeof(kData) - 1;
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), kData, TempPath::kDefaultFileMode));
-
- const FileDescriptor read =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDONLY));
- const FileDescriptor write =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_WRONLY));
- const FileDescriptor append =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_APPEND));
-
- // Check that read/write file mode is verified before append.
- EXPECT_THAT(sendfile(append.get(), read.get(), nullptr, kDataSize),
- SyscallFailsWithErrno(EBADF));
- EXPECT_THAT(sendfile(write.get(), write.get(), nullptr, kDataSize),
- SyscallFailsWithErrno(EBADF));
-}
-
-TEST(SendFileTest, DoNotSendfileIfOutfileIsNotWritable) {
- // Create temp files.
- constexpr char kData[] = "No more; and by a sleep, to say we end";
- constexpr int kDataSize = sizeof(kData) - 1;
-
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), kData, TempPath::kDefaultFileMode));
- const TempPath out_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- // Open the input file as read only.
- const FileDescriptor inf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDONLY));
-
- // Open the output file as read only.
- const FileDescriptor outf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_RDONLY));
-
- // Send data and verify that sendfile returns the correct errno.
- EXPECT_THAT(sendfile(outf.get(), inf.get(), nullptr, kDataSize),
- SyscallFailsWithErrno(EBADF));
-}
-
-TEST(SendFileTest, DoNotSendfileIfInfileIsNotReadable) {
- // Create temp files.
- constexpr char kData[] = "the heart-ache, and the thousand natural shocks";
- constexpr int kDataSize = sizeof(kData) - 1;
-
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), kData, TempPath::kDefaultFileMode));
- const TempPath out_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- // Open the input file as write only.
- const FileDescriptor inf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_WRONLY));
-
- // Open the output file as write only.
- const FileDescriptor outf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_WRONLY));
-
- // Send data and verify that sendfile returns the correct errno.
- EXPECT_THAT(sendfile(outf.get(), inf.get(), nullptr, kDataSize),
- SyscallFailsWithErrno(EBADF));
-}
-
-TEST(SendFileTest, DoNotSendANegativeNumberOfBytes) {
- // Create temp files.
- constexpr char kData[] = "that Flesh is heir to? 'Tis a consummation";
-
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), kData, TempPath::kDefaultFileMode));
- const TempPath out_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- // Open the input file as read only.
- const FileDescriptor inf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDONLY));
-
- // Open the output file as write only.
- const FileDescriptor outf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_WRONLY));
-
- // Send data and verify that sendfile returns the correct errno.
- EXPECT_THAT(sendfile(outf.get(), inf.get(), nullptr, -1),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(SendFileTest, SendTheCorrectNumberOfBytesEvenIfWeTryToSendTooManyBytes) {
- // Create temp files.
- constexpr char kData[] = "devoutly to be wished. To die, to sleep,";
- constexpr int kDataSize = sizeof(kData) - 1;
-
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), kData, TempPath::kDefaultFileMode));
- const TempPath out_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- // Open the input file as read only.
- const FileDescriptor inf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDONLY));
-
- // Open the output file as write only.
- FileDescriptor outf;
- outf = ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_WRONLY));
-
- // Send data and verify that sendfile returns the correct value.
- int bytes_sent;
- EXPECT_THAT(
- bytes_sent = sendfile(outf.get(), inf.get(), nullptr, kDataSize + 100),
- SyscallSucceedsWithValue(kDataSize));
-
- // Close outf to avoid a leak.
- outf.reset();
-
- // Open the output file as read only.
- outf = ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_RDONLY));
-
- // Verify that the output file has the correct data.
- char actual[kDataSize];
- ASSERT_THAT(read(outf.get(), &actual, bytes_sent),
- SyscallSucceedsWithValue(kDataSize));
- EXPECT_EQ(kData, absl::string_view(actual, bytes_sent));
-}
-
-TEST(SendFileTest, SendToNotARegularFile) {
- // Make temp input directory and open as read only.
- const TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const FileDescriptor inf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(dir.path(), O_RDONLY));
-
- // Make temp output file and open as write only.
- const TempPath out_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor outf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_WRONLY));
-
- // Receive an error since a directory is not a regular file.
- EXPECT_THAT(sendfile(outf.get(), inf.get(), nullptr, 0),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(SendFileTest, SendPipeWouldBlock) {
- // Create temp file.
- constexpr char kData[] =
- "The fool doth think he is wise, but the wise man knows himself to be a "
- "fool.";
- constexpr int kDataSize = sizeof(kData) - 1;
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), kData, TempPath::kDefaultFileMode));
-
- // Open the input file as read only.
- const FileDescriptor inf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDONLY));
-
- // Set up the output pipe.
- int fds[2];
- ASSERT_THAT(pipe2(fds, O_NONBLOCK), SyscallSucceeds());
- const FileDescriptor rfd(fds[0]);
- const FileDescriptor wfd(fds[1]);
-
- // Fill up the pipe's buffer.
- int pipe_size = -1;
- ASSERT_THAT(pipe_size = fcntl(wfd.get(), F_GETPIPE_SZ), SyscallSucceeds());
- std::vector<char> buf(2 * pipe_size);
- ASSERT_THAT(write(wfd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(pipe_size));
-
- EXPECT_THAT(sendfile(wfd.get(), inf.get(), nullptr, kDataSize),
- SyscallFailsWithErrno(EWOULDBLOCK));
-}
-
-TEST(SendFileTest, SendPipeEOF) {
- // Create and open an empty input file.
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor inf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDONLY));
-
- // Set up the output pipe.
- int fds[2];
- ASSERT_THAT(pipe2(fds, O_NONBLOCK), SyscallSucceeds());
- const FileDescriptor rfd(fds[0]);
- const FileDescriptor wfd(fds[1]);
-
- EXPECT_THAT(sendfile(wfd.get(), inf.get(), nullptr, 123),
- SyscallSucceedsWithValue(0));
-}
-
-TEST(SendFileTest, SendToFullPipeReturnsEAGAIN) {
- // Create and open an empty input file.
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor in_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDWR));
-
- // Set up the output pipe.
- int fds[2];
- ASSERT_THAT(pipe2(fds, O_NONBLOCK), SyscallSucceeds());
- const FileDescriptor rfd(fds[0]);
- const FileDescriptor wfd(fds[1]);
-
- int pipe_size = -1;
- ASSERT_THAT(pipe_size = fcntl(wfd.get(), F_GETPIPE_SZ), SyscallSucceeds());
- int data_size = pipe_size * 8;
- ASSERT_THAT(ftruncate(in_fd.get(), data_size), SyscallSucceeds());
-
- ASSERT_THAT(sendfile(wfd.get(), in_fd.get(), 0, data_size),
- SyscallSucceeds());
- EXPECT_THAT(sendfile(wfd.get(), in_fd.get(), 0, data_size),
- SyscallFailsWithErrno(EAGAIN));
-}
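The pipe tests around this point all need a pipe filled to capacity before calling sendfile(). F_GETPIPE_SZ reports that capacity, and a single write of exactly that many bytes to an empty pipe is accepted in full, after which a non-blocking writer sees EAGAIN. A compact sketch under those assumptions (FillPipe is an illustrative name, not from the test):

#ifndef _GNU_SOURCE
#define _GNU_SOURCE  // For F_GETPIPE_SZ.
#endif
#include <fcntl.h>
#include <unistd.h>

#include <vector>

// Sketch: fill a non-blocking pipe so that the next write returns EAGAIN.
bool FillPipe(int write_fd) {
  int capacity = fcntl(write_fd, F_GETPIPE_SZ);
  if (capacity < 0) return false;
  std::vector<char> buf(capacity);
  // An empty pipe accepts exactly 'capacity' bytes in a single write.
  return write(write_fd, buf.data(), buf.size()) ==
         static_cast<ssize_t>(capacity);
}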
-
-TEST(SendFileTest, SendPipeBlocks) {
- // Create temp file.
- constexpr char kData[] =
- "The fault, dear Brutus, is not in our stars, but in ourselves.";
- constexpr int kDataSize = sizeof(kData) - 1;
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), kData, TempPath::kDefaultFileMode));
-
- // Open the input file as read only.
- const FileDescriptor inf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDONLY));
-
- // Set up the output pipe.
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- const FileDescriptor rfd(fds[0]);
- const FileDescriptor wfd(fds[1]);
-
- // Fill up the pipe's buffer.
- int pipe_size = -1;
- ASSERT_THAT(pipe_size = fcntl(wfd.get(), F_GETPIPE_SZ), SyscallSucceeds());
- std::vector<char> buf(pipe_size);
- ASSERT_THAT(write(wfd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(pipe_size));
-
- ScopedThread t([&]() {
- absl::SleepFor(absl::Milliseconds(100));
- ASSERT_THAT(read(rfd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(pipe_size));
- });
-
- EXPECT_THAT(sendfile(wfd.get(), inf.get(), nullptr, kDataSize),
- SyscallSucceedsWithValue(kDataSize));
-}
-
-TEST(SendFileTest, SendToSpecialFile) {
- // Create temp file.
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), "", TempPath::kDefaultFileMode));
-
- const FileDescriptor inf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDWR));
- constexpr int kSize = 0x7ff;
- ASSERT_THAT(ftruncate(inf.get(), kSize), SyscallSucceeds());
-
- auto eventfd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD());
-
- // An eventfd only accepts data in 8-byte units, so the transfer is rounded
- // down to a multiple of 8.
- EXPECT_THAT(sendfile(eventfd.get(), inf.get(), nullptr, 0xfffff),
- SyscallSucceedsWithValue(kSize & (~7)));
-}
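SendToSpecialFile relies on eventfd's write granularity: the descriptor is a 64-bit counter, so every successful write must be exactly 8 bytes, and sendfile() into it can therefore only transfer a multiple of 8 (0x7ff rounds down to 0x7f8). A standalone sketch of that property (function name ours; error handling omitted):

#include <stdint.h>
#include <sys/eventfd.h>
#include <unistd.h>

// Sketch: eventfd only accepts 8-byte (uint64_t) writes.
void EventfdWriteGranularity() {
  int efd = eventfd(0, 0);
  uint64_t one = 1;
  write(efd, &one, sizeof(one));  // 8 bytes: accepted, counter becomes 1.
  char byte = 0;
  write(efd, &byte, 1);           // 1 byte: rejected with EINVAL.
  close(efd);
}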
-
-TEST(SendFileTest, SendFileToPipe) {
- // Create temp file.
- constexpr char kData[] = "<insert-quote-here>";
- constexpr int kDataSize = sizeof(kData) - 1;
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), kData, TempPath::kDefaultFileMode));
- const FileDescriptor inf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDONLY));
-
- // Create the destination pipe.
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- const FileDescriptor rfd(fds[0]);
- const FileDescriptor wfd(fds[1]);
-
- // Expect to read up to the given size.
- std::vector<char> buf(kDataSize);
- ScopedThread t([&]() {
- absl::SleepFor(absl::Milliseconds(100));
- ASSERT_THAT(read(rfd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(kDataSize));
- });
-
- // Send with twice the size of the file, which should hit EOF.
- EXPECT_THAT(sendfile(wfd.get(), inf.get(), nullptr, kDataSize * 2),
- SyscallSucceedsWithValue(kDataSize));
-}
-
-TEST(SendFileTest, SendFileToSelf) {
- int rawfd;
- ASSERT_THAT(rawfd = memfd_create("memfd", 0), SyscallSucceeds());
- const FileDescriptor fd(rawfd);
-
- char c = 0x01;
- ASSERT_THAT(WriteFd(fd.get(), &c, 1), SyscallSucceedsWithValue(1));
-
- // Arbitrarily chosen to make sendfile() take long enough that the sentry
- // watchdog usually fires unless it's reset by sendfile() between iterations
- // of the buffered copy. See b/172076632.
- constexpr size_t kSendfileSize = 0xa00000;
-
- off_t offset = 0;
- ASSERT_THAT(sendfile(fd.get(), fd.get(), &offset, kSendfileSize),
- SyscallSucceedsWithValue(kSendfileSize));
-}
-
-static volatile int signaled = 0;
-void SigUsr1Handler(int sig, siginfo_t* info, void* context) { signaled = 1; }
-
-TEST(SendFileTest, ToEventFDDoesNotSpin) {
- FileDescriptor efd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, 0));
-
- // Write the maximum value of an eventfd to a file.
- const uint64_t kMaxEventfdValue = 0xfffffffffffffffe;
- const auto tempfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const auto tempfd = ASSERT_NO_ERRNO_AND_VALUE(Open(tempfile.path(), O_RDWR));
- ASSERT_THAT(
- pwrite(tempfd.get(), &kMaxEventfdValue, sizeof(kMaxEventfdValue), 0),
- SyscallSucceedsWithValue(sizeof(kMaxEventfdValue)));
-
- // Set the eventfd's value to 1.
- const uint64_t kOne = 1;
- ASSERT_THAT(write(efd.get(), &kOne, sizeof(kOne)),
- SyscallSucceedsWithValue(sizeof(kOne)));
-
- // Set up signal handler.
- struct sigaction sa = {};
- sa.sa_sigaction = SigUsr1Handler;
- sa.sa_flags = SA_SIGINFO;
- const auto cleanup_sigact =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGUSR1, sa));
-
- // Send SIGUSR1 to this thread in 1 second.
- struct sigevent sev = {};
- sev.sigev_notify = SIGEV_THREAD_ID;
- sev.sigev_signo = SIGUSR1;
- sev.sigev_notify_thread_id = gettid();
- auto timer = ASSERT_NO_ERRNO_AND_VALUE(TimerCreate(CLOCK_MONOTONIC, sev));
- struct itimerspec its = {};
- its.it_value = absl::ToTimespec(absl::Seconds(1));
- DisableSave ds; // Asserting an EINTR.
- ASSERT_NO_ERRNO(timer.Set(0, its));
-
- // Sendfile from tempfd to the eventfd. Since the eventfd is not already at
- // its maximum value, the eventfd is "ready for writing"; however, since the
- // eventfd's existing value plus the new value would exceed the maximum, the
- // write should internally fail with EWOULDBLOCK. In this case, sendfile()
- // should block instead of spinning, and eventually be interrupted by our
- // timer. See b/172075629.
- EXPECT_THAT(
- sendfile(efd.get(), tempfd.get(), nullptr, sizeof(kMaxEventfdValue)),
- SyscallFailsWithErrno(EINTR));
-
- // Signal should have been handled.
- EXPECT_EQ(signaled, 1);
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/sendfile_socket.cc b/test/syscalls/linux/sendfile_socket.cc
deleted file mode 100644
index c101fe9d2..000000000
--- a/test/syscalls/linux/sendfile_socket.cc
+++ /dev/null
@@ -1,231 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <arpa/inet.h>
-#include <netinet/in.h>
-#include <sys/sendfile.h>
-#include <sys/socket.h>
-#include <unistd.h>
-
-#include <iostream>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "absl/strings/string_view.h"
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-class SendFileTest : public ::testing::TestWithParam<int> {
- protected:
- PosixErrorOr<std::unique_ptr<SocketPair>> Sockets(int type) {
- // Bind a server socket.
- int family = GetParam();
- switch (family) {
- case AF_INET: {
- if (type == SOCK_STREAM) {
- return SocketPairKind{
- "TCP", AF_INET, type, 0,
- TCPAcceptBindSocketPairCreator(AF_INET, type, 0, false)}
- .Create();
- } else {
- return SocketPairKind{
- "UDP", AF_INET, type, 0,
- UDPBidirectionalBindSocketPairCreator(AF_INET, type, 0, false)}
- .Create();
- }
- }
- case AF_UNIX: {
- if (type == SOCK_STREAM) {
- return SocketPairKind{
- "UNIX", AF_UNIX, type, 0,
- FilesystemAcceptBindSocketPairCreator(AF_UNIX, type, 0)}
- .Create();
- } else {
- return SocketPairKind{
- "UNIX", AF_UNIX, type, 0,
- FilesystemBidirectionalBindSocketPairCreator(AF_UNIX, type, 0)}
- .Create();
- }
- }
- default:
- return PosixError(EINVAL);
- }
- }
-};
-
-// Sends a large file to exercise the path that reads and writes data multiple
-// times, especially when more data is read than can be written.
-TEST_P(SendFileTest, SendMultiple) {
- std::vector<char> data(5 * 1024 * 1024);
- RandomizeBuffer(data.data(), data.size());
-
- // Create temp files.
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), absl::string_view(data.data(), data.size()),
- TempPath::kDefaultFileMode));
- const TempPath out_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- // Create sockets.
- auto socks = ASSERT_NO_ERRNO_AND_VALUE(Sockets(SOCK_STREAM));
-
- // Thread that reads data from socket and dumps to a file.
- ScopedThread th([&] {
- FileDescriptor outf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_WRONLY));
-
- // Read until socket is closed.
- char buf[10240];
- for (int cnt = 0;; cnt++) {
- int r = RetryEINTR(read)(socks->first_fd(), buf, sizeof(buf));
- // We cannot afford to save on every read() call.
- if (cnt % 1000 == 0) {
- ASSERT_THAT(r, SyscallSucceeds());
- } else {
- const DisableSave ds;
- ASSERT_THAT(r, SyscallSucceeds());
- }
- if (r == 0) {
- // EOF
- break;
- }
- int w = RetryEINTR(write)(outf.get(), buf, r);
- // We cannot afford to save on every write() call.
- if (cnt % 1010 == 0) {
- ASSERT_THAT(w, SyscallSucceedsWithValue(r));
- } else {
- const DisableSave ds;
- ASSERT_THAT(w, SyscallSucceedsWithValue(r));
- }
- }
- });
-
- // Open the input file as read only.
- const FileDescriptor inf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDONLY));
-
- int cnt = 0;
- for (size_t sent = 0; sent < data.size(); cnt++) {
- const size_t remain = data.size() - sent;
- std::cout << "sendfile, size=" << data.size() << ", sent=" << sent
- << ", remain=" << remain << std::endl;
-
- // Send data and verify that sendfile returns the correct value.
- int res = sendfile(socks->second_fd(), inf.get(), nullptr, remain);
- // We cannot afford to save on every sendfile() call.
- if (cnt % 120 == 0) {
- MaybeSave();
- }
- if (res == 0) {
- // EOF
- break;
- }
- if (res > 0) {
- sent += res;
- } else {
- ASSERT_TRUE(errno == EINTR || errno == EAGAIN) << "errno=" << errno;
- }
- }
-
- // Close socket to stop thread.
- close(socks->release_second_fd());
- th.Join();
-
- // Verify that the output file has the correct data.
- const FileDescriptor outf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_RDONLY));
- std::vector<char> actual(data.size(), '\0');
- ASSERT_THAT(RetryEINTR(read)(outf.get(), actual.data(), actual.size()),
- SyscallSucceedsWithValue(actual.size()));
- ASSERT_EQ(memcmp(data.data(), actual.data(), data.size()), 0);
-}
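The sending loop in SendMultiple is the usual pattern for streaming a file over a socket with sendfile(2): the call may copy fewer bytes than requested, so the sender loops until EOF and treats EINTR/EAGAIN as retryable. A condensed sketch of the same loop, assuming open descriptors (StreamFile is our name for it, not the test's):

#include <cerrno>

#include <sys/sendfile.h>

// Sketch: stream 'size' bytes from in_fd to a connected socket, tolerating
// short sends and retryable errors.
bool StreamFile(int sock_fd, int in_fd, size_t size) {
  size_t sent = 0;
  while (sent < size) {
    ssize_t res = sendfile(sock_fd, in_fd, nullptr, size - sent);
    if (res == 0) break;  // EOF on the input file.
    if (res > 0) {
      sent += res;        // Partial sends are normal.
    } else if (errno != EINTR && errno != EAGAIN) {
      return false;       // Genuine failure.
    }
  }
  return sent == size;
}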
-
-TEST_P(SendFileTest, Shutdown) {
- // Create a socket.
- auto socks = ASSERT_NO_ERRNO_AND_VALUE(Sockets(SOCK_STREAM));
-
- // If this is a TCP socket, then turn off linger.
- if (GetParam() == AF_INET) {
- struct linger sl;
- sl.l_onoff = 1;
- sl.l_linger = 0;
- ASSERT_THAT(
- setsockopt(socks->first_fd(), SOL_SOCKET, SO_LINGER, &sl, sizeof(sl)),
- SyscallSucceeds());
- }
-
- // Create a 1m file with random data.
- std::vector<char> data(1024 * 1024);
- RandomizeBuffer(data.data(), data.size());
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), absl::string_view(data.data(), data.size()),
- TempPath::kDefaultFileMode));
- const FileDescriptor inf =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDONLY));
-
- // Read some data, then close the socket. We don't actually care about
- // checking the contents (other tests do that), so we just reuse the same
- // buffer as above.
- ScopedThread t([&]() {
- size_t done = 0;
- while (done < data.size()) {
- int n = RetryEINTR(read)(socks->first_fd(), data.data(), data.size());
- ASSERT_THAT(n, SyscallSucceeds());
- done += n;
- }
- // Close the server side socket.
- close(socks->release_first_fd());
- });
-
- // Continuously stream from the file to the socket. Note we do not assert
- // that a specific amount of data has been written at any time, just that some
- // data is written. Eventually, we should get a connection reset error.
- while (1) {
- off_t offset = 0; // Always read from the start.
- int n = sendfile(socks->second_fd(), inf.get(), &offset, data.size());
- EXPECT_THAT(n, AnyOf(SyscallFailsWithErrno(ECONNRESET),
- SyscallFailsWithErrno(EPIPE), SyscallSucceeds()));
- if (n <= 0) {
- break;
- }
- }
-}
-
-TEST_P(SendFileTest, SendpageFromEmptyFileToUDP) {
- auto socks = ASSERT_NO_ERRNO_AND_VALUE(Sockets(SOCK_DGRAM));
-
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR));
-
- // The count argument is chosen so that it is impossible to allocate a buffer
- // of that size. In Linux, sendfile transfers at most 0x7ffff000 (MAX_RW_COUNT)
- // bytes.
- EXPECT_THAT(sendfile(socks->first_fd(), fd.get(), 0x0, 0x8000000000004),
- SyscallSucceedsWithValue(0));
-}
-
-INSTANTIATE_TEST_SUITE_P(AddressFamily, SendFileTest,
- ::testing::Values(AF_UNIX, AF_INET));
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/setgid.cc b/test/syscalls/linux/setgid.cc
deleted file mode 100644
index 6278c4fab..000000000
--- a/test/syscalls/linux/setgid.cc
+++ /dev/null
@@ -1,413 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <limits.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "absl/flags/flag.h"
-#include "test/util/capability_util.h"
-#include "test/util/cleanup.h"
-#include "test/util/fs_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-ABSL_FLAG(std::vector<std::string>, groups, std::vector<std::string>({}),
- "groups the test can use");
-
-constexpr gid_t kNobody = 65534;
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-constexpr int kDirmodeMask = 07777;
-constexpr int kDirmodeSgid = S_ISGID | 0777;
-constexpr int kDirmodeNoExec = S_ISGID | 0767;
-constexpr int kDirmodeNoSgid = 0777;
-
-// Sets effective GID and returns a Cleanup that restores the original.
-PosixErrorOr<Cleanup> Setegid(gid_t egid) {
- gid_t old_gid = getegid();
- if (setegid(egid) < 0) {
- return PosixError(errno, absl::StrFormat("setegid(%d)", egid));
- }
- return Cleanup(
- [old_gid]() { EXPECT_THAT(setegid(old_gid), SyscallSucceeds()); });
-}
-
-// Returns a pair of groups that the user is a member of.
-PosixErrorOr<std::pair<gid_t, gid_t>> Groups() {
- // Were we explicitly passed GIDs?
- std::vector<std::string> flagged_groups = absl::GetFlag(FLAGS_groups);
- if (flagged_groups.size() >= 2) {
- int group1;
- int group2;
- if (!absl::SimpleAtoi(flagged_groups[0], &group1) ||
- !absl::SimpleAtoi(flagged_groups[1], &group2)) {
- return PosixError(EINVAL, "failed converting group flags to ints");
- }
- return std::pair<gid_t, gid_t>(group1, group2);
- }
-
- // See whether the user is a member of at least 2 groups.
- std::vector<gid_t> groups(64);
- for (; groups.size() <= NGROUPS_MAX; groups.resize(groups.size() * 2)) {
- int ngroups = getgroups(groups.size(), groups.data());
- if (ngroups < 0 && errno == EINVAL) {
- // Need a larger list.
- continue;
- }
- if (ngroups < 0) {
- return PosixError(errno, absl::StrFormat("getgroups(%d, %p)",
- groups.size(), groups.data()));
- }
-
- if (ngroups < 2) {
- // There aren't enough groups.
- break;
- }
-
- // TODO(b/181878080): Read /proc/sys/fs/overflowgid once it is supported in
- // gVisor.
- if (groups[0] == kNobody || groups[1] == kNobody) {
- // These groups aren't mapped into our user namespace, so we can't use
- // them.
- break;
- }
- return std::pair<gid_t, gid_t>(groups[0], groups[1]);
- }
-
- // If we're running in gVisor and are root in the root user namespace, we can
- // set our GID to whatever we want. Try that before giving up.
- //
- // This won't work in native tests, as despite having CAP_SETGID, the gofer
- // process will be sandboxed and unable to change file GIDs.
- if (!IsRunningOnGvisor()) {
- return PosixError(EPERM, "no valid groups for native testing");
- }
- PosixErrorOr<bool> capable = HaveCapability(CAP_SETGID);
- if (!capable.ok()) {
- return capable.error();
- }
- if (!capable.ValueOrDie()) {
- return PosixError(EPERM, "missing CAP_SETGID");
- }
- return std::pair<gid_t, gid_t>(getegid(), kNobody);
-}
-
-class SetgidDirTest : public ::testing::Test {
- protected:
- void SetUp() override {
- original_gid_ = getegid();
-
- // If we can't find two usable groups, we're in an unsupported environment.
- // Skip the test.
- PosixErrorOr<std::pair<gid_t, gid_t>> groups = Groups();
- SKIP_IF(!groups.ok());
- groups_ = groups.ValueOrDie();
-
- // Ensure we can actually use both groups.
- auto cleanup1 = Setegid(groups_.first);
- SKIP_IF(!cleanup1.ok());
- auto cleanup2 = Setegid(groups_.second);
- SKIP_IF(!cleanup2.ok());
-
- auto cleanup = Setegid(groups_.first);
- temp_dir_ = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateDirWith(GetAbsoluteTestTmpdir(), 0777 /* mode */));
- }
-
- void TearDown() override {
- EXPECT_THAT(setegid(original_gid_), SyscallSucceeds());
- }
-
- void MkdirAsGid(gid_t gid, const std::string& path, mode_t mode) {
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(Setegid(gid));
- ASSERT_THAT(mkdir(path.c_str(), mode), SyscallSucceeds());
- }
-
- PosixErrorOr<struct stat> Stat(const std::string& path) {
- struct stat stats;
- if (stat(path.c_str(), &stats) < 0) {
- return PosixError(errno, absl::StrFormat("stat(%s, _)", path));
- }
- return stats;
- }
-
- PosixErrorOr<struct stat> Stat(const FileDescriptor& fd) {
- struct stat stats;
- if (fstat(fd.get(), &stats) < 0) {
- return PosixError(errno, "fstat(_, _)");
- }
- return stats;
- }
-
- TempPath temp_dir_;
- std::pair<gid_t, gid_t> groups_;
- gid_t original_gid_;
-};
-
-// The control test. Files created with a given GID are owned by that group.
-TEST_F(SetgidDirTest, Control) {
- // Set group to G1 and create a directory.
- auto g1owned = JoinPath(temp_dir_.path(), "g1owned/");
- ASSERT_NO_FATAL_FAILURE(MkdirAsGid(groups_.first, g1owned, 0777));
-
- // Set group to G2, create a file in g1owned, and confirm that G2 owns it.
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(Setegid(groups_.second));
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(JoinPath(g1owned, "g2owned").c_str(), O_CREAT | O_RDWR, 0777));
- struct stat stats = ASSERT_NO_ERRNO_AND_VALUE(Stat(fd));
- EXPECT_EQ(stats.st_gid, groups_.second);
-}
-
-// Setgid directories cause created files to inherit GID.
-TEST_F(SetgidDirTest, CreateFile) {
- // Set group to G1, create a directory, and enable setgid.
- auto g1owned = JoinPath(temp_dir_.path(), "g1owned/");
- ASSERT_NO_FATAL_FAILURE(MkdirAsGid(groups_.first, g1owned, kDirmodeSgid));
- ASSERT_THAT(chmod(g1owned.c_str(), kDirmodeSgid), SyscallSucceeds());
-
- // Set group to G2, create a file, and confirm that G1 owns it.
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(Setegid(groups_.second));
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(JoinPath(g1owned, "g2created").c_str(), O_CREAT | O_RDWR, 0666));
- struct stat stats = ASSERT_NO_ERRNO_AND_VALUE(Stat(fd));
- EXPECT_EQ(stats.st_gid, groups_.first);
-}
-
-// Setgid directories cause created directories to inherit GID.
-TEST_F(SetgidDirTest, CreateDir) {
- // Set group to G1, create a directory, and enable setgid.
- auto g1owned = JoinPath(temp_dir_.path(), "g1owned/");
- ASSERT_NO_FATAL_FAILURE(MkdirAsGid(groups_.first, g1owned, kDirmodeSgid));
- ASSERT_THAT(chmod(g1owned.c_str(), kDirmodeSgid), SyscallSucceeds());
-
- // Set group to G2, create a directory, confirm that G1 owns it, and that the
- // setgid bit is enabled.
- auto g2created = JoinPath(g1owned, "g2created");
- ASSERT_NO_FATAL_FAILURE(MkdirAsGid(groups_.second, g2created, 0666));
- struct stat stats = ASSERT_NO_ERRNO_AND_VALUE(Stat(g2created));
- EXPECT_EQ(stats.st_gid, groups_.first);
- EXPECT_EQ(stats.st_mode & S_ISGID, S_ISGID);
-}
-
-// Setgid directories with group execution disabled still cause GID inheritance.
-TEST_F(SetgidDirTest, NoGroupExec) {
- // Set group to G1, create a directory, and enable setgid.
- auto g1owned = JoinPath(temp_dir_.path(), "g1owned/");
- ASSERT_NO_FATAL_FAILURE(MkdirAsGid(groups_.first, g1owned, kDirmodeNoExec));
- ASSERT_THAT(chmod(g1owned.c_str(), kDirmodeNoExec), SyscallSucceeds());
-
- // Set group to G2, create a directory, confirm that G1 owns it, and that the
- // setgid bit is enabled.
- auto g2created = JoinPath(g1owned, "g2created");
- ASSERT_NO_FATAL_FAILURE(MkdirAsGid(groups_.second, g2created, 0666));
- struct stat stats = ASSERT_NO_ERRNO_AND_VALUE(Stat(g2created));
- EXPECT_EQ(stats.st_gid, groups_.first);
- EXPECT_EQ(stats.st_mode & S_ISGID, S_ISGID);
-}
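Together, CreateFile, CreateDir and NoGroupExec pin down setgid-directory inheritance: anything created inside a directory whose S_ISGID bit is set takes the directory's group rather than the creator's effective GID, and new subdirectories inherit the S_ISGID bit as well. A small sketch of checking that inheritance on arbitrary paths (InheritsDirGroup is an illustrative helper, not part of the test):

#include <sys/stat.h>

#include <string>

// Sketch: does 'child', created under the setgid directory 'dir', carry the
// directory's group?
bool InheritsDirGroup(const std::string& dir, const std::string& child) {
  struct stat dir_st, child_st;
  if (stat(dir.c_str(), &dir_st) != 0 || stat(child.c_str(), &child_st) != 0) {
    return false;
  }
  return (dir_st.st_mode & S_ISGID) != 0 && dir_st.st_gid == child_st.st_gid;
}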
-
-// Setting the setgid bit on directories with an existing file does not change
-// the file's group.
-TEST_F(SetgidDirTest, OldFile) {
- // Set group to G1 and create a directory.
- auto g1owned = JoinPath(temp_dir_.path(), "g1owned/");
- ASSERT_NO_FATAL_FAILURE(MkdirAsGid(groups_.first, g1owned, kDirmodeNoSgid));
- ASSERT_THAT(chmod(g1owned.c_str(), kDirmodeNoSgid), SyscallSucceeds());
-
- // Set group to G2, create a file, confirm that G2 owns it.
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(Setegid(groups_.second));
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(JoinPath(g1owned, "g2created").c_str(), O_CREAT | O_RDWR, 0666));
- struct stat stats = ASSERT_NO_ERRNO_AND_VALUE(Stat(fd));
- EXPECT_EQ(stats.st_gid, groups_.second);
-
- // Enable setgid.
- ASSERT_THAT(chmod(g1owned.c_str(), kDirmodeSgid), SyscallSucceeds());
-
- // Confirm that the file's group is still G2.
- stats = ASSERT_NO_ERRNO_AND_VALUE(Stat(fd));
- EXPECT_EQ(stats.st_gid, groups_.second);
-}
-
-// Setting the setgid bit on directories with an existing subdirectory does not
-// change the subdirectory's group.
-TEST_F(SetgidDirTest, OldDir) {
- // Set group to G1, create a directory, and enable setgid.
- auto g1owned = JoinPath(temp_dir_.path(), "g1owned/");
- ASSERT_NO_FATAL_FAILURE(MkdirAsGid(groups_.first, g1owned, kDirmodeNoSgid));
- ASSERT_THAT(chmod(g1owned.c_str(), kDirmodeNoSgid), SyscallSucceeds());
-
- // Set group to G2, create a directory, confirm that G2 owns it.
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(Setegid(groups_.second));
- auto g2created = JoinPath(g1owned, "g2created");
- ASSERT_NO_FATAL_FAILURE(MkdirAsGid(groups_.second, g2created, 0666));
- struct stat stats = ASSERT_NO_ERRNO_AND_VALUE(Stat(g2created));
- EXPECT_EQ(stats.st_gid, groups_.second);
-
- // Enable setgid.
- ASSERT_THAT(chmod(g1owned.c_str(), kDirmodeSgid), SyscallSucceeds());
-
- // Confirm that the directory's group is still G2.
- stats = ASSERT_NO_ERRNO_AND_VALUE(Stat(g2created));
- EXPECT_EQ(stats.st_gid, groups_.second);
-}
-
-// Chowning a file clears the setgid and setuid bits.
-TEST_F(SetgidDirTest, ChownFileClears) {
- // Set group to G1, create a directory, and enable setgid.
- auto g1owned = JoinPath(temp_dir_.path(), "g1owned/");
- ASSERT_NO_FATAL_FAILURE(MkdirAsGid(groups_.first, g1owned, kDirmodeMask));
- ASSERT_THAT(chmod(g1owned.c_str(), kDirmodeMask), SyscallSucceeds());
-
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(JoinPath(g1owned, "newfile").c_str(), O_CREAT | O_RDWR, 0666));
- ASSERT_THAT(fchmod(fd.get(), 0777 | S_ISUID | S_ISGID), SyscallSucceeds());
- struct stat stats = ASSERT_NO_ERRNO_AND_VALUE(Stat(fd));
- EXPECT_EQ(stats.st_gid, groups_.first);
- EXPECT_EQ(stats.st_mode & (S_ISUID | S_ISGID), S_ISUID | S_ISGID);
-
- // Change the owning group.
- ASSERT_THAT(fchown(fd.get(), -1, groups_.second), SyscallSucceeds());
-
- // The setgid and setuid bits should be cleared.
- stats = ASSERT_NO_ERRNO_AND_VALUE(Stat(fd));
- EXPECT_EQ(stats.st_gid, groups_.second);
- EXPECT_EQ(stats.st_mode & (S_ISUID | S_ISGID), 0);
-}
-
-// Chowning a file with setgid enabled, but not the group exec bit, clears the
-// setuid bit but not the setgid bit. Such files are subject to mandatory
-// locking.
-TEST_F(SetgidDirTest, ChownNoExecFileDoesNotClear) {
- // Set group to G1, create a directory, and enable setgid.
- auto g1owned = JoinPath(temp_dir_.path(), "g1owned/");
- ASSERT_NO_FATAL_FAILURE(MkdirAsGid(groups_.first, g1owned, kDirmodeNoExec));
- ASSERT_THAT(chmod(g1owned.c_str(), kDirmodeNoExec), SyscallSucceeds());
-
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(JoinPath(g1owned, "newdir").c_str(), O_CREAT | O_RDWR, 0666));
- ASSERT_THAT(fchmod(fd.get(), 0766 | S_ISUID | S_ISGID), SyscallSucceeds());
- struct stat stats = ASSERT_NO_ERRNO_AND_VALUE(Stat(fd));
- EXPECT_EQ(stats.st_gid, groups_.first);
- EXPECT_EQ(stats.st_mode & (S_ISUID | S_ISGID), S_ISUID | S_ISGID);
-
- // Change the owning group.
- ASSERT_THAT(fchown(fd.get(), -1, groups_.second), SyscallSucceeds());
-
- // Only the setuid bit is cleared.
- stats = ASSERT_NO_ERRNO_AND_VALUE(Stat(fd));
- EXPECT_EQ(stats.st_gid, groups_.second);
- EXPECT_EQ(stats.st_mode & (S_ISUID | S_ISGID), S_ISGID);
-}
-
-// Chowning a directory with setgid enabled does not clear the bit.
-TEST_F(SetgidDirTest, ChownDirDoesNotClear) {
- // Set group to G1, create a directory, and enable setgid.
- auto g1owned = JoinPath(temp_dir_.path(), "g1owned/");
- ASSERT_NO_FATAL_FAILURE(MkdirAsGid(groups_.first, g1owned, kDirmodeMask));
- ASSERT_THAT(chmod(g1owned.c_str(), kDirmodeMask), SyscallSucceeds());
-
- // Change the owning group.
- ASSERT_THAT(chown(g1owned.c_str(), -1, groups_.second), SyscallSucceeds());
-
- struct stat stats = ASSERT_NO_ERRNO_AND_VALUE(Stat(g1owned));
- EXPECT_EQ(stats.st_gid, groups_.second);
- EXPECT_EQ(stats.st_mode & kDirmodeMask, kDirmodeMask);
-}
-
-struct FileModeTestcase {
- std::string name;
- mode_t mode;
- mode_t result_mode;
-
- FileModeTestcase(const std::string& name, mode_t mode, mode_t result_mode)
- : name(name), mode(mode), result_mode(result_mode) {}
-};
-
-class FileModeTest : public ::testing::TestWithParam<FileModeTestcase> {};
-
-TEST_P(FileModeTest, WriteToFile) {
- PosixErrorOr<std::pair<gid_t, gid_t>> groups = Groups();
- SKIP_IF(!groups.ok());
-
- auto cleanup = Setegid(groups.ValueOrDie().first);
- auto temp_dir = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateDirWith(GetAbsoluteTestTmpdir(), 0777 /* mode */));
- auto path = JoinPath(temp_dir.path(), GetParam().name);
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(path.c_str(), O_CREAT | O_RDWR, 0666));
- ASSERT_THAT(fchmod(fd.get(), GetParam().mode), SyscallSucceeds());
- struct stat stats;
- ASSERT_THAT(fstat(fd.get(), &stats), SyscallSucceeds());
- EXPECT_EQ(stats.st_mode & kDirmodeMask, GetParam().mode);
-
- // For security reasons, writing to the file clears the SUID bit, and clears
- // the SGID bit unless the group executable bit is unset (a file with SGID but
- // no group exec is not a true SGID binary, so the bit is preserved).
- constexpr char kInput = 'M';
- ASSERT_THAT(write(fd.get(), &kInput, sizeof(kInput)),
- SyscallSucceedsWithValue(sizeof(kInput)));
-
- ASSERT_THAT(fstat(fd.get(), &stats), SyscallSucceeds());
- EXPECT_EQ(stats.st_mode & kDirmodeMask, GetParam().result_mode);
-}
-
-TEST_P(FileModeTest, TruncateFile) {
- PosixErrorOr<std::pair<gid_t, gid_t>> groups = Groups();
- SKIP_IF(!groups.ok());
-
- auto cleanup = Setegid(groups.ValueOrDie().first);
- auto temp_dir = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateDirWith(GetAbsoluteTestTmpdir(), 0777 /* mode */));
- auto path = JoinPath(temp_dir.path(), GetParam().name);
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(path.c_str(), O_CREAT | O_RDWR, 0666));
-
- // Write something to the file, as truncating an empty file is a no-op.
- constexpr char c = 'M';
- ASSERT_THAT(write(fd.get(), &c, sizeof(c)),
- SyscallSucceedsWithValue(sizeof(c)));
- ASSERT_THAT(fchmod(fd.get(), GetParam().mode), SyscallSucceeds());
-
- // For security reasons, truncating the file clears the SUID bit, and clears
- // the SGID bit unless the group executable bit is unset (a file with SGID but
- // no group exec is not a true SGID binary, so the bit is preserved).
- ASSERT_THAT(ftruncate(fd.get(), 0), SyscallSucceeds());
-
- struct stat stats;
- ASSERT_THAT(fstat(fd.get(), &stats), SyscallSucceeds());
- EXPECT_EQ(stats.st_mode & kDirmodeMask, GetParam().result_mode);
-}
-
-INSTANTIATE_TEST_SUITE_P(
- FileModes, FileModeTest,
- ::testing::ValuesIn<FileModeTestcase>(
- {FileModeTestcase("normal file", 0777, 0777),
- FileModeTestcase("setuid", S_ISUID | 0777, 00777),
- FileModeTestcase("setgid", S_ISGID | 0777, 00777),
- FileModeTestcase("setuid and setgid", S_ISUID | S_ISGID | 0777, 00777),
- FileModeTestcase("setgid without exec", S_ISGID | 0767,
- S_ISGID | 0767),
- FileModeTestcase("setuid and setgid without exec",
- S_ISGID | S_ISUID | 0767, S_ISGID | 0767)}));
-
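The FileModes table above encodes the kernel's privilege-dropping rule for writes and truncates by the file owner: S_ISUID is always stripped, while S_ISGID is stripped only when the group-execute bit is also set (S_ISGID without group exec marks mandatory locking and is preserved). A sketch of that rule as a predicate matching the result_mode column (ModeAfterWrite is our name, not an API):

#include <sys/stat.h>

// Sketch: the mode expected after a write or truncate, per the table above.
mode_t ModeAfterWrite(mode_t mode) {
  mode &= ~S_ISUID;  // SUID is always cleared.
  if (mode & S_IXGRP) {
    mode &= ~S_ISGID;  // SGID is cleared only for group-executable files.
  }
  return mode;
}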
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/shm.cc b/test/syscalls/linux/shm.cc
deleted file mode 100644
index baf794152..000000000
--- a/test/syscalls/linux/shm.cc
+++ /dev/null
@@ -1,505 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <stdio.h>
-#include <sys/ipc.h>
-#include <sys/mman.h>
-#include <sys/shm.h>
-#include <sys/types.h>
-
-#include "absl/time/clock.h"
-#include "test/util/multiprocess_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-using ::testing::_;
-using ::testing::AnyOf;
-using ::testing::Eq;
-
-const uint64_t kAllocSize = kPageSize * 128ULL;
-
-PosixErrorOr<char*> Shmat(int shmid, const void* shmaddr, int shmflg) {
- const intptr_t addr =
- reinterpret_cast<intptr_t>(shmat(shmid, shmaddr, shmflg));
- if (addr == -1) {
- return PosixError(errno, "shmat() failed");
- }
- return reinterpret_cast<char*>(addr);
-}
-
-PosixError Shmdt(const char* shmaddr) {
- const int ret = shmdt(shmaddr);
- if (ret == -1) {
- return PosixError(errno, "shmdt() failed");
- }
- return NoError();
-}
-
-template <typename T>
-PosixErrorOr<int> Shmctl(int shmid, int cmd, T* buf) {
- int ret = shmctl(shmid, cmd, reinterpret_cast<struct shmid_ds*>(buf));
- if (ret == -1) {
- return PosixError(errno, "shmctl() failed");
- }
- return ret;
-}
-
-// ShmSegment is a RAII object for automatically cleaning up shm segments.
-class ShmSegment {
- public:
- explicit ShmSegment(int id) : id_(id) {}
-
- ~ShmSegment() {
- if (id_ >= 0) {
- EXPECT_NO_ERRNO(Rmid());
- id_ = -1;
- }
- }
-
- ShmSegment(ShmSegment&& other) : id_(other.release()) {}
-
- ShmSegment& operator=(ShmSegment&& other) {
- id_ = other.release();
- return *this;
- }
-
- ShmSegment(ShmSegment const& other) = delete;
- ShmSegment& operator=(ShmSegment const& other) = delete;
-
- int id() const { return id_; }
-
- int release() {
- int id = id_;
- id_ = -1;
- return id;
- }
-
- PosixErrorOr<int> Rmid() {
- RETURN_IF_ERRNO(Shmctl<void>(id_, IPC_RMID, nullptr));
- return release();
- }
-
- private:
- int id_ = -1;
-};
-
-PosixErrorOr<int> ShmgetRaw(key_t key, size_t size, int shmflg) {
- int id = shmget(key, size, shmflg);
- if (id == -1) {
- return PosixError(errno, "shmget() failed");
- }
- return id;
-}
-
-PosixErrorOr<ShmSegment> Shmget(key_t key, size_t size, int shmflg) {
- ASSIGN_OR_RETURN_ERRNO(int id, ShmgetRaw(key, size, shmflg));
- return ShmSegment(id);
-}
-
-TEST(ShmTest, AttachDetach) {
- const ShmSegment shm = ASSERT_NO_ERRNO_AND_VALUE(
- Shmget(IPC_PRIVATE, kAllocSize, IPC_CREAT | 0777));
- struct shmid_ds attr;
- ASSERT_NO_ERRNO(Shmctl(shm.id(), IPC_STAT, &attr));
- EXPECT_EQ(attr.shm_segsz, kAllocSize);
- EXPECT_EQ(attr.shm_nattch, 0);
-
- const char* addr = ASSERT_NO_ERRNO_AND_VALUE(Shmat(shm.id(), nullptr, 0));
- ASSERT_NO_ERRNO(Shmctl(shm.id(), IPC_STAT, &attr));
- EXPECT_EQ(attr.shm_nattch, 1);
-
- const char* addr2 = ASSERT_NO_ERRNO_AND_VALUE(Shmat(shm.id(), nullptr, 0));
- ASSERT_NO_ERRNO(Shmctl(shm.id(), IPC_STAT, &attr));
- EXPECT_EQ(attr.shm_nattch, 2);
-
- ASSERT_NO_ERRNO(Shmdt(addr));
- ASSERT_NO_ERRNO(Shmctl(shm.id(), IPC_STAT, &attr));
- EXPECT_EQ(attr.shm_nattch, 1);
-
- ASSERT_NO_ERRNO(Shmdt(addr2));
- ASSERT_NO_ERRNO(Shmctl(shm.id(), IPC_STAT, &attr));
- EXPECT_EQ(attr.shm_nattch, 0);
-}
-
-TEST(ShmTest, LookupByKey) {
- const TempPath keyfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const key_t key = ftok(keyfile.path().c_str(), 1);
- const ShmSegment shm =
- ASSERT_NO_ERRNO_AND_VALUE(Shmget(key, kAllocSize, IPC_CREAT | 0777));
- const int id2 = ASSERT_NO_ERRNO_AND_VALUE(ShmgetRaw(key, kAllocSize, 0777));
- EXPECT_EQ(shm.id(), id2);
-}
-
-TEST(ShmTest, DetachedSegmentsPersist) {
- const ShmSegment shm = ASSERT_NO_ERRNO_AND_VALUE(
- Shmget(IPC_PRIVATE, kAllocSize, IPC_CREAT | 0777));
- char* addr = ASSERT_NO_ERRNO_AND_VALUE(Shmat(shm.id(), nullptr, 0));
- addr[0] = 'x';
- ASSERT_NO_ERRNO(Shmdt(addr));
-
- // We should be able to re-attach to the same segment and get our data back.
- addr = ASSERT_NO_ERRNO_AND_VALUE(Shmat(shm.id(), nullptr, 0));
- EXPECT_EQ(addr[0], 'x');
- ASSERT_NO_ERRNO(Shmdt(addr));
-}
-
-TEST(ShmTest, MultipleDetachFails) {
- const ShmSegment shm = ASSERT_NO_ERRNO_AND_VALUE(
- Shmget(IPC_PRIVATE, kAllocSize, IPC_CREAT | 0777));
- const char* addr = ASSERT_NO_ERRNO_AND_VALUE(Shmat(shm.id(), nullptr, 0));
- ASSERT_NO_ERRNO(Shmdt(addr));
- EXPECT_THAT(Shmdt(addr), PosixErrorIs(EINVAL, _));
-}
-
-TEST(ShmTest, IpcStat) {
- const TempPath keyfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const key_t key = ftok(keyfile.path().c_str(), 1);
-
- const time_t start = time(nullptr);
-
- const ShmSegment shm =
- ASSERT_NO_ERRNO_AND_VALUE(Shmget(key, kAllocSize, IPC_CREAT | 0777));
-
- const uid_t uid = getuid();
- const gid_t gid = getgid();
- const pid_t pid = getpid();
-
- struct shmid_ds attr;
- ASSERT_NO_ERRNO(Shmctl(shm.id(), IPC_STAT, &attr));
-
- EXPECT_EQ(attr.shm_perm.__key, key);
- EXPECT_EQ(attr.shm_perm.uid, uid);
- EXPECT_EQ(attr.shm_perm.gid, gid);
- EXPECT_EQ(attr.shm_perm.cuid, uid);
- EXPECT_EQ(attr.shm_perm.cgid, gid);
- EXPECT_EQ(attr.shm_perm.mode, 0777);
-
- EXPECT_EQ(attr.shm_segsz, kAllocSize);
-
- EXPECT_EQ(attr.shm_atime, 0);
- EXPECT_EQ(attr.shm_dtime, 0);
-
- // Change time is set on creation.
- EXPECT_GE(attr.shm_ctime, start);
-
- EXPECT_EQ(attr.shm_cpid, pid);
- EXPECT_EQ(attr.shm_lpid, 0);
-
- EXPECT_EQ(attr.shm_nattch, 0);
-
- // The timestamps only have a resolution of seconds; slow down so we actually
- // see the timestamps change.
- absl::SleepFor(absl::Seconds(1));
- const time_t pre_attach = time(nullptr);
-
- const char* addr = ASSERT_NO_ERRNO_AND_VALUE(Shmat(shm.id(), nullptr, 0));
- ASSERT_NO_ERRNO(Shmctl(shm.id(), IPC_STAT, &attr));
-
- EXPECT_GE(attr.shm_atime, pre_attach);
- EXPECT_EQ(attr.shm_dtime, 0);
- EXPECT_LT(attr.shm_ctime, pre_attach);
- EXPECT_EQ(attr.shm_lpid, pid);
- EXPECT_EQ(attr.shm_nattch, 1);
-
- absl::SleepFor(absl::Seconds(1));
- const time_t pre_detach = time(nullptr);
-
- ASSERT_NO_ERRNO(Shmdt(addr));
- ASSERT_NO_ERRNO(Shmctl(shm.id(), IPC_STAT, &attr));
-
- EXPECT_LT(attr.shm_atime, pre_detach);
- EXPECT_GE(attr.shm_dtime, pre_detach);
- EXPECT_LT(attr.shm_ctime, pre_detach);
- EXPECT_EQ(attr.shm_lpid, pid);
- EXPECT_EQ(attr.shm_nattch, 0);
-}
-
-TEST(ShmTest, ShmStat) {
- // This test relies on the segment we create to be the first one on the
- // system, causing it to occupy slot 1. We can't reasonably expect this on a
- // general Linux host.
- SKIP_IF(!IsRunningOnGvisor());
-
- const ShmSegment shm = ASSERT_NO_ERRNO_AND_VALUE(
- Shmget(IPC_PRIVATE, kAllocSize, IPC_CREAT | 0777));
- struct shmid_ds attr;
- ASSERT_NO_ERRNO(Shmctl(1, SHM_STAT, &attr));
- // This does the same thing as IPC_STAT, so only test that the syscall
- // succeeds here.
-}
-
-TEST(ShmTest, IpcInfo) {
- struct shminfo info;
- ASSERT_NO_ERRNO(Shmctl(0, IPC_INFO, &info));
-
- EXPECT_EQ(info.shmmin, 1); // This is always 1, according to the man page.
- EXPECT_GT(info.shmmax, info.shmmin);
- EXPECT_GT(info.shmmni, 0);
- EXPECT_GT(info.shmseg, 0);
- EXPECT_GT(info.shmall, 0);
-}
-
-TEST(ShmTest, ShmInfo) {
- // Take a snapshot of the system before the test runs.
- struct shm_info snap;
- ASSERT_NO_ERRNO(Shmctl(0, SHM_INFO, &snap));
-
- const ShmSegment shm = ASSERT_NO_ERRNO_AND_VALUE(
- Shmget(IPC_PRIVATE, kAllocSize, IPC_CREAT | 0777));
- const char* addr = ASSERT_NO_ERRNO_AND_VALUE(Shmat(shm.id(), nullptr, 0));
-
- struct shm_info info;
- ASSERT_NO_ERRNO(Shmctl(1, SHM_INFO, &info));
-
- // We generally can't know what other processes on a Linux machine do with
- // shared memory segments, so we can't test specific numbers on Linux. When
- // running under gVisor, we're guaranteed to be the only ones using shm, so
- // we can easily verify machine-wide numbers.
- if (IsRunningOnGvisor()) {
- ASSERT_NO_ERRNO(Shmctl(shm.id(), SHM_INFO, &info));
- EXPECT_EQ(info.used_ids, snap.used_ids + 1);
- EXPECT_EQ(info.shm_tot, snap.shm_tot + (kAllocSize / kPageSize));
- EXPECT_EQ(info.shm_rss, snap.shm_rss + (kAllocSize / kPageSize));
- EXPECT_EQ(info.shm_swp, 0); // Gvisor currently never swaps.
- }
-
- ASSERT_NO_ERRNO(Shmdt(addr));
-}
-
-TEST(ShmTest, ShmCtlSet) {
- const ShmSegment shm = ASSERT_NO_ERRNO_AND_VALUE(
- Shmget(IPC_PRIVATE, kAllocSize, IPC_CREAT | 0777));
- const char* addr = ASSERT_NO_ERRNO_AND_VALUE(Shmat(shm.id(), nullptr, 0));
-
- struct shmid_ds attr;
- ASSERT_NO_ERRNO(Shmctl(shm.id(), IPC_STAT, &attr));
- ASSERT_EQ(attr.shm_perm.mode, 0777);
-
- attr.shm_perm.mode = 0766;
- ASSERT_NO_ERRNO(Shmctl(shm.id(), IPC_SET, &attr));
-
- ASSERT_NO_ERRNO(Shmctl(shm.id(), IPC_STAT, &attr));
- ASSERT_EQ(attr.shm_perm.mode, 0766);
-
- ASSERT_NO_ERRNO(Shmdt(addr));
-}
-
-TEST(ShmTest, RemovedSegmentsAreMarkedDeleted) {
- ShmSegment shm = ASSERT_NO_ERRNO_AND_VALUE(
- Shmget(IPC_PRIVATE, kAllocSize, IPC_CREAT | 0777));
- const char* addr = ASSERT_NO_ERRNO_AND_VALUE(Shmat(shm.id(), nullptr, 0));
- const int id = ASSERT_NO_ERRNO_AND_VALUE(shm.Rmid());
- struct shmid_ds attr;
- ASSERT_NO_ERRNO(Shmctl(id, IPC_STAT, &attr));
- EXPECT_NE(attr.shm_perm.mode & SHM_DEST, 0);
- ASSERT_NO_ERRNO(Shmdt(addr));
-}
-
-TEST(ShmTest, RemovedSegmentsAreDestroyed) {
- ShmSegment shm = ASSERT_NO_ERRNO_AND_VALUE(
- Shmget(IPC_PRIVATE, kAllocSize, IPC_CREAT | 0777));
- const char* addr = ASSERT_NO_ERRNO_AND_VALUE(Shmat(shm.id(), nullptr, 0));
-
- const uint64_t alloc_pages = kAllocSize / kPageSize;
-
- struct shm_info info;
- ASSERT_NO_ERRNO(Shmctl(0 /*ignored*/, SHM_INFO, &info));
- const uint64_t before = info.shm_tot;
-
- ASSERT_NO_ERRNO(shm.Rmid());
- ASSERT_NO_ERRNO(Shmdt(addr));
-
- ASSERT_NO_ERRNO(Shmctl(0 /*ignored*/, SHM_INFO, &info));
- if (IsRunningOnGvisor()) {
- // No guarantees on system-wide shm memory usage on a generic linux host.
- const uint64_t after = info.shm_tot;
- EXPECT_EQ(after, before - alloc_pages);
- }
-}
-
-TEST(ShmTest, AllowsAttachToRemovedSegmentWithRefs) {
- ShmSegment shm = ASSERT_NO_ERRNO_AND_VALUE(
- Shmget(IPC_PRIVATE, kAllocSize, IPC_CREAT | 0777));
- const char* addr = ASSERT_NO_ERRNO_AND_VALUE(Shmat(shm.id(), nullptr, 0));
- const int id = ASSERT_NO_ERRNO_AND_VALUE(shm.Rmid());
- const char* addr2 = ASSERT_NO_ERRNO_AND_VALUE(Shmat(id, nullptr, 0));
- ASSERT_NO_ERRNO(Shmdt(addr));
- ASSERT_NO_ERRNO(Shmdt(addr2));
-}
-
-TEST(ShmTest, RemovedSegmentsAreNotDiscoverable) {
- const TempPath keyfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const key_t key = ftok(keyfile.path().c_str(), 1);
- ShmSegment shm =
- ASSERT_NO_ERRNO_AND_VALUE(Shmget(key, kAllocSize, IPC_CREAT | 0777));
- ASSERT_NO_ERRNO(shm.Rmid());
- EXPECT_THAT(Shmget(key, kAllocSize, 0777), PosixErrorIs(ENOENT, _));
-}
-
-TEST(ShmDeathTest, ReadonlySegment) {
- SetupGvisorDeathTest();
- const ShmSegment shm = ASSERT_NO_ERRNO_AND_VALUE(
- Shmget(IPC_PRIVATE, kAllocSize, IPC_CREAT | 0777));
- char* addr = ASSERT_NO_ERRNO_AND_VALUE(Shmat(shm.id(), nullptr, SHM_RDONLY));
- // Reading succeeds.
- static_cast<void>(addr[0]);
- // Writing fails.
- EXPECT_EXIT(addr[0] = 'x', ::testing::KilledBySignal(SIGSEGV), "");
-}
-
-TEST(ShmDeathTest, SegmentNotAccessibleAfterDetach) {
- // This test is susceptible to races with concurrent mmaps running in parallel
- // gtest threads, since it relies on the address freed during shm segment
- // destruction remaining unused. We run the test body in a forked child to
- // guarantee a single-threaded context and avoid this.
-
- SetupGvisorDeathTest();
-
- const auto rest = [&] {
- ShmSegment shm = TEST_CHECK_NO_ERRNO_AND_VALUE(
- Shmget(IPC_PRIVATE, kAllocSize, IPC_CREAT | 0777));
- char* addr = TEST_CHECK_NO_ERRNO_AND_VALUE(Shmat(shm.id(), nullptr, 0));
-
- // Mark the segment as destroyed so it's automatically cleaned up when we
- // crash below. We can't rely on the standard cleanup since the destructor
- // will not run after the SIGSEGV. Note that this doesn't destroy the
- // segment immediately since we're still attached to it.
- TEST_CHECK_NO_ERRNO(shm.Rmid());
-
- addr[0] = 'x';
- TEST_CHECK_NO_ERRNO(Shmdt(addr));
-
- // This access should cause a SIGSEGV.
- addr[0] = 'x';
- };
-
- EXPECT_THAT(InForkedProcess(rest),
- IsPosixErrorOkAndHolds(AnyOf(Eq(W_EXITCODE(0, SIGSEGV)),
- Eq(W_EXITCODE(0, 128 + SIGSEGV)))));
-}
-
-TEST(ShmTest, RequestingSegmentSmallerThanSHMMINFails) {
- struct shminfo info;
- ASSERT_NO_ERRNO(Shmctl(0, IPC_INFO, &info));
- const uint64_t size = info.shmmin - 1;
- EXPECT_THAT(Shmget(IPC_PRIVATE, size, IPC_CREAT | 0777),
- PosixErrorIs(EINVAL, _));
-}
-
-TEST(ShmTest, RequestingSegmentLargerThanSHMMAXFails) {
- struct shminfo info;
- ASSERT_NO_ERRNO(Shmctl(0, IPC_INFO, &info));
- const uint64_t size = info.shmmax + kPageSize;
- EXPECT_THAT(Shmget(IPC_PRIVATE, size, IPC_CREAT | 0777),
- PosixErrorIs(EINVAL, _));
-}
-
-TEST(ShmTest, RequestingUnalignedSizeSucceeds) {
- EXPECT_NO_ERRNO(Shmget(IPC_PRIVATE, 4097, IPC_CREAT | 0777));
-}
-
-TEST(ShmTest, RequestingDuplicateCreationFails) {
- const TempPath keyfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const key_t key = ftok(keyfile.path().c_str(), 1);
- const ShmSegment shm = ASSERT_NO_ERRNO_AND_VALUE(
- Shmget(key, kAllocSize, IPC_CREAT | IPC_EXCL | 0777));
- EXPECT_THAT(Shmget(key, kAllocSize, IPC_CREAT | IPC_EXCL | 0777),
- PosixErrorIs(EEXIST, _));
-}
-
-TEST(ShmTest, NonExistentSegmentsAreNotFound) {
- const TempPath keyfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const key_t key = ftok(keyfile.path().c_str(), 1);
- // Do not request creation.
- EXPECT_THAT(Shmget(key, kAllocSize, 0777), PosixErrorIs(ENOENT, _));
-}
-
-TEST(ShmTest, SegmentsSizeFixedOnCreation) {
- const TempPath keyfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const key_t key = ftok(keyfile.path().c_str(), 1);
-
- // Base segment.
- const ShmSegment shm =
- ASSERT_NO_ERRNO_AND_VALUE(Shmget(key, kAllocSize, IPC_CREAT | 0777));
-
- // Ask for the same segment at half size. This succeeds.
- const int id2 =
- ASSERT_NO_ERRNO_AND_VALUE(ShmgetRaw(key, kAllocSize / 2, 0777));
-
- // Ask for the same segment at double size.
- EXPECT_THAT(Shmget(key, kAllocSize * 2, 0777), PosixErrorIs(EINVAL, _));
-
- char* addr = ASSERT_NO_ERRNO_AND_VALUE(Shmat(shm.id(), nullptr, 0));
- char* addr2 = ASSERT_NO_ERRNO_AND_VALUE(Shmat(id2, nullptr, 0));
-
- // We have 2 different maps...
- EXPECT_NE(addr, addr2);
-
- // ... And both maps are kAllocSize bytes, despite asking for a half-sized
- // segment for the second map.
- addr[kAllocSize - 1] = 'x';
- addr2[kAllocSize - 1] = 'x';
-
- ASSERT_NO_ERRNO(Shmdt(addr));
- ASSERT_NO_ERRNO(Shmdt(addr2));
-}
-
-TEST(ShmTest, PartialUnmap) {
- const ShmSegment shm = ASSERT_NO_ERRNO_AND_VALUE(
- Shmget(IPC_PRIVATE, kAllocSize, IPC_CREAT | 0777));
- char* addr = ASSERT_NO_ERRNO_AND_VALUE(Shmat(shm.id(), nullptr, 0));
- EXPECT_THAT(munmap(addr + (kAllocSize / 4), kAllocSize / 2),
- SyscallSucceeds());
- ASSERT_NO_ERRNO(Shmdt(addr));
-}
-
-// Check that sentry does not panic when asked for a zero-length private shm
-// segment. Regression test for b/110694797.
-TEST(ShmTest, GracefullyFailOnZeroLenSegmentCreation) {
- EXPECT_THAT(Shmget(IPC_PRIVATE, 0, 0), PosixErrorIs(EINVAL, _));
-}
-
-TEST(ShmTest, NoDestructionOfAttachedSegmentWithMultipleRmid) {
- ShmSegment shm = ASSERT_NO_ERRNO_AND_VALUE(
- Shmget(IPC_PRIVATE, kAllocSize, IPC_CREAT | 0777));
- char* addr = ASSERT_NO_ERRNO_AND_VALUE(Shmat(shm.id(), nullptr, 0));
- char* addr2 = ASSERT_NO_ERRNO_AND_VALUE(Shmat(shm.id(), nullptr, 0));
-
- // There should be 2 refs to the segment from the 2 attachments, and a single
- // self-reference. Mark the segment as destroyed more than 3 times through
- // shmctl(RMID). If there's a bug with the ref counting, this should cause the
- // count to drop to zero.
- int id = shm.release();
- for (int i = 0; i < 6; ++i) {
- ASSERT_NO_ERRNO(Shmctl<void>(id, IPC_RMID, nullptr));
- }
-
- // Segment should remain accessible.
- addr[0] = 'x';
- ASSERT_NO_ERRNO(Shmdt(addr));
-
- // Segment should remain accessible even after one of the two attachments is
- // detached.
- addr2[0] = 'x';
- ASSERT_NO_ERRNO(Shmdt(addr2));
-}
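The last few tests in this file exercise SysV shm's deferred destruction: shmctl(IPC_RMID) only marks a segment for removal, the segment stays mapped (and can even be re-attached by id) until the final shmdt(), and repeated RMIDs must not over-release it. A bare sketch of that lifecycle (error handling omitted; this is not the test's code):

#include <sys/ipc.h>
#include <sys/shm.h>

// Sketch: a removed-but-attached segment remains usable until the last detach.
void RmidLifecycle() {
  int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
  char* addr = static_cast<char*>(shmat(id, nullptr, 0));
  shmctl(id, IPC_RMID, nullptr);  // Marked SHM_DEST; not destroyed yet.
  addr[0] = 'x';                  // The mapping is still valid and writable.
  shmdt(addr);                    // Last detach: the segment is reclaimed now.
}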
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/sigaction.cc b/test/syscalls/linux/sigaction.cc
deleted file mode 100644
index 9d9dd57a8..000000000
--- a/test/syscalls/linux/sigaction.cc
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <signal.h>
-#include <sys/syscall.h>
-
-#include "gtest/gtest.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(SigactionTest, GetLessThanOrEqualToZeroFails) {
- struct sigaction act = {};
- ASSERT_THAT(sigaction(-1, nullptr, &act), SyscallFailsWithErrno(EINVAL));
- ASSERT_THAT(sigaction(0, nullptr, &act), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(SigactionTest, SetLessThanOrEqualToZeroFails) {
- struct sigaction act = {};
- ASSERT_THAT(sigaction(0, &act, nullptr), SyscallFailsWithErrno(EINVAL));
- ASSERT_THAT(sigaction(0, &act, nullptr), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(SigactionTest, GetGreaterThanMaxFails) {
- struct sigaction act = {};
- ASSERT_THAT(sigaction(SIGRTMAX + 1, nullptr, &act),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(SigactionTest, SetGreaterThanMaxFails) {
- struct sigaction act = {};
- ASSERT_THAT(sigaction(SIGRTMAX + 1, &act, nullptr),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(SigactionTest, SetSigkillFails) {
- struct sigaction act = {};
- ASSERT_THAT(sigaction(SIGKILL, nullptr, &act), SyscallSucceeds());
- ASSERT_THAT(sigaction(SIGKILL, &act, nullptr), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(SigactionTest, SetSigstopFails) {
- struct sigaction act = {};
- ASSERT_THAT(sigaction(SIGSTOP, nullptr, &act), SyscallSucceeds());
- ASSERT_THAT(sigaction(SIGSTOP, &act, nullptr), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(SigactionTest, BadSigsetFails) {
- constexpr size_t kWrongSigSetSize = 43;
-
- struct sigaction act = {};
-
- // The syscall itself (rather than the libc wrapper) takes the sigset_t size.
- ASSERT_THAT(
- syscall(SYS_rt_sigaction, SIGTERM, nullptr, &act, kWrongSigSetSize),
- SyscallFailsWithErrno(EINVAL));
- ASSERT_THAT(
- syscall(SYS_rt_sigaction, SIGTERM, &act, nullptr, kWrongSigSetSize),
- SyscallFailsWithErrno(EINVAL));
-}
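BadSigsetFails passes a deliberately wrong sigset size to the raw rt_sigaction syscall; the kernel only accepts its own sigset size, which is 8 bytes on the common 64-bit Linux ABIs (an assumption here, not something the test states). A sketch of probing that requirement with both action pointers null, so no struct layout questions arise (ProbeRtSigactionSize is our helper name):

#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

// Sketch: rt_sigaction validates the sigset size before anything else.
long ProbeRtSigactionSize(size_t sigset_size) {
  // With both action pointers null, the call only validates its arguments.
  return syscall(SYS_rt_sigaction, SIGTERM, nullptr, nullptr, sigset_size);
}

// ProbeRtSigactionSize(8) is expected to return 0; any other size should fail
// with EINVAL, which is what the test above asserts for size 43.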
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/sigaltstack.cc b/test/syscalls/linux/sigaltstack.cc
deleted file mode 100644
index 24e7c4960..000000000
--- a/test/syscalls/linux/sigaltstack.cc
+++ /dev/null
@@ -1,268 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <signal.h>
-#include <stdio.h>
-#include <string.h>
-#include <unistd.h>
-
-#include <functional>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "test/util/cleanup.h"
-#include "test/util/fs_util.h"
-#include "test/util/multiprocess_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/signal_util.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-PosixErrorOr<Cleanup> ScopedSigaltstack(stack_t const& stack) {
- stack_t old_stack;
- int rc = sigaltstack(&stack, &old_stack);
- MaybeSave();
- if (rc < 0) {
- return PosixError(errno, "sigaltstack failed");
- }
- return Cleanup([old_stack] {
- EXPECT_THAT(sigaltstack(&old_stack, nullptr), SyscallSucceeds());
- });
-}
-
-volatile bool got_signal = false;
-volatile int sigaltstack_errno = 0;
-volatile int ss_flags = 0;
-
-void sigaltstack_handler(int sig, siginfo_t* siginfo, void* arg) {
- got_signal = true;
-
- stack_t stack;
- int ret = sigaltstack(nullptr, &stack);
- MaybeSave();
- if (ret < 0) {
- sigaltstack_errno = errno;
- return;
- }
- ss_flags = stack.ss_flags;
-}
-
-TEST(SigaltstackTest, Success) {
- std::vector<char> stack_mem(SIGSTKSZ);
- stack_t stack = {};
- stack.ss_sp = stack_mem.data();
- stack.ss_size = stack_mem.size();
- auto const cleanup_sigstack =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaltstack(stack));
-
- struct sigaction sa = {};
- sa.sa_sigaction = sigaltstack_handler;
- sigfillset(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
- auto const cleanup_sa =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGUSR1, sa));
-
- // Send signal to this thread, as sigaltstack is per-thread.
- EXPECT_THAT(tgkill(getpid(), gettid(), SIGUSR1), SyscallSucceeds());
-
- EXPECT_TRUE(got_signal);
- EXPECT_EQ(sigaltstack_errno, 0);
- EXPECT_NE(0, ss_flags & SS_ONSTACK);
-}
-
-TEST(SigaltstackTest, ResetByExecve) {
- std::vector<char> stack_mem(SIGSTKSZ);
- stack_t stack = {};
- stack.ss_sp = stack_mem.data();
- stack.ss_size = stack_mem.size();
- auto const cleanup_sigstack =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaltstack(stack));
-
- std::string full_path = RunfilePath("test/syscalls/linux/sigaltstack_check");
-
- pid_t child_pid = -1;
- int execve_errno = 0;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec(full_path, {"sigaltstack_check"}, {}, nullptr, &child_pid,
- &execve_errno));
-
- ASSERT_GT(child_pid, 0);
- ASSERT_EQ(execve_errno, 0);
-
- int status = 0;
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0), SyscallSucceeds());
- ASSERT_TRUE(WIFEXITED(status));
- ASSERT_EQ(WEXITSTATUS(status), 0);
-}
-
-volatile bool badhandler_on_sigaltstack = true; // Set by the handler.
-char* volatile badhandler_low_water_mark = nullptr; // Set by the handler.
-volatile uint8_t badhandler_recursive_faults = 0; // Consumed by the handler.
-
-void badhandler(int sig, siginfo_t* siginfo, void* arg) {
- char stack_var = 0;
- char* current_ss = &stack_var;
-
- stack_t stack;
- int ret = sigaltstack(nullptr, &stack);
- if (ret < 0 || (stack.ss_flags & SS_ONSTACK) != SS_ONSTACK) {
- // We should always be marked as being on the stack. Don't allow this to hit
- // the bottom if this is ever not true (the main test will fail as a
- // result, but we still need to unwind the recursive faults).
- badhandler_on_sigaltstack = false;
- }
- if (current_ss < badhandler_low_water_mark) {
- // Record the low point for the signal stack. We never expect this to go
- // below the stack bottom; that is asserted in the actual test.
- badhandler_low_water_mark = current_ss;
- }
- if (badhandler_recursive_faults > 0) {
- badhandler_recursive_faults--;
- Fault();
- }
- FixupFault(reinterpret_cast<ucontext_t*>(arg));
-}
-
-TEST(SigaltstackTest, WalksOffBottom) {
- // This test marks the upper half of the stack_mem array as the signal stack.
- // It asserts that when a fault occurs in the handler (already on the signal
- // stack), we eventually continue to fault our way off the stack. We should
- // not revert to the top of the signal stack when we fall off the bottom and
- // the signal stack should remain "in use". When we fall off the signal stack,
- // we should have an unconditional signal delivered and not start using the
- // first part of the stack_mem array.
- std::vector<char> stack_mem(SIGSTKSZ * 2);
- stack_t stack = {};
- stack.ss_sp = stack_mem.data() + SIGSTKSZ; // See above: upper half.
- stack.ss_size = SIGSTKSZ; // Only one half the array.
- auto const cleanup_sigstack =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaltstack(stack));
-
- // Setup the handler: this must be for SIGSEGV, and it must allow proper
- // nesting (no signal mask, no defer) so that we can trigger multiple times.
- //
- // When we walk off the bottom of the signal stack and force signal delivery
- // of a SIGSEGV, the handler will revert to the default behavior (kill).
- struct sigaction sa = {};
- sa.sa_sigaction = badhandler;
- sa.sa_flags = SA_SIGINFO | SA_ONSTACK | SA_NODEFER;
- auto const cleanup_sa =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGSEGV, sa));
-
- // Trigger a single fault.
- badhandler_low_water_mark =
- static_cast<char*>(stack.ss_sp) + SIGSTKSZ; // Expected top.
- badhandler_recursive_faults = 0; // Disable refault.
- Fault();
- EXPECT_TRUE(badhandler_on_sigaltstack);
- EXPECT_THAT(sigaltstack(nullptr, &stack), SyscallSucceeds());
- EXPECT_EQ(stack.ss_flags & SS_ONSTACK, 0);
- EXPECT_LT(badhandler_low_water_mark,
- reinterpret_cast<char*>(stack.ss_sp) + 2 * SIGSTKSZ);
- EXPECT_GT(badhandler_low_water_mark, reinterpret_cast<char*>(stack.ss_sp));
-
- // Trigger two faults.
- char* prev_low_water_mark = badhandler_low_water_mark; // Previous top.
- badhandler_recursive_faults = 1; // One refault.
- Fault();
- ASSERT_TRUE(badhandler_on_sigaltstack);
- EXPECT_THAT(sigaltstack(nullptr, &stack), SyscallSucceeds());
- EXPECT_EQ(stack.ss_flags & SS_ONSTACK, 0);
- EXPECT_LT(badhandler_low_water_mark, prev_low_water_mark);
- EXPECT_GT(badhandler_low_water_mark, reinterpret_cast<char*>(stack.ss_sp));
-
- // Calculate the stack growth for one fault, and set the recursive fault
- // count so that the stack required by the signal handlers exceeds our marked
- // stack area by a minimal amount. It should remain within the valid
- // stack_mem area, so that we test that the signal is forced merely by going
- // out of the signal stack bounds, not by a genuine fault.
- uintptr_t frame_size =
- static_cast<uintptr_t>(prev_low_water_mark - badhandler_low_water_mark);
- badhandler_recursive_faults = (SIGSTKSZ + frame_size) / frame_size;
- EXPECT_EXIT(Fault(), ::testing::KilledBySignal(SIGSEGV), "");
-}
-
-volatile int setonstack_retval = 0; // Set by the handler.
-volatile int setonstack_errno = 0; // Set by the handler.
-
-void setonstack(int sig, siginfo_t* siginfo, void* arg) {
- char stack_mem[SIGSTKSZ];
- stack_t stack = {};
- stack.ss_sp = &stack_mem[0];
- stack.ss_size = SIGSTKSZ;
- setonstack_retval = sigaltstack(&stack, nullptr);
- setonstack_errno = errno;
- FixupFault(reinterpret_cast<ucontext_t*>(arg));
-}
-
-TEST(SigaltstackTest, SetWhileOnStack) {
- // Reserve twice as much stack here, since the handler will allocate a
- // SIGSTKSZ-sized buffer on its stack and attempt to set the sigaltstack to it.
- std::vector<char> stack_mem(2 * SIGSTKSZ);
- stack_t stack = {};
- stack.ss_sp = stack_mem.data();
- stack.ss_size = stack_mem.size();
- auto const cleanup_sigstack =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaltstack(stack));
-
- // See above.
- struct sigaction sa = {};
- sa.sa_sigaction = setonstack;
- sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
- auto const cleanup_sa =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGSEGV, sa));
-
- // Trigger a fault.
- Fault();
-
- // The set should have failed.
- EXPECT_EQ(setonstack_retval, -1);
- EXPECT_EQ(setonstack_errno, EPERM);
-}
-
-TEST(SigaltstackTest, SetCurrentStack) {
- // This is executed as an exit test because once the signal stack is set to
- // the local stack, there's no good way to unwind. We don't want to taint
- // any other tests that might run within this process.
- EXPECT_EXIT(
- {
- char stack_value = 0;
- stack_t stack = {};
- stack.ss_sp = &stack_value - kPageSize; // Lower than current level.
- stack.ss_size = 2 * kPageSize; // => &stack_value +/- kPageSize.
- TEST_CHECK(sigaltstack(&stack, nullptr) == 0);
- TEST_CHECK(sigaltstack(nullptr, &stack) == 0);
- TEST_CHECK((stack.ss_flags & SS_ONSTACK) != 0);
-
- // Should not be able to change the stack (even no-op).
- TEST_CHECK(sigaltstack(&stack, nullptr) == -1 && errno == EPERM);
-
- // Should not be able to disable the stack.
- stack.ss_flags = SS_DISABLE;
- TEST_CHECK(sigaltstack(&stack, nullptr) == -1 && errno == EPERM);
- exit(0);
- },
- ::testing::ExitedWithCode(0), "");
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
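
Most of the sigaltstack.cc tests above build on the same basic pattern: install an alternate stack with sigaltstack(2), register a handler with SA_ONSTACK, and then inspect ss_flags from inside the handler. A minimal standalone sketch of that pattern follows; it is an illustration only, not part of the deleted test, and assumes Linux/glibc.

    #include <signal.h>
    #include <stdio.h>
    #include <vector>

    volatile sig_atomic_t handler_ss_flags = 0;

    void OnSignal(int sig, siginfo_t* info, void* ctx) {
      stack_t current;
      if (sigaltstack(nullptr, &current) == 0) {
        handler_ss_flags = current.ss_flags;  // Expect SS_ONSTACK to be set.
      }
    }

    int main() {
      // Install the alternate stack.
      std::vector<char> stack_mem(SIGSTKSZ);
      stack_t ss = {};
      ss.ss_sp = stack_mem.data();
      ss.ss_size = stack_mem.size();
      if (sigaltstack(&ss, nullptr) != 0) {
        perror("sigaltstack");
        return 1;
      }

      // SA_ONSTACK makes the handler run on the alternate stack.
      struct sigaction sa = {};
      sa.sa_sigaction = OnSignal;
      sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
      sigfillset(&sa.sa_mask);
      if (sigaction(SIGUSR1, &sa, nullptr) != 0) {
        perror("sigaction");
        return 1;
      }

      raise(SIGUSR1);
      printf("ss_flags inside handler: %d (SS_ONSTACK = %d)\n",
             (int)handler_ss_flags, SS_ONSTACK);
      return 0;
    }
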
diff --git a/test/syscalls/linux/sigaltstack_check.cc b/test/syscalls/linux/sigaltstack_check.cc
deleted file mode 100644
index 5ac1b661d..000000000
--- a/test/syscalls/linux/sigaltstack_check.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Checks that there is no alternate signal stack by default.
-//
-// Used by a test in sigaltstack.cc.
-#include <errno.h>
-#include <signal.h>
-#include <stdio.h>
-#include <string.h>
-#include <unistd.h>
-
-#include "test/util/logging.h"
-
-int main(int /* argc */, char** /* argv */) {
- stack_t stack;
- TEST_CHECK(sigaltstack(nullptr, &stack) >= 0);
- TEST_CHECK(stack.ss_flags == SS_DISABLE);
- TEST_CHECK(stack.ss_sp == 0);
- TEST_CHECK(stack.ss_size == 0);
- return 0;
-}
diff --git a/test/syscalls/linux/signalfd.cc b/test/syscalls/linux/signalfd.cc
deleted file mode 100644
index c86cd2755..000000000
--- a/test/syscalls/linux/signalfd.cc
+++ /dev/null
@@ -1,373 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <poll.h>
-#include <signal.h>
-#include <stdio.h>
-#include <string.h>
-#include <sys/signalfd.h>
-#include <unistd.h>
-
-#include <functional>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "absl/synchronization/mutex.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/posix_error.h"
-#include "test/util/signal_util.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-using ::testing::KilledBySignal;
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-constexpr int kSigno = SIGUSR1;
-constexpr int kSignoMax = 64; // SIGRTMAX
-constexpr int kSignoAlt = SIGUSR2;
-
-// Returns a new signalfd.
-inline PosixErrorOr<FileDescriptor> NewSignalFD(sigset_t* mask, int flags = 0) {
- int fd = signalfd(-1, mask, flags);
- MaybeSave();
- if (fd < 0) {
- return PosixError(errno, "signalfd");
- }
- return FileDescriptor(fd);
-}
-
-class SignalfdTest : public ::testing::TestWithParam<int> {};
-
-TEST_P(SignalfdTest, Basic) {
- int signo = GetParam();
- // Create the signalfd.
- sigset_t mask;
- sigemptyset(&mask);
- sigaddset(&mask, signo);
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(NewSignalFD(&mask, 0));
-
- // Deliver the blocked signal.
- const auto scoped_sigmask =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_BLOCK, signo));
- ASSERT_THAT(tgkill(getpid(), gettid(), signo), SyscallSucceeds());
-
- // We should now read the signal.
- struct signalfd_siginfo rbuf;
- ASSERT_THAT(read(fd.get(), &rbuf, sizeof(rbuf)),
- SyscallSucceedsWithValue(sizeof(rbuf)));
- EXPECT_EQ(rbuf.ssi_signo, signo);
-}
-
-TEST_P(SignalfdTest, MaskWorks) {
- int signo = GetParam();
- // Create two signalfds with different masks.
- sigset_t mask1, mask2;
- sigemptyset(&mask1);
- sigemptyset(&mask2);
- sigaddset(&mask1, signo);
- sigaddset(&mask2, kSignoAlt);
- FileDescriptor fd1 = ASSERT_NO_ERRNO_AND_VALUE(NewSignalFD(&mask1, 0));
- FileDescriptor fd2 = ASSERT_NO_ERRNO_AND_VALUE(NewSignalFD(&mask2, 0));
-
- // Deliver the two signals.
- const auto scoped_sigmask1 =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_BLOCK, signo));
- const auto scoped_sigmask2 =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_BLOCK, kSignoAlt));
- ASSERT_THAT(tgkill(getpid(), gettid(), signo), SyscallSucceeds());
- ASSERT_THAT(tgkill(getpid(), gettid(), kSignoAlt), SyscallSucceeds());
-
- // We should see the signals on the appropriate signalfds.
- //
- // We read in the opposite order to the delivery above, so that a passing
- // result cannot simply be an artifact of reading in delivery order.
- struct signalfd_siginfo rbuf1, rbuf2;
- ASSERT_THAT(read(fd2.get(), &rbuf2, sizeof(rbuf2)),
- SyscallSucceedsWithValue(sizeof(rbuf2)));
- EXPECT_EQ(rbuf2.ssi_signo, kSignoAlt);
- ASSERT_THAT(read(fd1.get(), &rbuf1, sizeof(rbuf1)),
- SyscallSucceedsWithValue(sizeof(rbuf1)));
- EXPECT_EQ(rbuf1.ssi_signo, signo);
-}
-
-TEST(Signalfd, Cloexec) {
- // Exec tests confirm that O_CLOEXEC has the intended effect. We just create a
- // signalfd with the appropriate flag here and assert that the FD has it set.
- sigset_t mask;
- sigemptyset(&mask);
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(NewSignalFD(&mask, SFD_CLOEXEC));
- EXPECT_THAT(fcntl(fd.get(), F_GETFD), SyscallSucceedsWithValue(FD_CLOEXEC));
-}
-
-TEST_P(SignalfdTest, Blocking) {
- int signo = GetParam();
- // Create the signalfd in blocking mode.
- sigset_t mask;
- sigemptyset(&mask);
- sigaddset(&mask, signo);
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(NewSignalFD(&mask, 0));
-
- // Shared tid variable.
- absl::Mutex mu;
- bool has_tid = false;
- pid_t tid;
-
- // Start a thread reading.
- ScopedThread t([&] {
- // Copy the tid and notify the caller.
- {
- absl::MutexLock ml(&mu);
- tid = gettid();
- has_tid = true;
- }
-
- // Read the signal from the signalfd.
- struct signalfd_siginfo rbuf;
- ASSERT_THAT(read(fd.get(), &rbuf, sizeof(rbuf)),
- SyscallSucceedsWithValue(sizeof(rbuf)));
- EXPECT_EQ(rbuf.ssi_signo, signo);
- });
-
- // Wait until blocked.
- absl::MutexLock ml(&mu);
- mu.Await(absl::Condition(&has_tid));
-
- // Deliver the signal to this thread (on gVisor) or to the waiting thread
- // (on Linux). N.B. needing this distinction is a bug in the core gVisor
- // behavior for signalfd, and needs to be fixed.
- //
- // See gvisor.dev/issue/139.
- if (IsRunningOnGvisor()) {
- ASSERT_THAT(tgkill(getpid(), gettid(), signo), SyscallSucceeds());
- } else {
- ASSERT_THAT(tgkill(getpid(), tid, signo), SyscallSucceeds());
- }
-
- // Ensure that it was received.
- t.Join();
-}
-
-TEST_P(SignalfdTest, ThreadGroup) {
- int signo = GetParam();
- // Create the signalfd in blocking mode.
- sigset_t mask;
- sigemptyset(&mask);
- sigaddset(&mask, signo);
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(NewSignalFD(&mask, 0));
-
- // Shared variable.
- absl::Mutex mu;
- bool first = false;
- bool second = false;
-
- // Start a thread reading.
- ScopedThread t([&] {
- // Read the signal from the signalfd.
- struct signalfd_siginfo rbuf;
- ASSERT_THAT(read(fd.get(), &rbuf, sizeof(rbuf)),
- SyscallSucceedsWithValue(sizeof(rbuf)));
- EXPECT_EQ(rbuf.ssi_signo, signo);
-
- // Wait for the other thread.
- absl::MutexLock ml(&mu);
- first = true;
- mu.Await(absl::Condition(&second));
- });
-
- // Deliver the signal to the threadgroup.
- ASSERT_THAT(kill(getpid(), signo), SyscallSucceeds());
-
- // Wait for the first thread to process.
- {
- absl::MutexLock ml(&mu);
- mu.Await(absl::Condition(&first));
- }
-
- // Deliver to the thread group again (other thread still exists).
- ASSERT_THAT(kill(getpid(), signo), SyscallSucceeds());
-
- // Ensure that we can also receive it.
- struct signalfd_siginfo rbuf;
- ASSERT_THAT(read(fd.get(), &rbuf, sizeof(rbuf)),
- SyscallSucceedsWithValue(sizeof(rbuf)));
- EXPECT_EQ(rbuf.ssi_signo, signo);
-
- // Mark the test as done.
- {
- absl::MutexLock ml(&mu);
- second = true;
- }
-
- // The other thread should be joinable.
- t.Join();
-}
-
-TEST_P(SignalfdTest, Nonblock) {
- int signo = GetParam();
- // Create the signalfd in non-blocking mode.
- sigset_t mask;
- sigemptyset(&mask);
- sigaddset(&mask, signo);
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(NewSignalFD(&mask, SFD_NONBLOCK));
-
- // We should return if we attempt to read.
- struct signalfd_siginfo rbuf;
- ASSERT_THAT(read(fd.get(), &rbuf, sizeof(rbuf)),
- SyscallFailsWithErrno(EWOULDBLOCK));
-
- // Block and deliver the signal.
- const auto scoped_sigmask =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_BLOCK, signo));
- ASSERT_THAT(tgkill(getpid(), gettid(), signo), SyscallSucceeds());
-
- // Ensure that a read actually works.
- ASSERT_THAT(read(fd.get(), &rbuf, sizeof(rbuf)),
- SyscallSucceedsWithValue(sizeof(rbuf)));
- EXPECT_EQ(rbuf.ssi_signo, signo);
-
- // Should block again.
- EXPECT_THAT(read(fd.get(), &rbuf, sizeof(rbuf)),
- SyscallFailsWithErrno(EWOULDBLOCK));
-}
-
-TEST_P(SignalfdTest, SetMask) {
- int signo = GetParam();
- // Create the signalfd matching nothing.
- sigset_t mask;
- sigemptyset(&mask);
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(NewSignalFD(&mask, SFD_NONBLOCK));
-
- // Block and deliver a signal.
- const auto scoped_sigmask =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_BLOCK, signo));
- ASSERT_THAT(tgkill(getpid(), gettid(), signo), SyscallSucceeds());
-
- // We should have nothing.
- struct signalfd_siginfo rbuf;
- ASSERT_THAT(read(fd.get(), &rbuf, sizeof(rbuf)),
- SyscallFailsWithErrno(EWOULDBLOCK));
-
- // Change the signal mask.
- sigaddset(&mask, signo);
- ASSERT_THAT(signalfd(fd.get(), &mask, 0), SyscallSucceeds());
-
- // We should now have the signal.
- ASSERT_THAT(read(fd.get(), &rbuf, sizeof(rbuf)),
- SyscallSucceedsWithValue(sizeof(rbuf)));
- EXPECT_EQ(rbuf.ssi_signo, signo);
-}
-
-TEST_P(SignalfdTest, Poll) {
- int signo = GetParam();
- // Create the signalfd.
- sigset_t mask;
- sigemptyset(&mask);
- sigaddset(&mask, signo);
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(NewSignalFD(&mask, 0));
-
- // Block the signal, and start a thread to deliver it.
- const auto scoped_sigmask =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_BLOCK, signo));
- pid_t orig_tid = gettid();
- ScopedThread t([&] {
- absl::SleepFor(absl::Seconds(5));
- ASSERT_THAT(tgkill(getpid(), orig_tid, signo), SyscallSucceeds());
- });
-
- // Start polling for the signal. We expect that it is not available at the
- // outset, but then becomes available when the signal is sent. We give a
- // timeout of 10000ms (or the delay above + 5 seconds of additional grace
- // time).
- struct pollfd poll_fd = {fd.get(), POLLIN, 0};
- EXPECT_THAT(RetryEINTR(poll)(&poll_fd, 1, 10000),
- SyscallSucceedsWithValue(1));
-
- // Actually read the signal to prevent delivery.
- struct signalfd_siginfo rbuf;
- EXPECT_THAT(read(fd.get(), &rbuf, sizeof(rbuf)),
- SyscallSucceedsWithValue(sizeof(rbuf)));
-}
-
-std::string PrintSigno(::testing::TestParamInfo<int> info) {
- switch (info.param) {
- case kSigno:
- return "kSigno";
- case kSignoMax:
- return "kSignoMax";
- default:
- return absl::StrCat(info.param);
- }
-}
-INSTANTIATE_TEST_SUITE_P(Signalfd, SignalfdTest,
- ::testing::Values(kSigno, kSignoMax), PrintSigno);
-
-TEST(Signalfd, Ppoll) {
- sigset_t mask;
- sigemptyset(&mask);
- sigaddset(&mask, SIGKILL);
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(NewSignalFD(&mask, SFD_CLOEXEC));
-
- // Ensure that the given ppoll blocks.
- struct pollfd pfd = {};
- pfd.fd = fd.get();
- pfd.events = POLLIN;
- struct timespec timeout = {};
- timeout.tv_sec = 1;
- EXPECT_THAT(RetryEINTR(ppoll)(&pfd, 1, &timeout, &mask),
- SyscallSucceedsWithValue(0));
-}
-
-TEST(Signalfd, KillStillKills) {
- sigset_t mask;
- sigemptyset(&mask);
- sigaddset(&mask, SIGKILL);
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(NewSignalFD(&mask, SFD_CLOEXEC));
-
- // Just because there is a signalfd, we shouldn't see any change in behavior
- // for unblockable signals. It's easier to test this with SIGKILL.
- const auto scoped_sigmask =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_BLOCK, SIGKILL));
- EXPECT_EXIT(tgkill(getpid(), gettid(), SIGKILL), KilledBySignal(SIGKILL), "");
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
-
-int main(int argc, char** argv) {
- // These tests depend on delivering signals. Block them up front so that all
- // other threads created by TestInit will also have them blocked, and they
- // will not interfere with the rest of the test.
- sigset_t set;
- sigemptyset(&set);
- sigaddset(&set, gvisor::testing::kSigno);
- sigaddset(&set, gvisor::testing::kSignoMax);
- sigaddset(&set, gvisor::testing::kSignoAlt);
- TEST_PCHECK(sigprocmask(SIG_BLOCK, &set, nullptr) == 0);
-
- gvisor::testing::TestInit(&argc, &argv);
-
- return gvisor::testing::RunAllTests();
-}
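
The signalfd tests above all follow the same core sequence: block the signal of interest, create a signalfd for it, deliver the signal, and read a signalfd_siginfo from the descriptor. A minimal standalone sketch of that sequence follows; it is an illustration only, not part of the deleted test, and assumes Linux.

    #include <signal.h>
    #include <stdio.h>
    #include <sys/signalfd.h>
    #include <unistd.h>

    int main() {
      sigset_t mask;
      sigemptyset(&mask);
      sigaddset(&mask, SIGUSR1);

      // The signal must be blocked, otherwise it is delivered normally instead
      // of staying queued for the signalfd.
      if (sigprocmask(SIG_BLOCK, &mask, nullptr) != 0) {
        perror("sigprocmask");
        return 1;
      }

      int fd = signalfd(-1, &mask, SFD_CLOEXEC);
      if (fd < 0) {
        perror("signalfd");
        return 1;
      }

      raise(SIGUSR1);  // Queue the blocked signal.

      struct signalfd_siginfo ssi;
      ssize_t n = read(fd, &ssi, sizeof(ssi));
      if (n == (ssize_t)sizeof(ssi)) {
        printf("read signal %u from signalfd\n", ssi.ssi_signo);
      }
      close(fd);
      return 0;
    }
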
diff --git a/test/syscalls/linux/sigprocmask.cc b/test/syscalls/linux/sigprocmask.cc
deleted file mode 100644
index a603fc1d1..000000000
--- a/test/syscalls/linux/sigprocmask.cc
+++ /dev/null
@@ -1,269 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <signal.h>
-#include <stddef.h>
-#include <sys/syscall.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "test/util/signal_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// Signals numbers used for testing.
-static constexpr int kTestSignal1 = SIGUSR1;
-static constexpr int kTestSignal2 = SIGUSR2;
-
-static int raw_sigprocmask(int how, const sigset_t* set, sigset_t* oldset) {
- return syscall(SYS_rt_sigprocmask, how, set, oldset, _NSIG / 8);
-}
-
-// count of the number of signals received
-int signal_count[kMaxSignal + 1];
-
-// signal handler increments the signal counter
-void SigHandler(int sig, siginfo_t* info, void* context) {
- TEST_CHECK(sig > 0 && sig <= kMaxSignal);
- signal_count[sig] += 1;
-}
-
-// The test fixture saves and restores the signal mask and
-// sets up handlers for kTestSignal1 and kTestSignal2.
-class SigProcMaskTest : public ::testing::Test {
- protected:
- void SetUp() override {
- // Save the current signal mask.
- EXPECT_THAT(raw_sigprocmask(SIG_SETMASK, nullptr, &mask_),
- SyscallSucceeds());
-
- // Setup signal handlers for kTestSignal1 and kTestSignal2.
- struct sigaction sa;
- sa.sa_sigaction = SigHandler;
- sigfillset(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO;
- EXPECT_THAT(sigaction(kTestSignal1, &sa, &sa_test_sig_1_),
- SyscallSucceeds());
- EXPECT_THAT(sigaction(kTestSignal2, &sa, &sa_test_sig_2_),
- SyscallSucceeds());
-
- // Clear the signal counters.
- memset(signal_count, 0, sizeof(signal_count));
- }
-
- void TearDown() override {
- // Restore the signal mask.
- EXPECT_THAT(raw_sigprocmask(SIG_SETMASK, &mask_, nullptr),
- SyscallSucceeds());
-
- // Restore the signal handlers for kTestSignal1 and kTestSignal2.
- EXPECT_THAT(sigaction(kTestSignal1, &sa_test_sig_1_, nullptr),
- SyscallSucceeds());
- EXPECT_THAT(sigaction(kTestSignal2, &sa_test_sig_2_, nullptr),
- SyscallSucceeds());
- }
-
- private:
- sigset_t mask_;
- struct sigaction sa_test_sig_1_;
- struct sigaction sa_test_sig_2_;
-};
-
-// Passing nullptr for both sigsets should succeed and do nothing.
-TEST_F(SigProcMaskTest, NullAddress) {
- EXPECT_THAT(raw_sigprocmask(SIG_BLOCK, nullptr, NULL), SyscallSucceeds());
- EXPECT_THAT(raw_sigprocmask(SIG_UNBLOCK, nullptr, NULL), SyscallSucceeds());
- EXPECT_THAT(raw_sigprocmask(SIG_SETMASK, nullptr, NULL), SyscallSucceeds());
-}
-
-// Bad address for either sigset should fail with EFAULT.
-TEST_F(SigProcMaskTest, BadAddress) {
- sigset_t* bad_addr = reinterpret_cast<sigset_t*>(-1);
-
- EXPECT_THAT(raw_sigprocmask(SIG_SETMASK, bad_addr, nullptr),
- SyscallFailsWithErrno(EFAULT));
-
- EXPECT_THAT(raw_sigprocmask(SIG_SETMASK, nullptr, bad_addr),
- SyscallFailsWithErrno(EFAULT));
-}
-
-// Bad value of the "how" parameter should fail with EINVAL.
-TEST_F(SigProcMaskTest, BadParameter) {
- int bad_param_1 = -1;
- int bad_param_2 = 42;
-
- sigset_t set1;
- sigemptyset(&set1);
-
- EXPECT_THAT(raw_sigprocmask(bad_param_1, &set1, nullptr),
- SyscallFailsWithErrno(EINVAL));
-
- EXPECT_THAT(raw_sigprocmask(bad_param_2, &set1, nullptr),
- SyscallFailsWithErrno(EINVAL));
-}
-
-// Check that we can get the current signal mask.
-TEST_F(SigProcMaskTest, GetMask) {
- sigset_t set1;
- sigset_t set2;
-
- sigemptyset(&set1);
- sigfillset(&set2);
- EXPECT_THAT(raw_sigprocmask(SIG_SETMASK, nullptr, &set1), SyscallSucceeds());
- EXPECT_THAT(raw_sigprocmask(SIG_SETMASK, nullptr, &set2), SyscallSucceeds());
- EXPECT_THAT(set1, EqualsSigset(set2));
-}
-
-// Check that we can set the signal mask.
-TEST_F(SigProcMaskTest, SetMask) {
- sigset_t actual;
- sigset_t expected;
-
- // Try to mask all signals
- sigfillset(&expected);
- EXPECT_THAT(raw_sigprocmask(SIG_SETMASK, &expected, nullptr),
- SyscallSucceeds());
- EXPECT_THAT(raw_sigprocmask(SIG_SETMASK, nullptr, &actual),
- SyscallSucceeds());
- // sigprocmask() should have silently ignored SIGKILL and SIGSTOP.
- sigdelset(&expected, SIGSTOP);
- sigdelset(&expected, SIGKILL);
- EXPECT_THAT(actual, EqualsSigset(expected));
-
- // Try to clear the signal mask
- sigemptyset(&expected);
- EXPECT_THAT(raw_sigprocmask(SIG_SETMASK, &expected, nullptr),
- SyscallSucceeds());
- EXPECT_THAT(raw_sigprocmask(SIG_SETMASK, nullptr, &actual),
- SyscallSucceeds());
- EXPECT_THAT(actual, EqualsSigset(expected));
-
- // Try to set a mask with one signal.
- sigemptyset(&expected);
- sigaddset(&expected, kTestSignal1);
- EXPECT_THAT(raw_sigprocmask(SIG_SETMASK, &expected, nullptr),
- SyscallSucceeds());
- EXPECT_THAT(raw_sigprocmask(SIG_SETMASK, nullptr, &actual),
- SyscallSucceeds());
- EXPECT_THAT(actual, EqualsSigset(expected));
-}
-
-// Check that we can add and remove signals.
-TEST_F(SigProcMaskTest, BlockUnblock) {
- sigset_t actual;
- sigset_t expected;
-
- // Try to set a mask with one signal.
- sigemptyset(&expected);
- sigaddset(&expected, kTestSignal1);
- EXPECT_THAT(raw_sigprocmask(SIG_SETMASK, &expected, nullptr),
- SyscallSucceeds());
- EXPECT_THAT(raw_sigprocmask(SIG_SETMASK, nullptr, &actual),
- SyscallSucceeds());
- EXPECT_THAT(actual, EqualsSigset(expected));
-
- // Try to add another signal.
- sigset_t block;
- sigemptyset(&block);
- sigaddset(&block, kTestSignal2);
- EXPECT_THAT(raw_sigprocmask(SIG_BLOCK, &block, nullptr), SyscallSucceeds());
- EXPECT_THAT(raw_sigprocmask(SIG_SETMASK, nullptr, &actual),
- SyscallSucceeds());
- sigaddset(&expected, kTestSignal2);
- EXPECT_THAT(actual, EqualsSigset(expected));
-
- // Try to remove a signal.
- sigset_t unblock;
- sigemptyset(&unblock);
- sigaddset(&unblock, kTestSignal1);
- EXPECT_THAT(raw_sigprocmask(SIG_UNBLOCK, &unblock, nullptr),
- SyscallSucceeds());
- EXPECT_THAT(raw_sigprocmask(SIG_SETMASK, nullptr, &actual),
- SyscallSucceeds());
- sigdelset(&expected, kTestSignal1);
- EXPECT_THAT(actual, EqualsSigset(expected));
-}
-
-// Test that the signal mask actually blocks signals.
-TEST_F(SigProcMaskTest, SignalHandler) {
- sigset_t mask;
-
- // clear the signal mask
- sigemptyset(&mask);
- EXPECT_THAT(raw_sigprocmask(SIG_SETMASK, &mask, nullptr), SyscallSucceeds());
-
- // Check the initial signal counts.
- EXPECT_EQ(0, signal_count[kTestSignal1]);
- EXPECT_EQ(0, signal_count[kTestSignal2]);
-
- // Check that both kTestSignal1 and kTestSignal2 are not blocked.
- raise(kTestSignal1);
- raise(kTestSignal2);
- EXPECT_EQ(1, signal_count[kTestSignal1]);
- EXPECT_EQ(1, signal_count[kTestSignal2]);
-
- // Block kTestSignal1.
- sigaddset(&mask, kTestSignal1);
- EXPECT_THAT(raw_sigprocmask(SIG_BLOCK, &mask, nullptr), SyscallSucceeds());
-
- // Check that kTestSignal1 is blocked.
- raise(kTestSignal1);
- raise(kTestSignal2);
- EXPECT_EQ(1, signal_count[kTestSignal1]);
- EXPECT_EQ(2, signal_count[kTestSignal2]);
-
- // Unblock kTestSignal1.
- sigaddset(&mask, kTestSignal1);
- EXPECT_THAT(raw_sigprocmask(SIG_UNBLOCK, &mask, nullptr), SyscallSucceeds());
-
- // Check that the unblocked kTestSignal1 has been delivered.
- EXPECT_EQ(2, signal_count[kTestSignal1]);
- EXPECT_EQ(2, signal_count[kTestSignal2]);
-}
-
-// Check that sigprocmask correctly handles aliasing of the set and oldset
-// pointers. Regression test for b/30502311.
-TEST_F(SigProcMaskTest, AliasedSets) {
- sigset_t mask;
-
- // Set a mask in which only kTestSignal1 is blocked.
- sigset_t mask1;
- sigemptyset(&mask1);
- sigaddset(&mask1, kTestSignal1);
- mask = mask1;
- ASSERT_THAT(raw_sigprocmask(SIG_SETMASK, &mask, nullptr), SyscallSucceeds());
-
- // Exchange it with a mask in which only kTestSignal2 is blocked.
- sigset_t mask2;
- sigemptyset(&mask2);
- sigaddset(&mask2, kTestSignal2);
- mask = mask2;
- ASSERT_THAT(raw_sigprocmask(SIG_SETMASK, &mask, &mask), SyscallSucceeds());
-
- // Check that the exchange succeeded:
- // mask should now contain the previously-set mask blocking only kTestSignal1.
- EXPECT_THAT(mask, EqualsSigset(mask1));
- // The current mask should block only kTestSignal2.
- ASSERT_THAT(raw_sigprocmask(0, nullptr, &mask), SyscallSucceeds());
- EXPECT_THAT(mask, EqualsSigset(mask2));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
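
The SetMask test above notes that the kernel silently drops SIGKILL and SIGSTOP from a requested signal mask. A minimal standalone sketch of that behavior via the same raw rt_sigprocmask(2) call follows; it is an illustration only, not part of the deleted test, and assumes 64-bit Linux where the kernel sigset is _NSIG / 8 bytes.

    #include <signal.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    long RawSigprocmask(int how, const sigset_t* set, sigset_t* oldset) {
      return syscall(SYS_rt_sigprocmask, how, set, oldset, _NSIG / 8);
    }

    int main() {
      sigset_t all, actual;
      sigfillset(&all);
      // Ask to block everything.
      if (RawSigprocmask(SIG_SETMASK, &all, nullptr) != 0) {
        perror("rt_sigprocmask(SIG_SETMASK)");
        return 1;
      }
      // Read the mask back; SIGKILL and SIGSTOP were silently ignored.
      if (RawSigprocmask(SIG_SETMASK, nullptr, &actual) != 0) {
        perror("rt_sigprocmask(get)");
        return 1;
      }
      printf("SIGKILL blocked: %d, SIGSTOP blocked: %d (both expected 0)\n",
             sigismember(&actual, SIGKILL), sigismember(&actual, SIGSTOP));
      return 0;
    }
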
diff --git a/test/syscalls/linux/sigreturn_amd64.cc b/test/syscalls/linux/sigreturn_amd64.cc
deleted file mode 100644
index 6227774a4..000000000
--- a/test/syscalls/linux/sigreturn_amd64.cc
+++ /dev/null
@@ -1,136 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <signal.h>
-#include <sys/types.h>
-#include <sys/ucontext.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "test/util/logging.h"
-#include "test/util/signal_util.h"
-#include "test/util/test_util.h"
-#include "test/util/timer_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-constexpr uint64_t kOrigRcx = 0xdeadbeeffacefeed;
-constexpr uint64_t kOrigR11 = 0xfacefeedbaad1dea;
-
-volatile int gotvtalrm, ready;
-
-void sigvtalrm(int sig, siginfo_t* siginfo, void* _uc) {
- ucontext_t* uc = reinterpret_cast<ucontext_t*>(_uc);
-
- // Verify that:
- // - test is in the busy-wait loop waiting for signal.
- // - %rcx and %r11 values in mcontext_t match kOrigRcx and kOrigR11.
- if (ready &&
- static_cast<uint64_t>(uc->uc_mcontext.gregs[REG_RCX]) == kOrigRcx &&
- static_cast<uint64_t>(uc->uc_mcontext.gregs[REG_R11]) == kOrigR11) {
- // Modify the values %rcx and %r11 in the ucontext. These are the
- // values seen by the application after the signal handler returns.
- uc->uc_mcontext.gregs[REG_RCX] = ~kOrigRcx;
- uc->uc_mcontext.gregs[REG_R11] = ~kOrigR11;
- gotvtalrm = 1;
- }
-}
-
-TEST(SigIretTest, CheckRcxR11) {
- // Setup signal handler for SIGVTALRM.
- struct sigaction sa = {};
- sigfillset(&sa.sa_mask);
- sa.sa_sigaction = sigvtalrm;
- sa.sa_flags = SA_SIGINFO;
- auto const action_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGVTALRM, sa));
-
- auto const mask_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_UNBLOCK, SIGVTALRM));
-
- // Setup itimer to fire after 500 msecs.
- struct itimerval itimer = {};
- itimer.it_value.tv_usec = 500 * 1000; // 500 msecs.
- auto const timer_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedItimer(ITIMER_VIRTUAL, itimer));
-
- // Initialize %rcx and %r11 and spin until the signal handler returns.
- uint64_t rcx = kOrigRcx;
- uint64_t r11 = kOrigR11;
- asm volatile(
- "movq %[rcx], %%rcx;" // %rcx = rcx
- "movq %[r11], %%r11;" // %r11 = r11
- "movl $1, %[ready];" // ready = 1
- "1: pause; cmpl $0, %[gotvtalrm]; je 1b;" // while (!gotvtalrm);
- "movq %%rcx, %[rcx];" // rcx = %rcx
- "movq %%r11, %[r11];" // r11 = %r11
- : [ ready ] "=m"(ready), [ rcx ] "+m"(rcx), [ r11 ] "+m"(r11)
- : [ gotvtalrm ] "m"(gotvtalrm)
- : "cc", "memory", "rcx", "r11");
-
- // If sigreturn(2) returns via 'sysret' then %rcx and %r11 will be
- // clobbered and set to 'ptregs->rip' and 'ptregs->rflags' respectively.
- //
- // The following check verifies that %rcx and %r11 were not clobbered
- // when returning from the signal handler (via sigreturn(2)).
- EXPECT_EQ(rcx, ~kOrigRcx);
- EXPECT_EQ(r11, ~kOrigR11);
-}
-
-constexpr uint64_t kNonCanonicalRip = 0xCCCC000000000000;
-
-// Test that a non-canonical signal handler faults as expected.
-TEST(SigIretTest, BadHandler) {
- struct sigaction sa = {};
- sa.sa_sigaction =
- reinterpret_cast<void (*)(int, siginfo_t*, void*)>(kNonCanonicalRip);
- auto const cleanup = ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGUSR1, sa));
-
- pid_t pid = fork();
- if (pid == 0) {
- // Child, wait for signal.
- while (1) {
- pause();
- }
- }
- ASSERT_THAT(pid, SyscallSucceeds());
-
- EXPECT_THAT(kill(pid, SIGUSR1), SyscallSucceeds());
-
- int status;
- EXPECT_THAT(waitpid(pid, &status, 0), SyscallSucceedsWithValue(pid));
- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGSEGV)
- << "status = " << status;
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
-
-int main(int argc, char** argv) {
- // SigIretTest.CheckRcxR11 depends on delivering SIGVTALRM to the main thread.
- // Block SIGVTALRM so that any other threads created by TestInit will also
- // have SIGVTALRM blocked.
- sigset_t set;
- sigemptyset(&set);
- sigaddset(&set, SIGVTALRM);
- TEST_PCHECK(sigprocmask(SIG_BLOCK, &set, nullptr) == 0);
-
- gvisor::testing::TestInit(&argc, &argv);
- return gvisor::testing::RunAllTests();
-}
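
The CheckRcxR11 test above depends on a SA_SIGINFO handler being able to read and rewrite general-purpose registers through the ucontext_t it receives, with sigreturn(2) restoring whatever the handler leaves there. A minimal standalone sketch of that mcontext access (read-only, for safety) follows; it is an illustration only, not part of the deleted test, and assumes x86-64 Linux compiled as C++, where REG_RIP and REG_RCX are visible via <sys/ucontext.h>.

    #include <signal.h>
    #include <stdio.h>
    #include <sys/ucontext.h>

    volatile unsigned long long interrupted_rip = 0;
    volatile unsigned long long interrupted_rcx = 0;

    void OnSignal(int sig, siginfo_t* info, void* uc_raw) {
      ucontext_t* uc = static_cast<ucontext_t*>(uc_raw);
      // These are the register values the interrupted code resumes with; the
      // deleted test additionally rewrites them, and sigreturn(2) restores
      // whatever the handler leaves in the ucontext.
      interrupted_rip = uc->uc_mcontext.gregs[REG_RIP];
      interrupted_rcx = uc->uc_mcontext.gregs[REG_RCX];
    }

    int main() {
      struct sigaction sa = {};
      sa.sa_sigaction = OnSignal;
      sa.sa_flags = SA_SIGINFO;
      sigfillset(&sa.sa_mask);
      sigaction(SIGUSR1, &sa, nullptr);
      raise(SIGUSR1);
      printf("interrupted at rip=%#llx, rcx=%#llx\n", interrupted_rip,
             interrupted_rcx);
      return 0;
    }
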
diff --git a/test/syscalls/linux/sigreturn_arm64.cc b/test/syscalls/linux/sigreturn_arm64.cc
deleted file mode 100644
index 2c19e2984..000000000
--- a/test/syscalls/linux/sigreturn_arm64.cc
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <linux/unistd.h>
-#include <signal.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <sys/ucontext.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "test/util/logging.h"
-#include "test/util/signal_util.h"
-#include "test/util/test_util.h"
-#include "test/util/timer_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-constexpr uint64_t kOrigX7 = 0xdeadbeeffacefeed;
-
-void sigvtalrm(int sig, siginfo_t* siginfo, void* _uc) {
- ucontext_t* uc = reinterpret_cast<ucontext_t*>(_uc);
-
- // Verify that:
- // - x7 value in mcontext_t matches kOrigX7.
- if (uc->uc_mcontext.regs[7] == kOrigX7) {
- // Modify the value x7 in the ucontext. This is the value seen by the
- // application after the signal handler returns.
- uc->uc_mcontext.regs[7] = ~kOrigX7;
- }
-}
-
-int testX7(uint64_t* val, uint64_t sysno, uint64_t tgid, uint64_t tid,
- uint64_t signo) {
- register uint64_t* x9 __asm__("x9") = val;
- register uint64_t x8 __asm__("x8") = sysno;
- register uint64_t x0 __asm__("x0") = tgid;
- register uint64_t x1 __asm__("x1") = tid;
- register uint64_t x2 __asm__("x2") = signo;
-
- // Initialize x7, send SIGVTALRM to itself and read x7.
- __asm__(
- "ldr x7, [x9, 0]\n"
- "svc 0\n"
- "str x7, [x9, 0]\n"
- : "=r"(x0)
- : "r"(x0), "r"(x1), "r"(x2), "r"(x9), "r"(x8)
- : "x7");
- return x0;
-}
-
-// On ARM64, when ptrace stops on a system call, it uses the x7 register to
-// indicate whether the stop has been signalled from syscall entry or syscall
-// exit. This means that a tracer can neither reliably read nor change the
-// value of this register at these stops. More details are in the comment for
-// tracehook_report_syscall in arch/arm64/kernel/ptrace.c.
-//
-// CheckX7 checks that the ptrace platform handles the x7 register properly.
-TEST(SigreturnTest, CheckX7) {
- // Setup signal handler for SIGVTALRM.
- struct sigaction sa = {};
- sigfillset(&sa.sa_mask);
- sa.sa_sigaction = sigvtalrm;
- sa.sa_flags = SA_SIGINFO;
- auto const action_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGVTALRM, sa));
-
- auto const mask_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_UNBLOCK, SIGVTALRM));
-
- uint64_t x7 = kOrigX7;
-
- testX7(&x7, __NR_tgkill, getpid(), syscall(__NR_gettid), SIGVTALRM);
-
- // The following check verifies that %x7 was not clobbered
- // when returning from the signal handler (via sigreturn(2)).
- EXPECT_EQ(x7, ~kOrigX7);
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
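
The CheckX7 test above uses the same mechanism on arm64, where the handler inspects x7 through uc_mcontext.regs[7]. A minimal standalone sketch of that access follows; it is an illustration only, not part of the deleted test, and assumes aarch64 Linux/glibc.

    #include <signal.h>
    #include <stdio.h>
    #include <sys/ucontext.h>

    volatile unsigned long long interrupted_x7 = 0;
    volatile unsigned long long interrupted_pc = 0;

    void OnSignal(int sig, siginfo_t* info, void* uc_raw) {
      ucontext_t* uc = static_cast<ucontext_t*>(uc_raw);
      // On aarch64, mcontext_t exposes the general registers as regs[0..30]
      // plus sp and pc; these values are restored by sigreturn(2).
      interrupted_x7 = uc->uc_mcontext.regs[7];
      interrupted_pc = uc->uc_mcontext.pc;
    }

    int main() {
      struct sigaction sa = {};
      sa.sa_sigaction = OnSignal;
      sa.sa_flags = SA_SIGINFO;
      sigfillset(&sa.sa_mask);
      sigaction(SIGUSR1, &sa, nullptr);
      raise(SIGUSR1);
      printf("interrupted at pc=%#llx, x7=%#llx\n", interrupted_pc,
             interrupted_x7);
      return 0;
    }
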
diff --git a/test/syscalls/linux/sigstop.cc b/test/syscalls/linux/sigstop.cc
deleted file mode 100644
index 8de03b4dc..000000000
--- a/test/syscalls/linux/sigstop.cc
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <signal.h>
-#include <stdlib.h>
-#include <sys/select.h>
-
-#include "gtest/gtest.h"
-#include "absl/flags/flag.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/util/multiprocess_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-ABSL_FLAG(bool, sigstop_test_child, false,
- "If true, run the SigstopTest child workload.");
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-constexpr absl::Duration kChildStartupDelay = absl::Seconds(5);
-constexpr absl::Duration kChildMainThreadDelay = absl::Seconds(10);
-constexpr absl::Duration kChildExtraThreadDelay = absl::Seconds(15);
-constexpr absl::Duration kPostSIGSTOPDelay = absl::Seconds(20);
-
-// Comparisons on absl::Duration aren't yet constexpr (2017-07-14), so we
-// can't just use static_assert.
-TEST(SigstopTest, TimesAreRelativelyConsistent) {
- EXPECT_LT(kChildStartupDelay, kChildMainThreadDelay)
- << "Child process will exit before the parent process attempts to stop "
- "it";
- EXPECT_LT(kChildMainThreadDelay, kChildExtraThreadDelay)
- << "Secondary thread in child process will exit before main thread, "
- "causing it to exit with the wrong code";
- EXPECT_LT(kChildExtraThreadDelay, kPostSIGSTOPDelay)
- << "Parent process stops waiting before child process may exit if "
- "improperly stopped, rendering the test ineffective";
-}
-
-// Exit codes communicated from the child workload to the parent test process.
-constexpr int kChildMainThreadExitCode = 10;
-constexpr int kChildExtraThreadExitCode = 11;
-
-TEST(SigstopTest, Correctness) {
- pid_t child_pid = -1;
- int execve_errno = 0;
- auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
- ForkAndExec("/proc/self/exe", {"/proc/self/exe", "--sigstop_test_child"},
- {}, nullptr, &child_pid, &execve_errno));
-
- ASSERT_GT(child_pid, 0);
- ASSERT_EQ(execve_errno, 0);
-
- // Wait for the child subprocess to start the second thread before stopping
- // it.
- absl::SleepFor(kChildStartupDelay);
- ASSERT_THAT(kill(child_pid, SIGSTOP), SyscallSucceeds());
- int status;
- EXPECT_THAT(RetryEINTR(waitpid)(child_pid, &status, WUNTRACED),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFSTOPPED(status));
- EXPECT_EQ(SIGSTOP, WSTOPSIG(status));
-
- // Sleep for longer than either of the sleeps in the child subprocess,
- // expecting the child to stay alive because it's stopped.
- absl::SleepFor(kPostSIGSTOPDelay);
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, WNOHANG),
- SyscallSucceedsWithValue(0));
-
- // Resume the child.
- ASSERT_THAT(kill(child_pid, SIGCONT), SyscallSucceeds());
-
- EXPECT_THAT(RetryEINTR(waitpid)(child_pid, &status, WCONTINUED),
- SyscallSucceedsWithValue(child_pid));
- EXPECT_TRUE(WIFCONTINUED(status));
-
- // Expect it to die.
- ASSERT_THAT(RetryEINTR(waitpid)(child_pid, &status, 0), SyscallSucceeds());
- ASSERT_TRUE(WIFEXITED(status));
- ASSERT_EQ(WEXITSTATUS(status), kChildMainThreadExitCode);
-}
-
-// Like absl::SleepFor, but tries to avoid counting time spent stopped due to a
-// stop signal toward the sleep.
-//
-// This is required due to an inconsistency in how nanosleep(2) and stop signals
-// interact on Linux. When nanosleep is interrupted, it writes the remaining
-// time back to its second timespec argument, so that if nanosleep is
-// interrupted by a signal handler then userspace can immediately call nanosleep
-// again with that timespec. However, if nanosleep is automatically restarted
-// (because it's interrupted by a signal that is not delivered to a handler,
-// such as a stop signal), it's restarted based on the timer's former *absolute*
-// expiration time (via ERESTART_RESTARTBLOCK => SYS_restart_syscall =>
-// hrtimer_nanosleep_restart). This means that time spent stopped is effectively
-// counted as time spent sleeping, resulting in less time spent sleeping than
-// expected.
-//
-// Dividing the sleep into multiple smaller sleeps limits the impact of this
-// effect to the length of each sleep during which a stop occurs; for example,
-// if a sleeping process is only stopped once, SleepIgnoreStopped can
-// under-sleep by at most 100ms.
-void SleepIgnoreStopped(absl::Duration d) {
- absl::Duration const max_sleep = absl::Milliseconds(100);
- while (d > absl::ZeroDuration()) {
- absl::Duration to_sleep = std::min(d, max_sleep);
- absl::SleepFor(to_sleep);
- d -= to_sleep;
- }
-}
-
-TEST(SigstopTest, RestartSyscall) {
- pid_t pid;
- constexpr absl::Duration kStopDelay = absl::Seconds(5);
- constexpr absl::Duration kSleepDelay = absl::Seconds(15);
- constexpr absl::Duration kStartupDelay = absl::Seconds(5);
- constexpr absl::Duration kErrorDelay = absl::Seconds(3);
-
- const DisableSave ds; // Timing-related.
-
- pid = fork();
- if (pid == 0) {
- struct timespec ts = {.tv_sec = kSleepDelay / absl::Seconds(1)};
- auto start = absl::Now();
- TEST_CHECK(nanosleep(&ts, nullptr) == 0);
- auto finish = absl::Now();
- // Check that time spent stopped is counted as time spent sleeping.
- TEST_CHECK(finish - start >= kSleepDelay);
- TEST_CHECK(finish - start < kSleepDelay + kErrorDelay);
- _exit(kChildMainThreadExitCode);
- }
- ASSERT_THAT(pid, SyscallSucceeds());
-
- // Wait for the child subprocess to start sleeping before stopping it.
- absl::SleepFor(kStartupDelay);
- ASSERT_THAT(kill(pid, SIGSTOP), SyscallSucceeds());
- int status;
- EXPECT_THAT(RetryEINTR(waitpid)(pid, &status, WUNTRACED),
- SyscallSucceedsWithValue(pid));
- EXPECT_TRUE(WIFSTOPPED(status));
- EXPECT_EQ(SIGSTOP, WSTOPSIG(status));
-
- // Sleep for shorter than the sleep in the child subprocess.
- absl::SleepFor(kStopDelay);
- ASSERT_THAT(RetryEINTR(waitpid)(pid, &status, WNOHANG),
- SyscallSucceedsWithValue(0));
-
- // Resume the child.
- ASSERT_THAT(kill(pid, SIGCONT), SyscallSucceeds());
-
- EXPECT_THAT(RetryEINTR(waitpid)(pid, &status, WCONTINUED),
- SyscallSucceedsWithValue(pid));
- EXPECT_TRUE(WIFCONTINUED(status));
-
- // Expect it to die.
- ASSERT_THAT(RetryEINTR(waitpid)(pid, &status, 0), SyscallSucceeds());
- ASSERT_TRUE(WIFEXITED(status));
- ASSERT_EQ(WEXITSTATUS(status), kChildMainThreadExitCode);
-}
-
-void RunChild() {
- // Start another thread that attempts to call exit_group with a different
- // error code, in order to verify that SIGSTOP stops this thread as well.
- ScopedThread t([] {
- SleepIgnoreStopped(kChildExtraThreadDelay);
- exit(kChildExtraThreadExitCode);
- });
- SleepIgnoreStopped(kChildMainThreadDelay);
- exit(kChildMainThreadExitCode);
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
-
-int main(int argc, char** argv) {
- gvisor::testing::TestInit(&argc, &argv);
-
- if (absl::GetFlag(FLAGS_sigstop_test_child)) {
- gvisor::testing::RunChild();
- return 1;
- }
-
- return gvisor::testing::RunAllTests();
-}
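
The Correctness test above drives the standard job-control sequence from the parent: SIGSTOP, waitpid(WUNTRACED) to observe the stop, SIGCONT, waitpid(WCONTINUED) to observe the resume, then a final waitpid for the exit status. A minimal standalone sketch of that sequence follows; it is an illustration only, not part of the deleted test, and assumes Linux.

    #include <signal.h>
    #include <stdio.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main() {
      pid_t pid = fork();
      if (pid == 0) {
        sleep(3);  // Child just sleeps; the parent stops and resumes it.
        _exit(7);
      }
      if (pid < 0) {
        perror("fork");
        return 1;
      }

      int status;
      // Stop the child and wait for the stop notification.
      kill(pid, SIGSTOP);
      waitpid(pid, &status, WUNTRACED);
      printf("stopped: %d (signal %d)\n", WIFSTOPPED(status), WSTOPSIG(status));

      // Resume it and wait for the continue notification.
      kill(pid, SIGCONT);
      waitpid(pid, &status, WCONTINUED);
      printf("continued: %d\n", WIFCONTINUED(status));

      // Finally reap the normal exit.
      waitpid(pid, &status, 0);
      printf("exited: %d (code %d)\n", WIFEXITED(status), WEXITSTATUS(status));
      return 0;
    }
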
diff --git a/test/syscalls/linux/sigtimedwait.cc b/test/syscalls/linux/sigtimedwait.cc
deleted file mode 100644
index 21651a697..000000000
--- a/test/syscalls/linux/sigtimedwait.cc
+++ /dev/null
@@ -1,323 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sys/wait.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/logging.h"
-#include "test/util/signal_util.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-#include "test/util/timer_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// N.B. main() blocks SIGALRM and SIGCHLD on all threads.
-
-constexpr int kAlarmSecs = 12;
-
-void NoopHandler(int sig, siginfo_t* info, void* context) {}
-
-TEST(SigtimedwaitTest, InvalidTimeout) {
- sigset_t mask;
- sigemptyset(&mask);
- struct timespec timeout = {0, 1000000001};
- EXPECT_THAT(sigtimedwait(&mask, nullptr, &timeout),
- SyscallFailsWithErrno(EINVAL));
- timeout = {-1, 0};
- EXPECT_THAT(sigtimedwait(&mask, nullptr, &timeout),
- SyscallFailsWithErrno(EINVAL));
- timeout = {0, -1};
- EXPECT_THAT(sigtimedwait(&mask, nullptr, &timeout),
- SyscallFailsWithErrno(EINVAL));
-}
-
-// No random save as the test relies on alarm timing. Cooperative save tests
-// already cover the save between alarm and wait.
-TEST(SigtimedwaitTest, AlarmReturnsAlarm) {
- struct itimerval itv = {};
- itv.it_value.tv_sec = kAlarmSecs;
- const auto itimer_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedItimer(ITIMER_REAL, itv));
-
- sigset_t mask;
- sigemptyset(&mask);
- sigaddset(&mask, SIGALRM);
- siginfo_t info = {};
- EXPECT_THAT(RetryEINTR(sigtimedwait)(&mask, &info, nullptr),
- SyscallSucceedsWithValue(SIGALRM));
- EXPECT_EQ(SIGALRM, info.si_signo);
-}
-
-// No random save as the test relies on alarm timing. Cooperative save tests
-// already cover the save between alarm and wait.
-TEST(SigtimedwaitTest, NullTimeoutReturnsEINTR) {
- struct sigaction sa;
- sa.sa_sigaction = NoopHandler;
- sigfillset(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO;
- const auto action_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGALRM, sa));
-
- const auto mask_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_UNBLOCK, SIGALRM));
-
- struct itimerval itv = {};
- itv.it_value.tv_sec = kAlarmSecs;
- const auto itimer_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedItimer(ITIMER_REAL, itv));
-
- sigset_t mask;
- sigemptyset(&mask);
- EXPECT_THAT(sigtimedwait(&mask, nullptr, nullptr),
- SyscallFailsWithErrno(EINTR));
-}
-
-TEST(SigtimedwaitTest, LegitTimeoutReturnsEAGAIN) {
- sigset_t mask;
- sigemptyset(&mask);
- struct timespec timeout = {1, 0}; // 1 second
- EXPECT_THAT(RetryEINTR(sigtimedwait)(&mask, nullptr, &timeout),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-TEST(SigtimedwaitTest, ZeroTimeoutReturnsEAGAIN) {
- sigset_t mask;
- sigemptyset(&mask);
- struct timespec timeout = {0, 0}; // 0 seconds
- EXPECT_THAT(sigtimedwait(&mask, nullptr, &timeout),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-TEST(SigtimedwaitTest, KillGeneratedSIGCHLD) {
- EXPECT_THAT(kill(getpid(), SIGCHLD), SyscallSucceeds());
-
- sigset_t mask;
- sigemptyset(&mask);
- sigaddset(&mask, SIGCHLD);
- struct timespec ts = {5, 0};
- EXPECT_THAT(RetryEINTR(sigtimedwait)(&mask, nullptr, &ts),
- SyscallSucceedsWithValue(SIGCHLD));
-}
-
-TEST(SigtimedwaitTest, ChildExitGeneratedSIGCHLD) {
- pid_t pid = fork();
- if (pid == 0) {
- _exit(0);
- }
- ASSERT_THAT(pid, SyscallSucceeds());
-
- int status;
- EXPECT_THAT(waitpid(pid, &status, 0), SyscallSucceedsWithValue(pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0) << status;
-
- sigset_t mask;
- sigemptyset(&mask);
- sigaddset(&mask, SIGCHLD);
- struct timespec ts = {5, 0};
- EXPECT_THAT(RetryEINTR(sigtimedwait)(&mask, nullptr, &ts),
- SyscallSucceedsWithValue(SIGCHLD));
-}
-
-TEST(SigtimedwaitTest, ChildExitGeneratedSIGCHLDWithHandler) {
- // Setup handler for SIGCHLD, but don't unblock it.
- struct sigaction sa;
- sa.sa_sigaction = NoopHandler;
- sigfillset(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO;
- const auto action_cleanup =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGCHLD, sa));
-
- pid_t pid = fork();
- if (pid == 0) {
- _exit(0);
- }
- ASSERT_THAT(pid, SyscallSucceeds());
-
- sigset_t mask;
- sigemptyset(&mask);
- sigaddset(&mask, SIGCHLD);
- struct timespec ts = {5, 0};
- EXPECT_THAT(RetryEINTR(sigtimedwait)(&mask, nullptr, &ts),
- SyscallSucceedsWithValue(SIGCHLD));
-
- int status;
- EXPECT_THAT(waitpid(pid, &status, 0), SyscallSucceedsWithValue(pid));
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0) << status;
-}
-
-// sigtimedwait cannot catch SIGKILL.
-TEST(SigtimedwaitTest, SIGKILLUncaught) {
- // This is a regression test for sigtimedwait dequeuing SIGKILLs, thus
- // preventing the task from exiting.
- //
- // The explanation below is specific to behavior in gVisor. The Linux behavior
- // here is irrelevant because without a bug that prevents delivery of SIGKILL,
- // none of this behavior is visible (in Linux or gVisor).
- //
- // SIGKILL is rather intrusive. Simply sending the SIGKILL marks
- // ThreadGroup.exitStatus as exiting with SIGKILL, before the SIGKILL is even
- // delivered.
- //
- // As a result, we cannot simply exit the child with a different exit code if
- // it survives and expect to see that code in waitpid because:
- // 1. PrepareGroupExit will override Task.exitStatus with
- // ThreadGroup.exitStatus.
- // 2. waitpid(2) will always return ThreadGroup.exitStatus rather than
- // Task.exitStatus.
- //
- // We could use exit(2) to set Task.exitStatus without override, and a SIGCHLD
- // handler to receive Task.exitStatus in the parent, but with that much
- // test complexity, it is cleaner to simply use a pipe to notify the parent
- // that we survived.
- constexpr auto kSigtimedwaitSetupTime = absl::Seconds(2);
-
- int pipe_fds[2];
- ASSERT_THAT(pipe(pipe_fds), SyscallSucceeds());
- FileDescriptor rfd(pipe_fds[0]);
- FileDescriptor wfd(pipe_fds[1]);
-
- pid_t pid = fork();
- if (pid == 0) {
- rfd.reset();
-
- sigset_t mask;
- sigemptyset(&mask);
- sigaddset(&mask, SIGKILL);
- RetryEINTR(sigtimedwait)(&mask, nullptr, nullptr);
-
- // Survived.
- char c = 'a';
- TEST_PCHECK(WriteFd(wfd.get(), &c, 1) == 1);
- _exit(1);
- }
- ASSERT_THAT(pid, SyscallSucceeds());
-
- wfd.reset();
-
- // Wait for child to block in sigtimedwait, then kill it.
- absl::SleepFor(kSigtimedwaitSetupTime);
-
- // Sending SIGKILL will attempt to enqueue the signal twice: once in the
- // normal signal sending path, and once to all Tasks in the ThreadGroup when
- // applying SIGKILL side-effects.
- //
- // If we use kill(2), the former will be on the ThreadGroup signal queue and
- // the latter will be on the Task signal queue. sigtimedwait can only dequeue
- // one signal, so the other would kill the Task, masking bugs.
- //
- // If we use tkill(2), the former will be on the Task signal queue and the
- // latter will be dropped as a duplicate. Then sigtimedwait can theoretically
- // dequeue the single SIGKILL.
- EXPECT_THAT(syscall(SYS_tkill, pid, SIGKILL), SyscallSucceeds());
-
- int status;
- EXPECT_THAT(RetryEINTR(waitpid)(pid, &status, 0),
- SyscallSucceedsWithValue(pid));
- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL) << status;
-
- // Child shouldn't have survived.
- char c;
- EXPECT_THAT(ReadFd(rfd.get(), &c, 1), SyscallSucceedsWithValue(0));
-}
-
-TEST(SigtimedwaitTest, IgnoredUnmaskedSignal) {
- constexpr int kSigno = SIGUSR1;
- constexpr auto kSigtimedwaitSetupTime = absl::Seconds(2);
- constexpr auto kSigtimedwaitTimeout = absl::Seconds(5);
- ASSERT_GT(kSigtimedwaitTimeout, kSigtimedwaitSetupTime);
-
- // Ensure that kSigno is ignored, and unmasked on this thread.
- struct sigaction sa = {};
- sa.sa_handler = SIG_IGN;
- const auto scoped_sigaction =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(kSigno, sa));
- sigset_t mask;
- sigemptyset(&mask);
- sigaddset(&mask, kSigno);
- auto scoped_sigmask =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_UNBLOCK, mask));
-
- // Create a thread which will send us kSigno while we are blocked in
- // sigtimedwait.
- pid_t tid = gettid();
- ScopedThread sigthread([&] {
- absl::SleepFor(kSigtimedwaitSetupTime);
- EXPECT_THAT(tgkill(getpid(), tid, kSigno), SyscallSucceeds());
- });
-
- // sigtimedwait should not observe kSigno since it is ignored and already
- // unmasked, causing it to be dropped before it is enqueued.
- struct timespec timeout_ts = absl::ToTimespec(kSigtimedwaitTimeout);
- EXPECT_THAT(RetryEINTR(sigtimedwait)(&mask, nullptr, &timeout_ts),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-TEST(SigtimedwaitTest, IgnoredMaskedSignal) {
- constexpr int kSigno = SIGUSR1;
- constexpr auto kSigtimedwaitSetupTime = absl::Seconds(2);
- constexpr auto kSigtimedwaitTimeout = absl::Seconds(5);
- ASSERT_GT(kSigtimedwaitTimeout, kSigtimedwaitSetupTime);
-
- // Ensure that kSigno is ignored, and masked on this thread.
- struct sigaction sa = {};
- sa.sa_handler = SIG_IGN;
- const auto scoped_sigaction =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(kSigno, sa));
- sigset_t mask;
- sigemptyset(&mask);
- sigaddset(&mask, kSigno);
- auto scoped_sigmask =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_BLOCK, mask));
-
- // Create a thread which will send us kSigno while we are blocked in
- // sigtimedwait.
- pid_t tid = gettid();
- ScopedThread sigthread([&] {
- absl::SleepFor(kSigtimedwaitSetupTime);
- EXPECT_THAT(tgkill(getpid(), tid, kSigno), SyscallSucceeds());
- });
-
- // sigtimedwait should observe kSigno since it is normally masked, causing it
- // to be enqueued despite being ignored.
- struct timespec timeout_ts = absl::ToTimespec(kSigtimedwaitTimeout);
- EXPECT_THAT(RetryEINTR(sigtimedwait)(&mask, nullptr, &timeout_ts),
- SyscallSucceedsWithValue(kSigno));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
-
-int main(int argc, char** argv) {
-  // These tests depend on SIGALRM/SIGCHLD being delivered to the main thread
-  // or consumed by it in sigtimedwait. Block them so that any other threads
-  // created by TestInit will also have them blocked.
- sigset_t set;
- sigemptyset(&set);
- sigaddset(&set, SIGALRM);
- sigaddset(&set, SIGCHLD);
- TEST_PCHECK(sigprocmask(SIG_BLOCK, &set, nullptr) == 0);
-
- gvisor::testing::TestInit(&argc, &argv);
- return gvisor::testing::RunAllTests();
-}
diff --git a/test/syscalls/linux/socket.cc b/test/syscalls/linux/socket.cc
deleted file mode 100644
index 2742d19be..000000000
--- a/test/syscalls/linux/socket.cc
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sys/socket.h>
-#include <sys/stat.h>
-#include <sys/statfs.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/temp_umask.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// From linux/magic.h, but we can't depend on linux headers here.
-#define SOCKFS_MAGIC 0x534F434B
-
-TEST(SocketTest, UnixSocketPairProtocol) {
- int socks[2];
- ASSERT_THAT(socketpair(AF_UNIX, SOCK_STREAM, PF_UNIX, socks),
- SyscallSucceeds());
- close(socks[0]);
- close(socks[1]);
-}
-
-TEST(SocketTest, ProtocolUnix) {
- struct {
- int domain, type, protocol;
- } tests[] = {
- {AF_UNIX, SOCK_STREAM, PF_UNIX},
- {AF_UNIX, SOCK_SEQPACKET, PF_UNIX},
- {AF_UNIX, SOCK_DGRAM, PF_UNIX},
- };
- for (size_t i = 0; i < ABSL_ARRAYSIZE(tests); i++) {
- ASSERT_NO_ERRNO_AND_VALUE(
- Socket(tests[i].domain, tests[i].type, tests[i].protocol));
- }
-}
-
-TEST(SocketTest, ProtocolInet) {
- struct {
- int domain, type, protocol;
- } tests[] = {
- {AF_INET, SOCK_DGRAM, IPPROTO_UDP},
- {AF_INET, SOCK_STREAM, IPPROTO_TCP},
- };
- for (size_t i = 0; i < ABSL_ARRAYSIZE(tests); i++) {
- ASSERT_NO_ERRNO_AND_VALUE(
- Socket(tests[i].domain, tests[i].type, tests[i].protocol));
- }
-}
-
-TEST(SocketTest, UnixSocketStat) {
- SKIP_IF(IsRunningWithVFS1());
-
- FileDescriptor bound =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_UNIX, SOCK_STREAM, PF_UNIX));
-
- // The permissions of the file created with bind(2) should be defined by the
- // permissions of the bound socket and the umask.
- mode_t sock_perm = 0765, mask = 0123;
- ASSERT_THAT(fchmod(bound.get(), sock_perm), SyscallSucceeds());
- TempUmask m(mask);
-
- struct sockaddr_un addr =
- ASSERT_NO_ERRNO_AND_VALUE(UniqueUnixAddr(/*abstract=*/false, AF_UNIX));
- ASSERT_THAT(bind(bound.get(), reinterpret_cast<struct sockaddr*>(&addr),
- sizeof(addr)),
- SyscallSucceeds());
-
- struct stat statbuf = {};
- ASSERT_THAT(stat(addr.sun_path, &statbuf), SyscallSucceeds());
-
- // Mode should be S_IFSOCK.
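-  // With sock_perm 0765 and mask 0123, the expected permission bits below are
-  // 0765 & ~0123 = 0644.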
- EXPECT_EQ(statbuf.st_mode, S_IFSOCK | (sock_perm & ~mask));
-
- // Timestamps should be equal and non-zero.
- if (!IsRunningWithVFS1()) {
- EXPECT_NE(statbuf.st_atime, 0);
- EXPECT_EQ(statbuf.st_atime, statbuf.st_mtime);
- EXPECT_EQ(statbuf.st_atime, statbuf.st_ctime);
- }
-}
-
-TEST(SocketTest, UnixSocketStatFS) {
- SKIP_IF(IsRunningWithVFS1());
-
- FileDescriptor bound =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_UNIX, SOCK_STREAM, PF_UNIX));
-
- struct statfs st;
- EXPECT_THAT(fstatfs(bound.get(), &st), SyscallSucceeds());
- EXPECT_EQ(st.f_type, SOCKFS_MAGIC);
- EXPECT_EQ(st.f_bsize, getpagesize());
- EXPECT_EQ(st.f_namelen, NAME_MAX);
-}
-
-TEST(SocketTest, UnixSCMRightsOnlyPassedOnce) {
- const DisableSave ds;
-
- int sockets[2];
- ASSERT_THAT(socketpair(AF_UNIX, SOCK_STREAM, 0, sockets), SyscallSucceeds());
-  // Send more than will fit in the send/receive buffers, so that the data is
-  // delivered across multiple recvmsg() calls.
- constexpr int kBufSize = 0x100000;
- // Heap allocation is async-signal-unsafe and thus cannot occur between fork()
- // and execve().
- std::vector<char> buf(kBufSize);
-
- pid_t pid = fork();
- if (pid == 0) {
- TEST_PCHECK(close(sockets[0]) == 0);
-
- // Construct a message with some control message.
- struct msghdr msg = {};
- char control[CMSG_SPACE(sizeof(int))] = {};
- struct iovec iov = {};
- msg.msg_control = control;
- msg.msg_controllen = sizeof(control);
-
- struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
- cmsg->cmsg_len = CMSG_LEN(sizeof(int));
- cmsg->cmsg_level = SOL_SOCKET;
- cmsg->cmsg_type = SCM_RIGHTS;
- ((int*)CMSG_DATA(cmsg))[0] = sockets[1];
-
- iov.iov_base = buf.data();
- iov.iov_len = kBufSize;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- int n = sendmsg(sockets[1], &msg, 0);
- TEST_PCHECK(n == kBufSize);
- TEST_PCHECK(shutdown(sockets[1], SHUT_RDWR) == 0);
- TEST_PCHECK(close(sockets[1]) == 0);
- _exit(0);
- }
-
- close(sockets[1]);
-
- struct msghdr msg = {};
- char control[CMSG_SPACE(sizeof(int))] = {};
- struct iovec iov = {};
- msg.msg_control = &control;
- msg.msg_controllen = sizeof(control);
-
- iov.iov_base = buf.data();
- iov.iov_len = kBufSize;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- // The control message should only be present in the first message received.
- int n;
- ASSERT_THAT(n = recvmsg(sockets[0], &msg, 0), SyscallSucceeds());
- ASSERT_GT(n, 0);
- ASSERT_EQ(msg.msg_controllen, CMSG_SPACE(sizeof(int)));
-
- while (n > 0) {
- ASSERT_THAT(n = recvmsg(sockets[0], &msg, 0), SyscallSucceeds());
- ASSERT_EQ(msg.msg_controllen, 0);
- }
-
- close(sockets[0]);
-
- int status;
- ASSERT_THAT(waitpid(pid, &status, 0), SyscallSucceedsWithValue(pid));
- ASSERT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0);
-}
-
-using SocketOpenTest = ::testing::TestWithParam<int>;
-
-// UDS cannot be opened.
-TEST_P(SocketOpenTest, Unix) {
- // FIXME(b/142001530): Open incorrectly succeeds on gVisor.
- SKIP_IF(IsRunningWithVFS1());
-
- FileDescriptor bound =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_UNIX, SOCK_STREAM, PF_UNIX));
-
- struct sockaddr_un addr =
- ASSERT_NO_ERRNO_AND_VALUE(UniqueUnixAddr(/*abstract=*/false, AF_UNIX));
-
- ASSERT_THAT(bind(bound.get(), reinterpret_cast<struct sockaddr*>(&addr),
- sizeof(addr)),
- SyscallSucceeds());
-
- EXPECT_THAT(open(addr.sun_path, GetParam()), SyscallFailsWithErrno(ENXIO));
-}
-
-INSTANTIATE_TEST_SUITE_P(OpenModes, SocketOpenTest,
- ::testing::Values(O_RDONLY, O_RDWR));
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_abstract.cc b/test/syscalls/linux/socket_abstract.cc
deleted file mode 100644
index 00999f192..000000000
--- a/test/syscalls/linux/socket_abstract.cc
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <vector>
-
-#include "test/syscalls/linux/socket_generic.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/socket_unix.h"
-#include "test/syscalls/linux/socket_unix_cmsg.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-std::vector<SocketPairKind> GetSocketPairs() {
- return ApplyVec<SocketPairKind>(
- AbstractBoundUnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_STREAM, SOCK_DGRAM, SOCK_SEQPACKET},
- List<int>{0, SOCK_NONBLOCK}));
-}
-
-INSTANTIATE_TEST_SUITE_P(
- AbstractUnixSockets, AllSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-INSTANTIATE_TEST_SUITE_P(
- AbstractUnixSockets, UnixSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-INSTANTIATE_TEST_SUITE_P(
- AbstractUnixSockets, UnixSocketPairCmsgTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_bind_to_device.cc b/test/syscalls/linux/socket_bind_to_device.cc
deleted file mode 100644
index 6b27f6eab..000000000
--- a/test/syscalls/linux/socket_bind_to_device.cc
+++ /dev/null
@@ -1,313 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <arpa/inet.h>
-#include <linux/if_tun.h>
-#include <net/if.h>
-#include <netinet/in.h>
-#include <sys/ioctl.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <sys/un.h>
-
-#include <cstdio>
-#include <cstring>
-#include <map>
-#include <memory>
-#include <unordered_map>
-#include <unordered_set>
-#include <utility>
-#include <vector>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_bind_to_device_util.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/capability_util.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-using std::string;
-
-// Test fixture for SO_BINDTODEVICE tests.
-class BindToDeviceTest : public ::testing::TestWithParam<SocketKind> {
- protected:
- void SetUp() override {
- printf("Testing case: %s\n", GetParam().description.c_str());
- ASSERT_TRUE(ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)))
- << "CAP_NET_RAW is required to use SO_BINDTODEVICE";
-
- interface_name_ = "eth1";
- auto interface_names = GetInterfaceNames();
- if (interface_names.find(interface_name_) == interface_names.end()) {
- // Need a tunnel.
- tunnel_ = ASSERT_NO_ERRNO_AND_VALUE(Tunnel::New());
- interface_name_ = tunnel_->GetName();
- ASSERT_FALSE(interface_name_.empty());
- }
- socket_ = ASSERT_NO_ERRNO_AND_VALUE(GetParam().Create());
- }
-
- string interface_name() const { return interface_name_; }
-
- int socket_fd() const { return socket_->get(); }
-
- private:
- std::unique_ptr<Tunnel> tunnel_;
- string interface_name_;
- std::unique_ptr<FileDescriptor> socket_;
-};
-
-constexpr char kIllegalIfnameChar = '/';
-
-// Tests getsockopt of the default value.
-TEST_P(BindToDeviceTest, GetsockoptDefault) {
- char name_buffer[IFNAMSIZ * 2];
- char original_name_buffer[IFNAMSIZ * 2];
- socklen_t name_buffer_size;
-
- // Read the default SO_BINDTODEVICE.
-  memset(original_name_buffer, kIllegalIfnameChar,
-         sizeof(original_name_buffer));
- for (size_t i = 0; i <= sizeof(name_buffer); i++) {
- memset(name_buffer, kIllegalIfnameChar, sizeof(name_buffer));
- name_buffer_size = i;
- EXPECT_THAT(getsockopt(socket_fd(), SOL_SOCKET, SO_BINDTODEVICE,
- name_buffer, &name_buffer_size),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(name_buffer_size, 0);
- EXPECT_EQ(memcmp(name_buffer, original_name_buffer, sizeof(name_buffer)),
- 0);
- }
-}
-
-// Tests setsockopt of invalid device name.
-TEST_P(BindToDeviceTest, SetsockoptInvalidDeviceName) {
- char name_buffer[IFNAMSIZ * 2];
- socklen_t name_buffer_size;
-
- // Set an invalid device name.
- memset(name_buffer, kIllegalIfnameChar, 5);
- name_buffer_size = 5;
- EXPECT_THAT(setsockopt(socket_fd(), SOL_SOCKET, SO_BINDTODEVICE, name_buffer,
- name_buffer_size),
- SyscallFailsWithErrno(ENODEV));
-}
-
-// Tests setsockopt of a buffer with a valid device name but not
-// null-terminated, with different buffer sizes.
-TEST_P(BindToDeviceTest, SetsockoptValidDeviceNameWithoutNullTermination) {
- char name_buffer[IFNAMSIZ * 2];
- socklen_t name_buffer_size;
-
- strncpy(name_buffer, interface_name().c_str(), interface_name().size() + 1);
- // Intentionally overwrite the null at the end.
- memset(name_buffer + interface_name().size(), kIllegalIfnameChar,
- sizeof(name_buffer) - interface_name().size());
- for (size_t i = 1; i <= sizeof(name_buffer); i++) {
- name_buffer_size = i;
- SCOPED_TRACE(absl::StrCat("Buffer size: ", i));
- // It should only work if the size provided is exactly right.
- if (name_buffer_size == interface_name().size()) {
- EXPECT_THAT(setsockopt(socket_fd(), SOL_SOCKET, SO_BINDTODEVICE,
- name_buffer, name_buffer_size),
- SyscallSucceeds());
- } else {
- EXPECT_THAT(setsockopt(socket_fd(), SOL_SOCKET, SO_BINDTODEVICE,
- name_buffer, name_buffer_size),
- SyscallFailsWithErrno(ENODEV));
- }
- }
-}
-
-// Tests setsockopt of a buffer with a valid device name and null-terminated,
-// with different buffer sizes.
-TEST_P(BindToDeviceTest, SetsockoptValidDeviceNameWithNullTermination) {
- char name_buffer[IFNAMSIZ * 2];
- socklen_t name_buffer_size;
-
- strncpy(name_buffer, interface_name().c_str(), interface_name().size() + 1);
- // Don't overwrite the null at the end.
- memset(name_buffer + interface_name().size() + 1, kIllegalIfnameChar,
- sizeof(name_buffer) - interface_name().size() - 1);
- for (size_t i = 1; i <= sizeof(name_buffer); i++) {
- name_buffer_size = i;
- SCOPED_TRACE(absl::StrCat("Buffer size: ", i));
- // It should only work if the size provided is at least the right size.
- if (name_buffer_size >= interface_name().size()) {
- EXPECT_THAT(setsockopt(socket_fd(), SOL_SOCKET, SO_BINDTODEVICE,
- name_buffer, name_buffer_size),
- SyscallSucceeds());
- } else {
- EXPECT_THAT(setsockopt(socket_fd(), SOL_SOCKET, SO_BINDTODEVICE,
- name_buffer, name_buffer_size),
- SyscallFailsWithErrno(ENODEV));
- }
- }
-}
-
-// Tests that setsockopt of an invalid device name doesn't unset the previous
-// valid setsockopt.
-TEST_P(BindToDeviceTest, SetsockoptValidThenInvalid) {
- char name_buffer[IFNAMSIZ * 2];
- socklen_t name_buffer_size;
-
- // Write successfully.
- strncpy(name_buffer, interface_name().c_str(), sizeof(name_buffer));
- ASSERT_THAT(setsockopt(socket_fd(), SOL_SOCKET, SO_BINDTODEVICE, name_buffer,
- sizeof(name_buffer)),
- SyscallSucceeds());
-
- // Read it back successfully.
- memset(name_buffer, kIllegalIfnameChar, sizeof(name_buffer));
- name_buffer_size = sizeof(name_buffer);
- EXPECT_THAT(getsockopt(socket_fd(), SOL_SOCKET, SO_BINDTODEVICE, name_buffer,
- &name_buffer_size),
- SyscallSucceeds());
- EXPECT_EQ(name_buffer_size, interface_name().size() + 1);
- EXPECT_STREQ(name_buffer, interface_name().c_str());
-
- // Write unsuccessfully.
- memset(name_buffer, kIllegalIfnameChar, sizeof(name_buffer));
- name_buffer_size = 5;
- EXPECT_THAT(setsockopt(socket_fd(), SOL_SOCKET, SO_BINDTODEVICE, name_buffer,
- sizeof(name_buffer)),
- SyscallFailsWithErrno(ENODEV));
-
- // Read it back successfully, it's unchanged.
- memset(name_buffer, kIllegalIfnameChar, sizeof(name_buffer));
- name_buffer_size = sizeof(name_buffer);
- EXPECT_THAT(getsockopt(socket_fd(), SOL_SOCKET, SO_BINDTODEVICE, name_buffer,
- &name_buffer_size),
- SyscallSucceeds());
- EXPECT_EQ(name_buffer_size, interface_name().size() + 1);
- EXPECT_STREQ(name_buffer, interface_name().c_str());
-}
-
-// Tests that setsockopt of a zero-length string correctly unsets the previous
-// value.
-TEST_P(BindToDeviceTest, SetsockoptValidThenClear) {
- char name_buffer[IFNAMSIZ * 2];
- socklen_t name_buffer_size;
-
- // Write successfully.
- strncpy(name_buffer, interface_name().c_str(), sizeof(name_buffer));
- EXPECT_THAT(setsockopt(socket_fd(), SOL_SOCKET, SO_BINDTODEVICE, name_buffer,
- sizeof(name_buffer)),
- SyscallSucceeds());
-
- // Read it back successfully.
- memset(name_buffer, kIllegalIfnameChar, sizeof(name_buffer));
- name_buffer_size = sizeof(name_buffer);
- EXPECT_THAT(getsockopt(socket_fd(), SOL_SOCKET, SO_BINDTODEVICE, name_buffer,
- &name_buffer_size),
- SyscallSucceeds());
- EXPECT_EQ(name_buffer_size, interface_name().size() + 1);
- EXPECT_STREQ(name_buffer, interface_name().c_str());
-
- // Clear it successfully.
- name_buffer_size = 0;
- EXPECT_THAT(setsockopt(socket_fd(), SOL_SOCKET, SO_BINDTODEVICE, name_buffer,
- name_buffer_size),
- SyscallSucceeds());
-
- // Read it back successfully, it's cleared.
- memset(name_buffer, kIllegalIfnameChar, sizeof(name_buffer));
- name_buffer_size = sizeof(name_buffer);
- EXPECT_THAT(getsockopt(socket_fd(), SOL_SOCKET, SO_BINDTODEVICE, name_buffer,
- &name_buffer_size),
- SyscallSucceeds());
- EXPECT_EQ(name_buffer_size, 0);
-}
-
-// Tests that setsockopt of an empty string correctly unsets the previous
-// value.
-TEST_P(BindToDeviceTest, SetsockoptValidThenClearWithNull) {
- char name_buffer[IFNAMSIZ * 2];
- socklen_t name_buffer_size;
-
- // Write successfully.
- strncpy(name_buffer, interface_name().c_str(), sizeof(name_buffer));
- EXPECT_THAT(setsockopt(socket_fd(), SOL_SOCKET, SO_BINDTODEVICE, name_buffer,
- sizeof(name_buffer)),
- SyscallSucceeds());
-
- // Read it back successfully.
- memset(name_buffer, kIllegalIfnameChar, sizeof(name_buffer));
- name_buffer_size = sizeof(name_buffer);
- EXPECT_THAT(getsockopt(socket_fd(), SOL_SOCKET, SO_BINDTODEVICE, name_buffer,
- &name_buffer_size),
- SyscallSucceeds());
- EXPECT_EQ(name_buffer_size, interface_name().size() + 1);
- EXPECT_STREQ(name_buffer, interface_name().c_str());
-
- // Clear it successfully.
- memset(name_buffer, kIllegalIfnameChar, sizeof(name_buffer));
- name_buffer[0] = 0;
- name_buffer_size = sizeof(name_buffer);
- EXPECT_THAT(setsockopt(socket_fd(), SOL_SOCKET, SO_BINDTODEVICE, name_buffer,
- name_buffer_size),
- SyscallSucceeds());
-
- // Read it back successfully, it's cleared.
- memset(name_buffer, kIllegalIfnameChar, sizeof(name_buffer));
- name_buffer_size = sizeof(name_buffer);
- EXPECT_THAT(getsockopt(socket_fd(), SOL_SOCKET, SO_BINDTODEVICE, name_buffer,
- &name_buffer_size),
- SyscallSucceeds());
- EXPECT_EQ(name_buffer_size, 0);
-}
-
-// Tests getsockopt with different buffer sizes.
-TEST_P(BindToDeviceTest, GetsockoptDevice) {
- char name_buffer[IFNAMSIZ * 2];
- socklen_t name_buffer_size;
-
- // Write successfully.
- strncpy(name_buffer, interface_name().c_str(), sizeof(name_buffer));
- ASSERT_THAT(setsockopt(socket_fd(), SOL_SOCKET, SO_BINDTODEVICE, name_buffer,
- sizeof(name_buffer)),
- SyscallSucceeds());
-
- // Read it back at various buffer sizes.
- for (size_t i = 0; i <= sizeof(name_buffer); i++) {
- memset(name_buffer, kIllegalIfnameChar, sizeof(name_buffer));
- name_buffer_size = i;
- SCOPED_TRACE(absl::StrCat("Buffer size: ", i));
-    // Linux requires a buffer of at least IFNAMSIZ bytes, even if fewer would
-    // suffice for this interface name.
- if (name_buffer_size >= IFNAMSIZ) {
- EXPECT_THAT(getsockopt(socket_fd(), SOL_SOCKET, SO_BINDTODEVICE,
- name_buffer, &name_buffer_size),
- SyscallSucceeds());
- EXPECT_EQ(name_buffer_size, interface_name().size() + 1);
- EXPECT_STREQ(name_buffer, interface_name().c_str());
- } else {
- EXPECT_THAT(getsockopt(socket_fd(), SOL_SOCKET, SO_BINDTODEVICE,
- name_buffer, &name_buffer_size),
- SyscallFailsWithErrno(EINVAL));
- EXPECT_EQ(name_buffer_size, i);
- }
- }
-}
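-
-// The IFNAMSIZ floor exercised above is a property of getsockopt(2) with
-// SO_BINDTODEVICE itself rather than of this fixture. A minimal sketch of the
-// calling convention (the GetBoundDevice helper name is illustrative): supply
-// a buffer of at least IFNAMSIZ bytes and the kernel shrinks the returned
-// optlen to strlen(name) + 1, or to 0 when no device is bound.
-void GetBoundDevice(int fd, string* name) {
-  char buf[IFNAMSIZ] = {};
-  socklen_t len = sizeof(buf);
-  ASSERT_THAT(getsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, buf, &len),
-              SyscallSucceeds());
-  // len counts the trailing NUL, so drop it when copying out.
-  name->assign(buf, len > 0 ? len - 1 : 0);
-}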
-
-INSTANTIATE_TEST_SUITE_P(BindToDeviceTest, BindToDeviceTest,
- ::testing::Values(IPv4UDPUnboundSocket(0),
- IPv4TCPUnboundSocket(0)));
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_bind_to_device_distribution.cc b/test/syscalls/linux/socket_bind_to_device_distribution.cc
deleted file mode 100644
index 70b0b2742..000000000
--- a/test/syscalls/linux/socket_bind_to_device_distribution.cc
+++ /dev/null
@@ -1,353 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <arpa/inet.h>
-#include <linux/if_tun.h>
-#include <net/if.h>
-#include <netinet/in.h>
-#include <sys/ioctl.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <sys/un.h>
-
-#include <atomic>
-#include <cstdio>
-#include <cstring>
-#include <map>
-#include <memory>
-#include <unordered_map>
-#include <unordered_set>
-#include <utility>
-#include <vector>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_bind_to_device_util.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/capability_util.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-using std::string;
-using std::vector;
-
-struct EndpointConfig {
- std::string bind_to_device;
- double expected_ratio;
-};
-
-struct DistributionTestCase {
- std::string name;
- std::vector<EndpointConfig> endpoints;
-};
-
-struct ListenerConnector {
- TestAddress listener;
- TestAddress connector;
-};
-
-// Test fixture for SO_BINDTODEVICE tests of the distribution of packets
-// received across sockets with varying SO_BINDTODEVICE settings.
-class BindToDeviceDistributionTest
- : public ::testing::TestWithParam<
- ::testing::tuple<ListenerConnector, DistributionTestCase>> {
- protected:
- void SetUp() override {
- printf("Testing case: %s, listener=%s, connector=%s\n",
- ::testing::get<1>(GetParam()).name.c_str(),
- ::testing::get<0>(GetParam()).listener.description.c_str(),
- ::testing::get<0>(GetParam()).connector.description.c_str());
- ASSERT_TRUE(ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)))
- << "CAP_NET_RAW is required to use SO_BINDTODEVICE";
- }
-};
-
-// Binds sockets to different devices and then creates many TCP connections.
-// Checks that the distribution of connections received on the sockets matches
-// the expectation.
-TEST_P(BindToDeviceDistributionTest, Tcp) {
- auto const& [listener_connector, test] = GetParam();
-
- TestAddress const& listener = listener_connector.listener;
- TestAddress const& connector = listener_connector.connector;
- sockaddr_storage listen_addr = listener.addr;
- sockaddr_storage conn_addr = connector.addr;
-
- auto interface_names = GetInterfaceNames();
-
- // Create the listening sockets.
- std::vector<FileDescriptor> listener_fds;
- std::vector<std::unique_ptr<Tunnel>> all_tunnels;
- for (auto const& endpoint : test.endpoints) {
- if (!endpoint.bind_to_device.empty() &&
- interface_names.find(endpoint.bind_to_device) ==
- interface_names.end()) {
- all_tunnels.push_back(
- ASSERT_NO_ERRNO_AND_VALUE(Tunnel::New(endpoint.bind_to_device)));
- interface_names.insert(endpoint.bind_to_device);
- }
-
- listener_fds.push_back(ASSERT_NO_ERRNO_AND_VALUE(
- Socket(listener.family(), SOCK_STREAM, IPPROTO_TCP)));
- int fd = listener_fds.back().get();
-
- ASSERT_THAT(setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
- endpoint.bind_to_device.c_str(),
- endpoint.bind_to_device.size() + 1),
- SyscallSucceeds());
- ASSERT_THAT(bind(fd, AsSockAddr(&listen_addr), listener.addr_len),
- SyscallSucceeds());
- ASSERT_THAT(listen(fd, 40), SyscallSucceeds());
-
- // On the first bind we need to determine which port was bound.
- if (listener_fds.size() > 1) {
- continue;
- }
-
- // Get the port bound by the listening socket.
- socklen_t addrlen = listener.addr_len;
- ASSERT_THAT(
- getsockname(listener_fds[0].get(), AsSockAddr(&listen_addr), &addrlen),
- SyscallSucceeds());
- uint16_t const port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr));
- ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));
- }
-
- constexpr int kConnectAttempts = 10000;
- std::atomic<int> connects_received = ATOMIC_VAR_INIT(0);
- std::vector<int> accept_counts(listener_fds.size(), 0);
- std::vector<std::unique_ptr<ScopedThread>> listen_threads(
- listener_fds.size());
-
- for (size_t i = 0; i < listener_fds.size(); i++) {
- listen_threads[i] = absl::make_unique<ScopedThread>(
- [&listener_fds, &accept_counts, &connects_received, i,
- kConnectAttempts]() {
- do {
- auto fd = Accept(listener_fds[i].get(), nullptr, nullptr);
- if (!fd.ok()) {
-              // Another thread has shut down our read side, causing the
-              // accept to fail.
- ASSERT_GE(connects_received, kConnectAttempts)
- << "errno = " << fd.error();
- return;
- }
-            // Receive some data from the socket to be sure that the connect()
-            // system call has been completed on the other side.
-            // Do a short read and then close the socket to trigger a RST. This
-            // ensures that both ends of the connection are cleaned up and no
-            // goroutines hang around in TIME-WAIT. We do this so that this
-            // test does not time out under gotsan runs, where lots of
-            // goroutines can cause the test to use absurd amounts of memory.
- //
- // See: https://tools.ietf.org/html/rfc2525#page-50 section 2.17
- uint16_t data;
- EXPECT_THAT(
- RetryEINTR(recv)(fd.ValueOrDie().get(), &data, sizeof(data), 0),
- SyscallSucceedsWithValue(sizeof(data)));
- accept_counts[i]++;
- } while (++connects_received < kConnectAttempts);
-
- // Shutdown all sockets to wake up other threads.
- for (auto const& listener_fd : listener_fds) {
- shutdown(listener_fd.get(), SHUT_RDWR);
- }
- });
- }
-
- for (int32_t i = 0; i < kConnectAttempts; i++) {
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(connector.family(), SOCK_STREAM, IPPROTO_TCP));
- ASSERT_THAT(RetryEINTR(connect)(fd.get(), AsSockAddr(&conn_addr),
- connector.addr_len),
- SyscallSucceeds());
-
- EXPECT_THAT(RetryEINTR(send)(fd.get(), &i, sizeof(i), 0),
- SyscallSucceedsWithValue(sizeof(i)));
- }
-
- // Join threads to be sure that all connections have been counted.
- for (auto const& listen_thread : listen_threads) {
- listen_thread->Join();
- }
- // Check that connections are distributed correctly among listening sockets.
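-  // For example, with expected_ratio 1/3 and kConnectAttempts 10000, an
-  // endpoint is expected to have accepted roughly 3333 connections, subject to
-  // the 0.10 tolerance passed to EquivalentWithin below.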
- for (size_t i = 0; i < accept_counts.size(); i++) {
- EXPECT_THAT(
- accept_counts[i],
- EquivalentWithin(static_cast<int>(kConnectAttempts *
- test.endpoints[i].expected_ratio),
- 0.10))
- << "endpoint " << i << " got the wrong number of packets";
- }
-}
-
-// Binds sockets to different devices and then sends many UDP packets. Checks
-// that the distribution of packets received on the sockets matches the
-// expectation.
-TEST_P(BindToDeviceDistributionTest, Udp) {
- auto const& [listener_connector, test] = GetParam();
-
- TestAddress const& listener = listener_connector.listener;
- TestAddress const& connector = listener_connector.connector;
- sockaddr_storage listen_addr = listener.addr;
- sockaddr_storage conn_addr = connector.addr;
-
- auto interface_names = GetInterfaceNames();
-
- // Create the listening socket.
- std::vector<FileDescriptor> listener_fds;
- std::vector<std::unique_ptr<Tunnel>> all_tunnels;
- for (auto const& endpoint : test.endpoints) {
- if (!endpoint.bind_to_device.empty() &&
- interface_names.find(endpoint.bind_to_device) ==
- interface_names.end()) {
- all_tunnels.push_back(
- ASSERT_NO_ERRNO_AND_VALUE(Tunnel::New(endpoint.bind_to_device)));
- interface_names.insert(endpoint.bind_to_device);
- }
-
- listener_fds.push_back(
- ASSERT_NO_ERRNO_AND_VALUE(Socket(listener.family(), SOCK_DGRAM, 0)));
- int fd = listener_fds.back().get();
-
- ASSERT_THAT(setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
- endpoint.bind_to_device.c_str(),
- endpoint.bind_to_device.size() + 1),
- SyscallSucceeds());
- ASSERT_THAT(bind(fd, AsSockAddr(&listen_addr), listener.addr_len),
- SyscallSucceeds());
-
- // On the first bind we need to determine which port was bound.
- if (listener_fds.size() > 1) {
- continue;
- }
-
- // Get the port bound by the listening socket.
- socklen_t addrlen = listener.addr_len;
- ASSERT_THAT(
- getsockname(listener_fds[0].get(), AsSockAddr(&listen_addr), &addrlen),
- SyscallSucceeds());
- uint16_t const port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr));
- ASSERT_NO_ERRNO(SetAddrPort(listener.family(), &listen_addr, port));
- ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));
- }
-
- constexpr int kConnectAttempts = 10000;
- std::atomic<int> packets_received = ATOMIC_VAR_INIT(0);
- std::vector<int> packets_per_socket(listener_fds.size(), 0);
- std::vector<std::unique_ptr<ScopedThread>> receiver_threads(
- listener_fds.size());
-
- for (size_t i = 0; i < listener_fds.size(); i++) {
- receiver_threads[i] = absl::make_unique<ScopedThread>(
- [&listener_fds, &packets_per_socket, &packets_received, i]() {
- do {
- struct sockaddr_storage addr = {};
- socklen_t addrlen = sizeof(addr);
- int data;
-
- auto ret =
- RetryEINTR(recvfrom)(listener_fds[i].get(), &data, sizeof(data),
- 0, AsSockAddr(&addr), &addrlen);
-
- if (packets_received < kConnectAttempts) {
- ASSERT_THAT(ret, SyscallSucceedsWithValue(sizeof(data)));
- }
-
- if (ret != sizeof(data)) {
-              // Another thread may have shut down our read side, causing the
-              // recvfrom to fail.
- break;
- }
-
- packets_received++;
- packets_per_socket[i]++;
-
-            // A response is required to synchronize with the main thread;
-            // otherwise the main thread could send more than the receive
-            // queues can hold.
- EXPECT_THAT(
- RetryEINTR(sendto)(listener_fds[i].get(), &data, sizeof(data),
- 0, AsSockAddr(&addr), addrlen),
- SyscallSucceedsWithValue(sizeof(data)));
- } while (packets_received < kConnectAttempts);
-
- // Shutdown all sockets to wake up other threads.
- for (auto const& listener_fd : listener_fds) {
- shutdown(listener_fd.get(), SHUT_RDWR);
- }
- });
- }
-
- for (int i = 0; i < kConnectAttempts; i++) {
- FileDescriptor const fd =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(connector.family(), SOCK_DGRAM, 0));
- EXPECT_THAT(RetryEINTR(sendto)(fd.get(), &i, sizeof(i), 0,
- AsSockAddr(&conn_addr), connector.addr_len),
- SyscallSucceedsWithValue(sizeof(i)));
- int data;
- EXPECT_THAT(RetryEINTR(recv)(fd.get(), &data, sizeof(data), 0),
- SyscallSucceedsWithValue(sizeof(data)));
- }
-
- // Join threads to be sure that all connections have been counted.
- for (auto const& receiver_thread : receiver_threads) {
- receiver_thread->Join();
- }
- // Check that packets are distributed correctly among listening sockets.
- for (size_t i = 0; i < packets_per_socket.size(); i++) {
- EXPECT_THAT(
- packets_per_socket[i],
- EquivalentWithin(static_cast<int>(kConnectAttempts *
- test.endpoints[i].expected_ratio),
- 0.10))
- << "endpoint " << i << " got the wrong number of packets";
- }
-}
-
-std::vector<DistributionTestCase> GetDistributionTestCases() {
- return std::vector<DistributionTestCase>{
- {"Even distribution among sockets not bound to device",
- {{"", 1. / 3}, {"", 1. / 3}, {"", 1. / 3}}},
- {"Sockets bound to other interfaces get no packets",
- {{"eth1", 0}, {"", 1. / 2}, {"", 1. / 2}}},
- {"Bound has priority over unbound", {{"eth1", 0}, {"", 0}, {"lo", 1}}},
- {"Even distribution among sockets bound to device",
- {{"eth1", 0}, {"lo", 1. / 2}, {"lo", 1. / 2}}},
- };
-}
-
-INSTANTIATE_TEST_SUITE_P(
- BindToDeviceTest, BindToDeviceDistributionTest,
- ::testing::Combine(::testing::Values(
- // Listeners bound to IPv4 addresses refuse
- // connections using IPv6 addresses.
- ListenerConnector{V4Any(), V4Loopback()},
- ListenerConnector{V4Loopback(), V4MappedLoopback()}),
- ::testing::ValuesIn(GetDistributionTestCases())));
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_bind_to_device_sequence.cc b/test/syscalls/linux/socket_bind_to_device_sequence.cc
deleted file mode 100644
index d3cc71dbf..000000000
--- a/test/syscalls/linux/socket_bind_to_device_sequence.cc
+++ /dev/null
@@ -1,513 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <arpa/inet.h>
-#include <linux/capability.h>
-#include <linux/if_tun.h>
-#include <net/if.h>
-#include <netinet/in.h>
-#include <sys/ioctl.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <sys/un.h>
-
-#include <cstdio>
-#include <cstring>
-#include <map>
-#include <memory>
-#include <unordered_map>
-#include <unordered_set>
-#include <utility>
-#include <vector>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/container/node_hash_map.h"
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_bind_to_device_util.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/capability_util.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-using std::string;
-using std::vector;
-
-// Test fixture for SO_BINDTODEVICE tests of the results of sequences of
-// socket bindings.
-class BindToDeviceSequenceTest : public ::testing::TestWithParam<SocketKind> {
- protected:
- void SetUp() override {
- printf("Testing case: %s\n", GetParam().description.c_str());
- ASSERT_TRUE(ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)))
- << "CAP_NET_RAW is required to use SO_BINDTODEVICE";
- socket_factory_ = GetParam();
-
- interface_names_ = GetInterfaceNames();
- }
-
- PosixErrorOr<std::unique_ptr<FileDescriptor>> NewSocket() const {
- return socket_factory_.Create();
- }
-
-  // Sets *device_name to the device for device_id. If the device_id has been
-  // seen before, reuses the previously chosen device; otherwise finds or
-  // creates a new one. Records a fatal test failure if no suitable device can
-  // be found or created (callers wrap this in ASSERT_NO_FATAL_FAILURE).
- void GetDevice(int device_id, string* device_name) {
- auto device = devices_.find(device_id);
- if (device != devices_.end()) {
- *device_name = device->second;
- return;
- }
-
- // Need to pick a new device. Try ethernet first.
- *device_name = absl::StrCat("eth", next_unused_eth_);
- if (interface_names_.find(*device_name) != interface_names_.end()) {
- devices_[device_id] = *device_name;
- next_unused_eth_++;
- return;
- }
-
- // Need to make a new tunnel device. gVisor tests should have enough
- // ethernet devices to never reach here.
- ASSERT_FALSE(IsRunningOnGvisor());
- // Need a tunnel.
- tunnels_.push_back(ASSERT_NO_ERRNO_AND_VALUE(Tunnel::New()));
- devices_[device_id] = tunnels_.back()->GetName();
- *device_name = devices_[device_id];
- }
-
-  // Releases the socket created by a previous bind action.
- void ReleaseSocket(int socket_id) {
- // Close the socket that was made in a previous action. The socket_id
- // indicates which socket to close based on index into the list of actions.
- sockets_to_close_.erase(socket_id);
- }
-
- // SetDevice changes the bind_to_device option. It does not bind or re-bind.
- void SetDevice(int socket_id, int device_id) {
- auto socket_fd = sockets_to_close_[socket_id]->get();
- string device_name;
- ASSERT_NO_FATAL_FAILURE(GetDevice(device_id, &device_name));
- EXPECT_THAT(setsockopt(socket_fd, SOL_SOCKET, SO_BINDTODEVICE,
- device_name.c_str(), device_name.size() + 1),
- SyscallSucceedsWithValue(0));
- }
-
- // Bind a socket with the reuse options and bind_to_device options. Checks
- // that all steps succeed and that the bind command's error matches want.
- // Sets the socket_id to uniquely identify the socket bound if it is not
- // nullptr.
- void BindSocket(bool reuse_port, bool reuse_addr, int device_id = 0,
- int want = 0, int* socket_id = nullptr) {
- next_socket_id_++;
- sockets_to_close_[next_socket_id_] = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket_fd = sockets_to_close_[next_socket_id_]->get();
- if (socket_id != nullptr) {
- *socket_id = next_socket_id_;
- }
-
- // If reuse_port is indicated, do that.
- if (reuse_port) {
- EXPECT_THAT(setsockopt(socket_fd, SOL_SOCKET, SO_REUSEPORT, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceedsWithValue(0));
- }
-
- // If reuse_addr is indicated, do that.
- if (reuse_addr) {
- EXPECT_THAT(setsockopt(socket_fd, SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceedsWithValue(0));
- }
-
- // If the device is non-zero, bind to that device.
- if (device_id != 0) {
- string device_name;
- ASSERT_NO_FATAL_FAILURE(GetDevice(device_id, &device_name));
- EXPECT_THAT(setsockopt(socket_fd, SOL_SOCKET, SO_BINDTODEVICE,
- device_name.c_str(), device_name.size() + 1),
- SyscallSucceedsWithValue(0));
- char get_device[100];
- socklen_t get_device_size = 100;
- EXPECT_THAT(getsockopt(socket_fd, SOL_SOCKET, SO_BINDTODEVICE, get_device,
- &get_device_size),
- SyscallSucceedsWithValue(0));
- }
-
- struct sockaddr_in addr = {};
- addr.sin_family = AF_INET;
- addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- addr.sin_port = port_;
- if (want == 0) {
- ASSERT_THAT(
- bind(socket_fd, reinterpret_cast<const struct sockaddr*>(&addr),
- sizeof(addr)),
- SyscallSucceeds());
- } else {
- ASSERT_THAT(
- bind(socket_fd, reinterpret_cast<const struct sockaddr*>(&addr),
- sizeof(addr)),
- SyscallFailsWithErrno(want));
- }
-
- if (port_ == 0) {
- // We don't yet know what port we'll be using so we need to fetch it and
- // remember it for future commands.
- socklen_t addr_size = sizeof(addr);
- ASSERT_THAT(
- getsockname(socket_fd, reinterpret_cast<struct sockaddr*>(&addr),
- &addr_size),
- SyscallSucceeds());
- port_ = addr.sin_port;
- }
- }
-
- private:
- SocketKind socket_factory_;
-  // devices_ maps from the device id in the test case to the device name.
- absl::node_hash_map<int, string> devices_;
- // These are the tunnels that were created for the test and will be destroyed
- // by the destructor.
- vector<std::unique_ptr<Tunnel>> tunnels_;
- // A list of all interface names before the test started.
- std::unordered_set<string> interface_names_;
-  // The next ethernet device number to use when a new device is requested.
- int next_unused_eth_ = 1;
- // The port for all tests. Originally 0 (any) and later set to the port that
- // all further commands will use.
- in_port_t port_ = 0;
- // sockets_to_close_ is a map from action index to the socket that was
- // created.
- absl::node_hash_map<int,
- std::unique_ptr<gvisor::testing::FileDescriptor>>
- sockets_to_close_;
- int next_socket_id_ = 0;
-};
-
-TEST_P(BindToDeviceSequenceTest, BindTwiceWithDeviceFails) {
- ASSERT_NO_FATAL_FAILURE(BindSocket(
- /* reuse_port */ false, /* reuse_addr */ false, /* bind_to_device */ 3));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ false,
- /* bind_to_device */ 3, EADDRINUSE));
-}
-
-TEST_P(BindToDeviceSequenceTest, BindToDevice) {
- ASSERT_NO_FATAL_FAILURE(BindSocket(
- /* reuse_port */ false, /* reuse_addr */ false, /* bind_to_device */ 1));
- ASSERT_NO_FATAL_FAILURE(BindSocket(
- /* reuse_port */ false, /* reuse_addr */ false, /* bind_to_device */ 2));
-}
-
-TEST_P(BindToDeviceSequenceTest, BindToDeviceAndThenWithoutDevice) {
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ false,
- /* bind_to_device */ 123));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ false,
- /* bind_to_device */ 0, EADDRINUSE));
-}
-
-TEST_P(BindToDeviceSequenceTest, BindWithoutDevice) {
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ false));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ false,
- /* bind_to_device */ 123, EADDRINUSE));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ true,
- /* reuse_addr */ false,
- /* bind_to_device */ 123, EADDRINUSE));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ false,
- /* bind_to_device */ 0, EADDRINUSE));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ true,
- /* reuse_addr */ false,
- /* bind_to_device */ 0, EADDRINUSE));
-}
-
-TEST_P(BindToDeviceSequenceTest, BindWithDevice) {
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ false,
- /* bind_to_device */ 123, 0));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ false,
- /* bind_to_device */ 123, EADDRINUSE));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ true,
- /* reuse_addr */ false,
- /* bind_to_device */ 123, EADDRINUSE));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ false,
- /* bind_to_device */ 0, EADDRINUSE));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ true,
- /* reuse_addr */ false,
- /* bind_to_device */ 0, EADDRINUSE));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ true,
- /* reuse_addr */ false,
- /* bind_to_device */ 456, 0));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ false,
- /* bind_to_device */ 789, 0));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ false,
- /* bind_to_device */ 0, EADDRINUSE));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ true,
- /* reuse_addr */ false,
- /* bind_to_device */ 0, EADDRINUSE));
-}
-
-TEST_P(BindToDeviceSequenceTest, BindWithReuse) {
- ASSERT_NO_FATAL_FAILURE(
-      BindSocket(/* reuse_port */ true, /* reuse_addr */ false));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ false,
- /* bind_to_device */ 123, EADDRINUSE));
- ASSERT_NO_FATAL_FAILURE(BindSocket(
- /* reuse_port */ true, /* reuse_addr */ false,
- /* bind_to_device */ 123));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ false,
- /* bind_to_device */ 0, EADDRINUSE));
- ASSERT_NO_FATAL_FAILURE(BindSocket(
- /* reuse_port */ true, /* reuse_addr */ false, /* bind_to_device */ 0));
-}
-
-TEST_P(BindToDeviceSequenceTest, BindingWithReuseAndDevice) {
- ASSERT_NO_FATAL_FAILURE(BindSocket(
- /* reuse_port */ true, /* reuse_addr */ false, /* bind_to_device */ 123));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ false,
- /* bind_to_device */ 123, EADDRINUSE));
- ASSERT_NO_FATAL_FAILURE(BindSocket(
- /* reuse_port */ true, /* reuse_addr */ false, /* bind_to_device */ 123));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ false,
- /* bind_to_device */ 0, EADDRINUSE));
- ASSERT_NO_FATAL_FAILURE(BindSocket(
- /* reuse_port */ true, /* reuse_addr */ false, /* bind_to_device */ 456));
- ASSERT_NO_FATAL_FAILURE(
- BindSocket(/* reuse_port */ true, /* reuse_addr */ false));
- ASSERT_NO_FATAL_FAILURE(BindSocket(
- /* reuse_port */ true, /* reuse_addr */ false, /* bind_to_device */ 789));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ false,
- /* bind_to_device */ 999, EADDRINUSE));
-}
-
-TEST_P(BindToDeviceSequenceTest, MixingReuseAndNotReuseByBindingToDevice) {
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ true,
- /* reuse_addr */ false,
- /* bind_to_device */ 123, 0));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ false,
- /* bind_to_device */ 456, 0));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ true,
- /* reuse_addr */ false,
- /* bind_to_device */ 789, 0));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ false,
- /* bind_to_device */ 999, 0));
-}
-
-TEST_P(BindToDeviceSequenceTest, CannotBindTo0AfterMixingReuseAndNotReuse) {
- ASSERT_NO_FATAL_FAILURE(BindSocket(
- /* reuse_port */ true, /* reuse_addr */ false, /* bind_to_device */ 123));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ false,
- /* bind_to_device */ 456));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ true,
- /* reuse_addr */ false,
- /* bind_to_device */ 0, EADDRINUSE));
-}
-
-TEST_P(BindToDeviceSequenceTest, BindAndRelease) {
- ASSERT_NO_FATAL_FAILURE(BindSocket(
- /* reuse_port */ true, /* reuse_addr */ false, /* bind_to_device */ 123));
- int to_release;
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ true,
- /* reuse_addr */ false,
- /* bind_to_device */ 0, 0, &to_release));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ false,
- /* bind_to_device */ 345, EADDRINUSE));
- ASSERT_NO_FATAL_FAILURE(BindSocket(
- /* reuse_port */ true, /* reuse_addr */ false, /* bind_to_device */ 789));
- // Release the bind to device 0 and try again.
- ASSERT_NO_FATAL_FAILURE(ReleaseSocket(to_release));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ false,
- /* bind_to_device */ 345));
-}
-
-TEST_P(BindToDeviceSequenceTest, BindTwiceWithReuseOnce) {
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ false,
- /* bind_to_device */ 123));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ true,
- /* reuse_addr */ false,
- /* bind_to_device */ 0, EADDRINUSE));
-}
-
-TEST_P(BindToDeviceSequenceTest, BindWithReuseAddr) {
- ASSERT_NO_FATAL_FAILURE(
-      BindSocket(/* reuse_port */ false, /* reuse_addr */ true));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ false,
- /* bind_to_device */ 123, EADDRINUSE));
- ASSERT_NO_FATAL_FAILURE(BindSocket(
- /* reuse_port */ false, /* reuse_addr */ true, /* bind_to_device */ 123));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ false,
- /* bind_to_device */ 0, EADDRINUSE));
- ASSERT_NO_FATAL_FAILURE(BindSocket(
- /* reuse_port */ false, /* reuse_addr */ true, /* bind_to_device */ 0));
-}
-
-TEST_P(BindToDeviceSequenceTest,
- CannotBindTo0AfterMixingReuseAddrAndNotReuseAddr) {
- ASSERT_NO_FATAL_FAILURE(BindSocket(
- /* reuse_port */ true, /* reuse_addr */ false, /* bind_to_device */ 123));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ false,
- /* bind_to_device */ 456));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ true,
- /* bind_to_device */ 0, EADDRINUSE));
-}
-
-TEST_P(BindToDeviceSequenceTest, BindReuseAddrReusePortThenReusePort) {
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ true,
- /* reuse_addr */ true,
- /* bind_to_device */ 0));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ true,
- /* reuse_addr */ false,
- /* bind_to_device */ 0));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ true,
- /* bind_to_device */ 0, EADDRINUSE));
-}
-
-TEST_P(BindToDeviceSequenceTest, BindReuseAddrReusePortThenReuseAddr) {
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ true,
- /* reuse_addr */ true,
- /* bind_to_device */ 0));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ true,
- /* bind_to_device */ 0));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ true,
- /* reuse_addr */ false,
- /* bind_to_device */ 0, EADDRINUSE));
-}
-
-TEST_P(BindToDeviceSequenceTest, BindDoubleReuseAddrReusePortThenReusePort) {
- ASSERT_NO_FATAL_FAILURE(BindSocket(
- /* reuse_port */ true, /* reuse_addr */ true, /* bind_to_device */ 0));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ true,
- /* reuse_addr */ true,
- /* bind_to_device */ 0));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ true,
- /* reuse_addr */ false,
- /* bind_to_device */ 0));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ true,
- /* bind_to_device */ 0, EADDRINUSE));
-}
-
-TEST_P(BindToDeviceSequenceTest, BindDoubleReuseAddrReusePortThenReuseAddr) {
- ASSERT_NO_FATAL_FAILURE(BindSocket(
- /* reuse_port */ true, /* reuse_addr */ true, /* bind_to_device */ 0));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ true,
- /* reuse_addr */ true,
- /* bind_to_device */ 0));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ true,
- /* bind_to_device */ 0));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ true,
- /* reuse_addr */ false,
- /* bind_to_device */ 0, EADDRINUSE));
-}
-
-TEST_P(BindToDeviceSequenceTest, BindReusePortThenReuseAddrReusePort) {
- ASSERT_NO_FATAL_FAILURE(BindSocket(
- /* reuse_port */ true, /* reuse_addr */ false, /* bind_to_device */ 0));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ true,
- /* reuse_addr */ true,
- /* bind_to_device */ 0));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ true,
- /* bind_to_device */ 0, EADDRINUSE));
-}
-
-TEST_P(BindToDeviceSequenceTest, BindReuseAddrThenReuseAddr) {
- ASSERT_NO_FATAL_FAILURE(BindSocket(
- /* reuse_port */ false, /* reuse_addr */ true, /* bind_to_device */ 0));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ true,
- /* reuse_addr */ false,
- /* bind_to_device */ 0, EADDRINUSE));
-}
-
-TEST_P(BindToDeviceSequenceTest,
- BindReuseAddrThenReuseAddrReusePortThenReuseAddr) {
- // The behavior described in this test seems like a Linux bug. It doesn't
- // make any sense and it is unlikely that any applications rely on it.
- //
- // Both SO_REUSEADDR and SO_REUSEPORT allow binding multiple UDP sockets to
- // the same address and deliver each packet to exactly one of the bound
- // sockets. If both are enabled, one of the strategies is selected to route
- // packets. The strategy is selected dynamically based on the settings of the
- // currently bound sockets. Usually, the strategy is selected based on the
- // common setting (SO_REUSEADDR or SO_REUSEPORT) amongst the sockets, but for
- // some reason, Linux allows binding sets of sockets with no overlapping
- // settings in some situations. In this case, it is not obvious which strategy
-  // would be selected, as the configured settings are contradictory.
- SKIP_IF(IsRunningOnGvisor());
-
- ASSERT_NO_FATAL_FAILURE(BindSocket(
- /* reuse_port */ false, /* reuse_addr */ true, /* bind_to_device */ 0));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ true,
- /* reuse_addr */ true,
- /* bind_to_device */ 0));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ true,
- /* reuse_addr */ false,
- /* bind_to_device */ 0));
-}
-
-// Repro test for gvisor.dev/issue/1217. Not replicated in ports_test.go as this
-// test is different from the others and wouldn't fit well there.
-TEST_P(BindToDeviceSequenceTest, BindAndReleaseDifferentDevice) {
- int to_release;
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ false,
- /* bind_to_device */ 3, 0, &to_release));
- ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false,
- /* reuse_addr */ false,
- /* bind_to_device */ 3, EADDRINUSE));
- // Change the device. Since the socket was already bound, this should have no
- // effect.
- SetDevice(to_release, 2);
- // Release the bind to device 3 and try again.
- ASSERT_NO_FATAL_FAILURE(ReleaseSocket(to_release));
- ASSERT_NO_FATAL_FAILURE(BindSocket(
- /* reuse_port */ false, /* reuse_addr */ false, /* bind_to_device */ 3));
-}
-
-INSTANTIATE_TEST_SUITE_P(BindToDeviceTest, BindToDeviceSequenceTest,
- ::testing::Values(IPv4UDPUnboundSocket(0),
- IPv4TCPUnboundSocket(0)));
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_bind_to_device_util.cc b/test/syscalls/linux/socket_bind_to_device_util.cc
deleted file mode 100644
index ce5f63938..000000000
--- a/test/syscalls/linux/socket_bind_to_device_util.cc
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/socket_bind_to_device_util.h"
-
-#include <arpa/inet.h>
-#include <fcntl.h>
-#include <linux/if_tun.h>
-#include <net/if.h>
-#include <netinet/in.h>
-#include <sys/ioctl.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <sys/un.h>
-#include <unistd.h>
-
-#include <cstdio>
-#include <cstring>
-#include <map>
-#include <memory>
-#include <unordered_map>
-#include <unordered_set>
-#include <utility>
-#include <vector>
-
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-using std::string;
-
-PosixErrorOr<std::unique_ptr<Tunnel>> Tunnel::New(string tunnel_name) {
- int fd;
- RETURN_ERROR_IF_SYSCALL_FAIL(fd = open("/dev/net/tun", O_RDWR));
-
- // Using `new` to access a non-public constructor.
- auto new_tunnel = absl::WrapUnique(new Tunnel(fd));
-
- ifreq ifr = {};
- ifr.ifr_flags = IFF_TUN;
- strncpy(ifr.ifr_name, tunnel_name.c_str(), sizeof(ifr.ifr_name));
-
- RETURN_ERROR_IF_SYSCALL_FAIL(ioctl(fd, TUNSETIFF, &ifr));
- new_tunnel->name_ = ifr.ifr_name;
- return new_tunnel;
-}
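-
-// Typical usage, mirroring the fixtures in socket_bind_to_device.cc and
-// socket_bind_to_device_sequence.cc (sketch only; creating the tun device
-// requires additional privileges beyond the CAP_NET_RAW check those tests
-// already perform):
-//
-//   std::unique_ptr<Tunnel> tunnel = ASSERT_NO_ERRNO_AND_VALUE(Tunnel::New());
-//   const std::string& name = tunnel->GetName();
-//   // Keep `tunnel` alive while `name` is in use: the transient tun interface
-//   // disappears once the Tunnel is destroyed and its fd is closed.
-//   setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, name.c_str(),
-//              name.size() + 1);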
-
-std::unordered_set<string> GetInterfaceNames() {
- std::unordered_set<string> names;
-#ifndef ANDROID
- // Android does not support if_nameindex in r22.
- struct if_nameindex* interfaces = if_nameindex();
- if (interfaces == nullptr) {
- return names;
- }
- for (auto interface = interfaces;
- interface->if_index != 0 || interface->if_name != nullptr; interface++) {
- names.insert(interface->if_name);
- }
- if_freenameindex(interfaces);
-#endif
- return names;
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_bind_to_device_util.h b/test/syscalls/linux/socket_bind_to_device_util.h
deleted file mode 100644
index f941ccc86..000000000
--- a/test/syscalls/linux/socket_bind_to_device_util.h
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_SOCKET_BIND_TO_DEVICE_UTILS_H_
-#define GVISOR_TEST_SYSCALLS_SOCKET_BIND_TO_DEVICE_UTILS_H_
-
-#include <arpa/inet.h>
-#include <linux/if_tun.h>
-#include <net/if.h>
-#include <netinet/in.h>
-#include <sys/ioctl.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <sys/un.h>
-#include <unistd.h>
-
-#include <cstdio>
-#include <cstring>
-#include <map>
-#include <memory>
-#include <string>
-#include <unordered_map>
-#include <unordered_set>
-#include <utility>
-#include <vector>
-
-#include "absl/memory/memory.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-class Tunnel {
- public:
- static PosixErrorOr<std::unique_ptr<Tunnel>> New(
- std::string tunnel_name = "");
- const std::string& GetName() const { return name_; }
-
- ~Tunnel() {
- if (fd_ != -1) {
- close(fd_);
- }
- }
-
- private:
- Tunnel(int fd) : fd_(fd) {}
- int fd_ = -1;
- std::string name_;
-};
-
-std::unordered_set<std::string> GetInterfaceNames();
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_SOCKET_BIND_TO_DEVICE_UTILS_H_
diff --git a/test/syscalls/linux/socket_blocking.cc b/test/syscalls/linux/socket_blocking.cc
deleted file mode 100644
index 7e88aa2d9..000000000
--- a/test/syscalls/linux/socket_blocking.cc
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/socket_blocking.h"
-
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <sys/un.h>
-
-#include <cstdio>
-
-#include "gtest/gtest.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-#include "test/util/timer_util.h"
-
-namespace gvisor {
-namespace testing {
-
-TEST_P(BlockingSocketPairTest, RecvBlocks) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[100];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- constexpr auto kDuration = absl::Milliseconds(200);
- auto before = Now(CLOCK_MONOTONIC);
-
- const ScopedThread t([&]() {
- absl::SleepFor(kDuration);
- ASSERT_THAT(write(sockets->first_fd(), sent_data, sizeof(sent_data)),
- SyscallSucceedsWithValue(sizeof(sent_data)));
- });
-
- char received_data[sizeof(sent_data)] = {};
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), received_data,
- sizeof(received_data), 0),
- SyscallSucceedsWithValue(sizeof(received_data)));
-
- auto after = Now(CLOCK_MONOTONIC);
- EXPECT_GE(after - before, kDuration);
-}
-
-} // namespace testing
-} // namespace gvisor
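The RecvBlocks test removed above checks one property: a blocking recv(2) does not return until the peer writes, so the measured elapsed time is at least the writer's delay. A rough sketch of the same pattern on a plain AF_UNIX socketpair, using std::thread in place of the ScopedThread and socket-pair fixtures from the test framework:

#include <sys/socket.h>
#include <unistd.h>

#include <cassert>
#include <chrono>
#include <thread>

int main() {
  int fds[2];
  if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) != 0) return 1;

  const auto kDelay = std::chrono::milliseconds(200);
  std::thread writer([&] {
    std::this_thread::sleep_for(kDelay);
    char data[100] = {};
    ssize_t w = write(fds[0], data, sizeof(data));
    assert(w == static_cast<ssize_t>(sizeof(data)));
  });

  char buf[100];
  const auto before = std::chrono::steady_clock::now();
  ssize_t n = recv(fds[1], buf, sizeof(buf), 0);  // Blocks until data arrives.
  const auto elapsed = std::chrono::steady_clock::now() - before;
  writer.join();

  assert(n > 0);
  assert(elapsed >= kDelay);  // recv could not have returned before the write.
  close(fds[0]);
  close(fds[1]);
  return 0;
}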
diff --git a/test/syscalls/linux/socket_blocking.h b/test/syscalls/linux/socket_blocking.h
deleted file mode 100644
index db26e5ef5..000000000
--- a/test/syscalls/linux/socket_blocking.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_LINUX_SOCKET_BLOCKING_H_
-#define GVISOR_TEST_SYSCALLS_LINUX_SOCKET_BLOCKING_H_
-
-#include "test/syscalls/linux/socket_test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Test fixture for tests that apply to pairs of blocking connected sockets.
-using BlockingSocketPairTest = SocketPairTest;
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_LINUX_SOCKET_BLOCKING_H_
diff --git a/test/syscalls/linux/socket_capability.cc b/test/syscalls/linux/socket_capability.cc
deleted file mode 100644
index f75482aba..000000000
--- a/test/syscalls/linux/socket_capability.cc
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Subset of socket tests that need Linux-specific headers (compared to POSIX
-// headers).
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/capability_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-TEST(SocketTest, UnixConnectNeedsWritePerm) {
- SKIP_IF(IsRunningWithVFS1());
-
- FileDescriptor bound =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_UNIX, SOCK_STREAM, PF_UNIX));
-
- struct sockaddr_un addr =
- ASSERT_NO_ERRNO_AND_VALUE(UniqueUnixAddr(/*abstract=*/false, AF_UNIX));
- ASSERT_THAT(bind(bound.get(), reinterpret_cast<struct sockaddr*>(&addr),
- sizeof(addr)),
- SyscallSucceeds());
- ASSERT_THAT(listen(bound.get(), 1), SyscallSucceeds());
-
- // Drop capabilities that allow us to override permission checks. Otherwise, if
- // the test is run as root, the connect below will bypass permission checks
- // and succeed unexpectedly.
- AutoCapability cap(CAP_DAC_OVERRIDE, false);
-
- // Connect should fail without write perms.
- ASSERT_THAT(chmod(addr.sun_path, 0500), SyscallSucceeds());
- FileDescriptor client =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_UNIX, SOCK_STREAM, PF_UNIX));
- ASSERT_THAT(connect(client.get(), reinterpret_cast<struct sockaddr*>(&addr),
- sizeof(addr)),
- SyscallFailsWithErrno(EACCES));
-
- // Connect should succeed with write perms.
- ASSERT_THAT(chmod(addr.sun_path, 0200), SyscallSucceeds());
- EXPECT_THAT(connect(client.get(), reinterpret_cast<struct sockaddr*>(&addr),
- sizeof(addr)),
- SyscallSucceeds());
-}
-
-} // namespace testing
-} // namespace gvisor
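The UnixConnectNeedsWritePerm test removed above exercises a standard Linux rule: connect(2) to a filesystem-bound AF_UNIX socket requires write permission on the socket file, and CAP_DAC_OVERRIDE (held by root) bypasses the check, which is why the test drops that capability first. A rough standalone sketch of the permission check, assuming it runs as an unprivileged user (the /tmp path is illustrative):

#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/un.h>
#include <unistd.h>

#include <cassert>
#include <cerrno>
#include <cstring>

int main() {
  const char* kPath = "/tmp/perm_test.sock";
  unlink(kPath);

  int server = socket(AF_UNIX, SOCK_STREAM, 0);
  struct sockaddr_un addr = {};
  addr.sun_family = AF_UNIX;
  strncpy(addr.sun_path, kPath, sizeof(addr.sun_path) - 1);
  if (bind(server, reinterpret_cast<struct sockaddr*>(&addr), sizeof(addr)) ||
      listen(server, 1)) {
    return 1;
  }

  // Without write permission on the socket file, connect() fails with EACCES
  // (unless the caller can override DAC checks, e.g. root).
  chmod(kPath, 0500);
  int c1 = socket(AF_UNIX, SOCK_STREAM, 0);
  int ret =
      connect(c1, reinterpret_cast<struct sockaddr*>(&addr), sizeof(addr));
  assert(ret == -1 && errno == EACCES);

  // With write permission, connect() succeeds.
  chmod(kPath, 0200);
  int c2 = socket(AF_UNIX, SOCK_STREAM, 0);
  ret = connect(c2, reinterpret_cast<struct sockaddr*>(&addr), sizeof(addr));
  assert(ret == 0);

  close(c1);
  close(c2);
  close(server);
  unlink(kPath);
  return 0;
}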
diff --git a/test/syscalls/linux/socket_filesystem.cc b/test/syscalls/linux/socket_filesystem.cc
deleted file mode 100644
index 287359363..000000000
--- a/test/syscalls/linux/socket_filesystem.cc
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <vector>
-
-#include "test/syscalls/linux/socket_generic.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/socket_unix.h"
-#include "test/syscalls/linux/socket_unix_cmsg.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-std::vector<SocketPairKind> GetSocketPairs() {
- return ApplyVec<SocketPairKind>(
- FilesystemBoundUnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_STREAM, SOCK_DGRAM, SOCK_SEQPACKET},
- List<int>{0, SOCK_NONBLOCK}));
-}
-
-INSTANTIATE_TEST_SUITE_P(
- FilesystemUnixSockets, AllSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-INSTANTIATE_TEST_SUITE_P(
- FilesystemUnixSockets, UnixSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-INSTANTIATE_TEST_SUITE_P(
- FilesystemUnixSockets, UnixSocketPairCmsgTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
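The file removed above follows the suite's pattern of instantiating shared test suites (AllSocketPairTest, UnixSocketPairTest, UnixSocketPairCmsgTest) over generated socket-pair kinds. ApplyVec, AllBitwiseCombinations, and IncludeReversals are gVisor test utilities; the underlying googletest mechanism is plain value parameterization with TestWithParam, INSTANTIATE_TEST_SUITE_P, and ValuesIn, as in this unrelated minimal sketch (link against gtest_main):

#include <vector>

#include "gtest/gtest.h"

// One fixture, many instantiations: each value in the vector becomes a
// separate test case.
class ModulusTest : public ::testing::TestWithParam<int> {};

TEST_P(ModulusTest, IsEven) { EXPECT_EQ(GetParam() % 2, 0); }

std::vector<int> EvenValues() { return {0, 2, 4, 6}; }

INSTANTIATE_TEST_SUITE_P(Evens, ModulusTest,
                         ::testing::ValuesIn(EvenValues()));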
diff --git a/test/syscalls/linux/socket_generic.h b/test/syscalls/linux/socket_generic.h
deleted file mode 100644
index 00ae7bfc3..000000000
--- a/test/syscalls/linux/socket_generic.h
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_LINUX_SOCKET_GENERIC_H_
-#define GVISOR_TEST_SYSCALLS_LINUX_SOCKET_GENERIC_H_
-
-#include "test/syscalls/linux/socket_test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Test fixture for tests that apply to pairs of blocking and non-blocking
-// connected stream sockets.
-using AllSocketPairTest = SocketPairTest;
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_LINUX_SOCKET_GENERIC_H_
diff --git a/test/syscalls/linux/socket_generic_stress.cc b/test/syscalls/linux/socket_generic_stress.cc
deleted file mode 100644
index 778c32a8e..000000000
--- a/test/syscalls/linux/socket_generic_stress.cc
+++ /dev/null
@@ -1,261 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <poll.h>
-#include <stdio.h>
-#include <sys/ioctl.h>
-#include <sys/socket.h>
-#include <sys/un.h>
-#include <unistd.h>
-
-#include <array>
-#include <string>
-
-#include "gtest/gtest.h"
-#include "absl/strings/numbers.h"
-#include "absl/strings/str_split.h"
-#include "absl/strings/string_view.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Test fixture for tests that apply to pairs of connected sockets.
-using ConnectStressTest = SocketPairTest;
-
-TEST_P(ConnectStressTest, Reset) {
- const int nports = ASSERT_NO_ERRNO_AND_VALUE(MaybeLimitEphemeralPorts());
- for (int i = 0; i < nports * 2; i++) {
- const std::unique_ptr<SocketPair> sockets =
- ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- // Send some data to ensure that the connection gets reset and the port gets
- // released immediately. This avoids either end entering TIME-WAIT.
- char sent_data[100] = {};
- ASSERT_THAT(write(sockets->first_fd(), sent_data, sizeof(sent_data)),
- SyscallSucceedsWithValue(sizeof(sent_data)));
- // Poll the other FD to make sure that the data is in the receive buffer
- // before closing it to ensure a RST is triggered.
- const int kTimeout = 10000;
- struct pollfd pfd = {
- .fd = sockets->second_fd(),
- .events = POLL_IN,
- };
- ASSERT_THAT(poll(&pfd, 1, kTimeout), SyscallSucceedsWithValue(1));
- }
-}
-
-// Tests that opening too many connections -- without closing them -- does lead
-// to port exhaustion.
-TEST_P(ConnectStressTest, TooManyOpen) {
- const int nports = ASSERT_NO_ERRNO_AND_VALUE(MaybeLimitEphemeralPorts());
- int err_num = 0;
- std::vector<std::unique_ptr<SocketPair>> sockets =
- std::vector<std::unique_ptr<SocketPair>>(nports);
- for (int i = 0; i < nports * 2; i++) {
- PosixErrorOr<std::unique_ptr<SocketPair>> socks = NewSocketPair();
- if (!socks.ok()) {
- err_num = socks.error().errno_value();
- break;
- }
- sockets.push_back(std::move(socks).ValueOrDie());
- }
- ASSERT_EQ(err_num, EADDRINUSE);
-}
-
-INSTANTIATE_TEST_SUITE_P(
- AllConnectedSockets, ConnectStressTest,
- ::testing::Values(IPv6UDPBidirectionalBindSocketPair(0),
- IPv4UDPBidirectionalBindSocketPair(0),
- DualStackUDPBidirectionalBindSocketPair(0),
-
- // Without REUSEADDR, we get port exhaustion on Linux.
- SetSockOpt(SOL_SOCKET, SO_REUSEADDR,
- &kSockOptOn)(IPv6TCPAcceptBindSocketPair(0)),
- SetSockOpt(SOL_SOCKET, SO_REUSEADDR,
- &kSockOptOn)(IPv4TCPAcceptBindSocketPair(0)),
- SetSockOpt(SOL_SOCKET, SO_REUSEADDR, &kSockOptOn)(
- DualStackTCPAcceptBindSocketPair(0))));
-
-// Test fixture for tests that apply to pairs of connected sockets created with
-// a persistent listener (if applicable).
-class PersistentListenerConnectStressTest : public SocketPairTest {
- protected:
- PersistentListenerConnectStressTest() : slept_{false} {}
-
- // NewSocketSleep is the same as NewSocketPair, but will sleep once (over the
- // lifetime of the fixture) and retry if creation fails due to EADDRNOTAVAIL.
- PosixErrorOr<std::unique_ptr<SocketPair>> NewSocketSleep() {
- // We can't reuse a connection too close in time to its last use, as TCP
- // uses the timestamp difference to disambiguate connections. With a
- // sufficiently small port range, we'll cycle through too quickly, and TCP
- // won't allow for connection reuse. Thus, we sleep the first time we
- // encounter EADDRNOTAVAIL to allow for that difference (1 second in
- // gVisor).
- PosixErrorOr<std::unique_ptr<SocketPair>> socks = NewSocketPair();
- if (socks.ok()) {
- return socks;
- }
- if (!slept_ && socks.error().errno_value() == EADDRNOTAVAIL) {
- absl::SleepFor(absl::Milliseconds(1500));
- slept_ = true;
- return NewSocketPair();
- }
- return socks;
- }
-
- private:
- bool slept_;
-};
-
-TEST_P(PersistentListenerConnectStressTest, ShutdownCloseFirst) {
- const int nports = ASSERT_NO_ERRNO_AND_VALUE(MaybeLimitEphemeralPorts());
- for (int i = 0; i < nports * 2; i++) {
- std::unique_ptr<SocketPair> sockets =
- ASSERT_NO_ERRNO_AND_VALUE(NewSocketSleep());
- ASSERT_THAT(shutdown(sockets->first_fd(), SHUT_RDWR), SyscallSucceeds());
- if (GetParam().type == SOCK_STREAM) {
- // Poll the other FD to make sure that we see the FIN from the other
- // side before closing the second_fd. This ensures that the first_fd
- // enters TIME-WAIT and not second_fd.
- const int kTimeout = 10000;
- struct pollfd pfd = {
- .fd = sockets->second_fd(),
- .events = POLL_IN,
- };
- ASSERT_THAT(poll(&pfd, 1, kTimeout), SyscallSucceedsWithValue(1));
- }
- ASSERT_THAT(shutdown(sockets->second_fd(), SHUT_RDWR), SyscallSucceeds());
- }
-}
-
-TEST_P(PersistentListenerConnectStressTest, ShutdownCloseSecond) {
- const int nports = ASSERT_NO_ERRNO_AND_VALUE(MaybeLimitEphemeralPorts());
- for (int i = 0; i < nports * 2; i++) {
- const std::unique_ptr<SocketPair> sockets =
- ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- ASSERT_THAT(shutdown(sockets->second_fd(), SHUT_RDWR), SyscallSucceeds());
- if (GetParam().type == SOCK_STREAM) {
- // Poll the other FD to make sure that we see the FIN from the other
- // side before closing the first_fd. This ensures that the second_fd
- // enters TIME-WAIT and not first_fd.
- const int kTimeout = 10000;
- struct pollfd pfd = {
- .fd = sockets->first_fd(),
- .events = POLL_IN,
- };
- ASSERT_THAT(poll(&pfd, 1, kTimeout), SyscallSucceedsWithValue(1));
- }
- ASSERT_THAT(shutdown(sockets->first_fd(), SHUT_RDWR), SyscallSucceeds());
- }
-}
-
-TEST_P(PersistentListenerConnectStressTest, Close) {
- const int nports = ASSERT_NO_ERRNO_AND_VALUE(MaybeLimitEphemeralPorts());
- for (int i = 0; i < nports * 2; i++) {
- std::unique_ptr<SocketPair> sockets =
- ASSERT_NO_ERRNO_AND_VALUE(NewSocketSleep());
- }
-}
-
-INSTANTIATE_TEST_SUITE_P(
- AllConnectedSockets, PersistentListenerConnectStressTest,
- ::testing::Values(
- IPv6UDPBidirectionalBindSocketPair(0),
- IPv4UDPBidirectionalBindSocketPair(0),
- DualStackUDPBidirectionalBindSocketPair(0),
-
- // Without REUSEADDR, we get port exhaustion on Linux.
- SetSockOpt(SOL_SOCKET, SO_REUSEADDR, &kSockOptOn)(
- IPv6TCPAcceptBindPersistentListenerSocketPair(0)),
- SetSockOpt(SOL_SOCKET, SO_REUSEADDR, &kSockOptOn)(
- IPv4TCPAcceptBindPersistentListenerSocketPair(0)),
- SetSockOpt(SOL_SOCKET, SO_REUSEADDR, &kSockOptOn)(
- DualStackTCPAcceptBindPersistentListenerSocketPair(0))));
-
-using DataTransferStressTest = SocketPairTest;
-
-TEST_P(DataTransferStressTest, BigDataTransfer) {
- // TODO(b/165912341): These are too slow on KVM platform with nested virt.
- SKIP_IF(GvisorPlatform() == Platform::kKVM);
-
- const std::unique_ptr<SocketPair> sockets =
- ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- int client_fd = sockets->first_fd();
- int server_fd = sockets->second_fd();
-
- ScopedThread echo([server_fd]() {
- std::array<uint8_t, 1024> buf;
- for (;;) {
- ssize_t r = read(server_fd, buf.data(), buf.size());
- ASSERT_THAT(r, SyscallSucceeds());
- if (r == 0) {
- break;
- }
- for (size_t i = 0; i < r;) {
- ssize_t w = write(server_fd, buf.data() + i, r - i);
- ASSERT_GE(w, 0);
- i += w;
- }
- }
- ASSERT_THAT(shutdown(server_fd, SHUT_WR), SyscallSucceeds());
- });
-
- const std::string chunk = "Though this upload be but little, it is fierce.";
- std::string big_string;
- while (big_string.size() < 31 << 20) {
- big_string += chunk;
- }
- absl::string_view data = big_string;
-
- ScopedThread writer([client_fd, data]() {
- absl::string_view view = data;
- while (!view.empty()) {
- ssize_t n = write(client_fd, view.data(), view.size());
- ASSERT_GE(n, 0);
- view = view.substr(n);
- }
- ASSERT_THAT(shutdown(client_fd, SHUT_WR), SyscallSucceeds());
- });
-
- std::string buf;
- buf.resize(1 << 20);
- while (!data.empty()) {
- ssize_t n = read(client_fd, buf.data(), buf.size());
- ASSERT_GE(n, 0);
- for (size_t i = 0; i < n; i += chunk.size()) {
- size_t c = std::min(chunk.size(), n - i);
- ASSERT_EQ(buf.substr(i, c), data.substr(i, c)) << "offset " << i;
- }
- data = data.substr(n);
- }
- // Should read EOF now.
- ASSERT_THAT(read(client_fd, buf.data(), buf.size()),
- SyscallSucceedsWithValue(0));
-}
-
-INSTANTIATE_TEST_SUITE_P(
- AllConnectedSockets, DataTransferStressTest,
- ::testing::Values(IPv6TCPAcceptBindPersistentListenerSocketPair(0),
- IPv4TCPAcceptBindPersistentListenerSocketPair(0),
- DualStackTCPAcceptBindPersistentListenerSocketPair(0)));
-
-} // namespace testing
-} // namespace gvisor
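The stress tests removed above cycle through the ephemeral port range many times, so they rely on two tactics: steering which side ends up in TIME-WAIT (by provoking an RST with unread data, or by polling for the peer's FIN before shutting down), and setting SO_REUSEADDR on the TCP pairs to avoid port exhaustion. MaybeLimitEphemeralPorts is a gVisor test helper; on a stock Linux host the corresponding range can be read from /proc/sys/net/ipv4/ip_local_port_range, as in this rough sketch:

#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

#include <cstdio>

int main() {
  // Report how many ephemeral ports the host makes available.
  int lo = 0, hi = 0;
  if (FILE* f = fopen("/proc/sys/net/ipv4/ip_local_port_range", "r")) {
    if (fscanf(f, "%d %d", &lo, &hi) == 2) {
      printf("ephemeral ports: %d-%d (%d usable)\n", lo, hi, hi - lo + 1);
    }
    fclose(f);
  }

  // SO_REUSEADDR must be set before bind/connect; without it, repeated
  // connections exhaust the range once sockets pile up in TIME-WAIT.
  int fd = socket(AF_INET, SOCK_STREAM, 0);
  const int on = 1;
  if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)) != 0) {
    perror("setsockopt(SO_REUSEADDR)");
    return 1;
  }
  close(fd);
  return 0;
}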
diff --git a/test/syscalls/linux/socket_generic_test_cases.cc b/test/syscalls/linux/socket_generic_test_cases.cc
deleted file mode 100644
index fe5171bc8..000000000
--- a/test/syscalls/linux/socket_generic_test_cases.cc
+++ /dev/null
@@ -1,947 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/socket_generic.h"
-
-#ifdef __linux__
-#include <linux/capability.h>
-#endif // __linux__
-#include <stdio.h>
-#include <sys/ioctl.h>
-#include <sys/socket.h>
-#include <sys/un.h>
-
-#include "gtest/gtest.h"
-#include "absl/strings/str_format.h"
-#include "absl/strings/string_view.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/capability_util.h"
-#include "test/util/test_util.h"
-
-// This file is a generic socket test file. It must be built with another file
-// that provides the test types.
-
-namespace gvisor {
-namespace testing {
-
-TEST_P(AllSocketPairTest, BasicReadWrite) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- char buf[20];
- const std::string data = "abc";
- ASSERT_THAT(WriteFd(sockets->first_fd(), data.c_str(), 3),
- SyscallSucceedsWithValue(3));
- ASSERT_THAT(ReadFd(sockets->second_fd(), buf, 3),
- SyscallSucceedsWithValue(3));
- EXPECT_EQ(data, absl::string_view(buf, 3));
-}
-
-TEST_P(AllSocketPairTest, BasicReadWriteBadBuffer) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- const std::string data = "abc";
- ASSERT_THAT(WriteFd(sockets->first_fd(), data.c_str(), 3),
- SyscallSucceedsWithValue(3));
- ASSERT_THAT(ReadFd(sockets->second_fd(), nullptr, 3),
- SyscallFailsWithErrno(EFAULT));
-}
-
-TEST_P(AllSocketPairTest, BasicSendRecv) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- char sent_data[512];
- RandomizeBuffer(sent_data, sizeof(sent_data));
- ASSERT_THAT(
- RetryEINTR(send)(sockets->first_fd(), sent_data, sizeof(sent_data), 0),
- SyscallSucceedsWithValue(sizeof(sent_data)));
- char received_data[sizeof(sent_data)];
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), received_data,
- sizeof(received_data), 0),
- SyscallSucceedsWithValue(sizeof(received_data)));
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-}
-
-TEST_P(AllSocketPairTest, BasicSendmmsg) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- char sent_data[200];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- std::vector<struct mmsghdr> msgs(10);
- std::vector<struct iovec> iovs(msgs.size());
- const int chunk_size = sizeof(sent_data) / msgs.size();
- for (size_t i = 0; i < msgs.size(); i++) {
- iovs[i].iov_len = chunk_size;
- iovs[i].iov_base = &sent_data[i * chunk_size];
- msgs[i].msg_hdr.msg_iov = &iovs[i];
- msgs[i].msg_hdr.msg_iovlen = 1;
- }
-
- ASSERT_THAT(
- RetryEINTR(sendmmsg)(sockets->first_fd(), &msgs[0], msgs.size(), 0),
- SyscallSucceedsWithValue(msgs.size()));
-
- for (const struct mmsghdr& msg : msgs) {
- EXPECT_EQ(chunk_size, msg.msg_len);
- }
-
- char received_data[sizeof(sent_data)];
- for (size_t i = 0; i < msgs.size(); i++) {
- ASSERT_THAT(ReadFd(sockets->second_fd(), &received_data[i * chunk_size],
- chunk_size),
- SyscallSucceedsWithValue(chunk_size));
- }
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-}
-
-TEST_P(AllSocketPairTest, SendmmsgIsLimitedByMAXIOV) {
- std::unique_ptr<SocketPair> sockets =
- ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- char c = 0;
-
- std::vector<struct mmsghdr> msgs(UIO_MAXIOV + 1);
- std::vector<struct iovec> iovs(msgs.size());
- for (size_t i = 0; i < msgs.size(); i++) {
- iovs[i].iov_len = 1;
- iovs[i].iov_base = &c;
- msgs[i].msg_hdr.msg_iov = &iovs[i];
- msgs[i].msg_hdr.msg_iovlen = 1;
- }
-
- int n;
- ASSERT_THAT(n = RetryEINTR(sendmmsg)(sockets->first_fd(), msgs.data(),
- msgs.size(), MSG_DONTWAIT),
- SyscallSucceeds());
- EXPECT_LE(n, UIO_MAXIOV);
-}
-
-TEST_P(AllSocketPairTest, BasicRecvmmsg) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- char sent_data[200];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- char received_data[sizeof(sent_data)];
- std::vector<struct mmsghdr> msgs(10);
- std::vector<struct iovec> iovs(msgs.size());
- const int chunk_size = sizeof(sent_data) / msgs.size();
- for (size_t i = 0; i < msgs.size(); i++) {
- iovs[i].iov_len = chunk_size;
- iovs[i].iov_base = &received_data[i * chunk_size];
- msgs[i].msg_hdr.msg_iov = &iovs[i];
- msgs[i].msg_hdr.msg_iovlen = 1;
- }
-
- for (size_t i = 0; i < msgs.size(); i++) {
- ASSERT_THAT(
- WriteFd(sockets->first_fd(), &sent_data[i * chunk_size], chunk_size),
- SyscallSucceedsWithValue(chunk_size));
- }
-
- ASSERT_THAT(RetryEINTR(recvmmsg)(sockets->second_fd(), &msgs[0], msgs.size(),
- 0, nullptr),
- SyscallSucceedsWithValue(msgs.size()));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-
- for (const struct mmsghdr& msg : msgs) {
- EXPECT_EQ(chunk_size, msg.msg_len);
- }
-}
-
-TEST_P(AllSocketPairTest, SendmsgRecvmsg10KB) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- std::vector<char> sent_data(10 * 1024);
- RandomizeBuffer(sent_data.data(), sent_data.size());
- ASSERT_NO_FATAL_FAILURE(
- SendNullCmsg(sockets->first_fd(), sent_data.data(), sent_data.size()));
-
- std::vector<char> received_data(sent_data.size());
- ASSERT_NO_FATAL_FAILURE(RecvNoCmsg(sockets->second_fd(), received_data.data(),
- received_data.size()));
-
- EXPECT_EQ(0,
- memcmp(sent_data.data(), received_data.data(), sent_data.size()));
-}
-
- // This test validates that passing MSG_CTRUNC as an input flag to
- // recvmsg(2) is a no-op.
-TEST_P(AllSocketPairTest, SendmsgRecvmsgMsgCtruncNoop) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- std::vector<char> sent_data(10 * 1024);
- RandomizeBuffer(sent_data.data(), sent_data.size());
- ASSERT_NO_FATAL_FAILURE(
- SendNullCmsg(sockets->first_fd(), sent_data.data(), sent_data.size()));
-
- std::vector<char> received_data(sent_data.size());
- struct msghdr msg = {};
- char control[CMSG_SPACE(sizeof(int)) + CMSG_SPACE(sizeof(struct ucred))];
- msg.msg_control = control;
- msg.msg_controllen = sizeof(control);
-
- struct iovec iov;
- iov.iov_base = &received_data[0];
- iov.iov_len = received_data.size();
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- // MSG_CTRUNC should be a no-op.
- ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, MSG_CTRUNC),
- SyscallSucceedsWithValue(received_data.size()));
- struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
- EXPECT_EQ(cmsg, nullptr);
- EXPECT_EQ(msg.msg_controllen, 0);
- EXPECT_EQ(0,
- memcmp(sent_data.data(), received_data.data(), sent_data.size()));
-}
-
-TEST_P(AllSocketPairTest, SendmsgRecvmsg16KB) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- std::vector<char> sent_data(16 * 1024);
- RandomizeBuffer(sent_data.data(), sent_data.size());
- ASSERT_NO_FATAL_FAILURE(
- SendNullCmsg(sockets->first_fd(), sent_data.data(), sent_data.size()));
-
- std::vector<char> received_data(sent_data.size());
- ASSERT_NO_FATAL_FAILURE(RecvNoCmsg(sockets->second_fd(), received_data.data(),
- received_data.size()));
-
- EXPECT_EQ(0,
- memcmp(sent_data.data(), received_data.data(), sent_data.size()));
-}
-
-TEST_P(AllSocketPairTest, RecvmsgMsghdrFlagsNotClearedOnFailure) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char received_data[10] = {};
-
- struct iovec iov;
- iov.iov_base = received_data;
- iov.iov_len = sizeof(received_data);
- struct msghdr msg = {};
- msg.msg_flags = -1;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, MSG_DONTWAIT),
- SyscallFailsWithErrno(EAGAIN));
-
- // Check that msghdr flags were not changed.
- EXPECT_EQ(msg.msg_flags, -1);
-}
-
-TEST_P(AllSocketPairTest, RecvmsgMsghdrFlagsCleared) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[10];
- RandomizeBuffer(sent_data, sizeof(sent_data));
- ASSERT_THAT(
- RetryEINTR(send)(sockets->first_fd(), sent_data, sizeof(sent_data), 0),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- char received_data[sizeof(sent_data)] = {};
-
- struct iovec iov;
- iov.iov_base = received_data;
- iov.iov_len = sizeof(received_data);
- struct msghdr msg = {};
- msg.msg_flags = -1;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0),
- SyscallSucceedsWithValue(sizeof(sent_data)));
- EXPECT_EQ(0, memcmp(received_data, sent_data, sizeof(sent_data)));
-
- // Check that msghdr flags were cleared.
- EXPECT_EQ(msg.msg_flags, 0);
-}
-
-TEST_P(AllSocketPairTest, RecvmsgPeekMsghdrFlagsCleared) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[10];
- RandomizeBuffer(sent_data, sizeof(sent_data));
- ASSERT_THAT(
- RetryEINTR(send)(sockets->first_fd(), sent_data, sizeof(sent_data), 0),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- char received_data[sizeof(sent_data)] = {};
-
- struct iovec iov;
- iov.iov_base = received_data;
- iov.iov_len = sizeof(received_data);
- struct msghdr msg = {};
- msg.msg_flags = -1;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, MSG_PEEK),
- SyscallSucceedsWithValue(sizeof(sent_data)));
- EXPECT_EQ(0, memcmp(received_data, sent_data, sizeof(sent_data)));
-
- // Check that msghdr flags were cleared.
- EXPECT_EQ(msg.msg_flags, 0);
-}
-
-TEST_P(AllSocketPairTest, RecvmsgIovNotUpdated) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[10];
- RandomizeBuffer(sent_data, sizeof(sent_data));
- ASSERT_THAT(
- RetryEINTR(send)(sockets->first_fd(), sent_data, sizeof(sent_data), 0),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- char received_data[sizeof(sent_data) * 2] = {};
-
- struct iovec iov;
- iov.iov_base = received_data;
- iov.iov_len = sizeof(received_data);
- struct msghdr msg = {};
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0),
- SyscallSucceedsWithValue(sizeof(sent_data)));
- EXPECT_EQ(0, memcmp(received_data, sent_data, sizeof(sent_data)));
-
- // Check that the iovec length was not updated.
- EXPECT_EQ(msg.msg_iov->iov_len, sizeof(received_data));
-}
-
-TEST_P(AllSocketPairTest, RecvmmsgInvalidTimeout) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- char buf[10];
- struct mmsghdr msg = {};
- struct iovec iov = {};
- iov.iov_len = sizeof(buf);
- iov.iov_base = buf;
- msg.msg_hdr.msg_iov = &iov;
- msg.msg_hdr.msg_iovlen = 1;
- struct timespec timeout = {-1, -1};
- ASSERT_THAT(RetryEINTR(recvmmsg)(sockets->first_fd(), &msg, 1, 0, &timeout),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_P(AllSocketPairTest, RecvmmsgTimeoutBeforeRecv) {
- // There is a known bug in Linux's recvmmsg(2) that causes it to block
- // forever if the timeout expires while waiting for the first message.
- SKIP_IF(!IsRunningOnGvisor());
-
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- char buf[10];
- struct mmsghdr msg = {};
- struct iovec iov = {};
- iov.iov_len = sizeof(buf);
- iov.iov_base = buf;
- msg.msg_hdr.msg_iov = &iov;
- msg.msg_hdr.msg_iovlen = 1;
- struct timespec timeout = {};
- ASSERT_THAT(RetryEINTR(recvmmsg)(sockets->first_fd(), &msg, 1, 0, &timeout),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-TEST_P(AllSocketPairTest, MsgPeek) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- char sent_data[50];
- memset(&sent_data, 0, sizeof(sent_data));
- ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data, sizeof(sent_data)),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- char received_data[sizeof(sent_data)];
- for (int i = 0; i < 3; i++) {
- memset(received_data, 0, sizeof(received_data));
- EXPECT_THAT(RetryEINTR(recv)(sockets->second_fd(), received_data,
- sizeof(received_data), MSG_PEEK),
- SyscallSucceedsWithValue(sizeof(received_data)));
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(received_data)));
- }
-
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), received_data,
- sizeof(received_data), 0),
- SyscallSucceedsWithValue(sizeof(received_data)));
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(received_data)));
-}
-
-TEST_P(AllSocketPairTest, LingerSocketOption) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- struct linger got_linger = {-1, -1};
- socklen_t length = sizeof(struct linger);
- EXPECT_THAT(getsockopt(sockets->first_fd(), SOL_SOCKET, SO_LINGER,
- &got_linger, &length),
- SyscallSucceedsWithValue(0));
- struct linger want_linger = {};
- EXPECT_EQ(0, memcmp(&want_linger, &got_linger, sizeof(struct linger)));
- EXPECT_EQ(sizeof(struct linger), length);
-}
-
-TEST_P(AllSocketPairTest, KeepAliveSocketOption) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- int keepalive = -1;
- socklen_t length = sizeof(int);
- EXPECT_THAT(getsockopt(sockets->first_fd(), SOL_SOCKET, SO_KEEPALIVE,
- &keepalive, &length),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(0, keepalive);
- EXPECT_EQ(sizeof(int), length);
-}
-
-TEST_P(AllSocketPairTest, RcvBufSucceeds) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- int size = 0;
- socklen_t size_size = sizeof(size);
- EXPECT_THAT(
- getsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVBUF, &size, &size_size),
- SyscallSucceeds());
- EXPECT_GT(size, 0);
-}
-
-#ifdef __linux__
-
- // Check that a value set via SO_RCVBUFFORCE above the maximum is not
- // clamped to the maximum receive buffer size.
-TEST_P(AllSocketPairTest, SetSocketRecvBufForceAboveMax) {
- std::unique_ptr<SocketPair> sockets =
- ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- // Discover the maximum buffer size by setting a really large value.
- constexpr int kRcvBufSz = 0xffffffff;
- ASSERT_THAT(setsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVBUF, &kRcvBufSz,
- sizeof(kRcvBufSz)),
- SyscallSucceeds());
-
- int max = 0;
- socklen_t max_len = sizeof(max);
- ASSERT_THAT(
- getsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVBUF, &max, &max_len),
- SyscallSucceeds());
-
- int above_max = max + 1;
- int sso = setsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVBUFFORCE,
- &above_max, sizeof(above_max));
- if (!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN))) {
- ASSERT_THAT(sso, SyscallFailsWithErrno(EPERM));
- return;
- }
- ASSERT_THAT(sso, SyscallSucceeds());
-
- int val = 0;
- socklen_t val_len = sizeof(val);
- ASSERT_THAT(
- getsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVBUF, &val, &val_len),
- SyscallSucceeds());
- // The kernel doubles the value passed in.
- ASSERT_EQ(above_max * 2, val);
-}
-
-#endif // __linux__
-
-TEST_P(AllSocketPairTest, GetSndBufSucceeds) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- int size = 0;
- socklen_t size_size = sizeof(size);
- EXPECT_THAT(
- getsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDBUF, &size, &size_size),
- SyscallSucceeds());
- EXPECT_GT(size, 0);
-}
-
-TEST_P(AllSocketPairTest, RecvTimeoutReadSucceeds) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct timeval tv {
- .tv_sec = 0, .tv_usec = 10
- };
- EXPECT_THAT(
- setsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)),
- SyscallSucceeds());
-
- char buf[20] = {};
- EXPECT_THAT(RetryEINTR(read)(sockets->first_fd(), buf, sizeof(buf)),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-TEST_P(AllSocketPairTest, RecvTimeoutRecvSucceeds) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct timeval tv {
- .tv_sec = 0, .tv_usec = 10
- };
- EXPECT_THAT(
- setsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)),
- SyscallSucceeds());
-
- char buf[20] = {};
- EXPECT_THAT(RetryEINTR(recv)(sockets->first_fd(), buf, sizeof(buf), 0),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-TEST_P(AllSocketPairTest, RecvTimeoutRecvOneSecondSucceeds) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct timeval tv {
- .tv_sec = 1, .tv_usec = 0
- };
- EXPECT_THAT(
- setsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)),
- SyscallSucceeds());
-
- char buf[20] = {};
- EXPECT_THAT(RetryEINTR(recv)(sockets->first_fd(), buf, sizeof(buf), 0),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-TEST_P(AllSocketPairTest, RecvTimeoutRecvmsgSucceeds) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct timeval tv {
- .tv_sec = 0, .tv_usec = 10
- };
- EXPECT_THAT(
- setsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)),
- SyscallSucceeds());
-
- struct msghdr msg = {};
- char buf[20] = {};
- struct iovec iov;
- iov.iov_base = buf;
- iov.iov_len = sizeof(buf);
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- EXPECT_THAT(RetryEINTR(recvmsg)(sockets->first_fd(), &msg, 0),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-TEST_P(AllSocketPairTest, SendTimeoutDefault) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- timeval actual_tv = {.tv_sec = -1, .tv_usec = -1};
- socklen_t len = sizeof(actual_tv);
- EXPECT_THAT(getsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDTIMEO,
- &actual_tv, &len),
- SyscallSucceeds());
- EXPECT_EQ(actual_tv.tv_sec, 0);
- EXPECT_EQ(actual_tv.tv_usec, 0);
-}
-
-TEST_P(AllSocketPairTest, SetGetSendTimeout) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- // tv_usec should be a multiple of 4000 to work on most systems.
- timeval tv = {.tv_sec = 89, .tv_usec = 44000};
- EXPECT_THAT(
- setsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)),
- SyscallSucceeds());
-
- timeval actual_tv = {};
- socklen_t len = sizeof(actual_tv);
- EXPECT_THAT(getsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDTIMEO,
- &actual_tv, &len),
- SyscallSucceeds());
- EXPECT_EQ(actual_tv.tv_sec, tv.tv_sec);
- EXPECT_EQ(actual_tv.tv_usec, tv.tv_usec);
-}
-
-TEST_P(AllSocketPairTest, SetGetSendTimeoutLargerArg) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct timeval_with_extra {
- struct timeval tv;
- int64_t extra_data;
- } ABSL_ATTRIBUTE_PACKED;
-
- // tv_usec should be a multiple of 4000 to work on most systems.
- timeval_with_extra tv_extra = {
- .tv = {.tv_sec = 0, .tv_usec = 124000},
- };
-
- EXPECT_THAT(setsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDTIMEO,
- &tv_extra, sizeof(tv_extra)),
- SyscallSucceeds());
-
- timeval_with_extra actual_tv = {};
- socklen_t len = sizeof(actual_tv);
- EXPECT_THAT(getsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDTIMEO,
- &actual_tv, &len),
- SyscallSucceeds());
- EXPECT_EQ(actual_tv.tv.tv_sec, tv_extra.tv.tv_sec);
- EXPECT_EQ(actual_tv.tv.tv_usec, tv_extra.tv.tv_usec);
-}
-
-TEST_P(AllSocketPairTest, SendTimeoutAllowsWrite) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct timeval tv {
- .tv_sec = 0, .tv_usec = 10
- };
- EXPECT_THAT(
- setsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)),
- SyscallSucceeds());
-
- char buf[20] = {};
- ASSERT_THAT(RetryEINTR(write)(sockets->first_fd(), buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-}
-
-TEST_P(AllSocketPairTest, SendTimeoutAllowsSend) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct timeval tv {
- .tv_sec = 0, .tv_usec = 10
- };
- EXPECT_THAT(
- setsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)),
- SyscallSucceeds());
-
- char buf[20] = {};
- ASSERT_THAT(RetryEINTR(send)(sockets->first_fd(), buf, sizeof(buf), 0),
- SyscallSucceedsWithValue(sizeof(buf)));
-}
-
-TEST_P(AllSocketPairTest, SendTimeoutAllowsSendmsg) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct timeval tv {
- .tv_sec = 0, .tv_usec = 10
- };
- EXPECT_THAT(
- setsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)),
- SyscallSucceeds());
-
- char buf[20] = {};
- ASSERT_NO_FATAL_FAILURE(SendNullCmsg(sockets->first_fd(), buf, sizeof(buf)));
-}
-
-TEST_P(AllSocketPairTest, RecvTimeoutDefault) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- timeval actual_tv = {.tv_sec = -1, .tv_usec = -1};
- socklen_t len = sizeof(actual_tv);
- EXPECT_THAT(getsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVTIMEO,
- &actual_tv, &len),
- SyscallSucceeds());
- EXPECT_EQ(actual_tv.tv_sec, 0);
- EXPECT_EQ(actual_tv.tv_usec, 0);
-}
-
-TEST_P(AllSocketPairTest, SetGetRecvTimeout) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- timeval tv = {.tv_sec = 123, .tv_usec = 456000};
- EXPECT_THAT(
- setsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)),
- SyscallSucceeds());
-
- timeval actual_tv = {};
- socklen_t len = sizeof(actual_tv);
- EXPECT_THAT(getsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVTIMEO,
- &actual_tv, &len),
- SyscallSucceeds());
- EXPECT_EQ(actual_tv.tv_sec, 123);
- EXPECT_EQ(actual_tv.tv_usec, 456000);
-}
-
-TEST_P(AllSocketPairTest, SetGetRecvTimeoutLargerArg) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct timeval_with_extra {
- struct timeval tv;
- int64_t extra_data;
- } ABSL_ATTRIBUTE_PACKED;
-
- timeval_with_extra tv_extra = {
- .tv = {.tv_sec = 0, .tv_usec = 432000},
- };
-
- EXPECT_THAT(setsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVTIMEO,
- &tv_extra, sizeof(tv_extra)),
- SyscallSucceeds());
-
- timeval_with_extra actual_tv = {};
- socklen_t len = sizeof(actual_tv);
- EXPECT_THAT(getsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVTIMEO,
- &actual_tv, &len),
- SyscallSucceeds());
- EXPECT_EQ(actual_tv.tv.tv_sec, 0);
- EXPECT_EQ(actual_tv.tv.tv_usec, 432000);
-}
-
-TEST_P(AllSocketPairTest, RecvTimeoutRecvmsgOneSecondSucceeds) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct timeval tv {
- .tv_sec = 1, .tv_usec = 0
- };
- EXPECT_THAT(
- setsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)),
- SyscallSucceeds());
-
- struct msghdr msg = {};
- char buf[20] = {};
- struct iovec iov;
- iov.iov_base = buf;
- iov.iov_len = sizeof(buf);
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- EXPECT_THAT(RetryEINTR(recvmsg)(sockets->first_fd(), &msg, 0),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-TEST_P(AllSocketPairTest, RecvTimeoutUsecTooLarge) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct timeval tv {
- .tv_sec = 0, .tv_usec = 2000000 // 2 seconds.
- };
- EXPECT_THAT(
- setsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)),
- SyscallFailsWithErrno(EDOM));
-}
-
-TEST_P(AllSocketPairTest, SendTimeoutUsecTooLarge) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct timeval tv {
- .tv_sec = 0, .tv_usec = 2000000 // 2 seconds.
- };
- EXPECT_THAT(
- setsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)),
- SyscallFailsWithErrno(EDOM));
-}
-
-TEST_P(AllSocketPairTest, RecvTimeoutUsecNeg) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct timeval tv {
- .tv_sec = 0, .tv_usec = -1
- };
- EXPECT_THAT(
- setsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)),
- SyscallFailsWithErrno(EDOM));
-}
-
-TEST_P(AllSocketPairTest, SendTimeoutUsecNeg) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct timeval tv {
- .tv_sec = 0, .tv_usec = -1
- };
- EXPECT_THAT(
- setsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)),
- SyscallFailsWithErrno(EDOM));
-}
-
-TEST_P(AllSocketPairTest, RecvTimeoutNegSecRead) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct timeval tv {
- .tv_sec = -1, .tv_usec = 0
- };
- EXPECT_THAT(
- setsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)),
- SyscallSucceeds());
-
- char buf[20] = {};
- EXPECT_THAT(RetryEINTR(read)(sockets->first_fd(), buf, sizeof(buf)),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-TEST_P(AllSocketPairTest, RecvTimeoutNegSecRecv) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct timeval tv {
- .tv_sec = -1, .tv_usec = 0
- };
- EXPECT_THAT(
- setsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)),
- SyscallSucceeds());
-
- char buf[20] = {};
- EXPECT_THAT(RetryEINTR(recv)(sockets->first_fd(), buf, sizeof(buf), 0),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-TEST_P(AllSocketPairTest, RecvTimeoutNegSecRecvmsg) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct timeval tv {
- .tv_sec = -1, .tv_usec = 0
- };
- EXPECT_THAT(
- setsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)),
- SyscallSucceeds());
-
- struct msghdr msg = {};
- char buf[20] = {};
- struct iovec iov;
- iov.iov_base = buf;
- iov.iov_len = sizeof(buf);
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- EXPECT_THAT(RetryEINTR(recvmsg)(sockets->first_fd(), &msg, 0),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-TEST_P(AllSocketPairTest, RecvWaitAll) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[100];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- ASSERT_THAT(write(sockets->first_fd(), sent_data, sizeof(sent_data)),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- char received_data[sizeof(sent_data)] = {};
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), received_data,
- sizeof(received_data), MSG_WAITALL),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-}
-
-TEST_P(AllSocketPairTest, RecvWaitAllDontWait) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char data[100] = {};
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), data, sizeof(data),
- MSG_WAITALL | MSG_DONTWAIT),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-TEST_P(AllSocketPairTest, RecvTimeoutWaitAll) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct timeval tv {
- .tv_sec = 0, .tv_usec = 200000 // 200ms
- };
- EXPECT_THAT(setsockopt(sockets->second_fd(), SOL_SOCKET, SO_RCVTIMEO, &tv,
- sizeof(tv)),
- SyscallSucceeds());
-
- char sent_data[100];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- ASSERT_THAT(write(sockets->first_fd(), sent_data, sizeof(sent_data)),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- char received_data[sizeof(sent_data) * 2] = {};
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), received_data,
- sizeof(received_data), MSG_WAITALL),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-}
-
-TEST_P(AllSocketPairTest, GetSockoptType) {
- int type = GetParam().type;
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- for (const int fd : {sockets->first_fd(), sockets->second_fd()}) {
- int opt;
- socklen_t optlen = sizeof(opt);
- EXPECT_THAT(getsockopt(fd, SOL_SOCKET, SO_TYPE, &opt, &optlen),
- SyscallSucceeds());
-
- // Type may have SOCK_NONBLOCK and SOCK_CLOEXEC ORed into it. Remove these
- // before comparison.
- type &= ~(SOCK_NONBLOCK | SOCK_CLOEXEC);
- EXPECT_EQ(opt, type) << absl::StrFormat(
- "getsockopt(%d, SOL_SOCKET, SO_TYPE, &opt, &optlen) => opt=%d was "
- "unexpected",
- fd, opt);
- }
-}
-
-TEST_P(AllSocketPairTest, GetSockoptDomain) {
- const int domain = GetParam().domain;
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- for (const int fd : {sockets->first_fd(), sockets->second_fd()}) {
- int opt;
- socklen_t optlen = sizeof(opt);
- EXPECT_THAT(getsockopt(fd, SOL_SOCKET, SO_DOMAIN, &opt, &optlen),
- SyscallSucceeds());
- EXPECT_EQ(opt, domain) << absl::StrFormat(
- "getsockopt(%d, SOL_SOCKET, SO_DOMAIN, &opt, &optlen) => opt=%d was "
- "unexpected",
- fd, opt);
- }
-}
-
-TEST_P(AllSocketPairTest, GetSockoptProtocol) {
- const int protocol = GetParam().protocol;
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- for (const int fd : {sockets->first_fd(), sockets->second_fd()}) {
- int opt;
- socklen_t optlen = sizeof(opt);
- EXPECT_THAT(getsockopt(fd, SOL_SOCKET, SO_PROTOCOL, &opt, &optlen),
- SyscallSucceeds());
- EXPECT_EQ(opt, protocol) << absl::StrFormat(
- "getsockopt(%d, SOL_SOCKET, SO_PROTOCOL, &opt, &optlen) => opt=%d was "
- "unexpected",
- fd, opt);
- }
-}
-
-TEST_P(AllSocketPairTest, SetAndGetBooleanSocketOptions) {
- int sock_opts[] = {SO_BROADCAST, SO_PASSCRED, SO_NO_CHECK,
- SO_REUSEADDR, SO_REUSEPORT, SO_KEEPALIVE};
- for (int sock_opt : sock_opts) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- int enable = -1;
- socklen_t enableLen = sizeof(enable);
-
- // Test that the option is initially set to false.
- ASSERT_THAT(getsockopt(sockets->first_fd(), SOL_SOCKET, sock_opt, &enable,
- &enableLen),
- SyscallSucceeds());
- ASSERT_EQ(enableLen, sizeof(enable));
- EXPECT_EQ(enable, 0) << absl::StrFormat(
- "getsockopt(fd, SOL_SOCKET, %d, &enable, &enableLen) => enable=%d",
- sock_opt, enable);
-
- // Test that setting the option to true is reflected in the subsequent
- // call to getsockopt(2).
- enable = 1;
- ASSERT_THAT(setsockopt(sockets->first_fd(), SOL_SOCKET, sock_opt, &enable,
- sizeof(enable)),
- SyscallSucceeds());
- enable = -1;
- enableLen = sizeof(enable);
- ASSERT_THAT(getsockopt(sockets->first_fd(), SOL_SOCKET, sock_opt, &enable,
- &enableLen),
- SyscallSucceeds());
- ASSERT_EQ(enableLen, sizeof(enable));
- EXPECT_EQ(enable, 1) << absl::StrFormat(
- "getsockopt(fd, SOL_SOCKET, %d, &enable, &enableLen) => enable=%d",
- sock_opt, enable);
- }
-}
-
-TEST_P(AllSocketPairTest, GetSocketOutOfBandInlineOption) {
- // We do not support disabling this option. It is always enabled.
- SKIP_IF(!IsRunningOnGvisor());
-
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- int enable = -1;
- socklen_t enableLen = sizeof(enable);
-
- int want = 1;
- ASSERT_THAT(getsockopt(sockets->first_fd(), SOL_SOCKET, SO_OOBINLINE, &enable,
- &enableLen),
- SyscallSucceeds());
- ASSERT_EQ(enableLen, sizeof(enable));
- EXPECT_EQ(enable, want);
-}
-
-} // namespace testing
-} // namespace gvisor
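Many of the cases removed above pin down SO_RCVTIMEO/SO_SNDTIMEO semantics: once a receive timeout is set, read/recv/recvmsg on an empty socket fail with EAGAIN instead of blocking indefinitely, and out-of-range tv_usec values are rejected with EDOM. A minimal sketch of the receive-timeout behavior on a plain socketpair, outside the test framework:

#include <sys/socket.h>
#include <sys/time.h>
#include <unistd.h>

#include <cassert>
#include <cerrno>

int main() {
  int fds[2];
  if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) != 0) return 1;

  struct timeval tv = {.tv_sec = 0, .tv_usec = 100000};  // 100 ms timeout.
  if (setsockopt(fds[0], SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) != 0) {
    return 1;
  }

  char buf[16];
  ssize_t n = recv(fds[0], buf, sizeof(buf), 0);  // No data was ever sent.
  assert(n == -1 && (errno == EAGAIN || errno == EWOULDBLOCK));

  close(fds[0]);
  close(fds[1]);
  return 0;
}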
diff --git a/test/syscalls/linux/socket_inet_loopback.cc b/test/syscalls/linux/socket_inet_loopback.cc
deleted file mode 100644
index 9ae0cc59d..000000000
--- a/test/syscalls/linux/socket_inet_loopback.cc
+++ /dev/null
@@ -1,2534 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <arpa/inet.h>
-#include <netinet/in.h>
-#include <netinet/tcp.h>
-#include <poll.h>
-#include <string.h>
-#include <sys/socket.h>
-
-#include <atomic>
-#include <iostream>
-#include <memory>
-#include <string>
-#include <tuple>
-#include <utility>
-#include <vector>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/memory/memory.h"
-#include "absl/strings/str_cat.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_inet_loopback_test_params.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/posix_error.h"
-#include "test/util/save_util.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-using ::testing::Gt;
-
-using SocketInetLoopbackTest = ::testing::TestWithParam<SocketInetTestParam>;
-
-TEST(BadSocketPairArgs, ValidateErrForBadCallsToSocketPair) {
- int fd[2] = {};
-
- // Valid AF but invalid for socketpair(2) return ESOCKTNOSUPPORT.
- ASSERT_THAT(socketpair(AF_INET, 0, 0, fd),
- SyscallFailsWithErrno(ESOCKTNOSUPPORT));
- ASSERT_THAT(socketpair(AF_INET6, 0, 0, fd),
- SyscallFailsWithErrno(ESOCKTNOSUPPORT));
-
- // Invalid AF will fail.
- ASSERT_THAT(socketpair(AF_MAX, 0, 0, fd), SyscallFails());
- ASSERT_THAT(socketpair(8675309, 0, 0, fd), SyscallFails());
-}
-
-enum class Operation {
- Bind,
- Connect,
- SendTo,
-};
-
-std::string OperationToString(Operation operation) {
- switch (operation) {
- case Operation::Bind:
- return "Bind";
- case Operation::Connect:
- return "Connect";
- // Operation::SendTo is the default.
- default:
- return "SendTo";
- }
-}
-
-using OperationSequence = std::vector<Operation>;
-
-using DualStackSocketTest =
- ::testing::TestWithParam<std::tuple<TestAddress, OperationSequence>>;
-
-TEST_P(DualStackSocketTest, AddressOperations) {
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET6, SOCK_DGRAM, 0));
-
- const TestAddress& addr = std::get<0>(GetParam());
- const OperationSequence& operations = std::get<1>(GetParam());
-
- auto addr_in = reinterpret_cast<const sockaddr*>(&addr.addr);
-
- // Sockets may only be bound once. Both `connect` and `sendto` cause a socket
- // to be bound.
- bool bound = false;
- for (const Operation& operation : operations) {
- bool sockname = false;
- bool peername = false;
- switch (operation) {
- case Operation::Bind: {
- ASSERT_NO_ERRNO(SetAddrPort(
- addr.family(), const_cast<sockaddr_storage*>(&addr.addr), 0));
-
- int bind_ret = bind(fd.get(), addr_in, addr.addr_len);
-
- // Dual stack sockets may only be bound to AF_INET6.
- if (!bound && addr.family() == AF_INET6) {
- EXPECT_THAT(bind_ret, SyscallSucceeds());
- bound = true;
-
- sockname = true;
- } else {
- EXPECT_THAT(bind_ret, SyscallFailsWithErrno(EINVAL));
- }
- break;
- }
- case Operation::Connect: {
- ASSERT_NO_ERRNO(SetAddrPort(
- addr.family(), const_cast<sockaddr_storage*>(&addr.addr), 1337));
-
- EXPECT_THAT(RetryEINTR(connect)(fd.get(), addr_in, addr.addr_len),
- SyscallSucceeds())
- << GetAddrStr(addr_in);
- bound = true;
-
- sockname = true;
- peername = true;
-
- break;
- }
- case Operation::SendTo: {
- const char payload[] = "hello";
- ASSERT_NO_ERRNO(SetAddrPort(
- addr.family(), const_cast<sockaddr_storage*>(&addr.addr), 1337));
-
- ssize_t sendto_ret = sendto(fd.get(), &payload, sizeof(payload), 0,
- addr_in, addr.addr_len);
-
- EXPECT_THAT(sendto_ret, SyscallSucceedsWithValue(sizeof(payload)));
- sockname = !bound;
- bound = true;
- break;
- }
- }
-
- if (sockname) {
- sockaddr_storage sock_addr;
- socklen_t addrlen = sizeof(sock_addr);
- ASSERT_THAT(getsockname(fd.get(), AsSockAddr(&sock_addr), &addrlen),
- SyscallSucceeds());
- ASSERT_EQ(addrlen, sizeof(struct sockaddr_in6));
-
- auto sock_addr_in6 = reinterpret_cast<const sockaddr_in6*>(&sock_addr);
-
- if (operation == Operation::SendTo) {
- EXPECT_EQ(sock_addr_in6->sin6_family, AF_INET6);
- EXPECT_TRUE(IN6_IS_ADDR_UNSPECIFIED(sock_addr_in6->sin6_addr.s6_addr32))
- << OperationToString(operation)
- << " getsocknam=" << GetAddrStr(AsSockAddr(&sock_addr));
-
- EXPECT_NE(sock_addr_in6->sin6_port, 0);
- } else if (IN6_IS_ADDR_V4MAPPED(
- reinterpret_cast<const sockaddr_in6*>(addr_in)
- ->sin6_addr.s6_addr32)) {
- EXPECT_TRUE(IN6_IS_ADDR_V4MAPPED(sock_addr_in6->sin6_addr.s6_addr32))
- << OperationToString(operation)
- << " getsocknam=" << GetAddrStr(AsSockAddr(&sock_addr));
- }
- }
-
- if (peername) {
- sockaddr_storage peer_addr;
- socklen_t addrlen = sizeof(peer_addr);
- ASSERT_THAT(getpeername(fd.get(), AsSockAddr(&peer_addr), &addrlen),
- SyscallSucceeds());
- ASSERT_EQ(addrlen, sizeof(struct sockaddr_in6));
-
- if (addr.family() == AF_INET ||
- IN6_IS_ADDR_V4MAPPED(reinterpret_cast<const sockaddr_in6*>(addr_in)
- ->sin6_addr.s6_addr32)) {
- EXPECT_TRUE(IN6_IS_ADDR_V4MAPPED(
- reinterpret_cast<const sockaddr_in6*>(&peer_addr)
- ->sin6_addr.s6_addr32))
- << OperationToString(operation)
- << " getpeername=" << GetAddrStr(AsSockAddr(&peer_addr));
- }
- }
- }
-}
-
-// TODO(gvisor.dev/issue/1556): uncomment V4MappedAny.
-INSTANTIATE_TEST_SUITE_P(
- All, DualStackSocketTest,
- ::testing::Combine(
- ::testing::Values(V4Any(), V4Loopback(), /*V4MappedAny(),*/
- V4MappedLoopback(), V6Any(), V6Loopback()),
- ::testing::ValuesIn<OperationSequence>(
- {{Operation::Bind, Operation::Connect, Operation::SendTo},
- {Operation::Bind, Operation::SendTo, Operation::Connect},
- {Operation::Connect, Operation::Bind, Operation::SendTo},
- {Operation::Connect, Operation::SendTo, Operation::Bind},
- {Operation::SendTo, Operation::Bind, Operation::Connect},
- {Operation::SendTo, Operation::Connect, Operation::Bind}})),
- [](::testing::TestParamInfo<
- std::tuple<TestAddress, OperationSequence>> const& info) {
- const TestAddress& addr = std::get<0>(info.param);
- const OperationSequence& operations = std::get<1>(info.param);
- std::string s = addr.description;
- for (const Operation& operation : operations) {
- absl::StrAppend(&s, OperationToString(operation));
- }
- return s;
- });
-
-void tcpSimpleConnectTest(TestAddress const& listener,
- TestAddress const& connector, bool unbound) {
- // Create the listening socket.
- const FileDescriptor listen_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(listener.family(), SOCK_STREAM, IPPROTO_TCP));
- sockaddr_storage listen_addr = listener.addr;
- if (!unbound) {
- ASSERT_THAT(
- bind(listen_fd.get(), AsSockAddr(&listen_addr), listener.addr_len),
- SyscallSucceeds());
- }
- ASSERT_THAT(listen(listen_fd.get(), SOMAXCONN), SyscallSucceeds());
-
- // Get the port bound by the listening socket.
- socklen_t addrlen = listener.addr_len;
- ASSERT_THAT(getsockname(listen_fd.get(), AsSockAddr(&listen_addr), &addrlen),
- SyscallSucceeds());
- uint16_t const port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr));
-
- // Connect to the listening socket.
- const FileDescriptor conn_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(connector.family(), SOCK_STREAM, IPPROTO_TCP));
- sockaddr_storage conn_addr = connector.addr;
- ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));
- ASSERT_THAT(RetryEINTR(connect)(conn_fd.get(), AsSockAddr(&conn_addr),
- connector.addr_len),
- SyscallSucceeds());
-
- // Accept the connection.
- //
- // We have to assign a name to the accepted socket, as unnamed temporary
- // objects are destroyed upon full evaluation of the expression they appear
- // in, potentially causing the connecting socket to fail to shut down properly.
- auto accepted =
- ASSERT_NO_ERRNO_AND_VALUE(Accept(listen_fd.get(), nullptr, nullptr));
-
- ASSERT_THAT(shutdown(listen_fd.get(), SHUT_RDWR), SyscallSucceeds());
-
- ASSERT_THAT(shutdown(conn_fd.get(), SHUT_RDWR), SyscallSucceeds());
-}
-
-TEST_P(SocketInetLoopbackTest, TCP) {
- SocketInetTestParam const& param = GetParam();
- TestAddress const& listener = param.listener;
- TestAddress const& connector = param.connector;
-
-  tcpSimpleConnectTest(listener, connector, /*unbound=*/false);
-}
-
-TEST_P(SocketInetLoopbackTest, TCPListenUnbound) {
- SocketInetTestParam const& param = GetParam();
-
- TestAddress const& listener = param.listener;
- TestAddress const& connector = param.connector;
-
-  tcpSimpleConnectTest(listener, connector, /*unbound=*/true);
-}
-
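-// Checks that a listener that has been shutdown(SHUT_RD) can call listen()
-// again and still accept a backlog's worth of new connections.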
-TEST_P(SocketInetLoopbackTest, TCPListenShutdownListen) {
- SocketInetTestParam const& param = GetParam();
-
- const TestAddress& listener = param.listener;
- const TestAddress& connector = param.connector;
-
- constexpr int kBacklog = 5;
-
- // Create the listening socket.
- FileDescriptor listen_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(listener.family(), SOCK_STREAM, IPPROTO_TCP));
- sockaddr_storage listen_addr = listener.addr;
- ASSERT_THAT(
- bind(listen_fd.get(), AsSockAddr(&listen_addr), listener.addr_len),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(listen_fd.get(), kBacklog), SyscallSucceeds());
- ASSERT_THAT(shutdown(listen_fd.get(), SHUT_RD), SyscallSucceeds());
- ASSERT_THAT(listen(listen_fd.get(), kBacklog), SyscallSucceeds());
-
- // Get the port bound by the listening socket.
- socklen_t addrlen = listener.addr_len;
- ASSERT_THAT(getsockname(listen_fd.get(), AsSockAddr(&listen_addr), &addrlen),
- SyscallSucceeds());
- const uint16_t port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr));
-
- sockaddr_storage conn_addr = connector.addr;
- ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));
-
-  // TODO(b/157236388): Remove DisableSave after the bug is fixed. The S/R test
-  // can fail because the last socket may not be delivered to the accept queue
-  // by the time connect returns.
- DisableSave ds;
- for (int i = 0; i < kBacklog; i++) {
- auto client = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(connector.family(), SOCK_STREAM, IPPROTO_TCP));
- ASSERT_THAT(RetryEINTR(connect)(client.get(), AsSockAddr(&conn_addr),
- connector.addr_len),
- SyscallSucceeds());
- }
- for (int i = 0; i < kBacklog; i++) {
- ASSERT_THAT(accept(listen_fd.get(), nullptr, nullptr), SyscallSucceeds());
- }
-}
-
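-// Checks the effect of shutdown() on a listening socket: SHUT_WR has no
-// effect, while SHUT_RD makes subsequent accepts fail with EINVAL, keeps the
-// bound port reserved, and makes new connection attempts fail with
-// ECONNREFUSED.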
-TEST_P(SocketInetLoopbackTest, TCPListenShutdown) {
- SocketInetTestParam const& param = GetParam();
-
- TestAddress const& listener = param.listener;
- TestAddress const& connector = param.connector;
-
- constexpr int kBacklog = 2;
- // See the comment in TCPBacklog for why this isn't kBacklog + 1.
- constexpr int kFDs = kBacklog;
-
- // Create the listening socket.
- FileDescriptor listen_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(listener.family(), SOCK_STREAM, IPPROTO_TCP));
- sockaddr_storage listen_addr = listener.addr;
- ASSERT_THAT(
- bind(listen_fd.get(), AsSockAddr(&listen_addr), listener.addr_len),
- SyscallSucceeds());
- ASSERT_THAT(listen(listen_fd.get(), kBacklog), SyscallSucceeds());
-
- // Get the port bound by the listening socket.
- socklen_t addrlen = listener.addr_len;
- ASSERT_THAT(getsockname(listen_fd.get(), AsSockAddr(&listen_addr), &addrlen),
- SyscallSucceeds());
- uint16_t const port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr));
-
- sockaddr_storage conn_addr = connector.addr;
- ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));
-
-  // Shut down the write side of the listener; this should have no effect.
- ASSERT_THAT(shutdown(listen_fd.get(), SHUT_WR), SyscallSucceeds());
-
- for (int i = 0; i < kFDs; i++) {
- auto client = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(connector.family(), SOCK_STREAM, IPPROTO_TCP));
- ASSERT_THAT(RetryEINTR(connect)(client.get(), AsSockAddr(&conn_addr),
- connector.addr_len),
- SyscallSucceeds());
- ASSERT_THAT(accept(listen_fd.get(), nullptr, nullptr), SyscallSucceeds());
- }
-
-  // Shut down the read side of the listener; subsequent server accepts, binds
-  // and client connects should fail.
- ASSERT_THAT(shutdown(listen_fd.get(), SHUT_RD), SyscallSucceeds());
-
- ASSERT_THAT(accept(listen_fd.get(), nullptr, nullptr),
- SyscallFailsWithErrno(EINVAL));
-
- // Check that shutdown did not release the port.
- FileDescriptor new_listen_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(listener.family(), SOCK_STREAM, IPPROTO_TCP));
- ASSERT_THAT(
- bind(new_listen_fd.get(), AsSockAddr(&listen_addr), listener.addr_len),
- SyscallFailsWithErrno(EADDRINUSE));
-
-  // Check that subsequent connection attempts receive a RST and fail with
-  // ECONNREFUSED.
- for (int i = 0; i < kFDs; i++) {
- auto client = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(connector.family(), SOCK_STREAM, IPPROTO_TCP));
- ASSERT_THAT(RetryEINTR(connect)(client.get(), AsSockAddr(&conn_addr),
- connector.addr_len),
- SyscallFailsWithErrno(ECONNREFUSED));
- }
-}
-
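-// Opens several times the backlog's worth of client connections against a
-// small backlog and accepts only a few of them, leaving the rest to be cleaned
-// up when the listener and clients go out of scope.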
-TEST_P(SocketInetLoopbackTest, TCPListenClose) {
- SocketInetTestParam const& param = GetParam();
-
- TestAddress const& listener = param.listener;
- TestAddress const& connector = param.connector;
-
- constexpr int kAcceptCount = 2;
- constexpr int kBacklog = kAcceptCount + 2;
- constexpr int kFDs = kBacklog * 3;
-
- // Create the listening socket.
- FileDescriptor listen_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(listener.family(), SOCK_STREAM, IPPROTO_TCP));
- sockaddr_storage listen_addr = listener.addr;
- ASSERT_THAT(
- bind(listen_fd.get(), AsSockAddr(&listen_addr), listener.addr_len),
- SyscallSucceeds());
- ASSERT_THAT(listen(listen_fd.get(), kBacklog), SyscallSucceeds());
-
- // Get the port bound by the listening socket.
- socklen_t addrlen = listener.addr_len;
- ASSERT_THAT(getsockname(listen_fd.get(), AsSockAddr(&listen_addr), &addrlen),
- SyscallSucceeds());
- uint16_t const port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr));
-
- // Connect repeatedly, keeping each connection open. After kBacklog
- // connections, we'll start getting EINPROGRESS.
- sockaddr_storage conn_addr = connector.addr;
- ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));
- std::vector<FileDescriptor> clients;
- for (int i = 0; i < kFDs; i++) {
- auto client = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(connector.family(), SOCK_STREAM | SOCK_NONBLOCK, IPPROTO_TCP));
- int ret = connect(client.get(), AsSockAddr(&conn_addr), connector.addr_len);
- if (ret != 0) {
- EXPECT_THAT(ret, SyscallFailsWithErrno(EINPROGRESS));
- }
- clients.push_back(std::move(client));
- }
- for (int i = 0; i < kAcceptCount; i++) {
- auto accepted =
- ASSERT_NO_ERRNO_AND_VALUE(Accept(listen_fd.get(), nullptr, nullptr));
- }
-}
-
-// Test the protocol state information returned by TCPINFO.
-TEST_P(SocketInetLoopbackTest, TCPInfoState) {
- SocketInetTestParam const& param = GetParam();
- TestAddress const& listener = param.listener;
- TestAddress const& connector = param.connector;
-
- // Create the listening socket.
- FileDescriptor const listen_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(listener.family(), SOCK_STREAM, IPPROTO_TCP));
-
- auto state = [](int fd) -> int {
- struct tcp_info opt = {};
- socklen_t optLen = sizeof(opt);
- EXPECT_THAT(getsockopt(fd, SOL_TCP, TCP_INFO, &opt, &optLen),
- SyscallSucceeds());
- return opt.tcpi_state;
- };
- ASSERT_EQ(state(listen_fd.get()), TCP_CLOSE);
-
- sockaddr_storage listen_addr = listener.addr;
- ASSERT_THAT(
- bind(listen_fd.get(), AsSockAddr(&listen_addr), listener.addr_len),
- SyscallSucceeds());
- ASSERT_EQ(state(listen_fd.get()), TCP_CLOSE);
-
- ASSERT_THAT(listen(listen_fd.get(), SOMAXCONN), SyscallSucceeds());
- ASSERT_EQ(state(listen_fd.get()), TCP_LISTEN);
-
- // Get the port bound by the listening socket.
- socklen_t addrlen = listener.addr_len;
- ASSERT_THAT(getsockname(listen_fd.get(), AsSockAddr(&listen_addr), &addrlen),
- SyscallSucceeds());
- uint16_t const port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr));
-
- // Connect to the listening socket.
- FileDescriptor conn_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(connector.family(), SOCK_STREAM, IPPROTO_TCP));
- sockaddr_storage conn_addr = connector.addr;
- ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));
- ASSERT_EQ(state(conn_fd.get()), TCP_CLOSE);
- ASSERT_THAT(RetryEINTR(connect)(conn_fd.get(), AsSockAddr(&conn_addr),
- connector.addr_len),
- SyscallSucceeds());
- ASSERT_EQ(state(conn_fd.get()), TCP_ESTABLISHED);
-
- auto accepted =
- ASSERT_NO_ERRNO_AND_VALUE(Accept(listen_fd.get(), nullptr, nullptr));
- ASSERT_EQ(state(accepted.get()), TCP_ESTABLISHED);
-
- ASSERT_THAT(close(accepted.release()), SyscallSucceeds());
-
- struct pollfd pfd = {
- .fd = conn_fd.get(),
- .events = POLLIN | POLLRDHUP,
- };
- constexpr int kTimeout = 10000;
- int n = poll(&pfd, 1, kTimeout);
- ASSERT_GE(n, 0) << strerror(errno);
- ASSERT_EQ(n, 1);
- if (IsRunningOnGvisor() && GvisorPlatform() != Platform::kFuchsia) {
- // TODO(gvisor.dev/issue/6015): Notify POLLRDHUP on incoming FIN.
- ASSERT_EQ(pfd.revents, POLLIN);
- } else {
- ASSERT_EQ(pfd.revents, POLLIN | POLLRDHUP);
- }
-
-  ASSERT_EQ(state(conn_fd.get()), TCP_CLOSE_WAIT);
- ASSERT_THAT(close(conn_fd.release()), SyscallSucceeds());
-}
-
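-// Repeatedly starts a non-blocking connect against a listener and then hangs
-// up the listener via `hangup` (close or shutdown), expecting the client to
-// observe POLLHUP | POLLERR.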
-void TestHangupDuringConnect(const SocketInetTestParam& param,
- void (*hangup)(FileDescriptor&)) {
- TestAddress const& listener = param.listener;
- TestAddress const& connector = param.connector;
-
- for (int i = 0; i < 100; i++) {
- // Create the listening socket.
- FileDescriptor listen_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(listener.family(), SOCK_STREAM, IPPROTO_TCP));
- sockaddr_storage listen_addr = listener.addr;
- ASSERT_THAT(bind(listen_fd.get(), reinterpret_cast<sockaddr*>(&listen_addr),
- listener.addr_len),
- SyscallSucceeds());
- ASSERT_THAT(listen(listen_fd.get(), 0), SyscallSucceeds());
-
- // Get the port bound by the listening socket.
- socklen_t addrlen = listener.addr_len;
- ASSERT_THAT(
- getsockname(listen_fd.get(), reinterpret_cast<sockaddr*>(&listen_addr),
- &addrlen),
- SyscallSucceeds());
- uint16_t const port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr));
-
- sockaddr_storage conn_addr = connector.addr;
- ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));
-
- // Connect asynchronously and immediately hang up the listener.
- FileDescriptor client = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(connector.family(), SOCK_STREAM | SOCK_NONBLOCK, IPPROTO_TCP));
- int ret = connect(client.get(), reinterpret_cast<sockaddr*>(&conn_addr),
- connector.addr_len);
- if (ret != 0) {
- EXPECT_THAT(ret, SyscallFailsWithErrno(EINPROGRESS));
- }
-
- hangup(listen_fd);
-
- // Wait for the connection to close.
- struct pollfd pfd = {
- .fd = client.get(),
- };
- constexpr int kTimeout = 10000;
- int n = poll(&pfd, 1, kTimeout);
- ASSERT_GE(n, 0) << strerror(errno);
- ASSERT_EQ(n, 1);
- ASSERT_EQ(pfd.revents, POLLHUP | POLLERR);
- ASSERT_EQ(close(client.release()), 0) << strerror(errno);
- }
-}
-
-TEST_P(SocketInetLoopbackTest, TCPListenCloseDuringConnect) {
- TestHangupDuringConnect(GetParam(), [](FileDescriptor& f) {
- ASSERT_THAT(close(f.release()), SyscallSucceeds());
- });
-}
-
-TEST_P(SocketInetLoopbackTest, TCPListenShutdownDuringConnect) {
- TestHangupDuringConnect(GetParam(), [](FileDescriptor& f) {
- ASSERT_THAT(shutdown(f.get(), SHUT_RD), SyscallSucceeds());
- });
-}
-
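-// Establishes one connection and leaves a second client in the connecting
-// state (using a zero backlog), then hangs up the listener via `hangup` and
-// verifies that reads on the two clients fail with ECONNRESET and
-// ECONNREFUSED respectively.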
-void TestListenHangupConnectingRead(const SocketInetTestParam& param,
- void (*hangup)(FileDescriptor&)) {
- constexpr int kTimeout = 10000;
-
- TestAddress const& listener = param.listener;
- TestAddress const& connector = param.connector;
-
- // Create the listening socket.
- FileDescriptor listen_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(listener.family(), SOCK_STREAM, IPPROTO_TCP));
- sockaddr_storage listen_addr = listener.addr;
- ASSERT_THAT(
- bind(listen_fd.get(), AsSockAddr(&listen_addr), listener.addr_len),
- SyscallSucceeds());
-  // This test is only interested in deterministically getting a socket in the
-  // connecting state. For that, we use a listen backlog of zero, which means
-  // exactly one connection gets established and enqueued to the accept queue.
-  // We poll on the listener to ensure that it is enqueued. After that, the
-  // subsequent client connect will stay in the connecting state as the accept
-  // queue is full.
- constexpr int kBacklog = 0;
- ASSERT_THAT(listen(listen_fd.get(), kBacklog), SyscallSucceeds());
-
- // Get the port bound by the listening socket.
- socklen_t addrlen = listener.addr_len;
- ASSERT_THAT(getsockname(listen_fd.get(), AsSockAddr(&listen_addr), &addrlen),
- SyscallSucceeds());
- uint16_t const port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr));
-
- sockaddr_storage conn_addr = connector.addr;
- ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));
- FileDescriptor established_client = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(connector.family(), SOCK_STREAM | SOCK_NONBLOCK, IPPROTO_TCP));
- int ret = connect(established_client.get(), AsSockAddr(&conn_addr),
- connector.addr_len);
- if (ret != 0) {
- EXPECT_THAT(ret, SyscallFailsWithErrno(EINPROGRESS));
- }
-
- // On some kernels a backlog of 0 means no backlog, while on others it means a
- // backlog of 1. See commit c609e6aae4efcf383fe86b195d1b060befcb3666 for more
- // explanation.
- //
- // If we timeout connecting to loopback, we're on a kernel with no backlog.
- pollfd pfd = {
- .fd = established_client.get(),
- .events = POLLIN | POLLOUT,
- };
- if (!poll(&pfd, 1, kTimeout)) {
- // We're on one of those kernels. It should be impossible to establish the
- // connection, so connect will always return EALREADY.
- EXPECT_THAT(connect(established_client.get(), AsSockAddr(&conn_addr),
- connector.addr_len),
- SyscallFailsWithErrno(EALREADY));
- return;
- }
-
- // Ensure that the accept queue has the completed connection.
- pfd = {
- .fd = listen_fd.get(),
- .events = POLLIN,
- };
- ASSERT_THAT(poll(&pfd, 1, kTimeout), SyscallSucceedsWithValue(1));
- ASSERT_EQ(pfd.revents, POLLIN);
-
- FileDescriptor connecting_client = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(connector.family(), SOCK_STREAM | SOCK_NONBLOCK, IPPROTO_TCP));
- // Keep the last client in connecting state.
- ret = connect(connecting_client.get(), AsSockAddr(&conn_addr),
- connector.addr_len);
- if (ret != 0) {
- EXPECT_THAT(ret, SyscallFailsWithErrno(EINPROGRESS));
- }
-
- hangup(listen_fd);
-
- std::array<std::pair<int, int>, 2> sockets = {
- std::make_pair(established_client.get(), ECONNRESET),
- std::make_pair(connecting_client.get(), ECONNREFUSED),
- };
- for (size_t i = 0; i < sockets.size(); i++) {
- SCOPED_TRACE(absl::StrCat("i=", i));
- auto [fd, expected_errno] = sockets[i];
- pollfd pfd = {
- .fd = fd,
- };
- // When the listening socket is closed, the peer would reset the connection.
- EXPECT_THAT(poll(&pfd, 1, kTimeout), SyscallSucceedsWithValue(1));
- EXPECT_EQ(pfd.revents, POLLHUP | POLLERR);
- char c;
- EXPECT_THAT(read(fd, &c, sizeof(c)), SyscallFailsWithErrno(expected_errno));
- }
-}
-
-TEST_P(SocketInetLoopbackTest, TCPListenCloseConnectingRead) {
- TestListenHangupConnectingRead(GetParam(), [](FileDescriptor& f) {
- ASSERT_THAT(close(f.release()), SyscallSucceeds());
- });
-}
-
-TEST_P(SocketInetLoopbackTest, TCPListenShutdownConnectingRead) {
- TestListenHangupConnectingRead(GetParam(), [](FileDescriptor& f) {
- ASSERT_THAT(shutdown(f.get(), SHUT_RD), SyscallSucceeds());
- });
-}
-
-// Test close of a non-blocking connecting socket.
-TEST_P(SocketInetLoopbackTest, TCPNonBlockingConnectClose) {
- SocketInetTestParam const& param = GetParam();
- TestAddress const& listener = param.listener;
- TestAddress const& connector = param.connector;
-
- // Create the listening socket.
- FileDescriptor listen_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(listener.family(), SOCK_STREAM | SOCK_NONBLOCK, IPPROTO_TCP));
- sockaddr_storage listen_addr = listener.addr;
- ASSERT_THAT(
- bind(listen_fd.get(), AsSockAddr(&listen_addr), listener.addr_len),
- SyscallSucceeds());
- ASSERT_THAT(listen(listen_fd.get(), 0), SyscallSucceeds());
-
- // Get the port bound by the listening socket.
- socklen_t addrlen = listener.addr_len;
- ASSERT_THAT(getsockname(listen_fd.get(), AsSockAddr(&listen_addr), &addrlen),
- SyscallSucceeds());
- ASSERT_EQ(addrlen, listener.addr_len);
- uint16_t const port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr));
-
- sockaddr_storage conn_addr = connector.addr;
- ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));
-
- // Try many iterations to catch a race with socket close and handshake
- // completion.
- for (int i = 0; i < 100; ++i) {
- FileDescriptor client = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(connector.family(), SOCK_STREAM | SOCK_NONBLOCK, IPPROTO_TCP));
- ASSERT_THAT(
- connect(client.get(), AsSockAddr(&conn_addr), connector.addr_len),
- SyscallFailsWithErrno(EINPROGRESS));
- ASSERT_THAT(close(client.release()), SyscallSucceeds());
-
-    // Accept any connections and check if they were closed by the peer. Not
-    // every client connect results in an acceptable connection, as the client
-    // handshake might never complete if the socket close was processed before
-    // the non-blocking connect, or if the accept queue is full. We are only
-    // interested in the case where we do have an acceptable completed
-    // connection. The accept is non-blocking here, which means that at the
-    // time of listener close (after the loop ends), we could still have a
-    // completed connection (from a connect in any previous iteration) in the
-    // accept queue. The listener close cleans up the accept queue.
- int accepted_fd;
- ASSERT_THAT(accepted_fd = accept(listen_fd.get(), nullptr, nullptr),
- AnyOf(SyscallSucceeds(), SyscallFailsWithErrno(EWOULDBLOCK)));
- if (accepted_fd < 0) {
- continue;
- }
- FileDescriptor accepted(accepted_fd);
- struct pollfd pfd = {
- .fd = accepted.get(),
- .events = POLLIN | POLLRDHUP,
- };
-    // Use a large timeout to accommodate retransmitted FINs.
- constexpr int kTimeout = 30000;
- int n = poll(&pfd, 1, kTimeout);
- ASSERT_GE(n, 0) << strerror(errno);
- ASSERT_EQ(n, 1);
-
- if (IsRunningOnGvisor() && GvisorPlatform() != Platform::kFuchsia) {
- // TODO(gvisor.dev/issue/6015): Notify POLLRDHUP on incoming FIN.
- ASSERT_EQ(pfd.revents, POLLIN);
- } else {
- ASSERT_EQ(pfd.revents, POLLIN | POLLRDHUP);
- }
- ASSERT_THAT(close(accepted.release()), SyscallSucceeds());
- }
-}
-
-// TODO(b/157236388): Remove once the bug is fixed. The test fails with random
-// save because established connections that cannot be delivered to the accept
-// queue (the queue is full) are not correctly delivered after restore, causing
-// the last accept to time out after the restore.
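-// Checks how many connections can be accepted for various listen backlog
-// values, including a negative backlog, for which a large number (1024) of
-// connections is expected to be acceptable.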
-TEST_P(SocketInetLoopbackTest, TCPAcceptBacklogSizes) {
- SocketInetTestParam const& param = GetParam();
-
- TestAddress const& listener = param.listener;
- TestAddress const& connector = param.connector;
-
- // Create the listening socket.
- const FileDescriptor listen_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(listener.family(), SOCK_STREAM, IPPROTO_TCP));
- sockaddr_storage listen_addr = listener.addr;
- ASSERT_THAT(
- bind(listen_fd.get(), AsSockAddr(&listen_addr), listener.addr_len),
- SyscallSucceeds());
- // Get the port bound by the listening socket.
- socklen_t addrlen = listener.addr_len;
- ASSERT_THAT(getsockname(listen_fd.get(), AsSockAddr(&listen_addr), &addrlen),
- SyscallSucceeds());
- uint16_t const port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr));
- std::array<int, 3> backlogs = {-1, 0, 1};
- for (auto& backlog : backlogs) {
- ASSERT_THAT(listen(listen_fd.get(), backlog), SyscallSucceeds());
-
- int expected_accepts;
- if (backlog < 0) {
- expected_accepts = 1024;
- } else {
- // See the comment in TCPBacklog for why this isn't backlog + 1.
- expected_accepts = backlog;
- }
- for (int i = 0; i < expected_accepts; i++) {
- SCOPED_TRACE(absl::StrCat("i=", i));
- // Connect to the listening socket.
- const FileDescriptor conn_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(connector.family(), SOCK_STREAM, IPPROTO_TCP));
- sockaddr_storage conn_addr = connector.addr;
- ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));
- ASSERT_THAT(RetryEINTR(connect)(conn_fd.get(), AsSockAddr(&conn_addr),
- connector.addr_len),
- SyscallSucceeds());
- const FileDescriptor accepted =
- ASSERT_NO_ERRNO_AND_VALUE(Accept(listen_fd.get(), nullptr, nullptr));
- }
- }
-}
-
-// TODO(b/157236388): Remove once the bug is fixed. The test fails with random
-// save because established connections that cannot be delivered to the accept
-// queue (the queue is full) are not correctly delivered after restore, causing
-// the last accept to time out after the restore.
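-// Keeps connecting until a connect no longer completes within the poll
-// timeout, then drains the accept queue and checks that at least kBacklogSize
-// connections were accepted and that we never accept more than we connected.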
-TEST_P(SocketInetLoopbackTest, TCPBacklog) {
- SocketInetTestParam const& param = GetParam();
-
- TestAddress const& listener = param.listener;
- TestAddress const& connector = param.connector;
-
- // Create the listening socket.
- const FileDescriptor listen_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(listener.family(), SOCK_STREAM, IPPROTO_TCP));
- sockaddr_storage listen_addr = listener.addr;
- ASSERT_THAT(
- bind(listen_fd.get(), AsSockAddr(&listen_addr), listener.addr_len),
- SyscallSucceeds());
- constexpr int kBacklogSize = 2;
- ASSERT_THAT(listen(listen_fd.get(), kBacklogSize), SyscallSucceeds());
-
- // Get the port bound by the listening socket.
- socklen_t addrlen = listener.addr_len;
- ASSERT_THAT(getsockname(listen_fd.get(), AsSockAddr(&listen_addr), &addrlen),
- SyscallSucceeds());
- uint16_t const port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr));
- int i = 0;
- while (1) {
- SCOPED_TRACE(absl::StrCat("i=", i));
- int ret;
-
- // Connect to the listening socket.
- const FileDescriptor conn_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(connector.family(), SOCK_STREAM | SOCK_NONBLOCK, IPPROTO_TCP));
- sockaddr_storage conn_addr = connector.addr;
- ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));
- ret = connect(conn_fd.get(), AsSockAddr(&conn_addr), connector.addr_len);
- if (ret != 0) {
- EXPECT_THAT(ret, SyscallFailsWithErrno(EINPROGRESS));
- pollfd pfd = {
- .fd = conn_fd.get(),
- .events = POLLOUT,
- };
- ret = poll(&pfd, 1, 3000);
- if (ret == 0) break;
- EXPECT_THAT(ret, SyscallSucceedsWithValue(1));
- }
- EXPECT_THAT(RetryEINTR(send)(conn_fd.get(), &i, sizeof(i), 0),
- SyscallSucceedsWithValue(sizeof(i)));
- ASSERT_THAT(shutdown(conn_fd.get(), SHUT_RDWR), SyscallSucceeds());
- i++;
- }
-
- int client_conns = i;
- int accepted_conns = 0;
- for (; i != 0; i--) {
- SCOPED_TRACE(absl::StrCat("i=", i));
- pollfd pfd = {
- .fd = listen_fd.get(),
- .events = POLLIN,
- };
- // Look for incoming connections to accept. The last connect request could
- // be established from the client side, but the ACK of the handshake could
- // be dropped by the listener if the accept queue was filled up by the
- // previous connect.
- int ret;
- ASSERT_THAT(ret = poll(&pfd, 1, 3000), SyscallSucceeds());
- if (ret == 0) break;
- if (pfd.revents == POLLIN) {
- // Accept the connection.
- //
-      // We have to assign a name to the accepted socket, as unnamed temporary
-      // objects are destroyed upon full evaluation of the expression they
-      // appear in, potentially causing the connecting socket to fail to shut
-      // down properly.
- auto accepted =
- ASSERT_NO_ERRNO_AND_VALUE(Accept(listen_fd.get(), nullptr, nullptr));
- accepted_conns++;
- }
- }
-  // We should accept at least kBacklogSize connections. While the stack is
-  // enqueuing established connections to the accept queue, newer SYNs may
-  // still be replied to, causing those client connections to be accepted as
-  // we start dequeuing the queue.
-  //
-  // On some kernels this value can be off by one, so we don't add 1 to
-  // kBacklogSize. See commit c609e6aae4efcf383fe86b195d1b060befcb3666 for more
-  // explanation.
- ASSERT_GE(accepted_conns, kBacklogSize);
- ASSERT_GE(client_conns, accepted_conns);
-}
-
-// TODO(b/157236388): Remove once the bug is fixed. The test fails with random
-// save because established connections that cannot be delivered to the accept
-// queue (the queue is full) are not correctly delivered after restore, causing
-// the last accept to time out after the restore.
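-// Fills the accept queue, starts additional clients that have to wait, and
-// then verifies that every client (established or still waiting) can
-// eventually be accepted as the queue is drained.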
-TEST_P(SocketInetLoopbackTest, TCPBacklogAcceptAll) {
- SocketInetTestParam const& param = GetParam();
- TestAddress const& listener = param.listener;
- TestAddress const& connector = param.connector;
-
- // Create the listening socket.
- FileDescriptor listen_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(listener.family(), SOCK_STREAM, IPPROTO_TCP));
- sockaddr_storage listen_addr = listener.addr;
- ASSERT_THAT(
- bind(listen_fd.get(), AsSockAddr(&listen_addr), listener.addr_len),
- SyscallSucceeds());
- constexpr int kBacklog = 1;
- ASSERT_THAT(listen(listen_fd.get(), kBacklog), SyscallSucceeds());
-
- // Get the port bound by the listening socket.
- socklen_t addrlen = listener.addr_len;
- ASSERT_THAT(getsockname(listen_fd.get(), AsSockAddr(&listen_addr), &addrlen),
- SyscallSucceeds());
- uint16_t const port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr));
-
- sockaddr_storage conn_addr = connector.addr;
- ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));
-
-  // Fill up the accept queue, then trigger more client connections that will
-  // be waiting to be accepted.
-  //
-  // See the comment in TCPBacklog for why this isn't backlog + 1.
- std::array<FileDescriptor, kBacklog> established_clients;
- for (auto& fd : established_clients) {
- fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(connector.family(), SOCK_STREAM, IPPROTO_TCP));
- ASSERT_THAT(connect(fd.get(), AsSockAddr(&conn_addr), connector.addr_len),
- SyscallSucceeds());
- }
- std::array<FileDescriptor, kBacklog> waiting_clients;
- for (auto& fd : waiting_clients) {
- fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(connector.family(), SOCK_STREAM | SOCK_NONBLOCK, IPPROTO_TCP));
- int ret = connect(fd.get(), AsSockAddr(&conn_addr), connector.addr_len);
- if (ret != 0) {
- EXPECT_THAT(ret, SyscallFailsWithErrno(EINPROGRESS));
- }
- }
-
- auto accept_connection = [&]() {
- constexpr int kTimeout = 10000;
- pollfd pfd = {
- .fd = listen_fd.get(),
- .events = POLLIN,
- };
- ASSERT_THAT(poll(&pfd, 1, kTimeout), SyscallSucceedsWithValue(1));
- ASSERT_EQ(pfd.revents, POLLIN);
- // Accept the connection.
- //
-    // We have to assign a name to the accepted socket, as unnamed temporary
-    // objects are destroyed upon full evaluation of the expression they appear
-    // in, potentially causing the connecting socket to fail to shut down
-    // properly.
- auto accepted =
- ASSERT_NO_ERRNO_AND_VALUE(Accept(listen_fd.get(), nullptr, nullptr));
- };
-
- // Ensure that we accept all client connections. The waiting connections would
- // get enqueued as we drain the accept queue.
- for (int i = 0; i < std::size(established_clients); i++) {
- SCOPED_TRACE(absl::StrCat("established clients i=", i));
- accept_connection();
- }
-
-  // The waiting client connections could be in one of these 2 states:
-  // (1) SYN_SENT: if the SYN was dropped because the accept queue was full.
-  // (2) ESTABLISHED: if the listener sent back a SYN-ACK but dropped the ACK
-  // from the client because the accept queue was full (sending data below
-  // forces the client to retransmit that ACK, which covers that case).
- for (int i = 0; i < std::size(waiting_clients); i++) {
- SCOPED_TRACE(absl::StrCat("waiting clients i=", i));
- constexpr int kTimeout = 10000;
- pollfd pfd = {
- .fd = waiting_clients[i].get(),
- .events = POLLOUT,
- };
- EXPECT_THAT(poll(&pfd, 1, kTimeout), SyscallSucceedsWithValue(1));
- EXPECT_EQ(pfd.revents, POLLOUT);
- char c;
- EXPECT_THAT(RetryEINTR(send)(waiting_clients[i].get(), &c, sizeof(c), 0),
- SyscallSucceedsWithValue(sizeof(c)));
- accept_connection();
- }
-}
-
-// TCPResetAfterClose creates a pair of connected sockets, closes one end to
-// put the closed endpoint into the FIN_WAIT2 state, and verifies that we
-// generate RSTs for any new data sent after the socket is fully closed.
-TEST_P(SocketInetLoopbackTest, TCPResetAfterClose) {
- SocketInetTestParam const& param = GetParam();
- TestAddress const& listener = param.listener;
- TestAddress const& connector = param.connector;
-
- // Create the listening socket.
- const FileDescriptor listen_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(listener.family(), SOCK_STREAM, IPPROTO_TCP));
- sockaddr_storage listen_addr = listener.addr;
- ASSERT_THAT(
- bind(listen_fd.get(), AsSockAddr(&listen_addr), listener.addr_len),
- SyscallSucceeds());
- ASSERT_THAT(listen(listen_fd.get(), SOMAXCONN), SyscallSucceeds());
-
- // Get the port bound by the listening socket.
- socklen_t addrlen = listener.addr_len;
- ASSERT_THAT(getsockname(listen_fd.get(), AsSockAddr(&listen_addr), &addrlen),
- SyscallSucceeds());
-
- uint16_t const port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr));
-
- // Connect to the listening socket.
- FileDescriptor conn_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(connector.family(), SOCK_STREAM, IPPROTO_TCP));
-
- sockaddr_storage conn_addr = connector.addr;
- ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));
- ASSERT_THAT(RetryEINTR(connect)(conn_fd.get(), AsSockAddr(&conn_addr),
- connector.addr_len),
- SyscallSucceeds());
-
- // Accept the connection.
- auto accepted =
- ASSERT_NO_ERRNO_AND_VALUE(Accept(listen_fd.get(), nullptr, nullptr));
-
-  // Close the connecting FD to trigger FIN_WAIT2 on the connected FD.
- conn_fd.reset();
-
- int data = 1234;
-
- // Now send data which should trigger a RST as the other end should
- // have timed out and closed the socket.
- EXPECT_THAT(RetryEINTR(send)(accepted.get(), &data, sizeof(data), 0),
- SyscallSucceeds());
-  // Sleep for a short while to get a RST back.
- absl::SleepFor(absl::Seconds(1));
-
- // Try writing again and we should get an EPIPE back.
- EXPECT_THAT(RetryEINTR(send)(accepted.get(), &data, sizeof(data), 0),
- SyscallFailsWithErrno(EPIPE));
-
-  // Trying to read should return zero, as the other end sent us a FIN. We do
-  // it twice to verify that the RST does not cause an ECONNRESET on a read
-  // after EOF has already been read by the application.
- EXPECT_THAT(RetryEINTR(recv)(accepted.get(), &data, sizeof(data), 0),
- SyscallSucceedsWithValue(0));
- EXPECT_THAT(RetryEINTR(recv)(accepted.get(), &data, sizeof(data), 0),
- SyscallSucceedsWithValue(0));
-}
-
-TEST_P(SocketInetLoopbackTest, AcceptedInheritsTCPUserTimeout) {
- SocketInetTestParam const& param = GetParam();
- TestAddress const& listener = param.listener;
- TestAddress const& connector = param.connector;
-
- // Create the listening socket.
- const FileDescriptor listen_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(listener.family(), SOCK_STREAM, IPPROTO_TCP));
- sockaddr_storage listen_addr = listener.addr;
- ASSERT_THAT(
- bind(listen_fd.get(), AsSockAddr(&listen_addr), listener.addr_len),
- SyscallSucceeds());
- ASSERT_THAT(listen(listen_fd.get(), SOMAXCONN), SyscallSucceeds());
-
- // Get the port bound by the listening socket.
- socklen_t addrlen = listener.addr_len;
- ASSERT_THAT(getsockname(listen_fd.get(), AsSockAddr(&listen_addr), &addrlen),
- SyscallSucceeds());
-
- const uint16_t port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr));
-
- // Set the userTimeout on the listening socket.
- constexpr int kUserTimeout = 10;
- ASSERT_THAT(setsockopt(listen_fd.get(), IPPROTO_TCP, TCP_USER_TIMEOUT,
- &kUserTimeout, sizeof(kUserTimeout)),
- SyscallSucceeds());
-
- // Connect to the listening socket.
- FileDescriptor conn_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(connector.family(), SOCK_STREAM, IPPROTO_TCP));
-
- sockaddr_storage conn_addr = connector.addr;
- ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));
- ASSERT_THAT(RetryEINTR(connect)(conn_fd.get(), AsSockAddr(&conn_addr),
- connector.addr_len),
- SyscallSucceeds());
-
- // Accept the connection.
- auto accepted =
- ASSERT_NO_ERRNO_AND_VALUE(Accept(listen_fd.get(), nullptr, nullptr));
-  // Verify that the accepted socket inherited the user timeout set on the
-  // listening socket.
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(
- getsockopt(accepted.get(), IPPROTO_TCP, TCP_USER_TIMEOUT, &get, &get_len),
- SyscallSucceeds());
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kUserTimeout);
-}
-
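-// Verifies that a connection reset by the client (SO_LINGER with a zero
-// timeout followed by close) can still be accepted, that the accepted socket
-// reports ECONNRESET exactly once via SO_ERROR, and that it is no longer
-// connected.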
-TEST_P(SocketInetLoopbackTest, TCPAcceptAfterReset) {
- SocketInetTestParam const& param = GetParam();
- TestAddress const& listener = param.listener;
- TestAddress const& connector = param.connector;
-
- // Create the listening socket.
- const FileDescriptor listen_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(listener.family(), SOCK_STREAM, IPPROTO_TCP));
- sockaddr_storage listen_addr = listener.addr;
- ASSERT_THAT(
- bind(listen_fd.get(), AsSockAddr(&listen_addr), listener.addr_len),
- SyscallSucceeds());
- ASSERT_THAT(listen(listen_fd.get(), SOMAXCONN), SyscallSucceeds());
-
- // Get the port bound by the listening socket.
- {
- socklen_t addrlen = listener.addr_len;
- ASSERT_THAT(
- getsockname(listen_fd.get(), AsSockAddr(&listen_addr), &addrlen),
- SyscallSucceeds());
- }
-
- const uint16_t port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr));
-
- // Connect to the listening socket.
- FileDescriptor conn_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(connector.family(), SOCK_STREAM, IPPROTO_TCP));
-
- sockaddr_storage conn_addr = connector.addr;
- ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));
-
- // TODO(b/157236388): Reenable Cooperative S/R once bug is fixed.
- DisableSave ds;
- ASSERT_THAT(RetryEINTR(connect)(conn_fd.get(), AsSockAddr(&conn_addr),
- connector.addr_len),
- SyscallSucceeds());
-
- // Trigger a RST by turning linger off and closing the socket.
- struct linger opt = {
- .l_onoff = 1,
- .l_linger = 0,
- };
- ASSERT_THAT(
- setsockopt(conn_fd.get(), SOL_SOCKET, SO_LINGER, &opt, sizeof(opt)),
- SyscallSucceeds());
- ASSERT_THAT(close(conn_fd.release()), SyscallSucceeds());
-
- if (IsRunningOnGvisor()) {
-    // gVisor packet processing is asynchronous and can take a bit of time in
-    // some cases, so give it some time to process the RST packet before
-    // calling accept.
-    //
-    // There is nothing to poll() on, so we have no choice but to use a sleep
-    // here.
- absl::SleepFor(absl::Milliseconds(100));
- }
-
- sockaddr_storage accept_addr;
- socklen_t addrlen = sizeof(accept_addr);
-
- auto accept_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Accept(listen_fd.get(), AsSockAddr(&accept_addr), &addrlen));
- ASSERT_EQ(addrlen, listener.addr_len);
-
- // Wait for accept_fd to process the RST.
- constexpr int kTimeout = 10000;
- pollfd pfd = {
- .fd = accept_fd.get(),
- .events = POLLIN,
- };
- ASSERT_THAT(poll(&pfd, 1, kTimeout), SyscallSucceedsWithValue(1));
- ASSERT_EQ(pfd.revents, POLLIN | POLLHUP | POLLERR);
-
- {
- int err;
- socklen_t optlen = sizeof(err);
- ASSERT_THAT(
- getsockopt(accept_fd.get(), SOL_SOCKET, SO_ERROR, &err, &optlen),
- SyscallSucceeds());
- // This should return ECONNRESET as the socket just received a RST packet
- // from the peer.
- ASSERT_EQ(optlen, sizeof(err));
- ASSERT_EQ(err, ECONNRESET);
- }
- {
- int err;
- socklen_t optlen = sizeof(err);
- ASSERT_THAT(
- getsockopt(accept_fd.get(), SOL_SOCKET, SO_ERROR, &err, &optlen),
- SyscallSucceeds());
- // This should return no error as the previous getsockopt call would have
- // cleared the socket error.
- ASSERT_EQ(optlen, sizeof(err));
- ASSERT_EQ(err, 0);
- }
- {
- sockaddr_storage peer_addr;
- socklen_t addrlen = sizeof(peer_addr);
- // The socket is not connected anymore and should return ENOTCONN.
- ASSERT_THAT(getpeername(accept_fd.get(), AsSockAddr(&peer_addr), &addrlen),
- SyscallFailsWithErrno(ENOTCONN));
- }
-}
-
-// TODO(gvisor.dev/issue/1688): Partially completed passive endpoints are not
-// saved. Enable S/R once issue is fixed.
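-//
-// Verifies that with TCP_DEFER_ACCEPT set on the listener, a completed
-// handshake is not delivered to the accept queue until the client sends data.
-//
-// A minimal sketch of the option being exercised (the 3-second value mirrors
-// kTCPDeferAccept below and is illustrative only):
-//
-//   int defer_secs = 3;  // Wait up to 3s for data before completing accept.
-//   setsockopt(listen_fd, IPPROTO_TCP, TCP_DEFER_ACCEPT, &defer_secs,
-//              sizeof(defer_secs));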
-TEST_P(SocketInetLoopbackTest, TCPDeferAccept) {
-  // TODO(gvisor.dev/issue/1688): Partially completed passive endpoints are not
-  // saved. Enable S/R once the issue is fixed.
- DisableSave ds;
-
- SocketInetTestParam const& param = GetParam();
- TestAddress const& listener = param.listener;
- TestAddress const& connector = param.connector;
-
- // Create the listening socket.
- const FileDescriptor listen_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(listener.family(), SOCK_STREAM, IPPROTO_TCP));
- sockaddr_storage listen_addr = listener.addr;
- ASSERT_THAT(
- bind(listen_fd.get(), AsSockAddr(&listen_addr), listener.addr_len),
- SyscallSucceeds());
- ASSERT_THAT(listen(listen_fd.get(), SOMAXCONN), SyscallSucceeds());
-
- // Get the port bound by the listening socket.
- socklen_t addrlen = listener.addr_len;
- ASSERT_THAT(getsockname(listen_fd.get(), AsSockAddr(&listen_addr), &addrlen),
- SyscallSucceeds());
-
- const uint16_t port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr));
-
- // Set the TCP_DEFER_ACCEPT on the listening socket.
- constexpr int kTCPDeferAccept = 3;
- ASSERT_THAT(setsockopt(listen_fd.get(), IPPROTO_TCP, TCP_DEFER_ACCEPT,
- &kTCPDeferAccept, sizeof(kTCPDeferAccept)),
- SyscallSucceeds());
-
- // Connect to the listening socket.
- FileDescriptor conn_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(connector.family(), SOCK_STREAM, IPPROTO_TCP));
-
- sockaddr_storage conn_addr = connector.addr;
- ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));
- ASSERT_THAT(RetryEINTR(connect)(conn_fd.get(), AsSockAddr(&conn_addr),
- connector.addr_len),
- SyscallSucceeds());
-
-  // Set the listening socket to O_NONBLOCK so that we can verify that there is
-  // no connection in the queue despite the connect above succeeding, since the
-  // peer has sent no data and TCP_DEFER_ACCEPT is set on the listening socket.
- int opts;
- ASSERT_THAT(opts = fcntl(listen_fd.get(), F_GETFL), SyscallSucceeds());
- opts |= O_NONBLOCK;
- ASSERT_THAT(fcntl(listen_fd.get(), F_SETFL, opts), SyscallSucceeds());
-
- ASSERT_THAT(accept(listen_fd.get(), nullptr, nullptr),
- SyscallFailsWithErrno(EWOULDBLOCK));
-
- // Set FD back to blocking.
- opts &= ~O_NONBLOCK;
- ASSERT_THAT(fcntl(listen_fd.get(), F_SETFL, opts), SyscallSucceeds());
-
- // Now write some data to the socket.
- int data = 0;
- ASSERT_THAT(RetryEINTR(write)(conn_fd.get(), &data, sizeof(data)),
- SyscallSucceedsWithValue(sizeof(data)));
-
- // This should now cause the connection to complete and be delivered to the
- // accept socket.
-
- // Accept the connection.
- auto accepted =
- ASSERT_NO_ERRNO_AND_VALUE(Accept(listen_fd.get(), nullptr, nullptr));
-
- // Verify that the accepted socket returns the data written.
- int get = -1;
- ASSERT_THAT(RetryEINTR(recv)(accepted.get(), &get, sizeof(get), 0),
- SyscallSucceedsWithValue(sizeof(get)));
-
- EXPECT_EQ(get, data);
-}
-
-// TODO(gvisor.dev/issue/1688): Partially completed passive endpoints are not
-// saved. Enable S/R once issue is fixed.
-TEST_P(SocketInetLoopbackTest, TCPDeferAcceptTimeout) {
- // TODO(gvisor.dev/issue/1688): Partially completed passive endpoints are not
- // saved. Enable S/R once issue is fixed.
- DisableSave ds;
-
- SocketInetTestParam const& param = GetParam();
- TestAddress const& listener = param.listener;
- TestAddress const& connector = param.connector;
-
- // Create the listening socket.
- const FileDescriptor listen_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(listener.family(), SOCK_STREAM, IPPROTO_TCP));
- sockaddr_storage listen_addr = listener.addr;
- ASSERT_THAT(
- bind(listen_fd.get(), AsSockAddr(&listen_addr), listener.addr_len),
- SyscallSucceeds());
- ASSERT_THAT(listen(listen_fd.get(), SOMAXCONN), SyscallSucceeds());
-
- // Get the port bound by the listening socket.
- socklen_t addrlen = listener.addr_len;
- ASSERT_THAT(getsockname(listen_fd.get(), AsSockAddr(&listen_addr), &addrlen),
- SyscallSucceeds());
-
- const uint16_t port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr));
-
- // Set the TCP_DEFER_ACCEPT on the listening socket.
- constexpr int kTCPDeferAccept = 3;
- ASSERT_THAT(setsockopt(listen_fd.get(), IPPROTO_TCP, TCP_DEFER_ACCEPT,
- &kTCPDeferAccept, sizeof(kTCPDeferAccept)),
- SyscallSucceeds());
-
- // Connect to the listening socket.
- FileDescriptor conn_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(connector.family(), SOCK_STREAM, IPPROTO_TCP));
-
- sockaddr_storage conn_addr = connector.addr;
- ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));
- ASSERT_THAT(RetryEINTR(connect)(conn_fd.get(), AsSockAddr(&conn_addr),
- connector.addr_len),
- SyscallSucceeds());
-
-  // Set the listening socket to O_NONBLOCK so that we can verify that there is
-  // no connection in the queue despite the connect above succeeding, since the
-  // peer has sent no data and TCP_DEFER_ACCEPT is set on the listening socket.
- int opts;
- ASSERT_THAT(opts = fcntl(listen_fd.get(), F_GETFL), SyscallSucceeds());
- opts |= O_NONBLOCK;
- ASSERT_THAT(fcntl(listen_fd.get(), F_SETFL, opts), SyscallSucceeds());
-
- // Verify that there is no acceptable connection before TCP_DEFER_ACCEPT
- // timeout is hit.
- absl::SleepFor(absl::Seconds(kTCPDeferAccept - 1));
- ASSERT_THAT(accept(listen_fd.get(), nullptr, nullptr),
- SyscallFailsWithErrno(EWOULDBLOCK));
-
- // Set FD back to blocking.
- opts &= ~O_NONBLOCK;
- ASSERT_THAT(fcntl(listen_fd.get(), F_SETFL, opts), SyscallSucceeds());
-
-  // Now sleep for a little over the TCP_DEFER_ACCEPT duration. When the
-  // timeout is hit, a SYN-ACK should be retransmitted by the listener as a
-  // last-ditch attempt to complete the connection with or without data.
- absl::SleepFor(absl::Seconds(2));
-
- // Verify that we have a connection that can be accepted even though no
- // data was written.
- auto accepted =
- ASSERT_NO_ERRNO_AND_VALUE(Accept(listen_fd.get(), nullptr, nullptr));
-}
-
-INSTANTIATE_TEST_SUITE_P(All, SocketInetLoopbackTest,
- SocketInetLoopbackTestValues(),
- DescribeSocketInetTestParam);
-
-using SocketInetReusePortTest = ::testing::TestWithParam<SocketInetTestParam>;
-
-// TODO(gvisor.dev/issue/940): Remove when portHint/stack.Seed is
-// saved/restored.
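-//
-// Checks that incoming TCP connections are distributed roughly evenly (within
-// 10%) across several listeners sharing one port via SO_REUSEPORT.
-//
-// A minimal sketch of the sharing setup used below (error handling omitted):
-//
-//   int on = 1;
-//   setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &on, sizeof(on));
-//   bind(fd, addr, addrlen);  // Same address and port on every listener.
-//   listen(fd, backlog);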
-TEST_P(SocketInetReusePortTest, TcpPortReuseMultiThread) {
- SocketInetTestParam const& param = GetParam();
-
- TestAddress const& listener = param.listener;
- TestAddress const& connector = param.connector;
- sockaddr_storage listen_addr = listener.addr;
- sockaddr_storage conn_addr = connector.addr;
- constexpr int kThreadCount = 3;
- constexpr int kConnectAttempts = 10000;
-
- // Create the listening socket.
- FileDescriptor listener_fds[kThreadCount];
- for (int i = 0; i < kThreadCount; i++) {
- listener_fds[i] = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(listener.family(), SOCK_STREAM, IPPROTO_TCP));
- int fd = listener_fds[i].get();
-
- ASSERT_THAT(setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(bind(fd, AsSockAddr(&listen_addr), listener.addr_len),
- SyscallSucceeds());
- ASSERT_THAT(listen(fd, 40), SyscallSucceeds());
-
- // On the first bind we need to determine which port was bound.
- if (i != 0) {
- continue;
- }
-
- // Get the port bound by the listening socket.
- socklen_t addrlen = listener.addr_len;
- ASSERT_THAT(
- getsockname(listener_fds[0].get(), AsSockAddr(&listen_addr), &addrlen),
- SyscallSucceeds());
- uint16_t const port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr));
- ASSERT_NO_ERRNO(SetAddrPort(listener.family(), &listen_addr, port));
- ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));
- }
-
- std::atomic<int> connects_received = ATOMIC_VAR_INIT(0);
- std::unique_ptr<ScopedThread> listen_thread[kThreadCount];
- int accept_counts[kThreadCount] = {};
-  // TODO(avagin): figure out how to avoid disabling S/R for the whole test.
-  // We need to take into account that this test executes a lot of system
-  // calls from many threads.
- DisableSave ds;
-
- for (int i = 0; i < kThreadCount; i++) {
- listen_thread[i] = absl::make_unique<ScopedThread>(
- [&listener_fds, &accept_counts, i, &connects_received]() {
- do {
- auto fd = Accept(listener_fds[i].get(), nullptr, nullptr);
- if (!fd.ok()) {
- if (connects_received >= kConnectAttempts) {
-                  // Another thread has shut down our read side, causing the
-                  // accept to fail.
- ASSERT_EQ(errno, EINVAL);
- break;
- }
- ASSERT_NO_ERRNO(fd);
- break;
- }
-                // Receive some data from a socket to be sure that the
-                // connect() system call has been completed on the other side.
-                // Do a short read and then close the socket to trigger a RST.
-                // This ensures that both ends of the connection are cleaned up
-                // and no goroutines hang around in TIME-WAIT. We do this so
-                // that this test does not time out under gotsan runs, where
-                // lots of goroutines can cause the test to use absurd amounts
-                // of memory.
- //
- // See: https://tools.ietf.org/html/rfc2525#page-50 section 2.17
- uint16_t data;
- EXPECT_THAT(
- RetryEINTR(recv)(fd.ValueOrDie().get(), &data, sizeof(data), 0),
- SyscallSucceedsWithValue(sizeof(data)));
- accept_counts[i]++;
- } while (++connects_received < kConnectAttempts);
-
- // Shutdown all sockets to wake up other threads.
- for (int j = 0; j < kThreadCount; j++) {
- shutdown(listener_fds[j].get(), SHUT_RDWR);
- }
- });
- }
-
- ScopedThread connecting_thread([&connector, &conn_addr]() {
- for (int32_t i = 0; i < kConnectAttempts; i++) {
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(connector.family(), SOCK_STREAM, IPPROTO_TCP));
- ASSERT_THAT(RetryEINTR(connect)(fd.get(), AsSockAddr(&conn_addr),
- connector.addr_len),
- SyscallSucceeds());
-
- EXPECT_THAT(RetryEINTR(send)(fd.get(), &i, sizeof(i), 0),
- SyscallSucceedsWithValue(sizeof(i)));
- }
- });
-
- // Join threads to be sure that all connections have been counted
- connecting_thread.Join();
- for (int i = 0; i < kThreadCount; i++) {
- listen_thread[i]->Join();
- }
- // Check that connections are distributed fairly between listening sockets
- for (int i = 0; i < kThreadCount; i++)
- EXPECT_THAT(accept_counts[i],
- EquivalentWithin((kConnectAttempts / kThreadCount), 0.10));
-}
-
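-// UDP counterpart of the test above: checks that datagrams are distributed
-// roughly evenly (within 10%) across SO_REUSEPORT sockets bound to the same
-// port.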
-TEST_P(SocketInetReusePortTest, UdpPortReuseMultiThread) {
- SocketInetTestParam const& param = GetParam();
-
- TestAddress const& listener = param.listener;
- TestAddress const& connector = param.connector;
- sockaddr_storage listen_addr = listener.addr;
- sockaddr_storage conn_addr = connector.addr;
- constexpr int kThreadCount = 3;
-
- // Create the listening socket.
- FileDescriptor listener_fds[kThreadCount];
- for (int i = 0; i < kThreadCount; i++) {
- listener_fds[i] =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(listener.family(), SOCK_DGRAM, 0));
- int fd = listener_fds[i].get();
-
- ASSERT_THAT(setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(bind(fd, AsSockAddr(&listen_addr), listener.addr_len),
- SyscallSucceeds());
-
- // On the first bind we need to determine which port was bound.
- if (i != 0) {
- continue;
- }
-
- // Get the port bound by the listening socket.
- socklen_t addrlen = listener.addr_len;
- ASSERT_THAT(
- getsockname(listener_fds[0].get(), AsSockAddr(&listen_addr), &addrlen),
- SyscallSucceeds());
- uint16_t const port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr));
- ASSERT_NO_ERRNO(SetAddrPort(listener.family(), &listen_addr, port));
- ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));
- }
-
- constexpr int kConnectAttempts = 10000;
- std::atomic<int> packets_received = ATOMIC_VAR_INIT(0);
- std::unique_ptr<ScopedThread> receiver_thread[kThreadCount];
- int packets_per_socket[kThreadCount] = {};
- // TODO(avagin): figure how to not disable S/R for the whole test.
- DisableSave ds; // Too expensive.
-
- for (int i = 0; i < kThreadCount; i++) {
- receiver_thread[i] = absl::make_unique<ScopedThread>(
- [&listener_fds, &packets_per_socket, i, &packets_received]() {
- do {
- struct sockaddr_storage addr = {};
- socklen_t addrlen = sizeof(addr);
- int data;
-
- auto ret =
- RetryEINTR(recvfrom)(listener_fds[i].get(), &data, sizeof(data),
- 0, AsSockAddr(&addr), &addrlen);
-
- if (packets_received < kConnectAttempts) {
- ASSERT_THAT(ret, SyscallSucceedsWithValue(sizeof(data)));
- }
-
- if (ret != sizeof(data)) {
-              // Another thread may have shut down our read side, causing
-              // the recvfrom to fail.
- break;
- }
-
- packets_received++;
- packets_per_socket[i]++;
-
- // A response is required to synchronize with the main thread,
- // otherwise the main thread can send more than can fit into receive
- // queues.
- EXPECT_THAT(
- RetryEINTR(sendto)(listener_fds[i].get(), &data, sizeof(data),
- 0, AsSockAddr(&addr), addrlen),
- SyscallSucceedsWithValue(sizeof(data)));
- } while (packets_received < kConnectAttempts);
-
- // Shutdown all sockets to wake up other threads.
- for (int j = 0; j < kThreadCount; j++)
- shutdown(listener_fds[j].get(), SHUT_RDWR);
- });
- }
-
- ScopedThread main_thread([&connector, &conn_addr]() {
- for (int i = 0; i < kConnectAttempts; i++) {
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(connector.family(), SOCK_DGRAM, 0));
- EXPECT_THAT(
- RetryEINTR(sendto)(fd.get(), &i, sizeof(i), 0, AsSockAddr(&conn_addr),
- connector.addr_len),
- SyscallSucceedsWithValue(sizeof(i)));
- int data;
- EXPECT_THAT(RetryEINTR(recv)(fd.get(), &data, sizeof(data), 0),
- SyscallSucceedsWithValue(sizeof(data)));
- }
- });
-
- main_thread.Join();
-
- // Join threads to be sure that all connections have been counted
- for (int i = 0; i < kThreadCount; i++) {
- receiver_thread[i]->Join();
- }
- // Check that packets are distributed fairly between listening sockets.
- for (int i = 0; i < kThreadCount; i++)
- EXPECT_THAT(packets_per_socket[i],
- EquivalentWithin((kConnectAttempts / kThreadCount), 0.10));
-}
-
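-// Checks that, with SO_REUSEPORT, all datagrams from a given client keep
-// arriving at the same server socket, including across save/restore.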
-TEST_P(SocketInetReusePortTest, UdpPortReuseMultiThreadShort) {
- SocketInetTestParam const& param = GetParam();
-
- TestAddress const& listener = param.listener;
- TestAddress const& connector = param.connector;
- sockaddr_storage listen_addr = listener.addr;
- sockaddr_storage conn_addr = connector.addr;
- constexpr int kThreadCount = 3;
-
- // TODO(b/141211329): endpointsByNic.seed has to be saved/restored.
- const DisableSave ds141211329;
-
- // Create listening sockets.
- FileDescriptor listener_fds[kThreadCount];
- for (int i = 0; i < kThreadCount; i++) {
- listener_fds[i] =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(listener.family(), SOCK_DGRAM, 0));
- int fd = listener_fds[i].get();
-
- ASSERT_THAT(setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(bind(fd, AsSockAddr(&listen_addr), listener.addr_len),
- SyscallSucceeds());
-
- // On the first bind we need to determine which port was bound.
- if (i != 0) {
- continue;
- }
-
- // Get the port bound by the listening socket.
- socklen_t addrlen = listener.addr_len;
- ASSERT_THAT(
- getsockname(listener_fds[0].get(), AsSockAddr(&listen_addr), &addrlen),
- SyscallSucceeds());
- uint16_t const port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr));
- ASSERT_NO_ERRNO(SetAddrPort(listener.family(), &listen_addr, port));
- ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));
- }
-
- constexpr int kConnectAttempts = 10;
- FileDescriptor client_fds[kConnectAttempts];
-
- // Do the first run without save/restore.
- DisableSave ds;
- for (int i = 0; i < kConnectAttempts; i++) {
- client_fds[i] =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(connector.family(), SOCK_DGRAM, 0));
- EXPECT_THAT(RetryEINTR(sendto)(client_fds[i].get(), &i, sizeof(i), 0,
- AsSockAddr(&conn_addr), connector.addr_len),
- SyscallSucceedsWithValue(sizeof(i)));
- }
- ds.reset();
-
-  // Check that the mapping of client to server sockets has not changed after
-  // save/restore.
- for (int i = 0; i < kConnectAttempts; i++) {
- EXPECT_THAT(RetryEINTR(sendto)(client_fds[i].get(), &i, sizeof(i), 0,
- AsSockAddr(&conn_addr), connector.addr_len),
- SyscallSucceedsWithValue(sizeof(i)));
- }
-
- pollfd pollfds[kThreadCount];
- for (int i = 0; i < kThreadCount; i++) {
- pollfds[i].fd = listener_fds[i].get();
- pollfds[i].events = POLLIN;
- }
-
- std::map<uint16_t, int> portToFD;
-
- int received = 0;
- while (received < kConnectAttempts * 2) {
- ASSERT_THAT(poll(pollfds, kThreadCount, -1),
- SyscallSucceedsWithValue(Gt(0)));
-
- for (int i = 0; i < kThreadCount; i++) {
- if ((pollfds[i].revents & POLLIN) == 0) {
- continue;
- }
-
- received++;
-
- const int fd = pollfds[i].fd;
- struct sockaddr_storage addr = {};
- socklen_t addrlen = sizeof(addr);
- int data;
- EXPECT_THAT(RetryEINTR(recvfrom)(fd, &data, sizeof(data), 0,
- AsSockAddr(&addr), &addrlen),
- SyscallSucceedsWithValue(sizeof(data)));
- uint16_t const port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(connector.family(), addr));
- auto prev_port = portToFD.find(port);
- // Check that all packets from one client have been delivered to the
- // same server socket.
- if (prev_port == portToFD.end()) {
- portToFD[port] = fd;
- } else {
- EXPECT_EQ(portToFD[port], fd);
- }
- }
- }
-}
-
-INSTANTIATE_TEST_SUITE_P(
- All, SocketInetReusePortTest,
- ::testing::Values(
- // Listeners bound to IPv4 addresses refuse connections using IPv6
- // addresses.
- SocketInetTestParam{V4Any(), V4Loopback()},
- SocketInetTestParam{V4Loopback(), V4MappedLoopback()},
-
- // Listeners bound to IN6ADDR_ANY accept all connections.
- SocketInetTestParam{V6Any(), V4Loopback()},
- SocketInetTestParam{V6Any(), V6Loopback()},
-
- // Listeners bound to IN6ADDR_LOOPBACK refuse connections using IPv4
- // addresses.
- SocketInetTestParam{V6Loopback(), V6Loopback()}),
- DescribeSocketInetTestParam);
-
-using SocketMultiProtocolInetLoopbackTest =
- ::testing::TestWithParam<ProtocolTestParam>;
-
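-// Binding the v4-mapped loopback on a dual-stack socket should reserve only
-// the IPv4 side of the port: binding the v6 loopback on the same port
-// succeeds, while binding the v4 loopback fails with EADDRINUSE.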
-TEST_P(SocketMultiProtocolInetLoopbackTest, V4MappedLoopbackOnlyReservesV4) {
- ProtocolTestParam const& param = GetParam();
-
- for (int i = 0; true; i++) {
- // Bind the v4 loopback on a dual stack socket.
- TestAddress const& test_addr_dual = V4MappedLoopback();
- sockaddr_storage addr_dual = test_addr_dual.addr;
- const FileDescriptor fd_dual = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(test_addr_dual.family(), param.type, 0));
- ASSERT_THAT(
- bind(fd_dual.get(), AsSockAddr(&addr_dual), test_addr_dual.addr_len),
- SyscallSucceeds());
-
- // Get the port that we bound.
- socklen_t addrlen = test_addr_dual.addr_len;
- ASSERT_THAT(getsockname(fd_dual.get(), AsSockAddr(&addr_dual), &addrlen),
- SyscallSucceeds());
- uint16_t const port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(test_addr_dual.family(), addr_dual));
-
- // Verify that we can still bind the v6 loopback on the same port.
- TestAddress const& test_addr_v6 = V6Loopback();
- sockaddr_storage addr_v6 = test_addr_v6.addr;
- ASSERT_NO_ERRNO(SetAddrPort(test_addr_v6.family(), &addr_v6, port));
- const FileDescriptor fd_v6 =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr_v6.family(), param.type, 0));
- int ret = bind(fd_v6.get(), AsSockAddr(&addr_v6), test_addr_v6.addr_len);
- if (ret == -1 && errno == EADDRINUSE) {
- // Port may have been in use.
- ASSERT_LT(i, 100); // Give up after 100 tries.
- continue;
- }
- ASSERT_THAT(ret, SyscallSucceeds());
-
- // Verify that binding the v4 loopback with the same port on a v4 socket
- // fails.
- TestAddress const& test_addr_v4 = V4Loopback();
- sockaddr_storage addr_v4 = test_addr_v4.addr;
- ASSERT_NO_ERRNO(SetAddrPort(test_addr_v4.family(), &addr_v4, port));
- const FileDescriptor fd_v4 =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr_v4.family(), param.type, 0));
- ASSERT_THAT(bind(fd_v4.get(), AsSockAddr(&addr_v4), test_addr_v4.addr_len),
- SyscallFailsWithErrno(EADDRINUSE));
-
- // No need to try again.
- break;
- }
-}
-
-TEST_P(SocketMultiProtocolInetLoopbackTest, V4MappedAnyOnlyReservesV4) {
- ProtocolTestParam const& param = GetParam();
-
- for (int i = 0; true; i++) {
- // Bind the v4 any on a dual stack socket.
- TestAddress const& test_addr_dual = V4MappedAny();
- sockaddr_storage addr_dual = test_addr_dual.addr;
- const FileDescriptor fd_dual = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(test_addr_dual.family(), param.type, 0));
- ASSERT_THAT(
- bind(fd_dual.get(), AsSockAddr(&addr_dual), test_addr_dual.addr_len),
- SyscallSucceeds());
-
- // Get the port that we bound.
- socklen_t addrlen = test_addr_dual.addr_len;
- ASSERT_THAT(getsockname(fd_dual.get(), AsSockAddr(&addr_dual), &addrlen),
- SyscallSucceeds());
- uint16_t const port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(test_addr_dual.family(), addr_dual));
-
- // Verify that we can still bind the v6 loopback on the same port.
- TestAddress const& test_addr_v6 = V6Loopback();
- sockaddr_storage addr_v6 = test_addr_v6.addr;
- ASSERT_NO_ERRNO(SetAddrPort(test_addr_v6.family(), &addr_v6, port));
- const FileDescriptor fd_v6 =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr_v6.family(), param.type, 0));
- int ret = bind(fd_v6.get(), AsSockAddr(&addr_v6), test_addr_v6.addr_len);
- if (ret == -1 && errno == EADDRINUSE) {
- // Port may have been in use.
- ASSERT_LT(i, 100); // Give up after 100 tries.
- continue;
- }
- ASSERT_THAT(ret, SyscallSucceeds());
-
- // Verify that binding the v4 loopback with the same port on a v4 socket
- // fails.
- TestAddress const& test_addr_v4 = V4Loopback();
- sockaddr_storage addr_v4 = test_addr_v4.addr;
- ASSERT_NO_ERRNO(SetAddrPort(test_addr_v4.family(), &addr_v4, port));
- const FileDescriptor fd_v4 =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr_v4.family(), param.type, 0));
- ASSERT_THAT(bind(fd_v4.get(), AsSockAddr(&addr_v4), test_addr_v4.addr_len),
- SyscallFailsWithErrno(EADDRINUSE));
-
- // No need to try again.
- break;
- }
-}
-
-TEST_P(SocketMultiProtocolInetLoopbackTest, DualStackV6AnyReservesEverything) {
- ProtocolTestParam const& param = GetParam();
-
- // Bind the v6 any on a dual stack socket.
- TestAddress const& test_addr_dual = V6Any();
- sockaddr_storage addr_dual = test_addr_dual.addr;
- const FileDescriptor fd_dual =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr_dual.family(), param.type, 0));
- ASSERT_THAT(
- bind(fd_dual.get(), AsSockAddr(&addr_dual), test_addr_dual.addr_len),
- SyscallSucceeds());
-
- // Get the port that we bound.
- socklen_t addrlen = test_addr_dual.addr_len;
- ASSERT_THAT(getsockname(fd_dual.get(), AsSockAddr(&addr_dual), &addrlen),
- SyscallSucceeds());
- uint16_t const port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(test_addr_dual.family(), addr_dual));
-
- // Verify that binding the v6 loopback with the same port fails.
- TestAddress const& test_addr_v6 = V6Loopback();
- sockaddr_storage addr_v6 = test_addr_v6.addr;
- ASSERT_NO_ERRNO(SetAddrPort(test_addr_v6.family(), &addr_v6, port));
- const FileDescriptor fd_v6 =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr_v6.family(), param.type, 0));
- ASSERT_THAT(bind(fd_v6.get(), AsSockAddr(&addr_v6), test_addr_v6.addr_len),
- SyscallFailsWithErrno(EADDRINUSE));
-
- // Verify that binding the v4 loopback on the same port with a v6 socket
- // fails.
- TestAddress const& test_addr_v4_mapped = V4MappedLoopback();
- sockaddr_storage addr_v4_mapped = test_addr_v4_mapped.addr;
- ASSERT_NO_ERRNO(
- SetAddrPort(test_addr_v4_mapped.family(), &addr_v4_mapped, port));
- const FileDescriptor fd_v4_mapped = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(test_addr_v4_mapped.family(), param.type, 0));
- ASSERT_THAT(bind(fd_v4_mapped.get(), AsSockAddr(&addr_v4_mapped),
- test_addr_v4_mapped.addr_len),
- SyscallFailsWithErrno(EADDRINUSE));
-
- // Verify that binding the v4 loopback on the same port with a v4 socket
- // fails.
- TestAddress const& test_addr_v4 = V4Loopback();
- sockaddr_storage addr_v4 = test_addr_v4.addr;
- ASSERT_NO_ERRNO(SetAddrPort(test_addr_v4.family(), &addr_v4, port));
- const FileDescriptor fd_v4 =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr_v4.family(), param.type, 0));
- ASSERT_THAT(bind(fd_v4.get(), AsSockAddr(&addr_v4), test_addr_v4.addr_len),
- SyscallFailsWithErrno(EADDRINUSE));
-
- // Verify that binding the v4 any on the same port with a v4 socket
- // fails.
- TestAddress const& test_addr_v4_any = V4Any();
- sockaddr_storage addr_v4_any = test_addr_v4_any.addr;
- ASSERT_NO_ERRNO(SetAddrPort(test_addr_v4_any.family(), &addr_v4_any, port));
- const FileDescriptor fd_v4_any = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(test_addr_v4_any.family(), param.type, 0));
- ASSERT_THAT(bind(fd_v4_any.get(), AsSockAddr(&addr_v4_any),
- test_addr_v4_any.addr_len),
- SyscallFailsWithErrno(EADDRINUSE));
-}
-
-TEST_P(SocketMultiProtocolInetLoopbackTest,
- DualStackV6AnyReuseAddrDoesNotReserveV4Any) {
- ProtocolTestParam const& param = GetParam();
-
- // Bind the v6 any on a dual stack socket.
- TestAddress const& test_addr_dual = V6Any();
- sockaddr_storage addr_dual = test_addr_dual.addr;
- const FileDescriptor fd_dual =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr_dual.family(), param.type, 0));
- ASSERT_THAT(setsockopt(fd_dual.get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(
- bind(fd_dual.get(), AsSockAddr(&addr_dual), test_addr_dual.addr_len),
- SyscallSucceeds());
-
- // Get the port that we bound.
- socklen_t addrlen = test_addr_dual.addr_len;
- ASSERT_THAT(getsockname(fd_dual.get(), AsSockAddr(&addr_dual), &addrlen),
- SyscallSucceeds());
- uint16_t const port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(test_addr_dual.family(), addr_dual));
-
- // Verify that binding the v4 any on the same port with a v4 socket succeeds.
- TestAddress const& test_addr_v4_any = V4Any();
- sockaddr_storage addr_v4_any = test_addr_v4_any.addr;
- ASSERT_NO_ERRNO(SetAddrPort(test_addr_v4_any.family(), &addr_v4_any, port));
- const FileDescriptor fd_v4_any = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(test_addr_v4_any.family(), param.type, 0));
- ASSERT_THAT(setsockopt(fd_v4_any.get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(bind(fd_v4_any.get(), AsSockAddr(&addr_v4_any),
- test_addr_v4_any.addr_len),
- SyscallSucceeds());
-}
-
-TEST_P(SocketMultiProtocolInetLoopbackTest,
- DualStackV6AnyReuseAddrListenReservesV4Any) {
- ProtocolTestParam const& param = GetParam();
-
- // Only TCP sockets are supported.
- SKIP_IF((param.type & SOCK_STREAM) == 0);
-
- // Bind the v6 any on a dual stack socket.
- TestAddress const& test_addr_dual = V6Any();
- sockaddr_storage addr_dual = test_addr_dual.addr;
- const FileDescriptor fd_dual =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr_dual.family(), param.type, 0));
- ASSERT_THAT(setsockopt(fd_dual.get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(
- bind(fd_dual.get(), AsSockAddr(&addr_dual), test_addr_dual.addr_len),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(fd_dual.get(), 5), SyscallSucceeds());
-
- // Get the port that we bound.
- socklen_t addrlen = test_addr_dual.addr_len;
- ASSERT_THAT(getsockname(fd_dual.get(), AsSockAddr(&addr_dual), &addrlen),
- SyscallSucceeds());
- uint16_t const port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(test_addr_dual.family(), addr_dual));
-
- // Verify that binding the v4 any on the same port with a v4 socket succeeds.
- TestAddress const& test_addr_v4_any = V4Any();
- sockaddr_storage addr_v4_any = test_addr_v4_any.addr;
- ASSERT_NO_ERRNO(SetAddrPort(test_addr_v4_any.family(), &addr_v4_any, port));
- const FileDescriptor fd_v4_any = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(test_addr_v4_any.family(), param.type, 0));
- ASSERT_THAT(setsockopt(fd_v4_any.get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- ASSERT_THAT(bind(fd_v4_any.get(), AsSockAddr(&addr_v4_any),
- test_addr_v4_any.addr_len),
- SyscallFailsWithErrno(EADDRINUSE));
-}
-
-TEST_P(SocketMultiProtocolInetLoopbackTest,
- DualStackV6AnyWithListenReservesEverything) {
- ProtocolTestParam const& param = GetParam();
-
- // Only TCP sockets are supported.
- SKIP_IF((param.type & SOCK_STREAM) == 0);
-
- // Bind the v6 any on a dual stack socket.
- TestAddress const& test_addr_dual = V6Any();
- sockaddr_storage addr_dual = test_addr_dual.addr;
- const FileDescriptor fd_dual =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr_dual.family(), param.type, 0));
- ASSERT_THAT(
- bind(fd_dual.get(), AsSockAddr(&addr_dual), test_addr_dual.addr_len),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(fd_dual.get(), 5), SyscallSucceeds());
-
- // Get the port that we bound.
- socklen_t addrlen = test_addr_dual.addr_len;
- ASSERT_THAT(getsockname(fd_dual.get(), AsSockAddr(&addr_dual), &addrlen),
- SyscallSucceeds());
- uint16_t const port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(test_addr_dual.family(), addr_dual));
-
- // Verify that binding the v6 loopback with the same port fails.
- TestAddress const& test_addr_v6 = V6Loopback();
- sockaddr_storage addr_v6 = test_addr_v6.addr;
- ASSERT_NO_ERRNO(SetAddrPort(test_addr_v6.family(), &addr_v6, port));
- const FileDescriptor fd_v6 =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr_v6.family(), param.type, 0));
- ASSERT_THAT(bind(fd_v6.get(), AsSockAddr(&addr_v6), test_addr_v6.addr_len),
- SyscallFailsWithErrno(EADDRINUSE));
-
- // Verify that binding the v4 loopback on the same port with a v6 socket
- // fails.
- TestAddress const& test_addr_v4_mapped = V4MappedLoopback();
- sockaddr_storage addr_v4_mapped = test_addr_v4_mapped.addr;
- ASSERT_NO_ERRNO(
- SetAddrPort(test_addr_v4_mapped.family(), &addr_v4_mapped, port));
- const FileDescriptor fd_v4_mapped = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(test_addr_v4_mapped.family(), param.type, 0));
- ASSERT_THAT(bind(fd_v4_mapped.get(), AsSockAddr(&addr_v4_mapped),
- test_addr_v4_mapped.addr_len),
- SyscallFailsWithErrno(EADDRINUSE));
-
- // Verify that binding the v4 loopback on the same port with a v4 socket
- // fails.
- TestAddress const& test_addr_v4 = V4Loopback();
- sockaddr_storage addr_v4 = test_addr_v4.addr;
- ASSERT_NO_ERRNO(SetAddrPort(test_addr_v4.family(), &addr_v4, port));
- const FileDescriptor fd_v4 =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr_v4.family(), param.type, 0));
- ASSERT_THAT(bind(fd_v4.get(), AsSockAddr(&addr_v4), test_addr_v4.addr_len),
- SyscallFailsWithErrno(EADDRINUSE));
-
- // Verify that binding the v4 any on the same port with a v4 socket
- // fails.
- TestAddress const& test_addr_v4_any = V4Any();
- sockaddr_storage addr_v4_any = test_addr_v4_any.addr;
- ASSERT_NO_ERRNO(SetAddrPort(test_addr_v4_any.family(), &addr_v4_any, port));
- const FileDescriptor fd_v4_any = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(test_addr_v4_any.family(), param.type, 0));
- ASSERT_THAT(bind(fd_v4_any.get(), AsSockAddr(&addr_v4_any),
- test_addr_v4_any.addr_len),
- SyscallFailsWithErrno(EADDRINUSE));
-}
-
-TEST_P(SocketMultiProtocolInetLoopbackTest, V6OnlyV6AnyReservesV6) {
- ProtocolTestParam const& param = GetParam();
-
- for (int i = 0; true; i++) {
- // Bind the v6 any on a v6-only socket.
- TestAddress const& test_addr_dual = V6Any();
- sockaddr_storage addr_dual = test_addr_dual.addr;
- const FileDescriptor fd_dual = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(test_addr_dual.family(), param.type, 0));
- EXPECT_THAT(setsockopt(fd_dual.get(), IPPROTO_IPV6, IPV6_V6ONLY,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(
- bind(fd_dual.get(), AsSockAddr(&addr_dual), test_addr_dual.addr_len),
- SyscallSucceeds());
-
- // Get the port that we bound.
- socklen_t addrlen = test_addr_dual.addr_len;
- ASSERT_THAT(getsockname(fd_dual.get(), AsSockAddr(&addr_dual), &addrlen),
- SyscallSucceeds());
- uint16_t const port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(test_addr_dual.family(), addr_dual));
-
- // Verify that binding the v6 loopback with the same port fails.
- TestAddress const& test_addr_v6 = V6Loopback();
- sockaddr_storage addr_v6 = test_addr_v6.addr;
- ASSERT_NO_ERRNO(SetAddrPort(test_addr_v6.family(), &addr_v6, port));
- const FileDescriptor fd_v6 =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr_v6.family(), param.type, 0));
- ASSERT_THAT(bind(fd_v6.get(), AsSockAddr(&addr_v6), test_addr_v6.addr_len),
- SyscallFailsWithErrno(EADDRINUSE));
-
- // Verify that we can still bind the v4 loopback on the same port.
- TestAddress const& test_addr_v4_mapped = V4MappedLoopback();
- sockaddr_storage addr_v4_mapped = test_addr_v4_mapped.addr;
- ASSERT_NO_ERRNO(
- SetAddrPort(test_addr_v4_mapped.family(), &addr_v4_mapped, port));
- const FileDescriptor fd_v4_mapped = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(test_addr_v4_mapped.family(), param.type, 0));
- int ret = bind(fd_v4_mapped.get(), AsSockAddr(&addr_v4_mapped),
- test_addr_v4_mapped.addr_len);
- if (ret == -1 && errno == EADDRINUSE) {
- // Port may have been in use.
- ASSERT_LT(i, 100); // Give up after 100 tries.
- continue;
- }
- ASSERT_THAT(ret, SyscallSucceeds());
-
- // No need to try again.
- break;
- }
-}
-
-TEST_P(SocketMultiProtocolInetLoopbackTest, V6EphemeralPortReserved) {
- ProtocolTestParam const& param = GetParam();
-
- for (int i = 0; true; i++) {
- // Bind the v6 loopback on a dual stack socket.
- TestAddress const& test_addr = V6Loopback();
- sockaddr_storage bound_addr = test_addr.addr;
- const FileDescriptor bound_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));
- ASSERT_THAT(
- bind(bound_fd.get(), AsSockAddr(&bound_addr), test_addr.addr_len),
- SyscallSucceeds());
-
- // Listen iff TCP.
- if (param.type == SOCK_STREAM) {
- ASSERT_THAT(listen(bound_fd.get(), SOMAXCONN), SyscallSucceeds());
- }
-
- // Get the port that we bound.
- socklen_t bound_addr_len = test_addr.addr_len;
- ASSERT_THAT(
- getsockname(bound_fd.get(), AsSockAddr(&bound_addr), &bound_addr_len),
- SyscallSucceeds());
-
- // Connect to bind an ephemeral port.
- const FileDescriptor connected_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));
- ASSERT_THAT(RetryEINTR(connect)(connected_fd.get(), AsSockAddr(&bound_addr),
- bound_addr_len),
- SyscallSucceeds());
-
- // Get the ephemeral port.
- sockaddr_storage connected_addr = {};
- socklen_t connected_addr_len = sizeof(connected_addr);
- ASSERT_THAT(getsockname(connected_fd.get(), AsSockAddr(&connected_addr),
- &connected_addr_len),
- SyscallSucceeds());
- uint16_t const ephemeral_port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(test_addr.family(), connected_addr));
-
- // Verify that we actually got an ephemeral port.
- ASSERT_NE(ephemeral_port, 0);
-
- // Verify that the ephemeral port is reserved.
- const FileDescriptor checking_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));
- EXPECT_THAT(bind(checking_fd.get(), AsSockAddr(&connected_addr),
- connected_addr_len),
- SyscallFailsWithErrno(EADDRINUSE));
-
- // Verify that binding the v6 loopback with the same port fails.
- TestAddress const& test_addr_v6 = V6Loopback();
- sockaddr_storage addr_v6 = test_addr_v6.addr;
- ASSERT_NO_ERRNO(
- SetAddrPort(test_addr_v6.family(), &addr_v6, ephemeral_port));
- const FileDescriptor fd_v6 =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr_v6.family(), param.type, 0));
- ASSERT_THAT(bind(fd_v6.get(), AsSockAddr(&addr_v6), test_addr_v6.addr_len),
- SyscallFailsWithErrno(EADDRINUSE));
-
- // Verify that we can still bind the v4 loopback on the same port.
- TestAddress const& test_addr_v4_mapped = V4MappedLoopback();
- sockaddr_storage addr_v4_mapped = test_addr_v4_mapped.addr;
- ASSERT_NO_ERRNO(SetAddrPort(test_addr_v4_mapped.family(), &addr_v4_mapped,
- ephemeral_port));
- const FileDescriptor fd_v4_mapped = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(test_addr_v4_mapped.family(), param.type, 0));
- int ret = bind(fd_v4_mapped.get(), AsSockAddr(&addr_v4_mapped),
- test_addr_v4_mapped.addr_len);
- if (ret == -1 && errno == EADDRINUSE) {
- // Port may have been in use.
- ASSERT_LT(i, 100); // Give up after 100 tries.
- continue;
- }
- EXPECT_THAT(ret, SyscallSucceeds());
-
- // No need to try again.
- break;
- }
-}
-
-TEST_P(SocketMultiProtocolInetLoopbackTest, V4MappedEphemeralPortReserved) {
- ProtocolTestParam const& param = GetParam();
-
- for (int i = 0; true; i++) {
- // Bind the v4 loopback on a dual stack socket.
- TestAddress const& test_addr = V4MappedLoopback();
- sockaddr_storage bound_addr = test_addr.addr;
- const FileDescriptor bound_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));
- ASSERT_THAT(
- bind(bound_fd.get(), AsSockAddr(&bound_addr), test_addr.addr_len),
- SyscallSucceeds());
-
- // Listen iff TCP.
- if (param.type == SOCK_STREAM) {
- ASSERT_THAT(listen(bound_fd.get(), SOMAXCONN), SyscallSucceeds());
- }
-
- // Get the port that we bound.
- socklen_t bound_addr_len = test_addr.addr_len;
- ASSERT_THAT(
- getsockname(bound_fd.get(), AsSockAddr(&bound_addr), &bound_addr_len),
- SyscallSucceeds());
-
- // Connect to bind an ephemeral port.
- const FileDescriptor connected_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));
- ASSERT_THAT(RetryEINTR(connect)(connected_fd.get(), AsSockAddr(&bound_addr),
- bound_addr_len),
- SyscallSucceeds());
-
- // Get the ephemeral port.
- sockaddr_storage connected_addr = {};
- socklen_t connected_addr_len = sizeof(connected_addr);
- ASSERT_THAT(getsockname(connected_fd.get(), AsSockAddr(&connected_addr),
- &connected_addr_len),
- SyscallSucceeds());
- uint16_t const ephemeral_port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(test_addr.family(), connected_addr));
-
- // Verify that we actually got an ephemeral port.
- ASSERT_NE(ephemeral_port, 0);
-
- // Verify that the ephemeral port is reserved.
- const FileDescriptor checking_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));
- EXPECT_THAT(bind(checking_fd.get(), AsSockAddr(&connected_addr),
- connected_addr_len),
- SyscallFailsWithErrno(EADDRINUSE));
-
- // Verify that binding the v4 loopback on the same port with a v4 socket
- // fails.
- TestAddress const& test_addr_v4 = V4Loopback();
- sockaddr_storage addr_v4 = test_addr_v4.addr;
- ASSERT_NO_ERRNO(
- SetAddrPort(test_addr_v4.family(), &addr_v4, ephemeral_port));
- const FileDescriptor fd_v4 =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr_v4.family(), param.type, 0));
- EXPECT_THAT(bind(fd_v4.get(), AsSockAddr(&addr_v4), test_addr_v4.addr_len),
- SyscallFailsWithErrno(EADDRINUSE));
-
- // Verify that binding the v6 any on the same port with a dual-stack socket
- // fails.
- TestAddress const& test_addr_v6_any = V6Any();
- sockaddr_storage addr_v6_any = test_addr_v6_any.addr;
- ASSERT_NO_ERRNO(
- SetAddrPort(test_addr_v6_any.family(), &addr_v6_any, ephemeral_port));
- const FileDescriptor fd_v6_any = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(test_addr_v6_any.family(), param.type, 0));
- ASSERT_THAT(bind(fd_v6_any.get(), AsSockAddr(&addr_v6_any),
- test_addr_v6_any.addr_len),
- SyscallFailsWithErrno(EADDRINUSE));
-
- // For some reason, binding the TCP v6-only any is flaky on Linux. Maybe we
- // tend to run out of ephemeral ports? Regardless, binding the v6 loopback
- // seems pretty reliable. Only try to bind the v6-only any for UDP, or when
- // running on gVisor.
-
- int ret = -1;
-
- if (!IsRunningOnGvisor() && param.type == SOCK_STREAM) {
- // Verify that we can still bind the v6 loopback on the same port.
- TestAddress const& test_addr_v6 = V6Loopback();
- sockaddr_storage addr_v6 = test_addr_v6.addr;
- ASSERT_NO_ERRNO(
- SetAddrPort(test_addr_v6.family(), &addr_v6, ephemeral_port));
- const FileDescriptor fd_v6 = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(test_addr_v6.family(), param.type, 0));
- ret = bind(fd_v6.get(), AsSockAddr(&addr_v6), test_addr_v6.addr_len);
- } else {
- // Verify that we can still bind the v6 any on the same port with a
- // v6-only socket.
- const FileDescriptor fd_v6_only_any = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(test_addr_v6_any.family(), param.type, 0));
- EXPECT_THAT(setsockopt(fd_v6_only_any.get(), IPPROTO_IPV6, IPV6_V6ONLY,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
- ret = bind(fd_v6_only_any.get(), AsSockAddr(&addr_v6_any),
- test_addr_v6_any.addr_len);
- }
-
- if (ret == -1 && errno == EADDRINUSE) {
- // Port may have been in use.
- ASSERT_LT(i, 100); // Give up after 100 tries.
- continue;
- }
- EXPECT_THAT(ret, SyscallSucceeds());
-
- // No need to try again.
- break;
- }
-}
-
-TEST_P(SocketMultiProtocolInetLoopbackTest, V4EphemeralPortReserved) {
- ProtocolTestParam const& param = GetParam();
-
- for (int i = 0; true; i++) {
- // Bind the v4 loopback on a v4 socket.
- TestAddress const& test_addr = V4Loopback();
- sockaddr_storage bound_addr = test_addr.addr;
- const FileDescriptor bound_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));
- ASSERT_THAT(
- bind(bound_fd.get(), AsSockAddr(&bound_addr), test_addr.addr_len),
- SyscallSucceeds());
-
- // Listen iff TCP.
- if (param.type == SOCK_STREAM) {
- ASSERT_THAT(listen(bound_fd.get(), SOMAXCONN), SyscallSucceeds());
- }
-
- // Get the port that we bound.
- socklen_t bound_addr_len = test_addr.addr_len;
- ASSERT_THAT(
- getsockname(bound_fd.get(), AsSockAddr(&bound_addr), &bound_addr_len),
- SyscallSucceeds());
-
- // Connect to bind an ephemeral port.
- const FileDescriptor connected_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));
- ASSERT_THAT(RetryEINTR(connect)(connected_fd.get(), AsSockAddr(&bound_addr),
- bound_addr_len),
- SyscallSucceeds());
-
- // Get the ephemeral port.
- sockaddr_storage connected_addr = {};
- socklen_t connected_addr_len = sizeof(connected_addr);
- ASSERT_THAT(getsockname(connected_fd.get(), AsSockAddr(&connected_addr),
- &connected_addr_len),
- SyscallSucceeds());
- uint16_t const ephemeral_port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(test_addr.family(), connected_addr));
-
- // Verify that we actually got an ephemeral port.
- ASSERT_NE(ephemeral_port, 0);
-
- // Verify that the ephemeral port is reserved.
- const FileDescriptor checking_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));
- EXPECT_THAT(bind(checking_fd.get(), AsSockAddr(&connected_addr),
- connected_addr_len),
- SyscallFailsWithErrno(EADDRINUSE));
-
- // Verify that binding the v4 loopback on the same port with a v6 socket
- // fails.
- TestAddress const& test_addr_v4_mapped = V4MappedLoopback();
- sockaddr_storage addr_v4_mapped = test_addr_v4_mapped.addr;
- ASSERT_NO_ERRNO(SetAddrPort(test_addr_v4_mapped.family(), &addr_v4_mapped,
- ephemeral_port));
- const FileDescriptor fd_v4_mapped = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(test_addr_v4_mapped.family(), param.type, 0));
- EXPECT_THAT(bind(fd_v4_mapped.get(), AsSockAddr(&addr_v4_mapped),
- test_addr_v4_mapped.addr_len),
- SyscallFailsWithErrno(EADDRINUSE));
-
- // Verify that binding the v6 any on the same port with a dual-stack socket
- // fails.
- TestAddress const& test_addr_v6_any = V6Any();
- sockaddr_storage addr_v6_any = test_addr_v6_any.addr;
- ASSERT_NO_ERRNO(
- SetAddrPort(test_addr_v6_any.family(), &addr_v6_any, ephemeral_port));
- const FileDescriptor fd_v6_any = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(test_addr_v6_any.family(), param.type, 0));
- ASSERT_THAT(bind(fd_v6_any.get(), AsSockAddr(&addr_v6_any),
- test_addr_v6_any.addr_len),
- SyscallFailsWithErrno(EADDRINUSE));
-
- // For some reason, binding the TCP v6-only any is flaky on Linux. Maybe we
- // tend to run out of ephemeral ports? Regardless, binding the v6 loopback
- // seems pretty reliable. Only try to bind the v6-only any for UDP, or when
- // running on gVisor.
-
- int ret = -1;
-
- if (!IsRunningOnGvisor() && param.type == SOCK_STREAM) {
- // Verify that we can still bind the v6 loopback on the same port.
- TestAddress const& test_addr_v6 = V6Loopback();
- sockaddr_storage addr_v6 = test_addr_v6.addr;
- ASSERT_NO_ERRNO(
- SetAddrPort(test_addr_v6.family(), &addr_v6, ephemeral_port));
- const FileDescriptor fd_v6 = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(test_addr_v6.family(), param.type, 0));
- ret = bind(fd_v6.get(), AsSockAddr(&addr_v6), test_addr_v6.addr_len);
- } else {
- // Verify that we can still bind the v6 any on the same port with a
- // v6-only socket.
- const FileDescriptor fd_v6_only_any = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(test_addr_v6_any.family(), param.type, 0));
- EXPECT_THAT(setsockopt(fd_v6_only_any.get(), IPPROTO_IPV6, IPV6_V6ONLY,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
- ret = bind(fd_v6_only_any.get(), AsSockAddr(&addr_v6_any),
- test_addr_v6_any.addr_len);
- }
-
- if (ret == -1 && errno == EADDRINUSE) {
- // Port may have been in use.
- ASSERT_LT(i, 100); // Give up after 100 tries.
- continue;
- }
- EXPECT_THAT(ret, SyscallSucceeds());
-
- // No need to try again.
- break;
- }
-}
-
-TEST_P(SocketMultiProtocolInetLoopbackTest,
- MultipleBindsAllowedNoListeningReuseAddr) {
- ProtocolTestParam const& param = GetParam();
- // UDP sockets are allowed to bind/listen on the port w/ SO_REUSEADDR; for
- // TCP this is only permitted if there is no other listening socket.
- SKIP_IF(param.type != SOCK_STREAM);
- // Bind the v4 loopback on a v4 socket.
- const TestAddress& test_addr = V4Loopback();
- sockaddr_storage bound_addr = test_addr.addr;
- FileDescriptor bound_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));
-
- ASSERT_THAT(setsockopt(bound_fd.get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(bind(bound_fd.get(), AsSockAddr(&bound_addr), test_addr.addr_len),
- SyscallSucceeds());
- // Get the port that we bound.
- socklen_t bound_addr_len = test_addr.addr_len;
- ASSERT_THAT(
- getsockname(bound_fd.get(), AsSockAddr(&bound_addr), &bound_addr_len),
- SyscallSucceeds());
-
- // Now create a socket and bind it to the same port, this should
- // succeed since there is no listening socket for the same port.
- FileDescriptor second_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));
-
- ASSERT_THAT(setsockopt(second_fd.get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(
- bind(second_fd.get(), AsSockAddr(&bound_addr), test_addr.addr_len),
- SyscallSucceeds());
-}
-
-TEST_P(SocketMultiProtocolInetLoopbackTest, PortReuseTwoSockets) {
- ProtocolTestParam const& param = GetParam();
- TestAddress const& test_addr = V4Loopback();
- sockaddr_storage addr = test_addr.addr;
-
- for (int i = 0; i < 2; i++) {
- const int portreuse1 = i % 2;
- auto s1 =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));
- int fd1 = s1.get();
- socklen_t addrlen = test_addr.addr_len;
-
- EXPECT_THAT(
- setsockopt(fd1, SOL_SOCKET, SO_REUSEPORT, &portreuse1, sizeof(int)),
- SyscallSucceeds());
-
- ASSERT_THAT(bind(fd1, AsSockAddr(&addr), addrlen), SyscallSucceeds());
-
- ASSERT_THAT(getsockname(fd1, AsSockAddr(&addr), &addrlen),
- SyscallSucceeds());
- if (param.type == SOCK_STREAM) {
- ASSERT_THAT(listen(fd1, 1), SyscallSucceeds());
- }
-
- // j is less than 4 to check that the port reuse logic works correctly after
- // closing bound sockets.
- for (int j = 0; j < 4; j++) {
- const int portreuse2 = j % 2;
- auto s2 =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));
- int fd2 = s2.get();
-
- EXPECT_THAT(
- setsockopt(fd2, SOL_SOCKET, SO_REUSEPORT, &portreuse2, sizeof(int)),
- SyscallSucceeds());
-
- std::cout << portreuse1 << " " << portreuse2 << std::endl;
- int ret = bind(fd2, AsSockAddr(&addr), addrlen);
-
- // Verify that two sockets can be bound to the same port only if
- // SO_REUSEPORT is set for both of them.
- if (!portreuse1 || !portreuse2) {
- ASSERT_THAT(ret, SyscallFailsWithErrno(EADDRINUSE));
- } else {
- ASSERT_THAT(ret, SyscallSucceeds());
- }
- }
- }
-}
-
-// Check that when a socket was bound to an address with REUSEPORT and then
-// closed, we can bind a different socket to the same address without needing
-// REUSEPORT.
-TEST_P(SocketMultiProtocolInetLoopbackTest, NoReusePortFollowingReusePort) {
- ProtocolTestParam const& param = GetParam();
- TestAddress const& test_addr = V4Loopback();
- sockaddr_storage addr = test_addr.addr;
-
- auto s = ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));
- int fd = s.get();
- socklen_t addrlen = test_addr.addr_len;
- int portreuse = 1;
- ASSERT_THAT(
- setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &portreuse, sizeof(portreuse)),
- SyscallSucceeds());
- ASSERT_THAT(bind(fd, AsSockAddr(&addr), addrlen), SyscallSucceeds());
- ASSERT_THAT(getsockname(fd, AsSockAddr(&addr), &addrlen), SyscallSucceeds());
- ASSERT_EQ(addrlen, test_addr.addr_len);
-
- s.reset();
-
- // Open a new socket and bind to the same address, but w/o REUSEPORT.
- s = ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));
- fd = s.get();
- portreuse = 0;
- ASSERT_THAT(
- setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &portreuse, sizeof(portreuse)),
- SyscallSucceeds());
- ASSERT_THAT(bind(fd, AsSockAddr(&addr), addrlen), SyscallSucceeds());
-}
-
-INSTANTIATE_TEST_SUITE_P(AllFamilies, SocketMultiProtocolInetLoopbackTest,
- ProtocolTestValues(), DescribeProtocolTestParam);
-
-} // namespace
-
-// Check that loopback receives connections from any address in the range:
-// 127.0.0.1 to 127.254.255.255. This behavior is exclusive to IPv4.
-TEST_F(SocketInetLoopbackTest, LoopbackAddressRangeConnect) {
- TestAddress const& listener = V4Any();
-
- in_addr_t addresses[] = {
- INADDR_LOOPBACK,
- INADDR_LOOPBACK + 1, // 127.0.0.2
- (in_addr_t)0x7f000101, // 127.0.1.1
- (in_addr_t)0x7f010101, // 127.1.1.1
- (in_addr_t)0x7ffeffff, // 127.254.255.255
- };
- for (const auto& address : addresses) {
- TestAddress connector("V4Loopback");
- connector.addr.ss_family = AF_INET;
- connector.addr_len = sizeof(sockaddr_in);
- reinterpret_cast<sockaddr_in*>(&connector.addr)->sin_addr.s_addr =
- htonl(address);
-
- tcpSimpleConnectTest(listener, connector, true);
- }
-}
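-
-// A quick arithmetic check of the raw constants used above (an illustrative
-// sketch added for clarity; 127.0.0.0/8 is the IPv4 loopback block):
-static_assert(INADDR_LOOPBACK == 0x7f000001, "INADDR_LOOPBACK is 127.0.0.1");
-static_assert(0x7ffeffff ==
- ((127u << 24) | (254u << 16) | (255u << 8) | 255u),
- "0x7ffeffff is 127.254.255.255");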
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_inet_loopback_isolated.cc b/test/syscalls/linux/socket_inet_loopback_isolated.cc
deleted file mode 100644
index ab2259b55..000000000
--- a/test/syscalls/linux/socket_inet_loopback_isolated.cc
+++ /dev/null
@@ -1,489 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <netinet/tcp.h>
-
-#include "gtest/gtest.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/syscalls/linux/socket_inet_loopback_test_params.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/test_util.h"
-
-// Unit tests in this file will run in their own network namespace.
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-using SocketInetLoopbackIsolatedTest =
- ::testing::TestWithParam<SocketInetTestParam>;
-
-TEST_P(SocketInetLoopbackIsolatedTest, TCPActiveCloseTimeWaitTest) {
- SocketInetTestParam const& param = GetParam();
- sockaddr_storage listen_addr, conn_bound_addr;
- listen_addr = param.listener.addr;
- SetupTimeWaitClose(&param.listener, &param.connector, false /*reuse*/,
- false /*accept_close*/, &listen_addr, &conn_bound_addr);
- FileDescriptor conn_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(param.connector.family(), SOCK_STREAM, IPPROTO_TCP));
-
- ASSERT_THAT(bind(conn_fd.get(), AsSockAddr(&conn_bound_addr),
- param.connector.addr_len),
- SyscallFailsWithErrno(EADDRINUSE));
-}
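-
-// SetupTimeWaitClose is defined elsewhere in this test suite. For context,
-// here is a minimal sketch of the kind of setup it is assumed to perform
-// (hypothetical name and simplified logic; the real helper also takes a reuse
-// flag that sets SO_REUSEADDR, and may differ in detail): establish a
-// loopback connection, then close one end first so that the actively closing
-// side can enter TIME_WAIT while the passively closing side does not.
-void TimeWaitSetupSketch(const TestAddress& listener,
- const TestAddress& connector, bool accept_close,
- sockaddr_storage* listen_addr,
- sockaddr_storage* conn_bound_addr) {
- // Listen on the requested address and learn the bound port.
- const FileDescriptor listen_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(listener.family(), SOCK_STREAM, IPPROTO_TCP));
- ASSERT_THAT(
- bind(listen_fd.get(), AsSockAddr(listen_addr), listener.addr_len),
- SyscallSucceeds());
- ASSERT_THAT(listen(listen_fd.get(), SOMAXCONN), SyscallSucceeds());
- socklen_t addrlen = listener.addr_len;
- ASSERT_THAT(getsockname(listen_fd.get(), AsSockAddr(listen_addr), &addrlen),
- SyscallSucceeds());
- const uint16_t port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), *listen_addr));
-
- // Connect and accept.
- FileDescriptor conn_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(connector.family(), SOCK_STREAM, IPPROTO_TCP));
- sockaddr_storage conn_addr = connector.addr;
- ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));
- ASSERT_THAT(RetryEINTR(connect)(conn_fd.get(), AsSockAddr(&conn_addr),
- connector.addr_len),
- SyscallSucceeds());
- FileDescriptor accepted =
- ASSERT_NO_ERRNO_AND_VALUE(Accept(listen_fd.get(), nullptr, nullptr));
-
- // Remember the client's bound address, then close the two ends in order:
- // the side that closes first performs the active close.
- socklen_t conn_addrlen = connector.addr_len;
- ASSERT_THAT(getsockname(conn_fd.get(), AsSockAddr(conn_bound_addr),
- &conn_addrlen),
- SyscallSucceeds());
- if (accept_close) {
- accepted.reset();
- conn_fd.reset();
- } else {
- conn_fd.reset();
- accepted.reset();
- }
-}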
-
-TEST_P(SocketInetLoopbackIsolatedTest, TCPActiveCloseTimeWaitReuseTest) {
- SocketInetTestParam const& param = GetParam();
- sockaddr_storage listen_addr, conn_bound_addr;
- listen_addr = param.listener.addr;
- SetupTimeWaitClose(&param.listener, &param.connector, true /*reuse*/,
- false /*accept_close*/, &listen_addr, &conn_bound_addr);
- FileDescriptor conn_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(param.connector.family(), SOCK_STREAM, IPPROTO_TCP));
- ASSERT_THAT(setsockopt(conn_fd.get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(bind(conn_fd.get(), AsSockAddr(&conn_bound_addr),
- param.connector.addr_len),
- SyscallFailsWithErrno(EADDRINUSE));
-}
-
-// These tests are disabled under random save because the restore run results
-// in stack.Seed() being different, which can cause the sequence number of the
-// final connect to be one that is considered old, making the test flaky.
-//
-// Test re-binding of the client and server bound addresses when the older
-// connection is in TIME_WAIT.
-TEST_P(SocketInetLoopbackIsolatedTest, TCPPassiveCloseNoTimeWaitTest) {
- SocketInetTestParam const& param = GetParam();
- sockaddr_storage listen_addr, conn_bound_addr;
- listen_addr = param.listener.addr;
- SetupTimeWaitClose(&param.listener, &param.connector, false /*reuse*/,
- true /*accept_close*/, &listen_addr, &conn_bound_addr);
-
- // Now bind a new socket and verify that we can immediately rebind the address
- // bound by the conn_fd as it never entered TIME_WAIT.
- const FileDescriptor conn_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(param.connector.family(), SOCK_STREAM, IPPROTO_TCP));
- ASSERT_THAT(bind(conn_fd.get(), AsSockAddr(&conn_bound_addr),
- param.connector.addr_len),
- SyscallSucceeds());
-
- FileDescriptor listen_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(param.listener.family(), SOCK_STREAM, IPPROTO_TCP));
- ASSERT_THAT(
- bind(listen_fd.get(), AsSockAddr(&listen_addr), param.listener.addr_len),
- SyscallFailsWithErrno(EADDRINUSE));
-}
-
-TEST_P(SocketInetLoopbackIsolatedTest, TCPPassiveCloseNoTimeWaitReuseTest) {
- SocketInetTestParam const& param = GetParam();
- sockaddr_storage listen_addr, conn_bound_addr;
- listen_addr = param.listener.addr;
- SetupTimeWaitClose(&param.listener, &param.connector, true /*reuse*/,
- true /*accept_close*/, &listen_addr, &conn_bound_addr);
-
- FileDescriptor listen_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(param.listener.family(), SOCK_STREAM, IPPROTO_TCP));
- ASSERT_THAT(setsockopt(listen_fd.get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(
- bind(listen_fd.get(), AsSockAddr(&listen_addr), param.listener.addr_len),
- SyscallSucceeds());
- ASSERT_THAT(listen(listen_fd.get(), SOMAXCONN), SyscallSucceeds());
-
- // Now bind and connect a new socket and verify that we can immediately
- // rebind the address bound by conn_fd, as it never entered TIME_WAIT.
- const FileDescriptor conn_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(param.connector.family(), SOCK_STREAM, IPPROTO_TCP));
- ASSERT_THAT(setsockopt(conn_fd.get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(bind(conn_fd.get(), AsSockAddr(&conn_bound_addr),
- param.connector.addr_len),
- SyscallSucceeds());
-
- uint16_t const port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(param.listener.family(), listen_addr));
- sockaddr_storage conn_addr = param.connector.addr;
- ASSERT_NO_ERRNO(SetAddrPort(param.connector.family(), &conn_addr, port));
- ASSERT_THAT(RetryEINTR(connect)(conn_fd.get(), AsSockAddr(&conn_addr),
- param.connector.addr_len),
- SyscallSucceeds());
-}
-
-// TCPFinWait2Test creates a pair of connected sockets, then closes one end to
-// trigger FIN_WAIT2 state for the closed endpoint. It then tries to bind the
-// same local IP/port on a new socket, which should fail with EADDRINUSE. We
-// then wait until the FIN_WAIT2 timeout is over and connect with the new
-// socket, which should now succeed.
-//
-// TCP timers are not saved/restored today; this can cause this test to be
-// flaky when run under random S/R due to the timer being reset on a restore.
-TEST_P(SocketInetLoopbackIsolatedTest, TCPFinWait2Test) {
- SocketInetTestParam const& param = GetParam();
- TestAddress const& listener = param.listener;
- TestAddress const& connector = param.connector;
-
- // Create the listening socket.
- const FileDescriptor listen_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(listener.family(), SOCK_STREAM, IPPROTO_TCP));
- sockaddr_storage listen_addr = listener.addr;
- ASSERT_THAT(
- bind(listen_fd.get(), AsSockAddr(&listen_addr), listener.addr_len),
- SyscallSucceeds());
- ASSERT_THAT(listen(listen_fd.get(), SOMAXCONN), SyscallSucceeds());
-
- // Get the port bound by the listening socket.
- socklen_t addrlen = listener.addr_len;
- ASSERT_THAT(getsockname(listen_fd.get(), AsSockAddr(&listen_addr), &addrlen),
- SyscallSucceeds());
-
- uint16_t const port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr));
-
- // Connect to the listening socket.
- FileDescriptor conn_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(connector.family(), SOCK_STREAM, IPPROTO_TCP));
-
- // Lower FIN_WAIT2 state to 5 seconds for test.
- constexpr int kTCPLingerTimeout = 5;
- EXPECT_THAT(setsockopt(conn_fd.get(), IPPROTO_TCP, TCP_LINGER2,
- &kTCPLingerTimeout, sizeof(kTCPLingerTimeout)),
- SyscallSucceedsWithValue(0));
-
- sockaddr_storage conn_addr = connector.addr;
- ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));
- ASSERT_THAT(RetryEINTR(connect)(conn_fd.get(), AsSockAddr(&conn_addr),
- connector.addr_len),
- SyscallSucceeds());
-
- // Accept the connection.
- auto accepted =
- ASSERT_NO_ERRNO_AND_VALUE(Accept(listen_fd.get(), nullptr, nullptr));
-
- // Get the address/port bound by the connecting socket.
- sockaddr_storage conn_bound_addr;
- socklen_t conn_addrlen = connector.addr_len;
- ASSERT_THAT(
- getsockname(conn_fd.get(), AsSockAddr(&conn_bound_addr), &conn_addrlen),
- SyscallSucceeds());
-
- // close the connecting FD to trigger FIN_WAIT2 on the connected fd.
- conn_fd.reset();
-
- // Now bind and connect a new socket.
- const FileDescriptor conn_fd2 = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(connector.family(), SOCK_STREAM, IPPROTO_TCP));
-
- // Disable cooperative saves after this point, as a save between the first
- // bind/connect and the second one can cause the linger timeout timer to be
- // restarted, causing the final bind/connect to fail.
- DisableSave ds;
-
- ASSERT_THAT(bind(conn_fd2.get(), AsSockAddr(&conn_bound_addr), conn_addrlen),
- SyscallFailsWithErrno(EADDRINUSE));
-
- // Sleep for a little over the linger timeout to reduce flakiness in
- // save/restore tests.
- absl::SleepFor(absl::Seconds(kTCPLingerTimeout + 2));
-
- ds.reset();
-
- ASSERT_THAT(
- RetryEINTR(connect)(conn_fd2.get(), AsSockAddr(&conn_addr), conn_addrlen),
- SyscallSucceeds());
-}
-
-// TCPLinger2TimeoutAfterClose creates a pair of connected sockets, then
-// closes one end to trigger FIN_WAIT2 state for the closed endpoint. It then
-// sleeps for the TCP_LINGER2 timeout and verifies that binding/connecting the
-// same address succeeds.
-//
-// TCP timers are not saved/restored today; this can cause this test to be
-// flaky when run under random S/R due to the timer being reset on a restore.
-TEST_P(SocketInetLoopbackIsolatedTest, TCPLinger2TimeoutAfterClose) {
- SocketInetTestParam const& param = GetParam();
- TestAddress const& listener = param.listener;
- TestAddress const& connector = param.connector;
-
- // Create the listening socket.
- const FileDescriptor listen_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(listener.family(), SOCK_STREAM, IPPROTO_TCP));
- sockaddr_storage listen_addr = listener.addr;
- ASSERT_THAT(
- bind(listen_fd.get(), AsSockAddr(&listen_addr), listener.addr_len),
- SyscallSucceeds());
- ASSERT_THAT(listen(listen_fd.get(), SOMAXCONN), SyscallSucceeds());
-
- // Get the port bound by the listening socket.
- socklen_t addrlen = listener.addr_len;
- ASSERT_THAT(getsockname(listen_fd.get(), AsSockAddr(&listen_addr), &addrlen),
- SyscallSucceeds());
-
- uint16_t const port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr));
-
- // Connect to the listening socket.
- FileDescriptor conn_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(connector.family(), SOCK_STREAM, IPPROTO_TCP));
-
- sockaddr_storage conn_addr = connector.addr;
- ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));
- ASSERT_THAT(RetryEINTR(connect)(conn_fd.get(), AsSockAddr(&conn_addr),
- connector.addr_len),
- SyscallSucceeds());
-
- // Accept the connection.
- auto accepted =
- ASSERT_NO_ERRNO_AND_VALUE(Accept(listen_fd.get(), nullptr, nullptr));
-
- // Get the address/port bound by the connecting socket.
- sockaddr_storage conn_bound_addr;
- socklen_t conn_addrlen = connector.addr_len;
- ASSERT_THAT(
- getsockname(conn_fd.get(), AsSockAddr(&conn_bound_addr), &conn_addrlen),
- SyscallSucceeds());
-
- // Disable cooperative saves after this point, as TCP timers are not restored
- // across an S/R.
- {
- DisableSave ds;
- constexpr int kTCPLingerTimeout = 5;
- EXPECT_THAT(setsockopt(conn_fd.get(), IPPROTO_TCP, TCP_LINGER2,
- &kTCPLingerTimeout, sizeof(kTCPLingerTimeout)),
- SyscallSucceedsWithValue(0));
-
- // close the connecting FD to trigger FIN_WAIT2 on the connected fd.
- conn_fd.reset();
-
- absl::SleepFor(absl::Seconds(kTCPLingerTimeout + 1));
-
- // ds going out of scope will re-enable S/R, since at this point the timer
- // must have fired and cleaned up the endpoint.
- }
-
- // Now bind and connect a new socket and verify that we can immediately
- // rebind the address bound by the conn_fd as it never entered TIME_WAIT.
- const FileDescriptor conn_fd2 = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(connector.family(), SOCK_STREAM, IPPROTO_TCP));
-
- ASSERT_THAT(bind(conn_fd2.get(), AsSockAddr(&conn_bound_addr), conn_addrlen),
- SyscallSucceeds());
- ASSERT_THAT(
- RetryEINTR(connect)(conn_fd2.get(), AsSockAddr(&conn_addr), conn_addrlen),
- SyscallSucceeds());
-}
-
-INSTANTIATE_TEST_SUITE_P(All, SocketInetLoopbackIsolatedTest,
- SocketInetLoopbackTestValues(),
- DescribeSocketInetTestParam);
-
-using SocketMultiProtocolInetLoopbackIsolatedTest =
- ::testing::TestWithParam<ProtocolTestParam>;
-
-TEST_P(SocketMultiProtocolInetLoopbackIsolatedTest,
- V4EphemeralPortReservedReuseAddr) {
- ProtocolTestParam const& param = GetParam();
-
- // Bind the v4 loopback on a v4 socket.
- TestAddress const& test_addr = V4Loopback();
- sockaddr_storage bound_addr = test_addr.addr;
- const FileDescriptor bound_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));
-
- ASSERT_THAT(setsockopt(bound_fd.get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- ASSERT_THAT(bind(bound_fd.get(), AsSockAddr(&bound_addr), test_addr.addr_len),
- SyscallSucceeds());
-
- // Listen iff TCP.
- if (param.type == SOCK_STREAM) {
- ASSERT_THAT(listen(bound_fd.get(), SOMAXCONN), SyscallSucceeds());
- }
-
- // Get the port that we bound.
- socklen_t bound_addr_len = test_addr.addr_len;
- ASSERT_THAT(
- getsockname(bound_fd.get(), AsSockAddr(&bound_addr), &bound_addr_len),
- SyscallSucceeds());
-
- // Connect to bind an ephemeral port.
- const FileDescriptor connected_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));
-
- ASSERT_THAT(setsockopt(connected_fd.get(), SOL_SOCKET, SO_REUSEADDR,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- ASSERT_THAT(RetryEINTR(connect)(connected_fd.get(), AsSockAddr(&bound_addr),
- bound_addr_len),
- SyscallSucceeds());
-
- // Get the ephemeral port.
- sockaddr_storage connected_addr = {};
- socklen_t connected_addr_len = sizeof(connected_addr);
- ASSERT_THAT(getsockname(connected_fd.get(), AsSockAddr(&connected_addr),
- &connected_addr_len),
- SyscallSucceeds());
- uint16_t const ephemeral_port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(test_addr.family(), connected_addr));
-
- // Verify that we actually got an ephemeral port.
- ASSERT_NE(ephemeral_port, 0);
-
- // Verify that the ephemeral port is not reserved.
- const FileDescriptor checking_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));
- ASSERT_THAT(setsockopt(checking_fd.get(), SOL_SOCKET, SO_REUSEADDR,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
- EXPECT_THAT(
- bind(checking_fd.get(), AsSockAddr(&connected_addr), connected_addr_len),
- SyscallSucceeds());
-}
-
-TEST_P(SocketMultiProtocolInetLoopbackIsolatedTest,
- V4MappedEphemeralPortReservedReuseAddr) {
- ProtocolTestParam const& param = GetParam();
-
- // Bind the v4 loopback on a dual stack socket.
- TestAddress const& test_addr = V4MappedLoopback();
- sockaddr_storage bound_addr = test_addr.addr;
- const FileDescriptor bound_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));
- ASSERT_THAT(bind(bound_fd.get(), AsSockAddr(&bound_addr), test_addr.addr_len),
- SyscallSucceeds());
-
- ASSERT_THAT(setsockopt(bound_fd.get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- // Listen iff TCP.
- if (param.type == SOCK_STREAM) {
- ASSERT_THAT(listen(bound_fd.get(), SOMAXCONN), SyscallSucceeds());
- }
-
- // Get the port that we bound.
- socklen_t bound_addr_len = test_addr.addr_len;
- ASSERT_THAT(
- getsockname(bound_fd.get(), AsSockAddr(&bound_addr), &bound_addr_len),
- SyscallSucceeds());
-
- // Connect to bind an ephemeral port.
- const FileDescriptor connected_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));
- ASSERT_THAT(setsockopt(connected_fd.get(), SOL_SOCKET, SO_REUSEADDR,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(RetryEINTR(connect)(connected_fd.get(), AsSockAddr(&bound_addr),
- bound_addr_len),
- SyscallSucceeds());
-
- // Get the ephemeral port.
- sockaddr_storage connected_addr = {};
- socklen_t connected_addr_len = sizeof(connected_addr);
- ASSERT_THAT(getsockname(connected_fd.get(), AsSockAddr(&connected_addr),
- &connected_addr_len),
- SyscallSucceeds());
- uint16_t const ephemeral_port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(test_addr.family(), connected_addr));
-
- // Verify that we actually got an ephemeral port.
- ASSERT_NE(ephemeral_port, 0);
-
- // Verify that the ephemeral port is not reserved.
- const FileDescriptor checking_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));
- ASSERT_THAT(setsockopt(checking_fd.get(), SOL_SOCKET, SO_REUSEADDR,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
- EXPECT_THAT(
- bind(checking_fd.get(), AsSockAddr(&connected_addr), connected_addr_len),
- SyscallSucceeds());
-}
-
-TEST_P(SocketMultiProtocolInetLoopbackIsolatedTest,
- V6EphemeralPortReservedReuseAddr) {
- ProtocolTestParam const& param = GetParam();
-
- // Bind the v6 loopback on a dual stack socket.
- TestAddress const& test_addr = V6Loopback();
- sockaddr_storage bound_addr = test_addr.addr;
- const FileDescriptor bound_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));
- ASSERT_THAT(bind(bound_fd.get(), AsSockAddr(&bound_addr), test_addr.addr_len),
- SyscallSucceeds());
- ASSERT_THAT(setsockopt(bound_fd.get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- // Listen iff TCP.
- if (param.type == SOCK_STREAM) {
- ASSERT_THAT(listen(bound_fd.get(), SOMAXCONN), SyscallSucceeds());
- }
-
- // Get the port that we bound.
- socklen_t bound_addr_len = test_addr.addr_len;
- ASSERT_THAT(
- getsockname(bound_fd.get(), AsSockAddr(&bound_addr), &bound_addr_len),
- SyscallSucceeds());
-
- // Connect to bind an ephemeral port.
- const FileDescriptor connected_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));
- ASSERT_THAT(setsockopt(connected_fd.get(), SOL_SOCKET, SO_REUSEADDR,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(RetryEINTR(connect)(connected_fd.get(), AsSockAddr(&bound_addr),
- bound_addr_len),
- SyscallSucceeds());
-
- // Get the ephemeral port.
- sockaddr_storage connected_addr = {};
- socklen_t connected_addr_len = sizeof(connected_addr);
- ASSERT_THAT(getsockname(connected_fd.get(), AsSockAddr(&connected_addr),
- &connected_addr_len),
- SyscallSucceeds());
- uint16_t const ephemeral_port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(test_addr.family(), connected_addr));
-
- // Verify that we actually got an ephemeral port.
- ASSERT_NE(ephemeral_port, 0);
-
- // Verify that the ephemeral port is not reserved.
- const FileDescriptor checking_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));
- ASSERT_THAT(setsockopt(checking_fd.get(), SOL_SOCKET, SO_REUSEADDR,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
- EXPECT_THAT(
- bind(checking_fd.get(), AsSockAddr(&connected_addr), connected_addr_len),
- SyscallSucceeds());
-}
-
-INSTANTIATE_TEST_SUITE_P(AllFamilies,
- SocketMultiProtocolInetLoopbackIsolatedTest,
- ProtocolTestValues(), DescribeProtocolTestParam);
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_inet_loopback_nogotsan.cc b/test/syscalls/linux/socket_inet_loopback_nogotsan.cc
deleted file mode 100644
index cc2773af1..000000000
--- a/test/syscalls/linux/socket_inet_loopback_nogotsan.cc
+++ /dev/null
@@ -1,212 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <arpa/inet.h>
-#include <netinet/in.h>
-#include <netinet/tcp.h>
-#include <string.h>
-
-#include <iostream>
-#include <memory>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/strings/str_cat.h"
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_inet_loopback_test_params.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/posix_error.h"
-#include "test/util/save_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-using SocketInetLoopbackTest = ::testing::TestWithParam<SocketInetTestParam>;
-
-// This test verifies that connect returns EADDRNOTAVAIL if all local ephemeral
-// ports are already in use for a given destination ip/port.
-//
-// We disable S/R because this test creates a large number of sockets.
-//
-// FIXME(b/162475855): This test is failing reliably.
-TEST_P(SocketInetLoopbackTest, DISABLED_TestTCPPortExhaustion) {
- SocketInetTestParam const& param = GetParam();
- TestAddress const& listener = param.listener;
- TestAddress const& connector = param.connector;
-
- constexpr int kBacklog = 10;
- constexpr int kClients = 65536;
-
- // Create the listening socket.
- auto listen_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(listener.family(), SOCK_STREAM, IPPROTO_TCP));
- sockaddr_storage listen_addr = listener.addr;
- ASSERT_THAT(
- bind(listen_fd.get(), AsSockAddr(&listen_addr), listener.addr_len),
- SyscallSucceeds());
- ASSERT_THAT(listen(listen_fd.get(), kBacklog), SyscallSucceeds());
-
- // Get the port bound by the listening socket.
- socklen_t addrlen = listener.addr_len;
- ASSERT_THAT(getsockname(listen_fd.get(), AsSockAddr(&listen_addr), &addrlen),
- SyscallSucceeds());
- uint16_t const port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr));
-
- // Disable cooperative S/R as we are making too many syscalls.
- DisableSave ds;
-
- // Now we keep opening connections until we run out of local ephemeral ports
- // and assert the error we get back.
- sockaddr_storage conn_addr = connector.addr;
- ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));
- std::vector<FileDescriptor> clients;
- std::vector<FileDescriptor> servers;
-
- for (int i = 0; i < kClients; i++) {
- FileDescriptor client = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(connector.family(), SOCK_STREAM, IPPROTO_TCP));
- int ret = connect(client.get(), AsSockAddr(&conn_addr), connector.addr_len);
- if (ret == 0) {
- clients.push_back(std::move(client));
- FileDescriptor server =
- ASSERT_NO_ERRNO_AND_VALUE(Accept(listen_fd.get(), nullptr, nullptr));
- servers.push_back(std::move(server));
- continue;
- }
- ASSERT_THAT(ret, SyscallFailsWithErrno(EADDRNOTAVAIL));
- break;
- }
-}
-
-INSTANTIATE_TEST_SUITE_P(All, SocketInetLoopbackTest,
- SocketInetLoopbackTestValues(),
- DescribeSocketInetTestParam);
-
-using SocketMultiProtocolInetLoopbackTest =
- ::testing::TestWithParam<ProtocolTestParam>;
-
-TEST_P(SocketMultiProtocolInetLoopbackTest,
- TCPBindAvoidsOtherBoundPortsReuseAddr) {
- ProtocolTestParam const& param = GetParam();
- // UDP sockets are allowed to bind/listen on an already bound port w/
- // SO_REUSEADDR even when requesting a port from the kernel. In the case of
- // TCP, rebinding is only permitted when SO_REUSEADDR is set and an explicit
- // port is specified. When a zero port is specified to the bind() call, an
- // already bound port will not be picked.
- SKIP_IF(param.type != SOCK_STREAM);
-
- DisableSave ds; // Too many syscalls.
-
- // A map of port to file descriptor binding the port.
- std::map<uint16_t, FileDescriptor> bound_sockets;
-
- // Reduce the number of ephemeral ports, if permitted, to reduce the running
- // time of the test.
- [[maybe_unused]] const int nports =
- ASSERT_NO_ERRNO_AND_VALUE(MaybeLimitEphemeralPorts());
-
- // Exhaust all ephemeral ports.
- while (true) {
- // Bind the v4 loopback on a v4 socket.
- TestAddress const& test_addr = V4Loopback();
- sockaddr_storage bound_addr = test_addr.addr;
- FileDescriptor bound_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));
-
- ASSERT_THAT(setsockopt(bound_fd.get(), SOL_SOCKET, SO_REUSEADDR,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- int ret = bind(bound_fd.get(), AsSockAddr(&bound_addr), test_addr.addr_len);
- if (ret != 0) {
- ASSERT_EQ(errno, EADDRINUSE);
- break;
- }
- // Get the port that we bound.
- socklen_t bound_addr_len = test_addr.addr_len;
- ASSERT_THAT(
- getsockname(bound_fd.get(), AsSockAddr(&bound_addr), &bound_addr_len),
- SyscallSucceeds());
- uint16_t port = reinterpret_cast<sockaddr_in*>(&bound_addr)->sin_port;
-
- auto [iter, inserted] = bound_sockets.emplace(port, std::move(bound_fd));
- ASSERT_TRUE(inserted);
- }
-}
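-
-// MaybeLimitEphemeralPorts is defined elsewhere in this test suite. For
-// context, here is a minimal sketch of what such a helper is assumed to do
-// when it has the required privileges (hypothetical name; the real helper may
-// differ, and this sketch assumes <fcntl.h>/<unistd.h> are reachable through
-// the existing includes): shrink the kernel's ephemeral port range so that
-// the exhaustion loops above and below finish quickly, and report the
-// resulting range size.
-PosixErrorOr<int> LimitEphemeralPortsSketch() {
- constexpr uint16_t kFirst = 32768, kLast = 32768 + 511;
- const int fd = open("/proc/sys/net/ipv4/ip_local_port_range", O_WRONLY);
- if (fd < 0) {
- // Not privileged enough to shrink the range; report Linux's default size.
- return 60999 - 32768 + 1;
- }
- const std::string range = absl::StrCat(kFirst, " ", kLast, "\n");
- const ssize_t written = write(fd, range.c_str(), range.size());
- const int saved_errno = errno;
- close(fd);
- if (written < 0) {
- return PosixError(saved_errno, "write(ip_local_port_range)");
- }
- return kLast - kFirst + 1;
-}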
-
-TEST_P(SocketMultiProtocolInetLoopbackTest,
- UDPBindMayBindOtherBoundPortsReuseAddr) {
- ProtocolTestParam const& param = GetParam();
- // UDP sockets are allowed to bind/listen on an already bound port w/
- // SO_REUSEADDR even when requesting a port from the kernel.
- SKIP_IF(param.type != SOCK_DGRAM);
-
- DisableSave ds; // Too many syscalls.
-
- // A map of port to file descriptor binding the port.
- std::map<uint16_t, FileDescriptor> bound_sockets;
-
- // Reduce the number of ephemeral ports, if permitted, to reduce the running
- // time of the test.
- [[maybe_unused]] const int nports =
- ASSERT_NO_ERRNO_AND_VALUE(MaybeLimitEphemeralPorts());
-
- // Exhaust all ephemeral ports.
- bool duplicate_binding = false;
- while (true) {
- // Bind the v4 loopback on a v4 socket.
- TestAddress const& test_addr = V4Loopback();
- sockaddr_storage bound_addr = test_addr.addr;
- FileDescriptor bound_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));
-
- ASSERT_THAT(setsockopt(bound_fd.get(), SOL_SOCKET, SO_REUSEADDR,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- ASSERT_THAT(
- bind(bound_fd.get(), AsSockAddr(&bound_addr), test_addr.addr_len),
- SyscallSucceeds());
-
- // Get the port that we bound.
- socklen_t bound_addr_len = test_addr.addr_len;
- ASSERT_THAT(
- getsockname(bound_fd.get(), AsSockAddr(&bound_addr), &bound_addr_len),
- SyscallSucceeds());
- uint16_t port = reinterpret_cast<sockaddr_in*>(&bound_addr)->sin_port;
-
- auto [iter, inserted] = bound_sockets.emplace(port, std::move(bound_fd));
- if (!inserted) {
- duplicate_binding = true;
- break;
- }
- }
- ASSERT_TRUE(duplicate_binding);
-}
-
-INSTANTIATE_TEST_SUITE_P(AllFamilies, SocketMultiProtocolInetLoopbackTest,
- ProtocolTestValues(), DescribeProtocolTestParam);
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_inet_loopback_test_params.h b/test/syscalls/linux/socket_inet_loopback_test_params.h
deleted file mode 100644
index 42b48eb8a..000000000
--- a/test/syscalls/linux/socket_inet_loopback_test_params.h
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_LINUX_SOCKET_INET_LOOPBACK_TEST_PARAMS_H_
-#define GVISOR_TEST_SYSCALLS_LINUX_SOCKET_INET_LOOPBACK_TEST_PARAMS_H_
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/socket_test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-struct SocketInetTestParam {
- TestAddress listener;
- TestAddress connector;
-};
-
-inline std::string DescribeSocketInetTestParam(
- ::testing::TestParamInfo<SocketInetTestParam> const& info) {
- return absl::StrCat("Listen", info.param.listener.description, "_Connect",
- info.param.connector.description);
-}
-
-inline auto SocketInetLoopbackTestValues() {
- return ::testing::Values(
- // Listeners bound to IPv4 addresses refuse connections using IPv6
- // addresses.
- SocketInetTestParam{V4Any(), V4Any()},
- SocketInetTestParam{V4Any(), V4Loopback()},
- SocketInetTestParam{V4Any(), V4MappedAny()},
- SocketInetTestParam{V4Any(), V4MappedLoopback()},
- SocketInetTestParam{V4Loopback(), V4Any()},
- SocketInetTestParam{V4Loopback(), V4Loopback()},
- SocketInetTestParam{V4Loopback(), V4MappedLoopback()},
- SocketInetTestParam{V4MappedAny(), V4Any()},
- SocketInetTestParam{V4MappedAny(), V4Loopback()},
- SocketInetTestParam{V4MappedAny(), V4MappedAny()},
- SocketInetTestParam{V4MappedAny(), V4MappedLoopback()},
- SocketInetTestParam{V4MappedLoopback(), V4Any()},
- SocketInetTestParam{V4MappedLoopback(), V4Loopback()},
- SocketInetTestParam{V4MappedLoopback(), V4MappedLoopback()},
-
- // Listeners bound to IN6ADDR_ANY accept all connections.
- SocketInetTestParam{V6Any(), V4Any()},
- SocketInetTestParam{V6Any(), V4Loopback()},
- SocketInetTestParam{V6Any(), V4MappedAny()},
- SocketInetTestParam{V6Any(), V4MappedLoopback()},
- SocketInetTestParam{V6Any(), V6Any()},
- SocketInetTestParam{V6Any(), V6Loopback()},
-
- // Listeners bound to IN6ADDR_LOOPBACK refuse connections using IPv4
- // addresses.
- SocketInetTestParam{V6Loopback(), V6Any()},
- SocketInetTestParam{V6Loopback(), V6Loopback()});
-}
-
-struct ProtocolTestParam {
- std::string description;
- int type;
-};
-
-inline std::string DescribeProtocolTestParam(
- ::testing::TestParamInfo<ProtocolTestParam> const& info) {
- return info.param.description;
-}
-
-inline auto ProtocolTestValues() {
- return ::testing::Values(ProtocolTestParam{"TCP", SOCK_STREAM},
- ProtocolTestParam{"UDP", SOCK_DGRAM});
-}
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_LINUX_SOCKET_INET_LOOPBACK_TEST_PARAMS_H_
diff --git a/test/syscalls/linux/socket_ip_loopback_blocking.cc b/test/syscalls/linux/socket_ip_loopback_blocking.cc
deleted file mode 100644
index fda252dd7..000000000
--- a/test/syscalls/linux/socket_ip_loopback_blocking.cc
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <netinet/tcp.h>
-
-#include <vector>
-
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_blocking.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-std::vector<SocketPairKind> GetSocketPairs() {
- return VecCat<SocketPairKind>(
- std::vector<SocketPairKind>{
- IPv6UDPBidirectionalBindSocketPair(0),
- IPv4UDPBidirectionalBindSocketPair(0),
- },
- ApplyVecToVec<SocketPairKind>(
- std::vector<Middleware>{
- NoOp, SetSockOpt(IPPROTO_TCP, TCP_NODELAY, &kSockOptOn)},
- std::vector<SocketPairKind>{
- IPv6TCPAcceptBindSocketPair(0),
- IPv4TCPAcceptBindSocketPair(0),
- }));
-}
-
-INSTANTIATE_TEST_SUITE_P(
- BlockingIPSockets, BlockingSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_ip_tcp_generic.cc b/test/syscalls/linux/socket_ip_tcp_generic.cc
deleted file mode 100644
index 2f5743cda..000000000
--- a/test/syscalls/linux/socket_ip_tcp_generic.cc
+++ /dev/null
@@ -1,1308 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/socket_ip_tcp_generic.h"
-
-#include <fcntl.h>
-#include <netinet/in.h>
-#include <netinet/tcp.h>
-#include <poll.h>
-#include <stdio.h>
-#include <sys/ioctl.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <sys/un.h>
-
-#include "gtest/gtest.h"
-#include "absl/memory/memory.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-using ::testing::AnyOf;
-using ::testing::Eq;
-
-TEST_P(TCPSocketPairTest, TcpInfoSucceeds) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct tcp_info opt = {};
- socklen_t optLen = sizeof(opt);
- EXPECT_THAT(getsockopt(sockets->first_fd(), SOL_TCP, TCP_INFO, &opt, &optLen),
- SyscallSucceeds());
-}
-
-TEST_P(TCPSocketPairTest, ShortTcpInfoSucceeds) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct tcp_info opt = {};
- socklen_t optLen = 1;
- EXPECT_THAT(getsockopt(sockets->first_fd(), SOL_TCP, TCP_INFO, &opt, &optLen),
- SyscallSucceeds());
-}
-
-TEST_P(TCPSocketPairTest, ZeroTcpInfoSucceeds) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct tcp_info opt = {};
- socklen_t optLen = 0;
- EXPECT_THAT(getsockopt(sockets->first_fd(), SOL_TCP, TCP_INFO, &opt, &optLen),
- SyscallSucceeds());
-}
-
-// Copied from include/net/tcp.h.
-constexpr int TCP_CA_OPEN = 0;
-
-TEST_P(TCPSocketPairTest, CheckTcpInfoFields) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char buf[10] = {};
- ASSERT_THAT(RetryEINTR(send)(sockets->first_fd(), buf, sizeof(buf), 0),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- // Wait until second_fd sees the data and then recv it.
- struct pollfd poll_fd = {sockets->second_fd(), POLLIN, 0};
- constexpr int kPollTimeoutMs = 2000; // Wait up to 2 seconds for the data.
- ASSERT_THAT(RetryEINTR(poll)(&poll_fd, 1, kPollTimeoutMs),
- SyscallSucceedsWithValue(1));
-
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), buf, sizeof(buf), 0),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- struct tcp_info opt = {};
- socklen_t optLen = sizeof(opt);
- ASSERT_THAT(getsockopt(sockets->first_fd(), SOL_TCP, TCP_INFO, &opt, &optLen),
- SyscallSucceeds());
- ASSERT_EQ(optLen, sizeof(opt));
-
- // Validates the received tcp_info fields.
- EXPECT_EQ(opt.tcpi_ca_state, TCP_CA_OPEN);
- EXPECT_GT(opt.tcpi_snd_cwnd, 0);
- EXPECT_GT(opt.tcpi_rto, 0);
-}
-
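For readers unfamiliar with the option exercised above, a minimal sketch of a TCP_INFO query outside the gtest fixtures follows; the descriptor fd and the DumpTcpInfo helper are illustrative assumptions, not part of the deleted test.

// Sketch: query TCP_INFO on an already-connected TCP socket.
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>

void DumpTcpInfo(int fd) {
  struct tcp_info info = {};
  socklen_t len = sizeof(info);  // The kernel may fill in fewer bytes.
  if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) != 0) {
    perror("getsockopt(TCP_INFO)");
    return;
  }
  // len now holds the number of bytes the kernel actually wrote.
  printf("ca_state=%u snd_cwnd=%u rto=%u\n", (unsigned)info.tcpi_ca_state,
         info.tcpi_snd_cwnd, info.tcpi_rto);
}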
-// This test validates that an RST is sent instead of a FIN when data is
-// unread on calls to close(2).
-TEST_P(TCPSocketPairTest, RSTSentOnCloseWithUnreadData) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char buf[10] = {};
- ASSERT_THAT(RetryEINTR(write)(sockets->first_fd(), buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- // Wait until second_fd sees the data on its side but don't read it.
- struct pollfd poll_fd = {sockets->second_fd(), POLLIN | POLLHUP, 0};
- constexpr int kPollTimeoutMs = 20000; // Wait up to 20 seconds for the data.
- ASSERT_THAT(RetryEINTR(poll)(&poll_fd, 1, kPollTimeoutMs),
- SyscallSucceedsWithValue(1));
-
- // Now close the connected socket without reading the data.
- ASSERT_THAT(close(sockets->release_second_fd()), SyscallSucceeds());
-
- // Wait for the other end to receive the RST (up to 20 seconds).
- struct pollfd poll_fd2 = {sockets->first_fd(), POLLIN | POLLHUP, 0};
- ASSERT_THAT(RetryEINTR(poll)(&poll_fd2, 1, kPollTimeoutMs),
- SyscallSucceedsWithValue(1));
-
- // A shutdown with unread data will cause a RST to be sent instead
- // of a FIN, per RFC 2525 section 2.17; this is also what Linux does.
- ASSERT_THAT(RetryEINTR(read)(sockets->first_fd(), buf, sizeof(buf)),
- SyscallFailsWithErrno(ECONNRESET));
-}
-
-// This test validates that an RST causes POLLHUP to be reported.
-TEST_P(TCPSocketPairTest, RSTCausesPollHUP) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char buf[10] = {};
- ASSERT_THAT(RetryEINTR(write)(sockets->first_fd(), buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- // Wait until second sees the data on its side but don't read it.
- struct pollfd poll_fd = {sockets->second_fd(), POLLIN, 0};
- constexpr int kPollTimeoutMs = 20000; // Wait up to 20 seconds for the data.
- ASSERT_THAT(RetryEINTR(poll)(&poll_fd, 1, kPollTimeoutMs),
- SyscallSucceedsWithValue(1));
- EXPECT_EQ(poll_fd.revents & POLLIN, POLLIN);
-
- // Confirm we at least have one unread byte.
- int bytes_available = 0;
- ASSERT_THAT(
- RetryEINTR(ioctl)(sockets->second_fd(), FIONREAD, &bytes_available),
- SyscallSucceeds());
- EXPECT_GT(bytes_available, 0);
-
- // Now close the connected socket without reading the data from the second;
- // this will cause an RST, which we should observe as POLLHUP.
- ASSERT_THAT(close(sockets->release_second_fd()), SyscallSucceeds());
-
- // Wait for the other end to receive the RST (up to 20 seconds).
- struct pollfd poll_fd3 = {sockets->first_fd(), POLLHUP, 0};
- ASSERT_THAT(RetryEINTR(poll)(&poll_fd3, 1, kPollTimeoutMs),
- SyscallSucceedsWithValue(1));
- ASSERT_NE(poll_fd3.revents & POLLHUP, 0);
-}
-
-// This test validates that even if an RST is sent, the other end does not see
-// ECONNRESET until it has read all buffered data.
-TEST_P(TCPSocketPairTest, RSTSentOnCloseWithUnreadDataAllowsReadBuffered) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char buf[10] = {};
- ASSERT_THAT(RetryEINTR(write)(sockets->first_fd(), buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
- ASSERT_THAT(RetryEINTR(write)(sockets->second_fd(), buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- // Wait until second sees the data on its side but don't read it.
- struct pollfd poll_fd = {sockets->second_fd(), POLLIN, 0};
- constexpr int kPollTimeoutMs = 30000; // Wait up to 30 seconds for the data.
- ASSERT_THAT(RetryEINTR(poll)(&poll_fd, 1, kPollTimeoutMs),
- SyscallSucceedsWithValue(1));
-
- // Wait until first sees the data on its side but don't read it.
- struct pollfd poll_fd2 = {sockets->first_fd(), POLLIN, 0};
- ASSERT_THAT(RetryEINTR(poll)(&poll_fd2, 1, kPollTimeoutMs),
- SyscallSucceedsWithValue(1));
-
- // Now close the connected socket without reading the data from the second.
- ASSERT_THAT(close(sockets->release_second_fd()), SyscallSucceeds());
-
- // Wait for the other end to receive the RST (up to 30 seconds).
- struct pollfd poll_fd3 = {sockets->first_fd(), POLLHUP, 0};
- ASSERT_THAT(RetryEINTR(poll)(&poll_fd3, 1, kPollTimeoutMs),
- SyscallSucceedsWithValue(1));
-
- // Since we also have data buffered we should be able to read it before
- // the syscall will fail with ECONNRESET.
- ASSERT_THAT(RetryEINTR(read)(sockets->first_fd(), buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- // A shutdown with unread data will cause a RST to be sent instead
- // of a FIN, per RFC 2525 section 2.17; this is also what Linux does.
- ASSERT_THAT(RetryEINTR(read)(sockets->first_fd(), buf, sizeof(buf)),
- SyscallFailsWithErrno(ECONNRESET));
-}
-
-// This test verifies that a clean shutdown (FIN) is performed when there
-// is unread data but only the write side is closed.
-TEST_P(TCPSocketPairTest, FINSentOnShutdownWrWithUnreadData) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char buf[10] = {};
- ASSERT_THAT(RetryEINTR(write)(sockets->first_fd(), buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- // Wait until second_fd sees the data on its side but don't read it.
- struct pollfd poll_fd = {sockets->second_fd(), POLLIN | POLLHUP, 0};
- constexpr int kPollTimeoutMs = 20000; // Wait up to 20 seconds for the data.
- ASSERT_THAT(RetryEINTR(poll)(&poll_fd, 1, kPollTimeoutMs),
- SyscallSucceedsWithValue(1));
-
- // Now shutdown the write end leaving the read end open.
- ASSERT_THAT(shutdown(sockets->second_fd(), SHUT_WR), SyscallSucceeds());
-
- // Wait for the other end to receive the FIN (up to 20 seconds).
- struct pollfd poll_fd2 = {sockets->first_fd(), POLLIN | POLLHUP, 0};
- ASSERT_THAT(RetryEINTR(poll)(&poll_fd2, 1, kPollTimeoutMs),
- SyscallSucceedsWithValue(1));
-
- // Since we didn't shutdown the read end this will be a clean close.
- ASSERT_THAT(RetryEINTR(read)(sockets->first_fd(), buf, sizeof(buf)),
- SyscallSucceedsWithValue(0));
-}
-
-// This test verifies that when a socket has received data that has not been
-// read, shutting down the read side (SHUT_RD) does not generate any packets.
-TEST_P(TCPSocketPairTest, ShutdownRdShouldCauseNoPacketsWithUnreadData) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char buf[10] = {};
- ASSERT_THAT(RetryEINTR(write)(sockets->first_fd(), buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- // Wait until second_fd sees the data on its side but don't read it.
- struct pollfd poll_fd = {sockets->second_fd(), POLLIN | POLLHUP, 0};
- constexpr int kPollTimeoutMs = 20000; // Wait up to 20 seconds for the data.
- ASSERT_THAT(RetryEINTR(poll)(&poll_fd, 1, kPollTimeoutMs),
- SyscallSucceedsWithValue(1));
-
- // Now shut down the read end; this will generate no packets to the other end.
- ASSERT_THAT(shutdown(sockets->second_fd(), SHUT_RD), SyscallSucceeds());
-
- // We should not receive any events on the other side of the socket.
- struct pollfd poll_fd2 = {sockets->first_fd(), POLLIN | POLLHUP, 0};
- constexpr int kPollNoResponseTimeoutMs = 3000;
- ASSERT_THAT(RetryEINTR(poll)(&poll_fd2, 1, kPollNoResponseTimeoutMs),
- SyscallSucceedsWithValue(0)); // Timeout.
-}
-
-// This test verifies that a socket with unread data still allows that data to
-// be read after the read side is shut down, and that once no unread data is
-// left, read returns EOF.
-TEST_P(TCPSocketPairTest, ShutdownRdAllowsReadOfReceivedDataBeforeEOF) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char buf[10] = {};
- ASSERT_THAT(RetryEINTR(write)(sockets->first_fd(), buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- // Wait until second_fd sees the data on its side but don't read it.
- struct pollfd poll_fd = {sockets->second_fd(), POLLIN | POLLHUP, 0};
- constexpr int kPollTimeoutMs = 20000; // Wait up to 20 seconds for the data.
- ASSERT_THAT(RetryEINTR(poll)(&poll_fd, 1, kPollTimeoutMs),
- SyscallSucceedsWithValue(1));
-
- // Now shutdown the read end.
- ASSERT_THAT(shutdown(sockets->second_fd(), SHUT_RD), SyscallSucceeds());
-
- // Even though we did a SHUT_RD on the read end we can still read the data.
- ASSERT_THAT(RetryEINTR(read)(sockets->second_fd(), buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- // After reading all of the data, reading the closed read end returns EOF.
- ASSERT_THAT(RetryEINTR(poll)(&poll_fd, 1, kPollTimeoutMs),
- SyscallSucceedsWithValue(1));
- ASSERT_THAT(RetryEINTR(read)(sockets->second_fd(), buf, sizeof(buf)),
- SyscallSucceedsWithValue(0));
-}
-
-// This test verifies that a shutdown(wr) by the server after sending data
-// still allows the client to read() the queued data, and that a client close
-// after sending its response allows the server to read the incoming response.
-TEST_P(TCPSocketPairTest, ShutdownWrServerClientClose) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- char buf[10] = {};
- ScopedThread t([&]() {
- ASSERT_THAT(RetryEINTR(read)(sockets->first_fd(), buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
- ASSERT_THAT(RetryEINTR(write)(sockets->first_fd(), buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
- ASSERT_THAT(close(sockets->release_first_fd()),
- SyscallSucceedsWithValue(0));
- });
- ASSERT_THAT(RetryEINTR(write)(sockets->second_fd(), buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
- ASSERT_THAT(RetryEINTR(shutdown)(sockets->second_fd(), SHUT_WR),
- SyscallSucceedsWithValue(0));
- t.Join();
-
- ASSERT_THAT(RetryEINTR(read)(sockets->second_fd(), buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-}
-
-TEST_P(TCPSocketPairTest, ClosedReadNonBlockingSocket) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- // Set the read end to O_NONBLOCK.
- int opts = 0;
- ASSERT_THAT(opts = fcntl(sockets->second_fd(), F_GETFL), SyscallSucceeds());
- ASSERT_THAT(fcntl(sockets->second_fd(), F_SETFL, opts | O_NONBLOCK),
- SyscallSucceeds());
-
- char buf[10] = {};
- ASSERT_THAT(RetryEINTR(send)(sockets->first_fd(), buf, sizeof(buf), 0),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- // Wait until second_fd sees the data and then recv it.
- struct pollfd poll_fd = {sockets->second_fd(), POLLIN, 0};
- constexpr int kPollTimeoutMs = 2000; // Wait up to 2 seconds for the data.
- ASSERT_THAT(RetryEINTR(poll)(&poll_fd, 1, kPollTimeoutMs),
- SyscallSucceedsWithValue(1));
-
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), buf, sizeof(buf), 0),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- // Now close the writing end; the reading end of the pair stays open.
- ASSERT_THAT(close(sockets->release_first_fd()), SyscallSucceeds());
-
- // Wait for close notification and recv again.
- struct pollfd poll_fd2 = {sockets->second_fd(), POLLIN, 0};
- ASSERT_THAT(RetryEINTR(poll)(&poll_fd2, 1, kPollTimeoutMs),
- SyscallSucceedsWithValue(1));
-
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), buf, sizeof(buf), 0),
- SyscallSucceedsWithValue(0));
-}
-
-TEST_P(TCPSocketPairTest,
- ShutdownRdUnreadDataShouldCauseNoPacketsUnlessClosed) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char buf[10] = {};
- ASSERT_THAT(RetryEINTR(write)(sockets->first_fd(), buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- // Wait until second_fd sees the data on its side but don't read it.
- struct pollfd poll_fd = {sockets->second_fd(), POLLIN | POLLHUP, 0};
- constexpr int kPollTimeoutMs = 20000; // Wait up to 20 seconds for the data.
- ASSERT_THAT(RetryEINTR(poll)(&poll_fd, 1, kPollTimeoutMs),
- SyscallSucceedsWithValue(1));
-
- // Now shut down the read end; this will generate no packets to the other end.
- ASSERT_THAT(shutdown(sockets->second_fd(), SHUT_RD), SyscallSucceeds());
-
- // We should not receive any events on the other side of the socket.
- struct pollfd poll_fd2 = {sockets->first_fd(), POLLIN | POLLHUP, 0};
- constexpr int kPollNoResponseTimeoutMs = 3000;
- ASSERT_THAT(RetryEINTR(poll)(&poll_fd2, 1, kPollNoResponseTimeoutMs),
- SyscallSucceedsWithValue(0)); // Timeout.
-
- // Now since we've fully closed the connection it will generate a RST.
- ASSERT_THAT(close(sockets->release_second_fd()), SyscallSucceeds());
- ASSERT_THAT(RetryEINTR(poll)(&poll_fd2, 1, kPollTimeoutMs),
- SyscallSucceedsWithValue(1)); // The other end has closed.
-
- // A shutdown with unread data will cause a RST to be sent instead
- // of a FIN, per RFC 2525 section 2.17; this is also what Linux does.
- ASSERT_THAT(RetryEINTR(read)(sockets->first_fd(), buf, sizeof(buf)),
- SyscallFailsWithErrno(ECONNRESET));
-}
-
-TEST_P(TCPSocketPairTest, TCPCorkDefault) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- EXPECT_THAT(
- getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CORK, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOff);
-}
-
-TEST_P(TCPSocketPairTest, SetTCPCork) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CORK,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- EXPECT_THAT(
- getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CORK, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOn);
-
- ASSERT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CORK,
- &kSockOptOff, sizeof(kSockOptOff)),
- SyscallSucceeds());
-
- EXPECT_THAT(
- getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CORK, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOff);
-}
-
-TEST_P(TCPSocketPairTest, TCPCork) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CORK,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- constexpr char kData[] = "abc";
- ASSERT_THAT(WriteFd(sockets->first_fd(), kData, sizeof(kData)),
- SyscallSucceedsWithValue(sizeof(kData)));
-
- ASSERT_NO_FATAL_FAILURE(RecvNoData(sockets->second_fd()));
-
- EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CORK,
- &kSockOptOff, sizeof(kSockOptOff)),
- SyscallSucceeds());
-
- // Create a receive buffer larger than kData.
- char buf[(sizeof(kData) + 1) * 2] = {};
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), buf, sizeof(buf), 0),
- SyscallSucceedsWithValue(sizeof(kData)));
- EXPECT_EQ(absl::string_view(kData, sizeof(kData)),
- absl::string_view(buf, sizeof(kData)));
-}
-
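The TCP_CORK tests above rely on the cork holding back small writes until the option is cleared; a minimal sketch of the usual header-plus-body pattern follows (fd, header, and body are assumed inputs, and return values are left unchecked for brevity).

// Sketch: coalesce two small writes into one segment with TCP_CORK.
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

void SendCorked(int fd, const char* header, const char* body) {
  int on = 1, off = 0;
  setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));  // Start corking.
  write(fd, header, strlen(header));  // Held back by the cork (partial segment).
  write(fd, body, strlen(body));      // Still queued behind the cork.
  setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));  // Flushes now.
}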
-TEST_P(TCPSocketPairTest, TCPQuickAckDefault) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- EXPECT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_QUICKACK, &get,
- &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOn);
-}
-
-TEST_P(TCPSocketPairTest, SetTCPQuickAck) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_QUICKACK,
- &kSockOptOff, sizeof(kSockOptOff)),
- SyscallSucceeds());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- EXPECT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_QUICKACK, &get,
- &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOff);
-
- ASSERT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_QUICKACK,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- EXPECT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_QUICKACK, &get,
- &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOn);
-}
-
-TEST_P(TCPSocketPairTest, SoKeepaliveDefault) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- EXPECT_THAT(
- getsockopt(sockets->first_fd(), SOL_SOCKET, SO_KEEPALIVE, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOff);
-}
-
-TEST_P(TCPSocketPairTest, SetSoKeepalive) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(setsockopt(sockets->first_fd(), SOL_SOCKET, SO_KEEPALIVE,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- EXPECT_THAT(
- getsockopt(sockets->first_fd(), SOL_SOCKET, SO_KEEPALIVE, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOn);
-
- ASSERT_THAT(setsockopt(sockets->first_fd(), SOL_SOCKET, SO_KEEPALIVE,
- &kSockOptOff, sizeof(kSockOptOff)),
- SyscallSucceeds());
-
- EXPECT_THAT(
- getsockopt(sockets->first_fd(), SOL_SOCKET, SO_KEEPALIVE, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOff);
-}
-
-TEST_P(TCPSocketPairTest, TCPKeepidleDefault) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- EXPECT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_KEEPIDLE, &get,
- &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, 2 * 60 * 60); // 2 hours.
-}
-
-TEST_P(TCPSocketPairTest, TCPKeepintvlDefault) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- EXPECT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_KEEPINTVL, &get,
- &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, 75); // 75 seconds.
-}
-
-TEST_P(TCPSocketPairTest, SetTCPKeepidleZero) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- constexpr int kZero = 0;
- EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_KEEPIDLE, &kZero,
- sizeof(kZero)),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_P(TCPSocketPairTest, SetTCPKeepintvlZero) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- constexpr int kZero = 0;
- EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_KEEPINTVL,
- &kZero, sizeof(kZero)),
- SyscallFailsWithErrno(EINVAL));
-}
-
-// Copied from include/net/tcp.h.
-constexpr int MAX_TCP_KEEPIDLE = 32767;
-constexpr int MAX_TCP_KEEPINTVL = 32767;
-constexpr int MAX_TCP_KEEPCNT = 127;
-
-TEST_P(TCPSocketPairTest, SetTCPKeepidleAboveMax) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- constexpr int kAboveMax = MAX_TCP_KEEPIDLE + 1;
- EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_KEEPIDLE,
- &kAboveMax, sizeof(kAboveMax)),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_P(TCPSocketPairTest, SetTCPKeepintvlAboveMax) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- constexpr int kAboveMax = MAX_TCP_KEEPINTVL + 1;
- EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_KEEPINTVL,
- &kAboveMax, sizeof(kAboveMax)),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_P(TCPSocketPairTest, SetTCPKeepidleToMax) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_KEEPIDLE,
- &MAX_TCP_KEEPIDLE, sizeof(MAX_TCP_KEEPIDLE)),
- SyscallSucceedsWithValue(0));
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- EXPECT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_KEEPIDLE, &get,
- &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, MAX_TCP_KEEPIDLE);
-}
-
-TEST_P(TCPSocketPairTest, SetTCPKeepintvlToMax) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_KEEPINTVL,
- &MAX_TCP_KEEPINTVL, sizeof(MAX_TCP_KEEPINTVL)),
- SyscallSucceedsWithValue(0));
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- EXPECT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_KEEPINTVL, &get,
- &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, MAX_TCP_KEEPINTVL);
-}
-
-TEST_P(TCPSocketPairTest, TCPKeepcountDefault) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- EXPECT_THAT(
- getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_KEEPCNT, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, 9); // 9 keepalive probes.
-}
-
-TEST_P(TCPSocketPairTest, SetTCPKeepcountZero) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- constexpr int kZero = 0;
- EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_KEEPCNT, &kZero,
- sizeof(kZero)),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_P(TCPSocketPairTest, SetTCPKeepcountAboveMax) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- constexpr int kAboveMax = MAX_TCP_KEEPCNT + 1;
- EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_KEEPCNT,
- &kAboveMax, sizeof(kAboveMax)),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_P(TCPSocketPairTest, SetTCPKeepcountToMax) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_KEEPCNT,
- &MAX_TCP_KEEPCNT, sizeof(MAX_TCP_KEEPCNT)),
- SyscallSucceedsWithValue(0));
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- EXPECT_THAT(
- getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_KEEPCNT, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, MAX_TCP_KEEPCNT);
-}
-
-TEST_P(TCPSocketPairTest, SetTCPKeepcountToOne) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int keepaliveCount = 1;
- EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_KEEPCNT,
- &keepaliveCount, sizeof(keepaliveCount)),
- SyscallSucceedsWithValue(0));
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- EXPECT_THAT(
- getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_KEEPCNT, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, keepaliveCount);
-}
-
-TEST_P(TCPSocketPairTest, SetTCPKeepcountToNegative) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int keepaliveCount = -5;
- EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_KEEPCNT,
- &keepaliveCount, sizeof(keepaliveCount)),
- SyscallFailsWithErrno(EINVAL));
-}
-
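The keepalive tests above cover the defaults and boundary values for TCP_KEEPIDLE, TCP_KEEPINTVL, and TCP_KEEPCNT; a minimal sketch of how these knobs are typically combined follows (the specific values are illustrative only).

// Sketch: enable keepalive with a tightened probing schedule.
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

int EnableKeepalive(int fd) {
  const int on = 1;
  const int idle = 60;   // Seconds of idle time before the first probe.
  const int intvl = 10;  // Seconds between unanswered probes.
  const int cnt = 5;     // Unanswered probes before the connection is dropped.
  if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) != 0 ||
      setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle)) != 0 ||
      setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl)) != 0 ||
      setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt)) != 0) {
    return -1;
  }
  return 0;
}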
-TEST_P(TCPSocketPairTest, SetOOBInline) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- EXPECT_THAT(setsockopt(sockets->first_fd(), SOL_SOCKET, SO_OOBINLINE,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- EXPECT_THAT(
- getsockopt(sockets->first_fd(), SOL_SOCKET, SO_OOBINLINE, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOn);
-}
-
-TEST_P(TCPSocketPairTest, MsgTruncMsgPeek) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[512];
- RandomizeBuffer(sent_data, sizeof(sent_data));
- ASSERT_THAT(
- RetryEINTR(send)(sockets->first_fd(), sent_data, sizeof(sent_data), 0),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- // Read half of the data with MSG_TRUNC | MSG_PEEK. This way there will still
- // be some data left to read in the next step even if the data gets consumed.
- char received_data1[sizeof(sent_data) / 2] = {};
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), received_data1,
- sizeof(received_data1), MSG_TRUNC | MSG_PEEK),
- SyscallSucceedsWithValue(sizeof(received_data1)));
-
- // Check that we didn't get anything.
- char zeros[sizeof(received_data1)] = {};
- EXPECT_EQ(0, memcmp(zeros, received_data1, sizeof(received_data1)));
-
- // Check that all of the data is still there.
- char received_data2[sizeof(sent_data)] = {};
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), received_data2,
- sizeof(received_data2), 0),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- EXPECT_EQ(0, memcmp(received_data2, sent_data, sizeof(sent_data)));
-}
-
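MsgTruncMsgPeek above checks that peeking does not consume queued bytes; a minimal sketch of the plain MSG_PEEK pattern follows (fd is assumed to be a connected TCP socket with data pending).

// Sketch: look at queued data without consuming it.
#include <stdio.h>
#include <sys/socket.h>
#include <sys/types.h>

void PeekThenRead(int fd) {
  char peeked[64], consumed[64];
  ssize_t n1 = recv(fd, peeked, sizeof(peeked), MSG_PEEK);  // Non-destructive.
  ssize_t n2 = recv(fd, consumed, sizeof(consumed), 0);     // Consumes data.
  printf("peeked %zd bytes, then read %zd bytes\n", n1, n2);
}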
-TEST_P(TCPSocketPairTest, SetCongestionControlSucceedsForSupported) {
- // This is Linux's net/tcp.h TCP_CA_NAME_MAX.
- const int kTcpCaNameMax = 16;
-
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- // Netstack only supports reno & cubic so we only test these two values here.
- {
- const char kSetCC[kTcpCaNameMax] = "reno";
- ASSERT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION,
- &kSetCC, strlen(kSetCC)),
- SyscallSucceedsWithValue(0));
-
- char got_cc[kTcpCaNameMax];
- memset(got_cc, '1', sizeof(got_cc));
- socklen_t optlen = sizeof(got_cc);
- ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION,
- &got_cc, &optlen),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(0, memcmp(got_cc, kSetCC, sizeof(kSetCC)));
- }
- {
- const char kSetCC[kTcpCaNameMax] = "cubic";
- ASSERT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION,
- &kSetCC, strlen(kSetCC)),
- SyscallSucceedsWithValue(0));
-
- char got_cc[kTcpCaNameMax];
- memset(got_cc, '1', sizeof(got_cc));
- socklen_t optlen = sizeof(got_cc);
- ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION,
- &got_cc, &optlen),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(0, memcmp(got_cc, kSetCC, sizeof(kSetCC)));
- }
-}
-
-TEST_P(TCPSocketPairTest, SetGetTCPCongestionShortReadBuffer) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- {
- // Verify that getsockopt/setsockopt work with buffers smaller than
- // kTcpCaNameMax.
- const char kSetCC[] = "cubic";
- ASSERT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION,
- &kSetCC, strlen(kSetCC)),
- SyscallSucceedsWithValue(0));
-
- char got_cc[sizeof(kSetCC)];
- socklen_t optlen = sizeof(got_cc);
- ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION,
- &got_cc, &optlen),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(0, memcmp(got_cc, kSetCC, sizeof(got_cc)));
- }
-}
-
-TEST_P(TCPSocketPairTest, SetGetTCPCongestionLargeReadBuffer) {
- // This is Linux's net/tcp.h TCP_CA_NAME_MAX.
- const int kTcpCaNameMax = 16;
-
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- {
- // Verify that getsockopt works with buffers larger than
- // kTcpCaNameMax.
- const char kSetCC[] = "cubic";
- ASSERT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION,
- &kSetCC, strlen(kSetCC)),
- SyscallSucceedsWithValue(0));
-
- char got_cc[kTcpCaNameMax + 5];
- socklen_t optlen = sizeof(got_cc);
- ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION,
- &got_cc, &optlen),
- SyscallSucceedsWithValue(0));
- // Linux copies the minimum of kTcpCaNameMax or the length of the passed in
- // buffer and sets optlen to the number of bytes actually copied
- // irrespective of the actual length of the congestion control name.
- EXPECT_EQ(kTcpCaNameMax, optlen);
- EXPECT_EQ(0, memcmp(got_cc, kSetCC, sizeof(kSetCC)));
- }
-}
-
-TEST_P(TCPSocketPairTest, SetCongestionControlFailsForUnsupported) {
- // This is Linux's net/tcp.h TCP_CA_NAME_MAX.
- const int kTcpCaNameMax = 16;
-
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- char old_cc[kTcpCaNameMax];
- socklen_t optlen = sizeof(old_cc);
- ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION,
- &old_cc, &optlen),
- SyscallSucceedsWithValue(0));
-
- const char kSetCC[] = "invalid_ca_cc";
- ASSERT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION,
- &kSetCC, strlen(kSetCC)),
- SyscallFailsWithErrno(ENOENT));
-
- char got_cc[kTcpCaNameMax];
- optlen = sizeof(got_cc);
- ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION,
- &got_cc, &optlen),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(0, memcmp(got_cc, old_cc, sizeof(old_cc)));
-}
-
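The congestion-control tests above set and read TCP_CONGESTION by name; a minimal sketch follows (the availability of "cubic" in the running stack is an assumption).

// Sketch: select a congestion control algorithm by name.
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

void UseCubic(int fd) {
  const char cc[] = "cubic";
  if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, cc, strlen(cc)) != 0) {
    perror("setsockopt(TCP_CONGESTION)");  // ENOENT for unknown algorithms.
    return;
  }
  char got[16] = {};  // TCP_CA_NAME_MAX is 16 in Linux's net/tcp.h.
  socklen_t len = sizeof(got);
  if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, got, &len) == 0) {
    printf("congestion control: %s\n", got);
  }
}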
-// Linux and Netstack both default to a 60s TCP_LINGER2 timeout.
-constexpr int kDefaultTCPLingerTimeout = 60;
-// On Linux, the maximum linger2 timeout was changed from 60sec to 120sec.
-constexpr int kMaxTCPLingerTimeout = 120;
-constexpr int kOldMaxTCPLingerTimeout = 60;
-
-TEST_P(TCPSocketPairTest, TCPLingerTimeoutDefault) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- EXPECT_THAT(
- getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_LINGER2, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kDefaultTCPLingerTimeout);
-}
-
-TEST_P(TCPSocketPairTest, SetTCPLingerTimeoutLessThanZero) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- constexpr int kNegative = -1234;
- EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_LINGER2,
- &kNegative, sizeof(kNegative)),
- SyscallSucceedsWithValue(0));
- int get = INT_MAX;
- socklen_t get_len = sizeof(get);
- EXPECT_THAT(
- getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_LINGER2, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, -1);
-}
-
-TEST_P(TCPSocketPairTest, SetTCPLingerTimeoutZero) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- constexpr int kZero = 0;
- EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_LINGER2, &kZero,
- sizeof(kZero)),
- SyscallSucceedsWithValue(0));
- int get = -1;
- socklen_t get_len = sizeof(get);
- EXPECT_THAT(
- getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_LINGER2, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_THAT(get,
- AnyOf(Eq(kMaxTCPLingerTimeout), Eq(kOldMaxTCPLingerTimeout)));
-}
-
-TEST_P(TCPSocketPairTest, SetTCPLingerTimeoutAboveMax) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- // On Linux, values above net.ipv4.tcp_fin_timeout are capped to
- // tcp_fin_timeout (which defaults to 60 seconds).
- constexpr int kAboveDefault = kMaxTCPLingerTimeout + 1;
- EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_LINGER2,
- &kAboveDefault, sizeof(kAboveDefault)),
- SyscallSucceedsWithValue(0));
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- EXPECT_THAT(
- getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_LINGER2, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- if (IsRunningOnGvisor()) {
- EXPECT_EQ(get, kMaxTCPLingerTimeout);
- } else {
- EXPECT_THAT(get,
- AnyOf(Eq(kMaxTCPLingerTimeout), Eq(kOldMaxTCPLingerTimeout)));
- }
-}
-
-TEST_P(TCPSocketPairTest, SetTCPLingerTimeout) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- // On Linux, values above net.ipv4.tcp_fin_timeout are capped to
- // tcp_fin_timeout (which defaults to 60 seconds).
- constexpr int kTCPLingerTimeout = kDefaultTCPLingerTimeout - 1;
- EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_LINGER2,
- &kTCPLingerTimeout, sizeof(kTCPLingerTimeout)),
- SyscallSucceedsWithValue(0));
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- EXPECT_THAT(
- getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_LINGER2, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kTCPLingerTimeout);
-}
-
-TEST_P(TCPSocketPairTest, TestTCPCloseWithData) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ScopedThread t([&]() {
- // Close one end to trigger sending of a FIN.
- ASSERT_THAT(shutdown(sockets->second_fd(), SHUT_WR), SyscallSucceeds());
- char buf[3];
- ASSERT_THAT(read(sockets->second_fd(), buf, 3),
- SyscallSucceedsWithValue(3));
- absl::SleepFor(absl::Milliseconds(50));
- ASSERT_THAT(close(sockets->release_second_fd()), SyscallSucceeds());
- });
-
- absl::SleepFor(absl::Milliseconds(50));
- // Send some data then close.
- constexpr char kStr[] = "abc";
- ASSERT_THAT(write(sockets->first_fd(), kStr, 3), SyscallSucceedsWithValue(3));
- t.Join();
- ASSERT_THAT(close(sockets->release_first_fd()), SyscallSucceeds());
-}
-
-TEST_P(TCPSocketPairTest, TCPUserTimeoutDefault) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_USER_TIMEOUT,
- &get, &get_len),
- SyscallSucceeds());
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, 0); // 0 ms (disabled).
-}
-
-TEST_P(TCPSocketPairTest, SetTCPUserTimeoutZero) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- constexpr int kZero = 0;
- ASSERT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_USER_TIMEOUT,
- &kZero, sizeof(kZero)),
- SyscallSucceeds());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_USER_TIMEOUT,
- &get, &get_len),
- SyscallSucceeds());
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, 0); // 0 ms (disabled).
-}
-
-TEST_P(TCPSocketPairTest, SetTCPUserTimeoutBelowZero) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- constexpr int kNeg = -10;
- EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_USER_TIMEOUT,
- &kNeg, sizeof(kNeg)),
- SyscallFailsWithErrno(EINVAL));
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_USER_TIMEOUT,
- &get, &get_len),
- SyscallSucceeds());
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, 0); // 0 ms (disabled).
-}
-
-TEST_P(TCPSocketPairTest, SetTCPUserTimeoutAboveZero) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- constexpr int kAbove = 10;
- ASSERT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_USER_TIMEOUT,
- &kAbove, sizeof(kAbove)),
- SyscallSucceeds());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_USER_TIMEOUT,
- &get, &get_len),
- SyscallSucceeds());
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kAbove);
-}
-
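The TCP_USER_TIMEOUT tests above treat the option as a millisecond value that must be non-negative; a minimal sketch follows (the helper name and the caller-supplied timeout are assumptions).

// Sketch: bound how long sent data may stay unacknowledged.
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

int SetUserTimeout(int fd, unsigned int timeout_ms) {
  // 0 restores the default behavior; positive values are in milliseconds.
  return setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &timeout_ms,
                    sizeof(timeout_ms));
}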
-#ifdef __linux__
-TEST_P(TCPSocketPairTest, SpliceFromPipe) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- FileDescriptor rfd(fds[0]);
- FileDescriptor wfd(fds[1]);
-
- // Fill with some random data.
- std::vector<char> buf(kPageSize / 2);
- RandomizeBuffer(buf.data(), buf.size());
- ASSERT_THAT(write(wfd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(buf.size()));
-
- EXPECT_THAT(
- splice(rfd.get(), nullptr, sockets->first_fd(), nullptr, kPageSize, 0),
- SyscallSucceedsWithValue(buf.size()));
-
- std::vector<char> rbuf(buf.size());
- ASSERT_THAT(read(sockets->second_fd(), rbuf.data(), rbuf.size()),
- SyscallSucceedsWithValue(buf.size()));
- EXPECT_EQ(memcmp(rbuf.data(), buf.data(), buf.size()), 0);
-}
-
-TEST_P(TCPSocketPairTest, SpliceToPipe) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- FileDescriptor rfd(fds[0]);
- FileDescriptor wfd(fds[1]);
-
- // Fill with some random data.
- std::vector<char> buf(kPageSize / 2);
- RandomizeBuffer(buf.data(), buf.size());
- ASSERT_THAT(write(sockets->first_fd(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(buf.size()));
- shutdown(sockets->first_fd(), SHUT_WR);
- EXPECT_THAT(
- splice(sockets->second_fd(), nullptr, wfd.get(), nullptr, kPageSize, 0),
- SyscallSucceedsWithValue(buf.size()));
-
- std::vector<char> rbuf(buf.size());
- ASSERT_THAT(read(rfd.get(), rbuf.data(), rbuf.size()),
- SyscallSucceedsWithValue(buf.size()));
- EXPECT_EQ(memcmp(rbuf.data(), buf.data(), buf.size()), 0);
-}
-
-#include <sys/sendfile.h>
-
-TEST_P(TCPSocketPairTest, SendfileFromRegularFileSucceeds) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor in_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDWR));
- // Fill with some random data.
- std::vector<char> buf(kPageSize / 2);
- RandomizeBuffer(buf.data(), buf.size());
- ASSERT_THAT(pwrite(in_fd.get(), buf.data(), buf.size(), 0),
- SyscallSucceedsWithValue(buf.size()));
-
- EXPECT_THAT(
- sendfile(sockets->first_fd(), in_fd.get(), nullptr, buf.size() + 1),
- SyscallSucceedsWithValue(buf.size()));
- std::vector<char> rbuf(buf.size() + 1);
- ASSERT_THAT(read(sockets->second_fd(), rbuf.data(), rbuf.size()),
- SyscallSucceedsWithValue(buf.size()));
- EXPECT_EQ(memcmp(rbuf.data(), buf.data(), buf.size()), 0);
-}
-#endif // __linux__
-
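The splice and sendfile tests above move data into and out of a connected socket without a userspace copy; a minimal Linux-only sendfile sketch follows (path handling and error reporting are trimmed, and the helper name is illustrative).

// Sketch: stream a regular file into a connected socket with sendfile(2).
#include <fcntl.h>
#include <sys/sendfile.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

void SendFileToSocket(int sock_fd, const char* path) {
  int in_fd = open(path, O_RDONLY);
  if (in_fd < 0) return;
  struct stat st;
  if (fstat(in_fd, &st) == 0) {
    off_t offset = 0;
    while (offset < st.st_size) {
      ssize_t n = sendfile(sock_fd, in_fd, &offset, st.st_size - offset);
      if (n <= 0) break;  // Error or the peer stopped reading.
    }
  }
  close(in_fd);
}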
-TEST_P(TCPSocketPairTest, SetTCPWindowClampBelowMinRcvBufConnectedSocket) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- // Discover the minimum receive buffer size by setting SO_RCVBUF to a
- // very low value.
- constexpr int kZero = 0;
- EXPECT_THAT(setsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVBUF, &kZero,
- sizeof(kZero)),
- SyscallSucceeds());
-
- // Now retrieve the minimum value for SO_RCVBUF as the set above should
- // have caused SO_RCVBUF for the socket to be set to the minimum.
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(
- getsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVBUF, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- int min_so_rcvbuf = get;
-
- {
- // Setting TCP_WINDOW_CLAMP to zero for a connected socket is not permitted.
- constexpr int kZero = 0;
- EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_WINDOW_CLAMP,
- &kZero, sizeof(kZero)),
- SyscallFailsWithErrno(EINVAL));
-
- // Non-zero clamp values below MIN_SO_RCVBUF/2 should result in the clamp
- // being set to MIN_SO_RCVBUF/2.
- int below_half_min_so_rcvbuf = min_so_rcvbuf / 2 - 1;
- EXPECT_THAT(
- setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_WINDOW_CLAMP,
- &below_half_min_so_rcvbuf, sizeof(below_half_min_so_rcvbuf)),
- SyscallSucceeds());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
-
- ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_WINDOW_CLAMP,
- &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(min_so_rcvbuf / 2, get);
- }
-}
-
-TEST_P(TCPSocketPairTest, IpMulticastTtlDefault) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- EXPECT_THAT(getsockopt(sockets->first_fd(), IPPROTO_IP, IP_MULTICAST_TTL,
- &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_GT(get, 0);
-}
-
-TEST_P(TCPSocketPairTest, IpMulticastLoopDefault) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- EXPECT_THAT(getsockopt(sockets->first_fd(), IPPROTO_IP, IP_MULTICAST_LOOP,
- &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, 1);
-}
-
-TEST_P(TCPSocketPairTest, TCPResetDuringClose) {
- DisableSave ds; // Too many syscalls.
- constexpr int kThreadCount = 100;
- std::unique_ptr<ScopedThread> instances[kThreadCount];
- for (int i = 0; i < kThreadCount; i++) {
- instances[i] = absl::make_unique<ScopedThread>([&]() {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ScopedThread t([&]() {
- // Close one end to trigger sending of a FIN.
- struct pollfd poll_fd = {sockets->second_fd(), POLLIN | POLLHUP, 0};
- // Wait up to 20 seconds for the data.
- constexpr int kPollTimeoutMs = 20000;
- ASSERT_THAT(RetryEINTR(poll)(&poll_fd, 1, kPollTimeoutMs),
- SyscallSucceedsWithValue(1));
- ASSERT_THAT(close(sockets->release_second_fd()), SyscallSucceeds());
- });
-
- // Send some data then close.
- constexpr char kStr[] = "abc";
- ASSERT_THAT(write(sockets->first_fd(), kStr, 3),
- SyscallSucceedsWithValue(3));
- absl::SleepFor(absl::Milliseconds(10));
- ASSERT_THAT(close(sockets->release_first_fd()), SyscallSucceeds());
- t.Join();
- });
- }
- for (int i = 0; i < kThreadCount; i++) {
- instances[i]->Join();
- }
-}
-
-// Test setsockopt and getsockopt for a socket with SO_LINGER option.
-TEST_P(TCPSocketPairTest, SetAndGetLingerOption) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- // Check getsockopt before SO_LINGER option is set.
- struct linger got_linger = {-1, -1};
- socklen_t got_len = sizeof(got_linger);
-
- ASSERT_THAT(getsockopt(sockets->first_fd(), SOL_SOCKET, SO_LINGER,
- &got_linger, &got_len),
- SyscallSucceeds());
- ASSERT_THAT(got_len, sizeof(got_linger));
- struct linger want_linger = {};
- EXPECT_EQ(0, memcmp(&want_linger, &got_linger, got_len));
-
- // Set and get SO_LINGER with negative values.
- struct linger sl;
- sl.l_onoff = 1;
- sl.l_linger = -3;
- ASSERT_THAT(
- setsockopt(sockets->first_fd(), SOL_SOCKET, SO_LINGER, &sl, sizeof(sl)),
- SyscallSucceeds());
- ASSERT_THAT(getsockopt(sockets->first_fd(), SOL_SOCKET, SO_LINGER,
- &got_linger, &got_len),
- SyscallSucceeds());
- ASSERT_EQ(got_len, sizeof(got_linger));
- EXPECT_EQ(sl.l_onoff, got_linger.l_onoff);
- // Linux returns a different value because it uses HZ to convert the seconds
- // to jiffies, which overflows for negative values. We want the getsockopt
- // return value to be compatible with Linux.
- if (IsRunningOnGvisor()) {
- EXPECT_EQ(sl.l_linger, got_linger.l_linger);
- }
-
- // Set and get SO_LINGER option with positive values.
- sl.l_onoff = 1;
- sl.l_linger = 5;
- ASSERT_THAT(
- setsockopt(sockets->first_fd(), SOL_SOCKET, SO_LINGER, &sl, sizeof(sl)),
- SyscallSucceeds());
- ASSERT_THAT(getsockopt(sockets->first_fd(), SOL_SOCKET, SO_LINGER,
- &got_linger, &got_len),
- SyscallSucceeds());
- ASSERT_EQ(got_len, sizeof(got_linger));
- EXPECT_EQ(0, memcmp(&sl, &got_linger, got_len));
-}
-
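The SO_LINGER tests above exercise both enabling and disabling the option; a minimal sketch of the common graceful-close pattern follows (the 5 second budget is illustrative).

// Sketch: block close() until queued data drains, for at most 5 seconds.
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

void CloseWithLinger(int fd) {
  struct linger lg;
  memset(&lg, 0, sizeof(lg));
  lg.l_onoff = 1;   // Linger on close.
  lg.l_linger = 5;  // Seconds to wait for unsent data.
  setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
  close(fd);  // May block up to lg.l_linger seconds while data is flushed.
}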
-// Test socket to disable SO_LINGER option.
-TEST_P(TCPSocketPairTest, SetOffLingerOption) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- // Set the SO_LINGER option.
- struct linger sl;
- sl.l_onoff = 1;
- sl.l_linger = 5;
- ASSERT_THAT(
- setsockopt(sockets->first_fd(), SOL_SOCKET, SO_LINGER, &sl, sizeof(sl)),
- SyscallSucceeds());
-
- // Check getsockopt after SO_LINGER option is set.
- struct linger got_linger = {-1, -1};
- socklen_t got_len = sizeof(got_linger);
- ASSERT_THAT(getsockopt(sockets->first_fd(), SOL_SOCKET, SO_LINGER,
- &got_linger, &got_len),
- SyscallSucceeds());
- ASSERT_EQ(got_len, sizeof(got_linger));
- EXPECT_EQ(0, memcmp(&sl, &got_linger, got_len));
-
- sl.l_onoff = 0;
- sl.l_linger = 5;
- ASSERT_THAT(
- setsockopt(sockets->first_fd(), SOL_SOCKET, SO_LINGER, &sl, sizeof(sl)),
- SyscallSucceeds());
-
- // Check getsockopt after SO_LINGER option is set to zero.
- ASSERT_THAT(getsockopt(sockets->first_fd(), SOL_SOCKET, SO_LINGER,
- &got_linger, &got_len),
- SyscallSucceeds());
- ASSERT_EQ(got_len, sizeof(got_linger));
- EXPECT_EQ(0, memcmp(&sl, &got_linger, got_len));
-}
-
-// Test close on dup'd socket with SO_LINGER option set.
-TEST_P(TCPSocketPairTest, CloseWithLingerOption) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- // Set the SO_LINGER option.
- struct linger sl;
- sl.l_onoff = 1;
- sl.l_linger = 5;
- ASSERT_THAT(
- setsockopt(sockets->first_fd(), SOL_SOCKET, SO_LINGER, &sl, sizeof(sl)),
- SyscallSucceeds());
-
- // Check getsockopt after SO_LINGER option is set.
- struct linger got_linger = {-1, -1};
- socklen_t got_len = sizeof(got_linger);
- ASSERT_THAT(getsockopt(sockets->first_fd(), SOL_SOCKET, SO_LINGER,
- &got_linger, &got_len),
- SyscallSucceeds());
- ASSERT_EQ(got_len, sizeof(got_linger));
- EXPECT_EQ(0, memcmp(&sl, &got_linger, got_len));
-
- FileDescriptor dupFd = FileDescriptor(dup(sockets->first_fd()));
- ASSERT_THAT(close(sockets->release_first_fd()), SyscallSucceeds());
- char buf[10] = {};
- // Write on dupFd should succeed as the socket will not be closed until
- // all references to it are removed.
- ASSERT_THAT(RetryEINTR(write)(dupFd.get(), buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
- ASSERT_THAT(RetryEINTR(write)(sockets->first_fd(), buf, sizeof(buf)),
- SyscallFailsWithErrno(EBADF));
-
- // Close the socket.
- dupFd.reset();
- // Write on dupFd should now fail as the last reference to the socket is gone.
- ASSERT_THAT(RetryEINTR(write)(dupFd.get(), buf, sizeof(buf)),
- SyscallFailsWithErrno(EBADF));
-}
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_ip_tcp_generic.h b/test/syscalls/linux/socket_ip_tcp_generic.h
deleted file mode 100644
index a3eff3c73..000000000
--- a/test/syscalls/linux/socket_ip_tcp_generic.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_LINUX_SOCKET_IP_TCP_GENERIC_H_
-#define GVISOR_TEST_SYSCALLS_LINUX_SOCKET_IP_TCP_GENERIC_H_
-
-#include "test/syscalls/linux/socket_test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Test fixture for tests that apply to pairs of connected TCP sockets.
-using TCPSocketPairTest = SocketPairTest;
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_LINUX_SOCKET_IP_TCP_GENERIC_H_
diff --git a/test/syscalls/linux/socket_ip_tcp_generic_loopback.cc b/test/syscalls/linux/socket_ip_tcp_generic_loopback.cc
deleted file mode 100644
index 4e79d21f4..000000000
--- a/test/syscalls/linux/socket_ip_tcp_generic_loopback.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <netinet/tcp.h>
-
-#include <vector>
-
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_ip_tcp_generic.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-std::vector<SocketPairKind> GetSocketPairs() {
- return ApplyVecToVec<SocketPairKind>(
- std::vector<Middleware>{
- NoOp, SetSockOpt(IPPROTO_TCP, TCP_NODELAY, &kSockOptOn)},
- std::vector<SocketPairKind>{
- IPv6TCPAcceptBindSocketPair(0),
- IPv4TCPAcceptBindSocketPair(0),
- DualStackTCPAcceptBindSocketPair(0),
- });
-}
-
-INSTANTIATE_TEST_SUITE_P(
- AllTCPSockets, TCPSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_ip_tcp_loopback.cc b/test/syscalls/linux/socket_ip_tcp_loopback.cc
deleted file mode 100644
index 9db3037bc..000000000
--- a/test/syscalls/linux/socket_ip_tcp_loopback.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <vector>
-
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_generic.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-std::vector<SocketPairKind> GetSocketPairs() {
- return {
- IPv6TCPAcceptBindSocketPair(0),
- IPv4TCPAcceptBindSocketPair(0),
- DualStackTCPAcceptBindSocketPair(0),
- };
-}
-
-INSTANTIATE_TEST_SUITE_P(
- AllUnixDomainSockets, AllSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_ip_tcp_loopback_blocking.cc b/test/syscalls/linux/socket_ip_tcp_loopback_blocking.cc
deleted file mode 100644
index f996b93d2..000000000
--- a/test/syscalls/linux/socket_ip_tcp_loopback_blocking.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <netinet/tcp.h>
-
-#include <vector>
-
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_stream_blocking.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-std::vector<SocketPairKind> GetSocketPairs() {
- return ApplyVecToVec<SocketPairKind>(
- std::vector<Middleware>{
- NoOp, SetSockOpt(IPPROTO_TCP, TCP_NODELAY, &kSockOptOn)},
- std::vector<SocketPairKind>{
- IPv6TCPAcceptBindSocketPair(0),
- IPv4TCPAcceptBindSocketPair(0),
- DualStackTCPAcceptBindSocketPair(0),
- });
-}
-
-INSTANTIATE_TEST_SUITE_P(
- BlockingTCPSockets, BlockingStreamSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_ip_tcp_loopback_nonblock.cc b/test/syscalls/linux/socket_ip_tcp_loopback_nonblock.cc
deleted file mode 100644
index ffa377210..000000000
--- a/test/syscalls/linux/socket_ip_tcp_loopback_nonblock.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <netinet/tcp.h>
-
-#include <vector>
-
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_non_blocking.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-std::vector<SocketPairKind> GetSocketPairs() {
- return ApplyVecToVec<SocketPairKind>(
- std::vector<Middleware>{
- NoOp, SetSockOpt(IPPROTO_TCP, TCP_NODELAY, &kSockOptOn)},
- std::vector<SocketPairKind>{
- IPv6TCPAcceptBindSocketPair(SOCK_NONBLOCK),
- IPv4TCPAcceptBindSocketPair(SOCK_NONBLOCK),
- });
-}
-
-INSTANTIATE_TEST_SUITE_P(
- NonBlockingTCPSockets, NonBlockingSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_ip_tcp_udp_generic.cc b/test/syscalls/linux/socket_ip_tcp_udp_generic.cc
deleted file mode 100644
index f178f1af9..000000000
--- a/test/syscalls/linux/socket_ip_tcp_udp_generic.cc
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <netinet/in.h>
-#include <netinet/tcp.h>
-#include <poll.h>
-#include <stdio.h>
-#include <sys/ioctl.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <sys/un.h>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// Test fixture for tests that apply to pairs of TCP and UDP sockets.
-using TcpUdpSocketPairTest = SocketPairTest;
-
-TEST_P(TcpUdpSocketPairTest, ShutdownWrFollowedBySendIsError) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- // Now shutdown the write end of the first.
- ASSERT_THAT(shutdown(sockets->first_fd(), SHUT_WR), SyscallSucceeds());
-
- char buf[10] = {};
- ASSERT_THAT(RetryEINTR(send)(sockets->first_fd(), buf, sizeof(buf), 0),
- SyscallFailsWithErrno(EPIPE));
-}
-
-std::vector<SocketPairKind> GetSocketPairs() {
- return VecCat<SocketPairKind>(
- ApplyVec<SocketPairKind>(
- IPv6UDPBidirectionalBindSocketPair,
- AllBitwiseCombinations(List<int>{0, SOCK_NONBLOCK})),
- ApplyVec<SocketPairKind>(
- IPv4UDPBidirectionalBindSocketPair,
- AllBitwiseCombinations(List<int>{0, SOCK_NONBLOCK})),
- ApplyVec<SocketPairKind>(
- DualStackUDPBidirectionalBindSocketPair,
- AllBitwiseCombinations(List<int>{0, SOCK_NONBLOCK})),
- ApplyVec<SocketPairKind>(
- IPv6TCPAcceptBindSocketPair,
- AllBitwiseCombinations(List<int>{0, SOCK_NONBLOCK})),
- ApplyVec<SocketPairKind>(
- IPv4TCPAcceptBindSocketPair,
- AllBitwiseCombinations(List<int>{0, SOCK_NONBLOCK})),
- ApplyVec<SocketPairKind>(
- DualStackTCPAcceptBindSocketPair,
- AllBitwiseCombinations(List<int>{0, SOCK_NONBLOCK})));
-}
-
-INSTANTIATE_TEST_SUITE_P(
- AllIPSockets, TcpUdpSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_ip_udp_generic.cc b/test/syscalls/linux/socket_ip_udp_generic.cc
deleted file mode 100644
index 1694e188a..000000000
--- a/test/syscalls/linux/socket_ip_udp_generic.cc
+++ /dev/null
@@ -1,545 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/socket_ip_udp_generic.h"
-
-#include <errno.h>
-#ifdef __linux__
-#include <linux/in6.h>
-#endif // __linux__
-#include <netinet/in.h>
-#include <netinet/tcp.h>
-#include <poll.h>
-#include <stdio.h>
-#include <sys/ioctl.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <sys/un.h>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-TEST_P(UDPSocketPairTest, MulticastTTLDefault) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_IP, IP_MULTICAST_TTL,
- &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, 1);
-}
-
-TEST_P(UDPSocketPairTest, SetUDPMulticastTTLMin) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- constexpr int kMin = 0;
- EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_IP, IP_MULTICAST_TTL,
- &kMin, sizeof(kMin)),
- SyscallSucceeds());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_IP, IP_MULTICAST_TTL,
- &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kMin);
-}
-
-TEST_P(UDPSocketPairTest, SetUDPMulticastTTLMax) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- constexpr int kMax = 255;
- EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_IP, IP_MULTICAST_TTL,
- &kMax, sizeof(kMax)),
- SyscallSucceeds());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_IP, IP_MULTICAST_TTL,
- &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kMax);
-}
-
-TEST_P(UDPSocketPairTest, SetUDPMulticastTTLNegativeOne) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- constexpr int kArbitrary = 6;
- EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_IP, IP_MULTICAST_TTL,
- &kArbitrary, sizeof(kArbitrary)),
- SyscallSucceeds());
-
- constexpr int kNegOne = -1;
- EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_IP, IP_MULTICAST_TTL,
- &kNegOne, sizeof(kNegOne)),
- SyscallSucceeds());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_IP, IP_MULTICAST_TTL,
- &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, 1);
-}
-
-TEST_P(UDPSocketPairTest, SetUDPMulticastTTLBelowMin) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- constexpr int kBelowMin = -2;
- EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_IP, IP_MULTICAST_TTL,
- &kBelowMin, sizeof(kBelowMin)),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_P(UDPSocketPairTest, SetUDPMulticastTTLAboveMax) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- constexpr int kAboveMax = 256;
- EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_IP, IP_MULTICAST_TTL,
- &kAboveMax, sizeof(kAboveMax)),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_P(UDPSocketPairTest, SetUDPMulticastTTLChar) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- constexpr char kArbitrary = 6;
- EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_IP, IP_MULTICAST_TTL,
- &kArbitrary, sizeof(kArbitrary)),
- SyscallSucceeds());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_IP, IP_MULTICAST_TTL,
- &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kArbitrary);
-}
-
-TEST_P(UDPSocketPairTest, SetEmptyIPAddMembership) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct ip_mreqn req = {};
- EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_IP, IP_ADD_MEMBERSHIP,
- &req, sizeof(req)),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_P(UDPSocketPairTest, MulticastLoopDefault) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_IP, IP_MULTICAST_LOOP,
- &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOn);
-}
-
-TEST_P(UDPSocketPairTest, SetMulticastLoop) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(setsockopt(sockets->first_fd(), IPPROTO_IP, IP_MULTICAST_LOOP,
- &kSockOptOff, sizeof(kSockOptOff)),
- SyscallSucceeds());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_IP, IP_MULTICAST_LOOP,
- &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOff);
-
- ASSERT_THAT(setsockopt(sockets->first_fd(), IPPROTO_IP, IP_MULTICAST_LOOP,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_IP, IP_MULTICAST_LOOP,
- &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOn);
-}
-
-TEST_P(UDPSocketPairTest, SetMulticastLoopChar) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- constexpr char kSockOptOnChar = kSockOptOn;
- constexpr char kSockOptOffChar = kSockOptOff;
-
- ASSERT_THAT(setsockopt(sockets->first_fd(), IPPROTO_IP, IP_MULTICAST_LOOP,
- &kSockOptOffChar, sizeof(kSockOptOffChar)),
- SyscallSucceeds());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_IP, IP_MULTICAST_LOOP,
- &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOff);
-
- ASSERT_THAT(setsockopt(sockets->first_fd(), IPPROTO_IP, IP_MULTICAST_LOOP,
- &kSockOptOnChar, sizeof(kSockOptOnChar)),
- SyscallSucceeds());
-
- ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_IP, IP_MULTICAST_LOOP,
- &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOn);
-}
-
-TEST_P(UDPSocketPairTest, ReuseAddrDefault) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(
- getsockopt(sockets->first_fd(), SOL_SOCKET, SO_REUSEADDR, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOff);
-}
-
-TEST_P(UDPSocketPairTest, SetReuseAddr) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(setsockopt(sockets->first_fd(), SOL_SOCKET, SO_REUSEADDR,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(
- getsockopt(sockets->first_fd(), SOL_SOCKET, SO_REUSEADDR, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOn);
-
- ASSERT_THAT(setsockopt(sockets->first_fd(), SOL_SOCKET, SO_REUSEADDR,
- &kSockOptOff, sizeof(kSockOptOff)),
- SyscallSucceeds());
-
- ASSERT_THAT(
- getsockopt(sockets->first_fd(), SOL_SOCKET, SO_REUSEADDR, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOff);
-}
-
-TEST_P(UDPSocketPairTest, ReusePortDefault) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(
- getsockopt(sockets->first_fd(), SOL_SOCKET, SO_REUSEPORT, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOff);
-}
-
-TEST_P(UDPSocketPairTest, SetReusePort) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(setsockopt(sockets->first_fd(), SOL_SOCKET, SO_REUSEPORT,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(
- getsockopt(sockets->first_fd(), SOL_SOCKET, SO_REUSEPORT, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOn);
-
- ASSERT_THAT(setsockopt(sockets->first_fd(), SOL_SOCKET, SO_REUSEPORT,
- &kSockOptOff, sizeof(kSockOptOff)),
- SyscallSucceeds());
-
- ASSERT_THAT(
- getsockopt(sockets->first_fd(), SOL_SOCKET, SO_REUSEPORT, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOff);
-}
-
-TEST_P(UDPSocketPairTest, SetReuseAddrReusePort) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(setsockopt(sockets->first_fd(), SOL_SOCKET, SO_REUSEADDR,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- ASSERT_THAT(setsockopt(sockets->first_fd(), SOL_SOCKET, SO_REUSEPORT,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(
- getsockopt(sockets->first_fd(), SOL_SOCKET, SO_REUSEADDR, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOn);
-
- ASSERT_THAT(
- getsockopt(sockets->first_fd(), SOL_SOCKET, SO_REUSEPORT, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOn);
-}
-
-// Test getsockopt on a socket that does not have the IP_PKTINFO option set.
-TEST_P(UDPSocketPairTest, IPPKTINFODefault) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
-
- ASSERT_THAT(
- getsockopt(sockets->first_fd(), SOL_IP, IP_PKTINFO, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOff);
-}
-
-// Test setsockopt and getsockopt for a socket with IP_PKTINFO option.
-TEST_P(UDPSocketPairTest, SetAndGetIPPKTINFO) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int level = SOL_IP;
- int type = IP_PKTINFO;
-
- // Check getsockopt before IP_PKTINFO is set.
- int get = -1;
- socklen_t get_len = sizeof(get);
-
- ASSERT_THAT(setsockopt(sockets->first_fd(), level, type, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceedsWithValue(0));
-
- ASSERT_THAT(getsockopt(sockets->first_fd(), level, type, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get, kSockOptOn);
- EXPECT_EQ(get_len, sizeof(get));
-
- ASSERT_THAT(setsockopt(sockets->first_fd(), level, type, &kSockOptOff,
- sizeof(kSockOptOff)),
- SyscallSucceedsWithValue(0));
-
- ASSERT_THAT(getsockopt(sockets->first_fd(), level, type, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get, kSockOptOff);
- EXPECT_EQ(get_len, sizeof(get));
-}
-
-// Test getsockopt on a socket that does not have the IP_RECVORIGDSTADDR option set.
-TEST_P(UDPSocketPairTest, ReceiveOrigDstAddrDefault) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- int level = SOL_IP;
- int type = IP_RECVORIGDSTADDR;
- if (sockets->first_addr()->sa_family == AF_INET6) {
- level = SOL_IPV6;
- type = IPV6_RECVORIGDSTADDR;
- }
- ASSERT_THAT(getsockopt(sockets->first_fd(), level, type, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOff);
-}
-
-// Test setsockopt and getsockopt for a socket with IP_RECVORIGDSTADDR option.
-TEST_P(UDPSocketPairTest, SetAndGetReceiveOrigDstAddr) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int level = SOL_IP;
- int type = IP_RECVORIGDSTADDR;
- if (sockets->first_addr()->sa_family == AF_INET6) {
- level = SOL_IPV6;
- type = IPV6_RECVORIGDSTADDR;
- }
-
- // Check getsockopt before the option is set.
- int get = -1;
- socklen_t get_len = sizeof(get);
-
- ASSERT_THAT(setsockopt(sockets->first_fd(), level, type, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceedsWithValue(0));
-
- ASSERT_THAT(getsockopt(sockets->first_fd(), level, type, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get, kSockOptOn);
- EXPECT_EQ(get_len, sizeof(get));
-
- ASSERT_THAT(setsockopt(sockets->first_fd(), level, type, &kSockOptOff,
- sizeof(kSockOptOff)),
- SyscallSucceedsWithValue(0));
-
- ASSERT_THAT(getsockopt(sockets->first_fd(), level, type, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get, kSockOptOff);
- EXPECT_EQ(get_len, sizeof(get));
-}
-
-// Holds TOS or TClass information for IPv4 or IPv6 respectively.
-struct RecvTosOption {
- int level;
- int option;
-};
-
-RecvTosOption GetRecvTosOption(int domain) {
- TEST_CHECK(domain == AF_INET || domain == AF_INET6);
- RecvTosOption opt;
- switch (domain) {
- case AF_INET:
- opt.level = IPPROTO_IP;
- opt.option = IP_RECVTOS;
- break;
- case AF_INET6:
- opt.level = IPPROTO_IPV6;
- opt.option = IPV6_RECVTCLASS;
- break;
- }
- return opt;
-}
-
-// Ensure that Receiving TOS or TCLASS is off by default.
-TEST_P(UDPSocketPairTest, RecvTosDefault) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- RecvTosOption t = GetRecvTosOption(GetParam().domain);
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(
- getsockopt(sockets->first_fd(), t.level, t.option, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOff);
-}
-
-// Test that setting and getting IP_RECVTOS or IPV6_RECVTCLASS works as
-// expected.
-TEST_P(UDPSocketPairTest, SetRecvTos) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- RecvTosOption t = GetRecvTosOption(GetParam().domain);
-
- ASSERT_THAT(setsockopt(sockets->first_fd(), t.level, t.option, &kSockOptOff,
- sizeof(kSockOptOff)),
- SyscallSucceeds());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(
- getsockopt(sockets->first_fd(), t.level, t.option, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOff);
-
- ASSERT_THAT(setsockopt(sockets->first_fd(), t.level, t.option, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- ASSERT_THAT(
- getsockopt(sockets->first_fd(), t.level, t.option, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOn);
-}
-
-// Test that any socket (including IPv6 only) accepts the IPv4 TOS option: this
-// mirrors behavior in linux.
-TEST_P(UDPSocketPairTest, TOSRecvMismatch) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- RecvTosOption t = GetRecvTosOption(AF_INET);
- int get = -1;
- socklen_t get_len = sizeof(get);
-
- ASSERT_THAT(
- getsockopt(sockets->first_fd(), t.level, t.option, &get, &get_len),
- SyscallSucceedsWithValue(0));
-}
-
-// Test that an IPv4 socket does not support the IPv6 TClass option.
-TEST_P(UDPSocketPairTest, TClassRecvMismatch) {
- // This should only test AF_INET6 sockets for the mismatch behavior.
- SKIP_IF(GetParam().domain != AF_INET6);
- // IPV6_RECVTCLASS is only valid for SOCK_DGRAM and SOCK_RAW.
- SKIP_IF((GetParam().type != SOCK_DGRAM) && (GetParam().type != SOCK_RAW));
-
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
-
- ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_IPV6, IPV6_RECVTCLASS,
- &get, &get_len),
- SyscallFailsWithErrno(EOPNOTSUPP));
-}
-
-// Test that the SO_LINGER option can be set and read on a UDP socket.
-TEST_P(UDPSocketPairTest, SetAndGetSocketLinger) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- int level = SOL_SOCKET;
- int type = SO_LINGER;
-
- struct linger sl;
- sl.l_onoff = 1;
- sl.l_linger = 5;
- ASSERT_THAT(setsockopt(sockets->first_fd(), level, type, &sl, sizeof(sl)),
- SyscallSucceedsWithValue(0));
-
- struct linger got_linger = {};
- socklen_t length = sizeof(sl);
- ASSERT_THAT(
- getsockopt(sockets->first_fd(), level, type, &got_linger, &length),
- SyscallSucceedsWithValue(0));
-
- ASSERT_EQ(length, sizeof(got_linger));
- EXPECT_EQ(0, memcmp(&sl, &got_linger, length));
-}
-
-// Test getsockopt for SO_ACCEPTCONN on a UDP socket.
-TEST_P(UDPSocketPairTest, GetSocketAcceptConn) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int got = -1;
- socklen_t length = sizeof(got);
- ASSERT_THAT(
- getsockopt(sockets->first_fd(), SOL_SOCKET, SO_ACCEPTCONN, &got, &length),
- SyscallSucceedsWithValue(0));
-
- ASSERT_EQ(length, sizeof(got));
- EXPECT_EQ(got, 0);
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_ip_udp_generic.h b/test/syscalls/linux/socket_ip_udp_generic.h
deleted file mode 100644
index 106c54e9f..000000000
--- a/test/syscalls/linux/socket_ip_udp_generic.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_LINUX_SOCKET_IP_UDP_GENERIC_H_
-#define GVISOR_TEST_SYSCALLS_LINUX_SOCKET_IP_UDP_GENERIC_H_
-
-#include "test/syscalls/linux/socket_test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Test fixture for tests that apply to pairs of connected UDP sockets.
-using UDPSocketPairTest = SocketPairTest;
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_LINUX_SOCKET_IP_UDP_GENERIC_H_
diff --git a/test/syscalls/linux/socket_ip_udp_loopback.cc b/test/syscalls/linux/socket_ip_udp_loopback.cc
deleted file mode 100644
index c7fa44884..000000000
--- a/test/syscalls/linux/socket_ip_udp_loopback.cc
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <vector>
-
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_generic.h"
-#include "test/syscalls/linux/socket_ip_udp_generic.h"
-#include "test/syscalls/linux/socket_non_stream.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-std::vector<SocketPairKind> GetSocketPairs() {
- return {
- IPv6UDPBidirectionalBindSocketPair(0),
- IPv4UDPBidirectionalBindSocketPair(0),
- DualStackUDPBidirectionalBindSocketPair(0),
- };
-}
-
-INSTANTIATE_TEST_SUITE_P(
- AllUDPSockets, AllSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-INSTANTIATE_TEST_SUITE_P(
- AllUDPSockets, NonStreamSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-INSTANTIATE_TEST_SUITE_P(
- AllUDPSockets, UDPSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_ip_udp_loopback_blocking.cc b/test/syscalls/linux/socket_ip_udp_loopback_blocking.cc
deleted file mode 100644
index d6925a8df..000000000
--- a/test/syscalls/linux/socket_ip_udp_loopback_blocking.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <vector>
-
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_non_stream_blocking.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-std::vector<SocketPairKind> GetSocketPairs() {
- return {
- IPv6UDPBidirectionalBindSocketPair(0),
- IPv4UDPBidirectionalBindSocketPair(0),
- };
-}
-
-INSTANTIATE_TEST_SUITE_P(
- BlockingUDPSockets, BlockingNonStreamSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_ip_udp_loopback_nonblock.cc b/test/syscalls/linux/socket_ip_udp_loopback_nonblock.cc
deleted file mode 100644
index d675eddc6..000000000
--- a/test/syscalls/linux/socket_ip_udp_loopback_nonblock.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <vector>
-
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_non_blocking.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-std::vector<SocketPairKind> GetSocketPairs() {
- return {
- IPv6UDPBidirectionalBindSocketPair(SOCK_NONBLOCK),
- IPv4UDPBidirectionalBindSocketPair(SOCK_NONBLOCK),
- };
-}
-
-INSTANTIATE_TEST_SUITE_P(
- NonBlockingUDPSockets, NonBlockingSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_ip_udp_unbound_external_networking.cc b/test/syscalls/linux/socket_ip_udp_unbound_external_networking.cc
deleted file mode 100644
index fdbb2216b..000000000
--- a/test/syscalls/linux/socket_ip_udp_unbound_external_networking.cc
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/socket_ip_udp_unbound_external_networking.h"
-
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-void IPUDPUnboundExternalNetworkingSocketTest::SetUp() {
- // FIXME(b/137899561): Linux instance for syscall tests sometimes misses its
- // IPv4 address on eth0.
- found_net_interfaces_ = false;
-
- // Get interface list.
- ASSERT_NO_ERRNO(if_helper_.Load());
- std::vector<std::string> if_names = if_helper_.InterfaceList(AF_INET);
- if (if_names.size() != 2) {
- return;
- }
-
- // Figure out which interface is where.
- std::string lo = if_names[0];
- std::string eth = if_names[1];
- if (lo != "lo") std::swap(lo, eth);
- if (lo != "lo") return;
-
- lo_if_idx_ = ASSERT_NO_ERRNO_AND_VALUE(if_helper_.GetIndex(lo));
- auto lo_if_addr = if_helper_.GetAddr(AF_INET, lo);
- if (lo_if_addr == nullptr) {
- return;
- }
- lo_if_addr_ = *reinterpret_cast<const sockaddr_in*>(lo_if_addr);
-
- eth_if_idx_ = ASSERT_NO_ERRNO_AND_VALUE(if_helper_.GetIndex(eth));
- auto eth_if_addr = if_helper_.GetAddr(AF_INET, eth);
- if (eth_if_addr == nullptr) {
- return;
- }
- eth_if_addr_ = *reinterpret_cast<const sockaddr_in*>(eth_if_addr);
-
- found_net_interfaces_ = true;
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_ip_udp_unbound_external_networking.h b/test/syscalls/linux/socket_ip_udp_unbound_external_networking.h
deleted file mode 100644
index e5287addb..000000000
--- a/test/syscalls/linux/socket_ip_udp_unbound_external_networking.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_LINUX_SOCKET_IP_UDP_UNBOUND_EXTERNAL_NETWORKING_H_
-#define GVISOR_TEST_SYSCALLS_LINUX_SOCKET_IP_UDP_UNBOUND_EXTERNAL_NETWORKING_H_
-
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Test fixture for tests that apply to unbound IP UDP sockets in a sandbox
-// with external networking support.
-class IPUDPUnboundExternalNetworkingSocketTest : public SimpleSocketTest {
- protected:
- void SetUp() override;
-
- IfAddrHelper if_helper_;
-
- // found_net_interfaces_ is set to false if SetUp() could not obtain
- // all interface infos that we need.
- bool found_net_interfaces_;
-
- // Interface infos.
- int lo_if_idx_;
- int eth_if_idx_;
- sockaddr_in lo_if_addr_;
- sockaddr_in eth_if_addr_;
-};
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_LINUX_SOCKET_IP_UDP_UNBOUND_EXTERNAL_NETWORKING_H_
diff --git a/test/syscalls/linux/socket_ip_unbound.cc b/test/syscalls/linux/socket_ip_unbound.cc
deleted file mode 100644
index 029f1e872..000000000
--- a/test/syscalls/linux/socket_ip_unbound.cc
+++ /dev/null
@@ -1,468 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <arpa/inet.h>
-#include <netinet/in.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <sys/un.h>
-
-#include <cstdio>
-#include <cstring>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Test fixture for tests that apply to unbound IP sockets.
-using IPUnboundSocketTest = SimpleSocketTest;
-
-TEST_P(IPUnboundSocketTest, TtlDefault) {
- auto socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- int get = -1;
- socklen_t get_sz = sizeof(get);
- EXPECT_THAT(getsockopt(socket->get(), IPPROTO_IP, IP_TTL, &get, &get_sz),
- SyscallSucceedsWithValue(0));
- EXPECT_TRUE(get == 64 || get == 127);
- EXPECT_EQ(get_sz, sizeof(get));
-}
-
-TEST_P(IPUnboundSocketTest, SetTtl) {
- auto socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- int get1 = -1;
- socklen_t get1_sz = sizeof(get1);
- EXPECT_THAT(getsockopt(socket->get(), IPPROTO_IP, IP_TTL, &get1, &get1_sz),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get1_sz, sizeof(get1));
-
- int set = 100;
- if (set == get1) {
- set += 1;
- }
- socklen_t set_sz = sizeof(set);
- EXPECT_THAT(setsockopt(socket->get(), IPPROTO_IP, IP_TTL, &set, set_sz),
- SyscallSucceedsWithValue(0));
-
- int get2 = -1;
- socklen_t get2_sz = sizeof(get2);
- EXPECT_THAT(getsockopt(socket->get(), IPPROTO_IP, IP_TTL, &get2, &get2_sz),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get2_sz, sizeof(get2));
- EXPECT_EQ(get2, set);
-}
-
-TEST_P(IPUnboundSocketTest, ResetTtlToDefault) {
- auto socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- int get1 = -1;
- socklen_t get1_sz = sizeof(get1);
- EXPECT_THAT(getsockopt(socket->get(), IPPROTO_IP, IP_TTL, &get1, &get1_sz),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get1_sz, sizeof(get1));
-
- int set1 = 100;
- if (set1 == get1) {
- set1 += 1;
- }
- socklen_t set1_sz = sizeof(set1);
- EXPECT_THAT(setsockopt(socket->get(), IPPROTO_IP, IP_TTL, &set1, set1_sz),
- SyscallSucceedsWithValue(0));
-
- int set2 = -1;
- socklen_t set2_sz = sizeof(set2);
- EXPECT_THAT(setsockopt(socket->get(), IPPROTO_IP, IP_TTL, &set2, set2_sz),
- SyscallSucceedsWithValue(0));
-
- int get2 = -1;
- socklen_t get2_sz = sizeof(get2);
- EXPECT_THAT(getsockopt(socket->get(), IPPROTO_IP, IP_TTL, &get2, &get2_sz),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get2_sz, sizeof(get2));
- EXPECT_EQ(get2, get1);
-}
-
-TEST_P(IPUnboundSocketTest, ZeroTtl) {
- auto socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- int set = 0;
- socklen_t set_sz = sizeof(set);
- EXPECT_THAT(setsockopt(socket->get(), IPPROTO_IP, IP_TTL, &set, set_sz),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_P(IPUnboundSocketTest, InvalidLargeTtl) {
- auto socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- int set = 256;
- socklen_t set_sz = sizeof(set);
- EXPECT_THAT(setsockopt(socket->get(), IPPROTO_IP, IP_TTL, &set, set_sz),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_P(IPUnboundSocketTest, InvalidNegativeTtl) {
- auto socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- int set = -2;
- socklen_t set_sz = sizeof(set);
- EXPECT_THAT(setsockopt(socket->get(), IPPROTO_IP, IP_TTL, &set, set_sz),
- SyscallFailsWithErrno(EINVAL));
-}
-
-struct TOSOption {
- int level;
- int option;
- int cmsg_level;
-};
-
-constexpr int INET_ECN_MASK = 3;
-
-static TOSOption GetTOSOption(int domain) {
- TOSOption opt;
- switch (domain) {
- case AF_INET:
- opt.level = IPPROTO_IP;
- opt.option = IP_TOS;
- opt.cmsg_level = SOL_IP;
- break;
- case AF_INET6:
- opt.level = IPPROTO_IPV6;
- opt.option = IPV6_TCLASS;
- opt.cmsg_level = SOL_IPV6;
- break;
- }
- return opt;
-}
-
-TEST_P(IPUnboundSocketTest, TOSDefault) {
- auto socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- TOSOption t = GetTOSOption(GetParam().domain);
- int get = -1;
- socklen_t get_sz = sizeof(get);
- constexpr int kDefaultTOS = 0;
- ASSERT_THAT(getsockopt(socket->get(), t.level, t.option, &get, &get_sz),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_sz, sizeof(get));
- EXPECT_EQ(get, kDefaultTOS);
-}
-
-TEST_P(IPUnboundSocketTest, SetTOS) {
- auto socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- int set = 0xC0;
- socklen_t set_sz = sizeof(set);
- TOSOption t = GetTOSOption(GetParam().domain);
- EXPECT_THAT(setsockopt(socket->get(), t.level, t.option, &set, set_sz),
- SyscallSucceedsWithValue(0));
-
- int get = -1;
- socklen_t get_sz = sizeof(get);
- ASSERT_THAT(getsockopt(socket->get(), t.level, t.option, &get, &get_sz),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_sz, sizeof(get));
- EXPECT_EQ(get, set);
-}
-
-TEST_P(IPUnboundSocketTest, ZeroTOS) {
- auto socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- int set = 0;
- socklen_t set_sz = sizeof(set);
- TOSOption t = GetTOSOption(GetParam().domain);
- EXPECT_THAT(setsockopt(socket->get(), t.level, t.option, &set, set_sz),
- SyscallSucceedsWithValue(0));
- int get = -1;
- socklen_t get_sz = sizeof(get);
- ASSERT_THAT(getsockopt(socket->get(), t.level, t.option, &get, &get_sz),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_sz, sizeof(get));
- EXPECT_EQ(get, set);
-}
-
-TEST_P(IPUnboundSocketTest, InvalidLargeTOS) {
- auto socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- // Test with exceeding the byte space.
- int set = 256;
- constexpr int kDefaultTOS = 0;
- socklen_t set_sz = sizeof(set);
- TOSOption t = GetTOSOption(GetParam().domain);
- if (GetParam().domain == AF_INET) {
- EXPECT_THAT(setsockopt(socket->get(), t.level, t.option, &set, set_sz),
- SyscallSucceedsWithValue(0));
- } else {
- EXPECT_THAT(setsockopt(socket->get(), t.level, t.option, &set, set_sz),
- SyscallFailsWithErrno(EINVAL));
- }
- int get = -1;
- socklen_t get_sz = sizeof(get);
- ASSERT_THAT(getsockopt(socket->get(), t.level, t.option, &get, &get_sz),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_sz, sizeof(get));
- EXPECT_EQ(get, kDefaultTOS);
-}
-
-TEST_P(IPUnboundSocketTest, CheckSkipECN) {
- // Test is inconsistent on different kernels.
- SKIP_IF(!IsRunningOnGvisor());
- auto socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- int set = 0xFF;
- socklen_t set_sz = sizeof(set);
- TOSOption t = GetTOSOption(GetParam().domain);
- EXPECT_THAT(setsockopt(socket->get(), t.level, t.option, &set, set_sz),
- SyscallSucceedsWithValue(0));
- int expect = static_cast<uint8_t>(set);
- if (GetParam().protocol == IPPROTO_TCP) {
- expect &= ~INET_ECN_MASK;
- }
- int get = -1;
- socklen_t get_sz = sizeof(get);
- ASSERT_THAT(getsockopt(socket->get(), t.level, t.option, &get, &get_sz),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_sz, sizeof(get));
- EXPECT_EQ(get, expect);
-}
-
-TEST_P(IPUnboundSocketTest, ZeroTOSOptionSize) {
- auto socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- int set = 0xC0;
- socklen_t set_sz = 0;
- TOSOption t = GetTOSOption(GetParam().domain);
- if (GetParam().domain == AF_INET) {
- EXPECT_THAT(setsockopt(socket->get(), t.level, t.option, &set, set_sz),
- SyscallSucceedsWithValue(0));
- } else {
- EXPECT_THAT(setsockopt(socket->get(), t.level, t.option, &set, set_sz),
- SyscallFailsWithErrno(EINVAL));
- }
- int get = -1;
- socklen_t get_sz = 0;
- ASSERT_THAT(getsockopt(socket->get(), t.level, t.option, &get, &get_sz),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_sz, 0);
- EXPECT_EQ(get, -1);
-}
-
-TEST_P(IPUnboundSocketTest, SmallTOSOptionSize) {
- auto socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- int set = 0xC0;
- constexpr int kDefaultTOS = 0;
- TOSOption t = GetTOSOption(GetParam().domain);
- for (socklen_t i = 1; i < sizeof(int); i++) {
- int expect_tos;
- socklen_t expect_sz;
- if (GetParam().domain == AF_INET) {
- EXPECT_THAT(setsockopt(socket->get(), t.level, t.option, &set, i),
- SyscallSucceedsWithValue(0));
- expect_tos = set;
- expect_sz = sizeof(uint8_t);
- } else {
- EXPECT_THAT(setsockopt(socket->get(), t.level, t.option, &set, i),
- SyscallFailsWithErrno(EINVAL));
- expect_tos = kDefaultTOS;
- expect_sz = i;
- }
- uint get = -1;
- socklen_t get_sz = i;
- ASSERT_THAT(getsockopt(socket->get(), t.level, t.option, &get, &get_sz),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_sz, expect_sz);
- // Account for partial copies by getsockopt: compare only the lower
- // bits specified by get_sz against expect_tos.
- EXPECT_EQ(get & ~(~0 << (get_sz * 8)), expect_tos);
- }
-}
-
-TEST_P(IPUnboundSocketTest, LargeTOSOptionSize) {
- auto socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- int set = 0xC0;
- TOSOption t = GetTOSOption(GetParam().domain);
- for (socklen_t i = sizeof(int); i < 10; i++) {
- EXPECT_THAT(setsockopt(socket->get(), t.level, t.option, &set, i),
- SyscallSucceedsWithValue(0));
- int get = -1;
- socklen_t get_sz = i;
- // We expect the system call handler to copy at most sizeof(int) bytes
- // as asserted by the check below. Hence, we do not expect the copy to
- // overflow in getsockopt.
- ASSERT_THAT(getsockopt(socket->get(), t.level, t.option, &get, &get_sz),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_sz, sizeof(int));
- EXPECT_EQ(get, set);
- }
-}
-
-TEST_P(IPUnboundSocketTest, NegativeTOS) {
- auto socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- int set = -1;
- socklen_t set_sz = sizeof(set);
- TOSOption t = GetTOSOption(GetParam().domain);
- EXPECT_THAT(setsockopt(socket->get(), t.level, t.option, &set, set_sz),
- SyscallSucceedsWithValue(0));
- int expect;
- if (GetParam().domain == AF_INET) {
- expect = static_cast<uint8_t>(set);
- if (GetParam().protocol == IPPROTO_TCP) {
- expect &= ~INET_ECN_MASK;
- }
- } else {
- // On IPv6 TCLASS, setting -1 has the effect of resetting the
- // TrafficClass.
- expect = 0;
- }
- int get = -1;
- socklen_t get_sz = sizeof(get);
- ASSERT_THAT(getsockopt(socket->get(), t.level, t.option, &get, &get_sz),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_sz, sizeof(get));
- EXPECT_EQ(get, expect);
-}
-
-TEST_P(IPUnboundSocketTest, InvalidNegativeTOS) {
- auto socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- int set = -2;
- socklen_t set_sz = sizeof(set);
- TOSOption t = GetTOSOption(GetParam().domain);
- int expect;
- if (GetParam().domain == AF_INET) {
- ASSERT_THAT(setsockopt(socket->get(), t.level, t.option, &set, set_sz),
- SyscallSucceedsWithValue(0));
- expect = static_cast<uint8_t>(set);
- if (GetParam().protocol == IPPROTO_TCP) {
- expect &= ~INET_ECN_MASK;
- }
- } else {
- ASSERT_THAT(setsockopt(socket->get(), t.level, t.option, &set, set_sz),
- SyscallFailsWithErrno(EINVAL));
- expect = 0;
- }
- int get = 0;
- socklen_t get_sz = sizeof(get);
- ASSERT_THAT(getsockopt(socket->get(), t.level, t.option, &get, &get_sz),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_sz, sizeof(get));
- EXPECT_EQ(get, expect);
-}
-
-TEST_P(IPUnboundSocketTest, NullTOS) {
- auto socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- TOSOption t = GetTOSOption(GetParam().domain);
- int set_sz = sizeof(int);
- if (GetParam().domain == AF_INET) {
- EXPECT_THAT(setsockopt(socket->get(), t.level, t.option, nullptr, set_sz),
- SyscallFailsWithErrno(EFAULT));
- } else { // AF_INET6
- // The AF_INET6 behavior is not yet compatible with Linux. gVisor tries to
- // read optval from user memory in the syscall handler; it would need
- // substantial refactoring to implement this behavior just for IPv6.
- if (IsRunningOnGvisor()) {
- EXPECT_THAT(setsockopt(socket->get(), t.level, t.option, nullptr, set_sz),
- SyscallFailsWithErrno(EFAULT));
- } else {
- // Linux's IPv6 stack treats nullptr optval as input of 0, so the call
- // succeeds. (net/ipv6/ipv6_sockglue.c, do_ipv6_setsockopt())
- //
- // Linux's implementation would need fixing as passing a nullptr as optval
- // and non-zero optlen may not be valid.
- // TODO(b/158666797): Combine the gVisor and linux cases for IPv6.
- // Some kernel versions return EFAULT, so we handle both.
- EXPECT_THAT(
- setsockopt(socket->get(), t.level, t.option, nullptr, set_sz),
- AnyOf(SyscallFailsWithErrno(EFAULT), SyscallSucceedsWithValue(0)));
- }
- }
- socklen_t get_sz = sizeof(int);
- EXPECT_THAT(getsockopt(socket->get(), t.level, t.option, nullptr, &get_sz),
- SyscallFailsWithErrno(EFAULT));
- int get = -1;
- EXPECT_THAT(getsockopt(socket->get(), t.level, t.option, &get, nullptr),
- SyscallFailsWithErrno(EFAULT));
-}
-
-TEST_P(IPUnboundSocketTest, InsufficientBufferTOS) {
- SKIP_IF(GetParam().protocol == IPPROTO_TCP);
-
- auto socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- TOSOption t = GetTOSOption(GetParam().domain);
-
- in_addr addr4;
- in6_addr addr6;
- ASSERT_THAT(inet_pton(AF_INET, "127.0.0.1", &addr4), ::testing::Eq(1));
- ASSERT_THAT(inet_pton(AF_INET6, "fe80::", &addr6), ::testing::Eq(1));
-
- cmsghdr cmsg = {};
- cmsg.cmsg_len = sizeof(cmsg);
- cmsg.cmsg_level = t.cmsg_level;
- cmsg.cmsg_type = t.option;
-
- msghdr msg = {};
- msg.msg_control = &cmsg;
- msg.msg_controllen = sizeof(cmsg);
- if (GetParam().domain == AF_INET) {
- msg.msg_name = &addr4;
- msg.msg_namelen = sizeof(addr4);
- } else {
- msg.msg_name = &addr6;
- msg.msg_namelen = sizeof(addr6);
- }
-
- EXPECT_THAT(sendmsg(socket->get(), &msg, 0), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_P(IPUnboundSocketTest, ReuseAddrDefault) {
- auto socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- int get = -1;
- socklen_t get_sz = sizeof(get);
- ASSERT_THAT(
- getsockopt(socket->get(), SOL_SOCKET, SO_REUSEADDR, &get, &get_sz),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get, kSockOptOff);
- EXPECT_EQ(get_sz, sizeof(get));
-}
-
-TEST_P(IPUnboundSocketTest, SetReuseAddr) {
- auto socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- ASSERT_THAT(setsockopt(socket->get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceedsWithValue(0));
-
- int get = -1;
- socklen_t get_sz = sizeof(get);
- ASSERT_THAT(
- getsockopt(socket->get(), SOL_SOCKET, SO_REUSEADDR, &get, &get_sz),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get, kSockOptOn);
- EXPECT_EQ(get_sz, sizeof(get));
-}
-
-INSTANTIATE_TEST_SUITE_P(
- IPUnboundSockets, IPUnboundSocketTest,
- ::testing::ValuesIn(VecCat<SocketKind>(
- ApplyVec<SocketKind>(IPv4UDPUnboundSocket,
- std::vector<int>{0, SOCK_NONBLOCK}),
- ApplyVec<SocketKind>(IPv6UDPUnboundSocket,
- std::vector<int>{0, SOCK_NONBLOCK}),
- ApplyVec<SocketKind>(IPv4TCPUnboundSocket,
- std::vector{0, SOCK_NONBLOCK}),
- ApplyVec<SocketKind>(IPv6TCPUnboundSocket,
- std::vector{0, SOCK_NONBLOCK}))));
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_ip_unbound_netlink.cc b/test/syscalls/linux/socket_ip_unbound_netlink.cc
deleted file mode 100644
index b02222999..000000000
--- a/test/syscalls/linux/socket_ip_unbound_netlink.cc
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <arpa/inet.h>
-#include <netinet/in.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <sys/un.h>
-
-#include <cstdio>
-#include <cstring>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_netlink_route_util.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/capability_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Test fixture for tests that apply to unbound IP sockets.
-using IPv6UnboundSocketTest = SimpleSocketTest;
-
-TEST_P(IPv6UnboundSocketTest, ConnectToBadLocalAddress) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN)));
-
- // TODO(gvisor.dev/issue/4595): Addresses on net devices are not saved
- // across save/restore.
- DisableSave ds;
-
- // Delete the loopback address from the loopback interface.
- Link loopback_link = ASSERT_NO_ERRNO_AND_VALUE(LoopbackLink());
- EXPECT_NO_ERRNO(LinkDelLocalAddr(loopback_link.index, AF_INET6,
- /*prefixlen=*/128, &in6addr_loopback,
- sizeof(in6addr_loopback)));
- Cleanup defer_addr_removal =
- Cleanup([loopback_link = std::move(loopback_link)] {
- EXPECT_NO_ERRNO(LinkAddLocalAddr(loopback_link.index, AF_INET6,
- /*prefixlen=*/128, &in6addr_loopback,
- sizeof(in6addr_loopback)));
- });
-
- TestAddress addr = V6Loopback();
- reinterpret_cast<sockaddr_in6*>(&addr.addr)->sin6_port = 65535;
- auto sock = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- EXPECT_THAT(connect(sock->get(), AsSockAddr(&addr.addr), addr.addr_len),
- SyscallFailsWithErrno(EADDRNOTAVAIL));
-}
-
-INSTANTIATE_TEST_SUITE_P(IPUnboundSockets, IPv6UnboundSocketTest,
- ::testing::ValuesIn(std::vector<SocketKind>{
- IPv6UDPUnboundSocket(0),
- IPv6TCPUnboundSocket(0)}));
-
-using IPv4UnboundSocketTest = SimpleSocketTest;
-
-TEST_P(IPv4UnboundSocketTest, ConnectToBadLocalAddress) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN)));
-
- // TODO(gvisor.dev/issue/4595): Addresses on net devices are not saved
- // across save/restore.
- DisableSave ds;
-
- // Delete the loopback address from the loopback interface.
- Link loopback_link = ASSERT_NO_ERRNO_AND_VALUE(LoopbackLink());
- struct in_addr laddr;
- laddr.s_addr = htonl(INADDR_LOOPBACK);
- EXPECT_NO_ERRNO(LinkDelLocalAddr(loopback_link.index, AF_INET,
- /*prefixlen=*/8, &laddr, sizeof(laddr)));
- Cleanup defer_addr_removal = Cleanup(
- [loopback_link = std::move(loopback_link), addr = std::move(laddr)] {
- EXPECT_NO_ERRNO(LinkAddLocalAddr(loopback_link.index, AF_INET,
- /*prefixlen=*/8, &addr, sizeof(addr)));
- });
- TestAddress addr = V4Loopback();
- reinterpret_cast<sockaddr_in*>(&addr.addr)->sin_port = 65535;
- auto sock = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- EXPECT_THAT(connect(sock->get(), AsSockAddr(&addr.addr), addr.addr_len),
- SyscallFailsWithErrno(ENETUNREACH));
-}
-
-INSTANTIATE_TEST_SUITE_P(IPUnboundSockets, IPv4UnboundSocketTest,
- ::testing::ValuesIn(std::vector<SocketKind>{
- IPv4UDPUnboundSocket(0),
- IPv4TCPUnboundSocket(0)}));
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_ipv4_udp_unbound.cc b/test/syscalls/linux/socket_ipv4_udp_unbound.cc
deleted file mode 100644
index 18be4dcc7..000000000
--- a/test/syscalls/linux/socket_ipv4_udp_unbound.cc
+++ /dev/null
@@ -1,2512 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/socket_ipv4_udp_unbound.h"
-
-#include <arpa/inet.h>
-#include <net/if.h>
-#include <sys/ioctl.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <sys/un.h>
-
-#include <cstdio>
-
-#include "gtest/gtest.h"
-#include "absl/memory/memory.h"
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/save_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Check that packets are not received without a group membership. Default send
-// interface configured by bind.
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackNoGroup) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Bind the first FD to the loopback. This is an alternative to
- // IP_MULTICAST_IF for setting the default send interface.
- auto sender_addr = V4Loopback();
- EXPECT_THAT(
- bind(socket1->get(), AsSockAddr(&sender_addr.addr), sender_addr.addr_len),
- SyscallSucceeds());
-
- // Bind the second FD to the v4 any address. If multicast worked like unicast,
- // this would ensure that we get the packet.
- auto receiver_addr = V4Any();
- EXPECT_THAT(bind(socket2->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(socket2->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
-
- // Send the multicast packet.
- auto send_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&send_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- EXPECT_THAT(
- RetryEINTR(sendto)(socket1->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&send_addr.addr), send_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we did not receive the multicast packet.
- char recv_buf[sizeof(send_buf)] = {};
- EXPECT_THAT(
- RecvTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),
- PosixErrorIs(EAGAIN, ::testing::_));
-}
-
-// Check that not setting a default send interface prevents multicast packets
-// from being sent. Group membership interface configured by address.
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackAddrNoDefaultSendIf) {
- // TODO(b/185517803): Fix for native test.
- SKIP_IF(!IsRunningOnGvisor());
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Bind the second FD to the v4 any address to ensure that we can receive any
- // unicast packet.
- auto receiver_addr = V4Any();
- EXPECT_THAT(bind(socket2->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(socket2->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
-
- // Register to receive multicast packets.
- ip_mreq group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- group.imr_interface.s_addr = htonl(INADDR_LOOPBACK);
- EXPECT_THAT(setsockopt(socket2->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallSucceeds());
-
- // Send a multicast packet.
- auto send_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&send_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- EXPECT_THAT(
- RetryEINTR(sendto)(socket1->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&send_addr.addr), send_addr.addr_len),
- SyscallFailsWithErrno(ENETUNREACH));
-}
-
-// Check that not setting a default send interface prevents multicast packets
-// from being sent. Group membership interface configured by NIC ID.
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackNicNoDefaultSendIf) {
- // TODO(b/185517803): Fix for native test.
- SKIP_IF(!IsRunningOnGvisor());
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Bind the second FD to the v4 any address to ensure that we can receive any
- // unicast packet.
- auto receiver_addr = V4Any();
- ASSERT_THAT(bind(socket2->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(socket2->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
-
- // Register to receive multicast packets.
- ip_mreqn group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- group.imr_ifindex = ASSERT_NO_ERRNO_AND_VALUE(InterfaceIndex("lo"));
- EXPECT_THAT(setsockopt(socket2->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallSucceeds());
-
- // Send a multicast packet.
- auto send_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&send_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- EXPECT_THAT(
- RetryEINTR(sendto)(socket1->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&send_addr.addr), send_addr.addr_len),
- SyscallFailsWithErrno(ENETUNREACH));
-}
-
-// Check that multicast works when the default send interface is configured by
-// bind and the group membership is configured by address.
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackAddr) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Bind the first FD to the loopback. This is an alternative to
- // IP_MULTICAST_IF for setting the default send interface.
- auto sender_addr = V4Loopback();
- ASSERT_THAT(
- bind(socket1->get(), AsSockAddr(&sender_addr.addr), sender_addr.addr_len),
- SyscallSucceeds());
-
- // Bind the second FD to the v4 any address to ensure that we can receive the
- // multicast packet.
- auto receiver_addr = V4Any();
- ASSERT_THAT(bind(socket2->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(socket2->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
-
- // Register to receive multicast packets.
- ip_mreq group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- group.imr_interface.s_addr = htonl(INADDR_LOOPBACK);
- ASSERT_THAT(setsockopt(socket2->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallSucceeds());
-
- // Send a multicast packet.
- auto send_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&send_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(
- RetryEINTR(sendto)(socket1->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&send_addr.addr), send_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we received the multicast packet.
- char recv_buf[sizeof(send_buf)] = {};
- ASSERT_THAT(
- RecvTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),
- IsPosixErrorOkAndHolds(sizeof(recv_buf)));
-
- EXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));
-}
-
-// Check that multicast works when the default send interface is configured by
-// bind and the group membership is configured by NIC ID.
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackNic) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Bind the first FD to the loopback. This is an alternative to
- // IP_MULTICAST_IF for setting the default send interface.
- auto sender_addr = V4Loopback();
- ASSERT_THAT(
- bind(socket1->get(), AsSockAddr(&sender_addr.addr), sender_addr.addr_len),
- SyscallSucceeds());
-
- // Bind the second FD to the v4 any address to ensure that we can receive the
- // multicast packet.
- auto receiver_addr = V4Any();
- ASSERT_THAT(bind(socket2->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(socket2->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
-
- // Register to receive multicast packets.
- ip_mreqn group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- group.imr_ifindex = ASSERT_NO_ERRNO_AND_VALUE(InterfaceIndex("lo"));
- ASSERT_THAT(setsockopt(socket2->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallSucceeds());
-
- // Send a multicast packet.
- auto send_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&send_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(
- RetryEINTR(sendto)(socket1->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&send_addr.addr), send_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we received the multicast packet.
- char recv_buf[sizeof(send_buf)] = {};
- ASSERT_THAT(
- RecvTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),
- IsPosixErrorOkAndHolds(sizeof(recv_buf)));
-
- EXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));
-}
-
-// Check that multicast works when the default send interface is configured by
-// IP_MULTICAST_IF, the send address is specified in sendto, and the group
-// membership is configured by address.
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackIfAddr) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Set the default send interface.
- ip_mreq iface = {};
- iface.imr_interface.s_addr = htonl(INADDR_LOOPBACK);
- ASSERT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_IF, &iface,
- sizeof(iface)),
- SyscallSucceeds());
-
- // Bind the second FD to the v4 any address to ensure that we can receive the
- // multicast packet.
- auto receiver_addr = V4Any();
- ASSERT_THAT(bind(socket2->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(socket2->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
-
- // Register to receive multicast packets.
- ip_mreq group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- group.imr_interface.s_addr = htonl(INADDR_LOOPBACK);
- ASSERT_THAT(setsockopt(socket2->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallSucceeds());
-
- // Send a multicast packet.
- auto send_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&send_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(
- RetryEINTR(sendto)(socket1->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&send_addr.addr), send_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we received the multicast packet.
- char recv_buf[sizeof(send_buf)] = {};
- ASSERT_THAT(
- RecvTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),
- IsPosixErrorOkAndHolds(sizeof(recv_buf)));
-
- EXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));
-}
-
-// Check that multicast works when the default send interface is configured by
-// IP_MULTICAST_IF, the send address is specified in sendto, and the group
-// membership is configured by NIC ID.
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackIfNic) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Set the default send interface.
- ip_mreqn iface = {};
- iface.imr_ifindex = ASSERT_NO_ERRNO_AND_VALUE(InterfaceIndex("lo"));
- ASSERT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_IF, &iface,
- sizeof(iface)),
- SyscallSucceeds());
-
- // Bind the second FD to the v4 any address to ensure that we can receive the
- // multicast packet.
- auto receiver_addr = V4Any();
- ASSERT_THAT(bind(socket2->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(socket2->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
-
- // Register to receive multicast packets.
- ip_mreqn group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- group.imr_ifindex = ASSERT_NO_ERRNO_AND_VALUE(InterfaceIndex("lo"));
- ASSERT_THAT(setsockopt(socket2->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallSucceeds());
-
- // Send a multicast packet.
- auto send_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&send_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(
- RetryEINTR(sendto)(socket1->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&send_addr.addr), send_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we received the multicast packet.
- char recv_buf[sizeof(send_buf)] = {};
- ASSERT_THAT(
- RecvTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),
- IsPosixErrorOkAndHolds(sizeof(recv_buf)));
-
- EXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));
-}
-
-// Check that multicast works when the default send interface is configured by
-// IP_MULTICAST_IF, the send address is specified in connect, and the group
-// membership is configured by address.
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackIfAddrConnect) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Set the default send interface.
- ip_mreq iface = {};
- iface.imr_interface.s_addr = htonl(INADDR_LOOPBACK);
- ASSERT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_IF, &iface,
- sizeof(iface)),
- SyscallSucceeds());
-
- // Bind the second FD to the v4 any address to ensure that we can receive the
- // multicast packet.
- auto receiver_addr = V4Any();
- ASSERT_THAT(bind(socket2->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(socket2->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
-
- // Register to receive multicast packets.
- ip_mreq group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- group.imr_interface.s_addr = htonl(INADDR_LOOPBACK);
- ASSERT_THAT(setsockopt(socket2->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallSucceeds());
-
- // Send a multicast packet.
- auto connect_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&connect_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- ASSERT_THAT(
- RetryEINTR(connect)(socket1->get(), AsSockAddr(&connect_addr.addr),
- connect_addr.addr_len),
- SyscallSucceeds());
-
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(RetryEINTR(send)(socket1->get(), send_buf, sizeof(send_buf), 0),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we received the multicast packet.
- char recv_buf[sizeof(send_buf)] = {};
- ASSERT_THAT(
- RecvTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),
- IsPosixErrorOkAndHolds(sizeof(recv_buf)));
-
- EXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));
-}
-
-// Check that multicast works when the default send interface is configured by
-// IP_MULTICAST_IF, the send address is specified in connect, and the group
-// membership is configured by NIC ID.
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackIfNicConnect) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Set the default send interface.
- ip_mreqn iface = {};
- iface.imr_ifindex = ASSERT_NO_ERRNO_AND_VALUE(InterfaceIndex("lo"));
- ASSERT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_IF, &iface,
- sizeof(iface)),
- SyscallSucceeds());
-
- // Bind the second FD to the v4 any address to ensure that we can receive the
- // multicast packet.
- auto receiver_addr = V4Any();
- ASSERT_THAT(bind(socket2->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(socket2->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
-
- // Register to receive multicast packets.
- ip_mreqn group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- group.imr_ifindex = ASSERT_NO_ERRNO_AND_VALUE(InterfaceIndex("lo"));
- ASSERT_THAT(setsockopt(socket2->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallSucceeds());
-
- // Send a multicast packet.
- auto connect_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&connect_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- ASSERT_THAT(
- RetryEINTR(connect)(socket1->get(), AsSockAddr(&connect_addr.addr),
- connect_addr.addr_len),
- SyscallSucceeds());
-
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(RetryEINTR(send)(socket1->get(), send_buf, sizeof(send_buf), 0),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we received the multicast packet.
- char recv_buf[sizeof(send_buf)] = {};
- ASSERT_THAT(
- RecvTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),
- IsPosixErrorOkAndHolds(sizeof(recv_buf)));
-
- EXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));
-}
-
-// Check that a socket that has joined the group receives its own multicast
-// packet when the default send interface is configured by IP_MULTICAST_IF,
-// the send address is specified in sendto, and the group membership is
-// configured by address.
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackIfAddrSelf) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Set the default send interface.
- ip_mreq iface = {};
- iface.imr_interface.s_addr = htonl(INADDR_LOOPBACK);
- ASSERT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_IF, &iface,
- sizeof(iface)),
- SyscallSucceeds());
-
- // Bind the first FD to the v4 any address to ensure that we can receive the
- // multicast packet.
- auto receiver_addr = V4Any();
- ASSERT_THAT(bind(socket1->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(socket1->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
-
- // Register to receive multicast packets.
- ip_mreq group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- group.imr_interface.s_addr = htonl(INADDR_LOOPBACK);
- ASSERT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallSucceeds());
-
- // Send a multicast packet.
- auto send_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&send_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(
- RetryEINTR(sendto)(socket1->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&send_addr.addr), send_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we received the multicast packet.
- char recv_buf[sizeof(send_buf)] = {};
- ASSERT_THAT(
- RecvTimeout(socket1->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),
- IsPosixErrorOkAndHolds(sizeof(recv_buf)));
-
- EXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));
-}
-
-// Check that a socket that has joined the group receives its own multicast
-// packet when the default send interface is configured by IP_MULTICAST_IF,
-// the send address is specified in sendto, and the group membership is
-// configured by NIC ID.
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackIfNicSelf) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Set the default send interface.
- ip_mreqn iface = {};
- iface.imr_ifindex = ASSERT_NO_ERRNO_AND_VALUE(InterfaceIndex("lo"));
- ASSERT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_IF, &iface,
- sizeof(iface)),
- SyscallSucceeds());
-
- // Bind the first FD to the v4 any address to ensure that we can receive the
- // multicast packet.
- auto receiver_addr = V4Any();
- ASSERT_THAT(bind(socket1->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(socket1->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
-
- // Register to receive multicast packets.
- ip_mreqn group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- group.imr_ifindex = ASSERT_NO_ERRNO_AND_VALUE(InterfaceIndex("lo"));
- ASSERT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallSucceeds());
-
- // Send a multicast packet.
- auto send_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&send_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(
- RetryEINTR(sendto)(socket1->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&send_addr.addr), send_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we received the multicast packet.
- char recv_buf[sizeof(send_buf)] = {};
- ASSERT_THAT(
- RecvTimeout(socket1->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),
- IsPosixErrorOkAndHolds(sizeof(recv_buf)));
-
- EXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));
-}
-
-// Check that a socket that has joined the group and connected to the
-// multicast address does not receive its own multicast packet. The default
-// send interface is configured by IP_MULTICAST_IF and the group membership is
-// configured by address.
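-// Note: the looped-back copy presumably carries the sender's unicast source
-// address rather than the group address, so the connected socket is expected
-// to filter it out.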
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackIfAddrSelfConnect) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Set the default send interface.
- ip_mreq iface = {};
- iface.imr_interface.s_addr = htonl(INADDR_LOOPBACK);
- ASSERT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_IF, &iface,
- sizeof(iface)),
- SyscallSucceeds());
-
- // Bind the first FD to the v4 any address to ensure that we can receive the
- // multicast packet.
- auto receiver_addr = V4Any();
- ASSERT_THAT(bind(socket1->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(socket1->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
-
- // Register to receive multicast packets.
- ip_mreq group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- group.imr_interface.s_addr = htonl(INADDR_LOOPBACK);
- EXPECT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallSucceeds());
-
- // Send a multicast packet.
- auto connect_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&connect_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- EXPECT_THAT(
- RetryEINTR(connect)(socket1->get(), AsSockAddr(&connect_addr.addr),
- connect_addr.addr_len),
- SyscallSucceeds());
-
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(RetryEINTR(send)(socket1->get(), send_buf, sizeof(send_buf), 0),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we did not receive the multicast packet.
- char recv_buf[sizeof(send_buf)] = {};
- EXPECT_THAT(
- RecvTimeout(socket1->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),
- PosixErrorIs(EAGAIN, ::testing::_));
-}
-
-// Check that a socket that has joined the group and connected to the
-// multicast address does not receive its own multicast packet. The default
-// send interface is configured by IP_MULTICAST_IF and the group membership is
-// configured by NIC ID.
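-// Note: as above, the connected socket presumably filters out the looped-back
-// copy because its source address is not the connected multicast address.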
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackIfNicSelfConnect) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Set the default send interface.
- ip_mreqn iface = {};
- iface.imr_ifindex = ASSERT_NO_ERRNO_AND_VALUE(InterfaceIndex("lo"));
- ASSERT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_IF, &iface,
- sizeof(iface)),
- SyscallSucceeds());
-
- // Bind the first FD to the v4 any address to ensure that we can receive the
- // multicast packet.
- auto receiver_addr = V4Any();
- ASSERT_THAT(bind(socket1->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(socket1->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
-
- // Register to receive multicast packets.
- ip_mreqn group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- group.imr_ifindex = ASSERT_NO_ERRNO_AND_VALUE(InterfaceIndex("lo"));
- ASSERT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallSucceeds());
-
- // Send a multicast packet.
- auto connect_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&connect_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- ASSERT_THAT(
- RetryEINTR(connect)(socket1->get(), AsSockAddr(&connect_addr.addr),
- connect_addr.addr_len),
- SyscallSucceeds());
-
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(RetryEINTR(send)(socket1->get(), send_buf, sizeof(send_buf), 0),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we did not receive the multicast packet.
- char recv_buf[sizeof(send_buf)] = {};
- EXPECT_THAT(
- RecvTimeout(socket1->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),
- PosixErrorIs(EAGAIN, ::testing::_));
-}
-
-// Check that a socket that has joined the group still receives its own
-// multicast packet over the loopback interface even with IP_MULTICAST_LOOP
-// disabled. The default send interface is configured by IP_MULTICAST_IF, the
-// send address is specified in sendto, and the group membership is configured
-// by address.
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackIfAddrSelfNoLoop) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Set the default send interface.
- ip_mreq iface = {};
- iface.imr_interface.s_addr = htonl(INADDR_LOOPBACK);
- EXPECT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_IF, &iface,
- sizeof(iface)),
- SyscallSucceeds());
-
- ASSERT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_LOOP,
- &kSockOptOff, sizeof(kSockOptOff)),
- SyscallSucceeds());
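- // Note: even with IP_MULTICAST_LOOP disabled, the packet is presumably still
- // delivered because it is sent out the loopback interface and comes back in.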
-
- // Bind the first FD to the v4 any address to ensure that we can receive the
- // multicast packet.
- auto receiver_addr = V4Any();
- ASSERT_THAT(bind(socket1->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(socket1->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
-
- // Register to receive multicast packets.
- ip_mreq group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- group.imr_interface.s_addr = htonl(INADDR_LOOPBACK);
- ASSERT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallSucceeds());
-
- // Send a multicast packet.
- auto send_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&send_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(
- RetryEINTR(sendto)(socket1->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&send_addr.addr), send_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we received the multicast packet.
- char recv_buf[sizeof(send_buf)] = {};
- ASSERT_THAT(
- RecvTimeout(socket1->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),
- IsPosixErrorOkAndHolds(sizeof(recv_buf)));
-
- EXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));
-}
-
-// Check that a socket that has joined the group still receives its own
-// multicast packet over the loopback interface even with IP_MULTICAST_LOOP
-// disabled. The default send interface is configured by IP_MULTICAST_IF, the
-// send address is specified in sendto, and the group membership is configured
-// by NIC ID.
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackIfNicSelfNoLoop) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Set the default send interface.
- ip_mreqn iface = {};
- iface.imr_ifindex = ASSERT_NO_ERRNO_AND_VALUE(InterfaceIndex("lo"));
- ASSERT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_IF, &iface,
- sizeof(iface)),
- SyscallSucceeds());
-
- ASSERT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_LOOP,
- &kSockOptOff, sizeof(kSockOptOff)),
- SyscallSucceeds());
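- // Note: as above, delivery over the loopback interface is still expected
- // even with IP_MULTICAST_LOOP disabled.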
-
- // Bind the first FD to the v4 any address to ensure that we can receive the
- // multicast packet.
- auto receiver_addr = V4Any();
- ASSERT_THAT(bind(socket1->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(socket1->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
-
- // Register to receive multicast packets.
- ip_mreqn group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- group.imr_ifindex = ASSERT_NO_ERRNO_AND_VALUE(InterfaceIndex("lo"));
- EXPECT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallSucceeds());
-
- // Send a multicast packet.
- auto send_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&send_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(
- RetryEINTR(sendto)(socket1->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&send_addr.addr), send_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we received the multicast packet.
- char recv_buf[sizeof(send_buf)] = {};
- ASSERT_THAT(
- RecvTimeout(socket1->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),
- IsPosixErrorOkAndHolds(sizeof(recv_buf)));
-
- EXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));
-}
-
-// Check that dropping a group membership that does not exist fails.
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastInvalidDrop) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Unregister from a membership that we didn't have.
- ip_mreq group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- group.imr_interface.s_addr = htonl(INADDR_LOOPBACK);
- EXPECT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_DROP_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallFailsWithErrno(EADDRNOTAVAIL));
-}
-
-// Check that dropping a group membership prevents multicast packets from being
-// delivered. Default send address configured by bind and group membership
-// interface configured by address.
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastDropAddr) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Bind the first FD to the loopback. This is an alternative to
- // IP_MULTICAST_IF for setting the default send interface.
- auto sender_addr = V4Loopback();
- EXPECT_THAT(
- bind(socket1->get(), AsSockAddr(&sender_addr.addr), sender_addr.addr_len),
- SyscallSucceeds());
-
- // Bind the second FD to the v4 any address to ensure that we can receive the
- // multicast packet.
- auto receiver_addr = V4Any();
- EXPECT_THAT(bind(socket2->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(socket2->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
-
- // Register and unregister to receive multicast packets.
- ip_mreq group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- group.imr_interface.s_addr = htonl(INADDR_LOOPBACK);
- EXPECT_THAT(setsockopt(socket2->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallSucceeds());
- EXPECT_THAT(setsockopt(socket2->get(), IPPROTO_IP, IP_DROP_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallSucceeds());
-
- // Send a multicast packet.
- auto send_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&send_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- EXPECT_THAT(
- RetryEINTR(sendto)(socket1->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&send_addr.addr), send_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we did not receive the multicast packet.
- char recv_buf[sizeof(send_buf)] = {};
- EXPECT_THAT(
- RecvTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),
- PosixErrorIs(EAGAIN, ::testing::_));
-}
-
-// Check that dropping a group membership prevents multicast packets from being
-// delivered. Default send address configured by bind and group membership
-// interface configured by NIC ID.
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastDropNic) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Bind the first FD to the loopback. This is an alternative to
- // IP_MULTICAST_IF for setting the default send interface.
- auto sender_addr = V4Loopback();
- EXPECT_THAT(
- bind(socket1->get(), AsSockAddr(&sender_addr.addr), sender_addr.addr_len),
- SyscallSucceeds());
-
- // Bind the second FD to the v4 any address to ensure that we can receive the
- // multicast packet.
- auto receiver_addr = V4Any();
- EXPECT_THAT(bind(socket2->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(socket2->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
-
- // Register and unregister to receive multicast packets.
- ip_mreqn group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- group.imr_ifindex = ASSERT_NO_ERRNO_AND_VALUE(InterfaceIndex("lo"));
- EXPECT_THAT(setsockopt(socket2->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallSucceeds());
- EXPECT_THAT(setsockopt(socket2->get(), IPPROTO_IP, IP_DROP_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallSucceeds());
-
- // Send a multicast packet.
- auto send_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&send_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- EXPECT_THAT(
- RetryEINTR(sendto)(socket1->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&send_addr.addr), send_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we did not receive the multicast packet.
- char recv_buf[sizeof(send_buf)] = {};
- EXPECT_THAT(
- RecvTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),
- PosixErrorIs(EAGAIN, ::testing::_));
-}
-
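-// Check that setting IP_MULTICAST_IF with a zeroed ip_mreqn succeeds.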
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastIfZero) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- ip_mreqn iface = {};
- EXPECT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_IF, &iface,
- sizeof(iface)),
- SyscallSucceeds());
-}
-
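-// Check that setting IP_MULTICAST_IF with an invalid interface index fails.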
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastIfInvalidNic) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- ip_mreqn iface = {};
- iface.imr_ifindex = -1;
- EXPECT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_IF, &iface,
- sizeof(iface)),
- SyscallFailsWithErrno(EADDRNOTAVAIL));
-}
-
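-// Check that setting IP_MULTICAST_IF with an address that is not assigned to
-// any local interface fails.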
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastIfInvalidAddr) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- ip_mreq iface = {};
- iface.imr_interface.s_addr = inet_addr("255.255.255");
- EXPECT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_IF, &iface,
- sizeof(iface)),
- SyscallFailsWithErrno(EADDRNOTAVAIL));
-}
-
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastIfSetShort) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Create a valid full-sized request.
- ip_mreqn iface = {};
- iface.imr_ifindex = ASSERT_NO_ERRNO_AND_VALUE(InterfaceIndex("lo"));
-
- // Send an optlen of 1 to check that optlen is enforced.
- EXPECT_THAT(
- setsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_IF, &iface, 1),
- SyscallFailsWithErrno(EINVAL));
-}
-
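-// Check that the default multicast send interface reads back as the any
-// address.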
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastIfDefault) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- in_addr get = {};
- socklen_t size = sizeof(get);
- ASSERT_THAT(
- getsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_IF, &get, &size),
- SyscallSucceeds());
- EXPECT_EQ(size, sizeof(get));
- EXPECT_EQ(get.s_addr, 0);
-}
-
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastIfDefaultReqn) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- ip_mreqn get = {};
- socklen_t size = sizeof(get);
- ASSERT_THAT(
- getsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_IF, &get, &size),
- SyscallSucceeds());
-
- // getsockopt(IP_MULTICAST_IF) can only return an in_addr, so it treats the
- // first sizeof(struct in_addr) bytes of struct ip_mreqn as a struct in_addr.
- // Conveniently, this corresponds to the field ip_mreqn::imr_multiaddr.
- EXPECT_EQ(size, sizeof(in_addr));
-
- // getsockopt(IP_MULTICAST_IF) only returns the interface address, which has
- // not been set, so every field reads back as zero.
- EXPECT_EQ(get.imr_multiaddr.s_addr, 0);
- EXPECT_EQ(get.imr_address.s_addr, 0);
- EXPECT_EQ(get.imr_ifindex, 0);
-}
-
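-// Check that an interface configured via an in_addr can be read back through
-// an ip_mreqn-sized buffer.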
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastIfSetAddrGetReqn) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- in_addr set = {};
- set.s_addr = htonl(INADDR_LOOPBACK);
- ASSERT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_IF, &set,
- sizeof(set)),
- SyscallSucceeds());
-
- ip_mreqn get = {};
- socklen_t size = sizeof(get);
- ASSERT_THAT(
- getsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_IF, &get, &size),
- SyscallSucceeds());
-
- // getsockopt(IP_MULTICAST_IF) can only return an in_addr, so it treats the
- // first sizeof(struct in_addr) bytes of struct ip_mreqn as a struct in_addr.
- // Conveniently, this corresponds to the field ip_mreqn::imr_multiaddr.
- EXPECT_EQ(size, sizeof(in_addr));
- EXPECT_EQ(get.imr_multiaddr.s_addr, set.s_addr);
- EXPECT_EQ(get.imr_address.s_addr, 0);
- EXPECT_EQ(get.imr_ifindex, 0);
-}
-
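-// Check that an interface configured via an ip_mreq can be read back through
-// an ip_mreqn-sized buffer.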
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastIfSetReqAddrGetReqn) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- ip_mreq set = {};
- set.imr_interface.s_addr = htonl(INADDR_LOOPBACK);
- ASSERT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_IF, &set,
- sizeof(set)),
- SyscallSucceeds());
-
- ip_mreqn get = {};
- socklen_t size = sizeof(get);
- ASSERT_THAT(
- getsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_IF, &get, &size),
- SyscallSucceeds());
-
- // getsockopt(IP_MULTICAST_IF) can only return an in_addr, so it treats the
- // first sizeof(struct in_addr) bytes of struct ip_mreqn as a struct in_addr.
- // Conveniently, this corresponds to the field ip_mreqn::imr_multiaddr.
- EXPECT_EQ(size, sizeof(in_addr));
- EXPECT_EQ(get.imr_multiaddr.s_addr, set.imr_interface.s_addr);
- EXPECT_EQ(get.imr_address.s_addr, 0);
- EXPECT_EQ(get.imr_ifindex, 0);
-}
-
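-// Check that an interface configured by NIC index reads back as the zero
-// address, since getsockopt only reports the interface address.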
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastIfSetNicGetReqn) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- ip_mreqn set = {};
- set.imr_ifindex = ASSERT_NO_ERRNO_AND_VALUE(InterfaceIndex("lo"));
- ASSERT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_IF, &set,
- sizeof(set)),
- SyscallSucceeds());
-
- ip_mreqn get = {};
- socklen_t size = sizeof(get);
- ASSERT_THAT(
- getsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_IF, &get, &size),
- SyscallSucceeds());
- EXPECT_EQ(size, sizeof(in_addr));
- EXPECT_EQ(get.imr_multiaddr.s_addr, 0);
- EXPECT_EQ(get.imr_address.s_addr, 0);
- EXPECT_EQ(get.imr_ifindex, 0);
-}
-
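-// Check that an interface configured via an in_addr reads back as the same
-// address.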
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastIfSetAddr) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- in_addr set = {};
- set.s_addr = htonl(INADDR_LOOPBACK);
- ASSERT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_IF, &set,
- sizeof(set)),
- SyscallSucceeds());
-
- in_addr get = {};
- socklen_t size = sizeof(get);
- ASSERT_THAT(
- getsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_IF, &get, &size),
- SyscallSucceeds());
-
- EXPECT_EQ(size, sizeof(get));
- EXPECT_EQ(get.s_addr, set.s_addr);
-}
-
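-// Check that an interface configured via an ip_mreq reads back as its
-// interface address.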
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastIfSetReqAddr) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- ip_mreq set = {};
- set.imr_interface.s_addr = htonl(INADDR_LOOPBACK);
- ASSERT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_IF, &set,
- sizeof(set)),
- SyscallSucceeds());
-
- in_addr get = {};
- socklen_t size = sizeof(get);
- ASSERT_THAT(
- getsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_IF, &get, &size),
- SyscallSucceeds());
-
- EXPECT_EQ(size, sizeof(get));
- EXPECT_EQ(get.s_addr, set.imr_interface.s_addr);
-}
-
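-// Check that configuring the interface by NIC index leaves the reported
-// interface address unset.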
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastIfSetNic) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- ip_mreqn set = {};
- set.imr_ifindex = ASSERT_NO_ERRNO_AND_VALUE(InterfaceIndex("lo"));
- ASSERT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_IF, &set,
- sizeof(set)),
- SyscallSucceeds());
-
- in_addr get = {};
- socklen_t size = sizeof(get);
- ASSERT_THAT(
- getsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_IF, &get, &size),
- SyscallSucceeds());
- EXPECT_EQ(size, sizeof(get));
- EXPECT_EQ(get.s_addr, 0);
-}
-
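-// Check that joining a group without specifying an interface or address
-// fails.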
-TEST_P(IPv4UDPUnboundSocketTest, TestJoinGroupNoIf) {
- // TODO(b/185517803): Fix for native test.
- SKIP_IF(!IsRunningOnGvisor());
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- ip_mreqn group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- EXPECT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallFailsWithErrno(ENODEV));
-}
-
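-// Check that joining a group with an interface address that does not match
-// any local interface fails.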
-TEST_P(IPv4UDPUnboundSocketTest, TestJoinGroupInvalidIf) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- ip_mreqn group = {};
- group.imr_address.s_addr = inet_addr("255.255.255");
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- EXPECT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallFailsWithErrno(ENODEV));
-}
-
-// Check that joining the same multicast group twice on the same socket fails.
-TEST_P(IPv4UDPUnboundSocketTest, TestMultipleJoinsOnSingleSocket) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto fd = socket1->get();
- ip_mreqn group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- group.imr_ifindex = ASSERT_NO_ERRNO_AND_VALUE(InterfaceIndex("lo"));
-
- EXPECT_THAT(
- setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &group, sizeof(group)),
- SyscallSucceeds());
-
- EXPECT_THAT(
- setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &group, sizeof(group)),
- SyscallFailsWithErrno(EADDRINUSE));
-}
-
-// Check that two sockets can join the same multicast group at the same time.
-TEST_P(IPv4UDPUnboundSocketTest, TestTwoSocketsJoinSameMulticastGroup) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- ip_mreqn group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- group.imr_ifindex = ASSERT_NO_ERRNO_AND_VALUE(InterfaceIndex("lo"));
- EXPECT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallSucceeds());
- EXPECT_THAT(setsockopt(socket2->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallSucceeds());
-
- // Drop the membership twice on each socket, the second call for each socket
- // should fail.
- EXPECT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_DROP_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallSucceeds());
- EXPECT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_DROP_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallFailsWithErrno(EADDRNOTAVAIL));
- EXPECT_THAT(setsockopt(socket2->get(), IPPROTO_IP, IP_DROP_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallSucceeds());
- EXPECT_THAT(setsockopt(socket2->get(), IPPROTO_IP, IP_DROP_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallFailsWithErrno(EADDRNOTAVAIL));
-}
-
-// Check that two sockets can join the same multicast group at the same time,
-// and both will receive data on it.
-TEST_P(IPv4UDPUnboundSocketTest, TestMcastReceptionOnTwoSockets) {
- std::unique_ptr<SocketPair> socket_pairs[2] = {
- absl::make_unique<FDSocketPair>(ASSERT_NO_ERRNO_AND_VALUE(NewSocket()),
- ASSERT_NO_ERRNO_AND_VALUE(NewSocket())),
- absl::make_unique<FDSocketPair>(ASSERT_NO_ERRNO_AND_VALUE(NewSocket()),
- ASSERT_NO_ERRNO_AND_VALUE(NewSocket()))};
-
- ip_mreq iface = {}, group = {};
- iface.imr_interface.s_addr = htonl(INADDR_LOOPBACK);
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- group.imr_interface.s_addr = htonl(INADDR_LOOPBACK);
- auto receiver_addr = V4Any();
- int bound_port = 0;
-
- // Create two socketpairs with the exact same configuration.
- for (auto& sockets : socket_pairs) {
- ASSERT_THAT(setsockopt(sockets->first_fd(), IPPROTO_IP, IP_MULTICAST_IF,
- &iface, sizeof(iface)),
- SyscallSucceeds());
- ASSERT_THAT(setsockopt(sockets->second_fd(), SOL_SOCKET, SO_REUSEPORT,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(setsockopt(sockets->second_fd(), IPPROTO_IP, IP_ADD_MEMBERSHIP,
- &group, sizeof(group)),
- SyscallSucceeds());
- ASSERT_THAT(bind(sockets->second_fd(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- // Get the port assigned.
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(
- getsockname(sockets->second_fd(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
- // On the first iteration, save the port we are bound to. On the second
- // iteration, verify the port is the same as the one from the first
- // iteration. In other words, both sockets listen on the same port.
- if (bound_port == 0) {
- bound_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- } else {
- EXPECT_EQ(bound_port,
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port);
- }
- }
-
- // Send a multicast packet to the group from two different sockets and verify
- // it is received by both sockets that joined that group.
- auto send_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&send_addr.addr)->sin_port = bound_port;
- for (auto& sockets : socket_pairs) {
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(
- RetryEINTR(sendto)(sockets->first_fd(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&send_addr.addr), send_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we received the multicast packet on both sockets.
- for (auto& sockets : socket_pairs) {
- char recv_buf[sizeof(send_buf)] = {};
- ASSERT_THAT(RecvTimeout(sockets->second_fd(), recv_buf, sizeof(recv_buf),
- 1 /*timeout*/),
- IsPosixErrorOkAndHolds(sizeof(recv_buf)));
- EXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));
- }
- }
-}
-
-// Check that on two sockets that joined a group and listen on ANY, dropping
-// memberships one by one will continue to deliver packets to both sockets until
-// both memberships have been dropped.
-TEST_P(IPv4UDPUnboundSocketTest, TestMcastReceptionWhenDroppingMemberships) {
- std::unique_ptr<SocketPair> socket_pairs[2] = {
- absl::make_unique<FDSocketPair>(ASSERT_NO_ERRNO_AND_VALUE(NewSocket()),
- ASSERT_NO_ERRNO_AND_VALUE(NewSocket())),
- absl::make_unique<FDSocketPair>(ASSERT_NO_ERRNO_AND_VALUE(NewSocket()),
- ASSERT_NO_ERRNO_AND_VALUE(NewSocket()))};
-
- ip_mreq iface = {}, group = {};
- iface.imr_interface.s_addr = htonl(INADDR_LOOPBACK);
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- group.imr_interface.s_addr = htonl(INADDR_LOOPBACK);
- auto receiver_addr = V4Any();
- int bound_port = 0;
-
- // Create two socketpairs with the exact same configuration.
- for (auto& sockets : socket_pairs) {
- ASSERT_THAT(setsockopt(sockets->first_fd(), IPPROTO_IP, IP_MULTICAST_IF,
- &iface, sizeof(iface)),
- SyscallSucceeds());
- ASSERT_THAT(setsockopt(sockets->second_fd(), SOL_SOCKET, SO_REUSEPORT,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(setsockopt(sockets->second_fd(), IPPROTO_IP, IP_ADD_MEMBERSHIP,
- &group, sizeof(group)),
- SyscallSucceeds());
- ASSERT_THAT(bind(sockets->second_fd(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- // Get the port assigned.
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(
- getsockname(sockets->second_fd(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
- // On the first iteration, save the port we are bound to. On the second
- // iteration, verify the port is the same as the one from the first
- // iteration. In other words, both sockets listen on the same port.
- if (bound_port == 0) {
- bound_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- } else {
- EXPECT_EQ(bound_port,
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port);
- }
- }
-
- // Drop the membership of the first socket pair and verify data is still
- // received.
- ASSERT_THAT(setsockopt(socket_pairs[0]->second_fd(), IPPROTO_IP,
- IP_DROP_MEMBERSHIP, &group, sizeof(group)),
- SyscallSucceeds());
- // Send a packet from each socket_pair.
- auto send_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&send_addr.addr)->sin_port = bound_port;
- for (auto& sockets : socket_pairs) {
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(
- RetryEINTR(sendto)(sockets->first_fd(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&send_addr.addr), send_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we received the multicast packet on both sockets.
- for (auto& sockets : socket_pairs) {
- char recv_buf[sizeof(send_buf)] = {};
- ASSERT_THAT(RecvTimeout(sockets->second_fd(), recv_buf, sizeof(recv_buf),
- 1 /*timeout*/),
- IsPosixErrorOkAndHolds(sizeof(recv_buf)));
- EXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));
- }
- }
-
- // Drop the membership of the second socket pair and verify data stops being
- // received.
- ASSERT_THAT(setsockopt(socket_pairs[1]->second_fd(), IPPROTO_IP,
- IP_DROP_MEMBERSHIP, &group, sizeof(group)),
- SyscallSucceeds());
- // Send a packet from each socket_pair.
- for (auto& sockets : socket_pairs) {
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(
- RetryEINTR(sendto)(sockets->first_fd(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&send_addr.addr), send_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- char recv_buf[sizeof(send_buf)] = {};
- for (auto& sockets : socket_pairs) {
- ASSERT_THAT(RecvTimeout(sockets->second_fd(), recv_buf, sizeof(recv_buf),
- 1 /*timeout*/),
- PosixErrorIs(EAGAIN, ::testing::_));
- }
- }
-}
-
-// Check that a receiving socket can bind to the multicast address before
-// joining the group and receive data once the group has been joined.
-TEST_P(IPv4UDPUnboundSocketTest, TestBindToMcastThenJoinThenReceive) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Bind second socket (receiver) to the multicast address.
- auto receiver_addr = V4Multicast();
- ASSERT_THAT(bind(socket2->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- // Update receiver_addr with the correct port number.
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(socket2->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
-
- // Register to receive multicast packets.
- ip_mreqn group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- group.imr_ifindex = ASSERT_NO_ERRNO_AND_VALUE(InterfaceIndex("lo"));
- ASSERT_THAT(setsockopt(socket2->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallSucceeds());
-
- // Send a multicast packet on the first socket out the loopback interface.
- ip_mreq iface = {};
- iface.imr_interface.s_addr = htonl(INADDR_LOOPBACK);
- ASSERT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_IF, &iface,
- sizeof(iface)),
- SyscallSucceeds());
- auto sendto_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&sendto_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(
- RetryEINTR(sendto)(socket1->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&sendto_addr.addr), sendto_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we received the multicast packet.
- char recv_buf[sizeof(send_buf)] = {};
- ASSERT_THAT(
- RecvTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),
- IsPosixErrorOkAndHolds(sizeof(recv_buf)));
- EXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));
-}
-
-// Check that a receiving socket can bind to the multicast address and won't
-// receive multicast data if it hasn't joined the group.
-TEST_P(IPv4UDPUnboundSocketTest, TestBindToMcastThenNoJoinThenNoReceive) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Bind second socket (receiver) to the multicast address.
- auto receiver_addr = V4Multicast();
- ASSERT_THAT(bind(socket2->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- // Update receiver_addr with the correct port number.
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(socket2->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
-
- // Send a multicast packet on the first socket out the loopback interface.
- ip_mreq iface = {};
- iface.imr_interface.s_addr = htonl(INADDR_LOOPBACK);
- ASSERT_THAT(setsockopt(socket1->get(), IPPROTO_IP, IP_MULTICAST_IF, &iface,
- sizeof(iface)),
- SyscallSucceeds());
- auto sendto_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&sendto_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(
- RetryEINTR(sendto)(socket1->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&sendto_addr.addr), sendto_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we don't receive the multicast packet.
- char recv_buf[sizeof(send_buf)] = {};
- ASSERT_THAT(
- RecvTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),
- PosixErrorIs(EAGAIN, ::testing::_));
-}
-
-// Check that a socket can bind to a multicast address and still send out
-// packets.
-TEST_P(IPv4UDPUnboundSocketTest, TestBindToMcastThenSend) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Bind second socket (receiver) to the ANY address.
- auto receiver_addr = V4Any();
- ASSERT_THAT(bind(socket2->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(socket2->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
-
- // Bind the first socket (sender) to the multicast address.
- auto sender_addr = V4Multicast();
- ASSERT_THAT(
- bind(socket1->get(), AsSockAddr(&sender_addr.addr), sender_addr.addr_len),
- SyscallSucceeds());
- socklen_t sender_addr_len = sender_addr.addr_len;
- ASSERT_THAT(getsockname(socket1->get(), AsSockAddr(&sender_addr.addr),
- &sender_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(sender_addr_len, sender_addr.addr_len);
-
- // Send a packet on the first socket to the loopback address.
- auto sendto_addr = V4Loopback();
- reinterpret_cast<sockaddr_in*>(&sendto_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(
- RetryEINTR(sendto)(socket1->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&sendto_addr.addr), sendto_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we received the packet.
- char recv_buf[sizeof(send_buf)] = {};
- ASSERT_THAT(
- RecvTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),
- IsPosixErrorOkAndHolds(sizeof(recv_buf)));
- EXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));
-}
-
-// Check that a receiving socket can bind to the broadcast address and receive
-// broadcast packets.
-TEST_P(IPv4UDPUnboundSocketTest, TestBindToBcastThenReceive) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Bind second socket (receiver) to the broadcast address.
- auto receiver_addr = V4Broadcast();
- ASSERT_THAT(bind(socket2->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(socket2->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
-
- // Send a broadcast packet on the first socket out the loopback interface.
- EXPECT_THAT(setsockopt(socket1->get(), SOL_SOCKET, SO_BROADCAST, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceedsWithValue(0));
- // Note: binding to the loopback address makes the broadcast go out over the
- // loopback interface.
- auto sender_bind_addr = V4Loopback();
- ASSERT_THAT(bind(socket1->get(), AsSockAddr(&sender_bind_addr.addr),
- sender_bind_addr.addr_len),
- SyscallSucceeds());
- auto sendto_addr = V4Broadcast();
- reinterpret_cast<sockaddr_in*>(&sendto_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(
- RetryEINTR(sendto)(socket1->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&sendto_addr.addr), sendto_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we received the broadcast packet.
- char recv_buf[sizeof(send_buf)] = {};
- ASSERT_THAT(
- RecvTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),
- IsPosixErrorOkAndHolds(sizeof(recv_buf)));
- EXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));
-}
-
-// Check that a socket can bind to the broadcast address and still send out
-// packets.
-TEST_P(IPv4UDPUnboundSocketTest, TestBindToBcastThenSend) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Bind second socket (receiver) to the ANY address.
- auto receiver_addr = V4Any();
- ASSERT_THAT(bind(socket2->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(socket2->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
-
- // Bind the first socket (sender) to the broadcast address.
- auto sender_addr = V4Broadcast();
- ASSERT_THAT(
- bind(socket1->get(), AsSockAddr(&sender_addr.addr), sender_addr.addr_len),
- SyscallSucceeds());
- socklen_t sender_addr_len = sender_addr.addr_len;
- ASSERT_THAT(getsockname(socket1->get(), AsSockAddr(&sender_addr.addr),
- &sender_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(sender_addr_len, sender_addr.addr_len);
-
- // Send a packet on the first socket to the loopback address.
- auto sendto_addr = V4Loopback();
- reinterpret_cast<sockaddr_in*>(&sendto_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(
- RetryEINTR(sendto)(socket1->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&sendto_addr.addr), sendto_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we received the packet.
- char recv_buf[sizeof(send_buf)] = {};
- ASSERT_THAT(
- RecvTimeout(socket2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),
- IsPosixErrorOkAndHolds(sizeof(recv_buf)));
- EXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));
-}
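
A minimal standalone sketch of the property the two tests above exercise: a UDP socket may be bound to a non-unicast local address (here the limited broadcast address) and still send ordinary unicast datagrams. It uses raw POSIX calls instead of the fixture's NewSocket/V4* helpers; the destination port 9 is an arbitrary placeholder.

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <unistd.h>
    #include <cstdio>

    int main() {
      int fd = socket(AF_INET, SOCK_DGRAM, 0);

      // Bind to the limited broadcast address; SO_BROADCAST is not needed for this.
      sockaddr_in bcast = {};
      bcast.sin_family = AF_INET;
      bcast.sin_addr.s_addr = htonl(INADDR_BROADCAST);
      bcast.sin_port = 0;  // Let the kernel pick a port.
      if (bind(fd, reinterpret_cast<sockaddr*>(&bcast), sizeof(bcast)) != 0) {
        std::perror("bind");
        return 1;
      }

      // The broadcast-bound socket can still send a plain unicast datagram.
      sockaddr_in dst = {};
      dst.sin_family = AF_INET;
      dst.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
      dst.sin_port = htons(9);  // Arbitrary placeholder destination port.
      ssize_t n = sendto(fd, "x", 1, 0, reinterpret_cast<sockaddr*>(&dst), sizeof(dst));
      std::printf("sent %zd byte(s) from a broadcast-bound socket\n", n);
      close(fd);
      return 0;
    }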
-
-// Check that SO_REUSEADDR always delivers to the most recently bound socket.
-//
-// FIXME(gvisor.dev/issue/873): Endpoint order is not restored correctly. Enable
-// random and co-op save (below) once that is fixed.
-TEST_P(IPv4UDPUnboundSocketTest, ReuseAddrDistribution) {
- std::vector<std::unique_ptr<FileDescriptor>> sockets;
- sockets.emplace_back(ASSERT_NO_ERRNO_AND_VALUE(NewSocket()));
-
- ASSERT_THAT(setsockopt(sockets[0]->get(), SOL_SOCKET, SO_REUSEADDR,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- // Bind the first socket to the loopback and take note of the selected port.
- auto addr = V4Loopback();
- ASSERT_THAT(bind(sockets[0]->get(), AsSockAddr(&addr.addr), addr.addr_len),
- SyscallSucceeds());
- socklen_t addr_len = addr.addr_len;
- ASSERT_THAT(getsockname(sockets[0]->get(), AsSockAddr(&addr.addr), &addr_len),
- SyscallSucceeds());
- EXPECT_EQ(addr_len, addr.addr_len);
-
- constexpr int kMessageSize = 200;
-
- // FIXME(gvisor.dev/issue/873): Endpoint order is not restored correctly.
- const DisableSave ds;
-
- for (int i = 0; i < 10; i++) {
- // Add a new receiver.
- sockets.emplace_back(ASSERT_NO_ERRNO_AND_VALUE(NewSocket()));
- auto& last = sockets.back();
- ASSERT_THAT(setsockopt(last->get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(bind(last->get(), AsSockAddr(&addr.addr), addr.addr_len),
- SyscallSucceeds());
-
- // Send a new message to the SO_REUSEADDR group. We use a new sender socket
- // for each message so that a different ephemeral source port is used every
- // time. This ensures that we aren't doing REUSEPORT-like hash load balancing.
- auto sender = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- char send_buf[kMessageSize];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- EXPECT_THAT(RetryEINTR(sendto)(sender->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&addr.addr), addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Verify that the most recent socket got the message. We don't expect any
- // of the other sockets to have received it, but we will check that later.
- char recv_buf[sizeof(send_buf)] = {};
- EXPECT_THAT(
- RecvTimeout(last->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),
- IsPosixErrorOkAndHolds(sizeof(send_buf)));
- EXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));
- }
-
- // Verify that no other messages were received.
- for (auto& socket : sockets) {
- char recv_buf[kMessageSize] = {};
- EXPECT_THAT(
- RecvTimeout(socket->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),
- PosixErrorIs(EAGAIN, ::testing::_));
- }
-}
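
A minimal standalone sketch of the SO_REUSEADDR behavior asserted above, with raw POSIX calls in place of the test fixture: two UDP sockets bound to the same loopback address and port, where Linux delivers an incoming datagram to the most recently bound socket.

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <unistd.h>
    #include <cstdio>

    int main() {
      int a = socket(AF_INET, SOCK_DGRAM, 0);
      int b = socket(AF_INET, SOCK_DGRAM, 0);
      int on = 1;
      setsockopt(a, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
      setsockopt(b, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));

      // Bind the first socket to loopback on an ephemeral port, then bind the
      // second socket to the exact same address/port.
      sockaddr_in addr = {};
      addr.sin_family = AF_INET;
      addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
      bind(a, reinterpret_cast<sockaddr*>(&addr), sizeof(addr));
      socklen_t len = sizeof(addr);
      getsockname(a, reinterpret_cast<sockaddr*>(&addr), &len);
      bind(b, reinterpret_cast<sockaddr*>(&addr), sizeof(addr));  // Succeeds.

      // A datagram sent to the shared port lands on 'b', the last socket bound.
      int sender = socket(AF_INET, SOCK_DGRAM, 0);
      sendto(sender, "x", 1, 0, reinterpret_cast<sockaddr*>(&addr), sizeof(addr));
      char buf[1];
      ssize_t n = recv(b, buf, sizeof(buf), 0);
      std::printf("most recently bound socket received %zd byte(s)\n", n);
      close(a); close(b); close(sender);
      return 0;
    }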
-
-TEST_P(IPv4UDPUnboundSocketTest, BindReuseAddrThenReusePort) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Bind socket1 with REUSEADDR.
- ASSERT_THAT(setsockopt(socket1->get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- // Bind the first socket to the loopback and take note of the selected port.
- auto addr = V4Loopback();
- ASSERT_THAT(bind(socket1->get(), AsSockAddr(&addr.addr), addr.addr_len),
- SyscallSucceeds());
- socklen_t addr_len = addr.addr_len;
- ASSERT_THAT(getsockname(socket1->get(), AsSockAddr(&addr.addr), &addr_len),
- SyscallSucceeds());
- EXPECT_EQ(addr_len, addr.addr_len);
-
- // Bind socket2 to the same address as socket1, only with REUSEPORT.
- ASSERT_THAT(setsockopt(socket2->get(), SOL_SOCKET, SO_REUSEPORT, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(bind(socket2->get(), AsSockAddr(&addr.addr), addr.addr_len),
- SyscallFailsWithErrno(EADDRINUSE));
-}
-
-TEST_P(IPv4UDPUnboundSocketTest, BindReusePortThenReuseAddr) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Bind socket1 with REUSEPORT.
- ASSERT_THAT(setsockopt(socket1->get(), SOL_SOCKET, SO_REUSEPORT, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- // Bind the first socket to the loopback and take note of the selected port.
- auto addr = V4Loopback();
- ASSERT_THAT(bind(socket1->get(), AsSockAddr(&addr.addr), addr.addr_len),
- SyscallSucceeds());
- socklen_t addr_len = addr.addr_len;
- ASSERT_THAT(getsockname(socket1->get(), AsSockAddr(&addr.addr), &addr_len),
- SyscallSucceeds());
- EXPECT_EQ(addr_len, addr.addr_len);
-
- // Bind socket2 to the same address as socket1, only with REUSEADDR.
- ASSERT_THAT(setsockopt(socket2->get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(bind(socket2->get(), AsSockAddr(&addr.addr), addr.addr_len),
- SyscallFailsWithErrno(EADDRINUSE));
-}
-
-TEST_P(IPv4UDPUnboundSocketTest, BindReuseAddrReusePortConvertibleToReusePort) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket3 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Bind socket1 with REUSEADDR and REUSEPORT.
- ASSERT_THAT(setsockopt(socket1->get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(setsockopt(socket1->get(), SOL_SOCKET, SO_REUSEPORT, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- // Bind the first socket to the loopback and take note of the selected port.
- auto addr = V4Loopback();
- ASSERT_THAT(bind(socket1->get(), AsSockAddr(&addr.addr), addr.addr_len),
- SyscallSucceeds());
- socklen_t addr_len = addr.addr_len;
- ASSERT_THAT(getsockname(socket1->get(), AsSockAddr(&addr.addr), &addr_len),
- SyscallSucceeds());
- EXPECT_EQ(addr_len, addr.addr_len);
-
- // Bind socket2 to the same address as socket1, only with REUSEPORT.
- ASSERT_THAT(setsockopt(socket2->get(), SOL_SOCKET, SO_REUSEPORT, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(bind(socket2->get(), AsSockAddr(&addr.addr), addr.addr_len),
- SyscallSucceeds());
-
- // Bind socket3 to the same address as socket1, only with REUSEADDR.
- ASSERT_THAT(setsockopt(socket3->get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(bind(socket3->get(), AsSockAddr(&addr.addr), addr.addr_len),
- SyscallFailsWithErrno(EADDRINUSE));
-}
-
-TEST_P(IPv4UDPUnboundSocketTest, BindReuseAddrReusePortConvertibleToReuseAddr) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket3 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Bind socket1 with REUSEADDR and REUSEPORT.
- ASSERT_THAT(setsockopt(socket1->get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(setsockopt(socket1->get(), SOL_SOCKET, SO_REUSEPORT, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- // Bind the first socket to the loopback and take note of the selected port.
- auto addr = V4Loopback();
- ASSERT_THAT(bind(socket1->get(), AsSockAddr(&addr.addr), addr.addr_len),
- SyscallSucceeds());
- socklen_t addr_len = addr.addr_len;
- ASSERT_THAT(getsockname(socket1->get(), AsSockAddr(&addr.addr), &addr_len),
- SyscallSucceeds());
- EXPECT_EQ(addr_len, addr.addr_len);
-
- // Bind socket2 to the same address as socket1, only with REUSEADDR.
- ASSERT_THAT(setsockopt(socket2->get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(bind(socket2->get(), AsSockAddr(&addr.addr), addr.addr_len),
- SyscallSucceeds());
-
- // Bind socket3 to the same address as socket1, only with REUSEPORT.
- ASSERT_THAT(setsockopt(socket3->get(), SOL_SOCKET, SO_REUSEPORT, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(bind(socket3->get(), AsSockAddr(&addr.addr), addr.addr_len),
- SyscallFailsWithErrno(EADDRINUSE));
-}
-
-TEST_P(IPv4UDPUnboundSocketTest, BindReuseAddrReusePortConversionReversable1) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket3 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Bind socket1 with REUSEADDR and REUSEPORT.
- ASSERT_THAT(setsockopt(socket1->get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(setsockopt(socket1->get(), SOL_SOCKET, SO_REUSEPORT, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- // Bind the first socket to the loopback and take note of the selected port.
- auto addr = V4Loopback();
- ASSERT_THAT(bind(socket1->get(), AsSockAddr(&addr.addr), addr.addr_len),
- SyscallSucceeds());
- socklen_t addr_len = addr.addr_len;
- ASSERT_THAT(getsockname(socket1->get(), AsSockAddr(&addr.addr), &addr_len),
- SyscallSucceeds());
- EXPECT_EQ(addr_len, addr.addr_len);
-
- // Bind socket2 to the same address as socket1, only with REUSEPORT.
- ASSERT_THAT(setsockopt(socket2->get(), SOL_SOCKET, SO_REUSEPORT, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(bind(socket2->get(), AsSockAddr(&addr.addr), addr.addr_len),
- SyscallSucceeds());
-
- // Close socket2 to revert to just socket1 with REUSEADDR and REUSEPORT.
- socket2->reset();
-
- // Bind socket3 to the same address as socket1, only with REUSEADDR.
- ASSERT_THAT(setsockopt(socket3->get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(bind(socket3->get(), AsSockAddr(&addr.addr), addr.addr_len),
- SyscallSucceeds());
-}
-
-TEST_P(IPv4UDPUnboundSocketTest, BindReuseAddrReusePortConversionReversable2) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket3 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Bind socket1 with REUSEADDR and REUSEPORT.
- ASSERT_THAT(setsockopt(socket1->get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(setsockopt(socket1->get(), SOL_SOCKET, SO_REUSEPORT, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- // Bind the first socket to the loopback and take note of the selected port.
- auto addr = V4Loopback();
- ASSERT_THAT(bind(socket1->get(), AsSockAddr(&addr.addr), addr.addr_len),
- SyscallSucceeds());
- socklen_t addr_len = addr.addr_len;
- ASSERT_THAT(getsockname(socket1->get(), AsSockAddr(&addr.addr), &addr_len),
- SyscallSucceeds());
- EXPECT_EQ(addr_len, addr.addr_len);
-
- // Bind socket2 to the same address as socket1, only with REUSEADDR.
- ASSERT_THAT(setsockopt(socket2->get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(bind(socket2->get(), AsSockAddr(&addr.addr), addr.addr_len),
- SyscallSucceeds());
-
- // Close socket2 to revert to just socket1 with REUSEADDR and REUSEPORT.
- socket2->reset();
-
- // Bind socket3 to the same address as socket1, only with REUSEPORT.
- ASSERT_THAT(setsockopt(socket3->get(), SOL_SOCKET, SO_REUSEPORT, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(bind(socket3->get(), AsSockAddr(&addr.addr), addr.addr_len),
- SyscallSucceeds());
-}
-
-TEST_P(IPv4UDPUnboundSocketTest, BindDoubleReuseAddrReusePortThenReusePort) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket3 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Bind socket1 with REUSEADDR and REUSEPORT.
- ASSERT_THAT(setsockopt(socket1->get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(setsockopt(socket1->get(), SOL_SOCKET, SO_REUSEPORT, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- // Bind the first socket to the loopback and take note of the selected port.
- auto addr = V4Loopback();
- ASSERT_THAT(bind(socket1->get(), AsSockAddr(&addr.addr), addr.addr_len),
- SyscallSucceeds());
- socklen_t addr_len = addr.addr_len;
- ASSERT_THAT(getsockname(socket1->get(), AsSockAddr(&addr.addr), &addr_len),
- SyscallSucceeds());
- EXPECT_EQ(addr_len, addr.addr_len);
-
- // Bind socket2 to the same address as socket1, also with REUSEADDR and
- // REUSEPORT.
- ASSERT_THAT(setsockopt(socket2->get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(setsockopt(socket2->get(), SOL_SOCKET, SO_REUSEPORT, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- ASSERT_THAT(bind(socket2->get(), AsSockAddr(&addr.addr), addr.addr_len),
- SyscallSucceeds());
-
- // Bind socket3 to the same address as socket1, only with REUSEPORT.
- ASSERT_THAT(setsockopt(socket3->get(), SOL_SOCKET, SO_REUSEPORT, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(bind(socket3->get(), AsSockAddr(&addr.addr), addr.addr_len),
- SyscallSucceeds());
-}
-
-TEST_P(IPv4UDPUnboundSocketTest, BindDoubleReuseAddrReusePortThenReuseAddr) {
- auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto socket3 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Bind socket1 with REUSEADDR and REUSEPORT.
- ASSERT_THAT(setsockopt(socket1->get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(setsockopt(socket1->get(), SOL_SOCKET, SO_REUSEPORT, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- // Bind the first socket to the loopback and take note of the selected port.
- auto addr = V4Loopback();
- ASSERT_THAT(bind(socket1->get(), AsSockAddr(&addr.addr), addr.addr_len),
- SyscallSucceeds());
- socklen_t addr_len = addr.addr_len;
- ASSERT_THAT(getsockname(socket1->get(), AsSockAddr(&addr.addr), &addr_len),
- SyscallSucceeds());
- EXPECT_EQ(addr_len, addr.addr_len);
-
- // Bind socket2 to the same address as socket1, also with REUSEADDR and
- // REUSEPORT.
- ASSERT_THAT(setsockopt(socket2->get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(setsockopt(socket2->get(), SOL_SOCKET, SO_REUSEPORT, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- ASSERT_THAT(bind(socket2->get(), AsSockAddr(&addr.addr), addr.addr_len),
- SyscallSucceeds());
-
- // Bind socket3 to the same address as socket1, only with REUSEADDR.
- ASSERT_THAT(setsockopt(socket3->get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(bind(socket3->get(), AsSockAddr(&addr.addr), addr.addr_len),
- SyscallSucceeds());
-}
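
The bind matrix exercised by the tests above can be summarized with a small helper. The sketch below uses raw POSIX calls; TryBind and the fixed port 50000 are hypothetical names chosen for illustration, not part of the test suite.

    #include <arpa/inet.h>
    #include <cerrno>
    #include <cstdio>
    #include <cstring>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <unistd.h>

    // Creates a UDP socket, applies the requested options, and attempts to bind
    // it to `addr`. Returns 0 on success or the bind() errno on failure. A
    // successfully bound fd is intentionally kept open so that later calls still
    // see the port as occupied.
    static int TryBind(const sockaddr_in& addr, bool reuseaddr, bool reuseport) {
      int fd = socket(AF_INET, SOCK_DGRAM, 0);
      int on = 1;
      if (reuseaddr) setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
      if (reuseport) setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &on, sizeof(on));
      if (bind(fd, reinterpret_cast<const sockaddr*>(&addr), sizeof(addr)) != 0) {
        int err = errno;
        close(fd);
        return err;
      }
      return 0;
    }

    int main() {
      sockaddr_in addr = {};
      addr.sin_family = AF_INET;
      addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
      addr.sin_port = htons(50000);  // Arbitrary fixed port for the sketch.

      // Mirrors BindReuseAddrReusePortConvertibleToReusePort: both options, then
      // REUSEPORT only (allowed), then REUSEADDR only (rejected).
      auto report = [](const char* what, int err) {
        std::printf("%-16s %s\n", what, err == 0 ? "bound" : std::strerror(err));
      };
      report("both options", TryBind(addr, true, true));
      report("REUSEPORT only", TryBind(addr, false, true));
      report("REUSEADDR only", TryBind(addr, true, false));  // EADDRINUSE.
      return 0;
    }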
-
-// Check that REUSEPORT takes precedence over REUSEADDR.
-TEST_P(IPv4UDPUnboundSocketTest, ReuseAddrReusePortDistribution) {
- auto receiver1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto receiver2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- ASSERT_THAT(setsockopt(receiver1->get(), SOL_SOCKET, SO_REUSEADDR,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(setsockopt(receiver1->get(), SOL_SOCKET, SO_REUSEPORT,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- // Bind the first socket to the loopback and take note of the selected port.
- auto addr = V4Loopback();
- ASSERT_THAT(bind(receiver1->get(), AsSockAddr(&addr.addr), addr.addr_len),
- SyscallSucceeds());
- socklen_t addr_len = addr.addr_len;
- ASSERT_THAT(getsockname(receiver1->get(), AsSockAddr(&addr.addr), &addr_len),
- SyscallSucceeds());
- EXPECT_EQ(addr_len, addr.addr_len);
-
- // Bind receiver2 to the same address as socket1, also with REUSEADDR and
- // REUSEPORT.
- ASSERT_THAT(setsockopt(receiver2->get(), SOL_SOCKET, SO_REUSEADDR,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(setsockopt(receiver2->get(), SOL_SOCKET, SO_REUSEPORT,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(bind(receiver2->get(), AsSockAddr(&addr.addr), addr.addr_len),
- SyscallSucceeds());
-
- constexpr int kMessageSize = 10;
-
- // Saving during each iteration of the following loop is too expensive.
- DisableSave ds;
-
- for (int i = 0; i < 100; ++i) {
- // Send a new message to the REUSEADDR/REUSEPORT group. We use a new sender
- // socket for each message so that a different ephemeral source port is used
- // every time. This ensures that we cycle through hashes.
- auto sender = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- char send_buf[kMessageSize] = {};
- EXPECT_THAT(RetryEINTR(sendto)(sender->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&addr.addr), addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
- }
-
- ds.reset();
-
- // Check that both receivers got messages. This checks that we are using load
- // balancing (REUSEPORT) instead of the most recently bound socket
- // (REUSEADDR).
- char recv_buf[kMessageSize] = {};
- EXPECT_THAT(
- RecvTimeout(receiver1->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),
- IsPosixErrorOkAndHolds(kMessageSize));
- EXPECT_THAT(
- RecvTimeout(receiver2->get(), recv_buf, sizeof(recv_buf), 1 /*timeout*/),
- IsPosixErrorOkAndHolds(kMessageSize));
-}
-
- // Test that the socket will receive the IP_PKTINFO control message.
-TEST_P(IPv4UDPUnboundSocketTest, SetAndReceiveIPPKTINFO) {
- // TODO(gvisor.dev/issue/1202): ioctl() is not supported by hostinet.
- SKIP_IF((IsRunningWithHostinet()));
-
- auto sender = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto receiver = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto sender_addr = V4Loopback();
- int level = SOL_IP;
- int type = IP_PKTINFO;
-
- ASSERT_THAT(bind(receiver->get(), AsSockAddr(&sender_addr.addr),
- sender_addr.addr_len),
- SyscallSucceeds());
- socklen_t sender_addr_len = sender_addr.addr_len;
- ASSERT_THAT(getsockname(receiver->get(), AsSockAddr(&sender_addr.addr),
- &sender_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(sender_addr_len, sender_addr.addr_len);
-
- auto receiver_addr = V4Loopback();
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&sender_addr.addr)->sin_port;
- ASSERT_THAT(connect(sender->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
-
- // Allow socket to receive control message.
- ASSERT_THAT(
- setsockopt(receiver->get(), level, type, &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- // Prepare message to send.
- constexpr size_t kDataLength = 1024;
- msghdr sent_msg = {};
- iovec sent_iov = {};
- char sent_data[kDataLength];
- sent_iov.iov_base = sent_data;
- sent_iov.iov_len = kDataLength;
- sent_msg.msg_iov = &sent_iov;
- sent_msg.msg_iovlen = 1;
- sent_msg.msg_flags = 0;
-
- ASSERT_THAT(RetryEINTR(sendmsg)(sender->get(), &sent_msg, 0),
- SyscallSucceedsWithValue(kDataLength));
-
- msghdr received_msg = {};
- iovec received_iov = {};
- char received_data[kDataLength];
- char received_cmsg_buf[CMSG_SPACE(sizeof(in_pktinfo))] = {};
- size_t cmsg_data_len = sizeof(in_pktinfo);
- received_iov.iov_base = received_data;
- received_iov.iov_len = kDataLength;
- received_msg.msg_iov = &received_iov;
- received_msg.msg_iovlen = 1;
- received_msg.msg_controllen = CMSG_LEN(cmsg_data_len);
- received_msg.msg_control = received_cmsg_buf;
-
- ASSERT_THAT(RecvMsgTimeout(receiver->get(), &received_msg, 1 /*timeout*/),
- IsPosixErrorOkAndHolds(kDataLength));
-
- cmsghdr* cmsg = CMSG_FIRSTHDR(&received_msg);
- ASSERT_NE(cmsg, nullptr);
- EXPECT_EQ(cmsg->cmsg_len, CMSG_LEN(cmsg_data_len));
- EXPECT_EQ(cmsg->cmsg_level, level);
- EXPECT_EQ(cmsg->cmsg_type, type);
-
- // Get loopback index.
- ifreq ifr = {};
- absl::SNPrintF(ifr.ifr_name, IFNAMSIZ, "lo");
- ASSERT_THAT(ioctl(sender->get(), SIOCGIFINDEX, &ifr), SyscallSucceeds());
- ASSERT_NE(ifr.ifr_ifindex, 0);
-
- // Check the data
- in_pktinfo received_pktinfo = {};
- memcpy(&received_pktinfo, CMSG_DATA(cmsg), sizeof(in_pktinfo));
- EXPECT_EQ(received_pktinfo.ipi_ifindex, ifr.ifr_ifindex);
- EXPECT_EQ(received_pktinfo.ipi_spec_dst.s_addr, htonl(INADDR_LOOPBACK));
- EXPECT_EQ(received_pktinfo.ipi_addr.s_addr, htonl(INADDR_LOOPBACK));
-}
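
A minimal sketch of the receive-side IP_PKTINFO handling shown above, as a standalone raw-POSIX helper (the name RecvWithPktInfo is hypothetical). It assumes the caller has already enabled IP_PKTINFO on the socket, as the test does, before the datagram arrives.

    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <cstring>

    // Receives one datagram on `fd` and, if the kernel attached an in_pktinfo
    // control message, copies it into `info`. Returns true when the cmsg was
    // found.
    static bool RecvWithPktInfo(int fd, char* buf, size_t buf_len, in_pktinfo* info) {
      iovec iov = {};
      iov.iov_base = buf;
      iov.iov_len = buf_len;

      char cmsg_buf[CMSG_SPACE(sizeof(in_pktinfo))] = {};
      msghdr msg = {};
      msg.msg_iov = &iov;
      msg.msg_iovlen = 1;
      msg.msg_control = cmsg_buf;
      msg.msg_controllen = sizeof(cmsg_buf);
      if (recvmsg(fd, &msg, 0) < 0) return false;

      for (cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); cmsg != nullptr;
           cmsg = CMSG_NXTHDR(&msg, cmsg)) {
        if (cmsg->cmsg_level == IPPROTO_IP && cmsg->cmsg_type == IP_PKTINFO) {
          // ipi_ifindex is the arrival interface, ipi_spec_dst the local address,
          // and ipi_addr the destination address from the IP header.
          std::memcpy(info, CMSG_DATA(cmsg), sizeof(*info));
          return true;
        }
      }
      return false;
    }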
-
- // Test that the socket will receive the IP_RECVORIGDSTADDR control message.
-TEST_P(IPv4UDPUnboundSocketTest, SetAndReceiveIPReceiveOrigDstAddr) {
- auto sender = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto receiver = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto receiver_addr = V4Loopback();
- int level = SOL_IP;
- int type = IP_RECVORIGDSTADDR;
-
- ASSERT_THAT(bind(receiver->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
-
- // Retrieve the port bound by the receiver.
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(receiver->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
-
- ASSERT_THAT(connect(sender->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
-
- // Get address and port bound by the sender.
- sockaddr_storage sender_addr_storage;
- socklen_t sender_addr_len = sizeof(sender_addr_storage);
- ASSERT_THAT(getsockname(sender->get(), AsSockAddr(&sender_addr_storage),
- &sender_addr_len),
- SyscallSucceeds());
- ASSERT_EQ(sender_addr_len, sizeof(struct sockaddr_in));
-
- // Enable IP_RECVORIGDSTADDR on the socket so that we get the original
- // destination address of the datagram as ancillary data in the control message.
- ASSERT_THAT(
- setsockopt(receiver->get(), level, type, &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- // Prepare message to send.
- constexpr size_t kDataLength = 1024;
- msghdr sent_msg = {};
- iovec sent_iov = {};
- char sent_data[kDataLength];
- sent_iov.iov_base = sent_data;
- sent_iov.iov_len = kDataLength;
- sent_msg.msg_iov = &sent_iov;
- sent_msg.msg_iovlen = 1;
- sent_msg.msg_flags = 0;
-
- ASSERT_THAT(RetryEINTR(sendmsg)(sender->get(), &sent_msg, 0),
- SyscallSucceedsWithValue(kDataLength));
-
- msghdr received_msg = {};
- iovec received_iov = {};
- char received_data[kDataLength];
- char received_cmsg_buf[CMSG_SPACE(sizeof(sockaddr_in))] = {};
- size_t cmsg_data_len = sizeof(sockaddr_in);
- received_iov.iov_base = received_data;
- received_iov.iov_len = kDataLength;
- received_msg.msg_iov = &received_iov;
- received_msg.msg_iovlen = 1;
- received_msg.msg_controllen = CMSG_LEN(cmsg_data_len);
- received_msg.msg_control = received_cmsg_buf;
-
- ASSERT_THAT(RecvMsgTimeout(receiver->get(), &received_msg, 1 /*timeout*/),
- IsPosixErrorOkAndHolds(kDataLength));
-
- cmsghdr* cmsg = CMSG_FIRSTHDR(&received_msg);
- ASSERT_NE(cmsg, nullptr);
- EXPECT_EQ(cmsg->cmsg_len, CMSG_LEN(cmsg_data_len));
- EXPECT_EQ(cmsg->cmsg_level, level);
- EXPECT_EQ(cmsg->cmsg_type, type);
-
- // Check the data
- sockaddr_in received_addr = {};
- memcpy(&received_addr, CMSG_DATA(cmsg), sizeof(received_addr));
- auto orig_receiver_addr = reinterpret_cast<sockaddr_in*>(&receiver_addr.addr);
- EXPECT_EQ(received_addr.sin_addr.s_addr, orig_receiver_addr->sin_addr.s_addr);
- EXPECT_EQ(received_addr.sin_port, orig_receiver_addr->sin_port);
-}
-
-// Check that setting SO_RCVBUF below min is clamped to the minimum
-// receive buffer size.
-TEST_P(IPv4UDPUnboundSocketTest, SetSocketRecvBufBelowMin) {
- auto s = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Discover minimum buffer size by setting it to zero.
- constexpr int kRcvBufSz = 0;
- ASSERT_THAT(setsockopt(s->get(), SOL_SOCKET, SO_RCVBUF, &kRcvBufSz,
- sizeof(kRcvBufSz)),
- SyscallSucceeds());
-
- int min = 0;
- socklen_t min_len = sizeof(min);
- ASSERT_THAT(getsockopt(s->get(), SOL_SOCKET, SO_RCVBUF, &min, &min_len),
- SyscallSucceeds());
-
- // Linux doubles the value so let's use a value that when doubled will still
- // be smaller than min.
- int below_min = min / 2 - 1;
- ASSERT_THAT(setsockopt(s->get(), SOL_SOCKET, SO_RCVBUF, &below_min,
- sizeof(below_min)),
- SyscallSucceeds());
-
- int val = 0;
- socklen_t val_len = sizeof(val);
- ASSERT_THAT(getsockopt(s->get(), SOL_SOCKET, SO_RCVBUF, &val, &val_len),
- SyscallSucceeds());
-
- ASSERT_EQ(min, val);
-}
-
-// Check that setting SO_RCVBUF above max is clamped to the maximum
-// receive buffer size.
-TEST_P(IPv4UDPUnboundSocketTest, SetSocketRecvBufAboveMax) {
- auto s = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Discover the maximum buffer size by setting it to a really large value.
- constexpr int kRcvBufSz = 0xffffffff;
- ASSERT_THAT(setsockopt(s->get(), SOL_SOCKET, SO_RCVBUF, &kRcvBufSz,
- sizeof(kRcvBufSz)),
- SyscallSucceeds());
-
- int max = 0;
- socklen_t max_len = sizeof(max);
- ASSERT_THAT(getsockopt(s->get(), SOL_SOCKET, SO_RCVBUF, &max, &max_len),
- SyscallSucceeds());
-
- int above_max = max + 1;
- ASSERT_THAT(setsockopt(s->get(), SOL_SOCKET, SO_RCVBUF, &above_max,
- sizeof(above_max)),
- SyscallSucceeds());
-
- int val = 0;
- socklen_t val_len = sizeof(val);
- ASSERT_THAT(getsockopt(s->get(), SOL_SOCKET, SO_RCVBUF, &val, &val_len),
- SyscallSucceeds());
- ASSERT_EQ(max, val);
-}
-
-// Check that setting SO_RCVBUF min <= rcvBufSz <= max is honored.
-TEST_P(IPv4UDPUnboundSocketTest, SetSocketRecvBuf) {
- auto s = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- int max = 0;
- int min = 0;
- {
- // Discover the maximum buffer size by setting it to a really large value.
- constexpr int kRcvBufSz = 0xffffffff;
- ASSERT_THAT(setsockopt(s->get(), SOL_SOCKET, SO_RCVBUF, &kRcvBufSz,
- sizeof(kRcvBufSz)),
- SyscallSucceeds());
-
- max = 0;
- socklen_t max_len = sizeof(max);
- ASSERT_THAT(getsockopt(s->get(), SOL_SOCKET, SO_RCVBUF, &max, &max_len),
- SyscallSucceeds());
- }
-
- {
- // Discover minimum buffer size by setting it to zero.
- constexpr int kRcvBufSz = 0;
- ASSERT_THAT(setsockopt(s->get(), SOL_SOCKET, SO_RCVBUF, &kRcvBufSz,
- sizeof(kRcvBufSz)),
- SyscallSucceeds());
-
- socklen_t min_len = sizeof(min);
- ASSERT_THAT(getsockopt(s->get(), SOL_SOCKET, SO_RCVBUF, &min, &min_len),
- SyscallSucceeds());
- }
-
- int quarter_sz = min + (max - min) / 4;
- ASSERT_THAT(setsockopt(s->get(), SOL_SOCKET, SO_RCVBUF, &quarter_sz,
- sizeof(quarter_sz)),
- SyscallSucceeds());
-
- int val = 0;
- socklen_t val_len = sizeof(val);
- ASSERT_THAT(getsockopt(s->get(), SOL_SOCKET, SO_RCVBUF, &val, &val_len),
- SyscallSucceeds());
-
- // Linux doubles the value set by SO_SNDBUF/SO_RCVBUF.
- quarter_sz *= 2;
- ASSERT_EQ(quarter_sz, val);
-}
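
A standalone raw-POSIX sketch of the SO_RCVBUF behavior the three tests above pin down: out-of-range requests are clamped to the kernel's minimum and maximum, and Linux reports back double the value it accepted for an in-range request.

    #include <sys/socket.h>
    #include <unistd.h>
    #include <cstdio>

    int main() {
      int fd = socket(AF_INET, SOCK_DGRAM, 0);

      // Requesting 0 clamps to the kernel minimum; requesting a huge value clamps
      // to the kernel maximum. Read both limits back.
      int zero = 0, huge = 0x7fffffff, min = 0, max = 0;
      socklen_t len = sizeof(int);
      setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &zero, sizeof(zero));
      getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &min, &len);
      len = sizeof(int);
      setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &huge, sizeof(huge));
      getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &max, &len);

      // An in-range request is honored, but getsockopt() reports it doubled,
      // since Linux reserves space for bookkeeping overhead alongside the payload.
      int req = min + (max - min) / 4;
      int got = 0;
      len = sizeof(int);
      setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &req, sizeof(req));
      getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &got, &len);
      std::printf("min=%d max=%d requested=%d reported=%d (2x requested)\n",
                  min, max, req, got);
      close(fd);
      return 0;
    }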
-
-// Check that setting SO_SNDBUF below min is clamped to the minimum
-// send buffer size.
-TEST_P(IPv4UDPUnboundSocketTest, SetSocketSendBufBelowMin) {
- auto s = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Discover minimum buffer size by setting it to zero.
- constexpr int kSndBufSz = 0;
- ASSERT_THAT(setsockopt(s->get(), SOL_SOCKET, SO_SNDBUF, &kSndBufSz,
- sizeof(kSndBufSz)),
- SyscallSucceeds());
-
- int min = 0;
- socklen_t min_len = sizeof(min);
- ASSERT_THAT(getsockopt(s->get(), SOL_SOCKET, SO_SNDBUF, &min, &min_len),
- SyscallSucceeds());
-
- // Linux doubles the value so let's use a value that when doubled will still
- // be smaller than min.
- int below_min = min / 2 - 1;
- ASSERT_THAT(setsockopt(s->get(), SOL_SOCKET, SO_SNDBUF, &below_min,
- sizeof(below_min)),
- SyscallSucceeds());
-
- int val = 0;
- socklen_t val_len = sizeof(val);
- ASSERT_THAT(getsockopt(s->get(), SOL_SOCKET, SO_SNDBUF, &val, &val_len),
- SyscallSucceeds());
-
- ASSERT_EQ(min, val);
-}
-
-// Check that setting SO_SNDBUF above max is clamped to the maximum
-// send buffer size.
-TEST_P(IPv4UDPUnboundSocketTest, SetSocketSendBufAboveMax) {
- auto s = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Discover the maximum buffer size by setting it to a really large value.
- constexpr int kSndBufSz = 0xffffffff;
- ASSERT_THAT(setsockopt(s->get(), SOL_SOCKET, SO_SNDBUF, &kSndBufSz,
- sizeof(kSndBufSz)),
- SyscallSucceeds());
-
- int max = 0;
- socklen_t max_len = sizeof(max);
- ASSERT_THAT(getsockopt(s->get(), SOL_SOCKET, SO_SNDBUF, &max, &max_len),
- SyscallSucceeds());
-
- int above_max = max + 1;
- ASSERT_THAT(setsockopt(s->get(), SOL_SOCKET, SO_SNDBUF, &above_max,
- sizeof(above_max)),
- SyscallSucceeds());
-
- int val = 0;
- socklen_t val_len = sizeof(val);
- ASSERT_THAT(getsockopt(s->get(), SOL_SOCKET, SO_SNDBUF, &val, &val_len),
- SyscallSucceeds());
- ASSERT_EQ(max, val);
-}
-
-// Check that setting SO_SNDBUF min <= kSndBufSz <= max is honored.
-TEST_P(IPv4UDPUnboundSocketTest, SetSocketSendBuf) {
- auto s = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- int max = 0;
- int min = 0;
- {
- // Discover the maximum buffer size by setting it to a really large value.
- constexpr int kSndBufSz = 0xffffffff;
- ASSERT_THAT(setsockopt(s->get(), SOL_SOCKET, SO_SNDBUF, &kSndBufSz,
- sizeof(kSndBufSz)),
- SyscallSucceeds());
-
- max = 0;
- socklen_t max_len = sizeof(max);
- ASSERT_THAT(getsockopt(s->get(), SOL_SOCKET, SO_SNDBUF, &max, &max_len),
- SyscallSucceeds());
- }
-
- {
- // Discover minimum buffer size by setting it to zero.
- constexpr int kSndBufSz = 0;
- ASSERT_THAT(setsockopt(s->get(), SOL_SOCKET, SO_SNDBUF, &kSndBufSz,
- sizeof(kSndBufSz)),
- SyscallSucceeds());
-
- socklen_t min_len = sizeof(min);
- ASSERT_THAT(getsockopt(s->get(), SOL_SOCKET, SO_SNDBUF, &min, &min_len),
- SyscallSucceeds());
- }
-
- int quarter_sz = min + (max - min) / 4;
- ASSERT_THAT(setsockopt(s->get(), SOL_SOCKET, SO_SNDBUF, &quarter_sz,
- sizeof(quarter_sz)),
- SyscallSucceeds());
-
- int val = 0;
- socklen_t val_len = sizeof(val);
- ASSERT_THAT(getsockopt(s->get(), SOL_SOCKET, SO_SNDBUF, &val, &val_len),
- SyscallSucceeds());
-
- quarter_sz *= 2;
- ASSERT_EQ(quarter_sz, val);
-}
-
-TEST_P(IPv4UDPUnboundSocketTest, IpMulticastIPPacketInfo) {
- auto sender_socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto receiver_socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Bind the first FD to the loopback. This is an alternative to
- // IP_MULTICAST_IF for setting the default send interface.
- auto sender_addr = V4Loopback();
- ASSERT_THAT(bind(sender_socket->get(), AsSockAddr(&sender_addr.addr),
- sender_addr.addr_len),
- SyscallSucceeds());
-
- // Bind the second FD to the v4 any address to ensure that we can receive the
- // multicast packet.
- auto receiver_addr = V4Any();
- ASSERT_THAT(bind(receiver_socket->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(receiver_socket->get(),
- AsSockAddr(&receiver_addr.addr), &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
-
- // Register to receive multicast packets.
- ip_mreqn group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- group.imr_ifindex = ASSERT_NO_ERRNO_AND_VALUE(InterfaceIndex("lo"));
- ASSERT_THAT(setsockopt(receiver_socket->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP,
- &group, sizeof(group)),
- SyscallSucceeds());
-
- // Register to receive IP packet info.
- const int one = 1;
- ASSERT_THAT(setsockopt(receiver_socket->get(), IPPROTO_IP, IP_PKTINFO, &one,
- sizeof(one)),
- SyscallSucceeds());
-
- // Send a multicast packet.
- auto send_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&send_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(
- RetryEINTR(sendto)(sender_socket->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&send_addr.addr), send_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we received the multicast packet.
- msghdr recv_msg = {};
- iovec recv_iov = {};
- char recv_buf[sizeof(send_buf)];
- char recv_cmsg_buf[CMSG_SPACE(sizeof(in_pktinfo))] = {};
- size_t cmsg_data_len = sizeof(in_pktinfo);
- recv_iov.iov_base = recv_buf;
- recv_iov.iov_len = sizeof(recv_buf);
- recv_msg.msg_iov = &recv_iov;
- recv_msg.msg_iovlen = 1;
- recv_msg.msg_controllen = CMSG_LEN(cmsg_data_len);
- recv_msg.msg_control = recv_cmsg_buf;
- ASSERT_THAT(RetryEINTR(recvmsg)(receiver_socket->get(), &recv_msg, 0),
- SyscallSucceedsWithValue(sizeof(send_buf)));
- EXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));
-
- // Check the IP_PKTINFO control message.
- cmsghdr* cmsg = CMSG_FIRSTHDR(&recv_msg);
- ASSERT_NE(cmsg, nullptr);
- EXPECT_EQ(cmsg->cmsg_len, CMSG_LEN(cmsg_data_len));
- EXPECT_EQ(cmsg->cmsg_level, IPPROTO_IP);
- EXPECT_EQ(cmsg->cmsg_type, IP_PKTINFO);
-
- // Get loopback index.
- ifreq ifr = {};
- absl::SNPrintF(ifr.ifr_name, IFNAMSIZ, "lo");
- ASSERT_THAT(ioctl(receiver_socket->get(), SIOCGIFINDEX, &ifr),
- SyscallSucceeds());
- ASSERT_NE(ifr.ifr_ifindex, 0);
-
- in_pktinfo received_pktinfo = {};
- memcpy(&received_pktinfo, CMSG_DATA(cmsg), sizeof(in_pktinfo));
- EXPECT_EQ(received_pktinfo.ipi_ifindex, ifr.ifr_ifindex);
- if (IsRunningOnGvisor()) {
- // This should actually be a unicast address assigned to the interface.
- //
- // TODO(gvisor.dev/issue/3556): This check is validating incorrect
- // behaviour. We still include the test so that once the bug is
- // resolved, this test will start to fail and the individual tasked
- // with fixing this bug knows to also fix this test :).
- EXPECT_EQ(received_pktinfo.ipi_spec_dst.s_addr, group.imr_multiaddr.s_addr);
- } else {
- EXPECT_EQ(received_pktinfo.ipi_spec_dst.s_addr, htonl(INADDR_LOOPBACK));
- }
- EXPECT_EQ(received_pktinfo.ipi_addr.s_addr, group.imr_multiaddr.s_addr);
-}
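
A standalone raw-POSIX sketch of the multicast receive setup used above, with if_nametoindex standing in for the fixture's InterfaceIndex helper; the group address 224.0.2.1 is an arbitrary placeholder rather than the suite's kMulticastAddress constant.

    #include <arpa/inet.h>
    #include <net/if.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <unistd.h>
    #include <cstdio>

    int main() {
      int fd = socket(AF_INET, SOCK_DGRAM, 0);

      // Join the group on the loopback interface, selected by index via ip_mreqn.
      ip_mreqn group = {};
      group.imr_multiaddr.s_addr = inet_addr("224.0.2.1");  // Placeholder group.
      group.imr_ifindex = if_nametoindex("lo");
      if (setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &group, sizeof(group)) != 0) {
        std::perror("IP_ADD_MEMBERSHIP");
        return 1;
      }

      // Ask for IP_PKTINFO so recvmsg() reports the arrival interface and the
      // destination address of each datagram.
      int on = 1;
      setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &on, sizeof(on));
      std::printf("joined group on ifindex %d\n", group.imr_ifindex);
      close(fd);
      return 0;
    }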
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_ipv4_udp_unbound.h b/test/syscalls/linux/socket_ipv4_udp_unbound.h
deleted file mode 100644
index f64c57645..000000000
--- a/test/syscalls/linux/socket_ipv4_udp_unbound.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_LINUX_SOCKET_IPV4_UDP_UNBOUND_H_
-#define GVISOR_TEST_SYSCALLS_LINUX_SOCKET_IPV4_UDP_UNBOUND_H_
-
-#include "test/syscalls/linux/socket_test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Test fixture for tests that apply to IPv4 UDP sockets.
-using IPv4UDPUnboundSocketTest = SimpleSocketTest;
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_LINUX_SOCKET_IPV4_UDP_UNBOUND_H_
diff --git a/test/syscalls/linux/socket_ipv4_udp_unbound_external_networking.cc b/test/syscalls/linux/socket_ipv4_udp_unbound_external_networking.cc
deleted file mode 100644
index c6e775b2a..000000000
--- a/test/syscalls/linux/socket_ipv4_udp_unbound_external_networking.cc
+++ /dev/null
@@ -1,994 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/socket_ipv4_udp_unbound_external_networking.h"
-
-namespace gvisor {
-namespace testing {
-
-TestAddress V4EmptyAddress() {
- TestAddress t("V4Empty");
- t.addr.ss_family = AF_INET;
- t.addr_len = sizeof(sockaddr_in);
- return t;
-}
-
- // Verifies that a broadcast UDP packet will arrive at all UDP sockets bound
- // to the destination port number.
-TEST_P(IPv4UDPUnboundExternalNetworkingSocketTest,
- UDPBroadcastReceivedOnExpectedPort) {
- SKIP_IF(!found_net_interfaces_);
- auto sender = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto rcvr1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto rcvr2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto norcv = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Enable SO_BROADCAST on the sending socket.
- ASSERT_THAT(setsockopt(sender->get(), SOL_SOCKET, SO_BROADCAST, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceedsWithValue(0));
-
- // Enable SO_REUSEPORT on the receiving sockets so that they may both be bound
- // to the broadcast message's destination port.
- ASSERT_THAT(setsockopt(rcvr1->get(), SOL_SOCKET, SO_REUSEPORT, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceedsWithValue(0));
- ASSERT_THAT(setsockopt(rcvr2->get(), SOL_SOCKET, SO_REUSEPORT, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceedsWithValue(0));
-
- // Bind the first socket to the ANY address and let the system assign a port.
- auto rcv1_addr = V4Any();
- ASSERT_THAT(
- bind(rcvr1->get(), AsSockAddr(&rcv1_addr.addr), rcv1_addr.addr_len),
- SyscallSucceedsWithValue(0));
- // Retrieve port number from first socket so that it can be bound to the
- // second socket.
- socklen_t rcv_addr_sz = rcv1_addr.addr_len;
- ASSERT_THAT(
- getsockname(rcvr1->get(), AsSockAddr(&rcv1_addr.addr), &rcv_addr_sz),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(rcv_addr_sz, rcv1_addr.addr_len);
- auto port = reinterpret_cast<sockaddr_in*>(&rcv1_addr.addr)->sin_port;
-
- // Bind the second socket to the same address:port as the first.
- ASSERT_THAT(bind(rcvr2->get(), AsSockAddr(&rcv1_addr.addr), rcv_addr_sz),
- SyscallSucceedsWithValue(0));
-
- // Bind the non-receiving socket to an ephemeral port.
- auto norecv_addr = V4Any();
- ASSERT_THAT(
- bind(norcv->get(), AsSockAddr(&norecv_addr.addr), norecv_addr.addr_len),
- SyscallSucceedsWithValue(0));
-
- // Broadcast a test message.
- auto dst_addr = V4Broadcast();
- reinterpret_cast<sockaddr_in*>(&dst_addr.addr)->sin_port = port;
- constexpr char kTestMsg[] = "hello, world";
- EXPECT_THAT(sendto(sender->get(), kTestMsg, sizeof(kTestMsg), 0,
- AsSockAddr(&dst_addr.addr), dst_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(kTestMsg)));
-
- // Verify that the receiving sockets received the test message.
- char buf[sizeof(kTestMsg)] = {};
- EXPECT_THAT(recv(rcvr1->get(), buf, sizeof(buf), 0),
- SyscallSucceedsWithValue(sizeof(kTestMsg)));
- EXPECT_EQ(0, memcmp(buf, kTestMsg, sizeof(kTestMsg)));
- memset(buf, 0, sizeof(buf));
- EXPECT_THAT(recv(rcvr2->get(), buf, sizeof(buf), 0),
- SyscallSucceedsWithValue(sizeof(kTestMsg)));
- EXPECT_EQ(0, memcmp(buf, kTestMsg, sizeof(kTestMsg)));
-
- // Verify that the non-receiving socket did not receive the test message.
- memset(buf, 0, sizeof(buf));
- EXPECT_THAT(RetryEINTR(recv)(norcv->get(), buf, sizeof(buf), MSG_DONTWAIT),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-// Verifies that a broadcast UDP packet will arrive at all UDP sockets bound to
-// the destination port number and either INADDR_ANY or INADDR_BROADCAST, but
-// not a unicast address.
-TEST_P(IPv4UDPUnboundExternalNetworkingSocketTest,
- UDPBroadcastReceivedOnExpectedAddresses) {
- SKIP_IF(!found_net_interfaces_);
-
- auto sender = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto rcvr1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto rcvr2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto norcv = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Enable SO_BROADCAST on the sending socket.
- ASSERT_THAT(setsockopt(sender->get(), SOL_SOCKET, SO_BROADCAST, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceedsWithValue(0));
-
- // Enable SO_REUSEPORT on all sockets so that they may all be bound to the
- // broadcast message's destination port.
- ASSERT_THAT(setsockopt(rcvr1->get(), SOL_SOCKET, SO_REUSEPORT, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceedsWithValue(0));
- ASSERT_THAT(setsockopt(rcvr2->get(), SOL_SOCKET, SO_REUSEPORT, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceedsWithValue(0));
- ASSERT_THAT(setsockopt(norcv->get(), SOL_SOCKET, SO_REUSEPORT, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceedsWithValue(0));
-
- // Bind the first socket to the ANY address and let the system assign a port.
- auto rcv1_addr = V4Any();
- ASSERT_THAT(
- bind(rcvr1->get(), AsSockAddr(&rcv1_addr.addr), rcv1_addr.addr_len),
- SyscallSucceedsWithValue(0));
- // Retrieve port number from first socket so that it can be bound to the
- // second socket.
- socklen_t rcv_addr_sz = rcv1_addr.addr_len;
- ASSERT_THAT(
- getsockname(rcvr1->get(), AsSockAddr(&rcv1_addr.addr), &rcv_addr_sz),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(rcv_addr_sz, rcv1_addr.addr_len);
- auto port = reinterpret_cast<sockaddr_in*>(&rcv1_addr.addr)->sin_port;
-
- // Bind the second socket to the broadcast address.
- auto rcv2_addr = V4Broadcast();
- reinterpret_cast<sockaddr_in*>(&rcv2_addr.addr)->sin_port = port;
- ASSERT_THAT(
- bind(rcvr2->get(), AsSockAddr(&rcv2_addr.addr), rcv2_addr.addr_len),
- SyscallSucceedsWithValue(0));
-
- // Bind the non-receiving socket to the unicast ethernet address.
- auto norecv_addr = rcv1_addr;
- reinterpret_cast<sockaddr_in*>(&norecv_addr.addr)->sin_addr =
- eth_if_addr_.sin_addr;
- ASSERT_THAT(
- bind(norcv->get(), AsSockAddr(&norecv_addr.addr), norecv_addr.addr_len),
- SyscallSucceedsWithValue(0));
-
- // Broadcast a test message.
- auto dst_addr = V4Broadcast();
- reinterpret_cast<sockaddr_in*>(&dst_addr.addr)->sin_port = port;
- constexpr char kTestMsg[] = "hello, world";
- EXPECT_THAT(sendto(sender->get(), kTestMsg, sizeof(kTestMsg), 0,
- AsSockAddr(&dst_addr.addr), dst_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(kTestMsg)));
-
- // Verify that the receiving sockets received the test message.
- char buf[sizeof(kTestMsg)] = {};
- EXPECT_THAT(recv(rcvr1->get(), buf, sizeof(buf), 0),
- SyscallSucceedsWithValue(sizeof(kTestMsg)));
- EXPECT_EQ(0, memcmp(buf, kTestMsg, sizeof(kTestMsg)));
- memset(buf, 0, sizeof(buf));
- EXPECT_THAT(recv(rcvr2->get(), buf, sizeof(buf), 0),
- SyscallSucceedsWithValue(sizeof(kTestMsg)));
- EXPECT_EQ(0, memcmp(buf, kTestMsg, sizeof(kTestMsg)));
-
- // Verify that the non-receiving socket did not receive the test message.
- memset(buf, 0, sizeof(buf));
- EXPECT_THAT(RetryEINTR(recv)(norcv->get(), buf, sizeof(buf), MSG_DONTWAIT),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-// Verifies that a UDP broadcast can be sent and then received back on the same
-// socket that is bound to the broadcast address (255.255.255.255).
-// FIXME(b/141938460): This can be combined with the next test
-// (UDPBroadcastSendRecvOnSocketBoundToAny).
-TEST_P(IPv4UDPUnboundExternalNetworkingSocketTest,
- UDPBroadcastSendRecvOnSocketBoundToBroadcast) {
- SKIP_IF(!found_net_interfaces_);
- auto sender = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Enable SO_BROADCAST.
- ASSERT_THAT(setsockopt(sender->get(), SOL_SOCKET, SO_BROADCAST, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceedsWithValue(0));
-
- // Bind the sender to the broadcast address.
- auto src_addr = V4Broadcast();
- ASSERT_THAT(
- bind(sender->get(), AsSockAddr(&src_addr.addr), src_addr.addr_len),
- SyscallSucceedsWithValue(0));
- socklen_t src_sz = src_addr.addr_len;
- ASSERT_THAT(getsockname(sender->get(), AsSockAddr(&src_addr.addr), &src_sz),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(src_sz, src_addr.addr_len);
-
- // Send the message.
- auto dst_addr = V4Broadcast();
- reinterpret_cast<sockaddr_in*>(&dst_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&src_addr.addr)->sin_port;
- constexpr char kTestMsg[] = "hello, world";
- EXPECT_THAT(sendto(sender->get(), kTestMsg, sizeof(kTestMsg), 0,
- AsSockAddr(&dst_addr.addr), dst_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(kTestMsg)));
-
- // Verify that the message was received.
- char buf[sizeof(kTestMsg)] = {};
- EXPECT_THAT(RetryEINTR(recv)(sender->get(), buf, sizeof(buf), 0),
- SyscallSucceedsWithValue(sizeof(kTestMsg)));
- EXPECT_EQ(0, memcmp(buf, kTestMsg, sizeof(kTestMsg)));
-}
-
-// Verifies that a UDP broadcast can be sent and then received back on the same
-// socket that is bound to the ANY address (0.0.0.0).
-// FIXME(b/141938460): This can be combined with the previous test
-// (UDPBroadcastSendRecvOnSocketBoundToBroadcast).
-TEST_P(IPv4UDPUnboundExternalNetworkingSocketTest,
- UDPBroadcastSendRecvOnSocketBoundToAny) {
- SKIP_IF(!found_net_interfaces_);
- auto sender = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Enable SO_BROADCAST.
- ASSERT_THAT(setsockopt(sender->get(), SOL_SOCKET, SO_BROADCAST, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceedsWithValue(0));
-
- // Bind the sender to the ANY address.
- auto src_addr = V4Any();
- ASSERT_THAT(
- bind(sender->get(), AsSockAddr(&src_addr.addr), src_addr.addr_len),
- SyscallSucceedsWithValue(0));
- socklen_t src_sz = src_addr.addr_len;
- ASSERT_THAT(getsockname(sender->get(), AsSockAddr(&src_addr.addr), &src_sz),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(src_sz, src_addr.addr_len);
-
- // Send the message.
- auto dst_addr = V4Broadcast();
- reinterpret_cast<sockaddr_in*>(&dst_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&src_addr.addr)->sin_port;
- constexpr char kTestMsg[] = "hello, world";
- EXPECT_THAT(sendto(sender->get(), kTestMsg, sizeof(kTestMsg), 0,
- AsSockAddr(&dst_addr.addr), dst_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(kTestMsg)));
-
- // Verify that the message was received.
- char buf[sizeof(kTestMsg)] = {};
- EXPECT_THAT(RetryEINTR(recv)(sender->get(), buf, sizeof(buf), 0),
- SyscallSucceedsWithValue(sizeof(kTestMsg)));
- EXPECT_EQ(0, memcmp(buf, kTestMsg, sizeof(kTestMsg)));
-}
-
-// Verifies that a UDP broadcast fails to send on a socket with SO_BROADCAST
-// disabled.
-TEST_P(IPv4UDPUnboundExternalNetworkingSocketTest, TestSendBroadcast) {
- SKIP_IF(!found_net_interfaces_);
- auto sender = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Broadcast a test message without having enabled SO_BROADCAST on the sending
- // socket.
- auto addr = V4Broadcast();
- reinterpret_cast<sockaddr_in*>(&addr.addr)->sin_port = htons(12345);
- constexpr char kTestMsg[] = "hello, world";
-
- EXPECT_THAT(sendto(sender->get(), kTestMsg, sizeof(kTestMsg), 0,
- AsSockAddr(&addr.addr), addr.addr_len),
- SyscallFailsWithErrno(EACCES));
-}
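
A standalone raw-POSIX sketch of the SO_BROADCAST gate the test above checks: the same sendto() that fails with EACCES succeeds once the option is enabled (assuming the host has at least one broadcast-capable interface). The destination port 12345 is the same arbitrary value the test uses.

    #include <arpa/inet.h>
    #include <cerrno>
    #include <cstdio>
    #include <cstring>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main() {
      int fd = socket(AF_INET, SOCK_DGRAM, 0);
      sockaddr_in dst = {};
      dst.sin_family = AF_INET;
      dst.sin_addr.s_addr = htonl(INADDR_BROADCAST);
      dst.sin_port = htons(12345);  // Arbitrary port, as in the test above.
      const char msg[] = "hello, world";

      // Without SO_BROADCAST the kernel refuses to send to 255.255.255.255.
      if (sendto(fd, msg, sizeof(msg), 0, reinterpret_cast<sockaddr*>(&dst),
                 sizeof(dst)) < 0) {
        std::printf("without SO_BROADCAST: %s\n", std::strerror(errno));  // EACCES
      }

      // With SO_BROADCAST enabled, the same send is allowed.
      int on = 1;
      setsockopt(fd, SOL_SOCKET, SO_BROADCAST, &on, sizeof(on));
      ssize_t n = sendto(fd, msg, sizeof(msg), 0, reinterpret_cast<sockaddr*>(&dst),
                         sizeof(dst));
      std::printf("with SO_BROADCAST: sendto returned %zd\n", n);
      close(fd);
      return 0;
    }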
-
- // Verifies that a UDP unicast sent on an unbound socket reaches its
- // destination.
-TEST_P(IPv4UDPUnboundExternalNetworkingSocketTest, TestSendUnicastOnUnbound) {
- auto sender = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto rcvr = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Bind the receiver and retrieve its address and port number.
- sockaddr_in addr = {};
- addr.sin_family = AF_INET;
- addr.sin_addr.s_addr = htonl(INADDR_ANY);
- addr.sin_port = htons(0);
- ASSERT_THAT(bind(rcvr->get(), AsSockAddr(&addr), sizeof(addr)),
- SyscallSucceedsWithValue(0));
- memset(&addr, 0, sizeof(addr));
- socklen_t addr_sz = sizeof(addr);
- ASSERT_THAT(getsockname(rcvr->get(), AsSockAddr(&addr), &addr_sz),
- SyscallSucceedsWithValue(0));
-
- // Send a test message to the receiver.
- constexpr char kTestMsg[] = "hello, world";
- ASSERT_THAT(sendto(sender->get(), kTestMsg, sizeof(kTestMsg), 0,
- AsSockAddr(&addr), addr_sz),
- SyscallSucceedsWithValue(sizeof(kTestMsg)));
- char buf[sizeof(kTestMsg)] = {};
- ASSERT_THAT(recv(rcvr->get(), buf, sizeof(buf), 0),
- SyscallSucceedsWithValue(sizeof(kTestMsg)));
-}
-
-// Check that multicast packets won't be delivered to the sending socket with no
-// set interface or group membership.
-TEST_P(IPv4UDPUnboundExternalNetworkingSocketTest,
- TestSendMulticastSelfNoGroup) {
- // FIXME(b/125485338): A group membership is not required for external
- // multicast on gVisor.
- SKIP_IF(IsRunningOnGvisor());
-
- SKIP_IF(!found_net_interfaces_);
-
- auto socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- auto bind_addr = V4Any();
- ASSERT_THAT(
- bind(socket->get(), AsSockAddr(&bind_addr.addr), bind_addr.addr_len),
- SyscallSucceeds());
- socklen_t bind_addr_len = bind_addr.addr_len;
- ASSERT_THAT(
- getsockname(socket->get(), AsSockAddr(&bind_addr.addr), &bind_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(bind_addr_len, bind_addr.addr_len);
-
- // Send a multicast packet.
- auto send_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&send_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&bind_addr.addr)->sin_port;
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(
- RetryEINTR(sendto)(socket->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&send_addr.addr), send_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we did not receive the multicast packet.
- char recv_buf[sizeof(send_buf)] = {};
- ASSERT_THAT(
- RetryEINTR(recv)(socket->get(), recv_buf, sizeof(recv_buf), MSG_DONTWAIT),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-// Check that multicast packets will be delivered to the sending socket without
-// setting an interface.
-TEST_P(IPv4UDPUnboundExternalNetworkingSocketTest, TestSendMulticastSelf) {
- SKIP_IF(!found_net_interfaces_);
- auto socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- auto bind_addr = V4Any();
- ASSERT_THAT(
- bind(socket->get(), AsSockAddr(&bind_addr.addr), bind_addr.addr_len),
- SyscallSucceeds());
- socklen_t bind_addr_len = bind_addr.addr_len;
- ASSERT_THAT(
- getsockname(socket->get(), AsSockAddr(&bind_addr.addr), &bind_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(bind_addr_len, bind_addr.addr_len);
-
- // Register to receive multicast packets.
- ip_mreq group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- ASSERT_THAT(setsockopt(socket->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallSucceeds());
-
- // Send a multicast packet.
- auto send_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&send_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&bind_addr.addr)->sin_port;
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(
- RetryEINTR(sendto)(socket->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&send_addr.addr), send_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we received the multicast packet.
- char recv_buf[sizeof(send_buf)] = {};
- ASSERT_THAT(RetryEINTR(recv)(socket->get(), recv_buf, sizeof(recv_buf), 0),
- SyscallSucceedsWithValue(sizeof(recv_buf)));
-
- EXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));
-}
-
-// Check that multicast packets won't be delivered to the sending socket with no
-// set interface and IP_MULTICAST_LOOP disabled.
-TEST_P(IPv4UDPUnboundExternalNetworkingSocketTest,
- TestSendMulticastSelfLoopOff) {
- SKIP_IF(!found_net_interfaces_);
- auto socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- auto bind_addr = V4Any();
- ASSERT_THAT(
- bind(socket->get(), AsSockAddr(&bind_addr.addr), bind_addr.addr_len),
- SyscallSucceeds());
- socklen_t bind_addr_len = bind_addr.addr_len;
- ASSERT_THAT(
- getsockname(socket->get(), AsSockAddr(&bind_addr.addr), &bind_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(bind_addr_len, bind_addr.addr_len);
-
- // Disable multicast looping.
- EXPECT_THAT(setsockopt(socket->get(), IPPROTO_IP, IP_MULTICAST_LOOP,
- &kSockOptOff, sizeof(kSockOptOff)),
- SyscallSucceeds());
-
- // Register to receive multicast packets.
- ip_mreq group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- EXPECT_THAT(setsockopt(socket->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallSucceeds());
-
- // Send a multicast packet.
- auto send_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&send_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&bind_addr.addr)->sin_port;
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(
- RetryEINTR(sendto)(socket->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&send_addr.addr), send_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we did not receive the multicast packet.
- char recv_buf[sizeof(send_buf)] = {};
- EXPECT_THAT(
- RetryEINTR(recv)(socket->get(), recv_buf, sizeof(recv_buf), MSG_DONTWAIT),
- SyscallFailsWithErrno(EAGAIN));
-}
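
A standalone raw-POSIX sketch of the IP_MULTICAST_LOOP knob used in the tests above. Loopback of outgoing multicast defaults to on, and the option takes effect on the sending socket, which is why disabling it there suppresses self-delivery.

    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <unistd.h>
    #include <cstdio>

    int main() {
      int fd = socket(AF_INET, SOCK_DGRAM, 0);

      // The default is 1: multicast sent from this socket is looped back to any
      // local members of the destination group.
      int loop = -1;
      socklen_t len = sizeof(loop);
      getsockopt(fd, IPPROTO_IP, IP_MULTICAST_LOOP, &loop, &len);
      std::printf("default IP_MULTICAST_LOOP: %d\n", loop);

      // Turn loopback off; local group members will no longer receive copies of
      // datagrams sent from this socket.
      int off = 0;
      setsockopt(fd, IPPROTO_IP, IP_MULTICAST_LOOP, &off, sizeof(off));
      close(fd);
      return 0;
    }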
-
-// Check that multicast packets won't be delivered to another socket with no
-// set interface or group membership.
-TEST_P(IPv4UDPUnboundExternalNetworkingSocketTest, TestSendMulticastNoGroup) {
- // FIXME(b/125485338): A group membership is not required for external
- // multicast on gVisor.
- SKIP_IF(IsRunningOnGvisor());
-
- SKIP_IF(!found_net_interfaces_);
-
- auto sender = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto receiver = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Bind the second FD to the v4 any address to ensure that we can receive the
- // multicast packet.
- auto receiver_addr = V4Any();
- ASSERT_THAT(bind(receiver->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(receiver->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
-
- // Send a multicast packet.
- auto send_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&send_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(
- RetryEINTR(sendto)(sender->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&send_addr.addr), send_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we did not receive the multicast packet.
- char recv_buf[sizeof(send_buf)] = {};
- ASSERT_THAT(RetryEINTR(recv)(receiver->get(), recv_buf, sizeof(recv_buf),
- MSG_DONTWAIT),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-// Check that multicast packets will be delivered to another socket without
-// setting an interface.
-TEST_P(IPv4UDPUnboundExternalNetworkingSocketTest, TestSendMulticast) {
- SKIP_IF(!found_net_interfaces_);
- auto sender = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto receiver = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Bind the second FD to the v4 any address to ensure that we can receive the
- // multicast packet.
- auto receiver_addr = V4Any();
- ASSERT_THAT(bind(receiver->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(receiver->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
-
- // Register to receive multicast packets.
- ip_mreqn group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- ASSERT_THAT(setsockopt(receiver->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallSucceeds());
-
- // Send a multicast packet.
- auto send_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&send_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(
- RetryEINTR(sendto)(sender->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&send_addr.addr), send_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we received the multicast packet.
- char recv_buf[sizeof(send_buf)] = {};
- ASSERT_THAT(RetryEINTR(recv)(receiver->get(), recv_buf, sizeof(recv_buf), 0),
- SyscallSucceedsWithValue(sizeof(recv_buf)));
-
- EXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));
-}
-
-// Check that multicast packets won't be delivered to another socket with no
-// set interface and IP_MULTICAST_LOOP disabled on the sending socket.
-TEST_P(IPv4UDPUnboundExternalNetworkingSocketTest,
- TestSendMulticastSenderNoLoop) {
- SKIP_IF(!found_net_interfaces_);
- auto sender = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto receiver = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Bind the second FD to the v4 any address to ensure that we can receive the
- // multicast packet.
- auto receiver_addr = V4Any();
- ASSERT_THAT(bind(receiver->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(receiver->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
-
- // Disable multicast looping on the sender.
- EXPECT_THAT(setsockopt(sender->get(), IPPROTO_IP, IP_MULTICAST_LOOP,
- &kSockOptOff, sizeof(kSockOptOff)),
- SyscallSucceeds());
-
- // Register to receive multicast packets.
- ip_mreqn group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- EXPECT_THAT(setsockopt(receiver->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallSucceeds());
-
- // Send a multicast packet.
- auto send_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&send_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(
- RetryEINTR(sendto)(sender->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&send_addr.addr), send_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we did not receive the multicast packet.
- char recv_buf[sizeof(send_buf)] = {};
- ASSERT_THAT(RetryEINTR(recv)(receiver->get(), recv_buf, sizeof(recv_buf),
- MSG_DONTWAIT),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-// Check that multicast packets will still be delivered to the receiving socket
-// without setting an interface, even with IP_MULTICAST_LOOP disabled on it.
-TEST_P(IPv4UDPUnboundExternalNetworkingSocketTest,
- TestSendMulticastReceiverNoLoop) {
- SKIP_IF(!found_net_interfaces_);
-
- auto sender = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto receiver = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Bind the second FD to the v4 any address to ensure that we can receive the
- // multicast packet.
- auto receiver_addr = V4Any();
- ASSERT_THAT(bind(receiver->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(receiver->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
-
- // Disable multicast looping on the receiver.
- ASSERT_THAT(setsockopt(receiver->get(), IPPROTO_IP, IP_MULTICAST_LOOP,
- &kSockOptOff, sizeof(kSockOptOff)),
- SyscallSucceeds());
-
- // Register to receive multicast packets.
- ip_mreqn group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- ASSERT_THAT(setsockopt(receiver->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallSucceeds());
-
- // Send a multicast packet.
- auto send_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&send_addr.addr)->sin_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(
- RetryEINTR(sendto)(sender->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&send_addr.addr), send_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we received the multicast packet.
- char recv_buf[sizeof(send_buf)] = {};
- ASSERT_THAT(RetryEINTR(recv)(receiver->get(), recv_buf, sizeof(recv_buf), 0),
- SyscallSucceedsWithValue(sizeof(recv_buf)));
-
- EXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));
-}
-
-// Check that two sockets can join the same multicast group at the same time,
-// and both will receive data on it when bound to the ANY address.
-TEST_P(IPv4UDPUnboundExternalNetworkingSocketTest,
- TestSendMulticastToTwoBoundToAny) {
- SKIP_IF(!found_net_interfaces_);
- auto sender = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- std::unique_ptr<FileDescriptor> receivers[2] = {
- ASSERT_NO_ERRNO_AND_VALUE(NewSocket()),
- ASSERT_NO_ERRNO_AND_VALUE(NewSocket())};
-
- ip_mreq group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- auto receiver_addr = V4Any();
- int bound_port = 0;
- for (auto& receiver : receivers) {
- ASSERT_THAT(setsockopt(receiver->get(), SOL_SOCKET, SO_REUSEPORT,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
- // Bind to ANY to receive multicast packets.
- ASSERT_THAT(bind(receiver->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(receiver->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
- EXPECT_EQ(
- htonl(INADDR_ANY),
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_addr.s_addr);
- // On the first iteration, save the port we are bound to. On the second
- // iteration, verify the port is the same as the one from the first
- // iteration. In other words, both sockets listen on the same port.
- if (bound_port == 0) {
- bound_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- } else {
- EXPECT_EQ(bound_port,
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port);
- }
-
- // Register to receive multicast packets.
- ASSERT_THAT(setsockopt(receiver->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP,
- &group, sizeof(group)),
- SyscallSucceeds());
- }
-
- // Send a multicast packet to the group and verify both receivers get it.
- auto send_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&send_addr.addr)->sin_port = bound_port;
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(
- RetryEINTR(sendto)(sender->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&send_addr.addr), send_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
- for (auto& receiver : receivers) {
- char recv_buf[sizeof(send_buf)] = {};
- ASSERT_THAT(
- RetryEINTR(recv)(receiver->get(), recv_buf, sizeof(recv_buf), 0),
- SyscallSucceedsWithValue(sizeof(recv_buf)));
- EXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));
- }
-}
-
-// Check that two sockets can join the same multicast group at the same time,
-// and both will receive data on it when bound to the multicast address.
-TEST_P(IPv4UDPUnboundExternalNetworkingSocketTest,
- TestSendMulticastToTwoBoundToMulticastAddress) {
- SKIP_IF(!found_net_interfaces_);
- auto sender = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- std::unique_ptr<FileDescriptor> receivers[2] = {
- ASSERT_NO_ERRNO_AND_VALUE(NewSocket()),
- ASSERT_NO_ERRNO_AND_VALUE(NewSocket())};
-
- ip_mreq group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- auto receiver_addr = V4Multicast();
- int bound_port = 0;
- for (auto& receiver : receivers) {
- ASSERT_THAT(setsockopt(receiver->get(), SOL_SOCKET, SO_REUSEPORT,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(bind(receiver->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(receiver->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
- EXPECT_EQ(
- inet_addr(kMulticastAddress),
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_addr.s_addr);
- // On the first iteration, save the port we are bound to. On the second
- // iteration, verify the port is the same as the one from the first
- // iteration. In other words, both sockets listen on the same port.
- if (bound_port == 0) {
- bound_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- } else {
- EXPECT_EQ(
- inet_addr(kMulticastAddress),
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_addr.s_addr);
- EXPECT_EQ(bound_port,
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port);
- }
-
- // Register to receive multicast packets.
- ASSERT_THAT(setsockopt(receiver->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP,
- &group, sizeof(group)),
- SyscallSucceeds());
- }
-
- // Send a multicast packet to the group and verify both receivers get it.
- auto send_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&send_addr.addr)->sin_port = bound_port;
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(
- RetryEINTR(sendto)(sender->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&send_addr.addr), send_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
- for (auto& receiver : receivers) {
- char recv_buf[sizeof(send_buf)] = {};
- ASSERT_THAT(
- RetryEINTR(recv)(receiver->get(), recv_buf, sizeof(recv_buf), 0),
- SyscallSucceedsWithValue(sizeof(recv_buf)));
- EXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));
- }
-}
-
-// Check that two sockets can join the same multicast group at the same time,
-// and with one bound to the wildcard address and the other bound to the
-// multicast address, both will receive data.
-TEST_P(IPv4UDPUnboundExternalNetworkingSocketTest,
- TestSendMulticastToTwoBoundToAnyAndMulticastAddress) {
- SKIP_IF(!found_net_interfaces_);
- auto sender = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- std::unique_ptr<FileDescriptor> receivers[2] = {
- ASSERT_NO_ERRNO_AND_VALUE(NewSocket()),
- ASSERT_NO_ERRNO_AND_VALUE(NewSocket())};
-
- ip_mreq group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- // The first receiver binds to the wildcard address.
- auto receiver_addr = V4Any();
- int bound_port = 0;
- for (auto& receiver : receivers) {
- ASSERT_THAT(setsockopt(receiver->get(), SOL_SOCKET, SO_REUSEPORT,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
- ASSERT_THAT(bind(receiver->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(receiver->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
- // On the first iteration, save the port we are bound to and change the
- // receiver address from V4Any to V4Multicast so the second receiver binds
- // to that. On the second iteration, verify the port is the same as the one
- // from the first iteration but the address is different.
- if (bound_port == 0) {
- EXPECT_EQ(
- htonl(INADDR_ANY),
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_addr.s_addr);
- bound_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- receiver_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port =
- bound_port;
- } else {
- EXPECT_EQ(
- inet_addr(kMulticastAddress),
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_addr.s_addr);
- EXPECT_EQ(bound_port,
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port);
- }
-
- // Register to receive multicast packets.
- ASSERT_THAT(setsockopt(receiver->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP,
- &group, sizeof(group)),
- SyscallSucceeds());
- }
-
- // Send a multicast packet to the group and verify both receivers get it.
- auto send_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&send_addr.addr)->sin_port = bound_port;
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(
- RetryEINTR(sendto)(sender->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&send_addr.addr), send_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
- for (auto& receiver : receivers) {
- char recv_buf[sizeof(send_buf)] = {};
- ASSERT_THAT(
- RetryEINTR(recv)(receiver->get(), recv_buf, sizeof(recv_buf), 0),
- SyscallSucceedsWithValue(sizeof(recv_buf)));
- EXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));
- }
-}
-
-// Check that when receiving a looped-back multicast packet, its source address
-// is not a multicast address.
-TEST_P(IPv4UDPUnboundExternalNetworkingSocketTest,
- IpMulticastLoopbackFromAddr) {
- SKIP_IF(!found_net_interfaces_);
-
- auto sender = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto receiver = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- auto receiver_addr = V4Any();
- ASSERT_THAT(bind(receiver->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(receiver->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
- int receiver_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
-
- ip_mreq group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- ASSERT_THAT(setsockopt(receiver->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallSucceeds());
-
- // Connect to the multicast address. This binds us to the outgoing interface
- // and allows us to get its IP (to be compared against the src-IP on the
- // receiver side).
- auto sendto_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&sendto_addr.addr)->sin_port = receiver_port;
- ASSERT_THAT(RetryEINTR(connect)(sender->get(), AsSockAddr(&sendto_addr.addr),
- sendto_addr.addr_len),
- SyscallSucceeds());
- auto sender_addr = V4EmptyAddress();
- ASSERT_THAT(getsockname(sender->get(), AsSockAddr(&sender_addr.addr),
- &sender_addr.addr_len),
- SyscallSucceeds());
- ASSERT_EQ(sizeof(struct sockaddr_in), sender_addr.addr_len);
- sockaddr_in* sender_addr_in =
- reinterpret_cast<sockaddr_in*>(&sender_addr.addr);
-
- // Send a multicast packet.
- char send_buf[4] = {};
- ASSERT_THAT(RetryEINTR(send)(sender->get(), send_buf, sizeof(send_buf), 0),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Receive a multicast packet.
- char recv_buf[sizeof(send_buf)] = {};
- auto src_addr = V4EmptyAddress();
- ASSERT_THAT(
- RetryEINTR(recvfrom)(receiver->get(), recv_buf, sizeof(recv_buf), 0,
- AsSockAddr(&src_addr.addr), &src_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(recv_buf)));
- ASSERT_EQ(sizeof(struct sockaddr_in), src_addr.addr_len);
- sockaddr_in* src_addr_in = reinterpret_cast<sockaddr_in*>(&src_addr.addr);
-
- // Verify that the received source IP:port matches the sender one.
- EXPECT_EQ(sender_addr_in->sin_port, src_addr_in->sin_port);
- EXPECT_EQ(sender_addr_in->sin_addr.s_addr, src_addr_in->sin_addr.s_addr);
-}
-
-// Check that when setting the IP_MULTICAST_IF option to both an index pointing
-// to the loopback interface and an address pointing to the non-loopback
-// interface, a multicast packet sent out uses the latter as its source address.
-TEST_P(IPv4UDPUnboundExternalNetworkingSocketTest,
- IpMulticastLoopbackIfNicAndAddr) {
- SKIP_IF(!found_net_interfaces_);
-
- // Create receiver, bind to ANY and join the multicast group.
- auto receiver = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto receiver_addr = V4Any();
- ASSERT_THAT(bind(receiver->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(receiver->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
- int receiver_port =
- reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)->sin_port;
- ip_mreqn group = {};
- group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress);
- group.imr_ifindex = lo_if_idx_;
- ASSERT_THAT(setsockopt(receiver->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP, &group,
- sizeof(group)),
- SyscallSucceeds());
-
- // Set outgoing multicast interface config, with NIC and addr pointing to
- // different interfaces.
- auto sender = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- ip_mreqn iface = {};
- iface.imr_ifindex = lo_if_idx_;
- iface.imr_address = eth_if_addr_.sin_addr;
- ASSERT_THAT(setsockopt(sender->get(), IPPROTO_IP, IP_MULTICAST_IF, &iface,
- sizeof(iface)),
- SyscallSucceeds());
-
- // Send a multicast packet.
- auto sendto_addr = V4Multicast();
- reinterpret_cast<sockaddr_in*>(&sendto_addr.addr)->sin_port = receiver_port;
- char send_buf[4] = {};
- ASSERT_THAT(
- RetryEINTR(sendto)(sender->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&sendto_addr.addr), sendto_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Receive a multicast packet.
- char recv_buf[sizeof(send_buf)] = {};
- auto src_addr = V4EmptyAddress();
- ASSERT_THAT(
- RetryEINTR(recvfrom)(receiver->get(), recv_buf, sizeof(recv_buf), 0,
- AsSockAddr(&src_addr.addr), &src_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(recv_buf)));
- ASSERT_EQ(sizeof(struct sockaddr_in), src_addr.addr_len);
- sockaddr_in* src_addr_in = reinterpret_cast<sockaddr_in*>(&src_addr.addr);
-
- // FIXME (b/137781162): When sending a multicast packet use the proper logic
- // to determine the packet's src-IP.
- SKIP_IF(IsRunningOnGvisor());
-
- // Verify the received source address.
- EXPECT_EQ(eth_if_addr_.sin_addr.s_addr, src_addr_in->sin_addr.s_addr);
-}
-
-// Check that when we are bound to one interface we can set IP_MULTICAST_IF to
-// another interface.
-TEST_P(IPv4UDPUnboundExternalNetworkingSocketTest,
- IpMulticastLoopbackBindToOneIfSetMcastIfToAnother) {
- SKIP_IF(!found_net_interfaces_);
-
- // FIXME (b/137790511): When bound to one interface it is not possible to set
- // IP_MULTICAST_IF to a different interface.
- SKIP_IF(IsRunningOnGvisor());
-
- // Create sender and bind to eth interface.
- auto sender = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- ASSERT_THAT(
- bind(sender->get(), AsSockAddr(&eth_if_addr_), sizeof(eth_if_addr_)),
- SyscallSucceeds());
-
- // Run through all combinations of index and address for IP_MULTICAST_IF
- // that select the loopback interface.
- struct {
- int imr_ifindex;
- struct in_addr imr_address;
- } test_data[] = {
- {lo_if_idx_, {}},
- {0, lo_if_addr_.sin_addr},
- {lo_if_idx_, lo_if_addr_.sin_addr},
- {lo_if_idx_, eth_if_addr_.sin_addr},
- };
- for (auto t : test_data) {
- ip_mreqn iface = {};
- iface.imr_ifindex = t.imr_ifindex;
- iface.imr_address = t.imr_address;
- EXPECT_THAT(setsockopt(sender->get(), IPPROTO_IP, IP_MULTICAST_IF, &iface,
- sizeof(iface)),
- SyscallSucceeds())
- << "imr_index=" << iface.imr_ifindex
- << " imr_address=" << GetAddr4Str(&iface.imr_address);
- }
-}
-} // namespace testing
-} // namespace gvisor
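The tests above drive IPv4 multicast delivery through ip_mreqn: IP_ADD_MEMBERSHIP joins the group, IP_MULTICAST_IF selects the outgoing interface, and IP_MULTICAST_LOOP (enabled by default) controls whether a sender's own packets are looped back. Below is a minimal standalone sketch of the join-send-receive pattern they exercise, kept outside the gtest harness; the group 224.0.2.1, the port 8123, and the presence of a default multicast route are assumptions of this sketch, not values taken from the tests.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main() {
  int fd = socket(AF_INET, SOCK_DGRAM, 0);
  if (fd < 0) return 1;

  // Bind to the wildcard address so looped-back group traffic is delivered.
  sockaddr_in any = {};
  any.sin_family = AF_INET;
  any.sin_addr.s_addr = htonl(INADDR_ANY);
  any.sin_port = htons(8123);
  if (bind(fd, reinterpret_cast<sockaddr*>(&any), sizeof(any)) < 0) return 1;

  // Join the group; imr_ifindex == 0 leaves interface selection to the
  // routing table, mirroring the "no set interface" cases above.
  ip_mreqn group = {};
  group.imr_multiaddr.s_addr = inet_addr("224.0.2.1");
  if (setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &group, sizeof(group)) < 0)
    return 1;

  // Send to the group and read back the looped copy (IP_MULTICAST_LOOP is on
  // by default).
  sockaddr_in dst = any;
  dst.sin_addr.s_addr = inet_addr("224.0.2.1");
  const char msg[] = "hello";
  if (sendto(fd, msg, sizeof(msg), 0, reinterpret_cast<sockaddr*>(&dst),
             sizeof(dst)) < 0)
    return 1;

  char buf[sizeof(msg)] = {};
  if (recv(fd, buf, sizeof(buf), 0) < 0) return 1;
  close(fd);
  return memcmp(msg, buf, sizeof(msg)) == 0 ? 0 : 1;
}

Disabling the looped copy is the sender-side IP_MULTICAST_LOOP toggle exercised by TestSendMulticastSenderNoLoop above; setting it on the receiver has no effect on delivery, which is exactly what TestSendMulticastReceiverNoLoop checks.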
diff --git a/test/syscalls/linux/socket_ipv4_udp_unbound_external_networking.h b/test/syscalls/linux/socket_ipv4_udp_unbound_external_networking.h
deleted file mode 100644
index 20922ac1f..000000000
--- a/test/syscalls/linux/socket_ipv4_udp_unbound_external_networking.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_LINUX_SOCKET_IPV4_UDP_UNBOUND_EXTERNAL_NETWORKING_H_
-#define GVISOR_TEST_SYSCALLS_LINUX_SOCKET_IPV4_UDP_UNBOUND_EXTERNAL_NETWORKING_H_
-
-#include "test/syscalls/linux/socket_ip_udp_unbound_external_networking.h"
-
-namespace gvisor {
-namespace testing {
-
-// Test fixture for tests that apply to unbound IPv4 UDP sockets in a sandbox
-// with external networking support.
-using IPv4UDPUnboundExternalNetworkingSocketTest =
- IPUDPUnboundExternalNetworkingSocketTest;
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_LINUX_SOCKET_IPV4_UDP_UNBOUND_EXTERNAL_NETWORKING_H_
diff --git a/test/syscalls/linux/socket_ipv4_udp_unbound_external_networking_test.cc b/test/syscalls/linux/socket_ipv4_udp_unbound_external_networking_test.cc
deleted file mode 100644
index f6e64c157..000000000
--- a/test/syscalls/linux/socket_ipv4_udp_unbound_external_networking_test.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/socket_ipv4_udp_unbound_external_networking.h"
-
-#include <vector>
-
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-std::vector<SocketKind> GetSockets() {
- return ApplyVec<SocketKind>(
- IPv4UDPUnboundSocket,
- AllBitwiseCombinations(List<int>{0, SOCK_NONBLOCK}));
-}
-
-INSTANTIATE_TEST_SUITE_P(IPv4UDPUnboundSockets,
- IPv4UDPUnboundExternalNetworkingSocketTest,
- ::testing::ValuesIn(GetSockets()));
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_ipv4_udp_unbound_loopback.cc b/test/syscalls/linux/socket_ipv4_udp_unbound_loopback.cc
deleted file mode 100644
index f121c044d..000000000
--- a/test/syscalls/linux/socket_ipv4_udp_unbound_loopback.cc
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <vector>
-
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_ipv4_udp_unbound.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-INSTANTIATE_TEST_SUITE_P(
- IPv4UDPSockets, IPv4UDPUnboundSocketTest,
- ::testing::ValuesIn(ApplyVec<SocketKind>(IPv4UDPUnboundSocket,
- AllBitwiseCombinations(List<int>{
- 0, SOCK_NONBLOCK}))));
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_ipv4_udp_unbound_loopback_netlink.cc b/test/syscalls/linux/socket_ipv4_udp_unbound_loopback_netlink.cc
deleted file mode 100644
index 8052bf404..000000000
--- a/test/syscalls/linux/socket_ipv4_udp_unbound_loopback_netlink.cc
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <vector>
-
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_ipv4_udp_unbound_netlink.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-INSTANTIATE_TEST_SUITE_P(
- IPv4UDPSockets, IPv4UDPUnboundSocketNetlinkTest,
- ::testing::ValuesIn(ApplyVec<SocketKind>(IPv4UDPUnboundSocket,
- AllBitwiseCombinations(List<int>{
- 0, SOCK_NONBLOCK}))));
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_ipv4_udp_unbound_loopback_nogotsan.cc b/test/syscalls/linux/socket_ipv4_udp_unbound_loopback_nogotsan.cc
deleted file mode 100644
index a2c6d4491..000000000
--- a/test/syscalls/linux/socket_ipv4_udp_unbound_loopback_nogotsan.cc
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sys/socket.h>
-#include <sys/types.h>
-
-#include "gtest/gtest.h"
-#include "absl/memory/memory.h"
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Test fixture for tests that apply to IPv4 UDP sockets.
-using IPv4UDPUnboundSocketNogotsanTest = SimpleSocketTest;
-
-// Check that connect returns EAGAIN when out of local ephemeral ports.
-// We disable S/R because this test creates a large number of sockets.
-TEST_P(IPv4UDPUnboundSocketNogotsanTest, UDPConnectPortExhaustion) {
- auto receiver1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- const int kClients = ASSERT_NO_ERRNO_AND_VALUE(MaybeLimitEphemeralPorts());
- // Bind the first socket to the loopback and take note of the selected port.
- auto addr = V4Loopback();
- ASSERT_THAT(bind(receiver1->get(), AsSockAddr(&addr.addr), addr.addr_len),
- SyscallSucceeds());
- socklen_t addr_len = addr.addr_len;
- ASSERT_THAT(getsockname(receiver1->get(), AsSockAddr(&addr.addr), &addr_len),
- SyscallSucceeds());
- EXPECT_EQ(addr_len, addr.addr_len);
-
- // Disable cooperative S/R as we are making too many syscalls.
- DisableSave ds;
- std::vector<std::unique_ptr<FileDescriptor>> sockets;
- for (int i = 0; i < kClients; i++) {
- auto s = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- int ret = connect(s->get(), AsSockAddr(&addr.addr), addr.addr_len);
- if (ret == 0) {
- sockets.push_back(std::move(s));
- continue;
- }
- ASSERT_THAT(ret, SyscallFailsWithErrno(EAGAIN));
- break;
- }
-}
-
-// Check that bind returns EADDRINUSE when out of local ephemeral ports.
-// We disable S/R because this test creates a large number of sockets.
-TEST_P(IPv4UDPUnboundSocketNogotsanTest, UDPBindPortExhaustion) {
- auto receiver1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- const int kClients = ASSERT_NO_ERRNO_AND_VALUE(MaybeLimitEphemeralPorts());
- auto addr = V4Loopback();
- // Disable cooperative S/R as we are making too many syscalls.
- DisableSave ds;
- std::vector<std::unique_ptr<FileDescriptor>> sockets;
- for (int i = 0; i < kClients; i++) {
- auto s = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- int ret = bind(s->get(), AsSockAddr(&addr.addr), addr.addr_len);
- if (ret == 0) {
- sockets.push_back(std::move(s));
- continue;
- }
- ASSERT_THAT(ret, SyscallFailsWithErrno(EADDRINUSE));
- break;
- }
-}
-
-INSTANTIATE_TEST_SUITE_P(
- IPv4UDPSockets, IPv4UDPUnboundSocketNogotsanTest,
- ::testing::ValuesIn(ApplyVec<SocketKind>(IPv4UDPUnboundSocket,
- AllBitwiseCombinations(List<int>{
- 0, SOCK_NONBLOCK}))));
-
-} // namespace testing
-} // namespace gvisor
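The two exhaustion tests above depend on a helper, MaybeLimitEphemeralPorts(), that is defined outside this diff; what ultimately bounds their loops is the kernel's ephemeral port range. A small sketch of reading that range on Linux follows, under the assumption that its size is a reasonable upper bound on the number of connect()/bind() attempts such a test needs.

#include <cstdio>

// Returns the number of ports in the kernel's ephemeral range, or -1 if the
// range could not be read.
int EphemeralPortCount() {
  FILE* f = std::fopen("/proc/sys/net/ipv4/ip_local_port_range", "r");
  if (f == nullptr) return -1;
  int lo = 0, hi = 0;
  const int matched = std::fscanf(f, "%d %d", &lo, &hi);
  std::fclose(f);
  if (matched != 2 || hi < lo) return -1;
  return hi - lo + 1;
}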
diff --git a/test/syscalls/linux/socket_ipv4_udp_unbound_netlink.cc b/test/syscalls/linux/socket_ipv4_udp_unbound_netlink.cc
deleted file mode 100644
index 020ce5d6e..000000000
--- a/test/syscalls/linux/socket_ipv4_udp_unbound_netlink.cc
+++ /dev/null
@@ -1,202 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/socket_ipv4_udp_unbound_netlink.h"
-
-#include <arpa/inet.h>
-#include <poll.h>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/socket_netlink_route_util.h"
-#include "test/util/capability_util.h"
-#include "test/util/cleanup.h"
-
-namespace gvisor {
-namespace testing {
-
-constexpr size_t kSendBufSize = 200;
-
-// Checks that the loopback interface considers itself bound to all IPs in an
-// associated subnet.
-TEST_P(IPv4UDPUnboundSocketNetlinkTest, JoinSubnet) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN)));
-
- // Add an IP address to the loopback interface.
- Link loopback_link = ASSERT_NO_ERRNO_AND_VALUE(LoopbackLink());
- struct in_addr addr;
- ASSERT_EQ(1, inet_pton(AF_INET, "192.0.2.1", &addr));
- ASSERT_NO_ERRNO(LinkAddLocalAddr(loopback_link.index, AF_INET,
- /*prefixlen=*/24, &addr, sizeof(addr)));
- Cleanup defer_addr_removal = Cleanup(
- [loopback_link = std::move(loopback_link), addr = std::move(addr)] {
- EXPECT_NO_ERRNO(LinkDelLocalAddr(loopback_link.index, AF_INET,
- /*prefixlen=*/24, &addr,
- sizeof(addr)));
- });
-
- auto snd_sock = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto rcv_sock = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- // Send from an address that is not assigned to the loopback interface but
- // falls within its associated subnet.
- TestAddress sender_addr("V4NotAssignd1");
- sender_addr.addr.ss_family = AF_INET;
- sender_addr.addr_len = sizeof(sockaddr_in);
- ASSERT_EQ(1, inet_pton(AF_INET, "192.0.2.2",
- &(reinterpret_cast<sockaddr_in*>(&sender_addr.addr)
- ->sin_addr.s_addr)));
- ASSERT_THAT(bind(snd_sock->get(), AsSockAddr(&sender_addr.addr),
- sender_addr.addr_len),
- SyscallSucceeds());
-
- // Send the packet to an address that is not assigned to the loopback
- // interface but falls within its associated subnet.
- TestAddress receiver_addr("V4NotAssigned2");
- receiver_addr.addr.ss_family = AF_INET;
- receiver_addr.addr_len = sizeof(sockaddr_in);
- ASSERT_EQ(1, inet_pton(AF_INET, "192.0.2.254",
- &(reinterpret_cast<sockaddr_in*>(&receiver_addr.addr)
- ->sin_addr.s_addr)));
- ASSERT_THAT(bind(rcv_sock->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(rcv_sock->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- ASSERT_EQ(receiver_addr_len, receiver_addr.addr_len);
- char send_buf[kSendBufSize];
- RandomizeBuffer(send_buf, kSendBufSize);
- ASSERT_THAT(RetryEINTR(sendto)(snd_sock->get(), send_buf, kSendBufSize, 0,
- AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceedsWithValue(kSendBufSize));
-
- // Check that we received the packet.
- char recv_buf[kSendBufSize] = {};
- ASSERT_THAT(RetryEINTR(recv)(rcv_sock->get(), recv_buf, kSendBufSize, 0),
- SyscallSucceedsWithValue(kSendBufSize));
- ASSERT_EQ(0, memcmp(send_buf, recv_buf, kSendBufSize));
-}
-
-// Tests that broadcast packets are delivered to all interested sockets, i.e.
-// sockets bound to either the wildcard address or the broadcast address.
-//
-// Note that we cannot test the IPv4 limited broadcast (255.255.255.255)
-// because we do not have a route to it.
-TEST_P(IPv4UDPUnboundSocketNetlinkTest, ReuseAddrSubnetDirectedBroadcast) {
- constexpr uint16_t kPort = 9876;
- // Wait up to 20 seconds for the data.
- constexpr int kPollTimeoutMs = 20000;
- // Number of sockets per socket type.
- constexpr int kNumSocketsPerType = 2;
-
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN)));
-
- // Add an IP address to the loopback interface.
- Link loopback_link = ASSERT_NO_ERRNO_AND_VALUE(LoopbackLink());
- struct in_addr addr;
- ASSERT_EQ(1, inet_pton(AF_INET, "192.0.2.1", &addr));
- ASSERT_NO_ERRNO(LinkAddLocalAddr(loopback_link.index, AF_INET,
- 24 /* prefixlen */, &addr, sizeof(addr)));
- Cleanup defer_addr_removal = Cleanup(
- [loopback_link = std::move(loopback_link), addr = std::move(addr)] {
- EXPECT_NO_ERRNO(LinkDelLocalAddr(loopback_link.index, AF_INET,
- /*prefixlen=*/24, &addr,
- sizeof(addr)));
- });
-
- TestAddress broadcast_address("SubnetBroadcastAddress");
- broadcast_address.addr.ss_family = AF_INET;
- broadcast_address.addr_len = sizeof(sockaddr_in);
- auto broadcast_address_in =
- reinterpret_cast<sockaddr_in*>(&broadcast_address.addr);
- ASSERT_EQ(1, inet_pton(AF_INET, "192.0.2.255",
- &broadcast_address_in->sin_addr.s_addr));
- broadcast_address_in->sin_port = htons(kPort);
-
- TestAddress any_address = V4Any();
- reinterpret_cast<sockaddr_in*>(&any_address.addr)->sin_port = htons(kPort);
-
- // We create sockets bound to both the wildcard address and the broadcast
- // address to make sure both of these types of "broadcast interested" sockets
- // receive broadcast packets.
- std::vector<std::unique_ptr<FileDescriptor>> socks;
- for (bool bind_wildcard : {false, true}) {
- // Create multiple sockets for each type of "broadcast interested"
- // socket so we can test that all sockets receive the broadcast packet.
- for (int i = 0; i < kNumSocketsPerType; i++) {
- auto sock = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto idx = socks.size();
-
- ASSERT_THAT(setsockopt(sock->get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceedsWithValue(0))
- << "socks[" << idx << "]";
-
- ASSERT_THAT(setsockopt(sock->get(), SOL_SOCKET, SO_BROADCAST, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceedsWithValue(0))
- << "socks[" << idx << "]";
-
- if (bind_wildcard) {
- ASSERT_THAT(bind(sock->get(), AsSockAddr(&any_address.addr),
- any_address.addr_len),
- SyscallSucceeds())
- << "socks[" << idx << "]";
- } else {
- ASSERT_THAT(bind(sock->get(), AsSockAddr(&broadcast_address.addr),
- broadcast_address.addr_len),
- SyscallSucceeds())
- << "socks[" << idx << "]";
- }
-
- socks.push_back(std::move(sock));
- }
- }
-
- char send_buf[kSendBufSize];
- RandomizeBuffer(send_buf, kSendBufSize);
-
- // Broadcasts from each socket should be received by every socket (including
- // the sending socket).
- for (size_t w = 0; w < socks.size(); w++) {
- auto& w_sock = socks[w];
- ASSERT_THAT(RetryEINTR(sendto)(w_sock->get(), send_buf, kSendBufSize, 0,
- AsSockAddr(&broadcast_address.addr),
- broadcast_address.addr_len),
- SyscallSucceedsWithValue(kSendBufSize))
- << "write socks[" << w << "]";
-
- // Check that we received the packet on all sockets.
- for (size_t r = 0; r < socks.size(); r++) {
- auto& r_sock = socks[r];
-
- struct pollfd poll_fd = {r_sock->get(), POLLIN, 0};
- EXPECT_THAT(RetryEINTR(poll)(&poll_fd, 1, kPollTimeoutMs),
- SyscallSucceedsWithValue(1))
- << "write socks[" << w << "] & read socks[" << r << "]";
-
- char recv_buf[kSendBufSize] = {};
- EXPECT_THAT(RetryEINTR(recv)(r_sock->get(), recv_buf, kSendBufSize, 0),
- SyscallSucceedsWithValue(kSendBufSize))
- << "write socks[" << w << "] & read socks[" << r << "]";
- EXPECT_EQ(0, memcmp(send_buf, recv_buf, kSendBufSize))
- << "write socks[" << w << "] & read socks[" << r << "]";
- }
- }
-}
-
-} // namespace testing
-} // namespace gvisor
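ReuseAddrSubnetDirectedBroadcast above adds 192.0.2.1/24 over netlink and then broadcasts to 192.0.2.255. Setting the netlink plumbing aside, the only socket-level prerequisite for the send itself is SO_BROADCAST. A minimal sketch of that step, reusing the documentation subnet and the test's port 9876; packaging it as a standalone helper is an assumption of the sketch.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

// Sends one datagram to the subnet-directed broadcast address 192.0.2.255.
bool SendSubnetBroadcast() {
  int fd = socket(AF_INET, SOCK_DGRAM, 0);
  if (fd < 0) return false;

  // Without SO_BROADCAST, sendto() to a broadcast address fails with EACCES.
  const int on = 1;
  if (setsockopt(fd, SOL_SOCKET, SO_BROADCAST, &on, sizeof(on)) < 0) {
    close(fd);
    return false;
  }

  sockaddr_in dst = {};
  dst.sin_family = AF_INET;
  dst.sin_addr.s_addr = inet_addr("192.0.2.255");
  dst.sin_port = htons(9876);
  const char msg[] = "ping";
  const ssize_t sent = sendto(fd, msg, sizeof(msg), 0,
                              reinterpret_cast<sockaddr*>(&dst), sizeof(dst));
  close(fd);
  return sent == static_cast<ssize_t>(sizeof(msg));
}

The SO_REUSEADDR half of the test is orthogonal to the send path; it only matters for letting several receivers bind the same port.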
diff --git a/test/syscalls/linux/socket_ipv4_udp_unbound_netlink.h b/test/syscalls/linux/socket_ipv4_udp_unbound_netlink.h
deleted file mode 100644
index 73e7836d5..000000000
--- a/test/syscalls/linux/socket_ipv4_udp_unbound_netlink.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_LINUX_SOCKET_IPV4_UDP_UNBOUND_NETLINK_UTIL_H_
-#define GVISOR_TEST_SYSCALLS_LINUX_SOCKET_IPV4_UDP_UNBOUND_NETLINK_UTIL_H_
-
-#include "test/syscalls/linux/socket_test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Test fixture for tests that apply to IPv4 UDP sockets.
-using IPv4UDPUnboundSocketNetlinkTest = SimpleSocketTest;
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_LINUX_SOCKET_IPV4_UDP_UNBOUND_NETLINK_UTIL_H_
diff --git a/test/syscalls/linux/socket_ipv6_udp_unbound.cc b/test/syscalls/linux/socket_ipv6_udp_unbound.cc
deleted file mode 100644
index a4e3371f4..000000000
--- a/test/syscalls/linux/socket_ipv6_udp_unbound.cc
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/socket_ipv6_udp_unbound.h"
-
-#include <arpa/inet.h>
-#include <netinet/in.h>
-#ifdef __linux__
-#include <linux/in6.h>
-#endif // __linux__
-#include <net/if.h>
-#include <sys/ioctl.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <sys/un.h>
-
-#include <cstdio>
-#include <cstring>
-
-#include "gtest/gtest.h"
-#include "absl/memory/memory.h"
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/save_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Test that socket will receive IP_RECVORIGDSTADDR control message.
-TEST_P(IPv6UDPUnboundSocketTest, SetAndReceiveIPReceiveOrigDstAddr) {
- auto sender = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto receiver = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto receiver_addr = V6Loopback();
- int level = SOL_IPV6;
- int type = IPV6_RECVORIGDSTADDR;
-
- ASSERT_THAT(bind(receiver->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
-
- // Retrieve the port bound by the receiver.
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(receiver->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
-
- ASSERT_THAT(connect(sender->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
-
- // Get address and port bound by the sender.
- sockaddr_storage sender_addr_storage;
- socklen_t sender_addr_len = sizeof(sender_addr_storage);
- ASSERT_THAT(getsockname(sender->get(), AsSockAddr(&sender_addr_storage),
- &sender_addr_len),
- SyscallSucceeds());
- ASSERT_EQ(sender_addr_len, sizeof(struct sockaddr_in6));
-
- // Enable IPV6_RECVORIGDSTADDR on the receiving socket so that we get the
- // original destination address of the datagram in a control message.
- ASSERT_THAT(
- setsockopt(receiver->get(), level, type, &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- // Prepare message to send.
- constexpr size_t kDataLength = 1024;
- msghdr sent_msg = {};
- iovec sent_iov = {};
- char sent_data[kDataLength];
- sent_iov.iov_base = sent_data;
- sent_iov.iov_len = kDataLength;
- sent_msg.msg_iov = &sent_iov;
- sent_msg.msg_iovlen = 1;
- sent_msg.msg_flags = 0;
-
- ASSERT_THAT(RetryEINTR(sendmsg)(sender->get(), &sent_msg, 0),
- SyscallSucceedsWithValue(kDataLength));
-
- msghdr received_msg = {};
- iovec received_iov = {};
- char received_data[kDataLength];
- char received_cmsg_buf[CMSG_SPACE(sizeof(sockaddr_in6))] = {};
- size_t cmsg_data_len = sizeof(sockaddr_in6);
- received_iov.iov_base = received_data;
- received_iov.iov_len = kDataLength;
- received_msg.msg_iov = &received_iov;
- received_msg.msg_iovlen = 1;
- received_msg.msg_controllen = CMSG_LEN(cmsg_data_len);
- received_msg.msg_control = received_cmsg_buf;
-
- ASSERT_THAT(RecvMsgTimeout(receiver->get(), &received_msg, 1 /*timeout*/),
- IsPosixErrorOkAndHolds(kDataLength));
-
- cmsghdr* cmsg = CMSG_FIRSTHDR(&received_msg);
- ASSERT_NE(cmsg, nullptr);
- EXPECT_EQ(cmsg->cmsg_len, CMSG_LEN(cmsg_data_len));
- EXPECT_EQ(cmsg->cmsg_level, level);
- EXPECT_EQ(cmsg->cmsg_type, type);
-
- // Check that the address reported in the control message matches the
- // receiver's bound address.
- sockaddr_in6 received_addr = {};
- memcpy(&received_addr, CMSG_DATA(cmsg), sizeof(received_addr));
- auto orig_receiver_addr =
- reinterpret_cast<sockaddr_in6*>(&receiver_addr.addr);
- EXPECT_EQ(memcmp(&received_addr.sin6_addr, &orig_receiver_addr->sin6_addr,
- sizeof(in6_addr)),
- 0);
- EXPECT_EQ(received_addr.sin6_port, orig_receiver_addr->sin6_port);
-}
-
-} // namespace testing
-} // namespace gvisor
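SetAndReceiveIPReceiveOrigDstAddr above verifies the IPV6_RECVORIGDSTADDR control message end to end. The receive side boils down to the standard cmsg walk; below is a compact sketch of just that step, assuming the option was already enabled on the socket as in the test.

#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#ifdef __linux__
#include <linux/in6.h>
#endif

// Copies the original destination reported via IPV6_RECVORIGDSTADDR out of a
// received msghdr. Returns false if no such control message is present.
bool GetOrigDstAddr6(msghdr* msg, sockaddr_in6* out) {
  for (cmsghdr* cmsg = CMSG_FIRSTHDR(msg); cmsg != nullptr;
       cmsg = CMSG_NXTHDR(msg, cmsg)) {
    if (cmsg->cmsg_level == SOL_IPV6 &&
        cmsg->cmsg_type == IPV6_RECVORIGDSTADDR &&
        cmsg->cmsg_len >= CMSG_LEN(sizeof(sockaddr_in6))) {
      memcpy(out, CMSG_DATA(cmsg), sizeof(sockaddr_in6));
      return true;
    }
  }
  return false;
}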
diff --git a/test/syscalls/linux/socket_ipv6_udp_unbound.h b/test/syscalls/linux/socket_ipv6_udp_unbound.h
deleted file mode 100644
index 71e160f73..000000000
--- a/test/syscalls/linux/socket_ipv6_udp_unbound.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_LINUX_SOCKET_IPV6_UDP_UNBOUND_H_
-#define GVISOR_TEST_SYSCALLS_LINUX_SOCKET_IPV6_UDP_UNBOUND_H_
-
-#include "test/syscalls/linux/socket_test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Test fixture for tests that apply to IPv6 UDP sockets.
-using IPv6UDPUnboundSocketTest = SimpleSocketTest;
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_LINUX_SOCKET_IPV6_UDP_UNBOUND_H_
diff --git a/test/syscalls/linux/socket_ipv6_udp_unbound_external_networking.cc b/test/syscalls/linux/socket_ipv6_udp_unbound_external_networking.cc
deleted file mode 100644
index 09f070797..000000000
--- a/test/syscalls/linux/socket_ipv6_udp_unbound_external_networking.cc
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/socket_ipv6_udp_unbound_external_networking.h"
-
-namespace gvisor {
-namespace testing {
-
-TEST_P(IPv6UDPUnboundExternalNetworkingSocketTest, TestJoinLeaveMulticast) {
- SKIP_IF(!found_net_interfaces_);
-
- auto sender = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- auto receiver = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-
- auto receiver_addr = V6Any();
- ASSERT_THAT(bind(receiver->get(), AsSockAddr(&receiver_addr.addr),
- receiver_addr.addr_len),
- SyscallSucceeds());
- socklen_t receiver_addr_len = receiver_addr.addr_len;
- ASSERT_THAT(getsockname(receiver->get(), AsSockAddr(&receiver_addr.addr),
- &receiver_addr_len),
- SyscallSucceeds());
- EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len);
-
- // Register to receive multicast packets.
- auto multicast_addr = V6Multicast();
- ipv6_mreq group_req = {
- .ipv6mr_multiaddr =
- reinterpret_cast<sockaddr_in6*>(&multicast_addr.addr)->sin6_addr,
- .ipv6mr_interface = static_cast<decltype(ipv6_mreq::ipv6mr_interface)>(
- ASSERT_NO_ERRNO_AND_VALUE(InterfaceIndex("lo"))),
- };
- ASSERT_THAT(setsockopt(receiver->get(), IPPROTO_IPV6, IPV6_ADD_MEMBERSHIP,
- &group_req, sizeof(group_req)),
- SyscallSucceeds());
-
- // Set the sender to the loopback interface.
- auto sender_addr = V6Loopback();
- ASSERT_THAT(
- bind(sender->get(), AsSockAddr(&sender_addr.addr), sender_addr.addr_len),
- SyscallSucceeds());
-
- // Send a multicast packet.
- auto send_addr = multicast_addr;
- reinterpret_cast<sockaddr_in6*>(&send_addr.addr)->sin6_port =
- reinterpret_cast<sockaddr_in6*>(&receiver_addr.addr)->sin6_port;
- char send_buf[200];
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(
- RetryEINTR(sendto)(sender->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&send_addr.addr), send_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
-
- // Check that we received the multicast packet.
- char recv_buf[sizeof(send_buf)] = {};
- ASSERT_THAT(RetryEINTR(recv)(receiver->get(), recv_buf, sizeof(recv_buf), 0),
- SyscallSucceedsWithValue(sizeof(recv_buf)));
-
- EXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf)));
-
- // Leave the group and make sure we don't receive its multicast traffic.
- ASSERT_THAT(setsockopt(receiver->get(), IPPROTO_IPV6, IPV6_DROP_MEMBERSHIP,
- &group_req, sizeof(group_req)),
- SyscallSucceeds());
- RandomizeBuffer(send_buf, sizeof(send_buf));
- ASSERT_THAT(
- RetryEINTR(sendto)(sender->get(), send_buf, sizeof(send_buf), 0,
- AsSockAddr(&send_addr.addr), send_addr.addr_len),
- SyscallSucceedsWithValue(sizeof(send_buf)));
- ASSERT_THAT(RetryEINTR(recv)(receiver->get(), recv_buf, sizeof(recv_buf),
- MSG_DONTWAIT),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-} // namespace testing
-} // namespace gvisor
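TestJoinLeaveMulticast above joins an IPv6 group through ipv6_mreq, resolving the loopback index with the test-only InterfaceIndex() helper. The same join can be written against POSIX if_nametoindex(); the group ff02::1:3 here is an arbitrary link-local-scope choice for this sketch, not the test's V6Multicast() address.

#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <sys/socket.h>

// Joins an IPv6 multicast group on the loopback interface of an existing
// AF_INET6 UDP socket. Returns true on success.
bool JoinGroupOnLoopback(int fd) {
  ipv6_mreq req = {};
  if (inet_pton(AF_INET6, "ff02::1:3", &req.ipv6mr_multiaddr) != 1) {
    return false;
  }
  req.ipv6mr_interface = if_nametoindex("lo");
  if (req.ipv6mr_interface == 0) return false;
  return setsockopt(fd, IPPROTO_IPV6, IPV6_ADD_MEMBERSHIP, &req,
                    sizeof(req)) == 0;
}

Dropping the membership is symmetric: the same ipv6_mreq is passed with IPV6_DROP_MEMBERSHIP, as the test's leave step shows.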
diff --git a/test/syscalls/linux/socket_ipv6_udp_unbound_external_networking.h b/test/syscalls/linux/socket_ipv6_udp_unbound_external_networking.h
deleted file mode 100644
index 731ae0a1f..000000000
--- a/test/syscalls/linux/socket_ipv6_udp_unbound_external_networking.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_LINUX_SOCKET_IPV6_UDP_UNBOUND_EXTERNAL_NETWORKING_H_
-#define GVISOR_TEST_SYSCALLS_LINUX_SOCKET_IPV6_UDP_UNBOUND_EXTERNAL_NETWORKING_H_
-
-#include "test/syscalls/linux/socket_ip_udp_unbound_external_networking.h"
-
-namespace gvisor {
-namespace testing {
-
-// Test fixture for tests that apply to unbound IPv6 UDP sockets in a sandbox
-// with external networking support.
-using IPv6UDPUnboundExternalNetworkingSocketTest =
- IPUDPUnboundExternalNetworkingSocketTest;
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_LINUX_SOCKET_IPV6_UDP_UNBOUND_EXTERNAL_NETWORKING_H_
diff --git a/test/syscalls/linux/socket_ipv6_udp_unbound_external_networking_test.cc b/test/syscalls/linux/socket_ipv6_udp_unbound_external_networking_test.cc
deleted file mode 100644
index 5c764b8fd..000000000
--- a/test/syscalls/linux/socket_ipv6_udp_unbound_external_networking_test.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/socket_ipv6_udp_unbound_external_networking.h"
-
-#include <vector>
-
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-std::vector<SocketKind> GetSockets() {
- return ApplyVec<SocketKind>(
- IPv6UDPUnboundSocket,
- AllBitwiseCombinations(List<int>{0, SOCK_NONBLOCK}));
-}
-
-INSTANTIATE_TEST_SUITE_P(IPv6UDPUnboundSockets,
- IPv6UDPUnboundExternalNetworkingSocketTest,
- ::testing::ValuesIn(GetSockets()));
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_ipv6_udp_unbound_loopback.cc b/test/syscalls/linux/socket_ipv6_udp_unbound_loopback.cc
deleted file mode 100644
index 058336ecc..000000000
--- a/test/syscalls/linux/socket_ipv6_udp_unbound_loopback.cc
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <vector>
-
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_ipv6_udp_unbound.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-INSTANTIATE_TEST_SUITE_P(
- IPv6UDPSockets, IPv6UDPUnboundSocketTest,
- ::testing::ValuesIn(ApplyVec<SocketKind>(IPv6UDPUnboundSocket,
- AllBitwiseCombinations(List<int>{
- 0, SOCK_NONBLOCK}))));
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_ipv6_udp_unbound_loopback_netlink.cc b/test/syscalls/linux/socket_ipv6_udp_unbound_loopback_netlink.cc
deleted file mode 100644
index 17021ff82..000000000
--- a/test/syscalls/linux/socket_ipv6_udp_unbound_loopback_netlink.cc
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <vector>
-
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_ipv6_udp_unbound_netlink.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-INSTANTIATE_TEST_SUITE_P(
- IPv6UDPSockets, IPv6UDPUnboundSocketNetlinkTest,
- ::testing::ValuesIn(ApplyVec<SocketKind>(IPv6UDPUnboundSocket,
- AllBitwiseCombinations(List<int>{
- 0, SOCK_NONBLOCK}))));
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_ipv6_udp_unbound_netlink.cc b/test/syscalls/linux/socket_ipv6_udp_unbound_netlink.cc
deleted file mode 100644
index 48aace78a..000000000
--- a/test/syscalls/linux/socket_ipv6_udp_unbound_netlink.cc
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/socket_ipv6_udp_unbound_netlink.h"
-
-#include <arpa/inet.h>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/socket_netlink_route_util.h"
-#include "test/util/capability_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Checks that the loopback interface does not consider itself bound to all IPs
-// in an associated subnet.
-TEST_P(IPv6UDPUnboundSocketNetlinkTest, JoinSubnet) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN)));
-
- // Add an IP address to the loopback interface.
- Link loopback_link = ASSERT_NO_ERRNO_AND_VALUE(LoopbackLink());
- struct in6_addr addr;
- EXPECT_EQ(1, inet_pton(AF_INET6, "2001:db8::1", &addr));
- EXPECT_NO_ERRNO(LinkAddLocalAddr(loopback_link.index, AF_INET6,
- /*prefixlen=*/64, &addr, sizeof(addr)));
-
- // Binding to an address that is not assigned to the loopback interface, even
- // though it falls within the associated subnet, should fail.
- TestAddress sender_addr("V6NotAssignd1");
- sender_addr.addr.ss_family = AF_INET6;
- sender_addr.addr_len = sizeof(sockaddr_in6);
- EXPECT_EQ(1, inet_pton(AF_INET6, "2001:db8::2",
- reinterpret_cast<sockaddr_in6*>(&sender_addr.addr)
- ->sin6_addr.s6_addr));
- auto sock = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
- EXPECT_THAT(
- bind(sock->get(), AsSockAddr(&sender_addr.addr), sender_addr.addr_len),
- SyscallFailsWithErrno(EADDRNOTAVAIL));
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_ipv6_udp_unbound_netlink.h b/test/syscalls/linux/socket_ipv6_udp_unbound_netlink.h
deleted file mode 100644
index 88098be82..000000000
--- a/test/syscalls/linux/socket_ipv6_udp_unbound_netlink.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_LINUX_SOCKET_IPV6_UDP_UNBOUND_NETLINK_UTIL_H_
-#define GVISOR_TEST_SYSCALLS_LINUX_SOCKET_IPV6_UDP_UNBOUND_NETLINK_UTIL_H_
-
-#include "test/syscalls/linux/socket_test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Test fixture for tests that apply to IPv6 UDP sockets.
-using IPv6UDPUnboundSocketNetlinkTest = SimpleSocketTest;
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_LINUX_SOCKET_IPV6_UDP_UNBOUND_NETLINK_UTIL_H_
diff --git a/test/syscalls/linux/socket_netdevice.cc b/test/syscalls/linux/socket_netdevice.cc
deleted file mode 100644
index 8d214a2b7..000000000
--- a/test/syscalls/linux/socket_netdevice.cc
+++ /dev/null
@@ -1,212 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <linux/ethtool.h>
-#include <linux/netlink.h>
-#include <linux/rtnetlink.h>
-#include <linux/sockios.h>
-#include <sys/ioctl.h>
-#include <sys/socket.h>
-
-#include "gtest/gtest.h"
-#include "absl/base/internal/endian.h"
-#include "test/syscalls/linux/socket_netlink_util.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/test_util.h"
-
-// Tests for netdevice queries.
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-using ::testing::AnyOf;
-using ::testing::Eq;
-
-TEST(NetdeviceTest, Loopback) {
- SKIP_IF(IsRunningWithHostinet());
- FileDescriptor sock =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, 0));
-
- // Prepare the request.
- struct ifreq ifr;
- snprintf(ifr.ifr_name, IFNAMSIZ, "lo");
-
- // Check for a non-zero interface index.
- ASSERT_THAT(ioctl(sock.get(), SIOCGIFINDEX, &ifr), SyscallSucceeds());
- EXPECT_NE(ifr.ifr_ifindex, 0);
-
- // Check that the loopback has an all-zero hardware address.
- ASSERT_THAT(ioctl(sock.get(), SIOCGIFHWADDR, &ifr), SyscallSucceeds());
- EXPECT_EQ(ifr.ifr_hwaddr.sa_family, ARPHRD_LOOPBACK);
- EXPECT_EQ(ifr.ifr_hwaddr.sa_data[0], 0);
- EXPECT_EQ(ifr.ifr_hwaddr.sa_data[1], 0);
- EXPECT_EQ(ifr.ifr_hwaddr.sa_data[2], 0);
- EXPECT_EQ(ifr.ifr_hwaddr.sa_data[3], 0);
- EXPECT_EQ(ifr.ifr_hwaddr.sa_data[4], 0);
- EXPECT_EQ(ifr.ifr_hwaddr.sa_data[5], 0);
-}
-
-TEST(NetdeviceTest, Netmask) {
- SKIP_IF(IsRunningWithHostinet());
- // We need an interface index to identify the loopback device.
- FileDescriptor sock =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, 0));
- struct ifreq ifr;
- snprintf(ifr.ifr_name, IFNAMSIZ, "lo");
- ASSERT_THAT(ioctl(sock.get(), SIOCGIFINDEX, &ifr), SyscallSucceeds());
- EXPECT_NE(ifr.ifr_ifindex, 0);
-
- // Use a netlink socket to get the netmask, which we'll then compare to the
- // netmask obtained via ioctl.
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket(NETLINK_ROUTE));
- uint32_t port = ASSERT_NO_ERRNO_AND_VALUE(NetlinkPortID(fd.get()));
-
- struct request {
- struct nlmsghdr hdr;
- struct rtgenmsg rgm;
- };
-
- constexpr uint32_t kSeq = 12345;
-
- struct request req;
- req.hdr.nlmsg_len = sizeof(req);
- req.hdr.nlmsg_type = RTM_GETADDR;
- req.hdr.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
- req.hdr.nlmsg_seq = kSeq;
- req.rgm.rtgen_family = AF_UNSPEC;
-
- // Iterate through messages until we find the one containing the prefix length
- // (i.e. netmask) for the loopback device.
- int prefixlen = -1;
- ASSERT_NO_ERRNO(NetlinkRequestResponse(
- fd, &req, sizeof(req),
- [&](const struct nlmsghdr* hdr) {
- EXPECT_THAT(hdr->nlmsg_type, AnyOf(Eq(RTM_NEWADDR), Eq(NLMSG_DONE)));
-
- EXPECT_TRUE((hdr->nlmsg_flags & NLM_F_MULTI) == NLM_F_MULTI)
- << std::hex << hdr->nlmsg_flags;
-
- EXPECT_EQ(hdr->nlmsg_seq, kSeq);
- EXPECT_EQ(hdr->nlmsg_pid, port);
-
- if (hdr->nlmsg_type != RTM_NEWADDR) {
- return;
- }
-
- // RTM_NEWADDR contains at least the header and ifaddrmsg.
- EXPECT_GE(hdr->nlmsg_len, sizeof(*hdr) + sizeof(struct ifaddrmsg));
-
- struct ifaddrmsg* ifaddrmsg =
- reinterpret_cast<struct ifaddrmsg*>(NLMSG_DATA(hdr));
- if (ifaddrmsg->ifa_index == static_cast<uint32_t>(ifr.ifr_ifindex) &&
- ifaddrmsg->ifa_family == AF_INET) {
- prefixlen = ifaddrmsg->ifa_prefixlen;
- }
- },
- false));
-
- ASSERT_GE(prefixlen, 0);
-
- // Netmask is stored big endian in struct sockaddr_in, so we do the same for
- // comparison.
- uint32_t mask = 0xffffffff << (32 - prefixlen);
- mask = absl::gbswap_32(mask);
-
- // Check that the loopback interface has the correct subnet mask.
- snprintf(ifr.ifr_name, IFNAMSIZ, "lo");
- ASSERT_THAT(ioctl(sock.get(), SIOCGIFNETMASK, &ifr), SyscallSucceeds());
- EXPECT_EQ(ifr.ifr_netmask.sa_family, AF_INET);
- struct sockaddr_in* sin =
- reinterpret_cast<struct sockaddr_in*>(&ifr.ifr_netmask);
- EXPECT_EQ(sin->sin_addr.s_addr, mask);
-}
-
-TEST(NetdeviceTest, InterfaceName) {
- SKIP_IF(IsRunningWithHostinet());
- FileDescriptor sock =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, 0));
-
- // Prepare the request.
- struct ifreq ifr;
- snprintf(ifr.ifr_name, IFNAMSIZ, "lo");
-
- // Check for a non-zero interface index.
- ASSERT_THAT(ioctl(sock.get(), SIOCGIFINDEX, &ifr), SyscallSucceeds());
- EXPECT_NE(ifr.ifr_ifindex, 0);
-
- // Check that SIOCGIFNAME finds the loopback interface.
- snprintf(ifr.ifr_name, IFNAMSIZ, "foo");
- ASSERT_THAT(ioctl(sock.get(), SIOCGIFNAME, &ifr), SyscallSucceeds());
- EXPECT_STREQ(ifr.ifr_name, "lo");
-}
-
-TEST(NetdeviceTest, InterfaceFlags) {
- FileDescriptor sock =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, 0));
-
- // Prepare the request.
- struct ifreq ifr;
- snprintf(ifr.ifr_name, IFNAMSIZ, "lo");
-
- // Check that SIOCGIFFLAGS marks the interface with IFF_LOOPBACK, IFF_UP, and
- // IFF_RUNNING.
- ASSERT_THAT(ioctl(sock.get(), SIOCGIFFLAGS, &ifr), SyscallSucceeds());
- EXPECT_EQ(ifr.ifr_flags & IFF_UP, IFF_UP);
- EXPECT_EQ(ifr.ifr_flags & IFF_RUNNING, IFF_RUNNING);
-}
-
-TEST(NetdeviceTest, InterfaceMTU) {
- SKIP_IF(IsRunningWithHostinet());
- FileDescriptor sock =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, 0));
-
- // Prepare the request.
- struct ifreq ifr = {};
- snprintf(ifr.ifr_name, IFNAMSIZ, "lo");
-
- // Check that SIOCGIFMTU returns a nonzero MTU.
- ASSERT_THAT(ioctl(sock.get(), SIOCGIFMTU, &ifr), SyscallSucceeds());
- EXPECT_GT(ifr.ifr_mtu, 0);
-}
-
-TEST(NetdeviceTest, EthtoolGetTSInfo) {
- SKIP_IF(IsRunningWithHostinet());
- FileDescriptor sock =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_DGRAM, 0));
-
- struct ethtool_ts_info tsi = {};
- tsi.cmd = ETHTOOL_GET_TS_INFO; // Get NIC's Timestamping capabilities.
-
- // Prepare the request.
- struct ifreq ifr = {};
- snprintf(ifr.ifr_name, IFNAMSIZ, "lo");
- ifr.ifr_data = (void*)&tsi;
-
- // Check SIOCETHTOOL: gVisor does not support it, while Linux should succeed.
- if (IsRunningOnGvisor()) {
- ASSERT_THAT(ioctl(sock.get(), SIOCETHTOOL, &ifr),
- SyscallFailsWithErrno(EOPNOTSUPP));
- return;
- }
- ASSERT_THAT(ioctl(sock.get(), SIOCETHTOOL, &ifr), SyscallSucceeds());
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
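The Netmask test removed above converts a prefix length to a big-endian mask with absl::gbswap_32 before comparing it against the SIOCGIFNETMASK result. A minimal libc-only sketch of that conversion (illustrative, assuming 1 <= prefixlen <= 32) is:

#include <arpa/inet.h>

#include <cstdint>

// Converts a prefix length (1..32) into the network-byte-order mask stored in
// sockaddr_in::sin_addr, e.g. 8 -> "255.0.0.0".
uint32_t PrefixLenToNetmask(int prefixlen) {
  uint32_t mask = 0xffffffffu << (32 - prefixlen);  // Host byte order.
  return htonl(mask);                               // Big endian, as in sin_addr.
}

On little-endian hosts this matches the unconditional absl::gbswap_32 used in the deleted test.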
diff --git a/test/syscalls/linux/socket_netlink.cc b/test/syscalls/linux/socket_netlink.cc
deleted file mode 100644
index 4ec0fd4fa..000000000
--- a/test/syscalls/linux/socket_netlink.cc
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <linux/netlink.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/test_util.h"
-
-// Tests for all netlink socket protocols.
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// NetlinkTest parameter is the protocol to test.
-using NetlinkTest = ::testing::TestWithParam<int>;
-
-// Netlink sockets must be SOCK_DGRAM or SOCK_RAW.
-TEST_P(NetlinkTest, Types) {
- const int protocol = GetParam();
-
- EXPECT_THAT(socket(AF_NETLINK, SOCK_STREAM, protocol),
- SyscallFailsWithErrno(ESOCKTNOSUPPORT));
- EXPECT_THAT(socket(AF_NETLINK, SOCK_SEQPACKET, protocol),
- SyscallFailsWithErrno(ESOCKTNOSUPPORT));
- EXPECT_THAT(socket(AF_NETLINK, SOCK_RDM, protocol),
- SyscallFailsWithErrno(ESOCKTNOSUPPORT));
- EXPECT_THAT(socket(AF_NETLINK, SOCK_DCCP, protocol),
- SyscallFailsWithErrno(ESOCKTNOSUPPORT));
- EXPECT_THAT(socket(AF_NETLINK, SOCK_PACKET, protocol),
- SyscallFailsWithErrno(ESOCKTNOSUPPORT));
-
- int fd;
- EXPECT_THAT(fd = socket(AF_NETLINK, SOCK_DGRAM, protocol), SyscallSucceeds());
- EXPECT_THAT(close(fd), SyscallSucceeds());
-
- EXPECT_THAT(fd = socket(AF_NETLINK, SOCK_RAW, protocol), SyscallSucceeds());
- EXPECT_THAT(close(fd), SyscallSucceeds());
-}
-
-TEST_P(NetlinkTest, AutomaticPort) {
- const int protocol = GetParam();
-
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_NETLINK, SOCK_RAW, protocol));
-
- struct sockaddr_nl addr = {};
- addr.nl_family = AF_NETLINK;
-
- EXPECT_THAT(
- bind(fd.get(), reinterpret_cast<struct sockaddr*>(&addr), sizeof(addr)),
- SyscallSucceeds());
-
- socklen_t addrlen = sizeof(addr);
- EXPECT_THAT(getsockname(fd.get(), reinterpret_cast<struct sockaddr*>(&addr),
- &addrlen),
- SyscallSucceeds());
- EXPECT_EQ(addrlen, sizeof(addr));
- // This is the only netlink socket in the process, so it should get the PID as
- // the port id.
- //
- // N.B. Another process could theoretically have explicitly reserved our pid
- // as a port ID, but that is very unlikely.
- EXPECT_EQ(addr.nl_pid, getpid());
-}
-
-// Calling connect automatically binds to an automatic port.
-TEST_P(NetlinkTest, ConnectBinds) {
- const int protocol = GetParam();
-
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_NETLINK, SOCK_RAW, protocol));
-
- struct sockaddr_nl addr = {};
- addr.nl_family = AF_NETLINK;
-
- EXPECT_THAT(connect(fd.get(), reinterpret_cast<struct sockaddr*>(&addr),
- sizeof(addr)),
- SyscallSucceeds());
-
- socklen_t addrlen = sizeof(addr);
- EXPECT_THAT(getsockname(fd.get(), reinterpret_cast<struct sockaddr*>(&addr),
- &addrlen),
- SyscallSucceeds());
- EXPECT_EQ(addrlen, sizeof(addr));
-
- // Each test is running in a pid namespace, so another process can explicitly
- // reserve our pid as a port ID. In this case, a negative portid value will be
- // set.
- if (static_cast<pid_t>(addr.nl_pid) > 0) {
- EXPECT_EQ(addr.nl_pid, getpid());
- }
-
- memset(&addr, 0, sizeof(addr));
- addr.nl_family = AF_NETLINK;
-
- // Connecting again is allowed, but keeps the same port.
- EXPECT_THAT(connect(fd.get(), reinterpret_cast<struct sockaddr*>(&addr),
- sizeof(addr)),
- SyscallSucceeds());
-
- addrlen = sizeof(addr);
- EXPECT_THAT(getsockname(fd.get(), reinterpret_cast<struct sockaddr*>(&addr),
- &addrlen),
- SyscallSucceeds());
- EXPECT_EQ(addrlen, sizeof(addr));
- EXPECT_EQ(addr.nl_pid, getpid());
-}
-
-TEST_P(NetlinkTest, GetPeerName) {
- const int protocol = GetParam();
-
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_NETLINK, SOCK_RAW, protocol));
-
- struct sockaddr_nl addr = {};
- socklen_t addrlen = sizeof(addr);
-
- EXPECT_THAT(getpeername(fd.get(), reinterpret_cast<struct sockaddr*>(&addr),
- &addrlen),
- SyscallSucceeds());
-
- EXPECT_EQ(addrlen, sizeof(addr));
- EXPECT_EQ(addr.nl_family, AF_NETLINK);
- // Peer is the kernel if we didn't connect elsewhere.
- EXPECT_EQ(addr.nl_pid, 0);
-}
-
-INSTANTIATE_TEST_SUITE_P(ProtocolTest, NetlinkTest,
- ::testing::Values(NETLINK_ROUTE,
- NETLINK_KOBJECT_UEVENT));
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
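The AutomaticPort and ConnectBinds comments in the file removed above note that another process could explicitly reserve a given netlink port ID, a path the tests never exercise. A hedged standalone sketch of explicit port binding follows; the port value is arbitrary and purely illustrative:

#include <linux/netlink.h>
#include <sys/socket.h>
#include <unistd.h>

#include <cstdint>
#include <cstdio>

int main() {
  int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
  if (fd < 0) {
    std::perror("socket");
    return 1;
  }

  // Bind to an explicit port ID instead of letting the kernel assign one. If
  // another socket already holds this port, bind() fails with EADDRINUSE.
  constexpr uint32_t kReservedPort = 0x1234;  // Arbitrary, for illustration.
  struct sockaddr_nl addr = {};
  addr.nl_family = AF_NETLINK;
  addr.nl_pid = kReservedPort;
  if (bind(fd, reinterpret_cast<struct sockaddr*>(&addr), sizeof(addr)) < 0) {
    std::perror("bind");
    close(fd);
    return 1;
  }

  // getsockname() reports the port actually in use.
  struct sockaddr_nl bound = {};
  socklen_t len = sizeof(bound);
  getsockname(fd, reinterpret_cast<struct sockaddr*>(&bound), &len);
  std::printf("bound to netlink port %u\n", bound.nl_pid);

  close(fd);
  return 0;
}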
diff --git a/test/syscalls/linux/socket_netlink_route.cc b/test/syscalls/linux/socket_netlink_route.cc
deleted file mode 100644
index ee3c08770..000000000
--- a/test/syscalls/linux/socket_netlink_route.cc
+++ /dev/null
@@ -1,971 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <arpa/inet.h>
-#include <fcntl.h>
-#include <ifaddrs.h>
-#include <linux/if.h>
-#include <linux/netlink.h>
-#include <linux/rtnetlink.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <iostream>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "absl/strings/str_format.h"
-#include "test/syscalls/linux/socket_netlink_route_util.h"
-#include "test/syscalls/linux/socket_netlink_util.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/capability_util.h"
-#include "test/util/cleanup.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/test_util.h"
-
-// Tests for NETLINK_ROUTE sockets.
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-constexpr uint32_t kSeq = 12345;
-
-using ::testing::AnyOf;
-using ::testing::Eq;
-
-// Parameters for SockOptTest. They are:
-// 0: Socket option to query.
-// 1: A predicate to run on the returned sockopt value. Should return true if
-// the value is considered ok.
-// 2: A description of what the sockopt value is expected to be. Should complete
-// the sentence "<value> was unexpected, expected <description>"
-using SockOptTest = ::testing::TestWithParam<
- std::tuple<int, std::function<bool(int)>, std::string>>;
-
-TEST_P(SockOptTest, GetSockOpt) {
- int sockopt = std::get<0>(GetParam());
- auto verifier = std::get<1>(GetParam());
- std::string verifier_description = std::get<2>(GetParam());
-
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE));
-
- int res;
- socklen_t len = sizeof(res);
-
- EXPECT_THAT(getsockopt(fd.get(), SOL_SOCKET, sockopt, &res, &len),
- SyscallSucceeds());
-
- EXPECT_EQ(len, sizeof(res));
- EXPECT_TRUE(verifier(res)) << absl::StrFormat(
- "getsockopt(%d, SOL_SOCKET, %d, &res, &len) => res=%d was unexpected, "
- "expected %s",
- fd.get(), sockopt, res, verifier_description);
-}
-
-std::function<bool(int)> IsPositive() {
- return [](int val) { return val > 0; };
-}
-
-std::function<bool(int)> IsEqual(int target) {
- return [target](int val) { return val == target; };
-}
-
-INSTANTIATE_TEST_SUITE_P(
- NetlinkRouteTest, SockOptTest,
- ::testing::Values(
- std::make_tuple(SO_SNDBUF, IsPositive(), "positive send buffer size"),
- std::make_tuple(SO_RCVBUF, IsPositive(),
- "positive receive buffer size"),
- std::make_tuple(SO_TYPE, IsEqual(SOCK_RAW),
- absl::StrFormat("SOCK_RAW (%d)", SOCK_RAW)),
- std::make_tuple(SO_DOMAIN, IsEqual(AF_NETLINK),
- absl::StrFormat("AF_NETLINK (%d)", AF_NETLINK)),
- std::make_tuple(SO_PROTOCOL, IsEqual(NETLINK_ROUTE),
- absl::StrFormat("NETLINK_ROUTE (%d)", NETLINK_ROUTE)),
- std::make_tuple(SO_PASSCRED, IsEqual(0), "0")));
-
-// Validates the responses to RTM_GETLINK + NLM_F_DUMP.
-void CheckGetLinkResponse(const struct nlmsghdr* hdr, int seq, int port) {
- EXPECT_THAT(hdr->nlmsg_type, AnyOf(Eq(RTM_NEWLINK), Eq(NLMSG_DONE)));
-
- EXPECT_TRUE((hdr->nlmsg_flags & NLM_F_MULTI) == NLM_F_MULTI)
- << std::hex << hdr->nlmsg_flags;
-
- EXPECT_EQ(hdr->nlmsg_seq, seq);
- EXPECT_EQ(hdr->nlmsg_pid, port);
-
- if (hdr->nlmsg_type != RTM_NEWLINK) {
- return;
- }
-
- // RTM_NEWLINK contains at least the header and ifinfomsg.
- EXPECT_GE(hdr->nlmsg_len, NLMSG_SPACE(sizeof(struct ifinfomsg)));
-
- // TODO(mpratt): Check ifinfomsg contents and following attrs.
-}
-
-TEST(NetlinkRouteTest, GetLinkDump) {
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket(NETLINK_ROUTE));
- uint32_t port = ASSERT_NO_ERRNO_AND_VALUE(NetlinkPortID(fd.get()));
-
- // Loopback is common among all tests, check that it's found.
- bool loopbackFound = false;
- ASSERT_NO_ERRNO(DumpLinks(fd, kSeq, [&](const struct nlmsghdr* hdr) {
- CheckGetLinkResponse(hdr, kSeq, port);
- if (hdr->nlmsg_type != RTM_NEWLINK) {
- return;
- }
- ASSERT_GE(hdr->nlmsg_len, NLMSG_SPACE(sizeof(struct ifinfomsg)));
- const struct ifinfomsg* msg =
- reinterpret_cast<const struct ifinfomsg*>(NLMSG_DATA(hdr));
- std::cout << "Found interface idx=" << msg->ifi_index
- << ", type=" << std::hex << msg->ifi_type << std::endl;
- if (msg->ifi_type == ARPHRD_LOOPBACK) {
- loopbackFound = true;
- EXPECT_NE(msg->ifi_flags & IFF_LOOPBACK, 0);
- }
- }));
- EXPECT_TRUE(loopbackFound);
-}
-
-// CheckLinkMsg checks a netlink message against an expected link.
-void CheckLinkMsg(const struct nlmsghdr* hdr, const Link& link) {
- ASSERT_THAT(hdr->nlmsg_type, Eq(RTM_NEWLINK));
- ASSERT_GE(hdr->nlmsg_len, NLMSG_SPACE(sizeof(struct ifinfomsg)));
- const struct ifinfomsg* msg =
- reinterpret_cast<const struct ifinfomsg*>(NLMSG_DATA(hdr));
- EXPECT_EQ(msg->ifi_index, link.index);
-
- const struct rtattr* rta = FindRtAttr(hdr, msg, IFLA_IFNAME);
- EXPECT_NE(nullptr, rta) << "IFLA_IFNAME not found in message.";
- if (rta != nullptr) {
- std::string name(reinterpret_cast<const char*>(RTA_DATA(rta)));
- EXPECT_EQ(name, link.name);
- }
-}
-
-TEST(NetlinkRouteTest, GetLinkByIndex) {
- Link loopback_link = ASSERT_NO_ERRNO_AND_VALUE(LoopbackLink());
-
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket(NETLINK_ROUTE));
-
- struct request {
- struct nlmsghdr hdr;
- struct ifinfomsg ifm;
- };
-
- struct request req = {};
- req.hdr.nlmsg_len = sizeof(req);
- req.hdr.nlmsg_type = RTM_GETLINK;
- req.hdr.nlmsg_flags = NLM_F_REQUEST;
- req.hdr.nlmsg_seq = kSeq;
- req.ifm.ifi_family = AF_UNSPEC;
- req.ifm.ifi_index = loopback_link.index;
-
- bool found = false;
- ASSERT_NO_ERRNO(NetlinkRequestResponse(
- fd, &req, sizeof(req),
- [&](const struct nlmsghdr* hdr) {
- CheckLinkMsg(hdr, loopback_link);
- found = true;
- },
- false));
- EXPECT_TRUE(found) << "Netlink response does not contain any links.";
-}
-
-TEST(NetlinkRouteTest, GetLinkByName) {
- Link loopback_link = ASSERT_NO_ERRNO_AND_VALUE(LoopbackLink());
-
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket(NETLINK_ROUTE));
-
- struct request {
- struct nlmsghdr hdr;
- struct ifinfomsg ifm;
- struct rtattr rtattr;
- char ifname[IFNAMSIZ];
- char pad[NLMSG_ALIGNTO + RTA_ALIGNTO];
- };
-
- struct request req = {};
- req.hdr.nlmsg_type = RTM_GETLINK;
- req.hdr.nlmsg_flags = NLM_F_REQUEST;
- req.hdr.nlmsg_seq = kSeq;
- req.ifm.ifi_family = AF_UNSPEC;
- req.rtattr.rta_type = IFLA_IFNAME;
- req.rtattr.rta_len = RTA_LENGTH(loopback_link.name.size() + 1);
- strncpy(req.ifname, loopback_link.name.c_str(), sizeof(req.ifname));
- req.hdr.nlmsg_len =
- NLMSG_LENGTH(sizeof(req.ifm)) + NLMSG_ALIGN(req.rtattr.rta_len);
-
- bool found = false;
- ASSERT_NO_ERRNO(NetlinkRequestResponse(
- fd, &req, sizeof(req),
- [&](const struct nlmsghdr* hdr) {
- CheckLinkMsg(hdr, loopback_link);
- found = true;
- },
- false));
- EXPECT_TRUE(found) << "Netlink response does not contain any links.";
-}
-
-TEST(NetlinkRouteTest, GetLinkByIndexNotFound) {
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket(NETLINK_ROUTE));
-
- struct request {
- struct nlmsghdr hdr;
- struct ifinfomsg ifm;
- };
-
- struct request req = {};
- req.hdr.nlmsg_len = sizeof(req);
- req.hdr.nlmsg_type = RTM_GETLINK;
- req.hdr.nlmsg_flags = NLM_F_REQUEST;
- req.hdr.nlmsg_seq = kSeq;
- req.ifm.ifi_family = AF_UNSPEC;
- req.ifm.ifi_index = 1234590;
-
- EXPECT_THAT(NetlinkRequestAckOrError(fd, kSeq, &req, sizeof(req)),
- PosixErrorIs(ENODEV, ::testing::_));
-}
-
-TEST(NetlinkRouteTest, GetLinkByNameNotFound) {
- const std::string name = "nodevice?!";
-
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket(NETLINK_ROUTE));
-
- struct request {
- struct nlmsghdr hdr;
- struct ifinfomsg ifm;
- struct rtattr rtattr;
- char ifname[IFNAMSIZ];
- char pad[NLMSG_ALIGNTO + RTA_ALIGNTO];
- };
-
- struct request req = {};
- req.hdr.nlmsg_type = RTM_GETLINK;
- req.hdr.nlmsg_flags = NLM_F_REQUEST;
- req.hdr.nlmsg_seq = kSeq;
- req.ifm.ifi_family = AF_UNSPEC;
- req.rtattr.rta_type = IFLA_IFNAME;
- req.rtattr.rta_len = RTA_LENGTH(name.size() + 1);
- strncpy(req.ifname, name.c_str(), sizeof(req.ifname));
- req.hdr.nlmsg_len =
- NLMSG_LENGTH(sizeof(req.ifm)) + NLMSG_ALIGN(req.rtattr.rta_len);
-
- EXPECT_THAT(NetlinkRequestAckOrError(fd, kSeq, &req, sizeof(req)),
- PosixErrorIs(ENODEV, ::testing::_));
-}
-
-TEST(NetlinkRouteTest, MsgHdrMsgUnsuppType) {
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket(NETLINK_ROUTE));
-
- struct request {
- struct nlmsghdr hdr;
- struct ifinfomsg ifm;
- };
-
- struct request req = {};
- req.hdr.nlmsg_len = sizeof(req);
- // If type & 0x3 is equal to 0x2, this means a get request
- // which doesn't require CAP_SYS_ADMIN.
- req.hdr.nlmsg_type = ((__RTM_MAX + 1024) & (~0x3)) | 0x2;
- req.hdr.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
- req.hdr.nlmsg_seq = kSeq;
- req.ifm.ifi_family = AF_UNSPEC;
-
- EXPECT_THAT(NetlinkRequestAckOrError(fd, kSeq, &req, sizeof(req)),
- PosixErrorIs(EOPNOTSUPP, ::testing::_));
-}
-
-TEST(NetlinkRouteTest, MsgHdrMsgTrunc) {
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket(NETLINK_ROUTE));
-
- struct request {
- struct nlmsghdr hdr;
- struct ifinfomsg ifm;
- };
-
- struct request req = {};
- req.hdr.nlmsg_len = sizeof(req);
- req.hdr.nlmsg_type = RTM_GETLINK;
- req.hdr.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
- req.hdr.nlmsg_seq = kSeq;
- req.ifm.ifi_family = AF_UNSPEC;
-
- struct iovec iov = {};
- iov.iov_base = &req;
- iov.iov_len = sizeof(req);
-
- struct msghdr msg = {};
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- // No destination required; it defaults to pid 0, the kernel.
-
- ASSERT_THAT(RetryEINTR(sendmsg)(fd.get(), &msg, 0), SyscallSucceeds());
-
- // Small enough to ensure that the response doesn't fit.
- constexpr size_t kBufferSize = 10;
- std::vector<char> buf(kBufferSize);
- iov.iov_base = buf.data();
- iov.iov_len = buf.size();
-
- ASSERT_THAT(RetryEINTR(recvmsg)(fd.get(), &msg, 0),
- SyscallSucceedsWithValue(kBufferSize));
- EXPECT_EQ((msg.msg_flags & MSG_TRUNC), MSG_TRUNC);
-}
-
-TEST(NetlinkRouteTest, SpliceFromPipe) {
- Link loopback_link = ASSERT_NO_ERRNO_AND_VALUE(LoopbackLink());
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket(NETLINK_ROUTE));
-
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- FileDescriptor rfd(fds[0]);
- FileDescriptor wfd(fds[1]);
-
- struct request {
- struct nlmsghdr hdr;
- struct ifinfomsg ifm;
- };
-
- struct request req = {};
- req.hdr.nlmsg_len = sizeof(req);
- req.hdr.nlmsg_type = RTM_GETLINK;
- req.hdr.nlmsg_flags = NLM_F_REQUEST;
- req.hdr.nlmsg_seq = kSeq;
- req.ifm.ifi_family = AF_UNSPEC;
- req.ifm.ifi_index = loopback_link.index;
-
- ASSERT_THAT(write(wfd.get(), &req, sizeof(req)),
- SyscallSucceedsWithValue(sizeof(req)));
-
- EXPECT_THAT(splice(rfd.get(), nullptr, fd.get(), nullptr, sizeof(req) + 1, 0),
- SyscallSucceedsWithValue(sizeof(req)));
- close(wfd.release());
- EXPECT_THAT(splice(rfd.get(), nullptr, fd.get(), nullptr, sizeof(req) + 1, 0),
- SyscallSucceedsWithValue(0));
-
- bool found = false;
- ASSERT_NO_ERRNO(NetlinkResponse(
- fd,
- [&](const struct nlmsghdr* hdr) {
- CheckLinkMsg(hdr, loopback_link);
- found = true;
- },
- false));
- EXPECT_TRUE(found) << "Netlink response does not contain any links.";
-}
-
-TEST(NetlinkRouteTest, MsgTruncMsgHdrMsgTrunc) {
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket(NETLINK_ROUTE));
-
- struct request {
- struct nlmsghdr hdr;
- struct ifinfomsg ifm;
- };
-
- struct request req = {};
- req.hdr.nlmsg_len = sizeof(req);
- req.hdr.nlmsg_type = RTM_GETLINK;
- req.hdr.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
- req.hdr.nlmsg_seq = kSeq;
- req.ifm.ifi_family = AF_UNSPEC;
-
- struct iovec iov = {};
- iov.iov_base = &req;
- iov.iov_len = sizeof(req);
-
- struct msghdr msg = {};
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- // No destination required; it defaults to pid 0, the kernel.
-
- ASSERT_THAT(RetryEINTR(sendmsg)(fd.get(), &msg, 0), SyscallSucceeds());
-
- // Small enough to ensure that the response doesn't fit.
- constexpr size_t kBufferSize = 10;
- std::vector<char> buf(kBufferSize);
- iov.iov_base = buf.data();
- iov.iov_len = buf.size();
-
- int res = 0;
- ASSERT_THAT(res = RetryEINTR(recvmsg)(fd.get(), &msg, MSG_TRUNC),
- SyscallSucceeds());
- EXPECT_GT(res, kBufferSize);
- EXPECT_EQ((msg.msg_flags & MSG_TRUNC), MSG_TRUNC);
-}
-
-TEST(NetlinkRouteTest, ControlMessageIgnored) {
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket(NETLINK_ROUTE));
- uint32_t port = ASSERT_NO_ERRNO_AND_VALUE(NetlinkPortID(fd.get()));
-
- struct request {
- struct nlmsghdr control_hdr;
- struct nlmsghdr message_hdr;
- struct ifinfomsg ifm;
- };
-
- struct request req = {};
-
- // This control message is ignored. We still receive a response for the
- // following RTM_GETLINK.
- req.control_hdr.nlmsg_len = sizeof(req.control_hdr);
- req.control_hdr.nlmsg_type = NLMSG_DONE;
- req.control_hdr.nlmsg_seq = kSeq;
-
- req.message_hdr.nlmsg_len = sizeof(req.message_hdr) + sizeof(req.ifm);
- req.message_hdr.nlmsg_type = RTM_GETLINK;
- req.message_hdr.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
- req.message_hdr.nlmsg_seq = kSeq;
-
- req.ifm.ifi_family = AF_UNSPEC;
-
- ASSERT_NO_ERRNO(NetlinkRequestResponse(
- fd, &req, sizeof(req),
- [&](const struct nlmsghdr* hdr) {
- CheckGetLinkResponse(hdr, kSeq, port);
- },
- false));
-}
-
-TEST(NetlinkRouteTest, GetAddrDump) {
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket(NETLINK_ROUTE));
- uint32_t port = ASSERT_NO_ERRNO_AND_VALUE(NetlinkPortID(fd.get()));
-
- struct request {
- struct nlmsghdr hdr;
- struct rtgenmsg rgm;
- };
-
- struct request req;
- req.hdr.nlmsg_len = sizeof(req);
- req.hdr.nlmsg_type = RTM_GETADDR;
- req.hdr.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
- req.hdr.nlmsg_seq = kSeq;
- req.rgm.rtgen_family = AF_UNSPEC;
-
- ASSERT_NO_ERRNO(NetlinkRequestResponse(
- fd, &req, sizeof(req),
- [&](const struct nlmsghdr* hdr) {
- EXPECT_THAT(hdr->nlmsg_type, AnyOf(Eq(RTM_NEWADDR), Eq(NLMSG_DONE)));
-
- EXPECT_TRUE((hdr->nlmsg_flags & NLM_F_MULTI) == NLM_F_MULTI)
- << std::hex << hdr->nlmsg_flags;
-
- EXPECT_EQ(hdr->nlmsg_seq, kSeq);
- EXPECT_EQ(hdr->nlmsg_pid, port);
-
- if (hdr->nlmsg_type != RTM_NEWADDR) {
- return;
- }
-
- // RTM_NEWADDR contains at least the header and ifaddrmsg.
- EXPECT_GE(hdr->nlmsg_len, sizeof(*hdr) + sizeof(struct ifaddrmsg));
-
- // TODO(mpratt): Check ifaddrmsg contents and following attrs.
- },
- false));
-}
-
-TEST(NetlinkRouteTest, LookupAll) {
- struct ifaddrs* if_addr_list = nullptr;
- auto cleanup = Cleanup([&if_addr_list]() { freeifaddrs(if_addr_list); });
-
- // Not a syscall but we can use the syscall matcher as glibc sets errno.
- ASSERT_THAT(getifaddrs(&if_addr_list), SyscallSucceeds());
-
- int count = 0;
- for (struct ifaddrs* i = if_addr_list; i; i = i->ifa_next) {
- if (!i->ifa_addr || (i->ifa_addr->sa_family != AF_INET &&
- i->ifa_addr->sa_family != AF_INET6)) {
- continue;
- }
- count++;
- }
- ASSERT_GT(count, 0);
-}
-
-TEST(NetlinkRouteTest, AddAndRemoveAddr) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN)));
- // Don't do cooperative save/restore because netstack state is not restored.
- // TODO(gvisor.dev/issue/4595): enable cooperative save tests.
- const DisableSave ds;
-
- Link loopback_link = ASSERT_NO_ERRNO_AND_VALUE(LoopbackLink());
-
- struct in_addr addr;
- ASSERT_EQ(inet_pton(AF_INET, "10.0.0.1", &addr), 1);
-
- // Create should succeed, as no such address in kernel.
- ASSERT_NO_ERRNO(LinkAddLocalAddr(loopback_link.index, AF_INET,
- /*prefixlen=*/24, &addr, sizeof(addr)));
-
- Cleanup defer_addr_removal = Cleanup(
- [loopback_link = std::move(loopback_link), addr = std::move(addr)] {
- // First delete should succeed, as address exists.
- EXPECT_NO_ERRNO(LinkDelLocalAddr(loopback_link.index, AF_INET,
- /*prefixlen=*/24, &addr,
- sizeof(addr)));
-
- // Second delete should fail, as address no longer exists.
- EXPECT_THAT(LinkDelLocalAddr(loopback_link.index, AF_INET,
- /*prefixlen=*/24, &addr, sizeof(addr)),
- PosixErrorIs(EADDRNOTAVAIL, ::testing::_));
- });
-
- // Replace an existing address should succeed.
- ASSERT_NO_ERRNO(LinkReplaceLocalAddr(loopback_link.index, AF_INET,
- /*prefixlen=*/24, &addr, sizeof(addr)));
-
- // Create exclusive should fail, as we created the address above.
- EXPECT_THAT(LinkAddExclusiveLocalAddr(loopback_link.index, AF_INET,
- /*prefixlen=*/24, &addr, sizeof(addr)),
- PosixErrorIs(EEXIST, ::testing::_));
-}
-
-// GetRouteDump tests a RTM_GETROUTE + NLM_F_DUMP request.
-TEST(NetlinkRouteTest, GetRouteDump) {
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket(NETLINK_ROUTE));
- uint32_t port = ASSERT_NO_ERRNO_AND_VALUE(NetlinkPortID(fd.get()));
-
- struct request {
- struct nlmsghdr hdr;
- struct rtmsg rtm;
- };
-
- struct request req = {};
- req.hdr.nlmsg_len = sizeof(req);
- req.hdr.nlmsg_type = RTM_GETROUTE;
- req.hdr.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
- req.hdr.nlmsg_seq = kSeq;
- req.rtm.rtm_family = AF_UNSPEC;
-
- bool routeFound = false;
- bool dstFound = true;
- ASSERT_NO_ERRNO(NetlinkRequestResponse(
- fd, &req, sizeof(req),
- [&](const struct nlmsghdr* hdr) {
- // Validate the response to RTM_GETROUTE + NLM_F_DUMP.
- EXPECT_THAT(hdr->nlmsg_type, AnyOf(Eq(RTM_NEWROUTE), Eq(NLMSG_DONE)));
-
- EXPECT_TRUE((hdr->nlmsg_flags & NLM_F_MULTI) == NLM_F_MULTI)
- << std::hex << hdr->nlmsg_flags;
-
- EXPECT_EQ(hdr->nlmsg_seq, kSeq);
- EXPECT_EQ(hdr->nlmsg_pid, port);
-
- // The test should not proceed if it's not a RTM_NEWROUTE message.
- if (hdr->nlmsg_type != RTM_NEWROUTE) {
- return;
- }
-
- // RTM_NEWROUTE contains at least the header and rtmsg.
- ASSERT_GE(hdr->nlmsg_len, NLMSG_SPACE(sizeof(struct rtmsg)));
- const struct rtmsg* msg =
- reinterpret_cast<const struct rtmsg*>(NLMSG_DATA(hdr));
- // NOTE: rtmsg fields are char fields.
- std::cout << "Found route table=" << static_cast<int>(msg->rtm_table)
- << ", protocol=" << static_cast<int>(msg->rtm_protocol)
- << ", scope=" << static_cast<int>(msg->rtm_scope)
- << ", type=" << static_cast<int>(msg->rtm_type);
-
- int len = RTM_PAYLOAD(hdr);
- bool rtDstFound = false;
- for (struct rtattr* attr = RTM_RTA(msg); RTA_OK(attr, len);
- attr = RTA_NEXT(attr, len)) {
- if (attr->rta_type == RTA_DST) {
- char address[INET_ADDRSTRLEN] = {};
- inet_ntop(AF_INET, RTA_DATA(attr), address, sizeof(address));
- std::cout << ", dst=" << address;
- rtDstFound = true;
- }
- }
-
- std::cout << std::endl;
-
- // If the test is running in a new network namespace, it will have only
- // the local route table.
- if (msg->rtm_table == RT_TABLE_MAIN ||
- (!IsRunningOnGvisor() && msg->rtm_table == RT_TABLE_LOCAL)) {
- routeFound = true;
- dstFound = rtDstFound && dstFound;
- }
- },
- false));
- // At least one route found in main route table.
- EXPECT_TRUE(routeFound);
- // Found RTA_DST for each route in main table.
- EXPECT_TRUE(dstFound);
-}
-
-// GetRouteRequest tests a RTM_GETROUTE request with RTM_F_LOOKUP_TABLE flag.
-TEST(NetlinkRouteTest, GetRouteRequest) {
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket(NETLINK_ROUTE));
- uint32_t port = ASSERT_NO_ERRNO_AND_VALUE(NetlinkPortID(fd.get()));
-
- struct request {
- struct nlmsghdr hdr;
- struct rtmsg rtm;
- struct nlattr nla;
- struct in_addr sin_addr;
- };
-
- constexpr uint32_t kSeq = 12345;
-
- struct request req = {};
- req.hdr.nlmsg_len = sizeof(req);
- req.hdr.nlmsg_type = RTM_GETROUTE;
- req.hdr.nlmsg_flags = NLM_F_REQUEST;
- req.hdr.nlmsg_seq = kSeq;
-
- req.rtm.rtm_family = AF_INET;
- req.rtm.rtm_dst_len = 32;
- req.rtm.rtm_src_len = 0;
- req.rtm.rtm_tos = 0;
- req.rtm.rtm_table = RT_TABLE_UNSPEC;
- req.rtm.rtm_protocol = RTPROT_UNSPEC;
- req.rtm.rtm_scope = RT_SCOPE_UNIVERSE;
- req.rtm.rtm_type = RTN_UNSPEC;
- req.rtm.rtm_flags = RTM_F_LOOKUP_TABLE;
-
- req.nla.nla_len = 8;
- req.nla.nla_type = RTA_DST;
- inet_aton("127.0.0.2", &req.sin_addr);
-
- bool rtDstFound = false;
- ASSERT_NO_ERRNO(NetlinkRequestResponseSingle(
- fd, &req, sizeof(req), [&](const struct nlmsghdr* hdr) {
- // Validate the response to the RTM_GETROUTE request with the
- // RTM_F_LOOKUP_TABLE flag.
- EXPECT_THAT(hdr->nlmsg_type, RTM_NEWROUTE);
-
- EXPECT_TRUE(hdr->nlmsg_flags == 0) << std::hex << hdr->nlmsg_flags;
-
- EXPECT_EQ(hdr->nlmsg_seq, kSeq);
- EXPECT_EQ(hdr->nlmsg_pid, port);
-
- // RTM_NEWROUTE contains at least the header and rtmsg.
- ASSERT_GE(hdr->nlmsg_len, NLMSG_SPACE(sizeof(struct rtmsg)));
- const struct rtmsg* msg =
- reinterpret_cast<const struct rtmsg*>(NLMSG_DATA(hdr));
-
- // NOTE: rtmsg fields are char fields.
- std::cout << "Found route table=" << static_cast<int>(msg->rtm_table)
- << ", protocol=" << static_cast<int>(msg->rtm_protocol)
- << ", scope=" << static_cast<int>(msg->rtm_scope)
- << ", type=" << static_cast<int>(msg->rtm_type);
-
- EXPECT_EQ(msg->rtm_family, AF_INET);
- EXPECT_EQ(msg->rtm_dst_len, 32);
- EXPECT_TRUE((msg->rtm_flags & RTM_F_CLONED) == RTM_F_CLONED)
- << std::hex << msg->rtm_flags;
-
- int len = RTM_PAYLOAD(hdr);
- std::cout << ", len=" << len;
- for (struct rtattr* attr = RTM_RTA(msg); RTA_OK(attr, len);
- attr = RTA_NEXT(attr, len)) {
- if (attr->rta_type == RTA_DST) {
- char address[INET_ADDRSTRLEN] = {};
- inet_ntop(AF_INET, RTA_DATA(attr), address, sizeof(address));
- std::cout << ", dst=" << address;
- rtDstFound = true;
- } else if (attr->rta_type == RTA_OIF) {
- const char* oif = reinterpret_cast<const char*>(RTA_DATA(attr));
- std::cout << ", oif=" << oif;
- }
- }
-
- std::cout << std::endl;
- }));
- // Found RTA_DST for RTM_F_LOOKUP_TABLE.
- EXPECT_TRUE(rtDstFound);
-}
-
-// RecvmsgTrunc tests the recvmsg MSG_TRUNC flag with zero length output
-// buffer. MSG_TRUNC with a zero length buffer should consume subsequent
-// messages off the socket.
-TEST(NetlinkRouteTest, RecvmsgTrunc) {
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket(NETLINK_ROUTE));
-
- struct request {
- struct nlmsghdr hdr;
- struct rtgenmsg rgm;
- };
-
- struct request req;
- req.hdr.nlmsg_len = sizeof(req);
- req.hdr.nlmsg_type = RTM_GETADDR;
- req.hdr.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
- req.hdr.nlmsg_seq = kSeq;
- req.rgm.rtgen_family = AF_UNSPEC;
-
- struct iovec iov = {};
- iov.iov_base = &req;
- iov.iov_len = sizeof(req);
-
- struct msghdr msg = {};
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(sendmsg)(fd.get(), &msg, 0), SyscallSucceeds());
-
- iov.iov_base = NULL;
- iov.iov_len = 0;
-
- int trunclen, trunclen2;
-
- // Note: This test assumes at least two messages are returned by the
- // RTM_GETADDR request: at least one RTM_NEWADDR message and one NLMSG_DONE
- // message. We cannot read all the messages without blocking because we would
- // need to read each message into a buffer and check its nlmsg_type for
- // NLMSG_DONE. However, the test depends on reading into a zero-length buffer.
-
- // First, call recvmsg with MSG_TRUNC. This will read the full message from
- // the socket and return its full length. Subsequent calls to recvmsg will
- // read the next messages from the socket.
- ASSERT_THAT(trunclen = RetryEINTR(recvmsg)(fd.get(), &msg, MSG_TRUNC),
- SyscallSucceeds());
-
- // The message should always be truncated. Even though the destination iov is
- // zero length, MSG_TRUNC returns the size of the next message, so the return
- // value should not be zero.
- ASSERT_EQ(msg.msg_flags & MSG_TRUNC, MSG_TRUNC);
- ASSERT_NE(trunclen, 0);
- // Returned length is at least the header and ifaddrmsg.
- EXPECT_GE(trunclen, sizeof(struct nlmsghdr) + sizeof(struct ifaddrmsg));
-
- // Reset the msg_flags to make sure that the recvmsg call is setting them
- // properly.
- msg.msg_flags = 0;
-
- // Make a second recvmsg call to get the next message.
- ASSERT_THAT(trunclen2 = RetryEINTR(recvmsg)(fd.get(), &msg, MSG_TRUNC),
- SyscallSucceeds());
- ASSERT_EQ(msg.msg_flags & MSG_TRUNC, MSG_TRUNC);
- ASSERT_NE(trunclen2, 0);
-
- // Assert that the received messages are not the same.
- //
- // We are calling recvmsg with a zero length buffer so we have no way to
- // inspect the messages to make sure they are not equal in value. The best
- // we can do is to compare their lengths.
- ASSERT_NE(trunclen, trunclen2);
-}
-
-// RecvmsgTruncPeek tests recvmsg with the combination of the MSG_TRUNC and
-// MSG_PEEK flags and a zero length output buffer. This is normally used to
-// read the full length of the next message on the socket without consuming
-// it, so a properly sized buffer can be allocated to store the message. This
-// test tests that scenario.
-TEST(NetlinkRouteTest, RecvmsgTruncPeek) {
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket(NETLINK_ROUTE));
-
- struct request {
- struct nlmsghdr hdr;
- struct rtgenmsg rgm;
- };
-
- struct request req;
- req.hdr.nlmsg_len = sizeof(req);
- req.hdr.nlmsg_type = RTM_GETADDR;
- req.hdr.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
- req.hdr.nlmsg_seq = kSeq;
- req.rgm.rtgen_family = AF_UNSPEC;
-
- struct iovec iov = {};
- iov.iov_base = &req;
- iov.iov_len = sizeof(req);
-
- struct msghdr msg = {};
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(sendmsg)(fd.get(), &msg, 0), SyscallSucceeds());
-
- int type = -1;
- do {
- int peeklen;
- int len;
-
- iov.iov_base = NULL;
- iov.iov_len = 0;
-
- // Call recvmsg with MSG_PEEK and MSG_TRUNC. This will peek at the message
- // and return its full length.
- // See: MSG_TRUNC http://man7.org/linux/man-pages/man2/recv.2.html
- ASSERT_THAT(
- peeklen = RetryEINTR(recvmsg)(fd.get(), &msg, MSG_PEEK | MSG_TRUNC),
- SyscallSucceeds());
-
- // Message should always be truncated.
- ASSERT_EQ(msg.msg_flags & MSG_TRUNC, MSG_TRUNC);
- ASSERT_NE(peeklen, 0);
-
- // Reset the message flags for the next call.
- msg.msg_flags = 0;
-
- // Make the actual recvmsg call to get the data, using the length returned
- // from the peek call as the allocated buffer size.
- std::vector<char> buf(peeklen);
- iov.iov_base = buf.data();
- iov.iov_len = buf.size();
- ASSERT_THAT(len = RetryEINTR(recvmsg)(fd.get(), &msg, 0),
- SyscallSucceeds());
-
- // Message should not be truncated since we allocated the correct buffer
- // size.
- EXPECT_NE(msg.msg_flags & MSG_TRUNC, MSG_TRUNC);
-
- // MSG_PEEK should have left the data on the socket, and the subsequent call
- // should have retrieved the same data. Both calls should have returned the
- // message's full length, so the two lengths should be equal.
- ASSERT_NE(len, 0);
- ASSERT_EQ(peeklen, len);
-
- for (struct nlmsghdr* hdr = reinterpret_cast<struct nlmsghdr*>(buf.data());
- NLMSG_OK(hdr, len); hdr = NLMSG_NEXT(hdr, len)) {
- type = hdr->nlmsg_type;
- }
- } while (type != NLMSG_DONE && type != NLMSG_ERROR);
-}
-
-// No SCM_CREDENTIALS are received without SO_PASSCRED set.
-TEST(NetlinkRouteTest, NoPasscredNoCreds) {
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket(NETLINK_ROUTE));
-
- ASSERT_THAT(setsockopt(fd.get(), SOL_SOCKET, SO_PASSCRED, &kSockOptOff,
- sizeof(kSockOptOff)),
- SyscallSucceeds());
-
- struct request {
- struct nlmsghdr hdr;
- struct rtgenmsg rgm;
- };
-
- struct request req;
- req.hdr.nlmsg_len = sizeof(req);
- req.hdr.nlmsg_type = RTM_GETADDR;
- req.hdr.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
- req.hdr.nlmsg_seq = kSeq;
- req.rgm.rtgen_family = AF_UNSPEC;
-
- struct iovec iov = {};
- iov.iov_base = &req;
- iov.iov_len = sizeof(req);
-
- struct msghdr msg = {};
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(sendmsg)(fd.get(), &msg, 0), SyscallSucceeds());
-
- iov.iov_base = NULL;
- iov.iov_len = 0;
-
- char control[CMSG_SPACE(sizeof(struct ucred))] = {};
- msg.msg_control = control;
- msg.msg_controllen = sizeof(control);
-
- // Note: This test assumes at least one message is returned by the
- // RTM_GETADDR request.
- ASSERT_THAT(RetryEINTR(recvmsg)(fd.get(), &msg, 0), SyscallSucceeds());
-
- // No control messages.
- EXPECT_EQ(CMSG_FIRSTHDR(&msg), nullptr);
-}
-
-// SCM_CREDENTIALS are received with SO_PASSCRED set.
-TEST(NetlinkRouteTest, PasscredCreds) {
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket(NETLINK_ROUTE));
-
- ASSERT_THAT(setsockopt(fd.get(), SOL_SOCKET, SO_PASSCRED, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- struct request {
- struct nlmsghdr hdr;
- struct rtgenmsg rgm;
- };
-
- struct request req;
- req.hdr.nlmsg_len = sizeof(req);
- req.hdr.nlmsg_type = RTM_GETADDR;
- req.hdr.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
- req.hdr.nlmsg_seq = kSeq;
- req.rgm.rtgen_family = AF_UNSPEC;
-
- struct iovec iov = {};
- iov.iov_base = &req;
- iov.iov_len = sizeof(req);
-
- struct msghdr msg = {};
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(sendmsg)(fd.get(), &msg, 0), SyscallSucceeds());
-
- iov.iov_base = NULL;
- iov.iov_len = 0;
-
- char control[CMSG_SPACE(sizeof(struct ucred))] = {};
- msg.msg_control = control;
- msg.msg_controllen = sizeof(control);
-
- // Note: This test assumes at least one message is returned by the
- // RTM_GETADDR request.
- ASSERT_THAT(RetryEINTR(recvmsg)(fd.get(), &msg, 0), SyscallSucceeds());
-
- struct ucred creds;
- struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
- ASSERT_NE(cmsg, nullptr);
- ASSERT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(creds)));
- ASSERT_EQ(cmsg->cmsg_level, SOL_SOCKET);
- ASSERT_EQ(cmsg->cmsg_type, SCM_CREDENTIALS);
-
- memcpy(&creds, CMSG_DATA(cmsg), sizeof(creds));
-
- // The peer is the kernel, which is "PID" 0.
- EXPECT_EQ(creds.pid, 0);
- // The kernel identifies as root. Also allow nobody in case this test is
- // running in a userns without root mapped.
- EXPECT_THAT(creds.uid, AnyOf(Eq(0), Eq(65534)));
- EXPECT_THAT(creds.gid, AnyOf(Eq(0), Eq(65534)));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
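RecvmsgTruncPeek in the file removed above documents the peek-then-read sizing idiom for netlink sockets. The sketch below restates it as a small plain-POSIX helper outside the gtest harness; it is illustrative only and assumes a request (such as the RTM_GETADDR dump above) has already been sent on the socket:

#include <sys/socket.h>
#include <sys/uio.h>

#include <vector>

// Returns the next netlink message queued on |fd|, sized exactly to fit, or an
// empty vector on error.
std::vector<char> ReadNextNetlinkMessage(int fd) {
  struct iovec iov = {};  // Zero-length buffer: we only want the length.
  struct msghdr msg = {};
  msg.msg_iov = &iov;
  msg.msg_iovlen = 1;

  // MSG_TRUNC makes recvmsg return the full message length even though no
  // bytes are copied; MSG_PEEK leaves the message queued on the socket.
  ssize_t full_len = recvmsg(fd, &msg, MSG_PEEK | MSG_TRUNC);
  if (full_len <= 0) {
    return {};
  }

  // Read the message for real into a buffer of exactly the right size.
  std::vector<char> buf(full_len);
  iov.iov_base = buf.data();
  iov.iov_len = buf.size();
  if (recvmsg(fd, &msg, 0) != full_len) {
    return {};
  }
  return buf;
}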
diff --git a/test/syscalls/linux/socket_netlink_route_util.cc b/test/syscalls/linux/socket_netlink_route_util.cc
deleted file mode 100644
index 46f749c7c..000000000
--- a/test/syscalls/linux/socket_netlink_route_util.cc
+++ /dev/null
@@ -1,223 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/socket_netlink_route_util.h"
-
-#include <linux/if.h>
-#include <linux/netlink.h>
-#include <linux/rtnetlink.h>
-
-#include "test/syscalls/linux/socket_netlink_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-constexpr uint32_t kSeq = 12345;
-
-// Types of address modifications that may be performed on an interface.
-enum class LinkAddrModification {
- kAdd,
- kAddExclusive,
- kReplace,
- kDelete,
-};
-
-// Populates |hdr| with appropriate values for the modification type.
-PosixError PopulateNlmsghdr(LinkAddrModification modification,
- struct nlmsghdr* hdr) {
- switch (modification) {
- case LinkAddrModification::kAdd:
- hdr->nlmsg_type = RTM_NEWADDR;
- hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
- return NoError();
- case LinkAddrModification::kAddExclusive:
- hdr->nlmsg_type = RTM_NEWADDR;
- hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_EXCL | NLM_F_ACK;
- return NoError();
- case LinkAddrModification::kReplace:
- hdr->nlmsg_type = RTM_NEWADDR;
- hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_REPLACE | NLM_F_ACK;
- return NoError();
- case LinkAddrModification::kDelete:
- hdr->nlmsg_type = RTM_DELADDR;
- hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
- return NoError();
- }
-
- return PosixError(EINVAL);
-}
-
-// Adds or removes the specified address from the specified interface.
-PosixError LinkModifyLocalAddr(int index, int family, int prefixlen,
- const void* addr, int addrlen,
- LinkAddrModification modification) {
- ASSIGN_OR_RETURN_ERRNO(FileDescriptor fd, NetlinkBoundSocket(NETLINK_ROUTE));
-
- struct request {
- struct nlmsghdr hdr;
- struct ifaddrmsg ifaddr;
- char attrbuf[512];
- };
-
- struct request req = {};
- PosixError err = PopulateNlmsghdr(modification, &req.hdr);
- if (!err.ok()) {
- return err;
- }
- req.hdr.nlmsg_len = NLMSG_LENGTH(sizeof(req.ifaddr));
- req.hdr.nlmsg_seq = kSeq;
- req.ifaddr.ifa_index = index;
- req.ifaddr.ifa_family = family;
- req.ifaddr.ifa_prefixlen = prefixlen;
-
- struct rtattr* rta = reinterpret_cast<struct rtattr*>(
- reinterpret_cast<int8_t*>(&req) + NLMSG_ALIGN(req.hdr.nlmsg_len));
- rta->rta_type = IFA_LOCAL;
- rta->rta_len = RTA_LENGTH(addrlen);
- req.hdr.nlmsg_len = NLMSG_ALIGN(req.hdr.nlmsg_len) + RTA_LENGTH(addrlen);
- memcpy(RTA_DATA(rta), addr, addrlen);
-
- return NetlinkRequestAckOrError(fd, kSeq, &req, req.hdr.nlmsg_len);
-}
-
-} // namespace
-
-PosixError DumpLinks(
- const FileDescriptor& fd, uint32_t seq,
- const std::function<void(const struct nlmsghdr* hdr)>& fn) {
- struct request {
- struct nlmsghdr hdr;
- struct ifinfomsg ifm;
- };
-
- struct request req = {};
- req.hdr.nlmsg_len = sizeof(req);
- req.hdr.nlmsg_type = RTM_GETLINK;
- req.hdr.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
- req.hdr.nlmsg_seq = seq;
- req.ifm.ifi_family = AF_UNSPEC;
-
- return NetlinkRequestResponse(fd, &req, sizeof(req), fn, false);
-}
-
-PosixErrorOr<std::vector<Link>> DumpLinks() {
- ASSIGN_OR_RETURN_ERRNO(FileDescriptor fd, NetlinkBoundSocket(NETLINK_ROUTE));
-
- std::vector<Link> links;
- RETURN_IF_ERRNO(DumpLinks(fd, kSeq, [&](const struct nlmsghdr* hdr) {
- if (hdr->nlmsg_type != RTM_NEWLINK ||
- hdr->nlmsg_len < NLMSG_SPACE(sizeof(struct ifinfomsg))) {
- return;
- }
- const struct ifinfomsg* msg =
- reinterpret_cast<const struct ifinfomsg*>(NLMSG_DATA(hdr));
- const auto* rta = FindRtAttr(hdr, msg, IFLA_IFNAME);
- if (rta == nullptr) {
- // Ignore links that do not have a name.
- return;
- }
-
- links.emplace_back();
- links.back().index = msg->ifi_index;
- links.back().type = msg->ifi_type;
- links.back().name =
- std::string(reinterpret_cast<const char*>(RTA_DATA(rta)));
- }));
- return links;
-}
-
-PosixErrorOr<Link> LoopbackLink() {
- ASSIGN_OR_RETURN_ERRNO(auto links, DumpLinks());
- for (const auto& link : links) {
- if (link.type == ARPHRD_LOOPBACK) {
- return link;
- }
- }
- return PosixError(ENOENT, "loopback link not found");
-}
-
-PosixError LinkAddLocalAddr(int index, int family, int prefixlen,
- const void* addr, int addrlen) {
- return LinkModifyLocalAddr(index, family, prefixlen, addr, addrlen,
- LinkAddrModification::kAdd);
-}
-
-PosixError LinkAddExclusiveLocalAddr(int index, int family, int prefixlen,
- const void* addr, int addrlen) {
- return LinkModifyLocalAddr(index, family, prefixlen, addr, addrlen,
- LinkAddrModification::kAddExclusive);
-}
-
-PosixError LinkReplaceLocalAddr(int index, int family, int prefixlen,
- const void* addr, int addrlen) {
- return LinkModifyLocalAddr(index, family, prefixlen, addr, addrlen,
- LinkAddrModification::kReplace);
-}
-
-PosixError LinkDelLocalAddr(int index, int family, int prefixlen,
- const void* addr, int addrlen) {
- return LinkModifyLocalAddr(index, family, prefixlen, addr, addrlen,
- LinkAddrModification::kDelete);
-}
-
-PosixError LinkChangeFlags(int index, unsigned int flags, unsigned int change) {
- ASSIGN_OR_RETURN_ERRNO(FileDescriptor fd, NetlinkBoundSocket(NETLINK_ROUTE));
-
- struct request {
- struct nlmsghdr hdr;
- struct ifinfomsg ifinfo;
- char pad[NLMSG_ALIGNTO];
- };
-
- struct request req = {};
- req.hdr.nlmsg_len = NLMSG_LENGTH(sizeof(req.ifinfo));
- req.hdr.nlmsg_type = RTM_NEWLINK;
- req.hdr.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
- req.hdr.nlmsg_seq = kSeq;
- req.ifinfo.ifi_index = index;
- req.ifinfo.ifi_flags = flags;
- req.ifinfo.ifi_change = change;
-
- return NetlinkRequestAckOrError(fd, kSeq, &req, req.hdr.nlmsg_len);
-}
-
-PosixError LinkSetMacAddr(int index, const void* addr, int addrlen) {
- ASSIGN_OR_RETURN_ERRNO(FileDescriptor fd, NetlinkBoundSocket(NETLINK_ROUTE));
-
- struct request {
- struct nlmsghdr hdr;
- struct ifinfomsg ifinfo;
- char attrbuf[512];
- };
-
- struct request req = {};
- req.hdr.nlmsg_len = NLMSG_LENGTH(sizeof(req.ifinfo));
- req.hdr.nlmsg_type = RTM_NEWLINK;
- req.hdr.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
- req.hdr.nlmsg_seq = kSeq;
- req.ifinfo.ifi_index = index;
-
- struct rtattr* rta = reinterpret_cast<struct rtattr*>(
- reinterpret_cast<int8_t*>(&req) + NLMSG_ALIGN(req.hdr.nlmsg_len));
- rta->rta_type = IFLA_ADDRESS;
- rta->rta_len = RTA_LENGTH(addrlen);
- req.hdr.nlmsg_len = NLMSG_ALIGN(req.hdr.nlmsg_len) + RTA_LENGTH(addrlen);
- memcpy(RTA_DATA(rta), addr, addrlen);
-
- return NetlinkRequestAckOrError(fd, kSeq, &req, req.hdr.nlmsg_len);
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_netlink_route_util.h b/test/syscalls/linux/socket_netlink_route_util.h
deleted file mode 100644
index eaa91ad79..000000000
--- a/test/syscalls/linux/socket_netlink_route_util.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_LINUX_SOCKET_NETLINK_ROUTE_UTIL_H_
-#define GVISOR_TEST_SYSCALLS_LINUX_SOCKET_NETLINK_ROUTE_UTIL_H_
-
-#include <linux/netlink.h>
-#include <linux/rtnetlink.h>
-
-#include <vector>
-
-#include "test/syscalls/linux/socket_netlink_util.h"
-
-namespace gvisor {
-namespace testing {
-
-struct Link {
- int index;
- int16_t type;
- std::string name;
-};
-
-PosixError DumpLinks(const FileDescriptor& fd, uint32_t seq,
- const std::function<void(const struct nlmsghdr* hdr)>& fn);
-
-PosixErrorOr<std::vector<Link>> DumpLinks();
-
-// Returns the loopback link on the system. ENOENT if not found.
-PosixErrorOr<Link> LoopbackLink();
-
-// LinkAddLocalAddr adds a new IFA_LOCAL address to the interface.
-PosixError LinkAddLocalAddr(int index, int family, int prefixlen,
- const void* addr, int addrlen);
-
-// LinkAddExclusiveLocalAddr adds a new IFA_LOCAL address with NLM_F_EXCL flag
-// to the interface.
-PosixError LinkAddExclusiveLocalAddr(int index, int family, int prefixlen,
- const void* addr, int addrlen);
-
-// LinkReplaceLocalAddr replaces an IFA_LOCAL address on the interface.
-PosixError LinkReplaceLocalAddr(int index, int family, int prefixlen,
- const void* addr, int addrlen);
-
-// LinkDelLocalAddr removes an IFA_LOCAL address from the interface.
-PosixError LinkDelLocalAddr(int index, int family, int prefixlen,
- const void* addr, int addrlen);
-
-// LinkChangeFlags changes interface flags. E.g. IFF_UP.
-PosixError LinkChangeFlags(int index, unsigned int flags, unsigned int change);
-
-// LinkSetMacAddr sets IFLA_ADDRESS attribute of the interface.
-PosixError LinkSetMacAddr(int index, const void* addr, int addrlen);
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_LINUX_SOCKET_NETLINK_ROUTE_UTIL_H_
diff --git a/test/syscalls/linux/socket_netlink_uevent.cc b/test/syscalls/linux/socket_netlink_uevent.cc
deleted file mode 100644
index da425bed4..000000000
--- a/test/syscalls/linux/socket_netlink_uevent.cc
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <linux/filter.h>
-#include <linux/netlink.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/socket_netlink_util.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/test_util.h"
-
-// Tests for NETLINK_KOBJECT_UEVENT sockets.
-//
-// gVisor never sends any messages on these sockets, so we don't test the events
-// themselves.
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// SO_PASSCRED can be enabled. Since no messages are sent in gVisor, we don't
-// actually test receiving credentials.
-TEST(NetlinkUeventTest, PassCred) {
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket(NETLINK_KOBJECT_UEVENT));
-
- EXPECT_THAT(setsockopt(fd.get(), SOL_SOCKET, SO_PASSCRED, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
-}
-
-// SO_DETACH_FILTER fails without a filter already installed.
-TEST(NetlinkUeventTest, DetachNoFilter) {
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket(NETLINK_KOBJECT_UEVENT));
-
- int opt;
- EXPECT_THAT(
- setsockopt(fd.get(), SOL_SOCKET, SO_DETACH_FILTER, &opt, sizeof(opt)),
- SyscallFailsWithErrno(ENOENT));
-}
-
-// We can attach a BPF filter.
-TEST(NetlinkUeventTest, AttachFilter) {
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket(NETLINK_KOBJECT_UEVENT));
-
- // Minimal BPF program: a single ret.
- struct sock_filter filter = {0x6, 0, 0, 0};
- struct sock_fprog prog = {};
- prog.len = 1;
- prog.filter = &filter;
-
- EXPECT_THAT(
- setsockopt(fd.get(), SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)),
- SyscallSucceeds());
-
- int opt;
- EXPECT_THAT(
- setsockopt(fd.get(), SOL_SOCKET, SO_DETACH_FILTER, &opt, sizeof(opt)),
- SyscallSucceeds());
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
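The AttachFilter test removed above installs a classic BPF program written as the raw initializer {0x6, 0, 0, 0}. As an equivalent restatement (not part of the deleted sources), the same single-instruction filter can be built with the BPF_STMT macro from <linux/filter.h>:

#include <linux/filter.h>

// BPF_RET | BPF_K is opcode 0x06, so this is the same encoding as the raw
// {0x6, 0, 0, 0} initializer above. Returning k == 0 accepts zero bytes of
// each packet, i.e. the filter drops everything, which is fine when only the
// attach/detach round trip matters.
inline struct sock_fprog MakeDropAllFilter() {
  static struct sock_filter filter[] = {BPF_STMT(BPF_RET | BPF_K, 0)};
  struct sock_fprog prog = {};
  prog.len = 1;
  prog.filter = filter;
  return prog;
}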
diff --git a/test/syscalls/linux/socket_netlink_util.cc b/test/syscalls/linux/socket_netlink_util.cc
deleted file mode 100644
index bdebea321..000000000
--- a/test/syscalls/linux/socket_netlink_util.cc
+++ /dev/null
@@ -1,198 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/socket_netlink_util.h"
-
-#include <linux/if_arp.h>
-#include <linux/netlink.h>
-#include <linux/rtnetlink.h>
-#include <sys/socket.h>
-
-#include <vector>
-
-#include "absl/strings/str_cat.h"
-#include "test/syscalls/linux/socket_test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-PosixErrorOr<FileDescriptor> NetlinkBoundSocket(int protocol) {
- FileDescriptor fd;
- ASSIGN_OR_RETURN_ERRNO(fd, Socket(AF_NETLINK, SOCK_RAW, protocol));
-
- struct sockaddr_nl addr = {};
- addr.nl_family = AF_NETLINK;
-
- RETURN_ERROR_IF_SYSCALL_FAIL(
- bind(fd.get(), reinterpret_cast<struct sockaddr*>(&addr), sizeof(addr)));
- MaybeSave();
-
- return std::move(fd);
-}
-
-PosixErrorOr<uint32_t> NetlinkPortID(int fd) {
- struct sockaddr_nl addr;
- socklen_t addrlen = sizeof(addr);
-
- RETURN_ERROR_IF_SYSCALL_FAIL(
- getsockname(fd, reinterpret_cast<struct sockaddr*>(&addr), &addrlen));
- MaybeSave();
-
- return static_cast<uint32_t>(addr.nl_pid);
-}
-
-PosixError NetlinkRequestResponse(
- const FileDescriptor& fd, void* request, size_t len,
- const std::function<void(const struct nlmsghdr* hdr)>& fn,
- bool expect_nlmsgerr) {
- struct iovec iov = {};
- iov.iov_base = request;
- iov.iov_len = len;
-
- struct msghdr msg = {};
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- // No destination required; it defaults to pid 0, the kernel.
-
- RETURN_ERROR_IF_SYSCALL_FAIL(RetryEINTR(sendmsg)(fd.get(), &msg, 0));
-
- return NetlinkResponse(fd, fn, expect_nlmsgerr);
-}
-
-PosixError NetlinkResponse(
- const FileDescriptor& fd,
- const std::function<void(const struct nlmsghdr* hdr)>& fn,
- bool expect_nlmsgerr) {
- constexpr size_t kBufferSize = 4096;
- std::vector<char> buf(kBufferSize);
- struct iovec iov = {};
- iov.iov_base = buf.data();
- iov.iov_len = buf.size();
- struct msghdr msg = {};
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- // If NLM_F_MULTI is set, response is a series of messages that ends with a
- // NLMSG_DONE message.
- int type = -1;
- int flags = 0;
- do {
- int len;
- RETURN_ERROR_IF_SYSCALL_FAIL(len = RetryEINTR(recvmsg)(fd.get(), &msg, 0));
-
- // We don't bother with the complexity of dealing with truncated messages.
- // We must allocate a large enough buffer up front.
- if ((msg.msg_flags & MSG_TRUNC) == MSG_TRUNC) {
- return PosixError(EIO,
- absl::StrCat("Received truncated message with flags: ",
- msg.msg_flags));
- }
-
- for (struct nlmsghdr* hdr = reinterpret_cast<struct nlmsghdr*>(buf.data());
- NLMSG_OK(hdr, len); hdr = NLMSG_NEXT(hdr, len)) {
- fn(hdr);
- flags = hdr->nlmsg_flags;
- type = hdr->nlmsg_type;
- // Done should include an integer payload for dump_done_errno.
- // See net/netlink/af_netlink.c:netlink_dump
- // Some tools like the 'ip' tool check the minimum length of the
- // NLMSG_DONE message.
- if (type == NLMSG_DONE) {
- EXPECT_GE(hdr->nlmsg_len, NLMSG_LENGTH(sizeof(int)));
- }
- }
- } while ((flags & NLM_F_MULTI) && type != NLMSG_DONE && type != NLMSG_ERROR);
-
- if (expect_nlmsgerr) {
- EXPECT_EQ(type, NLMSG_ERROR);
- } else if (flags & NLM_F_MULTI) {
- EXPECT_EQ(type, NLMSG_DONE);
- }
- return NoError();
-}
-
-PosixError NetlinkRequestResponseSingle(
- const FileDescriptor& fd, void* request, size_t len,
- const std::function<void(const struct nlmsghdr* hdr)>& fn) {
- struct iovec iov = {};
- iov.iov_base = request;
- iov.iov_len = len;
-
- struct msghdr msg = {};
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- // No destination required; it defaults to pid 0, the kernel.
-
- RETURN_ERROR_IF_SYSCALL_FAIL(RetryEINTR(sendmsg)(fd.get(), &msg, 0));
-
- constexpr size_t kBufferSize = 4096;
- std::vector<char> buf(kBufferSize);
- iov.iov_base = buf.data();
- iov.iov_len = buf.size();
-
- int ret;
- RETURN_ERROR_IF_SYSCALL_FAIL(ret = RetryEINTR(recvmsg)(fd.get(), &msg, 0));
-
- // We don't bother with the complexity of dealing with truncated messages.
- // We must allocate a large enough buffer up front.
- if ((msg.msg_flags & MSG_TRUNC) == MSG_TRUNC) {
- return PosixError(
- EIO,
- absl::StrCat("Received truncated message with flags: ", msg.msg_flags));
- }
-
- for (struct nlmsghdr* hdr = reinterpret_cast<struct nlmsghdr*>(buf.data());
- NLMSG_OK(hdr, ret); hdr = NLMSG_NEXT(hdr, ret)) {
- fn(hdr);
- }
-
- return NoError();
-}
-
-PosixError NetlinkRequestAckOrError(const FileDescriptor& fd, uint32_t seq,
- void* request, size_t len) {
- // Sentinel negative value meaning no error message was received. The error
- // extracted from an NLMSG_ERROR reply is never negative, so the sentinel
- // cannot be mistaken for a real result.
- int err = -42;
- RETURN_IF_ERRNO(NetlinkRequestResponse(
- fd, request, len,
- [&](const struct nlmsghdr* hdr) {
- EXPECT_EQ(NLMSG_ERROR, hdr->nlmsg_type);
- EXPECT_EQ(hdr->nlmsg_seq, seq);
- EXPECT_GE(hdr->nlmsg_len, sizeof(*hdr) + sizeof(struct nlmsgerr));
-
- const struct nlmsgerr* msg =
- reinterpret_cast<const struct nlmsgerr*>(NLMSG_DATA(hdr));
- err = -msg->error;
- },
- true));
- return PosixError(err);
-}
-
-const struct rtattr* FindRtAttr(const struct nlmsghdr* hdr,
- const struct ifinfomsg* msg, int16_t attr) {
- const int ifi_space = NLMSG_SPACE(sizeof(*msg));
- int attrlen = hdr->nlmsg_len - ifi_space;
- const struct rtattr* rta = reinterpret_cast<const struct rtattr*>(
- reinterpret_cast<const uint8_t*>(hdr) + NLMSG_ALIGN(ifi_space));
- for (; RTA_OK(rta, attrlen); rta = RTA_NEXT(rta, attrlen)) {
- if (rta->rta_type == attr) {
- return rta;
- }
- }
- return nullptr;
-}
-
-} // namespace testing
-} // namespace gvisor
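For reference, a condensed standalone sketch of the request/response pattern that NetlinkRequestResponse and NetlinkResponse wrap: bind a NETLINK_ROUTE socket, send one RTM_GETLINK dump request, and walk the multi-part reply with NLMSG_OK/NLMSG_NEXT until NLMSG_DONE. Error handling is deliberately minimal, and, like the helpers above, truncated replies are not handled.

#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <sys/socket.h>
#include <unistd.h>

#include <cstdio>
#include <vector>

int main() {
  int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
  if (fd < 0) return 1;

  // Bind with an empty address; the kernel assigns the port ID.
  struct sockaddr_nl addr = {};
  addr.nl_family = AF_NETLINK;
  if (bind(fd, reinterpret_cast<struct sockaddr*>(&addr), sizeof(addr)) != 0) {
    return 1;
  }

  // RTM_GETLINK dump request: list all links.
  struct {
    struct nlmsghdr hdr;
    struct ifinfomsg ifi;
  } req = {};
  req.hdr.nlmsg_len = NLMSG_LENGTH(sizeof(req.ifi));
  req.hdr.nlmsg_type = RTM_GETLINK;
  req.hdr.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
  req.hdr.nlmsg_seq = 1;
  req.ifi.ifi_family = AF_UNSPEC;

  // No destination needed; it defaults to port 0, the kernel.
  if (send(fd, &req, req.hdr.nlmsg_len, 0) < 0) return 1;

  std::vector<char> buf(4096);
  bool done = false;
  while (!done) {
    ssize_t n = recv(fd, buf.data(), buf.size(), 0);
    if (n < 0) return 1;
    int len = static_cast<int>(n);
    for (struct nlmsghdr* hdr = reinterpret_cast<struct nlmsghdr*>(buf.data());
         NLMSG_OK(hdr, len); hdr = NLMSG_NEXT(hdr, len)) {
      if (hdr->nlmsg_type == NLMSG_DONE || hdr->nlmsg_type == NLMSG_ERROR) {
        done = true;
        break;
      }
      printf("message type %d, length %u\n", hdr->nlmsg_type, hdr->nlmsg_len);
    }
  }

  close(fd);
  return 0;
}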
diff --git a/test/syscalls/linux/socket_netlink_util.h b/test/syscalls/linux/socket_netlink_util.h
deleted file mode 100644
index f97276d44..000000000
--- a/test/syscalls/linux/socket_netlink_util.h
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_SOCKET_NETLINK_UTIL_H_
-#define GVISOR_TEST_SYSCALLS_SOCKET_NETLINK_UTIL_H_
-
-#include <sys/socket.h>
-// socket.h has to be included before if_arp.h.
-#include <linux/if_arp.h>
-#include <linux/netlink.h>
-#include <linux/rtnetlink.h>
-
-#include "test/util/file_descriptor.h"
-#include "test/util/posix_error.h"
-
-namespace gvisor {
-namespace testing {
-
-// Returns a bound netlink socket.
-PosixErrorOr<FileDescriptor> NetlinkBoundSocket(int protocol);
-
-// Returns the port ID of the passed socket.
-PosixErrorOr<uint32_t> NetlinkPortID(int fd);
-
-// Send the passed request and call fn on all response netlink messages.
-//
-// To be used on requests with NLM_F_MULTI responses.
-PosixError NetlinkRequestResponse(
- const FileDescriptor& fd, void* request, size_t len,
- const std::function<void(const struct nlmsghdr* hdr)>& fn,
- bool expect_nlmsgerr);
-
-// Call fn on all response netlink messages.
-//
-// To be used on requests with NLM_F_MULTI responses.
-PosixError NetlinkResponse(
- const FileDescriptor& fd,
- const std::function<void(const struct nlmsghdr* hdr)>& fn,
- bool expect_nlmsgerr);
-
-// Send the passed request and call fn on all response netlink messages.
-//
-// To be used on requests without NLM_F_MULTI responses.
-PosixError NetlinkRequestResponseSingle(
- const FileDescriptor& fd, void* request, size_t len,
- const std::function<void(const struct nlmsghdr* hdr)>& fn);
-
-// Send the passed request then expect and return an ack or error.
-PosixError NetlinkRequestAckOrError(const FileDescriptor& fd, uint32_t seq,
- void* request, size_t len);
-
-// Find rtnetlink attribute in message.
-const struct rtattr* FindRtAttr(const struct nlmsghdr* hdr,
- const struct ifinfomsg* msg, int16_t attr);
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_SOCKET_NETLINK_UTIL_H_
diff --git a/test/syscalls/linux/socket_non_blocking.cc b/test/syscalls/linux/socket_non_blocking.cc
deleted file mode 100644
index c3520cadd..000000000
--- a/test/syscalls/linux/socket_non_blocking.cc
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/socket_non_blocking.h"
-
-#include <stdio.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <sys/un.h>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-TEST_P(NonBlockingSocketPairTest, ReadNothingAvailable) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- char buf[20] = {};
- ASSERT_THAT(ReadFd(sockets->first_fd(), buf, sizeof(buf)),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-TEST_P(NonBlockingSocketPairTest, RecvNothingAvailable) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- char buf[20] = {};
- ASSERT_THAT(RetryEINTR(recv)(sockets->first_fd(), buf, sizeof(buf), 0),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-TEST_P(NonBlockingSocketPairTest, RecvMsgNothingAvailable) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct iovec iov;
- char buf[20] = {};
- iov.iov_base = buf;
- iov.iov_len = sizeof(buf);
- struct msghdr msg = {};
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(recvmsg)(sockets->first_fd(), &msg, 0),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_non_blocking.h b/test/syscalls/linux/socket_non_blocking.h
deleted file mode 100644
index bd3e02fd2..000000000
--- a/test/syscalls/linux/socket_non_blocking.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_LINUX_SOCKET_NON_BLOCKING_H_
-#define GVISOR_TEST_SYSCALLS_LINUX_SOCKET_NON_BLOCKING_H_
-
-#include "test/syscalls/linux/socket_test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Test fixture for tests that apply to pairs of connected non-blocking sockets.
-using NonBlockingSocketPairTest = SocketPairTest;
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_LINUX_SOCKET_NON_BLOCKING_H_
diff --git a/test/syscalls/linux/socket_non_stream.cc b/test/syscalls/linux/socket_non_stream.cc
deleted file mode 100644
index c61817f14..000000000
--- a/test/syscalls/linux/socket_non_stream.cc
+++ /dev/null
@@ -1,337 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/socket_non_stream.h"
-
-#include <stdio.h>
-#include <sys/socket.h>
-#include <sys/un.h>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-TEST_P(NonStreamSocketPairTest, SendMsgTooLarge) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int sndbuf;
- socklen_t length = sizeof(sndbuf);
- ASSERT_THAT(
- getsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDBUF, &sndbuf, &length),
- SyscallSucceeds());
-
- // Make the call too large to fit in the send buffer.
- const int buffer_size = 3 * sndbuf;
-
- EXPECT_THAT(SendLargeSendMsg(sockets, buffer_size, false /* reader */),
- SyscallFailsWithErrno(EMSGSIZE));
-}
-
-// Stream sockets allow data sent with a single (e.g. write, sendmsg) syscall
-// to be read in pieces with multiple (e.g. read, recvmsg) syscalls.
-//
-// SplitRecv checks that datagram (non-stream) sockets do not: any part of a
-// message not consumed by the first recv is discarded, so a second recv finds
-// nothing left to read.
-TEST_P(NonStreamSocketPairTest, SplitRecv) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- char sent_data[512];
- RandomizeBuffer(sent_data, sizeof(sent_data));
- ASSERT_THAT(
- RetryEINTR(send)(sockets->first_fd(), sent_data, sizeof(sent_data), 0),
- SyscallSucceedsWithValue(sizeof(sent_data)));
- char received_data[sizeof(sent_data) / 2];
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), received_data,
- sizeof(received_data), 0),
- SyscallSucceedsWithValue(sizeof(received_data)));
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(received_data)));
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), received_data,
- sizeof(received_data), MSG_DONTWAIT),
- SyscallFailsWithErrno(EWOULDBLOCK));
-}
-
-// Stream sockets allow data sent with multiple sends to be read in a single
-// recv. Datagram sockets do not.
-//
-// SingleRecv checks that only a single message is readable in a single recv.
-TEST_P(NonStreamSocketPairTest, SingleRecv) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- char sent_data1[20];
- RandomizeBuffer(sent_data1, sizeof(sent_data1));
- ASSERT_THAT(
- RetryEINTR(send)(sockets->first_fd(), sent_data1, sizeof(sent_data1), 0),
- SyscallSucceedsWithValue(sizeof(sent_data1)));
- char sent_data2[20];
- RandomizeBuffer(sent_data2, sizeof(sent_data2));
- ASSERT_THAT(
- RetryEINTR(send)(sockets->first_fd(), sent_data2, sizeof(sent_data2), 0),
- SyscallSucceedsWithValue(sizeof(sent_data2)));
- char received_data[sizeof(sent_data1) + sizeof(sent_data2)];
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), received_data,
- sizeof(received_data), 0),
- SyscallSucceedsWithValue(sizeof(sent_data1)));
- EXPECT_EQ(0, memcmp(sent_data1, received_data, sizeof(sent_data1)));
-}
-
-TEST_P(NonStreamSocketPairTest, RecvmsgMsghdrFlagMsgTrunc) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[10];
- RandomizeBuffer(sent_data, sizeof(sent_data));
- ASSERT_THAT(
- RetryEINTR(send)(sockets->first_fd(), sent_data, sizeof(sent_data), 0),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- char received_data[sizeof(sent_data) / 2] = {};
-
- struct iovec iov;
- iov.iov_base = received_data;
- iov.iov_len = sizeof(received_data);
- struct msghdr msg = {};
- msg.msg_flags = -1;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0),
- SyscallSucceedsWithValue(sizeof(received_data)));
- EXPECT_EQ(0, memcmp(received_data, sent_data, sizeof(received_data)));
-
- // Check that msghdr flags were updated.
- EXPECT_EQ(msg.msg_flags & MSG_TRUNC, MSG_TRUNC);
-}
-
-// Stream sockets allow data sent with multiple sends to be peeked at in a
-// single recv. Datagram sockets (except for unix sockets) do not.
-//
-// SinglePeek checks that only a single message is peekable in a single recv.
-TEST_P(NonStreamSocketPairTest, SinglePeek) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- char sent_data1[20];
- RandomizeBuffer(sent_data1, sizeof(sent_data1));
- ASSERT_THAT(
- RetryEINTR(send)(sockets->first_fd(), sent_data1, sizeof(sent_data1), 0),
- SyscallSucceedsWithValue(sizeof(sent_data1)));
- char sent_data2[20];
- RandomizeBuffer(sent_data2, sizeof(sent_data2));
- ASSERT_THAT(
- RetryEINTR(send)(sockets->first_fd(), sent_data2, sizeof(sent_data2), 0),
- SyscallSucceedsWithValue(sizeof(sent_data2)));
- char received_data[sizeof(sent_data1) + sizeof(sent_data2)];
- for (int i = 0; i < 3; i++) {
- memset(received_data, 0, sizeof(received_data));
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), received_data,
- sizeof(received_data), MSG_PEEK),
- SyscallSucceedsWithValue(sizeof(sent_data1)));
- EXPECT_EQ(0, memcmp(sent_data1, received_data, sizeof(sent_data1)));
- }
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), received_data,
- sizeof(sent_data1), 0),
- SyscallSucceedsWithValue(sizeof(sent_data1)));
- EXPECT_EQ(0, memcmp(sent_data1, received_data, sizeof(sent_data1)));
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), received_data,
- sizeof(sent_data2), 0),
- SyscallSucceedsWithValue(sizeof(sent_data2)));
- EXPECT_EQ(0, memcmp(sent_data2, received_data, sizeof(sent_data2)));
-}
-
-TEST_P(NonStreamSocketPairTest, MsgTruncTruncation) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- char sent_data[512];
- RandomizeBuffer(sent_data, sizeof(sent_data));
- ASSERT_THAT(
- RetryEINTR(send)(sockets->first_fd(), sent_data, sizeof(sent_data), 0),
- SyscallSucceedsWithValue(sizeof(sent_data)));
- char received_data[sizeof(sent_data)] = {};
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), received_data,
- sizeof(received_data) / 2, MSG_TRUNC),
- SyscallSucceedsWithValue(sizeof(sent_data)));
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data) / 2));
-
- // Check that we didn't get any extra data.
- EXPECT_NE(0, memcmp(sent_data + sizeof(sent_data) / 2,
- received_data + sizeof(received_data) / 2,
- sizeof(sent_data) / 2));
-}
-
-TEST_P(NonStreamSocketPairTest, MsgTruncTruncationRecvmsgMsghdrFlagMsgTrunc) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[10];
- RandomizeBuffer(sent_data, sizeof(sent_data));
- ASSERT_THAT(
- RetryEINTR(send)(sockets->first_fd(), sent_data, sizeof(sent_data), 0),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- char received_data[sizeof(sent_data) / 2] = {};
-
- struct iovec iov;
- iov.iov_base = received_data;
- iov.iov_len = sizeof(received_data);
- struct msghdr msg = {};
- msg.msg_flags = -1;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, MSG_TRUNC),
- SyscallSucceedsWithValue(sizeof(sent_data)));
- EXPECT_EQ(0, memcmp(received_data, sent_data, sizeof(received_data)));
-
- // Check that msghdr flags were updated.
- EXPECT_EQ(msg.msg_flags & MSG_TRUNC, MSG_TRUNC);
-}
-
-TEST_P(NonStreamSocketPairTest, MsgTruncSameSize) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- char sent_data[512];
- RandomizeBuffer(sent_data, sizeof(sent_data));
- ASSERT_THAT(
- RetryEINTR(send)(sockets->first_fd(), sent_data, sizeof(sent_data), 0),
- SyscallSucceedsWithValue(sizeof(sent_data)));
- char received_data[sizeof(sent_data)];
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), received_data,
- sizeof(received_data), MSG_TRUNC),
- SyscallSucceedsWithValue(sizeof(received_data)));
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-}
-
-TEST_P(NonStreamSocketPairTest, MsgTruncNotFull) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- char sent_data[512];
- RandomizeBuffer(sent_data, sizeof(sent_data));
- ASSERT_THAT(
- RetryEINTR(send)(sockets->first_fd(), sent_data, sizeof(sent_data), 0),
- SyscallSucceedsWithValue(sizeof(sent_data)));
- char received_data[2 * sizeof(sent_data)];
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), received_data,
- sizeof(received_data), MSG_TRUNC),
- SyscallSucceedsWithValue(sizeof(sent_data)));
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-}
-
-// Reads from a socket with MSG_TRUNC and a zero-length receive buffer. The
-// caller should still be able to learn the full message length.
-TEST_P(NonStreamSocketPairTest, RecvmsgMsgTruncZeroLen) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[10];
- RandomizeBuffer(sent_data, sizeof(sent_data));
- ASSERT_THAT(
- RetryEINTR(send)(sockets->first_fd(), sent_data, sizeof(sent_data), 0),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- // The receive buffer is of zero length.
- char received_data[0] = {};
-
- struct iovec iov;
- iov.iov_base = received_data;
- iov.iov_len = sizeof(received_data);
- struct msghdr msg = {};
- msg.msg_flags = -1;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- // The syscall succeeds returning the full size of the message on the socket.
- ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, MSG_TRUNC),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- // Check that MSG_TRUNC is set on msghdr flags.
- EXPECT_EQ(msg.msg_flags & MSG_TRUNC, MSG_TRUNC);
-}
-
-// Reads from a socket with MSG_TRUNC | MSG_PEEK and a zero-length receive
-// buffer. The caller should be able to learn the message length without
-// consuming any data from the socket.
-TEST_P(NonStreamSocketPairTest, RecvmsgMsgTruncMsgPeekZeroLen) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[10];
- RandomizeBuffer(sent_data, sizeof(sent_data));
- ASSERT_THAT(
- RetryEINTR(send)(sockets->first_fd(), sent_data, sizeof(sent_data), 0),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- // The receive buffer is of zero length.
- char peek_data[0] = {};
-
- struct iovec peek_iov;
- peek_iov.iov_base = peek_data;
- peek_iov.iov_len = sizeof(peek_data);
- struct msghdr peek_msg = {};
- peek_msg.msg_flags = -1;
- peek_msg.msg_iov = &peek_iov;
- peek_msg.msg_iovlen = 1;
-
- // The syscall succeeds returning the full size of the message on the socket.
- ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &peek_msg,
- MSG_TRUNC | MSG_PEEK),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- // Check that MSG_TRUNC is set on msghdr flags because the receive buffer is
- // smaller than the message size.
- EXPECT_EQ(peek_msg.msg_flags & MSG_TRUNC, MSG_TRUNC);
-
- char received_data[sizeof(sent_data)] = {};
-
- struct iovec received_iov;
- received_iov.iov_base = received_data;
- received_iov.iov_len = sizeof(received_data);
- struct msghdr received_msg = {};
- received_msg.msg_flags = -1;
- received_msg.msg_iov = &received_iov;
- received_msg.msg_iovlen = 1;
-
- // Next we can read the actual data.
- ASSERT_THAT(
- RetryEINTR(recvmsg)(sockets->second_fd(), &received_msg, MSG_TRUNC),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-
- // Check that MSG_TRUNC is not set on msghdr flags because we read the whole
- // message.
- EXPECT_EQ(received_msg.msg_flags & MSG_TRUNC, 0);
-}
-
-// Reads from an empty socket with MSG_TRUNC | MSG_PEEK | MSG_DONTWAIT and a
-// zero-length receive buffer. The call should fail with EAGAIN or
-// EWOULDBLOCK.
-TEST_P(NonStreamSocketPairTest, RecvmsgTruncPeekDontwaitZeroLen) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- // NOTE: We don't send any data on the socket.
-
- // The receive buffer is of zero length.
- char peek_data[0] = {};
-
- struct iovec peek_iov;
- peek_iov.iov_base = peek_data;
- peek_iov.iov_len = sizeof(peek_data);
- struct msghdr peek_msg = {};
- peek_msg.msg_flags = -1;
- peek_msg.msg_iov = &peek_iov;
- peek_msg.msg_iovlen = 1;
-
- // recvmsg fails with EAGAIN because no data is available on the socket.
- ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &peek_msg,
- MSG_TRUNC | MSG_PEEK | MSG_DONTWAIT),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-} // namespace testing
-} // namespace gvisor
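For reference, a standalone sketch of the MSG_TRUNC | MSG_PEEK probe that the zero-length-buffer tests above rely on: on a datagram socket, recv with MSG_TRUNC reports the real size of the next message even when the supplied buffer is smaller (here, absent), and MSG_PEEK leaves the message queued. The AF_UNIX datagram socketpair is purely illustrative.

#include <sys/socket.h>
#include <unistd.h>

#include <cstdio>

int main() {
  int fds[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, fds) != 0) return 1;

  const char msg[] = "0123456789";  // 11 bytes including the trailing NUL.
  if (send(fds[0], msg, sizeof(msg), 0) < 0) return 1;

  // Probe the pending message's length without consuming it.
  ssize_t len = recv(fds[1], nullptr, 0, MSG_PEEK | MSG_TRUNC);
  printf("next datagram is %zd bytes\n", len);  // Prints 11.

  // Now actually read it.
  char buf[sizeof(msg)] = {};
  ssize_t got = recv(fds[1], buf, sizeof(buf), 0);
  printf("read %zd bytes: %s\n", got, buf);

  close(fds[0]);
  close(fds[1]);
  return 0;
}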
diff --git a/test/syscalls/linux/socket_non_stream.h b/test/syscalls/linux/socket_non_stream.h
deleted file mode 100644
index 469fbe6a2..000000000
--- a/test/syscalls/linux/socket_non_stream.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_LINUX_SOCKET_NON_STREAM_H_
-#define GVISOR_TEST_SYSCALLS_LINUX_SOCKET_NON_STREAM_H_
-
-#include "test/syscalls/linux/socket_test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Test fixture for tests that apply to pairs of connected non-stream sockets.
-using NonStreamSocketPairTest = SocketPairTest;
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_LINUX_SOCKET_NON_STREAM_H_
diff --git a/test/syscalls/linux/socket_non_stream_blocking.cc b/test/syscalls/linux/socket_non_stream_blocking.cc
deleted file mode 100644
index b052f6e61..000000000
--- a/test/syscalls/linux/socket_non_stream_blocking.cc
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/socket_non_stream_blocking.h"
-
-#include <stdio.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <sys/un.h>
-
-#include "gtest/gtest.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-TEST_P(BlockingNonStreamSocketPairTest, RecvLessThanBufferWaitAll) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[100];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- ASSERT_THAT(write(sockets->first_fd(), sent_data, sizeof(sent_data)),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- char received_data[sizeof(sent_data) * 2] = {};
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), received_data,
- sizeof(received_data), MSG_WAITALL),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-}
-
-// Reads from a blocking socket with MSG_TRUNC | MSG_PEEK and a zero-length
-// receive buffer. The recvmsg call should block until data is available.
-TEST_P(BlockingNonStreamSocketPairTest,
- RecvmsgTruncPeekDontwaitZeroLenBlocking) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- // NOTE: We don't initially send any data on the socket.
- const int data_size = 10;
- char sent_data[data_size];
- RandomizeBuffer(sent_data, data_size);
-
- // The receive buffer is of zero length.
- char peek_data[0] = {};
-
- struct iovec peek_iov;
- peek_iov.iov_base = peek_data;
- peek_iov.iov_len = sizeof(peek_data);
- struct msghdr peek_msg = {};
- peek_msg.msg_flags = -1;
- peek_msg.msg_iov = &peek_iov;
- peek_msg.msg_iovlen = 1;
-
- ScopedThread t([&]() {
- // The syscall succeeds returning the full size of the message on the
- // socket. This should block until there is data on the socket.
- ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &peek_msg,
- MSG_TRUNC | MSG_PEEK),
- SyscallSucceedsWithValue(data_size));
- });
-
- absl::SleepFor(absl::Seconds(1));
- ASSERT_THAT(RetryEINTR(send)(sockets->first_fd(), sent_data, data_size, 0),
- SyscallSucceedsWithValue(data_size));
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_non_stream_blocking.h b/test/syscalls/linux/socket_non_stream_blocking.h
deleted file mode 100644
index 6e205a039..000000000
--- a/test/syscalls/linux/socket_non_stream_blocking.h
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_LINUX_SOCKET_NON_STREAM_BLOCKING_H_
-#define GVISOR_TEST_SYSCALLS_LINUX_SOCKET_NON_STREAM_BLOCKING_H_
-
-#include "test/syscalls/linux/socket_test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Test fixture for tests that apply to pairs of blocking connected non-stream
-// sockets.
-using BlockingNonStreamSocketPairTest = SocketPairTest;
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_LINUX_SOCKET_NON_STREAM_BLOCKING_H_
diff --git a/test/syscalls/linux/socket_stream.cc b/test/syscalls/linux/socket_stream.cc
deleted file mode 100644
index 6522b2e01..000000000
--- a/test/syscalls/linux/socket_stream.cc
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/socket_stream.h"
-
-#include <stdio.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <sys/un.h>
-
-#include "gtest/gtest.h"
-#include "absl/time/clock.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-TEST_P(StreamSocketPairTest, SplitRecv) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- char sent_data[512];
- RandomizeBuffer(sent_data, sizeof(sent_data));
- ASSERT_THAT(
- RetryEINTR(send)(sockets->first_fd(), sent_data, sizeof(sent_data), 0),
- SyscallSucceedsWithValue(sizeof(sent_data)));
- char received_data[sizeof(sent_data) / 2];
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), received_data,
- sizeof(received_data), 0),
- SyscallSucceedsWithValue(sizeof(received_data)));
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(received_data)));
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), received_data,
- sizeof(received_data), 0),
- SyscallSucceedsWithValue(sizeof(received_data)));
- EXPECT_EQ(0, memcmp(sent_data + sizeof(received_data), received_data,
- sizeof(received_data)));
-}
-
-// Stream sockets allow data sent with multiple sends to be read in a single
-// recv.
-//
-// CoalescedRecv checks that multiple messages are readable in a single recv.
-TEST_P(StreamSocketPairTest, CoalescedRecv) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- char sent_data1[20];
- RandomizeBuffer(sent_data1, sizeof(sent_data1));
- ASSERT_THAT(
- RetryEINTR(send)(sockets->first_fd(), sent_data1, sizeof(sent_data1), 0),
- SyscallSucceedsWithValue(sizeof(sent_data1)));
- char sent_data2[20];
- RandomizeBuffer(sent_data2, sizeof(sent_data2));
- ASSERT_THAT(
- RetryEINTR(send)(sockets->first_fd(), sent_data2, sizeof(sent_data2), 0),
- SyscallSucceedsWithValue(sizeof(sent_data2)));
- char received_data[sizeof(sent_data1) + sizeof(sent_data2)];
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), received_data,
- sizeof(received_data), 0),
- SyscallSucceedsWithValue(sizeof(received_data)));
- EXPECT_EQ(0, memcmp(sent_data1, received_data, sizeof(sent_data1)));
- EXPECT_EQ(0, memcmp(sent_data2, received_data + sizeof(sent_data1),
- sizeof(sent_data2)));
-}
-
-TEST_P(StreamSocketPairTest, WriteOneSideClosed) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- ASSERT_THAT(close(sockets->release_first_fd()), SyscallSucceeds());
- const char str[] = "abc";
- ASSERT_THAT(write(sockets->second_fd(), str, 3),
- SyscallFailsWithErrno(EPIPE));
-}
-
-TEST_P(StreamSocketPairTest, RecvmsgMsghdrFlagsNoMsgTrunc) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[10];
- RandomizeBuffer(sent_data, sizeof(sent_data));
- ASSERT_THAT(
- RetryEINTR(send)(sockets->first_fd(), sent_data, sizeof(sent_data), 0),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- char received_data[sizeof(sent_data) / 2] = {};
-
- struct iovec iov;
- iov.iov_base = received_data;
- iov.iov_len = sizeof(received_data);
- struct msghdr msg = {};
- msg.msg_flags = -1;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0),
- SyscallSucceedsWithValue(sizeof(received_data)));
- EXPECT_EQ(0, memcmp(received_data, sent_data, sizeof(received_data)));
-
- // Check that msghdr flags were cleared (MSG_TRUNC was not set).
- ASSERT_EQ(msg.msg_flags & MSG_TRUNC, 0);
-}
-
-TEST_P(StreamSocketPairTest, RecvmsgTruncZeroLen) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[10];
- RandomizeBuffer(sent_data, sizeof(sent_data));
- ASSERT_THAT(
- RetryEINTR(send)(sockets->first_fd(), sent_data, sizeof(sent_data), 0),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- char received_data[0] = {};
-
- struct iovec iov;
- iov.iov_base = received_data;
- iov.iov_len = sizeof(received_data);
- struct msghdr msg = {};
- msg.msg_flags = -1;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, MSG_TRUNC),
- SyscallSucceedsWithValue(0));
-
- // Check that msghdr flags were cleared (MSG_TRUNC was not set).
- ASSERT_EQ(msg.msg_flags & MSG_TRUNC, 0);
-}
-
-TEST_P(StreamSocketPairTest, RecvmsgTruncPeekZeroLen) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[10];
- RandomizeBuffer(sent_data, sizeof(sent_data));
- ASSERT_THAT(
- RetryEINTR(send)(sockets->first_fd(), sent_data, sizeof(sent_data), 0),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- char received_data[0] = {};
-
- struct iovec iov;
- iov.iov_base = received_data;
- iov.iov_len = sizeof(received_data);
- struct msghdr msg = {};
- msg.msg_flags = -1;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(
- RetryEINTR(recvmsg)(sockets->second_fd(), &msg, MSG_TRUNC | MSG_PEEK),
- SyscallSucceedsWithValue(0));
-
- // Check that msghdr flags were cleared (MSG_TRUNC was not set).
- ASSERT_EQ(msg.msg_flags & MSG_TRUNC, 0);
-}
-
-TEST_P(StreamSocketPairTest, MsgTrunc) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- char sent_data[512];
- RandomizeBuffer(sent_data, sizeof(sent_data));
- ASSERT_THAT(
- RetryEINTR(send)(sockets->first_fd(), sent_data, sizeof(sent_data), 0),
- SyscallSucceedsWithValue(sizeof(sent_data)));
- char received_data[sizeof(sent_data)];
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), received_data,
- sizeof(received_data) / 2, MSG_TRUNC),
- SyscallSucceedsWithValue(sizeof(sent_data) / 2));
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data) / 2));
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_stream.h b/test/syscalls/linux/socket_stream.h
deleted file mode 100644
index b837b8f8c..000000000
--- a/test/syscalls/linux/socket_stream.h
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_LINUX_SOCKET_STREAM_H_
-#define GVISOR_TEST_SYSCALLS_LINUX_SOCKET_STREAM_H_
-
-#include "test/syscalls/linux/socket_test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Test fixture for tests that apply to pairs of blocking and non-blocking
-// connected stream sockets.
-using StreamSocketPairTest = SocketPairTest;
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_LINUX_SOCKET_STREAM_H_
diff --git a/test/syscalls/linux/socket_stream_blocking.cc b/test/syscalls/linux/socket_stream_blocking.cc
deleted file mode 100644
index 0743322ac..000000000
--- a/test/syscalls/linux/socket_stream_blocking.cc
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/socket_stream_blocking.h"
-
-#include <stdio.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <sys/un.h>
-
-#include "gtest/gtest.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-#include "test/util/timer_util.h"
-
-namespace gvisor {
-namespace testing {
-
-TEST_P(BlockingStreamSocketPairTest, BlockPartialWriteClosed) {
- // FIXME(b/35921550): gVisor doesn't support SO_SNDBUF on UDS, nor does it
- // enforce any limit; it will write arbitrary amounts of data without
- // blocking.
- SKIP_IF(IsRunningOnGvisor());
-
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int buffer_size;
- socklen_t length = sizeof(buffer_size);
- ASSERT_THAT(getsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDBUF,
- &buffer_size, &length),
- SyscallSucceeds());
-
- int wfd = sockets->first_fd();
- ScopedThread t([wfd, buffer_size]() {
- std::vector<char> buf(2 * buffer_size);
- // Write more than fits in the buffer. The call blocks, then returns a
- // partial write once the other end is closed. The next call fails with
- // EPIPE (or ECONNRESET).
- //
- // N.B. writes occur in chunks, so we may see less than buffer_size from
- // the first call.
- ASSERT_THAT(write(wfd, buf.data(), buf.size()),
- SyscallSucceedsWithValue(::testing::Gt(0)));
- ASSERT_THAT(write(wfd, buf.data(), buf.size()),
- ::testing::AnyOf(SyscallFailsWithErrno(EPIPE),
- SyscallFailsWithErrno(ECONNRESET)));
- });
-
- // Leave time for write to become blocked.
- absl::SleepFor(absl::Seconds(1));
-
- ASSERT_THAT(close(sockets->release_second_fd()), SyscallSucceeds());
-}
-
-// Random save may interrupt the call to sendmsg() in SendLargeSendMsg(),
-// causing the write to be incomplete and the test to hang.
-TEST_P(BlockingStreamSocketPairTest, SendMsgTooLarge) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int sndbuf;
- socklen_t length = sizeof(sndbuf);
- ASSERT_THAT(
- getsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDBUF, &sndbuf, &length),
- SyscallSucceeds());
-
- // Make the call too large to fit in the send buffer.
- const int buffer_size = 3 * sndbuf;
-
- EXPECT_THAT(SendLargeSendMsg(sockets, buffer_size, true /* reader */),
- SyscallSucceedsWithValue(buffer_size));
-}
-
-TEST_P(BlockingStreamSocketPairTest, RecvLessThanBuffer) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[100];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- ASSERT_THAT(write(sockets->first_fd(), sent_data, sizeof(sent_data)),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- char received_data[200] = {};
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), received_data,
- sizeof(received_data), 0),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-}
-
-// Test that MSG_WAITALL causes recv to block until all requested data is
-// received. Random save can interrupt blocking and cause received data to be
-// returned, even if the amount received is less than the full requested amount.
-TEST_P(BlockingStreamSocketPairTest, RecvLessThanBufferWaitAll) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[100];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- ASSERT_THAT(write(sockets->first_fd(), sent_data, sizeof(sent_data)),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- constexpr auto kDuration = absl::Milliseconds(200);
- auto before = Now(CLOCK_MONOTONIC);
-
- const ScopedThread t([&]() {
- absl::SleepFor(kDuration);
-
- // Don't let saving after the write interrupt the blocking recv.
- const DisableSave ds;
-
- ASSERT_THAT(write(sockets->first_fd(), sent_data, sizeof(sent_data)),
- SyscallSucceedsWithValue(sizeof(sent_data)));
- });
-
- char received_data[sizeof(sent_data) * 2] = {};
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), received_data,
- sizeof(received_data), MSG_WAITALL),
- SyscallSucceedsWithValue(sizeof(received_data)));
-
- auto after = Now(CLOCK_MONOTONIC);
- EXPECT_GE(after - before, kDuration);
-}
-
-TEST_P(BlockingStreamSocketPairTest, SendTimeout) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct timeval tv {
- .tv_sec = 0, .tv_usec = 10
- };
- EXPECT_THAT(
- setsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)),
- SyscallSucceeds());
-
- std::vector<char> buf(kPageSize);
- // We don't know how much data the socketpair will buffer, so we may do an
- // arbitrarily large number of writes; saving after each write causes this
- // test's time to explode.
- const DisableSave ds;
- for (;;) {
- int ret;
- ASSERT_THAT(
- ret = RetryEINTR(send)(sockets->first_fd(), buf.data(), buf.size(), 0),
- ::testing::AnyOf(SyscallSucceeds(), SyscallFailsWithErrno(EAGAIN)));
- if (ret == -1) {
- break;
- }
- }
-}
-
-} // namespace testing
-} // namespace gvisor
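For reference, a standalone sketch of the SO_SNDTIMEO behaviour the SendTimeout test above relies on: with a tiny send timeout set, writes into a full socketpair buffer eventually fail with EAGAIN/EWOULDBLOCK instead of blocking indefinitely. The buffer size is illustrative.

#include <sys/socket.h>
#include <unistd.h>

#include <cerrno>
#include <cstdio>
#include <vector>

int main() {
  int fds[2];
  if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) != 0) return 1;

  // A very short send timeout.
  struct timeval tv = {};
  tv.tv_sec = 0;
  tv.tv_usec = 10;
  if (setsockopt(fds[0], SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)) != 0) {
    return 1;
  }

  std::vector<char> buf(4096);
  // Keep writing until the send buffer fills and the timeout kicks in.
  for (;;) {
    ssize_t n = send(fds[0], buf.data(), buf.size(), 0);
    if (n < 0) {
      printf("send failed with errno %d after the buffer filled\n", errno);
      break;
    }
  }

  close(fds[0]);
  close(fds[1]);
  return 0;
}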
diff --git a/test/syscalls/linux/socket_stream_blocking.h b/test/syscalls/linux/socket_stream_blocking.h
deleted file mode 100644
index 9fd19ff90..000000000
--- a/test/syscalls/linux/socket_stream_blocking.h
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_LINUX_SOCKET_STREAM_BLOCKING_H_
-#define GVISOR_TEST_SYSCALLS_LINUX_SOCKET_STREAM_BLOCKING_H_
-
-#include "test/syscalls/linux/socket_test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Test fixture for tests that apply to pairs of blocking connected stream
-// sockets.
-using BlockingStreamSocketPairTest = SocketPairTest;
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_LINUX_SOCKET_STREAM_BLOCKING_H_
diff --git a/test/syscalls/linux/socket_stream_nonblock.cc b/test/syscalls/linux/socket_stream_nonblock.cc
deleted file mode 100644
index 74d608741..000000000
--- a/test/syscalls/linux/socket_stream_nonblock.cc
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/socket_stream_nonblock.h"
-
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <sys/uio.h>
-#include <sys/un.h>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-using ::testing::Le;
-
-TEST_P(NonBlockingStreamSocketPairTest, SendMsgTooLarge) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int sndbuf;
- socklen_t length = sizeof(sndbuf);
- ASSERT_THAT(
- getsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDBUF, &sndbuf, &length),
- SyscallSucceeds());
-
- // Make the call too large to fit in the send buffer.
- const int buffer_size = 3 * sndbuf;
-
- EXPECT_THAT(SendLargeSendMsg(sockets, buffer_size, false /* reader */),
- SyscallSucceedsWithValue(Le(buffer_size)));
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_stream_nonblock.h b/test/syscalls/linux/socket_stream_nonblock.h
deleted file mode 100644
index c3b7fad91..000000000
--- a/test/syscalls/linux/socket_stream_nonblock.h
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_LINUX_SOCKET_STREAM_NONBLOCK_H_
-#define GVISOR_TEST_SYSCALLS_LINUX_SOCKET_STREAM_NONBLOCK_H_
-
-#include "test/syscalls/linux/socket_test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Test fixture for tests that apply to pairs of non-blocking connected stream
-// sockets.
-using NonBlockingStreamSocketPairTest = SocketPairTest;
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_LINUX_SOCKET_STREAM_NONBLOCK_H_
diff --git a/test/syscalls/linux/socket_test_util.cc b/test/syscalls/linux/socket_test_util.cc
deleted file mode 100644
index c1cded834..000000000
--- a/test/syscalls/linux/socket_test_util.cc
+++ /dev/null
@@ -1,1117 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/socket_test_util.h"
-
-#include <arpa/inet.h>
-#include <netinet/in.h>
-#include <poll.h>
-#include <sys/socket.h>
-
-#include <memory>
-
-#include "gtest/gtest.h"
-#include "absl/memory/memory.h"
-#include "absl/strings/str_cat.h"
-#include "absl/strings/str_split.h"
-#include "absl/time/clock.h"
-#include "absl/types/optional.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/posix_error.h"
-#include "test/util/temp_path.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-Creator<SocketPair> SyscallSocketPairCreator(int domain, int type,
- int protocol) {
- return [=]() -> PosixErrorOr<std::unique_ptr<FDSocketPair>> {
- int pair[2];
- RETURN_ERROR_IF_SYSCALL_FAIL(socketpair(domain, type, protocol, pair));
- MaybeSave(); // Save on successful creation.
- return absl::make_unique<FDSocketPair>(pair[0], pair[1]);
- };
-}
-
-Creator<FileDescriptor> SyscallSocketCreator(int domain, int type,
- int protocol) {
- return [=]() -> PosixErrorOr<std::unique_ptr<FileDescriptor>> {
- int fd = 0;
- RETURN_ERROR_IF_SYSCALL_FAIL(fd = socket(domain, type, protocol));
- MaybeSave(); // Save on successful creation.
- return absl::make_unique<FileDescriptor>(fd);
- };
-}
-
-PosixErrorOr<struct sockaddr_un> UniqueUnixAddr(bool abstract, int domain) {
- struct sockaddr_un addr = {};
- std::string path = NewTempAbsPathInDir("/tmp");
- if (path.size() >= sizeof(addr.sun_path)) {
- return PosixError(EINVAL,
- "Unable to generate a temp path of appropriate length");
- }
-
- if (abstract) {
- // Indicate that the path is in the abstract namespace.
- path[0] = 0;
- }
- memcpy(addr.sun_path, path.c_str(), path.length());
- addr.sun_family = domain;
- return addr;
-}
-
-Creator<SocketPair> AcceptBindSocketPairCreator(bool abstract, int domain,
- int type, int protocol) {
- return [=]() -> PosixErrorOr<std::unique_ptr<AddrFDSocketPair>> {
- ASSIGN_OR_RETURN_ERRNO(struct sockaddr_un bind_addr,
- UniqueUnixAddr(abstract, domain));
- ASSIGN_OR_RETURN_ERRNO(struct sockaddr_un extra_addr,
- UniqueUnixAddr(abstract, domain));
-
- int bound;
- RETURN_ERROR_IF_SYSCALL_FAIL(bound = socket(domain, type, protocol));
- MaybeSave(); // Successful socket creation.
- RETURN_ERROR_IF_SYSCALL_FAIL(
- bind(bound, AsSockAddr(&bind_addr), sizeof(bind_addr)));
- MaybeSave(); // Successful bind.
- RETURN_ERROR_IF_SYSCALL_FAIL(
- listen(bound, /* backlog = */ 5)); // NOLINT(bugprone-argument-comment)
- MaybeSave(); // Successful listen.
-
- int connected;
- RETURN_ERROR_IF_SYSCALL_FAIL(connected = socket(domain, type, protocol));
- MaybeSave(); // Successful socket creation.
- RETURN_ERROR_IF_SYSCALL_FAIL(
- connect(connected, AsSockAddr(&bind_addr), sizeof(bind_addr)));
- MaybeSave(); // Successful connect.
-
- int accepted;
- RETURN_ERROR_IF_SYSCALL_FAIL(
- accepted = accept4(bound, nullptr, nullptr,
- type & (SOCK_NONBLOCK | SOCK_CLOEXEC)));
- MaybeSave(); // Successful accept.
-
- // Cleanup no longer needed resources.
- RETURN_ERROR_IF_SYSCALL_FAIL(close(bound));
- MaybeSave(); // Dropped original socket.
-
- // Only unlink if path is not in abstract namespace.
- if (bind_addr.sun_path[0] != 0) {
- RETURN_ERROR_IF_SYSCALL_FAIL(unlink(bind_addr.sun_path));
- MaybeSave(); // Unlinked path.
- }
-
- // accepted is passed before connected so that connected is destroyed before
- // accepted: destructors for nonstatic member objects are called in the
- // reverse order in which they appear in the class declaration.
- return absl::make_unique<AddrFDSocketPair>(accepted, connected, bind_addr,
- extra_addr);
- };
-}
-
-Creator<SocketPair> FilesystemAcceptBindSocketPairCreator(int domain, int type,
- int protocol) {
- return AcceptBindSocketPairCreator(/* abstract= */ false, domain, type,
- protocol);
-}
-
-Creator<SocketPair> AbstractAcceptBindSocketPairCreator(int domain, int type,
- int protocol) {
- return AcceptBindSocketPairCreator(/* abstract= */ true, domain, type,
- protocol);
-}
-
-Creator<SocketPair> BidirectionalBindSocketPairCreator(bool abstract,
- int domain, int type,
- int protocol) {
- return [=]() -> PosixErrorOr<std::unique_ptr<FDSocketPair>> {
- ASSIGN_OR_RETURN_ERRNO(struct sockaddr_un addr1,
- UniqueUnixAddr(abstract, domain));
- ASSIGN_OR_RETURN_ERRNO(struct sockaddr_un addr2,
- UniqueUnixAddr(abstract, domain));
-
- int sock1;
- RETURN_ERROR_IF_SYSCALL_FAIL(sock1 = socket(domain, type, protocol));
- MaybeSave(); // Successful socket creation.
- RETURN_ERROR_IF_SYSCALL_FAIL(
- bind(sock1, AsSockAddr(&addr1), sizeof(addr1)));
- MaybeSave(); // Successful bind.
-
- int sock2;
- RETURN_ERROR_IF_SYSCALL_FAIL(sock2 = socket(domain, type, protocol));
- MaybeSave(); // Successful socket creation.
- RETURN_ERROR_IF_SYSCALL_FAIL(
- bind(sock2, AsSockAddr(&addr2), sizeof(addr2)));
- MaybeSave(); // Successful bind.
-
- RETURN_ERROR_IF_SYSCALL_FAIL(
- connect(sock1, AsSockAddr(&addr2), sizeof(addr2)));
- MaybeSave(); // Successful connect.
-
- RETURN_ERROR_IF_SYSCALL_FAIL(
- connect(sock2, AsSockAddr(&addr1), sizeof(addr1)));
- MaybeSave(); // Successful connect.
-
- // Cleanup no longer needed resources.
-
- // Only unlink if path is not in abstract namespace.
- if (addr1.sun_path[0] != 0) {
- RETURN_ERROR_IF_SYSCALL_FAIL(unlink(addr1.sun_path));
- MaybeSave(); // Successful unlink.
- }
-
- // Only unlink if path is not in abstract namespace.
- if (addr2.sun_path[0] != 0) {
- RETURN_ERROR_IF_SYSCALL_FAIL(unlink(addr2.sun_path));
- MaybeSave(); // Successful unlink.
- }
-
- return absl::make_unique<FDSocketPair>(sock1, sock2);
- };
-}
-
-Creator<SocketPair> FilesystemBidirectionalBindSocketPairCreator(int domain,
- int type,
- int protocol) {
- return BidirectionalBindSocketPairCreator(/* abstract= */ false, domain, type,
- protocol);
-}
-
-Creator<SocketPair> AbstractBidirectionalBindSocketPairCreator(int domain,
- int type,
- int protocol) {
- return BidirectionalBindSocketPairCreator(/* abstract= */ true, domain, type,
- protocol);
-}
-
-Creator<SocketPair> SocketpairGoferSocketPairCreator(int domain, int type,
- int protocol) {
- return [=]() -> PosixErrorOr<std::unique_ptr<FDSocketPair>> {
- struct sockaddr_un addr = {};
- constexpr char kSocketGoferPath[] = "/socket";
- memcpy(addr.sun_path, kSocketGoferPath, sizeof(kSocketGoferPath));
- addr.sun_family = domain;
-
- int sock1;
- RETURN_ERROR_IF_SYSCALL_FAIL(sock1 = socket(domain, type, protocol));
- MaybeSave(); // Successful socket creation.
- RETURN_ERROR_IF_SYSCALL_FAIL(
- connect(sock1, AsSockAddr(&addr), sizeof(addr)));
- MaybeSave(); // Successful connect.
-
- int sock2;
- RETURN_ERROR_IF_SYSCALL_FAIL(sock2 = socket(domain, type, protocol));
- MaybeSave(); // Successful socket creation.
- RETURN_ERROR_IF_SYSCALL_FAIL(
- connect(sock2, AsSockAddr(&addr), sizeof(addr)));
- MaybeSave(); // Successful connect.
-
- // Make and close another socketpair to ensure that the duped ends of the
- // first socketpair get closed.
- //
- // The problem is that there is no way to atomically send and close an FD.
- // The closest that we can do is send and then immediately close the FD,
- // which is what we do in the gofer. The gofer won't respond to another
- // request until the reply is sent and the FD is closed, so forcing the
- // gofer to handle another request will ensure that this has happened.
- for (int i = 0; i < 2; i++) {
- int sock;
- RETURN_ERROR_IF_SYSCALL_FAIL(sock = socket(domain, type, protocol));
- RETURN_ERROR_IF_SYSCALL_FAIL(
- connect(sock, AsSockAddr(&addr), sizeof(addr)));
- RETURN_ERROR_IF_SYSCALL_FAIL(close(sock));
- }
-
- return absl::make_unique<FDSocketPair>(sock1, sock2);
- };
-}
-
-Creator<SocketPair> SocketpairGoferFileSocketPairCreator(int flags) {
- return [=]() -> PosixErrorOr<std::unique_ptr<FDSocketPair>> {
- constexpr char kSocketGoferPath[] = "/socket";
-
- int sock1;
- RETURN_ERROR_IF_SYSCALL_FAIL(sock1 =
- open(kSocketGoferPath, O_RDWR | flags));
- MaybeSave(); // Successful socket creation.
-
- int sock2;
- RETURN_ERROR_IF_SYSCALL_FAIL(sock2 =
- open(kSocketGoferPath, O_RDWR | flags));
- MaybeSave(); // Successful socket creation.
-
- return absl::make_unique<FDSocketPair>(sock1, sock2);
- };
-}
-
-Creator<SocketPair> UnboundSocketPairCreator(bool abstract, int domain,
- int type, int protocol) {
- return [=]() -> PosixErrorOr<std::unique_ptr<AddrFDSocketPair>> {
- ASSIGN_OR_RETURN_ERRNO(struct sockaddr_un addr1,
- UniqueUnixAddr(abstract, domain));
- ASSIGN_OR_RETURN_ERRNO(struct sockaddr_un addr2,
- UniqueUnixAddr(abstract, domain));
-
- int sock1;
- RETURN_ERROR_IF_SYSCALL_FAIL(sock1 = socket(domain, type, protocol));
- MaybeSave(); // Successful socket creation.
- int sock2;
- RETURN_ERROR_IF_SYSCALL_FAIL(sock2 = socket(domain, type, protocol));
- MaybeSave(); // Successful socket creation.
- return absl::make_unique<AddrFDSocketPair>(sock1, sock2, addr1, addr2);
- };
-}
-
-Creator<SocketPair> FilesystemUnboundSocketPairCreator(int domain, int type,
- int protocol) {
- return UnboundSocketPairCreator(/* abstract= */ false, domain, type,
- protocol);
-}
-
-Creator<SocketPair> AbstractUnboundSocketPairCreator(int domain, int type,
- int protocol) {
- return UnboundSocketPairCreator(/* abstract= */ true, domain, type, protocol);
-}
-
-void LocalhostAddr(struct sockaddr_in* addr, bool dual_stack) {
- addr->sin_family = AF_INET;
- addr->sin_port = htons(0);
- inet_pton(AF_INET, "127.0.0.1",
- reinterpret_cast<void*>(&addr->sin_addr.s_addr));
-}
-
-void LocalhostAddr(struct sockaddr_in6* addr, bool dual_stack) {
- addr->sin6_family = AF_INET6;
- addr->sin6_port = htons(0);
- if (dual_stack) {
- inet_pton(AF_INET6, "::ffff:127.0.0.1",
- reinterpret_cast<void*>(&addr->sin6_addr.s6_addr));
- } else {
- inet_pton(AF_INET6, "::1",
- reinterpret_cast<void*>(&addr->sin6_addr.s6_addr));
- }
- addr->sin6_scope_id = 0;
-}
-
-template <typename T>
-PosixErrorOr<T> BindIP(int fd, bool dual_stack) {
- T addr = {};
- LocalhostAddr(&addr, dual_stack);
- RETURN_ERROR_IF_SYSCALL_FAIL(bind(fd, AsSockAddr(&addr), sizeof(addr)));
- socklen_t addrlen = sizeof(addr);
- RETURN_ERROR_IF_SYSCALL_FAIL(getsockname(fd, AsSockAddr(&addr), &addrlen));
- return addr;
-}
-
-template <typename T>
-PosixErrorOr<T> TCPBindAndListen(int fd, bool dual_stack) {
- ASSIGN_OR_RETURN_ERRNO(T addr, BindIP<T>(fd, dual_stack));
- RETURN_ERROR_IF_SYSCALL_FAIL(
- listen(fd, /* backlog = */ 5)); // NOLINT(bugprone-argument-comment)
- return addr;
-}
-
-template <typename T>
-PosixErrorOr<std::unique_ptr<AddrFDSocketPair>>
-CreateTCPConnectAcceptSocketPair(int bound, int connected, int type,
- bool dual_stack, T bind_addr) {
- int connect_result = 0;
- RETURN_ERROR_IF_SYSCALL_FAIL(
- (connect_result = RetryEINTR(connect)(connected, AsSockAddr(&bind_addr),
- sizeof(bind_addr))) == -1 &&
- errno == EINPROGRESS
- ? 0
- : connect_result);
- MaybeSave(); // Successful connect.
-
- if (connect_result == -1) {
- struct pollfd connect_poll = {connected, POLLOUT | POLLERR | POLLHUP, 0};
- RETURN_ERROR_IF_SYSCALL_FAIL(RetryEINTR(poll)(&connect_poll, 1, 0));
- int error = 0;
- socklen_t errorlen = sizeof(error);
- RETURN_ERROR_IF_SYSCALL_FAIL(
- getsockopt(connected, SOL_SOCKET, SO_ERROR, &error, &errorlen));
- errno = error;
- RETURN_ERROR_IF_SYSCALL_FAIL(
- /* connect */ error == 0 ? 0 : -1);
- }
-
- int accepted = -1;
- struct pollfd accept_poll = {bound, POLLIN, 0};
- while (accepted == -1) {
- RETURN_ERROR_IF_SYSCALL_FAIL(RetryEINTR(poll)(&accept_poll, 1, 0));
-
- RETURN_ERROR_IF_SYSCALL_FAIL(
- (accepted = RetryEINTR(accept4)(
- bound, nullptr, nullptr, type & (SOCK_NONBLOCK | SOCK_CLOEXEC))) ==
- -1 &&
- errno == EAGAIN
- ? 0
- : accepted);
- }
- MaybeSave(); // Successful accept.
-
- T extra_addr = {};
- LocalhostAddr(&extra_addr, dual_stack);
- return absl::make_unique<AddrFDSocketPair>(connected, accepted, bind_addr,
- extra_addr);
-}
-
-template <typename T>
-PosixErrorOr<std::unique_ptr<AddrFDSocketPair>> CreateTCPAcceptBindSocketPair(
- int bound, int connected, int type, bool dual_stack) {
- ASSIGN_OR_RETURN_ERRNO(T bind_addr, TCPBindAndListen<T>(bound, dual_stack));
-
- auto result = CreateTCPConnectAcceptSocketPair(bound, connected, type,
- dual_stack, bind_addr);
-
- // Clean up resources that are no longer needed.
- RETURN_ERROR_IF_SYSCALL_FAIL(close(bound));
- MaybeSave(); // Successful close.
-
- return result;
-}
-
-Creator<SocketPair> TCPAcceptBindSocketPairCreator(int domain, int type,
- int protocol,
- bool dual_stack) {
- return [=]() -> PosixErrorOr<std::unique_ptr<AddrFDSocketPair>> {
- int bound;
- RETURN_ERROR_IF_SYSCALL_FAIL(bound = socket(domain, type, protocol));
- MaybeSave(); // Successful socket creation.
-
- int connected;
- RETURN_ERROR_IF_SYSCALL_FAIL(connected = socket(domain, type, protocol));
- MaybeSave(); // Successful socket creation.
-
- if (domain == AF_INET) {
- return CreateTCPAcceptBindSocketPair<sockaddr_in>(bound, connected, type,
- dual_stack);
- }
- return CreateTCPAcceptBindSocketPair<sockaddr_in6>(bound, connected, type,
- dual_stack);
- };
-}
-
-Creator<SocketPair> TCPAcceptBindPersistentListenerSocketPairCreator(
- int domain, int type, int protocol, bool dual_stack) {
- // These are lazily initialized below, on the first call to the returned
- // lambda. These values are private to each returned lambda, but shared across
- // invocations of a specific lambda.
- //
- // The sharing allows pairs created with the same parameters to share a
- // listener. This prevents future connects from failing if the connecting
- // socket selects a port which had previously been used by a listening socket
- // that still has some connections in TIME-WAIT.
- //
- // The lazy initialization is to avoid creating sockets during parameter
- // enumeration. This is important because parameters are enumerated during the
- // build process where networking may not be available.
- auto listener = std::make_shared<absl::optional<int>>(absl::optional<int>());
- auto addr4 = std::make_shared<absl::optional<sockaddr_in>>(
- absl::optional<sockaddr_in>());
- auto addr6 = std::make_shared<absl::optional<sockaddr_in6>>(
- absl::optional<sockaddr_in6>());
-
- return [=]() -> PosixErrorOr<std::unique_ptr<AddrFDSocketPair>> {
- int connected;
- RETURN_ERROR_IF_SYSCALL_FAIL(connected = socket(domain, type, protocol));
- MaybeSave(); // Successful socket creation.
-
- // Share the listener across invocations.
- if (!listener->has_value()) {
- int fd = socket(domain, type, protocol);
- if (fd < 0) {
- return PosixError(errno, absl::StrCat("socket(", domain, ", ", type,
- ", ", protocol, ")"));
- }
- listener->emplace(fd);
- MaybeSave(); // Successful socket creation.
- }
-
- // Bind the listener once, but create a new connect/accept pair each
- // time.
- if (domain == AF_INET) {
- if (!addr4->has_value()) {
- addr4->emplace(
- TCPBindAndListen<sockaddr_in>(listener->value(), dual_stack)
- .ValueOrDie());
- }
- return CreateTCPConnectAcceptSocketPair(listener->value(), connected,
- type, dual_stack, addr4->value());
- }
- if (!addr6->has_value()) {
- addr6->emplace(
- TCPBindAndListen<sockaddr_in6>(listener->value(), dual_stack)
- .ValueOrDie());
- }
- return CreateTCPConnectAcceptSocketPair(listener->value(), connected, type,
- dual_stack, addr6->value());
- };
-}
-
-template <typename T>
-PosixErrorOr<std::unique_ptr<AddrFDSocketPair>> CreateUDPBoundSocketPair(
- int sock1, int sock2, int type, bool dual_stack) {
- ASSIGN_OR_RETURN_ERRNO(T addr1, BindIP<T>(sock1, dual_stack));
- ASSIGN_OR_RETURN_ERRNO(T addr2, BindIP<T>(sock2, dual_stack));
-
- return absl::make_unique<AddrFDSocketPair>(sock1, sock2, addr1, addr2);
-}
-
-template <typename T>
-PosixErrorOr<std::unique_ptr<AddrFDSocketPair>>
-CreateUDPBidirectionalBindSocketPair(int sock1, int sock2, int type,
- bool dual_stack) {
- ASSIGN_OR_RETURN_ERRNO(
- auto socks, CreateUDPBoundSocketPair<T>(sock1, sock2, type, dual_stack));
-
- // Connect sock1 to sock2.
- RETURN_ERROR_IF_SYSCALL_FAIL(connect(socks->first_fd(), socks->second_addr(),
- socks->second_addr_size()));
- MaybeSave(); // Successful connection.
-
- // Connect sock2 to sock1.
- RETURN_ERROR_IF_SYSCALL_FAIL(connect(socks->second_fd(), socks->first_addr(),
- socks->first_addr_size()));
- MaybeSave(); // Successful connection.
-
- return socks;
-}
-
-Creator<SocketPair> UDPBidirectionalBindSocketPairCreator(int domain, int type,
- int protocol,
- bool dual_stack) {
- return [=]() -> PosixErrorOr<std::unique_ptr<AddrFDSocketPair>> {
- int sock1;
- RETURN_ERROR_IF_SYSCALL_FAIL(sock1 = socket(domain, type, protocol));
- MaybeSave(); // Successful socket creation.
-
- int sock2;
- RETURN_ERROR_IF_SYSCALL_FAIL(sock2 = socket(domain, type, protocol));
- MaybeSave(); // Successful socket creation.
-
- if (domain == AF_INET) {
- return CreateUDPBidirectionalBindSocketPair<sockaddr_in>(
- sock1, sock2, type, dual_stack);
- }
- return CreateUDPBidirectionalBindSocketPair<sockaddr_in6>(sock1, sock2,
- type, dual_stack);
- };
-}
-
-Creator<SocketPair> UDPUnboundSocketPairCreator(int domain, int type,
- int protocol, bool dual_stack) {
- return [=]() -> PosixErrorOr<std::unique_ptr<FDSocketPair>> {
- int sock1;
- RETURN_ERROR_IF_SYSCALL_FAIL(sock1 = socket(domain, type, protocol));
- MaybeSave(); // Successful socket creation.
-
- int sock2;
- RETURN_ERROR_IF_SYSCALL_FAIL(sock2 = socket(domain, type, protocol));
- MaybeSave(); // Successful socket creation.
-
- return absl::make_unique<FDSocketPair>(sock1, sock2);
- };
-}
-
-SocketPairKind Reversed(SocketPairKind const& base) {
- auto const& creator = base.creator;
- return SocketPairKind{
- absl::StrCat("reversed ", base.description), base.domain, base.type,
- base.protocol,
- [creator]() -> PosixErrorOr<std::unique_ptr<ReversedSocketPair>> {
- ASSIGN_OR_RETURN_ERRNO(auto creator_value, creator());
- return absl::make_unique<ReversedSocketPair>(std::move(creator_value));
- }};
-}
-
-Creator<FileDescriptor> UnboundSocketCreator(int domain, int type,
- int protocol) {
- return [=]() -> PosixErrorOr<std::unique_ptr<FileDescriptor>> {
- int sock;
- RETURN_ERROR_IF_SYSCALL_FAIL(sock = socket(domain, type, protocol));
- MaybeSave(); // Successful socket creation.
-
- return absl::make_unique<FileDescriptor>(sock);
- };
-}
-
-std::vector<SocketPairKind> IncludeReversals(std::vector<SocketPairKind> vec) {
- return ApplyVecToVec<SocketPairKind>(std::vector<Middleware>{NoOp, Reversed},
- vec);
-}
-
-SocketPairKind NoOp(SocketPairKind const& base) { return base; }
-
-void TransferTest(int fd1, int fd2) {
- char buf1[20];
- RandomizeBuffer(buf1, sizeof(buf1));
- ASSERT_THAT(WriteFd(fd1, buf1, sizeof(buf1)),
- SyscallSucceedsWithValue(sizeof(buf1)));
-
- char buf2[20];
- ASSERT_THAT(ReadFd(fd2, buf2, sizeof(buf2)),
- SyscallSucceedsWithValue(sizeof(buf2)));
-
- EXPECT_EQ(0, memcmp(buf1, buf2, sizeof(buf1)));
-
- RandomizeBuffer(buf1, sizeof(buf1));
- ASSERT_THAT(WriteFd(fd2, buf1, sizeof(buf1)),
- SyscallSucceedsWithValue(sizeof(buf1)));
-
- ASSERT_THAT(ReadFd(fd1, buf2, sizeof(buf2)),
- SyscallSucceedsWithValue(sizeof(buf2)));
-
- EXPECT_EQ(0, memcmp(buf1, buf2, sizeof(buf1)));
-}
-
-// Initializes the given buffer with random data.
-void RandomizeBuffer(char* ptr, size_t len) {
- uint32_t seed = time(nullptr);
- for (size_t i = 0; i < len; ++i) {
- ptr[i] = static_cast<char>(rand_r(&seed));
- }
-}
-
-size_t CalculateUnixSockAddrLen(const char* sun_path) {
- // Abstract addresses always return the full length.
- if (sun_path[0] == 0) {
- return sizeof(sockaddr_un);
- }
- // Filesystem addresses use the path length plus the 2-byte sun_family and
- // the null terminator.
- return strlen(sun_path) + 3;
-}
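As a worked illustration of the rule above: a filesystem sun_path such as "/tmp/s" (6 bytes) yields 6 + 3 = 9, while any abstract address (leading NUL byte) yields the full sizeof(sockaddr_un).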
-
-struct sockaddr_storage AddrFDSocketPair::to_storage(const sockaddr_un& addr) {
- struct sockaddr_storage addr_storage = {};
- memcpy(&addr_storage, &addr, sizeof(addr));
- return addr_storage;
-}
-
-struct sockaddr_storage AddrFDSocketPair::to_storage(const sockaddr_in& addr) {
- struct sockaddr_storage addr_storage = {};
- memcpy(&addr_storage, &addr, sizeof(addr));
- return addr_storage;
-}
-
-struct sockaddr_storage AddrFDSocketPair::to_storage(const sockaddr_in6& addr) {
- struct sockaddr_storage addr_storage = {};
- memcpy(&addr_storage, &addr, sizeof(addr));
- return addr_storage;
-}
-
-SocketKind SimpleSocket(int fam, int type, int proto) {
- return SocketKind{
- absl::StrCat("Family ", fam, ", type ", type, ", proto ", proto), fam,
- type, proto, SyscallSocketCreator(fam, type, proto)};
-}
-
-ssize_t SendLargeSendMsg(const std::unique_ptr<SocketPair>& sockets,
- size_t size, bool reader) {
- const int rfd = sockets->second_fd();
- ScopedThread t([rfd, size, reader] {
- if (!reader) {
- return;
- }
-
- // Potentially too many syscalls in the loop.
- const DisableSave ds;
-
- std::vector<char> buf(size);
- size_t total = 0;
-
- while (total < size) {
- int ret = read(rfd, buf.data(), buf.size());
- if (ret == -1 && errno == EAGAIN) {
- continue;
- }
- if (ret > 0) {
- total += ret;
- }
-
- // Assert to return on first failure.
- ASSERT_THAT(ret, SyscallSucceeds());
- }
- });
-
- std::vector<char> buf(size);
-
- struct iovec iov = {};
- iov.iov_base = buf.data();
- iov.iov_len = buf.size();
-
- struct msghdr msg = {};
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- return RetryEINTR(sendmsg)(sockets->first_fd(), &msg, 0);
-}
-
-namespace internal {
-PosixErrorOr<int> TryPortAvailable(int port, AddressFamily family,
- SocketType type, bool reuse_addr) {
- if (port < 0) {
- return PosixError(EINVAL, "Invalid port");
- }
-
- // Both IPv6 and dual-stack use AF_INET6.
- int sock_fam = (family == AddressFamily::kIpv4 ? AF_INET : AF_INET6);
- int sock_type = (type == SocketType::kTcp ? SOCK_STREAM : SOCK_DGRAM);
- ASSIGN_OR_RETURN_ERRNO(auto fd, Socket(sock_fam, sock_type, 0));
-
- if (reuse_addr) {
- int one = 1;
- RETURN_ERROR_IF_SYSCALL_FAIL(
- setsockopt(fd.get(), SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)));
- }
-
- // Try to bind.
- sockaddr_storage storage = {};
- int storage_size = 0;
- if (family == AddressFamily::kIpv4) {
- sockaddr_in* addr = reinterpret_cast<sockaddr_in*>(&storage);
- storage_size = sizeof(*addr);
- addr->sin_family = AF_INET;
- addr->sin_port = htons(port);
- addr->sin_addr.s_addr = htonl(INADDR_ANY);
- } else {
- sockaddr_in6* addr = reinterpret_cast<sockaddr_in6*>(&storage);
- storage_size = sizeof(*addr);
- addr->sin6_family = AF_INET6;
- addr->sin6_port = htons(port);
- if (family == AddressFamily::kDualStack) {
- inet_pton(AF_INET6, "::ffff:0.0.0.0",
- reinterpret_cast<void*>(&addr->sin6_addr.s6_addr));
- } else {
- addr->sin6_addr = in6addr_any;
- }
- }
-
- RETURN_ERROR_IF_SYSCALL_FAIL(
- bind(fd.get(), AsSockAddr(&storage), storage_size));
-
- // If the user specified 0 as the port, return the port that the kernel
- // assigned; otherwise, validate that the socket actually bound to the
- // requested port.
- sockaddr_storage bound_storage = {};
- socklen_t bound_storage_size = sizeof(bound_storage);
- RETURN_ERROR_IF_SYSCALL_FAIL(
- getsockname(fd.get(), AsSockAddr(&bound_storage), &bound_storage_size));
-
- int available_port = -1;
- if (bound_storage.ss_family == AF_INET) {
- sockaddr_in* addr = reinterpret_cast<sockaddr_in*>(&bound_storage);
- available_port = ntohs(addr->sin_port);
- } else if (bound_storage.ss_family == AF_INET6) {
- sockaddr_in6* addr = reinterpret_cast<sockaddr_in6*>(&bound_storage);
- available_port = ntohs(addr->sin6_port);
- } else {
- return PosixError(EPROTOTYPE, "Getsockname returned invalid family");
- }
-
- // If we requested a specific port, make sure the bound port matches it.
- if (port != 0 && available_port != port) {
- return PosixError(EINVAL,
- absl::StrCat("Bound port ", available_port,
- " was not equal to requested port ", port));
- }
-
- // For TCP sockets, also verify that we can listen.
- if (type == SocketType::kTcp) {
- RETURN_ERROR_IF_SYSCALL_FAIL(listen(fd.get(), 1));
- }
-
- return available_port;
-}
-} // namespace internal
-
-PosixErrorOr<int> SendMsg(int sock, msghdr* msg, char buf[], int buf_size) {
- struct iovec iov;
- iov.iov_base = buf;
- iov.iov_len = buf_size;
- msg->msg_iov = &iov;
- msg->msg_iovlen = 1;
-
- int ret;
- RETURN_ERROR_IF_SYSCALL_FAIL(ret = RetryEINTR(sendmsg)(sock, msg, 0));
- return ret;
-}
-
-PosixErrorOr<int> RecvTimeout(int sock, char buf[], int buf_size, int timeout) {
- fd_set rfd;
- struct timeval to = {.tv_sec = timeout, .tv_usec = 0};
- FD_ZERO(&rfd);
- FD_SET(sock, &rfd);
-
- int ret;
- RETURN_ERROR_IF_SYSCALL_FAIL(ret = select(sock + 1, &rfd, NULL, NULL, &to));
- RETURN_ERROR_IF_SYSCALL_FAIL(
- ret = RetryEINTR(recv)(sock, buf, buf_size, MSG_DONTWAIT));
- return ret;
-}
-
-PosixErrorOr<int> RecvMsgTimeout(int sock, struct msghdr* msg, int timeout) {
- fd_set rfd;
- struct timeval to = {.tv_sec = timeout, .tv_usec = 0};
- FD_ZERO(&rfd);
- FD_SET(sock, &rfd);
-
- int ret;
- RETURN_ERROR_IF_SYSCALL_FAIL(ret = select(sock + 1, &rfd, NULL, NULL, &to));
- RETURN_ERROR_IF_SYSCALL_FAIL(
- ret = RetryEINTR(recvmsg)(sock, msg, MSG_DONTWAIT));
- return ret;
-}
-
-void RecvNoData(int sock) {
- char data = 0;
- struct iovec iov;
- iov.iov_base = &data;
- iov.iov_len = 1;
- struct msghdr msg = {};
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- ASSERT_THAT(RetryEINTR(recvmsg)(sock, &msg, MSG_DONTWAIT),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-TestAddress TestAddress::WithPort(uint16_t port) const {
- TestAddress addr = *this;
- switch (addr.family()) {
- case AF_INET:
- reinterpret_cast<sockaddr_in*>(&addr.addr)->sin_port = htons(port);
- break;
- case AF_INET6:
- reinterpret_cast<sockaddr_in6*>(&addr.addr)->sin6_port = htons(port);
- break;
- }
- return addr;
-}
-
-namespace {
-
-TestAddress V4Addr(std::string description, in_addr_t addr) {
- TestAddress t(std::move(description));
- t.addr.ss_family = AF_INET;
- t.addr_len = sizeof(sockaddr_in);
- reinterpret_cast<sockaddr_in*>(&t.addr)->sin_addr.s_addr = addr;
- return t;
-}
-
-TestAddress V6Addr(std::string description, const in6_addr& addr) {
- TestAddress t(std::move(description));
- t.addr.ss_family = AF_INET6;
- t.addr_len = sizeof(sockaddr_in6);
- reinterpret_cast<sockaddr_in6*>(&t.addr)->sin6_addr = addr;
- return t;
-}
-
-} // namespace
-
-TestAddress V4AddrStr(std::string description, const char* addr) {
- in_addr_t s_addr;
- inet_pton(AF_INET, addr, &s_addr);
- return V4Addr(description, s_addr);
-}
-
-TestAddress V6AddrStr(std::string description, const char* addr) {
- struct in6_addr s_addr;
- inet_pton(AF_INET6, addr, &s_addr);
- return V6Addr(description, s_addr);
-}
-
-TestAddress V4Any() { return V4Addr("V4Any", htonl(INADDR_ANY)); }
-
-TestAddress V4Broadcast() {
- return V4Addr("V4Broadcast", htonl(INADDR_BROADCAST));
-}
-
-TestAddress V4Loopback() {
- return V4Addr("V4Loopback", htonl(INADDR_LOOPBACK));
-}
-
-TestAddress V4LoopbackSubnetBroadcast() {
- return V4AddrStr("V4LoopbackSubnetBroadcast", "127.255.255.255");
-}
-
-TestAddress V4MappedAny() { return V6AddrStr("V4MappedAny", "::ffff:0.0.0.0"); }
-
-TestAddress V4MappedLoopback() {
- return V6AddrStr("V4MappedLoopback", "::ffff:127.0.0.1");
-}
-
-TestAddress V4Multicast() {
- return V4Addr("V4Multicast", inet_addr(kMulticastAddress));
-}
-
-TestAddress V4MulticastAllHosts() {
- return V4Addr("V4MulticastAllHosts", htonl(INADDR_ALLHOSTS_GROUP));
-}
-
-TestAddress V6Any() { return V6Addr("V6Any", in6addr_any); }
-
-TestAddress V6Loopback() { return V6Addr("V6Loopback", in6addr_loopback); }
-
-TestAddress V6Multicast() { return V6AddrStr("V6Multicast", "ff05::1234"); }
-
-TestAddress V6MulticastInterfaceLocalAllNodes() {
- return V6AddrStr("V6MulticastInterfaceLocalAllNodes", "ff01::1");
-}
-
-TestAddress V6MulticastLinkLocalAllNodes() {
- return V6AddrStr("V6MulticastLinkLocalAllNodes", "ff02::1");
-}
-
-TestAddress V6MulticastLinkLocalAllRouters() {
- return V6AddrStr("V6MulticastLinkLocalAllRouters", "ff02::2");
-}
-
-// Checksum computes the internet checksum of a buffer.
-uint16_t Checksum(uint16_t* buf, ssize_t buf_size) {
- // Add up the complete 16-bit words in the buffer.
- uint32_t total = 0;
- for (ssize_t i = 0; i + 1 < buf_size; i += sizeof(*buf)) {
- total += *buf;
- buf++;
- }
-
- // If buf has an odd size, add the remaining byte, which the loop above
- // intentionally skipped to avoid reading past the end of the buffer.
- if (buf_size % 2) {
- total += *reinterpret_cast<unsigned char*>(buf);
- }
-
- // This carries any bits past the lower 16 until everything fits in 16 bits.
- while (total >> 16) {
- uint16_t lower = total & 0xffff;
- uint16_t upper = total >> 16;
- total = lower + upper;
- }
-
- return ~total;
-}
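As a worked illustration of the fold above: a running sum of 0x1f0 + 0xff32 = 0x10122 folds once into 0x0122 + 0x1 = 0x0123, and the returned one's-complement value is 0xfedc.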
-
-uint16_t IPChecksum(struct iphdr ip) {
- return Checksum(reinterpret_cast<uint16_t*>(&ip), sizeof(ip));
-}
-
-// The pseudo-header defined in RFC 768 for calculating the UDP checksum.
-struct udp_pseudo_hdr {
- uint32_t srcip;
- uint32_t destip;
- char zero;
- char protocol;
- uint16_t udplen;
-};
-
-uint16_t UDPChecksum(struct iphdr iphdr, struct udphdr udphdr,
- const char* payload, ssize_t payload_len) {
- struct udp_pseudo_hdr phdr = {};
- phdr.srcip = iphdr.saddr;
- phdr.destip = iphdr.daddr;
- phdr.zero = 0;
- phdr.protocol = IPPROTO_UDP;
- phdr.udplen = udphdr.len;
-
- ssize_t buf_size = sizeof(phdr) + sizeof(udphdr) + payload_len;
- char* buf = static_cast<char*>(malloc(buf_size));
- memcpy(buf, &phdr, sizeof(phdr));
- memcpy(buf + sizeof(phdr), &udphdr, sizeof(udphdr));
- memcpy(buf + sizeof(phdr) + sizeof(udphdr), payload, payload_len);
-
- uint16_t csum = Checksum(reinterpret_cast<uint16_t*>(buf), buf_size);
- free(buf);
- return csum;
-}
-
-uint16_t ICMPChecksum(struct icmphdr icmphdr, const char* payload,
- ssize_t payload_len) {
- ssize_t buf_size = sizeof(icmphdr) + payload_len;
- char* buf = static_cast<char*>(malloc(buf_size));
- memcpy(buf, &icmphdr, sizeof(icmphdr));
- memcpy(buf + sizeof(icmphdr), payload, payload_len);
-
- uint16_t csum = Checksum(reinterpret_cast<uint16_t*>(buf), buf_size);
- free(buf);
- return csum;
-}
-
-PosixErrorOr<uint16_t> AddrPort(int family, sockaddr_storage const& addr) {
- switch (family) {
- case AF_INET:
- return static_cast<uint16_t>(
- reinterpret_cast<sockaddr_in const*>(&addr)->sin_port);
- case AF_INET6:
- return static_cast<uint16_t>(
- reinterpret_cast<sockaddr_in6 const*>(&addr)->sin6_port);
- default:
- return PosixError(EINVAL,
- absl::StrCat("unknown socket family: ", family));
- }
-}
-
-PosixError SetAddrPort(int family, sockaddr_storage* addr, uint16_t port) {
- switch (family) {
- case AF_INET:
- reinterpret_cast<sockaddr_in*>(addr)->sin_port = port;
- return NoError();
- case AF_INET6:
- reinterpret_cast<sockaddr_in6*>(addr)->sin6_port = port;
- return NoError();
- default:
- return PosixError(EINVAL,
- absl::StrCat("unknown socket family: ", family));
- }
-}
-
-void SetupTimeWaitClose(const TestAddress* listener,
- const TestAddress* connector, bool reuse,
- bool accept_close, sockaddr_storage* listen_addr,
- sockaddr_storage* conn_bound_addr) {
- // Create the listening socket.
- FileDescriptor listen_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(listener->family(), SOCK_STREAM, IPPROTO_TCP));
- if (reuse) {
- ASSERT_THAT(setsockopt(listen_fd.get(), SOL_SOCKET, SO_REUSEADDR,
- &kSockOptOn, sizeof(kSockOptOn)),
- SyscallSucceeds());
- }
- ASSERT_THAT(
- bind(listen_fd.get(), AsSockAddr(listen_addr), listener->addr_len),
- SyscallSucceeds());
- ASSERT_THAT(listen(listen_fd.get(), SOMAXCONN), SyscallSucceeds());
-
- // Get the port bound by the listening socket.
- socklen_t addrlen = listener->addr_len;
- ASSERT_THAT(getsockname(listen_fd.get(), AsSockAddr(listen_addr), &addrlen),
- SyscallSucceeds());
-
- uint16_t const port =
- ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener->family(), *listen_addr));
-
- // Connect to the listening socket.
- FileDescriptor conn_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(connector->family(), SOCK_STREAM, IPPROTO_TCP));
-
- // We disable saves after this point, as a save/restore (S/R) regenerates
- // the netstack seed, which changes which ports/ISNs are picked for a given
- // tuple (src ip, src port, dst ip, dst port). This can cause the final
- // SYN to use a sequence number that looks like one from the current
- // connection in TIME_WAIT, so it will not be accepted, causing the test
- // to time out.
- //
- // TODO(gvisor.dev/issue/940): S/R portSeed/portHint
- DisableSave ds;
-
- sockaddr_storage conn_addr = connector->addr;
- ASSERT_NO_ERRNO(SetAddrPort(connector->family(), &conn_addr, port));
- ASSERT_THAT(RetryEINTR(connect)(conn_fd.get(), AsSockAddr(&conn_addr),
- connector->addr_len),
- SyscallSucceeds());
-
- // Accept the connection.
- auto accepted =
- ASSERT_NO_ERRNO_AND_VALUE(Accept(listen_fd.get(), nullptr, nullptr));
-
- // Get the address/port bound by the connecting socket.
- socklen_t conn_addrlen = connector->addr_len;
- ASSERT_THAT(
- getsockname(conn_fd.get(), AsSockAddr(conn_bound_addr), &conn_addrlen),
- SyscallSucceeds());
-
- FileDescriptor active_closefd, passive_closefd;
- if (accept_close) {
- active_closefd = std::move(accepted);
- passive_closefd = std::move(conn_fd);
- } else {
- active_closefd = std::move(conn_fd);
- passive_closefd = std::move(accepted);
- }
-
- // shutdown to trigger TIME_WAIT.
- ASSERT_THAT(shutdown(active_closefd.get(), SHUT_WR), SyscallSucceeds());
- {
- constexpr int kTimeout = 10000;
- pollfd pfd = {
- .fd = passive_closefd.get(),
- .events = POLLIN,
- };
- ASSERT_THAT(poll(&pfd, 1, kTimeout), SyscallSucceedsWithValue(1));
- ASSERT_EQ(pfd.revents, POLLIN);
- }
- ASSERT_THAT(shutdown(passive_closefd.get(), SHUT_WR), SyscallSucceeds());
- {
- constexpr int kTimeout = 10000;
- constexpr int16_t want_events = POLLHUP;
- pollfd pfd = {
- .fd = active_closefd.get(),
- .events = want_events,
- };
- ASSERT_THAT(poll(&pfd, 1, kTimeout), SyscallSucceedsWithValue(1));
- }
-
- // This sleep reduces flakiness by giving the passive-close end time to
- // transition from LAST_ACK to CLOSED.
- absl::SleepFor(absl::Seconds(1));
-}
-
-constexpr char kRangeFile[] = "/proc/sys/net/ipv4/ip_local_port_range";
-
-PosixErrorOr<int> MaybeLimitEphemeralPorts() {
- int min = 0;
- int max = 1 << 16;
-
- // Read the ephemeral range from /proc.
- ASSIGN_OR_RETURN_ERRNO(std::string rangefile, GetContents(kRangeFile));
- const std::string err_msg =
- absl::StrFormat("%s has invalid content: %s", kRangeFile, rangefile);
- if (rangefile.back() != '\n') {
- return PosixError(EINVAL, err_msg);
- }
- rangefile.pop_back();
- std::vector<std::string> range =
- absl::StrSplit(rangefile, absl::ByAnyChar("\t "));
- if (range.size() < 2 || !absl::SimpleAtoi(range.front(), &min) ||
- !absl::SimpleAtoi(range.back(), &max)) {
- return PosixError(EINVAL, err_msg);
- }
-
- // If we can open as writable, limit the range.
- if (!access(kRangeFile, W_OK)) {
- ASSIGN_OR_RETURN_ERRNO(FileDescriptor fd,
- Open(kRangeFile, O_WRONLY | O_TRUNC, 0));
- int newMax = min + 50;
- const std::string small_range = absl::StrFormat("%d %d", min, newMax);
- int n = write(fd.get(), small_range.c_str(), small_range.size());
- if (n < 0) {
- // Hostinet doesn't allow modifying the host port range. And if we're root
- // (as we are in some tests), access and open will succeed even if the
- // file mode is readonly.
- if (errno != EACCES) {
- return PosixError(
- errno,
- absl::StrFormat("write(%d [%s], \"%s\", %d)", fd.get(), kRangeFile,
- small_range.c_str(), small_range.size()));
- }
- } else {
- max = newMax;
- }
- }
- return max - min;
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_test_util.h b/test/syscalls/linux/socket_test_util.h
deleted file mode 100644
index 0e2be63cc..000000000
--- a/test/syscalls/linux/socket_test_util.h
+++ /dev/null
@@ -1,591 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_SOCKET_TEST_UTIL_H_
-#define GVISOR_TEST_SYSCALLS_SOCKET_TEST_UTIL_H_
-
-#include <errno.h>
-#include <netinet/ip.h>
-#include <netinet/ip_icmp.h>
-#include <netinet/udp.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <sys/un.h>
-
-#include <functional>
-#include <memory>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "absl/strings/str_format.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/posix_error.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Wrapper for socket(2) that returns a FileDescriptor.
-inline PosixErrorOr<FileDescriptor> Socket(int family, int type, int protocol) {
- int fd = socket(family, type, protocol);
- MaybeSave();
- if (fd < 0) {
- return PosixError(
- errno, absl::StrFormat("socket(%d, %d, %d)", family, type, protocol));
- }
- return FileDescriptor(fd);
-}
-
-// Wrapper for accept(2) that returns a FileDescriptor.
-inline PosixErrorOr<FileDescriptor> Accept(int sockfd, sockaddr* addr,
- socklen_t* addrlen) {
- int fd = RetryEINTR(accept)(sockfd, addr, addrlen);
- MaybeSave();
- if (fd < 0) {
- return PosixError(
- errno, absl::StrFormat("accept(%d, %p, %p)", sockfd, addr, addrlen));
- }
- return FileDescriptor(fd);
-}
-
-// Wrapper for accept4(2) that returns a FileDescriptor.
-inline PosixErrorOr<FileDescriptor> Accept4(int sockfd, sockaddr* addr,
- socklen_t* addrlen, int flags) {
- int fd = RetryEINTR(accept4)(sockfd, addr, addrlen, flags);
- MaybeSave();
- if (fd < 0) {
- return PosixError(errno, absl::StrFormat("accept4(%d, %p, %p, %#x)", sockfd,
- addr, addrlen, flags));
- }
- return FileDescriptor(fd);
-}
-
-inline ssize_t SendFd(int fd, void* buf, size_t count, int flags) {
- return internal::ApplyFileIoSyscall(
- [&](size_t completed) {
- return sendto(fd, static_cast<char*>(buf) + completed,
- count - completed, flags, nullptr, 0);
- },
- count);
-}
-
-PosixErrorOr<struct sockaddr_un> UniqueUnixAddr(bool abstract, int domain);
-
-// A Creator<T> is a function that attempts to create and return a new T. (This
-// is copy/pasted from cloud/gvisor/api/sandbox_util.h and is just duplicated
-// here for clarity.)
-template <typename T>
-using Creator = std::function<PosixErrorOr<std::unique_ptr<T>>()>;
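A minimal sketch of how a Creator is exercised inside a test body (SyscallSocketPairCreator and TransferTest are declared further below; the local names here are illustrative):

  Creator<SocketPair> create =
      SyscallSocketPairCreator(AF_UNIX, SOCK_STREAM, 0);
  std::unique_ptr<SocketPair> pair = ASSERT_NO_ERRNO_AND_VALUE(create());
  ASSERT_NO_FATAL_FAILURE(TransferTest(pair->first_fd(), pair->second_fd()));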
-
-// A SocketPair represents a pair of socket file descriptors owned by the
-// SocketPair.
-class SocketPair {
- public:
- virtual ~SocketPair() = default;
-
- virtual int first_fd() const = 0;
- virtual int second_fd() const = 0;
- virtual int release_first_fd() = 0;
- virtual int release_second_fd() = 0;
- virtual const struct sockaddr* first_addr() const = 0;
- virtual const struct sockaddr* second_addr() const = 0;
- virtual size_t first_addr_size() const = 0;
- virtual size_t second_addr_size() const = 0;
- virtual size_t first_addr_len() const = 0;
- virtual size_t second_addr_len() const = 0;
-};
-
- // An FDSocketPair is a SocketPair that consists of only a pair of file
-// descriptors.
-class FDSocketPair : public SocketPair {
- public:
- FDSocketPair(int first_fd, int second_fd)
- : first_(first_fd), second_(second_fd) {}
- FDSocketPair(std::unique_ptr<FileDescriptor> first_fd,
- std::unique_ptr<FileDescriptor> second_fd)
- : first_(first_fd->release()), second_(second_fd->release()) {}
-
- int first_fd() const override { return first_.get(); }
- int second_fd() const override { return second_.get(); }
- int release_first_fd() override { return first_.release(); }
- int release_second_fd() override { return second_.release(); }
- const struct sockaddr* first_addr() const override { return nullptr; }
- const struct sockaddr* second_addr() const override { return nullptr; }
- size_t first_addr_size() const override { return 0; }
- size_t second_addr_size() const override { return 0; }
- size_t first_addr_len() const override { return 0; }
- size_t second_addr_len() const override { return 0; }
-
- private:
- FileDescriptor first_;
- FileDescriptor second_;
-};
-
-// CalculateUnixSockAddrLen calculates the length returned by recvfrom(2) and
-// recvmsg(2) for Unix sockets.
-size_t CalculateUnixSockAddrLen(const char* sun_path);
-
- // An AddrFDSocketPair is a SocketPair that consists of a pair of file
-// descriptors in addition to a pair of socket addresses.
-class AddrFDSocketPair : public SocketPair {
- public:
- AddrFDSocketPair(int first_fd, int second_fd,
- const struct sockaddr_un& first_address,
- const struct sockaddr_un& second_address)
- : first_(first_fd),
- second_(second_fd),
- first_addr_(to_storage(first_address)),
- second_addr_(to_storage(second_address)),
- first_len_(CalculateUnixSockAddrLen(first_address.sun_path)),
- second_len_(CalculateUnixSockAddrLen(second_address.sun_path)),
- first_size_(sizeof(first_address)),
- second_size_(sizeof(second_address)) {}
-
- AddrFDSocketPair(int first_fd, int second_fd,
- const struct sockaddr_in& first_address,
- const struct sockaddr_in& second_address)
- : first_(first_fd),
- second_(second_fd),
- first_addr_(to_storage(first_address)),
- second_addr_(to_storage(second_address)),
- first_len_(sizeof(first_address)),
- second_len_(sizeof(second_address)),
- first_size_(sizeof(first_address)),
- second_size_(sizeof(second_address)) {}
-
- AddrFDSocketPair(int first_fd, int second_fd,
- const struct sockaddr_in6& first_address,
- const struct sockaddr_in6& second_address)
- : first_(first_fd),
- second_(second_fd),
- first_addr_(to_storage(first_address)),
- second_addr_(to_storage(second_address)),
- first_len_(sizeof(first_address)),
- second_len_(sizeof(second_address)),
- first_size_(sizeof(first_address)),
- second_size_(sizeof(second_address)) {}
-
- int first_fd() const override { return first_.get(); }
- int second_fd() const override { return second_.get(); }
- int release_first_fd() override { return first_.release(); }
- int release_second_fd() override { return second_.release(); }
- const struct sockaddr* first_addr() const override {
- return reinterpret_cast<const struct sockaddr*>(&first_addr_);
- }
- const struct sockaddr* second_addr() const override {
- return reinterpret_cast<const struct sockaddr*>(&second_addr_);
- }
- size_t first_addr_size() const override { return first_size_; }
- size_t second_addr_size() const override { return second_size_; }
- size_t first_addr_len() const override { return first_len_; }
- size_t second_addr_len() const override { return second_len_; }
-
- private:
- // to_storage converts a sockaddr_* to a sockaddr_storage.
- static struct sockaddr_storage to_storage(const sockaddr_un& addr);
- static struct sockaddr_storage to_storage(const sockaddr_in& addr);
- static struct sockaddr_storage to_storage(const sockaddr_in6& addr);
-
- FileDescriptor first_;
- FileDescriptor second_;
- const struct sockaddr_storage first_addr_;
- const struct sockaddr_storage second_addr_;
- const size_t first_len_;
- const size_t second_len_;
- const size_t first_size_;
- const size_t second_size_;
-};
-
-// SyscallSocketPairCreator returns a Creator<SocketPair> that obtains file
-// descriptors by invoking the socketpair() syscall.
-Creator<SocketPair> SyscallSocketPairCreator(int domain, int type,
- int protocol);
-
-// SyscallSocketCreator returns a Creator<FileDescriptor> that obtains a file
-// descriptor by invoking the socket() syscall.
-Creator<FileDescriptor> SyscallSocketCreator(int domain, int type,
- int protocol);
-
-// FilesystemBidirectionalBindSocketPairCreator returns a Creator<SocketPair>
-// that obtains file descriptors by invoking the bind() and connect() syscalls
-// on filesystem paths. Only works for DGRAM sockets.
-Creator<SocketPair> FilesystemBidirectionalBindSocketPairCreator(int domain,
- int type,
- int protocol);
-
-// AbstractBidirectionalBindSocketPairCreator returns a Creator<SocketPair> that
-// obtains file descriptors by invoking the bind() and connect() syscalls on
-// abstract namespace paths. Only works for DGRAM sockets.
-Creator<SocketPair> AbstractBidirectionalBindSocketPairCreator(int domain,
- int type,
- int protocol);
-
-// SocketpairGoferSocketPairCreator returns a Creator<SocketPair> that
-// obtains file descriptors by connect() syscalls on two sockets with socketpair
-// gofer paths.
-Creator<SocketPair> SocketpairGoferSocketPairCreator(int domain, int type,
- int protocol);
-
-// SocketpairGoferFileSocketPairCreator returns a Creator<SocketPair> that
-// obtains file descriptors by open() syscalls on socketpair gofer paths.
-Creator<SocketPair> SocketpairGoferFileSocketPairCreator(int flags);
-
-// FilesystemAcceptBindSocketPairCreator returns a Creator<SocketPair> that
-// obtains file descriptors by invoking the accept() and bind() syscalls on
-// a filesystem path. Only works for STREAM and SEQPACKET sockets.
-Creator<SocketPair> FilesystemAcceptBindSocketPairCreator(int domain, int type,
- int protocol);
-
-// AbstractAcceptBindSocketPairCreator returns a Creator<SocketPair> that
- // obtains file descriptors by invoking the accept() and bind() syscalls on an
-// abstract namespace path. Only works for STREAM and SEQPACKET sockets.
-Creator<SocketPair> AbstractAcceptBindSocketPairCreator(int domain, int type,
- int protocol);
-
-// FilesystemUnboundSocketPairCreator returns a Creator<SocketPair> that obtains
-// file descriptors by invoking the socket() syscall and generates a filesystem
-// path for binding.
-Creator<SocketPair> FilesystemUnboundSocketPairCreator(int domain, int type,
- int protocol);
-
-// AbstractUnboundSocketPairCreator returns a Creator<SocketPair> that obtains
-// file descriptors by invoking the socket() syscall and generates an abstract
-// path for binding.
-Creator<SocketPair> AbstractUnboundSocketPairCreator(int domain, int type,
- int protocol);
-
-// TCPAcceptBindSocketPairCreator returns a Creator<SocketPair> that obtains
-// file descriptors by invoking the accept() and bind() syscalls on TCP sockets.
-Creator<SocketPair> TCPAcceptBindSocketPairCreator(int domain, int type,
- int protocol,
- bool dual_stack);
-
-// TCPAcceptBindPersistentListenerSocketPairCreator is like
-// TCPAcceptBindSocketPairCreator, except it uses the same listening socket to
-// create all SocketPairs.
-Creator<SocketPair> TCPAcceptBindPersistentListenerSocketPairCreator(
- int domain, int type, int protocol, bool dual_stack);
-
-// UDPBidirectionalBindSocketPairCreator returns a Creator<SocketPair> that
-// obtains file descriptors by invoking the bind() and connect() syscalls on UDP
-// sockets.
-Creator<SocketPair> UDPBidirectionalBindSocketPairCreator(int domain, int type,
- int protocol,
- bool dual_stack);
-
-// UDPUnboundSocketPairCreator returns a Creator<SocketPair> that obtains file
-// descriptors by creating UDP sockets.
-Creator<SocketPair> UDPUnboundSocketPairCreator(int domain, int type,
- int protocol, bool dual_stack);
-
-// UnboundSocketCreator returns a Creator<FileDescriptor> that obtains a file
-// descriptor by creating a socket.
-Creator<FileDescriptor> UnboundSocketCreator(int domain, int type,
- int protocol);
-
-// A SocketPairKind couples a human-readable description of a socket pair with
-// a function that creates such a socket pair.
-struct SocketPairKind {
- std::string description;
- int domain;
- int type;
- int protocol;
- Creator<SocketPair> creator;
-
- // Create creates a socket pair of this kind.
- PosixErrorOr<std::unique_ptr<SocketPair>> Create() const { return creator(); }
-};
-
-// A SocketKind couples a human-readable description of a socket with
-// a function that creates such a socket.
-struct SocketKind {
- std::string description;
- int domain;
- int type;
- int protocol;
- Creator<FileDescriptor> creator;
-
- // Create creates a socket of this kind.
- PosixErrorOr<std::unique_ptr<FileDescriptor>> Create() const {
- return creator();
- }
-};
-
-// A ReversedSocketPair wraps another SocketPair but flips the first and second
-// file descriptors. ReversedSocketPair is used to test socket pairs that
-// should be symmetric.
-class ReversedSocketPair : public SocketPair {
- public:
- explicit ReversedSocketPair(std::unique_ptr<SocketPair> base)
- : base_(std::move(base)) {}
-
- int first_fd() const override { return base_->second_fd(); }
- int second_fd() const override { return base_->first_fd(); }
- int release_first_fd() override { return base_->release_second_fd(); }
- int release_second_fd() override { return base_->release_first_fd(); }
- const struct sockaddr* first_addr() const override {
- return base_->second_addr();
- }
- const struct sockaddr* second_addr() const override {
- return base_->first_addr();
- }
- size_t first_addr_size() const override { return base_->second_addr_size(); }
- size_t second_addr_size() const override { return base_->first_addr_size(); }
- size_t first_addr_len() const override { return base_->second_addr_len(); }
- size_t second_addr_len() const override { return base_->first_addr_len(); }
-
- private:
- std::unique_ptr<SocketPair> base_;
-};
-
-// Reversed returns a SocketPairKind that represents SocketPairs created by
-// flipping the file descriptors provided by another SocketPair.
-SocketPairKind Reversed(SocketPairKind const& base);
-
- // IncludeReversals returns a vector<SocketPairKind> that contains all
-// SocketPairKinds in `vec` as well as all SocketPairKinds obtained by flipping
-// the file descriptors provided by the kinds in `vec`.
-std::vector<SocketPairKind> IncludeReversals(std::vector<SocketPairKind> vec);
-
- // A Middleware is a function that wraps a SocketPairKind.
-using Middleware = std::function<SocketPairKind(SocketPairKind)>;
-
- // SetSockOpt returns a Middleware that applies the given socket option to
- // both file descriptors of every SocketPair created by the wrapped
- // SocketPairKind.
-template <typename T>
-Middleware SetSockOpt(int level, int optname, T* value) {
- return [=](SocketPairKind const& base) {
- auto const& creator = base.creator;
- return SocketPairKind{
- absl::StrCat("setsockopt(", level, ", ", optname, ", ", *value, ") ",
- base.description),
- base.domain, base.type, base.protocol,
- [creator, level, optname,
- value]() -> PosixErrorOr<std::unique_ptr<SocketPair>> {
- ASSIGN_OR_RETURN_ERRNO(auto creator_value, creator());
- if (creator_value->first_fd() >= 0) {
- RETURN_ERROR_IF_SYSCALL_FAIL(setsockopt(
- creator_value->first_fd(), level, optname, value, sizeof(T)));
- }
- if (creator_value->second_fd() >= 0) {
- RETURN_ERROR_IF_SYSCALL_FAIL(setsockopt(
- creator_value->second_fd(), level, optname, value, sizeof(T)));
- }
- return creator_value;
- }};
- };
-}
-
-constexpr int kSockOptOn = 1;
-constexpr int kSockOptOff = 0;
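A usage sketch, assuming base_kind is some existing SocketPairKind: the SetSockOpt middleware wraps a kind so that both ends of every created pair get the option, and it composes with IncludeReversals when building test parameters.

  SocketPairKind reuse_kind =
      SetSockOpt(SOL_SOCKET, SO_REUSEADDR, &kSockOptOn)(base_kind);
  std::vector<SocketPairKind> kinds = IncludeReversals({reuse_kind});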
-
-// NoOp returns the same SocketPairKind that it is passed.
-SocketPairKind NoOp(SocketPairKind const& base);
-
- // TransferTest tests that data can be sent back and forth between two
-// specified FDs. Note that calls to this function should be wrapped in
-// ASSERT_NO_FATAL_FAILURE().
-void TransferTest(int fd1, int fd2);
-
-// Fills [buf, buf+len) with random bytes.
-void RandomizeBuffer(char* buf, size_t len);
-
-// Base test fixture for tests that operate on pairs of connected sockets.
-class SocketPairTest : public ::testing::TestWithParam<SocketPairKind> {
- protected:
- SocketPairTest() {
- // gUnit uses printf, so we will too.
- printf("Testing with %s\n", GetParam().description.c_str());
- fflush(stdout);
- }
-
- PosixErrorOr<std::unique_ptr<SocketPair>> NewSocketPair() const {
- return GetParam().Create();
- }
-};
-
-// Base test fixture for tests that operate on simple Sockets.
-class SimpleSocketTest : public ::testing::TestWithParam<SocketKind> {
- protected:
- SimpleSocketTest() {
- // gUnit uses printf, so we will too.
- printf("Testing with %s\n", GetParam().description.c_str());
- }
-
- PosixErrorOr<std::unique_ptr<FileDescriptor>> NewSocket() const {
- return GetParam().Create();
- }
-};
-
-SocketKind SimpleSocket(int fam, int type, int proto);
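A minimal sketch of instantiating the fixture above with SimpleSocket kinds (the suite name and the chosen families are illustrative):

  INSTANTIATE_TEST_SUITE_P(
      ExampleSockets, SimpleSocketTest,
      ::testing::Values(SimpleSocket(AF_INET, SOCK_STREAM, 0),
                        SimpleSocket(AF_INET6, SOCK_DGRAM, 0)));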
-
-// Send a buffer of size 'size' to sockets->first_fd(), returning the result of
-// sendmsg.
-//
- // If reader is true, read from second_fd() until size bytes have been read.
-ssize_t SendLargeSendMsg(const std::unique_ptr<SocketPair>& sockets,
- size_t size, bool reader);
-
-// Initializes the given buffer with random data.
-void RandomizeBuffer(char* ptr, size_t len);
-
-enum class AddressFamily { kIpv4 = 1, kIpv6 = 2, kDualStack = 3 };
-enum class SocketType { kUdp = 1, kTcp = 2 };
-
-// Returns a PosixError or a port that is available. If 0 is specified as the
-// port it will bind port 0 (and allow the kernel to select any free port).
-// Otherwise, it will try to bind the specified port and validate that it can be
-// used for the requested family and socket type. The final option is
-// reuse_addr. This specifies whether SO_REUSEADDR should be applied before a
-// bind(2) attempt. SO_REUSEADDR means that sockets in TIME_WAIT states or other
-// bound UDP sockets would not cause an error on bind(2). This option should be
-// set if subsequent calls to bind on the returned port will also use
-// SO_REUSEADDR.
-//
- // Note that this helper will attempt to bind the ANY address for the
- // respective protocol.
-PosixErrorOr<int> PortAvailable(int port, AddressFamily family, SocketType type,
- bool reuse_addr);
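For example, a test that only needs some free TCP port might call it as follows (a sketch; the arguments shown are illustrative):

  const int port = ASSERT_NO_ERRNO_AND_VALUE(
      PortAvailable(0, AddressFamily::kIpv4, SocketType::kTcp,
                    /* reuse_addr= */ false));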
-
-// FreeAvailablePort is used to return a port that was obtained by using
-// the PortAvailable helper with port 0.
-PosixError FreeAvailablePort(int port);
-
-// SendMsg converts a buffer to an iovec and adds it to msg before sending it.
-PosixErrorOr<int> SendMsg(int sock, msghdr* msg, char buf[], int buf_size);
-
-// RecvTimeout calls select on sock with timeout and then calls recv on sock.
-PosixErrorOr<int> RecvTimeout(int sock, char buf[], int buf_size, int timeout);
-
-// RecvMsgTimeout calls select on sock with timeout and then calls recvmsg on
-// sock.
-PosixErrorOr<int> RecvMsgTimeout(int sock, msghdr* msg, int timeout);
-
-// RecvNoData checks that no data is receivable on sock.
-void RecvNoData(int sock);
-
-// Base test fixture for tests that apply to all kinds of pairs of connected
-// sockets.
-using AllSocketPairTest = SocketPairTest;
-
-struct TestAddress {
- std::string description;
- sockaddr_storage addr;
- socklen_t addr_len;
-
- explicit TestAddress(std::string description = "")
- : description(std::move(description)), addr(), addr_len() {}
-
- int family() const { return addr.ss_family; }
-
- // Returns a new TestAddress with the specified port. If the address family
- // does not support ports, the same TestAddress is returned.
- TestAddress WithPort(uint16_t port) const;
-};
-
-constexpr char kMulticastAddress[] = "224.0.2.1";
-constexpr char kBroadcastAddress[] = "255.255.255.255";
-
-// Returns a TestAddress with `addr` parsed as an IPv4 address described by
-// `description`.
-TestAddress V4AddrStr(std::string description, const char* addr);
-// Returns a TestAddress with `addr` parsed as an IPv6 address described by
-// `description`.
-TestAddress V6AddrStr(std::string description, const char* addr);
-
-// Returns a TestAddress for the IPv4 any address.
-TestAddress V4Any();
-// Returns a TestAddress for the IPv4 limited broadcast address.
-TestAddress V4Broadcast();
-// Returns a TestAddress for the IPv4 loopback address.
-TestAddress V4Loopback();
-// Returns a TestAddress for the subnet broadcast of the IPv4 loopback address.
-TestAddress V4LoopbackSubnetBroadcast();
-// Returns a TestAddress for the IPv4-mapped IPv6 any address.
-TestAddress V4MappedAny();
-// Returns a TestAddress for the IPv4-mapped IPv6 loopback address.
-TestAddress V4MappedLoopback();
- // Returns a TestAddress for an IPv4 multicast address.
-TestAddress V4Multicast();
-// Returns a TestAddress for the IPv4 all-hosts multicast group address.
-TestAddress V4MulticastAllHosts();
-
-// Returns a TestAddress for the IPv6 any address.
-TestAddress V6Any();
-// Returns a TestAddress for the IPv6 loopback address.
-TestAddress V6Loopback();
- // Returns a TestAddress for an IPv6 multicast address.
-TestAddress V6Multicast();
-// Returns a TestAddress for the IPv6 interface-local all-nodes multicast group
-// address.
-TestAddress V6MulticastInterfaceLocalAllNodes();
-// Returns a TestAddress for the IPv6 link-local all-nodes multicast group
-// address.
-TestAddress V6MulticastLinkLocalAllNodes();
-// Returns a TestAddress for the IPv6 link-local all-routers multicast group
-// address.
-TestAddress V6MulticastLinkLocalAllRouters();
-
-// Compute the internet checksum of an IP header.
-uint16_t IPChecksum(struct iphdr ip);
-
-// Compute the internet checksum of a UDP header.
-uint16_t UDPChecksum(struct iphdr iphdr, struct udphdr udphdr,
- const char* payload, ssize_t payload_len);
-
-// Compute the internet checksum of an ICMP header.
-uint16_t ICMPChecksum(struct icmphdr icmphdr, const char* payload,
- ssize_t payload_len);
-
- // Convenience functions for reinterpreting common types as sockaddr pointers.
-inline sockaddr* AsSockAddr(sockaddr_storage* s) {
- return reinterpret_cast<sockaddr*>(s);
-}
-inline sockaddr* AsSockAddr(sockaddr_in* s) {
- return reinterpret_cast<sockaddr*>(s);
-}
-inline sockaddr* AsSockAddr(sockaddr_in6* s) {
- return reinterpret_cast<sockaddr*>(s);
-}
-inline sockaddr* AsSockAddr(sockaddr_un* s) {
- return reinterpret_cast<sockaddr*>(s);
-}
-
-PosixErrorOr<uint16_t> AddrPort(int family, sockaddr_storage const& addr);
-
-PosixError SetAddrPort(int family, sockaddr_storage* addr, uint16_t port);
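A short sketch combining these helpers to bind a TestAddress inside a test body (the port value 1234 is arbitrary and purely illustrative):

  FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(
      Socket(AF_INET, SOCK_STREAM, IPPROTO_TCP));
  TestAddress laddr = V4Loopback();
  ASSERT_NO_ERRNO(SetAddrPort(laddr.family(), &laddr.addr, htons(1234)));
  ASSERT_THAT(bind(fd.get(), AsSockAddr(&laddr.addr), laddr.addr_len),
              SyscallSucceeds());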
-
- // SetupTimeWaitClose sets up a socket endpoint in the TIME_WAIT state.
- // Callers can choose to perform the active close on either end of the
- // connection and can also specify whether SO_REUSEADDR should be enabled.
-void SetupTimeWaitClose(const TestAddress* listener,
- const TestAddress* connector, bool reuse,
- bool accept_close, sockaddr_storage* listen_addr,
- sockaddr_storage* conn_bound_addr);
-
- // MaybeLimitEphemeralPorts attempts to shrink the ephemeral port range and
- // returns the number of ports in the resulting range.
-PosixErrorOr<int> MaybeLimitEphemeralPorts();
-
-namespace internal {
-PosixErrorOr<int> TryPortAvailable(int port, AddressFamily family,
- SocketType type, bool reuse_addr);
-} // namespace internal
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_SOCKET_TEST_UTIL_H_
diff --git a/test/syscalls/linux/socket_test_util_impl.cc b/test/syscalls/linux/socket_test_util_impl.cc
deleted file mode 100644
index ef661a0e3..000000000
--- a/test/syscalls/linux/socket_test_util_impl.cc
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/socket_test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-PosixErrorOr<int> PortAvailable(int port, AddressFamily family, SocketType type,
- bool reuse_addr) {
- return internal::TryPortAvailable(port, family, type, reuse_addr);
-}
-
-PosixError FreeAvailablePort(int port) { return NoError(); }
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_unix.cc b/test/syscalls/linux/socket_unix.cc
deleted file mode 100644
index 591cab3fd..000000000
--- a/test/syscalls/linux/socket_unix.cc
+++ /dev/null
@@ -1,274 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/socket_unix.h"
-
-#include <errno.h>
-#include <net/if.h>
-#include <stdio.h>
-#include <sys/ioctl.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <sys/un.h>
-
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "absl/strings/string_view.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-// This file contains tests specific to Unix domain sockets. It does not contain
-// tests for UDS control messages. Those belong in socket_unix_cmsg.cc.
-//
-// This file is a generic socket test file. It must be built with another file
-// that provides the test types.
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST_P(UnixSocketPairTest, InvalidGetSockOpt) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- int opt;
- socklen_t optlen = sizeof(opt);
- EXPECT_THAT(getsockopt(sockets->first_fd(), SOL_SOCKET, -1, &opt, &optlen),
- SyscallFailsWithErrno(ENOPROTOOPT));
-}
-
-TEST_P(UnixSocketPairTest, BindToBadName) {
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- constexpr char kBadName[] = "/some/path/that/does/not/exist";
- sockaddr_un sockaddr;
- sockaddr.sun_family = AF_LOCAL;
- memcpy(sockaddr.sun_path, kBadName, sizeof(kBadName));
-
- EXPECT_THAT(
- bind(pair->first_fd(), reinterpret_cast<struct sockaddr*>(&sockaddr),
- sizeof(sockaddr)),
- SyscallFailsWithErrno(ENOENT));
-}
-
-TEST_P(UnixSocketPairTest, BindToBadFamily) {
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- constexpr char kBadName[] = "/some/path/that/does/not/exist";
- sockaddr_un sockaddr;
- sockaddr.sun_family = AF_INET;
- memcpy(sockaddr.sun_path, kBadName, sizeof(kBadName));
-
- EXPECT_THAT(
- bind(pair->first_fd(), reinterpret_cast<struct sockaddr*>(&sockaddr),
- sizeof(sockaddr)),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_P(UnixSocketPairTest, RecvmmsgTimeoutAfterRecv) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- char sent_data[10];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- char received_data[sizeof(sent_data) * 2];
- std::vector<struct mmsghdr> msgs(2);
- std::vector<struct iovec> iovs(msgs.size());
- const int chunk_size = sizeof(received_data) / msgs.size();
- for (size_t i = 0; i < msgs.size(); i++) {
- iovs[i].iov_len = chunk_size;
- iovs[i].iov_base = &received_data[i * chunk_size];
- msgs[i].msg_hdr.msg_iov = &iovs[i];
- msgs[i].msg_hdr.msg_iovlen = 1;
- }
-
- ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data, sizeof(sent_data)),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- struct timespec timeout = {0, 1};
- ASSERT_THAT(RetryEINTR(recvmmsg)(sockets->second_fd(), &msgs[0], msgs.size(),
- 0, &timeout),
- SyscallSucceedsWithValue(1));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-
- EXPECT_EQ(chunk_size, msgs[0].msg_len);
-}
-
-TEST_P(UnixSocketPairTest, TIOCINQSucceeds) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- if (IsRunningOnGvisor()) {
- // TODO(gvisor.dev/issue/273): Inherited host UDS don't support TIOCINQ.
- // Skip the test.
- int size = -1;
- int ret = ioctl(sockets->first_fd(), TIOCINQ, &size);
- SKIP_IF(ret == -1 && errno == ENOTTY);
- }
-
- int size = -1;
- EXPECT_THAT(ioctl(sockets->first_fd(), TIOCINQ, &size), SyscallSucceeds());
- EXPECT_EQ(size, 0);
-
- const char some_data[] = "dangerzone";
- ASSERT_THAT(
- RetryEINTR(send)(sockets->second_fd(), &some_data, sizeof(some_data), 0),
- SyscallSucceeds());
- EXPECT_THAT(ioctl(sockets->first_fd(), TIOCINQ, &size), SyscallSucceeds());
- EXPECT_EQ(size, sizeof(some_data));
-
- // Linux only reports the first message's size, which is wrong. We test for
- // the behavior described in the man page.
- SKIP_IF(!IsRunningOnGvisor());
-
- ASSERT_THAT(
- RetryEINTR(send)(sockets->second_fd(), &some_data, sizeof(some_data), 0),
- SyscallSucceeds());
- EXPECT_THAT(ioctl(sockets->first_fd(), TIOCINQ, &size), SyscallSucceeds());
- EXPECT_EQ(size, sizeof(some_data) * 2);
-}
-
-TEST_P(UnixSocketPairTest, TIOCOUTQSucceeds) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- if (IsRunningOnGvisor()) {
- // TODO(gvisor.dev/issue/273): Inherited host UDS don't support TIOCOUTQ.
- // Skip the test.
- int size = -1;
- int ret = ioctl(sockets->second_fd(), TIOCOUTQ, &size);
- SKIP_IF(ret == -1 && errno == ENOTTY);
- }
-
- int size = -1;
- EXPECT_THAT(ioctl(sockets->second_fd(), TIOCOUTQ, &size), SyscallSucceeds());
- EXPECT_EQ(size, 0);
-
- // Linux reports bogus numbers which are related to its internal allocations.
- // We test for the behavior described in the man page.
- SKIP_IF(!IsRunningOnGvisor());
-
- const char some_data[] = "dangerzone";
- ASSERT_THAT(
- RetryEINTR(send)(sockets->second_fd(), &some_data, sizeof(some_data), 0),
- SyscallSucceeds());
- EXPECT_THAT(ioctl(sockets->second_fd(), TIOCOUTQ, &size), SyscallSucceeds());
- EXPECT_EQ(size, sizeof(some_data));
-
- ASSERT_THAT(
- RetryEINTR(send)(sockets->second_fd(), &some_data, sizeof(some_data), 0),
- SyscallSucceeds());
- EXPECT_THAT(ioctl(sockets->second_fd(), TIOCOUTQ, &size), SyscallSucceeds());
- EXPECT_EQ(size, sizeof(some_data) * 2);
-}
-
-TEST_P(UnixSocketPairTest, NetdeviceIoctlsSucceed) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- // Prepare the request.
- struct ifreq ifr;
- snprintf(ifr.ifr_name, IFNAMSIZ, "lo");
-
- // Check that the ioctl either succeeds or fails with ENODEV.
- int err = ioctl(sockets->first_fd(), SIOCGIFINDEX, &ifr);
- if (err < 0) {
- ASSERT_EQ(errno, ENODEV);
- }
-}
-
-TEST_P(UnixSocketPairTest, Shutdown) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- const std::string data = "abc";
- ASSERT_THAT(WriteFd(sockets->first_fd(), data.c_str(), data.size()),
- SyscallSucceedsWithValue(data.size()));
-
- ASSERT_THAT(shutdown(sockets->first_fd(), SHUT_RDWR), SyscallSucceeds());
- ASSERT_THAT(shutdown(sockets->second_fd(), SHUT_RDWR), SyscallSucceeds());
-
- // Shutting down a socket does not clear the buffer.
- char buf[3];
- ASSERT_THAT(ReadFd(sockets->second_fd(), buf, data.size()),
- SyscallSucceedsWithValue(data.size()));
- EXPECT_EQ(data, absl::string_view(buf, data.size()));
-}
-
-TEST_P(UnixSocketPairTest, ShutdownRead) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(shutdown(sockets->first_fd(), SHUT_RD), SyscallSucceeds());
-
- // When the socket is shut down for read, read behavior varies between
- // different socket types. This is covered by the various ReadOneSideClosed
- // test cases.
-
- // ... and the peer cannot write.
- const std::string data = "abc";
- EXPECT_THAT(WriteFd(sockets->second_fd(), data.c_str(), data.size()),
- SyscallFailsWithErrno(EPIPE));
-
- // ... but the socket can still write.
- ASSERT_THAT(WriteFd(sockets->first_fd(), data.c_str(), data.size()),
- SyscallSucceedsWithValue(data.size()));
-
- // ... and the peer can still read.
- char buf[3];
- EXPECT_THAT(ReadFd(sockets->second_fd(), buf, data.size()),
- SyscallSucceedsWithValue(data.size()));
- EXPECT_EQ(data, absl::string_view(buf, data.size()));
-}
-
-TEST_P(UnixSocketPairTest, ShutdownWrite) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(shutdown(sockets->first_fd(), SHUT_WR), SyscallSucceeds());
-
- // When the socket is shut down for write, it cannot write.
- const std::string data = "abc";
- EXPECT_THAT(WriteFd(sockets->first_fd(), data.c_str(), data.size()),
- SyscallFailsWithErrno(EPIPE));
-
- // ... and the peer's read behavior varies between different socket types. This
- // is covered by the various ReadOneSideClosed test cases.
-
- // ... but the peer can still write.
- char buf[3];
- ASSERT_THAT(WriteFd(sockets->second_fd(), data.c_str(), data.size()),
- SyscallSucceedsWithValue(data.size()));
-
- // ... and the socket can still read.
- EXPECT_THAT(ReadFd(sockets->first_fd(), buf, data.size()),
- SyscallSucceedsWithValue(data.size()));
- EXPECT_EQ(data, absl::string_view(buf, data.size()));
-}
-
-TEST_P(UnixSocketPairTest, SocketReopenFromProcfs) {
- // TODO(gvisor.dev/issue/1624): In VFS1, we return EIO instead of ENXIO (see
- // b/122310852). Remove this skip once VFS1 is deleted.
- SKIP_IF(IsRunningWithVFS1());
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- // Opening a socket pair via /proc/self/fd/X fails with ENXIO.
- for (const int fd : {sockets->first_fd(), sockets->second_fd()}) {
- ASSERT_THAT(Open(absl::StrCat("/proc/self/fd/", fd), O_WRONLY),
- PosixErrorIs(ENXIO, ::testing::_));
- }
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_unix.h b/test/syscalls/linux/socket_unix.h
deleted file mode 100644
index 3625cc404..000000000
--- a/test/syscalls/linux/socket_unix.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_LINUX_SOCKET_UNIX_H_
-#define GVISOR_TEST_SYSCALLS_LINUX_SOCKET_UNIX_H_
-
-#include "test/syscalls/linux/socket_test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Test fixture for tests that apply to pairs of connected unix sockets.
-using UnixSocketPairTest = SocketPairTest;
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_LINUX_SOCKET_UNIX_H_
diff --git a/test/syscalls/linux/socket_unix_abstract_nonblock.cc b/test/syscalls/linux/socket_unix_abstract_nonblock.cc
deleted file mode 100644
index 8bef76b67..000000000
--- a/test/syscalls/linux/socket_unix_abstract_nonblock.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <vector>
-
-#include "test/syscalls/linux/socket_non_blocking.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-std::vector<SocketPairKind> GetSocketPairs() {
- return ApplyVec<SocketPairKind>(
- AbstractBoundUnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_STREAM, SOCK_DGRAM, SOCK_SEQPACKET},
- List<int>{SOCK_NONBLOCK}));
-}
-
-INSTANTIATE_TEST_SUITE_P(
- NonBlockingAbstractUnixSockets, NonBlockingSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_unix_blocking_local.cc b/test/syscalls/linux/socket_unix_blocking_local.cc
deleted file mode 100644
index 77cb8c6d6..000000000
--- a/test/syscalls/linux/socket_unix_blocking_local.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <vector>
-
-#include "test/syscalls/linux/socket_blocking.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-std::vector<SocketPairKind> GetSocketPairs() {
- return VecCat<SocketPairKind>(
- ApplyVec<SocketPairKind>(
- UnixDomainSocketPair,
- std::vector<int>{SOCK_STREAM, SOCK_SEQPACKET, SOCK_DGRAM}),
- ApplyVec<SocketPairKind>(
- FilesystemBoundUnixDomainSocketPair,
- std::vector<int>{SOCK_STREAM, SOCK_SEQPACKET, SOCK_DGRAM}),
- ApplyVec<SocketPairKind>(
- AbstractBoundUnixDomainSocketPair,
- std::vector<int>{SOCK_STREAM, SOCK_SEQPACKET, SOCK_DGRAM}));
-}
-
-INSTANTIATE_TEST_SUITE_P(
- BlockingUnixDomainSockets, BlockingSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_unix_cmsg.cc b/test/syscalls/linux/socket_unix_cmsg.cc
deleted file mode 100644
index 22a4ee0d1..000000000
--- a/test/syscalls/linux/socket_unix_cmsg.cc
+++ /dev/null
@@ -1,1501 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/socket_unix_cmsg.h"
-
-#include <errno.h>
-#include <net/if.h>
-#include <stdio.h>
-#include <sys/ioctl.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <sys/un.h>
-
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "absl/strings/string_view.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-// This file contains tests for control messages in Unix domain sockets.
-//
-// This file is a generic socket test file. It must be built with another file
-// that provides the test types.
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST_P(UnixSocketPairCmsgTest, BasicFDPass) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(),
- sent_data, sizeof(sent_data)));
-
- char received_data[20];
- int fd = -1;
- ASSERT_NO_FATAL_FAILURE(RecvSingleFD(sockets->second_fd(), &fd, received_data,
- sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-
- ASSERT_NO_FATAL_FAILURE(TransferTest(fd, pair->first_fd()));
-}
-
-TEST_P(UnixSocketPairCmsgTest, BasicTwoFDPass) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- auto pair1 =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
- auto pair2 =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
- int sent_fds[] = {pair1->second_fd(), pair2->second_fd()};
-
- ASSERT_NO_FATAL_FAILURE(
- SendFDs(sockets->first_fd(), sent_fds, 2, sent_data, sizeof(sent_data)));
-
- char received_data[20];
- int received_fds[] = {-1, -1};
-
- ASSERT_NO_FATAL_FAILURE(RecvFDs(sockets->second_fd(), received_fds, 2,
- received_data, sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-
- ASSERT_NO_FATAL_FAILURE(TransferTest(received_fds[0], pair1->first_fd()));
- ASSERT_NO_FATAL_FAILURE(TransferTest(received_fds[1], pair2->first_fd()));
-}
-
-TEST_P(UnixSocketPairCmsgTest, BasicThreeFDPass) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- auto pair1 =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
- auto pair2 =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
- auto pair3 =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
- int sent_fds[] = {pair1->second_fd(), pair2->second_fd(), pair3->second_fd()};
-
- ASSERT_NO_FATAL_FAILURE(
- SendFDs(sockets->first_fd(), sent_fds, 3, sent_data, sizeof(sent_data)));
-
- char received_data[20];
- int received_fds[] = {-1, -1, -1};
-
- ASSERT_NO_FATAL_FAILURE(RecvFDs(sockets->second_fd(), received_fds, 3,
- received_data, sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-
- ASSERT_NO_FATAL_FAILURE(TransferTest(received_fds[0], pair1->first_fd()));
- ASSERT_NO_FATAL_FAILURE(TransferTest(received_fds[1], pair2->first_fd()));
- ASSERT_NO_FATAL_FAILURE(TransferTest(received_fds[2], pair3->first_fd()));
-}
-
-TEST_P(UnixSocketPairCmsgTest, BadFDPass) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- int sent_fd = -1;
-
- struct msghdr msg = {};
- char control[CMSG_SPACE(sizeof(sent_fd))];
- msg.msg_control = control;
- msg.msg_controllen = sizeof(control);
-
- struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
- cmsg->cmsg_len = CMSG_LEN(sizeof(sent_fd));
- cmsg->cmsg_level = SOL_SOCKET;
- cmsg->cmsg_type = SCM_RIGHTS;
- memcpy(CMSG_DATA(cmsg), &sent_fd, sizeof(sent_fd));
-
- struct iovec iov;
- iov.iov_base = sent_data;
- iov.iov_len = sizeof(sent_data);
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(sendmsg)(sockets->first_fd(), &msg, 0),
- SyscallFailsWithErrno(EBADF));
-}
-
-TEST_P(UnixSocketPairCmsgTest, ShortCmsg) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- int sent_fd = -1;
-
- struct msghdr msg = {};
- char control[CMSG_SPACE(sizeof(sent_fd))];
- msg.msg_control = control;
- msg.msg_controllen = sizeof(control);
-
- struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
- cmsg->cmsg_len = 1;
- cmsg->cmsg_level = SOL_SOCKET;
- cmsg->cmsg_type = SCM_RIGHTS;
- memcpy(CMSG_DATA(cmsg), &sent_fd, sizeof(sent_fd));
-
- struct iovec iov;
- iov.iov_base = sent_data;
- iov.iov_len = sizeof(sent_data);
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(sendmsg)(sockets->first_fd(), &msg, 0),
- SyscallFailsWithErrno(EINVAL));
-}
-
-// BasicFDPassNoSpace starts off by sending a single FD just like BasicFDPass.
-// The difference is that when calling recvmsg, no space for FDs is provided,
-// only space for the cmsg header.
-TEST_P(UnixSocketPairCmsgTest, BasicFDPassNoSpace) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(),
- sent_data, sizeof(sent_data)));
-
- char received_data[20];
-
- struct msghdr msg = {};
- std::vector<char> control(CMSG_SPACE(0));
- msg.msg_control = &control[0];
- msg.msg_controllen = control.size();
-
- struct iovec iov;
- iov.iov_base = received_data;
- iov.iov_len = sizeof(received_data);
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0),
- SyscallSucceedsWithValue(sizeof(received_data)));
-
- EXPECT_EQ(msg.msg_controllen, 0);
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-}
-
-// BasicFDPassNoSpaceMsgCtrunc sends an FD, but does not provide any space to
-// receive it. It then verifies that the MSG_CTRUNC flag is set in the msghdr.
-TEST_P(UnixSocketPairCmsgTest, BasicFDPassNoSpaceMsgCtrunc) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(),
- sent_data, sizeof(sent_data)));
-
- struct msghdr msg = {};
- std::vector<char> control(CMSG_SPACE(0));
- msg.msg_control = &control[0];
- msg.msg_controllen = control.size();
-
- char received_data[sizeof(sent_data)];
- struct iovec iov;
- iov.iov_base = received_data;
- iov.iov_len = sizeof(received_data);
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0),
- SyscallSucceedsWithValue(sizeof(received_data)));
-
- EXPECT_EQ(msg.msg_controllen, 0);
- EXPECT_EQ(msg.msg_flags, MSG_CTRUNC);
-}
-
-// BasicFDPassNullControlMsgCtrunc sends an FD and sets contradictory values for
-// msg_controllen and msg_control. msg_controllen is set to the correct size to
-// accommodate the FD, but msg_control is set to NULL. In this case, msg_control
-// should override msg_controllen.
-TEST_P(UnixSocketPairCmsgTest, BasicFDPassNullControlMsgCtrunc) {
- // FIXME(gvisor.dev/issue/207): Fix handling of NULL msg_control.
- SKIP_IF(IsRunningOnGvisor());
-
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(),
- sent_data, sizeof(sent_data)));
-
- struct msghdr msg = {};
- msg.msg_controllen = CMSG_SPACE(1);
-
- char received_data[sizeof(sent_data)];
- struct iovec iov;
- iov.iov_base = received_data;
- iov.iov_len = sizeof(received_data);
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0),
- SyscallSucceedsWithValue(sizeof(received_data)));
-
- EXPECT_EQ(msg.msg_controllen, 0);
- EXPECT_EQ(msg.msg_flags, MSG_CTRUNC);
-}
-
-// BasicFDPassNotEnoughSpaceMsgCtrunc sends an FD, but does not provide enough
-// space to receive it. It then verifies that the MSG_CTRUNC flag is set in the
-// msghdr.
-TEST_P(UnixSocketPairCmsgTest, BasicFDPassNotEnoughSpaceMsgCtrunc) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(),
- sent_data, sizeof(sent_data)));
-
- struct msghdr msg = {};
- std::vector<char> control(CMSG_SPACE(0) + 1);
- msg.msg_control = &control[0];
- msg.msg_controllen = control.size();
-
- char received_data[sizeof(sent_data)];
- struct iovec iov;
- iov.iov_base = received_data;
- iov.iov_len = sizeof(received_data);
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0),
- SyscallSucceedsWithValue(sizeof(received_data)));
-
- EXPECT_EQ(msg.msg_controllen, 0);
- EXPECT_EQ(msg.msg_flags, MSG_CTRUNC);
-}
-
-// BasicThreeFDPassTruncationMsgCtrunc sends three FDs, but only provides enough
-// space to receive two of them. It then verifies that the MSG_CTRUNC flag is
-// set in the msghdr.
-TEST_P(UnixSocketPairCmsgTest, BasicThreeFDPassTruncationMsgCtrunc) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- auto pair1 =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
- auto pair2 =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
- auto pair3 =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
- int sent_fds[] = {pair1->second_fd(), pair2->second_fd(), pair3->second_fd()};
-
- ASSERT_NO_FATAL_FAILURE(
- SendFDs(sockets->first_fd(), sent_fds, 3, sent_data, sizeof(sent_data)));
-
- struct msghdr msg = {};
- std::vector<char> control(CMSG_SPACE(2 * sizeof(int)));
- msg.msg_control = &control[0];
- msg.msg_controllen = control.size();
-
- char received_data[sizeof(sent_data)];
- struct iovec iov;
- iov.iov_base = received_data;
- iov.iov_len = sizeof(received_data);
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0),
- SyscallSucceedsWithValue(sizeof(received_data)));
-
- EXPECT_EQ(msg.msg_flags, MSG_CTRUNC);
-
- struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
- ASSERT_NE(cmsg, nullptr);
- EXPECT_EQ(cmsg->cmsg_len, CMSG_LEN(2 * sizeof(int)));
- EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET);
- EXPECT_EQ(cmsg->cmsg_type, SCM_RIGHTS);
-}
-
-// BasicFDPassUnalignedRecv starts off by sending a single FD just like
-// BasicFDPass. The difference is that when calling recvmsg, the length of the
-// receive data is only aligned on a 4 byte boundary instead of the normal 8.
-TEST_P(UnixSocketPairCmsgTest, BasicFDPassUnalignedRecv) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(),
- sent_data, sizeof(sent_data)));
-
- char received_data[20];
- int fd = -1;
- ASSERT_NO_FATAL_FAILURE(RecvSingleFDUnaligned(
- sockets->second_fd(), &fd, received_data, sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-
- ASSERT_NO_FATAL_FAILURE(TransferTest(fd, pair->first_fd()));
-}
-
-// BasicFDPassUnalignedRecvNoMsgTrunc sends one FD and provides only enough
-// space to receive that single FD. (Normally the minimum amount of space one
-// would provide would be enough for two FDs.) It then verifies that the
-// MSG_CTRUNC flag is not set in the msghdr.
-TEST_P(UnixSocketPairCmsgTest, BasicFDPassUnalignedRecvNoMsgTrunc) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(),
- sent_data, sizeof(sent_data)));
-
- struct msghdr msg = {};
- char control[CMSG_SPACE(sizeof(int)) - sizeof(int)];
- msg.msg_control = control;
- msg.msg_controllen = sizeof(control);
-
- char received_data[sizeof(sent_data)] = {};
- struct iovec iov;
- iov.iov_base = received_data;
- iov.iov_len = sizeof(received_data);
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0),
- SyscallSucceedsWithValue(sizeof(received_data)));
-
- EXPECT_EQ(msg.msg_flags, 0);
-
- struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
- ASSERT_NE(cmsg, nullptr);
- EXPECT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(int)));
- EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET);
- EXPECT_EQ(cmsg->cmsg_type, SCM_RIGHTS);
-}
-
-// BasicTwoFDPassUnalignedRecvTruncationMsgTrunc sends two FDs, but only
-// provides enough space to receive one of them. It then verifies that the
-// MSG_CTRUNC flag is set in the msghdr.
-TEST_P(UnixSocketPairCmsgTest, BasicTwoFDPassUnalignedRecvTruncationMsgTrunc) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
- int sent_fds[] = {pair->first_fd(), pair->second_fd()};
-
- ASSERT_NO_FATAL_FAILURE(
- SendFDs(sockets->first_fd(), sent_fds, 2, sent_data, sizeof(sent_data)));
-
- struct msghdr msg = {};
- // CMSG_SPACE rounds up to space for two FDs; we only want one.
- char control[CMSG_SPACE(sizeof(int)) - sizeof(int)];
- msg.msg_control = control;
- msg.msg_controllen = sizeof(control);
-
- char received_data[sizeof(sent_data)] = {};
- struct iovec iov;
- iov.iov_base = received_data;
- iov.iov_len = sizeof(received_data);
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0),
- SyscallSucceedsWithValue(sizeof(received_data)));
-
- EXPECT_EQ(msg.msg_flags, MSG_CTRUNC);
-
- struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
- ASSERT_NE(cmsg, nullptr);
- EXPECT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(int)));
- EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET);
- EXPECT_EQ(cmsg->cmsg_type, SCM_RIGHTS);
-}
-
-TEST_P(UnixSocketPairCmsgTest, ConcurrentBasicFDPass) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- int sockfd1 = sockets->first_fd();
- auto recv_func = [sockfd1, sent_data]() {
- char received_data[20];
- int fd = -1;
- RecvSingleFD(sockfd1, &fd, received_data, sizeof(received_data));
- ASSERT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
- char buf[20];
- ASSERT_THAT(ReadFd(fd, buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
- ASSERT_THAT(WriteFd(fd, buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
- };
-
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->second_fd(), pair->second_fd(),
- sent_data, sizeof(sent_data)));
-
- ScopedThread t(recv_func);
-
- RandomizeBuffer(sent_data, sizeof(sent_data));
- ASSERT_THAT(WriteFd(pair->first_fd(), sent_data, sizeof(sent_data)),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- char received_data[20];
- ASSERT_THAT(ReadFd(pair->first_fd(), received_data, sizeof(received_data)),
- SyscallSucceedsWithValue(sizeof(received_data)));
-
- t.Join();
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-}
-
-// FDPassNoRecv checks that the control message can be safely ignored by using
-// read(2) instead of recvmsg(2).
-TEST_P(UnixSocketPairCmsgTest, FDPassNoRecv) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(),
- sent_data, sizeof(sent_data)));
-
- // Read while ignoring the passed FD.
- char received_data[20];
- ASSERT_THAT(
- ReadFd(sockets->second_fd(), received_data, sizeof(received_data)),
- SyscallSucceedsWithValue(sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-
- // Check that the socket still works for reads and writes.
- ASSERT_NO_FATAL_FAILURE(
- TransferTest(sockets->first_fd(), sockets->second_fd()));
-}
-
-// FDPassInterspersed1 checks that sent control messages cannot be read before
-// their associated data has been read.
-TEST_P(UnixSocketPairCmsgTest, FDPassInterspersed1) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char written_data[20];
- RandomizeBuffer(written_data, sizeof(written_data));
-
- ASSERT_THAT(WriteFd(sockets->first_fd(), written_data, sizeof(written_data)),
- SyscallSucceedsWithValue(sizeof(written_data)));
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
- ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(),
- sent_data, sizeof(sent_data)));
-
- // Check that we don't get a control message, but do get the data.
- char received_data[20];
- RecvNoCmsg(sockets->second_fd(), received_data, sizeof(received_data));
- EXPECT_EQ(0, memcmp(written_data, received_data, sizeof(written_data)));
-}
-
-// FDPassInterspersed2 checks that sent control messages cannot be read after
-// their associated data has been read while ignoring the control message by
-// using read(2) instead of recvmsg(2).
-TEST_P(UnixSocketPairCmsgTest, FDPassInterspersed2) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(),
- sent_data, sizeof(sent_data)));
-
- char written_data[20];
- RandomizeBuffer(written_data, sizeof(written_data));
- ASSERT_THAT(WriteFd(sockets->first_fd(), written_data, sizeof(written_data)),
- SyscallSucceedsWithValue(sizeof(written_data)));
-
- char received_data[20];
- ASSERT_THAT(
- ReadFd(sockets->second_fd(), received_data, sizeof(received_data)),
- SyscallSucceedsWithValue(sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-
- ASSERT_NO_FATAL_FAILURE(
- RecvNoCmsg(sockets->second_fd(), received_data, sizeof(received_data)));
- EXPECT_EQ(0, memcmp(written_data, received_data, sizeof(written_data)));
-}
-
-TEST_P(UnixSocketPairCmsgTest, FDPassNotCoalesced) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data1[20];
- RandomizeBuffer(sent_data1, sizeof(sent_data1));
-
- auto pair1 =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair1->second_fd(),
- sent_data1, sizeof(sent_data1)));
-
- char sent_data2[20];
- RandomizeBuffer(sent_data2, sizeof(sent_data2));
-
- auto pair2 =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair2->second_fd(),
- sent_data2, sizeof(sent_data2)));
-
- char received_data1[sizeof(sent_data1) + sizeof(sent_data2)];
- int received_fd1 = -1;
-
- RecvSingleFD(sockets->second_fd(), &received_fd1, received_data1,
- sizeof(received_data1), sizeof(sent_data1));
-
- EXPECT_EQ(0, memcmp(sent_data1, received_data1, sizeof(sent_data1)));
- TransferTest(pair1->first_fd(), pair1->second_fd());
-
- char received_data2[sizeof(sent_data1) + sizeof(sent_data2)];
- int received_fd2 = -1;
-
- RecvSingleFD(sockets->second_fd(), &received_fd2, received_data2,
- sizeof(received_data2), sizeof(sent_data2));
-
- EXPECT_EQ(0, memcmp(sent_data2, received_data2, sizeof(sent_data2)));
- TransferTest(pair2->first_fd(), pair2->second_fd());
-}
-
-TEST_P(UnixSocketPairCmsgTest, FDPassPeek) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(),
- sent_data, sizeof(sent_data)));
-
- char peek_data[20];
- int peek_fd = -1;
- PeekSingleFD(sockets->second_fd(), &peek_fd, peek_data, sizeof(peek_data));
- EXPECT_EQ(0, memcmp(sent_data, peek_data, sizeof(sent_data)));
- TransferTest(peek_fd, pair->first_fd());
- EXPECT_THAT(close(peek_fd), SyscallSucceeds());
-
- char received_data[20];
- int received_fd = -1;
- RecvSingleFD(sockets->second_fd(), &received_fd, received_data,
- sizeof(received_data));
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
- TransferTest(received_fd, pair->first_fd());
- EXPECT_THAT(close(received_fd), SyscallSucceeds());
-}
-
-TEST_P(UnixSocketPairCmsgTest, BasicCredPass) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- struct ucred sent_creds;
-
- ASSERT_THAT(sent_creds.pid = getpid(), SyscallSucceeds());
- ASSERT_THAT(sent_creds.uid = getuid(), SyscallSucceeds());
- ASSERT_THAT(sent_creds.gid = getgid(), SyscallSucceeds());
-
- ASSERT_NO_FATAL_FAILURE(
- SendCreds(sockets->first_fd(), sent_creds, sent_data, sizeof(sent_data)));
-
- SetSoPassCred(sockets->second_fd());
-
- char received_data[20];
- struct ucred received_creds;
- ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds,
- received_data, sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
- EXPECT_EQ(sent_creds.pid, received_creds.pid);
- EXPECT_EQ(sent_creds.uid, received_creds.uid);
- EXPECT_EQ(sent_creds.gid, received_creds.gid);
-}
-
-TEST_P(UnixSocketPairCmsgTest, SendNullCredsBeforeSoPassCredRecvEnd) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- ASSERT_NO_FATAL_FAILURE(
- SendNullCmsg(sockets->first_fd(), sent_data, sizeof(sent_data)));
-
- SetSoPassCred(sockets->second_fd());
-
- char received_data[20];
- struct ucred received_creds;
- ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds,
- received_data, sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-
- struct ucred want_creds {
- 0, 65534, 65534
- };
-
- EXPECT_EQ(want_creds.pid, received_creds.pid);
- EXPECT_EQ(want_creds.uid, received_creds.uid);
- EXPECT_EQ(want_creds.gid, received_creds.gid);
-}
-
-TEST_P(UnixSocketPairCmsgTest, SendNullCredsAfterSoPassCredRecvEnd) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- SetSoPassCred(sockets->second_fd());
-
- ASSERT_NO_FATAL_FAILURE(
- SendNullCmsg(sockets->first_fd(), sent_data, sizeof(sent_data)));
-
- char received_data[20];
- struct ucred received_creds;
- ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds,
- received_data, sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-
- struct ucred want_creds;
- ASSERT_THAT(want_creds.pid = getpid(), SyscallSucceeds());
- ASSERT_THAT(want_creds.uid = getuid(), SyscallSucceeds());
- ASSERT_THAT(want_creds.gid = getgid(), SyscallSucceeds());
-
- EXPECT_EQ(want_creds.pid, received_creds.pid);
- EXPECT_EQ(want_creds.uid, received_creds.uid);
- EXPECT_EQ(want_creds.gid, received_creds.gid);
-}
-
-TEST_P(UnixSocketPairCmsgTest, SendNullCredsBeforeSoPassCredSendEnd) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- ASSERT_NO_FATAL_FAILURE(
- SendNullCmsg(sockets->first_fd(), sent_data, sizeof(sent_data)));
-
- SetSoPassCred(sockets->first_fd());
-
- char received_data[20];
- ASSERT_NO_FATAL_FAILURE(
- RecvNoCmsg(sockets->second_fd(), received_data, sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-}
-
-TEST_P(UnixSocketPairCmsgTest, SendNullCredsAfterSoPassCredSendEnd) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- SetSoPassCred(sockets->first_fd());
-
- ASSERT_NO_FATAL_FAILURE(
- SendNullCmsg(sockets->first_fd(), sent_data, sizeof(sent_data)));
-
- char received_data[20];
- ASSERT_NO_FATAL_FAILURE(
- RecvNoCmsg(sockets->second_fd(), received_data, sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-}
-
-TEST_P(UnixSocketPairCmsgTest,
- SendNullCredsBeforeSoPassCredRecvEndAfterSendEnd) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- SetSoPassCred(sockets->first_fd());
-
- ASSERT_NO_FATAL_FAILURE(
- SendNullCmsg(sockets->first_fd(), sent_data, sizeof(sent_data)));
-
- SetSoPassCred(sockets->second_fd());
-
- char received_data[20];
- struct ucred received_creds;
- ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds,
- received_data, sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-
- struct ucred want_creds;
- ASSERT_THAT(want_creds.pid = getpid(), SyscallSucceeds());
- ASSERT_THAT(want_creds.uid = getuid(), SyscallSucceeds());
- ASSERT_THAT(want_creds.gid = getgid(), SyscallSucceeds());
-
- EXPECT_EQ(want_creds.pid, received_creds.pid);
- EXPECT_EQ(want_creds.uid, received_creds.uid);
- EXPECT_EQ(want_creds.gid, received_creds.gid);
-}
-
-TEST_P(UnixSocketPairCmsgTest, WriteBeforeSoPassCredRecvEnd) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data, sizeof(sent_data)),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- SetSoPassCred(sockets->second_fd());
-
- char received_data[20];
-
- struct ucred received_creds;
- ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds,
- received_data, sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-
- struct ucred want_creds {
- 0, 65534, 65534
- };
-
- EXPECT_EQ(want_creds.pid, received_creds.pid);
- EXPECT_EQ(want_creds.uid, received_creds.uid);
- EXPECT_EQ(want_creds.gid, received_creds.gid);
-}
-
-TEST_P(UnixSocketPairCmsgTest, WriteAfterSoPassCredRecvEnd) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- SetSoPassCred(sockets->second_fd());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
- ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data, sizeof(sent_data)),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- char received_data[20];
-
- struct ucred received_creds;
- ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds,
- received_data, sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-
- struct ucred want_creds;
- ASSERT_THAT(want_creds.pid = getpid(), SyscallSucceeds());
- ASSERT_THAT(want_creds.uid = getuid(), SyscallSucceeds());
- ASSERT_THAT(want_creds.gid = getgid(), SyscallSucceeds());
-
- EXPECT_EQ(want_creds.pid, received_creds.pid);
- EXPECT_EQ(want_creds.uid, received_creds.uid);
- EXPECT_EQ(want_creds.gid, received_creds.gid);
-}
-
-TEST_P(UnixSocketPairCmsgTest, WriteBeforeSoPassCredSendEnd) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data, sizeof(sent_data)),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- SetSoPassCred(sockets->first_fd());
-
- char received_data[20];
- ASSERT_NO_FATAL_FAILURE(
- RecvNoCmsg(sockets->second_fd(), received_data, sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-}
-
-TEST_P(UnixSocketPairCmsgTest, WriteAfterSoPassCredSendEnd) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- SetSoPassCred(sockets->first_fd());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data, sizeof(sent_data)),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- char received_data[20];
- ASSERT_NO_FATAL_FAILURE(
- RecvNoCmsg(sockets->second_fd(), received_data, sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-}
-
-TEST_P(UnixSocketPairCmsgTest, WriteBeforeSoPassCredRecvEndAfterSendEnd) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- SetSoPassCred(sockets->first_fd());
-
- ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data, sizeof(sent_data)),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- SetSoPassCred(sockets->second_fd());
-
- char received_data[20];
-
- struct ucred received_creds;
- ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds,
- received_data, sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-
- struct ucred want_creds;
- ASSERT_THAT(want_creds.pid = getpid(), SyscallSucceeds());
- ASSERT_THAT(want_creds.uid = getuid(), SyscallSucceeds());
- ASSERT_THAT(want_creds.gid = getgid(), SyscallSucceeds());
-
- EXPECT_EQ(want_creds.pid, received_creds.pid);
- EXPECT_EQ(want_creds.uid, received_creds.uid);
- EXPECT_EQ(want_creds.gid, received_creds.gid);
-}
-
-TEST_P(UnixSocketPairCmsgTest, CredPassTruncated) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- struct ucred sent_creds;
-
- ASSERT_THAT(sent_creds.pid = getpid(), SyscallSucceeds());
- ASSERT_THAT(sent_creds.uid = getuid(), SyscallSucceeds());
- ASSERT_THAT(sent_creds.gid = getgid(), SyscallSucceeds());
-
- ASSERT_NO_FATAL_FAILURE(
- SendCreds(sockets->first_fd(), sent_creds, sent_data, sizeof(sent_data)));
-
- SetSoPassCred(sockets->second_fd());
-
- struct msghdr msg = {};
- char control[CMSG_SPACE(0) + sizeof(pid_t)];
- msg.msg_control = control;
- msg.msg_controllen = sizeof(control);
-
- char received_data[sizeof(sent_data)] = {};
- struct iovec iov;
- iov.iov_base = received_data;
- iov.iov_len = sizeof(received_data);
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0),
- SyscallSucceedsWithValue(sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-
- EXPECT_EQ(msg.msg_controllen, sizeof(control));
-
- struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
- ASSERT_NE(cmsg, nullptr);
- EXPECT_EQ(cmsg->cmsg_len, sizeof(control));
- EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET);
- EXPECT_EQ(cmsg->cmsg_type, SCM_CREDENTIALS);
-
- pid_t pid = 0;
- memcpy(&pid, CMSG_DATA(cmsg), sizeof(pid));
- EXPECT_EQ(pid, sent_creds.pid);
-}
-
-// CredPassNoMsgCtrunc passes a full set of credentials. It then verifies that
-// receiving the full set does not result in MSG_CTRUNC being set in the msghdr.
-TEST_P(UnixSocketPairCmsgTest, CredPassNoMsgCtrunc) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- struct ucred sent_creds;
-
- ASSERT_THAT(sent_creds.pid = getpid(), SyscallSucceeds());
- ASSERT_THAT(sent_creds.uid = getuid(), SyscallSucceeds());
- ASSERT_THAT(sent_creds.gid = getgid(), SyscallSucceeds());
-
- ASSERT_NO_FATAL_FAILURE(
- SendCreds(sockets->first_fd(), sent_creds, sent_data, sizeof(sent_data)));
-
- SetSoPassCred(sockets->second_fd());
-
- struct msghdr msg = {};
- char control[CMSG_SPACE(sizeof(struct ucred))];
- msg.msg_control = control;
- msg.msg_controllen = sizeof(control);
-
- char received_data[sizeof(sent_data)] = {};
- struct iovec iov;
- iov.iov_base = received_data;
- iov.iov_len = sizeof(received_data);
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0),
- SyscallSucceedsWithValue(sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-
- // The control message should not be truncated.
- EXPECT_EQ(msg.msg_flags, 0);
- EXPECT_EQ(msg.msg_controllen, sizeof(control));
-
- struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
- ASSERT_NE(cmsg, nullptr);
- EXPECT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(struct ucred)));
- EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET);
- EXPECT_EQ(cmsg->cmsg_type, SCM_CREDENTIALS);
-}
-
-// CredPassNoSpaceMsgCtrunc passes a full set of credentials. It then receives
-// the data without providing space for any credentials and verifies that
-// MSG_CTRUNC is set in the msghdr.
-TEST_P(UnixSocketPairCmsgTest, CredPassNoSpaceMsgCtrunc) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- struct ucred sent_creds;
-
- ASSERT_THAT(sent_creds.pid = getpid(), SyscallSucceeds());
- ASSERT_THAT(sent_creds.uid = getuid(), SyscallSucceeds());
- ASSERT_THAT(sent_creds.gid = getgid(), SyscallSucceeds());
-
- ASSERT_NO_FATAL_FAILURE(
- SendCreds(sockets->first_fd(), sent_creds, sent_data, sizeof(sent_data)));
-
- SetSoPassCred(sockets->second_fd());
-
- struct msghdr msg = {};
- char control[CMSG_SPACE(0)];
- msg.msg_control = control;
- msg.msg_controllen = sizeof(control);
-
- char received_data[sizeof(sent_data)] = {};
- struct iovec iov;
- iov.iov_base = received_data;
- iov.iov_len = sizeof(received_data);
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0),
- SyscallSucceedsWithValue(sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-
- // The control message should be truncated.
- EXPECT_EQ(msg.msg_flags, MSG_CTRUNC);
- EXPECT_EQ(msg.msg_controllen, sizeof(control));
-
- struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
- ASSERT_NE(cmsg, nullptr);
- EXPECT_EQ(cmsg->cmsg_len, sizeof(control));
- EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET);
- EXPECT_EQ(cmsg->cmsg_type, SCM_CREDENTIALS);
-}
-
-// CredPassTruncatedMsgCtrunc passes a full set of credentials. It then receives
-// the data while providing enough space for only the first field of the
-// credentials and verifies that MSG_CTRUNC is set in the msghdr.
-TEST_P(UnixSocketPairCmsgTest, CredPassTruncatedMsgCtrunc) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- struct ucred sent_creds;
-
- ASSERT_THAT(sent_creds.pid = getpid(), SyscallSucceeds());
- ASSERT_THAT(sent_creds.uid = getuid(), SyscallSucceeds());
- ASSERT_THAT(sent_creds.gid = getgid(), SyscallSucceeds());
-
- ASSERT_NO_FATAL_FAILURE(
- SendCreds(sockets->first_fd(), sent_creds, sent_data, sizeof(sent_data)));
-
- SetSoPassCred(sockets->second_fd());
-
- struct msghdr msg = {};
- char control[CMSG_SPACE(0) + sizeof(pid_t)];
- msg.msg_control = control;
- msg.msg_controllen = sizeof(control);
-
- char received_data[sizeof(sent_data)] = {};
- struct iovec iov;
- iov.iov_base = received_data;
- iov.iov_len = sizeof(received_data);
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0),
- SyscallSucceedsWithValue(sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-
- // The control message should be truncated.
- EXPECT_EQ(msg.msg_flags, MSG_CTRUNC);
- EXPECT_EQ(msg.msg_controllen, sizeof(control));
-
- struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
- ASSERT_NE(cmsg, nullptr);
- EXPECT_EQ(cmsg->cmsg_len, sizeof(control));
- EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET);
- EXPECT_EQ(cmsg->cmsg_type, SCM_CREDENTIALS);
-}
-
-TEST_P(UnixSocketPairCmsgTest, SoPassCred) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int opt;
- socklen_t optLen = sizeof(opt);
- EXPECT_THAT(
- getsockopt(sockets->first_fd(), SOL_SOCKET, SO_PASSCRED, &opt, &optLen),
- SyscallSucceeds());
- EXPECT_FALSE(opt);
-
- optLen = sizeof(opt);
- EXPECT_THAT(
- getsockopt(sockets->second_fd(), SOL_SOCKET, SO_PASSCRED, &opt, &optLen),
- SyscallSucceeds());
- EXPECT_FALSE(opt);
-
- SetSoPassCred(sockets->first_fd());
-
- optLen = sizeof(opt);
- EXPECT_THAT(
- getsockopt(sockets->first_fd(), SOL_SOCKET, SO_PASSCRED, &opt, &optLen),
- SyscallSucceeds());
- EXPECT_TRUE(opt);
-
- optLen = sizeof(opt);
- EXPECT_THAT(
- getsockopt(sockets->second_fd(), SOL_SOCKET, SO_PASSCRED, &opt, &optLen),
- SyscallSucceeds());
- EXPECT_FALSE(opt);
-
- int zero = 0;
- EXPECT_THAT(setsockopt(sockets->first_fd(), SOL_SOCKET, SO_PASSCRED, &zero,
- sizeof(zero)),
- SyscallSucceeds());
-
- optLen = sizeof(opt);
- EXPECT_THAT(
- getsockopt(sockets->first_fd(), SOL_SOCKET, SO_PASSCRED, &opt, &optLen),
- SyscallSucceeds());
- EXPECT_FALSE(opt);
-
- optLen = sizeof(opt);
- EXPECT_THAT(
- getsockopt(sockets->second_fd(), SOL_SOCKET, SO_PASSCRED, &opt, &optLen),
- SyscallSucceeds());
- EXPECT_FALSE(opt);
-}
-
-TEST_P(UnixSocketPairCmsgTest, NoDataCredPass) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- struct msghdr msg = {};
-
- struct iovec iov;
- iov.iov_base = sent_data;
- iov.iov_len = sizeof(sent_data);
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- char control[CMSG_SPACE(0)];
- msg.msg_control = control;
- msg.msg_controllen = sizeof(control);
-
- struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
- cmsg->cmsg_level = SOL_SOCKET;
- cmsg->cmsg_type = SCM_CREDENTIALS;
- cmsg->cmsg_len = CMSG_LEN(0);
-
- ASSERT_THAT(RetryEINTR(sendmsg)(sockets->first_fd(), &msg, 0),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_P(UnixSocketPairCmsgTest, NoPassCred) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- struct ucred sent_creds;
-
- ASSERT_THAT(sent_creds.pid = getpid(), SyscallSucceeds());
- ASSERT_THAT(sent_creds.uid = getuid(), SyscallSucceeds());
- ASSERT_THAT(sent_creds.gid = getgid(), SyscallSucceeds());
-
- ASSERT_NO_FATAL_FAILURE(
- SendCreds(sockets->first_fd(), sent_creds, sent_data, sizeof(sent_data)));
-
- char received_data[20];
-
- ASSERT_NO_FATAL_FAILURE(
- RecvNoCmsg(sockets->second_fd(), received_data, sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-}
-
-TEST_P(UnixSocketPairCmsgTest, CredAndFDPass) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- struct ucred sent_creds;
-
- ASSERT_THAT(sent_creds.pid = getpid(), SyscallSucceeds());
- ASSERT_THAT(sent_creds.uid = getuid(), SyscallSucceeds());
- ASSERT_THAT(sent_creds.gid = getgid(), SyscallSucceeds());
-
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- ASSERT_NO_FATAL_FAILURE(SendCredsAndFD(sockets->first_fd(), sent_creds,
- pair->second_fd(), sent_data,
- sizeof(sent_data)));
-
- SetSoPassCred(sockets->second_fd());
-
- char received_data[20];
- struct ucred received_creds;
- int fd = -1;
- ASSERT_NO_FATAL_FAILURE(RecvCredsAndFD(sockets->second_fd(), &received_creds,
- &fd, received_data,
- sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-
- EXPECT_EQ(sent_creds.pid, received_creds.pid);
- EXPECT_EQ(sent_creds.uid, received_creds.uid);
- EXPECT_EQ(sent_creds.gid, received_creds.gid);
-
- ASSERT_NO_FATAL_FAILURE(TransferTest(fd, pair->first_fd()));
-}
-
-TEST_P(UnixSocketPairCmsgTest, FDPassBeforeSoPassCred) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(),
- sent_data, sizeof(sent_data)));
-
- SetSoPassCred(sockets->second_fd());
-
- char received_data[20];
- struct ucred received_creds;
- int fd = -1;
- ASSERT_NO_FATAL_FAILURE(RecvCredsAndFD(sockets->second_fd(), &received_creds,
- &fd, received_data,
- sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-
- struct ucred want_creds {
- 0, 65534, 65534
- };
-
- EXPECT_EQ(want_creds.pid, received_creds.pid);
- EXPECT_EQ(want_creds.uid, received_creds.uid);
- EXPECT_EQ(want_creds.gid, received_creds.gid);
-
- ASSERT_NO_FATAL_FAILURE(TransferTest(fd, pair->first_fd()));
-}
-
-TEST_P(UnixSocketPairCmsgTest, FDPassAfterSoPassCred) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- SetSoPassCred(sockets->second_fd());
-
- ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(),
- sent_data, sizeof(sent_data)));
-
- char received_data[20];
- struct ucred received_creds;
- int fd = -1;
- ASSERT_NO_FATAL_FAILURE(RecvCredsAndFD(sockets->second_fd(), &received_creds,
- &fd, received_data,
- sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-
- struct ucred want_creds;
- ASSERT_THAT(want_creds.pid = getpid(), SyscallSucceeds());
- ASSERT_THAT(want_creds.uid = getuid(), SyscallSucceeds());
- ASSERT_THAT(want_creds.gid = getgid(), SyscallSucceeds());
-
- EXPECT_EQ(want_creds.pid, received_creds.pid);
- EXPECT_EQ(want_creds.uid, received_creds.uid);
- EXPECT_EQ(want_creds.gid, received_creds.gid);
-
- ASSERT_NO_FATAL_FAILURE(TransferTest(fd, pair->first_fd()));
-}
-
-TEST_P(UnixSocketPairCmsgTest, CloexecDroppedWhenFDPassed) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- auto pair = ASSERT_NO_ERRNO_AND_VALUE(
- UnixDomainSocketPair(SOCK_SEQPACKET | SOCK_CLOEXEC).Create());
-
- ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(),
- sent_data, sizeof(sent_data)));
-
- char received_data[20];
- int fd = -1;
- ASSERT_NO_FATAL_FAILURE(RecvSingleFD(sockets->second_fd(), &fd, received_data,
- sizeof(received_data)));
-
- EXPECT_THAT(fcntl(fd, F_GETFD), SyscallSucceedsWithValue(0));
-}
-
-TEST_P(UnixSocketPairCmsgTest, CloexecRecvFDPass) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(),
- sent_data, sizeof(sent_data)));
-
- struct msghdr msg = {};
- char control[CMSG_SPACE(sizeof(int))];
- msg.msg_control = control;
- msg.msg_controllen = sizeof(control);
-
- struct iovec iov;
- char received_data[20];
- iov.iov_base = received_data;
- iov.iov_len = sizeof(received_data);
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, MSG_CMSG_CLOEXEC),
- SyscallSucceedsWithValue(sizeof(received_data)));
- struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
- ASSERT_NE(cmsg, nullptr);
- ASSERT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(int)));
- ASSERT_EQ(cmsg->cmsg_level, SOL_SOCKET);
- ASSERT_EQ(cmsg->cmsg_type, SCM_RIGHTS);
-
- int fd = -1;
- memcpy(&fd, CMSG_DATA(cmsg), sizeof(int));
-
- EXPECT_THAT(fcntl(fd, F_GETFD), SyscallSucceedsWithValue(FD_CLOEXEC));
-}
-
-TEST_P(UnixSocketPairCmsgTest, FDPassAfterSoPassCredWithoutCredSpace) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- SetSoPassCred(sockets->second_fd());
-
- ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(),
- sent_data, sizeof(sent_data)));
-
- struct msghdr msg = {};
- char control[CMSG_LEN(0)];
- msg.msg_control = control;
- msg.msg_controllen = sizeof(control);
-
- char received_data[20];
- struct iovec iov;
- iov.iov_base = received_data;
- iov.iov_len = sizeof(received_data);
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0),
- SyscallSucceedsWithValue(sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-
- EXPECT_EQ(msg.msg_controllen, sizeof(control));
-
- struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
- ASSERT_NE(cmsg, nullptr);
- EXPECT_EQ(cmsg->cmsg_len, sizeof(control));
- EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET);
- EXPECT_EQ(cmsg->cmsg_type, SCM_CREDENTIALS);
-}
-
-// MsgCtruncInputIsNoop validates that passing MSG_CTRUNC as an input flag to
-// recvmsg is ignored: it does not appear as an output flag in msg_flags when
-// no truncation occurs.
-TEST_P(UnixSocketPairCmsgTest, MsgCtruncInputIsNoop) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(),
- sent_data, sizeof(sent_data)));
-
- struct msghdr msg = {};
- char control[CMSG_SPACE(sizeof(int)) /* we're passing a single fd */];
- msg.msg_control = control;
- msg.msg_controllen = sizeof(control);
-
- struct iovec iov;
- char received_data[20];
- iov.iov_base = received_data;
- iov.iov_len = sizeof(received_data);
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, MSG_CTRUNC),
- SyscallSucceedsWithValue(sizeof(received_data)));
- struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
- ASSERT_NE(cmsg, nullptr);
- ASSERT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(int)));
- ASSERT_EQ(cmsg->cmsg_level, SOL_SOCKET);
- ASSERT_EQ(cmsg->cmsg_type, SCM_RIGHTS);
-
- // Now we should verify that MSG_CTRUNC wasn't set as an output flag.
- EXPECT_EQ(msg.msg_flags & MSG_CTRUNC, 0);
-}
-
-TEST_P(UnixSocketPairCmsgTest, FDPassAfterSoPassCredWithoutCredHeaderSpace) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- SetSoPassCred(sockets->second_fd());
-
- ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(),
- sent_data, sizeof(sent_data)));
-
- struct msghdr msg = {};
- char control[CMSG_LEN(0) / 2];
- msg.msg_control = control;
- msg.msg_controllen = sizeof(control);
-
- char received_data[20];
- struct iovec iov;
- iov.iov_base = received_data;
- iov.iov_len = sizeof(received_data);
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0),
- SyscallSucceedsWithValue(sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
- EXPECT_EQ(msg.msg_controllen, 0);
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
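
For reference, a minimal standalone sketch of the SCM_RIGHTS descriptor-passing pattern the deleted cmsg tests exercise: one payload byte is sent together with a control message carrying a single fd, and the receiver pulls the fd back out of CMSG_DATA. The SendFd/RecvFd names below are illustrative, not the deleted test helpers, and error handling is trimmed for brevity.

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

// Sends `fd` across `sock` as SCM_RIGHTS ancillary data with one payload byte.
static int SendFd(int sock, int fd) {
  char payload = 'x';
  struct iovec iov = {&payload, sizeof(payload)};
  char control[CMSG_SPACE(sizeof(int))] = {};  // room for exactly one int
  struct msghdr msg = {};
  msg.msg_iov = &iov;
  msg.msg_iovlen = 1;
  msg.msg_control = control;
  msg.msg_controllen = sizeof(control);
  struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
  cmsg->cmsg_level = SOL_SOCKET;
  cmsg->cmsg_type = SCM_RIGHTS;
  cmsg->cmsg_len = CMSG_LEN(sizeof(int));
  memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));
  return sendmsg(sock, &msg, 0) < 0 ? -1 : 0;
}

// Receives one descriptor; returns -1 if no SCM_RIGHTS message arrived.
// Passing MSG_CMSG_CLOEXEC as `flags` sets FD_CLOEXEC on the received fd,
// which is what the recvmsg(..., MSG_CMSG_CLOEXEC) assertions above check.
static int RecvFd(int sock, int flags) {
  char payload;
  struct iovec iov = {&payload, sizeof(payload)};
  char control[CMSG_SPACE(sizeof(int))] = {};
  struct msghdr msg = {};
  msg.msg_iov = &iov;
  msg.msg_iovlen = 1;
  msg.msg_control = control;
  msg.msg_controllen = sizeof(control);
  if (recvmsg(sock, &msg, flags) < 0) return -1;
  struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
  if (cmsg == nullptr || cmsg->cmsg_type != SCM_RIGHTS) return -1;
  int fd = -1;
  memcpy(&fd, CMSG_DATA(cmsg), sizeof(int));
  return fd;
}
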
diff --git a/test/syscalls/linux/socket_unix_cmsg.h b/test/syscalls/linux/socket_unix_cmsg.h
deleted file mode 100644
index 431606903..000000000
--- a/test/syscalls/linux/socket_unix_cmsg.h
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_LINUX_SOCKET_UNIX_CMSG_H_
-#define GVISOR_TEST_SYSCALLS_LINUX_SOCKET_UNIX_CMSG_H_
-
-#include "test/syscalls/linux/socket_test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Test fixture for tests that apply to pairs of connected unix sockets about
-// control messages.
-using UnixSocketPairCmsgTest = SocketPairTest;
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_LINUX_SOCKET_UNIX_CMSG_H_
diff --git a/test/syscalls/linux/socket_unix_dgram.cc b/test/syscalls/linux/socket_unix_dgram.cc
deleted file mode 100644
index 5b0844493..000000000
--- a/test/syscalls/linux/socket_unix_dgram.cc
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/socket_unix_dgram.h"
-
-#include <stdio.h>
-#include <sys/un.h>
-
-#include "gtest/gtest.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST_P(DgramUnixSocketPairTest, WriteOneSideClosed) {
- // FIXME(b/35925052): gVisor datagram sockets return EPIPE instead of
- // ECONNREFUSED.
- SKIP_IF(IsRunningOnGvisor());
-
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- ASSERT_THAT(close(sockets->release_first_fd()), SyscallSucceeds());
- constexpr char kStr[] = "abc";
- ASSERT_THAT(write(sockets->second_fd(), kStr, 3),
- SyscallFailsWithErrno(ECONNREFUSED));
-}
-
-TEST_P(DgramUnixSocketPairTest, IncreasedSocketSendBufUnblocksWrites) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- int sock = sockets->first_fd();
- int buf_size = 0;
- socklen_t buf_size_len = sizeof(buf_size);
- ASSERT_THAT(getsockopt(sock, SOL_SOCKET, SO_SNDBUF, &buf_size, &buf_size_len),
- SyscallSucceeds());
- int opts;
- ASSERT_THAT(opts = fcntl(sock, F_GETFL), SyscallSucceeds());
- opts |= O_NONBLOCK;
- ASSERT_THAT(fcntl(sock, F_SETFL, opts), SyscallSucceeds());
-
- std::vector<char> buf(buf_size / 4);
- // Write till the socket buffer is full.
- while (RetryEINTR(send)(sock, buf.data(), buf.size(), 0) != -1) {
- // Sleep to give Linux a chance to move data from the send buffer to the
- // receive buffer.
- absl::SleepFor(absl::Milliseconds(10)); // 10ms.
- }
- // The last error should have been EWOULDBLOCK.
- ASSERT_EQ(errno, EWOULDBLOCK);
-
- // Now increase the socket send buffer.
- buf_size = buf_size * 2;
- ASSERT_THAT(
- setsockopt(sock, SOL_SOCKET, SO_SNDBUF, &buf_size, sizeof(buf_size)),
- SyscallSucceeds());
-
- // The send should succeed again.
- ASSERT_THAT(RetryEINTR(send)(sock, buf.data(), buf.size(), 0),
- SyscallSucceeds());
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
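
As a rough standalone sketch of the pattern in IncreasedSocketSendBufUnblocksWrites above (assumptions: a plain socketpair(2) instead of the test fixtures, and no error checking on the setsockopt calls): the sender is made non-blocking, fills its send buffer until send() fails with EWOULDBLOCK, and is then given a larger SO_SNDBUF so a further send can go through.

#include <errno.h>
#include <fcntl.h>
#include <sys/socket.h>
#include <unistd.h>

#include <vector>

int main() {
  int fds[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, fds) != 0) return 1;
  const int sock = fds[0];

  // Make the sending side non-blocking.
  fcntl(sock, F_SETFL, fcntl(sock, F_GETFL) | O_NONBLOCK);

  int buf_size = 0;
  socklen_t len = sizeof(buf_size);
  getsockopt(sock, SOL_SOCKET, SO_SNDBUF, &buf_size, &len);

  // Fill the send buffer with quarter-sized datagrams until send() blocks.
  std::vector<char> chunk(buf_size / 4, 'a');
  while (send(sock, chunk.data(), chunk.size(), 0) != -1) {
  }
  // errno is now EWOULDBLOCK/EAGAIN: the buffer is full.

  // Doubling SO_SNDBUF makes room, so the next send can succeed.
  buf_size *= 2;
  setsockopt(sock, SOL_SOCKET, SO_SNDBUF, &buf_size, sizeof(buf_size));
  send(sock, chunk.data(), chunk.size(), 0);

  close(fds[0]);
  close(fds[1]);
  return 0;
}
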
diff --git a/test/syscalls/linux/socket_unix_dgram.h b/test/syscalls/linux/socket_unix_dgram.h
deleted file mode 100644
index 0764ef85b..000000000
--- a/test/syscalls/linux/socket_unix_dgram.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_LINUX_SOCKET_UNIX_DGRAM_H_
-#define GVISOR_TEST_SYSCALLS_LINUX_SOCKET_UNIX_DGRAM_H_
-
-#include "test/syscalls/linux/socket_test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Test fixture for tests that apply to pairs of connected dgram unix sockets.
-using DgramUnixSocketPairTest = SocketPairTest;
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_LINUX_SOCKET_UNIX_DGRAM_H_
diff --git a/test/syscalls/linux/socket_unix_dgram_local.cc b/test/syscalls/linux/socket_unix_dgram_local.cc
deleted file mode 100644
index 31d2d5216..000000000
--- a/test/syscalls/linux/socket_unix_dgram_local.cc
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <vector>
-
-#include "test/syscalls/linux/socket_non_stream.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/socket_unix_dgram.h"
-#include "test/syscalls/linux/socket_unix_non_stream.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-std::vector<SocketPairKind> GetSocketPairs() {
- return VecCat<SocketPairKind>(VecCat<SocketPairKind>(
- ApplyVec<SocketPairKind>(
- UnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_DGRAM, SOCK_RAW},
- List<int>{0, SOCK_NONBLOCK})),
- ApplyVec<SocketPairKind>(
- FilesystemBoundUnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_DGRAM, SOCK_RAW},
- List<int>{0, SOCK_NONBLOCK})),
- ApplyVec<SocketPairKind>(
- AbstractBoundUnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_DGRAM, SOCK_RAW},
- List<int>{0, SOCK_NONBLOCK}))));
-}
-
-INSTANTIATE_TEST_SUITE_P(
- DgramUnixSockets, DgramUnixSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-INSTANTIATE_TEST_SUITE_P(
- DgramUnixSockets, UnixNonStreamSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-INSTANTIATE_TEST_SUITE_P(
- DgramUnixSockets, NonStreamSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_unix_dgram_non_blocking.cc b/test/syscalls/linux/socket_unix_dgram_non_blocking.cc
deleted file mode 100644
index 2db8b68d3..000000000
--- a/test/syscalls/linux/socket_unix_dgram_non_blocking.cc
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <stdio.h>
-#include <sys/un.h>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// Test fixture for tests that apply to pairs of connected non-blocking dgram
-// unix sockets.
-using NonBlockingDgramUnixSocketPairTest = SocketPairTest;
-
-TEST_P(NonBlockingDgramUnixSocketPairTest, ReadOneSideClosed) {
- if (IsRunningOnGvisor()) {
- // FIXME(b/70803293): gVisor datagram sockets return 0 instead of
- // EAGAIN.
- return;
- }
-
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- ASSERT_THAT(close(sockets->release_first_fd()), SyscallSucceeds());
- char data[10] = {};
- ASSERT_THAT(read(sockets->second_fd(), data, sizeof(data)),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-INSTANTIATE_TEST_SUITE_P(
- NonBlockingDgramUnixSockets, NonBlockingDgramUnixSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(std::vector<SocketPairKind>{
- UnixDomainSocketPair(SOCK_DGRAM | SOCK_NONBLOCK),
- FilesystemBoundUnixDomainSocketPair(SOCK_DGRAM | SOCK_NONBLOCK),
- AbstractBoundUnixDomainSocketPair(SOCK_DGRAM | SOCK_NONBLOCK),
- })));
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_unix_domain.cc b/test/syscalls/linux/socket_unix_domain.cc
deleted file mode 100644
index f7dff8b4d..000000000
--- a/test/syscalls/linux/socket_unix_domain.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <vector>
-
-#include "test/syscalls/linux/socket_generic.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-std::vector<SocketPairKind> GetSocketPairs() {
- return ApplyVec<SocketPairKind>(
- UnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_STREAM, SOCK_DGRAM, SOCK_SEQPACKET},
- List<int>{0, SOCK_NONBLOCK}));
-}
-
-INSTANTIATE_TEST_SUITE_P(
- AllUnixDomainSockets, AllSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_unix_filesystem_nonblock.cc b/test/syscalls/linux/socket_unix_filesystem_nonblock.cc
deleted file mode 100644
index 6700b4d90..000000000
--- a/test/syscalls/linux/socket_unix_filesystem_nonblock.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <vector>
-
-#include "test/syscalls/linux/socket_non_blocking.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-std::vector<SocketPairKind> GetSocketPairs() {
- return ApplyVec<SocketPairKind>(
- FilesystemBoundUnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_STREAM, SOCK_DGRAM, SOCK_SEQPACKET},
- List<int>{SOCK_NONBLOCK}));
-}
-
-INSTANTIATE_TEST_SUITE_P(
- NonBlockingFilesystemUnixSockets, NonBlockingSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_unix_non_stream.cc b/test/syscalls/linux/socket_unix_non_stream.cc
deleted file mode 100644
index 9425e87a6..000000000
--- a/test/syscalls/linux/socket_unix_non_stream.cc
+++ /dev/null
@@ -1,256 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/socket_unix_non_stream.h"
-
-#include <stdio.h>
-#include <sys/mman.h>
-#include <sys/un.h>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/memory_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-TEST_P(UnixNonStreamSocketPairTest, RecvMsgTooLarge) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int rcvbuf;
- socklen_t length = sizeof(rcvbuf);
- ASSERT_THAT(
- getsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVBUF, &rcvbuf, &length),
- SyscallSucceeds());
-
- // Make the call larger than the receive buffer.
- const int recv_size = 3 * rcvbuf;
-
- // Write a message that does fit in the receive buffer.
- const int write_size = rcvbuf - kPageSize;
-
- std::vector<char> write_buf(write_size, 'a');
- const int ret = RetryEINTR(write)(sockets->second_fd(), write_buf.data(),
- write_buf.size());
- if (ret < 0 && errno == ENOBUFS) {
- // NOTE(b/116636318): Linux may stall the write for a long time and
- // ultimately return ENOBUFS. Allow this error, since a retry will likely
- // result in the same error.
- return;
- }
- ASSERT_THAT(ret, SyscallSucceeds());
-
- std::vector<char> recv_buf(recv_size);
-
- ASSERT_NO_FATAL_FAILURE(RecvNoCmsg(sockets->first_fd(), recv_buf.data(),
- recv_buf.size(), write_size));
-
- recv_buf.resize(write_size);
- EXPECT_EQ(recv_buf, write_buf);
-}
-
-// Create a region of anonymous memory of size 'size', which is fragmented in
-// FileMem.
-//
-// ptr contains the start address of the region. The returned vector contains
-// all of the mappings to be unmapped when done.
-PosixErrorOr<std::vector<Mapping>> CreateFragmentedRegion(const int size,
- void** ptr) {
- Mapping region;
- ASSIGN_OR_RETURN_ERRNO(region, Mmap(nullptr, size, PROT_NONE,
- MAP_ANONYMOUS | MAP_PRIVATE, -1, 0));
-
- *ptr = region.ptr();
-
- // Don't save hundreds of times for all of these mmaps.
- DisableSave ds;
-
- std::vector<Mapping> pages;
-
- // Map and commit a single page at a time, mapping and committing an unrelated
- // page between each call to force FileMem fragmentation.
- for (uintptr_t addr = region.addr(); addr < region.endaddr();
- addr += kPageSize) {
- Mapping page;
- ASSIGN_OR_RETURN_ERRNO(
- page,
- Mmap(reinterpret_cast<void*>(addr), kPageSize, PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0));
- *reinterpret_cast<volatile char*>(page.ptr()) = 42;
-
- pages.emplace_back(std::move(page));
-
- // Unrelated page elsewhere.
- ASSIGN_OR_RETURN_ERRNO(page,
- Mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_PRIVATE, -1, 0));
- *reinterpret_cast<volatile char*>(page.ptr()) = 42;
-
- pages.emplace_back(std::move(page));
- }
-
- // The mappings above have taken ownership of the region.
- region.release();
-
- return std::move(pages);
-}
-
-// A contiguous iov that is heavily fragmented in FileMem can still be sent
-// successfully. See b/115833655.
-TEST_P(UnixNonStreamSocketPairTest, FragmentedSendMsg) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- const int buffer_size = UIO_MAXIOV * kPageSize;
- // Extra page for message header overhead.
- const int sndbuf = buffer_size + kPageSize;
- // N.B. setsockopt(SO_SNDBUF) doubles the passed value.
- const int set_sndbuf = sndbuf / 2;
-
- EXPECT_THAT(setsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDBUF,
- &set_sndbuf, sizeof(set_sndbuf)),
- SyscallSucceeds());
-
- int actual_sndbuf = 0;
- socklen_t length = sizeof(actual_sndbuf);
- ASSERT_THAT(getsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDBUF,
- &actual_sndbuf, &length),
- SyscallSucceeds());
-
- if (actual_sndbuf != sndbuf) {
- // Unable to get the sndbuf we want.
- //
- // N.B. At minimum, the socketpair gofer should provide a socket that is
- // already the correct size.
- //
- // TODO(b/35921550): When internal UDS support SO_SNDBUF, we can assert that
- // we always get the right SO_SNDBUF on gVisor.
- GTEST_SKIP() << "SO_SNDBUF = " << actual_sndbuf << ", want " << sndbuf;
- }
-
- // Create a contiguous region of memory of 2*UIO_MAXIOV*PAGE_SIZE. We'll call
- // sendmsg with a single iov, but the goal is to get the sentry to split this
- // into > UIO_MAXIOV iovs when calling the kernel.
- void* ptr;
- std::vector<Mapping> pages =
- ASSERT_NO_ERRNO_AND_VALUE(CreateFragmentedRegion(buffer_size, &ptr));
-
- struct iovec iov = {};
- iov.iov_base = ptr;
- iov.iov_len = buffer_size;
-
- struct msghdr msg = {};
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- // NOTE(b/116636318,b/115833655): Linux has poor behavior in the presence of
- // physical memory fragmentation. As a result, this may stall for a long time
- // and ultimately return ENOBUFS. Allow this error, since it means that we
- // made it to the host kernel and started the sendmsg.
- EXPECT_THAT(RetryEINTR(sendmsg)(sockets->first_fd(), &msg, 0),
- AnyOf(SyscallSucceedsWithValue(buffer_size),
- SyscallFailsWithErrno(ENOBUFS)));
-}
-
-// A contiguous iov that is heavily fragmented in FileMem can still be received
-// into successfully. Regression test for b/115833655.
-TEST_P(UnixNonStreamSocketPairTest, FragmentedRecvMsg) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- const int buffer_size = UIO_MAXIOV * kPageSize;
- // Extra page for message header overhead.
- const int sndbuf = buffer_size + kPageSize;
- // N.B. setsockopt(SO_SNDBUF) doubles the passed value.
- const int set_sndbuf = sndbuf / 2;
-
- EXPECT_THAT(setsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDBUF,
- &set_sndbuf, sizeof(set_sndbuf)),
- SyscallSucceeds());
-
- int actual_sndbuf = 0;
- socklen_t length = sizeof(actual_sndbuf);
- ASSERT_THAT(getsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDBUF,
- &actual_sndbuf, &length),
- SyscallSucceeds());
-
- if (actual_sndbuf != sndbuf) {
- // Unable to get the sndbuf we want.
- //
- // N.B. At minimum, the socketpair gofer should provide a socket that is
- // already the correct size.
- //
- // TODO(b/35921550): When internal UDS support SO_SNDBUF, we can assert that
- // we always get the right SO_SNDBUF on gVisor.
- GTEST_SKIP() << "SO_SNDBUF = " << actual_sndbuf << ", want " << sndbuf;
- }
-
- std::vector<char> write_buf(buffer_size, 'a');
- const int ret = RetryEINTR(write)(sockets->first_fd(), write_buf.data(),
- write_buf.size());
- if (ret < 0 && errno == ENOBUFS) {
- // NOTE(b/116636318): Linux may stall the write for a long time and
- // ultimately return ENOBUFS. Allow this error, since a retry will likely
- // result in the same error.
- return;
- }
- ASSERT_THAT(ret, SyscallSucceeds());
-
- // Create a contiguous region of memory of 2*UIO_MAXIOV*PAGE_SIZE. We'll call
- // recvmsg with a single iov, but the goal is to get the sentry to split this
- // into > UIO_MAXIOV iovs when calling the kernel.
- void* ptr;
- std::vector<Mapping> pages =
- ASSERT_NO_ERRNO_AND_VALUE(CreateFragmentedRegion(buffer_size, &ptr));
-
- ASSERT_NO_FATAL_FAILURE(RecvNoCmsg(
- sockets->second_fd(), reinterpret_cast<char*>(ptr), buffer_size));
-
- EXPECT_EQ(0, memcmp(write_buf.data(), ptr, buffer_size));
-}
-
-TEST_P(UnixNonStreamSocketPairTest, SendTimeout) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct timeval tv {
- .tv_sec = 0, .tv_usec = 10
- };
- EXPECT_THAT(
- setsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)),
- SyscallSucceeds());
-
- const int buf_size = 5 * kPageSize;
- EXPECT_THAT(setsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDBUF, &buf_size,
- sizeof(buf_size)),
- SyscallSucceeds());
- EXPECT_THAT(setsockopt(sockets->second_fd(), SOL_SOCKET, SO_RCVBUF, &buf_size,
- sizeof(buf_size)),
- SyscallSucceeds());
-
- // The buffer size should be big enough to avoid many iterations in the next
- // loop. Otherwise, this will slow down save tests.
- std::vector<char> buf(kPageSize);
- for (;;) {
- int ret;
- ASSERT_THAT(
- ret = RetryEINTR(send)(sockets->first_fd(), buf.data(), buf.size(), 0),
- ::testing::AnyOf(SyscallSucceeds(), SyscallFailsWithErrno(EAGAIN)));
- if (ret == -1) {
- break;
- }
- }
-}
-
-} // namespace testing
-} // namespace gvisor
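
A small standalone illustration of the "setsockopt(SO_SNDBUF) doubles the passed value" note that the sndbuf arithmetic above relies on. This is a sketch only: the reported value is also clamped by the kernel's wmem limits, so the exact number depends on system configuration.

#include <cstdio>
#include <sys/socket.h>
#include <unistd.h>

int main() {
  int fds[2];
  if (socketpair(AF_UNIX, SOCK_SEQPACKET, 0, fds) != 0) return 1;

  const int requested = 64 * 1024;
  setsockopt(fds[0], SOL_SOCKET, SO_SNDBUF, &requested, sizeof(requested));

  int effective = 0;
  socklen_t len = sizeof(effective);
  getsockopt(fds[0], SOL_SOCKET, SO_SNDBUF, &effective, &len);

  // On Linux this reports roughly 2 * requested (subject to kernel clamping),
  // which is why the tests above set sndbuf / 2 to land on sndbuf.
  printf("requested %d, kernel reports %d\n", requested, effective);

  close(fds[0]);
  close(fds[1]);
  return 0;
}
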
diff --git a/test/syscalls/linux/socket_unix_non_stream.h b/test/syscalls/linux/socket_unix_non_stream.h
deleted file mode 100644
index 7478ab172..000000000
--- a/test/syscalls/linux/socket_unix_non_stream.h
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_LINUX_SOCKET_UNIX_NON_STREAM_H_
-#define GVISOR_TEST_SYSCALLS_LINUX_SOCKET_UNIX_NON_STREAM_H_
-
-#include "test/syscalls/linux/socket_test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Test fixture for tests that apply to pairs of connected non-stream
-// unix-domain sockets.
-using UnixNonStreamSocketPairTest = SocketPairTest;
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_LINUX_SOCKET_UNIX_NON_STREAM_H_
diff --git a/test/syscalls/linux/socket_unix_non_stream_blocking_local.cc b/test/syscalls/linux/socket_unix_non_stream_blocking_local.cc
deleted file mode 100644
index fddcdf1c5..000000000
--- a/test/syscalls/linux/socket_unix_non_stream_blocking_local.cc
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <vector>
-
-#include "test/syscalls/linux/socket_non_stream_blocking.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-std::vector<SocketPairKind> GetSocketPairs() {
- return VecCat<SocketPairKind>(
- ApplyVec<SocketPairKind>(UnixDomainSocketPair,
- std::vector<int>{SOCK_DGRAM, SOCK_SEQPACKET}),
- ApplyVec<SocketPairKind>(FilesystemBoundUnixDomainSocketPair,
- std::vector<int>{SOCK_DGRAM, SOCK_SEQPACKET}),
- ApplyVec<SocketPairKind>(AbstractBoundUnixDomainSocketPair,
- std::vector<int>{SOCK_DGRAM, SOCK_SEQPACKET}));
-}
-
-INSTANTIATE_TEST_SUITE_P(
- BlockingNonStreamUnixSockets, BlockingNonStreamSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_unix_pair.cc b/test/syscalls/linux/socket_unix_pair.cc
deleted file mode 100644
index 85999db04..000000000
--- a/test/syscalls/linux/socket_unix_pair.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <vector>
-
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/socket_unix.h"
-#include "test/syscalls/linux/socket_unix_cmsg.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-std::vector<SocketPairKind> GetSocketPairs() {
- return VecCat<SocketPairKind>(ApplyVec<SocketPairKind>(
- UnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_STREAM, SOCK_DGRAM, SOCK_SEQPACKET},
- List<int>{0, SOCK_NONBLOCK})));
-}
-
-INSTANTIATE_TEST_SUITE_P(
- AllUnixDomainSockets, UnixSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-INSTANTIATE_TEST_SUITE_P(
- AllUnixDomainSockets, UnixSocketPairCmsgTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_unix_pair_nonblock.cc b/test/syscalls/linux/socket_unix_pair_nonblock.cc
deleted file mode 100644
index 281410a9a..000000000
--- a/test/syscalls/linux/socket_unix_pair_nonblock.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <vector>
-
-#include "test/syscalls/linux/socket_non_blocking.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-std::vector<SocketPairKind> GetSocketPairs() {
- return ApplyVec<SocketPairKind>(
- UnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_STREAM, SOCK_DGRAM, SOCK_SEQPACKET},
- List<int>{SOCK_NONBLOCK}));
-}
-
-INSTANTIATE_TEST_SUITE_P(
- NonBlockingUnixSockets, NonBlockingSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_unix_seqpacket.cc b/test/syscalls/linux/socket_unix_seqpacket.cc
deleted file mode 100644
index d6e7031c0..000000000
--- a/test/syscalls/linux/socket_unix_seqpacket.cc
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/socket_unix_seqpacket.h"
-
-#include <stdio.h>
-#include <sys/un.h>
-
-#include "gtest/gtest.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST_P(SeqpacketUnixSocketPairTest, WriteOneSideClosed) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- ASSERT_THAT(close(sockets->release_first_fd()), SyscallSucceeds());
- constexpr char kStr[] = "abc";
- ASSERT_THAT(write(sockets->second_fd(), kStr, 3),
- SyscallFailsWithErrno(EPIPE));
-}
-
-TEST_P(SeqpacketUnixSocketPairTest, ReadOneSideClosed) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- ASSERT_THAT(close(sockets->release_first_fd()), SyscallSucceeds());
- char data[10] = {};
- ASSERT_THAT(read(sockets->second_fd(), data, sizeof(data)),
- SyscallSucceedsWithValue(0));
-}
-
-TEST_P(SeqpacketUnixSocketPairTest, Sendto) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct sockaddr_un addr = {};
- addr.sun_family = AF_UNIX;
- constexpr char kPath[] = "\0nonexistent";
- memcpy(addr.sun_path, kPath, sizeof(kPath));
-
- constexpr char kStr[] = "abc";
- ASSERT_THAT(sendto(sockets->second_fd(), kStr, 3, 0, (struct sockaddr*)&addr,
- sizeof(addr)),
- SyscallSucceedsWithValue(3));
-
- char data[10] = {};
- ASSERT_THAT(read(sockets->first_fd(), data, sizeof(data)),
- SyscallSucceedsWithValue(3));
-}
-
-TEST_P(SeqpacketUnixSocketPairTest, IncreasedSocketSendBufUnblocksWrites) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- int sock = sockets->first_fd();
- int buf_size = 0;
- socklen_t buf_size_len = sizeof(buf_size);
- ASSERT_THAT(getsockopt(sock, SOL_SOCKET, SO_SNDBUF, &buf_size, &buf_size_len),
- SyscallSucceeds());
- int opts;
- ASSERT_THAT(opts = fcntl(sock, F_GETFL), SyscallSucceeds());
- opts |= O_NONBLOCK;
- ASSERT_THAT(fcntl(sock, F_SETFL, opts), SyscallSucceeds());
-
- std::vector<char> buf(buf_size / 4);
- // Write till the socket buffer is full.
- while (RetryEINTR(send)(sock, buf.data(), buf.size(), 0) != -1) {
- // Sleep to give Linux a chance to move data from the send buffer to the
- // receive buffer.
- absl::SleepFor(absl::Milliseconds(10)); // 10ms.
- }
- // The last error should have been EWOULDBLOCK.
- ASSERT_EQ(errno, EWOULDBLOCK);
-
- // Now increase the socket send buffer.
- buf_size = buf_size * 2;
- ASSERT_THAT(
- setsockopt(sock, SOL_SOCKET, SO_SNDBUF, &buf_size, sizeof(buf_size)),
- SyscallSucceeds());
-
- // Skip the test if setsockopt did not increase the sendbuf. This happens for
- // tests where the socket is a host fd, for which gVisor does not permit
- // increasing the send buffer size.
- int new_buf_size = 0;
- buf_size_len = sizeof(new_buf_size);
- ASSERT_THAT(
- getsockopt(sock, SOL_SOCKET, SO_SNDBUF, &new_buf_size, &buf_size_len),
- SyscallSucceeds());
- if (IsRunningOnGvisor() && (new_buf_size <= buf_size)) {
- GTEST_SKIP() << "Skipping test new send buffer size " << new_buf_size
- << " is the same as the value before setsockopt, "
- << " socket is probably a host backed socket." << std ::endl;
- }
- // send should succeed again.
- ASSERT_THAT(RetryEINTR(send)(sock, buf.data(), buf.size(), 0),
- SyscallSucceeds());
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
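
For the Sendto case above, a minimal standalone sketch (hypothetical main(), same constants as the test): on a connected SOCK_SEQPACKET pair the destination address passed to sendto() is ignored and the data reaches the peer, whereas the stream variant later in this change expects EISCONN instead.

#include <cstring>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main() {
  int fds[2];
  if (socketpair(AF_UNIX, SOCK_SEQPACKET, 0, fds) != 0) return 1;

  // An abstract-namespace address that nothing is bound to.
  struct sockaddr_un addr = {};
  addr.sun_family = AF_UNIX;
  constexpr char kPath[] = "\0nonexistent";
  memcpy(addr.sun_path, kPath, sizeof(kPath));

  // The pair is already connected, so the destination address is ignored.
  const char kStr[] = "abc";
  sendto(fds[1], kStr, 3, 0, reinterpret_cast<struct sockaddr*>(&addr),
         sizeof(addr));

  char buf[10] = {};
  read(fds[0], buf, sizeof(buf));  // receives the 3 bytes sent above

  close(fds[0]);
  close(fds[1]);
  return 0;
}
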
diff --git a/test/syscalls/linux/socket_unix_seqpacket.h b/test/syscalls/linux/socket_unix_seqpacket.h
deleted file mode 100644
index 30d9b9edf..000000000
--- a/test/syscalls/linux/socket_unix_seqpacket.h
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_LINUX_SOCKET_UNIX_SEQPACKET_H_
-#define GVISOR_TEST_SYSCALLS_LINUX_SOCKET_UNIX_SEQPACKET_H_
-
-#include "test/syscalls/linux/socket_test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Test fixture for tests that apply to pairs of connected seqpacket unix
-// sockets.
-using SeqpacketUnixSocketPairTest = SocketPairTest;
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_LINUX_SOCKET_UNIX_SEQPACKET_H_
diff --git a/test/syscalls/linux/socket_unix_seqpacket_local.cc b/test/syscalls/linux/socket_unix_seqpacket_local.cc
deleted file mode 100644
index 69a5f150d..000000000
--- a/test/syscalls/linux/socket_unix_seqpacket_local.cc
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <vector>
-
-#include "test/syscalls/linux/socket_non_stream.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/socket_unix_non_stream.h"
-#include "test/syscalls/linux/socket_unix_seqpacket.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-std::vector<SocketPairKind> GetSocketPairs() {
- return VecCat<SocketPairKind>(VecCat<SocketPairKind>(
- ApplyVec<SocketPairKind>(
- UnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_SEQPACKET},
- List<int>{0, SOCK_NONBLOCK})),
- ApplyVec<SocketPairKind>(
- FilesystemBoundUnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_SEQPACKET},
- List<int>{0, SOCK_NONBLOCK})),
- ApplyVec<SocketPairKind>(
- AbstractBoundUnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_SEQPACKET},
- List<int>{0, SOCK_NONBLOCK}))));
-}
-
-INSTANTIATE_TEST_SUITE_P(
- SeqpacketUnixSockets, NonStreamSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-INSTANTIATE_TEST_SUITE_P(
- SeqpacketUnixSockets, SeqpacketUnixSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-INSTANTIATE_TEST_SUITE_P(
- SeqpacketUnixSockets, UnixNonStreamSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_unix_stream.cc b/test/syscalls/linux/socket_unix_stream.cc
deleted file mode 100644
index 3ff810914..000000000
--- a/test/syscalls/linux/socket_unix_stream.cc
+++ /dev/null
@@ -1,236 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <poll.h>
-#include <stdio.h>
-#include <sys/un.h>
-
-#include "gtest/gtest.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// Test fixture for tests that apply to pairs of connected stream unix sockets.
-using StreamUnixSocketPairTest = SocketPairTest;
-
-TEST_P(StreamUnixSocketPairTest, WriteOneSideClosed) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- ASSERT_THAT(close(sockets->release_first_fd()), SyscallSucceeds());
- constexpr char kStr[] = "abc";
- ASSERT_THAT(write(sockets->second_fd(), kStr, 3),
- SyscallFailsWithErrno(EPIPE));
-}
-
-TEST_P(StreamUnixSocketPairTest, ReadOneSideClosed) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- ASSERT_THAT(close(sockets->release_first_fd()), SyscallSucceeds());
- char data[10] = {};
- ASSERT_THAT(read(sockets->second_fd(), data, sizeof(data)),
- SyscallSucceedsWithValue(0));
-}
-
-TEST_P(StreamUnixSocketPairTest, RecvmsgOneSideClosed) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- // Set a timeout so that recvmsg does not wait forever.
- struct timeval tv {
- .tv_sec = 0, .tv_usec = 10
- };
- EXPECT_THAT(setsockopt(sockets->second_fd(), SOL_SOCKET, SO_RCVTIMEO, &tv,
- sizeof(tv)),
- SyscallSucceeds());
-
- ASSERT_THAT(close(sockets->release_first_fd()), SyscallSucceeds());
-
- char received_data[10] = {};
- struct iovec iov;
- iov.iov_base = received_data;
- iov.iov_len = sizeof(received_data);
- struct msghdr msg = {};
- msg.msg_flags = -1;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(recvmsg(sockets->second_fd(), &msg, MSG_WAITALL),
- SyscallSucceedsWithValue(0));
-}
-
-TEST_P(StreamUnixSocketPairTest, ReadOneSideClosedWithUnreadData) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char buf[10] = {};
- ASSERT_THAT(RetryEINTR(write)(sockets->second_fd(), buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- ASSERT_THAT(shutdown(sockets->first_fd(), SHUT_RDWR), SyscallSucceeds());
-
- ASSERT_THAT(RetryEINTR(read)(sockets->second_fd(), buf, sizeof(buf)),
- SyscallSucceedsWithValue(0));
-
- ASSERT_THAT(close(sockets->release_first_fd()), SyscallSucceeds());
-
- ASSERT_THAT(RetryEINTR(read)(sockets->second_fd(), buf, sizeof(buf)),
- SyscallFailsWithErrno(ECONNRESET));
-}
-
-TEST_P(StreamUnixSocketPairTest, Sendto) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct sockaddr_un addr = {};
- addr.sun_family = AF_UNIX;
- constexpr char kPath[] = "\0nonexistent";
- memcpy(addr.sun_path, kPath, sizeof(kPath));
-
- constexpr char kStr[] = "abc";
- ASSERT_THAT(sendto(sockets->second_fd(), kStr, 3, 0, (struct sockaddr*)&addr,
- sizeof(addr)),
- SyscallFailsWithErrno(EISCONN));
-}
-
-TEST_P(StreamUnixSocketPairTest, SetAndGetSocketLinger) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct linger sl = {1, 5};
- EXPECT_THAT(
- setsockopt(sockets->first_fd(), SOL_SOCKET, SO_LINGER, &sl, sizeof(sl)),
- SyscallSucceedsWithValue(0));
-
- struct linger got_linger = {};
- socklen_t length = sizeof(sl);
- EXPECT_THAT(getsockopt(sockets->first_fd(), SOL_SOCKET, SO_LINGER,
- &got_linger, &length),
- SyscallSucceedsWithValue(0));
-
- ASSERT_EQ(length, sizeof(got_linger));
- EXPECT_EQ(0, memcmp(&got_linger, &sl, length));
-}
-
-TEST_P(StreamUnixSocketPairTest, GetSocketAcceptConn) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- int got = -1;
- socklen_t length = sizeof(got);
- ASSERT_THAT(
- getsockopt(sockets->first_fd(), SOL_SOCKET, SO_ACCEPTCONN, &got, &length),
- SyscallSucceedsWithValue(0));
-
- ASSERT_EQ(length, sizeof(got));
- EXPECT_EQ(got, 0);
-}
-
-TEST_P(StreamUnixSocketPairTest, SetSocketSendBuf) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- auto s = sockets->first_fd();
- int max = 0;
- int min = 0;
- {
- // Discover the maximum buffer size by setting it to a really large value.
- constexpr int kRcvBufSz = INT_MAX;
- ASSERT_THAT(
- setsockopt(s, SOL_SOCKET, SO_SNDBUF, &kRcvBufSz, sizeof(kRcvBufSz)),
- SyscallSucceeds());
-
- max = 0;
- socklen_t max_len = sizeof(max);
- ASSERT_THAT(getsockopt(s, SOL_SOCKET, SO_SNDBUF, &max, &max_len),
- SyscallSucceeds());
- }
-
- {
- // Discover minimum buffer size by setting it to zero.
- constexpr int kRcvBufSz = 0;
- ASSERT_THAT(
- setsockopt(s, SOL_SOCKET, SO_SNDBUF, &kRcvBufSz, sizeof(kRcvBufSz)),
- SyscallSucceeds());
-
- socklen_t min_len = sizeof(min);
- ASSERT_THAT(getsockopt(s, SOL_SOCKET, SO_SNDBUF, &min, &min_len),
- SyscallSucceeds());
- }
-
- int quarter_sz = min + (max - min) / 4;
- ASSERT_THAT(
- setsockopt(s, SOL_SOCKET, SO_SNDBUF, &quarter_sz, sizeof(quarter_sz)),
- SyscallSucceeds());
-
- int val = 0;
- socklen_t val_len = sizeof(val);
- ASSERT_THAT(getsockopt(s, SOL_SOCKET, SO_SNDBUF, &val, &val_len),
- SyscallSucceeds());
-
- // Linux doubles the value set by SO_SNDBUF/SO_RCVBUF.
- quarter_sz *= 2;
- ASSERT_EQ(quarter_sz, val);
-}
-
-TEST_P(StreamUnixSocketPairTest, IncreasedSocketSendBufUnblocksWrites) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- int sock = sockets->first_fd();
- int buf_size = 0;
- socklen_t buf_size_len = sizeof(buf_size);
- ASSERT_THAT(getsockopt(sock, SOL_SOCKET, SO_SNDBUF, &buf_size, &buf_size_len),
- SyscallSucceeds());
- int opts;
- ASSERT_THAT(opts = fcntl(sock, F_GETFL), SyscallSucceeds());
- opts |= O_NONBLOCK;
- ASSERT_THAT(fcntl(sock, F_SETFL, opts), SyscallSucceeds());
-
- std::vector<char> buf(buf_size / 4);
- // Write till the socket buffer is full.
- while (RetryEINTR(send)(sock, buf.data(), buf.size(), 0) != -1) {
- // Sleep to give Linux a chance to move data from the send buffer to the
- // receive buffer.
- absl::SleepFor(absl::Milliseconds(10)); // 10ms.
- }
- // The last error should have been EWOULDBLOCK.
- ASSERT_EQ(errno, EWOULDBLOCK);
-
- // Now increase the socket send buffer.
- buf_size = buf_size * 2;
- ASSERT_THAT(
- setsockopt(sock, SOL_SOCKET, SO_SNDBUF, &buf_size, sizeof(buf_size)),
- SyscallSucceeds());
-
- // The send should succeed again.
- ASSERT_THAT(RetryEINTR(send)(sock, buf.data(), buf.size(), 0),
- SyscallSucceeds());
-}
-
-INSTANTIATE_TEST_SUITE_P(
- AllUnixDomainSockets, StreamUnixSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(VecCat<SocketPairKind>(
- ApplyVec<SocketPairKind>(UnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_STREAM},
- List<int>{
- 0, SOCK_NONBLOCK})),
- ApplyVec<SocketPairKind>(FilesystemBoundUnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_STREAM},
- List<int>{
- 0, SOCK_NONBLOCK})),
- ApplyVec<SocketPairKind>(
- AbstractBoundUnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_STREAM},
- List<int>{0, SOCK_NONBLOCK}))))));
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
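
The SetSocketSendBuf test above probes the kernel's clamp range before picking a value. A standalone sketch of that probing trick, under the same assumptions as the other sketches here (plain socketpair, no fixtures, minimal error handling):

#include <climits>
#include <cstdio>
#include <sys/socket.h>
#include <unistd.h>

int main() {
  int fds[2];
  if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) != 0) return 1;
  const int s = fds[0];

  // Asking for INT_MAX reveals the clamped maximum; asking for 0 reveals the
  // clamped minimum. Both reads reflect the doubled, clamped values.
  int huge = INT_MAX, zero = 0, max = 0, min = 0;
  socklen_t len = sizeof(int);
  setsockopt(s, SOL_SOCKET, SO_SNDBUF, &huge, sizeof(huge));
  getsockopt(s, SOL_SOCKET, SO_SNDBUF, &max, &len);
  setsockopt(s, SOL_SOCKET, SO_SNDBUF, &zero, sizeof(zero));
  getsockopt(s, SOL_SOCKET, SO_SNDBUF, &min, &len);

  printf("SO_SNDBUF is clamped to [%d, %d]\n", min, max);

  close(fds[0]);
  close(fds[1]);
  return 0;
}
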
diff --git a/test/syscalls/linux/socket_unix_stream_blocking_local.cc b/test/syscalls/linux/socket_unix_stream_blocking_local.cc
deleted file mode 100644
index 8429bd429..000000000
--- a/test/syscalls/linux/socket_unix_stream_blocking_local.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <vector>
-
-#include "test/syscalls/linux/socket_stream_blocking.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-std::vector<SocketPairKind> GetSocketPairs() {
- return {
- UnixDomainSocketPair(SOCK_STREAM),
- FilesystemBoundUnixDomainSocketPair(SOCK_STREAM),
- AbstractBoundUnixDomainSocketPair(SOCK_STREAM),
- };
-}
-
-INSTANTIATE_TEST_SUITE_P(
- BlockingStreamUnixSockets, BlockingStreamSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_unix_stream_local.cc b/test/syscalls/linux/socket_unix_stream_local.cc
deleted file mode 100644
index a7e3449a9..000000000
--- a/test/syscalls/linux/socket_unix_stream_local.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <vector>
-
-#include "test/syscalls/linux/socket_stream.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-std::vector<SocketPairKind> GetSocketPairs() {
- return VecCat<SocketPairKind>(
- ApplyVec<SocketPairKind>(
- UnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_STREAM},
- List<int>{0, SOCK_NONBLOCK})),
- ApplyVec<SocketPairKind>(
- FilesystemBoundUnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_STREAM},
- List<int>{0, SOCK_NONBLOCK})),
- ApplyVec<SocketPairKind>(
- AbstractBoundUnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_STREAM},
- List<int>{0, SOCK_NONBLOCK})));
-}
-
-INSTANTIATE_TEST_SUITE_P(
- StreamUnixSockets, StreamSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_unix_stream_nonblock_local.cc b/test/syscalls/linux/socket_unix_stream_nonblock_local.cc
deleted file mode 100644
index 4b763c8e2..000000000
--- a/test/syscalls/linux/socket_unix_stream_nonblock_local.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#include <vector>
-
-#include "test/syscalls/linux/socket_stream_nonblock.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-std::vector<SocketPairKind> GetSocketPairs() {
- return {
- UnixDomainSocketPair(SOCK_STREAM | SOCK_NONBLOCK),
- FilesystemBoundUnixDomainSocketPair(SOCK_STREAM | SOCK_NONBLOCK),
- AbstractBoundUnixDomainSocketPair(SOCK_STREAM | SOCK_NONBLOCK),
- };
-}
-
-INSTANTIATE_TEST_SUITE_P(
- NonBlockingStreamUnixSockets, NonBlockingStreamSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/socket_unix_unbound_abstract.cc b/test/syscalls/linux/socket_unix_unbound_abstract.cc
deleted file mode 100644
index dd3d25450..000000000
--- a/test/syscalls/linux/socket_unix_unbound_abstract.cc
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <stdio.h>
-#include <sys/un.h>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// Test fixture for tests that apply to pairs of unbound abstract unix sockets.
-using UnboundAbstractUnixSocketPairTest = SocketPairTest;
-
-TEST_P(UnboundAbstractUnixSocketPairTest, AddressAfterNull) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct sockaddr_un addr =
- *reinterpret_cast<const struct sockaddr_un*>(sockets->first_addr());
- ASSERT_EQ(addr.sun_path[sizeof(addr.sun_path) - 1], 0);
- SKIP_IF(addr.sun_path[sizeof(addr.sun_path) - 2] != 0 ||
- addr.sun_path[sizeof(addr.sun_path) - 3] != 0);
-
- addr.sun_path[sizeof(addr.sun_path) - 2] = 'a';
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(bind(sockets->second_fd(),
- reinterpret_cast<struct sockaddr*>(&addr), sizeof(addr)),
- SyscallSucceeds());
-}
-
-TEST_P(UnboundAbstractUnixSocketPairTest, ShortAddressNotExtended) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct sockaddr_un addr =
- *reinterpret_cast<const struct sockaddr_un*>(sockets->first_addr());
- ASSERT_EQ(addr.sun_path[sizeof(addr.sun_path) - 1], 0);
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size() - 1),
- SyscallSucceeds());
-
- ASSERT_THAT(bind(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-}
-
-TEST_P(UnboundAbstractUnixSocketPairTest, BindNothing) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- struct sockaddr_un addr = {.sun_family = AF_UNIX};
- ASSERT_THAT(bind(sockets->first_fd(),
- reinterpret_cast<struct sockaddr*>(&addr), sizeof(addr)),
- SyscallSucceeds());
-}
-
-TEST_P(UnboundAbstractUnixSocketPairTest, ListenZeroBacklog) {
- SKIP_IF((GetParam().type & SOCK_DGRAM) != 0);
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- struct sockaddr_un addr = {};
- addr.sun_family = AF_UNIX;
- constexpr char kPath[] = "\x00/foo_bar";
- memcpy(addr.sun_path, kPath, sizeof(kPath));
- ASSERT_THAT(bind(sockets->first_fd(),
- reinterpret_cast<struct sockaddr*>(&addr), sizeof(addr)),
- SyscallSucceeds());
- ASSERT_THAT(listen(sockets->first_fd(), 0 /* backlog */), SyscallSucceeds());
- ASSERT_THAT(connect(sockets->second_fd(),
- reinterpret_cast<struct sockaddr*>(&addr), sizeof(addr)),
- SyscallSucceeds());
- auto sockets2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- {
- // Set the FD to O_NONBLOCK.
- int opts;
- int orig_opts;
- ASSERT_THAT(opts = fcntl(sockets2->first_fd(), F_GETFL), SyscallSucceeds());
- orig_opts = opts;
- opts |= O_NONBLOCK;
- ASSERT_THAT(fcntl(sockets2->first_fd(), F_SETFL, opts), SyscallSucceeds());
-
- ASSERT_THAT(
- connect(sockets2->first_fd(), reinterpret_cast<struct sockaddr*>(&addr),
- sizeof(addr)),
- SyscallFailsWithErrno(EAGAIN));
- }
- {
- // Set the FD to O_NONBLOCK.
- int opts;
- int orig_opts;
- ASSERT_THAT(opts = fcntl(sockets2->second_fd(), F_GETFL),
- SyscallSucceeds());
- orig_opts = opts;
- opts |= O_NONBLOCK;
- ASSERT_THAT(fcntl(sockets2->second_fd(), F_SETFL, opts), SyscallSucceeds());
-
- ASSERT_THAT(
- connect(sockets2->second_fd(),
- reinterpret_cast<struct sockaddr*>(&addr), sizeof(addr)),
- SyscallFailsWithErrno(EAGAIN));
- }
-}
-
-TEST_P(UnboundAbstractUnixSocketPairTest, GetSockNameFullLength) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- sockaddr_storage addr = {};
- socklen_t addr_len = sizeof(addr);
- ASSERT_THAT(getsockname(sockets->first_fd(),
- reinterpret_cast<struct sockaddr*>(&addr), &addr_len),
- SyscallSucceeds());
- EXPECT_EQ(addr_len, sockets->first_addr_size());
-}
-
-TEST_P(UnboundAbstractUnixSocketPairTest, GetSockNamePartialLength) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size() - 1),
- SyscallSucceeds());
-
- sockaddr_storage addr = {};
- socklen_t addr_len = sizeof(addr);
- ASSERT_THAT(getsockname(sockets->first_fd(),
- reinterpret_cast<struct sockaddr*>(&addr), &addr_len),
- SyscallSucceeds());
- EXPECT_EQ(addr_len, sockets->first_addr_size() - 1);
-}
-
-INSTANTIATE_TEST_SUITE_P(
- AllUnixDomainSockets, UnboundAbstractUnixSocketPairTest,
- ::testing::ValuesIn(ApplyVec<SocketPairKind>(
- AbstractUnboundUnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_STREAM, SOCK_SEQPACKET,
- SOCK_DGRAM},
- List<int>{0, SOCK_NONBLOCK}))));
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
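
The unbound-abstract tests above hinge on how abstract addresses are encoded: a leading NUL in sun_path marks the abstract namespace, and the address length alone decides which bytes belong to the name (hence ShortAddressNotExtended and AddressAfterNull). A standalone sketch of building and binding such an address; the "/example" name is made up for illustration.

#include <cstddef>
#include <cstring>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main() {
  int fd = socket(AF_UNIX, SOCK_STREAM, 0);
  if (fd < 0) return 1;

  struct sockaddr_un addr = {};
  addr.sun_family = AF_UNIX;
  const char kName[] = "/example";  // illustrative abstract name
  // sun_path[0] stays '\0' (abstract namespace); the name follows it and is
  // not NUL-terminated, since the address length bounds it.
  memcpy(addr.sun_path + 1, kName, strlen(kName));
  const socklen_t addrlen =
      offsetof(struct sockaddr_un, sun_path) + 1 + strlen(kName);

  bind(fd, reinterpret_cast<struct sockaddr*>(&addr), addrlen);
  close(fd);
  return 0;
}
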
diff --git a/test/syscalls/linux/socket_unix_unbound_dgram.cc b/test/syscalls/linux/socket_unix_unbound_dgram.cc
deleted file mode 100644
index 907dca0f1..000000000
--- a/test/syscalls/linux/socket_unix_unbound_dgram.cc
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <stdio.h>
-#include <sys/socket.h>
-#include <sys/un.h>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// Test fixture for tests that apply to pairs of unbound dgram unix sockets.
-using UnboundDgramUnixSocketPairTest = SocketPairTest;
-
-TEST_P(UnboundDgramUnixSocketPairTest, BindConnect) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
- ASSERT_THAT(connect(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-}
-
-TEST_P(UnboundDgramUnixSocketPairTest, SelfConnect) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
- ASSERT_THAT(connect(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-}
-
-TEST_P(UnboundDgramUnixSocketPairTest, DoubleConnect) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
- ASSERT_THAT(connect(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
- ASSERT_THAT(connect(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-}
-
-TEST_P(UnboundDgramUnixSocketPairTest, GetRemoteAddress) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
- ASSERT_THAT(connect(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- socklen_t addressLength = sockets->first_addr_size();
- struct sockaddr_storage address = {};
- ASSERT_THAT(getpeername(sockets->second_fd(), (struct sockaddr*)(&address),
- &addressLength),
- SyscallSucceeds());
- EXPECT_EQ(
- 0, memcmp(&address, sockets->first_addr(), sockets->first_addr_size()));
-}
-
-TEST_P(UnboundDgramUnixSocketPairTest, Sendto) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- ASSERT_THAT(sendto(sockets->second_fd(), sent_data, sizeof(sent_data), 0,
- sockets->first_addr(), sockets->first_addr_size()),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- char received_data[sizeof(sent_data)];
- ASSERT_THAT(ReadFd(sockets->first_fd(), received_data, sizeof(received_data)),
- SyscallSucceedsWithValue(sizeof(received_data)));
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(received_data)));
-}
-
-TEST_P(UnboundDgramUnixSocketPairTest, ZeroWriteAllowed) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
- ASSERT_THAT(connect(sockets->second_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- char sent_data[3];
- // Send a zero length packet.
- ASSERT_THAT(write(sockets->second_fd(), sent_data, 0),
- SyscallSucceedsWithValue(0));
- // Receive the packet.
- char received_data[sizeof(sent_data)];
- ASSERT_THAT(read(sockets->first_fd(), received_data, sizeof(received_data)),
- SyscallSucceedsWithValue(0));
-}
-
-TEST_P(UnboundDgramUnixSocketPairTest, Listen) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- ASSERT_THAT(listen(sockets->first_fd(), 0), SyscallFailsWithErrno(ENOTSUP));
-}
-
-TEST_P(UnboundDgramUnixSocketPairTest, Accept) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- ASSERT_THAT(accept(sockets->first_fd(), nullptr, nullptr),
- SyscallFailsWithErrno(ENOTSUP));
-}
-
-TEST_P(UnboundDgramUnixSocketPairTest, SendtoWithoutConnect) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- char data = 'a';
- ASSERT_THAT(
- RetryEINTR(sendto)(sockets->second_fd(), &data, sizeof(data), 0,
- sockets->first_addr(), sockets->first_addr_size()),
- SyscallSucceedsWithValue(sizeof(data)));
-}
-
-TEST_P(UnboundDgramUnixSocketPairTest, SendtoWithoutConnectPassCreds) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- SetSoPassCred(sockets->first_fd());
- char data = 'a';
- ASSERT_THAT(
- RetryEINTR(sendto)(sockets->second_fd(), &data, sizeof(data), 0,
- sockets->first_addr(), sockets->first_addr_size()),
- SyscallSucceedsWithValue(sizeof(data)));
- ucred creds;
- creds.pid = -1;
- char buf[sizeof(data) + 1];
- ASSERT_NO_FATAL_FAILURE(
- RecvCreds(sockets->first_fd(), &creds, buf, sizeof(buf), sizeof(data)));
- EXPECT_EQ(0, memcmp(&data, buf, sizeof(data)));
- EXPECT_THAT(getpid(), SyscallSucceedsWithValue(creds.pid));
-}
-
-INSTANTIATE_TEST_SUITE_P(
- AllUnixDomainSockets, UnboundDgramUnixSocketPairTest,
- ::testing::ValuesIn(VecCat<SocketPairKind>(
- ApplyVec<SocketPairKind>(FilesystemUnboundUnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_DGRAM},
- List<int>{
- 0, SOCK_NONBLOCK})),
- ApplyVec<SocketPairKind>(
- AbstractUnboundUnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_DGRAM},
- List<int>{0, SOCK_NONBLOCK})))));
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
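
The datagram tests above pin down two behaviors of unbound SOCK_DGRAM unix sockets: sendto() works without a prior connect(), and a zero-length datagram is delivered as a real packet whose read() returns 0. A minimal sketch of both follows, using a throwaway /tmp path chosen purely for illustration.

#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

#include <cassert>

int main() {
  int receiver = socket(AF_UNIX, SOCK_DGRAM, 0);
  int sender = socket(AF_UNIX, SOCK_DGRAM, 0);
  assert(receiver >= 0 && sender >= 0);

  sockaddr_un addr = {};
  addr.sun_family = AF_UNIX;
  strncpy(addr.sun_path, "/tmp/dgram-demo.sock", sizeof(addr.sun_path) - 1);
  unlink(addr.sun_path);  // best effort; the path may not exist yet
  assert(bind(receiver, reinterpret_cast<sockaddr*>(&addr), sizeof(addr)) == 0);

  // The sender never calls connect(); the destination is named per call.
  char byte = 'a';
  assert(sendto(sender, &byte, 1, 0, reinterpret_cast<sockaddr*>(&addr),
                sizeof(addr)) == 1);
  // A zero-length datagram is legal and travels as its own packet.
  assert(sendto(sender, &byte, 0, 0, reinterpret_cast<sockaddr*>(&addr),
                sizeof(addr)) == 0);

  char buf[8];
  assert(read(receiver, buf, sizeof(buf)) == 1);  // the one-byte datagram
  assert(read(receiver, buf, sizeof(buf)) == 0);  // the zero-length datagram

  close(sender);
  close(receiver);
  unlink(addr.sun_path);
  return 0;
}
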
diff --git a/test/syscalls/linux/socket_unix_unbound_filesystem.cc b/test/syscalls/linux/socket_unix_unbound_filesystem.cc
deleted file mode 100644
index a035fb095..000000000
--- a/test/syscalls/linux/socket_unix_unbound_filesystem.cc
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <stdio.h>
-#include <sys/un.h>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// Test fixture for tests that apply to pairs of unbound filesystem unix
-// sockets.
-using UnboundFilesystemUnixSocketPairTest = SocketPairTest;
-
-TEST_P(UnboundFilesystemUnixSocketPairTest, AddressAfterNull) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- struct sockaddr_un addr =
- *reinterpret_cast<const struct sockaddr_un*>(sockets->first_addr());
- ASSERT_EQ(addr.sun_path[sizeof(addr.sun_path) - 1], 0);
- SKIP_IF(addr.sun_path[sizeof(addr.sun_path) - 2] != 0 ||
- addr.sun_path[sizeof(addr.sun_path) - 3] != 0);
-
- addr.sun_path[sizeof(addr.sun_path) - 2] = 'a';
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- ASSERT_THAT(bind(sockets->second_fd(),
- reinterpret_cast<struct sockaddr*>(&addr), sizeof(addr)),
- SyscallFailsWithErrno(EADDRINUSE));
-}
-
-TEST_P(UnboundFilesystemUnixSocketPairTest, GetSockNameLength) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- sockaddr_storage got_addr = {};
- socklen_t got_addr_len = sizeof(got_addr);
- ASSERT_THAT(
- getsockname(sockets->first_fd(),
- reinterpret_cast<struct sockaddr*>(&got_addr), &got_addr_len),
- SyscallSucceeds());
-
- sockaddr_un want_addr =
- *reinterpret_cast<const struct sockaddr_un*>(sockets->first_addr());
-
- EXPECT_EQ(got_addr_len,
- strlen(want_addr.sun_path) + 1 + sizeof(want_addr.sun_family));
-}
-
-TEST_P(UnboundFilesystemUnixSocketPairTest, OpenSocketWithTruncate) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- const struct sockaddr_un *addr =
- reinterpret_cast<const struct sockaddr_un *>(sockets->first_addr());
- EXPECT_THAT(chmod(addr->sun_path, 0777), SyscallSucceeds());
- EXPECT_THAT(open(addr->sun_path, O_RDONLY | O_TRUNC),
- SyscallFailsWithErrno(ENXIO));
-}
-
-INSTANTIATE_TEST_SUITE_P(
- AllUnixDomainSockets, UnboundFilesystemUnixSocketPairTest,
- ::testing::ValuesIn(ApplyVec<SocketPairKind>(
- FilesystemUnboundUnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_STREAM, SOCK_SEQPACKET,
- SOCK_DGRAM},
- List<int>{0, SOCK_NONBLOCK}))));
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
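
GetSockNameLength above encodes the pathname-socket rule that the reported address length is sizeof(sun_family) plus the path length plus its trailing NUL. The short sketch below illustrates that relationship; the path in it is illustrative only.

#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

#include <cassert>

int main() {
  int fd = socket(AF_UNIX, SOCK_STREAM, 0);
  assert(fd >= 0);

  sockaddr_un addr = {};
  addr.sun_family = AF_UNIX;
  const char path[] = "/tmp/fs-sock-demo.sock";  // hypothetical path
  strncpy(addr.sun_path, path, sizeof(addr.sun_path) - 1);
  unlink(path);  // best effort
  assert(bind(fd, reinterpret_cast<sockaddr*>(&addr), sizeof(addr)) == 0);

  sockaddr_un got = {};
  socklen_t got_len = sizeof(got);
  assert(getsockname(fd, reinterpret_cast<sockaddr*>(&got), &got_len) == 0);
  // Family field, the path bytes, and the trailing NUL are all counted.
  assert(got_len == sizeof(got.sun_family) + strlen(path) + 1);

  close(fd);
  unlink(path);
  return 0;
}
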
diff --git a/test/syscalls/linux/socket_unix_unbound_seqpacket.cc b/test/syscalls/linux/socket_unix_unbound_seqpacket.cc
deleted file mode 100644
index cb99030f5..000000000
--- a/test/syscalls/linux/socket_unix_unbound_seqpacket.cc
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <stdio.h>
-#include <sys/un.h>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// Test fixture for tests that apply to pairs of unbound seqpacket unix sockets.
-using UnboundUnixSeqpacketSocketPairTest = SocketPairTest;
-
-TEST_P(UnboundUnixSeqpacketSocketPairTest, SendtoWithoutConnect) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- char data = 'a';
- ASSERT_THAT(sendto(sockets->second_fd(), &data, sizeof(data), 0,
- sockets->first_addr(), sockets->first_addr_size()),
- SyscallFailsWithErrno(ENOTCONN));
-}
-
-TEST_P(UnboundUnixSeqpacketSocketPairTest, SendtoWithoutConnectIgnoresAddr) {
- // FIXME(b/68223466): gVisor tries to find /foo/bar and thus returns ENOENT.
- if (IsRunningOnGvisor()) {
- return;
- }
-
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- // Even a bogus address is completely ignored.
- constexpr char kPath[] = "/foo/bar";
-
- // Sanity check that kPath doesn't exist.
- struct stat s;
- ASSERT_THAT(stat(kPath, &s), SyscallFailsWithErrno(ENOENT));
-
- struct sockaddr_un addr = {};
- addr.sun_family = AF_UNIX;
- memcpy(addr.sun_path, kPath, sizeof(kPath));
-
- char data = 'a';
- ASSERT_THAT(
- sendto(sockets->second_fd(), &data, sizeof(data), 0,
- reinterpret_cast<const struct sockaddr*>(&addr), sizeof(addr)),
- SyscallFailsWithErrno(ENOTCONN));
-}
-
-INSTANTIATE_TEST_SUITE_P(
- AllUnixDomainSockets, UnboundUnixSeqpacketSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(VecCat<SocketPairKind>(
- ApplyVec<SocketPairKind>(
- FilesystemUnboundUnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_SEQPACKET},
- List<int>{0, SOCK_NONBLOCK})),
- ApplyVec<SocketPairKind>(
- AbstractUnboundUnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_SEQPACKET},
- List<int>{0, SOCK_NONBLOCK}))))));
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
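
As the FIXME above notes, Linux never resolves the destination address for an unconnected SOCK_SEQPACKET socket and simply fails with ENOTCONN, whereas gVisor historically returned ENOENT. A rough sketch of the Linux-side behavior these tests rely on follows; the destination path is deliberately nonexistent.

#include <errno.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

#include <cassert>

int main() {
  int fd = socket(AF_UNIX, SOCK_SEQPACKET, 0);
  assert(fd >= 0);

  sockaddr_un dest = {};
  dest.sun_family = AF_UNIX;
  // Deliberately nonexistent destination; on Linux it is never looked up.
  strncpy(dest.sun_path, "/tmp/does-not-exist-demo.sock",
          sizeof(dest.sun_path) - 1);

  char byte = 'a';
  ssize_t n = sendto(fd, &byte, 1, 0, reinterpret_cast<sockaddr*>(&dest),
                     sizeof(dest));
  assert(n == -1 && errno == ENOTCONN);  // not ENOENT: the address is ignored

  close(fd);
  return 0;
}
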
diff --git a/test/syscalls/linux/socket_unix_unbound_stream.cc b/test/syscalls/linux/socket_unix_unbound_stream.cc
deleted file mode 100644
index f185dded3..000000000
--- a/test/syscalls/linux/socket_unix_unbound_stream.cc
+++ /dev/null
@@ -1,733 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <stdio.h>
-#include <sys/un.h>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// Test fixture for tests that apply to pairs of connected unix stream sockets.
-using UnixStreamSocketPairTest = SocketPairTest;
-
-// FDPassPartialRead checks that a sent control message can no longer be
-// received once any of its associated data has been read while ignoring the
-// control message, i.e. by using read(2) instead of recvmsg(2).
-TEST_P(UnixStreamSocketPairTest, FDPassPartialRead) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(),
- sent_data, sizeof(sent_data)));
-
- char received_data[sizeof(sent_data) / 2];
- ASSERT_THAT(
- ReadFd(sockets->second_fd(), received_data, sizeof(received_data)),
- SyscallSucceedsWithValue(sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(received_data)));
-
- RecvNoCmsg(sockets->second_fd(), received_data, sizeof(received_data));
- EXPECT_EQ(0, memcmp(sent_data + sizeof(received_data), received_data,
- sizeof(received_data)));
-}
-
-TEST_P(UnixStreamSocketPairTest, FDPassCoalescedRead) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data1[20];
- RandomizeBuffer(sent_data1, sizeof(sent_data1));
-
- auto pair1 =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair1->second_fd(),
- sent_data1, sizeof(sent_data1)));
-
- char sent_data2[20];
- RandomizeBuffer(sent_data2, sizeof(sent_data2));
-
- auto pair2 =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair2->second_fd(),
- sent_data2, sizeof(sent_data2)));
-
- char received_data[sizeof(sent_data1) + sizeof(sent_data2)];
- ASSERT_THAT(
- ReadFd(sockets->second_fd(), received_data, sizeof(received_data)),
- SyscallSucceedsWithValue(sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data1, received_data, sizeof(sent_data1)));
- EXPECT_EQ(0, memcmp(sent_data2, received_data + sizeof(sent_data1),
- sizeof(sent_data2)));
-}
-
-// ZeroLengthMessageFDDiscarded checks that control messages associated with
-// zero length messages are discarded.
-TEST_P(UnixStreamSocketPairTest, ZeroLengthMessageFDDiscarded) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- // Zero length arrays are invalid in ISO C++, so allocate one of size 1 and
- // send a length of 0.
- char sent_data1[1] = {};
-
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- ASSERT_NO_FATAL_FAILURE(
- SendSingleFD(sockets->first_fd(), pair->second_fd(), sent_data1, 0));
-
- char sent_data2[20];
- RandomizeBuffer(sent_data2, sizeof(sent_data2));
-
- ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data2, sizeof(sent_data2)),
- SyscallSucceedsWithValue(sizeof(sent_data2)));
-
- char received_data[sizeof(sent_data2)] = {};
-
- RecvNoCmsg(sockets->second_fd(), received_data, sizeof(received_data));
- EXPECT_EQ(0, memcmp(sent_data2, received_data, sizeof(received_data)));
-}
-
-// FDPassCoalescedRecv checks that control messages not in the first message are
-// preserved in a coalesced recv.
-TEST_P(UnixStreamSocketPairTest, FDPassCoalescedRecv) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data, sizeof(sent_data) / 2),
- SyscallSucceedsWithValue(sizeof(sent_data) / 2));
-
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(),
- sent_data + sizeof(sent_data) / 2,
- sizeof(sent_data) / 2));
-
- char received_data[sizeof(sent_data)];
-
- int fd = -1;
- ASSERT_NO_FATAL_FAILURE(RecvSingleFD(sockets->second_fd(), &fd, received_data,
- sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-
- ASSERT_NO_FATAL_FAILURE(TransferTest(fd, pair->first_fd()));
-}
-
-// ReadsNotCoalescedAfterFDPass checks that messages after a message containing
-// an FD control message are not coalesced.
-TEST_P(UnixStreamSocketPairTest, ReadsNotCoalescedAfterFDPass) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- auto pair =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(),
- sent_data, sizeof(sent_data) / 2));
-
- ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data + sizeof(sent_data) / 2,
- sizeof(sent_data) / 2),
- SyscallSucceedsWithValue(sizeof(sent_data) / 2));
-
- char received_data[sizeof(sent_data)];
-
- int fd = -1;
- ASSERT_NO_FATAL_FAILURE(RecvSingleFD(sockets->second_fd(), &fd, received_data,
- sizeof(received_data),
- sizeof(sent_data) / 2));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data) / 2));
-
- ASSERT_NO_FATAL_FAILURE(TransferTest(fd, pair->first_fd()));
- EXPECT_THAT(close(fd), SyscallSucceeds());
-
- ASSERT_NO_FATAL_FAILURE(
- RecvNoCmsg(sockets->second_fd(), received_data, sizeof(sent_data) / 2));
-
- EXPECT_EQ(0, memcmp(sent_data + sizeof(sent_data) / 2, received_data,
- sizeof(sent_data) / 2));
-}
-
-// FDPassNotCombined checks that FD control messages are not combined in a
-// coalesced read.
-TEST_P(UnixStreamSocketPairTest, FDPassNotCombined) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- auto pair1 =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair1->second_fd(),
- sent_data, sizeof(sent_data) / 2));
-
- auto pair2 =
- ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());
-
- ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair2->second_fd(),
- sent_data + sizeof(sent_data) / 2,
- sizeof(sent_data) / 2));
-
- char received_data[sizeof(sent_data)];
-
- int fd = -1;
- ASSERT_NO_FATAL_FAILURE(RecvSingleFD(sockets->second_fd(), &fd, received_data,
- sizeof(received_data),
- sizeof(sent_data) / 2));
-
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data) / 2));
-
- ASSERT_NO_FATAL_FAILURE(TransferTest(fd, pair1->first_fd()));
-
- EXPECT_THAT(close(fd), SyscallSucceeds());
- fd = -1;
-
- ASSERT_NO_FATAL_FAILURE(RecvSingleFD(sockets->second_fd(), &fd, received_data,
- sizeof(received_data),
- sizeof(sent_data) / 2));
-
- EXPECT_EQ(0, memcmp(sent_data + sizeof(sent_data) / 2, received_data,
- sizeof(sent_data) / 2));
-
- ASSERT_NO_FATAL_FAILURE(TransferTest(fd, pair2->first_fd()));
- EXPECT_THAT(close(fd), SyscallSucceeds());
-}
-
-TEST_P(UnixStreamSocketPairTest, CredPassPartialRead) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data[20];
- RandomizeBuffer(sent_data, sizeof(sent_data));
-
- struct ucred sent_creds;
-
- ASSERT_THAT(sent_creds.pid = getpid(), SyscallSucceeds());
- ASSERT_THAT(sent_creds.uid = getuid(), SyscallSucceeds());
- ASSERT_THAT(sent_creds.gid = getgid(), SyscallSucceeds());
-
- ASSERT_NO_FATAL_FAILURE(
- SendCreds(sockets->first_fd(), sent_creds, sent_data, sizeof(sent_data)));
-
- int one = 1;
- ASSERT_THAT(setsockopt(sockets->second_fd(), SOL_SOCKET, SO_PASSCRED, &one,
- sizeof(one)),
- SyscallSucceeds());
-
- for (int i = 0; i < 2; i++) {
- char received_data[10];
- struct ucred received_creds;
- ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds,
- received_data, sizeof(received_data),
- sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data + i * sizeof(received_data), received_data,
- sizeof(received_data)));
- EXPECT_EQ(sent_creds.pid, received_creds.pid);
- EXPECT_EQ(sent_creds.uid, received_creds.uid);
- EXPECT_EQ(sent_creds.gid, received_creds.gid);
- }
-}
-
-// Unix stream sockets peek in the same way as datagram sockets.
-//
-// SinglePeek checks that only a single message is peekable in a single recv.
-TEST_P(UnixStreamSocketPairTest, SinglePeek) {
- if (!IsRunningOnGvisor()) {
-    // Don't run this test on Linux kernels newer than 4.3.x; Linux kernel
-    // commit 9f389e35674f5b086edd70ed524ca0f287259725 changed this behavior.
-    // We used to target 3.11 compatibility, so disable this test on newer
-    // kernels.
- //
- // NOTE(b/118902768): Bring this up to Linux 4.4 compatibility.
- auto version = ASSERT_NO_ERRNO_AND_VALUE(GetKernelVersion());
- SKIP_IF(version.major > 4 || (version.major == 4 && version.minor >= 3));
- }
-
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
- char sent_data[40];
- RandomizeBuffer(sent_data, sizeof(sent_data));
- ASSERT_THAT(RetryEINTR(send)(sockets->first_fd(), sent_data,
- sizeof(sent_data) / 2, 0),
- SyscallSucceedsWithValue(sizeof(sent_data) / 2));
- ASSERT_THAT(
- RetryEINTR(send)(sockets->first_fd(), sent_data + sizeof(sent_data) / 2,
- sizeof(sent_data) / 2, 0),
- SyscallSucceedsWithValue(sizeof(sent_data) / 2));
- char received_data[sizeof(sent_data)];
- for (int i = 0; i < 3; i++) {
- memset(received_data, 0, sizeof(received_data));
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), received_data,
- sizeof(received_data), MSG_PEEK),
- SyscallSucceedsWithValue(sizeof(sent_data) / 2));
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data) / 2));
- }
- memset(received_data, 0, sizeof(received_data));
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), received_data,
- sizeof(sent_data) / 2, 0),
- SyscallSucceedsWithValue(sizeof(sent_data) / 2));
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data) / 2));
- memset(received_data, 0, sizeof(received_data));
- ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), received_data,
- sizeof(sent_data) / 2, 0),
- SyscallSucceedsWithValue(sizeof(sent_data) / 2));
- EXPECT_EQ(0, memcmp(sent_data + sizeof(sent_data) / 2, received_data,
- sizeof(sent_data) / 2));
-}
-
-TEST_P(UnixStreamSocketPairTest, CredsNotCoalescedUp) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data1[20];
- RandomizeBuffer(sent_data1, sizeof(sent_data1));
-
- ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data1, sizeof(sent_data1)),
- SyscallSucceedsWithValue(sizeof(sent_data1)));
-
- SetSoPassCred(sockets->second_fd());
-
- char sent_data2[20];
- RandomizeBuffer(sent_data2, sizeof(sent_data2));
-
- ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data2, sizeof(sent_data2)),
- SyscallSucceedsWithValue(sizeof(sent_data2)));
-
- char received_data[sizeof(sent_data1) + sizeof(sent_data2)];
-
- struct ucred received_creds;
- ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds,
- received_data, sizeof(received_data),
- sizeof(sent_data1)));
-
- EXPECT_EQ(0, memcmp(sent_data1, received_data, sizeof(sent_data1)));
-
- struct ucred want_creds {
- 0, 65534, 65534
- };
-
- EXPECT_EQ(want_creds.pid, received_creds.pid);
- EXPECT_EQ(want_creds.uid, received_creds.uid);
- EXPECT_EQ(want_creds.gid, received_creds.gid);
-
- ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds,
- received_data, sizeof(received_data),
- sizeof(sent_data2)));
-
- EXPECT_EQ(0, memcmp(sent_data2, received_data, sizeof(sent_data2)));
-
- ASSERT_THAT(want_creds.pid = getpid(), SyscallSucceeds());
- ASSERT_THAT(want_creds.uid = getuid(), SyscallSucceeds());
- ASSERT_THAT(want_creds.gid = getgid(), SyscallSucceeds());
-
- EXPECT_EQ(want_creds.pid, received_creds.pid);
- EXPECT_EQ(want_creds.uid, received_creds.uid);
- EXPECT_EQ(want_creds.gid, received_creds.gid);
-}
-
-TEST_P(UnixStreamSocketPairTest, CredsNotCoalescedDown) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- SetSoPassCred(sockets->second_fd());
-
- char sent_data1[20];
- RandomizeBuffer(sent_data1, sizeof(sent_data1));
-
- ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data1, sizeof(sent_data1)),
- SyscallSucceedsWithValue(sizeof(sent_data1)));
-
- UnsetSoPassCred(sockets->second_fd());
-
- char sent_data2[20];
- RandomizeBuffer(sent_data2, sizeof(sent_data2));
-
- ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data2, sizeof(sent_data2)),
- SyscallSucceedsWithValue(sizeof(sent_data2)));
-
- SetSoPassCred(sockets->second_fd());
-
- char received_data[sizeof(sent_data1) + sizeof(sent_data2)];
- struct ucred received_creds;
-
- ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds,
- received_data, sizeof(received_data),
- sizeof(sent_data1)));
-
- EXPECT_EQ(0, memcmp(sent_data1, received_data, sizeof(sent_data1)));
-
- struct ucred want_creds;
- ASSERT_THAT(want_creds.pid = getpid(), SyscallSucceeds());
- ASSERT_THAT(want_creds.uid = getuid(), SyscallSucceeds());
- ASSERT_THAT(want_creds.gid = getgid(), SyscallSucceeds());
-
- EXPECT_EQ(want_creds.pid, received_creds.pid);
- EXPECT_EQ(want_creds.uid, received_creds.uid);
- EXPECT_EQ(want_creds.gid, received_creds.gid);
-
- ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds,
- received_data, sizeof(received_data),
- sizeof(sent_data2)));
-
- EXPECT_EQ(0, memcmp(sent_data2, received_data, sizeof(sent_data2)));
-
- want_creds = {0, 65534, 65534};
-
- EXPECT_EQ(want_creds.pid, received_creds.pid);
- EXPECT_EQ(want_creds.uid, received_creds.uid);
- EXPECT_EQ(want_creds.gid, received_creds.gid);
-}
-
-TEST_P(UnixStreamSocketPairTest, CoalescedCredsNoPasscred) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- SetSoPassCred(sockets->second_fd());
-
- char sent_data1[20];
- RandomizeBuffer(sent_data1, sizeof(sent_data1));
-
- ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data1, sizeof(sent_data1)),
- SyscallSucceedsWithValue(sizeof(sent_data1)));
-
- UnsetSoPassCred(sockets->second_fd());
-
- char sent_data2[20];
- RandomizeBuffer(sent_data2, sizeof(sent_data2));
-
- ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data2, sizeof(sent_data2)),
- SyscallSucceedsWithValue(sizeof(sent_data2)));
-
- char received_data[sizeof(sent_data1) + sizeof(sent_data2)];
-
- ASSERT_NO_FATAL_FAILURE(
- RecvNoCmsg(sockets->second_fd(), received_data, sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data1, received_data, sizeof(sent_data1)));
- EXPECT_EQ(0, memcmp(sent_data2, received_data + sizeof(sent_data1),
- sizeof(sent_data2)));
-}
-
-TEST_P(UnixStreamSocketPairTest, CoalescedCreds1) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data1[20];
- RandomizeBuffer(sent_data1, sizeof(sent_data1));
-
- ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data1, sizeof(sent_data1)),
- SyscallSucceedsWithValue(sizeof(sent_data1)));
-
- char sent_data2[20];
- RandomizeBuffer(sent_data2, sizeof(sent_data2));
-
- ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data2, sizeof(sent_data2)),
- SyscallSucceedsWithValue(sizeof(sent_data2)));
-
- SetSoPassCred(sockets->second_fd());
-
- char received_data[sizeof(sent_data1) + sizeof(sent_data2)];
- struct ucred received_creds;
-
- ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds,
- received_data, sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data1, received_data, sizeof(sent_data1)));
- EXPECT_EQ(0, memcmp(sent_data2, received_data + sizeof(sent_data1),
- sizeof(sent_data2)));
-
- struct ucred want_creds {
- 0, 65534, 65534
- };
-
- EXPECT_EQ(want_creds.pid, received_creds.pid);
- EXPECT_EQ(want_creds.uid, received_creds.uid);
- EXPECT_EQ(want_creds.gid, received_creds.gid);
-}
-
-TEST_P(UnixStreamSocketPairTest, CoalescedCreds2) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- SetSoPassCred(sockets->second_fd());
-
- char sent_data1[20];
- RandomizeBuffer(sent_data1, sizeof(sent_data1));
-
- ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data1, sizeof(sent_data1)),
- SyscallSucceedsWithValue(sizeof(sent_data1)));
-
- char sent_data2[20];
- RandomizeBuffer(sent_data2, sizeof(sent_data2));
-
- ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data2, sizeof(sent_data2)),
- SyscallSucceedsWithValue(sizeof(sent_data2)));
-
- char received_data[sizeof(sent_data1) + sizeof(sent_data2)];
- struct ucred received_creds;
-
- ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds,
- received_data, sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data1, received_data, sizeof(sent_data1)));
- EXPECT_EQ(0, memcmp(sent_data2, received_data + sizeof(sent_data1),
- sizeof(sent_data2)));
-
- struct ucred want_creds;
- ASSERT_THAT(want_creds.pid = getpid(), SyscallSucceeds());
- ASSERT_THAT(want_creds.uid = getuid(), SyscallSucceeds());
- ASSERT_THAT(want_creds.gid = getgid(), SyscallSucceeds());
-
- EXPECT_EQ(want_creds.pid, received_creds.pid);
- EXPECT_EQ(want_creds.uid, received_creds.uid);
- EXPECT_EQ(want_creds.gid, received_creds.gid);
-}
-
-TEST_P(UnixStreamSocketPairTest, NonCoalescedDifferingCreds1) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- char sent_data1[20];
- RandomizeBuffer(sent_data1, sizeof(sent_data1));
-
- ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data1, sizeof(sent_data1)),
- SyscallSucceedsWithValue(sizeof(sent_data1)));
-
- SetSoPassCred(sockets->second_fd());
-
- char sent_data2[20];
- RandomizeBuffer(sent_data2, sizeof(sent_data2));
-
- ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data2, sizeof(sent_data2)),
- SyscallSucceedsWithValue(sizeof(sent_data2)));
-
- char received_data1[sizeof(sent_data1) + sizeof(sent_data2)];
- struct ucred received_creds1;
-
- ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds1,
- received_data1, sizeof(sent_data1)));
-
- EXPECT_EQ(0, memcmp(sent_data1, received_data1, sizeof(sent_data1)));
-
- struct ucred want_creds1 {
- 0, 65534, 65534
- };
-
- EXPECT_EQ(want_creds1.pid, received_creds1.pid);
- EXPECT_EQ(want_creds1.uid, received_creds1.uid);
- EXPECT_EQ(want_creds1.gid, received_creds1.gid);
-
- char received_data2[sizeof(sent_data1) + sizeof(sent_data2)];
- struct ucred received_creds2;
-
- ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds2,
- received_data2, sizeof(sent_data2)));
-
- EXPECT_EQ(0, memcmp(sent_data2, received_data2, sizeof(sent_data2)));
-
- struct ucred want_creds2;
- ASSERT_THAT(want_creds2.pid = getpid(), SyscallSucceeds());
- ASSERT_THAT(want_creds2.uid = getuid(), SyscallSucceeds());
- ASSERT_THAT(want_creds2.gid = getgid(), SyscallSucceeds());
-
- EXPECT_EQ(want_creds2.pid, received_creds2.pid);
- EXPECT_EQ(want_creds2.uid, received_creds2.uid);
- EXPECT_EQ(want_creds2.gid, received_creds2.gid);
-}
-
-TEST_P(UnixStreamSocketPairTest, NonCoalescedDifferingCreds2) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- SetSoPassCred(sockets->second_fd());
-
- char sent_data1[20];
- RandomizeBuffer(sent_data1, sizeof(sent_data1));
-
- ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data1, sizeof(sent_data1)),
- SyscallSucceedsWithValue(sizeof(sent_data1)));
-
- UnsetSoPassCred(sockets->second_fd());
-
- char sent_data2[20];
- RandomizeBuffer(sent_data2, sizeof(sent_data2));
-
- ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data2, sizeof(sent_data2)),
- SyscallSucceedsWithValue(sizeof(sent_data2)));
-
- SetSoPassCred(sockets->second_fd());
-
- char received_data1[sizeof(sent_data1) + sizeof(sent_data2)];
- struct ucred received_creds1;
-
- ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds1,
- received_data1, sizeof(sent_data1)));
-
- EXPECT_EQ(0, memcmp(sent_data1, received_data1, sizeof(sent_data1)));
-
- struct ucred want_creds1;
- ASSERT_THAT(want_creds1.pid = getpid(), SyscallSucceeds());
- ASSERT_THAT(want_creds1.uid = getuid(), SyscallSucceeds());
- ASSERT_THAT(want_creds1.gid = getgid(), SyscallSucceeds());
-
- EXPECT_EQ(want_creds1.pid, received_creds1.pid);
- EXPECT_EQ(want_creds1.uid, received_creds1.uid);
- EXPECT_EQ(want_creds1.gid, received_creds1.gid);
-
- char received_data2[sizeof(sent_data1) + sizeof(sent_data2)];
- struct ucred received_creds2;
-
- ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds2,
- received_data2, sizeof(sent_data2)));
-
- EXPECT_EQ(0, memcmp(sent_data2, received_data2, sizeof(sent_data2)));
-
- struct ucred want_creds2 {
- 0, 65534, 65534
- };
-
- EXPECT_EQ(want_creds2.pid, received_creds2.pid);
- EXPECT_EQ(want_creds2.uid, received_creds2.uid);
- EXPECT_EQ(want_creds2.gid, received_creds2.gid);
-}
-
-TEST_P(UnixStreamSocketPairTest, CoalescedDifferingCreds) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- SetSoPassCred(sockets->second_fd());
-
- char sent_data1[20];
- RandomizeBuffer(sent_data1, sizeof(sent_data1));
-
- ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data1, sizeof(sent_data1)),
- SyscallSucceedsWithValue(sizeof(sent_data1)));
-
- char sent_data2[20];
- RandomizeBuffer(sent_data2, sizeof(sent_data2));
-
- ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data2, sizeof(sent_data2)),
- SyscallSucceedsWithValue(sizeof(sent_data2)));
-
- UnsetSoPassCred(sockets->second_fd());
-
- char sent_data3[20];
- RandomizeBuffer(sent_data3, sizeof(sent_data3));
-
- ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data3, sizeof(sent_data3)),
- SyscallSucceedsWithValue(sizeof(sent_data3)));
-
- char received_data[sizeof(sent_data1) + sizeof(sent_data2) +
- sizeof(sent_data3)];
-
- ASSERT_NO_FATAL_FAILURE(
- RecvNoCmsg(sockets->second_fd(), received_data, sizeof(received_data)));
-
- EXPECT_EQ(0, memcmp(sent_data1, received_data, sizeof(sent_data1)));
- EXPECT_EQ(0, memcmp(sent_data2, received_data + sizeof(sent_data1),
- sizeof(sent_data2)));
- EXPECT_EQ(0, memcmp(sent_data3,
- received_data + sizeof(sent_data1) + sizeof(sent_data2),
- sizeof(sent_data3)));
-}
-
-INSTANTIATE_TEST_SUITE_P(
- AllUnixDomainSockets, UnixStreamSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(VecCat<SocketPairKind>(
- ApplyVec<SocketPairKind>(UnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_STREAM},
- List<int>{
- 0, SOCK_NONBLOCK})),
- ApplyVec<SocketPairKind>(FilesystemBoundUnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_STREAM},
- List<int>{
- 0, SOCK_NONBLOCK})),
- ApplyVec<SocketPairKind>(
- AbstractBoundUnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_STREAM},
- List<int>{0, SOCK_NONBLOCK}))))));
-
-// Test fixture for tests that apply to pairs of unbound unix stream sockets.
-using UnboundUnixStreamSocketPairTest = SocketPairTest;
-
-TEST_P(UnboundUnixStreamSocketPairTest, SendtoWithoutConnect) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- char data = 'a';
- ASSERT_THAT(sendto(sockets->second_fd(), &data, sizeof(data), 0,
- sockets->first_addr(), sockets->first_addr_size()),
- SyscallFailsWithErrno(EOPNOTSUPP));
-}
-
-TEST_P(UnboundUnixStreamSocketPairTest, SendtoWithoutConnectIgnoresAddr) {
- // FIXME(b/68223466): gVisor tries to find /foo/bar and thus returns ENOENT.
- if (IsRunningOnGvisor()) {
- return;
- }
-
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
-
- ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),
- sockets->first_addr_size()),
- SyscallSucceeds());
-
- // Even a bogus address is completely ignored.
- constexpr char kPath[] = "/foo/bar";
-
- // Sanity check that kPath doesn't exist.
- struct stat s;
- ASSERT_THAT(stat(kPath, &s), SyscallFailsWithErrno(ENOENT));
-
- struct sockaddr_un addr = {};
- addr.sun_family = AF_UNIX;
- memcpy(addr.sun_path, kPath, sizeof(kPath));
-
- char data = 'a';
- ASSERT_THAT(
- sendto(sockets->second_fd(), &data, sizeof(data), 0,
- reinterpret_cast<const struct sockaddr*>(&addr), sizeof(addr)),
- SyscallFailsWithErrno(EOPNOTSUPP));
-}
-
-INSTANTIATE_TEST_SUITE_P(
- AllUnixDomainSockets, UnboundUnixStreamSocketPairTest,
- ::testing::ValuesIn(IncludeReversals(VecCat<SocketPairKind>(
- ApplyVec<SocketPairKind>(FilesystemUnboundUnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_STREAM},
- List<int>{
- 0, SOCK_NONBLOCK})),
- ApplyVec<SocketPairKind>(
- AbstractUnboundUnixDomainSocketPair,
- AllBitwiseCombinations(List<int>{SOCK_STREAM},
- List<int>{0, SOCK_NONBLOCK}))))));
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
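
The stream tests above lean on helpers such as SendSingleFD and RecvSingleFD from socket_test_util. The sketch below is not that implementation, only a minimal illustration of the SCM_RIGHTS plumbing such helpers typically wrap; SendOneFd is a name invented here.

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

#include <cassert>

// Sends one byte of data plus `fd_to_pass` over `sock` via SCM_RIGHTS.
void SendOneFd(int sock, int fd_to_pass) {
  char data = 'x';
  iovec iov = {&data, sizeof(data)};

  alignas(cmsghdr) char control[CMSG_SPACE(sizeof(int))] = {};
  msghdr msg = {};
  msg.msg_iov = &iov;
  msg.msg_iovlen = 1;
  msg.msg_control = control;
  msg.msg_controllen = sizeof(control);

  cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
  cmsg->cmsg_level = SOL_SOCKET;
  cmsg->cmsg_type = SCM_RIGHTS;
  cmsg->cmsg_len = CMSG_LEN(sizeof(int));
  memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));

  assert(sendmsg(sock, &msg, 0) == 1);
}

int main() {
  int pair[2];
  assert(socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == 0);

  int to_pass[2];
  assert(pipe(to_pass) == 0);
  SendOneFd(pair[0], to_pass[1]);

  // Reading with plain read() consumes the byte while ignoring the control
  // message; as the tests above show, what happens to the passed descriptor
  // then depends on how and when the surrounding data is consumed.
  char buf;
  assert(read(pair[1], &buf, 1) == 1);

  close(pair[0]);
  close(pair[1]);
  close(to_pass[0]);
  close(to_pass[1]);
  return 0;
}

Receiving the descriptor properly would use recvmsg(2) and walk the returned control buffer with CMSG_FIRSTHDR/CMSG_NXTHDR in the same fashion.
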
diff --git a/test/syscalls/linux/splice.cc b/test/syscalls/linux/splice.cc
deleted file mode 100644
index c85f6da0b..000000000
--- a/test/syscalls/linux/splice.cc
+++ /dev/null
@@ -1,939 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <linux/unistd.h>
-#include <sys/eventfd.h>
-#include <sys/resource.h>
-#include <sys/sendfile.h>
-#include <sys/time.h>
-#include <unistd.h>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/strings/string_view.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/signal_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-#include "test/util/timer_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(SpliceTest, TwoRegularFiles) {
- // Create temp files.
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const TempPath out_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- // Open the input file as read only.
- const FileDescriptor in_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDONLY));
-
- // Open the output file as write only.
- const FileDescriptor out_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_WRONLY));
-
-  // Verify that it is rejected as expected, regardless of offsets.
- loff_t in_offset = 0;
- loff_t out_offset = 0;
- EXPECT_THAT(splice(in_fd.get(), &in_offset, out_fd.get(), &out_offset, 1, 0),
- SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(splice(in_fd.get(), nullptr, out_fd.get(), &out_offset, 1, 0),
- SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(splice(in_fd.get(), &in_offset, out_fd.get(), nullptr, 1, 0),
- SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(splice(in_fd.get(), nullptr, out_fd.get(), nullptr, 1, 0),
- SyscallFailsWithErrno(EINVAL));
-}
-
-int memfd_create(const std::string& name, unsigned int flags) {
- return syscall(__NR_memfd_create, name.c_str(), flags);
-}
-
-TEST(SpliceTest, NegativeOffset) {
- // Create a new pipe.
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- const FileDescriptor rfd(fds[0]);
- const FileDescriptor wfd(fds[1]);
-
- // Fill the pipe.
- std::vector<char> buf(kPageSize);
- RandomizeBuffer(buf.data(), buf.size());
- ASSERT_THAT(write(wfd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(kPageSize));
-
-  // Create a memfd to use as the output file.
- int fd;
- EXPECT_THAT(fd = memfd_create("negative", 0), SyscallSucceeds());
- const FileDescriptor out_fd(fd);
-
- loff_t out_offset = 0xffffffffffffffffull;
- constexpr int kSize = 2;
- EXPECT_THAT(splice(rfd.get(), nullptr, out_fd.get(), &out_offset, kSize, 0),
- SyscallFailsWithErrno(EINVAL));
-}
-
-// Write offset + size overflows int64.
-//
-// This is a regression test for b/148041624.
-TEST(SpliceTest, WriteOverflow) {
- // Create a new pipe.
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- const FileDescriptor rfd(fds[0]);
- const FileDescriptor wfd(fds[1]);
-
- // Fill the pipe.
- std::vector<char> buf(kPageSize);
- RandomizeBuffer(buf.data(), buf.size());
- ASSERT_THAT(write(wfd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(kPageSize));
-
- // Open the output file.
- int fd;
- EXPECT_THAT(fd = memfd_create("overflow", 0), SyscallSucceeds());
- const FileDescriptor out_fd(fd);
-
- // out_offset + kSize overflows INT64_MAX.
- loff_t out_offset = 0x7ffffffffffffffeull;
- constexpr int kSize = 3;
- EXPECT_THAT(splice(rfd.get(), nullptr, out_fd.get(), &out_offset, kSize, 0),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(SpliceTest, SamePipe) {
- // Create a new pipe.
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- const FileDescriptor rfd(fds[0]);
- const FileDescriptor wfd(fds[1]);
-
- // Fill the pipe.
- std::vector<char> buf(kPageSize);
- RandomizeBuffer(buf.data(), buf.size());
- ASSERT_THAT(write(wfd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(kPageSize));
-
- // Attempt to splice to itself.
- EXPECT_THAT(splice(rfd.get(), nullptr, wfd.get(), nullptr, kPageSize, 0),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(TeeTest, SamePipe) {
- // Create a new pipe.
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- const FileDescriptor rfd(fds[0]);
- const FileDescriptor wfd(fds[1]);
-
- // Fill the pipe.
- std::vector<char> buf(kPageSize);
- RandomizeBuffer(buf.data(), buf.size());
- ASSERT_THAT(write(wfd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(kPageSize));
-
- // Attempt to tee to itself.
- EXPECT_THAT(tee(rfd.get(), wfd.get(), kPageSize, 0),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(TeeTest, RegularFile) {
- // Open some file.
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor in_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDWR));
-
- // Create a new pipe.
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- const FileDescriptor rfd(fds[0]);
- const FileDescriptor wfd(fds[1]);
-
- // Attempt to tee from the file.
- EXPECT_THAT(tee(in_fd.get(), wfd.get(), kPageSize, 0),
- SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(tee(rfd.get(), in_fd.get(), kPageSize, 0),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(SpliceTest, PipeOffsets) {
- // Create two new pipes.
- int first[2], second[2];
- ASSERT_THAT(pipe(first), SyscallSucceeds());
- const FileDescriptor rfd1(first[0]);
- const FileDescriptor wfd1(first[1]);
- ASSERT_THAT(pipe(second), SyscallSucceeds());
- const FileDescriptor rfd2(second[0]);
- const FileDescriptor wfd2(second[1]);
-
- // All pipe offsets should be rejected.
- loff_t in_offset = 0;
- loff_t out_offset = 0;
- EXPECT_THAT(splice(rfd1.get(), &in_offset, wfd2.get(), &out_offset, 1, 0),
- SyscallFailsWithErrno(ESPIPE));
- EXPECT_THAT(splice(rfd1.get(), nullptr, wfd2.get(), &out_offset, 1, 0),
- SyscallFailsWithErrno(ESPIPE));
- EXPECT_THAT(splice(rfd1.get(), &in_offset, wfd2.get(), nullptr, 1, 0),
- SyscallFailsWithErrno(ESPIPE));
-}
-
-// Event FDs may be used with splice without an offset.
-TEST(SpliceTest, FromEventFD) {
- // Open the input eventfd with an initial value so that it is readable.
- constexpr uint64_t kEventFDValue = 1;
- int efd;
- ASSERT_THAT(efd = eventfd(kEventFDValue, 0), SyscallSucceeds());
- const FileDescriptor in_fd(efd);
-
- // Create a new pipe.
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- const FileDescriptor rfd(fds[0]);
- const FileDescriptor wfd(fds[1]);
-
- // Splice 8-byte eventfd value to pipe.
- constexpr int kEventFDSize = 8;
- EXPECT_THAT(splice(in_fd.get(), nullptr, wfd.get(), nullptr, kEventFDSize, 0),
- SyscallSucceedsWithValue(kEventFDSize));
-
- // Contents should be equal.
- std::vector<char> rbuf(kEventFDSize);
- ASSERT_THAT(read(rfd.get(), rbuf.data(), rbuf.size()),
- SyscallSucceedsWithValue(kEventFDSize));
- EXPECT_EQ(memcmp(rbuf.data(), &kEventFDValue, rbuf.size()), 0);
-}
-
-// Event FDs may not be used with splice with an offset.
-TEST(SpliceTest, FromEventFDOffset) {
- int efd;
- ASSERT_THAT(efd = eventfd(0, 0), SyscallSucceeds());
- const FileDescriptor in_fd(efd);
-
- // Create a new pipe.
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- const FileDescriptor rfd(fds[0]);
- const FileDescriptor wfd(fds[1]);
-
- // Attempt to splice 8-byte eventfd value to pipe with offset.
- //
- // This is not allowed because eventfd doesn't support pread.
- constexpr int kEventFDSize = 8;
- loff_t in_off = 0;
- EXPECT_THAT(splice(in_fd.get(), &in_off, wfd.get(), nullptr, kEventFDSize, 0),
- SyscallFailsWithErrno(EINVAL));
-}
-
-// Event FDs may not be used with splice with an offset.
-TEST(SpliceTest, ToEventFDOffset) {
- // Create a new pipe.
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- const FileDescriptor rfd(fds[0]);
- const FileDescriptor wfd(fds[1]);
-
- // Fill with a value.
- constexpr int kEventFDSize = 8;
- std::vector<char> buf(kEventFDSize);
- buf[0] = 1;
- ASSERT_THAT(write(wfd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(kEventFDSize));
-
- int efd;
- ASSERT_THAT(efd = eventfd(0, 0), SyscallSucceeds());
- const FileDescriptor out_fd(efd);
-
-  // Attempt to splice an 8-byte value from the pipe to the eventfd with an
-  // offset.
- //
- // This is not allowed because eventfd doesn't support pwrite.
- loff_t out_off = 0;
- EXPECT_THAT(
- splice(rfd.get(), nullptr, out_fd.get(), &out_off, kEventFDSize, 0),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(SpliceTest, ToPipe) {
- // Open the input file.
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor in_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDWR));
-
- // Fill with some random data.
- std::vector<char> buf(kPageSize);
- RandomizeBuffer(buf.data(), buf.size());
- ASSERT_THAT(write(in_fd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(kPageSize));
- ASSERT_THAT(lseek(in_fd.get(), 0, SEEK_SET), SyscallSucceedsWithValue(0));
-
- // Create a new pipe.
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- const FileDescriptor rfd(fds[0]);
- const FileDescriptor wfd(fds[1]);
-
- // Splice to the pipe.
- EXPECT_THAT(splice(in_fd.get(), nullptr, wfd.get(), nullptr, kPageSize, 0),
- SyscallSucceedsWithValue(kPageSize));
-
- // Contents should be equal.
- std::vector<char> rbuf(kPageSize);
- ASSERT_THAT(read(rfd.get(), rbuf.data(), rbuf.size()),
- SyscallSucceedsWithValue(kPageSize));
- EXPECT_EQ(memcmp(rbuf.data(), buf.data(), buf.size()), 0);
-}
-
-TEST(SpliceTest, ToPipeEOF) {
- // Create and open an empty input file.
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor in_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDONLY));
-
- // Create a new pipe.
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- const FileDescriptor rfd(fds[0]);
- const FileDescriptor wfd(fds[1]);
-
- // Splice from the empty file to the pipe.
- EXPECT_THAT(splice(in_fd.get(), nullptr, wfd.get(), nullptr, 123, 0),
- SyscallSucceedsWithValue(0));
-}
-
-TEST(SpliceTest, ToPipeOffset) {
- // Open the input file.
- const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor in_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDWR));
-
- // Fill with some random data.
- std::vector<char> buf(kPageSize);
- RandomizeBuffer(buf.data(), buf.size());
- ASSERT_THAT(write(in_fd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(kPageSize));
-
- // Create a new pipe.
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- const FileDescriptor rfd(fds[0]);
- const FileDescriptor wfd(fds[1]);
-
- // Splice to the pipe.
- loff_t in_offset = kPageSize / 2;
- EXPECT_THAT(
- splice(in_fd.get(), &in_offset, wfd.get(), nullptr, kPageSize / 2, 0),
- SyscallSucceedsWithValue(kPageSize / 2));
-
- // Contents should be equal to only the second part.
- std::vector<char> rbuf(kPageSize / 2);
- ASSERT_THAT(read(rfd.get(), rbuf.data(), rbuf.size()),
- SyscallSucceedsWithValue(kPageSize / 2));
- EXPECT_EQ(memcmp(rbuf.data(), buf.data() + (kPageSize / 2), rbuf.size()), 0);
-}
-
-TEST(SpliceTest, FromPipe) {
- // Create a new pipe.
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- const FileDescriptor rfd(fds[0]);
- const FileDescriptor wfd(fds[1]);
-
- // Fill with some random data.
- std::vector<char> buf(kPageSize);
- RandomizeBuffer(buf.data(), buf.size());
- ASSERT_THAT(write(wfd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(kPageSize));
-
- // Open the output file.
- const TempPath out_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor out_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_RDWR));
-
- // Splice to the output file.
- EXPECT_THAT(splice(rfd.get(), nullptr, out_fd.get(), nullptr, kPageSize, 0),
- SyscallSucceedsWithValue(kPageSize));
-
- // The offset of the output should be equal to kPageSize. We assert that and
- // reset to zero so that we can read the contents and ensure they match.
- EXPECT_THAT(lseek(out_fd.get(), 0, SEEK_CUR),
- SyscallSucceedsWithValue(kPageSize));
- ASSERT_THAT(lseek(out_fd.get(), 0, SEEK_SET), SyscallSucceedsWithValue(0));
-
- // Contents should be equal.
- std::vector<char> rbuf(kPageSize);
- ASSERT_THAT(read(out_fd.get(), rbuf.data(), rbuf.size()),
- SyscallSucceedsWithValue(kPageSize));
- EXPECT_EQ(memcmp(rbuf.data(), buf.data(), buf.size()), 0);
-}
-
-TEST(SpliceTest, FromPipeMultiple) {
- // Create a new pipe.
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- const FileDescriptor rfd(fds[0]);
- const FileDescriptor wfd(fds[1]);
-
- std::string buf = "abcABC123";
- ASSERT_THAT(write(wfd.get(), buf.c_str(), buf.size()),
- SyscallSucceedsWithValue(buf.size()));
-
- // Open the output file.
- const TempPath out_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor out_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_RDWR));
-
- // Splice from the pipe to the output file over several calls.
- EXPECT_THAT(splice(rfd.get(), nullptr, out_fd.get(), nullptr, 3, 0),
- SyscallSucceedsWithValue(3));
- EXPECT_THAT(splice(rfd.get(), nullptr, out_fd.get(), nullptr, 3, 0),
- SyscallSucceedsWithValue(3));
- EXPECT_THAT(splice(rfd.get(), nullptr, out_fd.get(), nullptr, 3, 0),
- SyscallSucceedsWithValue(3));
-
- // Reset cursor to zero so that we can check the contents.
- ASSERT_THAT(lseek(out_fd.get(), 0, SEEK_SET), SyscallSucceedsWithValue(0));
-
- // Contents should be equal.
- std::vector<char> rbuf(buf.size());
- ASSERT_THAT(read(out_fd.get(), rbuf.data(), rbuf.size()),
- SyscallSucceedsWithValue(rbuf.size()));
- EXPECT_EQ(memcmp(rbuf.data(), buf.c_str(), buf.size()), 0);
-}
-
-TEST(SpliceTest, FromPipeOffset) {
- // Create a new pipe.
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- const FileDescriptor rfd(fds[0]);
- const FileDescriptor wfd(fds[1]);
-
- // Fill with some random data.
- std::vector<char> buf(kPageSize);
- RandomizeBuffer(buf.data(), buf.size());
- ASSERT_THAT(write(wfd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(kPageSize));
-
-  // Open the output file.
- const TempPath out_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor out_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_RDWR));
-
- // Splice to the output file.
- loff_t out_offset = kPageSize / 2;
- EXPECT_THAT(
- splice(rfd.get(), nullptr, out_fd.get(), &out_offset, kPageSize, 0),
- SyscallSucceedsWithValue(kPageSize));
-
- // Content should reflect the splice. We write to a specific offset in the
- // file, so the internals should now be allocated sparsely.
- std::vector<char> rbuf(kPageSize);
- ASSERT_THAT(read(out_fd.get(), rbuf.data(), rbuf.size()),
- SyscallSucceedsWithValue(kPageSize));
- std::vector<char> zbuf(kPageSize / 2);
- memset(zbuf.data(), 0, zbuf.size());
- EXPECT_EQ(memcmp(rbuf.data(), zbuf.data(), zbuf.size()), 0);
- EXPECT_EQ(memcmp(rbuf.data() + kPageSize / 2, buf.data(), kPageSize / 2), 0);
-}
-
-TEST(SpliceTest, TwoPipes) {
- // Create two new pipes.
- int first[2], second[2];
- ASSERT_THAT(pipe(first), SyscallSucceeds());
- const FileDescriptor rfd1(first[0]);
- const FileDescriptor wfd1(first[1]);
- ASSERT_THAT(pipe(second), SyscallSucceeds());
- const FileDescriptor rfd2(second[0]);
- const FileDescriptor wfd2(second[1]);
-
- // Fill with some random data.
- std::vector<char> buf(kPageSize);
- RandomizeBuffer(buf.data(), buf.size());
- ASSERT_THAT(write(wfd1.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(kPageSize));
-
- // Splice to the second pipe, using two operations.
- EXPECT_THAT(
- splice(rfd1.get(), nullptr, wfd2.get(), nullptr, kPageSize / 2, 0),
- SyscallSucceedsWithValue(kPageSize / 2));
- EXPECT_THAT(
- splice(rfd1.get(), nullptr, wfd2.get(), nullptr, kPageSize / 2, 0),
- SyscallSucceedsWithValue(kPageSize / 2));
-
- // Content should reflect the splice.
- std::vector<char> rbuf(kPageSize);
- ASSERT_THAT(read(rfd2.get(), rbuf.data(), rbuf.size()),
- SyscallSucceedsWithValue(kPageSize));
- EXPECT_EQ(memcmp(rbuf.data(), buf.data(), kPageSize), 0);
-}
-
-TEST(SpliceTest, TwoPipesPartialRead) {
- // Create two pipes.
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- const FileDescriptor first_rfd(fds[0]);
- const FileDescriptor first_wfd(fds[1]);
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- const FileDescriptor second_rfd(fds[0]);
- const FileDescriptor second_wfd(fds[1]);
-
- // Write half a page of data to the first pipe.
- std::vector<char> buf(kPageSize / 2);
- RandomizeBuffer(buf.data(), buf.size());
- ASSERT_THAT(write(first_wfd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(kPageSize / 2));
-
- // Attempt to splice one page from the first pipe to the second; it should
- // immediately return after splicing the half-page previously written to the
- // first pipe.
- EXPECT_THAT(
- splice(first_rfd.get(), nullptr, second_wfd.get(), nullptr, kPageSize, 0),
- SyscallSucceedsWithValue(kPageSize / 2));
-}
-
-TEST(SpliceTest, TwoPipesPartialWrite) {
- // Create two pipes.
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- const FileDescriptor first_rfd(fds[0]);
- const FileDescriptor first_wfd(fds[1]);
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- const FileDescriptor second_rfd(fds[0]);
- const FileDescriptor second_wfd(fds[1]);
-
- // Write two pages of data to the first pipe.
- std::vector<char> buf(2 * kPageSize);
- RandomizeBuffer(buf.data(), buf.size());
- ASSERT_THAT(write(first_wfd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(2 * kPageSize));
-
- // Limit the second pipe to two pages, then write one page of data to it.
- ASSERT_THAT(fcntl(second_wfd.get(), F_SETPIPE_SZ, 2 * kPageSize),
- SyscallSucceeds());
- ASSERT_THAT(write(second_wfd.get(), buf.data(), buf.size() / 2),
- SyscallSucceedsWithValue(kPageSize));
-
- // Attempt to splice two pages from the first pipe to the second; it should
- // immediately return after splicing the first page previously written to the
- // first pipe.
- EXPECT_THAT(splice(first_rfd.get(), nullptr, second_wfd.get(), nullptr,
- 2 * kPageSize, 0),
- SyscallSucceedsWithValue(kPageSize));
-}
-
-TEST(TeeTest, TwoPipesPartialRead) {
- // Create two pipes.
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- const FileDescriptor first_rfd(fds[0]);
- const FileDescriptor first_wfd(fds[1]);
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- const FileDescriptor second_rfd(fds[0]);
- const FileDescriptor second_wfd(fds[1]);
-
- // Write half a page of data to the first pipe.
- std::vector<char> buf(kPageSize / 2);
- RandomizeBuffer(buf.data(), buf.size());
- ASSERT_THAT(write(first_wfd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(kPageSize / 2));
-
- // Attempt to tee one page from the first pipe to the second; it should
- // immediately return after copying the half-page previously written to the
- // first pipe.
- EXPECT_THAT(tee(first_rfd.get(), second_wfd.get(), kPageSize, 0),
- SyscallSucceedsWithValue(kPageSize / 2));
-}
-
-TEST(TeeTest, TwoPipesPartialWrite) {
- // Create two pipes.
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- const FileDescriptor first_rfd(fds[0]);
- const FileDescriptor first_wfd(fds[1]);
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- const FileDescriptor second_rfd(fds[0]);
- const FileDescriptor second_wfd(fds[1]);
-
- // Write two pages of data to the first pipe.
- std::vector<char> buf(2 * kPageSize);
- RandomizeBuffer(buf.data(), buf.size());
- ASSERT_THAT(write(first_wfd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(2 * kPageSize));
-
- // Limit the second pipe to two pages, then write one page of data to it.
- ASSERT_THAT(fcntl(second_wfd.get(), F_SETPIPE_SZ, 2 * kPageSize),
- SyscallSucceeds());
- ASSERT_THAT(write(second_wfd.get(), buf.data(), buf.size() / 2),
- SyscallSucceedsWithValue(kPageSize));
-
- // Attempt to tee two pages from the first pipe to the second; it should
- // immediately return after copying the first page previously written to the
- // first pipe.
- EXPECT_THAT(tee(first_rfd.get(), second_wfd.get(), 2 * kPageSize, 0),
- SyscallSucceedsWithValue(kPageSize));
-}
-
-TEST(SpliceTest, TwoPipesCircular) {
- // This test deadlocks the sentry on VFS1 because VFS1 splice ordering is
- // based on fs.File.UniqueID, which does not prevent circular ordering between
- // e.g. inode-level locks taken by fs.FileOperations.
- SKIP_IF(IsRunningWithVFS1());
-
- // Create two pipes.
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- const FileDescriptor first_rfd(fds[0]);
- const FileDescriptor first_wfd(fds[1]);
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- const FileDescriptor second_rfd(fds[0]);
- const FileDescriptor second_wfd(fds[1]);
-
- // On Linux, each pipe is normally limited to
- // include/linux/pipe_fs_i.h:PIPE_DEF_BUFFERS buffers worth of data.
- constexpr size_t PIPE_DEF_BUFFERS = 16;
-
- // Write some data to each pipe. Below we splice 1 byte at a time between
- // pipes, which very quickly causes each byte to be stored in a separate
- // buffer, so we must ensure that the total amount of data in the system is <=
- // PIPE_DEF_BUFFERS bytes.
- std::vector<char> buf(PIPE_DEF_BUFFERS / 2);
- RandomizeBuffer(buf.data(), buf.size());
- ASSERT_THAT(write(first_wfd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(buf.size()));
- ASSERT_THAT(write(second_wfd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(buf.size()));
-
- // Have another thread splice from the second pipe to the first, while we
- // splice from the first to the second. The test passes if this does not
- // deadlock.
- const int kIterations = 1000;
- DisableSave ds;
- ScopedThread t([&]() {
- for (int i = 0; i < kIterations; i++) {
- ASSERT_THAT(
- splice(second_rfd.get(), nullptr, first_wfd.get(), nullptr, 1, 0),
- SyscallSucceedsWithValue(1));
- }
- });
- for (int i = 0; i < kIterations; i++) {
- ASSERT_THAT(
- splice(first_rfd.get(), nullptr, second_wfd.get(), nullptr, 1, 0),
- SyscallSucceedsWithValue(1));
- }
-}
-
-TEST(SpliceTest, Blocking) {
- // Create two new pipes.
- int first[2], second[2];
- ASSERT_THAT(pipe(first), SyscallSucceeds());
- const FileDescriptor rfd1(first[0]);
- const FileDescriptor wfd1(first[1]);
- ASSERT_THAT(pipe(second), SyscallSucceeds());
- const FileDescriptor rfd2(second[0]);
- const FileDescriptor wfd2(second[1]);
-
- // This thread writes to the main pipe.
- std::vector<char> buf(kPageSize);
- RandomizeBuffer(buf.data(), buf.size());
- ScopedThread t([&]() {
- ASSERT_THAT(write(wfd1.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(kPageSize));
- });
-
- // Attempt a splice immediately; it should block.
- EXPECT_THAT(splice(rfd1.get(), nullptr, wfd2.get(), nullptr, kPageSize, 0),
- SyscallSucceedsWithValue(kPageSize));
-
- // Thread should be joinable.
- t.Join();
-
- // Content should reflect the splice.
- std::vector<char> rbuf(kPageSize);
- ASSERT_THAT(read(rfd2.get(), rbuf.data(), rbuf.size()),
- SyscallSucceedsWithValue(kPageSize));
- EXPECT_EQ(memcmp(rbuf.data(), buf.data(), kPageSize), 0);
-}
-
-TEST(TeeTest, Blocking) {
- // Create two new pipes.
- int first[2], second[2];
- ASSERT_THAT(pipe(first), SyscallSucceeds());
- const FileDescriptor rfd1(first[0]);
- const FileDescriptor wfd1(first[1]);
- ASSERT_THAT(pipe(second), SyscallSucceeds());
- const FileDescriptor rfd2(second[0]);
- const FileDescriptor wfd2(second[1]);
-
- // This thread writes to the main pipe.
- std::vector<char> buf(kPageSize);
- RandomizeBuffer(buf.data(), buf.size());
- ScopedThread t([&]() {
- ASSERT_THAT(write(wfd1.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(kPageSize));
- });
-
- // Attempt a tee immediately; it should block.
- EXPECT_THAT(tee(rfd1.get(), wfd2.get(), kPageSize, 0),
- SyscallSucceedsWithValue(kPageSize));
-
- // Thread should be joinable.
- t.Join();
-
- // Content should reflect the tee, in both pipes.
- std::vector<char> rbuf(kPageSize);
- ASSERT_THAT(read(rfd2.get(), rbuf.data(), rbuf.size()),
- SyscallSucceedsWithValue(kPageSize));
- EXPECT_EQ(memcmp(rbuf.data(), buf.data(), kPageSize), 0);
- ASSERT_THAT(read(rfd1.get(), rbuf.data(), rbuf.size()),
- SyscallSucceedsWithValue(kPageSize));
- EXPECT_EQ(memcmp(rbuf.data(), buf.data(), kPageSize), 0);
-}
-
-TEST(TeeTest, BlockingWrite) {
- // Create two new pipes.
- int first[2], second[2];
- ASSERT_THAT(pipe(first), SyscallSucceeds());
- const FileDescriptor rfd1(first[0]);
- const FileDescriptor wfd1(first[1]);
- ASSERT_THAT(pipe(second), SyscallSucceeds());
- const FileDescriptor rfd2(second[0]);
- const FileDescriptor wfd2(second[1]);
-
- // Make some data available to be read.
- std::vector<char> buf1(kPageSize);
- RandomizeBuffer(buf1.data(), buf1.size());
- ASSERT_THAT(write(wfd1.get(), buf1.data(), buf1.size()),
- SyscallSucceedsWithValue(kPageSize));
-
- // Fill up the write pipe's buffer.
- int pipe_size = -1;
- ASSERT_THAT(pipe_size = fcntl(wfd2.get(), F_GETPIPE_SZ), SyscallSucceeds());
- std::vector<char> buf2(pipe_size);
- ASSERT_THAT(write(wfd2.get(), buf2.data(), buf2.size()),
- SyscallSucceedsWithValue(pipe_size));
-
- ScopedThread t([&]() {
- absl::SleepFor(absl::Milliseconds(100));
- ASSERT_THAT(read(rfd2.get(), buf2.data(), buf2.size()),
- SyscallSucceedsWithValue(pipe_size));
- });
-
- // Attempt a tee immediately; it should block.
- EXPECT_THAT(tee(rfd1.get(), wfd2.get(), kPageSize, 0),
- SyscallSucceedsWithValue(kPageSize));
-
- // Thread should be joinable.
- t.Join();
-
- // Content should reflect the tee.
- std::vector<char> rbuf(kPageSize);
- ASSERT_THAT(read(rfd2.get(), rbuf.data(), rbuf.size()),
- SyscallSucceedsWithValue(kPageSize));
- EXPECT_EQ(memcmp(rbuf.data(), buf1.data(), kPageSize), 0);
-}
-
-TEST(SpliceTest, NonBlocking) {
- // Create two new pipes.
- int first[2], second[2];
- ASSERT_THAT(pipe(first), SyscallSucceeds());
- const FileDescriptor rfd1(first[0]);
- const FileDescriptor wfd1(first[1]);
- ASSERT_THAT(pipe(second), SyscallSucceeds());
- const FileDescriptor rfd2(second[0]);
- const FileDescriptor wfd2(second[1]);
-
- // Splice with no data to back it.
- EXPECT_THAT(splice(rfd1.get(), nullptr, wfd2.get(), nullptr, kPageSize,
- SPLICE_F_NONBLOCK),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-TEST(TeeTest, NonBlocking) {
- // Create two new pipes.
- int first[2], second[2];
- ASSERT_THAT(pipe(first), SyscallSucceeds());
- const FileDescriptor rfd1(first[0]);
- const FileDescriptor wfd1(first[1]);
- ASSERT_THAT(pipe(second), SyscallSucceeds());
- const FileDescriptor rfd2(second[0]);
- const FileDescriptor wfd2(second[1]);
-
- // Tee with no data to back it.
- EXPECT_THAT(tee(rfd1.get(), wfd2.get(), kPageSize, SPLICE_F_NONBLOCK),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-TEST(TeeTest, MultiPage) {
- // Create two new pipes.
- int first[2], second[2];
- ASSERT_THAT(pipe(first), SyscallSucceeds());
- const FileDescriptor rfd1(first[0]);
- const FileDescriptor wfd1(first[1]);
- ASSERT_THAT(pipe(second), SyscallSucceeds());
- const FileDescriptor rfd2(second[0]);
- const FileDescriptor wfd2(second[1]);
-
- // Make some data available to be read.
- std::vector<char> wbuf(8 * kPageSize);
- RandomizeBuffer(wbuf.data(), wbuf.size());
- ASSERT_THAT(write(wfd1.get(), wbuf.data(), wbuf.size()),
- SyscallSucceedsWithValue(wbuf.size()));
-
- // Attempt a tee immediately; it should complete.
- EXPECT_THAT(tee(rfd1.get(), wfd2.get(), wbuf.size(), 0),
- SyscallSucceedsWithValue(wbuf.size()));
-
- // Content should reflect the tee.
- std::vector<char> rbuf(wbuf.size());
- ASSERT_THAT(read(rfd2.get(), rbuf.data(), rbuf.size()),
- SyscallSucceedsWithValue(rbuf.size()));
- EXPECT_EQ(memcmp(rbuf.data(), wbuf.data(), rbuf.size()), 0);
- ASSERT_THAT(read(rfd1.get(), rbuf.data(), rbuf.size()),
- SyscallSucceedsWithValue(rbuf.size()));
- EXPECT_EQ(memcmp(rbuf.data(), wbuf.data(), rbuf.size()), 0);
-}
-
-TEST(SpliceTest, FromPipeMaxFileSize) {
- // Create a new pipe.
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- const FileDescriptor rfd(fds[0]);
- const FileDescriptor wfd(fds[1]);
-
- // Fill with some random data.
- std::vector<char> buf(kPageSize);
- RandomizeBuffer(buf.data(), buf.size());
- ASSERT_THAT(write(wfd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(kPageSize));
-
- // Open the output file.
- const TempPath out_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor out_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_RDWR));
-
- EXPECT_THAT(ftruncate(out_fd.get(), 13 << 20), SyscallSucceeds());
- EXPECT_THAT(lseek(out_fd.get(), 0, SEEK_END),
- SyscallSucceedsWithValue(13 << 20));
-
- // Set our file size limit.
- sigset_t set;
- sigemptyset(&set);
- sigaddset(&set, SIGXFSZ);
- TEST_PCHECK(sigprocmask(SIG_BLOCK, &set, nullptr) == 0);
- rlimit rlim = {};
- rlim.rlim_cur = rlim.rlim_max = (13 << 20);
- EXPECT_THAT(setrlimit(RLIMIT_FSIZE, &rlim), SyscallSucceeds());
-
- // Splice to the output file; it should fail with EFBIG because the write
- // would extend the file past the RLIMIT_FSIZE limit set above.
- EXPECT_THAT(
- splice(rfd.get(), nullptr, out_fd.get(), nullptr, 3 * kPageSize, 0),
- SyscallFailsWithErrno(EFBIG));
-
- // The pipe contents should be unchanged and still match the original data.
- std::vector<char> rbuf(kPageSize);
- ASSERT_THAT(read(rfd.get(), rbuf.data(), rbuf.size()),
- SyscallSucceedsWithValue(kPageSize));
- EXPECT_EQ(memcmp(rbuf.data(), buf.data(), buf.size()), 0);
-}
-
-TEST(SpliceTest, FromPipeToDevZero) {
- // Create a new pipe.
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- const FileDescriptor rfd(fds[0]);
- FileDescriptor wfd(fds[1]);
-
- // Fill with some random data.
- std::vector<char> buf(kPageSize);
- RandomizeBuffer(buf.data(), buf.size());
- ASSERT_THAT(write(wfd.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(kPageSize));
-
- const FileDescriptor zero =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/zero", O_WRONLY));
-
- // Close the write end to prevent blocking below.
- wfd.reset();
-
- // Splice to /dev/zero. The first call should empty the pipe, and the return
- // value should not exceed the number of bytes available for reading.
- EXPECT_THAT(
- splice(rfd.get(), nullptr, zero.get(), nullptr, kPageSize + 123, 0),
- SyscallSucceedsWithValue(kPageSize));
- EXPECT_THAT(splice(rfd.get(), nullptr, zero.get(), nullptr, 1, 0),
- SyscallSucceedsWithValue(0));
-}
-
-static volatile int signaled = 0;
-void SigUsr1Handler(int sig, siginfo_t* info, void* context) { signaled = 1; }
-
-TEST(SpliceTest, ToPipeWithSmallCapacityDoesNotSpin) {
- // Writes to a pipe that are less than PIPE_BUF must be atomic. This test
- // creates a pipe with only 128 bytes of free capacity (< PIPE_BUF) and checks
- // that splicing to the pipe does not spin. See b/170743336.
-
- // Create a file with one page of data.
- std::vector<char> buf(kPageSize);
- RandomizeBuffer(buf.data(), buf.size());
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), absl::string_view(buf.data(), buf.size()),
- TempPath::kDefaultFileMode));
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDONLY));
-
- // Create a pipe with size 4096, and fill all but 128 bytes of it.
- int p[2];
- ASSERT_THAT(pipe(p), SyscallSucceeds());
- ASSERT_THAT(fcntl(p[1], F_SETPIPE_SZ, kPageSize), SyscallSucceeds());
- const int kWriteSize = kPageSize - 128;
- std::vector<char> writeBuf(kWriteSize);
- RandomizeBuffer(writeBuf.data(), writeBuf.size());
- ASSERT_THAT(write(p[1], writeBuf.data(), writeBuf.size()),
- SyscallSucceedsWithValue(kWriteSize));
-
- // Set up signal handler.
- struct sigaction sa = {};
- sa.sa_sigaction = SigUsr1Handler;
- sa.sa_flags = SA_SIGINFO;
- const auto cleanup_sigact =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGUSR1, sa));
-
- // Send SIGUSR1 to this thread in 1 second.
- struct sigevent sev = {};
- sev.sigev_notify = SIGEV_THREAD_ID;
- sev.sigev_signo = SIGUSR1;
- sev.sigev_notify_thread_id = gettid();
- auto timer = ASSERT_NO_ERRNO_AND_VALUE(TimerCreate(CLOCK_MONOTONIC, sev));
- struct itimerspec its = {};
- its.it_value = absl::ToTimespec(absl::Seconds(1));
- DisableSave ds; // Asserting an EINTR.
- ASSERT_NO_ERRNO(timer.Set(0, its));
-
- // Now splice the file to the pipe. This should block, but not spin, and
- // should return EINTR because it is interrupted by the signal.
- EXPECT_THAT(splice(fd.get(), nullptr, p[1], nullptr, kPageSize, 0),
- SyscallFailsWithErrno(EINTR));
-
- // The timer signal should have been handled.
- EXPECT_EQ(signaled, 1);
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
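
The deleted splice tests above revolve around a small set of splice(2)/tee(2) behaviors: pipe-to-pipe transfer without a userspace copy, partial transfers when either pipe limits how much moves, and EAGAIN under SPLICE_F_NONBLOCK. The following is a minimal stand-alone sketch of those semantics, not part of the test suite and independent of the gVisor test utilities; it assumes an ordinary Linux toolchain where g++ defines _GNU_SOURCE by default.

// Sketch only: demonstrates the splice(2)/tee(2) pipe semantics exercised by
// the tests above. Assumes Linux with g++ (which defines _GNU_SOURCE, making
// splice(), tee(), and SPLICE_F_NONBLOCK visible in <fcntl.h>).
#include <fcntl.h>
#include <unistd.h>

#include <cerrno>
#include <cstdio>

int main() {
  int src[2], dst[2];
  if (pipe(src) != 0 || pipe(dst) != 0) return 1;

  const char msg[] = "hello";
  if (write(src[1], msg, sizeof(msg)) != static_cast<ssize_t>(sizeof(msg)))
    return 1;

  // Duplicate the bytes into dst without consuming them from src.
  ssize_t copied = tee(src[0], dst[1], sizeof(msg), 0);
  printf("tee copied %zd bytes\n", copied);

  // Move (and consume) the bytes from src into dst.
  ssize_t moved = splice(src[0], nullptr, dst[1], nullptr, sizeof(msg), 0);
  printf("splice moved %zd bytes\n", moved);

  // src is now empty, so a non-blocking splice fails with EAGAIN instead of
  // waiting for a writer, matching SpliceTest.NonBlocking above.
  if (splice(src[0], nullptr, dst[1], nullptr, 1, SPLICE_F_NONBLOCK) < 0 &&
      errno == EAGAIN) {
    printf("empty source pipe: EAGAIN\n");
  }

  char out[sizeof(msg)];
  if (read(dst[0], out, sizeof(out)) > 0) printf("dst front: %s\n", out);
  return 0;
}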
diff --git a/test/syscalls/linux/stat.cc b/test/syscalls/linux/stat.cc
deleted file mode 100644
index 72f888659..000000000
--- a/test/syscalls/linux/stat.cc
+++ /dev/null
@@ -1,799 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <sys/stat.h>
-#include <sys/statfs.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <string>
-#include <vector>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/strings/match.h"
-#include "absl/strings/str_cat.h"
-#include "absl/strings/string_view.h"
-#include "test/syscalls/linux/file_base.h"
-#include "test/util/cleanup.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/save_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-#ifndef AT_STATX_FORCE_SYNC
-#define AT_STATX_FORCE_SYNC 0x2000
-#endif
-#ifndef AT_STATX_DONT_SYNC
-#define AT_STATX_DONT_SYNC 0x4000
-#endif
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-class StatTest : public FileTest {};
-
-TEST_F(StatTest, FstatatAbs) {
- struct stat st;
-
- // Check that the stat works.
- EXPECT_THAT(fstatat(AT_FDCWD, test_file_name_.c_str(), &st, 0),
- SyscallSucceeds());
- EXPECT_TRUE(S_ISREG(st.st_mode));
-}
-
-TEST_F(StatTest, FstatatEmptyPath) {
- struct stat st;
- const auto fd = ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDONLY));
-
- // Check that the stat works.
- EXPECT_THAT(fstatat(fd.get(), "", &st, AT_EMPTY_PATH), SyscallSucceeds());
- EXPECT_TRUE(S_ISREG(st.st_mode));
-}
-
-TEST_F(StatTest, FstatatRel) {
- struct stat st;
- int dirfd;
- auto filename = std::string(Basename(test_file_name_));
-
- // Open the temporary directory read-only.
- ASSERT_THAT(dirfd = open(GetAbsoluteTestTmpdir().c_str(), O_RDONLY),
- SyscallSucceeds());
-
- // Check that the stat works.
- EXPECT_THAT(fstatat(dirfd, filename.c_str(), &st, 0), SyscallSucceeds());
- EXPECT_TRUE(S_ISREG(st.st_mode));
- close(dirfd);
-}
-
-TEST_F(StatTest, FstatatSymlink) {
- struct stat st;
-
- // Check that the link is followed.
- EXPECT_THAT(fstatat(AT_FDCWD, "/proc/self", &st, 0), SyscallSucceeds());
- EXPECT_TRUE(S_ISDIR(st.st_mode));
- EXPECT_FALSE(S_ISLNK(st.st_mode));
-
- // Check that the flag works.
- EXPECT_THAT(fstatat(AT_FDCWD, "/proc/self", &st, AT_SYMLINK_NOFOLLOW),
- SyscallSucceeds());
- EXPECT_TRUE(S_ISLNK(st.st_mode));
- EXPECT_FALSE(S_ISDIR(st.st_mode));
-}
-
-TEST_F(StatTest, Nlinks) {
- // Skip this test if we are testing overlayfs because overlayfs intentionally
- // does not return the correct nlink value for directories.
- // See fs/overlayfs/inode.c:ovl_getattr().
- SKIP_IF(ASSERT_NO_ERRNO_AND_VALUE(IsOverlayfs(GetAbsoluteTestTmpdir())));
-
- TempPath basedir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- // The directory is initially empty; it should have 2 links (one from the
- // parent directory, one from its own ".").
- EXPECT_THAT(Links(basedir.path()), IsPosixErrorOkAndHolds(2));
-
- // Create a file in the test directory. Files shouldn't increase the link
- // count on the base directory.
- TempPath file1 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(basedir.path()));
- EXPECT_THAT(Links(basedir.path()), IsPosixErrorOkAndHolds(2));
-
- // Create subdirectories. This should increase the link count by 1 per
- // subdirectory.
- TempPath dir1 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(basedir.path()));
- EXPECT_THAT(Links(basedir.path()), IsPosixErrorOkAndHolds(3));
- TempPath dir2 =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(basedir.path()));
- EXPECT_THAT(Links(basedir.path()), IsPosixErrorOkAndHolds(4));
-
- // Removing directories should reduce the link count.
- dir1.reset();
- EXPECT_THAT(Links(basedir.path()), IsPosixErrorOkAndHolds(3));
- dir2.reset();
- EXPECT_THAT(Links(basedir.path()), IsPosixErrorOkAndHolds(2));
-
- // Removing files should have no effect on link count.
- file1.reset();
- EXPECT_THAT(Links(basedir.path()), IsPosixErrorOkAndHolds(2));
-}
-
-TEST_F(StatTest, BlocksIncreaseOnWrite) {
- struct stat st;
-
- // Stat the empty file.
- ASSERT_THAT(fstat(test_file_fd_.get(), &st), SyscallSucceeds());
-
- const int initial_blocks = st.st_blocks;
-
- // Write to the file, making sure to exceed the block size.
- std::vector<char> buf(2 * st.st_blksize, 'a');
- ASSERT_THAT(write(test_file_fd_.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(buf.size()));
-
- // Stat the file again, and verify that number of allocated blocks has
- // increased.
- ASSERT_THAT(fstat(test_file_fd_.get(), &st), SyscallSucceeds());
- EXPECT_GT(st.st_blocks, initial_blocks);
-}
-
-TEST_F(StatTest, PathNotCleaned) {
- TempPath basedir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- // Create a file in the basedir.
- TempPath file =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(basedir.path()));
-
- // Statting the file directly should succeed.
- struct stat buf;
- EXPECT_THAT(lstat(file.path().c_str(), &buf), SyscallSucceeds());
-
- // Try to stat the file using a directory that does not exist followed by
- // "..". If the path is cleaned prior to stating (which it should not be)
- // then this will succeed.
- const std::string bad_path = JoinPath("/does_not_exist/..", file.path());
- EXPECT_THAT(lstat(bad_path.c_str(), &buf), SyscallFailsWithErrno(ENOENT));
-}
-
-TEST_F(StatTest, PathCanContainDotDot) {
- TempPath basedir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- TempPath subdir =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(basedir.path()));
- const std::string subdir_name = std::string(Basename(subdir.path()));
-
- // Create a file in the subdir.
- TempPath file =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(subdir.path()));
- const std::string file_name = std::string(Basename(file.path()));
-
- // Stat the file through a path that includes '..' and '.' but still resolves
- // to the file.
- const std::string good_path =
- JoinPath(basedir.path(), subdir_name, "..", subdir_name, ".", file_name);
- struct stat buf;
- EXPECT_THAT(lstat(good_path.c_str(), &buf), SyscallSucceeds());
-}
-
-TEST_F(StatTest, PathCanContainEmptyComponent) {
- TempPath basedir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- // Create a file in the basedir.
- TempPath file =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(basedir.path()));
- const std::string file_name = std::string(Basename(file.path()));
-
- // Stat the file through a path that includes an empty component. We have to
- // build this ourselves because JoinPath automatically removes empty
- // components.
- const std::string good_path = absl::StrCat(basedir.path(), "//", file_name);
- struct stat buf;
- EXPECT_THAT(lstat(good_path.c_str(), &buf), SyscallSucceeds());
-}
-
-TEST_F(StatTest, TrailingSlashNotCleanedReturnsENOTDIR) {
- TempPath basedir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- // Create a file in the basedir.
- TempPath file =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(basedir.path()));
-
- // Stat the file with an extra "/" on the end of it. Since file is not a
- // directory, this should return ENOTDIR.
- const std::string bad_path = absl::StrCat(file.path(), "/");
- struct stat buf;
- EXPECT_THAT(lstat(bad_path.c_str(), &buf), SyscallFailsWithErrno(ENOTDIR));
-}
-
-TEST_F(StatTest, FstatFileWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- struct stat st;
- TempPath tmpfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(tmpfile.path().c_str(), O_PATH));
-
- // Stat the file.
- ASSERT_THAT(fstat(fd.get(), &st), SyscallSucceeds());
-}
-
-TEST_F(StatTest, FstatDirWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- struct stat st;
- TempPath tmpdir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- FileDescriptor dirfd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(tmpdir.path().c_str(), O_PATH | O_DIRECTORY));
-
- // Stat the directory.
- ASSERT_THAT(fstat(dirfd.get(), &st), SyscallSucceeds());
-}
-
-// fstatat with an O_PATH fd
-TEST_F(StatTest, FstatatDirWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- TempPath tmpdir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- FileDescriptor dirfd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(tmpdir.path().c_str(), O_PATH | O_DIRECTORY));
- TempPath tmpfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- struct stat st = {};
- EXPECT_THAT(fstatat(dirfd.get(), tmpfile.path().c_str(), &st, 0),
- SyscallSucceeds());
- EXPECT_FALSE(S_ISDIR(st.st_mode));
- EXPECT_TRUE(S_ISREG(st.st_mode));
-}
-
-// Test fstatating a symlink directory.
-TEST_F(StatTest, FstatatSymlinkDir) {
- // Create a directory and symlink to it.
- const auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- const std::string symlink_to_dir = NewTempAbsPath();
- EXPECT_THAT(symlink(dir.path().c_str(), symlink_to_dir.c_str()),
- SyscallSucceeds());
- auto cleanup = Cleanup([&symlink_to_dir]() {
- EXPECT_THAT(unlink(symlink_to_dir.c_str()), SyscallSucceeds());
- });
-
- // Fstatat the link with AT_SYMLINK_NOFOLLOW should return symlink data.
- struct stat st = {};
- EXPECT_THAT(
- fstatat(AT_FDCWD, symlink_to_dir.c_str(), &st, AT_SYMLINK_NOFOLLOW),
- SyscallSucceeds());
- EXPECT_FALSE(S_ISDIR(st.st_mode));
- EXPECT_TRUE(S_ISLNK(st.st_mode));
-
- // Fstatat the link should return dir data.
- EXPECT_THAT(fstatat(AT_FDCWD, symlink_to_dir.c_str(), &st, 0),
- SyscallSucceeds());
- EXPECT_TRUE(S_ISDIR(st.st_mode));
- EXPECT_FALSE(S_ISLNK(st.st_mode));
-}
-
-// Test fstatating a symlink directory with trailing slash.
-TEST_F(StatTest, FstatatSymlinkDirWithTrailingSlash) {
- // Create a directory and symlink to it.
- const auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const std::string symlink_to_dir = NewTempAbsPath();
- EXPECT_THAT(symlink(dir.path().c_str(), symlink_to_dir.c_str()),
- SyscallSucceeds());
- auto cleanup = Cleanup([&symlink_to_dir]() {
- EXPECT_THAT(unlink(symlink_to_dir.c_str()), SyscallSucceeds());
- });
-
- // Fstatat on the symlink with a trailing slash should return the directory
- // data.
- struct stat st = {};
- EXPECT_THAT(
- fstatat(AT_FDCWD, absl::StrCat(symlink_to_dir, "/").c_str(), &st, 0),
- SyscallSucceeds());
- EXPECT_TRUE(S_ISDIR(st.st_mode));
- EXPECT_FALSE(S_ISLNK(st.st_mode));
-
- // Fstatat on the symlink with a trailing slash and AT_SYMLINK_NOFOLLOW should
- // still return the directory data: the trailing slash forces the symlink to
- // be followed, so AT_SYMLINK_NOFOLLOW is ignored.
- EXPECT_THAT(fstatat(AT_FDCWD, absl::StrCat(symlink_to_dir, "/").c_str(), &st,
- AT_SYMLINK_NOFOLLOW),
- SyscallSucceeds());
- EXPECT_TRUE(S_ISDIR(st.st_mode));
- EXPECT_FALSE(S_ISLNK(st.st_mode));
-}
-
- // Test that fstatat on a symlink to a directory with a trailing slash returns
- // the same stat data as fstatat on the directory itself.
-TEST_F(StatTest, FstatatSymlinkDirWithTrailingSlashSameInode) {
- // Create a directory and symlink to it.
- const auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- // We are going to assert that the symlink inode id is the same as the linked
- // dir's inode id. In order for the inode id to be stable across
- // save/restore, it must be kept open. The FileDescriptor type will do that
- // for us automatically.
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(Open(dir.path(), O_RDONLY | O_DIRECTORY));
-
- const std::string symlink_to_dir = NewTempAbsPath();
- EXPECT_THAT(symlink(dir.path().c_str(), symlink_to_dir.c_str()),
- SyscallSucceeds());
- auto cleanup = Cleanup([&symlink_to_dir]() {
- EXPECT_THAT(unlink(symlink_to_dir.c_str()), SyscallSucceeds());
- });
-
- // Fstatat on the symlink with a trailing slash (even with AT_SYMLINK_NOFOLLOW)
- // should return the directory data.
- struct stat st = {};
- EXPECT_THAT(fstatat(AT_FDCWD, absl::StrCat(symlink_to_dir, "/").c_str(), &st,
- AT_SYMLINK_NOFOLLOW),
- SyscallSucceeds());
- EXPECT_TRUE(S_ISDIR(st.st_mode));
-
- // Dir and symlink should point to same inode.
- struct stat st_dir = {};
- EXPECT_THAT(
- fstatat(AT_FDCWD, dir.path().c_str(), &st_dir, AT_SYMLINK_NOFOLLOW),
- SyscallSucceeds());
- EXPECT_EQ(st.st_ino, st_dir.st_ino);
-}
-
-TEST_F(StatTest, LeadingDoubleSlash) {
- // Create a file, and make sure we can stat it.
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- struct stat st;
- ASSERT_THAT(lstat(file.path().c_str(), &st), SyscallSucceeds());
-
- // Now add an extra leading slash.
- const std::string double_slash_path = absl::StrCat("/", file.path());
- ASSERT_TRUE(absl::StartsWith(double_slash_path, "//"));
-
- // We should be able to stat the new path, and it should resolve to the same
- // file (same device and inode).
- struct stat double_slash_st;
- ASSERT_THAT(lstat(double_slash_path.c_str(), &double_slash_st),
- SyscallSucceeds());
- EXPECT_EQ(st.st_dev, double_slash_st.st_dev);
- // Inode numbers for gofer-accessed files may change across save/restore.
- if (!IsRunningWithSaveRestore()) {
- EXPECT_EQ(st.st_ino, double_slash_st.st_ino);
- }
-}
-
-// Test that a rename doesn't change the underlying file.
-TEST_F(StatTest, StatDoesntChangeAfterRename) {
- const TempPath old_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const TempPath new_path(NewTempAbsPath());
-
- struct stat st_old = {};
- struct stat st_new = {};
-
- ASSERT_THAT(stat(old_file.path().c_str(), &st_old), SyscallSucceeds());
- ASSERT_THAT(rename(old_file.path().c_str(), new_path.path().c_str()),
- SyscallSucceeds());
- ASSERT_THAT(stat(new_path.path().c_str(), &st_new), SyscallSucceeds());
-
- EXPECT_EQ(st_old.st_nlink, st_new.st_nlink);
- EXPECT_EQ(st_old.st_dev, st_new.st_dev);
- // Inode numbers for gofer-accessed files on which no reference is held may
- // change across save/restore because the information that the gofer client
- // uses to track file identity (9P QID path) is inconsistent between gofer
- // processes, which are restarted across save/restore.
- //
- // Overlay filesystems may synthesize directory inode numbers on the fly.
- if (!IsRunningWithSaveRestore() &&
- !ASSERT_NO_ERRNO_AND_VALUE(IsOverlayfs(GetAbsoluteTestTmpdir()))) {
- EXPECT_EQ(st_old.st_ino, st_new.st_ino);
- }
- EXPECT_EQ(st_old.st_mode, st_new.st_mode);
- EXPECT_EQ(st_old.st_uid, st_new.st_uid);
- EXPECT_EQ(st_old.st_gid, st_new.st_gid);
- EXPECT_EQ(st_old.st_size, st_new.st_size);
-}
-
-// Test link counts with a regular file as the child.
-TEST_F(StatTest, LinkCountsWithRegularFileChild) {
- const TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- struct stat st_parent_before = {};
- ASSERT_THAT(stat(dir.path().c_str(), &st_parent_before), SyscallSucceeds());
- EXPECT_EQ(st_parent_before.st_nlink, 2);
-
- // Adding a regular file doesn't adjust the parent's link count.
- const TempPath child =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir.path()));
-
- struct stat st_parent_after = {};
- ASSERT_THAT(stat(dir.path().c_str(), &st_parent_after), SyscallSucceeds());
- EXPECT_EQ(st_parent_after.st_nlink, 2);
-
- // The child should have a single link from the parent.
- struct stat st_child = {};
- ASSERT_THAT(stat(child.path().c_str(), &st_child), SyscallSucceeds());
- EXPECT_TRUE(S_ISREG(st_child.st_mode));
- EXPECT_EQ(st_child.st_nlink, 1);
-
- // Finally unlinking the child should not affect the parent's link count.
- ASSERT_THAT(unlink(child.path().c_str()), SyscallSucceeds());
- ASSERT_THAT(stat(dir.path().c_str(), &st_parent_after), SyscallSucceeds());
- EXPECT_EQ(st_parent_after.st_nlink, 2);
-}
-
-// This test verifies that inodes remain around when there is an open fd
-// after link count hits 0.
-//
-// It is marked NoSave because we don't support saving unlinked files.
-TEST_F(StatTest, ZeroLinksOpenFdRegularFileChild_NoSave) {
- // Setting the environment variable GVISOR_GOFER_UNCACHED to any value will
- // prevent this test from running; see the tmpfs lifecycle.
- //
- // The test must be skipped in that case because, once the file is unlinked,
- // forwarding the stat to the gofer would return ENOENT.
- const char* uncached_gofer = getenv("GVISOR_GOFER_UNCACHED");
- SKIP_IF(uncached_gofer != nullptr);
-
- const TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const TempPath child = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- dir.path(), "hello", TempPath::kDefaultFileMode));
-
- // The child should have a single link from the parent.
- struct stat st_child_before = {};
- ASSERT_THAT(stat(child.path().c_str(), &st_child_before), SyscallSucceeds());
- EXPECT_TRUE(S_ISREG(st_child_before.st_mode));
- EXPECT_EQ(st_child_before.st_nlink, 1);
- EXPECT_EQ(st_child_before.st_size, 5); // "hello" is 5 bytes.
-
- // Open the file so we can fstat after unlinking.
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(child.path(), O_RDONLY));
-
- // Now a stat should return ENOENT but we should still be able to stat
- // via the open fd and fstat.
- ASSERT_THAT(unlink(child.path().c_str()), SyscallSucceeds());
-
- // Since the file has no more links stat should fail.
- struct stat st_child_after = {};
- ASSERT_THAT(stat(child.path().c_str(), &st_child_after),
- SyscallFailsWithErrno(ENOENT));
-
- // Fstat should still allow us to access the same file via the fd.
- struct stat st_child_fd = {};
- ASSERT_THAT(fstat(fd.get(), &st_child_fd), SyscallSucceeds());
- EXPECT_EQ(st_child_before.st_dev, st_child_fd.st_dev);
- EXPECT_EQ(st_child_before.st_ino, st_child_fd.st_ino);
- EXPECT_EQ(st_child_before.st_mode, st_child_fd.st_mode);
- EXPECT_EQ(st_child_before.st_uid, st_child_fd.st_uid);
- EXPECT_EQ(st_child_before.st_gid, st_child_fd.st_gid);
- EXPECT_EQ(st_child_before.st_size, st_child_fd.st_size);
-
- // TODO(b/34861058): This isn't ideal but since fstatfs(2) will always return
- // OVERLAYFS_SUPER_MAGIC we have no way to know if this fs is backed by a
- // gofer which doesn't support links.
- EXPECT_TRUE(st_child_fd.st_nlink == 0 || st_child_fd.st_nlink == 1);
-}
-
-// Test link counts with a directory as the child.
-TEST_F(StatTest, LinkCountsWithDirChild) {
- // Skip this test if we are testing overlayfs because overlayfs intentionally
- // does not return the correct nlink value for directories.
- // See fs/overlayfs/inode.c:ovl_getattr().
- SKIP_IF(ASSERT_NO_ERRNO_AND_VALUE(IsOverlayfs(GetAbsoluteTestTmpdir())));
-
- const TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- // Before a child is added the two links are "." and the link from the parent.
- struct stat st_parent_before = {};
- ASSERT_THAT(stat(dir.path().c_str(), &st_parent_before), SyscallSucceeds());
- EXPECT_EQ(st_parent_before.st_nlink, 2);
-
- // Create a subdirectory and stat for the parent link counts.
- const TempPath sub_dir =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(dir.path()));
-
- // The three links are ".", the link from the parent, and the link from
- // the child as "..".
- struct stat st_parent_after = {};
- ASSERT_THAT(stat(dir.path().c_str(), &st_parent_after), SyscallSucceeds());
- EXPECT_EQ(st_parent_after.st_nlink, 3);
-
- // The child will have 1 link from the parent and 1 link which represents ".".
- struct stat st_child = {};
- ASSERT_THAT(stat(sub_dir.path().c_str(), &st_child), SyscallSucceeds());
- EXPECT_TRUE(S_ISDIR(st_child.st_mode));
- EXPECT_EQ(st_child.st_nlink, 2);
-
- // Finally delete the child dir and the parent link count should return to 2.
- ASSERT_THAT(rmdir(sub_dir.path().c_str()), SyscallSucceeds());
- ASSERT_THAT(stat(dir.path().c_str(), &st_parent_after), SyscallSucceeds());
-
- // Now we should only have links from the parent and "." since the subdir
- // has been removed.
- EXPECT_EQ(st_parent_after.st_nlink, 2);
-}
-
-// Test statting a child of a non-directory.
-TEST_F(StatTest, ChildOfNonDir) {
- // Create a path that has a child of a regular file.
- const std::string filename = JoinPath(test_file_name_, "child");
-
- // Statting the path should return ENOTDIR.
- struct stat st;
- EXPECT_THAT(lstat(filename.c_str(), &st), SyscallFailsWithErrno(ENOTDIR));
-}
-
-// Test lstating a symlink directory.
-TEST_F(StatTest, LstatSymlinkDir) {
- // Create a directory and symlink to it.
- const auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const std::string symlink_to_dir = NewTempAbsPath();
- EXPECT_THAT(symlink(dir.path().c_str(), symlink_to_dir.c_str()),
- SyscallSucceeds());
- auto cleanup = Cleanup([&symlink_to_dir]() {
- EXPECT_THAT(unlink(symlink_to_dir.c_str()), SyscallSucceeds());
- });
-
- // Lstat on the symlink should return symlink data.
- struct stat st = {};
- ASSERT_THAT(lstat(symlink_to_dir.c_str(), &st), SyscallSucceeds());
- EXPECT_FALSE(S_ISDIR(st.st_mode));
- EXPECT_TRUE(S_ISLNK(st.st_mode));
-
- // Lstat on the symlink with a trailing slash should return the directory
- // data.
- ASSERT_THAT(lstat(absl::StrCat(symlink_to_dir, "/").c_str(), &st),
- SyscallSucceeds());
- EXPECT_TRUE(S_ISDIR(st.st_mode));
- EXPECT_FALSE(S_ISLNK(st.st_mode));
-}
-
-// Verify that we get an ELOOP from too many symbolic links even when there
-// are directories in the middle.
-TEST_F(StatTest, LstatELOOPPath) {
- const TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- std::string subdir_base = "subdir";
- ASSERT_THAT(mkdir(JoinPath(dir.path(), subdir_base).c_str(), 0755),
- SyscallSucceeds());
-
- std::string target = JoinPath(dir.path(), subdir_base, subdir_base);
- std::string dst = JoinPath("..", subdir_base);
- ASSERT_THAT(symlink(dst.c_str(), target.c_str()), SyscallSucceeds());
- auto cleanup = Cleanup(
- [&target]() { EXPECT_THAT(unlink(target.c_str()), SyscallSucceeds()); });
-
- // Now build a path which is /subdir/subdir/... repeated many times so that
- // we can build a path that is shorter than PATH_MAX but can still cause
- // too many symbolic links. Note: Every other subdir is actually a directory
- // so we're not in a situation where it's a -> b -> a -> b, where a and b
- // are symbolic links.
- std::string path = dir.path();
- std::string subdir_append = absl::StrCat("/", subdir_base);
- do {
- absl::StrAppend(&path, subdir_append);
- // Keep appending /subdir until we would overflow PATH_MAX.
- } while ((path.size() + subdir_append.size()) < PATH_MAX);
-
- struct stat s = {};
- ASSERT_THAT(lstat(path.c_str(), &s), SyscallFailsWithErrno(ELOOP));
-}
-
-TEST(SimpleStatTest, DifferentFilesHaveDifferentDeviceInodeNumberPairs) {
- // TODO(gvisor.dev/issue/1624): This test case fails in VFS1 save/restore
- // tests because VFS1 gofer inode number assignment restarts after
- // save/restore, such that the inodes for file1 and file2 (which are
- // unreferenced and therefore not retained in sentry checkpoints before the
- // calls to lstat()) are assigned the same inode number.
- SKIP_IF(IsRunningWithVFS1() && IsRunningWithSaveRestore());
-
- TempPath file1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- TempPath file2 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- MaybeSave();
- struct stat st1 = ASSERT_NO_ERRNO_AND_VALUE(Lstat(file1.path()));
- MaybeSave();
- struct stat st2 = ASSERT_NO_ERRNO_AND_VALUE(Lstat(file2.path()));
- EXPECT_FALSE(st1.st_dev == st2.st_dev && st1.st_ino == st2.st_ino)
- << "both files have device number " << st1.st_dev << " and inode number "
- << st1.st_ino;
-}
-
- // Ensure that inode allocation for anonymous devices works correctly across
- // save/restore. In particular, inode numbers should be unique across S/R.
-TEST(SimpleStatTest, AnonDeviceAllocatesUniqueInodesAcrossSaveRestore) {
- // Use sockets as a convenient way to create inodes on an anonymous device.
- int fd;
- ASSERT_THAT(fd = socket(AF_UNIX, SOCK_STREAM, 0), SyscallSucceeds());
- FileDescriptor fd1(fd);
- MaybeSave();
- ASSERT_THAT(fd = socket(AF_UNIX, SOCK_STREAM, 0), SyscallSucceeds());
- FileDescriptor fd2(fd);
-
- struct stat st1;
- struct stat st2;
- ASSERT_THAT(fstat(fd1.get(), &st1), SyscallSucceeds());
- ASSERT_THAT(fstat(fd2.get(), &st2), SyscallSucceeds());
-
- // The two fds should have different inode numbers.
- EXPECT_NE(st2.st_ino, st1.st_ino);
-
- // Verify again after another S/R cycle. The inode numbers should remain the
- // same.
- MaybeSave();
-
- struct stat st1_after;
- struct stat st2_after;
- ASSERT_THAT(fstat(fd1.get(), &st1_after), SyscallSucceeds());
- ASSERT_THAT(fstat(fd2.get(), &st2_after), SyscallSucceeds());
-
- EXPECT_EQ(st1_after.st_ino, st1.st_ino);
- EXPECT_EQ(st2_after.st_ino, st2.st_ino);
-}
-
-#ifndef SYS_statx
-#if defined(__x86_64__)
-#define SYS_statx 332
-#elif defined(__aarch64__)
-#define SYS_statx 291
-#else
-#error "Unknown architecture"
-#endif
-#endif // SYS_statx
-
-#ifndef STATX_ALL
-#define STATX_ALL 0x00000fffU
-#endif // STATX_ALL
-
-// struct kernel_statx_timestamp is a Linux statx_timestamp struct.
-struct kernel_statx_timestamp {
- int64_t tv_sec;
- uint32_t tv_nsec;
- int32_t __reserved;
-};
-
-// struct kernel_statx is a Linux statx struct. Old versions of glibc do not
-// expose it. See include/uapi/linux/stat.h
-struct kernel_statx {
- uint32_t stx_mask;
- uint32_t stx_blksize;
- uint64_t stx_attributes;
- uint32_t stx_nlink;
- uint32_t stx_uid;
- uint32_t stx_gid;
- uint16_t stx_mode;
- uint16_t __spare0[1];
- uint64_t stx_ino;
- uint64_t stx_size;
- uint64_t stx_blocks;
- uint64_t stx_attributes_mask;
- struct kernel_statx_timestamp stx_atime;
- struct kernel_statx_timestamp stx_btime;
- struct kernel_statx_timestamp stx_ctime;
- struct kernel_statx_timestamp stx_mtime;
- uint32_t stx_rdev_major;
- uint32_t stx_rdev_minor;
- uint32_t stx_dev_major;
- uint32_t stx_dev_minor;
- uint64_t __spare2[14];
-};
-
-int statx(int dirfd, const char* pathname, int flags, unsigned int mask,
- struct kernel_statx* statxbuf) {
- return syscall(SYS_statx, dirfd, pathname, flags, mask, statxbuf);
-}
-
-TEST_F(StatTest, StatxAbsPath) {
- SKIP_IF(!IsRunningOnGvisor() && statx(-1, nullptr, 0, 0, nullptr) < 0 &&
- errno == ENOSYS);
-
- struct kernel_statx stx;
- EXPECT_THAT(statx(-1, test_file_name_.c_str(), 0, STATX_ALL, &stx),
- SyscallSucceeds());
- EXPECT_TRUE(S_ISREG(stx.stx_mode));
-}
-
-TEST_F(StatTest, StatxRelPathDirFD) {
- SKIP_IF(!IsRunningOnGvisor() && statx(-1, nullptr, 0, 0, nullptr) < 0 &&
- errno == ENOSYS);
-
- struct kernel_statx stx;
- auto const dirfd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(GetAbsoluteTestTmpdir(), O_RDONLY));
- auto filename = std::string(Basename(test_file_name_));
-
- EXPECT_THAT(statx(dirfd.get(), filename.c_str(), 0, STATX_ALL, &stx),
- SyscallSucceeds());
- EXPECT_TRUE(S_ISREG(stx.stx_mode));
-}
-
-TEST_F(StatTest, StatxRelPathCwd) {
- SKIP_IF(!IsRunningOnGvisor() && statx(-1, nullptr, 0, 0, nullptr) < 0 &&
- errno == ENOSYS);
-
- ASSERT_THAT(chdir(GetAbsoluteTestTmpdir().c_str()), SyscallSucceeds());
- auto filename = std::string(Basename(test_file_name_));
- struct kernel_statx stx;
- EXPECT_THAT(statx(AT_FDCWD, filename.c_str(), 0, STATX_ALL, &stx),
- SyscallSucceeds());
- EXPECT_TRUE(S_ISREG(stx.stx_mode));
-}
-
-TEST_F(StatTest, StatxEmptyPath) {
- SKIP_IF(!IsRunningOnGvisor() && statx(-1, nullptr, 0, 0, nullptr) < 0 &&
- errno == ENOSYS);
-
- const auto fd = ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDONLY));
- struct kernel_statx stx;
- EXPECT_THAT(statx(fd.get(), "", AT_EMPTY_PATH, STATX_ALL, &stx),
- SyscallSucceeds());
- EXPECT_TRUE(S_ISREG(stx.stx_mode));
-}
-
-TEST_F(StatTest, StatxDoesNotRejectExtraneousMaskBits) {
- SKIP_IF(!IsRunningOnGvisor() && statx(-1, nullptr, 0, 0, nullptr) < 0 &&
- errno == ENOSYS);
-
- struct kernel_statx stx;
- // Set all mask bits except for STATX__RESERVED.
- uint mask = 0xffffffff & ~0x80000000;
- EXPECT_THAT(statx(-1, test_file_name_.c_str(), 0, mask, &stx),
- SyscallSucceeds());
- EXPECT_TRUE(S_ISREG(stx.stx_mode));
-}
-
-TEST_F(StatTest, StatxRejectsReservedMaskBit) {
- SKIP_IF(!IsRunningOnGvisor() && statx(-1, nullptr, 0, 0, nullptr) < 0 &&
- errno == ENOSYS);
-
- struct kernel_statx stx;
- // Set STATX__RESERVED in the mask.
- EXPECT_THAT(statx(-1, test_file_name_.c_str(), 0, 0x80000000, &stx),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_F(StatTest, StatxSymlink) {
- SKIP_IF(!IsRunningOnGvisor() && statx(-1, nullptr, 0, 0, nullptr) < 0 &&
- errno == ENOSYS);
-
- std::string parent_dir = "/tmp";
- TempPath link = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateSymlinkTo(parent_dir, test_file_name_));
- std::string p = link.path();
-
- struct kernel_statx stx;
- EXPECT_THAT(statx(AT_FDCWD, p.c_str(), AT_SYMLINK_NOFOLLOW, STATX_ALL, &stx),
- SyscallSucceeds());
- EXPECT_TRUE(S_ISLNK(stx.stx_mode));
- EXPECT_THAT(statx(AT_FDCWD, p.c_str(), 0, STATX_ALL, &stx),
- SyscallSucceeds());
- EXPECT_TRUE(S_ISREG(stx.stx_mode));
-}
-
-TEST_F(StatTest, StatxInvalidFlags) {
- SKIP_IF(!IsRunningOnGvisor() && statx(-1, nullptr, 0, 0, nullptr) < 0 &&
- errno == ENOSYS);
-
- struct kernel_statx stx;
- EXPECT_THAT(statx(AT_FDCWD, test_file_name_.c_str(), 12345, 0, &stx),
- SyscallFailsWithErrno(EINVAL));
-
- // Sync flags are mutually exclusive.
- EXPECT_THAT(statx(AT_FDCWD, test_file_name_.c_str(),
- AT_STATX_FORCE_SYNC | AT_STATX_DONT_SYNC, 0, &stx),
- SyscallFailsWithErrno(EINVAL));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
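
The deleted stat tests above end with a hand-rolled kernel_statx struct and raw-syscall wrapper because older glibc versions do not expose statx(2). As a sketch only, the same query can be made through the glibc wrapper instead, assuming glibc >= 2.28 (an assumption about the toolchain); the default path below is just a placeholder.

// Sketch only: statx(2) via the glibc wrapper (glibc >= 2.28 assumed) rather
// than the raw syscall used by the tests above.
#include <fcntl.h>     // AT_FDCWD, AT_SYMLINK_NOFOLLOW
#include <sys/stat.h>  // statx(), struct statx, STATX_BASIC_STATS

#include <cstdio>

int main(int argc, char** argv) {
  const char* path = argc > 1 ? argv[1] : "/etc/hostname";  // placeholder path
  struct statx stx = {};
  if (statx(AT_FDCWD, path, AT_SYMLINK_NOFOLLOW, STATX_BASIC_STATS, &stx) !=
      0) {
    perror("statx");
    return 1;
  }
  printf("mode=%o nlink=%u size=%llu\n",
         static_cast<unsigned>(stx.stx_mode & 07777), stx.stx_nlink,
         static_cast<unsigned long long>(stx.stx_size));
  return 0;
}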
diff --git a/test/syscalls/linux/stat_times.cc b/test/syscalls/linux/stat_times.cc
deleted file mode 100644
index 68c0bef09..000000000
--- a/test/syscalls/linux/stat_times.cc
+++ /dev/null
@@ -1,303 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <sys/stat.h>
-
-#include <tuple>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-using ::testing::IsEmpty;
-using ::testing::Not;
-
-std::tuple<absl::Time, absl::Time, absl::Time> GetTime(const TempPath& file) {
- struct stat statbuf = {};
- EXPECT_THAT(stat(file.path().c_str(), &statbuf), SyscallSucceeds());
-
- const auto atime = absl::TimeFromTimespec(statbuf.st_atim);
- const auto mtime = absl::TimeFromTimespec(statbuf.st_mtim);
- const auto ctime = absl::TimeFromTimespec(statbuf.st_ctim);
- return std::make_tuple(atime, mtime, ctime);
-}
-
-enum class AtimeEffect {
- Unchanged,
- Changed,
-};
-
-enum class MtimeEffect {
- Unchanged,
- Changed,
-};
-
-enum class CtimeEffect {
- Unchanged,
- Changed,
-};
-
-// Tests that fn modifies the atime/mtime/ctime of path as specified.
-void CheckTimes(const TempPath& path, std::function<void()> fn,
- AtimeEffect atime_effect, MtimeEffect mtime_effect,
- CtimeEffect ctime_effect) {
- absl::Time atime, mtime, ctime;
- std::tie(atime, mtime, ctime) = GetTime(path);
-
- // FIXME(b/132819225): gVisor filesystem timestamps inconsistently use the
- // internal or host clock, which may diverge slightly. Allow some slack on
- // times to account for the difference.
- //
- // Here we sleep for 1s so that initial creation of path doesn't fall within
- // the before slack window.
- absl::SleepFor(absl::Seconds(1));
-
- const absl::Time before = absl::Now() - absl::Seconds(1);
-
- // Perform the op.
- fn();
-
- const absl::Time after = absl::Now() + absl::Seconds(1);
-
- absl::Time atime2, mtime2, ctime2;
- std::tie(atime2, mtime2, ctime2) = GetTime(path);
-
- if (atime_effect == AtimeEffect::Changed) {
- EXPECT_LE(before, atime2);
- EXPECT_GE(after, atime2);
- EXPECT_GT(atime2, atime);
- } else {
- EXPECT_EQ(atime2, atime);
- }
-
- if (mtime_effect == MtimeEffect::Changed) {
- EXPECT_LE(before, mtime2);
- EXPECT_GE(after, mtime2);
- EXPECT_GT(mtime2, mtime);
- } else {
- EXPECT_EQ(mtime2, mtime);
- }
-
- if (ctime_effect == CtimeEffect::Changed) {
- EXPECT_LE(before, ctime2);
- EXPECT_GE(after, ctime2);
- EXPECT_GT(ctime2, ctime);
- } else {
- EXPECT_EQ(ctime2, ctime);
- }
-}
-
-// File creation time is reflected in atime, mtime, and ctime.
-TEST(StatTimesTest, FileCreation) {
- const DisableSave ds; // Timing-related test.
-
- // Get a time for when the file is created.
- //
- // FIXME(b/132819225): See above.
- const absl::Time before = absl::Now() - absl::Seconds(1);
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const absl::Time after = absl::Now() + absl::Seconds(1);
-
- absl::Time atime, mtime, ctime;
- std::tie(atime, mtime, ctime) = GetTime(file);
-
- EXPECT_LE(before, atime);
- EXPECT_LE(before, mtime);
- EXPECT_LE(before, ctime);
- EXPECT_GE(after, atime);
- EXPECT_GE(after, mtime);
- EXPECT_GE(after, ctime);
-}
-
-// Calling chmod on a file changes ctime.
-TEST(StatTimesTest, FileChmod) {
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- auto fn = [&] {
- EXPECT_THAT(chmod(file.path().c_str(), 0666), SyscallSucceeds());
- };
- CheckTimes(file, fn, AtimeEffect::Unchanged, MtimeEffect::Unchanged,
- CtimeEffect::Changed);
-}
-
-// Renaming a file changes ctime.
-TEST(StatTimesTest, FileRename) {
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- const std::string newpath = NewTempAbsPath();
-
- auto fn = [&] {
- ASSERT_THAT(rename(file.release().c_str(), newpath.c_str()),
- SyscallSucceeds());
- file.reset(newpath);
- };
- CheckTimes(file, fn, AtimeEffect::Unchanged, MtimeEffect::Unchanged,
- CtimeEffect::Changed);
-}
-
-// Renaming a file changes ctime, even with an open FD.
-//
-// NOTE(b/132732387): This is a regression test for fs/gofer failing to update
-// cached ctime.
-TEST(StatTimesTest, FileRenameOpenFD) {
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- // Holding an FD shouldn't affect behavior.
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDONLY));
-
- const std::string newpath = NewTempAbsPath();
-
- // FIXME(b/132814682): Restore fails with an uncached gofer and an open FD
- // across rename.
- //
- // N.B. The logic here looks backwards because it isn't possible to
- // conditionally disable save, only conditionally re-enable it.
- DisableSave ds;
- if (!getenv("GVISOR_GOFER_UNCACHED")) {
- ds.reset();
- }
-
- auto fn = [&] {
- ASSERT_THAT(rename(file.release().c_str(), newpath.c_str()),
- SyscallSucceeds());
- file.reset(newpath);
- };
- CheckTimes(file, fn, AtimeEffect::Unchanged, MtimeEffect::Unchanged,
- CtimeEffect::Changed);
-}
-
-// Calling utimes on a file changes ctime and the time that we ask to change
-// (atime to now in this case).
-TEST(StatTimesTest, FileUtimes) {
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- auto fn = [&] {
- const struct timespec ts[2] = {{0, UTIME_NOW}, {0, UTIME_OMIT}};
- ASSERT_THAT(utimensat(AT_FDCWD, file.path().c_str(), ts, 0),
- SyscallSucceeds());
- };
- CheckTimes(file, fn, AtimeEffect::Changed, MtimeEffect::Unchanged,
- CtimeEffect::Changed);
-}
-
-// Truncating a file changes mtime and ctime.
-TEST(StatTimesTest, FileTruncate) {
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateFileWith(GetAbsoluteTestTmpdir(), "yaaass", 0666));
-
- auto fn = [&] {
- EXPECT_THAT(truncate(file.path().c_str(), 0), SyscallSucceeds());
- };
- CheckTimes(file, fn, AtimeEffect::Unchanged, MtimeEffect::Changed,
- CtimeEffect::Changed);
-}
-
-// Writing a file changes mtime and ctime.
-TEST(StatTimesTest, FileWrite) {
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateFileWith(GetAbsoluteTestTmpdir(), "yaaass", 0666));
-
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR, 0));
-
- auto fn = [&] {
- const std::string contents = "all the single dollars";
- EXPECT_THAT(WriteFd(fd.get(), contents.data(), contents.size()),
- SyscallSucceeds());
- };
- CheckTimes(file, fn, AtimeEffect::Unchanged, MtimeEffect::Changed,
- CtimeEffect::Changed);
-}
-
-// Reading a file changes atime.
-TEST(StatTimesTest, FileRead) {
- const std::string contents = "bills bills bills";
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateFileWith(GetAbsoluteTestTmpdir(), contents, 0666));
-
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDONLY, 0));
-
- auto fn = [&] {
- char buf[20];
- ASSERT_THAT(ReadFd(fd.get(), buf, sizeof(buf)),
- SyscallSucceedsWithValue(contents.size()));
- };
- CheckTimes(file, fn, AtimeEffect::Changed, MtimeEffect::Unchanged,
- CtimeEffect::Unchanged);
-}
-
-// Listing files in a directory changes atime.
-TEST(StatTimesTest, DirList) {
- const TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const TempPath file =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir.path()));
-
- auto fn = [&] {
- const auto contents = ASSERT_NO_ERRNO_AND_VALUE(ListDir(dir.path(), false));
- EXPECT_THAT(contents, Not(IsEmpty()));
- };
- CheckTimes(dir, fn, AtimeEffect::Changed, MtimeEffect::Unchanged,
- CtimeEffect::Unchanged);
-}
-
-// Creating a file in a directory changes mtime and ctime.
-TEST(StatTimesTest, DirCreateFile) {
- const TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- TempPath file;
- auto fn = [&] {
- file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir.path()));
- };
- CheckTimes(dir, fn, AtimeEffect::Unchanged, MtimeEffect::Changed,
- CtimeEffect::Changed);
-}
-
-// Creating a directory in a directory changes mtime and ctime.
-TEST(StatTimesTest, DirCreateDir) {
- const TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- TempPath dir2;
- auto fn = [&] {
- dir2 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(dir.path()));
- };
- CheckTimes(dir, fn, AtimeEffect::Unchanged, MtimeEffect::Changed,
- CtimeEffect::Changed);
-}
-
-// Removing a file from a directory changes mtime and ctime.
-TEST(StatTimesTest, DirRemoveFile) {
- const TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir.path()));
- auto fn = [&] { file.reset(); };
- CheckTimes(dir, fn, AtimeEffect::Unchanged, MtimeEffect::Changed,
- CtimeEffect::Changed);
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
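
The deleted stat_times tests above check which operations move atime, mtime, and ctime. The following is a minimal sketch, not part of the suite, of the underlying calls: reading the timestamps from struct stat and bumping only atime via utimensat(2) with UTIME_NOW/UTIME_OMIT, the same call the FileUtimes test uses. The path is a placeholder.

// Sketch only: prints a file's timestamps, then updates atime alone with
// utimensat(2); ctime changes as a side effect of the call.
#include <fcntl.h>     // AT_FDCWD
#include <sys/stat.h>  // stat(), utimensat(), UTIME_NOW, UTIME_OMIT

#include <cstdio>

static void PrintTimes(const char* path) {
  struct stat st = {};
  if (stat(path, &st) != 0) {
    perror("stat");
    return;
  }
  printf("atime=%ld mtime=%ld ctime=%ld\n",
         static_cast<long>(st.st_atim.tv_sec),
         static_cast<long>(st.st_mtim.tv_sec),
         static_cast<long>(st.st_ctim.tv_sec));
}

int main() {
  const char* path = "/tmp/example";  // placeholder file
  PrintTimes(path);

  // UTIME_NOW sets atime to the current time; UTIME_OMIT leaves mtime alone.
  const struct timespec ts[2] = {{0, UTIME_NOW}, {0, UTIME_OMIT}};
  if (utimensat(AT_FDCWD, path, ts, 0) != 0) perror("utimensat");

  PrintTimes(path);
  return 0;
}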
diff --git a/test/syscalls/linux/statfs.cc b/test/syscalls/linux/statfs.cc
deleted file mode 100644
index d4ea8e026..000000000
--- a/test/syscalls/linux/statfs.cc
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <linux/magic.h>
-#include <sys/statfs.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(StatfsTest, CannotStatBadPath) {
- auto temp_file = NewTempAbsPathInDir("/tmp");
-
- struct statfs st;
- EXPECT_THAT(statfs(temp_file.c_str(), &st), SyscallFailsWithErrno(ENOENT));
-}
-
-TEST(StatfsTest, InternalTmpfs) {
- auto temp_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- struct statfs st;
- EXPECT_THAT(statfs(temp_file.path().c_str(), &st), SyscallSucceeds());
-}
-
-TEST(StatfsTest, InternalDevShm) {
- struct statfs st;
- EXPECT_THAT(statfs("/dev/shm", &st), SyscallSucceeds());
-
- // This assumes that /dev/shm is tmpfs.
- // Note: We could be an overlay on some configurations.
- EXPECT_TRUE(st.f_type == TMPFS_MAGIC || st.f_type == OVERLAYFS_SUPER_MAGIC);
-}
-
-TEST(FstatfsTest, CannotStatBadFd) {
- struct statfs st;
- EXPECT_THAT(fstatfs(-1, &st), SyscallFailsWithErrno(EBADF));
-}
-
-TEST(FstatfsTest, InternalTmpfs) {
- auto temp_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(temp_file.path(), O_RDONLY));
-
- struct statfs st;
- EXPECT_THAT(fstatfs(fd.get(), &st), SyscallSucceeds());
-}
-
-TEST(FstatfsTest, CanStatFileWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- auto temp_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(temp_file.path(), O_PATH));
-
- struct statfs st;
- EXPECT_THAT(fstatfs(fd.get(), &st), SyscallSucceeds());
-}
-
-TEST(FstatfsTest, InternalDevShm) {
- auto temp_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/shm", O_RDONLY));
-
- struct statfs st;
- EXPECT_THAT(fstatfs(fd.get(), &st), SyscallSucceeds());
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
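
The deleted statfs tests above key off the reported filesystem magic, accepting either tmpfs or an overlay for /dev/shm. A minimal sketch of that f_type check, independent of the test utilities:

// Sketch only: the /dev/shm f_type check from the tests above, tolerating an
// overlay mount as gVisor configurations may present one.
#include <linux/magic.h>  // TMPFS_MAGIC, OVERLAYFS_SUPER_MAGIC
#include <sys/statfs.h>

#include <cstdio>

int main() {
  struct statfs st = {};
  if (statfs("/dev/shm", &st) != 0) {
    perror("statfs");
    return 1;
  }
  if (st.f_type == TMPFS_MAGIC) {
    printf("/dev/shm is tmpfs\n");
  } else if (st.f_type == OVERLAYFS_SUPER_MAGIC) {
    printf("/dev/shm is an overlay\n");
  } else {
    printf("/dev/shm has unexpected f_type %#lx\n",
           static_cast<unsigned long>(st.f_type));
  }
  return 0;
}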
diff --git a/test/syscalls/linux/sticky.cc b/test/syscalls/linux/sticky.cc
deleted file mode 100644
index 5a2841899..000000000
--- a/test/syscalls/linux/sticky.cc
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <grp.h>
-#include <sys/prctl.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "absl/flags/flag.h"
-#include "test/util/capability_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-ABSL_FLAG(int32_t, scratch_uid, 65534, "first scratch UID");
-ABSL_FLAG(int32_t, scratch_gid, 65534, "first scratch GID");
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(StickyTest, StickyBitPermDenied) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SETUID)));
-
- const TempPath parent = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- EXPECT_THAT(chmod(parent.path().c_str(), 0777 | S_ISVTX), SyscallSucceeds());
-
- // After changing credentials below, we need to use an open fd to make
- // modifications in the parent dir, because there is no guarantee that we will
- // still have the ability to open it.
- const FileDescriptor parent_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(parent.path(), O_DIRECTORY));
- ASSERT_THAT(openat(parent_fd.get(), "file", O_CREAT), SyscallSucceeds());
- ASSERT_THAT(mkdirat(parent_fd.get(), "dir", 0777), SyscallSucceeds());
- ASSERT_THAT(symlinkat("xyz", parent_fd.get(), "link"), SyscallSucceeds());
-
- // Drop privileges and change IDs only in child thread, or else this parent
- // thread won't be able to open some log files after the test ends.
- ScopedThread([&] {
- // Drop privileges.
- AutoCapability cap(CAP_FOWNER, false);
-
- // Change EUID and EGID.
- EXPECT_THAT(
- syscall(SYS_setresgid, -1, absl::GetFlag(FLAGS_scratch_gid), -1),
- SyscallSucceeds());
- EXPECT_THAT(
- syscall(SYS_setresuid, -1, absl::GetFlag(FLAGS_scratch_uid), -1),
- SyscallSucceeds());
-
- EXPECT_THAT(renameat(parent_fd.get(), "file", parent_fd.get(), "file2"),
- SyscallFailsWithErrno(EPERM));
- EXPECT_THAT(unlinkat(parent_fd.get(), "file", 0),
- SyscallFailsWithErrno(EPERM));
- EXPECT_THAT(unlinkat(parent_fd.get(), "dir", AT_REMOVEDIR),
- SyscallFailsWithErrno(EPERM));
- EXPECT_THAT(unlinkat(parent_fd.get(), "link", 0),
- SyscallFailsWithErrno(EPERM));
- });
-}
-
-TEST(StickyTest, StickyBitSameUID) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SETUID)));
-
- const TempPath parent = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- EXPECT_THAT(chmod(parent.path().c_str(), 0777 | S_ISVTX), SyscallSucceeds());
-
- // After changing credentials below, we need to use an open fd to make
- // modifications in the parent dir, because there is no guarantee that we will
- // still have the ability to open it.
- const FileDescriptor parent_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(parent.path(), O_DIRECTORY));
- ASSERT_THAT(openat(parent_fd.get(), "file", O_CREAT), SyscallSucceeds());
- ASSERT_THAT(mkdirat(parent_fd.get(), "dir", 0777), SyscallSucceeds());
- ASSERT_THAT(symlinkat("xyz", parent_fd.get(), "link"), SyscallSucceeds());
-
- // Drop privileges and change IDs only in child thread, or else this parent
- // thread won't be able to open some log files after the test ends.
- ScopedThread([&] {
- // Drop privileges.
- AutoCapability cap(CAP_FOWNER, false);
-
- // Change EGID.
- EXPECT_THAT(
- syscall(SYS_setresgid, -1, absl::GetFlag(FLAGS_scratch_gid), -1),
- SyscallSucceeds());
-
- // We still have the same EUID.
- EXPECT_THAT(renameat(parent_fd.get(), "file", parent_fd.get(), "file2"),
- SyscallSucceeds());
- EXPECT_THAT(unlinkat(parent_fd.get(), "file2", 0), SyscallSucceeds());
- EXPECT_THAT(unlinkat(parent_fd.get(), "dir", AT_REMOVEDIR),
- SyscallSucceeds());
- EXPECT_THAT(unlinkat(parent_fd.get(), "link", 0), SyscallSucceeds());
- });
-}
-
-TEST(StickyTest, StickyBitCapFOWNER) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SETUID)));
-
- const TempPath parent = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- EXPECT_THAT(chmod(parent.path().c_str(), 0777 | S_ISVTX), SyscallSucceeds());
-
- // After changing credentials below, we need to use an open fd to make
- // modifications in the parent dir, because there is no guarantee that we will
- // still have the ability to open it.
- const FileDescriptor parent_fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(parent.path(), O_DIRECTORY));
- ASSERT_THAT(openat(parent_fd.get(), "file", O_CREAT), SyscallSucceeds());
- ASSERT_THAT(mkdirat(parent_fd.get(), "dir", 0777), SyscallSucceeds());
- ASSERT_THAT(symlinkat("xyz", parent_fd.get(), "link"), SyscallSucceeds());
-
- // Drop privileges and change IDs only in child thread, or else this parent
- // thread won't be able to open some log files after the test ends.
- ScopedThread([&] {
- // Set PR_SET_KEEPCAPS.
- EXPECT_THAT(prctl(PR_SET_KEEPCAPS, 1, 0, 0, 0), SyscallSucceeds());
-
- // Change EUID and EGID.
- EXPECT_THAT(
- syscall(SYS_setresgid, -1, absl::GetFlag(FLAGS_scratch_gid), -1),
- SyscallSucceeds());
- EXPECT_THAT(
- syscall(SYS_setresuid, -1, absl::GetFlag(FLAGS_scratch_uid), -1),
- SyscallSucceeds());
-
- EXPECT_NO_ERRNO(SetCapability(CAP_FOWNER, true));
- EXPECT_THAT(renameat(parent_fd.get(), "file", parent_fd.get(), "file2"),
- SyscallSucceeds());
- EXPECT_THAT(unlinkat(parent_fd.get(), "file2", 0), SyscallSucceeds());
- EXPECT_THAT(unlinkat(parent_fd.get(), "dir", AT_REMOVEDIR),
- SyscallSucceeds());
- EXPECT_THAT(unlinkat(parent_fd.get(), "link", 0), SyscallSucceeds());
- });
-}
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
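For context on what the removed sticky-bit tests assert: in a directory with mode 01777, an entry may be renamed or unlinked only by the entry's owner, the directory's owner, or a holder of CAP_FOWNER. A small sketch (hypothetical /tmp path, no gtest) that merely sets and verifies the bit:

#include <sys/stat.h>
#include <sys/types.h>
#include <cstdio>

int main() {
  const char* dir = "/tmp/sticky_demo";  // hypothetical path
  if (mkdir(dir, 0777) != 0) perror("mkdir");
  // S_ISVTX is the sticky bit, i.e. the same 0777 | S_ISVTX mode the tests set.
  if (chmod(dir, 0777 | S_ISVTX) != 0) perror("chmod");
  struct stat st;
  if (stat(dir, &st) == 0)
    std::printf("sticky bit set: %d\n", (st.st_mode & S_ISVTX) != 0);
  return 0;
}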
diff --git a/test/syscalls/linux/symlink.cc b/test/syscalls/linux/symlink.cc
deleted file mode 100644
index fa6849f11..000000000
--- a/test/syscalls/linux/symlink.cc
+++ /dev/null
@@ -1,472 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <string.h>
-#include <unistd.h>
-
-#include <string>
-
-#include "gtest/gtest.h"
-#include "absl/time/clock.h"
-#include "test/util/capability_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-mode_t FilePermission(const std::string& path) {
- struct stat buf = {0};
- TEST_CHECK(lstat(path.c_str(), &buf) == 0);
- return buf.st_mode & 0777;
-}
-
-// Test that name collisions are checked on the new link path, not the source
-// path. Regression test for b/31782115.
-TEST(SymlinkTest, CanCreateSymlinkWithCachedSourceDirent) {
- const std::string srcname = NewTempAbsPath();
- const std::string newname = NewTempAbsPath();
- const std::string basedir = std::string(Dirname(srcname));
- ASSERT_EQ(basedir, Dirname(newname));
-
- ASSERT_THAT(chdir(basedir.c_str()), SyscallSucceeds());
-
- // Open the source node to cause the underlying dirent to be cached. It will
- // remain cached while we have the file open.
- int fd;
- ASSERT_THAT(fd = open(srcname.c_str(), O_CREAT | O_RDWR, 0666),
- SyscallSucceeds());
- FileDescriptor fd_closer(fd);
-
- // Attempt to create a symlink. If the bug exists, this will fail since the
- // dirent link creation code will check for a name collision on the source
- // link name.
- EXPECT_THAT(symlink(std::string(Basename(srcname)).c_str(),
- std::string(Basename(newname)).c_str()),
- SyscallSucceeds());
-}
-
-TEST(SymlinkTest, CanCreateSymlinkFile) {
- const std::string oldname = NewTempAbsPath();
- const std::string newname = NewTempAbsPath();
-
- int fd;
- ASSERT_THAT(fd = open(oldname.c_str(), O_CREAT | O_RDWR, 0666),
- SyscallSucceeds());
- EXPECT_THAT(close(fd), SyscallSucceeds());
-
- EXPECT_THAT(symlink(oldname.c_str(), newname.c_str()), SyscallSucceeds());
- EXPECT_EQ(FilePermission(newname), 0777);
-
- auto link = ASSERT_NO_ERRNO_AND_VALUE(ReadLink(newname));
- EXPECT_EQ(oldname, link);
-
- EXPECT_THAT(unlink(newname.c_str()), SyscallSucceeds());
- EXPECT_THAT(unlink(oldname.c_str()), SyscallSucceeds());
-}
-
-TEST(SymlinkTest, CanCreateSymlinkDir) {
- const std::string olddir = NewTempAbsPath();
- const std::string newdir = NewTempAbsPath();
-
- EXPECT_THAT(mkdir(olddir.c_str(), 0777), SyscallSucceeds());
- EXPECT_THAT(symlink(olddir.c_str(), newdir.c_str()), SyscallSucceeds());
- EXPECT_EQ(FilePermission(newdir), 0777);
-
- auto link = ASSERT_NO_ERRNO_AND_VALUE(ReadLink(newdir));
- EXPECT_EQ(olddir, link);
-
- EXPECT_THAT(unlink(newdir.c_str()), SyscallSucceeds());
-
- ASSERT_THAT(rmdir(olddir.c_str()), SyscallSucceeds());
-}
-
-TEST(SymlinkTest, CannotCreateSymlinkInReadOnlyDir) {
- // Drop capabilities that allow us to override file and directory permissions.
- AutoCapability cap1(CAP_DAC_OVERRIDE, false);
- AutoCapability cap2(CAP_DAC_READ_SEARCH, false);
-
- const std::string olddir = NewTempAbsPath();
- ASSERT_THAT(mkdir(olddir.c_str(), 0444), SyscallSucceeds());
-
- const std::string newdir = NewTempAbsPathInDir(olddir);
- EXPECT_THAT(symlink(olddir.c_str(), newdir.c_str()),
- SyscallFailsWithErrno(EACCES));
-
- ASSERT_THAT(rmdir(olddir.c_str()), SyscallSucceeds());
-}
-
-TEST(SymlinkTest, CannotSymlinkOverExistingFile) {
- const auto oldfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const auto newfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- EXPECT_THAT(symlink(oldfile.path().c_str(), newfile.path().c_str()),
- SyscallFailsWithErrno(EEXIST));
-}
-
-TEST(SymlinkTest, CannotSymlinkOverExistingDir) {
- const auto oldfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const auto newdir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- EXPECT_THAT(symlink(oldfile.path().c_str(), newdir.path().c_str()),
- SyscallFailsWithErrno(EEXIST));
-}
-
-TEST(SymlinkTest, OldnameIsEmpty) {
- const std::string newname = NewTempAbsPath();
- EXPECT_THAT(symlink("", newname.c_str()), SyscallFailsWithErrno(ENOENT));
-}
-
-TEST(SymlinkTest, OldnameIsDangling) {
- const std::string newname = NewTempAbsPath();
- EXPECT_THAT(symlink("/dangling", newname.c_str()), SyscallSucceeds());
-
- // This is required for S/R random save tests, which pre-run this test
- // in the same TEST_TMPDIR, so we need to clean it up after any
- // operations that exclusively create files, like the symlink above.
- EXPECT_THAT(unlink(newname.c_str()), SyscallSucceeds());
-}
-
-TEST(SymlinkTest, NewnameCannotExist) {
- const std::string newname =
- JoinPath(GetAbsoluteTestTmpdir(), "thisdoesnotexist", "foo");
- EXPECT_THAT(symlink("/thisdoesnotmatter", newname.c_str()),
- SyscallFailsWithErrno(ENOENT));
-}
-
-TEST(SymlinkTest, CanEvaluateLink) {
- const auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
-
- // We are going to assert that the symlink inode id is the same as the linked
- // file's inode id. In order for the inode id to be stable across
- // save/restore, it must be kept open. The FileDescriptor type will do that
- // for us automatically.
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR));
- struct stat file_st;
- EXPECT_THAT(fstat(fd.get(), &file_st), SyscallSucceeds());
-
- const std::string link = NewTempAbsPath();
- EXPECT_THAT(symlink(file.path().c_str(), link.c_str()), SyscallSucceeds());
- EXPECT_EQ(FilePermission(link), 0777);
-
- auto linkfd = ASSERT_NO_ERRNO_AND_VALUE(Open(link.c_str(), O_RDWR));
- struct stat link_st;
- EXPECT_THAT(fstat(linkfd.get(), &link_st), SyscallSucceeds());
-
- // Check that in fact newname points to the file we expect.
- EXPECT_EQ(file_st.st_dev, link_st.st_dev);
- EXPECT_EQ(file_st.st_ino, link_st.st_ino);
-}
-
-TEST(SymlinkTest, TargetIsNotMapped) {
- const std::string oldname = NewTempAbsPath();
- const std::string newname = NewTempAbsPath();
-
- int fd;
- // Create the target so that when we read the link, it exists.
- ASSERT_THAT(fd = open(oldname.c_str(), O_CREAT | O_RDWR, 0666),
- SyscallSucceeds());
- EXPECT_THAT(close(fd), SyscallSucceeds());
-
- // Create a symlink called newname that points to oldname.
- EXPECT_THAT(symlink(oldname.c_str(), newname.c_str()), SyscallSucceeds());
-
- std::vector<char> buf(1024);
- int linksize;
- // Read the link and assert that the oldname is still the same.
- EXPECT_THAT(linksize = readlink(newname.c_str(), buf.data(), 1024),
- SyscallSucceeds());
- EXPECT_EQ(0, strncmp(oldname.c_str(), buf.data(), linksize));
-
- EXPECT_THAT(unlink(newname.c_str()), SyscallSucceeds());
- EXPECT_THAT(unlink(oldname.c_str()), SyscallSucceeds());
-}
-
-TEST(SymlinkTest, PreadFromSymlink) {
- std::string name = NewTempAbsPath();
- int fd;
- ASSERT_THAT(fd = open(name.c_str(), O_CREAT, 0644), SyscallSucceeds());
- ASSERT_THAT(close(fd), SyscallSucceeds());
-
- std::string linkname = NewTempAbsPath();
- ASSERT_THAT(symlink(name.c_str(), linkname.c_str()), SyscallSucceeds());
-
- ASSERT_THAT(fd = open(linkname.c_str(), O_RDONLY), SyscallSucceeds());
-
- char buf[1024];
- EXPECT_THAT(pread64(fd, buf, 1024, 0), SyscallSucceeds());
- EXPECT_THAT(close(fd), SyscallSucceeds());
-
- EXPECT_THAT(unlink(name.c_str()), SyscallSucceeds());
- EXPECT_THAT(unlink(linkname.c_str()), SyscallSucceeds());
-}
-
-TEST(SymlinkTest, PwriteToSymlink) {
- std::string name = NewTempAbsPath();
- int fd;
- ASSERT_THAT(fd = open(name.c_str(), O_CREAT, 0644), SyscallSucceeds());
- ASSERT_THAT(close(fd), SyscallSucceeds());
-
- std::string linkname = NewTempAbsPath();
- ASSERT_THAT(symlink(name.c_str(), linkname.c_str()), SyscallSucceeds());
-
- ASSERT_THAT(fd = open(linkname.c_str(), O_WRONLY), SyscallSucceeds());
-
- const int data_size = 10;
- const std::string data = std::string(data_size, 'a');
- EXPECT_THAT(pwrite64(fd, data.c_str(), data.size(), 0),
- SyscallSucceedsWithValue(data.size()));
-
- ASSERT_THAT(close(fd), SyscallSucceeds());
- ASSERT_THAT(fd = open(name.c_str(), O_RDONLY), SyscallSucceeds());
-
- char buf[data_size + 1];
- EXPECT_THAT(pread64(fd, buf, data.size(), 0), SyscallSucceeds());
- buf[data.size()] = '\0';
- EXPECT_STREQ(buf, data.c_str());
-
- ASSERT_THAT(close(fd), SyscallSucceeds());
-
- EXPECT_THAT(unlink(name.c_str()), SyscallSucceeds());
- EXPECT_THAT(unlink(linkname.c_str()), SyscallSucceeds());
-}
-
-TEST(SymlinkTest, SymlinkAtDegradedPermissions) {
- // Drop capabilities that allow us to override file and directory permissions.
- AutoCapability cap1(CAP_DAC_OVERRIDE, false);
- AutoCapability cap2(CAP_DAC_READ_SEARCH, false);
-
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir.path()));
-
- int dirfd;
- ASSERT_THAT(dirfd = open(dir.path().c_str(), O_DIRECTORY, 0),
- SyscallSucceeds());
-
- const DisableSave ds; // Permissions are dropped.
- EXPECT_THAT(fchmod(dirfd, 0), SyscallSucceeds());
-
- std::string basename = std::string(Basename(file.path()));
- EXPECT_THAT(symlinkat("/dangling", dirfd, basename.c_str()),
- SyscallFailsWithErrno(EACCES));
- EXPECT_THAT(close(dirfd), SyscallSucceeds());
-}
-
-TEST(SymlinkTest, SymlinkAtDirWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const std::string filepath = NewTempAbsPathInDir(dir.path());
- const std::string base = std::string(Basename(filepath));
- FileDescriptor dirfd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(dir.path().c_str(), O_DIRECTORY | O_PATH));
-
- EXPECT_THAT(symlinkat("/dangling", dirfd.get(), base.c_str()),
- SyscallSucceeds());
-}
-
-TEST(SymlinkTest, ReadlinkAtDirWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const std::string filepath = NewTempAbsPathInDir(dir.path());
- const std::string base = std::string(Basename(filepath));
- ASSERT_THAT(symlink("/dangling", filepath.c_str()), SyscallSucceeds());
-
- FileDescriptor dirfd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(dir.path().c_str(), O_DIRECTORY | O_PATH));
-
- std::vector<char> buf(1024);
- int linksize;
- EXPECT_THAT(
- linksize = readlinkat(dirfd.get(), base.c_str(), buf.data(), 1024),
- SyscallSucceeds());
- EXPECT_EQ(0, strncmp("/dangling", buf.data(), linksize));
-}
-
-TEST(SymlinkTest, ReadlinkAtDegradedPermissions) {
- // Drop capabilities that allow us to override file and directory permissions.
- AutoCapability cap1(CAP_DAC_OVERRIDE, false);
- AutoCapability cap2(CAP_DAC_READ_SEARCH, false);
-
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const std::string oldpath = NewTempAbsPathInDir(dir.path());
- const std::string oldbase = std::string(Basename(oldpath));
- ASSERT_THAT(symlink("/dangling", oldpath.c_str()), SyscallSucceeds());
-
- int dirfd;
- EXPECT_THAT(dirfd = open(dir.path().c_str(), O_DIRECTORY, 0),
- SyscallSucceeds());
-
- const DisableSave ds; // Permissions are dropped.
- EXPECT_THAT(fchmod(dirfd, 0), SyscallSucceeds());
-
- char buf[1024];
- int linksize;
- EXPECT_THAT(linksize = readlinkat(dirfd, oldbase.c_str(), buf, 1024),
- SyscallFailsWithErrno(EACCES));
- EXPECT_THAT(close(dirfd), SyscallSucceeds());
-}
-
-TEST(SymlinkTest, ChmodSymlink) {
- auto target = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const std::string newpath = NewTempAbsPath();
- ASSERT_THAT(symlink(target.path().c_str(), newpath.c_str()),
- SyscallSucceeds());
- EXPECT_EQ(FilePermission(newpath), 0777);
- EXPECT_THAT(chmod(newpath.c_str(), 0666), SyscallSucceeds());
- EXPECT_EQ(FilePermission(newpath), 0777);
-}
-
-// Test that following a symlink updates the atime on the symlink.
-TEST(SymlinkTest, FollowUpdatesATime) {
- const auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const std::string link = NewTempAbsPath();
- EXPECT_THAT(symlink(file.path().c_str(), link.c_str()), SyscallSucceeds());
-
- // Lstat the symlink.
- struct stat st_before_follow;
- ASSERT_THAT(lstat(link.c_str(), &st_before_follow), SyscallSucceeds());
-
- // Let the clock advance.
- absl::SleepFor(absl::Seconds(1));
-
- // Open the file via the symlink.
- int fd;
- ASSERT_THAT(fd = open(link.c_str(), O_RDWR, 0666), SyscallSucceeds());
- FileDescriptor fd_closer(fd);
-
- // Lstat the symlink again, and check that atime is updated.
- struct stat st_after_follow;
- ASSERT_THAT(lstat(link.c_str(), &st_after_follow), SyscallSucceeds());
- EXPECT_LT(st_before_follow.st_atime, st_after_follow.st_atime);
-}
-
-TEST(SymlinkTest, SymlinkAtEmptyPath) {
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- auto fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(dir.path(), O_RDONLY | O_DIRECTORY, 0666));
- EXPECT_THAT(symlinkat(file.path().c_str(), fd.get(), ""),
- SyscallFailsWithErrno(ENOENT));
-}
-
-class ParamSymlinkTest : public ::testing::TestWithParam<std::string> {};
-
-// Test that creating an existing symlink with creat will create the target.
-TEST_P(ParamSymlinkTest, CreatLinkCreatesTarget) {
- const std::string target = GetParam();
- const std::string linkpath = NewTempAbsPath();
-
- ASSERT_THAT(symlink(target.c_str(), linkpath.c_str()), SyscallSucceeds());
-
- int fd;
- EXPECT_THAT(fd = creat(linkpath.c_str(), 0666), SyscallSucceeds());
- ASSERT_THAT(close(fd), SyscallSucceeds());
-
- ASSERT_THAT(chdir(GetAbsoluteTestTmpdir().c_str()), SyscallSucceeds());
- struct stat st;
- EXPECT_THAT(stat(target.c_str(), &st), SyscallSucceeds());
-
- ASSERT_THAT(unlink(linkpath.c_str()), SyscallSucceeds());
- ASSERT_THAT(unlink(target.c_str()), SyscallSucceeds());
-}
-
-// Test that opening an existing symlink with O_CREAT will create the target.
-TEST_P(ParamSymlinkTest, OpenLinkCreatesTarget) {
- const std::string target = GetParam();
- const std::string linkpath = NewTempAbsPath();
-
- ASSERT_THAT(symlink(target.c_str(), linkpath.c_str()), SyscallSucceeds());
-
- int fd;
- EXPECT_THAT(fd = open(linkpath.c_str(), O_CREAT, 0666), SyscallSucceeds());
- ASSERT_THAT(close(fd), SyscallSucceeds());
-
- ASSERT_THAT(chdir(GetAbsoluteTestTmpdir().c_str()), SyscallSucceeds());
- struct stat st;
- EXPECT_THAT(stat(target.c_str(), &st), SyscallSucceeds());
-
- ASSERT_THAT(unlink(linkpath.c_str()), SyscallSucceeds());
- ASSERT_THAT(unlink(target.c_str()), SyscallSucceeds());
-}
-
-// Test that opening a self-symlink with O_CREAT will fail with ELOOP.
-TEST_P(ParamSymlinkTest, CreateExistingSelfLink) {
- ASSERT_THAT(chdir(GetAbsoluteTestTmpdir().c_str()), SyscallSucceeds());
-
- const std::string linkpath = GetParam();
- ASSERT_THAT(symlink(linkpath.c_str(), linkpath.c_str()), SyscallSucceeds());
-
- EXPECT_THAT(open(linkpath.c_str(), O_CREAT, 0666),
- SyscallFailsWithErrno(ELOOP));
-
- ASSERT_THAT(unlink(linkpath.c_str()), SyscallSucceeds());
-}
-
-// Test that opening a file that is a symlink to its parent directory fails
-// with ELOOP.
-TEST_P(ParamSymlinkTest, CreateExistingParentLink) {
- ASSERT_THAT(chdir(GetAbsoluteTestTmpdir().c_str()), SyscallSucceeds());
-
- const std::string linkpath = GetParam();
- const std::string target = JoinPath(linkpath, "child");
- ASSERT_THAT(symlink(target.c_str(), linkpath.c_str()), SyscallSucceeds());
-
- EXPECT_THAT(open(linkpath.c_str(), O_CREAT, 0666),
- SyscallFailsWithErrno(ELOOP));
-
- ASSERT_THAT(unlink(linkpath.c_str()), SyscallSucceeds());
-}
-
-// Test that opening an existing symlink with O_CREAT|O_EXCL will fail with
-// EEXIST.
-TEST_P(ParamSymlinkTest, OpenLinkExclFails) {
- const std::string target = GetParam();
- const std::string linkpath = NewTempAbsPath();
-
- ASSERT_THAT(symlink(target.c_str(), linkpath.c_str()), SyscallSucceeds());
-
- EXPECT_THAT(open(linkpath.c_str(), O_CREAT | O_EXCL, 0666),
- SyscallFailsWithErrno(EEXIST));
-
- ASSERT_THAT(unlink(linkpath.c_str()), SyscallSucceeds());
-}
-
-// Test that opening an existing symlink with O_CREAT|O_NOFOLLOW will fail with
-// ELOOP.
-TEST_P(ParamSymlinkTest, OpenLinkNoFollowFails) {
- const std::string target = GetParam();
- const std::string linkpath = NewTempAbsPath();
-
- ASSERT_THAT(symlink(target.c_str(), linkpath.c_str()), SyscallSucceeds());
-
- EXPECT_THAT(open(linkpath.c_str(), O_CREAT | O_NOFOLLOW, 0666),
- SyscallFailsWithErrno(ELOOP));
-
- ASSERT_THAT(unlink(linkpath.c_str()), SyscallSucceeds());
-}
-
-INSTANTIATE_TEST_SUITE_P(AbsAndRelTarget, ParamSymlinkTest,
- ::testing::Values(NewTempAbsPath(), NewTempRelPath()));
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
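As a quick reference for the symlink()/readlink() round trip the removed tests build on, a minimal sketch with hypothetical paths (readlink() does not NUL-terminate its output, which the tests sidestep with strncmp):

#include <unistd.h>
#include <cstdio>
#include <cstring>

int main() {
  const char* target = "/tmp/symlink_demo_target";  // need not exist
  const char* link = "/tmp/symlink_demo_link";      // hypothetical path
  if (symlink(target, link) != 0) { perror("symlink"); return 1; }
  char buf[256];
  ssize_t n = readlink(link, buf, sizeof(buf) - 1);  // no NUL terminator
  if (n < 0) { perror("readlink"); return 1; }
  buf[n] = '\0';
  std::printf("%s -> %s (match=%d)\n", link, buf, strcmp(buf, target) == 0);
  unlink(link);
  return 0;
}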
diff --git a/test/syscalls/linux/sync.cc b/test/syscalls/linux/sync.cc
deleted file mode 100644
index 84a2c4ed7..000000000
--- a/test/syscalls/linux/sync.cc
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <stdio.h>
-#include <sys/syscall.h>
-#include <unistd.h>
-
-#include <string>
-
-#include "gtest/gtest.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(SyncTest, SyncEverything) {
- ASSERT_THAT(syscall(SYS_sync), SyscallSucceeds());
-}
-
-TEST(SyncTest, SyncFileSystem) {
- int fd;
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- ASSERT_THAT(fd = open(f.path().c_str(), O_RDONLY), SyscallSucceeds());
- EXPECT_THAT(syncfs(fd), SyscallSucceeds());
- EXPECT_THAT(close(fd), SyscallSucceeds());
-}
-
-TEST(SyncTest, SyncFromPipe) {
- int pipes[2];
- EXPECT_THAT(pipe(pipes), SyscallSucceeds());
- EXPECT_THAT(syncfs(pipes[0]), SyscallSucceeds());
- EXPECT_THAT(syncfs(pipes[1]), SyscallSucceeds());
- EXPECT_THAT(close(pipes[0]), SyscallSucceeds());
- EXPECT_THAT(close(pipes[1]), SyscallSucceeds());
-}
-
-TEST(SyncTest, CannotSyncFileSystemAtBadFd) {
- EXPECT_THAT(syncfs(-1), SyscallFailsWithErrno(EBADF));
-}
-
-TEST(SyncTest, CannotSyncFileSystemAtOpathFD) {
- SKIP_IF(IsRunningWithVFS1());
-
- const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), "", TempPath::kDefaultFileMode));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_PATH));
-
- EXPECT_THAT(syncfs(fd.get()), SyscallFailsWithErrno(EBADF));
-}
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
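The removed sync tests cover sync(2) and syncfs(2). A minimal syncfs sketch using a hypothetical path (glibc declares syncfs in <unistd.h> under _GNU_SOURCE, which g++ defines by default):

#include <fcntl.h>
#include <unistd.h>
#include <cstdio>

int main() {
  int fd = open("/tmp", O_RDONLY);  // any fd on the target filesystem
  if (fd < 0) { perror("open"); return 1; }
  // Flushes the filesystem containing fd; fails with EBADF for invalid fds
  // (and, per the removed test, for O_PATH fds as well).
  if (syncfs(fd) != 0) perror("syncfs");
  close(fd);
  return 0;
}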
diff --git a/test/syscalls/linux/sync_file_range.cc b/test/syscalls/linux/sync_file_range.cc
deleted file mode 100644
index 36cc42043..000000000
--- a/test/syscalls/linux/sync_file_range.cc
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <stdio.h>
-#include <unistd.h>
-
-#include <string>
-
-#include "gtest/gtest.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(SyncFileRangeTest, TempFileSucceeds) {
- auto tmpfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- auto f = ASSERT_NO_ERRNO_AND_VALUE(Open(tmpfile.path(), O_RDWR));
- constexpr char data[] = "some data to sync";
- int fd = f.get();
-
- EXPECT_THAT(write(fd, data, sizeof(data)),
- SyscallSucceedsWithValue(sizeof(data)));
- EXPECT_THAT(sync_file_range(fd, 0, 0, SYNC_FILE_RANGE_WRITE),
- SyscallSucceeds());
- EXPECT_THAT(sync_file_range(fd, 0, 0, 0), SyscallSucceeds());
- EXPECT_THAT(
- sync_file_range(fd, 0, 0,
- SYNC_FILE_RANGE_WRITE | SYNC_FILE_RANGE_WAIT_AFTER |
- SYNC_FILE_RANGE_WAIT_BEFORE),
- SyscallSucceeds());
- EXPECT_THAT(sync_file_range(
- fd, 0, 1, SYNC_FILE_RANGE_WRITE | SYNC_FILE_RANGE_WAIT_AFTER),
- SyscallSucceeds());
- EXPECT_THAT(sync_file_range(
- fd, 1, 0, SYNC_FILE_RANGE_WRITE | SYNC_FILE_RANGE_WAIT_AFTER),
- SyscallSucceeds());
-}
-
-TEST(SyncFileRangeTest, CannotSyncFileRangeOnUnopenedFd) {
- auto tmpfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- auto f = ASSERT_NO_ERRNO_AND_VALUE(Open(tmpfile.path(), O_RDWR));
- constexpr char data[] = "some data to sync";
- int fd = f.get();
-
- EXPECT_THAT(write(fd, data, sizeof(data)),
- SyscallSucceedsWithValue(sizeof(data)));
-
- pid_t pid = fork();
- if (pid == 0) {
- f.reset();
-
- // fd is now invalid.
- TEST_CHECK(sync_file_range(fd, 0, 0, SYNC_FILE_RANGE_WRITE) == -1);
- TEST_PCHECK(errno == EBADF);
- _exit(0);
- }
- ASSERT_THAT(pid, SyscallSucceeds());
-
- int status = 0;
- ASSERT_THAT(waitpid(pid, &status, 0), SyscallSucceedsWithValue(pid));
- EXPECT_TRUE(WIFEXITED(status));
- EXPECT_EQ(WEXITSTATUS(status), 0);
-}
-
-TEST(SyncFileRangeTest, BadArgs) {
- auto tmpfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- auto f = ASSERT_NO_ERRNO_AND_VALUE(Open(tmpfile.path(), O_RDWR));
- int fd = f.get();
-
- EXPECT_THAT(sync_file_range(fd, -1, 0, 0), SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(sync_file_range(fd, 0, -1, 0), SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(sync_file_range(fd, 8912, INT64_MAX - 4096, 0),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(SyncFileRangeTest, CannotSyncFileRangeWithWaitBefore) {
- auto tmpfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- auto f = ASSERT_NO_ERRNO_AND_VALUE(Open(tmpfile.path(), O_RDWR));
- constexpr char data[] = "some data to sync";
- int fd = f.get();
-
- EXPECT_THAT(write(fd, data, sizeof(data)),
- SyscallSucceedsWithValue(sizeof(data)));
- if (IsRunningOnGvisor()) {
- EXPECT_THAT(sync_file_range(fd, 0, 0, SYNC_FILE_RANGE_WAIT_BEFORE),
- SyscallFailsWithErrno(ENOSYS));
- EXPECT_THAT(
- sync_file_range(fd, 0, 0,
- SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE),
- SyscallFailsWithErrno(ENOSYS));
- }
-}
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
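For the flag combinations the removed sync_file_range tests pass (note they expect ENOSYS for SYNC_FILE_RANGE_WAIT_BEFORE under gVisor), a standalone sketch with a hypothetical path; sync_file_range is Linux-specific and declared in <fcntl.h> under _GNU_SOURCE, which g++ defines by default:

#include <fcntl.h>
#include <unistd.h>
#include <cstdio>

int main() {
  int fd = open("/tmp/sfr_demo", O_CREAT | O_RDWR, 0644);  // hypothetical path
  if (fd < 0) { perror("open"); return 1; }
  const char data[] = "some data to sync";
  if (write(fd, data, sizeof(data)) < 0) perror("write");
  // offset=0, nbytes=0 means "from offset to end of file". Together the three
  // flags wait for in-flight writeback, start new writeback, then wait for it
  // to complete.
  if (sync_file_range(fd, 0, 0,
                      SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE |
                          SYNC_FILE_RANGE_WAIT_AFTER) != 0) {
    perror("sync_file_range");
  }
  close(fd);
  return 0;
}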
diff --git a/test/syscalls/linux/sysinfo.cc b/test/syscalls/linux/sysinfo.cc
deleted file mode 100644
index 1a71256da..000000000
--- a/test/syscalls/linux/sysinfo.cc
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// This is a very simple sanity test to validate that the sysinfo syscall is
-// supported by gvisor and returns sane values.
-#include <sys/syscall.h>
-#include <sys/sysinfo.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(SysinfoTest, SysinfoIsCallable) {
- struct sysinfo ignored = {};
- EXPECT_THAT(syscall(SYS_sysinfo, &ignored), SyscallSucceedsWithValue(0));
-}
-
-TEST(SysinfoTest, EfaultProducedOnBadAddress) {
- // Validate that we return EFAULT when a bad address is provided, as
- // specified by man 2 sysinfo.
- EXPECT_THAT(syscall(SYS_sysinfo, nullptr), SyscallFailsWithErrno(EFAULT));
-}
-
-TEST(SysinfoTest, TotalRamSaneValue) {
- struct sysinfo s = {};
- EXPECT_THAT(sysinfo(&s), SyscallSucceedsWithValue(0));
- EXPECT_GT(s.totalram, 0);
-}
-
-TEST(SysinfoTest, MemunitSet) {
- struct sysinfo s = {};
- EXPECT_THAT(sysinfo(&s), SyscallSucceedsWithValue(0));
- EXPECT_GE(s.mem_unit, 1);
-}
-
-TEST(SysinfoTest, UptimeSaneValue) {
- struct sysinfo s = {};
- EXPECT_THAT(sysinfo(&s), SyscallSucceedsWithValue(0));
- EXPECT_GE(s.uptime, 0);
-}
-
-TEST(SysinfoTest, UptimeIncreasingValue) {
- struct sysinfo s = {};
- EXPECT_THAT(sysinfo(&s), SyscallSucceedsWithValue(0));
- absl::SleepFor(absl::Seconds(2));
- struct sysinfo s2 = {};
- EXPECT_THAT(sysinfo(&s2), SyscallSucceedsWithValue(0));
- EXPECT_LT(s.uptime, s2.uptime);
-}
-
-TEST(SysinfoTest, FreeRamSaneValue) {
- struct sysinfo s = {};
- EXPECT_THAT(sysinfo(&s), SyscallSucceedsWithValue(0));
- EXPECT_GT(s.freeram, 0);
- EXPECT_LT(s.freeram, s.totalram);
-}
-
-TEST(SysinfoTest, NumProcsSaneValue) {
- struct sysinfo s = {};
- EXPECT_THAT(sysinfo(&s), SyscallSucceedsWithValue(0));
- EXPECT_GT(s.procs, 0);
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
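A compact sketch of the sysinfo(2) call whose fields the removed sanity checks assert on (the checks expect uptime >= 0, totalram > 0, 0 < freeram < totalram, procs > 0, and mem_unit >= 1):

#include <sys/sysinfo.h>
#include <cstdio>

int main() {
  struct sysinfo s = {};
  if (sysinfo(&s) != 0) { perror("sysinfo"); return 1; }
  std::printf("uptime=%ld totalram=%lu freeram=%lu procs=%u mem_unit=%u\n",
              static_cast<long>(s.uptime),
              static_cast<unsigned long>(s.totalram),
              static_cast<unsigned long>(s.freeram),
              static_cast<unsigned>(s.procs),
              static_cast<unsigned>(s.mem_unit));
  return 0;
}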
diff --git a/test/syscalls/linux/syslog.cc b/test/syscalls/linux/syslog.cc
deleted file mode 100644
index 9a7407d96..000000000
--- a/test/syscalls/linux/syslog.cc
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sys/klog.h>
-#include <sys/syscall.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-constexpr int SYSLOG_ACTION_READ_ALL = 3;
-constexpr int SYSLOG_ACTION_SIZE_BUFFER = 10;
-
-int Syslog(int type, char* buf, int len) {
- return syscall(__NR_syslog, type, buf, len);
-}
-
-// Only SYSLOG_ACTION_SIZE_BUFFER and SYSLOG_ACTION_READ_ALL are implemented in
-// gVisor.
-
-TEST(Syslog, Size) {
- EXPECT_THAT(Syslog(SYSLOG_ACTION_SIZE_BUFFER, nullptr, 0), SyscallSucceeds());
-}
-
-TEST(Syslog, ReadAll) {
- // There might not be anything to read, so we can't check the write count.
- char buf[100];
- EXPECT_THAT(Syslog(SYSLOG_ACTION_READ_ALL, buf, sizeof(buf)),
- SyscallSucceeds());
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
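The removed syslog test issues the raw syscall; glibc also exposes the klogctl(3) wrapper in <sys/klog.h>. A small sketch using it (SYSLOG_ACTION_SIZE_BUFFER may require CAP_SYSLOG when kernel.dmesg_restrict is set):

#include <sys/klog.h>
#include <cstdio>

int main() {
  // 10 == SYSLOG_ACTION_SIZE_BUFFER: returns the size of the kernel ring
  // buffer, matching the constant defined in the removed test.
  int size = klogctl(10, nullptr, 0);
  if (size < 0) { perror("klogctl"); return 1; }
  std::printf("kernel log buffer size: %d bytes\n", size);
  return 0;
}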
diff --git a/test/syscalls/linux/sysret.cc b/test/syscalls/linux/sysret.cc
deleted file mode 100644
index 19ffbd85b..000000000
--- a/test/syscalls/linux/sysret.cc
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Tests to verify that the behavior of Linux and gVisor matches when
-// 'sysret' returns to a bad (i.e. non-canonical) %rip or %rsp.
-
-#include <linux/elf.h>
-#include <sys/ptrace.h>
-#include <sys/user.h>
-
-#include "gtest/gtest.h"
-#include "test/util/logging.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-constexpr uint64_t kNonCanonicalRip = 0xCCCC000000000000;
-constexpr uint64_t kNonCanonicalRsp = 0xFFFF000000000000;
-
-class SysretTest : public ::testing::Test {
- protected:
- struct user_regs_struct regs_;
- struct iovec iov;
- pid_t child_;
-
- void SetUp() override {
- pid_t pid = fork();
-
- // Child.
- if (pid == 0) {
- TEST_PCHECK(ptrace(PTRACE_TRACEME, 0, 0, 0) == 0);
- MaybeSave();
- TEST_PCHECK(raise(SIGSTOP) == 0);
- MaybeSave();
- _exit(0);
- }
-
- // Parent.
- int status;
- memset(&iov, 0, sizeof(iov));
- ASSERT_THAT(pid, SyscallSucceeds()); // Might still be < 0.
- ASSERT_THAT(waitpid(pid, &status, 0), SyscallSucceedsWithValue(pid));
- EXPECT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP);
-
- iov.iov_base = &regs_;
- iov.iov_len = sizeof(regs_);
- ASSERT_THAT(ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov),
- SyscallSucceeds());
-
- child_ = pid;
- }
-
- void Detach() {
- ASSERT_THAT(ptrace(PTRACE_DETACH, child_, 0, 0), SyscallSucceeds());
- }
-
- void SetRip(uint64_t newrip) {
-#if defined(__x86_64__)
- regs_.rip = newrip;
-#elif defined(__aarch64__)
- regs_.pc = newrip;
-#else
-#error "Unknown architecture"
-#endif
- ASSERT_THAT(ptrace(PTRACE_SETREGSET, child_, NT_PRSTATUS, &iov),
- SyscallSucceeds());
- }
-
- void SetRsp(uint64_t newrsp) {
-#if defined(__x86_64__)
- regs_.rsp = newrsp;
-#elif defined(__aarch64__)
- regs_.sp = newrsp;
-#else
-#error "Unknown architecture"
-#endif
- ASSERT_THAT(ptrace(PTRACE_SETREGSET, child_, NT_PRSTATUS, &iov),
- SyscallSucceeds());
- }
-
- // Wait waits for the child pid and returns the exit status.
- int Wait() {
- int status;
- while (true) {
- int rval = wait4(child_, &status, 0, NULL);
- if (rval < 0) {
- return rval;
- }
- if (rval == child_) {
- return status;
- }
- }
- }
-};
-
-TEST_F(SysretTest, JustDetach) {
- Detach();
- int status = Wait();
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
- << "status = " << status;
-}
-
-TEST_F(SysretTest, BadRip) {
- SetRip(kNonCanonicalRip);
- Detach();
- int status = Wait();
- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGSEGV)
- << "status = " << status;
-}
-
-TEST_F(SysretTest, BadRsp) {
- SetRsp(kNonCanonicalRsp);
- Detach();
- int status = Wait();
-#if defined(__x86_64__)
- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGBUS)
- << "status = " << status;
-#elif defined(__aarch64__)
- EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGSEGV)
- << "status = " << status;
-#else
-#error "Unknown architecture"
-#endif
-}
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/tcp_socket.cc b/test/syscalls/linux/tcp_socket.cc
deleted file mode 100644
index 183819faf..000000000
--- a/test/syscalls/linux/tcp_socket.cc
+++ /dev/null
@@ -1,2097 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#ifdef __linux__
-#include <linux/filter.h>
-#endif // __linux__
-#include <netinet/in.h>
-#include <netinet/tcp.h>
-#include <poll.h>
-#include <sys/ioctl.h>
-#include <sys/socket.h>
-#include <unistd.h>
-
-#include <limits>
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/posix_error.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-PosixErrorOr<sockaddr_storage> InetLoopbackAddr(int family) {
- struct sockaddr_storage addr;
- memset(&addr, 0, sizeof(addr));
- addr.ss_family = family;
- switch (family) {
- case AF_INET:
- reinterpret_cast<struct sockaddr_in*>(&addr)->sin_addr.s_addr =
- htonl(INADDR_LOOPBACK);
- break;
- case AF_INET6:
- reinterpret_cast<struct sockaddr_in6*>(&addr)->sin6_addr =
- in6addr_loopback;
- break;
- default:
- return PosixError(EINVAL,
- absl::StrCat("unknown socket family: ", family));
- }
- return addr;
-}
-
-static void FillSocketBuffers(int sender, int receiver) {
- // Set the FD to O_NONBLOCK.
- int opts;
- int orig_opts;
- ASSERT_THAT(opts = fcntl(sender, F_GETFL), SyscallSucceeds());
- orig_opts = opts;
- opts |= O_NONBLOCK;
- ASSERT_THAT(fcntl(sender, F_SETFL, opts), SyscallSucceeds());
-
- // Set TCP_NODELAY, which will cause linux to fill the receive buffer from the
- // send buffer as quickly as possible. This way we can fill up both buffers
- // faster.
- constexpr int tcp_nodelay_flag = 1;
- ASSERT_THAT(setsockopt(sender, IPPROTO_TCP, TCP_NODELAY, &tcp_nodelay_flag,
- sizeof(tcp_nodelay_flag)),
- SyscallSucceeds());
-
- // Set a 256KB send/receive buffer.
- int buf_sz = 1 << 18;
- EXPECT_THAT(
- setsockopt(receiver, SOL_SOCKET, SO_RCVBUF, &buf_sz, sizeof(buf_sz)),
- SyscallSucceedsWithValue(0));
- EXPECT_THAT(
- setsockopt(sender, SOL_SOCKET, SO_SNDBUF, &buf_sz, sizeof(buf_sz)),
- SyscallSucceedsWithValue(0));
-
- // Create a large buffer that will be used for sending.
- std::vector<char> buf(1 << 16);
-
- // Write until we receive an error.
- while (RetryEINTR(send)(sender, buf.data(), buf.size(), 0) != -1) {
- // Sleep to give linux a chance to move data from the send buffer to the
- // receive buffer.
- usleep(10000); // 10ms.
- }
- // The last error should have been EWOULDBLOCK.
- ASSERT_EQ(errno, EWOULDBLOCK);
-
- // Restore the fcntl opts
- ASSERT_THAT(fcntl(sender, F_SETFL, orig_opts), SyscallSucceeds());
-}
-
-// Fixture for tests parameterized by the address family to use (AF_INET and
-// AF_INET6) when creating sockets.
-class TcpSocketTest : public ::testing::TestWithParam<int> {
- protected:
- // Creates three sockets that will be used by test cases -- a listener, one
- // that connects, and the accepted one.
- void SetUp() override;
-
- // Closes the sockets created by SetUp().
- void TearDown() override;
-
- // Listening socket.
- int listener_ = -1;
-
- // Socket connected via connect().
- int first_fd = -1;
-
- // Socket connected via accept().
- int second_fd = -1;
-
- // Initial size of the send buffer.
- int sendbuf_size_ = -1;
-};
-
-void TcpSocketTest::SetUp() {
- ASSERT_THAT(listener_ = socket(GetParam(), SOCK_STREAM, IPPROTO_TCP),
- SyscallSucceeds());
-
- ASSERT_THAT(first_fd = socket(GetParam(), SOCK_STREAM, IPPROTO_TCP),
- SyscallSucceeds());
-
- // Initialize address to the loopback one.
- sockaddr_storage addr =
- ASSERT_NO_ERRNO_AND_VALUE(InetLoopbackAddr(GetParam()));
- socklen_t addrlen = sizeof(addr);
-
- // Bind to some port then start listening.
- ASSERT_THAT(bind(listener_, AsSockAddr(&addr), addrlen), SyscallSucceeds());
-
- ASSERT_THAT(listen(listener_, SOMAXCONN), SyscallSucceeds());
-
- // Get the address we're listening on, then connect to it. We need to do this
- // because we're allowing the stack to pick a port for us.
- ASSERT_THAT(getsockname(listener_, AsSockAddr(&addr), &addrlen),
- SyscallSucceeds());
-
- ASSERT_THAT(RetryEINTR(connect)(first_fd, AsSockAddr(&addr), addrlen),
- SyscallSucceeds());
-
- // Get the initial send buffer size.
- socklen_t optlen = sizeof(sendbuf_size_);
- ASSERT_THAT(
- getsockopt(first_fd, SOL_SOCKET, SO_SNDBUF, &sendbuf_size_, &optlen),
- SyscallSucceeds());
-
- // Accept the connection.
- ASSERT_THAT(second_fd = RetryEINTR(accept)(listener_, nullptr, nullptr),
- SyscallSucceeds());
-}
-
-void TcpSocketTest::TearDown() {
- EXPECT_THAT(close(listener_), SyscallSucceeds());
- if (first_fd >= 0) {
- EXPECT_THAT(close(first_fd), SyscallSucceeds());
- }
- if (second_fd >= 0) {
- EXPECT_THAT(close(second_fd), SyscallSucceeds());
- }
-}
-
-TEST_P(TcpSocketTest, ConnectOnEstablishedConnection) {
- sockaddr_storage addr =
- ASSERT_NO_ERRNO_AND_VALUE(InetLoopbackAddr(GetParam()));
- socklen_t addrlen = sizeof(addr);
-
- ASSERT_THAT(connect(first_fd, reinterpret_cast<const struct sockaddr*>(&addr),
- addrlen),
- SyscallFailsWithErrno(EISCONN));
- ASSERT_THAT(connect(second_fd,
- reinterpret_cast<const struct sockaddr*>(&addr), addrlen),
- SyscallFailsWithErrno(EISCONN));
-}
-
-TEST_P(TcpSocketTest, ShutdownWriteInTimeWait) {
- EXPECT_THAT(shutdown(second_fd, SHUT_WR), SyscallSucceeds());
- EXPECT_THAT(shutdown(first_fd, SHUT_RDWR), SyscallSucceeds());
- absl::SleepFor(absl::Seconds(1)); // Wait to enter TIME_WAIT.
- EXPECT_THAT(shutdown(second_fd, SHUT_WR), SyscallFailsWithErrno(ENOTCONN));
-}
-
-TEST_P(TcpSocketTest, ShutdownWriteInFinWait1) {
- EXPECT_THAT(shutdown(second_fd, SHUT_WR), SyscallSucceeds());
- EXPECT_THAT(shutdown(second_fd, SHUT_WR), SyscallSucceeds());
- absl::SleepFor(absl::Seconds(1)); // Wait to enter FIN-WAIT2.
- EXPECT_THAT(shutdown(second_fd, SHUT_WR), SyscallSucceeds());
-}
-
-TEST_P(TcpSocketTest, DataCoalesced) {
- char buf[10];
-
- // Write in two steps.
- ASSERT_THAT(RetryEINTR(write)(first_fd, buf, sizeof(buf) / 2),
- SyscallSucceedsWithValue(sizeof(buf) / 2));
- ASSERT_THAT(RetryEINTR(write)(first_fd, buf, sizeof(buf) / 2),
- SyscallSucceedsWithValue(sizeof(buf) / 2));
-
- // Allow stack to process both packets.
- absl::SleepFor(absl::Seconds(1));
-
- // Read in one shot.
- EXPECT_THAT(RetryEINTR(recv)(second_fd, buf, sizeof(buf), 0),
- SyscallSucceedsWithValue(sizeof(buf)));
-}
-
-TEST_P(TcpSocketTest, SenderAddressIgnored) {
- char buf[3];
- ASSERT_THAT(RetryEINTR(write)(first_fd, buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- struct sockaddr_storage addr;
- socklen_t addrlen = sizeof(addr);
- memset(&addr, 0, sizeof(addr));
-
- ASSERT_THAT(RetryEINTR(recvfrom)(second_fd, buf, sizeof(buf), 0,
- AsSockAddr(&addr), &addrlen),
- SyscallSucceedsWithValue(3));
-
- // Check that addr remains zeroed-out.
- const char* ptr = reinterpret_cast<char*>(&addr);
- for (size_t i = 0; i < sizeof(addr); i++) {
- EXPECT_EQ(ptr[i], 0);
- }
-}
-
-TEST_P(TcpSocketTest, SenderAddressIgnoredOnPeek) {
- char buf[3];
- ASSERT_THAT(RetryEINTR(write)(first_fd, buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- struct sockaddr_storage addr;
- socklen_t addrlen = sizeof(addr);
- memset(&addr, 0, sizeof(addr));
-
- ASSERT_THAT(RetryEINTR(recvfrom)(second_fd, buf, sizeof(buf), MSG_PEEK,
- AsSockAddr(&addr), &addrlen),
- SyscallSucceedsWithValue(3));
-
- // Check that addr remains zeroed-out.
- const char* ptr = reinterpret_cast<char*>(&addr);
- for (size_t i = 0; i < sizeof(addr); i++) {
- EXPECT_EQ(ptr[i], 0);
- }
-}
-
-TEST_P(TcpSocketTest, SendtoAddressIgnored) {
- struct sockaddr_storage addr;
- memset(&addr, 0, sizeof(addr));
- addr.ss_family = GetParam(); // FIXME(b/63803955)
-
- char data = '\0';
- EXPECT_THAT(RetryEINTR(sendto)(first_fd, &data, sizeof(data), 0,
- AsSockAddr(&addr), sizeof(addr)),
- SyscallSucceedsWithValue(1));
-}
-
-TEST_P(TcpSocketTest, WritevZeroIovec) {
- // Use 2 bytes just to be safe, so that vecs[1] does not point at something
- // random (even though its length is 0).
- char buf[2];
- char recv_buf[1];
-
- // Construct a vec where the final vector is of length 0.
- iovec vecs[2] = {};
- vecs[0].iov_base = buf;
- vecs[0].iov_len = 1;
- vecs[1].iov_base = buf + 1;
- vecs[1].iov_len = 0;
-
- EXPECT_THAT(RetryEINTR(writev)(first_fd, vecs, 2),
- SyscallSucceedsWithValue(1));
-
- EXPECT_THAT(RetryEINTR(recv)(second_fd, recv_buf, 1, 0),
- SyscallSucceedsWithValue(1));
- EXPECT_EQ(memcmp(recv_buf, buf, 1), 0);
-}
-
-TEST_P(TcpSocketTest, ZeroWriteAllowed) {
- char buf[3];
- // Send a zero length packet.
- ASSERT_THAT(RetryEINTR(write)(first_fd, buf, 0), SyscallSucceedsWithValue(0));
- // Verify that there is no packet available.
- EXPECT_THAT(RetryEINTR(recv)(second_fd, buf, sizeof(buf), MSG_DONTWAIT),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-// Test that a non-blocking write with a buffer that is larger than the send
-// buffer size will not actually write the whole thing at once. Regression test
-// for b/64438887.
-TEST_P(TcpSocketTest, NonblockingLargeWrite) {
- // Set the FD to O_NONBLOCK.
- int opts;
- ASSERT_THAT(opts = fcntl(first_fd, F_GETFL), SyscallSucceeds());
- opts |= O_NONBLOCK;
- ASSERT_THAT(fcntl(first_fd, F_SETFL, opts), SyscallSucceeds());
-
- // Allocate a buffer three times the size of the send buffer. We do this with
- // a vector to avoid allocating on the stack.
- int size = 3 * sendbuf_size_;
- std::vector<char> buf(size);
-
- // Try to write the whole thing.
- int n;
- ASSERT_THAT(n = RetryEINTR(write)(first_fd, buf.data(), size),
- SyscallSucceeds());
-
- // We should have written something, but not the whole thing.
- EXPECT_GT(n, 0);
- EXPECT_LT(n, size);
-}
-
-// Test that a blocking write with a buffer that is larger than the send buffer
-// will block until the entire buffer is sent.
-TEST_P(TcpSocketTest, BlockingLargeWrite) {
- // Allocate a buffer three times the size of the send buffer on the heap. We
- // do this as a vector to avoid allocating on the stack.
- int size = 3 * sendbuf_size_;
- std::vector<char> writebuf(size);
-
- // Start reading the response in a loop.
- int read_bytes = 0;
- ScopedThread t([this, &read_bytes]() {
- // Avoid interrupting the blocking write in main thread.
- const DisableSave disable_save;
-
- // Take ownership of the FD so that we close it on failure. This will
- // unblock the blocking write below.
- FileDescriptor fd(second_fd);
- second_fd = -1;
-
- char readbuf[2500] = {};
- int n = -1;
- while (n != 0) {
- ASSERT_THAT(n = RetryEINTR(read)(fd.get(), &readbuf, sizeof(readbuf)),
- SyscallSucceeds());
- read_bytes += n;
- }
- });
-
- // Try to write the whole thing.
- int n;
- ASSERT_THAT(n = WriteFd(first_fd, writebuf.data(), size), SyscallSucceeds());
-
- // We should have written the whole thing.
- EXPECT_EQ(n, size);
- EXPECT_THAT(close(first_fd), SyscallSucceedsWithValue(0));
- first_fd = -1;
- t.Join();
-
- // We should have read the whole thing.
- EXPECT_EQ(read_bytes, size);
-}
-
-// Test that a send with the MSG_DONTWAIT flag and a buffer larger than the send
-// buffer size will not write the whole thing.
-TEST_P(TcpSocketTest, LargeSendDontWait) {
- // Allocate a buffer three times the size of the send buffer. We do this
- // with a vector to avoid allocating on the stack.
- int size = 3 * sendbuf_size_;
- std::vector<char> buf(size);
-
- // Try to write the whole thing with MSG_DONTWAIT flag, which can
- // return a partial write.
- int n;
- ASSERT_THAT(n = RetryEINTR(send)(first_fd, buf.data(), size, MSG_DONTWAIT),
- SyscallSucceeds());
-
- // We should have written something, but not the whole thing.
- EXPECT_GT(n, 0);
- EXPECT_LT(n, size);
-}
-
-// Test that a send on a non-blocking socket with a buffer larger than the
-// send buffer will not write the whole thing at once.
-TEST_P(TcpSocketTest, NonblockingLargeSend) {
- // Set the FD to O_NONBLOCK.
- int opts;
- ASSERT_THAT(opts = fcntl(first_fd, F_GETFL), SyscallSucceeds());
- opts |= O_NONBLOCK;
- ASSERT_THAT(fcntl(first_fd, F_SETFL, opts), SyscallSucceeds());
-
- // Allocate a buffer three times the size of the send buffer. We do this
- // with a vector to avoid allocating on the stack.
- int size = 3 * sendbuf_size_;
- std::vector<char> buf(size);
-
- // Try to write the whole thing.
- int n;
- ASSERT_THAT(n = RetryEINTR(send)(first_fd, buf.data(), size, 0),
- SyscallSucceeds());
-
- // We should have written something, but not the whole thing.
- EXPECT_GT(n, 0);
- EXPECT_LT(n, size);
-}
-
-// Same test as above, but calls send instead of write.
-TEST_P(TcpSocketTest, BlockingLargeSend) {
- // Allocate a buffer three times the size of the send buffer. We do this
- // with a vector to avoid allocating on the stack.
- int size = 3 * sendbuf_size_;
- std::vector<char> writebuf(size);
-
- // Start reading the response in a loop.
- int read_bytes = 0;
- ScopedThread t([this, &read_bytes]() {
- // Avoid interrupting the blocking write in main thread.
- const DisableSave disable_save;
-
- // Take ownership of the FD so that we close it on failure. This will
- // unblock the blocking write below.
- FileDescriptor fd(second_fd);
- second_fd = -1;
-
- char readbuf[2500] = {};
- int n = -1;
- while (n != 0) {
- ASSERT_THAT(n = RetryEINTR(read)(fd.get(), &readbuf, sizeof(readbuf)),
- SyscallSucceeds());
- read_bytes += n;
- }
- });
-
- // Try to send the whole thing.
- int n;
- ASSERT_THAT(n = SendFd(first_fd, writebuf.data(), size, 0),
- SyscallSucceeds());
-
- // We should have written the whole thing.
- EXPECT_EQ(n, size);
- EXPECT_THAT(close(first_fd), SyscallSucceedsWithValue(0));
- first_fd = -1;
- t.Join();
-
- // We should have read the whole thing.
- EXPECT_EQ(read_bytes, size);
-}
-
-// Test that polling on a socket with a full send buffer will block.
-TEST_P(TcpSocketTest, PollWithFullBufferBlocks) {
- FillSocketBuffers(first_fd, second_fd);
- // Now polling on the FD with a timeout should return 0 corresponding to no
- // FDs ready.
- struct pollfd poll_fd = {first_fd, POLLOUT, 0};
- EXPECT_THAT(RetryEINTR(poll)(&poll_fd, 1, 10), SyscallSucceedsWithValue(0));
-}
-
-TEST_P(TcpSocketTest, ClosedWriteBlockingSocket) {
- FillSocketBuffers(first_fd, second_fd);
- constexpr int timeout = 10;
- struct timeval tv = {.tv_sec = timeout, .tv_usec = 0};
- EXPECT_THAT(setsockopt(first_fd, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)),
- SyscallSucceeds());
-
- struct timespec begin;
- struct timespec end;
- const DisableSave disable_save; // Timing-related.
- EXPECT_THAT(clock_gettime(CLOCK_MONOTONIC, &begin), SyscallSucceeds());
-
- ScopedThread send_thread([this]() {
- char send_byte;
- // Expect the send() to be blocked until the send timeout expires.
- ASSERT_THAT(RetryEINTR(send)(first_fd, &send_byte, sizeof(send_byte), 0),
- SyscallFailsWithErrno(EAGAIN));
- });
-
- // Wait for the thread to be blocked on write.
- absl::SleepFor(absl::Milliseconds(250));
- // Socket close does not have any effect on a blocked write.
- ASSERT_THAT(close(first_fd), SyscallSucceeds());
- // Indicate to the cleanup routine that we are already closed.
- first_fd = -1;
-
- send_thread.Join();
-
- EXPECT_THAT(clock_gettime(CLOCK_MONOTONIC, &end), SyscallSucceeds());
- // Check the lower bound on the timeout. Checking for an upper bound is
- // fragile because Linux can overrun the timeout due to scheduling delays.
- EXPECT_GT(ms_elapsed(begin, end), timeout * 1000 - 1);
-}
-
-TEST_P(TcpSocketTest, ClosedReadBlockingSocket) {
- constexpr int timeout = 10;
- struct timeval tv = {.tv_sec = timeout, .tv_usec = 0};
- EXPECT_THAT(setsockopt(first_fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)),
- SyscallSucceeds());
-
- struct timespec begin;
- struct timespec end;
- const DisableSave disable_save; // Timing-related.
- EXPECT_THAT(clock_gettime(CLOCK_MONOTONIC, &begin), SyscallSucceeds());
-
- ScopedThread read_thread([this]() {
- char read_byte;
- // Expect the read() to be blocked until the receive timeout expires.
- ASSERT_THAT(read(first_fd, &read_byte, sizeof(read_byte)),
- SyscallFailsWithErrno(EAGAIN));
- });
-
- // Wait for the thread to be blocked on read.
- absl::SleepFor(absl::Milliseconds(250));
- // Socket close does not have any effect on a blocked read.
- ASSERT_THAT(close(first_fd), SyscallSucceeds());
- // Indicate to the cleanup routine that we are already closed.
- first_fd = -1;
-
- read_thread.Join();
-
- EXPECT_THAT(clock_gettime(CLOCK_MONOTONIC, &end), SyscallSucceeds());
- // Check the lower bound on the timeout. Checking for an upper bound is
- // fragile because Linux can overrun the timeout due to scheduling delays.
- EXPECT_GT(ms_elapsed(begin, end), timeout * 1000 - 1);
-}
-
-TEST_P(TcpSocketTest, MsgTrunc) {
- char sent_data[512];
- RandomizeBuffer(sent_data, sizeof(sent_data));
- ASSERT_THAT(RetryEINTR(send)(first_fd, sent_data, sizeof(sent_data), 0),
- SyscallSucceedsWithValue(sizeof(sent_data)));
- char received_data[sizeof(sent_data)] = {};
- ASSERT_THAT(RetryEINTR(recv)(second_fd, received_data,
- sizeof(received_data) / 2, MSG_TRUNC),
- SyscallSucceedsWithValue(sizeof(sent_data) / 2));
-
- // Check that we didn't get anything.
- char zeros[sizeof(received_data)] = {};
- EXPECT_EQ(0, memcmp(zeros, received_data, sizeof(received_data)));
-}
-
-// MSG_CTRUNC is a return flag but linux allows it to be set on input flags
-// without returning an error.
-TEST_P(TcpSocketTest, MsgTruncWithCtrunc) {
- char sent_data[512];
- RandomizeBuffer(sent_data, sizeof(sent_data));
- ASSERT_THAT(RetryEINTR(send)(first_fd, sent_data, sizeof(sent_data), 0),
- SyscallSucceedsWithValue(sizeof(sent_data)));
- char received_data[sizeof(sent_data)] = {};
- ASSERT_THAT(
- RetryEINTR(recv)(second_fd, received_data, sizeof(received_data) / 2,
- MSG_TRUNC | MSG_CTRUNC),
- SyscallSucceedsWithValue(sizeof(sent_data) / 2));
-
- // Check that we didn't get anything.
- char zeros[sizeof(received_data)] = {};
- EXPECT_EQ(0, memcmp(zeros, received_data, sizeof(received_data)));
-}
-
-// This test will verify that MSG_CTRUNC doesn't do anything when specified
-// on input.
-TEST_P(TcpSocketTest, MsgTruncWithCtruncOnly) {
- char sent_data[512];
- RandomizeBuffer(sent_data, sizeof(sent_data));
- ASSERT_THAT(RetryEINTR(send)(first_fd, sent_data, sizeof(sent_data), 0),
- SyscallSucceedsWithValue(sizeof(sent_data)));
- char received_data[sizeof(sent_data)] = {};
- ASSERT_THAT(RetryEINTR(recv)(second_fd, received_data,
- sizeof(received_data) / 2, MSG_CTRUNC),
- SyscallSucceedsWithValue(sizeof(sent_data) / 2));
-
- // Since MSG_CTRUNC here had no effect, it should not behave like MSG_TRUNC.
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data) / 2));
-}
-
-TEST_P(TcpSocketTest, MsgTruncLargeSize) {
- char sent_data[512];
- RandomizeBuffer(sent_data, sizeof(sent_data));
- ASSERT_THAT(RetryEINTR(send)(first_fd, sent_data, sizeof(sent_data), 0),
- SyscallSucceedsWithValue(sizeof(sent_data)));
- char received_data[sizeof(sent_data) * 2] = {};
- ASSERT_THAT(RetryEINTR(recv)(second_fd, received_data, sizeof(received_data),
- MSG_TRUNC),
- SyscallSucceedsWithValue(sizeof(sent_data)));
-
- // Check that we didn't get anything.
- char zeros[sizeof(received_data)] = {};
- EXPECT_EQ(0, memcmp(zeros, received_data, sizeof(received_data)));
-}
-
-TEST_P(TcpSocketTest, MsgTruncPeek) {
- char sent_data[512];
- RandomizeBuffer(sent_data, sizeof(sent_data));
- ASSERT_THAT(RetryEINTR(send)(first_fd, sent_data, sizeof(sent_data), 0),
- SyscallSucceedsWithValue(sizeof(sent_data)));
- char received_data[sizeof(sent_data)] = {};
- ASSERT_THAT(RetryEINTR(recv)(second_fd, received_data,
- sizeof(received_data) / 2, MSG_TRUNC | MSG_PEEK),
- SyscallSucceedsWithValue(sizeof(sent_data) / 2));
-
-  // Check that the data was not copied into the buffer.
- char zeros[sizeof(received_data)] = {};
- EXPECT_EQ(0, memcmp(zeros, received_data, sizeof(received_data)));
-
- // Check that we can still get all of the data.
- ASSERT_THAT(
- RetryEINTR(recv)(second_fd, received_data, sizeof(received_data), 0),
- SyscallSucceedsWithValue(sizeof(sent_data)));
- EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));
-}
-
-TEST_P(TcpSocketTest, NoDelayDefault) {
- int get = -1;
- socklen_t get_len = sizeof(get);
- EXPECT_THAT(getsockopt(first_fd, IPPROTO_TCP, TCP_NODELAY, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOff);
-}
-
-TEST_P(TcpSocketTest, SetNoDelay) {
- ASSERT_THAT(setsockopt(first_fd, IPPROTO_TCP, TCP_NODELAY, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- EXPECT_THAT(getsockopt(first_fd, IPPROTO_TCP, TCP_NODELAY, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOn);
-
- ASSERT_THAT(setsockopt(first_fd, IPPROTO_TCP, TCP_NODELAY, &kSockOptOff,
- sizeof(kSockOptOff)),
- SyscallSucceeds());
-
- EXPECT_THAT(getsockopt(first_fd, IPPROTO_TCP, TCP_NODELAY, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kSockOptOff);
-}
-
-#ifndef TCP_INQ
-#define TCP_INQ 36
-#endif
-
-TEST_P(TcpSocketTest, TcpInqSetSockOpt) {
- char buf[1024];
- ASSERT_THAT(RetryEINTR(write)(first_fd, buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- // TCP_INQ is disabled by default.
- int val = -1;
- socklen_t slen = sizeof(val);
- EXPECT_THAT(getsockopt(second_fd, SOL_TCP, TCP_INQ, &val, &slen),
- SyscallSucceedsWithValue(0));
- ASSERT_EQ(val, 0);
-
- // Try to set TCP_INQ.
- val = 1;
- EXPECT_THAT(setsockopt(second_fd, SOL_TCP, TCP_INQ, &val, sizeof(val)),
- SyscallSucceedsWithValue(0));
- val = -1;
- slen = sizeof(val);
- EXPECT_THAT(getsockopt(second_fd, SOL_TCP, TCP_INQ, &val, &slen),
- SyscallSucceedsWithValue(0));
- ASSERT_EQ(val, 1);
-
- // Try to unset TCP_INQ.
- val = 0;
- EXPECT_THAT(setsockopt(second_fd, SOL_TCP, TCP_INQ, &val, sizeof(val)),
- SyscallSucceedsWithValue(0));
- val = -1;
- slen = sizeof(val);
- EXPECT_THAT(getsockopt(second_fd, SOL_TCP, TCP_INQ, &val, &slen),
- SyscallSucceedsWithValue(0));
- ASSERT_EQ(val, 0);
-}
-
-TEST_P(TcpSocketTest, TcpInq) {
- char buf[1024];
- // Write more than one TCP segment.
- int size = sizeof(buf);
- int kChunk = sizeof(buf) / 4;
- for (int i = 0; i < size; i += kChunk) {
- ASSERT_THAT(RetryEINTR(write)(first_fd, buf, kChunk),
- SyscallSucceedsWithValue(kChunk));
- }
-
- int val = 1;
- kChunk = sizeof(buf) / 2;
- EXPECT_THAT(setsockopt(second_fd, SOL_TCP, TCP_INQ, &val, sizeof(val)),
- SyscallSucceedsWithValue(0));
-
-  // Wait until all of the data is in the receive queue.
- while (true) {
- ASSERT_THAT(ioctl(second_fd, TIOCINQ, &size), SyscallSucceeds());
- if (size == sizeof(buf)) {
- break;
- }
- absl::SleepFor(absl::Milliseconds(10));
- }
-
- struct msghdr msg = {};
- std::vector<char> control(CMSG_SPACE(sizeof(int)));
- size = sizeof(buf);
- struct iovec iov;
- for (int i = 0; size != 0; i += kChunk) {
- msg.msg_control = &control[0];
- msg.msg_controllen = control.size();
-
- iov.iov_base = buf;
- iov.iov_len = kChunk;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- ASSERT_THAT(RetryEINTR(recvmsg)(second_fd, &msg, 0),
- SyscallSucceedsWithValue(kChunk));
- size -= kChunk;
-
- struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
- ASSERT_NE(cmsg, nullptr);
- ASSERT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(int)));
- ASSERT_EQ(cmsg->cmsg_level, SOL_TCP);
- ASSERT_EQ(cmsg->cmsg_type, TCP_INQ);
-
- int inq = 0;
- memcpy(&inq, CMSG_DATA(cmsg), sizeof(int));
- ASSERT_EQ(inq, size);
- }
-}
-
-TEST_P(TcpSocketTest, Tiocinq) {
- char buf[1024];
- size_t size = sizeof(buf);
- ASSERT_THAT(RetryEINTR(write)(first_fd, buf, size),
- SyscallSucceedsWithValue(size));
-
- uint32_t seed = time(nullptr);
- const size_t max_chunk = size / 10;
- while (size > 0) {
- size_t chunk = (rand_r(&seed) % max_chunk) + 1;
- ssize_t read =
- RetryEINTR(recvfrom)(second_fd, buf, chunk, 0, nullptr, nullptr);
- ASSERT_THAT(read, SyscallSucceeds());
- size -= read;
-
- int inq = 0;
- ASSERT_THAT(ioctl(second_fd, TIOCINQ, &inq), SyscallSucceeds());
- ASSERT_EQ(inq, size);
- }
-}
-
-TEST_P(TcpSocketTest, TcpSCMPriority) {
- char buf[1024];
- ASSERT_THAT(RetryEINTR(write)(first_fd, buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- int val = 1;
- EXPECT_THAT(setsockopt(second_fd, SOL_TCP, TCP_INQ, &val, sizeof(val)),
- SyscallSucceedsWithValue(0));
- EXPECT_THAT(
- setsockopt(second_fd, SOL_SOCKET, SO_TIMESTAMP, &val, sizeof(val)),
- SyscallSucceedsWithValue(0));
-
- struct msghdr msg = {};
-  std::vector<char> control(CMSG_SPACE(sizeof(struct timeval)) +
-                            CMSG_SPACE(sizeof(int)));
- struct iovec iov;
- msg.msg_control = &control[0];
- msg.msg_controllen = control.size();
-
- iov.iov_base = buf;
- iov.iov_len = sizeof(buf);
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- ASSERT_THAT(RetryEINTR(recvmsg)(second_fd, &msg, 0),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
- ASSERT_NE(cmsg, nullptr);
- // TODO(b/78348848): SO_TIMESTAMP isn't implemented for TCP sockets.
- if (!IsRunningOnGvisor() || cmsg->cmsg_level == SOL_SOCKET) {
- ASSERT_EQ(cmsg->cmsg_level, SOL_SOCKET);
- ASSERT_EQ(cmsg->cmsg_type, SO_TIMESTAMP);
- ASSERT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(struct timeval)));
-
- cmsg = CMSG_NXTHDR(&msg, cmsg);
- ASSERT_NE(cmsg, nullptr);
- }
- ASSERT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(int)));
- ASSERT_EQ(cmsg->cmsg_level, SOL_TCP);
- ASSERT_EQ(cmsg->cmsg_type, TCP_INQ);
-
- int inq = 0;
- memcpy(&inq, CMSG_DATA(cmsg), sizeof(int));
- ASSERT_EQ(inq, 0);
-
- cmsg = CMSG_NXTHDR(&msg, cmsg);
- ASSERT_EQ(cmsg, nullptr);
-}
-
-TEST_P(TcpSocketTest, TimeWaitPollHUP) {
- shutdown(first_fd, SHUT_RDWR);
- ScopedThread t([&]() {
- constexpr int kTimeout = 10000;
- constexpr int16_t want_events = POLLHUP;
- struct pollfd pfd = {
- .fd = first_fd,
- .events = want_events,
- };
- ASSERT_THAT(poll(&pfd, 1, kTimeout), SyscallSucceedsWithValue(1));
- });
- shutdown(second_fd, SHUT_RDWR);
- t.Join();
-  // At this point first_fd should be in TIME-WAIT, and polling for POLLHUP
-  // should report 1 ready FD.
- constexpr int kTimeout = 10000;
- constexpr int16_t want_events = POLLHUP;
- struct pollfd pfd = {
- .fd = first_fd,
- .events = want_events,
- };
- ASSERT_THAT(poll(&pfd, 1, kTimeout), SyscallSucceedsWithValue(1));
-}
-
-INSTANTIATE_TEST_SUITE_P(AllInetTests, TcpSocketTest,
- ::testing::Values(AF_INET, AF_INET6));
-
-// Fixture for tests parameterized by address family that don't need the
-// setup performed by the TcpSocketTest fixture.
-using SimpleTcpSocketTest = ::testing::TestWithParam<int>;
-
-TEST_P(SimpleTcpSocketTest, SendUnconnected) {
- int fd;
- ASSERT_THAT(fd = socket(GetParam(), SOCK_STREAM, IPPROTO_TCP),
- SyscallSucceeds());
- FileDescriptor sock_fd(fd);
-
- char data = '\0';
- EXPECT_THAT(RetryEINTR(send)(fd, &data, sizeof(data), 0),
- SyscallFailsWithErrno(EPIPE));
-}
-
-TEST_P(SimpleTcpSocketTest, SendtoWithoutAddressUnconnected) {
- int fd;
- ASSERT_THAT(fd = socket(GetParam(), SOCK_STREAM, IPPROTO_TCP),
- SyscallSucceeds());
- FileDescriptor sock_fd(fd);
-
- char data = '\0';
- EXPECT_THAT(RetryEINTR(sendto)(fd, &data, sizeof(data), 0, nullptr, 0),
- SyscallFailsWithErrno(EPIPE));
-}
-
-TEST_P(SimpleTcpSocketTest, SendtoWithAddressUnconnected) {
- int fd;
- ASSERT_THAT(fd = socket(GetParam(), SOCK_STREAM, IPPROTO_TCP),
- SyscallSucceeds());
- FileDescriptor sock_fd(fd);
-
- sockaddr_storage addr =
- ASSERT_NO_ERRNO_AND_VALUE(InetLoopbackAddr(GetParam()));
- char data = '\0';
- EXPECT_THAT(RetryEINTR(sendto)(fd, &data, sizeof(data), 0, AsSockAddr(&addr),
- sizeof(addr)),
- SyscallFailsWithErrno(EPIPE));
-}
-
-TEST_P(SimpleTcpSocketTest, GetPeerNameUnconnected) {
- int fd;
- ASSERT_THAT(fd = socket(GetParam(), SOCK_STREAM, IPPROTO_TCP),
- SyscallSucceeds());
- FileDescriptor sock_fd(fd);
-
- sockaddr_storage addr;
- socklen_t addrlen = sizeof(addr);
- EXPECT_THAT(getpeername(fd, AsSockAddr(&addr), &addrlen),
- SyscallFailsWithErrno(ENOTCONN));
-}
-
-TEST_P(TcpSocketTest, FullBuffer) {
- // Set both FDs to be blocking.
- int flags = 0;
- ASSERT_THAT(flags = fcntl(first_fd, F_GETFL), SyscallSucceeds());
- EXPECT_THAT(fcntl(first_fd, F_SETFL, flags & ~O_NONBLOCK), SyscallSucceeds());
- flags = 0;
- ASSERT_THAT(flags = fcntl(second_fd, F_GETFL), SyscallSucceeds());
- EXPECT_THAT(fcntl(second_fd, F_SETFL, flags & ~O_NONBLOCK),
- SyscallSucceeds());
-
- // 2500 was chosen as a small value that can be set on Linux.
- int set_snd = 2500;
- EXPECT_THAT(
- setsockopt(first_fd, SOL_SOCKET, SO_SNDBUF, &set_snd, sizeof(set_snd)),
- SyscallSucceedsWithValue(0));
- int get_snd = -1;
- socklen_t get_snd_len = sizeof(get_snd);
- EXPECT_THAT(
- getsockopt(first_fd, SOL_SOCKET, SO_SNDBUF, &get_snd, &get_snd_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_snd_len, sizeof(get_snd));
- EXPECT_GT(get_snd, 0);
-
- // 2500 was chosen as a small value that can be set on Linux and gVisor.
- int set_rcv = 2500;
- EXPECT_THAT(
- setsockopt(second_fd, SOL_SOCKET, SO_RCVBUF, &set_rcv, sizeof(set_rcv)),
- SyscallSucceedsWithValue(0));
- int get_rcv = -1;
- socklen_t get_rcv_len = sizeof(get_rcv);
- EXPECT_THAT(
- getsockopt(second_fd, SOL_SOCKET, SO_RCVBUF, &get_rcv, &get_rcv_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_rcv_len, sizeof(get_rcv));
- EXPECT_GE(get_rcv, 2500);
-
- // Quick sanity test.
- EXPECT_LT(get_snd + get_rcv, 2500 * IOV_MAX);
-
- char data[2500] = {};
- std::vector<struct iovec> iovecs;
- for (int i = 0; i < IOV_MAX; i++) {
- struct iovec iov = {};
- iov.iov_base = data;
- iov.iov_len = sizeof(data);
- iovecs.push_back(iov);
- }
- ScopedThread t([this, &iovecs]() {
- int result = -1;
- EXPECT_THAT(
- result = RetryEINTR(writev)(first_fd, iovecs.data(), iovecs.size()),
- SyscallSucceeds());
- EXPECT_GT(result, 1);
- EXPECT_LT(result, sizeof(data) * iovecs.size());
- });
-
- char recv = 0;
- EXPECT_THAT(RetryEINTR(read)(second_fd, &recv, 1),
- SyscallSucceedsWithValue(1));
- EXPECT_THAT(close(second_fd), SyscallSucceedsWithValue(0));
- second_fd = -1;
-}
-
-TEST_P(TcpSocketTest, PollAfterShutdown) {
- ScopedThread client_thread([this]() {
- EXPECT_THAT(shutdown(first_fd, SHUT_WR), SyscallSucceedsWithValue(0));
- struct pollfd poll_fd = {first_fd, POLLIN | POLLERR | POLLHUP, 0};
- EXPECT_THAT(RetryEINTR(poll)(&poll_fd, 1, 10000),
- SyscallSucceedsWithValue(1));
- });
-
- EXPECT_THAT(shutdown(second_fd, SHUT_WR), SyscallSucceedsWithValue(0));
- struct pollfd poll_fd = {second_fd, POLLIN | POLLERR | POLLHUP, 0};
- EXPECT_THAT(RetryEINTR(poll)(&poll_fd, 1, 10000),
- SyscallSucceedsWithValue(1));
-}
-
-TEST_P(SimpleTcpSocketTest, NonBlockingConnectRetry) {
- const FileDescriptor listener =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
-
- // Initialize address to the loopback one.
- sockaddr_storage addr =
- ASSERT_NO_ERRNO_AND_VALUE(InetLoopbackAddr(GetParam()));
- socklen_t addrlen = sizeof(addr);
-
- // Bind to some port but don't listen yet.
- ASSERT_THAT(bind(listener.get(), AsSockAddr(&addr), addrlen),
- SyscallSucceeds());
-
- // Get the address we're bound to, then connect to it. We need to do this
- // because we're allowing the stack to pick a port for us.
- ASSERT_THAT(getsockname(listener.get(), AsSockAddr(&addr), &addrlen),
- SyscallSucceeds());
-
- FileDescriptor connector =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
-
- // Verify that connect fails.
- ASSERT_THAT(RetryEINTR(connect)(connector.get(), AsSockAddr(&addr), addrlen),
- SyscallFailsWithErrno(ECONNREFUSED));
-
-  // Now start listening.
- ASSERT_THAT(listen(listener.get(), SOMAXCONN), SyscallSucceeds());
-
- // TODO(gvisor.dev/issue/3828): Issuing connect() again on a socket that
- // failed first connect should succeed.
- if (IsRunningOnGvisor()) {
- ASSERT_THAT(
- RetryEINTR(connect)(connector.get(), AsSockAddr(&addr), addrlen),
- SyscallFailsWithErrno(ECONNABORTED));
- return;
- }
-
- // Verify that connect now succeeds.
- ASSERT_THAT(RetryEINTR(connect)(connector.get(), AsSockAddr(&addr), addrlen),
- SyscallSucceeds());
-
- // Accept the connection.
- const FileDescriptor accepted =
- ASSERT_NO_ERRNO_AND_VALUE(Accept(listener.get(), nullptr, nullptr));
-}
-
-// nonBlockingConnectNoListener returns a socket on which a connect that is
-// expected to fail has been issued.
-PosixErrorOr<FileDescriptor> nonBlockingConnectNoListener(const int family,
- sockaddr_storage addr,
- socklen_t addrlen) {
-  // First create a socket and bind it so that a port is reserved, but do not
-  // call listen on this socket. Then create a second socket that connects to
-  // the port bound by the first socket; that connect should fail.
- constexpr int sock_type = SOCK_STREAM | SOCK_NONBLOCK;
- int b_sock;
- RETURN_ERROR_IF_SYSCALL_FAIL(b_sock = socket(family, sock_type, IPPROTO_TCP));
- FileDescriptor b(b_sock);
- EXPECT_THAT(bind(b.get(), AsSockAddr(&addr), addrlen), SyscallSucceeds());
-
-  // Get the address bound by the first (non-listening) socket.
- EXPECT_THAT(getsockname(b.get(), AsSockAddr(&addr), &addrlen),
- SyscallSucceeds());
-
- // Now create another socket and issue a connect on this one. This connect
- // should fail as there is no listener.
- int c_sock;
- RETURN_ERROR_IF_SYSCALL_FAIL(c_sock = socket(family, sock_type, IPPROTO_TCP));
- FileDescriptor s(c_sock);
-
- // Now connect to the bound address and this should fail as nothing
- // is listening on the bound address.
- EXPECT_THAT(RetryEINTR(connect)(s.get(), AsSockAddr(&addr), addrlen),
- SyscallFailsWithErrno(EINPROGRESS));
-
- // Wait for the connect to fail.
- struct pollfd poll_fd = {s.get(), POLLERR, 0};
- EXPECT_THAT(RetryEINTR(poll)(&poll_fd, 1, 1000), SyscallSucceedsWithValue(1));
- return std::move(s);
-}
-
-TEST_P(SimpleTcpSocketTest, NonBlockingConnectNoListener) {
- sockaddr_storage addr =
- ASSERT_NO_ERRNO_AND_VALUE(InetLoopbackAddr(GetParam()));
- socklen_t addrlen = sizeof(addr);
-
- const FileDescriptor s =
- nonBlockingConnectNoListener(GetParam(), addr, addrlen).ValueOrDie();
-
- int err;
- socklen_t optlen = sizeof(err);
- ASSERT_THAT(getsockopt(s.get(), SOL_SOCKET, SO_ERROR, &err, &optlen),
- SyscallSucceeds());
-  ASSERT_EQ(optlen, sizeof(err));
- EXPECT_EQ(err, ECONNREFUSED);
-
- unsigned char c;
- ASSERT_THAT(read(s.get(), &c, sizeof(c)), SyscallSucceedsWithValue(0));
- int opts;
- EXPECT_THAT(opts = fcntl(s.get(), F_GETFL), SyscallSucceeds());
- opts &= ~O_NONBLOCK;
- EXPECT_THAT(fcntl(s.get(), F_SETFL, opts), SyscallSucceeds());
- // Try connecting again.
- ASSERT_THAT(RetryEINTR(connect)(s.get(), AsSockAddr(&addr), addrlen),
- SyscallFailsWithErrno(ECONNABORTED));
-}
-
-TEST_P(SimpleTcpSocketTest, NonBlockingConnectNoListenerRead) {
- sockaddr_storage addr =
- ASSERT_NO_ERRNO_AND_VALUE(InetLoopbackAddr(GetParam()));
- socklen_t addrlen = sizeof(addr);
-
- const FileDescriptor s =
- nonBlockingConnectNoListener(GetParam(), addr, addrlen).ValueOrDie();
-
- unsigned char c;
- ASSERT_THAT(read(s.get(), &c, 1), SyscallFailsWithErrno(ECONNREFUSED));
- ASSERT_THAT(read(s.get(), &c, 1), SyscallSucceedsWithValue(0));
- ASSERT_THAT(RetryEINTR(connect)(s.get(), AsSockAddr(&addr), addrlen),
- SyscallFailsWithErrno(ECONNABORTED));
-}
-
-TEST_P(SimpleTcpSocketTest, NonBlockingConnectNoListenerPeek) {
- sockaddr_storage addr =
- ASSERT_NO_ERRNO_AND_VALUE(InetLoopbackAddr(GetParam()));
- socklen_t addrlen = sizeof(addr);
-
- const FileDescriptor s =
- nonBlockingConnectNoListener(GetParam(), addr, addrlen).ValueOrDie();
-
- unsigned char c;
- ASSERT_THAT(recv(s.get(), &c, 1, MSG_PEEK),
- SyscallFailsWithErrno(ECONNREFUSED));
- ASSERT_THAT(recv(s.get(), &c, 1, MSG_PEEK), SyscallSucceedsWithValue(0));
- ASSERT_THAT(RetryEINTR(connect)(s.get(), AsSockAddr(&addr), addrlen),
- SyscallFailsWithErrno(ECONNABORTED));
-}
-
-TEST_P(SimpleTcpSocketTest, SelfConnectSendRecv) {
- // Initialize address to the loopback one.
- sockaddr_storage addr =
- ASSERT_NO_ERRNO_AND_VALUE(InetLoopbackAddr(GetParam()));
- socklen_t addrlen = sizeof(addr);
-
- const FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
-
- ASSERT_THAT((bind)(s.get(), AsSockAddr(&addr), addrlen), SyscallSucceeds());
- // Get the bound port.
- ASSERT_THAT(getsockname(s.get(), AsSockAddr(&addr), &addrlen),
- SyscallSucceeds());
- ASSERT_THAT(RetryEINTR(connect)(s.get(), AsSockAddr(&addr), addrlen),
- SyscallSucceeds());
-
- constexpr int kBufSz = 1 << 20; // 1 MiB
- std::vector<char> writebuf(kBufSz);
-
- // Start reading the response in a loop.
- int read_bytes = 0;
- ScopedThread t([&s, &read_bytes]() {
- // Too many syscalls.
- const DisableSave disable_save;
-
- char readbuf[2500] = {};
- int n = -1;
- while (n != 0) {
- ASSERT_THAT(n = RetryEINTR(read)(s.get(), &readbuf, sizeof(readbuf)),
- SyscallSucceeds());
- read_bytes += n;
- }
- });
-
- // Try to send the whole thing.
- int n;
- ASSERT_THAT(n = SendFd(s.get(), writebuf.data(), kBufSz, 0),
- SyscallSucceeds());
-
- // We should have written the whole thing.
- EXPECT_EQ(n, kBufSz);
- EXPECT_THAT(shutdown(s.get(), SHUT_WR), SyscallSucceedsWithValue(0));
- t.Join();
-
- // We should have read the whole thing.
- EXPECT_EQ(read_bytes, kBufSz);
-}
-
-TEST_P(SimpleTcpSocketTest, SelfConnectSend) {
- // Initialize address to the loopback one.
- sockaddr_storage addr =
- ASSERT_NO_ERRNO_AND_VALUE(InetLoopbackAddr(GetParam()));
- socklen_t addrlen = sizeof(addr);
-
- const FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
-
- constexpr int max_seg = 256;
- ASSERT_THAT(
- setsockopt(s.get(), SOL_TCP, TCP_MAXSEG, &max_seg, sizeof(max_seg)),
- SyscallSucceeds());
-
- ASSERT_THAT(bind(s.get(), AsSockAddr(&addr), addrlen), SyscallSucceeds());
- // Get the bound port.
- ASSERT_THAT(getsockname(s.get(), AsSockAddr(&addr), &addrlen),
- SyscallSucceeds());
- ASSERT_THAT(RetryEINTR(connect)(s.get(), AsSockAddr(&addr), addrlen),
- SyscallSucceeds());
-
- // Ensure the write buffer is large enough not to block on a single write.
- size_t write_size = 512 << 10; // 512 KiB.
- EXPECT_THAT(setsockopt(s.get(), SOL_SOCKET, SO_SNDBUF, &write_size,
- sizeof(write_size)),
- SyscallSucceedsWithValue(0));
-
- std::vector<char> writebuf(write_size);
-
- // Try to send the whole thing.
- int n;
- ASSERT_THAT(n = SendFd(s.get(), writebuf.data(), writebuf.size(), 0),
- SyscallSucceeds());
-
- // We should have written the whole thing.
- EXPECT_EQ(n, writebuf.size());
- EXPECT_THAT(shutdown(s.get(), SHUT_WR), SyscallSucceedsWithValue(0));
-}
-
-TEST_P(SimpleTcpSocketTest, SelfConnectSendShutdownWrite) {
- // Initialize address to the loopback one.
- sockaddr_storage addr =
- ASSERT_NO_ERRNO_AND_VALUE(InetLoopbackAddr(GetParam()));
- socklen_t addrlen = sizeof(addr);
-
- const FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
-
- ASSERT_THAT(bind(s.get(), AsSockAddr(&addr), addrlen), SyscallSucceeds());
- // Get the bound port.
- ASSERT_THAT(getsockname(s.get(), AsSockAddr(&addr), &addrlen),
- SyscallSucceeds());
- ASSERT_THAT(RetryEINTR(connect)(s.get(), AsSockAddr(&addr), addrlen),
- SyscallSucceeds());
-
- // Write enough data to fill send and receive buffers.
- size_t write_size = 24 << 20; // 24 MiB.
- std::vector<char> writebuf(write_size);
-
- ScopedThread t([&s]() {
- absl::SleepFor(absl::Milliseconds(250));
- ASSERT_THAT(shutdown(s.get(), SHUT_WR), SyscallSucceeds());
- });
-
- // Try to send the whole thing.
- int n;
- ASSERT_THAT(n = SendFd(s.get(), writebuf.data(), writebuf.size(), 0),
- SyscallFailsWithErrno(EPIPE));
-}
-
-TEST_P(SimpleTcpSocketTest, SelfConnectRecvShutdownRead) {
- // Initialize address to the loopback one.
- sockaddr_storage addr =
- ASSERT_NO_ERRNO_AND_VALUE(InetLoopbackAddr(GetParam()));
- socklen_t addrlen = sizeof(addr);
-
- const FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
-
- ASSERT_THAT(bind(s.get(), AsSockAddr(&addr), addrlen), SyscallSucceeds());
- // Get the bound port.
- ASSERT_THAT(getsockname(s.get(), AsSockAddr(&addr), &addrlen),
- SyscallSucceeds());
- ASSERT_THAT(RetryEINTR(connect)(s.get(), AsSockAddr(&addr), addrlen),
- SyscallSucceeds());
-
- ScopedThread t([&s]() {
- absl::SleepFor(absl::Milliseconds(250));
- ASSERT_THAT(shutdown(s.get(), SHUT_RD), SyscallSucceeds());
- });
-
- char buf[1];
- EXPECT_THAT(recv(s.get(), buf, 0, 0), SyscallSucceedsWithValue(0));
-}
-
-void NonBlockingConnect(int family, int16_t pollMask) {
- const FileDescriptor listener =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(family, SOCK_STREAM, IPPROTO_TCP));
-
- // Initialize address to the loopback one.
- sockaddr_storage addr = ASSERT_NO_ERRNO_AND_VALUE(InetLoopbackAddr(family));
- socklen_t addrlen = sizeof(addr);
-
- // Bind to some port then start listening.
- ASSERT_THAT(bind(listener.get(), AsSockAddr(&addr), addrlen),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(listener.get(), SOMAXCONN), SyscallSucceeds());
-
- FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(family, SOCK_STREAM, IPPROTO_TCP));
-
- // Set the FD to O_NONBLOCK.
- int opts;
- ASSERT_THAT(opts = fcntl(s.get(), F_GETFL), SyscallSucceeds());
- opts |= O_NONBLOCK;
- ASSERT_THAT(fcntl(s.get(), F_SETFL, opts), SyscallSucceeds());
-
- ASSERT_THAT(getsockname(listener.get(), AsSockAddr(&addr), &addrlen),
- SyscallSucceeds());
-
- ASSERT_THAT(RetryEINTR(connect)(s.get(), AsSockAddr(&addr), addrlen),
- SyscallFailsWithErrno(EINPROGRESS));
-
- int t;
- ASSERT_THAT(t = RetryEINTR(accept)(listener.get(), nullptr, nullptr),
- SyscallSucceeds());
-
- struct pollfd poll_fd = {s.get(), pollMask, 0};
- EXPECT_THAT(RetryEINTR(poll)(&poll_fd, 1, 10000),
- SyscallSucceedsWithValue(1));
-
- int err;
- socklen_t optlen = sizeof(err);
- ASSERT_THAT(getsockopt(s.get(), SOL_SOCKET, SO_ERROR, &err, &optlen),
- SyscallSucceeds());
-
- EXPECT_EQ(err, 0);
-
- EXPECT_THAT(close(t), SyscallSucceeds());
-}
-
-TEST_P(SimpleTcpSocketTest, NonBlockingConnect_PollOut) {
- NonBlockingConnect(GetParam(), POLLOUT);
-}
-
-TEST_P(SimpleTcpSocketTest, NonBlockingConnect_PollWrNorm) {
- NonBlockingConnect(GetParam(), POLLWRNORM);
-}
-
-TEST_P(SimpleTcpSocketTest, NonBlockingConnect_PollWrNorm_PollOut) {
- NonBlockingConnect(GetParam(), POLLWRNORM | POLLOUT);
-}
-
-TEST_P(SimpleTcpSocketTest, NonBlockingConnectRemoteClose) {
- const FileDescriptor listener =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
-
- // Initialize address to the loopback one.
- sockaddr_storage addr =
- ASSERT_NO_ERRNO_AND_VALUE(InetLoopbackAddr(GetParam()));
- socklen_t addrlen = sizeof(addr);
-
- // Bind to some port then start listening.
- ASSERT_THAT(bind(listener.get(), AsSockAddr(&addr), addrlen),
- SyscallSucceeds());
-
- ASSERT_THAT(listen(listener.get(), SOMAXCONN), SyscallSucceeds());
-
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(GetParam(), SOCK_STREAM | SOCK_NONBLOCK, IPPROTO_TCP));
-
- ASSERT_THAT(getsockname(listener.get(), AsSockAddr(&addr), &addrlen),
- SyscallSucceeds());
-
- ASSERT_THAT(RetryEINTR(connect)(s.get(), AsSockAddr(&addr), addrlen),
- SyscallFailsWithErrno(EINPROGRESS));
-
- int t;
- ASSERT_THAT(t = RetryEINTR(accept)(listener.get(), nullptr, nullptr),
- SyscallSucceeds());
-
- EXPECT_THAT(close(t), SyscallSucceeds());
-
-  // The connect has completed, so polling for POLLOUT should report the
-  // socket as writable even though the remote end has already closed.
- struct pollfd poll_fd = {s.get(), POLLOUT, 0};
- EXPECT_THAT(RetryEINTR(poll)(&poll_fd, 1, 10000),
- SyscallSucceedsWithValue(1));
-
- ASSERT_THAT(RetryEINTR(connect)(s.get(), AsSockAddr(&addr), addrlen),
- SyscallSucceeds());
-
- ASSERT_THAT(RetryEINTR(connect)(s.get(), AsSockAddr(&addr), addrlen),
- SyscallFailsWithErrno(EISCONN));
-}
-
-// Test that we get an ECONNREFUSED with a blocking socket when no one is
-// listening on the other end.
-TEST_P(SimpleTcpSocketTest, BlockingConnectRefused) {
- FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
-
- // Initialize address to the loopback one.
- sockaddr_storage addr =
- ASSERT_NO_ERRNO_AND_VALUE(InetLoopbackAddr(GetParam()));
- socklen_t addrlen = sizeof(addr);
-
- ASSERT_THAT(RetryEINTR(connect)(s.get(), AsSockAddr(&addr), addrlen),
- SyscallFailsWithErrno(ECONNREFUSED));
-
-  // Avoid triggering save in the destructor of s.
- EXPECT_THAT(close(s.release()), SyscallSucceeds());
-}
-
-// Test that connecting to a non-listening port and thus receiving a RST is
-// handled appropriately by the socket - the port that the socket was bound to
-// is released and the expected error is returned.
-TEST_P(SimpleTcpSocketTest, CleanupOnConnectionRefused) {
-  // Create a socket that is known not to be listening. Since it is bound but
-  // not listening, any connection attempt to its port will be refused.
- FileDescriptor bound_s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
-
- sockaddr_storage bound_addr =
- ASSERT_NO_ERRNO_AND_VALUE(InetLoopbackAddr(GetParam()));
- socklen_t bound_addrlen = sizeof(bound_addr);
-
- ASSERT_THAT(bind(bound_s.get(), AsSockAddr(&bound_addr), bound_addrlen),
- SyscallSucceeds());
-
-  // Get the address the socket is bound to because the port is chosen by the
-  // stack.
- ASSERT_THAT(
- getsockname(bound_s.get(), AsSockAddr(&bound_addr), &bound_addrlen),
- SyscallSucceeds());
-
- // Create, initialize, and bind the socket that is used to test connecting to
- // the non-listening port.
- FileDescriptor client_s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
- // Initialize client address to the loopback one.
- sockaddr_storage client_addr =
- ASSERT_NO_ERRNO_AND_VALUE(InetLoopbackAddr(GetParam()));
- socklen_t client_addrlen = sizeof(client_addr);
-
- ASSERT_THAT(bind(client_s.get(), AsSockAddr(&client_addr), client_addrlen),
- SyscallSucceeds());
-
- ASSERT_THAT(
- getsockname(client_s.get(), AsSockAddr(&client_addr), &client_addrlen),
- SyscallSucceeds());
-
- // Now the test: connect to the bound but not listening socket with the
- // client socket. The bound socket should return a RST and cause the client
- // socket to return an error and clean itself up immediately.
-  // Returning ECONNREFUSED diverges from RFC 793, page 37, but matches what
-  // Linux does.
- ASSERT_THAT(connect(client_s.get(),
- reinterpret_cast<const struct sockaddr*>(&bound_addr),
- bound_addrlen),
- SyscallFailsWithErrno(ECONNREFUSED));
-
- FileDescriptor new_s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
-
-  // Test binding to the address from the client socket. This should succeed
-  // if the client socket's earlier binding was released correctly.
- ASSERT_THAT(bind(new_s.get(), AsSockAddr(&client_addr), client_addrlen),
- SyscallSucceeds());
-
-  // Attempt #2: with the new socket and the reused address, our connect
-  // should fail in the same way as before, not with EADDRINUSE.
- //
-  // TODO(gvisor.dev/issue/3828): A second connect on a socket whose first
-  // connect failed should succeed.
- // gVisor never issues the second connect and returns ECONNABORTED instead.
- // Linux actually sends a SYN again and gets a RST and correctly returns
- // ECONNREFUSED.
- if (IsRunningOnGvisor()) {
- ASSERT_THAT(connect(client_s.get(),
- reinterpret_cast<const struct sockaddr*>(&bound_addr),
- bound_addrlen),
- SyscallFailsWithErrno(ECONNABORTED));
- return;
- }
- ASSERT_THAT(connect(client_s.get(),
- reinterpret_cast<const struct sockaddr*>(&bound_addr),
- bound_addrlen),
- SyscallFailsWithErrno(ECONNREFUSED));
-}
-
-// Test that we get an ECONNREFUSED with a nonblocking socket.
-TEST_P(SimpleTcpSocketTest, NonBlockingConnectRefused) {
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(GetParam(), SOCK_STREAM | SOCK_NONBLOCK, IPPROTO_TCP));
-
- // Initialize address to the loopback one.
- sockaddr_storage addr =
- ASSERT_NO_ERRNO_AND_VALUE(InetLoopbackAddr(GetParam()));
- socklen_t addrlen = sizeof(addr);
-
- ASSERT_THAT(RetryEINTR(connect)(s.get(), AsSockAddr(&addr), addrlen),
- SyscallFailsWithErrno(EINPROGRESS));
-
-  // We don't need to specify any events to get POLLHUP or POLLERR because
-  // poll always reports them regardless of the requested events.
- struct pollfd poll_fd = {s.get(), /*events=*/0, 0};
- EXPECT_THAT(RetryEINTR(poll)(&poll_fd, 1, 1000), SyscallSucceedsWithValue(1));
-
- // The ECONNREFUSED should cause us to be woken up with POLLHUP.
- EXPECT_NE(poll_fd.revents & (POLLHUP | POLLERR), 0);
-
-  // Avoid triggering save in the destructor of s.
- EXPECT_THAT(close(s.release()), SyscallSucceeds());
-}
-
-// Test that setting a supported congestion control algorithm succeeds for an
-// unconnected TCP socket.
-TEST_P(SimpleTcpSocketTest, SetCongestionControlSucceedsForSupported) {
- // This is Linux's net/tcp.h TCP_CA_NAME_MAX.
- const int kTcpCaNameMax = 16;
-
- FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
- {
- const char kSetCC[kTcpCaNameMax] = "reno";
- ASSERT_THAT(setsockopt(s.get(), IPPROTO_TCP, TCP_CONGESTION, &kSetCC,
- strlen(kSetCC)),
- SyscallSucceedsWithValue(0));
-
- char got_cc[kTcpCaNameMax];
- memset(got_cc, '1', sizeof(got_cc));
- socklen_t optlen = sizeof(got_cc);
- ASSERT_THAT(
- getsockopt(s.get(), IPPROTO_TCP, TCP_CONGESTION, &got_cc, &optlen),
- SyscallSucceedsWithValue(0));
-    // We ignore optlen here as the Linux kernel sets optlen to the smaller of
-    // the size of the buffer passed in and kTcpCaNameMax, not to the length
-    // of the congestion control algorithm's actual name.
-    EXPECT_EQ(0, memcmp(got_cc, kSetCC, kTcpCaNameMax));
- }
- {
- const char kSetCC[kTcpCaNameMax] = "cubic";
- ASSERT_THAT(setsockopt(s.get(), IPPROTO_TCP, TCP_CONGESTION, &kSetCC,
- strlen(kSetCC)),
- SyscallSucceedsWithValue(0));
-
- char got_cc[kTcpCaNameMax];
- memset(got_cc, '1', sizeof(got_cc));
- socklen_t optlen = sizeof(got_cc);
- ASSERT_THAT(
- getsockopt(s.get(), IPPROTO_TCP, TCP_CONGESTION, &got_cc, &optlen),
- SyscallSucceedsWithValue(0));
-    // We ignore optlen here as the Linux kernel sets optlen to the smaller of
-    // the size of the buffer passed in and kTcpCaNameMax, not to the length
-    // of the congestion control algorithm's actual name.
-    EXPECT_EQ(0, memcmp(got_cc, kSetCC, kTcpCaNameMax));
- }
-}
-
-// This test verifies that getsockopt(...TCP_CONGESTION) behaviour is
-// consistent between Linux and gVisor when the passed-in buffer is smaller
-// than kTcpCaNameMax.
-TEST_P(SimpleTcpSocketTest, SetGetTCPCongestionShortReadBuffer) {
- FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
- {
- // Verify that getsockopt/setsockopt work with buffers smaller than
- // kTcpCaNameMax.
- const char kSetCC[] = "cubic";
- ASSERT_THAT(setsockopt(s.get(), IPPROTO_TCP, TCP_CONGESTION, &kSetCC,
- strlen(kSetCC)),
- SyscallSucceedsWithValue(0));
-
- char got_cc[sizeof(kSetCC)];
- socklen_t optlen = sizeof(got_cc);
- ASSERT_THAT(
- getsockopt(s.get(), IPPROTO_TCP, TCP_CONGESTION, &got_cc, &optlen),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(sizeof(got_cc), optlen);
- EXPECT_EQ(0, memcmp(got_cc, kSetCC, sizeof(got_cc)));
- }
-}
-
-// This test verifies that getsockopt(...TCP_CONGESTION) behaviour is
-// consistent between Linux and gVisor when the passed-in buffer is larger
-// than kTcpCaNameMax.
-TEST_P(SimpleTcpSocketTest, SetGetTCPCongestionLargeReadBuffer) {
- // This is Linux's net/tcp.h TCP_CA_NAME_MAX.
- const int kTcpCaNameMax = 16;
-
- FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
- {
- // Verify that getsockopt works with buffers larger than
- // kTcpCaNameMax.
- const char kSetCC[] = "cubic";
- ASSERT_THAT(setsockopt(s.get(), IPPROTO_TCP, TCP_CONGESTION, &kSetCC,
- strlen(kSetCC)),
- SyscallSucceedsWithValue(0));
-
- char got_cc[kTcpCaNameMax + 5];
- socklen_t optlen = sizeof(got_cc);
- ASSERT_THAT(
- getsockopt(s.get(), IPPROTO_TCP, TCP_CONGESTION, &got_cc, &optlen),
- SyscallSucceedsWithValue(0));
-    // Linux copies the minimum of kTcpCaNameMax and the length of the
-    // passed-in buffer, and sets optlen to the number of bytes actually
-    // copied, irrespective of the actual length of the congestion control
-    // name.
- EXPECT_EQ(kTcpCaNameMax, optlen);
- EXPECT_EQ(0, memcmp(got_cc, kSetCC, sizeof(kSetCC)));
- }
-}
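The two TCP_CONGESTION tests above hinge on how the kernel truncates the returned name. A rough sketch of that length handling, under the reading given in the comments (assuming the stored name is a zero-padded TCP_CA_NAME_MAX byte array; this is an illustration, not the kernel code):

  // Copies min(optlen, TCP_CA_NAME_MAX) bytes of the current algorithm name
  // into dst and returns the count the kernel would report back via optlen.
  socklen_t CopyCongestionNameSketch(char* dst, socklen_t optlen,
                                     const char (&name)[16]) {
    const socklen_t len = optlen < 16 ? optlen : 16;  // 16 == TCP_CA_NAME_MAX.
    memcpy(dst, name, len);
    return len;
  }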
-
-// Test that setting an unsupported congestion control algorithm fails for an
-// unconnected TCP socket.
-TEST_P(SimpleTcpSocketTest, SetCongestionControlFailsForUnsupported) {
- // This is Linux's net/tcp.h TCP_CA_NAME_MAX.
- const int kTcpCaNameMax = 16;
-
- FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
- char old_cc[kTcpCaNameMax];
- socklen_t optlen = sizeof(old_cc);
- ASSERT_THAT(
- getsockopt(s.get(), IPPROTO_TCP, TCP_CONGESTION, &old_cc, &optlen),
- SyscallSucceedsWithValue(0));
-
- const char kSetCC[] = "invalid_ca_kSetCC";
- ASSERT_THAT(
- setsockopt(s.get(), SOL_TCP, TCP_CONGESTION, &kSetCC, strlen(kSetCC)),
- SyscallFailsWithErrno(ENOENT));
-
- char got_cc[kTcpCaNameMax];
- ASSERT_THAT(
- getsockopt(s.get(), IPPROTO_TCP, TCP_CONGESTION, &got_cc, &optlen),
- SyscallSucceedsWithValue(0));
-  // We ignore optlen here as the Linux kernel sets optlen to the smaller of
-  // the size of the buffer passed in and kTcpCaNameMax, not to the length of
-  // the congestion control algorithm's actual name.
-  EXPECT_EQ(0, memcmp(got_cc, old_cc, kTcpCaNameMax));
-}
-
-TEST_P(SimpleTcpSocketTest, MaxSegDefault) {
- FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
-
- constexpr int kDefaultMSS = 536;
- int tcp_max_seg;
- socklen_t optlen = sizeof(tcp_max_seg);
- ASSERT_THAT(
- getsockopt(s.get(), IPPROTO_TCP, TCP_MAXSEG, &tcp_max_seg, &optlen),
- SyscallSucceedsWithValue(0));
-
- EXPECT_EQ(kDefaultMSS, tcp_max_seg);
- EXPECT_EQ(sizeof(tcp_max_seg), optlen);
-}
-
-TEST_P(SimpleTcpSocketTest, SetMaxSeg) {
- FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
-
- constexpr int kDefaultMSS = 536;
- constexpr int kTCPMaxSeg = 1024;
- ASSERT_THAT(setsockopt(s.get(), IPPROTO_TCP, TCP_MAXSEG, &kTCPMaxSeg,
- sizeof(kTCPMaxSeg)),
- SyscallSucceedsWithValue(0));
-
- // Linux actually never returns the user_mss value. It will always return the
- // default MSS value defined above for an unconnected socket and always return
- // the actual current MSS for a connected one.
- int optval;
- socklen_t optlen = sizeof(optval);
- ASSERT_THAT(getsockopt(s.get(), IPPROTO_TCP, TCP_MAXSEG, &optval, &optlen),
- SyscallSucceedsWithValue(0));
-
- EXPECT_EQ(kDefaultMSS, optval);
- EXPECT_EQ(sizeof(optval), optlen);
-}
-
-TEST_P(SimpleTcpSocketTest, SetMaxSegFailsForInvalidMSSValues) {
- FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
-
- {
- constexpr int tcp_max_seg = 10;
- ASSERT_THAT(setsockopt(s.get(), IPPROTO_TCP, TCP_MAXSEG, &tcp_max_seg,
- sizeof(tcp_max_seg)),
- SyscallFailsWithErrno(EINVAL));
- }
- {
- constexpr int tcp_max_seg = 75000;
- ASSERT_THAT(setsockopt(s.get(), IPPROTO_TCP, TCP_MAXSEG, &tcp_max_seg,
- sizeof(tcp_max_seg)),
- SyscallFailsWithErrno(EINVAL));
- }
-}
-
-TEST_P(SimpleTcpSocketTest, SetTCPUserTimeout) {
- FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
-
- {
- constexpr int kTCPUserTimeout = -1;
- EXPECT_THAT(setsockopt(s.get(), IPPROTO_TCP, TCP_USER_TIMEOUT,
- &kTCPUserTimeout, sizeof(kTCPUserTimeout)),
- SyscallFailsWithErrno(EINVAL));
- }
-
- // kTCPUserTimeout is in milliseconds.
- constexpr int kTCPUserTimeout = 100;
- ASSERT_THAT(setsockopt(s.get(), IPPROTO_TCP, TCP_USER_TIMEOUT,
- &kTCPUserTimeout, sizeof(kTCPUserTimeout)),
- SyscallSucceedsWithValue(0));
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(
- getsockopt(s.get(), IPPROTO_TCP, TCP_USER_TIMEOUT, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kTCPUserTimeout);
-}
-
-TEST_P(SimpleTcpSocketTest, SetTCPDeferAcceptNeg) {
- FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
-
-  // A negative TCP_DEFER_ACCEPT is the same as setting it to zero.
- constexpr int kNeg = -1;
- EXPECT_THAT(
- setsockopt(s.get(), IPPROTO_TCP, TCP_DEFER_ACCEPT, &kNeg, sizeof(kNeg)),
- SyscallSucceeds());
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(
- getsockopt(s.get(), IPPROTO_TCP, TCP_DEFER_ACCEPT, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, 0);
-}
-
-TEST_P(SimpleTcpSocketTest, GetTCPDeferAcceptDefault) {
- FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(
- getsockopt(s.get(), IPPROTO_TCP, TCP_DEFER_ACCEPT, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, 0);
-}
-
-TEST_P(SimpleTcpSocketTest, SetTCPDeferAcceptGreaterThanZero) {
- FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
- // kTCPDeferAccept is in seconds.
-  // NOTE: Linux translates seconds to a number of retries and back from the
-  // number of retries to seconds, which means only certain values translate
-  // back exactly. That's why we use 3 here: a value of 5 would result in
-  // getting back 7 instead of 5 from the getsockopt.
- constexpr int kTCPDeferAccept = 3;
- ASSERT_THAT(setsockopt(s.get(), IPPROTO_TCP, TCP_DEFER_ACCEPT,
- &kTCPDeferAccept, sizeof(kTCPDeferAccept)),
- SyscallSucceeds());
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(
- getsockopt(s.get(), IPPROTO_TCP, TCP_DEFER_ACCEPT, &get, &get_len),
- SyscallSucceeds());
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kTCPDeferAccept);
-}
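The NOTE in the test above alludes to Linux storing TCP_DEFER_ACCEPT as a retransmit count rather than as raw seconds. A rough sketch of that round trip, assuming a 1-second initial timeout that doubles on each retry (an approximation in the spirit of Linux's secs_to_retrans/retrans_to_secs, not the actual kernel code), shows why 3 survives the round trip while 5 comes back as 7:

  // Smallest retry count whose cumulative timeout (1 + 2 + 4 + ... seconds)
  // covers the requested number of seconds.
  int SecsToRetransSketch(int seconds) {
    int retrans = 0, timeout = 1, cumulative = 0;
    while (cumulative < seconds) {
      cumulative += timeout;
      timeout *= 2;
      ++retrans;
    }
    return retrans;
  }

  // Cumulative timeout for a retry count: 2^retrans - 1 seconds.
  int RetransToSecsSketch(int retrans) { return (1 << retrans) - 1; }

  // RetransToSecsSketch(SecsToRetransSketch(3)) == 3, but
  // RetransToSecsSketch(SecsToRetransSketch(5)) == 7.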
-
-TEST_P(SimpleTcpSocketTest, RecvOnClosedSocket) {
- auto s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
- char buf[1];
- EXPECT_THAT(recv(s.get(), buf, 0, 0), SyscallFailsWithErrno(ENOTCONN));
- EXPECT_THAT(recv(s.get(), buf, sizeof(buf), 0),
- SyscallFailsWithErrno(ENOTCONN));
-}
-
-TEST_P(SimpleTcpSocketTest, TCPConnectSoRcvBufRace) {
- auto s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(GetParam(), SOCK_STREAM | SOCK_NONBLOCK, IPPROTO_TCP));
- sockaddr_storage addr =
- ASSERT_NO_ERRNO_AND_VALUE(InetLoopbackAddr(GetParam()));
- socklen_t addrlen = sizeof(addr);
-
- RetryEINTR(connect)(s.get(), AsSockAddr(&addr), addrlen);
- int buf_sz = 1 << 18;
- EXPECT_THAT(
- setsockopt(s.get(), SOL_SOCKET, SO_RCVBUF, &buf_sz, sizeof(buf_sz)),
- SyscallSucceedsWithValue(0));
-}
-
-TEST_P(SimpleTcpSocketTest, SetTCPSynCntLessThanOne) {
- FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(getsockopt(s.get(), IPPROTO_TCP, TCP_SYNCNT, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- int default_syn_cnt = get;
-
- {
- // TCP_SYNCNT less than 1 should be rejected with an EINVAL.
- constexpr int kZero = 0;
- EXPECT_THAT(
- setsockopt(s.get(), IPPROTO_TCP, TCP_SYNCNT, &kZero, sizeof(kZero)),
- SyscallFailsWithErrno(EINVAL));
-
- // TCP_SYNCNT less than 1 should be rejected with an EINVAL.
- constexpr int kNeg = -1;
- EXPECT_THAT(
- setsockopt(s.get(), IPPROTO_TCP, TCP_SYNCNT, &kNeg, sizeof(kNeg)),
- SyscallFailsWithErrno(EINVAL));
-
- int get = -1;
- socklen_t get_len = sizeof(get);
-
- ASSERT_THAT(getsockopt(s.get(), IPPROTO_TCP, TCP_SYNCNT, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(default_syn_cnt, get);
- }
-}
-
-TEST_P(SimpleTcpSocketTest, GetTCPSynCntDefault) {
- FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- constexpr int kDefaultSynCnt = 6;
-
- ASSERT_THAT(getsockopt(s.get(), IPPROTO_TCP, TCP_SYNCNT, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kDefaultSynCnt);
-}
-
-TEST_P(SimpleTcpSocketTest, SetTCPSynCntGreaterThanOne) {
- FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
- constexpr int kTCPSynCnt = 20;
- ASSERT_THAT(setsockopt(s.get(), IPPROTO_TCP, TCP_SYNCNT, &kTCPSynCnt,
- sizeof(kTCPSynCnt)),
- SyscallSucceeds());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(getsockopt(s.get(), IPPROTO_TCP, TCP_SYNCNT, &get, &get_len),
- SyscallSucceeds());
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kTCPSynCnt);
-}
-
-TEST_P(SimpleTcpSocketTest, SetTCPSynCntAboveMax) {
- FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(getsockopt(s.get(), IPPROTO_TCP, TCP_SYNCNT, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- int default_syn_cnt = get;
- {
- constexpr int kTCPSynCnt = 256;
- ASSERT_THAT(setsockopt(s.get(), IPPROTO_TCP, TCP_SYNCNT, &kTCPSynCnt,
- sizeof(kTCPSynCnt)),
- SyscallFailsWithErrno(EINVAL));
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(getsockopt(s.get(), IPPROTO_TCP, TCP_SYNCNT, &get, &get_len),
- SyscallSucceeds());
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, default_syn_cnt);
- }
-}
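Taken together, SetTCPSynCntGreaterThanOne and SetTCPSynCntAboveMax suggest TCP_SYNCNT accepts values in a closed range; on Linux the upper bound is MAX_TCP_SYNCNT (127), which is why 256 is rejected while 20 is accepted. A minimal sketch of that validation, under that assumption:

  // Hypothetical validation mirroring setsockopt(TCP_SYNCNT): values outside
  // [1, 127] are rejected with EINVAL.
  int ValidateSynCntSketch(int val) {
    constexpr int kMaxTcpSynCnt = 127;  // Linux's MAX_TCP_SYNCNT.
    return (val < 1 || val > kMaxTcpSynCnt) ? EINVAL : 0;
  }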
-
-TEST_P(SimpleTcpSocketTest, SetTCPWindowClampBelowMinRcvBuf) {
- FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
-
-  // Discover the minimum receive buffer size by setting a really low value
-  // for the receive buffer.
- constexpr int kZero = 0;
- EXPECT_THAT(setsockopt(s.get(), SOL_SOCKET, SO_RCVBUF, &kZero, sizeof(kZero)),
- SyscallSucceeds());
-
- // Now retrieve the minimum value for SO_RCVBUF as the set above should
- // have caused SO_RCVBUF for the socket to be set to the minimum.
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(getsockopt(s.get(), SOL_SOCKET, SO_RCVBUF, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- int min_so_rcvbuf = get;
-
- {
- // TCP_WINDOW_CLAMP less than min_so_rcvbuf/2 should be set to
- // min_so_rcvbuf/2.
- int below_half_min_rcvbuf = min_so_rcvbuf / 2 - 1;
- EXPECT_THAT(
- setsockopt(s.get(), IPPROTO_TCP, TCP_WINDOW_CLAMP,
- &below_half_min_rcvbuf, sizeof(below_half_min_rcvbuf)),
- SyscallSucceeds());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
-
- ASSERT_THAT(
- getsockopt(s.get(), IPPROTO_TCP, TCP_WINDOW_CLAMP, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(min_so_rcvbuf / 2, get);
- }
-}
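The block above relies on nonzero TCP_WINDOW_CLAMP requests being rounded up to half the minimum receive buffer. A minimal sketch of that rule, assuming zero clears the clamp (as the next test checks) and other values are only clamped from below:

  // Hypothetical clamp rule: zero disables the clamp; nonzero requests are
  // raised to at least min_rcvbuf / 2.
  int ApplyWindowClampSketch(int requested, int min_rcvbuf) {
    if (requested == 0) return 0;
    return requested < min_rcvbuf / 2 ? min_rcvbuf / 2 : requested;
  }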
-
-TEST_P(SimpleTcpSocketTest, SetTCPWindowClampZeroClosedSocket) {
- FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
- constexpr int kZero = 0;
- ASSERT_THAT(
- setsockopt(s.get(), IPPROTO_TCP, TCP_WINDOW_CLAMP, &kZero, sizeof(kZero)),
- SyscallSucceeds());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(
- getsockopt(s.get(), IPPROTO_TCP, TCP_WINDOW_CLAMP, &get, &get_len),
- SyscallSucceeds());
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(get, kZero);
-}
-
-TEST_P(SimpleTcpSocketTest, SetTCPWindowClampAboveHalfMinRcvBuf) {
- FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
-
-  // Discover the minimum receive buffer size by setting a really low value
-  // for the receive buffer.
- constexpr int kZero = 0;
- EXPECT_THAT(setsockopt(s.get(), SOL_SOCKET, SO_RCVBUF, &kZero, sizeof(kZero)),
- SyscallSucceeds());
-
- // Now retrieve the minimum value for SO_RCVBUF as the set above should
- // have caused SO_RCVBUF for the socket to be set to the minimum.
- int get = -1;
- socklen_t get_len = sizeof(get);
- ASSERT_THAT(getsockopt(s.get(), SOL_SOCKET, SO_RCVBUF, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- int min_so_rcvbuf = get;
-
- {
- int above_half_min_rcv_buf = min_so_rcvbuf / 2 + 1;
- EXPECT_THAT(
- setsockopt(s.get(), IPPROTO_TCP, TCP_WINDOW_CLAMP,
- &above_half_min_rcv_buf, sizeof(above_half_min_rcv_buf)),
- SyscallSucceeds());
-
- int get = -1;
- socklen_t get_len = sizeof(get);
-
- ASSERT_THAT(
- getsockopt(s.get(), IPPROTO_TCP, TCP_WINDOW_CLAMP, &get, &get_len),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(get_len, sizeof(get));
- EXPECT_EQ(above_half_min_rcv_buf, get);
- }
-}
-
-#ifdef __linux__
-
-// TODO(gvisor.dev/2746): Support SO_ATTACH_FILTER/SO_DETACH_FILTER.
-// gVisor currently silently ignores attaching a filter.
-TEST_P(SimpleTcpSocketTest, SetSocketAttachDetachFilter) {
- FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
- // Program generated using sudo tcpdump -i lo tcp and port 1234 -dd
- struct sock_filter code[] = {
- {0x28, 0, 0, 0x0000000c}, {0x15, 0, 6, 0x000086dd},
- {0x30, 0, 0, 0x00000014}, {0x15, 0, 15, 0x00000006},
- {0x28, 0, 0, 0x00000036}, {0x15, 12, 0, 0x000004d2},
- {0x28, 0, 0, 0x00000038}, {0x15, 10, 11, 0x000004d2},
- {0x15, 0, 10, 0x00000800}, {0x30, 0, 0, 0x00000017},
- {0x15, 0, 8, 0x00000006}, {0x28, 0, 0, 0x00000014},
- {0x45, 6, 0, 0x00001fff}, {0xb1, 0, 0, 0x0000000e},
- {0x48, 0, 0, 0x0000000e}, {0x15, 2, 0, 0x000004d2},
- {0x48, 0, 0, 0x00000010}, {0x15, 0, 1, 0x000004d2},
- {0x6, 0, 0, 0x00040000}, {0x6, 0, 0, 0x00000000},
- };
- struct sock_fprog bpf = {
- .len = ABSL_ARRAYSIZE(code),
- .filter = code,
- };
- ASSERT_THAT(
- setsockopt(s.get(), SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf)),
- SyscallSucceeds());
-
- constexpr int val = 0;
- ASSERT_THAT(
- setsockopt(s.get(), SOL_SOCKET, SO_DETACH_FILTER, &val, sizeof(val)),
- SyscallSucceeds());
-}
-
-#endif // __linux__
-
-TEST_P(SimpleTcpSocketTest, SetSocketDetachFilterNoInstalledFilter) {
- // TODO(gvisor.dev/2746): Support SO_ATTACH_FILTER/SO_DETACH_FILTER.
- SKIP_IF(IsRunningOnGvisor());
- FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
- constexpr int val = 0;
- ASSERT_THAT(
- setsockopt(s.get(), SOL_SOCKET, SO_DETACH_FILTER, &val, sizeof(val)),
- SyscallFailsWithErrno(ENOENT));
-}
-
-TEST_P(SimpleTcpSocketTest, GetSocketDetachFilter) {
- FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
-
- int val = 0;
- socklen_t val_len = sizeof(val);
- ASSERT_THAT(getsockopt(s.get(), SOL_SOCKET, SO_DETACH_FILTER, &val, &val_len),
- SyscallFailsWithErrno(ENOPROTOOPT));
-}
-
-TEST_P(SimpleTcpSocketTest, CloseNonConnectedLingerOption) {
- FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
-
- constexpr int kLingerTimeout = 10; // Seconds.
-
- // Set the SO_LINGER option.
- struct linger sl = {
- .l_onoff = 1,
- .l_linger = kLingerTimeout,
- };
- ASSERT_THAT(setsockopt(s.get(), SOL_SOCKET, SO_LINGER, &sl, sizeof(sl)),
- SyscallSucceeds());
-
- struct pollfd poll_fd = {
- .fd = s.get(),
- .events = POLLHUP,
- };
- constexpr int kPollTimeoutMs = 0;
- ASSERT_THAT(RetryEINTR(poll)(&poll_fd, 1, kPollTimeoutMs),
- SyscallSucceedsWithValue(1));
-
- auto const start_time = absl::Now();
- EXPECT_THAT(close(s.release()), SyscallSucceeds());
- auto const end_time = absl::Now();
-
- // Close() should not linger and return immediately.
- ASSERT_LT((end_time - start_time), absl::Seconds(kLingerTimeout));
-}
-
-// Tests that SO_ACCEPTCONN returns a non-zero value for listening sockets.
-TEST_P(TcpSocketTest, GetSocketAcceptConnListener) {
- int got = -1;
- socklen_t length = sizeof(got);
- ASSERT_THAT(getsockopt(listener_, SOL_SOCKET, SO_ACCEPTCONN, &got, &length),
- SyscallSucceeds());
- ASSERT_EQ(length, sizeof(got));
- EXPECT_EQ(got, 1);
-}
-
-// Tests that SO_ACCEPTCONN returns zero for non-listening sockets.
-TEST_P(TcpSocketTest, GetSocketAcceptConnNonListener) {
- int got = -1;
- socklen_t length = sizeof(got);
- ASSERT_THAT(getsockopt(first_fd, SOL_SOCKET, SO_ACCEPTCONN, &got, &length),
- SyscallSucceeds());
- ASSERT_EQ(length, sizeof(got));
- EXPECT_EQ(got, 0);
-
- ASSERT_THAT(getsockopt(second_fd, SOL_SOCKET, SO_ACCEPTCONN, &got, &length),
- SyscallSucceeds());
- ASSERT_EQ(length, sizeof(got));
- EXPECT_EQ(got, 0);
-}
-
-TEST_P(SimpleTcpSocketTest, GetSocketAcceptConnWithShutdown) {
- // TODO(b/171345701): Fix the TCP state for listening socket on shutdown.
- SKIP_IF(IsRunningOnGvisor());
-
- FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
-
- // Initialize address to the loopback one.
- sockaddr_storage addr =
- ASSERT_NO_ERRNO_AND_VALUE(InetLoopbackAddr(GetParam()));
- socklen_t addrlen = sizeof(addr);
-
- // Bind to some port then start listening.
- ASSERT_THAT(bind(s.get(), AsSockAddr(&addr), addrlen), SyscallSucceeds());
-
- ASSERT_THAT(listen(s.get(), SOMAXCONN), SyscallSucceeds());
-
- int got = -1;
- socklen_t length = sizeof(got);
- ASSERT_THAT(getsockopt(s.get(), SOL_SOCKET, SO_ACCEPTCONN, &got, &length),
- SyscallSucceeds());
- ASSERT_EQ(length, sizeof(got));
- EXPECT_EQ(got, 1);
-
- EXPECT_THAT(shutdown(s.get(), SHUT_RD), SyscallSucceeds());
- ASSERT_THAT(getsockopt(s.get(), SOL_SOCKET, SO_ACCEPTCONN, &got, &length),
- SyscallSucceeds());
- ASSERT_EQ(length, sizeof(got));
- EXPECT_EQ(got, 0);
-}
-
-// Tests that connecting to an unspecified address results in ECONNREFUSED.
-TEST_P(SimpleTcpSocketTest, ConnectUnspecifiedAddress) {
- sockaddr_storage addr;
- socklen_t addrlen = sizeof(addr);
- memset(&addr, 0, addrlen);
- addr.ss_family = GetParam();
- auto do_connect = [&addr, addrlen]() {
- FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(addr.ss_family, SOCK_STREAM, IPPROTO_TCP));
- ASSERT_THAT(RetryEINTR(connect)(s.get(), AsSockAddr(&addr), addrlen),
- SyscallFailsWithErrno(ECONNREFUSED));
- };
- do_connect();
- // Test the v4 mapped address as well.
- if (GetParam() == AF_INET6) {
- auto sin6 = reinterpret_cast<struct sockaddr_in6*>(&addr);
- sin6->sin6_addr.s6_addr[10] = sin6->sin6_addr.s6_addr[11] = 0xff;
- do_connect();
- }
-}
-
-INSTANTIATE_TEST_SUITE_P(AllInetTests, SimpleTcpSocketTest,
- ::testing::Values(AF_INET, AF_INET6));
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/tgkill.cc b/test/syscalls/linux/tgkill.cc
deleted file mode 100644
index 80acae5de..000000000
--- a/test/syscalls/linux/tgkill.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "test/util/signal_util.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(TgkillTest, InvalidTID) {
- EXPECT_THAT(tgkill(getpid(), -1, 0), SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(tgkill(getpid(), 0, 0), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(TgkillTest, InvalidTGID) {
- EXPECT_THAT(tgkill(-1, gettid(), 0), SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(tgkill(0, gettid(), 0), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(TgkillTest, ValidInput) {
- EXPECT_THAT(tgkill(getpid(), gettid(), 0), SyscallSucceeds());
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/time.cc b/test/syscalls/linux/time.cc
deleted file mode 100644
index e75bba669..000000000
--- a/test/syscalls/linux/time.cc
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <time.h>
-
-#include "gtest/gtest.h"
-#include "test/util/proc_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-constexpr long kFudgeSeconds = 5;
-
-#if defined(__x86_64__) || defined(__i386__)
-// Mimics the time(2) wrapper from glibc prior to 2.15.
-time_t vsyscall_time(time_t* t) {
- constexpr uint64_t kVsyscallTimeEntry = 0xffffffffff600400;
- return reinterpret_cast<time_t (*)(time_t*)>(kVsyscallTimeEntry)(t);
-}
-
-TEST(TimeTest, VsyscallTime_Succeeds) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(IsVsyscallEnabled()));
-
- time_t t1, t2;
-
- {
- const DisableSave ds; // Timing assertions.
- EXPECT_THAT(time(&t1), SyscallSucceeds());
- EXPECT_THAT(vsyscall_time(&t2), SyscallSucceeds());
- }
-
- // Time should be monotonic.
- EXPECT_LE(static_cast<long>(t1), static_cast<long>(t2));
-
- // Check that it's within kFudge seconds.
- EXPECT_LE(static_cast<long>(t2), static_cast<long>(t1) + kFudgeSeconds);
-
- // Redo with save.
- EXPECT_THAT(time(&t1), SyscallSucceeds());
- EXPECT_THAT(vsyscall_time(&t2), SyscallSucceeds());
-
- // Time should be monotonic.
- EXPECT_LE(static_cast<long>(t1), static_cast<long>(t2));
-}
-
-TEST(TimeTest, VsyscallTime_InvalidAddressSIGSEGV) {
- EXPECT_EXIT(vsyscall_time(reinterpret_cast<time_t*>(0x1)),
- ::testing::KilledBySignal(SIGSEGV), "");
-}
-
-// Mimics the gettimeofday(2) wrapper from the Go runtime <= 1.2.
-int vsyscall_gettimeofday(struct timeval* tv, struct timezone* tz) {
- constexpr uint64_t kVsyscallGettimeofdayEntry = 0xffffffffff600000;
- return reinterpret_cast<int (*)(struct timeval*, struct timezone*)>(
- kVsyscallGettimeofdayEntry)(tv, tz);
-}
-
-TEST(TimeTest, VsyscallGettimeofday_Succeeds) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(IsVsyscallEnabled()));
-
- struct timeval tv1, tv2;
- struct timezone tz1, tz2;
-
- {
- const DisableSave ds; // Timing assertions.
- EXPECT_THAT(gettimeofday(&tv1, &tz1), SyscallSucceeds());
- EXPECT_THAT(vsyscall_gettimeofday(&tv2, &tz2), SyscallSucceeds());
- }
-
- // See above.
- EXPECT_LE(static_cast<long>(tv1.tv_sec), static_cast<long>(tv2.tv_sec));
- EXPECT_LE(static_cast<long>(tv2.tv_sec),
- static_cast<long>(tv1.tv_sec) + kFudgeSeconds);
-
- // Redo with save.
- EXPECT_THAT(gettimeofday(&tv1, &tz1), SyscallSucceeds());
- EXPECT_THAT(vsyscall_gettimeofday(&tv2, &tz2), SyscallSucceeds());
-}
-
-TEST(TimeTest, VsyscallGettimeofday_InvalidAddressSIGSEGV) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(IsVsyscallEnabled()));
-
- EXPECT_EXIT(vsyscall_gettimeofday(reinterpret_cast<struct timeval*>(0x1),
- reinterpret_cast<struct timezone*>(0x1)),
- ::testing::KilledBySignal(SIGSEGV), "");
-}
-#endif
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/timerfd.cc b/test/syscalls/linux/timerfd.cc
deleted file mode 100644
index 072c92797..000000000
--- a/test/syscalls/linux/timerfd.cc
+++ /dev/null
@@ -1,273 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <poll.h>
-#include <sys/timerfd.h>
-#include <time.h>
-
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/posix_error.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// Wrapper around timerfd_create(2) that returns a FileDescriptor.
-PosixErrorOr<FileDescriptor> TimerfdCreate(int clockid, int flags) {
- int fd = timerfd_create(clockid, flags);
- MaybeSave();
- if (fd < 0) {
- return PosixError(errno, "timerfd_create failed");
- }
- return FileDescriptor(fd);
-}
-
-// In tests that race a timerfd with a sleep, some slack is required because:
-//
-// - Timerfd expirations are asynchronous with respect to nanosleeps.
-//
-// - Because clock_gettime(CLOCK_MONOTONIC) is implemented through the VDSO,
-// it technically uses a closely-related, but distinct, time domain from the
-// CLOCK_MONOTONIC used to trigger timerfd expirations. The same applies to
-// CLOCK_BOOTTIME which is an alias for CLOCK_MONOTONIC.
-absl::Duration TimerSlack() { return absl::Milliseconds(500); }
-
-class TimerfdTest : public ::testing::TestWithParam<int> {};
-
-TEST_P(TimerfdTest, IsInitiallyStopped) {
- auto const tfd = ASSERT_NO_ERRNO_AND_VALUE(TimerfdCreate(GetParam(), 0));
- struct itimerspec its = {};
- ASSERT_THAT(timerfd_gettime(tfd.get(), &its), SyscallSucceeds());
- EXPECT_EQ(0, its.it_value.tv_sec);
- EXPECT_EQ(0, its.it_value.tv_nsec);
-}
-
-TEST_P(TimerfdTest, SingleShot) {
- constexpr absl::Duration kDelay = absl::Seconds(1);
-
- auto const tfd = ASSERT_NO_ERRNO_AND_VALUE(TimerfdCreate(GetParam(), 0));
- struct itimerspec its = {};
- its.it_value = absl::ToTimespec(kDelay);
- ASSERT_THAT(timerfd_settime(tfd.get(), /* flags = */ 0, &its, nullptr),
- SyscallSucceeds());
-
- // The timer should fire exactly once since the interval is zero.
- absl::SleepFor(kDelay + TimerSlack());
- uint64_t val = 0;
- ASSERT_THAT(ReadFd(tfd.get(), &val, sizeof(uint64_t)),
- SyscallSucceedsWithValue(sizeof(uint64_t)));
- EXPECT_EQ(1, val);
-}
-
-TEST_P(TimerfdTest, Periodic) {
- constexpr absl::Duration kDelay = absl::Seconds(1);
- constexpr int kPeriods = 3;
-
- auto const tfd = ASSERT_NO_ERRNO_AND_VALUE(TimerfdCreate(GetParam(), 0));
- struct itimerspec its = {};
- its.it_value = absl::ToTimespec(kDelay);
- its.it_interval = absl::ToTimespec(kDelay);
- ASSERT_THAT(timerfd_settime(tfd.get(), /* flags = */ 0, &its, nullptr),
- SyscallSucceeds());
-
- // Expect to see at least kPeriods expirations. More may occur due to the
- // timer slack, or due to delays from scheduling or save/restore.
- absl::SleepFor(kPeriods * kDelay + TimerSlack());
- uint64_t val = 0;
- ASSERT_THAT(ReadFd(tfd.get(), &val, sizeof(uint64_t)),
- SyscallSucceedsWithValue(sizeof(uint64_t)));
- EXPECT_GE(val, kPeriods);
-}
-
-TEST_P(TimerfdTest, BlockingRead) {
- constexpr absl::Duration kDelay = absl::Seconds(3);
-
- auto const tfd = ASSERT_NO_ERRNO_AND_VALUE(TimerfdCreate(GetParam(), 0));
- struct itimerspec its = {};
- its.it_value.tv_sec = absl::ToInt64Seconds(kDelay);
- auto const start_time = absl::Now();
- ASSERT_THAT(timerfd_settime(tfd.get(), /* flags = */ 0, &its, nullptr),
- SyscallSucceeds());
-
- // read should block until the timer fires.
- uint64_t val = 0;
- ASSERT_THAT(ReadFd(tfd.get(), &val, sizeof(uint64_t)),
- SyscallSucceedsWithValue(sizeof(uint64_t)));
- auto const end_time = absl::Now();
- EXPECT_EQ(1, val);
- EXPECT_GE((end_time - start_time) + TimerSlack(), kDelay);
-}
-
-TEST_P(TimerfdTest, NonblockingRead) {
- constexpr absl::Duration kDelay = absl::Seconds(5);
-
- auto const tfd =
- ASSERT_NO_ERRNO_AND_VALUE(TimerfdCreate(GetParam(), TFD_NONBLOCK));
-
- // Since the timer is initially disabled and has never fired, read should
- // return EAGAIN.
- uint64_t val = 0;
- ASSERT_THAT(ReadFd(tfd.get(), &val, sizeof(uint64_t)),
- SyscallFailsWithErrno(EAGAIN));
-
- DisableSave ds; // Timing-sensitive.
-
- // Arm the timer.
- struct itimerspec its = {};
- its.it_value.tv_sec = absl::ToInt64Seconds(kDelay);
- ASSERT_THAT(timerfd_settime(tfd.get(), /* flags = */ 0, &its, nullptr),
- SyscallSucceeds());
-
- // Since the timer has not yet fired, read should return EAGAIN.
- ASSERT_THAT(ReadFd(tfd.get(), &val, sizeof(uint64_t)),
- SyscallFailsWithErrno(EAGAIN));
-
- ds.reset(); // No longer timing-sensitive.
-
- // After the timer fires, read should indicate 1 expiration.
- absl::SleepFor(kDelay + TimerSlack());
- ASSERT_THAT(ReadFd(tfd.get(), &val, sizeof(uint64_t)),
- SyscallSucceedsWithValue(sizeof(uint64_t)));
- EXPECT_EQ(1, val);
-
- // The successful read should have reset the number of expirations.
- ASSERT_THAT(ReadFd(tfd.get(), &val, sizeof(uint64_t)),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-TEST_P(TimerfdTest, BlockingPoll_SetTimeResetsExpirations) {
- constexpr absl::Duration kDelay = absl::Seconds(3);
-
- auto const tfd =
- ASSERT_NO_ERRNO_AND_VALUE(TimerfdCreate(GetParam(), TFD_NONBLOCK));
- struct itimerspec its = {};
- its.it_value.tv_sec = absl::ToInt64Seconds(kDelay);
- auto const start_time = absl::Now();
- ASSERT_THAT(timerfd_settime(tfd.get(), /* flags = */ 0, &its, nullptr),
- SyscallSucceeds());
-
- // poll should block until the timer fires.
- struct pollfd pfd = {};
- pfd.fd = tfd.get();
- pfd.events = POLLIN;
- ASSERT_THAT(poll(&pfd, /* nfds = */ 1,
- /* timeout = */ 2 * absl::ToInt64Seconds(kDelay) * 1000),
- SyscallSucceedsWithValue(1));
- auto const end_time = absl::Now();
- EXPECT_EQ(POLLIN, pfd.revents);
- EXPECT_GE((end_time - start_time) + TimerSlack(), kDelay);
-
- // Call timerfd_settime again with a value of 0. This should reset the number
- // of expirations to 0, causing read to return EAGAIN since the timerfd is
- // non-blocking.
- its.it_value.tv_sec = 0;
- ASSERT_THAT(timerfd_settime(tfd.get(), /* flags = */ 0, &its, nullptr),
- SyscallSucceeds());
- uint64_t val = 0;
- ASSERT_THAT(ReadFd(tfd.get(), &val, sizeof(uint64_t)),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-TEST_P(TimerfdTest, SetAbsoluteTime) {
- constexpr absl::Duration kDelay = absl::Seconds(3);
-
- // Use a non-blocking timerfd so that if TFD_TIMER_ABSTIME is incorrectly
- // non-functional, we get EAGAIN rather than a test timeout.
- auto const tfd =
- ASSERT_NO_ERRNO_AND_VALUE(TimerfdCreate(GetParam(), TFD_NONBLOCK));
- struct itimerspec its = {};
- ASSERT_THAT(clock_gettime(GetParam(), &its.it_value), SyscallSucceeds());
- its.it_value.tv_sec += absl::ToInt64Seconds(kDelay);
- ASSERT_THAT(timerfd_settime(tfd.get(), TFD_TIMER_ABSTIME, &its, nullptr),
- SyscallSucceeds());
-
- absl::SleepFor(kDelay + TimerSlack());
- uint64_t val = 0;
- ASSERT_THAT(ReadFd(tfd.get(), &val, sizeof(uint64_t)),
- SyscallSucceedsWithValue(sizeof(uint64_t)));
- EXPECT_EQ(1, val);
-}
-
-TEST_P(TimerfdTest, IllegalSeek) {
- auto const tfd = ASSERT_NO_ERRNO_AND_VALUE(TimerfdCreate(GetParam(), 0));
- if (!IsRunningWithVFS1()) {
- EXPECT_THAT(lseek(tfd.get(), 0, SEEK_SET), SyscallFailsWithErrno(ESPIPE));
- }
-}
-
-TEST_P(TimerfdTest, IllegalPread) {
- auto const tfd = ASSERT_NO_ERRNO_AND_VALUE(TimerfdCreate(GetParam(), 0));
- int val;
- EXPECT_THAT(pread(tfd.get(), &val, sizeof(val), 0),
- SyscallFailsWithErrno(ESPIPE));
-}
-
-TEST_P(TimerfdTest, IllegalPwrite) {
- auto const tfd = ASSERT_NO_ERRNO_AND_VALUE(TimerfdCreate(GetParam(), 0));
- EXPECT_THAT(pwrite(tfd.get(), "x", 1, 0), SyscallFailsWithErrno(ESPIPE));
-}
-
-TEST_P(TimerfdTest, IllegalWrite) {
- auto const tfd =
- ASSERT_NO_ERRNO_AND_VALUE(TimerfdCreate(GetParam(), TFD_NONBLOCK));
- uint64_t val = 0;
- EXPECT_THAT(write(tfd.get(), &val, sizeof(val)),
- SyscallFailsWithErrno(EINVAL));
-}
-
-std::string PrintClockId(::testing::TestParamInfo<int> info) {
- switch (info.param) {
- case CLOCK_MONOTONIC:
- return "CLOCK_MONOTONIC";
- case CLOCK_BOOTTIME:
- return "CLOCK_BOOTTIME";
- default:
- return absl::StrCat(info.param);
- }
-}
-
-INSTANTIATE_TEST_SUITE_P(AllTimerTypes, TimerfdTest,
- ::testing::Values(CLOCK_MONOTONIC, CLOCK_BOOTTIME),
- PrintClockId);
-
-TEST(TimerfdClockRealtimeTest, ClockRealtime) {
- // Since CLOCK_REALTIME can, by definition, change, we can't make any
- // non-flaky assertions about the amount of time it takes for a
- // CLOCK_REALTIME-based timer to expire. Just check that it expires at all,
- // and hope it happens before the test times out.
- constexpr int kDelaySecs = 1;
-
- auto const tfd = ASSERT_NO_ERRNO_AND_VALUE(TimerfdCreate(CLOCK_REALTIME, 0));
- struct itimerspec its = {};
- its.it_value.tv_sec = kDelaySecs;
- ASSERT_THAT(timerfd_settime(tfd.get(), /* flags = */ 0, &its, nullptr),
- SyscallSucceeds());
-
- uint64_t val = 0;
- ASSERT_THAT(ReadFd(tfd.get(), &val, sizeof(uint64_t)),
- SyscallSucceedsWithValue(sizeof(uint64_t)));
- EXPECT_EQ(1, val);
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/timers.cc b/test/syscalls/linux/timers.cc
deleted file mode 100644
index bc12dd4af..000000000
--- a/test/syscalls/linux/timers.cc
+++ /dev/null
@@ -1,572 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <signal.h>
-#include <sys/resource.h>
-#include <sys/time.h>
-#include <syscall.h>
-#include <time.h>
-#include <unistd.h>
-
-#include <atomic>
-
-#include "gtest/gtest.h"
-#include "absl/flags/flag.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "benchmark/benchmark.h"
-#include "test/util/cleanup.h"
-#include "test/util/logging.h"
-#include "test/util/multiprocess_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/signal_util.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-#include "test/util/timer_util.h"
-
-ABSL_FLAG(bool, timers_test_sleep, false,
- "If true, sleep forever instead of running tests.");
-
-using ::testing::_;
-using ::testing::AnyOf;
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-#ifndef CPUCLOCK_PROF
-#define CPUCLOCK_PROF 0
-#endif // CPUCLOCK_PROF
-
-PosixErrorOr<absl::Duration> ProcessCPUTime(pid_t pid) {
- // Use pid-specific CPUCLOCK_PROF, which is the clock used to enforce
- // RLIMIT_CPU.
- clockid_t clockid = (~static_cast<clockid_t>(pid) << 3) | CPUCLOCK_PROF;
-
- struct timespec ts;
- int ret = clock_gettime(clockid, &ts);
- if (ret < 0) {
- return PosixError(errno, "clock_gettime failed");
- }
-
- return absl::DurationFromTimespec(ts);
-}
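The clockid expression above follows the Linux CPU-clock encoding: the low two bits select PROF/VIRT/SCHED, bit 2 marks a per-thread clock, and the remaining bits hold the bitwise-complemented pid. A hedged sketch of both variants follows; the per-thread mask value is an assumption taken from the kernel's posix-timers header, not from this file.

// Sketch only: assumed per-thread mask from include/linux/posix-timers.h.
constexpr clockid_t kCpuClockPerThreadMask = 4;

clockid_t ProcessProfClock(pid_t pid) {
  return (~static_cast<clockid_t>(pid) << 3) | CPUCLOCK_PROF;
}

clockid_t ThreadProfClock(pid_t tid) {
  return (~static_cast<clockid_t>(tid) << 3) |
         (CPUCLOCK_PROF | kCpuClockPerThreadMask);
}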
-
-void NoopSignalHandler(int signo) {
- TEST_CHECK_MSG(SIGXCPU == signo,
- "NoopSignalHandler did not receive expected signal");
-}
-
-void UninstallingSignalHandler(int signo) {
- TEST_CHECK_MSG(SIGXCPU == signo,
- "UninstallingSignalHandler did not receive expected signal");
- struct sigaction rev_action;
- rev_action.sa_handler = SIG_DFL;
- rev_action.sa_flags = 0;
- sigemptyset(&rev_action.sa_mask);
- sigaction(SIGXCPU, &rev_action, nullptr);
-}
-
-TEST(TimerTest, ProcessKilledOnCPUSoftLimit) {
- constexpr absl::Duration kSoftLimit = absl::Seconds(1);
- constexpr absl::Duration kHardLimit = absl::Seconds(3);
-
- struct rlimit cpu_limits;
- cpu_limits.rlim_cur = absl::ToInt64Seconds(kSoftLimit);
- cpu_limits.rlim_max = absl::ToInt64Seconds(kHardLimit);
-
- int pid = fork();
- MaybeSave();
- if (pid == 0) {
- TEST_PCHECK(setrlimit(RLIMIT_CPU, &cpu_limits) == 0);
- MaybeSave();
- for (;;) {
- int x = 0;
- benchmark::DoNotOptimize(x); // Don't optimize this loop away.
- }
- }
- ASSERT_THAT(pid, SyscallSucceeds());
- auto c = Cleanup([pid] {
- int status;
- EXPECT_THAT(waitpid(pid, &status, 0), SyscallSucceedsWithValue(pid));
- EXPECT_TRUE(WIFSIGNALED(status));
- EXPECT_EQ(WTERMSIG(status), SIGXCPU);
- });
-
- // Wait for the child to exit, but do not reap it. This will allow us to check
- // its CPU usage while it is zombied.
- EXPECT_THAT(waitid(P_PID, pid, nullptr, WEXITED | WNOWAIT),
- SyscallSucceeds());
-
- // Assert that the child spent 1s of CPU before getting killed.
- //
- // We must be careful to use CPUCLOCK_PROF, the same clock used for RLIMIT_CPU
- // enforcement, to get correct results. Note that this is slightly different
- // from rusage-reported CPU usage:
- //
- // RLIMIT_CPU, CPUCLOCK_PROF use kernel/sched/cputime.c:thread_group_cputime.
- // rusage uses kernel/sched/cputime.c:thread_group_cputime_adjusted.
- absl::Duration cpu = ASSERT_NO_ERRNO_AND_VALUE(ProcessCPUTime(pid));
- EXPECT_GE(cpu, kSoftLimit);
-
- // Child did not make it to the hard limit.
- //
- // Linux sends SIGXCPU synchronously with CPU tick updates. See
- // kernel/time/timer.c:update_process_times:
- // => account_process_tick // update task CPU usage.
- // => run_posix_cpu_timers // enforce RLIMIT_CPU, sending signal.
- //
- // Thus, the only chance for this test to flake is if the system time
- // required to deliver the signal exceeds 2s.
- EXPECT_LT(cpu, kHardLimit);
-}
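For contrast with the CPUCLOCK_PROF measurement above, the rusage-based accounting mentioned in the comment can be sampled as below. This is an illustrative sketch rather than part of the original test.

// Sketch only: user + system CPU time of the calling process per getrusage(2),
// which uses thread_group_cputime_adjusted and may differ slightly from
// CPUCLOCK_PROF.
absl::Duration SelfRusageCPUTime() {
  struct rusage ru = {};
  TEST_PCHECK(getrusage(RUSAGE_SELF, &ru) == 0);
  return absl::DurationFromTimeval(ru.ru_utime) +
         absl::DurationFromTimeval(ru.ru_stime);
}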
-
-TEST(TimerTest, ProcessPingedRepeatedlyAfterCPUSoftLimit) {
- struct sigaction new_action;
- new_action.sa_handler = UninstallingSignalHandler;
- new_action.sa_flags = 0;
- sigemptyset(&new_action.sa_mask);
-
- constexpr absl::Duration kSoftLimit = absl::Seconds(1);
- constexpr absl::Duration kHardLimit = absl::Seconds(10);
-
- struct rlimit cpu_limits;
- cpu_limits.rlim_cur = absl::ToInt64Seconds(kSoftLimit);
- cpu_limits.rlim_max = absl::ToInt64Seconds(kHardLimit);
-
- int pid = fork();
- MaybeSave();
- if (pid == 0) {
- TEST_PCHECK(sigaction(SIGXCPU, &new_action, nullptr) == 0);
- MaybeSave();
- TEST_PCHECK(setrlimit(RLIMIT_CPU, &cpu_limits) == 0);
- MaybeSave();
- for (;;) {
- int x = 0;
- benchmark::DoNotOptimize(x); // Don't optimize this loop away.
- }
- }
- ASSERT_THAT(pid, SyscallSucceeds());
- auto c = Cleanup([pid] {
- int status;
- EXPECT_THAT(waitpid(pid, &status, 0), SyscallSucceedsWithValue(pid));
- EXPECT_TRUE(WIFSIGNALED(status));
- EXPECT_EQ(WTERMSIG(status), SIGXCPU);
- });
-
- // Wait for the child to exit, but do not reap it. This will allow us to check
- // its CPU usage while it is zombied.
- EXPECT_THAT(waitid(P_PID, pid, nullptr, WEXITED | WNOWAIT),
- SyscallSucceeds());
-
- absl::Duration cpu = ASSERT_NO_ERRNO_AND_VALUE(ProcessCPUTime(pid));
- // Following signals come every CPU second.
- EXPECT_GE(cpu, kSoftLimit + absl::Seconds(1));
-
- // Child did not make it to the hard limit.
- //
- // As above, should not flake.
- EXPECT_LT(cpu, kHardLimit);
-}
-
-TEST(TimerTest, ProcessKilledOnCPUHardLimit) {
- struct sigaction new_action;
- new_action.sa_handler = NoopSignalHandler;
- new_action.sa_flags = 0;
- sigemptyset(&new_action.sa_mask);
-
- constexpr absl::Duration kSoftLimit = absl::Seconds(1);
- constexpr absl::Duration kHardLimit = absl::Seconds(3);
-
- struct rlimit cpu_limits;
- cpu_limits.rlim_cur = absl::ToInt64Seconds(kSoftLimit);
- cpu_limits.rlim_max = absl::ToInt64Seconds(kHardLimit);
-
- int pid = fork();
- MaybeSave();
- if (pid == 0) {
- TEST_PCHECK(sigaction(SIGXCPU, &new_action, nullptr) == 0);
- MaybeSave();
- TEST_PCHECK(setrlimit(RLIMIT_CPU, &cpu_limits) == 0);
- MaybeSave();
- for (;;) {
- int x = 0;
- benchmark::DoNotOptimize(x); // Don't optimize this loop away.
- }
- }
- ASSERT_THAT(pid, SyscallSucceeds());
- auto c = Cleanup([pid] {
- int status;
- EXPECT_THAT(waitpid(pid, &status, 0), SyscallSucceedsWithValue(pid));
- EXPECT_TRUE(WIFSIGNALED(status));
- EXPECT_EQ(WTERMSIG(status), SIGKILL);
- });
-
- // Wait for the child to exit, but do not reap it. This will allow us to check
- // its CPU usage while it is zombied.
- EXPECT_THAT(waitid(P_PID, pid, nullptr, WEXITED | WNOWAIT),
- SyscallSucceeds());
-
- absl::Duration cpu = ASSERT_NO_ERRNO_AND_VALUE(ProcessCPUTime(pid));
- EXPECT_GE(cpu, kHardLimit);
-}
-
-// See timerfd.cc:TimerSlack() for rationale.
-constexpr absl::Duration kTimerSlack = absl::Milliseconds(500);
-
-TEST(IntervalTimerTest, IsInitiallyStopped) {
- struct sigevent sev = {};
- sev.sigev_notify = SIGEV_NONE;
- const auto timer =
- ASSERT_NO_ERRNO_AND_VALUE(TimerCreate(CLOCK_MONOTONIC, sev));
- const struct itimerspec its = ASSERT_NO_ERRNO_AND_VALUE(timer.Get());
- EXPECT_EQ(0, its.it_value.tv_sec);
- EXPECT_EQ(0, its.it_value.tv_nsec);
-}
-
-// Kernel can create multiple timers without issue.
-//
-// Regression test for gvisor.dev/issue/1738.
-TEST(IntervalTimerTest, MultipleTimers) {
- struct sigevent sev = {};
- sev.sigev_notify = SIGEV_NONE;
- const auto timer1 =
- ASSERT_NO_ERRNO_AND_VALUE(TimerCreate(CLOCK_MONOTONIC, sev));
- const auto timer2 =
- ASSERT_NO_ERRNO_AND_VALUE(TimerCreate(CLOCK_MONOTONIC, sev));
-}
-
-TEST(IntervalTimerTest, SingleShotSilent) {
- struct sigevent sev = {};
- sev.sigev_notify = SIGEV_NONE;
- const auto timer =
- ASSERT_NO_ERRNO_AND_VALUE(TimerCreate(CLOCK_MONOTONIC, sev));
-
- constexpr absl::Duration kDelay = absl::Seconds(1);
- struct itimerspec its = {};
- its.it_value = absl::ToTimespec(kDelay);
- ASSERT_NO_ERRNO(timer.Set(0, its));
-
- // The timer should count down to 0 and stop since the interval is zero. No
- // overruns should be counted.
- absl::SleepFor(kDelay + kTimerSlack);
- its = ASSERT_NO_ERRNO_AND_VALUE(timer.Get());
- EXPECT_EQ(0, its.it_value.tv_sec);
- EXPECT_EQ(0, its.it_value.tv_nsec);
- EXPECT_THAT(timer.Overruns(), IsPosixErrorOkAndHolds(0));
-}
-
-TEST(IntervalTimerTest, PeriodicSilent) {
- struct sigevent sev = {};
- sev.sigev_notify = SIGEV_NONE;
- const auto timer =
- ASSERT_NO_ERRNO_AND_VALUE(TimerCreate(CLOCK_MONOTONIC, sev));
-
- constexpr absl::Duration kPeriod = absl::Seconds(1);
- struct itimerspec its = {};
- its.it_value = its.it_interval = absl::ToTimespec(kPeriod);
- ASSERT_NO_ERRNO(timer.Set(0, its));
-
- absl::SleepFor(kPeriod * 3 + kTimerSlack);
-
- // The timer should still be running.
- its = ASSERT_NO_ERRNO_AND_VALUE(timer.Get());
- EXPECT_TRUE(its.it_value.tv_nsec != 0 || its.it_value.tv_sec != 0);
-
- // Timer expirations are not counted as overruns under SIGEV_NONE.
- EXPECT_THAT(timer.Overruns(), IsPosixErrorOkAndHolds(0));
-}
-
-std::atomic<int> counted_signals;
-
-void IntervalTimerCountingSignalHandler(int sig, siginfo_t* info,
- void* ucontext) {
- counted_signals.fetch_add(1 + info->si_overrun);
-}
-
-TEST(IntervalTimerTest, PeriodicGroupDirectedSignal) {
- constexpr int kSigno = SIGUSR1;
- constexpr int kSigvalue = 42;
-
- // Install our signal handler.
- counted_signals.store(0);
- struct sigaction sa = {};
- sa.sa_sigaction = IntervalTimerCountingSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO;
- const auto scoped_sigaction =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(kSigno, sa));
-
- // Ensure that kSigno is unblocked on at least one thread.
- const auto scoped_sigmask =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_UNBLOCK, kSigno));
-
- struct sigevent sev = {};
- sev.sigev_notify = SIGEV_SIGNAL;
- sev.sigev_signo = kSigno;
- sev.sigev_value.sival_int = kSigvalue;
- auto timer = ASSERT_NO_ERRNO_AND_VALUE(TimerCreate(CLOCK_MONOTONIC, sev));
-
- constexpr absl::Duration kPeriod = absl::Seconds(1);
- constexpr int kCycles = 3;
- struct itimerspec its = {};
- its.it_value = its.it_interval = absl::ToTimespec(kPeriod);
- ASSERT_NO_ERRNO(timer.Set(0, its));
-
- absl::SleepFor(kPeriod * kCycles + kTimerSlack);
- EXPECT_GE(counted_signals.load(), kCycles);
-}
-
-TEST(IntervalTimerTest, PeriodicThreadDirectedSignal) {
- constexpr int kSigno = SIGUSR1;
- constexpr int kSigvalue = 42;
-
- // Block kSigno so that we can accumulate overruns.
- sigset_t mask;
- sigemptyset(&mask);
- sigaddset(&mask, kSigno);
- const auto scoped_sigmask =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_BLOCK, mask));
-
- struct sigevent sev = {};
- sev.sigev_notify = SIGEV_THREAD_ID;
- sev.sigev_signo = kSigno;
- sev.sigev_value.sival_int = kSigvalue;
- sev.sigev_notify_thread_id = gettid();
- auto timer = ASSERT_NO_ERRNO_AND_VALUE(TimerCreate(CLOCK_MONOTONIC, sev));
-
- constexpr absl::Duration kPeriod = absl::Seconds(1);
- constexpr int kCycles = 3;
- struct itimerspec its = {};
- its.it_value = its.it_interval = absl::ToTimespec(kPeriod);
- ASSERT_NO_ERRNO(timer.Set(0, its));
- absl::SleepFor(kPeriod * kCycles + kTimerSlack);
-
- // At least kCycles expirations should have occurred, resulting in kCycles-1
- // overruns (the first expiration sent the signal successfully).
- siginfo_t si;
- struct timespec zero_ts = absl::ToTimespec(absl::ZeroDuration());
- ASSERT_THAT(sigtimedwait(&mask, &si, &zero_ts),
- SyscallSucceedsWithValue(kSigno));
- EXPECT_EQ(si.si_signo, kSigno);
- EXPECT_EQ(si.si_code, SI_TIMER);
- EXPECT_EQ(si.si_timerid, timer.get());
- EXPECT_GE(si.si_overrun, kCycles - 1);
- EXPECT_EQ(si.si_int, kSigvalue);
-
- // Kill the timer, then drain any additional signal it may have enqueued. We
- // can't do this before the preceding sigtimedwait because stopping or
- // deleting the timer resets si_overrun to 0.
- timer.reset();
- sigtimedwait(&mask, &si, &zero_ts);
-}
-
-TEST(IntervalTimerTest, OtherThreadGroup) {
- constexpr int kSigno = SIGUSR1;
-
- // Create a subprocess that does nothing until killed.
- pid_t child_pid;
- const auto sp = ASSERT_NO_ERRNO_AND_VALUE(ForkAndExec(
- "/proc/self/exe", ExecveArray({"timers", "--timers_test_sleep"}),
- ExecveArray(), &child_pid, nullptr));
-
- // Verify that we can't create a timer that would send signals to it.
- struct sigevent sev = {};
- sev.sigev_notify = SIGEV_THREAD_ID;
- sev.sigev_signo = kSigno;
- sev.sigev_notify_thread_id = child_pid;
- EXPECT_THAT(TimerCreate(CLOCK_MONOTONIC, sev), PosixErrorIs(EINVAL, _));
-}
-
-TEST(IntervalTimerTest, RealTimeSignalsAreNotDuplicated) {
- const int kSigno = SIGRTMIN;
- constexpr int kSigvalue = 42;
-
- // Block signo so that we can accumulate overruns.
- sigset_t mask;
- sigemptyset(&mask);
- sigaddset(&mask, kSigno);
- const auto scoped_sigmask = ScopedSignalMask(SIG_BLOCK, mask);
-
- struct sigevent sev = {};
- sev.sigev_notify = SIGEV_THREAD_ID;
- sev.sigev_signo = kSigno;
- sev.sigev_value.sival_int = kSigvalue;
- sev.sigev_notify_thread_id = gettid();
- const auto timer =
- ASSERT_NO_ERRNO_AND_VALUE(TimerCreate(CLOCK_MONOTONIC, sev));
-
- constexpr absl::Duration kPeriod = absl::Seconds(1);
- constexpr int kCycles = 3;
- struct itimerspec its = {};
- its.it_value = its.it_interval = absl::ToTimespec(kPeriod);
- ASSERT_NO_ERRNO(timer.Set(0, its));
- absl::SleepFor(kPeriod * kCycles + kTimerSlack);
-
- // Stop the timer so that no further signals are enqueued after sigtimedwait.
- struct timespec zero_ts = absl::ToTimespec(absl::ZeroDuration());
- its.it_value = its.it_interval = zero_ts;
- ASSERT_NO_ERRNO(timer.Set(0, its));
-
- // The timer should have sent only a single signal, even though the kernel
- // supports enqueueing of multiple RT signals.
- siginfo_t si;
- ASSERT_THAT(sigtimedwait(&mask, &si, &zero_ts),
- SyscallSucceedsWithValue(kSigno));
- EXPECT_EQ(si.si_signo, kSigno);
- EXPECT_EQ(si.si_code, SI_TIMER);
- EXPECT_EQ(si.si_timerid, timer.get());
- // si_overrun was reset by timer_settime.
- EXPECT_EQ(si.si_overrun, 0);
- EXPECT_EQ(si.si_int, kSigvalue);
- EXPECT_THAT(sigtimedwait(&mask, &si, &zero_ts),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-TEST(IntervalTimerTest, AlreadyPendingSignal) {
- constexpr int kSigno = SIGUSR1;
- constexpr int kSigvalue = 42;
-
- // Block kSigno so that we can accumulate overruns.
- sigset_t mask;
- sigemptyset(&mask);
- sigaddset(&mask, kSigno);
- const auto scoped_sigmask =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_BLOCK, mask));
-
- // Send ourselves a signal, preventing the timer from enqueuing.
- ASSERT_THAT(tgkill(getpid(), gettid(), kSigno), SyscallSucceeds());
-
- struct sigevent sev = {};
- sev.sigev_notify = SIGEV_THREAD_ID;
- sev.sigev_signo = kSigno;
- sev.sigev_value.sival_int = kSigvalue;
- sev.sigev_notify_thread_id = gettid();
- auto timer = ASSERT_NO_ERRNO_AND_VALUE(TimerCreate(CLOCK_MONOTONIC, sev));
-
- constexpr absl::Duration kPeriod = absl::Seconds(1);
- constexpr int kCycles = 3;
- struct itimerspec its = {};
- its.it_value = its.it_interval = absl::ToTimespec(kPeriod);
- ASSERT_NO_ERRNO(timer.Set(0, its));
-
- // End the sleep one cycle short; we will sleep for one more cycle below.
- absl::SleepFor(kPeriod * (kCycles - 1));
-
- // Dequeue the first signal, which we sent to ourselves with tgkill.
- siginfo_t si;
- struct timespec zero_ts = absl::ToTimespec(absl::ZeroDuration());
- ASSERT_THAT(sigtimedwait(&mask, &si, &zero_ts),
- SyscallSucceedsWithValue(kSigno));
- EXPECT_EQ(si.si_signo, kSigno);
- // glibc sigtimedwait silently replaces SI_TKILL with SI_USER:
- // sysdeps/unix/sysv/linux/sigtimedwait.c:__sigtimedwait(). This isn't
- // documented, so we don't depend on it.
- EXPECT_THAT(si.si_code, AnyOf(SI_USER, SI_TKILL));
-
- // Sleep for 1 more cycle to give the timer time to send a signal.
- absl::SleepFor(kPeriod + kTimerSlack);
-
- // At least kCycles expirations should have occurred, resulting in kCycles-1
- // overruns (the last expiration sent the signal successfully).
- ASSERT_THAT(sigtimedwait(&mask, &si, &zero_ts),
- SyscallSucceedsWithValue(kSigno));
- EXPECT_EQ(si.si_signo, kSigno);
- EXPECT_EQ(si.si_code, SI_TIMER);
- EXPECT_EQ(si.si_timerid, timer.get());
- EXPECT_GE(si.si_overrun, kCycles - 1);
- EXPECT_EQ(si.si_int, kSigvalue);
-
- // Kill the timer, then drain any additional signal it may have enqueued. We
- // can't do this before the preceding sigtimedwait because stopping or
- // deleting the timer resets si_overrun to 0.
- timer.reset();
- sigtimedwait(&mask, &si, &zero_ts);
-}
-
-TEST(IntervalTimerTest, IgnoredSignalCountsAsOverrun) {
- constexpr int kSigno = SIGUSR1;
- constexpr int kSigvalue = 42;
-
- // Ignore kSigno.
- struct sigaction sa = {};
- sa.sa_handler = SIG_IGN;
- const auto scoped_sigaction =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(kSigno, sa));
-
- // Unblock kSigno so that ignored signals will be discarded.
- sigset_t mask;
- sigemptyset(&mask);
- sigaddset(&mask, kSigno);
- auto scoped_sigmask =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_UNBLOCK, mask));
-
- struct sigevent sev = {};
- sev.sigev_notify = SIGEV_THREAD_ID;
- sev.sigev_signo = kSigno;
- sev.sigev_value.sival_int = kSigvalue;
- sev.sigev_notify_thread_id = gettid();
- auto timer = ASSERT_NO_ERRNO_AND_VALUE(TimerCreate(CLOCK_MONOTONIC, sev));
-
- constexpr absl::Duration kPeriod = absl::Seconds(1);
- constexpr int kCycles = 3;
- struct itimerspec its = {};
- its.it_value = its.it_interval = absl::ToTimespec(kPeriod);
- ASSERT_NO_ERRNO(timer.Set(0, its));
-
- // End the sleep one cycle short; we will sleep for one more cycle below.
- absl::SleepFor(kPeriod * (kCycles - 1));
-
- // Block kSigno so that ignored signals will be enqueued.
- scoped_sigmask.Release()();
- scoped_sigmask = ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_BLOCK, mask));
-
- // Sleep for 1 more cycle to give the timer time to send a signal.
- absl::SleepFor(kPeriod + kTimerSlack);
-
- // At least kCycles expirations should have occurred, resulting in kCycles-1
- // overruns (the last expiration sent the signal successfully).
- siginfo_t si;
- struct timespec zero_ts = absl::ToTimespec(absl::ZeroDuration());
- ASSERT_THAT(sigtimedwait(&mask, &si, &zero_ts),
- SyscallSucceedsWithValue(kSigno));
- EXPECT_EQ(si.si_signo, kSigno);
- EXPECT_EQ(si.si_code, SI_TIMER);
- EXPECT_EQ(si.si_timerid, timer.get());
- EXPECT_GE(si.si_overrun, kCycles - 1);
- EXPECT_EQ(si.si_int, kSigvalue);
-
- // Kill the timer, then drain any additional signal it may have enqueued. We
- // can't do this before the preceding sigtimedwait because stopping or
- // deleting the timer resets si_overrun to 0.
- timer.reset();
- sigtimedwait(&mask, &si, &zero_ts);
-}
-
-} // namespace
-} // namespace testing
-} // namespace gvisor
-
-int main(int argc, char** argv) {
- gvisor::testing::TestInit(&argc, &argv);
-
- if (absl::GetFlag(FLAGS_timers_test_sleep)) {
- while (true) {
- absl::SleepFor(absl::Seconds(10));
- }
- }
-
- return gvisor::testing::RunAllTests();
-}
diff --git a/test/syscalls/linux/tkill.cc b/test/syscalls/linux/tkill.cc
deleted file mode 100644
index 8d8ebbb24..000000000
--- a/test/syscalls/linux/tkill.cc
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <cerrno>
-#include <csignal>
-
-#include "gtest/gtest.h"
-#include "test/util/logging.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-static int tkill(pid_t tid, int sig) {
- int ret;
- do {
- // NOTE(b/25434735): tkill(2) could return EAGAIN for RT signals.
- ret = syscall(SYS_tkill, tid, sig);
- } while (ret == -1 && errno == EAGAIN);
- return ret;
-}
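gettid() is used below; on toolchains whose libc predates glibc 2.30 it is not exported, so a raw-syscall wrapper along these lines is commonly supplied. This is a sketch under that assumption; the helper actually used here may come from a shared test utility.

// Sketch only: raw-syscall gettid for libcs that do not export it.
static pid_t raw_gettid() { return static_cast<pid_t>(syscall(SYS_gettid)); }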
-
-TEST(TkillTest, InvalidTID) {
- EXPECT_THAT(tkill(-1, 0), SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(tkill(0, 0), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(TkillTest, ValidTID) {
- EXPECT_THAT(tkill(gettid(), 0), SyscallSucceeds());
-}
-
-void SigHandler(int sig, siginfo_t* info, void* context) {
- TEST_CHECK(sig == SIGRTMAX);
- TEST_CHECK(info->si_pid == getpid());
- TEST_CHECK(info->si_uid == getuid());
- TEST_CHECK(info->si_code == SI_TKILL);
-}
-
-// Test with a real signal. Regression test for b/24790092.
-TEST(TkillTest, ValidTIDAndRealSignal) {
- struct sigaction sa;
- sa.sa_sigaction = SigHandler;
- sigfillset(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO;
- ASSERT_THAT(sigaction(SIGRTMAX, &sa, nullptr), SyscallSucceeds());
- // InitGoogle blocks all RT signals, so we need to undo it.
- sigset_t unblock;
- sigemptyset(&unblock);
- sigaddset(&unblock, SIGRTMAX);
- ASSERT_THAT(sigprocmask(SIG_UNBLOCK, &unblock, nullptr), SyscallSucceeds());
- EXPECT_THAT(tkill(gettid(), SIGRTMAX), SyscallSucceeds());
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/truncate.cc b/test/syscalls/linux/truncate.cc
deleted file mode 100644
index 0f08d9996..000000000
--- a/test/syscalls/linux/truncate.cc
+++ /dev/null
@@ -1,248 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <signal.h>
-#include <sys/resource.h>
-#include <sys/stat.h>
-#include <sys/vfs.h>
-#include <time.h>
-#include <unistd.h>
-
-#include <iostream>
-#include <string>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/strings/string_view.h"
-#include "test/syscalls/linux/file_base.h"
-#include "test/util/capability_util.h"
-#include "test/util/cleanup.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-class FixtureTruncateTest : public FileTest {
- void SetUp() override { FileTest::SetUp(); }
-};
-
-TEST_F(FixtureTruncateTest, Truncate) {
- // Get the current rlimit and restore after test run.
- struct rlimit initial_lim;
- ASSERT_THAT(getrlimit(RLIMIT_FSIZE, &initial_lim), SyscallSucceeds());
- auto cleanup = Cleanup([&initial_lim] {
- EXPECT_THAT(setrlimit(RLIMIT_FSIZE, &initial_lim), SyscallSucceeds());
- });
-
- // Check that it starts at size zero.
- struct stat buf;
- ASSERT_THAT(fstat(test_file_fd_.get(), &buf), SyscallSucceeds());
- EXPECT_EQ(buf.st_size, 0);
-
- // Stay at size zero.
- EXPECT_THAT(truncate(test_file_name_.c_str(), 0), SyscallSucceeds());
- ASSERT_THAT(fstat(test_file_fd_.get(), &buf), SyscallSucceeds());
- EXPECT_EQ(buf.st_size, 0);
-
- // Grow to ten bytes.
- EXPECT_THAT(truncate(test_file_name_.c_str(), 10), SyscallSucceeds());
- ASSERT_THAT(fstat(test_file_fd_.get(), &buf), SyscallSucceeds());
- EXPECT_EQ(buf.st_size, 10);
-
- // Can't be truncated to a negative number.
- EXPECT_THAT(truncate(test_file_name_.c_str(), -1),
- SyscallFailsWithErrno(EINVAL));
-
- // Try growing past the file size limit.
- sigset_t new_mask;
- sigemptyset(&new_mask);
- sigaddset(&new_mask, SIGXFSZ);
- sigprocmask(SIG_BLOCK, &new_mask, nullptr);
- struct timespec timelimit;
- timelimit.tv_sec = 10;
- timelimit.tv_nsec = 0;
-
- struct rlimit setlim;
- setlim.rlim_cur = 1024;
- setlim.rlim_max = RLIM_INFINITY;
- ASSERT_THAT(setrlimit(RLIMIT_FSIZE, &setlim), SyscallSucceeds());
- EXPECT_THAT(truncate(test_file_name_.c_str(), 1025),
- SyscallFailsWithErrno(EFBIG));
- EXPECT_EQ(sigtimedwait(&new_mask, nullptr, &timelimit), SIGXFSZ);
- ASSERT_THAT(sigprocmask(SIG_UNBLOCK, &new_mask, nullptr), SyscallSucceeds());
-
- // Shrink back down to zero.
- EXPECT_THAT(truncate(test_file_name_.c_str(), 0), SyscallSucceeds());
- ASSERT_THAT(fstat(test_file_fd_.get(), &buf), SyscallSucceeds());
- EXPECT_EQ(buf.st_size, 0);
-}
-
-TEST_F(FixtureTruncateTest, Ftruncate) {
- // Get the current rlimit and restore after test run.
- struct rlimit initial_lim;
- ASSERT_THAT(getrlimit(RLIMIT_FSIZE, &initial_lim), SyscallSucceeds());
- auto cleanup = Cleanup([&initial_lim] {
- EXPECT_THAT(setrlimit(RLIMIT_FSIZE, &initial_lim), SyscallSucceeds());
- });
-
- // Check that it starts at size zero.
- struct stat buf;
- ASSERT_THAT(fstat(test_file_fd_.get(), &buf), SyscallSucceeds());
- EXPECT_EQ(buf.st_size, 0);
-
- // Stay at size zero.
- EXPECT_THAT(ftruncate(test_file_fd_.get(), 0), SyscallSucceeds());
- ASSERT_THAT(fstat(test_file_fd_.get(), &buf), SyscallSucceeds());
- EXPECT_EQ(buf.st_size, 0);
-
- // Grow to ten bytes.
- EXPECT_THAT(ftruncate(test_file_fd_.get(), 10), SyscallSucceeds());
- ASSERT_THAT(fstat(test_file_fd_.get(), &buf), SyscallSucceeds());
- EXPECT_EQ(buf.st_size, 10);
-
- // Can't be truncated to a negative number.
- EXPECT_THAT(ftruncate(test_file_fd_.get(), -1),
- SyscallFailsWithErrno(EINVAL));
-
- // Try growing past the file size limit.
- sigset_t new_mask;
- sigemptyset(&new_mask);
- sigaddset(&new_mask, SIGXFSZ);
- sigprocmask(SIG_BLOCK, &new_mask, nullptr);
- struct timespec timelimit;
- timelimit.tv_sec = 10;
- timelimit.tv_nsec = 0;
-
- struct rlimit setlim;
- setlim.rlim_cur = 1024;
- setlim.rlim_max = RLIM_INFINITY;
- ASSERT_THAT(setrlimit(RLIMIT_FSIZE, &setlim), SyscallSucceeds());
- EXPECT_THAT(ftruncate(test_file_fd_.get(), 1025),
- SyscallFailsWithErrno(EFBIG));
- EXPECT_EQ(sigtimedwait(&new_mask, nullptr, &timelimit), SIGXFSZ);
- ASSERT_THAT(sigprocmask(SIG_UNBLOCK, &new_mask, nullptr), SyscallSucceeds());
-
- // Shrink back down to zero.
- EXPECT_THAT(ftruncate(test_file_fd_.get(), 0), SyscallSucceeds());
- ASSERT_THAT(fstat(test_file_fd_.get(), &buf), SyscallSucceeds());
- EXPECT_EQ(buf.st_size, 0);
-}
-
-// Truncating a file down clears that portion of the file.
-TEST_F(FixtureTruncateTest, FtruncateShrinkGrow) {
- std::vector<char> buf(10, 'a');
- EXPECT_THAT(WriteFd(test_file_fd_.get(), buf.data(), buf.size()),
- SyscallSucceedsWithValue(buf.size()));
-
- // Shrink then regrow the file. This should clear the second half of the file.
- EXPECT_THAT(ftruncate(test_file_fd_.get(), 5), SyscallSucceeds());
- EXPECT_THAT(ftruncate(test_file_fd_.get(), 10), SyscallSucceeds());
-
- EXPECT_THAT(lseek(test_file_fd_.get(), 0, SEEK_SET), SyscallSucceeds());
-
- std::vector<char> buf2(10);
- EXPECT_THAT(ReadFd(test_file_fd_.get(), buf2.data(), buf2.size()),
- SyscallSucceedsWithValue(buf2.size()));
-
- std::vector<char> expect = {'a', 'a', 'a', 'a', 'a',
- '\0', '\0', '\0', '\0', '\0'};
- EXPECT_EQ(expect, buf2);
-}
-
-TEST(TruncateTest, TruncateDir) {
- auto temp_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- EXPECT_THAT(truncate(temp_dir.path().c_str(), 0),
- SyscallFailsWithErrno(EISDIR));
-}
-
-TEST(TruncateTest, FtruncateDir) {
- auto temp_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(temp_dir.path(), O_DIRECTORY | O_RDONLY));
- EXPECT_THAT(ftruncate(fd.get(), 0), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(TruncateTest, TruncateNonWriteable) {
- // Make sure we don't have CAP_DAC_OVERRIDE, since that allows the user to
- // always override write permissions.
- AutoCapability cap(CAP_DAC_OVERRIDE, false);
- auto temp_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), absl::string_view(), 0555 /* mode */));
- EXPECT_THAT(truncate(temp_file.path().c_str(), 0),
- SyscallFailsWithErrno(EACCES));
-}
-
-TEST(TruncateTest, FtruncateNonWriteable) {
- auto temp_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), absl::string_view(), 0555 /* mode */));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(temp_file.path(), O_RDONLY));
- EXPECT_THAT(ftruncate(fd.get(), 0), SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(TruncateTest, FtruncateWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- auto temp_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), absl::string_view(), 0555 /* mode */));
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(temp_file.path(), O_PATH));
- EXPECT_THAT(ftruncate(fd.get(), 0), AnyOf(SyscallFailsWithErrno(EBADF),
- SyscallFailsWithErrno(EINVAL)));
-}
-
-// ftruncate(2) should succeed as long as the file descriptor is writeable,
-// regardless of whether the file permissions allow writing.
-TEST(TruncateTest, FtruncateWithoutWritePermission) {
- // Drop capabilities that allow us to override file permissions.
- AutoCapability cap(CAP_DAC_OVERRIDE, false);
-
- // The only time we can open a file with flags forbidden by its permissions
- // is when we are creating the file. We cannot re-open with the same flags,
- // so we cannot restore an fd obtained from such an operation.
- const DisableSave ds;
- auto path = NewTempAbsPath();
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(path, O_RDWR | O_CREAT, 0444));
-
- // In goferfs, ftruncate may be converted to a remote truncate operation that
- // unavoidably requires write permission.
- SKIP_IF(IsRunningOnGvisor() && !ASSERT_NO_ERRNO_AND_VALUE(IsTmpfs(path)));
- ASSERT_THAT(ftruncate(fd.get(), 100), SyscallSucceeds());
-}
-
-TEST(TruncateTest, TruncateNonExist) {
- EXPECT_THAT(truncate("/foo/bar", 0), SyscallFailsWithErrno(ENOENT));
-}
-
-TEST(TruncateTest, FtruncateVirtualTmp) {
- auto temp_file = NewTempAbsPathInDir("/dev/shm");
- const DisableSave ds; // Incompatible permissions.
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(temp_file, O_RDWR | O_CREAT | O_EXCL, 0));
- EXPECT_THAT(ftruncate(fd.get(), 100), SyscallSucceeds());
-}
-
-// NOTE: There are additional truncate(2)/ftruncate(2) tests in mknod.cc
-// which are there to avoid running the tests on a number of different
-// filesystems which may not support mknod.
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/tuntap.cc b/test/syscalls/linux/tuntap.cc
deleted file mode 100644
index 1c74b9724..000000000
--- a/test/syscalls/linux/tuntap.cc
+++ /dev/null
@@ -1,538 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <arpa/inet.h>
-#include <linux/capability.h>
-#include <linux/if_arp.h>
-#include <linux/if_ether.h>
-#include <linux/if_tun.h>
-#include <netinet/ip.h>
-#include <netinet/ip_icmp.h>
-#include <poll.h>
-#include <sys/ioctl.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-
-#include <cstddef>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/strings/ascii.h"
-#include "absl/strings/str_split.h"
-#include "test/syscalls/linux/socket_netlink_route_util.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/capability_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-constexpr int kIPLen = 4;
-
-constexpr const char kDevNetTun[] = "/dev/net/tun";
-constexpr const char kTapName[] = "tap0";
-constexpr const char kTunName[] = "tun0";
-
-#define kTapIPAddr htonl(0x0a000001) /* Inet 10.0.0.1 */
-#define kTapPeerIPAddr htonl(0x0a000002) /* Inet 10.0.0.2 */
-
-constexpr const uint8_t kMacA[ETH_ALEN] = {0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA};
-constexpr const uint8_t kMacB[ETH_ALEN] = {0xBB, 0xBB, 0xBB, 0xBB, 0xBB, 0xBB};
-
-PosixErrorOr<std::set<std::string>> DumpLinkNames() {
- ASSIGN_OR_RETURN_ERRNO(auto links, DumpLinks());
- std::set<std::string> names;
- for (const auto& link : links) {
- names.emplace(link.name);
- }
- return names;
-}
-
-PosixErrorOr<Link> GetLinkByName(const std::string& name) {
- ASSIGN_OR_RETURN_ERRNO(auto links, DumpLinks());
- for (const auto& link : links) {
- if (link.name == name) {
- return link;
- }
- }
- return PosixError(ENOENT, "interface not found");
-}
-
-struct pihdr {
- uint16_t pi_flags;
- uint16_t pi_protocol;
-} __attribute__((packed));
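pihdr mirrors the kernel's packet-information header (struct tun_pi from <linux/if_tun.h>: a 16-bit flags field followed by a 16-bit protocol in network byte order). The illustrative check below is not part of the original file.

// Sketch only: pihdr should have the same size as the kernel's tun_pi header.
static_assert(sizeof(pihdr) == sizeof(struct tun_pi),
              "pihdr must match the TUN/TAP packet-info header");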
-
-struct ping_pkt {
- pihdr pi;
- struct ethhdr eth;
- struct iphdr ip;
- struct icmphdr icmp;
- char payload[64];
-} __attribute__((packed));
-
-ping_pkt CreatePingPacket(const uint8_t srcmac[ETH_ALEN], const in_addr_t srcip,
- const uint8_t dstmac[ETH_ALEN],
- const in_addr_t dstip) {
- ping_pkt pkt = {};
-
- pkt.pi.pi_protocol = htons(ETH_P_IP);
-
- memcpy(pkt.eth.h_dest, dstmac, sizeof(pkt.eth.h_dest));
- memcpy(pkt.eth.h_source, srcmac, sizeof(pkt.eth.h_source));
- pkt.eth.h_proto = htons(ETH_P_IP);
-
- pkt.ip.ihl = 5;
- pkt.ip.version = 4;
- pkt.ip.tos = 0;
- pkt.ip.tot_len = htons(sizeof(struct iphdr) + sizeof(struct icmphdr) +
- sizeof(pkt.payload));
- pkt.ip.id = 1;
- pkt.ip.frag_off = 1 << 6; // Do not fragment
- pkt.ip.ttl = 64;
- pkt.ip.protocol = IPPROTO_ICMP;
- pkt.ip.daddr = dstip;
- pkt.ip.saddr = srcip;
- pkt.ip.check = IPChecksum(pkt.ip);
-
- pkt.icmp.type = ICMP_ECHO;
- pkt.icmp.code = 0;
- pkt.icmp.checksum = 0;
- pkt.icmp.un.echo.sequence = 1;
- pkt.icmp.un.echo.id = 1;
-
- strncpy(pkt.payload, "abcd", sizeof(pkt.payload));
- pkt.icmp.checksum = ICMPChecksum(pkt.icmp, pkt.payload, sizeof(pkt.payload));
-
- return pkt;
-}
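IPChecksum and ICMPChecksum above are test utilities; conceptually they compute the RFC 1071 one's-complement Internet checksum. A standalone sketch of that algorithm (not the utilities' actual implementation) looks like this:

// Sketch of the RFC 1071 Internet checksum: sum the data as big-endian 16-bit
// words with end-around carry, then return the one's complement of the sum in
// network byte order.
uint16_t InternetChecksum(const void* data, size_t len) {
  const uint8_t* bytes = static_cast<const uint8_t*>(data);
  uint32_t sum = 0;
  while (len > 1) {
    sum += (static_cast<uint32_t>(bytes[0]) << 8) | bytes[1];
    bytes += 2;
    len -= 2;
  }
  if (len == 1) {
    sum += static_cast<uint32_t>(bytes[0]) << 8;
  }
  while (sum >> 16) {
    sum = (sum & 0xffff) + (sum >> 16);
  }
  return htons(static_cast<uint16_t>(~sum));
}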
-
-struct arp_pkt {
- pihdr pi;
- struct ethhdr eth;
- struct arphdr arp;
- uint8_t arp_sha[ETH_ALEN];
- uint8_t arp_spa[kIPLen];
- uint8_t arp_tha[ETH_ALEN];
- uint8_t arp_tpa[kIPLen];
-} __attribute__((packed));
-
-std::string CreateArpPacket(const uint8_t srcmac[ETH_ALEN],
- const in_addr_t srcip,
- const uint8_t dstmac[ETH_ALEN],
- const in_addr_t dstip) {
- std::string buffer;
- buffer.resize(sizeof(arp_pkt));
-
- arp_pkt* pkt = reinterpret_cast<arp_pkt*>(&buffer[0]);
- {
- pkt->pi.pi_protocol = htons(ETH_P_ARP);
-
- memcpy(pkt->eth.h_dest, kMacA, sizeof(pkt->eth.h_dest));
- memcpy(pkt->eth.h_source, kMacB, sizeof(pkt->eth.h_source));
- pkt->eth.h_proto = htons(ETH_P_ARP);
-
- pkt->arp.ar_hrd = htons(ARPHRD_ETHER);
- pkt->arp.ar_pro = htons(ETH_P_IP);
- pkt->arp.ar_hln = ETH_ALEN;
- pkt->arp.ar_pln = kIPLen;
- pkt->arp.ar_op = htons(ARPOP_REPLY);
-
- memcpy(pkt->arp_sha, srcmac, sizeof(pkt->arp_sha));
- memcpy(pkt->arp_spa, &srcip, sizeof(pkt->arp_spa));
- memcpy(pkt->arp_tha, dstmac, sizeof(pkt->arp_tha));
- memcpy(pkt->arp_tpa, &dstip, sizeof(pkt->arp_tpa));
- }
- return buffer;
-}
-
-} // namespace
-
-TEST(TuntapStaticTest, NetTunExists) {
- struct stat statbuf;
- ASSERT_THAT(stat(kDevNetTun, &statbuf), SyscallSucceeds());
- // Check that it's a character device with rw-rw-rw- permissions.
- EXPECT_EQ(statbuf.st_mode, S_IFCHR | 0666);
-}
-
-class TuntapTest : public ::testing::Test {
- protected:
- void SetUp() override {
- const bool have_net_admin_cap =
- ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN));
-
- if (have_net_admin_cap && !IsRunningOnGvisor()) {
- // gVisor always creates enabled/up'd interfaces, while Linux does not (as
- // observed in b/110961832). Some of the tests require the Linux stack to
- // notify the socket of any link-address-resolution failures. Those
- // notifications do not seem to show up when the loopback interface in the
- // namespace is down.
- auto link = ASSERT_NO_ERRNO_AND_VALUE(GetLinkByName("lo"));
- ASSERT_NO_ERRNO(LinkChangeFlags(link.index, IFF_UP, IFF_UP));
- }
- }
-};
-
-TEST_F(TuntapTest, CreateInterfaceNoCap) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN)));
-
- AutoCapability cap(CAP_NET_ADMIN, false);
-
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(kDevNetTun, O_RDWR));
-
- struct ifreq ifr = {};
- ifr.ifr_flags = IFF_TAP;
- strncpy(ifr.ifr_name, kTapName, IFNAMSIZ);
-
- EXPECT_THAT(ioctl(fd.get(), TUNSETIFF, &ifr), SyscallFailsWithErrno(EPERM));
-}
-
-TEST_F(TuntapTest, CreateFixedNameInterface) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN)));
-
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(kDevNetTun, O_RDWR));
-
- struct ifreq ifr_set = {};
- ifr_set.ifr_flags = IFF_TAP;
- strncpy(ifr_set.ifr_name, kTapName, IFNAMSIZ);
- EXPECT_THAT(ioctl(fd.get(), TUNSETIFF, &ifr_set),
- SyscallSucceedsWithValue(0));
-
- struct ifreq ifr_get = {};
- EXPECT_THAT(ioctl(fd.get(), TUNGETIFF, &ifr_get),
- SyscallSucceedsWithValue(0));
-
- struct ifreq ifr_expect = ifr_set;
- // See __tun_chr_ioctl() in net/drivers/tun.c.
- ifr_expect.ifr_flags |= IFF_NOFILTER;
-
- EXPECT_THAT(DumpLinkNames(),
- IsPosixErrorOkAndHolds(::testing::Contains(kTapName)));
- EXPECT_THAT(memcmp(&ifr_expect, &ifr_get, sizeof(ifr_get)), ::testing::Eq(0));
-}
-
-TEST_F(TuntapTest, CreateInterface) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN)));
-
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(kDevNetTun, O_RDWR));
-
- struct ifreq ifr = {};
- ifr.ifr_flags = IFF_TAP;
- // Empty ifr.ifr_name. Let kernel assign.
-
- EXPECT_THAT(ioctl(fd.get(), TUNSETIFF, &ifr), SyscallSucceedsWithValue(0));
-
- struct ifreq ifr_get = {};
- EXPECT_THAT(ioctl(fd.get(), TUNGETIFF, &ifr_get),
- SyscallSucceedsWithValue(0));
-
- std::string ifname = ifr_get.ifr_name;
- EXPECT_THAT(ifname, ::testing::StartsWith("tap"));
- EXPECT_THAT(DumpLinkNames(),
- IsPosixErrorOkAndHolds(::testing::Contains(ifname)));
-}
-
-TEST_F(TuntapTest, InvalidReadWrite) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN)));
-
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(kDevNetTun, O_RDWR));
-
- char buf[128] = {};
- EXPECT_THAT(read(fd.get(), buf, sizeof(buf)), SyscallFailsWithErrno(EBADFD));
- EXPECT_THAT(write(fd.get(), buf, sizeof(buf)), SyscallFailsWithErrno(EBADFD));
-}
-
-TEST_F(TuntapTest, WriteToDownDevice) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN)));
-
- // FIXME(b/110961832): gVisor always creates enabled/up'd interfaces.
- SKIP_IF(IsRunningOnGvisor());
-
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(kDevNetTun, O_RDWR));
-
- // Device created should be down by default.
- struct ifreq ifr = {};
- ifr.ifr_flags = IFF_TAP;
- EXPECT_THAT(ioctl(fd.get(), TUNSETIFF, &ifr), SyscallSucceedsWithValue(0));
-
- char buf[128] = {};
- EXPECT_THAT(write(fd.get(), buf, sizeof(buf)), SyscallFailsWithErrno(EIO));
-}
-
-PosixErrorOr<FileDescriptor> OpenAndAttachTap(const std::string& dev_name,
- const in_addr_t dev_addr) {
- // Interface creation.
- ASSIGN_OR_RETURN_ERRNO(FileDescriptor fd, Open(kDevNetTun, O_RDWR));
-
- struct ifreq ifr_set = {};
- ifr_set.ifr_flags = IFF_TAP;
- strncpy(ifr_set.ifr_name, dev_name.c_str(), IFNAMSIZ);
- if (ioctl(fd.get(), TUNSETIFF, &ifr_set) < 0) {
- return PosixError(errno);
- }
-
- ASSIGN_OR_RETURN_ERRNO(auto link, GetLinkByName(dev_name));
-
- const struct in_addr dev_ipv4_addr = {.s_addr = dev_addr};
- // Interface setup.
- EXPECT_NO_ERRNO(LinkAddLocalAddr(link.index, AF_INET, /*prefixlen=*/24,
- &dev_ipv4_addr, sizeof(dev_ipv4_addr)));
-
- if (!IsRunningOnGvisor()) {
- // FIXME(b/110961832): gVisor doesn't support setting MAC address on
- // interfaces yet.
- RETURN_IF_ERRNO(LinkSetMacAddr(link.index, kMacA, sizeof(kMacA)));
-
- // FIXME(b/110961832): gVisor always creates enabled/up'd interfaces.
- RETURN_IF_ERRNO(LinkChangeFlags(link.index, IFF_UP, IFF_UP));
- }
-
- return fd;
-}
-
-// This test sets up a TAP device and pings the kernel by sending an ICMP echo
-// request.
-//
-// It works as follows:
-// * Open /dev/net/tun and create the kTapName interface.
-// * Use rtnetlink to do the initial setup of the interface:
-//   * Assign the IP address 10.0.0.1/24 to the kernel.
-//   * MAC address: kMacA
-//   * Bring up the interface.
-// * Send an ICMP echo request (ping) packet from 10.0.0.2 (kMacB) to the
-//   kernel.
-// * Loop to receive packets from the TAP device/fd:
-//   * If the packet is an ICMP echo reply, stop and pass the test.
-//   * If the packet is an ARP request, respond with the canned reply and
-//     resend the ICMP request packet.
-TEST_F(TuntapTest, PingKernel) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN)));
-
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(OpenAndAttachTap(kTapName, kTapIPAddr));
- ping_pkt ping_req =
- CreatePingPacket(kMacB, kTapPeerIPAddr, kMacA, kTapIPAddr);
- std::string arp_rep =
- CreateArpPacket(kMacB, kTapPeerIPAddr, kMacA, kTapIPAddr);
-
- // Send ping, this would trigger an ARP request on Linux.
- EXPECT_THAT(write(fd.get(), &ping_req, sizeof(ping_req)),
- SyscallSucceedsWithValue(sizeof(ping_req)));
-
- // Receive loop to process inbound packets.
- struct inpkt {
- union {
- pihdr pi;
- ping_pkt ping;
- arp_pkt arp;
- };
- };
- while (1) {
- inpkt r = {};
- size_t n;
- EXPECT_THAT(n = read(fd.get(), &r, sizeof(r)), SyscallSucceeds());
-
- if (n < sizeof(pihdr)) {
- std::cerr << "Ignored packet, protocol: " << r.pi.pi_protocol
- << " len: " << n << std::endl;
- continue;
- }
-
- // Process ARP packet.
- if (n >= sizeof(arp_pkt) && r.pi.pi_protocol == htons(ETH_P_ARP)) {
- // Respond with canned ARP reply.
- EXPECT_THAT(write(fd.get(), arp_rep.data(), arp_rep.size()),
- SyscallSucceedsWithValue(arp_rep.size()));
- // The first ping request might have been dropped because the MAC address
- // was not yet in the ARP cache. Send it again.
- EXPECT_THAT(write(fd.get(), &ping_req, sizeof(ping_req)),
- SyscallSucceedsWithValue(sizeof(ping_req)));
- }
-
- // Process ping response packet.
- if (n >= sizeof(ping_pkt) && r.pi.pi_protocol == ping_req.pi.pi_protocol &&
- r.ping.ip.protocol == ping_req.ip.protocol &&
- !memcmp(&r.ping.ip.saddr, &ping_req.ip.daddr, kIPLen) &&
- !memcmp(&r.ping.ip.daddr, &ping_req.ip.saddr, kIPLen) &&
- r.ping.icmp.type == 0 && r.ping.icmp.code == 0) {
- // Ends and passes the test.
- break;
- }
- }
-}
-
-TEST_F(TuntapTest, SendUdpTriggersArpResolution) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN)));
-
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(OpenAndAttachTap(kTapName, kTapIPAddr));
-
- // Send a UDP packet to remote.
- int sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
- ASSERT_THAT(sock, SyscallSucceeds());
-
- struct sockaddr_in remote = {
- .sin_family = AF_INET,
- .sin_port = htons(42),
- .sin_addr = {.s_addr = kTapPeerIPAddr},
- };
- ASSERT_THAT(sendto(sock, "hello", 5, 0, AsSockAddr(&remote), sizeof(remote)),
- SyscallSucceeds());
-
- struct inpkt {
- union {
- pihdr pi;
- arp_pkt arp;
- };
- };
- while (1) {
- inpkt r = {};
- size_t n;
- EXPECT_THAT(n = read(fd.get(), &r, sizeof(r)), SyscallSucceeds());
-
- if (n < sizeof(pihdr)) {
- std::cerr << "Ignored packet, protocol: " << r.pi.pi_protocol
- << " len: " << n << std::endl;
- continue;
- }
-
- if (n >= sizeof(arp_pkt) && r.pi.pi_protocol == htons(ETH_P_ARP)) {
- break;
- }
- }
-}
-
-TEST_F(TuntapTest, TUNNoPacketInfo) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN)));
-
- // Interface creation.
- FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(kDevNetTun, O_RDWR));
-
- struct ifreq ifr_set = {};
- ifr_set.ifr_flags = IFF_TUN | IFF_NO_PI;
- strncpy(ifr_set.ifr_name, kTunName, IFNAMSIZ);
- EXPECT_THAT(ioctl(fd.get(), TUNSETIFF, &ifr_set), SyscallSucceeds());
-
- // Interface setup.
- auto link = ASSERT_NO_ERRNO_AND_VALUE(GetLinkByName(kTunName));
- const struct in_addr dev_ipv4_addr = {.s_addr = kTapIPAddr};
- EXPECT_NO_ERRNO(LinkAddLocalAddr(link.index, AF_INET, 24, &dev_ipv4_addr,
- sizeof(dev_ipv4_addr)));
-
- ping_pkt ping_req =
- CreatePingPacket(kMacB, kTapPeerIPAddr, kMacA, kTapIPAddr);
- size_t packet_size = sizeof(ping_req) - offsetof(ping_pkt, ip);
-
- // Send ICMP query
- EXPECT_THAT(write(fd.get(), &ping_req.ip, packet_size),
- SyscallSucceedsWithValue(packet_size));
-
- // Receive loop to process inbound packets.
- while (1) {
- ping_pkt ping_resp = {};
- EXPECT_THAT(read(fd.get(), &ping_resp.ip, packet_size),
- SyscallSucceedsWithValue(packet_size));
-
- // Process ping response packet.
- if (!memcmp(&ping_resp.ip.saddr, &ping_req.ip.daddr, kIPLen) &&
- !memcmp(&ping_resp.ip.daddr, &ping_req.ip.saddr, kIPLen) &&
- ping_resp.icmp.type == 0 && ping_resp.icmp.code == 0) {
- // Ends and passes the test.
- break;
- }
- }
-}
-
-// TCPBlockingConnectFailsArpResolution tests that a blocking TCP connect fails
-// when link address resolution fails for a routable but nonexistent peer.
-TEST_F(TuntapTest, TCPBlockingConnectFailsArpResolution) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN)));
-
- FileDescriptor sender =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_STREAM, IPPROTO_TCP));
-
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(OpenAndAttachTap(kTapName, kTapIPAddr));
-
- sockaddr_in connect_addr = {
- .sin_family = AF_INET,
- .sin_addr = {.s_addr = kTapPeerIPAddr},
- };
- ASSERT_THAT(connect(sender.get(),
- reinterpret_cast<const struct sockaddr*>(&connect_addr),
- sizeof(connect_addr)),
- SyscallFailsWithErrno(EHOSTUNREACH));
-}
-
-// TCPNonBlockingConnectFailsArpResolution tests that a non-blocking TCP
-// connect notifies poll of an error event when link address resolution fails
-// for a routable but nonexistent peer.
-TEST_F(TuntapTest, TCPNonBlockingConnectFailsArpResolution) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN)));
-
- FileDescriptor sender = ASSERT_NO_ERRNO_AND_VALUE(
- Socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, IPPROTO_TCP));
-
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(OpenAndAttachTap(kTapName, kTapIPAddr));
-
- sockaddr_in connect_addr = {
- .sin_family = AF_INET,
- .sin_addr = {.s_addr = kTapPeerIPAddr},
- };
- ASSERT_THAT(connect(sender.get(),
- reinterpret_cast<const struct sockaddr*>(&connect_addr),
- sizeof(connect_addr)),
- SyscallFailsWithErrno(EINPROGRESS));
-
- constexpr int kTimeout = 10000;
- struct pollfd pfd = {
- .fd = sender.get(),
- .events = POLLIN | POLLOUT,
- };
- ASSERT_THAT(poll(&pfd, 1, kTimeout), SyscallSucceedsWithValue(1));
- ASSERT_EQ(pfd.revents, POLLIN | POLLOUT | POLLHUP | POLLERR);
-
- ASSERT_THAT(connect(sender.get(),
- reinterpret_cast<const struct sockaddr*>(&connect_addr),
- sizeof(connect_addr)),
- SyscallFailsWithErrno(EHOSTUNREACH));
-}
-
-// Write hang bug found by syzkaller: b/155928773
-// https://syzkaller.appspot.com/bug?id=065b893bd8d1d04a4e0a1d53c578537cde1efe99
-TEST_F(TuntapTest, WriteHangBug155928773) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN)));
-
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(OpenAndAttachTap(kTapName, kTapIPAddr));
-
- int sock = socket(AF_INET, SOCK_DGRAM, 0);
- ASSERT_THAT(sock, SyscallSucceeds());
-
- struct sockaddr_in remote = {
- .sin_family = AF_INET,
- .sin_port = htons(42),
- .sin_addr = {.s_addr = kTapIPAddr},
- };
- // Return values do not matter in this test.
- connect(sock, AsSockAddr(&remote), sizeof(remote));
- write(sock, "hello", 5);
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/tuntap_hostinet.cc b/test/syscalls/linux/tuntap_hostinet.cc
deleted file mode 100644
index 1513fb9d5..000000000
--- a/test/syscalls/linux/tuntap_hostinet.cc
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(TuntapHostInetTest, NoNetTun) {
- SKIP_IF(!IsRunningOnGvisor());
- SKIP_IF(!IsRunningWithHostinet());
-
- struct stat statbuf;
- ASSERT_THAT(stat("/dev/net/tun", &statbuf), SyscallFailsWithErrno(ENOENT));
-}
-
-} // namespace
-} // namespace testing
-
-} // namespace gvisor
diff --git a/test/syscalls/linux/udp_bind.cc b/test/syscalls/linux/udp_bind.cc
deleted file mode 100644
index f68d78aa2..000000000
--- a/test/syscalls/linux/udp_bind.cc
+++ /dev/null
@@ -1,311 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <arpa/inet.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-
-#include "gtest/gtest.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-struct sockaddr_in_common {
- sa_family_t sin_family;
- in_port_t sin_port;
-};
-
-struct SendtoTestParam {
- // Human-readable description of the test parameter.
- std::string description;
-
- // Test is broken in gVisor; skip it.
- bool skip_on_gvisor;
-
- // Domain for the socket that will do the sending.
- int send_domain;
-
- // Address to bind for the socket that will do the sending.
- struct sockaddr_storage send_addr;
- socklen_t send_addr_len; // 0 for unbound.
-
- // Address to connect to for the socket that will do the sending.
- struct sockaddr_storage connect_addr;
- socklen_t connect_addr_len; // 0 for no connection.
-
- // Domain for the socket that will do the receiving.
- int recv_domain;
-
- // Address to bind for the socket that will do the receiving.
- struct sockaddr_storage recv_addr;
- socklen_t recv_addr_len;
-
- // Address to send to.
- struct sockaddr_storage sendto_addr;
- socklen_t sendto_addr_len;
-
- // Expected errno for the sendto call.
- std::vector<int> sendto_errnos; // empty on success.
-};
-
-class SendtoTest : public ::testing::TestWithParam<SendtoTestParam> {
- protected:
- SendtoTest() {
- // gUnit uses printf, so we will too.
- printf("Testing with %s\n", GetParam().description.c_str());
- }
-};
-
-TEST_P(SendtoTest, Sendto) {
- auto param = GetParam();
-
- SKIP_IF(param.skip_on_gvisor && IsRunningOnGvisor());
-
- const FileDescriptor s1 =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(param.send_domain, SOCK_DGRAM, 0));
- const FileDescriptor s2 =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(param.recv_domain, SOCK_DGRAM, 0));
-
- if (param.send_addr_len > 0) {
- ASSERT_THAT(
- bind(s1.get(), AsSockAddr(&param.send_addr), param.send_addr_len),
- SyscallSucceeds());
- }
-
- if (param.connect_addr_len > 0) {
- ASSERT_THAT(connect(s1.get(), AsSockAddr(&param.connect_addr),
- param.connect_addr_len),
- SyscallSucceeds());
- }
-
- ASSERT_THAT(bind(s2.get(), AsSockAddr(&param.recv_addr), param.recv_addr_len),
- SyscallSucceeds());
-
- struct sockaddr_storage real_recv_addr = {};
- socklen_t real_recv_addr_len = param.recv_addr_len;
- ASSERT_THAT(
- getsockname(s2.get(), AsSockAddr(&real_recv_addr), &real_recv_addr_len),
- SyscallSucceeds());
-
- ASSERT_EQ(real_recv_addr_len, param.recv_addr_len);
-
- int recv_port =
- reinterpret_cast<sockaddr_in_common*>(&real_recv_addr)->sin_port;
-
- struct sockaddr_storage sendto_addr = param.sendto_addr;
- reinterpret_cast<sockaddr_in_common*>(&sendto_addr)->sin_port = recv_port;
-
- char buf[20] = {};
- if (!param.sendto_errnos.empty()) {
- ASSERT_THAT(
- RetryEINTR(sendto)(s1.get(), buf, sizeof(buf), 0,
- AsSockAddr(&sendto_addr), param.sendto_addr_len),
- SyscallFailsWithErrno(ElementOf(param.sendto_errnos)));
- return;
- }
-
- ASSERT_THAT(
- RetryEINTR(sendto)(s1.get(), buf, sizeof(buf), 0,
- AsSockAddr(&sendto_addr), param.sendto_addr_len),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- struct sockaddr_storage got_addr = {};
- socklen_t got_addr_len = sizeof(sockaddr_storage);
- ASSERT_THAT(RetryEINTR(recvfrom)(s2.get(), buf, sizeof(buf), 0,
- AsSockAddr(&got_addr), &got_addr_len),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- ASSERT_GT(got_addr_len, sizeof(sockaddr_in_common));
- int got_port = reinterpret_cast<sockaddr_in_common*>(&got_addr)->sin_port;
-
- struct sockaddr_storage sender_addr = {};
- socklen_t sender_addr_len = sizeof(sockaddr_storage);
- ASSERT_THAT(getsockname(s1.get(), AsSockAddr(&sender_addr), &sender_addr_len),
- SyscallSucceeds());
-
- ASSERT_GT(sender_addr_len, sizeof(sockaddr_in_common));
- int sender_port =
- reinterpret_cast<sockaddr_in_common*>(&sender_addr)->sin_port;
-
- EXPECT_EQ(got_port, sender_port);
-}
-
-socklen_t Ipv4Addr(sockaddr_storage* addr, int port = 0) {
- auto addr4 = reinterpret_cast<sockaddr_in*>(addr);
- addr4->sin_family = AF_INET;
- addr4->sin_port = port;
- inet_pton(AF_INET, "127.0.0.1", &addr4->sin_addr.s_addr);
- return sizeof(struct sockaddr_in);
-}
-
-socklen_t Ipv6Addr(sockaddr_storage* addr, int port = 0) {
- auto addr6 = reinterpret_cast<sockaddr_in6*>(addr);
- addr6->sin6_family = AF_INET6;
- addr6->sin6_port = port;
- inet_pton(AF_INET6, "::1", &addr6->sin6_addr.s6_addr);
- return sizeof(struct sockaddr_in6);
-}
-
-socklen_t Ipv4MappedIpv6Addr(sockaddr_storage* addr, int port = 0) {
- auto addr6 = reinterpret_cast<sockaddr_in6*>(addr);
- addr6->sin6_family = AF_INET6;
- addr6->sin6_port = port;
- inet_pton(AF_INET6, "::ffff:127.0.0.1", &addr6->sin6_addr.s6_addr);
- return sizeof(struct sockaddr_in6);
-}
-
-INSTANTIATE_TEST_SUITE_P(
- UdpBindTest, SendtoTest,
- ::testing::Values(
- []() {
- SendtoTestParam param = {};
- param.description = "IPv4 mapped IPv6 sendto IPv4 mapped IPv6";
- param.send_domain = AF_INET6;
- param.send_addr_len = Ipv4MappedIpv6Addr(&param.send_addr);
- param.recv_domain = AF_INET6;
- param.recv_addr_len = Ipv4MappedIpv6Addr(&param.recv_addr);
- param.sendto_addr_len = Ipv4MappedIpv6Addr(&param.sendto_addr);
- return param;
- }(),
- []() {
- SendtoTestParam param = {};
- param.description = "IPv6 sendto IPv6";
- param.send_domain = AF_INET6;
- param.send_addr_len = Ipv6Addr(&param.send_addr);
- param.recv_domain = AF_INET6;
- param.recv_addr_len = Ipv6Addr(&param.recv_addr);
- param.sendto_addr_len = Ipv6Addr(&param.sendto_addr);
- return param;
- }(),
- []() {
- SendtoTestParam param = {};
- param.description = "IPv4 sendto IPv4";
- param.send_domain = AF_INET;
- param.send_addr_len = Ipv4Addr(&param.send_addr);
- param.recv_domain = AF_INET;
- param.recv_addr_len = Ipv4Addr(&param.recv_addr);
- param.sendto_addr_len = Ipv4Addr(&param.sendto_addr);
- return param;
- }(),
- []() {
- SendtoTestParam param = {};
- param.description = "IPv4 mapped IPv6 sendto IPv4";
- param.send_domain = AF_INET6;
- param.send_addr_len = Ipv4MappedIpv6Addr(&param.send_addr);
- param.recv_domain = AF_INET;
- param.recv_addr_len = Ipv4Addr(&param.recv_addr);
- param.sendto_addr_len = Ipv4MappedIpv6Addr(&param.sendto_addr);
- return param;
- }(),
- []() {
- SendtoTestParam param = {};
- param.description = "IPv4 sendto IPv4 mapped IPv6";
- param.send_domain = AF_INET;
- param.send_addr_len = Ipv4Addr(&param.send_addr);
- param.recv_domain = AF_INET6;
- param.recv_addr_len = Ipv4MappedIpv6Addr(&param.recv_addr);
- param.sendto_addr_len = Ipv4Addr(&param.sendto_addr);
- return param;
- }(),
- []() {
- SendtoTestParam param = {};
- param.description = "unbound IPv6 sendto IPv4 mapped IPv6";
- param.send_domain = AF_INET6;
- param.recv_domain = AF_INET6;
- param.recv_addr_len = Ipv4MappedIpv6Addr(&param.recv_addr);
- param.sendto_addr_len = Ipv4MappedIpv6Addr(&param.sendto_addr);
- return param;
- }(),
- []() {
- SendtoTestParam param = {};
- param.description = "unbound IPv6 sendto IPv4";
- param.send_domain = AF_INET6;
- param.recv_domain = AF_INET;
- param.recv_addr_len = Ipv4Addr(&param.recv_addr);
- param.sendto_addr_len = Ipv4MappedIpv6Addr(&param.sendto_addr);
- return param;
- }(),
- []() {
- SendtoTestParam param = {};
- param.description = "IPv6 sendto IPv4";
- param.send_domain = AF_INET6;
- param.send_addr_len = Ipv6Addr(&param.send_addr);
- param.recv_domain = AF_INET;
- param.recv_addr_len = Ipv4Addr(&param.recv_addr);
- param.sendto_addr_len = Ipv4MappedIpv6Addr(&param.sendto_addr);
- param.sendto_errnos = {ENETUNREACH};
- return param;
- }(),
- []() {
- SendtoTestParam param = {};
- param.description = "IPv4 mapped IPv6 sendto IPv6";
- param.send_domain = AF_INET6;
- param.send_addr_len = Ipv4MappedIpv6Addr(&param.send_addr);
- param.recv_domain = AF_INET6;
- param.recv_addr_len = Ipv6Addr(&param.recv_addr);
- param.sendto_addr_len = Ipv6Addr(&param.sendto_addr);
- param.sendto_errnos = {EAFNOSUPPORT};
- // The errno returned changed in Linux commit c8e6ad0829a723.
- param.sendto_errnos = {EINVAL, EAFNOSUPPORT};
- return param;
- }(),
- []() {
- SendtoTestParam param = {};
- param.description = "connected IPv4 mapped IPv6 sendto IPv6";
- param.send_domain = AF_INET6;
- param.connect_addr_len =
- Ipv4MappedIpv6Addr(&param.connect_addr, 5000);
- param.recv_domain = AF_INET6;
- param.recv_addr_len = Ipv6Addr(&param.recv_addr);
- param.sendto_addr_len = Ipv6Addr(&param.sendto_addr);
- // The errno returned changed in Linux commit c8e6ad0829a723.
- param.sendto_errnos = {EINVAL, EAFNOSUPPORT};
- return param;
- }(),
- []() {
- SendtoTestParam param = {};
- param.description = "connected IPv6 sendto IPv4 mapped IPv6";
- // TODO(igudger): Determine if this inconsistent behavior is worth
- // implementing.
- param.skip_on_gvisor = true;
- param.send_domain = AF_INET6;
- param.connect_addr_len = Ipv6Addr(&param.connect_addr, 5000);
- param.recv_domain = AF_INET6;
- param.recv_addr_len = Ipv4MappedIpv6Addr(&param.recv_addr);
- param.sendto_addr_len = Ipv4MappedIpv6Addr(&param.sendto_addr);
- return param;
- }(),
- []() {
- SendtoTestParam param = {};
- param.description = "connected IPv6 sendto IPv4";
- // TODO(igudger): Determine if this inconsistent behavior is worth
- // implementing.
- param.skip_on_gvisor = true;
- param.send_domain = AF_INET6;
- param.connect_addr_len = Ipv6Addr(&param.connect_addr, 5000);
- param.recv_domain = AF_INET;
- param.recv_addr_len = Ipv4Addr(&param.recv_addr);
- param.sendto_addr_len = Ipv4MappedIpv6Addr(&param.sendto_addr);
- return param;
- }()));
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/udp_socket.cc b/test/syscalls/linux/udp_socket.cc
deleted file mode 100644
index b40598767..000000000
--- a/test/syscalls/linux/udp_socket.cc
+++ /dev/null
@@ -1,2100 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <arpa/inet.h>
-#include <fcntl.h>
-#include <netinet/icmp6.h>
-#include <netinet/ip_icmp.h>
-
-#include <ctime>
-
-#ifdef __linux__
-#include <linux/errqueue.h>
-#include <linux/filter.h>
-#endif // __linux__
-#include <netinet/in.h>
-#include <poll.h>
-#include <sys/ioctl.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-
-#include "absl/strings/str_format.h"
-#ifndef SIOCGSTAMP
-#include <linux/sockios.h>
-#endif
-
-#include "gtest/gtest.h"
-#include "absl/base/macros.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/syscalls/linux/ip_socket_test_util.h"
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/posix_error.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// Fixture for tests parameterized by the address family to use (AF_INET and
-// AF_INET6) when creating sockets.
-class UdpSocketTest
- : public ::testing::TestWithParam<gvisor::testing::AddressFamily> {
- protected:
- // Creates two sockets that will be used by test cases.
- void SetUp() override;
-
- // Binds the socket bind_ to the loopback and updates bind_addr_.
- PosixError BindLoopback();
-
- // Binds the socket bind_ to Any and updates bind_addr_.
- PosixError BindAny();
-
- // Binds the given socket to address addr and verifies the bound address.
- PosixError BindSocket(int socket, struct sockaddr* addr);
-
- // Returns an any (wildcard) address initialized with port 0.
- struct sockaddr_storage InetAnyAddr();
-
- // Returns a loopback address initialized with port 0.
- struct sockaddr_storage InetLoopbackAddr();
-
- // Disconnects socket sockfd.
- void Disconnect(int sockfd);
-
- // Get family for the test.
- int GetFamily();
-
- // Socket used by Bind methods
- FileDescriptor bind_;
-
- // Second socket used for tests.
- FileDescriptor sock_;
-
- // Address for bind_ socket.
- struct sockaddr* bind_addr_;
-
- // Initialized to the length based on GetFamily().
- socklen_t addrlen_;
-
- // Storage for bind_addr_.
- struct sockaddr_storage bind_addr_storage_;
-
- private:
- // Helper to initialize addrlen_ for the test case.
- socklen_t GetAddrLength();
-};
-
-// Gets a pointer to the port component of the given address.
-uint16_t* Port(struct sockaddr_storage* addr) {
- switch (addr->ss_family) {
- case AF_INET: {
- auto sin = reinterpret_cast<struct sockaddr_in*>(addr);
- return &sin->sin_port;
- }
- case AF_INET6: {
- auto sin6 = reinterpret_cast<struct sockaddr_in6*>(addr);
- return &sin6->sin6_port;
- }
- }
-
- return nullptr;
-}
-
-// Sets addr port to "port".
-void SetPort(struct sockaddr_storage* addr, uint16_t port) {
- switch (addr->ss_family) {
- case AF_INET: {
- auto sin = reinterpret_cast<struct sockaddr_in*>(addr);
- sin->sin_port = port;
- break;
- }
- case AF_INET6: {
- auto sin6 = reinterpret_cast<struct sockaddr_in6*>(addr);
- sin6->sin6_port = port;
- break;
- }
- }
-}
-
-void UdpSocketTest::SetUp() {
- addrlen_ = GetAddrLength();
-
- bind_ =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetFamily(), SOCK_DGRAM, IPPROTO_UDP));
- memset(&bind_addr_storage_, 0, sizeof(bind_addr_storage_));
- bind_addr_ = AsSockAddr(&bind_addr_storage_);
-
- sock_ =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetFamily(), SOCK_DGRAM, IPPROTO_UDP));
-}
-
-int UdpSocketTest::GetFamily() {
- if (GetParam() == AddressFamily::kIpv4) {
- return AF_INET;
- }
- return AF_INET6;
-}
-
-PosixError UdpSocketTest::BindLoopback() {
- bind_addr_storage_ = InetLoopbackAddr();
- bind_addr_ = AsSockAddr(&bind_addr_storage_);
- return BindSocket(bind_.get(), bind_addr_);
-}
-
-PosixError UdpSocketTest::BindAny() {
- bind_addr_storage_ = InetAnyAddr();
- bind_addr_ = AsSockAddr(&bind_addr_storage_);
- return BindSocket(bind_.get(), bind_addr_);
-}
-
-PosixError UdpSocketTest::BindSocket(int socket, struct sockaddr* addr) {
- socklen_t len = sizeof(bind_addr_storage_);
-
- // Bind, then check that we get the right address.
- RETURN_ERROR_IF_SYSCALL_FAIL(bind(socket, addr, addrlen_));
-
- RETURN_ERROR_IF_SYSCALL_FAIL(getsockname(socket, addr, &len));
-
- if (addrlen_ != len) {
- return PosixError(
- EINVAL,
- absl::StrFormat("getsockname len: %u expected: %u", len, addrlen_));
- }
- return PosixError(0);
-}
-
-socklen_t UdpSocketTest::GetAddrLength() {
- struct sockaddr_storage addr;
- if (GetFamily() == AF_INET) {
- auto sin = reinterpret_cast<struct sockaddr_in*>(&addr);
- return sizeof(*sin);
- }
-
- auto sin6 = reinterpret_cast<struct sockaddr_in6*>(&addr);
- return sizeof(*sin6);
-}
-
-sockaddr_storage UdpSocketTest::InetAnyAddr() {
- struct sockaddr_storage addr;
- memset(&addr, 0, sizeof(addr));
- AsSockAddr(&addr)->sa_family = GetFamily();
-
- if (GetFamily() == AF_INET) {
- auto sin = reinterpret_cast<struct sockaddr_in*>(&addr);
- sin->sin_addr.s_addr = htonl(INADDR_ANY);
- sin->sin_port = htons(0);
- return addr;
- }
-
- auto sin6 = reinterpret_cast<struct sockaddr_in6*>(&addr);
- sin6->sin6_addr = IN6ADDR_ANY_INIT;
- sin6->sin6_port = htons(0);
- return addr;
-}
-
-sockaddr_storage UdpSocketTest::InetLoopbackAddr() {
- struct sockaddr_storage addr;
- memset(&addr, 0, sizeof(addr));
- AsSockAddr(&addr)->sa_family = GetFamily();
-
- if (GetFamily() == AF_INET) {
- auto sin = reinterpret_cast<struct sockaddr_in*>(&addr);
- sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- sin->sin_port = htons(0);
- return addr;
- }
- auto sin6 = reinterpret_cast<struct sockaddr_in6*>(&addr);
- sin6->sin6_addr = in6addr_loopback;
- sin6->sin6_port = htons(0);
- return addr;
-}
-
-void UdpSocketTest::Disconnect(int sockfd) {
- sockaddr_storage addr_storage = InetAnyAddr();
- sockaddr* addr = AsSockAddr(&addr_storage);
- socklen_t addrlen = sizeof(addr_storage);
-
- addr->sa_family = AF_UNSPEC;
- ASSERT_THAT(connect(sockfd, addr, addrlen), SyscallSucceeds());
-
- // Check that after disconnect the socket is bound to the ANY address.
- EXPECT_THAT(getsockname(sockfd, addr, &addrlen), SyscallSucceeds());
- if (GetParam() == AddressFamily::kIpv4) {
- auto addr_out = reinterpret_cast<struct sockaddr_in*>(addr);
- EXPECT_EQ(addrlen, sizeof(*addr_out));
- EXPECT_EQ(addr_out->sin_addr.s_addr, htonl(INADDR_ANY));
- } else {
- auto addr_out = reinterpret_cast<struct sockaddr_in6*>(addr);
- EXPECT_EQ(addrlen, sizeof(*addr_out));
- struct in6_addr any = IN6ADDR_ANY_INIT;
-
- EXPECT_EQ(memcmp(&addr_out->sin6_addr, &any, sizeof(in6_addr)), 0);
- }
-}
-
-TEST_P(UdpSocketTest, Creation) {
- FileDescriptor sock =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetFamily(), SOCK_DGRAM, IPPROTO_UDP));
- EXPECT_THAT(close(sock.release()), SyscallSucceeds());
-
- sock = ASSERT_NO_ERRNO_AND_VALUE(Socket(GetFamily(), SOCK_DGRAM, 0));
- EXPECT_THAT(close(sock.release()), SyscallSucceeds());
-
- ASSERT_THAT(socket(GetFamily(), SOCK_STREAM, IPPROTO_UDP), SyscallFails());
-}
-
-TEST_P(UdpSocketTest, Getsockname) {
- // Check that we're not bound.
- struct sockaddr_storage addr;
- socklen_t addrlen = sizeof(addr);
- EXPECT_THAT(getsockname(bind_.get(), AsSockAddr(&addr), &addrlen),
- SyscallSucceeds());
- EXPECT_EQ(addrlen, addrlen_);
- struct sockaddr_storage any = InetAnyAddr();
- EXPECT_EQ(memcmp(&addr, AsSockAddr(&any), addrlen_), 0);
-
- ASSERT_NO_ERRNO(BindLoopback());
-
- EXPECT_THAT(getsockname(bind_.get(), AsSockAddr(&addr), &addrlen),
- SyscallSucceeds());
-
- EXPECT_EQ(addrlen, addrlen_);
- EXPECT_EQ(memcmp(&addr, bind_addr_, addrlen_), 0);
-}
-
-TEST_P(UdpSocketTest, Getpeername) {
- ASSERT_NO_ERRNO(BindLoopback());
-
- // Check that we're not connected.
- struct sockaddr_storage addr;
- socklen_t addrlen = sizeof(addr);
- EXPECT_THAT(getpeername(sock_.get(), AsSockAddr(&addr), &addrlen),
- SyscallFailsWithErrno(ENOTCONN));
-
- // Connect, then check that we get the right address.
- ASSERT_THAT(connect(sock_.get(), bind_addr_, addrlen_), SyscallSucceeds());
-
- addrlen = sizeof(addr);
- EXPECT_THAT(getpeername(sock_.get(), AsSockAddr(&addr), &addrlen),
- SyscallSucceeds());
- EXPECT_EQ(addrlen, addrlen_);
- EXPECT_EQ(memcmp(&addr, bind_addr_, addrlen_), 0);
-}
-
-TEST_P(UdpSocketTest, SendNotConnected) {
- ASSERT_NO_ERRNO(BindLoopback());
-
- // Do send & write, they must fail.
- char buf[512];
- EXPECT_THAT(send(sock_.get(), buf, sizeof(buf), 0),
- SyscallFailsWithErrno(EDESTADDRREQ));
-
- EXPECT_THAT(write(sock_.get(), buf, sizeof(buf)),
- SyscallFailsWithErrno(EDESTADDRREQ));
-
- // Use sendto.
- ASSERT_THAT(sendto(sock_.get(), buf, sizeof(buf), 0, bind_addr_, addrlen_),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- // Check that we're bound now.
- struct sockaddr_storage addr;
- socklen_t addrlen = sizeof(addr);
- EXPECT_THAT(getsockname(sock_.get(), AsSockAddr(&addr), &addrlen),
- SyscallSucceeds());
- EXPECT_EQ(addrlen, addrlen_);
- EXPECT_NE(*Port(&addr), 0);
-}
-
-TEST_P(UdpSocketTest, ConnectBinds) {
- ASSERT_NO_ERRNO(BindLoopback());
-
- // Connect the socket.
- ASSERT_THAT(connect(sock_.get(), bind_addr_, addrlen_), SyscallSucceeds());
-
- // Check that we're bound now.
- struct sockaddr_storage addr;
- socklen_t addrlen = sizeof(addr);
- EXPECT_THAT(getsockname(sock_.get(), AsSockAddr(&addr), &addrlen),
- SyscallSucceeds());
- EXPECT_EQ(addrlen, addrlen_);
- EXPECT_NE(*Port(&addr), 0);
-}
-
-TEST_P(UdpSocketTest, ReceiveNotBound) {
- char buf[512];
- EXPECT_THAT(recv(sock_.get(), buf, sizeof(buf), MSG_DONTWAIT),
- SyscallFailsWithErrno(EWOULDBLOCK));
-}
-
-TEST_P(UdpSocketTest, Bind) {
- ASSERT_NO_ERRNO(BindLoopback());
-
- // Try to bind again.
- EXPECT_THAT(bind(bind_.get(), bind_addr_, addrlen_),
- SyscallFailsWithErrno(EINVAL));
-
- // Check that we're still bound to the original address.
- struct sockaddr_storage addr;
- socklen_t addrlen = sizeof(addr);
- EXPECT_THAT(getsockname(bind_.get(), AsSockAddr(&addr), &addrlen),
- SyscallSucceeds());
- EXPECT_EQ(addrlen, addrlen_);
- EXPECT_EQ(memcmp(&addr, bind_addr_, addrlen_), 0);
-}
-
-TEST_P(UdpSocketTest, BindInUse) {
- ASSERT_NO_ERRNO(BindLoopback());
-
- // Try to bind again.
- EXPECT_THAT(bind(sock_.get(), bind_addr_, addrlen_),
- SyscallFailsWithErrno(EADDRINUSE));
-}
-
-TEST_P(UdpSocketTest, ConnectWriteToInvalidPort) {
- // Discover a free unused port by creating a new UDP socket, binding it,
- // recording the just-bound port, and closing it. This is not guaranteed to
- // work, as it can still race with other UDP sockets trying to bind the same
- // port at the same time.
- struct sockaddr_storage addr_storage = InetLoopbackAddr();
- socklen_t addrlen = sizeof(addr_storage);
- struct sockaddr* addr = AsSockAddr(&addr_storage);
- FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetFamily(), SOCK_DGRAM, IPPROTO_UDP));
- ASSERT_THAT(bind(s.get(), addr, addrlen), SyscallSucceeds());
- ASSERT_THAT(getsockname(s.get(), addr, &addrlen), SyscallSucceeds());
- EXPECT_EQ(addrlen, addrlen_);
- EXPECT_NE(*Port(&addr_storage), 0);
- ASSERT_THAT(close(s.release()), SyscallSucceeds());
-
- // Now connect to the port that we just released. This should generate an
- // ECONNREFUSED error.
- ASSERT_THAT(connect(sock_.get(), addr, addrlen_), SyscallSucceeds());
- char buf[512];
- RandomizeBuffer(buf, sizeof(buf));
- // Send from sock_ to an unbound port.
- ASSERT_THAT(sendto(sock_.get(), buf, sizeof(buf), 0, addr, addrlen_),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- // Now verify that we got back an ICMP error of ECONNREFUSED.
- int err;
- socklen_t optlen = sizeof(err);
- ASSERT_THAT(getsockopt(sock_.get(), SOL_SOCKET, SO_ERROR, &err, &optlen),
- SyscallSucceeds());
- ASSERT_EQ(err, ECONNREFUSED);
- ASSERT_EQ(optlen, sizeof(err));
-}
-
-TEST_P(UdpSocketTest, ConnectSimultaneousWriteToInvalidPort) {
- // Discover a free unused port by creating a new UDP socket, binding it,
- // recording the just-bound port, and closing it. This is not guaranteed to
- // work, as it can still race with other UDP sockets trying to bind the same
- // port at the same time.
- struct sockaddr_storage addr_storage = InetLoopbackAddr();
- socklen_t addrlen = sizeof(addr_storage);
- struct sockaddr* addr = AsSockAddr(&addr_storage);
- FileDescriptor s =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetFamily(), SOCK_DGRAM, IPPROTO_UDP));
- ASSERT_THAT(bind(s.get(), addr, addrlen), SyscallSucceeds());
- ASSERT_THAT(getsockname(s.get(), addr, &addrlen), SyscallSucceeds());
- EXPECT_EQ(addrlen, addrlen_);
- EXPECT_NE(*Port(&addr_storage), 0);
- ASSERT_THAT(close(s.release()), SyscallSucceeds());
-
- // Now connect to the port that we just released.
- ScopedThread t([&] {
- ASSERT_THAT(connect(sock_.get(), addr, addrlen_), SyscallSucceeds());
- });
-
- char buf[512];
- RandomizeBuffer(buf, sizeof(buf));
- // Send from sock_ to an unbound port.
- ASSERT_THAT(sendto(sock_.get(), buf, sizeof(buf), 0, addr, addrlen_),
- SyscallSucceedsWithValue(sizeof(buf)));
- t.Join();
-}
-
-TEST_P(UdpSocketTest, ReceiveAfterConnect) {
- ASSERT_NO_ERRNO(BindLoopback());
- ASSERT_THAT(connect(sock_.get(), bind_addr_, addrlen_), SyscallSucceeds());
-
- // Send from sock_ to bind_
- char buf[512];
- RandomizeBuffer(buf, sizeof(buf));
- ASSERT_THAT(sendto(sock_.get(), buf, sizeof(buf), 0, bind_addr_, addrlen_),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- // Receive the data.
- char received[sizeof(buf)];
- EXPECT_THAT(recv(bind_.get(), received, sizeof(received), 0),
- SyscallSucceedsWithValue(sizeof(received)));
- EXPECT_EQ(memcmp(buf, received, sizeof(buf)), 0);
-}
-
-TEST_P(UdpSocketTest, ReceiveAfterDisconnect) {
- ASSERT_NO_ERRNO(BindLoopback());
-
- for (int i = 0; i < 2; i++) {
- // Connect sock_ to the bound address.
- ASSERT_THAT(connect(sock_.get(), bind_addr_, addrlen_), SyscallSucceeds());
-
- struct sockaddr_storage addr;
- socklen_t addrlen = sizeof(addr);
- EXPECT_THAT(getsockname(sock_.get(), AsSockAddr(&addr), &addrlen),
- SyscallSucceeds());
- EXPECT_EQ(addrlen, addrlen_);
-
- // Send from sock to bind_.
- char buf[512];
- RandomizeBuffer(buf, sizeof(buf));
-
- ASSERT_THAT(
- sendto(bind_.get(), buf, sizeof(buf), 0, AsSockAddr(&addr), addrlen),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- // Receive the data.
- char received[sizeof(buf)];
- EXPECT_THAT(recv(sock_.get(), received, sizeof(received), 0),
- SyscallSucceedsWithValue(sizeof(received)));
- EXPECT_EQ(memcmp(buf, received, sizeof(buf)), 0);
-
- // Disconnect sock_.
- struct sockaddr unspec = {};
- unspec.sa_family = AF_UNSPEC;
- ASSERT_THAT(connect(sock_.get(), &unspec, sizeof(unspec.sa_family)),
- SyscallSucceeds());
- }
-}
-
-TEST_P(UdpSocketTest, Connect) {
- ASSERT_NO_ERRNO(BindLoopback());
- ASSERT_THAT(connect(sock_.get(), bind_addr_, addrlen_), SyscallSucceeds());
-
- // Check that we're connected to the right peer.
- struct sockaddr_storage peer;
- socklen_t peerlen = sizeof(peer);
- EXPECT_THAT(getpeername(sock_.get(), AsSockAddr(&peer), &peerlen),
- SyscallSucceeds());
- EXPECT_EQ(peerlen, addrlen_);
- EXPECT_EQ(memcmp(&peer, bind_addr_, addrlen_), 0);
-
- // Try to bind after connect.
- struct sockaddr_storage any = InetAnyAddr();
- EXPECT_THAT(bind(sock_.get(), AsSockAddr(&any), addrlen_),
- SyscallFailsWithErrno(EINVAL));
-
- struct sockaddr_storage bind2_storage = InetLoopbackAddr();
- struct sockaddr* bind2_addr = AsSockAddr(&bind2_storage);
- FileDescriptor bind2 =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetFamily(), SOCK_DGRAM, IPPROTO_UDP));
- ASSERT_NO_ERRNO(BindSocket(bind2.get(), bind2_addr));
-
- // Try to connect again.
- EXPECT_THAT(connect(sock_.get(), bind2_addr, addrlen_), SyscallSucceeds());
-
- // Check that peer name changed.
- peerlen = sizeof(peer);
- EXPECT_THAT(getpeername(sock_.get(), AsSockAddr(&peer), &peerlen),
- SyscallSucceeds());
- EXPECT_EQ(peerlen, addrlen_);
- EXPECT_EQ(memcmp(&peer, bind2_addr, addrlen_), 0);
-}
-
-TEST_P(UdpSocketTest, ConnectAnyZero) {
- // TODO(138658473): Enable when we can connect to port 0 with gVisor.
- SKIP_IF(IsRunningOnGvisor());
-
- struct sockaddr_storage any = InetAnyAddr();
- EXPECT_THAT(connect(sock_.get(), AsSockAddr(&any), addrlen_),
- SyscallSucceeds());
-
- struct sockaddr_storage addr;
- socklen_t addrlen = sizeof(addr);
- EXPECT_THAT(getpeername(sock_.get(), AsSockAddr(&addr), &addrlen),
- SyscallFailsWithErrno(ENOTCONN));
-}
-
-TEST_P(UdpSocketTest, ConnectAnyWithPort) {
- ASSERT_NO_ERRNO(BindAny());
- ASSERT_THAT(connect(sock_.get(), bind_addr_, addrlen_), SyscallSucceeds());
-
- struct sockaddr_storage addr;
- socklen_t addrlen = sizeof(addr);
- EXPECT_THAT(getpeername(sock_.get(), AsSockAddr(&addr), &addrlen),
- SyscallSucceeds());
-}
-
-TEST_P(UdpSocketTest, DisconnectAfterConnectAny) {
- // TODO(138658473): Enable when we can connect to port 0 with gVisor.
- SKIP_IF(IsRunningOnGvisor());
- struct sockaddr_storage any = InetAnyAddr();
- EXPECT_THAT(connect(sock_.get(), AsSockAddr(&any), addrlen_),
- SyscallSucceeds());
-
- struct sockaddr_storage addr;
- socklen_t addrlen = sizeof(addr);
- EXPECT_THAT(getpeername(sock_.get(), AsSockAddr(&addr), &addrlen),
- SyscallFailsWithErrno(ENOTCONN));
-
- Disconnect(sock_.get());
-}
-
-TEST_P(UdpSocketTest, DisconnectAfterConnectAnyWithPort) {
- ASSERT_NO_ERRNO(BindAny());
- EXPECT_THAT(connect(sock_.get(), bind_addr_, addrlen_), SyscallSucceeds());
-
- struct sockaddr_storage addr;
- socklen_t addrlen = sizeof(addr);
- EXPECT_THAT(getpeername(sock_.get(), AsSockAddr(&addr), &addrlen),
- SyscallSucceeds());
-
- EXPECT_EQ(addrlen, addrlen_);
- EXPECT_EQ(*Port(&bind_addr_storage_), *Port(&addr));
-
- Disconnect(sock_.get());
-}
-
-TEST_P(UdpSocketTest, DisconnectAfterBind) {
- ASSERT_NO_ERRNO(BindLoopback());
-
- // Bind to the next port above bind_.
- struct sockaddr_storage addr_storage = InetLoopbackAddr();
- struct sockaddr* addr = AsSockAddr(&addr_storage);
- SetPort(&addr_storage, *Port(&bind_addr_storage_) + 1);
- ASSERT_NO_ERRNO(BindSocket(sock_.get(), addr));
-
- // Connect the socket.
- ASSERT_THAT(connect(sock_.get(), bind_addr_, addrlen_), SyscallSucceeds());
-
- struct sockaddr_storage unspec = {};
- unspec.ss_family = AF_UNSPEC;
- EXPECT_THAT(
- connect(sock_.get(), AsSockAddr(&unspec), sizeof(unspec.ss_family)),
- SyscallSucceeds());
-
- // Check that we're still bound.
- socklen_t addrlen = sizeof(unspec);
- EXPECT_THAT(getsockname(sock_.get(), AsSockAddr(&unspec), &addrlen),
- SyscallSucceeds());
-
- EXPECT_EQ(addrlen, addrlen_);
- EXPECT_EQ(memcmp(addr, &unspec, addrlen_), 0);
-
- addrlen = sizeof(addr_storage);
- EXPECT_THAT(getpeername(sock_.get(), addr, &addrlen),
- SyscallFailsWithErrno(ENOTCONN));
-}
-
-TEST_P(UdpSocketTest, BindToAnyConnnectToLocalhost) {
- ASSERT_NO_ERRNO(BindAny());
-
- struct sockaddr_storage addr_storage = InetLoopbackAddr();
- struct sockaddr* addr = AsSockAddr(&addr_storage);
- SetPort(&addr_storage, *Port(&bind_addr_storage_) + 1);
- socklen_t addrlen = sizeof(addr_storage);
-
- // Connect the socket.
- ASSERT_THAT(connect(bind_.get(), addr, addrlen_), SyscallSucceeds());
-
- EXPECT_THAT(getsockname(bind_.get(), addr, &addrlen), SyscallSucceeds());
-
- // If the socket is bound to ANY and connected to a loopback address,
- // getsockname() has to return the loopback address.
- if (GetParam() == AddressFamily::kIpv4) {
- auto addr_out = reinterpret_cast<struct sockaddr_in*>(addr);
- EXPECT_EQ(addrlen, sizeof(*addr_out));
- EXPECT_EQ(addr_out->sin_addr.s_addr, htonl(INADDR_LOOPBACK));
- } else {
- auto addr_out = reinterpret_cast<struct sockaddr_in6*>(addr);
- struct in6_addr loopback = IN6ADDR_LOOPBACK_INIT;
- EXPECT_EQ(addrlen, sizeof(*addr_out));
- EXPECT_EQ(memcmp(&addr_out->sin6_addr, &loopback, sizeof(in6_addr)), 0);
- }
-}
-
-TEST_P(UdpSocketTest, DisconnectAfterBindToAny) {
- ASSERT_NO_ERRNO(BindLoopback());
-
- struct sockaddr_storage any_storage = InetAnyAddr();
- struct sockaddr* any = AsSockAddr(&any_storage);
- SetPort(&any_storage, *Port(&bind_addr_storage_) + 1);
-
- ASSERT_NO_ERRNO(BindSocket(sock_.get(), any));
-
- // Connect the socket.
- ASSERT_THAT(connect(sock_.get(), bind_addr_, addrlen_), SyscallSucceeds());
-
- Disconnect(sock_.get());
-
- // Check that we're still bound.
- struct sockaddr_storage addr;
- socklen_t addrlen = sizeof(addr);
- EXPECT_THAT(getsockname(sock_.get(), AsSockAddr(&addr), &addrlen),
- SyscallSucceeds());
-
- EXPECT_EQ(addrlen, addrlen_);
- EXPECT_EQ(memcmp(&addr, any, addrlen), 0);
-
- addrlen = sizeof(addr);
- EXPECT_THAT(getpeername(sock_.get(), AsSockAddr(&addr), &addrlen),
- SyscallFailsWithErrno(ENOTCONN));
-}
-
-TEST_P(UdpSocketTest, Disconnect) {
- ASSERT_NO_ERRNO(BindLoopback());
-
- struct sockaddr_storage any_storage = InetAnyAddr();
- struct sockaddr* any = AsSockAddr(&any_storage);
- SetPort(&any_storage, *Port(&bind_addr_storage_) + 1);
- ASSERT_NO_ERRNO(BindSocket(sock_.get(), any));
-
- for (int i = 0; i < 2; i++) {
- // Try to connect again.
- EXPECT_THAT(connect(sock_.get(), bind_addr_, addrlen_), SyscallSucceeds());
-
- // Check that we're connected to the right peer.
- struct sockaddr_storage peer;
- socklen_t peerlen = sizeof(peer);
- EXPECT_THAT(getpeername(sock_.get(), AsSockAddr(&peer), &peerlen),
- SyscallSucceeds());
- EXPECT_EQ(peerlen, addrlen_);
- EXPECT_EQ(memcmp(&peer, bind_addr_, addrlen_), 0);
-
- // Try to disconnect.
- struct sockaddr_storage addr = {};
- addr.ss_family = AF_UNSPEC;
- EXPECT_THAT(connect(sock_.get(), AsSockAddr(&addr), sizeof(addr.ss_family)),
- SyscallSucceeds());
-
- peerlen = sizeof(peer);
- EXPECT_THAT(getpeername(sock_.get(), AsSockAddr(&peer), &peerlen),
- SyscallFailsWithErrno(ENOTCONN));
-
- // Check that we're still bound.
- socklen_t addrlen = sizeof(addr);
- EXPECT_THAT(getsockname(sock_.get(), AsSockAddr(&addr), &addrlen),
- SyscallSucceeds());
- EXPECT_EQ(addrlen, addrlen_);
- EXPECT_EQ(*Port(&addr), *Port(&any_storage));
- }
-}
-
-TEST_P(UdpSocketTest, ConnectBadAddress) {
- struct sockaddr addr = {};
- addr.sa_family = GetFamily();
- ASSERT_THAT(connect(sock_.get(), &addr, sizeof(addr.sa_family)),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_P(UdpSocketTest, SendToAddressOtherThanConnected) {
- ASSERT_NO_ERRNO(BindLoopback());
-
- struct sockaddr_storage addr_storage = InetAnyAddr();
- struct sockaddr* addr = AsSockAddr(&addr_storage);
- SetPort(&addr_storage, *Port(&bind_addr_storage_) + 1);
-
- ASSERT_THAT(connect(sock_.get(), bind_addr_, addrlen_), SyscallSucceeds());
-
- // Send to a different destination than we're connected to.
- char buf[512];
- EXPECT_THAT(sendto(sock_.get(), buf, sizeof(buf), 0, addr, addrlen_),
- SyscallSucceedsWithValue(sizeof(buf)));
-}
-
-TEST_P(UdpSocketTest, ConnectAndSendNoReceiver) {
- ASSERT_NO_ERRNO(BindLoopback());
- // Close the socket to release the port so that we get an ICMP error.
- ASSERT_THAT(close(bind_.release()), SyscallSucceeds());
-
- // Connect to loopback:bind_addr_, which should *hopefully* not be bound by a
- // UDP socket. There is no easy way to ensure that the UDP port is not bound
- // by another concurrently running test. *This is potentially flaky*.
- ASSERT_THAT(connect(sock_.get(), bind_addr_, addrlen_), SyscallSucceeds());
-
- char buf[512];
- EXPECT_THAT(send(sock_.get(), buf, sizeof(buf), 0),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- constexpr int kTimeout = 1000;
- // Poll to make sure we get the ICMP error back before issuing more writes.
- struct pollfd pfd = {sock_.get(), POLLERR, 0};
- ASSERT_THAT(RetryEINTR(poll)(&pfd, 1, kTimeout), SyscallSucceedsWithValue(1));
-
- // Next write should fail with ECONNREFUSED due to the ICMP error generated in
- // response to the previous write.
- ASSERT_THAT(send(sock_.get(), buf, sizeof(buf), 0),
- SyscallFailsWithErrno(ECONNREFUSED));
-
- // The next write should succeed again since the last write call would have
- // retrieved and cleared the socket error.
- ASSERT_THAT(send(sock_.get(), buf, sizeof(buf), 0), SyscallSucceeds());
-
- // Poll to make sure we get the ICMP error back before issuing more writes.
- ASSERT_THAT(RetryEINTR(poll)(&pfd, 1, kTimeout), SyscallSucceedsWithValue(1));
-
- // Next write should fail with ECONNREFUSED due to the ICMP error generated in
- // response to the previous write.
- ASSERT_THAT(send(sock_.get(), buf, sizeof(buf), 0),
- SyscallFailsWithErrno(ECONNREFUSED));
-}
-
-#ifdef __linux__
-TEST_P(UdpSocketTest, RecvErrorConnRefused) {
- // We will simulate an ICMP error and verify that we do receive that error via
- // recvmsg(MSG_ERRQUEUE).
- ASSERT_NO_ERRNO(BindLoopback());
- // Close the socket to release the port so that we get an ICMP error.
- ASSERT_THAT(close(bind_.release()), SyscallSucceeds());
-
- // Set IP_RECVERR socket option to enable error queueing.
- int v = kSockOptOn;
- socklen_t optlen = sizeof(v);
- int opt_level = SOL_IP;
- int opt_type = IP_RECVERR;
- if (GetParam() != AddressFamily::kIpv4) {
- opt_level = SOL_IPV6;
- opt_type = IPV6_RECVERR;
- }
- ASSERT_THAT(setsockopt(sock_.get(), opt_level, opt_type, &v, optlen),
- SyscallSucceeds());
-
- // Connect to loopback:bind_addr_, which should *hopefully* not be bound by a
- // UDP socket. There is no easy way to ensure that the UDP port is not bound
- // by another concurrently running test. *This is potentially flaky*.
- const int kBufLen = 300;
- ASSERT_THAT(connect(sock_.get(), bind_addr_, addrlen_), SyscallSucceeds());
- char buf[kBufLen];
- RandomizeBuffer(buf, sizeof(buf));
- // Send from sock_ to an unbound port. This should cause ECONNREFUSED.
- EXPECT_THAT(send(sock_.get(), buf, sizeof(buf), 0),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- // Dequeue error using recvmsg(MSG_ERRQUEUE).
- char got[kBufLen];
- struct iovec iov;
- iov.iov_base = reinterpret_cast<void*>(got);
- iov.iov_len = kBufLen;
-
- size_t control_buf_len = CMSG_SPACE(sizeof(sock_extended_err) + addrlen_);
- std::vector<char> control_buf(control_buf_len);
- struct sockaddr_storage remote;
- memset(&remote, 0, sizeof(remote));
- struct msghdr msg = {};
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- msg.msg_flags = 0;
- msg.msg_control = control_buf.data();
- msg.msg_controllen = control_buf_len;
- msg.msg_name = reinterpret_cast<void*>(&remote);
- msg.msg_namelen = addrlen_;
- ASSERT_THAT(recvmsg(sock_.get(), &msg, MSG_ERRQUEUE),
- SyscallSucceedsWithValue(kBufLen));
-
- // Check the contents of msg.
- EXPECT_EQ(memcmp(got, buf, sizeof(buf)), 0); // iovec check
- // TODO(b/176251997): The next check fails on the gvisor platform due to the
- // kernel bug.
- if (!IsRunningWithHostinet() || GvisorPlatform() == Platform::kPtrace ||
- GvisorPlatform() == Platform::kKVM ||
- GvisorPlatform() == Platform::kNative)
- EXPECT_NE(msg.msg_flags & MSG_ERRQUEUE, 0);
- EXPECT_EQ(memcmp(&remote, bind_addr_, addrlen_), 0);
-
- // Check the contents of the control message.
- struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
- ASSERT_NE(cmsg, nullptr);
- EXPECT_EQ(CMSG_NXTHDR(&msg, cmsg), nullptr);
- EXPECT_EQ(cmsg->cmsg_level, opt_level);
- EXPECT_EQ(cmsg->cmsg_type, opt_type);
-
- // Check the contents of socket error.
- struct sock_extended_err* sock_err =
- (struct sock_extended_err*)CMSG_DATA(cmsg);
- EXPECT_EQ(sock_err->ee_errno, ECONNREFUSED);
- if (GetParam() == AddressFamily::kIpv4) {
- EXPECT_EQ(sock_err->ee_origin, SO_EE_ORIGIN_ICMP);
- EXPECT_EQ(sock_err->ee_type, ICMP_DEST_UNREACH);
- EXPECT_EQ(sock_err->ee_code, ICMP_PORT_UNREACH);
- } else {
- EXPECT_EQ(sock_err->ee_origin, SO_EE_ORIGIN_ICMP6);
- EXPECT_EQ(sock_err->ee_type, ICMP6_DST_UNREACH);
- EXPECT_EQ(sock_err->ee_code, ICMP6_DST_UNREACH_NOPORT);
- }
-
- // Now verify that the socket error was cleared by recvmsg(MSG_ERRQUEUE).
- int err;
- optlen = sizeof(err);
- ASSERT_THAT(getsockopt(sock_.get(), SOL_SOCKET, SO_ERROR, &err, &optlen),
- SyscallSucceeds());
- ASSERT_EQ(err, 0);
- ASSERT_EQ(optlen, sizeof(err));
-}
-#endif // __linux__
-
-TEST_P(UdpSocketTest, ZerolengthWriteAllowed) {
- // TODO(gvisor.dev/issue/1202): Hostinet does not support zero length writes.
- SKIP_IF(IsRunningWithHostinet());
-
- ASSERT_NO_ERRNO(BindLoopback());
- // Connect to loopback:bind_addr_+1.
- struct sockaddr_storage addr_storage = InetLoopbackAddr();
- struct sockaddr* addr = AsSockAddr(&addr_storage);
- SetPort(&addr_storage, *Port(&bind_addr_storage_) + 1);
- ASSERT_THAT(connect(bind_.get(), addr, addrlen_), SyscallSucceeds());
-
- // Bind sock to loopback:bind_addr_+1.
- ASSERT_THAT(bind(sock_.get(), addr, addrlen_), SyscallSucceeds());
-
- char buf[3];
- // Send zero length packet from bind_ to sock_.
- ASSERT_THAT(write(bind_.get(), buf, 0), SyscallSucceedsWithValue(0));
-
- struct pollfd pfd = {sock_.get(), POLLIN, 0};
- ASSERT_THAT(RetryEINTR(poll)(&pfd, 1, /*timeout*/ 1000),
- SyscallSucceedsWithValue(1));
-
- // Receive the packet.
- char received[3];
- EXPECT_THAT(read(sock_.get(), received, sizeof(received)),
- SyscallSucceedsWithValue(0));
-}
-
-TEST_P(UdpSocketTest, ZerolengthWriteAllowedNonBlockRead) {
- // TODO(gvisor.dev/issue/1202): Hostinet does not support zero length writes.
- SKIP_IF(IsRunningWithHostinet());
-
- ASSERT_NO_ERRNO(BindLoopback());
-
- // Connect to loopback:bind_addr_port+1.
- struct sockaddr_storage addr_storage = InetLoopbackAddr();
- struct sockaddr* addr = AsSockAddr(&addr_storage);
- SetPort(&addr_storage, *Port(&bind_addr_storage_) + 1);
- ASSERT_THAT(connect(bind_.get(), addr, addrlen_), SyscallSucceeds());
-
- // Bind sock to loopback:bind_addr_port+1.
- ASSERT_THAT(bind(sock_.get(), addr, addrlen_), SyscallSucceeds());
-
- // Set sock to non-blocking.
- int opts = 0;
- ASSERT_THAT(opts = fcntl(sock_.get(), F_GETFL), SyscallSucceeds());
- ASSERT_THAT(fcntl(sock_.get(), F_SETFL, opts | O_NONBLOCK),
- SyscallSucceeds());
-
- char buf[3];
- // Send zero length packet from bind_ to sock_.
- ASSERT_THAT(write(bind_.get(), buf, 0), SyscallSucceedsWithValue(0));
-
- struct pollfd pfd = {sock_.get(), POLLIN, 0};
- ASSERT_THAT(RetryEINTR(poll)(&pfd, 1, /*timeout=*/1000),
- SyscallSucceedsWithValue(1));
-
- // Receive the packet.
- char received[3];
- EXPECT_THAT(read(sock_.get(), received, sizeof(received)),
- SyscallSucceedsWithValue(0));
- EXPECT_THAT(read(sock_.get(), received, sizeof(received)),
- SyscallFailsWithErrno(EAGAIN));
-}
-
-TEST_P(UdpSocketTest, SendAndReceiveNotConnected) {
- ASSERT_NO_ERRNO(BindLoopback());
-
- // Send some data to bind_.
- char buf[512];
- RandomizeBuffer(buf, sizeof(buf));
-
- ASSERT_THAT(sendto(sock_.get(), buf, sizeof(buf), 0, bind_addr_, addrlen_),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- // Receive the data.
- char received[sizeof(buf)];
- EXPECT_THAT(recv(bind_.get(), received, sizeof(received), 0),
- SyscallSucceedsWithValue(sizeof(received)));
- EXPECT_EQ(memcmp(buf, received, sizeof(buf)), 0);
-}
-
-TEST_P(UdpSocketTest, SendAndReceiveConnected) {
- ASSERT_NO_ERRNO(BindLoopback());
-
- // Connect to loopback:bind_addr_port+1.
- struct sockaddr_storage addr_storage = InetLoopbackAddr();
- struct sockaddr* addr = AsSockAddr(&addr_storage);
- SetPort(&addr_storage, *Port(&bind_addr_storage_) + 1);
- ASSERT_THAT(connect(bind_.get(), addr, addrlen_), SyscallSucceeds());
-
- // Bind sock to loopback:bind_addr_port+1.
- ASSERT_THAT(bind(sock_.get(), addr, addrlen_), SyscallSucceeds());
-
- // Send some data from sock to bind_.
- char buf[512];
- RandomizeBuffer(buf, sizeof(buf));
-
- ASSERT_THAT(sendto(sock_.get(), buf, sizeof(buf), 0, bind_addr_, addrlen_),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- // Receive the data.
- char received[sizeof(buf)];
- EXPECT_THAT(recv(bind_.get(), received, sizeof(received), 0),
- SyscallSucceedsWithValue(sizeof(received)));
- EXPECT_EQ(memcmp(buf, received, sizeof(buf)), 0);
-}
-
-TEST_P(UdpSocketTest, ReceiveFromNotConnected) {
- ASSERT_NO_ERRNO(BindLoopback());
-
- // Connect to loopback:bind_addr_port+1.
- struct sockaddr_storage addr_storage = InetLoopbackAddr();
- struct sockaddr* addr = AsSockAddr(&addr_storage);
- SetPort(&addr_storage, *Port(&bind_addr_storage_) + 1);
- ASSERT_THAT(connect(bind_.get(), addr, addrlen_), SyscallSucceeds());
-
- // Bind sock to loopback:bind_addr_port+2.
- struct sockaddr_storage addr2_storage = InetLoopbackAddr();
- struct sockaddr* addr2 = AsSockAddr(&addr2_storage);
- SetPort(&addr2_storage, *Port(&bind_addr_storage_) + 2);
- ASSERT_THAT(bind(sock_.get(), addr2, addrlen_), SyscallSucceeds());
-
- // Send some data from sock to bind_.
- char buf[512];
- ASSERT_THAT(sendto(sock_.get(), buf, sizeof(buf), 0, bind_addr_, addrlen_),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- // Check that the data isn't received because it was sent from a different
- // address than the one we're connected to.
- EXPECT_THAT(recv(bind_.get(), buf, sizeof(buf), MSG_DONTWAIT),
- SyscallFailsWithErrno(EWOULDBLOCK));
-}
-
-TEST_P(UdpSocketTest, ReceiveBeforeConnect) {
- ASSERT_NO_ERRNO(BindLoopback());
-
- // Bind sock to loopback:bind_addr_port+2.
- struct sockaddr_storage addr2_storage = InetLoopbackAddr();
- struct sockaddr* addr2 = AsSockAddr(&addr2_storage);
- SetPort(&addr2_storage, *Port(&bind_addr_storage_) + 2);
- ASSERT_THAT(bind(sock_.get(), addr2, addrlen_), SyscallSucceeds());
-
- // Send some data from sock to bind_.
- char buf[512];
- RandomizeBuffer(buf, sizeof(buf));
-
- ASSERT_THAT(sendto(sock_.get(), buf, sizeof(buf), 0, bind_addr_, addrlen_),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- // Connect to loopback:bind_addr_port+1.
- struct sockaddr_storage addr_storage = InetLoopbackAddr();
- struct sockaddr* addr = AsSockAddr(&addr_storage);
- SetPort(&addr_storage, *Port(&bind_addr_storage_) + 1);
- ASSERT_THAT(connect(bind_.get(), addr, addrlen_), SyscallSucceeds());
-
- // Receive the data. It works because it was sent before the connect.
- char received[sizeof(buf)];
- EXPECT_THAT(
- RecvTimeout(bind_.get(), received, sizeof(received), 1 /*timeout*/),
- IsPosixErrorOkAndHolds(sizeof(received)));
- EXPECT_EQ(memcmp(buf, received, sizeof(buf)), 0);
-
- // Send again. This time it should not be received.
- ASSERT_THAT(sendto(sock_.get(), buf, sizeof(buf), 0, bind_addr_, addrlen_),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- EXPECT_THAT(recv(bind_.get(), buf, sizeof(buf), MSG_DONTWAIT),
- SyscallFailsWithErrno(EWOULDBLOCK));
-}
-
-TEST_P(UdpSocketTest, ReceiveFrom) {
- ASSERT_NO_ERRNO(BindLoopback());
-
- // Connect to loopback:bind_addr_port+1.
- struct sockaddr_storage addr_storage = InetLoopbackAddr();
- struct sockaddr* addr = AsSockAddr(&addr_storage);
- SetPort(&addr_storage, *Port(&bind_addr_storage_) + 1);
- ASSERT_THAT(connect(bind_.get(), addr, addrlen_), SyscallSucceeds());
-
- // Bind sock to loopback:bind_addr_port+1.
- ASSERT_THAT(bind(sock_.get(), addr, addrlen_), SyscallSucceeds());
-
- // Send some data from sock to bind_.
- char buf[512];
- RandomizeBuffer(buf, sizeof(buf));
-
- ASSERT_THAT(sendto(sock_.get(), buf, sizeof(buf), 0, bind_addr_, addrlen_),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- // Receive the data and sender address.
- char received[sizeof(buf)];
- struct sockaddr_storage addr2;
- socklen_t addr2len = sizeof(addr2);
- EXPECT_THAT(recvfrom(bind_.get(), received, sizeof(received), 0,
- AsSockAddr(&addr2), &addr2len),
- SyscallSucceedsWithValue(sizeof(received)));
- EXPECT_EQ(memcmp(buf, received, sizeof(buf)), 0);
- EXPECT_EQ(addr2len, addrlen_);
- EXPECT_EQ(memcmp(addr, &addr2, addrlen_), 0);
-}
-
-TEST_P(UdpSocketTest, Listen) {
- ASSERT_THAT(listen(sock_.get(), SOMAXCONN),
- SyscallFailsWithErrno(EOPNOTSUPP));
-}
-
-TEST_P(UdpSocketTest, Accept) {
- ASSERT_THAT(accept(sock_.get(), nullptr, nullptr),
- SyscallFailsWithErrno(EOPNOTSUPP));
-}
-
-// This test validates that a read shutdown with pending data allows the read
-// to proceed with the data before returning EAGAIN.
-TEST_P(UdpSocketTest, ReadShutdownNonblockPendingData) {
- ASSERT_NO_ERRNO(BindLoopback());
-
- // Connect to loopback:bind_addr_port+1.
- struct sockaddr_storage addr_storage = InetLoopbackAddr();
- struct sockaddr* addr = AsSockAddr(&addr_storage);
- SetPort(&addr_storage, *Port(&bind_addr_storage_) + 1);
- ASSERT_THAT(connect(bind_.get(), addr, addrlen_), SyscallSucceeds());
-
- // Bind to loopback:bind_addr_port+1 and connect to bind_addr_.
- ASSERT_THAT(bind(sock_.get(), addr, addrlen_), SyscallSucceeds());
- ASSERT_THAT(connect(sock_.get(), bind_addr_, addrlen_), SyscallSucceeds());
-
- // Verify that we get EWOULDBLOCK when there is nothing to read.
- char received[512];
- EXPECT_THAT(recv(bind_.get(), received, sizeof(received), MSG_DONTWAIT),
- SyscallFailsWithErrno(EWOULDBLOCK));
-
- const char* buf = "abc";
- EXPECT_THAT(write(sock_.get(), buf, 3), SyscallSucceedsWithValue(3));
-
- int opts = 0;
- ASSERT_THAT(opts = fcntl(bind_.get(), F_GETFL), SyscallSucceeds());
- ASSERT_THAT(fcntl(bind_.get(), F_SETFL, opts | O_NONBLOCK),
- SyscallSucceeds());
- ASSERT_THAT(opts = fcntl(bind_.get(), F_GETFL), SyscallSucceeds());
- ASSERT_NE(opts & O_NONBLOCK, 0);
-
- EXPECT_THAT(shutdown(bind_.get(), SHUT_RD), SyscallSucceeds());
-
- struct pollfd pfd = {bind_.get(), POLLIN, 0};
- ASSERT_THAT(RetryEINTR(poll)(&pfd, 1, /*timeout=*/1000),
- SyscallSucceedsWithValue(1));
-
- // We should get the data even though read has been shutdown.
- EXPECT_THAT(RecvTimeout(bind_.get(), received, 2 /*buf_size*/, 1 /*timeout*/),
- IsPosixErrorOkAndHolds(2));
-
- // Because we read less than the entire packet length and this is a
- // packet-based socket, any subsequent reads should return EWOULDBLOCK.
- EXPECT_THAT(recv(bind_.get(), received, 1, 0),
- SyscallFailsWithErrno(EWOULDBLOCK));
-}
-
- // This test validates that even after a socket is shut down, reconnecting it
- // resets the shutdown state.
-TEST_P(UdpSocketTest, ReadShutdownSameSocketResetsShutdownState) {
- char received[512];
- EXPECT_THAT(recv(bind_.get(), received, sizeof(received), MSG_DONTWAIT),
- SyscallFailsWithErrno(EWOULDBLOCK));
-
- EXPECT_THAT(shutdown(bind_.get(), SHUT_RD), SyscallFailsWithErrno(ENOTCONN));
-
- EXPECT_THAT(recv(bind_.get(), received, sizeof(received), MSG_DONTWAIT),
- SyscallFailsWithErrno(EWOULDBLOCK));
-
- // Connect the socket, then try to shutdown again.
- ASSERT_NO_ERRNO(BindLoopback());
-
- // Connect to loopback:bind_addr_port+1.
- struct sockaddr_storage addr_storage = InetLoopbackAddr();
- struct sockaddr* addr = AsSockAddr(&addr_storage);
- SetPort(&addr_storage, *Port(&bind_addr_storage_) + 1);
- ASSERT_THAT(connect(bind_.get(), addr, addrlen_), SyscallSucceeds());
-
- EXPECT_THAT(recv(bind_.get(), received, sizeof(received), MSG_DONTWAIT),
- SyscallFailsWithErrno(EWOULDBLOCK));
-}
-
-TEST_P(UdpSocketTest, ReadShutdown) {
- // TODO(gvisor.dev/issue/1202): Calling recv() after shutdown without
- // MSG_DONTWAIT blocks indefinitely.
- SKIP_IF(IsRunningWithHostinet());
-
- ASSERT_NO_ERRNO(BindLoopback());
-
- char received[512];
- EXPECT_THAT(recv(sock_.get(), received, sizeof(received), MSG_DONTWAIT),
- SyscallFailsWithErrno(EWOULDBLOCK));
-
- EXPECT_THAT(shutdown(sock_.get(), SHUT_RD), SyscallFailsWithErrno(ENOTCONN));
-
- EXPECT_THAT(recv(sock_.get(), received, sizeof(received), MSG_DONTWAIT),
- SyscallFailsWithErrno(EWOULDBLOCK));
-
- // Connect the socket, then try to shutdown again.
- ASSERT_THAT(connect(sock_.get(), bind_addr_, addrlen_), SyscallSucceeds());
-
- EXPECT_THAT(recv(sock_.get(), received, sizeof(received), MSG_DONTWAIT),
- SyscallFailsWithErrno(EWOULDBLOCK));
-
- EXPECT_THAT(shutdown(sock_.get(), SHUT_RD), SyscallSucceeds());
-
- EXPECT_THAT(recv(sock_.get(), received, sizeof(received), 0),
- SyscallSucceedsWithValue(0));
-}
-
-TEST_P(UdpSocketTest, ReadShutdownDifferentThread) {
- // TODO(gvisor.dev/issue/1202): Calling recv() after shutdown without
- // MSG_DONTWAIT blocks indefinitely.
- SKIP_IF(IsRunningWithHostinet());
- ASSERT_NO_ERRNO(BindLoopback());
-
- char received[512];
- EXPECT_THAT(recv(sock_.get(), received, sizeof(received), MSG_DONTWAIT),
- SyscallFailsWithErrno(EWOULDBLOCK));
-
- // Connect the socket, then shutdown from another thread.
- ASSERT_THAT(connect(sock_.get(), bind_addr_, addrlen_), SyscallSucceeds());
-
- EXPECT_THAT(recv(sock_.get(), received, sizeof(received), MSG_DONTWAIT),
- SyscallFailsWithErrno(EWOULDBLOCK));
-
- ScopedThread t([&] {
- absl::SleepFor(absl::Milliseconds(200));
- EXPECT_THAT(shutdown(sock_.get(), SHUT_RD), SyscallSucceeds());
- });
- EXPECT_THAT(RetryEINTR(recv)(sock_.get(), received, sizeof(received), 0),
- SyscallSucceedsWithValue(0));
- t.Join();
-
- EXPECT_THAT(RetryEINTR(recv)(sock_.get(), received, sizeof(received), 0),
- SyscallSucceedsWithValue(0));
-}
-
-TEST_P(UdpSocketTest, WriteShutdown) {
- ASSERT_NO_ERRNO(BindLoopback());
- EXPECT_THAT(shutdown(sock_.get(), SHUT_WR), SyscallFailsWithErrno(ENOTCONN));
- ASSERT_THAT(connect(sock_.get(), bind_addr_, addrlen_), SyscallSucceeds());
- EXPECT_THAT(shutdown(sock_.get(), SHUT_WR), SyscallSucceeds());
-}
-
-TEST_P(UdpSocketTest, SynchronousReceive) {
- ASSERT_NO_ERRNO(BindLoopback());
-
- // Send some data to bind_ from another thread.
- char buf[512];
- RandomizeBuffer(buf, sizeof(buf));
-
- // Receive the data prior to actually starting the other thread.
- char received[512];
- EXPECT_THAT(
- RetryEINTR(recv)(bind_.get(), received, sizeof(received), MSG_DONTWAIT),
- SyscallFailsWithErrno(EWOULDBLOCK));
-
- // Start the thread.
- ScopedThread t([&] {
- absl::SleepFor(absl::Milliseconds(200));
- ASSERT_THAT(sendto(sock_.get(), buf, sizeof(buf), 0, this->bind_addr_,
- this->addrlen_),
- SyscallSucceedsWithValue(sizeof(buf)));
- });
-
- EXPECT_THAT(RetryEINTR(recv)(bind_.get(), received, sizeof(received), 0),
- SyscallSucceedsWithValue(512));
- EXPECT_EQ(memcmp(buf, received, sizeof(buf)), 0);
-}
-
-TEST_P(UdpSocketTest, BoundaryPreserved_SendRecv) {
- ASSERT_NO_ERRNO(BindLoopback());
-
- // Send 3 packets from sock to bind_.
- constexpr int psize = 100;
- char buf[3 * psize];
- RandomizeBuffer(buf, sizeof(buf));
-
- for (int i = 0; i < 3; ++i) {
- ASSERT_THAT(
- sendto(sock_.get(), buf + i * psize, psize, 0, bind_addr_, addrlen_),
- SyscallSucceedsWithValue(psize));
- }
-
- // Receive the data as 3 separate packets.
- char received[6 * psize];
- for (int i = 0; i < 3; ++i) {
- EXPECT_THAT(recv(bind_.get(), received + i * psize, 3 * psize, 0),
- SyscallSucceedsWithValue(psize));
- }
- EXPECT_EQ(memcmp(buf, received, 3 * psize), 0);
-}
-
-TEST_P(UdpSocketTest, BoundaryPreserved_WritevReadv) {
- ASSERT_NO_ERRNO(BindLoopback());
-
- // Direct writes from sock to bind_.
- ASSERT_THAT(connect(sock_.get(), bind_addr_, addrlen_), SyscallSucceeds());
-
- // Send 2 packets from sock to bind_, where each packet's data consists of
- // 2 discontiguous iovecs.
- constexpr size_t kPieceSize = 100;
- char buf[4 * kPieceSize];
- RandomizeBuffer(buf, sizeof(buf));
-
- for (int i = 0; i < 2; i++) {
- struct iovec iov[2];
- for (int j = 0; j < 2; j++) {
- iov[j].iov_base = reinterpret_cast<void*>(
- reinterpret_cast<uintptr_t>(buf) + (i + 2 * j) * kPieceSize);
- iov[j].iov_len = kPieceSize;
- }
- ASSERT_THAT(writev(sock_.get(), iov, 2),
- SyscallSucceedsWithValue(2 * kPieceSize));
- }
-
- // Receive the data as 2 separate packets.
- char received[6 * kPieceSize];
- for (int i = 0; i < 2; i++) {
- struct iovec iov[3];
- for (int j = 0; j < 3; j++) {
- iov[j].iov_base = reinterpret_cast<void*>(
- reinterpret_cast<uintptr_t>(received) + (i + 2 * j) * kPieceSize);
- iov[j].iov_len = kPieceSize;
- }
- ASSERT_THAT(readv(bind_.get(), iov, 3),
- SyscallSucceedsWithValue(2 * kPieceSize));
- }
- EXPECT_EQ(memcmp(buf, received, 4 * kPieceSize), 0);
-}
-
-TEST_P(UdpSocketTest, BoundaryPreserved_SendMsgRecvMsg) {
- ASSERT_NO_ERRNO(BindLoopback());
-
- // Send 2 packets from sock to bind_, where each packet's data consists of
- // 2 discontiguous iovecs.
- constexpr size_t kPieceSize = 100;
- char buf[4 * kPieceSize];
- RandomizeBuffer(buf, sizeof(buf));
-
- for (int i = 0; i < 2; i++) {
- struct iovec iov[2];
- for (int j = 0; j < 2; j++) {
- iov[j].iov_base = reinterpret_cast<void*>(
- reinterpret_cast<uintptr_t>(buf) + (i + 2 * j) * kPieceSize);
- iov[j].iov_len = kPieceSize;
- }
- struct msghdr msg = {};
- msg.msg_name = bind_addr_;
- msg.msg_namelen = addrlen_;
- msg.msg_iov = iov;
- msg.msg_iovlen = 2;
- ASSERT_THAT(sendmsg(sock_.get(), &msg, 0),
- SyscallSucceedsWithValue(2 * kPieceSize));
- }
-
- // Receive the data as 2 separate packets.
- char received[6 * kPieceSize];
- for (int i = 0; i < 2; i++) {
- struct iovec iov[3];
- for (int j = 0; j < 3; j++) {
- iov[j].iov_base = reinterpret_cast<void*>(
- reinterpret_cast<uintptr_t>(received) + (i + 2 * j) * kPieceSize);
- iov[j].iov_len = kPieceSize;
- }
- struct msghdr msg = {};
- msg.msg_iov = iov;
- msg.msg_iovlen = 3;
- ASSERT_THAT(recvmsg(bind_.get(), &msg, 0),
- SyscallSucceedsWithValue(2 * kPieceSize));
- }
- EXPECT_EQ(memcmp(buf, received, 4 * kPieceSize), 0);
-}
-
-TEST_P(UdpSocketTest, FIONREADShutdown) {
- ASSERT_NO_ERRNO(BindLoopback());
-
- int n = -1;
- EXPECT_THAT(ioctl(sock_.get(), FIONREAD, &n), SyscallSucceedsWithValue(0));
- EXPECT_EQ(n, 0);
-
-  // A UDP socket must be connected before it can be shut down.
- ASSERT_THAT(connect(sock_.get(), bind_addr_, addrlen_), SyscallSucceeds());
-
- n = -1;
- EXPECT_THAT(ioctl(sock_.get(), FIONREAD, &n), SyscallSucceedsWithValue(0));
- EXPECT_EQ(n, 0);
-
- EXPECT_THAT(shutdown(sock_.get(), SHUT_RD), SyscallSucceeds());
-
- n = -1;
- EXPECT_THAT(ioctl(sock_.get(), FIONREAD, &n), SyscallSucceedsWithValue(0));
- EXPECT_EQ(n, 0);
-}
-
-TEST_P(UdpSocketTest, FIONREADWriteShutdown) {
- int n = -1;
- EXPECT_THAT(ioctl(bind_.get(), FIONREAD, &n), SyscallSucceedsWithValue(0));
- EXPECT_EQ(n, 0);
-
- ASSERT_NO_ERRNO(BindLoopback());
-
-  // A UDP socket must be connected before it can be shut down.
- ASSERT_THAT(connect(bind_.get(), bind_addr_, addrlen_), SyscallSucceeds());
-
- n = -1;
- EXPECT_THAT(ioctl(bind_.get(), FIONREAD, &n), SyscallSucceedsWithValue(0));
- EXPECT_EQ(n, 0);
-
- const char str[] = "abc";
- ASSERT_THAT(send(bind_.get(), str, sizeof(str), 0),
- SyscallSucceedsWithValue(sizeof(str)));
-
- struct pollfd pfd = {bind_.get(), POLLIN, 0};
- ASSERT_THAT(RetryEINTR(poll)(&pfd, 1, /*timeout=*/1000),
- SyscallSucceedsWithValue(1));
-
- n = -1;
- EXPECT_THAT(ioctl(bind_.get(), FIONREAD, &n), SyscallSucceedsWithValue(0));
- EXPECT_EQ(n, sizeof(str));
-
- EXPECT_THAT(shutdown(bind_.get(), SHUT_RD), SyscallSucceeds());
-
- n = -1;
- EXPECT_THAT(ioctl(bind_.get(), FIONREAD, &n), SyscallSucceedsWithValue(0));
- EXPECT_EQ(n, sizeof(str));
-}
-
-// NOTE: Do not use `FIONREAD` as test name because it will be replaced by the
-// corresponding macro and become `0x541B`.
-TEST_P(UdpSocketTest, Fionread) {
- ASSERT_NO_ERRNO(BindLoopback());
-
-  // Check that FIONREAD reports zero bytes while the bound socket's receive
-  // buffer is empty.
- int n = -1;
- EXPECT_THAT(ioctl(bind_.get(), FIONREAD, &n), SyscallSucceedsWithValue(0));
- EXPECT_EQ(n, 0);
-
- // Send 3 packets from sock to bind_.
- constexpr int psize = 100;
- char buf[3 * psize];
- RandomizeBuffer(buf, sizeof(buf));
-
- struct pollfd pfd = {bind_.get(), POLLIN, 0};
- for (int i = 0; i < 3; ++i) {
- ASSERT_THAT(
- sendto(sock_.get(), buf + i * psize, psize, 0, bind_addr_, addrlen_),
- SyscallSucceedsWithValue(psize));
-
- ASSERT_THAT(RetryEINTR(poll)(&pfd, 1, /*timeout=*/1000),
- SyscallSucceedsWithValue(1));
-
- // Check that regardless of how many packets are in the queue, the size
- // reported is that of a single packet.
- n = -1;
- EXPECT_THAT(ioctl(bind_.get(), FIONREAD, &n), SyscallSucceedsWithValue(0));
- EXPECT_EQ(n, psize);
- }
-}
-
-TEST_P(UdpSocketTest, FIONREADZeroLengthPacket) {
- ASSERT_NO_ERRNO(BindLoopback());
-
-  // Check that FIONREAD reports zero bytes while the bound socket's receive
-  // buffer is empty.
- int n = -1;
- EXPECT_THAT(ioctl(bind_.get(), FIONREAD, &n), SyscallSucceedsWithValue(0));
- EXPECT_EQ(n, 0);
-
-  // Send 3 zero-length packets from sock to bind_.
- constexpr int psize = 100;
- char buf[3 * psize];
- RandomizeBuffer(buf, sizeof(buf));
-
- struct pollfd pfd = {bind_.get(), POLLIN, 0};
- for (int i = 0; i < 3; ++i) {
- ASSERT_THAT(
- sendto(sock_.get(), buf + i * psize, 0, 0, bind_addr_, addrlen_),
- SyscallSucceedsWithValue(0));
-
- ASSERT_THAT(RetryEINTR(poll)(&pfd, 1, /*timeout=*/1000),
- SyscallSucceedsWithValue(1));
-
- // Check that regardless of how many packets are in the queue, the size
- // reported is that of a single packet.
- n = -1;
- EXPECT_THAT(ioctl(bind_.get(), FIONREAD, &n), SyscallSucceedsWithValue(0));
- EXPECT_EQ(n, 0);
- }
-}
-
-TEST_P(UdpSocketTest, FIONREADZeroLengthWriteShutdown) {
- int n = -1;
- EXPECT_THAT(ioctl(bind_.get(), FIONREAD, &n), SyscallSucceedsWithValue(0));
- EXPECT_EQ(n, 0);
-
- ASSERT_NO_ERRNO(BindLoopback());
-
-  // A UDP socket must be connected before it can be shut down.
- ASSERT_THAT(connect(bind_.get(), bind_addr_, addrlen_), SyscallSucceeds());
-
- n = -1;
- EXPECT_THAT(ioctl(bind_.get(), FIONREAD, &n), SyscallSucceedsWithValue(0));
- EXPECT_EQ(n, 0);
-
- const char str[] = "abc";
- ASSERT_THAT(send(bind_.get(), str, 0, 0), SyscallSucceedsWithValue(0));
-
- n = -1;
- EXPECT_THAT(ioctl(bind_.get(), FIONREAD, &n), SyscallSucceedsWithValue(0));
- EXPECT_EQ(n, 0);
-
- EXPECT_THAT(shutdown(bind_.get(), SHUT_RD), SyscallSucceeds());
-
- n = -1;
- EXPECT_THAT(ioctl(bind_.get(), FIONREAD, &n), SyscallSucceedsWithValue(0));
- EXPECT_EQ(n, 0);
-}
-
-TEST_P(UdpSocketTest, SoNoCheckOffByDefault) {
- // TODO(gvisor.dev/issue/1202): SO_NO_CHECK socket option not supported by
- // hostinet.
- SKIP_IF(IsRunningWithHostinet());
-
- int v = -1;
- socklen_t optlen = sizeof(v);
- ASSERT_THAT(getsockopt(bind_.get(), SOL_SOCKET, SO_NO_CHECK, &v, &optlen),
- SyscallSucceeds());
- ASSERT_EQ(v, kSockOptOff);
- ASSERT_EQ(optlen, sizeof(v));
-}
-
-TEST_P(UdpSocketTest, SoNoCheck) {
- // TODO(gvisor.dev/issue/1202): SO_NO_CHECK socket option not supported by
- // hostinet.
- SKIP_IF(IsRunningWithHostinet());
-
- int v = kSockOptOn;
- socklen_t optlen = sizeof(v);
- ASSERT_THAT(setsockopt(bind_.get(), SOL_SOCKET, SO_NO_CHECK, &v, optlen),
- SyscallSucceeds());
- v = -1;
- ASSERT_THAT(getsockopt(bind_.get(), SOL_SOCKET, SO_NO_CHECK, &v, &optlen),
- SyscallSucceeds());
- ASSERT_EQ(v, kSockOptOn);
- ASSERT_EQ(optlen, sizeof(v));
-
- v = kSockOptOff;
- ASSERT_THAT(setsockopt(bind_.get(), SOL_SOCKET, SO_NO_CHECK, &v, optlen),
- SyscallSucceeds());
- v = -1;
- ASSERT_THAT(getsockopt(bind_.get(), SOL_SOCKET, SO_NO_CHECK, &v, &optlen),
- SyscallSucceeds());
- ASSERT_EQ(v, kSockOptOff);
- ASSERT_EQ(optlen, sizeof(v));
-}
-
-#ifdef __linux__
-TEST_P(UdpSocketTest, ErrorQueue) {
- char cmsgbuf[CMSG_SPACE(sizeof(sock_extended_err))];
- msghdr msg;
- memset(&msg, 0, sizeof(msg));
- iovec iov;
- memset(&iov, 0, sizeof(iov));
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- msg.msg_control = cmsgbuf;
- msg.msg_controllen = sizeof(cmsgbuf);
-
- // recv*(MSG_ERRQUEUE) never blocks, even without MSG_DONTWAIT.
- EXPECT_THAT(RetryEINTR(recvmsg)(bind_.get(), &msg, MSG_ERRQUEUE),
- SyscallFailsWithErrno(EAGAIN));
-}
-#endif // __linux__
-
-TEST_P(UdpSocketTest, SoTimestampOffByDefault) {
- int v = -1;
- socklen_t optlen = sizeof(v);
- ASSERT_THAT(getsockopt(bind_.get(), SOL_SOCKET, SO_TIMESTAMP, &v, &optlen),
- SyscallSucceeds());
- ASSERT_EQ(v, kSockOptOff);
- ASSERT_EQ(optlen, sizeof(v));
-}
-
-TEST_P(UdpSocketTest, SoTimestamp) {
- ASSERT_NO_ERRNO(BindLoopback());
- ASSERT_THAT(connect(sock_.get(), bind_addr_, addrlen_), SyscallSucceeds());
-
- int v = 1;
- ASSERT_THAT(setsockopt(bind_.get(), SOL_SOCKET, SO_TIMESTAMP, &v, sizeof(v)),
- SyscallSucceeds());
-
- char buf[3];
-  // Send a small packet from sock to bind_.
- ASSERT_THAT(RetryEINTR(write)(sock_.get(), buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- struct pollfd pfd = {bind_.get(), POLLIN, 0};
- ASSERT_THAT(RetryEINTR(poll)(&pfd, 1, /*timeout=*/1000),
- SyscallSucceedsWithValue(1));
-
- char cmsgbuf[CMSG_SPACE(sizeof(struct timeval))];
- msghdr msg;
- memset(&msg, 0, sizeof(msg));
- iovec iov;
- memset(&iov, 0, sizeof(iov));
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- msg.msg_control = cmsgbuf;
- msg.msg_controllen = sizeof(cmsgbuf);
-
- ASSERT_THAT(RetryEINTR(recvmsg)(bind_.get(), &msg, 0),
- SyscallSucceedsWithValue(0));
-
- struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
- ASSERT_NE(cmsg, nullptr);
- ASSERT_EQ(cmsg->cmsg_level, SOL_SOCKET);
- ASSERT_EQ(cmsg->cmsg_type, SO_TIMESTAMP);
- ASSERT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(struct timeval)));
-
- struct timeval tv = {};
- memcpy(&tv, CMSG_DATA(cmsg), sizeof(struct timeval));
-
- ASSERT_TRUE(tv.tv_sec != 0 || tv.tv_usec != 0);
-
- // TODO(gvisor.dev/issue/1202): ioctl(SIOCGSTAMP) is not supported by
- // hostinet.
- if (!IsRunningWithHostinet()) {
- // There should be nothing to get via ioctl.
- ASSERT_THAT(ioctl(bind_.get(), SIOCGSTAMP, &tv),
- SyscallFailsWithErrno(ENOENT));
- }
-}
-
-TEST_P(UdpSocketTest, WriteShutdownNotConnected) {
- EXPECT_THAT(shutdown(bind_.get(), SHUT_WR), SyscallFailsWithErrno(ENOTCONN));
-}
-
-TEST_P(UdpSocketTest, TimestampIoctl) {
- // TODO(gvisor.dev/issue/1202): ioctl() is not supported by hostinet.
- SKIP_IF(IsRunningWithHostinet());
-
- ASSERT_NO_ERRNO(BindLoopback());
- ASSERT_THAT(connect(sock_.get(), bind_addr_, addrlen_), SyscallSucceeds());
-
- char buf[3];
- // Send packet from sock to bind_.
- ASSERT_THAT(RetryEINTR(write)(sock_.get(), buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
-
- struct pollfd pfd = {bind_.get(), POLLIN, 0};
- ASSERT_THAT(RetryEINTR(poll)(&pfd, 1, /*timeout=*/1000),
- SyscallSucceedsWithValue(1));
-
- // There should be no control messages.
- char recv_buf[sizeof(buf)];
- ASSERT_NO_FATAL_FAILURE(RecvNoCmsg(bind_.get(), recv_buf, sizeof(recv_buf)));
-
- // A nonzero timeval should be available via ioctl.
- struct timeval tv = {};
- ASSERT_THAT(ioctl(bind_.get(), SIOCGSTAMP, &tv), SyscallSucceeds());
- ASSERT_TRUE(tv.tv_sec != 0 || tv.tv_usec != 0);
-}
-
-TEST_P(UdpSocketTest, TimestampIoctlNothingRead) {
- // TODO(gvisor.dev/issue/1202): ioctl() is not supported by hostinet.
- SKIP_IF(IsRunningWithHostinet());
-
- ASSERT_NO_ERRNO(BindLoopback());
- ASSERT_THAT(connect(sock_.get(), bind_addr_, addrlen_), SyscallSucceeds());
-
- struct timeval tv = {};
- ASSERT_THAT(ioctl(sock_.get(), SIOCGSTAMP, &tv),
- SyscallFailsWithErrno(ENOENT));
-}
-
-// Test that the timestamp accessed via SIOCGSTAMP is still accessible after
-// SO_TIMESTAMP is enabled and used to retrieve a timestamp.
-TEST_P(UdpSocketTest, TimestampIoctlPersistence) {
- // TODO(gvisor.dev/issue/1202): ioctl() and SO_TIMESTAMP socket option are not
- // supported by hostinet.
- SKIP_IF(IsRunningWithHostinet());
-
- ASSERT_NO_ERRNO(BindLoopback());
- ASSERT_THAT(connect(sock_.get(), bind_addr_, addrlen_), SyscallSucceeds());
-
- char buf[3];
- // Send packet from sock to bind_.
- ASSERT_THAT(RetryEINTR(write)(sock_.get(), buf, sizeof(buf)),
- SyscallSucceedsWithValue(sizeof(buf)));
- ASSERT_THAT(RetryEINTR(write)(sock_.get(), buf, 0),
- SyscallSucceedsWithValue(0));
-
- struct pollfd pfd = {bind_.get(), POLLIN, 0};
- ASSERT_THAT(RetryEINTR(poll)(&pfd, 1, /*timeout=*/1000),
- SyscallSucceedsWithValue(1));
-
- // There should be no control messages.
- char recv_buf[sizeof(buf)];
- ASSERT_NO_FATAL_FAILURE(RecvNoCmsg(bind_.get(), recv_buf, sizeof(recv_buf)));
-
- // A nonzero timeval should be available via ioctl.
- struct timeval tv = {};
- ASSERT_THAT(ioctl(bind_.get(), SIOCGSTAMP, &tv), SyscallSucceeds());
- ASSERT_TRUE(tv.tv_sec != 0 || tv.tv_usec != 0);
-
- // Enable SO_TIMESTAMP and send a message.
- int v = 1;
- EXPECT_THAT(setsockopt(bind_.get(), SOL_SOCKET, SO_TIMESTAMP, &v, sizeof(v)),
- SyscallSucceeds());
- ASSERT_THAT(RetryEINTR(write)(sock_.get(), buf, 0),
- SyscallSucceedsWithValue(0));
-
- ASSERT_THAT(RetryEINTR(poll)(&pfd, 1, /*timeout=*/1000),
- SyscallSucceedsWithValue(1));
-
- // There should be a message for SO_TIMESTAMP.
- char cmsgbuf[CMSG_SPACE(sizeof(struct timeval))];
- msghdr msg = {};
- iovec iov = {};
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- msg.msg_control = cmsgbuf;
- msg.msg_controllen = sizeof(cmsgbuf);
- ASSERT_THAT(RetryEINTR(recvmsg)(bind_.get(), &msg, 0),
- SyscallSucceedsWithValue(0));
- struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
- ASSERT_NE(cmsg, nullptr);
-
- // The ioctl should return the exact same values as before.
- struct timeval tv2 = {};
- ASSERT_THAT(ioctl(bind_.get(), SIOCGSTAMP, &tv2), SyscallSucceeds());
- ASSERT_EQ(tv.tv_sec, tv2.tv_sec);
- ASSERT_EQ(tv.tv_usec, tv2.tv_usec);
-}
-
-// Test that a socket with IP_TOS or IPV6_TCLASS set will set the TOS byte on
-// outgoing packets, and that a receiving socket with IP_RECVTOS or
-// IPV6_RECVTCLASS will create the corresponding control message.
-TEST_P(UdpSocketTest, SetAndReceiveTOS) {
- ASSERT_NO_ERRNO(BindLoopback());
- ASSERT_THAT(connect(sock_.get(), bind_addr_, addrlen_), SyscallSucceeds());
-
- // Allow socket to receive control message.
- int recv_level = SOL_IP;
- int recv_type = IP_RECVTOS;
- if (GetParam() != AddressFamily::kIpv4) {
- recv_level = SOL_IPV6;
- recv_type = IPV6_RECVTCLASS;
- }
- ASSERT_THAT(setsockopt(bind_.get(), recv_level, recv_type, &kSockOptOn,
- sizeof(kSockOptOn)),
- SyscallSucceeds());
-
- // Set socket TOS.
- int sent_level = recv_level;
- int sent_type = IP_TOS;
- if (sent_level == SOL_IPV6) {
- sent_type = IPV6_TCLASS;
- }
- int sent_tos = IPTOS_LOWDELAY; // Choose some TOS value.
- ASSERT_THAT(setsockopt(sock_.get(), sent_level, sent_type, &sent_tos,
- sizeof(sent_tos)),
- SyscallSucceeds());
-
- // Prepare message to send.
- constexpr size_t kDataLength = 1024;
- struct msghdr sent_msg = {};
- struct iovec sent_iov = {};
- char sent_data[kDataLength];
- sent_iov.iov_base = &sent_data[0];
- sent_iov.iov_len = kDataLength;
- sent_msg.msg_iov = &sent_iov;
- sent_msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(sendmsg)(sock_.get(), &sent_msg, 0),
- SyscallSucceedsWithValue(kDataLength));
-
- // Receive message.
- struct msghdr received_msg = {};
- struct iovec received_iov = {};
- char received_data[kDataLength];
- received_iov.iov_base = &received_data[0];
- received_iov.iov_len = kDataLength;
- received_msg.msg_iov = &received_iov;
- received_msg.msg_iovlen = 1;
- size_t cmsg_data_len = sizeof(int8_t);
- if (sent_type == IPV6_TCLASS) {
- cmsg_data_len = sizeof(int);
- }
- std::vector<char> received_cmsgbuf(CMSG_SPACE(cmsg_data_len));
- received_msg.msg_control = &received_cmsgbuf[0];
- received_msg.msg_controllen = received_cmsgbuf.size();
- ASSERT_THAT(RetryEINTR(recvmsg)(bind_.get(), &received_msg, 0),
- SyscallSucceedsWithValue(kDataLength));
-
- struct cmsghdr* cmsg = CMSG_FIRSTHDR(&received_msg);
- ASSERT_NE(cmsg, nullptr);
- EXPECT_EQ(cmsg->cmsg_len, CMSG_LEN(cmsg_data_len));
- EXPECT_EQ(cmsg->cmsg_level, sent_level);
- EXPECT_EQ(cmsg->cmsg_type, sent_type);
- int8_t received_tos = 0;
- memcpy(&received_tos, CMSG_DATA(cmsg), sizeof(received_tos));
- EXPECT_EQ(received_tos, sent_tos);
-}
-
-// Test that sendmsg with IP_TOS and IPV6_TCLASS control messages will set the
-// TOS byte on outgoing packets, and that a receiving socket with IP_RECVTOS or
-// IPV6_RECVTCLASS will create the corresponding control message.
-TEST_P(UdpSocketTest, SendAndReceiveTOS) {
- // TODO(b/146661005): Setting TOS via cmsg not supported for netstack.
- SKIP_IF(IsRunningOnGvisor() && !IsRunningWithHostinet());
-
- ASSERT_NO_ERRNO(BindLoopback());
- ASSERT_THAT(connect(sock_.get(), bind_addr_, addrlen_), SyscallSucceeds());
-
- // Allow socket to receive control message.
- int recv_level = SOL_IP;
- int recv_type = IP_RECVTOS;
- if (GetParam() != AddressFamily::kIpv4) {
- recv_level = SOL_IPV6;
- recv_type = IPV6_RECVTCLASS;
- }
- int recv_opt = kSockOptOn;
- ASSERT_THAT(setsockopt(bind_.get(), recv_level, recv_type, &recv_opt,
- sizeof(recv_opt)),
- SyscallSucceeds());
-
- // Prepare message to send.
- constexpr size_t kDataLength = 1024;
- int sent_level = recv_level;
- int sent_type = IP_TOS;
- int sent_tos = IPTOS_LOWDELAY; // Choose some TOS value.
-
- struct msghdr sent_msg = {};
- struct iovec sent_iov = {};
- char sent_data[kDataLength];
- sent_iov.iov_base = &sent_data[0];
- sent_iov.iov_len = kDataLength;
- sent_msg.msg_iov = &sent_iov;
- sent_msg.msg_iovlen = 1;
- size_t cmsg_data_len = sizeof(int8_t);
- if (sent_level == SOL_IPV6) {
- sent_type = IPV6_TCLASS;
- cmsg_data_len = sizeof(int);
- }
- std::vector<char> sent_cmsgbuf(CMSG_SPACE(cmsg_data_len));
- sent_msg.msg_control = &sent_cmsgbuf[0];
- sent_msg.msg_controllen = CMSG_LEN(cmsg_data_len);
-
- // Manually add control message.
- struct cmsghdr* sent_cmsg = CMSG_FIRSTHDR(&sent_msg);
- sent_cmsg->cmsg_len = CMSG_LEN(cmsg_data_len);
- sent_cmsg->cmsg_level = sent_level;
- sent_cmsg->cmsg_type = sent_type;
- *(int8_t*)CMSG_DATA(sent_cmsg) = sent_tos;
-
- ASSERT_THAT(RetryEINTR(sendmsg)(sock_.get(), &sent_msg, 0),
- SyscallSucceedsWithValue(kDataLength));
-
- // Receive message.
- struct msghdr received_msg = {};
- struct iovec received_iov = {};
- char received_data[kDataLength];
- received_iov.iov_base = &received_data[0];
- received_iov.iov_len = kDataLength;
- received_msg.msg_iov = &received_iov;
- received_msg.msg_iovlen = 1;
- std::vector<char> received_cmsgbuf(CMSG_SPACE(cmsg_data_len));
- received_msg.msg_control = &received_cmsgbuf[0];
- received_msg.msg_controllen = CMSG_LEN(cmsg_data_len);
- ASSERT_THAT(RetryEINTR(recvmsg)(bind_.get(), &received_msg, 0),
- SyscallSucceedsWithValue(kDataLength));
-
- struct cmsghdr* cmsg = CMSG_FIRSTHDR(&received_msg);
- ASSERT_NE(cmsg, nullptr);
- EXPECT_EQ(cmsg->cmsg_len, CMSG_LEN(cmsg_data_len));
- EXPECT_EQ(cmsg->cmsg_level, sent_level);
- EXPECT_EQ(cmsg->cmsg_type, sent_type);
- int8_t received_tos = 0;
- memcpy(&received_tos, CMSG_DATA(cmsg), sizeof(received_tos));
- EXPECT_EQ(received_tos, sent_tos);
-}
-
-TEST_P(UdpSocketTest, RecvBufLimitsEmptyRcvBuf) {
- // Discover minimum buffer size by setting it to zero.
- constexpr int kRcvBufSz = 0;
- ASSERT_THAT(setsockopt(bind_.get(), SOL_SOCKET, SO_RCVBUF, &kRcvBufSz,
- sizeof(kRcvBufSz)),
- SyscallSucceeds());
-
- int min = 0;
- socklen_t min_len = sizeof(min);
- ASSERT_THAT(getsockopt(bind_.get(), SOL_SOCKET, SO_RCVBUF, &min, &min_len),
- SyscallSucceeds());
-
- // Bind bind_ to loopback.
- ASSERT_NO_ERRNO(BindLoopback());
-
- {
- // Send data of size min and verify that it's received.
- std::vector<char> buf(min);
- RandomizeBuffer(buf.data(), buf.size());
- ASSERT_THAT(
- sendto(sock_.get(), buf.data(), buf.size(), 0, bind_addr_, addrlen_),
- SyscallSucceedsWithValue(buf.size()));
- std::vector<char> received(buf.size());
- EXPECT_THAT(RecvTimeout(bind_.get(), received.data(), received.size(),
- 1 /*timeout*/),
- IsPosixErrorOkAndHolds(received.size()));
- }
-
- {
-    // Send data of size min + 1 and verify that it's received. Both Linux and
- // Netstack accept a dgram that exceeds rcvBuf limits if the receive buffer
- // is currently empty.
- std::vector<char> buf(min + 1);
- RandomizeBuffer(buf.data(), buf.size());
- ASSERT_THAT(
- sendto(sock_.get(), buf.data(), buf.size(), 0, bind_addr_, addrlen_),
- SyscallSucceedsWithValue(buf.size()));
-
- std::vector<char> received(buf.size());
- ASSERT_THAT(RecvTimeout(bind_.get(), received.data(), received.size(),
- 1 /*timeout*/),
- IsPosixErrorOkAndHolds(received.size()));
- }
-}
-
-// Test that receive buffer limits are enforced.
-TEST_P(UdpSocketTest, RecvBufLimits) {
-  // Bind bind_ to loopback.
- ASSERT_NO_ERRNO(BindLoopback());
-
- int min = 0;
- {
- // Discover minimum buffer size by trying to set it to zero.
- constexpr int kRcvBufSz = 0;
- ASSERT_THAT(setsockopt(bind_.get(), SOL_SOCKET, SO_RCVBUF, &kRcvBufSz,
- sizeof(kRcvBufSz)),
- SyscallSucceeds());
-
- socklen_t min_len = sizeof(min);
- ASSERT_THAT(getsockopt(bind_.get(), SOL_SOCKET, SO_RCVBUF, &min, &min_len),
- SyscallSucceeds());
- }
-
- // Now set the limit to min * 2.
- int new_rcv_buf_sz = min * 2;
- ASSERT_THAT(setsockopt(bind_.get(), SOL_SOCKET, SO_RCVBUF, &new_rcv_buf_sz,
- sizeof(new_rcv_buf_sz)),
- SyscallSucceeds());
- int rcv_buf_sz = 0;
- {
- socklen_t rcv_buf_len = sizeof(rcv_buf_sz);
- ASSERT_THAT(getsockopt(bind_.get(), SOL_SOCKET, SO_RCVBUF, &rcv_buf_sz,
- &rcv_buf_len),
- SyscallSucceeds());
- }
-
- {
- std::vector<char> buf(min);
- RandomizeBuffer(buf.data(), buf.size());
-
- ASSERT_THAT(
- sendto(sock_.get(), buf.data(), buf.size(), 0, bind_addr_, addrlen_),
- SyscallSucceedsWithValue(buf.size()));
- ASSERT_THAT(
- sendto(sock_.get(), buf.data(), buf.size(), 0, bind_addr_, addrlen_),
- SyscallSucceedsWithValue(buf.size()));
- ASSERT_THAT(
- sendto(sock_.get(), buf.data(), buf.size(), 0, bind_addr_, addrlen_),
- SyscallSucceedsWithValue(buf.size()));
- ASSERT_THAT(
- sendto(sock_.get(), buf.data(), buf.size(), 0, bind_addr_, addrlen_),
- SyscallSucceedsWithValue(buf.size()));
- int sent = 4;
- if (IsRunningOnGvisor() && !IsRunningWithHostinet()) {
-      // Netstack accepts the 4th packet, which Linux drops even though it
-      // should technically fit in the receive buffer, so send one more to
-      // ensure that a packet gets dropped.
- ASSERT_THAT(
- sendto(sock_.get(), buf.data(), buf.size(), 0, bind_addr_, addrlen_),
- SyscallSucceedsWithValue(buf.size()));
- sent++;
- }
-
- for (int i = 0; i < sent - 1; i++) {
- // Receive the data.
- std::vector<char> received(buf.size());
- EXPECT_THAT(RecvTimeout(bind_.get(), received.data(), received.size(),
- 1 /*timeout*/),
- IsPosixErrorOkAndHolds(received.size()));
- EXPECT_EQ(memcmp(buf.data(), received.data(), buf.size()), 0);
- }
-
- // The last receive should fail with EAGAIN as the last packet should have
- // been dropped due to lack of space in the receive buffer.
- std::vector<char> received(buf.size());
- EXPECT_THAT(
- recv(bind_.get(), received.data(), received.size(), MSG_DONTWAIT),
- SyscallFailsWithErrno(EAGAIN));
- }
-}
-
-#ifdef __linux__
-
-// TODO(gvisor.dev/2746): Support SO_ATTACH_FILTER/SO_DETACH_FILTER.
-// gVisor currently silently ignores attaching a filter.
-TEST_P(UdpSocketTest, SetSocketDetachFilter) {
-  // Program generated using "sudo tcpdump -i lo udp and port 1234 -dd".
- struct sock_filter code[] = {
- {0x28, 0, 0, 0x0000000c}, {0x15, 0, 6, 0x000086dd},
- {0x30, 0, 0, 0x00000014}, {0x15, 0, 15, 0x00000011},
- {0x28, 0, 0, 0x00000036}, {0x15, 12, 0, 0x000004d2},
- {0x28, 0, 0, 0x00000038}, {0x15, 10, 11, 0x000004d2},
- {0x15, 0, 10, 0x00000800}, {0x30, 0, 0, 0x00000017},
- {0x15, 0, 8, 0x00000011}, {0x28, 0, 0, 0x00000014},
- {0x45, 6, 0, 0x00001fff}, {0xb1, 0, 0, 0x0000000e},
- {0x48, 0, 0, 0x0000000e}, {0x15, 2, 0, 0x000004d2},
- {0x48, 0, 0, 0x00000010}, {0x15, 0, 1, 0x000004d2},
- {0x6, 0, 0, 0x00040000}, {0x6, 0, 0, 0x00000000},
- };
- struct sock_fprog bpf = {
- .len = ABSL_ARRAYSIZE(code),
- .filter = code,
- };
- ASSERT_THAT(
- setsockopt(sock_.get(), SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf)),
- SyscallSucceeds());
-
- constexpr int val = 0;
- ASSERT_THAT(
- setsockopt(sock_.get(), SOL_SOCKET, SO_DETACH_FILTER, &val, sizeof(val)),
- SyscallSucceeds());
-}
-
-#endif // __linux__
-
-TEST_P(UdpSocketTest, SetSocketDetachFilterNoInstalledFilter) {
- // TODO(gvisor.dev/2746): Support SO_ATTACH_FILTER/SO_DETACH_FILTER.
- SKIP_IF(IsRunningOnGvisor());
- constexpr int val = 0;
- ASSERT_THAT(
- setsockopt(sock_.get(), SOL_SOCKET, SO_DETACH_FILTER, &val, sizeof(val)),
- SyscallFailsWithErrno(ENOENT));
-}
-
-TEST_P(UdpSocketTest, GetSocketDetachFilter) {
- int val = 0;
- socklen_t val_len = sizeof(val);
- ASSERT_THAT(
- getsockopt(sock_.get(), SOL_SOCKET, SO_DETACH_FILTER, &val, &val_len),
- SyscallFailsWithErrno(ENOPROTOOPT));
-}
-
-TEST_P(UdpSocketTest, SendToZeroPort) {
- char buf[8];
- struct sockaddr_storage addr = InetLoopbackAddr();
-
-  // Sending to port 0, an invalid destination port, should fail.
- SetPort(&addr, 0);
- EXPECT_THAT(
- sendto(sock_.get(), buf, sizeof(buf), 0, AsSockAddr(&addr), sizeof(addr)),
- SyscallFailsWithErrno(EINVAL));
-
- SetPort(&addr, 1234);
- EXPECT_THAT(
- sendto(sock_.get(), buf, sizeof(buf), 0, AsSockAddr(&addr), sizeof(addr)),
- SyscallSucceedsWithValue(sizeof(buf)));
-}
-
-TEST_P(UdpSocketTest, ConnectToZeroPortUnbound) {
- struct sockaddr_storage addr = InetLoopbackAddr();
- SetPort(&addr, 0);
- ASSERT_THAT(connect(sock_.get(), AsSockAddr(&addr), addrlen_),
- SyscallSucceeds());
-}
-
-TEST_P(UdpSocketTest, ConnectToZeroPortBound) {
- struct sockaddr_storage addr = InetLoopbackAddr();
- ASSERT_NO_ERRNO(BindSocket(sock_.get(), AsSockAddr(&addr)));
-
- SetPort(&addr, 0);
- ASSERT_THAT(connect(sock_.get(), AsSockAddr(&addr), addrlen_),
- SyscallSucceeds());
- socklen_t len = sizeof(sockaddr_storage);
- ASSERT_THAT(getsockname(sock_.get(), AsSockAddr(&addr), &len),
- SyscallSucceeds());
- ASSERT_EQ(len, addrlen_);
-}
-
-TEST_P(UdpSocketTest, ConnectToZeroPortConnected) {
- struct sockaddr_storage addr = InetLoopbackAddr();
- ASSERT_NO_ERRNO(BindSocket(sock_.get(), AsSockAddr(&addr)));
-
-  // Connecting to an address with a non-zero port should succeed.
- ASSERT_THAT(connect(sock_.get(), AsSockAddr(&addr), addrlen_),
- SyscallSucceeds());
- sockaddr_storage peername;
- socklen_t peerlen = sizeof(peername);
- ASSERT_THAT(getpeername(sock_.get(), AsSockAddr(&peername), &peerlen),
- SyscallSucceeds());
- ASSERT_EQ(peerlen, addrlen_);
- ASSERT_EQ(memcmp(&peername, &addr, addrlen_), 0);
-
-  // However, connect() to an address with port 0 will make the following
- // getpeername() fail.
- SetPort(&addr, 0);
- ASSERT_THAT(connect(sock_.get(), AsSockAddr(&addr), addrlen_),
- SyscallSucceeds());
- ASSERT_THAT(getpeername(sock_.get(), AsSockAddr(&peername), &peerlen),
- SyscallFailsWithErrno(ENOTCONN));
-}
-
-INSTANTIATE_TEST_SUITE_P(AllInetTests, UdpSocketTest,
- ::testing::Values(AddressFamily::kIpv4,
- AddressFamily::kIpv6,
- AddressFamily::kDualStack));
-
-TEST(UdpInet6SocketTest, ConnectInet4Sockaddr) {
-  // glibc getaddrinfo expects the invariant expressed by this test to hold.
- const sockaddr_in connect_sockaddr = {
- .sin_family = AF_INET, .sin_addr = {.s_addr = htonl(INADDR_LOOPBACK)}};
- auto sock_ =
- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET6, SOCK_DGRAM, IPPROTO_UDP));
- ASSERT_THAT(
- connect(sock_.get(),
- reinterpret_cast<const struct sockaddr*>(&connect_sockaddr),
- sizeof(sockaddr_in)),
- SyscallSucceeds());
- sockaddr_storage sockname;
- socklen_t len = sizeof(sockaddr_storage);
- ASSERT_THAT(getsockname(sock_.get(), AsSockAddr(&sockname), &len),
- SyscallSucceeds());
- ASSERT_EQ(sockname.ss_family, AF_INET6);
- ASSERT_EQ(len, sizeof(sockaddr_in6));
- auto sin6 = reinterpret_cast<struct sockaddr_in6*>(&sockname);
- char addr_buf[INET6_ADDRSTRLEN];
- const char* addr;
-  ASSERT_NE(addr = inet_ntop(sockname.ss_family, &sin6->sin6_addr, addr_buf,
- sizeof(addr_buf)),
- nullptr);
- ASSERT_TRUE(IN6_IS_ADDR_V4MAPPED(sin6->sin6_addr.s6_addr)) << addr;
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
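For reference, a minimal standalone sketch of the SO_TIMESTAMP receive path that SoTimestamp and TimestampIoctl exercise above (not taken from the test sources; the loopback setup and the omitted error handling are illustrative simplifications):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/uio.h>
#include <unistd.h>

int main() {
  int rx = socket(AF_INET, SOCK_DGRAM, 0);
  int tx = socket(AF_INET, SOCK_DGRAM, 0);

  sockaddr_in addr = {};
  addr.sin_family = AF_INET;
  addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
  addr.sin_port = 0;  // Let the kernel pick a free port.
  bind(rx, reinterpret_cast<sockaddr*>(&addr), sizeof(addr));
  socklen_t len = sizeof(addr);
  getsockname(rx, reinterpret_cast<sockaddr*>(&addr), &len);

  // Ask the kernel to attach an SO_TIMESTAMP control message to each datagram.
  int on = 1;
  setsockopt(rx, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on));

  const char payload[] = "ping";
  sendto(tx, payload, sizeof(payload), 0, reinterpret_cast<sockaddr*>(&addr),
         sizeof(addr));

  char data[16];
  char cbuf[CMSG_SPACE(sizeof(timeval))];
  iovec iov = {data, sizeof(data)};
  msghdr msg = {};
  msg.msg_iov = &iov;
  msg.msg_iovlen = 1;
  msg.msg_control = cbuf;
  msg.msg_controllen = sizeof(cbuf);
  recvmsg(rx, &msg, 0);

  // Walk the control messages and print the receive timestamp.
  for (cmsghdr* c = CMSG_FIRSTHDR(&msg); c != nullptr; c = CMSG_NXTHDR(&msg, c)) {
    if (c->cmsg_level == SOL_SOCKET && c->cmsg_type == SO_TIMESTAMP) {
      timeval tv;
      memcpy(&tv, CMSG_DATA(c), sizeof(tv));
      printf("received at %ld.%06ld\n", static_cast<long>(tv.tv_sec),
             static_cast<long>(tv.tv_usec));
    }
  }

  close(tx);
  close(rx);
  return 0;
}

The same timestamp remains readable afterwards via ioctl(SIOCGSTAMP), which is the behavior TimestampIoctlPersistence checks.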
diff --git a/test/syscalls/linux/uidgid.cc b/test/syscalls/linux/uidgid.cc
deleted file mode 100644
index d95a3e010..000000000
--- a/test/syscalls/linux/uidgid.cc
+++ /dev/null
@@ -1,303 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <grp.h>
-#include <sys/resource.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "absl/flags/flag.h"
-#include "absl/strings/str_cat.h"
-#include "absl/strings/str_join.h"
-#include "test/util/capability_util.h"
-#include "test/util/cleanup.h"
-#include "test/util/multiprocess_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-#include "test/util/uid_util.h"
-
-ABSL_FLAG(int32_t, scratch_uid1, 65534, "first scratch UID");
-ABSL_FLAG(int32_t, scratch_uid2, 65533, "second scratch UID");
-ABSL_FLAG(int32_t, scratch_gid1, 65534, "first scratch GID");
-ABSL_FLAG(int32_t, scratch_gid2, 65533, "second scratch GID");
-
-// Force use of syscall instead of glibc set*id() wrappers because we want to
-// apply the change to the current task only. glibc applies it to all threads
-// in a process because "POSIX requires that all threads in a process share
-// the same credentials."
-#define setuid USE_SYSCALL_INSTEAD
-#define setgid USE_SYSCALL_INSTEAD
-#define setreuid USE_SYSCALL_INSTEAD
-#define setregid USE_SYSCALL_INSTEAD
-#define setresuid USE_SYSCALL_INSTEAD
-#define setresgid USE_SYSCALL_INSTEAD
-
-using ::testing::UnorderedElementsAreArray;
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(UidGidTest, Getuid) {
- uid_t ruid, euid, suid;
- EXPECT_THAT(getresuid(&ruid, &euid, &suid), SyscallSucceeds());
- EXPECT_THAT(getuid(), SyscallSucceedsWithValue(ruid));
- EXPECT_THAT(geteuid(), SyscallSucceedsWithValue(euid));
-}
-
-TEST(UidGidTest, Getgid) {
- gid_t rgid, egid, sgid;
- EXPECT_THAT(getresgid(&rgid, &egid, &sgid), SyscallSucceeds());
- EXPECT_THAT(getgid(), SyscallSucceedsWithValue(rgid));
- EXPECT_THAT(getegid(), SyscallSucceedsWithValue(egid));
-}
-
-TEST(UidGidTest, Getgroups) {
- // "If size is zero, list is not modified, but the total number of
- // supplementary group IDs for the process is returned." - getgroups(2)
- int nr_groups;
- ASSERT_THAT(nr_groups = getgroups(0, nullptr), SyscallSucceeds());
- std::vector<gid_t> list(nr_groups);
- EXPECT_THAT(getgroups(list.size(), list.data()), SyscallSucceeds());
-
- // "EINVAL: size is less than the number of supplementary group IDs, but is
- // not zero."
- EXPECT_THAT(getgroups(-1, nullptr), SyscallFailsWithErrno(EINVAL));
-
- // Testing for EFAULT requires actually having groups, which isn't guaranteed
- // here; see the setgroups test below.
-}
-
-// Checks that the calling process' real/effective/saved user IDs are
-// ruid/euid/suid respectively.
-PosixError CheckUIDs(uid_t ruid, uid_t euid, uid_t suid) {
- uid_t actual_ruid, actual_euid, actual_suid;
- int rc = getresuid(&actual_ruid, &actual_euid, &actual_suid);
- MaybeSave();
- if (rc < 0) {
- return PosixError(errno, "getresuid");
- }
- if (ruid != actual_ruid || euid != actual_euid || suid != actual_suid) {
-    return PosixError(
-        EPERM, absl::StrCat(
-                   "incorrect user IDs: got (",
-                   absl::StrJoin({actual_ruid, actual_euid, actual_suid}, ", "),
-                   "), wanted (", absl::StrJoin({ruid, euid, suid}, ", "), ")"));
- }
- return NoError();
-}
-
-PosixError CheckGIDs(gid_t rgid, gid_t egid, gid_t sgid) {
- gid_t actual_rgid, actual_egid, actual_sgid;
- int rc = getresgid(&actual_rgid, &actual_egid, &actual_sgid);
- MaybeSave();
- if (rc < 0) {
- return PosixError(errno, "getresgid");
- }
- if (rgid != actual_rgid || egid != actual_egid || sgid != actual_sgid) {
-    return PosixError(
-        EPERM, absl::StrCat(
-                   "incorrect group IDs: got (",
-                   absl::StrJoin({actual_rgid, actual_egid, actual_sgid}, ", "),
-                   "), wanted (", absl::StrJoin({rgid, egid, sgid}, ", "), ")"));
- }
- return NoError();
-}
-
-// N.B. These tests may break horribly unless run via a gVisor test runner,
-// because changing UID in one test may forfeit permissions required by other
-// tests. (The test runner runs each test in a separate process.)
-
-TEST(UidGidRootTest, Setuid) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(IsRoot()));
-
- // Do setuid in a separate thread so that after finishing this test, the
- // process can still open files the test harness created before starting this
- // test. Otherwise, the files are created by root (UID before the test), but
- // cannot be opened by the `uid` set below after the test. After calling
- // setuid(non-zero-UID), there is no way to get root privileges back.
- ScopedThread([&] {
- // Use syscall instead of glibc setuid wrapper because we want this setuid
- // call to only apply to this task. POSIX threads, however, require that all
- // threads have the same UIDs, so using the setuid wrapper sets all threads'
- // real UID.
- EXPECT_THAT(syscall(SYS_setuid, -1), SyscallFailsWithErrno(EINVAL));
-
- const uid_t uid = absl::GetFlag(FLAGS_scratch_uid1);
- EXPECT_THAT(syscall(SYS_setuid, uid), SyscallSucceeds());
- // "If the effective UID of the caller is root (more precisely: if the
- // caller has the CAP_SETUID capability), the real UID and saved set-user-ID
- // are also set." - setuid(2)
- EXPECT_NO_ERRNO(CheckUIDs(uid, uid, uid));
- });
-}
-
-TEST(UidGidRootTest, Setgid) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(IsRoot()));
-
- EXPECT_THAT(syscall(SYS_setgid, -1), SyscallFailsWithErrno(EINVAL));
-
- ScopedThread([&] {
- const gid_t gid = absl::GetFlag(FLAGS_scratch_gid1);
- EXPECT_THAT(syscall(SYS_setgid, gid), SyscallSucceeds());
- EXPECT_NO_ERRNO(CheckGIDs(gid, gid, gid));
- });
-}
-
-TEST(UidGidRootTest, SetgidNotFromThreadGroupLeader) {
-#pragma push_macro("setgid")
-#undef setgid
-
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(IsRoot()));
-
- int old_gid = getgid();
- auto clean = Cleanup([old_gid] { setgid(old_gid); });
-
- const gid_t gid = absl::GetFlag(FLAGS_scratch_gid1);
- // NOTE(b/64676707): Do setgid in a separate thread so that we can test if
- // info.si_pid is set correctly.
- ScopedThread thread =
- ScopedThread([gid] { ASSERT_THAT(setgid(gid), SyscallSucceeds()); });
- thread.Join();
- EXPECT_NO_ERRNO(CheckGIDs(gid, gid, gid));
-
-#pragma pop_macro("setgid")
-}
-
-TEST(UidGidRootTest, Setreuid) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(IsRoot()));
-
- // "Supplying a value of -1 for either the real or effective user ID forces
- // the system to leave that ID unchanged." - setreuid(2)
- EXPECT_THAT(syscall(SYS_setreuid, -1, -1), SyscallSucceeds());
-
- EXPECT_NO_ERRNO(CheckUIDs(0, 0, 0));
-
- // Do setuid in a separate thread so that after finishing this test, the
- // process can still open files the test harness created before starting
- // this test. Otherwise, the files are created by root (UID before the
- // test), but cannot be opened by the `uid` set below after the test. After
- // calling setuid(non-zero-UID), there is no way to get root privileges
- // back.
- ScopedThread([&] {
- const uid_t ruid = absl::GetFlag(FLAGS_scratch_uid1);
- const uid_t euid = absl::GetFlag(FLAGS_scratch_uid2);
-
- EXPECT_THAT(syscall(SYS_setreuid, ruid, euid), SyscallSucceeds());
-
- // "If the real user ID is set or the effective user ID is set to a value
- // not equal to the previous real user ID, the saved set-user-ID will be
- // set to the new effective user ID." - setreuid(2)
- EXPECT_NO_ERRNO(CheckUIDs(ruid, euid, euid));
- });
-}
-
-TEST(UidGidRootTest, Setregid) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(IsRoot()));
-
- EXPECT_THAT(syscall(SYS_setregid, -1, -1), SyscallSucceeds());
- EXPECT_NO_ERRNO(CheckGIDs(0, 0, 0));
-
- ScopedThread([&] {
- const gid_t rgid = absl::GetFlag(FLAGS_scratch_gid1);
- const gid_t egid = absl::GetFlag(FLAGS_scratch_gid2);
- ASSERT_THAT(syscall(SYS_setregid, rgid, egid), SyscallSucceeds());
- EXPECT_NO_ERRNO(CheckGIDs(rgid, egid, egid));
- });
-}
-
-TEST(UidGidRootTest, Setresuid) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(IsRoot()));
-
- // "If one of the arguments equals -1, the corresponding value is not
- // changed." - setresuid(2)
- EXPECT_THAT(syscall(SYS_setresuid, -1, -1, -1), SyscallSucceeds());
- EXPECT_NO_ERRNO(CheckUIDs(0, 0, 0));
-
- // Do setuid in a separate thread so that after finishing this test, the
- // process can still open files the test harness created before starting
- // this test. Otherwise, the files are created by root (UID before the
- // test), but cannot be opened by the `uid` set below after the test. After
- // calling setuid(non-zero-UID), there is no way to get root privileges
- // back.
- ScopedThread([&] {
- const uid_t ruid = 12345;
- const uid_t euid = 23456;
- const uid_t suid = 34567;
-
- // Use syscall instead of glibc setuid wrapper because we want this setuid
-    // call to only apply to this task. POSIX threads, however, require that
- // all threads have the same UIDs, so using the setuid wrapper sets all
- // threads' real UID.
- EXPECT_THAT(syscall(SYS_setresuid, ruid, euid, suid), SyscallSucceeds());
- EXPECT_NO_ERRNO(CheckUIDs(ruid, euid, suid));
- });
-}
-
-TEST(UidGidRootTest, Setresgid) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(IsRoot()));
-
- EXPECT_THAT(syscall(SYS_setresgid, -1, -1, -1), SyscallSucceeds());
- EXPECT_NO_ERRNO(CheckGIDs(0, 0, 0));
-
- ScopedThread([&] {
- const gid_t rgid = 12345;
- const gid_t egid = 23456;
- const gid_t sgid = 34567;
- ASSERT_THAT(syscall(SYS_setresgid, rgid, egid, sgid), SyscallSucceeds());
- EXPECT_NO_ERRNO(CheckGIDs(rgid, egid, sgid));
- });
-}
-
-TEST(UidGidRootTest, Setgroups) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(IsRoot()));
-
- std::vector<gid_t> list = {123, 500};
- ASSERT_THAT(setgroups(list.size(), list.data()), SyscallSucceeds());
- std::vector<gid_t> list2(list.size());
- ASSERT_THAT(getgroups(list2.size(), list2.data()), SyscallSucceeds());
- EXPECT_THAT(list, UnorderedElementsAreArray(list2));
-
- // "EFAULT: list has an invalid address."
- EXPECT_THAT(getgroups(100, reinterpret_cast<gid_t*>(-1)),
- SyscallFailsWithErrno(EFAULT));
-}
-
-TEST(UidGidRootTest, Setuid_prlimit) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(IsRoot()));
-
- // Do seteuid in a separate thread so that after finishing this test, the
- // process can still open files the test harness created before starting
- // this test. Otherwise, the files are created by root (UID before the
- // test), but cannot be opened by the `uid` set below after the test.
- ScopedThread([&] {
-    // Use syscall instead of the glibc setreuid wrapper because we want this
-    // change to apply to this task only. POSIX threads, however, require
-    // that all threads have the same UIDs, so using the glibc wrapper would
-    // change every thread's UID.
- EXPECT_THAT(syscall(SYS_setreuid, -1, 65534), SyscallSucceeds());
-
- // Despite the UID change, we should be able to get our own limits.
- struct rlimit rl = {};
- EXPECT_THAT(prlimit(0, RLIMIT_NOFILE, NULL, &rl), SyscallSucceeds());
- });
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
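For reference, a minimal standalone sketch of the raw-syscall credential change the comments above describe (not taken from the test sources; it assumes the process holds CAP_SETUID, and the scratch UID 65534 is purely illustrative):

#include <stdio.h>
#include <sys/syscall.h>
#include <thread>
#include <unistd.h>

int main() {
  std::thread t([] {
    // The raw syscall changes credentials for this task only; the glibc
    // setresuid() wrapper would signal every thread to make the same change.
    if (syscall(SYS_setresuid, 65534, 65534, 65534) != 0) {
      perror("setresuid");
      return;
    }
    printf("thread euid: %d\n", static_cast<int>(geteuid()));
  });
  t.join();

  // The main thread keeps its original credentials.
  printf("main euid:   %d\n", static_cast<int>(geteuid()));
  return 0;
}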
diff --git a/test/syscalls/linux/uname.cc b/test/syscalls/linux/uname.cc
deleted file mode 100644
index c52abef5c..000000000
--- a/test/syscalls/linux/uname.cc
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sched.h>
-#include <sys/utsname.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "absl/strings/string_view.h"
-#include "test/util/capability_util.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(UnameTest, Sanity) {
- struct utsname buf;
- ASSERT_THAT(uname(&buf), SyscallSucceeds());
- EXPECT_NE(strlen(buf.release), 0);
- EXPECT_NE(strlen(buf.version), 0);
- EXPECT_NE(strlen(buf.machine), 0);
- EXPECT_NE(strlen(buf.sysname), 0);
- EXPECT_NE(strlen(buf.nodename), 0);
- EXPECT_NE(strlen(buf.domainname), 0);
-}
-
-TEST(UnameTest, SetNames) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
-
- char hostname[65];
- ASSERT_THAT(sethostname("0123456789", 3), SyscallSucceeds());
- EXPECT_THAT(gethostname(hostname, sizeof(hostname)), SyscallSucceeds());
- EXPECT_EQ(absl::string_view(hostname), "012");
-
- ASSERT_THAT(sethostname("0123456789\0xxx", 11), SyscallSucceeds());
- EXPECT_THAT(gethostname(hostname, sizeof(hostname)), SyscallSucceeds());
- EXPECT_EQ(absl::string_view(hostname), "0123456789");
-
- ASSERT_THAT(sethostname("0123456789\0xxx", 12), SyscallSucceeds());
- EXPECT_THAT(gethostname(hostname, sizeof(hostname)), SyscallSucceeds());
- EXPECT_EQ(absl::string_view(hostname), "0123456789");
-
- constexpr char kHostname[] = "wubbalubba";
- ASSERT_THAT(sethostname(kHostname, sizeof(kHostname)), SyscallSucceeds());
-
- constexpr char kDomainname[] = "dubdub.com";
- ASSERT_THAT(setdomainname(kDomainname, sizeof(kDomainname)),
- SyscallSucceeds());
-
- struct utsname buf;
- EXPECT_THAT(uname(&buf), SyscallSucceeds());
- EXPECT_EQ(absl::string_view(buf.nodename), kHostname);
- EXPECT_EQ(absl::string_view(buf.domainname), kDomainname);
-
-  // gethostname() and getdomainname() are glibc wrappers that should return
-  // the same values as uname(2).
- EXPECT_THAT(gethostname(hostname, sizeof(hostname)), SyscallSucceeds());
- EXPECT_EQ(absl::string_view(hostname), kHostname);
-
- char domainname[65];
- EXPECT_THAT(getdomainname(domainname, sizeof(domainname)), SyscallSucceeds());
- EXPECT_EQ(absl::string_view(domainname), kDomainname);
-}
-
-TEST(UnameTest, UnprivilegedSetNames) {
- AutoCapability cap(CAP_SYS_ADMIN, false);
-
- EXPECT_THAT(sethostname("", 0), SyscallFailsWithErrno(EPERM));
- EXPECT_THAT(setdomainname("", 0), SyscallFailsWithErrno(EPERM));
-}
-
-TEST(UnameTest, UnshareUTS) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
-
- struct utsname init;
- ASSERT_THAT(uname(&init), SyscallSucceeds());
-
- ScopedThread thread = ScopedThread([&]() {
- EXPECT_THAT(unshare(CLONE_NEWUTS), SyscallSucceeds());
-
- constexpr char kHostname[] = "wubbalubba";
- EXPECT_THAT(sethostname(kHostname, sizeof(kHostname)), SyscallSucceeds());
-
- char hostname[65];
- EXPECT_THAT(gethostname(hostname, sizeof(hostname)), SyscallSucceeds());
- });
- thread.Join();
-
- struct utsname after;
- EXPECT_THAT(uname(&after), SyscallSucceeds());
- EXPECT_EQ(absl::string_view(after.nodename), init.nodename);
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
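For reference, a minimal standalone sketch of the per-task UTS namespace behavior that UnshareUTS relies on (not taken from the test sources; it assumes CAP_SYS_ADMIN, and the hostname "sandbox-host" is an illustrative placeholder):

#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <thread>
#include <unistd.h>

int main() {
  char before[65] = {};
  gethostname(before, sizeof(before) - 1);

  std::thread t([] {
    if (unshare(CLONE_NEWUTS) != 0) {  // Requires CAP_SYS_ADMIN.
      perror("unshare");
      return;
    }
    const char name[] = "sandbox-host";
    sethostname(name, strlen(name));
    char inside[65] = {};
    gethostname(inside, sizeof(inside) - 1);
    printf("inside the new UTS namespace: %s\n", inside);
  });
  t.join();

  // The original namespace, and therefore the main thread, is unaffected.
  char after[65] = {};
  gethostname(after, sizeof(after) - 1);
  printf("outside: %s (was %s)\n", after, before);
  return 0;
}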
diff --git a/test/syscalls/linux/unix_domain_socket_test_util.cc b/test/syscalls/linux/unix_domain_socket_test_util.cc
deleted file mode 100644
index b05ab2900..000000000
--- a/test/syscalls/linux/unix_domain_socket_test_util.cc
+++ /dev/null
@@ -1,351 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-
-#include <sys/un.h>
-
-#include <vector>
-
-#include "gtest/gtest.h"
-#include "absl/strings/str_cat.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-std::string DescribeUnixDomainSocketType(int type) {
- const char* type_str = nullptr;
- switch (type & ~(SOCK_NONBLOCK | SOCK_CLOEXEC)) {
- case SOCK_STREAM:
- type_str = "SOCK_STREAM";
- break;
- case SOCK_DGRAM:
- type_str = "SOCK_DGRAM";
- break;
- case SOCK_SEQPACKET:
- type_str = "SOCK_SEQPACKET";
- break;
- }
- if (!type_str) {
- return absl::StrCat("Unix domain socket with unknown type ", type);
- } else {
- return absl::StrCat(((type & SOCK_NONBLOCK) != 0) ? "non-blocking " : "",
- ((type & SOCK_CLOEXEC) != 0) ? "close-on-exec " : "",
- type_str, " Unix domain socket");
- }
-}
-
-SocketPairKind UnixDomainSocketPair(int type) {
- return SocketPairKind{DescribeUnixDomainSocketType(type), AF_UNIX, type, 0,
- SyscallSocketPairCreator(AF_UNIX, type, 0)};
-}
-
-SocketPairKind FilesystemBoundUnixDomainSocketPair(int type) {
- std::string description = absl::StrCat(DescribeUnixDomainSocketType(type),
- " created with filesystem binding");
- if ((type & SOCK_DGRAM) == SOCK_DGRAM) {
- return SocketPairKind{
- description, AF_UNIX, type, 0,
- FilesystemBidirectionalBindSocketPairCreator(AF_UNIX, type, 0)};
- }
- return SocketPairKind{
- description, AF_UNIX, type, 0,
- FilesystemAcceptBindSocketPairCreator(AF_UNIX, type, 0)};
-}
-
-SocketPairKind AbstractBoundUnixDomainSocketPair(int type) {
- std::string description =
- absl::StrCat(DescribeUnixDomainSocketType(type),
- " created with abstract namespace binding");
- if ((type & SOCK_DGRAM) == SOCK_DGRAM) {
- return SocketPairKind{
- description, AF_UNIX, type, 0,
- AbstractBidirectionalBindSocketPairCreator(AF_UNIX, type, 0)};
- }
- return SocketPairKind{description, AF_UNIX, type, 0,
- AbstractAcceptBindSocketPairCreator(AF_UNIX, type, 0)};
-}
-
-SocketPairKind SocketpairGoferUnixDomainSocketPair(int type) {
- std::string description = absl::StrCat(DescribeUnixDomainSocketType(type),
- " created with the socketpair gofer");
- return SocketPairKind{description, AF_UNIX, type, 0,
- SocketpairGoferSocketPairCreator(AF_UNIX, type, 0)};
-}
-
-SocketPairKind SocketpairGoferFileSocketPair(int type) {
- std::string description =
- absl::StrCat(((type & O_NONBLOCK) != 0) ? "non-blocking " : "",
- ((type & O_CLOEXEC) != 0) ? "close-on-exec " : "",
- "file socket created with the socketpair gofer");
- // The socketpair gofer always creates SOCK_STREAM sockets on open(2).
- return SocketPairKind{description, AF_UNIX, SOCK_STREAM, 0,
- SocketpairGoferFileSocketPairCreator(type)};
-}
-
-SocketPairKind FilesystemUnboundUnixDomainSocketPair(int type) {
- return SocketPairKind{absl::StrCat(DescribeUnixDomainSocketType(type),
- " unbound with a filesystem address"),
- AF_UNIX, type, 0,
- FilesystemUnboundSocketPairCreator(AF_UNIX, type, 0)};
-}
-
-SocketPairKind AbstractUnboundUnixDomainSocketPair(int type) {
- return SocketPairKind{
- absl::StrCat(DescribeUnixDomainSocketType(type),
- " unbound with an abstract namespace address"),
- AF_UNIX, type, 0, AbstractUnboundSocketPairCreator(AF_UNIX, type, 0)};
-}
-
-void SendSingleFD(int sock, int fd, char buf[], int buf_size) {
- ASSERT_NO_FATAL_FAILURE(SendFDs(sock, &fd, 1, buf, buf_size));
-}
-
-void SendFDs(int sock, int fds[], int fds_size, char buf[], int buf_size) {
- struct msghdr msg = {};
- std::vector<char> control(CMSG_SPACE(fds_size * sizeof(int)));
- msg.msg_control = &control[0];
- msg.msg_controllen = control.size();
-
- struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
- cmsg->cmsg_len = CMSG_LEN(fds_size * sizeof(int));
- cmsg->cmsg_level = SOL_SOCKET;
- cmsg->cmsg_type = SCM_RIGHTS;
- for (int i = 0; i < fds_size; i++) {
- memcpy(CMSG_DATA(cmsg) + i * sizeof(int), &fds[i], sizeof(int));
- }
-
- ASSERT_THAT(SendMsg(sock, &msg, buf, buf_size),
- IsPosixErrorOkAndHolds(buf_size));
-}
-
-void RecvSingleFD(int sock, int* fd, char buf[], int buf_size) {
- ASSERT_NO_FATAL_FAILURE(RecvFDs(sock, fd, 1, buf, buf_size, buf_size));
-}
-
-void RecvSingleFD(int sock, int* fd, char buf[], int buf_size,
- int expected_size) {
- ASSERT_NO_FATAL_FAILURE(RecvFDs(sock, fd, 1, buf, buf_size, expected_size));
-}
-
-void RecvFDs(int sock, int fds[], int fds_size, char buf[], int buf_size) {
- ASSERT_NO_FATAL_FAILURE(
- RecvFDs(sock, fds, fds_size, buf, buf_size, buf_size));
-}
-
-void RecvFDs(int sock, int fds[], int fds_size, char buf[], int buf_size,
- int expected_size, bool peek) {
- struct msghdr msg = {};
- std::vector<char> control(CMSG_SPACE(fds_size * sizeof(int)));
- msg.msg_control = &control[0];
- msg.msg_controllen = control.size();
-
- struct iovec iov;
- iov.iov_base = buf;
- iov.iov_len = buf_size;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- int flags = 0;
- if (peek) {
- flags |= MSG_PEEK;
- }
-
- ASSERT_THAT(RetryEINTR(recvmsg)(sock, &msg, flags),
- SyscallSucceedsWithValue(expected_size));
- struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
- ASSERT_NE(cmsg, nullptr);
- ASSERT_EQ(cmsg->cmsg_len, CMSG_LEN(fds_size * sizeof(int)));
- ASSERT_EQ(cmsg->cmsg_level, SOL_SOCKET);
- ASSERT_EQ(cmsg->cmsg_type, SCM_RIGHTS);
-
- for (int i = 0; i < fds_size; i++) {
- memcpy(&fds[i], CMSG_DATA(cmsg) + i * sizeof(int), sizeof(int));
- }
-}
-
-void RecvFDs(int sock, int fds[], int fds_size, char buf[], int buf_size,
- int expected_size) {
- ASSERT_NO_FATAL_FAILURE(
- RecvFDs(sock, fds, fds_size, buf, buf_size, expected_size, false));
-}
-
-void PeekSingleFD(int sock, int* fd, char buf[], int buf_size) {
- ASSERT_NO_FATAL_FAILURE(RecvFDs(sock, fd, 1, buf, buf_size, buf_size, true));
-}
-
-void RecvNoCmsg(int sock, char buf[], int buf_size, int expected_size) {
- struct msghdr msg = {};
- char control[CMSG_SPACE(sizeof(int)) + CMSG_SPACE(sizeof(struct ucred))];
- msg.msg_control = control;
- msg.msg_controllen = sizeof(control);
-
- struct iovec iov;
- iov.iov_base = buf;
- iov.iov_len = buf_size;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(recvmsg)(sock, &msg, 0),
- SyscallSucceedsWithValue(expected_size));
- struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
- EXPECT_EQ(cmsg, nullptr);
-}
-
-void SendNullCmsg(int sock, char buf[], int buf_size) {
- struct msghdr msg = {};
- msg.msg_control = nullptr;
- msg.msg_controllen = 0;
-
- ASSERT_THAT(SendMsg(sock, &msg, buf, buf_size),
- IsPosixErrorOkAndHolds(buf_size));
-}
-
-void SendCreds(int sock, ucred creds, char buf[], int buf_size) {
- struct msghdr msg = {};
-
- char control[CMSG_SPACE(sizeof(struct ucred))];
- msg.msg_control = control;
- msg.msg_controllen = sizeof(control);
-
- struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
- cmsg->cmsg_level = SOL_SOCKET;
- cmsg->cmsg_type = SCM_CREDENTIALS;
- cmsg->cmsg_len = CMSG_LEN(sizeof(struct ucred));
- memcpy(CMSG_DATA(cmsg), &creds, sizeof(struct ucred));
-
- ASSERT_THAT(SendMsg(sock, &msg, buf, buf_size),
- IsPosixErrorOkAndHolds(buf_size));
-}
-
-void SendCredsAndFD(int sock, ucred creds, int fd, char buf[], int buf_size) {
- struct msghdr msg = {};
-
- char control[CMSG_SPACE(sizeof(struct ucred)) + CMSG_SPACE(sizeof(int))] = {};
- msg.msg_control = control;
- msg.msg_controllen = sizeof(control);
-
- struct cmsghdr* cmsg1 = CMSG_FIRSTHDR(&msg);
- cmsg1->cmsg_level = SOL_SOCKET;
- cmsg1->cmsg_type = SCM_CREDENTIALS;
- cmsg1->cmsg_len = CMSG_LEN(sizeof(struct ucred));
- memcpy(CMSG_DATA(cmsg1), &creds, sizeof(struct ucred));
-
- struct cmsghdr* cmsg2 = CMSG_NXTHDR(&msg, cmsg1);
- cmsg2->cmsg_level = SOL_SOCKET;
- cmsg2->cmsg_type = SCM_RIGHTS;
- cmsg2->cmsg_len = CMSG_LEN(sizeof(int));
- memcpy(CMSG_DATA(cmsg2), &fd, sizeof(int));
-
- ASSERT_THAT(SendMsg(sock, &msg, buf, buf_size),
- IsPosixErrorOkAndHolds(buf_size));
-}
-
-void RecvCreds(int sock, ucred* creds, char buf[], int buf_size) {
- ASSERT_NO_FATAL_FAILURE(RecvCreds(sock, creds, buf, buf_size, buf_size));
-}
-
-void RecvCreds(int sock, ucred* creds, char buf[], int buf_size,
- int expected_size) {
- struct msghdr msg = {};
- char control[CMSG_SPACE(sizeof(struct ucred))];
- msg.msg_control = control;
- msg.msg_controllen = sizeof(control);
-
- struct iovec iov;
- iov.iov_base = buf;
- iov.iov_len = buf_size;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(recvmsg)(sock, &msg, 0),
- SyscallSucceedsWithValue(expected_size));
- struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
- ASSERT_NE(cmsg, nullptr);
- ASSERT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(struct ucred)));
- ASSERT_EQ(cmsg->cmsg_level, SOL_SOCKET);
- ASSERT_EQ(cmsg->cmsg_type, SCM_CREDENTIALS);
-
- memcpy(creds, CMSG_DATA(cmsg), sizeof(struct ucred));
-}
-
-void RecvCredsAndFD(int sock, ucred* creds, int* fd, char buf[], int buf_size) {
- struct msghdr msg = {};
- char control[CMSG_SPACE(sizeof(struct ucred)) + CMSG_SPACE(sizeof(int))];
- msg.msg_control = control;
- msg.msg_controllen = sizeof(control);
-
- struct iovec iov;
- iov.iov_base = buf;
- iov.iov_len = buf_size;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(recvmsg)(sock, &msg, 0),
- SyscallSucceedsWithValue(buf_size));
-
- struct cmsghdr* cmsg1 = CMSG_FIRSTHDR(&msg);
- ASSERT_NE(cmsg1, nullptr);
- ASSERT_EQ(cmsg1->cmsg_len, CMSG_LEN(sizeof(struct ucred)));
- ASSERT_EQ(cmsg1->cmsg_level, SOL_SOCKET);
- ASSERT_EQ(cmsg1->cmsg_type, SCM_CREDENTIALS);
- memcpy(creds, CMSG_DATA(cmsg1), sizeof(struct ucred));
-
- struct cmsghdr* cmsg2 = CMSG_NXTHDR(&msg, cmsg1);
- ASSERT_NE(cmsg2, nullptr);
- ASSERT_EQ(cmsg2->cmsg_len, CMSG_LEN(sizeof(int)));
- ASSERT_EQ(cmsg2->cmsg_level, SOL_SOCKET);
- ASSERT_EQ(cmsg2->cmsg_type, SCM_RIGHTS);
- memcpy(fd, CMSG_DATA(cmsg2), sizeof(int));
-}
-
-void RecvSingleFDUnaligned(int sock, int* fd, char buf[], int buf_size) {
- struct msghdr msg = {};
- char control[CMSG_SPACE(sizeof(int)) - sizeof(int)];
- msg.msg_control = control;
- msg.msg_controllen = sizeof(control);
-
- struct iovec iov;
- iov.iov_base = buf;
- iov.iov_len = buf_size;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
- ASSERT_THAT(RetryEINTR(recvmsg)(sock, &msg, 0),
- SyscallSucceedsWithValue(buf_size));
-
- struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
- ASSERT_NE(cmsg, nullptr);
- ASSERT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(int)));
- ASSERT_EQ(cmsg->cmsg_level, SOL_SOCKET);
- ASSERT_EQ(cmsg->cmsg_type, SCM_RIGHTS);
-
- memcpy(fd, CMSG_DATA(cmsg), sizeof(int));
-}
-
-void SetSoPassCred(int sock) {
- int one = 1;
- EXPECT_THAT(setsockopt(sock, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one)),
- SyscallSucceeds());
-}
-
-void UnsetSoPassCred(int sock) {
- int zero = 0;
- EXPECT_THAT(setsockopt(sock, SOL_SOCKET, SO_PASSCRED, &zero, sizeof(zero)),
- SyscallSucceeds());
-}
-
-} // namespace testing
-} // namespace gvisor
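For reference, a minimal standalone sketch of the SCM_RIGHTS descriptor-passing protocol that the SendFDs()/RecvFDs() helpers above wrap (not taken from the test sources; "/dev/null" is just a convenient descriptor to pass, and error handling is omitted):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

int main() {
  int sv[2];
  socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
  int fd_to_send = open("/dev/null", O_WRONLY);

  // Sender side: one dummy data byte plus an SCM_RIGHTS control message.
  char byte = 'x';
  iovec iov = {&byte, 1};
  char cbuf[CMSG_SPACE(sizeof(int))] = {};
  msghdr msg = {};
  msg.msg_iov = &iov;
  msg.msg_iovlen = 1;
  msg.msg_control = cbuf;
  msg.msg_controllen = sizeof(cbuf);
  cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
  cmsg->cmsg_level = SOL_SOCKET;
  cmsg->cmsg_type = SCM_RIGHTS;
  cmsg->cmsg_len = CMSG_LEN(sizeof(int));
  memcpy(CMSG_DATA(cmsg), &fd_to_send, sizeof(int));
  sendmsg(sv[0], &msg, 0);

  // Receiver side: the kernel installs a duplicate of the descriptor in the
  // receiving process's FD table and reports its number via CMSG_DATA.
  char rbyte;
  iovec riov = {&rbyte, 1};
  char rcbuf[CMSG_SPACE(sizeof(int))] = {};
  msghdr rmsg = {};
  rmsg.msg_iov = &riov;
  rmsg.msg_iovlen = 1;
  rmsg.msg_control = rcbuf;
  rmsg.msg_controllen = sizeof(rcbuf);
  recvmsg(sv[1], &rmsg, 0);

  int received_fd = -1;
  cmsghdr* rcmsg = CMSG_FIRSTHDR(&rmsg);
  if (rcmsg != nullptr && rcmsg->cmsg_level == SOL_SOCKET &&
      rcmsg->cmsg_type == SCM_RIGHTS) {
    memcpy(&received_fd, CMSG_DATA(rcmsg), sizeof(int));
  }
  printf("received fd %d\n", received_fd);

  close(fd_to_send);
  close(sv[0]);
  close(sv[1]);
  return 0;
}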
diff --git a/test/syscalls/linux/unix_domain_socket_test_util.h b/test/syscalls/linux/unix_domain_socket_test_util.h
deleted file mode 100644
index b8073db17..000000000
--- a/test/syscalls/linux/unix_domain_socket_test_util.h
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_UNIX_DOMAIN_SOCKET_TEST_UTIL_H_
-#define GVISOR_TEST_SYSCALLS_UNIX_DOMAIN_SOCKET_TEST_UTIL_H_
-
-#include <string>
-
-#include "test/syscalls/linux/socket_test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// DescribeUnixDomainSocketType returns a human-readable string explaining the
-// given Unix domain socket type.
-std::string DescribeUnixDomainSocketType(int type);
-
-// UnixDomainSocketPair returns a SocketPairKind that represents SocketPairs
-// created by invoking the socketpair() syscall with AF_UNIX and the given type.
-SocketPairKind UnixDomainSocketPair(int type);
-
-// FilesystemBoundUnixDomainSocketPair returns a SocketPairKind that represents
-// SocketPairs created with bind() and accept() syscalls with a temp file path,
-// AF_UNIX and the given type.
-SocketPairKind FilesystemBoundUnixDomainSocketPair(int type);
-
-// AbstractBoundUnixDomainSocketPair returns a SocketPairKind that represents
-// SocketPairs created with bind() and accept() syscalls with a temp abstract
-// path, AF_UNIX and the given type.
-SocketPairKind AbstractBoundUnixDomainSocketPair(int type);
-
-// SocketpairGoferUnixDomainSocketPair returns a SocketPairKind that was created
-// with two sockets connected to the socketpair gofer.
-SocketPairKind SocketpairGoferUnixDomainSocketPair(int type);
-
-// SocketpairGoferFileSocketPair returns a SocketPairKind that was created with
-// two open() calls on paths backed by the socketpair gofer.
-SocketPairKind SocketpairGoferFileSocketPair(int type);
-
-// FilesystemUnboundUnixDomainSocketPair returns a SocketPairKind that
-// represents two unbound sockets and a filesystem path for binding.
-SocketPairKind FilesystemUnboundUnixDomainSocketPair(int type);
-
-// AbstractUnboundUnixDomainSocketPair returns a SocketPairKind that represents
-// two unbound sockets and an abstract namespace path for binding.
-SocketPairKind AbstractUnboundUnixDomainSocketPair(int type);
-
-// SendSingleFD sends both a single FD and some data over a unix domain socket
-// specified by an FD. Note that calls to this function must be wrapped in
-// ASSERT_NO_FATAL_FAILURE for internal assertions to halt the test.
-void SendSingleFD(int sock, int fd, char buf[], int buf_size);
-
-// SendFDs sends an arbitrary number of FDs and some data over a unix domain
-// socket specified by an FD. Note that calls to this function must be wrapped
-// in ASSERT_NO_FATAL_FAILURE for internal assertions to halt the test.
-void SendFDs(int sock, int fds[], int fds_size, char buf[], int buf_size);
-
-// RecvSingleFD receives both a single FD and some data over a unix domain
-// socket specified by an FD. Note that calls to this function must be wrapped
-// in ASSERT_NO_FATAL_FAILURE for internal assertions to halt the test.
-void RecvSingleFD(int sock, int* fd, char buf[], int buf_size);
-
-// RecvSingleFD receives both a single FD and some data over a unix domain
-// socket specified by an FD. This version allows the expected amount of data
-// received to be different than the buffer size. Note that calls to this
-// function must be wrapped in ASSERT_NO_FATAL_FAILURE for internal assertions
-// to halt the test.
-void RecvSingleFD(int sock, int* fd, char buf[], int buf_size,
- int expected_size);
-
-// PeekSingleFD peeks at both a single FD and some data over a unix domain
-// socket specified by an FD. Note that calls to this function must be wrapped
-// in ASSERT_NO_FATAL_FAILURE for internal assertions to halt the test.
-void PeekSingleFD(int sock, int* fd, char buf[], int buf_size);
-
-// RecvFDs receives both an arbitrary number of FDs and some data over a unix
-// domain socket specified by an FD. Note that calls to this function must be
-// wrapped in ASSERT_NO_FATAL_FAILURE for internal assertions to halt the test.
-void RecvFDs(int sock, int fds[], int fds_size, char buf[], int buf_size);
-
-// RecvFDs receives both an arbitrary number of FDs and some data over a unix
-// domain socket specified by an FD. This version allows the expected amount of
-// data received to be different than the buffer size. Note that calls to this
-// function must be wrapped in ASSERT_NO_FATAL_FAILURE for internal assertions
-// to halt the test.
-void RecvFDs(int sock, int fds[], int fds_size, char buf[], int buf_size,
- int expected_size);
-
-// RecvNoCmsg receives some data over a unix domain socket specified by an FD
-// and asserts that no control messages are available for receiving. Note that
-// calls to this function must be wrapped in ASSERT_NO_FATAL_FAILURE for
-// internal assertions to halt the test.
-void RecvNoCmsg(int sock, char buf[], int buf_size, int expected_size);
-
-inline void RecvNoCmsg(int sock, char buf[], int buf_size) {
- RecvNoCmsg(sock, buf, buf_size, buf_size);
-}
-
-// SendCreds sends the credentials of the current process and some data over a
-// unix domain socket specified by an FD. Note that calls to this function must
-// be wrapped in ASSERT_NO_FATAL_FAILURE for internal assertions to halt the
-// test.
-void SendCreds(int sock, ucred creds, char buf[], int buf_size);
-
-// SendCredsAndFD sends the credentials of the current process, a single FD, and
-// some data over a unix domain socket specified by an FD. Note that calls to
-// this function must be wrapped in ASSERT_NO_FATAL_FAILURE for internal
-// assertions to halt the test.
-void SendCredsAndFD(int sock, ucred creds, int fd, char buf[], int buf_size);
-
-// RecvCreds receives some credentials and some data over a unix domain socket
-// specified by an FD. Note that calls to this function must be wrapped in
-// ASSERT_NO_FATAL_FAILURE for internal assertions to halt the test.
-void RecvCreds(int sock, ucred* creds, char buf[], int buf_size);
-
-// RecvCreds receives some credentials and some data over a unix domain socket
-// specified by an FD. This version allows the expected amount of data received
-// to be different than the buffer size. Note that calls to this function must
-// be wrapped in ASSERT_NO_FATAL_FAILURE for internal assertions to halt the
-// test.
-void RecvCreds(int sock, ucred* creds, char buf[], int buf_size,
- int expected_size);
-
-// RecvCredsAndFD receives some credentials, a single FD, and some data over a
-// unix domain socket specified by an FD. Note that calls to this function must
-// be wrapped in ASSERT_NO_FATAL_FAILURE for internal assertions to halt the
-// test.
-void RecvCredsAndFD(int sock, ucred* creds, int* fd, char buf[], int buf_size);
-
-// SendNullCmsg sends a null control message and some data over a unix domain
-// socket specified by an FD. Note that calls to this function must be wrapped
-// in ASSERT_NO_FATAL_FAILURE for internal assertions to halt the test.
-void SendNullCmsg(int sock, char buf[], int buf_size);
-
-// RecvSingleFDUnaligned receives both a single FD and some data over a unix domain
-// socket specified by an FD. This function does not obey the spec, but Linux
-// allows it and the apphosting code depends on this quirk. Note that calls to
-// this function must be wrapped in ASSERT_NO_FATAL_FAILURE for internal
-// assertions to halt the test.
-void RecvSingleFDUnaligned(int sock, int* fd, char buf[], int buf_size);
-
-// SetSoPassCred sets the SO_PASSCRED option on the specified socket.
-void SetSoPassCred(int sock);
-
-// UnsetSoPassCred clears the SO_PASSCRED option on the specified socket.
-void UnsetSoPassCred(int sock);
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_UNIX_DOMAIN_SOCKET_TEST_UTIL_H_
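The header above declares helpers that exchange file descriptors and process credentials over AF_UNIX sockets via control messages; each helper asserts internally, which is why callers wrap it in ASSERT_NO_FATAL_FAILURE. The sketch below is illustrative only (not part of this patch) and assumes the SocketPairKind/SocketPair API from socket_test_util.h, including Create() and the first_fd()/second_fd() accessors.

// Illustrative sketch, not part of the patch: pass one FD plus a payload
// between the two ends of a Unix domain socket pair using the helpers above.
TEST(UnixDomainSocketUtilExample, PassSingleFDAndData) {
  auto sockets = ASSERT_NO_ERRNO_AND_VALUE(
      UnixDomainSocketPair(SOCK_STREAM).Create());

  // Send a descriptor (stdout, as a stand-in) and a small payload.
  char sent[] = "hello";
  ASSERT_NO_FATAL_FAILURE(
      SendSingleFD(sockets->first_fd(), STDOUT_FILENO, sent, sizeof(sent)));

  // Receive both the descriptor and the payload on the other end.
  int received_fd = -1;
  char received[sizeof(sent)] = {};
  ASSERT_NO_FATAL_FAILURE(RecvSingleFD(sockets->second_fd(), &received_fd,
                                       received, sizeof(received)));
  EXPECT_THAT(close(received_fd), SyscallSucceeds());
}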
diff --git a/test/syscalls/linux/unlink.cc b/test/syscalls/linux/unlink.cc
deleted file mode 100644
index 75dcf4465..000000000
--- a/test/syscalls/linux/unlink.cc
+++ /dev/null
@@ -1,228 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <unistd.h>
-
-#include "gtest/gtest.h"
-#include "absl/strings/str_cat.h"
-#include "test/util/capability_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(UnlinkTest, IsDir) {
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- EXPECT_THAT(unlink(dir.path().c_str()), SyscallFailsWithErrno(EISDIR));
-}
-
-TEST(UnlinkTest, DirNotEmpty) {
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- int fd;
- std::string path = JoinPath(dir.path(), "ExistingFile");
- EXPECT_THAT(fd = open(path.c_str(), O_RDWR | O_CREAT, 0666),
- SyscallSucceeds());
- EXPECT_THAT(close(fd), SyscallSucceeds());
- EXPECT_THAT(rmdir(dir.path().c_str()), SyscallFailsWithErrno(ENOTEMPTY));
-}
-
-TEST(UnlinkTest, Rmdir) {
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- EXPECT_THAT(rmdir(dir.path().c_str()), SyscallSucceeds());
-}
-
-TEST(UnlinkTest, AtDir) {
- int dirfd;
- auto tmpdir = GetAbsoluteTestTmpdir();
- EXPECT_THAT(dirfd = open(tmpdir.c_str(), O_DIRECTORY, 0), SyscallSucceeds());
-
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(tmpdir));
- auto dir_relpath =
- ASSERT_NO_ERRNO_AND_VALUE(GetRelativePath(tmpdir, dir.path()));
- EXPECT_THAT(unlinkat(dirfd, dir_relpath.c_str(), AT_REMOVEDIR),
- SyscallSucceeds());
- ASSERT_THAT(close(dirfd), SyscallSucceeds());
-}
-
-TEST(UnlinkTest, AtDirDegradedPermissions) {
- // Drop capabilities that allow us to override file and directory permissions.
- AutoCapability cap1(CAP_DAC_OVERRIDE, false);
- AutoCapability cap2(CAP_DAC_READ_SEARCH, false);
-
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- int dirfd;
- ASSERT_THAT(dirfd = open(dir.path().c_str(), O_DIRECTORY, 0),
- SyscallSucceeds());
-
- std::string sub_dir = JoinPath(dir.path(), "NewDir");
- EXPECT_THAT(mkdir(sub_dir.c_str(), 0755), SyscallSucceeds());
- EXPECT_THAT(fchmod(dirfd, 0444), SyscallSucceeds());
- EXPECT_THAT(unlinkat(dirfd, "NewDir", AT_REMOVEDIR),
- SyscallFailsWithErrno(EACCES));
- ASSERT_THAT(close(dirfd), SyscallSucceeds());
-}
-
-// Files cannot be unlinked if the parent is not writable and executable.
-TEST(UnlinkTest, ParentDegradedPermissions) {
- // Drop capabilities that allow us to override file and directory permissions.
- AutoCapability cap1(CAP_DAC_OVERRIDE, false);
- AutoCapability cap2(CAP_DAC_READ_SEARCH, false);
-
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir.path()));
-
- ASSERT_THAT(chmod(dir.path().c_str(), 0000), SyscallSucceeds());
-
- struct stat st;
- ASSERT_THAT(stat(file.path().c_str(), &st), SyscallFailsWithErrno(EACCES));
- ASSERT_THAT(unlinkat(AT_FDCWD, file.path().c_str(), 0),
- SyscallFailsWithErrno(EACCES));
-
- // Non-existent files also return EACCES.
- const std::string nonexist = JoinPath(dir.path(), "doesnotexist");
- ASSERT_THAT(stat(nonexist.c_str(), &st), SyscallFailsWithErrno(EACCES));
- ASSERT_THAT(unlinkat(AT_FDCWD, nonexist.c_str(), 0),
- SyscallFailsWithErrno(EACCES));
-}
-
-TEST(UnlinkTest, AtBad) {
- int dirfd;
- EXPECT_THAT(dirfd = open(GetAbsoluteTestTmpdir().c_str(), O_DIRECTORY, 0),
- SyscallSucceeds());
-
- // Try removing a directory as a file.
- std::string path = JoinPath(GetAbsoluteTestTmpdir(), "NewDir");
- EXPECT_THAT(mkdir(path.c_str(), 0755), SyscallSucceeds());
- EXPECT_THAT(unlinkat(dirfd, "NewDir", 0), SyscallFailsWithErrno(EISDIR));
- EXPECT_THAT(unlinkat(dirfd, "NewDir", AT_REMOVEDIR), SyscallSucceeds());
-
- // Try removing a file as a directory.
- int fd;
- EXPECT_THAT(fd = openat(dirfd, "UnlinkAtFile", O_RDWR | O_CREAT, 0666),
- SyscallSucceeds());
- EXPECT_THAT(unlinkat(dirfd, "UnlinkAtFile", AT_REMOVEDIR),
- SyscallFailsWithErrno(ENOTDIR));
- EXPECT_THAT(unlinkat(dirfd, "UnlinkAtFile/", 0),
- SyscallFailsWithErrno(ENOTDIR));
- ASSERT_THAT(close(fd), SyscallSucceeds());
- EXPECT_THAT(unlinkat(dirfd, "UnlinkAtFile", 0), SyscallSucceeds());
-
- // Cleanup.
- ASSERT_THAT(close(dirfd), SyscallSucceeds());
-}
-
-TEST(UnlinkTest, AbsTmpFile) {
- int fd;
- std::string path = JoinPath(GetAbsoluteTestTmpdir(), "ExistingFile");
- EXPECT_THAT(fd = open(path.c_str(), O_RDWR | O_CREAT, 0666),
- SyscallSucceeds());
- EXPECT_THAT(close(fd), SyscallSucceeds());
- EXPECT_THAT(unlink(path.c_str()), SyscallSucceeds());
-}
-
-TEST(UnlinkTest, TooLongName) {
- EXPECT_THAT(unlink(std::vector<char>(16384, '0').data()),
- SyscallFailsWithErrno(ENAMETOOLONG));
-}
-
-TEST(UnlinkTest, BadNamePtr) {
- EXPECT_THAT(unlink(reinterpret_cast<char*>(1)),
- SyscallFailsWithErrno(EFAULT));
-}
-
-TEST(UnlinkTest, AtFile) {
- int dirfd;
- EXPECT_THAT(dirfd = open(GetAbsoluteTestTmpdir().c_str(), O_DIRECTORY, 0666),
- SyscallSucceeds());
- int fd;
- EXPECT_THAT(fd = openat(dirfd, "UnlinkAtFile", O_RDWR | O_CREAT, 0666),
- SyscallSucceeds());
- EXPECT_THAT(close(fd), SyscallSucceeds());
- EXPECT_THAT(unlinkat(dirfd, "UnlinkAtFile", 0), SyscallSucceeds());
-}
-
-TEST(UnlinkTest, OpenFile) {
-  // We can't save unlinked files unless they are on tmpfs.
- const DisableSave ds;
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- int fd;
- EXPECT_THAT(fd = open(file.path().c_str(), O_RDWR, 0666), SyscallSucceeds());
- EXPECT_THAT(unlink(file.path().c_str()), SyscallSucceeds());
- EXPECT_THAT(close(fd), SyscallSucceeds());
-}
-
-TEST(UnlinkTest, CannotRemoveDots) {
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const std::string self = JoinPath(file.path(), ".");
- ASSERT_THAT(unlink(self.c_str()), SyscallFailsWithErrno(ENOTDIR));
- const std::string parent = JoinPath(file.path(), "..");
- ASSERT_THAT(unlink(parent.c_str()), SyscallFailsWithErrno(ENOTDIR));
-}
-
-TEST(UnlinkTest, CannotRemoveRoot) {
- ASSERT_THAT(unlinkat(-1, "/", AT_REMOVEDIR), SyscallFailsWithErrno(EBUSY));
-}
-
-TEST(UnlinkTest, CannotRemoveRootWithAtDir) {
- const FileDescriptor dirfd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(GetAbsoluteTestTmpdir(), O_DIRECTORY, 0666));
- ASSERT_THAT(unlinkat(dirfd.get(), "/", AT_REMOVEDIR),
- SyscallFailsWithErrno(EBUSY));
-}
-
-TEST(RmdirTest, CannotRemoveDots) {
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const std::string self = JoinPath(dir.path(), ".");
- ASSERT_THAT(rmdir(self.c_str()), SyscallFailsWithErrno(EINVAL));
- const std::string parent = JoinPath(dir.path(), "..");
- ASSERT_THAT(rmdir(parent.c_str()), SyscallFailsWithErrno(ENOTEMPTY));
-}
-
-TEST(RmdirTest, CanRemoveWithTrailingSlashes) {
- auto dir1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const std::string slash = absl::StrCat(dir1.path(), "/");
- ASSERT_THAT(rmdir(slash.c_str()), SyscallSucceeds());
- auto dir2 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const std::string slashslash = absl::StrCat(dir2.path(), "//");
- ASSERT_THAT(rmdir(slashslash.c_str()), SyscallSucceeds());
-}
-
-TEST(UnlinkTest, UnlinkAtEmptyPath) {
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir.path()));
- auto fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR, 0666));
- EXPECT_THAT(unlinkat(fd.get(), "", 0), SyscallFailsWithErrno(ENOENT));
-
- auto dirInDir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(dir.path()));
- auto dirFD = ASSERT_NO_ERRNO_AND_VALUE(
- Open(dirInDir.path(), O_RDONLY | O_DIRECTORY, 0666));
- EXPECT_THAT(unlinkat(dirFD.get(), "", AT_REMOVEDIR),
- SyscallFailsWithErrno(ENOENT));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/unshare.cc b/test/syscalls/linux/unshare.cc
deleted file mode 100644
index e32619efe..000000000
--- a/test/syscalls/linux/unshare.cc
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <sched.h>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/synchronization/mutex.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(UnshareTest, AllowsZeroFlags) {
- ASSERT_THAT(unshare(0), SyscallSucceeds());
-}
-
-TEST(UnshareTest, ThreadFlagFailsIfMultithreaded) {
- absl::Mutex mu;
- bool finished = false;
- ScopedThread t([&] {
- mu.Lock();
- mu.Await(absl::Condition(&finished));
- mu.Unlock();
- });
- ASSERT_THAT(unshare(CLONE_THREAD), SyscallFailsWithErrno(EINVAL));
- mu.Lock();
- finished = true;
- mu.Unlock();
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/utimes.cc b/test/syscalls/linux/utimes.cc
deleted file mode 100644
index e711d6657..000000000
--- a/test/syscalls/linux/utimes.cc
+++ /dev/null
@@ -1,320 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <fcntl.h>
-#include <sys/stat.h>
-#include <sys/syscall.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <time.h>
-#include <unistd.h>
-#include <utime.h>
-
-#include <string>
-
-#include "absl/time/time.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// TimeBoxed runs fn, setting before and after to (coarse realtime) times
-// intended to bracket when fn started and completed, respectively.
-//
-// fn may be called more than once if the clock is adjusted.
-void TimeBoxed(absl::Time* before, absl::Time* after,
- std::function<void()> const& fn) {
- do {
- // N.B. utimes and friends use CLOCK_REALTIME_COARSE for setting time (i.e.,
- // current_kernel_time()). See fs/attr.c:notify_change.
- //
- // notify_change truncates the time to a multiple of s_time_gran, but most
- // filesystems set it to 1, so we don't do any truncation.
- struct timespec ts;
- EXPECT_THAT(clock_gettime(CLOCK_REALTIME_COARSE, &ts), SyscallSucceeds());
- // FIXME(b/132819225): gVisor filesystem timestamps inconsistently use the
- // internal or host clock, which may diverge slightly. Allow some slack on
- // times to account for the difference.
- *before = absl::TimeFromTimespec(ts) - absl::Seconds(1);
-
- fn();
-
- EXPECT_THAT(clock_gettime(CLOCK_REALTIME_COARSE, &ts), SyscallSucceeds());
- *after = absl::TimeFromTimespec(ts) + absl::Seconds(1);
-
- if (*after < *before) {
- // Clock jumped backwards; retry.
- //
- // Technically this misses jumps small enough to keep after > before,
- // which could lead to test failures, but that is very unlikely to happen.
- continue;
- }
- } while (*after < *before);
-}
-
-void TestUtimesOnPath(std::string const& path) {
- struct stat statbuf;
-
- struct timeval times[2] = {{10, 0}, {20, 0}};
- EXPECT_THAT(utimes(path.c_str(), times), SyscallSucceeds());
- EXPECT_THAT(stat(path.c_str(), &statbuf), SyscallSucceeds());
- EXPECT_EQ(10, statbuf.st_atime);
- EXPECT_EQ(20, statbuf.st_mtime);
-
- absl::Time before;
- absl::Time after;
- TimeBoxed(&before, &after, [&] {
- EXPECT_THAT(utimes(path.c_str(), nullptr), SyscallSucceeds());
- });
-
- EXPECT_THAT(stat(path.c_str(), &statbuf), SyscallSucceeds());
-
- absl::Time atime = absl::TimeFromTimespec(statbuf.st_atim);
- EXPECT_GE(atime, before);
- EXPECT_LE(atime, after);
-
- absl::Time mtime = absl::TimeFromTimespec(statbuf.st_mtim);
- EXPECT_GE(mtime, before);
- EXPECT_LE(mtime, after);
-}
-
-TEST(UtimesTest, OnFile) {
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- TestUtimesOnPath(f.path());
-}
-
-TEST(UtimesTest, OnDir) {
- auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- TestUtimesOnPath(dir.path());
-}
-
-TEST(UtimesTest, MissingPath) {
- auto path = NewTempAbsPath();
- struct timeval times[2] = {{10, 0}, {20, 0}};
- EXPECT_THAT(utimes(path.c_str(), times), SyscallFailsWithErrno(ENOENT));
-}
-
-void TestFutimesat(int dirFd, std::string const& path) {
- struct stat statbuf;
-
- struct timeval times[2] = {{10, 0}, {20, 0}};
- EXPECT_THAT(futimesat(dirFd, path.c_str(), times), SyscallSucceeds());
- EXPECT_THAT(fstatat(dirFd, path.c_str(), &statbuf, 0), SyscallSucceeds());
- EXPECT_EQ(10, statbuf.st_atime);
- EXPECT_EQ(20, statbuf.st_mtime);
-
- absl::Time before;
- absl::Time after;
- TimeBoxed(&before, &after, [&] {
- EXPECT_THAT(futimesat(dirFd, path.c_str(), nullptr), SyscallSucceeds());
- });
-
- EXPECT_THAT(fstatat(dirFd, path.c_str(), &statbuf, 0), SyscallSucceeds());
-
- absl::Time atime = absl::TimeFromTimespec(statbuf.st_atim);
- EXPECT_GE(atime, before);
- EXPECT_LE(atime, after);
-
- absl::Time mtime = absl::TimeFromTimespec(statbuf.st_mtim);
- EXPECT_GE(mtime, before);
- EXPECT_LE(mtime, after);
-}
-
-TEST(FutimesatTest, OnAbsPath) {
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- TestFutimesat(0, f.path());
-}
-
-TEST(FutimesatTest, OnRelPath) {
- auto d = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(d.path()));
- auto basename = std::string(Basename(f.path()));
- const FileDescriptor dirFd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(d.path(), O_RDONLY | O_DIRECTORY));
- TestFutimesat(dirFd.get(), basename);
-}
-
-TEST(FutimesatTest, InvalidNsec) {
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- struct timeval times[4][2] = {{
- {0, 1}, // Valid
- {1, static_cast<int64_t>(1e7)} // Invalid
- },
- {
- {1, static_cast<int64_t>(1e7)}, // Invalid
- {0, 1} // Valid
- },
- {
- {0, 1}, // Valid
- {1, -1} // Invalid
- },
- {
- {1, -1}, // Invalid
- {0, 1} // Valid
- }};
-
- for (unsigned int i = 0; i < sizeof(times) / sizeof(times[0]); i++) {
- std::cout << "test:" << i << "\n";
- EXPECT_THAT(futimesat(0, f.path().c_str(), times[i]),
- SyscallFailsWithErrno(EINVAL));
- }
-}
-
-void TestUtimensat(int dirFd, std::string const& path) {
- struct stat statbuf;
- const struct timespec times[2] = {{10, 0}, {20, 0}};
- EXPECT_THAT(utimensat(dirFd, path.c_str(), times, 0), SyscallSucceeds());
- EXPECT_THAT(fstatat(dirFd, path.c_str(), &statbuf, 0), SyscallSucceeds());
- EXPECT_EQ(10, statbuf.st_atime);
- EXPECT_EQ(20, statbuf.st_mtime);
-
- // Test setting with UTIME_NOW and UTIME_OMIT.
- struct stat statbuf2;
- const struct timespec times2[2] = {
- {0, UTIME_NOW}, // Should set atime to now.
- {0, UTIME_OMIT} // Should not change mtime.
- };
-
- absl::Time before;
- absl::Time after;
- TimeBoxed(&before, &after, [&] {
- EXPECT_THAT(utimensat(dirFd, path.c_str(), times2, 0), SyscallSucceeds());
- });
-
- EXPECT_THAT(fstatat(dirFd, path.c_str(), &statbuf2, 0), SyscallSucceeds());
-
- absl::Time atime2 = absl::TimeFromTimespec(statbuf2.st_atim);
- EXPECT_GE(atime2, before);
- EXPECT_LE(atime2, after);
-
- absl::Time mtime = absl::TimeFromTimespec(statbuf.st_mtim);
- absl::Time mtime2 = absl::TimeFromTimespec(statbuf2.st_mtim);
- // mtime should not be changed.
- EXPECT_EQ(mtime, mtime2);
-
- // Test setting with times = NULL. Should set both atime and mtime to the
- // current system time.
- struct stat statbuf3;
- TimeBoxed(&before, &after, [&] {
- EXPECT_THAT(utimensat(dirFd, path.c_str(), nullptr, 0), SyscallSucceeds());
- });
-
- EXPECT_THAT(fstatat(dirFd, path.c_str(), &statbuf3, 0), SyscallSucceeds());
-
- absl::Time atime3 = absl::TimeFromTimespec(statbuf3.st_atim);
- EXPECT_GE(atime3, before);
- EXPECT_LE(atime3, after);
-
- absl::Time mtime3 = absl::TimeFromTimespec(statbuf3.st_mtim);
- EXPECT_GE(mtime3, before);
- EXPECT_LE(mtime3, after);
-
- // TODO(b/187074006): atime/mtime may differ with local_gofer_uncached.
- // EXPECT_EQ(atime3, mtime3);
-}
-
-TEST(UtimensatTest, OnAbsPath) {
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- TestUtimensat(0, f.path());
-}
-
-TEST(UtimensatTest, OnRelPath) {
- auto d = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(d.path()));
- auto basename = std::string(Basename(f.path()));
- const FileDescriptor dirFd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(d.path(), O_RDONLY | O_DIRECTORY));
- TestUtimensat(dirFd.get(), basename);
-}
-
-TEST(UtimensatTest, OmitNoop) {
-  // Setting both timespecs to UTIME_OMIT on a nonexistent path should succeed.
- auto path = NewTempAbsPath();
- const struct timespec times[2] = {{0, UTIME_OMIT}, {0, UTIME_OMIT}};
- EXPECT_THAT(utimensat(0, path.c_str(), times, 0), SyscallSucceeds());
-}
-
-// Verify that we can actually set atime and mtime to 0.
-TEST(UtimeTest, ZeroAtimeandMtime) {
- const auto tmp_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const auto tmp_file =
- ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(tmp_dir.path()));
-
- // Stat the file before and after updating atime and mtime.
- struct stat stat_before = {};
- EXPECT_THAT(stat(tmp_file.path().c_str(), &stat_before), SyscallSucceeds());
-
- ASSERT_NE(stat_before.st_atime, 0);
- ASSERT_NE(stat_before.st_mtime, 0);
-
- const struct utimbuf times = {}; // Zero for both atime and mtime.
- EXPECT_THAT(utime(tmp_file.path().c_str(), &times), SyscallSucceeds());
-
- struct stat stat_after = {};
- EXPECT_THAT(stat(tmp_file.path().c_str(), &stat_after), SyscallSucceeds());
-
- // We should see the atime and mtime changed when we set them to 0.
- ASSERT_EQ(stat_after.st_atime, 0);
- ASSERT_EQ(stat_after.st_mtime, 0);
-}
-
-TEST(UtimensatTest, InvalidNsec) {
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- struct timespec times[2][2] = {
- {
- {0, UTIME_OMIT}, // Valid
- {2, static_cast<int64_t>(1e10)} // Invalid
- },
- {
- {2, static_cast<int64_t>(1e10)}, // Invalid
- {0, UTIME_OMIT} // Valid
- }};
-
- for (unsigned int i = 0; i < sizeof(times) / sizeof(times[0]); i++) {
- std::cout << "test:" << i << "\n";
- EXPECT_THAT(utimensat(0, f.path().c_str(), times[i], 0),
- SyscallFailsWithErrno(EINVAL));
- }
-}
-
-TEST(Utimensat, NullPath) {
- // From man utimensat(2):
- // "the Linux utimensat() system call implements a nonstandard feature: if
- // pathname is NULL, then the call modifies the timestamps of the file
- // referred to by the file descriptor dirfd (which may refer to any type of
- // file).
- // Note, however, that the glibc wrapper for utimensat() disallows
- // passing NULL as the value for file: the wrapper function returns the error
- // EINVAL in this case."
- auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(f.path(), O_RDWR));
- struct stat statbuf;
- const struct timespec times[2] = {{10, 0}, {20, 0}};
- // Call syscall directly.
- EXPECT_THAT(syscall(SYS_utimensat, fd.get(), NULL, times, 0),
- SyscallSucceeds());
- EXPECT_THAT(fstatat(0, f.path().c_str(), &statbuf, 0), SyscallSucceeds());
- EXPECT_EQ(10, statbuf.st_atime);
- EXPECT_EQ(20, statbuf.st_mtime);
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/vdso.cc b/test/syscalls/linux/vdso.cc
deleted file mode 100644
index 19c80add8..000000000
--- a/test/syscalls/linux/vdso.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <string.h>
-#include <sys/mman.h>
-
-#include <algorithm>
-
-#include "gtest/gtest.h"
-#include "test/util/fs_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/proc_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// Ensure that the vvar page cannot be made writable.
-TEST(VvarTest, WriteVvar) {
- auto contents = ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/self/maps"));
- auto maps = ASSERT_NO_ERRNO_AND_VALUE(ParseProcMaps(contents));
- auto it = std::find_if(maps.begin(), maps.end(), [](const ProcMapsEntry& e) {
- return e.filename == "[vvar]";
- });
-
- SKIP_IF(it == maps.end());
- EXPECT_THAT(mprotect(reinterpret_cast<void*>(it->start), kPageSize,
- PROT_READ | PROT_WRITE),
- SyscallFailsWithErrno(EACCES));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/vdso_clock_gettime.cc b/test/syscalls/linux/vdso_clock_gettime.cc
deleted file mode 100644
index 2a8699a7b..000000000
--- a/test/syscalls/linux/vdso_clock_gettime.cc
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <stdint.h>
-#include <sys/time.h>
-#include <syscall.h>
-#include <time.h>
-#include <unistd.h>
-
-#include <map>
-#include <string>
-#include <utility>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/strings/numbers.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-std::string PrintClockId(::testing::TestParamInfo<clockid_t> info) {
- switch (info.param) {
- case CLOCK_MONOTONIC:
- return "CLOCK_MONOTONIC";
- case CLOCK_BOOTTIME:
- return "CLOCK_BOOTTIME";
- default:
- return absl::StrCat(info.param);
- }
-}
-
-class MonotonicVDSOClockTest : public ::testing::TestWithParam<clockid_t> {};
-
-TEST_P(MonotonicVDSOClockTest, IsCorrect) {
- // The VDSO implementation of clock_gettime() uses the TSC. On KVM, sentry and
- // application TSCs can be very desynchronized; see
- // sentry/platform/kvm/kvm.vCPU.setSystemTime().
- SKIP_IF(GvisorPlatform() == Platform::kKVM);
-
- // Check that when we alternate readings from the clock_gettime syscall and
- // the VDSO's implementation, we observe the combined sequence as being
- // monotonic.
- struct timespec tvdso, tsys;
- absl::Time vdso_time, sys_time;
- ASSERT_THAT(syscall(__NR_clock_gettime, GetParam(), &tsys),
- SyscallSucceeds());
- sys_time = absl::TimeFromTimespec(tsys);
- auto end = absl::Now() + absl::Seconds(10);
- while (absl::Now() < end) {
- ASSERT_THAT(clock_gettime(GetParam(), &tvdso), SyscallSucceeds());
- vdso_time = absl::TimeFromTimespec(tvdso);
- EXPECT_LE(sys_time, vdso_time);
- ASSERT_THAT(syscall(__NR_clock_gettime, GetParam(), &tsys),
- SyscallSucceeds());
- sys_time = absl::TimeFromTimespec(tsys);
- EXPECT_LE(vdso_time, sys_time);
- }
-}
-
-INSTANTIATE_TEST_SUITE_P(ClockGettime, MonotonicVDSOClockTest,
- ::testing::Values(CLOCK_MONOTONIC, CLOCK_BOOTTIME),
- PrintClockId);
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/verity_getdents.cc b/test/syscalls/linux/verity_getdents.cc
deleted file mode 100644
index 2eafc3dd3..000000000
--- a/test/syscalls/linux/verity_getdents.cc
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <dirent.h>
-#include <stdint.h>
-#include <stdlib.h>
-#include <sys/mount.h>
-#include <sys/syscall.h>
-
-#include <string>
-#include <vector>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "test/util/capability_util.h"
-#include "test/util/fs_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-#include "test/util/verity_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-class GetDentsTest : public ::testing::Test {
- protected:
- void SetUp() override {
- // Verity is implemented in VFS2.
- SKIP_IF(IsRunningWithVFS1());
-
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
- // Mount a tmpfs file system, to be wrapped by a verity fs.
- tmpfs_dir_ = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- ASSERT_THAT(mount("", tmpfs_dir_.path().c_str(), "tmpfs", 0, ""),
- SyscallSucceeds());
-
- // Create a new file in the tmpfs mount.
- file_ = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateFileWith(tmpfs_dir_.path(), kContents, 0777));
- filename_ = Basename(file_.path());
- }
-
- TempPath tmpfs_dir_;
- TempPath file_;
- std::string filename_;
-};
-
-TEST_F(GetDentsTest, GetDents) {
- std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
-
- std::vector<std::string> expect = {".", "..", filename_};
- EXPECT_NO_ERRNO(DirContains(verity_dir, expect, /*exclude=*/{}));
-}
-
-TEST_F(GetDentsTest, Deleted) {
- std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
-
- EXPECT_THAT(unlink(JoinPath(tmpfs_dir_.path(), filename_).c_str()),
- SyscallSucceeds());
-
- EXPECT_THAT(DirContains(verity_dir, /*expect=*/{}, /*exclude=*/{}),
- PosixErrorIs(EIO, ::testing::_));
-}
-
-TEST_F(GetDentsTest, Renamed) {
- std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
-
- std::string new_file_name = "renamed-" + filename_;
- EXPECT_THAT(rename(JoinPath(tmpfs_dir_.path(), filename_).c_str(),
- JoinPath(tmpfs_dir_.path(), new_file_name).c_str()),
- SyscallSucceeds());
-
- EXPECT_THAT(DirContains(verity_dir, /*expect=*/{}, /*exclude=*/{}),
- PosixErrorIs(EIO, ::testing::_));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/verity_ioctl.cc b/test/syscalls/linux/verity_ioctl.cc
deleted file mode 100644
index e7e4fa64b..000000000
--- a/test/syscalls/linux/verity_ioctl.cc
+++ /dev/null
@@ -1,244 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <stdint.h>
-#include <stdlib.h>
-#include <sys/mount.h>
-#include <sys/stat.h>
-#include <time.h>
-
-#include <iomanip>
-#include <sstream>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "test/util/capability_util.h"
-#include "test/util/fs_util.h"
-#include "test/util/mount_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-#include "test/util/verity_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-class IoctlTest : public ::testing::Test {
- protected:
- void SetUp() override {
- // Verity is implemented in VFS2.
- SKIP_IF(IsRunningWithVFS1());
-
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
- // Mount a tmpfs file system, to be wrapped by a verity fs.
- tmpfs_dir_ = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- ASSERT_THAT(mount("", tmpfs_dir_.path().c_str(), "tmpfs", 0, ""),
- SyscallSucceeds());
-
- // Create a new file in the tmpfs mount.
- file_ = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateFileWith(tmpfs_dir_.path(), kContents, 0777));
- filename_ = Basename(file_.path());
- }
-
- TempPath tmpfs_dir_;
- TempPath file_;
- std::string filename_;
-};
-
-TEST_F(IoctlTest, Enable) {
- // Mount a verity fs on the existing tmpfs mount.
- std::string mount_opts = "lower_path=" + tmpfs_dir_.path();
- auto const verity_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- ASSERT_THAT(
- mount("", verity_dir.path().c_str(), "verity", 0, mount_opts.c_str()),
- SyscallSucceeds());
-
- // Confirm that the verity flag is absent.
- int flag = 0;
- auto const fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(JoinPath(verity_dir.path(), filename_), O_RDONLY, 0777));
- ASSERT_THAT(ioctl(fd.get(), FS_IOC_GETFLAGS, &flag), SyscallSucceeds());
- EXPECT_EQ(flag & FS_VERITY_FL, 0);
-
- // Enable the file and confirm that the verity flag is present.
- ASSERT_THAT(ioctl(fd.get(), FS_IOC_ENABLE_VERITY), SyscallSucceeds());
- ASSERT_THAT(ioctl(fd.get(), FS_IOC_GETFLAGS, &flag), SyscallSucceeds());
- EXPECT_EQ(flag & FS_VERITY_FL, FS_VERITY_FL);
-}
-
-TEST_F(IoctlTest, Measure) {
- // Mount a verity fs on the existing tmpfs mount.
- std::string mount_opts = "lower_path=" + tmpfs_dir_.path();
- auto const verity_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- ASSERT_THAT(
- mount("", verity_dir.path().c_str(), "verity", 0, mount_opts.c_str()),
- SyscallSucceeds());
-
- // Confirm that the file cannot be measured.
- auto const fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(JoinPath(verity_dir.path(), filename_), O_RDONLY, 0777));
- uint8_t digest_array[sizeof(struct fsverity_digest) + kMaxDigestSize] = {0};
- struct fsverity_digest* digest =
- reinterpret_cast<struct fsverity_digest*>(digest_array);
- digest->digest_size = kMaxDigestSize;
- ASSERT_THAT(ioctl(fd.get(), FS_IOC_MEASURE_VERITY, digest),
- SyscallFailsWithErrno(ENODATA));
-
- // Enable the file and confirm that the file can be measured.
- ASSERT_THAT(ioctl(fd.get(), FS_IOC_ENABLE_VERITY), SyscallSucceeds());
- ASSERT_THAT(ioctl(fd.get(), FS_IOC_MEASURE_VERITY, digest),
- SyscallSucceeds());
- EXPECT_EQ(digest->digest_size, kDefaultDigestSize);
-}
-
-TEST_F(IoctlTest, Mount) {
- std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
-
-  // Make sure the file can be opened and read in the mounted verity fs.
- auto const verity_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(JoinPath(verity_dir, filename_), O_RDONLY, 0777));
- char buf[sizeof(kContents)];
- EXPECT_THAT(ReadFd(verity_fd.get(), buf, sizeof(kContents)),
- SyscallSucceeds());
-}
-
-TEST_F(IoctlTest, NonExistingFile) {
- std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
-
- // Confirm that opening a non-existing file in the verity-enabled directory
- // triggers the expected error instead of verification failure.
- EXPECT_THAT(
- open(JoinPath(verity_dir, filename_ + "abc").c_str(), O_RDONLY, 0777),
- SyscallFailsWithErrno(ENOENT));
-}
-
-TEST_F(IoctlTest, ModifiedFile) {
- std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
-
- // Modify the file and check verification failure upon reading from it.
- auto const fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(JoinPath(tmpfs_dir_.path(), filename_), O_RDWR, 0777));
- ASSERT_NO_ERRNO(FlipRandomBit(fd.get(), sizeof(kContents) - 1));
-
- auto const verity_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(JoinPath(verity_dir, filename_), O_RDONLY, 0777));
- char buf[sizeof(kContents)];
- EXPECT_THAT(pread(verity_fd.get(), buf, 16, 0), SyscallFailsWithErrno(EIO));
-}
-
-TEST_F(IoctlTest, ModifiedMerkle) {
- std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
-
- // Modify the Merkle file and check verification failure upon opening the
- // corresponding file.
- auto const fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(MerklePath(JoinPath(tmpfs_dir_.path(), filename_)), O_RDWR, 0777));
- auto stat = ASSERT_NO_ERRNO_AND_VALUE(Fstat(fd.get()));
- ASSERT_NO_ERRNO(FlipRandomBit(fd.get(), stat.st_size));
-
- EXPECT_THAT(open(JoinPath(verity_dir, filename_).c_str(), O_RDONLY, 0777),
- SyscallFailsWithErrno(EIO));
-}
-
-TEST_F(IoctlTest, ModifiedDirMerkle) {
- std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
-
- // Modify the Merkle file for the parent directory and check verification
- // failure upon opening the corresponding file.
- auto const fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(MerkleRootPath(JoinPath(tmpfs_dir_.path(), "root")), O_RDWR, 0777));
- auto stat = ASSERT_NO_ERRNO_AND_VALUE(Fstat(fd.get()));
- ASSERT_NO_ERRNO(FlipRandomBit(fd.get(), stat.st_size));
-
- EXPECT_THAT(open(JoinPath(verity_dir, filename_).c_str(), O_RDONLY, 0777),
- SyscallFailsWithErrno(EIO));
-}
-
-TEST_F(IoctlTest, Stat) {
- std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
-
- struct stat st;
- EXPECT_THAT(stat(JoinPath(verity_dir, filename_).c_str(), &st),
- SyscallSucceeds());
-}
-
-TEST_F(IoctlTest, ModifiedStat) {
- std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
-
- EXPECT_THAT(chmod(JoinPath(tmpfs_dir_.path(), filename_).c_str(), 0644),
- SyscallSucceeds());
- struct stat st;
- EXPECT_THAT(stat(JoinPath(verity_dir, filename_).c_str(), &st),
- SyscallFailsWithErrno(EIO));
-}
-
-TEST_F(IoctlTest, DeleteFile) {
- std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
-
- EXPECT_THAT(unlink(JoinPath(tmpfs_dir_.path(), filename_).c_str()),
- SyscallSucceeds());
- EXPECT_THAT(open(JoinPath(verity_dir, filename_).c_str(), O_RDONLY, 0777),
- SyscallFailsWithErrno(EIO));
-}
-
-TEST_F(IoctlTest, DeleteMerkle) {
- std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
-
- EXPECT_THAT(
- unlink(MerklePath(JoinPath(tmpfs_dir_.path(), filename_)).c_str()),
- SyscallSucceeds());
- EXPECT_THAT(open(JoinPath(verity_dir, filename_).c_str(), O_RDONLY, 0777),
- SyscallFailsWithErrno(EIO));
-}
-
-TEST_F(IoctlTest, RenameFile) {
- std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
-
- std::string new_file_name = "renamed-" + filename_;
- EXPECT_THAT(rename(JoinPath(tmpfs_dir_.path(), filename_).c_str(),
- JoinPath(tmpfs_dir_.path(), new_file_name).c_str()),
- SyscallSucceeds());
- EXPECT_THAT(open(JoinPath(verity_dir, filename_).c_str(), O_RDONLY, 0777),
- SyscallFailsWithErrno(EIO));
-}
-
-TEST_F(IoctlTest, RenameMerkle) {
- std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
-
- std::string new_file_name = "renamed-" + filename_;
- EXPECT_THAT(
- rename(MerklePath(JoinPath(tmpfs_dir_.path(), filename_)).c_str(),
- MerklePath(JoinPath(tmpfs_dir_.path(), new_file_name)).c_str()),
- SyscallSucceeds());
- EXPECT_THAT(open(JoinPath(verity_dir, filename_).c_str(), O_RDONLY, 0777),
- SyscallFailsWithErrno(EIO));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
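The Measure test above sizes its buffer as sizeof(struct fsverity_digest) + kMaxDigestSize because fsverity_digest ends in a flexible array member: digest_size is an in/out field (buffer capacity on input, actual digest length on output) and the digest bytes follow the fixed header. A condensed sketch of that call pattern, illustrative only and assuming kMaxDigestSize from verity_util.h:

// Illustrative sketch, not part of the patch: measure a verity-enabled file.
// digest_size carries the buffer capacity into the ioctl and the actual
// digest length out of it; the digest bytes follow in digest->digest.
void MeasureVerityFile(int fd) {
  uint8_t buf[sizeof(struct fsverity_digest) + kMaxDigestSize] = {0};
  auto* digest = reinterpret_cast<struct fsverity_digest*>(buf);
  digest->digest_size = kMaxDigestSize;
  ASSERT_THAT(ioctl(fd, FS_IOC_MEASURE_VERITY, digest), SyscallSucceeds());
  EXPECT_GT(digest->digest_size, 0);
}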
diff --git a/test/syscalls/linux/verity_mmap.cc b/test/syscalls/linux/verity_mmap.cc
deleted file mode 100644
index 09ced6eb3..000000000
--- a/test/syscalls/linux/verity_mmap.cc
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <stdint.h>
-#include <stdlib.h>
-#include <sys/mman.h>
-#include <sys/mount.h>
-
-#include <string>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "test/util/capability_util.h"
-#include "test/util/fs_util.h"
-#include "test/util/memory_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-#include "test/util/verity_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-class MmapTest : public ::testing::Test {
- protected:
- void SetUp() override {
- // Verity is implemented in VFS2.
- SKIP_IF(IsRunningWithVFS1());
-
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
- // Mount a tmpfs file system, to be wrapped by a verity fs.
- tmpfs_dir_ = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- ASSERT_THAT(mount("", tmpfs_dir_.path().c_str(), "tmpfs", 0, ""),
- SyscallSucceeds());
-
- // Create a new file in the tmpfs mount.
- file_ = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateFileWith(tmpfs_dir_.path(), kContents, 0777));
- filename_ = Basename(file_.path());
- }
-
- TempPath tmpfs_dir_;
- TempPath file_;
- std::string filename_;
-};
-
-TEST_F(MmapTest, MmapRead) {
- std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
-
-  // Make sure the file can be opened and mmapped in the mounted verity fs.
- auto const verity_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(JoinPath(verity_dir, filename_), O_RDONLY, 0777));
-
- Mapping const m =
- ASSERT_NO_ERRNO_AND_VALUE(Mmap(nullptr, sizeof(kContents) - 1, PROT_READ,
- MAP_SHARED, verity_fd.get(), 0));
- EXPECT_THAT(std::string(m.view()), ::testing::StrEq(kContents));
-}
-
-TEST_F(MmapTest, ModifiedBeforeMmap) {
- std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
-
-  // Modify the file and check verification failure when the mapping is accessed.
- auto const fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(JoinPath(tmpfs_dir_.path(), filename_), O_RDWR, 0777));
- ASSERT_NO_ERRNO(FlipRandomBit(fd.get(), sizeof(kContents) - 1));
-
- auto const verity_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(JoinPath(verity_dir, filename_), O_RDONLY, 0777));
- Mapping const m =
- ASSERT_NO_ERRNO_AND_VALUE(Mmap(nullptr, sizeof(kContents) - 1, PROT_READ,
- MAP_SHARED, verity_fd.get(), 0));
-
- // Memory fault is expected when Translate fails.
- EXPECT_EXIT(std::string(m.view()), ::testing::KilledBySignal(SIGSEGV), "");
-}
-
-TEST_F(MmapTest, ModifiedAfterMmap) {
- std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
-
- auto const verity_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(JoinPath(verity_dir, filename_), O_RDONLY, 0777));
- Mapping const m =
- ASSERT_NO_ERRNO_AND_VALUE(Mmap(nullptr, sizeof(kContents) - 1, PROT_READ,
- MAP_SHARED, verity_fd.get(), 0));
-
-  // Modify the file after mapping and check verification failure on access.
- auto const fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(JoinPath(tmpfs_dir_.path(), filename_), O_RDWR, 0777));
- ASSERT_NO_ERRNO(FlipRandomBit(fd.get(), sizeof(kContents) - 1));
-
- // Memory fault is expected when Translate fails.
- EXPECT_EXIT(std::string(m.view()), ::testing::KilledBySignal(SIGSEGV), "");
-}
-
-class MmapParamTest
- : public MmapTest,
- public ::testing::WithParamInterface<std::tuple<int, int>> {
- protected:
- int prot() const { return std::get<0>(GetParam()); }
- int flags() const { return std::get<1>(GetParam()); }
-};
-
-INSTANTIATE_TEST_SUITE_P(
- WriteExecNoneSharedPrivate, MmapParamTest,
- ::testing::Combine(::testing::ValuesIn({
- PROT_WRITE,
- PROT_EXEC,
- PROT_NONE,
- }),
- ::testing::ValuesIn({MAP_SHARED, MAP_PRIVATE})));
-
-TEST_P(MmapParamTest, Mmap) {
- std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
-
-  // Make sure the file can be opened and mmapped in the mounted verity fs.
- auto const verity_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(JoinPath(verity_dir, filename_), O_RDONLY, 0777));
-
- if (prot() == PROT_WRITE && flags() == MAP_SHARED) {
- // Verity file system is read-only.
- EXPECT_THAT(
- reinterpret_cast<intptr_t>(mmap(nullptr, sizeof(kContents) - 1, prot(),
- flags(), verity_fd.get(), 0)),
- SyscallFailsWithErrno(EACCES));
- } else {
- Mapping const m = ASSERT_NO_ERRNO_AND_VALUE(Mmap(
- nullptr, sizeof(kContents) - 1, prot(), flags(), verity_fd.get(), 0));
- if (prot() == PROT_NONE) {
-      // Memory mapped with PROT_NONE cannot be accessed.
- EXPECT_EXIT(std::string(m.view()), ::testing::KilledBySignal(SIGSEGV),
- "");
- } else {
- EXPECT_THAT(std::string(m.view()), ::testing::StrEq(kContents));
- }
- }
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/verity_mount.cc b/test/syscalls/linux/verity_mount.cc
deleted file mode 100644
index d6bfcb46d..000000000
--- a/test/syscalls/linux/verity_mount.cc
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <sys/mount.h>
-
-#include <iomanip>
-#include <sstream>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "test/util/capability_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-#include "test/util/verity_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// Mount verity file system on an existing tmpfs mount.
-TEST(MountTest, MountExisting) {
- // Verity is implemented in VFS2.
- SKIP_IF(IsRunningWithVFS1());
-
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
-
- // Mount a new tmpfs file system.
- auto const tmpfs_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- ASSERT_THAT(mount("", tmpfs_dir.path().c_str(), "tmpfs", 0, ""),
- SyscallSucceeds());
-
-  // Mount a verity file system on the existing tmpfs mount.
- auto const verity_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- std::string opts = "lower_path=" + tmpfs_dir.path();
- ASSERT_THAT(mount("", verity_dir.path().c_str(), "verity", 0, opts.c_str()),
- SyscallSucceeds());
- auto const fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(verity_dir.path(), O_RDONLY, 0777));
- EXPECT_THAT(ioctl(fd.get(), FS_IOC_ENABLE_VERITY), SyscallSucceeds());
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/verity_symlink.cc b/test/syscalls/linux/verity_symlink.cc
deleted file mode 100644
index bbf5375cb..000000000
--- a/test/syscalls/linux/verity_symlink.cc
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <stdint.h>
-#include <stdlib.h>
-#include <sys/mount.h>
-#include <sys/stat.h>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "test/util/capability_util.h"
-#include "test/util/fs_util.h"
-#include "test/util/mount_util.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-#include "test/util/verity_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-const char kSymlink[] = "verity_symlink";
-
-class SymlinkTest : public ::testing::Test {
- protected:
- void SetUp() override {
- // Verity is implemented in VFS2.
- SKIP_IF(IsRunningWithVFS1());
-
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
- // Mount a tmpfs file system, to be wrapped by a verity fs.
- tmpfs_dir_ = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- ASSERT_THAT(mount("", tmpfs_dir_.path().c_str(), "tmpfs", 0, ""),
- SyscallSucceeds());
-
- // Create a new file in the tmpfs mount.
- file_ = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateFileWith(tmpfs_dir_.path(), kContents, 0777));
- filename_ = Basename(file_.path());
-
- // Create a symlink to the file.
- ASSERT_THAT(symlink(file_.path().c_str(),
- JoinPath(tmpfs_dir_.path(), kSymlink).c_str()),
- SyscallSucceeds());
- }
-
- TempPath tmpfs_dir_;
- TempPath file_;
- std::string filename_;
-};
-
-TEST_F(SymlinkTest, Success) {
- std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_,
- {EnableTarget(kSymlink, O_RDONLY | O_NOFOLLOW)}));
-
- char buf[256];
- EXPECT_THAT(
- readlink(JoinPath(verity_dir, kSymlink).c_str(), buf, sizeof(buf)),
- SyscallSucceeds());
- auto const verity_fd = ASSERT_NO_ERRNO_AND_VALUE(
- Open(JoinPath(verity_dir, kSymlink).c_str(), O_RDONLY, 0777));
- EXPECT_THAT(ReadFd(verity_fd.get(), buf, sizeof(kContents)),
- SyscallSucceeds());
-}
-
-TEST_F(SymlinkTest, DeleteLink) {
- std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_,
- {EnableTarget(kSymlink, O_RDONLY | O_NOFOLLOW)}));
-
- ASSERT_THAT(unlink(JoinPath(tmpfs_dir_.path(), kSymlink).c_str()),
- SyscallSucceeds());
- char buf[256];
- EXPECT_THAT(
- readlink(JoinPath(verity_dir, kSymlink).c_str(), buf, sizeof(buf)),
- SyscallFailsWithErrno(EIO));
- EXPECT_THAT(open(JoinPath(verity_dir, kSymlink).c_str(), O_RDONLY, 0777),
- SyscallFailsWithErrno(EIO));
-}
-
-TEST_F(SymlinkTest, ModifyLink) {
- std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_,
- {EnableTarget(kSymlink, O_RDONLY | O_NOFOLLOW)}));
-
- ASSERT_THAT(unlink(JoinPath(tmpfs_dir_.path(), kSymlink).c_str()),
- SyscallSucceeds());
-
- std::string newlink = "newlink";
- ASSERT_THAT(symlink(JoinPath(tmpfs_dir_.path(), newlink).c_str(),
- JoinPath(tmpfs_dir_.path(), kSymlink).c_str()),
- SyscallSucceeds());
- char buf[256];
- EXPECT_THAT(
- readlink(JoinPath(verity_dir, kSymlink).c_str(), buf, sizeof(buf)),
- SyscallFailsWithErrno(EIO));
- EXPECT_THAT(open(JoinPath(verity_dir, kSymlink).c_str(), O_RDONLY, 0777),
- SyscallFailsWithErrno(EIO));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/vfork.cc b/test/syscalls/linux/vfork.cc
deleted file mode 100644
index 1a282e371..000000000
--- a/test/syscalls/linux/vfork.cc
+++ /dev/null
@@ -1,195 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <unistd.h>
-
-#include <string>
-#include <utility>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/flags/flag.h"
-#include "absl/time/time.h"
-#include "test/util/logging.h"
-#include "test/util/multiprocess_util.h"
-#include "test/util/test_util.h"
-#include "test/util/time_util.h"
-
-ABSL_FLAG(bool, vfork_test_child, false,
- "If true, run the VforkTest child workload.");
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// We don't test with raw CLONE_VFORK to avoid interacting with glibc's use of
-// TLS.
-//
-// Even with vfork(2), we must be careful to do little more in the child than
-// call execve(2). We use the simplest sleep function possible, though this is
-// still precarious, as we're officially only allowed to call execve(2) and
-// _exit(2).
-constexpr absl::Duration kChildDelay = absl::Seconds(10);
-
-// Exit code for successful child subprocesses. We don't want to use 0 since
-// it's too common, and an execve(2) failure causes the child to exit with the
-// errno, so kChildExitCode is chosen to be an unlikely errno:
-constexpr int kChildExitCode = 118; // ENOTNAM: Not a XENIX named type file
-
-int64_t MonotonicNow() {
- struct timespec now;
- TEST_PCHECK(clock_gettime(CLOCK_MONOTONIC, &now) == 0);
- return now.tv_sec * 1000000000ll + now.tv_nsec;
-}
-
-TEST(VforkTest, ParentStopsUntilChildExits) {
- const auto test = [] {
- // N.B. Run the test in a single-threaded subprocess because
- // vfork is not safe in a multi-threaded process.
-
- const int64_t start = MonotonicNow();
-
- pid_t pid = vfork();
- if (pid == 0) {
- SleepSafe(kChildDelay);
- _exit(kChildExitCode);
- }
- TEST_PCHECK_MSG(pid > 0, "vfork failed");
- MaybeSave();
-
- const int64_t end = MonotonicNow();
-
- absl::Duration dur = absl::Nanoseconds(end - start);
-
- TEST_CHECK(dur >= kChildDelay);
-
- int status = 0;
- TEST_PCHECK(RetryEINTR(waitpid)(pid, &status, 0));
- TEST_CHECK(WIFEXITED(status));
- TEST_CHECK(WEXITSTATUS(status) == kChildExitCode);
- };
-
- EXPECT_THAT(InForkedProcess(test), IsPosixErrorOkAndHolds(0));
-}
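// A minimal standalone sketch of the property exercised above -- the parent
// stays stopped until the vfork(2) child calls _exit(2) -- using only POSIX
// calls and none of the test utilities. The 2-second delay and the exit code
// 42 are arbitrary choices for illustration; as the comments above note,
// doing anything beyond execve(2)/_exit(2) in a vfork child is precarious.
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>
#include <cassert>

int main() {
  struct timespec start, end;
  clock_gettime(CLOCK_MONOTONIC, &start);
  pid_t pid = vfork();
  if (pid == 0) {
    sleep(2);   // Child: the parent cannot run until we exit.
    _exit(42);
  }
  clock_gettime(CLOCK_MONOTONIC, &end);
  assert(end.tv_sec - start.tv_sec >= 2);  // Parent resumed only after exit.
  int status = 0;
  assert(waitpid(pid, &status, 0) == pid);
  assert(WIFEXITED(status) && WEXITSTATUS(status) == 42);
  return 0;
}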
-
-TEST(VforkTest, ParentStopsUntilChildExecves) {
- ExecveArray const owned_child_argv = {"/proc/self/exe", "--vfork_test_child"};
- char* const* const child_argv = owned_child_argv.get();
-
- const auto test = [&] {
- const int64_t start = MonotonicNow();
-
- pid_t pid = vfork();
- if (pid == 0) {
- SleepSafe(kChildDelay);
- execve(child_argv[0], child_argv, /* envp = */ nullptr);
- _exit(errno);
- }
-    // Don't attempt save/restore until after recording the end time, since
-    // the test expects an upper bound on the time spent stopped.
- int saved_errno = errno;
- const int64_t end = MonotonicNow();
- errno = saved_errno;
- TEST_PCHECK_MSG(pid > 0, "vfork failed");
- MaybeSave();
-
- absl::Duration dur = absl::Nanoseconds(end - start);
-
- // The parent should resume execution after execve, but before
- // the post-execve test child exits.
- TEST_CHECK(dur >= kChildDelay);
- TEST_CHECK(dur <= 2 * kChildDelay);
-
- int status = 0;
- TEST_PCHECK(RetryEINTR(waitpid)(pid, &status, 0));
- TEST_CHECK(WIFEXITED(status));
- TEST_CHECK(WEXITSTATUS(status) == kChildExitCode);
- };
-
- EXPECT_THAT(InForkedProcess(test), IsPosixErrorOkAndHolds(0));
-}
-
-// A vfork child does not unstop the parent a second time when it exits after
-// exec.
-TEST(VforkTest, ExecedChildExitDoesntUnstopParent) {
- ExecveArray const owned_child_argv = {"/proc/self/exe", "--vfork_test_child"};
- char* const* const child_argv = owned_child_argv.get();
-
- const auto test = [&] {
- pid_t pid1 = vfork();
- if (pid1 == 0) {
- execve(child_argv[0], child_argv, /* envp = */ nullptr);
- _exit(errno);
- }
- TEST_PCHECK_MSG(pid1 > 0, "vfork failed");
- MaybeSave();
-
- // pid1 exec'd and is now sleeping.
- SleepSafe(kChildDelay / 2);
-
- const int64_t start = MonotonicNow();
-
- pid_t pid2 = vfork();
- if (pid2 == 0) {
- SleepSafe(kChildDelay);
- _exit(kChildExitCode);
- }
- TEST_PCHECK_MSG(pid2 > 0, "vfork failed");
- MaybeSave();
-
- const int64_t end = MonotonicNow();
-
- absl::Duration dur = absl::Nanoseconds(end - start);
-
- // The parent should resume execution only after pid2 exits, not
- // when pid1 exits.
- TEST_CHECK(dur >= kChildDelay);
-
- int status = 0;
- TEST_PCHECK(RetryEINTR(waitpid)(pid1, &status, 0));
- TEST_CHECK(WIFEXITED(status));
- TEST_CHECK(WEXITSTATUS(status) == kChildExitCode);
-
- TEST_PCHECK(RetryEINTR(waitpid)(pid2, &status, 0));
- TEST_CHECK(WIFEXITED(status));
- TEST_CHECK(WEXITSTATUS(status) == kChildExitCode);
- };
-
- EXPECT_THAT(InForkedProcess(test), IsPosixErrorOkAndHolds(0));
-}
-
-int RunChild() {
- SleepSafe(kChildDelay);
- return kChildExitCode;
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
-
-int main(int argc, char** argv) {
- gvisor::testing::TestInit(&argc, &argv);
-
- if (absl::GetFlag(FLAGS_vfork_test_child)) {
- return gvisor::testing::RunChild();
- }
-
- return gvisor::testing::RunAllTests();
-}
diff --git a/test/syscalls/linux/vsyscall.cc b/test/syscalls/linux/vsyscall.cc
deleted file mode 100644
index ae4377108..000000000
--- a/test/syscalls/linux/vsyscall.cc
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <time.h>
-
-#include "gtest/gtest.h"
-#include "test/util/proc_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-#if defined(__x86_64__) || defined(__i386__)
-time_t vsyscall_time(time_t* t) {
- constexpr uint64_t kVsyscallTimeEntry = 0xffffffffff600400;
- return reinterpret_cast<time_t (*)(time_t*)>(kVsyscallTimeEntry)(t);
-}
-
-TEST(VsyscallTest, VsyscallAlwaysAvailableOnGvisor) {
- SKIP_IF(!IsRunningOnGvisor());
- // Vsyscall is always advertised by gvisor.
- EXPECT_TRUE(ASSERT_NO_ERRNO_AND_VALUE(IsVsyscallEnabled()));
-  // Vsyscall should always work on gvisor.
- time_t t;
- EXPECT_THAT(vsyscall_time(&t), SyscallSucceeds());
-}
-#endif
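// A hedged sketch of the surrounding context: on x86-64 Linux the legacy
// vsyscall page sits at the fixed address 0xffffffffff600000, with
// gettimeofday(2) at offset 0x0, time(2) at offset 0x400 (used above), and
// getcpu(2) at offset 0x800. The helper below mirrors vsyscall_time() for
// gettimeofday; on hosts booted with vsyscall=none it faults instead of
// returning.
#if defined(__x86_64__)
#include <sys/time.h>
#include <cstdint>

namespace {
constexpr uint64_t kVsyscallBase = 0xffffffffff600000ULL;

int vsyscall_gettimeofday(struct timeval* tv, struct timezone* tz) {
  // Call through the fixed vsyscall entry point for gettimeofday.
  return reinterpret_cast<int (*)(struct timeval*, struct timezone*)>(
      kVsyscallBase + 0x0)(tv, tz);
}
}  // namespace
#endif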
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/wait.cc b/test/syscalls/linux/wait.cc
deleted file mode 100644
index 944149d5e..000000000
--- a/test/syscalls/linux/wait.cc
+++ /dev/null
@@ -1,913 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <signal.h>
-#include <sys/mman.h>
-#include <sys/ptrace.h>
-#include <sys/resource.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <unistd.h>
-
-#include <functional>
-#include <tuple>
-#include <vector>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/strings/str_cat.h"
-#include "absl/synchronization/mutex.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/util/cleanup.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/logging.h"
-#include "test/util/multiprocess_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/signal_util.h"
-#include "test/util/test_util.h"
-#include "test/util/thread_util.h"
-#include "test/util/time_util.h"
-
-using ::testing::UnorderedElementsAre;
-
-// These unit tests focus on the wait4(2) system call, but include basic
-// checks for the i386 waitpid(2) syscall, which is a subset of wait4(2).
-//
-// NOTE(b/22640830,b/27680907,b/29049891): Some functionality is not tested as
-// it is not currently supported by gVisor:
-// * Process groups.
-// * Core dump status (WCOREDUMP).
-//
-// Tests for waiting on stopped/continued children are in sigstop.cc.
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// The CloneChild function seems to need more than one page of stack space.
-static const size_t kStackSize = 2 * kPageSize;
-
-// The child thread created in CloneAndExit runs this function.
-// This child does not have TLS set up, so it must not use glibc functions.
-int CloneChild(void* priv) {
- int64_t sleep = reinterpret_cast<int64_t>(priv);
- SleepSafe(absl::Seconds(sleep));
-
- // glibc's _exit(2) function wrapper will helpfully call exit_group(2),
- // exiting the entire process.
- syscall(__NR_exit, 0);
- return 1;
-}
-
-// ForkAndExit forks a child process which exits with exit_code, after
-// sleeping for the specified duration (seconds).
-pid_t ForkAndExit(int exit_code, int64_t sleep) {
- pid_t child = fork();
- if (child == 0) {
- SleepSafe(absl::Seconds(sleep));
- _exit(exit_code);
- }
- return child;
-}
-
-int64_t clock_gettime_nsecs(clockid_t id) {
- struct timespec ts;
- TEST_PCHECK(clock_gettime(id, &ts) == 0);
- return (ts.tv_sec * 1000000000 + ts.tv_nsec);
-}
-
-void spin(int64_t sec) {
- int64_t ns = sec * 1000000000;
- int64_t start = clock_gettime_nsecs(CLOCK_THREAD_CPUTIME_ID);
- int64_t end = start + ns;
-
- do {
- constexpr int kLoopCount = 1000000; // large and arbitrary
- // volatile to prevent the compiler from skipping this loop.
- for (volatile int i = 0; i < kLoopCount; i++) {
- }
- } while (clock_gettime_nsecs(CLOCK_THREAD_CPUTIME_ID) < end);
-}
-
-// ForkSpinAndExit forks a child process which exits with exit_code, after
-// spinning for the specified duration (seconds).
-pid_t ForkSpinAndExit(int exit_code, int64_t spintime) {
- pid_t child = fork();
- if (child == 0) {
- spin(spintime);
- _exit(exit_code);
- }
- return child;
-}
-
-absl::Duration RusageCpuTime(const struct rusage& ru) {
- return absl::DurationFromTimeval(ru.ru_utime) +
- absl::DurationFromTimeval(ru.ru_stime);
-}
-
-// Returns the address of the top of the stack.
-// Free with FreeStack.
-uintptr_t AllocStack() {
- void* addr = mmap(nullptr, kStackSize, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-
- if (addr == MAP_FAILED) {
- return reinterpret_cast<uintptr_t>(MAP_FAILED);
- }
-
- return reinterpret_cast<uintptr_t>(addr) + kStackSize;
-}
-
-// Frees a stack allocated with AllocStack.
-int FreeStack(uintptr_t addr) {
-  addr -= kStackSize;
-  // Unmap the full allocation, not just the first page.
-  return munmap(reinterpret_cast<void*>(addr), kStackSize);
-}
-
-// CloneAndExit clones a child thread, which exits with 0 after sleeping for
-// the specified duration (in seconds). extra_flags are ORed with the
-// standard clone(2) flags.
-int CloneAndExit(int64_t sleep, uintptr_t stack, int extra_flags) {
- return clone(CloneChild, reinterpret_cast<void*>(stack),
- CLONE_FILES | CLONE_FS | CLONE_SIGHAND | CLONE_VM | extra_flags,
- reinterpret_cast<void*>(sleep));
-}
-
-// Simple wrappers around wait4(2) and waitid(2) that ignore interrupts.
-constexpr auto Wait4 = RetryEINTR(wait4);
-constexpr auto Waitid = RetryEINTR(waitid);
-
-// Fixture for tests parameterized by a function that waits for any child to
-// exit with the given options, checks that it exited with the given code, and
-// then returns its PID.
-//
-// N.B. These tests run in a multi-threaded environment. We assume that
-// background threads do not create child processes and are not themselves
-// created with clone(... | SIGCHLD). Either may cause these tests to
-// erroneously wait on child processes/threads.
-class WaitAnyChildTest : public ::testing::TestWithParam<
- std::function<PosixErrorOr<pid_t>(int, int)>> {
- protected:
- PosixErrorOr<pid_t> WaitAny(int code) { return WaitAnyWithOptions(code, 0); }
-
- PosixErrorOr<pid_t> WaitAnyWithOptions(int code, int options) {
- return GetParam()(code, options);
- }
-};
-
-// Wait for any child to exit.
-TEST_P(WaitAnyChildTest, Fork) {
- pid_t child;
- ASSERT_THAT(child = ForkAndExit(0, 0), SyscallSucceeds());
-
- EXPECT_THAT(WaitAny(0), IsPosixErrorOkAndHolds(child));
-}
-
-// Call wait4 for any process after the child has already exited.
-TEST_P(WaitAnyChildTest, AfterExit) {
- pid_t child;
- ASSERT_THAT(child = ForkAndExit(0, 0), SyscallSucceeds());
-
- absl::SleepFor(absl::Seconds(5));
-
- EXPECT_THAT(WaitAny(0), IsPosixErrorOkAndHolds(child));
-}
-
-// Wait for multiple children to exit, waiting for any child each time.
-TEST_P(WaitAnyChildTest, MultipleFork) {
- pid_t child1, child2;
- ASSERT_THAT(child1 = ForkAndExit(0, 0), SyscallSucceeds());
- ASSERT_THAT(child2 = ForkAndExit(0, 0), SyscallSucceeds());
-
- std::vector<pid_t> pids;
- pids.push_back(ASSERT_NO_ERRNO_AND_VALUE(WaitAny(0)));
- pids.push_back(ASSERT_NO_ERRNO_AND_VALUE(WaitAny(0)));
- EXPECT_THAT(pids, UnorderedElementsAre(child1, child2));
-}
-
-// Wait for any child to exit.
-// A non-CLONE_THREAD child which sends SIGCHLD upon exit behaves much like
-// a forked process.
-TEST_P(WaitAnyChildTest, CloneSIGCHLD) {
- uintptr_t stack;
- ASSERT_THAT(stack = AllocStack(), SyscallSucceeds());
- auto free =
- Cleanup([stack] { ASSERT_THAT(FreeStack(stack), SyscallSucceeds()); });
-
- int child;
- ASSERT_THAT(child = CloneAndExit(0, stack, SIGCHLD), SyscallSucceeds());
-
- EXPECT_THAT(WaitAny(0), IsPosixErrorOkAndHolds(child));
-}
-
-// Wait for a child thread and process.
-TEST_P(WaitAnyChildTest, ForkAndClone) {
- pid_t process;
- ASSERT_THAT(process = ForkAndExit(0, 0), SyscallSucceeds());
-
- uintptr_t stack;
- ASSERT_THAT(stack = AllocStack(), SyscallSucceeds());
- auto free =
- Cleanup([stack] { ASSERT_THAT(FreeStack(stack), SyscallSucceeds()); });
-
- int thread;
- // Send SIGCHLD for normal wait semantics.
- ASSERT_THAT(thread = CloneAndExit(0, stack, SIGCHLD), SyscallSucceeds());
-
- std::vector<pid_t> pids;
- pids.push_back(ASSERT_NO_ERRNO_AND_VALUE(WaitAny(0)));
- pids.push_back(ASSERT_NO_ERRNO_AND_VALUE(WaitAny(0)));
- EXPECT_THAT(pids, UnorderedElementsAre(process, thread));
-}
-
-// Return immediately if no child has exited.
-TEST_P(WaitAnyChildTest, WaitWNOHANG) {
- EXPECT_THAT(WaitAnyWithOptions(0, WNOHANG),
- PosixErrorIs(ECHILD, ::testing::_));
-}
-
-// Bad options fail with EINVAL.
-TEST_P(WaitAnyChildTest, BadOption) {
- EXPECT_THAT(WaitAnyWithOptions(0, 123456),
- PosixErrorIs(EINVAL, ::testing::_));
-}
-
-TEST_P(WaitAnyChildTest, WaitedChildRusage) {
- struct rusage before;
- ASSERT_THAT(getrusage(RUSAGE_CHILDREN, &before), SyscallSucceeds());
-
- pid_t child;
- constexpr absl::Duration kSpin = absl::Seconds(3);
- ASSERT_THAT(child = ForkSpinAndExit(0, absl::ToInt64Seconds(kSpin)),
- SyscallSucceeds());
- ASSERT_THAT(WaitAny(0), IsPosixErrorOkAndHolds(child));
-
- struct rusage after;
- ASSERT_THAT(getrusage(RUSAGE_CHILDREN, &after), SyscallSucceeds());
-
- EXPECT_GE(RusageCpuTime(after) - RusageCpuTime(before), kSpin);
-}
-
-TEST_P(WaitAnyChildTest, IgnoredChildRusage) {
- // "POSIX.1-2001 specifies that if the disposition of SIGCHLD is
- // set to SIG_IGN or the SA_NOCLDWAIT flag is set for SIGCHLD (see
- // sigaction(2)), then children that terminate do not become zombies and a
- // call to wait() or waitpid() will block until all children have terminated,
- // and then fail with errno set to ECHILD." - waitpid(2)
- //
- // "RUSAGE_CHILDREN: Return resource usage statistics for all children of the
- // calling process that have terminated *and been waited for*." -
- // getrusage(2), emphasis added
-
- struct sigaction sa;
- sa.sa_handler = SIG_IGN;
- const auto cleanup_sigact =
- ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGCHLD, sa));
-
- struct rusage before;
- ASSERT_THAT(getrusage(RUSAGE_CHILDREN, &before), SyscallSucceeds());
-
- const absl::Duration start =
- absl::Nanoseconds(clock_gettime_nsecs(CLOCK_MONOTONIC));
-
- constexpr absl::Duration kSpin = absl::Seconds(3);
-
-  // ForkSpinAndExit uses CLOCK_THREAD_CPUTIME_ID, which is lower resolution
-  // than, and may diverge from, CLOCK_MONOTONIC, so we allow a small grace
-  // period but still check that we blocked for a while.
- constexpr absl::Duration kSpinGrace = absl::Milliseconds(100);
-
- pid_t child;
- ASSERT_THAT(child = ForkSpinAndExit(0, absl::ToInt64Seconds(kSpin)),
- SyscallSucceeds());
- ASSERT_THAT(WaitAny(0), PosixErrorIs(ECHILD, ::testing::_));
- const absl::Duration end =
- absl::Nanoseconds(clock_gettime_nsecs(CLOCK_MONOTONIC));
- EXPECT_GE(end - start, kSpin - kSpinGrace);
-
- struct rusage after;
- ASSERT_THAT(getrusage(RUSAGE_CHILDREN, &after), SyscallSucceeds());
- EXPECT_EQ(before.ru_utime.tv_sec, after.ru_utime.tv_sec);
- EXPECT_EQ(before.ru_utime.tv_usec, after.ru_utime.tv_usec);
- EXPECT_EQ(before.ru_stime.tv_sec, after.ru_stime.tv_sec);
- EXPECT_EQ(before.ru_stime.tv_usec, after.ru_stime.tv_usec);
-}
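// A minimal sketch of the POSIX behavior quoted in IgnoredChildRusage above:
// with SIGCHLD set to SIG_IGN, children are reaped automatically, so wait(2)
// blocks until they are all gone and then fails with ECHILD, and their CPU
// time is never charged to RUSAGE_CHILDREN. The 1-second sleep is arbitrary.
#include <errno.h>
#include <signal.h>
#include <sys/wait.h>
#include <unistd.h>
#include <cassert>

int main() {
  signal(SIGCHLD, SIG_IGN);  // Children will not become zombies.
  if (fork() == 0) {
    sleep(1);
    _exit(0);
  }
  errno = 0;
  // Blocks for roughly one second (until the child is auto-reaped), then
  // reports that there is nothing left to wait for.
  assert(wait(nullptr) == -1 && errno == ECHILD);
  return 0;
}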
-
-INSTANTIATE_TEST_SUITE_P(
- Waiters, WaitAnyChildTest,
- ::testing::Values(
- [](int code, int options) -> PosixErrorOr<pid_t> {
- int status;
- auto const pid = Wait4(-1, &status, options, nullptr);
- MaybeSave();
- if (pid < 0) {
- return PosixError(errno, "wait4");
- }
- if (!WIFEXITED(status) || WEXITSTATUS(status) != code) {
- return PosixError(
- EINVAL, absl::StrCat("unexpected wait status: got ", status,
- ", wanted ", code));
- }
- return static_cast<pid_t>(pid);
- },
- [](int code, int options) -> PosixErrorOr<pid_t> {
- siginfo_t si;
- auto const rv = Waitid(P_ALL, 0, &si, WEXITED | options);
- MaybeSave();
- if (rv < 0) {
- return PosixError(errno, "waitid");
- }
- if (si.si_signo != SIGCHLD) {
- return PosixError(
- EINVAL, absl::StrCat("unexpected signo: got ", si.si_signo,
- ", wanted ", SIGCHLD));
- }
- if (si.si_status != code) {
- return PosixError(
- EINVAL, absl::StrCat("unexpected status: got ", si.si_status,
- ", wanted ", code));
- }
- if (si.si_code != CLD_EXITED) {
- return PosixError(EINVAL,
- absl::StrCat("unexpected code: got ", si.si_code,
- ", wanted ", CLD_EXITED));
- }
- auto const uid = getuid();
- if (si.si_uid != uid) {
- return PosixError(EINVAL,
- absl::StrCat("unexpected uid: got ", si.si_uid,
- ", wanted ", uid));
- }
- return static_cast<pid_t>(si.si_pid);
- }));
-
-// Fixture for tests parameterized by a (sysno, function) tuple. The function
-// takes the PID of a specific child to wait for, waits for it to exit, and
-// checks that it exits with the given code.
-class WaitSpecificChildTest
- : public ::testing::TestWithParam<
- std::tuple<int, std::function<PosixError(pid_t, int, int)>>> {
- protected:
- int Sysno() { return std::get<0>(GetParam()); }
-
- PosixError WaitForWithOptions(pid_t pid, int options, int code) {
- return std::get<1>(GetParam())(pid, options, code);
- }
-
- PosixError WaitFor(pid_t pid, int code) {
- return std::get<1>(GetParam())(pid, 0, code);
- }
-};
-
-// Wait for specific child to exit.
-TEST_P(WaitSpecificChildTest, Fork) {
- pid_t child;
- ASSERT_THAT(child = ForkAndExit(0, 0), SyscallSucceeds());
-
- EXPECT_NO_ERRNO(WaitFor(child, 0));
-}
-
-// Non-zero exit codes are correctly propagated.
-TEST_P(WaitSpecificChildTest, NormalExit) {
- pid_t child;
- ASSERT_THAT(child = ForkAndExit(42, 0), SyscallSucceeds());
-
- EXPECT_NO_ERRNO(WaitFor(child, 42));
-}
-
-// Wait for multiple children to exit.
-TEST_P(WaitSpecificChildTest, MultipleFork) {
- pid_t child1, child2;
- ASSERT_THAT(child1 = ForkAndExit(0, 0), SyscallSucceeds());
- ASSERT_THAT(child2 = ForkAndExit(0, 0), SyscallSucceeds());
-
- EXPECT_NO_ERRNO(WaitFor(child1, 0));
- EXPECT_NO_ERRNO(WaitFor(child2, 0));
-}
-
-// Wait for multiple children to exit, out of the order they were created.
-TEST_P(WaitSpecificChildTest, MultipleForkOutOfOrder) {
- pid_t child1, child2;
- ASSERT_THAT(child1 = ForkAndExit(0, 0), SyscallSucceeds());
- ASSERT_THAT(child2 = ForkAndExit(0, 0), SyscallSucceeds());
-
- EXPECT_NO_ERRNO(WaitFor(child2, 0));
- EXPECT_NO_ERRNO(WaitFor(child1, 0));
-}
-
-// Wait for specific child to exit, entering wait4 before the exit occurs.
-TEST_P(WaitSpecificChildTest, ForkSleep) {
- pid_t child;
- ASSERT_THAT(child = ForkAndExit(0, 5), SyscallSucceeds());
-
- EXPECT_NO_ERRNO(WaitFor(child, 0));
-}
-
-// Wait should block until the child exits.
-TEST_P(WaitSpecificChildTest, ForkBlock) {
- pid_t child;
-
- auto start = absl::Now();
- ASSERT_THAT(child = ForkAndExit(0, 5), SyscallSucceeds());
-
- EXPECT_NO_ERRNO(WaitFor(child, 0));
-
- EXPECT_GE(absl::Now() - start, absl::Seconds(5));
-}
-
-// Waiting after the child has already exited returns immediately.
-TEST_P(WaitSpecificChildTest, AfterExit) {
- pid_t child;
- ASSERT_THAT(child = ForkAndExit(0, 0), SyscallSucceeds());
-
- absl::SleepFor(absl::Seconds(5));
-
- EXPECT_NO_ERRNO(WaitFor(child, 0));
-}
-
-// Wait for child of sibling thread.
-TEST_P(WaitSpecificChildTest, SiblingChildren) {
- absl::Mutex mu;
- pid_t child;
- bool ready = false;
- bool stop = false;
-
- ScopedThread t([&] {
- absl::MutexLock ml(&mu);
- EXPECT_THAT(child = ForkAndExit(0, 0), SyscallSucceeds());
- ready = true;
- mu.Await(absl::Condition(&stop));
- });
-
- // N.B. This must be declared after ScopedThread, so it is destructed first,
- // thus waking the thread.
- absl::MutexLock ml(&mu);
- mu.Await(absl::Condition(&ready));
-
- EXPECT_NO_ERRNO(WaitFor(child, 0));
-
- // Keep the sibling alive until after we've waited so the child isn't
- // reparented.
- stop = true;
-}
-
-// Waiting for child of sibling thread not allowed with WNOTHREAD.
-TEST_P(WaitSpecificChildTest, SiblingChildrenWNOTHREAD) {
- // Linux added WNOTHREAD support to waitid(2) in
- // 91c4e8ea8f05916df0c8a6f383508ac7c9e10dba ("wait: allow sys_waitid() to
- // accept __WNOTHREAD/__WCLONE/__WALL"). i.e., Linux 4.7.
- //
- // Skip the test if it isn't supported yet.
- if (Sysno() == SYS_waitid) {
- int ret = waitid(P_ALL, 0, nullptr, WEXITED | WNOHANG | __WNOTHREAD);
- SKIP_IF(ret < 0 && errno == EINVAL);
- }
-
- absl::Mutex mu;
- pid_t child;
- bool ready = false;
- bool stop = false;
-
- ScopedThread t([&] {
- absl::MutexLock ml(&mu);
- EXPECT_THAT(child = ForkAndExit(0, 0), SyscallSucceeds());
- ready = true;
- mu.Await(absl::Condition(&stop));
-
- // This thread can wait on child.
- EXPECT_NO_ERRNO(WaitForWithOptions(child, __WNOTHREAD, 0));
- });
-
- // N.B. This must be declared after ScopedThread, so it is destructed first,
- // thus waking the thread.
- absl::MutexLock ml(&mu);
- mu.Await(absl::Condition(&ready));
-
- // This thread can't wait on child.
- EXPECT_THAT(WaitForWithOptions(child, __WNOTHREAD, 0),
- PosixErrorIs(ECHILD, ::testing::_));
-
- // Keep the sibling alive until after we've waited so the child isn't
- // reparented.
- stop = true;
-}
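// The runtime probe used above generalizes to a small helper: call waitid(2)
// with WNOHANG plus the flag under test and treat EINVAL as "kernel predates
// Linux 4.7". The helper name below is made up for illustration; it is not
// part of the test utilities.
#include <errno.h>
#include <sys/wait.h>

namespace {
// True if this kernel's waitid(2) accepts `flag`
// (__WNOTHREAD, __WCLONE, or __WALL).
bool WaitidAcceptsFlag(int flag) {
  int ret = waitid(P_ALL, 0, nullptr, WEXITED | WNOHANG | flag);
  return !(ret < 0 && errno == EINVAL);
}
}  // namespace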
-
-// Wait for specific child to exit.
-// A non-CLONE_THREAD child which sends SIGCHLD upon exit behaves much like
-// a forked process.
-TEST_P(WaitSpecificChildTest, CloneSIGCHLD) {
- uintptr_t stack;
- ASSERT_THAT(stack = AllocStack(), SyscallSucceeds());
- auto free =
- Cleanup([stack] { ASSERT_THAT(FreeStack(stack), SyscallSucceeds()); });
-
- int child;
- ASSERT_THAT(child = CloneAndExit(0, stack, SIGCHLD), SyscallSucceeds());
-
- EXPECT_NO_ERRNO(WaitFor(child, 0));
-}
-
-// Wait for specific child to exit.
-// A non-CLONE_THREAD child which does not send SIGCHLD upon exit can be waited
-// on, but returns ECHILD.
-TEST_P(WaitSpecificChildTest, CloneNoSIGCHLD) {
- uintptr_t stack;
- ASSERT_THAT(stack = AllocStack(), SyscallSucceeds());
- auto free =
- Cleanup([stack] { ASSERT_THAT(FreeStack(stack), SyscallSucceeds()); });
-
- int child;
- ASSERT_THAT(child = CloneAndExit(0, stack, 0), SyscallSucceeds());
-
- EXPECT_THAT(WaitFor(child, 0), PosixErrorIs(ECHILD, ::testing::_));
-}
-
-// Waiting after the child has already exited returns immediately.
-TEST_P(WaitSpecificChildTest, CloneAfterExit) {
- uintptr_t stack;
- ASSERT_THAT(stack = AllocStack(), SyscallSucceeds());
- auto free =
- Cleanup([stack] { ASSERT_THAT(FreeStack(stack), SyscallSucceeds()); });
-
- int child;
- // Send SIGCHLD for normal wait semantics.
- ASSERT_THAT(child = CloneAndExit(0, stack, SIGCHLD), SyscallSucceeds());
-
- absl::SleepFor(absl::Seconds(5));
-
- EXPECT_NO_ERRNO(WaitFor(child, 0));
-}
-
-// A CLONE_THREAD child cannot be waited on.
-TEST_P(WaitSpecificChildTest, CloneThread) {
- uintptr_t stack;
- ASSERT_THAT(stack = AllocStack(), SyscallSucceeds());
- auto free =
- Cleanup([stack] { ASSERT_THAT(FreeStack(stack), SyscallSucceeds()); });
-
- int child;
- ASSERT_THAT(child = CloneAndExit(15, stack, CLONE_THREAD), SyscallSucceeds());
- auto start = absl::Now();
-
- EXPECT_THAT(WaitFor(child, 0), PosixErrorIs(ECHILD, ::testing::_));
-
- // Ensure wait4 didn't block.
- EXPECT_LE(absl::Now() - start, absl::Seconds(10));
-
- // Since we can't wait on the child, we sleep to try to avoid freeing its
- // stack before it exits.
- absl::SleepFor(absl::Seconds(5));
-}
-
-// A child that does not send a SIGCHLD on exit may be waited on with
-// the __WCLONE flag.
-TEST_P(WaitSpecificChildTest, CloneWCLONE) {
- // Linux added WCLONE support to waitid(2) in
- // 91c4e8ea8f05916df0c8a6f383508ac7c9e10dba ("wait: allow sys_waitid() to
- // accept __WNOTHREAD/__WCLONE/__WALL"). i.e., Linux 4.7.
- //
- // Skip the test if it isn't supported yet.
- if (Sysno() == SYS_waitid) {
- int ret = waitid(P_ALL, 0, nullptr, WEXITED | WNOHANG | __WCLONE);
- SKIP_IF(ret < 0 && errno == EINVAL);
- }
-
- uintptr_t stack;
- ASSERT_THAT(stack = AllocStack(), SyscallSucceeds());
- auto free =
- Cleanup([stack] { ASSERT_THAT(FreeStack(stack), SyscallSucceeds()); });
-
- int child;
- ASSERT_THAT(child = CloneAndExit(0, stack, 0), SyscallSucceeds());
-
- EXPECT_NO_ERRNO(WaitForWithOptions(child, __WCLONE, 0));
-}
-
-// A forked child cannot be waited on with WCLONE.
-TEST_P(WaitSpecificChildTest, ForkWCLONE) {
- // Linux added WCLONE support to waitid(2) in
- // 91c4e8ea8f05916df0c8a6f383508ac7c9e10dba ("wait: allow sys_waitid() to
- // accept __WNOTHREAD/__WCLONE/__WALL"). i.e., Linux 4.7.
- //
- // Skip the test if it isn't supported yet.
- if (Sysno() == SYS_waitid) {
- int ret = waitid(P_ALL, 0, nullptr, WEXITED | WNOHANG | __WCLONE);
- SKIP_IF(ret < 0 && errno == EINVAL);
- }
-
- pid_t child;
- ASSERT_THAT(child = ForkAndExit(0, 0), SyscallSucceeds());
-
- EXPECT_THAT(WaitForWithOptions(child, WNOHANG | __WCLONE, 0),
- PosixErrorIs(ECHILD, ::testing::_));
-
- EXPECT_NO_ERRNO(WaitFor(child, 0));
-}
-
-// Any type of child can be waited on with WALL.
-TEST_P(WaitSpecificChildTest, WALL) {
- // Linux added WALL support to waitid(2) in
- // 91c4e8ea8f05916df0c8a6f383508ac7c9e10dba ("wait: allow sys_waitid() to
- // accept __WNOTHREAD/__WCLONE/__WALL"). i.e., Linux 4.7.
- //
- // Skip the test if it isn't supported yet.
- if (Sysno() == SYS_waitid) {
- int ret = waitid(P_ALL, 0, nullptr, WEXITED | WNOHANG | __WALL);
- SKIP_IF(ret < 0 && errno == EINVAL);
- }
-
- pid_t child;
- ASSERT_THAT(child = ForkAndExit(0, 0), SyscallSucceeds());
-
- EXPECT_NO_ERRNO(WaitForWithOptions(child, __WALL, 0));
-
- uintptr_t stack;
- ASSERT_THAT(stack = AllocStack(), SyscallSucceeds());
- auto free =
- Cleanup([stack] { ASSERT_THAT(FreeStack(stack), SyscallSucceeds()); });
-
- ASSERT_THAT(child = CloneAndExit(0, stack, 0), SyscallSucceeds());
-
- EXPECT_NO_ERRNO(WaitForWithOptions(child, __WALL, 0));
-}
-
-// Return ECHILD for bad child.
-TEST_P(WaitSpecificChildTest, BadChild) {
- EXPECT_THAT(WaitFor(42, 0), PosixErrorIs(ECHILD, ::testing::_));
-}
-
-// Wait for a child process that only exits after calling execve(2) from a
-// non-leader thread.
-TEST_P(WaitSpecificChildTest, AfterChildExecve) {
- ExecveArray const owned_child_argv = {"/bin/true"};
- char* const* const child_argv = owned_child_argv.get();
-
- uintptr_t stack;
- ASSERT_THAT(stack = AllocStack(), SyscallSucceeds());
- auto free =
- Cleanup([stack] { ASSERT_THAT(FreeStack(stack), SyscallSucceeds()); });
-
- pid_t const child = fork();
- if (child == 0) {
- // Give the parent some time to start waiting.
- SleepSafe(absl::Seconds(5));
- // Pass CLONE_VFORK to block the original thread in the child process until
- // the clone thread calls execve, annihilating them both. (This means that
- // if clone returns at all, something went wrong.)
- //
- // N.B. clone(2) is not officially async-signal-safe, but at minimum glibc's
- // x86_64 implementation is safe. See glibc
- // sysdeps/unix/sysv/linux/x86_64/clone.S.
- clone(
- +[](void* arg) {
- auto child_argv = static_cast<char* const*>(arg);
- execve(child_argv[0], child_argv, /* envp = */ nullptr);
- return errno;
- },
- reinterpret_cast<void*>(stack),
- CLONE_FILES | CLONE_FS | CLONE_SIGHAND | CLONE_THREAD | CLONE_VM |
- CLONE_VFORK,
- const_cast<char**>(child_argv));
- _exit(errno);
- }
- ASSERT_THAT(child, SyscallSucceeds());
- EXPECT_NO_ERRNO(WaitFor(child, 0));
-}
-
-PosixError CheckWait4(pid_t pid, int options, int code) {
- int status;
- auto const rv = Wait4(pid, &status, options, nullptr);
- MaybeSave();
- if (rv < 0) {
- return PosixError(errno, "wait4");
- } else if (rv != pid) {
- return PosixError(
- EINVAL, absl::StrCat("unexpected pid: got ", rv, ", wanted ", pid));
- }
- if (!WIFEXITED(status) || WEXITSTATUS(status) != code) {
- return PosixError(EINVAL, absl::StrCat("unexpected wait status: got ",
- status, ", wanted ", code));
- }
- return NoError();
-};
-
-PosixError CheckWaitid(pid_t pid, int options, int code) {
- siginfo_t si;
- auto const rv = Waitid(P_PID, pid, &si, options | WEXITED);
- MaybeSave();
- if (rv < 0) {
- return PosixError(errno, "waitid");
- }
- if (si.si_pid != pid) {
- return PosixError(EINVAL, absl::StrCat("unexpected pid: got ", si.si_pid,
- ", wanted ", pid));
- }
- if (si.si_signo != SIGCHLD) {
- return PosixError(EINVAL, absl::StrCat("unexpected signo: got ",
- si.si_signo, ", wanted ", SIGCHLD));
- }
- if (si.si_status != code) {
- return PosixError(EINVAL, absl::StrCat("unexpected status: got ",
- si.si_status, ", wanted ", code));
- }
- if (si.si_code != CLD_EXITED) {
- return PosixError(EINVAL, absl::StrCat("unexpected code: got ", si.si_code,
- ", wanted ", CLD_EXITED));
- }
- return NoError();
-}
-
-INSTANTIATE_TEST_SUITE_P(
- Waiters, WaitSpecificChildTest,
- ::testing::Values(std::make_tuple(SYS_wait4, CheckWait4),
- std::make_tuple(SYS_waitid, CheckWaitid)));
-
-// WIFEXITED, WIFSIGNALED, WTERMSIG indicate signal exit.
-TEST(WaitTest, SignalExit) {
- pid_t child;
- ASSERT_THAT(child = ForkAndExit(0, 10), SyscallSucceeds());
-
- EXPECT_THAT(kill(child, SIGKILL), SyscallSucceeds());
-
- int status;
- EXPECT_THAT(Wait4(child, &status, 0, nullptr),
- SyscallSucceedsWithValue(child));
-
- EXPECT_FALSE(WIFEXITED(status));
- EXPECT_TRUE(WIFSIGNALED(status));
- EXPECT_EQ(SIGKILL, WTERMSIG(status));
-}
-
-// waitid requires at least one option.
-TEST(WaitTest, WaitidOptions) {
- EXPECT_THAT(Waitid(P_ALL, 0, nullptr, 0), SyscallFailsWithErrno(EINVAL));
-}
-
-// waitid does not wait for a child to exit if not passed WEXITED.
-TEST(WaitTest, WaitidNoWEXITED) {
- pid_t child;
- ASSERT_THAT(child = ForkAndExit(0, 0), SyscallSucceeds());
- EXPECT_THAT(Waitid(P_ALL, 0, nullptr, WSTOPPED),
- SyscallFailsWithErrno(ECHILD));
- EXPECT_THAT(Waitid(P_ALL, 0, nullptr, WEXITED), SyscallSucceeds());
-}
-
-// WNOWAIT allows the same wait result to be returned again.
-TEST(WaitTest, WaitidWNOWAIT) {
- pid_t child;
- ASSERT_THAT(child = ForkAndExit(42, 0), SyscallSucceeds());
-
- siginfo_t info;
- ASSERT_THAT(Waitid(P_PID, child, &info, WEXITED | WNOWAIT),
- SyscallSucceeds());
- EXPECT_EQ(child, info.si_pid);
- EXPECT_EQ(SIGCHLD, info.si_signo);
- EXPECT_EQ(CLD_EXITED, info.si_code);
- EXPECT_EQ(42, info.si_status);
-
- ASSERT_THAT(Waitid(P_PID, child, &info, WEXITED), SyscallSucceeds());
- EXPECT_EQ(child, info.si_pid);
- EXPECT_EQ(SIGCHLD, info.si_signo);
- EXPECT_EQ(CLD_EXITED, info.si_code);
- EXPECT_EQ(42, info.si_status);
-
- EXPECT_THAT(Waitid(P_PID, child, &info, WEXITED),
- SyscallFailsWithErrno(ECHILD));
-}
-
-// waitpid(pid, status, options) is equivalent to
-// wait4(pid, status, options, nullptr).
-// It is a dedicated syscall on i386; glibc maps it to wait4 on amd64.
-TEST(WaitTest, WaitPid) {
- pid_t child;
- ASSERT_THAT(child = ForkAndExit(42, 0), SyscallSucceeds());
-
- int status;
- EXPECT_THAT(RetryEINTR(waitpid)(child, &status, 0),
- SyscallSucceedsWithValue(child));
-
- EXPECT_TRUE(WIFEXITED(status));
- EXPECT_EQ(42, WEXITSTATUS(status));
-}
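// A sketch of the equivalence noted above: on amd64, waitpid(2) can be
// expressed directly as wait4(2) with a null rusage pointer. The wrapper name
// is illustrative only.
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>

namespace {
pid_t WaitpidViaWait4(pid_t pid, int* status, int options) {
  // Same semantics as waitpid(pid, status, options).
  return static_cast<pid_t>(syscall(SYS_wait4, pid, status, options,
                                    static_cast<struct rusage*>(nullptr)));
}
}  // namespace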
-
-// Test that signaling a zombie succeeds. This is really a signals test that
-// happens to live in this file.
-TEST(WaitTest, KillZombie) {
- pid_t child;
- ASSERT_THAT(child = ForkAndExit(42, 0), SyscallSucceeds());
-
- // Sleep for three seconds to ensure the child has exited.
- absl::SleepFor(absl::Seconds(3));
-
- // The child is now a zombie. Check that killing it returns 0.
- EXPECT_THAT(kill(child, SIGTERM), SyscallSucceeds());
- EXPECT_THAT(kill(child, 0), SyscallSucceeds());
-
- EXPECT_THAT(Wait4(child, nullptr, 0, nullptr),
- SyscallSucceedsWithValue(child));
-}
-
-TEST(WaitTest, Wait4Rusage) {
- pid_t child;
- constexpr absl::Duration kSpin = absl::Seconds(3);
- ASSERT_THAT(child = ForkSpinAndExit(21, absl::ToInt64Seconds(kSpin)),
- SyscallSucceeds());
-
- int status;
- struct rusage rusage = {};
- ASSERT_THAT(Wait4(child, &status, 0, &rusage),
- SyscallSucceedsWithValue(child));
-
- EXPECT_TRUE(WIFEXITED(status));
- EXPECT_EQ(21, WEXITSTATUS(status));
-
- EXPECT_GE(RusageCpuTime(rusage), kSpin);
-}
-
-TEST(WaitTest, WaitidRusage) {
- pid_t child;
- constexpr absl::Duration kSpin = absl::Seconds(3);
- ASSERT_THAT(child = ForkSpinAndExit(27, absl::ToInt64Seconds(kSpin)),
- SyscallSucceeds());
-
- siginfo_t si = {};
- struct rusage rusage = {};
-
- // From waitid(2):
- // The raw waitid() system call takes a fifth argument, of type
- // struct rusage *. If this argument is non-NULL, then it is used
- // to return resource usage information about the child, in the
- // same manner as wait4(2).
- EXPECT_THAT(
- RetryEINTR(syscall)(SYS_waitid, P_PID, child, &si, WEXITED, &rusage),
- SyscallSucceeds());
- EXPECT_EQ(si.si_signo, SIGCHLD);
- EXPECT_EQ(si.si_code, CLD_EXITED);
- EXPECT_EQ(si.si_status, 27);
- EXPECT_EQ(si.si_pid, child);
-
- EXPECT_GE(RusageCpuTime(rusage), kSpin);
-}
-
-// After bf959931ddb88c4e4366e96dd22e68fa0db9527c ("wait/ptrace: assume __WALL
-// if the child is traced") (Linux 4.7), tracees are always eligible for
-// waiting, regardless of type.
-TEST(WaitTest, TraceeWALL) {
- int fds[2];
- ASSERT_THAT(pipe(fds), SyscallSucceeds());
- FileDescriptor rfd(fds[0]);
- FileDescriptor wfd(fds[1]);
-
- pid_t child = fork();
- if (child == 0) {
- // Child.
- rfd.reset();
-
- TEST_PCHECK(ptrace(PTRACE_TRACEME, 0, nullptr, nullptr) == 0);
-
- // Notify parent that we're now a tracee.
- wfd.reset();
-
- _exit(0);
- }
- ASSERT_THAT(child, SyscallSucceeds());
-
- wfd.reset();
-
- // Wait for child to become tracee.
- char c;
- EXPECT_THAT(ReadFd(rfd.get(), &c, sizeof(c)), SyscallSucceedsWithValue(0));
-
- // We can wait on the fork child with WCLONE, as it is a tracee.
- int status;
- if (IsRunningOnGvisor()) {
- ASSERT_THAT(Wait4(child, &status, __WCLONE, nullptr),
- SyscallSucceedsWithValue(child));
-
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0) << status;
- } else {
- // On older versions of Linux, we may get ECHILD.
- ASSERT_THAT(Wait4(child, &status, __WCLONE, nullptr),
- ::testing::AnyOf(SyscallSucceedsWithValue(child),
- SyscallFailsWithErrno(ECHILD)));
- }
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/write.cc b/test/syscalls/linux/write.cc
deleted file mode 100644
index 3373ba72b..000000000
--- a/test/syscalls/linux/write.cc
+++ /dev/null
@@ -1,340 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <signal.h>
-#include <sys/mman.h>
-#include <sys/resource.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <time.h>
-#include <unistd.h>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/base/macros.h"
-#include "test/util/cleanup.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// TODO(gvisor.dev/issue/2370): This test is currently very rudimentary.
-class WriteTest : public ::testing::Test {
- public:
- ssize_t WriteBytes(int fd, int bytes) {
- std::vector<char> buf(bytes);
- std::fill(buf.begin(), buf.end(), 'a');
- return WriteFd(fd, buf.data(), buf.size());
- }
-};
-
-TEST_F(WriteTest, WriteNoExceedsRLimit) {
- // Get the current rlimit and restore after test run.
- struct rlimit initial_lim;
- ASSERT_THAT(getrlimit(RLIMIT_FSIZE, &initial_lim), SyscallSucceeds());
- auto cleanup = Cleanup([&initial_lim] {
- EXPECT_THAT(setrlimit(RLIMIT_FSIZE, &initial_lim), SyscallSucceeds());
- });
-
- int fd;
- struct rlimit setlim;
- const int target_lim = 1024;
- setlim.rlim_cur = target_lim;
- setlim.rlim_max = RLIM_INFINITY;
- const std::string pathname = NewTempAbsPath();
- ASSERT_THAT(fd = open(pathname.c_str(), O_WRONLY | O_CREAT, S_IRWXU),
- SyscallSucceeds());
- ASSERT_THAT(setrlimit(RLIMIT_FSIZE, &setlim), SyscallSucceeds());
-
- EXPECT_THAT(WriteBytes(fd, target_lim), SyscallSucceedsWithValue(target_lim));
-
- std::vector<char> buf(target_lim + 1);
- std::fill(buf.begin(), buf.end(), 'a');
- EXPECT_THAT(pwrite(fd, buf.data(), target_lim, 1), SyscallSucceeds());
- EXPECT_THAT(pwrite64(fd, buf.data(), target_lim, 1), SyscallSucceeds());
-
- EXPECT_THAT(close(fd), SyscallSucceeds());
-}
-
-TEST_F(WriteTest, WriteExceedsRLimit) {
- // Get the current rlimit and restore after test run.
- struct rlimit initial_lim;
- ASSERT_THAT(getrlimit(RLIMIT_FSIZE, &initial_lim), SyscallSucceeds());
- auto cleanup = Cleanup([&initial_lim] {
- EXPECT_THAT(setrlimit(RLIMIT_FSIZE, &initial_lim), SyscallSucceeds());
- });
-
- int fd;
- sigset_t filesize_mask;
- sigemptyset(&filesize_mask);
- sigaddset(&filesize_mask, SIGXFSZ);
-
- struct rlimit setlim;
- const int target_lim = 1024;
- setlim.rlim_cur = target_lim;
- setlim.rlim_max = RLIM_INFINITY;
-
- const std::string pathname = NewTempAbsPath();
- ASSERT_THAT(fd = open(pathname.c_str(), O_WRONLY | O_CREAT, S_IRWXU),
- SyscallSucceeds());
- ASSERT_THAT(setrlimit(RLIMIT_FSIZE, &setlim), SyscallSucceeds());
- ASSERT_THAT(sigprocmask(SIG_BLOCK, &filesize_mask, nullptr),
- SyscallSucceeds());
- std::vector<char> buf(target_lim + 2);
- std::fill(buf.begin(), buf.end(), 'a');
-
- EXPECT_THAT(write(fd, buf.data(), target_lim + 1),
- SyscallSucceedsWithValue(target_lim));
- EXPECT_THAT(write(fd, buf.data(), 1), SyscallFailsWithErrno(EFBIG));
- siginfo_t info;
- struct timespec timelimit = {0, 0};
- ASSERT_THAT(RetryEINTR(sigtimedwait)(&filesize_mask, &info, &timelimit),
- SyscallSucceedsWithValue(SIGXFSZ));
- EXPECT_EQ(info.si_code, SI_USER);
- EXPECT_EQ(info.si_pid, getpid());
- EXPECT_EQ(info.si_uid, getuid());
-
- EXPECT_THAT(pwrite(fd, buf.data(), target_lim + 1, 1),
- SyscallSucceedsWithValue(target_lim - 1));
- EXPECT_THAT(pwrite(fd, buf.data(), 1, target_lim),
- SyscallFailsWithErrno(EFBIG));
- ASSERT_THAT(RetryEINTR(sigtimedwait)(&filesize_mask, &info, &timelimit),
- SyscallSucceedsWithValue(SIGXFSZ));
- EXPECT_EQ(info.si_code, SI_USER);
- EXPECT_EQ(info.si_pid, getpid());
- EXPECT_EQ(info.si_uid, getuid());
-
- EXPECT_THAT(pwrite64(fd, buf.data(), target_lim + 1, 1),
- SyscallSucceedsWithValue(target_lim - 1));
- EXPECT_THAT(pwrite64(fd, buf.data(), 1, target_lim),
- SyscallFailsWithErrno(EFBIG));
- ASSERT_THAT(RetryEINTR(sigtimedwait)(&filesize_mask, &info, &timelimit),
- SyscallSucceedsWithValue(SIGXFSZ));
- EXPECT_EQ(info.si_code, SI_USER);
- EXPECT_EQ(info.si_pid, getpid());
- EXPECT_EQ(info.si_uid, getuid());
-
- ASSERT_THAT(sigprocmask(SIG_UNBLOCK, &filesize_mask, nullptr),
- SyscallSucceeds());
- EXPECT_THAT(close(fd), SyscallSucceeds());
-}
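// The RLIMIT_FSIZE behavior exercised above, stripped of the test plumbing: a
// write that crosses the soft limit is truncated at the limit, and a write
// starting at or beyond it fails with EFBIG and raises SIGXFSZ (fatal by
// default, so the sketch ignores it). The scratch path is arbitrary.
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <sys/resource.h>
#include <unistd.h>
#include <cassert>
#include <vector>

int main() {
  signal(SIGXFSZ, SIG_IGN);  // Otherwise the default action kills the process.

  struct rlimit lim;
  lim.rlim_cur = 1024;
  lim.rlim_max = RLIM_INFINITY;
  assert(setrlimit(RLIMIT_FSIZE, &lim) == 0);

  int fd = open("/tmp/rlimit_fsize_demo", O_WRONLY | O_CREAT | O_TRUNC, 0600);
  assert(fd >= 0);

  std::vector<char> buf(2048, 'a');
  assert(write(fd, buf.data(), buf.size()) == 1024);         // Clamped to limit.
  assert(write(fd, buf.data(), 1) == -1 && errno == EFBIG);  // Wholly past it.

  close(fd);
  unlink("/tmp/rlimit_fsize_demo");
  return 0;
}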
-
-TEST_F(WriteTest, WriteIncrementOffset) {
- TempPath tmpfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor f =
- ASSERT_NO_ERRNO_AND_VALUE(Open(tmpfile.path().c_str(), O_WRONLY));
- int fd = f.get();
-
- EXPECT_THAT(WriteBytes(fd, 0), SyscallSucceedsWithValue(0));
- EXPECT_THAT(lseek(fd, 0, SEEK_CUR), SyscallSucceedsWithValue(0));
-
- const int bytes_total = 1024;
-
- EXPECT_THAT(WriteBytes(fd, bytes_total),
- SyscallSucceedsWithValue(bytes_total));
- EXPECT_THAT(lseek(fd, 0, SEEK_CUR), SyscallSucceedsWithValue(bytes_total));
-}
-
-TEST_F(WriteTest, WriteIncrementOffsetSeek) {
- const std::string data = "hello world\n";
- TempPath tmpfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), data, TempPath::kDefaultFileMode));
- FileDescriptor f =
- ASSERT_NO_ERRNO_AND_VALUE(Open(tmpfile.path().c_str(), O_WRONLY));
- int fd = f.get();
-
- const int seek_offset = data.size() / 2;
- ASSERT_THAT(lseek(fd, seek_offset, SEEK_SET),
- SyscallSucceedsWithValue(seek_offset));
-
- const int write_bytes = 512;
- EXPECT_THAT(WriteBytes(fd, write_bytes),
- SyscallSucceedsWithValue(write_bytes));
- EXPECT_THAT(lseek(fd, 0, SEEK_CUR),
- SyscallSucceedsWithValue(seek_offset + write_bytes));
-}
-
-TEST_F(WriteTest, WriteIncrementOffsetAppend) {
- const std::string data = "hello world\n";
- TempPath tmpfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), data, TempPath::kDefaultFileMode));
- FileDescriptor f = ASSERT_NO_ERRNO_AND_VALUE(
- Open(tmpfile.path().c_str(), O_WRONLY | O_APPEND));
- int fd = f.get();
-
- EXPECT_THAT(WriteBytes(fd, 1024), SyscallSucceedsWithValue(1024));
- EXPECT_THAT(lseek(fd, 0, SEEK_CUR),
- SyscallSucceedsWithValue(data.size() + 1024));
-}
-
-TEST_F(WriteTest, WriteIncrementOffsetEOF) {
- const std::string data = "hello world\n";
- const TempPath tmpfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
- GetAbsoluteTestTmpdir(), data, TempPath::kDefaultFileMode));
- FileDescriptor f =
- ASSERT_NO_ERRNO_AND_VALUE(Open(tmpfile.path().c_str(), O_WRONLY));
- int fd = f.get();
-
- EXPECT_THAT(lseek(fd, 0, SEEK_END), SyscallSucceedsWithValue(data.size()));
-
- EXPECT_THAT(WriteBytes(fd, 1024), SyscallSucceedsWithValue(1024));
- EXPECT_THAT(lseek(fd, 0, SEEK_END),
- SyscallSucceedsWithValue(data.size() + 1024));
-}
-
-TEST_F(WriteTest, PwriteNoChangeOffset) {
- TempPath tmpfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor f =
- ASSERT_NO_ERRNO_AND_VALUE(Open(tmpfile.path().c_str(), O_WRONLY));
- int fd = f.get();
-
- const std::string data = "hello world\n";
-
- EXPECT_THAT(pwrite(fd, data.data(), data.size(), 0),
- SyscallSucceedsWithValue(data.size()));
- EXPECT_THAT(lseek(fd, 0, SEEK_CUR), SyscallSucceedsWithValue(0));
-
- const int bytes_total = 1024;
- ASSERT_THAT(WriteBytes(fd, bytes_total),
- SyscallSucceedsWithValue(bytes_total));
- ASSERT_THAT(lseek(fd, 0, SEEK_CUR), SyscallSucceedsWithValue(bytes_total));
-
- EXPECT_THAT(pwrite(fd, data.data(), data.size(), bytes_total),
- SyscallSucceedsWithValue(data.size()));
- EXPECT_THAT(lseek(fd, 0, SEEK_CUR), SyscallSucceedsWithValue(bytes_total));
-}
-
-TEST_F(WriteTest, WriteWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- TempPath tmpfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor f =
- ASSERT_NO_ERRNO_AND_VALUE(Open(tmpfile.path().c_str(), O_PATH));
- int fd = f.get();
-
- EXPECT_THAT(WriteBytes(fd, 1024), SyscallFailsWithErrno(EBADF));
-}
-
-TEST_F(WriteTest, WritevWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- TempPath tmpfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor f =
- ASSERT_NO_ERRNO_AND_VALUE(Open(tmpfile.path().c_str(), O_PATH));
- int fd = f.get();
-
- char buf[16];
- struct iovec iov;
- iov.iov_base = buf;
- iov.iov_len = sizeof(buf);
-
- EXPECT_THAT(writev(fd, &iov, /*__count=*/1), SyscallFailsWithErrno(EBADF));
-}
-
-TEST_F(WriteTest, PwriteWithOpath) {
- SKIP_IF(IsRunningWithVFS1());
- TempPath tmpfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor f =
- ASSERT_NO_ERRNO_AND_VALUE(Open(tmpfile.path().c_str(), O_PATH));
- int fd = f.get();
-
- const std::string data = "hello world\n";
-
- EXPECT_THAT(pwrite(fd, data.data(), data.size(), 0),
- SyscallFailsWithErrno(EBADF));
-}
-
-// Test that partial writes that hit SIGSEGV are correctly handled and return
-// a partial write count.
-TEST_F(WriteTest, PartialWriteSIGSEGV) {
- // Allocate 2 pages and remove permission from the second.
- const size_t size = 2 * kPageSize;
- void* addr = mmap(0, size, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
- ASSERT_NE(addr, MAP_FAILED);
- auto cleanup = Cleanup(
- [addr, size] { EXPECT_THAT(munmap(addr, size), SyscallSucceeds()); });
-
- void* badAddr = reinterpret_cast<char*>(addr) + kPageSize;
- ASSERT_THAT(mprotect(badAddr, kPageSize, PROT_NONE), SyscallSucceeds());
-
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path().c_str(), O_WRONLY));
-
-  // Attempt to write both pages to the file. Create a non-contiguous iovec
-  // pair to ensure the operation is done in two steps.
- struct iovec iov[] = {
- {
- .iov_base = addr,
- .iov_len = kPageSize,
- },
- {
- .iov_base = addr,
- .iov_len = size,
- },
- };
- // Write should succeed for the first iovec and half of the second (=2 pages).
- EXPECT_THAT(pwritev(fd.get(), iov, ABSL_ARRAYSIZE(iov), 0),
- SyscallSucceedsWithValue(2 * kPageSize));
-}
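// The same property holds for a plain write(2) without the iovec pair: when
// the source buffer spans a readable page followed by a PROT_NONE page, the
// syscall reports the bytes it managed to copy rather than failing with
// EFAULT. A sketch against a throwaway temp file:
#include <fcntl.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>
#include <cassert>

int main() {
  const long page = sysconf(_SC_PAGESIZE);
  void* addr = mmap(nullptr, 2 * page, PROT_READ,
                    MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  assert(addr != MAP_FAILED);
  char* buf = static_cast<char*>(addr);
  assert(mprotect(buf + page, page, PROT_NONE) == 0);  // Second page faults.

  char path[] = "/tmp/partial_write_XXXXXX";
  int fd = mkstemp(path);
  assert(fd >= 0);
  unlink(path);

  // Ask for two pages; the copy faults at the second page, so write(2)
  // returns the one page it could copy instead of EFAULT.
  assert(write(fd, buf, 2 * page) == page);
  close(fd);
  munmap(addr, 2 * page);
  return 0;
}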
-
-// Test that partial writes that hit SIGBUS are correctly handled and return
-// a partial write count.
-TEST_F(WriteTest, PartialWriteSIGBUS) {
- SKIP_IF(getenv("GVISOR_GOFER_UNCACHED")); // Can't mmap from uncached files.
-
- TempPath mapfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd_map =
- ASSERT_NO_ERRNO_AND_VALUE(Open(mapfile.path().c_str(), O_RDWR));
-
-  // Make only the first page readable so the write is forced to be partial.
- ASSERT_THAT(ftruncate(fd_map.get(), kPageSize), SyscallSucceeds());
-
- // Map 2 pages, one of which is not allocated in the backing file. Reading
- // from it will trigger a SIGBUS.
- const size_t size = 2 * kPageSize;
- void* addr =
- mmap(NULL, size, PROT_READ, MAP_FILE | MAP_PRIVATE, fd_map.get(), 0);
- ASSERT_NE(addr, MAP_FAILED);
- auto cleanup = Cleanup(
- [addr, size] { EXPECT_THAT(munmap(addr, size), SyscallSucceeds()); });
-
- TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
- FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path().c_str(), O_WRONLY));
-
-  // Attempt to write both pages to the file. Create a non-contiguous iovec
-  // pair to ensure the operation is done in two steps.
- struct iovec iov[] = {
- {
- .iov_base = addr,
- .iov_len = kPageSize,
- },
- {
- .iov_base = addr,
- .iov_len = size,
- },
- };
- // Write should succeed for the first iovec and half of the second (=2 pages).
- ASSERT_THAT(pwritev(fd.get(), iov, ABSL_ARRAYSIZE(iov), 0),
- SyscallSucceedsWithValue(2 * kPageSize));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/syscalls/linux/xattr.cc b/test/syscalls/linux/xattr.cc
deleted file mode 100644
index c8a97df6b..000000000
--- a/test/syscalls/linux/xattr.cc
+++ /dev/null
@@ -1,709 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <limits.h>
-#include <sys/types.h>
-#include <sys/xattr.h>
-#include <unistd.h>
-
-#include <string>
-#include <vector>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/container/flat_hash_set.h"
-#include "test/syscalls/linux/file_base.h"
-#include "test/util/capability_util.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/fs_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-using ::gvisor::testing::IsTmpfs;
-
-class XattrTest : public FileTest {};
-
-TEST_F(XattrTest, XattrNonexistentFile) {
- const char* path = "/does/not/exist";
- const char* name = "user.test";
- EXPECT_THAT(setxattr(path, name, nullptr, 0, /*flags=*/0),
- SyscallFailsWithErrno(ENOENT));
- EXPECT_THAT(getxattr(path, name, nullptr, 0), SyscallFailsWithErrno(ENOENT));
- EXPECT_THAT(listxattr(path, nullptr, 0), SyscallFailsWithErrno(ENOENT));
- EXPECT_THAT(removexattr(path, name), SyscallFailsWithErrno(ENOENT));
-}
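// The basic round trip that the tests below build on: set a "user." attribute,
// read it back, list it, and remove it. The helper and the attribute name are
// illustrative; `path` must name a file on a filesystem with user xattrs
// enabled (e.g. tmpfs or ext4).
#include <sys/xattr.h>
#include <cassert>
#include <cstring>

namespace {
void XattrRoundTrip(const char* path) {
  const char name[] = "user.demo";
  const char val[] = "v";
  assert(setxattr(path, name, val, sizeof(val), /*flags=*/0) == 0);

  char buf[8] = {};
  assert(getxattr(path, name, buf, sizeof(buf)) ==
         static_cast<ssize_t>(sizeof(val)));
  assert(strcmp(buf, val) == 0);

  char list[64] = {};
  assert(listxattr(path, list, sizeof(list)) > 0);  // Contains "user.demo".

  assert(removexattr(path, name) == 0);
}
}  // namespace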
-
-TEST_F(XattrTest, XattrNullName) {
- const char* path = test_file_name_.c_str();
-
- EXPECT_THAT(setxattr(path, nullptr, nullptr, 0, /*flags=*/0),
- SyscallFailsWithErrno(EFAULT));
- EXPECT_THAT(getxattr(path, nullptr, nullptr, 0),
- SyscallFailsWithErrno(EFAULT));
- EXPECT_THAT(removexattr(path, nullptr), SyscallFailsWithErrno(EFAULT));
-}
-
-TEST_F(XattrTest, XattrEmptyName) {
- const char* path = test_file_name_.c_str();
-
- EXPECT_THAT(setxattr(path, "", nullptr, 0, /*flags=*/0),
- SyscallFailsWithErrno(ERANGE));
- EXPECT_THAT(getxattr(path, "", nullptr, 0), SyscallFailsWithErrno(ERANGE));
- EXPECT_THAT(removexattr(path, ""), SyscallFailsWithErrno(ERANGE));
-}
-
-TEST_F(XattrTest, XattrLargeName) {
- const char* path = test_file_name_.c_str();
- std::string name = "user.";
- name += std::string(XATTR_NAME_MAX - name.length(), 'a');
-
- if (!IsRunningOnGvisor()) {
-    // In gVisor, access to xattrs is controlled with an explicit list of
-    // allowed names. This name isn't configured to allow access, so only
-    // exercise it when running on Linux.
- EXPECT_THAT(setxattr(path, name.c_str(), nullptr, 0, /*flags=*/0),
- SyscallSucceeds());
- EXPECT_THAT(getxattr(path, name.c_str(), nullptr, 0),
- SyscallSucceedsWithValue(0));
- }
-
- name += "a";
- EXPECT_THAT(setxattr(path, name.c_str(), nullptr, 0, /*flags=*/0),
- SyscallFailsWithErrno(ERANGE));
- EXPECT_THAT(getxattr(path, name.c_str(), nullptr, 0),
- SyscallFailsWithErrno(ERANGE));
- EXPECT_THAT(removexattr(path, name.c_str()), SyscallFailsWithErrno(ERANGE));
-}
-
-TEST_F(XattrTest, XattrInvalidPrefix) {
- const char* path = test_file_name_.c_str();
- std::string name(XATTR_NAME_MAX, 'a');
- EXPECT_THAT(setxattr(path, name.c_str(), nullptr, 0, /*flags=*/0),
- SyscallFailsWithErrno(EOPNOTSUPP));
- EXPECT_THAT(getxattr(path, name.c_str(), nullptr, 0),
- SyscallFailsWithErrno(EOPNOTSUPP));
- EXPECT_THAT(removexattr(path, name.c_str()),
- SyscallFailsWithErrno(EOPNOTSUPP));
-}
-
-// Do not allow save/restore cycles after making the test file read-only, as
-// the restore will fail to open it with r/w permissions.
-TEST_F(XattrTest, XattrReadOnly) {
- // Drop capabilities that allow us to override file and directory permissions.
- AutoCapability cap1(CAP_DAC_OVERRIDE, false);
- AutoCapability cap2(CAP_DAC_READ_SEARCH, false);
-
- const char* path = test_file_name_.c_str();
- const char name[] = "user.test";
- char val = 'a';
- size_t size = sizeof(val);
-
- EXPECT_THAT(setxattr(path, name, &val, size, /*flags=*/0), SyscallSucceeds());
-
- DisableSave ds;
- ASSERT_NO_ERRNO(testing::Chmod(test_file_name_, S_IRUSR));
-
- EXPECT_THAT(setxattr(path, name, &val, size, /*flags=*/0),
- SyscallFailsWithErrno(EACCES));
- EXPECT_THAT(removexattr(path, name), SyscallFailsWithErrno(EACCES));
-
- char buf = '-';
- EXPECT_THAT(getxattr(path, name, &buf, size), SyscallSucceedsWithValue(size));
- EXPECT_EQ(buf, val);
-
- char list[sizeof(name)];
- EXPECT_THAT(listxattr(path, list, sizeof(list)),
- SyscallSucceedsWithValue(sizeof(name)));
- EXPECT_STREQ(list, name);
-}
-
-// Do not allow save/restore cycles after making the test file write-only, as
-// the restore will fail to open it with r/w permissions.
-TEST_F(XattrTest, XattrWriteOnly) {
- // Drop capabilities that allow us to override file and directory permissions.
- AutoCapability cap1(CAP_DAC_OVERRIDE, false);
- AutoCapability cap2(CAP_DAC_READ_SEARCH, false);
-
- DisableSave ds;
- ASSERT_NO_ERRNO(testing::Chmod(test_file_name_, S_IWUSR));
-
- const char* path = test_file_name_.c_str();
- const char name[] = "user.test";
- char val = 'a';
- size_t size = sizeof(val);
-
- EXPECT_THAT(setxattr(path, name, &val, size, /*flags=*/0), SyscallSucceeds());
-
- EXPECT_THAT(getxattr(path, name, nullptr, 0), SyscallFailsWithErrno(EACCES));
-
- // listxattr will succeed even without read permissions.
- char list[sizeof(name)];
- EXPECT_THAT(listxattr(path, list, sizeof(list)),
- SyscallSucceedsWithValue(sizeof(name)));
- EXPECT_STREQ(list, name);
-
- EXPECT_THAT(removexattr(path, name), SyscallSucceeds());
-}
-
-TEST_F(XattrTest, XattrTrustedWithNonadmin) {
- // TODO(b/148380782): Support setxattr and getxattr with "trusted" prefix.
- SKIP_IF(IsRunningOnGvisor());
- SKIP_IF(ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
-
- const char* path = test_file_name_.c_str();
- const char name[] = "trusted.abc";
- EXPECT_THAT(setxattr(path, name, nullptr, 0, /*flags=*/0),
- SyscallFailsWithErrno(EPERM));
- EXPECT_THAT(removexattr(path, name), SyscallFailsWithErrno(EPERM));
- EXPECT_THAT(getxattr(path, name, nullptr, 0), SyscallFailsWithErrno(ENODATA));
-}
-
-TEST_F(XattrTest, XattrOnDirectory) {
- TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const char name[] = "user.test";
- EXPECT_THAT(setxattr(dir.path().c_str(), name, nullptr, 0, /*flags=*/0),
- SyscallSucceeds());
- EXPECT_THAT(getxattr(dir.path().c_str(), name, nullptr, 0),
- SyscallSucceedsWithValue(0));
-
- char list[sizeof(name)];
- EXPECT_THAT(listxattr(dir.path().c_str(), list, sizeof(list)),
- SyscallSucceedsWithValue(sizeof(name)));
- EXPECT_STREQ(list, name);
-
- EXPECT_THAT(removexattr(dir.path().c_str(), name), SyscallSucceeds());
-}
-
-TEST_F(XattrTest, XattrOnSymlink) {
- TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- TempPath link = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateSymlinkTo(dir.path(), test_file_name_));
- const char name[] = "user.test";
- EXPECT_THAT(setxattr(link.path().c_str(), name, nullptr, 0, /*flags=*/0),
- SyscallSucceeds());
- EXPECT_THAT(getxattr(link.path().c_str(), name, nullptr, 0),
- SyscallSucceedsWithValue(0));
-
- char list[sizeof(name)];
- EXPECT_THAT(listxattr(link.path().c_str(), list, sizeof(list)),
- SyscallSucceedsWithValue(sizeof(name)));
- EXPECT_STREQ(list, name);
-
- EXPECT_THAT(removexattr(link.path().c_str(), name), SyscallSucceeds());
-}
-
-TEST_F(XattrTest, XattrOnInvalidFileTypes) {
- const char name[] = "user.test";
-
- char char_device[] = "/dev/zero";
- EXPECT_THAT(setxattr(char_device, name, nullptr, 0, /*flags=*/0),
- SyscallFailsWithErrno(EPERM));
- EXPECT_THAT(getxattr(char_device, name, nullptr, 0),
- SyscallFailsWithErrno(ENODATA));
- EXPECT_THAT(listxattr(char_device, nullptr, 0), SyscallSucceedsWithValue(0));
-
- // Use tmpfs, where creation of named pipes is supported.
- const std::string fifo = NewTempAbsPathInDir("/dev/shm");
- const char* path = fifo.c_str();
- EXPECT_THAT(mknod(path, S_IFIFO | S_IRUSR | S_IWUSR, 0), SyscallSucceeds());
- EXPECT_THAT(setxattr(path, name, nullptr, 0, /*flags=*/0),
- SyscallFailsWithErrno(EPERM));
- EXPECT_THAT(getxattr(path, name, nullptr, 0), SyscallFailsWithErrno(ENODATA));
- EXPECT_THAT(listxattr(path, nullptr, 0), SyscallSucceedsWithValue(0));
- EXPECT_THAT(removexattr(path, name), SyscallFailsWithErrno(EPERM));
-}
-
-TEST_F(XattrTest, SetXattrSizeSmallerThanValue) {
- const char* path = test_file_name_.c_str();
- const char name[] = "user.test";
- std::vector<char> val = {'a', 'a'};
- size_t size = 1;
- EXPECT_THAT(setxattr(path, name, val.data(), size, /*flags=*/0),
- SyscallSucceeds());
-
- std::vector<char> buf = {'-', '-'};
- std::vector<char> expected_buf = {'a', '-'};
- EXPECT_THAT(getxattr(path, name, buf.data(), buf.size()),
- SyscallSucceedsWithValue(size));
- EXPECT_EQ(buf, expected_buf);
-}
-
-TEST_F(XattrTest, SetXattrZeroSize) {
- const char* path = test_file_name_.c_str();
- const char name[] = "user.test";
- char val = 'a';
- EXPECT_THAT(setxattr(path, name, &val, 0, /*flags=*/0), SyscallSucceeds());
-
- char buf = '-';
- EXPECT_THAT(getxattr(path, name, &buf, XATTR_SIZE_MAX),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(buf, '-');
-}
-
-TEST_F(XattrTest, SetXattrSizeTooLarge) {
- const char* path = test_file_name_.c_str();
- const char name[] = "user.test";
-
- // Note that each particular fs implementation may stipulate a lower size
- // limit, in which case we actually may fail (e.g. error with ENOSPC) for
- // some sizes under XATTR_SIZE_MAX.
- size_t size = XATTR_SIZE_MAX + 1;
- std::vector<char> val(size);
- EXPECT_THAT(setxattr(path, name, val.data(), size, /*flags=*/0),
- SyscallFailsWithErrno(E2BIG));
-
- EXPECT_THAT(getxattr(path, name, nullptr, 0), SyscallFailsWithErrno(ENODATA));
-}
-
-TEST_F(XattrTest, SetXattrNullValueAndNonzeroSize) {
- const char* path = test_file_name_.c_str();
- const char name[] = "user.test";
- EXPECT_THAT(setxattr(path, name, nullptr, 1, /*flags=*/0),
- SyscallFailsWithErrno(EFAULT));
-
- EXPECT_THAT(getxattr(path, name, nullptr, 0), SyscallFailsWithErrno(ENODATA));
-}
-
-TEST_F(XattrTest, SetXattrNullValueAndZeroSize) {
- const char* path = test_file_name_.c_str();
- const char name[] = "user.test";
- EXPECT_THAT(setxattr(path, name, nullptr, 0, /*flags=*/0), SyscallSucceeds());
-
- EXPECT_THAT(getxattr(path, name, nullptr, 0), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(XattrTest, SetXattrValueTooLargeButOKSize) {
- const char* path = test_file_name_.c_str();
- const char name[] = "user.test";
- std::vector<char> val(XATTR_SIZE_MAX + 1);
- std::fill(val.begin(), val.end(), 'a');
- size_t size = 1;
- EXPECT_THAT(setxattr(path, name, val.data(), size, /*flags=*/0),
- SyscallSucceeds());
-
- std::vector<char> buf = {'-', '-'};
- std::vector<char> expected_buf = {'a', '-'};
- EXPECT_THAT(getxattr(path, name, buf.data(), size),
- SyscallSucceedsWithValue(size));
- EXPECT_EQ(buf, expected_buf);
-}
-
-TEST_F(XattrTest, SetXattrReplaceWithSmaller) {
- const char* path = test_file_name_.c_str();
- const char name[] = "user.test";
- std::vector<char> val = {'a', 'a'};
- EXPECT_THAT(setxattr(path, name, val.data(), 2, /*flags=*/0),
- SyscallSucceeds());
- EXPECT_THAT(setxattr(path, name, val.data(), 1, /*flags=*/0),
- SyscallSucceeds());
-
- std::vector<char> buf = {'-', '-'};
- std::vector<char> expected_buf = {'a', '-'};
- EXPECT_THAT(getxattr(path, name, buf.data(), 2), SyscallSucceedsWithValue(1));
- EXPECT_EQ(buf, expected_buf);
-}
-
-TEST_F(XattrTest, SetXattrReplaceWithLarger) {
- const char* path = test_file_name_.c_str();
- const char name[] = "user.test";
- std::vector<char> val = {'a', 'a'};
- EXPECT_THAT(setxattr(path, name, val.data(), 1, /*flags=*/0),
- SyscallSucceeds());
- EXPECT_THAT(setxattr(path, name, val.data(), 2, /*flags=*/0),
- SyscallSucceeds());
-
- std::vector<char> buf = {'-', '-'};
- EXPECT_THAT(getxattr(path, name, buf.data(), 2), SyscallSucceedsWithValue(2));
- EXPECT_EQ(buf, val);
-}
-
-TEST_F(XattrTest, SetXattrCreateFlag) {
- const char* path = test_file_name_.c_str();
- const char name[] = "user.test";
- EXPECT_THAT(setxattr(path, name, nullptr, 0, XATTR_CREATE),
- SyscallSucceeds());
- EXPECT_THAT(setxattr(path, name, nullptr, 0, XATTR_CREATE),
- SyscallFailsWithErrno(EEXIST));
-
- EXPECT_THAT(getxattr(path, name, nullptr, 0), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(XattrTest, SetXattrReplaceFlag) {
- const char* path = test_file_name_.c_str();
- const char name[] = "user.test";
- EXPECT_THAT(setxattr(path, name, nullptr, 0, XATTR_REPLACE),
- SyscallFailsWithErrno(ENODATA));
- EXPECT_THAT(setxattr(path, name, nullptr, 0, /*flags=*/0), SyscallSucceeds());
- EXPECT_THAT(setxattr(path, name, nullptr, 0, XATTR_REPLACE),
- SyscallSucceeds());
-
- EXPECT_THAT(getxattr(path, name, nullptr, 0), SyscallSucceedsWithValue(0));
-}
-
-TEST_F(XattrTest, SetXattrInvalidFlags) {
- const char* path = test_file_name_.c_str();
- int invalid_flags = 0xff;
- EXPECT_THAT(setxattr(path, nullptr, nullptr, 0, invalid_flags),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST_F(XattrTest, GetXattr) {
- const char* path = test_file_name_.c_str();
- const char name[] = "user.test";
- int val = 1234;
- size_t size = sizeof(val);
- EXPECT_THAT(setxattr(path, name, &val, size, /*flags=*/0), SyscallSucceeds());
-
- int buf = 0;
- EXPECT_THAT(getxattr(path, name, &buf, size), SyscallSucceedsWithValue(size));
- EXPECT_EQ(buf, val);
-}
-
-TEST_F(XattrTest, GetXattrSizeSmallerThanValue) {
- const char* path = test_file_name_.c_str();
- const char name[] = "user.test";
- std::vector<char> val = {'a', 'a'};
- size_t size = val.size();
- EXPECT_THAT(setxattr(path, name, val.data(), size, /*flags=*/0),
-             SyscallSucceeds());
-
- char buf = '-';
- EXPECT_THAT(getxattr(path, name, &buf, 1), SyscallFailsWithErrno(ERANGE));
- EXPECT_EQ(buf, '-');
-}
-
-TEST_F(XattrTest, GetXattrSizeLargerThanValue) {
- const char* path = test_file_name_.c_str();
- const char name[] = "user.test";
- char val = 'a';
- EXPECT_THAT(setxattr(path, name, &val, 1, /*flags=*/0), SyscallSucceeds());
-
- std::vector<char> buf(XATTR_SIZE_MAX);
- std::fill(buf.begin(), buf.end(), '-');
- std::vector<char> expected_buf = buf;
- expected_buf[0] = 'a';
- EXPECT_THAT(getxattr(path, name, buf.data(), buf.size()),
- SyscallSucceedsWithValue(1));
- EXPECT_EQ(buf, expected_buf);
-}
-
-TEST_F(XattrTest, GetXattrZeroSize) {
- const char* path = test_file_name_.c_str();
- const char name[] = "user.test";
- char val = 'a';
- EXPECT_THAT(setxattr(path, name, &val, sizeof(val), /*flags=*/0),
- SyscallSucceeds());
-
- char buf = '-';
- EXPECT_THAT(getxattr(path, name, &buf, 0),
- SyscallSucceedsWithValue(sizeof(val)));
- EXPECT_EQ(buf, '-');
-}
-
-TEST_F(XattrTest, GetXattrSizeTooLarge) {
- const char* path = test_file_name_.c_str();
- const char name[] = "user.test";
- char val = 'a';
- EXPECT_THAT(setxattr(path, name, &val, sizeof(val), /*flags=*/0),
- SyscallSucceeds());
-
- std::vector<char> buf(XATTR_SIZE_MAX + 1);
- std::fill(buf.begin(), buf.end(), '-');
- std::vector<char> expected_buf = buf;
- expected_buf[0] = 'a';
- EXPECT_THAT(getxattr(path, name, buf.data(), buf.size()),
- SyscallSucceedsWithValue(sizeof(val)));
- EXPECT_EQ(buf, expected_buf);
-}
-
-TEST_F(XattrTest, GetXattrNullValue) {
- const char* path = test_file_name_.c_str();
- const char name[] = "user.test";
- char val = 'a';
- size_t size = sizeof(val);
- EXPECT_THAT(setxattr(path, name, &val, size, /*flags=*/0), SyscallSucceeds());
-
- EXPECT_THAT(getxattr(path, name, nullptr, size),
- SyscallFailsWithErrno(EFAULT));
-}
-
-TEST_F(XattrTest, GetXattrNullValueAndZeroSize) {
- const char* path = test_file_name_.c_str();
- const char name[] = "user.test";
- char val = 'a';
- size_t size = sizeof(val);
- // Set value with zero size.
- EXPECT_THAT(setxattr(path, name, &val, 0, /*flags=*/0), SyscallSucceeds());
- // Get value with nonzero size.
- EXPECT_THAT(getxattr(path, name, nullptr, size), SyscallSucceedsWithValue(0));
-
- // Set value with nonzero size.
- EXPECT_THAT(setxattr(path, name, &val, size, /*flags=*/0), SyscallSucceeds());
- // Get value with zero size.
- EXPECT_THAT(getxattr(path, name, nullptr, 0), SyscallSucceedsWithValue(size));
-}
-
-TEST_F(XattrTest, GetXattrNonexistentName) {
- const char* path = test_file_name_.c_str();
- const char name[] = "user.test";
- EXPECT_THAT(getxattr(path, name, nullptr, 0), SyscallFailsWithErrno(ENODATA));
-}
-
-TEST_F(XattrTest, ListXattr) {
- const char* path = test_file_name_.c_str();
- const std::string name = "user.test";
- const std::string name2 = "user.test2";
- const std::string name3 = "user.test3";
- EXPECT_THAT(setxattr(path, name.c_str(), nullptr, 0, /*flags=*/0),
- SyscallSucceeds());
- EXPECT_THAT(setxattr(path, name2.c_str(), nullptr, 0, /*flags=*/0),
- SyscallSucceeds());
- EXPECT_THAT(setxattr(path, name3.c_str(), nullptr, 0, /*flags=*/0),
- SyscallSucceeds());
-
- std::vector<char> list(name.size() + 1 + name2.size() + 1 + name3.size() + 1);
- char* buf = list.data();
- EXPECT_THAT(listxattr(path, buf, XATTR_SIZE_MAX),
- SyscallSucceedsWithValue(list.size()));
-
- absl::flat_hash_set<std::string> got = {};
- for (char* p = buf; p < buf + list.size(); p += strlen(p) + 1) {
- got.insert(std::string{p});
- }
-
- absl::flat_hash_set<std::string> expected = {name, name2, name3};
- EXPECT_EQ(got, expected);
-}
-
-TEST_F(XattrTest, ListXattrNoXattrs) {
- const char* path = test_file_name_.c_str();
-
- std::vector<char> list, expected;
- EXPECT_THAT(listxattr(path, list.data(), sizeof(list)),
- SyscallSucceedsWithValue(0));
- EXPECT_EQ(list, expected);
-
- // ListXattr should succeed if there are no attributes, even if the buffer
- // passed in is a nullptr.
- EXPECT_THAT(listxattr(path, nullptr, sizeof(list)),
- SyscallSucceedsWithValue(0));
-}
-
-TEST_F(XattrTest, ListXattrNullBuffer) {
- const char* path = test_file_name_.c_str();
- const char name[] = "user.test";
- EXPECT_THAT(setxattr(path, name, nullptr, 0, /*flags=*/0), SyscallSucceeds());
-
- EXPECT_THAT(listxattr(path, nullptr, sizeof(name)),
- SyscallFailsWithErrno(EFAULT));
-}
-
-TEST_F(XattrTest, ListXattrSizeTooSmall) {
- const char* path = test_file_name_.c_str();
- const char name[] = "user.test";
- EXPECT_THAT(setxattr(path, name, nullptr, 0, /*flags=*/0), SyscallSucceeds());
-
- char list[sizeof(name) - 1];
- EXPECT_THAT(listxattr(path, list, sizeof(list)),
- SyscallFailsWithErrno(ERANGE));
-}
-
-TEST_F(XattrTest, ListXattrZeroSize) {
- const char* path = test_file_name_.c_str();
- const char name[] = "user.test";
- EXPECT_THAT(setxattr(path, name, nullptr, 0, /*flags=*/0), SyscallSucceeds());
- EXPECT_THAT(listxattr(path, nullptr, 0),
- SyscallSucceedsWithValue(sizeof(name)));
-}
-
-TEST_F(XattrTest, RemoveXattr) {
- const char* path = test_file_name_.c_str();
- const char name[] = "user.test";
- EXPECT_THAT(setxattr(path, name, nullptr, 0, /*flags=*/0), SyscallSucceeds());
- EXPECT_THAT(removexattr(path, name), SyscallSucceeds());
- EXPECT_THAT(getxattr(path, name, nullptr, 0), SyscallFailsWithErrno(ENODATA));
-}
-
-TEST_F(XattrTest, RemoveXattrNonexistentName) {
- const char* path = test_file_name_.c_str();
- const char name[] = "user.test";
- EXPECT_THAT(removexattr(path, name), SyscallFailsWithErrno(ENODATA));
-}
-
-TEST_F(XattrTest, LXattrOnSymlink) {
- const char name[] = "user.test";
- TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- TempPath link = ASSERT_NO_ERRNO_AND_VALUE(
- TempPath::CreateSymlinkTo(dir.path(), test_file_name_));
-
- EXPECT_THAT(lsetxattr(link.path().c_str(), name, nullptr, 0, 0),
- SyscallFailsWithErrno(EPERM));
- EXPECT_THAT(lgetxattr(link.path().c_str(), name, nullptr, 0),
- SyscallFailsWithErrno(ENODATA));
- EXPECT_THAT(llistxattr(link.path().c_str(), nullptr, 0),
- SyscallSucceedsWithValue(0));
- EXPECT_THAT(lremovexattr(link.path().c_str(), name),
- SyscallFailsWithErrno(EPERM));
-}
-
-TEST_F(XattrTest, LXattrOnNonsymlink) {
- const char* path = test_file_name_.c_str();
- const char name[] = "user.test";
- int val = 1234;
- size_t size = sizeof(val);
- EXPECT_THAT(lsetxattr(path, name, &val, size, /*flags=*/0),
- SyscallSucceeds());
-
- int buf = 0;
- EXPECT_THAT(lgetxattr(path, name, &buf, size),
- SyscallSucceedsWithValue(size));
- EXPECT_EQ(buf, val);
-
- char list[sizeof(name)];
- EXPECT_THAT(llistxattr(path, list, sizeof(list)),
- SyscallSucceedsWithValue(sizeof(name)));
- EXPECT_STREQ(list, name);
-
- EXPECT_THAT(lremovexattr(path, name), SyscallSucceeds());
-}
-
-TEST_F(XattrTest, XattrWithFD) {
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_.c_str(), 0));
- const char name[] = "user.test";
- int val = 1234;
- size_t size = sizeof(val);
- EXPECT_THAT(fsetxattr(fd.get(), name, &val, size, /*flags=*/0),
- SyscallSucceeds());
-
- int buf = 0;
- EXPECT_THAT(fgetxattr(fd.get(), name, &buf, size),
- SyscallSucceedsWithValue(size));
- EXPECT_EQ(buf, val);
-
- char list[sizeof(name)];
- EXPECT_THAT(flistxattr(fd.get(), list, sizeof(list)),
- SyscallSucceedsWithValue(sizeof(name)));
- EXPECT_STREQ(list, name);
-
- EXPECT_THAT(fremovexattr(fd.get(), name), SyscallSucceeds());
-}
-
-TEST_F(XattrTest, XattrWithOPath) {
- SKIP_IF(IsRunningWithVFS1());
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_.c_str(), O_PATH));
- const char name[] = "user.test";
- int val = 1234;
- size_t size = sizeof(val);
- EXPECT_THAT(fsetxattr(fd.get(), name, &val, size, /*flags=*/0),
- SyscallFailsWithErrno(EBADF));
-
- int buf;
- EXPECT_THAT(fgetxattr(fd.get(), name, &buf, size),
- SyscallFailsWithErrno(EBADF));
-
- char list[sizeof(name)];
- EXPECT_THAT(flistxattr(fd.get(), list, sizeof(list)),
- SyscallFailsWithErrno(EBADF));
-
- EXPECT_THAT(fremovexattr(fd.get(), name), SyscallFailsWithErrno(EBADF));
-}
-
-TEST_F(XattrTest, TrustedNamespaceWithCapSysAdmin) {
- // Trusted namespace not supported in VFS1.
- SKIP_IF(IsRunningWithVFS1());
-
- // TODO(b/166162845): Only gVisor tmpfs currently supports trusted namespace.
- SKIP_IF(IsRunningOnGvisor() &&
- !ASSERT_NO_ERRNO_AND_VALUE(IsTmpfs(test_file_name_)));
-
- const char* path = test_file_name_.c_str();
- const char name[] = "trusted.test";
-
- // Writing to the trusted.* xattr namespace requires CAP_SYS_ADMIN in the root
- // user namespace. There's no easy way to check that, other than trying the
- // operation and seeing what happens. We'll call removexattr because it's
- // simplest.
- if (removexattr(path, name) < 0) {
- SKIP_IF(errno == EPERM);
- FAIL() << "unexpected errno from removexattr: " << errno;
- }
-
- // Set.
- char val = 'a';
- size_t size = sizeof(val);
- EXPECT_THAT(setxattr(path, name, &val, size, /*flags=*/0), SyscallSucceeds());
-
- // Get.
- char got = '\0';
- EXPECT_THAT(getxattr(path, name, &got, size), SyscallSucceedsWithValue(size));
- EXPECT_EQ(val, got);
-
- // List.
- char list[sizeof(name)];
- EXPECT_THAT(listxattr(path, list, sizeof(list)),
- SyscallSucceedsWithValue(sizeof(name)));
- EXPECT_STREQ(list, name);
-
- // Remove.
- EXPECT_THAT(removexattr(path, name), SyscallSucceeds());
-
- // Get should now return ENODATA.
- EXPECT_THAT(getxattr(path, name, &got, size), SyscallFailsWithErrno(ENODATA));
-}
-
-TEST_F(XattrTest, TrustedNamespaceWithoutCapSysAdmin) {
- // Trusted namespace not supported in VFS1.
- SKIP_IF(IsRunningWithVFS1());
-
- // TODO(b/166162845): Only gVisor tmpfs currently supports trusted namespace.
- SKIP_IF(IsRunningOnGvisor() &&
- !ASSERT_NO_ERRNO_AND_VALUE(IsTmpfs(test_file_name_)));
-
- // Drop CAP_SYS_ADMIN if we have it.
- AutoCapability cap(CAP_SYS_ADMIN, false);
-
- const char* path = test_file_name_.c_str();
- const char name[] = "trusted.test";
-
- // Set fails.
- char val = 'a';
- size_t size = sizeof(val);
- EXPECT_THAT(setxattr(path, name, &val, size, /*flags=*/0),
- SyscallFailsWithErrno(EPERM));
-
- // Get fails.
- char got = '\0';
- EXPECT_THAT(getxattr(path, name, &got, size), SyscallFailsWithErrno(ENODATA));
-
- // List still works, but returns no items.
- char list[sizeof(name)];
- EXPECT_THAT(listxattr(path, list, sizeof(list)), SyscallSucceedsWithValue(0));
-
- // Remove fails.
- EXPECT_THAT(removexattr(path, name), SyscallFailsWithErrno(EPERM));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/uds/BUILD b/test/uds/BUILD
deleted file mode 100644
index a8f49b50c..000000000
--- a/test/uds/BUILD
+++ /dev/null
@@ -1,17 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(
- default_visibility = ["//:sandbox"],
- licenses = ["notice"],
-)
-
-go_library(
- name = "uds",
- testonly = 1,
- srcs = ["uds.go"],
- deps = [
- "//pkg/log",
- "//pkg/unet",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/test/uds/uds.go b/test/uds/uds.go
deleted file mode 100644
index 02a4a7dee..000000000
--- a/test/uds/uds.go
+++ /dev/null
@@ -1,228 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package uds contains helpers for testing external UDS functionality.
-package uds
-
-import (
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/log"
- "gvisor.dev/gvisor/pkg/unet"
-)
-
-// createEchoSocket creates a socket that echoes back anything received.
-//
-// Only works for stream, seqpacket sockets.
-func createEchoSocket(path string, protocol int) (cleanup func(), err error) {
- fd, err := unix.Socket(unix.AF_UNIX, protocol, 0)
- if err != nil {
- return nil, fmt.Errorf("error creating echo(%d) socket: %v", protocol, err)
- }
-
- if err := unix.Bind(fd, &unix.SockaddrUnix{Name: path}); err != nil {
- return nil, fmt.Errorf("error binding echo(%d) socket: %v", protocol, err)
- }
-
- if err := unix.Listen(fd, 0); err != nil {
- return nil, fmt.Errorf("error listening echo(%d) socket: %v", protocol, err)
- }
-
- server, err := unet.NewServerSocket(fd)
- if err != nil {
- return nil, fmt.Errorf("error creating echo(%d) unet socket: %v", protocol, err)
- }
-
- acceptAndEchoOne := func() error {
- s, err := server.Accept()
- if err != nil {
- return fmt.Errorf("failed to accept: %v", err)
- }
- defer s.Close()
-
- for {
- buf := make([]byte, 512)
- for {
- n, err := s.Read(buf)
- if err == io.EOF {
- return nil
- }
- if err != nil {
- return fmt.Errorf("failed to read: %d, %v", n, err)
- }
-
- n, err = s.Write(buf[:n])
- if err != nil {
- return fmt.Errorf("failed to write: %d, %v", n, err)
- }
- }
- }
- }
-
- go func() {
- for {
- if err := acceptAndEchoOne(); err != nil {
- log.Warningf("Failed to handle echo(%d) socket: %v", protocol, err)
- return
- }
- }
- }()
-
- cleanup = func() {
- if err := server.Close(); err != nil {
- log.Warningf("Failed to close echo(%d) socket: %v", protocol, err)
- }
- }
-
- return cleanup, nil
-}
-
-// createNonListeningSocket creates a socket that is bound but not listening.
-//
-// Only relevant for stream, seqpacket sockets.
-func createNonListeningSocket(path string, protocol int) (cleanup func(), err error) {
- fd, err := unix.Socket(unix.AF_UNIX, protocol, 0)
- if err != nil {
- return nil, fmt.Errorf("error creating nonlistening(%d) socket: %v", protocol, err)
- }
-
- if err := unix.Bind(fd, &unix.SockaddrUnix{Name: path}); err != nil {
- return nil, fmt.Errorf("error binding nonlistening(%d) socket: %v", protocol, err)
- }
-
- cleanup = func() {
- if err := unix.Close(fd); err != nil {
- log.Warningf("Failed to close nonlistening(%d) socket: %v", protocol, err)
- }
- }
-
- return cleanup, nil
-}
-
-// createNullSocket creates a socket that reads and discards anything received.
-//
-// Only works for dgram sockets.
-func createNullSocket(path string, protocol int) (cleanup func(), err error) {
- fd, err := unix.Socket(unix.AF_UNIX, protocol, 0)
- if err != nil {
- return nil, fmt.Errorf("error creating null(%d) socket: %v", protocol, err)
- }
-
- if err := unix.Bind(fd, &unix.SockaddrUnix{Name: path}); err != nil {
- return nil, fmt.Errorf("error binding null(%d) socket: %v", protocol, err)
- }
-
- s, err := unet.NewSocket(fd)
- if err != nil {
- return nil, fmt.Errorf("error creating null(%d) unet socket: %v", protocol, err)
- }
-
- go func() {
- buf := make([]byte, 512)
- for {
- n, err := s.Read(buf)
- if err != nil {
- log.Warningf("failed to read: %d, %v", n, err)
- return
- }
- }
- }()
-
- cleanup = func() {
- if err := s.Close(); err != nil {
- log.Warningf("Failed to close null(%d) socket: %v", protocol, err)
- }
- }
-
- return cleanup, nil
-}
-
-type socketCreator func(path string, proto int) (cleanup func(), err error)
-
-// CreateSocketTree creates a local tree of unix domain sockets for use in
-// testing:
-// * /stream/echo
-// * /stream/nonlistening
-// * /seqpacket/echo
-// * /seqpacket/nonlistening
-// * /dgram/null
-func CreateSocketTree(baseDir string) (dir string, cleanup func(), err error) {
- dir, err = ioutil.TempDir(baseDir, "sockets")
- if err != nil {
- return "", nil, fmt.Errorf("error creating temp dir: %v", err)
- }
-
- var protocols = []struct {
- protocol int
- name string
- sockets map[string]socketCreator
- }{
- {
- protocol: unix.SOCK_STREAM,
- name: "stream",
- sockets: map[string]socketCreator{
- "echo": createEchoSocket,
- "nonlistening": createNonListeningSocket,
- },
- },
- {
- protocol: unix.SOCK_SEQPACKET,
- name: "seqpacket",
- sockets: map[string]socketCreator{
- "echo": createEchoSocket,
- "nonlistening": createNonListeningSocket,
- },
- },
- {
- protocol: unix.SOCK_DGRAM,
- name: "dgram",
- sockets: map[string]socketCreator{
- "null": createNullSocket,
- },
- },
- }
-
- var cleanups []func()
- for _, proto := range protocols {
- protoDir := filepath.Join(dir, proto.name)
- if err := os.Mkdir(protoDir, 0755); err != nil {
- return "", nil, fmt.Errorf("error creating %s dir: %v", proto.name, err)
- }
-
- for name, fn := range proto.sockets {
- path := filepath.Join(protoDir, name)
- cleanup, err := fn(path, proto.protocol)
- if err != nil {
- return "", nil, fmt.Errorf("error creating %s %s socket: %v", proto.name, name, err)
- }
-
- cleanups = append(cleanups, cleanup)
- }
- }
-
- cleanup = func() {
- for _, c := range cleanups {
- c()
- }
-
- os.RemoveAll(dir)
- }
-
- return dir, cleanup, nil
-}
diff --git a/test/util/BUILD b/test/util/BUILD
deleted file mode 100644
index cc83221ea..000000000
--- a/test/util/BUILD
+++ /dev/null
@@ -1,416 +0,0 @@
-load("//tools:defs.bzl", "cc_library", "cc_test", "coreutil", "gbenchmark", "gtest", "select_system")
-
-package(
- default_visibility = ["//:sandbox"],
- licenses = ["notice"],
-)
-
-cc_library(
- name = "capability_util",
- testonly = 1,
- srcs = ["capability_util.cc"],
- hdrs = ["capability_util.h"],
- deps = [
- ":cleanup",
- ":memory_util",
- ":posix_error",
- ":save_util",
- ":test_util",
- "@com_google_absl//absl/strings",
- ],
-)
-
-cc_library(
- name = "eventfd_util",
- testonly = 1,
- hdrs = ["eventfd_util.h"],
- deps = [
- ":file_descriptor",
- ":posix_error",
- ":save_util",
- ],
-)
-
-cc_library(
- name = "file_descriptor",
- testonly = 1,
- hdrs = ["file_descriptor.h"],
- deps = [
- ":logging",
- ":posix_error",
- ":save_util",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/strings:str_format",
- gtest,
- ],
-)
-
-cc_library(
- name = "fuse_util",
- testonly = 1,
- srcs = ["fuse_util.cc"],
- hdrs = ["fuse_util.h"],
-)
-
-cc_library(
- name = "proc_util",
- testonly = 1,
- srcs = ["proc_util.cc"],
- hdrs = ["proc_util.h"],
- deps = [
- ":fs_util",
- ":posix_error",
- ":test_util",
- "@com_google_absl//absl/strings",
- gtest,
- ],
-)
-
-cc_test(
- name = "proc_util_test",
- size = "small",
- srcs = ["proc_util_test.cc"],
- deps = [
- ":proc_util",
- ":test_main",
- ":test_util",
- gtest,
- ],
-)
-
-cc_library(
- name = "cleanup",
- testonly = 1,
- hdrs = ["cleanup.h"],
-)
-
-cc_library(
- name = "fs_util",
- testonly = 1,
- srcs = ["fs_util.cc"],
- hdrs = ["fs_util.h"],
- deps = [
- ":cleanup",
- ":file_descriptor",
- ":posix_error",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/time",
- gtest,
- ],
-)
-
-cc_test(
- name = "fs_util_test",
- size = "small",
- srcs = ["fs_util_test.cc"],
- deps = [
- ":fs_util",
- ":posix_error",
- ":temp_path",
- ":test_main",
- ":test_util",
- gtest,
- ],
-)
-
-cc_library(
- name = "logging",
- testonly = 1,
- srcs = ["logging.cc"],
- hdrs = ["logging.h"],
-)
-
-cc_library(
- name = "memory_util",
- testonly = 1,
- hdrs = ["memory_util.h"],
- deps = [
- ":logging",
- ":posix_error",
- ":save_util",
- ":test_util",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/strings:str_format",
- ],
-)
-
-cc_library(
- name = "mount_util",
- testonly = 1,
- srcs = ["mount_util.cc"],
- hdrs = ["mount_util.h"],
- deps = [
- ":cleanup",
- ":posix_error",
- ":test_util",
- "@com_google_absl//absl/container:flat_hash_map",
- "@com_google_absl//absl/strings",
- gtest,
- ],
-)
-
-cc_test(
- name = "mount_util_test",
- size = "small",
- srcs = ["mount_util_test.cc"],
- deps = [
- ":mount_util",
- ":test_main",
- ":test_util",
- gtest,
- ],
-)
-
-cc_library(
- name = "save_util",
- testonly = 1,
- srcs = [
- "save_util.cc",
- "save_util_linux.cc",
- "save_util_other.cc",
- ],
- hdrs = ["save_util.h"],
- defines = select_system(),
- deps = [
- ":logging",
- "@com_google_absl//absl/types:optional",
- ],
-)
-
-cc_library(
- name = "multiprocess_util",
- testonly = 1,
- srcs = ["multiprocess_util.cc"],
- hdrs = ["multiprocess_util.h"],
- deps = [
- ":cleanup",
- ":file_descriptor",
- ":posix_error",
- ":save_util",
- ":test_util",
- gtest,
- "@com_google_absl//absl/strings",
- ],
-)
-
-cc_library(
- name = "platform_util",
- testonly = 1,
- srcs = ["platform_util.cc"],
- hdrs = ["platform_util.h"],
- deps = [":test_util"],
-)
-
-cc_library(
- name = "posix_error",
- testonly = 1,
- srcs = ["posix_error.cc"],
- hdrs = ["posix_error.h"],
- deps = [
- ":logging",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/types:variant",
- gtest,
- ],
-)
-
-cc_test(
- name = "posix_error_test",
- size = "small",
- srcs = ["posix_error_test.cc"],
- deps = [
- ":posix_error",
- ":test_main",
- gtest,
- ],
-)
-
-cc_library(
- name = "pty_util",
- testonly = 1,
- srcs = ["pty_util.cc"],
- hdrs = ["pty_util.h"],
- deps = [
- ":file_descriptor",
- ":posix_error",
- ],
-)
-
-cc_library(
- name = "signal_util",
- testonly = 1,
- srcs = ["signal_util.cc"],
- hdrs = ["signal_util.h"],
- deps = [
- ":cleanup",
- ":posix_error",
- ":test_util",
- gtest,
- ],
-)
-
-cc_library(
- name = "temp_path",
- testonly = 1,
- srcs = ["temp_path.cc"],
- hdrs = ["temp_path.h"],
- deps = [
- ":fs_util",
- ":posix_error",
- ":test_util",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/time",
- gtest,
- ],
-)
-
-cc_library(
- name = "test_util",
- testonly = 1,
- srcs = [
- "test_util.cc",
- "test_util_impl.cc",
- "test_util_runfiles.cc",
- ],
- hdrs = ["test_util.h"],
- defines = select_system(),
- deps = coreutil() + [
- ":fs_util",
- ":logging",
- ":posix_error",
- ":save_util",
- "@bazel_tools//tools/cpp/runfiles",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/flags:flag",
- "@com_google_absl//absl/flags:parse",
- "@com_google_absl//absl/strings",
- "@com_google_absl//absl/strings:str_format",
- "@com_google_absl//absl/time",
- gtest,
- gbenchmark,
- ],
-)
-
-cc_library(
- name = "thread_util",
- testonly = 1,
- hdrs = ["thread_util.h"],
- deps = [":logging"],
-)
-
-cc_library(
- name = "time_util",
- testonly = 1,
- srcs = ["time_util.cc"],
- hdrs = ["time_util.h"],
- deps = [
- "@com_google_absl//absl/time",
- ],
-)
-
-cc_library(
- name = "timer_util",
- testonly = 1,
- srcs = ["timer_util.cc"],
- hdrs = ["timer_util.h"],
- deps = [
- ":cleanup",
- ":logging",
- ":posix_error",
- ":test_util",
- "@com_google_absl//absl/time",
- gtest,
- ],
-)
-
-cc_test(
- name = "test_util_test",
- size = "small",
- srcs = ["test_util_test.cc"],
- deps = [
- ":test_main",
- ":test_util",
- gtest,
- ],
-)
-
-cc_library(
- name = "test_main",
- testonly = 1,
- srcs = ["test_main.cc"],
- deps = [":test_util"],
-)
-
-cc_library(
- name = "epoll_util",
- testonly = 1,
- srcs = ["epoll_util.cc"],
- hdrs = ["epoll_util.h"],
- deps = [
- ":file_descriptor",
- ":posix_error",
- ":save_util",
- gtest,
- ],
-)
-
-cc_library(
- name = "rlimit_util",
- testonly = 1,
- srcs = ["rlimit_util.cc"],
- hdrs = ["rlimit_util.h"],
- deps = [
- ":cleanup",
- ":logging",
- ":posix_error",
- ":test_util",
- ],
-)
-
-cc_library(
- name = "uid_util",
- testonly = 1,
- srcs = ["uid_util.cc"],
- hdrs = ["uid_util.h"],
- deps = [
- ":posix_error",
- ":save_util",
- ],
-)
-
-cc_library(
- name = "temp_umask",
- testonly = 1,
- hdrs = ["temp_umask.h"],
-)
-
-cc_library(
- name = "cgroup_util",
- testonly = 1,
- srcs = ["cgroup_util.cc"],
- hdrs = ["cgroup_util.h"],
- deps = [
- ":cleanup",
- ":fs_util",
- ":mount_util",
- ":posix_error",
- ":temp_path",
- "@com_google_absl//absl/container:flat_hash_map",
- "@com_google_absl//absl/container:flat_hash_set",
- "@com_google_absl//absl/strings",
- ],
-)
-
-cc_library(
- name = "verity_util",
- testonly = 1,
- srcs = ["verity_util.cc"],
- hdrs = ["verity_util.h"],
- deps = [
- ":fs_util",
- ":mount_util",
- ":posix_error",
- ":temp_path",
- ],
-)
diff --git a/test/util/capability_util.cc b/test/util/capability_util.cc
deleted file mode 100644
index 3bf218128..000000000
--- a/test/util/capability_util.cc
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifdef __linux__
-
-#include "test/util/capability_util.h"
-
-#include <linux/capability.h>
-#include <sched.h>
-#include <sys/mman.h>
-#include <sys/wait.h>
-
-#include <iostream>
-
-#include "absl/strings/str_cat.h"
-#include "test/util/memory_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/save_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-PosixErrorOr<bool> CanCreateUserNamespace() {
- // The most reliable way to determine if userns creation is possible is by
- // trying to create one; see below.
- ASSIGN_OR_RETURN_ERRNO(
- auto child_stack,
- MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
- int const child_pid = clone(
- +[](void*) { return 0; },
- reinterpret_cast<void*>(child_stack.addr() + kPageSize),
- CLONE_NEWUSER | SIGCHLD, /* arg = */ nullptr);
- if (child_pid > 0) {
- int status;
- int const ret = waitpid(child_pid, &status, /* options = */ 0);
- MaybeSave();
- if (ret < 0) {
- return PosixError(errno, "waitpid");
- }
- if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
- return PosixError(
- ESRCH, absl::StrCat("child process exited with status ", status));
- }
- return true;
- } else if (errno == EPERM) {
- // Per clone(2), EPERM can be returned if:
- //
- // - "CLONE_NEWUSER was specified in flags, but either the effective user ID
- // or the effective group ID of the caller does not have a mapping in the
- // parent namespace (see user_namespaces(7))."
- //
- // - "(since Linux 3.9) CLONE_NEWUSER was specified in flags and the caller
- // is in a chroot environment (i.e., the caller's root directory does
- // not match the root directory of the mount namespace in which it
- // resides)."
- std::cerr << "clone(CLONE_NEWUSER) failed with EPERM" << std::endl;
- return false;
- } else if (errno == EUSERS) {
- // "(since Linux 3.11) CLONE_NEWUSER was specified in flags, and the call
- // would cause the limit on the number of nested user namespaces to be
- // exceeded. See user_namespaces(7)."
- std::cerr << "clone(CLONE_NEWUSER) failed with EUSERS" << std::endl;
- return false;
- } else {
- // Unexpected error code; indicate an actual error.
- return PosixError(errno, "clone(CLONE_NEWUSER)");
- }
-}
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // __linux__
diff --git a/test/util/capability_util.h b/test/util/capability_util.h
deleted file mode 100644
index c4b0feade..000000000
--- a/test/util/capability_util.h
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Utilities for testing capabilities.
-
-#ifndef GVISOR_TEST_UTIL_CAPABILITY_UTIL_H_
-#define GVISOR_TEST_UTIL_CAPABILITY_UTIL_H_
-
-#ifdef __linux__
-
-#include <errno.h>
-#include <linux/capability.h>
-#include <sys/syscall.h>
-#include <unistd.h>
-
-#include "test/util/cleanup.h"
-#include "test/util/posix_error.h"
-#include "test/util/save_util.h"
-#include "test/util/test_util.h"
-
-#ifndef _LINUX_CAPABILITY_VERSION_3
-#error Expecting _LINUX_CAPABILITY_VERSION_3 support
-#endif
-
-namespace gvisor {
-namespace testing {
-
-// HaveCapability returns true if the process has the specified EFFECTIVE
-// capability.
-inline PosixErrorOr<bool> HaveCapability(int cap) {
- if (!cap_valid(cap)) {
- return PosixError(EINVAL, "Invalid capability");
- }
-
- struct __user_cap_header_struct header = {_LINUX_CAPABILITY_VERSION_3, 0};
- struct __user_cap_data_struct caps[_LINUX_CAPABILITY_U32S_3] = {};
- RETURN_ERROR_IF_SYSCALL_FAIL(syscall(__NR_capget, &header, &caps));
- MaybeSave();
-
- return (caps[CAP_TO_INDEX(cap)].effective & CAP_TO_MASK(cap)) != 0;
-}
-
-// SetCapability sets the specified EFFECTIVE capability.
-inline PosixError SetCapability(int cap, bool set) {
- if (!cap_valid(cap)) {
- return PosixError(EINVAL, "Invalid capability");
- }
-
- struct __user_cap_header_struct header = {_LINUX_CAPABILITY_VERSION_3, 0};
- struct __user_cap_data_struct caps[_LINUX_CAPABILITY_U32S_3] = {};
- RETURN_ERROR_IF_SYSCALL_FAIL(syscall(__NR_capget, &header, &caps));
- MaybeSave();
-
- if (set) {
- caps[CAP_TO_INDEX(cap)].effective |= CAP_TO_MASK(cap);
- } else {
- caps[CAP_TO_INDEX(cap)].effective &= ~CAP_TO_MASK(cap);
- }
- header = {_LINUX_CAPABILITY_VERSION_3, 0};
- RETURN_ERROR_IF_SYSCALL_FAIL(syscall(__NR_capset, &header, &caps));
- MaybeSave();
-
- return NoError();
-}
-
-// DropPermittedCapability drops the specified PERMITTED capability. The
-// EFFECTIVE capabilities must be a subset of PERMITTED, so the corresponding
-// EFFECTIVE bit is dropped as well.
-inline PosixError DropPermittedCapability(int cap) {
- if (!cap_valid(cap)) {
- return PosixError(EINVAL, "Invalid capability");
- }
-
- struct __user_cap_header_struct header = {_LINUX_CAPABILITY_VERSION_3, 0};
- struct __user_cap_data_struct caps[_LINUX_CAPABILITY_U32S_3] = {};
- RETURN_ERROR_IF_SYSCALL_FAIL(syscall(__NR_capget, &header, &caps));
- MaybeSave();
-
- caps[CAP_TO_INDEX(cap)].effective &= ~CAP_TO_MASK(cap);
- caps[CAP_TO_INDEX(cap)].permitted &= ~CAP_TO_MASK(cap);
-
- header = {_LINUX_CAPABILITY_VERSION_3, 0};
- RETURN_ERROR_IF_SYSCALL_FAIL(syscall(__NR_capset, &header, &caps));
- MaybeSave();
-
- return NoError();
-}
-
-PosixErrorOr<bool> CanCreateUserNamespace();
-
-class AutoCapability {
- public:
- AutoCapability(int cap, bool set) : cap_(cap), set_(set) {
- const bool has = EXPECT_NO_ERRNO_AND_VALUE(HaveCapability(cap));
- if (set != has) {
- EXPECT_NO_ERRNO(SetCapability(cap_, set_));
- applied_ = true;
- }
- }
-
- ~AutoCapability() {
- if (applied_) {
- EXPECT_NO_ERRNO(SetCapability(cap_, !set_));
- }
- }
-
- private:
- int cap_;
- bool set_;
- bool applied_ = false;
-};
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // __linux__
-
-#endif // GVISOR_TEST_UTIL_CAPABILITY_UTIL_H_
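A minimal sketch of how a test might use the capability helpers above; the test name and the choice of CAP_NET_RAW are illustrative assumptions, not taken from this diff:

#include "gtest/gtest.h"
#include "test/util/capability_util.h"
#include "test/util/test_util.h"

namespace gvisor {
namespace testing {
namespace {

TEST(CapabilityUtilExample, DropAndRestore) {
  // Skip unless the test starts with CAP_NET_RAW in its effective set.
  SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));

  {
    // Drop CAP_NET_RAW for the duration of this scope; the destructor
    // restores the previous state.
    AutoCapability cap(CAP_NET_RAW, /*set=*/false);
    EXPECT_FALSE(ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
  }

  EXPECT_TRUE(ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
}

}  // namespace
}  // namespace testing
}  // namespace gvisor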
diff --git a/test/util/cgroup_util.cc b/test/util/cgroup_util.cc
deleted file mode 100644
index 977993f41..000000000
--- a/test/util/cgroup_util.cc
+++ /dev/null
@@ -1,250 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/util/cgroup_util.h"
-
-#include <sys/syscall.h>
-#include <unistd.h>
-
-#include "absl/strings/str_cat.h"
-#include "absl/strings/str_split.h"
-#include "test/util/fs_util.h"
-#include "test/util/mount_util.h"
-
-namespace gvisor {
-namespace testing {
-
-Cgroup::Cgroup(std::string_view path) : cgroup_path_(path) {
- id_ = ++Cgroup::next_id_;
- std::cerr << absl::StreamFormat("[cg#%d] <= %s", id_, cgroup_path_)
- << std::endl;
-}
-
-PosixErrorOr<std::string> Cgroup::ReadControlFile(
- absl::string_view name) const {
- std::string buf;
- RETURN_IF_ERRNO(GetContents(Relpath(name), &buf));
-
- const std::string alias_path = absl::StrFormat("[cg#%d]/%s", id_, name);
- std::cerr << absl::StreamFormat("<contents of %s>", alias_path) << std::endl;
- std::cerr << buf;
- std::cerr << absl::StreamFormat("<end of %s>", alias_path) << std::endl;
-
- return buf;
-}
-
-PosixErrorOr<int64_t> Cgroup::ReadIntegerControlFile(
- absl::string_view name) const {
- ASSIGN_OR_RETURN_ERRNO(const std::string buf, ReadControlFile(name));
- ASSIGN_OR_RETURN_ERRNO(const int64_t val, Atoi<int64_t>(buf));
- return val;
-}
-
-PosixError Cgroup::WriteControlFile(absl::string_view name,
- const std::string& value) const {
- ASSIGN_OR_RETURN_ERRNO(FileDescriptor fd, Open(Relpath(name), O_WRONLY));
- RETURN_ERROR_IF_SYSCALL_FAIL(WriteFd(fd.get(), value.c_str(), value.size()));
- return NoError();
-}
-
-PosixError Cgroup::WriteIntegerControlFile(absl::string_view name,
- int64_t value) const {
- return WriteControlFile(name, absl::StrCat(value));
-}
-
-PosixErrorOr<absl::flat_hash_set<pid_t>> Cgroup::Procs() const {
- ASSIGN_OR_RETURN_ERRNO(std::string buf, ReadControlFile("cgroup.procs"));
- return ParsePIDList(buf);
-}
-
-PosixErrorOr<absl::flat_hash_set<pid_t>> Cgroup::Tasks() const {
- ASSIGN_OR_RETURN_ERRNO(std::string buf, ReadControlFile("tasks"));
- return ParsePIDList(buf);
-}
-
-PosixError Cgroup::ContainsCallingProcess() const {
- ASSIGN_OR_RETURN_ERRNO(const absl::flat_hash_set<pid_t> procs, Procs());
- ASSIGN_OR_RETURN_ERRNO(const absl::flat_hash_set<pid_t> tasks, Tasks());
- const pid_t pid = getpid();
- const pid_t tid = syscall(SYS_gettid);
- if (!procs.contains(pid)) {
- return PosixError(
- ENOENT, absl::StrFormat("Cgroup doesn't contain process %d", pid));
- }
- if (!tasks.contains(tid)) {
- return PosixError(ENOENT,
- absl::StrFormat("Cgroup doesn't contain task %d", tid));
- }
- return NoError();
-}
-
-PosixErrorOr<absl::flat_hash_set<pid_t>> Cgroup::ParsePIDList(
- absl::string_view data) const {
- absl::flat_hash_set<pid_t> res;
- std::vector<absl::string_view> lines = absl::StrSplit(data, '\n');
- for (const std::string_view& line : lines) {
- if (line.empty()) {
- continue;
- }
- ASSIGN_OR_RETURN_ERRNO(const int32_t pid, Atoi<int32_t>(line));
- res.insert(static_cast<pid_t>(pid));
- }
- return res;
-}
-
-int64_t Cgroup::next_id_ = 0;
-
-PosixErrorOr<Cgroup> Mounter::MountCgroupfs(std::string mopts) {
- ASSIGN_OR_RETURN_ERRNO(TempPath mountpoint,
- TempPath::CreateDirIn(root_.path()));
- ASSIGN_OR_RETURN_ERRNO(
- Cleanup mount, Mount("none", mountpoint.path(), "cgroup", 0, mopts, 0));
- const std::string mountpath = mountpoint.path();
- std::cerr << absl::StreamFormat(
- "Mount(\"none\", \"%s\", \"cgroup\", 0, \"%s\", 0) => OK",
- mountpath, mopts)
- << std::endl;
- Cgroup cg = Cgroup(mountpath);
- mountpoints_[cg.id()] = std::move(mountpoint);
- mounts_[cg.id()] = std::move(mount);
- return cg;
-}
-
-PosixError Mounter::Unmount(const Cgroup& c) {
- auto mount = mounts_.find(c.id());
- auto mountpoint = mountpoints_.find(c.id());
-
- if (mount == mounts_.end() || mountpoint == mountpoints_.end()) {
- return PosixError(
- ESRCH, absl::StrFormat("No mount found for cgroupfs containing cg#%d",
- c.id()));
- }
-
- std::cerr << absl::StreamFormat("Unmount([cg#%d])", c.id()) << std::endl;
-
- // Simply delete the entries, their destructors will unmount and delete the
- // mountpoint. Note the order is important to avoid errors: mount then
- // mountpoint.
- mounts_.erase(mount);
- mountpoints_.erase(mountpoint);
-
- return NoError();
-}
-
-void Mounter::release(const Cgroup& c) {
- auto mp = mountpoints_.find(c.id());
- if (mp != mountpoints_.end()) {
- mp->second.release();
- mountpoints_.erase(mp);
- }
-
- auto m = mounts_.find(c.id());
- if (m != mounts_.end()) {
- m->second.Release();
- mounts_.erase(m);
- }
-}
-
-constexpr char kProcCgroupsHeader[] =
- "#subsys_name\thierarchy\tnum_cgroups\tenabled";
-
-PosixErrorOr<absl::flat_hash_map<std::string, CgroupsEntry>>
-ProcCgroupsEntries() {
- std::string content;
- RETURN_IF_ERRNO(GetContents("/proc/cgroups", &content));
-
- bool found_header = false;
- absl::flat_hash_map<std::string, CgroupsEntry> entries;
- std::vector<std::string> lines = absl::StrSplit(content, '\n');
- std::cerr << "<contents of /proc/cgroups>" << std::endl;
- for (const std::string& line : lines) {
- std::cerr << line << std::endl;
-
- if (!found_header) {
- EXPECT_EQ(line, kProcCgroupsHeader);
- found_header = true;
- continue;
- }
- if (line.empty()) {
- continue;
- }
-
- // Parse a single entry from /proc/cgroups.
- //
- // Example entries, fields are tab separated in the real file:
- //
- // #subsys_name  hierarchy  num_cgroups  enabled
- // cpuset        12         35           1
- // cpu           3          222          1
- // ^             ^          ^            ^
- // 0             1          2            3
-
- CgroupsEntry entry;
- std::vector<std::string> fields =
- StrSplit(line, absl::ByAnyChar(": \t"), absl::SkipEmpty());
-
- entry.subsys_name = fields[0];
- ASSIGN_OR_RETURN_ERRNO(entry.hierarchy, Atoi<uint32_t>(fields[1]));
- ASSIGN_OR_RETURN_ERRNO(entry.num_cgroups, Atoi<uint64_t>(fields[2]));
- ASSIGN_OR_RETURN_ERRNO(const int enabled, Atoi<int>(fields[3]));
- entry.enabled = enabled != 0;
-
- entries[entry.subsys_name] = entry;
- }
- std::cerr << "<end of /proc/cgroups>" << std::endl;
-
- return entries;
-}
-
-PosixErrorOr<absl::flat_hash_map<std::string, PIDCgroupEntry>>
-ProcPIDCgroupEntries(pid_t pid) {
- const std::string path = absl::StrFormat("/proc/%d/cgroup", pid);
- std::string content;
- RETURN_IF_ERRNO(GetContents(path, &content));
-
- absl::flat_hash_map<std::string, PIDCgroupEntry> entries;
- std::vector<std::string> lines = absl::StrSplit(content, '\n');
-
- std::cerr << absl::StreamFormat("<contents of %s>", path) << std::endl;
- for (const std::string& line : lines) {
- std::cerr << line << std::endl;
-
- if (line.empty()) {
- continue;
- }
-
- // Parse a single entry from /proc/<pid>/cgroup.
- //
- // Example entries:
- //
- // 2:cpu:/path/to/cgroup
- // 1:memory:/
-
- PIDCgroupEntry entry;
- std::vector<std::string> fields =
- absl::StrSplit(line, absl::ByChar(':'), absl::SkipEmpty());
-
- ASSIGN_OR_RETURN_ERRNO(entry.hierarchy, Atoi<uint32_t>(fields[0]));
- entry.controllers = fields[1];
- entry.path = fields[2];
-
- entries[entry.controllers] = entry;
- }
- std::cerr << absl::StreamFormat("<end of %s>", path) << std::endl;
-
- return entries;
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/util/cgroup_util.h b/test/util/cgroup_util.h
deleted file mode 100644
index e3f696a89..000000000
--- a/test/util/cgroup_util.h
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_UTIL_CGROUP_UTIL_H_
-#define GVISOR_TEST_UTIL_CGROUP_UTIL_H_
-
-#include <unistd.h>
-
-#include "absl/container/flat_hash_map.h"
-#include "absl/container/flat_hash_set.h"
-#include "absl/strings/string_view.h"
-#include "test/util/cleanup.h"
-#include "test/util/fs_util.h"
-#include "test/util/temp_path.h"
-
-namespace gvisor {
-namespace testing {
-
-// Cgroup represents a cgroup directory on a mounted cgroupfs.
-class Cgroup {
- public:
- Cgroup(std::string_view path);
-
- uint64_t id() const { return id_; }
-
- const std::string& Path() const { return cgroup_path_; }
-
- std::string Relpath(absl::string_view leaf) const {
- return JoinPath(cgroup_path_, leaf);
- }
-
- // Returns the contents of a cgroup control file with the given name.
- PosixErrorOr<std::string> ReadControlFile(absl::string_view name) const;
-
- // Reads the contents of a cgroup control file with the given name, and
- // attempts to parse it as an integer.
- PosixErrorOr<int64_t> ReadIntegerControlFile(absl::string_view name) const;
-
- // Writes a string to a cgroup control file.
- PosixError WriteControlFile(absl::string_view name,
- const std::string& value) const;
-
- // Writes an integer value to a cgroup control file.
- PosixError WriteIntegerControlFile(absl::string_view name,
- int64_t value) const;
-
- // Returns the thread ids of the leaders of thread groups managed by this
- // cgroup.
- PosixErrorOr<absl::flat_hash_set<pid_t>> Procs() const;
-
- PosixErrorOr<absl::flat_hash_set<pid_t>> Tasks() const;
-
- // ContainsCallingProcess checks whether the calling process is part of the
- // cgroup.
- PosixError ContainsCallingProcess() const;
-
- private:
- PosixErrorOr<absl::flat_hash_set<pid_t>> ParsePIDList(
- absl::string_view data) const;
-
- static int64_t next_id_;
- int64_t id_;
- const std::string cgroup_path_;
-};
-
-// Mounter is a utility for creating cgroupfs mounts. It automatically manages
-// the lifetime of created mounts.
-class Mounter {
- public:
- Mounter(TempPath root) : root_(std::move(root)) {}
-
- PosixErrorOr<Cgroup> MountCgroupfs(std::string mopts);
-
- PosixError Unmount(const Cgroup& c);
-
- void release(const Cgroup& c);
-
- private:
- // The destruction order of these members avoids errors during cleanup. We
- // first unmount (by executing the mounts_ cleanups), then delete the
- // mountpoint subdirs, then delete the root.
- TempPath root_;
- absl::flat_hash_map<int64_t, TempPath> mountpoints_;
- absl::flat_hash_map<int64_t, Cleanup> mounts_;
-};
-
-// Represents a line from /proc/cgroups.
-struct CgroupsEntry {
- std::string subsys_name;
- uint32_t hierarchy;
- uint64_t num_cgroups;
- bool enabled;
-};
-
-// Returns a parsed representation of /proc/cgroups.
-PosixErrorOr<absl::flat_hash_map<std::string, CgroupsEntry>>
-ProcCgroupsEntries();
-
-// Represents a line from /proc/<pid>/cgroup.
-struct PIDCgroupEntry {
- uint32_t hierarchy;
- std::string controllers;
- std::string path;
-};
-
-// Returns a parsed representation of /proc/<pid>/cgroup.
-PosixErrorOr<absl::flat_hash_map<std::string, PIDCgroupEntry>>
-ProcPIDCgroupEntries(pid_t pid);
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_UTIL_CGROUP_UTIL_H_
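A minimal sketch of how Mounter and Cgroup above might be combined in a test; the test name, the "cpu" mount option, and the cpu.shares control file are illustrative assumptions rather than anything specified by this diff:

#include "gtest/gtest.h"
#include "test/util/cgroup_util.h"
#include "test/util/temp_path.h"
#include "test/util/test_util.h"

namespace gvisor {
namespace testing {
namespace {

TEST(CgroupUtilExample, MountAndQuery) {
  // Mount a cgroupfs hierarchy restricted to the "cpu" controller under a
  // fresh temporary directory. Mounter unmounts and cleans up on destruction.
  Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));
  Cgroup root = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs("cpu"));

  // The mounting process should appear in the root cgroup's procs/tasks.
  EXPECT_NO_ERRNO(root.ContainsCallingProcess());

  // Control files are read relative to the cgroup directory.
  const int64_t shares =
      ASSERT_NO_ERRNO_AND_VALUE(root.ReadIntegerControlFile("cpu.shares"));
  EXPECT_GT(shares, 0);
}

}  // namespace
}  // namespace testing
}  // namespace gvisor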
diff --git a/test/util/cleanup.h b/test/util/cleanup.h
deleted file mode 100644
index c76482ef4..000000000
--- a/test/util/cleanup.h
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_UTIL_CLEANUP_H_
-#define GVISOR_TEST_UTIL_CLEANUP_H_
-
-#include <functional>
-#include <utility>
-
-namespace gvisor {
-namespace testing {
-
-class Cleanup {
- public:
- Cleanup() : released_(true) {}
- explicit Cleanup(std::function<void()>&& callback) : cb_(callback) {}
-
- Cleanup(Cleanup&& other) {
- released_ = other.released_;
- cb_ = other.Release();
- }
-
- Cleanup& operator=(Cleanup&& other) {
- released_ = other.released_;
- cb_ = other.Release();
- return *this;
- }
-
- ~Cleanup() {
- if (!released_) {
- cb_();
- }
- }
-
- std::function<void()>&& Release() {
- released_ = true;
- return std::move(cb_);
- }
-
- private:
- Cleanup(Cleanup const& other) = delete;
-
- bool released_ = false;
- std::function<void(void)> cb_;
-};
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_UTIL_CLEANUP_H_
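A short sketch of the intended Cleanup pattern; the helper function and the close(fd) callback are illustrative assumptions:

#include <unistd.h>

#include "test/util/cleanup.h"

namespace gvisor {
namespace testing {

void UseThenClose(int fd) {
  // Run close(fd) when this scope exits, unless the callback is released.
  Cleanup closer([fd] { close(fd); });

  // ... operate on fd ...

  // If ownership of fd moves elsewhere, disarm the cleanup:
  // std::function<void()> cb = closer.Release();
}

}  // namespace testing
}  // namespace gvisor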
diff --git a/test/util/epoll_util.cc b/test/util/epoll_util.cc
deleted file mode 100644
index 2e5051468..000000000
--- a/test/util/epoll_util.cc
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/util/epoll_util.h"
-
-#include <sys/epoll.h>
-
-#include "gmock/gmock.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/posix_error.h"
-#include "test/util/save_util.h"
-
-namespace gvisor {
-namespace testing {
-
-PosixErrorOr<FileDescriptor> NewEpollFD(int size) {
- // "Since Linux 2.6.8, the size argument is ignored, but must be greater than
- // zero." - epoll_create(2)
- int fd = epoll_create(size);
- MaybeSave();
- if (fd < 0) {
- return PosixError(errno, "epoll_create");
- }
- return FileDescriptor(fd);
-}
-
-PosixError RegisterEpollFD(int epoll_fd, int target_fd, int events,
- uint64_t data) {
- struct epoll_event event;
- event.events = events;
- event.data.u64 = data;
- int rc = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, target_fd, &event);
- MaybeSave();
- if (rc < 0) {
- return PosixError(errno, "epoll_ctl");
- }
- return NoError();
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/util/epoll_util.h b/test/util/epoll_util.h
deleted file mode 100644
index f233b37d5..000000000
--- a/test/util/epoll_util.h
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_UTIL_EPOLL_UTIL_H_
-#define GVISOR_TEST_UTIL_EPOLL_UTIL_H_
-
-#include "test/util/file_descriptor.h"
-#include "test/util/posix_error.h"
-
-namespace gvisor {
-namespace testing {
-
-// Returns a new epoll file descriptor.
-PosixErrorOr<FileDescriptor> NewEpollFD(int size = 1);
-
-// Registers `target_fd` with the epoll instance represented by `epoll_fd` for
-// the epoll events `events`. Events on `target_fd` will be indicated by setting
-// data.u64 to `data` in the returned epoll_event.
-PosixError RegisterEpollFD(int epoll_fd, int target_fd, int events,
- uint64_t data);
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_UTIL_EPOLL_UTIL_H_
diff --git a/test/util/eventfd_util.h b/test/util/eventfd_util.h
deleted file mode 100644
index cb9ce829c..000000000
--- a/test/util/eventfd_util.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_UTIL_EVENTFD_UTIL_H_
-#define GVISOR_TEST_UTIL_EVENTFD_UTIL_H_
-
-#include <sys/eventfd.h>
-
-#include <cerrno>
-
-#include "test/util/file_descriptor.h"
-#include "test/util/posix_error.h"
-#include "test/util/save_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Returns a new eventfd with the given initial value and flags.
-inline PosixErrorOr<FileDescriptor> NewEventFD(unsigned int initval = 0,
- int flags = 0) {
- int fd = eventfd(initval, flags);
- MaybeSave();
- if (fd < 0) {
- return PosixError(errno, "eventfd");
- }
- return FileDescriptor(fd);
-}
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_UTIL_EVENTFD_UTIL_H_
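A minimal sketch wiring the epoll and eventfd helpers above together; the test name and the 1234 cookie are illustrative, and ASSERT_NO_ERRNO plus the Syscall matchers are assumed from the posix_error and test_util helpers used elsewhere in this diff:

#include <sys/epoll.h>

#include "gtest/gtest.h"
#include "test/util/epoll_util.h"
#include "test/util/eventfd_util.h"
#include "test/util/file_descriptor.h"
#include "test/util/test_util.h"

namespace gvisor {
namespace testing {
namespace {

TEST(EpollUtilExample, WaitForReadable) {
  FileDescriptor epoll_fd = ASSERT_NO_ERRNO_AND_VALUE(NewEpollFD());
  // An eventfd with a nonzero initial value is immediately readable.
  FileDescriptor event_fd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(/*initval=*/1));

  // Register event_fd for readability; 1234 is an arbitrary cookie that is
  // returned in data.u64.
  ASSERT_NO_ERRNO(
      RegisterEpollFD(epoll_fd.get(), event_fd.get(), EPOLLIN, 1234));

  struct epoll_event ev = {};
  ASSERT_THAT(epoll_wait(epoll_fd.get(), &ev, 1, /*timeout=*/0),
              SyscallSucceedsWithValue(1));
  EXPECT_EQ(ev.data.u64, 1234u);
}

}  // namespace
}  // namespace testing
}  // namespace gvisor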
diff --git a/test/util/file_descriptor.h b/test/util/file_descriptor.h
deleted file mode 100644
index fc5caa55b..000000000
--- a/test/util/file_descriptor.h
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_UTIL_FILE_DESCRIPTOR_H_
-#define GVISOR_TEST_UTIL_FILE_DESCRIPTOR_H_
-
-#include <fcntl.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <algorithm>
-#include <string>
-
-#include "gmock/gmock.h"
-#include "absl/strings/str_cat.h"
-#include "absl/strings/str_format.h"
-#include "test/util/logging.h"
-#include "test/util/posix_error.h"
-#include "test/util/save_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// FileDescriptor is an RAII type class which takes ownership of a file
-// descriptor. It will close the FD when this object goes out of scope.
-class FileDescriptor {
- public:
- // Constructs an empty FileDescriptor (one that does not own a file
- // descriptor).
- FileDescriptor() = default;
-
- // Constructs a FileDescriptor that owns fd. If fd is negative, constructs an
- // empty FileDescriptor.
- explicit FileDescriptor(int fd) { set_fd(fd); }
-
- FileDescriptor(FileDescriptor&& orig) : fd_(orig.release()) {}
-
- FileDescriptor& operator=(FileDescriptor&& orig) {
- reset(orig.release());
- return *this;
- }
-
- PosixErrorOr<FileDescriptor> Dup() const {
- if (fd_ < 0) {
- return PosixError(EINVAL, "Attempting to Dup unset fd");
- }
-
- int fd = dup(fd_);
- if (fd < 0) {
- return PosixError(errno, absl::StrCat("dup ", fd_));
- }
- MaybeSave();
- return FileDescriptor(fd);
- }
-
- FileDescriptor(FileDescriptor const& other) = delete;
- FileDescriptor& operator=(FileDescriptor const& other) = delete;
-
- ~FileDescriptor() { reset(); }
-
- // If this object is non-empty, returns the owned file descriptor. (Ownership
- // is retained by the FileDescriptor.) Otherwise returns -1.
- int get() const { return fd_; }
-
- // If this object is non-empty, transfers ownership of the file descriptor to
- // the caller and returns it. Otherwise returns -1.
- int release() {
- int const fd = fd_;
- fd_ = -1;
- return fd;
- }
-
- // If this object is non-empty, closes the owned file descriptor (recording a
- // test failure if the close fails).
- void reset() { reset(-1); }
-
- // Like no-arg reset(), but the FileDescriptor takes ownership of fd after
- // closing its existing file descriptor.
- void reset(int fd) {
- if (fd_ >= 0) {
- TEST_PCHECK(close(fd_) == 0);
- MaybeSave();
- }
- set_fd(fd);
- }
-
- private:
- // Wrapper that coerces negative fd values other than -1 to -1 so that get()
- // etc. return -1.
- void set_fd(int fd) { fd_ = std::max(fd, -1); }
-
- int fd_ = -1;
-};
-
-// Wrapper around open(2) that returns a FileDescriptor.
-inline PosixErrorOr<FileDescriptor> Open(std::string const& path, int flags,
- mode_t mode = 0) {
- int fd = open(path.c_str(), flags, mode);
- if (fd < 0) {
- return PosixError(errno, absl::StrFormat("open(%s, %#x, %#o)", path.c_str(),
- flags, mode));
- }
- MaybeSave();
- return FileDescriptor(fd);
-}
-
-// Wrapper around openat(2) that returns a FileDescriptor.
-inline PosixErrorOr<FileDescriptor> OpenAt(int dirfd, std::string const& path,
- int flags, mode_t mode = 0) {
- int fd = openat(dirfd, path.c_str(), flags, mode);
- if (fd < 0) {
- return PosixError(errno, absl::StrFormat("openat(%d, %s, %#x, %#o)", dirfd,
- path, flags, mode));
- }
- MaybeSave();
- return FileDescriptor(fd);
-}
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_UTIL_FILE_DESCRIPTOR_H_
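A minimal sketch of Open() and FileDescriptor ownership; the test name is illustrative, and TempPath::CreateFile() is assumed from the temp_path helper, which is not reproduced in this diff:

#include <fcntl.h>

#include "gtest/gtest.h"
#include "test/util/file_descriptor.h"
#include "test/util/temp_path.h"
#include "test/util/test_util.h"

namespace gvisor {
namespace testing {
namespace {

TEST(FileDescriptorExample, OpenAndDup) {
  TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());

  // Open() wraps open(2) and returns an owning FileDescriptor; the fd is
  // closed automatically when it goes out of scope.
  FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDONLY));

  // Dup() returns a second, independently owned descriptor.
  FileDescriptor dup_fd = ASSERT_NO_ERRNO_AND_VALUE(fd.Dup());
  EXPECT_NE(fd.get(), dup_fd.get());
}

}  // namespace
}  // namespace testing
}  // namespace gvisor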
diff --git a/test/util/fs_util.cc b/test/util/fs_util.cc
deleted file mode 100644
index 483ae848d..000000000
--- a/test/util/fs_util.cc
+++ /dev/null
@@ -1,727 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/util/fs_util.h"
-
-#include <dirent.h>
-#ifdef __linux__
-#include <linux/magic.h>
-#endif // __linux__
-#include <sys/stat.h>
-#include <sys/statfs.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "gmock/gmock.h"
-#include "absl/strings/match.h"
-#include "absl/strings/str_cat.h"
-#include "absl/strings/str_split.h"
-#include "absl/strings/string_view.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/util/cleanup.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/posix_error.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-PosixError WriteContentsToFD(int fd, absl::string_view contents) {
- int written = 0;
- while (static_cast<absl::string_view::size_type>(written) < contents.size()) {
- int wrote = write(fd, contents.data() + written, contents.size() - written);
- if (wrote < 0) {
- if (errno == EINTR) {
- continue;
- }
- return PosixError(
- errno, absl::StrCat("WriteContentsToFD fd: ", fd, " write failure."));
- }
- written += wrote;
- }
- return NoError();
-}
-} // namespace
-
-namespace internal {
-
-// Given a collection of file paths, append them all together,
-// ensuring that the proper path separators are inserted between them.
-std::string JoinPathImpl(std::initializer_list<absl::string_view> paths) {
- std::string result;
-
- if (paths.size() != 0) {
- // This size calculation is worst-case: it assumes one extra "/" for every
- // path other than the first.
- size_t total_size = paths.size() - 1;
- for (const absl::string_view path : paths) total_size += path.size();
- result.resize(total_size);
-
- auto begin = result.begin();
- auto out = begin;
- bool trailing_slash = false;
- for (absl::string_view path : paths) {
- if (path.empty()) continue;
- if (path.front() == '/') {
- if (trailing_slash) {
- path.remove_prefix(1);
- }
- } else {
- if (!trailing_slash && out != begin) *out++ = '/';
- }
- const size_t this_size = path.size();
- memcpy(&*out, path.data(), this_size);
- out += this_size;
- trailing_slash = out[-1] == '/';
- }
- result.erase(out - begin);
- }
- return result;
-}
-} // namespace internal
-
-// Returns the current working directory, or an error.
-PosixErrorOr<std::string> GetCWD() {
- char buffer[PATH_MAX + 1] = {};
- if (getcwd(buffer, PATH_MAX) == nullptr) {
- return PosixError(errno, "GetCWD() failed");
- }
-
- return std::string(buffer);
-}
-
-PosixErrorOr<struct stat> Stat(absl::string_view path) {
- struct stat stat_buf;
- int res = stat(std::string(path).c_str(), &stat_buf);
- if (res < 0) {
- return PosixError(errno, absl::StrCat("stat ", path));
- }
- return stat_buf;
-}
-
-PosixErrorOr<struct stat> Lstat(absl::string_view path) {
- struct stat stat_buf;
- int res = lstat(std::string(path).c_str(), &stat_buf);
- if (res < 0) {
- return PosixError(errno, absl::StrCat("lstat ", path));
- }
- return stat_buf;
-}
-
-PosixErrorOr<struct stat> Fstat(int fd) {
- struct stat stat_buf;
- int res = fstat(fd, &stat_buf);
- if (res < 0) {
- return PosixError(errno, absl::StrCat("fstat ", fd));
- }
- return stat_buf;
-}
-
-PosixErrorOr<bool> Exists(absl::string_view path) {
- struct stat stat_buf;
- int res = lstat(std::string(path).c_str(), &stat_buf);
- if (res < 0) {
- if (errno == ENOENT) {
- return false;
- }
- return PosixError(errno, absl::StrCat("lstat ", path));
- }
- return true;
-}
-
-PosixErrorOr<bool> IsDirectory(absl::string_view path) {
- ASSIGN_OR_RETURN_ERRNO(struct stat stat_buf, Lstat(path));
- if (S_ISDIR(stat_buf.st_mode)) {
- return true;
- }
-
- return false;
-}
-
-PosixError Delete(absl::string_view path) {
- int res = unlink(std::string(path).c_str());
- if (res < 0) {
- return PosixError(errno, absl::StrCat("unlink ", path));
- }
-
- return NoError();
-}
-
-PosixError Truncate(absl::string_view path, int length) {
- int res = truncate(std::string(path).c_str(), length);
- if (res < 0) {
- return PosixError(errno,
- absl::StrCat("truncate ", path, " to length ", length));
- }
-
- return NoError();
-}
-
-PosixError Chmod(absl::string_view path, int mode) {
- int res = chmod(std::string(path).c_str(), mode);
- if (res < 0) {
- return PosixError(errno, absl::StrCat("chmod ", path));
- }
-
- return NoError();
-}
-
-PosixError MknodAt(const FileDescriptor& dfd, absl::string_view path, int mode,
- dev_t dev) {
- int res = mknodat(dfd.get(), std::string(path).c_str(), mode, dev);
- if (res < 0) {
- return PosixError(errno, absl::StrCat("mknod ", path));
- }
-
- return NoError();
-}
-
-PosixError UnlinkAt(const FileDescriptor& dfd, absl::string_view path,
- int flags) {
- int res = unlinkat(dfd.get(), std::string(path).c_str(), flags);
- if (res < 0) {
- return PosixError(errno, absl::StrCat("unlink ", path));
- }
-
- return NoError();
-}
-
-PosixError Mkdir(absl::string_view path, int mode) {
- int res = mkdir(std::string(path).c_str(), mode);
- if (res < 0) {
- return PosixError(errno, absl::StrCat("mkdir ", path, " mode ", mode));
- }
-
- return NoError();
-}
-
-PosixError Rmdir(absl::string_view path) {
- int res = rmdir(std::string(path).c_str());
- if (res < 0) {
- return PosixError(errno, absl::StrCat("rmdir ", path));
- }
-
- return NoError();
-}
-
-PosixError SetContents(absl::string_view path, absl::string_view contents) {
- ASSIGN_OR_RETURN_ERRNO(bool exists, Exists(path));
- if (!exists) {
- return PosixError(
- ENOENT, absl::StrCat("SetContents file ", path, " doesn't exist."));
- }
-
- ASSIGN_OR_RETURN_ERRNO(auto fd, Open(std::string(path), O_WRONLY | O_TRUNC));
- return WriteContentsToFD(fd.get(), contents);
-}
-
-// Creates the file with the given mode if it does not already exist, then
-// writes the given contents to it.
-PosixError CreateWithContents(absl::string_view path,
- absl::string_view contents, int mode) {
- ASSIGN_OR_RETURN_ERRNO(
- auto fd, Open(std::string(path), O_WRONLY | O_CREAT | O_TRUNC, mode));
- return WriteContentsToFD(fd.get(), contents);
-}
-
-PosixError GetContents(absl::string_view path, std::string* output) {
- ASSIGN_OR_RETURN_ERRNO(auto fd, Open(std::string(path), O_RDONLY));
- output->clear();
-
- // Keep reading until we hit an EOF or an error.
- return GetContentsFD(fd.get(), output);
-}
-
-PosixErrorOr<std::string> GetContents(absl::string_view path) {
- std::string ret;
- RETURN_IF_ERRNO(GetContents(path, &ret));
- return ret;
-}
-
-PosixErrorOr<std::string> GetContentsFD(int fd) {
- std::string ret;
- RETURN_IF_ERRNO(GetContentsFD(fd, &ret));
- return ret;
-}
-
-PosixError GetContentsFD(int fd, std::string* output) {
- // Keep reading until we hit an EOF or an error.
- while (true) {
- char buf[16 * 1024] = {}; // Read in 16KB chunks.
- int bytes_read = read(fd, buf, sizeof(buf));
- if (bytes_read < 0) {
- if (errno == EINTR) {
- continue;
- }
- return PosixError(errno, "GetContentsFD read failure.");
- }
-
- if (bytes_read == 0) {
- break; // EOF.
- }
-
- output->append(buf, bytes_read);
- }
- return NoError();
-}
-
-PosixErrorOr<std::string> ReadLink(absl::string_view path) {
- char buf[PATH_MAX + 1] = {};
- int ret = readlink(std::string(path).c_str(), buf, PATH_MAX);
- if (ret < 0) {
- return PosixError(errno, absl::StrCat("readlink ", path));
- }
-
- return std::string(buf, ret);
-}
-
-PosixError WalkTree(
- absl::string_view path, bool recursive,
- const std::function<void(absl::string_view, const struct stat&)>& cb) {
- DIR* dir = opendir(std::string(path).c_str());
- if (dir == nullptr) {
- return PosixError(errno, absl::StrCat("opendir ", path));
- }
- auto dir_closer = Cleanup([&dir]() { closedir(dir); });
- while (true) {
- // Readdir(3): If the end of the directory stream is reached, NULL is
- // returned and errno is not changed. If an error occurs, NULL is returned
- // and errno is set appropriately. To distinguish end of stream from an
- // error, set errno to zero before calling readdir() and then check the
- // value of errno if NULL is returned.
- errno = 0;
- struct dirent* dp = readdir(dir);
- if (dp == nullptr) {
- if (errno != 0) {
- return PosixError(errno, absl::StrCat("readdir ", path));
- }
- break; // We're done.
- }
-
- if (strcmp(dp->d_name, ".") == 0 || strcmp(dp->d_name, "..") == 0) {
- // Skip dots.
- continue;
- }
-
- auto full_path = JoinPath(path, dp->d_name);
- ASSIGN_OR_RETURN_ERRNO(struct stat s, Stat(full_path));
- if (S_ISDIR(s.st_mode) && recursive) {
- RETURN_IF_ERRNO(WalkTree(full_path, recursive, cb));
- } else {
- cb(full_path, s);
- }
- }
- // We're done walking so let's invoke our cleanup callback now.
- dir_closer.Release()();
-
- // And we have to dispatch the callback on the base directory.
- ASSIGN_OR_RETURN_ERRNO(struct stat s, Stat(path));
- cb(path, s);
-
- return NoError();
-}
-
-PosixErrorOr<std::vector<std::string>> ListDir(absl::string_view abspath,
- bool skipdots) {
- std::vector<std::string> files;
-
- DIR* dir = opendir(std::string(abspath).c_str());
- if (dir == nullptr) {
- return PosixError(errno, absl::StrCat("opendir ", abspath));
- }
- auto dir_closer = Cleanup([&dir]() { closedir(dir); });
- while (true) {
- // Readdir(3): If the end of the directory stream is reached, NULL is
- // returned and errno is not changed. If an error occurs, NULL is returned
- // and errno is set appropriately. To distinguish end of stream from an
- // error, set errno to zero before calling readdir() and then check the
- // value of errno if NULL is returned.
- errno = 0;
- struct dirent* dp = readdir(dir);
- if (dp == nullptr) {
- if (errno != 0) {
- return PosixError(errno, absl::StrCat("readdir ", abspath));
- }
- break; // We're done.
- }
-
- if (strcmp(dp->d_name, ".") == 0 || strcmp(dp->d_name, "..") == 0) {
- if (skipdots) {
- continue;
- }
- }
- files.push_back(std::string(dp->d_name));
- }
-
- return files;
-}
-
-PosixError DirContains(absl::string_view path,
- const std::vector<std::string>& expect,
- const std::vector<std::string>& exclude) {
- ASSIGN_OR_RETURN_ERRNO(auto listing, ListDir(path, false));
-
- for (auto& expected_entry : expect) {
- auto cursor = std::find(listing.begin(), listing.end(), expected_entry);
- if (cursor == listing.end()) {
- return PosixError(ENOENT, absl::StrFormat("Failed to find '%s' in '%s'",
- expected_entry, path));
- }
- }
- for (auto& excluded_entry : exclude) {
- auto cursor = std::find(listing.begin(), listing.end(), excluded_entry);
- if (cursor != listing.end()) {
- return PosixError(ENOENT, absl::StrCat("File '", excluded_entry,
- "' found in path '", path, "'"));
- }
- }
- return NoError();
-}
-
-PosixError EventuallyDirContains(absl::string_view path,
- const std::vector<std::string>& expect,
- const std::vector<std::string>& exclude) {
- constexpr int kRetryCount = 100;
- const absl::Duration kRetryDelay = absl::Milliseconds(100);
-
- for (int i = 0; i < kRetryCount; ++i) {
- auto res = DirContains(path, expect, exclude);
- if (res.ok()) {
- return res;
- }
- if (i < kRetryCount - 1) {
- // Sleep if this isn't the final iteration.
- absl::SleepFor(kRetryDelay);
- }
- }
- return PosixError(ETIMEDOUT,
- "Timed out while waiting for directory to contain files ");
-}
-
-PosixError RecursivelyDelete(absl::string_view path, int* undeleted_dirs,
- int* undeleted_files) {
- ASSIGN_OR_RETURN_ERRNO(bool exists, Exists(path));
- if (!exists) {
- return PosixError(ENOENT, absl::StrCat(path, " does not exist"));
- }
-
- ASSIGN_OR_RETURN_ERRNO(bool dir, IsDirectory(path));
- if (!dir) {
- // Nothing recursive needs to happen; we can just call Delete.
- auto status = Delete(path);
- if (!status.ok() && undeleted_files) {
- (*undeleted_files)++;
- }
- return status;
- }
-
- return WalkTree(path, /*recursive=*/true,
- [&](absl::string_view absolute_path, const struct stat& s) {
- if (S_ISDIR(s.st_mode)) {
- auto rm_status = Rmdir(absolute_path);
- if (!rm_status.ok() && undeleted_dirs) {
- (*undeleted_dirs)++;
- }
- } else {
- auto delete_status = Delete(absolute_path);
- if (!delete_status.ok() && undeleted_files) {
- (*undeleted_files)++;
- }
- }
- });
-}
-
-PosixError RecursivelyCreateDir(absl::string_view path) {
- if (path.empty() || path == "/") {
- return PosixError(EINVAL, "Cannot create root!");
- }
-
- // Does it already exist? If so, we're done.
- ASSIGN_OR_RETURN_ERRNO(bool exists, Exists(path));
- if (exists) {
- return NoError();
- }
-
- // Do we need to create directories under us?
- auto dirname = Dirname(path);
- ASSIGN_OR_RETURN_ERRNO(exists, Exists(dirname));
- if (!exists) {
- RETURN_IF_ERRNO(RecursivelyCreateDir(dirname));
- }
-
- return Mkdir(path);
-}
-
-// Makes a path absolute with respect to an optional base. If no base is
-// provided, it will use the current working directory.
-PosixErrorOr<std::string> MakeAbsolute(absl::string_view filename,
- absl::string_view base) {
- if (filename.empty()) {
- return PosixError(EINVAL, "filename cannot be empty.");
- }
-
- if (filename[0] == '/') {
- // This path is already absolute.
- return std::string(filename);
- }
-
- std::string actual_base;
- if (!base.empty()) {
- actual_base = std::string(base);
- } else {
- auto cwd_or = GetCWD();
- RETURN_IF_ERRNO(cwd_or.error());
- actual_base = cwd_or.ValueOrDie();
- }
-
- // Reverse iterate removing trailing slashes, effectively right trim '/'.
- for (int i = actual_base.size() - 1; i >= 0 && actual_base[i] == '/'; --i) {
- actual_base.erase(i, 1);
- }
-
- if (filename == ".") {
- return actual_base.empty() ? "/" : actual_base;
- }
-
- return absl::StrCat(actual_base, "/", filename);
-}
-
-std::string CleanPath(const absl::string_view unclean_path) {
- std::string path = std::string(unclean_path);
- const char* src = path.c_str();
- std::string::iterator dst = path.begin();
-
- // Check for absolute path and determine initial backtrack limit.
- const bool is_absolute_path = *src == '/';
- if (is_absolute_path) {
- *dst++ = *src++;
- while (*src == '/') ++src;
- }
- std::string::const_iterator backtrack_limit = dst;
-
- // Process all parts
- while (*src) {
- bool parsed = false;
-
- if (src[0] == '.') {
- // 1dot ".<whateverisnext>", check for END or SEP.
- if (src[1] == '/' || !src[1]) {
- if (*++src) {
- ++src;
- }
- parsed = true;
- } else if (src[1] == '.' && (src[2] == '/' || !src[2])) {
- // 2dot END or SEP (".." | "../<whateverisnext>").
- src += 2;
- if (dst != backtrack_limit) {
- // We can backtrack the previous part
- for (--dst; dst != backtrack_limit && dst[-1] != '/'; --dst) {
- // Empty.
- }
- } else if (!is_absolute_path) {
- // Failed to backtrack and we can't skip it either. Rewind and copy.
- src -= 2;
- *dst++ = *src++;
- *dst++ = *src++;
- if (*src) {
- *dst++ = *src;
- }
- // We can never backtrack over a copied "../" part so set new limit.
- backtrack_limit = dst;
- }
- if (*src) {
- ++src;
- }
- parsed = true;
- }
- }
-
- // If not parsed, copy entire part until the next SEP or EOS.
- if (!parsed) {
- while (*src && *src != '/') {
- *dst++ = *src++;
- }
- if (*src) {
- *dst++ = *src++;
- }
- }
-
- // Skip consecutive SEP occurrences
- while (*src == '/') {
- ++src;
- }
- }
-
- // Calculate and check the length of the cleaned path.
- int path_length = dst - path.begin();
- if (path_length != 0) {
- // Remove trailing '/' except if it is root path ("/" ==> path_length := 1)
- if (path_length > 1 && path[path_length - 1] == '/') {
- --path_length;
- }
- path.resize(path_length);
- } else {
- // The cleaned path is empty; assign "." as per the spec.
- path.assign(1, '.');
- }
- return path;
-}
-
-PosixErrorOr<std::string> GetRelativePath(absl::string_view source,
- absl::string_view dest) {
- if (!absl::StartsWith(source, "/") || !absl::StartsWith(dest, "/")) {
- // At least one of the inputs is not an absolute path.
- return PosixError(
- EINVAL,
- "GetRelativePath: At least one of the inputs is not an absolute path.");
- }
- const std::string clean_source = CleanPath(source);
- const std::string clean_dest = CleanPath(dest);
- auto source_parts = absl::StrSplit(clean_source, '/', absl::SkipEmpty());
- auto dest_parts = absl::StrSplit(clean_dest, '/', absl::SkipEmpty());
- auto source_iter = source_parts.begin();
- auto dest_iter = dest_parts.begin();
-
- // Advance past common prefix.
- while (source_iter != source_parts.end() && dest_iter != dest_parts.end() &&
- *source_iter == *dest_iter) {
- ++source_iter;
- ++dest_iter;
- }
-
- // Build result backtracking.
- std::string result = "";
- while (source_iter != source_parts.end()) {
- absl::StrAppend(&result, "../");
- ++source_iter;
- }
-
- // Add remaining path to dest.
- while (dest_iter != dest_parts.end()) {
- absl::StrAppend(&result, *dest_iter, "/");
- ++dest_iter;
- }
-
- if (result.empty()) {
- return std::string(".");
- }
-
- // Remove trailing slash.
- result.erase(result.size() - 1);
- return result;
-}
-
-absl::string_view Dirname(absl::string_view path) {
- return SplitPath(path).first;
-}
-
-absl::string_view Basename(absl::string_view path) {
- return SplitPath(path).second;
-}
-
-std::pair<absl::string_view, absl::string_view> SplitPath(
- absl::string_view path) {
- std::string::size_type pos = path.find_last_of('/');
-
- // Handle the case with no '/' in 'path'.
- if (pos == absl::string_view::npos) {
- return std::make_pair(path.substr(0, 0), path);
- }
-
- // Handle the case with a single leading '/' in 'path'.
- if (pos == 0) {
- return std::make_pair(path.substr(0, 1), absl::ClippedSubstr(path, 1));
- }
-
- return std::make_pair(path.substr(0, pos),
- absl::ClippedSubstr(path, pos + 1));
-}
-
-std::string JoinPath(absl::string_view path1, absl::string_view path2) {
- if (path1.empty()) {
- return std::string(path2);
- }
- if (path2.empty()) {
- return std::string(path1);
- }
-
- if (path1.back() == '/') {
- if (path2.front() == '/') {
- return absl::StrCat(path1, absl::ClippedSubstr(path2, 1));
- }
- } else {
- if (path2.front() != '/') {
- return absl::StrCat(path1, "/", path2);
- }
- }
- return absl::StrCat(path1, path2);
-}
-
-PosixErrorOr<std::string> ProcessExePath(int pid) {
- if (pid <= 0) {
- return PosixError(EINVAL, "Invalid pid specified");
- }
-
- return ReadLink(absl::StrCat("/proc/", pid, "/exe"));
-}
-
-#ifdef __linux__
-PosixErrorOr<bool> IsTmpfs(const std::string& path) {
- struct statfs stat;
- if (statfs(path.c_str(), &stat)) {
- if (errno == ENOENT) {
- // Nothing at path, don't raise this as an error. Instead, just report no
- // tmpfs at path.
- return false;
- }
- return PosixError(errno,
- absl::StrFormat("statfs(\"%s\", %#p)", path, &stat));
- }
- return stat.f_type == TMPFS_MAGIC;
-}
-#endif // __linux__
-
-PosixErrorOr<bool> IsOverlayfs(const std::string& path) {
- struct statfs stat;
- if (statfs(path.c_str(), &stat)) {
- if (errno == ENOENT) {
- // Nothing at path, don't raise this as an error. Instead, just report no
- // overlayfs at path.
- return false;
- }
- return PosixError(errno,
- absl::StrFormat("statfs(\"%s\", %#p)", path, &stat));
- }
- return stat.f_type == OVERLAYFS_SUPER_MAGIC;
-}
-
-PosixError CheckSameFile(const FileDescriptor& fd1, const FileDescriptor& fd2) {
- struct stat stat_result1, stat_result2;
- int res = fstat(fd1.get(), &stat_result1);
- if (res < 0) {
- return PosixError(errno, absl::StrCat("fstat ", fd1.get()));
- }
-
- res = fstat(fd2.get(), &stat_result2);
- if (res < 0) {
- return PosixError(errno, absl::StrCat("fstat ", fd2.get()));
- }
- EXPECT_EQ(stat_result1.st_dev, stat_result2.st_dev);
- EXPECT_EQ(stat_result1.st_ino, stat_result2.st_ino);
-
- return NoError();
-}
-} // namespace testing
-} // namespace gvisor
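The string-only path helpers above (CleanPath, JoinPath, GetRelativePath) are easiest to understand by example. A minimal sketch, assuming a gtest environment and the ASSERT_NO_ERRNO_AND_VALUE macro from test/util/test_util.h; the test name is illustrative.

TEST(FsUtilExample, PathStringHelpers) {
  // CleanPath collapses duplicate separators and resolves "." and "..".
  EXPECT_EQ(CleanPath("/a//b/./c/../d/"), "/a/b/d");
  // JoinPath inserts exactly one separator between non-empty components.
  EXPECT_EQ(JoinPath("/a/", "/b"), "/a/b");
  EXPECT_EQ(JoinPath("a", "b", "c"), "a/b/c");
  // GetRelativePath requires two absolute paths and backtracks with "../".
  const std::string rel =
      ASSERT_NO_ERRNO_AND_VALUE(GetRelativePath("/a/b", "/a/c"));
  EXPECT_EQ(rel, "../c");
}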
diff --git a/test/util/fs_util.h b/test/util/fs_util.h
deleted file mode 100644
index bb2d1d3c8..000000000
--- a/test/util/fs_util.h
+++ /dev/null
@@ -1,237 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_UTIL_FS_UTIL_H_
-#define GVISOR_TEST_UTIL_FS_UTIL_H_
-
-#include <dirent.h>
-#include <sys/stat.h>
-#include <sys/statfs.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "absl/strings/string_view.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/posix_error.h"
-
-namespace gvisor {
-namespace testing {
-
-// O_LARGEFILE as defined by Linux. glibc tries to be clever by setting it to 0
-// because "it isn't needed", even though Linux can return it via F_GETFL.
-#if defined(__x86_64__)
-constexpr int kOLargeFile = 00100000;
-#elif defined(__aarch64__)
-constexpr int kOLargeFile = 00400000;
-#else
-#error "Unknown architecture"
-#endif
-
-// From linux/magic.h. For some reason, not defined in the headers for some
-// build environments.
-#define OVERLAYFS_SUPER_MAGIC 0x794c7630
-
-// Returns the current working directory, or an error.
-PosixErrorOr<std::string> GetCWD();
-
-// Returns true/false depending on whether or not path exists, or an error if it
-// can't be determined.
-PosixErrorOr<bool> Exists(absl::string_view path);
-
-// Returns a stat structure for the given path or an error. If the path
-// represents a symlink, it will be traversed.
-PosixErrorOr<struct stat> Stat(absl::string_view path);
-
-// Returns a stat structure for the given path or an error. If the path
-// represents a symlink, it will not be traversed.
-PosixErrorOr<struct stat> Lstat(absl::string_view path);
-
-// Returns a stat struct for the given fd.
-PosixErrorOr<struct stat> Fstat(int fd);
-
-// Deletes the file or directory at path or returns an error.
-PosixError Delete(absl::string_view path);
-
-// Changes the mode of a file or returns an error.
-PosixError Chmod(absl::string_view path, int mode);
-
-// Create a special or ordinary file.
-PosixError MknodAt(const FileDescriptor& dfd, absl::string_view path, int mode,
- dev_t dev);
-
-// Unlink the file.
-PosixError UnlinkAt(const FileDescriptor& dfd, absl::string_view path,
- int flags);
-
-// Truncates a file to the given length or returns an error.
-PosixError Truncate(absl::string_view path, int length);
-
-// Returns true/false depending on whether or not the path is a directory or
-// returns an error.
-PosixErrorOr<bool> IsDirectory(absl::string_view path);
-
-// Makes a directory or returns an error.
-PosixError Mkdir(absl::string_view path, int mode = 0755);
-
-// Removes a directory or returns an error.
-PosixError Rmdir(absl::string_view path);
-
-// Attempts to set the contents of a file or returns an error.
-PosixError SetContents(absl::string_view path, absl::string_view contents);
-
-// Creates a file with the given contents and mode or returns an error.
-PosixError CreateWithContents(absl::string_view path,
- absl::string_view contents, int mode = 0666);
-
-// Attempts to read the entire contents of the file into the provided string
-// buffer or returns an error.
-PosixError GetContents(absl::string_view path, std::string* output);
-
-// Attempts to read the entire contents of the file or returns an error.
-PosixErrorOr<std::string> GetContents(absl::string_view path);
-
-// Attempts to read the entire contents of the provided fd into the provided
-// string or returns an error.
-PosixError GetContentsFD(int fd, std::string* output);
-
-// Attempts to read the entire contents of the provided fd or returns an error.
-PosixErrorOr<std::string> GetContentsFD(int fd);
-
-// Executes the readlink(2) system call or returns an error.
-PosixErrorOr<std::string> ReadLink(absl::string_view path);
-
-// WalkTree walks a directory tree in a depth-first manner (if recursive). It
-// invokes the provided callback for each file and directory; the parent is
-// always invoked last, which makes this appropriate for things such as
-// deleting an entire directory tree.
-//
-// This method will return an error when it's unable to access the provided
-// path, or when the path is not a directory.
-PosixError WalkTree(
- absl::string_view path, bool recursive,
- const std::function<void(absl::string_view, const struct stat&)>& cb);
-
-// Returns the base filenames for all files under a given absolute path. If
-// skipdots is true, the returned vector will not contain "." or "..". This
-// method does not walk the tree recursively; it only returns the entries
-// in that directory.
-PosixErrorOr<std::vector<std::string>> ListDir(absl::string_view abspath,
- bool skipdots);
-
-// Check that a directory contains children nodes named in expect, and does not
-// contain any children nodes named in exclude.
-PosixError DirContains(absl::string_view path,
- const std::vector<std::string>& expect,
- const std::vector<std::string>& exclude);
-
-// Same as DirContains, but adds a retry. Suitable for checking a directory
-// being modified asynchronously.
-PosixError EventuallyDirContains(absl::string_view path,
- const std::vector<std::string>& expect,
- const std::vector<std::string>& exclude);
-
-// Attempt to recursively delete a directory or file. Returns an error and
-// the number of undeleted directories and files. If either
-// undeleted_dirs or undeleted_files is nullptr then it will not be used.
-PosixError RecursivelyDelete(absl::string_view path, int* undeleted_dirs,
- int* undeleted_files);
-
-// Recursively create the directory provided or return an error.
-PosixError RecursivelyCreateDir(absl::string_view path);
-
-// Makes a path absolute with respect to an optional base. If no base is
-// provided, it will use the current working directory.
-PosixErrorOr<std::string> MakeAbsolute(absl::string_view filename,
- absl::string_view base);
-
-// Generates a relative path from the source directory to the destination
-// (dest) file or directory. This uses "../" when necessary for destinations
-// which are not nested within the source. Both source and dest are required
-// to be absolute paths; an error is returned if they are not.
-PosixErrorOr<std::string> GetRelativePath(absl::string_view source,
- absl::string_view dest);
-
-// Returns the part of the path before the final "/", EXCEPT:
-// * If there is a single leading "/" in the path, the result will be the
-// leading "/".
-// * If there is no "/" in the path, the result is the empty prefix of the
-// input string.
-absl::string_view Dirname(absl::string_view path);
-
-// Return the parts of the path, split on the final "/". If there is no
-// "/" in the path, the first part of the output is empty and the second
-// is the input. If the only "/" in the path is the first character, it is
-// the first part of the output.
-std::pair<absl::string_view, absl::string_view> SplitPath(
- absl::string_view path);
-
-// Returns the part of the path after the final "/". If there is no
-// "/" in the path, the result is the same as the input.
-// Note that this function's behavior differs from the Unix basename
-// command if path ends with "/". For such paths, this function returns the
-// empty string.
-absl::string_view Basename(absl::string_view path);
-
-// Collapse duplicate "/"s, resolve ".." and "." path elements, remove
-// trailing "/".
-//
-// NOTE: This respects relative vs. absolute paths, but does not
-// invoke any system calls (getcwd(2)) in order to resolve relative
-// paths wrt actual working directory. That is, this is purely a
-// string manipulation, completely independent of process state.
-std::string CleanPath(absl::string_view path);
-
-// Returns the full path to the executable of the given pid or a PosixError.
-PosixErrorOr<std::string> ProcessExePath(int pid);
-
-#ifdef __linux__
-// IsTmpfs returns true if the file at path is backed by tmpfs.
-PosixErrorOr<bool> IsTmpfs(const std::string& path);
-#endif // __linux__
-
-// IsOverlayfs returns true if the file at path is backed by overlayfs.
-PosixErrorOr<bool> IsOverlayfs(const std::string& path);
-
-PosixError CheckSameFile(const FileDescriptor& fd1, const FileDescriptor& fd2);
-
-namespace internal {
-// Not part of the public API.
-std::string JoinPathImpl(std::initializer_list<absl::string_view> paths);
-} // namespace internal
-
-// Join multiple paths together.
-// All paths will be treated as relative paths, regardless of whether or not
-// they start with a leading '/'. That is, all paths will be concatenated
-// together, with the appropriate path separator inserted in between.
-// Arguments must be convertible to absl::string_view.
-//
-// Usage:
-// std::string path = JoinPath("/foo", dirname, filename);
-// std::string path = JoinPath(FLAGS_test_srcdir, filename);
-//
-// 0, 1, 2-path specializations exist to optimize common cases.
-inline std::string JoinPath() { return std::string(); }
-inline std::string JoinPath(absl::string_view path) {
- return std::string(path.data(), path.size());
-}
-
-std::string JoinPath(absl::string_view path1, absl::string_view path2);
-template <typename... T>
-inline std::string JoinPath(absl::string_view path1, absl::string_view path2,
- absl::string_view path3, const T&... args) {
- return internal::JoinPathImpl({path1, path2, path3, args...});
-}
-} // namespace testing
-} // namespace gvisor
-#endif // GVISOR_TEST_UTIL_FS_UTIL_H_
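A minimal usage sketch for the directory helpers declared above, assuming TempPath from test/util/temp_path.h and the ASSERT_NO_ERRNO* macros from test/util/test_util.h (both also used by the test file that follows); names are illustrative.

TEST(FsUtilExample, CreateListAndCheck) {
  const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
  const std::string nested = JoinPath(root.path(), "a/b/c");
  ASSERT_NO_ERRNO(RecursivelyCreateDir(nested));
  ASSERT_NO_ERRNO(CreateWithContents(JoinPath(nested, "file"), "hello"));
  // "file" should be present; "missing" should not.
  ASSERT_NO_ERRNO(DirContains(nested, {"file"}, {"missing"}));
  std::string contents;
  ASSERT_NO_ERRNO(GetContents(JoinPath(nested, "file"), &contents));
  EXPECT_EQ(contents, "hello");
}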
diff --git a/test/util/fs_util_test.cc b/test/util/fs_util_test.cc
deleted file mode 100644
index 657b6a46e..000000000
--- a/test/util/fs_util_test.cc
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/util/fs_util.h"
-
-#include <errno.h>
-
-#include <vector>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "test/util/posix_error.h"
-#include "test/util/temp_path.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(FsUtilTest, RecursivelyCreateDirManualDelete) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const std::string base_path =
- JoinPath(root.path(), "/a/b/c/d/e/f/g/h/i/j/k/l/m");
-
- ASSERT_THAT(Exists(base_path), IsPosixErrorOkAndHolds(false));
- ASSERT_NO_ERRNO(RecursivelyCreateDir(base_path));
-
- // Delete everything until we hit root and then stop; we want to try this
- // without using RecursivelyDelete.
- std::string cur_path = base_path;
- while (cur_path != root.path()) {
- ASSERT_THAT(Exists(cur_path), IsPosixErrorOkAndHolds(true));
- ASSERT_NO_ERRNO(Rmdir(cur_path));
- ASSERT_THAT(Exists(cur_path), IsPosixErrorOkAndHolds(false));
- auto dir = Dirname(cur_path);
- cur_path = std::string(dir);
- }
-}
-
-TEST(FsUtilTest, RecursivelyCreateAndDeleteDir) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const std::string base_path =
- JoinPath(root.path(), "/a/b/c/d/e/f/g/h/i/j/k/l/m");
-
- ASSERT_THAT(Exists(base_path), IsPosixErrorOkAndHolds(false));
- ASSERT_NO_ERRNO(RecursivelyCreateDir(base_path));
-
- const std::string sub_path = JoinPath(root.path(), "a");
- ASSERT_NO_ERRNO(RecursivelyDelete(sub_path, nullptr, nullptr));
- ASSERT_THAT(Exists(sub_path), IsPosixErrorOkAndHolds(false));
-}
-
-TEST(FsUtilTest, RecursivelyCreateAndDeletePartial) {
- const TempPath root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
- const std::string base_path =
- JoinPath(root.path(), "/a/b/c/d/e/f/g/h/i/j/k/l/m");
-
- ASSERT_THAT(Exists(base_path), IsPosixErrorOkAndHolds(false));
- ASSERT_NO_ERRNO(RecursivelyCreateDir(base_path));
-
- const std::string a = JoinPath(root.path(), "a");
- auto listing = ASSERT_NO_ERRNO_AND_VALUE(ListDir(a, true));
- ASSERT_THAT(listing, ::testing::Contains("b"));
- ASSERT_EQ(listing.size(), 1);
-
- listing = ASSERT_NO_ERRNO_AND_VALUE(ListDir(a, false));
- ASSERT_THAT(listing, ::testing::Contains("."));
- ASSERT_THAT(listing, ::testing::Contains(".."));
- ASSERT_THAT(listing, ::testing::Contains("b"));
- ASSERT_EQ(listing.size(), 3);
-
- const std::string sub_path = JoinPath(root.path(), "/a/b/c/d/e/f");
-
- ASSERT_NO_ERRNO(
- CreateWithContents(JoinPath(Dirname(sub_path), "file"), "Hello World"));
- std::string contents = "";
- ASSERT_NO_ERRNO(GetContents(JoinPath(Dirname(sub_path), "file"), &contents));
- ASSERT_EQ(contents, "Hello World");
-
- ASSERT_NO_ERRNO(RecursivelyDelete(sub_path, nullptr, nullptr));
- ASSERT_THAT(Exists(sub_path), IsPosixErrorOkAndHolds(false));
-
- // The parent of the subpath (directory e) should still exist.
- ASSERT_THAT(Exists(Dirname(sub_path)), IsPosixErrorOkAndHolds(true));
-
- // The file we created alongside f should also still exist.
- ASSERT_THAT(Exists(JoinPath(Dirname(sub_path), "file")),
- IsPosixErrorOkAndHolds(true));
-}
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/util/fuse_util.cc b/test/util/fuse_util.cc
deleted file mode 100644
index 027f8386c..000000000
--- a/test/util/fuse_util.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/util/fuse_util.h"
-
-#include <sys/stat.h>
-#include <sys/types.h>
-
-#include <string>
-
-namespace gvisor {
-namespace testing {
-
-// Create a default fuse_attr struct with the specified mode, inode, and size.
-fuse_attr DefaultFuseAttr(mode_t mode, uint64_t inode, uint64_t size) {
- const int time_sec = 1595436289;
- const int time_nsec = 134150844;
- return (struct fuse_attr){
- .ino = inode,
- .size = size,
- .blocks = 4,
- .atime = time_sec,
- .mtime = time_sec,
- .ctime = time_sec,
- .atimensec = time_nsec,
- .mtimensec = time_nsec,
- .ctimensec = time_nsec,
- .mode = mode,
- .nlink = 2,
- .uid = 1234,
- .gid = 4321,
- .rdev = 12,
- .blksize = 4096,
- };
-}
-
-// Create a response body with the specified mode, node ID, and size.
-fuse_entry_out DefaultEntryOut(mode_t mode, uint64_t node_id, uint64_t size) {
- struct fuse_entry_out default_entry_out = {
- .nodeid = node_id,
- .generation = 0,
- .entry_valid = 0,
- .attr_valid = 0,
- .entry_valid_nsec = 0,
- .attr_valid_nsec = 0,
- .attr = DefaultFuseAttr(mode, node_id, size),
- };
- return default_entry_out;
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/util/fuse_util.h b/test/util/fuse_util.h
deleted file mode 100644
index 544fe1b38..000000000
--- a/test/util/fuse_util.h
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_UTIL_FUSE_UTIL_H_
-#define GVISOR_TEST_UTIL_FUSE_UTIL_H_
-
-#include <linux/fuse.h>
-#include <sys/uio.h>
-
-#include <string>
-#include <vector>
-
-namespace gvisor {
-namespace testing {
-
-// The fundamental generation function with a single argument. If passed a
-// std::string or std::vector<char>, one of the specialized overloads
-// implemented below is selected instead.
-template <typename T>
-std::vector<struct iovec> FuseGenerateIovecs(T &first) {
- return {(struct iovec){.iov_base = &first, .iov_len = sizeof(first)}};
-}
-
-// If an argument is of type std::string, it must be used in a read-only
-// scenario. Because we are setting up an iovec, which contains the original
-// address of a data structure, we have to drop the const qualification.
-// Usually used with variable-length payload data.
-template <typename T = std::string>
-std::vector<struct iovec> FuseGenerateIovecs(std::string &first) {
- // Pad one byte for null-terminate c-string.
- return {(struct iovec){.iov_base = const_cast<char *>(first.c_str()),
- .iov_len = first.size() + 1}};
-}
-
-// If an argument is of type std::vector<char>, it must be used in a write-only
-// scenario, and the size of the variable must be greater than or equal to the
-// size of the expected data. Usually used with variable-length payload data.
-template <typename T = std::vector<char>>
-std::vector<struct iovec> FuseGenerateIovecs(std::vector<char> &first) {
- return {(struct iovec){.iov_base = first.data(), .iov_len = first.size()}};
-}
-
-// A helper function to set up an array of iovec structs for testing purposes.
-// A variadic function template is used to generalize over different numbers
-// and types of FUSE structs.
-template <typename T, typename... Types>
-std::vector<struct iovec> FuseGenerateIovecs(T &first, Types &...args) {
- auto first_iovec = FuseGenerateIovecs(first);
- auto iovecs = FuseGenerateIovecs(args...);
- first_iovec.insert(std::end(first_iovec), std::begin(iovecs),
- std::end(iovecs));
- return first_iovec;
-}
-
-// Create a fuse_attr filled with the specified mode, inode, and size.
-fuse_attr DefaultFuseAttr(mode_t mode, uint64_t inode, uint64_t size = 512);
-
-// Return a fuse_entry_out FUSE server response body.
-fuse_entry_out DefaultEntryOut(mode_t mode, uint64_t node_id,
- uint64_t size = 512);
-
-} // namespace testing
-} // namespace gvisor
-#endif // GVISOR_TEST_UTIL_FUSE_UTIL_H_
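As a rough sketch of how these helpers combine in a FUSE-server test, a LOOKUP reply might be assembled as below before being written to the /dev/fuse fd with writev(2). It assumes the fuse_out_header layout from <linux/fuse.h> and S_IFREG from <sys/stat.h>; the function name is illustrative.

std::vector<struct iovec> BuildLookupReply(struct fuse_out_header& header,
                                           struct fuse_entry_out& entry) {
  // Default attributes for a 512-byte regular file at node 2.
  entry = DefaultEntryOut(S_IFREG | 0644, /*node_id=*/2);
  header.len = sizeof(header) + sizeof(entry);
  // One iovec per struct, in the order they are written to /dev/fuse.
  return FuseGenerateIovecs(header, entry);
}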
diff --git a/test/util/logging.cc b/test/util/logging.cc
deleted file mode 100644
index 5fadb076b..000000000
--- a/test/util/logging.cc
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/util/logging.h"
-
-#include <errno.h>
-#include <stdint.h>
-#include <stdlib.h>
-#include <unistd.h>
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// We implement this here instead of using test_util to avoid cyclic
-// dependencies.
-int Write(int fd, const char* buf, size_t size) {
- size_t written = 0;
- while (written < size) {
- int res = write(fd, buf + written, size - written);
- if (res < 0 && errno == EINTR) {
- continue;
- } else if (res <= 0) {
- break;
- }
-
- written += res;
- }
- return static_cast<int>(written);
-}
-
-// Write 32-bit decimal number to fd.
-int WriteNumber(int fd, uint32_t val) {
- constexpr char kDigits[] = "0123456789";
- constexpr int kBase = 10;
-
- // 10 chars for 32-bit number in decimal, 1 char for the NUL-terminator.
- constexpr int kBufferSize = 11;
- char buf[kBufferSize];
-
- // Convert the number to string.
- char* s = buf + sizeof(buf) - 1;
- size_t size = 0;
-
- *s = '\0';
- do {
- s--;
- size++;
-
- *s = kDigits[val % kBase];
- val /= kBase;
- } while (val);
-
- return Write(fd, s, size);
-}
-
-} // namespace
-
-void CheckFailure(const char* cond, size_t cond_size, const char* msg,
- size_t msg_size, int errno_value) {
- constexpr char kCheckFailure[] = "Check failed: ";
- Write(2, kCheckFailure, sizeof(kCheckFailure) - 1);
- Write(2, cond, cond_size);
-
- if (msg != nullptr) {
- Write(2, ": ", 2);
- Write(2, msg, msg_size);
- }
-
- if (errno_value != 0) {
- constexpr char kErrnoMessage[] = " (errno ";
- Write(2, kErrnoMessage, sizeof(kErrnoMessage) - 1);
- WriteNumber(2, errno_value);
- Write(2, ")", 1);
- }
-
- Write(2, "\n", 1);
-
- abort();
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/util/logging.h b/test/util/logging.h
deleted file mode 100644
index 5c17f1233..000000000
--- a/test/util/logging.h
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_UTIL_LOGGING_H_
-#define GVISOR_TEST_UTIL_LOGGING_H_
-
-#include <stddef.h>
-
-namespace gvisor {
-namespace testing {
-
-void CheckFailure(const char* cond, size_t cond_size, const char* msg,
- size_t msg_size, int errno_value);
-
-// If cond is false, aborts the current process.
-//
-// This macro is async-signal-safe.
-#define TEST_CHECK(cond) \
- do { \
- if (!(cond)) { \
- ::gvisor::testing::CheckFailure(#cond, sizeof(#cond) - 1, nullptr, \
- 0, 0); \
- } \
- } while (0)
-
-// If cond is false, logs msg then aborts the current process.
-//
-// This macro is async-signal-safe.
-#define TEST_CHECK_MSG(cond, msg) \
- do { \
- if (!(cond)) { \
- ::gvisor::testing::CheckFailure(#cond, sizeof(#cond) - 1, msg, \
- sizeof(msg) - 1, 0); \
- } \
- } while (0)
-
-// If cond is false, logs errno, then aborts the current process.
-//
-// This macro is async-signal-safe.
-#define TEST_PCHECK(cond) \
- do { \
- if (!(cond)) { \
- ::gvisor::testing::CheckFailure(#cond, sizeof(#cond) - 1, nullptr, \
- 0, errno); \
- } \
- } while (0)
-
-// If cond is false, logs msg and errno, then aborts the current process.
-//
-// This macro is async-signal-safe.
-#define TEST_PCHECK_MSG(cond, msg) \
- do { \
- if (!(cond)) { \
- ::gvisor::testing::CheckFailure(#cond, sizeof(#cond) - 1, msg, \
- sizeof(msg) - 1, errno); \
- } \
- } while (0)
-
-// expr must return PosixErrorOr<T>. The current process is aborted if the
-// result is not ok().
-//
-// This macro is async-signal-safe.
-#define TEST_CHECK_NO_ERRNO(expr) \
- ({ \
- auto _expr_result = (expr); \
- if (!_expr_result.ok()) { \
- ::gvisor::testing::CheckFailure( \
- #expr, sizeof(#expr) - 1, nullptr, 0, \
- _expr_result.error().errno_value()); \
- } \
- })
-
-// expr must return PosixErrorOr<T>. The current process is aborted if the
-// result is not ok(). Otherwise, the PosixErrorOr<T> value is returned.
-//
-// This macro is async-signal-safe.
-#define TEST_CHECK_NO_ERRNO_AND_VALUE(expr) \
- ({ \
- auto _expr_result = (expr); \
- if (!_expr_result.ok()) { \
- ::gvisor::testing::CheckFailure( \
- #expr, sizeof(#expr) - 1, nullptr, 0, \
- _expr_result.error().errno_value()); \
- } \
- std::move(_expr_result).ValueOrDie(); \
- })
-
-// cond must be greater than or equal to 0. Used to test the result of syscalls.
-//
-// This macro is async-signal-safe.
-#define TEST_CHECK_SUCCESS(cond) TEST_PCHECK((cond) >= 0)
-
-// cond must be -1 and errno must match errno_value. Used to test errors from
-// syscalls.
-//
-// This macro is async-signal-safe.
-#define TEST_CHECK_ERRNO(cond, errno_value) \
- do { \
- TEST_PCHECK((cond) == -1); \
- TEST_PCHECK_MSG(errno == (errno_value), #cond " expected " #errno_value); \
- } while (0)
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_UTIL_LOGGING_H_
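These macros are intended for code where gtest assertions are unsafe, for example in a child after fork(2) or inside a signal handler. A minimal sketch, assuming <fcntl.h> and <unistd.h> are included; the function name is illustrative.

void ChildBody() {
  int fd = open("/proc/self/status", O_RDONLY);
  TEST_PCHECK_MSG(fd >= 0, "open(/proc/self/status) failed");
  TEST_CHECK_SUCCESS(close(fd));
  // Closing the same fd again must fail with EBADF.
  TEST_CHECK_ERRNO(close(fd), EBADF);
}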
diff --git a/test/util/memory_util.h b/test/util/memory_util.h
deleted file mode 100644
index e189b73e8..000000000
--- a/test/util/memory_util.h
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_UTIL_MEMORY_UTIL_H_
-#define GVISOR_TEST_UTIL_MEMORY_UTIL_H_
-
-#include <errno.h>
-#include <stddef.h>
-#include <stdint.h>
-#include <sys/mman.h>
-
-#include "absl/strings/str_format.h"
-#include "absl/strings/string_view.h"
-#include "test/util/logging.h"
-#include "test/util/posix_error.h"
-#include "test/util/save_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// RAII type for mmap'ed memory. Only usable in tests due to use of a test-only
-// macro that can't be named without invoking the presubmit's wrath.
-class Mapping {
- public:
- // Constructs a mapping that owns nothing.
- Mapping() = default;
-
- // Constructs a mapping that owns the mmapped memory [ptr, ptr+len). Most
- // users should use Mmap or MmapAnon instead.
- Mapping(void* ptr, size_t len) : ptr_(ptr), len_(len) {}
-
- Mapping(Mapping&& orig) : ptr_(orig.ptr_), len_(orig.len_) { orig.release(); }
-
- Mapping& operator=(Mapping&& orig) {
- ptr_ = orig.ptr_;
- len_ = orig.len_;
- orig.release();
- return *this;
- }
-
- Mapping(Mapping const&) = delete;
- Mapping& operator=(Mapping const&) = delete;
-
- ~Mapping() { reset(); }
-
- void* ptr() const { return ptr_; }
- size_t len() const { return len_; }
-
- // Returns a pointer to the end of the mapping. Useful for when the mapping
- // is used as a thread stack.
- void* endptr() const { return reinterpret_cast<void*>(addr() + len_); }
-
- // Returns the start of this mapping cast to uintptr_t for ease of pointer
- // arithmetic.
- uintptr_t addr() const { return reinterpret_cast<uintptr_t>(ptr_); }
-
- // Returns the end of this mapping cast to uintptr_t for ease of pointer
- // arithmetic.
- uintptr_t endaddr() const { return reinterpret_cast<uintptr_t>(endptr()); }
-
- // Returns this mapping as a StringPiece for ease of comparison.
- //
- // This function is named view in anticipation of the eventual replacement of
- // StringPiece with std::string_view.
- absl::string_view view() const {
- return absl::string_view(static_cast<char const*>(ptr_), len_);
- }
-
- // These are both named reset for consistency with standard smart pointers.
-
- void reset(void* ptr, size_t len) {
- if (len_) {
- TEST_PCHECK(munmap(ptr_, len_) == 0);
- }
- ptr_ = ptr;
- len_ = len;
- }
-
- void reset() { reset(nullptr, 0); }
-
- void release() {
- ptr_ = nullptr;
- len_ = 0;
- }
-
- private:
- void* ptr_ = nullptr;
- size_t len_ = 0;
-};
-
-// Wrapper around mmap(2) that returns a Mapping.
-inline PosixErrorOr<Mapping> Mmap(void* addr, size_t length, int prot,
- int flags, int fd, off_t offset) {
- void* ptr = mmap(addr, length, prot, flags, fd, offset);
- if (ptr == MAP_FAILED) {
- return PosixError(
- errno, absl::StrFormat("mmap(%p, %d, %x, %x, %d, %d)", addr, length,
- prot, flags, fd, offset));
- }
- MaybeSave();
- return Mapping(ptr, length);
-}
-
-// Convenience wrapper around Mmap for anonymous mappings.
-inline PosixErrorOr<Mapping> MmapAnon(size_t length, int prot, int flags) {
- return Mmap(nullptr, length, prot, flags | MAP_ANONYMOUS, -1, 0);
-}
-
-// Wrapper for mremap that returns a PosixErrorOr<>, since the return type of
-// void* isn't directly compatible with SyscallSucceeds.
-inline PosixErrorOr<void*> Mremap(void* old_address, size_t old_size,
- size_t new_size, int flags,
- void* new_address) {
- void* rv = mremap(old_address, old_size, new_size, flags, new_address);
- if (rv == MAP_FAILED) {
- return PosixError(errno, "mremap failed");
- }
- return rv;
-}
-
-// Returns true if the page containing addr is mapped.
-inline bool IsMapped(uintptr_t addr) {
- int const rv = msync(reinterpret_cast<void*>(addr & ~(kPageSize - 1)),
- kPageSize, MS_ASYNC);
- if (rv == 0) {
- return true;
- }
- TEST_PCHECK_MSG(errno == ENOMEM, "msync failed with unexpected errno");
- return false;
-}
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_UTIL_MEMORY_UTIL_H_
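A minimal sketch of the Mapping/MmapAnon pattern, assuming kPageSize from test/util/test_util.h (already pulled in by this header) and a gtest environment; the test name is illustrative.

TEST(MemoryUtilExample, MmapAnonRoundTrip) {
  // Two pages of private anonymous memory; munmap runs automatically when
  // `m` goes out of scope.
  Mapping m = ASSERT_NO_ERRNO_AND_VALUE(
      MmapAnon(2 * kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));
  EXPECT_TRUE(IsMapped(m.addr()));
  static_cast<char*>(m.ptr())[0] = 'x';
  EXPECT_EQ(m.view()[0], 'x');
}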
diff --git a/test/util/mount_util.cc b/test/util/mount_util.cc
deleted file mode 100644
index 48640d6a1..000000000
--- a/test/util/mount_util.cc
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/util/mount_util.h"
-
-#include <sys/syscall.h>
-#include <unistd.h>
-
-#include "absl/strings/numbers.h"
-#include "absl/strings/str_split.h"
-
-namespace gvisor {
-namespace testing {
-
-PosixErrorOr<std::vector<ProcMountsEntry>> ProcSelfMountsEntries() {
- std::string content;
- RETURN_IF_ERRNO(GetContents("/proc/self/mounts", &content));
- return ProcSelfMountsEntriesFrom(content);
-}
-
-PosixErrorOr<std::vector<ProcMountsEntry>> ProcSelfMountsEntriesFrom(
- const std::string& content) {
- std::vector<ProcMountsEntry> entries;
- std::vector<std::string> lines =
- absl::StrSplit(content, absl::ByChar('\n'), absl::AllowEmpty());
- std::cerr << "<contents of /proc/self/mounts>" << std::endl;
- for (const std::string& line : lines) {
- std::cerr << line << std::endl;
- if (line.empty()) {
- continue;
- }
-
- // Parse a single entry from /proc/self/mounts.
- //
- // Example entries:
- //
- // sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
- // proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
- // ^ ^ ^ ^ ^ ^
- // 0 1 2 3 4 5
-
- ProcMountsEntry entry;
- std::vector<std::string> fields =
- absl::StrSplit(line, absl::ByChar(' '), absl::AllowEmpty());
- if (fields.size() != 6) {
- return PosixError(
- EINVAL, absl::StrFormat("Not enough tokens, got %d, content: <<%s>>",
- fields.size(), content));
- }
-
- entry.spec = fields[0];
- entry.mount_point = fields[1];
- entry.fstype = fields[2];
- entry.mount_opts = fields[3];
- ASSIGN_OR_RETURN_ERRNO(entry.dump, Atoi<uint32_t>(fields[4]));
- ASSIGN_OR_RETURN_ERRNO(entry.fsck, Atoi<uint32_t>(fields[5]));
-
- entries.push_back(entry);
- }
- std::cerr << "<end of /proc/self/mounts>" << std::endl;
-
- return entries;
-}
-
-PosixErrorOr<std::vector<ProcMountInfoEntry>> ProcSelfMountInfoEntries() {
- std::string content;
- RETURN_IF_ERRNO(GetContents("/proc/self/mountinfo", &content));
- return ProcSelfMountInfoEntriesFrom(content);
-}
-
-PosixErrorOr<std::vector<ProcMountInfoEntry>> ProcSelfMountInfoEntriesFrom(
- const std::string& content) {
- std::vector<ProcMountInfoEntry> entries;
- std::vector<std::string> lines =
- absl::StrSplit(content, absl::ByChar('\n'), absl::AllowEmpty());
- std::cerr << "<contents of /proc/self/mountinfo>" << std::endl;
- for (const std::string& line : lines) {
- std::cerr << line << std::endl;
- if (line.empty()) {
- continue;
- }
-
- // Parse a single entry from /proc/self/mountinfo.
- //
- // Example entries:
- //
- // 22 28 0:20 / /sys rw,relatime shared:7 - sysfs sysfs rw
- // 23 28 0:21 / /proc rw,relatime shared:14 - proc proc rw
- // ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^
- // 0 1 2 3 4 5 6 7 8 9 10
-
- ProcMountInfoEntry entry;
- std::vector<std::string> fields =
- absl::StrSplit(line, absl::ByChar(' '), absl::AllowEmpty());
- if (fields.size() < 10 || fields.size() > 11) {
- return PosixError(
- EINVAL, absl::StrFormat(
- "Unexpected number of tokens, got %d, content: <<%s>>",
- fields.size(), content));
- }
-
- ASSIGN_OR_RETURN_ERRNO(entry.id, Atoi<uint64_t>(fields[0]));
- ASSIGN_OR_RETURN_ERRNO(entry.parent_id, Atoi<uint64_t>(fields[1]));
-
- std::vector<std::string> devs =
- absl::StrSplit(fields[2], absl::ByChar(':'));
- if (devs.size() != 2) {
- return PosixError(
- EINVAL,
- absl::StrFormat(
- "Failed to parse dev number field %s: too many tokens, got %d",
- fields[2], devs.size()));
- }
- ASSIGN_OR_RETURN_ERRNO(entry.major, Atoi<dev_t>(devs[0]));
- ASSIGN_OR_RETURN_ERRNO(entry.minor, Atoi<dev_t>(devs[1]));
-
- entry.root = fields[3];
- entry.mount_point = fields[4];
- entry.mount_opts = fields[5];
-
- // The optional field (fields[6]) may or may not be present; we can tell
- // based on the total number of tokens.
- int off = -1;
- if (fields.size() == 11) {
- entry.optional = fields[6];
- off = 0;
- }
- // Field 7 is the optional field terminator char '-'.
- entry.fstype = fields[8 + off];
- entry.mount_source = fields[9 + off];
- entry.super_opts = fields[10 + off];
-
- entries.push_back(entry);
- }
- std::cerr << "<end of /proc/self/mountinfo>" << std::endl;
-
- return entries;
-}
-
-absl::flat_hash_map<std::string, std::string> ParseMountOptions(
- std::string mopts) {
- absl::flat_hash_map<std::string, std::string> entries;
- const std::vector<std::string> tokens =
- absl::StrSplit(mopts, absl::ByChar(','), absl::AllowEmpty());
- for (const auto& token : tokens) {
- std::vector<std::string> kv =
- absl::StrSplit(token, absl::MaxSplits('=', 1));
- if (kv.size() == 2) {
- entries[kv[0]] = kv[1];
- } else if (kv.size() == 1) {
- entries[kv[0]] = "";
- } else {
- TEST_CHECK_MSG(
- false,
- absl::StrFormat(
- "Invalid mount option token '%s', was split into %d subtokens",
- token, kv.size())
- .c_str());
- }
- }
- return entries;
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/util/mount_util.h b/test/util/mount_util.h
deleted file mode 100644
index 3f8a1c0f1..000000000
--- a/test/util/mount_util.h
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_UTIL_MOUNT_UTIL_H_
-#define GVISOR_TEST_UTIL_MOUNT_UTIL_H_
-
-#include <errno.h>
-#include <sys/mount.h>
-
-#include <functional>
-#include <string>
-
-#include "gmock/gmock.h"
-#include "absl/container/flat_hash_map.h"
-#include "test/util/cleanup.h"
-#include "test/util/posix_error.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Mount mounts the filesystem, and unmounts when the returned reference is
-// destroyed.
-inline PosixErrorOr<Cleanup> Mount(const std::string& source,
- const std::string& target,
- const std::string& fstype,
- uint64_t mountflags, const std::string& data,
- uint64_t umountflags) {
- if (mount(source.c_str(), target.c_str(), fstype.c_str(), mountflags,
- data.c_str()) == -1) {
- return PosixError(errno, "mount failed");
- }
- return Cleanup([target, umountflags]() {
- EXPECT_THAT(umount2(target.c_str(), umountflags), SyscallSucceeds());
- });
-}
-
-struct ProcMountsEntry {
- std::string spec;
- std::string mount_point;
- std::string fstype;
- std::string mount_opts;
- uint32_t dump;
- uint32_t fsck;
-};
-
-// ProcSelfMountsEntries returns a parsed representation of /proc/self/mounts.
-PosixErrorOr<std::vector<ProcMountsEntry>> ProcSelfMountsEntries();
-
-// ProcSelfMountsEntriesFrom returns a parsed representation of mounts from
-// the provided content.
-PosixErrorOr<std::vector<ProcMountsEntry>> ProcSelfMountsEntriesFrom(
- const std::string& content);
-
-struct ProcMountInfoEntry {
- uint64_t id;
- uint64_t parent_id;
- dev_t major;
- dev_t minor;
- std::string root;
- std::string mount_point;
- std::string mount_opts;
- std::string optional;
- std::string fstype;
- std::string mount_source;
- std::string super_opts;
-};
-
-// ProcSelfMountInfoEntries returns a parsed representation of
-// /proc/self/mountinfo.
-PosixErrorOr<std::vector<ProcMountInfoEntry>> ProcSelfMountInfoEntries();
-
-// ProcSelfMountInfoEntriesFrom returns a parsed representation of
-// mountinfo from the provided content.
-PosixErrorOr<std::vector<ProcMountInfoEntry>> ProcSelfMountInfoEntriesFrom(
- const std::string&);
-
-// Interprets the input string mopts as a comma separated list of mount
-// options. A mount option can either be just a value, or a key=value pair. For
-// example, the string "rw,relatime,fd=7" will be parsed into a map like { "rw":
-// "", "relatime": "", "fd": "7" }.
-absl::flat_hash_map<std::string, std::string> ParseMountOptions(
- std::string mopts);
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_UTIL_MOUNT_UTIL_H_
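For reference, a minimal sketch of how the deleted mount helpers fit together in a syscall test. TempPath (assumed from test/util/temp_path.h), the tmpfs options, and the test name are illustrative, not part of this change.

// Sketch: mount a tmpfs, find it in /proc/self/mountinfo, and parse options.
#include "gtest/gtest.h"
#include "test/util/mount_util.h"
#include "test/util/posix_error.h"
#include "test/util/temp_path.h"

namespace gvisor {
namespace testing {
namespace {

TEST(MountUtilSketch, TmpfsShowsUpInMountInfo) {
  auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
  // The tmpfs is unmounted automatically when `m` is destroyed.
  auto m = ASSERT_NO_ERRNO_AND_VALUE(
      Mount("tmpfs", dir.path(), "tmpfs", 0, "mode=0700", 0));

  // The new mount should be visible in /proc/self/mountinfo.
  auto entries = ASSERT_NO_ERRNO_AND_VALUE(ProcSelfMountInfoEntries());
  bool found = false;
  for (const auto& e : entries) {
    found = found || (e.mount_point == dir.path() && e.fstype == "tmpfs");
  }
  EXPECT_TRUE(found);

  // ParseMountOptions turns an option string into a key/value map, e.g.
  // "rw,relatime,fd=7" -> { "rw": "", "relatime": "", "fd": "7" }.
  auto opts = ParseMountOptions("rw,relatime,fd=7");
  EXPECT_EQ(opts["fd"], "7");
  EXPECT_EQ(opts["rw"], "");
}

}  // namespace
}  // namespace testing
}  // namespace gvisor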
diff --git a/test/util/mount_util_test.cc b/test/util/mount_util_test.cc
deleted file mode 100644
index 2bcb6cc43..000000000
--- a/test/util/mount_util_test.cc
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/util/mount_util.h"
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(ParseMounts, Mounts) {
- auto entries = ASSERT_NO_ERRNO_AND_VALUE(ProcSelfMountsEntriesFrom(
- R"proc(sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
-proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
- /mnt tmpfs rw,noexec 0 0
-)proc"));
- EXPECT_EQ(entries.size(), 3);
-}
-
-TEST(ParseMounts, MountInfo) {
- auto entries = ASSERT_NO_ERRNO_AND_VALUE(ProcSelfMountInfoEntriesFrom(
- R"proc(22 28 0:20 / /sys rw,relatime shared:7 - sysfs sysfs rw
-23 28 0:21 / /proc rw,relatime shared:14 - proc proc rw
-2007 8844 0:278 / /mnt rw,noexec - tmpfs rw,mode=123,uid=268601820,gid=5000
-)proc"));
- EXPECT_EQ(entries.size(), 3);
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/util/multiprocess_util.cc b/test/util/multiprocess_util.cc
deleted file mode 100644
index a6b0de24b..000000000
--- a/test/util/multiprocess_util.cc
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/util/multiprocess_util.h"
-
-#include <asm/unistd.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <signal.h>
-#include <sys/prctl.h>
-#include <unistd.h>
-
-#include "absl/strings/str_cat.h"
-#include "test/util/cleanup.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/posix_error.h"
-#include "test/util/save_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-// exec_fn wraps a variant of the exec family, e.g. execve or execveat.
-PosixErrorOr<Cleanup> ForkAndExecHelper(const std::function<void()>& exec_fn,
- const std::function<void()>& fn,
- pid_t* child, int* execve_errno) {
- int pfds[2];
- int ret = pipe2(pfds, O_CLOEXEC);
- if (ret < 0) {
- return PosixError(errno, "pipe failed");
- }
- FileDescriptor rfd(pfds[0]);
- FileDescriptor wfd(pfds[1]);
-
- int parent_stdout = dup(STDOUT_FILENO);
- if (parent_stdout < 0) {
- return PosixError(errno, "dup stdout");
- }
- int parent_stderr = dup(STDERR_FILENO);
-  if (parent_stderr < 0) {
- return PosixError(errno, "dup stderr");
- }
-
- pid_t pid = fork();
- if (pid < 0) {
- return PosixError(errno, "fork failed");
- } else if (pid == 0) {
- // Child.
- rfd.reset();
- if (dup2(parent_stdout, STDOUT_FILENO) < 0) {
- _exit(3);
- }
- if (dup2(parent_stderr, STDERR_FILENO) < 0) {
- _exit(4);
- }
- close(parent_stdout);
- close(parent_stderr);
-
-    // Clean ourselves up in case the parent doesn't.
- if (prctl(PR_SET_PDEATHSIG, SIGKILL)) {
- _exit(3);
- }
-
- if (fn) {
- fn();
- }
-
- // Call variant of exec function.
- exec_fn();
-
- int error = errno;
- if (WriteFd(pfds[1], &error, sizeof(error)) != sizeof(error)) {
- // We can't do much if the write fails, but we can at least exit with a
- // different code.
- _exit(2);
- }
- _exit(1);
- }
-
- // Parent.
- if (child) {
- *child = pid;
- }
-
- auto cleanup = Cleanup([pid] {
- kill(pid, SIGKILL);
- RetryEINTR(waitpid)(pid, nullptr, 0);
- });
-
- wfd.reset();
-
- int read_errno;
- ret = ReadFd(rfd.get(), &read_errno, sizeof(read_errno));
- if (ret == 0) {
- // Other end of the pipe closed, execve must have succeeded.
- read_errno = 0;
- } else if (ret < 0) {
- return PosixError(errno, "read pipe failed");
- } else if (ret != sizeof(read_errno)) {
- return PosixError(EPIPE, absl::StrCat("pipe read wrong size ", ret));
- }
-
- if (execve_errno) {
- *execve_errno = read_errno;
- }
-
- return std::move(cleanup);
-}
-
-} // namespace
-
-PosixErrorOr<Cleanup> ForkAndExec(const std::string& filename,
- const ExecveArray& argv,
- const ExecveArray& envv,
- const std::function<void()>& fn, pid_t* child,
- int* execve_errno) {
- char* const* argv_data = argv.get();
- char* const* envv_data = envv.get();
- const std::function<void()> exec_fn = [=] {
- execve(filename.c_str(), argv_data, envv_data);
- };
- return ForkAndExecHelper(exec_fn, fn, child, execve_errno);
-}
-
-PosixErrorOr<Cleanup> ForkAndExecveat(const int32_t dirfd,
- const std::string& pathname,
- const ExecveArray& argv,
- const ExecveArray& envv, const int flags,
- const std::function<void()>& fn,
- pid_t* child, int* execve_errno) {
- char* const* argv_data = argv.get();
- char* const* envv_data = envv.get();
- const std::function<void()> exec_fn = [=] {
- syscall(__NR_execveat, dirfd, pathname.c_str(), argv_data, envv_data,
- flags);
- };
- return ForkAndExecHelper(exec_fn, fn, child, execve_errno);
-}
-
-PosixErrorOr<int> InForkedProcess(const std::function<void()>& fn) {
- pid_t pid = fork();
- if (pid == 0) {
- fn();
- TEST_CHECK_MSG(!::testing::Test::HasFailure(),
- "EXPECT*/ASSERT* failed. These are not async-signal-safe "
- "and must not be called from fn.");
- _exit(0);
- }
- MaybeSave();
- if (pid < 0) {
- return PosixError(errno, "fork failed");
- }
-
- int status;
- if (waitpid(pid, &status, 0) < 0) {
- return PosixError(errno, "waitpid failed");
- }
-
- return status;
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/util/multiprocess_util.h b/test/util/multiprocess_util.h
deleted file mode 100644
index 840fde4ee..000000000
--- a/test/util/multiprocess_util.h
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_UTIL_MULTIPROCESS_UTIL_H_
-#define GVISOR_TEST_UTIL_MULTIPROCESS_UTIL_H_
-
-#include <unistd.h>
-
-#include <algorithm>
-#include <functional>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "absl/strings/string_view.h"
-#include "test/util/cleanup.h"
-#include "test/util/posix_error.h"
-
-namespace gvisor {
-namespace testing {
-
-// Immutable holder for a dynamically-sized array of pointers to mutable char,
-// terminated by a null pointer, as required for the argv and envp arguments to
-// execve(2).
-class ExecveArray {
- public:
- // Constructs an empty ExecveArray.
- ExecveArray() = default;
-
- // Constructs an ExecveArray by copying strings from the given range. T must
- // be a range over ranges of char.
- template <typename T>
- explicit ExecveArray(T const& strs) : ExecveArray(strs.begin(), strs.end()) {}
-
- // Constructs an ExecveArray by copying strings from [first, last). InputIt
- // must be an input iterator over a range over char.
- template <typename InputIt>
- ExecveArray(InputIt first, InputIt last) {
- std::vector<size_t> offsets;
- auto output_it = std::back_inserter(str_);
- for (InputIt it = first; it != last; ++it) {
- offsets.push_back(str_.size());
- auto const& s = *it;
- std::copy(s.begin(), s.end(), output_it);
- str_.push_back('\0');
- }
- ptrs_.reserve(offsets.size() + 1);
- for (auto offset : offsets) {
- ptrs_.push_back(str_.data() + offset);
- }
- ptrs_.push_back(nullptr);
- }
-
- // Constructs an ExecveArray by copying strings from list. This overload must
- // exist independently of the single-argument template constructor because
- // std::initializer_list does not participate in template argument deduction
- // (i.e. cannot be type-inferred in an invocation of the templated
- // constructor).
- /* implicit */ ExecveArray(std::initializer_list<absl::string_view> list)
- : ExecveArray(list.begin(), list.end()) {}
-
- // Disable move construction and assignment since ptrs_ points into str_.
- ExecveArray(ExecveArray&&) = delete;
- ExecveArray& operator=(ExecveArray&&) = delete;
-
- char* const* get() const { return ptrs_.data(); }
- size_t get_size() { return str_.size(); }
-
- private:
- std::vector<char> str_;
- std::vector<char*> ptrs_;
-};
-
-// Simplified version of SubProcess. Returns OK and a cleanup function to kill
-// the child if it made it to execve.
-//
-// fn is run between fork and exec. If it needs to fail, it should exit the
-// process.
-//
-// The child pid is returned via child, if provided.
-// execve's error code is returned via execve_errno, if provided.
-PosixErrorOr<Cleanup> ForkAndExec(const std::string& filename,
- const ExecveArray& argv,
- const ExecveArray& envv,
- const std::function<void()>& fn, pid_t* child,
- int* execve_errno);
-
-inline PosixErrorOr<Cleanup> ForkAndExec(const std::string& filename,
- const ExecveArray& argv,
- const ExecveArray& envv, pid_t* child,
- int* execve_errno) {
- return ForkAndExec(
- filename, argv, envv, [] {}, child, execve_errno);
-}
-
-// Equivalent to ForkAndExec, except using dirfd and flags with execveat.
-PosixErrorOr<Cleanup> ForkAndExecveat(int32_t dirfd,
- const std::string& pathname,
- const ExecveArray& argv,
- const ExecveArray& envv, int flags,
- const std::function<void()>& fn,
- pid_t* child, int* execve_errno);
-
-inline PosixErrorOr<Cleanup> ForkAndExecveat(int32_t dirfd,
- const std::string& pathname,
- const ExecveArray& argv,
- const ExecveArray& envv, int flags,
- pid_t* child, int* execve_errno) {
- return ForkAndExecveat(
- dirfd, pathname, argv, envv, flags, [] {}, child, execve_errno);
-}
-
-// Calls fn in a forked subprocess and returns the exit status of the
-// subprocess.
-//
-// fn must be async-signal-safe. Use of ASSERT/EXPECT functions is prohibited.
-// Use TEST_CHECK variants instead.
-PosixErrorOr<int> InForkedProcess(const std::function<void()>& fn);
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_UTIL_MULTIPROCESS_UTIL_H_
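A minimal sketch of typical ForkAndExec() and InForkedProcess() usage; it assumes /bin/true exists in the test environment and that TEST_CHECK comes from test/util/logging.h. The test names are illustrative.

#include <sys/wait.h>
#include <unistd.h>

#include "gtest/gtest.h"
#include "test/util/logging.h"
#include "test/util/multiprocess_util.h"
#include "test/util/posix_error.h"

namespace gvisor {
namespace testing {
namespace {

TEST(MultiprocessSketch, ForkAndExecTrue) {
  pid_t child;
  int execve_errno;
  // argv and envv are ExecveArrays, built here from initializer lists.
  auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(
      ForkAndExec("/bin/true", {"true"}, {}, &child, &execve_errno));
  // execve_errno is 0 when the execve itself succeeded.
  EXPECT_EQ(execve_errno, 0);
  EXPECT_GT(child, 0);
  // `cleanup` kills and reaps the child when it goes out of scope.
}

TEST(MultiprocessSketch, InForkedProcess) {
  // The lambda runs in the forked child; only TEST_CHECK-style assertions are
  // safe there, not EXPECT*/ASSERT*.
  int status = ASSERT_NO_ERRNO_AND_VALUE(
      InForkedProcess([] { TEST_CHECK(getpid() > 1); }));
  EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0);
}

}  // namespace
}  // namespace testing
}  // namespace gvisor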
diff --git a/test/util/platform_util.cc b/test/util/platform_util.cc
deleted file mode 100644
index c9200d381..000000000
--- a/test/util/platform_util.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/util/platform_util.h"
-
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-PlatformSupport PlatformSupport32Bit() {
- if (GvisorPlatform() == Platform::kPtrace ||
- GvisorPlatform() == Platform::kKVM) {
- return PlatformSupport::NotSupported;
- } else {
- return PlatformSupport::Allowed;
- }
-}
-
-PlatformSupport PlatformSupportAlignmentCheck() {
- return PlatformSupport::Allowed;
-}
-
-PlatformSupport PlatformSupportMultiProcess() {
- return PlatformSupport::Allowed;
-}
-
-PlatformSupport PlatformSupportInt3() {
- if (GvisorPlatform() == Platform::kKVM) {
- return PlatformSupport::NotSupported;
- } else {
- return PlatformSupport::Allowed;
- }
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/util/platform_util.h b/test/util/platform_util.h
deleted file mode 100644
index 28cc92371..000000000
--- a/test/util/platform_util.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_UTIL_PLATFORM_UTIL_H_
-#define GVISOR_TEST_UTIL_PLATFORM_UTIL_H_
-
-namespace gvisor {
-namespace testing {
-
-// PlatformSupport is a generic enumeration of classes of support.
-//
-// It is up to the individual functions and callers to agree on the precise
-// definition for each case. The documentation here generally uses 32-bit
-// support as an example. Many cases will use only NotSupported and Allowed.
-enum class PlatformSupport {
- // The feature is not supported on the current platform.
- //
- // In the case of 32-bit, this means that calls will generally be interpreted
- // as 64-bit calls, and there is no support for 32-bit binaries, long calls,
- // etc. This usually means that the underlying implementation just pretends
- // that 32-bit doesn't exist.
- NotSupported,
-
- // Calls will be ignored by the kernel with a fixed error.
- Ignored,
-
- // Calls will result in a SIGSEGV or similar fault.
- Segfault,
-
- // The feature is supported as expected.
- //
- // In the case of 32-bit, this means that the system call or far call will be
- // handled properly.
- Allowed,
-};
-
-PlatformSupport PlatformSupport32Bit();
-PlatformSupport PlatformSupportAlignmentCheck();
-PlatformSupport PlatformSupportMultiProcess();
-PlatformSupport PlatformSupportInt3();
-
-} // namespace testing
-} // namespace gvisor
-
-#endif  // GVISOR_TEST_UTIL_PLATFORM_UTIL_H_
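A minimal sketch of how tests are typically gated on these platform helpers; SKIP_IF is assumed to come from test/util/test_util.h.

#include "gtest/gtest.h"
#include "test/util/platform_util.h"
#include "test/util/test_util.h"

namespace gvisor {
namespace testing {
namespace {

TEST(PlatformSketch, Int3OnlyWhereSupported) {
  // Skip the test on platforms (e.g. KVM) that cannot handle INT3.
  SKIP_IF(PlatformSupportInt3() == PlatformSupport::NotSupported);
  // ... the test body would execute a breakpoint instruction here ...
}

}  // namespace
}  // namespace testing
}  // namespace gvisor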
diff --git a/test/util/posix_error.cc b/test/util/posix_error.cc
deleted file mode 100644
index 8522e4c81..000000000
--- a/test/util/posix_error.cc
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/util/posix_error.h"
-
-#include <cassert>
-#include <cerrno>
-#include <cstring>
-#include <string>
-
-#include "absl/strings/str_cat.h"
-
-namespace gvisor {
-namespace testing {
-
-std::string PosixError::ToString() const {
- if (ok()) {
- return "No Error";
- }
-
- std::string ret;
-
- char strerrno_buf[1024] = {};
-
- auto res = strerror_r(errno_, strerrno_buf, sizeof(strerrno_buf));
-
-// The GNU version of strerror_r always returns a non-null char* pointing to a
-// buffer containing the stringified errno; the XSI version returns a positive
-// errno which indicates the result of writing the stringified errno into the
-// supplied buffer. The gymnastics below are needed to support both.
-#ifndef _GNU_SOURCE
- if (res != 0) {
-    ret = absl::StrCat("PosixError(errno=", errno_, " strerror_r FAILED(", res,
- "))");
- } else {
- ret = absl::StrCat("PosixError(errno=", errno_, " ", strerrno_buf, ")");
- }
-#else
- ret = absl::StrCat("PosixError(errno=", errno_, " ", res, ")");
-#endif
-
- if (strnlen(msg_, sizeof(msg_)) > 0) {
- ret.append(" ");
- ret.append(msg_);
- }
-
- return ret;
-}
-
-::std::ostream& operator<<(::std::ostream& os, const PosixError& e) {
- os << e.ToString();
- return os;
-}
-
-void PosixErrorIsMatcherCommonImpl::DescribeTo(std::ostream* os) const {
- *os << "has an errno value that ";
- code_matcher_.DescribeTo(os);
- *os << ", and has an error message that ";
- message_matcher_.DescribeTo(os);
-}
-
-void PosixErrorIsMatcherCommonImpl::DescribeNegationTo(std::ostream* os) const {
- *os << "has an errno value that ";
- code_matcher_.DescribeNegationTo(os);
- *os << ", or has an error message that ";
- message_matcher_.DescribeNegationTo(os);
-}
-
-bool PosixErrorIsMatcherCommonImpl::MatchAndExplain(
- const PosixError& error,
- ::testing::MatchResultListener* result_listener) const {
- ::testing::StringMatchResultListener inner_listener;
-
- inner_listener.Clear();
- if (!code_matcher_.MatchAndExplain(error.errno_value(), &inner_listener)) {
- return false;
- }
-
- if (!message_matcher_.Matches(error.message())) {
- return false;
- }
-
- return true;
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/util/posix_error.h b/test/util/posix_error.h
deleted file mode 100644
index 40853cb21..000000000
--- a/test/util/posix_error.h
+++ /dev/null
@@ -1,459 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_UTIL_POSIX_ERROR_H_
-#define GVISOR_TEST_UTIL_POSIX_ERROR_H_
-
-#include <string>
-#include <string_view>
-
-#include "gmock/gmock.h"
-#include "absl/base/attributes.h"
-#include "absl/strings/string_view.h"
-#include "absl/types/variant.h"
-#include "test/util/logging.h"
-
-namespace gvisor {
-namespace testing {
-
-// PosixError must be async-signal-safe.
-class ABSL_MUST_USE_RESULT PosixError {
- public:
- PosixError() {}
-
- explicit PosixError(int errno_value) : errno_(errno_value) {}
-
- PosixError(int errno_value, std::string_view msg) : errno_(errno_value) {
- // Check that `msg` will fit, leaving room for '\0' at the end.
- TEST_CHECK(msg.size() < sizeof(msg_));
- msg.copy(msg_, msg.size());
- }
-
- PosixError(PosixError&& other) = default;
- PosixError& operator=(PosixError&& other) = default;
- PosixError(const PosixError&) = default;
- PosixError& operator=(const PosixError&) = default;
-
- bool ok() const { return errno_ == 0; }
-
- // Returns a reference to *this to make matchers compatible with
- // PosixErrorOr.
- const PosixError& error() const { return *this; }
-
- int errno_value() const { return errno_; }
- const char* message() const { return msg_; }
-
- // ToString produces a full string representation of this posix error
- // including the printable representation of the errno and the error message.
- std::string ToString() const;
-
- // Ignores any errors. This method does nothing except potentially suppress
- // complaints from any tools that are checking that errors are not dropped on
- // the floor.
- void IgnoreError() const {}
-
- private:
- int errno_ = 0;
- char msg_[1024] = {};
-};
-
-template <typename T>
-class ABSL_MUST_USE_RESULT PosixErrorOr {
- public:
- // A PosixErrorOr will check fail if it is constructed with NoError().
- PosixErrorOr(const PosixError& error);
- PosixErrorOr(const T& value);
- PosixErrorOr(T&& value);
-
- PosixErrorOr(PosixErrorOr&& other) = default;
- PosixErrorOr& operator=(PosixErrorOr&& other) = default;
- PosixErrorOr(const PosixErrorOr&) = default;
- PosixErrorOr& operator=(const PosixErrorOr&) = default;
-
- // Conversion copy/move constructor, T must be convertible from U.
- template <typename U>
- friend class PosixErrorOr;
-
- template <typename U>
- PosixErrorOr(PosixErrorOr<U> other);
-
- template <typename U>
- PosixErrorOr& operator=(PosixErrorOr<U> other);
-
- // Returns true if this PosixErrorOr contains some T.
- bool ok() const;
-
- // Return a copy of the contained PosixError or NoError().
- PosixError error() const;
-
- // Returns a reference to our current value, or CHECK-fails if !this->ok().
- const T& ValueOrDie() const&;
- T& ValueOrDie() &;
- const T&& ValueOrDie() const&&;
- T&& ValueOrDie() &&;
-
- // Ignores any errors. This method does nothing except potentially suppress
- // complaints from any tools that are checking that errors are not dropped on
- // the floor.
- void IgnoreError() const {}
-
- private:
- absl::variant<T, PosixError> value_;
-
- friend class PosixErrorIsMatcherCommonImpl;
-};
-
-template <typename T>
-PosixErrorOr<T>::PosixErrorOr(const PosixError& error) : value_(error) {
- TEST_CHECK_MSG(
- !error.ok(),
-      "Constructing PosixErrorOr with NoError, e.g. errno 0, is not allowed.");
-}
-
-template <typename T>
-PosixErrorOr<T>::PosixErrorOr(const T& value) : value_(value) {}
-
-template <typename T>
-PosixErrorOr<T>::PosixErrorOr(T&& value) : value_(std::move(value)) {}
-
-// Conversion copy/move constructor, T must be convertible from U.
-template <typename T>
-template <typename U>
-inline PosixErrorOr<T>::PosixErrorOr(PosixErrorOr<U> other) {
- if (absl::holds_alternative<U>(other.value_)) {
- // T is convertible from U.
- value_ = absl::get<U>(std::move(other.value_));
- } else if (absl::holds_alternative<PosixError>(other.value_)) {
- value_ = absl::get<PosixError>(std::move(other.value_));
- } else {
- TEST_CHECK_MSG(false, "PosixErrorOr does not contain PosixError or value");
- }
-}
-
-template <typename T>
-template <typename U>
-inline PosixErrorOr<T>& PosixErrorOr<T>::operator=(PosixErrorOr<U> other) {
- if (absl::holds_alternative<U>(other.value_)) {
- // T is convertible from U.
- value_ = absl::get<U>(std::move(other.value_));
- } else if (absl::holds_alternative<PosixError>(other.value_)) {
- value_ = absl::get<PosixError>(std::move(other.value_));
- } else {
- TEST_CHECK_MSG(false, "PosixErrorOr does not contain PosixError or value");
- }
- return *this;
-}
-
-template <typename T>
-PosixError PosixErrorOr<T>::error() const {
- if (!absl::holds_alternative<PosixError>(value_)) {
- return PosixError();
- }
- return absl::get<PosixError>(value_);
-}
-
-template <typename T>
-bool PosixErrorOr<T>::ok() const {
- return absl::holds_alternative<T>(value_);
-}
-
-template <typename T>
-const T& PosixErrorOr<T>::ValueOrDie() const& {
- TEST_CHECK(absl::holds_alternative<T>(value_));
- return absl::get<T>(value_);
-}
-
-template <typename T>
-T& PosixErrorOr<T>::ValueOrDie() & {
- TEST_CHECK(absl::holds_alternative<T>(value_));
- return absl::get<T>(value_);
-}
-
-template <typename T>
-const T&& PosixErrorOr<T>::ValueOrDie() const&& {
- TEST_CHECK(absl::holds_alternative<T>(value_));
- return std::move(absl::get<T>(value_));
-}
-
-template <typename T>
-T&& PosixErrorOr<T>::ValueOrDie() && {
- TEST_CHECK(absl::holds_alternative<T>(value_));
- return std::move(absl::get<T>(value_));
-}
-
-extern ::std::ostream& operator<<(::std::ostream& os, const PosixError& e);
-
-template <typename T>
-::std::ostream& operator<<(::std::ostream& os, const PosixErrorOr<T>& e) {
- os << e.error();
- return os;
-}
-
-// NoError is a PosixError that represents a successful state, i.e. No Error.
-inline PosixError NoError() { return PosixError(); }
-
-// Monomorphic implementation of matcher IsPosixErrorOk() for a given type T.
-// T can be PosixError, PosixErrorOr<>, or a reference to either of them.
-template <typename T>
-class MonoPosixErrorIsOkMatcherImpl : public ::testing::MatcherInterface<T> {
- public:
- void DescribeTo(std::ostream* os) const override { *os << "is OK"; }
- void DescribeNegationTo(std::ostream* os) const override {
- *os << "is not OK";
- }
- bool MatchAndExplain(T actual_value,
- ::testing::MatchResultListener*) const override {
- return actual_value.ok();
- }
-};
-
-// Implements IsPosixErrorOkMatcher() as a polymorphic matcher.
-class IsPosixErrorOkMatcher {
- public:
- template <typename T>
- operator ::testing::Matcher<T>() const { // NOLINT
- return MakeMatcher(new MonoPosixErrorIsOkMatcherImpl<T>());
- }
-};
-
-// Monomorphic implementation of a matcher for a PosixErrorOr.
-template <typename PosixErrorOrType>
-class IsPosixErrorOkAndHoldsMatcherImpl
- : public ::testing::MatcherInterface<PosixErrorOrType> {
- public:
- using ValueType = typename std::remove_reference<decltype(
- std::declval<PosixErrorOrType>().ValueOrDie())>::type;
-
- template <typename InnerMatcher>
- explicit IsPosixErrorOkAndHoldsMatcherImpl(InnerMatcher&& inner_matcher)
- : inner_matcher_(::testing::SafeMatcherCast<const ValueType&>(
- std::forward<InnerMatcher>(inner_matcher))) {}
-
- void DescribeTo(std::ostream* os) const override {
- *os << "is OK and has a value that ";
- inner_matcher_.DescribeTo(os);
- }
-
- void DescribeNegationTo(std::ostream* os) const override {
- *os << "isn't OK or has a value that ";
- inner_matcher_.DescribeNegationTo(os);
- }
-
- bool MatchAndExplain(
- PosixErrorOrType actual_value,
- ::testing::MatchResultListener* listener) const override {
- // We can't extract the value if it doesn't contain one.
- if (!actual_value.ok()) {
- return false;
- }
-
- ::testing::StringMatchResultListener inner_listener;
- const bool matches = inner_matcher_.MatchAndExplain(
- actual_value.ValueOrDie(), &inner_listener);
- const std::string inner_explanation = inner_listener.str();
- *listener << "has a value "
- << ::testing::PrintToString(actual_value.ValueOrDie());
-
- if (!inner_explanation.empty()) {
- *listener << " " << inner_explanation;
- }
- return matches;
- }
-
- private:
- const ::testing::Matcher<const ValueType&> inner_matcher_;
-};
-
-// Implements IsOkAndHolds() as a polymorphic matcher.
-template <typename InnerMatcher>
-class IsPosixErrorOkAndHoldsMatcher {
- public:
- explicit IsPosixErrorOkAndHoldsMatcher(InnerMatcher inner_matcher)
- : inner_matcher_(std::move(inner_matcher)) {}
-
- // Converts this polymorphic matcher to a monomorphic one of the given type.
- // PosixErrorOrType can be either PosixErrorOr<T> or a reference to
- // PosixErrorOr<T>.
- template <typename PosixErrorOrType>
- operator ::testing::Matcher<PosixErrorOrType>() const { // NOLINT
- return ::testing::MakeMatcher(
- new IsPosixErrorOkAndHoldsMatcherImpl<PosixErrorOrType>(
- inner_matcher_));
- }
-
- private:
- const InnerMatcher inner_matcher_;
-};
-
-// PosixErrorIs() is a polymorphic matcher. This class is the common
-// implementation of it shared by all types T where PosixErrorIs() can be
-// used as a Matcher<T>.
-class PosixErrorIsMatcherCommonImpl {
- public:
- PosixErrorIsMatcherCommonImpl(
- ::testing::Matcher<int> code_matcher,
- ::testing::Matcher<const std::string&> message_matcher)
- : code_matcher_(std::move(code_matcher)),
- message_matcher_(std::move(message_matcher)) {}
-
- void DescribeTo(std::ostream* os) const;
-
- void DescribeNegationTo(std::ostream* os) const;
-
- bool MatchAndExplain(const PosixError& error,
- ::testing::MatchResultListener* result_listener) const;
-
- template <typename T>
- bool MatchAndExplain(const PosixErrorOr<T>& error_or,
- ::testing::MatchResultListener* result_listener) const {
- if (error_or.ok()) {
- *result_listener << "has a value "
- << ::testing::PrintToString(error_or.ValueOrDie());
- return false;
- }
-
- return MatchAndExplain(error_or.error(), result_listener);
- }
-
- private:
- const ::testing::Matcher<int> code_matcher_;
- const ::testing::Matcher<const std::string&> message_matcher_;
-};
-
-// Monomorphic implementation of matcher PosixErrorIs() for a given type
-// T. T can be PosixError, PosixErrorOr<>, or a reference to either of them.
-template <typename T>
-class MonoPosixErrorIsMatcherImpl : public ::testing::MatcherInterface<T> {
- public:
- explicit MonoPosixErrorIsMatcherImpl(
- PosixErrorIsMatcherCommonImpl common_impl)
- : common_impl_(std::move(common_impl)) {}
-
- void DescribeTo(std::ostream* os) const override {
- common_impl_.DescribeTo(os);
- }
-
- void DescribeNegationTo(std::ostream* os) const override {
- common_impl_.DescribeNegationTo(os);
- }
-
- bool MatchAndExplain(
- T actual_value,
- ::testing::MatchResultListener* result_listener) const override {
- return common_impl_.MatchAndExplain(actual_value, result_listener);
- }
-
- private:
- PosixErrorIsMatcherCommonImpl common_impl_;
-};
-
-inline ::testing::Matcher<int> ToErrorCodeMatcher(
- const ::testing::Matcher<int>& m) {
- return m;
-}
-
-// Implements PosixErrorIs() as a polymorphic matcher.
-class PosixErrorIsMatcher {
- public:
- template <typename ErrorCodeMatcher>
- PosixErrorIsMatcher(ErrorCodeMatcher&& code_matcher,
- ::testing::Matcher<const std::string&> message_matcher)
- : common_impl_(
- ToErrorCodeMatcher(std::forward<ErrorCodeMatcher>(code_matcher)),
- std::move(message_matcher)) {}
-
- // Converts this polymorphic matcher to a monomorphic matcher of the
-  // given type. T can be PosixErrorOr<>, PosixError, or a reference to
- // either of them.
- template <typename T>
- operator ::testing::Matcher<T>() const { // NOLINT
- return MakeMatcher(new MonoPosixErrorIsMatcherImpl<T>(common_impl_));
- }
-
- private:
- const PosixErrorIsMatcherCommonImpl common_impl_;
-};
-
-// Returns a gMock matcher that matches a PosixError or PosixErrorOr<> whose
-// error code matches code_matcher, and whose error message matches
-// message_matcher.
-template <typename ErrorCodeMatcher>
-PosixErrorIsMatcher PosixErrorIs(
- ErrorCodeMatcher&& code_matcher,
- ::testing::Matcher<const std::string&> message_matcher) {
- return PosixErrorIsMatcher(std::forward<ErrorCodeMatcher>(code_matcher),
- std::move(message_matcher));
-}
-
-// Returns a gMock matcher that matches a PosixError or PosixErrorOr<> whose
-// error code matches code_matcher.
-template <typename ErrorCodeMatcher>
-PosixErrorIsMatcher PosixErrorIs(ErrorCodeMatcher&& code_matcher) {
- return PosixErrorIsMatcher(std::forward<ErrorCodeMatcher>(code_matcher),
- ::testing::_);
-}
-
-// Returns a gMock matcher that matches a PosixErrorOr<> which is ok() and
-// value matches the inner matcher.
-template <typename InnerMatcher>
-IsPosixErrorOkAndHoldsMatcher<typename std::decay<InnerMatcher>::type>
-IsPosixErrorOkAndHolds(InnerMatcher&& inner_matcher) {
- return IsPosixErrorOkAndHoldsMatcher<typename std::decay<InnerMatcher>::type>(
- std::forward<InnerMatcher>(inner_matcher));
-}
-
-// Internal helper for concatenating macro values.
-#define POSIX_ERROR_IMPL_CONCAT_INNER_(x, y) x##y
-#define POSIX_ERROR_IMPL_CONCAT_(x, y) POSIX_ERROR_IMPL_CONCAT_INNER_(x, y)
-
-#define POSIX_ERROR_IMPL_ASSIGN_OR_RETURN_(posixerroror, lhs, rexpr) \
- auto posixerroror = (rexpr); \
- if (!posixerroror.ok()) { \
- return (posixerroror.error()); \
- } \
- lhs = std::move(posixerroror).ValueOrDie()
-
-#define EXPECT_NO_ERRNO(expression) \
- EXPECT_THAT(expression, IsPosixErrorOkMatcher())
-#define ASSERT_NO_ERRNO(expression) \
- ASSERT_THAT(expression, IsPosixErrorOkMatcher())
-
-#define ASSIGN_OR_RETURN_ERRNO(lhs, rexpr) \
- POSIX_ERROR_IMPL_ASSIGN_OR_RETURN_( \
- POSIX_ERROR_IMPL_CONCAT_(_status_or_value, __LINE__), lhs, rexpr)
-
-#define RETURN_IF_ERRNO(s) \
-  do {                     \
-    if (!(s).ok()) {       \
-      return (s);          \
-    }                      \
-  } while (false)
-
-#define ASSERT_NO_ERRNO_AND_VALUE(expr) \
- ({ \
- auto _expr_result = (expr); \
- ASSERT_NO_ERRNO(_expr_result); \
- std::move(_expr_result).ValueOrDie(); \
- })
-
-#define EXPECT_NO_ERRNO_AND_VALUE(expr) \
- ({ \
- auto _expr_result = (expr); \
- EXPECT_NO_ERRNO(_expr_result); \
- std::move(_expr_result).ValueOrDie(); \
- })
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_UTIL_POSIX_ERROR_H_
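A minimal sketch of the PosixErrorOr workflow together with the gMock matchers; ReadCount() and IsEmpty() are hypothetical helpers invented for illustration.

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

#include <string>

#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "test/util/posix_error.h"

namespace gvisor {
namespace testing {
namespace {

// Hypothetical helper: number of bytes readable from `path`, or a PosixError.
PosixErrorOr<size_t> ReadCount(const std::string& path) {
  int fd = open(path.c_str(), O_RDONLY);
  if (fd < 0) {
    return PosixError(errno, "open failed");
  }
  char buf[128];
  ssize_t n = read(fd, buf, sizeof(buf));
  close(fd);
  if (n < 0) {
    return PosixError(errno, "read failed");
  }
  return static_cast<size_t>(n);
}

// Callers propagate failures with ASSIGN_OR_RETURN_ERRNO.
PosixErrorOr<bool> IsEmpty(const std::string& path) {
  ASSIGN_OR_RETURN_ERRNO(size_t n, ReadCount(path));
  return n == 0;
}

TEST(PosixErrorSketch, Matchers) {
  EXPECT_THAT(ReadCount("/proc/self/status"),
              IsPosixErrorOkAndHolds(::testing::Gt(0u)));
  EXPECT_THAT(ReadCount("/does/not/exist"),
              PosixErrorIs(ENOENT, ::testing::_));
  EXPECT_THAT(IsEmpty("/proc/self/status"), IsPosixErrorOkAndHolds(false));
}

}  // namespace
}  // namespace testing
}  // namespace gvisor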
diff --git a/test/util/posix_error_test.cc b/test/util/posix_error_test.cc
deleted file mode 100644
index bf9465abb..000000000
--- a/test/util/posix_error_test.cc
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/util/posix_error.h"
-
-#include <errno.h>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(PosixErrorTest, PosixError) {
- auto err = PosixError(EAGAIN);
- EXPECT_THAT(err, PosixErrorIs(EAGAIN, ""));
-}
-
-TEST(PosixErrorTest, PosixErrorOrPosixError) {
- auto err = PosixErrorOr<std::nullptr_t>(PosixError(EAGAIN));
- EXPECT_THAT(err, PosixErrorIs(EAGAIN, ""));
-}
-
-TEST(PosixErrorTest, PosixErrorOrNullptr) {
- auto err = PosixErrorOr<std::nullptr_t>(nullptr);
- EXPECT_TRUE(err.ok());
- EXPECT_NO_ERRNO(err);
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/util/proc_util.cc b/test/util/proc_util.cc
deleted file mode 100644
index 34d636ba9..000000000
--- a/test/util/proc_util.cc
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/util/proc_util.h"
-
-#include <algorithm>
-#include <iostream>
-#include <vector>
-
-#include "absl/strings/ascii.h"
-#include "absl/strings/str_cat.h"
-#include "absl/strings/str_split.h"
-#include "absl/strings/string_view.h"
-#include "test/util/fs_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// Parses a single line from /proc/<xxx>/maps.
-PosixErrorOr<ProcMapsEntry> ParseProcMapsLine(absl::string_view line) {
- ProcMapsEntry map_entry = {};
-
- // Limit splitting to 6 parts so that if there is a file path and it contains
- // spaces, the file path is not split.
- std::vector<std::string> parts =
- absl::StrSplit(line, absl::MaxSplits(' ', 5), absl::SkipEmpty());
-
- // parts.size() should be 6 if there is a file name specified, and 5
- // otherwise.
- if (parts.size() < 5) {
- return PosixError(EINVAL, absl::StrCat("Invalid line: ", line));
- }
-
- // Address range in the form X-X where X are hex values without leading 0x.
- std::vector<std::string> addresses = absl::StrSplit(parts[0], '-');
- if (addresses.size() != 2) {
- return PosixError(EINVAL,
- absl::StrCat("Invalid address range: ", parts[0]));
- }
- ASSIGN_OR_RETURN_ERRNO(map_entry.start, AtoiBase(addresses[0], 16));
- ASSIGN_OR_RETURN_ERRNO(map_entry.end, AtoiBase(addresses[1], 16));
-
-  // Permissions are four characters of the form rwxp, or '-' if not set.
- if (parts[1].size() != 4) {
- return PosixError(EINVAL,
- absl::StrCat("Invalid permission field: ", parts[1]));
- }
-
- map_entry.readable = parts[1][0] == 'r';
- map_entry.writable = parts[1][1] == 'w';
- map_entry.executable = parts[1][2] == 'x';
- map_entry.priv = parts[1][3] == 'p';
-
- ASSIGN_OR_RETURN_ERRNO(map_entry.offset, AtoiBase(parts[2], 16));
-
- std::vector<std::string> device = absl::StrSplit(parts[3], ':');
- if (device.size() != 2) {
- return PosixError(EINVAL, absl::StrCat("Invalid device: ", parts[3]));
- }
- ASSIGN_OR_RETURN_ERRNO(map_entry.major, AtoiBase(device[0], 16));
- ASSIGN_OR_RETURN_ERRNO(map_entry.minor, AtoiBase(device[1], 16));
-
- ASSIGN_OR_RETURN_ERRNO(map_entry.inode, Atoi<int64_t>(parts[4]));
- if (parts.size() == 6) {
- // A filename is present. However, absl::StrSplit retained the whitespace
- // between the inode number and the filename.
- map_entry.filename =
- std::string(absl::StripLeadingAsciiWhitespace(parts[5]));
- }
-
- return map_entry;
-}
-
-PosixErrorOr<std::vector<ProcMapsEntry>> ParseProcMaps(
- absl::string_view contents) {
- std::vector<ProcMapsEntry> entries;
- auto lines = absl::StrSplit(contents, '\n', absl::SkipEmpty());
- for (const auto& l : lines) {
- std::cout << "line: " << l << std::endl;
- ASSIGN_OR_RETURN_ERRNO(auto entry, ParseProcMapsLine(l));
- entries.push_back(entry);
- }
- return entries;
-}
-
-PosixErrorOr<bool> IsVsyscallEnabled() {
- ASSIGN_OR_RETURN_ERRNO(auto contents, GetContents("/proc/self/maps"));
- ASSIGN_OR_RETURN_ERRNO(auto maps, ParseProcMaps(contents));
- return std::any_of(maps.begin(), maps.end(), [](const ProcMapsEntry& e) {
- return e.filename == "[vsyscall]";
- });
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/util/proc_util.h b/test/util/proc_util.h
deleted file mode 100644
index af209a51e..000000000
--- a/test/util/proc_util.h
+++ /dev/null
@@ -1,150 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_UTIL_PROC_UTIL_H_
-#define GVISOR_TEST_UTIL_PROC_UTIL_H_
-
-#include <algorithm>
-#include <ostream>
-#include <string>
-#include <vector>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/strings/str_cat.h"
-#include "absl/strings/string_view.h"
-#include "test/util/fs_util.h"
-#include "test/util/posix_error.h"
-
-namespace gvisor {
-namespace testing {
-
-// ProcMapsEntry contains the data from a single line in /proc/<xxx>/maps.
-struct ProcMapsEntry {
- uint64_t start;
- uint64_t end;
- bool readable;
- bool writable;
- bool executable;
- bool priv;
- uint64_t offset;
- int major;
- int minor;
- int64_t inode;
- std::string filename;
-};
-
-// Parses a ProcMaps line or returns an error.
-PosixErrorOr<ProcMapsEntry> ParseProcMapsLine(absl::string_view line);
-PosixErrorOr<std::vector<ProcMapsEntry>> ParseProcMaps(
- absl::string_view contents);
-
-// Returns true if vsyscall (emulated or not) is enabled.
-PosixErrorOr<bool> IsVsyscallEnabled();
-
-// Printer for ProcMapsEntry.
-inline std::ostream& operator<<(std::ostream& os, const ProcMapsEntry& entry) {
- std::string str =
- absl::StrCat(absl::Hex(entry.start, absl::PadSpec::kZeroPad8), "-",
- absl::Hex(entry.end, absl::PadSpec::kZeroPad8), " ");
-
- absl::StrAppend(&str, entry.readable ? "r" : "-");
- absl::StrAppend(&str, entry.writable ? "w" : "-");
- absl::StrAppend(&str, entry.executable ? "x" : "-");
- absl::StrAppend(&str, entry.priv ? "p" : "s");
-
- absl::StrAppend(&str, " ", absl::Hex(entry.offset, absl::PadSpec::kZeroPad8),
- " ", absl::Hex(entry.major, absl::PadSpec::kZeroPad2), ":",
- absl::Hex(entry.minor, absl::PadSpec::kZeroPad2), " ",
- entry.inode);
- if (absl::string_view(entry.filename) != "") {
- // Pad to column 74
- int pad = 73 - str.length();
- if (pad > 0) {
- absl::StrAppend(&str, std::string(pad, ' '));
- }
- absl::StrAppend(&str, entry.filename);
- }
- os << str;
- return os;
-}
-
-// Printer for std::vector<ProcMapsEntry>.
-inline std::ostream& operator<<(std::ostream& os,
- const std::vector<ProcMapsEntry>& vec) {
- for (unsigned int i = 0; i < vec.size(); i++) {
- os << vec[i];
- if (i != vec.size() - 1) {
- os << "\n";
- }
- }
- return os;
-}
-
-// GMock printer for std::vector<ProcMapsEntry>.
-inline void PrintTo(const std::vector<ProcMapsEntry>& vec, std::ostream* os) {
- *os << vec;
-}
-
-// Checks that /proc/pid/maps contains all of the passed mappings.
-//
-// The major, minor, and inode fields are ignored.
-MATCHER_P(ContainsMappings, mappings,
- "contains mappings:\n" + ::testing::PrintToString(mappings)) {
- auto contents_or = GetContents(absl::StrCat("/proc/", arg, "/maps"));
- if (!contents_or.ok()) {
- *result_listener << "Unable to read mappings: "
- << contents_or.error().ToString();
- return false;
- }
-
- auto maps_or = ParseProcMaps(contents_or.ValueOrDie());
- if (!maps_or.ok()) {
- *result_listener << "Unable to parse mappings: "
- << maps_or.error().ToString();
- return false;
- }
-
- auto maps = std::move(maps_or).ValueOrDie();
-
- // Does maps contain all elements in mappings? The comparator ignores
- // the major, minor, and inode fields.
- bool all_present = true;
- std::for_each(mappings.begin(), mappings.end(), [&](const ProcMapsEntry& e1) {
- auto it =
- std::find_if(maps.begin(), maps.end(), [&e1](const ProcMapsEntry& e2) {
- return e1.start == e2.start && e1.end == e2.end &&
- e1.readable == e2.readable && e1.writable == e2.writable &&
- e1.executable == e2.executable && e1.priv == e2.priv &&
- e1.offset == e2.offset && e1.filename == e2.filename;
- });
- if (it == maps.end()) {
- // It wasn't found.
- if (all_present) {
- // We will output the message once and then a line for each mapping
- // that wasn't found.
- all_present = false;
- *result_listener << "Got mappings:\n"
- << maps << "\nThat were missing:\n";
- }
- *result_listener << e1 << "\n";
- }
- });
-
- return all_present;
-}
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_UTIL_PROC_UTIL_H_
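A minimal sketch of the ContainsMappings matcher in use; the anonymous mapping and test name are illustrative.

#include <sys/mman.h>
#include <unistd.h>

#include <cstdint>
#include <vector>

#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "test/util/proc_util.h"

namespace gvisor {
namespace testing {
namespace {

TEST(ProcUtilSketch, AnonymousMappingIsListed) {
  const size_t kSize = getpagesize();
  void* m = mmap(nullptr, kSize, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  ASSERT_NE(m, MAP_FAILED);

  // Describe the mapping we expect to see in /proc/<pid>/maps. The major,
  // minor, and inode fields are ignored by ContainsMappings.
  ProcMapsEntry expected = {};
  expected.start = reinterpret_cast<uint64_t>(m);
  expected.end = expected.start + kSize;
  expected.readable = true;
  expected.writable = true;
  expected.executable = false;
  expected.priv = true;

  EXPECT_THAT(getpid(),
              ContainsMappings(std::vector<ProcMapsEntry>{expected}));

  EXPECT_EQ(munmap(m, kSize), 0);
}

}  // namespace
}  // namespace testing
}  // namespace gvisor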
diff --git a/test/util/proc_util_test.cc b/test/util/proc_util_test.cc
deleted file mode 100644
index 71dd2355e..000000000
--- a/test/util/proc_util_test.cc
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/util/proc_util.h"
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "test/util/test_util.h"
-
-using ::testing::IsEmpty;
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(ParseProcMapsLineTest, WithoutFilename) {
- auto entry = ASSERT_NO_ERRNO_AND_VALUE(
- ParseProcMapsLine("2ab4f00b7000-2ab4f00b9000 r-xp 00000000 00:00 0 "));
- EXPECT_EQ(entry.start, 0x2ab4f00b7000);
- EXPECT_EQ(entry.end, 0x2ab4f00b9000);
- EXPECT_TRUE(entry.readable);
- EXPECT_FALSE(entry.writable);
- EXPECT_TRUE(entry.executable);
- EXPECT_TRUE(entry.priv);
- EXPECT_EQ(entry.offset, 0);
- EXPECT_EQ(entry.major, 0);
- EXPECT_EQ(entry.minor, 0);
- EXPECT_EQ(entry.inode, 0);
- EXPECT_THAT(entry.filename, IsEmpty());
-}
-
-TEST(ParseProcMapsLineTest, WithFilename) {
- auto entry = ASSERT_NO_ERRNO_AND_VALUE(
- ParseProcMapsLine("00407000-00408000 rw-p 00006000 00:0e 10 "
- " /bin/cat"));
- EXPECT_EQ(entry.start, 0x407000);
- EXPECT_EQ(entry.end, 0x408000);
- EXPECT_TRUE(entry.readable);
- EXPECT_TRUE(entry.writable);
- EXPECT_FALSE(entry.executable);
- EXPECT_TRUE(entry.priv);
- EXPECT_EQ(entry.offset, 0x6000);
- EXPECT_EQ(entry.major, 0);
- EXPECT_EQ(entry.minor, 0x0e);
- EXPECT_EQ(entry.inode, 10);
- EXPECT_EQ(entry.filename, "/bin/cat");
-}
-
-TEST(ParseProcMapsLineTest, WithFilenameContainingSpaces) {
- auto entry = ASSERT_NO_ERRNO_AND_VALUE(
- ParseProcMapsLine("7f26b3b12000-7f26b3b13000 rw-s 00000000 00:05 1432484 "
- " /dev/zero (deleted)"));
- EXPECT_EQ(entry.start, 0x7f26b3b12000);
- EXPECT_EQ(entry.end, 0x7f26b3b13000);
- EXPECT_TRUE(entry.readable);
- EXPECT_TRUE(entry.writable);
- EXPECT_FALSE(entry.executable);
- EXPECT_FALSE(entry.priv);
- EXPECT_EQ(entry.offset, 0);
- EXPECT_EQ(entry.major, 0);
- EXPECT_EQ(entry.minor, 0x05);
- EXPECT_EQ(entry.inode, 1432484);
- EXPECT_EQ(entry.filename, "/dev/zero (deleted)");
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/util/pty_util.cc b/test/util/pty_util.cc
deleted file mode 100644
index 351f4730c..000000000
--- a/test/util/pty_util.cc
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/util/pty_util.h"
-
-#include <fcntl.h>
-#include <sys/ioctl.h>
-#include <termios.h>
-
-#include "absl/strings/str_cat.h"
-#include "test/util/file_descriptor.h"
-#include "test/util/posix_error.h"
-
-namespace gvisor {
-namespace testing {
-
-PosixErrorOr<FileDescriptor> OpenReplica(const FileDescriptor& master) {
- return OpenReplica(master, O_NONBLOCK | O_RDWR | O_NOCTTY);
-}
-
-PosixErrorOr<FileDescriptor> OpenReplica(const FileDescriptor& master,
- int flags) {
- PosixErrorOr<int> n = ReplicaID(master);
- if (!n.ok()) {
- return PosixErrorOr<FileDescriptor>(n.error());
- }
- return Open(absl::StrCat("/dev/pts/", n.ValueOrDie()), flags);
-}
-
-PosixErrorOr<int> ReplicaID(const FileDescriptor& master) {
- // Get pty index.
- int n;
- int ret = ioctl(master.get(), TIOCGPTN, &n);
- if (ret < 0) {
- return PosixError(errno, "ioctl(TIOCGPTN) failed");
- }
-
- // Unlock pts.
- int unlock = 0;
- ret = ioctl(master.get(), TIOCSPTLCK, &unlock);
- if (ret < 0) {
-    return PosixError(errno, "ioctl(TIOCSPTLCK) failed");
- }
-
- return n;
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/util/pty_util.h b/test/util/pty_util.h
deleted file mode 100644
index 0cca2182c..000000000
--- a/test/util/pty_util.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_UTIL_PTY_UTIL_H_
-#define GVISOR_TEST_UTIL_PTY_UTIL_H_
-
-#include "test/util/file_descriptor.h"
-#include "test/util/posix_error.h"
-
-namespace gvisor {
-namespace testing {
-
-// Opens the replica end of the passed master as R/W and nonblocking. It does
-// not set the replica as the controlling TTY.
-PosixErrorOr<FileDescriptor> OpenReplica(const FileDescriptor& master);
-
-// Identical to the above OpenReplica, but flags are all specified by the
-// caller.
-PosixErrorOr<FileDescriptor> OpenReplica(const FileDescriptor& master,
- int flags);
-
-// Get the number of the replica end of the master.
-PosixErrorOr<int> ReplicaID(const FileDescriptor& master);
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_UTIL_PTY_UTIL_H_
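A minimal sketch of OpenReplica() usage, assuming Open() from test/util/file_descriptor.h, the SyscallSucceeds matcher from test/util/test_util.h, and a standard /dev/ptmx device.

#include <fcntl.h>
#include <unistd.h>

#include "gtest/gtest.h"
#include "test/util/file_descriptor.h"
#include "test/util/posix_error.h"
#include "test/util/pty_util.h"
#include "test/util/test_util.h"

namespace gvisor {
namespace testing {
namespace {

TEST(PtySketch, MasterReplicaRoundTrip) {
  // Open the master side, then let OpenReplica() unlock and open the
  // corresponding /dev/pts/<N> end.
  FileDescriptor master =
      ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/ptmx", O_RDWR | O_NONBLOCK));
  FileDescriptor replica = ASSERT_NO_ERRNO_AND_VALUE(OpenReplica(master));

  // Data written to the master is delivered to the replica (subject to the
  // line discipline).
  char c = 'x';
  ASSERT_THAT(write(master.get(), &c, 1), SyscallSucceeds());
}

}  // namespace
}  // namespace testing
}  // namespace gvisor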
diff --git a/test/util/rlimit_util.cc b/test/util/rlimit_util.cc
deleted file mode 100644
index d7bfc1606..000000000
--- a/test/util/rlimit_util.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/util/rlimit_util.h"
-
-#include <sys/resource.h>
-
-#include <cerrno>
-
-#include "test/util/cleanup.h"
-#include "test/util/logging.h"
-#include "test/util/posix_error.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-PosixErrorOr<Cleanup> ScopedSetSoftRlimit(int resource, rlim_t newval) {
- struct rlimit old_rlim;
- if (getrlimit(resource, &old_rlim) != 0) {
- return PosixError(errno, "getrlimit failed");
- }
- struct rlimit new_rlim = old_rlim;
- new_rlim.rlim_cur = newval;
- if (setrlimit(resource, &new_rlim) != 0) {
- return PosixError(errno, "setrlimit failed");
- }
- return Cleanup([resource, old_rlim] {
- TEST_PCHECK(setrlimit(resource, &old_rlim) == 0);
- });
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/util/rlimit_util.h b/test/util/rlimit_util.h
deleted file mode 100644
index 873252a32..000000000
--- a/test/util/rlimit_util.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_UTIL_RLIMIT_UTIL_H_
-#define GVISOR_TEST_UTIL_RLIMIT_UTIL_H_
-
-#include <sys/resource.h>
-#include <sys/time.h>
-
-#include "test/util/cleanup.h"
-#include "test/util/posix_error.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-PosixErrorOr<Cleanup> ScopedSetSoftRlimit(int resource, rlim_t newval);
-
-} // namespace testing
-} // namespace gvisor
-#endif // GVISOR_TEST_UTIL_RLIMIT_UTIL_H_
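A minimal sketch of ScopedSetSoftRlimit() usage, assuming a SyscallFailsWithErrno matcher from test/util/test_util.h; the limit value is arbitrary.

#include <errno.h>
#include <fcntl.h>
#include <sys/resource.h>

#include "gtest/gtest.h"
#include "test/util/posix_error.h"
#include "test/util/rlimit_util.h"
#include "test/util/test_util.h"

namespace gvisor {
namespace testing {
namespace {

TEST(RlimitSketch, OpenFailsWithEMFILE) {
  // Drop the soft limit on open files to 0; the previous value is restored
  // when `limit` goes out of scope.
  auto limit =
      ASSERT_NO_ERRNO_AND_VALUE(ScopedSetSoftRlimit(RLIMIT_NOFILE, 0));
  EXPECT_THAT(open("/dev/null", O_RDONLY), SyscallFailsWithErrno(EMFILE));
}

}  // namespace
}  // namespace testing
}  // namespace gvisor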
diff --git a/test/util/save_util.cc b/test/util/save_util.cc
deleted file mode 100644
index 3e724d99b..000000000
--- a/test/util/save_util.cc
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/util/save_util.h"
-
-#include <stddef.h>
-#include <stdlib.h>
-#include <unistd.h>
-
-#include <atomic>
-#include <cerrno>
-
-#include "absl/types/optional.h"
-
-namespace gvisor {
-namespace testing {
-namespace {
-
-std::atomic<absl::optional<bool>> save_present;
-
-bool SavePresent() {
- auto present = save_present.load();
- if (!present.has_value()) {
- present = getenv("GVISOR_SAVE_TEST") != nullptr;
- save_present.store(present);
- }
- return present.value();
-}
-
-std::atomic<int> save_disable;
-
-} // namespace
-
-bool IsRunningWithSaveRestore() { return SavePresent(); }
-
-void MaybeSave() {
- if (SavePresent() && save_disable.load() == 0) {
- internal::DoCooperativeSave();
- }
-}
-
-DisableSave::DisableSave() { save_disable++; }
-
-DisableSave::~DisableSave() { reset(); }
-
-void DisableSave::reset() {
- if (!reset_) {
- reset_ = true;
- save_disable--;
- }
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/util/save_util.h b/test/util/save_util.h
deleted file mode 100644
index e7218ae88..000000000
--- a/test/util/save_util.h
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_UTIL_SAVE_UTIL_H_
-#define GVISOR_TEST_UTIL_SAVE_UTIL_H_
-
-namespace gvisor {
-namespace testing {
-
-// Returns true if the environment in which the calling process is executing
-// allows the test to be checkpointed and restored during execution.
-bool IsRunningWithSaveRestore();
-
-// May perform a co-operative save cycle.
-//
-// errno is guaranteed to be preserved.
-void MaybeSave();
-
-// Causes MaybeSave to become a no-op until destroyed or reset.
-class DisableSave {
- public:
- DisableSave();
- ~DisableSave();
- DisableSave(DisableSave const&) = delete;
- DisableSave(DisableSave&&) = delete;
- DisableSave& operator=(DisableSave const&) = delete;
- DisableSave& operator=(DisableSave&&) = delete;
-
- // reset allows saves to continue, and is called implicitly by the destructor.
- // It may be called multiple times safely, but is not thread-safe.
- void reset();
-
- private:
- bool reset_ = false;
-};
-
-namespace internal {
-
-// Causes a co-operative save cycle to occur.
-//
-// errno is guaranteed to be preserved.
-void DoCooperativeSave();
-
-} // namespace internal
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_UTIL_SAVE_UTIL_H_
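A minimal sketch of the MaybeSave() / DisableSave pattern, assuming Open() from test/util/file_descriptor.h and that flock() state is not preserved across a cooperative save; the path and test name are illustrative.

#include <fcntl.h>
#include <sys/file.h>

#include "gtest/gtest.h"
#include "test/util/file_descriptor.h"
#include "test/util/posix_error.h"
#include "test/util/save_util.h"
#include "test/util/test_util.h"

namespace gvisor {
namespace testing {
namespace {

TEST(SaveSketch, CooperativeSavePoints) {
  FileDescriptor fd =
      ASSERT_NO_ERRNO_AND_VALUE(Open("/tmp", O_RDONLY | O_DIRECTORY));
  // A natural checkpoint: kernel state created so far should survive it.
  MaybeSave();

  {
    // Suppress saves while holding state that is assumed not to be preserved
    // across save/restore.
    DisableSave ds;
    ASSERT_THAT(flock(fd.get(), LOCK_EX), SyscallSucceeds());
    ASSERT_THAT(flock(fd.get(), LOCK_UN), SyscallSucceeds());
  }  // Saves are re-enabled when `ds` is destroyed.

  MaybeSave();
}

}  // namespace
}  // namespace testing
}  // namespace gvisor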
diff --git a/test/util/save_util_linux.cc b/test/util/save_util_linux.cc
deleted file mode 100644
index 57431b3ea..000000000
--- a/test/util/save_util_linux.cc
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifdef __linux__
-
-#include <errno.h>
-#include <sys/syscall.h>
-#include <unistd.h>
-
-#include "test/util/save_util.h"
-
-#if defined(__x86_64__) || defined(__i386__)
-#define SYS_TRIGGER_SAVE SYS_create_module
-#elif defined(__aarch64__)
-#define SYS_TRIGGER_SAVE SYS_finit_module
-#else
-#error "Unknown architecture"
-#endif
-
-namespace gvisor {
-namespace testing {
-namespace internal {
-
-void DoCooperativeSave() {
- int orig_errno = errno;
-  // This syscall is used to trigger a save of the sentry state when it is
-  // invoked. It must be a valid syscall number that is not otherwise
-  // exercised by any syscall test.
- syscall(SYS_TRIGGER_SAVE, nullptr, 0);
- errno = orig_errno;
-}
-
-} // namespace internal
-} // namespace testing
-} // namespace gvisor
-
-#endif // __linux__
diff --git a/test/util/save_util_other.cc b/test/util/save_util_other.cc
deleted file mode 100644
index 7749ded76..000000000
--- a/test/util/save_util_other.cc
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef __linux__
-
-#include "test/util/logging.h"
-
-namespace gvisor {
-namespace testing {
-namespace internal {
-
-void DoCooperativeSave() {
- TEST_CHECK_MSG(false, "DoCooperativeSave not implemented");
-}
-
-} // namespace internal
-} // namespace testing
-} // namespace gvisor
-
-#endif
diff --git a/test/util/signal_util.cc b/test/util/signal_util.cc
deleted file mode 100644
index 5ee95ee80..000000000
--- a/test/util/signal_util.cc
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/util/signal_util.h"
-
-#include <signal.h>
-
-#include <ostream>
-
-#include "gtest/gtest.h"
-#include "test/util/cleanup.h"
-#include "test/util/posix_error.h"
-#include "test/util/test_util.h"
-
-namespace {
-
-struct Range {
- int start;
- int end;
-};
-
-// Format a Range as "start-end", or as "start" for single-value Ranges.
-static ::std::ostream& operator<<(::std::ostream& os, const Range& range) {
- if (range.end > range.start) {
- return os << range.start << '-' << range.end;
- }
-
- return os << range.start;
-}
-
-} // namespace
-
-// Format a sigset_t as a comma separated list of numeric ranges.
-// Empty sigset: []
-// Full sigset: [1-31,34-64]
-::std::ostream& operator<<(::std::ostream& os, const sigset_t& sigset) {
- const char* delim = "";
- Range range = {0, 0};
-
- os << '[';
-
- for (int sig = 1; sig <= gvisor::testing::kMaxSignal; ++sig) {
- if (sigismember(&sigset, sig)) {
- if (range.start) {
- range.end = sig;
- } else {
- range.start = sig;
- range.end = sig;
- }
- } else if (range.start) {
- os << delim << range;
- delim = ",";
- range.start = 0;
- range.end = 0;
- }
- }
-
- if (range.start) {
- os << delim << range;
- }
-
- return os << ']';
-}
-
-namespace gvisor {
-namespace testing {
-
-PosixErrorOr<Cleanup> ScopedSigaction(int sig, struct sigaction const& sa) {
- struct sigaction old_sa;
- int rc = sigaction(sig, &sa, &old_sa);
- MaybeSave();
- if (rc < 0) {
- return PosixError(errno, "sigaction failed");
- }
- return Cleanup([sig, old_sa] {
- EXPECT_THAT(sigaction(sig, &old_sa, nullptr), SyscallSucceeds());
- });
-}
-
-PosixErrorOr<Cleanup> ScopedSignalMask(int how, sigset_t const& set) {
- sigset_t old;
- int rc = sigprocmask(how, &set, &old);
- MaybeSave();
- if (rc < 0) {
- return PosixError(errno, "sigprocmask failed");
- }
- return Cleanup([old] {
- EXPECT_THAT(sigprocmask(SIG_SETMASK, &old, nullptr), SyscallSucceeds());
- });
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/util/signal_util.h b/test/util/signal_util.h
deleted file mode 100644
index 20eebd7e4..000000000
--- a/test/util/signal_util.h
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_UTIL_SIGNAL_UTIL_H_
-#define GVISOR_TEST_UTIL_SIGNAL_UTIL_H_
-
-#include <signal.h>
-#include <sys/syscall.h>
-#include <unistd.h>
-
-#include <ostream>
-
-#include "gmock/gmock.h"
-#include "test/util/cleanup.h"
-#include "test/util/posix_error.h"
-
-// Format a sigset_t as a comma separated list of numeric ranges.
-::std::ostream& operator<<(::std::ostream& os, const sigset_t& sigset);
-
-namespace gvisor {
-namespace testing {
-
-// The maximum signal number.
-static constexpr int kMaxSignal = 64;
-
-// Wrapper for the tgkill(2) syscall, which glibc does not provide.
-inline int tgkill(pid_t tgid, pid_t tid, int sig) {
- return syscall(__NR_tgkill, tgid, tid, sig);
-}
-
-// Installs the passed sigaction and returns a cleanup function to restore the
-// previous handler when it goes out of scope.
-PosixErrorOr<Cleanup> ScopedSigaction(int sig, struct sigaction const& sa);
-
-// Updates the signal mask as per sigprocmask(2) and returns a cleanup function
-// to restore the previous signal mask when it goes out of scope.
-PosixErrorOr<Cleanup> ScopedSignalMask(int how, sigset_t const& set);
-
-// ScopedSignalMask variant that creates a mask of the single signal 'sig'.
-inline PosixErrorOr<Cleanup> ScopedSignalMask(int how, int sig) {
- sigset_t set;
- sigemptyset(&set);
- sigaddset(&set, sig);
- return ScopedSignalMask(how, set);
-}
-
-// Asserts equality of two sigset_t values.
-MATCHER_P(EqualsSigset, value, "equals " + ::testing::PrintToString(value)) {
- for (int sig = 1; sig <= kMaxSignal; ++sig) {
- if (sigismember(&arg, sig) != sigismember(&value, sig)) {
- return false;
- }
- }
- return true;
-}
-
-#ifdef __x86_64__
-// Fault can be used to generate a synchronous SIGSEGV.
-//
-// This fault can be fixed up in a handler via fixup, below.
-inline void Fault() {
-  // Zero %rax and dereference it.
- asm("movabs $0, %%rax\r\n"
- "mov 0(%%rax), %%rax\r\n"
- :
- :
- : "ax");
-}
-
-// FixupFault fixes up a fault generated by fault, above.
-inline void FixupFault(ucontext_t* ctx) {
- // Skip the bad instruction above.
- //
-  // The encoding of "mov 0(%rax), %rax" is 0x48 0x8b 0x00 (three bytes).
- ctx->uc_mcontext.gregs[REG_RIP] += 3;
-}
-#elif __aarch64__
-inline void Fault() {
- // Zero and dereference x0.
- asm("mov x0, xzr\r\n"
- "str xzr, [x0]\r\n"
- :
- :
- : "x0");
-}
-
-inline void FixupFault(ucontext_t* ctx) {
- // Skip the bad instruction above.
- ctx->uc_mcontext.pc += 4;
-}
-#endif
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_UTIL_SIGNAL_UTIL_H_
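
As a rough, hypothetical sketch (test name invented; x86_64/aarch64 only), ScopedSigaction and the Fault/FixupFault helpers above can be combined to exercise a recoverable SIGSEGV:

// Hypothetical usage sketch; not part of the original tree.
#include <signal.h>
#include <ucontext.h>

#include "gtest/gtest.h"
#include "test/util/posix_error.h"
#include "test/util/signal_util.h"
#include "test/util/test_util.h"

namespace gvisor {
namespace testing {
namespace {

void SigsegvHandler(int sig, siginfo_t* info, void* ucontext) {
  // Skip past the faulting instruction installed by Fault().
  FixupFault(static_cast<ucontext_t*>(ucontext));
}

TEST(SignalUtilExample, RecoverableFault) {
  struct sigaction sa = {};
  sa.sa_sigaction = SigsegvHandler;
  sa.sa_flags = SA_SIGINFO;
  sigemptyset(&sa.sa_mask);
  // Install the handler; the previous one is restored when cleanup dies.
  auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGSEGV, sa));
  Fault();  // Raises SIGSEGV; the handler fixes up the PC and returns.
}

}  // namespace
}  // namespace testing
}  // namespace gvisor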
diff --git a/test/util/temp_path.cc b/test/util/temp_path.cc
deleted file mode 100644
index e1bdee7fd..000000000
--- a/test/util/temp_path.cc
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/util/temp_path.h"
-
-#include <unistd.h>
-
-#include <atomic>
-#include <cstdlib>
-#include <iostream>
-
-#include "gtest/gtest.h"
-#include "absl/time/clock.h"
-#include "absl/time/time.h"
-#include "test/util/fs_util.h"
-#include "test/util/posix_error.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-std::atomic<uint64_t> global_temp_file_number = ATOMIC_VAR_INIT(1);
-
-// Return a new temp filename, intended to be unique system-wide.
-//
-// The global file number helps maintain file naming consistency across
-// different runs of a test.
-//
-// The timestamp is necessary because the test infrastructure invokes each
-// test case in a separate process (resetting global_temp_file_number) and
-// potentially in parallel, which allows for races between selecting and using a
-// name.
-std::string NextTempBasename() {
- return absl::StrCat("gvisor_test_temp_", global_temp_file_number++, "_",
- absl::ToUnixNanos(absl::Now()));
-}
-
-void TryDeleteRecursively(std::string const& path) {
- if (!path.empty()) {
- int undeleted_dirs = 0;
- int undeleted_files = 0;
- auto status = RecursivelyDelete(path, &undeleted_dirs, &undeleted_files);
- if (undeleted_dirs || undeleted_files || !status.ok()) {
- std::cerr << path << ": failed to delete " << undeleted_dirs
- << " directories and " << undeleted_files
- << " files: " << status << std::endl;
- }
- }
-}
-
-} // namespace
-
-constexpr mode_t TempPath::kDefaultFileMode;
-constexpr mode_t TempPath::kDefaultDirMode;
-
-std::string NewTempAbsPathInDir(absl::string_view const dir) {
- return JoinPath(dir, NextTempBasename());
-}
-
-std::string NewTempAbsPath() {
- return NewTempAbsPathInDir(GetAbsoluteTestTmpdir());
-}
-
-std::string NewTempRelPath() { return NextTempBasename(); }
-
-std::string GetAbsoluteTestTmpdir() {
-  // TEST_TMPDIR is normally set by the test runner; fall back to /tmp if not.
- char* env_tmpdir = getenv("TEST_TMPDIR");
- std::string tmp_dir =
- env_tmpdir != nullptr ? std::string(env_tmpdir) : "/tmp";
-
- return MakeAbsolute(tmp_dir, "").ValueOrDie();
-}
-
-PosixErrorOr<TempPath> TempPath::CreateFileWith(absl::string_view const parent,
- absl::string_view const content,
- mode_t const mode) {
- return CreateIn(parent, [=](absl::string_view path) -> PosixError {
- // CreateWithContents will call open(O_WRONLY) with the given mode. If the
- // mode is not user-writable, save/restore cannot preserve the fd. Hence
- // the little permission dance that's done here.
- auto res = CreateWithContents(path, content, mode | 0200);
- RETURN_IF_ERRNO(res);
-
- return Chmod(path, mode);
- });
-}
-
-PosixErrorOr<TempPath> TempPath::CreateDirWith(absl::string_view const parent,
- mode_t const mode) {
- return CreateIn(parent,
- [=](absl::string_view path) { return Mkdir(path, mode); });
-}
-
-PosixErrorOr<TempPath> TempPath::CreateSymlinkTo(absl::string_view const parent,
- std::string const& dest) {
- return CreateIn(parent, [=](absl::string_view path) {
- int ret = symlink(dest.c_str(), std::string(path).c_str());
- if (ret != 0) {
- return PosixError(errno, "symlink failed");
- }
- return NoError();
- });
-}
-
-PosixErrorOr<TempPath> TempPath::CreateFileIn(absl::string_view const parent) {
- return TempPath::CreateFileWith(parent, absl::string_view(),
- kDefaultFileMode);
-}
-
-PosixErrorOr<TempPath> TempPath::CreateDirIn(absl::string_view const parent) {
- return TempPath::CreateDirWith(parent, kDefaultDirMode);
-}
-
-PosixErrorOr<TempPath> TempPath::CreateFileMode(mode_t mode) {
- return TempPath::CreateFileWith(GetAbsoluteTestTmpdir(), absl::string_view(),
- mode);
-}
-
-PosixErrorOr<TempPath> TempPath::CreateFile() {
- return TempPath::CreateFileIn(GetAbsoluteTestTmpdir());
-}
-
-PosixErrorOr<TempPath> TempPath::CreateDir() {
- return TempPath::CreateDirIn(GetAbsoluteTestTmpdir());
-}
-
-TempPath::~TempPath() { TryDeleteRecursively(path_); }
-
-TempPath::TempPath(TempPath&& orig) { reset(orig.release()); }
-
-TempPath& TempPath::operator=(TempPath&& orig) {
- reset(orig.release());
- return *this;
-}
-
-std::string TempPath::reset(std::string newpath) {
- std::string path = path_;
- TryDeleteRecursively(path_);
- path_ = std::move(newpath);
- return path;
-}
-
-std::string TempPath::release() {
- std::string path = path_;
- path_ = std::string();
- return path;
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/util/temp_path.h b/test/util/temp_path.h
deleted file mode 100644
index 9e5ac11f4..000000000
--- a/test/util/temp_path.h
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_UTIL_TEMP_PATH_H_
-#define GVISOR_TEST_UTIL_TEMP_PATH_H_
-
-#include <sys/stat.h>
-
-#include <string>
-#include <utility>
-
-#include "absl/strings/str_cat.h"
-#include "absl/strings/string_view.h"
-#include "test/util/posix_error.h"
-
-namespace gvisor {
-namespace testing {
-
-// Returns an absolute path for a file in `dir` that does not yet exist.
-// Distinct calls to NewTempAbsPathInDir from the same process, even from
-// multiple threads, are guaranteed to return different paths. Distinct calls to
-// NewTempAbsPathInDir from different processes are not synchronized.
-std::string NewTempAbsPathInDir(absl::string_view const dir);
-
-// Like NewTempAbsPathInDir, but the returned path is in the test's temporary
-// directory, as provided by the testing framework.
-std::string NewTempAbsPath();
-
-// Like NewTempAbsPathInDir, but the returned path is relative (to the current
-// working directory).
-std::string NewTempRelPath();
-
-// Returns the absolute path for the test temp dir.
-std::string GetAbsoluteTestTmpdir();
-
-// Represents a temporary file or directory.
-class TempPath {
- public:
- // Default creation mode for files.
- static constexpr mode_t kDefaultFileMode = 0644;
-
- // Default creation mode for directories.
- static constexpr mode_t kDefaultDirMode = 0755;
-
- // Creates a temporary file in directory `parent` with mode `mode` and
- // contents `content`.
- static PosixErrorOr<TempPath> CreateFileWith(absl::string_view parent,
- absl::string_view content,
- mode_t mode);
-
- // Creates an empty temporary subdirectory in directory `parent` with mode
- // `mode`.
- static PosixErrorOr<TempPath> CreateDirWith(absl::string_view parent,
- mode_t mode);
-
- // Creates a temporary symlink in directory `parent` to destination `dest`.
- static PosixErrorOr<TempPath> CreateSymlinkTo(absl::string_view parent,
- std::string const& dest);
-
- // Creates an empty temporary file in directory `parent` with mode
- // kDefaultFileMode.
- static PosixErrorOr<TempPath> CreateFileIn(absl::string_view parent);
-
- // Creates an empty temporary subdirectory in directory `parent` with mode
- // kDefaultDirMode.
- static PosixErrorOr<TempPath> CreateDirIn(absl::string_view parent);
-
- // Creates an empty temporary file in the test's temporary directory with mode
- // `mode`.
- static PosixErrorOr<TempPath> CreateFileMode(mode_t mode);
-
- // Creates an empty temporary file in the test's temporary directory with
- // mode kDefaultFileMode.
- static PosixErrorOr<TempPath> CreateFile();
-
- // Creates an empty temporary subdirectory in the test's temporary directory
- // with mode kDefaultDirMode.
- static PosixErrorOr<TempPath> CreateDir();
-
- // Constructs a TempPath that represents nothing.
- TempPath() = default;
-
- // Constructs a TempPath that represents the given path, which will be deleted
- // when the TempPath is destroyed.
- explicit TempPath(std::string path) : path_(std::move(path)) {}
-
- // Attempts to delete the represented temporary file or directory (in the
- // latter case, also attempts to delete its contents).
- ~TempPath();
-
- // Attempts to delete the represented temporary file or directory, then
- // transfers ownership of the path represented by orig to this TempPath.
- TempPath(TempPath&& orig);
- TempPath& operator=(TempPath&& orig);
-
- // Changes the path this TempPath represents. If the TempPath already
- // represented a path, deletes and returns that path. Otherwise returns the
- // empty string.
- std::string reset(std::string newpath);
- std::string reset() { return reset(""); }
-
- // Forgets and returns the path this TempPath represents. The path is not
- // deleted.
- std::string release();
-
- // Returns the path this TempPath represents.
- std::string path() const { return path_; }
-
- private:
- template <typename F>
- static PosixErrorOr<TempPath> CreateIn(absl::string_view const parent,
- F const& f) {
- std::string path = NewTempAbsPathInDir(parent);
- RETURN_IF_ERRNO(f(path));
- return TempPath(std::move(path));
- }
-
- std::string path_;
-};
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_UTIL_TEMP_PATH_H_
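
A hypothetical usage sketch (test name and contents invented): TempPath objects own their paths and clean up on destruction, while release() hands ownership back to the caller.

// Hypothetical usage sketch; not part of the original tree.
#include <unistd.h>

#include <string>

#include "gtest/gtest.h"
#include "test/util/posix_error.h"
#include "test/util/temp_path.h"
#include "test/util/test_util.h"

namespace gvisor {
namespace testing {
namespace {

TEST(TempPathExample, FilesAndDirectories) {
  // A directory and a file inside it; both are removed when destroyed.
  const TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
  const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
      dir.path(), "hello", TempPath::kDefaultFileMode));
  EXPECT_THAT(access(file.path().c_str(), R_OK), SyscallSucceeds());

  // release() forgets the path without deleting it; cleanup is now manual.
  TempPath keep = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
  const std::string kept = keep.release();
  EXPECT_THAT(unlink(kept.c_str()), SyscallSucceeds());
}

}  // namespace
}  // namespace testing
}  // namespace gvisor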
diff --git a/test/util/temp_umask.h b/test/util/temp_umask.h
deleted file mode 100644
index e7de84a54..000000000
--- a/test/util/temp_umask.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_UTIL_TEMP_UMASK_H_
-#define GVISOR_TEST_UTIL_TEMP_UMASK_H_
-
-#include <sys/stat.h>
-#include <sys/types.h>
-
-namespace gvisor {
-namespace testing {
-
-class TempUmask {
- public:
- // Sets the process umask to `mask`.
- explicit TempUmask(mode_t mask) : old_mask_(umask(mask)) {}
-
- // Sets the process umask to its previous value.
- ~TempUmask() { umask(old_mask_); }
-
- private:
- mode_t old_mask_;
-};
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_UTIL_TEMP_UMASK_H_
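
A short, hypothetical sketch of TempUmask, which scopes a umask change to a block (test name and mode values invented):

// Hypothetical usage sketch; not part of the original tree.
#include <sys/stat.h>
#include <sys/types.h>

#include <string>

#include "gtest/gtest.h"
#include "test/util/posix_error.h"
#include "test/util/temp_path.h"
#include "test/util/temp_umask.h"
#include "test/util/test_util.h"

namespace gvisor {
namespace testing {
namespace {

TEST(TempUmaskExample, ScopedMask) {
  const TempPath parent = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
  const std::string path = NewTempAbsPathInDir(parent.path());
  {
    TempUmask mask(0077);  // Clears group/other bits for this scope.
    ASSERT_THAT(mkdir(path.c_str(), 0777), SyscallSucceeds());
  }  // The previous umask is restored here.
  struct stat st;
  ASSERT_THAT(stat(path.c_str(), &st), SyscallSucceeds());
  EXPECT_EQ(st.st_mode & 0777, 0700u);
}

}  // namespace
}  // namespace testing
}  // namespace gvisor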
diff --git a/test/util/test_main.cc b/test/util/test_main.cc
deleted file mode 100644
index 1f389e58f..000000000
--- a/test/util/test_main.cc
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/util/test_util.h"
-
-int main(int argc, char** argv) {
- gvisor::testing::TestInit(&argc, &argv);
- return gvisor::testing::RunAllTests();
-}
diff --git a/test/util/test_util.cc b/test/util/test_util.cc
deleted file mode 100644
index d0c1d6426..000000000
--- a/test/util/test_util.cc
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/util/test_util.h"
-
-#include <limits.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <sys/uio.h>
-#include <sys/utsname.h>
-#include <unistd.h>
-
-#include <ctime>
-#include <iostream>
-#include <vector>
-
-#include "absl/base/attributes.h"
-#include "absl/flags/flag.h" // IWYU pragma: keep
-#include "absl/flags/parse.h" // IWYU pragma: keep
-#include "absl/strings/numbers.h"
-#include "absl/strings/str_cat.h"
-#include "absl/strings/str_split.h"
-#include "absl/time/time.h"
-#include "test/util/fs_util.h"
-#include "test/util/posix_error.h"
-
-namespace gvisor {
-namespace testing {
-
-constexpr char kGvisorNetwork[] = "GVISOR_NETWORK";
-constexpr char kGvisorVfs[] = "GVISOR_VFS";
-constexpr char kFuseEnabled[] = "FUSE_ENABLED";
-
-bool IsRunningOnGvisor() { return GvisorPlatform() != Platform::kNative; }
-
-const std::string GvisorPlatform() {
- // Set by runner.go.
- const char* env = getenv(kTestOnGvisor);
- if (!env) {
- return Platform::kNative;
- }
- return std::string(env);
-}
-
-bool IsRunningWithHostinet() {
- const char* env = getenv(kGvisorNetwork);
- return env && strcmp(env, "host") == 0;
-}
-
-bool IsRunningWithVFS1() {
- const char* env = getenv(kGvisorVfs);
- if (env == nullptr) {
- // If not set, it's running on Linux.
- return false;
- }
- return strcmp(env, "VFS1") == 0;
-}
-
-bool IsFUSEEnabled() {
- const char* env = getenv(kFuseEnabled);
- return env && strcmp(env, "TRUE") == 0;
-}
-
-// Inline cpuid instruction. Preserves the %ebx/%rbx register. In PIC
-// compilations %ebx contains the address of the global offset table; %rbx is
-// occasionally used to address stack variables in the presence of dynamic
-// allocas.
-#if defined(__x86_64__)
-#define GETCPUID(a, b, c, d, a_inp, c_inp) \
- asm("mov %%rbx, %%rdi\n" \
- "cpuid\n" \
- "xchg %%rdi, %%rbx\n" \
- : "=a"(a), "=D"(b), "=c"(c), "=d"(d) \
- : "a"(a_inp), "2"(c_inp))
-
-CPUVendor GetCPUVendor() {
- uint32_t eax, ebx, ecx, edx;
- std::string vendor_str;
- // Get vendor string (issue CPUID with eax = 0)
- GETCPUID(eax, ebx, ecx, edx, 0, 0);
- vendor_str.append(reinterpret_cast<char*>(&ebx), 4);
- vendor_str.append(reinterpret_cast<char*>(&edx), 4);
- vendor_str.append(reinterpret_cast<char*>(&ecx), 4);
- if (vendor_str == "GenuineIntel") {
- return CPUVendor::kIntel;
- } else if (vendor_str == "AuthenticAMD") {
- return CPUVendor::kAMD;
- }
- return CPUVendor::kUnknownVendor;
-}
-#endif // defined(__x86_64__)
-
-bool operator==(const KernelVersion& first, const KernelVersion& second) {
- return first.major == second.major && first.minor == second.minor &&
- first.micro == second.micro;
-}
-
-PosixErrorOr<KernelVersion> ParseKernelVersion(absl::string_view vers_str) {
- KernelVersion version = {};
- std::vector<std::string> values =
- absl::StrSplit(vers_str, absl::ByAnyChar(".-"));
- if (values.size() == 2) {
- ASSIGN_OR_RETURN_ERRNO(version.major, Atoi<int>(values[0]));
- ASSIGN_OR_RETURN_ERRNO(version.minor, Atoi<int>(values[1]));
- return version;
- } else if (values.size() >= 3) {
- ASSIGN_OR_RETURN_ERRNO(version.major, Atoi<int>(values[0]));
- ASSIGN_OR_RETURN_ERRNO(version.minor, Atoi<int>(values[1]));
- ASSIGN_OR_RETURN_ERRNO(version.micro, Atoi<int>(values[2]));
- return version;
- }
- return PosixError(EINVAL, absl::StrCat("Unknown kernel release: ", vers_str));
-}
-
-PosixErrorOr<KernelVersion> GetKernelVersion() {
- utsname buf;
- RETURN_ERROR_IF_SYSCALL_FAIL(uname(&buf));
- return ParseKernelVersion(buf.release);
-}
-
-std::string CPUSetToString(const cpu_set_t& set, size_t cpus) {
- std::string str = "cpuset[";
- for (unsigned int n = 0; n < cpus; n++) {
- if (CPU_ISSET(n, &set)) {
- if (n != 0) {
- absl::StrAppend(&str, " ");
- }
- absl::StrAppend(&str, n);
- }
- }
- absl::StrAppend(&str, "]");
- return str;
-}
-
-// An overloaded operator<< makes it easy to dump the value of an OpenFd.
-std::ostream& operator<<(std::ostream& out, OpenFd const& ofd) {
- out << ofd.fd << " -> " << ofd.link;
- return out;
-}
-
-// An overloaded operator<< makes it easy to dump a vector of OpenFDs.
-std::ostream& operator<<(std::ostream& out, std::vector<OpenFd> const& v) {
- for (const auto& ofd : v) {
- out << ofd << std::endl;
- }
- return out;
-}
-
-PosixErrorOr<std::vector<OpenFd>> GetOpenFDs() {
- // Get the results from /proc/self/fd.
- ASSIGN_OR_RETURN_ERRNO(auto dir_list,
- ListDir("/proc/self/fd", /*skipdots=*/true));
-
- std::vector<OpenFd> ret_fds;
- for (const auto& str_fd : dir_list) {
- OpenFd open_fd = {};
- ASSIGN_OR_RETURN_ERRNO(open_fd.fd, Atoi<int>(str_fd));
- std::string path = absl::StrCat("/proc/self/fd/", open_fd.fd);
-
- // Resolve the link.
- char buf[PATH_MAX] = {};
- int ret = readlink(path.c_str(), buf, sizeof(buf));
- if (ret < 0) {
- if (errno == ENOENT) {
- // The FD may have been closed, let's be resilient.
- continue;
- }
-
- return PosixError(
- errno, absl::StrCat("readlink of ", path, " returned errno ", errno));
- }
- open_fd.link = std::string(buf, ret);
- ret_fds.emplace_back(std::move(open_fd));
- }
- return ret_fds;
-}
-
-PosixErrorOr<uint64_t> Links(const std::string& path) {
- struct stat st;
- if (stat(path.c_str(), &st)) {
- return PosixError(errno, absl::StrCat("Failed to stat ", path));
- }
- return static_cast<uint64_t>(st.st_nlink);
-}
-
-void RandomizeBuffer(void* buffer, size_t len) {
- struct timespec ts = {};
- clock_gettime(CLOCK_MONOTONIC, &ts);
- uint32_t seed = static_cast<uint32_t>(ts.tv_nsec);
- char* const buf = static_cast<char*>(buffer);
- for (size_t i = 0; i < len; i++) {
- buf[i] = rand_r(&seed) % 255;
- }
-}
-
-std::vector<std::vector<struct iovec>> GenerateIovecs(uint64_t total_size,
- void* buf,
- size_t buflen) {
- std::vector<std::vector<struct iovec>> result;
- for (uint64_t offset = 0; offset < total_size;) {
- auto& iovec_array = *result.emplace(result.end());
-
- for (; offset < total_size && iovec_array.size() < IOV_MAX;
- offset += buflen) {
- struct iovec iov = {};
- iov.iov_base = buf;
- iov.iov_len = std::min<uint64_t>(total_size - offset, buflen);
- iovec_array.push_back(iov);
- }
- }
-
- return result;
-}
-
-uint64_t Megabytes(uint64_t n) {
- // Overflow check, upper 20 bits in n shouldn't be set.
- TEST_CHECK(!(0xfffff00000000000 & n));
- return n << 20;
-}
-
-bool Equivalent(uint64_t current, uint64_t target, double tolerance) {
- auto abs_diff = target > current ? target - current : current - target;
- return abs_diff <= static_cast<uint64_t>(tolerance * target);
-}
-
-} // namespace testing
-} // namespace gvisor
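
For illustration (hypothetical test; buffer and total sizes chosen arbitrarily), GenerateIovecs, RandomizeBuffer, and Megabytes from this file might be combined to drive a large writev:

// Hypothetical usage sketch; not part of the original tree.
#include <fcntl.h>
#include <sys/uio.h>
#include <unistd.h>

#include <cstdint>
#include <vector>

#include "gtest/gtest.h"
#include "test/util/posix_error.h"
#include "test/util/temp_path.h"
#include "test/util/test_util.h"

namespace gvisor {
namespace testing {
namespace {

TEST(GenerateIovecsExample, LargeWritev) {
  const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
  const int fd = open(file.path().c_str(), O_WRONLY);
  ASSERT_THAT(fd, SyscallSucceeds());

  std::vector<char> buf(64 * 1024);  // One buffer shared by every iovec.
  RandomizeBuffer(buf.data(), buf.size());
  const uint64_t total = Megabytes(4);

  // Each inner vector holds at most IOV_MAX iovecs, all pointing at buf.
  for (auto& iovecs : GenerateIovecs(total, buf.data(), buf.size())) {
    ASSERT_THAT(RetryEINTR(writev)(fd, iovecs.data(), iovecs.size()),
                SyscallSucceeds());
  }
  EXPECT_THAT(close(fd), SyscallSucceeds());
}

}  // namespace
}  // namespace testing
}  // namespace gvisor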
diff --git a/test/util/test_util.h b/test/util/test_util.h
deleted file mode 100644
index bcbb388ed..000000000
--- a/test/util/test_util.h
+++ /dev/null
@@ -1,816 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Utilities for syscall testing.
-//
-// Initialization
-// ==============
-//
-// Prior to calling RUN_ALL_TESTS, all tests must use TestInit(&argc, &argv).
-// See the TestInit function for exact side-effects and semantics.
-//
-// Configuration
-// =============
-//
-// IsRunningOnGvisor returns true if the test is known to be running on gVisor.
-// GvisorPlatform can be used to get more detail:
-//
-// if (GvisorPlatform() == Platform::kPtrace) {
-// ...
-// }
-//
-// SetupGvisorDeathTest ensures that signal handling does not interfere with
-// tests that rely on fatal signals.
-//
-// Matchers
-// ========
-//
-// ElementOf(xs) matches if the matched value is equal to an element of the
-// container xs. Example:
-//
-// // PASS
-// EXPECT_THAT(1, ElementOf({0, 1, 2}));
-//
-// // FAIL
-// // Value of: 3
-// // Expected: one of {0, 1, 2}
-// // Actual: 3
-// EXPECT_THAT(3, ElementOf({0, 1, 2}));
-//
-// SyscallSucceeds() matches if the syscall is successful. A successful syscall
-// is defined by either a return value not equal to -1, or a return value of -1
-// with an errno of 0 (which is a possible successful return for e.g.
-// PTRACE_PEEK). Example:
-//
-// // PASS
-// EXPECT_THAT(open("/dev/null", O_RDONLY), SyscallSucceeds());
-//
-// // FAIL
-// // Value of: open("/", O_RDWR)
-// // Expected: not -1 (success)
-// // Actual: -1 (of type int), with errno 21 (Is a directory)
-// EXPECT_THAT(open("/", O_RDWR), SyscallSucceeds());
-//
-// SyscallSucceedsWithValue(m) matches if the syscall is successful, and the
-// value also matches m. Example:
-//
-// // PASS
-// EXPECT_THAT(read(4, buf, 8192), SyscallSucceedsWithValue(8192));
-//
-// // FAIL
-// // Value of: read(-1, buf, 8192)
-// // Expected: is equal to 8192
-// // Actual: -1 (of type long), with errno 9 (Bad file number)
-// EXPECT_THAT(read(-1, buf, 8192), SyscallSucceedsWithValue(8192));
-//
-// // FAIL
-// // Value of: read(4, buf, 1)
-// // Expected: is > 4096
-// // Actual: 1 (of type long)
-// EXPECT_THAT(read(4, buf, 1), SyscallSucceedsWithValue(Gt(4096)));
-//
-// SyscallFails() matches if the syscall is unsuccessful. An unsuccessful
-// syscall is defined by a return value of -1 with a non-zero errno. Example:
-//
-// // PASS
-// EXPECT_THAT(open("/", O_RDWR), SyscallFails());
-//
-// // FAIL
-// // Value of: open("/dev/null", O_RDONLY)
-// // Expected: -1 (failure)
-// // Actual: 0 (of type int)
-// EXPECT_THAT(open("/dev/null", O_RDONLY), SyscallFails());
-//
-// SyscallFailsWithErrno(m) matches if the syscall is unsuccessful, and errno
-// matches m. Example:
-//
-// // PASS
-// EXPECT_THAT(open("/", O_RDWR), SyscallFailsWithErrno(EISDIR));
-//
-// // PASS
-// EXPECT_THAT(open("/etc/passwd", O_RDWR | O_DIRECTORY),
-// SyscallFailsWithErrno(AnyOf(EACCES, ENOTDIR)));
-//
-// // FAIL
-// // Value of: open("/dev/null", O_RDONLY)
-// // Expected: -1 (failure) with errno 21 (Is a directory)
-// // Actual: 0 (of type int)
-// EXPECT_THAT(open("/dev/null", O_RDONLY), SyscallFailsWithErrno(EISDIR));
-//
-// // FAIL
-// // Value of: open("/", O_RDWR)
-// // Expected: -1 (failure) with errno 22 (Invalid argument)
-// // Actual: -1 (of type int), failure, but with errno 21 (Is a directory)
-// EXPECT_THAT(open("/", O_RDWR), SyscallFailsWithErrno(EINVAL));
-//
-// Because the syscall matchers encode save/restore functionality, their meaning
-// should not be inverted via Not. That is, AnyOf(SyscallSucceedsWithValue(1),
-// SyscallSucceedsWithValue(2)) is permitted, but not
-// Not(SyscallFailsWithErrno(EPERM)).
-//
-// Syscalls
-// ========
-//
-// RetryEINTR wraps a function that returns -1 and sets errno on failure
-// to be automatically retried when EINTR occurs. Example:
-//
-// auto rv = RetryEINTR(waitpid)(pid, &status, 0);
-//
-// ReadFd/WriteFd/PreadFd/PwriteFd are interface-compatible wrappers around the
-// read/write/pread/pwrite syscalls to handle both EINTR and partial
-// reads/writes. Example:
-//
-// EXPECT_THAT(ReadFd(fd, &buf, size), SyscallSucceedsWithValue(size));
-//
-// General Utilities
-// =================
-//
-// ApplyVec(f, xs) returns a vector containing the result of applying function
-// `f` to each value in `xs`.
-//
-// AllBitwiseCombinations takes a variadic number of ranges containing integers
-// and returns a vector containing every integer that can be formed by ORing
-// together exactly one integer from each list. List<T> is an alias for
-// std::initializer_list<T> that makes AllBitwiseCombinations more ergonomic to
-// use with list literals (initializer lists do not otherwise participate in
-// template argument deduction). Example:
-//
-// EXPECT_THAT(
-// AllBitwiseCombinations<int>(
-// List<int>{SOCK_DGRAM, SOCK_STREAM},
-// List<int>{0, SOCK_NONBLOCK}),
-// Contains({SOCK_DGRAM, SOCK_STREAM, SOCK_DGRAM | SOCK_NONBLOCK,
-// SOCK_STREAM | SOCK_NONBLOCK}));
-//
-// VecCat takes a variadic number of containers and returns a vector containing
-// the concatenated contents.
-//
-// VecAppend takes an initial container and a variadic number of containers and
-// appends each to the initial container.
-//
-// RandomizeBuffer fills the given buffer with pseudo-random bytes.
-//
-// GenerateIovecs will return the smallest number of iovec arrays for writing a
-// given total number of bytes to a file, each iovec array size up to IOV_MAX,
-// each iovec in each array pointing to the same buffer.
-
-#ifndef GVISOR_TEST_UTIL_TEST_UTIL_H_
-#define GVISOR_TEST_UTIL_TEST_UTIL_H_
-
-#include <stddef.h>
-#include <stdlib.h>
-#include <sys/uio.h>
-#include <time.h>
-#include <unistd.h>
-
-#include <algorithm>
-#include <cerrno>
-#include <initializer_list>
-#include <iterator>
-#include <string>
-#include <thread> // NOLINT: using std::thread::hardware_concurrency().
-#include <utility>
-#include <vector>
-
-#include "gmock/gmock.h"
-#include "absl/strings/str_cat.h"
-#include "absl/strings/str_format.h"
-#include "absl/strings/string_view.h"
-#include "absl/time/time.h"
-#include "test/util/fs_util.h"
-#include "test/util/logging.h"
-#include "test/util/posix_error.h"
-#include "test/util/save_util.h"
-
-namespace gvisor {
-namespace testing {
-
-constexpr char kTestOnGvisor[] = "TEST_ON_GVISOR";
-
-// TestInit must be called prior to RUN_ALL_TESTS.
-//
-// This parses all arguments and adjusts argc and argv appropriately.
-//
-// TestInit may create background threads.
-void TestInit(int* argc, char*** argv);
-
-// SKIP_IF may be used to skip a test case.
-//
-// These cases are still emitted, but a SKIPPED line will appear.
-#define SKIP_IF(expr) \
- do { \
- if (expr) GTEST_SKIP() << #expr; \
- } while (0)
-
-// Platform contains platform names.
-namespace Platform {
-constexpr char kNative[] = "native";
-constexpr char kPtrace[] = "ptrace";
-constexpr char kKVM[] = "kvm";
-constexpr char kFuchsia[] = "fuchsia";
-} // namespace Platform
-
-bool IsRunningOnGvisor();
-const std::string GvisorPlatform();
-bool IsRunningWithHostinet();
-// TODO(gvisor.dev/issue/1624): Delete once VFS1 is gone.
-bool IsRunningWithVFS1();
-bool IsFUSEEnabled();
-
-#ifdef __linux__
-void SetupGvisorDeathTest();
-#endif
-
-struct KernelVersion {
- int major;
- int minor;
- int micro;
-};
-
-bool operator==(const KernelVersion& first, const KernelVersion& second);
-
-PosixErrorOr<KernelVersion> ParseKernelVersion(absl::string_view vers_string);
-PosixErrorOr<KernelVersion> GetKernelVersion();
-
-static const size_t kPageSize = sysconf(_SC_PAGESIZE);
-
-enum class CPUVendor { kIntel, kAMD, kUnknownVendor };
-
-CPUVendor GetCPUVendor();
-
-inline int NumCPUs() { return std::thread::hardware_concurrency(); }
-
-// Converts cpu_set_t to a std::string for easy examination.
-std::string CPUSetToString(const cpu_set_t& set, size_t cpus = CPU_SETSIZE);
-
-struct OpenFd {
- // fd is the open file descriptor number.
- int fd = -1;
-
- // link is the resolution of the symbolic link.
- std::string link;
-};
-
-// Make it easier to log OpenFds to error streams.
-std::ostream& operator<<(std::ostream& out, std::vector<OpenFd> const& v);
-std::ostream& operator<<(std::ostream& out, OpenFd const& ofd);
-
-// Gets a detailed list of open fds for this process.
-PosixErrorOr<std::vector<OpenFd>> GetOpenFDs();
-
-// Returns the number of hard links to a path.
-PosixErrorOr<uint64_t> Links(const std::string& path);
-
-inline uint64_t ns_elapsed(const struct timespec& begin,
- const struct timespec& end) {
- return (end.tv_sec - begin.tv_sec) * 1000000000 +
- (end.tv_nsec - begin.tv_nsec);
-}
-
-inline uint64_t ms_elapsed(const struct timespec& begin,
- const struct timespec& end) {
- return ns_elapsed(begin, end) / 1000000;
-}
-
-namespace internal {
-
-template <typename Container>
-class ElementOfMatcher {
- public:
- explicit ElementOfMatcher(Container container)
- : container_(::std::move(container)) {}
-
- template <typename T>
- bool MatchAndExplain(T const& rv,
- ::testing::MatchResultListener* const listener) const {
- using std::count;
- return count(container_.begin(), container_.end(), rv) != 0;
- }
-
- void DescribeTo(::std::ostream* const os) const {
- *os << "one of {";
- char const* sep = "";
- for (auto const& elem : container_) {
- *os << sep << elem;
- sep = ", ";
- }
- *os << "}";
- }
-
- void DescribeNegationTo(::std::ostream* const os) const {
- *os << "none of {";
- char const* sep = "";
- for (auto const& elem : container_) {
- *os << sep << elem;
- sep = ", ";
- }
- *os << "}";
- }
-
- private:
- Container const container_;
-};
-
-template <typename E>
-class SyscallSuccessMatcher {
- public:
- explicit SyscallSuccessMatcher(E expected)
- : expected_(::std::move(expected)) {}
-
- template <typename T>
- operator ::testing::Matcher<T>() const {
- // E is one of three things:
- // - T, or a type losslessly and implicitly convertible to T.
- // - A monomorphic Matcher<T>.
- // - A polymorphic matcher.
- // SafeMatcherCast handles any of the above correctly.
- //
- // Similarly, gMock will invoke this conversion operator to obtain a
- // monomorphic matcher (this is how polymorphic matchers are implemented).
- return ::testing::MakeMatcher(
- new Impl<T>(::testing::SafeMatcherCast<T>(expected_)));
- }
-
- private:
- template <typename T>
- class Impl : public ::testing::MatcherInterface<T> {
- public:
- explicit Impl(::testing::Matcher<T> matcher)
- : matcher_(::std::move(matcher)) {}
-
- bool MatchAndExplain(
- T const& rv,
- ::testing::MatchResultListener* const listener) const override {
- if (rv == static_cast<decltype(rv)>(-1) && errno != 0) {
- *listener << "with errno " << PosixError(errno);
- return false;
- }
- bool match = matcher_.MatchAndExplain(rv, listener);
- if (match) {
- MaybeSave();
- }
- return match;
- }
-
- void DescribeTo(::std::ostream* const os) const override {
- matcher_.DescribeTo(os);
- }
-
- void DescribeNegationTo(::std::ostream* const os) const override {
- matcher_.DescribeNegationTo(os);
- }
-
- private:
- ::testing::Matcher<T> matcher_;
- };
-
- private:
- E expected_;
-};
-
-// A polymorphic matcher equivalent to ::testing::internal::AnyMatcher, except
-// not in namespace ::testing::internal, and describing SyscallSucceeds()'s
-// match constraints (which are enforced by SyscallSuccessMatcher::Impl).
-class AnySuccessValueMatcher {
- public:
- template <typename T>
- operator ::testing::Matcher<T>() const {
- return ::testing::MakeMatcher(new Impl<T>());
- }
-
- private:
- template <typename T>
- class Impl : public ::testing::MatcherInterface<T> {
- public:
- bool MatchAndExplain(
- T const& rv,
- ::testing::MatchResultListener* const listener) const override {
- return true;
- }
-
- void DescribeTo(::std::ostream* const os) const override {
- *os << "not -1 (success)";
- }
-
- void DescribeNegationTo(::std::ostream* const os) const override {
- *os << "-1 (failure)";
- }
- };
-};
-
-class SyscallFailureMatcher {
- public:
- explicit SyscallFailureMatcher(::testing::Matcher<int> errno_matcher)
- : errno_matcher_(std::move(errno_matcher)) {}
-
- template <typename T>
- bool MatchAndExplain(T const& rv,
- ::testing::MatchResultListener* const listener) const {
- if (rv != static_cast<decltype(rv)>(-1)) {
- return false;
- }
- int actual_errno = errno;
- *listener << "with errno " << PosixError(actual_errno);
- bool match = errno_matcher_.MatchAndExplain(actual_errno, listener);
- if (match) {
- MaybeSave();
- }
- return match;
- }
-
- void DescribeTo(::std::ostream* const os) const {
- *os << "-1 (failure), with errno ";
- errno_matcher_.DescribeTo(os);
- }
-
- void DescribeNegationTo(::std::ostream* const os) const {
- *os << "not -1 (success), with errno ";
- errno_matcher_.DescribeNegationTo(os);
- }
-
- private:
- ::testing::Matcher<int> errno_matcher_;
-};
-
-class SpecificErrnoMatcher : public ::testing::MatcherInterface<int> {
- public:
- explicit SpecificErrnoMatcher(int const expected) : expected_(expected) {}
-
- bool MatchAndExplain(
- int const actual_errno,
- ::testing::MatchResultListener* const listener) const override {
- return actual_errno == expected_;
- }
-
- void DescribeTo(::std::ostream* const os) const override {
- *os << PosixError(expected_);
- }
-
- void DescribeNegationTo(::std::ostream* const os) const override {
- *os << "not " << PosixError(expected_);
- }
-
- private:
- int const expected_;
-};
-
-inline ::testing::Matcher<int> SpecificErrno(int const expected) {
- return ::testing::MakeMatcher(new SpecificErrnoMatcher(expected));
-}
-
-} // namespace internal
-
-template <typename Container>
-inline ::testing::PolymorphicMatcher<internal::ElementOfMatcher<Container>>
-ElementOf(Container container) {
- return ::testing::MakePolymorphicMatcher(
- internal::ElementOfMatcher<Container>(::std::move(container)));
-}
-
-template <typename T>
-inline ::testing::PolymorphicMatcher<
- internal::ElementOfMatcher<::std::vector<T>>>
-ElementOf(::std::initializer_list<T> elems) {
- return ::testing::MakePolymorphicMatcher(
- internal::ElementOfMatcher<::std::vector<T>>(::std::vector<T>(elems)));
-}
-
-template <typename E>
-inline internal::SyscallSuccessMatcher<E> SyscallSucceedsWithValue(E expected) {
- return internal::SyscallSuccessMatcher<E>(::std::move(expected));
-}
-
-inline internal::SyscallSuccessMatcher<internal::AnySuccessValueMatcher>
-SyscallSucceeds() {
- return SyscallSucceedsWithValue(
- ::gvisor::testing::internal::AnySuccessValueMatcher());
-}
-
-inline ::testing::PolymorphicMatcher<internal::SyscallFailureMatcher>
-SyscallFailsWithErrno(::testing::Matcher<int> expected) {
- return ::testing::MakePolymorphicMatcher(
- internal::SyscallFailureMatcher(::std::move(expected)));
-}
-
-// Overload taking an int so that SyscallFailsWithErrno(<specific errno>) uses
-// internal::SpecificErrno (which stringifies the errno) rather than
-// ::testing::Eq (which doesn't).
-inline ::testing::PolymorphicMatcher<internal::SyscallFailureMatcher>
-SyscallFailsWithErrno(int const expected) {
- return SyscallFailsWithErrno(internal::SpecificErrno(expected));
-}
-
-inline ::testing::PolymorphicMatcher<internal::SyscallFailureMatcher>
-SyscallFails() {
- return SyscallFailsWithErrno(::testing::Gt(0));
-}
-
-// As of GCC 7.2, -Wall => -Wc++17-compat => -Wnoexcept-type generates an
-// irrelevant, non-actionable warning about ABI compatibility when
-// RetryEINTRImpl is constructed with a noexcept function, such as glibc's
-// syscall(). See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80985.
-#if defined(__GNUC__) && !defined(__clang__) && \
- (__GNUC__ > 7 || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2))
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wnoexcept-type"
-#endif
-
-namespace internal {
-
-template <typename F>
-struct RetryEINTRImpl {
- F const f;
-
- explicit constexpr RetryEINTRImpl(F f) : f(std::move(f)) {}
-
- template <typename... Args>
- auto operator()(Args&&... args) const
- -> decltype(f(std::forward<Args>(args)...)) {
- while (true) {
- errno = 0;
- auto const ret = f(std::forward<Args>(args)...);
- if (ret != -1 || errno != EINTR) {
- return ret;
- }
- }
- }
-};
-
-} // namespace internal
-
-template <typename F>
-constexpr internal::RetryEINTRImpl<F> RetryEINTR(F&& f) {
- return internal::RetryEINTRImpl<F>(std::forward<F>(f));
-}
-
-#if defined(__GNUC__) && !defined(__clang__) && \
- (__GNUC__ > 7 || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2))
-#pragma GCC diagnostic pop
-#endif
-
-namespace internal {
-
-template <typename F>
-ssize_t ApplyFileIoSyscall(F const& f, size_t const count) {
- size_t completed = 0;
- // `do ... while` because some callers actually want to make a syscall with a
- // count of 0.
- do {
- auto const cur = RetryEINTR(f)(completed);
- if (cur < 0) {
- return cur;
- } else if (cur == 0) {
- break;
- }
- completed += cur;
- } while (completed < count);
- return completed;
-}
-
-} // namespace internal
-
-inline PosixErrorOr<std::string> ReadAllFd(int fd) {
- std::string all;
- all.reserve(128 * 1024); // arbitrary.
-
- std::vector<char> buffer(16 * 1024);
- for (;;) {
- auto const bytes = RetryEINTR(read)(fd, buffer.data(), buffer.size());
- if (bytes < 0) {
- return PosixError(errno, "file read");
- }
- if (bytes == 0) {
- return std::move(all);
- }
- if (bytes > 0) {
- all.append(buffer.data(), bytes);
- }
- }
-}
-
-inline ssize_t ReadFd(int fd, void* buf, size_t count) {
- return internal::ApplyFileIoSyscall(
- [&](size_t completed) {
- return read(fd, static_cast<char*>(buf) + completed, count - completed);
- },
- count);
-}
-
-inline ssize_t WriteFd(int fd, void const* buf, size_t count) {
- return internal::ApplyFileIoSyscall(
- [&](size_t completed) {
- return write(fd, static_cast<char const*>(buf) + completed,
- count - completed);
- },
- count);
-}
-
-inline ssize_t PreadFd(int fd, void* buf, size_t count, off_t offset) {
- return internal::ApplyFileIoSyscall(
- [&](size_t completed) {
- return pread(fd, static_cast<char*>(buf) + completed, count - completed,
- offset + completed);
- },
- count);
-}
-
-inline ssize_t PwriteFd(int fd, void const* buf, size_t count, off_t offset) {
- return internal::ApplyFileIoSyscall(
- [&](size_t completed) {
- return pwrite(fd, static_cast<char const*>(buf) + completed,
- count - completed, offset + completed);
- },
- count);
-}
-
-template <typename T>
-using List = std::initializer_list<T>;
-
-namespace internal {
-
-template <typename T>
-void AppendAllBitwiseCombinations(std::vector<T>* combinations, T current) {
- combinations->push_back(current);
-}
-
-template <typename T, typename Arg, typename... Args>
-void AppendAllBitwiseCombinations(std::vector<T>* combinations, T current,
- Arg&& next, Args&&... rest) {
- for (auto const option : next) {
- AppendAllBitwiseCombinations(combinations, current | option, rest...);
- }
-}
-
-inline size_t CombinedSize(size_t accum) { return accum; }
-
-template <typename T, typename... Args>
-size_t CombinedSize(size_t accum, T const& x, Args&&... xs) {
- return CombinedSize(accum + x.size(), std::forward<Args>(xs)...);
-}
-
-// Base case: no more containers, so do nothing.
-template <typename T>
-void DoMoveExtendContainer(T* c) {}
-
-// Append each container next to c.
-template <typename T, typename U, typename... Args>
-void DoMoveExtendContainer(T* c, U&& next, Args&&... rest) {
- std::move(std::begin(next), std::end(next), std::back_inserter(*c));
- DoMoveExtendContainer(c, std::forward<Args>(rest)...);
-}
-
-} // namespace internal
-
-template <typename T = int>
-std::vector<T> AllBitwiseCombinations() {
- return std::vector<T>();
-}
-
-template <typename T = int, typename... Args>
-std::vector<T> AllBitwiseCombinations(Args&&... args) {
- std::vector<T> combinations;
- internal::AppendAllBitwiseCombinations(&combinations, 0, args...);
- return combinations;
-}
-
-template <typename T, typename U, typename F>
-std::vector<T> ApplyVec(F const& f, std::vector<U> const& us) {
- std::vector<T> vec;
- vec.reserve(us.size());
- for (auto const& u : us) {
- vec.push_back(f(u));
- }
- return vec;
-}
-
-template <typename T, typename U>
-std::vector<T> ApplyVecToVec(std::vector<std::function<T(U)>> const& fs,
- std::vector<U> const& us) {
- std::vector<T> vec;
- vec.reserve(us.size() * fs.size());
- for (auto const& f : fs) {
- for (auto const& u : us) {
- vec.push_back(f(u));
- }
- }
- return vec;
-}
-
-// Moves all elements from the containers `args` to the end of `c`.
-template <typename T, typename... Args>
-void VecAppend(T* c, Args&&... args) {
- c->reserve(internal::CombinedSize(c->size(), args...));
- internal::DoMoveExtendContainer(c, std::forward<Args>(args)...);
-}
-
-// Returns a vector containing the concatenated contents of the containers
-// `args`.
-template <typename T, typename... Args>
-std::vector<T> VecCat(Args&&... args) {
- std::vector<T> combined;
- VecAppend(&combined, std::forward<Args>(args)...);
- return combined;
-}
-
-#define RETURN_ERROR_IF_SYSCALL_FAIL(syscall) \
- do { \
- if ((syscall) < 0 && errno != 0) { \
- return PosixError(errno, #syscall); \
- } \
- } while (false)
-
-// Fill the given buffer with random bytes.
-void RandomizeBuffer(void* buffer, size_t len);
-
-template <typename T>
-inline PosixErrorOr<T> Atoi(absl::string_view str) {
- T ret;
- if (!absl::SimpleAtoi<T>(str, &ret)) {
- return PosixError(EINVAL, "String not a number.");
- }
- return ret;
-}
-
-inline PosixErrorOr<uint64_t> AtoiBase(absl::string_view str, int base) {
- if (base > 255 || base < 2) {
- return PosixError(EINVAL, "Invalid Base");
- }
-
- uint64_t ret = 0;
- if (!absl::numbers_internal::safe_strtou64_base(str, &ret, base)) {
- return PosixError(EINVAL, "String not a number.");
- }
-
- return ret;
-}
-
-inline PosixErrorOr<double> Atod(absl::string_view str) {
- double ret;
- if (!absl::SimpleAtod(str, &ret)) {
- return PosixError(EINVAL, "String not a double type.");
- }
- return ret;
-}
-
-inline PosixErrorOr<float> Atof(absl::string_view str) {
- float ret;
- if (!absl::SimpleAtof(str, &ret)) {
- return PosixError(EINVAL, "String not a float type.");
- }
- return ret;
-}
-
-// Returns the smallest number of iovec arrays that can be used to write
-// "total_size" bytes, with each iovec in each array pointing at the same
-// buffer "buf".
-std::vector<std::vector<struct iovec>> GenerateIovecs(uint64_t total_size,
- void* buf, size_t buflen);
-
-// Returns the number of bytes in 'n' megabytes. Used for readability.
-uint64_t Megabytes(uint64_t n);
-
-// Predicate for checking that a value is within some tolerance of another
-// value. Returns true iff current is in the range [target * (1 - tolerance),
-// target * (1 + tolerance)].
-bool Equivalent(uint64_t current, uint64_t target, double tolerance);
-
-// Matcher wrapping the Equivalent predicate.
-MATCHER_P2(EquivalentWithin, target, tolerance,
- std::string(negation ? "Isn't" : "Is") +
- ::absl::StrFormat(" within %.2f%% of the target of %zd bytes",
- tolerance * 100, target)) {
- if (target == 0) {
- *result_listener << ::absl::StreamFormat("difference of infinity%%");
- } else {
- int64_t delta = static_cast<int64_t>(arg) - static_cast<int64_t>(target);
- double delta_percent =
- static_cast<double>(delta) / static_cast<double>(target) * 100;
- *result_listener << ::absl::StreamFormat("difference of %.2f%%",
- delta_percent);
- }
- return Equivalent(arg, target, tolerance);
-}
-
-// Returns the absolute path to a data dependency. 'path' is the runfile
-// location relative to workspace root.
-#ifdef __linux__
-std::string RunfilePath(std::string path);
-#endif
-
-void TestInit(int* argc, char*** argv);
-int RunAllTests(void);
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_UTIL_TEST_UTIL_H_
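
A hypothetical end-to-end sketch tying several of the helpers above together (ReadFd/WriteFd, RandomizeBuffer, and the syscall matchers); the test name and buffer size are invented:

// Hypothetical usage sketch; not part of the original tree.
#include <unistd.h>

#include <vector>

#include "gtest/gtest.h"
#include "test/util/test_util.h"

namespace gvisor {
namespace testing {
namespace {

TEST(TestUtilExample, PipeRoundTrip) {
  int fds[2];
  ASSERT_THAT(pipe(fds), SyscallSucceeds());

  constexpr int kSize = 4096;  // Small enough to fit in the pipe buffer.
  std::vector<char> out(kSize);
  RandomizeBuffer(out.data(), out.size());

  // WriteFd/ReadFd retry on EINTR and loop over partial transfers.
  ASSERT_THAT(WriteFd(fds[1], out.data(), out.size()),
              SyscallSucceedsWithValue(kSize));
  std::vector<char> in(kSize);
  ASSERT_THAT(ReadFd(fds[0], in.data(), in.size()),
              SyscallSucceedsWithValue(kSize));
  EXPECT_EQ(out, in);

  EXPECT_THAT(close(fds[0]), SyscallSucceeds());
  EXPECT_THAT(close(fds[1]), SyscallSucceeds());
}

}  // namespace
}  // namespace testing
}  // namespace gvisor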
diff --git a/test/util/test_util_impl.cc b/test/util/test_util_impl.cc
deleted file mode 100644
index 6b6826898..000000000
--- a/test/util/test_util_impl.cc
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <signal.h>
-
-#include "gtest/gtest.h"
-#include "absl/flags/flag.h"
-#include "absl/flags/parse.h"
-#include "benchmark/benchmark.h"
-#include "test/util/logging.h"
-
-extern bool FLAGS_gtest_list_tests;
-extern bool FLAGS_benchmark_list_tests;
-extern std::string FLAGS_benchmark_filter;
-
-namespace gvisor {
-namespace testing {
-
-void SetupGvisorDeathTest() {}
-
-void TestInit(int* argc, char*** argv) {
- ::testing::InitGoogleTest(argc, *argv);
- benchmark::Initialize(argc, *argv);
- ::absl::ParseCommandLine(*argc, *argv);
-
- // Always mask SIGPIPE as it's common and tests aren't expected to handle it.
- struct sigaction sa = {};
- sa.sa_handler = SIG_IGN;
- TEST_CHECK(sigaction(SIGPIPE, &sa, nullptr) == 0);
-}
-
-int RunAllTests() {
- if (::testing::FLAGS_gtest_list_tests) {
- return RUN_ALL_TESTS();
- }
- if (FLAGS_benchmark_list_tests) {
- benchmark::RunSpecifiedBenchmarks();
- return 0;
- }
-
- // Run selected tests & benchmarks.
- int rc = RUN_ALL_TESTS();
- benchmark::RunSpecifiedBenchmarks();
- return rc;
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/util/test_util_runfiles.cc b/test/util/test_util_runfiles.cc
deleted file mode 100644
index 7210094eb..000000000
--- a/test/util/test_util_runfiles.cc
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <iostream>
-#include <string>
-
-#include "test/util/fs_util.h"
-#include "test/util/test_util.h"
-#include "tools/cpp/runfiles/runfiles.h"
-
-namespace gvisor {
-namespace testing {
-
-std::string RunfilePath(std::string path) {
- static const bazel::tools::cpp::runfiles::Runfiles* const runfiles = [] {
- std::string error;
- auto* runfiles =
- bazel::tools::cpp::runfiles::Runfiles::CreateForTest(&error);
- if (runfiles == nullptr) {
- std::cerr << "Unable to find runfiles: " << error << std::endl;
- }
- return runfiles;
- }();
-
- if (!runfiles) {
- // Can't find runfiles? This probably won't work, but __main__/path is our
- // best guess.
- return JoinPath("__main__", path);
- }
-
- return runfiles->Rlocation(JoinPath("__main__", path));
-}
-
-} // namespace testing
-} // namespace gvisor
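
A brief, hypothetical usage note for RunfilePath (the data path below is illustrative, not an actual build target):

// Hypothetical usage sketch; not part of the original tree. Linux only.
#include <string>

#include "test/util/test_util.h"

// Resolves a workspace-relative data dependency via Bazel runfiles.
std::string WorkloadPath() {
  return gvisor::testing::RunfilePath("test/syscalls/linux/example_workload");
}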
diff --git a/test/util/test_util_test.cc b/test/util/test_util_test.cc
deleted file mode 100644
index f42100374..000000000
--- a/test/util/test_util_test.cc
+++ /dev/null
@@ -1,251 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/util/test_util.h"
-
-#include <errno.h>
-
-#include <vector>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-
-using ::testing::AnyOf;
-using ::testing::Gt;
-using ::testing::IsEmpty;
-using ::testing::Lt;
-using ::testing::Not;
-using ::testing::TypedEq;
-using ::testing::UnorderedElementsAre;
-using ::testing::UnorderedElementsAreArray;
-
-namespace gvisor {
-namespace testing {
-
-namespace {
-
-TEST(KernelVersionParsing, ValidateParsing) {
- KernelVersion v = ASSERT_NO_ERRNO_AND_VALUE(
- ParseKernelVersion("4.18.10-1foo2-amd64 baz blah"));
- ASSERT_TRUE(v == KernelVersion({4, 18, 10}));
-
- v = ASSERT_NO_ERRNO_AND_VALUE(ParseKernelVersion("4.18.10-1foo2-amd64"));
- ASSERT_TRUE(v == KernelVersion({4, 18, 10}));
-
- v = ASSERT_NO_ERRNO_AND_VALUE(ParseKernelVersion("4.18.10-14-amd64"));
- ASSERT_TRUE(v == KernelVersion({4, 18, 10}));
-
- v = ASSERT_NO_ERRNO_AND_VALUE(ParseKernelVersion("4.18.10-amd64"));
- ASSERT_TRUE(v == KernelVersion({4, 18, 10}));
-
- v = ASSERT_NO_ERRNO_AND_VALUE(ParseKernelVersion("4.18.10"));
- ASSERT_TRUE(v == KernelVersion({4, 18, 10}));
-
- v = ASSERT_NO_ERRNO_AND_VALUE(ParseKernelVersion("4.0.10"));
- ASSERT_TRUE(v == KernelVersion({4, 0, 10}));
-
- v = ASSERT_NO_ERRNO_AND_VALUE(ParseKernelVersion("4.0"));
- ASSERT_TRUE(v == KernelVersion({4, 0, 0}));
-
- ASSERT_THAT(ParseKernelVersion("4.a"), PosixErrorIs(EINVAL, ::testing::_));
- ASSERT_THAT(ParseKernelVersion("3"), PosixErrorIs(EINVAL, ::testing::_));
- ASSERT_THAT(ParseKernelVersion(""), PosixErrorIs(EINVAL, ::testing::_));
- ASSERT_THAT(ParseKernelVersion("version 3.3.10"),
- PosixErrorIs(EINVAL, ::testing::_));
-}
-
-TEST(MatchersTest, SyscallSucceeds) {
- EXPECT_THAT(0, SyscallSucceeds());
- EXPECT_THAT(0L, SyscallSucceeds());
-
- errno = 0;
- EXPECT_THAT(-1, SyscallSucceeds());
- EXPECT_THAT(-1L, SyscallSucceeds());
-
- errno = ENOMEM;
- EXPECT_THAT(-1, Not(SyscallSucceeds()));
- EXPECT_THAT(-1L, Not(SyscallSucceeds()));
-}
-
-TEST(MatchersTest, SyscallSucceedsWithValue) {
- EXPECT_THAT(0, SyscallSucceedsWithValue(0));
- EXPECT_THAT(1, SyscallSucceedsWithValue(Lt(3)));
- EXPECT_THAT(-1, Not(SyscallSucceedsWithValue(Lt(3))));
- EXPECT_THAT(4, Not(SyscallSucceedsWithValue(Lt(3))));
-
- // Non-int -1
- EXPECT_THAT(-1L, Not(SyscallSucceedsWithValue(0)));
-
- // Non-int, truncates to -1 if converted to int, with expected value
- EXPECT_THAT(0xffffffffL, SyscallSucceedsWithValue(0xffffffffL));
-
- // Non-int, truncates to -1 if converted to int, with monomorphic matcher
- EXPECT_THAT(0xffffffffL,
- SyscallSucceedsWithValue(TypedEq<long>(0xffffffffL)));
-
- // Non-int, truncates to -1 if converted to int, with polymorphic matcher
- EXPECT_THAT(0xffffffffL, SyscallSucceedsWithValue(Gt(1)));
-}
-
-TEST(MatchersTest, SyscallFails) {
- EXPECT_THAT(0, Not(SyscallFails()));
- EXPECT_THAT(0L, Not(SyscallFails()));
-
- errno = 0;
- EXPECT_THAT(-1, Not(SyscallFails()));
- EXPECT_THAT(-1L, Not(SyscallFails()));
-
- errno = ENOMEM;
- EXPECT_THAT(-1, SyscallFails());
- EXPECT_THAT(-1L, SyscallFails());
-}
-
-TEST(MatchersTest, SyscallFailsWithErrno) {
- EXPECT_THAT(0, Not(SyscallFailsWithErrno(EINVAL)));
- EXPECT_THAT(0L, Not(SyscallFailsWithErrno(EINVAL)));
-
- errno = ENOMEM;
- EXPECT_THAT(-1, Not(SyscallFailsWithErrno(EINVAL)));
- EXPECT_THAT(-1L, Not(SyscallFailsWithErrno(EINVAL)));
-
- errno = EINVAL;
- EXPECT_THAT(-1, SyscallFailsWithErrno(EINVAL));
- EXPECT_THAT(-1L, SyscallFailsWithErrno(EINVAL));
-
- EXPECT_THAT(-1, SyscallFailsWithErrno(AnyOf(EINVAL, ENOMEM)));
- EXPECT_THAT(-1L, SyscallFailsWithErrno(AnyOf(EINVAL, ENOMEM)));
-
- std::vector<int> expected_errnos({EINVAL, ENOMEM});
- errno = ENOMEM;
- EXPECT_THAT(-1, SyscallFailsWithErrno(ElementOf(expected_errnos)));
- EXPECT_THAT(-1L, SyscallFailsWithErrno(ElementOf(expected_errnos)));
-}
-
-TEST(AllBitwiseCombinationsTest, NoArguments) {
- EXPECT_THAT(AllBitwiseCombinations(), IsEmpty());
-}
-
-TEST(AllBitwiseCombinationsTest, EmptyList) {
- EXPECT_THAT(AllBitwiseCombinations(List<int>{}), IsEmpty());
-}
-
-TEST(AllBitwiseCombinationsTest, SingleElementList) {
- EXPECT_THAT(AllBitwiseCombinations(List<int>{5}), UnorderedElementsAre(5));
-}
-
-TEST(AllBitwiseCombinationsTest, SingleList) {
- EXPECT_THAT(AllBitwiseCombinations(List<int>{0, 1, 2, 4}),
- UnorderedElementsAre(0, 1, 2, 4));
-}
-
-TEST(AllBitwiseCombinationsTest, MultipleLists) {
- EXPECT_THAT(
- AllBitwiseCombinations(List<int>{0, 1, 2, 3}, List<int>{0, 4, 8, 12}),
- UnorderedElementsAreArray(
- {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}));
-}
-
-TEST(RandomizeBuffer, Works) {
- const std::vector<char> original(4096);
- std::vector<char> buffer = original;
- RandomizeBuffer(buffer.data(), buffer.size());
- EXPECT_NE(buffer, original);
-}
-
-// Enable comparison of vectors of iovec arrays for the following test.
-MATCHER_P(IovecsListEq, expected, "") {
- if (arg.size() != expected.size()) {
- *result_listener << "sizes are different (actual: " << arg.size()
- << ", expected: " << expected.size() << ")";
- return false;
- }
-
- for (uint64_t i = 0; i < expected.size(); ++i) {
- const std::vector<struct iovec>& actual_iovecs = arg[i];
- const std::vector<struct iovec>& expected_iovecs = expected[i];
- if (actual_iovecs.size() != expected_iovecs.size()) {
- *result_listener << "iovec array size at position " << i
- << " is different (actual: " << actual_iovecs.size()
- << ", expected: " << expected_iovecs.size() << ")";
- return false;
- }
-
- for (uint64_t j = 0; j < expected_iovecs.size(); ++j) {
- const struct iovec& actual_iov = actual_iovecs[j];
- const struct iovec& expected_iov = expected_iovecs[j];
- if (actual_iov.iov_base != expected_iov.iov_base) {
- *result_listener << "iovecs in array " << i << " at position " << j
- << " are different (expected iov_base: "
- << expected_iov.iov_base
- << ", got: " << actual_iov.iov_base << ")";
- return false;
- }
- if (actual_iov.iov_len != expected_iov.iov_len) {
- *result_listener << "iovecs in array " << i << " at position " << j
- << " are different (expected iov_len: "
- << expected_iov.iov_len
- << ", got: " << actual_iov.iov_len << ")";
- return false;
- }
- }
- }
-
- return true;
-}
-
-// Verify empty iovec list generation.
-TEST(GenerateIovecs, EmptyList) {
- std::vector<char> buffer = {'a', 'b', 'c'};
-
- EXPECT_THAT(GenerateIovecs(0, buffer.data(), buffer.size()),
- IovecsListEq(std::vector<std::vector<struct iovec>>()));
-}
-
-// Verify generating a single array of only one, partial, iovec.
-TEST(GenerateIovecs, OneArray) {
- std::vector<char> buffer = {'a', 'b', 'c'};
-
- std::vector<std::vector<struct iovec>> expected;
- struct iovec iov = {};
- iov.iov_base = buffer.data();
- iov.iov_len = 2;
- expected.push_back(std::vector<struct iovec>({iov}));
- EXPECT_THAT(GenerateIovecs(2, buffer.data(), buffer.size()),
- IovecsListEq(expected));
-}
-
-// Verify that it wraps around after IOV_MAX iovecs.
-TEST(GenerateIovecs, WrapsAtIovMax) {
- std::vector<char> buffer = {'a', 'b', 'c'};
-
- std::vector<std::vector<struct iovec>> expected;
- struct iovec iov = {};
- iov.iov_base = buffer.data();
- iov.iov_len = buffer.size();
- expected.emplace_back();
- for (int i = 0; i < IOV_MAX; ++i) {
- expected[0].push_back(iov);
- }
- iov.iov_len = 1;
- expected.push_back(std::vector<struct iovec>({iov}));
-
- EXPECT_THAT(
- GenerateIovecs(IOV_MAX * buffer.size() + 1, buffer.data(), buffer.size()),
- IovecsListEq(expected));
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/util/thread_util.h b/test/util/thread_util.h
deleted file mode 100644
index 923c4fe10..000000000
--- a/test/util/thread_util.h
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_UTIL_THREAD_UTIL_H_
-#define GVISOR_TEST_UTIL_THREAD_UTIL_H_
-
-#include <pthread.h>
-#ifdef __linux__
-#include <sys/syscall.h>
-#endif
-#include <unistd.h>
-
-#include <functional>
-#include <utility>
-
-#include "test/util/logging.h"
-
-namespace gvisor {
-namespace testing {
-
-// ScopedThread is a minimal wrapper around pthreads.
-//
-// This is used in lieu of more complex mechanisms because it provides very
-// predictable behavior (no messing with timers, etc.). The thread will be
-// automatically joined when it is destructed (goes out of scope), but can be
-// joined manually as well.
-class ScopedThread {
- public:
- // Constructs a thread that executes f exactly once.
- explicit ScopedThread(std::function<void*()> f) : f_(std::move(f)) {
- CreateThread();
- }
-
- explicit ScopedThread(const std::function<void()>& f) {
- f_ = [=] {
- f();
- return nullptr;
- };
- CreateThread();
- }
-
- ScopedThread(const ScopedThread& other) = delete;
- ScopedThread& operator=(const ScopedThread& other) = delete;
-
- // Joins the thread.
- ~ScopedThread() { Join(); }
-
- // Waits until this thread has finished executing. Join is idempotent and may
- // be called multiple times; however, Join itself is not thread-safe.
- void* Join() {
- if (!joined_) {
- TEST_PCHECK(pthread_join(pt_, &retval_) == 0);
- joined_ = true;
- }
- return retval_;
- }
-
- private:
- void CreateThread() {
- TEST_PCHECK_MSG(pthread_create(
- &pt_, /* attr = */ nullptr,
- +[](void* arg) -> void* {
- return static_cast<ScopedThread*>(arg)->f_();
- },
- this) == 0,
- "thread creation failed");
- }
-
- std::function<void*()> f_;
- pthread_t pt_;
- bool joined_ = false;
- void* retval_ = nullptr;
-};
-
-#ifdef __linux__
-inline pid_t gettid() { return syscall(SYS_gettid); }
-#endif
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_UTIL_THREAD_UTIL_H_
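
The point of ScopedThread is the join-on-destruction behavior described above: the thread body cannot outlive the enclosing scope, so tests do not leak threads on early returns or failed assertions. A minimal usage sketch (test name and body are illustrative):

    // Sketch: run work concurrently; Join happens implicitly at scope exit.
    #include <atomic>

    #include "gtest/gtest.h"
    #include "test/util/thread_util.h"

    TEST(ScopedThreadExample, JoinsOnScopeExit) {
      std::atomic<int> counter(0);
      {
        gvisor::testing::ScopedThread t([&counter] { counter++; });
        // ~ScopedThread calls Join() here, so the increment is visible below.
      }
      EXPECT_EQ(counter.load(), 1);
    }
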
diff --git a/test/util/time_util.cc b/test/util/time_util.cc
deleted file mode 100644
index 1ddfbfc9c..000000000
--- a/test/util/time_util.cc
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/util/time_util.h"
-
-#include <sys/syscall.h>
-#include <unistd.h>
-
-#include "absl/time/time.h"
-
-namespace gvisor {
-namespace testing {
-
-void SleepSafe(absl::Duration duration) {
- if (duration == absl::ZeroDuration()) {
- return;
- }
-
- struct timespec ts = absl::ToTimespec(duration);
- int ret;
- while (1) {
- ret = syscall(__NR_nanosleep, &ts, &ts);
- if (ret == 0 || (ret <= 0 && errno != EINTR)) {
- break;
- }
- }
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/util/time_util.h b/test/util/time_util.h
deleted file mode 100644
index f3ddc9fde..000000000
--- a/test/util/time_util.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_UTIL_TIME_UTIL_H_
-#define GVISOR_TEST_UTIL_TIME_UTIL_H_
-
-#include "absl/time/time.h"
-
-namespace gvisor {
-namespace testing {
-
-// Sleep for at least the specified duration. Avoids glibc.
-void SleepSafe(absl::Duration duration);
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_UTIL_TIME_UTIL_H_
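
SleepSafe issues the raw nanosleep syscall and restarts it on EINTR, so sleeps are not cut short by signals and do not depend on glibc wrappers. A short usage sketch (the function name is illustrative):

    // Sketch: sleep for a fixed duration, restarting on EINTR.
    #include "absl/time/time.h"
    #include "test/util/time_util.h"

    void PauseBriefly() {
      gvisor::testing::SleepSafe(absl::Milliseconds(100));
    }
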
diff --git a/test/util/timer_util.cc b/test/util/timer_util.cc
deleted file mode 100644
index 75cfc4f40..000000000
--- a/test/util/timer_util.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/util/timer_util.h"
-
-namespace gvisor {
-namespace testing {
-
-absl::Time Now(clockid_t id) {
- struct timespec now;
- TEST_PCHECK(clock_gettime(id, &now) == 0);
- return absl::TimeFromTimespec(now);
-}
-
-#ifdef __linux__
-
-PosixErrorOr<IntervalTimer> TimerCreate(clockid_t clockid,
- const struct sigevent& sev) {
- int timerid;
- int ret = syscall(SYS_timer_create, clockid, &sev, &timerid);
- if (ret < 0) {
- return PosixError(errno, "timer_create");
- }
- if (ret > 0) {
- return PosixError(EINVAL, "timer_create should never return positive");
- }
- MaybeSave();
- return IntervalTimer(timerid);
-}
-
-#endif // __linux__
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/util/timer_util.h b/test/util/timer_util.h
deleted file mode 100644
index e389108ef..000000000
--- a/test/util/timer_util.h
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_UTIL_TIMER_UTIL_H_
-#define GVISOR_TEST_UTIL_TIMER_UTIL_H_
-
-#include <errno.h>
-#ifdef __linux__
-#include <sys/syscall.h>
-#endif
-#include <sys/time.h>
-
-#include <functional>
-
-#include "gmock/gmock.h"
-#include "absl/time/time.h"
-#include "test/util/cleanup.h"
-#include "test/util/logging.h"
-#include "test/util/posix_error.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-// From Linux's include/uapi/asm-generic/siginfo.h.
-#ifndef sigev_notify_thread_id
-#define sigev_notify_thread_id _sigev_un._tid
-#endif
-
-// Returns the current time.
-absl::Time Now(clockid_t id);
-
-// MonotonicTimer is a simple timer that uses a monotonic clock.
-class MonotonicTimer {
- public:
- MonotonicTimer() {}
- absl::Duration Duration() {
- struct timespec ts;
- TEST_CHECK(clock_gettime(CLOCK_MONOTONIC, &ts) == 0);
- return absl::TimeFromTimespec(ts) - start_;
- }
-
- void Start() {
- struct timespec ts;
- TEST_CHECK(clock_gettime(CLOCK_MONOTONIC, &ts) == 0);
- start_ = absl::TimeFromTimespec(ts);
- }
-
- protected:
- absl::Time start_;
-};
-
-// Sets the given itimer and returns a cleanup function that restores the
-// previous itimer when it goes out of scope.
-inline PosixErrorOr<Cleanup> ScopedItimer(int which,
- struct itimerval const& new_value) {
- struct itimerval old_value;
- int rc = setitimer(which, &new_value, &old_value);
- MaybeSave();
- if (rc < 0) {
- return PosixError(errno, "setitimer failed");
- }
- return Cleanup(std::function<void(void)>([which, old_value] {
- EXPECT_THAT(setitimer(which, &old_value, nullptr), SyscallSucceeds());
- }));
-}
-
-#ifdef __linux__
-
-// RAII type for a kernel "POSIX" interval timer. (The kernel provides system
-// calls such as timer_create that behave very similarly, but not identically,
-// to those described by timer_create(2); in particular, the kernel does not
-// implement SIGEV_THREAD. glibc builds POSIX-compliant interval timers based on
-// these kernel interval timers.)
-//
-// Compare implementation to FileDescriptor.
-class IntervalTimer {
- public:
- IntervalTimer() = default;
-
- explicit IntervalTimer(int id) { set_id(id); }
-
- IntervalTimer(IntervalTimer&& orig) : id_(orig.release()) {}
-
- IntervalTimer& operator=(IntervalTimer&& orig) {
- if (this == &orig) return *this;
- reset(orig.release());
- return *this;
- }
-
- IntervalTimer(const IntervalTimer& other) = delete;
- IntervalTimer& operator=(const IntervalTimer& other) = delete;
-
- ~IntervalTimer() { reset(); }
-
- int get() const { return id_; }
-
- int release() {
- int const id = id_;
- id_ = -1;
- return id;
- }
-
- void reset() { reset(-1); }
-
- void reset(int id) {
- if (id_ >= 0) {
- TEST_PCHECK(syscall(SYS_timer_delete, id_) == 0);
- MaybeSave();
- }
- set_id(id);
- }
-
- PosixErrorOr<struct itimerspec> Set(
- int flags, const struct itimerspec& new_value) const {
- struct itimerspec old_value = {};
- if (syscall(SYS_timer_settime, id_, flags, &new_value, &old_value) < 0) {
- return PosixError(errno, "timer_settime");
- }
- MaybeSave();
- return old_value;
- }
-
- PosixErrorOr<struct itimerspec> Get() const {
- struct itimerspec curr_value = {};
- if (syscall(SYS_timer_gettime, id_, &curr_value) < 0) {
- return PosixError(errno, "timer_gettime");
- }
- MaybeSave();
- return curr_value;
- }
-
- PosixErrorOr<int> Overruns() const {
- int rv = syscall(SYS_timer_getoverrun, id_);
- if (rv < 0) {
- return PosixError(errno, "timer_getoverrun");
- }
- MaybeSave();
- return rv;
- }
-
- private:
- void set_id(int id) { id_ = std::max(id, -1); }
-
- // Kernel timer_t is int; glibc timer_t is void*.
- int id_ = -1;
-};
-
-// A wrapper around timer_create(2).
-PosixErrorOr<IntervalTimer> TimerCreate(clockid_t clockid,
- const struct sigevent& sev);
-
-#endif // __linux__
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_UTIL_TIMER_UTIL_H_
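
The timer helpers above compose naturally: MonotonicTimer for coarse elapsed-time measurements, and TimerCreate plus IntervalTimer for kernel timers that delete themselves on scope exit. A hedged sketch, assuming ASSIGN_OR_RETURN_ERRNO and NoError come from test/util/posix_error.h as elsewhere in this diff (the function itself is hypothetical):

    // Sketch: time a section of code and arm a one-shot kernel timer.
    #include <signal.h>
    #include <time.h>

    #include "absl/time/time.h"
    #include "test/util/posix_error.h"
    #include "test/util/timer_util.h"

    namespace gvisor {
    namespace testing {

    PosixError ArmOneShotTimer() {
      MonotonicTimer stopwatch;
      stopwatch.Start();

      struct sigevent sev = {};
      sev.sigev_notify = SIGEV_SIGNAL;
      sev.sigev_signo = SIGALRM;
      ASSIGN_OR_RETURN_ERRNO(IntervalTimer timer,
                             TimerCreate(CLOCK_MONOTONIC, sev));

      struct itimerspec spec = {};
      spec.it_value.tv_sec = 1;  // Fire once, one second from now.
      ASSIGN_OR_RETURN_ERRNO(struct itimerspec old_spec, timer.Set(0, spec));
      (void)old_spec;

      // ~IntervalTimer deletes the kernel timer when it goes out of scope.
      absl::Duration elapsed = stopwatch.Duration();
      (void)elapsed;
      return NoError();
    }

    }  // namespace testing
    }  // namespace gvisor
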
diff --git a/test/util/uid_util.cc b/test/util/uid_util.cc
deleted file mode 100644
index b131b4b99..000000000
--- a/test/util/uid_util.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/util/posix_error.h"
-#include "test/util/save_util.h"
-
-namespace gvisor {
-namespace testing {
-
-PosixErrorOr<bool> IsRoot() {
- uid_t ruid, euid, suid;
- int rc = getresuid(&ruid, &euid, &suid);
- MaybeSave();
- if (rc < 0) {
- return PosixError(errno, "getresuid");
- }
- if (ruid != 0 || euid != 0 || suid != 0) {
- return false;
- }
- gid_t rgid, egid, sgid;
- rc = getresgid(&rgid, &egid, &sgid);
- MaybeSave();
- if (rc < 0) {
- return PosixError(errno, "getresgid");
- }
- if (rgid != 0 || egid != 0 || sgid != 0) {
- return false;
- }
- return true;
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/util/uid_util.h b/test/util/uid_util.h
deleted file mode 100644
index 2cd387fb0..000000000
--- a/test/util/uid_util.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_SYSCALLS_UID_UTIL_H_
-#define GVISOR_TEST_SYSCALLS_UID_UTIL_H_
-
-#include "test/util/posix_error.h"
-
-namespace gvisor {
-namespace testing {
-
-// Returns true if the caller's real/effective/saved user/group IDs are all 0.
-PosixErrorOr<bool> IsRoot();
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_SYSCALLS_UID_UTIL_H_
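
In practice IsRoot pairs with the SKIP_IF macro so that privileged tests skip cleanly instead of failing when run as a regular user. A minimal sketch, assuming SKIP_IF and ASSERT_NO_ERRNO_AND_VALUE come from test/util/test_util.h and test/util/posix_error.h (the test itself is hypothetical):

    // Sketch: skip a test when the suite is not running as root.
    #include "gtest/gtest.h"
    #include "test/util/posix_error.h"
    #include "test/util/test_util.h"
    #include "test/util/uid_util.h"

    namespace gvisor {
    namespace testing {
    namespace {

    TEST(RootOnlyExample, RequiresRoot) {
      SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(IsRoot()));
      // Assertions that require root privileges would follow here.
    }

    }  // namespace
    }  // namespace testing
    }  // namespace gvisor
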
diff --git a/test/util/verity_util.cc b/test/util/verity_util.cc
deleted file mode 100644
index 501d7c2cf..000000000
--- a/test/util/verity_util.cc
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "test/util/verity_util.h"
-
-#include "test/util/fs_util.h"
-#include "test/util/mount_util.h"
-#include "test/util/temp_path.h"
-
-namespace gvisor {
-namespace testing {
-
-std::string BytesToHexString(uint8_t bytes[], int size) {
- std::stringstream ss;
- ss << std::hex;
- for (int i = 0; i < size; ++i) {
- ss << std::setw(2) << std::setfill('0') << static_cast<int>(bytes[i]);
- }
- return ss.str();
-}
-
-std::string MerklePath(absl::string_view path) {
- return JoinPath(Dirname(path),
- std::string(kMerklePrefix) + std::string(Basename(path)));
-}
-
-std::string MerkleRootPath(absl::string_view path) {
- return JoinPath(Dirname(path),
- std::string(kMerkleRootPrefix) + std::string(Basename(path)));
-}
-
-PosixError FlipRandomBit(int fd, int size) {
- // Generate a random offset in the file.
- srand(time(nullptr));
- unsigned int seed = 0;
- int random_offset = rand_r(&seed) % size;
-
- // Read a random byte and flip a bit in it.
- char buf[1];
- RETURN_ERROR_IF_SYSCALL_FAIL(PreadFd(fd, buf, 1, random_offset));
- buf[0] ^= 1;
- RETURN_ERROR_IF_SYSCALL_FAIL(PwriteFd(fd, buf, 1, random_offset));
- return NoError();
-}
-
-PosixErrorOr<std::string> MountVerity(std::string tmpfs_dir,
- std::string filename,
- std::vector<EnableTarget> targets) {
- // Mount a verity fs on the existing tmpfs mount.
- std::string mount_opts = "lower_path=" + tmpfs_dir;
- ASSIGN_OR_RETURN_ERRNO(TempPath verity_dir, TempPath::CreateDir());
- RETURN_ERROR_IF_SYSCALL_FAIL(
- mount("", verity_dir.path().c_str(), "verity", 0, mount_opts.c_str()));
-
- // Enable the file, the symlink (if provided), and the directory.
- ASSIGN_OR_RETURN_ERRNO(
- auto fd, Open(JoinPath(verity_dir.path(), filename), O_RDONLY, 0777));
- RETURN_ERROR_IF_SYSCALL_FAIL(ioctl(fd.get(), FS_IOC_ENABLE_VERITY));
-
- for (const EnableTarget& target : targets) {
- ASSIGN_OR_RETURN_ERRNO(
- auto target_fd,
- Open(JoinPath(verity_dir.path(), target.path), target.flags, 0777));
- RETURN_ERROR_IF_SYSCALL_FAIL(ioctl(target_fd.get(), FS_IOC_ENABLE_VERITY));
- }
-
- ASSIGN_OR_RETURN_ERRNO(auto dir_fd, Open(verity_dir.path(), O_RDONLY, 0777));
- RETURN_ERROR_IF_SYSCALL_FAIL(ioctl(dir_fd.get(), FS_IOC_ENABLE_VERITY));
-
- // Measure the root hash.
- uint8_t digest_array[sizeof(struct fsverity_digest) + kMaxDigestSize] = {0};
- struct fsverity_digest* digest =
- reinterpret_cast<struct fsverity_digest*>(digest_array);
- digest->digest_size = kMaxDigestSize;
- RETURN_ERROR_IF_SYSCALL_FAIL(
- ioctl(dir_fd.get(), FS_IOC_MEASURE_VERITY, digest));
-
- // Mount a verity fs with specified root hash.
- mount_opts +=
- ",root_hash=" + BytesToHexString(digest->digest, digest->digest_size);
- ASSIGN_OR_RETURN_ERRNO(TempPath verity_with_hash_dir, TempPath::CreateDir());
- RETURN_ERROR_IF_SYSCALL_FAIL(mount("", verity_with_hash_dir.path().c_str(),
- "verity", 0, mount_opts.c_str()));
- // Verity directories should not be deleted. Release the TempPath objects to
- // prevent those directories from being deleted by the destructor.
- verity_dir.release();
- return verity_with_hash_dir.release();
-}
-
-} // namespace testing
-} // namespace gvisor
diff --git a/test/util/verity_util.h b/test/util/verity_util.h
deleted file mode 100644
index 44863f322..000000000
--- a/test/util/verity_util.h
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef GVISOR_TEST_UTIL_VERITY_UTIL_H_
-#define GVISOR_TEST_UTIL_VERITY_UTIL_H_
-
-#include <stdint.h>
-
-#include <vector>
-
-#include "test/util/posix_error.h"
-
-namespace gvisor {
-namespace testing {
-
-#ifndef FS_IOC_ENABLE_VERITY
-#define FS_IOC_ENABLE_VERITY 1082156677
-#endif
-
-#ifndef FS_IOC_MEASURE_VERITY
-#define FS_IOC_MEASURE_VERITY 3221513862
-#endif
-
-#ifndef FS_VERITY_FL
-#define FS_VERITY_FL 1048576
-#endif
-
-#ifndef FS_IOC_GETFLAGS
-#define FS_IOC_GETFLAGS 2148034049
-#endif
-
-struct fsverity_digest {
- unsigned short digest_algorithm;
- unsigned short digest_size; /* input/output */
- unsigned char digest[];
-};
-
-struct EnableTarget {
- std::string path;
- int flags;
-
- EnableTarget(std::string path, int flags) : path(path), flags(flags) {}
-};
-
-constexpr int kMaxDigestSize = 64;
-constexpr int kDefaultDigestSize = 32;
-constexpr char kContents[] = "foobarbaz";
-constexpr char kMerklePrefix[] = ".merkle.verity.";
-constexpr char kMerkleRootPrefix[] = ".merkleroot.verity.";
-
-// Get the Merkle tree file path for |path|.
-std::string MerklePath(absl::string_view path);
-
-// Get the root Merkle tree file path for |path|.
-std::string MerkleRootPath(absl::string_view path);
-
-// Provide a function to convert bytes to hex string, since
-// absl::BytesToHexString does not seem to be compatible with golang
-// hex.DecodeString used in verity due to zero-padding.
-std::string BytesToHexString(uint8_t bytes[], int size);
-
-// Flip a random bit in the file represented by fd.
-PosixError FlipRandomBit(int fd, int size);
-
-// Mount a verity filesystem on the tmpfs and enable both the file and the
-// directory. Then mount a new verity filesystem with the measured root hash.
-PosixErrorOr<std::string> MountVerity(std::string tmpfs_dir,
- std::string filename,
- std::vector<EnableTarget> targets);
-
-} // namespace testing
-} // namespace gvisor
-
-#endif // GVISOR_TEST_UTIL_VERITY_UTIL_H_
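
Taken together, these helpers support the common verity test pattern: mount a verity view over a tmpfs-backed file, let MountVerity measure the root hash, then tamper with the backing data and expect verification to fail. A hedged sketch, assuming TempPath, Open, and the ASSERT_NO_ERRNO* macros behave as elsewhere in this diff; a real test additionally needs root and an actual tmpfs mount behind tmpfs_dir:

    // Sketch: set up a verity mount, then corrupt the backing file.
    #include <fcntl.h>

    #include <string>

    #include "gtest/gtest.h"
    #include "test/util/file_descriptor.h"
    #include "test/util/fs_util.h"
    #include "test/util/posix_error.h"
    #include "test/util/temp_path.h"
    #include "test/util/verity_util.h"

    namespace gvisor {
    namespace testing {
    namespace {

    TEST(VerityExample, DetectsModifiedFile) {
      // Backing directory and file (a real test would mount tmpfs here first).
      TempPath tmpfs_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
      TempPath file = ASSERT_NO_ERRNO_AND_VALUE(
          TempPath::CreateFileWith(tmpfs_dir.path(), kContents, 0777));
      std::string filename = std::string(Basename(file.path()));

      // Mount verity; MountVerity enables the file and the directory.
      std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
          MountVerity(tmpfs_dir.path(), filename, /*targets=*/{}));

      // Flip a bit in the backing file; reads of JoinPath(verity_dir, filename)
      // through the verity mount should now fail verification.
      FileDescriptor fd =
          ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR, 0777));
      ASSERT_NO_ERRNO(FlipRandomBit(fd.get(), sizeof(kContents) - 1));
    }

    }  // namespace
    }  // namespace testing
    }  // namespace gvisor
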
diff --git a/tools/BUILD b/tools/BUILD
deleted file mode 100644
index 3861ff2a5..000000000
--- a/tools/BUILD
+++ /dev/null
@@ -1,19 +0,0 @@
-load("//tools:defs.bzl", "bzl_library")
-
-package(licenses = ["notice"])
-
-bzl_library(
- name = "defs_bzl",
- srcs = ["defs.bzl"],
- visibility = [
- "//:sandbox",
- ],
-)
-
-bzl_library(
- name = "deps_bzl",
- srcs = ["deps.bzl"],
- visibility = [
- "//:sandbox",
- ],
-)
diff --git a/tools/bazel.mk b/tools/bazel.mk
deleted file mode 100644
index 60b50cfb0..000000000
--- a/tools/bazel.mk
+++ /dev/null
@@ -1,219 +0,0 @@
-#!/usr/bin/make -f
-
-# Copyright 2018 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-##
-## Docker options.
-##
-## This file supports targets that wrap bazel in a running Docker
-## container to simplify development. Some options are available to
-## control the behavior of this container:
-##
-## USER - The in-container user.
-## DOCKER_RUN_OPTIONS - Options for the container (default: --privileged, required for tests).
-## DOCKER_NAME - The container name (default: gvisor-bazel-HASH).
-## DOCKER_PRIVILEGED - Docker privileged flags (default: --privileged).
-## BAZEL_CACHE - The bazel cache directory (default: detected).
-##     GCLOUD_CONFIG       - The gcloud config directory (default: detected).
-## DOCKER_SOCKET - The Docker socket (default: detected).
-##
-## To opt out of these wrappers, set DOCKER_BUILD=false.
-DOCKER_BUILD := true
-ifeq ($(DOCKER_BUILD),true)
--include bazel-server
-endif
-
-# See base Makefile.
-BRANCH_NAME := $(shell (git branch --show-current 2>/dev/null || \
- git rev-parse --abbrev-ref HEAD 2>/dev/null) | \
- xargs -n 1 basename 2>/dev/null)
-BUILD_ROOTS := bazel-bin/ bazel-out/
-
-# Bazel container configuration (see below).
-USER := $(shell whoami)
-HASH := $(shell readlink -m $(CURDIR) | md5sum | cut -c1-8)
-BUILDER_NAME := gvisor-builder-$(HASH)-$(ARCH)
-DOCKER_NAME := gvisor-bazel-$(HASH)-$(ARCH)
-DOCKER_PRIVILEGED := --privileged
-BAZEL_CACHE := $(HOME)/.cache/bazel/
-GCLOUD_CONFIG := $(HOME)/.config/gcloud/
-DOCKER_SOCKET := /var/run/docker.sock
-DOCKER_CONFIG := /etc/docker
-
-##
-## Bazel helpers.
-##
-## Bazel will be run with standard flags. You can specify the following flags
-## to control which flags are passed:
-##
-## STARTUP_OPTIONS - Startup options passed to Bazel.
-##
-STARTUP_OPTIONS :=
-BAZEL_OPTIONS :=
-BAZEL := bazel $(STARTUP_OPTIONS)
-BASE_OPTIONS := --color=no --curses=no
-TEST_OPTIONS := $(BASE_OPTIONS) \
- --test_output=errors \
- --keep_going \
- --verbose_failures=true \
- --build_event_json_file=.build_events.json
-
-# Basic options.
-UID := $(shell id -u ${USER})
-GID := $(shell id -g ${USER})
-USERADD_OPTIONS :=
-DOCKER_RUN_OPTIONS :=
-DOCKER_RUN_OPTIONS += --rm
-DOCKER_RUN_OPTIONS += --user $(UID):$(GID)
-DOCKER_RUN_OPTIONS += --entrypoint ""
-DOCKER_RUN_OPTIONS += --init
-DOCKER_RUN_OPTIONS += -v "$(shell readlink -m $(BAZEL_CACHE)):$(BAZEL_CACHE)"
-DOCKER_RUN_OPTIONS += -v "$(shell readlink -m $(GCLOUD_CONFIG)):$(GCLOUD_CONFIG)"
-DOCKER_RUN_OPTIONS += -v "/tmp:/tmp"
-DOCKER_EXEC_OPTIONS := --user $(UID):$(GID)
-DOCKER_EXEC_OPTIONS += --interactive
-ifeq (true,$(shell test -t 0 && echo true))
-DOCKER_EXEC_OPTIONS += --tty
-endif
-
-# Add basic UID/GID options.
-#
-# Note that USERADD_DOCKER and GROUPADD_DOCKER are both defined as "deferred"
-# variables in Make terminology; that is, they will be expanded at time of use
-# and may include other variables, including those defined below.
-#
-# NOTE: we pass -l to useradd below because otherwise you can hit a bug
-# best described here:
-# https://github.com/moby/moby/issues/5419#issuecomment-193876183
-# TL;DR: trying to add to /var/log/lastlog (a sparse file) runs the machine
-# out of disk space.
-ifneq ($(UID),0)
-USERADD_DOCKER += useradd -l --uid $(UID) --non-unique --no-create-home \
- --gid $(GID) $(USERADD_OPTIONS) -d $(HOME) $(USER) &&
-endif
-ifneq ($(GID),0)
-GROUPADD_DOCKER += groupadd --gid $(GID) --non-unique $(USER) &&
-endif
-
-# Add docker passthrough options.
-ifneq ($(DOCKER_PRIVILEGED),)
-DOCKER_RUN_OPTIONS += -v "$(DOCKER_SOCKET):$(DOCKER_SOCKET)"
-DOCKER_RUN_OPTIONS += -v "$(DOCKER_CONFIG):$(DOCKER_CONFIG)"
-DOCKER_RUN_OPTIONS += $(DOCKER_PRIVILEGED)
-DOCKER_EXEC_OPTIONS += $(DOCKER_PRIVILEGED)
-DOCKER_GROUP := $(shell stat -c '%g' $(DOCKER_SOCKET))
-ifneq ($(GID),$(DOCKER_GROUP))
-USERADD_OPTIONS += --groups $(DOCKER_GROUP)
-GROUPADD_DOCKER += groupadd --gid $(DOCKER_GROUP) --non-unique docker-$(HASH) &&
-DOCKER_RUN_OPTIONS += --group-add $(DOCKER_GROUP)
-endif
-endif
-
-# Add KVM passthrough options.
-ifneq (,$(wildcard /dev/kvm))
-DOCKER_RUN_OPTIONS += --device=/dev/kvm
-KVM_GROUP := $(shell stat -c '%g' /dev/kvm)
-ifneq ($(GID),$(KVM_GROUP))
-USERADD_OPTIONS += --groups $(KVM_GROUP)
-GROUPADD_DOCKER += groupadd --gid $(KVM_GROUP) --non-unique kvm-$(HASH) &&
-DOCKER_RUN_OPTIONS += --group-add $(KVM_GROUP)
-endif
-endif
-
-# Top-level functions.
-#
-# This command runs a bazel server, and the container sticks around
-# until the bazel server exits. This should ensure that it does not
-# exit in the middle of running a build, but also it won't stick around
-# forever. The build commands wrap around an appropriate exec into the
-# container in order to perform work via the bazel client.
-ifeq ($(DOCKER_BUILD),true)
-wrapper = docker exec $(DOCKER_EXEC_OPTIONS) $(DOCKER_NAME) $(1)
-else
-wrapper = $(1)
-endif
-
-bazel-shutdown: ## Shuts down a running bazel server.
- @$(call wrapper,$(BAZEL) shutdown)
-.PHONY: bazel-shutdown
-
-bazel-alias: ## Emits an alias that can be used within the shell.
- @echo "alias bazel='$(call wrapper,$(BAZEL))'"
-.PHONY: bazel-alias
-
-bazel-image: load-default ## Ensures that the local builder exists.
- @$(call header,DOCKER BUILD)
- @docker rm -f $(BUILDER_NAME) 2>/dev/null || true
- @docker run --user 0:0 --entrypoint "" --name $(BUILDER_NAME) gvisor.dev/images/default \
- bash -c "$(GROUPADD_DOCKER) $(USERADD_DOCKER) if test -e /dev/kvm; then chmod a+rw /dev/kvm; fi" >&2
- @docker commit $(BUILDER_NAME) gvisor.dev/images/builder >&2
-.PHONY: bazel-image
-
-ifneq (true,$(shell $(call wrapper,echo true)))
-bazel-server: bazel-image ## Ensures that the server exists.
- @$(call header,DOCKER RUN)
- @docker rm -f $(DOCKER_NAME) 2>/dev/null || true
- @mkdir -p $(BAZEL_CACHE)
- @mkdir -p $(GCLOUD_CONFIG)
- @docker run -d --name $(DOCKER_NAME) \
- -v "$(CURDIR):$(CURDIR)" \
- --workdir "$(CURDIR)" \
- $(DOCKER_RUN_OPTIONS) \
- gvisor.dev/images/builder \
- bash -c "set -x; tail -f --pid=\$$($(BAZEL) info server_pid) /dev/null"
-else
-bazel-server:
- @
-endif
-.PHONY: bazel-server
-
-# build_paths extracts the built binary from the bazel stderr output.
-#
-# This could be alternately done by parsing the bazel build event stream, but
-# this is a complex schema, and begs the question: what will build the thing
-# that parses the output? Bazel? Do we need a separate bootstrapping build
-# command here? Yikes, let's just stick with the ugly shell pipeline.
-#
-# The last line is used to prevent terminal shenanigans.
-build_paths = \
- (set -euo pipefail; \
- $(call wrapper,$(BAZEL) build $(BASE_OPTIONS) $(BAZEL_OPTIONS) $(1)) 2>&1 \
- | tee /dev/fd/2 \
- | sed -n -e '/^Target/,$$p' \
- | sed -n -e '/^ \($(subst /,\/,$(subst $(SPACE),\|,$(BUILD_ROOTS)))\)/p' \
- | sed -e 's/ /\n/g' \
- | awk '{$$1=$$1};1' \
- | strings \
- | xargs -r -n 1 -I {} readlink -f "{}" \
- | xargs -r -n 1 -I {} bash -c 'set -xeuo pipefail; $(2)')
-
-clean = $(call header,CLEAN) && $(call wrapper,$(BAZEL) clean)
-build = $(call header,BUILD $(1)) && $(call build_paths,$(1),echo {})
-copy = $(call header,COPY $(1) $(2)) && $(call build_paths,$(1),cp -fa {} $(2))
-run = $(call header,RUN $(1) $(2)) && $(call build_paths,$(1),{} $(2))
-sudo = $(call header,SUDO $(1) $(2)) && $(call build_paths,$(1),sudo -E {} $(2))
-test = $(call header,TEST $(1)) && $(call wrapper,$(BAZEL) test $(TEST_OPTIONS) $(1))
-
-clean: ## Cleans the bazel cache.
- @$(call clean)
-.PHONY: clean
-
-testlogs: ## Returns the most recent set of test logs.
- @if test -f .build_events.json; then \
- cat .build_events.json | jq -r \
- 'select(.testSummary?.overallStatus? | tostring | test("(FAILED|FLAKY|TIMEOUT)")) | "\(.id.testSummary.label) \(.testSummary.failed[].uri)"' | \
- sed -e 's|file://||'; \
- fi
-.PHONY: testlogs
diff --git a/tools/bazel_gazelle_generate.patch b/tools/bazel_gazelle_generate.patch
deleted file mode 100644
index fd1e1bda6..000000000
--- a/tools/bazel_gazelle_generate.patch
+++ /dev/null
@@ -1,15 +0,0 @@
-diff --git a/language/go/generate.go b/language/go/generate.go
-index 2892948..feb4ad6 100644
---- a/language/go/generate.go
-+++ b/language/go/generate.go
-@@ -691,6 +691,10 @@ func (g *generator) setImportAttrs(r *rule.Rule, importPath string) {
- }
-
- func (g *generator) commonVisibility(importPath string) []string {
-+ if importPath == "golang.org/x/tools/go/analysis/internal/facts" {
-+ // Imported by nogo main. We add a visibility exception.
-+ return []string{"//visibility:public"}
-+ }
- // If the Bazel package name (rel) contains "internal", add visibility for
- // subpackages of the parent.
- // If the import path contains "internal" but rel does not, this is
diff --git a/tools/bazeldefs/BUILD b/tools/bazeldefs/BUILD
deleted file mode 100644
index 5295f4a85..000000000
--- a/tools/bazeldefs/BUILD
+++ /dev/null
@@ -1,61 +0,0 @@
-load("//tools:defs.bzl", "bzl_library", "go_proto_library")
-
-package(
- default_visibility = ["//:sandbox"],
- licenses = ["notice"],
-)
-
-bzl_library(
- name = "platforms_bzl",
- srcs = ["platforms.bzl"],
- visibility = ["//visibility:private"],
-)
-
-bzl_library(
- name = "tags_bzl",
- srcs = ["tags.bzl"],
- visibility = ["//visibility:private"],
-)
-
-bzl_library(
- name = "defs_bzl",
- srcs = ["defs.bzl"],
- visibility = ["//visibility:private"],
-)
-
-config_setting(
- name = "linux_arm64_cross",
- values = {
- "cpu": "aarch64",
- "host_cpu": "k8",
- },
- visibility = ["//visibility:private"],
-)
-
-config_setting(
- name = "linux_amd64_cross",
- values = {
- "cpu": "k8",
- "host_cpu": "aarch64",
- },
- visibility = ["//visibility:private"],
-)
-
-genrule(
- name = "version",
- outs = ["version.txt"],
- cmd = "cat bazel-out/stable-status.txt | grep STABLE_VERSION | cut -d' ' -f2- | sed 's/^[^[:digit:]]*//g' >$@",
- stamp = True,
- tags = [
- "manual",
- "nobuilder",
- "notap",
- ],
- visibility = ["//:sandbox"],
-)
-
-go_proto_library(
- name = "worker_protocol_go_proto",
- importpath = "gvisor.dev/bazel/worker_protocol_go_proto",
- proto = "@bazel_tools//src/main/protobuf:worker_protocol_proto",
-)
diff --git a/tools/bazeldefs/cc.bzl b/tools/bazeldefs/cc.bzl
deleted file mode 100644
index 2831eac5f..000000000
--- a/tools/bazeldefs/cc.bzl
+++ /dev/null
@@ -1,51 +0,0 @@
-"""C++ rules."""
-
-load("@rules_cc//cc:defs.bzl", _cc_binary = "cc_binary", _cc_library = "cc_library", _cc_proto_library = "cc_proto_library", _cc_test = "cc_test")
-load("@com_github_grpc_grpc//bazel:cc_grpc_library.bzl", _cc_grpc_library = "cc_grpc_library")
-
-cc_library = _cc_library
-cc_proto_library = _cc_proto_library
-cc_test = _cc_test
-cc_toolchain = "@bazel_tools//tools/cpp:current_cc_toolchain"
-gtest = "@com_google_googletest//:gtest"
-gbenchmark = "@com_google_benchmark//:benchmark"
-grpcpp = "@com_github_grpc_grpc//:grpc++"
-vdso_linker_option = "-fuse-ld=gold "
-
-def _cc_flags_supplier_impl(ctx):
- variables = platform_common.TemplateVariableInfo({
- "CC_FLAGS": "",
- })
- return [variables]
-
-cc_flags_supplier = rule(
- implementation = _cc_flags_supplier_impl,
-)
-
-def cc_grpc_library(name, **kwargs):
- _cc_grpc_library(name = name, grpc_only = True, **kwargs)
-
-def cc_binary(name, static = False, **kwargs):
- """Run cc_binary.
-
- Args:
- name: name of the target.
- static: make a static binary if True
- **kwargs: the rest of the args.
- """
- if static:
- # How to statically link a c++ program that uses threads, like for gRPC:
- # https://gcc.gnu.org/legacy-ml/gcc-help/2010-05/msg00029.html
- if "linkopts" not in kwargs:
- kwargs["linkopts"] = []
- kwargs["linkopts"] += [
- "-static",
- "-lstdc++",
- "-Wl,--whole-archive",
- "-lpthread",
- "-Wl,--no-whole-archive",
- ]
- _cc_binary(
- name = name,
- **kwargs
- )
diff --git a/tools/bazeldefs/defs.bzl b/tools/bazeldefs/defs.bzl
deleted file mode 100644
index 7875bbaea..000000000
--- a/tools/bazeldefs/defs.bzl
+++ /dev/null
@@ -1,81 +0,0 @@
-"""Meta and miscellaneous rules."""
-
-load("@bazel_skylib//rules:build_test.bzl", _build_test = "build_test")
-load("@bazel_skylib//:bzl_library.bzl", _bzl_library = "bzl_library")
-
-build_test = _build_test
-bzl_library = _bzl_library
-more_shards = 4
-most_shards = 8
-version = "//tools/bazeldefs:version"
-
-def short_path(path):
- return path
-
-def proto_library(name, has_services = None, **kwargs):
- native.proto_library(
- name = name,
- **kwargs
- )
-
-def select_arch(amd64 = "amd64", arm64 = "arm64", default = None, **kwargs):
- values = {
- "@bazel_tools//src/conditions:linux_x86_64": amd64,
- "@bazel_tools//src/conditions:linux_aarch64": arm64,
- }
- if default:
- values["//conditions:default"] = default
- return select(values, **kwargs)
-
-def select_system(linux = ["__linux__"], **kwargs):
- return linux # Only Linux supported.
-
-def default_installer():
- return None
-
-def default_net_util():
- return [] # Nothing needed.
-
-def coreutil():
- return [] # Nothing needed.
-
-def select_native_vs_cross(native = [], amd64 = [], arm64 = [], cross = []):
- values = {
- "//tools/bazeldefs:linux_arm64_cross": arm64 + cross,
- "//tools/bazeldefs:linux_amd64_cross": amd64 + cross,
- "//conditions:default": native,
- }
- return select(values)
-
-def arch_genrule(name, srcs, outs, cmd, tools):
- """Runs a gen command on the target architecture.
-
-    If the target architecture doesn't match the host architecture, it will build
- a command for the target architecture and run it via qemu.
-
- The native genrule runs the command on the host architecture.
-
- Args:
- name: name of generated target.
- srcs: A list of inputs for this rule.
-      cmd: The command to run. It must contain " QEMU " before each executed binary.
- outs: A list of files generated by this rule.
- tools: A list of tool dependencies for this rule.
- """
- qemu_arm64 = "qemu-aarch64-static"
- qemu_amd64 = "qemu-x86_64-static"
- srcs = select_native_vs_cross(
- cross = srcs + tools,
- native = srcs,
- )
- tools = select_native_vs_cross(
- cross = [],
- native = tools,
- )
- cmd = select_native_vs_cross(
- arm64 = cmd.replace("QEMU", qemu_arm64),
- amd64 = cmd.replace("QEMU", qemu_amd64),
- native = cmd.replace("QEMU", ""),
- cross = "",
- )
- native.genrule(name = name, srcs = srcs, outs = outs, cmd = cmd, tools = tools)
diff --git a/tools/bazeldefs/go.bzl b/tools/bazeldefs/go.bzl
deleted file mode 100644
index af3a1c3ee..000000000
--- a/tools/bazeldefs/go.bzl
+++ /dev/null
@@ -1,159 +0,0 @@
-"""Go rules."""
-
-load("@bazel_gazelle//:def.bzl", _gazelle = "gazelle")
-load("@io_bazel_rules_go//go:def.bzl", "GoLibrary", _go_binary = "go_binary", _go_context = "go_context", _go_embed_data = "go_embed_data", _go_library = "go_library", _go_path = "go_path", _go_test = "go_test")
-load("@io_bazel_rules_go//proto:def.bzl", _go_grpc_library = "go_grpc_library", _go_proto_library = "go_proto_library")
-load("//tools/bazeldefs:defs.bzl", "select_arch", "select_system")
-
-gazelle = _gazelle
-
-go_embed_data = _go_embed_data
-
-go_path = _go_path
-
-bazel_worker_proto = "//tools/bazeldefs:worker_protocol_go_proto"
-
-def _go_proto_or_grpc_library(go_library_func, name, **kwargs):
- if "importpath" in kwargs:
- # If importpath is explicit, pass straight through.
- go_library_func(name = name, **kwargs)
- return
- deps = []
- for d in (kwargs.pop("deps", []) or []):
- if d == "@com_google_protobuf//:timestamp_proto":
- # Special case: this proto has its Go definitions in a different
- # repository.
- deps.append("@org_golang_google_protobuf//" +
- "types/known/timestamppb")
- continue
- if "//" in d:
- repo, path = d.split("//", 1)
- deps.append(repo + "//" + path.replace("_proto", "_go_proto"))
- else:
- deps.append(d.replace("_proto", "_go_proto"))
- go_library_func(
- name = name + "_go_proto",
- importpath = "gvisor.dev/gvisor/" + native.package_name() + "/" + name + "_go_proto",
- proto = ":" + name + "_proto",
- deps = deps,
- **kwargs
- )
-
-def go_proto_library(name, **kwargs):
- _go_proto_or_grpc_library(_go_proto_library, name, **kwargs)
-
-def go_grpc_and_proto_libraries(name, **kwargs):
- _go_proto_or_grpc_library(_go_grpc_library, name, **kwargs)
-
-def go_binary(name, static = False, pure = False, x_defs = None, system_malloc = False, **kwargs):
- """Build a go binary.
-
- Args:
- name: name of the target.
- static: build a static binary.
- pure: build without cgo.
- x_defs: additional definitions.
- **kwargs: rest of the arguments are passed to _go_binary.
- """
- if static:
- kwargs["static"] = "on"
- if pure:
- kwargs["pure"] = "on"
- _go_binary(
- name = name,
- x_defs = x_defs,
- **kwargs
- )
-
-def go_importpath(target):
- """Returns the importpath for the target."""
- return target[GoLibrary].importpath
-
-def go_library(name, arch_deps = [], **kwargs):
- _go_library(
- name = name,
- importpath = "gvisor.dev/gvisor/" + native.package_name(),
- **kwargs
- )
-
-def go_test(name, pure = False, library = None, **kwargs):
- """Build a go test.
-
- Args:
- name: name of the output binary.
- pure: should it be built without cgo.
- library: the library to embed.
- **kwargs: rest of the arguments to pass to _go_test.
- """
- if pure:
- kwargs["pure"] = "on"
- if library:
- kwargs["embed"] = [library]
- _go_test(
- name = name,
- **kwargs
- )
-
-def go_rule(rule, implementation, **kwargs):
- """Wraps a rule definition with Go attributes.
-
- Args:
- rule: rule function (typically rule or aspect).
- implementation: implementation function.
- **kwargs: other arguments to pass to rule.
-
- Returns:
- The result of invoking the rule.
- """
- attrs = kwargs.pop("attrs", dict())
- attrs["_go_context_data"] = attr.label(default = "@io_bazel_rules_go//:go_context_data")
- attrs["_stdlib"] = attr.label(default = "@io_bazel_rules_go//:stdlib")
- toolchains = kwargs.get("toolchains", []) + ["@io_bazel_rules_go//go:toolchain"]
- return rule(implementation, attrs = attrs, toolchains = toolchains, **kwargs)
-
-def go_embed_libraries(target):
- if hasattr(target.attr, "embed"):
- return target.attr.embed
- return []
-
-def go_context(ctx, goos = None, goarch = None, std = False):
- """Extracts a standard Go context struct.
-
- Args:
- ctx: the starlark context (required).
- goos: the GOOS value.
- goarch: the GOARCH value.
- std: ignored.
-
- Returns:
- A context Go struct with pointers to Go toolchain components.
- """
-
- # We don't change anything for the standard library analysis. All Go files
- # are available in all instances. Note that this includes the standard
- # library sources, which are analyzed by nogo.
- go_ctx = _go_context(ctx)
- if goos == None:
- goos = go_ctx.sdk.goos
- elif goos != go_ctx.sdk.goos:
- fail("Internal GOOS (%s) doesn't match GoSdk GOOS (%s)." % (goos, go_ctx.sdk.goos))
- if goarch == None:
- goarch = go_ctx.sdk.goarch
- elif goarch != go_ctx.sdk.goarch:
- fail("Internal GOARCH (%s) doesn't match GoSdk GOARCH (%s)." % (goarch, go_ctx.sdk.goarch))
- return struct(
- env = go_ctx.env,
- go = go_ctx.go,
- goarch = go_ctx.sdk.goarch,
- goos = go_ctx.sdk.goos,
- gotags = go_ctx.tags,
- nogo_args = [],
- runfiles = depset([go_ctx.go] + go_ctx.sdk.srcs + go_ctx.sdk.tools + go_ctx.stdlib.libs),
- stdlib_srcs = go_ctx.sdk.srcs,
- )
-
-def select_goarch():
- return select_arch(amd64 = "amd64", arm64 = "arm64")
-
-def select_goos():
- return select_system(linux = "linux")
diff --git a/tools/bazeldefs/pkg.bzl b/tools/bazeldefs/pkg.bzl
deleted file mode 100644
index ccc9bdeef..000000000
--- a/tools/bazeldefs/pkg.bzl
+++ /dev/null
@@ -1,7 +0,0 @@
-"""Packaging rules."""
-
-# N.B. We refer to pkg_deb_impl to avoid the macro, which cannot use select.
-load("@rules_pkg//:pkg.bzl", _pkg_deb = "pkg_deb_impl", _pkg_tar = "pkg_tar")
-
-pkg_deb = _pkg_deb
-pkg_tar = _pkg_tar
diff --git a/tools/bazeldefs/platforms.bzl b/tools/bazeldefs/platforms.bzl
deleted file mode 100644
index 165b22311..000000000
--- a/tools/bazeldefs/platforms.bzl
+++ /dev/null
@@ -1,9 +0,0 @@
-"""List of platforms."""
-
-# Platform to associated tags.
-platforms = {
- "ptrace": [],
- "kvm": [],
-}
-
-default_platform = "ptrace"
diff --git a/tools/bazeldefs/tags.bzl b/tools/bazeldefs/tags.bzl
deleted file mode 100644
index 6564c3b25..000000000
--- a/tools/bazeldefs/tags.bzl
+++ /dev/null
@@ -1,60 +0,0 @@
-"""List of special Go suffixes."""
-
-def explode(tagset, suffixes):
- """explode combines tagset and suffixes in all ways.
-
- Args:
- tagset: Original suffixes.
- suffixes: Suffixes to combine before and after.
-
- Returns:
- The set of possible combinations.
- """
- result = [t for t in tagset]
- result += [s for s in suffixes]
- for t in tagset:
- result += [t + s for s in suffixes]
- result += [s + t for s in suffixes]
- return result
-
-archs = [
- "_386",
- "_aarch64",
- "_amd64",
- "_arm",
- "_arm64",
- "_mips",
- "_mips64",
- "_mips64le",
- "_mipsle",
- "_ppc64",
- "_ppc64le",
- "_riscv64",
- "_s390x",
- "_sparc64",
- "_x86",
-
- # Pseudo-architectures to group by word side.
- "_32bit",
- "_64bit",
-]
-
-oses = [
- "_linux",
-]
-
-generic = [
- "_impl",
- "_race",
- "_norace",
- "_unsafe",
- "_opts",
-]
-
-# State explosion? Sure. This is approximately:
-#   len(archs) * (1 + 2 * len(oses)) * (1 + 2 * len(generic))
-#
-# This evaluates to 495 at the time of writing. So it's a lot of different
-# combinations, but not so much that it will cause issues. We can probably add
-# quite a few more variants before this becomes a genuine problem.
-go_suffixes = explode(explode(archs, oses), generic)
diff --git a/tools/bigquery/BUILD b/tools/bigquery/BUILD
deleted file mode 100644
index 2b116fe0d..000000000
--- a/tools/bigquery/BUILD
+++ /dev/null
@@ -1,18 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "bigquery",
- testonly = 1,
- srcs = ["bigquery.go"],
- nogo = False, # FIXME(b/184974218): Analysis failing for cloud libraries.
- visibility = [
- "//:sandbox",
- ],
- deps = [
- "@com_google_cloud_go_bigquery//:go_default_library",
- "@org_golang_google_api//option:go_default_library",
- "@org_golang_x_oauth2//:go_default_library",
- ],
-)
diff --git a/tools/bigquery/bigquery.go b/tools/bigquery/bigquery.go
deleted file mode 100644
index 935154acc..000000000
--- a/tools/bigquery/bigquery.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package bigquery defines a BigQuery schema for benchmarks.
-//
-// This package contains a schema for BigQuery and methods for publishing
-// benchmark data into tables.
-package bigquery
-
-import (
- "context"
- "fmt"
- "strconv"
- "strings"
- "time"
-
- bq "cloud.google.com/go/bigquery"
- "google.golang.org/api/option"
-)
-
-// Suite is the top level structure for a benchmark run. BigQuery
-// will infer the schema from this.
-type Suite struct {
- Name string `bq:"name"`
- Conditions []*Condition `bq:"conditions"`
- Benchmarks []*Benchmark `bq:"benchmarks"`
- Official bool `bq:"official"`
- Timestamp time.Time `bq:"timestamp"`
-}
-
-// Benchmark represents an individual benchmark in a suite.
-type Benchmark struct {
- Name string `bq:"name"`
- Condition []*Condition `bq:"condition"`
- Metric []*Metric `bq:"metric"`
-}
-
-// Condition represents qualifiers for the benchmark or suite. For example:
-// Get_Pid/1/real_time would have Benchmark Name "Get_Pid" with "1"
-// and "real_time" parameters as conditions. Suite conditions include
-// information such as the CL number and platform name.
-type Condition struct {
- Name string `bq:"name"`
- Value string `bq:"value"`
-}
-
-// Metric holds the actual metric data and unit information for this benchmark.
-type Metric struct {
- Name string `bq:"name"`
- Unit string `bq:"unit"`
- Sample float64 `bq:"sample"`
-}
-
-// InitBigQuery initializes a BigQuery dataset/table in the project. If the dataset/table already exists, it is not duplicated.
-func InitBigQuery(ctx context.Context, projectID, datasetID, tableID string, opts []option.ClientOption) error {
- client, err := bq.NewClient(ctx, projectID, opts...)
- if err != nil {
- return fmt.Errorf("failed to initialize client on project %s: %v", projectID, err)
- }
- defer client.Close()
-
- dataset := client.Dataset(datasetID)
- if err := dataset.Create(ctx, nil); err != nil && !checkDuplicateError(err) {
- return fmt.Errorf("failed to create dataset: %s: %v", datasetID, err)
- }
-
- table := dataset.Table(tableID)
- schema, err := bq.InferSchema(Suite{})
- if err != nil {
- return fmt.Errorf("failed to infer schema: %v", err)
- }
-
- if err := table.Create(ctx, &bq.TableMetadata{Schema: schema}); err != nil && !checkDuplicateError(err) {
- return fmt.Errorf("failed to create table: %s: %v", tableID, err)
- }
- return nil
-}
-
-// AddCondition adds a condition to an existing Benchmark.
-func (bm *Benchmark) AddCondition(name, value string) {
- bm.Condition = append(bm.Condition, &Condition{
- Name: name,
- Value: value,
- })
-}
-
-// AddMetric adds a metric to an existing Benchmark.
-func (bm *Benchmark) AddMetric(metricName, unit string, sample float64) {
- m := &Metric{
- Name: metricName,
- Unit: unit,
- Sample: sample,
- }
- bm.Metric = append(bm.Metric, m)
-}
-
-// NewBenchmark initializes a new benchmark.
-func NewBenchmark(name string, iters int) *Benchmark {
- return &Benchmark{
- Name: name,
- Metric: make([]*Metric, 0),
- Condition: []*Condition{
- {
- Name: "iterations",
- Value: strconv.Itoa(iters),
- },
- },
- }
-}
-
-// NewBenchmarkWithMetric creates a new Benchmark for sending to BigQuery,
-// initialized with a single iteration and a single metric.
-func NewBenchmarkWithMetric(name, metric, unit string, value float64) *Benchmark {
- b := NewBenchmark(name, 1)
- b.AddMetric(metric, unit, value)
- return b
-}
-
-// NewSuite initializes a new Suite.
-func NewSuite(name string, official bool) *Suite {
- return &Suite{
- Name: name,
- Timestamp: time.Now().UTC(),
- Benchmarks: make([]*Benchmark, 0),
- Conditions: make([]*Condition, 0),
- Official: official,
- }
-}
-
-// SendBenchmarks sends the slice of benchmarks to the BigQuery dataset/table.
-func SendBenchmarks(ctx context.Context, suite *Suite, projectID, datasetID, tableID string, opts []option.ClientOption) error {
- client, err := bq.NewClient(ctx, projectID, opts...)
- if err != nil {
- return fmt.Errorf("failed to initialize client on project: %s: %v", projectID, err)
- }
- defer client.Close()
-
- uploader := client.Dataset(datasetID).Table(tableID).Uploader()
- if err = uploader.Put(ctx, suite); err != nil {
- return fmt.Errorf("failed to upload benchmarks %s to project %s, table %s.%s: %v", suite.Name, projectID, datasetID, tableID, err)
- }
-
- return nil
-}
-
-// checkDuplicateError reports whether err is BigQuery's "409 Already Exists" error for duplicate tables and datasets.
-func checkDuplicateError(err error) bool {
- return strings.Contains(err.Error(), "googleapi: Error 409: Already Exists")
-}
diff --git a/tools/checkescape/BUILD b/tools/checkescape/BUILD
deleted file mode 100644
index 109b5410c..000000000
--- a/tools/checkescape/BUILD
+++ /dev/null
@@ -1,16 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "checkescape",
- srcs = ["checkescape.go"],
- nogo = False,
- visibility = ["//tools/nogo:__subpackages__"],
- deps = [
- "//tools/nogo/objdump",
- "@org_golang_x_tools//go/analysis:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/buildssa:go_default_library",
- "@org_golang_x_tools//go/ssa:go_default_library",
- ],
-)
diff --git a/tools/checkescape/checkescape.go b/tools/checkescape/checkescape.go
deleted file mode 100644
index ddd1212d7..000000000
--- a/tools/checkescape/checkescape.go
+++ /dev/null
@@ -1,881 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package checkescape allows recursive escape analysis for hot paths.
-//
-// The analysis tracks multiple types of escapes, in two categories. First,
-// 'hard' escapes are explicit allocations. Second, 'soft' escapes are
-// interface dispatches or dynamic function dispatches; these don't necessarily
-// escape but they *may* escape. The analysis is capable of making assertions
-// recursively: soft escapes cannot be analyzed in this way, and therefore
-// count as escapes for recursive purposes.
-//
-// The different types of escapes are as follows, with the category in
-// parentheses:
-//
-// heap: A direct allocation is made on the heap (hard).
-// builtin: A call is made to a built-in allocation function (hard).
-// stack: A stack split as part of a function preamble (soft).
-// interface: A call is made via an interface which *may* escape (soft).
-// dynamic: A dynamic function is dispatched which *may* escape (soft).
-//
-// To use the package, annotate a function-level comment with either the
-// line "// +checkescape" or "// +checkescape:OPTION[,OPTION]". In the second
-// case, the OPTION field is either a type above, or one of:
-//
-// local: Escape analysis is limited to local hard escapes only.
-// all: All the escapes are included.
-// hard: All hard escapes are included.
-//
-// If the "// +checkescape" annotation is provided, this is equivalent to
-// provided the local and hard options.
-//
-// Some examples of this syntax are:
-//
-// +checkescape:all - Analyzes for all escapes in this function and all calls.
-// +checkescape:local - Analyzes only for default local hard escapes.
-// +checkescape:heap - Only analyzes for heap escapes.
-// +checkescape:interface,dynamic - Only checks for dynamic calls and interface calls.
-// +checkescape - Does the same as +checkescape:local,hard.
-//
-// Note that all of the above can be inverted by using +mustescape. The
-// +checkescape keyword will ensure failure if the class of escape occurs,
-// whereas +mustescape will fail if the given class of escape does not occur.
-//
-// Local exemptions can be made by a comment of the form "// escapes: reason."
-// This must appear on the line of the escape and also applies to callers of
-// the function (for non-local escape analysis).
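-//
-// For example, a line-level exemption in a hypothetical caller might look like:
-//
-// i.Method() // escapes: interface dispatch is required here.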
-package checkescape
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "go/ast"
- "go/token"
- "go/types"
- "io"
- "log"
- "path/filepath"
- "strings"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/analysis/passes/buildssa"
- "golang.org/x/tools/go/ssa"
- "gvisor.dev/gvisor/tools/nogo/objdump"
-)
-
-const (
- // magic is the magic annotation.
- magic = "// +checkescape"
-
- // magicParams is the magic annotation with specific parameters.
- magicParams = magic + ":"
-
- // testMagic is the test magic annotation (parameters required).
- testMagic = "// +mustescape:"
-
- // exempt is the exemption annotation.
- exempt = "// escapes"
-)
-
-// EscapeReason is an escape reason.
-//
-// This is a simple enum.
-type EscapeReason int
-
-const (
- allocation EscapeReason = iota
- builtin
- interfaceInvoke
- dynamicCall
- stackSplit
- unknownPackage
- reasonCount // Count for below.
-)
-
-// String returns the string for the EscapeReason.
-//
-// Note that this also implicitly defines the reverse string -> EscapeReason
-// mapping, which is the word before the colon (computed below).
-func (e EscapeReason) String() string {
- switch e {
- case interfaceInvoke:
- return "interface: call to potentially allocating function"
- case unknownPackage:
- return "unknown: no package information available"
- case allocation:
- return "heap: explicit allocation"
- case builtin:
- return "builtin: call to potentially allocating builtin"
- case dynamicCall:
- return "dynamic: call to potentially allocating function"
- case stackSplit:
- return "stack: possible split on function entry"
- default:
- panic(fmt.Sprintf("unknown reason: %d", e))
- }
-}
-
-var hardReasons = []EscapeReason{
- allocation,
- builtin,
-}
-
-var softReasons = []EscapeReason{
- interfaceInvoke,
- unknownPackage,
- dynamicCall,
- stackSplit,
-}
-
-var allReasons = append(hardReasons, softReasons...)
-
-var escapeTypes = func() map[string]EscapeReason {
- result := make(map[string]EscapeReason)
- for _, r := range allReasons {
- parts := strings.Split(r.String(), ":")
- result[parts[0]] = r // Key before ':'.
- }
- return result
-}()
-
-// escapingBuiltins are builtins known to escape.
-//
-// These are lowered at an earlier stage of compilation to explicit function
-// calls, but are not available for recursive analysis.
-var escapingBuiltins = []string{
- "append",
- "makemap",
- "newobject",
- "mallocgc",
-}
-
-// packageEscapeFacts is the set of all functions in a package, and whether or
-// not they recursively pass escape analysis.
-//
-// All the type names for receivers are encoded in the full key. The key
-// represents the fully qualified package and type name used at link time.
-//
-// Note that each Escapes object is a summary. Local findings may be reported
-// using more detailed information.
-type packageEscapeFacts struct {
- Funcs map[string]Escapes
-}
-
-// AFact implements analysis.Fact.AFact.
-func (*packageEscapeFacts) AFact() {}
-
-// Analyzer includes specific results.
-var Analyzer = &analysis.Analyzer{
- Name: "checkescape",
- Doc: "escape analysis checks based on +checkescape annotations",
- Run: runSelectEscapes,
- Requires: []*analysis.Analyzer{buildssa.Analyzer},
- FactTypes: []analysis.Fact{(*packageEscapeFacts)(nil)},
-}
-
-// EscapeAnalyzer includes all local escape results.
-var EscapeAnalyzer = &analysis.Analyzer{
- Name: "checkescape",
- Doc: "complete local escape analysis results (requires Analyzer facts)",
- Run: runAllEscapes,
- Requires: []*analysis.Analyzer{buildssa.Analyzer},
-}
-
-// LinePosition is a low-resolution token.Position.
-//
-// This is used to match against possible exemptions placed in the source.
-type LinePosition struct {
- Filename string
- Line int
-}
-
-// String implements fmt.Stringer.String.
-func (e LinePosition) String() string {
- return fmt.Sprintf("%s:%d", e.Filename, e.Line)
-}
-
-// Simplified returns the simplified name.
-func (e LinePosition) Simplified() string {
- return fmt.Sprintf("%s:%d", filepath.Base(e.Filename), e.Line)
-}
-
-// CallSite is a single call site.
-//
-// These can be chained.
-type CallSite struct {
- LocalPos token.Pos
- Resolved LinePosition
-}
-
-// IsValid indicates whether the CallSite is valid or not.
-func (cs *CallSite) IsValid() bool {
- return cs.LocalPos.IsValid()
-}
-
-// Escapes is a collection of escapes.
-//
-// We record at most one escape for each reason, but record the number of
-// escapes that were omitted.
-//
-// This object should be used to summarize all escapes for a single line (local
-// analysis) or a single function (package facts).
-//
-// All fields are exported for gob.
-type Escapes struct {
- CallSites [reasonCount][]CallSite
- Details [reasonCount]string
- Omitted [reasonCount]int
-}
-
-// add is called by Add and MergeWithCall.
-func (es *Escapes) add(r EscapeReason, detail string, omitted int, callSites ...CallSite) {
- if es.CallSites[r] != nil {
- // We will either be replacing the current escape or dropping
- // the added one. Either way, we increment omitted by the
- // appropriate amount.
- es.Omitted[r]++
- // If the incoming callSites contain only a single element, we
- // universally favor it: a single-element chain gives the cleanest
- // escape to summarize and the most precise position to report.
- if len(es.CallSites) == 1 || len(callSites) != 1 {
- return
- }
- }
- es.Details[r] = detail
- es.CallSites[r] = callSites
- es.Omitted[r] += omitted
-}
-
-// Add adds a single escape.
-func (es *Escapes) Add(r EscapeReason, detail string, callSites ...CallSite) {
- es.add(r, detail, 0, callSites...)
-}
-
-// IsEmpty returns true iff this Escapes is empty.
-func (es *Escapes) IsEmpty() bool {
- for _, cs := range es.CallSites {
- if cs != nil {
- return false
- }
- }
- return true
-}
-
-// Filter filters out all escapes except those matching the given reasons.
-//
-// If local is set, then non-local escapes will also be filtered.
-func (es *Escapes) Filter(reasons []EscapeReason, local bool) {
-FilterReasons:
- for r := EscapeReason(0); r < reasonCount; r++ {
- for i := 0; i < len(reasons); i++ {
- if r == reasons[i] {
- continue FilterReasons
- }
- }
- // Zap this reason.
- es.CallSites[r] = nil
- es.Details[r] = ""
- es.Omitted[r] = 0
- }
- if !local {
- return
- }
- for r := EscapeReason(0); r < reasonCount; r++ {
- // Does this meet our local requirement?
- if len(es.CallSites[r]) > 1 {
- es.CallSites[r] = nil
- es.Details[r] = ""
- es.Omitted[r] = 0
- }
- }
-}
-
-// MergeWithCall merges these escapes with another.
-//
-// If callSite is not valid, no call is added.
-func (es *Escapes) MergeWithCall(other Escapes, callSite CallSite) {
- for r := EscapeReason(0); r < reasonCount; r++ {
- if other.CallSites[r] != nil {
- // Construct our new call chain.
- newCallSites := other.CallSites[r]
- if callSite.IsValid() {
- newCallSites = append([]CallSite{callSite}, newCallSites...)
- }
- // Add (potentially replacing) the underlying escape.
- es.add(r, other.Details[r], other.Omitted[r], newCallSites...)
- }
- }
-}
-
-// Reportf will call Reportf for each class of escapes.
-func (es *Escapes) Reportf(pass *analysis.Pass) {
- var b bytes.Buffer // Reused for all escapes.
- for r := EscapeReason(0); r < reasonCount; r++ {
- if es.CallSites[r] == nil {
- continue
- }
- b.Reset()
- fmt.Fprintf(&b, "%s ", r.String())
- if es.Omitted[r] > 0 {
- fmt.Fprintf(&b, "(%d omitted) ", es.Omitted[r])
- }
- for _, cs := range es.CallSites[r][1:] {
- fmt.Fprintf(&b, "→ %s ", cs.Resolved.String())
- }
- fmt.Fprintf(&b, "→ %s", es.Details[r])
- pass.Reportf(es.CallSites[r][0].LocalPos, b.String())
- }
-}
-
-// MergeAll merges a sequence of escapes.
-func MergeAll(others []Escapes) (es Escapes) {
- for _, other := range others {
- es.MergeWithCall(other, CallSite{})
- }
- return
-}
-
-// loadObjdump reads the objdump output.
-//
-// This records whether there is a call to any function for every source line.
-// It is used only to remove false positives for escape analysis. The call will
-// be elided if escape analysis is able to keep the object on the stack
-// exclusively.
-//
-// Note that the map uses <basename.go>:<line> because that is all that is
-// provided in the objdump format. Since this is all local, it is sufficient.
-func loadObjdump() (map[string][]string, error) {
- // Identify calls by address or name. Note that this is also
- // constructed dynamically below, as we encounter the addresses.
- // This is because some of the functions (duffzero) may have
- // jump targets in the middle of the function itself.
- funcsAllowed := map[string]struct{}{
- "runtime.duffzero": {},
- "runtime.duffcopy": {},
- "runtime.racefuncenter": {},
- "runtime.gcWriteBarrier": {},
- "runtime.retpolineAX": {},
- "runtime.retpolineBP": {},
- "runtime.retpolineBX": {},
- "runtime.retpolineCX": {},
- "runtime.retpolineDI": {},
- "runtime.retpolineDX": {},
- "runtime.retpolineR10": {},
- "runtime.retpolineR11": {},
- "runtime.retpolineR12": {},
- "runtime.retpolineR13": {},
- "runtime.retpolineR14": {},
- "runtime.retpolineR15": {},
- "runtime.retpolineR8": {},
- "runtime.retpolineR9": {},
- "runtime.retpolineSI": {},
- "runtime.stackcheck": {},
- "runtime.settls": {},
- }
- addrsAllowed := make(map[string]struct{})
-
- // Build the map.
- nextFunc := "" // For funcsAllowed.
- m := make(map[string][]string)
- if err := objdump.Load(func(origR io.Reader) error {
- r := bufio.NewReader(origR)
- NextLine:
- for {
- line, err := r.ReadString('\n')
- if err != nil && err != io.EOF {
- return err
- }
- fields := strings.Fields(line)
-
- // Is this an "allowed" function definition?
- if len(fields) >= 2 && fields[0] == "TEXT" {
- nextFunc = strings.TrimSuffix(fields[1], "(SB)")
- if _, ok := funcsAllowed[nextFunc]; !ok {
- nextFunc = "" // Don't record addresses.
- }
- }
- if nextFunc != "" && len(fields) > 2 {
- // Save the given address (in hex form, as it appears).
- addrsAllowed[fields[1]] = struct{}{}
- }
-
- // We recognize lines corresponding to actual code (not the
- // symbol name or other metadata) and annotate them if they
- // correspond to an explicit CALL instruction. We assume that
- // the lack of a CALL for a given line is evidence that escape
- // analysis has eliminated an allocation.
- //
- // Lines look like this (including the first space):
- // gohacks_unsafe.go:33 0xa39 488b442408 MOVQ 0x8(SP), AX
- if len(fields) >= 5 && line[0] == ' ' {
- if !strings.Contains(fields[3], "CALL") {
- continue
- }
- site := fields[0]
- target := strings.TrimSuffix(fields[4], "(SB)")
-
- // Ignore strings containing allowed functions.
- if _, ok := funcsAllowed[target]; ok {
- continue
- }
- if _, ok := addrsAllowed[target]; ok {
- continue
- }
- if len(fields) > 5 {
- // This may be a future relocation. Some
- // objdump versions describe this differently.
- // If it contains any of the functions allowed
- // above as a string, we let it go.
- softTarget := strings.Join(fields[5:], " ")
- for name := range funcsAllowed {
- if strings.Contains(softTarget, name) {
- continue NextLine
- }
- }
- }
-
- // Does this exist already?
- existing, ok := m[site]
- if !ok {
- existing = make([]string, 0, 1)
- }
- for _, other := range existing {
- if target == other {
- continue NextLine
- }
- }
- existing = append(existing, target)
- m[site] = existing // Update.
- }
- if err == io.EOF {
- break
- }
- }
- return nil
- }); err != nil {
- return nil, err
- }
-
- // Zap any accidental false positives.
- final := make(map[string][]string)
- for site, calls := range m {
- filteredCalls := make([]string, 0, len(calls))
- for _, call := range calls {
- if _, ok := addrsAllowed[call]; ok {
- continue // Omit this call.
- }
- filteredCalls = append(filteredCalls, call)
- }
- final[site] = filteredCalls
- }
-
- return final, nil
-}
-
-// poser is a type that implements Pos.
-type poser interface {
- Pos() token.Pos
-}
-
-// runSelectEscapes runs with only select escapes.
-func runSelectEscapes(pass *analysis.Pass) (interface{}, error) {
- return run(pass, false)
-}
-
-// runAllEscapes runs with all escapes included.
-func runAllEscapes(pass *analysis.Pass) (interface{}, error) {
- return run(pass, true)
-}
-
-// findReasons extracts reasons from the function.
-func findReasons(pass *analysis.Pass, fdecl *ast.FuncDecl) ([]EscapeReason, bool, map[EscapeReason]bool) {
- // Is there a comment?
- if fdecl.Doc == nil {
- return nil, false, nil
- }
- var (
- reasons []EscapeReason
- local bool
- testReasons = make(map[EscapeReason]bool) // reason -> local?
- )
- // Scan all lines.
- found := false
- for _, c := range fdecl.Doc.List {
- // Does the comment contain a +checkescape line?
- if !strings.HasPrefix(c.Text, magic) && !strings.HasPrefix(c.Text, testMagic) {
- continue
- }
- if c.Text == magic {
- // Default: hard reasons, local only.
- reasons = hardReasons
- local = true
- } else if strings.HasPrefix(c.Text, magicParams) {
- // Extract specific reasons.
- types := strings.Split(c.Text[len(magicParams):], ",")
- found = true // For below.
- for i := 0; i < len(types); i++ {
- if types[i] == "local" {
- // Limit search to local escapes.
- local = true
- } else if types[i] == "all" {
- // Append all reasons.
- reasons = append(reasons, allReasons...)
- } else if types[i] == "hard" {
- // Append all hard reasons.
- reasons = append(reasons, hardReasons...)
- } else {
- r, ok := escapeTypes[types[i]]
- if !ok {
- // This is not a valid escape reason.
- pass.Reportf(fdecl.Pos(), "unknown reason: %v", types[i])
- continue
- }
- reasons = append(reasons, r)
- }
- }
- } else if strings.HasPrefix(c.Text, testMagic) {
- types := strings.Split(c.Text[len(testMagic):], ",")
- local := false
- for i := 0; i < len(types); i++ {
- if types[i] == "local" {
- local = true
- } else {
- r, ok := escapeTypes[types[i]]
- if !ok {
- // This is not a valid escape reason.
- pass.Reportf(fdecl.Pos(), "unknown reason: %v", types[i])
- continue
- }
- if v, ok := testReasons[r]; ok && v {
- // Already registered as local.
- continue
- }
- testReasons[r] = local
- }
- }
- }
- }
- if len(reasons) == 0 && found {
- // A magic annotation was provided, but no reasons.
- pass.Reportf(fdecl.Pos(), "no reasons provided")
- }
- return reasons, local, testReasons
-}
-
-// run performs the analysis.
-func run(pass *analysis.Pass, localEscapes bool) (interface{}, error) {
- calls, callsErr := loadObjdump()
- if callsErr != nil {
- // Note that if this analysis fails, then we don't actually
- // fail the analyzer itself. We simply report every possible
- // escape. In most cases this will work just fine.
- log.Printf("WARNING: unable to load objdump: %v", callsErr)
- }
- allEscapes := make(map[string][]Escapes)
- mergedEscapes := make(map[string]Escapes)
- linePosition := func(inst, parent poser) LinePosition {
- p := pass.Fset.Position(inst.Pos())
- if (p.Filename == "" || p.Line == 0) && parent != nil {
- p = pass.Fset.Position(parent.Pos())
- }
- return LinePosition{
- Filename: p.Filename,
- Line: p.Line,
- }
- }
- callSite := func(inst ssa.Instruction) CallSite {
- return CallSite{
- LocalPos: inst.Pos(),
- Resolved: linePosition(inst, inst.Parent()),
- }
- }
- hasCall := func(inst poser) (string, bool) {
- p := linePosition(inst, nil)
- if callsErr != nil {
- // See above: we don't have access to the binary
- // itself, so we need to include every possible call.
- return fmt.Sprintf("(possible, unable to load objdump: %v)", callsErr), true
- }
- s, ok := calls[p.Simplified()]
- if !ok {
- return "", false
- }
- // Join all calls together.
- return strings.Join(s, " or "), true
- }
- state := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA)
-
- // Build the exception list.
- exemptions := make(map[LinePosition]string)
- for _, f := range pass.Files {
- for _, cg := range f.Comments {
- for _, c := range cg.List {
- p := pass.Fset.Position(c.Slash)
- if strings.HasPrefix(strings.ToLower(c.Text), exempt) {
- exemptions[LinePosition{
- Filename: p.Filename,
- Line: p.Line,
- }] = c.Text[len(exempt):]
- }
- }
- }
- }
-
- var loadFunc func(*ssa.Function) Escapes // Used below.
- analyzeInstruction := func(inst ssa.Instruction) (es Escapes) {
- cs := callSite(inst)
- if _, ok := exemptions[cs.Resolved]; ok {
- return // No escape.
- }
- switch x := inst.(type) {
- case *ssa.Call:
- if x.Call.IsInvoke() {
- // This is an interface dispatch. There is no
- // way to know if this is actually escaping or
- // not, since we don't know the underlying
- // type.
- call, _ := hasCall(inst)
- es.Add(interfaceInvoke, call, cs)
- return
- }
- switch x := x.Call.Value.(type) {
- case *ssa.Function:
- if x.Pkg == nil {
- // Can't resolve the package.
- es.Add(unknownPackage, "no package", cs)
- return
- }
-
- // Is this a local function? If yes, use loadFunc to
- // compute its escapes; the escapes of this call are
- // the escapes found in that function.
- if x.Pkg.Pkg == pass.Pkg {
- es.MergeWithCall(loadFunc(x), cs)
- return
- }
-
- // If this package is the atomic package, the implementation
- // may be replaced by intrinsics that don't have analysis.
- if x.Pkg.Pkg.Path() == "sync/atomic" {
- return
- }
-
- // Recursively collect information.
- var imp packageEscapeFacts
- if !pass.ImportPackageFact(x.Pkg.Pkg, &imp) {
- // Unable to import the dependency; we must
- // declare these as escaping.
- es.Add(unknownPackage, "no analysis", cs)
- return
- }
-
- // The escapes of this instruction are the
- // escapes of the called function directly.
- // Note that this may record many escapes.
- es.MergeWithCall(imp.Funcs[x.RelString(x.Pkg.Pkg)], cs)
- return
- case *ssa.Builtin:
- // Ignore elided escapes.
- if _, has := hasCall(inst); !has {
- return
- }
-
- // Check if the builtin is escaping.
- for _, name := range escapingBuiltins {
- if x.Name() == name {
- es.Add(builtin, name, cs)
- return
- }
- }
- default:
- // All dynamic calls are counted as soft
- // escapes. They are similar to interface
- // dispatches. We cannot actually look up what
- // this refers to using static analysis alone.
- call, _ := hasCall(inst)
- es.Add(dynamicCall, call, cs)
- }
- case *ssa.Alloc:
- // Ignore non-heap allocations.
- if !x.Heap {
- return
- }
-
- // Ignore elided escapes.
- call, has := hasCall(inst)
- if !has {
- return
- }
-
- // This is a real heap allocation.
- es.Add(allocation, call, cs)
- case *ssa.MakeMap:
- es.Add(builtin, "makemap", cs)
- case *ssa.MakeSlice:
- es.Add(builtin, "makeslice", cs)
- case *ssa.MakeClosure:
- es.Add(builtin, "makeclosure", cs)
- case *ssa.MakeChan:
- es.Add(builtin, "makechan", cs)
- }
- return
- }
-
- var analyzeBasicBlock func(*ssa.BasicBlock) []Escapes // Recursive.
- analyzeBasicBlock = func(block *ssa.BasicBlock) (rval []Escapes) {
- for _, inst := range block.Instrs {
- if es := analyzeInstruction(inst); !es.IsEmpty() {
- rval = append(rval, es)
- }
- }
- return
- }
-
- loadFunc = func(fn *ssa.Function) Escapes {
- // Is this already available?
- name := fn.RelString(pass.Pkg)
- if es, ok := mergedEscapes[name]; ok {
- return es
- }
-
- // In the case of a true cycle, we assume that the current
- // function itself has no escapes.
- //
- // When evaluating the function again, the proper escapes will
- // be filled in here.
- allEscapes[name] = nil
- mergedEscapes[name] = Escapes{}
-
- // Perform the basic analysis.
- var es []Escapes
- if fn.Recover != nil {
- es = append(es, analyzeBasicBlock(fn.Recover)...)
- }
- for _, block := range fn.Blocks {
- es = append(es, analyzeBasicBlock(block)...)
- }
-
- // Check for a stack split.
- if call, has := hasCall(fn); has {
- var ss Escapes
- ss.Add(stackSplit, call, CallSite{
- LocalPos: fn.Pos(),
- Resolved: linePosition(fn, fn.Parent()),
- })
- es = append(es, ss)
- }
-
- // Save the result and return.
- //
- // Note that we merge the result when saving to the facts. The
- // specific escapes don't really matter, as long as we have
- // recorded all the appropriate classes of escapes.
- summary := MergeAll(es)
- allEscapes[name] = es
- mergedEscapes[name] = summary
- return summary
- }
-
- // Complete all local functions.
- for _, fn := range state.SrcFuncs {
- loadFunc(fn)
- }
-
- if !localEscapes {
- // Export all findings for future packages. We only do this in
- // non-local escapes mode, and expect to run this analysis
- // after the SelectAnalysis.
- pass.ExportPackageFact(&packageEscapeFacts{
- Funcs: mergedEscapes,
- })
- }
-
- // Scan all functions for violations.
- for _, f := range pass.Files {
- // Scan all declarations.
- for _, decl := range f.Decls {
- // Function declaration?
- fdecl, ok := decl.(*ast.FuncDecl)
- if !ok {
- continue
- }
- var (
- reasons []EscapeReason
- local bool
- testReasons map[EscapeReason]bool
- )
- if localEscapes {
- // Find all hard escapes.
- reasons = hardReasons
- } else {
- // Find all declared reasons.
- reasons, local, testReasons = findReasons(pass, fdecl)
- }
-
- // Scan for matches.
- fn := pass.TypesInfo.Defs[fdecl.Name].(*types.Func)
- fv := state.Pkg.Prog.FuncValue(fn)
- if fv == nil {
- continue
- }
- name := fv.RelString(pass.Pkg)
- all, allOk := allEscapes[name]
- merged, mergedOk := mergedEscapes[name]
- if !allOk || !mergedOk {
- pass.Reportf(fdecl.Pos(), "internal error: function %s not found.", name)
- continue
- }
-
- // Filter reasons and report.
- //
- // For the findings, we use all escapes.
- for _, es := range all {
- es.Filter(reasons, local)
- es.Reportf(pass)
- }
-
- // Scan for test (required) matches.
- //
- // For tests we need only the merged escapes.
- testReasonsFound := make(map[EscapeReason]bool)
- for r := EscapeReason(0); r < reasonCount; r++ {
- if merged.CallSites[r] == nil {
- continue
- }
- // Is this local?
- wantLocal, ok := testReasons[r]
- isLocal := len(merged.CallSites[r]) == 1
- testReasonsFound[r] = isLocal
- if !ok {
- continue
- }
- if isLocal == wantLocal {
- delete(testReasons, r)
- }
- }
- for reason, local := range testReasons {
- // We didn't find the escapes we wanted.
- pass.Reportf(fdecl.Pos(), "testescapes not found: reason=%s, local=%t", reason, local)
- }
- if len(testReasons) > 0 {
- // Report for debugging.
- merged.Reportf(pass)
- }
- }
- }
-
- return nil, nil
-}
diff --git a/tools/checkescape/test1/BUILD b/tools/checkescape/test1/BUILD
deleted file mode 100644
index 783403247..000000000
--- a/tools/checkescape/test1/BUILD
+++ /dev/null
@@ -1,9 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "test1",
- srcs = ["test1.go"],
- visibility = ["//tools/checkescape/test2:__pkg__"],
-)
diff --git a/tools/checkescape/test1/test1.go b/tools/checkescape/test1/test1.go
deleted file mode 100644
index f46eba39b..000000000
--- a/tools/checkescape/test1/test1.go
+++ /dev/null
@@ -1,192 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package test1 is a test package.
-package test1
-
-import (
- "fmt"
-)
-
-// Interface is a generic interface.
-type Interface interface {
- Foo()
-}
-
-// Type is a concrete implementation of Interface.
-type Type struct {
- A uint64
- B uint64
-}
-
-// Foo implements Interface.Foo.
-//go:nosplit
-func (t Type) Foo() {
- fmt.Printf("%v", t) // Never executed.
-}
-
-// InterfaceFunction is passed an interface argument.
-// +checkescape:all,hard
-//go:nosplit
-func InterfaceFunction(i Interface) {
- // Do nothing; exported for tests.
-}
-
-// TypeFunction is passed a concrete pointer argument.
-// +checkesacape:all,hard
-//go:nosplit
-func TypeFunction(t *Type) {
-}
-
-// BuiltinMap creates a new map.
-// +mustescape:local,builtin
-//go:noinline
-//go:nosplit
-func BuiltinMap(x int) map[string]bool {
- return make(map[string]bool)
-}
-
-// +mustescape:builtin
-//go:noinline
-//go:nosplit
-func builtinMapRec(x int) map[string]bool {
- return BuiltinMap(x)
-}
-
-// BuiltinClosure returns a closure around x.
-// +mustescape:local,builtin
-//go:noinline
-//go:nosplit
-func BuiltinClosure(x int) func() {
- return func() {
- fmt.Printf("%v", x)
- }
-}
-
-// +mustescape:builtin
-//go:noinline
-//go:nosplit
-func builtinClosureRec(x int) func() {
- return BuiltinClosure(x)
-}
-
-// BuiltinMakeSlice makes a new slice.
-// +mustescape:local,builtin
-//go:noinline
-//go:nosplit
-func BuiltinMakeSlice(x int) []byte {
- return make([]byte, x)
-}
-
-// +mustescape:builtin
-//go:noinline
-//go:nosplit
-func builtinMakeSliceRec(x int) []byte {
- return BuiltinMakeSlice(x)
-}
-
-// BuiltinAppend calls append on a slice.
-// +mustescape:local,builtin
-//go:noinline
-//go:nosplit
-func BuiltinAppend(x []byte) []byte {
- return append(x, 0)
-}
-
-// +mustescape:builtin
-//go:noinline
-//go:nosplit
-func builtinAppendRec() []byte {
- return BuiltinAppend(nil)
-}
-
-// BuiltinChan makes a channel.
-// +mustescape:local,builtin
-//go:noinline
-//go:nosplit
-func BuiltinChan() chan int {
- return make(chan int)
-}
-
-// +mustescape:builtin
-//go:noinline
-//go:nosplit
-func builtinChanRec() chan int {
- return BuiltinChan()
-}
-
-// Heap performs an explicit heap allocation.
-// +mustescape:local,heap
-//go:noinline
-//go:nosplit
-func Heap() *Type {
- var t Type
- return &t
-}
-
-// +mustescape:heap
-//go:noinline
-//go:nosplit
-func heapRec() *Type {
- return Heap()
-}
-
-// Dispatch dispatches via an interface.
-// +mustescape:local,interface
-//go:noinline
-//go:nosplit
-func Dispatch(i Interface) {
- i.Foo()
-}
-
-// +mustescape:interface
-//go:noinline
-//go:nosplit
-func dispatchRec(i Interface) {
- Dispatch(i)
-}
-
-// Dynamic invokes a dynamic function.
-// +mustescape:local,dynamic
-//go:noinline
-//go:nosplit
-func Dynamic(f func()) {
- f()
-}
-
-// +mustescape:dynamic
-//go:noinline
-//go:nosplit
-func dynamicRec(f func()) {
- Dynamic(f)
-}
-
-//go:noinline
-//go:nosplit
-func internalFunc() {
-}
-
-// Split includes a guaranteed stack split.
-// +mustescape:local,stack
-//go:noinline
-func Split() {
- internalFunc()
-}
-
-// +mustescape:stack
-//go:noinline
-//go:nosplit
-func splitRec() {
- Split()
-}
diff --git a/tools/checkescape/test2/BUILD b/tools/checkescape/test2/BUILD
deleted file mode 100644
index 5a11e4b43..000000000
--- a/tools/checkescape/test2/BUILD
+++ /dev/null
@@ -1,9 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "test2",
- srcs = ["test2.go"],
- deps = ["//tools/checkescape/test1"],
-)
diff --git a/tools/checkescape/test2/test2.go b/tools/checkescape/test2/test2.go
deleted file mode 100644
index 067d5a1f4..000000000
--- a/tools/checkescape/test2/test2.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package test2 is a test package that imports test1.
-package test2
-
-import (
- "gvisor.dev/gvisor/tools/checkescape/test1"
-)
-
-// +checkescape:all
-//go:nosplit
-func interfaceFunctionCrossPkg() {
- var i test1.Interface
- test1.InterfaceFunction(i)
-}
-
-// +checkesacape:all
-//go:nosplit
-func typeFunctionCrossPkg() {
- var t test1.Type
- test1.TypeFunction(&t)
-}
-
-// +mustescape:builtin
-//go:noinline
-func builtinMapCrossPkg(x int) map[string]bool {
- return test1.BuiltinMap(x)
-}
-
-// +mustescape:builtin
-//go:noinline
-func builtinClosureCrossPkg(x int) func() {
- return test1.BuiltinClosure(x)
-}
-
-// +mustescape:builtin
-//go:noinline
-func builtinMakeSliceCrossPkg(x int) []byte {
- return test1.BuiltinMakeSlice(x)
-}
-
-// +mustescape:builtin
-//go:noinline
-func builtinAppendCrossPkg() []byte {
- return test1.BuiltinAppend(nil)
-}
-
-// +mustescape:builtin
-//go:noinline
-func builtinChanCrossPkg() chan int {
- return test1.BuiltinChan()
-}
-
-// +mustescape:heap
-//go:noinline
-func heapCrossPkg() *test1.Type {
- return test1.Heap()
-}
-
-// +mustescape:interface
-//go:noinline
-func dispatchCrossPkg(i test1.Interface) {
- test1.Dispatch(i)
-}
-
-// +mustescape:dynamic
-//go:noinline
-func dynamicCrossPkg(f func()) {
- test1.Dynamic(f)
-}
-
-// +mustescape:stack
-//go:noinline
-//go:nosplit
-func splitCrossPkg() {
- test1.Split()
-}
diff --git a/tools/checklocks/BUILD b/tools/checklocks/BUILD
deleted file mode 100644
index d23b7cde6..000000000
--- a/tools/checklocks/BUILD
+++ /dev/null
@@ -1,21 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "checklocks",
- srcs = [
- "analysis.go",
- "annotations.go",
- "checklocks.go",
- "facts.go",
- "state.go",
- ],
- nogo = False,
- visibility = ["//tools/nogo:__subpackages__"],
- deps = [
- "@org_golang_x_tools//go/analysis:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/buildssa:go_default_library",
- "@org_golang_x_tools//go/ssa:go_default_library",
- ],
-)
diff --git a/tools/checklocks/README.md b/tools/checklocks/README.md
deleted file mode 100644
index bd4beb649..000000000
--- a/tools/checklocks/README.md
+++ /dev/null
@@ -1,157 +0,0 @@
-# CheckLocks Analyzer
-
-<!--* freshness: { owner: 'gvisor-eng' reviewed: '2021-03-21' } *-->
-
-Checklocks is an analyzer for lock and atomic constraints. The analyzer relies
-on explicit annotations to identify fields that should be checked for access.
-
-## Atomic annotations
-
-Individual struct members may be noted as requiring atomic access. These
-annotations are of the form:
-
-```go
-type foo struct {
- // +checkatomic
- bar int32
-}
-```
-
-This will ensure that all accesses to bar are atomic, with the exception of
-operations on newly allocated objects.
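-
-For illustration, a compliant access might look like the following sketch
-(assuming the 'foo' type above and the standard 'sync/atomic' package):
-
-```go
-func (f *foo) increment() {
- atomic.AddInt32(&f.bar, 1) // OK: atomic access.
- // A plain f.bar++ would be flagged as a non-atomic access.
-}
-```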
-
-## Lock annotations
-
-Individual struct members may be protected by annotations that indicate locking
-requirements for accessing members. These annotations are of the form:
-
-```go
-type foo struct {
- mu sync.Mutex
- // +checklocks:mu
- bar int
-
- foo int // No annotation on foo means it's not guarded by mu.
-
- secondMu sync.Mutex
-
- // Multiple annotations indicate that both must be held but the
- // checker does not assert any lock ordering.
- // +checklocks:secondMu
- // +checklocks:mu
- foobar int
-}
-```
-
-The checklocks annotation may also apply to functions. For example:
-
-```go
-// +checklocks:f.mu
-func (f *foo) doThingLocked() { }
-```
-
-This will check that "f.mu" is held at any call site, where possible.
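-
-For illustration, a caller satisfying this annotation might look like:
-
-```go
-func (f *foo) doThing() {
- f.mu.Lock()
- f.doThingLocked() // OK: f.mu is held at this call site.
- f.mu.Unlock()
-}
-```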
-
-For functions that initialize structs whose fields carry annotations, the
-following annotation can be applied to the function to disable reporting by the
-lock checker. The checker will still track any mutexes acquired or released,
-but it won't report failures for unguarded field accesses within the function.
-
-```go
-// +checklocks:ignore
-func newXXX() *X {
-...
-}
-```
-
-***The checker treats 'sync.Mutex' and 'sync.RWMutex' identically, i.e., as a
-sync.Mutex. It does not distinguish between read locks and exclusive locks and
-treats all locks as exclusive locks.***
-
-For the cases the checker is able to handle correctly today, please see test/test.go.
-
-The checklocks check also flags invalid annotations, i.e. where the mutex
-annotation refers to something that is not a 'sync.Mutex' or 'sync.RWMutex',
-or where the field does not exist at all. This prevents the annotations from
-becoming stale over time as fields are renamed.
-
-## Currently not supported
-
-1. Anonymous functions are not correctly evaluated. The analyzer does not
- currently support specifying annotations on anonymous functions; as a result,
- evaluation of an anonymous function that accesses protected fields will fail.
-
-```go
-type A struct {
- mu sync.Mutex
-
- // +checklocks:mu
- x int
-}
-
-func abc() {
- var a A
- f := func() { a.x = 1 } <=== This line will be flagged by analyzer
- a.mu.Lock()
- f()
- a.mu.Unlock()
-}
-```
-
-### Explicitly Not Supported
-
-1. Checking for embedded mutexes as sync.Locker rather than directly as
- 'sync.Mutex'. In other words, the checker will not track the Lock() and
- Unlock() methods where the mutex is behind an interface dispatch.
-
-An example that we won't handle is shown below (this in fact will fail to
-build):
-
-```go
-type A struct {
- mu sync.Locker
-
- // +checklocks:mu
- x int
-}
-
-func abc() {
- mu sync.Mutex
- a := A{mu: &mu}
- a.x = 1 // This won't be flagged by copylocks checker.
-}
-
-```
-
-2. The checker will not support guards on anything other than the cases
- described above. For example, global mutexes cannot be referred to by
- checklocks. Only struct members can be used.
-
-3. The checker will not support checking for lock ordering violations.
-
-## Mixed mode
-
-Some members may allow read-only atomic access, but be protected against writes
-by a mutex. Generally, this imposes the following requirements:
-
-For a read, one of the following must be true:
-
-1. A lock must be held.
-1. The access is atomic.
-
-For a write, both of the following must be true:
-
-1. The lock must be held.
-1. The write must be atomic.
-
-In order to annotate a relevant field, simply apply *both* annotations from
-above. For example:
-
-```go
-type foo struct {
- mu sync.Mutex
- // +checklocks:mu
- // +checkatomic
- bar int32
-}
-```
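-
-With both annotations in place, reads and writes of such a field might look
-like the following sketch (assuming the 'foo' type above):
-
-```go
-func (f *foo) read() int32 {
- return atomic.LoadInt32(&f.bar) // Atomic read: no lock required.
-}
-
-func (f *foo) write(v int32) {
- f.mu.Lock()
- atomic.StoreInt32(&f.bar, v) // Writes must be atomic *and* hold f.mu.
- f.mu.Unlock()
-}
-```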
diff --git a/tools/checklocks/analysis.go b/tools/checklocks/analysis.go
deleted file mode 100644
index d3fd797d0..000000000
--- a/tools/checklocks/analysis.go
+++ /dev/null
@@ -1,628 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package checklocks
-
-import (
- "go/token"
- "go/types"
- "strings"
-
- "golang.org/x/tools/go/ssa"
-)
-
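-// gcd returns the greatest common divisor of a and b; it is used below to
-// compute the least common multiple of field alignment requirements.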
-func gcd(a, b atomicAlignment) atomicAlignment {
- for b != 0 {
- a, b = b, a%b
- }
- return a
-}
-
-// typeAlignment returns the type alignment for the given type.
-func (pc *passContext) typeAlignment(pkg *types.Package, obj types.Object) atomicAlignment {
- requiredOffset := atomicAlignment(1)
- if pc.pass.ImportObjectFact(obj, &requiredOffset) {
- return requiredOffset
- }
-
- switch x := obj.Type().Underlying().(type) {
- case *types.Struct:
- fields := make([]*types.Var, x.NumFields())
- for i := 0; i < x.NumFields(); i++ {
- fields[i] = x.Field(i)
- }
- offsets := pc.pass.TypesSizes.Offsetsof(fields)
- for i := 0; i < x.NumFields(); i++ {
- // Check the offset, and then assume that this offset
- // aligns with the offset for the broader type.
- fieldRequired := pc.typeAlignment(pkg, fields[i])
- if offsets[i]%int64(fieldRequired) != 0 {
- // The offset of this field is not compatible.
- pc.maybeFail(fields[i].Pos(), "have alignment %d, need %d", offsets[i], fieldRequired)
- }
- // Ensure that requiredOffset is the LCM of all field alignments.
- requiredOffset *= fieldRequired / gcd(requiredOffset, fieldRequired)
- }
- case *types.Array:
- // Export direct alignment requirements.
- if named, ok := x.Elem().(*types.Named); ok {
- requiredOffset = pc.typeAlignment(pkg, named.Obj())
- }
- default:
- // Use the compiler's underlying alignment.
- requiredOffset = atomicAlignment(pc.pass.TypesSizes.Alignof(obj.Type().Underlying()))
- }
-
- if pkg == obj.Pkg() {
- // Cache as an object fact for subsequent calls. Note that we
- // can only export object facts for the package that we are
- // currently analyzing. There may be no exported facts for
- // array types or alias types, for example.
- pc.pass.ExportObjectFact(obj, &requiredOffset)
- }
-
- return requiredOffset
-}
-
-// checkTypeAlignment checks the alignment of the given type.
-//
-// This calls typeAlignment, which resolves all types recursively. This method
-// should be called for all types individually to ensure full coverage.
-func (pc *passContext) checkTypeAlignment(pkg *types.Package, typ *types.Named) {
- _ = pc.typeAlignment(pkg, typ.Obj())
-}
-
-// checkAtomicCall checks for an atomic access.
-//
-// inst is the instruction analyzed, obj is used only for maybeFail.
-//
-// If mustBeAtomic is true, then we assert that the instruction *is* an atomic
-// function call. If it is false, then we assert that it is *not* an atomic
-// dispatch.
-//
-// If readOnly is true, then only atomic read accesses are allowed. Note that
-// readOnly is only meaningful if mustBeAtomic is set.
-func (pc *passContext) checkAtomicCall(inst ssa.Instruction, obj types.Object, mustBeAtomic, readOnly bool) {
- switch x := inst.(type) {
- case *ssa.Call:
- if x.Common().IsInvoke() {
- if mustBeAtomic {
- // This is an illegal interface dispatch.
- pc.maybeFail(inst.Pos(), "dynamic dispatch with atomic-only field")
- }
- return
- }
- fn, ok := x.Common().Value.(*ssa.Function)
- if !ok {
- if mustBeAtomic {
- // This is an illegal call to a non-static function.
- pc.maybeFail(inst.Pos(), "dispatch to non-static function with atomic-only field")
- }
- return
- }
- pkg := fn.Package()
- if pkg == nil {
- if mustBeAtomic {
- // This is a call to some shared wrapper function.
- pc.maybeFail(inst.Pos(), "dispatch to shared function or wrapper")
- }
- return
- }
- var lff lockFunctionFacts // Check for exemption.
- if obj := fn.Object(); obj != nil && pc.pass.ImportObjectFact(obj, &lff) && lff.Ignore {
- return
- }
- if name := pkg.Pkg.Name(); name != "atomic" && name != "atomicbitops" {
- if mustBeAtomic {
- // This is an illegal call to a non-atomic package function.
- pc.maybeFail(inst.Pos(), "dispatch to non-atomic function with atomic-only field")
- }
- return
- }
- if !mustBeAtomic {
- // We are *not* expecting an atomic dispatch.
- if _, ok := pc.forced[pc.positionKey(inst.Pos())]; !ok {
- pc.maybeFail(inst.Pos(), "unexpected call to atomic function")
- }
- }
- if !strings.HasPrefix(fn.Name(), "Load") && readOnly {
- // We are not allowing any reads in this context.
- if _, ok := pc.forced[pc.positionKey(inst.Pos())]; !ok {
- pc.maybeFail(inst.Pos(), "unexpected call to atomic write function, is a lock missing?")
- }
- return
- }
- default:
- if mustBeAtomic {
- // This is something else entirely.
- if _, ok := pc.forced[pc.positionKey(inst.Pos())]; !ok {
- pc.maybeFail(inst.Pos(), "illegal use of atomic-only field by %T instruction", inst)
- }
- return
- }
- }
-}
-
-func resolveStruct(typ types.Type) (*types.Struct, bool) {
- structType, ok := typ.Underlying().(*types.Struct)
- if ok {
- return structType, true
- }
- ptrType, ok := typ.Underlying().(*types.Pointer)
- if ok {
- return resolveStruct(ptrType.Elem())
- }
- return nil, false
-}
-
-func findField(typ types.Type, field int) (types.Object, bool) {
- structType, ok := resolveStruct(typ)
- if !ok {
- return nil, false
- }
- return structType.Field(field), true
-}
-
-// instructionWithReferrers is a generalization over ssa.Field, ssa.FieldAddr.
-type instructionWithReferrers interface {
- ssa.Instruction
- Referrers() *[]ssa.Instruction
-}
-
-// checkFieldAccess checks the validity of a field access.
-//
-// This also enforces atomicity constraints for fields that must be accessed
-// atomically. The parameter isWrite indicates whether this field is used
-// downstream for a write operation.
-func (pc *passContext) checkFieldAccess(inst instructionWithReferrers, structObj ssa.Value, field int, ls *lockState, isWrite bool) {
- var (
- lff lockFieldFacts
- lgf lockGuardFacts
- guardsFound int
- guardsHeld int
- )
-
- fieldObj, _ := findField(structObj.Type(), field)
- pc.pass.ImportObjectFact(fieldObj, &lff)
- pc.pass.ImportObjectFact(fieldObj, &lgf)
-
- for guardName, fl := range lgf.GuardedBy {
- guardsFound++
- r := fl.resolve(structObj)
- if _, ok := ls.isHeld(r); ok {
- guardsHeld++
- continue
- }
- if _, ok := pc.forced[pc.positionKey(inst.Pos())]; ok {
- // Mark this as locked, since it has been forced.
- ls.lockField(r)
- guardsHeld++
- continue
- }
- // Note that we may allow this if the disposition is atomic,
- // and we are allowing atomic reads only. This will fall into
- // the atomic disposition check below, which asserts that the
- // access is atomic. Further, guardsHeld < guardsFound will be
- // true for this case, so we require it to be read-only.
- if lgf.AtomicDisposition != atomicRequired {
- // There is no force key, no atomic access and no lock held.
- pc.maybeFail(inst.Pos(), "invalid field access, %s must be locked when accessing %s (locks: %s)", guardName, fieldObj.Name(), ls.String())
- }
- }
-
- // Check the atomic access for this field.
- switch lgf.AtomicDisposition {
- case atomicRequired:
- // Check that this is used safely as an input.
- readOnly := guardsHeld < guardsFound
- if refs := inst.Referrers(); refs != nil {
- for _, otherInst := range *refs {
- pc.checkAtomicCall(otherInst, fieldObj, true, readOnly)
- }
- }
- // Check that this is not otherwise written non-atomically,
- // even if we do hold all the locks.
- if isWrite {
- pc.maybeFail(inst.Pos(), "non-atomic write of field %s, writes must still be atomic with locks held (locks: %s)", fieldObj.Name(), ls.String())
- }
- case atomicDisallow:
- // Check that this is *not* used atomically.
- if refs := inst.Referrers(); refs != nil {
- for _, otherInst := range *refs {
- pc.checkAtomicCall(otherInst, fieldObj, false, false)
- }
- }
- }
-}
-
-func (pc *passContext) checkCall(call callCommon, ls *lockState) {
- // See: https://godoc.org/golang.org/x/tools/go/ssa#CallCommon
- //
- // 1. "call" mode: when Method is nil (!IsInvoke), a CallCommon represents an ordinary
- // function call of the value in Value, which may be a *Builtin, a *Function or any
- // other value of kind 'func'.
- //
- // Value may be one of:
- // (a) a *Function, indicating a statically dispatched call
- // to a package-level function, an anonymous function, or
- // a method of a named type.
- //
- // (b) a *MakeClosure, indicating an immediately applied
- // function literal with free variables.
- //
- // (c) a *Builtin, indicating a statically dispatched call
- // to a built-in function.
- //
- // (d) any other value, indicating a dynamically dispatched
- // function call.
- switch fn := call.Common().Value.(type) {
- case *ssa.Function:
- var lff lockFunctionFacts
- if fn.Object() != nil {
- pc.pass.ImportObjectFact(fn.Object(), &lff)
- pc.checkFunctionCall(call, fn, &lff, ls)
- } else {
- // Anonymous functions have no facts, and cannot be
- // annotated. We don't check for violations using the
- // function facts, since they cannot exist. Instead, we
- // do a fresh analysis using the current lock state.
- fnls := ls.fork()
- for i, arg := range call.Common().Args {
- fnls.store(fn.Params[i], arg)
- }
- pc.checkFunction(call, fn, &lff, fnls, true /* force */)
- }
- case *ssa.MakeClosure:
- // Note that creating and then invoking closures locally is
- // allowed, but analysis of passing closures is done when
- // checking individual instructions.
- pc.checkClosure(call, fn, ls)
- default:
- return
- }
-}
-
-// postFunctionCallUpdate updates all conditions.
-func (pc *passContext) postFunctionCallUpdate(call callCommon, lff *lockFunctionFacts, ls *lockState) {
- // Release all locks not still held.
- for fieldName, fg := range lff.HeldOnEntry {
- if _, ok := lff.HeldOnExit[fieldName]; ok {
- continue
- }
- r := fg.resolveCall(call.Common().Args, call.Value())
- if s, ok := ls.unlockField(r); !ok {
- if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok {
- pc.maybeFail(call.Pos(), "attempt to release %s (%s), but not held (locks: %s)", fieldName, s, ls.String())
- }
- }
- }
-
- // Update all held locks if acquired.
- for fieldName, fg := range lff.HeldOnExit {
- if _, ok := lff.HeldOnEntry[fieldName]; ok {
- continue
- }
- // Acquire the lock per the annotation.
- r := fg.resolveCall(call.Common().Args, call.Value())
- if s, ok := ls.lockField(r); !ok {
- if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok {
- pc.maybeFail(call.Pos(), "attempt to acquire %s (%s), but already held (locks: %s)", fieldName, s, ls.String())
- }
- }
- }
-}
-
-// checkFunctionCall checks preconditions for function calls, and tracks the
-// lock state by recording relevant calls to sync functions. Note that calls to
-// atomic functions are tracked by checkFieldAccess by looking directly at the
-// referrers (because ordering doesn't matter there, so we need not scan in
-// instruction order).
-func (pc *passContext) checkFunctionCall(call callCommon, fn *ssa.Function, lff *lockFunctionFacts, ls *lockState) {
- // Check all guards required are held.
- for fieldName, fg := range lff.HeldOnEntry {
- r := fg.resolveCall(call.Common().Args, call.Value())
- if s, ok := ls.isHeld(r); !ok {
- if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok {
- pc.maybeFail(call.Pos(), "must hold %s (%s) to call %s, but not held (locks: %s)", fieldName, s, fn.Name(), ls.String())
- } else {
- // Force the lock to be acquired.
- ls.lockField(r)
- }
- }
- }
-
- // Update all lock state accordingly.
- pc.postFunctionCallUpdate(call, lff, ls)
-
- // Check if it's a method dispatch for something in the sync package.
- // See: https://godoc.org/golang.org/x/tools/go/ssa#Function
- if fn.Package() != nil && fn.Package().Pkg.Name() == "sync" && fn.Signature.Recv() != nil {
- switch fn.Name() {
- case "Lock", "RLock":
- if s, ok := ls.lockField(resolvedValue{value: call.Common().Args[0], valid: true}); !ok {
- if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok {
- // Double locking a mutex that is already locked.
- pc.maybeFail(call.Pos(), "%s already locked (locks: %s)", s, ls.String())
- }
- }
- case "Unlock", "RUnlock":
- if s, ok := ls.unlockField(resolvedValue{value: call.Common().Args[0], valid: true}); !ok {
- if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok {
- // Unlocking something that is already unlocked.
- pc.maybeFail(call.Pos(), "%s already unlocked (locks: %s)", s, ls.String())
- }
- }
- }
- }
-}
-
-// checkClosure forks the lock state, and creates a binding for the FreeVars of
-// the closure. This allows the analysis to resolve the closure.
-func (pc *passContext) checkClosure(call callCommon, fn *ssa.MakeClosure, ls *lockState) {
- clls := ls.fork()
- clfn := fn.Fn.(*ssa.Function)
- for i, fv := range clfn.FreeVars {
- clls.store(fv, fn.Bindings[i])
- }
-
- // Note that this is *not* a call to check function call, which checks
- // against the function preconditions. Instead, this does a fresh
- // analysis of the function from source code with a different state.
- var nolff lockFunctionFacts
- pc.checkFunction(call, clfn, &nolff, clls, true /* force */)
-}
-
-// freshAlloc indicates that v has been allocated within the local scope. There
-// is no lock checking done on objects that are freshly allocated.
-func freshAlloc(v ssa.Value) bool {
- switch x := v.(type) {
- case *ssa.Alloc:
- return true
- case *ssa.FieldAddr:
- return freshAlloc(x.X)
- case *ssa.Field:
- return freshAlloc(x.X)
- case *ssa.IndexAddr:
- return freshAlloc(x.X)
- case *ssa.Index:
- return freshAlloc(x.X)
- case *ssa.Convert:
- return freshAlloc(x.X)
- case *ssa.ChangeType:
- return freshAlloc(x.X)
- default:
- return false
- }
-}
-
-// isWrite indicates that this value is used as the addr field in a store.
-//
-// Note that even when this returns false, the value may still be used for a
-// write. The return here is optimistic but sufficient for basic analysis.
-func isWrite(v ssa.Value) bool {
- refs := v.Referrers()
- if refs == nil {
- return false
- }
- for _, ref := range *refs {
- if s, ok := ref.(*ssa.Store); ok && s.Addr == v {
- return true
- }
- }
- return false
-}
-
-// callCommon is an ssa.Value that also implements Common.
-type callCommon interface {
- Pos() token.Pos
- Common() *ssa.CallCommon
- Value() *ssa.Call
-}
-
-// checkInstruction checks the legality of a single instruction based on the
-// current lockState.
-func (pc *passContext) checkInstruction(inst ssa.Instruction, ls *lockState) (*ssa.Return, *lockState) {
- switch x := inst.(type) {
- case *ssa.Store:
- // Record that this value is holding this other value. This is
- // because at the beginning of each ssa execution, there is a
- // series of assignments of parameter values to alloc objects.
- // This allows us to trace these back to the original
- // parameters as aliases above.
- //
- // Note that this may overwrite an existing value in the lock
- // state, but this is intentional.
- ls.store(x.Addr, x.Val)
- case *ssa.Field:
- if !freshAlloc(x.X) {
- pc.checkFieldAccess(x, x.X, x.Field, ls, false)
- }
- case *ssa.FieldAddr:
- if !freshAlloc(x.X) {
- pc.checkFieldAccess(x, x.X, x.Field, ls, isWrite(x))
- }
- case *ssa.Call:
- pc.checkCall(x, ls)
- case *ssa.Defer:
- ls.pushDefer(x)
- case *ssa.RunDefers:
- for d := ls.popDefer(); d != nil; d = ls.popDefer() {
- pc.checkCall(d, ls)
- }
- case *ssa.MakeClosure:
- refs := x.Referrers()
- if refs == nil {
- // This is strange, it's not used? Ignore this case,
- // since it will probably be optimized away.
- return nil, nil
- }
- hasNonCall := false
- for _, ref := range *refs {
- switch ref.(type) {
- case *ssa.Call, *ssa.Defer:
- // Analysis will be done on the call itself
- // subsequently, including the lock state at
- // the time of the call.
- default:
- // We need to analyze this separately. Per below,
- // this means that we'll analyze it at closure
- // construction time, with zero assumptions about
- // when it will be called.
- hasNonCall = true
- }
- }
- if !hasNonCall {
- return nil, nil
- }
- // Analyze the closure without bindings. This means that we
- // assume no lock facts and no existing lock state. Only
- // trivial closures are acceptable in this case.
- clfn := x.Fn.(*ssa.Function)
- var nolff lockFunctionFacts
- pc.checkFunction(nil, clfn, &nolff, nil, false /* force */)
- case *ssa.Return:
- return x, ls // Valid return state.
- }
- return nil, nil
-}
-
-// checkBasicBlock traverses the control flow graph starting at the given
-// block and checks each instruction for allowed operations.
-func (pc *passContext) checkBasicBlock(fn *ssa.Function, block *ssa.BasicBlock, lff *lockFunctionFacts, parent *lockState, seen map[*ssa.BasicBlock]*lockState) *lockState {
- if oldLS, ok := seen[block]; ok && oldLS.isCompatible(parent) {
- return nil
- }
-
- // If the lock state is not compatible, then we need to do the
- // recursive analysis to ensure that it is still sane. For example, the
- // following is guaranteed to generate incompatible locking states:
- //
- // if foo {
- // mu.Lock()
- // }
- // other stuff ...
- // if foo {
- // mu.Unlock()
- // }
-
- var (
- rv *ssa.Return
- rls *lockState
- )
-
- // Analyze this block.
- seen[block] = parent
- ls := parent.fork()
- for _, inst := range block.Instrs {
- rv, rls = pc.checkInstruction(inst, ls)
- if rls != nil {
- failed := false
- // Validate held locks.
- for fieldName, fg := range lff.HeldOnExit {
- r := fg.resolveStatic(fn, rv)
- if s, ok := rls.isHeld(r); !ok {
- if _, ok := pc.forced[pc.positionKey(rv.Pos())]; !ok {
- pc.maybeFail(rv.Pos(), "lock %s (%s) not held (locks: %s)", fieldName, s, rls.String())
- failed = true
- } else {
- // Force the lock to be acquired.
- rls.lockField(r)
- }
- }
- }
- // Check for other locks, but only if the above didn't trip.
- if !failed && rls.count() != len(lff.HeldOnExit) {
- pc.maybeFail(rv.Pos(), "return with unexpected locks held (locks: %s)", rls.String())
- }
- }
- }
-
- // Analyze all successors.
- for _, succ := range block.Succs {
- // Collect possible return values, and make sure that the lock
- // state aligns with any return value that we may have found
- // above. Note that checkBasicBlock will recursively analyze
- // the lock state to ensure that Releases and Acquires are
- // respected.
- if pls := pc.checkBasicBlock(fn, succ, lff, ls, seen); pls != nil {
- if rls != nil && !rls.isCompatible(pls) {
- if _, ok := pc.forced[pc.positionKey(fn.Pos())]; !ok {
- pc.maybeFail(fn.Pos(), "incompatible return states (first: %s, second: %v)", rls.String(), pls.String())
- }
- }
- rls = pls
- }
- }
- return rls
-}
-
-// checkFunction checks a function invocation, typically starting with nil lockState.
-func (pc *passContext) checkFunction(call callCommon, fn *ssa.Function, lff *lockFunctionFacts, parent *lockState, force bool) {
- defer func() {
- // Mark this function as checked. This is used by the top-level
- // loop to ensure that all anonymous functions are scanned, if
- // they are not explicitly invoked here. Note that this can
- // happen if the anonymous functions are e.g. passed only as
- // parameters or used to initialize some structure.
- pc.functions[fn] = struct{}{}
- }()
- if _, ok := pc.functions[fn]; !force && ok {
- // This function has already been analyzed at least once.
- // That's all we permit for each function, although this may
- // cause some anonymous functions to be analyzed in only one
- // context.
- return
- }
-
-	// If no call is provided, then synthesize one. This is used
-	// below only to check against the lock preconditions, which may
- // include return values.
- if call == nil {
- call = &ssa.Call{Call: ssa.CallCommon{Value: fn}}
- }
-
- // Initialize ls with any preconditions that require locks to be held
-	// for the method to be invoked. Note that in the overwhelming majority
- // of cases, parent will be nil. However, in the case of closures and
- // anonymous functions, we may start with a non-nil lock state.
- ls := parent.fork()
- for fieldName, fg := range lff.HeldOnEntry {
- // The first is the method object itself so we skip that when looking
- // for receiver/function parameters.
- r := fg.resolveStatic(fn, call.Value())
- if s, ok := ls.lockField(r); !ok {
- // This can only happen if the same value is declared
- // multiple times, and should be caught by the earlier
- // fact scanning. Keep it here as a sanity check.
- pc.maybeFail(fn.Pos(), "lock %s (%s) acquired multiple times (locks: %s)", fieldName, s, ls.String())
- }
- }
-
- // Scan the blocks.
- seen := make(map[*ssa.BasicBlock]*lockState)
- if len(fn.Blocks) > 0 {
- pc.checkBasicBlock(fn, fn.Blocks[0], lff, ls, seen)
- }
-
- // Scan the recover block.
- if fn.Recover != nil {
- pc.checkBasicBlock(fn, fn.Recover, lff, ls, seen)
- }
-
- // Update all lock state accordingly. This will be called only if we
- // are doing inline analysis for e.g. an anonymous function.
- if call != nil && parent != nil {
- pc.postFunctionCallUpdate(call, lff, parent)
- }
-}
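
For orientation, the HeldOnExit validation above pairs with the +checklocksacquire annotation parsed elsewhere in this change. The following sketch is illustrative only; the counter type and function names are hypothetical and not part of the diff:

    package example

    import "sync"

    type counter struct {
        mu sync.Mutex

        // +checklocks:mu
        n int
    }

    // +checklocksacquire:c.mu
    func lockAndIncrement(c *counter) {
        c.mu.Lock()
        c.n++ // OK: mu is held here and at return, satisfying HeldOnExit.
    }

    // +checklocksacquire:c.mu
    func forgetToLock(c *counter) {
        c.n++ // Flagged: written without mu; mu is also not held at return.
    }
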
diff --git a/tools/checklocks/annotations.go b/tools/checklocks/annotations.go
deleted file mode 100644
index 371260980..000000000
--- a/tools/checklocks/annotations.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package checklocks
-
-import (
- "fmt"
-
- "go/token"
- "strconv"
- "strings"
-)
-
-const (
- checkLocksAnnotation = "// +checklocks:"
- checkLocksAcquires = "// +checklocksacquire:"
- checkLocksReleases = "// +checklocksrelease:"
- checkLocksIgnore = "// +checklocksignore"
- checkLocksForce = "// +checklocksforce"
- checkLocksFail = "// +checklocksfail"
- checkAtomicAnnotation = "// +checkatomic"
-)
-
-// failData indicates an expected failure.
-type failData struct {
- pos token.Pos
- count int
- seen int
-}
-
-// positionKey is a simple position string.
-type positionKey string
-
-// positionKey converts from a token.Pos to a key we can use to track failures
-// as the position of the failure annotation is not the same as the position of
-// the actual failure (different column/offsets). Hence we ignore these fields
-// and only use the file/line numbers to track failures.
-func (pc *passContext) positionKey(pos token.Pos) positionKey {
- position := pc.pass.Fset.Position(pos)
- return positionKey(fmt.Sprintf("%s:%d", position.Filename, position.Line))
-}
-
-// addFailures adds an expected failure.
-func (pc *passContext) addFailures(pos token.Pos, s string) {
- count := 1
- if len(s) > 0 && s[0] == ':' {
- parsedCount, err := strconv.Atoi(s[1:])
- if err != nil {
- pc.pass.Reportf(pos, "unable to parse failure annotation %q: %v", s[1:], err)
- return
- }
- count = parsedCount
- }
- pc.failures[pc.positionKey(pos)] = &failData{
- pos: pos,
- count: count,
- }
-}
-
-// addExemption adds an exemption.
-func (pc *passContext) addExemption(pos token.Pos) {
- pc.exemptions[pc.positionKey(pos)] = struct{}{}
-}
-
-// addForce adds a force annotation.
-func (pc *passContext) addForce(pos token.Pos) {
- pc.forced[pc.positionKey(pos)] = struct{}{}
-}
-
-// maybeFail checks a potential failure against a specific failure map.
-func (pc *passContext) maybeFail(pos token.Pos, fmtStr string, args ...interface{}) {
- if fd, ok := pc.failures[pc.positionKey(pos)]; ok {
- fd.seen++
- return
- }
- if _, ok := pc.exemptions[pc.positionKey(pos)]; ok {
- return // Ignored, not counted.
- }
- pc.pass.Reportf(pos, fmtStr, args...)
-}
-
-// checkFailures checks for the expected failure counts.
-func (pc *passContext) checkFailures() {
- for _, fd := range pc.failures {
- if fd.count != fd.seen {
-			// We are missing expected failures; report as much as possible.
- pc.pass.Reportf(fd.pos, "got %d failures, want %d failures", fd.seen, fd.count)
- }
- }
-}
-
-// extractAnnotations extracts annotations from text.
-func (pc *passContext) extractAnnotations(s string, fns map[string]func(p string)) {
- for prefix, fn := range fns {
- if strings.HasPrefix(s, prefix) {
- fn(s[len(prefix):])
- }
- }
-}
-
-// extractLineFailures extracts all line-based exceptions.
-//
-// Note that this applies only to individual line exemptions, and does not
-// consider function-wide exemptions, or specific field exemptions, which are
-// extracted separately as part of the saved facts for those objects.
-func (pc *passContext) extractLineFailures() {
- for _, f := range pc.pass.Files {
- for _, cg := range f.Comments {
- for _, c := range cg.List {
- pc.extractAnnotations(c.Text, map[string]func(string){
- checkLocksFail: func(p string) { pc.addFailures(c.Pos(), p) },
- checkLocksIgnore: func(string) { pc.addExemption(c.Pos()) },
- checkLocksForce: func(string) { pc.addForce(c.Pos()) },
- })
- }
- }
- }
-}
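
For reference, a short sketch of how the annotations parsed above appear in source; the optional ':N' suffix is the expected-failure count consumed by addFailures. The types and functions here are hypothetical, not taken from the diff:

    package example

    import "sync"

    type guarded struct {
        mu sync.Mutex

        // +checklocks:mu
        val int
    }

    // +checklocksignore
    func scratch(g *guarded) {
        g.val = 1 // Not reported: the enclosing function is exempted.
    }

    func broken(g *guarded) {
        g.val = 1 // +checklocksfail
    }

A "// +checklocksfail:2" comment would instead require exactly two reported failures on that line.
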
diff --git a/tools/checklocks/checklocks.go b/tools/checklocks/checklocks.go
deleted file mode 100644
index 180f8873f..000000000
--- a/tools/checklocks/checklocks.go
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package checklocks performs lock analysis to identify and flag unprotected
-// access to annotated fields.
-//
-// For detailed usage refer to README.md in the same directory.
-package checklocks
-
-import (
- "go/ast"
- "go/token"
- "go/types"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/analysis/passes/buildssa"
- "golang.org/x/tools/go/ssa"
-)
-
-// Analyzer is the main entrypoint.
-var Analyzer = &analysis.Analyzer{
- Name: "checklocks",
- Doc: "checks lock preconditions on functions and fields",
- Run: run,
- Requires: []*analysis.Analyzer{buildssa.Analyzer},
- FactTypes: []analysis.Fact{(*atomicAlignment)(nil), (*lockFieldFacts)(nil), (*lockGuardFacts)(nil), (*lockFunctionFacts)(nil)},
-}
-
-// passContext is a pass with additional expected failures.
-type passContext struct {
- pass *analysis.Pass
- failures map[positionKey]*failData
- exemptions map[positionKey]struct{}
- forced map[positionKey]struct{}
- functions map[*ssa.Function]struct{}
-}
-
-// forAllTypes applies the given function over all types.
-func (pc *passContext) forAllTypes(fn func(ts *ast.TypeSpec)) {
- for _, f := range pc.pass.Files {
- for _, decl := range f.Decls {
- d, ok := decl.(*ast.GenDecl)
- if !ok || d.Tok != token.TYPE {
- continue
- }
- for _, gs := range d.Specs {
- fn(gs.(*ast.TypeSpec))
- }
- }
- }
-}
-
-// forAllFunctions applies the given function over all functions.
-func (pc *passContext) forAllFunctions(fn func(fn *ast.FuncDecl)) {
- for _, f := range pc.pass.Files {
- for _, decl := range f.Decls {
- d, ok := decl.(*ast.FuncDecl)
- if !ok {
- continue
- }
- fn(d)
- }
- }
-}
-
-// run is the main entrypoint.
-func run(pass *analysis.Pass) (interface{}, error) {
- pc := &passContext{
- pass: pass,
- failures: make(map[positionKey]*failData),
- exemptions: make(map[positionKey]struct{}),
- forced: make(map[positionKey]struct{}),
- functions: make(map[*ssa.Function]struct{}),
- }
-
- // Find all line failure annotations.
- pc.extractLineFailures()
-
- // Find all struct declarations and export relevant facts.
- pc.forAllTypes(func(ts *ast.TypeSpec) {
- if ss, ok := ts.Type.(*ast.StructType); ok {
- pc.exportLockFieldFacts(ts, ss)
- }
- })
- pc.forAllTypes(func(ts *ast.TypeSpec) {
- if ss, ok := ts.Type.(*ast.StructType); ok {
- pc.exportLockGuardFacts(ts, ss)
- }
- })
-
- // Check all alignments.
- pc.forAllTypes(func(ts *ast.TypeSpec) {
- typ, ok := pass.TypesInfo.TypeOf(ts.Name).(*types.Named)
- if !ok {
- return
- }
- pc.checkTypeAlignment(pass.Pkg, typ)
- })
-
- // Find all function declarations and export relevant facts.
- pc.forAllFunctions(func(fn *ast.FuncDecl) {
- pc.exportFunctionFacts(fn)
- })
-
- // Scan all code looking for invalid accesses.
- state := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA)
- for _, fn := range state.SrcFuncs {
- // Import function facts generated above.
- //
-		// Note that anonymous functions (closures) do not have an
-		// object but do show up in the SSA. They can only be invoked
-		// by named functions in the package, and they are analyzed
-		// inline on every call. Thus we skip the analysis here. They
- // will be hit on calls, or picked up in the pass below.
- if obj := fn.Object(); obj == nil {
- continue
- }
- var lff lockFunctionFacts
- pc.pass.ImportObjectFact(fn.Object(), &lff)
-
- // Do we ignore this?
- if lff.Ignore {
- continue
- }
-
- // Check the basic blocks in the function.
- pc.checkFunction(nil, fn, &lff, nil, false /* force */)
- }
- for _, fn := range state.SrcFuncs {
- // Ensure all anonymous functions are hit. They are not
- // permitted to have any lock preconditions.
- if obj := fn.Object(); obj != nil {
- continue
- }
- var nolff lockFunctionFacts
- pc.checkFunction(nil, fn, &nolff, nil, false /* force */)
- }
-
- // Check for expected failures.
- pc.checkFailures()
-
- return nil, nil
-}
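
One plausible way to run this analyzer standalone is through the x/tools analysis driver. The wrapper below is an assumption for illustration; neither the command nor the gvisor.dev/gvisor/tools/checklocks import path is established by this diff:

    // main.go: hypothetical standalone driver for the checklocks analyzer.
    package main

    import (
        "golang.org/x/tools/go/analysis/singlechecker"

        "gvisor.dev/gvisor/tools/checklocks"
    )

    func main() {
        // singlechecker provides flag handling, package loading and fact
        // serialization for a single *analysis.Analyzer.
        singlechecker.Main(checklocks.Analyzer)
    }
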
diff --git a/tools/checklocks/facts.go b/tools/checklocks/facts.go
deleted file mode 100644
index 1a43dbbe6..000000000
--- a/tools/checklocks/facts.go
+++ /dev/null
@@ -1,614 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package checklocks
-
-import (
- "fmt"
- "go/ast"
- "go/token"
- "go/types"
- "regexp"
- "strings"
-
- "golang.org/x/tools/go/ssa"
-)
-
-// atomicAlignment is saved per type.
-//
-// This represents the alignment required for the type, which may
-// be implied and imposed by other types within the aggregate type.
-type atomicAlignment int
-
-// AFact implements analysis.Fact.AFact.
-func (*atomicAlignment) AFact() {}
-
-// atomicDisposition is saved per field.
-//
-// This represents how the field must be accessed. It must either
-// be non-atomic (default), atomic or ignored.
-type atomicDisposition int
-
-const (
- atomicDisallow atomicDisposition = iota
- atomicIgnore
- atomicRequired
-)
-
-// fieldList is a simple list of fields, used in two types below.
-//
-// Note that the integers in this list refer to one of two things:
-// - A positive integer refers to a field index in a struct.
-// - A negative integer refers to a field index in a struct, where
-// that field is a pointer and must be subsequently resolved.
-type fieldList []int
-
-// resolvedValue is an ssa.Value with additional fields.
-//
-// This can be resolved to a string as part of a lock state.
-type resolvedValue struct {
- value ssa.Value
- valid bool
- fieldList []int
-}
-
-// findExtract finds a relevant extract. This must exist within the referrers
-// to the call object. If it does not exist, then the object which is locked is
-// never consumed, and we should consider this a bug.
-func findExtract(v ssa.Value, index int) (ssa.Value, bool) {
- if refs := v.Referrers(); refs != nil {
- for _, inst := range *refs {
- if x, ok := inst.(*ssa.Extract); ok && x.Tuple == v && x.Index == index {
- return inst.(ssa.Value), true
- }
- }
- }
- return nil, false
-}
-
-// resolve resolves the given field list.
-func (fl fieldList) resolve(v ssa.Value) (rv resolvedValue) {
- return resolvedValue{
- value: v,
- fieldList: fl,
- valid: true,
- }
-}
-
-// valueAsString returns a string representing this value.
-//
-// This must align with how the string is generated in lockState.valueAsString.
-func (rv resolvedValue) valueAsString(ls *lockState) string {
- typ := rv.value.Type()
- s := ls.valueAsString(rv.value)
- for i, fieldNumber := range rv.fieldList {
- switch {
- case fieldNumber > 0:
- field, ok := findField(typ, fieldNumber-1)
- if !ok {
- // This can't be resolved, return for debugging.
- return fmt.Sprintf("{%s+%v}", s, rv.fieldList[i:])
- }
- s = fmt.Sprintf("&(%s.%s)", s, field.Name())
- typ = field.Type()
- case fieldNumber < 1:
- field, ok := findField(typ, (-fieldNumber)-1)
- if !ok {
- // See above.
- return fmt.Sprintf("{%s+%v}", s, rv.fieldList[i:])
- }
- s = fmt.Sprintf("*(&(%s.%s))", s, field.Name())
- typ = field.Type()
- }
- }
- return s
-}
-
-// lockFieldFacts apply on every struct field.
-type lockFieldFacts struct {
- // IsMutex is true if the field is of type sync.Mutex.
- IsMutex bool
-
- // IsRWMutex is true if the field is of type sync.RWMutex.
- IsRWMutex bool
-
- // IsPointer indicates if the field is a pointer.
- IsPointer bool
-
- // FieldNumber is the number of this field in the struct.
- FieldNumber int
-}
-
-// AFact implements analysis.Fact.AFact.
-func (*lockFieldFacts) AFact() {}
-
-// lockGuardFacts contains guard information.
-type lockGuardFacts struct {
- // GuardedBy is the set of locks that are guarding this field. The key
- // is the original annotation value, and the field list is the object
- // traversal path.
- GuardedBy map[string]fieldList
-
- // AtomicDisposition is the disposition for this field. Note that this
- // can affect the interpretation of the GuardedBy field above, see the
- // relevant comment.
- AtomicDisposition atomicDisposition
-}
-
-// AFact implements analysis.Fact.AFact.
-func (*lockGuardFacts) AFact() {}
-
-// functionGuard is used by lockFunctionFacts, below.
-type functionGuard struct {
- // ParameterNumber is the index of the object that contains the
- // guarding mutex. From this parameter, a walk is performed
- // subsequently using the resolve method.
- //
-	// Note that if ParameterNumber is beyond the number of parameters, then
-	// it may refer to a return value. This applies only to the Acquires
- // relation below.
- ParameterNumber int
-
- // NeedsExtract is used in the case of a return value, and indicates
- // that the field must be extracted from a tuple.
- NeedsExtract bool
-
- // FieldList is the traversal path to the object.
- FieldList fieldList
-}
-
-// resolveReturn resolves a return value.
-//
-// Precondition: rv is either an ssa.Value, or an *ssa.Return.
-func (fg *functionGuard) resolveReturn(rv interface{}, args int) resolvedValue {
- if rv == nil {
- // For defers and other objects, this may be nil. This is
- // handled in state.go in the actual lock checking logic.
- return resolvedValue{
- value: nil,
- valid: false,
- }
- }
- index := fg.ParameterNumber - args
- // If this is a *ssa.Return object, i.e. we are analyzing the function
- // and not the call site, then we can just pull the result directly.
- if r, ok := rv.(*ssa.Return); ok {
- return fg.FieldList.resolve(r.Results[index])
- }
- if fg.NeedsExtract {
- // Resolve on the extracted field, this is necessary if the
- // type here is not an explicit return. Note that rv must be an
- // ssa.Value, since it is not an *ssa.Return.
- v, ok := findExtract(rv.(ssa.Value), index)
- if !ok {
- return resolvedValue{
- value: v,
- valid: false,
- }
- }
- return fg.FieldList.resolve(v)
- }
- if index != 0 {
- // This should not happen, NeedsExtract should always be set.
- panic("NeedsExtract is false, but return value index is non-zero")
- }
- // Resolve on the single return.
- return fg.FieldList.resolve(rv.(ssa.Value))
-}
-
-// resolveStatic returns an ssa.Value representing the given field.
-//
-// Precondition: per resolveReturn.
-func (fg *functionGuard) resolveStatic(fn *ssa.Function, rv interface{}) resolvedValue {
- if fg.ParameterNumber >= len(fn.Params) {
- return fg.resolveReturn(rv, len(fn.Params))
- }
- return fg.FieldList.resolve(fn.Params[fg.ParameterNumber])
-}
-
-// resolveCall returns an ssa.Value representing the given field.
-func (fg *functionGuard) resolveCall(args []ssa.Value, rv ssa.Value) resolvedValue {
- if fg.ParameterNumber >= len(args) {
- return fg.resolveReturn(rv, len(args))
- }
- return fg.FieldList.resolve(args[fg.ParameterNumber])
-}
-
-// lockFunctionFacts apply on every method.
-type lockFunctionFacts struct {
-	// HeldOnEntry tracks, by name and parameter number (including the
-	// receiver), the fields that guard calls to this function.
- //
- // The key is the name specified in the checklocks annotation. e.g given
- // the following code:
- //
- // ```
- // type A struct {
- // mu sync.Mutex
- // a int
- // }
- //
- // // +checklocks:a.mu
- // func xyz(a *A) {..}
- // ```
- //
-	// '+checklocks:a.mu' will result in an entry in this map as shown below.
-	// HeldOnEntry: {"a.mu" => {ParameterNumber: 0, FieldList: {0}}}
- //
-	// Unlike lockGuardFacts, there is no atomic interpretation.
- HeldOnEntry map[string]functionGuard
-
- // HeldOnExit tracks the locks that are expected to be held on exit.
- HeldOnExit map[string]functionGuard
-
- // Ignore means this function has local analysis ignores.
- //
- // This is not used outside the local package.
- Ignore bool
-}
-
-// AFact implements analysis.Fact.AFact.
-func (*lockFunctionFacts) AFact() {}
-
-// checkGuard validates the guardName.
-func (lff *lockFunctionFacts) checkGuard(pc *passContext, d *ast.FuncDecl, guardName string, allowReturn bool) (functionGuard, bool) {
- if _, ok := lff.HeldOnEntry[guardName]; ok {
- pc.maybeFail(d.Pos(), "annotation %s specified more than once, already required", guardName)
- return functionGuard{}, false
- }
- if _, ok := lff.HeldOnExit[guardName]; ok {
- pc.maybeFail(d.Pos(), "annotation %s specified more than once, already acquired", guardName)
- return functionGuard{}, false
- }
- fg, ok := pc.findFunctionGuard(d, guardName, allowReturn)
- return fg, ok
-}
-
-// addGuardedBy adds a field to both HeldOnEntry and HeldOnExit.
-func (lff *lockFunctionFacts) addGuardedBy(pc *passContext, d *ast.FuncDecl, guardName string) {
- if fg, ok := lff.checkGuard(pc, d, guardName, false /* allowReturn */); ok {
- if lff.HeldOnEntry == nil {
- lff.HeldOnEntry = make(map[string]functionGuard)
- }
- if lff.HeldOnExit == nil {
- lff.HeldOnExit = make(map[string]functionGuard)
- }
- lff.HeldOnEntry[guardName] = fg
- lff.HeldOnExit[guardName] = fg
- }
-}
-
-// addAcquires adds a field to HeldOnExit.
-func (lff *lockFunctionFacts) addAcquires(pc *passContext, d *ast.FuncDecl, guardName string) {
- if fg, ok := lff.checkGuard(pc, d, guardName, true /* allowReturn */); ok {
- if lff.HeldOnExit == nil {
- lff.HeldOnExit = make(map[string]functionGuard)
- }
- lff.HeldOnExit[guardName] = fg
- }
-}
-
-// addReleases adds a field to HeldOnEntry.
-func (lff *lockFunctionFacts) addReleases(pc *passContext, d *ast.FuncDecl, guardName string) {
- if fg, ok := lff.checkGuard(pc, d, guardName, false /* allowReturn */); ok {
- if lff.HeldOnEntry == nil {
- lff.HeldOnEntry = make(map[string]functionGuard)
- }
- lff.HeldOnEntry[guardName] = fg
- }
-}
-
-// fieldListFor returns the fieldList entry for the given object.
-func (pc *passContext) fieldListFor(pos token.Pos, fieldObj types.Object, index int, fieldName string, checkMutex bool) (int, bool) {
- var lff lockFieldFacts
- if !pc.pass.ImportObjectFact(fieldObj, &lff) {
- // This should not happen: we export facts for all fields.
- panic(fmt.Sprintf("no lockFieldFacts available for field %s", fieldName))
- }
- // Check that it is indeed a mutex.
- if checkMutex && !lff.IsMutex && !lff.IsRWMutex {
- pc.maybeFail(pos, "field %s is not a mutex or an rwmutex", fieldName)
- return 0, false
- }
- // Return the resolution path.
- if lff.IsPointer {
- return -(index + 1), true
- }
- return (index + 1), true
-}
-
-// resolveOneField resolves a field in a single struct.
-func (pc *passContext) resolveOneField(pos token.Pos, structType *types.Struct, fieldName string, checkMutex bool) (fl fieldList, fieldObj types.Object, ok bool) {
- // Scan to match the next field.
- for i := 0; i < structType.NumFields(); i++ {
- fieldObj := structType.Field(i)
- if fieldObj.Name() != fieldName {
- continue
- }
- flOne, ok := pc.fieldListFor(pos, fieldObj, i, fieldName, checkMutex)
- if !ok {
- return nil, nil, false
- }
- fl = append(fl, flOne)
- return fl, fieldObj, true
- }
- // Is this an embed?
- for i := 0; i < structType.NumFields(); i++ {
- fieldObj := structType.Field(i)
- if !fieldObj.Embedded() {
- continue
- }
- // Is this an embedded struct?
- structType, ok := resolveStruct(fieldObj.Type())
- if !ok {
- continue
- }
- // Need to check that there is a resolution path. If there is
- // no resolution path that's not a failure: we just continue
- // scanning the next embed to find a match.
- flEmbed, okEmbed := pc.fieldListFor(pos, fieldObj, i, fieldName, false)
- flCont, fieldObjCont, okCont := pc.resolveOneField(pos, structType, fieldName, checkMutex)
- if okEmbed && okCont {
- fl = append(fl, flEmbed)
- fl = append(fl, flCont...)
- return fl, fieldObjCont, true
- }
- }
- pc.maybeFail(pos, "field %s does not exist", fieldName)
- return nil, nil, false
-}
-
-// resolveField resolves a set of fields given a string, such as 'a.b.c'.
-//
-// Note that this checks that the final element is a mutex of some kind, and
-// will fail appropriately.
-func (pc *passContext) resolveField(pos token.Pos, structType *types.Struct, parts []string) (fl fieldList, ok bool) {
- for partNumber, fieldName := range parts {
- flOne, fieldObj, ok := pc.resolveOneField(pos, structType, fieldName, partNumber >= len(parts)-1 /* checkMutex */)
- if !ok {
- // Error already reported.
- return nil, false
- }
- fl = append(fl, flOne...)
- if partNumber < len(parts)-1 {
- // Traverse to the next type.
- structType, ok = resolveStruct(fieldObj.Type())
- if !ok {
- pc.maybeFail(pos, "invalid intermediate field %s", fieldName)
- return fl, false
- }
- }
- }
- return fl, true
-}
-
-var (
- mutexRE = regexp.MustCompile("((.*/)|^)sync.(CrossGoroutineMutex|Mutex)")
- rwMutexRE = regexp.MustCompile("((.*/)|^)sync.(CrossGoroutineRWMutex|RWMutex)")
-)
-
-// exportLockFieldFacts finds all struct fields that are mutexes, and ensures
-// that they are annotated properly.
-//
-// This information is consumed subsequently by exportLockGuardFacts, and this
-// function must be called first on all structures.
-func (pc *passContext) exportLockFieldFacts(ts *ast.TypeSpec, ss *ast.StructType) {
- structType := pc.pass.TypesInfo.TypeOf(ts.Name).Underlying().(*types.Struct)
- for i := range ss.Fields.List {
- lff := &lockFieldFacts{
- FieldNumber: i,
- }
-		// We match with a regular expression below because fieldType
-		// can be fully qualified with the package name, e.g. for the
-		// gVisor sync package mutex fields have the type:
- // "<package path>/sync/sync.Mutex"
- fieldObj := structType.Field(i)
- s := fieldObj.Type().String()
- switch {
- case mutexRE.MatchString(s):
- lff.IsMutex = true
- case rwMutexRE.MatchString(s):
- lff.IsRWMutex = true
- }
- // Save whether this is a pointer.
- _, lff.IsPointer = fieldObj.Type().Underlying().(*types.Pointer)
- // We must always export the lockFieldFacts, since traversal
- // can take place along any object in the struct.
- pc.pass.ExportObjectFact(fieldObj, lff)
- }
-}
-
-// exportLockGuardFacts finds all relevant guard information for structures.
-//
-// This function requires exportLockFieldFacts be called first on all
-// structures.
-func (pc *passContext) exportLockGuardFacts(ts *ast.TypeSpec, ss *ast.StructType) {
- structType := pc.pass.TypesInfo.TypeOf(ts.Name).Underlying().(*types.Struct)
- for i, field := range ss.Fields.List {
- if field.Doc == nil {
- continue
- }
- var (
- lff lockFieldFacts
- lgf lockGuardFacts
- )
- pc.pass.ImportObjectFact(structType.Field(i), &lff)
- fieldObj := structType.Field(i)
- for _, l := range field.Doc.List {
- pc.extractAnnotations(l.Text, map[string]func(string){
- checkAtomicAnnotation: func(string) {
- switch lgf.AtomicDisposition {
- case atomicRequired:
- pc.maybeFail(fieldObj.Pos(), "annotation is redundant, already atomic required")
- case atomicIgnore:
- pc.maybeFail(fieldObj.Pos(), "annotation is contradictory, already atomic ignored")
- }
- lgf.AtomicDisposition = atomicRequired
- },
- checkLocksIgnore: func(string) {
- switch lgf.AtomicDisposition {
- case atomicIgnore:
- pc.maybeFail(fieldObj.Pos(), "annotation is redundant, already atomic ignored")
- case atomicRequired:
- pc.maybeFail(fieldObj.Pos(), "annotation is contradictory, already atomic required")
- }
- lgf.AtomicDisposition = atomicIgnore
- },
- checkLocksAnnotation: func(guardName string) {
- // Check for a duplicate annotation.
- if _, ok := lgf.GuardedBy[guardName]; ok {
- pc.maybeFail(fieldObj.Pos(), "annotation %s specified more than once", guardName)
- return
- }
- fl, ok := pc.resolveField(fieldObj.Pos(), structType, strings.Split(guardName, "."))
- if ok {
- // If we successfully resolved
- // the field, then save it.
- if lgf.GuardedBy == nil {
- lgf.GuardedBy = make(map[string]fieldList)
- }
- lgf.GuardedBy[guardName] = fl
- }
- },
- })
- }
- // Save only if there is something meaningful.
- if len(lgf.GuardedBy) > 0 || lgf.AtomicDisposition != atomicDisallow {
- pc.pass.ExportObjectFact(structType.Field(i), &lgf)
- }
- }
-}
-
-// countFields gives an accurate field count, accounting for unnamed arguments
-// and return values and the compact identifier format.
-func countFields(fl []*ast.Field) (count int) {
- for _, field := range fl {
- if len(field.Names) == 0 {
- count++
- continue
- }
- count += len(field.Names)
- }
- return
-}
-
-// matchFieldList attempts to match the given field.
-func (pc *passContext) matchFieldList(pos token.Pos, fl []*ast.Field, guardName string) (functionGuard, bool) {
- parts := strings.Split(guardName, ".")
- parameterName := parts[0]
- parameterNumber := 0
- for _, field := range fl {
- // See countFields, above.
- if len(field.Names) == 0 {
- parameterNumber++
- continue
- }
- for _, name := range field.Names {
- if name.Name != parameterName {
- parameterNumber++
- continue
- }
- ptrType, ok := pc.pass.TypesInfo.TypeOf(field.Type).Underlying().(*types.Pointer)
- if !ok {
- // Since mutexes cannot be copied we only care
- // about parameters that are pointer types when
- // checking for guards.
- pc.maybeFail(pos, "parameter name %s does not refer to a pointer type", parameterName)
- return functionGuard{}, false
- }
- structType, ok := ptrType.Elem().Underlying().(*types.Struct)
- if !ok {
- // Fields can only be in named structures.
- pc.maybeFail(pos, "parameter name %s does not refer to a pointer to a struct", parameterName)
- return functionGuard{}, false
- }
- fg := functionGuard{
- ParameterNumber: parameterNumber,
- }
- fl, ok := pc.resolveField(pos, structType, parts[1:])
- fg.FieldList = fl
- return fg, ok // If ok is false, already failed.
- }
- }
- return functionGuard{}, false
-}
-
-// findFunctionGuard identifies the parameter number and field number for a
-// particular string of the form 'a.b'.
-//
-// This function will report any errors directly.
-func (pc *passContext) findFunctionGuard(d *ast.FuncDecl, guardName string, allowReturn bool) (functionGuard, bool) {
- var (
- parameterList []*ast.Field
- returnList []*ast.Field
- )
- if d.Recv != nil {
- parameterList = append(parameterList, d.Recv.List...)
- }
- if d.Type.Params != nil {
- parameterList = append(parameterList, d.Type.Params.List...)
- }
- if fg, ok := pc.matchFieldList(d.Pos(), parameterList, guardName); ok {
- return fg, ok
- }
- if allowReturn {
- if d.Type.Results != nil {
- returnList = append(returnList, d.Type.Results.List...)
- }
- if fg, ok := pc.matchFieldList(d.Pos(), returnList, guardName); ok {
- // Fix this up to apply to the return value, as noted
- // in fg.ParameterNumber. For the ssa analysis, we must
- // record whether this has multiple results, since
- // *ssa.Call indicates: "The Call instruction yields
- // the function result if there is exactly one.
- // Otherwise it returns a tuple, the components of
- // which are accessed via Extract."
- fg.ParameterNumber += countFields(parameterList)
- fg.NeedsExtract = countFields(returnList) > 1
- return fg, ok
- }
- }
- // We never saw a matching parameter.
- pc.maybeFail(d.Pos(), "annotation %s does not have a matching parameter", guardName)
- return functionGuard{}, false
-}
-
-// exportFunctionFacts exports relevant function findings.
-func (pc *passContext) exportFunctionFacts(d *ast.FuncDecl) {
- if d.Doc == nil || d.Doc.List == nil {
- return
- }
- var lff lockFunctionFacts
- for _, l := range d.Doc.List {
- pc.extractAnnotations(l.Text, map[string]func(string){
- checkLocksIgnore: func(string) {
- // Note that this applies to all atomic
- // analysis as well. There is no provided way
- // to selectively ignore only lock analysis or
- // atomic analysis, as we expect this use to be
- // extremely rare.
- lff.Ignore = true
- },
- checkLocksAnnotation: func(guardName string) { lff.addGuardedBy(pc, d, guardName) },
- checkLocksAcquires: func(guardName string) { lff.addAcquires(pc, d, guardName) },
- checkLocksReleases: func(guardName string) { lff.addReleases(pc, d, guardName) },
- })
- }
-
- // Export the function facts if there is anything to save.
- if lff.Ignore || len(lff.HeldOnEntry) > 0 || len(lff.HeldOnExit) > 0 {
- funcObj := pc.pass.TypesInfo.Defs[d.Name].(*types.Func)
- pc.pass.ExportObjectFact(funcObj, &lff)
- }
-}
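
To make the dotted guard paths walked by resolveField and matchFieldList concrete, here is a small hypothetical example (not from the diff) in which an annotation traverses a parameter and then a nested field:

    package example

    import "sync"

    type inner struct {
        mu sync.Mutex
    }

    type outer struct {
        i inner

        // +checklocks:i.mu
        data int // guarded through the nested path "i.mu"
    }

    // +checklocks:o.i.mu
    func setData(o *outer, v int) {
        // "o.i.mu" resolves to parameter 0, then field i, then field mu.
        o.data = v
    }
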
diff --git a/tools/checklocks/state.go b/tools/checklocks/state.go
deleted file mode 100644
index 57061a32e..000000000
--- a/tools/checklocks/state.go
+++ /dev/null
@@ -1,315 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package checklocks
-
-import (
- "fmt"
- "go/token"
- "go/types"
- "strings"
- "sync/atomic"
-
- "golang.org/x/tools/go/ssa"
-)
-
-// lockState tracks the locking state and aliases.
-type lockState struct {
- // lockedMutexes is used to track which mutexes in a given struct are
- // currently locked. Note that most of the heavy lifting is done by
- // valueAsString below, which maps to specific structure fields, etc.
- lockedMutexes []string
-
- // stored stores values that have been stored in memory, bound to
-	// FreeVars or passed as Parameters.
- stored map[ssa.Value]ssa.Value
-
- // used is a temporary map, used only for valueAsString. It prevents
- // multiple use of the same memory location.
- used map[ssa.Value]struct{}
-
- // defers are the stack of defers that have been pushed.
- defers []*ssa.Defer
-
- // refs indicates the number of references on this structure. If it's
- // greater than one, we will do copy-on-write.
- refs *int32
-}
-
-// newLockState makes a new lockState.
-func newLockState() *lockState {
- refs := int32(1) // Not shared.
- return &lockState{
- lockedMutexes: make([]string, 0),
- used: make(map[ssa.Value]struct{}),
- stored: make(map[ssa.Value]ssa.Value),
- defers: make([]*ssa.Defer, 0),
- refs: &refs,
- }
-}
-
-// fork forks the locking state. When a lockState is forked, any modifications
-// will cause maps to be copied.
-func (l *lockState) fork() *lockState {
- if l == nil {
- return newLockState()
- }
- atomic.AddInt32(l.refs, 1)
- return &lockState{
- lockedMutexes: l.lockedMutexes,
- used: make(map[ssa.Value]struct{}),
- stored: l.stored,
- defers: l.defers,
- refs: l.refs,
- }
-}
-
-// modify indicates that this state will be modified.
-func (l *lockState) modify() {
- if atomic.LoadInt32(l.refs) > 1 {
- // Copy the lockedMutexes.
- lm := make([]string, len(l.lockedMutexes))
- copy(lm, l.lockedMutexes)
- l.lockedMutexes = lm
-
- // Copy the stored values.
- s := make(map[ssa.Value]ssa.Value)
- for k, v := range l.stored {
- s[k] = v
- }
- l.stored = s
-
- // Reset the used values.
- l.used = make(map[ssa.Value]struct{})
-
- // Copy the defers.
- ds := make([]*ssa.Defer, len(l.defers))
- copy(ds, l.defers)
- l.defers = ds
-
- // Drop our reference.
- atomic.AddInt32(l.refs, -1)
- newRefs := int32(1) // Not shared.
- l.refs = &newRefs
- }
-}
-
-// isHeld indicates whether the field is held or not.
-func (l *lockState) isHeld(rv resolvedValue) (string, bool) {
- if !rv.valid {
- return rv.valueAsString(l), false
- }
- s := rv.valueAsString(l)
- for _, k := range l.lockedMutexes {
- if k == s {
- return s, true
- }
- }
- return s, false
-}
-
-// lockField locks the given field.
-//
-// If false is returned, the field was already locked.
-func (l *lockState) lockField(rv resolvedValue) (string, bool) {
- if !rv.valid {
- return rv.valueAsString(l), false
- }
- s := rv.valueAsString(l)
- for _, k := range l.lockedMutexes {
- if k == s {
- return s, false
- }
- }
- l.modify()
- l.lockedMutexes = append(l.lockedMutexes, s)
- return s, true
-}
-
-// unlockField unlocks the given field.
-//
-// If false is returned, the field was not locked.
-func (l *lockState) unlockField(rv resolvedValue) (string, bool) {
- if !rv.valid {
- return rv.valueAsString(l), false
- }
- s := rv.valueAsString(l)
- for i, k := range l.lockedMutexes {
- if k == s {
- // Copy the last lock in and truncate.
- l.modify()
- l.lockedMutexes[i] = l.lockedMutexes[len(l.lockedMutexes)-1]
- l.lockedMutexes = l.lockedMutexes[:len(l.lockedMutexes)-1]
- return s, true
- }
- }
- return s, false
-}
-
-// store records an alias.
-func (l *lockState) store(addr ssa.Value, v ssa.Value) {
- l.modify()
- l.stored[addr] = v
-}
-
-// isSubset indicates whether other holds all the locks held by l.
-func (l *lockState) isSubset(other *lockState) bool {
- held := 0 // Number in l, held by other.
- for _, k := range l.lockedMutexes {
- for _, ok := range other.lockedMutexes {
- if k == ok {
- held++
- break
- }
- }
- }
- return held >= len(l.lockedMutexes)
-}
-
-// count indicates the number of locks held.
-func (l *lockState) count() int {
- return len(l.lockedMutexes)
-}
-
-// isCompatible returns true if the states are compatible.
-func (l *lockState) isCompatible(other *lockState) bool {
- return l.isSubset(other) && other.isSubset(l)
-}
-
-// elemType is a type that implements the Elem function.
-type elemType interface {
- Elem() types.Type
-}
-
-// valueAsString returns a string for a given value.
-//
-// This decomposes the value into the simplest possible representation in terms
-// of parameters, free variables and globals. During resolution, stored values
-// may be transferred, as well as bound free variables.
-//
-// Nil may not be passed here.
-func (l *lockState) valueAsString(v ssa.Value) string {
- switch x := v.(type) {
- case *ssa.Parameter:
-		// Was this provided as a parameter for a local anonymous
- // function invocation?
- v, ok := l.stored[x]
- if ok {
- return l.valueAsString(v)
- }
- return fmt.Sprintf("{param:%s}", x.Name())
- case *ssa.Global:
- return fmt.Sprintf("{global:%s}", x.Name())
- case *ssa.FreeVar:
- // Attempt to resolve this, in case we are being invoked in a
- // scope where all the variables are bound.
- v, ok := l.stored[x]
- if ok {
- // The FreeVar is typically bound to a location, so we
- // check what's been stored there. Note that the second
- // may map to the same FreeVar, which we can check.
- stored, ok := l.stored[v]
- if ok {
- return l.valueAsString(stored)
- }
- }
- return fmt.Sprintf("{freevar:%s}", x.Name())
- case *ssa.Convert:
- // Just disregard conversion.
- return l.valueAsString(x.X)
- case *ssa.ChangeType:
- // Ditto, disregard.
- return l.valueAsString(x.X)
- case *ssa.UnOp:
- if x.Op != token.MUL {
- break
- }
- // Is this loading a free variable? If yes, then this can be
- // resolved in the original isAlias function.
- if fv, ok := x.X.(*ssa.FreeVar); ok {
- return l.valueAsString(fv)
- }
-		// Should we try to resolve via a memory address? This needs to
- // be done since a memory location can hold its own value.
- if _, ok := l.used[x.X]; !ok {
- // Check if we know what the accessed location holds.
- // This is used to disambiguate memory locations.
- v, ok := l.stored[x.X]
- if ok {
- l.used[x.X] = struct{}{}
- defer func() { delete(l.used, x.X) }()
- return l.valueAsString(v)
- }
- }
-		// x.X.Type is a pointer. We must construct this type
- // dynamically, since the ssa.Value could be synthetic.
- return fmt.Sprintf("*(%s)", l.valueAsString(x.X))
- case *ssa.Field:
- structType, ok := resolveStruct(x.X.Type())
- if !ok {
- // This should not happen.
- panic(fmt.Sprintf("structType not available for struct: %#v", x.X))
- }
- fieldObj := structType.Field(x.Field)
- return fmt.Sprintf("%s.%s", l.valueAsString(x.X), fieldObj.Name())
- case *ssa.FieldAddr:
- structType, ok := resolveStruct(x.X.Type())
- if !ok {
- // This should not happen.
- panic(fmt.Sprintf("structType not available for struct: %#v", x.X))
- }
- fieldObj := structType.Field(x.Field)
- return fmt.Sprintf("&(%s.%s)", l.valueAsString(x.X), fieldObj.Name())
- case *ssa.Index:
- return fmt.Sprintf("%s[%s]", l.valueAsString(x.X), l.valueAsString(x.Index))
- case *ssa.IndexAddr:
- return fmt.Sprintf("&(%s[%s])", l.valueAsString(x.X), l.valueAsString(x.Index))
- case *ssa.Lookup:
- return fmt.Sprintf("%s[%s]", l.valueAsString(x.X), l.valueAsString(x.Index))
- case *ssa.Extract:
- return fmt.Sprintf("%s[%d]", l.valueAsString(x.Tuple), x.Index)
- }
-
- // In the case of any other type (e.g. this may be an alloc, a return
- // value, etc.), just return the literal pointer value to the Value.
-	// This will be unique within the ssa graph, and so if two values are
-	// equal, they refer to the same value.
- return fmt.Sprintf("{%T:%p}", v, v)
-}
-
-// String returns the full lock state.
-func (l *lockState) String() string {
- if l.count() == 0 {
- return "no locks held"
- }
- return strings.Join(l.lockedMutexes, ",")
-}
-
-// pushDefer pushes a defer onto the stack.
-func (l *lockState) pushDefer(d *ssa.Defer) {
- l.modify()
- l.defers = append(l.defers, d)
-}
-
-// popDefer pops a defer from the stack.
-func (l *lockState) popDefer() *ssa.Defer {
- // Does not technically modify the underlying slice.
- count := len(l.defers)
- if count == 0 {
- return nil
- }
- d := l.defers[count-1]
- l.defers = l.defers[:count-1]
- return d
-}
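
The fork/modify pair above gives lockState copy-on-write semantics, so sibling branches can share a lock set until one of them changes it. The self-contained analogue below illustrates that behavior; it is a sketch, not the deleted code:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // state is a stripped-down analogue of lockState's copy-on-write sharing.
    type state struct {
        locks []string
        refs  *int32
    }

    func newState() *state {
        r := int32(1)
        return &state{refs: &r}
    }

    // fork shares the underlying slice and bumps the reference count.
    func (s *state) fork() *state {
        atomic.AddInt32(s.refs, 1)
        return &state{locks: s.locks, refs: s.refs}
    }

    // modify copies the slice before the first write to a shared state.
    func (s *state) modify() {
        if atomic.LoadInt32(s.refs) > 1 {
            s.locks = append([]string(nil), s.locks...)
            atomic.AddInt32(s.refs, -1)
            r := int32(1)
            s.refs = &r
        }
    }

    func (s *state) lock(name string) {
        s.modify()
        s.locks = append(s.locks, name)
    }

    func main() {
        parent := newState()
        left, right := parent.fork(), parent.fork()
        left.lock("tc.mu")                   // copies before writing; right is unaffected
        fmt.Println(left.locks, right.locks) // [tc.mu] []
    }
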
diff --git a/tools/checklocks/test/BUILD b/tools/checklocks/test/BUILD
deleted file mode 100644
index 966bbac22..000000000
--- a/tools/checklocks/test/BUILD
+++ /dev/null
@@ -1,20 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "test",
- srcs = [
- "alignment.go",
- "atomics.go",
- "basics.go",
- "branches.go",
- "closures.go",
- "defer.go",
- "incompat.go",
- "methods.go",
- "parameters.go",
- "return.go",
- "test.go",
- ],
-)
diff --git a/tools/checklocks/test/alignment.go b/tools/checklocks/test/alignment.go
deleted file mode 100644
index cd857ff73..000000000
--- a/tools/checklocks/test/alignment.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package test
-
-type alignedStruct32 struct {
- v int32
-}
-
-type alignedStruct64 struct {
- v int64
-}
-
-type alignedStructGood struct {
- v0 alignedStruct32
- v1 alignedStruct32
- v2 alignedStruct64
-}
-
-type alignedStructGoodArray0 struct {
- v0 [3]alignedStruct32
- v1 [3]alignedStruct32
- v2 alignedStruct64
-}
-
-type alignedStructGoodArray1 [16]alignedStructGood
-
-type alignedStructBad struct {
- v0 alignedStruct32
- v1 alignedStruct64
- v2 alignedStruct32
-}
-
-type alignedStructBadArray0 struct {
- v0 [3]alignedStruct32
- v1 [2]alignedStruct64
- v2 [1]alignedStruct32
-}
-
-type alignedStructBadArray1 [16]alignedStructBad
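
The alignedStructBad layouts above presumably exercise the alignment validation (checkTypeAlignment): interleaving 32-bit and 64-bit fields can leave a 64-bit field at an offset that is not 8-byte aligned on 32-bit platforms. Assuming that is the intent, one hypothetical repair is to group the 64-bit field first:

    package example

    type fixed32 struct {
        v int32
    }

    type fixed64 struct {
        v int64
    }

    // With the 64-bit field first, its offset within the struct is zero, and
    // the 8-byte alignment requirement propagates to the enclosing type as an
    // atomicAlignment fact rather than being violated mid-struct.
    type alignedStructRepaired struct {
        v1 fixed64
        v0 fixed32
        v2 fixed32
    }
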
diff --git a/tools/checklocks/test/atomics.go b/tools/checklocks/test/atomics.go
deleted file mode 100644
index 8e060d8a2..000000000
--- a/tools/checklocks/test/atomics.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package test
-
-import (
- "sync"
- "sync/atomic"
-)
-
-type atomicStruct struct {
- accessedNormally int32
-
- // +checkatomic
- accessedAtomically int32
-
- // +checklocksignore
- ignored int32
-}
-
-func testNormalAccess(tc *atomicStruct, v chan int32, p chan *int32) {
- v <- tc.accessedNormally
- p <- &tc.accessedNormally
-}
-
-func testAtomicAccess(tc *atomicStruct, v chan int32) {
- v <- atomic.LoadInt32(&tc.accessedAtomically)
-}
-
-func testAtomicAccessInvalid(tc *atomicStruct, v chan int32) {
- v <- atomic.LoadInt32(&tc.accessedNormally) // +checklocksfail
-}
-
-func testNormalAccessInvalid(tc *atomicStruct, v chan int32, p chan *int32) {
- v <- tc.accessedAtomically // +checklocksfail
- p <- &tc.accessedAtomically // +checklocksfail
-}
-
-func testIgnored(tc *atomicStruct, v chan int32, p chan *int32) {
- v <- atomic.LoadInt32(&tc.ignored)
- v <- tc.ignored
- p <- &tc.ignored
-}
-
-type atomicMixedStruct struct {
- mu sync.Mutex
-
- // +checkatomic
- // +checklocks:mu
- accessedMixed int32
-}
-
-func testAtomicMixedValidRead(tc *atomicMixedStruct, v chan int32) {
- v <- atomic.LoadInt32(&tc.accessedMixed)
-}
-
-func testAtomicMixedInvalidRead(tc *atomicMixedStruct, v chan int32, p chan *int32) {
- v <- tc.accessedMixed // +checklocksfail
- p <- &tc.accessedMixed // +checklocksfail
-}
-
-func testAtomicMixedValidLockedWrite(tc *atomicMixedStruct, v chan int32, p chan *int32) {
- tc.mu.Lock()
- atomic.StoreInt32(&tc.accessedMixed, 1)
- tc.mu.Unlock()
-}
-
-func testAtomicMixedInvalidLockedWrite(tc *atomicMixedStruct, v chan int32, p chan *int32) {
- tc.mu.Lock()
- tc.accessedMixed = 1 // +checklocksfail:2
- tc.mu.Unlock()
-}
-
-func testAtomicMixedInvalidAtomicWrite(tc *atomicMixedStruct, v chan int32, p chan *int32) {
- atomic.StoreInt32(&tc.accessedMixed, 1) // +checklocksfail
-}
-
-func testAtomicMixedInvalidWrite(tc *atomicMixedStruct, v chan int32, p chan *int32) {
- tc.accessedMixed = 1 // +checklocksfail:2
-}
diff --git a/tools/checklocks/test/basics.go b/tools/checklocks/test/basics.go
deleted file mode 100644
index 7a773171f..000000000
--- a/tools/checklocks/test/basics.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package test
-
-import (
- "sync"
-)
-
-func testLockedAccessValid(tc *oneGuardStruct) {
- tc.mu.Lock()
- tc.guardedField = 1
- tc.mu.Unlock()
-}
-
-func testLockedAccessIgnore(tc *oneGuardStruct) {
- tc.mu.Lock()
- tc.unguardedField = 1
- tc.mu.Unlock()
-}
-
-func testUnlockedAccessInvalidWrite(tc *oneGuardStruct) {
- tc.guardedField = 2 // +checklocksfail
-}
-
-func testUnlockedAccessInvalidRead(tc *oneGuardStruct) {
- x := tc.guardedField // +checklocksfail
- _ = x
-}
-
-func testUnlockedAccessValid(tc *oneGuardStruct) {
- tc.unguardedField = 2
-}
-
-func testCallValidAccess(tc *oneGuardStruct) {
- callValidAccess(tc)
-}
-
-func callValidAccess(tc *oneGuardStruct) {
- tc.mu.Lock()
- tc.guardedField = 1
- tc.mu.Unlock()
-}
-
-func testCallValueMixup(tc *oneGuardStruct) {
- callValueMixup(tc, tc)
-}
-
-func callValueMixup(tc1, tc2 *oneGuardStruct) {
- tc1.mu.Lock()
- tc2.guardedField = 2 // +checklocksfail
- tc1.mu.Unlock()
-}
-
-func testCallPreconditionsInvalid(tc *oneGuardStruct) {
- callPreconditions(tc) // +checklocksfail
-}
-
-func testCallPreconditionsValid(tc *oneGuardStruct) {
- tc.mu.Lock()
- callPreconditions(tc)
- tc.mu.Unlock()
-}
-
-// +checklocks:tc.mu
-func callPreconditions(tc *oneGuardStruct) {
- tc.guardedField = 1
-}
-
-type nestedFieldsStruct struct {
- mu sync.Mutex
-
- // +checklocks:mu
- nestedStruct struct {
- nested1 int
- nested2 int
- }
-}
-
-func testNestedGuardValid(tc *nestedFieldsStruct) {
- tc.mu.Lock()
- tc.nestedStruct.nested1 = 1
- tc.nestedStruct.nested2 = 2
- tc.mu.Unlock()
-}
-
-func testNestedGuardInvalid(tc *nestedFieldsStruct) {
- tc.nestedStruct.nested1 = 1 // +checklocksfail
-}
-
-type rwGuardStruct struct {
- rwMu sync.RWMutex
-
- // +checklocks:rwMu
- guardedField int
-}
-
-func testRWValidRead(tc *rwGuardStruct) {
- tc.rwMu.Lock()
- tc.guardedField = 1
- tc.rwMu.Unlock()
-}
-
-func testRWValidWrite(tc *rwGuardStruct) {
- tc.rwMu.RLock()
- tc.guardedField = 2
- tc.rwMu.RUnlock()
-}
-
-func testRWInvalidWrite(tc *rwGuardStruct) {
- tc.guardedField = 3 // +checklocksfail
-}
-
-func testRWInvalidRead(tc *rwGuardStruct) {
- x := tc.guardedField + 3 // +checklocksfail
- _ = x
-}
-
-func testTwoLocksDoubleGuardStructValid(tc *twoLocksDoubleGuardStruct) {
- tc.mu.Lock()
- tc.secondMu.Lock()
- tc.doubleGuardedField = 1
- tc.secondMu.Unlock()
-}
-
-func testTwoLocksDoubleGuardStructOnlyOne(tc *twoLocksDoubleGuardStruct) {
- tc.mu.Lock()
- tc.doubleGuardedField = 2 // +checklocksfail
- tc.mu.Unlock()
-}
-
-func testTwoLocksDoubleGuardStructInvalid(tc *twoLocksDoubleGuardStruct) {
- tc.doubleGuardedField = 3 // +checklocksfail:2
-}
diff --git a/tools/checklocks/test/branches.go b/tools/checklocks/test/branches.go
deleted file mode 100644
index 81fec29e5..000000000
--- a/tools/checklocks/test/branches.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package test
-
-import (
- "math/rand"
-)
-
-func testInconsistentReturn(tc *oneGuardStruct) { // +checklocksfail
- if x := rand.Intn(10); x%2 == 1 {
- tc.mu.Lock()
- }
-}
-
-func testConsistentBranching(tc *oneGuardStruct) {
- x := rand.Intn(10)
- if x%2 == 1 {
- tc.mu.Lock()
- } else {
- tc.mu.Lock()
- }
- tc.guardedField = 1
- if x%2 == 1 {
- tc.mu.Unlock()
- } else {
- tc.mu.Unlock()
- }
-}
-
-func testInconsistentBranching(tc *oneGuardStruct) { // +checklocksfail:2
- // We traverse the control flow graph in all consistent ways. We cannot
-	// determine, however, that the first if block and second if block will
- // evaluate to the same condition. Therefore, there are two consistent
- // paths through this code, and two inconsistent paths. Either way, the
-	// guardedField should also be marked as an invalid access.
- x := rand.Intn(10)
- if x%2 == 1 {
- tc.mu.Lock()
- }
- tc.guardedField = 1 // +checklocksfail
- if x%2 == 1 {
- tc.mu.Unlock() // +checklocksforce
- }
-}
diff --git a/tools/checklocks/test/closures.go b/tools/checklocks/test/closures.go
deleted file mode 100644
index 7da87540a..000000000
--- a/tools/checklocks/test/closures.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package test
-
-func testClosureInvalid(tc *oneGuardStruct) {
- // This is expected to fail.
- callClosure(func() {
- tc.guardedField = 1 // +checklocksfail
- })
-}
-
-func testClosureUnsupported(tc *oneGuardStruct) {
- // Locked outside the closure, so may or may not be valid. This cannot
- // be handled and we should explicitly fail. This can't be handled
- // because of the call through callClosure, below, which means the
- // closure will actually be passed as a value somewhere.
- tc.mu.Lock()
- callClosure(func() {
- tc.guardedField = 1 // +checklocksfail
- })
- tc.mu.Unlock()
-}
-
-func testClosureValid(tc *oneGuardStruct) {
- // All locking happens within the closure. This should not present a
- // problem for analysis.
- callClosure(func() {
- tc.mu.Lock()
- tc.guardedField = 1
- tc.mu.Unlock()
- })
-}
-
-func testClosureInline(tc *oneGuardStruct) {
-	// If the closure is being dispatched inline only, then we should be
- // able to analyze this call and give it a thumbs up.
- tc.mu.Lock()
- func() {
- tc.guardedField = 1
- }()
- tc.mu.Unlock()
-}
-
-func testAnonymousInvalid(tc *oneGuardStruct) {
- // Invalid, as per testClosureInvalid above.
- callAnonymous(func(tc *oneGuardStruct) {
- tc.guardedField = 1 // +checklocksfail
- }, tc)
-}
-
-func testAnonymousUnsupported(tc *oneGuardStruct) {
- // Not supportable, as per testClosureUnsupported above.
- tc.mu.Lock()
- callAnonymous(func(tc *oneGuardStruct) {
- tc.guardedField = 1 // +checklocksfail
- }, tc)
- tc.mu.Unlock()
-}
-
-func testAnonymousValid(tc *oneGuardStruct) {
- // Valid, as per testClosureValid above.
- callAnonymous(func(tc *oneGuardStruct) {
- tc.mu.Lock()
- tc.guardedField = 1
- tc.mu.Unlock()
- }, tc)
-}
-
-func testAnonymousInline(tc *oneGuardStruct) {
- // Unlike the closure case, we are able to dynamically infer the set of
- // preconditions for the function dispatch and assert that this is
- // a valid call.
- tc.mu.Lock()
- func(tc *oneGuardStruct) {
- tc.guardedField = 1
- }(tc)
- tc.mu.Unlock()
-}
-
-//go:noinline
-func callClosure(fn func()) {
- fn()
-}
-
-//go:noinline
-func callAnonymous(fn func(*oneGuardStruct), tc *oneGuardStruct) {
- fn(tc)
-}
diff --git a/tools/checklocks/test/defer.go b/tools/checklocks/test/defer.go
deleted file mode 100644
index 6e574e5eb..000000000
--- a/tools/checklocks/test/defer.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package test
-
-func testDeferValidUnlock(tc *oneGuardStruct) {
- tc.mu.Lock()
- tc.guardedField = 1
- defer tc.mu.Unlock()
-}
-
-func testDeferValidAccess(tc *oneGuardStruct) {
- tc.mu.Lock()
- defer func() {
- tc.guardedField = 1
- tc.mu.Unlock()
- }()
-}
-
-func testDeferInvalidAccess(tc *oneGuardStruct) {
- tc.mu.Lock()
- defer func() {
- // N.B. Executed after tc.mu.Unlock().
- tc.guardedField = 1 // +checklocksfail
- }()
- tc.mu.Unlock()
-}
diff --git a/tools/checklocks/test/incompat.go b/tools/checklocks/test/incompat.go
deleted file mode 100644
index b39bc66c1..000000000
--- a/tools/checklocks/test/incompat.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package test
-
-import (
- "sync"
-)
-
-// unsupportedLockerStruct verifies that trying to annotate a field that is not a
-// sync.Mutex or sync.RWMutex results in a failure.
-type unsupportedLockerStruct struct {
- mu sync.Locker
-
- // +checklocks:mu
- x int // +checklocksfail
-}
-
-// badFieldsStruct verifies that referring to invalid fields fails.
-type badFieldsStruct struct {
- // +checklocks:mu
- x int // +checklocksfail
-}
-
-// redundantStruct verifies that redundant annotations fail.
-type redundantStruct struct {
- mu sync.Mutex
-
- // +checklocks:mu
- // +checklocks:mu
- x int // +checklocksfail
-}
-
-// conflictsStruct verifies that conflicting annotations fail.
-type conflictsStruct struct {
- // +checkatomicignore
- // +checkatomic
- x int // +checklocksfail
-
- // +checkatomic
- // +checkatomicignore
- y int // +checklocksfail
-}
diff --git a/tools/checklocks/test/methods.go b/tools/checklocks/test/methods.go
deleted file mode 100644
index 72e26fca6..000000000
--- a/tools/checklocks/test/methods.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package test
-
-import (
- "sync"
-)
-
-type testMethods struct {
- mu sync.Mutex
-
- // +checklocks:mu
- guardedField int
-}
-
-func (t *testMethods) methodValid() {
- t.mu.Lock()
- t.guardedField = 1
- t.mu.Unlock()
-}
-
-func (t *testMethods) methodInvalid() {
- t.guardedField = 2 // +checklocksfail
-}
-
-// +checklocks:t.mu
-func (t *testMethods) MethodLocked(a, b, c int) {
- t.guardedField = 3
-}
-
-// +checklocksignore
-func (t *testMethods) methodIgnore() {
- t.guardedField = 2
-}
-
-func testMethodCallsValid(tc *testMethods) {
- tc.methodValid()
-}
-
-func testMethodCallsValidPreconditions(tc *testMethods) {
- tc.mu.Lock()
- tc.MethodLocked(1, 2, 3)
- tc.mu.Unlock()
-}
-
-func testMethodCallsInvalid(tc *testMethods) {
- tc.MethodLocked(4, 5, 6) // +checklocksfail
-}
-
-func testMultipleParameters(tc1, tc2, tc3 *testMethods) {
- tc1.mu.Lock()
- tc1.guardedField = 1
- tc2.guardedField = 2 // +checklocksfail
- tc3.guardedField = 3 // +checklocksfail
- tc1.mu.Unlock()
-}
-
-type testMethodsWithParameters struct {
- mu sync.Mutex
-
- // +checklocks:mu
- guardedField int
-}
-
-type ptrToTestMethodsWithParameters *testMethodsWithParameters
-
-// +checklocks:t.mu
-// +checklocks:a.mu
-func (t *testMethodsWithParameters) methodLockedWithParameters(a *testMethodsWithParameters, b *testMethodsWithParameters) {
- t.guardedField = a.guardedField
- b.guardedField = a.guardedField // +checklocksfail
-}
-
-// +checklocks:t.mu
-// +checklocks:a.mu
-// +checklocks:b.mu
-func (t *testMethodsWithParameters) methodLockedWithPtrType(a *testMethodsWithParameters, b ptrToTestMethodsWithParameters) {
- t.guardedField = a.guardedField
- b.guardedField = a.guardedField
-}
-
-// +checklocks:a.mu
-func standaloneFunctionWithGuard(a *testMethodsWithParameters) {
- a.guardedField = 1
- a.mu.Unlock()
- a.guardedField = 1 // +checklocksfail
-}
-
-type testMethodsWithEmbedded struct {
- mu sync.Mutex
-
- // +checklocks:mu
- guardedField int
- p *testMethodsWithParameters
-}
-
-// +checklocks:t.mu
-func (t *testMethodsWithEmbedded) DoLocked(a, b *testMethodsWithParameters) {
- t.guardedField = 1
- a.mu.Lock()
- b.mu.Lock()
- t.p.methodLockedWithParameters(a, b) // +checklocksfail
- a.mu.Unlock()
- b.mu.Unlock()
-}
diff --git a/tools/checklocks/test/parameters.go b/tools/checklocks/test/parameters.go
deleted file mode 100644
index 5b9e664b6..000000000
--- a/tools/checklocks/test/parameters.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package test
-
-func testParameterPassingByAddrValid(tc *oneGuardStruct) {
- tc.mu.Lock()
- nestedWithGuardByAddr(&tc.guardedField, &tc.unguardedField)
- tc.mu.Unlock()
-}
-
-func testParameterPassingByAddrInvalid(tc *oneGuardStruct) {
- nestedWithGuardByAddr(&tc.guardedField, &tc.unguardedField) // +checklocksfail
-}
-
-func testParameterPassingByValueValid(tc *oneGuardStruct) {
- tc.mu.Lock()
- nestedWithGuardByValue(tc.guardedField, tc.unguardedField)
- tc.mu.Unlock()
-}
-
-func testParameterPassingByValueInvalid(tc *oneGuardStruct) {
- nestedWithGuardByValue(tc.guardedField, tc.unguardedField) // +checklocksfail
-}
-
-func nestedWithGuardByAddr(guardedField, unguardedField *int) {
- *guardedField = 4
- *unguardedField = 5
-}
-
-func nestedWithGuardByValue(guardedField, unguardedField int) {
- // Read the fields to keep the SA4009 static analyzer happy.
- _ = guardedField
- _ = unguardedField
- guardedField = 4
- unguardedField = 5
-}
diff --git a/tools/checklocks/test/return.go b/tools/checklocks/test/return.go
deleted file mode 100644
index 47c7b6773..000000000
--- a/tools/checklocks/test/return.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package test
-
-// +checklocks:tc.mu
-func testReturnInvalidGuard() (tc *oneGuardStruct) { // +checklocksfail
- return new(oneGuardStruct)
-}
-
-// +checklocksrelease:tc.mu
-func testReturnInvalidRelease() (tc *oneGuardStruct) { // +checklocksfail
- return new(oneGuardStruct)
-}
-
-// +checklocksacquire:tc.mu
-func testReturnInvalidAcquire() (tc *oneGuardStruct) {
- return new(oneGuardStruct) // +checklocksfail
-}
-
-// +checklocksacquire:tc.mu
-func testReturnValidAcquire() (tc *oneGuardStruct) {
- tc = new(oneGuardStruct)
- tc.mu.Lock()
- return tc
-}
-
-func testReturnAcquireCall() {
- tc := testReturnValidAcquire()
- tc.guardedField = 1
- tc.mu.Unlock()
-}
-
-// +checklocksacquire:tc.val.mu
-// +checklocksacquire:tc.ptr.mu
-func testReturnValidNestedAcquire() (tc *nestedGuardStruct) {
- tc = new(nestedGuardStruct)
- tc.ptr = new(oneGuardStruct)
- tc.val.mu.Lock()
- tc.ptr.mu.Lock()
- return tc
-}
-
-func testReturnNestedAcquireCall() {
- tc := testReturnValidNestedAcquire()
- tc.val.guardedField = 1
- tc.ptr.guardedField = 1
- tc.val.mu.Unlock()
- tc.ptr.mu.Unlock()
-}
diff --git a/tools/checklocks/test/test.go b/tools/checklocks/test/test.go
deleted file mode 100644
index cbf6b1635..000000000
--- a/tools/checklocks/test/test.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package test is a test package.
-//
-// Tests are all compilation tests in separate files.
-package test
-
-import (
- "sync"
-)
-
-// oneGuardStruct has one guarded field.
-type oneGuardStruct struct {
- mu sync.Mutex
- // +checklocks:mu
- guardedField int
- unguardedField int
-}
-
-// twoGuardStruct has two guarded fields.
-type twoGuardStruct struct {
- mu sync.Mutex
- // +checklocks:mu
- guardedField1 int
- // +checklocks:mu
- guardedField2 int
-}
-
-// twoLocksStruct has two locks and two fields.
-type twoLocksStruct struct {
- mu sync.Mutex
- secondMu sync.Mutex
- // +checklocks:mu
- guardedField1 int
- // +checklocks:secondMu
- guardedField2 int
-}
-
-// twoLocksDoubleGuardStruct has two locks and a single field with two guards.
-type twoLocksDoubleGuardStruct struct {
- mu sync.Mutex
- secondMu sync.Mutex
- // +checklocks:mu
- // +checklocks:secondMu
- doubleGuardedField int
-}
-
-// nestedGuardStruct nests oneGuardStruct fields.
-type nestedGuardStruct struct {
- val oneGuardStruct
- ptr *oneGuardStruct
-}
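
The deleted files above form the checklocks analyzer's compile-only test package, and together they document the annotation syntax (+checklocks, +checklocksacquire, +checklocksrelease, +checkatomic). For quick reference, here is a minimal sketch of how these annotations are typically written in ordinary code; the Counter type and its methods are illustrative only and are not part of the deleted files.

package counter

import "sync"

// Counter guards value with mu.
type Counter struct {
	mu sync.Mutex

	// +checklocks:mu
	value int
}

// Add requires the caller to hold c.mu, as declared by the annotation.
//
// +checklocks:c.mu
func (c *Counter) Add(n int) {
	c.value += n
}

// Increment acquires the lock itself, so no annotation is needed.
func (c *Counter) Increment() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.Add(1)
}

// NewLocked returns a Counter whose lock is already held, mirroring the
// +checklocksacquire pattern exercised in return.go above.
//
// +checklocksacquire:c.mu
func NewLocked() (c *Counter) {
	c = new(Counter)
	c.mu.Lock()
	return c
}
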
diff --git a/tools/checkunsafe/BUILD b/tools/checkunsafe/BUILD
deleted file mode 100644
index 0bb07b415..000000000
--- a/tools/checkunsafe/BUILD
+++ /dev/null
@@ -1,13 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "checkunsafe",
- srcs = ["check_unsafe.go"],
- nogo = False,
- visibility = ["//tools/nogo:__subpackages__"],
- deps = [
- "@org_golang_x_tools//go/analysis:go_default_library",
- ],
-)
diff --git a/tools/checkunsafe/check_unsafe.go b/tools/checkunsafe/check_unsafe.go
deleted file mode 100644
index 4ccd7cc5a..000000000
--- a/tools/checkunsafe/check_unsafe.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package checkunsafe allows unsafe imports only in files named appropriately.
-package checkunsafe
-
-import (
- "fmt"
- "path"
- "strconv"
- "strings"
-
- "golang.org/x/tools/go/analysis"
-)
-
-// Analyzer defines the entrypoint.
-var Analyzer = &analysis.Analyzer{
- Name: "checkunsafe",
- Doc: "allows unsafe use only in specified files",
- Run: run,
-}
-
-func run(pass *analysis.Pass) (interface{}, error) {
- for _, f := range pass.Files {
- for _, imp := range f.Imports {
- // Is this an unsafe import?
- pkg, err := strconv.Unquote(imp.Path.Value)
- if err != nil || pkg != "unsafe" {
- continue
- }
-
- // Extract the filename.
- filename := pass.Fset.File(imp.Pos()).Name()
-
- // Allow files named _unsafe.go or _test.go to opt out.
- if strings.HasSuffix(filename, "_unsafe.go") || strings.HasSuffix(filename, "_test.go") {
- continue
- }
-
- // Throw the error.
- pass.Reportf(imp.Pos(), fmt.Sprintf("package unsafe imported by %s; must end with _unsafe.go", path.Base(filename)))
- }
- }
- return nil, nil
-}
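
For context on the analyzer deleted above: it reports any import of "unsafe" unless the importing file's name ends in _unsafe.go or _test.go. A minimal sketch of the allowed case follows; the package, file, and function names are illustrative.

// File: sizes_unsafe.go
//
// Allowed: the file name ends in _unsafe.go. The same import in a file
// named sizes.go would be reported as:
//   package unsafe imported by sizes.go; must end with _unsafe.go
package sizes

import "unsafe"

// PointerBytes returns the size in bytes of a pointer on this platform.
func PointerBytes() uintptr {
	var p unsafe.Pointer
	return unsafe.Sizeof(p)
}
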
diff --git a/tools/defs.bzl b/tools/defs.bzl
deleted file mode 100644
index 27542a2f5..000000000
--- a/tools/defs.bzl
+++ /dev/null
@@ -1,349 +0,0 @@
-"""Wrappers for common build rules.
-
-These wrappers apply common BUILD configurations (e.g., proto_library
-automagically creating cc_ and go_ proto targets) and act as a single point of
-change for Google-internal and bazel-compatible rules.
-"""
-
-load("//tools/go_stateify:defs.bzl", "go_stateify")
-load("//tools/go_marshal:defs.bzl", "go_marshal", "marshal_deps", "marshal_test_deps")
-load("//tools/nogo:defs.bzl", "nogo_test")
-load("//tools/bazeldefs:defs.bzl", _arch_genrule = "arch_genrule", _build_test = "build_test", _bzl_library = "bzl_library", _coreutil = "coreutil", _default_installer = "default_installer", _default_net_util = "default_net_util", _more_shards = "more_shards", _most_shards = "most_shards", _proto_library = "proto_library", _select_arch = "select_arch", _select_system = "select_system", _short_path = "short_path", _version = "version")
-load("//tools/bazeldefs:cc.bzl", _cc_binary = "cc_binary", _cc_flags_supplier = "cc_flags_supplier", _cc_grpc_library = "cc_grpc_library", _cc_library = "cc_library", _cc_proto_library = "cc_proto_library", _cc_test = "cc_test", _cc_toolchain = "cc_toolchain", _gbenchmark = "gbenchmark", _grpcpp = "grpcpp", _gtest = "gtest", _vdso_linker_option = "vdso_linker_option")
-load("//tools/bazeldefs:go.bzl", _bazel_worker_proto = "bazel_worker_proto", _gazelle = "gazelle", _go_binary = "go_binary", _go_embed_data = "go_embed_data", _go_grpc_and_proto_libraries = "go_grpc_and_proto_libraries", _go_library = "go_library", _go_path = "go_path", _go_proto_library = "go_proto_library", _go_rule = "go_rule", _go_test = "go_test", _select_goarch = "select_goarch", _select_goos = "select_goos")
-load("//tools/bazeldefs:pkg.bzl", _pkg_deb = "pkg_deb", _pkg_tar = "pkg_tar")
-load("//tools/bazeldefs:platforms.bzl", _default_platform = "default_platform", _platforms = "platforms")
-load("//tools/bazeldefs:tags.bzl", "go_suffixes")
-
-# Core rules.
-arch_genrule = _arch_genrule
-build_test = _build_test
-bzl_library = _bzl_library
-default_installer = _default_installer
-default_net_util = _default_net_util
-select_arch = _select_arch
-select_system = _select_system
-short_path = _short_path
-coreutil = _coreutil
-more_shards = _more_shards
-most_shards = _most_shards
-version = _version
-
-# C++ rules.
-cc_binary = _cc_binary
-cc_flags_supplier = _cc_flags_supplier
-cc_grpc_library = _cc_grpc_library
-cc_library = _cc_library
-cc_test = _cc_test
-cc_toolchain = _cc_toolchain
-gbenchmark = _gbenchmark
-gtest = _gtest
-grpcpp = _grpcpp
-vdso_linker_option = _vdso_linker_option
-
-# Go rules.
-gazelle = _gazelle
-go_path = _go_path
-select_goos = _select_goos
-select_goarch = _select_goarch
-go_embed_data = _go_embed_data
-go_proto_library = _go_proto_library
-bazel_worker_proto = _bazel_worker_proto
-
-# Packaging rules.
-pkg_deb = _pkg_deb
-pkg_tar = _pkg_tar
-
-# Platform options.
-default_platform = _default_platform
-platforms = _platforms
-
-def _go_add_tags(ctx):
- """ Adds tags to the given source file. """
- output = ctx.outputs.out
- runner = ctx.actions.declare_file(ctx.label.name + ".sh")
- lines = ["#!/bin/bash"]
- lines += ["echo '// +build %s' >> %s" % (tag, output.path) for tag in ctx.attr.go_tags]
- lines.append("echo '' >> %s" % output.path)
- lines += ["cat %s >> %s" % (f.path, output.path) for f in ctx.files.src]
- lines.append("")
- ctx.actions.write(runner, "\n".join(lines), is_executable = True)
- ctx.actions.run(
- inputs = ctx.files.src,
- outputs = [output],
- executable = runner,
- )
- return [DefaultInfo(
- files = depset([output]),
- )]
-
-go_add_tags = _go_rule(
- rule,
- implementation = _go_add_tags,
- attrs = {
- "go_tags": attr.string_list(doc = "Go build tags to be added.", mandatory = True),
- "src": attr.label(doc = "Source file.", allow_single_file = True, mandatory = True),
- "out": attr.output(doc = "Output file.", mandatory = True),
- },
-)
-
-def go_binary(name, nogo = True, pure = False, static = False, x_defs = None, **kwargs):
- """Wraps the standard go_binary.
-
- Args:
- name: the rule name.
- nogo: enable nogo analysis.
- pure: build a pure Go (no CGo) binary.
- static: build a static binary.
- x_defs: additional linker definitions.
- **kwargs: standard go_binary arguments.
- """
- _go_binary(
- name = name,
- pure = pure,
- static = static,
- x_defs = x_defs,
- **kwargs
- )
- if nogo:
- # Note that the nogo rule applies only for go_library and go_test
- # targets, therefore we construct a library from the binary sources.
- # This is done because the binary may not be in a form that objdump
- # supports (i.e. a pure Go binary).
- _go_library(
- name = name + "_nogo_library",
- srcs = kwargs.get("srcs", []),
- deps = kwargs.get("deps", []),
- testonly = 1,
- )
- nogo_test(
- name = name + "_nogo",
- config = "//:nogo_config",
- srcs = kwargs.get("srcs", []),
- deps = [":" + name + "_nogo_library"],
- tags = ["nogo"],
- )
-
-def calculate_sets(srcs):
- """Calculates special Go sets for templates.
-
- Args:
- srcs: the full set of Go sources.
-
- Returns:
- A dictionary of the form:
-
- "": [src1.go, src2.go]
- "suffix": [src3suffix.go, src4suffix.go]
-
- Note that suffix will typically start with '_'.
- """
- result = dict()
- for file in srcs:
- if not file.endswith(".go"):
- continue
- target = ""
- for suffix in go_suffixes:
- if file.endswith(suffix + ".go"):
- target = suffix
- if not target in result:
- result[target] = [file]
- else:
- result[target].append(file)
- return result
-
-def go_imports(name, src, out):
- """Simplify a single Go source file by eliminating unused imports."""
- native.genrule(
- name = name,
- srcs = [src],
- outs = [out],
- tools = ["@org_golang_x_tools//cmd/goimports:goimports"],
- cmd = ("$(location @org_golang_x_tools//cmd/goimports:goimports) $(SRCS) > $@"),
- )
-
-def go_library(name, srcs, deps = [], imports = [], stateify = True, marshal = False, marshal_debug = False, nogo = True, **kwargs):
- """Wraps the standard go_library and does stateification and marshalling.
-
- The recommended way is to use this rule with a configuration mostly identical to that of
- the native go_library rule.
-
- These definitions provide additional flags (stateify, marshal) that can be used
- with the generators to automatically supplement the library code.
-
- load("//tools:defs.bzl", "go_library")
-
- go_library(
- name = "foo",
- srcs = ["foo.go"],
- )
-
- Args:
- name: the rule name.
- srcs: the library sources.
- deps: the library dependencies.
- imports: imports required for stateify.
- stateify: whether stateify is enabled (default: true).
- marshal: whether marshal is enabled (default: false).
- marshal_debug: whether the go_marshal tool emits debugging output (default: false).
- nogo: enable nogo analysis.
- **kwargs: standard go_library arguments.
- """
- all_srcs = srcs
- all_deps = deps
- dirname, _, _ = native.package_name().rpartition("/")
- full_pkg = dirname + "/" + name
- if stateify:
- # Only do stateification for non-state packages without manual autogen.
- # First, we need to segregate the input files via the special suffixes,
- # and calculate the final output set.
- state_sets = calculate_sets(srcs)
- for (suffix, src_subset) in state_sets.items():
- go_stateify(
- name = name + suffix + "_state_autogen_with_imports",
- srcs = src_subset,
- imports = imports,
- package = full_pkg,
- out = name + suffix + "_state_autogen_with_imports.go",
- )
- go_imports(
- name = name + suffix + "_state_autogen",
- src = name + suffix + "_state_autogen_with_imports.go",
- out = name + suffix + "_state_autogen.go",
- )
- all_srcs = all_srcs + [
- name + suffix + "_state_autogen.go"
- for suffix in state_sets.keys()
- ]
-
- if "//pkg/state" not in all_deps:
- all_deps = all_deps + ["//pkg/state"]
-
- if marshal:
- # See above.
- marshal_sets = calculate_sets(srcs)
- for (suffix, src_subset) in marshal_sets.items():
- go_marshal(
- name = name + suffix + "_abi_autogen",
- srcs = src_subset,
- debug = select({
- "//tools/go_marshal:marshal_config_verbose": True,
- "//conditions:default": marshal_debug,
- }),
- imports = imports,
- package = name,
- )
- extra_deps = [
- dep
- for dep in marshal_deps
- if not dep in all_deps
- ]
- all_deps = all_deps + extra_deps
- all_srcs = all_srcs + [
- name + suffix + "_abi_autogen_unsafe.go"
- for suffix in marshal_sets.keys()
- ]
-
- _go_library(
- name = name,
- srcs = all_srcs,
- deps = all_deps,
- **kwargs
- )
- if nogo:
- nogo_test(
- name = name + "_nogo",
- config = "//:nogo_config",
- srcs = all_srcs,
- deps = [":" + name],
- tags = ["nogo"],
- )
-
- if marshal:
- # Ignore importpath for go_test.
- kwargs.pop("importpath", None)
-
- # See above.
- marshal_sets = calculate_sets(srcs)
- for (suffix, _) in marshal_sets.items():
- _go_test(
- name = name + suffix + "_abi_autogen_test",
- srcs = [
- name + suffix + "_abi_autogen_test.go",
- name + suffix + "_abi_autogen_unconditional_test.go",
- ],
- library = ":" + name,
- deps = marshal_test_deps,
- **kwargs
- )
-
-def go_test(name, nogo = True, **kwargs):
- """Wraps the standard go_test.
-
- Args:
- name: the rule name.
- nogo: enable nogo analysis.
- **kwargs: standard go_test arguments.
- """
- _go_test(
- name = name,
- **kwargs
- )
- if nogo:
- nogo_test(
- name = name + "_nogo",
- config = "//:nogo_config",
- srcs = kwargs.get("srcs", []),
- deps = [":" + name],
- tags = ["nogo"],
- )
-
-def proto_library(name, srcs, deps = None, has_services = 0, **kwargs):
- """Wraps the standard proto_library.
-
- Given a proto_library named "foo", this produces up to five different
- targets:
- - foo_proto: proto_library rule.
- - foo_go_proto: go_proto_library rule.
- - foo_cc_proto: cc_proto_library rule.
- - foo_go_grpc_proto: go_grpc_library rule.
- - foo_cc_grpc_proto: cc_grpc_library rule.
-
- Args:
- name: the name to which _proto, _go_proto, etc, will be appended.
- srcs: the proto sources.
- deps: for the proto library and the go_proto_library.
- has_services: 1 to build gRPC code, otherwise 0.
- **kwargs: standard proto_library arguments.
- """
- _proto_library(
- name = name + "_proto",
- srcs = srcs,
- deps = deps,
- has_services = has_services,
- **kwargs
- )
- if has_services:
- _go_grpc_and_proto_libraries(
- name = name,
- deps = deps,
- **kwargs
- )
- else:
- _go_proto_library(
- name = name,
- deps = deps,
- **kwargs
- )
- _cc_proto_library(
- name = name + "_cc_proto",
- deps = [":" + name + "_proto"],
- **kwargs
- )
- if has_services:
- _cc_grpc_library(
- name = name + "_cc_grpc_proto",
- srcs = [":" + name + "_proto"],
- deps = [":" + name + "_cc_proto"],
- **kwargs
- )
diff --git a/tools/deps.bzl b/tools/deps.bzl
deleted file mode 100644
index 91442617c..000000000
--- a/tools/deps.bzl
+++ /dev/null
@@ -1,119 +0,0 @@
-"""Rules for dependency checking."""
-
-# DepsInfo provides a list of dependencies found when building a target.
-DepsInfo = provider(
- "lists dependencies encountered while building",
- fields = {
- "nodes": "a dict from targets to a list of their dependencies",
- },
-)
-
-def _deps_check_impl(target, ctx):
- # Check the target's dependencies and add any of our own deps.
- deps = []
- for dep in ctx.rule.attr.deps:
- deps.append(dep)
- nodes = {}
- if len(deps) != 0:
- nodes[target] = deps
-
- # Keep and propagate each dep's providers.
- for dep in ctx.rule.attr.deps:
- nodes.update(dep[DepsInfo].nodes)
-
- return [DepsInfo(nodes = nodes)]
-
-_deps_check = aspect(
- implementation = _deps_check_impl,
- attr_aspects = ["deps"],
-)
-
-def _is_allowed(target, allowlist, prefixes):
- # Check for allowed prefixes.
- for prefix in prefixes:
- workspace, pfx = prefix.split("//", 1)
- if len(workspace) > 0 and workspace[0] == "@":
- workspace = workspace[1:]
- if target.workspace_name == workspace and target.package.startswith(pfx):
- return True
-
- # Check the allowlist.
- for allowed in allowlist:
- if target == allowed.label:
- return True
-
- return False
-
-def _deps_test_impl(ctx):
- nodes = {}
- for target in ctx.attr.targets:
- for (node_target, node_deps) in target[DepsInfo].nodes.items():
- # Ignore any disallowed targets. This generates more useful error
- # messages. Consider the case where A depends on B and B depends
- # on C, and both B and C are disallowed. Avoid emitting an error
- # that B depends on C, when the real issue is that A depends on B.
- if not _is_allowed(node_target.label, ctx.attr.allowed, ctx.attr.allowed_prefixes) and node_target.label != target.label:
- continue
- bad_deps = []
- for dep in node_deps:
- if not _is_allowed(dep.label, ctx.attr.allowed, ctx.attr.allowed_prefixes):
- bad_deps.append(dep)
- if len(bad_deps) > 0:
- nodes[node_target] = bad_deps
-
- # If there aren't any violations, write a passing test.
- if len(nodes) == 0:
- ctx.actions.write(
- output = ctx.outputs.executable,
- content = "#!/bin/bash\n\nexit 0\n",
- )
- return []
-
- # If we're here, we've found at least one violation.
- script_lines = [
- "#!/bin/bash",
- "echo Invalid dependencies found. If you\\'re sure you want to add dependencies,",
- "echo modify this target.",
- "echo",
- ]
-
- # List the violations.
- for target, deps in nodes.items():
- script_lines.append(
- 'echo "{target} depends on:"'.format(target = target.label),
- )
- for dep in deps:
- script_lines.append('echo "\t{dep}"'.format(dep = dep.label))
-
- # The test must fail.
- script_lines.append("exit 1\n")
-
- ctx.actions.write(
- output = ctx.outputs.executable,
- content = "\n".join(script_lines),
- )
- return []
-
-# Checks that targets only depend on an allowlist of other targets. Targets can
-# be specified directly, or prefixes can be used to allow entire packages or
-# directory trees.
-#
-# This recursively checks the "deps" attribute of each target; dependencies
-# expressed in other ways are not checked. For example, protobuf targets pull in
-# protobuf code, but aren't analyzed by deps_test.
-deps_test = rule(
- implementation = _deps_test_impl,
- attrs = {
- "targets": attr.label_list(
- doc = "The targets to check the transitive dependencies of.",
- aspects = [_deps_check],
- ),
- "allowed": attr.label_list(
- doc = "The allowed dependency targets.",
- ),
- "allowed_prefixes": attr.string_list(
- doc = "Any packages beginning with these prefixes are allowed.",
- ),
- },
- test = True,
-)
diff --git a/tools/github/BUILD b/tools/github/BUILD
deleted file mode 100644
index 9c94bbc63..000000000
--- a/tools/github/BUILD
+++ /dev/null
@@ -1,14 +0,0 @@
-load("//tools:defs.bzl", "go_binary")
-
-package(licenses = ["notice"])
-
-go_binary(
- name = "github",
- srcs = ["main.go"],
- nogo = False,
- deps = [
- "//tools/github/reviver",
- "@com_github_google_go_github_v35//github:go_default_library",
- "@org_golang_x_oauth2//:go_default_library",
- ],
-)
diff --git a/tools/github/main.go b/tools/github/main.go
deleted file mode 100644
index dfb4c769d..000000000
--- a/tools/github/main.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Binary github is the entry point for GitHub utilities.
-package main
-
-import (
- "context"
- "flag"
- "fmt"
- "io/ioutil"
- "log"
- "os"
- "strings"
-
- "github.com/google/go-github/github"
- "golang.org/x/oauth2"
- "gvisor.dev/gvisor/tools/github/reviver"
-)
-
-var (
- owner string
- repo string
- tokenFile string
- paths stringList
- commit string
- dryRun bool
-)
-
-type stringList []string
-
-func (s *stringList) String() string {
- return strings.Join(*s, ",")
-}
-
-func (s *stringList) Set(value string) error {
- *s = append(*s, value)
- return nil
-}
-
-// Keep the options simple for now. Supports only a single path and repo.
-func init() {
- flag.StringVar(&owner, "owner", "", "GitHub project org/owner")
- flag.StringVar(&repo, "repo", "", "GitHub repo")
- flag.StringVar(&tokenFile, "oauth-token-file", "", "file containing the GitHub token (or GITHUB_TOKEN is set)")
- flag.Var(&paths, "path", "path(s) to scan (required for revive)")
- flag.BoolVar(&dryRun, "dry-run", false, "just print changes to be made")
-}
-
-func filterPaths(paths []string) (existing []string) {
- for _, path := range paths {
- if _, err := os.Stat(path); err != nil {
- log.Printf("WARNING: skipping %v: %v", path, err)
- continue
- }
- existing = append(existing, path)
- }
- return
-}
-
-func main() {
- // Set defaults from the environment.
- repository := os.Getenv("GITHUB_REPOSITORY")
- if parts := strings.SplitN(repository, "/", 2); len(parts) == 2 {
- owner = parts[0]
- repo = parts[1]
- }
-
- // Parse flags.
- flag.Usage = func() {
- fmt.Fprintf(flag.CommandLine.Output(), "usage: %s [options] <command>\n", os.Args[0])
- fmt.Fprintf(flag.CommandLine.Output(), "commands: revive, nogo\n")
- flag.PrintDefaults()
- }
- flag.Parse()
- args := flag.Args()
- if len(args) != 1 {
- fmt.Fprintf(flag.CommandLine.Output(), "extra arguments: %s\n", strings.Join(args[1:], ", "))
- flag.Usage()
- os.Exit(1)
- }
-
- // Check for mandatory parameters.
- command := args[0]
- if len(owner) == 0 {
- fmt.Fprintln(flag.CommandLine.Output(), "missing --owner option.")
- flag.Usage()
- os.Exit(1)
- }
- if len(repo) == 0 {
- fmt.Fprintln(flag.CommandLine.Output(), "missing --repo option.")
- flag.Usage()
- os.Exit(1)
- }
- filteredPaths := filterPaths(paths)
- if len(filteredPaths) == 0 {
- fmt.Fprintln(flag.CommandLine.Output(), "no valid --path options provided.")
- flag.Usage()
- os.Exit(1)
- }
-
- // The access token may be passed as a file so it doesn't show up in
- // command line arguments. It also may be provided through the
- // environment to facilitate use through GitHub's CI system.
- token := os.Getenv("GITHUB_TOKEN")
- if len(tokenFile) != 0 {
- bytes, err := ioutil.ReadFile(tokenFile)
- if err != nil {
- fmt.Println(err.Error())
- os.Exit(1)
- }
- token = string(bytes)
- }
- var client *github.Client
- if len(token) == 0 {
- // Client is unauthenticated.
- client = github.NewClient(nil)
- } else {
- // Using the above token.
- ts := oauth2.StaticTokenSource(
- &oauth2.Token{AccessToken: token},
- )
- tc := oauth2.NewClient(context.Background(), ts)
- client = github.NewClient(tc)
- }
-
- switch command {
- case "revive":
- // Load existing GitHub bugs.
- bugger, err := reviver.NewGitHubBugger(client, owner, repo, dryRun)
- if err != nil {
- fmt.Fprintf(os.Stderr, "Error getting github issues: %v\n", err)
- os.Exit(1)
- }
- // Scan the provided path.
- rev := reviver.New(filteredPaths, []reviver.Bugger{bugger})
- if errs := rev.Run(); len(errs) > 0 {
- fmt.Fprintf(os.Stderr, "Encountered %d errors:\n", len(errs))
- for _, err := range errs {
- fmt.Fprintf(os.Stderr, "\t%v\n", err)
- }
- os.Exit(1)
- }
- default:
- // Not a known command.
- fmt.Fprintf(flag.CommandLine.Output(), "unknown command: %s\n", command)
- flag.Usage()
- os.Exit(1)
- }
-}
diff --git a/tools/github/reviver/BUILD b/tools/github/reviver/BUILD
deleted file mode 100644
index aac1c18bf..000000000
--- a/tools/github/reviver/BUILD
+++ /dev/null
@@ -1,27 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "reviver",
- srcs = [
- "github.go",
- "reviver.go",
- ],
- nogo = False,
- visibility = [
- "//tools/github:__subpackages__",
- ],
- deps = ["@com_github_google_go_github_v35//github:go_default_library"],
-)
-
-go_test(
- name = "reviver_test",
- size = "small",
- srcs = [
- "github_test.go",
- "reviver_test.go",
- ],
- library = ":reviver",
- nogo = False,
-)
diff --git a/tools/github/reviver/github.go b/tools/github/reviver/github.go
deleted file mode 100644
index b360f0544..000000000
--- a/tools/github/reviver/github.go
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package reviver
-
-import (
- "context"
- "fmt"
- "strconv"
- "strings"
- "time"
-
- "github.com/google/go-github/github"
-)
-
-// GitHubBugger implements Bugger interface for github issues.
-type GitHubBugger struct {
- owner string
- repo string
- dryRun bool
-
- client *github.Client
- issues map[int]*github.Issue
-}
-
-// NewGitHubBugger creates a new GitHubBugger.
-func NewGitHubBugger(client *github.Client, owner, repo string, dryRun bool) (*GitHubBugger, error) {
- b := &GitHubBugger{
- owner: owner,
- repo: repo,
- dryRun: dryRun,
- issues: map[int]*github.Issue{},
- client: client,
- }
- if err := b.load(); err != nil {
- return nil, err
- }
- return b, nil
-}
-
-func (b *GitHubBugger) load() error {
- err := processAllPages(func(listOpts github.ListOptions) (*github.Response, error) {
- opts := &github.IssueListByRepoOptions{State: "open", ListOptions: listOpts}
- tmps, resp, err := b.client.Issues.ListByRepo(context.Background(), b.owner, b.repo, opts)
- if err != nil {
- return resp, err
- }
- for _, issue := range tmps {
- b.issues[issue.GetNumber()] = issue
- }
- return resp, nil
- })
- if err != nil {
- return err
- }
-
- fmt.Printf("Loaded %d issues from github.com/%s/%s\n", len(b.issues), b.owner, b.repo)
- return nil
-}
-
-// Activate implements Bugger.Activate.
-func (b *GitHubBugger) Activate(todo *Todo) (bool, error) {
- id, err := parseIssueNo(todo.Issue)
- if err != nil {
- return true, err
- }
- if id <= 0 {
- return false, nil
- }
-
- // Check against active issues cache.
- if _, ok := b.issues[id]; ok {
- fmt.Printf("%q is active: OK\n", todo.Issue)
- return true, nil
- }
-
- fmt.Printf("%q is not active: reopening issue %d\n", todo.Issue, id)
-
- // Format comment with TODO locations and search link.
- comment := strings.Builder{}
- fmt.Fprintln(&comment, "There are TODOs still referencing this issue:")
- for _, l := range todo.Locations {
- fmt.Fprintf(&comment,
- "1. [%s:%d](https://github.com/%s/%s/blob/HEAD/%s#L%d): %s\n",
- l.File, l.Line, b.owner, b.repo, l.File, l.Line, l.Comment)
- }
- fmt.Fprintf(&comment,
- "\n\nSearch [TODO](https://github.com/%s/%s/search?q=%%22%s%%22)", b.owner, b.repo, todo.Issue)
-
- if b.dryRun {
- fmt.Printf("[dry-run: skipping change to issue %d]\n%s\n=======================\n", id, comment.String())
- return true, nil
- }
-
- ctx := context.Background()
- req := &github.IssueRequest{State: github.String("open")}
- _, _, err = b.client.Issues.Edit(ctx, b.owner, b.repo, id, req)
- if err != nil {
- return true, fmt.Errorf("failed to reactivate issue %d: %v", id, err)
- }
-
- _, _, err = b.client.Issues.AddLabelsToIssue(ctx, b.owner, b.repo, id, []string{"revived"})
- if err != nil {
- return true, fmt.Errorf("failed to set label on issue %d: %v", id, err)
- }
-
- cmt := &github.IssueComment{
- Body: github.String(comment.String()),
- Reactions: &github.Reactions{Confused: github.Int(1)},
- }
- if _, _, err := b.client.Issues.CreateComment(ctx, b.owner, b.repo, id, cmt); err != nil {
- return true, fmt.Errorf("failed to add comment to issue %d: %v", id, err)
- }
-
- return true, nil
-}
-
-var issuePrefixes = []string{
- "gvisor.dev/issue/",
- "gvisor.dev/issues/",
-}
-
-// parseIssueNo parses the issue number out of the issue url.
-//
-// 0 is returned if url does not correspond to an issue.
-func parseIssueNo(url string) (int, error) {
- // First check if we can handle the TODO.
- var idStr string
- for _, p := range issuePrefixes {
- if str := strings.TrimPrefix(url, p); len(str) < len(url) {
- idStr = str
- break
- }
- }
- if len(idStr) == 0 {
- return 0, nil
- }
-
- id, err := strconv.ParseInt(strings.TrimRight(idStr, "/"), 10, 64)
- if err != nil {
- return 0, err
- }
- return int(id), nil
-}
-
-func processAllPages(fn func(github.ListOptions) (*github.Response, error)) error {
- opts := github.ListOptions{PerPage: 1000}
- for {
- resp, err := fn(opts)
- if err != nil {
- if rateErr, ok := err.(*github.RateLimitError); ok {
- duration := rateErr.Rate.Reset.Sub(time.Now())
- if duration > 5*time.Minute {
- return fmt.Errorf("Rate limited for too long: %v", duration)
- }
- fmt.Printf("Rate limited, sleeping for: %v\n", duration)
- time.Sleep(duration)
- continue
- }
- return err
- }
- if resp.NextPage == 0 {
- return nil
- }
- opts.Page = resp.NextPage
- }
-}
diff --git a/tools/github/reviver/github_test.go b/tools/github/reviver/github_test.go
deleted file mode 100644
index 5df7e3624..000000000
--- a/tools/github/reviver/github_test.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package reviver
-
-import (
- "testing"
-)
-
-func TestParseIssueNo(t *testing.T) {
- testCases := []struct {
- issue string
- expectErr bool
- expected int
- }{
- {
- issue: "gvisor.dev/issue/123",
- expected: 123,
- },
- {
- issue: "gvisor.dev/issue/123/",
- expected: 123,
- },
- {
- issue: "not a url",
- expected: 0,
- },
- {
- issue: "gvisor.dev/issue//",
- expectErr: true,
- },
- }
-
- for _, tc := range testCases {
- t.Run(tc.issue, func(t *testing.T) {
- id, err := parseIssueNo(tc.issue)
- if err != nil && !tc.expectErr {
- t.Errorf("got error: %v", err)
- } else if tc.expected != id {
- t.Errorf("got: %v, want: %v", id, tc.expected)
- }
- })
- }
-}
diff --git a/tools/github/reviver/reviver.go b/tools/github/reviver/reviver.go
deleted file mode 100644
index 2af7f0d59..000000000
--- a/tools/github/reviver/reviver.go
+++ /dev/null
@@ -1,192 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package reviver scans the code looking for TODOs and passes them to registered
-// Buggers to ensure TODOs point to active issues.
-package reviver
-
-import (
- "bufio"
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "regexp"
- "sync"
-)
-
-// regexTodo matches a TODO or FIXME comment.
-var regexTodo = regexp.MustCompile(`(\/\/|#)\s*(TODO|FIXME)\(([a-zA-Z0-9.\/]+)\):\s*(.+)`)
-
-// Bugger is the interface called for every TODO found in the code. If it can handle
-// the TODO, it must return true. If it returns false, the next Bugger is
-// called. If no Bugger handles the TODO, it's dropped on the floor.
-type Bugger interface {
- Activate(todo *Todo) (bool, error)
-}
-
-// Location saves the location where the TODO was found.
-type Location struct {
- Comment string
- File string
- Line uint
-}
-
-// Todo represents a unique TODO. There can be several TODOs pointing to the
-// same issue in the code. They are all grouped together.
-type Todo struct {
- Issue string
- Locations []Location
-}
-
-// Reviver scans the given paths for TODOs and calls Buggers to handle them.
-type Reviver struct {
- paths []string
- buggers []Bugger
-
- mu sync.Mutex
- todos map[string]*Todo
- errs []error
-}
-
-// New creates a new Reviver.
-func New(paths []string, buggers []Bugger) *Reviver {
- return &Reviver{
- paths: paths,
- buggers: buggers,
- todos: map[string]*Todo{},
- }
-}
-
-// Run runs the reviver. It returns all errors found during processing; it does not
-// stop on errors.
-func (r *Reviver) Run() []error {
- // Process each directory in parallel.
- wg := sync.WaitGroup{}
- for _, path := range r.paths {
- wg.Add(1)
- go func(path string) {
- defer wg.Done()
- r.processPath(path, &wg)
- }(path)
- }
-
- wg.Wait()
-
- r.mu.Lock()
- defer r.mu.Unlock()
-
- fmt.Printf("Processing %d TODOs (%d errors)...\n", len(r.todos), len(r.errs))
- dropped := 0
- for _, todo := range r.todos {
- ok, err := r.processTodo(todo)
- if err != nil {
- r.errs = append(r.errs, err)
- }
- if !ok {
- dropped++
- }
- }
- fmt.Printf("Processed %d TODOs, %d were skipped (%d errors)\n", len(r.todos)-dropped, dropped, len(r.errs))
-
- return r.errs
-}
-
-func (r *Reviver) processPath(path string, wg *sync.WaitGroup) {
- fmt.Printf("Processing dir %q\n", path)
- fis, err := ioutil.ReadDir(path)
- if err != nil {
- r.addErr(fmt.Errorf("error processing dir %q: %v", path, err))
- return
- }
-
- for _, fi := range fis {
- childPath := filepath.Join(path, fi.Name())
- switch {
- case fi.Mode().IsDir():
- wg.Add(1)
- go func() {
- defer wg.Done()
- r.processPath(childPath, wg)
- }()
-
- case fi.Mode().IsRegular():
- file, err := os.Open(childPath)
- if err != nil {
- r.addErr(err)
- continue
- }
-
- scanner := bufio.NewScanner(file)
- lineno := uint(0)
- for scanner.Scan() {
- lineno++
- line := scanner.Text()
- if todo := r.processLine(line, childPath, lineno); todo != nil {
- r.addTodo(todo)
- }
- }
- }
- }
-}
-
-func (r *Reviver) processLine(line, path string, lineno uint) *Todo {
- matches := regexTodo.FindStringSubmatch(line)
- if matches == nil {
- return nil
- }
- if len(matches) != 5 {
- panic(fmt.Sprintf("regex returned wrong matches for %q: %v", line, matches))
- }
- return &Todo{
- Issue: matches[3],
- Locations: []Location{
- {
- File: path,
- Line: lineno,
- Comment: matches[4],
- },
- },
- }
-}
-
-func (r *Reviver) addTodo(newTodo *Todo) {
- r.mu.Lock()
- defer r.mu.Unlock()
-
- if todo := r.todos[newTodo.Issue]; todo == nil {
- r.todos[newTodo.Issue] = newTodo
- } else {
- todo.Locations = append(todo.Locations, newTodo.Locations...)
- }
-}
-
-func (r *Reviver) addErr(err error) {
- r.mu.Lock()
- defer r.mu.Unlock()
- r.errs = append(r.errs, err)
-}
-
-func (r *Reviver) processTodo(todo *Todo) (bool, error) {
- for _, bugger := range r.buggers {
- ok, err := bugger.Activate(todo)
- if err != nil {
- return false, err
- }
- if ok {
- return true, nil
- }
- }
- return false, nil
-}
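
The Bugger interface and the Reviver API shown above are small enough to implement against directly. Below is a minimal, hypothetical sketch of a Bugger that only logs TODO locations instead of reopening issues; it assumes the reviver package API exactly as it appears in the deleted file (New, Run, Todo, Location) and is not part of the gVisor tree.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/tools/github/reviver"
)

// logBugger prints each TODO instead of touching any issue tracker.
type logBugger struct{}

// Activate reports the TODO as handled so that no other Bugger runs.
func (logBugger) Activate(todo *reviver.Todo) (bool, error) {
	for _, loc := range todo.Locations {
		fmt.Printf("%s:%d references %s: %s\n", loc.File, loc.Line, todo.Issue, loc.Comment)
	}
	return true, nil
}

func main() {
	rev := reviver.New([]string{"."}, []reviver.Bugger{logBugger{}})
	if errs := rev.Run(); len(errs) > 0 {
		for _, err := range errs {
			fmt.Println("error:", err)
		}
	}
}
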
diff --git a/tools/github/reviver/reviver_test.go b/tools/github/reviver/reviver_test.go
deleted file mode 100644
index 851306c9d..000000000
--- a/tools/github/reviver/reviver_test.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package reviver
-
-import (
- "testing"
-)
-
-func TestProcessLine(t *testing.T) {
- for _, tc := range []struct {
- line string
- want *Todo
- }{
- {
- line: "// TODO(foobar.com/issue/123): comment, bla. blabla.",
- want: &Todo{
- Issue: "foobar.com/issue/123",
- Locations: []Location{
- {Comment: "comment, bla. blabla."},
- },
- },
- },
- {
- line: "// TODO(foobar.com/issues/123): comment, bla. blabla.",
- want: &Todo{
- Issue: "foobar.com/issues/123",
- Locations: []Location{
- {Comment: "comment, bla. blabla."},
- },
- },
- },
- {
- line: "// FIXME(b/123): internal bug",
- want: &Todo{
- Issue: "b/123",
- Locations: []Location{
- {Comment: "internal bug"},
- },
- },
- },
- {
- line: "TODO(issue): not todo",
- },
- {
- line: "FIXME(issue): not todo",
- },
- {
- line: "// TODO (issue): not todo",
- },
- {
- line: "// TODO(issue) not todo",
- },
- {
- line: "// todo(issue): not todo",
- },
- {
- line: "// TODO(issue):",
- },
- } {
- t.Logf("Testing: %s", tc.line)
- r := Reviver{}
- got := r.processLine(tc.line, "test", 0)
- if got == nil {
- if tc.want != nil {
- t.Errorf("failed to process line, want: %+v", tc.want)
- }
- } else {
- if tc.want == nil {
- t.Errorf("expected error, got: %+v", got)
- continue
- }
- if got.Issue != tc.want.Issue {
- t.Errorf("wrong issue, got: %v, want: %v", got.Issue, tc.want.Issue)
- }
- if len(got.Locations) != len(tc.want.Locations) {
- t.Errorf("wrong number of locations, got: %v, want: %v, locations: %+v", len(got.Locations), len(tc.want.Locations), got.Locations)
- }
- for i, wantLoc := range tc.want.Locations {
- if got.Locations[i].Comment != wantLoc.Comment {
- t.Errorf("wrong comment, got: %v, want: %v", got.Locations[i].Comment, wantLoc.Comment)
- }
- }
- }
- }
-}
diff --git a/tools/go_branch.sh b/tools/go_branch.sh
deleted file mode 100755
index 392e40619..000000000
--- a/tools/go_branch.sh
+++ /dev/null
@@ -1,171 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -xeou pipefail
-
-# Remember our current directory.
-declare orig_dir
-orig_dir=$(pwd)
-readonly orig_dir
-
-# Record the current working commit.
-declare head
-head=$(git describe --always)
-readonly head
-
-# Create a temporary working directory, and ensure that this directory and all
-# subdirectories are cleaned up upon exit.
-declare tmp_dir
-tmp_dir=$(mktemp -d)
-readonly tmp_dir
-finish() {
- cd "${orig_dir}" # Leave tmp_dir.
- rm -rf "${tmp_dir}" # Remove all contents.
- git checkout -f "${head}" # Restore commit.
-}
-trap finish EXIT
-
-# Discover the package name from the go.mod file.
-declare module origpwd othersrc
-module=$(cat go.mod | grep -E "^module" | cut -d' ' -f2)
-origpwd=$(pwd)
-othersrc=("go.mod" "go.sum" "AUTHORS" "LICENSE")
-readonly module origpwd othersrc
-
-# Build an amd64 & arm64 gopath.
-declare -r go_amd64="${tmp_dir}/amd64"
-declare -r go_arm64="${tmp_dir}/arm64"
-make build BAZEL_OPTIONS="" TARGETS="//:gopath"
-rsync --recursive --delete --copy-links bazel-bin/gopath/ "${go_amd64}"
-make build BAZEL_OPTIONS=--config=cross-aarch64 TARGETS="//:gopath" 2>/dev/null
-rsync --recursive --delete --copy-links bazel-bin/gopath/ "${go_arm64}"
-
-# Strip irrelevant files, i.e. use only arm64 files from the arm64 build.
-# This is because bazel may generate incorrect files for non-target platforms
-# as a workaround. See pkg/sentry/loader/vdsodata as an example.
-find "${go_amd64}/src/${module}" -name '*_arm64*.go' -exec rm -f {} \;
-find "${go_amd64}/src/${module}" -name '*_arm64*.s' -exec rm -f {} \;
-find "${go_arm64}/src/${module}" -name '*_amd64*.go' -exec rm -f {} \;
-find "${go_arm64}/src/${module}" -name '*_amd64*.s' -exec rm -f {} \;
-
-# See below. The certs.go file is pseudo-random, and therefore will also
-# differ between the branches. Since we merge, it only has to come from one.
-# We arbitrarily keep the one from the amd64 branch, and drop the arm64 one.
-rm -f "${go_arm64}/src/${module}/webhook/pkg/injector/certs.go"
-
-# Check that all files are compatible. This means that if the files exist in
-# both architectures, then they must be identical. The only ones that we expect
-# to exist in a single architecture (due to binary builds) may be different.
-function cross_check() {
- (cd "${1}" && find "src/${module}" -type f | \
- xargs -n 1 -I {} sh -c "diff '${1}/{}' '${2}/{}' 2>/dev/null; test \$? -ne 1")
-}
-cross_check "${go_arm64}" "${go_amd64}"
-cross_check "${go_amd64}" "${go_arm64}"
-
-# Merge the two for a complete set of source files.
-declare -r go_merged="${tmp_dir}/merged"
-rsync --recursive "${go_amd64}/" "${go_merged}"
-rsync --recursive "${go_arm64}/" "${go_merged}"
-
-# We expect to have an existing go branch that we will use as the basis for this
-# commit. That branch may be empty, but it must exist. We search for this branch
-# using the local branch, the "origin" branch, and other remotes, in order.
-git fetch --all
-declare go_branch
-go_branch=$( \
- git show-ref --hash refs/heads/go || \
- git show-ref --hash refs/remotes/origin/go || \
- git show-ref --hash go | head -n 1 \
-)
-readonly go_branch
-
-# Clone the current repository to the temporary directory, and check out the
-# current go_branch directory. We move to the new repository for convenience.
-declare repo_orig
-repo_orig="$(pwd)"
-readonly repo_orig
-declare -r repo_new="${tmp_dir}/repository"
-git clone . "${repo_new}"
-cd "${repo_new}"
-
-# Setup the repository and checkout the branch.
-git config user.email "gvisor-bot@google.com"
-git config user.name "gVisor bot"
-git fetch origin "${go_branch}"
-git checkout -b go "${go_branch}"
-
-# Start working on a merge commit that combines the previous history with the
-# current history. Note that we don't actually want any changes yet.
-#
-# N.B. The git behavior changed at some point and the relevant flag was added
-# to allow for override, so try the old behavior first, then pass the flag.
-git merge --no-commit --strategy ours "${head}" || \
- git merge --allow-unrelated-histories --no-commit --strategy ours "${head}"
-
-# Normalize the permissions on the old branch. Note that they should be
-# normalized if constructed by this tool, but we do so before the rsync.
-find . -type f -exec chmod 0644 {} \;
-find . -type d -exec chmod 0755 {} \;
-
-# Sync the entire gopath. Note that we exclude auto-generated source files that
-# will change here. Otherwise, it adds a tremendous amount of noise to commits.
-# If this file disappears in the future, then presumably we will still delete
-# the underlying directory.
-declare -r gopath="${go_merged}/src/${module}"
-rsync --recursive --delete \
- --exclude .git \
- --exclude webhook/pkg/injector/certs.go \
- "${gopath}/" .
-
-# Add additional files.
-for file in "${othersrc[@]}"; do
- cp "${origpwd}"/"${file}" .
-done
-
-# Construct a new README.md.
-cat > README.md <<EOF
-# gVisor
-
-This branch is a synthetic branch, containing only Go sources, that is
-compatible with standard Go tools. See the master branch for authoritative
-sources and tests.
-EOF
-
-# There are a few solitary files that can get left behind due to the way bazel
-# constructs the gopath target. Note that we don't find all Go files here
-# because they may correspond to unused templates, etc.
-declare -ar binaries=( "runsc" "shim" "webhook" )
-for target in "${binaries[@]}"; do
- mkdir -p "${target}"
- cp "${repo_orig}/${target}"/*.go "${target}/"
-done
-
-# Normalize all permissions. The way bazel constructs the :gopath tree may leave
-# some strange permissions on files. We don't have anything in this tree that
-# should be executable, only the Go source files, README.md, and ${othersrc}.
-find . -type f -exec chmod 0644 {} \;
-find . -type d -exec chmod 0755 {} \;
-
-# Update the current working set and commit.
-# If the current working commit has already been committed to the remote go
-# branch, then we have nothing to commit here. So allow empty commit. This can
-# occur when this script is run in parallel (via pull_request and push events)
-# and the push workflow finishes before the pull_request workflow can run this.
-git add --all && git commit --allow-empty -m "Merge ${head} (automated)"
-
-# Push the branch back to the original repository.
-git remote add orig "${repo_orig}" && git push -f orig go:go
diff --git a/tools/go_generics/BUILD b/tools/go_generics/BUILD
deleted file mode 100644
index 78b636130..000000000
--- a/tools/go_generics/BUILD
+++ /dev/null
@@ -1,20 +0,0 @@
-load("//tools:defs.bzl", "bzl_library", "go_binary")
-
-package(licenses = ["notice"])
-
-go_binary(
- name = "go_generics",
- srcs = [
- "imports.go",
- "main.go",
- "remove.go",
- ],
- visibility = ["//:sandbox"],
- deps = ["//tools/go_generics/globals"],
-)
-
-bzl_library(
- name = "defs_bzl",
- srcs = ["defs.bzl"],
- visibility = ["//visibility:private"],
-)
diff --git a/tools/go_generics/defs.bzl b/tools/go_generics/defs.bzl
deleted file mode 100644
index 50e2546bf..000000000
--- a/tools/go_generics/defs.bzl
+++ /dev/null
@@ -1,127 +0,0 @@
-"""Generics support via go_generics.
-
-A Go template is similar to a Go library, except that it has certain types that
-can be replaced before usage. For example, one could define a templatized List
-struct, whose elements are of type T, then instantiate that template for
-T=segment, where "segment" is the concrete type.
-"""
-
-TemplateInfo = provider(
- "Information about a go_generics template.",
- fields = {
- "unsafe": "whether the template requires unsafe code",
- "types": "required types",
- "opt_types": "optional types",
- "consts": "required consts",
- "opt_consts": "optional consts",
- "deps": "package dependencies",
- "template": "merged template source file",
- },
-)
-
-def _go_template_impl(ctx):
- srcs = ctx.files.srcs
- template = ctx.actions.declare_file(ctx.label.name + "_template.go")
- args = ["-o=%s" % template.path] + [f.path for f in srcs]
-
- ctx.actions.run(
- inputs = srcs,
- outputs = [template],
- mnemonic = "GoGenericsTemplate",
- progress_message = "Building Go template %s" % ctx.label,
- arguments = args,
- executable = ctx.executable._tool,
- )
-
- return [TemplateInfo(
- types = ctx.attr.types,
- opt_types = ctx.attr.opt_types,
- consts = ctx.attr.consts,
- opt_consts = ctx.attr.opt_consts,
- deps = ctx.attr.deps,
- template = template,
- )]
-
-go_template = rule(
- implementation = _go_template_impl,
- attrs = {
- "srcs": attr.label_list(doc = "the list of source files that comprise the template", mandatory = True, allow_files = True),
- "deps": attr.label_list(doc = "the standard dependency list", allow_files = True, cfg = "target"),
- "types": attr.string_list(doc = "the list of generic types in the template that are required to be specified"),
- "opt_types": attr.string_list(doc = "the list of generic types in the template that can but aren't required to be specified"),
- "consts": attr.string_list(doc = "the list of constants in the template that are required to be specified"),
- "opt_consts": attr.string_list(doc = "the list of constants in the template that can but aren't required to be specified"),
- "_tool": attr.label(executable = True, cfg = "host", default = Label("//tools/go_generics/go_merge")),
- },
-)
-
-def _go_template_instance_impl(ctx):
- info = ctx.attr.template[TemplateInfo]
- output = ctx.outputs.out
-
- # Check that all required types are defined.
- for t in info.types:
- if t not in ctx.attr.types:
- fail("Missing value for type %s in %s" % (t, ctx.attr.template.label))
-
- # Check that all defined types are expected by the template.
- for t in ctx.attr.types:
- if (t not in info.types) and (t not in info.opt_types):
- fail("Type %s is not a parameter to %s" % (t, ctx.attr.template.label))
-
- # Check that all required consts are defined.
- for t in info.consts:
- if t not in ctx.attr.consts:
- fail("Missing value for constant %s in %s" % (t, ctx.attr.template.label))
-
- # Check that all defined consts are expected by the template.
- for t in ctx.attr.consts:
- if (t not in info.consts) and (t not in info.opt_consts):
- fail("Const %s is not a parameter to %s" % (t, ctx.attr.template.label))
-
- # Build the argument list.
- args = ["-i=%s" % info.template.path, "-o=%s" % output.path]
- if ctx.attr.package:
- args.append("-p=%s" % ctx.attr.package)
-
- if len(ctx.attr.prefix) > 0:
- args.append("-prefix=%s" % ctx.attr.prefix)
-
- if len(ctx.attr.suffix) > 0:
- args.append("-suffix=%s" % ctx.attr.suffix)
-
- args += [("-t=%s=%s" % (p[0], p[1])) for p in ctx.attr.types.items()]
- args += [("-c=%s=%s" % (p[0], p[1])) for p in ctx.attr.consts.items()]
- args += [("-import=%s=%s" % (p[0], p[1])) for p in ctx.attr.imports.items()]
-
- if ctx.attr.anon:
- args.append("-anon")
-
- ctx.actions.run(
- inputs = [info.template],
- outputs = [output],
- mnemonic = "GoGenericsInstance",
- progress_message = "Building Go template instance %s" % ctx.label,
- arguments = args,
- executable = ctx.executable._tool,
- )
-
- return [DefaultInfo(
- files = depset([output]),
- )]
-
-go_template_instance = rule(
- implementation = _go_template_instance_impl,
- attrs = {
- "template": attr.label(doc = "the label of the template to be instantiated", mandatory = True),
- "prefix": attr.string(doc = "a prefix to be added to globals in the template"),
- "suffix": attr.string(doc = "a suffix to be added to globals in the template"),
- "types": attr.string_dict(doc = "the map from generic type names to concrete ones"),
- "consts": attr.string_dict(doc = "the map from constant names to their values"),
- "imports": attr.string_dict(doc = "the map from imports used in types/consts to their import paths"),
- "anon": attr.bool(doc = "whether anoymous fields should be processed", mandatory = False, default = False),
- "package": attr.string(doc = "the package for the generated source file", mandatory = False),
- "out": attr.output(doc = "output file", mandatory = True),
- "_tool": attr.label(executable = True, cfg = "host", default = Label("//tools/go_generics")),
- },
-)
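
The docstring at the top of the deleted defs.bzl describes a template as Go source whose types are replaced before use. As a purely hypothetical sketch of what such a template source file might look like (the list package, the T placeholder, and the target names are illustrative, not taken from the gVisor tree): the template declares the generic type as an ordinary Go type so the file compiles on its own, and a go_template_instance such as types = {"T": "segment"} later substitutes the concrete type.

// File: list.go (template source for a hypothetical go_template target).
package list

// T is the placeholder element type replaced at instantiation time.
type T interface{}

// List is a minimal templatized list of T values.
type List struct {
	items []T
}

// PushBack appends v to the list.
func (l *List) PushBack(v T) {
	l.items = append(l.items, v)
}
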
diff --git a/tools/go_generics/globals/BUILD b/tools/go_generics/globals/BUILD
deleted file mode 100644
index 38caa3ce7..000000000
--- a/tools/go_generics/globals/BUILD
+++ /dev/null
@@ -1,13 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "globals",
- srcs = [
- "globals_visitor.go",
- "scope.go",
- ],
- stateify = False,
- visibility = ["//tools/go_generics:__pkg__"],
-)
diff --git a/tools/go_generics/globals/globals_visitor.go b/tools/go_generics/globals/globals_visitor.go
deleted file mode 100644
index 883f21ebe..000000000
--- a/tools/go_generics/globals/globals_visitor.go
+++ /dev/null
@@ -1,597 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package globals provides an AST visitor that calls the visit function for all
-// global identifiers.
-package globals
-
-import (
- "fmt"
-
- "go/ast"
- "go/token"
- "path/filepath"
- "strconv"
-)
-
-// globalsVisitor holds the state used while traversing the nodes of a file in
-// search of globals.
-//
-// The visitor does two passes on the global declarations: the first one adds
-// all globals to the global scope (since Go allows references to globals that
-// haven't been declared yet), and the second one calls f() for the definition
-// and uses of globals found in the first pass.
-//
-// The implementation correctly handles cases when globals are aliased by
-// locals; in such cases, f() is not called.
-type globalsVisitor struct {
- // file is the file whose nodes are being visited.
- file *ast.File
-
- // fset is the file set the file being visited belongs to.
- fset *token.FileSet
-
- // f is the visit function to be called when a global symbol is reached.
- f func(*ast.Ident, SymKind)
-
- // scope is the current scope as nodes are visited.
- scope *scope
-
- // processAnon indicates whether we should process anonymous struct fields.
- // It does not perform strict checking on parameter types that share the same name
- // as the global type and therefore will rename them as well.
- processAnon bool
-}
-
-// unexpected is called when an unexpected node appears in the AST. It dumps
-// the location of the associated token and panics because this should only
-// happen when there is a bug in the traversal code.
-func (v *globalsVisitor) unexpected(p token.Pos) {
- panic(fmt.Sprintf("Unable to parse at %v", v.fset.Position(p)))
-}
-
-// pushScope creates a new scope and pushes it to the top of the scope stack.
-func (v *globalsVisitor) pushScope() {
- v.scope = newScope(v.scope)
-}
-
-// popScope removes the scope created by the last call to pushScope.
-func (v *globalsVisitor) popScope() {
- v.scope = v.scope.outer
-}
-
-// visitType is called when an expression is known to be a type, for example,
- // on the first argument of make(). It visits all child nodes and reports
-// any globals.
-func (v *globalsVisitor) visitType(ge ast.Expr) {
- switch e := ge.(type) {
- case *ast.Ident:
- if s := v.scope.deepLookup(e.Name); s != nil && s.scope.isGlobal() {
- v.f(e, s.kind)
- }
-
- case *ast.SelectorExpr:
- id := GetIdent(e.X)
- if id == nil {
- v.unexpected(e.X.Pos())
- }
-
- case *ast.StarExpr:
- v.visitType(e.X)
- case *ast.ParenExpr:
- v.visitType(e.X)
- case *ast.ChanType:
- v.visitType(e.Value)
- case *ast.Ellipsis:
- v.visitType(e.Elt)
- case *ast.ArrayType:
- v.visitExpr(e.Len)
- v.visitType(e.Elt)
- case *ast.MapType:
- v.visitType(e.Key)
- v.visitType(e.Value)
- case *ast.StructType:
- v.visitFields(e.Fields, KindUnknown)
- case *ast.FuncType:
- v.visitFields(e.Params, KindUnknown)
- v.visitFields(e.Results, KindUnknown)
- case *ast.InterfaceType:
- v.visitFields(e.Methods, KindUnknown)
- default:
- v.unexpected(ge.Pos())
- }
-}
-
- // visitFields visits all fields, and adds symbols if kind isn't KindUnknown.
-func (v *globalsVisitor) visitFields(l *ast.FieldList, kind SymKind) {
- if l == nil {
- return
- }
-
- for _, f := range l.List {
- if kind != KindUnknown {
- for _, n := range f.Names {
- v.scope.add(n.Name, kind, n.Pos())
- }
- }
- v.visitType(f.Type)
- if f.Tag != nil {
- tag := ast.NewIdent(f.Tag.Value)
- v.f(tag, KindTag)
- // Replace the tag if updated.
- if tag.Name != f.Tag.Value {
- f.Tag.Value = tag.Name
- }
- }
- }
-}
-
-// visitGenDecl is called when a generic declaration is encountered, for example,
-// on variable, constant and type declarations. It adds all newly defined
-// symbols to the current scope and reports them if the current scope is the
-// global one.
-func (v *globalsVisitor) visitGenDecl(d *ast.GenDecl) {
- switch d.Tok {
- case token.IMPORT:
- case token.TYPE:
- for _, gs := range d.Specs {
- s := gs.(*ast.TypeSpec)
- v.scope.add(s.Name.Name, KindType, s.Name.Pos())
- if v.scope.isGlobal() {
- v.f(s.Name, KindType)
- }
- v.visitType(s.Type)
- }
- case token.CONST, token.VAR:
- kind := KindConst
- if d.Tok == token.VAR {
- kind = KindVar
- }
-
- for _, gs := range d.Specs {
- s := gs.(*ast.ValueSpec)
- if s.Type != nil {
- v.visitType(s.Type)
- }
-
- for _, e := range s.Values {
- v.visitExpr(e)
- }
-
- for _, n := range s.Names {
- if v.scope.isGlobal() {
- v.f(n, kind)
- }
- v.scope.add(n.Name, kind, n.Pos())
- }
- }
- default:
- v.unexpected(d.Pos())
- }
-}
-
-// isViableType determines if the given expression is a viable type expression,
-// that is, if it could be interpreted as a type, for example, sync.Mutex,
-// myType, func(int)int, as opposed to -1, 2 * 2, a + b, etc.
-func (v *globalsVisitor) isViableType(expr ast.Expr) bool {
- switch e := expr.(type) {
- case *ast.Ident:
- // This covers the plain identifier case. When we see it, we
- // have to check if it resolves to a type; if the symbol is not
- // known, we'll claim it's viable as a type.
- s := v.scope.deepLookup(e.Name)
- return s == nil || s.kind == KindType
-
- case *ast.ChanType, *ast.ArrayType, *ast.MapType, *ast.StructType, *ast.FuncType, *ast.InterfaceType, *ast.Ellipsis:
- // This covers the following cases:
- // 1. ChanType:
- // chan T
- // <-chan T
- // chan<- T
- // 2. ArrayType:
- // [Expr]T
- // 3. MapType:
- // map[T]U
- // 4. StructType:
- // struct { Fields }
- // 5. FuncType:
- // func(Fields)Returns
- // 6. Interface:
- // interface { Fields }
- // 7. Ellipsis:
- // ...T
- return true
-
- case *ast.SelectorExpr:
- // The only case in which an expression involving a selector can
- // be a type is if it has the following form X.T, where X is an
- // import, and T is a type exported by X.
- //
- // There's no way to know whether T is a type because we don't
- // parse imports. So we just claim that this is a viable type;
- // it doesn't affect the general result because we don't visit
- // imported symbols.
- id := GetIdent(e.X)
- if id == nil {
- return false
- }
-
- s := v.scope.deepLookup(id.Name)
- return s != nil && s.kind == KindImport
-
- case *ast.StarExpr:
- // This covers the *T case. The expression is a viable type if
- // T is.
- return v.isViableType(e.X)
-
- case *ast.ParenExpr:
- // This covers the (T) case. The expression is a viable type if
- // T is.
- return v.isViableType(e.X)
-
- default:
- return false
- }
-}
-
-// visitCallExpr visits a "call expression" which can be either a
-// function/method call (e.g., f(), pkg.f(), obj.f(), etc.) call or a type
-// conversion (e.g., int32(1), (*sync.Mutex)(ptr), etc.).
-func (v *globalsVisitor) visitCallExpr(e *ast.CallExpr) {
- if v.isViableType(e.Fun) {
- v.visitType(e.Fun)
- } else {
- v.visitExpr(e.Fun)
- }
-
- // If the function being called is new or make, the first argument is
- // a type, so it needs to be visited as such.
- first := 0
- if id := GetIdent(e.Fun); id != nil && (id.Name == "make" || id.Name == "new") {
- if len(e.Args) > 0 {
- v.visitType(e.Args[0])
- }
- first = 1
- }
-
- for i := first; i < len(e.Args); i++ {
- v.visitExpr(e.Args[i])
- }
-}
-
-// visitExpr visits all nodes of an expression, and reports any globals that it
-// finds.
-func (v *globalsVisitor) visitExpr(ge ast.Expr) {
- switch e := ge.(type) {
- case nil:
- case *ast.Ident:
- if s := v.scope.deepLookup(e.Name); s != nil && s.scope.isGlobal() {
- v.f(e, s.kind)
- }
-
- case *ast.BasicLit:
- case *ast.CompositeLit:
- v.visitType(e.Type)
- for _, ne := range e.Elts {
- v.visitExpr(ne)
- }
- case *ast.FuncLit:
- v.pushScope()
- v.visitFields(e.Type.Params, KindParameter)
- v.visitFields(e.Type.Results, KindResult)
- v.visitBlockStmt(e.Body)
- v.popScope()
-
- case *ast.BinaryExpr:
- v.visitExpr(e.X)
- v.visitExpr(e.Y)
-
- case *ast.CallExpr:
- v.visitCallExpr(e)
-
- case *ast.IndexExpr:
- v.visitExpr(e.X)
- v.visitExpr(e.Index)
-
- case *ast.KeyValueExpr:
- v.visitExpr(e.Value)
-
- case *ast.ParenExpr:
- v.visitExpr(e.X)
-
- case *ast.SelectorExpr:
- v.visitExpr(e.X)
- if v.processAnon {
- v.visitExpr(e.Sel)
- }
-
- case *ast.SliceExpr:
- v.visitExpr(e.X)
- v.visitExpr(e.Low)
- v.visitExpr(e.High)
- v.visitExpr(e.Max)
-
- case *ast.StarExpr:
- v.visitExpr(e.X)
-
- case *ast.TypeAssertExpr:
- v.visitExpr(e.X)
- if e.Type != nil {
- v.visitType(e.Type)
- }
-
- case *ast.UnaryExpr:
- v.visitExpr(e.X)
-
- default:
- v.unexpected(ge.Pos())
- }
-}
-
-// GetIdent returns the identifier associated with the given expression by
-// removing parentheses if needed.
-func GetIdent(expr ast.Expr) *ast.Ident {
- switch e := expr.(type) {
- case *ast.Ident:
- return e
- case *ast.ParenExpr:
- return GetIdent(e.X)
- default:
- return nil
- }
-}
-
-// visitStmt visits all nodes of a statement, and reports any globals that it
- // finds. It also adds newly defined or declared symbols to the current scope.
-func (v *globalsVisitor) visitStmt(gs ast.Stmt) {
- switch s := gs.(type) {
- case nil, *ast.BranchStmt, *ast.EmptyStmt:
- case *ast.AssignStmt:
- for _, e := range s.Rhs {
- v.visitExpr(e)
- }
-
- // We visit the LHS after the RHS because the symbols we'll
- // potentially add to the table aren't meant to be visible to
- // the RHS.
- for _, e := range s.Lhs {
- if s.Tok == token.DEFINE {
- if n := GetIdent(e); n != nil {
- v.scope.add(n.Name, KindVar, n.Pos())
- }
- }
- v.visitExpr(e)
- }
-
- case *ast.BlockStmt:
- v.visitBlockStmt(s)
-
- case *ast.DeclStmt:
- v.visitGenDecl(s.Decl.(*ast.GenDecl))
-
- case *ast.DeferStmt:
- v.visitCallExpr(s.Call)
-
- case *ast.ExprStmt:
- v.visitExpr(s.X)
-
- case *ast.ForStmt:
- v.pushScope()
- v.visitStmt(s.Init)
- v.visitExpr(s.Cond)
- v.visitStmt(s.Post)
- v.visitBlockStmt(s.Body)
- v.popScope()
-
- case *ast.GoStmt:
- v.visitCallExpr(s.Call)
-
- case *ast.IfStmt:
- v.pushScope()
- v.visitStmt(s.Init)
- v.visitExpr(s.Cond)
- v.visitBlockStmt(s.Body)
- v.visitStmt(s.Else)
- v.popScope()
-
- case *ast.IncDecStmt:
- v.visitExpr(s.X)
-
- case *ast.LabeledStmt:
- v.visitStmt(s.Stmt)
-
- case *ast.RangeStmt:
- v.pushScope()
- v.visitExpr(s.X)
- if s.Tok == token.DEFINE {
- if n := GetIdent(s.Key); n != nil {
- v.scope.add(n.Name, KindVar, n.Pos())
- }
-
- if n := GetIdent(s.Value); n != nil {
- v.scope.add(n.Name, KindVar, n.Pos())
- }
- }
- v.visitExpr(s.Key)
- v.visitExpr(s.Value)
- v.visitBlockStmt(s.Body)
- v.popScope()
-
- case *ast.ReturnStmt:
- for _, r := range s.Results {
- v.visitExpr(r)
- }
-
- case *ast.SelectStmt:
- for _, ns := range s.Body.List {
- c := ns.(*ast.CommClause)
-
- v.pushScope()
- v.visitStmt(c.Comm)
- for _, bs := range c.Body {
- v.visitStmt(bs)
- }
- v.popScope()
- }
-
- case *ast.SendStmt:
- v.visitExpr(s.Chan)
- v.visitExpr(s.Value)
-
- case *ast.SwitchStmt:
- v.pushScope()
- v.visitStmt(s.Init)
- v.visitExpr(s.Tag)
- for _, ns := range s.Body.List {
- c := ns.(*ast.CaseClause)
- v.pushScope()
- for _, ce := range c.List {
- v.visitExpr(ce)
- }
- for _, bs := range c.Body {
- v.visitStmt(bs)
- }
- v.popScope()
- }
- v.popScope()
-
- case *ast.TypeSwitchStmt:
- v.pushScope()
- v.visitStmt(s.Init)
- v.visitStmt(s.Assign)
- for _, ns := range s.Body.List {
- c := ns.(*ast.CaseClause)
- v.pushScope()
- for _, ce := range c.List {
- v.visitType(ce)
- }
- for _, bs := range c.Body {
- v.visitStmt(bs)
- }
- v.popScope()
- }
- v.popScope()
-
- default:
- v.unexpected(gs.Pos())
- }
-}
-
-// visitBlockStmt visits all statements in the block, adding symbols to a newly
-// created scope.
-func (v *globalsVisitor) visitBlockStmt(s *ast.BlockStmt) {
- v.pushScope()
- for _, c := range s.List {
- v.visitStmt(c)
- }
- v.popScope()
-}
-
-// visitFuncDecl is called when a function or method declaration is encountered.
- // It creates a new scope for the function's optional receiver, parameters, and
- // results, and visits all child nodes.
-func (v *globalsVisitor) visitFuncDecl(d *ast.FuncDecl) {
- // We don't report methods.
- if d.Recv == nil {
- v.f(d.Name, KindFunction)
- }
-
- v.pushScope()
- v.visitFields(d.Recv, KindReceiver)
- v.visitFields(d.Type.Params, KindParameter)
- v.visitFields(d.Type.Results, KindResult)
- if d.Body != nil {
- v.visitBlockStmt(d.Body)
- }
- v.popScope()
-}
-
- // globalsFromGenDecl is called in the first pass, and adds symbols to the
- // global scope.
-func (v *globalsVisitor) globalsFromGenDecl(d *ast.GenDecl) {
- switch d.Tok {
- case token.IMPORT:
- for _, gs := range d.Specs {
- s := gs.(*ast.ImportSpec)
- if s.Name == nil {
- str, _ := strconv.Unquote(s.Path.Value)
- v.scope.add(filepath.Base(str), KindImport, s.Path.Pos())
- } else if s.Name.Name != "_" {
- v.scope.add(s.Name.Name, KindImport, s.Name.Pos())
- }
- }
- case token.TYPE:
- for _, gs := range d.Specs {
- s := gs.(*ast.TypeSpec)
- v.scope.add(s.Name.Name, KindType, s.Name.Pos())
- }
- case token.CONST, token.VAR:
- kind := KindConst
- if d.Tok == token.VAR {
- kind = KindVar
- }
-
- for _, s := range d.Specs {
- for _, n := range s.(*ast.ValueSpec).Names {
- v.scope.add(n.Name, kind, n.Pos())
- }
- }
- default:
- v.unexpected(d.Pos())
- }
-}
-
- // visit implements the visiting of globals. It performs the two passes
- // described in the globalsVisitor struct documentation.
-func (v *globalsVisitor) visit() {
- // Gather all symbols in the global scope. This excludes methods.
- v.pushScope()
- for _, gd := range v.file.Decls {
- switch d := gd.(type) {
- case *ast.GenDecl:
- v.globalsFromGenDecl(d)
- case *ast.FuncDecl:
- if d.Recv == nil {
- v.scope.add(d.Name.Name, KindFunction, d.Name.Pos())
- }
- default:
- v.unexpected(gd.Pos())
- }
- }
-
- // Go through the contents of the declarations.
- for _, gd := range v.file.Decls {
- switch d := gd.(type) {
- case *ast.GenDecl:
- v.visitGenDecl(d)
- case *ast.FuncDecl:
- v.visitFuncDecl(d)
- }
- }
-}
-
-// Visit traverses the provided AST and calls f() for each identifier that
-// refers to global names. The global name must be defined in the file itself.
-//
-// The function f() is allowed to modify the identifier, for example, to rename
-// uses of global references.
-func Visit(fset *token.FileSet, file *ast.File, f func(*ast.Ident, SymKind), processAnon bool) {
- v := globalsVisitor{
- fset: fset,
- file: file,
- f: f,
- processAnon: processAnon,
- }
-
- v.visit()
-}
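
The Visit helper deleted above was the tool's entry point for renaming. As a minimal sketch (not taken from the original tree; the input file name and the "Impl" suffix are assumptions for illustration), it could have been driven like this to suffix every global symbol in a parsed file:

package main

import (
    "go/ast"
    "go/parser"
    "go/token"
    "log"

    "gvisor.dev/gvisor/tools/go_generics/globals"
)

func main() {
    fset := token.NewFileSet()
    f, err := parser.ParseFile(fset, "input.go", nil, parser.ParseComments)
    if err != nil {
        log.Fatal(err)
    }
    // Rename every global type, var, const and function by appending "Impl";
    // methods, locals and shadowed names are left alone by Visit itself.
    globals.Visit(fset, f, func(id *ast.Ident, kind globals.SymKind) {
        switch kind {
        case globals.KindType, globals.KindVar, globals.KindConst, globals.KindFunction:
            id.Name += "Impl"
        }
    }, false /* processAnon */)
}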
diff --git a/tools/go_generics/globals/scope.go b/tools/go_generics/globals/scope.go
deleted file mode 100644
index eec93534b..000000000
--- a/tools/go_generics/globals/scope.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package globals
-
-import (
- "go/token"
-)
-
- // SymKind specifies the kind of a global symbol, for example, a variable,
- // constant, function, etc.
-type SymKind int
-
-// Constants for different kinds of symbols.
-const (
- KindUnknown SymKind = iota
- KindImport
- KindType
- KindVar
- KindConst
- KindFunction
- KindReceiver
- KindParameter
- KindResult
- KindTag
-)
-
-type symbol struct {
- kind SymKind
- pos token.Pos
- scope *scope
-}
-
-type scope struct {
- outer *scope
- syms map[string]*symbol
-}
-
-func newScope(outer *scope) *scope {
- return &scope{
- outer: outer,
- syms: make(map[string]*symbol),
- }
-}
-
-func (s *scope) isGlobal() bool {
- return s.outer == nil
-}
-
-func (s *scope) lookup(n string) *symbol {
- return s.syms[n]
-}
-
-func (s *scope) deepLookup(n string) *symbol {
- for x := s; x != nil; x = x.outer {
- if sym := x.lookup(n); sym != nil {
- return sym
- }
- }
- return nil
-}
-
-func (s *scope) add(name string, kind SymKind, pos token.Pos) {
- if s.syms[name] != nil {
- return
- }
-
- s.syms[name] = &symbol{
- kind: kind,
- pos: pos,
- scope: s,
- }
-}
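
A small hypothetical sketch (it would have had to live inside the deleted globals package, since scope and symbol are unexported) of how the scope stack resolves shadowing: deepLookup prefers the innermost symbol, and add ignores duplicates within a single scope:

package globals

import (
    "go/token"
    "testing"
)

func TestScopeShadowing(t *testing.T) {
    global := newScope(nil)
    global.add("x", KindVar, token.NoPos)

    inner := newScope(global)
    inner.add("x", KindType, token.NoPos)
    inner.add("x", KindConst, token.NoPos) // Ignored: "x" already exists here.

    // The inner symbol shadows the global one.
    if got := inner.deepLookup("x").kind; got != KindType {
        t.Errorf("deepLookup(x) kind = %v, want KindType", got)
    }
    // The outer symbol still belongs to the global scope.
    if !global.lookup("x").scope.isGlobal() {
        t.Error("expected the outer symbol to live in the global scope")
    }
}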
diff --git a/tools/go_generics/go_merge/BUILD b/tools/go_generics/go_merge/BUILD
deleted file mode 100644
index 5e0487e93..000000000
--- a/tools/go_generics/go_merge/BUILD
+++ /dev/null
@@ -1,12 +0,0 @@
-load("//tools:defs.bzl", "go_binary")
-
-package(licenses = ["notice"])
-
-go_binary(
- name = "go_merge",
- srcs = ["main.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//tools/tags",
- ],
-)
diff --git a/tools/go_generics/go_merge/main.go b/tools/go_generics/go_merge/main.go
deleted file mode 100644
index 801f2354f..000000000
--- a/tools/go_generics/go_merge/main.go
+++ /dev/null
@@ -1,150 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
- "bytes"
- "flag"
- "fmt"
- "go/ast"
- "go/format"
- "go/parser"
- "go/token"
- "os"
- "path/filepath"
- "strconv"
- "strings"
-
- "gvisor.dev/gvisor/tools/tags"
-)
-
-var (
- output = flag.String("o", "", "output `file`")
-)
-
-func fatalf(s string, args ...interface{}) {
- fmt.Fprintf(os.Stderr, s, args...)
- os.Exit(1)
-}
-
-func main() {
- flag.Usage = func() {
- fmt.Fprintf(os.Stderr, "Usage: %s [options] <input1> [<input2> ...]\n", os.Args[0])
- flag.PrintDefaults()
- }
-
- flag.Parse()
- if *output == "" || len(flag.Args()) == 0 {
- flag.Usage()
- os.Exit(1)
- }
-
- // Load all files.
- files := make(map[string]*ast.File)
- fset := token.NewFileSet()
- var name string
- for _, fname := range flag.Args() {
- f, err := parser.ParseFile(fset, fname, nil, parser.ParseComments|parser.DeclarationErrors|parser.SpuriousErrors)
- if err != nil {
- fatalf("%v\n", err)
- }
-
- files[fname] = f
- if name == "" {
- name = f.Name.Name
- } else if name != f.Name.Name {
- fatalf("Expected '%s' for package name instead of '%s'.\n", name, f.Name.Name)
- }
- }
-
- // Merge all files into one.
- pkg := &ast.Package{
- Name: name,
- Files: files,
- }
- f := ast.MergePackageFiles(pkg, ast.FilterUnassociatedComments|ast.FilterFuncDuplicates|ast.FilterImportDuplicates)
-
- // Create a new declaration slice with all imports at the top, merging any
- // redundant imports.
- imports := make(map[string]*ast.ImportSpec)
- var importNames []string // Keep imports in the original order to get deterministic output.
- var anonImports []*ast.ImportSpec
- for _, d := range f.Decls {
- if g, ok := d.(*ast.GenDecl); ok && g.Tok == token.IMPORT {
- for _, s := range g.Specs {
- i := s.(*ast.ImportSpec)
- p, _ := strconv.Unquote(i.Path.Value)
- var n string
- if i.Name == nil {
- n = filepath.Base(p)
- } else {
- n = i.Name.Name
- }
- if n == "_" {
- anonImports = append(anonImports, i)
- } else {
- if i2, ok := imports[n]; ok {
- if first, second := i.Path.Value, i2.Path.Value; first != second {
- fatalf("Conflicting paths for import name '%s': '%s' vs. '%s'\n", n, first, second)
- }
- } else {
- imports[n] = i
- importNames = append(importNames, n)
- }
- }
- }
- }
- }
- newDecls := make([]ast.Decl, 0, len(f.Decls))
- if l := len(imports) + len(anonImports); l > 0 {
- // Non-NoPos Lparen is needed for Go to recognize more than one spec in
- // ast.GenDecl.Specs.
- d := &ast.GenDecl{
- Tok: token.IMPORT,
- Lparen: token.NoPos + 1,
- Specs: make([]ast.Spec, 0, l),
- }
- for _, i := range importNames {
- d.Specs = append(d.Specs, imports[i])
- }
- for _, i := range anonImports {
- d.Specs = append(d.Specs, i)
- }
- newDecls = append(newDecls, d)
- }
- for _, d := range f.Decls {
- if g, ok := d.(*ast.GenDecl); !ok || g.Tok != token.IMPORT {
- newDecls = append(newDecls, d)
- }
- }
- f.Decls = newDecls
-
- // Write the output file.
- var buf bytes.Buffer
- if err := format.Node(&buf, fset, f); err != nil {
- fatalf("fomatting: %v\n", err)
- }
- outf, err := os.OpenFile(*output, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
- if err != nil {
- fatalf("opening output: %v\n", err)
- }
- defer outf.Close()
- if t := tags.Aggregate(flag.Args()); len(t) > 0 {
- fmt.Fprintf(outf, "%s\n\n", strings.Join(t.Lines(), "\n"))
- }
- if _, err := outf.Write(buf.Bytes()); err != nil {
- fatalf("write: %v\n", err)
- }
-}
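
The merged import declaration built above depends on giving the synthetic GenDecl a non-NoPos Lparen so the printer emits the parenthesized group. A self-contained sketch (hypothetical; the package name and import paths are arbitrary and nothing here comes from the deleted tool) that mirrors that construction:

package main

import (
    "bytes"
    "go/ast"
    "go/format"
    "go/token"
    "os"
)

func main() {
    d := &ast.GenDecl{
        Tok:    token.IMPORT,
        Lparen: token.NoPos + 1, // A valid (non-zero) Lparen marks a parenthesized import group.
        Specs: []ast.Spec{
            &ast.ImportSpec{Path: &ast.BasicLit{Kind: token.STRING, Value: `"fmt"`}},
            &ast.ImportSpec{Path: &ast.BasicLit{Kind: token.STRING, Value: `"os"`}},
        },
    }
    f := &ast.File{Name: ast.NewIdent("p"), Decls: []ast.Decl{d}}

    // Print the synthetic file; with the Lparen set, the imports come out as
    // a single "import ( ... )" block.
    var buf bytes.Buffer
    if err := format.Node(&buf, token.NewFileSet(), f); err != nil {
        panic(err)
    }
    os.Stdout.Write(buf.Bytes())
}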
diff --git a/tools/go_generics/imports.go b/tools/go_generics/imports.go
deleted file mode 100644
index 370650e46..000000000
--- a/tools/go_generics/imports.go
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
- "bytes"
- "fmt"
- "go/ast"
- "go/format"
- "go/parser"
- "go/token"
- "sort"
- "strconv"
-
- "gvisor.dev/gvisor/tools/go_generics/globals"
-)
-
-type importedPackage struct {
- newName string
- path string
-}
-
-// updateImportIdent modifies the given import identifier with the new name
-// stored in the used map. If the identifier doesn't exist in the used map yet,
-// a new name is generated and inserted into the map.
-func updateImportIdent(orig string, imports mapValue, id *ast.Ident, used map[string]*importedPackage) error {
- importName := id.Name
-
- // If the name is already in the table, just use the new name.
- m := used[importName]
- if m != nil {
- id.Name = m.newName
- return nil
- }
-
- // Create a new entry in the used map.
- path := imports[importName]
- if path == "" {
- return fmt.Errorf("unknown path to package '%s', used in '%s'", importName, orig)
- }
-
- m = &importedPackage{
- newName: fmt.Sprintf("__generics_imported%d", len(used)),
- path: strconv.Quote(path),
- }
- used[importName] = m
-
- id.Name = m.newName
-
- return nil
-}
-
-// convertExpression creates a new string that is a copy of the input one with
-// all imports references renamed to the names in the "used" map. If the
-// referenced import isn't in "used" yet, a new one is created based on the path
-// in "imports" and stored in "used". For example, if string s is
-// "math.MaxUint32-math.MaxUint16+10", it would be converted to
-// "x.MaxUint32-x.MathUint16+10", where x is a generated name.
-func convertExpression(s string, imports mapValue, used map[string]*importedPackage) (string, error) {
- // Parse the expression in the input string.
- expr, err := parser.ParseExpr(s)
- if err != nil {
- return "", fmt.Errorf("unable to parse \"%s\": %v", s, err)
- }
-
- // Go through the AST and update references.
- var retErr error
- ast.Inspect(expr, func(n ast.Node) bool {
- switch x := n.(type) {
- case *ast.SelectorExpr:
- if id := globals.GetIdent(x.X); id != nil {
- if err := updateImportIdent(s, imports, id, used); err != nil {
- retErr = err
- }
- return false
- }
- }
- return true
- })
- if retErr != nil {
- return "", retErr
- }
-
- // Convert the modified AST back to a string.
- fset := token.NewFileSet()
- var buf bytes.Buffer
- if err := format.Node(&buf, fset, expr); err != nil {
- return "", err
- }
-
- return string(buf.Bytes()), nil
-}
-
-// updateImports replaces all maps in the input slice with copies where the
-// mapped values have had all references to imported packages renamed to
-// generated names. It also returns an import declaration for all the renamed
-// import packages.
-//
- // For example, if the input maps contain A=math.B and C=math.D, the updated
- // maps will instead contain A=__generics_imported0.B and
- // C=__generics_imported0.D, and the 'import __generics_imported0 "math"' would
-// be returned as the import declaration.
-func updateImports(maps []mapValue, imports mapValue) (ast.Decl, error) {
- importsUsed := make(map[string]*importedPackage)
-
- // Update all maps.
- for i, m := range maps {
- newMap := make(mapValue)
- for n, e := range m {
- updated, err := convertExpression(e, imports, importsUsed)
- if err != nil {
- return nil, err
- }
-
- newMap[n] = updated
- }
- maps[i] = newMap
- }
-
- // Nothing else to do if no imports are used in the expressions.
- if len(importsUsed) == 0 {
- return nil, nil
- }
- var names []string
- for n := range importsUsed {
- names = append(names, n)
- }
- // Sort the new imports for deterministic build outputs.
- sort.Strings(names)
-
- // Create spec array for each new import.
- specs := make([]ast.Spec, 0, len(importsUsed))
- for _, n := range names {
- i := importsUsed[n]
- specs = append(specs, &ast.ImportSpec{
- Name: &ast.Ident{Name: i.newName},
- Path: &ast.BasicLit{Value: i.path},
- })
- }
-
- return &ast.GenDecl{
- Tok: token.IMPORT,
- Specs: specs,
- Lparen: token.NoPos + 1,
- }, nil
-}
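
A hypothetical illustration (not part of the deleted tool; it would have had to sit next to imports.go in the same main package, since the helpers are unexported) of what convertExpression above produces for a -t/-c value that references an imported package:

package main

import "fmt"

func exampleConvert() {
    imports := mapValue{"math": "math"}
    used := make(map[string]*importedPackage)
    out, err := convertExpression("math.MaxUint32 - math.MaxUint16", imports, used)
    if err != nil {
        panic(err)
    }
    // out is "__generics_imported0.MaxUint32 - __generics_imported0.MaxUint16";
    // used["math"] now records the generated name and the quoted path "math".
    fmt.Println(out)
}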
diff --git a/tools/go_generics/main.go b/tools/go_generics/main.go
deleted file mode 100644
index 30584006c..000000000
--- a/tools/go_generics/main.go
+++ /dev/null
@@ -1,286 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// go_generics reads a Go source file and writes a new version of that file with
- // a few transformations applied. Namely:
-//
-// 1. Global types can be explicitly renamed with the -t option. For example,
-// if -t=A=B is passed in, all references to A will be replaced with
-// references to B; a function declaration like:
-//
-// func f(arg *A)
-//
-// would be renamed to:
-//
-// func f(arg *B)
-//
-// 2. Global type definitions and their method sets will be removed when they're
-// being renamed with -t. For example, if -t=A=B is passed in, the following
-// definition and methods that existed in the input file wouldn't exist at
-// all in the output file:
-//
-// type A struct{}
-//
-// func (*A) f() {}
-//
-// 3. All global types, variables, constants and functions (not methods) are
-// prefixed and suffixed based on the option -prefix and -suffix arguments.
-// For example, if -suffix=A is passed in, the following globals:
-//
-// func f()
-// type t struct{}
-//
-// would be renamed to:
-//
-// func fA()
-// type tA struct{}
-//
-// Some special tags are also modified. For example:
-//
-// "state:.(t)"
-//
-// would become:
-//
-// "state:.(tA)"
-//
- // 4. The package is renamed to the value passed via the -p argument.
- // 5. The values of constants can be modified with the -c argument.
-//
- // Note that not just the top-level declarations are renamed; all references to
- // them are properly renamed as well, taking into account visibility rules
-// and shadowing. For example, if -suffix=A is passed in, the following:
-//
-// var b = 100
-//
-// func f() {
-// g(b)
-// b := 0
-// g(b)
-// }
-//
-// Would be replaced with:
-//
-// var bA = 100
-//
-// func f() {
-// g(bA)
-// b := 0
-// g(b)
-// }
-//
-// Note that the second call to g() kept "b" as an argument because it refers to
-// the local variable "b".
-//
-// Note that go_generics can handle anonymous fields with renamed types if
- // -anon is passed in; however, it does not perform strict checking on parameter
- // types that share the same name as the global type and will therefore rename
-// them as well.
-//
-// You can see an example in the tools/go_generics/generics_tests/interface test.
-package main
-
-import (
- "bytes"
- "flag"
- "fmt"
- "go/ast"
- "go/format"
- "go/parser"
- "go/token"
- "io/ioutil"
- "os"
- "regexp"
- "strings"
-
- "gvisor.dev/gvisor/tools/go_generics/globals"
-)
-
-var (
- input = flag.String("i", "", "input `file`")
- output = flag.String("o", "", "output `file`")
- suffix = flag.String("suffix", "", "`suffix` to add to each global symbol")
- prefix = flag.String("prefix", "", "`prefix` to add to each global symbol")
- packageName = flag.String("p", "main", "output package `name`")
- printAST = flag.Bool("ast", false, "prints the AST")
- processAnon = flag.Bool("anon", false, "process anonymous fields")
- types = make(mapValue)
- consts = make(mapValue)
- imports = make(mapValue)
-)
-
-// mapValue implements flag.Value. We use a mapValue flag instead of a regular
-// string flag when we want to allow more than one instance of the flag. For
-// example, we allow several "-t A=B" arguments, and will rename them all.
-type mapValue map[string]string
-
-func (m mapValue) String() string {
- var b bytes.Buffer
- first := true
- for k, v := range m {
- if !first {
- b.WriteRune(',')
- } else {
- first = false
- }
- b.WriteString(k)
- b.WriteRune('=')
- b.WriteString(v)
- }
- return b.String()
-}
-
-func (m mapValue) Set(s string) error {
- sep := strings.Index(s, "=")
- if sep == -1 {
- return fmt.Errorf("missing '=' from '%s'", s)
- }
-
- m[s[:sep]] = s[sep+1:]
-
- return nil
-}
-
-// stateTagRegexp matches against the 'typed' state tags.
-var stateTagRegexp = regexp.MustCompile(`^(.*[^a-z0-9_])state:"\.\(([^\)]*)\)"(.*)$`)
-
-var identifierRegexp = regexp.MustCompile(`^(.*[^a-zA-Z_])([a-zA-Z_][a-zA-Z0-9_]*)(.*)$`)
-
-func main() {
- flag.Usage = func() {
- fmt.Fprintf(os.Stderr, "Usage: %s [options]\n", os.Args[0])
- flag.PrintDefaults()
- }
-
- flag.Var(types, "t", "rename type A to B when `A=B` is passed in. Multiple such mappings are allowed.")
- flag.Var(consts, "c", "reassign constant A to value B when `A=B` is passed in. Multiple such mappings are allowed.")
- flag.Var(imports, "import", "specifies the import libraries to use when types are not local. `name=path` specifies that 'name', used in types as name.type, refers to the package living in 'path'.")
- flag.Parse()
-
- if *input == "" || *output == "" {
- flag.Usage()
- os.Exit(1)
- }
-
- // Parse the input file.
- fset := token.NewFileSet()
- f, err := parser.ParseFile(fset, *input, nil, parser.ParseComments|parser.DeclarationErrors|parser.SpuriousErrors)
- if err != nil {
- fmt.Fprintf(os.Stderr, "%v\n", err)
- os.Exit(1)
- }
-
- // Print the AST if requested.
- if *printAST {
- ast.Print(fset, f)
- }
-
- cmap := ast.NewCommentMap(fset, f, f.Comments)
-
- // Update imports based on what's used in types and consts.
- maps := []mapValue{types, consts}
- importDecl, err := updateImports(maps, imports)
- if err != nil {
- fmt.Fprintf(os.Stderr, "%v\n", err)
- os.Exit(1)
- }
- types = maps[0]
- consts = maps[1]
-
- // Reassign all specified constants.
- for _, decl := range f.Decls {
- d, ok := decl.(*ast.GenDecl)
- if !ok || d.Tok != token.CONST {
- continue
- }
-
- for _, gs := range d.Specs {
- s := gs.(*ast.ValueSpec)
- for i, id := range s.Names {
- if n, ok := consts[id.Name]; ok {
- s.Values[i] = &ast.BasicLit{Value: n}
- }
- }
- }
- }
-
- // Go through all globals and their uses in the AST and rename the types
- // with explicitly provided names, and rename all types, variables,
- // consts and functions with the provided prefix and suffix.
- globals.Visit(fset, f, func(ident *ast.Ident, kind globals.SymKind) {
- if n, ok := types[ident.Name]; ok && kind == globals.KindType {
- ident.Name = n
- } else {
- switch kind {
- case globals.KindType, globals.KindVar, globals.KindConst, globals.KindFunction:
- if ident.Name != "_" && !(ident.Name == "init" && kind == globals.KindFunction) {
- ident.Name = *prefix + ident.Name + *suffix
- }
- case globals.KindTag:
- // Modify the state tag appropriately.
- if m := stateTagRegexp.FindStringSubmatch(ident.Name); m != nil {
- if t := identifierRegexp.FindStringSubmatch(m[2]); t != nil {
- typeName := *prefix + t[2] + *suffix
- if n, ok := types[t[2]]; ok {
- typeName = n
- }
- ident.Name = m[1] + `state:".(` + t[1] + typeName + t[3] + `)"` + m[3]
- }
- }
- }
- }
- }, *processAnon)
-
- // Remove the definition of all types that are being remapped.
- set := make(typeSet)
- for _, v := range types {
- set[v] = struct{}{}
- }
- removeTypes(set, f)
-
- // Add the new imports, if any, to the top.
- if importDecl != nil {
- newDecls := make([]ast.Decl, 0, len(f.Decls)+1)
- newDecls = append(newDecls, importDecl)
- newDecls = append(newDecls, f.Decls...)
- f.Decls = newDecls
- }
-
- // Update comments to remove the ones potentially associated with the
- // type T that we removed.
- f.Comments = cmap.Filter(f).Comments()
-
- // If there are file (package) comments, delete them.
- if f.Doc != nil {
- for i, cg := range f.Comments {
- if cg == f.Doc {
- f.Comments = append(f.Comments[:i], f.Comments[i+1:]...)
- break
- }
- }
- }
-
- // Write the output file.
- f.Name.Name = *packageName
-
- var buf bytes.Buffer
- if err := format.Node(&buf, fset, f); err != nil {
- fmt.Fprintf(os.Stderr, "%v\n", err)
- os.Exit(1)
- }
-
- if err := ioutil.WriteFile(*output, buf.Bytes(), 0644); err != nil {
- fmt.Fprintf(os.Stderr, "%v\n", err)
- os.Exit(1)
- }
-}
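
A simplified, self-contained sketch of the state-tag rewrite performed above; it omits the identifierRegexp pass, and the "New" suffix and the sample tag are assumptions made purely for illustration:

package main

import (
    "fmt"
    "regexp"
)

// stateTag mirrors stateTagRegexp above: it pulls the type name out of a
// `state:".(T)"` struct tag so it can be rewritten.
var stateTag = regexp.MustCompile(`^(.*[^a-z0-9_])state:"\.\(([^\)]*)\)"(.*)$`)

func main() {
    tag := "`json:\"x\" state:\".(t)\"`"
    if m := stateTag.FindStringSubmatch(tag); m != nil {
        // Rewrites the embedded type name inside the state tag, e.g.
        // state:".(t)" becomes state:".(tNew)".
        fmt.Println(m[1] + `state:".(` + m[2] + `New)"` + m[3])
    }
}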
diff --git a/tools/go_generics/remove.go b/tools/go_generics/remove.go
deleted file mode 100644
index 568a6bbd3..000000000
--- a/tools/go_generics/remove.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
- "go/ast"
- "go/token"
-)
-
-type typeSet map[string]struct{}
-
-// isTypeOrPointerToType determines if the given AST expression represents a
-// type or a pointer to a type that exists in the provided type set.
-func isTypeOrPointerToType(set typeSet, expr ast.Expr, starCount int) bool {
- switch e := expr.(type) {
- case *ast.Ident:
- _, ok := set[e.Name]
- return ok
- case *ast.StarExpr:
- if starCount > 1 {
- return false
- }
- return isTypeOrPointerToType(set, e.X, starCount+1)
- case *ast.ParenExpr:
- return isTypeOrPointerToType(set, e.X, starCount)
- default:
- return false
- }
-}
-
-// isMethodOf determines if the given function declaration is a method of one
-// of the types in the provided type set. To do that, it checks if the function
-// has a receiver and that its type is either T or *T, where T is a type that
-// exists in the set. This is per the spec:
-//
-// That parameter section must declare a single parameter, the receiver. Its
-// type must be of the form T or *T (possibly using parentheses) where T is a
-// type name. The type denoted by T is called the receiver base type; it must
-// not be a pointer or interface type and it must be declared in the same
-// package as the method.
-func isMethodOf(set typeSet, f *ast.FuncDecl) bool {
- // If the function doesn't have exactly one receiver, then it's
- // definitely not a method.
- if f.Recv == nil || len(f.Recv.List) != 1 {
- return false
- }
-
- return isTypeOrPointerToType(set, f.Recv.List[0].Type, 0)
-}
-
-// removeTypeDefinitions removes the definition of all types contained in the
-// provided type set.
-func removeTypeDefinitions(set typeSet, d *ast.GenDecl) {
- if d.Tok != token.TYPE {
- return
- }
-
- i := 0
- for _, gs := range d.Specs {
- s := gs.(*ast.TypeSpec)
- if _, ok := set[s.Name.Name]; !ok {
- d.Specs[i] = gs
- i++
- }
- }
-
- d.Specs = d.Specs[:i]
-}
-
-// removeTypes removes from the AST the definition of all types and their
-// method sets that are contained in the provided type set.
-func removeTypes(set typeSet, f *ast.File) {
- // Go through the top-level declarations.
- i := 0
- for _, decl := range f.Decls {
- keep := true
- switch d := decl.(type) {
- case *ast.GenDecl:
- countBefore := len(d.Specs)
- removeTypeDefinitions(set, d)
- keep = countBefore == 0 || len(d.Specs) > 0
- case *ast.FuncDecl:
- keep = !isMethodOf(set, d)
- }
-
- if keep {
- f.Decls[i] = decl
- i++
- }
- }
-
- f.Decls = f.Decls[:i]
-}
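
A hypothetical, in-package illustration of removeTypes above (not from the deleted tree; the source snippet is made up): the definition of T and its method set are dropped, while unrelated declarations survive.

package main

import (
    "fmt"
    "go/parser"
    "go/token"
)

func exampleRemove() {
    src := `package p

type T struct{}

func (t *T) M() {}

func Keep() {}
`
    fset := token.NewFileSet()
    f, err := parser.ParseFile(fset, "x.go", src, 0)
    if err != nil {
        panic(err)
    }
    // Remove type T, its now-empty type declaration, and its method M.
    removeTypes(typeSet{"T": {}}, f)
    fmt.Println(len(f.Decls)) // 1: only Keep survives.
}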
diff --git a/tools/go_generics/rules_tests/BUILD b/tools/go_generics/rules_tests/BUILD
deleted file mode 100644
index 8a329dfc6..000000000
--- a/tools/go_generics/rules_tests/BUILD
+++ /dev/null
@@ -1,43 +0,0 @@
-load("//tools:defs.bzl", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "instance",
- out = "instance_test.go",
- consts = {
- "n": "20",
- "m": "\"test\"",
- "o": "math.MaxUint64",
- },
- imports = {
- "math": "math",
- },
- package = "template_test",
- template = ":test_template",
- types = {
- "t": "int",
- },
-)
-
-go_template(
- name = "test_template",
- srcs = [
- "template.go",
- ],
- opt_consts = [
- "n",
- "m",
- "o",
- ],
- opt_types = ["t"],
-)
-
-go_test(
- name = "template_test",
- srcs = [
- "instance_test.go",
- "template_test.go",
- ],
-)
diff --git a/tools/go_generics/rules_tests/template.go b/tools/go_generics/rules_tests/template.go
deleted file mode 100644
index aace61da1..000000000
--- a/tools/go_generics/rules_tests/template.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package template
-
-type t float
-
-const (
- n t = 10.1
- m = "abc"
- o = 0
-)
-
-func max(a, b t) t {
- if a > b {
- return a
- }
- return b
-}
-
-func add(a t) t {
- return a + n
-}
-
-func getName() string {
- return m
-}
-
-func getMax() uint64 {
- return o
-}
diff --git a/tools/go_generics/rules_tests/template_test.go b/tools/go_generics/rules_tests/template_test.go
deleted file mode 100644
index b2a3446ef..000000000
--- a/tools/go_generics/rules_tests/template_test.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package template_test
-
-import (
- "math"
- "testing"
-)
-
-func TestMax(t *testing.T) {
- var a int = max(10, 20)
- if a != 20 {
- t.Errorf("Bad result of max, got %v, want %v", a, 20)
- }
-}
-
-func TestIntConst(t *testing.T) {
- var a int = add(10)
- if a != 30 {
- t.Errorf("Bad result of add, got %v, want %v", a, 30)
- }
-}
-
-func TestStrConst(t *testing.T) {
- v := getName()
- if v != "test" {
- t.Errorf("Bad name, got %v, want %v", v, "test")
- }
-}
-
-func TestImport(t *testing.T) {
- v := getMax()
- if v != math.MaxUint64 {
- t.Errorf("Bad max value, got %v, want %v", v, uint64(math.MaxUint64))
- }
-}
diff --git a/tools/go_generics/tests/BUILD b/tools/go_generics/tests/BUILD
deleted file mode 100644
index 7547a6b53..000000000
--- a/tools/go_generics/tests/BUILD
+++ /dev/null
@@ -1,7 +0,0 @@
-load("//tools:defs.bzl", "bzl_library")
-
-bzl_library(
- name = "defs_bzl",
- srcs = ["defs.bzl"],
- visibility = ["//visibility:private"],
-)
diff --git a/tools/go_generics/tests/all_stmts/BUILD b/tools/go_generics/tests/all_stmts/BUILD
deleted file mode 100644
index a4a7c775a..000000000
--- a/tools/go_generics/tests/all_stmts/BUILD
+++ /dev/null
@@ -1,16 +0,0 @@
-load("//tools/go_generics/tests:defs.bzl", "go_generics_test")
-
-go_generics_test(
- name = "all_stmts",
- inputs = ["input.go"],
- output = "output.go",
- types = {
- "T": "Q",
- },
-)
-
-# @unused
-glaze_ignore = [
- "input.go",
- "output.go",
-]
diff --git a/tools/go_generics/tests/all_stmts/input.go b/tools/go_generics/tests/all_stmts/input.go
deleted file mode 100644
index 7ebe7c40e..000000000
--- a/tools/go_generics/tests/all_stmts/input.go
+++ /dev/null
@@ -1,292 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tests
-
-import (
- "sync"
-)
-
-type T int
-
-func h(T) {
-}
-
-type s struct {
- a, b int
- c []int
-}
-
-func g(T) *s {
- return &s{}
-}
-
-func f() (T, []int) {
- // Branch.
- goto T
- goto R
-
- // Labeled.
-T:
- _ = T(0)
-
- // Empty.
-R:
- ;
-
- // Assignment with definition.
- a, b, c := T(1), T(2), T(3)
- _, _, _ = a, b, c
-
- // Assignment without definition.
- g(T(0)).a, g(T(1)).b, c = int(T(1)), int(T(2)), T(3)
- _, _, _ = a, b, c
-
- // Block.
- {
- var T T
- T = 0
- _ = T
- }
-
- // Declarations.
- type Type T
- const Const T = 10
- var g1 func(T, int, ...T) (int, T)
- var v T
- var w = T(0)
- {
- var T struct {
- f []T
- }
- _ = T
- }
-
- // Defer.
- defer g1(T(0), 1)
-
- // Expression.
- h(v + w + T(1))
-
- // For statements.
- for i := T(0); i < T(10); i++ {
- var T func(int) T
- v := T(0)
- _ = v
- }
-
- for {
- var T func(int) T
- v := T(0)
- _ = v
- }
-
- // Go.
- go g1(T(0), 1)
-
- // If statements.
- if a != T(1) {
- var T func(int) T
- v := T(0)
- _ = v
- }
-
- if a := T(0); a != T(1) {
- var T func(int) T
- v := T(0)
- _ = v
- }
-
- if a := T(0); a != T(1) {
- var T func(int) T
- v := T(0)
- _ = v
- } else if b := T(0); b != T(1) {
- var T func(int) T
- v := T(0)
- _ = v
- } else if T := T(0); T != 1 {
- T++
- _ = T
- } else {
- T--
- _ = T
- }
-
- if a := T(0); a != T(1) {
- var T func(int) T
- v := T(0)
- _ = v
- } else {
- var T func(int) T
- v := T(0)
- _ = v
- }
-
- // Inc/Dec statements.
- (*(*T)(nil))++
- (*(*T)(nil))--
-
- // Range statements.
- for g(T(0)).a, g(T(1)).b = range g(T(10)).c {
- var d T
- _ = d
- }
-
- for T, b := range g(T(10)).c {
- _ = T
- _ = b
- }
-
- // Select statement.
- {
- var fch func(T) chan int
-
- select {
- case <-fch(T(30)):
- var T T
- T = 0
- _ = T
- default:
- var T T
- T = 0
- _ = T
- case T := <-fch(T(30)):
- T = 0
- _ = T
- case g(T(0)).a = <-fch(T(30)):
- var T T
- T = 0
- _ = T
- case fch(T(30)) <- int(T(0)):
- var T T
- T = 0
- _ = T
- }
- }
-
- // Send statements.
- {
- var ch chan T
- var fch func(T) chan int
-
- ch <- T(0)
- fch(T(1)) <- g(T(10)).a
- }
-
- // Switch statements.
- {
- var a T
- var b int
- switch {
- case a == T(0):
- var T T
- T = 0
- _ = T
- case a < T(0), b < g(T(10)).a:
- var T T
- T = 0
- _ = T
- default:
- var T T
- T = 0
- _ = T
- }
- }
-
- switch T(g(T(10)).a) {
- case T(0):
- var T T
- T = 0
- _ = T
- case T(1), T(g(T(10)).a):
- var T T
- T = 0
- _ = T
- default:
- var T T
- T = 0
- _ = T
- }
-
- switch b := g(T(10)); T(b.a) + T(10) {
- case T(0):
- var T T
- T = 0
- _ = T
- case T(1), T(g(T(10)).a):
- var T T
- T = 0
- _ = T
- default:
- var T T
- T = 0
- _ = T
- }
-
- // Type switch statements.
- {
- var interfaceFunc func(T) interface{}
-
- switch interfaceFunc(T(0)).(type) {
- case *T, T, int:
- var T T
- T = 0
- _ = T
- case sync.Mutex, **T:
- var T T
- T = 0
- _ = T
- default:
- var T T
- T = 0
- _ = T
- }
-
- switch x := interfaceFunc(T(0)).(type) {
- case *T, T, int:
- var T T
- T = 0
- _ = T
- _ = x
- case sync.Mutex, **T:
- var T T
- T = 0
- _ = T
- default:
- var T T
- T = 0
- _ = T
- }
-
- switch t := T(0); x := interfaceFunc(T(0) + t).(type) {
- case *T, T, int:
- var T T
- T = 0
- _ = T
- _ = x
- case sync.Mutex, **T:
- var T T
- T = 0
- _ = T
- default:
- var T T
- T = 0
- _ = T
- }
- }
-
- // Return statement.
- return T(10), g(T(11)).c
-}
diff --git a/tools/go_generics/tests/all_stmts/output.go b/tools/go_generics/tests/all_stmts/output.go
deleted file mode 100644
index a33944d85..000000000
--- a/tools/go_generics/tests/all_stmts/output.go
+++ /dev/null
@@ -1,290 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
- "sync"
-)
-
-func h(Q) {
-}
-
-type s struct {
- a, b int
- c []int
-}
-
-func g(Q) *s {
- return &s{}
-}
-
-func f() (Q, []int) {
- // Branch.
- goto T
- goto R
-
- // Labeled.
-T:
- _ = Q(0)
-
- // Empty.
-R:
- ;
-
- // Assignment with definition.
- a, b, c := Q(1), Q(2), Q(3)
- _, _, _ = a, b, c
-
- // Assignment without definition.
- g(Q(0)).a, g(Q(1)).b, c = int(Q(1)), int(Q(2)), Q(3)
- _, _, _ = a, b, c
-
- // Block.
- {
- var T Q
- T = 0
- _ = T
- }
-
- // Declarations.
- type Type Q
- const Const Q = 10
- var g1 func(Q, int, ...Q) (int, Q)
- var v Q
- var w = Q(0)
- {
- var T struct {
- f []Q
- }
- _ = T
- }
-
- // Defer.
- defer g1(Q(0), 1)
-
- // Expression.
- h(v + w + Q(1))
-
- // For statements.
- for i := Q(0); i < Q(10); i++ {
- var T func(int) Q
- v := T(0)
- _ = v
- }
-
- for {
- var T func(int) Q
- v := T(0)
- _ = v
- }
-
- // Go.
- go g1(Q(0), 1)
-
- // If statements.
- if a != Q(1) {
- var T func(int) Q
- v := T(0)
- _ = v
- }
-
- if a := Q(0); a != Q(1) {
- var T func(int) Q
- v := T(0)
- _ = v
- }
-
- if a := Q(0); a != Q(1) {
- var T func(int) Q
- v := T(0)
- _ = v
- } else if b := Q(0); b != Q(1) {
- var T func(int) Q
- v := T(0)
- _ = v
- } else if T := Q(0); T != 1 {
- T++
- _ = T
- } else {
- T--
- _ = T
- }
-
- if a := Q(0); a != Q(1) {
- var T func(int) Q
- v := T(0)
- _ = v
- } else {
- var T func(int) Q
- v := T(0)
- _ = v
- }
-
- // Inc/Dec statements.
- (*(*Q)(nil))++
- (*(*Q)(nil))--
-
- // Range statements.
- for g(Q(0)).a, g(Q(1)).b = range g(Q(10)).c {
- var d Q
- _ = d
- }
-
- for T, b := range g(Q(10)).c {
- _ = T
- _ = b
- }
-
- // Select statement.
- {
- var fch func(Q) chan int
-
- select {
- case <-fch(Q(30)):
- var T Q
- T = 0
- _ = T
- default:
- var T Q
- T = 0
- _ = T
- case T := <-fch(Q(30)):
- T = 0
- _ = T
- case g(Q(0)).a = <-fch(Q(30)):
- var T Q
- T = 0
- _ = T
- case fch(Q(30)) <- int(Q(0)):
- var T Q
- T = 0
- _ = T
- }
- }
-
- // Send statements.
- {
- var ch chan Q
- var fch func(Q) chan int
-
- ch <- Q(0)
- fch(Q(1)) <- g(Q(10)).a
- }
-
- // Switch statements.
- {
- var a Q
- var b int
- switch {
- case a == Q(0):
- var T Q
- T = 0
- _ = T
- case a < Q(0), b < g(Q(10)).a:
- var T Q
- T = 0
- _ = T
- default:
- var T Q
- T = 0
- _ = T
- }
- }
-
- switch Q(g(Q(10)).a) {
- case Q(0):
- var T Q
- T = 0
- _ = T
- case Q(1), Q(g(Q(10)).a):
- var T Q
- T = 0
- _ = T
- default:
- var T Q
- T = 0
- _ = T
- }
-
- switch b := g(Q(10)); Q(b.a) + Q(10) {
- case Q(0):
- var T Q
- T = 0
- _ = T
- case Q(1), Q(g(Q(10)).a):
- var T Q
- T = 0
- _ = T
- default:
- var T Q
- T = 0
- _ = T
- }
-
- // Type switch statements.
- {
- var interfaceFunc func(Q) interface{}
-
- switch interfaceFunc(Q(0)).(type) {
- case *Q, Q, int:
- var T Q
- T = 0
- _ = T
- case sync.Mutex, **Q:
- var T Q
- T = 0
- _ = T
- default:
- var T Q
- T = 0
- _ = T
- }
-
- switch x := interfaceFunc(Q(0)).(type) {
- case *Q, Q, int:
- var T Q
- T = 0
- _ = T
- _ = x
- case sync.Mutex, **Q:
- var T Q
- T = 0
- _ = T
- default:
- var T Q
- T = 0
- _ = T
- }
-
- switch t := Q(0); x := interfaceFunc(Q(0) + t).(type) {
- case *Q, Q, int:
- var T Q
- T = 0
- _ = T
- _ = x
- case sync.Mutex, **Q:
- var T Q
- T = 0
- _ = T
- default:
- var T Q
- T = 0
- _ = T
- }
- }
-
- // Return statement.
- return Q(10), g(Q(11)).c
-}
diff --git a/tools/go_generics/tests/all_types/BUILD b/tools/go_generics/tests/all_types/BUILD
deleted file mode 100644
index 60b1fd314..000000000
--- a/tools/go_generics/tests/all_types/BUILD
+++ /dev/null
@@ -1,16 +0,0 @@
-load("//tools/go_generics/tests:defs.bzl", "go_generics_test")
-
-go_generics_test(
- name = "all_types",
- inputs = ["input.go"],
- output = "output.go",
- types = {
- "T": "Q",
- },
-)
-
-# @unused
-glaze_ignore = [
- "input.go",
- "output.go",
-]
diff --git a/tools/go_generics/tests/all_types/input.go b/tools/go_generics/tests/all_types/input.go
deleted file mode 100644
index 6f85bbb69..000000000
--- a/tools/go_generics/tests/all_types/input.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tests
-
-import (
- "./lib"
-)
-
-type T int
-
-type newType struct {
- a T
- b lib.T
- c *T
- d (T)
- e chan T
- f <-chan T
- g chan<- T
- h []T
- i [10]T
- j map[T]T
- k func(T, T) (T, T)
- l interface {
- f(T)
- }
- m struct {
- T
- a T
- }
-}
-
-func f(...T) {
-}
diff --git a/tools/go_generics/tests/all_types/lib/lib.go b/tools/go_generics/tests/all_types/lib/lib.go
deleted file mode 100644
index 99edb371f..000000000
--- a/tools/go_generics/tests/all_types/lib/lib.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package lib
-
-// T is a test type.
-type T int32
diff --git a/tools/go_generics/tests/all_types/output.go b/tools/go_generics/tests/all_types/output.go
deleted file mode 100644
index c0bbebfe7..000000000
--- a/tools/go_generics/tests/all_types/output.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
- "./lib"
-)
-
-type newType struct {
- a Q
- b lib.T
- c *Q
- d (Q)
- e chan Q
- f <-chan Q
- g chan<- Q
- h []Q
- i [10]Q
- j map[Q]Q
- k func(Q, Q) (Q, Q)
- l interface {
- f(Q)
- }
- m struct {
- Q
- a Q
- }
-}
-
-func f(...Q) {
-}
diff --git a/tools/go_generics/tests/anon/BUILD b/tools/go_generics/tests/anon/BUILD
deleted file mode 100644
index ef24f4b25..000000000
--- a/tools/go_generics/tests/anon/BUILD
+++ /dev/null
@@ -1,18 +0,0 @@
-load("//tools/go_generics/tests:defs.bzl", "go_generics_test")
-
-go_generics_test(
- name = "anon",
- anon = True,
- inputs = ["input.go"],
- output = "output.go",
- suffix = "New",
- types = {
- "T": "Q",
- },
-)
-
-# @unused
-glaze_ignore = [
- "input.go",
- "output.go",
-]
diff --git a/tools/go_generics/tests/anon/input.go b/tools/go_generics/tests/anon/input.go
deleted file mode 100644
index 44086d522..000000000
--- a/tools/go_generics/tests/anon/input.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tests
-
-type T interface {
- Apply(T) T
-}
-
-type Foo struct {
- T
- Bar map[string]T `json:"bar,omitempty"`
-}
-
-type Baz struct {
- T someTypeNotT
-}
-
-func (f Foo) GetBar(name string) T {
- b, ok := f.Bar[name]
- if ok {
- b = f.Apply(b)
- } else {
- b = f.T
- }
- return b
-}
-
-func foobar() {
- a := Baz{}
- a.T = 0 // should not be renamed, this is a limitation
-
- b := otherpkg.UnrelatedType{}
- b.T = 0 // should not be renamed, this is a limitation
-}
diff --git a/tools/go_generics/tests/anon/output.go b/tools/go_generics/tests/anon/output.go
deleted file mode 100644
index 7fa791853..000000000
--- a/tools/go_generics/tests/anon/output.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-type FooNew struct {
- Q
- Bar map[string]Q `json:"bar,omitempty"`
-}
-
-type BazNew struct {
- T someTypeNotT
-}
-
-func (f FooNew) GetBar(name string) Q {
- b, ok := f.Bar[name]
- if ok {
- b = f.Apply(b)
- } else {
- b = f.Q
- }
- return b
-}
-
-func foobarNew() {
- a := BazNew{}
- a.Q = 0
-
- b := otherpkg.UnrelatedType{}
- b.Q = 0
-}
diff --git a/tools/go_generics/tests/consts/BUILD b/tools/go_generics/tests/consts/BUILD
deleted file mode 100644
index fd7caccad..000000000
--- a/tools/go_generics/tests/consts/BUILD
+++ /dev/null
@@ -1,23 +0,0 @@
-load("//tools/go_generics/tests:defs.bzl", "go_generics_test")
-
-go_generics_test(
- name = "consts",
- consts = {
- "c1": "20",
- "z": "600",
- "v": "3.3",
- "s": "\"def\"",
- "A": "20",
- "C": "100",
- "S": "\"def\"",
- "T": "\"ABC\"",
- },
- inputs = ["input.go"],
- output = "output.go",
-)
-
-# @unused
-glaze_ignore = [
- "input.go",
- "output.go",
-]
diff --git a/tools/go_generics/tests/consts/input.go b/tools/go_generics/tests/consts/input.go
deleted file mode 100644
index 04b95fcc6..000000000
--- a/tools/go_generics/tests/consts/input.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tests
-
-const c1 = 10
-const x, y, z = 100, 200, 300
-const v float32 = 1.0 + 2.0
-const s = "abc"
-const (
- A = 10
- B, C, D = 10, 20, 30
- S = "abc"
- T, U, V string = "abc", "def", "ghi"
-)
diff --git a/tools/go_generics/tests/consts/output.go b/tools/go_generics/tests/consts/output.go
deleted file mode 100644
index 18d316cc9..000000000
--- a/tools/go_generics/tests/consts/output.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-const c1 = 20
-const x, y, z = 100, 200, 600
-const v float32 = 3.3
-const s = "def"
-const (
- A = 20
- B, C, D = 10, 100, 30
- S = "def"
- T, U, V string = "ABC", "def", "ghi"
-)
diff --git a/tools/go_generics/tests/defs.bzl b/tools/go_generics/tests/defs.bzl
deleted file mode 100644
index 6277c3947..000000000
--- a/tools/go_generics/tests/defs.bzl
+++ /dev/null
@@ -1,67 +0,0 @@
-"""Generics tests."""
-
-load("//tools/go_generics:defs.bzl", "go_template", "go_template_instance")
-
-def _go_generics_test_impl(ctx):
- runner = ctx.actions.declare_file(ctx.label.name)
- runner_content = "\n".join([
- "#!/bin/bash",
- "exec diff --ignore-blank-lines --ignore-matching-lines=^[[:space:]]*// %s %s" % (
- ctx.files.template_output[0].short_path,
- ctx.files.expected_output[0].short_path,
- ),
- "",
- ])
- ctx.actions.write(runner, runner_content, is_executable = True)
- return [DefaultInfo(
- executable = runner,
- runfiles = ctx.runfiles(
- files = ctx.files.template_output + ctx.files.expected_output,
- collect_default = True,
- collect_data = True,
- ),
- )]
-
-_go_generics_test = rule(
- implementation = _go_generics_test_impl,
- attrs = {
- "template_output": attr.label(mandatory = True, allow_single_file = True),
- "expected_output": attr.label(mandatory = True, allow_single_file = True),
- },
- test = True,
-)
-
-def go_generics_test(name, inputs, output, types = None, consts = None, **kwargs):
- """Instantiates a generics test.
-
- Args:
- name: the name of the test.
- inputs: all the input files.
-      output: the output file.
- types: the template types (dictionary).
- consts: the template consts (dictionary).
- **kwargs: additional arguments for the template_instance.
- """
- if types == None:
- types = dict()
- if consts == None:
- consts = dict()
- go_template(
- name = name + "_template",
- srcs = inputs,
- types = types.keys(),
- consts = consts.keys(),
- )
- go_template_instance(
- name = name + "_output",
- template = ":" + name + "_template",
- out = name + "_output.go",
- types = types,
- consts = consts,
- **kwargs
- )
- _go_generics_test(
- name = name + "_test",
- template_output = name + "_output.go",
- expected_output = output,
- )
diff --git a/tools/go_generics/tests/imports/BUILD b/tools/go_generics/tests/imports/BUILD
deleted file mode 100644
index a86223d41..000000000
--- a/tools/go_generics/tests/imports/BUILD
+++ /dev/null
@@ -1,24 +0,0 @@
-load("//tools/go_generics/tests:defs.bzl", "go_generics_test")
-
-go_generics_test(
- name = "imports",
- consts = {
- "n": "math.Uint32",
- "m": "math.Uint64",
- },
- imports = {
- "sync": "sync",
- "math": "mymathpath",
- },
- inputs = ["input.go"],
- output = "output.go",
- types = {
- "T": "sync.Mutex",
- },
-)
-
-# @unused
-glaze_ignore = [
- "input.go",
- "output.go",
-]
diff --git a/tools/go_generics/tests/imports/input.go b/tools/go_generics/tests/imports/input.go
deleted file mode 100644
index 0f032c2a1..000000000
--- a/tools/go_generics/tests/imports/input.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tests
-
-type T int
-
-var global T
-
-const (
- m = 0
- n = 0
-)
diff --git a/tools/go_generics/tests/imports/output.go b/tools/go_generics/tests/imports/output.go
deleted file mode 100644
index 2488ca58c..000000000
--- a/tools/go_generics/tests/imports/output.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
- __generics_imported1 "mymathpath"
- __generics_imported0 "sync"
-)
-
-var global __generics_imported0.Mutex
-
-const (
- m = __generics_imported1.Uint64
- n = __generics_imported1.Uint32
-)
diff --git a/tools/go_generics/tests/remove_typedef/BUILD b/tools/go_generics/tests/remove_typedef/BUILD
deleted file mode 100644
index 46457cec6..000000000
--- a/tools/go_generics/tests/remove_typedef/BUILD
+++ /dev/null
@@ -1,16 +0,0 @@
-load("//tools/go_generics/tests:defs.bzl", "go_generics_test")
-
-go_generics_test(
- name = "remove_typedef",
- inputs = ["input.go"],
- output = "output.go",
- types = {
- "T": "U",
- },
-)
-
-# @unused
-glaze_ignore = [
- "input.go",
- "output.go",
-]
diff --git a/tools/go_generics/tests/remove_typedef/input.go b/tools/go_generics/tests/remove_typedef/input.go
deleted file mode 100644
index cf632bae7..000000000
--- a/tools/go_generics/tests/remove_typedef/input.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tests
-
-func f(T) Q {
- return Q{}
-}
-
-type T struct{}
-
-type Q struct{}
-
-func (*T) f() {
-}
-
-func (T) g() {
-}
-
-func (*Q) f(T) T {
- return T{}
-}
-
-func (*Q) g(T) *T {
- return nil
-}
diff --git a/tools/go_generics/tests/remove_typedef/output.go b/tools/go_generics/tests/remove_typedef/output.go
deleted file mode 100644
index d44fd8e1c..000000000
--- a/tools/go_generics/tests/remove_typedef/output.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-func f(U) Q {
- return Q{}
-}
-
-type Q struct{}
-
-func (*Q) f(U) U {
- return U{}
-}
-
-func (*Q) g(U) *U {
- return nil
-}
diff --git a/tools/go_generics/tests/simple/BUILD b/tools/go_generics/tests/simple/BUILD
deleted file mode 100644
index 4b9265ea4..000000000
--- a/tools/go_generics/tests/simple/BUILD
+++ /dev/null
@@ -1,17 +0,0 @@
-load("//tools/go_generics/tests:defs.bzl", "go_generics_test")
-
-go_generics_test(
- name = "simple",
- inputs = ["input.go"],
- output = "output.go",
- suffix = "New",
- types = {
- "T": "Q",
- },
-)
-
-# @unused
-glaze_ignore = [
- "input.go",
- "output.go",
-]
diff --git a/tools/go_generics/tests/simple/input.go b/tools/go_generics/tests/simple/input.go
deleted file mode 100644
index 2a917f16c..000000000
--- a/tools/go_generics/tests/simple/input.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tests
-
-type T int
-
-var global T
-
-func f(_ T, a int) {
-}
-
-func g(a T, b int) {
- var c T
- _ = c
-
- d := (*T)(nil)
- _ = d
-}
-
-type R struct {
- T
- a *T
-}
-
-var (
- Z *T = (*T)(nil)
-)
-
-const (
- X T = (T)(0)
-)
-
-type Y T
diff --git a/tools/go_generics/tests/simple/output.go b/tools/go_generics/tests/simple/output.go
deleted file mode 100644
index 6bfa0b25b..000000000
--- a/tools/go_generics/tests/simple/output.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-var globalNew Q
-
-func fNew(_ Q, a int) {
-}
-
-func gNew(a Q, b int) {
- var c Q
- _ = c
-
- d := (*Q)(nil)
- _ = d
-}
-
-type RNew struct {
- Q
- a *Q
-}
-
-var (
- ZNew *Q = (*Q)(nil)
-)
-
-const (
- XNew Q = (Q)(0)
-)
-
-type YNew Q
diff --git a/tools/go_marshal/BUILD b/tools/go_marshal/BUILD
deleted file mode 100644
index 85a1adf66..000000000
--- a/tools/go_marshal/BUILD
+++ /dev/null
@@ -1,24 +0,0 @@
-load("//tools:defs.bzl", "bzl_library", "go_binary")
-
-licenses(["notice"])
-
-go_binary(
- name = "go_marshal",
- srcs = ["main.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//tools/go_marshal/gomarshal",
- ],
-)
-
-config_setting(
- name = "marshal_config_verbose",
- values = {"define": "gomarshal=verbose"},
- visibility = ["//:sandbox"],
-)
-
-bzl_library(
- name = "defs_bzl",
- srcs = ["defs.bzl"],
- visibility = ["//visibility:private"],
-)
diff --git a/tools/go_marshal/README.md b/tools/go_marshal/README.md
deleted file mode 100644
index bbd4c9f48..000000000
--- a/tools/go_marshal/README.md
+++ /dev/null
@@ -1,145 +0,0 @@
-This package implements the go_marshal utility.
-
-# Overview
-
-`go_marshal` is a code generation utility similar to `go_stateify` for
-marshalling go data structures to and from memory.
-
-`go_marshal` attempts to improve on `binary.Write` and the sentry's
-`binary.Marshal` by moving the expensive use of reflection from runtime to
-compile-time.
-
-`go_marshal` automatically generates implementations of the `marshal.Marshallable`
-interface. Data structures that require custom serialization can be accommodated
-through a manual implementation of this interface.
-
-Data structures can be flagged for code generation by adding a struct-level
-comment `// +marshal`. For additional details and options, see the documentation
-for the `marshal.Marshallable` interface.
-
-# Usage
-
-See `defs.bzl`: a new rule is provided, `go_marshal`.
-
-Under the hood, the `go_marshal` rule is used to generate a file that will
-appear in a Go target; the output file should appear explicitly in a srcs list.
-For example (note that the above is the preferred method):
-
-```
-load("<PKGPATH>/gvisor/tools/go_marshal:defs.bzl", "go_marshal")
-
-go_marshal(
- name = "foo_abi",
- srcs = ["foo.go"],
- out = "foo_abi.go",
- package = "foo",
-)
-
-go_library(
- name = "foo",
- srcs = [
- "foo.go",
- "foo_abi.go",
- ],
- ...
-)
-```
-
-As part of the interface generation, `go_marshal` also generates some tests for
-sanity checking the struct definitions for potential alignment issues, and a
-simple round-trip test through Marshal/Unmarshal to verify the implementation.
-These tests use reflection to verify properties of the ABI struct, and should be
-considered part of the generated interfaces (but are too expensive to execute at
-runtime). Ensure these tests run at some point.
-
-# Restrictions
-
-Not all valid go type definitions can be used with `go_marshal`. `go_marshal` is
-intended for ABI structs, which have these additional restrictions:
-
-- At the moment, `go_marshal` only supports struct declarations.
-
-- Structs are marshalled as packed types. This means no implicit padding is
- inserted between fields shorter than the platform register size. For
- alignment, manually insert padding fields.
-
-- Structs used with `go_marshal` must have a compile-time static size. This
-    means no dynamically sized fields like slices or strings. Use statically
-    sized arrays (byte arrays for strings) instead.
-
--   No pointer, channel, map or function pointer fields, and no fields that are
- arrays of these types. These don't make sense in an ABI data structure.
-
-- We could support opaque pointers as `uintptr`, but this is currently not
- implemented. Implementing this would require handling the architecture
- dependent native pointer size.
-
-- Fields must either be a primitive integer type (`byte`,
- `[u]int{8,16,32,64}`), or of a type that implements `marshal.Marshallable`.
-
-- `int` and `uint` fields are not allowed. Use an explicitly-sized numeric
- type.
-
-- `float*` fields are currently not supported, but could be if necessary.
-
-# Appendix
-
-## Working with Non-Packed Structs
-
-ABI structs must generally be packed types, meaning they should have no implicit
-padding between short fields. However, if a field is tagged
-`marshal:"unaligned"`, `go_marshal` will fall back to a safer but slower
-mechanism to deal with potentially unaligned fields.
-
-Note that the non-packed property is inherited by any other struct that embeds
-this struct, since the `go_marshal` tool currently can't reason about alignments
-for embedded structs that are not aligned.
-
-Because of this, it's generally best to avoid using `marshal:"unaligned"` and
-insert explicit padding fields instead.
-
-## Working with dynamically sized structs
-
-While `go_marshal` seamlessly supports statically sized structs (which most ABI
-structs are), it can also be used for other use cases where marshalling is
-required. There is some provision to partially support dynamically sized structs
-that may not be ABI structs. A user can define a dynamic struct and implement
-`SizeBytes()`, `MarshalBytes(dst)` and `UnmarshalBytes(src)` for it. The user
-can then add a comment above the struct like `// +marshal dynamic`, which will
-make `go_marshal` autogenerate the remaining methods required to complete the
-`Marshallable` interface. This feature is currently only available for structs
-and cannot be used alongside the Slice API.
-
-## Modifying the `go_marshal` Tool
-
-The following are some guidelines for modifying the `go_marshal` tool:
-
-- The `go_marshal` tool currently does a single pass over all types requesting
- code generation, in arbitrary order. This means the generated code can't
- directly obtain information about embedded marshallable types at
- compile-time. One way to work around this restriction is to add a new
- Marshallable interface method providing this piece of information, and
- calling it from the generated code. Use this sparingly, as we want to rely
- on compile-time information as much as possible for performance.
-
-- No runtime reflection in the code generated for the marshallable interface.
- The entire point of the tool is to avoid runtime reflection. The generated
- tests may use reflection.
-
-## Debugging
-
-To enable debugging output from the go-marshal tool, use one of the following
-options, depending on how go-marshal is being invoked:
-
-- Pass `--define gomarshal=verbose` to the bazel command. Note that this can
- generate a lot of output depending on what's being compiled, as this will
- enable debugging for all packages built by the command.
-
-- Set `marshal_debug = True` on the top-level `go_library` BUILD rule.
-
-- Set `debug = True` on the `go_marshal` BUILD rule.
-
-- Pass `-debug` to the go-marshal tool invocation.
-
-If bazel complains about stdout output being too large, set a larger value
-through `--experimental_ui_max_stdouterr_bytes`, or `-1` for unlimited output.
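
To make the packed-struct restrictions described in the README above concrete, here is a minimal sketch of a struct annotated for go_marshal. The type and field names are hypothetical, invented only for illustration; the layout simply follows the rules listed under Restrictions (explicitly sized integer fields, explicit padding, fixed-size byte arrays instead of strings).

```go
package foo

// FooHeader is a hypothetical ABI struct used only to illustrate the
// restrictions above. The +marshal directive asks go_marshal to generate a
// marshal.Marshallable implementation for it.
//
// +marshal
type FooHeader struct {
	Flags   uint32   // Explicitly sized; plain int/uint are not allowed.
	_       uint32   // Explicit padding; go_marshal assumes packed layouts.
	Length  uint64
	Payload [16]byte // Strings must be carried as fixed-size byte arrays.
}
```
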
diff --git a/tools/go_marshal/analysis/BUILD b/tools/go_marshal/analysis/BUILD
deleted file mode 100644
index c2a4d45c4..000000000
--- a/tools/go_marshal/analysis/BUILD
+++ /dev/null
@@ -1,12 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-licenses(["notice"])
-
-go_library(
- name = "analysis",
- testonly = 1,
- srcs = ["analysis_unsafe.go"],
- visibility = [
- "//:sandbox",
- ],
-)
diff --git a/tools/go_marshal/analysis/analysis_unsafe.go b/tools/go_marshal/analysis/analysis_unsafe.go
deleted file mode 100644
index 7a3d6bbea..000000000
--- a/tools/go_marshal/analysis/analysis_unsafe.go
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package analysis implements common functionality used by generated
-// go_marshal tests.
-package analysis
-
-// All functions in this package are unsafe and are not intended for general
-// consumption. They contain sharp edge cases and the caller is responsible for
-// ensuring none of them are hit. Callers must be careful to pass in only sane
-// arguments. Failure to do so may cause panics at best and arbitrary memory
-// corruption at worst.
-//
-// Never use outside of tests.
-
-import (
- "fmt"
- "math/rand"
- "reflect"
- "testing"
- "unsafe"
-)
-
-// RandomizeValue assigns random value(s) to an arbitrary type. This is intended
-// for use with ABI structs from go_marshal, meaning the typical restrictions
-// apply (fixed-size types, no pointers, maps, channels, etc), and should only
-// be used on zeroed values to avoid overwriting pointers to active go objects.
-//
-// Internally, we populate the type with random data by doing an unsafe cast to
-// access the underlying memory of the type and filling it as if it were a byte
-// slice. This almost gets us what we want, but padding fields named "_" are
-// normally not accessible, so we walk the type and recursively zero all "_"
-// fields.
-//
-// Precondition: x must be a pointer. x must not contain any valid
-// pointers to active go objects (pointer fields aren't allowed in ABI
-// structs anyways), or we'd be violating the go runtime contract and
-// the GC may malfunction.
-func RandomizeValue(x interface{}) {
- v := reflect.Indirect(reflect.ValueOf(x))
- if !v.CanSet() {
-		panic("RandomizeValue() called with an unaddressable value. You probably need to pass a pointer to the argument")
- }
-
- // Cast the underlying memory for the type into a byte slice.
- var b []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&b))
- // Note: v.UnsafeAddr panics if x is passed by value. x should be a pointer.
- hdr.Data = v.UnsafeAddr()
- hdr.Len = int(v.Type().Size())
- hdr.Cap = hdr.Len
-
- // Fill the byte slice with random data, which in effect fills the type with
- // random values.
- n, err := rand.Read(b)
- if err != nil || n != len(b) {
- panic("unreachable")
- }
-
- // Normally, padding fields are not accessible, so zero them out.
- reflectZeroPaddingFields(v.Type(), b, false)
-}
-
-// reflectZeroPaddingFields assigns zero values to padding fields for the value
-// of type r, represented by the memory in data. Padding fields are defined as
-// fields with the name "_". If zero is true, the immediate value itself is
-// zeroed. In addition, the type is recursively scanned for padding fields in
-// inner types.
-//
-// This is used for zeroing padding fields after calling RandomizeValue.
-func reflectZeroPaddingFields(r reflect.Type, data []byte, zero bool) {
- if zero {
- for i := range data {
- data[i] = 0
- }
- }
- switch r.Kind() {
- case reflect.Int8, reflect.Uint8, reflect.Int16, reflect.Uint16, reflect.Int32, reflect.Uint32, reflect.Int64, reflect.Uint64:
- // These types are explicitly allowed in an ABI type, but we don't need
- // to recurse further as they're scalar types.
- case reflect.Struct:
- for i, numFields := 0, r.NumField(); i < numFields; i++ {
- f := r.Field(i)
- off := f.Offset
- len := f.Type.Size()
- window := data[off : off+len]
- reflectZeroPaddingFields(f.Type, window, f.Name == "_")
- }
- case reflect.Array:
- eLen := int(r.Elem().Size())
- if int(r.Size()) != eLen*r.Len() {
- panic("Array has unexpected size?")
- }
- for i, n := 0, r.Len(); i < n; i++ {
- reflectZeroPaddingFields(r.Elem(), data[i*eLen:(i+1)*eLen], false)
- }
- default:
- panic(fmt.Sprintf("Type %v not allowed in ABI struct", r.Kind()))
-
- }
-}
-
-// AlignmentCheck ensures the definition of the type represented by typ doesn't
-// cause the go compiler to emit implicit padding between elements of the type
-// (i.e. fields in a struct).
-//
-// AlignmentCheck doesn't explicitly recurse for embedded structs because any
-// struct present in an ABI struct must also be Marshallable, and therefore
-// they're aligned by definition (or their alignment check would have failed).
-func AlignmentCheck(t *testing.T, typ reflect.Type) (ok bool, delta uint64) {
- switch typ.Kind() {
- case reflect.Int8, reflect.Uint8, reflect.Int16, reflect.Uint16, reflect.Int32, reflect.Uint32, reflect.Int64, reflect.Uint64:
- // Primitive types are always considered well aligned. Primitive types
- // that are fields in structs are checked independently, this branch
- // exists to handle recursive calls to alignmentCheck.
- case reflect.Struct:
- xOff := 0
- nextXOff := 0
- skipNext := false
- for i, numFields := 0, typ.NumField(); i < numFields; i++ {
- xOff = nextXOff
- f := typ.Field(i)
- fmt.Printf("Checking alignment of %s.%s @ %d [+%d]...\n", typ.Name(), f.Name, f.Offset, f.Type.Size())
- nextXOff = int(f.Offset + f.Type.Size())
-
- if f.Name == "_" {
- // Padding fields need not be aligned.
- fmt.Printf("Padding field of type %v\n", f.Type)
- continue
- }
-
- if tag, ok := f.Tag.Lookup("marshal"); ok && tag == "unaligned" {
- skipNext = true
- continue
- }
-
- if skipNext {
- skipNext = false
- fmt.Printf("Skipping alignment check for field %s.%s explicitly marked as unaligned.\n", typ.Name(), f.Name)
- continue
- }
-
- if xOff != int(f.Offset) {
- implicitPad := int(f.Offset) - xOff
- t.Fatalf("Suspect offset for field %s.%s, detected an implicit %d byte padding from offset %d to %d; either add %d bytes of explicit padding before this field or tag it as `marshal:\"unaligned\"`.", typ.Name(), f.Name, implicitPad, xOff, f.Offset, implicitPad)
- }
- }
-
- // Ensure structs end on a byte explicitly defined by the type.
- if typ.NumField() > 0 && nextXOff != int(typ.Size()) {
- implicitPad := int(typ.Size()) - nextXOff
- f := typ.Field(typ.NumField() - 1) // Final field
- if tag, ok := f.Tag.Lookup("marshal"); ok && tag == "unaligned" {
- // Final field explicitly marked unaligned.
- break
- }
-			t.Fatalf("Suspect offset for field %s.%s at the end of %s, detected an implicit %d byte padding from offset %d to %d at the end of the struct; either add %d bytes of explicit padding at the end of the struct or tag the final field %s as `marshal:\"unaligned\"`.",
- typ.Name(), f.Name, typ.Name(), implicitPad, nextXOff, typ.Size(), implicitPad, f.Name)
- }
- case reflect.Array:
- // Independent arrays are also always considered well aligned. We only
- // need to worry about their alignment when they're embedded in structs,
- // which we handle above.
- default:
- t.Fatalf("Unsupported type in ABI struct while checking for field alignment for type: %v", typ.Kind())
- }
- return true, uint64(typ.Size())
-}
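
As a rough sketch of how the helpers above are exercised, the following hand-written test mirrors what the generated go_marshal tests do with `RandomizeValue` and `AlignmentCheck`. The `FooHeader` type is the hypothetical struct from the earlier sketch, and the import path is an assumption based on the package layout in this diff.

```go
package foo

import (
	"reflect"
	"testing"

	"gvisor.dev/gvisor/tools/go_marshal/analysis"
)

// TestFooHeaderABI sketches the alignment and randomization checks that
// go_marshal emits for every marshallable type.
func TestFooHeaderABI(t *testing.T) {
	var x FooHeader // Hypothetical ABI struct from the earlier sketch.

	// Fail the test if the Go compiler would insert implicit padding
	// between fields of the struct.
	analysis.AlignmentCheck(t, reflect.TypeOf(x))

	// Fill the zeroed value with random data; requires a pointer argument.
	analysis.RandomizeValue(&x)
}
```
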
diff --git a/tools/go_marshal/defs.bzl b/tools/go_marshal/defs.bzl
deleted file mode 100644
index 9f620cb76..000000000
--- a/tools/go_marshal/defs.bzl
+++ /dev/null
@@ -1,67 +0,0 @@
-"""Marshal is a tool for generating marshalling interfaces for Go types."""
-
-def _go_marshal_impl(ctx):
- """Execute the go_marshal tool."""
- output = ctx.outputs.lib
- output_test = ctx.outputs.test
- output_test_unconditional = ctx.outputs.test_unconditional
-
- # Run the marshal command.
- args = ["-output=%s" % output.path]
- args.append("-pkg=%s" % ctx.attr.package)
- args.append("-output_test=%s" % output_test.path)
- args.append("-output_test_unconditional=%s" % output_test_unconditional.path)
-
- if ctx.attr.debug:
- args += ["-debug"]
-
- args += ["--"]
- for src in ctx.attr.srcs:
- args += [f.path for f in src.files.to_list()]
- ctx.actions.run(
- inputs = ctx.files.srcs,
- outputs = [output, output_test, output_test_unconditional],
- mnemonic = "GoMarshal",
- progress_message = "go_marshal: %s" % ctx.label,
- arguments = args,
- executable = ctx.executable._tool,
- )
-
-# Generates marshalling code from a set of Go files.
-#
-# Args:
-# name: the name of the rule.
-# srcs: the input source files. These files should include all structs in the
-#         package that need to be marshalled.
-# imports: an optional list of extra, non-aliased, Go-style absolute import
-# paths.
-# out: the name of the generated file output. This must not conflict with any
-# other files and must be added to the srcs of the relevant go_library.
-# package: the package name for the input sources.
-go_marshal = rule(
- implementation = _go_marshal_impl,
- attrs = {
- "srcs": attr.label_list(mandatory = True, allow_files = True),
- "imports": attr.string_list(mandatory = False),
- "package": attr.string(mandatory = True),
- "debug": attr.bool(doc = "enable debugging output from the go_marshal tool"),
- "_tool": attr.label(executable = True, cfg = "host", default = Label("//tools/go_marshal:go_marshal")),
- },
- outputs = {
- "lib": "%{name}_unsafe.go",
- "test": "%{name}_test.go",
- "test_unconditional": "%{name}_unconditional_test.go",
- },
-)
-
-# marshal_deps are the dependencies required by generated code.
-marshal_deps = [
- "//pkg/gohacks",
- "//pkg/hostarch",
- "//pkg/marshal",
-]
-
-# marshal_test_deps are required by test targets.
-marshal_test_deps = [
- "//tools/go_marshal/analysis",
-]
diff --git a/tools/go_marshal/gomarshal/BUILD b/tools/go_marshal/gomarshal/BUILD
deleted file mode 100644
index c2747d94c..000000000
--- a/tools/go_marshal/gomarshal/BUILD
+++ /dev/null
@@ -1,22 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-licenses(["notice"])
-
-go_library(
- name = "gomarshal",
- srcs = [
- "generator.go",
- "generator_interfaces.go",
- "generator_interfaces_array_newtype.go",
- "generator_interfaces_dynamic.go",
- "generator_interfaces_primitive_newtype.go",
- "generator_interfaces_struct.go",
- "generator_tests.go",
- "util.go",
- ],
- stateify = False,
- visibility = [
- "//:sandbox",
- ],
- deps = ["//tools/tags"],
-)
diff --git a/tools/go_marshal/gomarshal/generator.go b/tools/go_marshal/gomarshal/generator.go
deleted file mode 100644
index 00961c90d..000000000
--- a/tools/go_marshal/gomarshal/generator.go
+++ /dev/null
@@ -1,584 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package gomarshal implements the go_marshal code generator. See README.md.
-package gomarshal
-
-import (
- "bytes"
- "fmt"
- "go/ast"
- "go/parser"
- "go/token"
- "os"
- "sort"
- "strings"
-
- "gvisor.dev/gvisor/tools/tags"
-)
-
-// List of identifiers we use in generated code that may conflict with a
-// similarly-named source identifier. Abort gracefully when we see these to
-// avoid potentially confusing compilation failures in generated code.
-//
-// This only applies to import aliases at the moment. All other identifiers
-// are qualified by a receiver argument, since they're struct fields.
-//
-// All receivers are single letters, so we don't allow import aliases to be a
-// single letter.
-var badIdents = []string{
- "addr", "blk", "buf", "cc", "dst", "dsts", "count", "err", "hdr", "idx",
- "inner", "length", "limit", "ptr", "size", "src", "srcs", "val",
- // All single-letter identifiers.
-}
-
-// Constructed from badIdents in init().
-var badIdentsMap map[string]struct{}
-
-func init() {
- badIdentsMap = make(map[string]struct{})
- for _, ident := range badIdents {
- badIdentsMap[ident] = struct{}{}
- }
-}
-
-// Generator drives code generation for a single invocation of the go_marshal
-// utility.
-//
-// The Generator holds arguments passed to the tool, and drives parsing,
-// processing and code generation for all types marked with +marshal declared in
-// the input files.
-//
-// See Generator.run() as the entry point.
-type Generator struct {
- // Paths to input go source files.
- inputs []string
- // Output file to write generated go source.
- output *os.File
- // Output file to write generated tests.
- outputTest *os.File
- // Output file to write unconditionally generated tests.
- outputTestUC *os.File
- // Package name for the generated file.
- pkg string
- // Set of extra packages to import in the generated file.
- imports *importTable
-}
-
-// NewGenerator creates a new code Generator.
-func NewGenerator(srcs []string, out, outTest, outTestUnconditional, pkg string, imports []string) (*Generator, error) {
- f, err := os.OpenFile(out, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
- if err != nil {
- return nil, fmt.Errorf("couldn't open output file %q: %w", out, err)
- }
- fTest, err := os.OpenFile(outTest, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
- if err != nil {
-		return nil, fmt.Errorf("couldn't open test output file %q: %w", outTest, err)
- }
- fTestUC, err := os.OpenFile(outTestUnconditional, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
- if err != nil {
-		return nil, fmt.Errorf("couldn't open unconditional test output file %q: %w", outTestUnconditional, err)
- }
- g := Generator{
- inputs: srcs,
- output: f,
- outputTest: fTest,
- outputTestUC: fTestUC,
- pkg: pkg,
- imports: newImportTable(),
- }
- for _, i := range imports {
- // All imports on the extra imports list are unconditionally marked as
- // used, so that they're always added to the generated code.
- g.imports.add(i).markUsed()
- }
-
- // The following imports may or may not be used by the generated code,
- // depending on what's required for the target types. Don't mark these as
- // used by default.
- g.imports.add("io")
- g.imports.add("reflect")
- g.imports.add("runtime")
- g.imports.add("unsafe")
- g.imports.add("gvisor.dev/gvisor/pkg/gohacks")
- g.imports.add("gvisor.dev/gvisor/pkg/hostarch")
- g.imports.add("gvisor.dev/gvisor/pkg/marshal")
- return &g, nil
-}
-
-// writeHeader writes the header for the generated source file. The header
-// includes the package name, package level comments and import statements.
-func (g *Generator) writeHeader() error {
- var b sourceBuffer
- b.emit("// Automatically generated marshal implementation. See tools/go_marshal.\n\n")
-
- // Emit build tags.
- b.emit("// If there are issues with build tag aggregation, see\n")
- b.emit("// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here\n")
- b.emit("// come from the input set of files used to generate this file. This input set\n")
- b.emit("// is filtered based on pre-defined file suffixes related to build tags, see \n")
- b.emit("// tools/defs.bzl:calculate_sets().\n\n")
-
- if t := tags.Aggregate(g.inputs); len(t) > 0 {
- b.emit(strings.Join(t.Lines(), "\n"))
- b.emit("\n\n")
- }
-
- // Package header.
- b.emit("package %s\n\n", g.pkg)
- if err := b.write(g.output); err != nil {
- return err
- }
-
- return g.imports.write(g.output)
-}
-
-// writeTypeChecks writes a statement to force the compiler to perform a type
-// check for all Marshallable types referenced by the generated code.
-func (g *Generator) writeTypeChecks(ms map[string]struct{}) error {
- if len(ms) == 0 {
- return nil
- }
-
- msl := make([]string, 0, len(ms))
- for m := range ms {
- msl = append(msl, m)
- }
- sort.Strings(msl)
-
- var buf bytes.Buffer
- fmt.Fprint(&buf, "// Marshallable types used by this file.\n")
-
- for _, m := range msl {
- fmt.Fprintf(&buf, "var _ marshal.Marshallable = (*%s)(nil)\n", m)
- }
- fmt.Fprint(&buf, "\n")
-
- _, err := fmt.Fprint(g.output, buf.String())
- return err
-}
-
-// parse processes all input files passed to this generator and produces a set of
-// parsed go ASTs.
-func (g *Generator) parse() ([]*ast.File, []*token.FileSet, error) {
- debugf("go_marshal invoked with %d input files:\n", len(g.inputs))
- for _, path := range g.inputs {
- debugf(" %s\n", path)
- }
-
- files := make([]*ast.File, 0, len(g.inputs))
- fsets := make([]*token.FileSet, 0, len(g.inputs))
-
- for _, path := range g.inputs {
- fset := token.NewFileSet()
- f, err := parser.ParseFile(fset, path, nil, parser.ParseComments)
- if err != nil {
- // Not a valid input file?
- return nil, nil, fmt.Errorf("input %q can't be parsed: %w", path, err)
- }
-
- if debugEnabled() {
- debugf("AST for %q:\n", path)
- ast.Print(fset, f)
- }
-
- files = append(files, f)
- fsets = append(fsets, fset)
- }
-
- return files, fsets, nil
-}
-
-// sliceAPI carries information about the '+marshal slice' directive.
-type sliceAPI struct {
- // Comment node in the AST containing the +marshal tag.
- comment *ast.Comment
- // Identifier fragment to use when naming generated functions for the slice
- // API.
- ident string
- // Whether the generated functions should reference the newtype name, or the
- // inner type name. Only meaningful on newtype declarations on primitives.
- inner bool
-}
-
-// marshallableType carries information about a type marked with the '+marshal'
-// directive.
-type marshallableType struct {
- spec *ast.TypeSpec
- slice *sliceAPI
- recv string
- dynamic bool
-}
-
-func newMarshallableType(fset *token.FileSet, tagLine *ast.Comment, spec *ast.TypeSpec) *marshallableType {
- mt := &marshallableType{
- spec: spec,
- slice: nil,
- }
-
- var unhandledTags []string
-
- for _, tag := range strings.Fields(strings.TrimPrefix(tagLine.Text, "// +marshal")) {
- if strings.HasPrefix(tag, "slice:") {
- tokens := strings.Split(tag, ":")
- if len(tokens) < 2 || len(tokens) > 3 {
- abortAt(fset.Position(tagLine.Slash), fmt.Sprintf("+marshal directive has invalid 'slice' clause. Expecting format 'slice:<IDENTIFIER>[:inner]', got '%v'", tag))
- }
- if len(tokens[1]) == 0 {
- abortAt(fset.Position(tagLine.Slash), "+marshal slice directive has empty identifier argument. Expecting '+marshal slice:identifier'")
- }
-
- sa := &sliceAPI{
- comment: tagLine,
- ident: tokens[1],
- }
- mt.slice = sa
-
- if len(tokens) == 3 {
- if tokens[2] != "inner" {
- abortAt(fset.Position(tagLine.Slash), "+marshal slice directive has an invalid argument. Expecting '+marshal slice:<IDENTIFIER>[:inner]'")
- }
- sa.inner = true
- }
-
- continue
- } else if tag == "dynamic" {
- mt.dynamic = true
- continue
- }
-
- unhandledTags = append(unhandledTags, tag)
- }
-
- if len(unhandledTags) > 0 {
- abortAt(fset.Position(tagLine.Slash), fmt.Sprintf("+marshal directive contained the following unknown clauses: %v", strings.Join(unhandledTags, " ")))
- }
-
- return mt
-}
-
-// collectMarshallableTypes walks the parsed AST and collects a list of type
-// declarations for which we need to generate the Marshallable interface.
-func (g *Generator) collectMarshallableTypes(a *ast.File, f *token.FileSet) map[*ast.TypeSpec]*marshallableType {
-	recv := make(map[string]string) // Type name to receiver name.
- types := make(map[*ast.TypeSpec]*marshallableType)
- for _, decl := range a.Decls {
- gdecl, ok := decl.(*ast.GenDecl)
- // Type declaration?
- if !ok || gdecl.Tok != token.TYPE {
- // Is this a function declaration? We remember receiver names.
- d, ok := decl.(*ast.FuncDecl)
- if ok && d.Recv != nil && len(d.Recv.List) == 1 {
- // Accept concrete methods & pointer methods.
- ident, ok := d.Recv.List[0].Type.(*ast.Ident)
- if !ok {
- var st *ast.StarExpr
- st, ok = d.Recv.List[0].Type.(*ast.StarExpr)
- if ok {
- ident, ok = st.X.(*ast.Ident)
- }
- }
- // The receiver name may be not present.
- if ok && len(d.Recv.List[0].Names) == 1 {
- // Recover the type receiver name in this case.
- recv[ident.Name] = d.Recv.List[0].Names[0].Name
- }
- }
- debugfAt(f.Position(decl.Pos()), "Skipping declaration since it's not a type declaration.\n")
- continue
- }
- // Does it have a comment?
- if gdecl.Doc == nil {
- debugfAt(f.Position(gdecl.Pos()), "Skipping declaration since it doesn't have a comment.\n")
- continue
- }
- // Does the comment contain a "+marshal" line?
- marked := false
- var tagLine *ast.Comment
- for _, c := range gdecl.Doc.List {
- if strings.HasPrefix(c.Text, "// +marshal") {
- marked = true
- tagLine = c
- break
- }
- }
- if !marked {
- debugfAt(f.Position(gdecl.Pos()), "Skipping declaration since it doesn't have a comment containing +marshal line.\n")
- continue
- }
- for _, spec := range gdecl.Specs {
- // We already confirmed we're in a type declaration earlier, so this
- // cast will succeed.
- t := spec.(*ast.TypeSpec)
- switch t.Type.(type) {
- case *ast.StructType:
- debugfAt(f.Position(t.Pos()), "Collected marshallable struct %s.\n", t.Name.Name)
- case *ast.Ident: // Newtype on primitive.
- debugfAt(f.Position(t.Pos()), "Collected marshallable newtype on primitive %s.\n", t.Name.Name)
- case *ast.ArrayType: // Newtype on array.
- debugfAt(f.Position(t.Pos()), "Collected marshallable newtype on array %s.\n", t.Name.Name)
- default:
- // A user specifically requested marshalling on this type, but we
- // don't support it.
- abortAt(f.Position(t.Pos()), fmt.Sprintf("Marshalling codegen was requested on type '%s', but go-marshal doesn't support this kind of declaration.\n", t.Name))
- }
- types[t] = newMarshallableType(f, tagLine, t)
- }
- }
- // Update the types with the last seen receiver. As long as the
- // receiver name is consistent for the type, then we will generate
- // code that is still consistent with itself.
- for t, mt := range types {
- r, ok := recv[t.Name.Name]
- if !ok {
- mt.recv = receiverName(t) // Default.
- continue
- }
- mt.recv = r // Last seen.
- }
- return types
-}
-
-// collectImports collects all imports from all input source files. Some of
-// these imports are copied to the generated output, if they're referenced by
-// the generated code.
-//
-// collectImports de-duplicates imports while building the list, and ensures
-// identifiers in the generated code don't conflict with any imported package
-// names.
-func (g *Generator) collectImports(a *ast.File, f *token.FileSet) map[string]importStmt {
- is := make(map[string]importStmt)
- for _, decl := range a.Decls {
- gdecl, ok := decl.(*ast.GenDecl)
- // Import statement?
- if !ok || gdecl.Tok != token.IMPORT {
- continue
- }
- for _, spec := range gdecl.Specs {
- i := g.imports.addFromSpec(spec.(*ast.ImportSpec), f)
- debugf("Collected import '%s' as '%s'\n", i.path, i.name)
-
- // Make sure we have an import that doesn't use any local names that
- // would conflict with identifiers in the generated code.
- if len(i.name) == 1 && i.name != "_" {
- abortAt(f.Position(spec.Pos()), fmt.Sprintf("Import has a single character local name '%s'; this may conflict with code generated by go_marshal, use a multi-character import alias", i.name))
- }
- if _, ok := badIdentsMap[i.name]; ok {
- abortAt(f.Position(spec.Pos()), fmt.Sprintf("Import name '%s' is likely to conflict with code generated by go_marshal, use a different import alias", i.name))
- }
- }
- }
- return is
-
-}
-
-func (g *Generator) generateOne(t *marshallableType, fset *token.FileSet) *interfaceGenerator {
- i := newInterfaceGenerator(t.spec, t.recv, fset)
- if t.dynamic {
- if t.slice != nil {
- abortAt(fset.Position(t.slice.comment.Slash), "Slice API is not supported for dynamic types because it assumes that each slice element is statically sized.")
- }
- // No validation needed, assume the user knows what they are doing.
- i.emitMarshallableForDynamicType()
- return i
- }
- switch ty := t.spec.Type.(type) {
- case *ast.StructType:
- i.validateStruct(t.spec, ty)
- i.emitMarshallableForStruct(ty)
- if t.slice != nil {
- i.emitMarshallableSliceForStruct(ty, t.slice)
- }
- case *ast.Ident:
- i.validatePrimitiveNewtype(ty)
- i.emitMarshallableForPrimitiveNewtype(ty)
- if t.slice != nil {
- i.emitMarshallableSliceForPrimitiveNewtype(ty, t.slice)
- }
- case *ast.ArrayType:
- i.validateArrayNewtype(t.spec.Name, ty)
- // After validate, we can safely call arrayLen.
- i.emitMarshallableForArrayNewtype(t.spec.Name, ty, ty.Elt.(*ast.Ident))
- if t.slice != nil {
- abortAt(fset.Position(t.slice.comment.Slash), "Array type marked as '+marshal slice:...', but this is not supported. Perhaps fold one of the dimensions?")
- }
- default:
-		// This should've been filtered out by collectMarshallableTypes.
- panic(fmt.Sprintf("Unexpected type %+v", ty))
- }
- return i
-}
-
-// generateOneTestSuite generates a test suite for the automatically generated
-// implementations of type t.
-func (g *Generator) generateOneTestSuite(t *marshallableType) *testGenerator {
- i := newTestGenerator(t.spec, t.recv)
- i.emitTests(t.slice)
- return i
-}
-
-// Run is the entry point to code generation using g.
-//
-// Run parses all input source files specified in g and emits generated code.
-func (g *Generator) Run() error {
- // Parse our input source files into ASTs and token sets.
- asts, fsets, err := g.parse()
- if err != nil {
- return err
- }
-
- if len(asts) != len(fsets) {
- panic("ASTs and FileSets don't match")
- }
-
- // Map of imports in source files; key = local package name, value = import
- // path.
- is := make(map[string]importStmt)
- for i, a := range asts {
- // Collect all imports from the source files. We may need to copy some
- // of these to the generated code if they're referenced. This has to be
- // done before the loop below because we need to process all ASTs before
- // we start requesting imports to be copied one by one as we encounter
- // them in each generated source.
- for name, i := range g.collectImports(a, fsets[i]) {
- is[name] = i
- }
- }
-
- var impls []*interfaceGenerator
- var ts []*testGenerator
- // Set of Marshallable types referenced by generated code.
- ms := make(map[string]struct{})
- for i, a := range asts {
- // Collect type declarations marked for code generation and generate
- // Marshallable interfaces.
- var sortedTypes []*marshallableType
- for _, t := range g.collectMarshallableTypes(a, fsets[i]) {
- sortedTypes = append(sortedTypes, t)
- }
- sort.Slice(sortedTypes, func(x, y int) bool {
- // Sort by type name, which should be unique within a package.
- return sortedTypes[x].spec.Name.String() < sortedTypes[y].spec.Name.String()
- })
- for _, t := range sortedTypes {
- impl := g.generateOne(t, fsets[i])
- // Collect Marshallable types referenced by the generated code.
- for ref := range impl.ms {
- ms[ref] = struct{}{}
- }
- impls = append(impls, impl)
- // Collect imports referenced by the generated code and add them to
- // the list of imports we need to copy to the generated code.
- for name := range impl.is {
- if !g.imports.markUsed(name) {
-					panic(fmt.Sprintf("Generated code for '%s' referenced a non-existent import with local name '%s'. Either go-marshal needs to add an import to the generated file, or a package in an input source file has a package name that differs from the final component of its path, which go-marshal doesn't know how to detect; use an import alias to work around this limitation.", impl.typeName(), name))
- }
- }
- // Do not generate tests for dynamic types because they inherently
- // violate some go_marshal requirements.
- if !t.dynamic {
- ts = append(ts, g.generateOneTestSuite(t))
- }
- }
- }
-
- // Write output file header. These include things like package name and
- // import statements.
- if err := g.writeHeader(); err != nil {
- return err
- }
-
- // Write type checks for referenced marshallable types to output file.
- if err := g.writeTypeChecks(ms); err != nil {
- return err
- }
-
- // Write generated interfaces to output file.
- for _, i := range impls {
- if err := i.write(g.output); err != nil {
- return err
- }
- }
-
- // Write generated tests to test file.
- return g.writeTests(ts)
-}
-
-// writeTests outputs tests for the generated interface implementations to a go
-// source file.
-func (g *Generator) writeTests(ts []*testGenerator) error {
- var b sourceBuffer
-
- // Write the unconditional test file. This file is always compiled,
- // regardless of what build tags were specified on the original input
- // files. We use this file to guarantee we never end up with an empty test
- // file, as that causes the build to fail with "no tests/benchmarks/examples
- // found".
- //
- // There's no easy way to determine ahead of time if we'll end up with an
- // empty build file since build constraints can arbitrarily cause some of
- // the original types to be not defined. We also have no way to tell bazel
- // to omit the entire test suite since the output files are already defined
- // before go-marshal is called.
- b.emit("// Automatically generated marshal tests. See tools/go_marshal.\n\n")
- b.emit("package %s\n\n", g.pkg)
- b.emit("func Example() {\n")
- b.inIndent(func() {
- b.emit("// This example is intentionally empty, and ensures this package contains at\n")
- b.emit("// least one testable entity. go-marshal is forced to emit a test package if the\n")
- b.emit("// input package is marked marshallable, but emitting no testable entities \n")
- b.emit("// results in a build failure.\n")
- })
- b.emit("}\n")
- if err := b.write(g.outputTestUC); err != nil {
- return err
- }
-
- // Now generate the real test file that contains the real types we
- // processed. These need to be conditionally compiled according to the build
- // tags, as the original types may not be defined under all build
- // configurations.
-
- b.reset()
- b.emit("// Automatically generated marshal tests. See tools/go_marshal.\n\n")
-
- // Emit build tags.
- if t := tags.Aggregate(g.inputs); len(t) > 0 {
- b.emit(strings.Join(t.Lines(), "\n"))
- b.emit("\n\n")
- }
-
- b.emit("package %s\n\n", g.pkg)
- if err := b.write(g.outputTest); err != nil {
- return err
- }
-
- // Collect and write test import statements.
- imports := newImportTable()
- for _, t := range ts {
- imports.merge(t.imports)
- }
-
- if err := imports.write(g.outputTest); err != nil {
- return err
- }
-
- // Write test functions.
- for _, t := range ts {
- if err := t.write(g.outputTest); err != nil {
- return err
- }
- }
- return nil
-}
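
The directive parsing in newMarshallableType above accepts a `slice:<IDENTIFIER>[:inner]` clause and a `dynamic` clause. The following hypothetical declarations, with invented type names, sketch the two slice forms as this generator appears to interpret them.

```go
package foo

// Bar gets the regular Marshallable implementation plus a slice API whose
// generated helpers use the BarSlice identifier fragment.
//
// +marshal slice:BarSlice
type Bar struct {
	A uint64
	B uint64
}

// Int32 is a newtype on a primitive; the :inner suffix tells the generator
// to reference the inner type name (int32) rather than the newtype name in
// the generated slice functions.
//
// +marshal slice:Int32Slice:inner
type Int32 int32
```
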
diff --git a/tools/go_marshal/gomarshal/generator_interfaces.go b/tools/go_marshal/gomarshal/generator_interfaces.go
deleted file mode 100644
index 3e643e77f..000000000
--- a/tools/go_marshal/gomarshal/generator_interfaces.go
+++ /dev/null
@@ -1,276 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package gomarshal
-
-import (
- "fmt"
- "go/ast"
- "go/token"
- "strings"
-)
-
-// interfaceGenerator generates marshalling interfaces for a single type.
-//
-// interfaceGenerator is not thread-safe.
-type interfaceGenerator struct {
- sourceBuffer
-
- // The type we're serializing.
- t *ast.TypeSpec
-
- // Receiver argument for generated methods.
- r string
-
- // FileSet containing the tokens for the type we're processing.
- f *token.FileSet
-
- // is records external packages referenced by the generated implementation.
- is map[string]struct{}
-
- // ms records Marshallable types referenced by the generated implementation
- // of t's interfaces.
- ms map[string]struct{}
-
- // as records fields in t that are potentially not packed. The key is the
- // accessor for the field.
- as map[string]struct{}
-}
-
-// typeName returns the name of the type this g represents.
-func (g *interfaceGenerator) typeName() string {
- return g.t.Name.Name
-}
-
-// newInterfaceGenerator creates a new interface generator.
-func newInterfaceGenerator(t *ast.TypeSpec, r string, fset *token.FileSet) *interfaceGenerator {
- g := &interfaceGenerator{
- t: t,
- r: r,
- f: fset,
- is: make(map[string]struct{}),
- ms: make(map[string]struct{}),
- as: make(map[string]struct{}),
- }
- g.recordUsedMarshallable(g.typeName())
- return g
-}
-
-func (g *interfaceGenerator) recordUsedMarshallable(m string) {
- g.ms[m] = struct{}{}
-
-}
-
-func (g *interfaceGenerator) recordUsedImport(i string) {
- g.is[i] = struct{}{}
-}
-
-func (g *interfaceGenerator) recordPotentiallyNonPackedField(fieldName string) {
- g.as[fieldName] = struct{}{}
-}
-
-// abortAt aborts the go_marshal tool with the given error message, with a
-// reference position to the input source. Same as the package-level abortAt,
-// but uses g to resolve p to a position.
-func (g *interfaceGenerator) abortAt(p token.Pos, msg string) {
- abortAt(g.f.Position(p), msg)
-}
-
-// scalarSize returns the size of type identified by t. If t isn't a primitive
-// type, the size isn't known at code generation time, and must be resolved via
-// the marshal.Marshallable interface.
-func (g *interfaceGenerator) scalarSize(t *ast.Ident) (size int, unknownSize bool) {
- switch t.Name {
- case "int8", "uint8", "byte":
- return 1, false
- case "int16", "uint16":
- return 2, false
- case "int32", "uint32":
- return 4, false
- case "int64", "uint64":
- return 8, false
- default:
- return 0, true
- }
-}
-
-func (g *interfaceGenerator) shift(bufVar string, n int) {
- g.emit("%s = %s[%d:]\n", bufVar, bufVar, n)
-}
-
-func (g *interfaceGenerator) shiftDynamic(bufVar, name string) {
- g.emit("%s = %s[%s.SizeBytes():]\n", bufVar, bufVar, name)
-}
-
-// marshalScalar writes a single scalar to a byte slice.
-func (g *interfaceGenerator) marshalScalar(accessor, typ, bufVar string) {
- switch typ {
- case "int8", "uint8", "byte":
- g.emit("%s[0] = byte(%s)\n", bufVar, accessor)
- g.shift(bufVar, 1)
- case "int16", "uint16":
- g.recordUsedImport("hostarch")
- g.emit("hostarch.ByteOrder.PutUint16(%s[:2], uint16(%s))\n", bufVar, accessor)
- g.shift(bufVar, 2)
- case "int32", "uint32":
- g.recordUsedImport("hostarch")
- g.emit("hostarch.ByteOrder.PutUint32(%s[:4], uint32(%s))\n", bufVar, accessor)
- g.shift(bufVar, 4)
- case "int64", "uint64":
- g.recordUsedImport("hostarch")
- g.emit("hostarch.ByteOrder.PutUint64(%s[:8], uint64(%s))\n", bufVar, accessor)
- g.shift(bufVar, 8)
- default:
- g.emit("%s.MarshalBytes(%s[:%s.SizeBytes()])\n", accessor, bufVar, accessor)
- g.shiftDynamic(bufVar, accessor)
- }
-}
-
-// unmarshalScalar reads a single scalar from a byte slice.
-func (g *interfaceGenerator) unmarshalScalar(accessor, typ, bufVar string) {
- switch typ {
- case "byte":
- g.emit("%s = %s[0]\n", accessor, bufVar)
- g.shift(bufVar, 1)
- case "int8", "uint8":
- g.emit("%s = %s(%s[0])\n", accessor, typ, bufVar)
- g.shift(bufVar, 1)
- case "int16", "uint16":
- g.recordUsedImport("hostarch")
- g.emit("%s = %s(hostarch.ByteOrder.Uint16(%s[:2]))\n", accessor, typ, bufVar)
- g.shift(bufVar, 2)
- case "int32", "uint32":
- g.recordUsedImport("hostarch")
- g.emit("%s = %s(hostarch.ByteOrder.Uint32(%s[:4]))\n", accessor, typ, bufVar)
- g.shift(bufVar, 4)
- case "int64", "uint64":
- g.recordUsedImport("hostarch")
- g.emit("%s = %s(hostarch.ByteOrder.Uint64(%s[:8]))\n", accessor, typ, bufVar)
- g.shift(bufVar, 8)
- default:
- g.emit("%s.UnmarshalBytes(%s[:%s.SizeBytes()])\n", accessor, bufVar, accessor)
- g.shiftDynamic(bufVar, accessor)
- g.recordPotentiallyNonPackedField(accessor)
- }
-}
-
-// emitCastToByteSlice unsafely casts an arbitrary type's underlying memory to a
-// byte slice, bypassing escape analysis. The caller is responsible for ensuring
-// srcPtr lives until they're done with dstVar, as the runtime does not consider
-// dstVar dependent on srcPtr due to the escape analysis bypass.
-//
-// srcPtr must be a pointer.
-//
-// This function internally uses the identifier "hdr", and cannot be used
-// in a context where it is already bound.
-func (g *interfaceGenerator) emitCastToByteSlice(srcPtr, dstVar, lenExpr string) {
- g.recordUsedImport("gohacks")
- g.emit("// Construct a slice backed by dst's underlying memory.\n")
- g.emit("var %s []byte\n", dstVar)
- g.emit("hdr := (*reflect.SliceHeader)(unsafe.Pointer(&%s))\n", dstVar)
- g.emit("hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(%s)))\n", srcPtr)
- g.emit("hdr.Len = %s\n", lenExpr)
- g.emit("hdr.Cap = %s\n\n", lenExpr)
-}
-
-// emitCastSliceToByteSlice unsafely casts a slice with elements of an arbitrary type
-// to a byte slice. As part of the cast, the byte slice is made to look
-// independent of the src slice by bypassing escape analysis. This means the
-// byte slice can be used without causing the source to escape. The caller is
-// responsible for ensuring srcPtr lives until they're done with dstVar, as the
-// runtime no longer considers dstVar dependent on srcPtr and is free to GC it.
-//
-// srcPtr must be a pointer.
-//
-// This function internally uses the identifiers "ptr", "val" and "hdr",
-// and cannot be used in a context where these identifiers are already bound.
-func (g *interfaceGenerator) emitCastSliceToByteSlice(srcPtr, dstVar, lenExpr string) {
- g.emitNoEscapeSliceDataPointer(srcPtr, "val")
-
- g.emit("// Construct a slice backed by dst's underlying memory.\n")
- g.emit("var %s []byte\n", dstVar)
- g.emit("hdr := (*reflect.SliceHeader)(unsafe.Pointer(&%s))\n", dstVar)
- g.emit("hdr.Data = uintptr(val)\n")
- g.emit("hdr.Len = %s\n", lenExpr)
- g.emit("hdr.Cap = %s\n\n", lenExpr)
-}
-
-// emitNoEscapeSliceDataPointer unsafely casts a slice's data pointer to an
-// unsafe.Pointer, bypassing escape analysis. The caller is responsible for
-// ensuring srcPtr lives until they're done with dstVar, as the runtime no
-// longer considers dstVar dependent on srcPtr and is free to GC it.
-//
-// srcPtr must be a pointer.
-//
-// This function internally uses the identifier "ptr", and cannot be used in a
-// context where this identifier is already bound.
-func (g *interfaceGenerator) emitNoEscapeSliceDataPointer(srcPtr, dstVar string) {
- g.recordUsedImport("gohacks")
- g.emit("ptr := unsafe.Pointer(%s)\n", srcPtr)
- g.emit("%s := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))\n\n", dstVar)
-}
-
-func (g *interfaceGenerator) emitKeepAlive(ptrVar string) {
- g.emit("// Since we bypassed the compiler's escape analysis, indicate that %s\n", ptrVar)
- g.emit("// must live until the use above.\n")
- g.emit("runtime.KeepAlive(%s) // escapes: replaced by intrinsic.\n", ptrVar)
-}
-
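To make the escape-analysis helpers concrete, here is roughly what emitCastToByteSlice followed by emitKeepAlive produce inside a generated CopyOutN, for a hypothetical receiver t (the receiver name is assumed; cc, addr and limit come from the generated method signature):

    // Construct a slice backed by dst's underlying memory.
    var buf []byte
    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
    hdr.Len = t.SizeBytes()
    hdr.Cap = t.SizeBytes()

    length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
    // Since we bypassed the compiler's escape analysis, indicate that t
    // must live until the use above.
    runtime.KeepAlive(t) // escapes: replaced by intrinsic.
    return length, err
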
-func (g *interfaceGenerator) expandBinaryExpr(b *strings.Builder, e *ast.BinaryExpr) {
- switch x := e.X.(type) {
- case *ast.BinaryExpr:
- // Recursively expand sub-expression.
- g.expandBinaryExpr(b, x)
- case *ast.Ident:
- fmt.Fprintf(b, "%s", x.Name)
- case *ast.BasicLit:
- fmt.Fprintf(b, "%s", x.Value)
- default:
- g.abortAt(e.Pos(), "Cannot convert binary expression to output code. Go-marshal currently only handles simple expressions of literals, constants and basic identifiers")
- }
-
- fmt.Fprintf(b, "%s", e.Op)
-
- switch y := e.Y.(type) {
- case *ast.BinaryExpr:
- // Recursively expand sub-expression.
- g.expandBinaryExpr(b, y)
- case *ast.Ident:
- fmt.Fprintf(b, "%s", y.Name)
- case *ast.BasicLit:
- fmt.Fprintf(b, "%s", y.Value)
- default:
- g.abortAt(e.Pos(), "Cannot convert binary expression to output code. Go-marshal currently only handles simple expressions of literals, constants and basic identifiers")
- }
-}
-
-// arrayLenExpr returns a string containing a valid Go expression representing
-// the length of array a. The returned expression should be treated as a single
-// value, and is already parenthesized as required.
-func (g *interfaceGenerator) arrayLenExpr(a *ast.ArrayType) string {
- var b strings.Builder
-
- switch l := a.Len.(type) {
- case *ast.Ident:
- fmt.Fprintf(&b, "%s", l.Name)
- case *ast.BasicLit:
- fmt.Fprintf(&b, "%s", l.Value)
- case *ast.BinaryExpr:
- g.expandBinaryExpr(&b, l)
- return fmt.Sprintf("(%s)", b.String())
- default:
- g.abortAt(l.Pos(), "Cannot convert this array len expression to output code. Go-marshal currently only handles simple expressions of literals, constants and basic identifiers")
- }
- return b.String()
-}
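
For a sense of the mapping, a few hypothetical array types and the length expressions arrayLenExpr would return for them (constant names are made up):

    // [8]uint32             -> "8"
    // [MaxEntries]byte      -> "MaxEntries"
    // [MaxEntries + 1]byte  -> "(MaxEntries+1)"  // binary expressions come back parenthesized
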
diff --git a/tools/go_marshal/gomarshal/generator_interfaces_array_newtype.go b/tools/go_marshal/gomarshal/generator_interfaces_array_newtype.go
deleted file mode 100644
index bd7741ae5..000000000
--- a/tools/go_marshal/gomarshal/generator_interfaces_array_newtype.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// This file contains the bits of the code generator specific to marshalling
-// newtypes on arrays.
-
-package gomarshal
-
-import (
- "fmt"
- "go/ast"
-)
-
-func (g *interfaceGenerator) validateArrayNewtype(n *ast.Ident, a *ast.ArrayType) {
- if a.Len == nil {
- g.abortAt(a.Pos(), fmt.Sprintf("Dynamically sized slice '%s' cannot be marshalled, arrays must be statically sized", n.Name))
- }
-
- if _, ok := a.Elt.(*ast.Ident); !ok {
- g.abortAt(a.Elt.Pos(), fmt.Sprintf("Marshalling not supported for arrays with %s elements, array elements must be primitive types", kindString(a.Elt)))
- }
-}
-
-func (g *interfaceGenerator) emitMarshallableForArrayNewtype(n *ast.Ident, a *ast.ArrayType, elt *ast.Ident) {
- g.recordUsedImport("gohacks")
- g.recordUsedImport("hostarch")
- g.recordUsedImport("io")
- g.recordUsedImport("marshal")
- g.recordUsedImport("reflect")
- g.recordUsedImport("runtime")
- g.recordUsedImport("unsafe")
-
- lenExpr := g.arrayLenExpr(a)
-
- g.emit("// SizeBytes implements marshal.Marshallable.SizeBytes.\n")
- g.emit("//go:nosplit\n")
- g.emit("func (%s *%s) SizeBytes() int {\n", g.r, g.typeName())
- g.inIndent(func() {
- if size, dynamic := g.scalarSize(elt); !dynamic {
- g.emit("return %d * %s\n", size, lenExpr)
- } else {
- g.emit("return (*%s)(nil).SizeBytes() * %s\n", n.Name, lenExpr)
- }
- })
- g.emit("}\n\n")
-
- g.emit("// MarshalBytes implements marshal.Marshallable.MarshalBytes.\n")
- g.emit("func (%s *%s) MarshalBytes(dst []byte) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("for idx := 0; idx < %s; idx++ {\n", lenExpr)
- g.inIndent(func() {
- g.marshalScalar(fmt.Sprintf("%s[idx]", g.r), elt.Name, "dst")
- })
- g.emit("}\n")
- })
- g.emit("}\n\n")
-
- g.emit("// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.\n")
- g.emit("func (%s *%s) UnmarshalBytes(src []byte) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("for idx := 0; idx < %s; idx++ {\n", lenExpr)
- g.inIndent(func() {
- g.unmarshalScalar(fmt.Sprintf("%s[idx]", g.r), elt.Name, "src")
- })
- g.emit("}\n")
- })
- g.emit("}\n\n")
-
- g.emit("// Packed implements marshal.Marshallable.Packed.\n")
- g.emit("//go:nosplit\n")
- g.emit("func (%s *%s) Packed() bool {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("// Array newtypes are always packed.\n")
- g.emit("return true\n")
- })
- g.emit("}\n\n")
-
- g.emit("// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.\n")
- g.emit("func (%s *%s) MarshalUnsafe(dst []byte) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&%s[0]), uintptr(%s.SizeBytes()))\n", g.r, g.r)
- })
- g.emit("}\n\n")
-
- g.emit("// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.\n")
- g.emit("func (%s *%s) UnmarshalUnsafe(src []byte) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("gohacks.Memmove(unsafe.Pointer(%s), unsafe.Pointer(&src[0]), uintptr(%s.SizeBytes()))\n", g.r, g.r)
- })
- g.emit("}\n\n")
-
- g.emit("// CopyOutN implements marshal.Marshallable.CopyOutN.\n")
- g.emit("//go:nosplit\n")
- g.emit("func (%s *%s) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emitCastToByteSlice(g.r, "buf", fmt.Sprintf("%s.SizeBytes()", g.r))
-
- g.emit("length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.\n")
- g.emitKeepAlive(g.r)
- g.emit("return length, err\n")
- })
- g.emit("}\n\n")
-
- g.emit("// CopyOut implements marshal.Marshallable.CopyOut.\n")
- g.emit("//go:nosplit\n")
- g.emit("func (%s *%s) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("return %s.CopyOutN(cc, addr, %s.SizeBytes())\n", g.r, g.r)
- })
- g.emit("}\n\n")
-
- g.emit("// CopyIn implements marshal.Marshallable.CopyIn.\n")
- g.emit("//go:nosplit\n")
- g.emit("func (%s *%s) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emitCastToByteSlice(g.r, "buf", fmt.Sprintf("%s.SizeBytes()", g.r))
-
- g.emit("length, err := cc.CopyInBytes(addr, buf) // escapes: okay.\n")
- g.emitKeepAlive(g.r)
- g.emit("return length, err\n")
- })
- g.emit("}\n\n")
-
- g.emit("// WriteTo implements io.WriterTo.WriteTo.\n")
- g.emit("func (%s *%s) WriteTo(w io.Writer) (int64, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emitCastToByteSlice(g.r, "buf", fmt.Sprintf("%s.SizeBytes()", g.r))
-
- g.emit("length, err := w.Write(buf)\n")
- g.emitKeepAlive(g.r)
- g.emit("return int64(length), err\n")
-
- })
- g.emit("}\n\n")
-}
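
As a sketch of the result, for a hypothetical array newtype declared as type Nums [4]uint32 (type and receiver names assumed), the first two generated methods would read roughly:

    // SizeBytes implements marshal.Marshallable.SizeBytes.
    //go:nosplit
    func (n *Nums) SizeBytes() int {
        return 4 * 4
    }

    // MarshalBytes implements marshal.Marshallable.MarshalBytes.
    func (n *Nums) MarshalBytes(dst []byte) {
        for idx := 0; idx < 4; idx++ {
            hostarch.ByteOrder.PutUint32(dst[:4], uint32(n[idx]))
            dst = dst[4:]
        }
    }
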
diff --git a/tools/go_marshal/gomarshal/generator_interfaces_dynamic.go b/tools/go_marshal/gomarshal/generator_interfaces_dynamic.go
deleted file mode 100644
index 345020ddc..000000000
--- a/tools/go_marshal/gomarshal/generator_interfaces_dynamic.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package gomarshal
-
-func (g *interfaceGenerator) emitMarshallableForDynamicType() {
- // The user writes their own MarshalBytes, UnmarshalBytes and SizeBytes for
- // dynamic types. Generate the rest using these definitions.
-
- g.emit("// Packed implements marshal.Marshallable.Packed.\n")
- g.emit("//go:nosplit\n")
- g.emit("func (%s *%s) Packed() bool {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("// Type %s is dynamic so it might have slice/string headers. Hence, it is not packed.\n", g.typeName())
- g.emit("return false\n")
- })
- g.emit("}\n\n")
-
- g.emit("// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.\n")
- g.emit("func (%s *%s) MarshalUnsafe(dst []byte) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("// Type %s doesn't have a packed layout in memory, fallback to MarshalBytes.\n", g.typeName())
- g.emit("%s.MarshalBytes(dst)\n", g.r)
- })
- g.emit("}\n\n")
-
- g.emit("// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.\n")
- g.emit("func (%s *%s) UnmarshalUnsafe(src []byte) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("// Type %s doesn't have a packed layout in memory, fallback to UnmarshalBytes.\n", g.typeName())
- g.emit("%s.UnmarshalBytes(src)\n", g.r)
- })
- g.emit("}\n\n")
-
- g.emit("// CopyOutN implements marshal.Marshallable.CopyOutN.\n")
- g.emit("//go:nosplit\n")
- g.recordUsedImport("marshal")
- g.recordUsedImport("hostarch")
- g.emit("func (%s *%s) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("// Type %s doesn't have a packed layout in memory, fall back to MarshalBytes.\n", g.typeName())
- g.emit("buf := cc.CopyScratchBuffer(%s.SizeBytes()) // escapes: okay.\n", g.r)
- g.emit("%s.MarshalBytes(buf) // escapes: fallback.\n", g.r)
- g.emit("return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.\n")
- })
- g.emit("}\n\n")
-
- g.emit("// CopyOut implements marshal.Marshallable.CopyOut.\n")
- g.emit("//go:nosplit\n")
- g.recordUsedImport("marshal")
- g.recordUsedImport("hostarch")
- g.emit("func (%s *%s) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("return %s.CopyOutN(cc, addr, %s.SizeBytes())\n", g.r, g.r)
- })
- g.emit("}\n\n")
-
- g.emit("// CopyIn implements marshal.Marshallable.CopyIn.\n")
- g.emit("//go:nosplit\n")
- g.recordUsedImport("marshal")
- g.recordUsedImport("hostarch")
- g.emit("func (%s *%s) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("// Type %s doesn't have a packed layout in memory, fall back to UnmarshalBytes.\n", g.typeName())
- g.emit("buf := cc.CopyScratchBuffer(%s.SizeBytes()) // escapes: okay.\n", g.r)
- g.emit("length, err := cc.CopyInBytes(addr, buf) // escapes: okay.\n")
- g.emit("// Unmarshal unconditionally. If we had a short copy-in, this results in a\n")
- g.emit("// partially unmarshalled struct.\n")
- g.emit("%s.UnmarshalBytes(buf) // escapes: fallback.\n", g.r)
- g.emit("return length, err\n")
- })
- g.emit("}\n\n")
-
- g.emit("// WriteTo implements io.WriterTo.WriteTo.\n")
- g.recordUsedImport("io")
- g.emit("func (%s *%s) WriteTo(writer io.Writer) (int64, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("// Type %s doesn't have a packed layout in memory, fall back to MarshalBytes.\n", g.typeName())
- g.emit("buf := make([]byte, %s.SizeBytes())\n", g.r)
- g.emit("%s.MarshalBytes(buf)\n", g.r)
- g.emit("length, err := writer.Write(buf)\n")
- g.emit("return int64(length), err\n")
- })
- g.emit("}\n\n")
-}
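
As a sketch, for a hypothetical user-defined dynamic type Filename (name invented; the user supplies SizeBytes, MarshalBytes and UnmarshalBytes), the first two generated methods would look roughly like:

    // Packed implements marshal.Marshallable.Packed.
    //go:nosplit
    func (f *Filename) Packed() bool {
        // Type Filename is dynamic so it might have slice/string headers. Hence, it is not packed.
        return false
    }

    // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
    func (f *Filename) MarshalUnsafe(dst []byte) {
        // Type Filename doesn't have a packed layout in memory, fallback to MarshalBytes.
        f.MarshalBytes(dst)
    }
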
diff --git a/tools/go_marshal/gomarshal/generator_interfaces_primitive_newtype.go b/tools/go_marshal/gomarshal/generator_interfaces_primitive_newtype.go
deleted file mode 100644
index ba4b7324e..000000000
--- a/tools/go_marshal/gomarshal/generator_interfaces_primitive_newtype.go
+++ /dev/null
@@ -1,285 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// This file contains the bits of the code generator specific to marshalling
-// newtypes on primitives.
-
-package gomarshal
-
-import (
- "fmt"
- "go/ast"
-)
-
-// marshalPrimitiveScalar writes a single primitive variable to a byte
-// slice.
-func (g *interfaceGenerator) marshalPrimitiveScalar(accessor, typ, bufVar string) {
- switch typ {
- case "int8", "uint8", "byte":
- g.emit("%s[0] = byte(*%s)\n", bufVar, accessor)
- case "int16", "uint16":
- g.recordUsedImport("hostarch")
- g.emit("hostarch.ByteOrder.PutUint16(%s[:2], uint16(*%s))\n", bufVar, accessor)
- case "int32", "uint32":
- g.recordUsedImport("hostarch")
- g.emit("hostarch.ByteOrder.PutUint32(%s[:4], uint32(*%s))\n", bufVar, accessor)
- case "int64", "uint64":
- g.recordUsedImport("hostarch")
- g.emit("hostarch.ByteOrder.PutUint64(%s[:8], uint64(*%s))\n", bufVar, accessor)
- default:
- g.emit("// Explicilty cast to the underlying type before dispatching to\n")
- g.emit("// MarshalBytes, so we don't recursively call %s.MarshalBytes\n", accessor)
- g.emit("inner := (*%s)(%s)\n", typ, accessor)
- g.emit("inner.MarshalBytes(%s[:%s.SizeBytes()])\n", bufVar, accessor)
- }
-}
-
-// unmarshalPrimitiveScalar reads a single primitive variable from a byte slice.
-func (g *interfaceGenerator) unmarshalPrimitiveScalar(accessor, typ, bufVar, typeCast string) {
- switch typ {
- case "byte":
- g.emit("*%s = %s(%s[0])\n", accessor, typeCast, bufVar)
- case "int8", "uint8":
- g.emit("*%s = %s(%s(%s[0]))\n", accessor, typeCast, typ, bufVar)
- case "int16", "uint16":
- g.recordUsedImport("hostarch")
- g.emit("*%s = %s(%s(hostarch.ByteOrder.Uint16(%s[:2])))\n", accessor, typeCast, typ, bufVar)
- case "int32", "uint32":
- g.recordUsedImport("hostarch")
- g.emit("*%s = %s(%s(hostarch.ByteOrder.Uint32(%s[:4])))\n", accessor, typeCast, typ, bufVar)
- case "int64", "uint64":
- g.recordUsedImport("hostarch")
- g.emit("*%s = %s(%s(hostarch.ByteOrder.Uint64(%s[:8])))\n", accessor, typeCast, typ, bufVar)
- default:
- g.emit("// Explicilty cast to the underlying type before dispatching to\n")
- g.emit("// UnmarshalBytes, so we don't recursively call %s.UnmarshalBytes\n", accessor)
- g.emit("inner := (*%s)(%s)\n", typ, accessor)
- g.emit("inner.UnmarshalBytes(%s[:%s.SizeBytes()])\n", bufVar, accessor)
- }
-}
-
-func (g *interfaceGenerator) validatePrimitiveNewtype(t *ast.Ident) {
- switch t.Name {
- case "int8", "uint8", "byte", "int16", "uint16", "int32", "uint32", "int64", "uint64":
- // These are the only primitive types we allow. Below, we provide
- // suggestions for some disallowed types and reject them, then attempt
- // to marshal any remaining types by invoking the marshal.Marshallable
- // interface on them. If these types don't actually implement
- // marshal.Marshallable, compilation of the generated code will fail
- // with an appropriate error message.
- return
- case "int":
- g.abortAt(t.Pos(), "Type 'int' has ambiguous width, use int32 or int64")
- case "uint":
- g.abortAt(t.Pos(), "Type 'uint' has ambiguous width, use uint32 or uint64")
- case "string":
- g.abortAt(t.Pos(), "Type 'string' is dynamically-sized and cannot be marshalled, use a fixed size byte array '[...]byte' instead")
- default:
- debugfAt(g.f.Position(t.Pos()), fmt.Sprintf("Found derived type '%s', will attempt dispatch via marshal.Marshallable.\n", t.Name))
- }
-}
-
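A brief illustration of how these rules land on some hypothetical declarations (all type names invented):

    type Mode uint16  // Accepted: fixed-width primitive, marshalled directly.
    type Count int    // Rejected: 'int' has ambiguous width; use int32 or int64.
    type Path string  // Rejected: dynamically sized; use a fixed-size '[...]byte' array.
    type Inner Other  // Neither accepted nor rejected here: dispatch falls back to marshal.Marshallable.
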
-// emitMarshallableForPrimitiveNewtype outputs code to implement the
-// marshal.Marshallable interface for a newtype on a primitive. Primitive
-// newtypes are always packed, so we can omit the various fallbacks required for
-// non-packed structs.
-func (g *interfaceGenerator) emitMarshallableForPrimitiveNewtype(nt *ast.Ident) {
- g.recordUsedImport("gohacks")
- g.recordUsedImport("hostarch")
- g.recordUsedImport("io")
- g.recordUsedImport("marshal")
- g.recordUsedImport("reflect")
- g.recordUsedImport("runtime")
- g.recordUsedImport("unsafe")
-
- g.emit("// SizeBytes implements marshal.Marshallable.SizeBytes.\n")
- g.emit("//go:nosplit\n")
- g.emit("func (%s *%s) SizeBytes() int {\n", g.r, g.typeName())
- g.inIndent(func() {
- if size, dynamic := g.scalarSize(nt); !dynamic {
- g.emit("return %d\n", size)
- } else {
- g.emit("return (*%s)(nil).SizeBytes()\n", nt.Name)
- }
- })
- g.emit("}\n\n")
-
- g.emit("// MarshalBytes implements marshal.Marshallable.MarshalBytes.\n")
- g.emit("func (%s *%s) MarshalBytes(dst []byte) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.marshalPrimitiveScalar(g.r, nt.Name, "dst")
- })
- g.emit("}\n\n")
-
- g.emit("// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.\n")
- g.emit("func (%s *%s) UnmarshalBytes(src []byte) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.unmarshalPrimitiveScalar(g.r, nt.Name, "src", g.typeName())
- })
- g.emit("}\n\n")
-
- g.emit("// Packed implements marshal.Marshallable.Packed.\n")
- g.emit("//go:nosplit\n")
- g.emit("func (%s *%s) Packed() bool {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("// Scalar newtypes are always packed.\n")
- g.emit("return true\n")
- })
- g.emit("}\n\n")
-
- g.emit("// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.\n")
- g.emit("func (%s *%s) MarshalUnsafe(dst []byte) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(%s), uintptr(%s.SizeBytes()))\n", g.r, g.r)
- })
- g.emit("}\n\n")
-
- g.emit("// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.\n")
- g.emit("func (%s *%s) UnmarshalUnsafe(src []byte) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("gohacks.Memmove(unsafe.Pointer(%s), unsafe.Pointer(&src[0]), uintptr(%s.SizeBytes()))\n", g.r, g.r)
- })
- g.emit("}\n\n")
-
- g.emit("// CopyOutN implements marshal.Marshallable.CopyOutN.\n")
- g.emit("//go:nosplit\n")
- g.emit("func (%s *%s) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emitCastToByteSlice(g.r, "buf", fmt.Sprintf("%s.SizeBytes()", g.r))
-
- g.emit("length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.\n")
- g.emitKeepAlive(g.r)
- g.emit("return length, err\n")
- })
- g.emit("}\n\n")
-
- g.emit("// CopyOut implements marshal.Marshallable.CopyOut.\n")
- g.emit("//go:nosplit\n")
- g.emit("func (%s *%s) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("return %s.CopyOutN(cc, addr, %s.SizeBytes())\n", g.r, g.r)
- })
- g.emit("}\n\n")
-
- g.emit("// CopyIn implements marshal.Marshallable.CopyIn.\n")
- g.emit("//go:nosplit\n")
- g.emit("func (%s *%s) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emitCastToByteSlice(g.r, "buf", fmt.Sprintf("%s.SizeBytes()", g.r))
-
- g.emit("length, err := cc.CopyInBytes(addr, buf) // escapes: okay.\n")
- g.emitKeepAlive(g.r)
- g.emit("return length, err\n")
- })
- g.emit("}\n\n")
-
- g.emit("// WriteTo implements io.WriterTo.WriteTo.\n")
- g.emit("func (%s *%s) WriteTo(w io.Writer) (int64, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emitCastToByteSlice(g.r, "buf", fmt.Sprintf("%s.SizeBytes()", g.r))
-
- g.emit("length, err := w.Write(buf)\n")
- g.emitKeepAlive(g.r)
- g.emit("return int64(length), err\n")
-
- })
- g.emit("}\n\n")
-}
-
-func (g *interfaceGenerator) emitMarshallableSliceForPrimitiveNewtype(nt *ast.Ident, slice *sliceAPI) {
- g.recordUsedImport("marshal")
- g.recordUsedImport("hostarch")
- g.recordUsedImport("reflect")
- g.recordUsedImport("runtime")
- g.recordUsedImport("unsafe")
-
- eltType := g.typeName()
- if slice.inner {
- eltType = nt.Name
- }
-
- g.emit("// Copy%sIn copies in a slice of %s objects from the task's memory.\n", slice.ident, eltType)
- g.emit("//go:nosplit\n")
- g.emit("func Copy%sIn(cc marshal.CopyContext, addr hostarch.Addr, dst []%s) (int, error) {\n", slice.ident, eltType)
- g.inIndent(func() {
- g.emit("count := len(dst)\n")
- g.emit("if count == 0 {\n")
- g.inIndent(func() {
- g.emit("return 0, nil\n")
- })
- g.emit("}\n")
- g.emit("size := (*%s)(nil).SizeBytes()\n\n", g.typeName())
-
- g.emitCastSliceToByteSlice("&dst", "buf", "size * count")
-
- g.emit("length, err := cc.CopyInBytes(addr, buf) // escapes: okay.\n")
- g.emitKeepAlive("dst")
- g.emit("return length, err\n")
- })
- g.emit("}\n\n")
-
- g.emit("// Copy%sOut copies a slice of %s objects to the task's memory.\n", slice.ident, eltType)
- g.emit("//go:nosplit\n")
- g.emit("func Copy%sOut(cc marshal.CopyContext, addr hostarch.Addr, src []%s) (int, error) {\n", slice.ident, eltType)
- g.inIndent(func() {
- g.emit("count := len(src)\n")
- g.emit("if count == 0 {\n")
- g.inIndent(func() {
- g.emit("return 0, nil\n")
- })
- g.emit("}\n")
- g.emit("size := (*%s)(nil).SizeBytes()\n\n", g.typeName())
-
- g.emitCastSliceToByteSlice("&src", "buf", "size * count")
-
- g.emit("length, err := cc.CopyOutBytes(addr, buf) // escapes: okay.\n")
- g.emitKeepAlive("src")
- g.emit("return length, err\n")
- })
- g.emit("}\n\n")
-
- g.emit("// MarshalUnsafe%s is like %s.MarshalUnsafe, but for a []%s.\n", slice.ident, g.typeName(), g.typeName())
- g.emit("func MarshalUnsafe%s(src []%s, dst []byte) (int, error) {\n", slice.ident, g.typeName())
- g.inIndent(func() {
- g.emit("count := len(src)\n")
- g.emit("if count == 0 {\n")
- g.inIndent(func() {
- g.emit("return 0, nil\n")
- })
- g.emit("}\n")
- g.emit("size := (*%s)(nil).SizeBytes()\n\n", g.typeName())
-
- g.emit("dst = dst[:size*count]\n")
- g.emit("gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(dst)))\n")
- g.emit("return size*count, nil\n")
- })
- g.emit("}\n\n")
-
- g.emit("// UnmarshalUnsafe%s is like %s.UnmarshalUnsafe, but for a []%s.\n", slice.ident, g.typeName(), g.typeName())
- g.emit("func UnmarshalUnsafe%s(dst []%s, src []byte) (int, error) {\n", slice.ident, g.typeName())
- g.inIndent(func() {
- g.emit("count := len(dst)\n")
- g.emit("if count == 0 {\n")
- g.inIndent(func() {
- g.emit("return 0, nil\n")
- })
- g.emit("}\n")
- g.emit("size := (*%s)(nil).SizeBytes()\n\n", g.typeName())
-
- g.emit("src = src[:(size*count)]\n")
- g.emit("gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(src)))\n")
- g.emit("return size*count, nil\n")
- })
- g.emit("}\n\n")
-}
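
For a hypothetical newtype type Mode uint16 with a slice API named ModeSlice (both names assumed), the generated unsafe slice marshaller would come out roughly as:

    // MarshalUnsafeModeSlice is like Mode.MarshalUnsafe, but for a []Mode.
    func MarshalUnsafeModeSlice(src []Mode, dst []byte) (int, error) {
        count := len(src)
        if count == 0 {
            return 0, nil
        }
        size := (*Mode)(nil).SizeBytes()

        dst = dst[:size*count]
        gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(dst)))
        return size*count, nil
    }
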
diff --git a/tools/go_marshal/gomarshal/generator_interfaces_struct.go b/tools/go_marshal/gomarshal/generator_interfaces_struct.go
deleted file mode 100644
index 4c47218f1..000000000
--- a/tools/go_marshal/gomarshal/generator_interfaces_struct.go
+++ /dev/null
@@ -1,616 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// This file contains the bits of the code generator specific to marshalling
-// structs.
-
-package gomarshal
-
-import (
- "fmt"
- "go/ast"
- "sort"
- "strings"
-)
-
-func (g *interfaceGenerator) fieldAccessor(n *ast.Ident) string {
- return fmt.Sprintf("%s.%s", g.r, n.Name)
-}
-
-// areFieldsPackedExpression returns a Go expression checking whether g.t's fields
-// are packed. Returns "", false if g.t has no potentially non-packed fields;
-// otherwise returns <clause>, true, where <clause> is an expression
-// like "t.a.Packed() && t.b.Packed() && t.c.Packed()".
-func (g *interfaceGenerator) areFieldsPackedExpression() (string, bool) {
- if len(g.as) == 0 {
- return "", false
- }
-
- cs := make([]string, 0, len(g.as))
- for accessor := range g.as {
- cs = append(cs, fmt.Sprintf("%s.Packed()", accessor))
- }
- // Sort expressions for deterministic build outputs.
- sort.Strings(cs)
- return strings.Join(cs, " && "), true
-}
-
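For instance, assuming a hypothetical struct Stat whose only potentially non-packed fields are ATime and MTime, the returned expression is "s.ATime.Packed() && s.MTime.Packed()", which feeds the generated Packed method:

    // Packed implements marshal.Marshallable.Packed.
    //go:nosplit
    func (s *Stat) Packed() bool {
        return s.ATime.Packed() && s.MTime.Packed()
    }
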
-// validateStruct ensures the type we're working with can be marshalled. These
-// checks are done ahead of time and in one place so we can make assumptions
-// later.
-func (g *interfaceGenerator) validateStruct(ts *ast.TypeSpec, st *ast.StructType) {
- forEachStructField(st, func(f *ast.Field) {
- fieldDispatcher{
- primitive: func(_, t *ast.Ident) {
- g.validatePrimitiveNewtype(t)
- },
- selector: func(_, _, _ *ast.Ident) {
- // No validation to perform on selector fields. However this
- // callback must still be provided.
- },
- array: func(n *ast.Ident, a *ast.ArrayType, _ *ast.Ident) {
- g.validateArrayNewtype(n, a)
- },
- unhandled: func(_ *ast.Ident) {
- g.abortAt(f.Pos(), fmt.Sprintf("Marshalling not supported for %s fields", kindString(f.Type)))
- },
- }.dispatch(f)
- })
-}
-
-func (g *interfaceGenerator) isStructPacked(st *ast.StructType) bool {
- packed := true
- forEachStructField(st, func(f *ast.Field) {
- if f.Tag != nil {
- if f.Tag.Value == "`marshal:\"unaligned\"`" {
- if packed {
- debugfAt(g.f.Position(g.t.Pos()),
- fmt.Sprintf("Marking type '%s' as not packed due to tag `marshal:\"unaligned\"`.\n", g.t.Name))
- packed = false
- }
- }
- }
- })
- return packed
-}
-
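A quick illustration of the tag this looks for, on a hypothetical struct (names invented):

    type Header struct {
        Value uint64
        Flag  uint8 `marshal:"unaligned"` // Marks Header as not packed.
    }
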
-func (g *interfaceGenerator) emitMarshallableForStruct(st *ast.StructType) {
- thisPacked := g.isStructPacked(st)
-
- g.emit("// SizeBytes implements marshal.Marshallable.SizeBytes.\n")
- g.emit("func (%s *%s) SizeBytes() int {\n", g.r, g.typeName())
- g.inIndent(func() {
- primitiveSize := 0
- var dynamicSizeTerms []string
-
- forEachStructField(st, fieldDispatcher{
- primitive: func(_, t *ast.Ident) {
- if size, dynamic := g.scalarSize(t); !dynamic {
- primitiveSize += size
- } else {
- g.recordUsedMarshallable(t.Name)
- dynamicSizeTerms = append(dynamicSizeTerms, fmt.Sprintf("(*%s)(nil).SizeBytes()", t.Name))
- }
- },
- selector: func(_, tX, tSel *ast.Ident) {
- tName := fmt.Sprintf("%s.%s", tX.Name, tSel.Name)
- g.recordUsedImport(tX.Name)
- g.recordUsedMarshallable(tName)
- dynamicSizeTerms = append(dynamicSizeTerms, fmt.Sprintf("(*%s)(nil).SizeBytes()", tName))
- },
- array: func(_ *ast.Ident, a *ast.ArrayType, t *ast.Ident) {
- lenExpr := g.arrayLenExpr(a)
- if size, dynamic := g.scalarSize(t); !dynamic {
- dynamicSizeTerms = append(dynamicSizeTerms, fmt.Sprintf("%d*%s", size, lenExpr))
- } else {
- g.recordUsedMarshallable(t.Name)
- dynamicSizeTerms = append(dynamicSizeTerms, fmt.Sprintf("(*%s)(nil).SizeBytes()*%s", t.Name, lenExpr))
- }
- },
- }.dispatch)
- g.emit("return %d", primitiveSize)
- if len(dynamicSizeTerms) > 0 {
- g.incIndent()
- }
- {
- for _, d := range dynamicSizeTerms {
- g.emitNoIndent(" +\n")
- g.emit(d)
- }
- }
- if len(dynamicSizeTerms) > 0 {
- g.decIndent()
- }
- })
- g.emit("\n}\n\n")
-
- g.emit("// MarshalBytes implements marshal.Marshallable.MarshalBytes.\n")
- g.emit("func (%s *%s) MarshalBytes(dst []byte) {\n", g.r, g.typeName())
- g.inIndent(func() {
- forEachStructField(st, fieldDispatcher{
- primitive: func(n, t *ast.Ident) {
- if n.Name == "_" {
- g.emit("// Padding: dst[:sizeof(%s)] ~= %s(0)\n", t.Name, t.Name)
- if len, dynamic := g.scalarSize(t); !dynamic {
- g.shift("dst", len)
- } else {
- // We can't use shiftDynamic here because we don't have
- // an instance of the dynamic type we can reference here
- // (since the version in this struct is anonymous). Use
- // a typed nil pointer to call SizeBytes() instead.
- g.emit("dst = dst[(*%s)(nil).SizeBytes():]\n", t.Name)
- }
- return
- }
- g.marshalScalar(g.fieldAccessor(n), t.Name, "dst")
- },
- selector: func(n, tX, tSel *ast.Ident) {
- if n.Name == "_" {
- g.emit("// Padding: dst[:sizeof(%s)] ~= %s(0)\n", tX.Name, tSel.Name)
- g.emit("dst = dst[(*%s.%s)(nil).SizeBytes():]\n", tX.Name, tSel.Name)
- return
- }
- g.marshalScalar(g.fieldAccessor(n), fmt.Sprintf("%s.%s", tX.Name, tSel.Name), "dst")
- },
- array: func(n *ast.Ident, a *ast.ArrayType, t *ast.Ident) {
- lenExpr := g.arrayLenExpr(a)
- if n.Name == "_" {
- g.emit("// Padding: dst[:sizeof(%s)*%s] ~= [%s]%s{0}\n", t.Name, lenExpr, lenExpr, t.Name)
- if size, dynamic := g.scalarSize(t); !dynamic {
- g.emit("dst = dst[%d*(%s):]\n", size, lenExpr)
- } else {
- // We can't use shiftDynamic here because we don't have
- // an instance of the dynamic type we can reference here
- // (since the version in this struct is anonymous). Use
- // a typed nil pointer to call SizeBytes() instead.
- g.emit("dst = dst[(*%s)(nil).SizeBytes()*(%s):]\n", t.Name, lenExpr)
- }
- return
- }
-
- g.emit("for idx := 0; idx < %s; idx++ {\n", lenExpr)
- g.inIndent(func() {
- g.marshalScalar(fmt.Sprintf("%s[idx]", g.fieldAccessor(n)), t.Name, "dst")
- })
- g.emit("}\n")
- },
- }.dispatch)
- })
- g.emit("}\n\n")
-
- g.emit("// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.\n")
- g.emit("func (%s *%s) UnmarshalBytes(src []byte) {\n", g.r, g.typeName())
- g.inIndent(func() {
- forEachStructField(st, fieldDispatcher{
- primitive: func(n, t *ast.Ident) {
- if n.Name == "_" {
- g.emit("// Padding: var _ %s ~= src[:sizeof(%s)]\n", t.Name, t.Name)
- if len, dynamic := g.scalarSize(t); !dynamic {
- g.shift("src", len)
- } else {
- // We don't have an instance of the dynamic type we can
- // reference here (since the version in this struct is
- // anonymous). Use a typed nil pointer to call
- // SizeBytes() instead.
- g.shiftDynamic("src", fmt.Sprintf("(*%s)(nil)", t.Name))
- g.recordPotentiallyNonPackedField(fmt.Sprintf("(*%s)(nil)", t.Name))
- }
- return
- }
- g.unmarshalScalar(g.fieldAccessor(n), t.Name, "src")
- },
- selector: func(n, tX, tSel *ast.Ident) {
- if n.Name == "_" {
- g.emit("// Padding: %s ~= src[:sizeof(%s.%s)]\n", g.fieldAccessor(n), tX.Name, tSel.Name)
- g.emit("src = src[(*%s.%s)(nil).SizeBytes():]\n", tX.Name, tSel.Name)
- g.recordPotentiallyNonPackedField(fmt.Sprintf("(*%s.%s)(nil)", tX.Name, tSel.Name))
- return
- }
- g.unmarshalScalar(g.fieldAccessor(n), fmt.Sprintf("%s.%s", tX.Name, tSel.Name), "src")
- },
- array: func(n *ast.Ident, a *ast.ArrayType, t *ast.Ident) {
- lenExpr := g.arrayLenExpr(a)
- if n.Name == "_" {
- g.emit("// Padding: ~ copy([%s]%s(%s), src[:sizeof(%s)*%s])\n", lenExpr, t.Name, g.fieldAccessor(n), t.Name, lenExpr)
- if size, dynamic := g.scalarSize(t); !dynamic {
- g.emit("src = src[%d*(%s):]\n", size, lenExpr)
- } else {
- // We can't use shiftDynamic here because we don't have
- // an instance of the dynamic type we can reference here
- // (since the version in this struct is anonymous). Use
- // a typed nil pointer to call SizeBytes() instead.
- g.emit("src = src[(*%s)(nil).SizeBytes()*(%s):]\n", t.Name, lenExpr)
- }
- return
- }
-
- g.emit("for idx := 0; idx < %s; idx++ {\n", lenExpr)
- g.inIndent(func() {
- g.unmarshalScalar(fmt.Sprintf("%s[idx]", g.fieldAccessor(n)), t.Name, "src")
- })
- g.emit("}\n")
- },
- }.dispatch)
- })
- g.emit("}\n\n")
-
- g.emit("// Packed implements marshal.Marshallable.Packed.\n")
- g.emit("//go:nosplit\n")
- g.emit("func (%s *%s) Packed() bool {\n", g.r, g.typeName())
- g.inIndent(func() {
- expr, fieldsMaybePacked := g.areFieldsPackedExpression()
- switch {
- case !thisPacked:
- g.emit("return false\n")
- case fieldsMaybePacked:
- g.emit("return %s\n", expr)
- default:
- g.emit("return true\n")
-
- }
- })
- g.emit("}\n\n")
-
- g.emit("// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.\n")
- g.emit("func (%s *%s) MarshalUnsafe(dst []byte) {\n", g.r, g.typeName())
- g.inIndent(func() {
- fallback := func() {
- g.emit("// Type %s doesn't have a packed layout in memory, fallback to MarshalBytes.\n", g.typeName())
- g.emit("%s.MarshalBytes(dst)\n", g.r)
- }
- if thisPacked {
- g.recordUsedImport("gohacks")
- g.recordUsedImport("unsafe")
- if cond, ok := g.areFieldsPackedExpression(); ok {
- g.emit("if %s {\n", cond)
- g.inIndent(func() {
- g.emit("gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(%s), uintptr(%s.SizeBytes()))\n", g.r, g.r)
- })
- g.emit("} else {\n")
- g.inIndent(fallback)
- g.emit("}\n")
- } else {
- g.emit("gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(%s), uintptr(%s.SizeBytes()))\n", g.r, g.r)
- }
- } else {
- fallback()
- }
- })
- g.emit("}\n\n")
-
- g.emit("// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.\n")
- g.emit("func (%s *%s) UnmarshalUnsafe(src []byte) {\n", g.r, g.typeName())
- g.inIndent(func() {
- fallback := func() {
- g.emit("// Type %s doesn't have a packed layout in memory, fallback to UnmarshalBytes.\n", g.typeName())
- g.emit("%s.UnmarshalBytes(src)\n", g.r)
- }
- if thisPacked {
- g.recordUsedImport("gohacks")
- if cond, ok := g.areFieldsPackedExpression(); ok {
- g.emit("if %s {\n", cond)
- g.inIndent(func() {
- g.emit("gohacks.Memmove(unsafe.Pointer(%s), unsafe.Pointer(&src[0]), uintptr(%s.SizeBytes()))\n", g.r, g.r)
- })
- g.emit("} else {\n")
- g.inIndent(fallback)
- g.emit("}\n")
- } else {
- g.emit("gohacks.Memmove(unsafe.Pointer(%s), unsafe.Pointer(&src[0]), uintptr(%s.SizeBytes()))\n", g.r, g.r)
- }
- } else {
- fallback()
- }
- })
- g.emit("}\n\n")
- g.emit("// CopyOutN implements marshal.Marshallable.CopyOutN.\n")
- g.emit("//go:nosplit\n")
- g.recordUsedImport("marshal")
- g.recordUsedImport("hostarch")
- g.emit("func (%s *%s) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- fallback := func() {
- g.emit("// Type %s doesn't have a packed layout in memory, fall back to MarshalBytes.\n", g.typeName())
- g.emit("buf := cc.CopyScratchBuffer(%s.SizeBytes()) // escapes: okay.\n", g.r)
- g.emit("%s.MarshalBytes(buf) // escapes: fallback.\n", g.r)
- g.emit("return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.\n")
- }
- if thisPacked {
- g.recordUsedImport("reflect")
- g.recordUsedImport("runtime")
- g.recordUsedImport("unsafe")
- if cond, ok := g.areFieldsPackedExpression(); ok {
- g.emit("if !%s {\n", cond)
- g.inIndent(fallback)
- g.emit("}\n\n")
- }
- // Fast serialization.
- g.emitCastToByteSlice(g.r, "buf", fmt.Sprintf("%s.SizeBytes()", g.r))
-
- g.emit("length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.\n")
- g.emitKeepAlive(g.r)
- g.emit("return length, err\n")
- } else {
- fallback()
- }
- })
- g.emit("}\n\n")
-
- g.emit("// CopyOut implements marshal.Marshallable.CopyOut.\n")
- g.emit("//go:nosplit\n")
- g.recordUsedImport("marshal")
- g.recordUsedImport("hostarch")
- g.emit("func (%s *%s) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("return %s.CopyOutN(cc, addr, %s.SizeBytes())\n", g.r, g.r)
- })
- g.emit("}\n\n")
-
- g.emit("// CopyIn implements marshal.Marshallable.CopyIn.\n")
- g.emit("//go:nosplit\n")
- g.recordUsedImport("marshal")
- g.recordUsedImport("hostarch")
- g.emit("func (%s *%s) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- fallback := func() {
- g.emit("// Type %s doesn't have a packed layout in memory, fall back to UnmarshalBytes.\n", g.typeName())
- g.emit("buf := cc.CopyScratchBuffer(%s.SizeBytes()) // escapes: okay.\n", g.r)
- g.emit("length, err := cc.CopyInBytes(addr, buf) // escapes: okay.\n")
- g.emit("// Unmarshal unconditionally. If we had a short copy-in, this results in a\n")
- g.emit("// partially unmarshalled struct.\n")
- g.emit("%s.UnmarshalBytes(buf) // escapes: fallback.\n", g.r)
- g.emit("return length, err\n")
- }
- if thisPacked {
- g.recordUsedImport("reflect")
- g.recordUsedImport("runtime")
- g.recordUsedImport("unsafe")
- if cond, ok := g.areFieldsPackedExpression(); ok {
- g.emit("if !%s {\n", cond)
- g.inIndent(fallback)
- g.emit("}\n\n")
- }
- // Fast deserialization.
- g.emitCastToByteSlice(g.r, "buf", fmt.Sprintf("%s.SizeBytes()", g.r))
-
- g.emit("length, err := cc.CopyInBytes(addr, buf) // escapes: okay.\n")
- g.emitKeepAlive(g.r)
- g.emit("return length, err\n")
- } else {
- fallback()
- }
- })
- g.emit("}\n\n")
-
- g.emit("// WriteTo implements io.WriterTo.WriteTo.\n")
- g.recordUsedImport("io")
- g.emit("func (%s *%s) WriteTo(writer io.Writer) (int64, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- fallback := func() {
- g.emit("// Type %s doesn't have a packed layout in memory, fall back to MarshalBytes.\n", g.typeName())
- g.emit("buf := make([]byte, %s.SizeBytes())\n", g.r)
- g.emit("%s.MarshalBytes(buf)\n", g.r)
- g.emit("length, err := writer.Write(buf)\n")
- g.emit("return int64(length), err\n")
- }
- if thisPacked {
- g.recordUsedImport("reflect")
- g.recordUsedImport("runtime")
- g.recordUsedImport("unsafe")
- if cond, ok := g.areFieldsPackedExpression(); ok {
- g.emit("if !%s {\n", cond)
- g.inIndent(fallback)
- g.emit("}\n\n")
- }
- // Fast serialization.
- g.emitCastToByteSlice(g.r, "buf", fmt.Sprintf("%s.SizeBytes()", g.r))
-
- g.emit("length, err := writer.Write(buf)\n")
- g.emitKeepAlive(g.r)
- g.emit("return int64(length), err\n")
- } else {
- fallback()
- }
- })
- g.emit("}\n\n")
-}
-
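Tying the branches above together: for the hypothetical non-packed Header sketched earlier, every unsafe fast path is skipped and the generated WriteTo, for example, reduces to the fallback:

    // WriteTo implements io.WriterTo.WriteTo.
    func (h *Header) WriteTo(writer io.Writer) (int64, error) {
        // Type Header doesn't have a packed layout in memory, fall back to MarshalBytes.
        buf := make([]byte, h.SizeBytes())
        h.MarshalBytes(buf)
        length, err := writer.Write(buf)
        return int64(length), err
    }
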
-func (g *interfaceGenerator) emitMarshallableSliceForStruct(st *ast.StructType, slice *sliceAPI) {
- thisPacked := g.isStructPacked(st)
-
- if slice.inner {
- abortAt(g.f.Position(slice.comment.Slash), fmt.Sprintf("The ':inner' argument to '+marshal slice:%s:inner' is only applicable to newtypes on primitives. Remove it from this struct declaration.", slice.ident))
- }
-
- g.recordUsedImport("marshal")
- g.recordUsedImport("hostarch")
-
- g.emit("// Copy%sIn copies in a slice of %s objects from the task's memory.\n", slice.ident, g.typeName())
- g.emit("func Copy%sIn(cc marshal.CopyContext, addr hostarch.Addr, dst []%s) (int, error) {\n", slice.ident, g.typeName())
- g.inIndent(func() {
- g.emit("count := len(dst)\n")
- g.emit("if count == 0 {\n")
- g.inIndent(func() {
- g.emit("return 0, nil\n")
- })
- g.emit("}\n")
- g.emit("size := (*%s)(nil).SizeBytes()\n\n", g.typeName())
-
- fallback := func() {
- g.emit("// Type %s doesn't have a packed layout in memory, fall back to UnmarshalBytes.\n", g.typeName())
- g.emit("buf := cc.CopyScratchBuffer(size * count)\n")
- g.emit("length, err := cc.CopyInBytes(addr, buf)\n\n")
-
- g.emit("// Unmarshal as much as possible, even on error. First handle full objects.\n")
- g.emit("limit := length/size\n")
- g.emit("for idx := 0; idx < limit; idx++ {\n")
- g.inIndent(func() {
- g.emit("dst[idx].UnmarshalBytes(buf[size*idx:size*(idx+1)])\n")
- })
- g.emit("}\n\n")
-
- g.emit("// Handle any final partial object. buf is guaranteed to be long enough for the\n")
- g.emit("// final element, but may not contain valid data for the entire range. This may\n")
- g.emit("// result in unmarshalling zero values for some parts of the object.\n")
- g.emit("if length%size != 0 {\n")
- g.inIndent(func() {
- g.emit("idx := limit\n")
- g.emit("dst[idx].UnmarshalBytes(buf[size*idx:size*(idx+1)])\n")
- })
- g.emit("}\n\n")
-
- g.emit("return length, err\n")
- }
- if thisPacked {
- g.recordUsedImport("reflect")
- g.recordUsedImport("runtime")
- g.recordUsedImport("unsafe")
- if _, ok := g.areFieldsPackedExpression(); ok {
- g.emit("if !dst[0].Packed() {\n")
- g.inIndent(fallback)
- g.emit("}\n\n")
- }
- // Fast deserialization.
- g.emitCastSliceToByteSlice("&dst", "buf", "size * count")
-
- g.emit("length, err := cc.CopyInBytes(addr, buf)\n")
- g.emitKeepAlive("dst")
- g.emit("return length, err\n")
- } else {
- fallback()
- }
- })
- g.emit("}\n\n")
-
- g.emit("// Copy%sOut copies a slice of %s objects to the task's memory.\n", slice.ident, g.typeName())
- g.emit("func Copy%sOut(cc marshal.CopyContext, addr hostarch.Addr, src []%s) (int, error) {\n", slice.ident, g.typeName())
- g.inIndent(func() {
- g.emit("count := len(src)\n")
- g.emit("if count == 0 {\n")
- g.inIndent(func() {
- g.emit("return 0, nil\n")
- })
- g.emit("}\n")
- g.emit("size := (*%s)(nil).SizeBytes()\n\n", g.typeName())
-
- fallback := func() {
- g.emit("// Type %s doesn't have a packed layout in memory, fall back to MarshalBytes.\n", g.typeName())
- g.emit("buf := cc.CopyScratchBuffer(size * count)\n")
- g.emit("for idx := 0; idx < count; idx++ {\n")
- g.inIndent(func() {
- g.emit("src[idx].MarshalBytes(buf[size*idx:size*(idx+1)])\n")
- })
- g.emit("}\n")
- g.emit("return cc.CopyOutBytes(addr, buf)\n")
- }
- if thisPacked {
- g.recordUsedImport("reflect")
- g.recordUsedImport("runtime")
- g.recordUsedImport("unsafe")
- if _, ok := g.areFieldsPackedExpression(); ok {
- g.emit("if !src[0].Packed() {\n")
- g.inIndent(fallback)
- g.emit("}\n\n")
- }
- // Fast serialization.
- g.emitCastSliceToByteSlice("&src", "buf", "size * count")
-
- g.emit("length, err := cc.CopyOutBytes(addr, buf)\n")
- g.emitKeepAlive("src")
- g.emit("return length, err\n")
- } else {
- fallback()
- }
- })
- g.emit("}\n\n")
-
- g.emit("// MarshalUnsafe%s is like %s.MarshalUnsafe, but for a []%s.\n", slice.ident, g.typeName(), g.typeName())
- g.emit("func MarshalUnsafe%s(src []%s, dst []byte) (int, error) {\n", slice.ident, g.typeName())
- g.inIndent(func() {
- g.emit("count := len(src)\n")
- g.emit("if count == 0 {\n")
- g.inIndent(func() {
- g.emit("return 0, nil\n")
- })
- g.emit("}\n")
- g.emit("size := (*%s)(nil).SizeBytes()\n\n", g.typeName())
-
- fallback := func() {
- g.emit("// Type %s doesn't have a packed layout in memory, fall back to MarshalBytes.\n", g.typeName())
- g.emit("for idx := 0; idx < count; idx++ {\n")
- g.inIndent(func() {
- g.emit("src[idx].MarshalBytes(dst[size*idx:(size)*(idx+1)])\n")
- })
- g.emit("}\n")
- g.emit("return size * count, nil\n")
- }
- if thisPacked {
- g.recordUsedImport("reflect")
- g.recordUsedImport("runtime")
- g.recordUsedImport("unsafe")
- g.recordUsedImport("gohacks")
- if _, ok := g.areFieldsPackedExpression(); ok {
- g.emit("if !src[0].Packed() {\n")
- g.inIndent(fallback)
- g.emit("}\n\n")
- }
- g.emit("dst = dst[:size*count]\n")
- g.emit("gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(dst)))\n")
- g.emit("return size * count, nil\n")
- } else {
- fallback()
- }
- })
- g.emit("}\n\n")
-
- g.emit("// UnmarshalUnsafe%s is like %s.UnmarshalUnsafe, but for a []%s.\n", slice.ident, g.typeName(), g.typeName())
- g.emit("func UnmarshalUnsafe%s(dst []%s, src []byte) (int, error) {\n", slice.ident, g.typeName())
- g.inIndent(func() {
- g.emit("count := len(dst)\n")
- g.emit("if count == 0 {\n")
- g.inIndent(func() {
- g.emit("return 0, nil\n")
- })
- g.emit("}\n")
- g.emit("size := (*%s)(nil).SizeBytes()\n\n", g.typeName())
-
- fallback := func() {
- g.emit("// Type %s doesn't have a packed layout in memory, fall back to UnmarshalBytes.\n", g.typeName())
- g.emit("for idx := 0; idx < count; idx++ {\n")
- g.inIndent(func() {
- g.emit("dst[idx].UnmarshalBytes(src[size*idx:size*(idx+1)])\n")
- })
- g.emit("}\n")
- g.emit("return size * count, nil\n")
- }
- if thisPacked {
- g.recordUsedImport("gohacks")
- g.recordUsedImport("reflect")
- g.recordUsedImport("runtime")
- if _, ok := g.areFieldsPackedExpression(); ok {
- g.emit("if !dst[0].Packed() {\n")
- g.inIndent(fallback)
- g.emit("}\n\n")
- }
-
- g.emit("src = src[:(size*count)]\n")
- g.emit("gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(src)))\n")
-
- g.emit("return count*size, nil\n")
- } else {
- fallback()
- }
- })
- g.emit("}\n\n")
-}
diff --git a/tools/go_marshal/gomarshal/generator_tests.go b/tools/go_marshal/gomarshal/generator_tests.go
deleted file mode 100644
index 8f93a1de5..000000000
--- a/tools/go_marshal/gomarshal/generator_tests.go
+++ /dev/null
@@ -1,233 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package gomarshal
-
-import (
- "fmt"
- "go/ast"
- "io"
- "strings"
-)
-
-var standardImports = []string{
- "bytes",
- "fmt",
- "reflect",
- "testing",
-
- "gvisor.dev/gvisor/tools/go_marshal/analysis",
-}
-
-var sliceAPIImports = []string{
- "encoding/binary",
- "gvisor.dev/gvisor/pkg/hostarch",
-}
-
-type testGenerator struct {
- sourceBuffer
-
- // The type we're serializing.
- t *ast.TypeSpec
-
- // Receiver argument for generated methods.
- r string
-
- // Imports used by generated code.
- imports *importTable
-
- // Import statement for the package declaring the type we generated code
- // for. We need this to construct test instances for the type, since the
- // tests aren't written in the same package.
- decl *importStmt
-}
-
-func newTestGenerator(t *ast.TypeSpec, r string) *testGenerator {
- g := &testGenerator{
- t: t,
- r: r,
- imports: newImportTable(),
- }
-
- for _, i := range standardImports {
- g.imports.add(i).markUsed()
- }
- // These imports are used if a type requests the slice API. Don't
- // mark them as used by default.
- for _, i := range sliceAPIImports {
- g.imports.add(i)
- }
-
- return g
-}
-
-func (g *testGenerator) typeName() string {
- return g.t.Name.Name
-}
-
-func (g *testGenerator) testFuncName(base string) string {
- return fmt.Sprintf("%s%s", base, strings.Title(g.t.Name.Name))
-}
-
-func (g *testGenerator) inTestFunction(name string, body func()) {
- g.emit("func %s(t *testing.T) {\n", g.testFuncName(name))
- g.inIndent(body)
- g.emit("}\n\n")
-}
-
-func (g *testGenerator) emitTestNonZeroSize() {
- g.inTestFunction("TestSizeNonZero", func() {
- g.emit("var x %v\n", g.typeName())
- g.emit("if x.SizeBytes() == 0 {\n")
- g.inIndent(func() {
- g.emit("t.Fatal(\"Marshallable.SizeBytes() should not return zero\")\n")
- })
- g.emit("}\n")
- })
-}
-
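As an illustration, for a hypothetical type Mode this emits a test along the lines of:

    func TestSizeNonZeroMode(t *testing.T) {
        var x Mode
        if x.SizeBytes() == 0 {
            t.Fatal("Marshallable.SizeBytes() should not return zero")
        }
    }
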
-func (g *testGenerator) emitTestSuspectAlignment() {
- g.inTestFunction("TestSuspectAlignment", func() {
- g.emit("var x %v\n", g.typeName())
- g.emit("analysis.AlignmentCheck(t, reflect.TypeOf(x))\n")
- })
-}
-
-func (g *testGenerator) emitTestMarshalUnmarshalPreservesData() {
- g.inTestFunction("TestSafeMarshalUnmarshalPreservesData", func() {
- g.emit("var x, y, z, yUnsafe, zUnsafe %s\n", g.typeName())
- g.emit("analysis.RandomizeValue(&x)\n\n")
-
- g.emit("buf := make([]byte, x.SizeBytes())\n")
- g.emit("x.MarshalBytes(buf)\n")
- g.emit("bufUnsafe := make([]byte, x.SizeBytes())\n")
- g.emit("x.MarshalUnsafe(bufUnsafe)\n\n")
-
- g.emit("y.UnmarshalBytes(buf)\n")
- g.emit("if !reflect.DeepEqual(x, y) {\n")
- g.inIndent(func() {
- g.emit("t.Fatal(fmt.Sprintf(\"Data corrupted across MarshalBytes/UnmarshalBytes cycle:\\nBefore: %+v\\nAfter: %+v\\n\", x, y))\n")
- })
- g.emit("}\n")
- g.emit("yUnsafe.UnmarshalBytes(bufUnsafe)\n")
- g.emit("if !reflect.DeepEqual(x, yUnsafe) {\n")
- g.inIndent(func() {
- g.emit("t.Fatal(fmt.Sprintf(\"Data corrupted across MarshalUnsafe/UnmarshalBytes cycle:\\nBefore: %+v\\nAfter: %+v\\n\", x, yUnsafe))\n")
- })
- g.emit("}\n\n")
-
- g.emit("z.UnmarshalUnsafe(buf)\n")
- g.emit("if !reflect.DeepEqual(x, z) {\n")
- g.inIndent(func() {
- g.emit("t.Fatal(fmt.Sprintf(\"Data corrupted across MarshalBytes/UnmarshalUnsafe cycle:\\nBefore: %+v\\nAfter: %+v\\n\", x, z))\n")
- })
- g.emit("}\n")
- g.emit("zUnsafe.UnmarshalUnsafe(bufUnsafe)\n")
- g.emit("if !reflect.DeepEqual(x, zUnsafe) {\n")
- g.inIndent(func() {
- g.emit("t.Fatal(fmt.Sprintf(\"Data corrupted across MarshalUnsafe/UnmarshalUnsafe cycle:\\nBefore: %+v\\nAfter: %+v\\n\", x, zUnsafe))\n")
- })
- g.emit("}\n")
- })
-}
-
-func (g *testGenerator) emitTestMarshalUnmarshalSlicePreservesData(slice *sliceAPI) {
- for _, name := range []string{"binary", "hostarch"} {
- if !g.imports.markUsed(name) {
- panic(fmt.Sprintf("Generated test for '%s' referenced a non-existent import with local name '%s'", g.typeName(), name))
- }
- }
-
- g.inTestFunction("TestSafeMarshalUnmarshalSlicePreservesData", func() {
- g.emit("var x, y, yUnsafe [8]%s\n", g.typeName())
- g.emit("analysis.RandomizeValue(&x)\n\n")
- g.emit("size := (*%s)(nil).SizeBytes() * len(x)\n", g.typeName())
- g.emit("buf := bytes.NewBuffer(make([]byte, size))\n")
- g.emit("buf.Reset()\n")
- g.emit("if err := binary.Write(buf, hostarch.ByteOrder, x[:]); err != nil {\n")
- g.inIndent(func() {
- g.emit("t.Fatal(fmt.Sprintf(\"binary.Write failed: %v\", err))\n")
- })
- g.emit("}\n")
- g.emit("bufUnsafe := make([]byte, size)\n")
- g.emit("MarshalUnsafe%s(x[:], bufUnsafe)\n\n", slice.ident)
-
- g.emit("UnmarshalUnsafe%s(y[:], buf.Bytes())\n", slice.ident)
- g.emit("if !reflect.DeepEqual(x, y) {\n")
- g.inIndent(func() {
- g.emit("t.Fatal(fmt.Sprintf(\"Data corrupted across binary.Write/UnmarshalUnsafeSlice cycle:\\nBefore: %+v\\nAfter: %+v\\n\", x, y))\n")
- })
- g.emit("}\n")
- g.emit("UnmarshalUnsafe%s(yUnsafe[:], bufUnsafe)\n", slice.ident)
- g.emit("if !reflect.DeepEqual(x, yUnsafe) {\n")
- g.inIndent(func() {
- g.emit("t.Fatal(fmt.Sprintf(\"Data corrupted across MarshalUnsafeSlice/UnmarshalUnsafeSlice cycle:\\nBefore: %+v\\nAfter: %+v\\n\", x, yUnsafe))\n")
- })
- g.emit("}\n\n")
- })
-}
-
-func (g *testGenerator) emitTestWriteToUnmarshalPreservesData() {
- g.inTestFunction("TestWriteToUnmarshalPreservesData", func() {
- g.emit("var x, y, yUnsafe %s\n", g.typeName())
- g.emit("analysis.RandomizeValue(&x)\n\n")
-
- g.emit("var buf bytes.Buffer\n\n")
-
- g.emit("x.WriteTo(&buf)\n")
- g.emit("y.UnmarshalBytes(buf.Bytes())\n\n")
- g.emit("yUnsafe.UnmarshalUnsafe(buf.Bytes())\n\n")
-
- g.emit("if !reflect.DeepEqual(x, y) {\n")
- g.inIndent(func() {
- g.emit("t.Fatal(fmt.Sprintf(\"Data corrupted across WriteTo/UnmarshalBytes cycle:\\nBefore: %+v\\nAfter: %+v\\n\", x, y))\n")
- })
- g.emit("}\n")
- g.emit("if !reflect.DeepEqual(x, yUnsafe) {\n")
- g.inIndent(func() {
- g.emit("t.Fatal(fmt.Sprintf(\"Data corrupted across WriteTo/UnmarshalUnsafe cycle:\\nBefore: %+v\\nAfter: %+v\\n\", x, yUnsafe))\n")
- })
- g.emit("}\n")
- })
-}
-
-func (g *testGenerator) emitTestSizeBytesOnTypedNilPtr() {
- g.inTestFunction("TestSizeBytesOnTypedNilPtr", func() {
- g.emit("var x %s\n", g.typeName())
- g.emit("sizeFromConcrete := x.SizeBytes()\n")
- g.emit("sizeFromTypedNilPtr := (*%s)(nil).SizeBytes()\n\n", g.typeName())
-
- g.emit("if sizeFromTypedNilPtr != sizeFromConcrete {\n")
- g.inIndent(func() {
- g.emit("t.Fatalf(\"SizeBytes() on typed nil pointer (%v) doesn't match size returned by a concrete object (%v).\\n\", sizeFromTypedNilPtr, sizeFromConcrete)\n")
- })
- g.emit("}\n")
- })
-}
-
-func (g *testGenerator) emitTests(slice *sliceAPI) {
- g.emitTestNonZeroSize()
- g.emitTestSuspectAlignment()
- g.emitTestMarshalUnmarshalPreservesData()
- g.emitTestWriteToUnmarshalPreservesData()
- g.emitTestSizeBytesOnTypedNilPtr()
-
- if slice != nil {
- g.emitTestMarshalUnmarshalSlicePreservesData(slice)
- }
-}
-
-func (g *testGenerator) write(out io.Writer) error {
- return g.sourceBuffer.write(out)
-}
diff --git a/tools/go_marshal/gomarshal/util.go b/tools/go_marshal/gomarshal/util.go
deleted file mode 100644
index 6a42691cd..000000000
--- a/tools/go_marshal/gomarshal/util.go
+++ /dev/null
@@ -1,503 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package gomarshal
-
-import (
- "bytes"
- "flag"
- "fmt"
- "go/ast"
- "go/token"
- "io"
- "os"
- "path"
- "reflect"
- "sort"
- "strings"
-)
-
-var debug = flag.Bool("debug", false, "enables debugging output")
-
-// receiverName returns an appropriate receiver name given a type spec.
-func receiverName(t *ast.TypeSpec) string {
- if len(t.Name.Name) < 1 {
- // Zero length type name?
- panic("unreachable")
- }
- return strings.ToLower(t.Name.Name[:1])
-}
-
-// kindString returns a user-friendly representation of an AST expr type.
-func kindString(e ast.Expr) string {
- switch e.(type) {
- case *ast.Ident:
- return "scalar"
- case *ast.ArrayType:
- return "array"
- case *ast.StructType:
- return "struct"
- case *ast.StarExpr:
- return "pointer"
- case *ast.FuncType:
- return "function"
- case *ast.InterfaceType:
- return "interface"
- case *ast.MapType:
- return "map"
- case *ast.ChanType:
- return "channel"
- default:
- return reflect.TypeOf(e).String()
- }
-}
-
-func forEachStructField(st *ast.StructType, fn func(f *ast.Field)) {
- for _, field := range st.Fields.List {
- fn(field)
- }
-}
-
-// fieldDispatcher is a collection of callbacks for handling different types of
-// fields in a struct declaration.
-type fieldDispatcher struct {
- primitive func(n, t *ast.Ident)
- selector func(n, tX, tSel *ast.Ident)
- array func(n *ast.Ident, a *ast.ArrayType, t *ast.Ident)
- unhandled func(n *ast.Ident)
-}
-
-// Precondition: All dispatch callbacks that will be invoked must be
-// provided.
-func (fd fieldDispatcher) dispatch(f *ast.Field) {
- // Each field declaration may actually be multiple declarations of the same
- // type. For example, consider:
- //
- // type Point struct {
- // x, y, z int
- // }
- //
- // We invoke the call-backs once per such instance.
-
- // Handle embedded fields. Embedded fields have no names, but can be
- // referenced by the type name.
- if len(f.Names) < 1 {
- switch v := f.Type.(type) {
- case *ast.Ident:
- fd.primitive(v, v)
- case *ast.SelectorExpr:
- fd.selector(v.Sel, v.X.(*ast.Ident), v.Sel)
- default:
- // Note: Arrays can't be embedded in Go, so they need no case here; any
- // other unsupported kind falls through to this panic.
- panic(fmt.Sprintf("Attempted to dispatch on embedded field of unsupported kind: %#v", f.Type))
- }
- return
- }
-
- // Non-embedded field.
- for _, name := range f.Names {
- switch v := f.Type.(type) {
- case *ast.Ident:
- fd.primitive(name, v)
- case *ast.SelectorExpr:
- fd.selector(name, v.X.(*ast.Ident), v.Sel)
- case *ast.ArrayType:
- switch t := v.Elt.(type) {
- case *ast.Ident:
- fd.array(name, v, t)
- default:
- // Should be handled with a better error message during validate.
- panic(fmt.Sprintf("Array element type is of unsupported kind. Expected *ast.Ident, got %v", t))
- }
- default:
- fd.unhandled(name)
- }
- }
-}
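For reference, a minimal sketch (assuming it lives inside the gomarshal package; the demoDispatch helper and the sample struct are illustrative, not part of the tool) of how a fieldDispatcher is typically driven through forEachStructField:

package gomarshal

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

// demoDispatch is a hypothetical helper: it parses a small struct and routes
// every field declaration through fieldDispatcher, printing each field's kind.
func demoDispatch() {
	src := `package p

type Point struct {
	x, y int64
	buf  [8]byte
}`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", src, 0)
	if err != nil {
		panic(err)
	}
	st := f.Decls[0].(*ast.GenDecl).Specs[0].(*ast.TypeSpec).Type.(*ast.StructType)

	fd := fieldDispatcher{
		primitive: func(n, t *ast.Ident) { fmt.Printf("%s: primitive %s\n", n.Name, t.Name) },
		selector:  func(n, tX, tSel *ast.Ident) { fmt.Printf("%s: selector %s.%s\n", n.Name, tX.Name, tSel.Name) },
		array:     func(n *ast.Ident, a *ast.ArrayType, t *ast.Ident) { fmt.Printf("%s: array of %s\n", n.Name, t.Name) },
		unhandled: func(n *ast.Ident) { fmt.Printf("%s: unhandled\n", n.Name) },
	}
	// "x, y int64" invokes the primitive callback twice; "buf [8]byte"
	// invokes the array callback once.
	forEachStructField(st, fd.dispatch)
}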
-
-// debugEnabled indicates whether debugging is enabled for gomarshal.
-func debugEnabled() bool {
- return *debug
-}
-
-// abort aborts the go_marshal tool with the given error message.
-func abort(msg string) {
- if !strings.HasSuffix(msg, "\n") {
- msg += "\n"
- }
- fmt.Print(msg)
- os.Exit(1)
-}
-
-// abortAt aborts the go_marshal tool with the given error message, with
-// a reference position to the input source.
-func abortAt(p token.Position, msg string) {
- abort(fmt.Sprintf("%v:\n %s\n", p, msg))
-}
-
-// debugf conditionally prints a debug message.
-func debugf(f string, a ...interface{}) {
- if debugEnabled() {
- fmt.Printf(f, a...)
- }
-}
-
-// debugfAt conditionally prints a debug message with a reference to a position
-// in the input source.
-func debugfAt(p token.Position, f string, a ...interface{}) {
- if debugEnabled() {
- fmt.Printf("%s:\n %s", p, fmt.Sprintf(f, a...))
- }
-}
-
-// emit generates a line of code in the output file.
-//
-// emit is a wrapper around writing a formatted string to the output
-// buffer. emit can be invoked in one of two ways:
-//
-// (1) emit("some string")
-// When emit is called with a single string argument, it is simply copied to
-// the output buffer without any further formatting.
-// (2) emit(fmtString, args...)
-// emit can also be invoked in a similar fashion to *Printf() functions,
-// where the first argument is a format string.
-//
-// Calling emit with a single argument that is not a string will result in a
-// panic, as the caller's intent is ambiguous.
-func emit(out io.Writer, indent int, a ...interface{}) {
- const spacesPerIndentLevel = 4
-
- if len(a) < 1 {
- panic("emit() called with no arguments")
- }
-
- if indent > 0 {
- if _, err := fmt.Fprint(out, strings.Repeat(" ", indent*spacesPerIndentLevel)); err != nil {
- // Writing to the emit output should not fail. Typically the output
- // is a bytes.Buffer; writes to these never fail.
- panic(err)
- }
- }
-
- first, ok := a[0].(string)
- if !ok {
- // First argument must be either the string to emit (case 1 from
- // function-level comment), or a format string (case 2).
- panic(fmt.Sprintf("First argument to emit() is not a string: %+v", a[0]))
- }
-
- if len(a) == 1 {
- // Single string argument. Assume no formatting requested.
- if _, err := fmt.Fprint(out, first); err != nil {
- // Writing to out should not fail.
- panic(err)
- }
- return
- }
-
- // Formatting requested.
- if _, err := fmt.Fprintf(out, first, a[1:]...); err != nil {
- // Writing to out should not fail.
- panic(err)
- }
-}
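A minimal sketch of the two emit() call forms described above, assuming it runs inside the gomarshal package; demoEmit is a hypothetical name:

package gomarshal

import (
	"bytes"
	"fmt"
)

// demoEmit is a hypothetical helper showing both emit() invocation modes.
func demoEmit() {
	var buf bytes.Buffer

	// Case (1): a single string is copied verbatim (after indentation).
	emit(&buf, 1, "type Foo struct {\n")

	// Case (2): Printf-style format string plus arguments.
	emit(&buf, 2, "%s %s\n", "x", "int64")

	emit(&buf, 1, "}\n")

	// With 4 spaces per indent level this prints:
	//     type Foo struct {
	//         x int64
	//     }
	fmt.Print(buf.String())
}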
-
-// sourceBuffer represents fragments of generated go source code.
-//
-// sourceBuffer provides a convenient way to build up Go source fragments in
-// memory. May be safely zero-value initialized. Not thread-safe.
-type sourceBuffer struct {
- // Current indentation level.
- indent int
-
- // Memory buffer containing contents while they're being generated.
- b bytes.Buffer
-}
-
-func (b *sourceBuffer) reset() {
- b.indent = 0
- b.b.Reset()
-}
-
-func (b *sourceBuffer) incIndent() {
- b.indent++
-}
-
-func (b *sourceBuffer) decIndent() {
- if b.indent <= 0 {
- panic("decIndent() without matching incIndent()")
- }
- b.indent--
-}
-
-func (b *sourceBuffer) emit(a ...interface{}) {
- emit(&b.b, b.indent, a...)
-}
-
-func (b *sourceBuffer) emitNoIndent(a ...interface{}) {
- emit(&b.b, 0 /*indent*/, a...)
-}
-
-func (b *sourceBuffer) inIndent(body func()) {
- b.incIndent()
- body()
- b.decIndent()
-}
-
-func (b *sourceBuffer) write(out io.Writer) error {
- _, err := fmt.Fprint(out, b.b.String())
- return err
-}
-
-// Write implements io.Writer.Write.
-func (b *sourceBuffer) Write(buf []byte) (int, error) {
- return (b.b.Write(buf))
-}
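For orientation, a hedged sketch of how generator code typically drives a sourceBuffer (demoSourceBuffer is hypothetical): emit a header line, indent the body with inIndent, then flush with write:

package gomarshal

import "os"

// demoSourceBuffer is a hypothetical helper building a tiny method body.
func demoSourceBuffer() error {
	var b sourceBuffer
	b.emit("// SizeBytes implements marshal.Marshallable.SizeBytes.\n")
	b.emit("func (f *Foo) SizeBytes() int {\n")
	b.inIndent(func() {
		b.emit("return 8\n")
	})
	b.emit("}\n")
	return b.write(os.Stdout)
}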
-
-// importStmt represents a single import statement.
-type importStmt struct {
- // Local name of the imported package.
- name string
- // Import path.
- path string
- // Indicates whether the local name is an alias, or simply the final
- // component of the path.
- aliased bool
- // Indicates whether this import was referenced by generated code.
- used bool
- // AST node and file set representing the import statement, if any. These
- // are only non-nil if the import statement originates from an input source
- // file.
- spec *ast.ImportSpec
- fset *token.FileSet
-}
-
-func newImport(p string) *importStmt {
- name := path.Base(p)
- return &importStmt{
- name: name,
- path: p,
- aliased: false,
- }
-}
-
-func newImportFromSpec(spec *ast.ImportSpec, f *token.FileSet) *importStmt {
- p := spec.Path.Value[1 : len(spec.Path.Value)-1] // Strip the quotes (") around the path.
- name := path.Base(p)
- if name == "" || name == "/" || name == "." {
- panic(fmt.Sprintf("Couldn't process local package name for import at %s, (processed as %s)",
- f.Position(spec.Path.Pos()), name))
- }
- if spec.Name != nil {
- name = spec.Name.Name
- }
- return &importStmt{
- name: name,
- path: p,
- aliased: spec.Name != nil,
- spec: spec,
- fset: f,
- }
-}
-
-// String implements fmt.Stringer.String. This generates a string for the import
-// statement appropriate for writing directly to generated code.
-func (i *importStmt) String() string {
- if i.aliased {
- return fmt.Sprintf("%s %q", i.name, i.path)
- }
- return fmt.Sprintf("%q", i.path)
-}
-
-// debugString returns a debug string representing an import statement. This
-// representation is not valid golang code and is used for debugging output.
-func (i *importStmt) debugString() string {
- if i.spec != nil && i.fset != nil {
- return fmt.Sprintf("%s: %s", i.fset.Position(i.spec.Path.Pos()), i)
- }
- return fmt.Sprintf("(go-marshal import): %s", i)
-}
-
-func (i *importStmt) markUsed() {
- i.used = true
-}
-
-func (i *importStmt) equivalent(other *importStmt) bool {
- return i.name == other.name && i.path == other.path && i.aliased == other.aliased
-}
-
-// importTable represents a collection of importStmts.
-//
-// An importTable may contain multiple import statements referencing the same
-// local name. Import statements that share a local name are ambiguous: if that
-// name is used in the generated code, it's not clear which import statement it
-// refers to. We ignore any potential collisions until actually writing the
-// import table to the generated source file. See importTable.write.
-//
-// Given the following import statements across all the files comprising a
-// package being marshalled:
-//
-// "sync"
-// "pkg/sync"
-// "pkg/sentry/kernel"
-// ktime "pkg/sentry/kernel/time"
-//
-// An importTable representing them would look like this:
-//
-// importTable {
-// is: map[string][]*importStmt {
-// "sync": []*importStmt{
-// importStmt{name:"sync", path:"sync", aliased:false}
-// importStmt{name:"sync", path:"pkg/sync", aliased:false}
-// },
-// "kernel": []*importStmt{importStmt{
-// name: "kernel",
-// path: "pkg/sentry/kernel",
-// aliased: false
-// }},
-// "ktime": []*importStmt{importStmt{
-// name: "ktime",
-// path: "pkg/sentry/kernel/time",
-// aliased: true,
-// }},
-// }
-// }
-//
-// Note that the local name "sync" is assigned to two different import
-// statements. This is possible if the import statements are from different
-// source files in the same package.
-//
-// Since go-marshal generates a single output file per package regardless of the
-// number of input files, if "sync" is referenced by any generated code, it's
-// unclear which import statement "sync" refers to. While it's theoretically
-// possible to resolve this by assigning a unique local alias to each instance
-// of the sync package, go-marshal currently aborts when it encounters such an
-// ambiguity.
-//
-// TODO(b/151478251): importTable considers the final component of an import
-// path to be the package name, but this is only a convention. The actual
-// package name is determined by the package statement in the source files for
-// the package.
-type importTable struct {
- // Map of local import names to the import statements that use them.
- is map[string][]*importStmt
-}
-
-func newImportTable() *importTable {
- return &importTable{
- is: make(map[string][]*importStmt),
- }
-}
-
-// Merges import statements from other into i.
-func (i *importTable) merge(other *importTable) {
- for name, ims := range other.is {
- i.is[name] = append(i.is[name], ims...)
- }
-}
-
-func (i *importTable) addStmt(s *importStmt) *importStmt {
- i.is[s.name] = append(i.is[s.name], s)
- return s
-}
-
-func (i *importTable) add(s string) *importStmt {
- n := newImport(s)
- return i.addStmt(n)
-}
-
-func (i *importTable) addFromSpec(spec *ast.ImportSpec, f *token.FileSet) *importStmt {
- return i.addStmt(newImportFromSpec(spec, f))
-}
-
-// Marks the import named n as used. If no such import is in the table, returns
-// false.
-func (i *importTable) markUsed(n string) bool {
- if ns, ok := i.is[n]; ok {
- for _, n := range ns {
- n.markUsed()
- }
- return true
- }
- return false
-}
-
-func (i *importTable) clear() {
- for _, is := range i.is {
- for _, i := range is {
- i.used = false
- }
- }
-}
-
-func (i *importTable) write(out io.Writer) error {
- if len(i.is) == 0 {
- // Nothing to import, we're done.
- return nil
- }
-
- imports := make([]string, 0, len(i.is))
- for name, is := range i.is {
- var lastUsed *importStmt
- var ambiguous bool
-
- for _, i := range is {
- if i.used {
- if lastUsed != nil {
- if !i.equivalent(lastUsed) {
- ambiguous = true
- }
- }
- lastUsed = i
- }
- }
-
- if ambiguous {
- // We have two or more import statements across the different source
- // files that share a local name, and at least one of these imports is
- // used by the generated code. This ambiguity can't be resolved by
- // go-marshal and requires user intervention. Dump a list of the
- // colliding import statements and let the user modify the input files
- // as appropriate.
- var b strings.Builder
- fmt.Fprintf(&b, "The imported name %q is used by one of the types marked for marshalling, and which import statement the code refers to is ambiguous. Perhaps give the imports unique local names?\n\n", name)
- fmt.Fprintf(&b, "The following %d import statements are ambiguous for the local name %q:\n", len(is), name)
- // Note: len(is) is guaranteed to be at least 1 when ambiguous is true,
- // so the slicing below is safe.
- for _, i := range is[:len(is)-1] {
- fmt.Fprintf(&b, " %v\n", i.debugString())
- }
- fmt.Fprintf(&b, " %v", is[len(is)-1].debugString())
- panic(b.String())
- }
-
- if lastUsed != nil {
- imports = append(imports, lastUsed.String())
- }
- }
- sort.Strings(imports)
-
- var b sourceBuffer
- b.emit("import (\n")
- b.incIndent()
- for _, i := range imports {
- b.emit("%s\n", i)
- }
- b.decIndent()
- b.emit(")\n\n")
-
- return b.write(out)
-}
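To tie the pieces above together, a sketch of the importTable lifecycle during generation, assuming it runs inside the gomarshal package (demoImports is hypothetical); only imports marked used are written out:

package gomarshal

import "os"

// demoImports is a hypothetical helper: register imports, mark the ones the
// generated code actually references, then emit the import block.
func demoImports() error {
	t := newImportTable()
	t.add("fmt")
	t.add("gvisor.dev/gvisor/pkg/hostarch")

	// "fmt" is never marked used, so write drops it.
	t.markUsed("hostarch")

	// Writes:
	//   import (
	//       "gvisor.dev/gvisor/pkg/hostarch"
	//   )
	return t.write(os.Stdout)
}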
diff --git a/tools/go_marshal/main.go b/tools/go_marshal/main.go
deleted file mode 100644
index 6e4a3e8c4..000000000
--- a/tools/go_marshal/main.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// go_marshal is a code generation utility for automatically generating code to
-// marshal Go data structures to memory.
-//
-// This binary is typically run as part of the build process, and is invoked by
-// the go_marshal bazel rule defined in defs.bzl.
-//
-// See README.md.
-package main
-
-import (
- "flag"
- "fmt"
- "os"
- "strings"
-
- "gvisor.dev/gvisor/tools/go_marshal/gomarshal"
-)
-
-var (
- pkg = flag.String("pkg", "", "output package")
- output = flag.String("output", "", "output file")
- outputTest = flag.String("output_test", "", "output file for tests")
- outputTestUnconditional = flag.String("output_test_unconditional", "", "output file for unconditional tests")
- imports = flag.String("imports", "", "comma-separated list of extra packages to import in generated code")
-)
-
-func main() {
- flag.Usage = func() {
- fmt.Fprintf(os.Stderr, "Usage: %s <input go src files>\n", os.Args[0])
- flag.PrintDefaults()
- }
- flag.Parse()
- if len(flag.Args()) == 0 {
- flag.Usage()
- os.Exit(1)
- }
-
- if *pkg == "" {
- flag.Usage()
- fmt.Fprint(os.Stderr, "Flag -pkg must be provided.\n")
- os.Exit(1)
- }
-
- var extraImports []string
- if len(*imports) > 0 {
- // Note: strings.Split(s, sep) returns a one-element slice containing s
- // when sep doesn't occur in s. Thus we check for an empty imports list
- // to avoid emitting an empty string as an import.
- extraImports = strings.Split(*imports, ",")
- }
- g, err := gomarshal.NewGenerator(flag.Args(), *output, *outputTest, *outputTestUnconditional, *pkg, extraImports)
- if err != nil {
- panic(err)
- }
-
- if err := g.Run(); err != nil {
- panic(err)
- }
-}
diff --git a/tools/go_marshal/test/BUILD b/tools/go_marshal/test/BUILD
deleted file mode 100644
index d315be060..000000000
--- a/tools/go_marshal/test/BUILD
+++ /dev/null
@@ -1,52 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-licenses(["notice"])
-
-package_group(
- name = "gomarshal_test",
- packages = [
- "//tools/go_marshal/test/...",
- ],
-)
-
-go_test(
- name = "benchmark_test",
- srcs = ["benchmark_test.go"],
- deps = [
- ":test",
- "//pkg/binary",
- "//pkg/hostarch",
- "//tools/go_marshal/analysis",
- ],
-)
-
-go_library(
- name = "test",
- testonly = 1,
- srcs = [
- "dynamic.go",
- "test.go",
- ],
- marshal = True,
- visibility = ["//tools/go_marshal/test:__subpackages__"],
- deps = [
- "//pkg/marshal/primitive",
- "//tools/go_marshal/test/external",
- ],
-)
-
-go_test(
- name = "marshal_test",
- size = "small",
- srcs = ["marshal_test.go"],
- deps = [
- ":test",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/marshal",
- "//pkg/marshal/primitive",
- "//pkg/usermem",
- "//tools/go_marshal/analysis",
- "@com_github_google_go_cmp//cmp:go_default_library",
- ],
-)
diff --git a/tools/go_marshal/test/benchmark_test.go b/tools/go_marshal/test/benchmark_test.go
deleted file mode 100644
index 16f478ff7..000000000
--- a/tools/go_marshal/test/benchmark_test.go
+++ /dev/null
@@ -1,220 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package benchmark_test
-
-import (
- "bytes"
- encbin "encoding/binary"
- "fmt"
- "reflect"
- "testing"
-
- "gvisor.dev/gvisor/pkg/binary"
- "gvisor.dev/gvisor/pkg/hostarch"
- "gvisor.dev/gvisor/tools/go_marshal/analysis"
- "gvisor.dev/gvisor/tools/go_marshal/test"
-)
-
-// Marshalling using the standard encoding/binary package.
-func BenchmarkEncodingBinary(b *testing.B) {
- var s1, s2 test.Stat
- analysis.RandomizeValue(&s1)
-
- size := encbin.Size(&s1)
-
- b.ResetTimer()
-
- for n := 0; n < b.N; n++ {
- buf := bytes.NewBuffer(make([]byte, size))
- buf.Reset()
- if err := encbin.Write(buf, hostarch.ByteOrder, &s1); err != nil {
- b.Error("Write:", err)
- }
- if err := encbin.Read(buf, hostarch.ByteOrder, &s2); err != nil {
- b.Error("Read:", err)
- }
- }
-
- b.StopTimer()
-
- // Sanity check, make sure the values were preserved.
- if !reflect.DeepEqual(s1, s2) {
- panic(fmt.Sprintf("Data corruption across marshal/unmarshal cycle:\nBefore: %+v\nAfter: %+v\n", s1, s2))
- }
-}
-
-// Marshalling using the sentry's binary.Marshal.
-func BenchmarkBinary(b *testing.B) {
- var s1, s2 test.Stat
- analysis.RandomizeValue(&s1)
-
- size := binary.Size(s1)
-
- b.ResetTimer()
-
- for n := 0; n < b.N; n++ {
- buf := make([]byte, 0, size)
- buf = binary.Marshal(buf, hostarch.ByteOrder, &s1)
- binary.Unmarshal(buf, hostarch.ByteOrder, &s2)
- }
-
- b.StopTimer()
-
- // Sanity check, make sure the values were preserved.
- if !reflect.DeepEqual(s1, s2) {
- panic(fmt.Sprintf("Data corruption across marshal/unmarshal cycle:\nBefore: %+v\nAfter: %+v\n", s1, s2))
- }
-}
-
-// Marshalling field-by-field with manually-written code.
-func BenchmarkMarshalManual(b *testing.B) {
- var s1, s2 test.Stat
- analysis.RandomizeValue(&s1)
-
- b.ResetTimer()
-
- for n := 0; n < b.N; n++ {
- buf := make([]byte, 0, s1.SizeBytes())
-
- // Marshal
- buf = binary.AppendUint64(buf, hostarch.ByteOrder, s1.Dev)
- buf = binary.AppendUint64(buf, hostarch.ByteOrder, s1.Ino)
- buf = binary.AppendUint64(buf, hostarch.ByteOrder, s1.Nlink)
- buf = binary.AppendUint32(buf, hostarch.ByteOrder, s1.Mode)
- buf = binary.AppendUint32(buf, hostarch.ByteOrder, s1.UID)
- buf = binary.AppendUint32(buf, hostarch.ByteOrder, s1.GID)
- buf = binary.AppendUint32(buf, hostarch.ByteOrder, 0)
- buf = binary.AppendUint64(buf, hostarch.ByteOrder, s1.Rdev)
- buf = binary.AppendUint64(buf, hostarch.ByteOrder, uint64(s1.Size))
- buf = binary.AppendUint64(buf, hostarch.ByteOrder, uint64(s1.Blksize))
- buf = binary.AppendUint64(buf, hostarch.ByteOrder, uint64(s1.Blocks))
- buf = binary.AppendUint64(buf, hostarch.ByteOrder, uint64(s1.ATime.Sec))
- buf = binary.AppendUint64(buf, hostarch.ByteOrder, uint64(s1.ATime.Nsec))
- buf = binary.AppendUint64(buf, hostarch.ByteOrder, uint64(s1.MTime.Sec))
- buf = binary.AppendUint64(buf, hostarch.ByteOrder, uint64(s1.MTime.Nsec))
- buf = binary.AppendUint64(buf, hostarch.ByteOrder, uint64(s1.CTime.Sec))
- buf = binary.AppendUint64(buf, hostarch.ByteOrder, uint64(s1.CTime.Nsec))
-
- // Unmarshal
- s2.Dev = hostarch.ByteOrder.Uint64(buf[0:8])
- s2.Ino = hostarch.ByteOrder.Uint64(buf[8:16])
- s2.Nlink = hostarch.ByteOrder.Uint64(buf[16:24])
- s2.Mode = hostarch.ByteOrder.Uint32(buf[24:28])
- s2.UID = hostarch.ByteOrder.Uint32(buf[28:32])
- s2.GID = hostarch.ByteOrder.Uint32(buf[32:36])
- // Padding: buf[36:40]
- s2.Rdev = hostarch.ByteOrder.Uint64(buf[40:48])
- s2.Size = int64(hostarch.ByteOrder.Uint64(buf[48:56]))
- s2.Blksize = int64(hostarch.ByteOrder.Uint64(buf[56:64]))
- s2.Blocks = int64(hostarch.ByteOrder.Uint64(buf[64:72]))
- s2.ATime.Sec = int64(hostarch.ByteOrder.Uint64(buf[72:80]))
- s2.ATime.Nsec = int64(hostarch.ByteOrder.Uint64(buf[80:88]))
- s2.MTime.Sec = int64(hostarch.ByteOrder.Uint64(buf[88:96]))
- s2.MTime.Nsec = int64(hostarch.ByteOrder.Uint64(buf[96:104]))
- s2.CTime.Sec = int64(hostarch.ByteOrder.Uint64(buf[104:112]))
- s2.CTime.Nsec = int64(hostarch.ByteOrder.Uint64(buf[112:120]))
- }
-
- b.StopTimer()
-
- // Sanity check, make sure the values were preserved.
- if !reflect.DeepEqual(s1, s2) {
- panic(fmt.Sprintf("Data corruption across marshal/unmarshal cycle:\nBefore: %+v\nAfter: %+v\n", s1, s2))
- }
-}
-
-// Marshalling with the go_marshal safe API.
-func BenchmarkGoMarshalSafe(b *testing.B) {
- var s1, s2 test.Stat
- analysis.RandomizeValue(&s1)
-
- b.ResetTimer()
-
- for n := 0; n < b.N; n++ {
- buf := make([]byte, s1.SizeBytes())
- s1.MarshalBytes(buf)
- s2.UnmarshalBytes(buf)
- }
-
- b.StopTimer()
-
- // Sanity check, make sure the values were preserved.
- if !reflect.DeepEqual(s1, s2) {
- panic(fmt.Sprintf("Data corruption across marshal/unmarshal cycle:\nBefore: %+v\nAfter: %+v\n", s1, s2))
- }
-}
-
-// Marshalling with the go_marshal unsafe API.
-func BenchmarkGoMarshalUnsafe(b *testing.B) {
- var s1, s2 test.Stat
- analysis.RandomizeValue(&s1)
-
- b.ResetTimer()
-
- for n := 0; n < b.N; n++ {
- buf := make([]byte, s1.SizeBytes())
- s1.MarshalUnsafe(buf)
- s2.UnmarshalUnsafe(buf)
- }
-
- b.StopTimer()
-
- // Sanity check, make sure the values were preserved.
- if !reflect.DeepEqual(s1, s2) {
- panic(fmt.Sprintf("Data corruption across marshal/unmarshal cycle:\nBefore: %+v\nAfter: %+v\n", s1, s2))
- }
-}
-
-func BenchmarkBinarySlice(b *testing.B) {
- var s1, s2 [64]test.Stat
- analysis.RandomizeValue(&s1)
-
- size := binary.Size(s1)
-
- b.ResetTimer()
-
- for n := 0; n < b.N; n++ {
- buf := make([]byte, 0, size)
- buf = binary.Marshal(buf, hostarch.ByteOrder, &s1)
- binary.Unmarshal(buf, hostarch.ByteOrder, &s2)
- }
-
- b.StopTimer()
-
- // Sanity check, make sure the values were preserved.
- if !reflect.DeepEqual(s1, s2) {
- panic(fmt.Sprintf("Data corruption across marshal/unmarshal cycle:\nBefore: %+v\nAfter: %+v\n", s1, s2))
- }
-}
-
-func BenchmarkGoMarshalUnsafeSlice(b *testing.B) {
- var s1, s2 [64]test.Stat
- analysis.RandomizeValue(&s1)
-
- b.ResetTimer()
-
- for n := 0; n < b.N; n++ {
- buf := make([]byte, (*test.Stat)(nil).SizeBytes()*len(s1))
- test.MarshalUnsafeStatSlice(s1[:], buf)
- test.UnmarshalUnsafeStatSlice(s2[:], buf)
- }
-
- b.StopTimer()
-
- // Sanity check, make sure the values were preserved.
- if !reflect.DeepEqual(s1, s2) {
- panic(fmt.Sprintf("Data corruption across marshal/unmarshal cycle:\nBefore: %+v\nAfter: %+v\n", s1, s2))
- }
-}
diff --git a/tools/go_marshal/test/dynamic.go b/tools/go_marshal/test/dynamic.go
deleted file mode 100644
index 9a812efe9..000000000
--- a/tools/go_marshal/test/dynamic.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package test
-
-import "gvisor.dev/gvisor/pkg/marshal/primitive"
-
-// Type12Dynamic is a dynamically sized struct which depends on the
-// autogenerator to generate some Marshallable methods for it.
-//
-// +marshal dynamic
-type Type12Dynamic struct {
- X primitive.Int64
- Y []primitive.Int64
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (t *Type12Dynamic) SizeBytes() int {
- return (len(t.Y) * 8) + t.X.SizeBytes()
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (t *Type12Dynamic) MarshalBytes(dst []byte) {
- t.X.MarshalBytes(dst)
- dst = dst[t.X.SizeBytes():]
- for i, x := range t.Y {
- x.MarshalBytes(dst[i*8 : (i+1)*8])
- }
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (t *Type12Dynamic) UnmarshalBytes(src []byte) {
- t.X.UnmarshalBytes(src)
- if t.Y != nil {
- t.Y = t.Y[:0]
- }
- for i := t.X.SizeBytes(); i < len(src); i += 8 {
- var x primitive.Int64
- x.UnmarshalBytes(src[i:])
- t.Y = append(t.Y, x)
- }
-}
-
-// Type13Dynamic is a dynamically sized struct which depends on the
-// autogenerator to generate some Marshallable methods for it.
-//
-// It represents a string in memory which is preceded by a uint32 indicating
-// the string size.
-//
-// +marshal dynamic
-type Type13Dynamic string
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (t *Type13Dynamic) SizeBytes() int {
- return (*primitive.Uint32)(nil).SizeBytes() + len(*t)
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (t *Type13Dynamic) MarshalBytes(dst []byte) {
- strLen := primitive.Uint32(len(*t))
- strLen.MarshalBytes(dst)
- dst = dst[strLen.SizeBytes():]
- copy(dst[:strLen], *t)
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (t *Type13Dynamic) UnmarshalBytes(src []byte) {
- var strLen primitive.Uint32
- strLen.UnmarshalBytes(src)
- src = src[strLen.SizeBytes():]
- *t = Type13Dynamic(src[:strLen])
-}
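For context, a round trip through the hand-written methods above could look like this hypothetical sketch (demoDynamic is not part of the test package):

package test

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/marshal/primitive"
)

// demoDynamic is a hypothetical helper: marshal a dynamically sized value to
// a buffer sized by SizeBytes, then unmarshal it back.
func demoDynamic() {
	src := Type12Dynamic{X: 42, Y: []primitive.Int64{1, 2, 3}}

	buf := make([]byte, src.SizeBytes()) // 8 bytes for X plus 8 per element of Y.
	src.MarshalBytes(buf)

	var dst Type12Dynamic
	dst.UnmarshalBytes(buf)
	fmt.Printf("%+v\n", dst) // {X:42 Y:[1 2 3]}
}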
diff --git a/tools/go_marshal/test/escape/BUILD b/tools/go_marshal/test/escape/BUILD
deleted file mode 100644
index 62e0b4665..000000000
--- a/tools/go_marshal/test/escape/BUILD
+++ /dev/null
@@ -1,14 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-licenses(["notice"])
-
-go_library(
- name = "escape",
- testonly = 1,
- srcs = ["escape.go"],
- deps = [
- "//pkg/hostarch",
- "//pkg/marshal",
- "//tools/go_marshal/test",
- ],
-)
diff --git a/tools/go_marshal/test/escape/escape.go b/tools/go_marshal/test/escape/escape.go
deleted file mode 100644
index 1ac606862..000000000
--- a/tools/go_marshal/test/escape/escape.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package escape contains test cases for escape analysis.
-package escape
-
-import (
- "gvisor.dev/gvisor/pkg/hostarch"
- "gvisor.dev/gvisor/pkg/marshal"
- "gvisor.dev/gvisor/tools/go_marshal/test"
-)
-
-// dummyCopyContext implements marshal.CopyContext.
-type dummyCopyContext struct {
-}
-
-func (*dummyCopyContext) CopyScratchBuffer(size int) []byte {
- return make([]byte, size)
-}
-
-func (*dummyCopyContext) CopyOutBytes(addr hostarch.Addr, b []byte) (int, error) {
- return len(b), nil
-}
-
-func (*dummyCopyContext) CopyInBytes(addr hostarch.Addr, b []byte) (int, error) {
- return len(b), nil
-}
-
-func (t *dummyCopyContext) MarshalBytes(addr hostarch.Addr, marshallable marshal.Marshallable) {
- buf := t.CopyScratchBuffer(marshallable.SizeBytes())
- marshallable.MarshalBytes(buf)
- t.CopyOutBytes(addr, buf)
-}
-
-func (t *dummyCopyContext) MarshalUnsafe(addr hostarch.Addr, marshallable marshal.Marshallable) {
- buf := t.CopyScratchBuffer(marshallable.SizeBytes())
- marshallable.MarshalUnsafe(buf)
- t.CopyOutBytes(addr, buf)
-}
-
-// +checkescape:all
-//go:nosplit
-func doCopyIn(t *dummyCopyContext) {
- var stat test.Stat
- stat.CopyIn(t, hostarch.Addr(0xf000ba12))
-}
-
-// +checkescape:all
-//go:nosplit
-func doCopyOut(t *dummyCopyContext) {
- var stat test.Stat
- stat.CopyOut(t, hostarch.Addr(0xf000ba12))
-}
-
-// +mustescape:builtin
-// +mustescape:stack
-//go:nosplit
-func doMarshalBytesDirect(t *dummyCopyContext) {
- var stat test.Stat
- buf := t.CopyScratchBuffer(stat.SizeBytes())
- stat.MarshalBytes(buf)
- t.CopyOutBytes(hostarch.Addr(0xf000ba12), buf)
-}
-
-// +mustescape:builtin
-// +mustescape:stack
-//go:nosplit
-func doMarshalUnsafeDirect(t *dummyCopyContext) {
- var stat test.Stat
- buf := t.CopyScratchBuffer(stat.SizeBytes())
- stat.MarshalUnsafe(buf)
- t.CopyOutBytes(hostarch.Addr(0xf000ba12), buf)
-}
-
-// +mustescape:local,heap
-// +mustescape:stack
-//go:nosplit
-func doMarshalBytesViaMarshallable(t *dummyCopyContext) {
- var stat test.Stat
- t.MarshalBytes(hostarch.Addr(0xf000ba12), &stat)
-}
-
-// +mustescape:local,heap
-// +mustescape:stack
-//go:nosplit
-func doMarshalUnsafeViaMarshallable(t *dummyCopyContext) {
- var stat test.Stat
- t.MarshalUnsafe(hostarch.Addr(0xf000ba12), &stat)
-}
diff --git a/tools/go_marshal/test/external/BUILD b/tools/go_marshal/test/external/BUILD
deleted file mode 100644
index 0cf6da603..000000000
--- a/tools/go_marshal/test/external/BUILD
+++ /dev/null
@@ -1,11 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-licenses(["notice"])
-
-go_library(
- name = "external",
- testonly = 1,
- srcs = ["external.go"],
- marshal = True,
- visibility = ["//tools/go_marshal/test:gomarshal_test"],
-)
diff --git a/tools/go_marshal/test/external/external.go b/tools/go_marshal/test/external/external.go
deleted file mode 100644
index 26fe8e0c8..000000000
--- a/tools/go_marshal/test/external/external.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package external defines types we can import for testing.
-package external
-
-// External is a public Marshallable type for use in testing.
-//
-// +marshal
-type External struct {
- j int64
-}
-
-// NotPacked is an unaligned Marshallable type for use in testing.
-//
-// +marshal
-type NotPacked struct {
- a int32
- b byte `marshal:"unaligned"`
-}
diff --git a/tools/go_marshal/test/marshal_test.go b/tools/go_marshal/test/marshal_test.go
deleted file mode 100644
index dec3e84fd..000000000
--- a/tools/go_marshal/test/marshal_test.go
+++ /dev/null
@@ -1,554 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package marshal_test contains manual tests for the marshal interface. These
-// are intended to test behaviour not covered by the automatically generated
-// tests.
-package marshal_test
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "reflect"
- "runtime"
- "testing"
- "unsafe"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/errors/linuxerr"
- "gvisor.dev/gvisor/pkg/hostarch"
- "gvisor.dev/gvisor/pkg/marshal"
- "gvisor.dev/gvisor/pkg/marshal/primitive"
- "gvisor.dev/gvisor/pkg/usermem"
- "gvisor.dev/gvisor/tools/go_marshal/analysis"
- "gvisor.dev/gvisor/tools/go_marshal/test"
-)
-
-var simulatedErr error = linuxerr.EFAULT
-
-// mockCopyContext implements marshal.CopyContext.
-type mockCopyContext struct {
- taskMem usermem.BytesIO
-}
-
-// populate fills the task memory with the contents of val.
-func (t *mockCopyContext) populate(val interface{}) {
- var buf bytes.Buffer
- // Use binary.Write so we aren't testing go-marshal against its own
- // potentially buggy implementation.
- if err := binary.Write(&buf, hostarch.ByteOrder, val); err != nil {
- panic(err)
- }
- t.taskMem.Bytes = buf.Bytes()
-}
-
-func (t *mockCopyContext) setLimit(n int) {
- if len(t.taskMem.Bytes) < n {
- grown := make([]byte, n)
- copy(grown, t.taskMem.Bytes)
- t.taskMem.Bytes = grown
- return
- }
- t.taskMem.Bytes = t.taskMem.Bytes[:n]
-}
-
-// CopyScratchBuffer implements marshal.CopyContext.CopyScratchBuffer.
-func (t *mockCopyContext) CopyScratchBuffer(size int) []byte {
- return make([]byte, size)
-}
-
-// CopyOutBytes implements marshal.CopyContext.CopyOutBytes. The implementation
-// completely ignores the target address and stores a copy of b in its
-// internal buffer, overwriting any previous contents.
-func (t *mockCopyContext) CopyOutBytes(_ hostarch.Addr, b []byte) (int, error) {
- return t.taskMem.CopyOut(nil, 0, b, usermem.IOOpts{})
-}
-
-// CopyInBytes implements marshal.CopyContext.CopyInBytes. The implementation
-// completely ignores the source address and always fills b from the beginning
-// of its internal buffer.
-func (t *mockCopyContext) CopyInBytes(_ hostarch.Addr, b []byte) (int, error) {
- return t.taskMem.CopyIn(nil, 0, b, usermem.IOOpts{})
-}
-
-// unsafeMemory returns the underlying memory for m. The returned slice is only
-// valid for the lifetime of m. The garbage collector isn't aware that the
-// returned slice is related to m, so the caller must ensure m lives long enough.
-func unsafeMemory(m marshal.Marshallable) []byte {
- if !m.Packed() {
- // We can't return a slice pointing to the underlying memory
- // since the layout isn't packed. Allocate a temporary buffer
- // and marshal instead.
- var buf bytes.Buffer
- if err := binary.Write(&buf, hostarch.ByteOrder, m); err != nil {
- panic(err)
- }
- return buf.Bytes()
- }
-
- // reflect.ValueOf(m)
- // .Elem() // Unwrap interface to inner concrete object
- // .Addr() // Pointer value to object
- // .Pointer() // Actual address from the pointer value
- ptr := reflect.ValueOf(m).Elem().Addr().Pointer()
-
- size := m.SizeBytes()
-
- var mem []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&mem))
- hdr.Data = ptr
- hdr.Len = size
- hdr.Cap = size
-
- return mem
-}
-
-// unsafeMemorySlice returns the underlying memory for m. The returned slice is
-// only valid for the lifetime of m. The garbage collector isn't aware that the
-// returned slice is related to m, so the caller must ensure m lives long enough.
-//
-// Precondition: m must be a slice.
-func unsafeMemorySlice(m interface{}, elt marshal.Marshallable) []byte {
- kind := reflect.TypeOf(m).Kind()
- if kind != reflect.Slice {
- panic("unsafeMemorySlice called on non-slice")
- }
-
- if !elt.Packed() {
- // We can't return a slice pointing to the underlying memory
- // since the layout isn't packed. Allocate a temporary buffer
- // and marshal instead.
- var buf bytes.Buffer
- if err := binary.Write(&buf, hostarch.ByteOrder, m); err != nil {
- panic(err)
- }
- return buf.Bytes()
- }
-
- v := reflect.ValueOf(m)
- length := v.Len() * elt.SizeBytes()
-
- var mem []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&mem))
- hdr.Data = v.Pointer() // This is a pointer to the first elem for slices.
- hdr.Len = length
- hdr.Cap = length
-
- return mem
-}
-
-func isZeroes(buf []byte) bool {
- for _, b := range buf {
- if b != 0 {
- return false
- }
- }
- return true
-}
-
-// compareMemory compares the first n bytes of two chunks of memory represented
-// by expected and actual.
-func compareMemory(t *testing.T, expected, actual []byte, n int) {
- t.Logf("Expected (%d): %v (%d) + (%d) %v\n", len(expected), expected[:n], n, len(expected)-n, expected[n:])
- t.Logf("Actual (%d): %v (%d) + (%d) %v\n", len(actual), actual[:n], n, len(actual)-n, actual[n:])
-
- if diff := cmp.Diff(expected[:n], actual[:n]); diff != "" {
- t.Errorf("Memory buffers don't match:\n--- expected only\n+++ actual only\n%v", diff)
- }
-}
-
-// limitedCopyIn populates task memory with src, then unmarshals task memory to
-// dst. The task signals an error at limit bytes during copy-in, which should
-// result in a truncated unmarshalling.
-func limitedCopyIn(t *testing.T, src, dst marshal.Marshallable, limit int) {
- var cc mockCopyContext
- cc.populate(src)
- cc.setLimit(limit)
-
- n, err := dst.CopyIn(&cc, hostarch.Addr(0))
- if n != limit {
- t.Errorf("CopyIn copied unexpected number of bytes, expected %d, got %d", limit, n)
- }
- if err != simulatedErr {
- t.Errorf("CopyIn returned unexpected error, expected %v, got %v", simulatedErr, err)
- }
-
- expectedMem := unsafeMemory(src)
- defer runtime.KeepAlive(src)
- actualMem := unsafeMemory(dst)
- defer runtime.KeepAlive(dst)
-
- compareMemory(t, expectedMem, actualMem, n)
-
- // The bytes after the first n should be zero for actual, since actual
- // was zero-initialized and CopyIn shouldn't have touched them. However,
- // we can only guarantee that nothing past the first n bytes was touched
- // if the layout is packed.
- if dst.Packed() && !isZeroes(actualMem[n:]) {
- t.Errorf("Expected the last %d bytes of copied in object to be zeroes, got %v\n", dst.SizeBytes()-n, actualMem)
- }
-}
-
-// limitedCopyOut marshals src to task memory. The task signals an error at
-// limit bytes during copy-out, which should result in a truncated marshalling.
-func limitedCopyOut(t *testing.T, src marshal.Marshallable, limit int) {
- var cc mockCopyContext
- cc.setLimit(limit)
-
- n, err := src.CopyOut(&cc, hostarch.Addr(0))
- if n != limit {
- t.Errorf("CopyOut copied unexpected number of bytes, expected %d, got %d", limit, n)
- }
- if err != simulatedErr {
- t.Errorf("CopyOut returned unexpected error, expected %v, got %v", simulatedErr, err)
- }
-
- expectedMem := unsafeMemory(src)
- defer runtime.KeepAlive(src)
- actualMem := cc.taskMem.Bytes
-
- compareMemory(t, expectedMem, actualMem, n)
-}
-
-// copyOutN marshals src to task memory, requesting the marshalling to be
-// limited to limit bytes.
-func copyOutN(t *testing.T, src marshal.Marshallable, limit int) {
- var cc mockCopyContext
- cc.setLimit(limit)
-
- n, err := src.CopyOutN(&cc, hostarch.Addr(0), limit)
- if err != nil {
- t.Errorf("CopyOut returned unexpected error: %v", err)
- }
- if n != limit {
- t.Errorf("CopyOut copied unexpected number of bytes, expected %d, got %d", limit, n)
- }
-
- expectedMem := unsafeMemory(src)
- defer runtime.KeepAlive(src)
- actualMem := cc.taskMem.Bytes
-
- t.Logf("Expected: %v + %v\n", expectedMem[:n], expectedMem[n:])
- t.Logf("Actual : %v + %v\n", actualMem[:n], actualMem[n:])
-
- compareMemory(t, expectedMem, actualMem, n)
-}
-
-// TestLimitedMarshalling verifies marshalling/unmarshalling succeeds when the
-// underlying copy in/out operations partially succeed.
-func TestLimitedMarshalling(t *testing.T) {
- types := []reflect.Type{
- // Packed types.
- reflect.TypeOf((*test.Type2)(nil)),
- reflect.TypeOf((*test.Type3)(nil)),
- reflect.TypeOf((*test.Timespec)(nil)),
- reflect.TypeOf((*test.Stat)(nil)),
- reflect.TypeOf((*test.InetAddr)(nil)),
- reflect.TypeOf((*test.SignalSet)(nil)),
- reflect.TypeOf((*test.SignalSetAlias)(nil)),
- // Non-packed types.
- reflect.TypeOf((*test.Type1)(nil)),
- reflect.TypeOf((*test.Type4)(nil)),
- reflect.TypeOf((*test.Type5)(nil)),
- reflect.TypeOf((*test.Type6)(nil)),
- reflect.TypeOf((*test.Type7)(nil)),
- reflect.TypeOf((*test.Type8)(nil)),
- }
-
- for _, tyPtr := range types {
- // Remove one level of pointer-indirection from the type. We get this
- // back when we pass the type to reflect.New.
- ty := tyPtr.Elem()
-
- // Partial copy-in.
- t.Run(fmt.Sprintf("PartialCopyIn_%v", ty), func(t *testing.T) {
- expected := reflect.New(ty).Interface().(marshal.Marshallable)
- actual := reflect.New(ty).Interface().(marshal.Marshallable)
- analysis.RandomizeValue(expected)
-
- limitedCopyIn(t, expected, actual, expected.SizeBytes()/2)
- })
-
- // Partial copy-out.
- t.Run(fmt.Sprintf("PartialCopyOut_%v", ty), func(t *testing.T) {
- expected := reflect.New(ty).Interface().(marshal.Marshallable)
- analysis.RandomizeValue(expected)
-
- limitedCopyOut(t, expected, expected.SizeBytes()/2)
- })
-
- // Explicitly request partial copy-out.
- t.Run(fmt.Sprintf("PartialCopyOutN_%v", ty), func(t *testing.T) {
- expected := reflect.New(ty).Interface().(marshal.Marshallable)
- analysis.RandomizeValue(expected)
-
- copyOutN(t, expected, expected.SizeBytes()/2)
- })
- }
-}
-
-// TestLimitedSliceMarshalling verifies marshalling/unmarshalling of slices of
-// marshallable types succeeds when the underlying copy in/out operations
-// partially succeed.
-func TestLimitedSliceMarshalling(t *testing.T) {
- types := []struct {
- arrayPtrType reflect.Type
- copySliceIn func(cc marshal.CopyContext, addr hostarch.Addr, dstSlice interface{}) (int, error)
- copySliceOut func(cc marshal.CopyContext, addr hostarch.Addr, srcSlice interface{}) (int, error)
- unsafeMemory func(arrPtr interface{}) []byte
- }{
- // Packed types.
- {
- reflect.TypeOf((*[20]test.Stat)(nil)),
- func(cc marshal.CopyContext, addr hostarch.Addr, dst interface{}) (int, error) {
- slice := dst.(*[20]test.Stat)[:]
- return test.CopyStatSliceIn(cc, addr, slice)
- },
- func(cc marshal.CopyContext, addr hostarch.Addr, src interface{}) (int, error) {
- slice := src.(*[20]test.Stat)[:]
- return test.CopyStatSliceOut(cc, addr, slice)
- },
- func(a interface{}) []byte {
- slice := a.(*[20]test.Stat)[:]
- return unsafeMemorySlice(slice, &slice[0])
- },
- },
- {
- reflect.TypeOf((*[1]test.Stat)(nil)),
- func(cc marshal.CopyContext, addr hostarch.Addr, dst interface{}) (int, error) {
- slice := dst.(*[1]test.Stat)[:]
- return test.CopyStatSliceIn(cc, addr, slice)
- },
- func(cc marshal.CopyContext, addr hostarch.Addr, src interface{}) (int, error) {
- slice := src.(*[1]test.Stat)[:]
- return test.CopyStatSliceOut(cc, addr, slice)
- },
- func(a interface{}) []byte {
- slice := a.(*[1]test.Stat)[:]
- return unsafeMemorySlice(slice, &slice[0])
- },
- },
- {
- reflect.TypeOf((*[5]test.SignalSetAlias)(nil)),
- func(cc marshal.CopyContext, addr hostarch.Addr, dst interface{}) (int, error) {
- slice := dst.(*[5]test.SignalSetAlias)[:]
- return test.CopySignalSetAliasSliceIn(cc, addr, slice)
- },
- func(cc marshal.CopyContext, addr hostarch.Addr, src interface{}) (int, error) {
- slice := src.(*[5]test.SignalSetAlias)[:]
- return test.CopySignalSetAliasSliceOut(cc, addr, slice)
- },
- func(a interface{}) []byte {
- slice := a.(*[5]test.SignalSetAlias)[:]
- return unsafeMemorySlice(slice, &slice[0])
- },
- },
- // Non-packed types.
- {
- reflect.TypeOf((*[20]test.Type1)(nil)),
- func(cc marshal.CopyContext, addr hostarch.Addr, dst interface{}) (int, error) {
- slice := dst.(*[20]test.Type1)[:]
- return test.CopyType1SliceIn(cc, addr, slice)
- },
- func(cc marshal.CopyContext, addr hostarch.Addr, src interface{}) (int, error) {
- slice := src.(*[20]test.Type1)[:]
- return test.CopyType1SliceOut(cc, addr, slice)
- },
- func(a interface{}) []byte {
- slice := a.(*[20]test.Type1)[:]
- return unsafeMemorySlice(slice, &slice[0])
- },
- },
- {
- reflect.TypeOf((*[1]test.Type1)(nil)),
- func(cc marshal.CopyContext, addr hostarch.Addr, dst interface{}) (int, error) {
- slice := dst.(*[1]test.Type1)[:]
- return test.CopyType1SliceIn(cc, addr, slice)
- },
- func(cc marshal.CopyContext, addr hostarch.Addr, src interface{}) (int, error) {
- slice := src.(*[1]test.Type1)[:]
- return test.CopyType1SliceOut(cc, addr, slice)
- },
- func(a interface{}) []byte {
- slice := a.(*[1]test.Type1)[:]
- return unsafeMemorySlice(slice, &slice[0])
- },
- },
- {
- reflect.TypeOf((*[7]test.Type8)(nil)),
- func(cc marshal.CopyContext, addr hostarch.Addr, dst interface{}) (int, error) {
- slice := dst.(*[7]test.Type8)[:]
- return test.CopyType8SliceIn(cc, addr, slice)
- },
- func(cc marshal.CopyContext, addr hostarch.Addr, src interface{}) (int, error) {
- slice := src.(*[7]test.Type8)[:]
- return test.CopyType8SliceOut(cc, addr, slice)
- },
- func(a interface{}) []byte {
- slice := a.(*[7]test.Type8)[:]
- return unsafeMemorySlice(slice, &slice[0])
- },
- },
- }
-
- for _, tt := range types {
- // The body of this loop is generic over the type tt.arrayPtrType, with
- // the help of reflection. To aid in readability, comments below show
- // the equivalent go code assuming
- // tt.arrayPtrType = typeof(*[20]test.Stat).
-
- // Equivalent:
- // var x *[20]test.Stat
- // arrayTy := reflect.TypeOf(*x)
- arrayTy := tt.arrayPtrType.Elem()
-
- // Partial copy-in of slices.
- t.Run(fmt.Sprintf("PartialCopySliceIn_%v", arrayTy), func(t *testing.T) {
- // Equivalent:
- // var x [20]test.Stat
- // length := len(x)
- length := arrayTy.Len()
- if length < 1 {
- panic("Test type can't be zero-length array")
- }
- // Equivalent:
- // elem := new(test.Stat).(marshal.Marshallable)
- elem := reflect.New(arrayTy.Elem()).Interface().(marshal.Marshallable)
-
- // Equivalent:
- // var expected, actual interface{}
- // expected = new([20]test.Stat)
- // actual = new([20]test.Stat)
- expected := reflect.New(arrayTy).Interface()
- actual := reflect.New(arrayTy).Interface()
-
- analysis.RandomizeValue(expected)
-
- limit := (length * elem.SizeBytes()) / 2
- // Also make sure the limit is partially inside one of the elements.
- limit += elem.SizeBytes() / 2
- analysis.RandomizeValue(expected)
-
- var cc mockCopyContext
- cc.populate(expected)
- cc.setLimit(limit)
-
- n, err := tt.copySliceIn(&cc, hostarch.Addr(0), actual)
- if n != limit {
- t.Errorf("CopyIn copied unexpected number of bytes, expected %d, got %d", limit, n)
- }
- if n < length*elem.SizeBytes() && err != simulatedErr {
- t.Errorf("CopyIn returned unexpected error, expected %v, got %v", simulatedErr, err)
- }
-
- expectedMem := tt.unsafeMemory(expected)
- defer runtime.KeepAlive(expected)
- actualMem := tt.unsafeMemory(actual)
- defer runtime.KeepAlive(actual)
-
- compareMemory(t, expectedMem, actualMem, n)
-
- // The bytes after the first n should be zero for actual, since actual
- // was zero-initialized and CopyIn shouldn't have touched them. However,
- // we can only guarantee that nothing past the first n bytes was touched
- // if the layout is packed.
- if elem.Packed() && !isZeroes(actualMem[n:]) {
- t.Errorf("Expected the last %d bytes of copied in object to be zeroes, got %v\n", (elem.SizeBytes()*length)-n, actualMem)
- }
- })
-
- // Partial copy-out of slices.
- t.Run(fmt.Sprintf("PartialCopySliceOut_%v", arrayTy), func(t *testing.T) {
- // Equivalent:
- // var x [20]test.Stat
- // length := len(x)
- length := arrayTy.Len()
- if length < 1 {
- panic("Test type can't be zero-length array")
- }
- // Equivalent:
- // elem := new(test.Stat).(marshal.Marshallable)
- elem := reflect.New(arrayTy.Elem()).Interface().(marshal.Marshallable)
-
- // Equivalent:
- // var expected, actual interface{}
- // expected = new([20]test.Stat)
- // actual = new([20]test.Stat)
- expected := reflect.New(arrayTy).Interface()
-
- analysis.RandomizeValue(expected)
-
- limit := (length * elem.SizeBytes()) / 2
- // Also make sure the limit is partially inside one of the elements.
- limit += elem.SizeBytes() / 2
- analysis.RandomizeValue(expected)
-
- var cc mockCopyContext
- cc.populate(expected)
- cc.setLimit(limit)
-
- n, err := tt.copySliceOut(&cc, hostarch.Addr(0), expected)
- if n != limit {
- t.Errorf("CopyIn copied unexpected number of bytes, expected %d, got %d", limit, n)
- }
- if n < length*elem.SizeBytes() && err != simulatedErr {
- t.Errorf("CopyIn returned unexpected error, expected %v, got %v", simulatedErr, err)
- }
-
- expectedMem := tt.unsafeMemory(expected)
- defer runtime.KeepAlive(expected)
- actualMem := cc.taskMem.Bytes
-
- compareMemory(t, expectedMem, actualMem, n)
- })
- }
-}
-
-func TestDynamicTypeStruct(t *testing.T) {
- t12 := test.Type12Dynamic{
- X: 32,
- Y: []primitive.Int64{5, 6, 7},
- }
- var cc mockCopyContext
- cc.setLimit(t12.SizeBytes())
-
- if _, err := t12.CopyOut(&cc, hostarch.Addr(0)); err != nil {
- t.Fatalf("cc.CopyOut faile: %v", err)
- }
-
- res := test.Type12Dynamic{
- Y: make([]primitive.Int64, len(t12.Y)),
- }
- res.CopyIn(&cc, hostarch.Addr(0))
- if !reflect.DeepEqual(t12, res) {
- t.Errorf("dynamic type is not same after marshalling and unmarshalling: before = %+v, after = %+v", t12, res)
- }
-}
-
-func TestDynamicTypeIdentifier(t *testing.T) {
- s := test.Type13Dynamic("go_marshal")
- var cc mockCopyContext
- cc.setLimit(s.SizeBytes())
-
- if _, err := s.CopyOut(&cc, hostarch.Addr(0)); err != nil {
- t.Fatalf("cc.CopyOut faile: %v", err)
- }
-
- res := test.Type13Dynamic(make([]byte, len(s)))
- res.CopyIn(&cc, hostarch.Addr(0))
- if res != s {
- t.Errorf("dynamic type is not same after marshalling and unmarshalling: before = %s, after = %s", s, res)
- }
-}
diff --git a/tools/go_marshal/test/test.go b/tools/go_marshal/test/test.go
deleted file mode 100644
index e7e3ed74a..000000000
--- a/tools/go_marshal/test/test.go
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package test contains data structures for testing the go_marshal tool.
-package test
-
-import (
- // We're intentionally using a package name alias here, even though it's not
- // necessary, to test the code generator's ability to handle package aliases.
- ex "gvisor.dev/gvisor/tools/go_marshal/test/external"
-)
-
-// Type1 is a test data type.
-//
-// +marshal slice:Type1Slice
-type Type1 struct {
- a Type2
- x, y int64 // Multiple field names.
- b byte `marshal:"unaligned"` // Short field.
- c uint64
- _ uint32 // Unnamed scalar field.
- _ [6]byte // Unnamed vector field, typical padding.
- _ [2]byte
- xs [8]int32
- as [10]Type2 `marshal:"unaligned"` // Array of Marshallable objects.
- ss Type3
-}
-
-// Type2 is a test data type.
-//
-// +marshal
-type Type2 struct {
- n int64
- c byte
- _ [7]byte
- m int64
- a int64
-}
-
-// Type3 is a test data type.
-//
-// +marshal
-type Type3 struct {
- s int64
- x ex.External // Type defined in another package.
-}
-
-// Type4 is a test data type.
-//
-// +marshal
-type Type4 struct {
- c byte
- x int64 `marshal:"unaligned"`
- d byte
- _ [7]byte
-}
-
-// Type5 is a test data type.
-//
-// +marshal
-type Type5 struct {
- n int64
- t Type4
- m int64
-}
-
-// Type6 is a test data type that ends mid-word.
-//
-// +marshal
-type Type6 struct {
- a int64
- b int64
- // If c isn't marked unaligned, analysis fails (as it should, since
- // the unsafe API corrupts Type7).
- c byte `marshal:"unaligned"`
-}
-
-// Type7 is a test data type that contains a child struct that ends
-// mid-word.
-//
-// +marshal
-type Type7 struct {
- x Type6
- y int64
-}
-
-// Type8 is a test data type which contains an external non-packed field.
-//
-// +marshal slice:Type8Slice
-type Type8 struct {
- a int64
- np ex.NotPacked
- b int64
-}
-
-// Timespec represents struct timespec in <time.h>.
-//
-// +marshal
-type Timespec struct {
- Sec int64
- Nsec int64
-}
-
-// Stat represents struct stat.
-//
-// +marshal slice:StatSlice
-type Stat struct {
- Dev uint64
- Ino uint64
- Nlink uint64
- Mode uint32
- UID uint32
- GID uint32
- _ int32
- Rdev uint64
- Size int64
- Blksize int64
- Blocks int64
- ATime Timespec
- MTime Timespec
- CTime Timespec
- _ [3]int64
-}
-
-// InetAddr is an example marshallable newtype on an array.
-//
-// +marshal
-type InetAddr [4]byte
-
-// SignalSet is an example marshallable newtype on a primitive.
-//
-// +marshal slice:SignalSetSlice:inner
-type SignalSet uint64
-
-// SignalSetAlias is an example newtype on another marshallable type.
-//
-// +marshal slice:SignalSetAliasSlice
-type SignalSetAlias SignalSet
-
-const sizeA = 64
-const sizeB = 8
-
-// TestArray is a test data structure on an array with a constant length.
-//
-// +marshal
-type TestArray [sizeA]int32
-
-// TestArray2 is a newtype on an array with a simple arithmetic expression of
-// constants for the array length.
-//
-// +marshal
-type TestArray2 [sizeA * sizeB]int32
-
-// TestArray3 is a newtype on an array with a simple arithmetic expression of
-// mixed constants and literals for the array length.
-//
-// +marshal
-type TestArray3 [sizeA*sizeB + 12]int32
-
-// Type9 is a test data type containing an array with a non-literal length.
-//
-// +marshal
-type Type9 struct {
- x int64
- y [sizeA]int32
-}
-
-// Type10Embed is a test data type which can be embedded into another type.
-//
-// +marshal
-type Type10Embed struct {
- x int64
-}
-
-// Type10 is a test data type which contains an embedded struct.
-//
-// +marshal
-type Type10 struct {
- Type10Embed
- y int64
-}
-
-// Type11 is a test data type which contains an embedded struct from an external
-// package.
-//
-// +marshal
-type Type11 struct {
- ex.External
- y int64
-}
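As a reminder of what the +marshal directives above generate for these types, a hedged sketch using only method and helper names exercised elsewhere in this package (demoStat itself is hypothetical):

package test

// demoStat is a hypothetical helper showing the generated Marshallable API on
// Stat and the bulk helper produced by the slice:StatSlice argument.
func demoStat() {
	var s Stat
	s.Dev, s.Ino = 1, 2

	// Safe, byte-by-byte marshalling and unmarshalling.
	buf := make([]byte, s.SizeBytes())
	s.MarshalBytes(buf)

	var out Stat
	out.UnmarshalBytes(buf)

	// Bulk slice helper generated because of "+marshal slice:StatSlice".
	stats := make([]Stat, 4)
	sbuf := make([]byte, (*Stat)(nil).SizeBytes()*len(stats))
	MarshalUnsafeStatSlice(stats, sbuf)
}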
diff --git a/tools/go_stateify/BUILD b/tools/go_stateify/BUILD
deleted file mode 100644
index 913558b4e..000000000
--- a/tools/go_stateify/BUILD
+++ /dev/null
@@ -1,16 +0,0 @@
-load("//tools:defs.bzl", "bzl_library", "go_binary")
-
-package(licenses = ["notice"])
-
-go_binary(
- name = "stateify",
- srcs = ["main.go"],
- visibility = ["//:sandbox"],
- deps = ["//tools/tags"],
-)
-
-bzl_library(
- name = "defs_bzl",
- srcs = ["defs.bzl"],
- visibility = ["//visibility:private"],
-)
diff --git a/tools/go_stateify/defs.bzl b/tools/go_stateify/defs.bzl
deleted file mode 100644
index 6a5e666f0..000000000
--- a/tools/go_stateify/defs.bzl
+++ /dev/null
@@ -1,60 +0,0 @@
-"""Stateify is a tool for generating state wrappers for Go types."""
-
-def _go_stateify_impl(ctx):
- """Implementation for the stateify tool."""
- output = ctx.outputs.out
-
- # Run the stateify command.
- args = ["-output=%s" % output.path]
- args.append("-fullpkg=%s" % ctx.attr.package)
- if ctx.attr._statepkg:
- args.append("-statepkg=%s" % ctx.attr._statepkg)
- if ctx.attr.imports:
- args.append("-imports=%s" % ",".join(ctx.attr.imports))
- args.append("--")
- for src in ctx.attr.srcs:
- args += [f.path for f in src.files.to_list()]
- ctx.actions.run(
- inputs = ctx.files.srcs,
- outputs = [output],
- mnemonic = "GoStateify",
- progress_message = "Generating state library %s" % ctx.label,
- arguments = args,
- executable = ctx.executable._tool,
- )
-
-go_stateify = rule(
- implementation = _go_stateify_impl,
- doc = "Generates save and restore logic from a set of Go files.",
- attrs = {
- "srcs": attr.label_list(
- doc = """
-The input source files. These files should include all structs in the package
-that need to be saved.
-""",
- mandatory = True,
- allow_files = True,
- ),
- "imports": attr.string_list(
- doc = """
-An optional list of extra non-aliased, Go-style absolute import paths required
-for statified types.
-""",
- mandatory = False,
- ),
- "package": attr.string(
- doc = "The fully qualified package name for the input sources.",
- mandatory = True,
- ),
- "out": attr.output(
- doc = "Name of the generator output file.",
- mandatory = True,
- ),
- "_tool": attr.label(
- executable = True,
- cfg = "host",
- default = Label("//tools/go_stateify:stateify"),
- ),
- "_statepkg": attr.string(default = "gvisor.dev/gvisor/pkg/state"),
- },
-)
diff --git a/tools/go_stateify/main.go b/tools/go_stateify/main.go
deleted file mode 100644
index 93022f504..000000000
--- a/tools/go_stateify/main.go
+++ /dev/null
@@ -1,472 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Stateify provides a simple way to generate Load/Save methods based on
-// existing types and struct tags.
-package main
-
-import (
- "flag"
- "fmt"
- "go/ast"
- "go/parser"
- "go/token"
- "os"
- "path/filepath"
- "reflect"
- "strings"
- "sync"
-
- "gvisor.dev/gvisor/tools/tags"
-)
-
-var (
- fullPkg = flag.String("fullpkg", "", "fully qualified output package")
- imports = flag.String("imports", "", "extra imports for the output file")
- output = flag.String("output", "", "output file")
- statePkg = flag.String("statepkg", "", "state import package; defaults to empty")
-)
-
-// resolveTypeName returns the base type name and the fully qualified type name.
-func resolveTypeName(typ ast.Expr) (field string, qualified string) {
- for done := false; !done; {
- // Resolve star expressions.
- switch rs := typ.(type) {
- case *ast.StarExpr:
- qualified += "*"
- typ = rs.X
- case *ast.ArrayType:
- if rs.Len == nil {
- // Slice type declaration.
- qualified += "[]"
- } else {
- // Array type declaration.
- qualified += "[" + rs.Len.(*ast.BasicLit).Value + "]"
- }
- typ = rs.Elt
- default:
- // No more descent.
- done = true
- }
- }
-
- // Resolve a package selector.
- sel, ok := typ.(*ast.SelectorExpr)
- if ok {
- qualified = qualified + sel.X.(*ast.Ident).Name + "."
- typ = sel.Sel
- }
-
- // Figure out actual type name.
- field = typ.(*ast.Ident).Name
- qualified = qualified + field
- return
-}
-
-// extractStateTag pulls the relevant state tag.
-func extractStateTag(tag *ast.BasicLit) string {
- if tag == nil {
- return ""
- }
- if len(tag.Value) < 2 {
- return ""
- }
- return reflect.StructTag(tag.Value[1 : len(tag.Value)-1]).Get("state")
-}
-
-// scanFunctions is a set of functions passed to scanFields.
-type scanFunctions struct {
- zerovalue func(name string)
- normal func(name string)
- wait func(name string)
- value func(name, typName string)
-}
-
-// scanFields scans the fields of a struct.
-//
-// Each provided function will be applied to appropriately tagged fields, or
-// skipped if nil.
-//
-// Fields tagged nosave are skipped.
-func scanFields(ss *ast.StructType, prefix string, fn scanFunctions) {
- if ss.Fields.List == nil {
- // No fields.
- return
- }
-
- // Scan all fields.
- for _, field := range ss.Fields.List {
- // Calculate the name.
- name := ""
- if field.Names != nil {
- // It's a named field; override.
- name = field.Names[0].Name
- } else {
- // Anonymous types can't be embedded, so we don't need
- // to worry about providing a useful name here.
- name, _ = resolveTypeName(field.Type)
- }
-
- // Skip _ fields.
- if name == "_" {
- continue
- }
-
- // Is this an anonymous struct? If so, continue the
- // recursion with the given prefix. We don't pay attention to
- // any tags on the top-level struct field.
- tag := extractStateTag(field.Tag)
- if anon, ok := field.Type.(*ast.StructType); ok && tag == "" {
- scanFields(anon, name+".", fn)
- continue
- }
-
- switch tag {
- case "zerovalue":
- if fn.zerovalue != nil {
- fn.zerovalue(name)
- }
-
- case "":
- if fn.normal != nil {
- fn.normal(name)
- }
-
- case "wait":
- if fn.wait != nil {
- fn.wait(name)
- }
-
- case "manual", "nosave", "ignore":
- // Do nothing.
-
- default:
- if strings.HasPrefix(tag, ".(") && strings.HasSuffix(tag, ")") {
- if fn.value != nil {
- fn.value(name, tag[2:len(tag)-1])
- }
- }
- }
- }
-}
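-
-// As an illustrative sketch (the type and field names below are
-// placeholders, not types from this repository), a struct consumed by this
-// tool might be tagged as follows:
-//
-//   // +stateify savable
-//   type example struct {
-//       count   int                         // saved and loaded normally
-//       queue   waiterQueue `state:"wait"`      // loaded via LoadWait
-//       cache   cacheMap    `state:"nosave"`    // skipped entirely
-//       payload payloadType `state:".(string)"` // saved via savePayload/loadPayload helpers
-//   }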
-
-func camelCased(name string) string {
- return strings.ToUpper(name[:1]) + name[1:]
-}
-
-func main() {
- // Parse flags.
- flag.Usage = func() {
- fmt.Fprintf(os.Stderr, "Usage: %s [options]\n", os.Args[0])
- flag.PrintDefaults()
- }
- flag.Parse()
- if len(flag.Args()) == 0 {
- flag.Usage()
- os.Exit(1)
- }
- if *fullPkg == "" {
- fmt.Fprintf(os.Stderr, "Error: package required.")
- os.Exit(1)
- }
-
- // Open the output file.
- var (
- outputFile *os.File
- err error
- )
- if *output == "" || *output == "-" {
- outputFile = os.Stdout
- } else {
- outputFile, err = os.OpenFile(*output, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
- if err != nil {
- fmt.Fprintf(os.Stderr, "Error opening output %q: %v", *output, err)
- }
- defer outputFile.Close()
- }
-
- // Set the statePrefix for below, depending on the import.
- statePrefix := ""
- if *statePkg != "" {
- parts := strings.Split(*statePkg, "/")
- statePrefix = parts[len(parts)-1] + "."
- }
-
- // initCalls is dumped at the end.
- var initCalls []string
-
- // Common closures.
- emitRegister := func(name string) {
- initCalls = append(initCalls, fmt.Sprintf("%sRegister((*%s)(nil))", statePrefix, name))
- }
-
- // Automated warning.
- fmt.Fprint(outputFile, "// automatically generated by stateify.\n\n")
-
- // Emit build tags.
- if t := tags.Aggregate(flag.Args()); len(t) > 0 {
- fmt.Fprintf(outputFile, "%s\n\n", strings.Join(t.Lines(), "\n"))
- }
-
- // Emit the package name.
- _, pkg := filepath.Split(*fullPkg)
- fmt.Fprintf(outputFile, "package %s\n\n", pkg)
-
- // Emit the imports lazily.
- var once sync.Once
- maybeEmitImports := func() {
- once.Do(func() {
- // Emit the imports.
- fmt.Fprint(outputFile, "import (\n")
- if *statePkg != "" {
- fmt.Fprintf(outputFile, " \"%s\"\n", *statePkg)
- }
- if *imports != "" {
- for _, i := range strings.Split(*imports, ",") {
- fmt.Fprintf(outputFile, " \"%s\"\n", i)
- }
- }
- fmt.Fprint(outputFile, ")\n\n")
- })
- }
-
- files := make([]*ast.File, 0, len(flag.Args()))
-
- // Parse the input files.
- for _, filename := range flag.Args() {
- // Parse the file.
- fset := token.NewFileSet()
- f, err := parser.ParseFile(fset, filename, nil, parser.ParseComments)
- if err != nil {
- // Not a valid input file?
- fmt.Fprintf(os.Stderr, "Input %q can't be parsed: %v\n", filename, err)
- os.Exit(1)
- }
-
- files = append(files, f)
- }
-
- type method struct {
- typeName string
- methodName string
- }
-
- // Search for and add all methods to a set. We auto-detect several
- // different methods (and insert them if we don't find them, in order
- // to ensure that expectations match reality).
- //
- // While we do this, figure out the right receiver name. If there are
- // multiple distinct receivers, then we will just pick the last one.
- simpleMethods := make(map[method]struct{})
- receiverNames := make(map[string]string)
- for _, f := range files {
- // Go over all functions.
- for _, decl := range f.Decls {
- d, ok := decl.(*ast.FuncDecl)
- if !ok {
- continue
- }
- if d.Recv == nil || len(d.Recv.List) != 1 {
- // Not a named method.
- continue
- }
-
- // Save the method and the receiver.
- name, _ := resolveTypeName(d.Recv.List[0].Type)
- simpleMethods[method{
- typeName: name,
- methodName: d.Name.Name,
- }] = struct{}{}
- if len(d.Recv.List[0].Names) > 0 {
- receiverNames[name] = d.Recv.List[0].Names[0].Name
- }
- }
- }
-
- for _, f := range files {
- // Go over all named types.
- for _, decl := range f.Decls {
- d, ok := decl.(*ast.GenDecl)
- if !ok || d.Tok != token.TYPE {
- continue
- }
-
- // Only generate code for types marked "// +stateify
- // savable" in one of the preceding comment lines. If
- // the line is marked "// +stateify type" then only
- // generate type information and register the type.
- if d.Doc == nil {
- continue
- }
- var (
- generateTypeInfo = false
- generateSaverLoader = false
- )
- for _, l := range d.Doc.List {
- if l.Text == "// +stateify savable" {
- generateTypeInfo = true
- generateSaverLoader = true
- break
- }
- if l.Text == "// +stateify type" {
- generateTypeInfo = true
- }
- }
- if !generateTypeInfo && !generateSaverLoader {
- continue
- }
-
- for _, gs := range d.Specs {
- ts := gs.(*ast.TypeSpec)
- recv, ok := receiverNames[ts.Name.Name]
- if !ok {
- // Maybe no methods were defined?
- recv = strings.ToLower(ts.Name.Name[:1])
- }
- switch x := ts.Type.(type) {
- case *ast.StructType:
- maybeEmitImports()
-
- // Record the slot for each field.
- fieldCount := 0
- fields := make(map[string]int)
- emitField := func(name string) {
- fmt.Fprintf(outputFile, " \"%s\",\n", name)
- fields[name] = fieldCount
- fieldCount++
- }
- emitFieldValue := func(name string, _ string) {
- emitField(name)
- }
- emitLoadValue := func(name, typName string) {
- fmt.Fprintf(outputFile, " stateSourceObject.LoadValue(%d, new(%s), func(y interface{}) { %s.load%s(y.(%s)) })\n", fields[name], typName, recv, camelCased(name), typName)
- }
- emitLoad := func(name string) {
- fmt.Fprintf(outputFile, " stateSourceObject.Load(%d, &%s.%s)\n", fields[name], recv, name)
- }
- emitLoadWait := func(name string) {
- fmt.Fprintf(outputFile, " stateSourceObject.LoadWait(%d, &%s.%s)\n", fields[name], recv, name)
- }
- emitSaveValue := func(name, typName string) {
- fmt.Fprintf(outputFile, " var %sValue %s = %s.save%s()\n", name, typName, recv, camelCased(name))
- fmt.Fprintf(outputFile, " stateSinkObject.SaveValue(%d, %sValue)\n", fields[name], name)
- }
- emitSave := func(name string) {
- fmt.Fprintf(outputFile, " stateSinkObject.Save(%d, &%s.%s)\n", fields[name], recv, name)
- }
- emitZeroCheck := func(name string) {
- fmt.Fprintf(outputFile, " if !%sIsZeroValue(&%s.%s) { %sFailf(\"%s is %%#v, expected zero\", &%s.%s) }\n", statePrefix, recv, name, statePrefix, name, recv, name)
- }
-
- // Generate the type name method.
- fmt.Fprintf(outputFile, "func (%s *%s) StateTypeName() string {\n", recv, ts.Name.Name)
- fmt.Fprintf(outputFile, " return \"%s.%s\"\n", *fullPkg, ts.Name.Name)
- fmt.Fprintf(outputFile, "}\n\n")
-
- // Generate the fields method.
- fmt.Fprintf(outputFile, "func (%s *%s) StateFields() []string {\n", recv, ts.Name.Name)
- fmt.Fprintf(outputFile, " return []string{\n")
- scanFields(x, "", scanFunctions{
- normal: emitField,
- wait: emitField,
- value: emitFieldValue,
- })
- fmt.Fprintf(outputFile, " }\n")
- fmt.Fprintf(outputFile, "}\n\n")
-
- // Define beforeSave if a definition was not found. This prevents
- // the code from compiling if a custom beforeSave was defined in a
- // file not provided to this binary and prevents inherited methods
- // from being called multiple times by overriding them.
- if _, ok := simpleMethods[method{
- typeName: ts.Name.Name,
- methodName: "beforeSave",
- }]; !ok && generateSaverLoader {
- fmt.Fprintf(outputFile, "func (%s *%s) beforeSave() {}\n\n", recv, ts.Name.Name)
- }
-
- // Generate the save method.
- //
- // N.B. For historical reasons, we perform the value saves first,
- // and perform the value loads last. There should be no dependency
- // on this specific behavior, but the ability to specify slots
- // allows a manual implementation to be order-dependent.
- if generateSaverLoader {
- fmt.Fprintf(outputFile, "// +checklocksignore\n")
- fmt.Fprintf(outputFile, "func (%s *%s) StateSave(stateSinkObject %sSink) {\n", recv, ts.Name.Name, statePrefix)
- fmt.Fprintf(outputFile, " %s.beforeSave()\n", recv)
- scanFields(x, "", scanFunctions{zerovalue: emitZeroCheck})
- scanFields(x, "", scanFunctions{value: emitSaveValue})
- scanFields(x, "", scanFunctions{normal: emitSave, wait: emitSave})
- fmt.Fprintf(outputFile, "}\n\n")
- }
-
- // Define afterLoad if a definition was not found. We do this for
- // the same reason that we do it for beforeSave.
- _, hasAfterLoad := simpleMethods[method{
- typeName: ts.Name.Name,
- methodName: "afterLoad",
- }]
- if !hasAfterLoad && generateSaverLoader {
- fmt.Fprintf(outputFile, "func (%s *%s) afterLoad() {}\n\n", recv, ts.Name.Name)
- }
-
- // Generate the load method.
- //
- // N.B. See the comment above for the save method.
- if generateSaverLoader {
- fmt.Fprintf(outputFile, "// +checklocksignore\n")
- fmt.Fprintf(outputFile, "func (%s *%s) StateLoad(stateSourceObject %sSource) {\n", recv, ts.Name.Name, statePrefix)
- scanFields(x, "", scanFunctions{normal: emitLoad, wait: emitLoadWait})
- scanFields(x, "", scanFunctions{value: emitLoadValue})
- if hasAfterLoad {
- // The call to afterLoad is made conditionally, because when
- // AfterLoad is called, the object encodes a dependency on
- // referred objects (i.e. fields). This means that afterLoad
- // will not be called until the other afterLoads are called.
- fmt.Fprintf(outputFile, " stateSourceObject.AfterLoad(%s.afterLoad)\n", recv)
- }
- fmt.Fprintf(outputFile, "}\n\n")
- }
-
- // Add to our registration.
- emitRegister(ts.Name.Name)
-
- case *ast.Ident, *ast.SelectorExpr, *ast.ArrayType:
- maybeEmitImports()
-
- // Generate the info methods.
- fmt.Fprintf(outputFile, "func (%s *%s) StateTypeName() string {\n", recv, ts.Name.Name)
- fmt.Fprintf(outputFile, " return \"%s.%s\"\n", *fullPkg, ts.Name.Name)
- fmt.Fprintf(outputFile, "}\n\n")
- fmt.Fprintf(outputFile, "func (%s *%s) StateFields() []string {\n", recv, ts.Name.Name)
- fmt.Fprintf(outputFile, " return nil\n")
- fmt.Fprintf(outputFile, "}\n\n")
-
- // See above.
- emitRegister(ts.Name.Name)
- }
- }
- }
- }
-
- if len(initCalls) > 0 {
- // Emit the init() function.
- fmt.Fprintf(outputFile, "func init() {\n")
- for _, ic := range initCalls {
- fmt.Fprintf(outputFile, " %s\n", ic)
- }
- fmt.Fprintf(outputFile, "}\n")
- }
-}
diff --git a/tools/images.mk b/tools/images.mk
deleted file mode 100644
index 2003da5bd..000000000
--- a/tools/images.mk
+++ /dev/null
@@ -1,169 +0,0 @@
-#!/usr/bin/make -f
-
-# Copyright 2018 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-##
-## Docker image targets.
-##
-## Images used by the tests must also be built and available locally.
-## The canonical test targets defined below will automatically load
-## relevant images. These can be loaded or built manually via these
-## targets.
-##
-## (*) Note that you may provide an ARCH parameter in order to build
-## and load images from an alternate architecture (using qemu). When
-## bazel is run as a server, this has the effect of running a full
-## cross-architecture chain, and can produce cross-compiled binaries.
-##
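-## For example, to load all images for an alternate architecture (a
-## sketch; the architecture value here is only an illustration):
-##
-##   make ARCH=aarch64 load-all-images
-##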
-
-# ARCH is the architecture used for the build. This may be overridden at the
-# command line in order to perform a cross-build (in a limited capacity).
-ARCH := $(shell uname -m)
-ifneq ($(ARCH),$(shell uname -m))
-DOCKER_PLATFORM_ARGS := --platform=$(ARCH)
-else
-DOCKER_PLATFORM_ARGS :=
-endif
-
-# Note that the image prefixes used here must match the image mangling in
-# runsc/testutil.MangleImage. Names are mangled in this way to ensure that all
-# tests are using locally-defined images (that are consistent and idempotent).
-REMOTE_IMAGE_PREFIX ?= gcr.io/gvisor-presubmit
-LOCAL_IMAGE_PREFIX ?= gvisor.dev/images
-ALL_IMAGES := $(subst /,_,$(subst images/,,$(shell find images/ -name Dockerfile -o -name Dockerfile.$(ARCH) | xargs -n 1 dirname | uniq)))
-SUB_IMAGES := $(foreach image,$(ALL_IMAGES),$(if $(findstring _,$(image)),$(image),))
-IMAGE_GROUPS := $(sort $(foreach image,$(SUB_IMAGES),$(firstword $(subst _, ,$(image)))))
-
-define expand_group =
-load-$(1): $$(patsubst $(1)_%, load-$(1)_%, $$(filter $(1)_%,$$(ALL_IMAGES)))
- @
-.PHONY: load-$(1)
-push-$(1): $$(patsubst $(1)_%, push-$(1)_%, $$(filter $(1)_%,$$(ALL_IMAGES)))
- @
-.PHONY: push-$(1)
-endef
-$(foreach group,$(IMAGE_GROUPS),$(eval $(call expand_group,$(group))))
-
-list-all-images: ## List all images.
- @for image in $(ALL_IMAGES); do echo $${image}; done
-.PHONY: list-all-images
-
-load-all-images: ## Load all images.
-load-all-images: $(patsubst %,load-%,$(ALL_IMAGES))
-.PHONY: load-all-images
-
-push-all-images: ## Push all images.
-push-all-images: $(patsubst %,push-%,$(ALL_IMAGES))
-.PHONY: push-all-images
-
-# path and dockerfile are used to extract the relevant path and dockerfile
-# (depending on what's available for the given architecture).
-path = images/$(subst _,/,$(1))
-dockerfile = $$(if [ -f "$(call path,$(1))/Dockerfile.$(ARCH)" ]; then echo Dockerfile.$(ARCH); else echo Dockerfile; fi)
-
-# The tag construct is used to memoize the image generated (see README.md).
-# This scheme is used to enable aggressive caching in a central repository, while
-# ensuring that images will always be sourced using the local files.
-tag = $(shell cd images && find $(subst _,/,$(1)) -type f | sort | xargs -n 1 sha256sum | sha256sum - | cut -c 1-16)
-remote_image = $(REMOTE_IMAGE_PREFIX)/$(subst _,/,$(1))_$(ARCH)
-local_image = $(LOCAL_IMAGE_PREFIX)/$(subst _,/,$(1))
-
-# Include all existing images as targets here.
-#
-# Note that we use a _ for the tag separator, instead of :, as the latter is
-# interpreted by Make, unfortunately. tag_expand expands the generic rules to
-# tag-specific targets. These is needed to provide sensible targets for load
-# below, with caching. Basically, if there is a rule generated here, then the
-# load will be skipped. If there is no load generated here, then the default
-# rule for load will kick in.
-#
-# Note that if this rule does not successfully match, we will simply have
-# additional Docker pull commands that run for all images that are already
-# pulled. No real harm done.
-EXISTING_IMAGES = $(shell docker images --format '{{.Repository}}_{{.Tag}}' | grep -v '<none>')
-define existing_image_rule =
-loaded0_$(1)=load-$$(1): tag-$$(1) # Already available.
-loaded1_$(1)=.PHONY: load-$$(1)
-endef
-$(foreach image, $(EXISTING_IMAGES), $(eval $(call existing_image_rule,$(image))))
-define tag_expand_rule =
-$(eval $(loaded0_$(call remote_image,$(1))_$(call tag,$(1))))
-$(eval $(loaded1_$(call remote_image,$(1))_$(call tag,$(1))))
-endef
-$(foreach image, $(ALL_IMAGES), $(eval $(call tag_expand_rule,$(image))))
-
-# tag tags a local image. This applies both the hash-based tag from above to
-# ensure that caching works as expected, as well as the "latest" tag that is
-# used by the tests.
-local_tag = \
- docker tag $(call remote_image,$(1)):$(call tag,$(1)) $(call local_image,$(1)):$(call tag,$(1)) >&2
-latest_tag = \
- docker tag $(call local_image,$(1)):$(call tag,$(1)) $(call local_image,$(1)) >&2
-tag-%: ## Tag a local image.
- @$(call header,TAG $*)
- @$(call local_tag,$*) && $(call latest_tag,$*)
-
-# pull forces the image to be pulled.
-pull = \
- $(call header,PULL $(1)) && \
- docker pull $(DOCKER_PLATFORM_ARGS) $(call remote_image,$(1)):$(call tag,$(1)) >&2 && \
- $(call local_tag,$(1)) && \
- $(call latest_tag,$(1))
-pull-%: register-cross ## Force a repull of the image.
- @$(call pull,$*)
-
-# rebuild builds the image locally. Only the "remote" tag will be applied. Note that
-# we need to explicitly repull the base layer in order to ensure that the
-# architecture is correct. Note that we use the term "rebuild" here to avoid
-# conflicting with the bazel "build" terminology, which is used elsewhere.
-rebuild = \
- $(call header,REBUILD $(1)) && \
- (T=$$(mktemp -d) && cp -a $(call path,$(1))/* $$T && \
- $(foreach image,$(shell grep FROM "$(call path,$(1))/$(call dockerfile,$(1))" 2>/dev/null | cut -d' ' -f2),docker pull $(DOCKER_PLATFORM_ARGS) $(image) >&2 &&) \
- docker build $(DOCKER_PLATFORM_ARGS) \
- -f "$$T/$(call dockerfile,$(1))" \
- -t "$(call remote_image,$(1)):$(call tag,$(1))" \
- $$T >&2 && \
- rm -rf $$T) && \
- $(call local_tag,$(1)) && \
- $(call latest_tag,$(1))
-rebuild-%: register-cross ## Force rebuild an image locally.
- @$(call rebuild,$*)
-
-# load will either pull the "remote" or build it locally. This is the preferred
-# entrypoint, as it should never fail. The local tag should always be set after
-# this returns (either by the pull or the build).
-load-%: register-cross ## Pull or build an image locally.
- @($(call pull,$*)) || ($(call rebuild,$*))
-
-# push pushes the remote image, after either pulling (to validate that the tag
-# already exists) or building manually. Note that this generic rule will match
-# the fully-expanded remote image tag.
-push-%: load-% ## Push a given image.
- @docker push $(call remote_image,$*):$(call tag,$*) >&2
-
-# register-cross registers the necessary qemu binaries for cross-compilation.
-# This may be used by any target that may execute containers that are not in the
-# native format. Note that this will only apply on the first execution.
-register-cross:
-ifneq ($(ARCH),$(shell uname -m))
-ifeq (,$(wildcard /proc/sys/fs/binfmt_misc/qemu-*))
- @docker run --rm --privileged multiarch/qemu-user-static --reset --persistent yes >&2
-else
- @
-endif
-else
- @
-endif
diff --git a/tools/installers/BUILD b/tools/installers/BUILD
deleted file mode 100644
index d9f9c4c40..000000000
--- a/tools/installers/BUILD
+++ /dev/null
@@ -1,32 +0,0 @@
-# Installers for use by top-level scripts.
-
-package(
- default_visibility = ["//:sandbox"],
- licenses = ["notice"],
-)
-
-sh_binary(
- name = "head",
- srcs = ["head.sh"],
- data = [
- "//runsc",
- ],
-)
-
-sh_binary(
- name = "master",
- srcs = ["master.sh"],
-)
-
-sh_binary(
- name = "containerd",
- srcs = ["containerd.sh"],
-)
-
-sh_binary(
- name = "shim",
- srcs = ["shim.sh"],
- data = [
- "//shim:containerd-shim-runsc-v1",
- ],
-)
diff --git a/tools/installers/containerd.sh b/tools/installers/containerd.sh
deleted file mode 100755
index b8da1fe42..000000000
--- a/tools/installers/containerd.sh
+++ /dev/null
@@ -1,126 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -xeo pipefail
-
-declare -r CONTAINERD_VERSION=${1:-1.3.0}
-declare -r CONTAINERD_MAJOR="$(echo ${CONTAINERD_VERSION} | awk -F '.' '{ print $1; }')"
-declare -r CONTAINERD_MINOR="$(echo ${CONTAINERD_VERSION} | awk -F '.' '{ print $2; }')"
-
-# We're running Go 1.16, but using pre-module containerd and cri-tools.
-export GO111MODULE=off
-
-# Default to an older version of crictl for containerd <= 1.2.
-if [[ "${CONTAINERD_MAJOR}" -eq 1 ]] && [[ "${CONTAINERD_MINOR}" -le 2 ]]; then
- declare -r CRITOOLS_VERSION=${CRITOOLS_VERSION:-1.13.0}
-else
- declare -r CRITOOLS_VERSION=${CRITOOLS_VERSION:-1.18.0}
-fi
-
-# Helper for Go packages below.
-install_helper() {
- declare -r PACKAGE="${1}"
- declare -r TAG="${2}"
-
- # Clone the repository.
- mkdir -p "${GOPATH}"/src/$(dirname "${PACKAGE}") && \
- git clone https://"${PACKAGE}" "${GOPATH}"/src/"${PACKAGE}"
-
- # Checkout and build the repository.
- (cd "${GOPATH}"/src/"${PACKAGE}" && \
- git checkout "${TAG}" && \
- make && \
- make install)
-}
-
-# Figure out where the btrfs headers are.
-#
-# Ubuntu 16.04 has only btrfs-tools, while 18.04 has a transitional package,
-# and later versions no longer have the transitional package.
-source /etc/os-release
-declare BTRFS_DEV
-if [[ "${VERSION_ID%.*}" -le "18" ]]; then
- BTRFS_DEV="btrfs-tools"
-else
- BTRFS_DEV="libbtrfs-dev"
-fi
-readonly BTRFS_DEV
-
-# Install dependencies for the crictl tests.
-while true; do
- if (apt-get update && apt-get install -y \
- "${BTRFS_DEV}" \
- libseccomp-dev); then
- break
- fi
- result=$?
- if [[ $result -ne 100 ]]; then
- exit $result
- fi
-done
-
-# Install containerd & cri-tools.
-declare -rx GOPATH=$(mktemp -d --tmpdir gopathXXXXX)
-install_helper github.com/containerd/containerd "v${CONTAINERD_VERSION}"
-install_helper github.com/kubernetes-sigs/cri-tools "v${CRITOOLS_VERSION}"
-
-# Configure containerd-shim.
-declare -r shim_config_path=/etc/containerd/runsc/config.toml
-mkdir -p $(dirname ${shim_config_path})
-cat > ${shim_config_path} <<-EOF
-log_path = "/tmp/shim-logs/"
-log_level = "debug"
-
-[runsc_config]
- debug = "true"
- debug-log = "/tmp/runsc-logs/"
- strace = "true"
- file-access = "shared"
-EOF
-
-# Configure CNI.
-(cd "${GOPATH}" && src/github.com/containerd/containerd/script/setup/install-cni)
-cat <<EOF | sudo tee /etc/cni/net.d/10-bridge.conf
-{
- "cniVersion": "0.3.1",
- "name": "bridge",
- "type": "bridge",
- "bridge": "cnio0",
- "isGateway": true,
- "ipMasq": true,
- "ipam": {
- "type": "host-local",
- "ranges": [
- [{"subnet": "10.200.0.0/24"}]
- ],
- "routes": [{"dst": "0.0.0.0/0"}]
- }
-}
-EOF
-cat <<EOF | sudo tee /etc/cni/net.d/99-loopback.conf
-{
- "cniVersion": "0.3.1",
- "type": "loopback"
-}
-EOF
-
-# Configure crictl.
-cat <<EOF | sudo tee /etc/crictl.yaml
-runtime-endpoint: unix:///run/containerd/containerd.sock
-EOF
-
-# Cleanup.
-rm -rf "${GOPATH}"
diff --git a/tools/installers/head.sh b/tools/installers/head.sh
deleted file mode 100755
index a613fcb5b..000000000
--- a/tools/installers/head.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Install our runtime.
-runfiles=.
-if [[ -d "$0.runfiles" ]]; then
- runfiles="$0.runfiles"
-fi
-$(find -L "${runfiles}" -executable -type f -name runsc) install
-
-# Restart docker.
-if service docker status 2>/dev/null; then
- service docker restart
-fi
diff --git a/tools/installers/images.sh b/tools/installers/images.sh
deleted file mode 100755
index 52e750f57..000000000
--- a/tools/installers/images.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-
-# Copyright 2020 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -xeuo pipefail
-
-# Find the images directory.
-for images in $(find . -type d -name images); do
- if [[ -f "${images}"/Makefile ]]; then
- make -C "${images}" load-all-images
- fi
-done
diff --git a/tools/installers/master.sh b/tools/installers/master.sh
deleted file mode 100755
index 2c6001c6c..000000000
--- a/tools/installers/master.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Install runsc from the master branch.
-set -e
-
-curl -fsSL https://gvisor.dev/archive.key | sudo apt-key add -
-add-apt-repository "deb https://storage.googleapis.com/gvisor/releases release main"
-
-while true; do
- if (apt-get update && apt-get install -y runsc); then
- break
- fi
- result=$?
- if [[ $result -ne 100 ]]; then
- exit $result
- fi
-done
-
-runsc install
-service docker restart
diff --git a/tools/installers/shim.sh b/tools/installers/shim.sh
deleted file mode 100755
index 9af50b5c7..000000000
--- a/tools/installers/shim.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Install all the shims.
-#
-# Note that containerd looks in the directory of its own executable
-# in order to find the shim binary. So we need to check in order
-# of preference. The local containerd installer will install to
-# /usr/local, so we use that first.
-if [[ -x /usr/local/bin/containerd ]]; then
- containerd_install_dir=/usr/local/bin
-else
- containerd_install_dir=/usr/bin
-fi
-runfiles=.
-if [[ -d "$0.runfiles" ]]; then
- runfiles="$0.runfiles"
-fi
-find -L "${runfiles}" -executable -type f -name containerd-shim-runsc-v1 -exec cp -L {} "${containerd_install_dir}" \;
diff --git a/tools/make_apt.sh b/tools/make_apt.sh
deleted file mode 100755
index 935c4db2d..000000000
--- a/tools/make_apt.sh
+++ /dev/null
@@ -1,152 +0,0 @@
-#!/bin/bash
-
-# Copyright 2018 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-if [[ "$#" -le 3 ]]; then
- echo "usage: $0 <private-key> <suite> <root> <packages...>"
- exit 1
-fi
-declare private_key
-declare suite
-declare root
-private_key="$(readlink -e "$1")"
-suite="$2"
-root="$(readlink -m "$3")"
-readonly private_key
-readonly suite
-readonly root
-shift; shift; shift # For "$@" below.
-
-# Ensure that we have the correct packages installed.
-function apt_install() {
- while true; do
- sudo apt-get update &&
- sudo apt-get install -y "$@" &&
- true
- result="${?}"
- case $result in
- 0)
- break
- ;;
- 100)
- # 100 is the error code that apt-get returns.
- ;;
- *)
- exit $result
- ;;
- esac
- done
-}
-dpkg-sig --help >/dev/null 2>&1 || apt_install dpkg-sig
-apt-ftparchive --help >/dev/null 2>&1 || apt_install apt-utils
-xz --help >/dev/null 2>&1 || apt_install xz-utils
-
-# Verbose from this point.
-set -xeo pipefail
-
-# Create a directory for the release.
-declare -r release="${root}/dists/${suite}"
-mkdir -p "${release}"
-
-# Create a temporary keyring, and ensure it is cleaned up.
-# Using a separate homedir allows us to install apt repositories multiple times
-# using the same key. This is a limitation in GnuPG pre-2.1.
-declare keyring
-declare homedir
-declare gpg_opts
-keyring="$(mktemp /tmp/keyringXXXXXX.gpg)"
-homedir="$(mktemp -d /tmp/homedirXXXXXX)"
-gpg_opts=("--no-default-keyring" "--secret-keyring" "${keyring}" "--homedir" "${homedir}")
-readonly keyring
-readonly homedir
-readonly gpg_opts
-cleanup() {
- rm -rf "${keyring}" "${homedir}"
-}
-trap cleanup EXIT
-
-# We attempt the import twice because the first one will fail if the public key
-# is not found. This isn't actually a failure for us, because we don't require
-# the public key (this may be stored separately). The second import will succeed
-# because, in reality, the first import succeeded and it's a no-op.
-gpg "${gpg_opts[@]}" --import "${private_key}" || \
- gpg "${gpg_opts[@]}" --import "${private_key}"
-
-# Copy the packages into the root.
-for pkg in "$@"; do
- if ! [[ -f "${pkg}" ]]; then
- continue
- fi
- ext=${pkg##*.}
- if [[ "${ext}" != "deb" ]]; then
- continue
- fi
-
- # Extract package information.
- name=$(basename "${pkg}" ".${ext}")
- arch=$(dpkg --info "${pkg}" | grep 'Architecture:' | cut -d':' -f2)
- version=$(dpkg --info "${pkg}" | grep 'Version:' | cut -d':' -f2)
- arch=${arch// /} # Trim whitespace.
- version=${version// /} # Ditto.
- destdir="${root}/pool/${version}/binary-${arch}"
-
- # Copy & sign the package.
- mkdir -p "${destdir}"
- cp -a -L "$(dirname "${pkg}")/${name}.deb" "${destdir}"
- cp -a -L "$(dirname "${pkg}")/${name}.changes" "${destdir}"
- chmod 0644 "${destdir}"/"${name}".*
- # Sign a package only if it isn't signed yet.
- # We use [*] here to expand the gpg_opts array into a single shell-word.
- dpkg-sig -g "${gpg_opts[*]}" --verify "${destdir}/${name}.deb" ||
- dpkg-sig -g "${gpg_opts[*]}" --sign builder "${destdir}/${name}.deb"
-done
-
-# Build the package list.
-declare arches=()
-for dir in "${root}"/pool/*/binary-*; do
- name=$(basename "${dir}")
- arch=${name##binary-}
- arches+=("${arch}")
- repo_packages="${release}"/main/"${name}"
- mkdir -p "${repo_packages}"
- (cd "${root}" && apt-ftparchive packages "${dir##${root}/}" > "${repo_packages}"/Packages)
- if ! [[ -s "${repo_packages}"/Packages ]]; then
- echo "Packages file is size zero." >&2
- exit 1
- fi
- (cd "${repo_packages}" && cat Packages | gzip > Packages.gz)
- (cd "${repo_packages}" && cat Packages | xz > Packages.xz)
-done
-
-# Build the release list.
-cat > "${release}"/apt.conf <<EOF
-APT {
- FTPArchive {
- Release {
- Architectures "${arches[@]}";
- Suite "${suite}";
- Components "main";
- };
- };
-};
-EOF
-(cd "${release}" && apt-ftparchive -c=apt.conf release . > Release)
-rm "${release}"/apt.conf
-
-# Sign the release.
-declare -r digest_opts=("--digest-algo" "SHA512" "--cert-digest-algo" "SHA512")
-(cd "${release}" && rm -f Release.gpg InRelease)
-(cd "${release}" && gpg "${gpg_opts[@]}" --clearsign "${digest_opts[@]}" -o InRelease Release)
-(cd "${release}" && gpg "${gpg_opts[@]}" -abs "${digest_opts[@]}" -o Release.gpg Release)
diff --git a/tools/make_release.sh b/tools/make_release.sh
deleted file mode 100755
index e125c7e96..000000000
--- a/tools/make_release.sh
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/bin/bash
-
-# Copyright 2018 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-if [[ "$#" -le 2 ]]; then
- echo "usage: $0 <private-key> <root> <binaries & packages...>"
- echo "The environment variable NIGHTLY may be set to control"
- echo "whether the nightly packages are produced or not."
- exit 1
-fi
-
-set -xeo pipefail
-declare -r private_key="$1"
-shift
-declare -r root="$1"
-shift
-declare -a binaries
-declare -a pkgs
-
-# Collect binaries & packages.
-for arg in "$@"; do
- if [[ "${arg}" == *.deb ]] || [[ "${arg}" == *.changes ]]; then
- pkgs+=("${arg}")
- else
- binaries+=("${arg}")
- fi
-done
-
-# install_raw installs raw artifacts.
-install_raw() {
- for binary in "${binaries[@]}"; do
- # Copy the raw file & generate a sha512sum, sorted by architecture.
- arch=$(file "${binary}" | cut -d',' -f2 | awk '{print $NF}' | tr '-' '_')
- name=$(basename "${binary}")
- mkdir -p "${root}/$1/${arch}"
- cp -f "${binary}" "${root}/$1/${arch}"
- (cd "${root}/$1/${arch}" && sha512sum "${name}" >"${name}.sha512")
- done
-}
-
-# install_apt installs an apt repository.
-install_apt() {
- tools/make_apt.sh "${private_key}" "$1" "${root}" "${pkgs[@]}"
-}
-
-# If nightly, install only nightly artifacts.
-if [[ "${NIGHTLY:-false}" == "true" ]]; then
- # Install the nightly release.
- # https://gvisor.dev/docs/user_guide/install/#nightly
- stamp="$(date -Idate)"
- install_raw "nightly/latest"
- install_raw "nightly/${stamp}"
- install_apt "nightly"
-else
- # Is it a tagged release? Build that.
- tags="$(git tag --points-at HEAD 2>/dev/null || true)"
- if ! [[ -z "${tags}" ]]; then
- # Note that a given commit can match any number of tags. We have to iterate
- # through all possible tags and produce associated artifacts.
- for tag in ${tags}; do
- name=$(echo "${tag}" | cut -d'-' -f2)
- base=$(echo "${name}" | cut -d'.' -f1)
- # Install the "specific" release. This is the latest release with the
- # given date.
- # https://gvisor.dev/docs/user_guide/install/#specific-release
- install_raw "release/${base}"
- # Install the "point release".
- # https://gvisor.dev/docs/user_guide/install/#point-release
- install_raw "release/${name}"
- # Install the latest release.
- # https://gvisor.dev/docs/user_guide/install/#latest-release
- install_raw "release/latest"
-
- install_apt "release"
- install_apt "${base}"
- done
- else
- # Otherwise, assume it is a raw master commit.
- # https://gvisor.dev/docs/user_guide/install/#head
- install_raw "master/latest"
- install_apt "master"
- fi
-fi
diff --git a/tools/nogo/BUILD b/tools/nogo/BUILD
deleted file mode 100644
index a7e280b32..000000000
--- a/tools/nogo/BUILD
+++ /dev/null
@@ -1,80 +0,0 @@
-load("//tools:defs.bzl", "bzl_library", "go_library", "select_goarch", "select_goos")
-load("//tools/nogo:defs.bzl", "nogo_objdump_tool", "nogo_stdlib", "nogo_target")
-
-package(licenses = ["notice"])
-
-exports_files(["config-schema.json"])
-
-nogo_target(
- name = "target",
- goarch = select_goarch(),
- goos = select_goos(),
- visibility = ["//visibility:public"],
-)
-
-nogo_objdump_tool(
- name = "objdump_tool",
- visibility = ["//visibility:public"],
-)
-
-nogo_stdlib(
- name = "stdlib",
- visibility = ["//visibility:public"],
-)
-
-go_library(
- name = "nogo",
- srcs = [
- "analyzers.go",
- "build.go",
- "config.go",
- "findings.go",
- "nogo.go",
- ],
- nogo = False,
- visibility = ["//:sandbox"],
- deps = [
- "//tools/checkescape",
- "//tools/checklocks",
- "//tools/checkunsafe",
- "//tools/nogo/objdump",
- "//tools/worker",
- "@co_honnef_go_tools//staticcheck:go_default_library",
- "@co_honnef_go_tools//stylecheck:go_default_library",
- "@org_golang_x_tools//go/analysis:go_default_library",
- "@org_golang_x_tools//go/analysis/internal/facts:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/asmdecl:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/assign:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/atomic:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/bools:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/buildtag:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/cgocall:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/composite:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/copylock:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/errorsas:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/httpresponse:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/loopclosure:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/lostcancel:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/nilfunc:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/nilness:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/printf:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/shadow:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/shift:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/stdmethods:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/stringintconv:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/structtag:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/tests:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/unmarshal:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/unreachable:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/unsafeptr:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/unusedresult:go_default_library",
- "@org_golang_x_tools//go/gcexportdata:go_default_library",
- "@org_golang_x_tools//go/types/objectpath:go_default_library",
- ],
-)
-
-bzl_library(
- name = "defs_bzl",
- srcs = ["defs.bzl"],
- visibility = ["//visibility:private"],
-)
diff --git a/tools/nogo/README.md b/tools/nogo/README.md
deleted file mode 100644
index 6e4db18de..000000000
--- a/tools/nogo/README.md
+++ /dev/null
@@ -1,31 +0,0 @@
-# Extended "nogo" analysis
-
-This package provides a build aspect that performs nogo analysis. This will be
-automatically injected into all relevant libraries when using the default
-`go_binary` and `go_library` rules.
-
-It exists for several reasons.
-
-* The default `nogo` provided by bazel is insufficient with respect to the
- possibility of binary analysis. This package allows us to analyze the
- generated binary in addition to using the standard analyzers.
-
-* The configuration provided in this package is much richer than the standard
- `nogo` JSON blob. Specifically, it allows us to exclude specific structures
- from the composite rules (such as the Ranges that are common with the set
- types).
-
-* The bazel version of `nogo` is run directly against the `go_library` and
- `go_binary` targets, meaning that any change to the configuration requires a
-  rebuild from scratch (for some reason including all C++ source files in the
- process). Using an aspect is more efficient in this regard.
-
-* The checks supported by this package are exported as tests, which makes it
- easier to reason about and plumb into the build system.
-
-* For uninteresting reasons, it is impossible to integrate the default `nogo`
- analyzer provided by bazel with internal Google tooling. To provide a
- consistent experience, this package allows those systems to be unified.
-
-To use this package, import `nogo_test` from `defs.bzl` and add a single
-dependency which is a `go_binary` or `go_library` rule.
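-
-For example, a minimal sketch of such a target (the target and dependency
-names are placeholders, and the exact attribute name carrying the dependency
-is an assumption):
-
-    load("//tools/nogo:defs.bzl", "nogo_test")
-
-    nogo_test(
-        name = "nogo",
-        deps = [":lib"],  # an existing go_library or go_binary target
-    )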
diff --git a/tools/nogo/analyzers.go b/tools/nogo/analyzers.go
deleted file mode 100644
index 2b3c03fec..000000000
--- a/tools/nogo/analyzers.go
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nogo
-
-import (
- "encoding/gob"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/analysis/passes/asmdecl"
- "golang.org/x/tools/go/analysis/passes/assign"
- "golang.org/x/tools/go/analysis/passes/atomic"
- "golang.org/x/tools/go/analysis/passes/bools"
- "golang.org/x/tools/go/analysis/passes/buildtag"
- "golang.org/x/tools/go/analysis/passes/cgocall"
- "golang.org/x/tools/go/analysis/passes/composite"
- "golang.org/x/tools/go/analysis/passes/copylock"
- "golang.org/x/tools/go/analysis/passes/errorsas"
- "golang.org/x/tools/go/analysis/passes/httpresponse"
- "golang.org/x/tools/go/analysis/passes/loopclosure"
- "golang.org/x/tools/go/analysis/passes/lostcancel"
- "golang.org/x/tools/go/analysis/passes/nilfunc"
- "golang.org/x/tools/go/analysis/passes/nilness"
- "golang.org/x/tools/go/analysis/passes/printf"
- "golang.org/x/tools/go/analysis/passes/shadow"
- "golang.org/x/tools/go/analysis/passes/shift"
- "golang.org/x/tools/go/analysis/passes/stdmethods"
- "golang.org/x/tools/go/analysis/passes/stringintconv"
- "golang.org/x/tools/go/analysis/passes/structtag"
- "golang.org/x/tools/go/analysis/passes/tests"
- "golang.org/x/tools/go/analysis/passes/unmarshal"
- "golang.org/x/tools/go/analysis/passes/unreachable"
- "golang.org/x/tools/go/analysis/passes/unsafeptr"
- "golang.org/x/tools/go/analysis/passes/unusedresult"
- "honnef.co/go/tools/staticcheck"
- "honnef.co/go/tools/stylecheck"
-
- "gvisor.dev/gvisor/tools/checkescape"
- "gvisor.dev/gvisor/tools/checklocks"
- "gvisor.dev/gvisor/tools/checkunsafe"
-)
-
-// AllAnalyzers is a list of all available analyzers.
-var AllAnalyzers = []*analysis.Analyzer{
- asmdecl.Analyzer,
- assign.Analyzer,
- atomic.Analyzer,
- bools.Analyzer,
- buildtag.Analyzer,
- cgocall.Analyzer,
- composite.Analyzer,
- copylock.Analyzer,
- errorsas.Analyzer,
- httpresponse.Analyzer,
- loopclosure.Analyzer,
- lostcancel.Analyzer,
- nilfunc.Analyzer,
- nilness.Analyzer,
- printf.Analyzer,
- shift.Analyzer,
- stdmethods.Analyzer,
- stringintconv.Analyzer,
- shadow.Analyzer,
- structtag.Analyzer,
- tests.Analyzer,
- unmarshal.Analyzer,
- unreachable.Analyzer,
- unsafeptr.Analyzer,
- unusedresult.Analyzer,
- checkescape.Analyzer,
- checkunsafe.Analyzer,
- checklocks.Analyzer,
-}
-
-func register(all []*analysis.Analyzer) {
- // Register all fact types.
- //
- // N.B. This needs to be done recursively, because there may be
- // analyzers in the Requires list that do not appear explicitly above.
- registered := make(map[*analysis.Analyzer]struct{})
- var registerOne func(*analysis.Analyzer)
- registerOne = func(a *analysis.Analyzer) {
- if _, ok := registered[a]; ok {
- return
- }
-
- // Register dependencies.
- for _, da := range a.Requires {
- registerOne(da)
- }
-
- // Register local facts.
- for _, f := range a.FactTypes {
- gob.Register(f)
- }
-
- registered[a] = struct{}{} // Done.
- }
- for _, a := range all {
- registerOne(a)
- }
-}
-
-func init() {
- // Add all staticcheck analyzers.
- for _, a := range staticcheck.Analyzers {
- AllAnalyzers = append(AllAnalyzers, a)
- }
- // Add all stylecheck analyzers.
- for _, a := range stylecheck.Analyzers {
- AllAnalyzers = append(AllAnalyzers, a)
- }
-
- // Register lists.
- register(AllAnalyzers)
-}
diff --git a/tools/nogo/build.go b/tools/nogo/build.go
deleted file mode 100644
index 4067bb480..000000000
--- a/tools/nogo/build.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.1
-// +build go1.1
-
-package nogo
-
-import (
- "fmt"
- "io"
- "os"
-)
-
-// findStdPkg returns a reader for the bundled standard library package archive.
-func findStdPkg(GOOS, GOARCH, path string) (io.ReadCloser, error) {
- if path == "C" {
- // Cgo builds cannot be analyzed. Skip.
- return nil, ErrSkip
- }
- return os.Open(fmt.Sprintf("external/go_sdk/pkg/%s_%s/%s.a", GOOS, GOARCH, path))
-}
diff --git a/tools/nogo/check/BUILD b/tools/nogo/check/BUILD
deleted file mode 100644
index 666780dd3..000000000
--- a/tools/nogo/check/BUILD
+++ /dev/null
@@ -1,14 +0,0 @@
-load("//tools:defs.bzl", "go_binary")
-
-package(licenses = ["notice"])
-
-go_binary(
- name = "check",
- srcs = ["main.go"],
- nogo = False,
- visibility = ["//visibility:public"],
- deps = [
- "//tools/nogo",
- "//tools/worker",
- ],
-)
diff --git a/tools/nogo/check/main.go b/tools/nogo/check/main.go
deleted file mode 100644
index 0e7e92965..000000000
--- a/tools/nogo/check/main.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Binary check is the nogo entrypoint.
-package main
-
-import (
- "encoding/json"
- "flag"
- "fmt"
- "io/ioutil"
- "log"
- "os"
-
- "gvisor.dev/gvisor/tools/nogo"
- "gvisor.dev/gvisor/tools/worker"
-)
-
-var (
- packageFile = flag.String("package", "", "package configuration file (in JSON format)")
- stdlibFile = flag.String("stdlib", "", "stdlib configuration file (in JSON format)")
- findingsOutput = flag.String("findings", "", "output file (or stdout, if not specified)")
- factsOutput = flag.String("facts", "", "output file for facts (optional)")
-)
-
-func loadConfig(file string, config interface{}) interface{} {
- // Load the configuration.
- f, err := os.Open(file)
- if err != nil {
- log.Fatalf("unable to open configuration %q: %v", file, err)
- }
- defer f.Close()
- dec := json.NewDecoder(f)
- dec.DisallowUnknownFields()
- if err := dec.Decode(config); err != nil {
- log.Fatalf("unable to decode configuration: %v", err)
- }
- return config
-}
-
-func main() {
- worker.Work(run)
-}
-
-func run([]string) int {
- var (
- findings []nogo.Finding
- factData []byte
- err error
- )
-
- // Check & load the configuration.
- if *packageFile != "" && *stdlibFile != "" {
- fmt.Fprintf(os.Stderr, "unable to perform stdlib and package analysis; provide only one!")
- return 1
- }
-
- // Run the configuration.
- if *stdlibFile != "" {
- // Perform stdlib analysis.
- c := loadConfig(*stdlibFile, new(nogo.StdlibConfig)).(*nogo.StdlibConfig)
- findings, factData, err = nogo.CheckStdlib(c, nogo.AllAnalyzers)
- } else if *packageFile != "" {
- // Perform standard analysis.
- c := loadConfig(*packageFile, new(nogo.PackageConfig)).(*nogo.PackageConfig)
- findings, factData, err = nogo.CheckPackage(c, nogo.AllAnalyzers, nil)
- } else {
- fmt.Fprintf(os.Stderr, "please provide at least one of package or stdlib!")
- return 1
- }
-
- // Check that analysis was successful.
- if err != nil {
- fmt.Fprintf(os.Stderr, "error performing analysis: %v", err)
- return 1
- }
-
- // Save facts.
- if *factsOutput != "" {
- if err := ioutil.WriteFile(*factsOutput, factData, 0644); err != nil {
- fmt.Fprintf(os.Stderr, "error saving facts to %q: %v", *factsOutput, err)
- return 1
- }
- }
-
- // Write all findings.
- if *findingsOutput != "" {
- w, err := os.OpenFile(*findingsOutput, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
- if err != nil {
- fmt.Fprintf(os.Stderr, "error opening output file %q: %v", *findingsOutput, err)
- return 1
- }
- if err := nogo.WriteFindingsTo(w, findings, false /* json */); err != nil {
- fmt.Fprintf(os.Stderr, "error writing findings to %q: %v", *findingsOutput, err)
- return 1
- }
- } else {
- for _, finding := range findings {
- fmt.Fprintf(os.Stdout, "%s\n", finding.String())
- }
- }
-
- return 0
-}
diff --git a/tools/nogo/config-schema.json b/tools/nogo/config-schema.json
deleted file mode 100644
index 3c25fe221..000000000
--- a/tools/nogo/config-schema.json
+++ /dev/null
@@ -1,97 +0,0 @@
-{
- "$schema": "http://json-schema.org/draft-07/schema",
- "definitions": {
- "group": {
- "type": "object",
- "properties": {
- "name": {
- "description": "The name of the group.",
- "type": "string"
- },
- "regex": {
- "description": "A regular expression for matching paths.",
- "type": "string"
- },
- "default": {
- "description": "Whether the group is enabled by default.",
- "type": "boolean"
- }
- },
- "required": [
- "name",
- "regex",
- "default"
- ],
- "additionalProperties": false
- },
- "regexlist": {
- "description": "A list of regular expressions.",
- "oneOf": [
- {
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- {
- "type": "null"
- }
- ]
- },
- "rule": {
- "type": "object",
- "properties": {
- "exclude": {
- "description": "A regular expression for paths to exclude.",
- "$ref": "#/definitions/regexlist"
- },
- "suppress": {
- "description": "A regular expression for messages to suppress.",
- "$ref": "#/definitions/regexlist"
- }
- },
- "additionalProperties": false
- },
- "ruleList": {
- "type": "object",
- "additionalProperties": {
- "oneOf": [
- {
- "$ref": "#/definitions/rule"
- },
- {
- "type": "null"
- }
- ]
- }
- }
- },
- "properties": {
- "groups": {
- "description": "A definition of all groups.",
- "type": "array",
- "items": {
- "$ref": "#/definitions/group"
- },
- "minItems": 1
- },
- "global": {
- "description": "A global set of rules.",
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/rule"
- }
- },
- "analyzers": {
- "description": "A definition of all analyzers.",
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/ruleList"
- }
- }
- },
- "required": [
- "groups"
- ],
- "additionalProperties": false
-}
diff --git a/tools/nogo/config.go b/tools/nogo/config.go
deleted file mode 100644
index 6436f9d34..000000000
--- a/tools/nogo/config.go
+++ /dev/null
@@ -1,311 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nogo
-
-import (
- "fmt"
- "regexp"
-)
-
-// GroupName is a named group.
-type GroupName string
-
-// AnalyzerName is a named analyzer.
-type AnalyzerName string
-
-// Group represents a named collection of files.
-type Group struct {
- // Name is the short name for the group.
- Name GroupName `yaml:"name"`
-
- // Regex matches all full paths in the group.
- Regex string `yaml:"regex"`
- regex *regexp.Regexp `yaml:"-"`
-
- // Default determines the default group behavior.
- //
- // If Default is true, all Analyzers are enabled for this
- // group. Otherwise, Analyzers must be individually enabled
- // by specifying a (possible empty) ItemConfig for the group
- // in the AnalyzerConfig.
- Default bool `yaml:"default"`
-}
-
-func (g *Group) compile() error {
- r, err := regexp.Compile(g.Regex)
- if err != nil {
- return err
- }
- g.regex = r
- return nil
-}
-
-// ItemConfig is an (Analyzer,Group) configuration.
-type ItemConfig struct {
- // Exclude are analyzer exclusions.
- //
- // Exclude is a list of regular expressions. If the corresponding
- // Analyzer emits a Finding for which Finding.Position.String()
- // matches a regular expression in Exclude, the finding will not
- // be reported.
- Exclude []string `yaml:"exclude,omitempty"`
- exclude []*regexp.Regexp `yaml:"-"`
-
- // Suppress are analyzer suppressions.
- //
- // Suppress is a list of regular expressions. If the corresponding
- // Analyzer emits a Finding for which Finding.Message matches a regular
- // expression in Suppress, the finding will not be reported.
- Suppress []string `yaml:"suppress,omitempty"`
- suppress []*regexp.Regexp `yaml:"-"`
-}
-
-func compileRegexps(ss []string, rs *[]*regexp.Regexp) error {
- *rs = make([]*regexp.Regexp, len(ss))
- for i, s := range ss {
- r, err := regexp.Compile(s)
- if err != nil {
- return err
- }
- (*rs)[i] = r
- }
- return nil
-}
-
-// RegexpCount is used by AnalyzerConfig.RegexpCount.
-func (i *ItemConfig) RegexpCount() int64 {
- if i == nil {
- // See compile.
- return 0
- }
- // Return the number of regular expressions compiled for these items.
- // This is how the cache size of the configuration is measured.
- return int64(len(i.exclude) + len(i.suppress))
-}
-
-func (i *ItemConfig) compile() error {
- if i == nil {
- // This may be nil if nothing is included in the
- // item configuration. That's fine, there's nothing
- // to compile and nothing to exclude & suppress.
- return nil
- }
- if err := compileRegexps(i.Exclude, &i.exclude); err != nil {
- return fmt.Errorf("in exclude: %w", err)
- }
- if err := compileRegexps(i.Suppress, &i.suppress); err != nil {
- return fmt.Errorf("in suppress: %w", err)
- }
- return nil
-}
-
-func merge(a, b []string) []string {
- found := make(map[string]struct{})
- result := make([]string, 0, len(a)+len(b))
- for _, elem := range a {
- found[elem] = struct{}{}
- result = append(result, elem)
- }
- for _, elem := range b {
- if _, ok := found[elem]; ok {
- continue
- }
- result = append(result, elem)
- }
- return result
-}
-
-func (i *ItemConfig) merge(other *ItemConfig) {
- i.Exclude = merge(i.Exclude, other.Exclude)
- i.Suppress = merge(i.Suppress, other.Suppress)
-}
-
-func (i *ItemConfig) shouldReport(fullPos, msg string) bool {
- if i == nil {
- // See above.
- return true
- }
- for _, r := range i.exclude {
- if r.MatchString(fullPos) {
- return false
- }
- }
- for _, r := range i.suppress {
- if r.MatchString(msg) {
- return false
- }
- }
- return true
-}
-
-// AnalyzerConfig is the configuration for a single analyzer.
-//
-// This map is keyed by individual Group names, to allow for different
-// configurations depending on what Group the file belongs to.
-type AnalyzerConfig map[GroupName]*ItemConfig
-
-// RegexpCount is used by Config.Size.
-func (a AnalyzerConfig) RegexpCount() int64 {
- count := int64(0)
- for _, gc := range a {
- count += gc.RegexpCount()
- }
- return count
-}
-
-func (a AnalyzerConfig) compile() error {
- for name, gc := range a {
- if err := gc.compile(); err != nil {
- return fmt.Errorf("invalid group %q: %v", name, err)
- }
- }
- return nil
-}
-
-func (a AnalyzerConfig) merge(other AnalyzerConfig) {
- // Merge all the groups.
- for name, gc := range other {
- old, ok := a[name]
- if !ok || old == nil {
- a[name] = gc // Not configured in a.
- continue
- }
- old.merge(gc)
- }
-}
-
-func (a AnalyzerConfig) shouldReport(groupConfig *Group, fullPos, msg string) bool {
- gc, ok := a[groupConfig.Name]
- if !ok {
- return groupConfig.Default
- }
-
- // Note that if a section appears for a particular group
- // for a particular analyzer, then it will now be enabled,
- // and the group default no longer applies.
- return gc.shouldReport(fullPos, msg)
-}
-
-// Config is a nogo configuration.
-type Config struct {
- // Groups defines a set of named groups, each matched by a regular
- // expression, so that files can be grouped and specific rules
- // applied to individual groups.
- Groups []Group `yaml:"groups"`
-
- // Global is the global analyzer config.
- Global AnalyzerConfig `yaml:"global"`
-
- // Analyzers are individual analyzer configurations. The
- // key for each analyzer is the name of the analyzer. The
- // value is either a boolean (enable/disable), or a map to
- // the groups above.
- Analyzers map[AnalyzerName]AnalyzerConfig `yaml:"analyzers"`
-}
-
-// Size implements worker.Sizer.Size.
-func (c *Config) Size() int64 {
- count := c.Global.RegexpCount()
- for _, config := range c.Analyzers {
- count += config.RegexpCount()
- }
- // The size is measured as the number of regexps that are compiled
- // here. We multiply by 1k to produce an estimate.
- return 1024 * count
-}
-
-// Merge merges two configurations.
-func (c *Config) Merge(other *Config) {
- // Merge all groups.
- //
- // Select the other first, as the order provided in the second will
- // take precedence over the same group defined in the first one.
- seenGroups := make(map[GroupName]struct{})
- newGroups := make([]Group, 0, len(c.Groups)+len(other.Groups))
- for _, g := range other.Groups {
- newGroups = append(newGroups, g)
- seenGroups[g.Name] = struct{}{}
- }
- for _, g := range c.Groups {
- if _, ok := seenGroups[g.Name]; ok {
- continue
- }
- newGroups = append(newGroups, g)
- }
- c.Groups = newGroups
-
- // Merge global configurations.
- c.Global.merge(other.Global)
-
- // Merge all analyzer configurations.
- for name, ac := range other.Analyzers {
- old, ok := c.Analyzers[name]
- if !ok {
- c.Analyzers[name] = ac // No analyzer in original config.
- continue
- }
- old.merge(ac)
- }
-}
-
-// Compile compiles a configuration to make it usable.
-func (c *Config) Compile() error {
- for i := 0; i < len(c.Groups); i++ {
- if err := c.Groups[i].compile(); err != nil {
- return fmt.Errorf("invalid group %q: %w", c.Groups[i].Name, err)
- }
- }
- if err := c.Global.compile(); err != nil {
- return fmt.Errorf("invalid global: %w", err)
- }
- for name, ac := range c.Analyzers {
- if err := ac.compile(); err != nil {
- return fmt.Errorf("invalid analyzer %q: %w", name, err)
- }
- }
- return nil
-}
-
-// ShouldReport returns true iff the finding should be reported, per the Config.
-func (c *Config) ShouldReport(finding Finding) bool {
- fullPos := finding.Position.String()
-
- // Find the matching group.
- var groupConfig *Group
- for i := 0; i < len(c.Groups); i++ {
- if c.Groups[i].regex.MatchString(fullPos) {
- groupConfig = &c.Groups[i]
- break
- }
- }
-
- // If there is no group matching this path, then
- // we default to accepting the finding.
- if groupConfig == nil {
- return true
- }
-
- // Suppress via global rule?
- if !c.Global.shouldReport(groupConfig, fullPos, finding.Message) {
- return false
- }
-
- // Try the analyzer config.
- ac, ok := c.Analyzers[finding.Category]
- if !ok {
- return groupConfig.Default
- }
- return ac.shouldReport(groupConfig, fullPos, finding.Message)
-}
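
For illustration, a minimal sketch of how the deleted configuration API above could be driven programmatically; the import path is the one used by the filter binary later in this diff, while the group names, regular expressions, and the "printf" analyzer name are purely illustrative:

package main

import (
	"fmt"
	"go/token"

	"gvisor.dev/gvisor/tools/nogo"
)

func main() {
	// One catch-all group plus a non-default group for generated files.
	config := &nogo.Config{
		Groups: []nogo.Group{
			{Name: "generated", Regex: `.*\.pb\.go`, Default: false},
			{Name: "default", Regex: `.*`, Default: true},
		},
		// Global suppressions apply to every analyzer.
		Global: nogo.AnalyzerConfig{
			"default": &nogo.ItemConfig{
				Suppress: []string{"should not be capitalized"},
			},
		},
		// Per-analyzer exclusions, keyed by group name.
		Analyzers: map[nogo.AnalyzerName]nogo.AnalyzerConfig{
			"printf": {
				"default": &nogo.ItemConfig{
					Exclude: []string{`_test\.go`},
				},
			},
		},
	}
	if err := config.Compile(); err != nil {
		panic(err)
	}
	finding := nogo.Finding{
		Category: "printf",
		Position: token.Position{Filename: "pkg/foo/foo.go", Line: 42},
		Message:  "Sprintf format %d has arg of wrong type",
	}
	fmt.Println(config.ShouldReport(finding)) // true under the config above
}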
diff --git a/tools/nogo/defs.bzl b/tools/nogo/defs.bzl
deleted file mode 100644
index 80182ff6c..000000000
--- a/tools/nogo/defs.bzl
+++ /dev/null
@@ -1,501 +0,0 @@
-"""Nogo rules."""
-
-load("//tools/bazeldefs:go.bzl", "go_context", "go_embed_libraries", "go_importpath", "go_rule")
-
-NogoConfigInfo = provider(
- "information about a nogo configuration",
- fields = {
- "srcs": "the collection of configuration files",
- },
-)
-
-def _nogo_config_impl(ctx):
- return [NogoConfigInfo(
- srcs = ctx.files.srcs,
- )]
-
-nogo_config = rule(
- implementation = _nogo_config_impl,
- attrs = {
- "srcs": attr.label_list(
- doc = "a list of yaml files (schema defined by tool/nogo/config.go).",
- allow_files = True,
- ),
- },
-)
-
-NogoTargetInfo = provider(
- "information about the Go target",
- fields = {
- "goarch": "the build architecture (GOARCH)",
- "goos": "the build OS target (GOOS)",
- "worker_debug": "transitive debugging",
- },
-)
-
-def _nogo_target_impl(ctx):
- return [NogoTargetInfo(
- goarch = ctx.attr.goarch,
- goos = ctx.attr.goos,
- worker_debug = ctx.attr.worker_debug,
- )]
-
-nogo_target = go_rule(
- rule,
- implementation = _nogo_target_impl,
- attrs = {
- "goarch": attr.string(
- doc = "the Go build architecture (propagated to other rules).",
- mandatory = True,
- ),
- "goos": attr.string(
- doc = "the Go OS target (propagated to other rules).",
- mandatory = True,
- ),
- "worker_debug": attr.bool(
- doc = "whether worker debugging should be enabled.",
- default = False,
- ),
- },
-)
-
-def _nogo_objdump_tool_impl(ctx):
- # Construct the magic dump command.
- #
- # Note that in some cases, the input is being fed into the tool via stdin.
- # Unfortunately, the Go objdump tool expects to see a seekable file [1], so
- # we need the tool to handle this case by creating a temporary file.
- #
- # [1] https://github.com/golang/go/issues/41051
- nogo_target_info = ctx.attr._target[NogoTargetInfo]
- go_ctx = go_context(ctx, goos = nogo_target_info.goos, goarch = nogo_target_info.goarch)
- env_prefix = " ".join(["%s=%s" % (key, value) for (key, value) in go_ctx.env.items()])
- dumper = ctx.actions.declare_file(ctx.label.name)
- ctx.actions.write(dumper, "\n".join([
- "#!/bin/bash",
- "set -euo pipefail",
- "if [[ $# -eq 0 ]]; then",
- " T=$(mktemp -u -t libXXXXXX.a)",
- " cat /dev/stdin > ${T}",
- "else",
- " T=$1;",
- "fi",
- "%s %s tool objdump ${T}" % (
- env_prefix,
- go_ctx.go.path,
- ),
- "if [[ $# -eq 0 ]]; then",
- " rm -rf ${T}",
- "fi",
- "",
- ]), is_executable = True)
-
- # Include the full runfiles.
- return [DefaultInfo(
- runfiles = ctx.runfiles(files = go_ctx.runfiles.to_list()),
- executable = dumper,
- )]
-
-nogo_objdump_tool = go_rule(
- rule,
- implementation = _nogo_objdump_tool_impl,
- attrs = {
- "_target": attr.label(
- default = "//tools/nogo:target",
- cfg = "target",
- ),
- },
-)
-
-# NogoStdlibInfo is the set of standard library facts.
-NogoStdlibInfo = provider(
- "information for nogo analysis (standard library facts)",
- fields = {
- "facts": "serialized standard library facts",
- "raw_findings": "raw package findings (if relevant)",
- },
-)
-
-def _nogo_stdlib_impl(ctx):
- # Build the standard library facts.
- nogo_target_info = ctx.attr._target[NogoTargetInfo]
- go_ctx = go_context(ctx, goos = nogo_target_info.goos, goarch = nogo_target_info.goarch)
- facts = ctx.actions.declare_file(ctx.label.name + ".facts")
- raw_findings = ctx.actions.declare_file(ctx.label.name + ".raw_findings")
- config = struct(
- Srcs = [f.path for f in go_ctx.stdlib_srcs],
- GOOS = go_ctx.goos,
- GOARCH = go_ctx.goarch,
- Tags = go_ctx.gotags,
- )
- config_file = ctx.actions.declare_file(ctx.label.name + ".cfg")
- ctx.actions.write(config_file, config.to_json())
- args_file = ctx.actions.declare_file(ctx.label.name + "_args_file")
- ctx.actions.write(
- output = args_file,
- content = "\n".join(go_ctx.nogo_args + [
- "-objdump_tool=%s" % ctx.files._objdump_tool[0].path,
- "-stdlib=%s" % config_file.path,
- "-findings=%s" % raw_findings.path,
- "-facts=%s" % facts.path,
- ]),
- )
- ctx.actions.run(
- inputs = [config_file] + go_ctx.stdlib_srcs + [args_file],
- outputs = [facts, raw_findings],
- tools = depset(go_ctx.runfiles.to_list() + ctx.files._objdump_tool),
- executable = ctx.files._check[0],
- mnemonic = "GoStandardLibraryAnalysis",
- # Note that this does not support worker execution currently. There is an
- # issue with stdout pollution that is not yet resolved, so this is kept
- # as a separate mnemonic.
- progress_message = "Analyzing Go Standard Library",
- arguments = [
- "--worker_debug=%s" % nogo_target_info.worker_debug,
- "@%s" % args_file.path,
- ],
- )
-
- # Return the stdlib facts as output.
- return [NogoStdlibInfo(
- facts = facts,
- raw_findings = raw_findings,
- )]
-
-nogo_stdlib = go_rule(
- rule,
- implementation = _nogo_stdlib_impl,
- attrs = {
- "_check": attr.label(
- default = "//tools/nogo/check:check",
- cfg = "host",
- ),
- "_objdump_tool": attr.label(
- default = "//tools/nogo:objdump_tool",
- cfg = "host",
- ),
- "_target": attr.label(
- default = "//tools/nogo:target",
- cfg = "target",
- ),
- },
-)
-
-# NogoInfo is the serialized set of package facts for a nogo analysis.
-#
-# Each go_library rule will generate a corresponding nogo rule, which will run
-# with the source files as input. Note however, that the individual nogo rules
-# are simply stubs that enter into the shadow dependency tree (the "aspect").
-NogoInfo = provider(
- "information for nogo analysis",
- fields = {
- "facts": "serialized package facts",
- "raw_findings": "raw package findings (if relevant)",
- "importpath": "package import path",
- "binaries": "package binary files",
- "srcs": "srcs (for go_test support)",
- "deps": "deps (for go_test support)",
- },
-)
-
-def _select_objfile(files):
- """Returns (.a file, .x file, is_archive).
-
- If no .a file is available, then the first .x file will be returned
- instead, and vice versa. If neither is available, then the first provided
- file will be returned."""
- a_files = [f for f in files if f.path.endswith(".a")]
- x_files = [f for f in files if f.path.endswith(".x")]
- if not len(x_files) and not len(a_files):
- return (files[0], files[0], False)
- if not len(x_files):
- x_files = a_files
- if not len(a_files):
- a_files = x_files
- return a_files[0], x_files[0], True
-
-def _nogo_aspect_impl(target, ctx):
- # If this is a nogo rule itself (and not the shadow of a go_library or
- # go_binary rule created by such a rule), then we simply return nothing.
- # All work is done in the shadow properties for go rules. For a proto
- # library, we simply skip the analysis portion but still need to return a
- # valid NogoInfo to reference the generated binary.
- #
- # Note that we almost exclusively use go_library, not go_tool_library.
- # This is because nogo is manually annotated, so the go_tool_library kind
- # is not needed to avoid dependency loops. Unfortunately, bazel coverdata
- # is exported *only* as a go_tool_library. This does not cause a problem,
- # since there is guaranteed to be no conflict. However for consistency,
- # we should not introduce new go_tool_library dependencies unless strictly
- # necessary.
- if ctx.rule.kind in ("go_library", "go_tool_library", "go_binary", "go_test"):
- srcs = ctx.rule.files.srcs
- deps = ctx.rule.attr.deps
- elif ctx.rule.kind in ("go_proto_library", "go_wrap_cc"):
- srcs = []
- deps = ctx.rule.attr.deps
- else:
- return [NogoInfo()]
-
- # If we're using the "library" attribute, then we need to aggregate the
- # original library sources and dependencies into this target to perform
- # proper type analysis.
- for embed in go_embed_libraries(ctx.rule):
- info = embed[NogoInfo]
- if hasattr(info, "srcs"):
- srcs = srcs + info.srcs
- if hasattr(info, "deps"):
- deps = deps + info.deps
-
- # Start with all target files and srcs as input.
- binaries = target.files.to_list()
- inputs = binaries + srcs
-
- # Generate a shell script that dumps the binary. Annoyingly, this seems
- # necessary as the context in which a run_shell command runs does not seem
- # to cleanly allow us to redirect stdout to the actual output file. Perhaps
- # I'm missing something here, but the intermediate script does work.
- target_objfile, target_xfile, has_objfile = _select_objfile(binaries)
- inputs.append(target_objfile)
-
- # Extract the importpath for this package.
- if ctx.rule.kind == "go_test":
- # If this is a test, then it will not be imported by anything else.
- # We can safely set the importpath to just "test". Note that this
- # is necessary if the library also imports the core library (in
- # addition to including the sources directly), which happens in
- # some complex cases (seccomp_victim).
- importpath = "test"
- else:
- importpath = go_importpath(target)
-
- # Collect all info from shadow dependencies.
- fact_map = dict()
- import_map = dict()
- all_raw_findings = []
- for dep in deps:
- # There will be no file attribute set for all transitive dependencies
- # that are not go_library or go_binary rules, such as proto rules.
- # This is handled by the ctx.rule.kind check above.
- info = dep[NogoInfo]
- if not hasattr(info, "facts"):
- continue
-
- # Configure where to find the binary & fact files. Note that this will
- # use .x and .a regardless of whether this is a go_binary rule, since
- # these dependencies must be go_library rules.
- _, x_file, _ = _select_objfile(info.binaries)
- import_map[info.importpath] = x_file.path
- fact_map[info.importpath] = info.facts.path
-
- # Collect all findings; duplicates are resolved at the end.
- all_raw_findings.extend(info.raw_findings)
-
- # Ensure the above are available as inputs.
- inputs.append(info.facts)
- inputs += info.binaries
-
- # Add the module itself, for the type sanity check. This applies only to
- # the libraries, and not binaries or tests.
- if has_objfile:
- import_map[importpath] = target_xfile.path
-
- # Add the standard library facts.
- stdlib_info = ctx.attr._nogo_stdlib[NogoStdlibInfo]
- stdlib_facts = stdlib_info.facts
- inputs.append(stdlib_facts)
-
- # The nogo tool operates on a configuration serialized in JSON format.
- nogo_target_info = ctx.attr._target[NogoTargetInfo]
- go_ctx = go_context(ctx, goos = nogo_target_info.goos, goarch = nogo_target_info.goarch)
- facts = ctx.actions.declare_file(target.label.name + ".facts")
- raw_findings = ctx.actions.declare_file(target.label.name + ".raw_findings")
- config = struct(
- ImportPath = importpath,
- GoFiles = [src.path for src in srcs if src.path.endswith(".go")],
- NonGoFiles = [src.path for src in srcs if not src.path.endswith(".go")],
- GOOS = go_ctx.goos,
- GOARCH = go_ctx.goarch,
- Tags = go_ctx.gotags,
- FactMap = fact_map,
- ImportMap = import_map,
- StdlibFacts = stdlib_facts.path,
- )
- config_file = ctx.actions.declare_file(target.label.name + ".cfg")
- ctx.actions.write(config_file, config.to_json())
- inputs.append(config_file)
- args_file = ctx.actions.declare_file(ctx.label.name + "_args_file")
- ctx.actions.write(
- output = args_file,
- content = "\n".join(go_ctx.nogo_args + [
- "-binary=%s" % target_objfile.path,
- "-objdump_tool=%s" % ctx.files._objdump_tool[0].path,
- "-package=%s" % config_file.path,
- "-findings=%s" % raw_findings.path,
- "-facts=%s" % facts.path,
- ]),
- )
- ctx.actions.run(
- inputs = inputs + [args_file],
- outputs = [facts, raw_findings],
- tools = depset(go_ctx.runfiles.to_list() + ctx.files._objdump_tool),
- executable = ctx.files._check[0],
- mnemonic = "GoStaticAnalysis",
- progress_message = "Analyzing %s" % target.label,
- execution_requirements = {"supports-workers": "1"},
- arguments = [
- "--worker_debug=%s" % nogo_target_info.worker_debug,
- "@%s" % args_file.path,
- ],
- )
-
- # Flatten all findings from all dependencies.
- #
- # This is done because all the filtering must be done at the
- # top-level nogo_test to dynamically apply a configuration.
- # This does not actually add any additional work here, but
- # will simply propagate the full list of files.
- all_raw_findings = [stdlib_info.raw_findings] + depset(all_raw_findings).to_list() + [raw_findings]
-
- # Return the package facts as output.
- return [
- NogoInfo(
- facts = facts,
- raw_findings = all_raw_findings,
- importpath = importpath,
- binaries = binaries,
- srcs = srcs,
- deps = deps,
- ),
- ]
-
-nogo_aspect = go_rule(
- aspect,
- implementation = _nogo_aspect_impl,
- attr_aspects = [
- "deps",
- "library",
- "embed",
- ],
- attrs = {
- "_check": attr.label(
- default = "//tools/nogo/check:check",
- cfg = "host",
- ),
- "_objdump_tool": attr.label(
- default = "//tools/nogo:objdump_tool",
- cfg = "host",
- ),
- "_target": attr.label(
- default = "//tools/nogo:target",
- cfg = "target",
- ),
- # The name of this attribute must not be _stdlib, since that
- # appears to be reserved for some internal bazel use.
- "_nogo_stdlib": attr.label(
- default = "//tools/nogo:stdlib",
- cfg = "host",
- ),
- },
-)
-
-def _nogo_test_impl(ctx):
- """Check nogo findings."""
- nogo_target_info = ctx.attr._target[NogoTargetInfo]
-
- # Ensure there's a single dependency.
- if len(ctx.attr.deps) != 1:
- fail("nogo_test requires exactly one dep.")
- raw_findings = ctx.attr.deps[0][NogoInfo].raw_findings
-
- # Build a step that applies the configuration.
- config_srcs = ctx.attr.config[NogoConfigInfo].srcs
- findings = ctx.actions.declare_file(ctx.label.name + ".findings")
- args_file = ctx.actions.declare_file(ctx.label.name + "_args_file")
- ctx.actions.write(
- output = args_file,
- content = "\n".join(
- ["-input=%s" % f.path for f in raw_findings] +
- ["-config=%s" % f.path for f in config_srcs] +
- ["-output=%s" % findings.path],
- ),
- )
- ctx.actions.run(
- inputs = raw_findings + ctx.files.srcs + config_srcs + [args_file],
- outputs = [findings],
- tools = depset(ctx.files._filter),
- executable = ctx.files._filter[0],
- mnemonic = "GoStaticAnalysis",
- progress_message = "Generating %s" % ctx.label,
- execution_requirements = {"supports-workers": "1"},
- arguments = [
- "--worker_debug=%s" % nogo_target_info.worker_debug,
- "@%s" % args_file.path,
- ],
- )
-
- # Build a runner that checks the filtered facts.
- #
- # Note that this calls the filter binary without any configuration, so all
- # findings will be included. But this is expected, since we've already
- # filtered out everything that should not be included.
- runner = ctx.actions.declare_file(ctx.label.name)
- runner_content = [
- "#!/bin/bash",
- "exec %s -check -input=%s" % (ctx.files._filter[0].short_path, findings.short_path),
- "",
- ]
- ctx.actions.write(runner, "\n".join(runner_content), is_executable = True)
-
- return [DefaultInfo(
- # The runner just executes the filter again, on the
- # newly generated filtered findings. We still need
- # the filter tool as part of our runfiles, however.
- runfiles = ctx.runfiles(files = ctx.files._filter + [findings]),
- executable = runner,
- ), OutputGroupInfo(
- # Propagate the filtered findings, for consumption by
- # build tooling. Note that the build tooling typically
- # pays attention to the mnemonic above, so this must be
- # what is expected by the tooling.
- nogo_findings = depset([findings]),
- )]
-
-nogo_test = rule(
- implementation = _nogo_test_impl,
- attrs = {
- "config": attr.label(
- mandatory = True,
- doc = "A rule of kind nogo_config.",
- ),
- "deps": attr.label_list(
- aspects = [nogo_aspect],
- doc = "Exactly one Go dependency to be analyzed.",
- ),
- "srcs": attr.label_list(
- allow_files = True,
- doc = "Relevant src files. This is ignored except to make the nogo_test directly affected by the files.",
- ),
- "_target": attr.label(
- default = "//tools/nogo:target",
- cfg = "target",
- ),
- "_filter": attr.label(default = "//tools/nogo/filter:filter"),
- },
- test = True,
-)
-
-def _nogo_aspect_tricorder_impl(target, ctx):
- if ctx.rule.kind != "nogo_test" or OutputGroupInfo not in target:
- return []
- if not hasattr(target[OutputGroupInfo], "nogo_findings"):
- return []
- return [
- OutputGroupInfo(tricorder = target[OutputGroupInfo].nogo_findings),
- ]
-
-# Trivial aspect that forwards the findings from a nogo_test rule to
-# go/tricorder, which reads from the `tricorder` output group.
-nogo_aspect_tricorder = aspect(
- implementation = _nogo_aspect_tricorder_impl,
-)
diff --git a/tools/nogo/filter/BUILD b/tools/nogo/filter/BUILD
deleted file mode 100644
index e3049521e..000000000
--- a/tools/nogo/filter/BUILD
+++ /dev/null
@@ -1,15 +0,0 @@
-load("//tools:defs.bzl", "go_binary")
-
-package(licenses = ["notice"])
-
-go_binary(
- name = "filter",
- srcs = ["main.go"],
- nogo = False,
- visibility = ["//visibility:public"],
- deps = [
- "//tools/nogo",
- "//tools/worker",
- "@in_gopkg_yaml_v2//:go_default_library",
- ],
-)
diff --git a/tools/nogo/filter/main.go b/tools/nogo/filter/main.go
deleted file mode 100644
index d50336b9b..000000000
--- a/tools/nogo/filter/main.go
+++ /dev/null
@@ -1,190 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Binary filter is the nogo findings filter entrypoint.
-package main
-
-import (
- "bytes"
- "flag"
- "fmt"
- "io/ioutil"
- "log"
- "os"
- "strings"
-
- yaml "gopkg.in/yaml.v2"
- "gvisor.dev/gvisor/tools/nogo"
- "gvisor.dev/gvisor/tools/worker"
-)
-
-type stringList []string
-
-func (s *stringList) String() string {
- return strings.Join(*s, ",")
-}
-
-func (s *stringList) Set(value string) error {
- *s = append(*s, value)
- return nil
-}
-
-var (
- inputFiles stringList
- configFiles stringList
- outputFile string
- showConfig bool
- check bool
-)
-
-func init() {
- flag.Var(&inputFiles, "input", "findings input files (gob format)")
- flag.StringVar(&outputFile, "output", "", "findings output file (json format)")
- flag.Var(&configFiles, "config", "findings configuration files")
- flag.BoolVar(&showConfig, "show-config", false, "dump configuration only")
- flag.BoolVar(&check, "check", false, "assume input is in json format")
-}
-
-func main() {
- worker.Work(run)
-}
-
-var (
- cachedFindings = worker.NewCache("findings") // With nogo.FindingSet.
- cachedFiltered = worker.NewCache("filtered") // With nogo.FindingSet.
- cachedConfigs = worker.NewCache("configs") // With nogo.Config.
- cachedFullConfigs = worker.NewCache("compiled") // With nogo.Config.
-)
-
-func loadFindings(filename string) nogo.FindingSet {
- return cachedFindings.Lookup([]string{filename}, func() worker.Sizer {
- r, err := os.Open(filename)
- if err != nil {
- log.Fatalf("unable to open input %q: %v", filename, err)
- }
- inputFindings, err := nogo.ExtractFindingsFrom(r, check /* json */)
- if err != nil {
- log.Fatalf("unable to extract findings from %s: %v", filename, err)
- }
- return inputFindings
- }).(nogo.FindingSet)
-}
-
-func loadConfig(filename string) *nogo.Config {
- return cachedConfigs.Lookup([]string{filename}, func() worker.Sizer {
- content, err := ioutil.ReadFile(filename)
- if err != nil {
- log.Fatalf("unable to read %s: %v", filename, err)
- }
- var newConfig nogo.Config // For current file.
- dec := yaml.NewDecoder(bytes.NewBuffer(content))
- dec.SetStrict(true)
- if err := dec.Decode(&newConfig); err != nil {
- log.Fatalf("unable to decode %s: %v", filename, err)
- }
- if showConfig {
- content, err := yaml.Marshal(&newConfig)
- if err != nil {
- log.Fatalf("error marshalling config: %v", err)
- }
- fmt.Fprintf(os.Stdout, "Loaded configuration from %s:\n%s\n", filename, string(content))
- }
- return &newConfig
- }).(*nogo.Config)
-}
-
-func loadConfigs(filenames []string) *nogo.Config {
- return cachedFullConfigs.Lookup(filenames, func() worker.Sizer {
- config := &nogo.Config{
- Global: make(nogo.AnalyzerConfig),
- Analyzers: make(map[nogo.AnalyzerName]nogo.AnalyzerConfig),
- }
- for _, filename := range configFiles {
- config.Merge(loadConfig(filename))
- if showConfig {
- mergedBytes, err := yaml.Marshal(config)
- if err != nil {
- log.Fatalf("error marshalling config: %v", err)
- }
- fmt.Fprintf(os.Stdout, "Merged configuration:\n%s\n", string(mergedBytes))
- }
- }
- if err := config.Compile(); err != nil {
- log.Fatalf("error compiling config: %v", err)
- }
- return config
- }).(*nogo.Config)
-}
-
-func run([]string) int {
- // Open and merge all configurations.
- config := loadConfigs(configFiles)
- if showConfig {
- return 0
- }
-
- // Load and filter available findings.
- var filteredFindings []nogo.Finding
- for _, filename := range inputFiles {
- // Note that this applies a caching strategy to the filtered
- // findings, because *this is by far the most expensive part of
- // evaluation*. The set of findings is large and applying the
- // configuration is complex. Therefore, we segment this cache
- // on each individual raw findings input file and the
- // configuration files. Note that this cache is keyed on all
- // the configuration files and each individual raw findings, so
- // is guaranteed to be safe. This allows us to reuse the same
- // filter result many times over, because e.g. all standard
- // library findings will be available to all packages.
- filteredFindings = append(filteredFindings,
- cachedFiltered.Lookup(append(configFiles, filename), func() worker.Sizer {
- inputFindings := loadFindings(filename)
- filteredFindings := make(nogo.FindingSet, 0, len(inputFindings))
- for _, finding := range inputFindings {
- if ok := config.ShouldReport(finding); ok {
- filteredFindings = append(filteredFindings, finding)
- }
- }
- return filteredFindings
- }).(nogo.FindingSet)...)
- }
-
- // Write the output (if required).
- //
- // If the outputFile is specified, then we exit here. Otherwise,
- // we continue to write to stdout and treat like a test.
- //
- // Note that the output of the filter is always json, which is
- // human readable and the format that is consumed by tricorder.
- if outputFile != "" {
- w, err := os.OpenFile(outputFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
- if err != nil {
- log.Fatalf("unable to open output file %q: %v", outputFile, err)
- }
- if err := nogo.WriteFindingsTo(w, filteredFindings, true /* json */); err != nil {
- log.Fatalf("unable to write findings: %v", err)
- }
- return 0
- }
-
- // Treat the run as a test.
- if len(filteredFindings) == 0 {
- fmt.Fprintf(os.Stdout, "PASS\n")
- return 0
- }
- for _, finding := range filteredFindings {
- fmt.Fprintf(os.Stdout, "%s\n", finding.String())
- }
- return 1
-}
diff --git a/tools/nogo/findings.go b/tools/nogo/findings.go
deleted file mode 100644
index 329a7062e..000000000
--- a/tools/nogo/findings.go
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nogo
-
-import (
- "encoding/gob"
- "encoding/json"
- "fmt"
- "go/token"
- "io"
- "os"
- "reflect"
- "sort"
-)
-
-// Finding is a single finding.
-type Finding struct {
- Category AnalyzerName
- Position token.Position
- Message string
-}
-
-// findingSize is the size of the finding struct itself.
-var findingSize = int64(reflect.TypeOf(Finding{}).Size())
-
-// Size implements worker.Sizer.Size.
-func (f *Finding) Size() int64 {
- return int64(len(f.Category)) + int64(len(f.Message)) + findingSize
-}
-
-// String implements fmt.Stringer.String.
-func (f *Finding) String() string {
- return fmt.Sprintf("%s: %s: %s", f.Category, f.Position.String(), f.Message)
-}
-
-// FindingSet is a collection of findings.
-type FindingSet []Finding
-
-// Size implements worker.Sizer.Size.
-func (fs FindingSet) Size() int64 {
- size := int64(0)
- for _, finding := range fs {
- size += finding.Size()
- }
- return size
-}
-
-// Sort sorts all findings.
-func (fs FindingSet) Sort() {
- sort.Slice(fs, func(i, j int) bool {
- switch {
- case fs[i].Position.Filename < fs[j].Position.Filename:
- return true
- case fs[i].Position.Filename > fs[j].Position.Filename:
- return false
- case fs[i].Position.Line < fs[j].Position.Line:
- return true
- case fs[i].Position.Line > fs[j].Position.Line:
- return false
- case fs[i].Position.Column < fs[j].Position.Column:
- return true
- case fs[i].Position.Column > fs[j].Position.Column:
- return false
- case fs[i].Category < fs[j].Category:
- return true
- case fs[i].Category > fs[j].Category:
- return false
- case fs[i].Message < fs[j].Message:
- return true
- case fs[i].Message > fs[j].Message:
- return false
- default:
- return false
- }
- })
-}
-
-// WriteFindingsTo serializes findings.
-func WriteFindingsTo(w io.Writer, findings FindingSet, asJSON bool) error {
- // N.B. Sort all the findings in order to maximize cacheability.
- findings.Sort()
- if asJSON {
- enc := json.NewEncoder(w)
- return enc.Encode(findings)
- }
- enc := gob.NewEncoder(w)
- return enc.Encode(findings)
-}
-
-// ExtractFindingsFromFile loads findings from a file.
-func ExtractFindingsFromFile(filename string, asJSON bool) (FindingSet, error) {
- r, err := os.Open(filename)
- if err != nil {
- return nil, err
- }
- defer r.Close()
- return ExtractFindingsFrom(r, asJSON)
-}
-
-// ExtractFindingsFrom loads findings from the given reader.
-func ExtractFindingsFrom(r io.Reader, asJSON bool) (findings FindingSet, err error) {
- if asJSON {
- dec := json.NewDecoder(r)
- err = dec.Decode(&findings)
- } else {
- dec := gob.NewDecoder(r)
- err = dec.Decode(&findings)
- }
- return findings, err
-}
-
-func init() {
- gob.Register((*Finding)(nil))
- gob.Register((*FindingSet)(nil))
-}
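
As a small sketch of the (now removed) serialization helpers above, the following writes a finding set as JSON and reads it back; the analyzer name, file names, and messages are made up:

package main

import (
	"bytes"
	"fmt"
	"go/token"

	"gvisor.dev/gvisor/tools/nogo"
)

func main() {
	findings := nogo.FindingSet{
		{Category: "printf", Position: token.Position{Filename: "b.go", Line: 7}, Message: "second"},
		{Category: "printf", Position: token.Position{Filename: "a.go", Line: 3}, Message: "first"},
	}

	// WriteFindingsTo sorts before encoding; JSON is the format consumed
	// by the filter's -check mode and by downstream tooling.
	var buf bytes.Buffer
	if err := nogo.WriteFindingsTo(&buf, findings, true /* json */); err != nil {
		panic(err)
	}

	// Read the findings back with the matching format flag.
	decoded, err := nogo.ExtractFindingsFrom(&buf, true /* json */)
	if err != nil {
		panic(err)
	}
	for _, f := range decoded {
		fmt.Println(f.String()) // a.go sorts before b.go
	}
}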
diff --git a/tools/nogo/nogo.go b/tools/nogo/nogo.go
deleted file mode 100644
index d95d7652f..000000000
--- a/tools/nogo/nogo.go
+++ /dev/null
@@ -1,647 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package nogo implements binary analysis similar to bazel's nogo,
-// or the unitchecker package. It exists in order to provide additional
-// facilities for analysis, namely plumbing through the output from
-// dumping the generated binary (to analyze actual produced code).
-package nogo
-
-import (
- "bytes"
- "encoding/gob"
- "errors"
- "fmt"
- "go/ast"
- "go/build"
- "go/parser"
- "go/token"
- "go/types"
- "io"
- "io/ioutil"
- "log"
- "os"
- "path"
- "path/filepath"
- "reflect"
- "sort"
- "strings"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/analysis/internal/facts"
- "golang.org/x/tools/go/gcexportdata"
- "golang.org/x/tools/go/types/objectpath"
-
- // Special case: flags live here and change overall behavior.
- "gvisor.dev/gvisor/tools/nogo/objdump"
- "gvisor.dev/gvisor/tools/worker"
-)
-
-// StdlibConfig is serialized as the configuration.
-//
-// This contains everything required for stdlib analysis.
-type StdlibConfig struct {
- Srcs []string
- GOOS string
- GOARCH string
- Tags []string
-}
-
-// PackageConfig is serialized as the configuration.
-//
-// This contains everything required for single package analysis.
-type PackageConfig struct {
- ImportPath string
- GoFiles []string
- NonGoFiles []string
- Tags []string
- GOOS string
- GOARCH string
- ImportMap map[string]string
- FactMap map[string]string
- StdlibFacts string
-}
-
-// loader is a fact-loader function.
-type loader func(string) ([]byte, error)
-
-// saver is a fact-saver function.
-type saver func([]byte) error
-
-// stdlibFact is used for serialization.
-type stdlibFact struct {
- Package string
- Facts []byte
-}
-
-// stdlibFacts is a set of standard library facts.
-type stdlibFacts map[string][]byte
-
-// Size implements worker.Sizer.Size.
-func (sf stdlibFacts) Size() int64 {
- size := int64(0)
- for filename, data := range sf {
- size += int64(len(filename))
- size += int64(len(data))
- }
- return size
-}
-
-// EncodeTo serializes stdlibFacts.
-func (sf stdlibFacts) EncodeTo(w io.Writer) error {
- stdlibFactsSorted := make([]stdlibFact, 0, len(sf))
- for pkg, facts := range sf {
- stdlibFactsSorted = append(stdlibFactsSorted, stdlibFact{
- Package: pkg,
- Facts: facts,
- })
- }
- sort.Slice(stdlibFactsSorted, func(i, j int) bool {
- return stdlibFactsSorted[i].Package < stdlibFactsSorted[j].Package
- })
- enc := gob.NewEncoder(w)
- if err := enc.Encode(stdlibFactsSorted); err != nil {
- return err
- }
- return nil
-}
-
-// DecodeFrom deserializes stdlibFacts.
-func (sf stdlibFacts) DecodeFrom(r io.Reader) error {
- var stdlibFactsSorted []stdlibFact
- dec := gob.NewDecoder(r)
- if err := dec.Decode(&stdlibFactsSorted); err != nil {
- return err
- }
- for _, stdlibFact := range stdlibFactsSorted {
- sf[stdlibFact.Package] = stdlibFact.Facts
- }
- return nil
-}
-
-var (
- // cachedFacts caches by file (just byte data).
- cachedFacts = worker.NewCache("facts")
-
- // stdlibCachedFacts caches the standard library (stdlibFacts).
- stdlibCachedFacts = worker.NewCache("stdlib")
-)
-
-// factLoader loads facts.
-func (c *PackageConfig) factLoader(path string) (data []byte, err error) {
- filename, ok := c.FactMap[path]
- if ok {
- cb := cachedFacts.Lookup([]string{filename}, func() worker.Sizer {
- data, readErr := ioutil.ReadFile(filename)
- if readErr != nil {
- err = fmt.Errorf("error loading %q: %w", filename, readErr)
- return nil
- }
- return worker.CacheBytes(data)
- })
- if cb != nil {
- return []byte(cb.(worker.CacheBytes)), err
- }
- return nil, err
- }
- cb := stdlibCachedFacts.Lookup([]string{c.StdlibFacts}, func() worker.Sizer {
- r, openErr := os.Open(c.StdlibFacts)
- if openErr != nil {
- err = fmt.Errorf("error loading stdlib facts from %q: %w", c.StdlibFacts, openErr)
- return nil
- }
- defer r.Close()
- sf := make(stdlibFacts)
- if readErr := sf.DecodeFrom(r); readErr != nil {
- err = fmt.Errorf("error loading stdlib facts: %w", readErr)
- return nil
- }
- return sf
- })
- if cb != nil {
- return (cb.(stdlibFacts))[path], err
- }
- return nil, err
-}
-
-// shouldInclude indicates whether the file should be included.
-//
-// NOTE: This does only basic parsing of tags.
-func (c *PackageConfig) shouldInclude(path string) (bool, error) {
- ctx := build.Default
- ctx.GOOS = c.GOOS
- ctx.GOARCH = c.GOARCH
- ctx.BuildTags = c.Tags
- return ctx.MatchFile(filepath.Dir(path), filepath.Base(path))
-}
-
-// importer is an implementation of go/types.Importer.
-//
-// This wraps a configuration, which provides the map of package names to
-// files, and the facts. Note that this importer implementation will always
-// pass when a given package is not available.
-type importer struct {
- *PackageConfig
- fset *token.FileSet
- cache map[string]*types.Package
- lastErr error
- callback func(string) error
-}
-
-// Import implements types.Importer.Import.
-func (i *importer) Import(path string) (*types.Package, error) {
- if path == "unsafe" {
- // Special case: go/types has pre-defined type information for
- // unsafe. We ensure that this package is correct, in case any
- // analyzers are specifically looking for this.
- return types.Unsafe, nil
- }
-
- // Call the internal callback. This is used to resolve loading order
- // for the standard library. See CheckStdlib.
- if i.callback != nil {
- if err := i.callback(path); err != nil {
- i.lastErr = err
- return nil, err
- }
- }
-
- // Check the cache.
- if pkg, ok := i.cache[path]; ok && pkg.Complete() {
- return pkg, nil
- }
-
- // Actually load the data.
- realPath, ok := i.ImportMap[path]
- var (
- rc io.ReadCloser
- err error
- )
- if !ok {
- // Not found in the import path. Attempt to find the package
- // via the standard library.
- rc, err = findStdPkg(i.GOOS, i.GOARCH, path)
- } else {
- // Open the file.
- rc, err = os.Open(realPath)
- }
- if err != nil {
- i.lastErr = err
- return nil, err
- }
- defer rc.Close()
-
- // Load all exported data.
- r, err := gcexportdata.NewReader(rc)
- if err != nil {
- return nil, err
- }
-
- return gcexportdata.Read(r, i.fset, i.cache, path)
-}
-
-// ErrSkip indicates the package should be skipped.
-var ErrSkip = errors.New("skipped")
-
-// CheckStdlib checks the standard library.
-//
-// This constructs a synthetic package configuration for each library in the
-// standard library sources, and calls CheckPackage repeatedly.
-//
-// Note that not all parts of the source are expected to build. We skip obvious
-// test files, and cmd files, which should not be dependencies.
-func CheckStdlib(config *StdlibConfig, analyzers []*analysis.Analyzer) (allFindings FindingSet, facts []byte, err error) {
- if len(config.Srcs) == 0 {
- return nil, nil, nil
- }
-
- // Ensure all paths are normalized.
- for i := 0; i < len(config.Srcs); i++ {
- config.Srcs[i] = path.Clean(config.Srcs[i])
- }
-
- // Calculate the root source directory. This is always a directory
- // named 'src', of which we simply take the first we find. This is a
- // bit fragile, but works for all currently known Go source
- // configurations.
- //
- // Note that there may be extra files outside of the root source
- // directory; we simply ignore those.
- rootSrcPrefix := ""
- for _, file := range config.Srcs {
- const src = "/src/"
- i := strings.Index(file, src)
- if i == -1 {
- // Superfluous file.
- continue
- }
-
- // Index of first character after /src/.
- i += len(src)
- rootSrcPrefix = file[:i]
- break
- }
-
- // Aggregate all files by directory.
- packages := make(map[string]*PackageConfig)
- for _, file := range config.Srcs {
- if !strings.HasPrefix(file, rootSrcPrefix) {
- // Superfluous file.
- continue
- }
-
- d := path.Dir(file)
- if len(rootSrcPrefix) >= len(d) {
- continue // Not a file.
- }
- pkg := d[len(rootSrcPrefix):]
- // Skip cmd packages and obvious test files: see above.
- if strings.HasPrefix(pkg, "cmd/") || strings.HasSuffix(file, "_test.go") {
- continue
- }
- c, ok := packages[pkg]
- if !ok {
- c = &PackageConfig{
- ImportPath: pkg,
- GOOS: config.GOOS,
- GOARCH: config.GOARCH,
- Tags: config.Tags,
- }
- packages[pkg] = c
- }
- // Add the files appropriately. Note that they will be further
- // filtered by architecture and build tags below, so this need
- // not be done immediately.
- if strings.HasSuffix(file, ".go") {
- c.GoFiles = append(c.GoFiles, file)
- } else {
- c.NonGoFiles = append(c.NonGoFiles, file)
- }
- }
-
- // Closure to check a single package.
- localStdlibFacts := make(stdlibFacts)
- localStdlibErrs := make(map[string]error)
- stdlibCachedFacts.Lookup([]string{""}, func() worker.Sizer {
- return localStdlibFacts
- })
- var checkOne func(pkg string) error // Recursive.
- checkOne = func(pkg string) error {
- // Is this already done?
- if _, ok := localStdlibFacts[pkg]; ok {
- return nil
- }
- // Did this fail previously?
- if _, ok := localStdlibErrs[pkg]; ok {
- return nil
- }
-
- // Lookup the configuration.
- config, ok := packages[pkg]
- if !ok {
- return nil // Not known.
- }
-
- // Find the binary package, and provide to objdump.
- rc, err := findStdPkg(config.GOOS, config.GOARCH, pkg)
- if err != nil {
- // If there's no binary for this package, it is likely
- // not built with the distribution. That's fine, we can
- // just skip analysis.
- localStdlibErrs[pkg] = err
- return nil
- }
-
- // Provide the input.
- oldReader := objdump.Reader
- objdump.Reader = rc // For analysis.
- defer func() {
- rc.Close()
- objdump.Reader = oldReader // Restore.
- }()
-
- // Run the analysis.
- findings, factData, err := CheckPackage(config, analyzers, checkOne)
- if err != nil {
- // If we can't analyze a package from the standard library,
- // then we skip it. It will simply not have any findings.
- localStdlibErrs[pkg] = err
- return nil
- }
- localStdlibFacts[pkg] = factData
- allFindings = append(allFindings, findings...)
- return nil
- }
-
- // Check all packages.
- //
- // Note that this may call checkOne recursively, so it's not guaranteed
- // to evaluate in the order provided here. We do ensure, however, that
- // all packages are evaluated.
- for pkg := range packages {
- if err := checkOne(pkg); err != nil {
- return nil, nil, err
- }
- }
-
- // Sanity check.
- if len(localStdlibFacts) == 0 {
- return nil, nil, fmt.Errorf("no stdlib facts found: misconfiguration?")
- }
-
- // Serialize the collected stdlib facts.
- buf := bytes.NewBuffer(nil)
- if err := localStdlibFacts.EncodeTo(buf); err != nil {
- return nil, nil, fmt.Errorf("error serialized stdlib facts: %v", err)
- }
-
- // Write out all errors.
- for pkg, err := range localStdlibErrs {
- log.Printf("WARNING: error while processing %v: %v", pkg, err)
- }
-
- // Return all findings.
- return allFindings, buf.Bytes(), nil
-}
-
-// sanityCheckScope checks that all objects in astTypes map to the correct
-// objects in binaryTypes. Note that we don't check whether the sets are the
-// same, we only care about the fidelity of objects in astTypes.
-//
-// When an inconsistency is identified, we record it in the astToBinary map.
-// This allows us to dynamically replace facts and correct for the issue.
-// Mismatches are logged as warnings.
-func sanityCheckScope(astScope *types.Scope, binaryTypes *types.Package, binaryScope *types.Scope, astToBinary map[types.Object]types.Object) error {
- for _, x := range astScope.Names() {
- fe := astScope.Lookup(x)
- path, err := objectpath.For(fe)
- if err != nil {
- continue // Not an encoded object.
- }
- se, err := objectpath.Object(binaryTypes, path)
- if err != nil {
- continue // May be unused, see below.
- }
- if fe.Id() != se.Id() {
- // These types are incompatible. This means that when
- // this objectpath is loading from the binaryTypes (for
- // dependencies) it will resolve to a fact for that
- // type. We don't actually care about this error since
- // we do the rewrite, but may as well alert.
- log.Printf("WARNING: Object %s is a victim of go/issues/44195.", fe.Id())
- }
- se = binaryScope.Lookup(x)
- if se == nil {
- // The fact may not be exported in the objectdata, if
- // it is package internal. This is fine, as nothing out
- // of this package can use these symbols.
- continue
- }
- // Save the translation.
- astToBinary[fe] = se
- }
- for i := 0; i < astScope.NumChildren(); i++ {
- if err := sanityCheckScope(astScope.Child(i), binaryTypes, binaryScope, astToBinary); err != nil {
- return err
- }
- }
- return nil
-}
-
-// sanityCheckTypes checks that the AST-derived and binary package types are
-// consistent, recording object translations in astToBinary.
-func sanityCheckTypes(astTypes, binaryTypes *types.Package, astToBinary map[types.Object]types.Object) error {
- return sanityCheckScope(astTypes.Scope(), binaryTypes, binaryTypes.Scope(), astToBinary)
-}
-
-// CheckPackage runs all given analyzers.
-//
-// The implementation was adapted from [1], which was in turn adapted from [2].
-// This returns a list of matching analysis issues, or an error if the analysis
-// could not be completed.
-//
-// [1] bazelbuild/rules_go/tools/builders/nogo_main.go
-// [2] golang.org/x/tools/go/checker/internal/checker
-func CheckPackage(config *PackageConfig, analyzers []*analysis.Analyzer, importCallback func(string) error) (findings []Finding, factData []byte, err error) {
- imp := &importer{
- PackageConfig: config,
- fset: token.NewFileSet(),
- cache: make(map[string]*types.Package),
- callback: importCallback,
- }
-
- // Load all source files.
- var syntax []*ast.File
- for _, file := range config.GoFiles {
- include, err := config.shouldInclude(file)
- if err != nil {
- return nil, nil, fmt.Errorf("error evaluating file %q: %v", file, err)
- }
- if !include {
- continue
- }
- s, err := parser.ParseFile(imp.fset, file, nil, parser.ParseComments)
- if err != nil {
- return nil, nil, fmt.Errorf("error parsing file %q: %v", file, err)
- }
- syntax = append(syntax, s)
- }
-
- // Check type information.
- typesSizes := types.SizesFor("gc", config.GOARCH)
- typeConfig := types.Config{Importer: imp}
- typesInfo := &types.Info{
- Types: make(map[ast.Expr]types.TypeAndValue),
- Uses: make(map[*ast.Ident]types.Object),
- Defs: make(map[*ast.Ident]types.Object),
- Implicits: make(map[ast.Node]types.Object),
- Scopes: make(map[ast.Node]*types.Scope),
- Selections: make(map[*ast.SelectorExpr]*types.Selection),
- }
- astTypes, err := typeConfig.Check(config.ImportPath, imp.fset, syntax, typesInfo)
- if err != nil && imp.lastErr != ErrSkip {
- return nil, nil, fmt.Errorf("error checking types: %w", err)
- }
-
- // Load all facts using the astTypes, although it may need reconciling
- // later on. See the fact functions below.
- astFacts, err := facts.Decode(astTypes, config.factLoader)
- if err != nil {
- return nil, nil, fmt.Errorf("error decoding facts: %w", err)
- }
-
- // Sanity check all types and record metadata to prevent
- // https://github.com/golang/go/issues/44195.
- //
- // This block loads the binary types, whose encoding will be well
- // defined and aligned with any downstream consumers. Below in the fact
- // functions for the analysis, we serialize types to both the astFacts
- // and the binaryFacts if available. The binaryFacts are the final
- // encoded facts in order to ensure compatibility. We keep the
- // intermediate astTypes in order to allow exporting and importing
- // within the local package under analysis.
- var (
- astToBinary = make(map[types.Object]types.Object)
- binaryFacts *facts.Set
- )
- if _, ok := config.ImportMap[config.ImportPath]; ok {
- binaryTypes, err := imp.Import(config.ImportPath)
- if err != nil {
- return nil, nil, fmt.Errorf("error loading self: %w", err)
- }
- if err := sanityCheckTypes(astTypes, binaryTypes, astToBinary); err != nil {
- return nil, nil, fmt.Errorf("error sanity checking types: %w", err)
- }
- binaryFacts, err = facts.Decode(binaryTypes, config.factLoader)
- if err != nil {
- return nil, nil, fmt.Errorf("error decoding facts: %w", err)
- }
- }
-
- // Register fact types and establish dependencies between analyzers.
- // The visit closure will execute recursively, and populate results
- // with all required analysis results.
- results := make(map[*analysis.Analyzer]interface{})
- var visit func(*analysis.Analyzer) error // For recursion.
- visit = func(a *analysis.Analyzer) error {
- if _, ok := results[a]; ok {
- return nil
- }
-
- // Run recursively for all dependencies.
- for _, req := range a.Requires {
- if err := visit(req); err != nil {
- return err
- }
- }
-
- // Run the analysis.
- localFactsFilter := make(map[reflect.Type]bool)
- for _, f := range a.FactTypes {
- localFactsFilter[reflect.TypeOf(f)] = true
- }
- p := &analysis.Pass{
- Analyzer: a,
- Fset: imp.fset,
- Files: syntax,
- Pkg: astTypes,
- TypesInfo: typesInfo,
- ResultOf: results, // All results.
- Report: func(d analysis.Diagnostic) {
- findings = append(findings, Finding{
- Category: AnalyzerName(a.Name),
- Position: imp.fset.Position(d.Pos),
- Message: d.Message,
- })
- },
- ImportPackageFact: astFacts.ImportPackageFact,
- ExportPackageFact: func(fact analysis.Fact) {
- astFacts.ExportPackageFact(fact)
- if binaryFacts != nil {
- binaryFacts.ExportPackageFact(fact)
- }
- },
- ImportObjectFact: astFacts.ImportObjectFact,
- ExportObjectFact: func(obj types.Object, fact analysis.Fact) {
- astFacts.ExportObjectFact(obj, fact)
- // Note that if no object is recorded in
- // astToBinary and binaryFacts != nil, then the
- // object doesn't appear in the exported data.
- // It was likely an internal object to the
- // package, and there is no meaningful
- // downstream consumer of the fact.
- if binaryObj, ok := astToBinary[obj]; ok && binaryFacts != nil {
- binaryFacts.ExportObjectFact(binaryObj, fact)
- }
- },
- AllPackageFacts: func() []analysis.PackageFact { return astFacts.AllPackageFacts(localFactsFilter) },
- AllObjectFacts: func() []analysis.ObjectFact { return astFacts.AllObjectFacts(localFactsFilter) },
- TypesSizes: typesSizes,
- }
- result, err := a.Run(p)
- if err != nil {
- return fmt.Errorf("error running analysis %s: %v", a, err)
- }
-
- // Sanity check & save the result.
- if got, want := reflect.TypeOf(result), a.ResultType; got != want {
- return fmt.Errorf("error: analyzer %s returned a result of type %v, but declared ResultType %v", a, got, want)
- }
- results[a] = result
- return nil // Success.
- }
-
- // Visit all analyzers recursively.
- for _, a := range analyzers {
- if imp.lastErr == ErrSkip {
- continue // No local analysis.
- }
- if err := visit(a); err != nil {
- return nil, nil, err // Already has context.
- }
- }
-
- // Return all findings. Note that we have a preference for returning the
- // binary facts if available, so that downstream consumers of these
- // facts will find the export aligns with the internal type details.
- // See the block above with the call to sanityCheckTypes.
- if binaryFacts != nil {
- return findings, binaryFacts.Encode(), nil
- }
- return findings, astFacts.Encode(), nil
-}
-
-func init() {
- gob.Register((*stdlibFact)(nil))
-}
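
For orientation, a heavily simplified sketch of driving CheckPackage directly with a single upstream analyzer. The import path, file path, and package name are hypothetical, and the fact plumbing (FactMap, ImportMap, StdlibFacts) that the Bazel aspect normally supplies is omitted, so this only illustrates the call shape:

package main

import (
	"fmt"

	"golang.org/x/tools/go/analysis"
	"golang.org/x/tools/go/analysis/passes/unreachable"

	"gvisor.dev/gvisor/tools/nogo"
)

func main() {
	// Hypothetical single-package configuration; in the Bazel flow this is
	// generated by the nogo aspect and serialized as JSON.
	config := &nogo.PackageConfig{
		ImportPath: "example.com/demo",
		GoFiles:    []string{"/tmp/demo/demo.go"}, // hypothetical source file
		GOOS:       "linux",
		GOARCH:     "amd64",
	}
	analyzers := []*analysis.Analyzer{unreachable.Analyzer}

	findings, factData, err := nogo.CheckPackage(config, analyzers, nil /* importCallback */)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d findings, %d bytes of facts\n", len(findings), len(factData))
}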
diff --git a/tools/nogo/objdump/BUILD b/tools/nogo/objdump/BUILD
deleted file mode 100644
index da56efdf7..000000000
--- a/tools/nogo/objdump/BUILD
+++ /dev/null
@@ -1,10 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "objdump",
- srcs = ["objdump.go"],
- nogo = False,
- visibility = ["//tools:__subpackages__"],
-)
diff --git a/tools/nogo/objdump/objdump.go b/tools/nogo/objdump/objdump.go
deleted file mode 100644
index 48484abf3..000000000
--- a/tools/nogo/objdump/objdump.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package objdump is a wrapper around relevant objdump flags.
-package objdump
-
-import (
- "flag"
- "fmt"
- "io"
- "os"
- "os/exec"
-)
-
-var (
- // Binary is the binary under analysis.
- //
- // See Reader, below.
- binary = flag.String("binary", "", "binary under analysis")
-
- // Reader is the input stream.
- //
- // This may be set instead of Binary.
- Reader io.Reader
-
- // objdumpTool is the tool used to dump a binary.
- objdumpTool = flag.String("objdump_tool", "", "tool used to dump a binary")
-)
-
-// LoadRaw reads the raw object output.
-func LoadRaw(fn func(r io.Reader) error) error {
- var r io.Reader
- if *binary != "" {
- f, err := os.Open(*binary)
- if err != nil {
- return err
- }
- defer f.Close()
- r = f
- } else if Reader != nil {
- r = Reader
- } else {
- // We have no input stream.
- return fmt.Errorf("no binary or reader provided")
- }
- return fn(r)
-}
-
-// Load reads the objdump output.
-func Load(fn func(r io.Reader) error) error {
- var (
- args []string
- stdin io.Reader
- )
- if *binary != "" {
- args = append(args, *binary)
- } else if Reader != nil {
- stdin = Reader
- } else {
- // We have no input stream or binary.
- return fmt.Errorf("no binary or reader provided")
- }
-
- // Construct our command.
- cmd := exec.Command(*objdumpTool, args...)
- cmd.Stdin = stdin
- cmd.Stderr = os.Stderr
- out, err := cmd.StdoutPipe()
- if err != nil {
- return err
- }
- if err := cmd.Start(); err != nil {
- return err
- }
-
- // Call the user hook.
- userErr := fn(out)
-
- // Wait for the dump to finish.
- if err := cmd.Wait(); userErr == nil && err != nil {
- return err
- }
-
- return userErr
-}
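
A small sketch of how a caller might consume the objdump output via Load; it assumes the package-level -binary and -objdump_tool flags are supplied on the command line (both paths hypothetical):

package main

import (
	"bufio"
	"flag"
	"fmt"
	"io"

	"gvisor.dev/gvisor/tools/nogo/objdump"
)

func main() {
	// e.g. demo -binary=/path/to/lib.a -objdump_tool=/path/to/objdump-wrapper
	flag.Parse()

	// Count the lines of disassembly produced by the configured tool.
	lines := 0
	err := objdump.Load(func(r io.Reader) error {
		scanner := bufio.NewScanner(r)
		for scanner.Scan() {
			lines++
		}
		return scanner.Err()
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("objdump produced", lines, "lines")
}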
diff --git a/tools/parsers/BUILD b/tools/parsers/BUILD
deleted file mode 100644
index 6932bba9a..000000000
--- a/tools/parsers/BUILD
+++ /dev/null
@@ -1,45 +0,0 @@
-load("//tools:defs.bzl", "go_binary", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_test(
- name = "parsers_test",
- size = "small",
- srcs = ["go_parser_test.go"],
- library = ":parsers",
- nogo = False,
- deps = [
- "//tools/bigquery",
- "@com_github_google_go_cmp//cmp:go_default_library",
- ],
-)
-
-go_library(
- name = "parsers",
- testonly = 1,
- srcs = [
- "go_parser.go",
- ],
- nogo = False,
- visibility = ["//:sandbox"],
- deps = [
- "//test/benchmarks/tools",
- "//tools/bigquery",
- ],
-)
-
-go_binary(
- name = "parser",
- testonly = 1,
- srcs = [
- "parser_main.go",
- "version.go",
- ],
- nogo = False,
- x_defs = {"main.version": "{STABLE_VERSION}"},
- deps = [
- ":parsers",
- "//runsc/flag",
- "//tools/bigquery",
- ],
-)
diff --git a/tools/parsers/go_parser.go b/tools/parsers/go_parser.go
deleted file mode 100644
index 57e538149..000000000
--- a/tools/parsers/go_parser.go
+++ /dev/null
@@ -1,150 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package parsers holds parsers to parse Benchmark test output.
-//
-// Parsers parse Benchmark test output and place it in BigQuery
-// structs for sending to BigQuery databases.
-package parsers
-
-import (
- "fmt"
- "strconv"
- "strings"
-
- "gvisor.dev/gvisor/test/benchmarks/tools"
- "gvisor.dev/gvisor/tools/bigquery"
-)
-
-// ParseOutput expects golang benchmark output and returns a struct formatted
-// for BigQuery.
-func ParseOutput(output string, name string, official bool) (*bigquery.Suite, error) {
- suite := bigquery.NewSuite(name, official)
- lines := strings.Split(output, "\n")
- for _, line := range lines {
- bm, err := parseLine(line)
- if err != nil {
- return nil, fmt.Errorf("failed to parse line '%s': %v", line, err)
- }
- if bm != nil {
- suite.Benchmarks = append(suite.Benchmarks, bm)
- }
- }
- return suite, nil
-}
-
-// parseLine handles parsing a benchmark line into a bigquery.Benchmark.
-//
-// Example: "BenchmarkRuby/server_threads.1-6 1 1397875880 ns/op 140 requests_per_second.QPS"
-//
-// This function will return the following benchmark:
-// *bigquery.Benchmark{
-// Name: BenchmarkRuby
-// []*bigquery.Condition{
-// {Name: GOMAXPROCS, 6}
-// {Name: server_threads, 1}
-// }
-// []*bigquery.Metric{
-// {Name: ns/op, Unit: ns/op, Sample: 1397875880}
-// {Name: requests_per_second, Unit: QPS, Sample: 140 }
-// }
-//}
-func parseLine(line string) (*bigquery.Benchmark, error) {
- fields := strings.Fields(line)
-
- // Check if this line is a Benchmark line. Otherwise ignore the line.
- if len(fields) < 2 || !strings.HasPrefix(fields[0], "Benchmark") {
- return nil, nil
- }
-
- iters, err := strconv.Atoi(fields[1])
- if err != nil {
- return nil, fmt.Errorf("expecting number of runs, got %s: %v", fields[1], err)
- }
-
- name, params, err := parseNameParams(fields[0])
- if err != nil {
- return nil, fmt.Errorf("parse name/params: %v", err)
- }
-
- bm := bigquery.NewBenchmark(name, iters)
- for _, p := range params {
- bm.AddCondition(p.Name, p.Value)
- }
-
- for i := 1; i < len(fields)/2; i++ {
- value := fields[2*i]
- metric := fields[2*i+1]
- if err := makeMetric(bm, value, metric); err != nil {
- return nil, fmt.Errorf("makeMetric on metric %q value: %s: %v", metric, value, err)
- }
- }
- return bm, nil
-}
-
-// parseNameParams parses the Name, GOMAXPROCS, and Params from the test.
-// Field here should be of the format TESTNAME/PARAMS-GOMAXPROCS.
-// Parameters will be separated by a "/" with individual params being
-// "name.value".
-func parseNameParams(field string) (string, []*tools.Parameter, error) {
- var params []*tools.Parameter
- // Remove GOMAXPROCS from end.
- maxIndex := strings.LastIndex(field, "-")
- if maxIndex < 0 {
- return "", nil, fmt.Errorf("GOMAXPROCS not found: %s", field)
- }
- maxProcs := field[maxIndex+1:]
- params = append(params, &tools.Parameter{
- Name: "GOMAXPROCS",
- Value: maxProcs,
- })
-
- remainder := field[0:maxIndex]
- index := strings.Index(remainder, "/")
- if index == -1 {
- return remainder, params, nil
- }
-
- name := remainder[0:index]
- p := remainder[index+1:]
-
- ps, err := tools.NameToParameters(p)
- if err != nil {
- return "", nil, fmt.Errorf("NameToParameters %s: %v", field, err)
- }
- params = append(params, ps...)
- return name, params, nil
-}
-
-// makeMetric parses metrics and adds them to the passed Benchmark.
-func makeMetric(bm *bigquery.Benchmark, value, metric string) error {
- switch metric {
- // Ignore most output from golang benchmarks.
- case "MB/s", "B/op", "allocs/op":
- return nil
- case "ns/op":
- val, err := strconv.ParseFloat(value, 64)
- if err != nil {
- return fmt.Errorf("ParseFloat %s: %v", value, err)
- }
- bm.AddMetric(metric /*metric name*/, metric /*unit*/, val /*sample*/)
- default:
- m, err := tools.ParseCustomMetric(value, metric)
- if err != nil {
- return fmt.Errorf("ParseCustomMetric %s: %v ", metric, err)
- }
- bm.AddMetric(m.Name, m.Unit, m.Sample)
- }
- return nil
-}
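
For context on the parser being removed here: it turned `go test -bench` output into the BigQuery structs referenced above. A minimal sketch of how it was typically driven — assuming the package were still importable as gvisor.dev/gvisor/tools/parsers, with the benchmark line taken from the parseLine doc comment:

```go
package main

import (
	"fmt"
	"log"

	"gvisor.dev/gvisor/tools/parsers"
)

func main() {
	// A single `go test -bench` line in the TESTNAME/PARAMS-GOMAXPROCS form
	// documented on parseLine above.
	out := "BenchmarkRuby/server_threads.1-6 1 1397875880 ns/op 140 requests_per_second.QPS"

	suite, err := parsers.ParseOutput(out, "ruby", false /* official */)
	if err != nil {
		log.Fatalf("ParseOutput: %v", err)
	}
	for _, bm := range suite.Benchmarks {
		// Conditions carry GOMAXPROCS and sub-benchmark parameters; Metrics
		// carry ns/op plus any custom "name.unit" samples.
		fmt.Printf("%s: %d conditions, %d metrics\n", bm.Name, len(bm.Condition), len(bm.Metric))
	}
}
```
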
diff --git a/tools/parsers/go_parser_test.go b/tools/parsers/go_parser_test.go
deleted file mode 100644
index 39a13b4af..000000000
--- a/tools/parsers/go_parser_test.go
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package parsers
-
-import (
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/tools/bigquery"
-)
-
-func TestParseLine(t *testing.T) {
- testCases := []struct {
- name string
- data string
- want *bigquery.Benchmark
- }{
- {
- name: "Iperf",
- data: "BenchmarkIperf/Upload-6 1 11094914892 ns/op 4751711232 bandwidth.bytes_per_second",
- want: &bigquery.Benchmark{
- Name: "BenchmarkIperf",
- Condition: []*bigquery.Condition{
- {
- Name: "iterations",
- Value: "1",
- },
- {
- Name: "GOMAXPROCS",
- Value: "6",
- },
- {
- Name: "Upload",
- Value: "Upload",
- },
- },
- Metric: []*bigquery.Metric{
- {
- Name: "ns/op",
- Unit: "ns/op",
- Sample: 11094914892.0,
- },
- {
- Name: "bandwidth",
- Unit: "bytes_per_second",
- Sample: 4751711232.0,
- },
- },
- },
- },
- {
- name: "Ruby",
- data: "BenchmarkRuby/server_threads.1-6 1 1397875880 ns/op 0.00710 average_latency.s 140 requests_per_second.QPS",
- want: &bigquery.Benchmark{
- Name: "BenchmarkRuby",
- Condition: []*bigquery.Condition{
- {
- Name: "iterations",
- Value: "1",
- },
- {
- Name: "GOMAXPROCS",
- Value: "6",
- },
- {
- Name: "server_threads",
- Value: "1",
- },
- },
- Metric: []*bigquery.Metric{
- {
- Name: "ns/op",
- Unit: "ns/op",
- Sample: 1397875880.0,
- },
- {
- Name: "average_latency",
- Unit: "s",
- Sample: 0.00710,
- },
- {
- Name: "requests_per_second",
- Unit: "QPS",
- Sample: 140.0,
- },
- },
- },
- },
- }
-
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- got, err := parseLine(tc.data)
- if err != nil {
- t.Fatalf("parseLine failed with: %v", err)
- }
-
- if !cmp.Equal(tc.want, got, nil) {
- for i := range got.Condition {
-                               t.Logf("Condition: want: %+v got: %+v", tc.want.Condition[i], got.Condition[i])
- }
-
- for i := range got.Metric {
-                               t.Logf("Metric: want: %+v got: %+v", tc.want.Metric[i], got.Metric[i])
- }
-
- t.Fatalf("Compare failed want: %+v got: %+v", tc.want, got)
- }
- })
-
- }
-}
-
-func TestParseOutput(t *testing.T) {
- testCases := []struct {
- name string
- data string
- numBenchmarks int
- numMetrics int
- numConditions int
- }{
- {
- name: "Startup",
- data: `
- BenchmarkStartupEmpty
- BenchmarkStartupEmpty-6 2 766377884 ns/op 1 allocs/op
- BenchmarkStartupNode
- BenchmarkStartupNode-6 1 1752158409 ns/op 1 allocs/op
- `,
- numBenchmarks: 2,
- numMetrics: 1,
- numConditions: 2,
- },
- {
- name: "Ruby",
- data: `BenchmarkRuby
-BenchmarkRuby/server_threads.1
-BenchmarkRuby/server_threads.1-6 1 1397875880 ns/op 0.00710 average_latency.s 140 requests_per_second.QPS
-BenchmarkRuby/server_threads.5
-BenchmarkRuby/server_threads.5-6 1 1416003331 ns/op 0.00950 average_latency.s 465 requests_per_second.QPS`,
- numBenchmarks: 2,
- numMetrics: 3,
- numConditions: 3,
- },
- }
-
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- suite, err := ParseOutput(tc.data, "", false)
- if err != nil {
-                               t.Fatalf("ParseOutput failed: %v", err)
- } else if len(suite.Benchmarks) != tc.numBenchmarks {
- t.Fatalf("NumBenchmarks failed want: %d got: %d %+v", tc.numBenchmarks, len(suite.Benchmarks), suite.Benchmarks)
- }
-
- for _, bm := range suite.Benchmarks {
- if len(bm.Metric) != tc.numMetrics {
- t.Fatalf("NumMetrics failed want: %d got: %d %+v", tc.numMetrics, len(bm.Metric), bm.Metric)
- }
-
- if len(bm.Condition) != tc.numConditions {
- t.Fatalf("NumConditions failed want: %d got: %d %+v", tc.numConditions, len(bm.Condition), bm.Condition)
- }
- }
- })
- }
-}
diff --git a/tools/parsers/parser_main.go b/tools/parsers/parser_main.go
deleted file mode 100644
index 01396494a..000000000
--- a/tools/parsers/parser_main.go
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Binary parser parses Benchmark data from golang benchmarks,
-// puts it into a Schema for BigQuery, and sends it to BigQuery.
-// parser will also initialize a table with the Benchmarks BigQuery schema.
-package main
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "log"
- "os"
-
- "gvisor.dev/gvisor/runsc/flag"
- bq "gvisor.dev/gvisor/tools/bigquery"
- "gvisor.dev/gvisor/tools/parsers"
-)
-
-const (
- initString = "init"
- initDescription = "initializes a new table with benchmarks schema"
- parseString = "parse"
-       parseDescription = "parses the given benchmarks file and sends it to a BigQuery table."
-)
-
-var (
- // The init command will create a new dataset/table in the given project and initialize
- // the table with the schema in //tools/bigquery/bigquery.go. If the table/dataset exists
- // or has been initialized, init has no effect and successfully returns.
- initCmd = flag.NewFlagSet(initString, flag.ContinueOnError)
- initProject = initCmd.String("project", "", "GCP project to send benchmarks.")
- initDataset = initCmd.String("dataset", "", "dataset to send benchmarks data.")
- initTable = initCmd.String("table", "", "table to send benchmarks data.")
-
- // The parse command parses benchmark data in `file` and sends it to the
- // requested table.
- parseCmd = flag.NewFlagSet(parseString, flag.ContinueOnError)
- file = parseCmd.String("file", "", "file to parse for benchmarks")
- name = parseCmd.String("suite_name", "", "name of the benchmark suite")
- parseProject = parseCmd.String("project", "", "GCP project to send benchmarks.")
- parseDataset = parseCmd.String("dataset", "", "dataset to send benchmarks data.")
- parseTable = parseCmd.String("table", "", "table to send benchmarks data.")
- official = parseCmd.Bool("official", false, "mark input data as official.")
- runtime = parseCmd.String("runtime", "", "runtime used to run the benchmark")
- debug = parseCmd.Bool("debug", false, "print debug logs")
-)
-
-// initBenchmarks initializes a dataset/table in a BigQuery project.
-func initBenchmarks(ctx context.Context) error {
- return bq.InitBigQuery(ctx, *initProject, *initDataset, *initTable, nil)
-}
-
-// parseBenchmarks parses the given file into the BigQuery schema,
-// adds some custom data for the commit, and sends the data to BigQuery.
-func parseBenchmarks(ctx context.Context) error {
- debugLog("Reading file: %s", *file)
- data, err := ioutil.ReadFile(*file)
- if err != nil {
- return fmt.Errorf("failed to read file %s: %v", *file, err)
- }
- debugLog("Parsing output: %s", string(data))
- suite, err := parsers.ParseOutput(string(data), *name, *official)
- if err != nil {
-               return fmt.Errorf("failed to parse data: %v", err)
- }
- debugLog("Parsed benchmarks: %d", len(suite.Benchmarks))
- if len(suite.Benchmarks) < 1 {
- fmt.Fprintf(os.Stderr, "Failed to find benchmarks for file: %s", *file)
- return nil
- }
-
- extraConditions := []*bq.Condition{
- {
- Name: "runtime",
- Value: *runtime,
- },
- {
- Name: "version",
- Value: version,
- },
- }
-
- suite.Official = *official
- suite.Conditions = append(suite.Conditions, extraConditions...)
- debugLog("Sending benchmarks")
- return bq.SendBenchmarks(ctx, suite, *parseProject, *parseDataset, *parseTable, nil)
-}
-
-func main() {
- ctx := context.Background()
- switch {
- // the "init" command
- case len(os.Args) >= 2 && os.Args[1] == initString:
- if err := initCmd.Parse(os.Args[2:]); err != nil {
-                       log.Fatalf("Failed to parse flags: %v\n", err)
- os.Exit(1)
- }
- if err := initBenchmarks(ctx); err != nil {
- failure := "failed to initialize project: %s dataset: %s table: %s: %v\n"
-                       log.Fatalf(failure, *initProject, *initDataset, *initTable, err)
- os.Exit(1)
- }
- // the "parse" command.
- case len(os.Args) >= 2 && os.Args[1] == parseString:
- if err := parseCmd.Parse(os.Args[2:]); err != nil {
-                       log.Fatalf("Failed to parse flags: %v\n", err)
- os.Exit(1)
- }
- if err := parseBenchmarks(ctx); err != nil {
-                       log.Fatalf("Failed to parse benchmarks: %v\n", err)
- os.Exit(1)
- }
- default:
- printUsage()
- os.Exit(1)
- }
-}
-
-// printUsage prints the top level usage string.
-func printUsage() {
- usage := `Usage: parser <command> <flags> ...
-
-Available commands:
- %s %s
- %s %s
-`
- log.Printf(usage, initCmd.Name(), initDescription, parseCmd.Name(), parseDescription)
-}
-
-func debugLog(msg string, args ...interface{}) {
- if *debug {
- log.Printf(msg, args...)
- }
-}
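
The binary above is a thin CLI over the bigquery and parsers packages. Roughly, and with placeholder project/dataset/table names, its `init` and `parse` subcommands reduce to the following sketch:

```go
package main

import (
	"context"
	"io/ioutil"
	"log"

	bq "gvisor.dev/gvisor/tools/bigquery"
	"gvisor.dev/gvisor/tools/parsers"
)

func main() {
	ctx := context.Background()

	// "init": create the dataset/table with the benchmarks schema. This is a
	// no-op if the table already exists. Names here are placeholders.
	if err := bq.InitBigQuery(ctx, "my-project", "my-dataset", "my-table", nil); err != nil {
		log.Fatalf("init: %v", err)
	}

	// "parse": read a benchmark output file, parse it, attach extra
	// conditions, and upload it.
	data, err := ioutil.ReadFile("bench.out")
	if err != nil {
		log.Fatalf("read: %v", err)
	}
	suite, err := parsers.ParseOutput(string(data), "my-suite", false /* official */)
	if err != nil {
		log.Fatalf("parse: %v", err)
	}
	suite.Conditions = append(suite.Conditions, &bq.Condition{Name: "runtime", Value: "runsc"})
	if err := bq.SendBenchmarks(ctx, suite, "my-project", "my-dataset", "my-table", nil); err != nil {
		log.Fatalf("send: %v", err)
	}
}
```
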
diff --git a/tools/parsers/version.go b/tools/parsers/version.go
deleted file mode 100644
index c250f4a2a..000000000
--- a/tools/parsers/version.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.1
-// +build go1.1
-
-package main
-
-// version is set during linking.
-var version = "VERSION_MISSING"
diff --git a/tools/rules_go_symbols.patch b/tools/rules_go_symbols.patch
deleted file mode 100644
index 46767f169..000000000
--- a/tools/rules_go_symbols.patch
+++ /dev/null
@@ -1,14 +0,0 @@
-diff --git a/go/private/rules/test.bzl b/go/private/rules/test.bzl
-index 17516ad7..76b6c68c 100644
---- a/go/private/rules/test.bzl
-+++ b/go/private/rules/test.bzl
-@@ -117,9 +117,6 @@ def _go_test_impl(ctx):
- )
-
- test_gc_linkopts = gc_linkopts(ctx)
-- if not go.mode.debug:
-- # Disable symbol table and DWARF generation for test binaries.
-- test_gc_linkopts.extend(["-s", "-w"])
-
- # Link in the run_dir global for bzltestutil
- test_gc_linkopts.extend(["-X", "github.com/bazelbuild/rules_go/go/tools/bzltestutil.RunDir=" + run_dir])
diff --git a/tools/rules_go_visibility.patch b/tools/rules_go_visibility.patch
deleted file mode 100644
index e5bb2e3d5..000000000
--- a/tools/rules_go_visibility.patch
+++ /dev/null
@@ -1,22 +0,0 @@
-diff --git a/third_party/org_golang_x_tools-gazelle.patch b/third_party/org_golang_x_tools-gazelle.patch
-index 7bdacff5..2fe9ce93 100644
---- a/third_party/org_golang_x_tools-gazelle.patch
-+++ b/third_party/org_golang_x_tools-gazelle.patch
-@@ -2054,7 +2054,7 @@ diff -urN b/go/analysis/internal/facts/BUILD.bazel c/go/analysis/internal/facts/
- + "imports.go",
- + ],
- + importpath = "golang.org/x/tools/go/analysis/internal/facts",
--+ visibility = ["//go/analysis:__subpackages__"],
-++ visibility = ["//visibility:public"],
- + deps = [
- + "//go/analysis",
- + "//go/types/objectpath",
-@@ -2078,7 +2078,7 @@ diff -urN b/go/analysis/internal/facts/BUILD.bazel c/go/analysis/internal/facts/
- +alias(
- + name = "go_default_library",
- + actual = ":facts",
--+ visibility = ["//go/analysis:__subpackages__"],
-++ visibility = ["//visibility:public"],
- +)
- +
- +go_test(
diff --git a/tools/tag_release.sh b/tools/tag_release.sh
deleted file mode 100755
index 50378065e..000000000
--- a/tools/tag_release.sh
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This script will optionally map a PiperOrigin-RevId to a given commit,
-# validate a provided release name, create a tag and push it. It must be
-# run manually when a release is created.
-
-set -xeuo pipefail
-
-# Check arguments.
-if [[ "$#" -ne 3 ]]; then
- echo "usage: $0 <commit|revid> <release.rc> <message-file>"
- exit 1
-fi
-
-declare -r target_commit="$1"
-declare -r release="$2"
-declare -r message_file="$3"
-
-if [[ -z "${target_commit}" ]]; then
- echo "error: <commit|revid> is empty."
-fi
-if [[ -z "${release}" ]]; then
- echo "error: <release.rc> is empty."
-fi
-if ! [[ -r "${message_file}" ]]; then
- echo "error: message file '${message_file}' is not readable."
- exit 1
-fi
-
-closest_commit() {
- while read line; do
- if [[ "$line" =~ ^"commit " ]]; then
- current_commit="${line#commit }"
- continue
- elif [[ "$line" =~ "PiperOrigin-RevId: " ]]; then
- revid="${line#PiperOrigin-RevId: }"
- [[ "${revid}" -le "$1" ]] && break
- fi
- done
- echo "${current_commit}"
-}
-
-# Is the passed identifier a sha commit?
-if ! git show "${target_commit}" &> /dev/null; then
- # Extract the commit given a piper ID.
- commit="$(set +o pipefail; \
- git log --first-parent | closest_commit "${target_commit}")"
- declare -r commit
-else
- declare -r commit="${target_commit}"
-fi
-if ! git show "${commit}" &> /dev/null; then
- echo "unknown commit: ${target_commit}"
- exit 1
-fi
-
-# Is the release name sane? Must be a date with patch/rc.
-if ! [[ "${release}" =~ ^20[0-9]{6}\.[0-9]+$ ]]; then
- declare -r expected="$(date +%Y%m%d.0)" # Use today's date.
- echo "unexpected release format: ${release}"
- echo " ... expected like ${expected}"
- exit 1
-fi
-
-# Tag the given commit (annotated, to record the committer). Note that the tag
-# here is applied as a force, in case the tag already exists and is the same.
-# The push will fail in this case (because it is not forced).
-declare -r tag="release-${release}"
-git tag -f -F "${message_file}" -a "${tag}" "${commit}" && \
- git push origin tag "${tag}"
diff --git a/tools/tags/BUILD b/tools/tags/BUILD
deleted file mode 100644
index 1c02e2c89..000000000
--- a/tools/tags/BUILD
+++ /dev/null
@@ -1,11 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "tags",
- srcs = ["tags.go"],
- marshal = False,
- stateify = False,
- visibility = ["//tools:__subpackages__"],
-)
diff --git a/tools/tags/tags.go b/tools/tags/tags.go
deleted file mode 100644
index f35904e0a..000000000
--- a/tools/tags/tags.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package tags is a utility for parsing build tags.
-package tags
-
-import (
- "fmt"
- "io/ioutil"
- "strings"
-)
-
-// OrSet is a set of tags on a single line.
-//
-// Note that tags may include ",", and we don't distinguish this case in the
-// logic below. Ideally, these constraints can be split into separate top-level
-// build tags in order to resolve any issues.
-type OrSet []string
-
-// Line returns the line for this or.
-func (or OrSet) Line() string {
- return fmt.Sprintf("// +build %s", strings.Join([]string(or), " "))
-}
-
-// AndSet is the set of all OrSets.
-type AndSet []OrSet
-
-// Lines returns the lines to be printed.
-func (and AndSet) Lines() (ls []string) {
- for _, or := range and {
- ls = append(ls, or.Line())
- }
- return
-}
-
-// Join joins this AndSet with another.
-func (and AndSet) Join(other AndSet) AndSet {
- return append(and, other...)
-}
-
-// Tags returns the unique set of +build tags.
-//
-// Derived from the runtime's canBuild.
-func Tags(file string) (tags AndSet) {
- data, err := ioutil.ReadFile(file)
- if err != nil {
- return nil
- }
- // Check file contents for // +build lines.
- for _, p := range strings.Split(string(data), "\n") {
- p = strings.TrimSpace(p)
- if p == "" {
- continue
- }
- if !strings.HasPrefix(p, "//") {
- break
- }
- if !strings.Contains(p, "+build") {
- continue
- }
- fields := strings.Fields(p[2:])
- if len(fields) < 1 || fields[0] != "+build" {
- continue
- }
- tags = append(tags, OrSet(fields[1:]))
- }
- return tags
-}
-
-// Aggregate aggregates all tags from a set of files.
-//
-// Note that these may be in conflict, in which case the build will fail.
-func Aggregate(files []string) (tags AndSet) {
- for _, file := range files {
- tags = tags.Join(Tags(file))
- }
- return tags
-}
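
For reference, the removed tags helpers were consumed along these lines; the file names below are placeholders, and files that do not exist simply contribute no tags:

```go
package main

import (
	"fmt"

	"gvisor.dev/gvisor/tools/tags"
)

func main() {
	// Collect "// +build" constraints from a set of Go sources.
	all := tags.Aggregate([]string{"foo_linux.go", "foo_amd64.go"})

	// Each OrSet renders back to a single build-constraint line.
	for _, line := range all.Lines() {
		fmt.Println(line) // e.g. "// +build linux"
	}
}
```
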
diff --git a/tools/verity/BUILD b/tools/verity/BUILD
deleted file mode 100644
index 77d16359c..000000000
--- a/tools/verity/BUILD
+++ /dev/null
@@ -1,15 +0,0 @@
-load("//tools:defs.bzl", "go_binary")
-
-licenses(["notice"])
-
-go_binary(
- name = "measure_tool",
- srcs = [
- "measure_tool.go",
- "measure_tool_unsafe.go",
- ],
- pure = True,
- deps = [
- "//pkg/abi/linux",
- ],
-)
diff --git a/tools/verity/measure_tool.go b/tools/verity/measure_tool.go
deleted file mode 100644
index 0d314ae70..000000000
--- a/tools/verity/measure_tool.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// This binary can be used to run a measurement of the verity file system,
-// generate the corresponding Merkle tree files, and return the root hash.
-package main
-
-import (
- "flag"
- "io/ioutil"
- "log"
- "os"
- "syscall"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
-)
-
-var path = flag.String("path", "", "path to the verity file system.")
-
-const maxDigestSize = 64
-
-type digest struct {
- metadata linux.DigestMetadata
- digest [maxDigestSize]byte
-}
-
-func main() {
- flag.Parse()
- if *path == "" {
- log.Fatalf("no path provided")
- }
- if err := enableDir(*path); err != nil {
- log.Fatalf("Failed to enable file system %s: %v", *path, err)
- }
- // Print the root hash of the file system to stdout.
- if err := measure(*path); err != nil {
- log.Fatalf("Failed to measure file system %s: %v", *path, err)
- }
-}
-
-// enableDir enables verity features on all the files and sub-directories within
-// path.
-func enableDir(path string) error {
- files, err := ioutil.ReadDir(path)
- if err != nil {
- return err
- }
- for _, file := range files {
- if file.IsDir() {
- // For directories, first enable its children.
- if err := enableDir(path + "/" + file.Name()); err != nil {
- return err
- }
- } else if file.Mode().IsRegular() {
- // For regular files, open and enable verity feature.
- f, err := os.Open(path + "/" + file.Name())
- if err != nil {
- return err
- }
- var p uintptr
- if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(f.Fd()), uintptr(linux.FS_IOC_ENABLE_VERITY), p); err != 0 {
- return err
- }
- }
- }
- // Once all children are enabled, enable the parent directory.
- f, err := os.Open(path)
- if err != nil {
- return err
- }
- var p uintptr
- if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(f.Fd()), uintptr(linux.FS_IOC_ENABLE_VERITY), p); err != 0 {
- return err
- }
- return nil
-}
diff --git a/tools/verity/measure_tool_unsafe.go b/tools/verity/measure_tool_unsafe.go
deleted file mode 100644
index d4079be9e..000000000
--- a/tools/verity/measure_tool_unsafe.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-package main
-
-import (
- "encoding/hex"
- "fmt"
- "os"
- "syscall"
- "unsafe"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
-)
-
-// measure prints the hash of path to stdout.
-func measure(path string) error {
- f, err := os.Open(path)
- if err != nil {
- return err
- }
- var digest digest
- digest.metadata.DigestSize = maxDigestSize
- if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(f.Fd()), uintptr(linux.FS_IOC_MEASURE_VERITY), uintptr(unsafe.Pointer(&digest))); err != 0 {
- return err
- }
- fmt.Fprintf(os.Stdout, "%s\n", hex.EncodeToString(digest.digest[:digest.metadata.DigestSize]))
-       return nil
-}
diff --git a/tools/worker/BUILD b/tools/worker/BUILD
deleted file mode 100644
index dc03ce11e..000000000
--- a/tools/worker/BUILD
+++ /dev/null
@@ -1,21 +0,0 @@
-load("//tools:defs.bzl", "bazel_worker_proto", "go_library")
-
-package(licenses = ["notice"])
-
-# For Google-tooling.
-# @unused
-glaze_ignore = [
- "worker.go",
-]
-
-go_library(
- name = "worker",
- srcs = ["worker.go"],
- visibility = ["//tools:__subpackages__"],
- deps = [
- bazel_worker_proto,
- "@org_golang_google_protobuf//encoding/protowire:go_default_library",
- "@org_golang_google_protobuf//proto:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/tools/worker/worker.go b/tools/worker/worker.go
deleted file mode 100644
index 669a5f203..000000000
--- a/tools/worker/worker.go
+++ /dev/null
@@ -1,325 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package worker provides an implementation of the bazel worker protocol.
-//
-// Tools may be written as normal command line utilities, except that the
-// passed run function may be invoked multiple times.
-package worker
-
-import (
- "bufio"
- "bytes"
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "net"
- "net/http"
- "os"
- "path/filepath"
- "sort"
- "strings"
- "time"
-
- _ "net/http/pprof" // For profiling.
-
- "golang.org/x/sys/unix"
- "google.golang.org/protobuf/encoding/protowire"
- "google.golang.org/protobuf/proto"
- wpb "gvisor.dev/bazel/worker_protocol_go_proto"
-)
-
-var (
- persistentWorker = flag.Bool("persistent_worker", false, "enable persistent worker.")
- workerDebug = flag.Bool("worker_debug", false, "debug persistent workers.")
- maximumCacheUsage = flag.Int64("maximum_cache_usage", 1024*1024*1024, "maximum cache size.")
-)
-
-var (
- // inputFiles is the last set of input files.
- //
- // This is used for cache invalidation. The key is the *absolute* path
- // name, and the value is the digest in the current run.
- inputFiles = make(map[string]string)
-
- // activeCaches is the set of active caches.
- activeCaches = make(map[*Cache]struct{})
-
- // totalCacheUsage is the total usage of all caches.
- totalCacheUsage int64
-)
-
-// mustAbs returns the absolute path of a filename or dies.
-func mustAbs(filename string) string {
- abs, err := filepath.Abs(filename)
- if err != nil {
- log.Fatalf("error getting absolute path: %v", err)
- }
- return abs
-}
-
-// updateInputFile creates an entry in inputFiles.
-func updateInputFile(filename, digest string) {
- inputFiles[mustAbs(filename)] = digest
-}
-
-// Sizer returns a size.
-type Sizer interface {
- Size() int64
-}
-
-// CacheBytes is an example of a Sizer.
-type CacheBytes []byte
-
-// Size implements Sizer.Size.
-func (cb CacheBytes) Size() int64 {
- return int64(len(cb))
-}
-
-// Cache is a worker cache.
-//
-// Caches are created via NewCache.
-type Cache struct {
- name string
- entries map[string]Sizer
- size int64
- hits int64
- misses int64
-}
-
-// NewCache returns a new cache.
-func NewCache(name string) *Cache {
- return &Cache{
- name: name,
- }
-}
-
-// Lookup looks up an entry in the cache.
-//
-// The cache key is a function of the given files' digests.
-func (c *Cache) Lookup(filenames []string, generate func() Sizer) Sizer {
- digests := make([]string, 0, len(filenames))
- for _, filename := range filenames {
- digest, ok := inputFiles[mustAbs(filename)]
- if !ok {
-                       // This is not a valid input. We may not be running as a
-                       // persistent worker, in which case the file's contents
-                       // will not change across the run, and we just use the
-                       // filename itself.
- digest = filename
- }
- digests = append(digests, digest)
- }
-
- // Attempt the lookup.
- sort.Slice(digests, func(i, j int) bool {
- return digests[i] < digests[j]
- })
- cacheKey := strings.Join(digests, "+")
- if c.entries == nil {
- c.entries = make(map[string]Sizer)
- activeCaches[c] = struct{}{}
- }
- entry, ok := c.entries[cacheKey]
- if ok {
- c.hits++
- return entry
- }
-
- // Generate a new entry.
- entry = generate()
- c.misses++
- c.entries[cacheKey] = entry
- if entry != nil {
- sz := entry.Size()
- c.size += sz
- totalCacheUsage += sz
- }
-
-       // Check the capacity of all caches. If it is greater than the maximum, we
- // flush everything but still return this entry.
- if totalCacheUsage > *maximumCacheUsage {
-               for entry := range activeCaches {
- // Drop all entries.
- entry.size = 0
- entry.entries = nil
- }
- totalCacheUsage = 0 // Reset.
- }
-
- return entry
-}
-
-// allCacheStats returns stats for all caches.
-func allCacheStats() string {
- var sb strings.Builder
-       for entry := range activeCaches {
- ratio := float64(entry.hits) / float64(entry.hits+entry.misses)
- fmt.Fprintf(&sb,
- "% 10s: count: % 5d size: % 10d hits: % 7d misses: % 7d ratio: %2.2f\n",
- entry.name, len(entry.entries), entry.size, entry.hits, entry.misses, ratio)
- }
- if len(activeCaches) > 0 {
- fmt.Fprintf(&sb, "total: % 10d\n", totalCacheUsage)
- }
- return sb.String()
-}
-
-// LookupDigest returns a digest for the given file.
-func LookupDigest(filename string) (string, bool) {
- digest, ok := inputFiles[filename]
- return digest, ok
-}
-
-// Work invokes the main function.
-func Work(run func([]string) int) {
- flag.CommandLine.Parse(os.Args[1:])
- if !*persistentWorker {
- // Handle the argument file.
- args := flag.CommandLine.Args()
- if len(args) == 1 && len(args[0]) > 1 && args[0][0] == '@' {
- content, err := ioutil.ReadFile(args[0][1:])
- if err != nil {
- log.Fatalf("unable to parse args file: %v", err)
- }
- // Pull arguments from the file.
- args = strings.Split(string(content), "\n")
- flag.CommandLine.Parse(args)
- args = flag.CommandLine.Args()
- }
- os.Exit(run(args))
- }
-
- var listenHeader string // Emitted always.
- if *workerDebug {
- // Bind a server for profiling.
- listener, err := net.Listen("tcp", "localhost:0")
- if err != nil {
- log.Fatalf("unable to bind a server: %v", err)
- }
- // Construct the header for stats output, below.
- listenHeader = fmt.Sprintf("Listening @ http://localhost:%d\n", listener.Addr().(*net.TCPAddr).Port)
- go http.Serve(listener, nil)
- }
-
- // Move stdout. This is done to prevent anything else from accidentally
- // printing to stdout, which must contain only the valid WorkerResponse
- // serialized protos.
- newOutput, err := unix.Dup(1)
- if err != nil {
- log.Fatalf("unable to move stdout: %v", err)
- }
- // Stderr may be closed or may be a copy of stdout. We make sure that
- // we have an output that is in a completely separate range.
- for newOutput <= 2 {
- newOutput, err = unix.Dup(newOutput)
- if err != nil {
- log.Fatalf("unable to move stdout: %v", err)
- }
- }
-
- // Best-effort: collect logs.
- rPipe, wPipe, err := os.Pipe()
- if err != nil {
- log.Fatalf("unable to create pipe: %v", err)
- }
- if err := unix.Dup2(int(wPipe.Fd()), 1); err != nil {
- log.Fatalf("error duping over stdout: %v", err)
- }
- if err := unix.Dup2(int(wPipe.Fd()), 2); err != nil {
- log.Fatalf("error duping over stderr: %v", err)
- }
- wPipe.Close()
- defer rPipe.Close()
-
- // Read requests from stdin.
- input := bufio.NewReader(os.NewFile(0, "input"))
- output := bufio.NewWriter(os.NewFile(uintptr(newOutput), "output"))
- for {
- szBuf, err := input.Peek(4)
- if err != nil {
-                       log.Fatalf("unable to read header: %v", err)
- }
-
- // Parse the size, and discard bits.
- sz, szBytes := protowire.ConsumeVarint(szBuf)
- if szBytes < 0 {
- szBytes = 0
- }
- if _, err := input.Discard(szBytes); err != nil {
- log.Fatalf("error discarding size: %v", err)
- }
-
- // Read a full message.
- msg := make([]byte, int(sz))
- if _, err := io.ReadFull(input, msg); err != nil {
- log.Fatalf("error reading worker request: %v", err)
- }
- var wreq wpb.WorkRequest
- if err := proto.Unmarshal(msg, &wreq); err != nil {
- log.Fatalf("error unmarshaling worker request: %v", err)
- }
-
- // Flush relevant caches.
- inputFiles = make(map[string]string)
- for _, input := range wreq.GetInputs() {
- updateInputFile(input.GetPath(), string(input.GetDigest()))
- }
-
- // Prepare logging.
- outputBuffer := bytes.NewBuffer(nil)
- outputBuffer.WriteString(listenHeader)
- log.SetOutput(outputBuffer)
-
- // Parse all arguments.
- flag.CommandLine.Parse(wreq.GetArguments())
- var exitCode int
- exitChan := make(chan int)
- go func() { exitChan <- run(flag.CommandLine.Args()) }()
- for running := true; running; {
- select {
- case exitCode = <-exitChan:
- running = false
- default:
- }
- // N.B. rPipe is given a read deadline of 1ms. We expect
-                       // this to return a copy error after 1ms, and we just keep
- // flushing this buffer while the task is running.
- rPipe.SetReadDeadline(time.Now().Add(time.Millisecond))
- outputBuffer.ReadFrom(rPipe)
- }
-
- if *workerDebug {
- // Attach all cache stats.
- outputBuffer.WriteString(allCacheStats())
- }
-
- // Send the response.
- var wresp wpb.WorkResponse
- wresp.ExitCode = int32(exitCode)
- wresp.Output = string(outputBuffer.Bytes())
- rmsg, err := proto.Marshal(&wresp)
- if err != nil {
- log.Fatalf("error marshaling response: %v", err)
- }
- if _, err := output.Write(append(protowire.AppendVarint(nil, uint64(len(rmsg))), rmsg...)); err != nil {
- log.Fatalf("error sending worker response: %v", err)
- }
- if err := output.Flush(); err != nil {
- log.Fatalf("error flushing output: %v", err)
- }
- }
-}
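
To make the removal easier to follow, this is roughly how a Bazel tool wrapped its entry point with this package; the tool logic and cache name below are illustrative only:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"gvisor.dev/gvisor/tools/worker"
)

// sizeCache memoizes results keyed on the digests of the input files that
// bazel reports in each WorkRequest.
var sizeCache = worker.NewCache("sizes")

func run(args []string) int {
	for _, filename := range args {
		// Lookup returns the cached entry when the digests of the named
		// files are unchanged; otherwise it calls generate().
		entry := sizeCache.Lookup([]string{filename}, func() worker.Sizer {
			data, err := ioutil.ReadFile(filename)
			if err != nil {
				return worker.CacheBytes(nil)
			}
			return worker.CacheBytes(data)
		})
		fmt.Fprintf(os.Stderr, "%s: %d bytes\n", filename, entry.Size())
	}
	return 0
}

func main() {
	// Work parses flags, expands @argfile arguments and, when
	// --persistent_worker is set, loops over WorkRequests on stdin,
	// invoking run once per request.
	worker.Work(run)
}
```
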
diff --git a/tools/workspace_status.sh b/tools/workspace_status.sh
deleted file mode 100755
index 62d78ed3d..000000000
--- a/tools/workspace_status.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-# Copyright 2018 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# The STABLE_ prefix will trigger a re-link if it changes.
-echo STABLE_VERSION "$(git describe --always --tags --abbrev=12 --dirty 2>/dev/null || echo 0.0.0)"
diff --git a/tools/yamltest/BUILD b/tools/yamltest/BUILD
deleted file mode 100644
index 475b3badd..000000000
--- a/tools/yamltest/BUILD
+++ /dev/null
@@ -1,13 +0,0 @@
-load("//tools:defs.bzl", "go_binary")
-
-package(licenses = ["notice"])
-
-go_binary(
- name = "yamltest",
- srcs = ["main.go"],
- visibility = ["//visibility:public"],
- deps = [
- "@com_github_xeipuuv_gojsonschema//:go_default_library",
- "@in_gopkg_yaml_v2//:go_default_library",
- ],
-)
diff --git a/tools/yamltest/defs.bzl b/tools/yamltest/defs.bzl
deleted file mode 100644
index fd04f947d..000000000
--- a/tools/yamltest/defs.bzl
+++ /dev/null
@@ -1,41 +0,0 @@
-"""Tools for testing yaml files against schemas."""
-
-def _yaml_test_impl(ctx):
- """Implementation for yaml_test."""
- runner = ctx.actions.declare_file(ctx.label.name)
- ctx.actions.write(runner, "\n".join([
- "#!/bin/bash",
- "set -euo pipefail",
- "%s -schema=%s -- %s" % (
- ctx.files._tool[0].short_path,
- ctx.files.schema[0].short_path,
- " ".join([f.short_path for f in ctx.files.srcs]),
- ),
- ]), is_executable = True)
- return [DefaultInfo(
- runfiles = ctx.runfiles(files = ctx.files._tool + ctx.files.schema + ctx.files.srcs),
- executable = runner,
- )]
-
-yaml_test = rule(
- implementation = _yaml_test_impl,
- doc = "Tests a yaml file against a schema.",
- attrs = {
- "srcs": attr.label_list(
- doc = "The input yaml files.",
- mandatory = True,
- allow_files = True,
- ),
- "schema": attr.label(
- doc = "The schema file in JSON schema format.",
- allow_single_file = True,
- mandatory = True,
- ),
- "_tool": attr.label(
- executable = True,
- cfg = "host",
- default = Label("//tools/yamltest:yamltest"),
- ),
- },
- test = True,
-)
diff --git a/tools/yamltest/main.go b/tools/yamltest/main.go
deleted file mode 100644
index 88271fb66..000000000
--- a/tools/yamltest/main.go
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Binary yamltest does strict yaml parsing and validation.
-package main
-
-import (
- "encoding/json"
- "errors"
- "flag"
- "fmt"
- "os"
-
- "github.com/xeipuuv/gojsonschema"
- yaml "gopkg.in/yaml.v2"
-)
-
-func fixup(v interface{}) (interface{}, error) {
- switch x := v.(type) {
- case map[interface{}]interface{}:
-               // Coerce into a string-keyed map, as required for JSON marshaling.
- strMap := make(map[string]interface{})
- for k, v := range x {
- strK, ok := k.(string)
- if !ok {
- // This cannot be converted to JSON at all.
- return nil, fmt.Errorf("invalid key %T in (%#v)", k, x)
- }
- fv, err := fixup(v)
- if err != nil {
- return nil, fmt.Errorf(".%s%w", strK, err)
- }
- strMap[strK] = fv
- }
- return strMap, nil
- case []interface{}:
- for i := range x {
- fv, err := fixup(x[i])
- if err != nil {
- return nil, fmt.Errorf("[%d]%w", i, err)
- }
- x[i] = fv
- }
- return x, nil
- default:
- return v, nil
- }
-}
-
-func loadFile(filename string) (gojsonschema.JSONLoader, error) {
- f, err := os.Open(filename)
- if err != nil {
- return nil, err
- }
- defer f.Close()
- dec := yaml.NewDecoder(f)
- dec.SetStrict(true)
- var object interface{}
- if err := dec.Decode(&object); err != nil {
- return nil, err
- }
- fixedObject, err := fixup(object) // For serialization.
- if err != nil {
- return nil, err
- }
- bytes, err := json.Marshal(fixedObject)
- if err != nil {
- return nil, err
- }
- return gojsonschema.NewStringLoader(string(bytes)), nil
-}
-
-var schema = flag.String("schema", "", "path to JSON schema file.")
-
-func main() {
- flag.Parse()
- if *schema == "" || len(flag.Args()) == 0 {
- flag.Usage()
- os.Exit(2)
- }
-
- // Construct our schema loader.
- schemaLoader := gojsonschema.NewReferenceLoader(fmt.Sprintf("file://%s", *schema))
-
- // Parse all documents.
- allErrors := make(map[string][]error)
- for _, filename := range flag.Args() {
- // Record the filename with an empty slice for below, where
- // we will emit all files (even those without any errors).
- allErrors[filename] = nil
- documentLoader, err := loadFile(filename)
- if err != nil {
- allErrors[filename] = append(allErrors[filename], err)
- continue
- }
- result, err := gojsonschema.Validate(schemaLoader, documentLoader)
- if err != nil {
- allErrors[filename] = append(allErrors[filename], err)
- continue
- }
- for _, desc := range result.Errors() {
- allErrors[filename] = append(allErrors[filename], errors.New(desc.String()))
- }
- }
-
- // Print errors in yaml format.
- totalErrors := 0
- for filename, errs := range allErrors {
- totalErrors += len(errs)
- if len(errs) == 0 {
- fmt.Fprintf(os.Stderr, "%s: ✓\n", filename)
- continue
- }
- fmt.Fprintf(os.Stderr, "%s:\n", filename)
- for _, err := range errs {
- fmt.Fprintf(os.Stderr, "- %s\n", err)
- }
- }
- if totalErrors != 0 {
- os.Exit(1)
- }
-}
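
The fixup step above exists because gopkg.in/yaml.v2 decodes mappings into map[interface{}]interface{}, which encoding/json cannot marshal. A small standalone sketch of the failure it works around:

```go
package main

import (
	"encoding/json"
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	var v interface{}
	if err := yaml.Unmarshal([]byte("a: 1\nb:\n  c: 2\n"), &v); err != nil {
		panic(err)
	}
	// yaml.v2 yields map[interface{}]interface{} for mappings...
	fmt.Printf("%T\n", v)

	// ...which json.Marshal rejects with an "unsupported type" error until
	// the keys are coerced to strings, which is exactly what fixup does.
	if _, err := json.Marshal(v); err != nil {
		fmt.Println("json.Marshal:", err)
	}
}
```
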
diff --git a/vdso/BUILD b/vdso/BUILD
deleted file mode 100644
index c70bb8218..000000000
--- a/vdso/BUILD
+++ /dev/null
@@ -1,81 +0,0 @@
-# Description:
-# This VDSO is a shared library that provides the same interfaces as the
-# normal system VDSO (time, gettimeofday, clock_gettime) but which uses
-# timekeeping parameters managed by the sandbox kernel.
-
-load("//tools:defs.bzl", "cc_flags_supplier", "cc_toolchain", "select_arch", "vdso_linker_option")
-
-package(licenses = ["notice"])
-
-genrule(
- name = "vdso",
- srcs = [
- "barrier.h",
- "compiler.h",
- "cycle_clock.h",
- "seqlock.h",
- "syscalls.h",
- "vdso.cc",
- "vdso_amd64.lds",
- "vdso_arm64.lds",
- "vdso_time.h",
- "vdso_time.cc",
- ],
- outs = [
- "vdso.so",
- ],
- cmd = "$(CC) $(CC_FLAGS) " +
- "-I. " +
- "-O2 " +
- "-std=c++11 " +
- "-fPIC " +
- "-fno-sanitize=all " +
-          # Some toolchains enable stack protector by default. Disable it; the
-          # VDSO has no hooks to handle failures.
- "-fno-stack-protector " +
- vdso_linker_option +
- select_arch(
- amd64 = "-m64 ",
- arm64 = "",
- ) +
- "-shared " +
- "-nostdlib " +
- "-Wl,-soname=linux-vdso.so.1 " +
- "-Wl,--hash-style=sysv " +
- "-Wl,--no-undefined " +
- "-Wl,-Bsymbolic " +
- "-Wl,-z,max-page-size=4096 " +
- "-Wl,-z,common-page-size=4096 " +
- select_arch(
- amd64 = "-Wl,-T$(location vdso_amd64.lds) ",
- arm64 = "-Wl,-T$(location vdso_arm64.lds) ",
- no_match_error = "unsupported architecture",
- ) +
- "-o $(location vdso.so) " +
- "$(location vdso.cc) " +
- "$(location vdso_time.cc) " +
- "&& $(location :check_vdso) " +
- "--check-data " +
- "--vdso $(location vdso.so) ",
- exec_tools = [
- ":check_vdso",
- ],
- features = ["-pie"],
- toolchains = [
- cc_toolchain,
- ":no_pie_cc_flags",
- ],
- visibility = ["//:sandbox"],
-)
-
-cc_flags_supplier(
- name = "no_pie_cc_flags",
- features = ["-pie"],
-)
-
-py_binary(
- name = "check_vdso",
- srcs = ["check_vdso.py"],
- python_version = "PY3",
- visibility = ["//:sandbox"],
-)
diff --git a/vdso/barrier.h b/vdso/barrier.h
deleted file mode 100644
index edba4afb5..000000000
--- a/vdso/barrier.h
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef VDSO_BARRIER_H_
-#define VDSO_BARRIER_H_
-
-namespace vdso {
-
-// Compiler Optimization barrier.
-inline void barrier(void) { __asm__ __volatile__("" ::: "memory"); }
-
-#if __x86_64__
-
-inline void memory_barrier(void) {
- __asm__ __volatile__("mfence" ::: "memory");
-}
-inline void read_barrier(void) { barrier(); }
-inline void write_barrier(void) { barrier(); }
-
-#elif __aarch64__
-
-inline void memory_barrier(void) {
- __asm__ __volatile__("dmb ish" ::: "memory");
-}
-inline void read_barrier(void) {
- __asm__ __volatile__("dmb ishld" ::: "memory");
-}
-inline void write_barrier(void) {
- __asm__ __volatile__("dmb ishst" ::: "memory");
-}
-
-#else
-#error "unsupported architecture"
-#endif
-
-} // namespace vdso
-
-#endif // VDSO_BARRIER_H_
diff --git a/vdso/check_vdso.py b/vdso/check_vdso.py
deleted file mode 100644
index b3ee574f3..000000000
--- a/vdso/check_vdso.py
+++ /dev/null
@@ -1,204 +0,0 @@
-# Copyright 2018 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Verify VDSO ELF does not contain any relocations and is directly mmappable.
-"""
-
-import argparse
-import logging
-import re
-import subprocess
-
-PAGE_SIZE = 4096
-
-
-def PageRoundDown(addr):
- """Rounds down to the nearest page.
-
- Args:
- addr: An address.
-
- Returns:
- The address rounded down to the nearest page.
- """
- return addr & ~(PAGE_SIZE - 1)
-
-
-def Fatal(*args, **kwargs):
- """Logs a critical message and exits with code 1.
-
- Args:
- *args: Args to pass to logging.critical.
- **kwargs: Keyword args to pass to logging.critical.
- """
- logging.critical(*args, **kwargs)
- exit(1)
-
-
-def CheckSegments(vdso_path):
- """Verifies layout of PT_LOAD segments.
-
- PT_LOAD segments must be laid out such that the ELF is directly mmappable.
-
- Specifically, check that:
- * PT_LOAD file offsets are equivalent to the memory offset from the first
- segment.
- * No extra zeroed space (memsz) is required.
- * PT_LOAD segments are in order (required for any ELF).
- * No two PT_LOAD segments share part of the same page.
-
- The readelf line format looks like:
- Type Offset VirtAddr PhysAddr FileSiz MemSiz Flg Align
- LOAD 0x000000 0xffffffffff700000 0xffffffffff700000 0x000e68 0x000e68 R E 0x1000
-
- Args:
- vdso_path: Path to VDSO binary.
- """
- output = subprocess.check_output(["readelf", "-lW", vdso_path]).decode()
- lines = output.split("\n")
-
- segments = []
- for line in lines:
- if not line.startswith(" LOAD"):
- continue
-
- components = line.split()
-
- segments.append({
- "offset": int(components[1], 16),
- "addr": int(components[2], 16),
- "filesz": int(components[4], 16),
- "memsz": int(components[5], 16),
- })
-
- if not segments:
- Fatal("No PT_LOAD segments in VDSO")
-
- first = segments[0]
- if first["offset"] != 0:
- Fatal("First PT_LOAD segment has non-zero file offset: %s", first)
-
- for i, segment in enumerate(segments):
- memoff = segment["addr"] - first["addr"]
- if memoff != segment["offset"]:
- Fatal("PT_LOAD segment has different memory and file offsets: %s",
- segments)
-
- if segment["memsz"] != segment["filesz"]:
- Fatal("PT_LOAD segment memsz != filesz: %s", segment)
-
- if i > 0:
- last_end = segments[i-1]["addr"] + segments[i-1]["memsz"]
- if segment["addr"] < last_end:
- Fatal("PT_LOAD segments out of order")
-
- last_page = PageRoundDown(last_end)
- start_page = PageRoundDown(segment["addr"])
- if last_page >= start_page:
- Fatal("PT_LOAD segments share a page: %s and %s", segment,
- segments[i - 1])
-
-
-# Matches the section name in readelf -SW output.
-_SECTION_NAME_RE = re.compile(r"""^\s+\[\ ?\d+\]\s+
- (?P<name>\.\S+)\s+
- (?P<type>\S+)\s+
- (?P<addr>[0-9a-f]+)\s+
- (?P<off>[0-9a-f]+)\s+
- (?P<size>[0-9a-f]+)""", re.VERBOSE)
-
-
-def CheckData(vdso_path):
- """Verifies the VDSO contains no .data or .bss sections.
-
- The readelf line format looks like:
-
- There are 15 section headers, starting at offset 0x15f0:
-
- Section Headers:
- [Nr] Name Type Address Off Size ES Flg Lk Inf Al
- [ 0] NULL 0000000000000000 000000 000000 00 0 0 0
- [ 1] .hash HASH ffffffffff700120 000120 000040 04 A 2 0 8
- [ 2] .dynsym DYNSYM ffffffffff700160 000160 000108 18 A 3 1 8
- ...
- [13] .strtab STRTAB 0000000000000000 001448 000123 00 0 0 1
- [14] .shstrtab STRTAB 0000000000000000 00156b 000083 00 0 0 1
- Key to Flags:
- W (write), A (alloc), X (execute), M (merge), S (strings), I (info),
- L (link order), O (extra OS processing required), G (group), T (TLS),
- C (compressed), x (unknown), o (OS specific), E (exclude),
- l (large), p (processor specific)
-
- Args:
- vdso_path: Path to VDSO binary.
- """
- output = subprocess.check_output(["readelf", "-SW", vdso_path]).decode()
- lines = output.split("\n")
-
- found_text = False
- for line in lines:
- m = re.search(_SECTION_NAME_RE, line)
- if not m:
- continue
-
- if not line.startswith(" ["):
- continue
-
- name = m.group("name")
- size = int(m.group("size"), 16)
-
- if name == ".text" and size != 0:
- found_text = True
-
- # Clang will typically omit these sections entirely; gcc will include them
- # but with size 0.
- if name.startswith(".data") and size != 0:
- Fatal("VDSO contains non-empty .data section:\n%s" % output)
-
- if name.startswith(".bss") and size != 0:
- Fatal("VDSO contains non-empty .bss section:\n%s" % output)
-
- if not found_text:
- Fatal("VDSO contains no/empty .text section? Bad parsing?:\n%s" % output)
-
-
-def CheckRelocs(vdso_path):
- """Verifies that the VDSO includes no relocations.
-
- Args:
- vdso_path: Path to VDSO binary.
- """
- output = subprocess.check_output(["readelf", "-r", vdso_path]).decode()
- if output.strip() != "There are no relocations in this file.":
- Fatal("VDSO contains relocations: %s", output)
-
-
-def main():
- parser = argparse.ArgumentParser(description="Verify VDSO ELF.")
- parser.add_argument("--vdso", required=True, help="Path to VDSO ELF")
- parser.add_argument(
- "--check-data",
- action="store_true",
- help="Check that the ELF contains no .data or .bss sections")
- args = parser.parse_args()
-
- CheckSegments(args.vdso)
- CheckRelocs(args.vdso)
-
- if args.check_data:
- CheckData(args.vdso)
-
-
-if __name__ == "__main__":
- main()
diff --git a/vdso/compiler.h b/vdso/compiler.h
deleted file mode 100644
index 54a510000..000000000
--- a/vdso/compiler.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef VDSO_COMPILER_H_
-#define VDSO_COMPILER_H_
-
-#define likely(x) __builtin_expect(!!(x), 1)
-#define unlikely(x) __builtin_expect(!!(x), 0)
-
-#ifndef __section
-#define __section(S) __attribute__((__section__(#S)))
-#endif
-
-#ifndef __aligned
-#define __aligned(N) __attribute__((__aligned__(N)))
-#endif
-
-#endif // VDSO_COMPILER_H_
diff --git a/vdso/cycle_clock.h b/vdso/cycle_clock.h
deleted file mode 100644
index 5d3fbb257..000000000
--- a/vdso/cycle_clock.h
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef VDSO_CYCLE_CLOCK_H_
-#define VDSO_CYCLE_CLOCK_H_
-
-#include <stdint.h>
-
-#include "vdso/barrier.h"
-
-namespace vdso {
-
-#if __x86_64__
-
-// TODO(b/74613497): The appropriate barrier instruction to use with rdtsc on
-// x86_64 depends on the vendor. Intel processors can use lfence but AMD may
-// need mfence, depending on MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT.
-
-static inline uint64_t cycle_clock(void) {
- uint32_t lo, hi;
- asm volatile("lfence" : : : "memory");
- asm volatile("rdtsc" : "=a"(lo), "=d"(hi));
- return ((uint64_t)hi << 32) | lo;
-}
-
-#elif __aarch64__
-
-static inline uint64_t cycle_clock(void) {
- uint64_t val;
- asm volatile("mrs %0, CNTVCT_EL0" : "=r"(val)::"memory");
- return val;
-}
-
-#else
-#error "unsupported architecture"
-#endif
-
-} // namespace vdso
-
-#endif // VDSO_CYCLE_CLOCK_H_
diff --git a/vdso/seqlock.h b/vdso/seqlock.h
deleted file mode 100644
index 7a173174b..000000000
--- a/vdso/seqlock.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Low level raw interfaces to the sequence counter used by the VDSO.
-#ifndef VDSO_SEQLOCK_H_
-#define VDSO_SEQLOCK_H_
-
-#include <stdint.h>
-
-#include "vdso/barrier.h"
-#include "vdso/compiler.h"
-
-namespace vdso {
-
-inline int32_t read_seqcount_begin(const uint64_t* s) {
- uint64_t seq = *s;
- read_barrier();
- return seq & ~1;
-}
-
-inline int read_seqcount_retry(const uint64_t* s, uint64_t seq) {
- read_barrier();
- return unlikely(*s != seq);
-}
-
-} // namespace vdso
-
-#endif // VDSO_SEQLOCK_H_
diff --git a/vdso/syscalls.h b/vdso/syscalls.h
deleted file mode 100644
index 0c6a922a0..000000000
--- a/vdso/syscalls.h
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// System call support for the VDSO.
-//
-// Provides fallback system call interfaces for getcpu()
-// and clock_gettime().
-
-#ifndef VDSO_SYSCALLS_H_
-#define VDSO_SYSCALLS_H_
-
-#include <asm/unistd.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <stddef.h>
-#include <sys/types.h>
-
-#define __stringify_1(x...) #x
-#define __stringify(x...) __stringify_1(x)
-
-namespace vdso {
-
-#if __x86_64__
-
-struct getcpu_cache;
-
-static inline int sys_clock_gettime(clockid_t clock, struct timespec* ts) {
- int num = __NR_clock_gettime;
- asm volatile("syscall\n"
- : "+a"(num)
- : "D"(clock), "S"(ts)
- : "rcx", "r11", "memory");
- return num;
-}
-
-static inline int sys_getcpu(unsigned* cpu, unsigned* node,
- struct getcpu_cache* cache) {
- int num = __NR_getcpu;
- asm volatile("syscall\n"
- : "+a"(num)
- : "D"(cpu), "S"(node), "d"(cache)
- : "rcx", "r11", "memory");
- return num;
-}
-
-static inline void sys_rt_sigreturn(void) {
- asm volatile("movl $" __stringify(__NR_rt_sigreturn)", %eax \n"
- "syscall \n");
-}
-
-#elif __aarch64__
-
-static inline int sys_clock_gettime(clockid_t _clkid, struct timespec* _ts) {
- register struct timespec* ts asm("x1") = _ts;
- register clockid_t clkid asm("x0") = _clkid;
- register long ret asm("x0");
- register long nr asm("x8") = __NR_clock_gettime;
-
- asm volatile("svc #0\n"
- : "=r"(ret)
- : "r"(clkid), "r"(ts), "r"(nr)
- : "memory");
- return ret;
-}
-
-static inline int sys_clock_getres(clockid_t _clkid, struct timespec* _ts) {
- register struct timespec* ts asm("x1") = _ts;
- register clockid_t clkid asm("x0") = _clkid;
- register long ret asm("x0");
- register long nr asm("x8") = __NR_clock_getres;
-
- asm volatile("svc #0\n"
- : "=r"(ret)
- : "r"(clkid), "r"(ts), "r"(nr)
- : "memory");
- return ret;
-}
-
-static inline void sys_rt_sigreturn(void) {
- asm volatile("mov x8, #" __stringify(__NR_rt_sigreturn)" \n"
- "svc #0 \n");
-}
-
-#else
-#error "unsupported architecture"
-#endif
-} // namespace vdso
-
-#endif // VDSO_SYSCALLS_H_
diff --git a/vdso/vdso.cc b/vdso/vdso.cc
deleted file mode 100644
index 3b6653b5d..000000000
--- a/vdso/vdso.cc
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// This is the VDSO for sandboxed binaries. This file just contains the entry
-// points to the VDSO. All of the real work is done in vdso_time.cc.
-
-#define _DEFAULT_SOURCE // ensure glibc provides struct timezone.
-#include <sys/time.h>
-#include <time.h>
-
-#include "vdso/syscalls.h"
-#include "vdso/vdso_time.h"
-
-namespace vdso {
-namespace {
-
-int __common_clock_gettime(clockid_t clock, struct timespec* ts) {
- int ret;
-
- switch (clock) {
- case CLOCK_REALTIME:
- ret = ClockRealtime(ts);
- break;
-
- case CLOCK_BOOTTIME:
- // Fallthrough, CLOCK_BOOTTIME is an alias for CLOCK_MONOTONIC
- case CLOCK_MONOTONIC:
- ret = ClockMonotonic(ts);
- break;
-
- default:
- ret = sys_clock_gettime(clock, ts);
- break;
- }
-
- return ret;
-}
-
-int __common_gettimeofday(struct timeval* tv, struct timezone* tz) {
- if (tv) {
- struct timespec ts;
- int ret = ClockRealtime(&ts);
- if (ret) {
- return ret;
- }
- tv->tv_sec = ts.tv_sec;
- tv->tv_usec = ts.tv_nsec / 1000;
- }
-
- // Nobody should be calling gettimeofday() with a non-NULL
- // timezone pointer. If they do then they will get zeros.
- if (tz) {
- tz->tz_minuteswest = 0;
- tz->tz_dsttime = 0;
- }
-
- return 0;
-}
-} // namespace
-
-// __kernel_rt_sigreturn() implements rt_sigreturn()
-extern "C" void __kernel_rt_sigreturn(unsigned long unused) {
- // No optimizations yet, just make the real system call.
- sys_rt_sigreturn();
-}
-
-#if __x86_64__
-
-// __vdso_clock_gettime() implements clock_gettime()
-extern "C" int __vdso_clock_gettime(clockid_t clock, struct timespec* ts) {
- return __common_clock_gettime(clock, ts);
-}
-extern "C" int clock_gettime(clockid_t clock, struct timespec* ts)
- __attribute__((weak, alias("__vdso_clock_gettime")));
-
-// __vdso_gettimeofday() implements gettimeofday()
-extern "C" int __vdso_gettimeofday(struct timeval* tv, struct timezone* tz) {
- return __common_gettimeofday(tv, tz);
-}
-extern "C" int gettimeofday(struct timeval* tv, struct timezone* tz)
- __attribute__((weak, alias("__vdso_gettimeofday")));
-
-// __vdso_time() implements time()
-extern "C" time_t __vdso_time(time_t* t) {
- struct timespec ts;
- ClockRealtime(&ts);
- if (t) {
- *t = ts.tv_sec;
- }
- return ts.tv_sec;
-}
-extern "C" time_t time(time_t* t) __attribute__((weak, alias("__vdso_time")));
-
-// __vdso_getcpu() implements getcpu()
-extern "C" long __vdso_getcpu(unsigned* cpu, unsigned* node,
- struct getcpu_cache* cache) {
- // No optimizations yet, just make the real system call.
- return sys_getcpu(cpu, node, cache);
-}
-extern "C" long getcpu(unsigned* cpu, unsigned* node,
- struct getcpu_cache* cache)
- __attribute__((weak, alias("__vdso_getcpu")));
-
-#elif __aarch64__
-
-// __kernel_clock_gettime() implements clock_gettime()
-extern "C" int __kernel_clock_gettime(clockid_t clock, struct timespec* ts) {
- return __common_clock_gettime(clock, ts);
-}
-
-// __kernel_gettimeofday() implements gettimeofday()
-extern "C" int __kernel_gettimeofday(struct timeval* tv, struct timezone* tz) {
- return __common_gettimeofday(tv, tz);
-}
-
-// __kernel_clock_getres() implements clock_getres()
-extern "C" int __kernel_clock_getres(clockid_t clock, struct timespec* res) {
- int ret = 0;
-
- switch (clock) {
- case CLOCK_REALTIME:
- case CLOCK_MONOTONIC:
- case CLOCK_BOOTTIME: {
- if (res == nullptr) {
- return 0;
- }
-
- res->tv_sec = 0;
- res->tv_nsec = 1;
- break;
- }
-
- default:
- ret = sys_clock_getres(clock, res);
- break;
- }
-
- return ret;
-}
-
-#else
-#error "unsupported architecture"
-#endif
-} // namespace vdso
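Note that the dispatch above treats CLOCK_BOOTTIME as an alias for CLOCK_MONOTONIC, so both ids resolve to ClockMonotonic() and every other clock falls back to the real system call. A small user-space sketch that drives that path through the exported clock_gettime symbol (ordinary Linux headers; nothing gVisor-specific is assumed):

    #include <time.h>

    #include <cstdio>

    int main() {
      struct timespec mono, boot;
      // Under this VDSO both calls land in __common_clock_gettime() and share
      // the ClockMonotonic() implementation.
      if (clock_gettime(CLOCK_MONOTONIC, &mono) != 0) return 1;
      if (clock_gettime(CLOCK_BOOTTIME, &boot) != 0) return 1;
      std::printf("monotonic=%lld.%09ld boottime=%lld.%09ld\n",
                  static_cast<long long>(mono.tv_sec), mono.tv_nsec,
                  static_cast<long long>(boot.tv_sec), boot.tv_nsec);
      return 0;
    }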
diff --git a/vdso/vdso_amd64.lds b/vdso/vdso_amd64.lds
deleted file mode 100644
index d114290da..000000000
--- a/vdso/vdso_amd64.lds
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Linker script for the VDSO.
- *
- * The VDSO is essentially a normal ELF shared library that is mapped into the
- * address space of the process that is going to use it. The address of the
- * VDSO is passed to the runtime linker in the AT_SYSINFO_EHDR entry of the aux
- * vector.
- *
- * There are, however, three ways in which the VDSO differs from a normal
- * shared library:
- *
- * - The runtime linker does not attempt to process any relocations for the
- * VDSO so it is the responsibility of whoever loads the VDSO into the
- * address space to do this if necessary. Because of this restriction we are
- * careful to ensure that the VDSO does not need to have any relocations
- * applied to it.
- *
- * - Although the VDSO is position independent and would normally be linked at
- * virtual address 0, the Linux kernel VDSO is actually linked at a non zero
- * virtual address and the code in the system runtime linker that handles the
- * VDSO expects this to be the case so we have to explicitly link this VDSO
- * at a non zero address. The actual address is arbitrary, but we use the
- * same one as the Linux kernel VDSO.
- *
- * - The VDSO will be directly mmapped by the sentry, rather than going through
- * a normal ELF loading process. The VDSO must be carefully constructed such
- * that the layout in the ELF file is identical to the layout in memory.
- */
-
-VDSO_PRELINK = 0xffffffffff700000;
-
-SECTIONS {
- /* The parameter page is mapped just before the VDSO. */
- _params = VDSO_PRELINK - 0x1000;
-
- . = VDSO_PRELINK + SIZEOF_HEADERS;
-
- .hash : { *(.hash) } :text
- .gnu.hash : { *(.gnu.hash) }
- .dynsym : { *(.dynsym) }
- .dynstr : { *(.dynstr) }
- .gnu.version : { *(.gnu.version) }
- .gnu.version_d : { *(.gnu.version_d) }
- .gnu.version_r : { *(.gnu.version_r) }
-
- .note : { *(.note.*) } :text :note
-
- .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
- .eh_frame : { KEEP (*(.eh_frame)) } :text
-
- .dynamic : { *(.dynamic) } :text :dynamic
-
- .rodata : { *(.rodata*) } :text
-
- .altinstructions : { *(.altinstructions) }
- .altinstr_replacement : { *(.altinstr_replacement) }
-
- /*
- * TODO(gvisor.dev/issue/157): Remove this alignment? Then the VDSO would fit
- * in a single page.
- */
- . = ALIGN(0x1000);
- .text : { *(.text*) } :text =0x90909090
-
- /*
- * N.B. There is no data/bss section. This VDSO neither needs nor uses a data
- * section. We omit it entirely because some gcc/clang and gold/bfd version
- * combinations struggle to handle an empty data PHDR segment (internal
- * linker assertion failures result).
- *
- * If the VDSO does incorrectly include a data section, the linker will
- * include it in the text segment. check_vdso.py looks for this degenerate
- * case.
- */
-}
-
-PHDRS {
- text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R | PF_X */
- dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
- note PT_NOTE FLAGS(4); /* PF_R */
- eh_frame_hdr PT_GNU_EH_FRAME;
-}
-
-/*
- * Define the symbols that are to be exported.
- */
-VERSION {
- LINUX_2.6 {
- global:
- clock_gettime;
- __vdso_clock_gettime;
- gettimeofday;
- __vdso_gettimeofday;
- getcpu;
- __vdso_getcpu;
- time;
- __vdso_time;
- __kernel_rt_sigreturn;
-
- local: *;
- };
-}
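As the comment block at the top of this linker script notes, the process learns where the VDSO is mapped from the AT_SYSINFO_EHDR aux vector entry, and the image must look the same on disk and in memory because the sentry mmaps it directly. A short sketch of reading that entry from ordinary Linux user space (illustrative only; it is not part of the build):

    #include <elf.h>
    #include <sys/auxv.h>

    #include <cstdio>

    int main() {
      // AT_SYSINFO_EHDR points straight at the ELF header of the mapped VDSO.
      const auto* ehdr =
          reinterpret_cast<const Elf64_Ehdr*>(getauxval(AT_SYSINFO_EHDR));
      if (ehdr == nullptr) {
        std::puts("no VDSO mapped");
        return 1;
      }
      std::printf("VDSO at %p with %u program headers\n",
                  static_cast<const void*>(ehdr),
                  static_cast<unsigned>(ehdr->e_phnum));
      return 0;
    }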
diff --git a/vdso/vdso_arm64.lds b/vdso/vdso_arm64.lds
deleted file mode 100644
index 469185468..000000000
--- a/vdso/vdso_arm64.lds
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Linker script for the VDSO.
- *
- * The VDSO is essentially a normal ELF shared library that is mapped into the
- * address space of the process that is going to use it. The address of the
- * VDSO is passed to the runtime linker in the AT_SYSINFO_EHDR entry of the aux
- * vector.
- *
- * There are, however, three ways in which the VDSO differs from a normal
- * shared library:
- *
- * - The runtime linker does not attempt to process any relocations for the
- * VDSO so it is the responsibility of whoever loads the VDSO into the
- * address space to do this if necessary. Because of this restriction we are
- * careful to ensure that the VDSO does not need to have any relocations
- * applied to it.
- *
- * - Although the VDSO is position independent and would normally be linked at
- * virtual address 0, the Linux kernel VDSO is actually linked at a non zero
- * virtual address and the code in the system runtime linker that handles the
- * VDSO expects this to be the case so we have to explicitly link this VDSO
- * at a non zero address. The actual address is arbitrary, but we use the
- * same one as the Linux kernel VDSO.
- *
- * - The VDSO will be directly mmapped by the sentry, rather than going through
- * a normal ELF loading process. The VDSO must be carefully constructed such
- * that the layout in the ELF file is identical to the layout in memory.
- */
-
-VDSO_PRELINK = 0xffffffffff700000;
-
-OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64", "elf64-littleaarch64")
-OUTPUT_ARCH(aarch64)
-
-SECTIONS {
- /* The parameter page is mapped just before the VDSO. */
- _params = VDSO_PRELINK - 0x1000;
-
- . = VDSO_PRELINK + SIZEOF_HEADERS;
-
- .hash : { *(.hash) } :text
- .gnu.hash : { *(.gnu.hash) }
- .dynsym : { *(.dynsym) }
- .dynstr : { *(.dynstr) }
- .gnu.version : { *(.gnu.version) }
- .gnu.version_d : { *(.gnu.version_d) }
- .gnu.version_r : { *(.gnu.version_r) }
-
- .note : { *(.note.*) } :text :note
-
- .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
- .eh_frame : { KEEP (*(.eh_frame)) } :text
-
- .dynamic : { *(.dynamic) } :text :dynamic
-
- .rodata : { *(.rodata*) } :text
-
- .altinstructions : { *(.altinstructions) }
- .altinstr_replacement : { *(.altinstr_replacement) }
-
- /*
- * TODO(gvisor.dev/issue/157): Remove this alignment? Then the VDSO would fit
- * in a single page.
- */
- . = ALIGN(0x1000);
- .text : { *(.text*) } :text =0xd503201f
-
- /*
- * N.B. There is no data/bss section. This VDSO neither needs nor uses a data
- * section. We omit it entirely because some gcc/clang and gold/bfd version
- * combinations struggle to handle an empty data PHDR segment (internal
- * linker assertion failures result).
- *
- * If the VDSO does incorrectly include a data section, the linker will
- * include it in the text segment. check_vdso.py looks for this degenerate
- * case.
- */
-}
-
-PHDRS {
- text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R | PF_X */
- dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
- note PT_NOTE FLAGS(4); /* PF_R */
- eh_frame_hdr PT_GNU_EH_FRAME;
-}
-
-/*
- * Define the symbols that are to be exported.
- */
-VERSION {
- LINUX_2.6.39 {
- global:
- __kernel_clock_getres;
- __kernel_clock_gettime;
- __kernel_gettimeofday;
- __kernel_rt_sigreturn;
- local: *;
- };
-}
diff --git a/vdso/vdso_time.cc b/vdso/vdso_time.cc
deleted file mode 100644
index 1bb4bb86b..000000000
--- a/vdso/vdso_time.cc
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "vdso/vdso_time.h"
-
-#include <stdint.h>
-#include <sys/time.h>
-#include <time.h>
-
-#include "vdso/cycle_clock.h"
-#include "vdso/seqlock.h"
-#include "vdso/syscalls.h"
-
-// struct params defines the layout of the parameter page maintained by the
-// kernel (i.e., sentry).
-//
-// This is similar to the VVAR page maintained by the normal Linux kernel for
-// its VDSO, but it has a different layout.
-//
-// It must be kept in sync with VDSOParamPage in pkg/sentry/kernel/vdso.go.
-struct params {
- uint64_t seq_count;
-
- uint64_t monotonic_ready;
- int64_t monotonic_base_cycles;
- int64_t monotonic_base_ref;
- uint64_t monotonic_frequency;
-
- uint64_t realtime_ready;
- int64_t realtime_base_cycles;
- int64_t realtime_base_ref;
- uint64_t realtime_frequency;
-};
-
-// Returns a pointer to the global parameter page.
-//
-// This page lives in the page just before the VDSO binary itself. The linker
-// defines _params as the page before the VDSO.
-//
-// Ideally, we'd simply declare _params as an extern struct params.
-// Unfortunately various combinations of old/new versions of gcc/clang and
-// gold/bfd struggle to generate references to such a global without generating
-// relocations.
-//
-// So instead, we use inline assembly with a construct that seems to have wide
-// compatibility across many toolchains.
-#if __x86_64__
-
-inline struct params* get_params() {
- struct params* p = nullptr;
- asm("leaq _params(%%rip), %0" : "=r"(p) : :);
- return p;
-}
-
-#elif __aarch64__
-
-inline struct params* get_params() {
- struct params* p = nullptr;
- asm("adr %0, _params" : "=r"(p) : :);
- return p;
-}
-
-#else
-#error "unsupported architecture"
-#endif
-
-namespace vdso {
-
-const uint64_t kNsecsPerSec = 1000000000UL;
-
-inline struct timespec ns_to_timespec(uint64_t ns) {
- struct timespec ts;
- ts.tv_sec = ns / kNsecsPerSec;
- ts.tv_nsec = ns % kNsecsPerSec;
- return ts;
-}
-
-inline uint64_t cycles_to_ns(uint64_t frequency, uint64_t cycles) {
- uint64_t mult = (kNsecsPerSec << 32) / frequency;
- return ((unsigned __int128)cycles * mult) >> 32;
-}
-
-// ClockRealtime() is the VDSO implementation of clock_gettime(CLOCK_REALTIME).
-int ClockRealtime(struct timespec* ts) {
- struct params* params = get_params();
- uint64_t seq;
- uint64_t ready;
- int64_t base_ref;
- int64_t base_cycles;
- uint64_t frequency;
- int64_t now_cycles;
-
- do {
- seq = read_seqcount_begin(&params->seq_count);
- ready = params->realtime_ready;
- base_ref = params->realtime_base_ref;
- base_cycles = params->realtime_base_cycles;
- frequency = params->realtime_frequency;
- now_cycles = cycle_clock();
- } while (read_seqcount_retry(&params->seq_count, seq));
-
- if (!ready) {
- // The sandbox kernel ensures that we won't compute a time later than this
- // once the params are ready.
- return sys_clock_gettime(CLOCK_REALTIME, ts);
- }
-
- int64_t delta_cycles =
- (now_cycles < base_cycles) ? 0 : now_cycles - base_cycles;
- int64_t now_ns = base_ref + cycles_to_ns(frequency, delta_cycles);
- *ts = ns_to_timespec(now_ns);
- return 0;
-}
-
-// ClockMonotonic() is the VDSO implementation of
-// clock_gettime(CLOCK_MONOTONIC).
-int ClockMonotonic(struct timespec* ts) {
- struct params* params = get_params();
- uint64_t seq;
- uint64_t ready;
- int64_t base_ref;
- int64_t base_cycles;
- uint64_t frequency;
- int64_t now_cycles;
-
- do {
- seq = read_seqcount_begin(&params->seq_count);
- ready = params->monotonic_ready;
- base_ref = params->monotonic_base_ref;
- base_cycles = params->monotonic_base_cycles;
- frequency = params->monotonic_frequency;
- now_cycles = cycle_clock();
- } while (read_seqcount_retry(&params->seq_count, seq));
-
- if (!ready) {
- // The sandbox kernel ensures that we won't compute a time later than this
- // once the params are ready.
- return sys_clock_gettime(CLOCK_MONOTONIC, ts);
- }
-
- int64_t delta_cycles =
- (now_cycles < base_cycles) ? 0 : now_cycles - base_cycles;
- int64_t now_ns = base_ref + cycles_to_ns(frequency, delta_cycles);
- *ts = ns_to_timespec(now_ns);
- return 0;
-}
-
-} // namespace vdso
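cycles_to_ns() above does the cycle-to-nanosecond conversion in 32.32 fixed point: mult is nanoseconds-per-cycle scaled by 2^32, and the 128-bit multiply keeps the intermediate product from overflowing 64 bits. A small self-check of that arithmetic with an illustrative 3 GHz frequency (the value is hypothetical, not taken from a real parameter page):

    #include <cassert>
    #include <cstdint>

    constexpr uint64_t kNsecsPerSec = 1000000000UL;

    uint64_t cycles_to_ns(uint64_t frequency, uint64_t cycles) {
      // 32.32 fixed point: mult is ns-per-cycle scaled by 2^32; kNsecsPerSec
      // << 32 still fits in 64 bits.
      uint64_t mult = (kNsecsPerSec << 32) / frequency;
      return (static_cast<unsigned __int128>(cycles) * mult) >> 32;
    }

    int main() {
      // 3e9 cycles at 3 GHz should come out within rounding of one second.
      uint64_t ns = cycles_to_ns(3000000000UL, 3000000000UL);
      assert(ns >= 999999000 && ns <= 1000000000);
      return 0;
    }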
diff --git a/vdso/vdso_time.h b/vdso/vdso_time.h
deleted file mode 100644
index 70d079efc..000000000
--- a/vdso/vdso_time.h
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef VDSO_VDSO_TIME_H_
-#define VDSO_VDSO_TIME_H_
-
-#include <time.h>
-
-namespace vdso {
-
-int ClockRealtime(struct timespec* ts);
-int ClockMonotonic(struct timespec* ts);
-
-} // namespace vdso
-
-#endif // VDSO_VDSO_TIME_H_
diff --git a/webhook/BUILD b/webhook/BUILD
deleted file mode 100644
index 33c585504..000000000
--- a/webhook/BUILD
+++ /dev/null
@@ -1,28 +0,0 @@
-load("//images:defs.bzl", "docker_image")
-load("//tools:defs.bzl", "go_binary", "pkg_tar")
-
-package(licenses = ["notice"])
-
-docker_image(
- name = "webhook_image",
- data = ":files",
- statements = ['ENTRYPOINT ["/webhook"]'],
-)
-
-# files is the full file system of the webhook container. It is simply:
-# /
-# └─ webhook
-pkg_tar(
- name = "files",
- srcs = [":webhook"],
- extension = "tgz",
- strip_prefix = "/third_party/gvisor/webhook",
-)
-
-go_binary(
- name = "webhook",
- srcs = ["main.go"],
- pure = "on",
- static = "on",
- deps = ["//webhook/pkg/cli"],
-)
diff --git a/webhook/pkg/cli/BUILD b/webhook/pkg/cli/BUILD
deleted file mode 100644
index ac093c556..000000000
--- a/webhook/pkg/cli/BUILD
+++ /dev/null
@@ -1,17 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "cli",
- srcs = ["cli.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/log",
- "//webhook/pkg/injector",
- "@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
- "@io_k8s_apimachinery//pkg/util/net:go_default_library",
- "@io_k8s_client_go//kubernetes:go_default_library",
- "@io_k8s_client_go//rest:go_default_library",
- ],
-)
diff --git a/webhook/pkg/cli/cli_state_autogen.go b/webhook/pkg/cli/cli_state_autogen.go
new file mode 100644
index 000000000..e81991e0b
--- /dev/null
+++ b/webhook/pkg/cli/cli_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package cli
diff --git a/webhook/pkg/injector/BUILD b/webhook/pkg/injector/BUILD
deleted file mode 100644
index d296981be..000000000
--- a/webhook/pkg/injector/BUILD
+++ /dev/null
@@ -1,34 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "injector",
- srcs = [
- "certs.go",
- "webhook.go",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/log",
- "@com_github_mattbaird_jsonpatch//:go_default_library",
- "@io_k8s_api//admission/v1beta1:go_default_library",
- "@io_k8s_api//admissionregistration/v1beta1:go_default_library",
- "@io_k8s_api//core/v1:go_default_library",
- "@io_k8s_apimachinery//pkg/api/errors:go_default_library",
- "@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
- "@io_k8s_client_go//kubernetes:go_default_library",
- ],
-)
-
-genrule(
- name = "certs",
- srcs = [":gencerts"],
- outs = ["certs.go"],
- cmd = "$$(cut -d ' ' -f 1 <<< \"$(locations :gencerts)\") $@",
-)
-
-sh_binary(
- name = "gencerts",
- srcs = ["gencerts.sh"],
-)
diff --git a/webhook/pkg/injector/certs.go b/webhook/pkg/injector/certs.go
new file mode 100644
index 000000000..96a270137
--- /dev/null
+++ b/webhook/pkg/injector/certs.go
@@ -0,0 +1,97 @@
+package injector
+
+// This file was generated using openssl by the gencerts.sh script.
+
+var caKey = []byte(`-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEAtUYj/tDPDDHTlSJpZLIjZ5dUZ4MD3IdwBJLUwTXdDr0TWVhY
+MX7FSJVeKW+hGpx1xZ1R+TdCwzSyfoFFCzttcqnKJ1Kl+FYDBMsIDFZw4BiAAPVK
+sviCMYVtgR+YU4Z1WzudCX8RUbxK5ueifXSf8GbRteu6mCv2uMuDrA+XfZnNWyr+
+6sgU1kzTAUgUMt6VKR069ayClv8P42VRDb9Rw2Hma5x4AnMCF2WaxBN8kLRpqHRd
+it80d5oidY6xpxcBPOismo76U4RpIXtCEBvgvZInhW3qz2rxFYG0PJnw0ovOvCdP
+fcz5sktrerreet3sDiTwU6TM1KSJX+uGmnetnwIDAQABAoIBAAYaDFAhezav3q1g
+cSfAj0yHXYH2eQTNUkn1H1A5ne1HFAWn4aAY4k8lJ/xBE60vow850m6PG6Iyfeeg
+NlDAeViounNEZu3LB2L76pNvvXDtojFmEFOh5dAA68Sy6Y+2MPEXOpv9OPoFWogX
+N/L9H/0ZmOmEu80vfaiOwSnjhHfnzOeWr4AuPpZyQP0/fioZPEYcdBmZMT9hP+gb
+lgz/43QJ/XICO1jypiFUuLz/Z1WsZAuTgjkL1u9AI0Lb5iwA2kp50d8/ebRF24ME
+VhLZNuCGf1/h1oMUq1nf0cvnqR8X/xf2nJganOvCCFWIv05dOiF1pwa0g6UOTMok
+esEx9EECgYEA5Z0QmvGcjZi3jpGpGJhUMCxt/icisG6+TrFEgMDOCbxHpy9joX1O
+zd2k9/ho5LcGdClxV2oE93YeYqEzpdN9C/rp1B5mBPVd7kN04Gvppg2ahgN0/N8d
+egr4sW8y/Q4TsQHJ2gt5iOFyOZ7d4W/rVfUubnf2N9GR7Of3cQXHJ5ECgYEAyhr9
+BS2nJTdzcnLQfLINQJ/DLFJzNajGhhYah1rgDcfOLg93HSwEg5qrRnRh6LnqGqKS
+8hqUJebmdA0Vh8a3ocgiFWbiDyjRyl/IhIzWWgzgqkiBH4AQX8TFZMcy2A+Ju8nz
+GEsxqwdHqTShPiHc8bHwLzDp9SYgFzHyH5k/yi8CgYAoPfTRExNbBa71/7VhKCFp
+KABHENjVjzMvW4YkAqaZsjPkqzuM7AdJsVTeWN0ZaLJq3XCN33jrXfpJUvNYVs6N
+sPYWRRWgPNivj4cGZiNXBP9WCXkRcJEb+JxJjLGlBDiTRzr3SheM0+ZPDvbYMeNO
+91+h4Mh4U/R9TtJhLqAO4QKBgDoQyPMM68HAjbWrEQVSboZLoFqCkcEv7WGmxhZv
+YIH1DuKi5NkHxYA+FslWNK8VgxIF9WwDgN7m2P0sqSvqRuX/RvOZLIeodaXMISMc
+B5W1r3KdBCQVuh6ZvRC4Pn5e8HZ4jhRDvlBh9g/CJDViq0Txl40nY4BgZMXlPqgf
+cnlXAoGAGNaUmN85FxTc6AIPwmX+iY74ktmAsyKb2kVvur4VtjfwLQlM0+d4muFp
+CLZiaejLdfle7St5SVEVHrjoUlbErsnElWGiNODpne2euHsothLQwiZtnktQD3Qp
+vAYf7xflC2sDJT6bs1j2m5Sf58NKqOKMwT/SqjVl9503aQQq66g=
+-----END RSA PRIVATE KEY-----`)
+
+var caCert = []byte(`-----BEGIN CERTIFICATE-----
+MIICqzCCAZMCFHOPDPhe8VHPQBU9eZ4k3biL33tvMA0GCSqGSIb3DQEBCwUAMBEx
+DzANBgNVBAMMBmUyZV9jYTAgFw0yMDEyMzAwMTQ1NTlaGA8yMjk0MTAxNTAxNDU1
+OVowETEPMA0GA1UEAwwGZTJlX2NhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
+CgKCAQEAtUYj/tDPDDHTlSJpZLIjZ5dUZ4MD3IdwBJLUwTXdDr0TWVhYMX7FSJVe
+KW+hGpx1xZ1R+TdCwzSyfoFFCzttcqnKJ1Kl+FYDBMsIDFZw4BiAAPVKsviCMYVt
+gR+YU4Z1WzudCX8RUbxK5ueifXSf8GbRteu6mCv2uMuDrA+XfZnNWyr+6sgU1kzT
+AUgUMt6VKR069ayClv8P42VRDb9Rw2Hma5x4AnMCF2WaxBN8kLRpqHRdit80d5oi
+dY6xpxcBPOismo76U4RpIXtCEBvgvZInhW3qz2rxFYG0PJnw0ovOvCdPfcz5sktr
+erreet3sDiTwU6TM1KSJX+uGmnetnwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQAm
+52DD59yJDsbzK2dTTnBYgUtXkYeEvnQmcQ9nY2209MAP3u8nFk/SbeRiRND4ZH44
+QluVyAB64sLEDgL09+Ag5oKo0HaMfbFRFl9zqu/9e5ew6CyIwCsAOSGe0UMn6Wwk
+xmnoCs7N/rMllPSfVQY2Se7VDwD/2qJZwAARlfhMUGDrYNFuEYg9LWCTmIljmmjw
+uKeRvG0goezmkkDKbuFiEM2uT/R0FpTiEn2qaYvDNXKVGLK9JhUeScbThL42jV9m
+Jq86HNWl0lQvmq3UElmal8EAW+fupyFcNWamlNaRLNIEg0BYyS+Bc14cGQ4e20hP
+ANxAnoVTDxatLx6IgF+l
+-----END CERTIFICATE-----`)
+
+var serverKey = []byte(`-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAvjyW06g28A1Jarin5OSG4wR0uvUteE3+ZTgawezJvdi4T4RW
+zcz1TKvgHBg4zI20SqjLS8m85fIWrmL0xnRYeQR9cK5S9DFvjbSbYmH98/WeDsVF
+iY3G4NnnAZDb9HegXdZLWiAXAE64mVhCFPw1obsRYPVTN+ZbNp3UELB5HC2YrLCR
+XXPLh5YERPpPCmO19FRCKayusHz0YuAGf9+W7oz2gSmnlgymJoO2mDqgNxw1uM20
+2ogB0+QV2phpGQGvVh6ZsilwLMFEwdtAJn331EmslkbXdxROd1g38ERFP0tsAclT
+MuOufhaFU7PkHygdCgUlxmHrOPGt3swVXbmBOwIDAQABAoIBAQCi8HhrP4gWofrB
+GvpLFeXmqhllTLyub3c2sRBSC4NcRTTQftRun193VTu2NJTpt3G2BM9j0Z4nAeyl
+XwKyvFJPDSZ4DgflOjaHFCuG39HJVBYWJqeBwtqWGm0/LRkIUjIcTYDq59NrowF/
+7lVjbgZtyTGAb3/gwEBeDl7mEJEbLQPJBgSjU4NIDnds7XdwshUtkWUePOP2qwy/
+FWHqHZPyfNnqBqX5/Fr9zd5NiQw5XrpM6OKkEV+mXTwaccapmh5qqZHLX6Hwe5jO
+NtXmXtSHAmLZPF6hrmd+gdX9DEqkKvPIes99ZOMWrBEeAy5kHIcb9pOVtSPpwxlV
+vcBf4IChAoGBAOxvN3eIDG6spmtqeh0ynTRpWPytJJuIO2ylJ1IuA6pnxPEQGvue
+4JBVYDvsmoop8B0OTYXC1I8DWagmH276b7guUQwtXI8cudkBavu7ekqa+BqFh/Yy
+UoF8xFI31uw9k7VEmPEa3yTiRy+gtsQniIrvFKEhvTDpxKijg/kxtBbRAoGBAM36
+sEWhy5xp5erUFaFOxKQte2vMEX1An4kzK4NKg6c92WQHauoT9Qn2lhdPt/UYPInv
+Rg+qbKfXhNugPgfkBIXjtW8tZ1TbC8w6OfM030dsSKC9E28crtLAwJ3F4S8HZJMJ
+ZNSHAwaZLqp3dTsdfFt7EI3IAfGMNKnertPt4jJLAoGADwPZH+wx7e1k/Dlc2/HU
+7fkqv5E3W/FA5NtwLdXiQbYpWa3OKOCkHkZtwCB8h/211AKOhcojuZla3kTdhiy9
+X5MBbqaK1EPcwZ1HcAkt8QL6cqS8R7RWbQbBaP9G1OXsNXzPwbAVL8B3CN1J/hcM
+otYgF0OPQkX2SUdpIDA54aECgYAKy8fvbmKIqThGzaTwlntSC5w7cy+7e+agv1DZ
+ic9KtcAuMQFWkYM3aXhGcoQ20XVi2Wq3qXwWdAJfrI9zxGzEQ8IfuKaA2RZONwMQ
+j0XgrXlFC4P4P+2d2EKAQn3iBCYuWsCxLv5mmyLKBobbeVkqRlIAzGnlqi6cdLJ/
+IynG7wKBgQDGYvQLBIN7StLBlVVZAbTaDGloNXo2jifFbbHu1yh1UvFhtZFglz/r
+WXCZxvzaMKxnuMZs2PpggM/tgaoxOWPpFtI4gaUFD5+CgikjWpApM93yP4Zvnwo0
+4EiHPQQiQ6yBgVOHckKRYMJs38dUt433kq9g0HDMkc+ajfKNw5yZMA==
+-----END RSA PRIVATE KEY-----`)
+
+var serverCert = []byte(`-----BEGIN CERTIFICATE-----
+MIIDDzCCAfegAwIBAgIUYw+kvxKk5J0Q1nAtF7WFIUNuxvQwDQYJKoZIhvcNAQEL
+BQAwETEPMA0GA1UEAwwGZTJlX2NhMCAXDTIwMTIzMDAxNDYwMFoYDzIyOTQxMDE1
+MDE0NjAwWjA1MTMwMQYDVQQDDCpndmlzb3ItaW5qZWN0aW9uLWFkbWlzc2lvbi13
+ZWJob29rLmUyZS5zdmMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC+
+PJbTqDbwDUlquKfk5IbjBHS69S14Tf5lOBrB7Mm92LhPhFbNzPVMq+AcGDjMjbRK
+qMtLybzl8hauYvTGdFh5BH1wrlL0MW+NtJtiYf3z9Z4OxUWJjcbg2ecBkNv0d6Bd
+1ktaIBcATriZWEIU/DWhuxFg9VM35ls2ndQQsHkcLZissJFdc8uHlgRE+k8KY7X0
+VEIprK6wfPRi4AZ/35bujPaBKaeWDKYmg7aYOqA3HDW4zbTaiAHT5BXamGkZAa9W
+HpmyKXAswUTB20AmfffUSayWRtd3FE53WDfwREU/S2wByVMy465+FoVTs+QfKB0K
+BSXGYes48a3ezBVduYE7AgMBAAGjOTA3MAkGA1UdEwQCMAAwCwYDVR0PBAQDAgXg
+MB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATANBgkqhkiG9w0BAQsFAAOC
+AQEAerR4SLGytdufaftX8wXhp5/8TN5uwsrfifYbYYD7eP1uGxbGHEmsDNmQuPxh
+PAs8emeTlxDDV6oJI8BtjXT0MW4YYDEFVnPynrG9Mu29Qju7lqX+wWUTb37dLmPo
+ISZBi9CvF3a6dCMytOwx7UX2lHn7PuhhN6P1x3IDO3798ED/c42p9m0s+GDHifxS
+NSD27id1LTx3peqJinacRAr1HMHgeoH6Si/HWoKOaapI7rVjpeiK5legQzcfq68r
+Sq3c+NsxCRVrhsj3oyUuPFDuE340NG5qGY4SCSAd/4J4LNScL/uvP4SYmAdrp+nC
+HEP3jklV4cN/EZwyjhQX/lHV/w==
+-----END CERTIFICATE-----`)
diff --git a/webhook/pkg/injector/gencerts.sh b/webhook/pkg/injector/gencerts.sh
deleted file mode 100755
index f7fda4b63..000000000
--- a/webhook/pkg/injector/gencerts.sh
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/bin/bash
-
-# Copyright 2020 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Generates a CA cert, a server key, and a server cert signed by the CA.
-# reference:
-# https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/testcerts/gencerts.sh
-set -euo pipefail
-
-# Do all the work in TMPDIR, then copy out generated code and delete TMPDIR.
-declare -r OUTDIR="$(readlink -e .)"
-declare -r TMPDIR="$(mktemp -d)"
-cd "${TMPDIR}"
-function cleanup() {
- cd "${OUTDIR}"
- rm -rf "${TMPDIR}"
-}
-trap cleanup EXIT
-
-declare -r CN_BASE="e2e"
-declare -r CN="gvisor-injection-admission-webhook.e2e.svc"
-
-cat > server.conf << EOF
-[req]
-req_extensions = v3_req
-distinguished_name = req_distinguished_name
-[req_distinguished_name]
-[ v3_req ]
-basicConstraints = CA:FALSE
-keyUsage = nonRepudiation, digitalSignature, keyEncipherment
-extendedKeyUsage = clientAuth, serverAuth
-EOF
-
-declare -r OUTFILE="${TMPDIR}/certs.go"
-
-# We depend on OpenSSL being present.
-
-# Create a certificate authority.
-openssl genrsa -out caKey.pem 2048
-openssl req -x509 -new -nodes -key caKey.pem -days 100000 -out caCert.pem -subj "/CN=${CN_BASE}_ca" -config server.conf
-
-# Create a server certificate.
-openssl genrsa -out serverKey.pem 2048
-# Note the CN is the DNS name of the service of the webhook.
-openssl req -new -key serverKey.pem -out server.csr -subj "/CN=${CN}" -config server.conf
-openssl x509 -req -in server.csr -CA caCert.pem -CAkey caKey.pem -CAcreateserial -out serverCert.pem -days 100000 -extensions v3_req -extfile server.conf
-
-echo "package injector" > "${OUTFILE}"
-echo "" >> "${OUTFILE}"
-echo "// This file was generated using openssl by the gencerts.sh script." >> "${OUTFILE}"
-for file in caKey caCert serverKey serverCert; do
- DATA=$(cat "${file}.pem")
- echo "" >> "${OUTFILE}"
- echo "var $file = []byte(\`$DATA\`)" >> "${OUTFILE}"
-done
-
-# Copy generated code into the output directory.
-cp "${OUTFILE}" "${OUTDIR}/$1"
diff --git a/webhook/pkg/injector/injector_state_autogen.go b/webhook/pkg/injector/injector_state_autogen.go
new file mode 100644
index 000000000..2c994b7c9
--- /dev/null
+++ b/webhook/pkg/injector/injector_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package injector
diff --git a/website/BUILD b/website/BUILD
deleted file mode 100644
index 1a38967e5..000000000
--- a/website/BUILD
+++ /dev/null
@@ -1,197 +0,0 @@
-load("//tools:defs.bzl", "bzl_library", "pkg_tar")
-load("//website:defs.bzl", "doc", "docs")
-load("//images:defs.bzl", "docker_image")
-
-package(licenses = ["notice"])
-
-docker_image(
- name = "website",
- data = ":files",
- statements = [
- "EXPOSE 8080/tcp",
- 'ENTRYPOINT ["/server"]',
- ],
- tags = [
- "local",
- "manual",
- "no-sandbox",
- ],
-)
-
-# files is the full file system of the generated container.
-#
-# It must collect all the tarballs (produced by the rules below) and run them
-# through the Dockerfile to generate the site. Note that this checks all links,
-# and therefore requires all static content to be present as well.
-#
-# Note that this rule violates most aspects of hermetic builds. However, this
-# works much more reliably than depending on the container_image rules from
-# bazel itself, which are convoluted and seem to have a hard time even finding
-# the toolchain.
-genrule(
- name = "files",
- srcs = [
- ":config",
- ":css",
- ":docs",
- ":static",
- ":syscallmd",
- "//website/blog:posts",
- "//website/cmd/server",
- "@google_root_pem//file",
- ],
- outs = ["files.tgz"],
- cmd = "set -x; " +
- "T=$$(mktemp -d); " +
- "mkdir -p $$T/input && " +
- "mkdir -p $$T/output/_site && " +
- "tar -xf $(location :config) -C $$T/input && " +
- "tar -xf $(location :css) -C $$T/input && " +
- "tar -xf $(location :docs) -C $$T/input && " +
- "tar -xf $(location :syscallmd) -C $$T/input && " +
- "tar -xf $(location //website/blog:posts) -C $$T/input && " +
- "find $$T/input -type f -exec chmod u+rw {} \\; && " +
- "docker run -i --user $$(id -u):$$(id -g) " +
- "-v $$(readlink -m $$T/input):/input " +
- "-v $$(readlink -m $$T/output/_site):/output " +
- "gvisor.dev/images/jekyll && " +
- "tar -xf $(location :static) -C $$T/output/_site && " +
- "docker run -i --user $$(id -u):$$(id -g) " +
- "-v $$(readlink -m $$T/output/_site):/output " +
- "gvisor.dev/images/jekyll " +
- "ruby /checks.rb " +
- "/output && " +
- "cp $(location //website/cmd/server) $$T/output/server && " +
- "mkdir -p $$T/output/etc/ssl && " +
- "cp $(location @google_root_pem//file) $$T/output/etc/ssl/cert.pem && " +
- "tar -zcf $@ -C $$T/output . && " +
- "rm -rf $$T",
- tags = [
- "local",
- "manual",
- "no-sandbox",
- ],
-)
-
-# static are the purely static parts of the site. These are effectively copied
-# in after jekyll generates all the dynamic content.
-pkg_tar(
- name = "static",
- srcs = [
- "archive.key",
- ] + glob([
- "performance/**",
- ]),
- strip_prefix = "./",
-)
-
-# main.scss requires front-matter to be processed.
-genrule(
- name = "css",
- srcs = glob([
- "css/**",
- ]),
- outs = [
- "css.tar",
- ],
- cmd = "T=$$(mktemp -d); " +
- "mkdir -p $$T/css && " +
- "for file in $(SRCS); do " +
- "echo -en '---\\n---\\n' > $$T/css/$$(basename $$file) && " +
- "cat $$file >> $$T/css/$$(basename $$file); " +
- "done && " +
- "tar -C $$T -czf $@ . && " +
- "rm -rf $$T",
-)
-
-# config is "mostly" static content. These are parts of the site that are
-# present when jekyll runs, but are not dynamically generated.
-pkg_tar(
- name = "config",
- srcs = [
- ":css",
- "_config.yml",
- "//website/blog:index.html",
- ] + glob([
- "assets/**",
- "_includes/**",
- "_layouts/**",
- "_plugins/**",
- "_sass/**",
- ]),
- strip_prefix = "./",
-)
-
-# index is the index file.
-doc(
- name = "index",
- src = "index.md",
- layout = "base",
- permalink = "/",
-)
-
-# docs is the dynamic content of the site.
-docs(
- name = "docs",
- deps = [
- ":index",
- "//:code_of_conduct",
- "//:contributing",
- "//:governance",
- "//:security",
- "//g3doc:community",
- "//g3doc:index",
- "//g3doc:roadmap",
- "//g3doc:style",
- "//g3doc/architecture_guide:performance",
- "//g3doc/architecture_guide:platforms",
- "//g3doc/architecture_guide:resources",
- "//g3doc/architecture_guide:security",
- "//g3doc/proposals:gsoc_2021",
- "//g3doc/user_guide:FAQ",
- "//g3doc/user_guide:checkpoint_restore",
- "//g3doc/user_guide:compatibility",
- "//g3doc/user_guide:debugging",
- "//g3doc/user_guide:filesystem",
- "//g3doc/user_guide:install",
- "//g3doc/user_guide:networking",
- "//g3doc/user_guide:platforms",
- "//g3doc/user_guide/containerd:configuration",
- "//g3doc/user_guide/containerd:containerd_11",
- "//g3doc/user_guide/containerd:quick_start",
- "//g3doc/user_guide/quick_start:docker",
- "//g3doc/user_guide/quick_start:kubernetes",
- "//g3doc/user_guide/quick_start:oci",
- "//g3doc/user_guide/tutorials:cni",
- "//g3doc/user_guide/tutorials:docker",
- "//g3doc/user_guide/tutorials:docker_compose",
- "//g3doc/user_guide/tutorials:knative",
- "//g3doc/user_guide/tutorials:kubernetes",
- ],
-)
-
-# Generate JSON for system call tables
-genrule(
- name = "syscalljson",
- outs = ["syscalls.json"],
- cmd = "$(location //runsc) -- help syscalls -format json -filename $@",
- tools = ["//runsc"],
-)
-
-# Generate markdown from the json dump.
-genrule(
- name = "syscallmd",
- srcs = [":syscalljson"],
- outs = ["syscallsmd"],
- cmd = "T=$$(mktemp -d) && " +
- "$(location //website/cmd/syscalldocs) -in $< -out $$T && " +
- "tar -C $$T -czf $@ . && " +
- "rm -rf $$T",
- tools = ["//website/cmd/syscalldocs"],
-)
-
-bzl_library(
- name = "defs_bzl",
- srcs = ["defs.bzl"],
- visibility = ["//visibility:private"],
-)
diff --git a/website/_config.yml b/website/_config.yml
deleted file mode 100644
index dc44945bc..000000000
--- a/website/_config.yml
+++ /dev/null
@@ -1,46 +0,0 @@
-destination: _site
-markdown: kramdown
-kramdown:
- syntax_highlighter: rouge
- toc_levels: "2,3"
-highlighter: rouge
-paginate: 5
-paginate_path: "/blog/page:num/"
-plugins:
- - jekyll-paginate
- - jekyll-autoprefixer
- - jekyll-inline-svg
- - jekyll-relative-links
- - jekyll-feed
- - jekyll-sitemap
-site_url: https://gvisor.dev
-feed:
- path: blog/index.xml
-svg:
- optimize: true
-defaults:
- - scope:
- path: ""
- values:
- layout: default
-analytics: "UA-150193582-1"
-authors:
- jsprad:
- name: Jeremiah Spradlin
- email: jsprad@google.com
- zkoopmans:
- name: Zach Koopmans
- email: zkoopmans@google.com
- igudger:
- name: Ian Gudger
- email: igudger@google.com
- fvoznika:
- name: Fabricio Voznika
- email: fvoznika@google.com
- ianlewis:
- name: Ian Lewis
- email: ianlewis@google.com
- url: https://twitter.com/IanMLewis
- mpratt:
- name: Michael Pratt
- email: mpratt@google.com
diff --git a/website/_includes/byline.html b/website/_includes/byline.html
deleted file mode 100644
index 1e808260f..000000000
--- a/website/_includes/byline.html
+++ /dev/null
@@ -1,18 +0,0 @@
-By
-{% assign last_pos=include.authors.size | minus: 1 %}
-{% assign and_pos=include.authors.size | minus: 2 %}
-{% for i in (0..last_pos) %}
- {% assign author_id=include.authors[i] %}
- {% assign author=site.authors[author_id] %}
- {% if author %}
- <a href="{% if author.url %}{{ author.url }}{% else %}mailto:{{ author.email }}{% endif %}">{{ author.name }}</a>
- {% else %}
- {{ author_id }}
- {% endif %}
- {% if i == and_pos %}
- and
- {% elsif i < and_pos %}
- ,
- {% endif %}
-{% endfor %}
-on <span class="text-muted">{{ include.date | date_to_long_string }}</span>
diff --git a/website/_includes/footer-links.html b/website/_includes/footer-links.html
deleted file mode 100644
index 2036dbaa9..000000000
--- a/website/_includes/footer-links.html
+++ /dev/null
@@ -1,43 +0,0 @@
-<div class="container">
- <div class="row">
- <div class="col-sm-3 col-md-2">
- <p>About</p>
- <ul class="list-unstyled">
- <li><a href="/roadmap/">Roadmap</a></li>
- <li><a href="/contributing/">Contributing</a></li>
- <li><a href="/security/">Security</a></li>
- <li><a href="/community/governance/">Governance</a></li>
- <li><a href="https://policies.google.com/privacy">Privacy Policy</a></li>
- </ul>
- </div>
- <div class="col-sm-3 col-md-2">
- <p>Support</p>
- <ul class="list-unstyled">
- <li><a href="https://github.com/google/gvisor/issues">Issues</a></li>
- <li><a href="/docs">Documentation</a></li>
- <li><a href="/docs/user_guide/faq">FAQ</a></li>
- </ul>
- </div>
- <div class="col-sm-3 col-md-2">
- <p>Connect</p>
- <ul class="list-unstyled">
- <li><a href="https://github.com/google/gvisor">GitHub</a></li>
- <li><a href="https://groups.google.com/forum/#!forum/gvisor-users">User Mailing List</a></li>
- <li><a href="https://groups.google.com/forum/#!forum/gvisor-dev">Developer Mailing List</a></li>
- <li><a href="https://gitter.im/gvisor/community">Gitter Chat</a></li>
- <li><a href="/blog">Blog</a></li>
- </ul>
- </div>
- <div class="col-sm-3 col-md-3"></div>
- <div class="hidden-xs hidden-sm col-md-3">
- <a href="https://cloud.google.com/run">
- <img style="float: right;" src="/assets/logos/powered-gvisor.png" alt="Powered by gVisor"/>
- </a>
- </div>
- </div>
- <div class="row">
- <div class="col-lg-12">
- <p>&copy; {{ 'now' | date: "%Y" }} The gVisor Authors</p>
- </div>
- </div>
-</div>
diff --git a/website/_includes/footer.html b/website/_includes/footer.html
deleted file mode 100644
index 20aa9f52c..000000000
--- a/website/_includes/footer.html
+++ /dev/null
@@ -1,50 +0,0 @@
-<footer class="footer">
- {% include footer-links.html %}
-</footer>
-
-<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.3.1/jquery.min.js" integrity="sha256-FgpCb/KJQlLNfOu91ta32o/NMZxltwRo8QtmkMRdAu8=" crossorigin="anonymous"></script>
-<script src="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.10.1/js/all.min.js" integrity="sha256-Z1Nvg/+y2+vRFhFgFij7Lv0r77yG3hOvWz2wI0SfTa0=" crossorigin="anonymous"></script>
-<script src="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha256-U5ZEeKfGNOja007MMD3YBI0A3OSZOQbeG6z2f2Y0hu8=" crossorigin="anonymous"></script>
-<script src="https://cdnjs.cloudflare.com/ajax/libs/d3/4.13.0/d3.min.js" integrity="sha256-hYXbQJK4qdJiAeDVjjQ9G0D6A0xLnDQ4eJI9dkm7Fpk=" crossorigin="anonymous"></script>
-
-{% if site.analytics %}
-<script>
-var doNotTrack = false;
-if (!doNotTrack) {
- window.ga=window.ga||function(){(ga.q=ga.q||[]).push(arguments)};ga.l=+new Date;
- ga('create', '{{ site.analytics }}', 'auto');
- ga('send', 'pageview');
-}
-</script>
-<script async src='https://www.google-analytics.com/analytics.js'></script>
-{% endif %}
-
-<script>
- var shiftWindow = function() {
- if (location.hash.length !== 0) {
- window.scrollBy(0, -50);
- }
- };
- window.addEventListener("hashchange", shiftWindow);
-
- $(document).ready(function() {
- // Scroll to anchor of location hash, adjusted for fixed navbar.
- window.setTimeout(function() {
- shiftWindow();
- }, 1);
-
- // Flip the caret when submenu toggles are clicked.
- $(".sidebar-submenu").on("show.bs.collapse", function() {
- var toggle = $('[href$="#' + $(this).attr('id') + '"]');
- if (toggle) {
- toggle.addClass("dropup");
- }
- });
- $(".sidebar-submenu").on("hide.bs.collapse", function() {
- var toggle = $('[href$="#' + $(this).attr('id') + '"]');
- if (toggle) {
- toggle.removeClass("dropup");
- }
- });
- });
-</script>
diff --git a/website/_includes/graph.html b/website/_includes/graph.html
deleted file mode 100644
index ba4cf9840..000000000
--- a/website/_includes/graph.html
+++ /dev/null
@@ -1,205 +0,0 @@
-{::nomarkdown}
-{% assign fn = include.id | remove: " " | remove: "-" | downcase %}
-<figure><a href="{{ include.url }}"><svg id="{{ include.id }}" width=500 height=200 onload="render_{{ fn }}()"><title>{{ include.title }}</title></svg></a></figure>
-<script>
-function render_{{ fn }}() {
-d3.csv("{{ include.url }}", function(d, i, columns) {
- return d; // Transformed below.
-}, function(error, data) {
- if (error) throw(error);
-
- // Create a new data that pivots on runtime.
- //
- // To start, we have:
- // runtime, ..., result
- // runc, ..., 1
- // runsc, ..., 2
- //
- // In the end we want:
- // ..., runsc, runc
- // ..., 1, 2
-
- // Filter by metric, if required.
- if ("{{ include.metric }}" != "") {
- orig_columns = data.columns;
- data = data.filter(d => d.metric == "{{ include.metric }}");
- data.columns = orig_columns;
- }
-
- // Filter by method, if required.
- if ("{{ include.method }}" != "") {
- orig_columns = data.columns;
- data = data.filter(d => d.method == "{{ include.method }}");
- data.columns = orig_columns.filter(key => key != "method");
- }
-
- // Enumerate runtimes.
- var runtimes = Array.from(new Set(data.map(d => d.runtime)));
- var metrics = Array.from(new Set(data.map(d => d.metric)));
- if (metrics.length < 1) {
- console.log(data);
- throw("need at least one metric");
- } else if (metrics.length == 1) {
- metric = metrics[0];
- data.columns = data.columns.filter(key => key != "metric");
- } else {
- metric = ""; // Used for grouping.
- }
-
- var isSubset = function(a, sup) {
- var ap = Object.getOwnPropertyNames(a);
- for (var i = 0; i < ap.length; i++) {
- if (a[ap[i]] !== sup[ap[i]]) {
- return false;
- }
- }
- return true;
- };
-
- // Execute a pivot to include runtimes as attributes.
- var new_data = data.map(function(data_item) {
- // Generate a prototype data item.
- var proto_item = Object.assign({}, data_item);
- delete proto_item.runtime;
- delete proto_item.result;
- var next_item = Object.assign({}, proto_item);
-
- // Find all matching runtime items.
- data.forEach(function(d) {
- if (isSubset(proto_item, d)) {
- // Add the result.
- next_item[d.runtime] = d.result;
- }
- });
- return next_item;
- });
-
- // Remove any duplication.
- new_data = Array.from(new Set(new_data));
- new_data.columns = data.columns;
- new_data.columns = new_data.columns.filter(key => key != "runtime" && key != "result");
- new_data.columns = new_data.columns.concat(runtimes);
- data = new_data;
-
- // Slice based on the first key.
- if (data.columns.length != runtimes.length) {
- x0_key = new_data.columns[0];
- var x1_domain = data.columns.slice(1);
- } else {
- x0_key = "runtime";
- var x1_domain = runtimes;
- }
-
- // Determine variable margins.
- var x0_domain = data.map(d => d[x0_key]);
- var margin_bottom_pad = 0;
- if (x0_domain.length > 8) {
- margin_bottom_pad = 50;
- }
-
- // Use log scale if required.
- var y_min = 0;
- if ({{ include.log | default: "false" }}) {
- // Need to cap lower end of the domain at 1.
- y_min = 1;
- }
-
- if ({{ include.y_min | default: "false" }}) {
- y_min = "{{ include.y_min }}";
- }
-
- var svg = d3.select("#{{ include.id }}"),
- margin = {top: 20, right: 20, bottom: 30 + margin_bottom_pad, left: 50},
- width = +svg.attr("width") - margin.left - margin.right,
- height = +svg.attr("height") - margin.top - margin.bottom,
- g = svg.append("g").attr("transform", "translate(" + margin.left + "," + margin.top + ")");
-
- var x0 = d3.scaleBand()
- .rangeRound([margin.left / 2, width - (4 * margin.right)])
- .paddingInner(0.1);
-
- var x1 = d3.scaleBand()
- .padding(0.05);
-
- var y = d3.scaleLinear()
- .rangeRound([height, 0]);
- if ({{ include.log | default: "false" }}) {
- y = d3.scaleLog()
- .rangeRound([height, 0]);
- }
-
- var z = d3.scaleOrdinal()
- .range(["#262362", "#FBB03B", "#286FD7", "#6b486b"]);
-
- // Set all domains.
- x0.domain(x0_domain);
- x1.domain(x1_domain).rangeRound([0, x0.bandwidth()]);
- y.domain([y_min, d3.max(data, d => d3.max(x1_domain, key => parseFloat(d[key])))]).nice();
-
- // The data.
- g.append("g")
- .selectAll("g")
- .data(data)
- .enter().append("g")
- .attr("transform", function(d) { return "translate(" + x0(d[x0_key]) + ",0)"; })
- .selectAll("rect")
- .data(d => x1_domain.map(key => ({key, value: d[key]})))
- .enter().append("rect")
- .attr("x", d => x1(d.key))
- .attr("y", d => y(d.value))
- .attr("width", x1.bandwidth())
- .attr("height", d => y(y_min) - y(d.value))
- .attr("fill", d => z(d.key));
-
- // X0 ticks and labels.
- var x0_axis = g.append("g")
- .attr("class", "axis")
- .attr("transform", "translate(0," + height + ")")
- .call(d3.axisBottom(x0));
- if (x0_domain.length > 8) {
- x0_axis.selectAll("text")
- .style("text-anchor", "end")
- .attr("dx", "-.8em")
- .attr("dy", ".15em")
- .attr("transform", "rotate(-65)");
- }
-
- // Y ticks and top-label.
- if (metric == "default") {
- metric = ""; // Don't display.
- }
- g.append("g")
- .attr("class", "axis")
- .call(d3.axisLeft(y).ticks(null, "s"))
- .append("text")
- .attr("x", -30.0)
- .attr("y", y(y.ticks().pop()) - 10.0)
- .attr("dy", "0.32em")
- .attr("fill", "#000")
- .attr("font-weight", "bold")
- .attr("text-anchor", "start")
- .text(metric);
-
- // The legend.
- var legend = g.append("g")
- .attr("font-family", "sans-serif")
- .attr("font-size", 10)
- .attr("text-anchor", "end")
- .selectAll("g")
- .data(x1_domain.slice().reverse())
- .enter().append("g")
- .attr("transform", function(d, i) { return "translate(0," + i * 20 + ")"; });
- legend.append("rect")
- .attr("x", width - 19)
- .attr("width", 19)
- .attr("height", 19)
- .attr("fill", z);
- legend.append("text")
- .attr("x", width - 24)
- .attr("y", 9.5)
- .attr("dy", "0.32em")
- .text(function(d) { return d; });
-});
-}
-</script>
-{:/}
diff --git a/website/_includes/header-links.html b/website/_includes/header-links.html
deleted file mode 100644
index 4232fdaa5..000000000
--- a/website/_includes/header-links.html
+++ /dev/null
@@ -1,19 +0,0 @@
-<nav class="navbar navbar-expand-sm navbar-inverse navbar-fixed-top">
- <div class="container">
- <div class="navbar-brand">
- <a href="/">
- <img src="/assets/logos/logo_solo_on_dark.svg" height="25" class="d-inline-block align-top" style="margin-right: 10px;" alt="logo" />
- gVisor
- </a>
- </div>
-
- <div class="collapse navbar-collapse">
- <ul class="nav navbar-nav navbar-right">
- <li><a href="/docs">Documentation</a></li>
- <li><a href="/blog">Blog</a></li>
- <li><a href="/community/">Community</a></li>
- <li><a href="https://github.com/google/gvisor">GitHub</a></li>
- </ul>
- </div>
- </div>
-</nav>
diff --git a/website/_includes/header.html b/website/_includes/header.html
deleted file mode 100644
index c80310069..000000000
--- a/website/_includes/header.html
+++ /dev/null
@@ -1,30 +0,0 @@
- <head>
- <meta charset="utf-8">
- <meta http-equiv="X-UA-Compatible" content="IE=edge">
- <meta name="viewport" content="width=device-width, initial-scale=1">
- {% if page.title %}
- <title>{{ page.title }} - gVisor</title>
- {% else %}
- <title>gVisor</title>
- {% endif %}
- <link rel="canonical" href="{{ page.url | replace:'index.html','' | prepend: site_root }}">
-
- <!-- Dependencies. -->
- <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/css/bootstrap.min.css" integrity="sha256-916EbMg70RQy9LHiGkXzG8hSg9EdNy97GazNG/aiY1w=" crossorigin="anonymous" />
- <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.10.1/css/all.min.css" integrity="sha256-fdcFNFiBMrNfWL6OcAGQz6jDgNTRxnrLEd4vJYFWScE=" crossorigin="anonymous" />
-
- <!-- Our own style sheet. -->
- <link rel="stylesheet" type="text/css" href="/css/main.css">
- <link rel="icon" type="image/png" href="/assets/favicons/favicon-32x32.png" sizes="32x32">
- <link rel="icon" type="image/png" href="/assets/favicons/favicon-16x16.png" sizes="16x16">
-
- {% if page.title %}
- <meta name="og:title" content="{{ page.title }}">
- {% else %}
- <meta name="og:title" content="gVisor">
- {% endif %}
- {% if page.description %}
- <meta name="og:description" content="{{ page.description }}">
- {% endif %}
- <meta name="og:image" content="{{ site.site_url }}/assets/logos/logo_solo_on_white_bordered.svg">
- </head>
diff --git a/website/_includes/paginator.html b/website/_includes/paginator.html
deleted file mode 100644
index b4ff4c3b1..000000000
--- a/website/_includes/paginator.html
+++ /dev/null
@@ -1,10 +0,0 @@
-<nav aria-label="...">
- <ul class="pager">
- {% if paginator.previous_page %}
- <li class="previous"><a href="{{ paginator.previous_page_path }}"><span aria-hidden="true">&larr;</span> Newer</a></li>
- {% endif %}
- {% if paginator.next_page %}
- <li class="next"><a href="{{ paginator.next_page_path }}">Older <span aria-hidden="true">&rarr;</span></a></li>
- {% endif %}
- </ul>
-</nav>
diff --git a/website/_includes/required_linux.html b/website/_includes/required_linux.html
deleted file mode 100644
index e9d1b7548..000000000
--- a/website/_includes/required_linux.html
+++ /dev/null
@@ -1,2 +0,0 @@
-> Note: gVisor supports only x86\_64 and requires Linux 4.14.77+
-> ([older Linux](/docs/user_guide/networking/#gso)).
diff --git a/website/_layouts/base.html b/website/_layouts/base.html
deleted file mode 100644
index b30bee0dc..000000000
--- a/website/_layouts/base.html
+++ /dev/null
@@ -1,9 +0,0 @@
-<!DOCTYPE html>
-<html lang="en" itemscope itemtype="https://schema.org/WebPage">
- {% include header.html %}
- <body>
- {% include header-links.html %}
- {{ content }}
- {% include footer.html %}
- </body>
-</html>
diff --git a/website/_layouts/blog.html b/website/_layouts/blog.html
deleted file mode 100644
index 6c371ab50..000000000
--- a/website/_layouts/blog.html
+++ /dev/null
@@ -1,17 +0,0 @@
----
-layout: base
----
-
-<div class="container">
- <div class="row">
- <div class="col-lg-2"></div>
- <div class="col-lg-8">
- <h1>{{ page.title }}</h1>
- {% if page.feed %}
- <a class="btn-inverse" href="/blog/index.xml">Feed&nbsp;<i class="fas fa-rss ml-2"></i></a>
- {% endif %}
- {{ content }}
- </div>
- <div class="col-lg-2"></div>
- </div>
-</div>
diff --git a/website/_layouts/default.html b/website/_layouts/default.html
deleted file mode 100644
index e5523e3fc..000000000
--- a/website/_layouts/default.html
+++ /dev/null
@@ -1,14 +0,0 @@
----
-layout: base
----
-{% if page.title %}
-<div class="container">
- <div class="page-header">
- <h1>{{ page.title }}</h1>
- </div>
-</div>
-{% endif %}
-
-<div class="container">
- {{ content }}
-</div>
diff --git a/website/_layouts/docs.html b/website/_layouts/docs.html
deleted file mode 100644
index 5b95dc071..000000000
--- a/website/_layouts/docs.html
+++ /dev/null
@@ -1,62 +0,0 @@
----
-layout: base
-categories:
- - Project
- - User Guide
- - Architecture Guide
- - Compatibility
----
-
-<div class="container">
- <div class="row">
- <div class="col-md-3">
- <nav class="sidebar" id="sidebar-nav">
- {% for category in layout.categories %}
- <h3>{{ category }}</h3>
- <ul class="sidebar-nav">
- {% assign subcats = site.pages | where: 'layout', 'docs' | where: 'category', category | group_by: 'subcategory' | sort: 'name', 'first' %}
- {% for subcategory in subcats %}
- {% assign sorted_pages = subcategory.items | where: 'include_in_menu', true | sort: 'weight', 'last' %}
- {% comment %}If all pages in the subcategory are excluded don't show it.{% endcomment %}
- {% if sorted_pages.size > 0 %}
- {% if subcategory.name != "" %}
- {% assign cid = category | remove: " " | downcase %}
- {% assign sid = subcategory.name | remove: " " | downcase %}
- <li>
- {% comment %}
- If the current page is in the sub-category then set the collapsible to expanded.
- See: https://getbootstrap.com/docs/3.3/javascript/#collapse
- {% endcomment %}
- {% assign expanded = false %}
- {% for p in sorted_pages %}{% if page.url == p.url %}{% assign expanded = true %}{% endif %}{% endfor %}
- <a class="sidebar-nav-heading" data-toggle="collapse" href="#{{ cid }}-{{ sid }}" {% if expanded %}aria-expanded="true"{% else %}aria-expanded="false"{% endif %} aria-controls="{{ cid }}-{{ sid }}">{{ subcategory.name }}<span class="caret"></span></a>
- <ul class="collapse{% if expanded %} in{% endif %} sidebar-nav sidebar-submenu" id="{{ cid }}-{{ sid }}">
- {% endif %}
- {% for p in sorted_pages %}
- <li{% if page.url == p.url %} class="active"{% endif %}><a href="{{ p.url }}">{{ p.title }}</a></li>
- {% endfor %}
- {% if subcategory.name != "" %}
- </li>
- </ul>
- {% endif %}
- {% endif %}
- {% endfor %}
- </ul>
- {% endfor %}
- </nav>
- </div>
-
- <div class="col-md-9">
- <h1>{{ page.title }}</h1>
- {% if page.editpath %}
- <p>
- <a href="https://github.com/google/gvisor/edit/master/{{page.editpath}}" target="_blank" rel="noopener"><i class="fa fa-edit fa-fw"></i> Edit this page</a>
- <a href="https://github.com/google/gvisor/issues/new?title={{page.title | url_encode}}" target="_blank" rel="noopener"><i class="fab fa-github fa-fw"></i> Create issue</a>
- </p>
- {% endif %}
- <div class="docs-content">
- {{ content }}
- </div>
- </div>
- </div>
-</div>
diff --git a/website/_layouts/post.html b/website/_layouts/post.html
deleted file mode 100644
index 640bee5af..000000000
--- a/website/_layouts/post.html
+++ /dev/null
@@ -1,10 +0,0 @@
----
-layout: blog
----
-
-<div class="blog-meta">
- {% include byline.html authors=page.authors date=page.date %}
-</div>
-<div class="blog-content">
- {{ content }}
-</div>
diff --git a/website/_plugins/svg_mime_type.rb b/website/_plugins/svg_mime_type.rb
deleted file mode 100644
index ad6bb6480..000000000
--- a/website/_plugins/svg_mime_type.rb
+++ /dev/null
@@ -1,3 +0,0 @@
-require 'webrick'
-include WEBrick
-WEBrick::HTTPUtils::DefaultMimeTypes.store 'svg', 'image/svg+xml'
diff --git a/website/_sass/footer.scss b/website/_sass/footer.scss
deleted file mode 100644
index ec2ba5e20..000000000
--- a/website/_sass/footer.scss
+++ /dev/null
@@ -1,15 +0,0 @@
-.footer {
- margin-top: 40px;
- background-color: #222;
- color: #fff;
- padding: 20px;
-
- a {
- color: $inverse-link-color;
-
- &:hover,
- &:focus {
- color: $inverse-link-hover-color;
- }
- }
-}
diff --git a/website/_sass/front.scss b/website/_sass/front.scss
deleted file mode 100644
index f1b060560..000000000
--- a/website/_sass/front.scss
+++ /dev/null
@@ -1,17 +0,0 @@
-.jumbotron {
- background-image: url(/assets/images/background_1080p.jpg);
- background-position: center;
- background-repeat: no-repeat;
- background-size: cover;
- background-blend-mode: darken;
- background-color: rgba(0, 0, 0, 0.3);
-
- p {
- color: #fff;
- margin-top: 0;
- margin-bottom: 0;
- font-weight: 300;
- font-size: 24px;
- line-height: 30px;
- }
-}
diff --git a/website/_sass/navbar.scss b/website/_sass/navbar.scss
deleted file mode 100644
index 65bc573ac..000000000
--- a/website/_sass/navbar.scss
+++ /dev/null
@@ -1,26 +0,0 @@
-.navbar-inverse {
- background-color: $primary;
- border-bottom: 1px solid $primary;
-
- .navbar-brand > a {
- color: #fff;
-
- &:focus,
- &:hover {
- color: #fff;
- }
- }
-
- .navbar-nav > li > a {
- color: $inverse-link-color;
-
- &:focus,
- &:hover {
- color: $inverse-link-hover-color;
- }
- }
-
- .navbar-nav .nav-icon {
- font-size: 18px;
- }
-}
diff --git a/website/_sass/sidebar.scss b/website/_sass/sidebar.scss
deleted file mode 100644
index f4ca05df9..000000000
--- a/website/_sass/sidebar.scss
+++ /dev/null
@@ -1,61 +0,0 @@
-$sidebar-border-color: #fff;
-$sidebar-hover-border-color: #66bb6a;
-
-.sidebar {
- margin-top: 40px;
-
- ul.sidebar-nav {
- list-style-type: none;
- padding: 0;
- transition: height 0.01s;
-
- li {
- &.sidebar-nav-heading {
- padding: 10px 0;
- margin: 0;
- display: block;
- font-size: 16px;
- font-weight: 300;
- }
-
- a {
- padding: 4px 0;
- display: block;
- border-right: 2px solid $sidebar-border-color;
-
- &:focus {
- text-decoration: none;
- }
-
- .caret {
- float: right;
- margin-top: 8px;
- margin-right: 10px;
- }
- }
-
- &.active {
- a {
- border-left: 2px solid $sidebar-hover-border-color;
- padding-left: 6px;
- }
- }
- }
-
- ul.sidebar-nav {
- padding-left: 10px;
- }
- }
-}
-
-@media (min-width: 992px) {
- .sidebar-toggle {
- display: none;
- }
-
- .sidebar {
- &.collapse {
- display: block;
- }
- }
-}
diff --git a/website/_sass/style.scss b/website/_sass/style.scss
deleted file mode 100644
index 4deb945d4..000000000
--- a/website/_sass/style.scss
+++ /dev/null
@@ -1,154 +0,0 @@
-$primary: #262362;
-$secondary: #fff;
-$link-color: #286fd7;
-$inverse-link-color: #fff;
-
-$link-hover-color: darken($link-color, 10%);
-$inverse-link-hover-color: darken($inverse-link-color, 10%);
-
-$text-color: #444;
-
-$body-font-family: 'Roboto', 'Helvetica Neue', Helvetica, Arial, sans-serif;
-$code-font-family: 'Source Code Pro', monospace;
-
-html {
- position: relative;
- min-height: 100%;
-}
-
-body {
- color: $text-color;
- font-family: $body-font-family;
- padding-top: 40px;
-}
-
-a {
- color: $link-color;
-
- &:hover,
- &:focus {
- color: $link-hover-color;
- text-decoration: none;
- }
-
- code {
- color: $link-color;
- }
-}
-
-h1,
-h2,
-h3,
-h4,
-h5,
-h6 {
- color: $text-color;
- font-weight: 400;
-}
-
-h1 code,
-h2 code,
-h3 code,
-h4 code,
-h5 code,
-h6 code {
- color: $text-color;
- background: transparent;
-}
-
-h1 {
- font-size: 30px;
- margin-top: 40px;
- margin-bottom: 40px;
-}
-
-h2 {
- font-size: 24px;
- margin-top: 30px;
- margin-bottom: 30px;
-
- code {
- font-size: 24px;
- }
-}
-
-h3 {
- font-size: 20px;
- margin-top: 24px;
- margin-bottom: 24px;
-
- code {
- font-size: 20px;
- }
-}
-
-h4 {
- font-size: 18px;
- margin-top: 20px;
- margin-bottom: 20px;
-
- code {
- font-size: 18px;
- }
-}
-
-p,
-li {
- font-size: 14px;
- line-height: 22px;
-}
-
-code {
- font-family: $code-font-family;
- font-size: 13px;
-}
-
-.btn {
- color: $text-color;
- background-color: $inverse-link-color;
-}
-
-.btn-inverse {
- color: $text-color;
- background-color: #fff;
-}
-
-.well {
- box-shadow: none;
-}
-
-table {
- width: 100%;
-}
-
-table td,
-table th {
- border: 1px solid #ddd;
- padding: 8px;
-}
-
-table tr:nth-child(even) {
- background-color: #eee;
-}
-
-table th {
- padding-top: 12px;
- padding-bottom: 12px;
- background-color: $primary;
- color: $secondary;
-}
-
-.blog-meta {
- margin-top: 10px;
- margin-bottom: 20px;
-}
-
-.docs-content * img {
- display: block;
- margin: 20px auto;
-}
-
-.blog-content * img {
- display: block;
- margin: 20px auto;
-}
diff --git a/website/archive.key b/website/archive.key
deleted file mode 100644
index 8946884a7..000000000
--- a/website/archive.key
+++ /dev/null
@@ -1,29 +0,0 @@
------BEGIN PGP PUBLIC KEY BLOCK-----
-
-mQINBF0meAYBEACcBYPOSBiKtid+qTQlbgKGPxUYt0cNZiQqWXylhYUT4PuNlNx5
-s+sBLFvNTpdTrXMmZ8NkekyjD1HardWvebvJT4u+Ho/9jUr4rP71cNwNtocz/w8G
-DsUXSLgH8SDkq6xw0L+5eGc78BBg9cOeBeFBm3UPgxTBXS9Zevoi2w1lzSxkXvjx
-cGzltzMZfPXERljgLzp9AAfhg/2ouqVQm37fY+P/NDzFMJ1XHPIIp9KJl/prBVud
-jJJteFZ5sgL6MwjBQq2kw+q2Jb8Zfjl0BeXDgGMN5M5lGhX2wTfiMbfo7KWyzRnB
-RpSP3BxlLqYeQUuLG5Yx8z3oA3uBkuKaFOKvXtiScxmGM/+Ri2YM3m66imwDhtmP
-AKwTPI3Re4gWWOffglMVSv2sUAY32XZ74yXjY1VhK3bN3WFUPGrgQx4X7GP0A1Te
-lzqkT3VSMXieImTASosK5L5Q8rryvgCeI9tQLn9EpYFCtU3LXvVgTreGNEEjMOnL
-dR7yOU+Fs775stn6ucqmdYarx7CvKUrNAhgEeHMonLe1cjYScF7NfLO1GIrQKJR2
-DE0f+uJZ52inOkO8ufh3WVQJSYszuS3HCY7w5oj1aP38k/y9zZdZvVvwAWZaiqBQ
-iwjVs6Kub76VVZZhRDf4iYs8k1Zh64nXdfQt250d8U5yMPF3wIJ+c1yhxwARAQAB
-tCpUaGUgZ1Zpc29yIEF1dGhvcnMgPGd2aXNvci1ib3RAZ29vZ2xlLmNvbT6JAk4E
-EwEKADgCGwMFCwkIBwIGFQoJCAsCBBYCAwECHgECF4AWIQRvHfheOnHCSRjnJ9Vv
-xtVU4yvZQwUCYO4TxQAKCRBvxtVU4yvZQ9UoEACLPV7CnEA2bjCPi0NCWB/Mo1WL
-evqv7Wv7vmXzI1K9DrqOhxuamQW75SVXg1df0hTJWbKFmDAip6NEC2Rg5P+A8hHj
-nW/VG+q4ZFT662jDhnXQiO9L7EZzjyqNF4yWYzzgnqEu/SmGkDLDYiUCcGBqS2oE
-EQfk7RHJSLMJXAnNDH7OUDgrirSssg/dlQ5uAHA9Au80VvC5fsTKza8b3Aydw3SV
-iB8/Yuikbl8wKbpSGiXtR4viElXjNips0+mBqaUk2xpqSBrsfN+FezcInVXaXFeq
-xtpq2/3M3DYbqCRjqeyd9wNi92FHdOusNrK4MYe0pAYbGjc65BwH+F0T4oJ8ZSJV
-lIt+FZ0MqM1T97XadybYFsJh8qvajQpZEPL+zzNncc4f1d80e7+lwIZV/al0FZWW
-Zlp7TpbeO/uW+lHs5W14YKwaQVh1whapKXTrATipNOOSCw2hnfrT8V7Hy55QWaGZ
-f4/kfy929EeCP16d/LqOClv0j0RBr6NhRBQ0l/BE/mXjJwIk6nKwi+Yi4ek1ARi6
-AlCMLn9AZF7aTGpvCiftzIrlyDfVZT5IX03TayxRHZ4b1Rj8eyJaHcjI49u83gkr
-4LGX08lEawn9nxFSx4RCg2swGiYw5F436wwwAIozqJuDASeTa3QND3au5v0oYWnl
-umDySUl5wPaAaALgzA==
-=5/8T
------END PGP PUBLIC KEY BLOCK-----
diff --git a/website/assets/favicons/apple-touch-icon-180x180.png b/website/assets/favicons/apple-touch-icon-180x180.png
deleted file mode 100644
index bf4b6ce9b..000000000
--- a/website/assets/favicons/apple-touch-icon-180x180.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/favicons/favicon-16x16.png b/website/assets/favicons/favicon-16x16.png
deleted file mode 100644
index 083264206..000000000
--- a/website/assets/favicons/favicon-16x16.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/favicons/favicon-32x32.png b/website/assets/favicons/favicon-32x32.png
deleted file mode 100644
index b8e4caff1..000000000
--- a/website/assets/favicons/favicon-32x32.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/favicons/favicon.ico b/website/assets/favicons/favicon.ico
deleted file mode 100644
index 9238b79d9..000000000
--- a/website/assets/favicons/favicon.ico
+++ /dev/null
Binary files differ
diff --git a/website/assets/favicons/pwa-192x192.png b/website/assets/favicons/pwa-192x192.png
deleted file mode 100644
index 5d2fab785..000000000
--- a/website/assets/favicons/pwa-192x192.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/favicons/pwa-512x512.png b/website/assets/favicons/pwa-512x512.png
deleted file mode 100644
index 23824439e..000000000
--- a/website/assets/favicons/pwa-512x512.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/favicons/tile150x150.png b/website/assets/favicons/tile150x150.png
deleted file mode 100644
index f76fcffae..000000000
--- a/website/assets/favicons/tile150x150.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/favicons/tile310x150.png b/website/assets/favicons/tile310x150.png
deleted file mode 100644
index 4f87e4c12..000000000
--- a/website/assets/favicons/tile310x150.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/favicons/tile310x310.png b/website/assets/favicons/tile310x310.png
deleted file mode 100644
index a2926d0bd..000000000
--- a/website/assets/favicons/tile310x310.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/favicons/tile70x70.png b/website/assets/favicons/tile70x70.png
deleted file mode 100644
index 96cc69fc4..000000000
--- a/website/assets/favicons/tile70x70.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/images/2019-11-18-security-basics-figure1.png b/website/assets/images/2019-11-18-security-basics-figure1.png
deleted file mode 100644
index 2a8134a7a..000000000
--- a/website/assets/images/2019-11-18-security-basics-figure1.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/images/2019-11-18-security-basics-figure2.png b/website/assets/images/2019-11-18-security-basics-figure2.png
deleted file mode 100644
index f8b416e1d..000000000
--- a/website/assets/images/2019-11-18-security-basics-figure2.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/images/2019-11-18-security-basics-figure3.png b/website/assets/images/2019-11-18-security-basics-figure3.png
deleted file mode 100644
index 833e3e2b5..000000000
--- a/website/assets/images/2019-11-18-security-basics-figure3.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/images/2020-04-02-networking-security-figure1.png b/website/assets/images/2020-04-02-networking-security-figure1.png
deleted file mode 100644
index b49cb0242..000000000
--- a/website/assets/images/2020-04-02-networking-security-figure1.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/images/2020-09-18-containing-a-real-vulnerability-figure1.png b/website/assets/images/2020-09-18-containing-a-real-vulnerability-figure1.png
deleted file mode 100644
index c750f0851..000000000
--- a/website/assets/images/2020-09-18-containing-a-real-vulnerability-figure1.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/images/background.jpg b/website/assets/images/background.jpg
deleted file mode 100644
index 81f8e332b..000000000
--- a/website/assets/images/background.jpg
+++ /dev/null
Binary files differ
diff --git a/website/assets/images/background_1080p.jpg b/website/assets/images/background_1080p.jpg
deleted file mode 100644
index d312595a6..000000000
--- a/website/assets/images/background_1080p.jpg
+++ /dev/null
Binary files differ
diff --git a/website/assets/logos/Makefile b/website/assets/logos/Makefile
deleted file mode 100644
index 49289ecc1..000000000
--- a/website/assets/logos/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/make -f
-
-srcs := $(wildcard *.svg)
-dsts := $(patsubst %.svg,%.png,$(srcs))
-
-all: $(dsts)
-.PHONY: all
-
-%.png %-16.png %-128.png %-1024.png: %.svg
- @inkscape -z -e $*.png $<
- @inkscape -z -w 16 -e $*-16.png $<
- @inkscape -z -w 128 -e $*-128.png $<
- @inkscape -z -w 1024 -e $*-1024.png $<
diff --git a/website/assets/logos/README.md b/website/assets/logos/README.md
deleted file mode 100644
index 2964982dd..000000000
--- a/website/assets/logos/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# Logos
-
-This directory contains logo assets.
-
-The colors used are:
-
-* Background (blue): #262262
-* Highlight (yellow): #FBB03B
-
-Use `make` to generate sized PNGs from SVGs.
diff --git a/website/assets/logos/logo_solo_monochrome.png b/website/assets/logos/logo_solo_monochrome.png
deleted file mode 100644
index e09c5ad5e..000000000
--- a/website/assets/logos/logo_solo_monochrome.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/logos/logo_solo_monochrome.svg b/website/assets/logos/logo_solo_monochrome.svg
deleted file mode 100644
index 73126fd8f..000000000
--- a/website/assets/logos/logo_solo_monochrome.svg
+++ /dev/null
@@ -1,73 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- version="1.1"
- id="svg2"
- xml:space="preserve"
- width="175.35599"
- height="193.20036"
- viewBox="0 0 175.35599 193.20036"
- sodipodi:docname="logo_solo_monochrome.svg"
- inkscape:version="0.92.3 (2405546, 2018-03-11)"><metadata
- id="metadata8"><rdf:RDF><cc:Work
- rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><defs
- id="defs6"><clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath20"><path
- d="M 0,821.614 H 1366 V 0 H 0 Z"
- id="path18"
- inkscape:connector-curvature="0" /></clipPath></defs><sodipodi:namedview
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1"
- objecttolerance="10"
- gridtolerance="10"
- guidetolerance="10"
- inkscape:pageopacity="0"
- inkscape:pageshadow="2"
- inkscape:window-width="640"
- inkscape:window-height="480"
- id="namedview4"
- showgrid="false"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0"
- inkscape:zoom="0.43085925"
- inkscape:cx="374.99057"
- inkscape:cy="88.483321"
- inkscape:window-x="0"
- inkscape:window-y="9"
- inkscape:window-maximized="0"
- inkscape:current-layer="g10" /><g
- id="g10"
- inkscape:groupmode="layer"
- inkscape:label="gvisor_final-logo_20190313"
- transform="matrix(1.3333333,0,0,-1.3333333,-614.45037,638.9628)"><g
- id="g48"
- transform="translate(548.2423,363.2485)"><path
- d="m 0,0 c 16.655,21.121 22.696,44.433 18.328,70.995 3.068,0 5.743,-0.023 8.417,0.007 2.222,0.025 4.443,0.102 6.664,0.175 4.79,0.154 4.818,0.165 5.88,-4.582 3.145,-14.051 2.18,-28.09 -0.179,-42.118 -0.25,-1.492 -0.7,-2.956 -0.864,-4.454 C 37.05,9.081 30.089,3.645 20.165,1.097 13.787,-0.54 7.323,-0.829 0,0"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path50"
- inkscape:connector-curvature="0" /></g><g
- id="g52"
- transform="translate(544.6891,396.1763)"><path
- d="M 0,0 C 0,2.594 -3.457,4.322 -3.457,4.322 -0.864,5.187 0,8.644 0,8.644 0,8.644 0.865,5.187 3.458,4.322 3.458,4.322 0,2.594 0,0 m -17.099,6.454 c 0,6.742 -8.989,11.236 -8.989,11.236 6.742,2.248 8.989,11.238 8.989,11.238 0,0 2.247,-8.99 8.99,-11.238 0,0 -8.99,-4.494 -8.99,-11.236"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path54"
- inkscape:connector-curvature="0" /></g><g
- id="g56"
- transform="translate(485.0861,429.7925)"><path
- d="m 0,0 c 0,0 -2.214,3.359 -9.736,2.059 0,0 5.987,28.738 36.298,38.806 C 26.562,40.865 -2.82,24.275 0,0 M 0.583,-33.208 0.58,-33.211 c -1.297,-2.026 -2.821,-3.579 -4.53,-4.616 -1.515,-0.933 -3.178,-1.425 -4.743,-1.425 -0.528,0 -1.044,0.057 -1.539,0.17 l -2.209,0.507 2.184,0.603 c 1.63,0.451 3.063,1.347 4.259,2.664 1.014,1.108 1.856,2.485 2.577,4.213 1.175,2.82 1.784,6.162 1.81,9.936 -0.049,3.718 -0.683,7.054 -1.886,9.902 -0.703,1.654 -1.585,3.056 -2.621,4.163 -1.227,1.311 -2.671,2.178 -4.294,2.576 l -2.187,0.538 2.179,0.572 c 0.48,0.126 0.998,0.196 1.539,0.209 h 0.031 l 1.688,-0.153 c 1.045,-0.206 2.104,-0.616 3.061,-1.185 1.755,-1.031 3.302,-2.567 4.598,-4.565 2.155,-3.374 3.315,-7.536 3.357,-12.042 -0.028,-4.548 -1.159,-8.717 -3.271,-12.064 m 101.949,51.176 c 0,0 -0.075,0.05 -0.226,0.136 -0.541,0.327 -1.113,0.603 -1.715,0.815 -3.044,1.241 -9.881,3.186 -21.906,2.623 -0.029,0 -0.056,0 -0.085,-0.001 C 52.916,21.123 30.022,10.922 30.022,10.922 c 0,0 1.439,1.761 3.453,3.692 10e-4,0 10e-4,10e-4 0.002,10e-4 1.052,0.974 2.355,2.076 3.912,3.227 0.046,0.031 0.088,0.063 0.124,0.093 8.708,6.384 25.34,14.163 51.625,9.541 -0.989,1.124 -2.002,2.192 -3.036,3.215 -1.112,0.883 -2.231,1.693 -3.354,2.456 0.02,-0.012 0.039,-0.023 0.059,-0.036 0,0 -17.016,19.415 -48.683,15.891 C 30.19,48.622 25.983,47.867 21.66,46.564 21.653,46.563 21.646,46.562 21.64,46.56 L 21.638,46.558 C 11.48,43.492 0.683,37.387 -8.719,25.911 -9.29,25.193 -9.841,24.479 -10.376,23.77 c -0.065,-0.099 -0.141,-0.202 -0.226,-0.307 -1.182,-1.581 -2.271,-3.14 -3.279,-4.674 -3.266,-5.427 -5.631,-11.665 -6.311,-13.545 -10.58,-32.401 2.586,-57.55 5.144,-61.967 8.93,-15.158 24.565,-32.355 50.771,-37.327 0.197,-0.047 0.382,-0.101 0.582,-0.147 1.723,-0.367 4.864,-0.929 8.908,-1.196 1.524,-0.069 3.088,-0.094 4.699,-0.067 1.548,-0.009 2.999,0.017 4.335,0.064 0.396,0.028 0.74,0.041 1.044,0.044 5.102,0.238 8.272,0.775 8.272,0.775 -26.543,1.299 -39.847,13.409 -45.691,21.142 -1.325,1.648 -2.46,3.42 -3.377,5.316 -0.361,0.71 -0.523,1.115 -0.523,1.115 8.459,-7.181 20.294,-13.362 20.294,-13.362 10.611,-4.993 21.737,-7.451 33.524,-5.837 0,0 24.645,2.263 34.464,25.09 -0.423,0.322 -0.366,0.278 -0.79,0.6 -0.652,-0.526 -1.294,-1.023 -1.926,-1.496 -0.991,-0.651 -1.964,-1.357 -2.937,-2.07 -5.265,-3.485 -9.561,-5.128 -12.12,-5.879 -19.359,-4.887 -37.273,-1.252 -52.93,12.455 -7.253,6.349 -11.754,14.352 -11.944,24.291 -0.124,6.465 -0.19,12.935 -0.136,19.4 0.085,10.181 7.246,17.921 17.394,19.284 20.561,2.759 41.234,3.71 61.947,4.193 3.174,0.073 5.981,0.037 7.458,-3.356 0.025,-0.058 0.363,0.02 0.552,0.035 1.4,4.461 -0.617,9.27 -4.29,11.624"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path58"
- inkscape:connector-curvature="0" /></g></g></svg>
\ No newline at end of file
diff --git a/website/assets/logos/logo_solo_on_dark-1024.png b/website/assets/logos/logo_solo_on_dark-1024.png
deleted file mode 100644
index 6df428c65..000000000
--- a/website/assets/logos/logo_solo_on_dark-1024.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/logos/logo_solo_on_dark-128.png b/website/assets/logos/logo_solo_on_dark-128.png
deleted file mode 100644
index 78a85475f..000000000
--- a/website/assets/logos/logo_solo_on_dark-128.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/logos/logo_solo_on_dark-16.png b/website/assets/logos/logo_solo_on_dark-16.png
deleted file mode 100644
index 4f1e91c02..000000000
--- a/website/assets/logos/logo_solo_on_dark-16.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/logos/logo_solo_on_dark.png b/website/assets/logos/logo_solo_on_dark.png
deleted file mode 100644
index da20756f7..000000000
--- a/website/assets/logos/logo_solo_on_dark.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/logos/logo_solo_on_dark.svg b/website/assets/logos/logo_solo_on_dark.svg
deleted file mode 100644
index ae8d9e879..000000000
--- a/website/assets/logos/logo_solo_on_dark.svg
+++ /dev/null
@@ -1,73 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- version="1.1"
- id="svg2"
- xml:space="preserve"
- width="175.35599"
- height="193.19984"
- viewBox="0 0 175.35599 193.19985"
- sodipodi:docname="logo_solo_on_dark.svg"
- inkscape:version="0.92.3 (2405546, 2018-03-11)"><metadata
- id="metadata8"><rdf:RDF><cc:Work
- rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title /></cc:Work></rdf:RDF></metadata><defs
- id="defs6"><clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath20"><path
- d="M 0,821.614 H 1366 V 0 H 0 Z"
- id="path18"
- inkscape:connector-curvature="0" /></clipPath></defs><sodipodi:namedview
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1"
- objecttolerance="10"
- gridtolerance="10"
- guidetolerance="10"
- inkscape:pageopacity="0"
- inkscape:pageshadow="2"
- inkscape:window-width="1278"
- inkscape:window-height="699"
- id="namedview4"
- showgrid="false"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0"
- inkscape:zoom="0.8617185"
- inkscape:cx="257.20407"
- inkscape:cy="172.193"
- inkscape:window-x="0"
- inkscape:window-y="0"
- inkscape:window-maximized="1"
- inkscape:current-layer="g10" /><g
- id="g10"
- inkscape:groupmode="layer"
- inkscape:label="gvisor_final-logo_20190313"
- transform="matrix(1.3333333,0,0,-1.3333333,-614.45037,638.96254)"><g
- id="g48"
- transform="translate(548.2423,363.2484)"><path
- d="m 0,0 c 16.655,21.121 22.696,44.434 18.328,70.995 3.068,0 5.743,-0.023 8.417,0.007 2.222,0.025 4.443,0.102 6.664,0.175 4.79,0.154 4.818,0.165 5.88,-4.582 3.145,-14.05 2.18,-28.089 -0.179,-42.118 -0.25,-1.492 -0.7,-2.956 -0.864,-4.454 C 37.05,9.081 30.089,3.645 20.165,1.097 13.787,-0.54 7.323,-0.829 0,0"
- style="fill:#fbb03b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path50"
- inkscape:connector-curvature="0" /></g><g
- id="g52"
- transform="translate(544.6891,396.1771)"><path
- d="M 0,0 C 0,2.593 -3.457,4.321 -3.457,4.321 -0.864,5.186 0,8.644 0,8.644 0,8.644 0.865,5.186 3.458,4.321 3.458,4.321 0,2.593 0,0 m -17.099,6.453 c 0,6.742 -8.989,11.237 -8.989,11.237 6.742,2.247 8.989,11.237 8.989,11.237 0,0 2.247,-8.99 8.99,-11.237 0,0 -8.99,-4.495 -8.99,-11.237"
- style="fill:#fbb03b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path54"
- inkscape:connector-curvature="0" /></g><g
- id="g56"
- transform="translate(485.0861,429.7923)"><path
- d="m 0,0 c 0,0 -2.214,3.359 -9.736,2.059 0,0 5.987,28.738 36.298,38.806 C 26.562,40.865 -2.82,24.275 0,0 M 0.583,-33.208 0.58,-33.211 c -1.297,-2.026 -2.821,-3.578 -4.53,-4.615 -1.515,-0.934 -3.178,-1.425 -4.743,-1.425 -0.528,0 -1.044,0.056 -1.539,0.169 l -2.209,0.507 2.184,0.603 c 1.63,0.451 3.063,1.347 4.259,2.664 1.014,1.108 1.856,2.485 2.577,4.214 1.175,2.819 1.784,6.161 1.81,9.935 -0.049,3.719 -0.683,7.054 -1.886,9.902 -0.703,1.655 -1.585,3.057 -2.621,4.163 -1.227,1.311 -2.671,2.178 -4.294,2.576 l -2.187,0.538 2.179,0.572 c 0.48,0.127 0.998,0.196 1.539,0.209 l 0.031,10e-4 1.688,-0.154 c 1.045,-0.206 2.104,-0.615 3.061,-1.184 1.755,-1.032 3.302,-2.568 4.598,-4.565 2.155,-3.374 3.315,-7.537 3.357,-12.043 -0.028,-4.548 -1.159,-8.717 -3.271,-12.064 m 101.949,51.176 c 0,0 -0.075,0.051 -0.226,0.137 -0.541,0.326 -1.113,0.602 -1.715,0.814 -3.044,1.241 -9.881,3.187 -21.906,2.623 -0.029,0 -0.056,0 -0.085,-0.001 C 52.916,21.123 30.022,10.923 30.022,10.923 c 0,0 1.439,1.76 3.453,3.691 10e-4,10e-4 10e-4,10e-4 0.002,0.002 1.052,0.973 2.355,2.076 3.912,3.226 0.046,0.032 0.088,0.063 0.124,0.094 8.708,6.383 25.34,14.162 51.625,9.54 -0.989,1.124 -2.002,2.193 -3.036,3.215 -1.112,0.884 -2.231,1.694 -3.354,2.456 0.02,-0.012 0.039,-0.023 0.059,-0.036 0,0 -17.016,19.415 -48.683,15.891 C 30.19,48.622 25.983,47.868 21.66,46.564 21.653,46.563 21.646,46.562 21.64,46.56 L 21.638,46.558 C 11.48,43.492 0.683,37.387 -8.719,25.911 c -0.571,-0.718 -1.122,-1.431 -1.657,-2.14 -0.065,-0.1 -0.141,-0.202 -0.226,-0.307 -1.182,-1.582 -2.271,-3.141 -3.279,-4.674 -3.266,-5.427 -5.631,-11.666 -6.311,-13.546 -10.58,-32.401 2.586,-57.549 5.144,-61.967 8.93,-15.157 24.565,-32.355 50.771,-37.327 0.197,-0.046 0.382,-0.101 0.582,-0.146 1.723,-0.367 4.864,-0.929 8.908,-1.197 1.524,-0.069 3.088,-0.094 4.699,-0.066 1.548,-0.01 2.999,0.017 4.335,0.064 0.396,0.027 0.74,0.041 1.044,0.044 5.102,0.237 8.272,0.774 8.272,0.774 -26.543,1.3 -39.847,13.41 -45.691,21.142 -1.325,1.648 -2.46,3.421 -3.377,5.316 -0.361,0.711 -0.523,1.115 -0.523,1.115 8.459,-7.18 20.294,-13.361 20.294,-13.361 10.611,-4.993 21.737,-7.452 33.524,-5.838 0,0 24.645,2.264 34.464,25.09 -0.423,0.322 -0.366,0.278 -0.79,0.6 -0.652,-0.526 -1.294,-1.022 -1.926,-1.496 -0.991,-0.65 -1.964,-1.357 -2.937,-2.07 -5.265,-3.485 -9.561,-5.127 -12.12,-5.879 -19.359,-4.887 -37.273,-1.252 -52.93,12.455 -7.253,6.349 -11.754,14.353 -11.944,24.291 -0.124,6.466 -0.19,12.935 -0.136,19.4 0.085,10.181 7.246,17.921 17.394,19.284 20.561,2.759 41.234,3.71 61.947,4.193 3.174,0.074 5.981,0.037 7.458,-3.356 0.025,-0.058 0.363,0.02 0.552,0.035 1.4,4.462 -0.617,9.27 -4.29,11.624"
- style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path58"
- inkscape:connector-curvature="0" /></g></g></svg>
\ No newline at end of file
diff --git a/website/assets/logos/logo_solo_on_dark_full-1024.png b/website/assets/logos/logo_solo_on_dark_full-1024.png
deleted file mode 100644
index 8d597dd3d..000000000
--- a/website/assets/logos/logo_solo_on_dark_full-1024.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/logos/logo_solo_on_dark_full-128.png b/website/assets/logos/logo_solo_on_dark_full-128.png
deleted file mode 100644
index fe6dd5dea..000000000
--- a/website/assets/logos/logo_solo_on_dark_full-128.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/logos/logo_solo_on_dark_full-16.png b/website/assets/logos/logo_solo_on_dark_full-16.png
deleted file mode 100644
index f9aa7dfdd..000000000
--- a/website/assets/logos/logo_solo_on_dark_full-16.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/logos/logo_solo_on_dark_full.png b/website/assets/logos/logo_solo_on_dark_full.png
deleted file mode 100644
index 611b0565e..000000000
--- a/website/assets/logos/logo_solo_on_dark_full.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/logos/logo_solo_on_dark_full.svg b/website/assets/logos/logo_solo_on_dark_full.svg
deleted file mode 100644
index 6440835b1..000000000
--- a/website/assets/logos/logo_solo_on_dark_full.svg
+++ /dev/null
@@ -1,79 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- version="1.1"
- id="svg2"
- xml:space="preserve"
- width="246.17325"
- height="246.17325"
- viewBox="0 0 246.17325 246.17326"
- sodipodi:docname="logo_solo_on_dark_full.svg"
- inkscape:version="0.92.3 (2405546, 2018-03-11)"><metadata
- id="metadata8"><rdf:RDF><cc:Work
- rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title /></cc:Work></rdf:RDF></metadata><defs
- id="defs6"><clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath20"><path
- d="M 0,821.614 H 1366 V 0 H 0 Z"
- id="path18"
- inkscape:connector-curvature="0" /></clipPath></defs><sodipodi:namedview
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1"
- objecttolerance="10"
- gridtolerance="10"
- guidetolerance="10"
- inkscape:pageopacity="0"
- inkscape:pageshadow="2"
- inkscape:window-width="1278"
- inkscape:window-height="699"
- id="namedview4"
- showgrid="false"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0"
- inkscape:zoom="0.8617185"
- inkscape:cx="291.21659"
- inkscape:cy="198.28704"
- inkscape:window-x="0"
- inkscape:window-y="0"
- inkscape:window-maximized="1"
- inkscape:current-layer="g10" /><g
- id="g10"
- inkscape:groupmode="layer"
- inkscape:label="gvisor_final-logo_20190313"
- transform="matrix(1.3333333,0,0,-1.3333333,-580.43785,665.8419)"><circle
- id="path83"
- cx="527.64337"
- cy="-407.06647"
- r="92.314972"
- transform="scale(1,-1)"
- style="fill:#262262;fill-opacity:1;stroke-width:0.48076925" /><g
- id="g48"
- transform="translate(548.2423,363.2484)"><path
- d="m 0,0 c 16.655,21.121 22.696,44.434 18.328,70.995 3.068,0 5.743,-0.023 8.417,0.007 2.222,0.025 4.443,0.102 6.664,0.175 4.79,0.154 4.818,0.165 5.88,-4.582 3.145,-14.05 2.18,-28.089 -0.179,-42.118 -0.25,-1.492 -0.7,-2.956 -0.864,-4.454 C 37.05,9.081 30.089,3.645 20.165,1.097 13.787,-0.54 7.323,-0.829 0,0"
- style="fill:#fbb03b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path50"
- inkscape:connector-curvature="0" /></g><g
- id="g52"
- transform="translate(544.6891,396.1771)"><path
- d="M 0,0 C 0,2.593 -3.457,4.321 -3.457,4.321 -0.864,5.186 0,8.644 0,8.644 0,8.644 0.865,5.186 3.458,4.321 3.458,4.321 0,2.593 0,0 m -17.099,6.453 c 0,6.742 -8.989,11.237 -8.989,11.237 6.742,2.247 8.989,11.237 8.989,11.237 0,0 2.247,-8.99 8.99,-11.237 0,0 -8.99,-4.495 -8.99,-11.237"
- style="fill:#fbb03b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path54"
- inkscape:connector-curvature="0" /></g><g
- id="g56"
- transform="translate(485.0861,429.7923)"><path
- d="m 0,0 c 0,0 -2.214,3.359 -9.736,2.059 0,0 5.987,28.738 36.298,38.806 C 26.562,40.865 -2.82,24.275 0,0 M 0.583,-33.208 0.58,-33.211 c -1.297,-2.026 -2.821,-3.578 -4.53,-4.615 -1.515,-0.934 -3.178,-1.425 -4.743,-1.425 -0.528,0 -1.044,0.056 -1.539,0.169 l -2.209,0.507 2.184,0.603 c 1.63,0.451 3.063,1.347 4.259,2.664 1.014,1.108 1.856,2.485 2.577,4.214 1.175,2.819 1.784,6.161 1.81,9.935 -0.049,3.719 -0.683,7.054 -1.886,9.902 -0.703,1.655 -1.585,3.057 -2.621,4.163 -1.227,1.311 -2.671,2.178 -4.294,2.576 l -2.187,0.538 2.179,0.572 c 0.48,0.127 0.998,0.196 1.539,0.209 l 0.031,10e-4 1.688,-0.154 c 1.045,-0.206 2.104,-0.615 3.061,-1.184 1.755,-1.032 3.302,-2.568 4.598,-4.565 2.155,-3.374 3.315,-7.537 3.357,-12.043 -0.028,-4.548 -1.159,-8.717 -3.271,-12.064 m 101.949,51.176 c 0,0 -0.075,0.051 -0.226,0.137 -0.541,0.326 -1.113,0.602 -1.715,0.814 -3.044,1.241 -9.881,3.187 -21.906,2.623 -0.029,0 -0.056,0 -0.085,-0.001 C 52.916,21.123 30.022,10.923 30.022,10.923 c 0,0 1.439,1.76 3.453,3.691 10e-4,10e-4 10e-4,10e-4 0.002,0.002 1.052,0.973 2.355,2.076 3.912,3.226 0.046,0.032 0.088,0.063 0.124,0.094 8.708,6.383 25.34,14.162 51.625,9.54 -0.989,1.124 -2.002,2.193 -3.036,3.215 -1.112,0.884 -2.231,1.694 -3.354,2.456 0.02,-0.012 0.039,-0.023 0.059,-0.036 0,0 -17.016,19.415 -48.683,15.891 C 30.19,48.622 25.983,47.868 21.66,46.564 21.653,46.563 21.646,46.562 21.64,46.56 L 21.638,46.558 C 11.48,43.492 0.683,37.387 -8.719,25.911 c -0.571,-0.718 -1.122,-1.431 -1.657,-2.14 -0.065,-0.1 -0.141,-0.202 -0.226,-0.307 -1.182,-1.582 -2.271,-3.141 -3.279,-4.674 -3.266,-5.427 -5.631,-11.666 -6.311,-13.546 -10.58,-32.401 2.586,-57.549 5.144,-61.967 8.93,-15.157 24.565,-32.355 50.771,-37.327 0.197,-0.046 0.382,-0.101 0.582,-0.146 1.723,-0.367 4.864,-0.929 8.908,-1.197 1.524,-0.069 3.088,-0.094 4.699,-0.066 1.548,-0.01 2.999,0.017 4.335,0.064 0.396,0.027 0.74,0.041 1.044,0.044 5.102,0.237 8.272,0.774 8.272,0.774 -26.543,1.3 -39.847,13.41 -45.691,21.142 -1.325,1.648 -2.46,3.421 -3.377,5.316 -0.361,0.711 -0.523,1.115 -0.523,1.115 8.459,-7.18 20.294,-13.361 20.294,-13.361 10.611,-4.993 21.737,-7.452 33.524,-5.838 0,0 24.645,2.264 34.464,25.09 -0.423,0.322 -0.366,0.278 -0.79,0.6 -0.652,-0.526 -1.294,-1.022 -1.926,-1.496 -0.991,-0.65 -1.964,-1.357 -2.937,-2.07 -5.265,-3.485 -9.561,-5.127 -12.12,-5.879 -19.359,-4.887 -37.273,-1.252 -52.93,12.455 -7.253,6.349 -11.754,14.353 -11.944,24.291 -0.124,6.466 -0.19,12.935 -0.136,19.4 0.085,10.181 7.246,17.921 17.394,19.284 20.561,2.759 41.234,3.71 61.947,4.193 3.174,0.074 5.981,0.037 7.458,-3.356 0.025,-0.058 0.363,0.02 0.552,0.035 1.4,4.462 -0.617,9.27 -4.29,11.624"
- style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path58"
- inkscape:connector-curvature="0" /></g></g></svg>
\ No newline at end of file
diff --git a/website/assets/logos/logo_solo_on_white.png b/website/assets/logos/logo_solo_on_white.png
deleted file mode 100644
index ca539cdff..000000000
--- a/website/assets/logos/logo_solo_on_white.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/logos/logo_solo_on_white.svg b/website/assets/logos/logo_solo_on_white.svg
deleted file mode 100644
index d794ad8e7..000000000
--- a/website/assets/logos/logo_solo_on_white.svg
+++ /dev/null
@@ -1,73 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- version="1.1"
- id="svg2"
- xml:space="preserve"
- width="175.35599"
- height="193.20036"
- viewBox="0 0 175.35599 193.20037"
- sodipodi:docname="logo_solo_on_white.svg"
- inkscape:version="0.92.3 (2405546, 2018-03-11)"><metadata
- id="metadata8"><rdf:RDF><cc:Work
- rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><defs
- id="defs6"><clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath18"><path
- d="M 0,821.614 H 1366 V 0 H 0 Z"
- id="path16"
- inkscape:connector-curvature="0" /></clipPath></defs><sodipodi:namedview
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1"
- objecttolerance="10"
- gridtolerance="10"
- guidetolerance="10"
- inkscape:pageopacity="0"
- inkscape:pageshadow="2"
- inkscape:window-width="640"
- inkscape:window-height="480"
- id="namedview4"
- showgrid="false"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0"
- inkscape:zoom="0.43085925"
- inkscape:cx="370.53985"
- inkscape:cy="50.91009"
- inkscape:window-x="0"
- inkscape:window-y="9"
- inkscape:window-maximized="0"
- inkscape:current-layer="g10" /><g
- id="g10"
- inkscape:groupmode="layer"
- inkscape:label="gvisor_final-logo_20190313"
- transform="matrix(1.3333333,0,0,-1.3333333,-614.45037,638.9628)"><g
- id="g46"
- transform="translate(548.2428,363.2485)"><path
- d="m 0,0 c 16.655,21.121 22.696,44.433 18.328,70.995 3.068,0 5.742,-0.023 8.417,0.007 2.221,0.025 4.442,0.102 6.663,0.175 4.79,0.154 4.818,0.165 5.881,-4.582 C 42.434,52.544 41.469,38.505 39.11,24.477 38.859,22.985 38.409,21.521 38.246,20.023 37.05,9.081 30.089,3.645 20.164,1.097 13.786,-0.54 7.323,-0.829 0,0"
- style="fill:#fbb03b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path48"
- inkscape:connector-curvature="0" /></g><g
- id="g50"
- transform="translate(544.6891,396.1763)"><path
- d="M 0,0 C 0,2.594 -3.457,4.322 -3.457,4.322 -0.864,5.187 0,8.644 0,8.644 0,8.644 0.865,5.187 3.458,4.322 3.458,4.322 0,2.594 0,0 m -17.099,6.454 c 0,6.742 -8.989,11.236 -8.989,11.236 6.742,2.248 8.989,11.238 8.989,11.238 0,0 2.248,-8.99 8.99,-11.238 0,0 -8.99,-4.494 -8.99,-11.236"
- style="fill:#fbb03b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path52"
- inkscape:connector-curvature="0" /></g><g
- id="g54"
- transform="translate(485.0861,429.7925)"><path
- d="m 0,0 c 0,0 -2.214,3.359 -9.736,2.059 0,0 5.987,28.738 36.298,38.806 C 26.562,40.865 -2.82,24.275 0,0 M 0.583,-33.208 0.58,-33.211 c -1.297,-2.026 -2.821,-3.579 -4.53,-4.616 -1.515,-0.933 -3.178,-1.425 -4.743,-1.425 -0.528,0 -1.044,0.057 -1.539,0.17 l -2.209,0.507 2.184,0.603 c 1.63,0.451 3.063,1.347 4.259,2.664 1.014,1.108 1.856,2.485 2.577,4.213 1.175,2.82 1.784,6.162 1.81,9.936 -0.049,3.718 -0.683,7.054 -1.886,9.902 -0.703,1.654 -1.585,3.056 -2.621,4.163 -1.227,1.311 -2.671,2.178 -4.294,2.576 l -2.187,0.538 2.179,0.572 c 0.48,0.126 0.998,0.196 1.539,0.209 h 0.031 l 1.688,-0.153 c 1.045,-0.206 2.104,-0.616 3.061,-1.185 1.755,-1.031 3.302,-2.567 4.598,-4.565 2.155,-3.374 3.315,-7.536 3.357,-12.042 -0.028,-4.548 -1.159,-8.717 -3.271,-12.064 m 101.949,51.176 c 0,0 -0.075,0.05 -0.226,0.136 -0.541,0.327 -1.113,0.603 -1.715,0.815 -3.044,1.241 -9.881,3.186 -21.906,2.623 -0.029,0 -0.056,0 -0.085,-0.001 C 52.916,21.123 30.022,10.922 30.022,10.922 c 0,0 1.439,1.761 3.453,3.692 10e-4,0 10e-4,10e-4 0.002,10e-4 1.052,0.974 2.355,2.076 3.912,3.227 0.046,0.031 0.088,0.063 0.124,0.093 8.708,6.384 25.341,14.163 51.625,9.541 -0.989,1.124 -2.002,2.192 -3.036,3.215 -1.112,0.883 -2.231,1.693 -3.354,2.456 0.02,-0.012 0.039,-0.023 0.059,-0.036 0,0 -17.016,19.415 -48.683,15.891 C 30.19,48.622 25.983,47.867 21.66,46.564 21.653,46.563 21.646,46.562 21.64,46.56 L 21.638,46.558 C 11.48,43.492 0.683,37.387 -8.719,25.911 -9.29,25.193 -9.841,24.479 -10.376,23.77 c -0.065,-0.099 -0.141,-0.202 -0.226,-0.307 -1.182,-1.581 -2.271,-3.14 -3.279,-4.674 -3.266,-5.427 -5.631,-11.665 -6.311,-13.545 -10.58,-32.401 2.586,-57.55 5.144,-61.967 8.93,-15.158 24.565,-32.355 50.771,-37.327 0.197,-0.047 0.382,-0.101 0.582,-0.147 1.723,-0.367 4.864,-0.929 8.908,-1.196 1.524,-0.069 3.088,-0.094 4.699,-0.067 1.548,-0.009 2.999,0.017 4.335,0.064 0.396,0.028 0.74,0.041 1.044,0.044 5.102,0.238 8.272,0.775 8.272,0.775 -26.542,1.299 -39.847,13.409 -45.691,21.142 -1.325,1.648 -2.46,3.42 -3.377,5.316 -0.361,0.71 -0.523,1.115 -0.523,1.115 8.459,-7.181 20.294,-13.362 20.294,-13.362 10.611,-4.993 21.737,-7.451 33.525,-5.837 0,0 24.644,2.263 34.463,25.09 -0.423,0.322 -0.366,0.278 -0.79,0.6 -0.651,-0.526 -1.294,-1.023 -1.926,-1.496 -0.991,-0.651 -1.964,-1.357 -2.937,-2.07 -5.265,-3.485 -9.561,-5.128 -12.12,-5.879 -19.359,-4.887 -37.273,-1.252 -52.93,12.455 -7.253,6.349 -11.754,14.352 -11.944,24.291 -0.124,6.465 -0.19,12.935 -0.136,19.4 0.085,10.181 7.246,17.921 17.394,19.284 20.561,2.759 41.234,3.71 61.948,4.193 3.173,0.073 5.98,0.037 7.457,-3.356 0.025,-0.058 0.363,0.02 0.552,0.035 1.4,4.461 -0.617,9.27 -4.29,11.624"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path56"
- inkscape:connector-curvature="0" /></g></g></svg>
\ No newline at end of file
diff --git a/website/assets/logos/logo_solo_on_white_bordered-1024.png b/website/assets/logos/logo_solo_on_white_bordered-1024.png
deleted file mode 100644
index 62bb88d50..000000000
--- a/website/assets/logos/logo_solo_on_white_bordered-1024.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/logos/logo_solo_on_white_bordered-128.png b/website/assets/logos/logo_solo_on_white_bordered-128.png
deleted file mode 100644
index a8988766c..000000000
--- a/website/assets/logos/logo_solo_on_white_bordered-128.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/logos/logo_solo_on_white_bordered-16.png b/website/assets/logos/logo_solo_on_white_bordered-16.png
deleted file mode 100644
index a545c49cf..000000000
--- a/website/assets/logos/logo_solo_on_white_bordered-16.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/logos/logo_solo_on_white_bordered.png b/website/assets/logos/logo_solo_on_white_bordered.png
deleted file mode 100644
index cc99b7c51..000000000
--- a/website/assets/logos/logo_solo_on_white_bordered.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/logos/logo_solo_on_white_bordered.svg b/website/assets/logos/logo_solo_on_white_bordered.svg
deleted file mode 100644
index 2e26f144a..000000000
--- a/website/assets/logos/logo_solo_on_white_bordered.svg
+++ /dev/null
@@ -1,82 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- version="1.1"
- id="svg2"
- xml:space="preserve"
- width="190.7361"
- height="207.92123"
- viewBox="0 0 190.7361 207.92124"
- sodipodi:docname="logo_solo_on_white_bordered.svg"
- inkscape:version="0.92.3 (2405546, 2018-03-11)"><metadata
- id="metadata8"><rdf:RDF><cc:Work
- rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><defs
- id="defs6"><clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath18"><path
- d="M 0,821.614 H 1366 V 0 H 0 Z"
- id="path16"
- inkscape:connector-curvature="0" /></clipPath></defs><sodipodi:namedview
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1"
- objecttolerance="10"
- gridtolerance="10"
- guidetolerance="10"
- inkscape:pageopacity="0"
- inkscape:pageshadow="2"
- inkscape:window-width="640"
- inkscape:window-height="480"
- id="namedview4"
- showgrid="false"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0"
- inkscape:zoom="0.609327"
- inkscape:cx="298.55736"
- inkscape:cy="108.65533"
- inkscape:window-x="0"
- inkscape:window-y="0"
- inkscape:window-maximized="0"
- inkscape:current-layer="g14" /><g
- id="g10"
- inkscape:groupmode="layer"
- inkscape:label="gvisor_final-logo_20190313"
- transform="matrix(1.3333333,0,0,-1.3333333,-612.10927,647.00852)"><g
- id="g12"><g
- id="g14"
- clip-path="url(#clipPath18)"><g
- id="g46"
- transform="translate(594.4321,453.1439)"><path
- d="m 0,0 c 0,0 -0.204,0.139 -0.45,0.277 -0.906,0.547 -1.856,1 -2.744,1.321 -1.685,0.679 -3.62,1.262 -5.761,1.738 l -2.063,2.533 c -1.793,2.283 -6.09,7.358 -13.132,13.334 -8.142,6.209 -24.212,15.045 -47.595,12.442 -4.578,-0.444 -9.077,-1.318 -13.368,-2.597 l -0.232,-0.068 c -12.978,-3.918 -24.155,-11.512 -33.24,-22.601 -0.6,-0.754 -1.179,-1.504 -1.783,-2.307 l -0.134,-0.191 -0.062,-0.074 c -1.194,-1.596 -2.36,-3.258 -3.485,-4.969 l -0.125,-0.198 c -3.559,-5.915 -6.126,-12.72 -6.85,-14.73 -11.284,-34.556 2.735,-61.502 5.669,-66.567 8.482,-14.4 23.945,-32.461 50.005,-38.975 1.42,-0.354 2.872,-0.676 4.356,-0.96 0.016,-0.004 0.036,-0.009 0.053,-0.013 6.537,-1.118 16.647,-1.928 29.969,-0.317 3.21,0.621 8.236,2.535 8.646,8.445 2.209,0.842 10.261,3.812 10.261,3.812 8.572,3.874 18.586,11.106 24.334,24.546 1.21,2.83 0.277,6.128 -2.171,7.994 l -0.202,0.152 c 0.639,1.557 1.125,3.209 1.488,4.93 l 0.019,0.009 c 0,0 0.063,0.325 0.164,0.854 0.013,0.073 0.028,0.144 0.041,0.215 0.402,2.114 1.294,6.91 1.719,10.035 0.02,0.149 0.029,0.268 0.033,0.371 2.136,14.686 2.099,26.608 -0.156,37.847 1.443,0.114 2.672,1.095 3.106,2.477 l 0.655,2.086 C 9.188,-12.066 6.243,-4.003 0,0"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path48"
- inkscape:connector-curvature="0" /></g><g
- id="g50"
- transform="translate(551.9267,364.2689)"><path
- d="m 0,0 c 16.398,20.796 22.346,43.748 18.045,69.9 3.02,0 5.654,-0.022 8.288,0.007 2.187,0.025 4.374,0.101 6.56,0.172 4.716,0.152 4.743,0.163 5.79,-4.512 C 41.779,51.734 40.83,37.911 38.507,24.1 38.26,22.631 37.817,21.189 37.656,19.715 36.479,8.941 29.625,3.588 19.853,1.08 13.574,-0.531 7.209,-0.816 0,0"
- style="fill:#fbb03b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path52"
- inkscape:connector-curvature="0" /></g><g
- id="g54"
- transform="translate(548.4282,396.6898)"><path
- d="M 0,0 C 0,2.553 -3.404,4.255 -3.404,4.255 -0.851,5.105 0,8.51 0,8.51 0,8.51 0.851,5.105 3.404,4.255 3.404,4.255 0,2.553 0,0 m -16.835,6.354 c 0,6.638 -8.851,11.063 -8.851,11.063 6.638,2.213 8.851,11.063 8.851,11.063 0,0 2.212,-8.85 8.85,-11.063 0,0 -8.85,-4.425 -8.85,-11.063"
- style="fill:#fbb03b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path56"
- inkscape:connector-curvature="0" /></g><g
- id="g58"
- transform="translate(489.744,429.7865)"><path
- d="m 0,0 c 0,0 -2.18,3.308 -9.585,2.026 0,0 5.894,28.296 35.737,38.209 C 26.152,40.235 -2.777,23.901 0,0 m 0.574,-32.696 -0.003,-0.003 c -1.277,-1.994 -2.778,-3.523 -4.46,-4.544 -1.492,-0.919 -3.13,-1.403 -4.67,-1.403 -0.519,0 -1.028,0.055 -1.516,0.167 l -2.174,0.5 2.15,0.593 c 1.605,0.443 3.016,1.325 4.194,2.623 0.997,1.09 1.826,2.447 2.536,4.149 1.158,2.775 1.757,6.066 1.783,9.781 -0.048,3.661 -0.673,6.945 -1.857,9.75 -0.693,1.629 -1.56,3.009 -2.58,4.099 -1.208,1.29 -2.63,2.143 -4.228,2.536 l -2.154,0.529 2.145,0.564 c 0.473,0.125 0.983,0.193 1.516,0.205 l 0.031,0.002 1.662,-0.152 c 1.029,-0.203 2.071,-0.605 3.014,-1.167 1.727,-1.015 3.25,-2.527 4.526,-4.493 2.122,-3.323 3.264,-7.421 3.305,-11.857 C 3.767,-25.296 2.653,-29.4 0.574,-32.696 M 100.951,17.69 c 0,0 -0.074,0.05 -0.223,0.136 -0.532,0.32 -1.095,0.593 -1.688,0.801 -2.997,1.223 -9.729,3.139 -21.568,2.583 -0.029,0 -0.055,0 -0.084,-0.001 C 52.1,20.798 29.559,10.754 29.559,10.754 c 0,0 1.417,1.733 3.4,3.636 0,0 10e-4,0 10e-4,10e-4 1.036,0.958 2.319,2.044 3.853,3.176 0.044,0.031 0.086,0.062 0.122,0.091 8.573,6.286 24.949,13.945 50.829,9.394 -0.615,0.698 -1.254,1.341 -1.887,1.998 l 0.057,-0.009 c 0,0 -5.858,6.886 -16.555,12.616 -0.613,0.331 -1.252,0.659 -1.91,0.985 -0.038,0.018 -0.073,0.038 -0.112,0.057 -0.067,0.033 -0.128,0.057 -0.195,0.089 -8.007,3.888 -19.263,7.05 -33.564,5.458 -3.873,-0.374 -8.015,-1.116 -12.272,-2.399 -0.007,-0.002 -0.013,-0.003 -0.02,-0.005 L 21.304,45.84 c -10,-3.018 -20.632,-9.029 -29.888,-20.328 -0.563,-0.707 -1.105,-1.409 -1.632,-2.109 -0.064,-0.096 -0.138,-0.198 -0.222,-0.301 -1.164,-1.557 -2.236,-3.091 -3.229,-4.602 -3.216,-5.343 -5.544,-11.486 -6.214,-13.337 -10.417,-31.901 2.546,-56.661 5.065,-61.011 8.793,-14.923 24.186,-31.856 49.988,-36.751 0.195,-0.046 0.377,-0.101 0.573,-0.145 1.697,-0.361 4.79,-0.914 8.771,-1.178 1.5,-0.067 3.04,-0.093 4.626,-0.065 1.525,-0.01 2.953,0.016 4.268,0.063 0.39,0.027 0.729,0.04 1.029,0.044 5.023,0.233 8.144,0.762 8.144,0.762 -26.133,1.279 -39.232,13.203 -44.987,20.816 -1.304,1.622 -2.422,3.367 -3.324,5.234 -0.356,0.699 -0.515,1.097 -0.515,1.097 8.328,-7.069 19.98,-13.155 19.98,-13.155 10.449,-4.917 21.402,-7.337 33.008,-5.747 0,0 24.264,2.227 33.932,24.702 -0.417,0.317 -0.361,0.275 -0.777,0.592 -0.642,-0.518 -1.274,-1.007 -1.898,-1.474 -0.975,-0.64 -1.933,-1.336 -2.89,-2.038 -5.184,-3.431 -9.414,-5.048 -11.934,-5.788 -19.06,-4.811 -36.698,-1.232 -52.114,12.263 -7.14,6.251 -11.572,14.131 -11.759,23.917 -0.122,6.365 -0.188,12.734 -0.135,19.101 0.084,10.023 7.135,17.645 17.126,18.986 20.245,2.716 40.598,3.652 60.993,4.129 3.125,0.072 5.888,0.036 7.342,-3.306 0.025,-0.057 0.358,0.021 0.543,0.035 1.379,4.393 -0.607,9.127 -4.223,11.444"
- style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path60"
- inkscape:connector-curvature="0" /></g></g></g></g></svg>
\ No newline at end of file
diff --git a/website/assets/logos/logo_with_text_monochrome.png b/website/assets/logos/logo_with_text_monochrome.png
deleted file mode 100644
index 17442f55d..000000000
--- a/website/assets/logos/logo_with_text_monochrome.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/logos/logo_with_text_monochrome.svg b/website/assets/logos/logo_with_text_monochrome.svg
deleted file mode 100644
index 4648e06c0..000000000
--- a/website/assets/logos/logo_with_text_monochrome.svg
+++ /dev/null
@@ -1,116 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- version="1.1"
- id="svg2"
- xml:space="preserve"
- width="607.97211"
- height="193.20036"
- viewBox="0 0 607.97212 193.20036"
- sodipodi:docname="logo_with_text_monochrome.svg"
- inkscape:version="0.92.3 (2405546, 2018-03-11)"><metadata
- id="metadata8"><rdf:RDF><cc:Work
- rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><defs
- id="defs6"><clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath20"><path
- d="M 0,821.614 H 1366 V 0 H 0 Z"
- id="path18"
- inkscape:connector-curvature="0" /></clipPath></defs><sodipodi:namedview
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1"
- objecttolerance="10"
- gridtolerance="10"
- guidetolerance="10"
- inkscape:pageopacity="0"
- inkscape:pageshadow="2"
- inkscape:window-width="640"
- inkscape:window-height="480"
- id="namedview4"
- showgrid="false"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0"
- inkscape:zoom="0.21542963"
- inkscape:cx="296.21626"
- inkscape:cy="101.98009"
- inkscape:window-x="0"
- inkscape:window-y="0"
- inkscape:window-maximized="0"
- inkscape:current-layer="g10" /><g
- id="g10"
- inkscape:groupmode="layer"
- inkscape:label="gvisor_final-logo_20190313"
- transform="matrix(1.3333333,0,0,-1.3333333,-614.45037,638.9628)"><g
- id="g14"><g
- id="g16"
- clip-path="url(#clipPath20)"><g
- id="g22"
- transform="translate(668.8995,400.2876)"><path
- d="m 0,0 c -0.698,-5.234 -4.362,-8.375 -10.991,-9.421 -9.072,0.349 -13.783,5.407 -14.132,15.178 0.698,8.374 5.408,12.719 14.132,13.033 C -4.362,17.776 -0.698,14.341 0,8.479 Z m 0,26.117 c -2.442,2.826 -6.629,4.413 -12.561,4.763 -8.026,0 -14.219,-2.443 -18.581,-7.327 -4.361,-4.886 -6.542,-11.167 -6.542,-18.842 0,-8.026 2.006,-14.395 6.019,-19.105 4.012,-4.71 10.031,-7.066 18.057,-7.066 5.164,0 9.7,1.57 13.608,4.711 v -2.617 c 0,-3.141 -0.986,-6.019 -2.957,-8.636 -1.972,-2.617 -5.749,-3.925 -11.331,-3.925 -5.479,0.349 -10.137,1.919 -13.975,4.71 l -6.804,-7.85 c 5.582,-5.582 13.433,-8.48 23.553,-8.689 l 3.14,0.314 c 6.629,0.279 12.124,2.879 16.487,7.798 4.361,4.92 6.542,11.044 6.542,18.372 V 29.31 L 0,30.88 Z"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path24"
- inkscape:connector-curvature="0" /></g><g
- id="g26"
- transform="translate(720.3033,399.9331)"><path
- d="M 0,0 -19.986,51.176 H -37.513 L -8.457,-21.105 H 8.891 L 37.875,51.176 H 20.348 Z"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path28"
- inkscape:connector-curvature="0" /></g><path
- d="m 762.522,378.828 h 14.655 v 52.392 h -14.655 z"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path30"
- inkscape:connector-curvature="0" /><g
- id="g32"
- transform="translate(769.7443,451.1089)"><path
- d="m 0,0 c -2.373,0 -4.257,-0.707 -5.652,-2.12 -1.397,-1.413 -2.094,-3.166 -2.094,-5.26 0,-2.094 0.697,-3.839 2.094,-5.234 1.395,-1.396 3.315,-2.094 5.757,-2.094 2.442,0 4.361,0.698 5.757,2.094 1.395,1.395 2.094,3.14 2.094,5.234 0,2.094 -0.699,3.847 -2.094,5.26 C 4.466,-0.707 2.512,0 0,0"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path34"
- inkscape:connector-curvature="0" /></g><g
- id="g36"
- transform="translate(816.9264,406.8296)"><path
- d="m 0,0 c -3.315,2.477 -7.572,3.96 -12.771,4.449 -3.524,0.697 -5.879,1.578 -7.065,2.643 -1.187,1.064 -1.187,2.251 0,3.559 1.186,1.309 3.541,1.962 7.065,1.962 3.56,0 6.525,-1.412 8.898,-4.239 l 8.165,9.212 c -2.374,2.338 -4.92,4.1 -7.642,5.286 -2.721,1.186 -6.542,1.78 -11.462,1.78 -4.257,0 -8.4,-1.423 -12.43,-4.265 -4.03,-2.845 -6.046,-6.971 -6.046,-12.379 0,-5.898 1.771,-9.91 5.313,-12.038 3.541,-2.129 7.44,-3.437 11.697,-3.925 4.712,-0.454 7.476,-1.335 8.297,-2.645 0.819,-1.307 0.645,-2.615 -0.523,-3.924 -1.171,-1.309 -3.761,-1.963 -7.774,-1.963 -4.99,0.453 -8.67,2.093 -11.043,4.921 l -8.113,-9.161 c 2.373,-2.373 5.208,-4.265 8.505,-5.678 3.298,-1.413 7.424,-2.12 12.379,-2.12 5.199,0 9.752,1.36 13.66,4.083 3.908,2.721 5.862,6.558 5.862,11.514 C 4.972,-6.787 3.315,-2.479 0,0"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path38"
- inkscape:connector-curvature="0" /></g><g
- id="g40"
- transform="translate(862.2008,394.9751)"><path
- d="m 0,0 c -2.478,-2.67 -5.705,-4.004 -9.683,-4.004 -3.977,0 -7.205,1.334 -9.682,4.004 -2.478,2.669 -3.717,5.992 -3.717,9.97 0,3.978 1.239,7.31 3.717,9.997 2.477,2.686 5.686,4.03 9.63,4.03 4.012,0 7.257,-1.344 9.735,-4.03 C 2.477,17.28 3.716,13.948 3.716,9.97 3.716,5.992 2.477,2.669 0,0 m -9.63,36.716 c -7.608,0 -13.957,-2.548 -19.052,-7.642 -5.095,-5.095 -7.641,-11.445 -7.641,-19.051 0,-7.573 2.546,-13.915 7.641,-19.025 5.095,-5.113 11.444,-7.669 19.052,-7.669 7.606,0 13.956,2.547 19.051,7.642 5.095,5.093 7.641,11.445 7.641,19.052 0,7.606 -2.546,13.956 -7.641,19.051 -5.095,5.094 -11.445,7.642 -19.051,7.642"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path42"
- inkscape:connector-curvature="0" /></g><g
- id="g44"
- transform="translate(911.9489,431.1675)"><path
- d="m 0,0 c -5.374,0 -9.927,-1.92 -13.66,-5.758 v 5.81 L -28.315,-1.519 V -52.34 h 14.655 v 32.19 c 2.372,4.92 5.338,7.501 8.898,7.746 3.175,-0.105 5.617,-0.925 7.327,-2.46 L 4.868,-0.419 C 3.332,-0.14 1.709,0 0,0"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path46"
- inkscape:connector-curvature="0" /></g><g
- id="g48"
- transform="translate(548.2423,363.2485)"><path
- d="m 0,0 c 16.655,21.121 22.696,44.433 18.328,70.995 3.068,0 5.743,-0.023 8.417,0.007 2.222,0.025 4.443,0.102 6.664,0.175 4.79,0.154 4.818,0.165 5.88,-4.582 3.145,-14.051 2.18,-28.09 -0.179,-42.118 -0.25,-1.492 -0.7,-2.956 -0.864,-4.454 C 37.05,9.081 30.089,3.645 20.165,1.097 13.787,-0.54 7.323,-0.829 0,0"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path50"
- inkscape:connector-curvature="0" /></g><g
- id="g52"
- transform="translate(544.6891,396.1763)"><path
- d="M 0,0 C 0,2.594 -3.457,4.322 -3.457,4.322 -0.864,5.187 0,8.644 0,8.644 0,8.644 0.865,5.187 3.458,4.322 3.458,4.322 0,2.594 0,0 m -17.099,6.454 c 0,6.742 -8.989,11.236 -8.989,11.236 6.742,2.248 8.989,11.238 8.989,11.238 0,0 2.247,-8.99 8.99,-11.238 0,0 -8.99,-4.494 -8.99,-11.236"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path54"
- inkscape:connector-curvature="0" /></g><g
- id="g56"
- transform="translate(485.0861,429.7925)"><path
- d="m 0,0 c 0,0 -2.214,3.359 -9.736,2.059 0,0 5.987,28.738 36.298,38.806 C 26.562,40.865 -2.82,24.275 0,0 M 0.583,-33.208 0.58,-33.211 c -1.297,-2.026 -2.821,-3.579 -4.53,-4.616 -1.515,-0.933 -3.178,-1.425 -4.743,-1.425 -0.528,0 -1.044,0.057 -1.539,0.17 l -2.209,0.507 2.184,0.603 c 1.63,0.451 3.063,1.347 4.259,2.664 1.014,1.108 1.856,2.485 2.577,4.213 1.175,2.82 1.784,6.162 1.81,9.936 -0.049,3.718 -0.683,7.054 -1.886,9.902 -0.703,1.654 -1.585,3.056 -2.621,4.163 -1.227,1.311 -2.671,2.178 -4.294,2.576 l -2.187,0.538 2.179,0.572 c 0.48,0.126 0.998,0.196 1.539,0.209 h 0.031 l 1.688,-0.153 c 1.045,-0.206 2.104,-0.616 3.061,-1.185 1.755,-1.031 3.302,-2.567 4.598,-4.565 2.155,-3.374 3.315,-7.536 3.357,-12.042 -0.028,-4.548 -1.159,-8.717 -3.271,-12.064 m 101.949,51.176 c 0,0 -0.075,0.05 -0.226,0.136 -0.541,0.327 -1.113,0.603 -1.715,0.815 -3.044,1.241 -9.881,3.186 -21.906,2.623 -0.029,0 -0.056,0 -0.085,-0.001 C 52.916,21.123 30.022,10.922 30.022,10.922 c 0,0 1.439,1.761 3.453,3.692 10e-4,0 10e-4,10e-4 0.002,10e-4 1.052,0.974 2.355,2.076 3.912,3.227 0.046,0.031 0.088,0.063 0.124,0.093 8.708,6.384 25.34,14.163 51.625,9.541 -0.989,1.124 -2.002,2.192 -3.036,3.215 -1.112,0.883 -2.231,1.693 -3.354,2.456 0.02,-0.012 0.039,-0.023 0.059,-0.036 0,0 -17.016,19.415 -48.683,15.891 C 30.19,48.622 25.983,47.867 21.66,46.564 21.653,46.563 21.646,46.562 21.64,46.56 L 21.638,46.558 C 11.48,43.492 0.683,37.387 -8.719,25.911 -9.29,25.193 -9.841,24.479 -10.376,23.77 c -0.065,-0.099 -0.141,-0.202 -0.226,-0.307 -1.182,-1.581 -2.271,-3.14 -3.279,-4.674 -3.266,-5.427 -5.631,-11.665 -6.311,-13.545 -10.58,-32.401 2.586,-57.55 5.144,-61.967 8.93,-15.158 24.565,-32.355 50.771,-37.327 0.197,-0.047 0.382,-0.101 0.582,-0.147 1.723,-0.367 4.864,-0.929 8.908,-1.196 1.524,-0.069 3.088,-0.094 4.699,-0.067 1.548,-0.009 2.999,0.017 4.335,0.064 0.396,0.028 0.74,0.041 1.044,0.044 5.102,0.238 8.272,0.775 8.272,0.775 -26.543,1.299 -39.847,13.409 -45.691,21.142 -1.325,1.648 -2.46,3.42 -3.377,5.316 -0.361,0.71 -0.523,1.115 -0.523,1.115 8.459,-7.181 20.294,-13.362 20.294,-13.362 10.611,-4.993 21.737,-7.451 33.524,-5.837 0,0 24.645,2.263 34.464,25.09 -0.423,0.322 -0.366,0.278 -0.79,0.6 -0.652,-0.526 -1.294,-1.023 -1.926,-1.496 -0.991,-0.651 -1.964,-1.357 -2.937,-2.07 -5.265,-3.485 -9.561,-5.128 -12.12,-5.879 -19.359,-4.887 -37.273,-1.252 -52.93,12.455 -7.253,6.349 -11.754,14.352 -11.944,24.291 -0.124,6.465 -0.19,12.935 -0.136,19.4 0.085,10.181 7.246,17.921 17.394,19.284 20.561,2.759 41.234,3.71 61.947,4.193 3.174,0.073 5.981,0.037 7.458,-3.356 0.025,-0.058 0.363,0.02 0.552,0.035 1.4,4.461 -0.617,9.27 -4.29,11.624"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path58"
- inkscape:connector-curvature="0" /></g></g></g></g></svg>
\ No newline at end of file
diff --git a/website/assets/logos/logo_with_text_on_dark-1024.png b/website/assets/logos/logo_with_text_on_dark-1024.png
deleted file mode 100644
index a02a9014b..000000000
--- a/website/assets/logos/logo_with_text_on_dark-1024.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/logos/logo_with_text_on_dark-128.png b/website/assets/logos/logo_with_text_on_dark-128.png
deleted file mode 100644
index efae725b8..000000000
--- a/website/assets/logos/logo_with_text_on_dark-128.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/logos/logo_with_text_on_dark-16.png b/website/assets/logos/logo_with_text_on_dark-16.png
deleted file mode 100644
index a6069f98f..000000000
--- a/website/assets/logos/logo_with_text_on_dark-16.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/logos/logo_with_text_on_dark.png b/website/assets/logos/logo_with_text_on_dark.png
deleted file mode 100644
index 24de18c11..000000000
--- a/website/assets/logos/logo_with_text_on_dark.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/logos/logo_with_text_on_dark.svg b/website/assets/logos/logo_with_text_on_dark.svg
deleted file mode 100644
index 52d8e52da..000000000
--- a/website/assets/logos/logo_with_text_on_dark.svg
+++ /dev/null
@@ -1,116 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- version="1.1"
- id="svg2"
- xml:space="preserve"
- width="607.97211"
- height="193.19984"
- viewBox="0 0 607.97212 193.19985"
- sodipodi:docname="logo_with_text_on_dark.svg"
- inkscape:version="0.92.3 (2405546, 2018-03-11)"><metadata
- id="metadata8"><rdf:RDF><cc:Work
- rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><defs
- id="defs6"><clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath20"><path
- d="M 0,821.614 H 1366 V 0 H 0 Z"
- id="path18"
- inkscape:connector-curvature="0" /></clipPath></defs><sodipodi:namedview
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1"
- objecttolerance="10"
- gridtolerance="10"
- guidetolerance="10"
- inkscape:pageopacity="0"
- inkscape:pageshadow="2"
- inkscape:window-width="640"
- inkscape:window-height="480"
- id="namedview4"
- showgrid="false"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0"
- inkscape:zoom="0.21542963"
- inkscape:cx="296.21626"
- inkscape:cy="101.97992"
- inkscape:window-x="0"
- inkscape:window-y="0"
- inkscape:window-maximized="0"
- inkscape:current-layer="g10" /><g
- id="g10"
- inkscape:groupmode="layer"
- inkscape:label="gvisor_final-logo_20190313"
- transform="matrix(1.3333333,0,0,-1.3333333,-614.45037,638.96254)"><g
- id="g14"><g
- id="g16"
- clip-path="url(#clipPath20)"><g
- id="g22"
- transform="translate(668.8995,400.2874)"><path
- d="m 0,0 c -0.698,-5.233 -4.362,-8.375 -10.991,-9.421 -9.072,0.349 -13.783,5.408 -14.132,15.178 0.698,8.375 5.408,12.719 14.132,13.033 C -4.362,17.777 -0.698,14.341 0,8.479 Z m 0,26.117 c -2.442,2.826 -6.629,4.414 -12.561,4.763 -8.026,0 -14.219,-2.443 -18.581,-7.327 -4.361,-4.886 -6.542,-11.166 -6.542,-18.842 0,-8.026 2.006,-14.394 6.019,-19.105 4.012,-4.71 10.031,-7.065 18.057,-7.065 5.164,0 9.7,1.569 13.608,4.71 v -2.616 c 0,-3.141 -0.986,-6.02 -2.957,-8.636 -1.972,-2.618 -5.749,-3.926 -11.331,-3.926 -5.479,0.349 -10.137,1.919 -13.975,4.71 l -6.804,-7.85 c 5.582,-5.582 13.433,-8.48 23.553,-8.689 l 3.14,0.314 c 6.629,0.28 12.124,2.879 16.487,7.798 4.361,4.92 6.542,11.044 6.542,18.373 V 29.31 L 0,30.88 Z"
- style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path24"
- inkscape:connector-curvature="0" /></g><g
- id="g26"
- transform="translate(720.3033,399.9339)"><path
- d="m 0,0 -19.986,51.175 h -17.527 l 29.056,-72.28 H 8.891 l 28.984,72.28 H 20.348 Z"
- style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path28"
- inkscape:connector-curvature="0" /></g><path
- d="m 762.522,378.828 h 14.655 v 52.392 h -14.655 z"
- style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path30"
- inkscape:connector-curvature="0" /><g
- id="g32"
- transform="translate(769.7443,451.1087)"><path
- d="m 0,0 c -2.373,0 -4.257,-0.706 -5.652,-2.12 -1.397,-1.413 -2.094,-3.166 -2.094,-5.26 0,-2.094 0.697,-3.839 2.094,-5.233 1.395,-1.397 3.315,-2.094 5.757,-2.094 2.442,0 4.361,0.697 5.757,2.094 1.395,1.394 2.094,3.139 2.094,5.233 0,2.094 -0.699,3.847 -2.094,5.26 C 4.466,-0.706 2.512,0 0,0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path34"
- inkscape:connector-curvature="0" /></g><g
- id="g36"
- transform="translate(816.9264,406.8294)"><path
- d="m 0,0 c -3.315,2.478 -7.572,3.96 -12.771,4.449 -3.524,0.697 -5.879,1.578 -7.065,2.644 -1.187,1.063 -1.187,2.25 0,3.558 1.186,1.309 3.541,1.963 7.065,1.963 3.56,0 6.525,-1.413 8.898,-4.239 l 8.165,9.211 c -2.374,2.338 -4.92,4.101 -7.642,5.286 -2.721,1.187 -6.542,1.78 -11.462,1.78 -4.257,0 -8.4,-1.422 -12.43,-4.265 -4.03,-2.845 -6.046,-6.971 -6.046,-12.379 0,-5.897 1.771,-9.91 5.313,-12.038 3.541,-2.128 7.44,-3.437 11.697,-3.925 4.712,-0.454 7.476,-1.335 8.297,-2.644 0.819,-1.308 0.645,-2.616 -0.523,-3.924 -1.171,-1.309 -3.761,-1.963 -7.774,-1.963 -4.99,0.453 -8.67,2.092 -11.043,4.92 l -8.113,-9.16 c 2.373,-2.374 5.208,-4.266 8.505,-5.678 3.298,-1.414 7.424,-2.121 12.379,-2.121 5.199,0 9.752,1.361 13.66,4.083 3.908,2.722 5.862,6.559 5.862,11.515 C 4.972,-6.787 3.315,-2.478 0,0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path38"
- inkscape:connector-curvature="0" /></g><g
- id="g40"
- transform="translate(862.2008,394.9749)"><path
- d="m 0,0 c -2.478,-2.669 -5.705,-4.004 -9.683,-4.004 -3.977,0 -7.205,1.335 -9.682,4.004 -2.478,2.67 -3.717,5.992 -3.717,9.97 0,3.978 1.239,7.31 3.717,9.998 2.477,2.685 5.686,4.029 9.63,4.029 4.012,0 7.257,-1.344 9.735,-4.029 C 2.477,17.28 3.716,13.948 3.716,9.97 3.716,5.992 2.477,2.67 0,0 m -9.63,36.716 c -7.608,0 -13.957,-2.547 -19.052,-7.642 -5.095,-5.095 -7.641,-11.445 -7.641,-19.051 0,-7.572 2.546,-13.914 7.641,-19.025 5.095,-5.112 11.444,-7.669 19.052,-7.669 7.606,0 13.956,2.548 19.051,7.642 5.095,5.094 7.641,11.445 7.641,19.052 0,7.606 -2.546,13.956 -7.641,19.051 -5.095,5.095 -11.445,7.642 -19.051,7.642"
- style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path42"
- inkscape:connector-curvature="0" /></g><g
- id="g44"
- transform="translate(911.9489,431.1673)"><path
- d="m 0,0 c -5.374,0 -9.927,-1.919 -13.66,-5.757 v 5.81 l -14.655,-1.571 v -50.821 h 14.655 v 32.189 c 2.372,4.92 5.338,7.502 8.898,7.746 3.175,-0.105 5.617,-0.925 7.327,-2.46 L 4.868,-0.419 C 3.332,-0.14 1.709,0 0,0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path46"
- inkscape:connector-curvature="0" /></g><g
- id="g48"
- transform="translate(548.2423,363.2484)"><path
- d="m 0,0 c 16.655,21.121 22.696,44.434 18.328,70.995 3.068,0 5.743,-0.023 8.417,0.007 2.222,0.025 4.443,0.102 6.664,0.175 4.79,0.154 4.818,0.165 5.88,-4.582 3.145,-14.05 2.18,-28.089 -0.179,-42.118 -0.25,-1.492 -0.7,-2.956 -0.864,-4.454 C 37.05,9.081 30.089,3.645 20.165,1.097 13.787,-0.54 7.323,-0.829 0,0"
- style="fill:#fbb03b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path50"
- inkscape:connector-curvature="0" /></g><g
- id="g52"
- transform="translate(544.6891,396.1771)"><path
- d="M 0,0 C 0,2.593 -3.457,4.321 -3.457,4.321 -0.864,5.186 0,8.644 0,8.644 0,8.644 0.865,5.186 3.458,4.321 3.458,4.321 0,2.593 0,0 m -17.099,6.453 c 0,6.742 -8.989,11.237 -8.989,11.237 6.742,2.247 8.989,11.237 8.989,11.237 0,0 2.247,-8.99 8.99,-11.237 0,0 -8.99,-4.495 -8.99,-11.237"
- style="fill:#fbb03b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path54"
- inkscape:connector-curvature="0" /></g><g
- id="g56"
- transform="translate(485.0861,429.7923)"><path
- d="m 0,0 c 0,0 -2.214,3.359 -9.736,2.059 0,0 5.987,28.738 36.298,38.806 C 26.562,40.865 -2.82,24.275 0,0 M 0.583,-33.208 0.58,-33.211 c -1.297,-2.026 -2.821,-3.578 -4.53,-4.615 -1.515,-0.934 -3.178,-1.425 -4.743,-1.425 -0.528,0 -1.044,0.056 -1.539,0.169 l -2.209,0.507 2.184,0.603 c 1.63,0.451 3.063,1.347 4.259,2.664 1.014,1.108 1.856,2.485 2.577,4.214 1.175,2.819 1.784,6.161 1.81,9.935 -0.049,3.719 -0.683,7.054 -1.886,9.902 -0.703,1.655 -1.585,3.057 -2.621,4.163 -1.227,1.311 -2.671,2.178 -4.294,2.576 l -2.187,0.538 2.179,0.572 c 0.48,0.127 0.998,0.196 1.539,0.209 l 0.031,10e-4 1.688,-0.154 c 1.045,-0.206 2.104,-0.615 3.061,-1.184 1.755,-1.032 3.302,-2.568 4.598,-4.565 2.155,-3.374 3.315,-7.537 3.357,-12.043 -0.028,-4.548 -1.159,-8.717 -3.271,-12.064 m 101.949,51.176 c 0,0 -0.075,0.051 -0.226,0.137 -0.541,0.326 -1.113,0.602 -1.715,0.814 -3.044,1.241 -9.881,3.187 -21.906,2.623 -0.029,0 -0.056,0 -0.085,-0.001 C 52.916,21.123 30.022,10.923 30.022,10.923 c 0,0 1.439,1.76 3.453,3.691 10e-4,10e-4 10e-4,10e-4 0.002,0.002 1.052,0.973 2.355,2.076 3.912,3.226 0.046,0.032 0.088,0.063 0.124,0.094 8.708,6.383 25.34,14.162 51.625,9.54 -0.989,1.124 -2.002,2.193 -3.036,3.215 -1.112,0.884 -2.231,1.694 -3.354,2.456 0.02,-0.012 0.039,-0.023 0.059,-0.036 0,0 -17.016,19.415 -48.683,15.891 C 30.19,48.622 25.983,47.868 21.66,46.564 21.653,46.563 21.646,46.562 21.64,46.56 L 21.638,46.558 C 11.48,43.492 0.683,37.387 -8.719,25.911 c -0.571,-0.718 -1.122,-1.431 -1.657,-2.14 -0.065,-0.1 -0.141,-0.202 -0.226,-0.307 -1.182,-1.582 -2.271,-3.141 -3.279,-4.674 -3.266,-5.427 -5.631,-11.666 -6.311,-13.546 -10.58,-32.401 2.586,-57.549 5.144,-61.967 8.93,-15.157 24.565,-32.355 50.771,-37.327 0.197,-0.046 0.382,-0.101 0.582,-0.146 1.723,-0.367 4.864,-0.929 8.908,-1.197 1.524,-0.069 3.088,-0.094 4.699,-0.066 1.548,-0.01 2.999,0.017 4.335,0.064 0.396,0.027 0.74,0.041 1.044,0.044 5.102,0.237 8.272,0.774 8.272,0.774 -26.543,1.3 -39.847,13.41 -45.691,21.142 -1.325,1.648 -2.46,3.421 -3.377,5.316 -0.361,0.711 -0.523,1.115 -0.523,1.115 8.459,-7.18 20.294,-13.361 20.294,-13.361 10.611,-4.993 21.737,-7.452 33.524,-5.838 0,0 24.645,2.264 34.464,25.09 -0.423,0.322 -0.366,0.278 -0.79,0.6 -0.652,-0.526 -1.294,-1.022 -1.926,-1.496 -0.991,-0.65 -1.964,-1.357 -2.937,-2.07 -5.265,-3.485 -9.561,-5.127 -12.12,-5.879 -19.359,-4.887 -37.273,-1.252 -52.93,12.455 -7.253,6.349 -11.754,14.353 -11.944,24.291 -0.124,6.466 -0.19,12.935 -0.136,19.4 0.085,10.181 7.246,17.921 17.394,19.284 20.561,2.759 41.234,3.71 61.947,4.193 3.174,0.074 5.981,0.037 7.458,-3.356 0.025,-0.058 0.363,0.02 0.552,0.035 1.4,4.462 -0.617,9.27 -4.29,11.624"
- style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path58"
- inkscape:connector-curvature="0" /></g></g></g></g></svg> \ No newline at end of file
diff --git a/website/assets/logos/logo_with_text_on_dark_full-1024.png b/website/assets/logos/logo_with_text_on_dark_full-1024.png
deleted file mode 100644
index eb2e63981..000000000
--- a/website/assets/logos/logo_with_text_on_dark_full-1024.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/logos/logo_with_text_on_dark_full-128.png b/website/assets/logos/logo_with_text_on_dark_full-128.png
deleted file mode 100644
index 4ed21e5cb..000000000
--- a/website/assets/logos/logo_with_text_on_dark_full-128.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/logos/logo_with_text_on_dark_full-16.png b/website/assets/logos/logo_with_text_on_dark_full-16.png
deleted file mode 100644
index d3968da5e..000000000
--- a/website/assets/logos/logo_with_text_on_dark_full-16.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/logos/logo_with_text_on_dark_full.png b/website/assets/logos/logo_with_text_on_dark_full.png
deleted file mode 100644
index 21feea356..000000000
--- a/website/assets/logos/logo_with_text_on_dark_full.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/logos/logo_with_text_on_dark_full.svg b/website/assets/logos/logo_with_text_on_dark_full.svg
deleted file mode 100644
index 017e72414..000000000
--- a/website/assets/logos/logo_with_text_on_dark_full.svg
+++ /dev/null
@@ -1,120 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- version="1.1"
- id="svg2"
- xml:space="preserve"
- width="786.19244"
- height="334.21716"
- viewBox="0 0 786.19246 334.21718"
- sodipodi:docname="logo_with_text_on_dark_full.svg"
- inkscape:version="0.92.3 (2405546, 2018-03-11)"><metadata
- id="metadata8"><rdf:RDF><cc:Work
- rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><defs
- id="defs6"><clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath20"><path
- d="M 0,821.614 H 1366 V 0 H 0 Z"
- id="path18"
- inkscape:connector-curvature="0" /></clipPath></defs><sodipodi:namedview
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1"
- objecttolerance="10"
- gridtolerance="10"
- guidetolerance="10"
- inkscape:pageopacity="0"
- inkscape:pageshadow="2"
- inkscape:window-width="567"
- inkscape:window-height="462"
- id="namedview4"
- showgrid="false"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0"
- inkscape:zoom="0.3046635"
- inkscape:cx="541.19762"
- inkscape:cy="67.525134"
- inkscape:window-x="0"
- inkscape:window-y="0"
- inkscape:window-maximized="0"
- inkscape:current-layer="g10" /><g
- id="g10"
- inkscape:groupmode="layer"
- inkscape:label="gvisor_final-logo_20190313"
- transform="matrix(1.3333333,0,0,-1.3333333,-524.53324,714.8519)"><path
- d="M 393.39994,285.47606 H 983.04431 V 536.13894 H 393.39994 Z"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:0.36289462"
- id="path12"
- inkscape:connector-curvature="0" /><g
- id="g14"><g
- id="g16"
- clip-path="url(#clipPath20)"><g
- id="g22"
- transform="translate(668.8995,400.2874)"><path
- d="m 0,0 c -0.698,-5.233 -4.362,-8.375 -10.991,-9.421 -9.072,0.349 -13.783,5.408 -14.132,15.178 0.698,8.375 5.408,12.719 14.132,13.033 C -4.362,17.777 -0.698,14.341 0,8.479 Z m 0,26.117 c -2.442,2.826 -6.629,4.414 -12.561,4.763 -8.026,0 -14.219,-2.443 -18.581,-7.327 -4.361,-4.886 -6.542,-11.166 -6.542,-18.842 0,-8.026 2.006,-14.394 6.019,-19.105 4.012,-4.71 10.031,-7.065 18.057,-7.065 5.164,0 9.7,1.569 13.608,4.71 v -2.616 c 0,-3.141 -0.986,-6.02 -2.957,-8.636 -1.972,-2.618 -5.749,-3.926 -11.331,-3.926 -5.479,0.349 -10.137,1.919 -13.975,4.71 l -6.804,-7.85 c 5.582,-5.582 13.433,-8.48 23.553,-8.689 l 3.14,0.314 c 6.629,0.28 12.124,2.879 16.487,7.798 4.361,4.92 6.542,11.044 6.542,18.373 V 29.31 L 0,30.88 Z"
- style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path24"
- inkscape:connector-curvature="0" /></g><g
- id="g26"
- transform="translate(720.3033,399.9339)"><path
- d="m 0,0 -19.986,51.175 h -17.527 l 29.056,-72.28 H 8.891 l 28.984,72.28 H 20.348 Z"
- style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path28"
- inkscape:connector-curvature="0" /></g><path
- d="m 762.522,378.828 h 14.655 v 52.392 h -14.655 z"
- style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path30"
- inkscape:connector-curvature="0" /><g
- id="g32"
- transform="translate(769.7443,451.1087)"><path
- d="m 0,0 c -2.373,0 -4.257,-0.706 -5.652,-2.12 -1.397,-1.413 -2.094,-3.166 -2.094,-5.26 0,-2.094 0.697,-3.839 2.094,-5.233 1.395,-1.397 3.315,-2.094 5.757,-2.094 2.442,0 4.361,0.697 5.757,2.094 1.395,1.394 2.094,3.139 2.094,5.233 0,2.094 -0.699,3.847 -2.094,5.26 C 4.466,-0.706 2.512,0 0,0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path34"
- inkscape:connector-curvature="0" /></g><g
- id="g36"
- transform="translate(816.9264,406.8294)"><path
- d="m 0,0 c -3.315,2.478 -7.572,3.96 -12.771,4.449 -3.524,0.697 -5.879,1.578 -7.065,2.644 -1.187,1.063 -1.187,2.25 0,3.558 1.186,1.309 3.541,1.963 7.065,1.963 3.56,0 6.525,-1.413 8.898,-4.239 l 8.165,9.211 c -2.374,2.338 -4.92,4.101 -7.642,5.286 -2.721,1.187 -6.542,1.78 -11.462,1.78 -4.257,0 -8.4,-1.422 -12.43,-4.265 -4.03,-2.845 -6.046,-6.971 -6.046,-12.379 0,-5.897 1.771,-9.91 5.313,-12.038 3.541,-2.128 7.44,-3.437 11.697,-3.925 4.712,-0.454 7.476,-1.335 8.297,-2.644 0.819,-1.308 0.645,-2.616 -0.523,-3.924 -1.171,-1.309 -3.761,-1.963 -7.774,-1.963 -4.99,0.453 -8.67,2.092 -11.043,4.92 l -8.113,-9.16 c 2.373,-2.374 5.208,-4.266 8.505,-5.678 3.298,-1.414 7.424,-2.121 12.379,-2.121 5.199,0 9.752,1.361 13.66,4.083 3.908,2.722 5.862,6.559 5.862,11.515 C 4.972,-6.787 3.315,-2.478 0,0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path38"
- inkscape:connector-curvature="0" /></g><g
- id="g40"
- transform="translate(862.2008,394.9749)"><path
- d="m 0,0 c -2.478,-2.669 -5.705,-4.004 -9.683,-4.004 -3.977,0 -7.205,1.335 -9.682,4.004 -2.478,2.67 -3.717,5.992 -3.717,9.97 0,3.978 1.239,7.31 3.717,9.998 2.477,2.685 5.686,4.029 9.63,4.029 4.012,0 7.257,-1.344 9.735,-4.029 C 2.477,17.28 3.716,13.948 3.716,9.97 3.716,5.992 2.477,2.67 0,0 m -9.63,36.716 c -7.608,0 -13.957,-2.547 -19.052,-7.642 -5.095,-5.095 -7.641,-11.445 -7.641,-19.051 0,-7.572 2.546,-13.914 7.641,-19.025 5.095,-5.112 11.444,-7.669 19.052,-7.669 7.606,0 13.956,2.548 19.051,7.642 5.095,5.094 7.641,11.445 7.641,19.052 0,7.606 -2.546,13.956 -7.641,19.051 -5.095,5.095 -11.445,7.642 -19.051,7.642"
- style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path42"
- inkscape:connector-curvature="0" /></g><g
- id="g44"
- transform="translate(911.9489,431.1673)"><path
- d="m 0,0 c -5.374,0 -9.927,-1.919 -13.66,-5.757 v 5.81 l -14.655,-1.571 v -50.821 h 14.655 v 32.189 c 2.372,4.92 5.338,7.502 8.898,7.746 3.175,-0.105 5.617,-0.925 7.327,-2.46 L 4.868,-0.419 C 3.332,-0.14 1.709,0 0,0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path46"
- inkscape:connector-curvature="0" /></g><g
- id="g48"
- transform="translate(548.2423,363.2484)"><path
- d="m 0,0 c 16.655,21.121 22.696,44.434 18.328,70.995 3.068,0 5.743,-0.023 8.417,0.007 2.222,0.025 4.443,0.102 6.664,0.175 4.79,0.154 4.818,0.165 5.88,-4.582 3.145,-14.05 2.18,-28.089 -0.179,-42.118 -0.25,-1.492 -0.7,-2.956 -0.864,-4.454 C 37.05,9.081 30.089,3.645 20.165,1.097 13.787,-0.54 7.323,-0.829 0,0"
- style="fill:#fbb03b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path50"
- inkscape:connector-curvature="0" /></g><g
- id="g52"
- transform="translate(544.6891,396.1771)"><path
- d="M 0,0 C 0,2.593 -3.457,4.321 -3.457,4.321 -0.864,5.186 0,8.644 0,8.644 0,8.644 0.865,5.186 3.458,4.321 3.458,4.321 0,2.593 0,0 m -17.099,6.453 c 0,6.742 -8.989,11.237 -8.989,11.237 6.742,2.247 8.989,11.237 8.989,11.237 0,0 2.247,-8.99 8.99,-11.237 0,0 -8.99,-4.495 -8.99,-11.237"
- style="fill:#fbb03b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path54"
- inkscape:connector-curvature="0" /></g><g
- id="g56"
- transform="translate(485.0861,429.7923)"><path
- d="m 0,0 c 0,0 -2.214,3.359 -9.736,2.059 0,0 5.987,28.738 36.298,38.806 C 26.562,40.865 -2.82,24.275 0,0 M 0.583,-33.208 0.58,-33.211 c -1.297,-2.026 -2.821,-3.578 -4.53,-4.615 -1.515,-0.934 -3.178,-1.425 -4.743,-1.425 -0.528,0 -1.044,0.056 -1.539,0.169 l -2.209,0.507 2.184,0.603 c 1.63,0.451 3.063,1.347 4.259,2.664 1.014,1.108 1.856,2.485 2.577,4.214 1.175,2.819 1.784,6.161 1.81,9.935 -0.049,3.719 -0.683,7.054 -1.886,9.902 -0.703,1.655 -1.585,3.057 -2.621,4.163 -1.227,1.311 -2.671,2.178 -4.294,2.576 l -2.187,0.538 2.179,0.572 c 0.48,0.127 0.998,0.196 1.539,0.209 l 0.031,10e-4 1.688,-0.154 c 1.045,-0.206 2.104,-0.615 3.061,-1.184 1.755,-1.032 3.302,-2.568 4.598,-4.565 2.155,-3.374 3.315,-7.537 3.357,-12.043 -0.028,-4.548 -1.159,-8.717 -3.271,-12.064 m 101.949,51.176 c 0,0 -0.075,0.051 -0.226,0.137 -0.541,0.326 -1.113,0.602 -1.715,0.814 -3.044,1.241 -9.881,3.187 -21.906,2.623 -0.029,0 -0.056,0 -0.085,-0.001 C 52.916,21.123 30.022,10.923 30.022,10.923 c 0,0 1.439,1.76 3.453,3.691 10e-4,10e-4 10e-4,10e-4 0.002,0.002 1.052,0.973 2.355,2.076 3.912,3.226 0.046,0.032 0.088,0.063 0.124,0.094 8.708,6.383 25.34,14.162 51.625,9.54 -0.989,1.124 -2.002,2.193 -3.036,3.215 -1.112,0.884 -2.231,1.694 -3.354,2.456 0.02,-0.012 0.039,-0.023 0.059,-0.036 0,0 -17.016,19.415 -48.683,15.891 C 30.19,48.622 25.983,47.868 21.66,46.564 21.653,46.563 21.646,46.562 21.64,46.56 L 21.638,46.558 C 11.48,43.492 0.683,37.387 -8.719,25.911 c -0.571,-0.718 -1.122,-1.431 -1.657,-2.14 -0.065,-0.1 -0.141,-0.202 -0.226,-0.307 -1.182,-1.582 -2.271,-3.141 -3.279,-4.674 -3.266,-5.427 -5.631,-11.666 -6.311,-13.546 -10.58,-32.401 2.586,-57.549 5.144,-61.967 8.93,-15.157 24.565,-32.355 50.771,-37.327 0.197,-0.046 0.382,-0.101 0.582,-0.146 1.723,-0.367 4.864,-0.929 8.908,-1.197 1.524,-0.069 3.088,-0.094 4.699,-0.066 1.548,-0.01 2.999,0.017 4.335,0.064 0.396,0.027 0.74,0.041 1.044,0.044 5.102,0.237 8.272,0.774 8.272,0.774 -26.543,1.3 -39.847,13.41 -45.691,21.142 -1.325,1.648 -2.46,3.421 -3.377,5.316 -0.361,0.711 -0.523,1.115 -0.523,1.115 8.459,-7.18 20.294,-13.361 20.294,-13.361 10.611,-4.993 21.737,-7.452 33.524,-5.838 0,0 24.645,2.264 34.464,25.09 -0.423,0.322 -0.366,0.278 -0.79,0.6 -0.652,-0.526 -1.294,-1.022 -1.926,-1.496 -0.991,-0.65 -1.964,-1.357 -2.937,-2.07 -5.265,-3.485 -9.561,-5.127 -12.12,-5.879 -19.359,-4.887 -37.273,-1.252 -52.93,12.455 -7.253,6.349 -11.754,14.353 -11.944,24.291 -0.124,6.466 -0.19,12.935 -0.136,19.4 0.085,10.181 7.246,17.921 17.394,19.284 20.561,2.759 41.234,3.71 61.947,4.193 3.174,0.074 5.981,0.037 7.458,-3.356 0.025,-0.058 0.363,0.02 0.552,0.035 1.4,4.462 -0.617,9.27 -4.29,11.624"
- style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path58"
- inkscape:connector-curvature="0" /></g></g></g></g></svg> \ No newline at end of file
diff --git a/website/assets/logos/logo_with_text_on_white.png b/website/assets/logos/logo_with_text_on_white.png
deleted file mode 100644
index bf420a057..000000000
--- a/website/assets/logos/logo_with_text_on_white.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/logos/logo_with_text_on_white.svg b/website/assets/logos/logo_with_text_on_white.svg
deleted file mode 100644
index 4275efe83..000000000
--- a/website/assets/logos/logo_with_text_on_white.svg
+++ /dev/null
@@ -1,116 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- version="1.1"
- id="svg2"
- xml:space="preserve"
- width="607.97211"
- height="193.20036"
- viewBox="0 0 607.97212 193.20037"
- sodipodi:docname="logo_with_text_on_white.svg"
- inkscape:version="0.92.3 (2405546, 2018-03-11)"><metadata
- id="metadata8"><rdf:RDF><cc:Work
- rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><defs
- id="defs6"><clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath18"><path
- d="M 0,821.614 H 1366 V 0 H 0 Z"
- id="path16"
- inkscape:connector-curvature="0" /></clipPath></defs><sodipodi:namedview
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1"
- objecttolerance="10"
- gridtolerance="10"
- guidetolerance="10"
- inkscape:pageopacity="0"
- inkscape:pageshadow="2"
- inkscape:window-width="640"
- inkscape:window-height="480"
- id="namedview4"
- showgrid="false"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0"
- inkscape:zoom="0.43085925"
- inkscape:cx="325.455"
- inkscape:cy="50.910097"
- inkscape:window-x="0"
- inkscape:window-y="0"
- inkscape:window-maximized="0"
- inkscape:current-layer="g10" /><g
- id="g10"
- inkscape:groupmode="layer"
- inkscape:label="gvisor_final-logo_20190313"
- transform="matrix(1.3333333,0,0,-1.3333333,-614.45037,638.9628)"><g
- id="g12"><g
- id="g14"
- clip-path="url(#clipPath18)"><g
- id="g20"
- transform="translate(668.8995,400.2876)"><path
- d="m 0,0 c -0.698,-5.234 -4.362,-8.375 -10.991,-9.421 -9.072,0.349 -13.783,5.407 -14.132,15.178 0.698,8.374 5.408,12.719 14.132,13.033 C -4.362,17.776 -0.698,14.341 0,8.479 Z m 0,26.117 c -2.442,2.826 -6.629,4.413 -12.561,4.763 -8.026,0 -14.219,-2.443 -18.581,-7.327 -4.361,-4.886 -6.542,-11.167 -6.542,-18.842 0,-8.026 2.006,-14.395 6.019,-19.105 4.012,-4.71 10.031,-7.066 18.057,-7.066 5.164,0 9.7,1.57 13.608,4.711 v -2.617 c 0,-3.141 -0.986,-6.019 -2.957,-8.636 -1.972,-2.617 -5.749,-3.925 -11.331,-3.925 -5.479,0.349 -10.137,1.919 -13.975,4.71 l -6.804,-7.85 c 5.582,-5.582 13.433,-8.48 23.553,-8.689 l 3.14,0.314 c 6.629,0.279 12.124,2.879 16.487,7.798 4.361,4.92 6.542,11.044 6.542,18.372 V 29.31 L 0,30.88 Z"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path22"
- inkscape:connector-curvature="0" /></g><g
- id="g24"
- transform="translate(720.3033,399.9331)"><path
- d="M 0,0 -19.986,51.176 H -37.513 L -8.457,-21.105 H 8.891 L 37.875,51.176 H 20.348 Z"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path26"
- inkscape:connector-curvature="0" /></g><path
- d="m 762.522,378.828 h 14.655 v 52.392 h -14.655 z"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path28"
- inkscape:connector-curvature="0" /><g
- id="g30"
- transform="translate(769.7443,451.1089)"><path
- d="m 0,0 c -2.373,0 -4.257,-0.707 -5.652,-2.12 -1.397,-1.413 -2.094,-3.166 -2.094,-5.26 0,-2.094 0.697,-3.839 2.094,-5.234 1.395,-1.396 3.315,-2.094 5.757,-2.094 2.442,0 4.361,0.698 5.757,2.094 1.395,1.395 2.094,3.14 2.094,5.234 0,2.094 -0.699,3.847 -2.094,5.26 C 4.466,-0.707 2.512,0 0,0"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path32"
- inkscape:connector-curvature="0" /></g><g
- id="g34"
- transform="translate(816.9264,406.8296)"><path
- d="m 0,0 c -3.315,2.477 -7.572,3.96 -12.771,4.449 -3.524,0.697 -5.879,1.578 -7.065,2.643 -1.187,1.064 -1.187,2.251 0,3.559 1.186,1.309 3.541,1.962 7.065,1.962 3.56,0 6.525,-1.412 8.898,-4.239 l 8.165,9.212 c -2.374,2.338 -4.92,4.1 -7.642,5.286 -2.721,1.186 -6.542,1.78 -11.462,1.78 -4.257,0 -8.4,-1.423 -12.43,-4.265 -4.03,-2.845 -6.046,-6.971 -6.046,-12.379 0,-5.898 1.771,-9.91 5.313,-12.038 3.541,-2.129 7.44,-3.437 11.697,-3.925 4.712,-0.454 7.476,-1.335 8.297,-2.645 0.819,-1.307 0.645,-2.615 -0.523,-3.924 -1.171,-1.309 -3.761,-1.963 -7.774,-1.963 -4.99,0.453 -8.67,2.093 -11.043,4.921 l -8.113,-9.161 c 2.373,-2.373 5.208,-4.265 8.505,-5.678 3.298,-1.413 7.424,-2.12 12.379,-2.12 5.199,0 9.752,1.36 13.66,4.083 3.908,2.721 5.862,6.558 5.862,11.514 C 4.972,-6.787 3.315,-2.479 0,0"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path36"
- inkscape:connector-curvature="0" /></g><g
- id="g38"
- transform="translate(862.2008,394.9751)"><path
- d="m 0,0 c -2.478,-2.67 -5.705,-4.004 -9.683,-4.004 -3.977,0 -7.205,1.334 -9.682,4.004 -2.478,2.669 -3.717,5.992 -3.717,9.97 0,3.978 1.239,7.31 3.717,9.997 2.477,2.686 5.686,4.03 9.63,4.03 4.012,0 7.257,-1.344 9.735,-4.03 C 2.477,17.28 3.716,13.948 3.716,9.97 3.716,5.992 2.477,2.669 0,0 m -9.63,36.716 c -7.608,0 -13.957,-2.548 -19.052,-7.642 -5.095,-5.095 -7.641,-11.445 -7.641,-19.051 0,-7.573 2.546,-13.915 7.641,-19.025 5.095,-5.113 11.444,-7.669 19.052,-7.669 7.606,0 13.956,2.547 19.051,7.642 5.095,5.093 7.641,11.445 7.641,19.052 0,7.606 -2.546,13.956 -7.641,19.051 -5.095,5.094 -11.445,7.642 -19.051,7.642"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path40"
- inkscape:connector-curvature="0" /></g><g
- id="g42"
- transform="translate(911.9489,431.1675)"><path
- d="m 0,0 c -5.374,0 -9.927,-1.92 -13.66,-5.758 v 5.81 L -28.315,-1.519 V -52.34 h 14.655 v 32.19 c 2.372,4.92 5.338,7.501 8.898,7.746 3.175,-0.105 5.617,-0.925 7.327,-2.46 L 4.868,-0.419 C 3.332,-0.14 1.709,0 0,0"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path44"
- inkscape:connector-curvature="0" /></g><g
- id="g46"
- transform="translate(548.2428,363.2485)"><path
- d="m 0,0 c 16.655,21.121 22.696,44.433 18.328,70.995 3.068,0 5.742,-0.023 8.417,0.007 2.221,0.025 4.442,0.102 6.663,0.175 4.79,0.154 4.818,0.165 5.881,-4.582 C 42.434,52.544 41.469,38.505 39.11,24.477 38.859,22.985 38.409,21.521 38.246,20.023 37.05,9.081 30.089,3.645 20.164,1.097 13.786,-0.54 7.323,-0.829 0,0"
- style="fill:#fbb03b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path48"
- inkscape:connector-curvature="0" /></g><g
- id="g50"
- transform="translate(544.6891,396.1763)"><path
- d="M 0,0 C 0,2.594 -3.457,4.322 -3.457,4.322 -0.864,5.187 0,8.644 0,8.644 0,8.644 0.865,5.187 3.458,4.322 3.458,4.322 0,2.594 0,0 m -17.099,6.454 c 0,6.742 -8.989,11.236 -8.989,11.236 6.742,2.248 8.989,11.238 8.989,11.238 0,0 2.248,-8.99 8.99,-11.238 0,0 -8.99,-4.494 -8.99,-11.236"
- style="fill:#fbb03b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path52"
- inkscape:connector-curvature="0" /></g><g
- id="g54"
- transform="translate(485.0861,429.7925)"><path
- d="m 0,0 c 0,0 -2.214,3.359 -9.736,2.059 0,0 5.987,28.738 36.298,38.806 C 26.562,40.865 -2.82,24.275 0,0 M 0.583,-33.208 0.58,-33.211 c -1.297,-2.026 -2.821,-3.579 -4.53,-4.616 -1.515,-0.933 -3.178,-1.425 -4.743,-1.425 -0.528,0 -1.044,0.057 -1.539,0.17 l -2.209,0.507 2.184,0.603 c 1.63,0.451 3.063,1.347 4.259,2.664 1.014,1.108 1.856,2.485 2.577,4.213 1.175,2.82 1.784,6.162 1.81,9.936 -0.049,3.718 -0.683,7.054 -1.886,9.902 -0.703,1.654 -1.585,3.056 -2.621,4.163 -1.227,1.311 -2.671,2.178 -4.294,2.576 l -2.187,0.538 2.179,0.572 c 0.48,0.126 0.998,0.196 1.539,0.209 h 0.031 l 1.688,-0.153 c 1.045,-0.206 2.104,-0.616 3.061,-1.185 1.755,-1.031 3.302,-2.567 4.598,-4.565 2.155,-3.374 3.315,-7.536 3.357,-12.042 -0.028,-4.548 -1.159,-8.717 -3.271,-12.064 m 101.949,51.176 c 0,0 -0.075,0.05 -0.226,0.136 -0.541,0.327 -1.113,0.603 -1.715,0.815 -3.044,1.241 -9.881,3.186 -21.906,2.623 -0.029,0 -0.056,0 -0.085,-0.001 C 52.916,21.123 30.022,10.922 30.022,10.922 c 0,0 1.439,1.761 3.453,3.692 10e-4,0 10e-4,10e-4 0.002,10e-4 1.052,0.974 2.355,2.076 3.912,3.227 0.046,0.031 0.088,0.063 0.124,0.093 8.708,6.384 25.341,14.163 51.625,9.541 -0.989,1.124 -2.002,2.192 -3.036,3.215 -1.112,0.883 -2.231,1.693 -3.354,2.456 0.02,-0.012 0.039,-0.023 0.059,-0.036 0,0 -17.016,19.415 -48.683,15.891 C 30.19,48.622 25.983,47.867 21.66,46.564 21.653,46.563 21.646,46.562 21.64,46.56 L 21.638,46.558 C 11.48,43.492 0.683,37.387 -8.719,25.911 -9.29,25.193 -9.841,24.479 -10.376,23.77 c -0.065,-0.099 -0.141,-0.202 -0.226,-0.307 -1.182,-1.581 -2.271,-3.14 -3.279,-4.674 -3.266,-5.427 -5.631,-11.665 -6.311,-13.545 -10.58,-32.401 2.586,-57.55 5.144,-61.967 8.93,-15.158 24.565,-32.355 50.771,-37.327 0.197,-0.047 0.382,-0.101 0.582,-0.147 1.723,-0.367 4.864,-0.929 8.908,-1.196 1.524,-0.069 3.088,-0.094 4.699,-0.067 1.548,-0.009 2.999,0.017 4.335,0.064 0.396,0.028 0.74,0.041 1.044,0.044 5.102,0.238 8.272,0.775 8.272,0.775 -26.542,1.299 -39.847,13.409 -45.691,21.142 -1.325,1.648 -2.46,3.42 -3.377,5.316 -0.361,0.71 -0.523,1.115 -0.523,1.115 8.459,-7.181 20.294,-13.362 20.294,-13.362 10.611,-4.993 21.737,-7.451 33.525,-5.837 0,0 24.644,2.263 34.463,25.09 -0.423,0.322 -0.366,0.278 -0.79,0.6 -0.651,-0.526 -1.294,-1.023 -1.926,-1.496 -0.991,-0.651 -1.964,-1.357 -2.937,-2.07 -5.265,-3.485 -9.561,-5.128 -12.12,-5.879 -19.359,-4.887 -37.273,-1.252 -52.93,12.455 -7.253,6.349 -11.754,14.352 -11.944,24.291 -0.124,6.465 -0.19,12.935 -0.136,19.4 0.085,10.181 7.246,17.921 17.394,19.284 20.561,2.759 41.234,3.71 61.948,4.193 3.173,0.073 5.98,0.037 7.457,-3.356 0.025,-0.058 0.363,0.02 0.552,0.035 1.4,4.461 -0.617,9.27 -4.29,11.624"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path56"
- inkscape:connector-curvature="0" /></g></g></g></g></svg> \ No newline at end of file
diff --git a/website/assets/logos/logo_with_text_on_white_bordered.png b/website/assets/logos/logo_with_text_on_white_bordered.png
deleted file mode 100644
index bd1a1e4b7..000000000
--- a/website/assets/logos/logo_with_text_on_white_bordered.png
+++ /dev/null
Binary files differ
diff --git a/website/assets/logos/logo_with_text_on_white_bordered.svg b/website/assets/logos/logo_with_text_on_white_bordered.svg
deleted file mode 100644
index 08125629d..000000000
--- a/website/assets/logos/logo_with_text_on_white_bordered.svg
+++ /dev/null
@@ -1,122 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- version="1.1"
- id="svg2"
- xml:space="preserve"
- width="607.64587"
- height="207.92123"
- viewBox="0 0 607.64589 207.92124"
- sodipodi:docname="logo_with_text_on_white_bordered.svg"
- inkscape:version="0.92.3 (2405546, 2018-03-11)"><metadata
- id="metadata8"><rdf:RDF><cc:Work
- rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><defs
- id="defs6"><clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath18"><path
- d="M 0,821.614 H 1366 V 0 H 0 Z"
- id="path16"
- inkscape:connector-curvature="0" /></clipPath></defs><sodipodi:namedview
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1"
- objecttolerance="10"
- gridtolerance="10"
- guidetolerance="10"
- inkscape:pageopacity="0"
- inkscape:pageshadow="2"
- inkscape:window-width="640"
- inkscape:window-height="480"
- id="namedview4"
- showgrid="false"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0"
- inkscape:zoom="0.21542963"
- inkscape:cx="298.55736"
- inkscape:cy="108.65533"
- inkscape:window-x="0"
- inkscape:window-y="0"
- inkscape:window-maximized="0"
- inkscape:current-layer="g10" /><g
- id="g10"
- inkscape:groupmode="layer"
- inkscape:label="gvisor_final-logo_20190313"
- transform="matrix(1.3333333,0,0,-1.3333333,-612.10927,647.00852)"><g
- id="g12"><g
- id="g14"
- clip-path="url(#clipPath18)"><g
- id="g20"
- transform="translate(670.7226,400.7367)"><path
- d="m 0,0 c -0.687,-5.153 -4.294,-8.246 -10.821,-9.275 -8.933,0.342 -13.571,5.324 -13.914,14.943 0.687,8.245 5.325,12.522 13.914,12.832 C -4.294,17.503 -0.687,14.12 0,8.348 Z m 0,25.715 c -2.404,2.782 -6.527,4.345 -12.367,4.688 -7.902,0 -14,-2.404 -18.294,-7.214 -4.295,-4.81 -6.442,-10.994 -6.442,-18.55 0,-7.903 1.976,-14.172 5.927,-18.811 3.949,-4.638 9.876,-6.956 17.778,-6.956 5.085,0 9.55,1.545 13.398,4.638 v -2.577 c 0,-3.092 -0.971,-5.926 -2.911,-8.502 -1.942,-2.577 -5.66,-3.866 -11.156,-3.866 -5.395,0.344 -9.981,1.89 -13.76,4.638 l -6.699,-7.729 c 5.496,-5.496 13.226,-8.349 23.19,-8.555 l 3.091,0.309 c 6.527,0.275 11.938,2.834 16.233,7.678 4.294,4.844 6.441,10.873 6.441,18.088 V 28.857 L 0,30.403 Z"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path22"
- inkscape:connector-curvature="0" /></g><g
- id="g24"
- transform="translate(721.3339,400.388)"><path
- d="M 0,0 -19.678,50.387 H -36.935 L -8.326,-20.779 H 8.754 L 37.291,50.387 H 20.034 Z"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path26"
- inkscape:connector-curvature="0" /></g><path
- d="m 762.901,379.609 h 14.429 v 51.583 h -14.429 z"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path28"
- inkscape:connector-curvature="0" /><g
- id="g30"
- transform="translate(770.0126,450.7747)"><path
- d="m 0,0 c -2.337,0 -4.191,-0.696 -5.565,-2.088 -1.375,-1.391 -2.062,-3.117 -2.062,-5.179 0,-2.061 0.687,-3.779 2.062,-5.153 1.374,-1.374 3.263,-2.061 5.669,-2.061 2.404,0 4.293,0.687 5.667,2.061 1.375,1.374 2.062,3.092 2.062,5.153 0,2.062 -0.687,3.788 -2.062,5.179 C 4.397,-0.696 2.474,0 0,0"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path32"
- inkscape:connector-curvature="0" /></g><g
- id="g34"
- transform="translate(816.4667,407.1781)"><path
- d="m 0,0 c -3.264,2.438 -7.455,3.898 -12.573,4.381 -3.471,0.686 -5.789,1.554 -6.957,2.601 -1.168,1.048 -1.168,2.216 0,3.504 1.168,1.289 3.486,1.933 6.957,1.933 3.504,0 6.424,-1.391 8.76,-4.174 l 8.039,9.069 c -2.336,2.302 -4.844,4.038 -7.524,5.206 -2.68,1.167 -6.441,1.751 -11.285,1.751 -4.192,0 -8.271,-1.4 -12.239,-4.199 -3.968,-2.801 -5.951,-6.863 -5.951,-12.187 0,-5.807 1.742,-9.758 5.229,-11.854 3.486,-2.094 7.326,-3.383 11.518,-3.863 4.638,-0.447 7.36,-1.315 8.168,-2.604 0.807,-1.288 0.635,-2.576 -0.515,-3.864 -1.152,-1.289 -3.703,-1.932 -7.653,-1.932 -4.913,0.446 -8.537,2.06 -10.873,4.843 l -7.988,-9.017 c 2.336,-2.337 5.127,-4.201 8.374,-5.592 3.247,-1.392 7.309,-2.087 12.187,-2.087 5.119,0 9.602,1.339 13.45,4.02 3.848,2.679 5.772,6.458 5.772,11.336 C 4.896,-6.683 3.264,-2.44 0,0"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path36"
- inkscape:connector-curvature="0" /></g><g
- id="g38"
- transform="translate(861.0429,395.5062)"><path
- d="m 0,0 c -2.439,-2.628 -5.616,-3.942 -9.533,-3.942 -3.916,0 -7.095,1.314 -9.533,3.942 -2.44,2.628 -3.66,5.9 -3.66,9.816 0,3.917 1.22,7.198 3.66,9.843 2.438,2.646 5.598,3.968 9.482,3.968 3.949,0 7.145,-1.322 9.584,-3.968 C 2.439,17.014 3.659,13.733 3.659,9.816 3.659,5.9 2.439,2.628 0,0 m -9.481,36.149 c -7.491,0 -13.743,-2.507 -18.758,-7.523 -5.017,-5.017 -7.524,-11.269 -7.524,-18.757 0,-7.456 2.507,-13.7 7.524,-18.732 5.015,-5.034 11.267,-7.55 18.758,-7.55 7.489,0 13.741,2.508 18.757,7.523 5.016,5.016 7.524,11.269 7.524,18.759 0,7.488 -2.508,13.74 -7.524,18.757 -5.016,5.016 -11.268,7.523 -18.757,7.523"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path40"
- inkscape:connector-curvature="0" /></g><g
- id="g42"
- transform="translate(910.0244,431.14)"><path
- d="m 0,0 c -5.291,0 -9.773,-1.89 -13.449,-5.668 v 5.72 l -14.43,-1.546 v -50.037 h 14.43 v 31.691 c 2.335,4.845 5.255,7.387 8.76,7.627 3.126,-0.102 5.531,-0.91 7.214,-2.422 L 4.792,-0.412 C 3.28,-0.138 1.683,0 0,0"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path44"
- inkscape:connector-curvature="0" /></g><g
- id="g46"
- transform="translate(594.4321,453.1439)"><path
- d="m 0,0 c 0,0 -0.204,0.139 -0.45,0.277 -0.906,0.547 -1.856,1 -2.744,1.321 -1.685,0.679 -3.62,1.262 -5.761,1.738 l -2.063,2.533 c -1.793,2.283 -6.09,7.358 -13.132,13.334 -8.142,6.209 -24.212,15.045 -47.595,12.442 -4.578,-0.444 -9.077,-1.318 -13.368,-2.597 l -0.232,-0.068 c -12.978,-3.918 -24.155,-11.512 -33.24,-22.601 -0.6,-0.754 -1.179,-1.504 -1.783,-2.307 l -0.134,-0.191 -0.062,-0.074 c -1.194,-1.596 -2.36,-3.258 -3.485,-4.969 l -0.125,-0.198 c -3.559,-5.915 -6.126,-12.72 -6.85,-14.73 -11.284,-34.556 2.735,-61.502 5.669,-66.567 8.482,-14.4 23.945,-32.461 50.005,-38.975 1.42,-0.354 2.872,-0.676 4.356,-0.96 0.016,-0.004 0.036,-0.009 0.053,-0.013 6.537,-1.118 16.647,-1.928 29.969,-0.317 3.21,0.621 8.236,2.535 8.646,8.445 2.209,0.842 10.261,3.812 10.261,3.812 8.572,3.874 18.586,11.106 24.334,24.546 1.21,2.83 0.277,6.128 -2.171,7.994 l -0.202,0.152 c 0.639,1.557 1.125,3.209 1.488,4.93 l 0.019,0.009 c 0,0 0.063,0.325 0.164,0.854 0.013,0.073 0.028,0.144 0.041,0.215 0.402,2.114 1.294,6.91 1.719,10.035 0.02,0.149 0.029,0.268 0.033,0.371 2.136,14.686 2.099,26.608 -0.156,37.847 1.443,0.114 2.672,1.095 3.106,2.477 l 0.655,2.086 C 9.188,-12.066 6.243,-4.003 0,0"
- style="fill:#262262;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path48"
- inkscape:connector-curvature="0" /></g><g
- id="g50"
- transform="translate(551.9267,364.2689)"><path
- d="m 0,0 c 16.398,20.796 22.346,43.748 18.045,69.9 3.02,0 5.654,-0.022 8.288,0.007 2.187,0.025 4.374,0.101 6.56,0.172 4.716,0.152 4.743,0.163 5.79,-4.512 C 41.779,51.734 40.83,37.911 38.507,24.1 38.26,22.631 37.817,21.189 37.656,19.715 36.479,8.941 29.625,3.588 19.853,1.08 13.574,-0.531 7.209,-0.816 0,0"
- style="fill:#fbb03b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path52"
- inkscape:connector-curvature="0" /></g><g
- id="g54"
- transform="translate(548.4282,396.6898)"><path
- d="M 0,0 C 0,2.553 -3.404,4.255 -3.404,4.255 -0.851,5.105 0,8.51 0,8.51 0,8.51 0.851,5.105 3.404,4.255 3.404,4.255 0,2.553 0,0 m -16.835,6.354 c 0,6.638 -8.851,11.063 -8.851,11.063 6.638,2.213 8.851,11.063 8.851,11.063 0,0 2.212,-8.85 8.85,-11.063 0,0 -8.85,-4.425 -8.85,-11.063"
- style="fill:#fbb03b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path56"
- inkscape:connector-curvature="0" /></g><g
- id="g58"
- transform="translate(489.744,429.7865)"><path
- d="m 0,0 c 0,0 -2.18,3.308 -9.585,2.026 0,0 5.894,28.296 35.737,38.209 C 26.152,40.235 -2.777,23.901 0,0 m 0.574,-32.696 -0.003,-0.003 c -1.277,-1.994 -2.778,-3.523 -4.46,-4.544 -1.492,-0.919 -3.13,-1.403 -4.67,-1.403 -0.519,0 -1.028,0.055 -1.516,0.167 l -2.174,0.5 2.15,0.593 c 1.605,0.443 3.016,1.325 4.194,2.623 0.997,1.09 1.826,2.447 2.536,4.149 1.158,2.775 1.757,6.066 1.783,9.781 -0.048,3.661 -0.673,6.945 -1.857,9.75 -0.693,1.629 -1.56,3.009 -2.58,4.099 -1.208,1.29 -2.63,2.143 -4.228,2.536 l -2.154,0.529 2.145,0.564 c 0.473,0.125 0.983,0.193 1.516,0.205 l 0.031,0.002 1.662,-0.152 c 1.029,-0.203 2.071,-0.605 3.014,-1.167 1.727,-1.015 3.25,-2.527 4.526,-4.493 2.122,-3.323 3.264,-7.421 3.305,-11.857 C 3.767,-25.296 2.653,-29.4 0.574,-32.696 M 100.951,17.69 c 0,0 -0.074,0.05 -0.223,0.136 -0.532,0.32 -1.095,0.593 -1.688,0.801 -2.997,1.223 -9.729,3.139 -21.568,2.583 -0.029,0 -0.055,0 -0.084,-0.001 C 52.1,20.798 29.559,10.754 29.559,10.754 c 0,0 1.417,1.733 3.4,3.636 0,0 10e-4,0 10e-4,10e-4 1.036,0.958 2.319,2.044 3.853,3.176 0.044,0.031 0.086,0.062 0.122,0.091 8.573,6.286 24.949,13.945 50.829,9.394 -0.615,0.698 -1.254,1.341 -1.887,1.998 l 0.057,-0.009 c 0,0 -5.858,6.886 -16.555,12.616 -0.613,0.331 -1.252,0.659 -1.91,0.985 -0.038,0.018 -0.073,0.038 -0.112,0.057 -0.067,0.033 -0.128,0.057 -0.195,0.089 -8.007,3.888 -19.263,7.05 -33.564,5.458 -3.873,-0.374 -8.015,-1.116 -12.272,-2.399 -0.007,-0.002 -0.013,-0.003 -0.02,-0.005 L 21.304,45.84 c -10,-3.018 -20.632,-9.029 -29.888,-20.328 -0.563,-0.707 -1.105,-1.409 -1.632,-2.109 -0.064,-0.096 -0.138,-0.198 -0.222,-0.301 -1.164,-1.557 -2.236,-3.091 -3.229,-4.602 -3.216,-5.343 -5.544,-11.486 -6.214,-13.337 -10.417,-31.901 2.546,-56.661 5.065,-61.011 8.793,-14.923 24.186,-31.856 49.988,-36.751 0.195,-0.046 0.377,-0.101 0.573,-0.145 1.697,-0.361 4.79,-0.914 8.771,-1.178 1.5,-0.067 3.04,-0.093 4.626,-0.065 1.525,-0.01 2.953,0.016 4.268,0.063 0.39,0.027 0.729,0.04 1.029,0.044 5.023,0.233 8.144,0.762 8.144,0.762 -26.133,1.279 -39.232,13.203 -44.987,20.816 -1.304,1.622 -2.422,3.367 -3.324,5.234 -0.356,0.699 -0.515,1.097 -0.515,1.097 8.328,-7.069 19.98,-13.155 19.98,-13.155 10.449,-4.917 21.402,-7.337 33.008,-5.747 0,0 24.264,2.227 33.932,24.702 -0.417,0.317 -0.361,0.275 -0.777,0.592 -0.642,-0.518 -1.274,-1.007 -1.898,-1.474 -0.975,-0.64 -1.933,-1.336 -2.89,-2.038 -5.184,-3.431 -9.414,-5.048 -11.934,-5.788 -19.06,-4.811 -36.698,-1.232 -52.114,12.263 -7.14,6.251 -11.572,14.131 -11.759,23.917 -0.122,6.365 -0.188,12.734 -0.135,19.101 0.084,10.023 7.135,17.645 17.126,18.986 20.245,2.716 40.598,3.652 60.993,4.129 3.125,0.072 5.888,0.036 7.342,-3.306 0.025,-0.057 0.358,0.021 0.543,0.035 1.379,4.393 -0.607,9.127 -4.223,11.444"
- style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path60"
- inkscape:connector-curvature="0" /></g></g></g></g></svg> \ No newline at end of file
diff --git a/website/assets/logos/powered-gvisor.png b/website/assets/logos/powered-gvisor.png
deleted file mode 100644
index e00c74a33..000000000
--- a/website/assets/logos/powered-gvisor.png
+++ /dev/null
Binary files differ
diff --git a/website/blog/2019-11-18-security-basics.md b/website/blog/2019-11-18-security-basics.md
deleted file mode 100644
index 938605cc2..000000000
--- a/website/blog/2019-11-18-security-basics.md
+++ /dev/null
@@ -1,310 +0,0 @@
-# gVisor Security Basics - Part 1
-
-This blog is a space for engineers and community members to share perspectives
-and deep dives on technology and design within the gVisor project. Though our
-logo suggests we're in the business of space exploration (or perhaps fighting
-sea monsters), we're actually in the business of sandboxing Linux containers.
-When we created gVisor, we had three specific goals in mind; _container-native
-security_, _resource efficiency_, and _platform portability_. To put it simply,
-gVisor provides _efficient defense-in-depth for containers anywhere_.
-
-This post addresses gVisor's _container-native security_, specifically how
-gVisor provides strong isolation between an application and the host OS. Future
-posts will address _resource efficiency_ (how gVisor preserves container
-benefits like fast starts, smaller snapshots, and less memory overhead than VMs)
-and _platform portability_ (run gVisor wherever Linux OCI containers run).
-Delivering on each of these goals requires careful security considerations and a
-robust design.
-
-## What does "sandbox" mean?
-
-gVisor allows the execution of untrusted containers, preventing them from
-adversely affecting the host. This means that the untrusted container is
-prevented from attacking or spying on either the host kernel or any other peer
-userspace processes on the host.
-
-For example, if you are a cloud container hosting service, running containers
-from different customers on the same virtual machine means that a single
-compromise could expose other customers' data. Properly configured, gVisor can
-provide sufficient
-isolation to allow different customers to run containers on the same host. There
-are many aspects to the proper configuration, including limiting file and
-network access, which we will discuss in future posts.
-
-## The cost of compromise
-
-gVisor was designed around the premise that any security boundary could
-potentially be compromised with enough time and resources. We tried to optimize
-for a solution that was as costly and time-consuming for an attacker as
-possible, at every layer.
-
-Consequently, gVisor was built through a combination of intentional design
-principles and specific technology choices that work together to provide the
-security isolation needed for running hostile containers on a host. We'll dig
-into it in the next section!
-
-# Design Principles
-
-gVisor was designed with some common
-[secure design](https://en.wikipedia.org/wiki/Secure_by_design) principles in
-mind: Defense-in-Depth, Principle of Least-Privilege, Attack Surface Reduction
-and Secure-by-Default[^1].
-
-In general, Design Principles outline good engineering practices, but in the
-case of security, they also can be thought of as a set of tactics. In a
-real-life castle, there is no single defensive feature. Rather, there are many
-in combination: redundant walls, scattered drawbridges, small bottleneck
-entrances, moats, etc.
-
-A simplified version of the design is below
-([more detailed version](/docs/))[^2]:
-
-![Figure 1](/assets/images/2019-11-18-security-basics-figure1.png "Simplified design of gVisor.")
-
-In order to discuss design principles, the following components are important to
-know:
-
-* runsc - binary that packages the Sentry, platform, and Gofer(s) that run
- containers. runsc is the drop-in binary for running gVisor in Docker and
- Kubernetes.
-* Untrusted Application - container running in the sandbox. Untrusted
- application/container are used interchangeably in this article.
-* Platform Syscall Switcher - intercepts syscalls from the application and
- passes them to the Sentry with no further handling.
-* Sentry - The "application kernel" in userspace that serves the untrusted
- application. Each application instance has its own Sentry. The Sentry
- handles syscalls, routes I/O to gofers, and manages memory and CPU, all in
- userspace. The Sentry is allowed to make limited, filtered syscalls to the
- host OS.
-* Gofer - a process that specifically handles different types of I/O for the
- Sentry (usually disk I/O). Gofers are also allowed to make filtered syscalls
- to the Host OS.
-* Host OS - the actual OS on which gVisor containers are running, always some
- flavor of Linux (sorry, Windows/MacOS users).
-
-It is important to emphasize what is being protected from the untrusted
-application in this diagram: the host OS and other userspace applications.
-
-In this post, we are only discussing security-related features of gVisor, and
-you might ask, "What about performance, compatibility and stability?" We will
-cover these considerations in future posts.
-
-## Defense-in-Depth
-
-For gVisor, Defense-in-Depth means each component of the software stack trusts
-the other components as little as possible.
-
-It may seem strange that we would want our own software components to distrust
-each other. But by limiting the trust between small, discrete components, each
-component is forced to defend itself against potentially malicious input. And
-when you stack these components on top of each other, you can ensure that
-multiple security barriers must be overcome by an attacker.
-
-And this leads us to how Defense-in-Depth is applied to gVisor: no single
-vulnerability should compromise the host.
-
-In the "Attacker's Advantage / Defender's Dilemma," the defender must succeed
-all the time while the attacker only needs to succeed once. Defense in Depth
-blunts this advantage: once the attacker successfully compromises any given
-software component, they are immediately faced with needing to compromise a
-subsequent, distinct layer in order to move laterally or acquire more privilege.
-
-For example, the untrusted container is isolated from the Sentry. The Sentry is
-isolated from host I/O operations by serving those requests in separate
-processes called Gofers. And both the untrusted container and its associated
-Gofers are isolated from the host process that is running the sandbox.
-
-An additional benefit is that this generally leads to more robust and stable
-software, forcing interfaces to be strictly defined and tested to ensure all
-inputs are properly parsed and bounds checked.
-
-## Least-Privilege
-
-The principle of Least-Privilege implies that each software component has only
-the permissions it needs to function, and no more.
-
-Least-Privilege is applied throughout gVisor. Each component, and more
-importantly each interface between the components, is designed so that only the
-minimum level of permission is required for it to perform its function.
-Specifically, the closer you are to the untrusted application, the less
-privilege you have.
-
-![Figure 2](/assets/images/2019-11-18-security-basics-figure2.png "runsc components and their privileges.")
-
-This is evident in how runsc (the drop-in gVisor binary for Docker/Kubernetes)
-constructs the sandbox. The Sentry has the least privilege possible (it can't
-even open a file!). Gofers are only allowed file access, so even if one were
-compromised, the host network would be unavailable. Only the runsc binary itself
-has full access to the host OS, and even runsc's access to the host OS is often
-limited through capabilities / chroot / namespacing.
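-
-To make that last point concrete, below is a minimal, hypothetical Go sketch
-of the kind of self-restriction described above: confining a helper process to
-an empty chroot and forbidding it from ever gaining new privileges. The
-`/var/empty` path and the use of `golang.org/x/sys/unix` are illustrative
-assumptions; this is not runsc's actual code.
-
-```go
-package main
-
-import (
-	"log"
-	"os"
-	"syscall"
-
-	"golang.org/x/sys/unix"
-)
-
-func main() {
-	// Forbid this process (and its children) from ever gaining privileges,
-	// e.g. through setuid binaries.
-	if err := unix.Prctl(unix.PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); err != nil {
-		log.Fatalf("prctl(PR_SET_NO_NEW_PRIVS): %v", err)
-	}
-	// Confine the filesystem view to an (assumed) empty directory. This
-	// requires sufficient privilege, e.g. CAP_SYS_CHROOT.
-	if err := syscall.Chroot("/var/empty"); err != nil {
-		log.Fatalf("chroot: %v", err)
-	}
-	if err := os.Chdir("/"); err != nil {
-		log.Fatalf("chdir: %v", err)
-	}
-	// A real sandbox would also drop capabilities, unshare namespaces, and
-	// install a seccomp filter before doing any further work.
-	log.Println("running with a restricted view of the host")
-}
-```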
-
-Designing a system with Defense-in-Depth and Least-Privilege in mind encourages
-small, separate, single-purpose components, each with very restricted
-privileges.
-
-## Attack Surface Reduction
-
-There are no bugs in unwritten code. In other words, gVisor supports a feature
-if and only if it is needed to run host Linux containers.
-
-### Host Application/Sentry Interface:
-
-There are a lot of things gVisor does not need to do. For example, it does not
-need to support arbitrary device drivers, nor does it need to support video
-playback. By not implementing what will not be used, we avoid introducing
-potential bugs in our code.
-
-That is not to say gVisor has limited functionality! Quite the opposite, we
-analyzed what is actually needed to run Linux containers and today the Sentry
-supports 237 syscalls[^3]<sup>,</sup>[^4], along with the range of critical
-/proc and /dev files. However, gVisor does not support every syscall in the
-Linux kernel. There are about 350 syscalls[^5] within the 5.3.11 version of the
-Linux kernel, many of which do not apply to Linux containers that typically host
-cloud-like workloads. For example, we don't support old versions of epoll
-(epoll_ctl_old, epoll_wait_old), because they are deprecated in Linux and no
-supported workloads use them.
-
-Furthermore, any exploited vulnerabilities in the implemented syscalls (or
-Sentry code in general) only apply to gaining control of the Sentry. More on
-this in a later post.
-
-### Sentry/Host OS Interface:
-
-The Sentry's interactions with the Host OS are restricted in many ways. For
-instance, no syscall is "passed-through" from the untrusted application to the
-host OS. All syscalls are intercepted and interpreted. In the case where the
-Sentry needs to call the Host OS, we severely limit the syscalls that the Sentry
-itself is allowed to make to the host kernel[^6].
-
-For example, there are many file-system based attacks, where manipulation of
-files or their paths, can lead to compromise of the host[^7]. As a result, the
-Sentry does not allow any syscall that creates or opens a file descriptor. All
-file descriptors must be donated to the sandbox. By disallowing open or creation
-of file descriptors, we eliminate entire categories of these file-based attacks.
-
-This does not affect functionality, though. For example, during startup, runsc
-will donate FDs to the Sentry that allow for mapping STDIN/STDOUT/STDERR to the
-sandboxed application. Also, the Gofer may donate an FD to the Sentry, allowing
-for direct access to some files. And most files will be remotely accessed
-through the Gofers, in which case no FDs are donated to the Sentry.
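-
-To illustrate what "donating" a file descriptor looks like at the syscall
-level, here is a small, hypothetical Go sketch that passes an already-open FD
-across a Unix domain socket using SCM_RIGHTS. The socket path and file are
-placeholders, and this is not gVisor's actual donation code.
-
-```go
-package main
-
-import (
-	"log"
-	"net"
-	"os"
-	"syscall"
-)
-
-// donate sends an open file descriptor to a peer over a Unix domain socket.
-// The receiver can use the file without ever being allowed to open() it.
-func donate(conn *net.UnixConn, f *os.File) error {
-	// Encode the FD into an SCM_RIGHTS control message.
-	oob := syscall.UnixRights(int(f.Fd()))
-	_, _, err := conn.WriteMsgUnix([]byte("fd"), oob, nil)
-	return err
-}
-
-func main() {
-	addr := &net.UnixAddr{Name: "/tmp/sandbox.sock", Net: "unix"} // placeholder
-	conn, err := net.DialUnix("unix", nil, addr)
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer conn.Close()
-
-	f, err := os.Open("/etc/hostname") // placeholder file to donate
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer f.Close()
-
-	if err := donate(conn, f); err != nil {
-		log.Fatal(err)
-	}
-}
-```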
-
-The Sentry itself is only allowed access to specific
-[allowlisted syscalls](https://github.com/google/gvisor/blob/master/runsc/config/config.go).
-Without networking, the Sentry needs 53 host syscalls in order to function, and
-with networking, it uses an additional 15[^8]. By limiting the allowlist to only
-these needed syscalls, we radically reduce the amount of host OS attack surface.
-If any attempts are made to call something outside the allowlist, it is
-immediately blocked and the sandbox is killed by the Host OS.
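-
-The sketch below shows, in broad strokes, how such an allowlist can be
-installed from Go. It is a hypothetical example that assumes the third-party
-`github.com/seccomp/libseccomp-golang` bindings rather than the Sentry's own
-seccomp package, and the four syscalls listed are placeholders, not gVisor's
-real allowlist (a Go program this small would in fact need more than these
-four to survive).
-
-```go
-package main
-
-import (
-	"log"
-
-	seccomp "github.com/seccomp/libseccomp-golang"
-)
-
-func main() {
-	// Default action: kill the process for any syscall not explicitly allowed.
-	filter, err := seccomp.NewFilter(seccomp.ActKillProcess)
-	if err != nil {
-		log.Fatal(err)
-	}
-	// A tiny, illustrative allowlist; the real one is generated in
-	// runsc/boot/filter and is considerably longer.
-	for _, name := range []string{"read", "write", "futex", "exit_group"} {
-		call, err := seccomp.GetSyscallFromName(name)
-		if err != nil {
-			log.Fatal(err)
-		}
-		if err := filter.AddRule(call, seccomp.ActAllow); err != nil {
-			log.Fatal(err)
-		}
-	}
-	// Install the filter; from this point on, anything off the list is fatal.
-	if err := filter.Load(); err != nil {
-		log.Fatal(err)
-	}
-}
-```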
-
-### Sentry/Gofer Interface:
-
-The Sentry communicates with the Gofer through a local unix domain socket (UDS)
-via a version of the 9P protocol[^9]. The UDS file descriptor is passed to the
-sandbox during initialization and all communication between the Sentry and Gofer
-happens via 9P. We will go into more detail on how Gofers work in future posts.
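-
-For a rough sense of how such a private channel can be set up, here is a
-hypothetical Go sketch that creates a connected Unix-socket pair and hands one
-end to a child process at startup. The helper binary path is a placeholder,
-and the real Sentry/Gofer wiring in runsc is considerably more involved.
-
-```go
-package main
-
-import (
-	"log"
-	"os"
-	"os/exec"
-	"syscall"
-)
-
-func main() {
-	// A connected pair of Unix stream sockets: one end for the parent, one
-	// for the child. Nothing else on the host can reach this channel.
-	fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM, 0)
-	if err != nil {
-		log.Fatal(err)
-	}
-	parentEnd := os.NewFile(uintptr(fds[0]), "channel-parent")
-	childEnd := os.NewFile(uintptr(fds[1]), "channel-child")
-
-	// Placeholder helper binary; it inherits childEnd as FD 3.
-	cmd := exec.Command("/usr/local/bin/gofer-like-helper")
-	cmd.ExtraFiles = []*os.File{childEnd}
-	if err := cmd.Start(); err != nil {
-		log.Fatal(err)
-	}
-	childEnd.Close() // the parent no longer needs the child's end
-
-	// The parent would now speak its protocol (9P in gVisor's case) over
-	// parentEnd; here we simply close it and wait for the child.
-	parentEnd.Close()
-	_ = cmd.Wait()
-}
-```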
-
-### End Result
-
-So, of the 350 syscalls in the Linux kernel, the Sentry needs to implement only
-237 of them to support containers. At most, the Sentry only needs to call 68 of
-the host Linux syscalls. In other words, with gVisor, applications get the vast
-majority (and growing) of Linux container functionality while exposing only 68
-possible syscalls to the Host OS. Going from 350 syscalls down to 68 is a
-substantial reduction in attack surface.
-
-![Figure 3](/assets/images/2019-11-18-security-basics-figure3.png "Reduction of Attack Surface of the Syscall Table. Note that the Sentry's Syscall Emulation Layer keeps the Containerized Process from ever calling the Host OS.")
-
-## Secure-by-default
-
-The default choice for a user should be safe. If users need to run a less secure
-configuration of the sandbox for the sake of performance or application
-compatibility, they must make the choice explicitly.
-
-An example of this might be a networking application that is performance
-sensitive. Instead of using the safer, Go-based Netstack in the Sentry, the
-untrusted container can instead use the host Linux networking stack directly.
-However, this means the untrusted container will be directly interacting with
-the host, without the safety benefits of the sandbox. It also means that an
-attacker could directly compromise the host through this path.
-
-These less secure configurations are **not** the default. In fact, the user must
-take action to change the configuration and run in a less secure mode.
-Additionally, these actions make it very obvious that a less secure
-configuration is being used.
-
-This can be as simple as forcing a default runtime flag option to the secure
-option. gVisor does this by always using its internal netstack by default.
-However, for certain performance sensitive applications, we allow the usage of
-the host OS networking stack, but it requires the user to actively set a
-flag[^10].
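-
-As a tiny, hypothetical illustration of that pattern (not runsc's actual flag
-handling), a secure-by-default option can be encoded directly in the flag's
-default value, so that the riskier mode always requires an explicit opt-in:
-
-```go
-package main
-
-import (
-	"flag"
-	"fmt"
-)
-
-func main() {
-	// The safe choice is the default; users must opt in to the riskier mode
-	// explicitly, e.g. --network=host.
-	network := flag.String("network", "sandbox",
-		"networking mode: sandbox (internal netstack) or host (passthrough)")
-	flag.Parse()
-
-	if *network == "host" {
-		fmt.Println("WARNING: host networking bypasses the sandbox's netstack")
-	}
-	fmt.Println("network mode:", *network)
-}
-```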
-
-# Technology Choices
-
-Technology choices for gVisor mainly involve things that will give us a security
-boundary.
-
-At a high level, "boundaries" in software can refer to a great many things:
-boundaries between threads, boundaries between processes, boundaries between
-CPU privilege levels, and more.
-
-Security boundaries are interfaces that are designed and built so that entire
-classes of bugs/vulnerabilities are eliminated.
-
-For example, the Sentry and Gofers are implemented using Go. Go was chosen for a
-number of the features it provided. Go is a fast, statically-typed, compiled
-language that has efficient multi-threading support, garbage collection and a
-constrained set of "unsafe" operations.
-
-Using these features enabled safe array and pointer handling. This means entire
-classes of vulnerabilities were eliminated, such as buffer overflows and
-use-after-free.
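-
-A trivial, hypothetical example of what this buys in practice: an
-out-of-bounds write that could silently corrupt memory in C becomes a
-well-defined runtime panic under Go's bounds checking.
-
-```go
-package main
-
-import "fmt"
-
-func main() {
-	buf := make([]byte, 8)
-	i := 12 // imagine an attacker-influenced index
-
-	defer func() {
-		if r := recover(); r != nil {
-			// The runtime stopped the access instead of corrupting memory.
-			fmt.Println("caught:", r)
-		}
-	}()
-
-	buf[i] = 0xff // the bounds check fails here and panics
-}
-```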
-
-Another example is our use of very strict syscall switching to ensure that the
-Sentry is always the first software component that parses and interprets the
-calls being made by the untrusted container. Here is an instance where different
-platforms use different solutions, but all of them share this common trait,
-whether it is through the use of ptrace "a la PTRACE_ATTACH"[^11] or kvm's
-ring0[^12].
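-
-To give a flavor of the ptrace variant, here is a much-simplified,
-hypothetical Go sketch that stops a child at every syscall boundary and reads
-the syscall number. It only observes rather than emulates, is Linux/amd64
-specific, and bears little resemblance to the real ptrace platform's
-stub-thread machinery.
-
-```go
-package main
-
-import (
-	"fmt"
-	"log"
-	"os/exec"
-	"runtime"
-	"syscall"
-)
-
-func main() {
-	// ptrace requests must all come from the same OS thread.
-	runtime.LockOSThread()
-
-	cmd := exec.Command("/bin/true")
-	cmd.SysProcAttr = &syscall.SysProcAttr{Ptrace: true}
-	if err := cmd.Start(); err != nil {
-		log.Fatal(err)
-	}
-	pid := cmd.Process.Pid
-
-	// The child is stopped at its first instruction because of Ptrace: true.
-	if _, err := syscall.Wait4(pid, nil, 0, nil); err != nil {
-		log.Fatal(err)
-	}
-
-	var regs syscall.PtraceRegs
-	for {
-		// Let the child run until its next syscall entry or exit.
-		if err := syscall.PtraceSyscall(pid, 0); err != nil {
-			log.Fatal(err)
-		}
-		var ws syscall.WaitStatus
-		if _, err := syscall.Wait4(pid, &ws, 0, nil); err != nil || ws.Exited() {
-			return
-		}
-		if err := syscall.PtraceGetRegs(pid, &regs); err != nil {
-			return
-		}
-		// On linux/amd64 the syscall number is in Orig_rax. A real sandbox
-		// would emulate the call here instead of letting the host execute it.
-		fmt.Println("intercepted syscall", regs.Orig_rax)
-	}
-}
-```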
-
-Finally, one of the most restrictive choices was to use seccomp, to restrict the
-Sentry from being able to open or create a file descriptor on the host. All file
-I/O is required to go through Gofers. Preventing the opening or creation of file
-descriptors eliminates whole categories of bugs around file permissions
-[like this one](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-4557)[^13].
-
-# To be continued - Part 2
-
-In part 2 of this blog post, we will explore gVisor from an attacker's point of
-view. We will use it as an opportunity to examine the specific strengths and
-weaknesses of each gVisor component.
-
-We will also use it to introduce Google's Vulnerability Reward Program[^14], and
-other ways the community can contribute to help make gVisor safe, fast and
-stable.
-<br>
-<br>
-**Updated (2021-07-14):** this post was updated to use more inclusive language.
-<br>
-
---------------------------------------------------------------------------------
-
-[^1]: [https://en.wikipedia.org/wiki/Secure_by_design](https://en.wikipedia.org/wiki/Secure_by_design)
-[^2]: [https://gvisor.dev/docs/architecture_guide](https://gvisor.dev/docs/architecture_guide/)
-[^3]: [https://github.com/google/gvisor/blob/master/pkg/sentry/syscalls/linux/linux64_amd64.go](https://github.com/google/gvisor/blob/master/pkg/sentry/syscalls/syscalls.go)
-
-<!-- mdformat off(mdformat formats this into multiple lines) -->
-[^4]: Internally, that is: the Sentry doesn't call out to the Host OS to implement them; in fact, that is explicitly disallowed. More on that in the future.
-<!-- mdformat on -->
-
-[^5]: [https://elixir.bootlin.com/linux/latest/source/arch/x86/entry/syscalls/syscall_64.tbl#L345](https://elixir.bootlin.com/linux/latest/source/arch/x86/entry/syscalls/syscall_64.tbl#L345)
-[^6]: [https://github.com/google/gvisor/tree/master/runsc/boot/filter](https://github.com/google/gvisor/tree/master/runsc/boot/filter)
-[^7]: [https://en.wikipedia.org/wiki/Dirty_COW](https://en.wikipedia.org/wiki/Dirty_COW)
-[^8]: [https://github.com/google/gvisor/blob/master/runsc/boot/config.go](https://github.com/google/gvisor/blob/master/runsc/boot/config.go)
-
-<!-- mdformat off(mdformat breaks this url by escaping the parenthesis) -->
-[^9]: [https://en.wikipedia.org/wiki/9P_(protocol)](https://en.wikipedia.org/wiki/9P_(protocol))
-<!-- mdformat on -->
-
-[^10]: [https://gvisor.dev/docs/user_guide/networking/#network-passthrough](https://gvisor.dev/docs/user_guide/networking/#network-passthrough)
-[^11]: [https://github.com/google/gvisor/blob/c7e901f47a09eaac56bd4813227edff016fa6bff/pkg/sentry/platform/ptrace/subprocess.go#L390](https://github.com/google/gvisor/blob/c7e901f47a09eaac56bd4813227edff016fa6bff/pkg/sentry/platform/ptrace/subprocess.go#L390)
-[^12]: [https://github.com/google/gvisor/blob/c7e901f47a09eaac56bd4813227edff016fa6bff/pkg/sentry/platform/ring0/kernel_amd64.go#L182](https://github.com/google/gvisor/blob/c7e901f47a09eaac56bd4813227edff016fa6bff/pkg/sentry/platform/ring0/kernel_amd64.go#L182)
-[^13]: [https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-4557](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-4557)
-[^14]: [https://www.google.com/about/appsecurity/reward-program/index.html](https://www.google.com/about/appsecurity/reward-program/index.html)
diff --git a/website/blog/2020-04-02-networking-security.md b/website/blog/2020-04-02-networking-security.md
deleted file mode 100644
index f3ce02d11..000000000
--- a/website/blog/2020-04-02-networking-security.md
+++ /dev/null
@@ -1,183 +0,0 @@
-# gVisor Networking Security
-
-In our
-[first blog post](https://gvisor.dev/blog/2019/11/18/gvisor-security-basics-part-1/),
-we covered some secure design principles and how they guided the architecture of
-gVisor as a whole. In this post, we will cover how these principles guided the
-networking architecture of gVisor, and the tradeoffs involved. In particular, we
-will cover how these principles culminated in two networking modes, how they
-work, and the properties of each.
-
-## gVisor's security architecture in the context of networking
-
-Linux networking is complicated. The TCP protocol is over 40 years old, and has
-been repeatedly extended over the years to keep up with the rapid pace of
-network infrastructure improvements, all while maintaining compatibility. On top
-of that, Linux networking has a fairly large API surface. Linux supports
-[over 150 options](https://github.com/google/gvisor/blob/960f6a975b7e44c0efe8fd38c66b02017c4fe137/pkg/sentry/strace/socket.go#L476-L644)
-for the most common socket types alone. In fact, the net subsystem is one of the
-largest and fastest growing in Linux at approximately 1.1 million lines of code.
-For comparison, that is several times the size of the entire gVisor codebase.
-
-At the same time, networking is increasingly important. The cloud era is
-arguably about making everything a network service, and in order to make that
-work, the interconnect performance is critical. Adding networking support to
-gVisor was difficult, not just due to the inherent complexity, but also because
-it has the potential to significantly weaken gVisor's security model.
-
-As outlined in the previous blog post, gVisor's
-[secure design principles](https://gvisor.dev/blog/2019/11/18/gvisor-security-basics-part-1/#design-principles)
-are:
-
-1. Defense in Depth: each component of the software stack trusts each other
- component as little as possible.
-1. Least Privilege: each software component has only the permissions it needs
- to function, and no more.
-1. Attack Surface Reduction: limit the surface area of the host exposed to the
- sandbox.
-1. Secure by Default: the default choice for a user should be safe.
-
-gVisor manifests these principles as a multi-layered system. An application
-running in the sandbox interacts with the Sentry, a userspace kernel, which
-mediates all interactions with the Host OS and beyond. The Sentry is written in
-pure Go with minimal unsafe code, making it less vulnerable to buffer overflows
-and related memory bugs that can lead to a variety of compromises including code
-injection. It emulates Linux using only a minimal and audited set of Host OS
-syscalls that limit the Host OS's attack surface exposed to the Sentry itself.
-The syscall restrictions are enforced by running the Sentry with seccomp
-filters, which enforce that the Sentry can only use the expected set of
-syscalls. The Sentry runs as an unprivileged user and in namespaces, which,
-along with the seccomp filters, ensure that the Sentry is run with the Least
-Privilege required.
-
-gVisor's multi-layered design provides Defense in Depth. The Sentry, which does
-not trust the application because it may attack the Sentry and try to bypass it,
-is the first layer. The sandbox that the Sentry runs in is the second layer. If
-the Sentry were compromised, the attacker would still be in a highly restrictive
-sandbox which they must also break out of in order to compromise the Host OS.
-
-To enable networking functionality while preserving gVisor's security
-properties, we implemented a
-[userspace network stack](https://github.com/google/gvisor/tree/master/pkg/tcpip)
-in the Sentry, which we creatively named Netstack. Netstack is also written in
-Go, not only to avoid unsafe code in the network stack itself, but also to avoid
-a complicated and unsafe Foreign Function Interface. Having its own integrated
-network stack allows the Sentry to implement networking operations using up to
-three Host OS syscalls to read and write packets. These syscalls allow a very
-minimal set of operations which are already allowed (either through the same or
-a similar syscall). Moreover, because packets typically come from off-host (e.g.
-the internet), the Host OS's packet processing code has received a lot of
-scrutiny, hopefully resulting in a high degree of hardening.
-
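-As a rough illustration of how narrow that host interface is, the packet path
-reduces to plain reads and writes on a descriptor the sandbox is handed at
-startup. The sketch below is ours (it assumes golang.org/x/sys/unix and an
-already-open packet socket `fd`; it is not gVisor's actual link endpoint code):
-
-```go
-package packetio
-
-import "golang.org/x/sys/unix"
-
-// readPacket pulls one link-layer frame from the host socket. Reading and
-// writing raw frames like this is essentially the only packet-path interaction
-// the userspace stack needs from the host.
-func readPacket(fd int, buf []byte) ([]byte, error) {
-	n, err := unix.Read(fd, buf)
-	if err != nil {
-		return nil, err
-	}
-	return buf[:n], nil
-}
-
-// writePacket hands one fully assembled frame back to the host for delivery.
-func writePacket(fd int, frame []byte) error {
-	_, err := unix.Write(fd, frame)
-	return err
-}
-```
-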
-![Figure 1](/assets/images/2020-04-02-networking-security-figure1.png "Network and gVisor.")
-
-## Writing a network stack
-
-Netstack was written from scratch specifically for gVisor. Because Netstack was
-designed and implemented to be modular, flexible and self-contained, there are
-now several more projects using Netstack in creative and exciting ways. As we
-discussed, a custom network stack has enabled a variety of security-related
-goals which would not have been possible any other way. This came at a cost
-though. Network stacks are complex and writing a new one comes with many
-challenges, mostly related to application compatibility and performance.
-
-Compatibility issues typically come in two forms: missing features, and features
-with behavior that differs from Linux (usually due to bugs). Both of these are
-inevitable in an implementation of a complex system spanning many quickly
-evolving and ambiguous standards. However, we have invested heavily in this
-area, and the vast majority of applications have no issues using Netstack. For
-example,
-[we now support setting 34 different socket options](https://github.com/google/gvisor/blob/815df2959a76e4a19f5882e40402b9bbca9e70be/pkg/sentry/socket/netstack/netstack.go#L830-L1764)
-versus
-[only 7 in our initial git commit](https://github.com/google/gvisor/blob/d02b74a5dcfed4bfc8f2f8e545bca4d2afabb296/pkg/sentry/socket/epsocket/epsocket.go#L445-L702).
-We are continuing to make good progress in this area.
-
-Performance issues typically come from TCP behavior and packet processing speed.
-To improve our TCP behavior, we are working on implementing the full set of TCP
-RFCs. There are many RFCs which are significant to performance (e.g.
-[RACK](https://tools.ietf.org/id/draft-ietf-tcpm-rack-03.html) and
-[BBR](https://tools.ietf.org/html/draft-cardwell-iccrg-bbr-congestion-control-00))
-that we have yet to implement. This mostly affects TCP performance with
-non-ideal network conditions (e.g. cross continent connections). Faster packet
-processing mostly improves TCP performance when network conditions are very good
-(e.g. within a datacenter). Our primary strategy here is to reduce interactions
-with the Go runtime, specifically the garbage collector (GC) and scheduler. We
-are currently optimizing buffer management to reduce the amount of garbage,
-which will lower the GC cost. To reduce scheduler interactions, we are
-re-architecting the TCP implementation to use fewer goroutines. Performance
-today is good enough for most applications and we are making steady
-improvements. For example, since May of 2019, we have improved the Netstack
-runsc
-[iperf3 download benchmark](https://github.com/google/gvisor/tree/master/test/benchmarks/network)
-score by roughly 15% and upload score by around 10,000X. Current numbers are
-about 17 Gbps download and about 8 Gbps upload versus about 42 Gbps and 43 Gbps
-for native (Linux) respectively.
-
-## An alternative
-
-We also offer an alternative network mode: passthrough. This name can be
-misleading as syscalls are never passed through from the app to the Host OS.
-Instead, the passthrough mode implements networking in gVisor using the Host
-OS's network stack. (This mode is called
-[hostinet](https://github.com/google/gvisor/tree/master/pkg/sentry/socket/hostinet)
-in the codebase.) Passthrough mode can improve performance for some use cases as
-the Host OS's network stack has had an enormous number of person-years poured
-into making it highly performant. However, there is a rather large downside to
-using passthrough mode: it weakens gVisor's security model by increasing the
-Host OS's Attack Surface. This is because using the Host OS's network stack
-requires the Sentry to use the Host OS's
-[Berkeley socket interface](https://en.wikipedia.org/wiki/Berkeley_sockets). The
-Berkeley socket interface is a much larger API surface than the packet interface
-that our network stack uses. When passthrough mode is in use, the Sentry is
-allowed to use
-[15 additional syscalls](https://github.com/google/gvisor/blob/b1576e533223e98ebe4bd1b82b04e3dcda8c4bf1/runsc/boot/filter/config.go#L312-L517).
-Further, this set of syscalls includes some that allow the Sentry to create file
-descriptors, something that
-[we don't normally allow](https://gvisor.dev/blog/2019/11/18/gvisor-security-basics-part-1/#sentry-host-os-interface)
-as it opens up classes of file-based attacks.
-
-There are some networking features that we can't implement on top of the
-syscalls we consider safe (most notably those behind
-[ioctl](http://man7.org/linux/man-pages/man2/ioctl.2.html)), and they are
-therefore not supported. Because of this, we actually support fewer networking
-features in passthrough mode than we do in Netstack, reducing application
-compatibility.
-That's right: using our networking stack provides better overall application
-compatibility than using our passthrough mode.
-
-That said, gVisor with passthrough networking still provides a high level of
-isolation. Applications cannot specify host syscall arguments directly, and the
-Sentry's seccomp policy restricts its syscall use significantly more than a
-general purpose seccomp policy.
-
-## Secure by Default
-
-The goal of the Secure by Default principle is to make it easy to securely
-sandbox containers. Of course, disabling network access entirely is the most
-secure option, but that is not practical for most applications. To make gVisor
-Secure by Default, we have made Netstack the default networking mode in gVisor
-as we believe that it provides significantly better isolation. For this reason,
-we strongly caution users against changing the default unless Netstack flat out
-won't work for them. The passthrough mode option is still provided, but we want
-users to make an informed decision when selecting it.
-
-Another way in which gVisor makes it easy to securely sandbox containers is by
-allowing applications to run unmodified, with no special configuration needed.
-In order to do this, gVisor needs to support all of the features and syscalls
-that applications use. Neither seccomp nor gVisor's passthrough mode can do this
-as applications commonly use syscalls which are too dangerous to be included in
-a secure policy. Even if this dream isn't fully realized today, gVisor's
-architecture with Netstack makes this possible.
-
-## Give Netstack a Try
-
-If you haven't already, try running a workload in gVisor with Netstack. You can
-find instructions on how to get started in our
-[Quick Start](/docs/user_guide/quick_start/docker/). We want to hear about both
-your successes and any issues you encounter. We welcome your contributions,
-whether that be verbal feedback or code contributions, via our
-[Gitter channel](https://gitter.im/gvisor/community),
-[email list](https://groups.google.com/forum/#!forum/gvisor-users),
-[issue tracker](https://gvisor.dev/issue/new), and
-[Github repository](https://github.com/google/gvisor). Feel free to express
-interest in an [open issue](https://gvisor.dev/issue/), or reach out if you
-aren't sure where to start.
diff --git a/website/blog/2020-09-18-containing-a-real-vulnerability.md b/website/blog/2020-09-18-containing-a-real-vulnerability.md
deleted file mode 100644
index 8a6f7bbf1..000000000
--- a/website/blog/2020-09-18-containing-a-real-vulnerability.md
+++ /dev/null
@@ -1,226 +0,0 @@
-# Containing a Real Vulnerability
-
-In the previous two posts we talked about gVisor's
-[security design principles](https://gvisor.dev/blog/2019/11/18/gvisor-security-basics-part-1/)
-as well as how those are applied in the
-[context of networking](https://gvisor.dev/blog/2020/04/02/gvisor-networking-security/).
-Recently, a new container escape vulnerability
-([CVE-2020-14386](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-14386))
-was announced that ties these topics well together. gVisor is
-[not vulnerable](https://seclists.org/oss-sec/2020/q3/168) to this specific
-issue, but it provides an interesting case study to continue our exploration of
-gVisor's security. While gVisor is not immune to vulnerabilities,
-[we take several steps](https://gvisor.dev/security/) to minimize the impact and
-remediate if a vulnerability is found.
-
-## Escaping the Container
-
-First, let’s describe how the discovered vulnerability works. There are numerous
-ways one can send and receive bytes over the network with Linux. One of the most
-performant ways is to use a ring buffer, which is a memory region shared by the
-application and the kernel. These rings are created by calling
-[setsockopt(2)](https://man7.org/linux/man-pages/man2/setsockopt.2.html) with
-[`PACKET_RX_RING`](https://man7.org/linux/man-pages/man7/packet.7.html) for
-receiving and
-[`PACKET_TX_RING`](https://man7.org/linux/man-pages/man7/packet.7.html) for
-sending packets.
-
-The vulnerability is in the code that reads packets when `PACKET_RX_RING` is
-enabled. There is another option
-([`PACKET_RESERVE`](https://man7.org/linux/man-pages/man7/packet.7.html)) that
-asks the kernel to leave some space in the ring buffer before each packet for
-anything the application needs, e.g. control structures. When a packet is
-received, the kernel calculates where to copy the packet to, taking the amount
-reserved before each packet into consideration. If the amount reserved is large,
-the kernel performs an incorrect calculation that can overflow, leading to an
-out-of-bounds write of up to 10 bytes controlled by the attacker.
-The data in the write is easily controlled using the loopback to send a crafted
-packet and receiving it using a `PACKET_RX_RING` with a carefully selected
-`PACKET_RESERVE` size.
-
-```c
-static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *pt, struct net_device *orig_dev)
-{
-// ...
- if (sk->sk_type == SOCK_DGRAM) {
- macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
- po->tp_reserve;
- } else {
- unsigned int maclen = skb_network_offset(skb);
- // tp_reserve is unsigned int, netoff is unsigned short.
- // Addition can overflow netoff
- netoff = TPACKET_ALIGN(po->tp_hdrlen +
- (maclen < 16 ? 16 : maclen)) +
- po->tp_reserve;
- if (po->has_vnet_hdr) {
- netoff += sizeof(struct virtio_net_hdr);
- do_vnet = true;
- }
- // Attacker controls netoff and can make macoff be smaller
- // than sizeof(struct virtio_net_hdr)
- macoff = netoff - maclen;
- }
-// ...
- // "macoff - sizeof(struct virtio_net_hdr)" can be negative,
- // resulting in a pointer before h.raw
- if (do_vnet &&
- virtio_net_hdr_from_skb(skb, h.raw + macoff -
- sizeof(struct virtio_net_hdr),
- vio_le(), true, 0)) {
-// ...
-```
-
-The [`CAP_NET_RAW`](https://man7.org/linux/man-pages/man7/capabilities.7.html)
-capability is required to create the socket above. However, in order to support
-common debugging tools like `ping` and `tcpdump`, Docker containers, including
-those created for Kubernetes, are given `CAP_NET_RAW` by default and thus may be
-able to trigger this vulnerability to elevate privileges and escape the
-container.
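-
-For illustration only, here is roughly the kind of socket an application holding
-`CAP_NET_RAW` can create (our own sketch, not the published exploit; it assumes
-the golang.org/x/sys/unix package and omits the `PACKET_RX_RING` ring setup
-itself):
-
-```go
-package main
-
-import (
-	"log"
-
-	"golang.org/x/sys/unix"
-)
-
-// htons converts a 16-bit value to network byte order for the socket protocol.
-func htons(v uint16) int { return int(v<<8 | v>>8) }
-
-func main() {
-	// An AF_PACKET socket requires CAP_NET_RAW, which Docker grants by
-	// default; that is what made this bug reachable from containers.
-	fd, err := unix.Socket(unix.AF_PACKET, unix.SOCK_RAW, htons(unix.ETH_P_ALL))
-	if err != nil {
-		log.Fatalf("socket: %v", err)
-	}
-	defer unix.Close(fd)
-
-	// PACKET_RESERVE asks the kernel to leave per-packet headroom in the ring.
-	// The vulnerable calculation mixed this value into an unsigned short, so a
-	// large reserve could wrap and push the write before the buffer start.
-	if err := unix.SetsockoptInt(fd, unix.SOL_PACKET, unix.PACKET_RESERVE, 4<<20); err != nil {
-		log.Fatalf("setsockopt(PACKET_RESERVE): %v", err)
-	}
-	// Setting up PACKET_RX_RING (struct tpacket_req) would follow here.
-}
-```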
-
-Next, we are going to explore why this vulnerability doesn’t work in gVisor, and
-how gVisor could prevent the escape even if a similar vulnerability existed
-inside gVisor’s kernel.
-
-## Default Protections
-
-gVisor does not implement `PACKET_RX_RING`, but **does** support raw sockets
-which are required for `PACKET_RX_RING`. Raw sockets are a controversial feature
-to support in a sandbox environment. While it allows great customizations for
-essential tools like `ping`, it may allow packets to be written to the network
-without any validation. In general, allowing an untrusted application to write
-crafted packets to the network is a questionable idea and a historical source of
-vulnerabilities. With that in mind, if `CAP_NET_RAW` is enabled by default, it
-would not be _secure by default_ to run untrusted applications.
-
-After multiple discussions when raw sockets were first implemented, we decided
-to disable raw sockets by default, **even if `CAP_NET_RAW` is given to the
-application**. Instead, enabling raw sockets in gVisor requires the admin to set
-`--net-raw` flag to runsc when configuring the runtime, in addition to requiring
-the `CAP_NET_RAW` capability in the application. It comes at the expense that
-some tools may not work out of the box, but as part of our
-[secure-by-default](https://gvisor.dev/blog/2019/11/18/gvisor-security-basics-part-1/#secure-by-default)
-principle, we felt that it was important for the “less secure” configuration to
-be explicit.
-
-Since this bug was due to an overflow in the specific Linux implementation of
-the packet ring, gVisor's raw socket implementation is not affected. However, if
-there were a vulnerability in gVisor, containers would not be allowed to exploit
-it by default.
-
-As an alternative way to implement this same constraint, Kubernetes allows
-[admission controllers](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/)
-to be configured to customize requests. Cloud providers can use this to
-implement more stringent policies. For example, GKE implements an admission
-controller for gVisor that
-[removes `CAP_NET_RAW` from gVisor pods](https://cloud.google.com/kubernetes-engine/docs/concepts/sandbox-pods#capabilities)
-unless it has been explicitly set in the pod spec.
-
-## Isolated Kernel
-
-gVisor has its own application kernel, called the Sentry, that is distinct from
-the host kernel. Just like what you would expect from a kernel, gVisor has a
-memory management subsystem, virtual file system, and a full network stack. The
-host network is only used as a transport to carry packets in and out of the
-sandbox[^1]. The loopback interface, which is used in the exploit, stays
-completely inside the sandbox, never reaching the host.
-
-Therefore, even if the Sentry was vulnerable to the attack, there would be two
-factors that would prevent a container escape from happening. First, the
-vulnerability would be limited to the Sentry, and the attacker would compromise
-only the application kernel, bound by a restricted set of
-[seccomp](https://en.wikipedia.org/wiki/Seccomp) filters, discussed more in
-depth below. Second, the Sentry is a distinct implementation of the API, written
-in Go, which provides bounds checking that would have likely prevented access
-past the bounds of the shared region (e.g. see
-[aio](https://cs.opensource.google/gvisor/gvisor/+/master:pkg/sentry/syscalls/linux/vfs2/aio.go;l=210;drc=a11061d78a58ed75b10606d1a770b035ed944b66?q=file:aio&ss=gvisor%2Fgvisor)
-or
-[kcov](https://cs.opensource.google/gvisor/gvisor/+/master:pkg/sentry/kernel/kcov.go;l=272?q=file:kcov&ss=gvisor%2Fgvisor),
-which have similar shared regions).
-
-Here, Kubernetes warrants slightly more explanation. gVisor makes pods the unit
-of isolation and a pod can run multiple containers. In other words, each pod is
-a gVisor instance, and each container is a set of processes running inside
-gVisor, isolated via Sentry-internal namespaces like regular containers inside a
-pod. If there were a vulnerability in gVisor, the privilege escalation would
-allow a container inside the pod to break out to other **containers inside the
-same pod**, but the container still **cannot break out of the pod**.
-
-## Defense in Depth
-
-gVisor follows a
-[common security principle used at Google](https://cloud.google.com/security/infrastructure/design/resources/google_infrastructure_whitepaper_fa.pdf)
-that the system should have two layers of protection, and those layers should
-require different compromises to be broken. We apply this principle by assuming
-that the Sentry (first layer of defense)
-[will be compromised and should not be trusted](https://gvisor.dev/blog/2019/11/18/gvisor-security-basics-part-1/#defense-in-depth).
-In order to protect the host kernel from a compromised Sentry, we wrap the
-Sentry in many security and isolation features to ensure that only a minimal
-set of host kernel functionality is exposed.
-
-![Figure 1](/assets/images/2020-09-18-containing-a-real-vulnerability-figure1.png "Protection layers.")
-
-First, the sandbox runs inside a cgroup that can limit and throttle host
-resources being used. Second, the sandbox joins empty namespaces, including user
-and mount, to further isolate from the host. Next, it changes the process root
-to a read-only directory that contains only `/proc` and nothing else. Then, it
-executes with the unprivileged user/group
-[`nobody`](https://en.wikipedia.org/wiki/Nobody_\(username\)) with all
-capabilities stripped. Last and most importantly, a seccomp filter is added to
-tightly restrict the parts of the Linux syscall surface that gVisor is allowed
-to access. The allowed host surface is a far smaller set of syscalls than the
-Sentry implements for applications to use. The filter not only restricts which
-syscalls may be called, but also checks that the arguments to those syscalls
-are within the expected set. Dangerous syscalls like <code>execve(2)</code>,
-<code>open(2)</code>, and <code>socket(2)</code> are prohibited, so an attacker
-isn’t able to execute binaries or acquire new resources on the host.
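-
-The argument checks can be pictured as rules that match on both the syscall and
-its arguments. The sketch below is conceptual only (the rule and constants are
-hypothetical examples, not gVisor's actual policy, which is declared in
-runsc/boot/filter):
-
-```go
-package main
-
-import "fmt"
-
-// rule allows a syscall only when its arguments satisfy the predicate.
-type rule struct {
-	sysno int
-	allow func(args [6]uintptr) bool
-}
-
-const sysIoctl = 16     // ioctl(2) on linux/amd64
-const fionread = 0x541b // FIONREAD request code
-
-// rules permits ioctl(2) only for a single known request code.
-var rules = []rule{
-	{sysno: sysIoctl, allow: func(args [6]uintptr) bool { return args[1] == fionread }},
-}
-
-func permitted(sysno int, args [6]uintptr) bool {
-	for _, r := range rules {
-		if r.sysno == sysno && r.allow(args) {
-			return true
-		}
-	}
-	return false // default deny
-}
-
-func main() {
-	fmt.Println(permitted(sysIoctl, [6]uintptr{3, fionread})) // true
-	fmt.Println(permitted(sysIoctl, [6]uintptr{3, 0x5401}))   // false: TCGETS not allowed
-}
-```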
-
-If there were a vulnerability in gVisor that allowed an attacker to execute
-code inside the Sentry, the attacker would still have extremely limited
-privileges on the host. In fact, a compromised Sentry is much more restricted
-than a
-non-compromised regular container. For CVE-2020-14386 in particular, the attack
-would be blocked by more than one security layer: non-privileged user, no
-capability, and seccomp filters.
-
-Although the surface is drastically reduced, there is still a chance that there
-is a vulnerability in one of the allowed syscalls. That’s why it’s important to
-keep the surface small and carefully consider what syscalls are allowed. You can
-find the full set of allowed syscalls
-[here](https://cs.opensource.google/gvisor/gvisor/+/master:runsc/boot/filter/).
-
-Another possible attack vector is resources that are present in the Sentry, like
-open file descriptors. The Sentry has file descriptors that an attacker could
-potentially use, such as log files, platform files (e.g. `/dev/kvm`), an RPC
-endpoint that allows external communication with the Sentry, and a Netstack
-endpoint that connects the sandbox to the network. The Netstack endpoint in
-particular is a concern because it gives direct access to the network. It’s an
-`AF_PACKET` socket that allows arbitrary L2 packets to be written to the
-network. In the normal case, Netstack assembles packets that go out the network,
-giving the container control over only the payload. But if the Sentry is
-compromised, an attacker can craft packets to the network. In many ways this is
-similar to anyone sending random packets over the internet, but still this is a
-place where the host kernel surface exposed is larger than we would like it to
-be.
-
-## Conclusion
-
-Security comes with many tradeoffs that are often hard to make, such as the
-decision to disable raw sockets by default. However, these tradeoffs have served
-us well, and we've found them to have paid off over time. CVE-2020-14386 offers
-great insight into how multiple layers of protection can be effective against
-such an attack.
-
-We cannot guarantee that a container escape will never happen in gVisor, but we
-do our best to make it as hard as we possibly can.
-
-If you have not tried gVisor yet, it’s easier than you think. Just follow the
-steps [here](https://gvisor.dev/docs/user_guide/install/).
-<br>
-<br>
-
---------------------------------------------------------------------------------
-
-[^1]: Those packets are eventually handled by the host, as it needs to route
- them to local containers or send them out the NIC. The packet will be
- handled by many switches, routers, proxies, servers, etc. along the way,
- which may be subject to their own vulnerabilities.
diff --git a/website/blog/2020-10-22-platform-portability.md b/website/blog/2020-10-22-platform-portability.md
deleted file mode 100644
index 4d82940f9..000000000
--- a/website/blog/2020-10-22-platform-portability.md
+++ /dev/null
@@ -1,120 +0,0 @@
-# Platform Portability
-
-Hardware virtualization is often seen as a requirement to provide an additional
-isolation layer for untrusted applications. However, hardware virtualization
-requires expensive bare-metal machines or cloud instances to run safely with
-good performance, increasing cost and complexity for Cloud users. gVisor,
-however, takes a more flexible approach.
-
-One of the pillars of gVisor's architecture is portability, allowing it to run
-anywhere that runs Linux. Modern Cloud-Native applications run in containers in
-many different places, from bare metal to virtual machines, and can't always
-rely on nested virtualization. It is important for gVisor to be able to support
-the environments where you run containers.
-
-gVisor achieves portability through an abstraction called a _Platform_.
-Platforms can have many implementations, and each implementation can cover
-different environments, making use of available software or hardware features.
-
-## Background
-
-Before we can understand how gVisor achieves portability using platforms, we
-should take a step back and understand how applications interact with their
-host.
-
-Container sandboxes can provide an isolation layer between the host and
-application by virtualizing one of the layers below it, including the hardware
-or operating system. Many sandboxes virtualize the hardware layer by running
-applications in virtual machines. gVisor takes a different approach by
-virtualizing the OS layer.
-
-When an application runs normally, the host operating system loads the
-application into user memory and schedules it for execution. The operating
-system scheduler eventually schedules the application onto a CPU and begins
-executing it. It then handles the application's requests, such as for memory,
-and manages the application's lifecycle. gVisor virtualizes these interactions
-between the application and the OS, such as system calls and context switches.
-
-[System calls](https://en.wikipedia.org/wiki/System_call) allow applications to
-ask the OS to perform some task for them. System calls look like normal
-function calls in most programming languages, though they work a bit
-differently under the hood. When an application makes a system call, some
-special processing takes place to perform a
-[context switch](https://en.wikipedia.org/wiki/Context_switch) into kernel mode
-and begin executing code in the kernel before returning a result to the
-application. Context switching may happen in other situations as well, for
-example to respond to an interrupt.
-
-## The Platform Interface
-
-gVisor provides a sandbox that implements the Linux OS interface, intercepting
-OS interactions such as system calls and implementing them in the sandbox
-kernel.
-
-It does this to limit interactions with the host, and protect the host from an
-untrusted application running in the sandbox. The Platform is the bottom layer
-of gVisor which provides the environment necessary for gVisor to control and
-manage applications. In general, the Platform must:
-
-1. Provide the ability to create and manage memory address spaces.
-2. Provide execution contexts for running applications in those memory address
- spaces.
-3. Provide the ability to change execution context and return control to gVisor
- at specific times (e.g. system call, page fault)
-
-This interface is conceptually simple, but very powerful. Since the Platform
-interface only requires these three capabilities, it gives gVisor enough control
-for it to act as the application's OS, while still allowing the use of very
-different isolation technologies under the hood. You can learn more about the
-Platform interface in the
-[Platform Guide](https://gvisor.dev/docs/architecture_guide/platforms/).
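-
-To make that concrete, here is a deliberately simplified sketch of such an
-interface (our own illustration for this post, not the interface in the gVisor
-source, which carries considerably more detail):
-
-```go
-package platform
-
-// AddressSpace is a virtual address space into which application memory can be
-// mapped (capability 1 above).
-type AddressSpace interface {
-	MapFile(addr, length uintptr, fileOffset int64, writable bool) error
-	Unmap(addr, length uintptr)
-}
-
-// TrapInfo describes why control returned to the sandbox kernel.
-type TrapInfo struct {
-	Syscall   bool
-	Sysno     int
-	PageFault bool
-	FaultAddr uintptr
-}
-
-// Context executes application code inside an AddressSpace (capability 2) and
-// returns control when something interesting happens (capability 3), such as a
-// system call or a page fault.
-type Context interface {
-	// Switch runs the application until the next trap and reports why it
-	// stopped.
-	Switch(as AddressSpace) (TrapInfo, error)
-}
-
-// Platform ties the pieces together; ptrace and KVM provide very different
-// implementations behind this same shape.
-type Platform interface {
-	NewAddressSpace() (AddressSpace, error)
-	NewContext() Context
-}
-```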
-
-## Implementations of the Platform Interface
-
-While gVisor can make use of technologies like hardware virtualization, it
-doesn't necessarily rely on any one technology to provide a similar level of
-isolation. The flexibility of the Platform interface allows for implementations
-that use technologies other than hardware virtualization. This allows gVisor to
-run in VMs without nested virtualization, for example. By providing an
-abstraction for the underlying platform, each implementation can make various
-tradeoffs regarding performance or hardware requirements.
-
-Currently, gVisor provides two Platform implementations: the Ptrace Platform
-and the KVM Platform, each using very different methods to implement the
-Platform interface.
-
-![gVisor Platforms](../../../../../docs/architecture_guide/platforms/platforms.png "Platforms")
-
-The Ptrace Platform uses
-[PTRACE\_SYSEMU](http://man7.org/linux/man-pages/man2/ptrace.2.html) to trap
-syscalls, and uses the host for memory mapping and context switching. This
-platform can run anywhere that ptrace is available, which includes most Linux
-systems, VMs or otherwise.
-
-The KVM Platform uses virtualization, but in an unconventional way. gVisor runs
-in a virtual machine but as both guest OS and VMM, and presents no virtualized
-hardware layer. This provides a simpler interface that can avoid hardware
-initialization for fast start up, while taking advantage of hardware
-virtualization support to improve memory isolation and performance of context
-switching.
-
-The flexibility of the Platform interface allows for a lot of room to improve
-the existing KVM and ptrace platforms, as well as the ability to utilize new
-methods for improving gVisor's performance or portability in future Platform
-implementations.
-
-## Portability
-
-Through the Platform interface, gVisor is able to support bare metal, virtual
-machines, and Cloud environments while still providing a highly secure sandbox
-for running untrusted applications. This is especially important for Cloud and
-Kubernetes users because it allows gVisor to run anywhere that Kubernetes can
-run and provide similar experiences in multi-region, hybrid, multi-platform
-environments.
-
-Give gVisor's open source platforms a try. Using a Platform is as easy as
-providing the `--platform` flag to `runsc`. See the documentation on
-[changing platforms](https://gvisor.dev/docs/user_guide/platforms/) for how to
-use different platforms with Docker. We would love to hear about your experience
-so come chat with us in our
-[Gitter channel](https://gitter.im/gvisor/community), or send us an
-[issue on Github](https://gvisor.dev/issue) if you run into any problems.
diff --git a/website/blog/BUILD b/website/blog/BUILD
deleted file mode 100644
index 17beb721f..000000000
--- a/website/blog/BUILD
+++ /dev/null
@@ -1,58 +0,0 @@
-load("//website:defs.bzl", "doc", "docs")
-
-package(
- default_visibility = ["//website:__pkg__"],
- licenses = ["notice"],
-)
-
-exports_files(["index.html"])
-
-doc(
- name = "security_basics",
- src = "2019-11-18-security-basics.md",
- authors = [
- "jsprad",
- "zkoopmans",
- ],
- layout = "post",
- permalink = "/blog/2019/11/18/gvisor-security-basics-part-1/",
-)
-
-doc(
- name = "networking_security",
- src = "2020-04-02-networking-security.md",
- authors = [
- "igudger",
- ],
- layout = "post",
- permalink = "/blog/2020/04/02/gvisor-networking-security/",
-)
-
-doc(
- name = "containing_a_real_vulnerability",
- src = "2020-09-18-containing-a-real-vulnerability.md",
- authors = [
- "fvoznika",
- ],
- layout = "post",
- permalink = "/blog/2020/09/18/containing-a-real-vulnerability/",
-)
-
-doc(
- name = "platform_portability",
- src = "2020-10-22-platform-portability.md",
- authors = [
- "ianlewis",
- "mpratt",
- ],
- layout = "post",
- permalink = "/blog/2020/10/22/platform-portability/",
-)
-
-docs(
- name = "posts",
- deps = [
- ":" + rule
- for rule in existing_rules()
- ],
-)
diff --git a/website/blog/README.md b/website/blog/README.md
deleted file mode 100644
index e1d685288..000000000
--- a/website/blog/README.md
+++ /dev/null
@@ -1,62 +0,0 @@
-# gVisor blog
-
-The gVisor blog is owned and run by the gVisor team.
-
-## Contact
-
-Reach out to us on [gitter](https://gitter.im/gvisor/community) or the
-[mailing list](https://groups.google.com/forum/#!forum/gvisor-users) if you
-would like to write a blog post.
-
-## Submit a Post
-
-Anyone can write a blog post and submit it for review. Purely commercial content
-or vendor pitches are not allowed. Please refer to the
-[blog guidelines](#blog-guidelines) for more guidance about what content is
-allowed.
-
-To submit a blog post, follow the steps below.
-
-1. [Sign the Contributor License Agreements](https://gvisor.dev/contributing/)
- if you have not yet done so.
-1. Familiarize yourself with the Markdown format for the
- [existing blog posts](https://github.com/google/gvisor/tree/master/website/blog).
-1. Write your blog post in a text editor of your choice.
-1. (Optional) If you need help with markdown, check out
-    [StackEdit](https://stackedit.io/app#) or read
- [Jekyll's formatting reference](https://jekyllrb.com/docs/posts/#creating-posts)
- for more information.
-1. Click **Add file** > **Create new file**.
-1. Paste your content into the editor and save it. Name the file in the
-    following way: *[BLOG] Your proposed title*, but don’t put the date in the
- file name. The blog reviewers will work with you on the final file name, and
- the date on which the blog will be published.
-1. When you save the file, GitHub will walk you through the pull request (PR)
- process.
-1. Send us a message on [gitter](https://gitter.im/gvisor/community) with a
- link to your recently created PR.
-1. A reviewer will be assigned to the pull request. They check your submission,
- and work with you on feedback and final details. When the pull request is
- approved, the blog will be scheduled for publication.
-
-### Blog Guidelines {#blog-guidelines}
-
-#### Suitable content:
-
-- **Original content only**
-- gVisor features or project updates
-- Tutorials and demos
-- Use cases
-- Content that is specific to a vendor or platform about gVisor installation
- and use
-
-#### Unsuitable Content:
-
-- Blogs with no content relevant to gVisor
-- Vendor pitches
-
-## Review Process
-
-Each blog post should be approved by at least one person on the team. Once all
-of the review comments have been addressed and approved, a member of the team
-will schedule publication of the blog post.
diff --git a/website/blog/index.html b/website/blog/index.html
deleted file mode 100644
index 272917fc4..000000000
--- a/website/blog/index.html
+++ /dev/null
@@ -1,27 +0,0 @@
----
-title: Blog
-layout: blog
-feed: true
-pagination:
- enabled: true
----
-
-{% for post in paginator.posts %}
-<div>
- <h2><a href="{{ post.url }}">{{ post.title }}</a></h2>
- <div class="blog-meta">
- {% include byline.html authors=post.authors date=post.date %}
- </div>
- <p>{{ post.excerpt | strip_html }}</p>
- <p><a href="{{ post.url }}">Full Post &raquo;</a></p>
-</div>
-{% endfor %}
-
-{% if paginator.total_pages > 1 %}
-{% include paginator.html %}
-{% endif %}
-
-<hr>
-
-If you would like to contribute to the gVisor blog check out the
-<a href="https://github.com/google/gvisor/tree/master/website/blog">instructions</a>.
diff --git a/website/cmd/server/BUILD b/website/cmd/server/BUILD
deleted file mode 100644
index e4cf91e07..000000000
--- a/website/cmd/server/BUILD
+++ /dev/null
@@ -1,13 +0,0 @@
-load("//tools:defs.bzl", "go_binary")
-
-package(licenses = ["notice"])
-
-go_binary(
- name = "server",
- srcs = ["main.go"],
- pure = True,
- visibility = ["//website:__pkg__"],
- deps = [
- "@com_github_google_pprof//driver:go_default_library",
- ],
-)
diff --git a/website/cmd/server/main.go b/website/cmd/server/main.go
deleted file mode 100644
index 1e5b56fbb..000000000
--- a/website/cmd/server/main.go
+++ /dev/null
@@ -1,393 +0,0 @@
-// Copyright 2019 The gVisor Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Server is the main gvisor.dev binary.
-package main
-
-import (
- "flag"
- "fmt"
- "log"
- "net/http"
- "net/url"
- "os"
- "path"
- "regexp"
- "strings"
-
- "github.com/google/pprof/driver"
-)
-
-var redirects = map[string]string{
- // GitHub redirects.
- "/change": "https://github.com/google/gvisor",
- "/issue": "https://github.com/google/gvisor/issues",
- "/issues": "https://github.com/google/gvisor/issues",
- "/issue/new": "https://github.com/google/gvisor/issues/new",
- "/pr": "https://github.com/google/gvisor/pulls",
-
- // For links.
- "/faq": "/docs/user_guide/faq/",
-
- // From 2020-05-12 to 2020-06-30, the FAQ URL was uppercase. Redirect that
- // back to maintain any links.
- "/docs/user_guide/FAQ/": "/docs/user_guide/faq/",
-
- // Redirects to compatibility docs.
- "/c": "/docs/user_guide/compatibility/",
- "/c/linux/amd64": "/docs/user_guide/compatibility/linux/amd64/",
-
- // Redirect for old URLs.
- "/docs/user_guide/compatibility/amd64/": "/docs/user_guide/compatibility/linux/amd64/",
- "/docs/user_guide/compatibility/amd64": "/docs/user_guide/compatibility/linux/amd64/",
- "/docs/user_guide/kubernetes/": "/docs/user_guide/quick_start/kubernetes/",
- "/docs/user_guide/kubernetes": "/docs/user_guide/quick_start/kubernetes/",
- "/docs/user_guide/oci/": "/docs/user_guide/quick_start/oci/",
- "/docs/user_guide/oci": "/docs/user_guide/quick_start/oci/",
- "/docs/user_guide/docker/": "/docs/user_guide/quick_start/docker/",
- "/docs/user_guide/docker": "/docs/user_guide/quick_start/docker/",
- "/blog/2020/09/22/platform-portability": "/blog/2020/10/22/platform-portability/",
- "/blog/2020/09/22/platform-portability/": "/blog/2020/10/22/platform-portability/",
-
- // Deprecated, but links continue to work.
- "/cl": "https://gvisor-review.googlesource.com",
-
- // Access package documentation.
- "/gvisor": "https://pkg.go.dev/gvisor.dev/gvisor",
-
- // Code search root.
- "/cs": "https://cs.opensource.google/gvisor/gvisor",
-}
-
-type prefixInfo struct {
- baseURL string
- checkValidID bool
- queryEscape bool
-}
-
-var prefixHelpers = map[string]prefixInfo{
- "change": {baseURL: "https://github.com/google/gvisor/commit/%s", checkValidID: true},
- "issue": {baseURL: "https://github.com/google/gvisor/issues/%s", checkValidID: true},
- "issues": {baseURL: "https://github.com/google/gvisor/issues/%s", checkValidID: true},
- "pr": {baseURL: "https://github.com/google/gvisor/pull/%s", checkValidID: true},
-
- // Redirects to compatibility docs.
- "c/linux/amd64": {baseURL: "/docs/user_guide/compatibility/linux/amd64/#%s", checkValidID: true},
-
- // Deprecated, but links continue to work.
- "cl": {baseURL: "https://gvisor-review.googlesource.com/c/gvisor/+/%s", checkValidID: true},
-
- // Redirect to source documentation.
- "gvisor": {baseURL: "https://pkg.go.dev/gvisor.dev/gvisor/%s"},
-
- // Redirect to code search, with the path as the query.
- "cs": {baseURL: "https://cs.opensource.google/search?q=%s&ss=gvisor", queryEscape: true},
-}
-
-var (
- validID = regexp.MustCompile(`^[A-Za-z0-9-]*/?$`)
- goGetHTML5 = `<!doctype html><html><head><meta charset=utf-8>
-<meta name="go-import" content="gvisor.dev/gvisor git https://github.com/google/gvisor">
-<meta name="go-import" content="gvisor.dev/website git https://github.com/google/gvisor-website">
-<title>Go-get</title></head><body></html>`
-)
-
-// cronHandler wraps an http.Handler to check that the request is from the App
-// Engine Cron service.
-// See: https://cloud.google.com/appengine/docs/standard/go112/scheduling-jobs-with-cron-yaml#validating_cron_requests
-func cronHandler(h http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if r.Header.Get("X-Appengine-Cron") != "true" {
- http.NotFound(w, r)
- return
- }
- // Fallthrough.
- h.ServeHTTP(w, r)
- })
-}
-
-// wrappedHandler wraps an http.Handler.
-//
-// If the query parameters include go-get=1, then we redirect to a single
-// static page that allows us to serve arbitrary Go packages.
-func wrappedHandler(h http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- gg, ok := r.URL.Query()["go-get"]
- if ok && len(gg) == 1 && gg[0] == "1" {
- // Serve a trivial html page.
- w.Write([]byte(goGetHTML5))
- return
- }
- // Fallthrough.
- h.ServeHTTP(w, r)
- })
-}
-
-// redirectWithQuery redirects to the given target url preserving query parameters.
-func redirectWithQuery(w http.ResponseWriter, r *http.Request, target string) {
- url := target
- if qs := r.URL.RawQuery; qs != "" {
- url += "?" + qs
- }
- http.Redirect(w, r, url, http.StatusFound)
-}
-
-// hostRedirectHandler redirects the www. domain to the naked domain.
-func hostRedirectHandler(h http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if strings.HasPrefix(r.Host, "www.") {
- // Redirect to the naked domain.
- r.URL.Scheme = "https" // Assume https.
- r.URL.Host = r.Host[4:] // Remove the 'www.'
- http.Redirect(w, r, r.URL.String(), http.StatusMovedPermanently)
- return
- }
-
- if *projectID != "" && r.Host == *projectID+".appspot.com" && *customHost != "" {
- // Redirect to the custom domain.
- r.URL.Scheme = "https" // Assume https.
- r.URL.Host = *customHost
- http.Redirect(w, r, r.URL.String(), http.StatusMovedPermanently)
- return
- }
- h.ServeHTTP(w, r)
- })
-}
-
-// prefixRedirectHandler returns a handler that redirects to the given formatted URL.
-func prefixRedirectHandler(prefix string, info prefixInfo) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if p := r.URL.Path; p == prefix {
- // Redirect /prefix/ to /prefix.
- http.Redirect(w, r, p[:len(p)-1], http.StatusFound)
- return
- }
- id := r.URL.Path[len(prefix):]
- if info.checkValidID && !validID.MatchString(id) {
- http.Error(w, "Not found", http.StatusNotFound)
- return
- }
- if info.queryEscape {
- id = url.QueryEscape(id)
- }
- target := fmt.Sprintf(info.baseURL, id)
- redirectWithQuery(w, r, target)
- })
-}
-
-// redirectHandler returns a handler that redirects to the given url.
-func redirectHandler(target string) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- redirectWithQuery(w, r, target)
- })
-}
-
-// registerRedirects registers redirect http handlers.
-func registerRedirects(mux *http.ServeMux) {
- for prefix, info := range prefixHelpers {
- p := "/" + prefix + "/"
- mux.Handle(p, hostRedirectHandler(wrappedHandler(prefixRedirectHandler(p, info))))
- }
- for path, redirect := range redirects {
- mux.Handle(path, hostRedirectHandler(wrappedHandler(redirectHandler(redirect))))
- }
-}
-
-// registerStatic registers static file handlers.
-func registerStatic(mux *http.ServeMux, staticDir string) {
- mux.Handle("/", hostRedirectHandler(wrappedHandler(http.FileServer(http.Dir(staticDir)))))
-}
-
-// profileMeta implements synthetic flags for pprof.
-type profileMeta struct {
- // Mux is the mux to register on.
- Mux *http.ServeMux
-
- // SourceURL is the source of the profile.
- SourceURL string
-}
-
-func (*profileMeta) ExtraUsage() string { return "" }
-func (*profileMeta) AddExtraUsage(string) {}
-func (*profileMeta) Bool(_ string, def bool, _ string) *bool { return &def }
-func (*profileMeta) Int(_ string, def int, _ string) *int { return &def }
-func (*profileMeta) Float64(_ string, def float64, _ string) *float64 { return &def }
-func (*profileMeta) StringList(_ string, def string, _ string) *[]*string { return new([]*string) }
-func (*profileMeta) String(option string, def string, _ string) *string {
- switch option {
- case "http":
- // Only http is specified. Other options may be accessible via
- // the web interface, so we just need to spoof a valid option
- // here. The server is actually bound by HTTPServer, below.
- value := "localhost:80"
- return &value
- case "symbolize":
- // Don't attempt symbolization. Most profiles should come with
- // mappings built-in to the profile itself.
- value := "none"
- return &value
- default:
- return &def // Default.
- }
-}
-
-// Parse implements plugin.FlagSet.Parse.
-func (p *profileMeta) Parse(usage func()) []string {
- // Just return the SourceURL. This is interpreted as the profile to
- // download. We validate that the URL corresponds to a Google Cloud
- // Storage URL below.
- return []string{p.SourceURL}
-}
-
-// pprofFixedPrefix is used to limit the exposure to SSRF.
-//
-// See registerProfile below.
-const pprofFixedPrefix = "https://storage.googleapis.com/"
-
-// allowedBuckets enforces constraints on the pprof target.
-//
-// If the continuous integration system is changed in the future to use
-// additional buckets, they may be allowed here. See registerProfile.
-var allowedBuckets = map[string]bool{
- "gvisor-buildkite": true,
-}
-
-// Target returns the URL target.
-func (p *profileMeta) Target() string {
- return fmt.Sprintf("/profile/%s/", p.SourceURL[len(pprofFixedPrefix):])
-}
-
-// HTTPServer is a function passed to driver.PProf.
-func (p *profileMeta) HTTPServer(args *driver.HTTPServerArgs) error {
- target := p.Target()
- for subpath, handler := range args.Handlers {
- handlerPath := path.Join(target, subpath)
- if len(handlerPath) < len(target) {
- // Don't clean the target, match only as the literal
- // directory path in order to keep relative links
- // working in the profile. E.g. /profile/foo/ is the
- // base URL for the profile at https://.../foo.
- //
- // The base target typically shows the dot-based graph,
- // which will not work in the image (due to the lack of
- // a dot binary to execute). Therefore, we redirect to
- // the flamegraph handler. Everything should otherwise
- // work the exact same way, except the "Graph" link.
- handlerPath = target
- handler = redirectHandler(path.Join(handlerPath, "flamegraph"))
- }
- p.Mux.Handle(handlerPath, handler)
- }
- return nil
-}
-
-// registerProfile registers the profile handler.
-//
-// Note that this has a security surface worth considering.
-//
-// We are passed effectively a URL, which we fetch and parse,
-// then display the profile output. We limit the possibility of
-// SSRF by interpreting the URL strictly as a path to an object
-// in Google Cloud Storage, and further limit the buckets that
-// may be used. This contains the vast majority of concerns,
-// since objects must at least be uploaded by our CI system.
-//
-// However, we additionally consider the possibility that users
-// craft malicious profile objects (somehow) and pass those URLs
-// here as well. It seems feasible that we could parse a profile
-// that causes a crash (DOS), but this would be automatically
-// handled without a blip. It seems unlikely that we could parse a
-// profile that gives full code execution, but even so there is
-// nothing in this image except this code and CA certs. At worst,
-// code execution would enable someone to serve up content under the
-// web domain. This would be ephemeral with the specific instance,
-// and persisting such an attack would require constantly crashing
-// instances in whatever way gives remote code execution. Even if
-// this were possible, it's unlikely that exploiting such a crash
-// could be done so constantly and consistently.
-//
-// The user can also fill the "disk" of this container instance,
-// causing an OOM and a crash. This has similar semantics to the
-// DOS scenario above, and would just be handled by Cloud Run.
-//
-// Note that all of the above scenarios would require uploading
-// malicious profiles to controller buckets, and a clear audit
-// trail would exist in those cases.
-func registerProfile(mux *http.ServeMux) {
- const urlPrefix = "/profile/"
- mux.Handle(urlPrefix, hostRedirectHandler(wrappedHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- // Extract the URL; this is everything except the final /.
- parts := strings.Split(r.URL.Path[len(urlPrefix):], "/")
- if len(parts) == 0 {
- http.Error(w, "Invalid URL: no bucket provided.", http.StatusNotFound)
- return
- }
- if !allowedBuckets[parts[0]] {
- http.Error(w, fmt.Sprintf("Invalid URL: not an allowed bucket (%s).", parts[0]), http.StatusNotFound)
- return
- }
- url := pprofFixedPrefix + strings.Join(parts[:len(parts)-1], "/")
- if url == pprofFixedPrefix {
- http.Error(w, "Invalid URL: no path provided.", http.StatusNotFound)
- return
- }
-
- // Set up the meta handler. This will modify the original mux
- // accordingly, and we ultimately return a redirect that
- // includes all the original arguments. This means that if we
- // ever hit a server that does not have this profile loaded, it
- // will load and redirect again.
- meta := &profileMeta{
- Mux: mux,
- SourceURL: url,
- }
- if err := driver.PProf(&driver.Options{
- Flagset: meta,
- HTTPServer: meta.HTTPServer,
- }); err != nil {
- http.Error(w, fmt.Sprintf("Invalid profile: %v", err), http.StatusNotImplemented)
- return
- }
-
- // Serve the path directly.
- mux.ServeHTTP(w, r)
- }))))
-}
-
-func envFlagString(name, def string) string {
- if val, ok := os.LookupEnv(name); ok {
- return val
- }
- return def
-}
-
-var (
- addr = flag.String("http", envFlagString("HTTP", ":"+envFlagString("PORT", "8080")), "HTTP service address")
- staticDir = flag.String("static-dir", envFlagString("STATIC_DIR", "_site"), "static files directory")
-
- // Uses the standard GOOGLE_CLOUD_PROJECT environment variable set by App Engine.
- projectID = flag.String("project-id", envFlagString("GOOGLE_CLOUD_PROJECT", ""), "The App Engine project ID.")
- customHost = flag.String("custom-domain", envFlagString("CUSTOM_DOMAIN", "gvisor.dev"), "The application's custom domain.")
-)
-
-func main() {
- flag.Parse()
-
- registerRedirects(http.DefaultServeMux)
- registerStatic(http.DefaultServeMux, *staticDir)
- registerProfile(http.DefaultServeMux)
-
- log.Printf("Listening on %s...", *addr)
- log.Fatal(http.ListenAndServe(*addr, nil))
-}
diff --git a/website/cmd/syscalldocs/BUILD b/website/cmd/syscalldocs/BUILD
deleted file mode 100644
index c5a0ed7fe..000000000
--- a/website/cmd/syscalldocs/BUILD
+++ /dev/null
@@ -1,9 +0,0 @@
-load("//tools:defs.bzl", "go_binary")
-
-package(licenses = ["notice"])
-
-go_binary(
- name = "syscalldocs",
- srcs = ["main.go"],
- visibility = ["//website:__pkg__"],
-)
diff --git a/website/cmd/syscalldocs/main.go b/website/cmd/syscalldocs/main.go
deleted file mode 100644
index 830d2bac7..000000000
--- a/website/cmd/syscalldocs/main.go
+++ /dev/null
@@ -1,237 +0,0 @@
-// Copyright 2019 The gVisor Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Binary syscalldocs generates system call markdown.
-package main
-
-import (
- "bufio"
- "encoding/json"
- "flag"
- "fmt"
- "io"
- "os"
- "path/filepath"
- "sort"
- "strings"
- "text/template"
-)
-
-// CompatibilityInfo is the collection of all information.
-type CompatibilityInfo map[string]map[string]ArchInfo
-
-// ArchInfo is the compatibility doc for an architecture.
-type ArchInfo struct {
- // Syscalls maps syscall number for the architecture to the doc.
- Syscalls map[uintptr]SyscallDoc `json:"syscalls"`
-}
-
-// SyscallDoc represents a single item of syscall documentation.
-type SyscallDoc struct {
- Name string `json:"name"`
- Support string `json:"support"`
- Note string `json:"note,omitempty"`
- URLs []string `json:"urls,omitempty"`
-}
-
-var mdTemplate = template.Must(template.New("out").Parse(`---
-title: {{.Title}}
-description: Syscall Compatibility Reference Documentation for {{.OS}}/{{.Arch}}
-layout: docs
-category: Compatibility
-weight: 50
-permalink: /docs/user_guide/compatibility/{{.OS}}/{{.Arch}}/
-include_in_menu: True
----
-
-This table is a reference of {{.OS}} syscalls for the {{.Arch}} architecture and
-their compatibility status in gVisor. gVisor does not support all syscalls and
-some syscalls may have a partial implementation.
-
-This page is automatically generated from the source code.
-
-Of {{.Total}} syscalls, {{.Supported}} syscalls have a full or partial
-implementation. There are currently {{.Unsupported}} unsupported
-syscalls. {{if .Undocumented}}{{.Undocumented}} syscalls are not yet documented.{{end}}
-
-<table>
- <thead>
- <tr>
- <th>#</th>
- <th>Name</th>
- <th>Support</th>
- <th>Notes</th>
- </tr>
- </thead>
- <tbody>
- {{range $i, $syscall := .Syscalls}}
- <tr id="{{.Name}}">
- <td><a href="#{{.Name}}">{{.Number}}</a></td>
- <td><a href="{{.DocURL}}" target="_blank" rel="noopener">{{.Name}}</a></td>
- <td>{{.Support}}</td>
- <td>{{.Note}} {{range $i, $url := .URLs}}<br/>See: <a href="{{.}}">{{.}}</a>{{end}}</td>
- </tr>
- {{end}}
- </tbody>
-</table>
-`))
-
-// Fatalf writes a message to stderr and exits with error code 1
-func Fatalf(format string, a ...interface{}) {
- fmt.Fprintf(os.Stderr, format, a...)
- os.Exit(1)
-}
-
-// syscallDocURL returns a doc url for a given syscall, doing its best to return a url that exists.
-func syscallDocURL(name string) string {
- customDocs := map[string]string{
- "io_pgetevents": "https://man7.org/linux/man-pages/man2/syscalls.2.html",
- "rseq": "https://man7.org/linux/man-pages/man2/syscalls.2.html",
- "io_uring_setup": "https://manpages.debian.org/buster-backports/liburing-dev/io_uring_setup.2.en.html",
- "io_uring_enter": "https://manpages.debian.org/buster-backports/liburing-dev/io_uring_enter.2.en.html",
- "io_uring_register": "https://manpages.debian.org/buster-backports/liburing-dev/io_uring_register.2.en.html",
- "open_tree": "https://man7.org/linux/man-pages/man2/syscalls.2.html",
- "move_mount": "https://man7.org/linux/man-pages/man2/syscalls.2.html",
- "fsopen": "https://man7.org/linux/man-pages/man2/syscalls.2.html",
- "fsconfig": "https://man7.org/linux/man-pages/man2/syscalls.2.html",
- "fsmount": "https://man7.org/linux/man-pages/man2/syscalls.2.html",
- "fspick": "https://man7.org/linux/man-pages/man2/syscalls.2.html",
- }
- if url, ok := customDocs[name]; ok {
- return url
- }
- return fmt.Sprintf("http://man7.org/linux/man-pages/man2/%s.2.html", name)
-}
-
-func main() {
- inputFlag := flag.String("in", "-", "Input file ('-' for stdin).")
- outputDir := flag.String("out", ".", "Directory to output files.")
-
- flag.Parse()
-
- var input io.Reader
- if *inputFlag == "-" {
- input = os.Stdin
- } else {
- i, err := os.Open(*inputFlag)
- if err != nil {
- Fatalf("Error opening %q: %v", *inputFlag, err)
- }
- input = i
- }
- input = bufio.NewReader(input)
-
- var info CompatibilityInfo
- d := json.NewDecoder(input)
- if err := d.Decode(&info); err != nil {
- Fatalf("Error reading json: %v", err)
- }
-
- weight := 0
- for osName, osInfo := range info {
- for archName, archInfo := range osInfo {
- outDir := filepath.Join(*outputDir, osName)
- outFile := filepath.Join(outDir, archName+".md")
-
- if err := os.MkdirAll(outDir, 0755); err != nil {
- Fatalf("Error creating directory %q: %v", *outputDir, err)
- }
-
- f, err := os.OpenFile(outFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
- if err != nil {
- Fatalf("Error opening file %q: %v", outFile, err)
- }
- defer f.Close()
-
- weight += 10
- data := struct {
- Title string
- OS string
- Arch string
- Weight int
- Total int
- Supported int
- Unsupported int
- Undocumented int
- Syscalls []struct {
- Name string
- Number uintptr
- DocURL string
- Support string
- Note string
- URLs []string
- }
- }{
- Title: strings.Title(osName) + "/" + archName,
- OS: osName,
- Arch: archName,
- Weight: weight,
- Total: 0,
- Supported: 0,
- Unsupported: 0,
- Undocumented: 0,
- Syscalls: []struct {
- Name string
- Number uintptr
- DocURL string
- Support string
- Note string
- URLs []string
- }{},
- }
-
- for num, s := range archInfo.Syscalls {
- switch s.Support {
- case "Full Support", "Partial Support":
- data.Supported++
- case "Unimplemented":
- data.Unsupported++
- default:
- data.Undocumented++
- }
- data.Total++
-
- for i := range s.URLs {
- if !strings.HasPrefix(s.URLs[i], "http://") && !strings.HasPrefix(s.URLs[i], "https://") {
- s.URLs[i] = "https://" + s.URLs[i]
- }
- }
-
- data.Syscalls = append(data.Syscalls, struct {
- Name string
- Number uintptr
- DocURL string
- Support string
- Note string
- URLs []string
- }{
- Name: s.Name,
- Number: num,
- DocURL: syscallDocURL(s.Name),
- Support: s.Support,
- Note: s.Note,
- URLs: s.URLs,
- })
- }
-
- sort.Slice(data.Syscalls, func(i, j int) bool {
- return data.Syscalls[i].Number < data.Syscalls[j].Number
- })
-
- if err := mdTemplate.Execute(f, data); err != nil {
- Fatalf("Error writing file %q: %v", outFile, err)
- }
- }
- }
-}
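
For reference, the removed generator above decodes a JSON document shaped like CompatibilityInfo (OS name → architecture name → syscall table). Below is a minimal, self-contained Go sketch of that input format, with the types redeclared here for illustration and purely hypothetical example values; it is not part of the removed tool.

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Redeclared from the removed syscalldocs tool, for illustration only.
    type SyscallDoc struct {
        Name    string   `json:"name"`
        Support string   `json:"support"`
        Note    string   `json:"note,omitempty"`
        URLs    []string `json:"urls,omitempty"`
    }

    type ArchInfo struct {
        Syscalls map[uintptr]SyscallDoc `json:"syscalls"`
    }

    type CompatibilityInfo map[string]map[string]ArchInfo

    func main() {
        // Hypothetical example input: one OS, one architecture, one syscall.
        info := CompatibilityInfo{
            "linux": {
                "amd64": ArchInfo{
                    Syscalls: map[uintptr]SyscallDoc{
                        0: {Name: "read", Support: "Full Support"},
                    },
                },
            },
        }
        out, _ := json.MarshalIndent(info, "", "  ")
        fmt.Println(string(out))
    }
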
diff --git a/website/css/main.scss b/website/css/main.scss
deleted file mode 100644
index 4b3b7b500..000000000
--- a/website/css/main.scss
+++ /dev/null
@@ -1,10 +0,0 @@
-// The main style sheet for gvisor.dev
-
-// NOTE: Do not include file extensions, so that .sass and .css files are imported seamlessly.
-@import "style";
-@import "front";
-@import "navbar";
-@import "sidebar";
-@import "footer";
-// syntax is generated by rougify.
-@import "syntax";
diff --git a/website/defs.bzl b/website/defs.bzl
deleted file mode 100644
index 703040882..000000000
--- a/website/defs.bzl
+++ /dev/null
@@ -1,186 +0,0 @@
-"""Wrappers for website documentation."""
-
-load("//tools:defs.bzl", "short_path")
-
-# DocInfo is a provider which simply adds sufficient metadata to the source
-# files (and additional data files) so that a Jekyll header can be constructed
-# dynamically. This is done via the BUILD system so that the plain
-# documentation files remain viewable without non-compliant markdown headers.
-DocInfo = provider(
- "Encapsulates information for a documentation page.",
- fields = [
- "layout",
- "description",
- "permalink",
- "category",
- "subcategory",
- "weight",
- "editpath",
- "authors",
- "include_in_menu",
- ],
-)
-
-def _doc_impl(ctx):
- return [
- DefaultInfo(
- files = depset(ctx.files.src + ctx.files.data),
- ),
- DocInfo(
- layout = ctx.attr.layout,
- description = ctx.attr.description,
- permalink = ctx.attr.permalink,
- category = ctx.attr.category,
- subcategory = ctx.attr.subcategory,
- weight = ctx.attr.weight,
- editpath = short_path(ctx.files.src[0].short_path),
- authors = ctx.attr.authors,
- include_in_menu = ctx.attr.include_in_menu,
- ),
- ]
-
-doc = rule(
- implementation = _doc_impl,
- doc = "Annotate a document for jekyll headers.",
- attrs = {
- "src": attr.label(
- doc = "The markdown source file.",
- mandatory = True,
- allow_single_file = True,
- ),
- "data": attr.label_list(
- doc = "Additional data files (e.g. images).",
- allow_files = True,
- ),
- "layout": attr.string(
- doc = "The document layout.",
- default = "docs",
- ),
- "description": attr.string(
- doc = "The document description.",
- default = "",
- ),
- "permalink": attr.string(
- doc = "The document permalink.",
- mandatory = True,
- ),
- "category": attr.string(
- doc = "The document category.",
- default = "",
- ),
- "subcategory": attr.string(
- doc = "The document subcategory.",
- default = "",
- ),
- "weight": attr.string(
- doc = "The document weight.",
- default = "50",
- ),
- "authors": attr.string_list(),
- "include_in_menu": attr.bool(
- doc = "Include document in the navigation menu.",
- default = True,
- ),
- },
-)
-
-def _docs_impl(ctx):
- # Tarball is the actual output.
- tarball = ctx.actions.declare_file(ctx.label.name + ".tgz")
-
- # But we need an intermediate builder to translate the files.
- builder = ctx.actions.declare_file("%s-builder" % ctx.label.name)
- builder_content = [
- "#!/bin/bash",
- "set -euo pipefail",
- "declare -r T=$(mktemp -d)",
- "function cleanup {",
- " rm -rf $T",
- "}",
- "trap cleanup EXIT",
- ]
- for dep in ctx.attr.deps:
- doc = dep[DocInfo]
-
- # Sanity check the permalink.
- if not doc.permalink.endswith("/"):
- fail("permalink %s for target %s should end with /" % (
- doc.permalink,
- ctx.label.name,
- ))
-
- # Construct the header.
- header = """\
-description: {description}
-permalink: {permalink}
-category: {category}
-subcategory: {subcategory}
-weight: {weight}
-editpath: {editpath}
-authors: {authors}
-layout: {layout}
-include_in_menu: {include_in_menu}"""
-
- for f in dep.files.to_list():
- # Is this a markdown file? If not, then we ensure that it ends up
- # in the same path as the permalink for relative addressing.
- if not f.basename.endswith(".md"):
- builder_content.append("mkdir -p $T/%s" % doc.permalink)
- builder_content.append("cp %s $T/%s" % (f.path, doc.permalink))
- continue
-
- # Is this a post? If yes, then we must put this in the _posts
- # directory. This directory is treated specially with respect to
- # pagination and page generation.
- dest = f.short_path
- if doc.layout == "post":
- dest = "_posts/" + f.basename
- builder_content.append("echo Processing %s... >&2" % f.short_path)
- builder_content.append("mkdir -p $T/$(dirname %s)" % dest)
-
- # Construct the header dynamically. We include the title field from
- # the markdown itself, as this is required by the g3doc format. The
- # title will be injected by the web layout, however, so we don't
- # want it to appear in the document body.
- args = dict([(k, getattr(doc, k)) for k in dir(doc)])
- builder_content.append("title=\"$(grep -E '^# ' %s | head -n 1 | cut -d'#' -f2- || true)\"" % f.path)
- builder_content.append("cat >$T/%s <<EOF" % dest)
- builder_content.append("---")
- builder_content.append("title: $title")
- builder_content.append(header.format(**args))
- builder_content.append("---")
- builder_content.append("EOF")
-
- # To generate the final page, we need to strip out the title (which
- # was pulled above to generate the annotation in the frontmatter)
- # and substitute the [TOC] tag with the {% toc %} plugin tag. Note
- # that the pipeline here is important, as the grep will return
- # non-zero if the file is empty, but we ignore that within the
- # pipeline.
- builder_content.append("grep -v -E '^# ' %s | sed -e 's|^\\[TOC\\]$|- TOC\\n{:toc}|' >>$T/%s" %
- (f.path, dest))
-
- builder_content.append("declare -r filename=$(readlink -m %s)" % tarball.path)
- builder_content.append("(cd $T && tar -zcf \"${filename}\" .)\n")
- ctx.actions.write(builder, "\n".join(builder_content), is_executable = True)
-
- # Generate the tarball.
- ctx.actions.run(
- inputs = depset(ctx.files.deps),
- outputs = [tarball],
- progress_message = "Generating %s" % ctx.label,
- executable = builder,
- )
- return [DefaultInfo(
- files = depset([tarball]),
- )]
-
-docs = rule(
- implementation = _docs_impl,
- doc = "Construct a site tarball from doc dependencies.",
- attrs = {
- "deps": attr.label_list(
- doc = "All document dependencies.",
- ),
- },
-)
diff --git a/website/index.md b/website/index.md
deleted file mode 100644
index c6cd477c2..000000000
--- a/website/index.md
+++ /dev/null
@@ -1,50 +0,0 @@
-<div class="jumbotron jumbotron-fluid">
- <div class="container">
- <div class="row">
- <div class="col-md-3"></div>
- <div class="col-md-6">
- <p>gVisor is an <b>application kernel</b> for <b>containers</b> that provides efficient defense-in-depth anywhere.</p>
- <p style="margin-top: 20px;">
- <a class="btn" href="/docs/user_guide/install/">Get started&nbsp;<i class="fas fa-arrow-alt-circle-right ml-2"></i></a>
- <a class="btn" href="/docs/">Learn More&nbsp;<i class="fas fa-arrow-alt-circle-right ml-2"></i></a>
- </p>
- </div>
- <div class="col-md-3"></div>
- </div>
- </div>
-</div>
-
-<div class="container"> <!-- Full page container. -->
-
-<div class="row">
- <div class="col-md-4">
- <h4 id="seamless-security">Container-native Security <i class="fas fa-lock"></i></h4>
- <p>By providing each container with its own application kernel, gVisor
- limits the attack surface of the host. This protection does not limit
- functionality: gVisor runs unmodified binaries and integrates with container
- orchestration systems, such as Docker and Kubernetes, and supports features
- such as volumes and sidecars.</p>
- <a class="button" href="/docs/architecture_guide/security/">Read More &raquo;</a>
- </div>
-
- <div class="col-md-4">
- <h4 id="resource-efficiency">Resource Efficiency <i class="fas fa-feather-alt"></i></h4>
- <p>Containers are efficient because workloads of different shapes and sizes
- can be packed together by sharing host resources. gVisor uses host-native
- abstractions, such as threads and memory mappings, to co-operate with the
- host and enable the same resource model as native containers.</p>
- <a class="button" href="/docs/architecture_guide/resources/">Read More &raquo;</a>
- </div>
-
- <div class="col-md-4">
- <h4 id="platform-portability">Platform Portability <sup>&#9729;</sup>&#9729;</h4>
- <p>Modern infrastructure spans multiple cloud services and data centers,
- often with a mix of managed services and virtualized or traditional servers.
- The pluggable platform architecture of gVisor allows it to run anywhere,
- enabling consistent security policies across multiple environments without
- having to rearchitect your infrastructure.</p>
- <a class="button" href="/docs/architecture_guide/platforms/">Read More &raquo;</a>
- </div>
-</div>
-
-</div> <!-- container -->
diff --git a/website/performance/README.md b/website/performance/README.md
deleted file mode 100644
index 1758fc608..000000000
--- a/website/performance/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# Performance data
-
-This directory holds the CSVs generated by the now-removed benchmark-tools
-repository. A new, functionally equivalent
-[benchmark-tools][benchmark-tools] suite is available.
-
-In the future, these will be automatically posted to a cloud storage bucket and
-loaded dynamically. At that point, this directory will be removed.
-
-[benchmark-tools]: https://github.com/google/gvisor/tree/master/test/benchmarks
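
The removed performance CSVs that follow share a flat schema: most use the columns runtime, method, metric, result, while a few omit the method column or add a leading connections column. As a rough illustration only (not code from this repository), a Go sketch that loads one of these files from a hypothetical local path might look like this:

    package main

    import (
        "encoding/csv"
        "fmt"
        "log"
        "os"
    )

    func main() {
        // Hypothetical local path; any of the CSVs below follows the same pattern.
        f, err := os.Open("applications.csv")
        if err != nil {
            log.Fatalf("opening CSV: %v", err)
        }
        defer f.Close()

        rows, err := csv.NewReader(f).ReadAll()
        if err != nil {
            log.Fatalf("reading CSV: %v", err)
        }

        header := rows[0] // e.g. runtime,method,metric,result
        for _, row := range rows[1:] {
            for i, col := range header {
                fmt.Printf("%s=%s ", col, row[i])
            }
            fmt.Println()
        }
    }
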
diff --git a/website/performance/applications.csv b/website/performance/applications.csv
deleted file mode 100644
index 7b4661c60..000000000
--- a/website/performance/applications.csv
+++ /dev/null
@@ -1,13 +0,0 @@
-runtime,method,metric,result
-runc,http.node,transfer_rate,3814.85
-runc,http.node,latency,11.0
-runc,http.node,requests_per_second,885.81
-runc,http.ruby,transfer_rate,2874.38
-runc,http.ruby,latency,18.0
-runc,http.ruby,requests_per_second,539.97
-runsc,http.node,transfer_rate,1615.54
-runsc,http.node,latency,27.0
-runsc,http.node,requests_per_second,375.13
-runsc,http.ruby,transfer_rate,1382.71
-runsc,http.ruby,latency,38.0
-runsc,http.ruby,requests_per_second,259.75
diff --git a/website/performance/density.csv b/website/performance/density.csv
deleted file mode 100644
index 729b44941..000000000
--- a/website/performance/density.csv
+++ /dev/null
@@ -1,9 +0,0 @@
-runtime,method,metric,result
-runc,density.empty,memory_usage,4092149.76
-runc,density.node,memory_usage,76709888.0
-runc,density.ruby,memory_usage,45737000.96
-runsc,density.empty,memory_usage,23695032.32
-runsc,density.node,memory_usage,124076605.44
-runsc,density.ruby,memory_usage,106141777.92
-runc,density.redis,memory_usage,1055323750.4
-runsc,density.redis,memory_usage,1076686028.8
diff --git a/website/performance/ffmpeg.csv b/website/performance/ffmpeg.csv
deleted file mode 100644
index 08661c749..000000000
--- a/website/performance/ffmpeg.csv
+++ /dev/null
@@ -1,3 +0,0 @@
-runtime,metric,result
-runc,run_time,82.000625
-runsc,run_time,88.24018
diff --git a/website/performance/fio-tmpfs.csv b/website/performance/fio-tmpfs.csv
deleted file mode 100644
index 99777d2e4..000000000
--- a/website/performance/fio-tmpfs.csv
+++ /dev/null
@@ -1,9 +0,0 @@
-runtime,method,metric,result
-runc,fio.read,bandwidth,4240686080
-runc,fio.write,bandwidth,3029744640
-runsc,fio.read,bandwidth,2533604352
-runsc,fio.write,bandwidth,1207536640
-runc,fio.randread,bandwidth,1221472256
-runc,fio.randwrite,bandwidth,1046094848
-runsc,fio.randread,bandwidth,68940800
-runsc,fio.randwrite,bandwidth,67286016
diff --git a/website/performance/fio.csv b/website/performance/fio.csv
deleted file mode 100644
index 80d6ae289..000000000
--- a/website/performance/fio.csv
+++ /dev/null
@@ -1,9 +0,0 @@
-runtime,method,metric,result
-runc,fio.read,bandwidth,252253184
-runc,fio.write,bandwidth,457767936
-runsc,fio.read,bandwidth,252323840
-runsc,fio.write,bandwidth,431845376
-runc,fio.randread,bandwidth,5284864
-runc,fio.randwrite,bandwidth,107758592
-runsc,fio.randread,bandwidth,4403200
-runsc,fio.randwrite,bandwidth,69161984
diff --git a/website/performance/httpd100k.csv b/website/performance/httpd100k.csv
deleted file mode 100644
index e92c7e9e0..000000000
--- a/website/performance/httpd100k.csv
+++ /dev/null
@@ -1,17 +0,0 @@
-connections,runtime,metric,result
-1,runc,transfer_rate,565.35
-1,runc,latency,1.0
-1,runsc,transfer_rate,282.84
-1,runsc,latency,2.0
-5,runc,transfer_rate,3260.57
-5,runc,latency,1.0
-5,runsc,transfer_rate,832.69
-5,runsc,latency,3.0
-10,runc,transfer_rate,4672.01
-10,runc,latency,1.0
-10,runsc,transfer_rate,1095.47
-10,runsc,latency,4.0
-25,runc,transfer_rate,4964.14
-25,runc,latency,2.0
-25,runsc,transfer_rate,961.03
-25,runsc,latency,12.0
diff --git a/website/performance/httpd10240k.csv b/website/performance/httpd10240k.csv
deleted file mode 100644
index 60dbe7b40..000000000
--- a/website/performance/httpd10240k.csv
+++ /dev/null
@@ -1,17 +0,0 @@
-connections,runtime,metric,result
-1,runc,transfer_rate,674.05
-1,runc,latency,1.0
-1,runsc,transfer_rate,243.35
-1,runsc,latency,2.0
-5,runc,transfer_rate,3089.83
-5,runc,latency,1.0
-5,runsc,transfer_rate,981.91
-5,runsc,latency,2.0
-10,runc,transfer_rate,4701.2
-10,runc,latency,1.0
-10,runsc,transfer_rate,1135.08
-10,runsc,latency,4.0
-25,runc,transfer_rate,5021.36
-25,runc,latency,2.0
-25,runsc,transfer_rate,963.26
-25,runsc,latency,12.0
diff --git a/website/performance/iperf.csv b/website/performance/iperf.csv
deleted file mode 100644
index 1f3b41aec..000000000
--- a/website/performance/iperf.csv
+++ /dev/null
@@ -1,5 +0,0 @@
-runtime,method,metric,result
-runc,network.download,bandwidth,746386000.0
-runc,network.upload,bandwidth,709808000.0
-runsc,network.download,bandwidth,640303500.0
-runsc,network.upload,bandwidth,482254000.0
diff --git a/website/performance/redis.csv b/website/performance/redis.csv
deleted file mode 100644
index 369b16712..000000000
--- a/website/performance/redis.csv
+++ /dev/null
@@ -1,35 +0,0 @@
-runtime,method,metric,result
-runc,PING_INLINE,requests_per_second,30525.03
-runc,PING_BULK,requests_per_second,30293.85
-runc,SET,requests_per_second,30257.19
-runc,GET,requests_per_second,30312.21
-runc,INCR,requests_per_second,30525.03
-runc,LPUSH,requests_per_second,30712.53
-runc,RPUSH,requests_per_second,30459.95
-runc,LPOP,requests_per_second,30367.45
-runc,RPOP,requests_per_second,30665.44
-runc,SADD,requests_per_second,30030.03
-runc,HSET,requests_per_second,30656.04
-runc,SPOP,requests_per_second,29940.12
-runc,LRANGE_100,requests_per_second,24224.81
-runc,LRANGE_300,requests_per_second,14302.06
-runc,LRANGE_500,requests_per_second,11728.83
-runc,LRANGE_600,requests_per_second,9900.99
-runc,MSET,requests_per_second,30120.48
-runsc,PING_INLINE,requests_per_second,14528.55
-runsc,PING_BULK,requests_per_second,15627.44
-runsc,SET,requests_per_second,15403.57
-runsc,GET,requests_per_second,15325.67
-runsc,INCR,requests_per_second,15269.51
-runsc,LPUSH,requests_per_second,15172.2
-runsc,RPUSH,requests_per_second,15117.16
-runsc,LPOP,requests_per_second,15257.86
-runsc,RPOP,requests_per_second,15188.33
-runsc,SADD,requests_per_second,15432.1
-runsc,HSET,requests_per_second,15163.0
-runsc,SPOP,requests_per_second,15561.78
-runsc,LRANGE_100,requests_per_second,13365.41
-runsc,LRANGE_300,requests_per_second,9520.18
-runsc,LRANGE_500,requests_per_second,8248.78
-runsc,LRANGE_600,requests_per_second,6544.07
-runsc,MSET,requests_per_second,14367.82
diff --git a/website/performance/startup.csv b/website/performance/startup.csv
deleted file mode 100644
index 6bad00df6..000000000
--- a/website/performance/startup.csv
+++ /dev/null
@@ -1,7 +0,0 @@
-runtime,method,metric,result
-runc,startup.empty,startup_time_ms,1193.10768
-runc,startup.node,startup_time_ms,2557.95336
-runc,startup.ruby,startup_time_ms,2530.12624
-runsc,startup.empty,startup_time_ms,1144.1775
-runsc,startup.node,startup_time_ms,2441.90284
-runsc,startup.ruby,startup_time_ms,2455.69882
diff --git a/website/performance/sysbench-cpu.csv b/website/performance/sysbench-cpu.csv
deleted file mode 100644
index f4e6b69a6..000000000
--- a/website/performance/sysbench-cpu.csv
+++ /dev/null
@@ -1,3 +0,0 @@
-runtime,metric,result
-runc,cpu_events_per_second,103.62
-runsc,cpu_events_per_second,103.21
diff --git a/website/performance/sysbench-memory.csv b/website/performance/sysbench-memory.csv
deleted file mode 100644
index 626ff4994..000000000
--- a/website/performance/sysbench-memory.csv
+++ /dev/null
@@ -1,3 +0,0 @@
-runtime,metric,result
-runc,memory_ops_per_second,13098.73
-runsc,memory_ops_per_second,13107.44
diff --git a/website/performance/syscall.csv b/website/performance/syscall.csv
deleted file mode 100644
index 40bdce49e..000000000
--- a/website/performance/syscall.csv
+++ /dev/null
@@ -1,4 +0,0 @@
-runtime,metric,result
-runc,syscall_time_ns,1939.0
-runsc,syscall_time_ns,38219.0
-runsc-kvm,syscall_time_ns,763.0
diff --git a/website/performance/tensorflow.csv b/website/performance/tensorflow.csv
deleted file mode 100644
index 03498bef0..000000000
--- a/website/performance/tensorflow.csv
+++ /dev/null
@@ -1,3 +0,0 @@
-runtime,metric,result
-runc,run_time,207.1118165
-runsc,run_time,244.473401